summary refs log tree commit diff
path: root/nova
diff options
context:
space:
mode:
authorSean Dague <sean@dague.net>2014-11-07 14:27:03 +0100
committerSean Dague <sean@dague.net>2014-11-12 15:31:08 -0500
commit89cd6a0c493e26b5a9e017c99d731464292abbaf (patch)
treec2bf790d1684cd539b820247113492495123a163 /nova
parent5c8bbaafef590e4d346a03051a0ba55c8be26c5c (diff)
downloadnova-89cd6a0c493e26b5a9e017c99d731464292abbaf.tar.gz
move all tests to nova/tests/unit
As part of the split of functional and unit tests, we need to isolate the unit tests into a separate directory so that we can have multiple test targets in a sane way. Part of bp:functional-tests-for-nova. Change-Id: Id42ba373c1bda6a312b673ab2b489ca56da8c628
Diffstat (limited to 'nova')
-rw-r--r--nova/test.py4
-rw-r--r--nova/tests/__init__.py49
-rw-r--r--nova/tests/api/ec2/test_api.py635
-rw-r--r--nova/tests/api/ec2/test_cinder_cloud.py1096
-rw-r--r--nova/tests/api/ec2/test_cloud.py3255
-rw-r--r--nova/tests/api/ec2/test_ec2_validate.py277
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_admin_actions.py734
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_admin_password.py111
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_aggregates.py670
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py455
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_availability_zone.py512
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_baremetal_nodes.py159
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_block_device_mapping.py359
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_block_device_mapping_v1.py421
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_cells.py698
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_certificates.py140
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_cloudpipe.py210
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py99
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_config_drive.py260
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_console_auth_tokens.py103
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_console_output.py171
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_consoles.py587
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_createserverext.py387
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_deferred_delete.py147
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_disk_config.py449
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_evacuate.py268
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_extended_availability_zone.py184
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_extended_evacuate_find_host.py114
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_extended_hypervisors.py101
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_extended_ips.py189
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_extended_ips_mac.py196
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py148
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_extended_status.py148
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_extended_virtual_interfaces_net.py123
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_extended_volumes.py124
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_fixed_ips.py256
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_flavor_access.py402
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py127
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_flavor_manage.py465
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py127
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_flavor_swap.py126
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_flavorextradata.py127
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py403
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_floating_ip_dns.py412
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_floating_ip_pools.py83
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_floating_ips.py853
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_floating_ips_bulk.py139
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_fping.py106
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_hide_server_addresses.py172
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_hosts.py471
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_hypervisor_status.py92
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_hypervisors.py596
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_image_size.py138
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_instance_actions.py327
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py210
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_keypairs.py497
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_migrate_server.py231
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_multinic.py204
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_networks.py610
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py918
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_quota_classes.py222
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_quotas.py648
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_rescue.py270
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py220
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_security_group_default_rules.py515
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_security_groups.py1767
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py132
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_server_group_quotas.py188
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_server_groups.py521
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_server_password.py94
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_server_start_stop.py183
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_server_usage.py159
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_services.py576
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_shelve.py148
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py539
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_snapshots.py209
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_tenant_networks.py76
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py127
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_volumes.py1083
-rw-r--r--nova/tests/api/openstack/compute/plugins/v3/admin_only_action_common.py263
-rw-r--r--nova/tests/api/openstack/compute/plugins/v3/test_access_ips.py383
-rw-r--r--nova/tests/api/openstack/compute/plugins/v3/test_console_auth_tokens.py95
-rw-r--r--nova/tests/api/openstack/compute/plugins/v3/test_consoles.py270
-rw-r--r--nova/tests/api/openstack/compute/plugins/v3/test_create_backup.py261
-rw-r--r--nova/tests/api/openstack/compute/plugins/v3/test_extended_volumes.py387
-rw-r--r--nova/tests/api/openstack/compute/plugins/v3/test_extension_info.py98
-rw-r--r--nova/tests/api/openstack/compute/plugins/v3/test_lock_server.py57
-rw-r--r--nova/tests/api/openstack/compute/plugins/v3/test_multiple_create.py547
-rw-r--r--nova/tests/api/openstack/compute/plugins/v3/test_pause_server.py60
-rw-r--r--nova/tests/api/openstack/compute/plugins/v3/test_pci.py236
-rw-r--r--nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py1131
-rw-r--r--nova/tests/api/openstack/compute/plugins/v3/test_server_password.py80
-rw-r--r--nova/tests/api/openstack/compute/plugins/v3/test_servers.py3352
-rw-r--r--nova/tests/api/openstack/compute/plugins/v3/test_services.py453
-rw-r--r--nova/tests/api/openstack/compute/plugins/v3/test_suspend_server.py48
-rw-r--r--nova/tests/api/openstack/compute/plugins/v3/test_user_data.py195
-rw-r--r--nova/tests/api/openstack/compute/test_api.py186
-rw-r--r--nova/tests/api/openstack/compute/test_auth.py61
-rw-r--r--nova/tests/api/openstack/compute/test_consoles.py293
-rw-r--r--nova/tests/api/openstack/compute/test_extensions.py747
-rw-r--r--nova/tests/api/openstack/compute/test_flavors.py943
-rw-r--r--nova/tests/api/openstack/compute/test_image_metadata.py366
-rw-r--r--nova/tests/api/openstack/compute/test_images.py1046
-rw-r--r--nova/tests/api/openstack/compute/test_limits.py1016
-rw-r--r--nova/tests/api/openstack/compute/test_server_actions.py1556
-rw-r--r--nova/tests/api/openstack/compute/test_server_metadata.py771
-rw-r--r--nova/tests/api/openstack/compute/test_servers.py4624
-rw-r--r--nova/tests/api/openstack/compute/test_urlmap.py171
-rw-r--r--nova/tests/api/openstack/compute/test_v3_auth.py62
-rw-r--r--nova/tests/api/openstack/compute/test_versions.py797
-rw-r--r--nova/tests/api/openstack/fakes.py662
-rw-r--r--nova/tests/api/openstack/test_common.py764
-rw-r--r--nova/tests/api/openstack/test_mapper.py46
-rw-r--r--nova/tests/api/openstack/test_wsgi.py1244
-rw-r--r--nova/tests/api/openstack/test_xmlutil.py948
-rw-r--r--nova/tests/cells/fakes.py207
-rw-r--r--nova/tests/cells/test_cells_filters.py173
-rw-r--r--nova/tests/cells/test_cells_manager.py808
-rw-r--r--nova/tests/cells/test_cells_messaging.py2129
-rw-r--r--nova/tests/cells/test_cells_rpc_driver.py207
-rw-r--r--nova/tests/cells/test_cells_rpcapi.py760
-rw-r--r--nova/tests/cells/test_cells_scheduler.py530
-rw-r--r--nova/tests/compute/monitors/test_monitors.py144
-rw-r--r--nova/tests/compute/test_claims.py320
-rw-r--r--nova/tests/compute/test_compute.py11415
-rw-r--r--nova/tests/compute/test_compute_api.py2635
-rw-r--r--nova/tests/compute/test_compute_cells.py332
-rw-r--r--nova/tests/compute/test_compute_mgr.py3053
-rw-r--r--nova/tests/compute/test_compute_utils.py827
-rw-r--r--nova/tests/compute/test_compute_xen.py67
-rw-r--r--nova/tests/compute/test_host_api.py480
-rw-r--r--nova/tests/compute/test_keypairs.py221
-rw-r--r--nova/tests/compute/test_resource_tracker.py1539
-rw-r--r--nova/tests/compute/test_resources.py344
-rw-r--r--nova/tests/compute/test_rpcapi.py486
-rw-r--r--nova/tests/compute/test_shelve.py414
-rw-r--r--nova/tests/conductor/tasks/test_live_migrate.py384
-rw-r--r--nova/tests/conductor/test_conductor.py2151
-rw-r--r--nova/tests/conf_fixture.py64
-rw-r--r--nova/tests/db/test_db_api.py7517
-rw-r--r--nova/tests/db/test_migration_utils.py256
-rw-r--r--nova/tests/fake_hosts.py37
-rw-r--r--nova/tests/fake_loadables/fake_loadable1.py44
-rw-r--r--nova/tests/fake_loadables/fake_loadable2.py39
-rw-r--r--nova/tests/fake_network.py457
-rw-r--r--nova/tests/image/test_fake.py117
-rw-r--r--nova/tests/image/test_s3.py267
-rw-r--r--nova/tests/integrated/api/client.py304
-rw-r--r--nova/tests/integrated/api_samples/README.rst29
-rw-r--r--nova/tests/integrated/api_samples_test_base.py323
-rw-r--r--nova/tests/integrated/integrated_helpers.py160
-rw-r--r--nova/tests/integrated/test_api_samples.py4433
-rw-r--r--nova/tests/integrated/test_extensions.py42
-rw-r--r--nova/tests/integrated/test_login.py36
-rw-r--r--nova/tests/integrated/test_servers.py522
-rw-r--r--nova/tests/integrated/test_xml.py51
-rw-r--r--nova/tests/integrated/v3/api_sample_base.py78
-rw-r--r--nova/tests/integrated/v3/test_access_ips.py93
-rw-r--r--nova/tests/integrated/v3/test_admin_actions.py46
-rw-r--r--nova/tests/integrated/v3/test_admin_password.py29
-rw-r--r--nova/tests/integrated/v3/test_agents.py98
-rw-r--r--nova/tests/integrated/v3/test_aggregates.py80
-rw-r--r--nova/tests/integrated/v3/test_attach_interfaces.py166
-rw-r--r--nova/tests/integrated/v3/test_availability_zone.py49
-rw-r--r--nova/tests/integrated/v3/test_cells.py107
-rw-r--r--nova/tests/integrated/v3/test_certificates.py31
-rw-r--r--nova/tests/integrated/v3/test_cloudpipe.py80
-rw-r--r--nova/tests/integrated/v3/test_config_drive.py48
-rw-r--r--nova/tests/integrated/v3/test_console_auth_tokens.py51
-rw-r--r--nova/tests/integrated/v3/test_console_output.py27
-rw-r--r--nova/tests/integrated/v3/test_consoles.py55
-rw-r--r--nova/tests/integrated/v3/test_create_backup.py38
-rw-r--r--nova/tests/integrated/v3/test_deferred_delete.py42
-rw-r--r--nova/tests/integrated/v3/test_disk_config.py80
-rw-r--r--nova/tests/integrated/v3/test_evacuate.py91
-rw-r--r--nova/tests/integrated/v3/test_extended_availability_zone.py34
-rw-r--r--nova/tests/integrated/v3/test_extended_server_attributes.py42
-rw-r--r--nova/tests/integrated/v3/test_extended_status.py35
-rw-r--r--nova/tests/integrated/v3/test_extended_volumes.py151
-rw-r--r--nova/tests/integrated/v3/test_extension_info.py71
-rw-r--r--nova/tests/integrated/v3/test_fixed_ips.py109
-rw-r--r--nova/tests/integrated/v3/test_flavor_access.py89
-rw-r--r--nova/tests/integrated/v3/test_flavor_extraspecs.py62
-rw-r--r--nova/tests/integrated/v3/test_flavor_manage.py43
-rw-r--r--nova/tests/integrated/v3/test_flavor_rxtx.py46
-rw-r--r--nova/tests/integrated/v3/test_flavors.py35
-rw-r--r--nova/tests/integrated/v3/test_floating_ip_dns.py91
-rw-r--r--nova/tests/integrated/v3/test_floating_ip_pools.py35
-rw-r--r--nova/tests/integrated/v3/test_floating_ips_bulk.py86
-rw-r--r--nova/tests/integrated/v3/test_fping.py45
-rw-r--r--nova/tests/integrated/v3/test_hide_server_addresses.py39
-rw-r--r--nova/tests/integrated/v3/test_hosts.py57
-rw-r--r--nova/tests/integrated/v3/test_hypervisors.py69
-rw-r--r--nova/tests/integrated/v3/test_image_size.py37
-rw-r--r--nova/tests/integrated/v3/test_images.py85
-rw-r--r--nova/tests/integrated/v3/test_instance_actions.py84
-rw-r--r--nova/tests/integrated/v3/test_keypairs.py72
-rw-r--r--nova/tests/integrated/v3/test_lock_server.py41
-rw-r--r--nova/tests/integrated/v3/test_migrate_server.py71
-rw-r--r--nova/tests/integrated/v3/test_migrations.py72
-rw-r--r--nova/tests/integrated/v3/test_multinic.py49
-rw-r--r--nova/tests/integrated/v3/test_multiple_create.py45
-rw-r--r--nova/tests/integrated/v3/test_networks.py73
-rw-r--r--nova/tests/integrated/v3/test_networks_associate.py76
-rw-r--r--nova/tests/integrated/v3/test_pause_server.py41
-rw-r--r--nova/tests/integrated/v3/test_pci.py182
-rw-r--r--nova/tests/integrated/v3/test_quota_sets.py70
-rw-r--r--nova/tests/integrated/v3/test_remote_consoles.py70
-rw-r--r--nova/tests/integrated/v3/test_rescue.py82
-rw-r--r--nova/tests/integrated/v3/test_scheduler_hints.py32
-rw-r--r--nova/tests/integrated/v3/test_security_group_default_rules.py40
-rw-r--r--nova/tests/integrated/v3/test_security_groups.py166
-rw-r--r--nova/tests/integrated/v3/test_server_diagnostics.py27
-rw-r--r--nova/tests/integrated/v3/test_server_external_events.py40
-rw-r--r--nova/tests/integrated/v3/test_server_groups.py66
-rw-r--r--nova/tests/integrated/v3/test_server_metadata.py80
-rw-r--r--nova/tests/integrated/v3/test_server_usage.py39
-rw-r--r--nova/tests/integrated/v3/test_servers.py188
-rw-r--r--nova/tests/integrated/v3/test_servers_ips.py35
-rw-r--r--nova/tests/integrated/v3/test_services.py87
-rw-r--r--nova/tests/integrated/v3/test_shelve.py50
-rw-r--r--nova/tests/integrated/v3/test_simple_tenant_usage.py61
-rw-r--r--nova/tests/integrated/v3/test_suspend_server.py41
-rw-r--r--nova/tests/integrated/v3/test_tenant_networks.py61
-rw-r--r--nova/tests/integrated/v3/test_used_limits.py34
-rw-r--r--nova/tests/integrated/v3/test_user_data.py36
-rw-r--r--nova/tests/integrated/v3/test_volumes.py184
-rw-r--r--nova/tests/keymgr/test_conf_key_mgr.py59
-rw-r--r--nova/tests/keymgr/test_mock_key_mgr.py102
-rw-r--r--nova/tests/keymgr/test_not_implemented_key_mgr.py47
-rw-r--r--nova/tests/keymgr/test_single_key_mgr.py72
-rw-r--r--nova/tests/network/test_api.py589
-rw-r--r--nova/tests/network/test_manager.py3358
-rw-r--r--nova/tests/network/test_network_info.py800
-rw-r--r--nova/tests/network/test_neutronv2.py3194
-rw-r--r--nova/tests/network/test_rpcapi.py353
-rw-r--r--nova/tests/objects/test_agent.py103
-rw-r--r--nova/tests/objects/test_aggregate.py199
-rw-r--r--nova/tests/objects/test_bandwidth_usage.py124
-rw-r--r--nova/tests/objects/test_block_device.py333
-rw-r--r--nova/tests/objects/test_compute_node.py240
-rw-r--r--nova/tests/objects/test_dns_domain.py85
-rw-r--r--nova/tests/objects/test_ec2.py192
-rw-r--r--nova/tests/objects/test_external_event.py46
-rw-r--r--nova/tests/objects/test_fixed_ip.py339
-rw-r--r--nova/tests/objects/test_flavor.py253
-rw-r--r--nova/tests/objects/test_floating_ip.py259
-rw-r--r--nova/tests/objects/test_hv_spec.py58
-rw-r--r--nova/tests/objects/test_instance.py1196
-rw-r--r--nova/tests/objects/test_instance_action.py365
-rw-r--r--nova/tests/objects/test_instance_fault.py126
-rw-r--r--nova/tests/objects/test_instance_group.py350
-rw-r--r--nova/tests/objects/test_instance_info_cache.py117
-rw-r--r--nova/tests/objects/test_instance_numa_topology.py78
-rw-r--r--nova/tests/objects/test_instance_pci_requests.py191
-rw-r--r--nova/tests/objects/test_keypair.py109
-rw-r--r--nova/tests/objects/test_migration.py184
-rw-r--r--nova/tests/objects/test_network.py232
-rw-r--r--nova/tests/objects/test_network_request.py102
-rw-r--r--nova/tests/objects/test_objects.py1126
-rw-r--r--nova/tests/objects/test_pci_device.py254
-rw-r--r--nova/tests/objects/test_quotas.py167
-rw-r--r--nova/tests/objects/test_security_group.py175
-rw-r--r--nova/tests/objects/test_security_group_rule.py95
-rw-r--r--nova/tests/objects/test_service.py226
-rw-r--r--nova/tests/objects/test_virtual_interface.py126
-rw-r--r--nova/tests/pci/test_manager.py364
-rw-r--r--nova/tests/pci/test_stats.py267
-rw-r--r--nova/tests/policy_fixture.py73
-rw-r--r--nova/tests/scheduler/filters/test_affinity_filters.py258
-rw-r--r--nova/tests/scheduler/filters/test_aggregate_image_properties_isolation_filters.py98
-rw-r--r--nova/tests/scheduler/filters/test_aggregate_instance_extra_specs_filters.py72
-rw-r--r--nova/tests/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py53
-rw-r--r--nova/tests/scheduler/filters/test_availability_zone_filters.py48
-rw-r--r--nova/tests/scheduler/filters/test_compute_capabilities_filters.py99
-rw-r--r--nova/tests/scheduler/filters/test_compute_filters.py50
-rw-r--r--nova/tests/scheduler/filters/test_core_filters.py87
-rw-r--r--nova/tests/scheduler/filters/test_disk_filters.py100
-rw-r--r--nova/tests/scheduler/filters/test_image_props_filters.py189
-rw-r--r--nova/tests/scheduler/filters/test_io_ops_filters.py63
-rw-r--r--nova/tests/scheduler/filters/test_isolated_hosts_filter.py90
-rw-r--r--nova/tests/scheduler/filters/test_json_filters.py289
-rw-r--r--nova/tests/scheduler/filters/test_metrics_filters.py34
-rw-r--r--nova/tests/scheduler/filters/test_num_instances_filters.py63
-rw-r--r--nova/tests/scheduler/filters/test_numa_topology_filters.py151
-rw-r--r--nova/tests/scheduler/filters/test_pci_passthrough_filters.py67
-rw-r--r--nova/tests/scheduler/filters/test_ram_filters.py89
-rw-r--r--nova/tests/scheduler/filters/test_retry_filters.py46
-rw-r--r--nova/tests/scheduler/filters/test_trusted_filters.py203
-rw-r--r--nova/tests/scheduler/filters/test_type_filters.py56
-rw-r--r--nova/tests/scheduler/test_caching_scheduler.py199
-rw-r--r--nova/tests/scheduler/test_chance_scheduler.py182
-rw-r--r--nova/tests/scheduler/test_filter_scheduler.py596
-rw-r--r--nova/tests/scheduler/test_host_filters.py38
-rw-r--r--nova/tests/scheduler/test_host_manager.py545
-rw-r--r--nova/tests/scheduler/test_ironic_host_manager.py430
-rw-r--r--nova/tests/scheduler/test_scheduler.py378
-rw-r--r--nova/tests/scheduler/test_scheduler_utils.py314
-rw-r--r--nova/tests/scheduler/test_weights.py338
-rw-r--r--nova/tests/servicegroup/test_db_servicegroup.py144
-rw-r--r--nova/tests/servicegroup/test_mc_servicegroup.py213
-rw-r--r--nova/tests/servicegroup/test_zk_driver.py65
-rw-r--r--nova/tests/test_availability_zones.py255
-rw-r--r--nova/tests/test_bdm.py248
-rw-r--r--nova/tests/test_block_device.py604
-rw-r--r--nova/tests/test_configdrive2.py104
-rw-r--r--nova/tests/test_loadables.py113
-rw-r--r--nova/tests/test_matchers.py349
-rw-r--r--nova/tests/test_metadata.py865
-rw-r--r--nova/tests/test_notifications.py394
-rw-r--r--nova/tests/test_nova_manage.py467
-rw-r--r--nova/tests/test_policy.py231
-rw-r--r--nova/tests/test_quota.py2765
-rw-r--r--nova/tests/test_service.py370
-rw-r--r--nova/tests/test_test_utils.py70
-rw-r--r--nova/tests/test_utils.py981
-rw-r--r--nova/tests/test_wsgi.py263
-rw-r--r--nova/tests/unit/CA/cacert.pem (renamed from nova/tests/CA/cacert.pem)0
-rw-r--r--nova/tests/unit/CA/private/cakey.pem (renamed from nova/tests/CA/private/cakey.pem)0
-rw-r--r--nova/tests/unit/README.rst (renamed from nova/tests/README.rst)0
-rw-r--r--nova/tests/unit/__init__.py49
-rw-r--r--nova/tests/unit/api/__init__.py (renamed from nova/tests/api/__init__.py)0
-rw-r--r--nova/tests/unit/api/ec2/__init__.py (renamed from nova/tests/api/ec2/__init__.py)0
-rw-r--r--nova/tests/unit/api/ec2/public_key/dummy.fingerprint (renamed from nova/tests/api/ec2/public_key/dummy.fingerprint)0
-rw-r--r--nova/tests/unit/api/ec2/public_key/dummy.pub (renamed from nova/tests/api/ec2/public_key/dummy.pub)0
-rw-r--r--nova/tests/unit/api/ec2/test_api.py635
-rw-r--r--nova/tests/unit/api/ec2/test_apirequest.py (renamed from nova/tests/api/ec2/test_apirequest.py)0
-rw-r--r--nova/tests/unit/api/ec2/test_cinder_cloud.py1096
-rw-r--r--nova/tests/unit/api/ec2/test_cloud.py3255
-rw-r--r--nova/tests/unit/api/ec2/test_ec2_validate.py277
-rw-r--r--nova/tests/unit/api/ec2/test_ec2utils.py (renamed from nova/tests/api/ec2/test_ec2utils.py)0
-rw-r--r--nova/tests/unit/api/ec2/test_error_response.py (renamed from nova/tests/api/ec2/test_error_response.py)0
-rw-r--r--nova/tests/unit/api/ec2/test_faults.py (renamed from nova/tests/api/ec2/test_faults.py)0
-rw-r--r--nova/tests/unit/api/ec2/test_middleware.py (renamed from nova/tests/api/ec2/test_middleware.py)0
-rw-r--r--nova/tests/unit/api/openstack/__init__.py (renamed from nova/tests/api/openstack/__init__.py)0
-rw-r--r--nova/tests/unit/api/openstack/common.py (renamed from nova/tests/api/openstack/common.py)0
-rw-r--r--nova/tests/unit/api/openstack/compute/__init__.py (renamed from nova/tests/api/openstack/compute/__init__.py)0
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/__init__.py (renamed from nova/tests/api/openstack/compute/contrib/__init__.py)0
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_admin_actions.py734
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_admin_password.py111
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_agents.py (renamed from nova/tests/api/openstack/compute/contrib/test_agents.py)0
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_aggregates.py670
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_attach_interfaces.py455
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_availability_zone.py512
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_baremetal_nodes.py159
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_block_device_mapping.py359
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_block_device_mapping_v1.py421
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_cells.py698
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_certificates.py140
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_cloudpipe.py210
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_cloudpipe_update.py99
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_config_drive.py260
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_console_auth_tokens.py103
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_console_output.py171
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_consoles.py587
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_createserverext.py387
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_deferred_delete.py147
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_disk_config.py449
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_evacuate.py268
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_extended_availability_zone.py184
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_extended_evacuate_find_host.py114
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_extended_hypervisors.py101
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_extended_ips.py189
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_extended_ips_mac.py196
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_extended_rescue_with_image.py (renamed from nova/tests/api/openstack/compute/contrib/test_extended_rescue_with_image.py)0
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_extended_server_attributes.py148
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_extended_status.py148
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_extended_virtual_interfaces_net.py123
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_extended_volumes.py124
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_fixed_ips.py256
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_flavor_access.py402
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_flavor_disabled.py127
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_flavor_manage.py465
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_flavor_rxtx.py127
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_flavor_swap.py126
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_flavorextradata.py127
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_flavors_extra_specs.py403
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_floating_ip_dns.py412
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_floating_ip_pools.py83
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_floating_ips.py853
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_floating_ips_bulk.py139
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_fping.py106
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_hide_server_addresses.py172
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_hosts.py471
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_hypervisor_status.py92
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_hypervisors.py596
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_image_size.py138
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_instance_actions.py327
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_instance_usage_audit_log.py210
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_keypairs.py497
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_migrate_server.py231
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_migrations.py (renamed from nova/tests/api/openstack/compute/contrib/test_migrations.py)0
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_multinic.py204
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_networks.py610
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_neutron_security_groups.py918
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_quota_classes.py222
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_quotas.py648
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_rescue.py270
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_scheduler_hints.py220
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_security_group_default_rules.py515
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_security_groups.py1767
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_server_diagnostics.py132
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_server_external_events.py (renamed from nova/tests/api/openstack/compute/contrib/test_server_external_events.py)0
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_server_group_quotas.py188
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_server_groups.py521
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_server_password.py94
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_server_start_stop.py183
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_server_usage.py159
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_services.py576
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_shelve.py148
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_simple_tenant_usage.py539
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_snapshots.py209
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_tenant_networks.py76
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_used_limits.py (renamed from nova/tests/api/openstack/compute/contrib/test_used_limits.py)0
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_virtual_interfaces.py127
-rw-r--r--nova/tests/unit/api/openstack/compute/contrib/test_volumes.py1083
-rw-r--r--nova/tests/unit/api/openstack/compute/extensions/__init__.py (renamed from nova/tests/api/openstack/compute/extensions/__init__.py)0
-rw-r--r--nova/tests/unit/api/openstack/compute/extensions/foxinsocks.py (renamed from nova/tests/api/openstack/compute/extensions/foxinsocks.py)0
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/__init__.py (renamed from nova/tests/api/openstack/compute/plugins/__init__.py)0
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/__init__.py (renamed from nova/tests/api/openstack/compute/plugins/v3/__init__.py)0
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/admin_only_action_common.py263
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/test_access_ips.py383
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/test_console_auth_tokens.py95
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/test_consoles.py270
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/test_create_backup.py261
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/test_extended_volumes.py387
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/test_extension_info.py98
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/test_lock_server.py57
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/test_migrations.py (renamed from nova/tests/api/openstack/compute/plugins/v3/test_migrations.py)0
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/test_multiple_create.py547
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/test_pause_server.py60
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/test_pci.py236
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/test_server_actions.py1131
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/test_server_external_events.py (renamed from nova/tests/api/openstack/compute/plugins/v3/test_server_external_events.py)0
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/test_server_password.py80
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/test_servers.py3353
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/test_services.py453
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/test_suspend_server.py48
-rw-r--r--nova/tests/unit/api/openstack/compute/plugins/v3/test_user_data.py195
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/__init__.py (renamed from nova/tests/api/openstack/compute/schemas/__init__.py)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/test_schemas.py (renamed from nova/tests/api/openstack/compute/schemas/test_schemas.py)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/mixed.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/flavors/invalid/mixed.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/partial.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/flavors/invalid/partial.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/partial2.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/flavors/invalid/partial2.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/empty.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/flavors/valid/empty.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/full.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/flavors/valid/full.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/refs.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/flavors/valid/refs.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/mixed.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/images/invalid/mixed.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/no-metadata.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/images/invalid/no-metadata.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/partial.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/images/invalid/partial.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/partial2.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/images/invalid/partial2.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/empty.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/images/valid/empty.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/full.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/images/valid/full.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/refs.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/images/valid/refs.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/mixed.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/servers/invalid/mixed.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/servers/invalid/partial.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial2.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/servers/invalid/partial2.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial3.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/servers/invalid/partial3.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/detailed.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/servers/valid/detailed.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/empty.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/servers/valid/empty.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/full.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/servers/valid/full.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/refs.xml (renamed from nova/tests/api/openstack/compute/schemas/v1.1/servers/valid/refs.xml)0
-rw-r--r--nova/tests/unit/api/openstack/compute/test_api.py186
-rw-r--r--nova/tests/unit/api/openstack/compute/test_auth.py61
-rw-r--r--nova/tests/unit/api/openstack/compute/test_consoles.py293
-rw-r--r--nova/tests/unit/api/openstack/compute/test_extensions.py747
-rw-r--r--nova/tests/unit/api/openstack/compute/test_flavors.py943
-rw-r--r--nova/tests/unit/api/openstack/compute/test_image_metadata.py366
-rw-r--r--nova/tests/unit/api/openstack/compute/test_images.py1046
-rw-r--r--nova/tests/unit/api/openstack/compute/test_limits.py1016
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_actions.py1556
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_metadata.py771
-rw-r--r--nova/tests/unit/api/openstack/compute/test_servers.py4625
-rw-r--r--nova/tests/unit/api/openstack/compute/test_urlmap.py171
-rw-r--r--nova/tests/unit/api/openstack/compute/test_v21_extensions.py (renamed from nova/tests/api/openstack/compute/test_v21_extensions.py)0
-rw-r--r--nova/tests/unit/api/openstack/compute/test_v3_auth.py62
-rw-r--r--nova/tests/unit/api/openstack/compute/test_v3_extensions.py (renamed from nova/tests/api/openstack/compute/test_v3_extensions.py)0
-rw-r--r--nova/tests/unit/api/openstack/compute/test_versions.py797
-rw-r--r--nova/tests/unit/api/openstack/fakes.py662
-rw-r--r--nova/tests/unit/api/openstack/test_common.py764
-rw-r--r--nova/tests/unit/api/openstack/test_faults.py (renamed from nova/tests/api/openstack/test_faults.py)0
-rw-r--r--nova/tests/unit/api/openstack/test_mapper.py46
-rw-r--r--nova/tests/unit/api/openstack/test_wsgi.py1244
-rw-r--r--nova/tests/unit/api/openstack/test_xmlutil.py948
-rw-r--r--nova/tests/unit/api/test_auth.py (renamed from nova/tests/api/test_auth.py)0
-rw-r--r--nova/tests/unit/api/test_compute_req_id.py (renamed from nova/tests/api/test_compute_req_id.py)0
-rw-r--r--nova/tests/unit/api/test_validator.py (renamed from nova/tests/api/test_validator.py)0
-rw-r--r--nova/tests/unit/api/test_wsgi.py (renamed from nova/tests/api/test_wsgi.py)0
-rw-r--r--nova/tests/unit/bundle/1mb.manifest.xml (renamed from nova/tests/bundle/1mb.manifest.xml)0
-rw-r--r--nova/tests/unit/bundle/1mb.no_kernel_or_ramdisk.manifest.xml (renamed from nova/tests/bundle/1mb.no_kernel_or_ramdisk.manifest.xml)0
-rw-r--r--nova/tests/unit/bundle/1mb.part.0 (renamed from nova/tests/bundle/1mb.part.0)bin1024 -> 1024 bytes
-rw-r--r--nova/tests/unit/bundle/1mb.part.1 (renamed from nova/tests/bundle/1mb.part.1)0
-rw-r--r--nova/tests/unit/cast_as_call.py (renamed from nova/tests/cast_as_call.py)0
-rw-r--r--nova/tests/unit/cells/__init__.py (renamed from nova/tests/cells/__init__.py)0
-rw-r--r--nova/tests/unit/cells/fakes.py207
-rw-r--r--nova/tests/unit/cells/test_cells_filters.py173
-rw-r--r--nova/tests/unit/cells/test_cells_manager.py808
-rw-r--r--nova/tests/unit/cells/test_cells_messaging.py2129
-rw-r--r--nova/tests/unit/cells/test_cells_rpc_driver.py207
-rw-r--r--nova/tests/unit/cells/test_cells_rpcapi.py760
-rw-r--r--nova/tests/unit/cells/test_cells_scheduler.py530
-rw-r--r--nova/tests/unit/cells/test_cells_state_manager.py (renamed from nova/tests/cells/test_cells_state_manager.py)0
-rw-r--r--nova/tests/unit/cells/test_cells_utils.py (renamed from nova/tests/cells/test_cells_utils.py)0
-rw-r--r--nova/tests/unit/cells/test_cells_weights.py (renamed from nova/tests/cells/test_cells_weights.py)0
-rw-r--r--nova/tests/unit/cert/__init__.py (renamed from nova/tests/cert/__init__.py)0
-rw-r--r--nova/tests/unit/cert/test_rpcapi.py (renamed from nova/tests/cert/test_rpcapi.py)0
-rw-r--r--nova/tests/unit/cmd/__init__.py (renamed from nova/tests/cmd/__init__.py)0
-rw-r--r--nova/tests/unit/cmd/test_idmapshift.py (renamed from nova/tests/cmd/test_idmapshift.py)0
-rw-r--r--nova/tests/unit/compute/__init__.py (renamed from nova/tests/compute/__init__.py)0
-rw-r--r--nova/tests/unit/compute/eventlet_utils.py (renamed from nova/tests/compute/eventlet_utils.py)0
-rw-r--r--nova/tests/unit/compute/fake_resource_tracker.py (renamed from nova/tests/compute/fake_resource_tracker.py)0
-rw-r--r--nova/tests/unit/compute/monitors/__init__.py (renamed from nova/tests/compute/monitors/__init__.py)0
-rw-r--r--nova/tests/unit/compute/monitors/test_cpu_monitor.py (renamed from nova/tests/compute/monitors/test_cpu_monitor.py)0
-rw-r--r--nova/tests/unit/compute/monitors/test_monitors.py144
-rw-r--r--nova/tests/unit/compute/test_arch.py (renamed from nova/tests/compute/test_arch.py)0
-rw-r--r--nova/tests/unit/compute/test_claims.py320
-rw-r--r--nova/tests/unit/compute/test_compute.py11415
-rw-r--r--nova/tests/unit/compute/test_compute_api.py2635
-rw-r--r--nova/tests/unit/compute/test_compute_cells.py332
-rw-r--r--nova/tests/unit/compute/test_compute_mgr.py3053
-rw-r--r--nova/tests/unit/compute/test_compute_utils.py827
-rw-r--r--nova/tests/unit/compute/test_compute_xen.py67
-rw-r--r--nova/tests/unit/compute/test_flavors.py (renamed from nova/tests/compute/test_flavors.py)0
-rw-r--r--nova/tests/unit/compute/test_host_api.py480
-rw-r--r--nova/tests/unit/compute/test_hvtype.py (renamed from nova/tests/compute/test_hvtype.py)0
-rw-r--r--nova/tests/unit/compute/test_keypairs.py221
-rw-r--r--nova/tests/unit/compute/test_multiple_nodes.py (renamed from nova/tests/compute/test_multiple_nodes.py)0
-rw-r--r--nova/tests/unit/compute/test_resource_tracker.py1539
-rw-r--r--nova/tests/unit/compute/test_resources.py344
-rw-r--r--nova/tests/unit/compute/test_rpcapi.py486
-rw-r--r--nova/tests/unit/compute/test_shelve.py414
-rw-r--r--nova/tests/unit/compute/test_stats.py (renamed from nova/tests/compute/test_stats.py)0
-rw-r--r--nova/tests/unit/compute/test_tracker.py (renamed from nova/tests/compute/test_tracker.py)0
-rw-r--r--nova/tests/unit/compute/test_virtapi.py (renamed from nova/tests/compute/test_virtapi.py)0
-rw-r--r--nova/tests/unit/compute/test_vmmode.py (renamed from nova/tests/compute/test_vmmode.py)0
-rw-r--r--nova/tests/unit/conductor/__init__.py (renamed from nova/tests/conductor/__init__.py)0
-rw-r--r--nova/tests/unit/conductor/tasks/__init__.py (renamed from nova/tests/conductor/tasks/__init__.py)0
-rw-r--r--nova/tests/unit/conductor/tasks/test_live_migrate.py384
-rw-r--r--nova/tests/unit/conductor/test_conductor.py2151
-rw-r--r--nova/tests/unit/conf_fixture.py64
-rw-r--r--nova/tests/unit/console/__init__.py (renamed from nova/tests/console/__init__.py)0
-rw-r--r--nova/tests/unit/console/test_console.py (renamed from nova/tests/console/test_console.py)0
-rw-r--r--nova/tests/unit/console/test_rpcapi.py (renamed from nova/tests/console/test_rpcapi.py)0
-rw-r--r--nova/tests/unit/console/test_serial.py (renamed from nova/tests/console/test_serial.py)0
-rw-r--r--nova/tests/unit/console/test_type.py (renamed from nova/tests/console/test_type.py)0
-rw-r--r--nova/tests/unit/console/test_websocketproxy.py (renamed from nova/tests/console/test_websocketproxy.py)0
-rw-r--r--nova/tests/unit/consoleauth/__init__.py (renamed from nova/tests/consoleauth/__init__.py)0
-rw-r--r--nova/tests/unit/consoleauth/test_consoleauth.py (renamed from nova/tests/consoleauth/test_consoleauth.py)0
-rw-r--r--nova/tests/unit/consoleauth/test_rpcapi.py (renamed from nova/tests/consoleauth/test_rpcapi.py)0
-rw-r--r--nova/tests/unit/db/__init__.py (renamed from nova/tests/db/__init__.py)0
-rw-r--r--nova/tests/unit/db/fakes.py (renamed from nova/tests/db/fakes.py)0
-rw-r--r--nova/tests/unit/db/test_db_api.py7517
-rw-r--r--nova/tests/unit/db/test_migration_utils.py256
-rw-r--r--nova/tests/unit/db/test_migrations.conf (renamed from nova/tests/db/test_migrations.conf)0
-rw-r--r--nova/tests/unit/db/test_migrations.py (renamed from nova/tests/db/test_migrations.py)0
-rw-r--r--nova/tests/unit/db/test_sqlite.py (renamed from nova/tests/db/test_sqlite.py)0
-rw-r--r--nova/tests/unit/fake_block_device.py (renamed from nova/tests/fake_block_device.py)0
-rw-r--r--nova/tests/unit/fake_crypto.py (renamed from nova/tests/fake_crypto.py)0
-rw-r--r--nova/tests/unit/fake_hosts.py37
-rw-r--r--nova/tests/unit/fake_instance.py (renamed from nova/tests/fake_instance.py)0
-rw-r--r--nova/tests/unit/fake_ldap.py (renamed from nova/tests/fake_ldap.py)0
-rw-r--r--nova/tests/unit/fake_loadables/__init__.py (renamed from nova/tests/fake_loadables/__init__.py)0
-rw-r--r--nova/tests/unit/fake_loadables/fake_loadable1.py44
-rw-r--r--nova/tests/unit/fake_loadables/fake_loadable2.py39
-rw-r--r--nova/tests/unit/fake_network.py457
-rw-r--r--nova/tests/unit/fake_network_cache_model.py (renamed from nova/tests/fake_network_cache_model.py)0
-rw-r--r--nova/tests/unit/fake_notifier.py (renamed from nova/tests/fake_notifier.py)0
-rw-r--r--nova/tests/unit/fake_policy.py (renamed from nova/tests/fake_policy.py)0
-rw-r--r--nova/tests/unit/fake_processutils.py (renamed from nova/tests/fake_processutils.py)0
-rw-r--r--nova/tests/unit/fake_server_actions.py (renamed from nova/tests/fake_server_actions.py)0
-rw-r--r--nova/tests/unit/fake_utils.py (renamed from nova/tests/fake_utils.py)0
-rw-r--r--nova/tests/unit/fake_volume.py (renamed from nova/tests/fake_volume.py)0
-rw-r--r--nova/tests/unit/functional/__init__.py (renamed from nova/tests/functional/__init__.py)0
-rw-r--r--nova/tests/unit/image/__init__.py (renamed from nova/tests/image/__init__.py)0
-rw-r--r--nova/tests/unit/image/abs.tar.gz (renamed from nova/tests/image/abs.tar.gz)bin153 -> 153 bytes
-rw-r--r--nova/tests/unit/image/fake.py (renamed from nova/tests/image/fake.py)0
-rw-r--r--nova/tests/unit/image/rel.tar.gz (renamed from nova/tests/image/rel.tar.gz)bin165 -> 165 bytes
-rw-r--r--nova/tests/unit/image/test_fake.py117
-rw-r--r--nova/tests/unit/image/test_glance.py (renamed from nova/tests/image/test_glance.py)0
-rw-r--r--nova/tests/unit/image/test_s3.py267
-rw-r--r--nova/tests/unit/image/test_transfer_modules.py (renamed from nova/tests/image/test_transfer_modules.py)0
-rw-r--r--nova/tests/unit/image_fixtures.py (renamed from nova/tests/image_fixtures.py)0
-rw-r--r--nova/tests/unit/integrated/__init__.py (renamed from nova/tests/integrated/__init__.py)0
-rw-r--r--nova/tests/unit/integrated/api/__init__.py (renamed from nova/tests/integrated/api/__init__.py)0
-rw-r--r--nova/tests/unit/integrated/api/client.py304
-rw-r--r--nova/tests/unit/integrated/api_samples/NMN/multinic-add-fixed-ip-req.json.tpl (renamed from nova/tests/integrated/api_samples/NMN/multinic-add-fixed-ip-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/NMN/multinic-add-fixed-ip-req.xml.tpl (renamed from nova/tests/integrated/api_samples/NMN/multinic-add-fixed-ip-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.json.tpl (renamed from nova/tests/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.xml.tpl (renamed from nova/tests/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/NMN/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/NMN/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/NMN/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/NMN/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/NMN/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/NMN/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/NMN/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/NMN/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/image-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/image-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/image-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/image-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/image-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/image-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/image-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/image-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/list-servers-detail-get.json.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/list-servers-detail-get.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/list-servers-detail-get.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/list-servers-detail-get.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-req.json.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-req.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/server-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/server-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/server-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/server-resize-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/server-resize-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/server-resize-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/server-resize-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-req.json.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/server-update-put-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-req.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/server-update-put-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/server-update-put-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-DCF/server-update-put-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IPS/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IPS/server-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-STS/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-STS/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-STS/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-STS/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-STS/servers-detail-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-STS/servers-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-STS/servers-detail-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-STS/servers-detail-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-SRV-USG/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-SRV-USG/server-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/OS-SRV-USG/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-SRV-USG/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-SRV-USG/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-SRV-USG/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-SRV-USG/servers-detail-resp.json.tpl (renamed from nova/tests/integrated/api_samples/OS-SRV-USG/servers-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/OS-SRV-USG/servers-detail-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/OS-SRV-USG/servers-detail-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/README.rst29
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/flavor-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/flavor-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/flavor-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/flavor-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/flavors-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/flavors-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/flavors-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/flavors-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-action-changepassword.json.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-action-changepassword.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-action-changepassword.xml.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-action-changepassword.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-action-confirmresize.json.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-action-confirmresize.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-action-confirmresize.xml.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-action-confirmresize.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-action-createimage.json.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-action-createimage.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-action-createimage.xml.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-action-createimage.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-action-reboot.json.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-action-reboot.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-action-reboot.xml.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-action-reboot.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild-resp.json.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-action-rebuild-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-action-rebuild-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild.json.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-action-rebuild.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild.xml.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-action-rebuild.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-action-resize.json.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-action-resize.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-action-resize.xml.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-action-resize.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-action-revertresize.json.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-action-revertresize.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-action-revertresize.xml.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-action-revertresize.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/servers-details-resp.json.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/servers-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/servers-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/all_extensions/servers-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/all_extensions/servers-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/flavor-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/flavor-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/flavor-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/flavor-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/flavors-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/flavors-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/flavors-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/flavors-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/image-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/image-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/image-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/image-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/image-meta-key-get.json.tpl (renamed from nova/tests/integrated/api_samples/image-meta-key-get.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/image-meta-key-get.xml.tpl (renamed from nova/tests/integrated/api_samples/image-meta-key-get.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/image-meta-key-put-req.json.tpl (renamed from nova/tests/integrated/api_samples/image-meta-key-put-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/image-meta-key-put-req.xml.tpl (renamed from nova/tests/integrated/api_samples/image-meta-key-put-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/image-meta-key-put-resp.json.tpl (renamed from nova/tests/integrated/api_samples/image-meta-key-put-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/image-meta-key-put-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/image-meta-key-put-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/image-metadata-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/image-metadata-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/image-metadata-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/image-metadata-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/image-metadata-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/image-metadata-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/image-metadata-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/image-metadata-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/image-metadata-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/image-metadata-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/image-metadata-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/image-metadata-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/image-metadata-put-req.json.tpl (renamed from nova/tests/integrated/api_samples/image-metadata-put-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/image-metadata-put-req.xml.tpl (renamed from nova/tests/integrated/api_samples/image-metadata-put-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/image-metadata-put-resp.json.tpl (renamed from nova/tests/integrated/api_samples/image-metadata-put-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/image-metadata-put-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/image-metadata-put-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/image-metadata-resp.json.tpl (renamed from nova/tests/integrated/api_samples/image-metadata-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/images-details-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/images-details-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/images-details-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/images-details-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/images-details-resp.json.tpl (renamed from nova/tests/integrated/api_samples/images-details-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/images-details-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/images-details-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/images-list-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/images-list-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/images-list-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/images-list-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/images-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/images-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/images-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/images-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/limit-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/limit-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/limit-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/limit-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-backup-server.json.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-backup-server.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-backup-server.xml.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-backup-server.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.xml.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.json.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.xml.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-lock-server.json.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-lock-server.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-lock-server.xml.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-lock-server.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-migrate.json.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-migrate.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-migrate.xml.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-migrate.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-pause.json.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-pause.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-pause.xml.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-pause.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-network.xml.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-network.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.xml.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-state.xml.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-state.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-resume.json.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-resume.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-resume.xml.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-resume.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-suspend.json.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-suspend.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-suspend.xml.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-suspend.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.json.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.xml.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unpause.json.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unpause.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unpause.xml.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unpause.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-admin-actions/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-agents/agent-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-agents/agent-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-agents/agent-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-agents/agent-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-agents/agent-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-agents/agent-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-agents/agent-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-agents/agent-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-agents/agent-update-put-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-agents/agent-update-put-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-agents/agent-update-put-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-agents/agent-update-put-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-agents/agents-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-agents/agents-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-agents/agents-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-agents/agents-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregate-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregate-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregate-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregate-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-aggregates/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-aggregates/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-attach-interfaces/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-attach-interfaces/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-attach-interfaces/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-attach-interfaces/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-details-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-availability-zone/availability-zone-details-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-details-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-availability-zone/availability-zone-details-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-availability-zone/availability-zone-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-availability-zone/availability-zone-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-cell-capacities/cells-capacities-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-cell-capacities/cells-capacities-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-cell-capacities/cells-capacities-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-cell-capacities/cells-capacities-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-cells/cells-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-cells/cells-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-cells/cells-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-cells/cells-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-cells/cells-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-cells/cells-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-cells/cells-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-cells/cells-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-certificates/certificate-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-certificates/certificate-create-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-certificates/certificate-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-certificates/certificate-create-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-certificates/certificate-get-root-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-certificates/certificate-get-root-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-certificates/certificate-get-root-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-certificates/certificate-get-root-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-config-drive/server-config-drive-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-config-drive/server-config-drive-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-config-drive/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-config-drive/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-config-drive/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-config-drive/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-config-drive/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-config-drive/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-config-drive/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-config-drive/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-console-auth-tokens/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-console-auth-tokens/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-console-auth-tokens/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-console-auth-tokens/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-console-output/console-output-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-console-output/console-output-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-console-output/console-output-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-console-output/console-output-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-console-output/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-console-output/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-console-output/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-console-output/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-console-output/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-console-output/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-console-output/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-console-output/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/get-rdp-console-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/get-rdp-console-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/get-rdp-console-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/get-rdp-console-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/get-serial-console-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/get-serial-console-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/get-serial-console-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/get-serial-console-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-consoles/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-consoles/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-deferred-delete/force-delete-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-deferred-delete/force-delete-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-deferred-delete/restore-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-deferred-delete/restore-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-evacuate/server-evacuate-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-evacuate/server-evacuate-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-evacuate/server-evacuate-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-evacuate/server-evacuate-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-evacuate/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-evacuate/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-evacuate/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-evacuate/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-evacuate/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-evacuate/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-evacuate/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-evacuate/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-networks/network-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-networks/network-create-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-networks/network-show-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-networks/network-show-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-networks/networks-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-networks/networks-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-quotas/quotas-update-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-quotas/quotas-update-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.xml.tpl)0
-rwxr-xr-xnova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.json.tpl)0
-rwxr-xr-xnova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.xml.tpl)0
-rwxr-xr-xnova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-post-req.json.tpl)0
-rwxr-xr-xnova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-post-req.xml.tpl)0
-rwxr-xr-xnova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.json.tpl)0
-rwxr-xr-xnova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.xml.tpl)0
-rwxr-xr-xnova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.json.tpl)0
-rwxr-xr-xnova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.xml.tpl)0
-rwxr-xr-xnova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-rescue.json.tpl)0
-rwxr-xr-xnova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-rescue.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-services-delete/services-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-services-delete/services-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-services-delete/services-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-services-delete/services-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-volumes/server-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-volumes/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-volumes/server-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-volumes/server-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-volumes/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-volumes/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-volumes/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-volumes/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-volumes/servers-detail-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-extended-volumes/servers-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-extended-volumes/servers-detail-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-extended-volumes/servers-detail-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedip-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedip-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedips-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-access/flavor-access-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-access/flavor-access-create-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-access/flavor-access-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-access/flavor-access-create-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-access/flavor-access-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-access/flavor-access-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-show-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-access/flavor-access-show-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-show-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-access/flavor-access-show-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips/floating-ips-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips/floating-ips-create-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips/floating-ips-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips/floating-ips-create-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips/floating-ips-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips/floating-ips-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips/floating-ips-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-floating-ips/floating-ips-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fping/fping-get-details-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fping/fping-get-details-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fping/fping-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-fping/fping-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fping/fping-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-fping/fping-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fping/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-fping/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fping/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-fping/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fping/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-fping/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-fping/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-fping/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-hide-server-addresses/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hide-server-addresses/server-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-details-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hide-server-addresses/servers-details-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hide-server-addresses/servers-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-get-reboot.json.tpl (renamed from nova/tests/integrated/api_samples/os-hosts/host-get-reboot.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-get-reboot.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hosts/host-get-reboot.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-hosts/host-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hosts/host-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-get-shutdown.json.tpl (renamed from nova/tests/integrated/api_samples/os-hosts/host-get-shutdown.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-get-shutdown.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hosts/host-get-shutdown.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-get-startup.json.tpl (renamed from nova/tests/integrated/api_samples/os-hosts/host-get-startup.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-get-startup.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hosts/host-get-startup.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hypervisors/hypervisors-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-search-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hypervisors/hypervisors-search-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-instance-actions/instance-action-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-instance-actions/instance-action-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-instance-actions/instance-action-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-instance-actions/instance-action-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-instance-actions/instance-actions-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-instance-actions/instance-actions-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-keypairs/keypairs-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-keypairs/keypairs-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-keypairs/keypairs-import-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-keypairs/keypairs-import-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-keypairs/keypairs-import-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-keypairs/keypairs-import-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-keypairs/keypairs-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-keypairs/keypairs-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-keypairs/keypairs-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-keypairs/keypairs-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-keypairs/keypairs-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-keypairs/keypairs-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-migrations/migrations-get.json.tpl (renamed from nova/tests/integrated/api_samples/os-migrations/migrations-get.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-migrations/migrations-get.xml.tpl (renamed from nova/tests/integrated/api_samples/os-migrations/migrations-get.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks-associate/network-associate-host-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks-associate/network-associate-host-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-host-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-project-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/network-add-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-networks/network-add-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/network-add-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-networks/network-add-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/network-create-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-networks/network-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/network-create-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-networks/network-create-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/network-create-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-networks/network-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/network-create-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-networks/network-create-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/network-show-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-networks/network-show-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/network-show-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-networks/network-show-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/networks-disassociate-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-networks/networks-disassociate-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/networks-disassociate-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-networks/networks-disassociate-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/networks-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-networks/networks-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-networks/networks-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-networks/networks-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.json.tpl (renamed from nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.xml.tpl (renamed from nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-rescue.json.tpl (renamed from nova/tests/integrated/api_samples/os-rescue/server-get-resp-rescue.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-rescue.xml.tpl (renamed from nova/tests/integrated/api_samples/os-rescue/server-get-resp-rescue.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-unrescue.json.tpl (renamed from nova/tests/integrated/api_samples/os-rescue/server-get-resp-unrescue.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-unrescue.xml.tpl (renamed from nova/tests/integrated/api_samples/os-rescue/server-get-resp-unrescue.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-rescue/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-rescue/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-rescue/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-rescue/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-rescue-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-rescue/server-rescue-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-rescue-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-rescue/server-rescue-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-rescue.json.tpl (renamed from nova/tests/integrated/api_samples/os-rescue/server-rescue.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-rescue.xml.tpl (renamed from nova/tests/integrated/api_samples/os-rescue/server-rescue.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-unrescue-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-rescue/server-unrescue-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-rescue/server-unrescue-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-rescue/server-unrescue-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-group-add-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-security-groups/security-group-add-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-group-add-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-security-groups/security-group-add-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-group-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-security-groups/security-group-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-group-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-security-groups/security-group-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-group-remove-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-security-groups/security-group-remove-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-group-remove-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-security-groups/security-group-remove-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-create-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-security-groups/security-groups-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-create-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-security-groups/security-groups-create-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-security-groups/security-groups-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-security-groups/security-groups-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-list-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-security-groups/security-groups-list-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-security-groups/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-security-groups/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-security-groups/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-security-groups/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-security-groups/server-security-groups-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-security-groups/server-security-groups-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-diagnostics/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-diagnostics/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-diagnostics/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-diagnostics/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-external-events/event-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-external-events/event-create-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-external-events/event-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-external-events/event-create-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-external-events/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-external-events/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-external-events/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-external-events/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/limit-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-group-quotas/limit-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/limit-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-group-quotas/limit-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-groups/server-groups-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-groups/server-groups-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-groups/server-groups-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-groups/server-groups-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-groups/server-groups-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-groups/server-groups-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-groups/server-groups-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-groups/server-groups-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-list-multi-status/servers-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-list-multi-status/servers-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-password/get-password-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-password/get-password-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-password/get-password-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-password/get-password-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-password/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-password/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-password/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-password/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-password/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-password/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-password/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-password/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-start-stop/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-start-stop/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-start-stop/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-start-stop/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-start-stop/server_start_stop.json.tpl (renamed from nova/tests/integrated/api_samples/os-server-start-stop/server_start_stop.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-server-start-stop/server_start_stop.xml.tpl (renamed from nova/tests/integrated/api_samples/os-server-start-stop/server_start_stop.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-services/service-disable-log-put-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-services/service-disable-log-put-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-services/service-disable-log-put-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-services/service-disable-log-put-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-disable-put-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-services/service-disable-put-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-disable-put-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-services/service-disable-put-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-disable-put-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-services/service-disable-put-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-disable-put-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-services/service-disable-put-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-enable-put-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-services/service-enable-put-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-enable-put-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-services/service-enable-put-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-enable-put-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-services/service-enable-put-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/service-enable-put-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-services/service-enable-put-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/services-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-services/services-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/services-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-services/services-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/services-list-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-services/services-list-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-services/services-list-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-services/services-list-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/os-shelve-offload.json.tpl (renamed from nova/tests/integrated/api_samples/os-shelve/os-shelve-offload.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/os-shelve-offload.xml.tpl (renamed from nova/tests/integrated/api_samples/os-shelve/os-shelve-offload.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/os-shelve.json.tpl (renamed from nova/tests/integrated/api_samples/os-shelve/os-shelve.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/os-shelve.xml.tpl (renamed from nova/tests/integrated/api_samples/os-shelve/os-shelve.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/os-unshelve.json.tpl (renamed from nova/tests/integrated/api_samples/os-shelve/os-unshelve.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/os-unshelve.xml.tpl (renamed from nova/tests/integrated/api_samples/os-shelve/os-unshelve.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-shelve/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-shelve/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-shelve/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-shelve/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-shelve/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl (renamed from nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.xml.tpl (renamed from nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl (renamed from nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.xml.tpl (renamed from nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl (renamed from nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-list-res.xml.tpl (renamed from nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl (renamed from nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-res.xml.tpl (renamed from nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-user-data/userdata-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-user-data/userdata-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-user-data/userdata-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-user-data/userdata-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-virtual-interfaces/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-virtual-interfaces/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-virtual-interfaces/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-virtual-interfaces/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-volume-attachment-update/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-volume-attachment-update/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-volume-attachment-update/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-volume-attachment-update/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volume-attachment-update/update-volume-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-volume-attachment-update/update-volume-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volume-attachment-update/update-volume-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-volume-attachment-update/update-volume-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/attach-volume-to-server-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/attach-volume-to-server-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/attach-volume-to-server-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/attach-volume-to-server-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/list-volume-attachments-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/list-volume-attachments-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/list-volume-attachments-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/list-volume-attachments-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-detail-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/os-volumes-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-detail-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/os-volumes-detail-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/os-volumes-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/os-volumes-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-index-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/os-volumes-index-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-index-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/os-volumes-index-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/os-volumes-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/os-volumes-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/os-volumes-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/os-volumes-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/snapshot-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-req.xml.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/snapshot-create-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/snapshot-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/snapshot-create-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshots-detail-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/snapshots-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshots-detail-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/snapshots-detail-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshots-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/snapshots-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshots-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/snapshots-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshots-show-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/snapshots-show-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/snapshots-show-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/snapshots-show-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/volume-attachment-detail-resp.json.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/volume-attachment-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/os-volumes/volume-attachment-detail-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/os-volumes/volume-attachment-detail-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-changepassword.json.tpl (renamed from nova/tests/integrated/api_samples/server-action-changepassword.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-changepassword.xml.tpl (renamed from nova/tests/integrated/api_samples/server-action-changepassword.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-confirmresize.json.tpl (renamed from nova/tests/integrated/api_samples/server-action-confirmresize.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-confirmresize.xml.tpl (renamed from nova/tests/integrated/api_samples/server-action-confirmresize.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-createimage.json.tpl (renamed from nova/tests/integrated/api_samples/server-action-createimage.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-createimage.xml.tpl (renamed from nova/tests/integrated/api_samples/server-action-createimage.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-reboot.json.tpl (renamed from nova/tests/integrated/api_samples/server-action-reboot.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-reboot.xml.tpl (renamed from nova/tests/integrated/api_samples/server-action-reboot.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-rebuild-resp.json.tpl (renamed from nova/tests/integrated/api_samples/server-action-rebuild-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-rebuild-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/server-action-rebuild-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-rebuild.json.tpl (renamed from nova/tests/integrated/api_samples/server-action-rebuild.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-rebuild.xml.tpl (renamed from nova/tests/integrated/api_samples/server-action-rebuild.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-resize.json.tpl (renamed from nova/tests/integrated/api_samples/server-action-resize.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-resize.xml.tpl (renamed from nova/tests/integrated/api_samples/server-action-resize.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-revertresize.json.tpl (renamed from nova/tests/integrated/api_samples/server-action-revertresize.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-action-revertresize.xml.tpl (renamed from nova/tests/integrated/api_samples/server-action-revertresize.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/server-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-ips-network-resp.json.tpl (renamed from nova/tests/integrated/api_samples/server-ips-network-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-ips-network-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/server-ips-network-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-ips-resp.json.tpl (renamed from nova/tests/integrated/api_samples/server-ips-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-ips-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/server-ips-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-metadata-all-req.json.tpl (renamed from nova/tests/integrated/api_samples/server-metadata-all-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-metadata-all-req.xml.tpl (renamed from nova/tests/integrated/api_samples/server-metadata-all-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-metadata-all-resp.json.tpl (renamed from nova/tests/integrated/api_samples/server-metadata-all-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-metadata-all-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/server-metadata-all-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-metadata-req.json.tpl (renamed from nova/tests/integrated/api_samples/server-metadata-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-metadata-req.xml.tpl (renamed from nova/tests/integrated/api_samples/server-metadata-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-metadata-resp.json.tpl (renamed from nova/tests/integrated/api_samples/server-metadata-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-metadata-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/server-metadata-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-post-req.xml.tpl (renamed from nova/tests/integrated/api_samples/server-post-req.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-post-resp.json.tpl (renamed from nova/tests/integrated/api_samples/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/server-post-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/server-post-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/servers-details-resp.json.tpl (renamed from nova/tests/integrated/api_samples/servers-details-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/servers-details-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/servers-details-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/servers-list-resp.json.tpl (renamed from nova/tests/integrated/api_samples/servers-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/servers-list-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/servers-list-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/versions-get-resp.json.tpl (renamed from nova/tests/integrated/api_samples/versions-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples/versions-get-resp.xml.tpl (renamed from nova/tests/integrated/api_samples/versions-get-resp.xml.tpl)0
-rw-r--r--nova/tests/unit/integrated/api_samples_test_base.py323
-rw-r--r--nova/tests/unit/integrated/integrated_helpers.py160
-rw-r--r--nova/tests/unit/integrated/test_api_samples.py4433
-rw-r--r--nova/tests/unit/integrated/test_extensions.py42
-rw-r--r--nova/tests/unit/integrated/test_login.py36
-rw-r--r--nova/tests/unit/integrated/test_servers.py522
-rw-r--r--nova/tests/unit/integrated/test_xml.py51
-rw-r--r--nova/tests/unit/integrated/v3/__init__.py (renamed from nova/tests/integrated/v3/__init__.py)0
-rw-r--r--nova/tests/unit/integrated/v3/api_sample_base.py79
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/all_extensions/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/all_extensions/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/all_extensions/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/all_extensions/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/all_extensions/servers-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/all_extensions/servers-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/consoles/consoles-create-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/consoles/consoles-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/consoles/consoles-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/consoles/consoles-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/consoles/consoles-list-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/consoles/consoles-list-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/consoles/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/consoles/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/consoles/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/consoles/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/extension-info/extensions-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/extension-info/extensions-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/extension-info/extensions-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/extension-info/extensions-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-create-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-create-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-detail-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-show-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-show-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-manage/flavor-create-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavor-manage/flavor-create-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavor-manage/flavor-create-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavor-manage/flavor-create-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavors/flavor-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavors/flavor-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavors/flavors-detail-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavors/flavors-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/flavors/flavors-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/flavors/flavors-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/image-size/image-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/image-size/image-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/image-size/images-details-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/image-size/images-details-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/image-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/images/image-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-get.json.tpl (renamed from nova/tests/integrated/v3/api_samples/images/image-meta-key-get.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-put-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/images/image-meta-key-put-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-put-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/images/image-meta-key-put-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/image-metadata-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/images/image-metadata-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/image-metadata-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/images/image-metadata-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/image-metadata-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/images/image-metadata-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/image-metadata-put-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/images/image-metadata-put-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/image-metadata-put-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/images/image-metadata-put-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/images-details-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/images/images-details-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/images/images-list-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/images/images-list-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/keypairs/keypairs-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-import-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/keypairs/keypairs-import-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-import-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/keypairs/keypairs-import-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/keypairs/keypairs-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/keypairs/keypairs-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/keypairs/keypairs-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-action-rebuild.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-access-ips/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-access-ips/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-access-ips/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-put-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-access-ips/server-put-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-put-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-access-ips/server-put-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-access-ips/servers-details-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-access-ips/servers-details-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-access-ips/servers-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-access-ips/servers-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-admin-actions/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-admin-actions/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-admin-password/admin-password-change-password.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-admin-password/admin-password-change-password.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-admin-password/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-admin-password/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-admin-password/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-admin-password/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-agents/agent-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-agents/agent-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-agents/agent-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-agents/agent-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-agents/agent-update-put-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-agents/agent-update-put-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-agents/agent-update-put-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-agents/agent-update-put-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-agents/agents-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-agents/agents-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-update-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-update-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-aggregates/aggregates-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-availability-zone/availability-zone-detail-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-availability-zone/availability-zone-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-availability-zone/availability-zone-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-availability-zone/availability-zone-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-availability-zone/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-availability-zone/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-cells/cells-capacities-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-cells/cells-capacities-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-cells/cells-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-cells/cells-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-cells/cells-list-empty-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-cells/cells-list-empty-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-cells/cells-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-cells/cells-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-create-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-certificates/certificate-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-create-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-certificates/certificate-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-get-root-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-certificates/certificate-get-root-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-update-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-update-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-config-drive/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-config-drive/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-console-output/console-output-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-console-output/console-output-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-console-output/console-output-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-console-output/console-output-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-console-output/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-console-output/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-console-output/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-console-output/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-create-backup/create-backup-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-create-backup/create-backup-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-create-backup/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-create-backup/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-create-backup/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-create-backup/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/force-delete-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-deferred-delete/force-delete-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/restore-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-deferred-delete/restore-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/image-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-disk-config/image-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/image-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-disk-config/image-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/list-servers-detail-get.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-disk-config/list-servers-detail-get.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-action-rebuild-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-disk-config/server-action-rebuild-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-action-rebuild-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-disk-config/server-action-rebuild-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-disk-config/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-disk-config/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-disk-config/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-resize-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-disk-config/server-resize-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-update-put-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-disk-config/server-update-put-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-update-put-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-disk-config/server-update-put-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-evacuate/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-evacuate/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-extended-status/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-extended-status/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/attach-volume-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-extended-volumes/attach-volume-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/detach-volume-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-extended-volumes/detach-volume-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-extended-volumes/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/servers-detail-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-extended-volumes/servers-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/swap-volume-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-extended-volumes/swap-volume-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-fixed-ips/fixedip-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-fixed-ips/fixedip-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-fping/fping-get-details-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-fping/fping-get-details-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-fping/fping-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-fping/fping-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-fping/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-fping/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-fping/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-fping/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-reboot.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hosts/host-get-reboot.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hosts/host-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-shutdown.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hosts/host-get-shutdown.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-startup.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hosts/host-get-startup.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hosts/host-put-maintenance-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hosts/host-put-maintenance-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hosts/host-put-maintenance-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hosts/host-put-maintenance-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hosts/hosts-list-compute-service-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hosts/hosts-list-compute-service-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hosts/hosts-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hosts/hosts-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-action-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-instance-actions/instance-action-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-instance-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-instance-actions/instance-instance-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-lock-server/lock-server.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-lock-server/lock-server.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-lock-server/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-lock-server/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-lock-server/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-lock-server/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-lock-server/unlock-server.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-lock-server/unlock-server.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-migrate-server/live-migrate-server.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-migrate-server/live-migrate-server.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-migrate-server/migrate-server.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-migrate-server/migrate-server.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-migrate-server/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-migrate-server/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-migrations/migrations-get.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-migrations/migrations-get.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-multinic/multinic-add-fixed-ip-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-multinic/multinic-add-fixed-ip-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-multinic/multinic-remove-fixed-ip-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-multinic/multinic-remove-fixed-ip-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-multinic/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-multinic/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-multinic/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-multinic/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-multiple-create/multiple-create-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-associate-host-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-networks-associate/network-associate-host-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-networks-associate/network-disassociate-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks/network-add-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-networks/network-add-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks/network-create-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-networks/network-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks/network-create-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-networks/network-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks/network-show-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-networks/network-show-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks/networks-disassociate-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-networks/networks-disassociate-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-networks/networks-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-networks/networks-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pause-server/pause-server.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-pause-server/pause-server.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pause-server/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-pause-server/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pause-server/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-pause-server/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pause-server/unpause-server.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-pause-server/unpause-server.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pci/pci-detail-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-pci/pci-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pci/pci-index-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-pci/pci-index-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pci/pci-show-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-pci/pci-show-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pci/server-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-pci/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pci/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-pci/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pci/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-pci/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-pci/servers-detail-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-pci/servers-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-update-force-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-update-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-show-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-quota-sets/user-quotas-show-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-rescue/server-get-resp-rescue.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-rescue.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-rescue/server-get-resp-unrescue.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-unrescue.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-rescue/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-rescue/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-rescue/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-rescue/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue-req-with-image-ref.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-rescue/server-rescue-req-with-image-ref.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-rescue/server-rescue-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-rescue/server-rescue.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-rescue/server-unrescue-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-rescue/server-unrescue-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-add-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-security-groups/security-group-add-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-security-groups/security-group-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-remove-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-security-groups/security-group-remove-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-create-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-security-groups/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-security-groups/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-security-groups/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-security-groups/servers-detail-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-security-groups/servers-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-external-events/event-create-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-server-external-events/event-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-external-events/event-create-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-server-external-events/event-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-external-events/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-external-events/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-server-groups/server-groups-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-server-groups/server-groups-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-server-groups/server-groups-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-server-groups/server-groups-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-server-usage/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-server-usage/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-server-usage/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-server-usage/servers-detail-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-server-usage/servers-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-log-put-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-services/service-disable-log-put-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-log-put-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-services/service-disable-log-put-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-put-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-services/service-disable-put-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-put-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-services/service-disable-put-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-services/service-enable-put-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-services/service-enable-put-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-services/service-enable-put-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-services/service-enable-put-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-services/services-list-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-services/services-list-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-shelve/os-shelve-offload.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-shelve/os-shelve-offload.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-shelve/os-shelve.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-shelve/os-shelve.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-shelve/os-unshelve.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-shelve/os-unshelve.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-shelve/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-shelve/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-shelve/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-shelve/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-simple-tenant-usage/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-resume.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-suspend-server/server-resume.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-suspend.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-suspend-server/server-suspend.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-list-res.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-tenant-networks/networks-list-res.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-tenant-networks/networks-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-post-res.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-tenant-networks/networks-post-res.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-used-limits/usedlimits-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-used-limits/usedlimits-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-user-data/userdata-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-user-data/userdata-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-detail-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-volumes/os-volumes-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-volumes/os-volumes-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-index-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-volumes/os-volumes-index-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-volumes/os-volumes-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-volumes/os-volumes-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-volumes/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-volumes/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshot-create-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-volumes/snapshot-create-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshot-create-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-volumes/snapshot-create-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-detail-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-volumes/snapshots-detail-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-volumes/snapshots-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-show-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/os-volumes/snapshots-show-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-ips/server-ips-network-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/server-ips/server-ips-network-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-ips/server-ips-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/server-ips/server-ips-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-ips/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/server-ips/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-ips/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/server-ips/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-all-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/server-metadata/server-metadata-all-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-all-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/server-metadata/server-metadata-all-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/server-metadata/server-metadata-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/server-metadata/server-metadata-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-metadata/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/server-metadata/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/server-metadata/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/server-metadata/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-confirm-resize.json.tpl (renamed from nova/tests/integrated/v3/api_samples/servers/server-action-confirm-resize.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-create-image.json.tpl (renamed from nova/tests/integrated/v3/api_samples/servers/server-action-create-image.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-reboot.json.tpl (renamed from nova/tests/integrated/v3/api_samples/servers/server-action-reboot.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl (renamed from nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl (renamed from nova/tests/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-resize.json.tpl (renamed from nova/tests/integrated/v3/api_samples/servers/server-action-resize.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-revert-resize.json.tpl (renamed from nova/tests/integrated/v3/api_samples/servers/server-action-revert-resize.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-start.json.tpl (renamed from nova/tests/integrated/v3/api_samples/servers/server-action-start.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-action-stop.json.tpl (renamed from nova/tests/integrated/v3/api_samples/servers/server-action-stop.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-get-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/servers/server-get-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-post-req.json.tpl (renamed from nova/tests/integrated/v3/api_samples/servers/server-post-req.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/server-post-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/servers/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/servers-details-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/servers/servers-details-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/api_samples/servers/servers-list-resp.json.tpl (renamed from nova/tests/integrated/v3/api_samples/servers/servers-list-resp.json.tpl)0
-rw-r--r--nova/tests/unit/integrated/v3/test_access_ips.py93
-rw-r--r--nova/tests/unit/integrated/v3/test_admin_actions.py46
-rw-r--r--nova/tests/unit/integrated/v3/test_admin_password.py29
-rw-r--r--nova/tests/unit/integrated/v3/test_agents.py98
-rw-r--r--nova/tests/unit/integrated/v3/test_aggregates.py80
-rw-r--r--nova/tests/unit/integrated/v3/test_attach_interfaces.py166
-rw-r--r--nova/tests/unit/integrated/v3/test_availability_zone.py49
-rw-r--r--nova/tests/unit/integrated/v3/test_cells.py107
-rw-r--r--nova/tests/unit/integrated/v3/test_certificates.py31
-rw-r--r--nova/tests/unit/integrated/v3/test_cloudpipe.py80
-rw-r--r--nova/tests/unit/integrated/v3/test_config_drive.py48
-rw-r--r--nova/tests/unit/integrated/v3/test_console_auth_tokens.py51
-rw-r--r--nova/tests/unit/integrated/v3/test_console_output.py27
-rw-r--r--nova/tests/unit/integrated/v3/test_consoles.py55
-rw-r--r--nova/tests/unit/integrated/v3/test_create_backup.py38
-rw-r--r--nova/tests/unit/integrated/v3/test_deferred_delete.py42
-rw-r--r--nova/tests/unit/integrated/v3/test_disk_config.py80
-rw-r--r--nova/tests/unit/integrated/v3/test_evacuate.py91
-rw-r--r--nova/tests/unit/integrated/v3/test_extended_availability_zone.py34
-rw-r--r--nova/tests/unit/integrated/v3/test_extended_server_attributes.py42
-rw-r--r--nova/tests/unit/integrated/v3/test_extended_status.py35
-rw-r--r--nova/tests/unit/integrated/v3/test_extended_volumes.py151
-rw-r--r--nova/tests/unit/integrated/v3/test_extension_info.py71
-rw-r--r--nova/tests/unit/integrated/v3/test_fixed_ips.py109
-rw-r--r--nova/tests/unit/integrated/v3/test_flavor_access.py89
-rw-r--r--nova/tests/unit/integrated/v3/test_flavor_extraspecs.py62
-rw-r--r--nova/tests/unit/integrated/v3/test_flavor_manage.py43
-rw-r--r--nova/tests/unit/integrated/v3/test_flavor_rxtx.py46
-rw-r--r--nova/tests/unit/integrated/v3/test_flavors.py35
-rw-r--r--nova/tests/unit/integrated/v3/test_floating_ip_dns.py91
-rw-r--r--nova/tests/unit/integrated/v3/test_floating_ip_pools.py35
-rw-r--r--nova/tests/unit/integrated/v3/test_floating_ips_bulk.py86
-rw-r--r--nova/tests/unit/integrated/v3/test_fping.py45
-rw-r--r--nova/tests/unit/integrated/v3/test_hide_server_addresses.py39
-rw-r--r--nova/tests/unit/integrated/v3/test_hosts.py57
-rw-r--r--nova/tests/unit/integrated/v3/test_hypervisors.py69
-rw-r--r--nova/tests/unit/integrated/v3/test_image_size.py37
-rw-r--r--nova/tests/unit/integrated/v3/test_images.py85
-rw-r--r--nova/tests/unit/integrated/v3/test_instance_actions.py84
-rw-r--r--nova/tests/unit/integrated/v3/test_keypairs.py72
-rw-r--r--nova/tests/unit/integrated/v3/test_lock_server.py41
-rw-r--r--nova/tests/unit/integrated/v3/test_migrate_server.py71
-rw-r--r--nova/tests/unit/integrated/v3/test_migrations.py72
-rw-r--r--nova/tests/unit/integrated/v3/test_multinic.py49
-rw-r--r--nova/tests/unit/integrated/v3/test_multiple_create.py45
-rw-r--r--nova/tests/unit/integrated/v3/test_networks.py73
-rw-r--r--nova/tests/unit/integrated/v3/test_networks_associate.py76
-rw-r--r--nova/tests/unit/integrated/v3/test_pause_server.py41
-rw-r--r--nova/tests/unit/integrated/v3/test_pci.py182
-rw-r--r--nova/tests/unit/integrated/v3/test_quota_sets.py70
-rw-r--r--nova/tests/unit/integrated/v3/test_remote_consoles.py70
-rw-r--r--nova/tests/unit/integrated/v3/test_rescue.py82
-rw-r--r--nova/tests/unit/integrated/v3/test_scheduler_hints.py32
-rw-r--r--nova/tests/unit/integrated/v3/test_security_group_default_rules.py40
-rw-r--r--nova/tests/unit/integrated/v3/test_security_groups.py166
-rw-r--r--nova/tests/unit/integrated/v3/test_server_diagnostics.py27
-rw-r--r--nova/tests/unit/integrated/v3/test_server_external_events.py40
-rw-r--r--nova/tests/unit/integrated/v3/test_server_groups.py66
-rw-r--r--nova/tests/unit/integrated/v3/test_server_metadata.py80
-rw-r--r--nova/tests/unit/integrated/v3/test_server_usage.py39
-rw-r--r--nova/tests/unit/integrated/v3/test_servers.py188
-rw-r--r--nova/tests/unit/integrated/v3/test_servers_ips.py35
-rw-r--r--nova/tests/unit/integrated/v3/test_services.py87
-rw-r--r--nova/tests/unit/integrated/v3/test_shelve.py50
-rw-r--r--nova/tests/unit/integrated/v3/test_simple_tenant_usage.py61
-rw-r--r--nova/tests/unit/integrated/v3/test_suspend_server.py41
-rw-r--r--nova/tests/unit/integrated/v3/test_tenant_networks.py61
-rw-r--r--nova/tests/unit/integrated/v3/test_used_limits.py34
-rw-r--r--nova/tests/unit/integrated/v3/test_user_data.py36
-rw-r--r--nova/tests/unit/integrated/v3/test_volumes.py184
-rw-r--r--nova/tests/unit/keymgr/__init__.py (renamed from nova/tests/keymgr/__init__.py)0
-rw-r--r--nova/tests/unit/keymgr/fake.py (renamed from nova/tests/keymgr/fake.py)0
-rw-r--r--nova/tests/unit/keymgr/test_conf_key_mgr.py59
-rw-r--r--nova/tests/unit/keymgr/test_key.py (renamed from nova/tests/keymgr/test_key.py)0
-rw-r--r--nova/tests/unit/keymgr/test_key_mgr.py (renamed from nova/tests/keymgr/test_key_mgr.py)0
-rw-r--r--nova/tests/unit/keymgr/test_mock_key_mgr.py102
-rw-r--r--nova/tests/unit/keymgr/test_not_implemented_key_mgr.py47
-rw-r--r--nova/tests/unit/keymgr/test_single_key_mgr.py72
-rw-r--r--nova/tests/unit/matchers.py (renamed from nova/tests/matchers.py)0
-rw-r--r--nova/tests/unit/monkey_patch_example/__init__.py (renamed from nova/tests/monkey_patch_example/__init__.py)0
-rw-r--r--nova/tests/unit/monkey_patch_example/example_a.py (renamed from nova/tests/monkey_patch_example/example_a.py)0
-rw-r--r--nova/tests/unit/monkey_patch_example/example_b.py (renamed from nova/tests/monkey_patch_example/example_b.py)0
-rw-r--r--nova/tests/unit/network/__init__.py (renamed from nova/tests/network/__init__.py)0
-rw-r--r--nova/tests/unit/network/security_group/__init__.py (renamed from nova/tests/network/security_group/__init__.py)0
-rw-r--r--nova/tests/unit/network/security_group/test_neutron_driver.py (renamed from nova/tests/network/security_group/test_neutron_driver.py)0
-rw-r--r--nova/tests/unit/network/test_api.py589
-rw-r--r--nova/tests/unit/network/test_linux_net.py (renamed from nova/tests/network/test_linux_net.py)0
-rw-r--r--nova/tests/unit/network/test_manager.py3358
-rw-r--r--nova/tests/unit/network/test_network_info.py800
-rw-r--r--nova/tests/unit/network/test_neutronv2.py3194
-rw-r--r--nova/tests/unit/network/test_rpcapi.py353
-rw-r--r--nova/tests/unit/objects/__init__.py (renamed from nova/tests/objects/__init__.py)0
-rw-r--r--nova/tests/unit/objects/test_agent.py103
-rw-r--r--nova/tests/unit/objects/test_aggregate.py199
-rw-r--r--nova/tests/unit/objects/test_bandwidth_usage.py124
-rw-r--r--nova/tests/unit/objects/test_block_device.py333
-rw-r--r--nova/tests/unit/objects/test_compute_node.py240
-rw-r--r--nova/tests/unit/objects/test_dns_domain.py85
-rw-r--r--nova/tests/unit/objects/test_ec2.py192
-rw-r--r--nova/tests/unit/objects/test_external_event.py46
-rw-r--r--nova/tests/unit/objects/test_fields.py (renamed from nova/tests/objects/test_fields.py)0
-rw-r--r--nova/tests/unit/objects/test_fixed_ip.py339
-rw-r--r--nova/tests/unit/objects/test_flavor.py253
-rw-r--r--nova/tests/unit/objects/test_floating_ip.py259
-rw-r--r--nova/tests/unit/objects/test_hv_spec.py58
-rw-r--r--nova/tests/unit/objects/test_instance.py1196
-rw-r--r--nova/tests/unit/objects/test_instance_action.py365
-rw-r--r--nova/tests/unit/objects/test_instance_fault.py126
-rw-r--r--nova/tests/unit/objects/test_instance_group.py350
-rw-r--r--nova/tests/unit/objects/test_instance_info_cache.py117
-rw-r--r--nova/tests/unit/objects/test_instance_numa_topology.py78
-rw-r--r--nova/tests/unit/objects/test_instance_pci_requests.py191
-rw-r--r--nova/tests/unit/objects/test_keypair.py109
-rw-r--r--nova/tests/unit/objects/test_migration.py184
-rw-r--r--nova/tests/unit/objects/test_network.py232
-rw-r--r--nova/tests/unit/objects/test_network_request.py102
-rw-r--r--nova/tests/unit/objects/test_objects.py1126
-rw-r--r--nova/tests/unit/objects/test_pci_device.py254
-rw-r--r--nova/tests/unit/objects/test_quotas.py167
-rw-r--r--nova/tests/unit/objects/test_security_group.py175
-rw-r--r--nova/tests/unit/objects/test_security_group_rule.py95
-rw-r--r--nova/tests/unit/objects/test_service.py226
-rw-r--r--nova/tests/unit/objects/test_virtual_interface.py126
-rw-r--r--nova/tests/unit/pci/__init__.py (renamed from nova/tests/pci/__init__.py)0
-rw-r--r--nova/tests/unit/pci/fakes.py (renamed from nova/tests/pci/fakes.py)0
-rw-r--r--nova/tests/unit/pci/test_device.py (renamed from nova/tests/pci/test_device.py)0
-rw-r--r--nova/tests/unit/pci/test_devspec.py (renamed from nova/tests/pci/test_devspec.py)0
-rw-r--r--nova/tests/unit/pci/test_manager.py364
-rw-r--r--nova/tests/unit/pci/test_request.py (renamed from nova/tests/pci/test_request.py)0
-rw-r--r--nova/tests/unit/pci/test_stats.py267
-rw-r--r--nova/tests/unit/pci/test_utils.py (renamed from nova/tests/pci/test_utils.py)0
-rw-r--r--nova/tests/unit/pci/test_whitelist.py (renamed from nova/tests/pci/test_whitelist.py)0
-rw-r--r--nova/tests/unit/policy_fixture.py73
-rw-r--r--nova/tests/unit/scheduler/__init__.py (renamed from nova/tests/scheduler/__init__.py)0
-rw-r--r--nova/tests/unit/scheduler/fakes.py (renamed from nova/tests/scheduler/fakes.py)0
-rw-r--r--nova/tests/unit/scheduler/filters/__init__.py (renamed from nova/tests/scheduler/filters/__init__.py)0
-rw-r--r--nova/tests/unit/scheduler/filters/test_affinity_filters.py258
-rw-r--r--nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py98
-rw-r--r--nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py72
-rw-r--r--nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py53
-rw-r--r--nova/tests/unit/scheduler/filters/test_availability_zone_filters.py48
-rw-r--r--nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py99
-rw-r--r--nova/tests/unit/scheduler/filters/test_compute_filters.py50
-rw-r--r--nova/tests/unit/scheduler/filters/test_core_filters.py87
-rw-r--r--nova/tests/unit/scheduler/filters/test_disk_filters.py100
-rw-r--r--nova/tests/unit/scheduler/filters/test_extra_specs_ops.py (renamed from nova/tests/scheduler/filters/test_extra_specs_ops.py)0
-rw-r--r--nova/tests/unit/scheduler/filters/test_image_props_filters.py189
-rw-r--r--nova/tests/unit/scheduler/filters/test_io_ops_filters.py63
-rw-r--r--nova/tests/unit/scheduler/filters/test_isolated_hosts_filter.py90
-rw-r--r--nova/tests/unit/scheduler/filters/test_json_filters.py289
-rw-r--r--nova/tests/unit/scheduler/filters/test_metrics_filters.py34
-rw-r--r--nova/tests/unit/scheduler/filters/test_num_instances_filters.py63
-rw-r--r--nova/tests/unit/scheduler/filters/test_numa_topology_filters.py151
-rw-r--r--nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py67
-rw-r--r--nova/tests/unit/scheduler/filters/test_ram_filters.py89
-rw-r--r--nova/tests/unit/scheduler/filters/test_retry_filters.py46
-rw-r--r--nova/tests/unit/scheduler/filters/test_trusted_filters.py203
-rw-r--r--nova/tests/unit/scheduler/filters/test_type_filters.py56
-rw-r--r--nova/tests/unit/scheduler/ironic_fakes.py (renamed from nova/tests/scheduler/ironic_fakes.py)0
-rw-r--r--nova/tests/unit/scheduler/test_baremetal_host_manager.py (renamed from nova/tests/scheduler/test_baremetal_host_manager.py)0
-rw-r--r--nova/tests/unit/scheduler/test_caching_scheduler.py199
-rw-r--r--nova/tests/unit/scheduler/test_chance_scheduler.py182
-rw-r--r--nova/tests/unit/scheduler/test_client.py (renamed from nova/tests/scheduler/test_client.py)0
-rw-r--r--nova/tests/unit/scheduler/test_filter_scheduler.py596
-rw-r--r--nova/tests/unit/scheduler/test_filters.py (renamed from nova/tests/scheduler/test_filters.py)0
-rw-r--r--nova/tests/unit/scheduler/test_filters_utils.py (renamed from nova/tests/scheduler/test_filters_utils.py)0
-rw-r--r--nova/tests/unit/scheduler/test_host_filters.py38
-rw-r--r--nova/tests/unit/scheduler/test_host_manager.py545
-rw-r--r--nova/tests/unit/scheduler/test_ironic_host_manager.py430
-rw-r--r--nova/tests/unit/scheduler/test_rpcapi.py (renamed from nova/tests/scheduler/test_rpcapi.py)0
-rw-r--r--nova/tests/unit/scheduler/test_scheduler.py378
-rw-r--r--nova/tests/unit/scheduler/test_scheduler_options.py (renamed from nova/tests/scheduler/test_scheduler_options.py)0
-rw-r--r--nova/tests/unit/scheduler/test_scheduler_utils.py314
-rw-r--r--nova/tests/unit/scheduler/test_weights.py338
-rw-r--r--nova/tests/unit/servicegroup/__init__.py (renamed from nova/tests/servicegroup/__init__.py)0
-rw-r--r--nova/tests/unit/servicegroup/test_db_servicegroup.py144
-rw-r--r--nova/tests/unit/servicegroup/test_mc_servicegroup.py213
-rw-r--r--nova/tests/unit/servicegroup/test_zk_driver.py65
-rw-r--r--nova/tests/unit/ssl_cert/ca.crt (renamed from nova/tests/ssl_cert/ca.crt)0
-rw-r--r--nova/tests/unit/ssl_cert/certificate.crt (renamed from nova/tests/ssl_cert/certificate.crt)0
-rw-r--r--nova/tests/unit/ssl_cert/privatekey.key (renamed from nova/tests/ssl_cert/privatekey.key)0
-rw-r--r--nova/tests/unit/test_api_validation.py (renamed from nova/tests/test_api_validation.py)0
-rw-r--r--nova/tests/unit/test_availability_zones.py255
-rw-r--r--nova/tests/unit/test_baserpc.py (renamed from nova/tests/test_baserpc.py)0
-rw-r--r--nova/tests/unit/test_bdm.py248
-rw-r--r--nova/tests/unit/test_block_device.py604
-rw-r--r--nova/tests/unit/test_cinder.py (renamed from nova/tests/test_cinder.py)0
-rw-r--r--nova/tests/unit/test_configdrive2.py104
-rw-r--r--nova/tests/unit/test_context.py (renamed from nova/tests/test_context.py)0
-rw-r--r--nova/tests/unit/test_crypto.py (renamed from nova/tests/test_crypto.py)0
-rw-r--r--nova/tests/unit/test_exception.py (renamed from nova/tests/test_exception.py)0
-rw-r--r--nova/tests/unit/test_flavors.py (renamed from nova/tests/test_flavors.py)0
-rw-r--r--nova/tests/unit/test_hacking.py (renamed from nova/tests/test_hacking.py)0
-rw-r--r--nova/tests/unit/test_hooks.py (renamed from nova/tests/test_hooks.py)0
-rw-r--r--nova/tests/unit/test_instance_types_extra_specs.py (renamed from nova/tests/test_instance_types_extra_specs.py)0
-rw-r--r--nova/tests/unit/test_iptables_network.py (renamed from nova/tests/test_iptables_network.py)0
-rw-r--r--nova/tests/unit/test_ipv6.py (renamed from nova/tests/test_ipv6.py)0
-rw-r--r--nova/tests/unit/test_linuxscsi.py (renamed from nova/tests/test_linuxscsi.py)0
-rw-r--r--nova/tests/unit/test_loadables.py113
-rw-r--r--nova/tests/unit/test_matchers.py349
-rw-r--r--nova/tests/unit/test_metadata.py865
-rw-r--r--nova/tests/unit/test_notifications.py394
-rw-r--r--nova/tests/unit/test_nova_manage.py467
-rw-r--r--nova/tests/unit/test_objectstore.py (renamed from nova/tests/test_objectstore.py)0
-rw-r--r--nova/tests/unit/test_pipelib.py (renamed from nova/tests/test_pipelib.py)0
-rw-r--r--nova/tests/unit/test_policy.py231
-rw-r--r--nova/tests/unit/test_quota.py2765
-rw-r--r--nova/tests/unit/test_safeutils.py (renamed from nova/tests/test_safeutils.py)0
-rw-r--r--nova/tests/unit/test_service.py370
-rw-r--r--nova/tests/unit/test_test.py (renamed from nova/tests/test_test.py)0
-rw-r--r--nova/tests/unit/test_test_utils.py70
-rw-r--r--nova/tests/unit/test_utils.py981
-rw-r--r--nova/tests/unit/test_versions.py (renamed from nova/tests/test_versions.py)0
-rw-r--r--nova/tests/unit/test_weights.py (renamed from nova/tests/test_weights.py)0
-rw-r--r--nova/tests/unit/test_wsgi.py263
-rw-r--r--nova/tests/unit/utils.py (renamed from nova/tests/utils.py)0
-rw-r--r--nova/tests/unit/virt/__init__.py (renamed from nova/tests/virt/__init__.py)0
-rw-r--r--nova/tests/unit/virt/disk/__init__.py (renamed from nova/tests/virt/disk/__init__.py)0
-rw-r--r--nova/tests/unit/virt/disk/mount/__init__.py (renamed from nova/tests/virt/disk/mount/__init__.py)0
-rw-r--r--nova/tests/unit/virt/disk/mount/test_loop.py (renamed from nova/tests/virt/disk/mount/test_loop.py)0
-rw-r--r--nova/tests/unit/virt/disk/mount/test_nbd.py (renamed from nova/tests/virt/disk/mount/test_nbd.py)0
-rw-r--r--nova/tests/unit/virt/disk/test_api.py (renamed from nova/tests/virt/disk/test_api.py)0
-rw-r--r--nova/tests/unit/virt/disk/test_inject.py284
-rw-r--r--nova/tests/unit/virt/disk/vfs/__init__.py (renamed from nova/tests/virt/disk/vfs/__init__.py)0
-rw-r--r--nova/tests/unit/virt/disk/vfs/fakeguestfs.py (renamed from nova/tests/virt/disk/vfs/fakeguestfs.py)0
-rw-r--r--nova/tests/unit/virt/disk/vfs/test_guestfs.py264
-rw-r--r--nova/tests/unit/virt/disk/vfs/test_localfs.py385
-rw-r--r--nova/tests/unit/virt/hyperv/__init__.py (renamed from nova/tests/virt/hyperv/__init__.py)0
-rw-r--r--nova/tests/unit/virt/hyperv/db_fakes.py (renamed from nova/tests/virt/hyperv/db_fakes.py)0
-rw-r--r--nova/tests/unit/virt/hyperv/fake.py (renamed from nova/tests/virt/hyperv/fake.py)0
-rw-r--r--nova/tests/unit/virt/hyperv/test_basevolumeutils.py (renamed from nova/tests/virt/hyperv/test_basevolumeutils.py)0
-rw-r--r--nova/tests/unit/virt/hyperv/test_hostutils.py (renamed from nova/tests/virt/hyperv/test_hostutils.py)0
-rw-r--r--nova/tests/unit/virt/hyperv/test_hypervapi.py1967
-rw-r--r--nova/tests/unit/virt/hyperv/test_ioutils.py (renamed from nova/tests/virt/hyperv/test_ioutils.py)0
-rw-r--r--nova/tests/unit/virt/hyperv/test_migrationops.py79
-rw-r--r--nova/tests/unit/virt/hyperv/test_networkutils.py (renamed from nova/tests/virt/hyperv/test_networkutils.py)0
-rw-r--r--nova/tests/unit/virt/hyperv/test_networkutilsv2.py45
-rw-r--r--nova/tests/unit/virt/hyperv/test_pathutils.py (renamed from nova/tests/virt/hyperv/test_pathutils.py)0
-rw-r--r--nova/tests/unit/virt/hyperv/test_rdpconsoleutils.py (renamed from nova/tests/virt/hyperv/test_rdpconsoleutils.py)0
-rw-r--r--nova/tests/unit/virt/hyperv/test_rdpconsoleutilsv2.py (renamed from nova/tests/virt/hyperv/test_rdpconsoleutilsv2.py)0
-rw-r--r--nova/tests/unit/virt/hyperv/test_utilsfactory.py (renamed from nova/tests/virt/hyperv/test_utilsfactory.py)0
-rw-r--r--nova/tests/unit/virt/hyperv/test_vhdutils.py (renamed from nova/tests/virt/hyperv/test_vhdutils.py)0
-rw-r--r--nova/tests/unit/virt/hyperv/test_vhdutilsv2.py (renamed from nova/tests/virt/hyperv/test_vhdutilsv2.py)0
-rw-r--r--nova/tests/unit/virt/hyperv/test_vmops.py230
-rw-r--r--nova/tests/unit/virt/hyperv/test_vmutils.py (renamed from nova/tests/virt/hyperv/test_vmutils.py)0
-rw-r--r--nova/tests/unit/virt/hyperv/test_vmutilsv2.py197
-rw-r--r--nova/tests/unit/virt/hyperv/test_volumeutils.py151
-rw-r--r--nova/tests/unit/virt/hyperv/test_volumeutilsv2.py (renamed from nova/tests/virt/hyperv/test_volumeutilsv2.py)0
-rw-r--r--nova/tests/unit/virt/ironic/__init__.py (renamed from nova/tests/virt/ironic/__init__.py)0
-rw-r--r--nova/tests/unit/virt/ironic/test_client_wrapper.py126
-rw-r--r--nova/tests/unit/virt/ironic/test_driver.py1268
-rw-r--r--nova/tests/unit/virt/ironic/test_patcher.py139
-rw-r--r--nova/tests/unit/virt/ironic/utils.py (renamed from nova/tests/virt/ironic/utils.py)0
-rw-r--r--nova/tests/unit/virt/libvirt/__init__.py (renamed from nova/tests/virt/libvirt/__init__.py)0
-rw-r--r--nova/tests/unit/virt/libvirt/fake_imagebackend.py (renamed from nova/tests/virt/libvirt/fake_imagebackend.py)0
-rw-r--r--nova/tests/unit/virt/libvirt/fake_libvirt_utils.py (renamed from nova/tests/virt/libvirt/fake_libvirt_utils.py)0
-rw-r--r--nova/tests/unit/virt/libvirt/fakelibvirt.py (renamed from nova/tests/virt/libvirt/fakelibvirt.py)0
-rw-r--r--nova/tests/unit/virt/libvirt/test_blockinfo.py991
-rw-r--r--nova/tests/unit/virt/libvirt/test_config.py2344
-rw-r--r--nova/tests/unit/virt/libvirt/test_designer.py (renamed from nova/tests/virt/libvirt/test_designer.py)0
-rw-r--r--nova/tests/unit/virt/libvirt/test_dmcrypt.py (renamed from nova/tests/virt/libvirt/test_dmcrypt.py)0
-rw-r--r--nova/tests/unit/virt/libvirt/test_driver.py12576
-rw-r--r--nova/tests/unit/virt/libvirt/test_fakelibvirt.py386
-rw-r--r--nova/tests/unit/virt/libvirt/test_firewall.py749
-rw-r--r--nova/tests/unit/virt/libvirt/test_imagebackend.py1309
-rw-r--r--nova/tests/unit/virt/libvirt/test_imagecache.py887
-rw-r--r--nova/tests/unit/virt/libvirt/test_lvm.py (renamed from nova/tests/virt/libvirt/test_lvm.py)0
-rw-r--r--nova/tests/unit/virt/libvirt/test_rbd.py (renamed from nova/tests/virt/libvirt/test_rbd.py)0
-rw-r--r--nova/tests/unit/virt/libvirt/test_utils.py (renamed from nova/tests/virt/libvirt/test_utils.py)0
-rw-r--r--nova/tests/unit/virt/libvirt/test_vif.py959
-rw-r--r--nova/tests/unit/virt/libvirt/test_volume.py1160
-rw-r--r--nova/tests/unit/virt/test_block_device.py684
-rw-r--r--nova/tests/unit/virt/test_configdrive.py (renamed from nova/tests/virt/test_configdrive.py)0
-rw-r--r--nova/tests/unit/virt/test_diagnostics.py (renamed from nova/tests/virt/test_diagnostics.py)0
-rw-r--r--nova/tests/unit/virt/test_driver.py58
-rw-r--r--nova/tests/unit/virt/test_events.py (renamed from nova/tests/virt/test_events.py)0
-rw-r--r--nova/tests/unit/virt/test_hardware.py1439
-rw-r--r--nova/tests/unit/virt/test_imagecache.py122
-rw-r--r--nova/tests/unit/virt/test_images.py (renamed from nova/tests/virt/test_images.py)0
-rw-r--r--nova/tests/unit/virt/test_virt.py (renamed from nova/tests/virt/test_virt.py)0
-rw-r--r--nova/tests/unit/virt/test_virt_drivers.py881
-rw-r--r--nova/tests/unit/virt/test_volumeutils.py (renamed from nova/tests/virt/test_volumeutils.py)0
-rw-r--r--nova/tests/unit/virt/vmwareapi/__init__.py (renamed from nova/tests/virt/vmwareapi/__init__.py)0
-rw-r--r--nova/tests/unit/virt/vmwareapi/fake.py (renamed from nova/tests/virt/vmwareapi/fake.py)0
-rw-r--r--nova/tests/unit/virt/vmwareapi/stubs.py131
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_configdrive.py168
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_driver_api.py2650
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_ds_util.py548
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_ds_util_datastore_selection.py (renamed from nova/tests/virt/vmwareapi/test_ds_util_datastore_selection.py)0
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_imagecache.py277
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_images.py216
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_io_util.py (renamed from nova/tests/virt/vmwareapi/test_io_util.py)0
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_read_write_util.py (renamed from nova/tests/virt/vmwareapi/test_read_write_util.py)0
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vif.py346
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vim_util.py117
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vm_util.py1069
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vmops.py1293
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_volumeops.py95
-rw-r--r--nova/tests/unit/virt/xenapi/__init__.py (renamed from nova/tests/virt/xenapi/__init__.py)0
-rw-r--r--nova/tests/unit/virt/xenapi/client/__init__.py (renamed from nova/tests/virt/xenapi/client/__init__.py)0
-rw-r--r--nova/tests/unit/virt/xenapi/client/test_objects.py113
-rw-r--r--nova/tests/unit/virt/xenapi/client/test_session.py158
-rw-r--r--nova/tests/unit/virt/xenapi/image/__init__.py (renamed from nova/tests/virt/xenapi/image/__init__.py)0
-rw-r--r--nova/tests/unit/virt/xenapi/image/test_bittorrent.py163
-rw-r--r--nova/tests/unit/virt/xenapi/image/test_glance.py256
-rw-r--r--nova/tests/unit/virt/xenapi/image/test_utils.py (renamed from nova/tests/virt/xenapi/image/test_utils.py)0
-rw-r--r--nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py (renamed from nova/tests/virt/xenapi/image/test_vdi_through_dev.py)0
-rw-r--r--nova/tests/unit/virt/xenapi/stubs.py365
-rw-r--r--nova/tests/unit/virt/xenapi/test_agent.py (renamed from nova/tests/virt/xenapi/test_agent.py)0
-rw-r--r--nova/tests/unit/virt/xenapi/test_driver.py101
-rw-r--r--nova/tests/unit/virt/xenapi/test_network_utils.py76
-rw-r--r--nova/tests/unit/virt/xenapi/test_vm_utils.py2422
-rw-r--r--nova/tests/unit/virt/xenapi/test_vmops.py1124
-rw-r--r--nova/tests/unit/virt/xenapi/test_volume_utils.py232
-rw-r--r--nova/tests/unit/virt/xenapi/test_volumeops.py549
-rw-r--r--nova/tests/unit/virt/xenapi/test_xenapi.py4105
-rw-r--r--nova/tests/unit/virt/xenapi/vm_rrd.xml (renamed from nova/tests/virt/xenapi/vm_rrd.xml)0
-rw-r--r--nova/tests/unit/volume/__init__.py (renamed from nova/tests/volume/__init__.py)0
-rw-r--r--nova/tests/unit/volume/encryptors/__init__.py (renamed from nova/tests/volume/encryptors/__init__.py)0
-rw-r--r--nova/tests/unit/volume/encryptors/test_base.py54
-rw-r--r--nova/tests/unit/volume/encryptors/test_cryptsetup.py83
-rw-r--r--nova/tests/unit/volume/encryptors/test_luks.py71
-rw-r--r--nova/tests/unit/volume/encryptors/test_nop.py28
-rw-r--r--nova/tests/unit/volume/test_cinder.py (renamed from nova/tests/volume/test_cinder.py)0
-rw-r--r--nova/tests/virt/disk/test_inject.py284
-rw-r--r--nova/tests/virt/disk/vfs/test_guestfs.py264
-rw-r--r--nova/tests/virt/disk/vfs/test_localfs.py385
-rw-r--r--nova/tests/virt/hyperv/test_hypervapi.py1967
-rw-r--r--nova/tests/virt/hyperv/test_migrationops.py79
-rw-r--r--nova/tests/virt/hyperv/test_networkutilsv2.py45
-rw-r--r--nova/tests/virt/hyperv/test_vmops.py230
-rw-r--r--nova/tests/virt/hyperv/test_vmutilsv2.py197
-rw-r--r--nova/tests/virt/hyperv/test_volumeutils.py151
-rw-r--r--nova/tests/virt/ironic/test_client_wrapper.py126
-rw-r--r--nova/tests/virt/ironic/test_driver.py1268
-rw-r--r--nova/tests/virt/ironic/test_patcher.py139
-rw-r--r--nova/tests/virt/libvirt/test_blockinfo.py991
-rw-r--r--nova/tests/virt/libvirt/test_config.py2344
-rw-r--r--nova/tests/virt/libvirt/test_driver.py12576
-rw-r--r--nova/tests/virt/libvirt/test_fakelibvirt.py386
-rw-r--r--nova/tests/virt/libvirt/test_firewall.py749
-rw-r--r--nova/tests/virt/libvirt/test_imagebackend.py1309
-rw-r--r--nova/tests/virt/libvirt/test_imagecache.py887
-rw-r--r--nova/tests/virt/libvirt/test_vif.py959
-rw-r--r--nova/tests/virt/libvirt/test_volume.py1160
-rw-r--r--nova/tests/virt/test_block_device.py684
-rw-r--r--nova/tests/virt/test_driver.py58
-rw-r--r--nova/tests/virt/test_hardware.py1439
-rw-r--r--nova/tests/virt/test_imagecache.py122
-rw-r--r--nova/tests/virt/test_virt_drivers.py879
-rw-r--r--nova/tests/virt/vmwareapi/stubs.py131
-rw-r--r--nova/tests/virt/vmwareapi/test_configdrive.py168
-rw-r--r--nova/tests/virt/vmwareapi/test_driver_api.py2650
-rw-r--r--nova/tests/virt/vmwareapi/test_ds_util.py548
-rw-r--r--nova/tests/virt/vmwareapi/test_imagecache.py277
-rw-r--r--nova/tests/virt/vmwareapi/test_images.py216
-rw-r--r--nova/tests/virt/vmwareapi/test_vif.py346
-rw-r--r--nova/tests/virt/vmwareapi/test_vim_util.py117
-rw-r--r--nova/tests/virt/vmwareapi/test_vm_util.py1069
-rw-r--r--nova/tests/virt/vmwareapi/test_vmops.py1293
-rw-r--r--nova/tests/virt/vmwareapi/test_volumeops.py95
-rw-r--r--nova/tests/virt/xenapi/client/test_objects.py113
-rw-r--r--nova/tests/virt/xenapi/client/test_session.py158
-rw-r--r--nova/tests/virt/xenapi/image/test_bittorrent.py163
-rw-r--r--nova/tests/virt/xenapi/image/test_glance.py256
-rw-r--r--nova/tests/virt/xenapi/stubs.py365
-rw-r--r--nova/tests/virt/xenapi/test_driver.py101
-rw-r--r--nova/tests/virt/xenapi/test_network_utils.py76
-rw-r--r--nova/tests/virt/xenapi/test_vm_utils.py2422
-rw-r--r--nova/tests/virt/xenapi/test_vmops.py1124
-rw-r--r--nova/tests/virt/xenapi/test_volume_utils.py232
-rw-r--r--nova/tests/virt/xenapi/test_volumeops.py549
-rw-r--r--nova/tests/virt/xenapi/test_xenapi.py4104
-rw-r--r--nova/tests/volume/encryptors/test_base.py54
-rw-r--r--nova/tests/volume/encryptors/test_cryptsetup.py83
-rw-r--r--nova/tests/volume/encryptors/test_luks.py71
-rw-r--r--nova/tests/volume/encryptors/test_nop.py28
2196 files changed, 179685 insertions, 179679 deletions
diff --git a/nova/test.py b/nova/test.py
index ef6d5b7373..45e7015715 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -54,8 +54,8 @@ from nova.openstack.common import log as nova_logging
from nova import paths
from nova import rpc
from nova import service
-from nova.tests import conf_fixture
-from nova.tests import policy_fixture
+from nova.tests.unit import conf_fixture
+from nova.tests.unit import policy_fixture
from nova import utils
diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py
index a40a666484..e69de29bb2 100644
--- a/nova/tests/__init__.py
+++ b/nova/tests/__init__.py
@@ -1,49 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-:mod:`nova.tests` -- Nova Unittests
-=====================================================
-
-.. automodule:: nova.tests
- :platform: Unix
-"""
-
-# TODO(mikal): move eventlet imports to nova.__init__ once we move to PBR
-import os
-import sys
-import traceback
-
-
-# NOTE(mikal): All of this is because if dnspython is present in your
-# environment then eventlet monkeypatches socket.getaddrinfo() with an
-# implementation which doesn't work for IPv6. What we're checking here is
-# that the magic environment variable was set when the import happened.
-# NOTE(dims): Prevent this code from kicking in under docs generation
-# as it leads to spurious errors/warning.
-stack = traceback.extract_stack()
-if ('eventlet' in sys.modules and
- os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes' and
- (len(stack) < 2 or 'sphinx' not in stack[-2][0])):
- raise ImportError('eventlet imported before nova/cmd/__init__ '
- '(env var set to %s)'
- % os.environ.get('EVENTLET_NO_GREENDNS'))
-
-os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
-
-import eventlet
-
-eventlet.monkey_patch(os=False)
diff --git a/nova/tests/api/ec2/test_api.py b/nova/tests/api/ec2/test_api.py
deleted file mode 100644
index 4ceb71c53c..0000000000
--- a/nova/tests/api/ec2/test_api.py
+++ /dev/null
@@ -1,635 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Unit tests for the API endpoint."""
-
-import random
-import re
-import StringIO
-
-import boto
-import boto.connection
-from boto.ec2 import regioninfo
-from boto import exception as boto_exc
-# newer versions of boto use their own wrapper on top of httplib.HTTPResponse
-if hasattr(boto.connection, 'HTTPResponse'):
- httplib = boto.connection
-else:
- import httplib
-import fixtures
-import webob
-
-from nova.api import auth
-from nova.api import ec2
-from nova.api.ec2 import ec2utils
-from nova import block_device
-from nova import context
-from nova import exception
-from nova.openstack.common import versionutils
-from nova import test
-from nova.tests import matchers
-
-
-class FakeHttplibSocket(object):
- """a fake socket implementation for httplib.HTTPResponse, trivial."""
- def __init__(self, response_string):
- self.response_string = response_string
- self._buffer = StringIO.StringIO(response_string)
-
- def makefile(self, _mode, _other):
- """Returns the socket's internal buffer."""
- return self._buffer
-
-
-class FakeHttplibConnection(object):
- """A fake httplib.HTTPConnection for boto to use
-
- requests made via this connection actually get translated and routed into
- our WSGI app, we then wait for the response and turn it back into
- the HTTPResponse that boto expects.
- """
- def __init__(self, app, host, is_secure=False):
- self.app = app
- self.host = host
-
- def request(self, method, path, data, headers):
- req = webob.Request.blank(path)
- req.method = method
- req.body = data
- req.headers = headers
- req.headers['Accept'] = 'text/html'
- req.host = self.host
- # Call the WSGI app, get the HTTP response
- resp = str(req.get_response(self.app))
- # For some reason, the response doesn't have "HTTP/1.0 " prepended; I
- # guess that's a function the web server usually provides.
- resp = "HTTP/1.0 %s" % resp
- self.sock = FakeHttplibSocket(resp)
- self.http_response = httplib.HTTPResponse(self.sock)
- # NOTE(vish): boto is accessing private variables for some reason
- self._HTTPConnection__response = self.http_response
- self.http_response.begin()
-
- def getresponse(self):
- return self.http_response
-
- def getresponsebody(self):
- return self.sock.response_string
-
- def close(self):
- """Required for compatibility with boto/tornado."""
- pass
-
-
-class XmlConversionTestCase(test.NoDBTestCase):
- """Unit test api xml conversion."""
- def test_number_conversion(self):
- conv = ec2utils._try_convert
- self.assertIsNone(conv('None'))
- self.assertEqual(conv('True'), True)
- self.assertEqual(conv('TRUE'), True)
- self.assertEqual(conv('true'), True)
- self.assertEqual(conv('False'), False)
- self.assertEqual(conv('FALSE'), False)
- self.assertEqual(conv('false'), False)
- self.assertEqual(conv('0'), 0)
- self.assertEqual(conv('42'), 42)
- self.assertEqual(conv('3.14'), 3.14)
- self.assertEqual(conv('-57.12'), -57.12)
- self.assertEqual(conv('0x57'), 0x57)
- self.assertEqual(conv('-0x57'), -0x57)
- self.assertEqual(conv('-'), '-')
- self.assertEqual(conv('-0'), 0)
- self.assertEqual(conv('0.0'), 0.0)
- self.assertEqual(conv('1e-8'), 0.0)
- self.assertEqual(conv('-1e-8'), 0.0)
- self.assertEqual(conv('0xDD8G'), '0xDD8G')
- self.assertEqual(conv('0XDD8G'), '0XDD8G')
- self.assertEqual(conv('-stringy'), '-stringy')
- self.assertEqual(conv('stringy'), 'stringy')
- self.assertEqual(conv('add'), 'add')
- self.assertEqual(conv('remove'), 'remove')
- self.assertEqual(conv(''), '')
-
-
-class Ec2utilsTestCase(test.NoDBTestCase):
- def test_ec2_id_to_id(self):
- self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30)
- self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29)
- self.assertEqual(ec2utils.ec2_id_to_id('snap-0000001c'), 28)
- self.assertEqual(ec2utils.ec2_id_to_id('vol-0000001b'), 27)
-
- def test_bad_ec2_id(self):
- self.assertRaises(exception.InvalidEc2Id,
- ec2utils.ec2_id_to_id,
- 'badone')
-
- def test_id_to_ec2_id(self):
- self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e')
- self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d')
- self.assertEqual(ec2utils.id_to_ec2_snap_id(28), 'snap-0000001c')
- self.assertEqual(ec2utils.id_to_ec2_vol_id(27), 'vol-0000001b')
-
- def test_dict_from_dotted_str(self):
- in_str = [('BlockDeviceMapping.1.DeviceName', '/dev/sda1'),
- ('BlockDeviceMapping.1.Ebs.SnapshotId', 'snap-0000001c'),
- ('BlockDeviceMapping.1.Ebs.VolumeSize', '80'),
- ('BlockDeviceMapping.1.Ebs.DeleteOnTermination', 'false'),
- ('BlockDeviceMapping.2.DeviceName', '/dev/sdc'),
- ('BlockDeviceMapping.2.VirtualName', 'ephemeral0')]
- expected_dict = {
- 'block_device_mapping': {
- '1': {'device_name': '/dev/sda1',
- 'ebs': {'snapshot_id': 'snap-0000001c',
- 'volume_size': 80,
- 'delete_on_termination': False}},
- '2': {'device_name': '/dev/sdc',
- 'virtual_name': 'ephemeral0'}}}
- out_dict = ec2utils.dict_from_dotted_str(in_str)
-
- self.assertThat(out_dict, matchers.DictMatches(expected_dict))
-
- def test_properties_root_defice_name(self):
- mappings = [{"device": "/dev/sda1", "virtual": "root"}]
- properties0 = {'mappings': mappings}
- properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings}
-
- root_device_name = block_device.properties_root_device_name(
- properties0)
- self.assertEqual(root_device_name, '/dev/sda1')
-
- root_device_name = block_device.properties_root_device_name(
- properties1)
- self.assertEqual(root_device_name, '/dev/sdb')
-
- def test_regex_from_ec2_regex(self):
- def _test_re(ec2_regex, expected, literal, match=True):
- regex = ec2utils.regex_from_ec2_regex(ec2_regex)
- self.assertEqual(regex, expected)
- if match:
- self.assertIsNotNone(re.match(regex, literal))
- else:
- self.assertIsNone(re.match(regex, literal))
-
- # wildcards
- _test_re('foo', '\Afoo\Z(?s)', 'foo')
- _test_re('foo', '\Afoo\Z(?s)', 'baz', match=False)
- _test_re('foo?bar', '\Afoo.bar\Z(?s)', 'foo bar')
- _test_re('foo?bar', '\Afoo.bar\Z(?s)', 'foo bar', match=False)
- _test_re('foo*bar', '\Afoo.*bar\Z(?s)', 'foo QUUX bar')
-
- # backslashes and escaped wildcards
- _test_re('foo\\', '\Afoo\\\\\Z(?s)', 'foo\\')
- _test_re('foo*bar', '\Afoo.*bar\Z(?s)', 'zork QUUX bar', match=False)
- _test_re('foo\\?bar', '\Afoo[?]bar\Z(?s)', 'foo?bar')
- _test_re('foo\\?bar', '\Afoo[?]bar\Z(?s)', 'foo bar', match=False)
- _test_re('foo\\*bar', '\Afoo[*]bar\Z(?s)', 'foo*bar')
- _test_re('foo\\*bar', '\Afoo[*]bar\Z(?s)', 'foo bar', match=False)
-
- # analog to the example given in the EC2 API docs
- ec2_regex = '\*nova\?\\end'
- expected = r'\A[*]nova[?]\\end\Z(?s)'
- literal = r'*nova?\end'
- _test_re(ec2_regex, expected, literal)
-
- def test_mapping_prepend_dev(self):
- mappings = [
- {'virtual': 'ami',
- 'device': 'sda1'},
- {'virtual': 'root',
- 'device': '/dev/sda1'},
-
- {'virtual': 'swap',
- 'device': 'sdb1'},
- {'virtual': 'swap',
- 'device': '/dev/sdb2'},
-
- {'virtual': 'ephemeral0',
- 'device': 'sdc1'},
- {'virtual': 'ephemeral1',
- 'device': '/dev/sdc1'}]
- expected_result = [
- {'virtual': 'ami',
- 'device': 'sda1'},
- {'virtual': 'root',
- 'device': '/dev/sda1'},
-
- {'virtual': 'swap',
- 'device': '/dev/sdb1'},
- {'virtual': 'swap',
- 'device': '/dev/sdb2'},
-
- {'virtual': 'ephemeral0',
- 'device': '/dev/sdc1'},
- {'virtual': 'ephemeral1',
- 'device': '/dev/sdc1'}]
- self.assertThat(block_device.mappings_prepend_dev(mappings),
- matchers.DictListMatches(expected_result))
-
-
-class ApiEc2TestCase(test.TestCase):
- """Unit test for the cloud controller on an EC2 API."""
- def setUp(self):
- super(ApiEc2TestCase, self).setUp()
- self.host = '127.0.0.1'
- # NOTE(vish): skipping the Authorizer
- roles = ['sysadmin', 'netadmin']
- ctxt = context.RequestContext('fake', 'fake', roles=roles)
- self.app = auth.InjectContext(ctxt, ec2.FaultWrapper(
- ec2.RequestLogging(ec2.Requestify(ec2.Authorizer(ec2.Executor()
- ), 'nova.api.ec2.cloud.CloudController'))))
- self.useFixture(fixtures.FakeLogger('boto'))
-
- def expect_http(self, host=None, is_secure=False, api_version=None):
- """Returns a new EC2 connection."""
- self.ec2 = boto.connect_ec2(
- aws_access_key_id='fake',
- aws_secret_access_key='fake',
- is_secure=False,
- region=regioninfo.RegionInfo(None, 'test', self.host),
- port=8773,
- path='/services/Cloud')
- if api_version:
- self.ec2.APIVersion = api_version
-
- self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
- self.http = FakeHttplibConnection(
- self.app, '%s:8773' % (self.host), False)
- # pylint: disable=E1103
- if versionutils.is_compatible('2.14', boto.Version, same_major=False):
- self.ec2.new_http_connection(host or self.host, 8773,
- is_secure).AndReturn(self.http)
- elif versionutils.is_compatible('2', boto.Version, same_major=False):
- self.ec2.new_http_connection(host or '%s:8773' % (self.host),
- is_secure).AndReturn(self.http)
- else:
- self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
- return self.http
-
- def test_xmlns_version_matches_request_version(self):
- self.expect_http(api_version='2010-10-30')
- self.mox.ReplayAll()
-
- # Any request should be fine
- self.ec2.get_all_instances()
- self.assertTrue(self.ec2.APIVersion in self.http.getresponsebody(),
- 'The version in the xmlns of the response does '
- 'not match the API version given in the request.')
-
- def test_describe_instances(self):
- """Test that, after creating a user and a project, the describe
- instances call to the API works properly.
- """
- self.expect_http()
- self.mox.ReplayAll()
- self.assertEqual(self.ec2.get_all_instances(), [])
-
- def test_terminate_invalid_instance(self):
- # Attempt to terminate an invalid instance.
- self.expect_http()
- self.mox.ReplayAll()
- self.assertRaises(boto_exc.EC2ResponseError,
- self.ec2.terminate_instances, "i-00000005")
-
- def test_get_all_key_pairs(self):
- """Test that, after creating a user and project and generating
- a key pair, that the API call to list key pairs works properly.
- """
- keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
- for x in range(random.randint(4, 8)))
- self.expect_http()
- self.mox.ReplayAll()
- self.ec2.create_key_pair(keyname)
- rv = self.ec2.get_all_key_pairs()
- results = [k for k in rv if k.name == keyname]
- self.assertEqual(len(results), 1)
-
- def test_create_duplicate_key_pair(self):
- """Test that, after successfully generating a keypair,
- requesting a second keypair with the same name fails sanely.
- """
- self.expect_http()
- self.mox.ReplayAll()
- self.ec2.create_key_pair('test')
-
- try:
- self.ec2.create_key_pair('test')
- except boto_exc.EC2ResponseError as e:
- if e.code == 'InvalidKeyPair.Duplicate':
- pass
- else:
- self.assertEqual('InvalidKeyPair.Duplicate', e.code)
- else:
- self.fail('Exception not raised.')
-
- def test_get_all_security_groups(self):
- # Test that we can retrieve security groups.
- self.expect_http()
- self.mox.ReplayAll()
-
- rv = self.ec2.get_all_security_groups()
-
- self.assertEqual(len(rv), 1)
- self.assertEqual(rv[0].name, 'default')
-
- def test_create_delete_security_group(self):
- # Test that we can create a security group.
- self.expect_http()
- self.mox.ReplayAll()
-
- security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
- for x in range(random.randint(4, 8)))
-
- self.ec2.create_security_group(security_group_name, 'test group')
-
- self.expect_http()
- self.mox.ReplayAll()
-
- rv = self.ec2.get_all_security_groups()
- self.assertEqual(len(rv), 2)
- self.assertIn(security_group_name, [group.name for group in rv])
-
- self.expect_http()
- self.mox.ReplayAll()
-
- self.ec2.delete_security_group(security_group_name)
-
- def test_group_name_valid_chars_security_group(self):
- """Test that we sanely handle invalid security group names.
-
- EC2 API Spec states we should only accept alphanumeric characters,
- spaces, dashes, and underscores. Amazon implementation
- accepts more characters - so, [:print:] is ok.
- """
- bad_strict_ec2 = "aa \t\x01\x02\x7f"
- bad_amazon_ec2 = "aa #^% -=99"
- test_raise = [
- (True, bad_amazon_ec2, "test desc"),
- (True, "test name", bad_amazon_ec2),
- (False, bad_strict_ec2, "test desc"),
- ]
- for t in test_raise:
- self.expect_http()
- self.mox.ReplayAll()
- self.flags(ec2_strict_validation=t[0])
- self.assertRaises(boto_exc.EC2ResponseError,
- self.ec2.create_security_group,
- t[1],
- t[2])
- test_accept = [
- (False, bad_amazon_ec2, "test desc"),
- (False, "test name", bad_amazon_ec2),
- ]
- for t in test_accept:
- self.expect_http()
- self.mox.ReplayAll()
- self.flags(ec2_strict_validation=t[0])
- self.ec2.create_security_group(t[1], t[2])
- self.expect_http()
- self.mox.ReplayAll()
- self.ec2.delete_security_group(t[1])
-
- def test_group_name_valid_length_security_group(self):
- """Test that we sanely handle invalid security group names.
-
- API Spec states that the length should not exceed 255 char.
- """
- self.expect_http()
- self.mox.ReplayAll()
-
- # Test block group_name > 255 chars
- security_group_name = "".join(random.choice("poiuytrewqasdfghjklmnbvc")
- for x in range(random.randint(256, 266)))
-
- self.assertRaises(boto_exc.EC2ResponseError,
- self.ec2.create_security_group,
- security_group_name,
- 'test group')
-
- def test_authorize_revoke_security_group_cidr(self):
- """Test that we can add and remove CIDR based rules
- to a security group
- """
- self.expect_http()
- self.mox.ReplayAll()
-
- security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
- for x in range(random.randint(4, 8)))
-
- group = self.ec2.create_security_group(security_group_name,
- 'test group')
-
- self.expect_http()
- self.mox.ReplayAll()
- group.connection = self.ec2
-
- group.authorize('tcp', 80, 81, '0.0.0.0/0')
- group.authorize('icmp', -1, -1, '0.0.0.0/0')
- group.authorize('udp', 80, 81, '0.0.0.0/0')
- group.authorize('tcp', 1, 65535, '0.0.0.0/0')
- group.authorize('udp', 1, 65535, '0.0.0.0/0')
- group.authorize('icmp', 1, 0, '0.0.0.0/0')
- group.authorize('icmp', 0, 1, '0.0.0.0/0')
- group.authorize('icmp', 0, 0, '0.0.0.0/0')
-
- def _assert(message, *args):
- try:
- group.authorize(*args)
- except boto_exc.EC2ResponseError as e:
- self.assertEqual(e.status, 400, 'Expected status to be 400')
- self.assertIn(message, e.error_message)
- else:
- raise self.failureException, 'EC2ResponseError not raised'
-
- # Invalid CIDR address
- _assert('Invalid CIDR', 'tcp', 80, 81, '0.0.0.0/0444')
- # Missing ports
- _assert('Not enough parameters', 'tcp', '0.0.0.0/0')
- # from port cannot be greater than to port
- _assert('Invalid port range', 'tcp', 100, 1, '0.0.0.0/0')
- # For tcp, negative values are not allowed
- _assert('Invalid port range', 'tcp', -1, 1, '0.0.0.0/0')
- # For tcp, valid port range 1-65535
- _assert('Invalid port range', 'tcp', 1, 65599, '0.0.0.0/0')
- # Invalid Cidr for ICMP type
- _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.444.0/4')
- # Invalid protocol
- _assert('Invalid IP protocol', 'xyz', 1, 14, '0.0.0.0/0')
- # Invalid port
- _assert('Invalid input received: To and From ports must be integers',
- 'tcp', " ", "81", '0.0.0.0/0')
- # Invalid icmp port
- _assert('Invalid input received: '
- 'Type and Code must be integers for ICMP protocol type',
- 'icmp', " ", "81", '0.0.0.0/0')
- # Invalid CIDR Address
- _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0')
- # Invalid CIDR Address
- _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0/')
- # Invalid Cidr ports
- _assert('Invalid port range', 'icmp', 1, 256, '0.0.0.0/0')
-
- self.expect_http()
- self.mox.ReplayAll()
-
- rv = self.ec2.get_all_security_groups()
-
- group = [grp for grp in rv if grp.name == security_group_name][0]
-
- self.assertEqual(len(group.rules), 8)
- self.assertEqual(int(group.rules[0].from_port), 80)
- self.assertEqual(int(group.rules[0].to_port), 81)
- self.assertEqual(len(group.rules[0].grants), 1)
- self.assertEqual(str(group.rules[0].grants[0]), '0.0.0.0/0')
-
- self.expect_http()
- self.mox.ReplayAll()
- group.connection = self.ec2
-
- group.revoke('tcp', 80, 81, '0.0.0.0/0')
- group.revoke('icmp', -1, -1, '0.0.0.0/0')
- group.revoke('udp', 80, 81, '0.0.0.0/0')
- group.revoke('tcp', 1, 65535, '0.0.0.0/0')
- group.revoke('udp', 1, 65535, '0.0.0.0/0')
- group.revoke('icmp', 1, 0, '0.0.0.0/0')
- group.revoke('icmp', 0, 1, '0.0.0.0/0')
- group.revoke('icmp', 0, 0, '0.0.0.0/0')
-
- self.expect_http()
- self.mox.ReplayAll()
-
- self.ec2.delete_security_group(security_group_name)
-
- self.expect_http()
- self.mox.ReplayAll()
- group.connection = self.ec2
-
- rv = self.ec2.get_all_security_groups()
-
- self.assertEqual(len(rv), 1)
- self.assertEqual(rv[0].name, 'default')
-
- def test_authorize_revoke_security_group_cidr_v6(self):
- """Test that we can add and remove CIDR based rules
- to a security group for IPv6
- """
- self.expect_http()
- self.mox.ReplayAll()
-
- security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
- for x in range(random.randint(4, 8)))
-
- group = self.ec2.create_security_group(security_group_name,
- 'test group')
-
- self.expect_http()
- self.mox.ReplayAll()
- group.connection = self.ec2
-
- group.authorize('tcp', 80, 81, '::/0')
-
- self.expect_http()
- self.mox.ReplayAll()
-
- rv = self.ec2.get_all_security_groups()
-
- group = [grp for grp in rv if grp.name == security_group_name][0]
- self.assertEqual(len(group.rules), 1)
- self.assertEqual(int(group.rules[0].from_port), 80)
- self.assertEqual(int(group.rules[0].to_port), 81)
- self.assertEqual(len(group.rules[0].grants), 1)
- self.assertEqual(str(group.rules[0].grants[0]), '::/0')
-
- self.expect_http()
- self.mox.ReplayAll()
- group.connection = self.ec2
-
- group.revoke('tcp', 80, 81, '::/0')
-
- self.expect_http()
- self.mox.ReplayAll()
-
- self.ec2.delete_security_group(security_group_name)
-
- self.expect_http()
- self.mox.ReplayAll()
- group.connection = self.ec2
-
- rv = self.ec2.get_all_security_groups()
-
- self.assertEqual(len(rv), 1)
- self.assertEqual(rv[0].name, 'default')
-
- def test_authorize_revoke_security_group_foreign_group(self):
- """Test that we can grant and revoke another security group access
- to a security group
- """
- self.expect_http()
- self.mox.ReplayAll()
-
- rand_string = 'sdiuisudfsdcnpaqwertasd'
- security_group_name = "".join(random.choice(rand_string)
- for x in range(random.randint(4, 8)))
- other_security_group_name = "".join(random.choice(rand_string)
- for x in range(random.randint(4, 8)))
-
- group = self.ec2.create_security_group(security_group_name,
- 'test group')
-
- self.expect_http()
- self.mox.ReplayAll()
-
- other_group = self.ec2.create_security_group(other_security_group_name,
- 'some other group')
-
- self.expect_http()
- self.mox.ReplayAll()
- group.connection = self.ec2
-
- group.authorize(src_group=other_group)
-
- self.expect_http()
- self.mox.ReplayAll()
-
- rv = self.ec2.get_all_security_groups()
-
- # I don't bother checkng that we actually find it here,
- # because the create/delete unit test further up should
- # be good enough for that.
- for group in rv:
- if group.name == security_group_name:
- self.assertEqual(len(group.rules), 3)
- self.assertEqual(len(group.rules[0].grants), 1)
- self.assertEqual(str(group.rules[0].grants[0]),
- '%s-%s' % (other_security_group_name, 'fake'))
-
- self.expect_http()
- self.mox.ReplayAll()
-
- rv = self.ec2.get_all_security_groups()
-
- for group in rv:
- if group.name == security_group_name:
- self.expect_http()
- self.mox.ReplayAll()
- group.connection = self.ec2
- group.revoke(src_group=other_group)
-
- self.expect_http()
- self.mox.ReplayAll()
-
- self.ec2.delete_security_group(security_group_name)
- self.ec2.delete_security_group(other_security_group_name)
diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py
deleted file mode 100644
index 98c99fd551..0000000000
--- a/nova/tests/api/ec2/test_cinder_cloud.py
+++ /dev/null
@@ -1,1096 +0,0 @@
-# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import uuid
-
-import fixtures
-from oslo.config import cfg
-
-from nova.api.ec2 import cloud
-from nova.api.ec2 import ec2utils
-from nova.compute import api as compute_api
-from nova.compute import flavors
-from nova.compute import utils as compute_utils
-from nova import context
-from nova import db
-from nova import exception
-from nova import objects
-from nova import test
-from nova.tests import cast_as_call
-from nova.tests import fake_network
-from nova.tests import fake_notifier
-from nova.tests import fake_utils
-from nova.tests.image import fake
-from nova.tests import matchers
-from nova import volume
-
-CONF = cfg.CONF
-CONF.import_opt('compute_driver', 'nova.virt.driver')
-CONF.import_opt('default_flavor', 'nova.compute.flavors')
-CONF.import_opt('use_ipv6', 'nova.netconf')
-
-
-def get_fake_cache():
- def _ip(ip, fixed=True, floats=None):
- ip_dict = {'address': ip, 'type': 'fixed'}
- if not fixed:
- ip_dict['type'] = 'floating'
- if fixed and floats:
- ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
- return ip_dict
-
- info = [{'address': 'aa:bb:cc:dd:ee:ff',
- 'id': 1,
- 'network': {'bridge': 'br0',
- 'id': 1,
- 'label': 'private',
- 'subnets': [{'cidr': '192.168.0.0/24',
- 'ips': [_ip('192.168.0.3',
- floats=['1.2.3.4',
- '5.6.7.8']),
- _ip('192.168.0.4')]}]}}]
- if CONF.use_ipv6:
- ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
- info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
- 'ips': [_ip(ipv6_addr)]})
- return info
-
-
-def get_instances_with_cached_ips(orig_func, *args, **kwargs):
- """Kludge the cache into instance(s) without having to create DB
- entries
- """
- instances = orig_func(*args, **kwargs)
- if isinstance(instances, list):
- for instance in instances:
- instance['info_cache'] = {'network_info': get_fake_cache()}
- else:
- instances['info_cache'] = {'network_info': get_fake_cache()}
- return instances
-
-
-class CinderCloudTestCase(test.TestCase):
-
- REQUIRES_LOCKING = True
-
- def setUp(self):
- super(CinderCloudTestCase, self).setUp()
- ec2utils.reset_cache()
- self.useFixture(fixtures.TempDir()).path
- fake_utils.stub_out_utils_spawn_n(self.stubs)
- self.flags(compute_driver='nova.virt.fake.FakeDriver',
- volume_api_class='nova.tests.fake_volume.API')
-
- def fake_show(meh, context, id, **kwargs):
- return {'id': id,
- 'name': 'fake_name',
- 'container_format': 'ami',
- 'status': 'active',
- 'properties': {
- 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'type': 'machine',
- 'image_state': 'available'}}
-
- def fake_detail(_self, context, **kwargs):
- image = fake_show(None, context, None)
- image['name'] = kwargs.get('filters', {}).get('name')
- return [image]
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_show)
- self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
- fake.stub_out_image_service(self.stubs)
-
- def dumb(*args, **kwargs):
- pass
-
- self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
- fake_network.set_stub_network_methods(self.stubs)
-
- # set up our cloud
- self.cloud = cloud.CloudController()
- self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
-
- # Short-circuit the conductor service
- self.flags(use_local=True, group='conductor')
-
- # Stub out the notification service so we use the no-op serializer
- # and avoid lazy-load traces with the wrap_exception decorator in
- # the compute service.
- fake_notifier.stub_notifier(self.stubs)
- self.addCleanup(fake_notifier.reset)
-
- # set up services
- self.conductor = self.start_service('conductor',
- manager=CONF.conductor.manager)
- self.compute = self.start_service('compute')
- self.scheduler = self.start_service('scheduler')
- self.network = self.start_service('network')
- self.consoleauth = self.start_service('consoleauth')
-
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id,
- self.project_id,
- is_admin=True)
- self.volume_api = volume.API()
- self.volume_api.reset_fake_api(self.context)
-
- self.useFixture(cast_as_call.CastAsCall(self.stubs))
-
- # make sure we can map ami-00000001/2 to a uuid in FakeImageService
- db.s3_image_create(self.context,
- 'cedef40a-ed67-4d10-800e-17455edce175')
- db.s3_image_create(self.context,
- '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
-
- def tearDown(self):
- self.volume_api.reset_fake_api(self.context)
- super(CinderCloudTestCase, self).tearDown()
- fake.FakeImageService_reset()
-
- def _stub_instance_get_with_fixed_ips(self, func_name):
- orig_func = getattr(self.cloud.compute_api, func_name)
-
- def fake_get(*args, **kwargs):
- return get_instances_with_cached_ips(orig_func, *args, **kwargs)
- self.stubs.Set(self.cloud.compute_api, func_name, fake_get)
-
- def _create_key(self, name):
- # NOTE(vish): create depends on pool, so just call helper directly
- keypair_api = compute_api.KeypairAPI()
- return keypair_api.create_key_pair(self.context, self.context.user_id,
- name)
-
- def test_describe_volumes(self):
- # Makes sure describe_volumes works and filters results.
-
- vol1 = self.cloud.create_volume(self.context,
- size=1,
- name='test-1',
- description='test volume 1')
- self.assertEqual(vol1['status'], 'available')
- vol2 = self.cloud.create_volume(self.context,
- size=1,
- name='test-2',
- description='test volume 2')
- result = self.cloud.describe_volumes(self.context)
- self.assertEqual(len(result['volumeSet']), 2)
- result = self.cloud.describe_volumes(self.context,
- [vol1['volumeId']])
- self.assertEqual(len(result['volumeSet']), 1)
- self.assertEqual(vol1['volumeId'], result['volumeSet'][0]['volumeId'])
-
- self.cloud.delete_volume(self.context, vol1['volumeId'])
- self.cloud.delete_volume(self.context, vol2['volumeId'])
-
- def test_format_volume_maps_status(self):
- fake_volume = {'id': 1,
- 'status': 'creating',
- 'availability_zone': 'nova',
- 'volumeId': 'vol-0000000a',
- 'attachmentSet': [{}],
- 'snapshotId': None,
- 'created_at': '2013-04-18T06:03:35.025626',
- 'size': 1,
- 'mountpoint': None,
- 'attach_status': None}
-
- self.assertEqual(self.cloud._format_volume(self.context,
- fake_volume)['status'],
- 'creating')
-
- fake_volume['status'] = 'attaching'
- self.assertEqual(self.cloud._format_volume(self.context,
- fake_volume)['status'],
- 'in-use')
- fake_volume['status'] = 'detaching'
- self.assertEqual(self.cloud._format_volume(self.context,
- fake_volume)['status'],
- 'in-use')
- fake_volume['status'] = 'banana'
- self.assertEqual(self.cloud._format_volume(self.context,
- fake_volume)['status'],
- 'banana')
-
- def test_create_volume_in_availability_zone(self):
- """Makes sure create_volume works when we specify an availability
- zone
- """
- availability_zone = 'zone1:host1'
-
- result = self.cloud.create_volume(self.context,
- size=1,
- availability_zone=availability_zone)
- volume_id = result['volumeId']
- availabilityZone = result['availabilityZone']
- self.assertEqual(availabilityZone, availability_zone)
- result = self.cloud.describe_volumes(self.context)
- self.assertEqual(len(result['volumeSet']), 1)
- self.assertEqual(result['volumeSet'][0]['volumeId'], volume_id)
- self.assertEqual(result['volumeSet'][0]['availabilityZone'],
- availabilityZone)
-
- self.cloud.delete_volume(self.context, volume_id)
-
- def test_create_volume_from_snapshot(self):
- # Makes sure create_volume works when we specify a snapshot.
- availability_zone = 'zone1:host1'
- vol1 = self.cloud.create_volume(self.context,
- size=1,
- availability_zone=availability_zone)
- snap = self.cloud.create_snapshot(self.context,
- vol1['volumeId'],
- name='snap-1',
- description='test snap of vol %s'
- % vol1['volumeId'])
-
- vol2 = self.cloud.create_volume(self.context,
- snapshot_id=snap['snapshotId'])
- volume1_id = vol1['volumeId']
- volume2_id = vol2['volumeId']
-
- result = self.cloud.describe_volumes(self.context)
- self.assertEqual(len(result['volumeSet']), 2)
- self.assertEqual(result['volumeSet'][1]['volumeId'], volume2_id)
-
- self.cloud.delete_volume(self.context, volume2_id)
- self.cloud.delete_snapshot(self.context, snap['snapshotId'])
- self.cloud.delete_volume(self.context, volume1_id)
-
- def test_volume_status_of_attaching_volume(self):
- """Test the volume's status in response when attaching a volume."""
- vol1 = self.cloud.create_volume(self.context,
- size=1,
- name='test-ls',
- description='test volume ls')
- self.assertEqual('available', vol1['status'])
-
- kwargs = {'image_id': 'ami-1',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1}
- ec2_instance_id = self._run_instance(**kwargs)
- resp = self.cloud.attach_volume(self.context,
- vol1['volumeId'],
- ec2_instance_id,
- '/dev/sde')
- # Here,the status should be 'attaching',but it can be 'attached' in
- # unittest scenario if the attach action is very fast.
- self.assertIn(resp['status'], ('attaching', 'attached'))
-
- def test_volume_status_of_detaching_volume(self):
- """Test the volume's status in response when detaching a volume."""
- vol1 = self.cloud.create_volume(self.context,
- size=1,
- name='test-ls',
- description='test volume ls')
- self.assertEqual('available', vol1['status'])
- vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
- kwargs = {'image_id': 'ami-1',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1,
- 'block_device_mapping': [{'device_name': '/dev/sdb',
- 'volume_id': vol1_uuid,
- 'delete_on_termination': True}]}
- self._run_instance(**kwargs)
- resp = self.cloud.detach_volume(self.context,
- vol1['volumeId'])
-
- # Here,the status should be 'detaching',but it can be 'detached' in
- # unittest scenario if the detach action is very fast.
- self.assertIn(resp['status'], ('detaching', 'detached'))
-
- def test_describe_snapshots(self):
- # Makes sure describe_snapshots works and filters results.
- availability_zone = 'zone1:host1'
- vol1 = self.cloud.create_volume(self.context,
- size=1,
- availability_zone=availability_zone)
- snap1 = self.cloud.create_snapshot(self.context,
- vol1['volumeId'],
- name='snap-1',
- description='test snap1 of vol %s' %
- vol1['volumeId'])
- snap2 = self.cloud.create_snapshot(self.context,
- vol1['volumeId'],
- name='snap-1',
- description='test snap2 of vol %s' %
- vol1['volumeId'])
-
- result = self.cloud.describe_snapshots(self.context)
- self.assertEqual(len(result['snapshotSet']), 2)
- result = self.cloud.describe_snapshots(
- self.context,
- snapshot_id=[snap2['snapshotId']])
- self.assertEqual(len(result['snapshotSet']), 1)
-
- self.cloud.delete_snapshot(self.context, snap1['snapshotId'])
- self.cloud.delete_snapshot(self.context, snap2['snapshotId'])
- self.cloud.delete_volume(self.context, vol1['volumeId'])
-
- def test_format_snapshot_maps_status(self):
- fake_snapshot = {'status': 'new',
- 'id': 1,
- 'volume_id': 1,
- 'created_at': 1353560191.08117,
- 'progress': 90,
- 'project_id': str(uuid.uuid4()),
- 'volume_size': 10000,
- 'display_description': 'desc'}
-
- self.assertEqual(self.cloud._format_snapshot(self.context,
- fake_snapshot)['status'],
- 'pending')
-
- fake_snapshot['status'] = 'creating'
- self.assertEqual(self.cloud._format_snapshot(self.context,
- fake_snapshot)['status'],
- 'pending')
-
- fake_snapshot['status'] = 'available'
- self.assertEqual(self.cloud._format_snapshot(self.context,
- fake_snapshot)['status'],
- 'completed')
-
- fake_snapshot['status'] = 'active'
- self.assertEqual(self.cloud._format_snapshot(self.context,
- fake_snapshot)['status'],
- 'completed')
-
- fake_snapshot['status'] = 'deleting'
- self.assertEqual(self.cloud._format_snapshot(self.context,
- fake_snapshot)['status'],
- 'pending')
-
- fake_snapshot['status'] = 'deleted'
- self.assertIsNone(self.cloud._format_snapshot(self.context,
- fake_snapshot))
-
- fake_snapshot['status'] = 'error'
- self.assertEqual(self.cloud._format_snapshot(self.context,
- fake_snapshot)['status'],
- 'error')
-
- fake_snapshot['status'] = 'banana'
- self.assertEqual(self.cloud._format_snapshot(self.context,
- fake_snapshot)['status'],
- 'banana')
-
- def test_create_snapshot(self):
- # Makes sure create_snapshot works.
- availability_zone = 'zone1:host1'
- result = self.cloud.describe_snapshots(self.context)
- vol1 = self.cloud.create_volume(self.context,
- size=1,
- availability_zone=availability_zone)
- snap1 = self.cloud.create_snapshot(self.context,
- vol1['volumeId'],
- name='snap-1',
- description='test snap1 of vol %s' %
- vol1['volumeId'])
-
- snapshot_id = snap1['snapshotId']
- result = self.cloud.describe_snapshots(self.context)
- self.assertEqual(len(result['snapshotSet']), 1)
- self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id)
-
- self.cloud.delete_snapshot(self.context, snap1['snapshotId'])
- self.cloud.delete_volume(self.context, vol1['volumeId'])
-
- def test_delete_snapshot(self):
- # Makes sure delete_snapshot works.
- availability_zone = 'zone1:host1'
- vol1 = self.cloud.create_volume(self.context,
- size=1,
- availability_zone=availability_zone)
- snap1 = self.cloud.create_snapshot(self.context,
- vol1['volumeId'],
- name='snap-1',
- description='test snap1 of vol %s' %
- vol1['volumeId'])
-
- snapshot_id = snap1['snapshotId']
- result = self.cloud.delete_snapshot(self.context,
- snapshot_id=snapshot_id)
- self.assertTrue(result)
- self.cloud.delete_volume(self.context, vol1['volumeId'])
-
- def _block_device_mapping_create(self, instance_uuid, mappings):
- volumes = []
- for bdm in mappings:
- db.block_device_mapping_create(self.context, bdm)
- if 'volume_id' in bdm:
- values = {'id': bdm['volume_id']}
- for bdm_key, vol_key in [('snapshot_id', 'snapshot_id'),
- ('snapshot_size', 'volume_size'),
- ('delete_on_termination',
- 'delete_on_termination')]:
- if bdm_key in bdm:
- values[vol_key] = bdm[bdm_key]
- kwargs = {'name': 'bdmtest-volume',
- 'description': 'bdm test volume description',
- 'status': 'available',
- 'host': 'fake',
- 'size': 1,
- 'attach_status': 'detached',
- 'volume_id': values['id']}
- vol = self.volume_api.create_with_kwargs(self.context,
- **kwargs)
- if 'snapshot_id' in values:
- self.volume_api.create_snapshot(self.context,
- vol['id'],
- 'snapshot-bdm',
- 'fake snap for bdm tests',
- values['snapshot_id'])
-
- self.volume_api.attach(self.context, vol['id'],
- instance_uuid, bdm['device_name'])
- volumes.append(vol)
- return volumes
-
- def _setUpBlockDeviceMapping(self):
- image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- sys_meta = flavors.save_flavor_info(
- {}, flavors.get_flavor(1))
- inst0 = db.instance_create(self.context,
- {'image_ref': image_uuid,
- 'instance_type_id': 1,
- 'root_device_name': '/dev/sdb1',
- 'system_metadata': sys_meta})
- inst1 = db.instance_create(self.context,
- {'image_ref': image_uuid,
- 'instance_type_id': 1,
- 'root_device_name': '/dev/sdc1',
- 'system_metadata': sys_meta})
- inst2 = db.instance_create(self.context,
- {'image_ref': '',
- 'instance_type_id': 1,
- 'root_device_name': '/dev/vda',
- 'system_metadata': sys_meta})
-
- instance0_uuid = inst0['uuid']
- mappings0 = [
- {'instance_uuid': instance0_uuid,
- 'device_name': '/dev/sdb1',
- 'snapshot_id': '1',
- 'volume_id': '2'},
- {'instance_uuid': instance0_uuid,
- 'device_name': '/dev/sdb2',
- 'volume_id': '3',
- 'volume_size': 1},
- {'instance_uuid': instance0_uuid,
- 'device_name': '/dev/sdb3',
- 'delete_on_termination': True,
- 'snapshot_id': '4',
- 'volume_id': '5'},
- {'instance_uuid': instance0_uuid,
- 'device_name': '/dev/sdb4',
- 'delete_on_termination': False,
- 'snapshot_id': '6',
- 'volume_id': '7'},
- {'instance_uuid': instance0_uuid,
- 'device_name': '/dev/sdb5',
- 'snapshot_id': '8',
- 'volume_id': '9',
- 'volume_size': 0},
- {'instance_uuid': instance0_uuid,
- 'device_name': '/dev/sdb6',
- 'snapshot_id': '10',
- 'volume_id': '11',
- 'volume_size': 1},
- {'instance_uuid': instance0_uuid,
- 'device_name': '/dev/sdb7',
- 'no_device': True},
- {'instance_uuid': instance0_uuid,
- 'device_name': '/dev/sdb8',
- 'virtual_name': 'swap'},
- {'instance_uuid': instance0_uuid,
- 'device_name': '/dev/sdb9',
- 'virtual_name': 'ephemeral3'}]
- instance2_uuid = inst2['uuid']
- mappings2 = [
- {'instance_uuid': instance2_uuid,
- 'device_name': 'vda',
- 'snapshot_id': '1',
- 'volume_id': '21'}]
-
- volumes0 = self._block_device_mapping_create(instance0_uuid, mappings0)
- volumes2 = self._block_device_mapping_create(instance2_uuid, mappings2)
- return ((inst0, inst1, inst2), (volumes0, [], volumes2))
-
- def _tearDownBlockDeviceMapping(self, instances, volumes):
- for vols in volumes:
- for vol in vols:
- self.volume_api.delete(self.context, vol['id'])
- for instance in instances:
- for bdm in db.block_device_mapping_get_all_by_instance(
- self.context, instance['uuid']):
- db.block_device_mapping_destroy(self.context, bdm['id'])
- db.instance_destroy(self.context, instance['uuid'])
-
- _expected_instance_bdm0 = {
- 'instanceId': 'i-00000001',
- 'rootDeviceName': '/dev/sdb1',
- 'rootDeviceType': 'ebs'}
-
- _expected_block_device_mapping0 = [
- {'deviceName': '/dev/sdb1',
- 'ebs': {'status': 'attached',
- 'deleteOnTermination': False,
- 'volumeId': 'vol-00000002',
- }},
- {'deviceName': '/dev/sdb2',
- 'ebs': {'status': 'attached',
- 'deleteOnTermination': False,
- 'volumeId': 'vol-00000003',
- }},
- {'deviceName': '/dev/sdb3',
- 'ebs': {'status': 'attached',
- 'deleteOnTermination': True,
- 'volumeId': 'vol-00000005',
- }},
- {'deviceName': '/dev/sdb4',
- 'ebs': {'status': 'attached',
- 'deleteOnTermination': False,
- 'volumeId': 'vol-00000007',
- }},
- {'deviceName': '/dev/sdb5',
- 'ebs': {'status': 'attached',
- 'deleteOnTermination': False,
- 'volumeId': 'vol-00000009',
- }},
- {'deviceName': '/dev/sdb6',
- 'ebs': {'status': 'attached',
- 'deleteOnTermination': False,
- 'volumeId': 'vol-0000000b', }}]
- # NOTE(yamahata): swap/ephemeral device case isn't supported yet.
-
- _expected_instance_bdm1 = {
- 'instanceId': 'i-00000002',
- 'rootDeviceName': '/dev/sdc1',
- 'rootDeviceType': 'instance-store'}
-
- _expected_instance_bdm2 = {
- 'instanceId': 'i-00000003',
- 'rootDeviceName': '/dev/vda',
- 'rootDeviceType': 'ebs'}
-
- def test_format_instance_bdm(self):
- (instances, volumes) = self._setUpBlockDeviceMapping()
-
- result = {}
- self.cloud._format_instance_bdm(self.context, instances[0]['uuid'],
- '/dev/sdb1', result)
- self.assertThat(
- {'rootDeviceType': self._expected_instance_bdm0['rootDeviceType']},
- matchers.IsSubDictOf(result))
- self._assertEqualBlockDeviceMapping(
- self._expected_block_device_mapping0, result['blockDeviceMapping'])
-
- result = {}
- self.cloud._format_instance_bdm(self.context, instances[1]['uuid'],
- '/dev/sdc1', result)
- self.assertThat(
- {'rootDeviceType': self._expected_instance_bdm1['rootDeviceType']},
- matchers.IsSubDictOf(result))
-
- self._tearDownBlockDeviceMapping(instances, volumes)
-
- def _assertInstance(self, instance_id):
- ec2_instance_id = ec2utils.id_to_ec2_id(instance_id)
- result = self.cloud.describe_instances(self.context,
- instance_id=[ec2_instance_id])
- result = result['reservationSet'][0]
- self.assertEqual(len(result['instancesSet']), 1)
- result = result['instancesSet'][0]
- self.assertEqual(result['instanceId'], ec2_instance_id)
- return result
-
- def _assertEqualBlockDeviceMapping(self, expected, result):
- self.assertEqual(len(expected), len(result))
- for x in expected:
- found = False
- for y in result:
- if x['deviceName'] == y['deviceName']:
- self.assertThat(x, matchers.IsSubDictOf(y))
- found = True
- break
- self.assertTrue(found)
-
- def test_describe_instances_bdm(self):
- """Make sure describe_instances works with root_device_name and
- block device mappings
- """
- (instances, volumes) = self._setUpBlockDeviceMapping()
-
- result = self._assertInstance(instances[0]['id'])
- self.assertThat(
- self._expected_instance_bdm0,
- matchers.IsSubDictOf(result))
- self._assertEqualBlockDeviceMapping(
- self._expected_block_device_mapping0, result['blockDeviceMapping'])
-
- result = self._assertInstance(instances[1]['id'])
- self.assertThat(
- self._expected_instance_bdm1,
- matchers.IsSubDictOf(result))
-
- result = self._assertInstance(instances[2]['id'])
- self.assertThat(
- self._expected_instance_bdm2,
- matchers.IsSubDictOf(result))
-
- self._tearDownBlockDeviceMapping(instances, volumes)
-
- def _setUpImageSet(self, create_volumes_and_snapshots=False):
- self.flags(max_local_block_devices=-1)
- mappings1 = [
- {'device': '/dev/sda1', 'virtual': 'root'},
-
- {'device': 'sdb0', 'virtual': 'ephemeral0'},
- {'device': 'sdb1', 'virtual': 'ephemeral1'},
- {'device': 'sdb2', 'virtual': 'ephemeral2'},
- {'device': 'sdb3', 'virtual': 'ephemeral3'},
- {'device': 'sdb4', 'virtual': 'ephemeral4'},
-
- {'device': 'sdc0', 'virtual': 'swap'},
- {'device': 'sdc1', 'virtual': 'swap'},
- {'device': 'sdc2', 'virtual': 'swap'},
- {'device': 'sdc3', 'virtual': 'swap'},
- {'device': 'sdc4', 'virtual': 'swap'}]
- block_device_mapping1 = [
- {'device_name': '/dev/sdb1', 'snapshot_id': 1234567},
- {'device_name': '/dev/sdb2', 'volume_id': 1234567},
- {'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
- {'device_name': '/dev/sdb4', 'no_device': True},
-
- {'device_name': '/dev/sdc1', 'snapshot_id': 12345678},
- {'device_name': '/dev/sdc2', 'volume_id': 12345678},
- {'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'},
- {'device_name': '/dev/sdc4', 'no_device': True}]
- image1 = {
- 'id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'name': 'fake_name',
- 'status': 'active',
- 'properties': {
- 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'type': 'machine',
- 'image_state': 'available',
- 'mappings': mappings1,
- 'block_device_mapping': block_device_mapping1,
- }
- }
-
- mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
- block_device_mapping2 = [{'device_name': '/dev/sdb1',
- 'snapshot_id': 1234567}]
- image2 = {
- 'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- 'name': 'fake_name',
- 'status': 'active',
- 'properties': {
- 'kernel_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- 'type': 'machine',
- 'root_device_name': '/dev/sdb1',
- 'mappings': mappings2,
- 'block_device_mapping': block_device_mapping2}}
-
- def fake_show(meh, context, image_id, **kwargs):
- _images = [copy.deepcopy(image1), copy.deepcopy(image2)]
- for i in _images:
- if str(i['id']) == str(image_id):
- return i
- raise exception.ImageNotFound(image_id=image_id)
-
- def fake_detail(meh, context):
- return [copy.deepcopy(image1), copy.deepcopy(image2)]
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_show)
- self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
-
- volumes = []
- snapshots = []
- if create_volumes_and_snapshots:
- for bdm in block_device_mapping1:
- if 'volume_id' in bdm:
- vol = self._volume_create(bdm['volume_id'])
- volumes.append(vol['id'])
- if 'snapshot_id' in bdm:
- kwargs = {'volume_id': 76543210,
- 'volume_size': 1,
- 'name': 'test-snap',
- 'description': 'test snap desc',
- 'snap_id': bdm['snapshot_id'],
- 'status': 'available'}
- snap = self.volume_api.create_snapshot_with_kwargs(
- self.context, **kwargs)
- snapshots.append(snap['id'])
- return (volumes, snapshots)
-
- def _assertImageSet(self, result, root_device_type, root_device_name):
- self.assertEqual(1, len(result['imagesSet']))
- result = result['imagesSet'][0]
- self.assertIn('rootDeviceType', result)
- self.assertEqual(result['rootDeviceType'], root_device_type)
- self.assertIn('rootDeviceName', result)
- self.assertEqual(result['rootDeviceName'], root_device_name)
- self.assertIn('blockDeviceMapping', result)
-
- return result
-
- _expected_root_device_name1 = '/dev/sda1'
- # NOTE(yamahata): noDevice doesn't make sense when returning mapping
- # It makes sense only when user overriding existing
- # mapping.
- _expected_bdms1 = [
- {'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'},
- {'deviceName': '/dev/sdb1', 'ebs': {'snapshotId':
- 'snap-00053977'}},
- {'deviceName': '/dev/sdb2', 'ebs': {'snapshotId':
- 'vol-00053977'}},
- {'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'},
-
- {'deviceName': '/dev/sdc0', 'virtualName': 'swap'},
- {'deviceName': '/dev/sdc1', 'ebs': {'snapshotId':
- 'snap-00bc614e'}},
- {'deviceName': '/dev/sdc2', 'ebs': {'snapshotId':
- 'vol-00bc614e'}},
- {'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'},
- ]
-
- _expected_root_device_name2 = '/dev/sdb1'
- _expected_bdms2 = [{'deviceName': '/dev/sdb1',
- 'ebs': {'snapshotId': 'snap-00053977'}}]
-
- def _run_instance(self, **kwargs):
- rv = self.cloud.run_instances(self.context, **kwargs)
- instance_id = rv['instancesSet'][0]['instanceId']
- return instance_id
-
- def _restart_compute_service(self, periodic_interval_max=None):
- """restart compute service. NOTE: fake driver forgets all instances."""
- self.compute.kill()
- if periodic_interval_max:
- self.compute = self.start_service(
- 'compute', periodic_interval_max=periodic_interval_max)
- else:
- self.compute = self.start_service('compute')
-
- def _volume_create(self, volume_id=None):
- kwargs = {'name': 'test-volume',
- 'description': 'test volume description',
- 'status': 'available',
- 'host': 'fake',
- 'size': 1,
- 'attach_status': 'detached'}
- if volume_id:
- kwargs['volume_id'] = volume_id
- return self.volume_api.create_with_kwargs(self.context, **kwargs)
-
- def _assert_volume_attached(self, vol, instance_uuid, mountpoint):
- self.assertEqual(vol['instance_uuid'], instance_uuid)
- self.assertEqual(vol['mountpoint'], mountpoint)
- self.assertEqual(vol['status'], "in-use")
- self.assertEqual(vol['attach_status'], "attached")
-
- def _assert_volume_detached(self, vol):
- self.assertIsNone(vol['instance_uuid'])
- self.assertIsNone(vol['mountpoint'])
- self.assertEqual(vol['status'], "available")
- self.assertEqual(vol['attach_status'], "detached")
-
- def test_stop_start_with_volume(self):
- # Make sure run instance with block device mapping works.
- availability_zone = 'zone1:host1'
- vol1 = self.cloud.create_volume(self.context,
- size=1,
- availability_zone=availability_zone)
- vol2 = self.cloud.create_volume(self.context,
- size=1,
- availability_zone=availability_zone)
- vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
- vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
- # enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval_max=0.3)
-
- kwargs = {'image_id': 'ami-1',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1,
- 'block_device_mapping': [{'device_name': '/dev/sdb',
- 'volume_id': vol1_uuid,
- 'delete_on_termination': False},
- {'device_name': '/dev/sdc',
- 'volume_id': vol2_uuid,
- 'delete_on_termination': True},
- ]}
- ec2_instance_id = self._run_instance(**kwargs)
- instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
- ec2_instance_id)
- vols = self.volume_api.get_all(self.context)
- vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
-
- self.assertEqual(len(vols), 2)
- for vol in vols:
- self.assertIn(str(vol['id']), [str(vol1_uuid), str(vol2_uuid)])
- if str(vol['id']) == str(vol1_uuid):
- self.volume_api.attach(self.context, vol['id'],
- instance_uuid, '/dev/sdb')
- elif str(vol['id']) == str(vol2_uuid):
- self.volume_api.attach(self.context, vol['id'],
- instance_uuid, '/dev/sdc')
-
- vol = self.volume_api.get(self.context, vol1_uuid)
- self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
-
- vol = self.volume_api.get(self.context, vol2_uuid)
- self._assert_volume_attached(vol, instance_uuid, '/dev/sdc')
-
- result = self.cloud.stop_instances(self.context, [ec2_instance_id])
- self.assertTrue(result)
-
- vol = self.volume_api.get(self.context, vol1_uuid)
- self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
-
- vol = self.volume_api.get(self.context, vol1_uuid)
- self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
-
- vol = self.volume_api.get(self.context, vol2_uuid)
- self._assert_volume_attached(vol, instance_uuid, '/dev/sdc')
-
- self.cloud.start_instances(self.context, [ec2_instance_id])
- vols = self.volume_api.get_all(self.context)
- vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
- self.assertEqual(len(vols), 2)
- for vol in vols:
- self.assertIn(str(vol['id']), [str(vol1_uuid), str(vol2_uuid)])
- self.assertIn(vol['mountpoint'], ['/dev/sdb', '/dev/sdc'])
- self.assertEqual(vol['instance_uuid'], instance_uuid)
- self.assertEqual(vol['status'], "in-use")
- self.assertEqual(vol['attach_status'], "attached")
-
- # Here we puke...
- self.cloud.terminate_instances(self.context, [ec2_instance_id])
-
- admin_ctxt = context.get_admin_context(read_deleted="no")
- vol = self.volume_api.get(admin_ctxt, vol2_uuid)
- self.assertFalse(vol['deleted'])
- self.cloud.delete_volume(self.context, vol1['volumeId'])
- self._restart_compute_service()
-
- def test_stop_with_attached_volume(self):
- # Make sure attach info is reflected to block device mapping.
-
- availability_zone = 'zone1:host1'
- vol1 = self.cloud.create_volume(self.context,
- size=1,
- availability_zone=availability_zone)
- vol2 = self.cloud.create_volume(self.context,
- size=1,
- availability_zone=availability_zone)
- vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
- vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
-
- # enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval_max=0.3)
- kwargs = {'image_id': 'ami-1',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1,
- 'block_device_mapping': [{'device_name': '/dev/sdb',
- 'volume_id': vol1_uuid,
- 'delete_on_termination': True}]}
- ec2_instance_id = self._run_instance(**kwargs)
- instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
- ec2_instance_id)
-
- vols = self.volume_api.get_all(self.context)
- vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
- self.assertEqual(len(vols), 1)
- for vol in vols:
- self.assertEqual(vol['id'], vol1_uuid)
- self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
- vol = self.volume_api.get(self.context, vol2_uuid)
- self._assert_volume_detached(vol)
-
- inst_obj = objects.Instance.get_by_uuid(self.context, instance_uuid)
- self.cloud.compute_api.attach_volume(self.context,
- inst_obj,
- volume_id=vol2_uuid,
- device='/dev/sdc')
-
- vol1 = self.volume_api.get(self.context, vol1_uuid)
- self._assert_volume_attached(vol1, instance_uuid, '/dev/sdb')
-
- vol2 = self.volume_api.get(self.context, vol2_uuid)
- self._assert_volume_attached(vol2, instance_uuid, '/dev/sdc')
-
- self.cloud.compute_api.detach_volume(self.context,
- inst_obj, vol1)
-
- vol1 = self.volume_api.get(self.context, vol1_uuid)
- self._assert_volume_detached(vol1)
-
- result = self.cloud.stop_instances(self.context, [ec2_instance_id])
- self.assertTrue(result)
-
- vol2 = self.volume_api.get(self.context, vol2_uuid)
- self._assert_volume_attached(vol2, instance_uuid, '/dev/sdc')
-
- self.cloud.start_instances(self.context, [ec2_instance_id])
- vols = self.volume_api.get_all(self.context)
- vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
- self.assertEqual(len(vols), 1)
-
- self._assert_volume_detached(vol1)
-
- vol1 = self.volume_api.get(self.context, vol1_uuid)
- self._assert_volume_detached(vol1)
-
- self.cloud.terminate_instances(self.context, [ec2_instance_id])
-
- def _create_snapshot(self, ec2_volume_id):
- result = self.cloud.create_snapshot(self.context,
- volume_id=ec2_volume_id)
- return result['snapshotId']
-
- def test_run_with_snapshot(self):
- # Makes sure run/stop/start instance with snapshot works.
- availability_zone = 'zone1:host1'
- vol1 = self.cloud.create_volume(self.context,
- size=1,
- availability_zone=availability_zone)
-
- snap1 = self.cloud.create_snapshot(self.context,
- vol1['volumeId'],
- name='snap-1',
- description='test snap of vol %s' %
- vol1['volumeId'])
- snap1_uuid = ec2utils.ec2_snap_id_to_uuid(snap1['snapshotId'])
-
- snap2 = self.cloud.create_snapshot(self.context,
- vol1['volumeId'],
- name='snap-2',
- description='test snap of vol %s' %
- vol1['volumeId'])
- snap2_uuid = ec2utils.ec2_snap_id_to_uuid(snap2['snapshotId'])
-
- kwargs = {'image_id': 'ami-1',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1,
- 'block_device_mapping': [{'device_name': '/dev/vdb',
- 'snapshot_id': snap1_uuid,
- 'delete_on_termination': False, },
- {'device_name': '/dev/vdc',
- 'snapshot_id': snap2_uuid,
- 'delete_on_termination': True}]}
- ec2_instance_id = self._run_instance(**kwargs)
- instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
- ec2_instance_id)
-
- vols = self.volume_api.get_all(self.context)
- vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
-
- self.assertEqual(len(vols), 2)
-
- vol1_id = None
- vol2_id = None
- for vol in vols:
- snapshot_uuid = vol['snapshot_id']
- if snapshot_uuid == snap1_uuid:
- vol1_id = vol['id']
- mountpoint = '/dev/vdb'
- elif snapshot_uuid == snap2_uuid:
- vol2_id = vol['id']
- mountpoint = '/dev/vdc'
- else:
- self.fail()
-
- self._assert_volume_attached(vol, instance_uuid, mountpoint)
-
- # Just make sure we found them
- self.assertTrue(vol1_id)
- self.assertTrue(vol2_id)
-
- self.cloud.terminate_instances(self.context, [ec2_instance_id])
-
- admin_ctxt = context.get_admin_context(read_deleted="no")
- vol = self.volume_api.get(admin_ctxt, vol1_id)
- self._assert_volume_detached(vol)
- self.assertFalse(vol['deleted'])
-
- def test_create_image(self):
- # Make sure that CreateImage works.
- # enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval_max=0.3)
-
- (volumes, snapshots) = self._setUpImageSet(
- create_volumes_and_snapshots=True)
-
- kwargs = {'image_id': 'ami-1',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1}
- ec2_instance_id = self._run_instance(**kwargs)
-
- self.cloud.terminate_instances(self.context, [ec2_instance_id])
- self._restart_compute_service()
-
- @staticmethod
- def _fake_bdm_get(ctxt, id):
- return [{'volume_id': 87654321,
- 'snapshot_id': None,
- 'no_device': None,
- 'virtual_name': None,
- 'delete_on_termination': True,
- 'device_name': '/dev/sdh'},
- {'volume_id': None,
- 'snapshot_id': 98765432,
- 'no_device': None,
- 'virtual_name': None,
- 'delete_on_termination': True,
- 'device_name': '/dev/sdi'},
- {'volume_id': None,
- 'snapshot_id': None,
- 'no_device': True,
- 'virtual_name': None,
- 'delete_on_termination': None,
- 'device_name': None},
- {'volume_id': None,
- 'snapshot_id': None,
- 'no_device': None,
- 'virtual_name': 'ephemeral0',
- 'delete_on_termination': None,
- 'device_name': '/dev/sdb'},
- {'volume_id': None,
- 'snapshot_id': None,
- 'no_device': None,
- 'virtual_name': 'swap',
- 'delete_on_termination': None,
- 'device_name': '/dev/sdc'},
- {'volume_id': None,
- 'snapshot_id': None,
- 'no_device': None,
- 'virtual_name': 'ephemeral1',
- 'delete_on_termination': None,
- 'device_name': '/dev/sdd'},
- {'volume_id': None,
- 'snapshot_id': None,
- 'no_device': None,
- 'virtual_name': 'ephemeral2',
- 'delete_on_termination': None,
- 'device_name': '/dev/sd3'},
- ]
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
deleted file mode 100644
index 5df870f7b5..0000000000
--- a/nova/tests/api/ec2/test_cloud.py
+++ /dev/null
@@ -1,3255 +0,0 @@
-# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import base64
-import copy
-import datetime
-import functools
-import os
-import string
-import tempfile
-
-import fixtures
-import iso8601
-import mock
-from oslo.config import cfg
-from oslo.utils import timeutils
-
-from nova.api.ec2 import cloud
-from nova.api.ec2 import ec2utils
-from nova.api.ec2 import inst_state
-from nova.api.metadata import password
-from nova.compute import api as compute_api
-from nova.compute import flavors
-from nova.compute import power_state
-from nova.compute import rpcapi as compute_rpcapi
-from nova.compute import utils as compute_utils
-from nova.compute import vm_states
-from nova import context
-from nova import db
-from nova import exception
-from nova.image import s3
-from nova.network import api as network_api
-from nova.network import base_api as base_network_api
-from nova.network import model
-from nova.network import neutronv2
-from nova import objects
-from nova.objects import base as obj_base
-from nova.openstack.common import log as logging
-from nova.openstack.common import policy as common_policy
-from nova.openstack.common import uuidutils
-from nova import policy
-from nova import test
-from nova.tests.api.openstack.compute.contrib import (
- test_neutron_security_groups as test_neutron)
-from nova.tests import cast_as_call
-from nova.tests import fake_block_device
-from nova.tests import fake_network
-from nova.tests import fake_notifier
-from nova.tests import fake_utils
-from nova.tests.image import fake
-from nova.tests import matchers
-from nova import utils
-from nova.virt import fake as fake_virt
-from nova import volume
-
-CONF = cfg.CONF
-CONF.import_opt('compute_driver', 'nova.virt.driver')
-CONF.import_opt('default_flavor', 'nova.compute.flavors')
-CONF.import_opt('use_ipv6', 'nova.netconf')
-LOG = logging.getLogger(__name__)
-
-HOST = "testhost"
-
-
-def get_fake_cache(get_floating):
- def _ip(ip, fixed=True, floats=None):
- ip_dict = {'address': ip, 'type': 'fixed'}
- if not fixed:
- ip_dict['type'] = 'floating'
- if fixed and floats:
- ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
- return ip_dict
-
- if get_floating:
- ip_info = [_ip('192.168.0.3',
- floats=['1.2.3.4', '5.6.7.8']),
- _ip('192.168.0.4')]
- else:
- ip_info = [_ip('192.168.0.3'),
- _ip('192.168.0.4')]
-
- info = [{'address': 'aa:bb:cc:dd:ee:ff',
- 'id': 1,
- 'network': {'bridge': 'br0',
- 'id': 1,
- 'label': 'private',
- 'subnets': [{'cidr': '192.168.0.0/24',
- 'ips': ip_info}]}}]
-
- if CONF.use_ipv6:
- ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
- info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
- 'ips': [_ip(ipv6_addr)]})
-
- return model.NetworkInfo.hydrate(info)
-
-
-def get_instances_with_cached_ips(orig_func, get_floating,
- *args, **kwargs):
- """Kludge the cache into instance(s) without having to create DB
- entries
- """
- instances = orig_func(*args, **kwargs)
-
- if kwargs.get('want_objects', False):
- info_cache = objects.InstanceInfoCache()
- info_cache.network_info = get_fake_cache(get_floating)
- info_cache.obj_reset_changes()
- else:
- info_cache = {'network_info': get_fake_cache(get_floating)}
-
- if isinstance(instances, (list, obj_base.ObjectListBase)):
- for instance in instances:
- instance['info_cache'] = info_cache
- else:
- instances['info_cache'] = info_cache
- return instances
-
-
-class CloudTestCase(test.TestCase):
-
- REQUIRES_LOCKING = True
-
- def setUp(self):
- super(CloudTestCase, self).setUp()
- self.useFixture(test.SampleNetworks())
- ec2utils.reset_cache()
- self.flags(compute_driver='nova.virt.fake.FakeDriver',
- volume_api_class='nova.tests.fake_volume.API')
- self.useFixture(fixtures.FakeLogger('boto'))
- fake_utils.stub_out_utils_spawn_n(self.stubs)
-
- def fake_show(meh, context, id, **kwargs):
- return {'id': id,
- 'name': 'fake_name',
- 'container_format': 'ami',
- 'status': 'active',
- 'properties': {
- 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'type': 'machine',
- 'image_state': 'available'}}
-
- def fake_detail(_self, context, **kwargs):
- image = fake_show(None, context, None)
- image['name'] = kwargs.get('filters', {}).get('name')
- return [image]
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_show)
- self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
- fake.stub_out_image_service(self.stubs)
-
- def dumb(*args, **kwargs):
- pass
-
- self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
- fake_network.set_stub_network_methods(self.stubs)
-
- # set up our cloud
- self.cloud = cloud.CloudController()
- self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
-
- # Short-circuit the conductor service
- self.flags(use_local=True, group='conductor')
-
- # Stub out the notification service so we use the no-op serializer
- # and avoid lazy-load traces with the wrap_exception decorator in
- # the compute service.
- fake_notifier.stub_notifier(self.stubs)
- self.addCleanup(fake_notifier.reset)
-
- # set up services
- self.conductor = self.start_service('conductor',
- manager=CONF.conductor.manager)
- self.compute = self.start_service('compute')
- self.scheduler = self.start_service('scheduler')
- self.network = self.start_service('network')
- self.consoleauth = self.start_service('consoleauth')
-
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id,
- self.project_id,
- is_admin=True)
- self.volume_api = volume.API()
-
- self.useFixture(cast_as_call.CastAsCall(self.stubs))
-
- # make sure we can map ami-00000001/2 to a uuid in FakeImageService
- db.s3_image_create(self.context,
- 'cedef40a-ed67-4d10-800e-17455edce175')
- db.s3_image_create(self.context,
- '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
-
- def tearDown(self):
- self.volume_api.reset_fake_api(self.context)
- super(CloudTestCase, self).tearDown()
- fake.FakeImageService_reset()
-
- def fake_get_target(obj, iqn):
- return 1
-
- def fake_remove_iscsi_target(obj, tid, lun, vol_id, **kwargs):
- pass
-
- def _stub_instance_get_with_fixed_ips(self,
- func_name, get_floating=True):
- orig_func = getattr(self.cloud.compute_api, func_name)
-
- def fake_get(*args, **kwargs):
- return get_instances_with_cached_ips(orig_func, get_floating,
- *args, **kwargs)
- self.stubs.Set(self.cloud.compute_api, func_name, fake_get)
-
- def _create_key(self, name):
- # NOTE(vish): create depends on pool, so just call helper directly
- keypair_api = compute_api.KeypairAPI()
- return keypair_api.create_key_pair(self.context, self.context.user_id,
- name)
-
- def test_describe_regions(self):
- # Makes sure describe regions runs without raising an exception.
- result = self.cloud.describe_regions(self.context)
- self.assertEqual(len(result['regionInfo']), 1)
- self.flags(region_list=["one=test_host1", "two=test_host2"])
- result = self.cloud.describe_regions(self.context)
- self.assertEqual(len(result['regionInfo']), 2)
-
- def test_describe_addresses(self):
- # Makes sure describe addresses runs without raising an exception.
- address = "10.10.10.10"
- db.floating_ip_create(self.context,
- {'address': address,
- 'pool': 'nova'})
- self.flags(network_api_class='nova.network.api.API')
- self.cloud.allocate_address(self.context)
- self.cloud.describe_addresses(self.context)
- self.cloud.release_address(self.context,
- public_ip=address)
- db.floating_ip_destroy(self.context, address)
-
- def test_describe_addresses_in_neutron(self):
- # Makes sure describe addresses runs without raising an exception.
- address = "10.10.10.10"
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- db.floating_ip_create(self.context,
- {'address': address,
- 'pool': 'nova'})
- self.cloud.allocate_address(self.context)
- self.cloud.describe_addresses(self.context)
- self.cloud.release_address(self.context,
- public_ip=address)
- db.floating_ip_destroy(self.context, address)
-
- def test_describe_specific_address(self):
- # Makes sure describe specific address works.
- addresses = ["10.10.10.10", "10.10.10.11"]
- for address in addresses:
- db.floating_ip_create(self.context,
- {'address': address,
- 'pool': 'nova'})
- self.cloud.allocate_address(self.context)
- result = self.cloud.describe_addresses(self.context)
- self.assertEqual(len(result['addressesSet']), 2)
- result = self.cloud.describe_addresses(self.context,
- public_ip=['10.10.10.10'])
- self.assertEqual(len(result['addressesSet']), 1)
- for address in addresses:
- self.cloud.release_address(self.context,
- public_ip=address)
- db.floating_ip_destroy(self.context, address)
-
- def test_allocate_address(self):
- address = "10.10.10.10"
- allocate = self.cloud.allocate_address
- db.floating_ip_create(self.context,
- {'address': address,
- 'pool': 'nova'})
- self.assertEqual(allocate(self.context)['publicIp'], address)
- db.floating_ip_destroy(self.context, address)
- self.assertRaises(exception.NoMoreFloatingIps,
- allocate,
- self.context)
-
- def test_release_address(self):
- address = "10.10.10.10"
- db.floating_ip_create(self.context,
- {'address': address,
- 'pool': 'nova',
- 'project_id': self.project_id})
- result = self.cloud.release_address(self.context, address)
- self.assertEqual(result.get('return', None), 'true')
-
- def test_associate_disassociate_address(self):
- # Verifies associate runs cleanly without raising an exception.
- address = "10.10.10.10"
- db.floating_ip_create(self.context,
- {'address': address,
- 'pool': 'nova'})
- self.cloud.allocate_address(self.context)
- # TODO(jkoelker) Probably need to query for instance_type_id and
- # make sure we get a valid one
- inst = db.instance_create(self.context, {'host': self.compute.host,
- 'display_name': HOST,
- 'instance_type_id': 1})
- networks = db.network_get_all(self.context)
- for network in networks:
- db.network_update(self.context, network['id'],
- {'host': self.network.host})
- project_id = self.context.project_id
- nw_info = self.network.allocate_for_instance(self.context,
- instance_id=inst['id'],
- instance_uuid=inst['uuid'],
- host=inst['host'],
- vpn=None,
- rxtx_factor=3,
- project_id=project_id,
- macs=None)
-
- fixed_ips = nw_info.fixed_ips()
- ec2_id = ec2utils.id_to_ec2_inst_id(inst['uuid'])
-
- self.stubs.Set(ec2utils, 'get_ip_info_for_instance',
- lambda *args: {'fixed_ips': ['10.0.0.1'],
- 'fixed_ip6s': [],
- 'floating_ips': []})
- self.stubs.Set(network_api.API, 'get_instance_id_by_floating_address',
- lambda *args: 1)
-
- def fake_update_instance_cache_with_nw_info(api, context, instance,
- nw_info=None,
- update_cells=True):
-
- return
-
- self.stubs.Set(base_network_api, "update_instance_cache_with_nw_info",
- fake_update_instance_cache_with_nw_info)
-
- self.cloud.associate_address(self.context,
- instance_id=ec2_id,
- public_ip=address)
- self.cloud.disassociate_address(self.context,
- public_ip=address)
- self.cloud.release_address(self.context,
- public_ip=address)
- self.network.deallocate_fixed_ip(self.context, fixed_ips[0]['address'],
- inst['host'])
- db.instance_destroy(self.context, inst['uuid'])
- db.floating_ip_destroy(self.context, address)
-
- def test_disassociate_auto_assigned_address(self):
- """Verifies disassociating auto assigned floating IP
- raises an exception
- """
- address = "10.10.10.10"
-
- def fake_get(*args, **kwargs):
- pass
-
- def fake_disassociate_floating_ip(*args, **kwargs):
- raise exception.CannotDisassociateAutoAssignedFloatingIP()
-
- self.stubs.Set(network_api.API, 'get_instance_id_by_floating_address',
- lambda *args: 1)
- self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
- self.stubs.Set(network_api.API, 'disassociate_floating_ip',
- fake_disassociate_floating_ip)
-
- self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
- self.cloud.disassociate_address,
- self.context, public_ip=address)
-
- def test_disassociate_unassociated_address(self):
- address = "10.10.10.10"
- db.floating_ip_create(self.context,
- {'address': address,
- 'pool': 'nova'})
- self.cloud.allocate_address(self.context)
- self.cloud.describe_addresses(self.context)
- self.assertRaises(exception.InvalidAssociation,
- self.cloud.disassociate_address,
- self.context, public_ip=address)
- db.floating_ip_destroy(self.context, address)
-
- def test_describe_security_groups(self):
- # Makes sure describe_security_groups works and filters results.
- sec = db.security_group_create(self.context,
- {'project_id': self.context.project_id,
- 'name': 'test'})
- result = self.cloud.describe_security_groups(self.context)
- # NOTE(vish): should have the default group as well
- self.assertEqual(len(result['securityGroupInfo']), 2)
- result = self.cloud.describe_security_groups(self.context,
- group_name=[sec['name']])
- self.assertEqual(len(result['securityGroupInfo']), 1)
- self.assertEqual(
- result['securityGroupInfo'][0]['groupName'],
- sec['name'])
- db.security_group_destroy(self.context, sec['id'])
-
- def test_describe_security_groups_all_tenants(self):
- # Makes sure describe_security_groups works and filters results.
- sec = db.security_group_create(self.context,
- {'project_id': 'foobar',
- 'name': 'test'})
-
- def _check_name(result, i, expected):
- self.assertEqual(result['securityGroupInfo'][i]['groupName'],
- expected)
-
- # include all tenants
- filter = [{'name': 'all-tenants', 'value': {'1': 1}}]
- result = self.cloud.describe_security_groups(self.context,
- filter=filter)
- self.assertEqual(len(result['securityGroupInfo']), 2)
- _check_name(result, 0, 'default')
- _check_name(result, 1, sec['name'])
-
- # exclude all tenants
- filter = [{'name': 'all-tenants', 'value': {'1': 0}}]
- result = self.cloud.describe_security_groups(self.context,
- filter=filter)
- self.assertEqual(len(result['securityGroupInfo']), 1)
- _check_name(result, 0, 'default')
-
- # default all tenants
- result = self.cloud.describe_security_groups(self.context)
- self.assertEqual(len(result['securityGroupInfo']), 1)
- _check_name(result, 0, 'default')
-
- db.security_group_destroy(self.context, sec['id'])
-
- def test_describe_security_groups_by_id(self):
- sec = db.security_group_create(self.context,
- {'project_id': self.context.project_id,
- 'name': 'test'})
- result = self.cloud.describe_security_groups(self.context,
- group_id=[sec['id']])
- self.assertEqual(len(result['securityGroupInfo']), 1)
- self.assertEqual(
- result['securityGroupInfo'][0]['groupName'],
- sec['name'])
- default = db.security_group_get_by_name(self.context,
- self.context.project_id,
- 'default')
- result = self.cloud.describe_security_groups(self.context,
- group_id=[default['id']])
- self.assertEqual(len(result['securityGroupInfo']), 1)
- self.assertEqual(
- result['securityGroupInfo'][0]['groupName'],
- 'default')
- db.security_group_destroy(self.context, sec['id'])
-
- def test_create_delete_security_group(self):
- descript = 'test description'
- create = self.cloud.create_security_group
- result = create(self.context, 'testgrp', descript)
- group_descript = result['securityGroupSet'][0]['groupDescription']
- self.assertEqual(descript, group_descript)
- delete = self.cloud.delete_security_group
- self.assertTrue(delete(self.context, 'testgrp'))
-
- def test_security_group_quota_limit(self):
- self.flags(quota_security_groups=10)
- for i in range(1, CONF.quota_security_groups):
- name = 'test name %i' % i
- descript = 'test description %i' % i
- create = self.cloud.create_security_group
- create(self.context, name, descript)
-
- # 11'th group should fail
- self.assertRaises(exception.SecurityGroupLimitExceeded,
- create, self.context, 'foo', 'bar')
-
- def test_delete_security_group_by_id(self):
- sec = db.security_group_create(self.context,
- {'project_id': self.context.project_id,
- 'name': 'test'})
- delete = self.cloud.delete_security_group
- self.assertTrue(delete(self.context, group_id=sec['id']))
-
- def test_delete_security_group_with_bad_name(self):
- delete = self.cloud.delete_security_group
- notfound = exception.SecurityGroupNotFound
- self.assertRaises(notfound, delete, self.context, 'badname')
-
- def test_delete_security_group_with_bad_group_id(self):
- delete = self.cloud.delete_security_group
- notfound = exception.SecurityGroupNotFound
- self.assertRaises(notfound, delete, self.context, group_id=999)
-
- def test_delete_security_group_no_params(self):
- delete = self.cloud.delete_security_group
- self.assertRaises(exception.MissingParameter, delete, self.context)
-
- def test_delete_security_group_policy_not_allowed(self):
- rules = {'compute_extension:security_groups':
- common_policy.parse_rule('project_id:%(project_id)s')}
- policy.set_rules(rules)
-
- with mock.patch.object(self.cloud.security_group_api,
- 'get') as get:
- get.return_value = {'project_id': 'invalid'}
-
- self.assertRaises(exception.PolicyNotAuthorized,
- self.cloud.delete_security_group, self.context,
- 'fake-name', 'fake-id')
-
- def test_authorize_security_group_ingress_policy_not_allowed(self):
- rules = {'compute_extension:security_groups':
- common_policy.parse_rule('project_id:%(project_id)s')}
- policy.set_rules(rules)
-
- with mock.patch.object(self.cloud.security_group_api,
- 'get') as get:
- get.return_value = {'project_id': 'invalid'}
-
- self.assertRaises(exception.PolicyNotAuthorized,
- self.cloud.authorize_security_group_ingress, self.context,
- 'fake-name', 'fake-id')
-
- def test_authorize_security_group_ingress(self):
- kwargs = {'project_id': self.context.project_id, 'name': 'test'}
- sec = db.security_group_create(self.context, kwargs)
- authz = self.cloud.authorize_security_group_ingress
- kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
- self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
-
- def test_authorize_security_group_ingress_ip_permissions_ip_ranges(self):
- kwargs = {'project_id': self.context.project_id, 'name': 'test'}
- sec = db.security_group_create(self.context, kwargs)
- authz = self.cloud.authorize_security_group_ingress
- kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
- 'ip_ranges':
- {'1': {'cidr_ip': u'0.0.0.0/0'},
- '2': {'cidr_ip': u'10.10.10.10/32'}},
- 'ip_protocol': u'tcp'}]}
- self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
-
- def test_authorize_security_group_fail_missing_source_group(self):
- kwargs = {'project_id': self.context.project_id, 'name': 'test'}
- sec = db.security_group_create(self.context, kwargs)
- authz = self.cloud.authorize_security_group_ingress
- kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
- 'ip_ranges': {'1': {'cidr_ip': u'0.0.0.0/0'},
- '2': {'cidr_ip': u'10.10.10.10/32'}},
- 'groups': {'1': {'user_id': u'someuser',
- 'group_name': u'somegroup1'}},
- 'ip_protocol': u'tcp'}]}
- self.assertRaises(exception.SecurityGroupNotFound, authz,
- self.context, group_name=sec['name'], **kwargs)
-
- def test_authorize_security_group_ingress_ip_permissions_groups(self):
- kwargs = {
- 'project_id': self.context.project_id,
- 'user_id': self.context.user_id,
- 'name': 'test'
- }
- sec = db.security_group_create(self.context,
- {'project_id': 'someuser',
- 'user_id': 'someuser',
- 'description': '',
- 'name': 'somegroup1'})
- sec = db.security_group_create(self.context,
- {'project_id': 'someuser',
- 'user_id': 'someuser',
- 'description': '',
- 'name': 'othergroup2'})
- sec = db.security_group_create(self.context, kwargs)
- authz = self.cloud.authorize_security_group_ingress
- kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
- 'groups': {'1': {'user_id': u'someuser',
- 'group_name': u'somegroup1'},
- '2': {'user_id': u'someuser',
- 'group_name': u'othergroup2'}},
- 'ip_protocol': u'tcp'}]}
- self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
-
- def test_describe_security_group_ingress_groups(self):
- kwargs = {
- 'project_id': self.context.project_id,
- 'user_id': self.context.user_id,
- 'name': 'test'
- }
- sec1 = db.security_group_create(self.context, kwargs)
- sec2 = db.security_group_create(self.context,
- {'project_id': 'someuser',
- 'user_id': 'someuser',
- 'description': '',
- 'name': 'somegroup1'})
- sec3 = db.security_group_create(self.context,
- {'project_id': 'someuser',
- 'user_id': 'someuser',
- 'description': '',
- 'name': 'othergroup2'})
- authz = self.cloud.authorize_security_group_ingress
- kwargs = {'ip_permissions': [
- {'groups': {'1': {'user_id': u'someuser',
- 'group_name': u'somegroup1'}}},
- {'ip_protocol': 'tcp',
- 'from_port': 80,
- 'to_port': 80,
- 'groups': {'1': {'user_id': u'someuser',
- 'group_name': u'othergroup2'}}}]}
- self.assertTrue(authz(self.context, group_name=sec1['name'], **kwargs))
- describe = self.cloud.describe_security_groups
- groups = describe(self.context, group_name=['test'])
- self.assertEqual(len(groups['securityGroupInfo']), 1)
- actual_rules = groups['securityGroupInfo'][0]['ipPermissions']
- self.assertEqual(len(actual_rules), 4)
- expected_rules = [{'fromPort': -1,
- 'groups': [{'groupName': 'somegroup1',
- 'userId': 'someuser'}],
- 'ipProtocol': 'icmp',
- 'ipRanges': [],
- 'toPort': -1},
- {'fromPort': 1,
- 'groups': [{'groupName': u'somegroup1',
- 'userId': u'someuser'}],
- 'ipProtocol': 'tcp',
- 'ipRanges': [],
- 'toPort': 65535},
- {'fromPort': 1,
- 'groups': [{'groupName': u'somegroup1',
- 'userId': u'someuser'}],
- 'ipProtocol': 'udp',
- 'ipRanges': [],
- 'toPort': 65535},
- {'fromPort': 80,
- 'groups': [{'groupName': u'othergroup2',
- 'userId': u'someuser'}],
- 'ipProtocol': u'tcp',
- 'ipRanges': [],
- 'toPort': 80}]
- for rule in expected_rules:
- self.assertIn(rule, actual_rules)
-
- db.security_group_destroy(self.context, sec3['id'])
- db.security_group_destroy(self.context, sec2['id'])
- db.security_group_destroy(self.context, sec1['id'])
-
- def test_revoke_security_group_ingress_policy_not_allowed(self):
- rules = {'compute_extension:security_groups':
- common_policy.parse_rule('project_id:%(project_id)s')}
- policy.set_rules(rules)
-
- with mock.patch.object(self.cloud.security_group_api,
- 'get') as get:
- get.return_value = {'project_id': 'invalid'}
-
- self.assertRaises(exception.PolicyNotAuthorized,
- self.cloud.revoke_security_group_ingress, self.context,
- 'fake-name', 'fake-id')
-
- def test_revoke_security_group_ingress(self):
- kwargs = {'project_id': self.context.project_id, 'name': 'test'}
- sec = db.security_group_create(self.context, kwargs)
- authz = self.cloud.authorize_security_group_ingress
- kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
- authz(self.context, group_id=sec['id'], **kwargs)
- revoke = self.cloud.revoke_security_group_ingress
- self.assertTrue(revoke(self.context, group_name=sec['name'], **kwargs))
-
- def test_authorize_revoke_security_group_ingress_by_id(self):
- sec = db.security_group_create(self.context,
- {'project_id': self.context.project_id,
- 'name': 'test'})
- authz = self.cloud.authorize_security_group_ingress
- kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
- authz(self.context, group_id=sec['id'], **kwargs)
- revoke = self.cloud.revoke_security_group_ingress
- self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs))
-
- def test_authorize_security_group_ingress_missing_protocol_params(self):
- kwargs = {'project_id': self.context.project_id, 'name': 'test'}
- db.security_group_create(self.context, kwargs)
- authz = self.cloud.authorize_security_group_ingress
- self.assertRaises(exception.MissingParameter, authz, self.context,
- 'test')
-
- def test_authorize_security_group_ingress_missing_group_name_or_id(self):
- kwargs = {'project_id': self.context.project_id, 'name': 'test'}
- authz = self.cloud.authorize_security_group_ingress
- self.assertRaises(exception.MissingParameter, authz, self.context,
- **kwargs)
-
- def test_authorize_security_group_ingress_already_exists(self):
- kwargs = {'project_id': self.context.project_id, 'name': 'test'}
- sec = db.security_group_create(self.context, kwargs)
- authz = self.cloud.authorize_security_group_ingress
- kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
- authz(self.context, group_name=sec['name'], **kwargs)
- self.assertRaises(exception.SecurityGroupRuleExists, authz,
- self.context, group_name=sec['name'], **kwargs)
-
- def test_security_group_ingress_quota_limit(self):
- self.flags(quota_security_group_rules=20)
- kwargs = {'project_id': self.context.project_id, 'name': 'test'}
- sec_group = db.security_group_create(self.context, kwargs)
- authz = self.cloud.authorize_security_group_ingress
- for i in range(100, 120):
- kwargs = {'to_port': i, 'from_port': i, 'ip_protocol': 'tcp'}
- authz(self.context, group_id=sec_group['id'], **kwargs)
-
- kwargs = {'to_port': 121, 'from_port': 121, 'ip_protocol': 'tcp'}
- self.assertRaises(exception.SecurityGroupLimitExceeded, authz,
- self.context, group_id=sec_group['id'], **kwargs)
-
- def _test_authorize_security_group_no_ports_with_source_group(self, proto):
- kwargs = {
- 'project_id': self.context.project_id,
- 'user_id': self.context.user_id,
- 'description': '',
- 'name': 'test'
- }
- sec = db.security_group_create(self.context, kwargs)
-
- authz = self.cloud.authorize_security_group_ingress
- auth_kwargs = {'ip_protocol': proto,
- 'groups': {'1': {'user_id': self.context.user_id,
- 'group_name': u'test'}}}
- self.assertTrue(authz(self.context, group_name=sec['name'],
- **auth_kwargs))
-
- describe = self.cloud.describe_security_groups
- groups = describe(self.context, group_name=['test'])
- self.assertEqual(len(groups['securityGroupInfo']), 1)
- actual_rules = groups['securityGroupInfo'][0]['ipPermissions']
- expected_rules = [{'groups': [{'groupName': 'test',
- 'userId': self.context.user_id}],
- 'ipProtocol': proto,
- 'ipRanges': []}]
- if proto == 'icmp':
- expected_rules[0]['fromPort'] = -1
- expected_rules[0]['toPort'] = -1
- else:
- expected_rules[0]['fromPort'] = 1
- expected_rules[0]['toPort'] = 65535
- self.assertTrue(expected_rules == actual_rules)
- describe = self.cloud.describe_security_groups
- groups = describe(self.context, group_name=['test'])
-
- db.security_group_destroy(self.context, sec['id'])
-
- def _test_authorize_security_group_no_ports_no_source_group(self, proto):
- kwargs = {
- 'project_id': self.context.project_id,
- 'user_id': self.context.user_id,
- 'description': '',
- 'name': 'test'
- }
- sec = db.security_group_create(self.context, kwargs)
-
- authz = self.cloud.authorize_security_group_ingress
- auth_kwargs = {'ip_protocol': proto}
- self.assertRaises(exception.MissingParameter, authz, self.context,
- group_name=sec['name'], **auth_kwargs)
-
- db.security_group_destroy(self.context, sec['id'])
-
- def test_authorize_security_group_no_ports_icmp(self):
- self._test_authorize_security_group_no_ports_with_source_group('icmp')
- self._test_authorize_security_group_no_ports_no_source_group('icmp')
-
- def test_authorize_security_group_no_ports_tcp(self):
- self._test_authorize_security_group_no_ports_with_source_group('tcp')
- self._test_authorize_security_group_no_ports_no_source_group('tcp')
-
- def test_authorize_security_group_no_ports_udp(self):
- self._test_authorize_security_group_no_ports_with_source_group('udp')
- self._test_authorize_security_group_no_ports_no_source_group('udp')
-
- def test_revoke_security_group_ingress_missing_group_name_or_id(self):
- kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
- revoke = self.cloud.revoke_security_group_ingress
- self.assertRaises(exception.MissingParameter, revoke,
- self.context, **kwargs)
-
- def test_delete_security_group_in_use_by_group(self):
- self.cloud.create_security_group(self.context, 'testgrp1',
- "test group 1")
- self.cloud.create_security_group(self.context, 'testgrp2',
- "test group 2")
- kwargs = {'groups': {'1': {'user_id': u'%s' % self.context.user_id,
- 'group_name': u'testgrp2'}},
- }
- self.cloud.authorize_security_group_ingress(self.context,
- group_name='testgrp1', **kwargs)
-
- group1 = db.security_group_get_by_name(self.context,
- self.project_id, 'testgrp1')
- get_rules = db.security_group_rule_get_by_security_group
-
- self.assertTrue(get_rules(self.context, group1['id']))
- self.cloud.delete_security_group(self.context, 'testgrp2')
- self.assertFalse(get_rules(self.context, group1['id']))
-
- def test_delete_security_group_in_use_by_instance(self):
- # Ensure that a group can not be deleted if in use by an instance.
- image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- args = {'reservation_id': 'a',
- 'image_ref': image_uuid,
- 'instance_type_id': 1,
- 'host': 'host1',
- 'vm_state': 'active'}
- inst = db.instance_create(self.context, args)
-
- args = {'user_id': self.context.user_id,
- 'project_id': self.context.project_id,
- 'name': 'testgrp',
- 'description': 'Test group'}
- group = db.security_group_create(self.context, args)
-
- db.instance_add_security_group(self.context, inst['uuid'], group['id'])
-
- self.assertRaises(exception.InvalidGroup,
- self.cloud.delete_security_group,
- self.context, 'testgrp')
-
- db.instance_destroy(self.context, inst['uuid'])
-
- self.cloud.delete_security_group(self.context, 'testgrp')
-
- def test_describe_availability_zones(self):
- # Makes sure describe_availability_zones works and filters results.
- service1 = db.service_create(self.context, {'host': 'host1_zones',
- 'binary': "nova-compute",
- 'topic': 'compute',
- 'report_count': 0})
- service2 = db.service_create(self.context, {'host': 'host2_zones',
- 'binary': "nova-compute",
- 'topic': 'compute',
- 'report_count': 0})
- # Aggregate based zones
- agg = db.aggregate_create(self.context,
- {'name': 'agg1'}, {'availability_zone': 'zone1'})
- db.aggregate_host_add(self.context, agg['id'], 'host1_zones')
- agg = db.aggregate_create(self.context,
- {'name': 'agg2'}, {'availability_zone': 'zone2'})
- db.aggregate_host_add(self.context, agg['id'], 'host2_zones')
- result = self.cloud.describe_availability_zones(self.context)
- self.assertEqual(len(result['availabilityZoneInfo']), 3)
- admin_ctxt = context.get_admin_context(read_deleted="no")
- result = self.cloud.describe_availability_zones(admin_ctxt,
- zone_name='verbose')
- self.assertEqual(len(result['availabilityZoneInfo']), 18)
- db.service_destroy(self.context, service1['id'])
- db.service_destroy(self.context, service2['id'])
-
- def test_describe_availability_zones_verbose(self):
- # Makes sure describe_availability_zones works and filters results.
- service1 = db.service_create(self.context, {'host': 'host1_zones',
- 'binary': "nova-compute",
- 'topic': 'compute',
- 'report_count': 0})
- service2 = db.service_create(self.context, {'host': 'host2_zones',
- 'binary': "nova-compute",
- 'topic': 'compute',
- 'report_count': 0})
- agg = db.aggregate_create(self.context,
- {'name': 'agg1'}, {'availability_zone': 'second_zone'})
- db.aggregate_host_add(self.context, agg['id'], 'host2_zones')
-
- admin_ctxt = context.get_admin_context(read_deleted="no")
- result = self.cloud.describe_availability_zones(admin_ctxt,
- zone_name='verbose')
-
- self.assertEqual(len(result['availabilityZoneInfo']), 17)
- db.service_destroy(self.context, service1['id'])
- db.service_destroy(self.context, service2['id'])
-
- def assertEqualSorted(self, x, y):
- self.assertEqual(sorted(x), sorted(y))
-
- def test_describe_instances(self):
- # Makes sure describe_instances works and filters results.
- self.flags(use_ipv6=True)
-
- self._stub_instance_get_with_fixed_ips('get_all')
- self._stub_instance_get_with_fixed_ips('get')
-
- image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- sys_meta = flavors.save_flavor_info(
- {}, flavors.get_flavor(1))
-
- sys_meta['EC2_client_token'] = "client-token-1"
- inst1 = db.instance_create(self.context, {'reservation_id': 'a',
- 'image_ref': image_uuid,
- 'instance_type_id': 1,
- 'host': 'host1',
- 'hostname': 'server-1234',
- 'vm_state': 'active',
- 'system_metadata': sys_meta})
-
- sys_meta['EC2_client_token'] = "client-token-2"
- inst2 = db.instance_create(self.context, {'reservation_id': 'a',
- 'image_ref': image_uuid,
- 'instance_type_id': 1,
- 'host': 'host2',
- 'hostname': 'server-4321',
- 'vm_state': 'active',
- 'system_metadata': sys_meta})
- comp1 = db.service_create(self.context, {'host': 'host1',
- 'topic': "compute"})
- agg = db.aggregate_create(self.context,
- {'name': 'agg1'}, {'availability_zone': 'zone1'})
- db.aggregate_host_add(self.context, agg['id'], 'host1')
-
- comp2 = db.service_create(self.context, {'host': 'host2',
- 'topic': "compute"})
- agg2 = db.aggregate_create(self.context,
- {'name': 'agg2'}, {'availability_zone': 'zone2'})
- db.aggregate_host_add(self.context, agg2['id'], 'host2')
-
- result = self.cloud.describe_instances(self.context)
- result = result['reservationSet'][0]
- self.assertEqual(len(result['instancesSet']), 2)
-
- # Now try filtering.
- instance_id = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
- result = self.cloud.describe_instances(self.context,
- instance_id=[instance_id])
- result = result['reservationSet'][0]
- self.assertEqual(len(result['instancesSet']), 1)
- instance = result['instancesSet'][0]
- self.assertEqual(instance['instanceId'], instance_id)
- self.assertEqual(instance['placement']['availabilityZone'], 'zone2')
- self.assertEqual(instance['ipAddress'], '1.2.3.4')
- self.assertEqual(instance['dnsName'], '1.2.3.4')
- self.assertEqual(instance['tagSet'], [])
- self.assertEqual(instance['privateDnsName'], 'server-4321')
- self.assertEqual(instance['privateIpAddress'], '192.168.0.3')
- self.assertEqual(instance['dnsNameV6'],
- 'fe80:b33f::a8bb:ccff:fedd:eeff')
- self.assertEqual(instance['clientToken'], 'client-token-2')
-
- # A filter with even one invalid id should cause an exception to be
- # raised
- self.assertRaises(exception.InstanceNotFound,
- self.cloud.describe_instances, self.context,
- instance_id=[instance_id, '435679'])
-
- db.instance_destroy(self.context, inst1['uuid'])
- db.instance_destroy(self.context, inst2['uuid'])
- db.service_destroy(self.context, comp1['id'])
- db.service_destroy(self.context, comp2['id'])
-
- def test_describe_instances_all_invalid(self):
- # Makes sure describe_instances works and filters results.
- self.flags(use_ipv6=True)
-
- self._stub_instance_get_with_fixed_ips('get_all')
- self._stub_instance_get_with_fixed_ips('get')
-
- instance_id = ec2utils.id_to_ec2_inst_id('435679')
- self.assertRaises(exception.InstanceNotFound,
- self.cloud.describe_instances, self.context,
- instance_id=[instance_id])
-
- def test_describe_instances_with_filters(self):
- # Makes sure describe_instances works and filters results.
- filters = {'filter': [{'name': 'test',
- 'value': ['a', 'b']},
- {'name': 'another_test',
- 'value': 'a string'}]}
-
- self._stub_instance_get_with_fixed_ips('get_all')
- self._stub_instance_get_with_fixed_ips('get')
-
- result = self.cloud.describe_instances(self.context, **filters)
- self.assertEqual(result, {'reservationSet': []})
-
- def test_describe_instances_with_filters_tags(self):
- # Makes sure describe_instances works and filters tag results.
-
- # We need to stub network calls
- self._stub_instance_get_with_fixed_ips('get_all')
- self._stub_instance_get_with_fixed_ips('get')
-
- # We need to stub out the MQ call - it won't succeed. We do want
- # to check that the method is called, though
- meta_changes = [None]
-
- def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
- instance_uuid=None):
- meta_changes[0] = diff
-
- self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
- fake_change_instance_metadata)
-
- utc = iso8601.iso8601.Utc()
-
- # Create some test images
- sys_meta = flavors.save_flavor_info(
- {}, flavors.get_flavor(1))
- image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- inst1_kwargs = {
- 'reservation_id': 'a',
- 'image_ref': image_uuid,
- 'instance_type_id': 1,
- 'host': 'host1',
- 'vm_state': 'active',
- 'launched_at': timeutils.utcnow(),
- 'hostname': 'server-1111',
- 'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1,
- tzinfo=utc),
- 'system_metadata': sys_meta
- }
-
- inst2_kwargs = {
- 'reservation_id': 'b',
- 'image_ref': image_uuid,
- 'instance_type_id': 1,
- 'host': 'host2',
- 'vm_state': 'active',
- 'launched_at': timeutils.utcnow(),
- 'hostname': 'server-1112',
- 'created_at': datetime.datetime(2012, 5, 1, 1, 1, 2,
- tzinfo=utc),
- 'system_metadata': sys_meta
- }
-
- inst1 = db.instance_create(self.context, inst1_kwargs)
- ec2_id1 = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
-
- inst2 = db.instance_create(self.context, inst2_kwargs)
- ec2_id2 = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
-
- # Create some tags
- # We get one overlapping pair, one overlapping key, and a
- # disparate pair
- # inst1 : {'foo': 'bar', 'baz': 'wibble', 'bax': 'wobble'}
- # inst2 : {'foo': 'bar', 'baz': 'quux', 'zog': 'bobble'}
-
- md = {'key': 'foo', 'value': 'bar'}
- self.cloud.create_tags(self.context, resource_id=[ec2_id1, ec2_id2],
- tag=[md])
-
- md2 = {'key': 'baz', 'value': 'wibble'}
- md3 = {'key': 'bax', 'value': 'wobble'}
- self.cloud.create_tags(self.context, resource_id=[ec2_id1],
- tag=[md2, md3])
-
- md4 = {'key': 'baz', 'value': 'quux'}
- md5 = {'key': 'zog', 'value': 'bobble'}
- self.cloud.create_tags(self.context, resource_id=[ec2_id2],
- tag=[md4, md5])
- # We should be able to search by:
-
- inst1_ret = {
- 'groupSet': None,
- 'instancesSet': [{'amiLaunchIndex': None,
- 'dnsName': '1.2.3.4',
- 'dnsNameV6': 'fe80:b33f::a8bb:ccff:fedd:eeff',
- 'imageId': 'ami-00000001',
- 'instanceId': 'i-00000001',
- 'instanceState': {'code': 16,
- 'name': 'running'},
- 'instanceType': u'm1.medium',
- 'ipAddress': '1.2.3.4',
- 'keyName': 'None (None, host1)',
- 'launchTime':
- datetime.datetime(2012, 5, 1, 1, 1, 1,
- tzinfo=utc),
- 'placement': {
- 'availabilityZone': 'nova'},
- 'privateDnsName': u'server-1111',
- 'privateIpAddress': '192.168.0.3',
- 'productCodesSet': None,
- 'rootDeviceName': '/dev/sda1',
- 'rootDeviceType': 'instance-store',
- 'tagSet': [{'key': u'foo',
- 'value': u'bar'},
- {'key': u'baz',
- 'value': u'wibble'},
- {'key': u'bax',
- 'value': u'wobble'}]}],
- 'ownerId': None,
- 'reservationId': u'a'}
-
- inst2_ret = {
- 'groupSet': None,
- 'instancesSet': [{'amiLaunchIndex': None,
- 'dnsName': '1.2.3.4',
- 'dnsNameV6': 'fe80:b33f::a8bb:ccff:fedd:eeff',
- 'imageId': 'ami-00000001',
- 'instanceId': 'i-00000002',
- 'instanceState': {'code': 16,
- 'name': 'running'},
- 'instanceType': u'm1.medium',
- 'ipAddress': '1.2.3.4',
- 'keyName': u'None (None, host2)',
- 'launchTime':
- datetime.datetime(2012, 5, 1, 1, 1, 2,
- tzinfo=utc),
- 'placement': {
- 'availabilityZone': 'nova'},
- 'privateDnsName': u'server-1112',
- 'privateIpAddress': '192.168.0.3',
- 'productCodesSet': None,
- 'rootDeviceName': '/dev/sda1',
- 'rootDeviceType': 'instance-store',
- 'tagSet': [{'key': u'foo',
- 'value': u'bar'},
- {'key': u'baz',
- 'value': u'quux'},
- {'key': u'zog',
- 'value': u'bobble'}]}],
- 'ownerId': None,
- 'reservationId': u'b'}
-
- # No filter
- result = self.cloud.describe_instances(self.context)
- self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
-
- # Key search
- # Both should have tags with key 'foo' and value 'bar'
- filters = {'filter': [{'name': 'tag:foo',
- 'value': ['bar']}]}
- result = self.cloud.describe_instances(self.context, **filters)
- self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
-
- # Both should have tags with key 'foo'
- filters = {'filter': [{'name': 'tag-key',
- 'value': ['foo']}]}
- result = self.cloud.describe_instances(self.context, **filters)
- self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
-
- # Value search
- # Only inst2 should have tags with key 'baz' and value 'quux'
- filters = {'filter': [{'name': 'tag:baz',
- 'value': ['quux']}]}
- result = self.cloud.describe_instances(self.context, **filters)
- self.assertEqual(result, {'reservationSet': [inst2_ret]})
-
- # Only inst2 should have tags with value 'quux'
- filters = {'filter': [{'name': 'tag-value',
- 'value': ['quux']}]}
- result = self.cloud.describe_instances(self.context, **filters)
- self.assertEqual(result, {'reservationSet': [inst2_ret]})
-
- # Multiple values
- # Both should have tags with key 'baz' and values in the set
- # ['quux', 'wibble']
- filters = {'filter': [{'name': 'tag:baz',
- 'value': ['quux', 'wibble']}]}
- result = self.cloud.describe_instances(self.context, **filters)
- self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
-
- # Both should have tags with key 'baz' or tags with value 'bar'
- filters = {'filter': [{'name': 'tag-key',
- 'value': ['baz']},
- {'name': 'tag-value',
- 'value': ['bar']}]}
- result = self.cloud.describe_instances(self.context, **filters)
- self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})
-
- # Confirm deletion of tags
- # Check for format 'tag:'
- self.cloud.delete_tags(self.context, resource_id=[ec2_id1], tag=[md])
- filters = {'filter': [{'name': 'tag:foo',
- 'value': ['bar']}]}
- result = self.cloud.describe_instances(self.context, **filters)
- self.assertEqual(result, {'reservationSet': [inst2_ret]})
-
- # Check for format 'tag-'
- filters = {'filter': [{'name': 'tag-key',
- 'value': ['foo']}]}
- result = self.cloud.describe_instances(self.context, **filters)
- self.assertEqual(result, {'reservationSet': [inst2_ret]})
- filters = {'filter': [{'name': 'tag-value',
- 'value': ['bar']}]}
- result = self.cloud.describe_instances(self.context, **filters)
- self.assertEqual(result, {'reservationSet': [inst2_ret]})
-
- # destroy the test instances
- db.instance_destroy(self.context, inst1['uuid'])
- db.instance_destroy(self.context, inst2['uuid'])
-
- def test_describe_instances_sorting(self):
- # Makes sure describe_instances works and is sorted as expected.
- self.flags(use_ipv6=True)
-
- self._stub_instance_get_with_fixed_ips('get_all')
- self._stub_instance_get_with_fixed_ips('get')
-
- image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- sys_meta = flavors.save_flavor_info(
- {}, flavors.get_flavor(1))
- inst_base = {
- 'reservation_id': 'a',
- 'image_ref': image_uuid,
- 'instance_type_id': 1,
- 'vm_state': 'active',
- 'system_metadata': sys_meta,
- }
-
- utc = iso8601.iso8601.Utc()
-
- inst1_kwargs = {}
- inst1_kwargs.update(inst_base)
- inst1_kwargs['host'] = 'host1'
- inst1_kwargs['hostname'] = 'server-1111'
- inst1_kwargs['created_at'] = datetime.datetime(2012, 5, 1, 1, 1, 1,
- tzinfo=utc)
- inst1 = db.instance_create(self.context, inst1_kwargs)
-
- inst2_kwargs = {}
- inst2_kwargs.update(inst_base)
- inst2_kwargs['host'] = 'host2'
- inst2_kwargs['hostname'] = 'server-2222'
- inst2_kwargs['created_at'] = datetime.datetime(2012, 2, 1, 1, 1, 1,
- tzinfo=utc)
- inst2 = db.instance_create(self.context, inst2_kwargs)
-
- inst3_kwargs = {}
- inst3_kwargs.update(inst_base)
- inst3_kwargs['host'] = 'host3'
- inst3_kwargs['hostname'] = 'server-3333'
- inst3_kwargs['created_at'] = datetime.datetime(2012, 2, 5, 1, 1, 1,
- tzinfo=utc)
- inst3 = db.instance_create(self.context, inst3_kwargs)
-
- comp1 = db.service_create(self.context, {'host': 'host1',
- 'topic': "compute"})
-
- comp2 = db.service_create(self.context, {'host': 'host2',
- 'topic': "compute"})
-
- result = self.cloud.describe_instances(self.context)
- result = result['reservationSet'][0]['instancesSet']
- self.assertEqual(result[0]['launchTime'], inst2_kwargs['created_at'])
- self.assertEqual(result[1]['launchTime'], inst3_kwargs['created_at'])
- self.assertEqual(result[2]['launchTime'], inst1_kwargs['created_at'])
-
- db.instance_destroy(self.context, inst1['uuid'])
- db.instance_destroy(self.context, inst2['uuid'])
- db.instance_destroy(self.context, inst3['uuid'])
- db.service_destroy(self.context, comp1['id'])
- db.service_destroy(self.context, comp2['id'])
-
- def test_describe_instance_state(self):
- # Makes sure describe_instances for instanceState works.
-
- def test_instance_state(expected_code, expected_name,
- power_state_, vm_state_, values=None):
- image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- sys_meta = flavors.save_flavor_info(
- {}, flavors.get_flavor(1))
- values = values or {}
- values.update({'image_ref': image_uuid, 'instance_type_id': 1,
- 'power_state': power_state_, 'vm_state': vm_state_,
- 'system_metadata': sys_meta})
- inst = db.instance_create(self.context, values)
-
- instance_id = ec2utils.id_to_ec2_inst_id(inst['uuid'])
- result = self.cloud.describe_instances(self.context,
- instance_id=[instance_id])
- result = result['reservationSet'][0]
- result = result['instancesSet'][0]['instanceState']
-
- name = result['name']
- code = result['code']
- self.assertEqual(code, expected_code)
- self.assertEqual(name, expected_name)
-
- db.instance_destroy(self.context, inst['uuid'])
-
- test_instance_state(inst_state.RUNNING_CODE, inst_state.RUNNING,
- power_state.RUNNING, vm_states.ACTIVE)
- test_instance_state(inst_state.STOPPED_CODE, inst_state.STOPPED,
- power_state.NOSTATE, vm_states.STOPPED,
- {'shutdown_terminate': False})
-
- def test_describe_instances_no_ipv6(self):
- # Makes sure describe_instances w/ no ipv6 works.
- self.flags(use_ipv6=False)
-
- self._stub_instance_get_with_fixed_ips('get_all')
- self._stub_instance_get_with_fixed_ips('get')
-
- image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- sys_meta = flavors.save_flavor_info(
- {}, flavors.get_flavor(1))
- inst1 = db.instance_create(self.context, {'reservation_id': 'a',
- 'image_ref': image_uuid,
- 'instance_type_id': 1,
- 'hostname': 'server-1234',
- 'vm_state': 'active',
- 'system_metadata': sys_meta})
- comp1 = db.service_create(self.context, {'host': 'host1',
- 'topic': "compute"})
- result = self.cloud.describe_instances(self.context)
- result = result['reservationSet'][0]
- self.assertEqual(len(result['instancesSet']), 1)
- instance = result['instancesSet'][0]
- instance_id = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
- self.assertEqual(instance['instanceId'], instance_id)
- self.assertEqual(instance['ipAddress'], '1.2.3.4')
- self.assertEqual(instance['dnsName'], '1.2.3.4')
- self.assertEqual(instance['privateDnsName'], 'server-1234')
- self.assertEqual(instance['privateIpAddress'], '192.168.0.3')
- self.assertNotIn('dnsNameV6', instance)
- db.instance_destroy(self.context, inst1['uuid'])
- db.service_destroy(self.context, comp1['id'])
-
- def test_describe_instances_deleted(self):
- image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- sys_meta = flavors.save_flavor_info(
- {}, flavors.get_flavor(1))
- args1 = {'reservation_id': 'a',
- 'image_ref': image_uuid,
- 'instance_type_id': 1,
- 'host': 'host1',
- 'vm_state': 'active',
- 'system_metadata': sys_meta}
- inst1 = db.instance_create(self.context, args1)
- args2 = {'reservation_id': 'b',
- 'image_ref': image_uuid,
- 'instance_type_id': 1,
- 'host': 'host1',
- 'vm_state': 'active',
- 'system_metadata': sys_meta}
- inst2 = db.instance_create(self.context, args2)
- db.instance_destroy(self.context, inst1['uuid'])
- result = self.cloud.describe_instances(self.context)
- self.assertEqual(len(result['reservationSet']), 1)
- result1 = result['reservationSet'][0]['instancesSet']
- self.assertEqual(result1[0]['instanceId'],
- ec2utils.id_to_ec2_inst_id(inst2['uuid']))
-
- def test_describe_instances_with_image_deleted(self):
- image_uuid = 'aebef54a-ed67-4d10-912f-14455edce176'
- sys_meta = flavors.save_flavor_info(
- {}, flavors.get_flavor(1))
- args1 = {'reservation_id': 'a',
- 'image_ref': image_uuid,
- 'instance_type_id': 1,
- 'host': 'host1',
- 'vm_state': 'active',
- 'system_metadata': sys_meta}
- db.instance_create(self.context, args1)
- args2 = {'reservation_id': 'b',
- 'image_ref': image_uuid,
- 'instance_type_id': 1,
- 'host': 'host1',
- 'vm_state': 'active',
- 'system_metadata': sys_meta}
- db.instance_create(self.context, args2)
- result = self.cloud.describe_instances(self.context)
- self.assertEqual(len(result['reservationSet']), 2)
-
- def test_describe_instances_dnsName_set(self):
- # Verifies dnsName doesn't get set if floating IP is set.
- self._stub_instance_get_with_fixed_ips('get_all', get_floating=False)
- self._stub_instance_get_with_fixed_ips('get', get_floating=False)
-
- image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- sys_meta = flavors.save_flavor_info(
- {}, flavors.get_flavor(1))
- db.instance_create(self.context, {'reservation_id': 'a',
- 'image_ref': image_uuid,
- 'instance_type_id': 1,
- 'host': 'host1',
- 'hostname': 'server-1234',
- 'vm_state': 'active',
- 'system_metadata': sys_meta})
- result = self.cloud.describe_instances(self.context)
- result = result['reservationSet'][0]
- instance = result['instancesSet'][0]
- self.assertIsNone(instance['dnsName'])
-
- def test_describe_instances_booting_from_a_volume(self):
- sys_meta = flavors.save_flavor_info(
- {}, flavors.get_flavor(1))
- inst = objects.Instance(self.context)
- inst.reservation_id = 'a'
- inst.image_ref = ''
- inst.root_device_name = '/dev/sdh'
- inst.instance_type_id = 1
- inst.vm_state = vm_states.ACTIVE
- inst.host = 'host1'
- inst.system_metadata = sys_meta
- inst.create()
- result = self.cloud.describe_instances(self.context)
- result = result['reservationSet'][0]
- instance = result['instancesSet'][0]
- self.assertIsNone(instance['imageId'])
-
- def test_describe_images(self):
- describe_images = self.cloud.describe_images
-
- def fake_detail(meh, context, **kwargs):
- return [{'id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'name': 'fake_name',
- 'container_format': 'ami',
- 'status': 'active',
- 'properties': {
- 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'type': 'machine'}}]
-
- def fake_show_none(meh, context, id):
- raise exception.ImageNotFound(image_id='bad_image_id')
-
- def fake_detail_none(self, context, **kwargs):
- return []
-
- self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
- # list all
- result1 = describe_images(self.context)
- result1 = result1['imagesSet'][0]
- self.assertEqual(result1['imageId'], 'ami-00000001')
- # provided a valid image_id
- result2 = describe_images(self.context, ['ami-00000001'])
- self.assertEqual(1, len(result2['imagesSet']))
- # provide more than 1 valid image_id
- result3 = describe_images(self.context, ['ami-00000001',
- 'ami-00000002'])
- self.assertEqual(2, len(result3['imagesSet']))
- # provide a non-existing image_id
- self.stubs.UnsetAll()
- self.stubs.Set(fake._FakeImageService, 'show', fake_show_none)
- self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_none)
- self.assertRaises(exception.ImageNotFound, describe_images,
- self.context, ['ami-fake'])
-
- def assertDictListUnorderedMatch(self, L1, L2, key):
- self.assertEqual(len(L1), len(L2))
- for d1 in L1:
- self.assertIn(key, d1)
- for d2 in L2:
- self.assertIn(key, d2)
- if d1[key] == d2[key]:
- self.assertThat(d1, matchers.DictMatches(d2))
-
- def _setUpImageSet(self, create_volumes_and_snapshots=False):
- self.flags(max_local_block_devices=-1)
- mappings1 = [
- {'device': '/dev/sda1', 'virtual': 'root'},
-
- {'device': 'sdb0', 'virtual': 'ephemeral0'},
- {'device': 'sdb1', 'virtual': 'ephemeral1'},
- {'device': 'sdb2', 'virtual': 'ephemeral2'},
- {'device': 'sdb3', 'virtual': 'ephemeral3'},
- {'device': 'sdb4', 'virtual': 'ephemeral4'},
-
- {'device': 'sdc0', 'virtual': 'swap'},
- {'device': 'sdc1', 'virtual': 'swap'},
- {'device': 'sdc2', 'virtual': 'swap'},
- {'device': 'sdc3', 'virtual': 'swap'},
- {'device': 'sdc4', 'virtual': 'swap'}]
- block_device_mapping1 = [
- {'device_name': '/dev/sdb1',
- 'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e3'},
- {'device_name': '/dev/sdb2',
- 'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e4'},
- {'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
- {'device_name': '/dev/sdb4', 'no_device': True},
-
- {'device_name': '/dev/sdc1',
- 'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e5'},
- {'device_name': '/dev/sdc2',
- 'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e6'},
- {'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'},
- {'device_name': '/dev/sdc4', 'no_device': True}]
- image1 = {
- 'id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'name': 'fake_name',
- 'status': 'active',
- 'properties': {
- 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'type': 'machine',
- 'image_state': 'available',
- 'mappings': mappings1,
- 'block_device_mapping': block_device_mapping1,
- }
- }
-
- mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
- block_device_mapping2 = [{'device_name': '/dev/sdb1',
- 'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e7',
- 'volume_id': None}]
- image2 = {
- 'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- 'name': 'fake_name',
- 'status': 'active',
- 'properties': {
- 'kernel_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- 'type': 'machine',
- 'root_device_name': '/dev/sdb1',
- 'mappings': mappings2,
- 'block_device_mapping': block_device_mapping2}}
-
- def fake_show(meh, context, image_id, **kwargs):
- _images = [copy.deepcopy(image1), copy.deepcopy(image2)]
- for i in _images:
- if str(i['id']) == str(image_id):
- return i
- raise exception.ImageNotFound(image_id=image_id)
-
- def fake_detail(meh, context, **kwargs):
- return [copy.deepcopy(image1), copy.deepcopy(image2)]
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_show)
- self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
-
- volumes = []
- snapshots = []
- if create_volumes_and_snapshots:
- for bdm in block_device_mapping1:
- if 'volume_id' in bdm:
- vol = self._volume_create(bdm['volume_id'])
- volumes.append(vol['id'])
- if 'snapshot_id' in bdm:
- snap = self._snapshot_create(bdm['snapshot_id'])
- snapshots.append(snap['id'])
- return (volumes, snapshots)
-
- def _assertImageSet(self, result, root_device_type, root_device_name):
- self.assertEqual(1, len(result['imagesSet']))
- result = result['imagesSet'][0]
- self.assertIn('rootDeviceType', result)
- self.assertEqual(result['rootDeviceType'], root_device_type)
- self.assertIn('rootDeviceName', result)
- self.assertEqual(result['rootDeviceName'], root_device_name)
- self.assertIn('blockDeviceMapping', result)
-
- return result
-
- _expected_root_device_name1 = '/dev/sda1'
- # NOTE(yamahata): noDevice doesn't make sense when returning mapping
- # It makes sense only when user overriding existing
- # mapping.
- _expected_bdms1 = [
- {'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'},
- {'deviceName': '/dev/sdb1', 'ebs': {'snapshotId':
- 'snap-00000001'}},
- {'deviceName': '/dev/sdb2', 'ebs': {'snapshotId':
- 'vol-00000001'}},
- {'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'},
- # {'deviceName': '/dev/sdb4', 'noDevice': True},
-
- {'deviceName': '/dev/sdc0', 'virtualName': 'swap'},
- {'deviceName': '/dev/sdc1', 'ebs': {'snapshotId':
- 'snap-00000002'}},
- {'deviceName': '/dev/sdc2', 'ebs': {'snapshotId':
- 'vol-00000002'}},
- {'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'},
- # {'deviceName': '/dev/sdc4', 'noDevice': True}
- ]
-
- _expected_root_device_name2 = '/dev/sdb1'
- _expected_bdms2 = [{'deviceName': '/dev/sdb1',
- 'ebs': {'snapshotId': 'snap-00000003'}}]
-
- # NOTE(yamahata):
- # InstanceBlockDeviceMappingItemType
- # rootDeviceType
- # rootDeviceName
- # blockDeviceMapping
- # deviceName
- # virtualName
- # ebs
- # snapshotId
- # volumeSize
- # deleteOnTermination
- # noDevice
- def test_describe_image_mapping(self):
- # test for rootDeviceName and blockDeviceMapping.
- describe_images = self.cloud.describe_images
- self._setUpImageSet()
-
- result = describe_images(self.context, ['ami-00000001'])
- result = self._assertImageSet(result, 'instance-store',
- self._expected_root_device_name1)
-
- self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
- self._expected_bdms1, 'deviceName')
-
- result = describe_images(self.context, ['ami-00000002'])
- result = self._assertImageSet(result, 'ebs',
- self._expected_root_device_name2)
-
- self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
- self._expected_bdms2, 'deviceName')
-
- def test_describe_image_attribute(self):
- describe_image_attribute = self.cloud.describe_image_attribute
-
- def fake_show(meh, context, id, **kwargs):
- return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'name': 'fake_name',
- 'status': 'active',
- 'properties': {
- 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'type': 'machine'},
- 'container_format': 'ami',
- 'is_public': True}
-
- def fake_detail(self, context, **kwargs):
- image = fake_show(None, context, None)
- image['name'] = kwargs.get('filters', {}).get('name')
- return [image]
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_show)
- self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
- result = describe_image_attribute(self.context, 'ami-00000001',
- 'launchPermission')
- self.assertEqual([{'group': 'all'}], result['launchPermission'])
- result = describe_image_attribute(self.context, 'ami-00000001',
- 'kernel')
- self.assertEqual('aki-00000001', result['kernel']['value'])
- result = describe_image_attribute(self.context, 'ami-00000001',
- 'ramdisk')
- self.assertEqual('ari-00000001', result['ramdisk']['value'])
-
- def test_describe_image_attribute_root_device_name(self):
- describe_image_attribute = self.cloud.describe_image_attribute
- self._setUpImageSet()
-
- result = describe_image_attribute(self.context, 'ami-00000001',
- 'rootDeviceName')
- self.assertEqual(result['rootDeviceName'],
- self._expected_root_device_name1)
- result = describe_image_attribute(self.context, 'ami-00000002',
- 'rootDeviceName')
- self.assertEqual(result['rootDeviceName'],
- self._expected_root_device_name2)
-
- def test_describe_image_attribute_block_device_mapping(self):
- describe_image_attribute = self.cloud.describe_image_attribute
- self._setUpImageSet()
-
- result = describe_image_attribute(self.context, 'ami-00000001',
- 'blockDeviceMapping')
- self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
- self._expected_bdms1, 'deviceName')
- result = describe_image_attribute(self.context, 'ami-00000002',
- 'blockDeviceMapping')
- self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
- self._expected_bdms2, 'deviceName')
-
- def test_modify_image_attribute(self):
- modify_image_attribute = self.cloud.modify_image_attribute
-
- fake_metadata = {
- 'id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'name': 'fake_name',
- 'container_format': 'ami',
- 'status': 'active',
- 'properties': {
- 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'type': 'machine'},
- 'is_public': False}
-
- def fake_show(meh, context, id, **kwargs):
- return copy.deepcopy(fake_metadata)
-
- def fake_detail(self, context, **kwargs):
- image = fake_show(None, context, None)
- image['name'] = kwargs.get('filters', {}).get('name')
- return [image]
-
- def fake_update(meh, context, image_id, metadata, data=None):
- self.assertEqual(metadata['properties']['kernel_id'],
- fake_metadata['properties']['kernel_id'])
- self.assertEqual(metadata['properties']['ramdisk_id'],
- fake_metadata['properties']['ramdisk_id'])
- self.assertTrue(metadata['is_public'])
- image = copy.deepcopy(fake_metadata)
- image.update(metadata)
- return image
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_show)
- self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
- self.stubs.Set(fake._FakeImageService, 'update', fake_update)
- result = modify_image_attribute(self.context, 'ami-00000001',
- 'launchPermission', 'add',
- user_group=['all'])
- self.assertTrue(result['is_public'])
-
- def test_register_image(self):
- register_image = self.cloud.register_image
-
- def fake_create(*args, **kwargs):
- # NOTE(vish): We are mocking s3 so make sure we have converted
- # to ids instead of uuids.
- return {'id': 1,
- 'name': 'fake_name',
- 'container_format': 'ami',
- 'properties': {'kernel_id': 1,
- 'ramdisk_id': 1,
- 'type': 'machine'
- },
- 'is_public': False
- }
-
- self.stubs.Set(s3.S3ImageService, 'create', fake_create)
- image_location = 'fake_bucket/fake.img.manifest.xml'
- result = register_image(self.context, image_location)
- self.assertEqual(result['imageId'], 'ami-00000001')
-
- def test_register_image_empty(self):
- register_image = self.cloud.register_image
- self.assertRaises(exception.MissingParameter, register_image,
- self.context, image_location=None)
-
- def test_register_image_name(self):
- register_image = self.cloud.register_image
-
- def fake_create(_self, context, metadata, data=None):
- self.assertEqual(metadata['name'], self.expected_name)
- metadata['id'] = 1
- metadata['container_format'] = 'ami'
- metadata['is_public'] = False
- return metadata
-
- self.stubs.Set(s3.S3ImageService, 'create', fake_create)
- self.expected_name = 'fake_bucket/fake.img.manifest.xml'
- register_image(self.context,
- image_location=self.expected_name,
- name=None)
- self.expected_name = 'an image name'
- register_image(self.context,
- image_location='some_location',
- name=self.expected_name)
-
- def test_format_image(self):
- image = {
- 'id': 1,
- 'container_format': 'ami',
- 'name': 'name',
- 'owner': 'someone',
- 'properties': {
- 'image_location': 'location',
- 'kernel_id': 1,
- 'ramdisk_id': 1,
- 'type': 'machine'},
- 'is_public': False}
- expected = {'name': 'name',
- 'imageOwnerId': 'someone',
- 'isPublic': False,
- 'imageId': 'ami-00000001',
- 'imageState': None,
- 'rootDeviceType': 'instance-store',
- 'architecture': None,
- 'imageLocation': 'location',
- 'kernelId': 'aki-00000001',
- 'ramdiskId': 'ari-00000001',
- 'rootDeviceName': '/dev/sda1',
- 'imageType': 'machine',
- 'description': None}
- result = self.cloud._format_image(image)
- self.assertThat(result, matchers.DictMatches(expected))
- image['properties']['image_location'] = None
- expected['imageLocation'] = 'None (name)'
- result = self.cloud._format_image(image)
- self.assertThat(result, matchers.DictMatches(expected))
- image['name'] = None
- image['properties']['image_location'] = 'location'
- expected['imageLocation'] = 'location'
- expected['name'] = 'location'
- result = self.cloud._format_image(image)
- self.assertThat(result, matchers.DictMatches(expected))
-
- def test_deregister_image(self):
- deregister_image = self.cloud.deregister_image
-
- def fake_delete(self, context, id):
- return None
-
- self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
- # valid image
- result = deregister_image(self.context, 'ami-00000001')
- self.assertTrue(result)
- # invalid image
- self.stubs.UnsetAll()
-
- def fake_detail_empty(self, context, **kwargs):
- return []
-
- self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_empty)
- self.assertRaises(exception.ImageNotFound, deregister_image,
- self.context, 'ami-bad001')
-
- def test_deregister_image_wrong_container_type(self):
- deregister_image = self.cloud.deregister_image
-
- def fake_delete(self, context, id):
- return None
-
- self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
- self.assertRaises(exception.NotFound, deregister_image, self.context,
- 'aki-00000001')
-
- def _run_instance(self, **kwargs):
- rv = self.cloud.run_instances(self.context, **kwargs)
- instance_id = rv['instancesSet'][0]['instanceId']
- return instance_id
-
- def test_get_password_data(self):
- instance_id = self._run_instance(
- image_id='ami-1',
- instance_type=CONF.default_flavor,
- max_count=1)
- self.stubs.Set(password, 'extract_password', lambda i: 'fakepass')
- output = self.cloud.get_password_data(context=self.context,
- instance_id=[instance_id])
- self.assertEqual(output['passwordData'], 'fakepass')
- self.cloud.terminate_instances(self.context, [instance_id])
-
- def test_console_output(self):
- instance_id = self._run_instance(
- image_id='ami-1',
- instance_type=CONF.default_flavor,
- max_count=1)
- output = self.cloud.get_console_output(context=self.context,
- instance_id=[instance_id])
- self.assertEqual(base64.b64decode(output['output']),
- 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE')
- # TODO(soren): We need this until we can stop polling in the rpc code
- # for unit tests.
- self.cloud.terminate_instances(self.context, [instance_id])
-
- def test_key_generation(self):
- result, private_key = self._create_key('test')
-
- expected = db.key_pair_get(self.context,
- self.context.user_id,
- 'test')['public_key']
-
- (fd, fname) = tempfile.mkstemp()
- os.write(fd, private_key)
-
- public_key, err = utils.execute('ssh-keygen', '-e', '-f', fname)
-
- os.unlink(fname)
-
- # assert key fields are equal
- self.assertEqual(''.join(public_key.split("\n")[2:-2]),
- expected.split(" ")[1].strip())
-
- def test_describe_key_pairs(self):
- self._create_key('test1')
- self._create_key('test2')
- result = self.cloud.describe_key_pairs(self.context)
- keys = result["keySet"]
- self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys))
- self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))
-
- def test_describe_bad_key_pairs(self):
- self.assertRaises(exception.KeypairNotFound,
- self.cloud.describe_key_pairs, self.context,
- key_name=['DoesNotExist'])
-
- def test_import_key_pair(self):
- pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
- with open(pubkey_path + '/dummy.pub') as f:
- dummypub = f.readline().rstrip()
- with open(pubkey_path + '/dummy.fingerprint') as f:
- dummyfprint = f.readline().rstrip()
- key_name = 'testimportkey'
- public_key_material = base64.b64encode(dummypub)
- result = self.cloud.import_key_pair(self.context,
- key_name,
- public_key_material)
- self.assertEqual(result['keyName'], key_name)
- self.assertEqual(result['keyFingerprint'], dummyfprint)
- keydata = db.key_pair_get(self.context,
- self.context.user_id,
- key_name)
- self.assertEqual(dummypub, keydata['public_key'])
- self.assertEqual(dummyfprint, keydata['fingerprint'])
-
- def test_import_key_pair_quota_limit(self):
- self.flags(quota_key_pairs=0)
- pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
- f = open(pubkey_path + '/dummy.pub', 'r')
- dummypub = f.readline().rstrip()
- f.close
- f = open(pubkey_path + '/dummy.fingerprint', 'r')
- f.readline().rstrip()
- f.close
- key_name = 'testimportkey'
- public_key_material = base64.b64encode(dummypub)
- self.assertRaises(exception.KeypairLimitExceeded,
- self.cloud.import_key_pair, self.context, key_name,
- public_key_material)
-
- def test_create_key_pair(self):
- good_names = ('a', 'a' * 255, string.ascii_letters + ' -_')
- bad_names = ('', 'a' * 256, '*', '/')
-
- for key_name in good_names:
- result = self.cloud.create_key_pair(self.context,
- key_name)
- self.assertEqual(result['keyName'], key_name)
-
- for key_name in bad_names:
- self.assertRaises(exception.InvalidKeypair,
- self.cloud.create_key_pair,
- self.context,
- key_name)
-
- def test_create_key_pair_quota_limit(self):
- self.flags(quota_key_pairs=10)
- for i in range(0, 10):
- key_name = 'key_%i' % i
- result = self.cloud.create_key_pair(self.context,
- key_name)
- self.assertEqual(result['keyName'], key_name)
-
- # 11'th group should fail
- self.assertRaises(exception.KeypairLimitExceeded,
- self.cloud.create_key_pair,
- self.context,
- 'foo')
-
- def test_delete_key_pair(self):
- self._create_key('test')
- self.cloud.delete_key_pair(self.context, 'test')
-
- def test_run_instances(self):
- kwargs = {'image_id': 'ami-00000001',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1}
- run_instances = self.cloud.run_instances
-
- def fake_show(self, context, id, **kwargs):
- return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'name': 'fake_name',
- 'properties': {
- 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'type': 'machine'},
- 'container_format': 'ami',
- 'status': 'active'}
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_show)
-
- def dumb(*args, **kwargs):
- pass
-
- self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
-
- self.useFixture(cast_as_call.CastAsCall(self.stubs))
-
- result = run_instances(self.context, **kwargs)
- instance = result['instancesSet'][0]
- self.assertEqual(instance['imageId'], 'ami-00000001')
- self.assertEqual(instance['instanceId'], 'i-00000001')
- self.assertEqual(instance['instanceState']['name'], 'running')
- self.assertEqual(instance['instanceType'], 'm1.small')
-
- def test_run_instances_invalid_maxcount(self):
- kwargs = {'image_id': 'ami-00000001',
- 'instance_type': CONF.default_flavor,
- 'max_count': 0}
- run_instances = self.cloud.run_instances
-
- def fake_show(self, context, id, **kwargs):
- return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'name': 'fake_name',
- 'container_format': 'ami',
- 'properties': {
- 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'type': 'machine'},
- 'status': 'active'}
- self.stubs.UnsetAll()
- self.stubs.Set(fake._FakeImageService, 'show', fake_show)
- self.assertRaises(exception.InvalidInput, run_instances,
- self.context, **kwargs)
-
- def test_run_instances_invalid_mincount(self):
- kwargs = {'image_id': 'ami-00000001',
- 'instance_type': CONF.default_flavor,
- 'min_count': 0}
- run_instances = self.cloud.run_instances
-
- def fake_show(self, context, id, **kwargs):
- return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'name': 'fake_name',
- 'container_format': 'ami',
- 'properties': {
- 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'type': 'machine'},
- 'status': 'active'}
- self.stubs.UnsetAll()
- self.stubs.Set(fake._FakeImageService, 'show', fake_show)
- self.assertRaises(exception.InvalidInput, run_instances,
- self.context, **kwargs)
-
- def test_run_instances_invalid_count(self):
- kwargs = {'image_id': 'ami-00000001',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1,
- 'min_count': 2}
- run_instances = self.cloud.run_instances
-
- def fake_show(self, context, id, **kwargs):
- return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'name': 'fake_name',
- 'container_format': 'ami',
- 'properties': {
- 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'type': 'machine'},
- 'status': 'active'}
- self.stubs.UnsetAll()
- self.stubs.Set(fake._FakeImageService, 'show', fake_show)
- self.assertRaises(exception.InvalidInput, run_instances,
- self.context, **kwargs)
-
- def test_run_instances_availability_zone(self):
- kwargs = {'image_id': 'ami-00000001',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1,
- 'placement': {'availability_zone': 'fake'},
- }
- run_instances = self.cloud.run_instances
-
- def fake_show(self, context, id, **kwargs):
- return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'name': 'fake_name',
- 'properties': {
- 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'type': 'machine'},
- 'container_format': 'ami',
- 'status': 'active'}
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_show)
-
- self.useFixture(cast_as_call.CastAsCall(self.stubs))
-
- def fake_format(*args, **kwargs):
- pass
-
- self.stubs.Set(self.cloud, '_format_run_instances', fake_format)
-
- def fake_create(*args, **kwargs):
- self.assertEqual(kwargs['availability_zone'], 'fake')
- return ({'id': 'fake-instance'}, 'fake-res-id')
-
- self.stubs.Set(self.cloud.compute_api, 'create', fake_create)
-
- # NOTE(vish) the assert for this call is in the fake_create method.
- run_instances(self.context, **kwargs)
-
- def test_empty_reservation_id_from_token(self):
- client_token = 'client-token-1'
-
- def fake_get_all_system_metadata(context, search_filts):
- reference = [{'key': ['EC2_client_token']},
- {'value': ['client-token-1']}]
- self.assertEqual(search_filts, reference)
- return []
-
- self.stubs.Set(self.cloud.compute_api, 'get_all_system_metadata',
- fake_get_all_system_metadata)
- resv_id = self.cloud._resv_id_from_token(self.context, client_token)
- self.assertIsNone(resv_id)
-
- def test_run_instances_idempotent(self):
- # Ensure subsequent run_instances calls with same client token
- # are idempotent and that ones with different client_token are not
-
- kwargs = {'image_id': 'ami-00000001',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1}
-
- run_instances = self.cloud.run_instances
-
- def fake_show(self, context, id, **kwargs):
- return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'name': 'fake_name',
- 'properties': {
- 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'type': 'machine'},
- 'container_format': 'ami',
- 'status': 'active'}
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_show)
-
- def dumb(*args, **kwargs):
- pass
-
- self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
-
- self.useFixture(cast_as_call.CastAsCall(self.stubs))
-
- kwargs['client_token'] = 'client-token-1'
- result = run_instances(self.context, **kwargs)
- instance = result['instancesSet'][0]
- self.assertEqual(instance['instanceId'], 'i-00000001')
-
- kwargs['client_token'] = 'client-token-2'
- result = run_instances(self.context, **kwargs)
- instance = result['instancesSet'][0]
- self.assertEqual(instance['instanceId'], 'i-00000002')
-
- kwargs['client_token'] = 'client-token-2'
- result = run_instances(self.context, **kwargs)
- instance = result['instancesSet'][0]
- self.assertEqual(instance['instanceId'], 'i-00000002')
-
- kwargs['client_token'] = 'client-token-1'
- result = run_instances(self.context, **kwargs)
- instance = result['instancesSet'][0]
- self.assertEqual(instance['instanceId'], 'i-00000001')
-
- kwargs['client_token'] = 'client-token-3'
- result = run_instances(self.context, **kwargs)
- instance = result['instancesSet'][0]
- self.assertEqual(instance['instanceId'], 'i-00000003')
-
- # make sure terminated instances lose their client tokens
- self.cloud.stop_instances(self.context,
- instance_id=[instance['instanceId']])
- self.cloud.terminate_instances(self.context,
- instance_id=[instance['instanceId']])
-
- kwargs['client_token'] = 'client-token-3'
- result = run_instances(self.context, **kwargs)
- instance = result['instancesSet'][0]
- self.assertEqual(instance['instanceId'], 'i-00000004')
-
- def test_run_instances_image_state_none(self):
- kwargs = {'image_id': 'ami-00000001',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1}
- run_instances = self.cloud.run_instances
-
- def fake_show_no_state(self, context, id):
- return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'name': 'fake_name',
- 'properties': {
- 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'type': 'machine'}, 'container_format': 'ami'}
-
- self.stubs.UnsetAll()
- self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state)
- self.assertRaises(exception.ImageNotActive, run_instances,
- self.context, **kwargs)
-
- def test_run_instances_image_state_invalid(self):
- kwargs = {'image_id': 'ami-00000001',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1}
- run_instances = self.cloud.run_instances
-
- def fake_show_decrypt(self, context, id):
- return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'name': 'fake_name',
- 'container_format': 'ami',
- 'status': 'active',
- 'properties': {
- 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'type': 'machine', 'image_state': 'decrypting'}}
-
- self.stubs.UnsetAll()
- self.stubs.Set(fake._FakeImageService, 'show', fake_show_decrypt)
- self.assertRaises(exception.ImageNotActive, run_instances,
- self.context, **kwargs)
-
- def test_run_instances_image_status_active(self):
- kwargs = {'image_id': 'ami-00000001',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1}
- run_instances = self.cloud.run_instances
-
- def fake_show_stat_active(self, context, id, **kwargs):
- return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'name': 'fake_name',
- 'container_format': 'ami',
- 'properties': {
- 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'type': 'machine'},
- 'status': 'active'}
-
- def fake_id_to_glance_id(context, id):
- return 'cedef40a-ed67-4d10-800e-17455edce175'
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active)
- self.stubs.Set(ec2utils, 'id_to_glance_id', fake_id_to_glance_id)
-
- result = run_instances(self.context, **kwargs)
- self.assertEqual(len(result['instancesSet']), 1)
-
- def _restart_compute_service(self, periodic_interval_max=None):
- """restart compute service. NOTE: fake driver forgets all instances."""
- self.compute.kill()
- if periodic_interval_max:
- self.compute = self.start_service(
- 'compute', periodic_interval_max=periodic_interval_max)
- else:
- self.compute = self.start_service('compute')
-
- def test_stop_start_instance(self):
- # Makes sure stop/start instance works.
- # enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval_max=0.3)
-
- kwargs = {'image_id': 'ami-1',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1, }
- instance_id = self._run_instance(**kwargs)
-
- # a running instance can't be started.
- self.assertRaises(exception.InstanceInvalidState,
- self.cloud.start_instances,
- self.context, [instance_id])
-
- expected = {'instancesSet': [
- {'instanceId': 'i-00000001',
- 'previousState': {'code': 16,
- 'name': 'running'},
- 'currentState': {'code': 64,
- 'name': 'stopping'}}]}
- result = self.cloud.stop_instances(self.context, [instance_id])
- self.assertEqual(result, expected)
-
- expected = {'instancesSet': [
- {'instanceId': 'i-00000001',
- 'previousState': {'code': 80,
- 'name': 'stopped'},
- 'currentState': {'code': 0,
- 'name': 'pending'}}]}
- result = self.cloud.start_instances(self.context, [instance_id])
- self.assertEqual(result, expected)
-
- expected = {'instancesSet': [
- {'instanceId': 'i-00000001',
- 'previousState': {'code': 16,
- 'name': 'running'},
- 'currentState': {'code': 64,
- 'name': 'stopping'}}]}
- result = self.cloud.stop_instances(self.context, [instance_id])
- self.assertEqual(result, expected)
-
- expected = {'instancesSet': [
- {'instanceId': 'i-00000001',
- 'previousState': {'code': 80,
- 'name': 'stopped'},
- 'currentState': {'code': 32,
- 'name': 'shutting-down'}}]}
- result = self.cloud.terminate_instances(self.context, [instance_id])
- self.assertEqual(result, expected)
-
- def test_start_instances(self):
- kwargs = {'image_id': 'ami-1',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1, }
- instance_id = self._run_instance(**kwargs)
-
- result = self.cloud.stop_instances(self.context, [instance_id])
- self.assertTrue(result)
-
- expected = {'instancesSet': [
- {'instanceId': 'i-00000001',
- 'previousState': {'code': 80,
- 'name': 'stopped'},
- 'currentState': {'code': 0,
- 'name': 'pending'}}]}
- result = self.cloud.start_instances(self.context, [instance_id])
- self.assertEqual(result, expected)
-
- expected = {'instancesSet': [
- {'instanceId': 'i-00000001',
- 'previousState': {'code': 16,
- 'name': 'running'},
- 'currentState': {'code': 32,
- 'name': 'shutting-down'}}]}
- result = self.cloud.terminate_instances(self.context, [instance_id])
- self.assertEqual(result, expected)
- self._restart_compute_service()
-
- def test_start_instances_policy_failed(self):
- kwargs = {'image_id': 'ami-1',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1, }
- instance_id = self._run_instance(**kwargs)
- rules = {
- "compute:start":
- common_policy.parse_rule("project_id:non_fake"),
- }
- policy.set_rules(rules)
- exc = self.assertRaises(exception.PolicyNotAuthorized,
- self.cloud.start_instances,
- self.context, [instance_id])
- self.assertIn("compute:start", exc.format_message())
- self._restart_compute_service()
-
- def test_stop_instances(self):
- kwargs = {'image_id': 'ami-1',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1, }
- instance_id = self._run_instance(**kwargs)
-
- expected = {'instancesSet': [
- {'instanceId': 'i-00000001',
- 'previousState': {'code': 16,
- 'name': 'running'},
- 'currentState': {'code': 64,
- 'name': 'stopping'}}]}
- result = self.cloud.stop_instances(self.context, [instance_id])
- self.assertEqual(result, expected)
-
- expected = {'instancesSet': [
- {'instanceId': 'i-00000001',
- 'previousState': {'code': 80,
- 'name': 'stopped'},
- 'currentState': {'code': 32,
- 'name': 'shutting-down'}}]}
- result = self.cloud.terminate_instances(self.context, [instance_id])
- self.assertEqual(result, expected)
- self._restart_compute_service()
-
- def test_stop_instances_policy_failed(self):
- kwargs = {'image_id': 'ami-1',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1, }
- instance_id = self._run_instance(**kwargs)
- rules = {
- "compute:stop":
- common_policy.parse_rule("project_id:non_fake")
- }
- policy.set_rules(rules)
- exc = self.assertRaises(exception.PolicyNotAuthorized,
- self.cloud.stop_instances,
- self.context, [instance_id])
- self.assertIn("compute:stop", exc.format_message())
- self._restart_compute_service()
-
- def test_terminate_instances(self):
- kwargs = {'image_id': 'ami-1',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1, }
- instance_id = self._run_instance(**kwargs)
-
- # a running instance can't be started.
- self.assertRaises(exception.InstanceInvalidState,
- self.cloud.start_instances,
- self.context, [instance_id])
-
- expected = {'instancesSet': [
- {'instanceId': 'i-00000001',
- 'previousState': {'code': 16,
- 'name': 'running'},
- 'currentState': {'code': 32,
- 'name': 'shutting-down'}}]}
- result = self.cloud.terminate_instances(self.context, [instance_id])
- self.assertEqual(result, expected)
- self._restart_compute_service()
-
- def test_terminate_instances_invalid_instance_id(self):
- kwargs = {'image_id': 'ami-1',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1, }
- self._run_instance(**kwargs)
-
- self.assertRaises(exception.InstanceNotFound,
- self.cloud.terminate_instances,
- self.context, ['i-2'])
- self._restart_compute_service()
-
- def test_terminate_instances_disable_terminate(self):
- kwargs = {'image_id': 'ami-1',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1, }
- instance_id = self._run_instance(**kwargs)
-
- internal_uuid = db.get_instance_uuid_by_ec2_id(self.context,
- ec2utils.ec2_id_to_id(instance_id))
- db.instance_update(self.context, internal_uuid,
- {'disable_terminate': True})
-
- expected = {'instancesSet': [
- {'instanceId': 'i-00000001',
- 'previousState': {'code': 16,
- 'name': 'running'},
- 'currentState': {'code': 16,
- 'name': 'running'}}]}
- result = self.cloud.terminate_instances(self.context, [instance_id])
- self.assertEqual(result, expected)
-
- db.instance_update(self.context, internal_uuid,
- {'disable_terminate': False})
-
- expected = {'instancesSet': [
- {'instanceId': 'i-00000001',
- 'previousState': {'code': 16,
- 'name': 'running'},
- 'currentState': {'code': 32,
- 'name': 'shutting-down'}}]}
- result = self.cloud.terminate_instances(self.context, [instance_id])
- self.assertEqual(result, expected)
- self._restart_compute_service()
-
- def test_terminate_instances_two_instances(self):
- kwargs = {'image_id': 'ami-1',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1, }
- inst1 = self._run_instance(**kwargs)
- inst2 = self._run_instance(**kwargs)
-
- expected = {'instancesSet': [
- {'instanceId': 'i-00000001',
- 'previousState': {'code': 16,
- 'name': 'running'},
- 'currentState': {'code': 64,
- 'name': 'stopping'}}]}
- result = self.cloud.stop_instances(self.context, [inst1])
- self.assertEqual(result, expected)
-
- expected = {'instancesSet': [
- {'instanceId': 'i-00000001',
- 'previousState': {'code': 80,
- 'name': 'stopped'},
- 'currentState': {'code': 32,
- 'name': 'shutting-down'}},
- {'instanceId': 'i-00000002',
- 'previousState': {'code': 16,
- 'name': 'running'},
- 'currentState': {'code': 32,
- 'name': 'shutting-down'}}]}
- result = self.cloud.terminate_instances(self.context, [inst1, inst2])
- self.assertEqual(result, expected)
- self._restart_compute_service()
-
- def test_reboot_instances(self):
- kwargs = {'image_id': 'ami-1',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1, }
- instance_id = self._run_instance(**kwargs)
-
- # a running instance can't be started.
- self.assertRaises(exception.InstanceInvalidState,
- self.cloud.start_instances,
- self.context, [instance_id])
-
- result = self.cloud.reboot_instances(self.context, [instance_id])
- self.assertTrue(result)
-
- def _volume_create(self, volume_id=None):
- kwargs = {'name': 'test-volume',
- 'description': 'test volume description',
- 'status': 'available',
- 'host': 'fake',
- 'size': 1,
- 'attach_status': 'detached'}
- if volume_id:
- kwargs['volume_id'] = volume_id
- return self.volume_api.create_with_kwargs(self.context, **kwargs)
-
- def _snapshot_create(self, snapshot_id=None):
- kwargs = {'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e4',
- 'status': "available",
- 'volume_size': 1}
- if snapshot_id:
- kwargs['snap_id'] = snapshot_id
- return self.volume_api.create_snapshot_with_kwargs(self.context,
- **kwargs)
-
- def _create_snapshot(self, ec2_volume_id):
- result = self.cloud.create_snapshot(self.context,
- volume_id=ec2_volume_id)
- return result['snapshotId']
-
- def _do_test_create_image(self, no_reboot):
- """Make sure that CreateImage works."""
- # enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval_max=0.3)
-
- (volumes, snapshots) = self._setUpImageSet(
- create_volumes_and_snapshots=True)
-
- kwargs = {'image_id': 'ami-1',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1}
- ec2_instance_id = self._run_instance(**kwargs)
-
- def fake_show(meh, context, id, **kwargs):
- bdm = [dict(snapshot_id=snapshots[0],
- volume_size=1,
- device_name='sda1',
- delete_on_termination=False)]
- props = dict(kernel_id='cedef40a-ed67-4d10-800e-17455edce175',
- ramdisk_id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- root_device_name='/dev/sda1',
- block_device_mapping=bdm)
- return dict(id=id,
- properties=props,
- container_format='ami',
- status='active',
- is_public=True)
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_show)
-
- def fake_block_device_mapping_get_all_by_instance(context, inst_id,
- use_slave=False):
- return [fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': volumes[0],
- 'snapshot_id': snapshots[0],
- 'source_type': 'snapshot',
- 'destination_type': 'volume',
- 'volume_size': 1,
- 'device_name': 'sda1',
- 'boot_index': 0,
- 'delete_on_termination': False,
- 'connection_info': '{"foo":"bar"}',
- 'no_device': None})]
-
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fake_block_device_mapping_get_all_by_instance)
-
- virt_driver = {}
-
- def fake_power_on(self, context, instance, network_info,
- block_device_info):
- virt_driver['powered_on'] = True
-
- self.stubs.Set(fake_virt.FakeDriver, 'power_on', fake_power_on)
-
- def fake_power_off(self, instance,
- shutdown_timeout, shutdown_attempts):
- virt_driver['powered_off'] = True
-
- self.stubs.Set(fake_virt.FakeDriver, 'power_off', fake_power_off)
-
- result = self.cloud.create_image(self.context, ec2_instance_id,
- no_reboot=no_reboot)
- ec2_ids = [result['imageId']]
- created_image = self.cloud.describe_images(self.context,
- ec2_ids)['imagesSet'][0]
-
- self.assertIn('blockDeviceMapping', created_image)
- bdm = created_image['blockDeviceMapping'][0]
- self.assertEqual(bdm.get('deviceName'), 'sda1')
- self.assertIn('ebs', bdm)
- self.assertEqual(bdm['ebs'].get('snapshotId'),
- ec2utils.id_to_ec2_snap_id(snapshots[0]))
- self.assertEqual(created_image.get('kernelId'), 'aki-00000001')
- self.assertEqual(created_image.get('ramdiskId'), 'ari-00000002')
- self.assertEqual(created_image.get('rootDeviceType'), 'ebs')
- self.assertNotEqual(virt_driver.get('powered_on'), no_reboot)
- self.assertNotEqual(virt_driver.get('powered_off'), no_reboot)
-
- self.cloud.terminate_instances(self.context, [ec2_instance_id])
-
- self._restart_compute_service()
-
- def test_create_image_no_reboot(self):
- # Make sure that CreateImage works.
- self._do_test_create_image(True)
-
- def test_create_image_with_reboot(self):
- # Make sure that CreateImage works.
- self._do_test_create_image(False)
-
- def test_create_image_instance_store(self):
- """Ensure CreateImage fails as expected for an instance-store-backed
- instance
- """
- # enforce periodic tasks run in short time to avoid wait for 60s.
- self._restart_compute_service(periodic_interval_max=0.3)
-
- (volumes, snapshots) = self._setUpImageSet(
- create_volumes_and_snapshots=True)
-
- kwargs = {'image_id': 'ami-1',
- 'instance_type': CONF.default_flavor,
- 'max_count': 1}
- ec2_instance_id = self._run_instance(**kwargs)
-
- def fake_block_device_mapping_get_all_by_instance(context, inst_id,
- use_slave=False):
- return [fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': volumes[0],
- 'snapshot_id': snapshots[0],
- 'source_type': 'snapshot',
- 'destination_type': 'volume',
- 'volume_size': 1,
- 'device_name': 'vda',
- 'delete_on_termination': False,
- 'no_device': None})]
-
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fake_block_device_mapping_get_all_by_instance)
-
- self.assertRaises(exception.InvalidParameterValue,
- self.cloud.create_image,
- self.context,
- ec2_instance_id,
- no_reboot=True)
-
- @staticmethod
- def _fake_bdm_get(ctxt, id, use_slave=False):
- blockdms = [{'volume_id': 87654321,
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'snapshot_id': None,
- 'no_device': None,
- 'delete_on_termination': True,
- 'device_name': '/dev/sdh'},
- {'volume_id': None,
- 'snapshot_id': 98765432,
- 'source_type': 'snapshot',
- 'destination_type': 'volume',
- 'no_device': None,
- 'delete_on_termination': True,
- 'device_name': '/dev/sdi'},
- {'volume_id': None,
- 'snapshot_id': None,
- 'no_device': True,
- 'source_type': 'blank',
- 'destination_type': None,
- 'delete_on_termination': None,
- 'device_name': None},
- {'volume_id': None,
- 'snapshot_id': None,
- 'no_device': None,
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'guest_format': None,
- 'delete_on_termination': None,
- 'device_name': '/dev/sdb'},
- {'volume_id': None,
- 'snapshot_id': None,
- 'no_device': None,
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'guest_format': 'swap',
- 'delete_on_termination': None,
- 'device_name': '/dev/sdc'},
- {'volume_id': None,
- 'snapshot_id': None,
- 'no_device': None,
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'guest_format': None,
- 'delete_on_termination': None,
- 'device_name': '/dev/sdd'},
- {'volume_id': None,
- 'snapshot_id': None,
- 'no_device': None,
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'guest_format': None,
- 'delete_on_termination': None,
- 'device_name': '/dev/sd3'},
- ]
-
- extra = {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': 0,
- 'id': 0,
- 'device_type': None,
- 'disk_bus': None,
- 'instance_uuid': '',
- 'image_id': None,
- 'volume_size': None,
- 'connection_info': None,
- 'boot_index': None,
- 'guest_format': None,
- }
-
- for bdm in blockdms:
- bdm.update(extra)
-
- return blockdms
-
- def test_describe_instance_attribute(self):
- # Make sure that describe_instance_attribute works.
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- self._fake_bdm_get)
-
- def fake_get(ctxt, instance_id, want_objects=False):
- self.assertTrue(want_objects)
- inst_type = flavors.get_default_flavor()
- inst_type['name'] = 'fake_type'
- sys_meta = flavors.save_flavor_info({}, inst_type)
- secgroups = objects.SecurityGroupList()
- secgroups.objects.append(
- objects.SecurityGroup(name='fake0'))
- secgroups.objects.append(
- objects.SecurityGroup(name='fake1'))
- instance = objects.Instance(ctxt)
- instance.id = 0
- instance.uuid = 'e5fe5518-0288-4fa3-b0c4-c79764101b85'
- instance.root_device_name = '/dev/sdh'
- instance.security_groups = secgroups
- instance.vm_state = vm_states.STOPPED
- instance.kernel_id = 'cedef40a-ed67-4d10-800e-17455edce175'
- instance.ramdisk_id = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- instance.user_data = 'fake-user data'
- instance.shutdown_terminate = False
- instance.disable_terminate = False
- instance.system_metadata = sys_meta
- return instance
- self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
-
- def fake_ec2_instance_get_by_id(ctxt, int_id):
- if int_id == 305419896:
- fake_map = {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': 0,
- 'id': 305419896,
- 'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
- }
- return fake_map
- raise exception.InstanceNotFound(instance_id=int_id)
- self.stubs.Set(db, 'ec2_instance_get_by_id',
- fake_ec2_instance_get_by_id)
-
- get_attribute = functools.partial(
- self.cloud.describe_instance_attribute,
- self.context, 'i-12345678')
-
- bdm = get_attribute('blockDeviceMapping')
- bdm['blockDeviceMapping'].sort()
-
- expected_bdm = {'instance_id': 'i-12345678',
- 'rootDeviceType': 'ebs',
- 'blockDeviceMapping': [
- {'deviceName': '/dev/sdh',
- 'ebs': {'status': 'attached',
- 'deleteOnTermination': True,
- 'volumeId': 'vol-05397fb1',
- 'attachTime': '13:56:24'}}]}
- expected_bdm['blockDeviceMapping'].sort()
- self.assertEqual(bdm, expected_bdm)
- groupSet = get_attribute('groupSet')
- groupSet['groupSet'].sort()
- expected_groupSet = {'instance_id': 'i-12345678',
- 'groupSet': [{'groupId': 'fake0'},
- {'groupId': 'fake1'}]}
- expected_groupSet['groupSet'].sort()
- self.assertEqual(groupSet, expected_groupSet)
- self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'),
- {'instance_id': 'i-12345678',
- 'instanceInitiatedShutdownBehavior': 'stop'})
- self.assertEqual(get_attribute('disableApiTermination'),
- {'instance_id': 'i-12345678',
- 'disableApiTermination': False})
- self.assertEqual(get_attribute('instanceType'),
- {'instance_id': 'i-12345678',
- 'instanceType': 'fake_type'})
- self.assertEqual(get_attribute('kernel'),
- {'instance_id': 'i-12345678',
- 'kernel': 'aki-00000001'})
- self.assertEqual(get_attribute('ramdisk'),
- {'instance_id': 'i-12345678',
- 'ramdisk': 'ari-00000002'})
- self.assertEqual(get_attribute('rootDeviceName'),
- {'instance_id': 'i-12345678',
- 'rootDeviceName': '/dev/sdh'})
- # NOTE(yamahata): this isn't supported
- # get_attribute('sourceDestCheck')
- self.assertEqual(get_attribute('userData'),
- {'instance_id': 'i-12345678',
- 'userData': '}\xa9\x1e\xba\xc7\xabu\xabZ'})
-
- def test_instance_initiated_shutdown_behavior(self):
- def test_dia_iisb(expected_result, **kwargs):
- """test describe_instance_attribute
- attribute instance_initiated_shutdown_behavior
- """
- kwargs.update({'instance_type': CONF.default_flavor,
- 'max_count': 1})
- instance_id = self._run_instance(**kwargs)
-
- result = self.cloud.describe_instance_attribute(self.context,
- instance_id, 'instanceInitiatedShutdownBehavior')
- self.assertEqual(result['instanceInitiatedShutdownBehavior'],
- expected_result)
-
- expected = {'instancesSet': [
- {'instanceId': instance_id,
- 'previousState': {'code': 16,
- 'name': 'running'},
- 'currentState': {'code': 32,
- 'name': 'shutting-down'}}]}
- result = self.cloud.terminate_instances(self.context,
- [instance_id])
- self.assertEqual(result, expected)
- self._restart_compute_service()
-
- test_dia_iisb('stop', image_id='ami-1')
-
- block_device_mapping = [{'device_name': '/dev/vdb',
- 'virtual_name': 'ephemeral0'}]
- test_dia_iisb('stop', image_id='ami-2',
- block_device_mapping=block_device_mapping)
-
- def fake_show(self, context, id_, **kwargs):
- LOG.debug("id_ %s", id_)
-
- prop = {}
- if id_ == 'ami-3':
- pass
- elif id_ == 'ami-4':
- prop = {'mappings': [{'device': 'sdb0',
- 'virtual': 'ephemeral0'}]}
- elif id_ == 'ami-5':
- prop = {'block_device_mapping':
- [{'device_name': '/dev/sdb0',
- 'virtual_name': 'ephemeral0'}]}
- elif id_ == 'ami-6':
- prop = {'mappings': [{'device': 'sdb0',
- 'virtual': 'ephemeral0'}],
- 'block_device_mapping':
- [{'device_name': '/dev/sdb0',
- 'virtual_name': 'ephemeral0'}]}
-
- prop_base = {'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'type': 'machine'}
- prop_base.update(prop)
-
- return {
- 'id': id_,
- 'name': 'fake_name',
- 'properties': prop_base,
- 'container_format': 'ami',
- 'status': 'active'}
-
- # NOTE(yamahata): create ami-3 ... ami-7
- # ami-1 and ami-2 is already created by setUp()
- for i in range(3, 8):
- db.s3_image_create(self.context, 'ami-%d' % i)
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_show)
-
- test_dia_iisb('stop', image_id='ami-3')
- test_dia_iisb('stop', image_id='ami-4')
- test_dia_iisb('stop', image_id='ami-5')
- test_dia_iisb('stop', image_id='ami-6')
- test_dia_iisb('terminate', image_id='ami-7',
- instance_initiated_shutdown_behavior='terminate')
-
- def test_create_delete_tags(self):
-
- # We need to stub network calls
- self._stub_instance_get_with_fixed_ips('get_all')
- self._stub_instance_get_with_fixed_ips('get')
-
- # We need to stub out the MQ call - it won't succeed. We do want
- # to check that the method is called, though
- meta_changes = [None]
-
- def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
- instance_uuid=None):
- meta_changes[0] = diff
-
- self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
- fake_change_instance_metadata)
-
- # Create a test image
- image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- inst1_kwargs = {
- 'reservation_id': 'a',
- 'image_ref': image_uuid,
- 'instance_type_id': 1,
- 'vm_state': 'active',
- 'launched_at': timeutils.utcnow(),
- 'hostname': 'server-1111',
- 'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1)
- }
-
- inst1 = db.instance_create(self.context, inst1_kwargs)
- ec2_id = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
-
- # Create some tags
- md = {'key': 'foo', 'value': 'bar'}
- md_result = {'foo': 'bar'}
- self.cloud.create_tags(self.context, resource_id=[ec2_id],
- tag=[md])
-
- metadata = self.cloud.compute_api.get_instance_metadata(self.context,
- inst1)
- self.assertEqual(metadata, md_result)
- self.assertEqual(meta_changes, [{'foo': ['+', 'bar']}])
-
- # Delete them
- self.cloud.delete_tags(self.context, resource_id=[ec2_id],
- tag=[{'key': 'foo', 'value': 'bar'}])
-
- metadata = self.cloud.compute_api.get_instance_metadata(self.context,
- inst1)
- self.assertEqual(metadata, {})
- self.assertEqual(meta_changes, [{'foo': ['-']}])
-
- def test_describe_tags(self):
- # We need to stub network calls
- self._stub_instance_get_with_fixed_ips('get_all')
- self._stub_instance_get_with_fixed_ips('get')
-
- # We need to stub out the MQ call - it won't succeed. We do want
- # to check that the method is called, though
- meta_changes = [None]
-
- def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
- instance_uuid=None):
- meta_changes[0] = diff
-
- self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
- fake_change_instance_metadata)
-
- # Create some test images
- image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- inst1_kwargs = {
- 'reservation_id': 'a',
- 'image_ref': image_uuid,
- 'instance_type_id': 1,
- 'vm_state': 'active',
- 'launched_at': timeutils.utcnow(),
- 'hostname': 'server-1111',
- 'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1)
- }
-
- inst2_kwargs = {
- 'reservation_id': 'b',
- 'image_ref': image_uuid,
- 'instance_type_id': 1,
- 'vm_state': 'active',
- 'launched_at': timeutils.utcnow(),
- 'hostname': 'server-1112',
- 'created_at': datetime.datetime(2012, 5, 1, 1, 1, 2)
- }
-
- inst1 = db.instance_create(self.context, inst1_kwargs)
- ec2_id1 = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
-
- inst2 = db.instance_create(self.context, inst2_kwargs)
- ec2_id2 = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
-
- # Create some tags
- # We get one overlapping pair, and each has a different key value pair
- # inst1 : {'foo': 'bar', 'bax': 'wibble'}
- # inst1 : {'foo': 'bar', 'baz': 'quux'}
-
- md = {'key': 'foo', 'value': 'bar'}
- md_result = {'foo': 'bar'}
- self.cloud.create_tags(self.context, resource_id=[ec2_id1, ec2_id2],
- tag=[md])
-
- self.assertEqual(meta_changes, [{'foo': ['+', 'bar']}])
-
- metadata = self.cloud.compute_api.get_instance_metadata(self.context,
- inst1)
- self.assertEqual(metadata, md_result)
-
- metadata = self.cloud.compute_api.get_instance_metadata(self.context,
- inst2)
- self.assertEqual(metadata, md_result)
-
- md2 = {'key': 'baz', 'value': 'quux'}
- md2_result = {'baz': 'quux'}
- md2_result.update(md_result)
- self.cloud.create_tags(self.context, resource_id=[ec2_id2],
- tag=[md2])
-
- self.assertEqual(meta_changes, [{'baz': ['+', 'quux']}])
-
- metadata = self.cloud.compute_api.get_instance_metadata(self.context,
- inst2)
- self.assertEqual(metadata, md2_result)
-
- md3 = {'key': 'bax', 'value': 'wibble'}
- md3_result = {'bax': 'wibble'}
- md3_result.update(md_result)
- self.cloud.create_tags(self.context, resource_id=[ec2_id1],
- tag=[md3])
-
- self.assertEqual(meta_changes, [{'bax': ['+', 'wibble']}])
-
- metadata = self.cloud.compute_api.get_instance_metadata(self.context,
- inst1)
- self.assertEqual(metadata, md3_result)
-
- inst1_key_foo = {'key': u'foo', 'resource_id': 'i-00000001',
- 'resource_type': 'instance', 'value': u'bar'}
- inst1_key_bax = {'key': u'bax', 'resource_id': 'i-00000001',
- 'resource_type': 'instance', 'value': u'wibble'}
- inst2_key_foo = {'key': u'foo', 'resource_id': 'i-00000002',
- 'resource_type': 'instance', 'value': u'bar'}
- inst2_key_baz = {'key': u'baz', 'resource_id': 'i-00000002',
- 'resource_type': 'instance', 'value': u'quux'}
-
- # We should be able to search by:
- # No filter
- tags = self.cloud.describe_tags(self.context)['tagSet']
- self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo,
- inst2_key_baz, inst1_key_bax])
-
- # Resource ID
- tags = self.cloud.describe_tags(self.context,
- filter=[{'name': 'resource-id',
- 'value': [ec2_id1]}])['tagSet']
- self.assertEqualSorted(tags, [inst1_key_foo, inst1_key_bax])
-
- # Resource Type
- tags = self.cloud.describe_tags(self.context,
- filter=[{'name': 'resource-type',
- 'value': ['instance']}])['tagSet']
- self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo,
- inst2_key_baz, inst1_key_bax])
-
- # Key, either bare or with wildcards
- tags = self.cloud.describe_tags(self.context,
- filter=[{'name': 'key',
- 'value': ['foo']}])['tagSet']
- self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo])
-
- tags = self.cloud.describe_tags(self.context,
- filter=[{'name': 'key',
- 'value': ['baz']}])['tagSet']
- self.assertEqualSorted(tags, [inst2_key_baz])
-
- tags = self.cloud.describe_tags(self.context,
- filter=[{'name': 'key',
- 'value': ['ba?']}])['tagSet']
- self.assertEqualSorted(tags, [inst1_key_bax, inst2_key_baz])
-
- tags = self.cloud.describe_tags(self.context,
- filter=[{'name': 'key',
- 'value': ['b*']}])['tagSet']
- self.assertEqualSorted(tags, [inst1_key_bax, inst2_key_baz])
-
- # Value, either bare or with wildcards
- tags = self.cloud.describe_tags(self.context,
- filter=[{'name': 'value',
- 'value': ['bar']}])['tagSet']
- self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo])
-
- tags = self.cloud.describe_tags(self.context,
- filter=[{'name': 'value',
- 'value': ['wi*']}])['tagSet']
- self.assertEqual(tags, [inst1_key_bax])
-
- tags = self.cloud.describe_tags(self.context,
- filter=[{'name': 'value',
- 'value': ['quu?']}])['tagSet']
- self.assertEqual(tags, [inst2_key_baz])
-
- # Multiple values
- tags = self.cloud.describe_tags(self.context,
- filter=[{'name': 'key',
- 'value': ['baz', 'bax']}])['tagSet']
- self.assertEqualSorted(tags, [inst2_key_baz, inst1_key_bax])
-
- # Multiple filters (AND): no match
- tags = self.cloud.describe_tags(self.context,
- filter=[{'name': 'key',
- 'value': ['baz']},
- {'name': 'value',
- 'value': ['wibble']}])['tagSet']
- self.assertEqual(tags, [])
-
- # Multiple filters (AND): match
- tags = self.cloud.describe_tags(self.context,
- filter=[{'name': 'key',
- 'value': ['baz']},
- {'name': 'value',
- 'value': ['quux']}])['tagSet']
- self.assertEqualSorted(tags, [inst2_key_baz])
-
- # And we should fail on supported resource types
- self.assertRaises(exception.InvalidParameterValue,
- self.cloud.describe_tags,
- self.context,
- filter=[{'name': 'resource-type',
- 'value': ['instance', 'volume']}])
-
- def test_resource_type_from_id(self):
- self.assertEqual(
- ec2utils.resource_type_from_id(self.context, 'i-12345'),
- 'instance')
- self.assertEqual(
- ec2utils.resource_type_from_id(self.context, 'r-12345'),
- 'reservation')
- self.assertEqual(
- ec2utils.resource_type_from_id(self.context, 'vol-12345'),
- 'volume')
- self.assertEqual(
- ec2utils.resource_type_from_id(self.context, 'snap-12345'),
- 'snapshot')
- self.assertEqual(
- ec2utils.resource_type_from_id(self.context, 'ami-12345'),
- 'image')
- self.assertEqual(
- ec2utils.resource_type_from_id(self.context, 'ari-12345'),
- 'image')
- self.assertEqual(
- ec2utils.resource_type_from_id(self.context, 'aki-12345'),
- 'image')
- self.assertIsNone(
- ec2utils.resource_type_from_id(self.context, 'x-12345'))
-
- @mock.patch.object(ec2utils, 'ec2_vol_id_to_uuid',
- side_effect=lambda
- ec2_volume_id: uuidutils.generate_uuid())
- def test_detach_volume_unattched_error(self, mock_ec2_vol_id_to_uuid):
- # Validates that VolumeUnattached is raised if the volume doesn't
- # have an instance_uuid value.
- ec2_volume_id = 'vol-987654321'
-
- with mock.patch.object(self.cloud.volume_api, 'get',
- side_effect=lambda context, volume_id:
- {'id': volume_id}) as mock_get:
- self.assertRaises(exception.VolumeUnattached,
- self.cloud.detach_volume,
- self.context,
- ec2_volume_id)
- mock_get.assert_called_once_with(self.context, mock.ANY)
- mock_ec2_vol_id_to_uuid.assert_called_once_with(ec2_volume_id)
-
-
-class CloudTestCaseNeutronProxy(test.NoDBTestCase):
- def setUp(self):
- super(CloudTestCaseNeutronProxy, self).setUp()
- cfg.CONF.set_override('security_group_api', 'neutron')
- self.cloud = cloud.CloudController()
- self.original_client = neutronv2.get_client
- neutronv2.get_client = test_neutron.get_client
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id,
- self.project_id,
- is_admin=True)
-
- def tearDown(self):
- neutronv2.get_client = self.original_client
- test_neutron.get_client()._reset()
- super(CloudTestCaseNeutronProxy, self).tearDown()
-
- def test_describe_security_groups(self):
- # Makes sure describe_security_groups works and filters results.
- group_name = 'test'
- description = 'test'
- self.cloud.create_security_group(self.context, group_name,
- description)
- result = self.cloud.describe_security_groups(self.context)
- # NOTE(vish): should have the default group as well
- self.assertEqual(len(result['securityGroupInfo']), 2)
- result = self.cloud.describe_security_groups(self.context,
- group_name=[group_name])
- self.assertEqual(len(result['securityGroupInfo']), 1)
- self.assertEqual(result['securityGroupInfo'][0]['groupName'],
- group_name)
- self.cloud.delete_security_group(self.context, group_name)
-
- def test_describe_security_groups_by_id(self):
- group_name = 'test'
- description = 'test'
- self.cloud.create_security_group(self.context, group_name,
- description)
- neutron = test_neutron.get_client()
- # Get id from neutron since cloud.create_security_group
- # does not expose it.
- search_opts = {'name': group_name}
- groups = neutron.list_security_groups(
- **search_opts)['security_groups']
- result = self.cloud.describe_security_groups(self.context,
- group_id=[groups[0]['id']])
- self.assertEqual(len(result['securityGroupInfo']), 1)
- self.assertEqual(
- result['securityGroupInfo'][0]['groupName'],
- group_name)
- self.cloud.delete_security_group(self.context, group_name)
-
- def test_create_delete_security_group(self):
- descript = 'test description'
- create = self.cloud.create_security_group
- result = create(self.context, 'testgrp', descript)
- group_descript = result['securityGroupSet'][0]['groupDescription']
- self.assertEqual(descript, group_descript)
- delete = self.cloud.delete_security_group
- self.assertTrue(delete(self.context, 'testgrp'))
-
-
-class FormatMappingTestCase(test.TestCase):
-
- def test_format_mapping(self):
- properties = {'block_device_mapping':
- [{'guest_format': None, 'boot_index': 0,
- 'no_device': None, 'volume_id': None,
- 'volume_size': None, 'disk_bus': 'virtio',
- 'image_id': None, 'source_type': 'snapshot',
- 'device_type': 'disk',
- 'snapshot_id': '993b31ac-452e-4fed-b745-7718385f1811',
- 'destination_type': 'volume',
- 'delete_on_termination': None},
- {'guest_format': None, 'boot_index': None,
- 'no_device': None, 'volume_id': None,
- 'volume_size': None, 'disk_bus': None,
- 'image_id': None, 'source_type': 'snapshot',
- 'device_type': None,
- 'snapshot_id': 'b409a2de-1c79-46bf-aa7e-ebdb4bf427ef',
- 'destination_type': 'volume',
- 'delete_on_termination': None}],
- 'checksum': '50bdc35edb03a38d91b1b071afb20a3c',
- 'min_ram': '0', 'disk_format': 'qcow2',
- 'image_name': 'cirros-0.3.0-x86_64-disk', 'bdm_v2': 'True',
- 'image_id': '4fce9db9-d89e-4eea-8d20-e2bae15292c1',
- 'root_device_name': '/dev/vda', 'container_format': 'bare',
- 'min_disk': '0', 'size': '9761280'}
- result = {'description': None,
- 'imageOwnerId': '9fd1513f52f14fe49fa1c83e40c63541',
- 'isPublic': False, 'imageId': 'ami-00000002',
- 'imageState': 'available', 'architecture': None,
- 'imageLocation': 'None (xb)',
- 'rootDeviceType': 'instance-store',
- 'rootDeviceName': '/dev/vda',
- 'imageType': 'machine', 'name': 'xb'}
- cloud._format_mappings(properties, result)
- expected = {'architecture': None,
- 'blockDeviceMapping':
- [{'ebs': {'snapshotId': 'snap-00000002'}}],
- 'description': None,
- 'imageId': 'ami-00000002',
- 'imageLocation': 'None (xb)',
- 'imageOwnerId': '9fd1513f52f14fe49fa1c83e40c63541',
- 'imageState': 'available',
- 'imageType': 'machine',
- 'isPublic': False,
- 'name': 'xb',
- 'rootDeviceName': '/dev/vda',
- 'rootDeviceType': 'instance-store'}
- self.assertEqual(expected, result)
diff --git a/nova/tests/api/ec2/test_ec2_validate.py b/nova/tests/api/ec2/test_ec2_validate.py
deleted file mode 100644
index b75e729a1b..0000000000
--- a/nova/tests/api/ec2/test_ec2_validate.py
+++ /dev/null
@@ -1,277 +0,0 @@
-# Copyright 2012 Cloudscaling, Inc.
-# All Rights Reserved.
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-from oslo.config import cfg
-from oslo.utils import timeutils
-
-from nova.api.ec2 import cloud
-from nova.api.ec2 import ec2utils
-from nova.compute import utils as compute_utils
-from nova import context
-from nova import db
-from nova import exception
-from nova import test
-from nova.tests import cast_as_call
-from nova.tests import fake_network
-from nova.tests import fake_notifier
-from nova.tests.image import fake
-
-CONF = cfg.CONF
-CONF.import_opt('compute_driver', 'nova.virt.driver')
-
-
-class EC2ValidateTestCase(test.TestCase):
- def setUp(self):
- super(EC2ValidateTestCase, self).setUp()
- self.flags(compute_driver='nova.virt.fake.FakeDriver')
-
- def dumb(*args, **kwargs):
- pass
-
- self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
- fake_network.set_stub_network_methods(self.stubs)
-
- # set up our cloud
- self.cloud = cloud.CloudController()
-
- # Short-circuit the conductor service
- self.flags(use_local=True, group='conductor')
-
- # Stub out the notification service so we use the no-op serializer
- # and avoid lazy-load traces with the wrap_exception decorator in
- # the compute service.
- fake_notifier.stub_notifier(self.stubs)
- self.addCleanup(fake_notifier.reset)
-
- # set up services
- self.conductor = self.start_service('conductor',
- manager=CONF.conductor.manager)
- self.compute = self.start_service('compute')
- self.scheduter = self.start_service('scheduler')
- self.network = self.start_service('network')
- self.image_service = fake.FakeImageService()
-
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id,
- self.project_id,
- is_admin=True)
-
- self.EC2_MALFORMED_IDS = ['foobar', '', 123]
- self.EC2_VALID__IDS = ['i-284f3a41', 'i-001', 'i-deadbeef']
-
- self.ec2_id_exception_map = [(x,
- exception.InvalidInstanceIDMalformed)
- for x in self.EC2_MALFORMED_IDS]
- self.ec2_id_exception_map.extend([(x, exception.InstanceNotFound)
- for x in self.EC2_VALID__IDS])
- self.volume_id_exception_map = [(x,
- exception.InvalidVolumeIDMalformed)
- for x in self.EC2_MALFORMED_IDS]
- self.volume_id_exception_map.extend([(x, exception.VolumeNotFound)
- for x in self.EC2_VALID__IDS])
-
- def fake_show(meh, context, id, **kwargs):
- return {'id': id,
- 'container_format': 'ami',
- 'properties': {
- 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'type': 'machine',
- 'image_state': 'available'}}
-
- def fake_detail(self, context, **kwargs):
- image = fake_show(self, context, None)
- image['name'] = kwargs.get('name')
- return [image]
-
- fake.stub_out_image_service(self.stubs)
- self.stubs.Set(fake._FakeImageService, 'show', fake_show)
- self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
-
- self.useFixture(cast_as_call.CastAsCall(self.stubs))
-
- # make sure we can map ami-00000001/2 to a uuid in FakeImageService
- db.s3_image_create(self.context,
- 'cedef40a-ed67-4d10-800e-17455edce175')
- db.s3_image_create(self.context,
- '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
-
- def tearDown(self):
- super(EC2ValidateTestCase, self).tearDown()
- fake.FakeImageService_reset()
-
- # EC2_API tests (InvalidInstanceID.Malformed)
- def test_console_output(self):
- for ec2_id, e in self.ec2_id_exception_map:
- self.assertRaises(e,
- self.cloud.get_console_output,
- context=self.context,
- instance_id=[ec2_id])
-
- def test_describe_instance_attribute(self):
- for ec2_id, e in self.ec2_id_exception_map:
- self.assertRaises(e,
- self.cloud.describe_instance_attribute,
- context=self.context,
- instance_id=ec2_id,
- attribute='kernel')
-
- def test_instance_lifecycle(self):
- lifecycle = [self.cloud.terminate_instances,
- self.cloud.reboot_instances,
- self.cloud.stop_instances,
- self.cloud.start_instances,
- ]
- for cmd in lifecycle:
- for ec2_id, e in self.ec2_id_exception_map:
- self.assertRaises(e,
- cmd,
- context=self.context,
- instance_id=[ec2_id])
-
- def test_create_image(self):
- for ec2_id, e in self.ec2_id_exception_map:
- self.assertRaises(e,
- self.cloud.create_image,
- context=self.context,
- instance_id=ec2_id)
-
- def test_create_snapshot(self):
- for ec2_id, e in self.volume_id_exception_map:
- self.assertRaises(e,
- self.cloud.create_snapshot,
- context=self.context,
- volume_id=ec2_id)
-
- def test_describe_volumes(self):
- for ec2_id, e in self.volume_id_exception_map:
- self.assertRaises(e,
- self.cloud.describe_volumes,
- context=self.context,
- volume_id=[ec2_id])
-
- def test_delete_volume(self):
- for ec2_id, e in self.volume_id_exception_map:
- self.assertRaises(e,
- self.cloud.delete_volume,
- context=self.context,
- volume_id=ec2_id)
-
- def test_detach_volume(self):
- for ec2_id, e in self.volume_id_exception_map:
- self.assertRaises(e,
- self.cloud.detach_volume,
- context=self.context,
- volume_id=ec2_id)
-
-
-class EC2TimestampValidationTestCase(test.NoDBTestCase):
- """Test case for EC2 request timestamp validation."""
-
- def test_validate_ec2_timestamp_valid(self):
- params = {'Timestamp': '2011-04-22T11:29:49Z'}
- expired = ec2utils.is_ec2_timestamp_expired(params)
- self.assertFalse(expired)
-
- def test_validate_ec2_timestamp_old_format(self):
- params = {'Timestamp': '2011-04-22T11:29:49'}
- expired = ec2utils.is_ec2_timestamp_expired(params)
- self.assertTrue(expired)
-
- def test_validate_ec2_timestamp_not_set(self):
- params = {}
- expired = ec2utils.is_ec2_timestamp_expired(params)
- self.assertFalse(expired)
-
- def test_validate_ec2_timestamp_ms_time_regex(self):
- result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123Z')
- self.assertIsNotNone(result)
- result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123456Z')
- self.assertIsNotNone(result)
- result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.1234567Z')
- self.assertIsNone(result)
- result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123')
- self.assertIsNone(result)
- result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49Z')
- self.assertIsNone(result)
-
- def test_validate_ec2_timestamp_aws_sdk_format(self):
- params = {'Timestamp': '2011-04-22T11:29:49.123Z'}
- expired = ec2utils.is_ec2_timestamp_expired(params)
- self.assertFalse(expired)
- expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
- self.assertTrue(expired)
-
- def test_validate_ec2_timestamp_invalid_format(self):
- params = {'Timestamp': '2011-04-22T11:29:49.000P'}
- expired = ec2utils.is_ec2_timestamp_expired(params)
- self.assertTrue(expired)
-
- def test_validate_ec2_timestamp_advanced_time(self):
-
- # EC2 request with Timestamp in advanced time
- timestamp = timeutils.utcnow() + datetime.timedelta(seconds=250)
- params = {'Timestamp': timeutils.strtime(timestamp,
- "%Y-%m-%dT%H:%M:%SZ")}
- expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
- self.assertFalse(expired)
-
- def test_validate_ec2_timestamp_advanced_time_expired(self):
- timestamp = timeutils.utcnow() + datetime.timedelta(seconds=350)
- params = {'Timestamp': timeutils.strtime(timestamp,
- "%Y-%m-%dT%H:%M:%SZ")}
- expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
- self.assertTrue(expired)
-
- def test_validate_ec2_req_timestamp_not_expired(self):
- params = {'Timestamp': timeutils.isotime()}
- expired = ec2utils.is_ec2_timestamp_expired(params, expires=15)
- self.assertFalse(expired)
-
- def test_validate_ec2_req_timestamp_expired(self):
- params = {'Timestamp': '2011-04-22T12:00:00Z'}
- compare = ec2utils.is_ec2_timestamp_expired(params, expires=300)
- self.assertTrue(compare)
-
- def test_validate_ec2_req_expired(self):
- params = {'Expires': timeutils.isotime()}
- expired = ec2utils.is_ec2_timestamp_expired(params)
- self.assertTrue(expired)
-
- def test_validate_ec2_req_not_expired(self):
- expire = timeutils.utcnow() + datetime.timedelta(seconds=350)
- params = {'Expires': timeutils.strtime(expire, "%Y-%m-%dT%H:%M:%SZ")}
- expired = ec2utils.is_ec2_timestamp_expired(params)
- self.assertFalse(expired)
-
- def test_validate_Expires_timestamp_invalid_format(self):
-
- # EC2 request with invalid Expires
- params = {'Expires': '2011-04-22T11:29:49'}
- expired = ec2utils.is_ec2_timestamp_expired(params)
- self.assertTrue(expired)
-
- def test_validate_ec2_req_timestamp_Expires(self):
-
- # EC2 request with both Timestamp and Expires
- params = {'Timestamp': '2011-04-22T11:29:49Z',
- 'Expires': timeutils.isotime()}
- self.assertRaises(exception.InvalidRequest,
- ec2utils.is_ec2_timestamp_expired,
- params)
diff --git a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
deleted file mode 100644
index d801abda64..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
+++ /dev/null
@@ -1,734 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.serialization import jsonutils
-from oslo.utils import timeutils
-import webob
-
-from nova.api.openstack import common
-from nova.api.openstack.compute.contrib import admin_actions as \
- admin_actions_v2
-from nova.api.openstack.compute.plugins.v3 import admin_actions as \
- admin_actions_v21
-from nova.compute import vm_states
-import nova.context
-from nova import exception
-from nova import objects
-from nova.openstack.common import uuidutils
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-
-
-class CommonMixin(object):
- admin_actions = None
- fake_url = None
-
- def _make_request(self, url, body):
- req = webob.Request.blank(self.fake_url + url)
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.content_type = 'application/json'
- return req.get_response(self.app)
-
- def _stub_instance_get(self, uuid=None):
- if uuid is None:
- uuid = uuidutils.generate_uuid()
- instance = fake_instance.fake_db_instance(
- id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
- task_state=None, launched_at=timeutils.utcnow())
- instance = objects.Instance._from_db_object(
- self.context, objects.Instance(), instance)
- self.compute_api.get(self.context, uuid, expected_attrs=None,
- want_objects=True).AndReturn(instance)
- return instance
-
- def _stub_instance_get_failure(self, exc_info, uuid=None):
- if uuid is None:
- uuid = uuidutils.generate_uuid()
- self.compute_api.get(self.context, uuid, expected_attrs=None,
- want_objects=True).AndRaise(exc_info)
- return uuid
-
- def _test_non_existing_instance(self, action, body_map=None):
- uuid = uuidutils.generate_uuid()
- self._stub_instance_get_failure(
- exception.InstanceNotFound(instance_id=uuid), uuid=uuid)
-
- self.mox.ReplayAll()
-
- res = self._make_request('/servers/%s/action' % uuid,
- {action: body_map.get(action)})
- self.assertEqual(404, res.status_int)
- # Do these here instead of tearDown because this method is called
- # more than once for the same test case
- self.mox.VerifyAll()
- self.mox.UnsetStubs()
-
- def _test_action(self, action, body=None, method=None):
- if method is None:
- method = action
-
- instance = self._stub_instance_get()
- getattr(self.compute_api, method)(self.context, instance)
-
- self.mox.ReplayAll()
- res = self._make_request('/servers/%s/action' % instance['uuid'],
- {action: None})
- self.assertEqual(202, res.status_int)
- # Do these here instead of tearDown because this method is called
- # more than once for the same test case
- self.mox.VerifyAll()
- self.mox.UnsetStubs()
-
- def _test_invalid_state(self, action, method=None, body_map=None,
- compute_api_args_map=None):
- if method is None:
- method = action
- if body_map is None:
- body_map = {}
- if compute_api_args_map is None:
- compute_api_args_map = {}
-
- instance = self._stub_instance_get()
-
- args, kwargs = compute_api_args_map.get(action, ((), {}))
-
- getattr(self.compute_api, method)(self.context, instance,
- *args, **kwargs).AndRaise(
- exception.InstanceInvalidState(
- attr='vm_state', instance_uuid=instance['uuid'],
- state='foo', method=method))
-
- self.mox.ReplayAll()
-
- res = self._make_request('/servers/%s/action' % instance['uuid'],
- {action: body_map.get(action)})
- self.assertEqual(409, res.status_int)
- self.assertIn("Cannot \'%(action)s\' instance %(id)s"
- % {'id': instance['uuid'], 'action': action}, res.body)
- # Do these here instead of tearDown because this method is called
- # more than once for the same test case
- self.mox.VerifyAll()
- self.mox.UnsetStubs()
-
- def _test_locked_instance(self, action, method=None, body_map=None,
- compute_api_args_map=None):
- if method is None:
- method = action
-
- instance = self._stub_instance_get()
-
- args, kwargs = (), {}
- act = None
-
- if compute_api_args_map:
- args, kwargs = compute_api_args_map.get(action, ((), {}))
- act = body_map.get(action)
-
- getattr(self.compute_api, method)(self.context, instance,
- *args, **kwargs).AndRaise(
- exception.InstanceIsLocked(instance_uuid=instance['uuid']))
- self.mox.ReplayAll()
- res = self._make_request('/servers/%s/action' % instance['uuid'],
- {action: act})
- self.assertEqual(409, res.status_int)
- self.assertIn('Instance %s is locked' % instance['uuid'], res.body)
- # Do these here instead of tearDown because this method is called
- # more than once for the same test case
- self.mox.VerifyAll()
- self.mox.UnsetStubs()
-
-
-class AdminActionsTestV21(CommonMixin, test.NoDBTestCase):
- admin_actions = admin_actions_v21
- fake_url = '/v2/fake'
-
- def setUp(self):
- super(AdminActionsTestV21, self).setUp()
- self.controller = self.admin_actions.AdminActionsController()
- self.compute_api = self.controller.compute_api
- self.context = nova.context.RequestContext('fake', 'fake')
-
- def _fake_controller(*args, **kwargs):
- return self.controller
-
- self.stubs.Set(self.admin_actions, 'AdminActionsController',
- _fake_controller)
-
- self.app = self._get_app()
- self.mox.StubOutWithMock(self.compute_api, 'get')
-
- def _get_app(self):
- return fakes.wsgi_app_v21(init_only=('servers',
- 'os-admin-actions'),
- fake_auth_context=self.context)
-
- def test_actions(self):
- actions = ['resetNetwork', 'injectNetworkInfo']
- method_translations = {'resetNetwork': 'reset_network',
- 'injectNetworkInfo': 'inject_network_info'}
-
- for action in actions:
- method = method_translations.get(action)
- self.mox.StubOutWithMock(self.compute_api, method or action)
- self._test_action(action, method=method)
- # Re-mock this.
- self.mox.StubOutWithMock(self.compute_api, 'get')
-
- def test_actions_with_non_existed_instance(self):
- actions = ['resetNetwork', 'injectNetworkInfo', 'os-resetState']
- body_map = {'os-resetState': {'state': 'active'}}
-
- for action in actions:
- self._test_non_existing_instance(action,
- body_map=body_map)
- # Re-mock this.
- self.mox.StubOutWithMock(self.compute_api, 'get')
-
- def test_actions_with_locked_instance(self):
- actions = ['resetNetwork', 'injectNetworkInfo']
- method_translations = {'resetNetwork': 'reset_network',
- 'injectNetworkInfo': 'inject_network_info'}
-
- for action in actions:
- method = method_translations.get(action)
- self.mox.StubOutWithMock(self.compute_api, method or action)
- self._test_locked_instance(action, method=method)
- # Re-mock this.
- self.mox.StubOutWithMock(self.compute_api, 'get')
-
-
-class AdminActionsTestV2(AdminActionsTestV21):
- admin_actions = admin_actions_v2
-
- def setUp(self):
- super(AdminActionsTestV2, self).setUp()
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Admin_actions'])
-
- def _get_app(self):
- return fakes.wsgi_app(init_only=('servers',),
- fake_auth_context=self.context)
-
- def test_actions(self):
- actions = ['pause', 'unpause', 'suspend', 'resume', 'migrate',
- 'resetNetwork', 'injectNetworkInfo', 'lock',
- 'unlock']
- method_translations = {'migrate': 'resize',
- 'resetNetwork': 'reset_network',
- 'injectNetworkInfo': 'inject_network_info'}
-
- for action in actions:
- method = method_translations.get(action)
- self.mox.StubOutWithMock(self.compute_api, method or action)
- self._test_action(action, method=method)
- # Re-mock this.
- self.mox.StubOutWithMock(self.compute_api, 'get')
-
- def test_actions_raise_conflict_on_invalid_state(self):
- actions = ['pause', 'unpause', 'suspend', 'resume', 'migrate',
- 'os-migrateLive']
- method_translations = {'migrate': 'resize',
- 'os-migrateLive': 'live_migrate'}
- body_map = {'os-migrateLive':
- {'host': 'hostname',
- 'block_migration': False,
- 'disk_over_commit': False}}
- args_map = {'os-migrateLive': ((False, False, 'hostname'), {})}
-
- for action in actions:
- method = method_translations.get(action)
- self.mox.StubOutWithMock(self.compute_api, method or action)
- self._test_invalid_state(action, method=method, body_map=body_map,
- compute_api_args_map=args_map)
- # Re-mock this.
- self.mox.StubOutWithMock(self.compute_api, 'get')
-
- def test_actions_with_non_existed_instance(self):
- actions = ['pause', 'unpause', 'suspend', 'resume',
- 'resetNetwork', 'injectNetworkInfo', 'lock',
- 'unlock', 'os-resetState', 'migrate', 'os-migrateLive']
- body_map = {'os-resetState': {'state': 'active'},
- 'os-migrateLive':
- {'host': 'hostname',
- 'block_migration': False,
- 'disk_over_commit': False}}
- for action in actions:
- self._test_non_existing_instance(action,
- body_map=body_map)
- # Re-mock this.
- self.mox.StubOutWithMock(self.compute_api, 'get')
-
- def test_actions_with_locked_instance(self):
- actions = ['pause', 'unpause', 'suspend', 'resume', 'migrate',
- 'resetNetwork', 'injectNetworkInfo', 'os-migrateLive']
- method_translations = {'migrate': 'resize',
- 'resetNetwork': 'reset_network',
- 'injectNetworkInfo': 'inject_network_info',
- 'os-migrateLive': 'live_migrate'}
- args_map = {'os-migrateLive': ((False, False, 'hostname'), {})}
- body_map = {'os-migrateLive': {'host': 'hostname',
- 'block_migration': False,
- 'disk_over_commit': False}}
-
- for action in actions:
- method = method_translations.get(action)
- self.mox.StubOutWithMock(self.compute_api, method or action)
- self._test_locked_instance(action, method=method,
- body_map=body_map,
- compute_api_args_map=args_map)
-
- # Re-mock this.
- self.mox.StubOutWithMock(self.compute_api, 'get')
-
- def _test_migrate_exception(self, exc_info, expected_result):
- self.mox.StubOutWithMock(self.compute_api, 'resize')
- instance = self._stub_instance_get()
- self.compute_api.resize(self.context, instance).AndRaise(exc_info)
-
- self.mox.ReplayAll()
-
- res = self._make_request('/servers/%s/action' % instance['uuid'],
- {'migrate': None})
- self.assertEqual(expected_result, res.status_int)
-
- def _test_migrate_live_succeeded(self, param):
- self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
- instance = self._stub_instance_get()
- self.compute_api.live_migrate(self.context, instance, False,
- False, 'hostname')
-
- self.mox.ReplayAll()
-
- res = self._make_request('/servers/%s/action' % instance['uuid'],
- {'os-migrateLive': param})
- self.assertEqual(202, res.status_int)
-
- def test_migrate_live_enabled(self):
- param = {'host': 'hostname',
- 'block_migration': False,
- 'disk_over_commit': False}
- self._test_migrate_live_succeeded(param)
-
- def test_migrate_live_enabled_with_string_param(self):
- param = {'host': 'hostname',
- 'block_migration': "False",
- 'disk_over_commit': "False"}
- self._test_migrate_live_succeeded(param)
-
- def test_migrate_live_missing_dict_param(self):
- body = {'os-migrateLive': {'dummy': 'hostname',
- 'block_migration': False,
- 'disk_over_commit': False}}
- res = self._make_request('/servers/FAKE/action', body)
- self.assertEqual(400, res.status_int)
-
- def test_migrate_live_with_invalid_block_migration(self):
- body = {'os-migrateLive': {'host': 'hostname',
- 'block_migration': "foo",
- 'disk_over_commit': False}}
- res = self._make_request('/servers/FAKE/action', body)
- self.assertEqual(400, res.status_int)
-
- def test_migrate_live_with_invalid_disk_over_commit(self):
- body = {'os-migrateLive': {'host': 'hostname',
- 'block_migration': False,
- 'disk_over_commit': "foo"}}
- res = self._make_request('/servers/FAKE/action', body)
- self.assertEqual(400, res.status_int)
-
- def _test_migrate_live_failed_with_exception(self, fake_exc,
- uuid=None):
- self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
-
- instance = self._stub_instance_get(uuid=uuid)
- self.compute_api.live_migrate(self.context, instance, False,
- False, 'hostname').AndRaise(fake_exc)
-
- self.mox.ReplayAll()
-
- res = self._make_request('/servers/%s/action' % instance.uuid,
- {'os-migrateLive':
- {'host': 'hostname',
- 'block_migration': False,
- 'disk_over_commit': False}})
- self.assertEqual(400, res.status_int)
- self.assertIn(unicode(fake_exc), res.body)
-
- def test_migrate_live_compute_service_unavailable(self):
- self._test_migrate_live_failed_with_exception(
- exception.ComputeServiceUnavailable(host='host'))
-
- def test_migrate_live_invalid_hypervisor_type(self):
- self._test_migrate_live_failed_with_exception(
- exception.InvalidHypervisorType())
-
- def test_migrate_live_invalid_cpu_info(self):
- self._test_migrate_live_failed_with_exception(
- exception.InvalidCPUInfo(reason=""))
-
- def test_migrate_live_unable_to_migrate_to_self(self):
- uuid = uuidutils.generate_uuid()
- self._test_migrate_live_failed_with_exception(
- exception.UnableToMigrateToSelf(instance_id=uuid,
- host='host'),
- uuid=uuid)
-
- def test_migrate_live_destination_hypervisor_too_old(self):
- self._test_migrate_live_failed_with_exception(
- exception.DestinationHypervisorTooOld())
-
- def test_migrate_live_no_valid_host(self):
- self._test_migrate_live_failed_with_exception(
- exception.NoValidHost(reason=''))
-
- def test_migrate_live_invalid_local_storage(self):
- self._test_migrate_live_failed_with_exception(
- exception.InvalidLocalStorage(path='', reason=''))
-
- def test_migrate_live_invalid_shared_storage(self):
- self._test_migrate_live_failed_with_exception(
- exception.InvalidSharedStorage(path='', reason=''))
-
- def test_migrate_live_hypervisor_unavailable(self):
- self._test_migrate_live_failed_with_exception(
- exception.HypervisorUnavailable(host=""))
-
- def test_migrate_live_instance_not_running(self):
- self._test_migrate_live_failed_with_exception(
- exception.InstanceNotRunning(instance_id=""))
-
- def test_migrate_live_migration_pre_check_error(self):
- self._test_migrate_live_failed_with_exception(
- exception.MigrationPreCheckError(reason=''))
-
- def test_unlock_not_authorized(self):
- self.mox.StubOutWithMock(self.compute_api, 'unlock')
-
- instance = self._stub_instance_get()
-
- self.compute_api.unlock(self.context, instance).AndRaise(
- exception.PolicyNotAuthorized(action='unlock'))
-
- self.mox.ReplayAll()
-
- res = self._make_request('/servers/%s/action' % instance['uuid'],
- {'unlock': None})
- self.assertEqual(403, res.status_int)
-
-
-class CreateBackupTestsV2(CommonMixin, test.NoDBTestCase):
- fake_url = '/v2/fake'
-
- def setUp(self):
- super(CreateBackupTestsV2, self).setUp()
- self.controller = admin_actions_v2.AdminActionsController()
- self.compute_api = self.controller.compute_api
- self.context = nova.context.RequestContext('fake', 'fake')
-
- def _fake_controller(*args, **kwargs):
- return self.controller
-
- self.stubs.Set(admin_actions_v2, 'AdminActionsController',
- _fake_controller)
-
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Admin_actions'])
-
- self.app = fakes.wsgi_app(init_only=('servers',),
- fake_auth_context=self.context)
- self.mox.StubOutWithMock(self.compute_api, 'get')
- self.mox.StubOutWithMock(common,
- 'check_img_metadata_properties_quota')
- self.mox.StubOutWithMock(self.compute_api,
- 'backup')
-
- def _make_url(self, uuid):
- return '/servers/%s/action' % uuid
-
- def test_create_backup_with_metadata(self):
- metadata = {'123': 'asdf'}
- body = {
- 'createBackup': {
- 'name': 'Backup 1',
- 'backup_type': 'daily',
- 'rotation': 1,
- 'metadata': metadata,
- },
- }
-
- image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
- properties=metadata)
-
- common.check_img_metadata_properties_quota(self.context, metadata)
- instance = self._stub_instance_get()
- self.compute_api.backup(self.context, instance, 'Backup 1',
- 'daily', 1,
- extra_properties=metadata).AndReturn(image)
-
- self.mox.ReplayAll()
-
- res = self._make_request(self._make_url(instance['uuid']), body=body)
- self.assertEqual(202, res.status_int)
- self.assertIn('fake-image-id', res.headers['Location'])
-
- def test_create_backup_no_name(self):
- # Name is required for backups.
- body = {
- 'createBackup': {
- 'backup_type': 'daily',
- 'rotation': 1,
- },
- }
- res = self._make_request(self._make_url('fake'), body=body)
- self.assertEqual(400, res.status_int)
-
- def test_create_backup_no_rotation(self):
- # Rotation is required for backup requests.
- body = {
- 'createBackup': {
- 'name': 'Backup 1',
- 'backup_type': 'daily',
- },
- }
- res = self._make_request(self._make_url('fake'), body=body)
- self.assertEqual(400, res.status_int)
-
- def test_create_backup_negative_rotation(self):
- """Rotation must be greater than or equal to zero
- for backup requests
- """
- body = {
- 'createBackup': {
- 'name': 'Backup 1',
- 'backup_type': 'daily',
- 'rotation': -1,
- },
- }
- res = self._make_request(self._make_url('fake'), body=body)
- self.assertEqual(400, res.status_int)
-
- def test_create_backup_no_backup_type(self):
- # Backup Type (daily or weekly) is required for backup requests.
- body = {
- 'createBackup': {
- 'name': 'Backup 1',
- 'rotation': 1,
- },
- }
- res = self._make_request(self._make_url('fake'), body=body)
- self.assertEqual(400, res.status_int)
-
- def test_create_backup_bad_entity(self):
- body = {'createBackup': 'go'}
- res = self._make_request(self._make_url('fake'), body=body)
- self.assertEqual(400, res.status_int)
-
- def test_create_backup_rotation_is_zero(self):
- # The happy path for creating backups if rotation is zero.
- body = {
- 'createBackup': {
- 'name': 'Backup 1',
- 'backup_type': 'daily',
- 'rotation': 0,
- },
- }
-
- image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
- properties={})
- common.check_img_metadata_properties_quota(self.context, {})
- instance = self._stub_instance_get()
- self.compute_api.backup(self.context, instance, 'Backup 1',
- 'daily', 0,
- extra_properties={}).AndReturn(image)
-
- self.mox.ReplayAll()
-
- res = self._make_request(self._make_url(instance['uuid']), body=body)
- self.assertEqual(202, res.status_int)
- self.assertNotIn('Location', res.headers)
-
- def test_create_backup_rotation_is_positive(self):
- # The happy path for creating backups if rotation is positive.
- body = {
- 'createBackup': {
- 'name': 'Backup 1',
- 'backup_type': 'daily',
- 'rotation': 1,
- },
- }
-
- image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
- properties={})
- common.check_img_metadata_properties_quota(self.context, {})
- instance = self._stub_instance_get()
- self.compute_api.backup(self.context, instance, 'Backup 1',
- 'daily', 1,
- extra_properties={}).AndReturn(image)
-
- self.mox.ReplayAll()
-
- res = self._make_request(self._make_url(instance['uuid']), body=body)
- self.assertEqual(202, res.status_int)
- self.assertIn('fake-image-id', res.headers['Location'])
-
- def test_create_backup_raises_conflict_on_invalid_state(self):
- body_map = {
- 'createBackup': {
- 'name': 'Backup 1',
- 'backup_type': 'daily',
- 'rotation': 1,
- },
- }
- args_map = {
- 'createBackup': (
- ('Backup 1', 'daily', 1), {'extra_properties': {}}
- ),
- }
- common.check_img_metadata_properties_quota(self.context, {})
- self._test_invalid_state('createBackup', method='backup',
- body_map=body_map,
- compute_api_args_map=args_map)
-
- def test_create_backup_with_non_existed_instance(self):
- body_map = {
- 'createBackup': {
- 'name': 'Backup 1',
- 'backup_type': 'daily',
- 'rotation': 1,
- },
- }
- common.check_img_metadata_properties_quota(self.context, {})
- self._test_non_existing_instance('createBackup',
- body_map=body_map)
-
- def test_create_backup_with_invalid_createBackup(self):
- body = {
- 'createBackupup': {
- 'name': 'Backup 1',
- 'backup_type': 'daily',
- 'rotation': 1,
- },
- }
- res = self._make_request(self._make_url('fake'), body=body)
- self.assertEqual(400, res.status_int)
-
-
-class ResetStateTestsV21(test.NoDBTestCase):
- admin_act = admin_actions_v21
- bad_request = exception.ValidationError
- fake_url = '/servers'
-
- def setUp(self):
- super(ResetStateTestsV21, self).setUp()
- self.uuid = uuidutils.generate_uuid()
- self.admin_api = self.admin_act.AdminActionsController()
- self.compute_api = self.admin_api.compute_api
-
- url = '%s/%s/action' % (self.fake_url, self.uuid)
- self.request = self._get_request(url)
- self.context = self.request.environ['nova.context']
-
- def _get_request(self, url):
- return fakes.HTTPRequest.blank(url)
-
- def test_no_state(self):
- self.assertRaises(self.bad_request,
- self.admin_api._reset_state,
- self.request, self.uuid,
- body={"os-resetState": None})
-
- def test_bad_state(self):
- self.assertRaises(self.bad_request,
- self.admin_api._reset_state,
- self.request, self.uuid,
- body={"os-resetState": {"state": "spam"}})
-
- def test_no_instance(self):
- self.mox.StubOutWithMock(self.compute_api, 'get')
- exc = exception.InstanceNotFound(instance_id='inst_ud')
- self.compute_api.get(self.context, self.uuid, expected_attrs=None,
- want_objects=True).AndRaise(exc)
- self.mox.ReplayAll()
-
- self.assertRaises(webob.exc.HTTPNotFound,
- self.admin_api._reset_state,
- self.request, self.uuid,
- body={"os-resetState": {"state": "active"}})
-
- def _setup_mock(self, expected):
- instance = objects.Instance()
- instance.uuid = self.uuid
- instance.vm_state = 'fake'
- instance.task_state = 'fake'
- instance.obj_reset_changes()
-
- self.mox.StubOutWithMock(instance, 'save')
- self.mox.StubOutWithMock(self.compute_api, 'get')
-
- def check_state(admin_state_reset=True):
- self.assertEqual(set(expected.keys()),
- instance.obj_what_changed())
- for k, v in expected.items():
- self.assertEqual(v, getattr(instance, k),
- "Instance.%s doesn't match" % k)
- instance.obj_reset_changes()
-
- self.compute_api.get(self.context, instance.uuid, expected_attrs=None,
- want_objects=True).AndReturn(instance)
- instance.save(admin_state_reset=True).WithSideEffects(check_state)
-
- def test_reset_active(self):
- self._setup_mock(dict(vm_state=vm_states.ACTIVE,
- task_state=None))
- self.mox.ReplayAll()
-
- body = {"os-resetState": {"state": "active"}}
- result = self.admin_api._reset_state(self.request, self.uuid,
- body=body)
- # NOTE: on v2.1, http status code is set as wsgi_code of API
- # method instead of status_int in a response object.
- if isinstance(self.admin_api,
- admin_actions_v21.AdminActionsController):
- status_int = self.admin_api._reset_state.wsgi_code
- else:
- status_int = result.status_int
- self.assertEqual(202, status_int)
-
- def test_reset_error(self):
- self._setup_mock(dict(vm_state=vm_states.ERROR,
- task_state=None))
- self.mox.ReplayAll()
- body = {"os-resetState": {"state": "error"}}
- result = self.admin_api._reset_state(self.request, self.uuid,
- body=body)
- # NOTE: on v2.1, http status code is set as wsgi_code of API
- # method instead of status_int in a response object.
- if isinstance(self.admin_api,
- admin_actions_v21.AdminActionsController):
- status_int = self.admin_api._reset_state.wsgi_code
- else:
- status_int = result.status_int
- self.assertEqual(202, status_int)
-
-
-class ResetStateTestsV2(ResetStateTestsV21):
- admin_act = admin_actions_v2
- bad_request = webob.exc.HTTPBadRequest
- fake_url = '/fake/servers'
diff --git a/nova/tests/api/openstack/compute/contrib/test_admin_password.py b/nova/tests/api/openstack/compute/contrib/test_admin_password.py
deleted file mode 100644
index 26b2d442b5..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_admin_password.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute.plugins.v3 import admin_password \
- as admin_password_v21
-from nova.compute import api as compute_api
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-def fake_get(self, context, id, expected_attrs=None, want_objects=False):
- return {'uuid': id}
-
-
-def fake_get_non_existent(self, context, id, expected_attrs=None,
- want_objects=False):
- raise exception.InstanceNotFound(instance_id=id)
-
-
-def fake_set_admin_password(self, context, instance, password=None):
- pass
-
-
-def fake_set_admin_password_failed(self, context, instance, password=None):
- raise exception.InstancePasswordSetFailed(instance=instance, reason='')
-
-
-def fake_set_admin_password_not_implemented(self, context, instance,
- password=None):
- raise NotImplementedError()
-
-
-class AdminPasswordTestV21(test.NoDBTestCase):
- plugin = admin_password_v21
-
- def setUp(self):
- super(AdminPasswordTestV21, self).setUp()
- self.stubs.Set(compute_api.API, 'set_admin_password',
- fake_set_admin_password)
- self.stubs.Set(compute_api.API, 'get', fake_get)
- self.app = fakes.wsgi_app_v21(init_only=('servers',
- self.plugin.ALIAS))
-
- def _make_request(self, body):
- req = webob.Request.blank('/v2/fake/servers/1/action')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.content_type = 'application/json'
- res = req.get_response(self.app)
- return res
-
- def test_change_password(self):
- body = {'changePassword': {'adminPass': 'test'}}
- res = self._make_request(body)
- self.assertEqual(res.status_int, 202)
-
- def test_change_password_empty_string(self):
- body = {'changePassword': {'adminPass': ''}}
- res = self._make_request(body)
- self.assertEqual(res.status_int, 202)
-
- def test_change_password_with_non_implement(self):
- body = {'changePassword': {'adminPass': 'test'}}
- self.stubs.Set(compute_api.API, 'set_admin_password',
- fake_set_admin_password_not_implemented)
- res = self._make_request(body)
- self.assertEqual(res.status_int, 501)
-
- def test_change_password_with_non_existed_instance(self):
- body = {'changePassword': {'adminPass': 'test'}}
- self.stubs.Set(compute_api.API, 'get', fake_get_non_existent)
- res = self._make_request(body)
- self.assertEqual(res.status_int, 404)
-
- def test_change_password_with_non_string_password(self):
- body = {'changePassword': {'adminPass': 1234}}
- res = self._make_request(body)
- self.assertEqual(res.status_int, 400)
-
- def test_change_password_failed(self):
- body = {'changePassword': {'adminPass': 'test'}}
- self.stubs.Set(compute_api.API, 'set_admin_password',
- fake_set_admin_password_failed)
- res = self._make_request(body)
- self.assertEqual(res.status_int, 409)
-
- def test_change_password_without_admin_password(self):
- body = {'changPassword': {}}
- res = self._make_request(body)
- self.assertEqual(res.status_int, 400)
-
- def test_change_password_none(self):
- body = {'changePassword': None}
- res = self._make_request(body)
- self.assertEqual(res.status_int, 400)
diff --git a/nova/tests/api/openstack/compute/contrib/test_aggregates.py b/nova/tests/api/openstack/compute/contrib/test_aggregates.py
deleted file mode 100644
index e1df95248e..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_aggregates.py
+++ /dev/null
@@ -1,670 +0,0 @@
-# Copyright (c) 2012 Citrix Systems, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Tests for the aggregates admin api."""
-
-import mock
-from webob import exc
-
-from nova.api.openstack.compute.contrib import aggregates as aggregates_v2
-from nova.api.openstack.compute.plugins.v3 import aggregates as aggregates_v21
-from nova import context
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import matchers
-
-AGGREGATE_LIST = [
- {"name": "aggregate1", "id": "1", "availability_zone": "nova1"},
- {"name": "aggregate2", "id": "2", "availability_zone": "nova1"},
- {"name": "aggregate3", "id": "3", "availability_zone": "nova2"},
- {"name": "aggregate1", "id": "4", "availability_zone": "nova1"}]
-AGGREGATE = {"name": "aggregate1",
- "id": "1",
- "availability_zone": "nova1",
- "metadata": {"foo": "bar"},
- "hosts": ["host1, host2"]}
-
-FORMATTED_AGGREGATE = {"name": "aggregate1",
- "id": "1",
- "availability_zone": "nova1"}
-
-
-class FakeRequest(object):
- environ = {"nova.context": context.get_admin_context()}
-
-
-class AggregateTestCaseV21(test.NoDBTestCase):
- """Test Case for aggregates admin api."""
-
- add_host = 'self.controller._add_host'
- remove_host = 'self.controller._remove_host'
- set_metadata = 'self.controller._set_metadata'
- bad_request = exception.ValidationError
-
- def _set_up(self):
- self.controller = aggregates_v21.AggregateController()
- self.req = fakes.HTTPRequest.blank('/v3/os-aggregates',
- use_admin_context=True)
- self.user_req = fakes.HTTPRequest.blank('/v3/os-aggregates')
- self.context = self.req.environ['nova.context']
-
- def setUp(self):
- super(AggregateTestCaseV21, self).setUp()
- self._set_up()
-
- def test_index(self):
- def stub_list_aggregates(context):
- if context is None:
- raise Exception()
- return AGGREGATE_LIST
- self.stubs.Set(self.controller.api, 'get_aggregate_list',
- stub_list_aggregates)
-
- result = self.controller.index(self.req)
-
- self.assertEqual(AGGREGATE_LIST, result["aggregates"])
-
- def test_index_no_admin(self):
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.index,
- self.user_req)
-
- def test_create(self):
- def stub_create_aggregate(context, name, availability_zone):
- self.assertEqual(context, self.context, "context")
- self.assertEqual("test", name, "name")
- self.assertEqual("nova1", availability_zone, "availability_zone")
- return AGGREGATE
- self.stubs.Set(self.controller.api, "create_aggregate",
- stub_create_aggregate)
-
- result = self.controller.create(self.req, body={"aggregate":
- {"name": "test",
- "availability_zone": "nova1"}})
- self.assertEqual(FORMATTED_AGGREGATE, result["aggregate"])
-
- def test_create_no_admin(self):
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.create, self.user_req,
- body={"aggregate":
- {"name": "test",
- "availability_zone": "nova1"}})
-
- def test_create_with_duplicate_aggregate_name(self):
- def stub_create_aggregate(context, name, availability_zone):
- raise exception.AggregateNameExists(aggregate_name=name)
- self.stubs.Set(self.controller.api, "create_aggregate",
- stub_create_aggregate)
-
- self.assertRaises(exc.HTTPConflict, self.controller.create,
- self.req, body={"aggregate":
- {"name": "test",
- "availability_zone": "nova1"}})
-
- def test_create_with_incorrect_availability_zone(self):
- def stub_create_aggregate(context, name, availability_zone):
- raise exception.InvalidAggregateAction(action='create_aggregate',
- aggregate_id="'N/A'",
- reason='invalid zone')
-
- self.stubs.Set(self.controller.api, "create_aggregate",
- stub_create_aggregate)
-
- self.assertRaises(exc.HTTPBadRequest,
- self.controller.create,
- self.req, body={"aggregate":
- {"name": "test",
- "availability_zone": "nova_bad"}})
-
- def test_create_with_no_aggregate(self):
- self.assertRaises(self.bad_request, self.controller.create,
- self.req, body={"foo":
- {"name": "test",
- "availability_zone": "nova1"}})
-
- def test_create_with_no_name(self):
- self.assertRaises(self.bad_request, self.controller.create,
- self.req, body={"aggregate":
- {"foo": "test",
- "availability_zone": "nova1"}})
-
- def test_create_with_no_availability_zone(self):
- def stub_create_aggregate(context, name, availability_zone):
- self.assertEqual(context, self.context, "context")
- self.assertEqual("test", name, "name")
- self.assertIsNone(availability_zone, "availability_zone")
- return AGGREGATE
- self.stubs.Set(self.controller.api, "create_aggregate",
- stub_create_aggregate)
-
- result = self.controller.create(self.req,
- body={"aggregate": {"name": "test"}})
- self.assertEqual(FORMATTED_AGGREGATE, result["aggregate"])
-
- def test_create_with_null_name(self):
- self.assertRaises(self.bad_request, self.controller.create,
- self.req, body={"aggregate":
- {"name": "",
- "availability_zone": "nova1"}})
-
- def test_create_with_name_too_long(self):
- self.assertRaises(self.bad_request, self.controller.create,
- self.req, body={"aggregate":
- {"name": "x" * 256,
- "availability_zone": "nova1"}})
-
- def test_create_with_availability_zone_too_long(self):
- self.assertRaises(self.bad_request, self.controller.create,
- self.req, body={"aggregate":
- {"name": "test",
- "availability_zone": "x" * 256}})
-
- def test_create_with_null_availability_zone(self):
- aggregate = {"name": "aggregate1",
- "id": "1",
- "availability_zone": None,
- "metadata": {},
- "hosts": []}
-
- formatted_aggregate = {"name": "aggregate1",
- "id": "1",
- "availability_zone": None}
-
- def stub_create_aggregate(context, name, az_name):
- self.assertEqual(context, self.context, "context")
- self.assertEqual("aggregate1", name, "name")
- self.assertIsNone(az_name, "availability_zone")
- return aggregate
- self.stubs.Set(self.controller.api, 'create_aggregate',
- stub_create_aggregate)
-
- result = self.controller.create(self.req,
- body={"aggregate":
- {"name": "aggregate1",
- "availability_zone": None}})
- self.assertEqual(formatted_aggregate, result["aggregate"])
-
- def test_create_with_empty_availability_zone(self):
- self.assertRaises(self.bad_request, self.controller.create,
- self.req, body={"aggregate":
- {"name": "test",
- "availability_zone": ""}})
-
- def test_create_with_extra_invalid_arg(self):
- self.assertRaises(self.bad_request, self.controller.create,
- self.req, body={"name": "test",
- "availability_zone": "nova1",
- "foo": 'bar'})
-
- def test_show(self):
- def stub_get_aggregate(context, id):
- self.assertEqual(context, self.context, "context")
- self.assertEqual("1", id, "id")
- return AGGREGATE
- self.stubs.Set(self.controller.api, 'get_aggregate',
- stub_get_aggregate)
-
- aggregate = self.controller.show(self.req, "1")
-
- self.assertEqual(AGGREGATE, aggregate["aggregate"])
-
- def test_show_no_admin(self):
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.show,
- self.user_req, "1")
-
- def test_show_with_invalid_id(self):
- def stub_get_aggregate(context, id):
- raise exception.AggregateNotFound(aggregate_id=2)
-
- self.stubs.Set(self.controller.api, 'get_aggregate',
- stub_get_aggregate)
-
- self.assertRaises(exc.HTTPNotFound,
- self.controller.show, self.req, "2")
-
- def test_update(self):
- body = {"aggregate": {"name": "new_name",
- "availability_zone": "nova1"}}
-
- def stub_update_aggregate(context, aggregate, values):
- self.assertEqual(context, self.context, "context")
- self.assertEqual("1", aggregate, "aggregate")
- self.assertEqual(body["aggregate"], values, "values")
- return AGGREGATE
- self.stubs.Set(self.controller.api, "update_aggregate",
- stub_update_aggregate)
-
- result = self.controller.update(self.req, "1", body=body)
-
- self.assertEqual(AGGREGATE, result["aggregate"])
-
- def test_update_no_admin(self):
- body = {"aggregate": {"availability_zone": "nova"}}
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.update,
- self.user_req, "1", body=body)
-
- def test_update_with_only_name(self):
- body = {"aggregate": {"name": "new_name"}}
-
- def stub_update_aggregate(context, aggregate, values):
- return AGGREGATE
- self.stubs.Set(self.controller.api, "update_aggregate",
- stub_update_aggregate)
-
- result = self.controller.update(self.req, "1", body=body)
-
- self.assertEqual(AGGREGATE, result["aggregate"])
-
- def test_update_with_only_availability_zone(self):
- body = {"aggregate": {"availability_zone": "nova1"}}
-
- def stub_update_aggregate(context, aggregate, values):
- return AGGREGATE
- self.stubs.Set(self.controller.api, "update_aggregate",
- stub_update_aggregate)
- result = self.controller.update(self.req, "1", body=body)
- self.assertEqual(AGGREGATE, result["aggregate"])
-
- def test_update_with_no_updates(self):
- test_metadata = {"aggregate": {}}
- self.assertRaises(self.bad_request, self.controller.update,
- self.req, "2", body=test_metadata)
-
- def test_update_with_no_update_key(self):
- test_metadata = {"asdf": {}}
- self.assertRaises(self.bad_request, self.controller.update,
- self.req, "2", body=test_metadata)
-
- def test_update_with_wrong_updates(self):
- test_metadata = {"aggregate": {"status": "disable",
- "foo": "bar"}}
- self.assertRaises(self.bad_request, self.controller.update,
- self.req, "2", body=test_metadata)
-
- def test_update_with_null_name(self):
- test_metadata = {"aggregate": {"name": ""}}
- self.assertRaises(self.bad_request, self.controller.update,
- self.req, "2", body=test_metadata)
-
- def test_update_with_name_too_long(self):
- test_metadata = {"aggregate": {"name": "x" * 256}}
- self.assertRaises(self.bad_request, self.controller.update,
- self.req, "2", body=test_metadata)
-
- def test_update_with_availability_zone_too_long(self):
- test_metadata = {"aggregate": {"availability_zone": "x" * 256}}
- self.assertRaises(self.bad_request, self.controller.update,
- self.req, "2", body=test_metadata)
-
- def test_update_with_empty_availability_zone(self):
- test_metadata = {"aggregate": {"availability_zone": ""}}
- self.assertRaises(self.bad_request, self.controller.update,
- self.req, "2", body=test_metadata)
-
- def test_update_with_null_availability_zone(self):
- body = {"aggregate": {"availability_zone": None}}
- aggre = {"name": "aggregate1",
- "id": "1",
- "availability_zone": None}
-
- def stub_update_aggregate(context, aggregate, values):
- self.assertEqual(context, self.context, "context")
- self.assertEqual("1", aggregate, "aggregate")
- self.assertIsNone(values["availability_zone"], "availability_zone")
- return aggre
- self.stubs.Set(self.controller.api, "update_aggregate",
- stub_update_aggregate)
-
- result = self.controller.update(self.req, "1", body=body)
-
- self.assertEqual(aggre, result["aggregate"])
-
- def test_update_with_bad_aggregate(self):
- test_metadata = {"aggregate": {"name": "test_name"}}
-
- def stub_update_aggregate(context, aggregate, metadata):
- raise exception.AggregateNotFound(aggregate_id=2)
- self.stubs.Set(self.controller.api, "update_aggregate",
- stub_update_aggregate)
-
- self.assertRaises(exc.HTTPNotFound, self.controller.update,
- self.req, "2", body=test_metadata)
-
- def test_update_with_duplicated_name(self):
- test_metadata = {"aggregate": {"name": "test_name"}}
-
- def stub_update_aggregate(context, aggregate, metadata):
- raise exception.AggregateNameExists(aggregate_name="test_name")
-
- self.stubs.Set(self.controller.api, "update_aggregate",
- stub_update_aggregate)
- self.assertRaises(exc.HTTPConflict, self.controller.update,
- self.req, "2", body=test_metadata)
-
- def test_invalid_action(self):
- body = {"append_host": {"host": "host1"}}
- self.assertRaises(self.bad_request,
- eval(self.add_host), self.req, "1", body=body)
-
- def test_update_with_invalid_action(self):
- with mock.patch.object(self.controller.api, "update_aggregate",
- side_effect=exception.InvalidAggregateAction(
- action='invalid', aggregate_id='agg1', reason= "not empty")):
- body = {"aggregate": {"availability_zone": "nova"}}
- self.assertRaises(exc.HTTPBadRequest, self.controller.update,
- self.req, "1", body=body)
-
- def test_add_host(self):
- def stub_add_host_to_aggregate(context, aggregate, host):
- self.assertEqual(context, self.context, "context")
- self.assertEqual("1", aggregate, "aggregate")
- self.assertEqual("host1", host, "host")
- return AGGREGATE
- self.stubs.Set(self.controller.api, "add_host_to_aggregate",
- stub_add_host_to_aggregate)
-
- aggregate = eval(self.add_host)(self.req, "1",
- body={"add_host": {"host":
- "host1"}})
-
- self.assertEqual(aggregate["aggregate"], AGGREGATE)
-
- def test_add_host_no_admin(self):
- self.assertRaises(exception.PolicyNotAuthorized,
- eval(self.add_host),
- self.user_req, "1",
- body={"add_host": {"host": "host1"}})
-
- def test_add_host_with_already_added_host(self):
- def stub_add_host_to_aggregate(context, aggregate, host):
- raise exception.AggregateHostExists(aggregate_id=aggregate,
- host=host)
- self.stubs.Set(self.controller.api, "add_host_to_aggregate",
- stub_add_host_to_aggregate)
-
- self.assertRaises(exc.HTTPConflict, eval(self.add_host),
- self.req, "1",
- body={"add_host": {"host": "host1"}})
-
- def test_add_host_with_bad_aggregate(self):
- def stub_add_host_to_aggregate(context, aggregate, host):
- raise exception.AggregateNotFound(aggregate_id=aggregate)
- self.stubs.Set(self.controller.api, "add_host_to_aggregate",
- stub_add_host_to_aggregate)
-
- self.assertRaises(exc.HTTPNotFound, eval(self.add_host),
- self.req, "bogus_aggregate",
- body={"add_host": {"host": "host1"}})
-
- def test_add_host_with_bad_host(self):
- def stub_add_host_to_aggregate(context, aggregate, host):
- raise exception.ComputeHostNotFound(host=host)
- self.stubs.Set(self.controller.api, "add_host_to_aggregate",
- stub_add_host_to_aggregate)
-
- self.assertRaises(exc.HTTPNotFound, eval(self.add_host),
- self.req, "1",
- body={"add_host": {"host": "bogus_host"}})
-
- def test_add_host_with_missing_host(self):
- self.assertRaises(self.bad_request, eval(self.add_host),
- self.req, "1", body={"add_host": {"asdf": "asdf"}})
-
- def test_add_host_with_invalid_format_host(self):
- self.assertRaises(self.bad_request, eval(self.add_host),
- self.req, "1", body={"add_host": {"host": "a" * 300}})
-
- def test_add_host_with_multiple_hosts(self):
- self.assertRaises(self.bad_request, eval(self.add_host),
- self.req, "1", body={"add_host": {"host": ["host1", "host2"]}})
-
- def test_add_host_raises_key_error(self):
- def stub_add_host_to_aggregate(context, aggregate, host):
- raise KeyError
- self.stubs.Set(self.controller.api, "add_host_to_aggregate",
- stub_add_host_to_aggregate)
- self.assertRaises(exc.HTTPInternalServerError,
- eval(self.add_host), self.req, "1",
- body={"add_host": {"host": "host1"}})
-
- def test_add_host_with_invalid_request(self):
- self.assertRaises(self.bad_request, eval(self.add_host),
- self.req, "1", body={"add_host": "1"})
-
- def test_add_host_with_non_string(self):
- self.assertRaises(self.bad_request, eval(self.add_host),
- self.req, "1", body={"add_host": {"host": 1}})
-
- def test_remove_host(self):
- def stub_remove_host_from_aggregate(context, aggregate, host):
- self.assertEqual(context, self.context, "context")
- self.assertEqual("1", aggregate, "aggregate")
- self.assertEqual("host1", host, "host")
- stub_remove_host_from_aggregate.called = True
- return {}
- self.stubs.Set(self.controller.api,
- "remove_host_from_aggregate",
- stub_remove_host_from_aggregate)
- eval(self.remove_host)(self.req, "1",
- body={"remove_host": {"host": "host1"}})
-
- self.assertTrue(stub_remove_host_from_aggregate.called)
-
- def test_remove_host_no_admin(self):
- self.assertRaises(exception.PolicyNotAuthorized,
- eval(self.remove_host),
- self.user_req, "1",
- body={"remove_host": {"host": "host1"}})
-
- def test_remove_host_with_bad_aggregate(self):
- def stub_remove_host_from_aggregate(context, aggregate, host):
- raise exception.AggregateNotFound(aggregate_id=aggregate)
- self.stubs.Set(self.controller.api,
- "remove_host_from_aggregate",
- stub_remove_host_from_aggregate)
-
- self.assertRaises(exc.HTTPNotFound, eval(self.remove_host),
- self.req, "bogus_aggregate",
- body={"remove_host": {"host": "host1"}})
-
- def test_remove_host_with_host_not_in_aggregate(self):
- def stub_remove_host_from_aggregate(context, aggregate, host):
- raise exception.AggregateHostNotFound(aggregate_id=aggregate,
- host=host)
- self.stubs.Set(self.controller.api,
- "remove_host_from_aggregate",
- stub_remove_host_from_aggregate)
-
- self.assertRaises(exc.HTTPNotFound, eval(self.remove_host),
- self.req, "1",
- body={"remove_host": {"host": "host1"}})
-
- def test_remove_host_with_bad_host(self):
- def stub_remove_host_from_aggregate(context, aggregate, host):
- raise exception.ComputeHostNotFound(host=host)
- self.stubs.Set(self.controller.api,
- "remove_host_from_aggregate",
- stub_remove_host_from_aggregate)
-
- self.assertRaises(exc.HTTPNotFound, eval(self.remove_host),
- self.req, "1", body={"remove_host": {"host": "bogushost"}})
-
- def test_remove_host_with_missing_host(self):
- self.assertRaises(self.bad_request, eval(self.remove_host),
- self.req, "1", body={"asdf": "asdf"})
-
- def test_remove_host_with_multiple_hosts(self):
- self.assertRaises(self.bad_request, eval(self.remove_host),
- self.req, "1", body={"remove_host": {"host":
- ["host1", "host2"]}})
-
- def test_remove_host_with_extra_param(self):
- self.assertRaises(self.bad_request, eval(self.remove_host),
- self.req, "1", body={"remove_host": {"asdf": "asdf",
- "host": "asdf"}})
-
- def test_remove_host_with_invalid_request(self):
- self.assertRaises(self.bad_request,
- eval(self.remove_host),
- self.req, "1", body={"remove_host": "1"})
-
- def test_remove_host_with_missing_host_empty(self):
- self.assertRaises(self.bad_request,
- eval(self.remove_host),
- self.req, "1", body={"remove_host": {}})
-
- def test_set_metadata(self):
- body = {"set_metadata": {"metadata": {"foo": "bar"}}}
-
- def stub_update_aggregate(context, aggregate, values):
- self.assertEqual(context, self.context, "context")
- self.assertEqual("1", aggregate, "aggregate")
- self.assertThat(body["set_metadata"]['metadata'],
- matchers.DictMatches(values))
- return AGGREGATE
- self.stubs.Set(self.controller.api,
- "update_aggregate_metadata",
- stub_update_aggregate)
-
- result = eval(self.set_metadata)(self.req, "1", body=body)
-
- self.assertEqual(AGGREGATE, result["aggregate"])
-
- def test_set_metadata_delete(self):
- body = {"set_metadata": {"metadata": {"foo": None}}}
-
- with mock.patch.object(self.controller.api,
- 'update_aggregate_metadata') as mocked:
- mocked.return_value = AGGREGATE
- result = eval(self.set_metadata)(self.req, "1", body=body)
-
- self.assertEqual(AGGREGATE, result["aggregate"])
- mocked.assert_called_once_with(self.context, "1",
- body["set_metadata"]["metadata"])
-
- def test_set_metadata_no_admin(self):
- self.assertRaises(exception.PolicyNotAuthorized,
- eval(self.set_metadata),
- self.user_req, "1",
- body={"set_metadata": {"metadata":
- {"foo": "bar"}}})
-
- def test_set_metadata_with_bad_aggregate(self):
- body = {"set_metadata": {"metadata": {"foo": "bar"}}}
-
- def stub_update_aggregate(context, aggregate, metadata):
- raise exception.AggregateNotFound(aggregate_id=aggregate)
- self.stubs.Set(self.controller.api,
- "update_aggregate_metadata",
- stub_update_aggregate)
- self.assertRaises(exc.HTTPNotFound, eval(self.set_metadata),
- self.req, "bad_aggregate", body=body)
-
- def test_set_metadata_with_missing_metadata(self):
- body = {"asdf": {"foo": "bar"}}
- self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
- self.req, "1", body=body)
-
- def test_set_metadata_with_extra_params(self):
- body = {"metadata": {"foo": "bar"}, "asdf": {"foo": "bar"}}
- self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
- self.req, "1", body=body)
-
- def test_set_metadata_without_dict(self):
- body = {"set_metadata": {'metadata': 1}}
- self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
- self.req, "1", body=body)
-
- def test_set_metadata_with_empty_key(self):
- body = {"set_metadata": {"metadata": {"": "value"}}}
- self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
- self.req, "1", body=body)
-
- def test_set_metadata_with_key_too_long(self):
- body = {"set_metadata": {"metadata": {"x" * 256: "value"}}}
- self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
- self.req, "1", body=body)
-
- def test_set_metadata_with_value_too_long(self):
- body = {"set_metadata": {"metadata": {"key": "x" * 256}}}
- self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
- self.req, "1", body=body)
-
- def test_set_metadata_with_string(self):
- body = {"set_metadata": {"metadata": "test"}}
- self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
- self.req, "1", body=body)
-
- def test_delete_aggregate(self):
- def stub_delete_aggregate(context, aggregate):
- self.assertEqual(context, self.context, "context")
- self.assertEqual("1", aggregate, "aggregate")
- stub_delete_aggregate.called = True
- self.stubs.Set(self.controller.api, "delete_aggregate",
- stub_delete_aggregate)
-
- self.controller.delete(self.req, "1")
- self.assertTrue(stub_delete_aggregate.called)
-
- def test_delete_aggregate_no_admin(self):
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.delete,
- self.user_req, "1")
-
- def test_delete_aggregate_with_bad_aggregate(self):
- def stub_delete_aggregate(context, aggregate):
- raise exception.AggregateNotFound(aggregate_id=aggregate)
- self.stubs.Set(self.controller.api, "delete_aggregate",
- stub_delete_aggregate)
-
- self.assertRaises(exc.HTTPNotFound, self.controller.delete,
- self.req, "bogus_aggregate")
-
- def test_delete_aggregate_with_host(self):
- with mock.patch.object(self.controller.api, "delete_aggregate",
- side_effect=exception.InvalidAggregateAction(
- action="delete", aggregate_id="agg1",
- reason="not empty")):
- self.assertRaises(exc.HTTPBadRequest,
- self.controller.delete,
- self.req, "agg1")
-
-
-class AggregateTestCaseV2(AggregateTestCaseV21):
- add_host = 'self.controller.action'
- remove_host = 'self.controller.action'
- set_metadata = 'self.controller.action'
- bad_request = exc.HTTPBadRequest
-
- def _set_up(self):
- self.controller = aggregates_v2.AggregateController()
- self.req = FakeRequest()
- self.user_req = fakes.HTTPRequest.blank('/v2/os-aggregates')
- self.context = self.req.environ['nova.context']
-
- def test_add_host_raises_key_error(self):
- def stub_add_host_to_aggregate(context, aggregate, host):
- raise KeyError
- self.stubs.Set(self.controller.api, "add_host_to_aggregate",
- stub_add_host_to_aggregate)
- # NOTE(mtreinish) The check for a KeyError here is to ensure that
- # if add_host_to_aggregate() raises a KeyError it propagates. At
- # one point the api code would mask the error as a HTTPBadRequest.
- # This test is to ensure that this doesn't occur again.
- self.assertRaises(KeyError, eval(self.add_host), self.req, "1",
- body={"add_host": {"host": "host1"}})
diff --git a/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py b/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py
deleted file mode 100644
index de2cce46b8..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py
+++ /dev/null
@@ -1,455 +0,0 @@
-# Copyright 2012 SINA Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-
-from nova.api.openstack.compute.contrib import attach_interfaces \
- as attach_interfaces_v2
-from nova.api.openstack.compute.plugins.v3 import attach_interfaces \
- as attach_interfaces_v3
-from nova.compute import api as compute_api
-from nova import context
-from nova import exception
-from nova.network import api as network_api
-from nova import objects
-from nova import test
-from nova.tests import fake_network_cache_model
-
-import webob
-from webob import exc
-
-
-CONF = cfg.CONF
-
-FAKE_UUID1 = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-FAKE_UUID2 = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
-
-FAKE_PORT_ID1 = '11111111-1111-1111-1111-111111111111'
-FAKE_PORT_ID2 = '22222222-2222-2222-2222-222222222222'
-FAKE_PORT_ID3 = '33333333-3333-3333-3333-333333333333'
-
-FAKE_NET_ID1 = '44444444-4444-4444-4444-444444444444'
-FAKE_NET_ID2 = '55555555-5555-5555-5555-555555555555'
-FAKE_NET_ID3 = '66666666-6666-6666-6666-666666666666'
-FAKE_BAD_NET_ID = '00000000-0000-0000-0000-000000000000'
-
-port_data1 = {
- "id": FAKE_PORT_ID1,
- "network_id": FAKE_NET_ID1,
- "admin_state_up": True,
- "status": "ACTIVE",
- "mac_address": "aa:aa:aa:aa:aa:aa",
- "fixed_ips": ["10.0.1.2"],
- "device_id": FAKE_UUID1,
-}
-
-port_data2 = {
- "id": FAKE_PORT_ID2,
- "network_id": FAKE_NET_ID2,
- "admin_state_up": True,
- "status": "ACTIVE",
- "mac_address": "bb:bb:bb:bb:bb:bb",
- "fixed_ips": ["10.0.2.2"],
- "device_id": FAKE_UUID1,
-}
-
-port_data3 = {
- "id": FAKE_PORT_ID3,
- "network_id": FAKE_NET_ID3,
- "admin_state_up": True,
- "status": "ACTIVE",
- "mac_address": "bb:bb:bb:bb:bb:bb",
- "fixed_ips": ["10.0.2.2"],
- "device_id": '',
-}
-
-fake_networks = [FAKE_NET_ID1, FAKE_NET_ID2]
-ports = [port_data1, port_data2, port_data3]
-
-
-def fake_list_ports(self, *args, **kwargs):
- result = []
- for port in ports:
- if port['device_id'] == kwargs['device_id']:
- result.append(port)
- return {'ports': result}
-
-
-def fake_show_port(self, context, port_id, **kwargs):
- for port in ports:
- if port['id'] == port_id:
- return {'port': port}
- else:
- raise exception.PortNotFound(port_id=port_id)
-
-
-def fake_attach_interface(self, context, instance, network_id, port_id,
- requested_ip='192.168.1.3'):
- if not network_id:
- # if no network_id is given when add a port to an instance, use the
- # first default network.
- network_id = fake_networks[0]
- if network_id == FAKE_BAD_NET_ID:
- raise exception.NetworkNotFound(network_id=network_id)
- if not port_id:
- port_id = ports[fake_networks.index(network_id)]['id']
- vif = fake_network_cache_model.new_vif()
- vif['id'] = port_id
- vif['network']['id'] = network_id
- vif['network']['subnets'][0]['ips'][0]['address'] = requested_ip
- return vif
-
-
-def fake_detach_interface(self, context, instance, port_id):
- for port in ports:
- if port['id'] == port_id:
- return
- raise exception.PortNotFound(port_id=port_id)
-
-
-def fake_get_instance(self, *args, **kwargs):
- return objects.Instance(uuid=FAKE_UUID1)
-
-
-class InterfaceAttachTestsV21(test.NoDBTestCase):
- url = '/v3/os-interfaces'
- controller_cls = attach_interfaces_v3.InterfaceAttachmentController
- validate_exc = exception.ValidationError
-
- def setUp(self):
- super(InterfaceAttachTestsV21, self).setUp()
- self.flags(auth_strategy=None, group='neutron')
- self.flags(url='http://anyhost/', group='neutron')
- self.flags(url_timeout=30, group='neutron')
- self.stubs.Set(network_api.API, 'show_port', fake_show_port)
- self.stubs.Set(network_api.API, 'list_ports', fake_list_ports)
- self.stubs.Set(compute_api.API, 'get', fake_get_instance)
- self.context = context.get_admin_context()
- self.expected_show = {'interfaceAttachment':
- {'net_id': FAKE_NET_ID1,
- 'port_id': FAKE_PORT_ID1,
- 'mac_addr': port_data1['mac_address'],
- 'port_state': port_data1['status'],
- 'fixed_ips': port_data1['fixed_ips'],
- }}
- self.attachments = self.controller_cls()
-
- @mock.patch.object(compute_api.API, 'get',
- side_effect=exception.InstanceNotFound(instance_id=''))
- def _test_instance_not_found(self, url, func, args, mock_get, kwargs=None,
- method='GET'):
- req = webob.Request.blank(url)
- req.method = method
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
- if not kwargs:
- kwargs = {}
- self.assertRaises(exc.HTTPNotFound, func, req, *args, **kwargs)
-
- def test_show_instance_not_found(self):
- self._test_instance_not_found(self.url + 'fake',
- self.attachments.show, ('fake', 'fake'))
-
- def test_index_instance_not_found(self):
- self._test_instance_not_found(self.url,
- self.attachments.index, ('fake', ))
-
- def test_detach_interface_instance_not_found(self):
- self._test_instance_not_found(self.url + '/fake',
- self.attachments.delete,
- ('fake', 'fake'), method='DELETE')
-
- def test_attach_interface_instance_not_found(self):
- self._test_instance_not_found(
- '/v2/fake/os-interfaces', self.attachments.create, ('fake', ),
- kwargs={'body': {'interfaceAttachment': {}}}, method='POST')
-
- def test_show(self):
- req = webob.Request.blank(self.url + '/show')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- result = self.attachments.show(req, FAKE_UUID1, FAKE_PORT_ID1)
- self.assertEqual(self.expected_show, result)
-
- def test_show_invalid(self):
- req = webob.Request.blank(self.url + '/show')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- self.assertRaises(exc.HTTPNotFound,
- self.attachments.show, req, FAKE_UUID2,
- FAKE_PORT_ID1)
-
- @mock.patch.object(network_api.API, 'show_port',
- side_effect=exception.Forbidden)
- def test_show_forbidden(self, show_port_mock):
- req = webob.Request.blank(self.url + '/show')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- self.assertRaises(exc.HTTPForbidden,
- self.attachments.show, req, FAKE_UUID1,
- FAKE_PORT_ID1)
-
- def test_delete(self):
- self.stubs.Set(compute_api.API, 'detach_interface',
- fake_detach_interface)
- req = webob.Request.blank(self.url + '/delete')
- req.method = 'DELETE'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- result = self.attachments.delete(req, FAKE_UUID1, FAKE_PORT_ID1)
- # NOTE: on v2.1, http status code is set as wsgi_code of API
- # method instead of status_int in a response object.
- if isinstance(self.attachments,
- attach_interfaces_v3.InterfaceAttachmentController):
- status_int = self.attachments.delete.wsgi_code
- else:
- status_int = result.status_int
- self.assertEqual(202, status_int)
-
- def test_detach_interface_instance_locked(self):
- def fake_detach_interface_from_locked_server(self, context,
- instance, port_id):
- raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1)
-
- self.stubs.Set(compute_api.API,
- 'detach_interface',
- fake_detach_interface_from_locked_server)
- req = webob.Request.blank(self.url + '/delete')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- self.assertRaises(exc.HTTPConflict,
- self.attachments.delete,
- req,
- FAKE_UUID1,
- FAKE_PORT_ID1)
-
- def test_delete_interface_not_found(self):
- self.stubs.Set(compute_api.API, 'detach_interface',
- fake_detach_interface)
- req = webob.Request.blank(self.url + '/delete')
- req.method = 'DELETE'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- self.assertRaises(exc.HTTPNotFound,
- self.attachments.delete,
- req,
- FAKE_UUID1,
- 'invaid-port-id')
-
- def test_attach_interface_instance_locked(self):
- def fake_attach_interface_to_locked_server(self, context,
- instance, network_id, port_id, requested_ip):
- raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1)
-
- self.stubs.Set(compute_api.API,
- 'attach_interface',
- fake_attach_interface_to_locked_server)
- req = webob.Request.blank(self.url + '/attach')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
- self.assertRaises(exc.HTTPConflict,
- self.attachments.create, req, FAKE_UUID1,
- body=jsonutils.loads(req.body))
-
- def test_attach_interface_without_network_id(self):
- self.stubs.Set(compute_api.API, 'attach_interface',
- fake_attach_interface)
- req = webob.Request.blank(self.url + '/attach')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
- result = self.attachments.create(req, FAKE_UUID1,
- body=jsonutils.loads(req.body))
- self.assertEqual(result['interfaceAttachment']['net_id'],
- FAKE_NET_ID1)
-
- def test_attach_interface_with_network_id(self):
- self.stubs.Set(compute_api.API, 'attach_interface',
- fake_attach_interface)
- req = webob.Request.blank(self.url + '/attach')
- req.method = 'POST'
- req.body = jsonutils.dumps({'interfaceAttachment':
- {'net_id': FAKE_NET_ID2}})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
- result = self.attachments.create(req, FAKE_UUID1,
- body=jsonutils.loads(req.body))
- self.assertEqual(result['interfaceAttachment']['net_id'],
- FAKE_NET_ID2)
-
- def _attach_interface_bad_request_case(self, body):
- self.stubs.Set(compute_api.API, 'attach_interface',
- fake_attach_interface)
- req = webob.Request.blank(self.url + '/attach')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
- self.assertRaises(exc.HTTPBadRequest,
- self.attachments.create, req, FAKE_UUID1,
- body=jsonutils.loads(req.body))
-
- def test_attach_interface_with_port_and_network_id(self):
- body = {
- 'interfaceAttachment': {
- 'port_id': FAKE_PORT_ID1,
- 'net_id': FAKE_NET_ID2
- }
- }
- self._attach_interface_bad_request_case(body)
-
- def test_attach_interface_with_invalid_data(self):
- body = {
- 'interfaceAttachment': {
- 'net_id': FAKE_BAD_NET_ID
- }
- }
- self._attach_interface_bad_request_case(body)
-
- def test_attach_interface_with_invalid_state(self):
- def fake_attach_interface_invalid_state(*args, **kwargs):
- raise exception.InstanceInvalidState(
- instance_uuid='', attr='', state='',
- method='attach_interface')
-
- self.stubs.Set(compute_api.API, 'attach_interface',
- fake_attach_interface_invalid_state)
- req = webob.Request.blank(self.url + '/attach')
- req.method = 'POST'
- req.body = jsonutils.dumps({'interfaceAttachment':
- {'net_id': FAKE_NET_ID1}})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
- self.assertRaises(exc.HTTPConflict,
- self.attachments.create, req, FAKE_UUID1,
- body=jsonutils.loads(req.body))
-
- def test_detach_interface_with_invalid_state(self):
- def fake_detach_interface_invalid_state(*args, **kwargs):
- raise exception.InstanceInvalidState(
- instance_uuid='', attr='', state='',
- method='detach_interface')
-
- self.stubs.Set(compute_api.API, 'detach_interface',
- fake_detach_interface_invalid_state)
- req = webob.Request.blank(self.url + '/attach')
- req.method = 'DELETE'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
- self.assertRaises(exc.HTTPConflict,
- self.attachments.delete,
- req,
- FAKE_UUID1,
- FAKE_NET_ID1)
-
- def test_attach_interface_invalid_fixed_ip(self):
- req = webob.Request.blank(self.url + '/attach')
- req.method = 'POST'
- body = {
- 'interfaceAttachment': {
- 'net_id': FAKE_NET_ID1,
- 'fixed_ips': [{'ip_address': 'invalid_ip'}]
- }
- }
- req.body = jsonutils.dumps(body)
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
- self.assertRaises(self.validate_exc,
- self.attachments.create, req, FAKE_UUID1,
- body=jsonutils.loads(req.body))
-
- @mock.patch.object(compute_api.API, 'get')
- @mock.patch.object(compute_api.API, 'attach_interface')
- def test_attach_interface_fixed_ip_already_in_use(self,
- attach_mock,
- get_mock):
- fake_instance = objects.Instance(uuid=FAKE_UUID1)
- get_mock.return_value = fake_instance
- attach_mock.side_effect = exception.FixedIpAlreadyInUse(
- address='10.0.2.2', instance_uuid=FAKE_UUID1)
- req = webob.Request.blank(self.url + '/attach')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
- self.assertRaises(exc.HTTPBadRequest,
- self.attachments.create, req, FAKE_UUID1,
- body=jsonutils.loads(req.body))
- attach_mock.assert_called_once_with(self.context, fake_instance, None,
- None, None)
- get_mock.assert_called_once_with(self.context, FAKE_UUID1,
- want_objects=True,
- expected_attrs=None)
-
- def _test_attach_interface_with_invalid_parameter(self, param):
- self.stubs.Set(compute_api.API, 'attach_interface',
- fake_attach_interface)
- req = webob.Request.blank(self.url + '/attach')
- req.method = 'POST'
- req.body = jsonutils.dumps({'interface_attachment': param})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
- self.assertRaises(exception.ValidationError,
- self.attachments.create, req, FAKE_UUID1,
- body=jsonutils.loads(req.body))
-
- def test_attach_interface_instance_with_non_uuid_net_id(self):
- param = {'net_id': 'non_uuid'}
- self._test_attach_interface_with_invalid_parameter(param)
-
- def test_attach_interface_instance_with_non_uuid_port_id(self):
- param = {'port_id': 'non_uuid'}
- self._test_attach_interface_with_invalid_parameter(param)
-
- def test_attach_interface_instance_with_non_array_fixed_ips(self):
- param = {'fixed_ips': 'non_array'}
- self._test_attach_interface_with_invalid_parameter(param)
-
-
-class InterfaceAttachTestsV2(InterfaceAttachTestsV21):
- url = '/v2/fake/os-interfaces'
- controller_cls = attach_interfaces_v2.InterfaceAttachmentController
- validate_exc = exc.HTTPBadRequest
-
- def test_attach_interface_instance_with_non_uuid_net_id(self):
- pass
-
- def test_attach_interface_instance_with_non_uuid_port_id(self):
- pass
-
- def test_attach_interface_instance_with_non_array_fixed_ips(self):
- pass
diff --git a/nova/tests/api/openstack/compute/contrib/test_availability_zone.py b/nova/tests/api/openstack/compute/contrib/test_availability_zone.py
deleted file mode 100644
index 6a09b03fc5..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_availability_zone.py
+++ /dev/null
@@ -1,512 +0,0 @@
-# Copyright 2012 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-from lxml import etree
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute.contrib import availability_zone as az_v2
-from nova.api.openstack.compute import plugins
-from nova.api.openstack.compute.plugins.v3 import availability_zone as az_v21
-from nova.api.openstack.compute.plugins.v3 import servers as servers_v21
-from nova.api.openstack.compute import servers as servers_v2
-from nova.api.openstack import extensions
-from nova import availability_zones
-from nova.compute import api as compute_api
-from nova.compute import flavors
-from nova import context
-from nova import db
-from nova import exception
-from nova import servicegroup
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-from nova.tests.image import fake
-from nova.tests import matchers
-from nova.tests.objects import test_service
-
-FAKE_UUID = fakes.FAKE_UUID
-
-
-def fake_service_get_all(context, disabled=None):
- def __fake_service(binary, availability_zone,
- created_at, updated_at, host, disabled):
- return dict(test_service.fake_service,
- binary=binary,
- availability_zone=availability_zone,
- available_zones=availability_zone,
- created_at=created_at,
- updated_at=updated_at,
- host=host,
- disabled=disabled)
-
- if disabled:
- return [__fake_service("nova-compute", "zone-2",
- datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
- datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
- "fake_host-1", True),
- __fake_service("nova-scheduler", "internal",
- datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
- datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
- "fake_host-1", True),
- __fake_service("nova-network", "internal",
- datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
- datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
- "fake_host-2", True)]
- else:
- return [__fake_service("nova-compute", "zone-1",
- datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
- datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
- "fake_host-1", False),
- __fake_service("nova-sched", "internal",
- datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
- datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
- "fake_host-1", False),
- __fake_service("nova-network", "internal",
- datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
- datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
- "fake_host-2", False)]
-
-
-def fake_service_is_up(self, service):
- return service['binary'] != u"nova-network"
-
-
-def fake_set_availability_zones(context, services):
- return services
-
-
-def fake_get_availability_zones(context):
- return ['nova'], []
-
-
-CONF = cfg.CONF
-
-
-class AvailabilityZoneApiTestV21(test.NoDBTestCase):
- availability_zone = az_v21
- url = '/v2/fake/os-availability-zone'
-
- def setUp(self):
- super(AvailabilityZoneApiTestV21, self).setUp()
- availability_zones.reset_cache()
- self.stubs.Set(db, 'service_get_all', fake_service_get_all)
- self.stubs.Set(availability_zones, 'set_availability_zones',
- fake_set_availability_zones)
- self.stubs.Set(servicegroup.API, 'service_is_up', fake_service_is_up)
-
- def _get_wsgi_instance(self):
- return fakes.wsgi_app_v21(init_only=('os-availability-zone',
- 'servers'))
-
- def test_filtered_availability_zones(self):
- az = self.availability_zone.AvailabilityZoneController()
- zones = ['zone1', 'internal']
- expected = [{'zoneName': 'zone1',
- 'zoneState': {'available': True},
- "hosts": None}]
- result = az._get_filtered_availability_zones(zones, True)
- self.assertEqual(result, expected)
-
- expected = [{'zoneName': 'zone1',
- 'zoneState': {'available': False},
- "hosts": None}]
- result = az._get_filtered_availability_zones(zones, False)
- self.assertEqual(result, expected)
-
- def test_availability_zone_index(self):
- req = webob.Request.blank(self.url)
- resp = req.get_response(self._get_wsgi_instance())
- self.assertEqual(resp.status_int, 200)
- resp_dict = jsonutils.loads(resp.body)
-
- self.assertIn('availabilityZoneInfo', resp_dict)
- zones = resp_dict['availabilityZoneInfo']
- self.assertEqual(len(zones), 2)
- self.assertEqual(zones[0]['zoneName'], u'zone-1')
- self.assertTrue(zones[0]['zoneState']['available'])
- self.assertIsNone(zones[0]['hosts'])
- self.assertEqual(zones[1]['zoneName'], u'zone-2')
- self.assertFalse(zones[1]['zoneState']['available'])
- self.assertIsNone(zones[1]['hosts'])
-
- def test_availability_zone_detail(self):
- def _formatZone(zone_dict):
- result = []
-
- # Zone tree view item
- result.append({'zoneName': zone_dict['zoneName'],
- 'zoneState': u'available'
- if zone_dict['zoneState']['available'] else
- u'not available'})
-
- if zone_dict['hosts'] is not None:
- for (host, services) in zone_dict['hosts'].items():
- # Host tree view item
- result.append({'zoneName': u'|- %s' % host,
- 'zoneState': u''})
- for (svc, state) in services.items():
- # Service tree view item
- result.append({'zoneName': u'| |- %s' % svc,
- 'zoneState': u'%s %s %s' % (
- 'enabled' if state['active'] else
- 'disabled',
- ':-)' if state['available'] else
- 'XXX',
- jsonutils.to_primitive(
- state['updated_at']))})
- return result
-
- def _assertZone(zone, name, status):
- self.assertEqual(zone['zoneName'], name)
- self.assertEqual(zone['zoneState'], status)
-
- availabilityZone = self.availability_zone.AvailabilityZoneController()
-
- req_url = self.url + '/detail'
- req = webob.Request.blank(req_url)
- req.method = 'GET'
- req.environ['nova.context'] = context.get_admin_context()
- resp_dict = availabilityZone.detail(req)
-
- self.assertIn('availabilityZoneInfo', resp_dict)
- zones = resp_dict['availabilityZoneInfo']
- self.assertEqual(len(zones), 3)
-
- ''' availabilityZoneInfo field content in response body:
- [{'zoneName': 'zone-1',
- 'zoneState': {'available': True},
- 'hosts': {'fake_host-1': {
- 'nova-compute': {'active': True, 'available': True,
- 'updated_at': datetime(2012, 12, 26, 14, 45, 25)}}}},
- {'zoneName': 'internal',
- 'zoneState': {'available': True},
- 'hosts': {'fake_host-1': {
- 'nova-sched': {'active': True, 'available': True,
- 'updated_at': datetime(2012, 12, 26, 14, 45, 25)}},
- 'fake_host-2': {
- 'nova-network': {'active': True, 'available': False,
- 'updated_at': datetime(2012, 12, 26, 14, 45, 24)}}}},
- {'zoneName': 'zone-2',
- 'zoneState': {'available': False},
- 'hosts': None}]
- '''
-
- l0 = [u'zone-1', u'available']
- l1 = [u'|- fake_host-1', u'']
- l2 = [u'| |- nova-compute', u'enabled :-) 2012-12-26T14:45:25.000000']
- l3 = [u'internal', u'available']
- l4 = [u'|- fake_host-1', u'']
- l5 = [u'| |- nova-sched', u'enabled :-) 2012-12-26T14:45:25.000000']
- l6 = [u'|- fake_host-2', u'']
- l7 = [u'| |- nova-network', u'enabled XXX 2012-12-26T14:45:24.000000']
- l8 = [u'zone-2', u'not available']
-
- z0 = _formatZone(zones[0])
- z1 = _formatZone(zones[1])
- z2 = _formatZone(zones[2])
-
- self.assertEqual(len(z0), 3)
- self.assertEqual(len(z1), 5)
- self.assertEqual(len(z2), 1)
-
- _assertZone(z0[0], l0[0], l0[1])
- _assertZone(z0[1], l1[0], l1[1])
- _assertZone(z0[2], l2[0], l2[1])
- _assertZone(z1[0], l3[0], l3[1])
- _assertZone(z1[1], l4[0], l4[1])
- _assertZone(z1[2], l5[0], l5[1])
- _assertZone(z1[3], l6[0], l6[1])
- _assertZone(z1[4], l7[0], l7[1])
- _assertZone(z2[0], l8[0], l8[1])
-
- def test_availability_zone_detail_no_services(self):
- expected_response = {'availabilityZoneInfo':
- [{'zoneState': {'available': True},
- 'hosts': {},
- 'zoneName': 'nova'}]}
- self.stubs.Set(availability_zones, 'get_availability_zones',
- fake_get_availability_zones)
- availabilityZone = self.availability_zone.AvailabilityZoneController()
-
- req_url = self.url + '/detail'
- req = webob.Request.blank(req_url)
- req.method = 'GET'
- req.environ['nova.context'] = context.get_admin_context()
- resp_dict = availabilityZone.detail(req)
-
- self.assertThat(resp_dict,
- matchers.DictMatches(expected_response))
-
-
-class AvailabilityZoneApiTestV2(AvailabilityZoneApiTestV21):
- availability_zone = az_v2
-
- def _get_wsgi_instance(self):
- return fakes.wsgi_app()
-
-
-class ServersControllerCreateTestV21(test.TestCase):
- base_url = '/v2/fake/'
-
- def setUp(self):
- """Shared implementation for tests below that create instance."""
- super(ServersControllerCreateTestV21, self).setUp()
-
- self.instance_cache_num = 0
-
- self._set_up_controller()
-
- def instance_create(context, inst):
- inst_type = flavors.get_flavor_by_flavor_id(3)
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- def_image_ref = 'http://localhost/images/%s' % image_uuid
- self.instance_cache_num += 1
- instance = fake_instance.fake_db_instance(**{
- 'id': self.instance_cache_num,
- 'display_name': inst['display_name'] or 'test',
- 'uuid': FAKE_UUID,
- 'instance_type': inst_type,
- 'access_ip_v4': '1.2.3.4',
- 'access_ip_v6': 'fead::1234',
- 'image_ref': inst.get('image_ref', def_image_ref),
- 'user_id': 'fake',
- 'project_id': 'fake',
- 'availability_zone': 'nova',
- 'reservation_id': inst['reservation_id'],
- "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
- "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
- "progress": 0,
- "fixed_ips": [],
- "task_state": "",
- "vm_state": "",
- "root_device_name": inst.get('root_device_name', 'vda'),
- })
-
- return instance
-
- fake.stub_out_image_service(self.stubs)
- self.stubs.Set(db, 'instance_create', instance_create)
-
- def _set_up_controller(self):
- ext_info = plugins.LoadedExtensionInfo()
- self.controller = servers_v21.ServersController(
- extension_info=ext_info)
- CONF.set_override('extensions_blacklist',
- 'os-availability-zone',
- 'osapi_v3')
- self.no_availability_zone_controller = servers_v21.ServersController(
- extension_info=ext_info)
-
- def _verify_no_availability_zone(self, **kwargs):
- self.assertNotIn('availability_zone', kwargs)
-
- def _test_create_extra(self, params, controller):
- image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
- server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
- server.update(params)
- body = dict(server=server)
- req = fakes.HTTPRequest.blank(self.base_url + 'servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- server = controller.create(req, body=body).obj['server']
-
- def test_create_instance_with_availability_zone_disabled(self):
- params = {'availability_zone': 'foo'}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self._verify_no_availability_zone(**kwargs)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params, self.no_availability_zone_controller)
-
- def _create_instance_with_availability_zone(self, zone_name):
- def create(*args, **kwargs):
- self.assertIn('availability_zone', kwargs)
- self.assertEqual('nova', kwargs['availability_zone'])
- return old_create(*args, **kwargs)
-
- old_create = compute_api.API.create
- self.stubs.Set(compute_api.API, 'create', create)
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
- body = {
- 'server': {
- 'name': 'server_test',
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
- 'metadata': {
- 'hello': 'world',
- 'open': 'stack',
- },
- 'availability_zone': zone_name,
- },
- }
-
- req = fakes.HTTPRequest.blank(self.base_url + 'servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- admin_context = context.get_admin_context()
- db.service_create(admin_context, {'host': 'host1_zones',
- 'binary': "nova-compute",
- 'topic': 'compute',
- 'report_count': 0})
- agg = db.aggregate_create(admin_context,
- {'name': 'agg1'}, {'availability_zone': 'nova'})
- db.aggregate_host_add(admin_context, agg['id'], 'host1_zones')
- return req, body
-
- def test_create_instance_with_availability_zone(self):
- zone_name = 'nova'
- req, body = self._create_instance_with_availability_zone(zone_name)
- res = self.controller.create(req, body=body).obj
- server = res['server']
- self.assertEqual(fakes.FAKE_UUID, server['id'])
-
- def test_create_instance_with_invalid_availability_zone_too_long(self):
- zone_name = 'a' * 256
- req, body = self._create_instance_with_availability_zone(zone_name)
- self.assertRaises(exception.ValidationError,
- self.controller.create, req, body=body)
-
- def test_create_instance_with_invalid_availability_zone_too_short(self):
- zone_name = ''
- req, body = self._create_instance_with_availability_zone(zone_name)
- self.assertRaises(exception.ValidationError,
- self.controller.create, req, body=body)
-
- def test_create_instance_with_invalid_availability_zone_not_str(self):
- zone_name = 111
- req, body = self._create_instance_with_availability_zone(zone_name)
- self.assertRaises(exception.ValidationError,
- self.controller.create, req, body=body)
-
- def test_create_instance_without_availability_zone(self):
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
- body = {
- 'server': {
- 'name': 'server_test',
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
- 'metadata': {
- 'hello': 'world',
- 'open': 'stack',
- },
- },
- }
-
- req = fakes.HTTPRequest.blank(self.base_url + 'servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- res = self.controller.create(req, body=body).obj
- server = res['server']
- self.assertEqual(fakes.FAKE_UUID, server['id'])
-
-
-class ServersControllerCreateTestV2(ServersControllerCreateTestV21):
-
- def _set_up_controller(self):
- ext_mgr = extensions.ExtensionManager()
- ext_mgr.extensions = {'os-availability-zone': 'fake'}
- self.controller = servers_v2.Controller(ext_mgr)
- ext_mgr_no_az = extensions.ExtensionManager()
- ext_mgr_no_az.extensions = {}
- self.no_availability_zone_controller = servers_v2.Controller(
- ext_mgr_no_az)
-
- def _verify_no_availability_zone(self, **kwargs):
- self.assertIsNone(kwargs['availability_zone'])
-
- def test_create_instance_with_invalid_availability_zone_too_long(self):
- # NOTE: v2.0 API does not check this bad request case.
- # So we skip this test for v2.0 API.
- pass
-
- def test_create_instance_with_invalid_availability_zone_too_short(self):
- # NOTE: v2.0 API does not check this bad request case.
- # So we skip this test for v2.0 API.
- pass
-
- def test_create_instance_with_invalid_availability_zone_not_str(self):
- # NOTE: v2.0 API does not check this bad request case.
- # So we skip this test for v2.0 API.
- pass
-
-
-class AvailabilityZoneSerializerTest(test.NoDBTestCase):
- def test_availability_zone_index_detail_serializer(self):
- def _verify_zone(zone_dict, tree):
- self.assertEqual(tree.tag, 'availabilityZone')
- self.assertEqual(zone_dict['zoneName'], tree.get('name'))
- self.assertEqual(str(zone_dict['zoneState']['available']),
- tree[0].get('available'))
-
- for _idx, host_child in enumerate(tree[1]):
- self.assertIn(host_child.get('name'), zone_dict['hosts'])
- svcs = zone_dict['hosts'][host_child.get('name')]
- for _idx, svc_child in enumerate(host_child[0]):
- self.assertIn(svc_child.get('name'), svcs)
- svc = svcs[svc_child.get('name')]
- self.assertEqual(len(svc_child), 1)
-
- self.assertEqual(str(svc['available']),
- svc_child[0].get('available'))
- self.assertEqual(str(svc['active']),
- svc_child[0].get('active'))
- self.assertEqual(str(svc['updated_at']),
- svc_child[0].get('updated_at'))
-
- serializer = az_v2.AvailabilityZonesTemplate()
- raw_availability_zones = \
- [{'zoneName': 'zone-1',
- 'zoneState': {'available': True},
- 'hosts': {'fake_host-1': {
- 'nova-compute': {'active': True, 'available': True,
- 'updated_at':
- datetime.datetime(
- 2012, 12, 26, 14, 45, 25)}}}},
- {'zoneName': 'internal',
- 'zoneState': {'available': True},
- 'hosts': {'fake_host-1': {
- 'nova-sched': {'active': True, 'available': True,
- 'updated_at':
- datetime.datetime(
- 2012, 12, 26, 14, 45, 25)}},
- 'fake_host-2': {
- 'nova-network': {'active': True,
- 'available': False,
- 'updated_at':
- datetime.datetime(
- 2012, 12, 26, 14, 45, 24)}}}},
- {'zoneName': 'zone-2',
- 'zoneState': {'available': False},
- 'hosts': None}]
-
- text = serializer.serialize(
- dict(availabilityZoneInfo=raw_availability_zones))
- tree = etree.fromstring(text)
-
- self.assertEqual('availabilityZones', tree.tag)
- self.assertEqual(len(raw_availability_zones), len(tree))
- for idx, child in enumerate(tree):
- _verify_zone(raw_availability_zones[idx], child)
diff --git a/nova/tests/api/openstack/compute/contrib/test_baremetal_nodes.py b/nova/tests/api/openstack/compute/contrib/test_baremetal_nodes.py
deleted file mode 100644
index 9650f9560a..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_baremetal_nodes.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# Copyright (c) 2013 NTT DOCOMO, INC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from webob import exc
-
-from nova.api.openstack.compute.contrib import baremetal_nodes as b_nodes_v2
-from nova.api.openstack.compute.plugins.v3 import baremetal_nodes \
- as b_nodes_v21
-from nova.api.openstack import extensions
-from nova import context
-from nova import test
-from nova.tests.virt.ironic import utils as ironic_utils
-
-
-class FakeRequest(object):
-
- def __init__(self, context):
- self.environ = {"nova.context": context}
-
-
-def fake_node(**updates):
- node = {
- 'id': 1,
- 'service_host': "host",
- 'cpus': 8,
- 'memory_mb': 8192,
- 'local_gb': 128,
- 'pm_address': "10.1.2.3",
- 'pm_user': "pm_user",
- 'pm_password': "pm_pass",
- 'terminal_port': 8000,
- 'interfaces': [],
- 'instance_uuid': 'fake-instance-uuid',
- }
- if updates:
- node.update(updates)
- return node
-
-
-def fake_node_ext_status(**updates):
- node = fake_node(uuid='fake-uuid',
- task_state='fake-task-state',
- updated_at='fake-updated-at',
- pxe_config_path='fake-pxe-config-path')
- if updates:
- node.update(updates)
- return node
-
-
-FAKE_IRONIC_CLIENT = ironic_utils.FakeClient()
-
-
-@mock.patch.object(b_nodes_v21, '_get_ironic_client',
- lambda *_: FAKE_IRONIC_CLIENT)
-class BareMetalNodesTestV21(test.NoDBTestCase):
- def setUp(self):
- super(BareMetalNodesTestV21, self).setUp()
-
- self._setup()
- self.context = context.get_admin_context()
- self.request = FakeRequest(self.context)
-
- def _setup(self):
- self.controller = b_nodes_v21.BareMetalNodeController()
-
- @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list')
- def test_index_ironic(self, mock_list):
- properties = {'cpus': 2, 'memory_mb': 1024, 'local_gb': 20}
- node = ironic_utils.get_test_node(properties=properties)
- mock_list.return_value = [node]
-
- res_dict = self.controller.index(self.request)
- expected_output = {'nodes':
- [{'memory_mb': properties['memory_mb'],
- 'host': 'IRONIC MANAGED',
- 'disk_gb': properties['local_gb'],
- 'interfaces': [],
- 'task_state': None,
- 'id': node.uuid,
- 'cpus': properties['cpus']}]}
- self.assertEqual(expected_output, res_dict)
- mock_list.assert_called_once_with(detail=True)
-
- @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
- @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
- def test_show_ironic(self, mock_get, mock_list_ports):
- properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
- node = ironic_utils.get_test_node(properties=properties)
- port = ironic_utils.get_test_port()
- mock_get.return_value = node
- mock_list_ports.return_value = [port]
-
- res_dict = self.controller.show(self.request, node.uuid)
- expected_output = {'node':
- {'memory_mb': properties['memory_mb'],
- 'instance_uuid': None,
- 'host': 'IRONIC MANAGED',
- 'disk_gb': properties['local_gb'],
- 'interfaces': [{'address': port.address}],
- 'task_state': None,
- 'id': node.uuid,
- 'cpus': properties['cpus']}}
- self.assertEqual(expected_output, res_dict)
- mock_get.assert_called_once_with(node.uuid)
- mock_list_ports.assert_called_once_with(node.uuid)
-
- @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
- @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
- def test_show_ironic_no_interfaces(self, mock_get, mock_list_ports):
- properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
- node = ironic_utils.get_test_node(properties=properties)
- mock_get.return_value = node
- mock_list_ports.return_value = []
-
- res_dict = self.controller.show(self.request, node.uuid)
- self.assertEqual([], res_dict['node']['interfaces'])
- mock_get.assert_called_once_with(node.uuid)
- mock_list_ports.assert_called_once_with(node.uuid)
-
- def test_create_ironic_not_supported(self):
- self.assertRaises(exc.HTTPBadRequest,
- self.controller.create,
- self.request, {'node': object()})
-
- def test_delete_ironic_not_supported(self):
- self.assertRaises(exc.HTTPBadRequest,
- self.controller.delete,
- self.request, 'fake-id')
-
- def test_add_interface_ironic_not_supported(self):
- self.assertRaises(exc.HTTPBadRequest,
- self.controller._add_interface,
- self.request, 'fake-id', 'fake-body')
-
- def test_remove_interface_ironic_not_supported(self):
- self.assertRaises(exc.HTTPBadRequest,
- self.controller._remove_interface,
- self.request, 'fake-id', 'fake-body')
-
-
-@mock.patch.object(b_nodes_v2, '_get_ironic_client',
- lambda *_: FAKE_IRONIC_CLIENT)
-class BareMetalNodesTestV2(BareMetalNodesTestV21):
- def _setup(self):
- self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
- self.controller = b_nodes_v2.BareMetalNodeController(self.ext_mgr)
diff --git a/nova/tests/api/openstack/compute/contrib/test_block_device_mapping.py b/nova/tests/api/openstack/compute/contrib/test_block_device_mapping.py
deleted file mode 100644
index 2c988d501c..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_block_device_mapping.py
+++ /dev/null
@@ -1,359 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-import mox
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-from webob import exc
-
-from nova.api.openstack.compute import extensions
-from nova.api.openstack.compute import plugins
-from nova.api.openstack.compute.plugins.v3 import block_device_mapping
-from nova.api.openstack.compute.plugins.v3 import servers as servers_v3
-from nova.api.openstack.compute import servers as servers_v2
-from nova import block_device
-from nova.compute import api as compute_api
-from nova import exception
-from nova import objects
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests.image import fake
-from nova.tests import matchers
-
-CONF = cfg.CONF
-
-
-class BlockDeviceMappingTestV21(test.TestCase):
-
- def _setup_controller(self):
- ext_info = plugins.LoadedExtensionInfo()
- self.controller = servers_v3.ServersController(extension_info=ext_info)
- CONF.set_override('extensions_blacklist', 'os-block-device-mapping',
- 'osapi_v3')
- self.no_bdm_v2_controller = servers_v3.ServersController(
- extension_info=ext_info)
- CONF.set_override('extensions_blacklist', '', 'osapi_v3')
-
- def setUp(self):
- super(BlockDeviceMappingTestV21, self).setUp()
- self._setup_controller()
- fake.stub_out_image_service(self.stubs)
-
- self.bdm = [{
- 'no_device': None,
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'uuid': 'fake',
- 'device_name': 'vda',
- 'delete_on_termination': False,
- }]
-
- def _get_servers_body(self, no_image=False):
- body = {
- 'server': {
- 'name': 'server_test',
- 'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- 'flavorRef': 'http://localhost/123/flavors/3',
- 'metadata': {
- 'hello': 'world',
- 'open': 'stack',
- },
- },
- }
- if no_image:
- del body['server']['imageRef']
- return body
-
- def _test_create(self, params, no_image=False, override_controller=None):
- body = self._get_servers_body(no_image)
- body['server'].update(params)
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers')
- req.method = 'POST'
- req.headers['content-type'] = 'application/json'
-
- req.body = jsonutils.dumps(body)
-
- if override_controller:
- override_controller.create(req, body=body).obj['server']
- else:
- self.controller.create(req, body=body).obj['server']
-
- def test_create_instance_with_block_device_mapping_disabled(self):
- bdm = [{'device_name': 'foo'}]
-
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertNotIn('block_device_mapping', kwargs)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
-
- params = {block_device_mapping.ATTRIBUTE_NAME: bdm}
- self._test_create(params,
- override_controller=self.no_bdm_v2_controller)
-
- def test_create_instance_with_volumes_enabled_no_image(self):
- """Test that the create will fail if there is no image
- and no bdms supplied in the request
- """
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertNotIn('imageRef', kwargs)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
-
- self.assertRaises(exc.HTTPBadRequest,
- self._test_create, {}, no_image=True)
-
- def test_create_instance_with_bdms_and_no_image(self):
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertThat(
- block_device.BlockDeviceDict(self.bdm[0]),
- matchers.DictMatches(kwargs['block_device_mapping'][0])
- )
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
-
- self.mox.StubOutWithMock(compute_api.API, '_validate_bdm')
- self.mox.StubOutWithMock(compute_api.API, '_get_bdm_image_metadata')
-
- compute_api.API._validate_bdm(
- mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(True)
- compute_api.API._get_bdm_image_metadata(
- mox.IgnoreArg(), mox.IgnoreArg(), False).AndReturn({})
- self.mox.ReplayAll()
-
- params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
- self._test_create(params, no_image=True)
-
- def test_create_instance_with_device_name_not_string(self):
- self.bdm[0]['device_name'] = 123
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['block_device_mapping'], self.bdm)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
-
- params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
- self.assertRaises(exc.HTTPBadRequest,
- self._test_create, params, no_image=True)
-
- @mock.patch.object(compute_api.API, 'create')
- def test_create_instance_with_bdm_param_not_list(self, mock_create):
- self.params = {'block_device_mapping': '/dev/vdb'}
- self.assertRaises(exc.HTTPBadRequest,
- self._test_create, self.params)
-
- def test_create_instance_with_device_name_empty(self):
- self.bdm[0]['device_name'] = ''
-
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['block_device_mapping'], self.bdm)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
-
- params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
- self.assertRaises(exc.HTTPBadRequest,
- self._test_create, params, no_image=True)
-
- def test_create_instance_with_device_name_too_long(self):
- self.bdm[0]['device_name'] = 'a' * 256
-
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['block_device_mapping'], self.bdm)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
-
- params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
- self.assertRaises(exc.HTTPBadRequest,
- self._test_create, params, no_image=True)
-
- def test_create_instance_with_space_in_device_name(self):
- self.bdm[0]['device_name'] = 'v da'
-
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertTrue(kwargs['legacy_bdm'])
- self.assertEqual(kwargs['block_device_mapping'], self.bdm)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
-
- params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
- self.assertRaises(exc.HTTPBadRequest,
- self._test_create, params, no_image=True)
-
- def test_create_instance_with_invalid_size(self):
- self.bdm[0]['volume_size'] = 'hello world'
-
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['block_device_mapping'], self.bdm)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
-
- params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
- self.assertRaises(exc.HTTPBadRequest,
- self._test_create, params, no_image=True)
-
- def test_create_instance_bdm(self):
- bdm = [{
- 'source_type': 'volume',
- 'device_name': 'fake_dev',
- 'uuid': 'fake_vol'
- }]
- bdm_expected = [{
- 'source_type': 'volume',
- 'device_name': 'fake_dev',
- 'volume_id': 'fake_vol'
- }]
-
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertFalse(kwargs['legacy_bdm'])
- for expected, received in zip(bdm_expected,
- kwargs['block_device_mapping']):
- self.assertThat(block_device.BlockDeviceDict(expected),
- matchers.DictMatches(received))
- return old_create(*args, **kwargs)
-
- def _validate_bdm(*args, **kwargs):
- pass
-
- self.stubs.Set(compute_api.API, 'create', create)
- self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
-
- params = {block_device_mapping.ATTRIBUTE_NAME: bdm}
- self._test_create(params, no_image=True)
-
- def test_create_instance_bdm_missing_device_name(self):
- del self.bdm[0]['device_name']
-
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertFalse(kwargs['legacy_bdm'])
- self.assertNotIn(None,
- kwargs['block_device_mapping'][0]['device_name'])
- return old_create(*args, **kwargs)
-
- def _validate_bdm(*args, **kwargs):
- pass
-
- self.stubs.Set(compute_api.API, 'create', create)
- self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
-
- params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
- self._test_create(params, no_image=True)
-
- def test_create_instance_bdm_validation_error(self):
- def _validate(*args, **kwargs):
- raise exception.InvalidBDMFormat(details='Wrong BDM')
-
- self.stubs.Set(block_device.BlockDeviceDict,
- '_validate', _validate)
-
- params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
- self.assertRaises(exc.HTTPBadRequest,
- self._test_create, params, no_image=True)
-
- @mock.patch('nova.compute.api.API._get_bdm_image_metadata')
- def test_create_instance_non_bootable_volume_fails(self, fake_bdm_meta):
- params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
- fake_bdm_meta.side_effect = exception.InvalidBDMVolumeNotBootable(id=1)
- self.assertRaises(exc.HTTPBadRequest, self._test_create, params,
- no_image=True)
-
- def test_create_instance_bdm_api_validation_fails(self):
- self.validation_fail_test_validate_called = False
- self.validation_fail_instance_destroy_called = False
-
- bdm_exceptions = ((exception.InvalidBDMSnapshot, {'id': 'fake'}),
- (exception.InvalidBDMVolume, {'id': 'fake'}),
- (exception.InvalidBDMImage, {'id': 'fake'}),
- (exception.InvalidBDMBootSequence, {}),
- (exception.InvalidBDMLocalsLimit, {}))
-
- ex_iter = iter(bdm_exceptions)
-
- def _validate_bdm(*args, **kwargs):
- self.validation_fail_test_validate_called = True
- ex, kargs = ex_iter.next()
- raise ex(**kargs)
-
- def _instance_destroy(*args, **kwargs):
- self.validation_fail_instance_destroy_called = True
-
- self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
- self.stubs.Set(objects.Instance, 'destroy', _instance_destroy)
-
- for _unused in xrange(len(bdm_exceptions)):
- params = {block_device_mapping.ATTRIBUTE_NAME:
- [self.bdm[0].copy()]}
- self.assertRaises(exc.HTTPBadRequest,
- self._test_create, params)
- self.assertTrue(self.validation_fail_test_validate_called)
- self.assertTrue(self.validation_fail_instance_destroy_called)
- self.validation_fail_test_validate_called = False
- self.validation_fail_instance_destroy_called = False
-
-
-class BlockDeviceMappingTestV2(BlockDeviceMappingTestV21):
-
- def _setup_controller(self):
- self.ext_mgr = extensions.ExtensionManager()
- self.ext_mgr.extensions = {'os-volumes': 'fake',
- 'os-block-device-mapping-v2-boot': 'fake'}
- self.controller = servers_v2.Controller(self.ext_mgr)
- self.ext_mgr_bdm_v2 = extensions.ExtensionManager()
- self.ext_mgr_bdm_v2.extensions = {'os-volumes': 'fake'}
- self.no_bdm_v2_controller = servers_v2.Controller(
- self.ext_mgr_bdm_v2)
-
- def test_create_instance_with_block_device_mapping_disabled(self):
- bdm = [{'device_name': 'foo'}]
-
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertIsNone(kwargs['block_device_mapping'], None)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
-
- params = {block_device_mapping.ATTRIBUTE_NAME: bdm}
- self._test_create(params,
- override_controller=self.no_bdm_v2_controller)
diff --git a/nova/tests/api/openstack/compute/contrib/test_block_device_mapping_v1.py b/nova/tests/api/openstack/compute/contrib/test_block_device_mapping_v1.py
deleted file mode 100644
index 2e680b9dad..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_block_device_mapping_v1.py
+++ /dev/null
@@ -1,421 +0,0 @@
-# Copyright (c) 2014 IBM Corp.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-import mox
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-from webob import exc
-
-from nova.api.openstack.compute import extensions
-from nova.api.openstack.compute import plugins
-from nova.api.openstack.compute.plugins.v3 import block_device_mapping_v1 as \
- block_device_mapping
-from nova.api.openstack.compute.plugins.v3 import servers as servers_v3
-from nova.api.openstack.compute import servers as servers_v2
-from nova.compute import api as compute_api
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests.image import fake
-
-CONF = cfg.CONF
-
-
-class BlockDeviceMappingTestV21(test.TestCase):
-
- def _setup_controller(self):
- ext_info = plugins.LoadedExtensionInfo()
- CONF.set_override('extensions_blacklist', 'os-block-device-mapping',
- 'osapi_v3')
- self.controller = servers_v3.ServersController(extension_info=ext_info)
- CONF.set_override('extensions_blacklist',
- ['os-block-device-mapping-v1',
- 'os-block-device-mapping'],
- 'osapi_v3')
- self.no_volumes_controller = servers_v3.ServersController(
- extension_info=ext_info)
- CONF.set_override('extensions_blacklist', '', 'osapi_v3')
-
- def setUp(self):
- super(BlockDeviceMappingTestV21, self).setUp()
- self._setup_controller()
- fake.stub_out_image_service(self.stubs)
- self.volume_id = fakes.FAKE_UUID
- self.bdm = [{
- 'id': 1,
- 'no_device': None,
- 'virtual_name': None,
- 'snapshot_id': None,
- 'volume_id': self.volume_id,
- 'status': 'active',
- 'device_name': 'vda',
- 'delete_on_termination': False,
- 'volume_image_metadata':
- {'test_key': 'test_value'}
- }]
-
- def _get_servers_body(self, no_image=False):
- body = {
- 'server': {
- 'name': 'server_test',
- 'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- 'flavorRef': 'http://localhost/123/flavors/3',
- 'metadata': {
- 'hello': 'world',
- 'open': 'stack',
- },
- },
- }
- if no_image:
- del body['server']['imageRef']
- return body
-
- def _test_create(self, params, no_image=False, override_controller=None):
- body = self._get_servers_body(no_image)
- body['server'].update(params)
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers')
- req.method = 'POST'
- req.headers['content-type'] = 'application/json'
-
- req.body = jsonutils.dumps(body)
-
- if override_controller:
- override_controller.create(req, body=body).obj['server']
- else:
- self.controller.create(req, body=body).obj['server']
-
- def test_create_instance_with_volumes_enabled(self):
- params = {'block_device_mapping': self.bdm}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['block_device_mapping'], self.bdm)
- return old_create(*args, **kwargs)
-
- def _validate_bdm(*args, **kwargs):
- pass
-
- self.stubs.Set(compute_api.API, 'create', create)
- self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
- self._test_create(params)
-
- def test_create_instance_with_volumes_enabled_and_bdms_no_image(self):
- """Test that the create works if there is no image supplied but
- os-volumes extension is enabled and bdms are supplied
- """
- self.mox.StubOutWithMock(compute_api.API, '_validate_bdm')
- self.mox.StubOutWithMock(compute_api.API, '_get_bdm_image_metadata')
- volume = self.bdm[0]
- compute_api.API._validate_bdm(mox.IgnoreArg(),
- mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(True)
- compute_api.API._get_bdm_image_metadata(mox.IgnoreArg(),
- self.bdm,
- True).AndReturn(volume)
- params = {'block_device_mapping': self.bdm}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['block_device_mapping'], self.bdm)
- self.assertNotIn('imageRef', kwargs)
- return old_create(*args, **kwargs)
-
- def _validate_bdm(*args, **kwargs):
- pass
-
- self.stubs.Set(compute_api.API, 'create', create)
- self.mox.ReplayAll()
- self._test_create(params, no_image=True)
-
- def test_create_instance_with_volumes_disabled(self):
- bdm = [{'device_name': 'foo'}]
- params = {'block_device_mapping': bdm}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertNotIn(block_device_mapping, kwargs)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create(params,
- override_controller=self.no_volumes_controller)
-
- @mock.patch('nova.compute.api.API._get_bdm_image_metadata')
- def test_create_instance_non_bootable_volume_fails(self, fake_bdm_meta):
- bdm = [{
- 'id': 1,
- 'bootable': False,
- 'volume_id': self.volume_id,
- 'status': 'active',
- 'device_name': 'vda',
- }]
- params = {'block_device_mapping': bdm}
- fake_bdm_meta.side_effect = exception.InvalidBDMVolumeNotBootable(id=1)
- self.assertRaises(exc.HTTPBadRequest,
- self._test_create, params, no_image=True)
-
- def test_create_instance_with_device_name_not_string(self):
- old_create = compute_api.API.create
- self.params = {'block_device_mapping': self.bdm}
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['block_device_mapping'], self.bdm)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self.assertRaises(exc.HTTPBadRequest,
- self._test_create, self.params)
-
- @mock.patch.object(compute_api.API, 'create')
- def test_create_instance_with_bdm_param_not_list(self, mock_create):
- self.params = {'block_device_mapping': '/dev/vdb'}
- self.assertRaises(exc.HTTPBadRequest,
- self._test_create, self.params)
-
- def test_create_instance_with_device_name_empty(self):
- self.bdm[0]['device_name'] = ''
- params = {'block_device_mapping': self.bdm}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['block_device_mapping'], self.bdm)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self.assertRaises(exc.HTTPBadRequest,
- self._test_create, params)
-
- def test_create_instance_with_device_name_too_long(self):
- self.bdm[0]['device_name'] = 'a' * 256,
- params = {'block_device_mapping': self.bdm}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['block_device_mapping'], self.bdm)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self.assertRaises(exc.HTTPBadRequest,
- self._test_create, params)
-
- def test_create_instance_with_space_in_device_name(self):
- self.bdm[0]['device_name'] = 'vd a',
- params = {'block_device_mapping': self.bdm}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertTrue(kwargs['legacy_bdm'])
- self.assertEqual(kwargs['block_device_mapping'], self.bdm)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self.assertRaises(exc.HTTPBadRequest,
- self._test_create, params)
-
- def test_create_instance_with_invalid_size(self):
- bdm = [{'delete_on_termination': 1,
- 'device_name': 'vda',
- 'volume_size': "hello world",
- 'volume_id': '11111111-1111-1111-1111-111111111111'}]
- params = {'block_device_mapping': bdm}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['block_device_mapping'], bdm)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self.assertRaises(exc.HTTPBadRequest,
- self._test_create, params)
-
- def test_create_instance_with_bdm_delete_on_termination(self):
- bdm = [{'device_name': 'foo1', 'volume_id': 'fake_vol',
- 'delete_on_termination': 1},
- {'device_name': 'foo2', 'volume_id': 'fake_vol',
- 'delete_on_termination': True},
- {'device_name': 'foo3', 'volume_id': 'fake_vol',
- 'delete_on_termination': 'invalid'},
- {'device_name': 'foo4', 'volume_id': 'fake_vol',
- 'delete_on_termination': 0},
- {'device_name': 'foo5', 'volume_id': 'fake_vol',
- 'delete_on_termination': False}]
- expected_bdm = [
- {'device_name': 'foo1', 'volume_id': 'fake_vol',
- 'delete_on_termination': True},
- {'device_name': 'foo2', 'volume_id': 'fake_vol',
- 'delete_on_termination': True},
- {'device_name': 'foo3', 'volume_id': 'fake_vol',
- 'delete_on_termination': False},
- {'device_name': 'foo4', 'volume_id': 'fake_vol',
- 'delete_on_termination': False},
- {'device_name': 'foo5', 'volume_id': 'fake_vol',
- 'delete_on_termination': False}]
- params = {'block_device_mapping': bdm}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(expected_bdm, kwargs['block_device_mapping'])
- return old_create(*args, **kwargs)
-
- def _validate_bdm(*args, **kwargs):
- pass
-
- self.stubs.Set(compute_api.API, 'create', create)
- self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
- self._test_create(params)
-
- def test_create_instance_decide_format_legacy(self):
- ext_info = plugins.LoadedExtensionInfo()
- CONF.set_override('extensions_blacklist',
- ['os-block-device-mapping',
- 'os-block-device-mapping-v1'],
- 'osapi_v3')
- controller = servers_v3.ServersController(extension_info=ext_info)
- bdm = [{'device_name': 'foo1',
- 'volume_id': 'fake_vol',
- 'delete_on_termination': 1}]
-
- expected_legacy_flag = True
-
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- legacy_bdm = kwargs.get('legacy_bdm', True)
- self.assertEqual(legacy_bdm, expected_legacy_flag)
- return old_create(*args, **kwargs)
-
- def _validate_bdm(*args, **kwargs):
- pass
-
- self.stubs.Set(compute_api.API, 'create', create)
- self.stubs.Set(compute_api.API, '_validate_bdm',
- _validate_bdm)
-
- self._test_create({}, override_controller=controller)
-
- params = {'block_device_mapping': bdm}
- self._test_create(params, override_controller=controller)
-
- def test_create_instance_both_bdm_formats(self):
- bdm = [{'device_name': 'foo'}]
- bdm_v2 = [{'source_type': 'volume',
- 'uuid': 'fake_vol'}]
- params = {'block_device_mapping': bdm,
- 'block_device_mapping_v2': bdm_v2}
- self.assertRaises(exc.HTTPBadRequest, self._test_create, params)
-
-
-class BlockDeviceMappingTestV2(BlockDeviceMappingTestV21):
-
- def _setup_controller(self):
- self.ext_mgr = extensions.ExtensionManager()
- self.ext_mgr.extensions = {'os-volumes': 'fake'}
- self.controller = servers_v2.Controller(self.ext_mgr)
- self.ext_mgr_no_vols = extensions.ExtensionManager()
- self.ext_mgr_no_vols.extensions = {}
- self.no_volumes_controller = servers_v2.Controller(
- self.ext_mgr_no_vols)
-
- def test_create_instance_with_volumes_disabled(self):
- bdm = [{'device_name': 'foo'}]
- params = {'block_device_mapping': bdm}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertIsNone(kwargs['block_device_mapping'])
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create(params,
- override_controller=self.no_volumes_controller)
-
- def test_create_instance_decide_format_legacy(self):
- ext_mgr = extensions.ExtensionManager()
- ext_mgr.extensions = {'os-volumes': 'fake',
- 'os-block-device-mapping-v2-boot': 'fake'}
- controller = servers_v2.Controller(self.ext_mgr)
- bdm = [{'device_name': 'foo1',
- 'volume_id': 'fake_vol',
- 'delete_on_termination': 1}]
-
- expected_legacy_flag = True
-
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- legacy_bdm = kwargs.get('legacy_bdm', True)
- self.assertEqual(legacy_bdm, expected_legacy_flag)
- return old_create(*args, **kwargs)
-
- def _validate_bdm(*args, **kwargs):
- pass
-
- self.stubs.Set(compute_api.API, 'create', create)
- self.stubs.Set(compute_api.API, '_validate_bdm',
- _validate_bdm)
-
- self._test_create({}, override_controller=controller)
-
- params = {'block_device_mapping': bdm}
- self._test_create(params, override_controller=controller)
-
-
-class TestServerCreateRequestXMLDeserializer(test.TestCase):
-
- def setUp(self):
- super(TestServerCreateRequestXMLDeserializer, self).setUp()
- self.deserializer = servers_v2.CreateDeserializer()
-
- def test_request_with_block_device_mapping(self):
- serial_request = """
- <server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test" imageRef="1" flavorRef="1">
- <block_device_mapping>
- <mapping volume_id="7329b667-50c7-46a6-b913-cb2a09dfeee0"
- device_name="/dev/vda" virtual_name="root"
- delete_on_termination="False" />
- <mapping snapshot_id="f31efb24-34d2-43e1-8b44-316052956a39"
- device_name="/dev/vdb" virtual_name="ephemeral0"
- delete_on_termination="False" />
- <mapping device_name="/dev/vdc" no_device="True" />
- </block_device_mapping>
- </server>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {"server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "1",
- "block_device_mapping": [
- {
- "volume_id": "7329b667-50c7-46a6-b913-cb2a09dfeee0",
- "device_name": "/dev/vda",
- "virtual_name": "root",
- "delete_on_termination": False,
- },
- {
- "snapshot_id": "f31efb24-34d2-43e1-8b44-316052956a39",
- "device_name": "/dev/vdb",
- "virtual_name": "ephemeral0",
- "delete_on_termination": False,
- },
- {
- "device_name": "/dev/vdc",
- "no_device": True,
- },
- ]
- }}
- self.assertEqual(request['body'], expected)
diff --git a/nova/tests/api/openstack/compute/contrib/test_cells.py b/nova/tests/api/openstack/compute/contrib/test_cells.py
deleted file mode 100644
index a317dc13f6..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_cells.py
+++ /dev/null
@@ -1,698 +0,0 @@
-# Copyright 2011-2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-from lxml import etree
-from oslo.utils import timeutils
-from webob import exc
-
-from nova.api.openstack.compute.contrib import cells as cells_ext_v2
-from nova.api.openstack.compute.plugins.v3 import cells as cells_ext_v21
-from nova.api.openstack import extensions
-from nova.api.openstack import xmlutil
-from nova.cells import rpcapi as cells_rpcapi
-from nova import context
-from nova import exception
-from nova import rpc
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import utils
-
-
-class BaseCellsTest(test.NoDBTestCase):
- def setUp(self):
- super(BaseCellsTest, self).setUp()
-
- self.fake_cells = [
- dict(id=1, name='cell1', is_parent=True,
- weight_scale=1.0, weight_offset=0.0,
- transport_url='rabbit://bob:xxxx@r1.example.org/'),
- dict(id=2, name='cell2', is_parent=False,
- weight_scale=1.0, weight_offset=0.0,
- transport_url='rabbit://alice:qwerty@r2.example.org/')]
-
- self.fake_capabilities = [
- {'cap1': '0,1', 'cap2': '2,3'},
- {'cap3': '4,5', 'cap4': '5,6'}]
-
- def fake_cell_get(_self, context, cell_name):
- for cell in self.fake_cells:
- if cell_name == cell['name']:
- return cell
- else:
- raise exception.CellNotFound(cell_name=cell_name)
-
- def fake_cell_create(_self, context, values):
- cell = dict(id=1)
- cell.update(values)
- return cell
-
- def fake_cell_update(_self, context, cell_id, values):
- cell = fake_cell_get(_self, context, cell_id)
- cell.update(values)
- return cell
-
- def fake_cells_api_get_all_cell_info(*args):
- return self._get_all_cell_info(*args)
-
- self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_get', fake_cell_get)
- self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_update', fake_cell_update)
- self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_create', fake_cell_create)
- self.stubs.Set(cells_rpcapi.CellsAPI, 'get_cell_info_for_neighbors',
- fake_cells_api_get_all_cell_info)
-
- def _get_all_cell_info(self, *args):
- def insecure_transport_url(url):
- transport_url = rpc.get_transport_url(url)
- transport_url.hosts[0].password = None
- return str(transport_url)
-
- cells = copy.deepcopy(self.fake_cells)
- cells[0]['transport_url'] = insecure_transport_url(
- cells[0]['transport_url'])
- cells[1]['transport_url'] = insecure_transport_url(
- cells[1]['transport_url'])
- for i, cell in enumerate(cells):
- cell['capabilities'] = self.fake_capabilities[i]
- return cells
-
-
-class CellsTestV21(BaseCellsTest):
- cell_extension = 'compute_extension:v3:os-cells'
- bad_request = exception.ValidationError
-
- def _get_cell_controller(self, ext_mgr):
- return cells_ext_v21.CellsController()
-
- def _get_request(self, resource):
- return fakes.HTTPRequest.blank('/v2/fake/' + resource)
-
- def setUp(self):
- super(CellsTestV21, self).setUp()
- self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
- self.controller = self._get_cell_controller(self.ext_mgr)
- self.context = context.get_admin_context()
- self.flags(enable=True, group='cells')
-
- def test_index(self):
- req = self._get_request("cells")
- res_dict = self.controller.index(req)
-
- self.assertEqual(len(res_dict['cells']), 2)
- for i, cell in enumerate(res_dict['cells']):
- self.assertEqual(cell['name'], self.fake_cells[i]['name'])
- self.assertNotIn('capabilitiles', cell)
- self.assertNotIn('password', cell)
-
- def test_detail(self):
- req = self._get_request("cells/detail")
- res_dict = self.controller.detail(req)
-
- self.assertEqual(len(res_dict['cells']), 2)
- for i, cell in enumerate(res_dict['cells']):
- self.assertEqual(cell['name'], self.fake_cells[i]['name'])
- self.assertEqual(cell['capabilities'], self.fake_capabilities[i])
- self.assertNotIn('password', cell)
-
- def test_show_bogus_cell_raises(self):
- req = self._get_request("cells/bogus")
- self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'bogus')
-
- def test_get_cell_by_name(self):
- req = self._get_request("cells/cell1")
- res_dict = self.controller.show(req, 'cell1')
- cell = res_dict['cell']
-
- self.assertEqual(cell['name'], 'cell1')
- self.assertEqual(cell['rpc_host'], 'r1.example.org')
- self.assertNotIn('password', cell)
-
- def _cell_delete(self):
- call_info = {'delete_called': 0}
-
- def fake_cell_delete(inst, context, cell_name):
- self.assertEqual(cell_name, 'cell999')
- call_info['delete_called'] += 1
-
- self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_delete', fake_cell_delete)
-
- req = self._get_request("cells/cell999")
- req.environ['nova.context'] = self.context
- self.controller.delete(req, 'cell999')
- self.assertEqual(call_info['delete_called'], 1)
-
- def test_cell_delete(self):
- # Test cell delete with just cell policy
- rules = {"default": "is_admin:true",
- self.cell_extension: "is_admin:true"}
- self.policy.set_rules(rules)
- self._cell_delete()
-
- def test_cell_delete_with_delete_policy(self):
- self._cell_delete()
-
- def test_delete_bogus_cell_raises(self):
- def fake_cell_delete(inst, context, cell_name):
- return 0
-
- self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_delete', fake_cell_delete)
-
- req = self._get_request("cells/cell999")
- req.environ['nova.context'] = self.context
- self.assertRaises(exc.HTTPNotFound, self.controller.delete, req,
- 'cell999')
-
- def test_cell_delete_fails_for_invalid_policy(self):
- def fake_cell_delete(inst, context, cell_name):
- pass
-
- self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_delete', fake_cell_delete)
-
- req = self._get_request("cells/cell999")
- req.environ['nova.context'] = self.context
- req.environ["nova.context"].is_admin = False
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.delete, req, 'cell999')
-
- def _cell_create_parent(self):
- body = {'cell': {'name': 'meow',
- 'username': 'fred',
- 'password': 'fubar',
- 'rpc_host': 'r3.example.org',
- 'type': 'parent'}}
-
- req = self._get_request("cells")
- req.environ['nova.context'] = self.context
- res_dict = self.controller.create(req, body=body)
- cell = res_dict['cell']
-
- self.assertEqual(cell['name'], 'meow')
- self.assertEqual(cell['username'], 'fred')
- self.assertEqual(cell['rpc_host'], 'r3.example.org')
- self.assertEqual(cell['type'], 'parent')
- self.assertNotIn('password', cell)
- self.assertNotIn('is_parent', cell)
-
- def test_cell_create_parent(self):
- # Test create with just cells policy
- rules = {"default": "is_admin:true",
- self.cell_extension: "is_admin:true"}
- self.policy.set_rules(rules)
- self._cell_create_parent()
-
- def test_cell_create_parent_with_create_policy(self):
- self._cell_create_parent()
-
- def _cell_create_child(self):
- body = {'cell': {'name': 'meow',
- 'username': 'fred',
- 'password': 'fubar',
- 'rpc_host': 'r3.example.org',
- 'type': 'child'}}
-
- req = self._get_request("cells")
- req.environ['nova.context'] = self.context
- res_dict = self.controller.create(req, body=body)
- cell = res_dict['cell']
-
- self.assertEqual(cell['name'], 'meow')
- self.assertEqual(cell['username'], 'fred')
- self.assertEqual(cell['rpc_host'], 'r3.example.org')
- self.assertEqual(cell['type'], 'child')
- self.assertNotIn('password', cell)
- self.assertNotIn('is_parent', cell)
-
- def test_cell_create_child(self):
- # Test create with just cells policy
- rules = {"default": "is_admin:true",
- self.cell_extension: "is_admin:true"}
- self.policy.set_rules(rules)
- self._cell_create_child()
-
- def test_cell_create_child_with_create_policy(self):
- self._cell_create_child()
-
- def test_cell_create_no_name_raises(self):
- body = {'cell': {'username': 'moocow',
- 'password': 'secret',
- 'rpc_host': 'r3.example.org',
- 'type': 'parent'}}
-
- req = self._get_request("cells")
- req.environ['nova.context'] = self.context
- self.assertRaises(self.bad_request,
- self.controller.create, req, body=body)
-
- def test_cell_create_name_empty_string_raises(self):
- body = {'cell': {'name': '',
- 'username': 'fred',
- 'password': 'secret',
- 'rpc_host': 'r3.example.org',
- 'type': 'parent'}}
-
- req = self._get_request("cells")
- req.environ['nova.context'] = self.context
- self.assertRaises(self.bad_request,
- self.controller.create, req, body=body)
-
- def test_cell_create_name_with_bang_raises(self):
- body = {'cell': {'name': 'moo!cow',
- 'username': 'fred',
- 'password': 'secret',
- 'rpc_host': 'r3.example.org',
- 'type': 'parent'}}
-
- req = self._get_request("cells")
- req.environ['nova.context'] = self.context
- self.assertRaises(self.bad_request,
- self.controller.create, req, body=body)
-
- def test_cell_create_name_with_dot_raises(self):
- body = {'cell': {'name': 'moo.cow',
- 'username': 'fred',
- 'password': 'secret',
- 'rpc_host': 'r3.example.org',
- 'type': 'parent'}}
-
- req = self._get_request("cells")
- req.environ['nova.context'] = self.context
- res_dict = self.controller.create(req, body=body)
- cell = res_dict['cell']
- self.assertEqual(cell['name'], 'moo.cow')
-
- def test_cell_create_name_with_invalid_type_raises(self):
- body = {'cell': {'name': 'moocow',
- 'username': 'fred',
- 'password': 'secret',
- 'rpc_host': 'r3.example.org',
- 'type': 'invalid'}}
-
- req = self._get_request("cells")
- req.environ['nova.context'] = self.context
- self.assertRaises(self.bad_request,
- self.controller.create, req, body=body)
-
- def test_cell_create_fails_for_invalid_policy(self):
- body = {'cell': {'name': 'fake'}}
- req = self._get_request("cells")
- req.environ['nova.context'] = self.context
- req.environ['nova.context'].is_admin = False
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.create, req, body=body)
-
- def _cell_update(self):
- body = {'cell': {'username': 'zeb',
- 'password': 'sneaky'}}
-
- req = self._get_request("cells/cell1")
- req.environ['nova.context'] = self.context
- res_dict = self.controller.update(req, 'cell1', body=body)
- cell = res_dict['cell']
-
- self.assertEqual(cell['name'], 'cell1')
- self.assertEqual(cell['rpc_host'], 'r1.example.org')
- self.assertEqual(cell['username'], 'zeb')
- self.assertNotIn('password', cell)
-
- def test_cell_update(self):
- # Test cell update with just cell policy
- rules = {"default": "is_admin:true",
- self.cell_extension: "is_admin:true"}
- self.policy.set_rules(rules)
- self._cell_update()
-
- def test_cell_update_with_update_policy(self):
- self._cell_update()
-
- def test_cell_update_fails_for_invalid_policy(self):
- body = {'cell': {'name': 'got_changed'}}
- req = self._get_request("cells/cell1")
- req.environ['nova.context'] = self.context
- req.environ['nova.context'].is_admin = False
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.create, req, body=body)
-
- def test_cell_update_empty_name_raises(self):
- body = {'cell': {'name': '',
- 'username': 'zeb',
- 'password': 'sneaky'}}
-
- req = self._get_request("cells/cell1")
- req.environ['nova.context'] = self.context
- self.assertRaises(self.bad_request,
- self.controller.update, req, 'cell1', body=body)
-
- def test_cell_update_invalid_type_raises(self):
- body = {'cell': {'username': 'zeb',
- 'type': 'invalid',
- 'password': 'sneaky'}}
-
- req = self._get_request("cells/cell1")
- req.environ['nova.context'] = self.context
- self.assertRaises(self.bad_request,
- self.controller.update, req, 'cell1', body=body)
-
- def test_cell_update_without_type_specified(self):
- body = {'cell': {'username': 'wingwj'}}
-
- req = self._get_request("cells/cell1")
- req.environ['nova.context'] = self.context
- res_dict = self.controller.update(req, 'cell1', body=body)
- cell = res_dict['cell']
-
- self.assertEqual(cell['name'], 'cell1')
- self.assertEqual(cell['rpc_host'], 'r1.example.org')
- self.assertEqual(cell['username'], 'wingwj')
- self.assertEqual(cell['type'], 'parent')
-
- def test_cell_update_with_type_specified(self):
- body1 = {'cell': {'username': 'wingwj', 'type': 'child'}}
- body2 = {'cell': {'username': 'wingwj', 'type': 'parent'}}
-
- req1 = self._get_request("cells/cell1")
- req1.environ['nova.context'] = self.context
- res_dict1 = self.controller.update(req1, 'cell1', body=body1)
- cell1 = res_dict1['cell']
-
- req2 = self._get_request("cells/cell2")
- req2.environ['nova.context'] = self.context
- res_dict2 = self.controller.update(req2, 'cell2', body=body2)
- cell2 = res_dict2['cell']
-
- self.assertEqual(cell1['name'], 'cell1')
- self.assertEqual(cell1['rpc_host'], 'r1.example.org')
- self.assertEqual(cell1['username'], 'wingwj')
- self.assertEqual(cell1['type'], 'child')
-
- self.assertEqual(cell2['name'], 'cell2')
- self.assertEqual(cell2['rpc_host'], 'r2.example.org')
- self.assertEqual(cell2['username'], 'wingwj')
- self.assertEqual(cell2['type'], 'parent')
-
- def test_cell_info(self):
- caps = ['cap1=a;b', 'cap2=c;d']
- self.flags(name='darksecret', capabilities=caps, group='cells')
-
- req = self._get_request("cells/info")
- res_dict = self.controller.info(req)
- cell = res_dict['cell']
- cell_caps = cell['capabilities']
-
- self.assertEqual(cell['name'], 'darksecret')
- self.assertEqual(cell_caps['cap1'], 'a;b')
- self.assertEqual(cell_caps['cap2'], 'c;d')
-
- def test_show_capacities(self):
- if (self.cell_extension == 'compute_extension:cells'):
- self.ext_mgr.is_loaded('os-cell-capacities').AndReturn(True)
- self.mox.StubOutWithMock(self.controller.cells_rpcapi,
- 'get_capacities')
- response = {"ram_free":
- {"units_by_mb": {"8192": 0, "512": 13,
- "4096": 1, "2048": 3, "16384": 0},
- "total_mb": 7680},
- "disk_free":
- {"units_by_mb": {"81920": 11, "20480": 46,
- "40960": 23, "163840": 5, "0": 0},
- "total_mb": 1052672}
- }
- self.controller.cells_rpcapi.\
- get_capacities(self.context, cell_name=None).AndReturn(response)
- self.mox.ReplayAll()
- req = self._get_request("cells/capacities")
- req.environ["nova.context"] = self.context
- res_dict = self.controller.capacities(req)
- self.assertEqual(response, res_dict['cell']['capacities'])
-
- def test_show_capacity_fails_with_non_admin_context(self):
- if (self.cell_extension == 'compute_extension:cells'):
- self.ext_mgr.is_loaded('os-cell-capacities').AndReturn(True)
- rules = {self.cell_extension: "is_admin:true"}
- self.policy.set_rules(rules)
-
- self.mox.ReplayAll()
- req = self._get_request("cells/capacities")
- req.environ["nova.context"] = self.context
- req.environ["nova.context"].is_admin = False
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.capacities, req)
-
- def test_show_capacities_for_invalid_cell(self):
- if (self.cell_extension == 'compute_extension:cells'):
- self.ext_mgr.is_loaded('os-cell-capacities').AndReturn(True)
- self.mox.StubOutWithMock(self.controller.cells_rpcapi,
- 'get_capacities')
- self.controller.cells_rpcapi. \
- get_capacities(self.context, cell_name="invalid_cell").AndRaise(
- exception.CellNotFound(cell_name="invalid_cell"))
- self.mox.ReplayAll()
- req = self._get_request("cells/invalid_cell/capacities")
- req.environ["nova.context"] = self.context
- self.assertRaises(exc.HTTPNotFound,
- self.controller.capacities, req, "invalid_cell")
-
- def test_show_capacities_for_cell(self):
- if (self.cell_extension == 'compute_extension:cells'):
- self.ext_mgr.is_loaded('os-cell-capacities').AndReturn(True)
- self.mox.StubOutWithMock(self.controller.cells_rpcapi,
- 'get_capacities')
- response = {"ram_free":
- {"units_by_mb": {"8192": 0, "512": 13,
- "4096": 1, "2048": 3, "16384": 0},
- "total_mb": 7680},
- "disk_free":
- {"units_by_mb": {"81920": 11, "20480": 46,
- "40960": 23, "163840": 5, "0": 0},
- "total_mb": 1052672}
- }
- self.controller.cells_rpcapi.\
- get_capacities(self.context, cell_name='cell_name').\
- AndReturn(response)
- self.mox.ReplayAll()
- req = self._get_request("cells/capacities")
- req.environ["nova.context"] = self.context
- res_dict = self.controller.capacities(req, 'cell_name')
- self.assertEqual(response, res_dict['cell']['capacities'])
-
- def test_sync_instances(self):
- call_info = {}
-
- def sync_instances(self, context, **kwargs):
- call_info['project_id'] = kwargs.get('project_id')
- call_info['updated_since'] = kwargs.get('updated_since')
- call_info['deleted'] = kwargs.get('deleted')
-
- self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances)
-
- req = self._get_request("cells/sync_instances")
- req.environ['nova.context'] = self.context
- body = {}
- self.controller.sync_instances(req, body=body)
- self.assertIsNone(call_info['project_id'])
- self.assertIsNone(call_info['updated_since'])
-
- body = {'project_id': 'test-project'}
- self.controller.sync_instances(req, body=body)
- self.assertEqual(call_info['project_id'], 'test-project')
- self.assertIsNone(call_info['updated_since'])
-
- expected = timeutils.utcnow().isoformat()
- if not expected.endswith("+00:00"):
- expected += "+00:00"
-
- body = {'updated_since': expected}
- self.controller.sync_instances(req, body=body)
- self.assertIsNone(call_info['project_id'])
- self.assertEqual(call_info['updated_since'], expected)
-
- body = {'updated_since': 'skjdfkjsdkf'}
- self.assertRaises(self.bad_request,
- self.controller.sync_instances, req, body=body)
-
- body = {'deleted': False}
- self.controller.sync_instances(req, body=body)
- self.assertIsNone(call_info['project_id'])
- self.assertIsNone(call_info['updated_since'])
- self.assertEqual(call_info['deleted'], False)
-
- body = {'deleted': 'False'}
- self.controller.sync_instances(req, body=body)
- self.assertIsNone(call_info['project_id'])
- self.assertIsNone(call_info['updated_since'])
- self.assertEqual(call_info['deleted'], False)
-
- body = {'deleted': 'True'}
- self.controller.sync_instances(req, body=body)
- self.assertIsNone(call_info['project_id'])
- self.assertIsNone(call_info['updated_since'])
- self.assertEqual(call_info['deleted'], True)
-
- body = {'deleted': 'foo'}
- self.assertRaises(self.bad_request,
- self.controller.sync_instances, req, body=body)
-
- body = {'foo': 'meow'}
- self.assertRaises(self.bad_request,
- self.controller.sync_instances, req, body=body)
-
- def test_sync_instances_fails_for_invalid_policy(self):
- def sync_instances(self, context, **kwargs):
- pass
-
- self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances)
-
- req = self._get_request("cells/sync_instances")
- req.environ['nova.context'] = self.context
- req.environ['nova.context'].is_admin = False
-
- body = {}
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.sync_instances, req, body=body)
-
- def test_cells_disabled(self):
- self.flags(enable=False, group='cells')
-
- req = self._get_request("cells")
- self.assertRaises(exc.HTTPNotImplemented,
- self.controller.index, req)
-
- req = self._get_request("cells/detail")
- self.assertRaises(exc.HTTPNotImplemented,
- self.controller.detail, req)
-
- req = self._get_request("cells/cell1")
- self.assertRaises(exc.HTTPNotImplemented,
- self.controller.show, req)
-
- self.assertRaises(exc.HTTPNotImplemented,
- self.controller.delete, req, 'cell999')
-
- req = self._get_request("cells/cells")
- self.assertRaises(exc.HTTPNotImplemented,
- self.controller.create, req, {})
-
- req = self._get_request("cells/capacities")
- self.assertRaises(exc.HTTPNotImplemented,
- self.controller.capacities, req)
-
- req = self._get_request("cells/sync_instances")
- self.assertRaises(exc.HTTPNotImplemented,
- self.controller.sync_instances, req, {})
-
-
-class CellsTestV2(CellsTestV21):
- cell_extension = 'compute_extension:cells'
- bad_request = exc.HTTPBadRequest
-
- def _get_cell_controller(self, ext_mgr):
- return cells_ext_v2.Controller(ext_mgr)
-
- def test_cell_create_name_with_dot_raises(self):
- body = {'cell': {'name': 'moo.cow',
- 'username': 'fred',
- 'password': 'secret',
- 'rpc_host': 'r3.example.org',
- 'type': 'parent'}}
-
- req = self._get_request("cells")
- req.environ['nova.context'] = self.context
- self.assertRaises(exc.HTTPBadRequest,
- self.controller.create, req, body=body)
-
-
-class TestCellsXMLSerializer(BaseCellsTest):
- def test_multiple_cells(self):
- fixture = {'cells': self._get_all_cell_info()}
-
- serializer = cells_ext_v2.CellsTemplate()
- output = serializer.serialize(fixture)
- res_tree = etree.XML(output)
-
- self.assertEqual(res_tree.tag, '{%s}cells' % xmlutil.XMLNS_V10)
- self.assertEqual(len(res_tree), 2)
- self.assertEqual(res_tree[0].tag, '{%s}cell' % xmlutil.XMLNS_V10)
- self.assertEqual(res_tree[1].tag, '{%s}cell' % xmlutil.XMLNS_V10)
-
- def test_single_cell_with_caps(self):
- cell = {'id': 1,
- 'name': 'darksecret',
- 'username': 'meow',
- 'capabilities': {'cap1': 'a;b',
- 'cap2': 'c;d'}}
- fixture = {'cell': cell}
-
- serializer = cells_ext_v2.CellTemplate()
- output = serializer.serialize(fixture)
- res_tree = etree.XML(output)
-
- self.assertEqual(res_tree.tag, '{%s}cell' % xmlutil.XMLNS_V10)
- self.assertEqual(res_tree.get('name'), 'darksecret')
- self.assertEqual(res_tree.get('username'), 'meow')
- self.assertIsNone(res_tree.get('password'))
- self.assertEqual(len(res_tree), 1)
-
- child = res_tree[0]
- self.assertEqual(child.tag,
- '{%s}capabilities' % xmlutil.XMLNS_V10)
- for elem in child:
- self.assertIn(elem.tag, ('{%s}cap1' % xmlutil.XMLNS_V10,
- '{%s}cap2' % xmlutil.XMLNS_V10))
- if elem.tag == '{%s}cap1' % xmlutil.XMLNS_V10:
- self.assertEqual(elem.text, 'a;b')
- elif elem.tag == '{%s}cap2' % xmlutil.XMLNS_V10:
- self.assertEqual(elem.text, 'c;d')
-
- def test_single_cell_without_caps(self):
- cell = {'id': 1,
- 'username': 'woof',
- 'name': 'darksecret'}
- fixture = {'cell': cell}
-
- serializer = cells_ext_v2.CellTemplate()
- output = serializer.serialize(fixture)
- res_tree = etree.XML(output)
-
- self.assertEqual(res_tree.tag, '{%s}cell' % xmlutil.XMLNS_V10)
- self.assertEqual(res_tree.get('name'), 'darksecret')
- self.assertEqual(res_tree.get('username'), 'woof')
- self.assertIsNone(res_tree.get('password'))
- self.assertEqual(len(res_tree), 0)
-
-
-class TestCellsXMLDeserializer(test.NoDBTestCase):
- def test_cell_deserializer(self):
- caps_dict = {'cap1': 'a;b',
- 'cap2': 'c;d'}
- caps_xml = ("<capabilities><cap1>a;b</cap1>"
- "<cap2>c;d</cap2></capabilities>")
- expected = {'cell': {'name': 'testcell1',
- 'type': 'child',
- 'rpc_host': 'localhost',
- 'capabilities': caps_dict}}
- intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
- "<cell><name>testcell1</name><type>child</type>"
- "<rpc_host>localhost</rpc_host>"
- "%s</cell>") % caps_xml
- deserializer = cells_ext_v2.CellDeserializer()
- result = deserializer.deserialize(intext)
- self.assertEqual(dict(body=expected), result)
-
- def test_with_corrupt_xml(self):
- deserializer = cells_ext_v2.CellDeserializer()
- self.assertRaises(
- exception.MalformedRequestBody,
- deserializer.deserialize,
- utils.killer_xml_body())
diff --git a/nova/tests/api/openstack/compute/contrib/test_certificates.py b/nova/tests/api/openstack/compute/contrib/test_certificates.py
deleted file mode 100644
index 5d8634f4d0..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_certificates.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation
-# All Rights Reserved.
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-import mock
-import mox
-from webob import exc
-
-from nova.api.openstack.compute.contrib import certificates as certificates_v2
-from nova.api.openstack.compute.plugins.v3 import certificates \
- as certificates_v21
-from nova.cert import rpcapi
-from nova import context
-from nova import exception
-from nova.openstack.common import policy as common_policy
-from nova import policy
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-class CertificatesTestV21(test.NoDBTestCase):
- certificates = certificates_v21
- url = '/v3/os-certificates'
- certificate_show_extension = 'compute_extension:v3:os-certificates:show'
- certificate_create_extension = \
- 'compute_extension:v3:os-certificates:create'
-
- def setUp(self):
- super(CertificatesTestV21, self).setUp()
- self.context = context.RequestContext('fake', 'fake')
- self.controller = self.certificates.CertificatesController()
-
- def test_translate_certificate_view(self):
- pk, cert = 'fakepk', 'fakecert'
- view = self.certificates._translate_certificate_view(cert, pk)
- self.assertEqual(view['data'], cert)
- self.assertEqual(view['private_key'], pk)
-
- def test_certificates_show_root(self):
- self.mox.StubOutWithMock(self.controller.cert_rpcapi, 'fetch_ca')
-
- self.controller.cert_rpcapi.fetch_ca(
- mox.IgnoreArg(), project_id='fake').AndReturn('fakeroot')
-
- self.mox.ReplayAll()
-
- req = fakes.HTTPRequest.blank(self.url + '/root')
- res_dict = self.controller.show(req, 'root')
-
- response = {'certificate': {'data': 'fakeroot', 'private_key': None}}
- self.assertEqual(res_dict, response)
-
- def test_certificates_show_policy_failed(self):
- rules = {
- self.certificate_show_extension:
- common_policy.parse_rule("!")
- }
- policy.set_rules(rules)
- req = fakes.HTTPRequest.blank(self.url + '/root')
- exc = self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.show, req, 'root')
- self.assertIn(self.certificate_show_extension,
- exc.format_message())
-
- def test_certificates_create_certificate(self):
- self.mox.StubOutWithMock(self.controller.cert_rpcapi,
- 'generate_x509_cert')
-
- self.controller.cert_rpcapi.generate_x509_cert(
- mox.IgnoreArg(),
- user_id='fake_user',
- project_id='fake').AndReturn(('fakepk', 'fakecert'))
-
- self.mox.ReplayAll()
-
- req = fakes.HTTPRequest.blank(self.url)
- res_dict = self.controller.create(req)
-
- response = {
- 'certificate': {'data': 'fakecert',
- 'private_key': 'fakepk'}
- }
- self.assertEqual(res_dict, response)
-
- def test_certificates_create_policy_failed(self):
- rules = {
- self.certificate_create_extension:
- common_policy.parse_rule("!")
- }
- policy.set_rules(rules)
- req = fakes.HTTPRequest.blank(self.url)
- exc = self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.create, req)
- self.assertIn(self.certificate_create_extension,
- exc.format_message())
-
- @mock.patch.object(rpcapi.CertAPI, 'fetch_ca',
- side_effect=exception.CryptoCAFileNotFound(project='fake'))
- def test_non_exist_certificates_show(self, mock_fetch_ca):
- req = fakes.HTTPRequest.blank(self.url + '/root')
- self.assertRaises(
- exc.HTTPNotFound,
- self.controller.show,
- req, 'root')
-
-
-class CertificatesTestV2(CertificatesTestV21):
- certificates = certificates_v2
- url = '/v2/fake/os-certificates'
- certificate_show_extension = 'compute_extension:certificates'
- certificate_create_extension = 'compute_extension:certificates'
-
-
-class CertificatesSerializerTest(test.NoDBTestCase):
- def test_index_serializer(self):
- serializer = certificates_v2.CertificateTemplate()
- text = serializer.serialize(dict(
- certificate=dict(
- data='fakecert',
- private_key='fakepk'),
- ))
-
- tree = etree.fromstring(text)
-
- self.assertEqual('certificate', tree.tag)
- self.assertEqual('fakepk', tree.get('private_key'))
- self.assertEqual('fakecert', tree.get('data'))
diff --git a/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py b/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
deleted file mode 100644
index d2358f3bf3..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
+++ /dev/null
@@ -1,210 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import uuid as uuid_lib
-
-from lxml import etree
-from oslo.config import cfg
-from oslo.utils import timeutils
-from webob import exc
-
-from nova.api.openstack.compute.contrib import cloudpipe as cloudpipe_v2
-from nova.api.openstack.compute.plugins.v3 import cloudpipe as cloudpipe_v21
-from nova.api.openstack import wsgi
-from nova.compute import utils as compute_utils
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_network
-from nova.tests import matchers
-from nova import utils
-
-CONF = cfg.CONF
-CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
-
-
-project_id = str(uuid_lib.uuid4().hex)
-uuid = str(uuid_lib.uuid4())
-
-
-def fake_vpn_instance():
- return {
- 'id': 7, 'image_ref': CONF.vpn_image_id, 'vm_state': 'active',
- 'created_at': timeutils.parse_strtime('1981-10-20T00:00:00.000000'),
- 'uuid': uuid, 'project_id': project_id,
- }
-
-
-def compute_api_get_all_empty(context, search_opts=None):
- return []
-
-
-def compute_api_get_all(context, search_opts=None):
- return [fake_vpn_instance()]
-
-
-def utils_vpn_ping(addr, port, timoeout=0.05, session_id=None):
- return True
-
-
-class CloudpipeTestV21(test.NoDBTestCase):
- cloudpipe = cloudpipe_v21
- url = '/v2/fake/os-cloudpipe'
-
- def setUp(self):
- super(CloudpipeTestV21, self).setUp()
- self.controller = self.cloudpipe.CloudpipeController()
- self.stubs.Set(self.controller.compute_api, "get_all",
- compute_api_get_all_empty)
- self.stubs.Set(utils, 'vpn_ping', utils_vpn_ping)
-
- def test_cloudpipe_list_no_network(self):
-
- def fake_get_nw_info_for_instance(instance):
- return {}
-
- self.stubs.Set(compute_utils, "get_nw_info_for_instance",
- fake_get_nw_info_for_instance)
- self.stubs.Set(self.controller.compute_api, "get_all",
- compute_api_get_all)
- req = fakes.HTTPRequest.blank(self.url)
- res_dict = self.controller.index(req)
- response = {'cloudpipes': [{'project_id': project_id,
- 'instance_id': uuid,
- 'created_at': '1981-10-20T00:00:00Z'}]}
- self.assertEqual(res_dict, response)
-
- def test_cloudpipe_list(self):
-
- def network_api_get(context, network_id):
- self.assertEqual(context.project_id, project_id)
- return {'vpn_public_address': '127.0.0.1',
- 'vpn_public_port': 22}
-
- def fake_get_nw_info_for_instance(instance):
- return fake_network.fake_get_instance_nw_info(self.stubs)
-
- self.stubs.Set(compute_utils, "get_nw_info_for_instance",
- fake_get_nw_info_for_instance)
- self.stubs.Set(self.controller.network_api, "get",
- network_api_get)
- self.stubs.Set(self.controller.compute_api, "get_all",
- compute_api_get_all)
- req = fakes.HTTPRequest.blank(self.url)
- res_dict = self.controller.index(req)
- response = {'cloudpipes': [{'project_id': project_id,
- 'internal_ip': '192.168.1.100',
- 'public_ip': '127.0.0.1',
- 'public_port': 22,
- 'state': 'running',
- 'instance_id': uuid,
- 'created_at': '1981-10-20T00:00:00Z'}]}
- self.assertThat(res_dict, matchers.DictMatches(response))
-
- def test_cloudpipe_create(self):
- def launch_vpn_instance(context):
- return ([fake_vpn_instance()], 'fake-reservation')
-
- self.stubs.Set(self.controller.cloudpipe, 'launch_vpn_instance',
- launch_vpn_instance)
- body = {'cloudpipe': {'project_id': project_id}}
- req = fakes.HTTPRequest.blank(self.url)
- res_dict = self.controller.create(req, body=body)
-
- response = {'instance_id': uuid}
- self.assertEqual(res_dict, response)
-
- def test_cloudpipe_create_no_networks(self):
- def launch_vpn_instance(context):
- raise exception.NoMoreNetworks
-
- self.stubs.Set(self.controller.cloudpipe, 'launch_vpn_instance',
- launch_vpn_instance)
- body = {'cloudpipe': {'project_id': project_id}}
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(exc.HTTPBadRequest,
- self.controller.create, req, body=body)
-
- def test_cloudpipe_create_already_running(self):
- def launch_vpn_instance(*args, **kwargs):
- self.fail("Method should not have been called")
-
- self.stubs.Set(self.controller.cloudpipe, 'launch_vpn_instance',
- launch_vpn_instance)
- self.stubs.Set(self.controller.compute_api, "get_all",
- compute_api_get_all)
- body = {'cloudpipe': {'project_id': project_id}}
- req = fakes.HTTPRequest.blank(self.url)
- res_dict = self.controller.create(req, body=body)
- response = {'instance_id': uuid}
- self.assertEqual(res_dict, response)
-
- def test_cloudpipe_create_with_bad_project_id_failed(self):
- body = {'cloudpipe': {'project_id': 'bad.project.id'}}
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(exception.ValidationError,
- self.controller.create, req, body=body)
-
-
-class CloudpipeTestV2(CloudpipeTestV21):
- cloudpipe = cloudpipe_v2
-
- def test_cloudpipe_create_with_bad_project_id_failed(self):
- pass
-
-
-class CloudpipesXMLSerializerTestV2(test.NoDBTestCase):
- def test_default_serializer(self):
- serializer = cloudpipe_v2.CloudpipeTemplate()
- exemplar = dict(cloudpipe=dict(instance_id='1234-1234-1234-1234'))
- text = serializer.serialize(exemplar)
- tree = etree.fromstring(text)
- self.assertEqual('cloudpipe', tree.tag)
- for child in tree:
- self.assertIn(child.tag, exemplar['cloudpipe'])
- self.assertEqual(child.text, exemplar['cloudpipe'][child.tag])
-
- def test_index_serializer(self):
- serializer = cloudpipe_v2.CloudpipesTemplate()
- exemplar = dict(cloudpipes=[
- dict(
- project_id='1234',
- public_ip='1.2.3.4',
- public_port='321',
- instance_id='1234-1234-1234-1234',
- created_at=timeutils.isotime(),
- state='running'),
- dict(
- project_id='4321',
- public_ip='4.3.2.1',
- public_port='123',
- state='pending')])
- text = serializer.serialize(exemplar)
- tree = etree.fromstring(text)
- self.assertEqual('cloudpipes', tree.tag)
- self.assertEqual(len(exemplar['cloudpipes']), len(tree))
- for idx, cl_pipe in enumerate(tree):
- kp_data = exemplar['cloudpipes'][idx]
- for child in cl_pipe:
- self.assertIn(child.tag, kp_data)
- self.assertEqual(child.text, kp_data[child.tag])
-
- def test_deserializer(self):
- deserializer = wsgi.XMLDeserializer()
- exemplar = dict(cloudpipe=dict(project_id='4321'))
- intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
- '<cloudpipe><project_id>4321</project_id></cloudpipe>')
- result = deserializer.deserialize(intext)['body']
- self.assertEqual(result, exemplar)
diff --git a/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py b/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py
deleted file mode 100644
index 5caccf14b5..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# Copyright 2012 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import webob
-
-from nova.api.openstack.compute.contrib import cloudpipe_update as clup_v2
-from nova.api.openstack.compute.plugins.v3 import cloudpipe as clup_v21
-from nova import db
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_network
-
-
-fake_networks = [fake_network.fake_network(1),
- fake_network.fake_network(2)]
-
-
-def fake_project_get_networks(context, project_id, associate=True):
- return fake_networks
-
-
-def fake_network_update(context, network_id, values):
- for network in fake_networks:
- if network['id'] == network_id:
- for key in values:
- network[key] = values[key]
-
-
-class CloudpipeUpdateTestV21(test.NoDBTestCase):
- bad_request = exception.ValidationError
-
- def setUp(self):
- super(CloudpipeUpdateTestV21, self).setUp()
- self.stubs.Set(db, "project_get_networks", fake_project_get_networks)
- self.stubs.Set(db, "network_update", fake_network_update)
- self._setup()
-
- def _setup(self):
- self.controller = clup_v21.CloudpipeController()
-
- def _check_status(self, expected_status, res, controller_methord):
- self.assertEqual(expected_status, controller_methord.wsgi_code)
-
- def test_cloudpipe_configure_project(self):
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-cloudpipe/configure-project')
- body = {"configure_project": {"vpn_ip": "1.2.3.4", "vpn_port": 222}}
- result = self.controller.update(req, 'configure-project',
- body=body)
- self._check_status(202, result, self.controller.update)
- self.assertEqual(fake_networks[0]['vpn_public_address'], "1.2.3.4")
- self.assertEqual(fake_networks[0]['vpn_public_port'], 222)
-
- def test_cloudpipe_configure_project_bad_url(self):
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-cloudpipe/configure-projectx')
- body = {"configure_project": {"vpn_ip": "1.2.3.4", "vpn_port": 222}}
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.update, req,
- 'configure-projectx', body=body)
-
- def test_cloudpipe_configure_project_bad_data(self):
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-cloudpipe/configure-project')
- body = {"configure_project": {"vpn_ipxx": "1.2.3.4", "vpn_port": 222}}
- self.assertRaises(self.bad_request,
- self.controller.update, req,
- 'configure-project', body=body)
-
- def test_cloudpipe_configure_project_bad_vpn_port(self):
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-cloudpipe/configure-project')
- body = {"configure_project": {"vpn_ipxx": "1.2.3.4",
- "vpn_port": "foo"}}
- self.assertRaises(self.bad_request,
- self.controller.update, req,
- 'configure-project', body=body)
-
-
-class CloudpipeUpdateTestV2(CloudpipeUpdateTestV21):
- bad_request = webob.exc.HTTPBadRequest
-
- def _setup(self):
- self.controller = clup_v2.CloudpipeUpdateController()
-
- def _check_status(self, expected_status, res, controller_methord):
- self.assertEqual(expected_status, res.status_int)
diff --git a/nova/tests/api/openstack/compute/contrib/test_config_drive.py b/nova/tests/api/openstack/compute/contrib/test_config_drive.py
deleted file mode 100644
index a9d624b7bc..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_config_drive.py
+++ /dev/null
@@ -1,260 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute.contrib import config_drive as config_drive_v2
-from nova.api.openstack.compute import plugins
-from nova.api.openstack.compute.plugins.v3 import config_drive \
- as config_drive_v21
-from nova.api.openstack.compute.plugins.v3 import servers as servers_v21
-from nova.api.openstack.compute import servers as servers_v2
-from nova.api.openstack import extensions
-from nova.compute import api as compute_api
-from nova.compute import flavors
-from nova import db
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-from nova.tests.image import fake
-
-
-CONF = cfg.CONF
-
-
-class ConfigDriveTestV21(test.TestCase):
- base_url = '/v2/fake/servers/'
-
- def _setup_wsgi(self):
- self.app = fakes.wsgi_app_v21(init_only=('servers', 'os-config-drive'))
-
- def _get_config_drive_controller(self):
- return config_drive_v21.ConfigDriveController()
-
- def setUp(self):
- super(ConfigDriveTestV21, self).setUp()
- self.Controller = self._get_config_drive_controller()
- fakes.stub_out_networking(self.stubs)
- fakes.stub_out_rate_limiting(self.stubs)
- fake.stub_out_image_service(self.stubs)
- self._setup_wsgi()
-
- def test_show(self):
- self.stubs.Set(db, 'instance_get',
- fakes.fake_instance_get())
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get())
- req = webob.Request.blank(self.base_url + '1')
- req.headers['Content-Type'] = 'application/json'
- response = req.get_response(self.app)
- self.assertEqual(response.status_int, 200)
- res_dict = jsonutils.loads(response.body)
- self.assertIn('config_drive', res_dict['server'])
-
- def test_detail_servers(self):
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fakes.fake_instance_get_all_by_filters())
- req = fakes.HTTPRequest.blank(self.base_url + 'detail')
- res = req.get_response(self.app)
- server_dicts = jsonutils.loads(res.body)['servers']
- self.assertNotEqual(len(server_dicts), 0)
- for server_dict in server_dicts:
- self.assertIn('config_drive', server_dict)
-
-
-class ConfigDriveTestV2(ConfigDriveTestV21):
-
- def _get_config_drive_controller(self):
- return config_drive_v2.Controller()
-
- def _setup_wsgi(self):
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Config_drive'])
- self.app = fakes.wsgi_app(init_only=('servers',))
-
-
-class ServersControllerCreateTestV21(test.TestCase):
- base_url = '/v2/fake/'
- bad_request = exception.ValidationError
-
- def _set_up_controller(self):
- ext_info = plugins.LoadedExtensionInfo()
- self.controller = servers_v21.ServersController(
- extension_info=ext_info)
- CONF.set_override('extensions_blacklist',
- 'os-config-drive',
- 'osapi_v3')
- self.no_config_drive_controller = servers_v21.ServersController(
- extension_info=ext_info)
-
- def _verfiy_config_drive(self, **kwargs):
- self.assertNotIn('config_drive', kwargs)
-
- def _initialize_extension(self):
- pass
-
- def setUp(self):
- """Shared implementation for tests below that create instance."""
- super(ServersControllerCreateTestV21, self).setUp()
-
- self.instance_cache_num = 0
- self._set_up_controller()
-
- def instance_create(context, inst):
- inst_type = flavors.get_flavor_by_flavor_id(3)
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- def_image_ref = 'http://localhost/images/%s' % image_uuid
- self.instance_cache_num += 1
- instance = fake_instance.fake_db_instance(**{
- 'id': self.instance_cache_num,
- 'display_name': inst['display_name'] or 'test',
- 'uuid': fakes.FAKE_UUID,
- 'instance_type': inst_type,
- 'access_ip_v4': '1.2.3.4',
- 'access_ip_v6': 'fead::1234',
- 'image_ref': inst.get('image_ref', def_image_ref),
- 'user_id': 'fake',
- 'project_id': 'fake',
- 'reservation_id': inst['reservation_id'],
- "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
- "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
- "progress": 0,
- "fixed_ips": [],
- "task_state": "",
- "vm_state": "",
- "root_device_name": inst.get('root_device_name', 'vda'),
- })
-
- return instance
-
- fake.stub_out_image_service(self.stubs)
- self.stubs.Set(db, 'instance_create', instance_create)
-
- def _test_create_extra(self, params, override_controller):
- image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
- server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
- server.update(params)
- body = dict(server=server)
- req = fakes.HTTPRequest.blank(self.base_url + 'servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- if override_controller is not None:
- server = override_controller.create(req, body=body).obj['server']
- else:
- server = self.controller.create(req, body=body).obj['server']
-
- def test_create_instance_with_config_drive_disabled(self):
- params = {'config_drive': "False"}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self._verfiy_config_drive(**kwargs)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params,
- override_controller=self.no_config_drive_controller)
-
- def _create_instance_body_of_config_drive(self, param):
- self._initialize_extension()
-
- def create(*args, **kwargs):
- self.assertIn('config_drive', kwargs)
- return old_create(*args, **kwargs)
-
- old_create = compute_api.API.create
- self.stubs.Set(compute_api.API, 'create', create)
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
- body = {
- 'server': {
- 'name': 'config_drive_test',
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
- 'config_drive': param,
- },
- }
-
- req = fakes.HTTPRequest.blank(self.base_url + 'servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- return req, body
-
- def test_create_instance_with_config_drive(self):
- param = True
- req, body = self._create_instance_body_of_config_drive(param)
- res = self.controller.create(req, body=body).obj
- server = res['server']
- self.assertEqual(fakes.FAKE_UUID, server['id'])
-
- def test_create_instance_with_config_drive_as_boolean_string(self):
- param = 'false'
- req, body = self._create_instance_body_of_config_drive(param)
- res = self.controller.create(req, body=body).obj
- server = res['server']
- self.assertEqual(fakes.FAKE_UUID, server['id'])
-
- def test_create_instance_with_bad_config_drive(self):
- param = 12345
- req, body = self._create_instance_body_of_config_drive(param)
- self.assertRaises(self.bad_request,
- self.controller.create, req, body=body)
-
- def test_create_instance_without_config_drive(self):
- param = True
- req, body = self._create_instance_body_of_config_drive(param)
- del body['server']['config_drive']
- res = self.controller.create(req, body=body).obj
- server = res['server']
- self.assertEqual(fakes.FAKE_UUID, server['id'])
-
- def test_create_instance_with_empty_config_drive(self):
- param = ''
- req, body = self._create_instance_body_of_config_drive(param)
- self.assertRaises(exception.ValidationError,
- self.controller.create, req, body=body)
-
-
-class ServersControllerCreateTestV2(ServersControllerCreateTestV21):
- bad_request = webob.exc.HTTPBadRequest
-
- def _set_up_controller(self):
- self.ext_mgr = extensions.ExtensionManager()
- self.ext_mgr.extensions = {}
- self.controller = servers_v2.Controller(self.ext_mgr)
- self.no_config_drive_controller = None
-
- def _verfiy_config_drive(self, **kwargs):
- self.assertIsNone(kwargs['config_drive'])
-
- def _initialize_extension(self):
- self.ext_mgr.extensions = {'os-config-drive': 'fake'}
-
- def test_create_instance_with_empty_config_drive(self):
- param = ''
- req, body = self._create_instance_body_of_config_drive(param)
- res = self.controller.create(req, body=body).obj
- server = res['server']
- self.assertEqual(fakes.FAKE_UUID, server['id'])
diff --git a/nova/tests/api/openstack/compute/contrib/test_console_auth_tokens.py b/nova/tests/api/openstack/compute/contrib/test_console_auth_tokens.py
deleted file mode 100644
index 3c3fee2871..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_console_auth_tokens.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2013 Cloudbase Solutions Srl
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-import webob
-
-from nova.consoleauth import rpcapi as consoleauth_rpcapi
-from nova import context
-from nova import test
-from nova.tests.api.openstack import fakes
-
-CONF = cfg.CONF
-CONF.import_opt('osapi_compute_ext_list', 'nova.api.openstack.compute.contrib')
-
-_FAKE_CONNECT_INFO = {'instance_uuid': 'fake_instance_uuid',
- 'host': 'fake_host',
- 'port': 'fake_port',
- 'internal_access_path': 'fake_access_path',
- 'console_type': 'rdp-html5'}
-
-
-def _fake_check_token(self, context, token):
- return _FAKE_CONNECT_INFO
-
-
-def _fake_check_token_not_found(self, context, token):
- return None
-
-
-def _fake_check_token_unauthorized(self, context, token):
- connect_info = _FAKE_CONNECT_INFO
- connect_info['console_type'] = 'unauthorized_console_type'
- return connect_info
-
-
-class ConsoleAuthTokensExtensionTest(test.TestCase):
-
- _FAKE_URL = '/v2/fake/os-console-auth-tokens/1'
-
- _EXPECTED_OUTPUT = {'console': {'instance_uuid': 'fake_instance_uuid',
- 'host': 'fake_host',
- 'port': 'fake_port',
- 'internal_access_path':
- 'fake_access_path'}}
-
- def setUp(self):
- super(ConsoleAuthTokensExtensionTest, self).setUp()
- self.stubs.Set(consoleauth_rpcapi.ConsoleAuthAPI, 'check_token',
- _fake_check_token)
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Console_auth_tokens'])
-
- ctxt = self._get_admin_context()
- self.app = fakes.wsgi_app(init_only=('os-console-auth-tokens',),
- fake_auth_context=ctxt)
-
- def _get_admin_context(self):
- ctxt = context.get_admin_context()
- ctxt.user_id = 'fake'
- ctxt.project_id = 'fake'
- return ctxt
-
- def _create_request(self):
- req = webob.Request.blank(self._FAKE_URL)
- req.method = "GET"
- req.headers["content-type"] = "application/json"
- return req
-
- def test_get_console_connect_info(self):
- req = self._create_request()
- res = req.get_response(self.app)
- self.assertEqual(200, res.status_int)
- output = jsonutils.loads(res.body)
- self.assertEqual(self._EXPECTED_OUTPUT, output)
-
- def test_get_console_connect_info_token_not_found(self):
- self.stubs.Set(consoleauth_rpcapi.ConsoleAuthAPI, 'check_token',
- _fake_check_token_not_found)
- req = self._create_request()
- res = req.get_response(self.app)
- self.assertEqual(404, res.status_int)
-
- def test_get_console_connect_info_unauthorized_console_type(self):
- self.stubs.Set(consoleauth_rpcapi.ConsoleAuthAPI, 'check_token',
- _fake_check_token_unauthorized)
- req = self._create_request()
- res = req.get_response(self.app)
- self.assertEqual(401, res.status_int)
diff --git a/nova/tests/api/openstack/compute/contrib/test_console_output.py b/nova/tests/api/openstack/compute/contrib/test_console_output.py
deleted file mode 100644
index faf39da0e1..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_console_output.py
+++ /dev/null
@@ -1,171 +0,0 @@
-# Copyright 2011 Eldar Nugaev
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import string
-
-from oslo.serialization import jsonutils
-
-from nova.compute import api as compute_api
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-
-
-def fake_get_console_output(self, _context, _instance, tail_length):
- fixture = [str(i) for i in range(5)]
-
- if tail_length is None:
- pass
- elif tail_length == 0:
- fixture = []
- else:
- fixture = fixture[-int(tail_length):]
-
- return '\n'.join(fixture)
-
-
-def fake_get_console_output_not_ready(self, _context, _instance, tail_length):
- raise exception.InstanceNotReady(instance_id=_instance["uuid"])
-
-
-def fake_get_console_output_all_characters(self, _ctx, _instance, _tail_len):
- return string.printable
-
-
-def fake_get(self, context, instance_uuid, want_objects=False,
- expected_attrs=None):
- return fake_instance.fake_instance_obj(context, **{'uuid': instance_uuid})
-
-
-def fake_get_not_found(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id='fake')
-
-
-class ConsoleOutputExtensionTestV21(test.NoDBTestCase):
- application_type = "application/json"
- action_url = '/v2/fake/servers/1/action'
-
- def setUp(self):
- super(ConsoleOutputExtensionTestV21, self).setUp()
- self.stubs.Set(compute_api.API, 'get_console_output',
- fake_get_console_output)
- self.stubs.Set(compute_api.API, 'get', fake_get)
- self.app = self._get_app()
-
- def _get_app(self):
- return fakes.wsgi_app_v21(init_only=('servers',
- 'os-console-output'))
-
- def _get_response(self, length_dict=None):
- length_dict = length_dict or {}
- body = {'os-getConsoleOutput': length_dict}
- req = fakes.HTTPRequest.blank(self.action_url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = self.application_type
- res = req.get_response(self.app)
- return res
-
- def test_get_text_console_instance_action(self):
- res = self._get_response()
- output = jsonutils.loads(res.body)
- self.assertEqual(200, res.status_int)
- self.assertEqual({'output': '0\n1\n2\n3\n4'}, output)
-
- def test_get_console_output_with_tail(self):
- res = self._get_response(length_dict={'length': 3})
- output = jsonutils.loads(res.body)
- self.assertEqual(200, res.status_int)
- self.assertEqual({'output': '2\n3\n4'}, output)
-
- def test_get_console_output_with_none_length(self):
- res = self._get_response(length_dict={'length': None})
- output = jsonutils.loads(res.body)
- self.assertEqual(200, res.status_int)
- self.assertEqual({'output': '0\n1\n2\n3\n4'}, output)
-
- def test_get_console_output_with_length_as_str(self):
- res = self._get_response(length_dict={'length': '3'})
- output = jsonutils.loads(res.body)
- self.assertEqual(200, res.status_int)
- self.assertEqual({'output': '2\n3\n4'}, output)
-
- def test_get_console_output_filtered_characters(self):
- self.stubs.Set(compute_api.API, 'get_console_output',
- fake_get_console_output_all_characters)
- res = self._get_response()
- output = jsonutils.loads(res.body)
- self.assertEqual(200, res.status_int)
- expect = string.digits + string.letters + string.punctuation + ' \t\n'
- self.assertEqual({'output': expect}, output)
-
- def test_get_text_console_no_instance(self):
- self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
- res = self._get_response()
- self.assertEqual(404, res.status_int)
-
- def test_get_text_console_no_instance_on_get_output(self):
- self.stubs.Set(compute_api.API,
- 'get_console_output',
- fake_get_not_found)
- res = self._get_response()
- self.assertEqual(404, res.status_int)
-
- def _get_console_output_bad_request_case(self, body):
- req = fakes.HTTPRequest.blank(self.action_url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- res = req.get_response(self.app)
- self.assertEqual(400, res.status_int)
-
- def test_get_console_output_with_non_integer_length(self):
- body = {'os-getConsoleOutput': {'length': 'NaN'}}
- self._get_console_output_bad_request_case(body)
-
- def test_get_text_console_bad_body(self):
- body = {}
- self._get_console_output_bad_request_case(body)
-
- def test_get_console_output_with_length_as_float(self):
- body = {'os-getConsoleOutput': {'length': 2.5}}
- self._get_console_output_bad_request_case(body)
-
- def test_get_console_output_not_ready(self):
- self.stubs.Set(compute_api.API, 'get_console_output',
- fake_get_console_output_not_ready)
- res = self._get_response(length_dict={'length': 3})
- self.assertEqual(409, res.status_int)
-
- def test_not_implemented(self):
- self.stubs.Set(compute_api.API, 'get_console_output',
- fakes.fake_not_implemented)
- res = self._get_response()
- self.assertEqual(501, res.status_int)
-
- def test_get_console_output_with_boolean_length(self):
- res = self._get_response(length_dict={'length': True})
- self.assertEqual(400, res.status_int)
-
-
-class ConsoleOutputExtensionTestV2(ConsoleOutputExtensionTestV21):
- need_osapi_compute_extension = True
-
- def _get_app(self):
- self.flags(osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Console_output'])
- return fakes.wsgi_app(init_only=('servers',))
diff --git a/nova/tests/api/openstack/compute/contrib/test_consoles.py b/nova/tests/api/openstack/compute/contrib/test_consoles.py
deleted file mode 100644
index 1faff0d78c..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_consoles.py
+++ /dev/null
@@ -1,587 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo.serialization import jsonutils
-import webob
-
-from nova.compute import api as compute_api
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-def fake_get_vnc_console(self, _context, _instance, _console_type):
- return {'url': 'http://fake'}
-
-
-def fake_get_spice_console(self, _context, _instance, _console_type):
- return {'url': 'http://fake'}
-
-
-def fake_get_rdp_console(self, _context, _instance, _console_type):
- return {'url': 'http://fake'}
-
-
-def fake_get_serial_console(self, _context, _instance, _console_type):
- return {'url': 'http://fake'}
-
-
-def fake_get_vnc_console_invalid_type(self, _context,
- _instance, _console_type):
- raise exception.ConsoleTypeInvalid(console_type=_console_type)
-
-
-def fake_get_spice_console_invalid_type(self, _context,
- _instance, _console_type):
- raise exception.ConsoleTypeInvalid(console_type=_console_type)
-
-
-def fake_get_rdp_console_invalid_type(self, _context,
- _instance, _console_type):
- raise exception.ConsoleTypeInvalid(console_type=_console_type)
-
-
-def fake_get_vnc_console_type_unavailable(self, _context,
- _instance, _console_type):
- raise exception.ConsoleTypeUnavailable(console_type=_console_type)
-
-
-def fake_get_spice_console_type_unavailable(self, _context,
- _instance, _console_type):
- raise exception.ConsoleTypeUnavailable(console_type=_console_type)
-
-
-def fake_get_rdp_console_type_unavailable(self, _context,
- _instance, _console_type):
- raise exception.ConsoleTypeUnavailable(console_type=_console_type)
-
-
-def fake_get_vnc_console_not_ready(self, _context, instance, _console_type):
- raise exception.InstanceNotReady(instance_id=instance["uuid"])
-
-
-def fake_get_spice_console_not_ready(self, _context, instance, _console_type):
- raise exception.InstanceNotReady(instance_id=instance["uuid"])
-
-
-def fake_get_rdp_console_not_ready(self, _context, instance, _console_type):
- raise exception.InstanceNotReady(instance_id=instance["uuid"])
-
-
-def fake_get_vnc_console_not_found(self, _context, instance, _console_type):
- raise exception.InstanceNotFound(instance_id=instance["uuid"])
-
-
-def fake_get_spice_console_not_found(self, _context, instance, _console_type):
- raise exception.InstanceNotFound(instance_id=instance["uuid"])
-
-
-def fake_get_rdp_console_not_found(self, _context, instance, _console_type):
- raise exception.InstanceNotFound(instance_id=instance["uuid"])
-
-
-def fake_get(self, context, instance_uuid, want_objects=False,
- expected_attrs=None):
- return {'uuid': instance_uuid}
-
-
-def fake_get_not_found(self, context, instance_uuid, want_objects=False,
- expected_attrs=None):
- raise exception.InstanceNotFound(instance_id=instance_uuid)
-
-
-class ConsolesExtensionTestV21(test.NoDBTestCase):
- url = '/v2/fake/servers/1/action'
-
- def _setup_wsgi(self):
- self.app = fakes.wsgi_app_v21(init_only=('servers',
- 'os-remote-consoles'))
-
- def setUp(self):
- super(ConsolesExtensionTestV21, self).setUp()
- self.stubs.Set(compute_api.API, 'get_vnc_console',
- fake_get_vnc_console)
- self.stubs.Set(compute_api.API, 'get_spice_console',
- fake_get_spice_console)
- self.stubs.Set(compute_api.API, 'get_rdp_console',
- fake_get_rdp_console)
- self.stubs.Set(compute_api.API, 'get_serial_console',
- fake_get_serial_console)
- self.stubs.Set(compute_api.API, 'get', fake_get)
- self._setup_wsgi()
-
- def test_get_vnc_console(self):
- body = {'os-getVNCConsole': {'type': 'novnc'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- output = jsonutils.loads(res.body)
- self.assertEqual(res.status_int, 200)
- self.assertEqual(output,
- {u'console': {u'url': u'http://fake', u'type': u'novnc'}})
-
- def test_get_vnc_console_not_ready(self):
- self.stubs.Set(compute_api.API, 'get_vnc_console',
- fake_get_vnc_console_not_ready)
- body = {'os-getVNCConsole': {'type': 'novnc'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- jsonutils.loads(res.body)
- self.assertEqual(res.status_int, 409)
-
- def test_get_vnc_console_no_type(self):
- self.stubs.Set(compute_api.API, 'get_vnc_console',
- fake_get_vnc_console_invalid_type)
- body = {'os-getVNCConsole': {}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 400)
-
- def test_get_vnc_console_no_instance(self):
- self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
- body = {'os-getVNCConsole': {'type': 'novnc'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 404)
-
- def test_get_vnc_console_no_instance_on_console_get(self):
- self.stubs.Set(compute_api.API, 'get_vnc_console',
- fake_get_vnc_console_not_found)
- body = {'os-getVNCConsole': {'type': 'novnc'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 404)
-
- def test_get_vnc_console_invalid_type(self):
- body = {'os-getVNCConsole': {'type': 'invalid'}}
- self.stubs.Set(compute_api.API, 'get_vnc_console',
- fake_get_vnc_console_invalid_type)
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 400)
-
- def test_get_vnc_console_type_unavailable(self):
- body = {'os-getVNCConsole': {'type': 'unavailable'}}
- self.stubs.Set(compute_api.API, 'get_vnc_console',
- fake_get_vnc_console_type_unavailable)
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(400, res.status_int)
-
- def test_get_vnc_console_not_implemented(self):
- self.stubs.Set(compute_api.API, 'get_vnc_console',
- fakes.fake_not_implemented)
-
- body = {'os-getVNCConsole': {'type': 'novnc'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 501)
-
- def test_get_spice_console(self):
- body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- output = jsonutils.loads(res.body)
- self.assertEqual(res.status_int, 200)
- self.assertEqual(output,
- {u'console': {u'url': u'http://fake', u'type': u'spice-html5'}})
-
- def test_get_spice_console_not_ready(self):
- self.stubs.Set(compute_api.API, 'get_spice_console',
- fake_get_spice_console_not_ready)
- body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- jsonutils.loads(res.body)
- self.assertEqual(res.status_int, 409)
-
- def test_get_spice_console_no_type(self):
- self.stubs.Set(compute_api.API, 'get_spice_console',
- fake_get_spice_console_invalid_type)
- body = {'os-getSPICEConsole': {}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 400)
-
- def test_get_spice_console_no_instance(self):
- self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
- body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 404)
-
- def test_get_spice_console_no_instance_on_console_get(self):
- self.stubs.Set(compute_api.API, 'get_spice_console',
- fake_get_spice_console_not_found)
- body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 404)
-
- def test_get_spice_console_invalid_type(self):
- body = {'os-getSPICEConsole': {'type': 'invalid'}}
- self.stubs.Set(compute_api.API, 'get_spice_console',
- fake_get_spice_console_invalid_type)
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 400)
-
- def test_get_spice_console_not_implemented(self):
- body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
- self.stubs.Set(compute_api.API, 'get_spice_console',
- fakes.fake_not_implemented)
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 501)
-
- def test_get_spice_console_type_unavailable(self):
- body = {'os-getSPICEConsole': {'type': 'unavailable'}}
- self.stubs.Set(compute_api.API, 'get_spice_console',
- fake_get_spice_console_type_unavailable)
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(400, res.status_int)
-
- def test_get_rdp_console(self):
- body = {'os-getRDPConsole': {'type': 'rdp-html5'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- output = jsonutils.loads(res.body)
- self.assertEqual(res.status_int, 200)
- self.assertEqual(output,
- {u'console': {u'url': u'http://fake', u'type': u'rdp-html5'}})
-
- def test_get_rdp_console_not_ready(self):
- self.stubs.Set(compute_api.API, 'get_rdp_console',
- fake_get_rdp_console_not_ready)
- body = {'os-getRDPConsole': {'type': 'rdp-html5'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- jsonutils.loads(res.body)
- self.assertEqual(res.status_int, 409)
-
- def test_get_rdp_console_no_type(self):
- self.stubs.Set(compute_api.API, 'get_rdp_console',
- fake_get_rdp_console_invalid_type)
- body = {'os-getRDPConsole': {}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 400)
-
- def test_get_rdp_console_no_instance(self):
- self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
- body = {'os-getRDPConsole': {'type': 'rdp-html5'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 404)
-
- def test_get_rdp_console_no_instance_on_console_get(self):
- self.stubs.Set(compute_api.API, 'get_rdp_console',
- fake_get_rdp_console_not_found)
- body = {'os-getRDPConsole': {'type': 'rdp-html5'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 404)
-
- def test_get_rdp_console_invalid_type(self):
- body = {'os-getRDPConsole': {'type': 'invalid'}}
- self.stubs.Set(compute_api.API, 'get_rdp_console',
- fake_get_rdp_console_invalid_type)
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 400)
-
- def test_get_rdp_console_type_unavailable(self):
- body = {'os-getRDPConsole': {'type': 'unavailable'}}
- self.stubs.Set(compute_api.API, 'get_rdp_console',
- fake_get_rdp_console_type_unavailable)
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(400, res.status_int)
-
- def test_get_vnc_console_with_undefined_param(self):
- body = {'os-getVNCConsole': {'type': 'novnc', 'undefined': 'foo'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- res = req.get_response(self.app)
- self.assertEqual(400, res.status_int)
-
- def test_get_spice_console_with_undefined_param(self):
- body = {'os-getSPICEConsole': {'type': 'spice-html5',
- 'undefined': 'foo'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- res = req.get_response(self.app)
- self.assertEqual(400, res.status_int)
-
- def test_get_rdp_console_with_undefined_param(self):
- body = {'os-getRDPConsole': {'type': 'rdp-html5', 'undefined': 'foo'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- res = req.get_response(self.app)
- self.assertEqual(400, res.status_int)
-
-
-class ConsolesExtensionTestV2(ConsolesExtensionTestV21):
-
- def _setup_wsgi(self):
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Consoles'])
- self.app = fakes.wsgi_app(init_only=('servers',))
-
- def test_get_vnc_console_with_undefined_param(self):
- pass
-
- def test_get_spice_console_with_undefined_param(self):
- pass
-
- def test_get_rdp_console_with_undefined_param(self):
- pass
-
- def test_get_serial_console(self):
- body = {'os-getSerialConsole': {'type': 'serial'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- output = jsonutils.loads(res.body)
- self.assertEqual(200, res.status_int)
- self.assertEqual({u'console': {u'url': u'http://fake',
- u'type': u'serial'}},
- output)
-
- @mock.patch.object(compute_api.API, 'get_serial_console')
- def test_get_serial_console_not_enable(self, get_serial_console):
- get_serial_console.side_effect = exception.ConsoleTypeUnavailable(
- console_type="serial")
-
- body = {'os-getSerialConsole': {'type': 'serial'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 400)
- self.assertTrue(get_serial_console.called)
-
- @mock.patch.object(compute_api.API, 'get_serial_console')
- def test_get_serial_console_invalid_type(self, get_serial_console):
- get_serial_console.side_effect = (
- exception.ConsoleTypeInvalid(console_type='invalid'))
-
- body = {'os-getSerialConsole': {'type': 'invalid'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 400)
- self.assertTrue(get_serial_console.called)
-
- @mock.patch.object(compute_api.API, 'get_serial_console')
- def test_get_serial_console_no_type(self, get_serial_console):
- get_serial_console.side_effect = (
- exception.ConsoleTypeInvalid(console_type=''))
-
- body = {'os-getSerialConsole': {}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 400)
- self.assertTrue(get_serial_console.called)
-
- @mock.patch.object(compute_api.API, 'get_serial_console')
- def test_get_serial_console_no_instance(self, get_serial_console):
- get_serial_console.side_effect = (
- exception.InstanceNotFound(instance_id='xxx'))
-
- body = {'os-getSerialConsole': {'type': 'serial'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 404)
- self.assertTrue(get_serial_console.called)
-
- @mock.patch.object(compute_api.API, 'get_serial_console')
- def test_get_serial_console_instance_not_ready(self, get_serial_console):
- get_serial_console.side_effect = (
- exception.InstanceNotReady(instance_id='xxx'))
-
- body = {'os-getSerialConsole': {'type': 'serial'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 409)
- self.assertTrue(get_serial_console.called)
-
- @mock.patch.object(compute_api.API, 'get_serial_console')
- def test_get_serial_console_socket_exhausted(self, get_serial_console):
- get_serial_console.side_effect = (
- exception.SocketPortRangeExhaustedException(
- host='127.0.0.1'))
-
- body = {'os-getSerialConsole': {'type': 'serial'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 500)
- self.assertTrue(get_serial_console.called)
-
- @mock.patch.object(compute_api.API, 'get_serial_console')
- def test_get_serial_console_image_nport_invalid(self, get_serial_console):
- get_serial_console.side_effect = (
- exception.ImageSerialPortNumberInvalid(
- num_ports='x', property="hw_serial_port_count"))
-
- body = {'os-getSerialConsole': {'type': 'serial'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 400)
- self.assertTrue(get_serial_console.called)
-
- @mock.patch.object(compute_api.API, 'get_serial_console')
- def test_get_serial_console_image_nport_exceed(self, get_serial_console):
- get_serial_console.side_effect = (
- exception.ImageSerialPortNumberExceedFlavorValue())
-
- body = {'os-getSerialConsole': {'type': 'serial'}}
- req = webob.Request.blank(self.url)
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 400)
- self.assertTrue(get_serial_console.called)
diff --git a/nova/tests/api/openstack/compute/contrib/test_createserverext.py b/nova/tests/api/openstack/compute/contrib/test_createserverext.py
deleted file mode 100644
index 62b135ac0e..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_createserverext.py
+++ /dev/null
@@ -1,387 +0,0 @@
-# Copyright 2010-2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import base64
-from xml.dom import minidom
-
-from oslo.serialization import jsonutils
-import webob
-
-from nova.compute import api as compute_api
-from nova import db
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-
-FAKE_UUID = fakes.FAKE_UUID
-
-FAKE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'),
- ('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '10.0.2.12')]
-
-DUPLICATE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'),
- ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12')]
-
-INVALID_NETWORKS = [('invalid', 'invalid-ip-address')]
-
-
-def return_security_group_non_existing(context, project_id, group_name):
- raise exception.SecurityGroupNotFoundForProject(project_id=project_id,
- security_group_id=group_name)
-
-
-def return_security_group_get_by_name(context, project_id, group_name):
- return {'id': 1, 'name': group_name}
-
-
-def return_security_group_get(context, security_group_id, session):
- return {'id': security_group_id}
-
-
-def return_instance_add_security_group(context, instance_id,
- security_group_id):
- pass
-
-
-class CreateserverextTest(test.TestCase):
- def setUp(self):
- super(CreateserverextTest, self).setUp()
-
- self.security_group = None
- self.injected_files = None
- self.networks = None
- self.user_data = None
-
- def create(*args, **kwargs):
- if 'security_group' in kwargs:
- self.security_group = kwargs['security_group']
- else:
- self.security_group = None
- if 'injected_files' in kwargs:
- self.injected_files = kwargs['injected_files']
- else:
- self.injected_files = None
-
- if 'requested_networks' in kwargs:
- self.networks = kwargs['requested_networks']
- else:
- self.networks = None
-
- if 'user_data' in kwargs:
- self.user_data = kwargs['user_data']
-
- resv_id = None
-
- return ([{'id': '1234', 'display_name': 'fakeinstance',
- 'uuid': FAKE_UUID,
- 'user_id': 'fake',
- 'project_id': 'fake',
- 'created_at': "",
- 'updated_at': "",
- 'fixed_ips': [],
- 'progress': 0}], resv_id)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Createserverext', 'User_data',
- 'Security_groups', 'Os_networks'])
-
- def _make_stub_method(self, canned_return):
- def stub_method(*args, **kwargs):
- return canned_return
- return stub_method
-
- def _create_security_group_request_dict(self, security_groups):
- server = {}
- server['name'] = 'new-server-test'
- server['imageRef'] = 'cedef40a-ed67-4d10-800e-17455edce175'
- server['flavorRef'] = 1
- if security_groups is not None:
- sg_list = []
- for name in security_groups:
- sg_list.append({'name': name})
- server['security_groups'] = sg_list
- return {'server': server}
-
- def _create_networks_request_dict(self, networks):
- server = {}
- server['name'] = 'new-server-test'
- server['imageRef'] = 'cedef40a-ed67-4d10-800e-17455edce175'
- server['flavorRef'] = 1
- if networks is not None:
- network_list = []
- for uuid, fixed_ip in networks:
- network_list.append({'uuid': uuid, 'fixed_ip': fixed_ip})
- server['networks'] = network_list
- return {'server': server}
-
- def _create_user_data_request_dict(self, user_data):
- server = {}
- server['name'] = 'new-server-test'
- server['imageRef'] = 'cedef40a-ed67-4d10-800e-17455edce175'
- server['flavorRef'] = 1
- server['user_data'] = user_data
- return {'server': server}
-
- def _get_create_request_json(self, body_dict):
- req = webob.Request.blank('/v2/fake/os-create-server-ext')
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = jsonutils.dumps(body_dict)
- return req
-
- def _format_xml_request_body(self, body_dict):
- server = body_dict['server']
- body_parts = []
- body_parts.extend([
- '<?xml version="1.0" encoding="UTF-8"?>',
- '<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.1"',
- ' name="%s" imageRef="%s" flavorRef="%s">' % (
- server['name'], server['imageRef'], server['flavorRef'])])
- if 'metadata' in server:
- metadata = server['metadata']
- body_parts.append('<metadata>')
- for item in metadata.iteritems():
- body_parts.append('<meta key="%s">%s</meta>' % item)
- body_parts.append('</metadata>')
- if 'personality' in server:
- personalities = server['personality']
- body_parts.append('<personality>')
- for file in personalities:
- item = (file['path'], file['contents'])
- body_parts.append('<file path="%s">%s</file>' % item)
- body_parts.append('</personality>')
- if 'networks' in server:
- networks = server['networks']
- body_parts.append('<networks>')
- for network in networks:
- item = (network['uuid'], network['fixed_ip'])
- body_parts.append('<network uuid="%s" fixed_ip="%s"></network>'
- % item)
- body_parts.append('</networks>')
- body_parts.append('</server>')
- return ''.join(body_parts)
-
- def _get_create_request_xml(self, body_dict):
- req = webob.Request.blank('/v2/fake/os-create-server-ext')
- req.content_type = 'application/xml'
- req.accept = 'application/xml'
- req.method = 'POST'
- req.body = self._format_xml_request_body(body_dict)
- return req
-
- def _create_instance_with_networks_json(self, networks):
- body_dict = self._create_networks_request_dict(networks)
- request = self._get_create_request_json(body_dict)
- response = request.get_response(fakes.wsgi_app(
- init_only=('servers', 'os-create-server-ext')))
- return request, response, self.networks
-
- def _create_instance_with_user_data_json(self, networks):
- body_dict = self._create_user_data_request_dict(networks)
- request = self._get_create_request_json(body_dict)
- response = request.get_response(fakes.wsgi_app(
- init_only=('servers', 'os-create-server-ext')))
- return request, response, self.user_data
-
- def _create_instance_with_networks_xml(self, networks):
- body_dict = self._create_networks_request_dict(networks)
- request = self._get_create_request_xml(body_dict)
- response = request.get_response(fakes.wsgi_app(
- init_only=('servers', 'os-create-server-ext')))
- return request, response, self.networks
-
- def test_create_instance_with_no_networks(self):
- _create_inst = self._create_instance_with_networks_json
- request, response, networks = _create_inst(networks=None)
- self.assertEqual(response.status_int, 202)
- self.assertIsNone(networks)
-
- def test_create_instance_with_no_networks_xml(self):
- _create_inst = self._create_instance_with_networks_xml
- request, response, networks = _create_inst(networks=None)
- self.assertEqual(response.status_int, 202)
- self.assertIsNone(networks)
-
- def test_create_instance_with_one_network(self):
- _create_inst = self._create_instance_with_networks_json
- request, response, networks = _create_inst([FAKE_NETWORKS[0]])
- self.assertEqual(response.status_int, 202)
- self.assertEqual([FAKE_NETWORKS[0]], networks.as_tuples())
-
- def test_create_instance_with_one_network_xml(self):
- _create_inst = self._create_instance_with_networks_xml
- request, response, networks = _create_inst([FAKE_NETWORKS[0]])
- self.assertEqual(response.status_int, 202)
- self.assertEqual([FAKE_NETWORKS[0]], networks.as_tuples())
-
- def test_create_instance_with_two_networks(self):
- _create_inst = self._create_instance_with_networks_json
- request, response, networks = _create_inst(FAKE_NETWORKS)
- self.assertEqual(response.status_int, 202)
- self.assertEqual(FAKE_NETWORKS, networks.as_tuples())
-
- def test_create_instance_with_two_networks_xml(self):
- _create_inst = self._create_instance_with_networks_xml
- request, response, networks = _create_inst(FAKE_NETWORKS)
- self.assertEqual(response.status_int, 202)
- self.assertEqual(FAKE_NETWORKS, networks.as_tuples())
-
- def test_create_instance_with_duplicate_networks(self):
- _create_inst = self._create_instance_with_networks_json
- request, response, networks = _create_inst(DUPLICATE_NETWORKS)
- self.assertEqual(response.status_int, 400)
- self.assertIsNone(networks)
-
- def test_create_instance_with_duplicate_networks_xml(self):
- _create_inst = self._create_instance_with_networks_xml
- request, response, networks = _create_inst(DUPLICATE_NETWORKS)
- self.assertEqual(response.status_int, 400)
- self.assertIsNone(networks)
-
- def test_create_instance_with_network_no_id(self):
- body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]])
- del body_dict['server']['networks'][0]['uuid']
- request = self._get_create_request_json(body_dict)
- response = request.get_response(fakes.wsgi_app(
- init_only=('servers', 'os-create-server-ext')))
- self.assertEqual(response.status_int, 400)
- self.assertIsNone(self.networks)
-
- def test_create_instance_with_network_no_id_xml(self):
- body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]])
- request = self._get_create_request_xml(body_dict)
- uuid = ' uuid="aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"'
- request.body = request.body.replace(uuid, '')
- response = request.get_response(fakes.wsgi_app(
- init_only=('servers', 'os-create-server-ext')))
- self.assertEqual(response.status_int, 400)
- self.assertIsNone(self.networks)
-
- def test_create_instance_with_network_invalid_id(self):
- _create_inst = self._create_instance_with_networks_json
- request, response, networks = _create_inst(INVALID_NETWORKS)
- self.assertEqual(response.status_int, 400)
- self.assertIsNone(networks)
-
- def test_create_instance_with_network_invalid_id_xml(self):
- _create_inst = self._create_instance_with_networks_xml
- request, response, networks = _create_inst(INVALID_NETWORKS)
- self.assertEqual(response.status_int, 400)
- self.assertIsNone(networks)
-
- def test_create_instance_with_network_empty_fixed_ip(self):
- networks = [('1', '')]
- _create_inst = self._create_instance_with_networks_json
- request, response, networks = _create_inst(networks)
- self.assertEqual(response.status_int, 400)
- self.assertIsNone(networks)
-
- def test_create_instance_with_network_non_string_fixed_ip(self):
- networks = [('1', 12345)]
- _create_inst = self._create_instance_with_networks_json
- request, response, networks = _create_inst(networks)
- self.assertEqual(response.status_int, 400)
- self.assertIsNone(networks)
-
- def test_create_instance_with_network_empty_fixed_ip_xml(self):
- networks = [('1', '')]
- _create_inst = self._create_instance_with_networks_xml
- request, response, networks = _create_inst(networks)
- self.assertEqual(response.status_int, 400)
- self.assertIsNone(networks)
-
- def test_create_instance_with_network_no_fixed_ip(self):
- body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]])
- del body_dict['server']['networks'][0]['fixed_ip']
- request = self._get_create_request_json(body_dict)
- response = request.get_response(fakes.wsgi_app(
- init_only=('servers', 'os-create-server-ext')))
- self.assertEqual(response.status_int, 202)
- self.assertEqual([('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)],
- self.networks.as_tuples())
-
- def test_create_instance_with_network_no_fixed_ip_xml(self):
- body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]])
- request = self._get_create_request_xml(body_dict)
- request.body = request.body.replace(' fixed_ip="10.0.1.12"', '')
- response = request.get_response(fakes.wsgi_app(
- init_only=('servers', 'os-create-server-ext')))
- self.assertEqual(response.status_int, 202)
- self.assertEqual([('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)],
- self.networks.as_tuples())
-
- def test_create_instance_with_userdata(self):
- user_data_contents = '#!/bin/bash\necho "Oh no!"\n'
- user_data_contents = base64.b64encode(user_data_contents)
- _create_inst = self._create_instance_with_user_data_json
- request, response, user_data = _create_inst(user_data_contents)
- self.assertEqual(response.status_int, 202)
- self.assertEqual(user_data, user_data_contents)
-
- def test_create_instance_with_userdata_none(self):
- user_data_contents = None
- _create_inst = self._create_instance_with_user_data_json
- request, response, user_data = _create_inst(user_data_contents)
- self.assertEqual(response.status_int, 202)
- self.assertEqual(user_data, user_data_contents)
-
- def test_create_instance_with_userdata_with_non_b64_content(self):
- user_data_contents = '#!/bin/bash\necho "Oh no!"\n'
- _create_inst = self._create_instance_with_user_data_json
- request, response, user_data = _create_inst(user_data_contents)
- self.assertEqual(response.status_int, 400)
- self.assertIsNone(user_data)
-
- def test_create_instance_with_security_group_json(self):
- security_groups = ['test', 'test1']
- self.stubs.Set(db, 'security_group_get_by_name',
- return_security_group_get_by_name)
- self.stubs.Set(db, 'instance_add_security_group',
- return_instance_add_security_group)
- body_dict = self._create_security_group_request_dict(security_groups)
- request = self._get_create_request_json(body_dict)
- response = request.get_response(fakes.wsgi_app(
- init_only=('servers', 'os-create-server-ext')))
- self.assertEqual(response.status_int, 202)
- self.assertEqual(self.security_group, security_groups)
-
- def test_get_server_by_id_verify_security_groups_json(self):
- self.stubs.Set(db, 'instance_get', fakes.fake_instance_get())
- self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get())
- req = webob.Request.blank('/v2/fake/os-create-server-ext/1')
- req.headers['Content-Type'] = 'application/json'
- response = req.get_response(fakes.wsgi_app(
- init_only=('os-create-server-ext', 'servers')))
- self.assertEqual(response.status_int, 200)
- res_dict = jsonutils.loads(response.body)
- expected_security_group = [{"name": "test"}]
- self.assertEqual(res_dict['server'].get('security_groups'),
- expected_security_group)
-
- def test_get_server_by_id_verify_security_groups_xml(self):
- self.stubs.Set(db, 'instance_get', fakes.fake_instance_get())
- self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get())
- req = webob.Request.blank('/v2/fake/os-create-server-ext/1')
- req.headers['Accept'] = 'application/xml'
- response = req.get_response(fakes.wsgi_app(
- init_only=('os-create-server-ext', 'servers')))
- self.assertEqual(response.status_int, 200)
- dom = minidom.parseString(response.body)
- server = dom.childNodes[0]
- sec_groups = server.getElementsByTagName('security_groups')[0]
- sec_group = sec_groups.getElementsByTagName('security_group')[0]
- self.assertEqual('test', sec_group.getAttribute("name"))
diff --git a/nova/tests/api/openstack/compute/contrib/test_deferred_delete.py b/nova/tests/api/openstack/compute/contrib/test_deferred_delete.py
deleted file mode 100644
index 0c335d0a02..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_deferred_delete.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-import webob
-
-from nova.api.openstack.compute.contrib import deferred_delete
-from nova.api.openstack.compute.plugins.v3 import deferred_delete as dd_v21
-from nova.compute import api as compute_api
-from nova import context
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-class FakeRequest(object):
- def __init__(self, context):
- self.environ = {'nova.context': context}
-
-
-class DeferredDeleteExtensionTestV21(test.NoDBTestCase):
- ext_ver = dd_v21.DeferredDeleteController
-
- def setUp(self):
- super(DeferredDeleteExtensionTestV21, self).setUp()
- self.fake_input_dict = {}
- self.fake_uuid = 'fake_uuid'
- self.fake_context = context.RequestContext('fake', 'fake')
- self.fake_req = FakeRequest(self.fake_context)
- self.extension = self.ext_ver()
-
- def test_force_delete(self):
- self.mox.StubOutWithMock(compute_api.API, 'get')
- self.mox.StubOutWithMock(compute_api.API, 'force_delete')
-
- fake_instance = 'fake_instance'
-
- compute_api.API.get(self.fake_context, self.fake_uuid,
- expected_attrs=None,
- want_objects=True).AndReturn(fake_instance)
- compute_api.API.force_delete(self.fake_context, fake_instance)
-
- self.mox.ReplayAll()
- res = self.extension._force_delete(self.fake_req, self.fake_uuid,
- self.fake_input_dict)
- # NOTE: on v2.1, http status code is set as wsgi_code of API
- # method instead of status_int in a response object.
- if isinstance(self.extension, dd_v21.DeferredDeleteController):
- status_int = self.extension._force_delete.wsgi_code
- else:
- status_int = res.status_int
- self.assertEqual(202, status_int)
-
- def test_force_delete_instance_not_found(self):
- self.mox.StubOutWithMock(compute_api.API, 'get')
-
- compute_api.API.get(self.fake_context, self.fake_uuid,
- expected_attrs=None,
- want_objects=True).AndRaise(
- exception.InstanceNotFound(instance_id='instance-0000'))
-
- self.mox.ReplayAll()
- self.assertRaises(webob.exc.HTTPNotFound,
- self.extension._force_delete,
- self.fake_req,
- self.fake_uuid,
- self.fake_input_dict)
-
- @mock.patch.object(compute_api.API, 'get')
- @mock.patch.object(compute_api.API, 'force_delete',
- side_effect=exception.InstanceIsLocked(
- instance_uuid='fake_uuid'))
- def test_force_delete_instance_locked(self, mock_force_delete, mock_get):
- req = fakes.HTTPRequest.blank('/v2/fake/servers/fake_uuid/action')
- ex = self.assertRaises(webob.exc.HTTPConflict,
- self.extension._force_delete,
- req, 'fake_uuid', '')
- self.assertIn('Instance fake_uuid is locked', ex.explanation)
-
- def test_restore(self):
- self.mox.StubOutWithMock(compute_api.API, 'get')
- self.mox.StubOutWithMock(compute_api.API, 'restore')
-
- fake_instance = 'fake_instance'
-
- compute_api.API.get(self.fake_context, self.fake_uuid,
- expected_attrs=None,
- want_objects=True).AndReturn(fake_instance)
- compute_api.API.restore(self.fake_context, fake_instance)
-
- self.mox.ReplayAll()
- res = self.extension._restore(self.fake_req, self.fake_uuid,
- self.fake_input_dict)
- # NOTE: on v2.1, http status code is set as wsgi_code of API
- # method instead of status_int in a response object.
- if isinstance(self.extension, dd_v21.DeferredDeleteController):
- status_int = self.extension._restore.wsgi_code
- else:
- status_int = res.status_int
- self.assertEqual(202, status_int)
-
- def test_restore_instance_not_found(self):
- self.mox.StubOutWithMock(compute_api.API, 'get')
-
- compute_api.API.get(self.fake_context, self.fake_uuid,
- expected_attrs=None, want_objects=True).AndRaise(
- exception.InstanceNotFound(instance_id='instance-0000'))
-
- self.mox.ReplayAll()
- self.assertRaises(webob.exc.HTTPNotFound, self.extension._restore,
- self.fake_req, self.fake_uuid,
- self.fake_input_dict)
-
- def test_restore_raises_conflict_on_invalid_state(self):
- self.mox.StubOutWithMock(compute_api.API, 'get')
- self.mox.StubOutWithMock(compute_api.API, 'restore')
-
- fake_instance = 'fake_instance'
- exc = exception.InstanceInvalidState(attr='fake_attr',
- state='fake_state', method='fake_method',
- instance_uuid='fake')
-
- compute_api.API.get(self.fake_context, self.fake_uuid,
- expected_attrs=None,
- want_objects=True).AndReturn(fake_instance)
- compute_api.API.restore(self.fake_context, fake_instance).AndRaise(
- exc)
-
- self.mox.ReplayAll()
- self.assertRaises(webob.exc.HTTPConflict, self.extension._restore,
- self.fake_req, self.fake_uuid, self.fake_input_dict)
-
-
-class DeferredDeleteExtensionTestV2(DeferredDeleteExtensionTestV21):
- ext_ver = deferred_delete.DeferredDeleteController
diff --git a/nova/tests/api/openstack/compute/contrib/test_disk_config.py b/nova/tests/api/openstack/compute/contrib/test_disk_config.py
deleted file mode 100644
index 1cdafcdd5f..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_disk_config.py
+++ /dev/null
@@ -1,449 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-from oslo.serialization import jsonutils
-
-from nova.api.openstack import compute
-from nova.compute import api as compute_api
-from nova import db
-from nova import objects
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-import nova.tests.image.fake
-
-
-MANUAL_INSTANCE_UUID = fakes.FAKE_UUID
-AUTO_INSTANCE_UUID = fakes.FAKE_UUID.replace('a', 'b')
-
-stub_instance = fakes.stub_instance
-
-API_DISK_CONFIG = 'OS-DCF:diskConfig'
-
-
-def instance_addresses(context, instance_id):
- return None
-
-
-class DiskConfigTestCaseV21(test.TestCase):
-
- def setUp(self):
- super(DiskConfigTestCaseV21, self).setUp()
- self._set_up_app()
- self._setup_fake_image_service()
-
- fakes.stub_out_nw_api(self.stubs)
-
- FAKE_INSTANCES = [
- fakes.stub_instance(1,
- uuid=MANUAL_INSTANCE_UUID,
- auto_disk_config=False),
- fakes.stub_instance(2,
- uuid=AUTO_INSTANCE_UUID,
- auto_disk_config=True)
- ]
-
- def fake_instance_get(context, id_):
- for instance in FAKE_INSTANCES:
- if id_ == instance['id']:
- return instance
-
- self.stubs.Set(db, 'instance_get', fake_instance_get)
-
- def fake_instance_get_by_uuid(context, uuid,
- columns_to_join=None, use_slave=False):
- for instance in FAKE_INSTANCES:
- if uuid == instance['uuid']:
- return instance
-
- self.stubs.Set(db, 'instance_get_by_uuid',
- fake_instance_get_by_uuid)
-
- def fake_instance_get_all(context, *args, **kwargs):
- return FAKE_INSTANCES
-
- self.stubs.Set(db, 'instance_get_all', fake_instance_get_all)
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_instance_get_all)
-
- self.stubs.Set(objects.Instance, 'save',
- lambda *args, **kwargs: None)
-
- def fake_rebuild(*args, **kwargs):
- pass
-
- self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
-
- def fake_instance_create(context, inst_, session=None):
- inst = fake_instance.fake_db_instance(**{
- 'id': 1,
- 'uuid': AUTO_INSTANCE_UUID,
- 'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
- 'updated_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
- 'progress': 0,
- 'name': 'instance-1', # this is a property
- 'task_state': '',
- 'vm_state': '',
- 'auto_disk_config': inst_['auto_disk_config'],
- 'security_groups': inst_['security_groups'],
- })
-
- def fake_instance_get_for_create(context, id_, *args, **kwargs):
- return (inst, inst)
-
- self.stubs.Set(db, 'instance_update_and_get_original',
- fake_instance_get_for_create)
-
- def fake_instance_get_all_for_create(context, *args, **kwargs):
- return [inst]
- self.stubs.Set(db, 'instance_get_all',
- fake_instance_get_all_for_create)
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_instance_get_all_for_create)
-
- def fake_instance_add_security_group(context, instance_id,
- security_group_id):
- pass
-
- self.stubs.Set(db,
- 'instance_add_security_group',
- fake_instance_add_security_group)
-
- return inst
-
- self.stubs.Set(db, 'instance_create', fake_instance_create)
-
- def _set_up_app(self):
- self.app = compute.APIRouterV21(init_only=('servers', 'images',
- 'os-disk-config'))
-
- def _get_expected_msg_for_invalid_disk_config(self):
- return ('{{"badRequest": {{"message": "Invalid input for'
- ' field/attribute {0}. Value: {1}. u\'{1}\' is'
- ' not one of [\'AUTO\', \'MANUAL\']", "code": 400}}}}')
-
- def _setup_fake_image_service(self):
- self.image_service = nova.tests.image.fake.stub_out_image_service(
- self.stubs)
- timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
- image = {'id': '88580842-f50a-11e2-8d3a-f23c91aec05e',
- 'name': 'fakeimage7',
- 'created_at': timestamp,
- 'updated_at': timestamp,
- 'deleted_at': None,
- 'deleted': False,
- 'status': 'active',
- 'is_public': False,
- 'container_format': 'ova',
- 'disk_format': 'vhd',
- 'size': '74185822',
- 'properties': {'auto_disk_config': 'Disabled'}}
- self.image_service.create(None, image)
-
- def tearDown(self):
- super(DiskConfigTestCaseV21, self).tearDown()
- nova.tests.image.fake.FakeImageService_reset()
-
- def assertDiskConfig(self, dict_, value):
- self.assertIn(API_DISK_CONFIG, dict_)
- self.assertEqual(dict_[API_DISK_CONFIG], value)
-
- def test_show_server(self):
- req = fakes.HTTPRequest.blank(
- '/fake/servers/%s' % MANUAL_INSTANCE_UUID)
- res = req.get_response(self.app)
- server_dict = jsonutils.loads(res.body)['server']
- self.assertDiskConfig(server_dict, 'MANUAL')
-
- req = fakes.HTTPRequest.blank(
- '/fake/servers/%s' % AUTO_INSTANCE_UUID)
- res = req.get_response(self.app)
- server_dict = jsonutils.loads(res.body)['server']
- self.assertDiskConfig(server_dict, 'AUTO')
-
- def test_detail_servers(self):
- req = fakes.HTTPRequest.blank('/fake/servers/detail')
- res = req.get_response(self.app)
- server_dicts = jsonutils.loads(res.body)['servers']
-
- expectations = ['MANUAL', 'AUTO']
- for server_dict, expected in zip(server_dicts, expectations):
- self.assertDiskConfig(server_dict, expected)
-
- def test_show_image(self):
- req = fakes.HTTPRequest.blank(
- '/fake/images/a440c04b-79fa-479c-bed1-0b816eaec379')
- res = req.get_response(self.app)
- image_dict = jsonutils.loads(res.body)['image']
- self.assertDiskConfig(image_dict, 'MANUAL')
-
- req = fakes.HTTPRequest.blank(
- '/fake/images/70a599e0-31e7-49b7-b260-868f441e862b')
- res = req.get_response(self.app)
- image_dict = jsonutils.loads(res.body)['image']
- self.assertDiskConfig(image_dict, 'AUTO')
-
- def test_detail_image(self):
- req = fakes.HTTPRequest.blank('/fake/images/detail')
- res = req.get_response(self.app)
- image_dicts = jsonutils.loads(res.body)['images']
-
- expectations = ['MANUAL', 'AUTO']
- for image_dict, expected in zip(image_dicts, expectations):
- # NOTE(sirp): image fixtures 6 and 7 are setup for
- # auto_disk_config testing
- if image_dict['id'] in (6, 7):
- self.assertDiskConfig(image_dict, expected)
-
- def test_create_server_override_auto(self):
- req = fakes.HTTPRequest.blank('/fake/servers')
- req.method = 'POST'
- req.content_type = 'application/json'
- body = {'server': {
- 'name': 'server_test',
- 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'flavorRef': '1',
- API_DISK_CONFIG: 'AUTO'
- }}
-
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- server_dict = jsonutils.loads(res.body)['server']
- self.assertDiskConfig(server_dict, 'AUTO')
-
- def test_create_server_override_manual(self):
- req = fakes.HTTPRequest.blank('/fake/servers')
- req.method = 'POST'
- req.content_type = 'application/json'
- body = {'server': {
- 'name': 'server_test',
- 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'flavorRef': '1',
- API_DISK_CONFIG: 'MANUAL'
- }}
-
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- server_dict = jsonutils.loads(res.body)['server']
- self.assertDiskConfig(server_dict, 'MANUAL')
-
- def test_create_server_detect_from_image(self):
- """If user doesn't pass in diskConfig for server, use image metadata
- to specify AUTO or MANUAL.
- """
- req = fakes.HTTPRequest.blank('/fake/servers')
- req.method = 'POST'
- req.content_type = 'application/json'
- body = {'server': {
- 'name': 'server_test',
- 'imageRef': 'a440c04b-79fa-479c-bed1-0b816eaec379',
- 'flavorRef': '1',
- }}
-
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- server_dict = jsonutils.loads(res.body)['server']
- self.assertDiskConfig(server_dict, 'MANUAL')
-
- req = fakes.HTTPRequest.blank('/fake/servers')
- req.method = 'POST'
- req.content_type = 'application/json'
- body = {'server': {
- 'name': 'server_test',
- 'imageRef': '70a599e0-31e7-49b7-b260-868f441e862b',
- 'flavorRef': '1',
- }}
-
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- server_dict = jsonutils.loads(res.body)['server']
- self.assertDiskConfig(server_dict, 'AUTO')
-
- def test_create_server_detect_from_image_disabled_goes_to_manual(self):
- req = fakes.HTTPRequest.blank('/fake/servers')
- req.method = 'POST'
- req.content_type = 'application/json'
- body = {'server': {
- 'name': 'server_test',
- 'imageRef': '88580842-f50a-11e2-8d3a-f23c91aec05e',
- 'flavorRef': '1',
- }}
-
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- server_dict = jsonutils.loads(res.body)['server']
- self.assertDiskConfig(server_dict, 'MANUAL')
-
- def test_create_server_errors_when_disabled_and_auto(self):
- req = fakes.HTTPRequest.blank('/fake/servers')
- req.method = 'POST'
- req.content_type = 'application/json'
- body = {'server': {
- 'name': 'server_test',
- 'imageRef': '88580842-f50a-11e2-8d3a-f23c91aec05e',
- 'flavorRef': '1',
- API_DISK_CONFIG: 'AUTO'
- }}
-
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 400)
-
- def test_create_server_when_disabled_and_manual(self):
- req = fakes.HTTPRequest.blank('/fake/servers')
- req.method = 'POST'
- req.content_type = 'application/json'
- body = {'server': {
- 'name': 'server_test',
- 'imageRef': '88580842-f50a-11e2-8d3a-f23c91aec05e',
- 'flavorRef': '1',
- API_DISK_CONFIG: 'MANUAL'
- }}
-
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- server_dict = jsonutils.loads(res.body)['server']
- self.assertDiskConfig(server_dict, 'MANUAL')
-
- def _test_update_server_disk_config(self, uuid, disk_config):
- req = fakes.HTTPRequest.blank(
- '/fake/servers/%s' % uuid)
- req.method = 'PUT'
- req.content_type = 'application/json'
- body = {'server': {API_DISK_CONFIG: disk_config}}
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- server_dict = jsonutils.loads(res.body)['server']
- self.assertDiskConfig(server_dict, disk_config)
-
- def test_update_server_override_auto(self):
- self._test_update_server_disk_config(AUTO_INSTANCE_UUID, 'AUTO')
-
- def test_update_server_override_manual(self):
- self._test_update_server_disk_config(MANUAL_INSTANCE_UUID, 'MANUAL')
-
- def test_update_server_invalid_disk_config(self):
- # Return BadRequest if user passes an invalid diskConfig value.
- req = fakes.HTTPRequest.blank(
- '/fake/servers/%s' % MANUAL_INSTANCE_UUID)
- req.method = 'PUT'
- req.content_type = 'application/json'
- body = {'server': {API_DISK_CONFIG: 'server_test'}}
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 400)
- expected_msg = self._get_expected_msg_for_invalid_disk_config()
- self.assertEqual(expected_msg.format(API_DISK_CONFIG, 'server_test'),
- res.body)
-
- def _test_rebuild_server_disk_config(self, uuid, disk_config):
- req = fakes.HTTPRequest.blank(
- '/fake/servers/%s/action' % uuid)
- req.method = 'POST'
- req.content_type = 'application/json'
- body = {"rebuild": {
- 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
- API_DISK_CONFIG: disk_config
- }}
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- server_dict = jsonutils.loads(res.body)['server']
- self.assertDiskConfig(server_dict, disk_config)
-
- def test_rebuild_server_override_auto(self):
- self._test_rebuild_server_disk_config(AUTO_INSTANCE_UUID, 'AUTO')
-
- def test_rebuild_server_override_manual(self):
- self._test_rebuild_server_disk_config(MANUAL_INSTANCE_UUID, 'MANUAL')
-
- def test_create_server_with_auto_disk_config(self):
- req = fakes.HTTPRequest.blank('/fake/servers')
- req.method = 'POST'
- req.content_type = 'application/json'
- body = {'server': {
- 'name': 'server_test',
- 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'flavorRef': '1',
- API_DISK_CONFIG: 'AUTO'
- }}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertIn('auto_disk_config', kwargs)
- self.assertEqual(True, kwargs['auto_disk_config'])
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
-
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- server_dict = jsonutils.loads(res.body)['server']
- self.assertDiskConfig(server_dict, 'AUTO')
-
- def test_rebuild_server_with_auto_disk_config(self):
- req = fakes.HTTPRequest.blank(
- '/fake/servers/%s/action' % AUTO_INSTANCE_UUID)
- req.method = 'POST'
- req.content_type = 'application/json'
- body = {"rebuild": {
- 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
- API_DISK_CONFIG: 'AUTO'
- }}
-
- def rebuild(*args, **kwargs):
- self.assertIn('auto_disk_config', kwargs)
- self.assertEqual(True, kwargs['auto_disk_config'])
-
- self.stubs.Set(compute_api.API, 'rebuild', rebuild)
-
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- server_dict = jsonutils.loads(res.body)['server']
- self.assertDiskConfig(server_dict, 'AUTO')
-
- def test_resize_server_with_auto_disk_config(self):
- req = fakes.HTTPRequest.blank(
- '/fake/servers/%s/action' % AUTO_INSTANCE_UUID)
- req.method = 'POST'
- req.content_type = 'application/json'
- body = {"resize": {
- "flavorRef": "3",
- API_DISK_CONFIG: 'AUTO'
- }}
-
- def resize(*args, **kwargs):
- self.assertIn('auto_disk_config', kwargs)
- self.assertEqual(True, kwargs['auto_disk_config'])
-
- self.stubs.Set(compute_api.API, 'resize', resize)
-
- req.body = jsonutils.dumps(body)
- req.get_response(self.app)
-
-
-class DiskConfigTestCaseV2(DiskConfigTestCaseV21):
- def _set_up_app(self):
- self.flags(verbose=True,
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Disk_config'])
-
- self.app = compute.APIRouter(init_only=('servers', 'images'))
-
- def _get_expected_msg_for_invalid_disk_config(self):
- return ('{{"badRequest": {{"message": "{0} must be either'
- ' \'MANUAL\' or \'AUTO\'.", "code": 400}}}}')
diff --git a/nova/tests/api/openstack/compute/contrib/test_evacuate.py b/nova/tests/api/openstack/compute/contrib/test_evacuate.py
deleted file mode 100644
index c2094518bf..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_evacuate.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import uuid
-
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-import webob
-
-from nova.compute import api as compute_api
-from nova.compute import vm_states
-from nova import context
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-
-
-CONF = cfg.CONF
-CONF.import_opt('password_length', 'nova.utils')
-
-
-def fake_compute_api(*args, **kwargs):
- return True
-
-
-def fake_compute_api_get(self, context, instance_id, want_objects=False,
- **kwargs):
- # BAD_UUID is something that does not exist
- if instance_id == 'BAD_UUID':
- raise exception.InstanceNotFound(instance_id=instance_id)
- else:
- return fake_instance.fake_instance_obj(context, id=1, uuid=instance_id,
- task_state=None, host='host1',
- vm_state=vm_states.ACTIVE)
-
-
-def fake_service_get_by_compute_host(self, context, host):
- if host == 'bad-host':
- raise exception.ComputeHostNotFound(host=host)
- else:
- return {
- 'host_name': host,
- 'service': 'compute',
- 'zone': 'nova'
- }
-
-
-class EvacuateTestV21(test.NoDBTestCase):
-
- _methods = ('resize', 'evacuate')
- fake_url = '/v2/fake'
-
- def setUp(self):
- super(EvacuateTestV21, self).setUp()
- self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
- self.stubs.Set(compute_api.HostAPI, 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
- self.UUID = uuid.uuid4()
- for _method in self._methods:
- self.stubs.Set(compute_api.API, _method, fake_compute_api)
-
- def _fake_wsgi_app(self, ctxt):
- return fakes.wsgi_app_v21(fake_auth_context=ctxt)
-
- def _gen_resource_with_app(self, json_load, is_admin=True, uuid=None):
- ctxt = context.get_admin_context()
- ctxt.user_id = 'fake'
- ctxt.project_id = 'fake'
- ctxt.is_admin = is_admin
- app = self._fake_wsgi_app(ctxt)
- req = webob.Request.blank('%s/servers/%s/action' % (self.fake_url,
- uuid or self.UUID))
- req.method = 'POST'
- base_json_load = {'evacuate': json_load}
- req.body = jsonutils.dumps(base_json_load)
- req.content_type = 'application/json'
-
- return req.get_response(app)
-
- def _fake_update(self, inst, context, instance, task_state,
- expected_task_state):
- return None
-
- def test_evacuate_with_valid_instance(self):
- res = self._gen_resource_with_app({'host': 'my-host',
- 'onSharedStorage': 'False',
- 'adminPass': 'MyNewPass'})
-
- self.assertEqual(res.status_int, 200)
-
- def test_evacuate_with_invalid_instance(self):
- res = self._gen_resource_with_app({'host': 'my-host',
- 'onSharedStorage': 'False',
- 'adminPass': 'MyNewPass'},
- uuid='BAD_UUID')
-
- self.assertEqual(res.status_int, 404)
-
- def test_evacuate_with_active_service(self):
- def fake_evacuate(*args, **kwargs):
- raise exception.ComputeServiceInUse("Service still in use")
-
- self.stubs.Set(compute_api.API, 'evacuate', fake_evacuate)
- res = self._gen_resource_with_app({'host': 'my-host',
- 'onSharedStorage': 'False',
- 'adminPass': 'MyNewPass'})
- self.assertEqual(res.status_int, 400)
-
- def test_evacuate_instance_with_no_target(self):
- res = self._gen_resource_with_app({'onSharedStorage': 'False',
- 'adminPass': 'MyNewPass'})
- self.assertEqual(200, res.status_int)
-
- def test_evacuate_instance_without_on_shared_storage(self):
- res = self._gen_resource_with_app({'host': 'my-host',
- 'adminPass': 'MyNewPass'})
- self.assertEqual(res.status_int, 400)
-
- def test_evacuate_instance_with_invalid_characters_host(self):
- host = 'abc!#'
- res = self._gen_resource_with_app({'host': host,
- 'onSharedStorage': 'False',
- 'adminPass': 'MyNewPass'})
- self.assertEqual(400, res.status_int)
-
- def test_evacuate_instance_with_too_long_host(self):
- host = 'a' * 256
- res = self._gen_resource_with_app({'host': host,
- 'onSharedStorage': 'False',
- 'adminPass': 'MyNewPass'})
- self.assertEqual(400, res.status_int)
-
- def test_evacuate_instance_with_invalid_on_shared_storage(self):
- res = self._gen_resource_with_app({'host': 'my-host',
- 'onSharedStorage': 'foo',
- 'adminPass': 'MyNewPass'})
- self.assertEqual(400, res.status_int)
-
- def test_evacuate_instance_with_bad_target(self):
- res = self._gen_resource_with_app({'host': 'bad-host',
- 'onSharedStorage': 'False',
- 'adminPass': 'MyNewPass'})
- self.assertEqual(res.status_int, 404)
-
- def test_evacuate_instance_with_target(self):
- self.stubs.Set(compute_api.API, 'update', self._fake_update)
-
- res = self._gen_resource_with_app({'host': 'my-host',
- 'onSharedStorage': 'False',
- 'adminPass': 'MyNewPass'})
- self.assertEqual(res.status_int, 200)
- resp_json = jsonutils.loads(res.body)
- self.assertEqual("MyNewPass", resp_json['adminPass'])
-
- def test_evacuate_shared_and_pass(self):
- self.stubs.Set(compute_api.API, 'update', self._fake_update)
- res = self._gen_resource_with_app({'host': 'my-host',
- 'onSharedStorage': 'True',
- 'adminPass': 'MyNewPass'})
- self.assertEqual(res.status_int, 400)
-
- def test_evacuate_not_shared_pass_generated(self):
- self.stubs.Set(compute_api.API, 'update', self._fake_update)
- res = self._gen_resource_with_app({'host': 'my-host',
- 'onSharedStorage': 'False'})
- self.assertEqual(res.status_int, 200)
- resp_json = jsonutils.loads(res.body)
- self.assertEqual(CONF.password_length, len(resp_json['adminPass']))
-
- def test_evacuate_shared(self):
- self.stubs.Set(compute_api.API, 'update', self._fake_update)
- res = self._gen_resource_with_app({'host': 'my-host',
- 'onSharedStorage': 'True'})
- self.assertEqual(res.status_int, 200)
-
- def test_not_admin(self):
- res = self._gen_resource_with_app({'host': 'my-host',
- 'onSharedStorage': 'True'},
- is_admin=False)
- self.assertEqual(res.status_int, 403)
-
- def test_evacuate_to_same_host(self):
- res = self._gen_resource_with_app({'host': 'host1',
- 'onSharedStorage': 'False',
- 'adminPass': 'MyNewPass'})
- self.assertEqual(res.status_int, 400)
-
- def test_evacuate_instance_with_empty_host(self):
- res = self._gen_resource_with_app({'host': '',
- 'onSharedStorage': 'False',
- 'adminPass': 'MyNewPass'})
- self.assertEqual(400, res.status_int)
-
- def test_evacuate_instance_with_underscore_in_hostname(self):
- # NOTE: The hostname grammar in RFC952 does not allow for
- # underscores in hostnames. However, we should test that it
- # is supported because it sometimes occurs in real systems.
- self.stubs.Set(compute_api.API, 'update', self._fake_update)
- res = self._gen_resource_with_app({'host': 'underscore_hostname',
- 'onSharedStorage': 'False',
- 'adminPass': 'MyNewPass'})
- self.assertEqual(200, res.status_int)
- resp_json = jsonutils.loads(res.body)
- self.assertEqual("MyNewPass", resp_json['adminPass'])
-
- def test_evacuate_disable_password_return(self):
- self._test_evacuate_enable_instance_password_conf(False)
-
- def test_evacuate_enable_password_return(self):
- self._test_evacuate_enable_instance_password_conf(True)
-
- def _test_evacuate_enable_instance_password_conf(self, enable_pass):
- self.flags(enable_instance_password=enable_pass)
- self.stubs.Set(compute_api.API, 'update', self._fake_update)
-
- res = self._gen_resource_with_app({'host': 'my_host',
- 'onSharedStorage': 'False'})
- self.assertEqual(res.status_int, 200)
- resp_json = jsonutils.loads(res.body)
- if enable_pass:
- self.assertIn('adminPass', resp_json)
- else:
- self.assertIsNone(resp_json.get('adminPass'))
-
-
-class EvacuateTestV2(EvacuateTestV21):
-
- def setUp(self):
- super(EvacuateTestV2, self).setUp()
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Evacuate'])
-
- def _fake_wsgi_app(self, ctxt):
- return fakes.wsgi_app(fake_auth_context=ctxt)
-
- def test_evacuate_instance_with_no_target(self):
- res = self._gen_resource_with_app({'onSharedStorage': 'False',
- 'adminPass': 'MyNewPass'})
- self.assertEqual(400, res.status_int)
-
- def test_evacuate_instance_with_too_long_host(self):
- pass
-
- def test_evacuate_instance_with_invalid_characters_host(self):
- pass
-
- def test_evacuate_instance_with_invalid_on_shared_storage(self):
- pass
-
- def test_evacuate_disable_password_return(self):
- pass
-
- def test_evacuate_enable_password_return(self):
- pass
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_availability_zone.py b/nova/tests/api/openstack/compute/contrib/test_extended_availability_zone.py
deleted file mode 100644
index cec539f0e1..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_extended_availability_zone.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute.contrib import extended_availability_zone
-from nova import availability_zones
-from nova import compute
-from nova.compute import vm_states
-from nova import db
-from nova import exception
-from nova import objects
-from nova.objects import instance as instance_obj
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-
-UUID1 = '00000000-0000-0000-0000-000000000001'
-UUID2 = '00000000-0000-0000-0000-000000000002'
-UUID3 = '00000000-0000-0000-0000-000000000003'
-
-
-def fake_compute_get_az(*args, **kwargs):
- inst = fakes.stub_instance(1, uuid=UUID3, host="get-host",
- vm_state=vm_states.ACTIVE,
- availability_zone='fakeaz')
- return fake_instance.fake_instance_obj(args[1], **inst)
-
-
-def fake_compute_get_empty(*args, **kwargs):
- inst = fakes.stub_instance(1, uuid=UUID3, host="",
- vm_state=vm_states.ACTIVE,
- availability_zone='fakeaz')
- return fake_instance.fake_instance_obj(args[1], **inst)
-
-
-def fake_compute_get(*args, **kwargs):
- inst = fakes.stub_instance(1, uuid=UUID3, host="get-host",
- vm_state=vm_states.ACTIVE)
- return fake_instance.fake_instance_obj(args[1], **inst)
-
-
-def fake_compute_get_all(*args, **kwargs):
- inst1 = fakes.stub_instance(1, uuid=UUID1, host="all-host",
- vm_state=vm_states.ACTIVE)
- inst2 = fakes.stub_instance(2, uuid=UUID2, host="all-host",
- vm_state=vm_states.ACTIVE)
- db_list = [inst1, inst2]
- fields = instance_obj.INSTANCE_DEFAULT_FIELDS
- return instance_obj._make_instance_list(args[1],
- objects.InstanceList(),
- db_list, fields)
-
-
-def fake_get_host_availability_zone(context, host):
- return host
-
-
-def fake_get_no_host_availability_zone(context, host):
- return None
-
-
-class ExtendedAvailabilityZoneTestV21(test.TestCase):
- content_type = 'application/json'
- prefix = 'OS-EXT-AZ:'
- base_url = '/v2/fake/servers/'
-
- def setUp(self):
- super(ExtendedAvailabilityZoneTestV21, self).setUp()
- availability_zones.reset_cache()
- fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
- self.stubs.Set(availability_zones, 'get_host_availability_zone',
- fake_get_host_availability_zone)
- return_server = fakes.fake_instance_get()
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app_v21(init_only=None))
- return res
-
- def _get_server(self, body):
- return jsonutils.loads(body).get('server')
-
- def _get_servers(self, body):
- return jsonutils.loads(body).get('servers')
-
- def assertAvailabilityZone(self, server, az):
- self.assertEqual(server.get('%savailability_zone' % self.prefix),
- az)
-
- def test_show_no_host_az(self):
- self.stubs.Set(compute.api.API, 'get', fake_compute_get_az)
- self.stubs.Set(availability_zones, 'get_host_availability_zone',
- fake_get_no_host_availability_zone)
-
- url = self.base_url + UUID3
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- self.assertAvailabilityZone(self._get_server(res.body), 'fakeaz')
-
- def test_show_empty_host_az(self):
- self.stubs.Set(compute.api.API, 'get', fake_compute_get_empty)
- self.stubs.Set(availability_zones, 'get_host_availability_zone',
- fake_get_no_host_availability_zone)
-
- url = self.base_url + UUID3
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- self.assertAvailabilityZone(self._get_server(res.body), 'fakeaz')
-
- def test_show(self):
- url = self.base_url + UUID3
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- self.assertAvailabilityZone(self._get_server(res.body), 'get-host')
-
- def test_detail(self):
- url = self.base_url + 'detail'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- for i, server in enumerate(self._get_servers(res.body)):
- self.assertAvailabilityZone(server, 'all-host')
-
- def test_no_instance_passthrough_404(self):
-
- def fake_compute_get(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id='fake')
-
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- url = self.base_url + '70f6db34-de8d-4fbd-aafb-4065bdfa6115'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 404)
-
-
-class ExtendedAvailabilityZoneTestV2(ExtendedAvailabilityZoneTestV21):
-
- def setUp(self):
- super(ExtendedAvailabilityZoneTestV2, self).setUp()
-
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Extended_availability_zone'])
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
- return res
-
-
-class ExtendedAvailabilityZoneXmlTestV2(ExtendedAvailabilityZoneTestV2):
- content_type = 'application/xml'
- prefix = '{%s}' % extended_availability_zone.\
- Extended_availability_zone.namespace
-
- def _get_server(self, body):
- return etree.XML(body)
-
- def _get_servers(self, body):
- return etree.XML(body).getchildren()
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_evacuate_find_host.py b/nova/tests/api/openstack/compute/contrib/test_extended_evacuate_find_host.py
deleted file mode 100644
index a011c7dddf..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_extended_evacuate_find_host.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import uuid
-
-import mock
-from oslo.serialization import jsonutils
-import webob
-
-from nova.compute import vm_states
-from nova import context
-from nova.objects import instance as instance_obj
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-
-
-class ExtendedEvacuateFindHostTest(test.NoDBTestCase):
-
- def setUp(self):
- super(ExtendedEvacuateFindHostTest, self).setUp()
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Extended_evacuate_find_host',
- 'Evacuate'])
- self.UUID = uuid.uuid4()
-
- def _get_admin_context(self, user_id='fake', project_id='fake'):
- ctxt = context.get_admin_context()
- ctxt.user_id = user_id
- ctxt.project_id = project_id
- return ctxt
-
- def _fake_compute_api(*args, **kwargs):
- return True
-
- def _fake_compute_api_get(self, context, instance_id, **kwargs):
- instance = fake_instance.fake_db_instance(id=1, uuid=uuid,
- task_state=None,
- host='host1',
- vm_state=vm_states.ACTIVE)
- instance = instance_obj.Instance._from_db_object(context,
- instance_obj.Instance(),
- instance)
- return instance
-
- def _fake_service_get_by_compute_host(self, context, host):
- return {'host_name': host,
- 'service': 'compute',
- 'zone': 'nova'
- }
-
- @mock.patch('nova.compute.api.HostAPI.service_get_by_compute_host')
- @mock.patch('nova.compute.api.API.get')
- @mock.patch('nova.compute.api.API.evacuate')
- def test_evacuate_instance_with_no_target(self, evacuate_mock,
- api_get_mock,
- service_get_mock):
- service_get_mock.side_effects = self._fake_service_get_by_compute_host
- api_get_mock.side_effects = self._fake_compute_api_get
- evacuate_mock.side_effects = self._fake_compute_api
-
- ctxt = self._get_admin_context()
- app = fakes.wsgi_app(fake_auth_context=ctxt)
- req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
- req.method = 'POST'
- req.body = jsonutils.dumps({
- 'evacuate': {
- 'onSharedStorage': 'False',
- 'adminPass': 'MyNewPass'
- }
- })
- req.content_type = 'application/json'
- res = req.get_response(app)
- self.assertEqual(200, res.status_int)
- evacuate_mock.assert_called_once_with(mock.ANY, mock.ANY, None,
- mock.ANY, mock.ANY)
-
- @mock.patch('nova.compute.api.HostAPI.service_get_by_compute_host')
- @mock.patch('nova.compute.api.API.get')
- def test_no_target_fails_if_extension_not_loaded(self, api_get_mock,
- service_get_mock):
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Evacuate'])
- service_get_mock.side_effects = self._fake_service_get_by_compute_host
- api_get_mock.side_effects = self._fake_compute_api_get
-
- ctxt = self._get_admin_context()
- app = fakes.wsgi_app(fake_auth_context=ctxt)
- req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
- req.method = 'POST'
- req.body = jsonutils.dumps({
- 'evacuate': {
- 'onSharedStorage': 'False',
- 'adminPass': 'MyNewPass'
- }
- })
- req.content_type = 'application/json'
- res = req.get_response(app)
- self.assertEqual(400, res.status_int)
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_hypervisors.py b/nova/tests/api/openstack/compute/contrib/test_extended_hypervisors.py
deleted file mode 100644
index a249c7c087..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_extended_hypervisors.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-import mock
-
-from nova.api.openstack.compute.contrib import hypervisors as hypervisors_v2
-from nova.api.openstack.compute.plugins.v3 import hypervisors \
- as hypervisors_v21
-from nova.api.openstack import extensions
-from nova import db
-from nova import exception
-from nova import test
-from nova.tests.api.openstack.compute.contrib import test_hypervisors
-from nova.tests.api.openstack import fakes
-
-
-def fake_compute_node_get(context, compute_id):
- for hyper in test_hypervisors.TEST_HYPERS:
- if hyper['id'] == compute_id:
- return hyper
- raise exception.ComputeHostNotFound(host=compute_id)
-
-
-def fake_compute_node_get_all(context):
- return test_hypervisors.TEST_HYPERS
-
-
-class ExtendedHypervisorsTestV21(test.NoDBTestCase):
- DETAIL_HYPERS_DICTS = copy.deepcopy(test_hypervisors.TEST_HYPERS)
- del DETAIL_HYPERS_DICTS[0]['service_id']
- del DETAIL_HYPERS_DICTS[1]['service_id']
- DETAIL_HYPERS_DICTS[0].update({'state': 'up',
- 'status': 'enabled',
- 'service': dict(id=1, host='compute1',
- disabled_reason=None)})
- DETAIL_HYPERS_DICTS[1].update({'state': 'up',
- 'status': 'enabled',
- 'service': dict(id=2, host='compute2',
- disabled_reason=None)})
-
- def _set_up_controller(self):
- self.controller = hypervisors_v21.HypervisorsController()
- self.controller.servicegroup_api.service_is_up = mock.MagicMock(
- return_value=True)
-
- def _get_request(self):
- return fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/detail',
- use_admin_context=True)
-
- def setUp(self):
- super(ExtendedHypervisorsTestV21, self).setUp()
- self._set_up_controller()
-
- self.stubs.Set(db, 'compute_node_get_all', fake_compute_node_get_all)
- self.stubs.Set(db, 'compute_node_get',
- fake_compute_node_get)
-
- def test_view_hypervisor_detail_noservers(self):
- result = self.controller._view_hypervisor(
- test_hypervisors.TEST_HYPERS[0], True)
-
- self.assertEqual(result, self.DETAIL_HYPERS_DICTS[0])
-
- def test_detail(self):
- req = self._get_request()
- result = self.controller.detail(req)
-
- self.assertEqual(result, dict(hypervisors=self.DETAIL_HYPERS_DICTS))
-
- def test_show_withid(self):
- req = self._get_request()
- result = self.controller.show(req, '1')
-
- self.assertEqual(result, dict(hypervisor=self.DETAIL_HYPERS_DICTS[0]))
-
-
-class ExtendedHypervisorsTestV2(ExtendedHypervisorsTestV21):
- DETAIL_HYPERS_DICTS = copy.deepcopy(test_hypervisors.TEST_HYPERS)
- del DETAIL_HYPERS_DICTS[0]['service_id']
- del DETAIL_HYPERS_DICTS[1]['service_id']
- DETAIL_HYPERS_DICTS[0].update({'service': dict(id=1, host='compute1')})
- DETAIL_HYPERS_DICTS[1].update({'service': dict(id=2, host='compute2')})
-
- def _set_up_controller(self):
- self.ext_mgr = extensions.ExtensionManager()
- self.ext_mgr.extensions = {}
- self.ext_mgr.extensions['os-extended-hypervisors'] = True
- self.controller = hypervisors_v2.HypervisorsController(self.ext_mgr)
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_ips.py b/nova/tests/api/openstack/compute/contrib/test_extended_ips.py
deleted file mode 100644
index c2df53c2f0..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_extended_ips.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# Copyright 2013 Nebula, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute.contrib import extended_ips
-from nova.api.openstack import xmlutil
-from nova import compute
-from nova import objects
-from nova.objects import instance as instance_obj
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-
-UUID1 = '00000000-0000-0000-0000-000000000001'
-UUID2 = '00000000-0000-0000-0000-000000000002'
-UUID3 = '00000000-0000-0000-0000-000000000003'
-NW_CACHE = [
- {
- 'address': 'aa:aa:aa:aa:aa:aa',
- 'id': 1,
- 'network': {
- 'bridge': 'br0',
- 'id': 1,
- 'label': 'private',
- 'subnets': [
- {
- 'cidr': '192.168.1.0/24',
- 'ips': [
- {
- 'address': '192.168.1.100',
- 'type': 'fixed',
- 'floating_ips': [
- {'address': '5.0.0.1', 'type': 'floating'},
- ],
- },
- ],
- },
- ]
- }
- },
- {
- 'address': 'bb:bb:bb:bb:bb:bb',
- 'id': 2,
- 'network': {
- 'bridge': 'br1',
- 'id': 2,
- 'label': 'public',
- 'subnets': [
- {
- 'cidr': '10.0.0.0/24',
- 'ips': [
- {
- 'address': '10.0.0.100',
- 'type': 'fixed',
- 'floating_ips': [
- {'address': '5.0.0.2', 'type': 'floating'},
- ],
- }
- ],
- },
- ]
- }
- }
-]
-ALL_IPS = []
-for cache in NW_CACHE:
- for subnet in cache['network']['subnets']:
- for fixed in subnet['ips']:
- sanitized = dict(fixed)
- sanitized.pop('floating_ips')
- ALL_IPS.append(sanitized)
- for floating in fixed['floating_ips']:
- ALL_IPS.append(floating)
-ALL_IPS.sort()
-
-
-def fake_compute_get(*args, **kwargs):
- inst = fakes.stub_instance(1, uuid=UUID3, nw_cache=NW_CACHE)
- return fake_instance.fake_instance_obj(args[1],
- expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, **inst)
-
-
-def fake_compute_get_all(*args, **kwargs):
- db_list = [
- fakes.stub_instance(1, uuid=UUID1, nw_cache=NW_CACHE),
- fakes.stub_instance(2, uuid=UUID2, nw_cache=NW_CACHE),
- ]
- fields = instance_obj.INSTANCE_DEFAULT_FIELDS
- return instance_obj._make_instance_list(args[1],
- objects.InstanceList(),
- db_list, fields)
-
-
-class ExtendedIpsTestV21(test.TestCase):
- content_type = 'application/json'
- prefix = 'OS-EXT-IPS:'
-
- def setUp(self):
- super(ExtendedIpsTestV21, self).setUp()
- fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app_v21(init_only=('servers',)))
- return res
-
- def _get_server(self, body):
- return jsonutils.loads(body).get('server')
-
- def _get_servers(self, body):
- return jsonutils.loads(body).get('servers')
-
- def _get_ips(self, server):
- for network in server['addresses'].itervalues():
- for ip in network:
- yield ip
-
- def assertServerStates(self, server):
- results = []
- for ip in self._get_ips(server):
- results.append({'address': ip.get('addr'),
- 'type': ip.get('%stype' % self.prefix)})
-
- self.assertEqual(ALL_IPS, sorted(results))
-
- def test_show(self):
- url = '/v2/fake/servers/%s' % UUID3
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- self.assertServerStates(self._get_server(res.body))
-
- def test_detail(self):
- url = '/v2/fake/servers/detail'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- for i, server in enumerate(self._get_servers(res.body)):
- self.assertServerStates(server)
-
-
-class ExtendedIpsTestV2(ExtendedIpsTestV21):
-
- def setUp(self):
- super(ExtendedIpsTestV2, self).setUp()
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Extended_ips'])
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
- return res
-
-
-class ExtendedIpsXmlTest(ExtendedIpsTestV2):
- content_type = 'application/xml'
- prefix = '{%s}' % extended_ips.Extended_ips.namespace
-
- def _get_server(self, body):
- return etree.XML(body)
-
- def _get_servers(self, body):
- return etree.XML(body).getchildren()
-
- def _get_ips(self, server):
- for network in server.find('{%s}addresses' % xmlutil.XMLNS_V11):
- for ip in network:
- yield ip
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_ips_mac.py b/nova/tests/api/openstack/compute/contrib/test_extended_ips_mac.py
deleted file mode 100644
index d67b04ea46..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_extended_ips_mac.py
+++ /dev/null
@@ -1,196 +0,0 @@
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute.contrib import extended_ips_mac
-from nova.api.openstack import xmlutil
-from nova import compute
-from nova import objects
-from nova.objects import instance as instance_obj
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-
-UUID1 = '00000000-0000-0000-0000-000000000001'
-UUID2 = '00000000-0000-0000-0000-000000000002'
-UUID3 = '00000000-0000-0000-0000-000000000003'
-NW_CACHE = [
- {
- 'address': 'aa:aa:aa:aa:aa:aa',
- 'id': 1,
- 'network': {
- 'bridge': 'br0',
- 'id': 1,
- 'label': 'private',
- 'subnets': [
- {
- 'cidr': '192.168.1.0/24',
- 'ips': [
- {
- 'address': '192.168.1.100',
- 'type': 'fixed',
- 'floating_ips': [
- {'address': '5.0.0.1', 'type': 'floating'},
- ],
- },
- ],
- },
- ]
- }
- },
- {
- 'address': 'bb:bb:bb:bb:bb:bb',
- 'id': 2,
- 'network': {
- 'bridge': 'br1',
- 'id': 2,
- 'label': 'public',
- 'subnets': [
- {
- 'cidr': '10.0.0.0/24',
- 'ips': [
- {
- 'address': '10.0.0.100',
- 'type': 'fixed',
- 'floating_ips': [
- {'address': '5.0.0.2', 'type': 'floating'},
- ],
- }
- ],
- },
- ]
- }
- }
-]
-ALL_IPS = []
-for cache in NW_CACHE:
- for subnet in cache['network']['subnets']:
- for fixed in subnet['ips']:
- sanitized = dict(fixed)
- sanitized['mac_address'] = cache['address']
- sanitized.pop('floating_ips')
- sanitized.pop('type')
- ALL_IPS.append(sanitized)
- for floating in fixed['floating_ips']:
- sanitized = dict(floating)
- sanitized['mac_address'] = cache['address']
- sanitized.pop('type')
- ALL_IPS.append(sanitized)
-ALL_IPS.sort()
-
-
-def fake_compute_get(*args, **kwargs):
- inst = fakes.stub_instance(1, uuid=UUID3, nw_cache=NW_CACHE)
- return fake_instance.fake_instance_obj(args[1],
- expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, **inst)
-
-
-def fake_compute_get_all(*args, **kwargs):
- db_list = [
- fakes.stub_instance(1, uuid=UUID1, nw_cache=NW_CACHE),
- fakes.stub_instance(2, uuid=UUID2, nw_cache=NW_CACHE),
- ]
- fields = instance_obj.INSTANCE_DEFAULT_FIELDS
- return instance_obj._make_instance_list(args[1],
- objects.InstanceList(),
- db_list, fields)
-
-
-class ExtendedIpsMacTestV21(test.TestCase):
- content_type = 'application/json'
- prefix = '%s:' % extended_ips_mac.Extended_ips_mac.alias
-
- def setUp(self):
- super(ExtendedIpsMacTestV21, self).setUp()
- fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app_v21(init_only=('servers',)))
- return res
-
- def _get_server(self, body):
- return jsonutils.loads(body).get('server')
-
- def _get_servers(self, body):
- return jsonutils.loads(body).get('servers')
-
- def _get_ips(self, server):
- for network in server['addresses'].itervalues():
- for ip in network:
- yield ip
-
- def assertServerStates(self, server):
- results = []
- for ip in self._get_ips(server):
- results.append({'address': ip.get('addr'),
- 'mac_address': ip.get('%smac_addr' % self.prefix)})
-
- self.assertEqual(ALL_IPS, sorted(results))
-
- def test_show(self):
- url = '/v2/fake/servers/%s' % UUID3
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- self.assertServerStates(self._get_server(res.body))
-
- def test_detail(self):
- url = '/v2/fake/servers/detail'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- for _i, server in enumerate(self._get_servers(res.body)):
- self.assertServerStates(server)
-
-
-class ExtendedIpsMacTestV2(ExtendedIpsMacTestV21):
- content_type = 'application/json'
- prefix = '%s:' % extended_ips_mac.Extended_ips_mac.alias
-
- def setUp(self):
- super(ExtendedIpsMacTestV2, self).setUp()
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Extended_ips_mac'])
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
- return res
-
-
-class ExtendedIpsMacXmlTest(ExtendedIpsMacTestV2):
- content_type = 'application/xml'
- prefix = '{%s}' % extended_ips_mac.Extended_ips_mac.namespace
-
- def _get_server(self, body):
- return etree.XML(body)
-
- def _get_servers(self, body):
- return etree.XML(body).getchildren()
-
- def _get_ips(self, server):
- for network in server.find('{%s}addresses' % xmlutil.XMLNS_V11):
- for ip in network:
- yield ip
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py b/nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py
deleted file mode 100644
index 0b82d7a750..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute.contrib import extended_server_attributes
-from nova import compute
-from nova import db
-from nova import exception
-from nova import objects
-from nova.objects import instance as instance_obj
-from nova import test
-from nova.tests.api.openstack import fakes
-
-from oslo.config import cfg
-
-
-NAME_FMT = cfg.CONF.instance_name_template
-UUID1 = '00000000-0000-0000-0000-000000000001'
-UUID2 = '00000000-0000-0000-0000-000000000002'
-UUID3 = '00000000-0000-0000-0000-000000000003'
-
-
-def fake_compute_get(*args, **kwargs):
- fields = instance_obj.INSTANCE_DEFAULT_FIELDS
- return objects.Instance._from_db_object(
- args[1], objects.Instance(),
- fakes.stub_instance(1, uuid=UUID3, host="host-fake",
- node="node-fake"), fields)
-
-
-def fake_compute_get_all(*args, **kwargs):
- db_list = [
- fakes.stub_instance(1, uuid=UUID1, host="host-1", node="node-1"),
- fakes.stub_instance(2, uuid=UUID2, host="host-2", node="node-2")
- ]
- fields = instance_obj.INSTANCE_DEFAULT_FIELDS
- return instance_obj._make_instance_list(args[1],
- objects.InstanceList(),
- db_list, fields)
-
-
-class ExtendedServerAttributesTestV21(test.TestCase):
- content_type = 'application/json'
- prefix = 'OS-EXT-SRV-ATTR:'
- fake_url = '/v2/fake'
-
- def setUp(self):
- super(ExtendedServerAttributesTestV21, self).setUp()
- fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
- self.stubs.Set(db, 'instance_get_by_uuid', fake_compute_get)
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(
- fakes.wsgi_app_v21(init_only=('servers',
- 'os-extended-server-attributes')))
- return res
-
- def _get_server(self, body):
- return jsonutils.loads(body).get('server')
-
- def _get_servers(self, body):
- return jsonutils.loads(body).get('servers')
-
- def assertServerAttributes(self, server, host, node, instance_name):
- self.assertEqual(server.get('%shost' % self.prefix), host)
- self.assertEqual(server.get('%sinstance_name' % self.prefix),
- instance_name)
- self.assertEqual(server.get('%shypervisor_hostname' % self.prefix),
- node)
-
- def test_show(self):
- url = self.fake_url + '/servers/%s' % UUID3
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- self.assertServerAttributes(self._get_server(res.body),
- host='host-fake',
- node='node-fake',
- instance_name=NAME_FMT % 1)
-
- def test_detail(self):
- url = self.fake_url + '/servers/detail'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- for i, server in enumerate(self._get_servers(res.body)):
- self.assertServerAttributes(server,
- host='host-%s' % (i + 1),
- node='node-%s' % (i + 1),
- instance_name=NAME_FMT % (i + 1))
-
- def test_no_instance_passthrough_404(self):
-
- def fake_compute_get(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id='fake')
-
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- url = self.fake_url + '/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 404)
-
-
-class ExtendedServerAttributesTestV2(ExtendedServerAttributesTestV21):
-
- def setUp(self):
- super(ExtendedServerAttributesTestV2, self).setUp()
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Extended_server_attributes'])
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
- return res
-
-
-class ExtendedServerAttributesXmlTest(ExtendedServerAttributesTestV2):
- content_type = 'application/xml'
- ext = extended_server_attributes
- prefix = '{%s}' % ext.Extended_server_attributes.namespace
-
- def _get_server(self, body):
- return etree.XML(body)
-
- def _get_servers(self, body):
- return etree.XML(body).getchildren()
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_status.py b/nova/tests/api/openstack/compute/contrib/test_extended_status.py
deleted file mode 100644
index 218f277501..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_extended_status.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute.contrib import extended_status
-from nova import compute
-from nova import db
-from nova import exception
-from nova import objects
-from nova.objects import instance as instance_obj
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-
-UUID1 = '00000000-0000-0000-0000-000000000001'
-UUID2 = '00000000-0000-0000-0000-000000000002'
-UUID3 = '00000000-0000-0000-0000-000000000003'
-
-
-def fake_compute_get(*args, **kwargs):
- inst = fakes.stub_instance(1, uuid=UUID3, task_state="kayaking",
- vm_state="slightly crunchy", power_state=1)
- return fake_instance.fake_instance_obj(args[1], **inst)
-
-
-def fake_compute_get_all(*args, **kwargs):
- db_list = [
- fakes.stub_instance(1, uuid=UUID1, task_state="task-1",
- vm_state="vm-1", power_state=1),
- fakes.stub_instance(2, uuid=UUID2, task_state="task-2",
- vm_state="vm-2", power_state=2),
- ]
-
- fields = instance_obj.INSTANCE_DEFAULT_FIELDS
- return instance_obj._make_instance_list(args[1],
- objects.InstanceList(),
- db_list, fields)
-
-
-class ExtendedStatusTestV21(test.TestCase):
- content_type = 'application/json'
- prefix = 'OS-EXT-STS:'
- fake_url = '/v2/fake'
-
- def _set_flags(self):
- pass
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app_v21(
- init_only=('servers',
- 'os-extended-status')))
- return res
-
- def setUp(self):
- super(ExtendedStatusTestV21, self).setUp()
- fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
- self._set_flags()
- return_server = fakes.fake_instance_get()
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
-
- def _get_server(self, body):
- return jsonutils.loads(body).get('server')
-
- def _get_servers(self, body):
- return jsonutils.loads(body).get('servers')
-
- def assertServerStates(self, server, vm_state, power_state, task_state):
- self.assertEqual(server.get('%svm_state' % self.prefix), vm_state)
- self.assertEqual(int(server.get('%spower_state' % self.prefix)),
- power_state)
- self.assertEqual(server.get('%stask_state' % self.prefix), task_state)
-
- def test_show(self):
- url = self.fake_url + '/servers/%s' % UUID3
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- self.assertServerStates(self._get_server(res.body),
- vm_state='slightly crunchy',
- power_state=1,
- task_state='kayaking')
-
- def test_detail(self):
- url = self.fake_url + '/servers/detail'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- for i, server in enumerate(self._get_servers(res.body)):
- self.assertServerStates(server,
- vm_state='vm-%s' % (i + 1),
- power_state=(i + 1),
- task_state='task-%s' % (i + 1))
-
- def test_no_instance_passthrough_404(self):
-
- def fake_compute_get(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id='fake')
-
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- url = self.fake_url + '/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 404)
-
-
-class ExtendedStatusTestV2(ExtendedStatusTestV21):
-
- def _set_flags(self):
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Extended_status'])
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
- return res
-
-
-class ExtendedStatusXmlTest(ExtendedStatusTestV2):
- content_type = 'application/xml'
- prefix = '{%s}' % extended_status.Extended_status.namespace
-
- def _get_server(self, body):
- return etree.XML(body)
-
- def _get_servers(self, body):
- return etree.XML(body).getchildren()
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_virtual_interfaces_net.py b/nova/tests/api/openstack/compute/contrib/test_extended_virtual_interfaces_net.py
deleted file mode 100644
index abe99643b8..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_extended_virtual_interfaces_net.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute.contrib import extended_virtual_interfaces_net
-from nova.api.openstack import wsgi
-from nova import compute
-from nova import network
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-
-
-FAKE_VIFS = [{'uuid': '00000000-0000-0000-0000-00000000000000000',
- 'address': '00-00-00-00-00-00',
- 'net_uuid': '00000000-0000-0000-0000-00000000000000001'},
- {'uuid': '11111111-1111-1111-1111-11111111111111111',
- 'address': '11-11-11-11-11-11',
- 'net_uuid': '11111111-1111-1111-1111-11111111111111112'}]
-
-EXPECTED_NET_UUIDS = ['00000000-0000-0000-0000-00000000000000001',
- '11111111-1111-1111-1111-11111111111111112']
-
-
-def compute_api_get(self, context, instance_id, expected_attrs=None,
- want_objects=False):
- return dict(uuid=FAKE_UUID, id=instance_id, instance_type_id=1, host='bob')
-
-
-def get_vifs_by_instance(self, context, instance_id):
- return FAKE_VIFS
-
-
-def get_vif_by_mac_address(self, context, mac_address):
- if mac_address == "00-00-00-00-00-00":
- return {'net_uuid': '00000000-0000-0000-0000-00000000000000001'}
- else:
- return {'net_uuid': '11111111-1111-1111-1111-11111111111111112'}
-
-
-class ExtendedServerVIFNetTest(test.NoDBTestCase):
- content_type = 'application/json'
- prefix = "%s:" % extended_virtual_interfaces_net. \
- Extended_virtual_interfaces_net.alias
-
- def setUp(self):
- super(ExtendedServerVIFNetTest, self).setUp()
- self.stubs.Set(compute.api.API, "get",
- compute_api_get)
- self.stubs.Set(network.api.API, "get_vifs_by_instance",
- get_vifs_by_instance)
- self.stubs.Set(network.api.API, "get_vif_by_mac_address",
- get_vif_by_mac_address)
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Virtual_interfaces',
- 'Extended_virtual_interfaces_net'])
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app(init_only=(
- 'os-virtual-interfaces', 'OS-EXT-VIF-NET')))
- return res
-
- def _get_vifs(self, body):
- return jsonutils.loads(body).get('virtual_interfaces')
-
- def _get_net_id(self, vifs):
- for vif in vifs:
- yield vif['%snet_id' % self.prefix]
-
- def assertVIFs(self, vifs):
- result = []
- for net_id in self._get_net_id(vifs):
- result.append(net_id)
- sorted(result)
-
- for i, net_uuid in enumerate(result):
- self.assertEqual(net_uuid, EXPECTED_NET_UUIDS[i])
-
- def test_get_extend_virtual_interfaces_list(self):
- res = self._make_request('/v2/fake/servers/abcd/os-virtual-interfaces')
-
- self.assertEqual(res.status_int, 200)
- self.assertVIFs(self._get_vifs(res.body))
-
-
-class ExtendedServerVIFNetSerializerTest(ExtendedServerVIFNetTest):
- content_type = 'application/xml'
- prefix = "{%s}" % extended_virtual_interfaces_net. \
- Extended_virtual_interfaces_net.namespace
-
- def setUp(self):
- super(ExtendedServerVIFNetSerializerTest, self).setUp()
- self.namespace = wsgi.XMLNS_V11
- self.serializer = extended_virtual_interfaces_net. \
- ExtendedVirtualInterfaceNetTemplate()
-
- def _get_vifs(self, body):
- return etree.XML(body).getchildren()
-
- def _get_net_id(self, vifs):
- for vif in vifs:
- yield vif.attrib['%snet_id' % self.prefix]
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_volumes.py b/nova/tests/api/openstack/compute/contrib/test_extended_volumes.py
deleted file mode 100644
index 9c02792e8b..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_extended_volumes.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute.contrib import extended_volumes
-from nova import compute
-from nova import db
-from nova import objects
-from nova.objects import instance as instance_obj
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_block_device
-from nova.tests import fake_instance
-
-UUID1 = '00000000-0000-0000-0000-000000000001'
-UUID2 = '00000000-0000-0000-0000-000000000002'
-UUID3 = '00000000-0000-0000-0000-000000000003'
-
-
-def fake_compute_get(*args, **kwargs):
- inst = fakes.stub_instance(1, uuid=UUID1)
- return fake_instance.fake_instance_obj(args[1], **inst)
-
-
-def fake_compute_get_all(*args, **kwargs):
- db_list = [fakes.stub_instance(1), fakes.stub_instance(2)]
- fields = instance_obj.INSTANCE_DEFAULT_FIELDS
- return instance_obj._make_instance_list(args[1],
- objects.InstanceList(),
- db_list, fields)
-
-
-def fake_bdms_get_all_by_instance(*args, **kwargs):
- return [fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': UUID1, 'source_type': 'volume',
- 'destination_type': 'volume', 'id': 1}),
- fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': UUID2, 'source_type': 'volume',
- 'destination_type': 'volume', 'id': 2})]
-
-
-class ExtendedVolumesTest(test.TestCase):
- content_type = 'application/json'
- prefix = 'os-extended-volumes:'
-
- def setUp(self):
- super(ExtendedVolumesTest, self).setUp()
- fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fake_bdms_get_all_by_instance)
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Extended_volumes'])
- return_server = fakes.fake_instance_get()
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
- return res
-
- def _get_server(self, body):
- return jsonutils.loads(body).get('server')
-
- def _get_servers(self, body):
- return jsonutils.loads(body).get('servers')
-
- def test_show(self):
- url = '/v2/fake/servers/%s' % UUID1
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- server = self._get_server(res.body)
- exp_volumes = [{'id': UUID1}, {'id': UUID2}]
- if self.content_type == 'application/json':
- actual = server.get('%svolumes_attached' % self.prefix)
- elif self.content_type == 'application/xml':
- actual = [dict(elem.items()) for elem in
- server.findall('%svolume_attached' % self.prefix)]
- self.assertEqual(exp_volumes, actual)
-
- def test_detail(self):
- url = '/v2/fake/servers/detail'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- exp_volumes = [{'id': UUID1}, {'id': UUID2}]
- for i, server in enumerate(self._get_servers(res.body)):
- if self.content_type == 'application/json':
- actual = server.get('%svolumes_attached' % self.prefix)
- elif self.content_type == 'application/xml':
- actual = [dict(elem.items()) for elem in
- server.findall('%svolume_attached' % self.prefix)]
- self.assertEqual(exp_volumes, actual)
-
-
-class ExtendedVolumesXmlTest(ExtendedVolumesTest):
- content_type = 'application/xml'
- prefix = '{%s}' % extended_volumes.Extended_volumes.namespace
-
- def _get_server(self, body):
- return etree.XML(body)
-
- def _get_servers(self, body):
- return etree.XML(body).getchildren()
diff --git a/nova/tests/api/openstack/compute/contrib/test_fixed_ips.py b/nova/tests/api/openstack/compute/contrib/test_fixed_ips.py
deleted file mode 100644
index 9e97c294b3..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_fixed_ips.py
+++ /dev/null
@@ -1,256 +0,0 @@
-# Copyright 2012 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import webob
-
-from nova.api.openstack.compute.contrib import fixed_ips as fixed_ips_v2
-from nova.api.openstack.compute.plugins.v3 import fixed_ips as fixed_ips_v21
-from nova import context
-from nova import db
-from nova import exception
-from nova.i18n import _
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests.objects import test_network
-
-
-fake_fixed_ips = [{'id': 1,
- 'address': '192.168.1.1',
- 'network_id': 1,
- 'virtual_interface_id': 1,
- 'instance_uuid': '1',
- 'allocated': False,
- 'leased': False,
- 'reserved': False,
- 'host': None,
- 'instance': None,
- 'network': test_network.fake_network,
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': False},
- {'id': 2,
- 'address': '192.168.1.2',
- 'network_id': 1,
- 'virtual_interface_id': 2,
- 'instance_uuid': '2',
- 'allocated': False,
- 'leased': False,
- 'reserved': False,
- 'host': None,
- 'instance': None,
- 'network': test_network.fake_network,
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': False},
- {'id': 3,
- 'address': '10.0.0.2',
- 'network_id': 1,
- 'virtual_interface_id': 3,
- 'instance_uuid': '3',
- 'allocated': False,
- 'leased': False,
- 'reserved': False,
- 'host': None,
- 'instance': None,
- 'network': test_network.fake_network,
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': True},
- ]
-
-
-def fake_fixed_ip_get_by_address(context, address, columns_to_join=None):
- if address == 'inv.ali.d.ip':
- msg = _("Invalid fixed IP Address %s in request") % address
- raise exception.FixedIpInvalid(msg)
- for fixed_ip in fake_fixed_ips:
- if fixed_ip['address'] == address and not fixed_ip['deleted']:
- return fixed_ip
- raise exception.FixedIpNotFoundForAddress(address=address)
-
-
-def fake_fixed_ip_get_by_address_detailed(context, address):
- network = {'id': 1,
- 'cidr': "192.168.1.0/24"}
- for fixed_ip in fake_fixed_ips:
- if fixed_ip['address'] == address and not fixed_ip['deleted']:
- return (fixed_ip, FakeModel(network), None)
- raise exception.FixedIpNotFoundForAddress(address=address)
-
-
-def fake_fixed_ip_update(context, address, values):
- fixed_ip = fake_fixed_ip_get_by_address(context, address)
- if fixed_ip is None:
- raise exception.FixedIpNotFoundForAddress(address=address)
- else:
- for key in values:
- fixed_ip[key] = values[key]
-
-
-class FakeModel(object):
- """Stubs out for model."""
- def __init__(self, values):
- self.values = values
-
- def __getattr__(self, name):
- return self.values[name]
-
- def __getitem__(self, key):
- if key in self.values:
- return self.values[key]
- else:
- raise NotImplementedError()
-
- def __repr__(self):
- return '<FakeModel: %s>' % self.values
-
-
-def fake_network_get_all(context):
- network = {'id': 1,
- 'cidr': "192.168.1.0/24"}
- return [FakeModel(network)]
-
-
-class FixedIpTestV21(test.NoDBTestCase):
-
- fixed_ips = fixed_ips_v21
- url = '/v2/fake/os-fixed-ips'
-
- def setUp(self):
- super(FixedIpTestV21, self).setUp()
-
- self.stubs.Set(db, "fixed_ip_get_by_address",
- fake_fixed_ip_get_by_address)
- self.stubs.Set(db, "fixed_ip_get_by_address_detailed",
- fake_fixed_ip_get_by_address_detailed)
- self.stubs.Set(db, "fixed_ip_update", fake_fixed_ip_update)
-
- self.context = context.get_admin_context()
- self.controller = self.fixed_ips.FixedIPController()
-
- def _assert_equal(self, ret, exp):
- self.assertEqual(ret.wsgi_code, exp)
-
- def _get_reserve_action(self):
- return self.controller.reserve
-
- def _get_unreserve_action(self):
- return self.controller.unreserve
-
- def test_fixed_ips_get(self):
- req = fakes.HTTPRequest.blank('%s/192.168.1.1' % self.url)
- res_dict = self.controller.show(req, '192.168.1.1')
- response = {'fixed_ip': {'cidr': '192.168.1.0/24',
- 'hostname': None,
- 'host': None,
- 'address': '192.168.1.1'}}
- self.assertEqual(response, res_dict)
-
- def test_fixed_ips_get_bad_ip_fail(self):
- req = fakes.HTTPRequest.blank('%s/10.0.0.1' % self.url)
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req,
- '10.0.0.1')
-
- def test_fixed_ips_get_invalid_ip_address(self):
- req = fakes.HTTPRequest.blank('%s/inv.ali.d.ip' % self.url)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.show, req,
- 'inv.ali.d.ip')
-
- def test_fixed_ips_get_deleted_ip_fail(self):
- req = fakes.HTTPRequest.blank('%s/10.0.0.2' % self.url)
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req,
- '10.0.0.2')
-
- def test_fixed_ip_reserve(self):
- fake_fixed_ips[0]['reserved'] = False
- body = {'reserve': None}
- req = fakes.HTTPRequest.blank('%s/192.168.1.1/action' % self.url)
- action = self._get_reserve_action()
- result = action(req, "192.168.1.1", body)
-
- self._assert_equal(result or action, 202)
- self.assertEqual(fake_fixed_ips[0]['reserved'], True)
-
- def test_fixed_ip_reserve_bad_ip(self):
- body = {'reserve': None}
- req = fakes.HTTPRequest.blank('%s/10.0.0.1/action' % self.url)
- action = self._get_reserve_action()
-
- self.assertRaises(webob.exc.HTTPNotFound, action, req,
- '10.0.0.1', body)
-
- def test_fixed_ip_reserve_invalid_ip_address(self):
- body = {'reserve': None}
- req = fakes.HTTPRequest.blank('%s/inv.ali.d.ip/action' % self.url)
- action = self._get_reserve_action()
-
- self.assertRaises(webob.exc.HTTPBadRequest,
- action, req, 'inv.ali.d.ip', body)
-
- def test_fixed_ip_reserve_deleted_ip(self):
- body = {'reserve': None}
- action = self._get_reserve_action()
-
- req = fakes.HTTPRequest.blank('%s/10.0.0.2/action' % self.url)
- self.assertRaises(webob.exc.HTTPNotFound, action, req,
- '10.0.0.2', body)
-
- def test_fixed_ip_unreserve(self):
- fake_fixed_ips[0]['reserved'] = True
- body = {'unreserve': None}
- req = fakes.HTTPRequest.blank('%s/192.168.1.1/action' % self.url)
- action = self._get_unreserve_action()
- result = action(req, "192.168.1.1", body)
-
- self._assert_equal(result or action, 202)
- self.assertEqual(fake_fixed_ips[0]['reserved'], False)
-
- def test_fixed_ip_unreserve_bad_ip(self):
- body = {'unreserve': None}
- req = fakes.HTTPRequest.blank('%s/10.0.0.1/action' % self.url)
- action = self._get_unreserve_action()
-
- self.assertRaises(webob.exc.HTTPNotFound, action, req,
- '10.0.0.1', body)
-
- def test_fixed_ip_unreserve_invalid_ip_address(self):
- body = {'unreserve': None}
- req = fakes.HTTPRequest.blank('%s/inv.ali.d.ip/action' % self.url)
- action = self._get_unreserve_action()
- self.assertRaises(webob.exc.HTTPBadRequest,
- action, req, 'inv.ali.d.ip', body)
-
- def test_fixed_ip_unreserve_deleted_ip(self):
- body = {'unreserve': None}
- req = fakes.HTTPRequest.blank('%s/10.0.0.2/action' % self.url)
- action = self._get_unreserve_action()
- self.assertRaises(webob.exc.HTTPNotFound, action, req,
- '10.0.0.2', body)
-
-
-class FixedIpTestV2(FixedIpTestV21):
-
- fixed_ips = fixed_ips_v2
-
- def _assert_equal(self, ret, exp):
- self.assertEqual(ret.status, '202 Accepted')
-
- def _get_reserve_action(self):
- return self.controller.action
-
- def _get_unreserve_action(self):
- return self.controller.action
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_access.py b/nova/tests/api/openstack/compute/contrib/test_flavor_access.py
deleted file mode 100644
index 63b1a97924..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_access.py
+++ /dev/null
@@ -1,402 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-from lxml import etree
-from webob import exc
-
-from nova.api.openstack.compute.contrib import flavor_access \
- as flavor_access_v2
-from nova.api.openstack.compute import flavors as flavors_api
-from nova.api.openstack.compute.plugins.v3 import flavor_access \
- as flavor_access_v3
-from nova import context
-from nova import db
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-def generate_flavor(flavorid, ispublic):
- return {
- 'id': flavorid,
- 'flavorid': str(flavorid),
- 'root_gb': 1,
- 'ephemeral_gb': 1,
- 'name': u'test',
- 'deleted': False,
- 'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1),
- 'updated_at': None,
- 'memory_mb': 512,
- 'vcpus': 1,
- 'swap': 512,
- 'rxtx_factor': 1.0,
- 'disabled': False,
- 'extra_specs': {},
- 'deleted_at': None,
- 'vcpu_weight': None,
- 'is_public': bool(ispublic)
- }
-
-
-INSTANCE_TYPES = {
- '0': generate_flavor(0, True),
- '1': generate_flavor(1, True),
- '2': generate_flavor(2, False),
- '3': generate_flavor(3, False)}
-
-
-ACCESS_LIST = [{'flavor_id': '2', 'project_id': 'proj2'},
- {'flavor_id': '2', 'project_id': 'proj3'},
- {'flavor_id': '3', 'project_id': 'proj3'}]
-
-
-def fake_get_flavor_access_by_flavor_id(context, flavorid):
- res = []
- for access in ACCESS_LIST:
- if access['flavor_id'] == flavorid:
- res.append(access)
- return res
-
-
-def fake_get_flavor_by_flavor_id(context, flavorid, read_deleted=None):
- return INSTANCE_TYPES[flavorid]
-
-
-def _has_flavor_access(flavorid, projectid):
- for access in ACCESS_LIST:
- if access['flavor_id'] == flavorid and \
- access['project_id'] == projectid:
- return True
- return False
-
-
-def fake_get_all_flavors_sorted_list(context, inactive=False,
- filters=None, sort_key='flavorid',
- sort_dir='asc', limit=None, marker=None):
- if filters is None or filters['is_public'] is None:
- return sorted(INSTANCE_TYPES.values(), key=lambda item: item[sort_key])
-
- res = {}
- for k, v in INSTANCE_TYPES.iteritems():
- if filters['is_public'] and _has_flavor_access(k, context.project_id):
- res.update({k: v})
- continue
- if v['is_public'] == filters['is_public']:
- res.update({k: v})
-
- res = sorted(res.values(), key=lambda item: item[sort_key])
- return res
-
-
-class FakeRequest(object):
- environ = {"nova.context": context.get_admin_context()}
-
- def get_db_flavor(self, flavor_id):
- return INSTANCE_TYPES[flavor_id]
-
-
-class FakeResponse(object):
- obj = {'flavor': {'id': '0'},
- 'flavors': [
- {'id': '0'},
- {'id': '2'}]
- }
-
- def attach(self, **kwargs):
- pass
-
-
-class FlavorAccessTestV21(test.NoDBTestCase):
- api_version = "2.1"
- FlavorAccessController = flavor_access_v3.FlavorAccessController
- FlavorActionController = flavor_access_v3.FlavorActionController
- _prefix = "/v3"
- validation_ex = exception.ValidationError
-
- def setUp(self):
- super(FlavorAccessTestV21, self).setUp()
- self.flavor_controller = flavors_api.Controller()
- self.req = FakeRequest()
- self.context = self.req.environ['nova.context']
- self.stubs.Set(db, 'flavor_get_by_flavor_id',
- fake_get_flavor_by_flavor_id)
- self.stubs.Set(db, 'flavor_get_all',
- fake_get_all_flavors_sorted_list)
- self.stubs.Set(db, 'flavor_access_get_by_flavor_id',
- fake_get_flavor_access_by_flavor_id)
-
- self.flavor_access_controller = self.FlavorAccessController()
- self.flavor_action_controller = self.FlavorActionController()
-
- def _verify_flavor_list(self, result, expected):
- # result already sorted by flavor_id
- self.assertEqual(len(result), len(expected))
-
- for d1, d2 in zip(result, expected):
- self.assertEqual(d1['id'], d2['id'])
-
- def test_list_flavor_access_public(self):
- # query os-flavor-access on public flavor should return 404
- self.assertRaises(exc.HTTPNotFound,
- self.flavor_access_controller.index,
- self.req, '1')
-
- def test_list_flavor_access_private(self):
- expected = {'flavor_access': [
- {'flavor_id': '2', 'tenant_id': 'proj2'},
- {'flavor_id': '2', 'tenant_id': 'proj3'}]}
- result = self.flavor_access_controller.index(self.req, '2')
- self.assertEqual(result, expected)
-
- def test_list_with_no_context(self):
- req = fakes.HTTPRequest.blank(self._prefix + '/flavors/fake/flavors')
-
- def fake_authorize(context, target=None, action=None):
- raise exception.PolicyNotAuthorized(action='index')
-
- if self.api_version == "2.1":
- self.stubs.Set(flavor_access_v3,
- 'authorize',
- fake_authorize)
- else:
- self.stubs.Set(flavor_access_v2,
- 'authorize',
- fake_authorize)
-
- self.assertRaises(exception.PolicyNotAuthorized,
- self.flavor_access_controller.index,
- req, 'fake')
-
- def test_list_flavor_with_admin_default_proj1(self):
- expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
- req = fakes.HTTPRequest.blank(self._prefix + '/fake/flavors',
- use_admin_context=True)
- req.environ['nova.context'].project_id = 'proj1'
- result = self.flavor_controller.index(req)
- self._verify_flavor_list(result['flavors'], expected['flavors'])
-
- def test_list_flavor_with_admin_default_proj2(self):
- expected = {'flavors': [{'id': '0'}, {'id': '1'}, {'id': '2'}]}
- req = fakes.HTTPRequest.blank(self._prefix + '/flavors',
- use_admin_context=True)
- req.environ['nova.context'].project_id = 'proj2'
- result = self.flavor_controller.index(req)
- self._verify_flavor_list(result['flavors'], expected['flavors'])
-
- def test_list_flavor_with_admin_ispublic_true(self):
- expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
- url = self._prefix + '/flavors?is_public=true'
- req = fakes.HTTPRequest.blank(url,
- use_admin_context=True)
- result = self.flavor_controller.index(req)
- self._verify_flavor_list(result['flavors'], expected['flavors'])
-
- def test_list_flavor_with_admin_ispublic_false(self):
- expected = {'flavors': [{'id': '2'}, {'id': '3'}]}
- url = self._prefix + '/flavors?is_public=false'
- req = fakes.HTTPRequest.blank(url,
- use_admin_context=True)
- result = self.flavor_controller.index(req)
- self._verify_flavor_list(result['flavors'], expected['flavors'])
-
- def test_list_flavor_with_admin_ispublic_false_proj2(self):
- expected = {'flavors': [{'id': '2'}, {'id': '3'}]}
- url = self._prefix + '/flavors?is_public=false'
- req = fakes.HTTPRequest.blank(url,
- use_admin_context=True)
- req.environ['nova.context'].project_id = 'proj2'
- result = self.flavor_controller.index(req)
- self._verify_flavor_list(result['flavors'], expected['flavors'])
-
- def test_list_flavor_with_admin_ispublic_none(self):
- expected = {'flavors': [{'id': '0'}, {'id': '1'}, {'id': '2'},
- {'id': '3'}]}
- url = self._prefix + '/flavors?is_public=none'
- req = fakes.HTTPRequest.blank(url,
- use_admin_context=True)
- result = self.flavor_controller.index(req)
- self._verify_flavor_list(result['flavors'], expected['flavors'])
-
- def test_list_flavor_with_no_admin_default(self):
- expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
- req = fakes.HTTPRequest.blank(self._prefix + '/flavors',
- use_admin_context=False)
- result = self.flavor_controller.index(req)
- self._verify_flavor_list(result['flavors'], expected['flavors'])
-
- def test_list_flavor_with_no_admin_ispublic_true(self):
- expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
- url = self._prefix + '/flavors?is_public=true'
- req = fakes.HTTPRequest.blank(url,
- use_admin_context=False)
- result = self.flavor_controller.index(req)
- self._verify_flavor_list(result['flavors'], expected['flavors'])
-
- def test_list_flavor_with_no_admin_ispublic_false(self):
- expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
- url = self._prefix + '/flavors?is_public=false'
- req = fakes.HTTPRequest.blank(url,
- use_admin_context=False)
- result = self.flavor_controller.index(req)
- self._verify_flavor_list(result['flavors'], expected['flavors'])
-
- def test_list_flavor_with_no_admin_ispublic_none(self):
- expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
- url = self._prefix + '/flavors?is_public=none'
- req = fakes.HTTPRequest.blank(url,
- use_admin_context=False)
- result = self.flavor_controller.index(req)
- self._verify_flavor_list(result['flavors'], expected['flavors'])
-
- def test_show(self):
- resp = FakeResponse()
- self.flavor_action_controller.show(self.req, resp, '0')
- self.assertEqual({'id': '0', 'os-flavor-access:is_public': True},
- resp.obj['flavor'])
- self.flavor_action_controller.show(self.req, resp, '2')
- self.assertEqual({'id': '0', 'os-flavor-access:is_public': False},
- resp.obj['flavor'])
-
- def test_detail(self):
- resp = FakeResponse()
- self.flavor_action_controller.detail(self.req, resp)
- self.assertEqual([{'id': '0', 'os-flavor-access:is_public': True},
- {'id': '2', 'os-flavor-access:is_public': False}],
- resp.obj['flavors'])
-
- def test_create(self):
- resp = FakeResponse()
- self.flavor_action_controller.create(self.req, {}, resp)
- self.assertEqual({'id': '0', 'os-flavor-access:is_public': True},
- resp.obj['flavor'])
-
- def _get_add_access(self):
- if self.api_version == "2.1":
- return self.flavor_action_controller._add_tenant_access
- else:
- return self.flavor_action_controller._addTenantAccess
-
- def _get_remove_access(self):
- if self.api_version == "2.1":
- return self.flavor_action_controller._remove_tenant_access
- else:
- return self.flavor_action_controller._removeTenantAccess
-
- def test_add_tenant_access(self):
- def stub_add_flavor_access(context, flavorid, projectid):
- self.assertEqual('3', flavorid, "flavorid")
- self.assertEqual("proj2", projectid, "projectid")
- self.stubs.Set(db, 'flavor_access_add',
- stub_add_flavor_access)
- expected = {'flavor_access':
- [{'flavor_id': '3', 'tenant_id': 'proj3'}]}
- body = {'addTenantAccess': {'tenant': 'proj2'}}
- req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
- use_admin_context=True)
-
- add_access = self._get_add_access()
- result = add_access(req, '3', body=body)
- self.assertEqual(result, expected)
-
- def test_add_tenant_access_with_no_admin_user(self):
- req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
- use_admin_context=False)
- body = {'addTenantAccess': {'tenant': 'proj2'}}
- add_access = self._get_add_access()
- self.assertRaises(exception.PolicyNotAuthorized,
- add_access, req, '2', body=body)
-
- def test_add_tenant_access_with_no_tenant(self):
- req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
- use_admin_context=True)
- body = {'addTenantAccess': {'foo': 'proj2'}}
- add_access = self._get_add_access()
- self.assertRaises(self.validation_ex,
- add_access, req, '2', body=body)
- body = {'addTenantAccess': {'tenant': ''}}
- self.assertRaises(self.validation_ex,
- add_access, req, '2', body=body)
-
- def test_add_tenant_access_with_already_added_access(self):
- def stub_add_flavor_access(context, flavorid, projectid):
- raise exception.FlavorAccessExists(flavor_id=flavorid,
- project_id=projectid)
- self.stubs.Set(db, 'flavor_access_add',
- stub_add_flavor_access)
- body = {'addTenantAccess': {'tenant': 'proj2'}}
- add_access = self._get_add_access()
- self.assertRaises(exc.HTTPConflict,
- add_access, self.req, '3', body=body)
-
- def test_remove_tenant_access_with_bad_access(self):
- def stub_remove_flavor_access(context, flavorid, projectid):
- raise exception.FlavorAccessNotFound(flavor_id=flavorid,
- project_id=projectid)
- self.stubs.Set(db, 'flavor_access_remove',
- stub_remove_flavor_access)
- body = {'removeTenantAccess': {'tenant': 'proj2'}}
- remove_access = self._get_remove_access()
- self.assertRaises(exc.HTTPNotFound,
- remove_access, self.req, '3', body=body)
-
- def test_delete_tenant_access_with_no_tenant(self):
- req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
- use_admin_context=True)
- remove_access = self._get_remove_access()
- body = {'removeTenantAccess': {'foo': 'proj2'}}
- self.assertRaises(self.validation_ex,
- remove_access, req, '2', body=body)
- body = {'removeTenantAccess': {'tenant': ''}}
- self.assertRaises(self.validation_ex,
- remove_access, req, '2', body=body)
-
- def test_remove_tenant_access_with_no_admin_user(self):
- req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
- use_admin_context=False)
- body = {'removeTenantAccess': {'tenant': 'proj2'}}
- remove_access = self._get_remove_access()
- self.assertRaises(exception.PolicyNotAuthorized,
- remove_access, req, '2', body=body)
-
-
-class FlavorAccessTestV20(FlavorAccessTestV21):
- api_version = "2.0"
- FlavorAccessController = flavor_access_v2.FlavorAccessController
- FlavorActionController = flavor_access_v2.FlavorActionController
- _prefix = "/v2/fake"
- validation_ex = exc.HTTPBadRequest
-
-
-class FlavorAccessSerializerTest(test.NoDBTestCase):
- def test_serializer_empty(self):
- serializer = flavor_access_v2.FlavorAccessTemplate()
- text = serializer.serialize(dict(flavor_access=[]))
- tree = etree.fromstring(text)
- self.assertEqual(len(tree), 0)
-
- def test_serializer(self):
- expected = ("<?xml version='1.0' encoding='UTF-8'?>\n"
- '<flavor_access>'
- '<access tenant_id="proj2" flavor_id="2"/>'
- '<access tenant_id="proj3" flavor_id="2"/>'
- '</flavor_access>')
- access_list = [{'flavor_id': '2', 'tenant_id': 'proj2'},
- {'flavor_id': '2', 'tenant_id': 'proj3'}]
-
- serializer = flavor_access_v2.FlavorAccessTemplate()
- text = serializer.serialize(dict(flavor_access=access_list))
- self.assertEqual(text, expected)
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py b/nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py
deleted file mode 100644
index fbb630949a..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_disabled.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute.contrib import flavor_disabled
-from nova.compute import flavors
-from nova import test
-from nova.tests.api.openstack import fakes
-
-FAKE_FLAVORS = {
- 'flavor 1': {
- "flavorid": '1',
- "name": 'flavor 1',
- "memory_mb": '256',
- "root_gb": '10',
- "swap": 512,
- "vcpus": 1,
- "ephemeral_gb": 1,
- "disabled": False,
- },
- 'flavor 2': {
- "flavorid": '2',
- "name": 'flavor 2',
- "memory_mb": '512',
- "root_gb": '20',
- "swap": None,
- "vcpus": 1,
- "ephemeral_gb": 1,
- "disabled": True,
- },
-}
-
-
-def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
- return FAKE_FLAVORS['flavor %s' % flavorid]
-
-
-def fake_get_all_flavors_sorted_list(context=None, inactive=False,
- filters=None, sort_key='flavorid',
- sort_dir='asc', limit=None, marker=None):
- return [
- fake_flavor_get_by_flavor_id(1),
- fake_flavor_get_by_flavor_id(2)
- ]
-
-
-class FlavorDisabledTestV21(test.NoDBTestCase):
- base_url = '/v2/fake/flavors'
- content_type = 'application/json'
- prefix = "OS-FLV-DISABLED:"
-
- def setUp(self):
- super(FlavorDisabledTestV21, self).setUp()
- ext = ('nova.api.openstack.compute.contrib'
- '.flavor_disabled.Flavor_disabled')
- self.flags(osapi_compute_extension=[ext])
- fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(flavors, "get_all_flavors_sorted_list",
- fake_get_all_flavors_sorted_list)
- self.stubs.Set(flavors,
- "get_flavor_by_flavor_id",
- fake_flavor_get_by_flavor_id)
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app_v21(init_only=('flavors')))
- return res
-
- def _get_flavor(self, body):
- return jsonutils.loads(body).get('flavor')
-
- def _get_flavors(self, body):
- return jsonutils.loads(body).get('flavors')
-
- def assertFlavorDisabled(self, flavor, disabled):
- self.assertEqual(str(flavor.get('%sdisabled' % self.prefix)), disabled)
-
- def test_show(self):
- url = self.base_url + '/1'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- self.assertFlavorDisabled(self._get_flavor(res.body), 'False')
-
- def test_detail(self):
- url = self.base_url + '/detail'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- flavors = self._get_flavors(res.body)
- self.assertFlavorDisabled(flavors[0], 'False')
- self.assertFlavorDisabled(flavors[1], 'True')
-
-
-class FlavorDisabledTestV2(FlavorDisabledTestV21):
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app())
- return res
-
-
-class FlavorDisabledXmlTest(FlavorDisabledTestV2):
- content_type = 'application/xml'
- prefix = '{%s}' % flavor_disabled.Flavor_disabled.namespace
-
- def _get_flavor(self, body):
- return etree.XML(body)
-
- def _get_flavors(self, body):
- return etree.XML(body).getchildren()
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py b/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py
deleted file mode 100644
index 937fa8581e..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py
+++ /dev/null
@@ -1,465 +0,0 @@
-# Copyright 2011 Andrew Bogott for the Wikimedia Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-import mock
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute.contrib import flavor_access
-from nova.api.openstack.compute.contrib import flavormanage as flavormanage_v2
-from nova.api.openstack.compute.plugins.v3 import flavor_manage as \
- flavormanage_v21
-from nova.compute import flavors
-from nova import context
-from nova import db
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-def fake_db_flavor(**updates):
- db_flavor = {
- 'root_gb': 1,
- 'ephemeral_gb': 1,
- 'name': u'frob',
- 'deleted': False,
- 'created_at': datetime.datetime(2012, 1, 19, 18, 49, 30, 877329),
- 'updated_at': None,
- 'memory_mb': 256,
- 'vcpus': 1,
- 'flavorid': 1,
- 'swap': 0,
- 'rxtx_factor': 1.0,
- 'extra_specs': {},
- 'deleted_at': None,
- 'vcpu_weight': None,
- 'id': 7,
- 'is_public': True,
- 'disabled': False,
- }
- if updates:
- db_flavor.update(updates)
- return db_flavor
-
-
-def fake_get_flavor_by_flavor_id(flavorid, ctxt=None, read_deleted='yes'):
- if flavorid == 'failtest':
- raise exception.FlavorNotFound(flavor_id=flavorid)
- elif not str(flavorid) == '1234':
- raise Exception("This test expects flavorid 1234, not %s" % flavorid)
- if read_deleted != 'no':
- raise test.TestingException("Should not be reading deleted")
- return fake_db_flavor(flavorid=flavorid)
-
-
-def fake_destroy(flavorname):
- pass
-
-
-def fake_create(context, kwargs, projects=None):
- newflavor = fake_db_flavor()
-
- flavorid = kwargs.get('flavorid')
- if flavorid is None:
- flavorid = 1234
-
- newflavor['flavorid'] = flavorid
- newflavor["name"] = kwargs.get('name')
- newflavor["memory_mb"] = int(kwargs.get('memory_mb'))
- newflavor["vcpus"] = int(kwargs.get('vcpus'))
- newflavor["root_gb"] = int(kwargs.get('root_gb'))
- newflavor["ephemeral_gb"] = int(kwargs.get('ephemeral_gb'))
- newflavor["swap"] = kwargs.get('swap')
- newflavor["rxtx_factor"] = float(kwargs.get('rxtx_factor'))
- newflavor["is_public"] = bool(kwargs.get('is_public'))
- newflavor["disabled"] = bool(kwargs.get('disabled'))
-
- return newflavor
-
-
-class FlavorManageTestV21(test.NoDBTestCase):
- controller = flavormanage_v21.FlavorManageController()
- validation_error = exception.ValidationError
- base_url = '/v2/fake/flavors'
-
- def setUp(self):
- super(FlavorManageTestV21, self).setUp()
- self.stubs.Set(flavors,
- "get_flavor_by_flavor_id",
- fake_get_flavor_by_flavor_id)
- self.stubs.Set(flavors, "destroy", fake_destroy)
- self.stubs.Set(db, "flavor_create", fake_create)
- self.ctxt = context.RequestContext('fake', 'fake',
- is_admin=True, auth_token=True)
- self.app = self._setup_app()
-
- self.request_body = {
- "flavor": {
- "name": "test",
- "ram": 512,
- "vcpus": 2,
- "disk": 1,
- "OS-FLV-EXT-DATA:ephemeral": 1,
- "id": unicode('1234'),
- "swap": 512,
- "rxtx_factor": 1,
- "os-flavor-access:is_public": True,
- }
- }
- self.expected_flavor = self.request_body
-
- def _setup_app(self):
- return fakes.wsgi_app_v21(init_only=('flavor-manage', 'os-flavor-rxtx',
- 'os-flavor-access', 'flavors',
- 'os-flavor-extra-data'))
-
- def test_delete(self):
- req = fakes.HTTPRequest.blank(self.base_url + '/1234')
- res = self.controller._delete(req, 1234)
-
- # NOTE: on v2.1, http status code is set as wsgi_code of API
- # method instead of status_int in a response object.
- if isinstance(self.controller,
- flavormanage_v21.FlavorManageController):
- status_int = self.controller._delete.wsgi_code
- else:
- status_int = res.status_int
- self.assertEqual(202, status_int)
-
- # subsequent delete should fail
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller._delete, req, "failtest")
-
- def _test_create_missing_parameter(self, parameter):
- body = {
- "flavor": {
- "name": "azAZ09. -_",
- "ram": 512,
- "vcpus": 2,
- "disk": 1,
- "OS-FLV-EXT-DATA:ephemeral": 1,
- "id": unicode('1234'),
- "swap": 512,
- "rxtx_factor": 1,
- "os-flavor-access:is_public": True,
- }
- }
-
- del body['flavor'][parameter]
-
- req = fakes.HTTPRequest.blank(self.base_url)
- self.assertRaises(self.validation_error, self.controller._create,
- req, body=body)
-
- def test_create_missing_name(self):
- self._test_create_missing_parameter('name')
-
- def test_create_missing_ram(self):
- self._test_create_missing_parameter('ram')
-
- def test_create_missing_vcpus(self):
- self._test_create_missing_parameter('vcpus')
-
- def test_create_missing_disk(self):
- self._test_create_missing_parameter('disk')
-
- def _create_flavor_success_case(self, body):
- req = webob.Request.blank(self.base_url)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- self.assertEqual(200, res.status_code)
- return jsonutils.loads(res.body)
-
- def test_create(self):
- body = self._create_flavor_success_case(self.request_body)
- for key in self.expected_flavor["flavor"]:
- self.assertEqual(body["flavor"][key],
- self.expected_flavor["flavor"][key])
-
- def test_create_public_default(self):
- del self.request_body['flavor']['os-flavor-access:is_public']
- body = self._create_flavor_success_case(self.request_body)
- for key in self.expected_flavor["flavor"]:
- self.assertEqual(body["flavor"][key],
- self.expected_flavor["flavor"][key])
-
- def test_create_without_flavorid(self):
- del self.request_body['flavor']['id']
- body = self._create_flavor_success_case(self.request_body)
- for key in self.expected_flavor["flavor"]:
- self.assertEqual(body["flavor"][key],
- self.expected_flavor["flavor"][key])
-
- def _create_flavor_bad_request_case(self, body):
- self.stubs.UnsetAll()
-
- req = webob.Request.blank(self.base_url)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- self.assertEqual(res.status_code, 400)
-
- def test_create_invalid_name(self):
- self.request_body['flavor']['name'] = 'bad !@#!$% name'
- self._create_flavor_bad_request_case(self.request_body)
-
- def test_create_flavor_name_is_whitespace(self):
- self.request_body['flavor']['name'] = ' '
- self._create_flavor_bad_request_case(self.request_body)
-
- def test_create_with_name_too_long(self):
- self.request_body['flavor']['name'] = 'a' * 256
- self._create_flavor_bad_request_case(self.request_body)
-
- def test_create_without_flavorname(self):
- del self.request_body['flavor']['name']
- self._create_flavor_bad_request_case(self.request_body)
-
- def test_create_empty_body(self):
- body = {
- "flavor": {}
- }
- self._create_flavor_bad_request_case(body)
-
- def test_create_no_body(self):
- body = {}
- self._create_flavor_bad_request_case(body)
-
- def test_create_invalid_format_body(self):
- body = {
- "flavor": []
- }
- self._create_flavor_bad_request_case(body)
-
- def test_create_invalid_flavorid(self):
- self.request_body['flavor']['id'] = "!@#!$#!$^#&^$&"
- self._create_flavor_bad_request_case(self.request_body)
-
- def test_create_check_flavor_id_length(self):
- MAX_LENGTH = 255
- self.request_body['flavor']['id'] = "a" * (MAX_LENGTH + 1)
- self._create_flavor_bad_request_case(self.request_body)
-
- def test_create_with_leading_trailing_whitespaces_in_flavor_id(self):
- self.request_body['flavor']['id'] = " bad_id "
- self._create_flavor_bad_request_case(self.request_body)
-
- def test_create_without_ram(self):
- del self.request_body['flavor']['ram']
- self._create_flavor_bad_request_case(self.request_body)
-
- def test_create_with_0_ram(self):
- self.request_body['flavor']['ram'] = 0
- self._create_flavor_bad_request_case(self.request_body)
-
- def test_create_without_vcpus(self):
- del self.request_body['flavor']['vcpus']
- self._create_flavor_bad_request_case(self.request_body)
-
- def test_create_with_0_vcpus(self):
- self.request_body['flavor']['vcpus'] = 0
- self._create_flavor_bad_request_case(self.request_body)
-
- def test_create_without_disk(self):
- del self.request_body['flavor']['disk']
- self._create_flavor_bad_request_case(self.request_body)
-
- def test_create_with_minus_disk(self):
- self.request_body['flavor']['disk'] = -1
- self._create_flavor_bad_request_case(self.request_body)
-
- def test_create_with_minus_ephemeral(self):
- self.request_body['flavor']['OS-FLV-EXT-DATA:ephemeral'] = -1
- self._create_flavor_bad_request_case(self.request_body)
-
- def test_create_with_minus_swap(self):
- self.request_body['flavor']['swap'] = -1
- self._create_flavor_bad_request_case(self.request_body)
-
- def test_create_with_minus_rxtx_factor(self):
- self.request_body['flavor']['rxtx_factor'] = -1
- self._create_flavor_bad_request_case(self.request_body)
-
- def test_create_with_non_boolean_is_public(self):
- self.request_body['flavor']['os-flavor-access:is_public'] = 123
- self._create_flavor_bad_request_case(self.request_body)
-
- def test_flavor_exists_exception_returns_409(self):
- expected = {
- "flavor": {
- "name": "test",
- "ram": 512,
- "vcpus": 2,
- "disk": 1,
- "OS-FLV-EXT-DATA:ephemeral": 1,
- "id": 1235,
- "swap": 512,
- "rxtx_factor": 1,
- "os-flavor-access:is_public": True,
- }
- }
-
- def fake_create(name, memory_mb, vcpus, root_gb, ephemeral_gb,
- flavorid, swap, rxtx_factor, is_public):
- raise exception.FlavorExists(name=name)
-
- self.stubs.Set(flavors, "create", fake_create)
- req = webob.Request.blank(self.base_url)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = jsonutils.dumps(expected)
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 409)
-
- @mock.patch('nova.compute.flavors.create',
- side_effect=exception.FlavorCreateFailed)
- def test_flavor_create_db_failed(self, mock_create):
- request_dict = {
- "flavor": {
- "name": "test",
- 'id': "12345",
- "ram": 512,
- "vcpus": 2,
- "disk": 1,
- "OS-FLV-EXT-DATA:ephemeral": 1,
- "swap": 512,
- "rxtx_factor": 1,
- "os-flavor-access:is_public": True,
- }
- }
- req = webob.Request.blank(self.base_url)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = jsonutils.dumps(request_dict)
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 500)
- self.assertIn('Unable to create flavor', res.body)
-
- def test_invalid_memory_mb(self):
- """Check negative and decimal number can't be accepted."""
-
- self.stubs.UnsetAll()
- self.assertRaises(exception.InvalidInput, flavors.create, "abc",
- -512, 2, 1, 1, 1234, 512, 1, True)
- self.assertRaises(exception.InvalidInput, flavors.create, "abcd",
- 512.2, 2, 1, 1, 1234, 512, 1, True)
- self.assertRaises(exception.InvalidInput, flavors.create, "abcde",
- None, 2, 1, 1, 1234, 512, 1, True)
- self.assertRaises(exception.InvalidInput, flavors.create, "abcdef",
- 512, 2, None, 1, 1234, 512, 1, True)
- self.assertRaises(exception.InvalidInput, flavors.create, "abcdef",
- "test_memory_mb", 2, None, 1, 1234, 512, 1, True)
-
-
-class FakeRequest(object):
- environ = {"nova.context": context.get_admin_context()}
-
-
-class PrivateFlavorManageTestV21(test.TestCase):
- controller = flavormanage_v21.FlavorManageController()
- base_url = '/v2/fake/flavors'
-
- def setUp(self):
- super(PrivateFlavorManageTestV21, self).setUp()
- self.flavor_access_controller = flavor_access.FlavorAccessController()
- self.ctxt = context.RequestContext('fake', 'fake',
- is_admin=True, auth_token=True)
- self.app = self._setup_app()
- self.expected = {
- "flavor": {
- "name": "test",
- "ram": 512,
- "vcpus": 2,
- "disk": 1,
- "OS-FLV-EXT-DATA:ephemeral": 1,
- "swap": 512,
- "rxtx_factor": 1
- }
- }
-
- def _setup_app(self):
- return fakes.wsgi_app_v21(init_only=('flavor-manage',
- 'os-flavor-access',
- 'os-flavor-rxtx', 'flavors',
- 'os-flavor-extra-data'),
- fake_auth_context=self.ctxt)
-
- def _get_response(self):
- req = webob.Request.blank(self.base_url)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = jsonutils.dumps(self.expected)
- res = req.get_response(self.app)
- return jsonutils.loads(res.body)
-
- def test_create_private_flavor_should_not_grant_flavor_access(self):
- self.expected["flavor"]["os-flavor-access:is_public"] = False
- body = self._get_response()
- for key in self.expected["flavor"]:
- self.assertEqual(body["flavor"][key], self.expected["flavor"][key])
- flavor_access_body = self.flavor_access_controller.index(
- FakeRequest(), body["flavor"]["id"])
- expected_flavor_access_body = {
- "tenant_id": "%s" % self.ctxt.project_id,
- "flavor_id": "%s" % body["flavor"]["id"]
- }
- self.assertNotIn(expected_flavor_access_body,
- flavor_access_body["flavor_access"])
-
- def test_create_public_flavor_should_not_create_flavor_access(self):
- self.expected["flavor"]["os-flavor-access:is_public"] = True
- self.mox.StubOutWithMock(flavors, "add_flavor_access")
- self.mox.ReplayAll()
- body = self._get_response()
- for key in self.expected["flavor"]:
- self.assertEqual(body["flavor"][key], self.expected["flavor"][key])
-
-
-class FlavorManageTestV2(FlavorManageTestV21):
- controller = flavormanage_v2.FlavorManageController()
- validation_error = webob.exc.HTTPBadRequest
-
- def setUp(self):
- super(FlavorManageTestV2, self).setUp()
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Flavormanage', 'Flavorextradata',
- 'Flavor_access', 'Flavor_rxtx', 'Flavor_swap'])
-
- def _setup_app(self):
- return fakes.wsgi_app(init_only=('flavors',),
- fake_auth_context=self.ctxt)
-
-
-class PrivateFlavorManageTestV2(PrivateFlavorManageTestV21):
- controller = flavormanage_v2.FlavorManageController()
-
- def setUp(self):
- super(PrivateFlavorManageTestV2, self).setUp()
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Flavormanage', 'Flavorextradata',
- 'Flavor_access', 'Flavor_rxtx', 'Flavor_swap'])
-
- def _setup_app(self):
- return fakes.wsgi_app(init_only=('flavors',),
- fake_auth_context=self.ctxt)
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py b/nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py
deleted file mode 100644
index 9b4a06cc14..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_rxtx.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-from oslo.serialization import jsonutils
-import webob
-
-from nova.compute import flavors
-from nova import test
-from nova.tests.api.openstack import fakes
-
-FAKE_FLAVORS = {
- 'flavor 1': {
- "flavorid": '1',
- "name": 'flavor 1',
- "memory_mb": '256',
- "root_gb": '10',
- "swap": '5',
- "disabled": False,
- "ephemeral_gb": '20',
- "rxtx_factor": '1.0',
- "vcpus": 1,
- },
- 'flavor 2': {
- "flavorid": '2',
- "name": 'flavor 2',
- "memory_mb": '512',
- "root_gb": '10',
- "swap": '10',
- "ephemeral_gb": '25',
- "rxtx_factor": None,
- "disabled": False,
- "vcpus": 1,
- },
-}
-
-
-def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
- return FAKE_FLAVORS['flavor %s' % flavorid]
-
-
-def fake_get_all_flavors_sorted_list(context=None, inactive=False,
- filters=None, sort_key='flavorid',
- sort_dir='asc', limit=None, marker=None):
- return [
- fake_flavor_get_by_flavor_id(1),
- fake_flavor_get_by_flavor_id(2)
- ]
-
-
-class FlavorRxtxTestV21(test.NoDBTestCase):
- content_type = 'application/json'
- _prefix = "/v2/fake"
-
- def setUp(self):
- super(FlavorRxtxTestV21, self).setUp()
- ext = ('nova.api.openstack.compute.contrib'
- '.flavor_rxtx.Flavor_rxtx')
- self.flags(osapi_compute_extension=[ext])
- fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(flavors, "get_all_flavors_sorted_list",
- fake_get_all_flavors_sorted_list)
- self.stubs.Set(flavors,
- "get_flavor_by_flavor_id",
- fake_flavor_get_by_flavor_id)
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(self._get_app())
- return res
-
- def _get_app(self):
- return fakes.wsgi_app_v21(init_only=('servers',
- 'flavors', 'os-flavor-rxtx'))
-
- def _get_flavor(self, body):
- return jsonutils.loads(body).get('flavor')
-
- def _get_flavors(self, body):
- return jsonutils.loads(body).get('flavors')
-
- def assertFlavorRxtx(self, flavor, rxtx):
- self.assertEqual(str(flavor.get('rxtx_factor')), rxtx)
-
- def test_show(self):
- url = self._prefix + '/flavors/1'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- self.assertFlavorRxtx(self._get_flavor(res.body), '1.0')
-
- def test_detail(self):
- url = self._prefix + '/flavors/detail'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- flavors = self._get_flavors(res.body)
- self.assertFlavorRxtx(flavors[0], '1.0')
- self.assertFlavorRxtx(flavors[1], '')
-
-
-class FlavorRxtxTestV20(FlavorRxtxTestV21):
-
- def _get_app(self):
- return fakes.wsgi_app()
-
-
-class FlavorRxtxXmlTest(FlavorRxtxTestV20):
- content_type = 'application/xml'
-
- def _get_flavor(self, body):
- return etree.XML(body)
-
- def _get_flavors(self, body):
- return etree.XML(body).getchildren()
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py b/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py
deleted file mode 100644
index 763fa3335e..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-from oslo.serialization import jsonutils
-import webob
-
-from nova.compute import flavors
-from nova import test
-from nova.tests.api.openstack import fakes
-
-FAKE_FLAVORS = {
- 'flavor 1': {
- "flavorid": '1',
- "name": 'flavor 1',
- "memory_mb": '256',
- "root_gb": '10',
- "swap": 512,
- "vcpus": 1,
- "ephemeral_gb": 1,
- "disabled": False,
- },
- 'flavor 2': {
- "flavorid": '2',
- "name": 'flavor 2',
- "memory_mb": '512',
- "root_gb": '10',
- "swap": None,
- "vcpus": 1,
- "ephemeral_gb": 1,
- "disabled": False,
- },
-}
-
-
-# TODO(jogo) dedup these across nova.api.openstack.contrib.test_flavor*
-def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
- return FAKE_FLAVORS['flavor %s' % flavorid]
-
-
-def fake_get_all_flavors_sorted_list(context=None, inactive=False,
- filters=None, sort_key='flavorid',
- sort_dir='asc', limit=None, marker=None):
- return [
- fake_flavor_get_by_flavor_id(1),
- fake_flavor_get_by_flavor_id(2)
- ]
-
-
-class FlavorSwapTestV21(test.NoDBTestCase):
- base_url = '/v2/fake/flavors'
- content_type = 'application/json'
- prefix = ''
-
- def setUp(self):
- super(FlavorSwapTestV21, self).setUp()
- ext = ('nova.api.openstack.compute.contrib'
- '.flavor_swap.Flavor_swap')
- self.flags(osapi_compute_extension=[ext])
- fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(flavors, "get_all_flavors_sorted_list",
- fake_get_all_flavors_sorted_list)
- self.stubs.Set(flavors,
- "get_flavor_by_flavor_id",
- fake_flavor_get_by_flavor_id)
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app_v21(init_only=('flavors')))
- return res
-
- def _get_flavor(self, body):
- return jsonutils.loads(body).get('flavor')
-
- def _get_flavors(self, body):
- return jsonutils.loads(body).get('flavors')
-
- def assertFlavorSwap(self, flavor, swap):
- self.assertEqual(str(flavor.get('%sswap' % self.prefix)), swap)
-
- def test_show(self):
- url = self.base_url + '/1'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- self.assertFlavorSwap(self._get_flavor(res.body), '512')
-
- def test_detail(self):
- url = self.base_url + '/detail'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- flavors = self._get_flavors(res.body)
- self.assertFlavorSwap(flavors[0], '512')
- self.assertFlavorSwap(flavors[1], '')
-
-
-class FlavorSwapTestV2(FlavorSwapTestV21):
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app())
- return res
-
-
-class FlavorSwapXmlTest(FlavorSwapTestV2):
- content_type = 'application/xml'
-
- def _get_flavor(self, body):
- return etree.XML(body)
-
- def _get_flavors(self, body):
- return etree.XML(body).getchildren()
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavorextradata.py b/nova/tests/api/openstack/compute/contrib/test_flavorextradata.py
deleted file mode 100644
index 3a730754e0..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_flavorextradata.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-from oslo.serialization import jsonutils
-import webob
-
-from nova.compute import flavors
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-def fake_get_flavor_by_flavor_id(flavorid, ctxt=None):
- return {
- 'id': flavorid,
- 'flavorid': str(flavorid),
- 'root_gb': 1,
- 'ephemeral_gb': 1,
- 'name': u'test',
- 'deleted': False,
- 'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1),
- 'updated_at': None,
- 'memory_mb': 512,
- 'vcpus': 1,
- 'extra_specs': {},
- 'deleted_at': None,
- 'vcpu_weight': None,
- 'swap': 0,
- 'disabled': False,
- }
-
-
-def fake_get_all_flavors_sorted_list(context=None, inactive=False,
- filters=None, sort_key='flavorid',
- sort_dir='asc', limit=None, marker=None):
- return [
- fake_get_flavor_by_flavor_id(1),
- fake_get_flavor_by_flavor_id(2)
- ]
-
-
-class FlavorExtraDataTestV21(test.NoDBTestCase):
- base_url = '/v2/fake/flavors'
-
- def setUp(self):
- super(FlavorExtraDataTestV21, self).setUp()
- ext = ('nova.api.openstack.compute.contrib'
- '.flavorextradata.Flavorextradata')
- self.flags(osapi_compute_extension=[ext])
- self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
- fake_get_flavor_by_flavor_id)
- self.stubs.Set(flavors, 'get_all_flavors_sorted_list',
- fake_get_all_flavors_sorted_list)
- self._setup_app()
-
- def _setup_app(self):
- self.app = fakes.wsgi_app_v21(init_only=('flavors'))
-
- def _verify_flavor_response(self, flavor, expected):
- for key in expected:
- self.assertEqual(flavor[key], expected[key])
-
- def test_show(self):
- expected = {
- 'flavor': {
- 'id': '1',
- 'name': 'test',
- 'ram': 512,
- 'vcpus': 1,
- 'disk': 1,
- 'OS-FLV-EXT-DATA:ephemeral': 1,
- }
- }
-
- url = self.base_url + '/1'
- req = webob.Request.blank(url)
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(self.app)
- body = jsonutils.loads(res.body)
- self._verify_flavor_response(body['flavor'], expected['flavor'])
-
- def test_detail(self):
- expected = [
- {
- 'id': '1',
- 'name': 'test',
- 'ram': 512,
- 'vcpus': 1,
- 'disk': 1,
- 'OS-FLV-EXT-DATA:ephemeral': 1,
- },
- {
- 'id': '2',
- 'name': 'test',
- 'ram': 512,
- 'vcpus': 1,
- 'disk': 1,
- 'OS-FLV-EXT-DATA:ephemeral': 1,
- },
- ]
-
- url = self.base_url + '/detail'
- req = webob.Request.blank(url)
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(self.app)
- body = jsonutils.loads(res.body)
- for i, flavor in enumerate(body['flavors']):
- self._verify_flavor_response(flavor, expected[i])
-
-
-class FlavorExtraDataTestV2(FlavorExtraDataTestV21):
-
- def _setup_app(self):
- self.app = fakes.wsgi_app(init_only=('flavors',))
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py b/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py
deleted file mode 100644
index 31d2dc0e41..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py
+++ /dev/null
@@ -1,403 +0,0 @@
-# Copyright 2011 University of Southern California
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-import webob
-
-from nova.api.openstack.compute.contrib import flavorextraspecs \
- as flavorextraspecs_v2
-from nova.api.openstack.compute.plugins.v3 import flavors_extraspecs \
- as flavorextraspecs_v21
-import nova.db
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests.objects import test_flavor
-
-
-def return_create_flavor_extra_specs(context, flavor_id, extra_specs):
- return stub_flavor_extra_specs()
-
-
-def return_flavor_extra_specs(context, flavor_id):
- return stub_flavor_extra_specs()
-
-
-def return_flavor_extra_specs_item(context, flavor_id, key):
- return {key: stub_flavor_extra_specs()[key]}
-
-
-def return_empty_flavor_extra_specs(context, flavor_id):
- return {}
-
-
-def delete_flavor_extra_specs(context, flavor_id, key):
- pass
-
-
-def stub_flavor_extra_specs():
- specs = {
- "key1": "value1",
- "key2": "value2",
- "key3": "value3",
- "key4": "value4",
- "key5": "value5"}
- return specs
-
-
-class FlavorsExtraSpecsTestV21(test.TestCase):
- bad_request = exception.ValidationError
- flavorextraspecs = flavorextraspecs_v21
-
- def _get_request(self, url, use_admin_context=False):
- req_url = '/v2/fake/flavors/' + url
- return fakes.HTTPRequest.blank(req_url,
- use_admin_context=use_admin_context)
-
- def setUp(self):
- super(FlavorsExtraSpecsTestV21, self).setUp()
- fakes.stub_out_key_pair_funcs(self.stubs)
- self.controller = self.flavorextraspecs.FlavorExtraSpecsController()
-
- def test_index(self):
- flavor = dict(test_flavor.fake_flavor,
- extra_specs={'key1': 'value1'})
-
- req = self._get_request('1/os-extra_specs')
- with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
- mock_get.return_value = flavor
- res_dict = self.controller.index(req, 1)
-
- self.assertEqual('value1', res_dict['extra_specs']['key1'])
-
- def test_index_no_data(self):
- self.stubs.Set(nova.db, 'flavor_extra_specs_get',
- return_empty_flavor_extra_specs)
-
- req = self._get_request('1/os-extra_specs')
- res_dict = self.controller.index(req, 1)
-
- self.assertEqual(0, len(res_dict['extra_specs']))
-
- def test_show(self):
- flavor = dict(test_flavor.fake_flavor,
- extra_specs={'key5': 'value5'})
- req = self._get_request('1/os-extra_specs/key5')
- with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
- mock_get.return_value = flavor
- res_dict = self.controller.show(req, 1, 'key5')
-
- self.assertEqual('value5', res_dict['key5'])
-
- def test_show_spec_not_found(self):
- self.stubs.Set(nova.db, 'flavor_extra_specs_get',
- return_empty_flavor_extra_specs)
-
- req = self._get_request('1/os-extra_specs/key6')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
- req, 1, 'key6')
-
- def test_not_found_because_flavor(self):
- req = self._get_request('1/os-extra_specs/key5',
- use_admin_context=True)
- with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
- mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
- req, 1, 'key5')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
- req, 1, 'key5', body={'key5': 'value5'})
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- req, 1, 'key5')
-
- req = self._get_request('1/os-extra_specs', use_admin_context=True)
- with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
- mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
- req, 1, body={'extra_specs': {'key5': 'value5'}})
-
- def test_delete(self):
- flavor = dict(test_flavor.fake_flavor,
- extra_specs={'key5': 'value5'})
- self.stubs.Set(nova.db, 'flavor_extra_specs_delete',
- delete_flavor_extra_specs)
-
- req = self._get_request('1/os-extra_specs/key5',
- use_admin_context=True)
- with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
- mock_get.return_value = flavor
- self.controller.delete(req, 1, 'key5')
-
- def test_delete_no_admin(self):
- self.stubs.Set(nova.db, 'flavor_extra_specs_delete',
- delete_flavor_extra_specs)
-
- req = self._get_request('1/os-extra_specs/key5')
- self.assertRaises(exception.Forbidden, self.controller.delete,
- req, 1, 'key 5')
-
- def test_delete_spec_not_found(self):
- req = self._get_request('1/os-extra_specs/key6',
- use_admin_context=True)
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- req, 1, 'key6')
-
- def test_create(self):
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- return_create_flavor_extra_specs)
- body = {"extra_specs": {"key1": "value1", "key2": 0.5, "key3": 5}}
-
- req = self._get_request('1/os-extra_specs', use_admin_context=True)
- res_dict = self.controller.create(req, 1, body=body)
-
- self.assertEqual('value1', res_dict['extra_specs']['key1'])
- self.assertEqual(0.5, res_dict['extra_specs']['key2'])
- self.assertEqual(5, res_dict['extra_specs']['key3'])
-
- def test_create_no_admin(self):
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- return_create_flavor_extra_specs)
- body = {"extra_specs": {"key1": "value1"}}
-
- req = self._get_request('1/os-extra_specs')
- self.assertRaises(exception.Forbidden, self.controller.create,
- req, 1, body=body)
-
- def test_create_flavor_not_found(self):
- def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
- raise exception.FlavorNotFound(flavor_id='')
-
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- fake_instance_type_extra_specs_update_or_create)
- body = {"extra_specs": {"key1": "value1"}}
- req = self._get_request('1/os-extra_specs', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
- req, 1, body=body)
-
- def test_create_flavor_db_duplicate(self):
- def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
- raise exception.FlavorExtraSpecUpdateCreateFailed(id=1, retries=5)
-
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- fake_instance_type_extra_specs_update_or_create)
- body = {"extra_specs": {"key1": "value1"}}
- req = self._get_request('1/os-extra_specs', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPConflict, self.controller.create,
- req, 1, body=body)
-
- def _test_create_bad_request(self, body):
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- return_create_flavor_extra_specs)
-
- req = self._get_request('1/os-extra_specs', use_admin_context=True)
- self.assertRaises(self.bad_request, self.controller.create,
- req, 1, body=body)
-
- def test_create_empty_body(self):
- self._test_create_bad_request('')
-
- def test_create_non_dict_extra_specs(self):
- self._test_create_bad_request({"extra_specs": "non_dict"})
-
- def test_create_non_string_key(self):
- self._test_create_bad_request({"extra_specs": {None: "value1"}})
-
- def test_create_non_string_value(self):
- self._test_create_bad_request({"extra_specs": {"key1": None}})
-
- def test_create_zero_length_key(self):
- self._test_create_bad_request({"extra_specs": {"": "value1"}})
-
- def test_create_long_key(self):
- key = "a" * 256
- self._test_create_bad_request({"extra_specs": {key: "value1"}})
-
- def test_create_long_value(self):
- value = "a" * 256
- self._test_create_bad_request({"extra_specs": {"key1": value}})
-
- @mock.patch('nova.db.flavor_extra_specs_update_or_create')
- def test_create_really_long_integer_value(self, mock_flavor_extra_specs):
- value = 10 ** 1000
- mock_flavor_extra_specs.side_effects = return_create_flavor_extra_specs
-
- req = self._get_request('1/os-extra_specs', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, 1, body={"extra_specs": {"key1": value}})
-
- @mock.patch('nova.db.flavor_extra_specs_update_or_create')
- def test_create_invalid_specs_key(self, mock_flavor_extra_specs):
- invalid_keys = ("key1/", "<key>", "$$akey$", "!akey", "")
- mock_flavor_extra_specs.side_effects = return_create_flavor_extra_specs
-
- for key in invalid_keys:
- body = {"extra_specs": {key: "value1"}}
- req = self._get_request('1/os-extra_specs', use_admin_context=True)
- self.assertRaises(self.bad_request, self.controller.create,
- req, 1, body=body)
-
- @mock.patch('nova.db.flavor_extra_specs_update_or_create')
- def test_create_valid_specs_key(self, mock_flavor_extra_specs):
- valid_keys = ("key1", "month.price", "I_am-a Key", "finance:g2")
- mock_flavor_extra_specs.side_effects = return_create_flavor_extra_specs
-
- for key in valid_keys:
- body = {"extra_specs": {key: "value1"}}
- req = self._get_request('1/os-extra_specs', use_admin_context=True)
- res_dict = self.controller.create(req, 1, body=body)
- self.assertEqual('value1', res_dict['extra_specs'][key])
-
- def test_update_item(self):
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- return_create_flavor_extra_specs)
- body = {"key1": "value1"}
-
- req = self._get_request('1/os-extra_specs/key1',
- use_admin_context=True)
- res_dict = self.controller.update(req, 1, 'key1', body=body)
-
- self.assertEqual('value1', res_dict['key1'])
-
- def test_update_item_no_admin(self):
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- return_create_flavor_extra_specs)
- body = {"key1": "value1"}
-
- req = self._get_request('1/os-extra_specs/key1')
- self.assertRaises(exception.Forbidden, self.controller.update,
- req, 1, 'key1', body=body)
-
- def _test_update_item_bad_request(self, body):
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- return_create_flavor_extra_specs)
-
- req = self._get_request('1/os-extra_specs/key1',
- use_admin_context=True)
- self.assertRaises(self.bad_request, self.controller.update,
- req, 1, 'key1', body=body)
-
- def test_update_item_empty_body(self):
- self._test_update_item_bad_request('')
-
- def test_update_item_too_many_keys(self):
- body = {"key1": "value1", "key2": "value2"}
- self._test_update_item_bad_request(body)
-
- def test_update_item_non_dict_extra_specs(self):
- self._test_update_item_bad_request("non_dict")
-
- def test_update_item_non_string_key(self):
- self._test_update_item_bad_request({None: "value1"})
-
- def test_update_item_non_string_value(self):
- self._test_update_item_bad_request({"key1": None})
-
- def test_update_item_zero_length_key(self):
- self._test_update_item_bad_request({"": "value1"})
-
- def test_update_item_long_key(self):
- key = "a" * 256
- self._test_update_item_bad_request({key: "value1"})
-
- def test_update_item_long_value(self):
- value = "a" * 256
- self._test_update_item_bad_request({"key1": value})
-
- def test_update_item_body_uri_mismatch(self):
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- return_create_flavor_extra_specs)
- body = {"key1": "value1"}
-
- req = self._get_request('1/os-extra_specs/bad', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, 1, 'bad', body=body)
-
- def test_update_flavor_not_found(self):
- def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
- raise exception.FlavorNotFound(flavor_id='')
-
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- fake_instance_type_extra_specs_update_or_create)
- body = {"key1": "value1"}
-
- req = self._get_request('1/os-extra_specs/key1',
- use_admin_context=True)
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
- req, 1, 'key1', body=body)
-
- def test_update_flavor_db_duplicate(self):
- def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
- raise exception.FlavorExtraSpecUpdateCreateFailed(id=1, retries=5)
-
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- fake_instance_type_extra_specs_update_or_create)
- body = {"key1": "value1"}
-
- req = self._get_request('1/os-extra_specs/key1',
- use_admin_context=True)
- self.assertRaises(webob.exc.HTTPConflict, self.controller.update,
- req, 1, 'key1', body=body)
-
- def test_update_really_long_integer_value(self):
- value = 10 ** 1000
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- return_create_flavor_extra_specs)
-
- req = self._get_request('1/os-extra_specs/key1',
- use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, 1, 'key1', body={"key1": value})
-
-
-class FlavorsExtraSpecsTestV2(FlavorsExtraSpecsTestV21):
- bad_request = webob.exc.HTTPBadRequest
- flavorextraspecs = flavorextraspecs_v2
-
-
-class FlavorsExtraSpecsXMLSerializerTest(test.TestCase):
- def test_serializer(self):
- serializer = flavorextraspecs_v2.ExtraSpecsTemplate()
- expected = ("<?xml version='1.0' encoding='UTF-8'?>\n"
- '<extra_specs><key1>value1</key1></extra_specs>')
- text = serializer.serialize(dict(extra_specs={"key1": "value1"}))
- self.assertEqual(text, expected)
-
- def test_show_update_serializer(self):
- serializer = flavorextraspecs_v2.ExtraSpecTemplate()
- expected = ("<?xml version='1.0' encoding='UTF-8'?>\n"
- '<extra_spec key="key1">value1</extra_spec>')
- text = serializer.serialize(dict({"key1": "value1"}))
- self.assertEqual(text, expected)
-
- def test_serializer_with_colon_tagname(self):
- # Our test object to serialize
- obj = {'extra_specs': {'foo:bar': '999'}}
- serializer = flavorextraspecs_v2.ExtraSpecsTemplate()
- expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
- '<extra_specs><foo:bar xmlns:foo="foo">999</foo:bar>'
- '</extra_specs>'))
- result = serializer.serialize(obj)
- self.assertEqual(expected_xml, result)
diff --git a/nova/tests/api/openstack/compute/contrib/test_floating_ip_dns.py b/nova/tests/api/openstack/compute/contrib/test_floating_ip_dns.py
deleted file mode 100644
index 9ad0e11899..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_floating_ip_dns.py
+++ /dev/null
@@ -1,412 +0,0 @@
-# Copyright 2011 Andrew Bogott for the Wikimedia Foundation
-# All Rights Reserved.
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import urllib
-
-from lxml import etree
-import webob
-
-from nova.api.openstack.compute.contrib import floating_ip_dns as fipdns_v2
-from nova.api.openstack.compute.plugins.v3 import floating_ip_dns as \
- fipdns_v21
-from nova import context
-from nova import db
-from nova import exception
-from nova import network
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-name = "arbitraryname"
-name2 = "anotherarbitraryname"
-
-test_ipv4_address = '10.0.0.66'
-test_ipv4_address2 = '10.0.0.67'
-
-test_ipv6_address = 'fe80:0:0:0:0:0:a00:42'
-
-domain = "example.org"
-domain2 = "example.net"
-floating_ip_id = '1'
-
-
-def _quote_domain(domain):
- """Domain names tend to have .'s in them. Urllib doesn't quote dots,
- but Routes tends to choke on them, so we need an extra level of
- by-hand quoting here. This function needs to duplicate the one in
- python-novaclient/novaclient/v1_1/floating_ip_dns.py
- """
- return urllib.quote(domain.replace('.', '%2E'))
-
-
-def network_api_get_floating_ip(self, context, id):
- return {'id': floating_ip_id, 'address': test_ipv4_address,
- 'fixed_ip': None}
-
-
-def network_get_dns_domains(self, context):
- return [{'domain': 'example.org', 'scope': 'public'},
- {'domain': 'example.com', 'scope': 'public',
- 'project': 'project1'},
- {'domain': 'private.example.com', 'scope': 'private',
- 'availability_zone': 'avzone'}]
-
-
-def network_get_dns_entries_by_address(self, context, address, domain):
- return [name, name2]
-
-
-def network_get_dns_entries_by_name(self, context, address, domain):
- return [test_ipv4_address]
-
-
-def network_add_dns_entry(self, context, address, name, dns_type, domain):
- return {'dns_entry': {'ip': test_ipv4_address,
- 'name': name,
- 'type': dns_type,
- 'domain': domain}}
-
-
-def network_modify_dns_entry(self, context, address, name, domain):
- return {'dns_entry': {'name': name,
- 'ip': address,
- 'domain': domain}}
-
-
-def network_create_private_dns_domain(self, context, domain, avail_zone):
- pass
-
-
-def network_create_public_dns_domain(self, context, domain, project):
- pass
-
-
-class FloatingIpDNSTestV21(test.TestCase):
- floating_ip_dns = fipdns_v21
-
- def _create_floating_ip(self):
- """Create a floating ip object."""
- host = "fake_host"
- db.floating_ip_create(self.context,
- {'address': test_ipv4_address,
- 'host': host})
- db.floating_ip_create(self.context,
- {'address': test_ipv6_address,
- 'host': host})
-
- def _delete_floating_ip(self):
- db.floating_ip_destroy(self.context, test_ipv4_address)
- db.floating_ip_destroy(self.context, test_ipv6_address)
-
- def _check_status(self, expected_status, res, controller_methord):
- self.assertEqual(expected_status, controller_methord.wsgi_code)
-
- def _bad_request(self):
- return webob.exc.HTTPBadRequest
-
- def setUp(self):
- super(FloatingIpDNSTestV21, self).setUp()
- self.stubs.Set(network.api.API, "get_dns_domains",
- network_get_dns_domains)
- self.stubs.Set(network.api.API, "get_dns_entries_by_address",
- network_get_dns_entries_by_address)
- self.stubs.Set(network.api.API, "get_dns_entries_by_name",
- network_get_dns_entries_by_name)
- self.stubs.Set(network.api.API, "get_floating_ip",
- network_api_get_floating_ip)
- self.stubs.Set(network.api.API, "add_dns_entry",
- network_add_dns_entry)
- self.stubs.Set(network.api.API, "modify_dns_entry",
- network_modify_dns_entry)
- self.stubs.Set(network.api.API, "create_public_dns_domain",
- network_create_public_dns_domain)
- self.stubs.Set(network.api.API, "create_private_dns_domain",
- network_create_private_dns_domain)
-
- self.context = context.get_admin_context()
-
- self._create_floating_ip()
- temp = self.floating_ip_dns.FloatingIPDNSDomainController()
- self.domain_controller = temp
- self.entry_controller = self.floating_ip_dns.\
- FloatingIPDNSEntryController()
-
- def tearDown(self):
- self._delete_floating_ip()
- super(FloatingIpDNSTestV21, self).tearDown()
-
- def test_dns_domains_list(self):
- req = fakes.HTTPRequest.blank('/v2/123/os-floating-ip-dns')
- res_dict = self.domain_controller.index(req)
- entries = res_dict['domain_entries']
- self.assertTrue(entries)
- self.assertEqual(entries[0]['domain'], "example.org")
- self.assertFalse(entries[0]['project'])
- self.assertFalse(entries[0]['availability_zone'])
- self.assertEqual(entries[1]['domain'], "example.com")
- self.assertEqual(entries[1]['project'], "project1")
- self.assertFalse(entries[1]['availability_zone'])
- self.assertEqual(entries[2]['domain'], "private.example.com")
- self.assertFalse(entries[2]['project'])
- self.assertEqual(entries[2]['availability_zone'], "avzone")
-
- def _test_get_dns_entries_by_address(self, address):
-
- qparams = {'ip': address}
- params = "?%s" % urllib.urlencode(qparams) if qparams else ""
-
- req = fakes.HTTPRequest.blank(
- '/v2/123/os-floating-ip-dns/%s/entries/%s'
- % (_quote_domain(domain), params))
- entries = self.entry_controller.show(req, _quote_domain(domain),
- address)
- entries = entries.obj
- self.assertEqual(len(entries['dns_entries']), 2)
- self.assertEqual(entries['dns_entries'][0]['name'],
- name)
- self.assertEqual(entries['dns_entries'][1]['name'],
- name2)
- self.assertEqual(entries['dns_entries'][0]['domain'],
- domain)
-
- def test_get_dns_entries_by_ipv4_address(self):
- self._test_get_dns_entries_by_address(test_ipv4_address)
-
- def test_get_dns_entries_by_ipv6_address(self):
- self._test_get_dns_entries_by_address(test_ipv6_address)
-
- def test_get_dns_entries_by_name(self):
- req = fakes.HTTPRequest.blank(
- '/v2/123/os-floating-ip-dns/%s/entries/%s' %
- (_quote_domain(domain), name))
- entry = self.entry_controller.show(req, _quote_domain(domain), name)
-
- self.assertEqual(entry['dns_entry']['ip'],
- test_ipv4_address)
- self.assertEqual(entry['dns_entry']['domain'],
- domain)
-
- def test_dns_entries_not_found(self):
- def fake_get_dns_entries_by_name(self, context, address, domain):
- raise webob.exc.HTTPNotFound()
-
- self.stubs.Set(network.api.API, "get_dns_entries_by_name",
- fake_get_dns_entries_by_name)
-
- req = fakes.HTTPRequest.blank(
- '/v2/123/os-floating-ip-dns/%s/entries/%s' %
- (_quote_domain(domain), 'nonexistent'))
- self.assertRaises(webob.exc.HTTPNotFound,
- self.entry_controller.show,
- req, _quote_domain(domain), 'nonexistent')
-
- def test_create_entry(self):
- body = {'dns_entry':
- {'ip': test_ipv4_address,
- 'dns_type': 'A'}}
- req = fakes.HTTPRequest.blank(
- '/v2/123/os-floating-ip-dns/%s/entries/%s' %
- (_quote_domain(domain), name))
- entry = self.entry_controller.update(req, _quote_domain(domain),
- name, body=body)
- self.assertEqual(entry['dns_entry']['ip'], test_ipv4_address)
-
- def test_create_domain(self):
- req = fakes.HTTPRequest.blank('/v2/123/os-floating-ip-dns/%s' %
- _quote_domain(domain))
- body = {'domain_entry':
- {'scope': 'private',
- 'project': 'testproject'}}
- self.assertRaises(self._bad_request(),
- self.domain_controller.update,
- req, _quote_domain(domain), body=body)
-
- body = {'domain_entry':
- {'scope': 'public',
- 'availability_zone': 'zone1'}}
- self.assertRaises(self._bad_request(),
- self.domain_controller.update,
- req, _quote_domain(domain), body=body)
-
- body = {'domain_entry':
- {'scope': 'public',
- 'project': 'testproject'}}
- entry = self.domain_controller.update(req, _quote_domain(domain),
- body=body)
- self.assertEqual(entry['domain_entry']['domain'], domain)
- self.assertEqual(entry['domain_entry']['scope'], 'public')
- self.assertEqual(entry['domain_entry']['project'], 'testproject')
-
- body = {'domain_entry':
- {'scope': 'private',
- 'availability_zone': 'zone1'}}
- entry = self.domain_controller.update(req, _quote_domain(domain),
- body=body)
- self.assertEqual(entry['domain_entry']['domain'], domain)
- self.assertEqual(entry['domain_entry']['scope'], 'private')
- self.assertEqual(entry['domain_entry']['availability_zone'], 'zone1')
-
- def test_delete_entry(self):
- calls = []
-
- def network_delete_dns_entry(fakeself, context, name, domain):
- calls.append((name, domain))
-
- self.stubs.Set(network.api.API, "delete_dns_entry",
- network_delete_dns_entry)
-
- req = fakes.HTTPRequest.blank(
- '/v2/123/os-floating-ip-dns/%s/entries/%s' %
- (_quote_domain(domain), name))
- res = self.entry_controller.delete(req, _quote_domain(domain), name)
-
- self._check_status(202, res, self.entry_controller.delete)
- self.assertEqual([(name, domain)], calls)
-
- def test_delete_entry_notfound(self):
- def delete_dns_entry_notfound(fakeself, context, name, domain):
- raise exception.NotFound
-
- self.stubs.Set(network.api.API, "delete_dns_entry",
- delete_dns_entry_notfound)
-
- req = fakes.HTTPRequest.blank(
- '/v2/123/os-floating-ip-dns/%s/entries/%s' %
- (_quote_domain(domain), name))
- self.assertRaises(webob.exc.HTTPNotFound,
- self.entry_controller.delete, req, _quote_domain(domain), name)
-
- def test_delete_domain(self):
- calls = []
-
- def network_delete_dns_domain(fakeself, context, fqdomain):
- calls.append(fqdomain)
-
- self.stubs.Set(network.api.API, "delete_dns_domain",
- network_delete_dns_domain)
-
- req = fakes.HTTPRequest.blank('/v2/123/os-floating-ip-dns/%s' %
- _quote_domain(domain))
- res = self.domain_controller.delete(req, _quote_domain(domain))
-
- self._check_status(202, res, self.domain_controller.delete)
- self.assertEqual([domain], calls)
-
- def test_delete_domain_notfound(self):
- def delete_dns_domain_notfound(fakeself, context, fqdomain):
- raise exception.NotFound
-
- self.stubs.Set(network.api.API, "delete_dns_domain",
- delete_dns_domain_notfound)
-
- req = fakes.HTTPRequest.blank('/v2/123/os-floating-ip-dns/%s' %
- _quote_domain(domain))
- self.assertRaises(webob.exc.HTTPNotFound,
- self.domain_controller.delete, req, _quote_domain(domain))
-
- def test_modify(self):
- body = {'dns_entry':
- {'ip': test_ipv4_address2,
- 'dns_type': 'A'}}
- req = fakes.HTTPRequest.blank(
- '/v2/123/os-floating-ip-dns/%s/entries/%s' % (domain, name))
- entry = self.entry_controller.update(req, domain, name, body=body)
-
- self.assertEqual(entry['dns_entry']['ip'], test_ipv4_address2)
-
-
-class FloatingIpDNSTestV2(FloatingIpDNSTestV21):
- floating_ip_dns = fipdns_v2
-
- def _check_status(self, expected_status, res, controller_methord):
- self.assertEqual(expected_status, res.status_int)
-
- def _bad_request(self):
- return webob.exc.HTTPUnprocessableEntity
-
-
-class FloatingIpDNSSerializerTestV2(test.TestCase):
- floating_ip_dns = fipdns_v2
-
- def test_domains(self):
- serializer = self.floating_ip_dns.DomainsTemplate()
- text = serializer.serialize(dict(
- domain_entries=[
- dict(domain=domain, scope='public', project='testproject'),
- dict(domain=domain2, scope='private',
- availability_zone='avzone')]))
-
- tree = etree.fromstring(text)
- self.assertEqual('domain_entries', tree.tag)
- self.assertEqual(2, len(tree))
- self.assertEqual(domain, tree[0].get('domain'))
- self.assertEqual(domain2, tree[1].get('domain'))
- self.assertEqual('avzone', tree[1].get('availability_zone'))
-
- def test_domain_serializer(self):
- serializer = self.floating_ip_dns.DomainTemplate()
- text = serializer.serialize(dict(
- domain_entry=dict(domain=domain,
- scope='public',
- project='testproject')))
-
- tree = etree.fromstring(text)
- self.assertEqual('domain_entry', tree.tag)
- self.assertEqual(domain, tree.get('domain'))
- self.assertEqual('testproject', tree.get('project'))
-
- def test_entries_serializer(self):
- serializer = self.floating_ip_dns.FloatingIPDNSsTemplate()
- text = serializer.serialize(dict(
- dns_entries=[
- dict(ip=test_ipv4_address,
- type='A',
- domain=domain,
- name=name),
- dict(ip=test_ipv4_address2,
- type='C',
- domain=domain,
- name=name2)]))
-
- tree = etree.fromstring(text)
- self.assertEqual('dns_entries', tree.tag)
- self.assertEqual(2, len(tree))
- self.assertEqual('dns_entry', tree[0].tag)
- self.assertEqual('dns_entry', tree[1].tag)
- self.assertEqual(test_ipv4_address, tree[0].get('ip'))
- self.assertEqual('A', tree[0].get('type'))
- self.assertEqual(domain, tree[0].get('domain'))
- self.assertEqual(name, tree[0].get('name'))
- self.assertEqual(test_ipv4_address2, tree[1].get('ip'))
- self.assertEqual('C', tree[1].get('type'))
- self.assertEqual(domain, tree[1].get('domain'))
- self.assertEqual(name2, tree[1].get('name'))
-
- def test_entry_serializer(self):
- serializer = self.floating_ip_dns.FloatingIPDNSTemplate()
- text = serializer.serialize(dict(
- dns_entry=dict(
- ip=test_ipv4_address,
- type='A',
- domain=domain,
- name=name)))
-
- tree = etree.fromstring(text)
-
- self.assertEqual('dns_entry', tree.tag)
- self.assertEqual(test_ipv4_address, tree.get('ip'))
- self.assertEqual(domain, tree.get('domain'))
- self.assertEqual(name, tree.get('name'))
diff --git a/nova/tests/api/openstack/compute/contrib/test_floating_ip_pools.py b/nova/tests/api/openstack/compute/contrib/test_floating_ip_pools.py
deleted file mode 100644
index 1b24fe5fdf..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_floating_ip_pools.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-
-from nova.api.openstack.compute.contrib import floating_ip_pools as fipp_v2
-from nova.api.openstack.compute.plugins.v3 import floating_ip_pools as\
- fipp_v21
-from nova import context
-from nova import network
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-def fake_get_floating_ip_pools(self, context):
- return ['nova', 'other']
-
-
-class FloatingIpPoolTestV21(test.NoDBTestCase):
- floating_ip_pools = fipp_v21
- url = '/v2/fake/os-floating-ip-pools'
-
- def setUp(self):
- super(FloatingIpPoolTestV21, self).setUp()
- self.stubs.Set(network.api.API, "get_floating_ip_pools",
- fake_get_floating_ip_pools)
-
- self.context = context.RequestContext('fake', 'fake')
- self.controller = self.floating_ip_pools.FloatingIPPoolsController()
-
- def test_translate_floating_ip_pools_view(self):
- pools = fake_get_floating_ip_pools(None, self.context)
- view = self.floating_ip_pools._translate_floating_ip_pools_view(pools)
- self.assertIn('floating_ip_pools', view)
- self.assertEqual(view['floating_ip_pools'][0]['name'],
- pools[0])
- self.assertEqual(view['floating_ip_pools'][1]['name'],
- pools[1])
-
- def test_floating_ips_pools_list(self):
- req = fakes.HTTPRequest.blank(self.url)
- res_dict = self.controller.index(req)
-
- pools = fake_get_floating_ip_pools(None, self.context)
- response = {'floating_ip_pools': [{'name': name} for name in pools]}
- self.assertEqual(res_dict, response)
-
-
-class FloatingIpPoolTestV2(FloatingIpPoolTestV21):
- floating_ip_pools = fipp_v2
-
-
-class FloatingIpPoolSerializerTestV2(test.NoDBTestCase):
- floating_ip_pools = fipp_v2
-
- def test_index_serializer(self):
- serializer = self.floating_ip_pools.FloatingIPPoolsTemplate()
- text = serializer.serialize(dict(
- floating_ip_pools=[
- dict(name='nova'),
- dict(name='other')
- ]))
-
- tree = etree.fromstring(text)
-
- self.assertEqual('floating_ip_pools', tree.tag)
- self.assertEqual(2, len(tree))
- self.assertEqual('floating_ip_pool', tree[0].tag)
- self.assertEqual('floating_ip_pool', tree[1].tag)
- self.assertEqual('nova', tree[0].get('name'))
- self.assertEqual('other', tree[1].get('name'))
diff --git a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py b/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
deleted file mode 100644
index 34e8ab4af0..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
+++ /dev/null
@@ -1,853 +0,0 @@
-# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
-# Copyright 2011 Eldar Nugaev
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-import uuid
-
-from lxml import etree
-import mock
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute.contrib import floating_ips
-from nova.api.openstack import extensions
-from nova import compute
-from nova.compute import utils as compute_utils
-from nova import context
-from nova import db
-from nova import exception
-from nova import network
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_network
-
-
-FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-
-
-def network_api_get_floating_ip(self, context, id):
- return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
- 'fixed_ip_id': None}
-
-
-def network_api_get_floating_ip_by_address(self, context, address):
- return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
- 'fixed_ip_id': 10}
-
-
-def network_api_get_floating_ips_by_project(self, context):
- return [{'id': 1,
- 'address': '10.10.10.10',
- 'pool': 'nova',
- 'fixed_ip': {'address': '10.0.0.1',
- 'instance_uuid': FAKE_UUID,
- 'instance': {'uuid': FAKE_UUID}}},
- {'id': 2,
- 'pool': 'nova', 'interface': 'eth0',
- 'address': '10.10.10.11',
- 'fixed_ip': None}]
-
-
-def compute_api_get(self, context, instance_id, expected_attrs=None,
- want_objects=False):
- return dict(uuid=FAKE_UUID, id=instance_id, instance_type_id=1, host='bob')
-
-
-def network_api_allocate(self, context):
- return '10.10.10.10'
-
-
-def network_api_release(self, context, address):
- pass
-
-
-def compute_api_associate(self, context, instance_id, address):
- pass
-
-
-def network_api_associate(self, context, floating_address, fixed_address):
- pass
-
-
-def network_api_disassociate(self, context, instance, floating_address):
- pass
-
-
-def fake_instance_get(context, instance_id):
- return {
- "id": 1,
- "uuid": uuid.uuid4(),
- "name": 'fake',
- "user_id": 'fakeuser',
- "project_id": '123'}
-
-
-def stub_nw_info(stubs):
- def get_nw_info_for_instance(instance):
- return fake_network.fake_get_instance_nw_info(stubs)
- return get_nw_info_for_instance
-
-
-def get_instance_by_floating_ip_addr(self, context, address):
- return None
-
-
-class FloatingIpTestNeutron(test.NoDBTestCase):
-
- def setUp(self):
- super(FloatingIpTestNeutron, self).setUp()
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- self.controller = floating_ips.FloatingIPController()
-
- def _get_fake_request(self):
- return fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/1')
-
- def test_floatingip_delete(self):
- req = self._get_fake_request()
- fip_val = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'}
- with contextlib.nested(
- mock.patch.object(self.controller.network_api,
- 'disassociate_floating_ip'),
- mock.patch.object(self.controller.network_api,
- 'disassociate_and_release_floating_ip'),
- mock.patch.object(self.controller.network_api,
- 'release_floating_ip'),
- mock.patch.object(self.controller.network_api,
- 'get_instance_id_by_floating_address',
- return_value=None),
- mock.patch.object(self.controller.network_api,
- 'get_floating_ip',
- return_value=fip_val)) as (
- disoc_fip, dis_and_del, rel_fip, _, _):
- self.controller.delete(req, 1)
- self.assertFalse(disoc_fip.called)
- self.assertFalse(rel_fip.called)
- # Only disassociate_and_release_floating_ip is
- # called if using neutron
- self.assertTrue(dis_and_del.called)
-
-
-class FloatingIpTest(test.TestCase):
- floating_ip = "10.10.10.10"
- floating_ip_2 = "10.10.10.11"
-
- def _create_floating_ips(self, floating_ips=None):
- """Create a floating ip object."""
- if floating_ips is None:
- floating_ips = [self.floating_ip]
- elif not isinstance(floating_ips, (list, tuple)):
- floating_ips = [floating_ips]
-
- def make_ip_dict(ip):
- """Shortcut for creating floating ip dict."""
- return
-
- dict_ = {'pool': 'nova', 'host': 'fake_host'}
- return db.floating_ip_bulk_create(
- self.context, [dict(address=ip, **dict_) for ip in floating_ips],
- )
-
- def _delete_floating_ip(self):
- db.floating_ip_destroy(self.context, self.floating_ip)
-
- def _get_fake_fip_request(self, act=''):
- return fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/%s' % act)
-
- def _get_fake_server_request(self):
- return fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
-
- def _get_fake_response(self, req, init_only):
- return req.get_response(fakes.wsgi_app(init_only=(init_only,)))
-
- def setUp(self):
- super(FloatingIpTest, self).setUp()
- self.stubs.Set(compute.api.API, "get",
- compute_api_get)
- self.stubs.Set(network.api.API, "get_floating_ip",
- network_api_get_floating_ip)
- self.stubs.Set(network.api.API, "get_floating_ip_by_address",
- network_api_get_floating_ip_by_address)
- self.stubs.Set(network.api.API, "get_floating_ips_by_project",
- network_api_get_floating_ips_by_project)
- self.stubs.Set(network.api.API, "release_floating_ip",
- network_api_release)
- self.stubs.Set(network.api.API, "disassociate_floating_ip",
- network_api_disassociate)
- self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
- get_instance_by_floating_ip_addr)
- self.stubs.Set(compute_utils, "get_nw_info_for_instance",
- stub_nw_info(self.stubs))
-
- fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
- self.stubs.Set(db, 'instance_get',
- fake_instance_get)
-
- self.context = context.get_admin_context()
- self._create_floating_ips()
-
- self.ext_mgr = extensions.ExtensionManager()
- self.ext_mgr.extensions = {}
- self.controller = floating_ips.FloatingIPController()
- self.manager = floating_ips.FloatingIPActionController(self.ext_mgr)
-
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Floating_ips'])
-
- def tearDown(self):
- self._delete_floating_ip()
- super(FloatingIpTest, self).tearDown()
-
- def test_floatingip_delete(self):
- req = self._get_fake_fip_request('1')
- fip_val = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'}
- with contextlib.nested(
- mock.patch.object(self.controller.network_api,
- 'disassociate_floating_ip'),
- mock.patch.object(self.controller.network_api,
- 'release_floating_ip'),
- mock.patch.object(self.controller.network_api,
- 'get_instance_id_by_floating_address',
- return_value=None),
- mock.patch.object(self.controller.network_api,
- 'get_floating_ip',
- return_value=fip_val)) as (
- disoc_fip, rel_fip, _, _):
- self.controller.delete(req, 1)
- self.assertTrue(disoc_fip.called)
- self.assertTrue(rel_fip.called)
-
- def test_translate_floating_ip_view(self):
- floating_ip_address = self.floating_ip
- floating_ip = db.floating_ip_get_by_address(self.context,
- floating_ip_address)
- # NOTE(vish): network_get uses the id not the address
- floating_ip = db.floating_ip_get(self.context, floating_ip['id'])
- view = floating_ips._translate_floating_ip_view(floating_ip)
- self.assertIn('floating_ip', view)
- self.assertTrue(view['floating_ip']['id'])
- self.assertEqual(view['floating_ip']['ip'], self.floating_ip)
- self.assertIsNone(view['floating_ip']['fixed_ip'])
- self.assertIsNone(view['floating_ip']['instance_id'])
-
- def test_translate_floating_ip_view_dict(self):
- floating_ip = {'id': 0, 'address': '10.0.0.10', 'pool': 'nova',
- 'fixed_ip': None}
- view = floating_ips._translate_floating_ip_view(floating_ip)
- self.assertIn('floating_ip', view)
-
- def test_floating_ips_list(self):
- req = self._get_fake_fip_request()
- res_dict = self.controller.index(req)
-
- response = {'floating_ips': [{'instance_id': FAKE_UUID,
- 'ip': '10.10.10.10',
- 'pool': 'nova',
- 'fixed_ip': '10.0.0.1',
- 'id': 1},
- {'instance_id': None,
- 'ip': '10.10.10.11',
- 'pool': 'nova',
- 'fixed_ip': None,
- 'id': 2}]}
- self.assertEqual(res_dict, response)
-
- def test_floating_ip_release_nonexisting(self):
- def fake_get_floating_ip(*args, **kwargs):
- raise exception.FloatingIpNotFound(id=id)
-
- self.stubs.Set(network.api.API, "get_floating_ip",
- fake_get_floating_ip)
-
- req = self._get_fake_fip_request('9876')
- req.method = 'DELETE'
- res = self._get_fake_response(req, 'os-floating-ips')
- self.assertEqual(res.status_int, 404)
- expected_msg = ('{"itemNotFound": {"message": "Floating ip not found '
- 'for id 9876", "code": 404}}')
- self.assertEqual(res.body, expected_msg)
-
- def test_floating_ip_release_race_cond(self):
- def fake_get_floating_ip(*args, **kwargs):
- return {'fixed_ip_id': 1, 'address': self.floating_ip}
-
- def fake_get_instance_by_floating_ip_addr(*args, **kwargs):
- return 'test-inst'
-
- def fake_disassociate_floating_ip(*args, **kwargs):
- raise exception.FloatingIpNotAssociated(args[3])
-
- self.stubs.Set(network.api.API, "get_floating_ip",
- fake_get_floating_ip)
- self.stubs.Set(floating_ips, "get_instance_by_floating_ip_addr",
- fake_get_instance_by_floating_ip_addr)
- self.stubs.Set(floating_ips, "disassociate_floating_ip",
- fake_disassociate_floating_ip)
-
- req = self._get_fake_fip_request('1')
- req.method = 'DELETE'
- res = self._get_fake_response(req, 'os-floating-ips')
- self.assertEqual(res.status_int, 202)
-
- def test_floating_ip_show(self):
- req = self._get_fake_fip_request('1')
- res_dict = self.controller.show(req, 1)
-
- self.assertEqual(res_dict['floating_ip']['id'], 1)
- self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
- self.assertIsNone(res_dict['floating_ip']['instance_id'])
-
- def test_floating_ip_show_not_found(self):
- def fake_get_floating_ip(*args, **kwargs):
- raise exception.FloatingIpNotFound(id='fake')
-
- self.stubs.Set(network.api.API, "get_floating_ip",
- fake_get_floating_ip)
-
- req = self._get_fake_fip_request('9876')
- res = self._get_fake_response(req, 'os-floating-ips')
- self.assertEqual(res.status_int, 404)
- expected_msg = ('{"itemNotFound": {"message": "Floating ip not found '
- 'for id 9876", "code": 404}}')
- self.assertEqual(res.body, expected_msg)
-
- def test_show_associated_floating_ip(self):
- def get_floating_ip(self, context, id):
- return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
- 'fixed_ip': {'address': '10.0.0.1',
- 'instance_uuid': FAKE_UUID,
- 'instance': {'uuid': FAKE_UUID}}}
-
- self.stubs.Set(network.api.API, "get_floating_ip", get_floating_ip)
-
- req = self._get_fake_fip_request('1')
- res_dict = self.controller.show(req, 1)
-
- self.assertEqual(res_dict['floating_ip']['id'], 1)
- self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
- self.assertEqual(res_dict['floating_ip']['fixed_ip'], '10.0.0.1')
- self.assertEqual(res_dict['floating_ip']['instance_id'], FAKE_UUID)
-
- def test_recreation_of_floating_ip(self):
- self._delete_floating_ip()
- self._create_floating_ips()
-
- def test_floating_ip_in_bulk_creation(self):
- self._delete_floating_ip()
-
- self._create_floating_ips([self.floating_ip, self.floating_ip_2])
- all_ips = db.floating_ip_get_all(self.context)
- ip_list = [ip['address'] for ip in all_ips]
- self.assertIn(self.floating_ip, ip_list)
- self.assertIn(self.floating_ip_2, ip_list)
-
- def test_fail_floating_ip_in_bulk_creation(self):
- self.assertRaises(exception.FloatingIpExists,
- self._create_floating_ips,
- [self.floating_ip, self.floating_ip_2])
- all_ips = db.floating_ip_get_all(self.context)
- ip_list = [ip['address'] for ip in all_ips]
- self.assertIn(self.floating_ip, ip_list)
- self.assertNotIn(self.floating_ip_2, ip_list)
-
- def test_floating_ip_allocate_no_free_ips(self):
- def fake_allocate(*args, **kwargs):
- raise exception.NoMoreFloatingIps()
-
- self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate)
-
- req = self._get_fake_fip_request()
- ex = self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.create, req)
-
- self.assertIn('No more floating ips', ex.explanation)
-
- def test_floating_ip_allocate_no_free_ips_pool(self):
- def fake_allocate(*args, **kwargs):
- raise exception.NoMoreFloatingIps()
-
- self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate)
-
- req = self._get_fake_fip_request()
- ex = self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.create, req, {'pool': 'non_existent_pool'})
-
- self.assertIn('No more floating ips in pool non_existent_pool',
- ex.explanation)
-
- @mock.patch('nova.network.api.API.allocate_floating_ip',
- side_effect=exception.FloatingIpLimitExceeded())
- def test_floating_ip_allocate_over_quota(self, allocate_mock):
- req = self._get_fake_fip_request()
- ex = self.assertRaises(webob.exc.HTTPForbidden,
- self.controller.create, req)
-
- self.assertIn('IP allocation over quota', ex.explanation)
-
- @mock.patch('nova.network.api.API.allocate_floating_ip',
- side_effect=exception.FloatingIpLimitExceeded())
- def test_floating_ip_allocate_quota_exceed_in_pool(self, allocate_mock):
- req = self._get_fake_fip_request()
- ex = self.assertRaises(webob.exc.HTTPForbidden,
- self.controller.create, req, {'pool': 'non_existent_pool'})
-
- self.assertIn('IP allocation over quota in pool non_existent_pool.',
- ex.explanation)
-
- @mock.patch('nova.network.api.API.allocate_floating_ip',
- side_effect=exception.FloatingIpPoolNotFound())
- def test_floating_ip_create_with_unknown_pool(self, allocate_mock):
- req = self._get_fake_fip_request()
- ex = self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.create, req, {'pool': 'non_existent_pool'})
-
- self.assertIn('Floating ip pool not found.', ex.explanation)
-
- def test_floating_ip_allocate(self):
- def fake1(*args, **kwargs):
- pass
-
- def fake2(*args, **kwargs):
- return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova'}
-
- self.stubs.Set(network.api.API, "allocate_floating_ip",
- fake1)
- self.stubs.Set(network.api.API, "get_floating_ip_by_address",
- fake2)
-
- req = self._get_fake_fip_request()
- res_dict = self.controller.create(req)
-
- ip = res_dict['floating_ip']
-
- expected = {
- "id": 1,
- "instance_id": None,
- "ip": "10.10.10.10",
- "fixed_ip": None,
- "pool": 'nova'}
- self.assertEqual(ip, expected)
-
- def test_floating_ip_release(self):
- req = self._get_fake_fip_request('1')
- self.controller.delete(req, 1)
-
- def test_floating_ip_associate(self):
- fixed_address = '192.168.1.100'
-
- def fake_associate_floating_ip(*args, **kwargs):
- self.assertEqual(fixed_address, kwargs['fixed_address'])
-
- self.stubs.Set(network.api.API, "associate_floating_ip",
- fake_associate_floating_ip)
- body = dict(addFloatingIp=dict(address=self.floating_ip))
-
- req = self._get_fake_server_request()
- rsp = self.manager._add_floating_ip(req, 'test_inst', body)
- self.assertEqual(202, rsp.status_int)
-
- def test_floating_ip_associate_invalid_instance(self):
-
- def fake_get(self, context, id, expected_attrs=None,
- want_objects=False):
- raise exception.InstanceNotFound(instance_id=id)
-
- self.stubs.Set(compute.api.API, "get", fake_get)
-
- body = dict(addFloatingIp=dict(address=self.floating_ip))
-
- req = self._get_fake_server_request()
- self.assertRaises(webob.exc.HTTPNotFound,
- self.manager._add_floating_ip, req, 'test_inst',
- body)
-
- def test_not_extended_floating_ip_associate_fixed(self):
- # Check that fixed_address is ignored if os-extended-floating-ips
- # is not loaded
- fixed_address_requested = '192.168.1.101'
- fixed_address_allocated = '192.168.1.100'
-
- def fake_associate_floating_ip(*args, **kwargs):
- self.assertEqual(fixed_address_allocated,
- kwargs['fixed_address'])
-
- self.stubs.Set(network.api.API, "associate_floating_ip",
- fake_associate_floating_ip)
- body = dict(addFloatingIp=dict(address=self.floating_ip,
- fixed_address=fixed_address_requested))
-
- req = self._get_fake_server_request()
- rsp = self.manager._add_floating_ip(req, 'test_inst', body)
- self.assertEqual(202, rsp.status_int)
-
- def test_associate_not_allocated_floating_ip_to_instance(self):
- def fake_associate_floating_ip(self, context, instance,
- floating_address, fixed_address,
- affect_auto_assigned=False):
- raise exception.FloatingIpNotFoundForAddress(
- address=floating_address)
- self.stubs.Set(network.api.API, "associate_floating_ip",
- fake_associate_floating_ip)
- floating_ip = '10.10.10.11'
- body = dict(addFloatingIp=dict(address=floating_ip))
- req = self._get_fake_server_request()
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- resp = self._get_fake_response(req, 'servers')
- res_dict = jsonutils.loads(resp.body)
- self.assertEqual(resp.status_int, 404)
- self.assertEqual(res_dict['itemNotFound']['message'],
- "floating ip not found")
-
- @mock.patch.object(network.api.API, 'associate_floating_ip',
- side_effect=exception.Forbidden)
- def test_associate_floating_ip_forbidden(self, associate_mock):
- body = dict(addFloatingIp=dict(address='10.10.10.11'))
- req = self._get_fake_server_request()
- self.assertRaises(webob.exc.HTTPForbidden,
- self.manager._add_floating_ip, req, 'test_inst',
- body)
-
- def test_associate_floating_ip_bad_address_key(self):
- body = dict(addFloatingIp=dict(bad_address='10.10.10.11'))
- req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.manager._add_floating_ip, req, 'test_inst',
- body)
-
- def test_associate_floating_ip_bad_addfloatingip_key(self):
- body = dict(bad_addFloatingIp=dict(address='10.10.10.11'))
- req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.manager._add_floating_ip, req, 'test_inst',
- body)
-
- def test_floating_ip_disassociate(self):
- def get_instance_by_floating_ip_addr(self, context, address):
- if address == '10.10.10.10':
- return 'test_inst'
-
- self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
- get_instance_by_floating_ip_addr)
-
- body = dict(removeFloatingIp=dict(address='10.10.10.10'))
-
- req = self._get_fake_server_request()
- rsp = self.manager._remove_floating_ip(req, 'test_inst', body)
- self.assertEqual(202, rsp.status_int)
-
- def test_floating_ip_disassociate_missing(self):
- body = dict(removeFloatingIp=dict(address='10.10.10.10'))
-
- req = self._get_fake_server_request()
- self.assertRaises(webob.exc.HTTPUnprocessableEntity,
- self.manager._remove_floating_ip,
- req, 'test_inst', body)
-
- def test_floating_ip_associate_non_existent_ip(self):
- def fake_network_api_associate(self, context, instance,
- floating_address=None,
- fixed_address=None):
- floating_ips = ["10.10.10.10", "10.10.10.11"]
- if floating_address not in floating_ips:
- raise exception.FloatingIpNotFoundForAddress(
- address=floating_address)
-
- self.stubs.Set(network.api.API, "associate_floating_ip",
- fake_network_api_associate)
-
- body = dict(addFloatingIp=dict(address='1.1.1.1'))
- req = self._get_fake_server_request()
- self.assertRaises(webob.exc.HTTPNotFound,
- self.manager._add_floating_ip,
- req, 'test_inst', body)
-
- def test_floating_ip_disassociate_non_existent_ip(self):
- def network_api_get_floating_ip_by_address(self, context,
- floating_address):
- floating_ips = ["10.10.10.10", "10.10.10.11"]
- if floating_address not in floating_ips:
- raise exception.FloatingIpNotFoundForAddress(
- address=floating_address)
-
- self.stubs.Set(network.api.API, "get_floating_ip_by_address",
- network_api_get_floating_ip_by_address)
-
- body = dict(removeFloatingIp=dict(address='1.1.1.1'))
- req = self._get_fake_server_request()
- self.assertRaises(webob.exc.HTTPNotFound,
- self.manager._remove_floating_ip,
- req, 'test_inst', body)
-
- def test_floating_ip_disassociate_wrong_instance_uuid(self):
- def get_instance_by_floating_ip_addr(self, context, address):
- if address == '10.10.10.10':
- return 'test_inst'
-
- self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
- get_instance_by_floating_ip_addr)
-
- wrong_uuid = 'aaaaaaaa-ffff-ffff-ffff-aaaaaaaaaaaa'
- body = dict(removeFloatingIp=dict(address='10.10.10.10'))
-
- req = self._get_fake_server_request()
- self.assertRaises(webob.exc.HTTPUnprocessableEntity,
- self.manager._remove_floating_ip,
- req, wrong_uuid, body)
-
- def test_floating_ip_disassociate_wrong_instance_id(self):
- def get_instance_by_floating_ip_addr(self, context, address):
- if address == '10.10.10.10':
- return 'wrong_inst'
-
- self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
- get_instance_by_floating_ip_addr)
-
- body = dict(removeFloatingIp=dict(address='10.10.10.10'))
-
- req = self._get_fake_server_request()
- self.assertRaises(webob.exc.HTTPUnprocessableEntity,
- self.manager._remove_floating_ip,
- req, 'test_inst', body)
-
- def test_floating_ip_disassociate_auto_assigned(self):
- def fake_get_floating_ip_addr_auto_assigned(self, context, address):
- return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
- 'fixed_ip_id': 10, 'auto_assigned': 1}
-
- def get_instance_by_floating_ip_addr(self, context, address):
- if address == '10.10.10.10':
- return 'test_inst'
-
- def network_api_disassociate(self, context, instance,
- floating_address):
- raise exception.CannotDisassociateAutoAssignedFloatingIP()
-
- self.stubs.Set(network.api.API, "get_floating_ip_by_address",
- fake_get_floating_ip_addr_auto_assigned)
- self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
- get_instance_by_floating_ip_addr)
- self.stubs.Set(network.api.API, "disassociate_floating_ip",
- network_api_disassociate)
- body = dict(removeFloatingIp=dict(address='10.10.10.10'))
- req = self._get_fake_server_request()
- self.assertRaises(webob.exc.HTTPForbidden,
- self.manager._remove_floating_ip,
- req, 'test_inst', body)
-
- def test_floating_ip_disassociate_map_authorization_exc(self):
- def fake_get_floating_ip_addr_auto_assigned(self, context, address):
- return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
- 'fixed_ip_id': 10, 'auto_assigned': 1}
-
- def get_instance_by_floating_ip_addr(self, context, address):
- if address == '10.10.10.10':
- return 'test_inst'
-
- def network_api_disassociate(self, context, instance, address):
- raise exception.Forbidden()
-
- self.stubs.Set(network.api.API, "get_floating_ip_by_address",
- fake_get_floating_ip_addr_auto_assigned)
- self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
- get_instance_by_floating_ip_addr)
- self.stubs.Set(network.api.API, "disassociate_floating_ip",
- network_api_disassociate)
- body = dict(removeFloatingIp=dict(address='10.10.10.10'))
- req = self._get_fake_server_request()
- self.assertRaises(webob.exc.HTTPForbidden,
- self.manager._remove_floating_ip,
- req, 'test_inst', body)
-
-# these are a few bad param tests
-
- def test_bad_address_param_in_remove_floating_ip(self):
- body = dict(removeFloatingIp=dict(badparam='11.0.0.1'))
-
- req = self._get_fake_server_request()
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.manager._remove_floating_ip, req, 'test_inst',
- body)
-
- def test_missing_dict_param_in_remove_floating_ip(self):
- body = dict(removeFloatingIp='11.0.0.1')
-
- req = self._get_fake_server_request()
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.manager._remove_floating_ip, req, 'test_inst',
- body)
-
- def test_missing_dict_param_in_add_floating_ip(self):
- body = dict(addFloatingIp='11.0.0.1')
-
- req = self._get_fake_server_request()
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.manager._add_floating_ip, req, 'test_inst',
- body)
-
-
-class ExtendedFloatingIpTest(test.TestCase):
- floating_ip = "10.10.10.10"
- floating_ip_2 = "10.10.10.11"
-
- def _create_floating_ips(self, floating_ips=None):
- """Create a floating ip object."""
- if floating_ips is None:
- floating_ips = [self.floating_ip]
- elif not isinstance(floating_ips, (list, tuple)):
- floating_ips = [floating_ips]
-
- def make_ip_dict(ip):
- """Shortcut for creating floating ip dict."""
- return
-
- dict_ = {'pool': 'nova', 'host': 'fake_host'}
- return db.floating_ip_bulk_create(
- self.context, [dict(address=ip, **dict_) for ip in floating_ips],
- )
-
- def _delete_floating_ip(self):
- db.floating_ip_destroy(self.context, self.floating_ip)
-
- def _get_fake_request(self):
- return fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
-
- def _get_fake_response(self, req, init_only):
- return req.get_response(fakes.wsgi_app(init_only=(init_only,)))
-
- def setUp(self):
- super(ExtendedFloatingIpTest, self).setUp()
- self.stubs.Set(compute.api.API, "get",
- compute_api_get)
- self.stubs.Set(network.api.API, "get_floating_ip",
- network_api_get_floating_ip)
- self.stubs.Set(network.api.API, "get_floating_ip_by_address",
- network_api_get_floating_ip_by_address)
- self.stubs.Set(network.api.API, "get_floating_ips_by_project",
- network_api_get_floating_ips_by_project)
- self.stubs.Set(network.api.API, "release_floating_ip",
- network_api_release)
- self.stubs.Set(network.api.API, "disassociate_floating_ip",
- network_api_disassociate)
- self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
- get_instance_by_floating_ip_addr)
- self.stubs.Set(compute_utils, "get_nw_info_for_instance",
- stub_nw_info(self.stubs))
-
- fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
- self.stubs.Set(db, 'instance_get',
- fake_instance_get)
-
- self.context = context.get_admin_context()
- self._create_floating_ips()
-
- self.ext_mgr = extensions.ExtensionManager()
- self.ext_mgr.extensions = {}
- self.ext_mgr.extensions['os-floating-ips'] = True
- self.ext_mgr.extensions['os-extended-floating-ips'] = True
- self.controller = floating_ips.FloatingIPController()
- self.manager = floating_ips.FloatingIPActionController(self.ext_mgr)
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Floating_ips', 'Extended_floating_ips'])
-
- def tearDown(self):
- self._delete_floating_ip()
- super(ExtendedFloatingIpTest, self).tearDown()
-
- def test_extended_floating_ip_associate_fixed(self):
- fixed_address = '192.168.1.101'
-
- def fake_associate_floating_ip(*args, **kwargs):
- self.assertEqual(fixed_address, kwargs['fixed_address'])
-
- self.stubs.Set(network.api.API, "associate_floating_ip",
- fake_associate_floating_ip)
- body = dict(addFloatingIp=dict(address=self.floating_ip,
- fixed_address=fixed_address))
-
- req = self._get_fake_request()
- rsp = self.manager._add_floating_ip(req, 'test_inst', body)
- self.assertEqual(202, rsp.status_int)
-
- def test_extended_floating_ip_associate_fixed_not_allocated(self):
- def fake_associate_floating_ip(*args, **kwargs):
- pass
-
- self.stubs.Set(network.api.API, "associate_floating_ip",
- fake_associate_floating_ip)
- body = dict(addFloatingIp=dict(address=self.floating_ip,
- fixed_address='11.11.11.11'))
-
- req = self._get_fake_request()
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- resp = self._get_fake_response(req, 'servers')
- res_dict = jsonutils.loads(resp.body)
- self.assertEqual(resp.status_int, 400)
- self.assertEqual(res_dict['badRequest']['message'],
- "Specified fixed address not assigned to instance")
-
-
-class FloatingIpSerializerTest(test.TestCase):
- def test_default_serializer(self):
- serializer = floating_ips.FloatingIPTemplate()
- text = serializer.serialize(dict(
- floating_ip=dict(
- instance_id=1,
- ip='10.10.10.10',
- fixed_ip='10.0.0.1',
- id=1)))
-
- tree = etree.fromstring(text)
-
- self.assertEqual('floating_ip', tree.tag)
- self.assertEqual('1', tree.get('instance_id'))
- self.assertEqual('10.10.10.10', tree.get('ip'))
- self.assertEqual('10.0.0.1', tree.get('fixed_ip'))
- self.assertEqual('1', tree.get('id'))
-
- def test_index_serializer(self):
- serializer = floating_ips.FloatingIPsTemplate()
- text = serializer.serialize(dict(
- floating_ips=[
- dict(instance_id=1,
- ip='10.10.10.10',
- fixed_ip='10.0.0.1',
- id=1),
- dict(instance_id=None,
- ip='10.10.10.11',
- fixed_ip=None,
- id=2)]))
-
- tree = etree.fromstring(text)
-
- self.assertEqual('floating_ips', tree.tag)
- self.assertEqual(2, len(tree))
- self.assertEqual('floating_ip', tree[0].tag)
- self.assertEqual('floating_ip', tree[1].tag)
- self.assertEqual('1', tree[0].get('instance_id'))
- self.assertEqual('None', tree[1].get('instance_id'))
- self.assertEqual('10.10.10.10', tree[0].get('ip'))
- self.assertEqual('10.10.10.11', tree[1].get('ip'))
- self.assertEqual('10.0.0.1', tree[0].get('fixed_ip'))
- self.assertEqual('None', tree[1].get('fixed_ip'))
- self.assertEqual('1', tree[0].get('id'))
- self.assertEqual('2', tree[1].get('id'))
diff --git a/nova/tests/api/openstack/compute/contrib/test_floating_ips_bulk.py b/nova/tests/api/openstack/compute/contrib/test_floating_ips_bulk.py
deleted file mode 100644
index bac9ea66db..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_floating_ips_bulk.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# Copyright 2012 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import netaddr
-from oslo.config import cfg
-import webob
-
-from nova.api.openstack.compute.contrib import floating_ips_bulk as fipbulk_v2
-from nova.api.openstack.compute.plugins.v3 import floating_ips_bulk as\
- fipbulk_v21
-from nova import context
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-
-CONF = cfg.CONF
-
-
-class FloatingIPBulkV21(test.TestCase):
-
- floating_ips_bulk = fipbulk_v21
- url = '/v2/fake/os-floating-ips-bulk'
- delete_url = '/v2/fake/os-fixed-ips/delete'
- bad_request = exception.ValidationError
-
- def setUp(self):
- super(FloatingIPBulkV21, self).setUp()
-
- self.context = context.get_admin_context()
- self.controller = self.floating_ips_bulk.FloatingIPBulkController()
-
- def _setup_floating_ips(self, ip_range):
- body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
- req = fakes.HTTPRequest.blank(self.url)
- res_dict = self.controller.create(req, body=body)
- response = {"floating_ips_bulk_create": {
- 'ip_range': ip_range,
- 'pool': CONF.default_floating_pool,
- 'interface': CONF.public_interface}}
- self.assertEqual(res_dict, response)
-
- def test_create_ips(self):
- ip_range = '192.168.1.0/24'
- self._setup_floating_ips(ip_range)
-
- def test_create_ips_pool(self):
- ip_range = '10.0.1.0/20'
- pool = 'a new pool'
- body = {'floating_ips_bulk_create':
- {'ip_range': ip_range,
- 'pool': pool}}
- req = fakes.HTTPRequest.blank(self.url)
- res_dict = self.controller.create(req, body=body)
- response = {"floating_ips_bulk_create": {
- 'ip_range': ip_range,
- 'pool': pool,
- 'interface': CONF.public_interface}}
- self.assertEqual(res_dict, response)
-
- def test_list_ips(self):
- ip_range = '192.168.1.1/28'
- self._setup_floating_ips(ip_range)
- req = fakes.HTTPRequest.blank(self.url, use_admin_context=True)
- res_dict = self.controller.index(req)
-
- ip_info = [{'address': str(ip_addr),
- 'pool': CONF.default_floating_pool,
- 'interface': CONF.public_interface,
- 'project_id': None,
- 'instance_uuid': None}
- for ip_addr in netaddr.IPNetwork(ip_range).iter_hosts()]
- response = {'floating_ip_info': ip_info}
-
- self.assertEqual(res_dict, response)
-
- def test_list_ip_by_host(self):
- ip_range = '192.168.1.1/28'
- self._setup_floating_ips(ip_range)
- req = fakes.HTTPRequest.blank(self.url, use_admin_context=True)
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.show, req, 'host')
-
- def test_delete_ips(self):
- ip_range = '192.168.1.0/20'
- self._setup_floating_ips(ip_range)
-
- body = {'ip_range': ip_range}
- req = fakes.HTTPRequest.blank(self.delete_url)
- res_dict = self.controller.update(req, "delete", body=body)
-
- response = {"floating_ips_bulk_delete": ip_range}
- self.assertEqual(res_dict, response)
-
- # Check that the IPs are actually deleted
- req = fakes.HTTPRequest.blank(self.url, use_admin_context=True)
- res_dict = self.controller.index(req)
- response = {'floating_ip_info': []}
- self.assertEqual(res_dict, response)
-
- def test_create_duplicate_fail(self):
- ip_range = '192.168.1.0/20'
- self._setup_floating_ips(ip_range)
-
- ip_range = '192.168.1.0/28'
- body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, body=body)
-
- def test_create_bad_cidr_fail(self):
- # netaddr can't handle /32 or 31 cidrs
- ip_range = '192.168.1.1/32'
- body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, body=body)
-
- def test_create_invalid_cidr_fail(self):
- ip_range = 'not a cidr'
- body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(self.bad_request, self.controller.create,
- req, body=body)
-
-
-class FloatingIPBulkV2(FloatingIPBulkV21):
- floating_ips_bulk = fipbulk_v2
- bad_request = webob.exc.HTTPBadRequest
diff --git a/nova/tests/api/openstack/compute/contrib/test_fping.py b/nova/tests/api/openstack/compute/contrib/test_fping.py
deleted file mode 100644
index b35c1168f7..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_fping.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright 2011 Grid Dynamics
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.api.openstack.compute.contrib import fping
-from nova.api.openstack.compute.plugins.v3 import fping as fping_v21
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-import nova.utils
-
-
-FAKE_UUID = fakes.FAKE_UUID
-
-
-def execute(*cmd, **args):
- return "".join(["%s is alive" % ip for ip in cmd[1:]])
-
-
-class FpingTestV21(test.TestCase):
- controller_cls = fping_v21.FpingController
-
- def setUp(self):
- super(FpingTestV21, self).setUp()
- self.flags(verbose=True, use_ipv6=False)
- return_server = fakes.fake_instance_get()
- return_servers = fakes.fake_instance_get_all_by_filters()
- self.stubs.Set(nova.db, "instance_get_all_by_filters",
- return_servers)
- self.stubs.Set(nova.db, "instance_get_by_uuid",
- return_server)
- self.stubs.Set(nova.utils, "execute",
- execute)
- self.stubs.Set(self.controller_cls, "check_fping",
- lambda self: None)
- self.controller = self.controller_cls()
-
- def _get_url(self):
- return "/v3"
-
- def test_fping_index(self):
- req = fakes.HTTPRequest.blank(self._get_url() + "/os-fping")
- res_dict = self.controller.index(req)
- self.assertIn("servers", res_dict)
- for srv in res_dict["servers"]:
- for key in "project_id", "id", "alive":
- self.assertIn(key, srv)
-
- def test_fping_index_policy(self):
- req = fakes.HTTPRequest.blank(self._get_url() +
- "os-fping?all_tenants=1")
- self.assertRaises(exception.Forbidden, self.controller.index, req)
- req = fakes.HTTPRequest.blank(self._get_url() +
- "/os-fping?all_tenants=1")
- req.environ["nova.context"].is_admin = True
- res_dict = self.controller.index(req)
- self.assertIn("servers", res_dict)
-
- def test_fping_index_include(self):
- req = fakes.HTTPRequest.blank(self._get_url() + "/os-fping")
- res_dict = self.controller.index(req)
- ids = [srv["id"] for srv in res_dict["servers"]]
- req = fakes.HTTPRequest.blank(self._get_url() +
- "/os-fping?include=%s" % ids[0])
- res_dict = self.controller.index(req)
- self.assertEqual(len(res_dict["servers"]), 1)
- self.assertEqual(res_dict["servers"][0]["id"], ids[0])
-
- def test_fping_index_exclude(self):
- req = fakes.HTTPRequest.blank(self._get_url() + "/os-fping")
- res_dict = self.controller.index(req)
- ids = [srv["id"] for srv in res_dict["servers"]]
- req = fakes.HTTPRequest.blank(self._get_url() +
- "/os-fping?exclude=%s" %
- ",".join(ids[1:]))
- res_dict = self.controller.index(req)
- self.assertEqual(len(res_dict["servers"]), 1)
- self.assertEqual(res_dict["servers"][0]["id"], ids[0])
-
- def test_fping_show(self):
- req = fakes.HTTPRequest.blank(self._get_url() +
- "os-fping/%s" % FAKE_UUID)
- res_dict = self.controller.show(req, FAKE_UUID)
- self.assertIn("server", res_dict)
- srv = res_dict["server"]
- for key in "project_id", "id", "alive":
- self.assertIn(key, srv)
-
-
-class FpingTestV2(FpingTestV21):
- controller_cls = fping.FpingController
-
- def _get_url(self):
- return "/v2/1234"
diff --git a/nova/tests/api/openstack/compute/contrib/test_hide_server_addresses.py b/nova/tests/api/openstack/compute/contrib/test_hide_server_addresses.py
deleted file mode 100644
index a2f5535d81..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_hide_server_addresses.py
+++ /dev/null
@@ -1,172 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import itertools
-
-from lxml import etree
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack import wsgi
-from nova import compute
-from nova.compute import vm_states
-from nova import db
-from nova import exception
-from nova import objects
-from nova.objects import instance as instance_obj
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-
-
-SENTINEL = object()
-
-
-def fake_compute_get(*args, **kwargs):
- def _return_server(*_args, **_kwargs):
- inst = fakes.stub_instance(*args, **kwargs)
- return fake_instance.fake_instance_obj(_args[1], **inst)
- return _return_server
-
-
-class HideServerAddressesTestV21(test.TestCase):
- content_type = 'application/json'
- base_url = '/v2/fake/servers'
-
- def _setup_wsgi(self):
- self.wsgi_app = fakes.wsgi_app_v21(
- init_only=('servers', 'os-hide-server-addresses'))
-
- def setUp(self):
- super(HideServerAddressesTestV21, self).setUp()
- fakes.stub_out_nw_api(self.stubs)
- return_server = fakes.fake_instance_get()
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
- self._setup_wsgi()
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(self.wsgi_app)
- return res
-
- @staticmethod
- def _get_server(body):
- return jsonutils.loads(body).get('server')
-
- @staticmethod
- def _get_servers(body):
- return jsonutils.loads(body).get('servers')
-
- @staticmethod
- def _get_addresses(server):
- return server.get('addresses', SENTINEL)
-
- def _check_addresses(self, addresses, exists):
- self.assertTrue(addresses is not SENTINEL)
- if exists:
- self.assertTrue(addresses)
- else:
- self.assertFalse(addresses)
-
- def test_show_hides_in_building(self):
- instance_id = 1
- uuid = fakes.get_fake_uuid(instance_id)
- self.stubs.Set(compute.api.API, 'get',
- fake_compute_get(instance_id, uuid=uuid,
- vm_state=vm_states.BUILDING))
- res = self._make_request(self.base_url + '/%s' % uuid)
- self.assertEqual(res.status_int, 200)
-
- server = self._get_server(res.body)
- addresses = self._get_addresses(server)
- self._check_addresses(addresses, exists=False)
-
- def test_show(self):
- instance_id = 1
- uuid = fakes.get_fake_uuid(instance_id)
- self.stubs.Set(compute.api.API, 'get',
- fake_compute_get(instance_id, uuid=uuid,
- vm_state=vm_states.ACTIVE))
- res = self._make_request(self.base_url + '/%s' % uuid)
- self.assertEqual(res.status_int, 200)
-
- server = self._get_server(res.body)
- addresses = self._get_addresses(server)
- self._check_addresses(addresses, exists=True)
-
- def test_detail_hides_building_server_addresses(self):
- instance_0 = fakes.stub_instance(0, uuid=fakes.get_fake_uuid(0),
- vm_state=vm_states.ACTIVE)
- instance_1 = fakes.stub_instance(1, uuid=fakes.get_fake_uuid(1),
- vm_state=vm_states.BUILDING)
- instances = [instance_0, instance_1]
-
- def get_all(*args, **kwargs):
- fields = instance_obj.INSTANCE_DEFAULT_FIELDS
- return instance_obj._make_instance_list(
- args[1], objects.InstanceList(), instances, fields)
-
- self.stubs.Set(compute.api.API, 'get_all', get_all)
- res = self._make_request(self.base_url + '/detail')
-
- self.assertEqual(res.status_int, 200)
- servers = self._get_servers(res.body)
-
- self.assertEqual(len(servers), len(instances))
-
- for instance, server in itertools.izip(instances, servers):
- addresses = self._get_addresses(server)
- exists = (instance['vm_state'] == vm_states.ACTIVE)
- self._check_addresses(addresses, exists=exists)
-
- def test_no_instance_passthrough_404(self):
-
- def fake_compute_get(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id='fake')
-
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- res = self._make_request(self.base_url + '/' + fakes.get_fake_uuid())
-
- self.assertEqual(res.status_int, 404)
-
-
-class HideServerAddressesTestV2(HideServerAddressesTestV21):
-
- def _setup_wsgi(self):
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Hide_server_addresses'])
- self.wsgi_app = fakes.wsgi_app(init_only=('servers',))
-
-
-class HideAddressesXmlTest(HideServerAddressesTestV2):
- content_type = 'application/xml'
-
- @staticmethod
- def _get_server(body):
- return etree.XML(body)
-
- @staticmethod
- def _get_servers(body):
- return etree.XML(body).getchildren()
-
- @staticmethod
- def _get_addresses(server):
- addresses = server.find('{%s}addresses' % wsgi.XMLNS_V11)
- if addresses is None:
- return SENTINEL
- return addresses
diff --git a/nova/tests/api/openstack/compute/contrib/test_hosts.py b/nova/tests/api/openstack/compute/contrib/test_hosts.py
deleted file mode 100644
index 22d3173874..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_hosts.py
+++ /dev/null
@@ -1,471 +0,0 @@
-# Copyright (c) 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-import testtools
-import webob.exc
-
-from nova.api.openstack.compute.contrib import hosts as os_hosts_v2
-from nova.api.openstack.compute.plugins.v3 import hosts as os_hosts_v3
-from nova.compute import power_state
-from nova.compute import vm_states
-from nova import context as context_maker
-from nova import db
-from nova import exception
-from nova import test
-from nova.tests import fake_hosts
-from nova.tests import utils
-
-
-def stub_service_get_all(context, disabled=None):
- return fake_hosts.SERVICES_LIST
-
-
-def stub_service_get_by_host_and_topic(context, host_name, topic):
- for service in stub_service_get_all(context):
- if service['host'] == host_name and service['topic'] == topic:
- return service
-
-
-def stub_set_host_enabled(context, host_name, enabled):
- """Simulates three possible behaviours for VM drivers or compute
- drivers when enabling or disabling a host.
-
- 'enabled' means new instances can go to this host
- 'disabled' means they can't
- """
- results = {True: "enabled", False: "disabled"}
- if host_name == "notimplemented":
- # The vm driver for this host doesn't support this feature
- raise NotImplementedError()
- elif host_name == "dummydest":
- # The host does not exist
- raise exception.ComputeHostNotFound(host=host_name)
- elif host_name == "host_c2":
- # Simulate a failure
- return results[not enabled]
- else:
- # Do the right thing
- return results[enabled]
-
-
-def stub_set_host_maintenance(context, host_name, mode):
- # We'll simulate success and failure by assuming
- # that 'host_c1' always succeeds, and 'host_c2'
- # always fails
- results = {True: "on_maintenance", False: "off_maintenance"}
- if host_name == "notimplemented":
- # The vm driver for this host doesn't support this feature
- raise NotImplementedError()
- elif host_name == "dummydest":
- # The host does not exist
- raise exception.ComputeHostNotFound(host=host_name)
- elif host_name == "host_c2":
- # Simulate a failure
- return results[not mode]
- else:
- # Do the right thing
- return results[mode]
-
-
-def stub_host_power_action(context, host_name, action):
- if host_name == "notimplemented":
- raise NotImplementedError()
- elif host_name == "dummydest":
- # The host does not exist
- raise exception.ComputeHostNotFound(host=host_name)
- return action
-
-
-def _create_instance(**kwargs):
- """Create a test instance."""
- ctxt = context_maker.get_admin_context()
- return db.instance_create(ctxt, _create_instance_dict(**kwargs))
-
-
-def _create_instance_dict(**kwargs):
- """Create a dictionary for a test instance."""
- inst = {}
- inst['image_ref'] = 'cedef40a-ed67-4d10-800e-17455edce175'
- inst['reservation_id'] = 'r-fakeres'
- inst['user_id'] = kwargs.get('user_id', 'admin')
- inst['project_id'] = kwargs.get('project_id', 'fake')
- inst['instance_type_id'] = '1'
- if 'host' in kwargs:
- inst['host'] = kwargs.get('host')
- inst['vcpus'] = kwargs.get('vcpus', 1)
- inst['memory_mb'] = kwargs.get('memory_mb', 20)
- inst['root_gb'] = kwargs.get('root_gb', 30)
- inst['ephemeral_gb'] = kwargs.get('ephemeral_gb', 30)
- inst['vm_state'] = kwargs.get('vm_state', vm_states.ACTIVE)
- inst['power_state'] = kwargs.get('power_state', power_state.RUNNING)
- inst['task_state'] = kwargs.get('task_state', None)
- inst['availability_zone'] = kwargs.get('availability_zone', None)
- inst['ami_launch_index'] = 0
- inst['launched_on'] = kwargs.get('launched_on', 'dummy')
- return inst
-
-
-class FakeRequest(object):
- environ = {"nova.context": context_maker.get_admin_context()}
- GET = {}
-
-
-class FakeRequestWithNovaZone(object):
- environ = {"nova.context": context_maker.get_admin_context()}
- GET = {"zone": "nova"}
-
-
-class FakeRequestWithNovaService(object):
- environ = {"nova.context": context_maker.get_admin_context()}
- GET = {"service": "compute"}
-
-
-class FakeRequestWithInvalidNovaService(object):
- environ = {"nova.context": context_maker.get_admin_context()}
- GET = {"service": "invalid"}
-
-
-class HostTestCaseV21(test.TestCase):
- """Test Case for hosts."""
- validation_ex = exception.ValidationError
- Controller = os_hosts_v3.HostController
- policy_ex = exception.PolicyNotAuthorized
-
- def _setup_stubs(self):
- # Pretend we have fake_hosts.HOST_LIST in the DB
- self.stubs.Set(db, 'service_get_all',
- stub_service_get_all)
- # Only hosts in our fake DB exist
- self.stubs.Set(db, 'service_get_by_host_and_topic',
- stub_service_get_by_host_and_topic)
- # 'host_c1' always succeeds, and 'host_c2'
- self.stubs.Set(self.hosts_api, 'set_host_enabled',
- stub_set_host_enabled)
- # 'host_c1' always succeeds, and 'host_c2'
- self.stubs.Set(self.hosts_api, 'set_host_maintenance',
- stub_set_host_maintenance)
- self.stubs.Set(self.hosts_api, 'host_power_action',
- stub_host_power_action)
-
- def setUp(self):
- super(HostTestCaseV21, self).setUp()
- self.controller = self.Controller()
- self.hosts_api = self.controller.api
- self.req = FakeRequest()
-
- self._setup_stubs()
-
- def _test_host_update(self, host, key, val, expected_value):
- body = {key: val}
- result = self.controller.update(self.req, host, body=body)
- self.assertEqual(result[key], expected_value)
-
- def test_list_hosts(self):
- """Verify that the compute hosts are returned."""
- result = self.controller.index(self.req)
- self.assertIn('hosts', result)
- hosts = result['hosts']
- self.assertEqual(fake_hosts.HOST_LIST, hosts)
-
- def test_disable_host(self):
- self._test_host_update('host_c1', 'status', 'disable', 'disabled')
- self._test_host_update('host_c2', 'status', 'disable', 'enabled')
-
- def test_enable_host(self):
- self._test_host_update('host_c1', 'status', 'enable', 'enabled')
- self._test_host_update('host_c2', 'status', 'enable', 'disabled')
-
- def test_enable_maintenance(self):
- self._test_host_update('host_c1', 'maintenance_mode',
- 'enable', 'on_maintenance')
-
- def test_disable_maintenance(self):
- self._test_host_update('host_c1', 'maintenance_mode',
- 'disable', 'off_maintenance')
-
- def _test_host_update_notimpl(self, key, val):
- def stub_service_get_all_notimpl(self, req):
- return [{'host': 'notimplemented', 'topic': None,
- 'availability_zone': None}]
- self.stubs.Set(db, 'service_get_all',
- stub_service_get_all_notimpl)
- body = {key: val}
- self.assertRaises(webob.exc.HTTPNotImplemented,
- self.controller.update,
- self.req, 'notimplemented', body=body)
-
- def test_disable_host_notimpl(self):
- self._test_host_update_notimpl('status', 'disable')
-
- def test_enable_maintenance_notimpl(self):
- self._test_host_update_notimpl('maintenance_mode', 'enable')
-
- def test_host_startup(self):
- result = self.controller.startup(self.req, "host_c1")
- self.assertEqual(result["power_action"], "startup")
-
- def test_host_shutdown(self):
- result = self.controller.shutdown(self.req, "host_c1")
- self.assertEqual(result["power_action"], "shutdown")
-
- def test_host_reboot(self):
- result = self.controller.reboot(self.req, "host_c1")
- self.assertEqual(result["power_action"], "reboot")
-
- def _test_host_power_action_notimpl(self, method):
- self.assertRaises(webob.exc.HTTPNotImplemented,
- method, self.req, "notimplemented")
-
- def test_host_startup_notimpl(self):
- self._test_host_power_action_notimpl(self.controller.startup)
-
- def test_host_shutdown_notimpl(self):
- self._test_host_power_action_notimpl(self.controller.shutdown)
-
- def test_host_reboot_notimpl(self):
- self._test_host_power_action_notimpl(self.controller.reboot)
-
- def test_host_status_bad_host(self):
- # A host given as an argument does not exist.
- self.req.environ["nova.context"].is_admin = True
- dest = 'dummydest'
- with testtools.ExpectedException(webob.exc.HTTPNotFound,
- ".*%s.*" % dest):
- self.controller.update(self.req, dest, body={'status': 'enable'})
-
- def test_host_maintenance_bad_host(self):
- # A host given as an argument does not exist.
- self.req.environ["nova.context"].is_admin = True
- dest = 'dummydest'
- with testtools.ExpectedException(webob.exc.HTTPNotFound,
- ".*%s.*" % dest):
- self.controller.update(self.req, dest,
- body={'maintenance_mode': 'enable'})
-
- def test_host_power_action_bad_host(self):
- # A host given as an argument does not exist.
- self.req.environ["nova.context"].is_admin = True
- dest = 'dummydest'
- with testtools.ExpectedException(webob.exc.HTTPNotFound,
- ".*%s.*" % dest):
- self.controller.reboot(self.req, dest)
-
- def test_bad_status_value(self):
- bad_body = {"status": "bad"}
- self.assertRaises(self.validation_ex, self.controller.update,
- self.req, "host_c1", body=bad_body)
- bad_body2 = {"status": "disablabc"}
- self.assertRaises(self.validation_ex, self.controller.update,
- self.req, "host_c1", body=bad_body2)
-
- def test_bad_update_key(self):
- bad_body = {"crazy": "bad"}
- self.assertRaises(self.validation_ex, self.controller.update,
- self.req, "host_c1", body=bad_body)
-
- def test_bad_update_key_and_correct_update_key(self):
- bad_body = {"status": "disable", "crazy": "bad"}
- self.assertRaises(self.validation_ex, self.controller.update,
- self.req, "host_c1", body=bad_body)
-
- def test_good_update_keys(self):
- body = {"status": "disable", "maintenance_mode": "enable"}
- result = self.controller.update(self.req, 'host_c1', body=body)
- self.assertEqual(result["host"], "host_c1")
- self.assertEqual(result["status"], "disabled")
- self.assertEqual(result["maintenance_mode"], "on_maintenance")
-
- def test_show_forbidden(self):
- self.req.environ["nova.context"].is_admin = False
- dest = 'dummydest'
- self.assertRaises(self.policy_ex,
- self.controller.show,
- self.req, dest)
- self.req.environ["nova.context"].is_admin = True
-
- def test_show_host_not_exist(self):
- # A host given as an argument does not exist.
- self.req.environ["nova.context"].is_admin = True
- dest = 'dummydest'
- with testtools.ExpectedException(webob.exc.HTTPNotFound,
- ".*%s.*" % dest):
- self.controller.show(self.req, dest)
-
- def _create_compute_service(self):
- """Create compute-manager(ComputeNode and Service record)."""
- ctxt = self.req.environ["nova.context"]
- dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
- 'report_count': 0}
- s_ref = db.service_create(ctxt, dic)
-
- dic = {'service_id': s_ref['id'],
- 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
- 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10,
- 'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
- 'cpu_info': '', 'stats': ''}
- db.compute_node_create(ctxt, dic)
-
- return db.service_get(ctxt, s_ref['id'])
-
- def test_show_no_project(self):
- """No instances are running on the given host."""
- ctxt = context_maker.get_admin_context()
- s_ref = self._create_compute_service()
-
- result = self.controller.show(self.req, s_ref['host'])
-
- proj = ['(total)', '(used_now)', '(used_max)']
- column = ['host', 'project', 'cpu', 'memory_mb', 'disk_gb']
- self.assertEqual(len(result['host']), 3)
- for resource in result['host']:
- self.assertIn(resource['resource']['project'], proj)
- self.assertEqual(len(resource['resource']), 5)
- self.assertEqual(set(column), set(resource['resource'].keys()))
- db.service_destroy(ctxt, s_ref['id'])
-
- def test_show_works_correctly(self):
- """show() works correctly as expected."""
- ctxt = context_maker.get_admin_context()
- s_ref = self._create_compute_service()
- i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
- i_ref2 = _create_instance(project_id='p-02', vcpus=3,
- host=s_ref['host'])
-
- result = self.controller.show(self.req, s_ref['host'])
-
- proj = ['(total)', '(used_now)', '(used_max)', 'p-01', 'p-02']
- column = ['host', 'project', 'cpu', 'memory_mb', 'disk_gb']
- self.assertEqual(len(result['host']), 5)
- for resource in result['host']:
- self.assertIn(resource['resource']['project'], proj)
- self.assertEqual(len(resource['resource']), 5)
- self.assertEqual(set(column), set(resource['resource'].keys()))
- db.service_destroy(ctxt, s_ref['id'])
- db.instance_destroy(ctxt, i_ref1['uuid'])
- db.instance_destroy(ctxt, i_ref2['uuid'])
-
- def test_list_hosts_with_zone(self):
- result = self.controller.index(FakeRequestWithNovaZone())
- self.assertIn('hosts', result)
- hosts = result['hosts']
- self.assertEqual(fake_hosts.HOST_LIST_NOVA_ZONE, hosts)
-
- def test_list_hosts_with_service(self):
- result = self.controller.index(FakeRequestWithNovaService())
- self.assertEqual(fake_hosts.HOST_LIST_NOVA_ZONE, result['hosts'])
-
- def test_list_hosts_with_invalid_service(self):
- result = self.controller.index(FakeRequestWithInvalidNovaService())
- self.assertEqual([], result['hosts'])
-
-
-class HostTestCaseV20(HostTestCaseV21):
- validation_ex = webob.exc.HTTPBadRequest
- policy_ex = webob.exc.HTTPForbidden
- Controller = os_hosts_v2.HostController
-
- # Note: V2 api don't support list with services
- def test_list_hosts_with_service(self):
- pass
-
- def test_list_hosts_with_invalid_service(self):
- pass
-
-
-class HostSerializerTest(test.TestCase):
- def setUp(self):
- super(HostSerializerTest, self).setUp()
- self.deserializer = os_hosts_v2.HostUpdateDeserializer()
-
- def test_index_serializer(self):
- serializer = os_hosts_v2.HostIndexTemplate()
- text = serializer.serialize(fake_hosts.OS_API_HOST_LIST)
-
- tree = etree.fromstring(text)
-
- self.assertEqual('hosts', tree.tag)
- self.assertEqual(len(fake_hosts.HOST_LIST), len(tree))
- for i in range(len(fake_hosts.HOST_LIST)):
- self.assertEqual('host', tree[i].tag)
- self.assertEqual(fake_hosts.HOST_LIST[i]['host_name'],
- tree[i].get('host_name'))
- self.assertEqual(fake_hosts.HOST_LIST[i]['service'],
- tree[i].get('service'))
- self.assertEqual(fake_hosts.HOST_LIST[i]['zone'],
- tree[i].get('zone'))
-
- def test_update_serializer_with_status(self):
- exemplar = dict(host='host_c1', status='enabled')
- serializer = os_hosts_v2.HostUpdateTemplate()
- text = serializer.serialize(exemplar)
-
- tree = etree.fromstring(text)
-
- self.assertEqual('host', tree.tag)
- for key, value in exemplar.items():
- self.assertEqual(value, tree.get(key))
-
- def test_update_serializer_with_maintenance_mode(self):
- exemplar = dict(host='host_c1', maintenance_mode='enabled')
- serializer = os_hosts_v2.HostUpdateTemplate()
- text = serializer.serialize(exemplar)
-
- tree = etree.fromstring(text)
-
- self.assertEqual('host', tree.tag)
- for key, value in exemplar.items():
- self.assertEqual(value, tree.get(key))
-
- def test_update_serializer_with_maintenance_mode_and_status(self):
- exemplar = dict(host='host_c1',
- maintenance_mode='enabled',
- status='enabled')
- serializer = os_hosts_v2.HostUpdateTemplate()
- text = serializer.serialize(exemplar)
-
- tree = etree.fromstring(text)
-
- self.assertEqual('host', tree.tag)
- for key, value in exemplar.items():
- self.assertEqual(value, tree.get(key))
-
- def test_action_serializer(self):
- exemplar = dict(host='host_c1', power_action='reboot')
- serializer = os_hosts_v2.HostActionTemplate()
- text = serializer.serialize(exemplar)
-
- tree = etree.fromstring(text)
-
- self.assertEqual('host', tree.tag)
- for key, value in exemplar.items():
- self.assertEqual(value, tree.get(key))
-
- def test_update_deserializer(self):
- exemplar = dict(status='enabled', maintenance_mode='disable')
- intext = """<?xml version='1.0' encoding='UTF-8'?>
- <updates>
- <status>enabled</status>
- <maintenance_mode>disable</maintenance_mode>
- </updates>"""
- result = self.deserializer.deserialize(intext)
-
- self.assertEqual(dict(body=exemplar), result)
-
- def test_corrupt_xml(self):
- self.assertRaises(
- exception.MalformedRequestBody,
- self.deserializer.deserialize,
- utils.killer_xml_body())
diff --git a/nova/tests/api/openstack/compute/contrib/test_hypervisor_status.py b/nova/tests/api/openstack/compute/contrib/test_hypervisor_status.py
deleted file mode 100644
index 13cdf5f8fe..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_hypervisor_status.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright 2014 Intel Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-import mock
-
-from nova.api.openstack.compute.contrib import hypervisors as hypervisors_v2
-from nova.api.openstack.compute.plugins.v3 import hypervisors \
- as hypervisors_v21
-from nova.api.openstack import extensions
-from nova import test
-from nova.tests.api.openstack.compute.contrib import test_hypervisors
-
-TEST_HYPER = dict(test_hypervisors.TEST_HYPERS[0],
- service=dict(id=1,
- host="compute1",
- binary="nova-compute",
- topic="compute_topic",
- report_count=5,
- disabled=False,
- disabled_reason=None,
- availability_zone="nova"),
- )
-
-
-class HypervisorStatusTestV21(test.NoDBTestCase):
- def _prepare_extension(self):
- self.controller = hypervisors_v21.HypervisorsController()
- self.controller.servicegroup_api.service_is_up = mock.MagicMock(
- return_value=True)
-
- def test_view_hypervisor_service_status(self):
- self._prepare_extension()
- result = self.controller._view_hypervisor(
- TEST_HYPER, False)
- self.assertEqual('enabled', result['status'])
- self.assertEqual('up', result['state'])
- self.assertEqual('enabled', result['status'])
-
- self.controller.servicegroup_api.service_is_up.return_value = False
- result = self.controller._view_hypervisor(
- TEST_HYPER, False)
- self.assertEqual('down', result['state'])
-
- hyper = copy.deepcopy(TEST_HYPER)
- hyper['service']['disabled'] = True
- result = self.controller._view_hypervisor(hyper, False)
- self.assertEqual('disabled', result['status'])
-
- def test_view_hypervisor_detail_status(self):
- self._prepare_extension()
-
- result = self.controller._view_hypervisor(
- TEST_HYPER, True)
-
- self.assertEqual('enabled', result['status'])
- self.assertEqual('up', result['state'])
- self.assertIsNone(result['service']['disabled_reason'])
-
- self.controller.servicegroup_api.service_is_up.return_value = False
- result = self.controller._view_hypervisor(
- TEST_HYPER, True)
- self.assertEqual('down', result['state'])
-
- hyper = copy.deepcopy(TEST_HYPER)
- hyper['service']['disabled'] = True
- hyper['service']['disabled_reason'] = "fake"
- result = self.controller._view_hypervisor(hyper, True)
- self.assertEqual('disabled', result['status'],)
- self.assertEqual('fake', result['service']['disabled_reason'])
-
-
-class HypervisorStatusTestV2(HypervisorStatusTestV21):
- def _prepare_extension(self):
- ext_mgr = extensions.ExtensionManager()
- ext_mgr.extensions = {}
- ext_mgr.extensions['os-hypervisor-status'] = True
- self.controller = hypervisors_v2.HypervisorsController(ext_mgr)
- self.controller.servicegroup_api.service_is_up = mock.MagicMock(
- return_value=True)
diff --git a/nova/tests/api/openstack/compute/contrib/test_hypervisors.py b/nova/tests/api/openstack/compute/contrib/test_hypervisors.py
deleted file mode 100644
index ebb0b8593b..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_hypervisors.py
+++ /dev/null
@@ -1,596 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-from lxml import etree
-import mock
-from webob import exc
-
-from nova.api.openstack.compute.contrib import hypervisors as hypervisors_v2
-from nova.api.openstack.compute.plugins.v3 import hypervisors \
- as hypervisors_v21
-from nova.api.openstack import extensions
-from nova import context
-from nova import db
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-TEST_HYPERS = [
- dict(id=1,
- service_id=1,
- service=dict(id=1,
- host="compute1",
- binary="nova-compute",
- topic="compute_topic",
- report_count=5,
- disabled=False,
- disabled_reason=None,
- availability_zone="nova"),
- vcpus=4,
- memory_mb=10 * 1024,
- local_gb=250,
- vcpus_used=2,
- memory_mb_used=5 * 1024,
- local_gb_used=125,
- hypervisor_type="xen",
- hypervisor_version=3,
- hypervisor_hostname="hyper1",
- free_ram_mb=5 * 1024,
- free_disk_gb=125,
- current_workload=2,
- running_vms=2,
- cpu_info='cpu_info',
- disk_available_least=100,
- host_ip='1.1.1.1'),
- dict(id=2,
- service_id=2,
- service=dict(id=2,
- host="compute2",
- binary="nova-compute",
- topic="compute_topic",
- report_count=5,
- disabled=False,
- disabled_reason=None,
- availability_zone="nova"),
- vcpus=4,
- memory_mb=10 * 1024,
- local_gb=250,
- vcpus_used=2,
- memory_mb_used=5 * 1024,
- local_gb_used=125,
- hypervisor_type="xen",
- hypervisor_version=3,
- hypervisor_hostname="hyper2",
- free_ram_mb=5 * 1024,
- free_disk_gb=125,
- current_workload=2,
- running_vms=2,
- cpu_info='cpu_info',
- disk_available_least=100,
- host_ip='2.2.2.2')]
-TEST_SERVERS = [dict(name="inst1", uuid="uuid1", host="compute1"),
- dict(name="inst2", uuid="uuid2", host="compute2"),
- dict(name="inst3", uuid="uuid3", host="compute1"),
- dict(name="inst4", uuid="uuid4", host="compute2")]
-
-
-def fake_compute_node_get_all(context):
- return TEST_HYPERS
-
-
-def fake_compute_node_search_by_hypervisor(context, hypervisor_re):
- return TEST_HYPERS
-
-
-def fake_compute_node_get(context, compute_id):
- for hyper in TEST_HYPERS:
- if hyper['id'] == compute_id:
- return hyper
- raise exception.ComputeHostNotFound(host=compute_id)
-
-
-def fake_compute_node_statistics(context):
- result = dict(
- count=0,
- vcpus=0,
- memory_mb=0,
- local_gb=0,
- vcpus_used=0,
- memory_mb_used=0,
- local_gb_used=0,
- free_ram_mb=0,
- free_disk_gb=0,
- current_workload=0,
- running_vms=0,
- disk_available_least=0,
- )
-
- for hyper in TEST_HYPERS:
- for key in result:
- if key == 'count':
- result[key] += 1
- else:
- result[key] += hyper[key]
-
- return result
-
-
-def fake_instance_get_all_by_host(context, host):
- results = []
- for inst in TEST_SERVERS:
- if inst['host'] == host:
- results.append(inst)
- return results
-
-
-class HypervisorsTestV21(test.NoDBTestCase):
- DETAIL_HYPERS_DICTS = copy.deepcopy(TEST_HYPERS)
- del DETAIL_HYPERS_DICTS[0]['service_id']
- del DETAIL_HYPERS_DICTS[1]['service_id']
- DETAIL_HYPERS_DICTS[0].update({'state': 'up',
- 'status': 'enabled',
- 'service': dict(id=1, host='compute1',
- disabled_reason=None)})
- DETAIL_HYPERS_DICTS[1].update({'state': 'up',
- 'status': 'enabled',
- 'service': dict(id=2, host='compute2',
- disabled_reason=None)})
-
- INDEX_HYPER_DICTS = [
- dict(id=1, hypervisor_hostname="hyper1",
- state='up', status='enabled'),
- dict(id=2, hypervisor_hostname="hyper2",
- state='up', status='enabled')]
-
- NO_SERVER_HYPER_DICTS = copy.deepcopy(INDEX_HYPER_DICTS)
- NO_SERVER_HYPER_DICTS[0].update({'servers': []})
- NO_SERVER_HYPER_DICTS[1].update({'servers': []})
-
- def _get_request(self, use_admin_context):
- return fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/statistics',
- use_admin_context=use_admin_context)
-
- def _set_up_controller(self):
- self.controller = hypervisors_v21.HypervisorsController()
- self.controller.servicegroup_api.service_is_up = mock.MagicMock(
- return_value=True)
-
- def setUp(self):
- super(HypervisorsTestV21, self).setUp()
- self._set_up_controller()
-
- self.stubs.Set(db, 'compute_node_get_all', fake_compute_node_get_all)
- self.stubs.Set(db, 'compute_node_search_by_hypervisor',
- fake_compute_node_search_by_hypervisor)
- self.stubs.Set(db, 'compute_node_get',
- fake_compute_node_get)
- self.stubs.Set(db, 'compute_node_statistics',
- fake_compute_node_statistics)
- self.stubs.Set(db, 'instance_get_all_by_host',
- fake_instance_get_all_by_host)
-
- def test_view_hypervisor_nodetail_noservers(self):
- result = self.controller._view_hypervisor(TEST_HYPERS[0], False)
-
- self.assertEqual(result, self.INDEX_HYPER_DICTS[0])
-
- def test_view_hypervisor_detail_noservers(self):
- result = self.controller._view_hypervisor(TEST_HYPERS[0], True)
-
- self.assertEqual(result, self.DETAIL_HYPERS_DICTS[0])
-
- def test_view_hypervisor_servers(self):
- result = self.controller._view_hypervisor(TEST_HYPERS[0], False,
- TEST_SERVERS)
- expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
- expected_dict.update({'servers': [
- dict(name="inst1", uuid="uuid1"),
- dict(name="inst2", uuid="uuid2"),
- dict(name="inst3", uuid="uuid3"),
- dict(name="inst4", uuid="uuid4")]})
-
- self.assertEqual(result, expected_dict)
-
- def test_index(self):
- req = self._get_request(True)
- result = self.controller.index(req)
-
- self.assertEqual(result, dict(hypervisors=self.INDEX_HYPER_DICTS))
-
- def test_index_non_admin(self):
- req = self._get_request(False)
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.index, req)
-
- def test_detail(self):
- req = self._get_request(True)
- result = self.controller.detail(req)
-
- self.assertEqual(result, dict(hypervisors=self.DETAIL_HYPERS_DICTS))
-
- def test_detail_non_admin(self):
- req = self._get_request(False)
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.detail, req)
-
- def test_show_noid(self):
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '3')
-
- def test_show_non_integer_id(self):
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'abc')
-
- def test_show_withid(self):
- req = self._get_request(True)
- result = self.controller.show(req, '1')
-
- self.assertEqual(result, dict(hypervisor=self.DETAIL_HYPERS_DICTS[0]))
-
- def test_show_non_admin(self):
- req = self._get_request(False)
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.show, req, '1')
-
- def test_uptime_noid(self):
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, '3')
-
- def test_uptime_notimplemented(self):
- def fake_get_host_uptime(context, hyp):
- raise exc.HTTPNotImplemented()
-
- self.stubs.Set(self.controller.host_api, 'get_host_uptime',
- fake_get_host_uptime)
-
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotImplemented,
- self.controller.uptime, req, '1')
-
- def test_uptime_implemented(self):
- def fake_get_host_uptime(context, hyp):
- return "fake uptime"
-
- self.stubs.Set(self.controller.host_api, 'get_host_uptime',
- fake_get_host_uptime)
-
- req = self._get_request(True)
- result = self.controller.uptime(req, '1')
-
- expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
- expected_dict.update({'uptime': "fake uptime"})
- self.assertEqual(result, dict(hypervisor=expected_dict))
-
- def test_uptime_non_integer_id(self):
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, 'abc')
-
- def test_uptime_non_admin(self):
- req = self._get_request(False)
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.uptime, req, '1')
-
- def test_search(self):
- req = self._get_request(True)
- result = self.controller.search(req, 'hyper')
-
- self.assertEqual(result, dict(hypervisors=self.INDEX_HYPER_DICTS))
-
- def test_search_non_admin(self):
- req = self._get_request(False)
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.search, req, '1')
-
- def test_search_non_exist(self):
- def fake_compute_node_search_by_hypervisor_return_empty(context,
- hypervisor_re):
- return []
- self.stubs.Set(db, 'compute_node_search_by_hypervisor',
- fake_compute_node_search_by_hypervisor_return_empty)
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound, self.controller.search, req, 'a')
-
- def test_servers(self):
- req = self._get_request(True)
- result = self.controller.servers(req, 'hyper')
-
- expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS)
- expected_dict[0].update({'servers': [
- dict(name="inst1", uuid="uuid1"),
- dict(name="inst3", uuid="uuid3")]})
- expected_dict[1].update({'servers': [
- dict(name="inst2", uuid="uuid2"),
- dict(name="inst4", uuid="uuid4")]})
-
- self.assertEqual(result, dict(hypervisors=expected_dict))
-
- def test_servers_non_id(self):
- def fake_compute_node_search_by_hypervisor_return_empty(context,
- hypervisor_re):
- return []
- self.stubs.Set(db, 'compute_node_search_by_hypervisor',
- fake_compute_node_search_by_hypervisor_return_empty)
-
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound,
- self.controller.servers,
- req, '115')
-
- def test_servers_non_admin(self):
- req = self._get_request(False)
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.servers, req, '1')
-
- def test_servers_with_non_integer_hypervisor_id(self):
- def fake_compute_node_search_by_hypervisor_return_empty(context,
- hypervisor_re):
- return []
- self.stubs.Set(db, 'compute_node_search_by_hypervisor',
- fake_compute_node_search_by_hypervisor_return_empty)
-
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound,
- self.controller.servers, req, 'abc')
-
- def test_servers_with_no_server(self):
- def fake_instance_get_all_by_host_return_empty(context, hypervisor_re):
- return []
- self.stubs.Set(db, 'instance_get_all_by_host',
- fake_instance_get_all_by_host_return_empty)
- req = self._get_request(True)
- result = self.controller.servers(req, '1')
- self.assertEqual(result, dict(hypervisors=self.NO_SERVER_HYPER_DICTS))
-
- def test_statistics(self):
- req = self._get_request(True)
- result = self.controller.statistics(req)
-
- self.assertEqual(result, dict(hypervisor_statistics=dict(
- count=2,
- vcpus=8,
- memory_mb=20 * 1024,
- local_gb=500,
- vcpus_used=4,
- memory_mb_used=10 * 1024,
- local_gb_used=250,
- free_ram_mb=10 * 1024,
- free_disk_gb=250,
- current_workload=4,
- running_vms=4,
- disk_available_least=200)))
-
- def test_statistics_non_admin(self):
- req = self._get_request(False)
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.statistics, req)
-
-
-class HypervisorsTestV2(HypervisorsTestV21):
- DETAIL_HYPERS_DICTS = copy.deepcopy(
- HypervisorsTestV21.DETAIL_HYPERS_DICTS)
- del DETAIL_HYPERS_DICTS[0]['state']
- del DETAIL_HYPERS_DICTS[1]['state']
- del DETAIL_HYPERS_DICTS[0]['status']
- del DETAIL_HYPERS_DICTS[1]['status']
- del DETAIL_HYPERS_DICTS[0]['service']['disabled_reason']
- del DETAIL_HYPERS_DICTS[1]['service']['disabled_reason']
- del DETAIL_HYPERS_DICTS[0]['host_ip']
- del DETAIL_HYPERS_DICTS[1]['host_ip']
-
- INDEX_HYPER_DICTS = copy.deepcopy(HypervisorsTestV21.INDEX_HYPER_DICTS)
- del INDEX_HYPER_DICTS[0]['state']
- del INDEX_HYPER_DICTS[1]['state']
- del INDEX_HYPER_DICTS[0]['status']
- del INDEX_HYPER_DICTS[1]['status']
-
- NO_SERVER_HYPER_DICTS = copy.deepcopy(
- HypervisorsTestV21.NO_SERVER_HYPER_DICTS)
- del NO_SERVER_HYPER_DICTS[0]['state']
- del NO_SERVER_HYPER_DICTS[1]['state']
- del NO_SERVER_HYPER_DICTS[0]['status']
- del NO_SERVER_HYPER_DICTS[1]['status']
- del NO_SERVER_HYPER_DICTS[0]['servers']
- del NO_SERVER_HYPER_DICTS[1]['servers']
-
- def _set_up_controller(self):
- self.context = context.get_admin_context()
- self.ext_mgr = extensions.ExtensionManager()
- self.ext_mgr.extensions = {}
- self.controller = hypervisors_v2.HypervisorsController(self.ext_mgr)
-
-
-class HypervisorsSerializersTest(test.NoDBTestCase):
- def compare_to_exemplar(self, exemplar, hyper):
- # Check attributes
- for key, value in exemplar.items():
- if key in ('service', 'servers'):
- # These turn into child elements and get tested
- # separately below...
- continue
-
- self.assertEqual(str(value), hyper.get(key))
-
- # Check child elements
- required_children = set([child for child in ('service', 'servers')
- if child in exemplar])
- for child in hyper:
- self.assertIn(child.tag, required_children)
- required_children.remove(child.tag)
-
- # Check the node...
- if child.tag == 'service':
- for key, value in exemplar['service'].items():
- self.assertEqual(str(value), child.get(key))
- elif child.tag == 'servers':
- for idx, grandchild in enumerate(child):
- self.assertEqual('server', grandchild.tag)
- for key, value in exemplar['servers'][idx].items():
- self.assertEqual(str(value), grandchild.get(key))
-
- # Are they all accounted for?
- self.assertEqual(len(required_children), 0)
-
- def test_index_serializer(self):
- serializer = hypervisors_v2.HypervisorIndexTemplate()
- exemplar = dict(hypervisors=[
- dict(hypervisor_hostname="hyper1",
- id=1),
- dict(hypervisor_hostname="hyper2",
- id=2)])
- text = serializer.serialize(exemplar)
- tree = etree.fromstring(text)
-
- self.assertEqual('hypervisors', tree.tag)
- self.assertEqual(len(exemplar['hypervisors']), len(tree))
- for idx, hyper in enumerate(tree):
- self.assertEqual('hypervisor', hyper.tag)
- self.compare_to_exemplar(exemplar['hypervisors'][idx], hyper)
-
- def test_detail_serializer(self):
- serializer = hypervisors_v2.HypervisorDetailTemplate()
- exemplar = dict(hypervisors=[
- dict(hypervisor_hostname="hyper1",
- id=1,
- vcpus=4,
- memory_mb=10 * 1024,
- local_gb=500,
- vcpus_used=2,
- memory_mb_used=5 * 1024,
- local_gb_used=250,
- hypervisor_type='xen',
- hypervisor_version=3,
- free_ram_mb=5 * 1024,
- free_disk_gb=250,
- current_workload=2,
- running_vms=2,
- cpu_info="json data",
- disk_available_least=100,
- host_ip='1.1.1.1',
- service=dict(id=1, host="compute1")),
- dict(hypervisor_hostname="hyper2",
- id=2,
- vcpus=4,
- memory_mb=10 * 1024,
- local_gb=500,
- vcpus_used=2,
- memory_mb_used=5 * 1024,
- local_gb_used=250,
- hypervisor_type='xen',
- hypervisor_version=3,
- free_ram_mb=5 * 1024,
- free_disk_gb=250,
- current_workload=2,
- running_vms=2,
- cpu_info="json data",
- disk_available_least=100,
- host_ip='2.2.2.2',
- service=dict(id=2, host="compute2"))])
- text = serializer.serialize(exemplar)
- tree = etree.fromstring(text)
-
- self.assertEqual('hypervisors', tree.tag)
- self.assertEqual(len(exemplar['hypervisors']), len(tree))
- for idx, hyper in enumerate(tree):
- self.assertEqual('hypervisor', hyper.tag)
- self.compare_to_exemplar(exemplar['hypervisors'][idx], hyper)
-
- def test_show_serializer(self):
- serializer = hypervisors_v2.HypervisorTemplate()
- exemplar = dict(hypervisor=dict(
- hypervisor_hostname="hyper1",
- id=1,
- vcpus=4,
- memory_mb=10 * 1024,
- local_gb=500,
- vcpus_used=2,
- memory_mb_used=5 * 1024,
- local_gb_used=250,
- hypervisor_type='xen',
- hypervisor_version=3,
- free_ram_mb=5 * 1024,
- free_disk_gb=250,
- current_workload=2,
- running_vms=2,
- cpu_info="json data",
- disk_available_least=100,
- host_ip='1.1.1.1',
- service=dict(id=1, host="compute1")))
- text = serializer.serialize(exemplar)
- tree = etree.fromstring(text)
-
- self.assertEqual('hypervisor', tree.tag)
- self.compare_to_exemplar(exemplar['hypervisor'], tree)
-
- def test_uptime_serializer(self):
- serializer = hypervisors_v2.HypervisorUptimeTemplate()
- exemplar = dict(hypervisor=dict(
- hypervisor_hostname="hyper1",
- id=1,
- uptime='fake uptime'))
- text = serializer.serialize(exemplar)
- tree = etree.fromstring(text)
-
- self.assertEqual('hypervisor', tree.tag)
- self.compare_to_exemplar(exemplar['hypervisor'], tree)
-
- def test_servers_serializer(self):
- serializer = hypervisors_v2.HypervisorServersTemplate()
- exemplar = dict(hypervisors=[
- dict(hypervisor_hostname="hyper1",
- id=1,
- servers=[
- dict(name="inst1",
- uuid="uuid1"),
- dict(name="inst2",
- uuid="uuid2")]),
- dict(hypervisor_hostname="hyper2",
- id=2,
- servers=[
- dict(name="inst3",
- uuid="uuid3"),
- dict(name="inst4",
- uuid="uuid4")])])
- text = serializer.serialize(exemplar)
- tree = etree.fromstring(text)
-
- self.assertEqual('hypervisors', tree.tag)
- self.assertEqual(len(exemplar['hypervisors']), len(tree))
- for idx, hyper in enumerate(tree):
- self.assertEqual('hypervisor', hyper.tag)
- self.compare_to_exemplar(exemplar['hypervisors'][idx], hyper)
-
- def test_statistics_serializer(self):
- serializer = hypervisors_v2.HypervisorStatisticsTemplate()
- exemplar = dict(hypervisor_statistics=dict(
- count=2,
- vcpus=8,
- memory_mb=20 * 1024,
- local_gb=500,
- vcpus_used=4,
- memory_mb_used=10 * 1024,
- local_gb_used=250,
- free_ram_mb=10 * 1024,
- free_disk_gb=250,
- current_workload=4,
- running_vms=4,
- disk_available_least=200))
- text = serializer.serialize(exemplar)
- tree = etree.fromstring(text)
-
- self.assertEqual('hypervisor_statistics', tree.tag)
- self.compare_to_exemplar(exemplar['hypervisor_statistics'], tree)
diff --git a/nova/tests/api/openstack/compute/contrib/test_image_size.py b/nova/tests/api/openstack/compute/contrib/test_image_size.py
deleted file mode 100644
index cfc63a9eba..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_image_size.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# Copyright 2013 Rackspace Hosting
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute.contrib import image_size
-from nova.image import glance
-from nova import test
-from nova.tests.api.openstack import fakes
-
-NOW_API_FORMAT = "2010-10-11T10:30:22Z"
-IMAGES = [{
- 'id': '123',
- 'name': 'public image',
- 'metadata': {'key1': 'value1'},
- 'updated': NOW_API_FORMAT,
- 'created': NOW_API_FORMAT,
- 'status': 'ACTIVE',
- 'progress': 100,
- 'minDisk': 10,
- 'minRam': 128,
- 'size': 12345678,
- "links": [{
- "rel": "self",
- "href": "http://localhost/v2/fake/images/123",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/images/123",
- }],
- },
- {
- 'id': '124',
- 'name': 'queued snapshot',
- 'updated': NOW_API_FORMAT,
- 'created': NOW_API_FORMAT,
- 'status': 'SAVING',
- 'progress': 25,
- 'minDisk': 0,
- 'minRam': 0,
- 'size': 87654321,
- "links": [{
- "rel": "self",
- "href": "http://localhost/v2/fake/images/124",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/images/124",
- }],
- }]
-
-
-def fake_show(*args, **kwargs):
- return IMAGES[0]
-
-
-def fake_detail(*args, **kwargs):
- return IMAGES
-
-
-class ImageSizeTestV21(test.NoDBTestCase):
- content_type = 'application/json'
- prefix = 'OS-EXT-IMG-SIZE'
-
- def setUp(self):
- super(ImageSizeTestV21, self).setUp()
- self.stubs.Set(glance.GlanceImageService, 'show', fake_show)
- self.stubs.Set(glance.GlanceImageService, 'detail', fake_detail)
- self.flags(osapi_compute_extension=['nova.api.openstack.compute'
- '.contrib.image_size.Image_size'])
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(self._get_app())
- return res
-
- def _get_app(self):
- return fakes.wsgi_app_v21()
-
- def _get_image(self, body):
- return jsonutils.loads(body).get('image')
-
- def _get_images(self, body):
- return jsonutils.loads(body).get('images')
-
- def assertImageSize(self, image, size):
- self.assertEqual(image.get('%s:size' % self.prefix), size)
-
- def test_show(self):
- url = '/v2/fake/images/1'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- image = self._get_image(res.body)
- self.assertImageSize(image, 12345678)
-
- def test_detail(self):
- url = '/v2/fake/images/detail'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- images = self._get_images(res.body)
- self.assertImageSize(images[0], 12345678)
- self.assertImageSize(images[1], 87654321)
-
-
-class ImageSizeTestV2(ImageSizeTestV21):
- def _get_app(self):
- return fakes.wsgi_app()
-
-
-class ImageSizeXmlTest(ImageSizeTestV2):
- content_type = 'application/xml'
- prefix = '{%s}' % image_size.Image_size.namespace
-
- def _get_image(self, body):
- return etree.XML(body)
-
- def _get_images(self, body):
- return etree.XML(body).getchildren()
-
- def assertImageSize(self, image, size):
- self.assertEqual(int(image.get('%ssize' % self.prefix)), size)
diff --git a/nova/tests/api/openstack/compute/contrib/test_instance_actions.py b/nova/tests/api/openstack/compute/contrib/test_instance_actions.py
deleted file mode 100644
index 9d3c289e84..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_instance_actions.py
+++ /dev/null
@@ -1,327 +0,0 @@
-# Copyright 2013 Rackspace Hosting
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import uuid
-
-from lxml import etree
-from webob import exc
-
-from nova.api.openstack.compute.contrib import instance_actions \
- as instance_actions_v2
-from nova.api.openstack.compute.plugins.v3 import instance_actions \
- as instance_actions_v21
-from nova.compute import api as compute_api
-from nova import db
-from nova.db.sqlalchemy import models
-from nova import exception
-from nova.openstack.common import policy as common_policy
-from nova import policy
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-from nova.tests import fake_server_actions
-
-FAKE_UUID = fake_server_actions.FAKE_UUID
-FAKE_REQUEST_ID = fake_server_actions.FAKE_REQUEST_ID1
-
-
-def format_action(action):
- '''Remove keys that aren't serialized.'''
- to_delete = ('id', 'finish_time', 'created_at', 'updated_at', 'deleted_at',
- 'deleted')
- for key in to_delete:
- if key in action:
- del(action[key])
- if 'start_time' in action:
- # NOTE(danms): Without WSGI above us, these will be just stringified
- action['start_time'] = str(action['start_time'].replace(tzinfo=None))
- for event in action.get('events', []):
- format_event(event)
- return action
-
-
-def format_event(event):
- '''Remove keys that aren't serialized.'''
- to_delete = ('id', 'created_at', 'updated_at', 'deleted_at', 'deleted',
- 'action_id')
- for key in to_delete:
- if key in event:
- del(event[key])
- if 'start_time' in event:
- # NOTE(danms): Without WSGI above us, these will be just stringified
- event['start_time'] = str(event['start_time'].replace(tzinfo=None))
- if 'finish_time' in event:
- # NOTE(danms): Without WSGI above us, these will be just stringified
- event['finish_time'] = str(event['finish_time'].replace(tzinfo=None))
- return event
-
-
-class InstanceActionsPolicyTestV21(test.NoDBTestCase):
- instance_actions = instance_actions_v21
-
- def setUp(self):
- super(InstanceActionsPolicyTestV21, self).setUp()
- self.controller = self.instance_actions.InstanceActionsController()
-
- def _get_http_req(self, action):
- fake_url = '/123/servers/12/%s' % action
- return fakes.HTTPRequest.blank(fake_url)
-
- def _set_policy_rules(self):
- rules = {'compute:get': common_policy.parse_rule(''),
- 'compute_extension:v3:os-instance-actions':
- common_policy.parse_rule('project_id:%(project_id)s')}
- policy.set_rules(rules)
-
- def test_list_actions_restricted_by_project(self):
- self._set_policy_rules()
-
- def fake_instance_get_by_uuid(context, instance_id,
- columns_to_join=None,
- use_slave=False):
- return fake_instance.fake_db_instance(
- **{'name': 'fake', 'project_id': '%s_unequal' %
- context.project_id})
-
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
- req = self._get_http_req('os-instance-actions')
- self.assertRaises(exception.Forbidden, self.controller.index, req,
- str(uuid.uuid4()))
-
- def test_get_action_restricted_by_project(self):
- self._set_policy_rules()
-
- def fake_instance_get_by_uuid(context, instance_id,
- columns_to_join=None,
- use_slave=False):
- return fake_instance.fake_db_instance(
- **{'name': 'fake', 'project_id': '%s_unequal' %
- context.project_id})
-
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
- req = self._get_http_req('os-instance-actions/1')
- self.assertRaises(exception.Forbidden, self.controller.show, req,
- str(uuid.uuid4()), '1')
-
-
-class InstanceActionsPolicyTestV2(InstanceActionsPolicyTestV21):
- instance_actions = instance_actions_v2
-
- def _set_policy_rules(self):
- rules = {'compute:get': common_policy.parse_rule(''),
- 'compute_extension:instance_actions':
- common_policy.parse_rule('project_id:%(project_id)s')}
- policy.set_rules(rules)
-
-
-class InstanceActionsTestV21(test.NoDBTestCase):
- instance_actions = instance_actions_v21
-
- def setUp(self):
- super(InstanceActionsTestV21, self).setUp()
- self.controller = self.instance_actions.InstanceActionsController()
- self.fake_actions = copy.deepcopy(fake_server_actions.FAKE_ACTIONS)
- self.fake_events = copy.deepcopy(fake_server_actions.FAKE_EVENTS)
-
- def fake_get(self, context, instance_uuid, expected_attrs=None,
- want_objects=False):
- return {'uuid': instance_uuid}
-
- def fake_instance_get_by_uuid(context, instance_id, use_slave=False):
- return {'name': 'fake', 'project_id': context.project_id}
-
- self.stubs.Set(compute_api.API, 'get', fake_get)
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
-
- def _get_http_req(self, action, use_admin_context=False):
- fake_url = '/123/servers/12/%s' % action
- return fakes.HTTPRequest.blank(fake_url,
- use_admin_context=use_admin_context)
-
- def _set_policy_rules(self):
- rules = {'compute:get': common_policy.parse_rule(''),
- 'compute_extension:v3:os-instance-actions':
- common_policy.parse_rule(''),
- 'compute_extension:v3:os-instance-actions:events':
- common_policy.parse_rule('is_admin:True')}
- policy.set_rules(rules)
-
- def test_list_actions(self):
- def fake_get_actions(context, uuid):
- actions = []
- for act in self.fake_actions[uuid].itervalues():
- action = models.InstanceAction()
- action.update(act)
- actions.append(action)
- return actions
-
- self.stubs.Set(db, 'actions_get', fake_get_actions)
- req = self._get_http_req('os-instance-actions')
- res_dict = self.controller.index(req, FAKE_UUID)
- for res in res_dict['instanceActions']:
- fake_action = self.fake_actions[FAKE_UUID][res['request_id']]
- self.assertEqual(format_action(fake_action), format_action(res))
-
- def test_get_action_with_events_allowed(self):
- def fake_get_action(context, uuid, request_id):
- action = models.InstanceAction()
- action.update(self.fake_actions[uuid][request_id])
- return action
-
- def fake_get_events(context, action_id):
- events = []
- for evt in self.fake_events[action_id]:
- event = models.InstanceActionEvent()
- event.update(evt)
- events.append(event)
- return events
-
- self.stubs.Set(db, 'action_get_by_request_id', fake_get_action)
- self.stubs.Set(db, 'action_events_get', fake_get_events)
- req = self._get_http_req('os-instance-actions/1',
- use_admin_context=True)
- res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
- fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
- fake_events = self.fake_events[fake_action['id']]
- fake_action['events'] = fake_events
- self.assertEqual(format_action(fake_action),
- format_action(res_dict['instanceAction']))
-
- def test_get_action_with_events_not_allowed(self):
- def fake_get_action(context, uuid, request_id):
- return self.fake_actions[uuid][request_id]
-
- def fake_get_events(context, action_id):
- return self.fake_events[action_id]
-
- self.stubs.Set(db, 'action_get_by_request_id', fake_get_action)
- self.stubs.Set(db, 'action_events_get', fake_get_events)
-
- self._set_policy_rules()
- req = self._get_http_req('os-instance-actions/1')
- res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
- fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
- self.assertEqual(format_action(fake_action),
- format_action(res_dict['instanceAction']))
-
- def test_action_not_found(self):
- def fake_no_action(context, uuid, action_id):
- return None
-
- self.stubs.Set(db, 'action_get_by_request_id', fake_no_action)
- req = self._get_http_req('os-instance-actions/1')
- self.assertRaises(exc.HTTPNotFound, self.controller.show, req,
- FAKE_UUID, FAKE_REQUEST_ID)
-
- def test_index_instance_not_found(self):
- def fake_get(self, context, instance_uuid, expected_attrs=None,
- want_objects=False):
- raise exception.InstanceNotFound(instance_id=instance_uuid)
- self.stubs.Set(compute_api.API, 'get', fake_get)
- req = self._get_http_req('os-instance-actions')
- self.assertRaises(exc.HTTPNotFound, self.controller.index, req,
- FAKE_UUID)
-
- def test_show_instance_not_found(self):
- def fake_get(self, context, instance_uuid, expected_attrs=None,
- want_objects=False):
- raise exception.InstanceNotFound(instance_id=instance_uuid)
- self.stubs.Set(compute_api.API, 'get', fake_get)
- req = self._get_http_req('os-instance-actions/fake')
- self.assertRaises(exc.HTTPNotFound, self.controller.show, req,
- FAKE_UUID, 'fake')
-
-
-class InstanceActionsTestV2(InstanceActionsTestV21):
- instance_actions = instance_actions_v2
-
- def _set_policy_rules(self):
- rules = {'compute:get': common_policy.parse_rule(''),
- 'compute_extension:instance_actions':
- common_policy.parse_rule(''),
- 'compute_extension:instance_actions:events':
- common_policy.parse_rule('is_admin:True')}
- policy.set_rules(rules)
-
-
-class InstanceActionsSerializerTestV2(test.NoDBTestCase):
- def setUp(self):
- super(InstanceActionsSerializerTestV2, self).setUp()
- self.fake_actions = copy.deepcopy(fake_server_actions.FAKE_ACTIONS)
- self.fake_events = copy.deepcopy(fake_server_actions.FAKE_EVENTS)
-
- def _verify_instance_action_attachment(self, attach, tree):
- for key in attach.keys():
- if key != 'events':
- self.assertEqual(attach[key], tree.get(key),
- '%s did not match' % key)
-
- def _verify_instance_action_event_attachment(self, attach, tree):
- for key in attach.keys():
- self.assertEqual(attach[key], tree.get(key),
- '%s did not match' % key)
-
- def test_instance_action_serializer(self):
- serializer = instance_actions_v2.InstanceActionTemplate()
- action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
- text = serializer.serialize({'instanceAction': action})
- tree = etree.fromstring(text)
-
- action = format_action(action)
- self.assertEqual('instanceAction', tree.tag)
- self._verify_instance_action_attachment(action, tree)
- found_events = False
- for child in tree:
- if child.tag == 'events':
- found_events = True
- self.assertFalse(found_events)
-
- def test_instance_action_events_serializer(self):
- serializer = instance_actions_v2.InstanceActionTemplate()
- action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
- event = self.fake_events[action['id']][0]
- action['events'] = [dict(event), dict(event)]
- text = serializer.serialize({'instanceAction': action})
- tree = etree.fromstring(text)
-
- action = format_action(action)
- self.assertEqual('instanceAction', tree.tag)
- self._verify_instance_action_attachment(action, tree)
-
- event = format_event(event)
- found_events = False
- for child in tree:
- if child.tag == 'events':
- found_events = True
- for key in event:
- self.assertEqual(event[key], child.get(key))
- self.assertTrue(found_events)
-
- def test_instance_actions_serializer(self):
- serializer = instance_actions_v2.InstanceActionsTemplate()
- action_list = self.fake_actions[FAKE_UUID].values()
- text = serializer.serialize({'instanceActions': action_list})
- tree = etree.fromstring(text)
-
- action_list = [format_action(action) for action in action_list]
- self.assertEqual('instanceActions', tree.tag)
- self.assertEqual(len(action_list), len(tree))
- for idx, child in enumerate(tree):
- self.assertEqual('instanceAction', child.tag)
- request_id = child.get('request_id')
- self._verify_instance_action_attachment(
- self.fake_actions[FAKE_UUID][request_id],
- child)
diff --git a/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py b/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py
deleted file mode 100644
index 58e3c405b8..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py
+++ /dev/null
@@ -1,210 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-from oslo.utils import timeutils
-
-from nova.api.openstack.compute.contrib import instance_usage_audit_log as ial
-from nova import context
-from nova import db
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests.objects import test_service
-from nova import utils
-
-
-service_base = test_service.fake_service
-TEST_COMPUTE_SERVICES = [dict(service_base, host='foo', topic='compute'),
- dict(service_base, host='bar', topic='compute'),
- dict(service_base, host='baz', topic='compute'),
- dict(service_base, host='plonk', topic='compute'),
- dict(service_base, host='wibble', topic='bogus'),
- ]
-
-
-begin1 = datetime.datetime(2012, 7, 4, 6, 0, 0)
-begin2 = end1 = datetime.datetime(2012, 7, 5, 6, 0, 0)
-begin3 = end2 = datetime.datetime(2012, 7, 6, 6, 0, 0)
-end3 = datetime.datetime(2012, 7, 7, 6, 0, 0)
-
-
-# test data
-
-
-TEST_LOGS1 = [
- # all services done, no errors.
- dict(host="plonk", period_beginning=begin1, period_ending=end1,
- state="DONE", errors=0, task_items=23, message="test1"),
- dict(host="baz", period_beginning=begin1, period_ending=end1,
- state="DONE", errors=0, task_items=17, message="test2"),
- dict(host="bar", period_beginning=begin1, period_ending=end1,
- state="DONE", errors=0, task_items=10, message="test3"),
- dict(host="foo", period_beginning=begin1, period_ending=end1,
- state="DONE", errors=0, task_items=7, message="test4"),
- ]
-
-
-TEST_LOGS2 = [
- # some still running...
- dict(host="plonk", period_beginning=begin2, period_ending=end2,
- state="DONE", errors=0, task_items=23, message="test5"),
- dict(host="baz", period_beginning=begin2, period_ending=end2,
- state="DONE", errors=0, task_items=17, message="test6"),
- dict(host="bar", period_beginning=begin2, period_ending=end2,
- state="RUNNING", errors=0, task_items=10, message="test7"),
- dict(host="foo", period_beginning=begin2, period_ending=end2,
- state="DONE", errors=0, task_items=7, message="test8"),
- ]
-
-
-TEST_LOGS3 = [
- # some errors..
- dict(host="plonk", period_beginning=begin3, period_ending=end3,
- state="DONE", errors=0, task_items=23, message="test9"),
- dict(host="baz", period_beginning=begin3, period_ending=end3,
- state="DONE", errors=2, task_items=17, message="test10"),
- dict(host="bar", period_beginning=begin3, period_ending=end3,
- state="DONE", errors=0, task_items=10, message="test11"),
- dict(host="foo", period_beginning=begin3, period_ending=end3,
- state="DONE", errors=1, task_items=7, message="test12"),
- ]
-
-
-def fake_task_log_get_all(context, task_name, begin, end,
- host=None, state=None):
- assert task_name == "instance_usage_audit"
-
- if begin == begin1 and end == end1:
- return TEST_LOGS1
- if begin == begin2 and end == end2:
- return TEST_LOGS2
- if begin == begin3 and end == end3:
- return TEST_LOGS3
- raise AssertionError("Invalid date %s to %s" % (begin, end))
-
-
-def fake_last_completed_audit_period(unit=None, before=None):
- audit_periods = [(begin3, end3),
- (begin2, end2),
- (begin1, end1)]
- if before is not None:
- for begin, end in audit_periods:
- if before > end:
- return begin, end
- raise AssertionError("Invalid before date %s" % (before))
- return begin1, end1
-
-
-class InstanceUsageAuditLogTest(test.NoDBTestCase):
- def setUp(self):
- super(InstanceUsageAuditLogTest, self).setUp()
- self.context = context.get_admin_context()
- timeutils.set_time_override(datetime.datetime(2012, 7, 5, 10, 0, 0))
- self.controller = ial.InstanceUsageAuditLogController()
- self.host_api = self.controller.host_api
-
- def fake_service_get_all(context, disabled):
- self.assertIsNone(disabled)
- return TEST_COMPUTE_SERVICES
-
- self.stubs.Set(utils, 'last_completed_audit_period',
- fake_last_completed_audit_period)
- self.stubs.Set(db, 'service_get_all',
- fake_service_get_all)
- self.stubs.Set(db, 'task_log_get_all',
- fake_task_log_get_all)
-
- def tearDown(self):
- super(InstanceUsageAuditLogTest, self).tearDown()
- timeutils.clear_time_override()
-
- def test_index(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-instance_usage_audit_log',
- use_admin_context=True)
- result = self.controller.index(req)
- self.assertIn('instance_usage_audit_logs', result)
- logs = result['instance_usage_audit_logs']
- self.assertEqual(57, logs['total_instances'])
- self.assertEqual(0, logs['total_errors'])
- self.assertEqual(4, len(logs['log']))
- self.assertEqual(4, logs['num_hosts'])
- self.assertEqual(4, logs['num_hosts_done'])
- self.assertEqual(0, logs['num_hosts_running'])
- self.assertEqual(0, logs['num_hosts_not_run'])
- self.assertEqual("ALL hosts done. 0 errors.", logs['overall_status'])
-
- def test_index_non_admin(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-instance_usage_audit_log',
- use_admin_context=False)
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.index, req)
-
- def test_show(self):
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-instance_usage_audit_log/show',
- use_admin_context=True)
- result = self.controller.show(req, '2012-07-05 10:00:00')
- self.assertIn('instance_usage_audit_log', result)
- logs = result['instance_usage_audit_log']
- self.assertEqual(57, logs['total_instances'])
- self.assertEqual(0, logs['total_errors'])
- self.assertEqual(4, len(logs['log']))
- self.assertEqual(4, logs['num_hosts'])
- self.assertEqual(4, logs['num_hosts_done'])
- self.assertEqual(0, logs['num_hosts_running'])
- self.assertEqual(0, logs['num_hosts_not_run'])
- self.assertEqual("ALL hosts done. 0 errors.", logs['overall_status'])
-
- def test_show_non_admin(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-instance_usage_audit_log',
- use_admin_context=False)
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.show, req, '2012-07-05 10:00:00')
-
- def test_show_with_running(self):
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-instance_usage_audit_log/show',
- use_admin_context=True)
- result = self.controller.show(req, '2012-07-06 10:00:00')
- self.assertIn('instance_usage_audit_log', result)
- logs = result['instance_usage_audit_log']
- self.assertEqual(57, logs['total_instances'])
- self.assertEqual(0, logs['total_errors'])
- self.assertEqual(4, len(logs['log']))
- self.assertEqual(4, logs['num_hosts'])
- self.assertEqual(3, logs['num_hosts_done'])
- self.assertEqual(1, logs['num_hosts_running'])
- self.assertEqual(0, logs['num_hosts_not_run'])
- self.assertEqual("3 of 4 hosts done. 0 errors.",
- logs['overall_status'])
-
- def test_show_with_errors(self):
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-instance_usage_audit_log/show',
- use_admin_context=True)
- result = self.controller.show(req, '2012-07-07 10:00:00')
- self.assertIn('instance_usage_audit_log', result)
- logs = result['instance_usage_audit_log']
- self.assertEqual(57, logs['total_instances'])
- self.assertEqual(3, logs['total_errors'])
- self.assertEqual(4, len(logs['log']))
- self.assertEqual(4, logs['num_hosts'])
- self.assertEqual(4, logs['num_hosts_done'])
- self.assertEqual(0, logs['num_hosts_running'])
- self.assertEqual(0, logs['num_hosts_not_run'])
- self.assertEqual("ALL hosts done. 3 errors.",
- logs['overall_status'])
diff --git a/nova/tests/api/openstack/compute/contrib/test_keypairs.py b/nova/tests/api/openstack/compute/contrib/test_keypairs.py
deleted file mode 100644
index 21ac2969aa..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_keypairs.py
+++ /dev/null
@@ -1,497 +0,0 @@
-# Copyright 2011 Eldar Nugaev
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute.contrib import keypairs as keypairs_v2
-from nova.api.openstack.compute.plugins.v3 import keypairs as keypairs_v21
-from nova.api.openstack import wsgi
-from nova import db
-from nova import exception
-from nova.openstack.common import policy as common_policy
-from nova import policy
-from nova import quota
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests.objects import test_keypair
-
-
-QUOTAS = quota.QUOTAS
-
-
-keypair_data = {
- 'public_key': 'FAKE_KEY',
- 'fingerprint': 'FAKE_FINGERPRINT',
-}
-
-
-def fake_keypair(name):
- return dict(test_keypair.fake_keypair,
- name=name, **keypair_data)
-
-
-def db_key_pair_get_all_by_user(self, user_id):
- return [fake_keypair('FAKE')]
-
-
-def db_key_pair_create(self, keypair):
- return fake_keypair(name=keypair['name'])
-
-
-def db_key_pair_destroy(context, user_id, name):
- if not (user_id and name):
- raise Exception()
-
-
-def db_key_pair_create_duplicate(context, keypair):
- raise exception.KeyPairExists(key_name=keypair.get('name', ''))
-
-
-class KeypairsTestV21(test.TestCase):
- base_url = '/v2/fake'
-
- def _setup_app(self):
- self.app = fakes.wsgi_app_v21(init_only=('os-keypairs', 'servers'))
- self.app_server = self.app
-
- def setUp(self):
- super(KeypairsTestV21, self).setUp()
- fakes.stub_out_networking(self.stubs)
- fakes.stub_out_rate_limiting(self.stubs)
-
- self.stubs.Set(db, "key_pair_get_all_by_user",
- db_key_pair_get_all_by_user)
- self.stubs.Set(db, "key_pair_create",
- db_key_pair_create)
- self.stubs.Set(db, "key_pair_destroy",
- db_key_pair_destroy)
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Keypairs'])
- self._setup_app()
-
- def test_keypair_list(self):
- req = webob.Request.blank(self.base_url + '/os-keypairs')
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 200)
- res_dict = jsonutils.loads(res.body)
- response = {'keypairs': [{'keypair': dict(keypair_data, name='FAKE')}]}
- self.assertEqual(res_dict, response)
-
- def test_keypair_create(self):
- body = {'keypair': {'name': 'create_test'}}
- req = webob.Request.blank(self.base_url + '/os-keypairs')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 200)
- res_dict = jsonutils.loads(res.body)
- self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
- self.assertTrue(len(res_dict['keypair']['private_key']) > 0)
-
- def _test_keypair_create_bad_request_case(self, body):
- req = webob.Request.blank(self.base_url + '/os-keypairs')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 400)
-
- def test_keypair_create_with_empty_name(self):
- body = {'keypair': {'name': ''}}
- self._test_keypair_create_bad_request_case(body)
-
- def test_keypair_create_with_name_too_long(self):
- body = {
- 'keypair': {
- 'name': 'a' * 256
- }
- }
- self._test_keypair_create_bad_request_case(body)
-
- def test_keypair_create_with_non_alphanumeric_name(self):
- body = {
- 'keypair': {
- 'name': 'test/keypair'
- }
- }
- self._test_keypair_create_bad_request_case(body)
-
- def test_keypair_import_bad_key(self):
- body = {
- 'keypair': {
- 'name': 'create_test',
- 'public_key': 'ssh-what negative',
- },
- }
- self._test_keypair_create_bad_request_case(body)
-
- def test_keypair_create_with_invalid_keypair_body(self):
- body = {'alpha': {'name': 'create_test'}}
- self._test_keypair_create_bad_request_case(body)
-
- def test_keypair_import(self):
- body = {
- 'keypair': {
- 'name': 'create_test',
- 'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
- 'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
- 'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
- 'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
- 'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
- 'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
- 'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
- 'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
- 'bHkXa6OciiJDvkRzJXzf',
- },
- }
-
- req = webob.Request.blank(self.base_url + '/os-keypairs')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 200)
-        # FIXME(ja): should we check that public_key was sent to create?
- res_dict = jsonutils.loads(res.body)
- self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
- self.assertNotIn('private_key', res_dict['keypair'])
-
- def test_keypair_import_quota_limit(self):
-
- def fake_quotas_count(self, context, resource, *args, **kwargs):
- return 100
-
- self.stubs.Set(QUOTAS, "count", fake_quotas_count)
-
- body = {
- 'keypair': {
- 'name': 'create_test',
- 'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
- 'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
- 'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
- 'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
- 'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
- 'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
- 'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
- 'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
- 'bHkXa6OciiJDvkRzJXzf',
- },
- }
-
- req = webob.Request.blank(self.base_url + '/os-keypairs')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 403)
- res_dict = jsonutils.loads(res.body)
- self.assertEqual(
- "Quota exceeded, too many key pairs.",
- res_dict['forbidden']['message'])
-
- def test_keypair_create_quota_limit(self):
-
- def fake_quotas_count(self, context, resource, *args, **kwargs):
- return 100
-
- self.stubs.Set(QUOTAS, "count", fake_quotas_count)
-
- body = {
- 'keypair': {
- 'name': 'create_test',
- },
- }
-
- req = webob.Request.blank(self.base_url + '/os-keypairs')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 403)
- res_dict = jsonutils.loads(res.body)
- self.assertEqual(
- "Quota exceeded, too many key pairs.",
- res_dict['forbidden']['message'])
-
- def test_keypair_create_duplicate(self):
- self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate)
- body = {'keypair': {'name': 'create_duplicate'}}
- req = webob.Request.blank(self.base_url + '/os-keypairs')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 409)
- res_dict = jsonutils.loads(res.body)
- self.assertEqual(
- "Key pair 'create_duplicate' already exists.",
- res_dict['conflictingRequest']['message'])
-
- def test_keypair_delete(self):
- req = webob.Request.blank(self.base_url + '/os-keypairs/FAKE')
- req.method = 'DELETE'
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 202)
-
- def test_keypair_get_keypair_not_found(self):
- req = webob.Request.blank(self.base_url + '/os-keypairs/DOESNOTEXIST')
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 404)
-
- def test_keypair_delete_not_found(self):
-
- def db_key_pair_get_not_found(context, user_id, name):
- raise exception.KeypairNotFound(user_id=user_id, name=name)
-
- self.stubs.Set(db, "key_pair_get",
- db_key_pair_get_not_found)
- req = webob.Request.blank(self.base_url + '/os-keypairs/WHAT')
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 404)
-
- def test_keypair_show(self):
-
- def _db_key_pair_get(context, user_id, name):
- return dict(test_keypair.fake_keypair,
- name='foo', public_key='XXX', fingerprint='YYY')
-
- self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
-
- req = webob.Request.blank(self.base_url + '/os-keypairs/FAKE')
- req.method = 'GET'
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(self.app)
- res_dict = jsonutils.loads(res.body)
- self.assertEqual(res.status_int, 200)
- self.assertEqual('foo', res_dict['keypair']['name'])
- self.assertEqual('XXX', res_dict['keypair']['public_key'])
- self.assertEqual('YYY', res_dict['keypair']['fingerprint'])
-
- def test_keypair_show_not_found(self):
-
- def _db_key_pair_get(context, user_id, name):
- raise exception.KeypairNotFound(user_id=user_id, name=name)
-
- self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
-
- req = webob.Request.blank(self.base_url + '/os-keypairs/FAKE')
- req.method = 'GET'
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 404)
-
- def test_show_server(self):
- self.stubs.Set(db, 'instance_get',
- fakes.fake_instance_get())
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get())
- req = webob.Request.blank(self.base_url + '/servers/1')
- req.headers['Content-Type'] = 'application/json'
- response = req.get_response(self.app_server)
- self.assertEqual(response.status_int, 200)
- res_dict = jsonutils.loads(response.body)
- self.assertIn('key_name', res_dict['server'])
- self.assertEqual(res_dict['server']['key_name'], '')
-
- def test_detail_servers(self):
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fakes.fake_instance_get_all_by_filters())
- req = fakes.HTTPRequest.blank(self.base_url + '/servers/detail')
- res = req.get_response(self.app_server)
- server_dicts = jsonutils.loads(res.body)['servers']
- self.assertEqual(len(server_dicts), 5)
-
- for server_dict in server_dicts:
- self.assertIn('key_name', server_dict)
- self.assertEqual(server_dict['key_name'], '')
-
-
-class KeypairPolicyTestV21(test.TestCase):
- KeyPairController = keypairs_v21.KeypairController()
- policy_path = 'compute_extension:v3:os-keypairs'
- base_url = '/v2/fake'
-
- def setUp(self):
- super(KeypairPolicyTestV21, self).setUp()
-
- def _db_key_pair_get(context, user_id, name):
- return dict(test_keypair.fake_keypair,
- name='foo', public_key='XXX', fingerprint='YYY')
-
- self.stubs.Set(db, "key_pair_get",
- _db_key_pair_get)
- self.stubs.Set(db, "key_pair_get_all_by_user",
- db_key_pair_get_all_by_user)
- self.stubs.Set(db, "key_pair_create",
- db_key_pair_create)
- self.stubs.Set(db, "key_pair_destroy",
- db_key_pair_destroy)
-
- def test_keypair_list_fail_policy(self):
- rules = {self.policy_path + ':index':
- common_policy.parse_rule('role:admin')}
- policy.set_rules(rules)
- req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs')
- self.assertRaises(exception.Forbidden,
- self.KeyPairController.index,
- req)
-
- def test_keypair_list_pass_policy(self):
- rules = {self.policy_path + ':index':
- common_policy.parse_rule('')}
- policy.set_rules(rules)
- req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs')
- res = self.KeyPairController.index(req)
- self.assertIn('keypairs', res)
-
- def test_keypair_show_fail_policy(self):
- rules = {self.policy_path + ':show':
- common_policy.parse_rule('role:admin')}
- policy.set_rules(rules)
- req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs/FAKE')
- self.assertRaises(exception.Forbidden,
- self.KeyPairController.show,
- req, 'FAKE')
-
- def test_keypair_show_pass_policy(self):
- rules = {self.policy_path + ':show':
- common_policy.parse_rule('')}
- policy.set_rules(rules)
- req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs/FAKE')
- res = self.KeyPairController.show(req, 'FAKE')
- self.assertIn('keypair', res)
-
- def test_keypair_create_fail_policy(self):
- body = {'keypair': {'name': 'create_test'}}
- rules = {self.policy_path + ':create':
- common_policy.parse_rule('role:admin')}
- policy.set_rules(rules)
- req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs')
- req.method = 'POST'
- self.assertRaises(exception.Forbidden,
- self.KeyPairController.create,
- req, body=body)
-
- def test_keypair_create_pass_policy(self):
- body = {'keypair': {'name': 'create_test'}}
- rules = {self.policy_path + ':create':
- common_policy.parse_rule('')}
- policy.set_rules(rules)
- req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs')
- req.method = 'POST'
- res = self.KeyPairController.create(req, body=body)
- self.assertIn('keypair', res)
-
- def test_keypair_delete_fail_policy(self):
- rules = {self.policy_path + ':delete':
- common_policy.parse_rule('role:admin')}
- policy.set_rules(rules)
- req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs/FAKE')
- req.method = 'DELETE'
- self.assertRaises(exception.Forbidden,
- self.KeyPairController.delete,
- req, 'FAKE')
-
- def test_keypair_delete_pass_policy(self):
- rules = {self.policy_path + ':delete':
- common_policy.parse_rule('')}
- policy.set_rules(rules)
- req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs/FAKE')
- req.method = 'DELETE'
- res = self.KeyPairController.delete(req, 'FAKE')
-
- # NOTE: on v2.1, http status code is set as wsgi_code of API
- # method instead of status_int in a response object.
- if isinstance(self.KeyPairController, keypairs_v21.KeypairController):
- status_int = self.KeyPairController.delete.wsgi_code
- else:
- status_int = res.status_int
- self.assertEqual(202, status_int)
-
-
-class KeypairsXMLSerializerTest(test.TestCase):
- def setUp(self):
- super(KeypairsXMLSerializerTest, self).setUp()
- self.deserializer = wsgi.XMLDeserializer()
-
- def test_default_serializer(self):
- exemplar = dict(keypair=dict(
- public_key='fake_public_key',
- private_key='fake_private_key',
- fingerprint='fake_fingerprint',
- user_id='fake_user_id',
- name='fake_key_name'))
- serializer = keypairs_v2.KeypairTemplate()
- text = serializer.serialize(exemplar)
-
- tree = etree.fromstring(text)
-
- self.assertEqual('keypair', tree.tag)
- for child in tree:
- self.assertIn(child.tag, exemplar['keypair'])
- self.assertEqual(child.text, exemplar['keypair'][child.tag])
-
- def test_index_serializer(self):
- exemplar = dict(keypairs=[
- dict(keypair=dict(
- name='key1_name',
- public_key='key1_key',
- fingerprint='key1_fingerprint')),
- dict(keypair=dict(
- name='key2_name',
- public_key='key2_key',
- fingerprint='key2_fingerprint'))])
- serializer = keypairs_v2.KeypairsTemplate()
- text = serializer.serialize(exemplar)
-
- tree = etree.fromstring(text)
-
- self.assertEqual('keypairs', tree.tag)
- self.assertEqual(len(exemplar['keypairs']), len(tree))
- for idx, keypair in enumerate(tree):
- self.assertEqual('keypair', keypair.tag)
- kp_data = exemplar['keypairs'][idx]['keypair']
- for child in keypair:
- self.assertIn(child.tag, kp_data)
- self.assertEqual(child.text, kp_data[child.tag])
-
- def test_deserializer(self):
- exemplar = dict(keypair=dict(
- name='key_name',
- public_key='public_key'))
- intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
- '<keypair><name>key_name</name>'
- '<public_key>public_key</public_key></keypair>')
-
- result = self.deserializer.deserialize(intext)['body']
- self.assertEqual(result, exemplar)
-
-
-class KeypairsTestV2(KeypairsTestV21):
-
- def _setup_app(self):
- self.app = fakes.wsgi_app(init_only=('os-keypairs',))
- self.app_server = fakes.wsgi_app(init_only=('servers',))
-
-
-class KeypairPolicyTestV2(KeypairPolicyTestV21):
- KeyPairController = keypairs_v2.KeypairController()
- policy_path = 'compute_extension:keypairs'
diff --git a/nova/tests/api/openstack/compute/contrib/test_migrate_server.py b/nova/tests/api/openstack/compute/contrib/test_migrate_server.py
deleted file mode 100644
index 3a79e65810..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_migrate_server.py
+++ /dev/null
@@ -1,231 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.api.openstack.compute.plugins.v3 import migrate_server
-from nova import exception
-from nova.openstack.common import uuidutils
-from nova.tests.api.openstack.compute.plugins.v3 import \
- admin_only_action_common
-from nova.tests.api.openstack import fakes
-
-
-class MigrateServerTests(admin_only_action_common.CommonTests):
- def setUp(self):
- super(MigrateServerTests, self).setUp()
- self.controller = migrate_server.MigrateServerController()
- self.compute_api = self.controller.compute_api
-
- def _fake_controller(*args, **kwargs):
- return self.controller
-
- self.stubs.Set(migrate_server, 'MigrateServerController',
- _fake_controller)
- self.app = fakes.wsgi_app_v21(init_only=('servers',
- 'os-migrate-server'),
- fake_auth_context=self.context)
- self.mox.StubOutWithMock(self.compute_api, 'get')
-
- def test_migrate(self):
- method_translations = {'migrate': 'resize',
- 'os-migrateLive': 'live_migrate'}
- body_map = {'os-migrateLive': {'host': 'hostname',
- 'block_migration': False,
- 'disk_over_commit': False}}
- args_map = {'os-migrateLive': ((False, False, 'hostname'), {})}
- self._test_actions(['migrate', 'os-migrateLive'], body_map=body_map,
- method_translations=method_translations,
- args_map=args_map)
-
- def test_migrate_none_hostname(self):
- method_translations = {'migrate': 'resize',
- 'os-migrateLive': 'live_migrate'}
- body_map = {'os-migrateLive': {'host': None,
- 'block_migration': False,
- 'disk_over_commit': False}}
- args_map = {'os-migrateLive': ((False, False, None), {})}
- self._test_actions(['migrate', 'os-migrateLive'], body_map=body_map,
- method_translations=method_translations,
- args_map=args_map)
-
- def test_migrate_with_non_existed_instance(self):
- body_map = {'os-migrateLive': {'host': 'hostname',
- 'block_migration': False,
- 'disk_over_commit': False}}
- self._test_actions_with_non_existed_instance(
- ['migrate', 'os-migrateLive'], body_map=body_map)
-
- def test_migrate_raise_conflict_on_invalid_state(self):
- method_translations = {'migrate': 'resize',
- 'os-migrateLive': 'live_migrate'}
- body_map = {'os-migrateLive': {'host': 'hostname',
- 'block_migration': False,
- 'disk_over_commit': False}}
- args_map = {'os-migrateLive': ((False, False, 'hostname'), {})}
- self._test_actions_raise_conflict_on_invalid_state(
- ['migrate', 'os-migrateLive'], body_map=body_map,
- args_map=args_map, method_translations=method_translations)
-
- def test_actions_with_locked_instance(self):
- method_translations = {'migrate': 'resize',
- 'os-migrateLive': 'live_migrate'}
- body_map = {'os-migrateLive': {'host': 'hostname',
- 'block_migration': False,
- 'disk_over_commit': False}}
- args_map = {'os-migrateLive': ((False, False, 'hostname'), {})}
- self._test_actions_with_locked_instance(
- ['migrate', 'os-migrateLive'], body_map=body_map,
- args_map=args_map, method_translations=method_translations)
-
- def _test_migrate_exception(self, exc_info, expected_result):
- self.mox.StubOutWithMock(self.compute_api, 'resize')
- instance = self._stub_instance_get()
- self.compute_api.resize(self.context, instance).AndRaise(exc_info)
-
- self.mox.ReplayAll()
-
- res = self._make_request('/servers/%s/action' % instance['uuid'],
- {'migrate': None})
- self.assertEqual(expected_result, res.status_int)
-
- def test_migrate_too_many_instances(self):
- exc_info = exception.TooManyInstances(overs='', req='', used=0,
- allowed=0, resource='')
- self._test_migrate_exception(exc_info, 403)
-
- def _test_migrate_live_succeeded(self, param):
- self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
- instance = self._stub_instance_get()
- self.compute_api.live_migrate(self.context, instance, False,
- False, 'hostname')
-
- self.mox.ReplayAll()
-
- res = self._make_request('/servers/%s/action' % instance.uuid,
- {'os-migrateLive': param})
- self.assertEqual(202, res.status_int)
-
- def test_migrate_live_enabled(self):
- param = {'host': 'hostname',
- 'block_migration': False,
- 'disk_over_commit': False}
- self._test_migrate_live_succeeded(param)
-
- def test_migrate_live_enabled_with_string_param(self):
- param = {'host': 'hostname',
- 'block_migration': "False",
- 'disk_over_commit': "False"}
- self._test_migrate_live_succeeded(param)
-
- def test_migrate_live_without_host(self):
- res = self._make_request('/servers/FAKE/action',
- {'os-migrateLive':
- {'block_migration': False,
- 'disk_over_commit': False}})
- self.assertEqual(400, res.status_int)
-
- def test_migrate_live_without_block_migration(self):
- res = self._make_request('/servers/FAKE/action',
- {'os-migrateLive':
- {'host': 'hostname',
- 'disk_over_commit': False}})
- self.assertEqual(400, res.status_int)
-
- def test_migrate_live_without_disk_over_commit(self):
- res = self._make_request('/servers/FAKE/action',
- {'os-migrateLive':
- {'host': 'hostname',
- 'block_migration': False}})
- self.assertEqual(400, res.status_int)
-
- def test_migrate_live_with_invalid_block_migration(self):
- res = self._make_request('/servers/FAKE/action',
- {'os-migrateLive':
- {'host': 'hostname',
- 'block_migration': "foo",
- 'disk_over_commit': False}})
- self.assertEqual(400, res.status_int)
-
- def test_migrate_live_with_invalid_disk_over_commit(self):
- res = self._make_request('/servers/FAKE/action',
- {'os-migrateLive':
- {'host': 'hostname',
- 'block_migration': False,
- 'disk_over_commit': "foo"}})
- self.assertEqual(400, res.status_int)
-
- def _test_migrate_live_failed_with_exception(self, fake_exc,
- uuid=None):
- self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
-
- instance = self._stub_instance_get(uuid=uuid)
- self.compute_api.live_migrate(self.context, instance, False,
- False, 'hostname').AndRaise(fake_exc)
-
- self.mox.ReplayAll()
-
- res = self._make_request('/servers/%s/action' % instance.uuid,
- {'os-migrateLive':
- {'host': 'hostname',
- 'block_migration': False,
- 'disk_over_commit': False}})
- self.assertEqual(400, res.status_int)
- self.assertIn(unicode(fake_exc), res.body)
-
- def test_migrate_live_compute_service_unavailable(self):
- self._test_migrate_live_failed_with_exception(
- exception.ComputeServiceUnavailable(host='host'))
-
- def test_migrate_live_invalid_hypervisor_type(self):
- self._test_migrate_live_failed_with_exception(
- exception.InvalidHypervisorType())
-
- def test_migrate_live_invalid_cpu_info(self):
- self._test_migrate_live_failed_with_exception(
- exception.InvalidCPUInfo(reason=""))
-
- def test_migrate_live_unable_to_migrate_to_self(self):
- uuid = uuidutils.generate_uuid()
- self._test_migrate_live_failed_with_exception(
- exception.UnableToMigrateToSelf(instance_id=uuid,
- host='host'),
- uuid=uuid)
-
- def test_migrate_live_destination_hypervisor_too_old(self):
- self._test_migrate_live_failed_with_exception(
- exception.DestinationHypervisorTooOld())
-
- def test_migrate_live_no_valid_host(self):
- self._test_migrate_live_failed_with_exception(
- exception.NoValidHost(reason=''))
-
- def test_migrate_live_invalid_local_storage(self):
- self._test_migrate_live_failed_with_exception(
- exception.InvalidLocalStorage(path='', reason=''))
-
- def test_migrate_live_invalid_shared_storage(self):
- self._test_migrate_live_failed_with_exception(
- exception.InvalidSharedStorage(path='', reason=''))
-
- def test_migrate_live_hypervisor_unavailable(self):
- self._test_migrate_live_failed_with_exception(
- exception.HypervisorUnavailable(host=""))
-
- def test_migrate_live_instance_not_running(self):
- self._test_migrate_live_failed_with_exception(
- exception.InstanceNotRunning(instance_id=""))
-
- def test_migrate_live_pre_check_error(self):
- self._test_migrate_live_failed_with_exception(
- exception.MigrationPreCheckError(reason=''))
diff --git a/nova/tests/api/openstack/compute/contrib/test_multinic.py b/nova/tests/api/openstack/compute/contrib/test_multinic.py
deleted file mode 100644
index 481fe7e441..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_multinic.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo.serialization import jsonutils
-import webob
-
-from nova import compute
-from nova import exception
-from nova import objects
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-UUID = '70f6db34-de8d-4fbd-aafb-4065bdfa6114'
-last_add_fixed_ip = (None, None)
-last_remove_fixed_ip = (None, None)
-
-
-def compute_api_add_fixed_ip(self, context, instance, network_id):
- global last_add_fixed_ip
-
- last_add_fixed_ip = (instance['uuid'], network_id)
-
-
-def compute_api_remove_fixed_ip(self, context, instance, address):
- global last_remove_fixed_ip
-
- last_remove_fixed_ip = (instance['uuid'], address)
-
-
-def compute_api_get(self, context, instance_id, want_objects=False,
- expected_attrs=None):
- instance = objects.Instance()
- instance.uuid = instance_id
- instance.id = 1
- instance.vm_state = 'fake'
- instance.task_state = 'fake'
- instance.obj_reset_changes()
- return instance
-
-
-class FixedIpTestV21(test.NoDBTestCase):
- def setUp(self):
- super(FixedIpTestV21, self).setUp()
- fakes.stub_out_networking(self.stubs)
- fakes.stub_out_rate_limiting(self.stubs)
- self.stubs.Set(compute.api.API, "add_fixed_ip",
- compute_api_add_fixed_ip)
- self.stubs.Set(compute.api.API, "remove_fixed_ip",
- compute_api_remove_fixed_ip)
- self.stubs.Set(compute.api.API, 'get', compute_api_get)
- self.app = self._get_app()
-
- def _get_app(self):
- return fakes.wsgi_app_v21(init_only=('servers', 'os-multinic'))
-
- def _get_url(self):
- return '/v2/fake'
-
- def test_add_fixed_ip(self):
- global last_add_fixed_ip
- last_add_fixed_ip = (None, None)
-
- body = dict(addFixedIp=dict(networkId='test_net'))
- req = webob.Request.blank(
- self._get_url() + '/servers/%s/action' % UUID)
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['content-type'] = 'application/json'
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 202)
- self.assertEqual(last_add_fixed_ip, (UUID, 'test_net'))
-
- def _test_add_fixed_ip_bad_request(self, body):
- req = webob.Request.blank(
- self._get_url() + '/servers/%s/action' % UUID)
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['content-type'] = 'application/json'
- resp = req.get_response(self.app)
- self.assertEqual(400, resp.status_int)
-
- def test_add_fixed_ip_empty_network_id(self):
- body = {'addFixedIp': {'network_id': ''}}
- self._test_add_fixed_ip_bad_request(body)
-
- def test_add_fixed_ip_network_id_bigger_than_36(self):
- body = {'addFixedIp': {'network_id': 'a' * 37}}
- self._test_add_fixed_ip_bad_request(body)
-
- def test_add_fixed_ip_no_network(self):
- global last_add_fixed_ip
- last_add_fixed_ip = (None, None)
-
- body = dict(addFixedIp=dict())
- req = webob.Request.blank(
- self._get_url() + '/servers/%s/action' % UUID)
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['content-type'] = 'application/json'
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 400)
- self.assertEqual(last_add_fixed_ip, (None, None))
-
- @mock.patch.object(compute.api.API, 'add_fixed_ip')
- def test_add_fixed_ip_no_more_ips_available(self, mock_add_fixed_ip):
- mock_add_fixed_ip.side_effect = exception.NoMoreFixedIps(net='netid')
-
- body = dict(addFixedIp=dict(networkId='test_net'))
- req = webob.Request.blank(
- self._get_url() + '/servers/%s/action' % UUID)
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['content-type'] = 'application/json'
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 400)
-
- def test_remove_fixed_ip(self):
- global last_remove_fixed_ip
- last_remove_fixed_ip = (None, None)
-
- body = dict(removeFixedIp=dict(address='10.10.10.1'))
- req = webob.Request.blank(
- self._get_url() + '/servers/%s/action' % UUID)
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['content-type'] = 'application/json'
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 202)
- self.assertEqual(last_remove_fixed_ip, (UUID, '10.10.10.1'))
-
- def test_remove_fixed_ip_no_address(self):
- global last_remove_fixed_ip
- last_remove_fixed_ip = (None, None)
-
- body = dict(removeFixedIp=dict())
- req = webob.Request.blank(
- self._get_url() + '/servers/%s/action' % UUID)
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['content-type'] = 'application/json'
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 400)
- self.assertEqual(last_remove_fixed_ip, (None, None))
-
- def test_remove_fixed_ip_invalid_address(self):
- body = {'remove_fixed_ip': {'address': ''}}
- req = webob.Request.blank(
- self._get_url() + '/servers/%s/action' % UUID)
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['content-type'] = 'application/json'
- resp = req.get_response(self.app)
- self.assertEqual(400, resp.status_int)
-
- @mock.patch.object(compute.api.API, 'remove_fixed_ip',
- side_effect=exception.FixedIpNotFoundForSpecificInstance(
- instance_uuid=UUID, ip='10.10.10.1'))
- def test_remove_fixed_ip_not_found(self, _remove_fixed_ip):
-
- body = {'remove_fixed_ip': {'address': '10.10.10.1'}}
- req = webob.Request.blank(
- self._get_url() + '/servers/%s/action' % UUID)
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['content-type'] = 'application/json'
-
- resp = req.get_response(self.app)
- self.assertEqual(400, resp.status_int)
-
-
-class FixedIpTestV2(FixedIpTestV21):
- def setUp(self):
- super(FixedIpTestV2, self).setUp()
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Multinic'])
-
- def _get_app(self):
- return fakes.wsgi_app(init_only=('servers',))
-
- def test_remove_fixed_ip_invalid_address(self):
-        # NOTE(cyeoh): This test is disabled for the V2 API because it
-        # has poorer input validation.
- pass
diff --git a/nova/tests/api/openstack/compute/contrib/test_networks.py b/nova/tests/api/openstack/compute/contrib/test_networks.py
deleted file mode 100644
index b388ae1b5d..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_networks.py
+++ /dev/null
@@ -1,610 +0,0 @@
-# Copyright 2011 Grid Dynamics
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import datetime
-import math
-import uuid
-
-import iso8601
-import mock
-import netaddr
-from oslo.config import cfg
-import webob
-
-from nova.api.openstack.compute.contrib import networks_associate
-from nova.api.openstack.compute.contrib import os_networks as networks
-from nova.api.openstack.compute.plugins.v3 import networks as networks_v21
-from nova.api.openstack.compute.plugins.v3 import networks_associate as \
- networks_associate_v21
-from nova.api.openstack import extensions
-import nova.context
-from nova import exception
-from nova.network import manager
-from nova import objects
-from nova import test
-from nova.tests.api.openstack import fakes
-import nova.utils
-
-CONF = cfg.CONF
-
-UTC = iso8601.iso8601.Utc()
-FAKE_NETWORKS = [
- {
- 'bridge': 'br100', 'vpn_public_port': 1000,
- 'dhcp_start': '10.0.0.3', 'bridge_interface': 'eth0',
- 'updated_at': datetime.datetime(2011, 8, 16, 9, 26, 13, 48257,
- tzinfo=UTC),
- 'id': 1, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf047',
- 'cidr_v6': None, 'deleted_at': None,
- 'gateway': '10.0.0.1', 'label': 'mynet_0',
- 'project_id': '1234', 'rxtx_base': None,
- 'vpn_private_address': '10.0.0.2', 'deleted': False,
- 'vlan': 100, 'broadcast': '10.0.0.7',
- 'netmask': '255.255.255.248', 'injected': False,
- 'cidr': '10.0.0.0/29',
- 'vpn_public_address': '127.0.0.1', 'multi_host': False,
- 'dns1': None, 'dns2': None, 'host': 'nsokolov-desktop',
- 'gateway_v6': None, 'netmask_v6': None, 'priority': None,
- 'created_at': datetime.datetime(2011, 8, 15, 6, 19, 19, 387525,
- tzinfo=UTC),
- 'mtu': None, 'dhcp_server': '10.0.0.1', 'enable_dhcp': True,
- 'share_address': False,
- },
- {
- 'bridge': 'br101', 'vpn_public_port': 1001,
- 'dhcp_start': '10.0.0.11', 'bridge_interface': 'eth0',
- 'updated_at': None, 'id': 2, 'cidr_v6': None,
- 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf000',
- 'deleted_at': None, 'gateway': '10.0.0.9',
- 'label': 'mynet_1', 'project_id': None,
- 'vpn_private_address': '10.0.0.10', 'deleted': False,
- 'vlan': 101, 'broadcast': '10.0.0.15', 'rxtx_base': None,
- 'netmask': '255.255.255.248', 'injected': False,
- 'cidr': '10.0.0.10/29', 'vpn_public_address': None,
- 'multi_host': False, 'dns1': None, 'dns2': None, 'host': None,
- 'gateway_v6': None, 'netmask_v6': None, 'priority': None,
- 'created_at': datetime.datetime(2011, 8, 15, 6, 19, 19, 885495,
- tzinfo=UTC),
- 'mtu': None, 'dhcp_server': '10.0.0.9', 'enable_dhcp': True,
- 'share_address': False,
- },
-]
-
-
-FAKE_USER_NETWORKS = [
- {
- 'id': 1, 'cidr': '10.0.0.0/29', 'netmask': '255.255.255.248',
- 'gateway': '10.0.0.1', 'broadcast': '10.0.0.7', 'dns1': None,
- 'dns2': None, 'cidr_v6': None, 'gateway_v6': None, 'label': 'mynet_0',
- 'netmask_v6': None, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf047',
- },
- {
- 'id': 2, 'cidr': '10.0.0.10/29', 'netmask': '255.255.255.248',
- 'gateway': '10.0.0.9', 'broadcast': '10.0.0.15', 'dns1': None,
- 'dns2': None, 'cidr_v6': None, 'gateway_v6': None, 'label': 'mynet_1',
- 'netmask_v6': None, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf000',
- },
-]
-
-NEW_NETWORK = {
- "network": {
- "bridge_interface": "eth0",
- "cidr": "10.20.105.0/24",
- "label": "new net 111",
- "vlan_start": 111,
- "injected": False,
- "multi_host": False,
- 'mtu': None,
- 'dhcp_server': '10.0.0.1',
- 'enable_dhcp': True,
- 'share_address': False,
- }
-}
-
-
-class FakeNetworkAPI(object):
-
- _sentinel = object()
- _vlan_is_disabled = False
-
- def __init__(self):
- self.networks = copy.deepcopy(FAKE_NETWORKS)
-
- def disable_vlan(self):
- self._vlan_is_disabled = True
-
- def delete(self, context, network_id):
- if network_id == 'always_delete':
- return True
- if network_id == -1:
- raise exception.NetworkInUse(network_id=network_id)
- for i, network in enumerate(self.networks):
- if network['id'] == network_id:
- del self.networks[0]
- return True
- raise exception.NetworkNotFoundForUUID(uuid=network_id)
-
- def disassociate(self, context, network_uuid):
- for network in self.networks:
- if network.get('uuid') == network_uuid:
- network['project_id'] = None
- return True
- raise exception.NetworkNotFound(network_id=network_uuid)
-
- def associate(self, context, network_uuid, host=_sentinel,
- project=_sentinel):
- for network in self.networks:
- if network.get('uuid') == network_uuid:
- if host is not FakeNetworkAPI._sentinel:
- network['host'] = host
- if project is not FakeNetworkAPI._sentinel:
- network['project_id'] = project
- return True
- raise exception.NetworkNotFound(network_id=network_uuid)
-
- def add_network_to_project(self, context,
- project_id, network_uuid=None):
- if self._vlan_is_disabled:
- raise NotImplementedError()
- if network_uuid:
- for network in self.networks:
- if network.get('project_id', None) is None:
- network['project_id'] = project_id
- return
- return
- for network in self.networks:
- if network.get('uuid') == network_uuid:
- network['project_id'] = project_id
- return
-
- def get_all(self, context):
- return self._fake_db_network_get_all(context, project_only=True)
-
- def _fake_db_network_get_all(self, context, project_only="allow_none"):
- project_id = context.project_id
- nets = self.networks
- if nova.context.is_user_context(context) and project_only:
- if project_only == 'allow_none':
- nets = [n for n in self.networks
- if (n['project_id'] == project_id or
- n['project_id'] is None)]
- else:
- nets = [n for n in self.networks
- if n['project_id'] == project_id]
- objs = [objects.Network._from_db_object(context,
- objects.Network(),
- net)
- for net in nets]
- return objects.NetworkList(objects=objs)
-
- def get(self, context, network_id):
- for network in self.networks:
- if network.get('uuid') == network_id:
- return objects.Network._from_db_object(context,
- objects.Network(),
- network)
- raise exception.NetworkNotFound(network_id=network_id)
-
- def create(self, context, **kwargs):
- subnet_bits = int(math.ceil(math.log(kwargs.get(
- 'network_size', CONF.network_size), 2)))
- fixed_net_v4 = netaddr.IPNetwork(kwargs['cidr'])
- prefixlen_v4 = 32 - subnet_bits
- subnets_v4 = list(fixed_net_v4.subnet(
- prefixlen_v4,
- count=kwargs.get('num_networks', CONF.num_networks)))
- new_networks = []
- new_id = max((net['id'] for net in self.networks))
- for index, subnet_v4 in enumerate(subnets_v4):
- new_id += 1
- net = {'id': new_id, 'uuid': str(uuid.uuid4())}
-
- net['cidr'] = str(subnet_v4)
- net['netmask'] = str(subnet_v4.netmask)
- net['gateway'] = kwargs.get('gateway') or str(subnet_v4[1])
- net['broadcast'] = str(subnet_v4.broadcast)
- net['dhcp_start'] = str(subnet_v4[2])
-
- for key in FAKE_NETWORKS[0].iterkeys():
- net.setdefault(key, kwargs.get(key))
- new_networks.append(net)
- self.networks += new_networks
- return new_networks
-
-
-# NOTE(vish): tests that network create Exceptions actually return
-# the proper error responses
-class NetworkCreateExceptionsTestV21(test.TestCase):
- url_prefix = '/v2/1234'
-
- class PassthroughAPI(object):
- def __init__(self):
- self.network_manager = manager.FlatDHCPManager()
-
- def create(self, *args, **kwargs):
- if kwargs['label'] == 'fail_NetworkNotCreated':
- raise exception.NetworkNotCreated(req='fake_fail')
- return self.network_manager.create_networks(*args, **kwargs)
-
- def setUp(self):
- super(NetworkCreateExceptionsTestV21, self).setUp()
- self._setup()
- fakes.stub_out_networking(self.stubs)
- fakes.stub_out_rate_limiting(self.stubs)
-
- def _setup(self):
- self.controller = networks_v21.NetworkController(self.PassthroughAPI())
-
- def test_network_create_bad_vlan(self):
- req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
- net = copy.deepcopy(NEW_NETWORK)
- net['network']['vlan_start'] = 'foo'
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, req, net)
-
- def test_network_create_no_cidr(self):
- req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
- net = copy.deepcopy(NEW_NETWORK)
- net['network']['cidr'] = ''
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, req, net)
-
- def test_network_create_invalid_fixed_cidr(self):
- req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
- net = copy.deepcopy(NEW_NETWORK)
- net['network']['fixed_cidr'] = 'foo'
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, req, net)
-
- def test_network_create_invalid_start(self):
- req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
- net = copy.deepcopy(NEW_NETWORK)
- net['network']['allowed_start'] = 'foo'
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, req, net)
-
- def test_network_create_handle_network_not_created(self):
- req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
- net = copy.deepcopy(NEW_NETWORK)
- net['network']['label'] = 'fail_NetworkNotCreated'
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, req, net)
-
- def test_network_create_cidr_conflict(self):
-
- @staticmethod
- def get_all(context):
- ret = objects.NetworkList(context=context, objects=[])
- net = objects.Network(cidr='10.0.0.0/23')
- ret.objects.append(net)
- return ret
-
- self.stubs.Set(objects.NetworkList, 'get_all', get_all)
-
- req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
- net = copy.deepcopy(NEW_NETWORK)
- net['network']['cidr'] = '10.0.0.0/24'
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller.create, req, net)
-
-
-class NetworkCreateExceptionsTestV2(NetworkCreateExceptionsTestV21):
-
- def _setup(self):
- ext_mgr = extensions.ExtensionManager()
- ext_mgr.extensions = {'os-extended-networks': 'fake'}
-
- self.controller = networks.NetworkController(
- self.PassthroughAPI(), ext_mgr)
-
-
-class NetworksTestV21(test.NoDBTestCase):
- url_prefix = '/v2/1234'
-
- def setUp(self):
- super(NetworksTestV21, self).setUp()
- self.fake_network_api = FakeNetworkAPI()
- self._setup()
- fakes.stub_out_networking(self.stubs)
- fakes.stub_out_rate_limiting(self.stubs)
-
- def _setup(self):
- self.controller = networks_v21.NetworkController(
- self.fake_network_api)
-
- def _check_status(self, res, method, code):
- self.assertEqual(method.wsgi_code, 202)
-
- @staticmethod
- def network_uuid_to_id(network):
- network['id'] = network['uuid']
- del network['uuid']
-
- def test_network_list_all_as_user(self):
- self.maxDiff = None
- req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
- res_dict = self.controller.index(req)
- self.assertEqual(res_dict, {'networks': []})
-
- project_id = req.environ["nova.context"].project_id
- cxt = req.environ["nova.context"]
- uuid = FAKE_NETWORKS[0]['uuid']
- self.fake_network_api.associate(context=cxt,
- network_uuid=uuid,
- project=project_id)
- res_dict = self.controller.index(req)
- expected = [copy.deepcopy(FAKE_USER_NETWORKS[0])]
- for network in expected:
- self.network_uuid_to_id(network)
- self.assertEqual({'networks': expected}, res_dict)
-
- def test_network_list_all_as_admin(self):
- req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
- req.environ["nova.context"].is_admin = True
- res_dict = self.controller.index(req)
- expected = copy.deepcopy(FAKE_NETWORKS)
- for network in expected:
- self.network_uuid_to_id(network)
- self.assertEqual({'networks': expected}, res_dict)
-
- def test_network_disassociate(self):
- uuid = FAKE_NETWORKS[0]['uuid']
- req = fakes.HTTPRequest.blank(self.url_prefix +
- '/os-networks/%s/action' % uuid)
- res = self.controller._disassociate_host_and_project(
- req, uuid, {'disassociate': None})
- self._check_status(res, self.controller._disassociate_host_and_project,
- 202)
- self.assertIsNone(self.fake_network_api.networks[0]['project_id'])
- self.assertIsNone(self.fake_network_api.networks[0]['host'])
-
- def test_network_disassociate_not_found(self):
- req = fakes.HTTPRequest.blank(self.url_prefix +
- '/os-networks/100/action')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller._disassociate_host_and_project,
- req, 100, {'disassociate': None})
-
- def test_network_get_as_user(self):
- uuid = FAKE_USER_NETWORKS[0]['uuid']
- req = fakes.HTTPRequest.blank(self.url_prefix +
- '/os-networks/%s' % uuid)
- res_dict = self.controller.show(req, uuid)
- expected = {'network': copy.deepcopy(FAKE_USER_NETWORKS[0])}
- self.network_uuid_to_id(expected['network'])
- self.assertEqual(expected, res_dict)
-
- def test_network_get_as_admin(self):
- uuid = FAKE_NETWORKS[0]['uuid']
- req = fakes.HTTPRequest.blank(self.url_prefix +
- '/os-networks/%s' % uuid)
- req.environ["nova.context"].is_admin = True
- res_dict = self.controller.show(req, uuid)
- expected = {'network': copy.deepcopy(FAKE_NETWORKS[0])}
- self.network_uuid_to_id(expected['network'])
- self.assertEqual(expected, res_dict)
-
- def test_network_get_not_found(self):
- req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks/100')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.show, req, 100)
-
- def test_network_delete(self):
- uuid = FAKE_NETWORKS[0]['uuid']
- req = fakes.HTTPRequest.blank(self.url_prefix +
- '/os-networks/%s' % uuid)
- res = self.controller.delete(req, 1)
- self._check_status(res, self.controller._disassociate_host_and_project,
- 202)
-
- def test_network_delete_not_found(self):
- req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks/100')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.delete, req, 100)
-
- def test_network_delete_in_use(self):
- req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks/-1')
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller.delete, req, -1)
-
- def test_network_add(self):
- uuid = FAKE_NETWORKS[1]['uuid']
- req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks/add')
- res = self.controller.add(req, {'id': uuid})
- self._check_status(res, self.controller._disassociate_host_and_project,
- 202)
- req = fakes.HTTPRequest.blank(self.url_prefix +
- '/os-networks/%s' % uuid)
- req.environ["nova.context"].is_admin = True
- res_dict = self.controller.show(req, uuid)
- self.assertEqual(res_dict['network']['project_id'], 'fake')
-
- @mock.patch('nova.tests.api.openstack.compute.contrib.test_networks.'
- 'FakeNetworkAPI.add_network_to_project',
- side_effect=exception.NoMoreNetworks)
- def test_network_add_no_more_networks_fail(self, mock_add):
- uuid = FAKE_NETWORKS[1]['uuid']
- req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks/add')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.add, req,
- {'id': uuid})
-
- @mock.patch('nova.tests.api.openstack.compute.contrib.test_networks.'
- 'FakeNetworkAPI.add_network_to_project',
- side_effect=exception.NetworkNotFoundForUUID(uuid='fake_uuid'))
- def test_network_add_network_not_found_networks_fail(self, mock_add):
- uuid = FAKE_NETWORKS[1]['uuid']
- req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks/add')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.add, req,
- {'id': uuid})
-
- def test_network_create(self):
- req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
- res_dict = self.controller.create(req, NEW_NETWORK)
- self.assertIn('network', res_dict)
- uuid = res_dict['network']['id']
- req = fakes.HTTPRequest.blank(self.url_prefix +
- '/os-networks/%s' % uuid)
- res_dict = self.controller.show(req, uuid)
- self.assertTrue(res_dict['network']['label'].
- startswith(NEW_NETWORK['network']['label']))
-
- def test_network_create_large(self):
- req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
- large_network = copy.deepcopy(NEW_NETWORK)
- large_network['network']['cidr'] = '128.0.0.0/4'
- res_dict = self.controller.create(req, large_network)
- self.assertEqual(res_dict['network']['cidr'],
- large_network['network']['cidr'])
-
- def test_network_create_bad_cidr(self):
- req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
- net = copy.deepcopy(NEW_NETWORK)
- net['network']['cidr'] = '128.0.0.0/900'
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, req, net)
-
- def test_network_neutron_disassociate_not_implemented(self):
- uuid = FAKE_NETWORKS[1]['uuid']
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- controller = networks.NetworkController()
- req = fakes.HTTPRequest.blank(self.url_prefix +
- '/os-networks/%s/action' % uuid)
- self.assertRaises(webob.exc.HTTPNotImplemented,
- controller._disassociate_host_and_project,
- req, uuid, {'disassociate': None})
-
-
-class NetworksTestV2(NetworksTestV21):
-
- def _setup(self):
- ext_mgr = extensions.ExtensionManager()
- ext_mgr.extensions = {'os-extended-networks': 'fake'}
- self.controller = networks.NetworkController(self.fake_network_api,
- ext_mgr)
-
- def _check_status(self, res, method, code):
- self.assertEqual(res.status_int, 202)
-
- def test_network_create_not_extended(self):
- self.stubs.Set(self.controller, 'extended', False)
- # NOTE(vish): Verify that new params are not passed through if
- # extension is not enabled.
-
- def no_mtu(*args, **kwargs):
- if 'mtu' in kwargs:
- raise test.TestingException("mtu should not pass through")
- return [{}]
-
- self.stubs.Set(self.controller.network_api, 'create', no_mtu)
- req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
- net = copy.deepcopy(NEW_NETWORK)
- net['network']['mtu'] = 9000
- self.controller.create(req, net)
-
-
-class NetworksAssociateTestV21(test.NoDBTestCase):
-
- def setUp(self):
- super(NetworksAssociateTestV21, self).setUp()
- self.fake_network_api = FakeNetworkAPI()
- self._setup()
- fakes.stub_out_networking(self.stubs)
- fakes.stub_out_rate_limiting(self.stubs)
-
- def _setup(self):
- self.controller = networks.NetworkController(self.fake_network_api)
- self.associate_controller = networks_associate_v21\
- .NetworkAssociateActionController(self.fake_network_api)
-
- def _check_status(self, res, method, code):
- self.assertEqual(method.wsgi_code, code)
-
- def test_network_disassociate_host_only(self):
- uuid = FAKE_NETWORKS[0]['uuid']
- req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
- res = self.associate_controller._disassociate_host_only(
- req, uuid, {'disassociate_host': None})
- self._check_status(res,
- self.associate_controller._disassociate_host_only,
- 202)
- self.assertIsNotNone(self.fake_network_api.networks[0]['project_id'])
- self.assertIsNone(self.fake_network_api.networks[0]['host'])
-
- def test_network_disassociate_project_only(self):
- uuid = FAKE_NETWORKS[0]['uuid']
- req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
- res = self.associate_controller._disassociate_project_only(
- req, uuid, {'disassociate_project': None})
- self._check_status(
- res, self.associate_controller._disassociate_project_only, 202)
- self.assertIsNone(self.fake_network_api.networks[0]['project_id'])
- self.assertIsNotNone(self.fake_network_api.networks[0]['host'])
-
- def test_network_associate_with_host(self):
- uuid = FAKE_NETWORKS[1]['uuid']
- req = fakes.HTTPRequest.blank('/v2/1234//os-networks/%s/action' % uuid)
- res = self.associate_controller._associate_host(
- req, uuid, {'associate_host': "TestHost"})
- self._check_status(res, self.associate_controller._associate_host, 202)
- req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s' % uuid)
- req.environ["nova.context"].is_admin = True
- res_dict = self.controller.show(req, uuid)
- self.assertEqual(res_dict['network']['host'], 'TestHost')
-
- def test_network_neutron_associate_not_implemented(self):
- uuid = FAKE_NETWORKS[1]['uuid']
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- assoc_ctrl = networks_associate.NetworkAssociateActionController()
-
- req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
- self.assertRaises(webob.exc.HTTPNotImplemented,
- assoc_ctrl._associate_host,
- req, uuid, {'associate_host': "TestHost"})
-
- def test_network_neutron_disassociate_project_not_implemented(self):
- uuid = FAKE_NETWORKS[1]['uuid']
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- assoc_ctrl = networks_associate.NetworkAssociateActionController()
-
- req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
- self.assertRaises(webob.exc.HTTPNotImplemented,
- assoc_ctrl._disassociate_project_only,
- req, uuid, {'disassociate_project': None})
-
- def test_network_neutron_disassociate_host_not_implemented(self):
- uuid = FAKE_NETWORKS[1]['uuid']
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- assoc_ctrl = networks_associate.NetworkAssociateActionController()
- req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
- self.assertRaises(webob.exc.HTTPNotImplemented,
- assoc_ctrl._disassociate_host_only,
- req, uuid, {'disassociate_host': None})
-
-
-class NetworksAssociateTestV2(NetworksAssociateTestV21):
-
- def _setup(self):
- ext_mgr = extensions.ExtensionManager()
- ext_mgr.extensions = {'os-extended-networks': 'fake'}
- self.controller = networks.NetworkController(
- self.fake_network_api,
- ext_mgr)
- self.associate_controller = networks_associate\
- .NetworkAssociateActionController(self.fake_network_api)
-
- def _check_status(self, res, method, code):
- self.assertEqual(res.status_int, 202)
diff --git a/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py
deleted file mode 100644
index 2d818466be..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py
+++ /dev/null
@@ -1,918 +0,0 @@
-# Copyright 2013 Nicira, Inc.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import uuid
-
-from lxml import etree
-import mock
-from neutronclient.common import exceptions as n_exc
-from neutronclient.neutron import v2_0 as neutronv20
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute.contrib import security_groups
-from nova.api.openstack import xmlutil
-from nova import compute
-from nova import context
-import nova.db
-from nova import exception
-from nova.network import model
-from nova.network import neutronv2
-from nova.network.neutronv2 import api as neutron_api
-from nova.network.security_group import neutron_driver
-from nova.objects import instance as instance_obj
-from nova import test
-from nova.tests.api.openstack.compute.contrib import test_security_groups
-from nova.tests.api.openstack import fakes
-
-
-class TestNeutronSecurityGroupsTestCase(test.TestCase):
- def setUp(self):
- super(TestNeutronSecurityGroupsTestCase, self).setUp()
- cfg.CONF.set_override('security_group_api', 'neutron')
- self.original_client = neutronv2.get_client
- neutronv2.get_client = get_client
-
- def tearDown(self):
- neutronv2.get_client = self.original_client
- get_client()._reset()
- super(TestNeutronSecurityGroupsTestCase, self).tearDown()
-
-
-class TestNeutronSecurityGroupsV21(
- test_security_groups.TestSecurityGroupsV21,
- TestNeutronSecurityGroupsTestCase):
-
- def _create_sg_template(self, **kwargs):
- sg = test_security_groups.security_group_template(**kwargs)
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- return self.controller.create(req, {'security_group': sg})
-
- def _create_network(self):
- body = {'network': {'name': 'net1'}}
- neutron = get_client()
- net = neutron.create_network(body)
- body = {'subnet': {'network_id': net['network']['id'],
- 'cidr': '10.0.0.0/24'}}
- neutron.create_subnet(body)
- return net
-
- def _create_port(self, **kwargs):
- body = {'port': {'binding:vnic_type': model.VNIC_TYPE_NORMAL}}
- fields = ['security_groups', 'device_id', 'network_id',
- 'port_security_enabled']
- for field in fields:
- if field in kwargs:
- body['port'][field] = kwargs[field]
- neutron = get_client()
- return neutron.create_port(body)
-
- def _create_security_group(self, **kwargs):
- body = {'security_group': {}}
- fields = ['name', 'description']
- for field in fields:
- if field in kwargs:
- body['security_group'][field] = kwargs[field]
- neutron = get_client()
- return neutron.create_security_group(body)
-
- def test_create_security_group_with_no_description(self):
- # Neutron's security group description field is optional.
- pass
-
- def test_create_security_group_with_empty_description(self):
- # Neutron's security group description field is optional.
- pass
-
- def test_create_security_group_with_blank_name(self):
- # Neutron's security group name field is optional.
- pass
-
- def test_create_security_group_with_whitespace_name(self):
- # Neutron allows security group name to be whitespace.
- pass
-
- def test_create_security_group_with_blank_description(self):
- # Neutron's security group description field is optional.
- pass
-
- def test_create_security_group_with_whitespace_description(self):
- # Neutron allows description to be whitespace.
- pass
-
- def test_create_security_group_with_duplicate_name(self):
- # Neutron allows duplicate names for security groups.
- pass
-
- def test_create_security_group_non_string_name(self):
- # Neutron allows security group name to be non string.
- pass
-
- def test_create_security_group_non_string_description(self):
- # Neutron allows non string description.
- pass
-
- def test_create_security_group_quota_limit(self):
- # Enforced by Neutron server.
- pass
-
- def test_update_security_group(self):
- # Enforced by Neutron server.
- pass
-
- def test_get_security_group_list(self):
- self._create_sg_template().get('security_group')
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- list_dict = self.controller.index(req)
- self.assertEqual(len(list_dict['security_groups']), 2)
-
- def test_get_security_group_list_all_tenants(self):
- pass
-
- def test_get_security_group_by_instance(self):
- sg = self._create_sg_template().get('security_group')
- net = self._create_network()
- self._create_port(
- network_id=net['network']['id'], security_groups=[sg['id']],
- device_id=test_security_groups.FAKE_UUID1)
- expected = [{'rules': [], 'tenant_id': 'fake', 'id': sg['id'],
- 'name': 'test', 'description': 'test-description'}]
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- test_security_groups.return_server_by_uuid)
- req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/os-security-groups'
- % test_security_groups.FAKE_UUID1)
- res_dict = self.server_controller.index(
- req, test_security_groups.FAKE_UUID1)['security_groups']
- self.assertEqual(expected, res_dict)
-
- def test_get_security_group_by_id(self):
- sg = self._create_sg_template().get('security_group')
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
- % sg['id'])
- res_dict = self.controller.show(req, sg['id'])
- expected = {'security_group': sg}
- self.assertEqual(res_dict, expected)
-
- def test_delete_security_group_by_id(self):
- sg = self._create_sg_template().get('security_group')
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
- sg['id'])
- self.controller.delete(req, sg['id'])
-
- def test_delete_security_group_by_admin(self):
- sg = self._create_sg_template().get('security_group')
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
- sg['id'], use_admin_context=True)
- self.controller.delete(req, sg['id'])
-
- def test_delete_security_group_in_use(self):
- sg = self._create_sg_template().get('security_group')
- self._create_network()
- db_inst = fakes.stub_instance(id=1, nw_cache=[], security_groups=[])
- _context = context.get_admin_context()
- instance = instance_obj.Instance._from_db_object(
- _context, instance_obj.Instance(), db_inst,
- expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
- neutron = neutron_api.API()
- with mock.patch.object(nova.db, 'instance_get_by_uuid',
- return_value=db_inst):
- neutron.allocate_for_instance(_context, instance,
- security_groups=[sg['id']])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
- % sg['id'])
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
- req, sg['id'])
-
- def test_associate_non_running_instance(self):
- # Neutron does not care if the instance is running or not. When the
- # instances is detected by nuetron it will push down the security
- # group policy to it.
- pass
-
- def test_associate_already_associated_security_group_to_instance(self):
- # Neutron security groups does not raise an error if you update a
- # port adding a security group to it that was already associated
- # to the port. This is because PUT semantics are used.
- pass
-
- def test_associate(self):
- sg = self._create_sg_template().get('security_group')
- net = self._create_network()
- self._create_port(
- network_id=net['network']['id'], security_groups=[sg['id']],
- device_id=test_security_groups.FAKE_UUID1)
-
- self.stubs.Set(nova.db, 'instance_get',
- test_security_groups.return_server)
- body = dict(addSecurityGroup=dict(name="test"))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.manager._addSecurityGroup(req, '1', body)
-
- def test_associate_duplicate_names(self):
- sg1 = self._create_security_group(name='sg1',
- description='sg1')['security_group']
- self._create_security_group(name='sg1',
- description='sg1')['security_group']
- net = self._create_network()
- self._create_port(
- network_id=net['network']['id'], security_groups=[sg1['id']],
- device_id=test_security_groups.FAKE_UUID1)
-
- self.stubs.Set(nova.db, 'instance_get',
- test_security_groups.return_server)
- body = dict(addSecurityGroup=dict(name="sg1"))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.assertRaises(webob.exc.HTTPConflict,
- self.manager._addSecurityGroup, req, '1', body)
-
- def test_associate_port_security_enabled_true(self):
- sg = self._create_sg_template().get('security_group')
- net = self._create_network()
- self._create_port(
- network_id=net['network']['id'], security_groups=[sg['id']],
- port_security_enabled=True,
- device_id=test_security_groups.FAKE_UUID1)
-
- self.stubs.Set(nova.db, 'instance_get',
- test_security_groups.return_server)
- body = dict(addSecurityGroup=dict(name="test"))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.manager._addSecurityGroup(req, '1', body)
-
- def test_associate_port_security_enabled_false(self):
- self._create_sg_template().get('security_group')
- net = self._create_network()
- self._create_port(
- network_id=net['network']['id'], port_security_enabled=False,
- device_id=test_security_groups.FAKE_UUID1)
-
- self.stubs.Set(nova.db, 'instance_get',
- test_security_groups.return_server)
- body = dict(addSecurityGroup=dict(name="test"))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.manager._addSecurityGroup,
- req, '1', body)
-
- def test_disassociate_by_non_existing_security_group_name(self):
- self.stubs.Set(nova.db, 'instance_get',
- test_security_groups.return_server)
- body = dict(removeSecurityGroup=dict(name='non-existing'))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.manager._removeSecurityGroup, req, '1', body)
-
- def test_disassociate_non_running_instance(self):
- # Neutron does not care if the instance is running or not. When the
- # instances is detected by neutron it will push down the security
- # group policy to it.
- pass
-
- def test_disassociate_already_associated_security_group_to_instance(self):
- # Neutron security groups does not raise an error if you update a
- # port adding a security group to it that was already associated
- # to the port. This is because PUT semantics are used.
- pass
-
- def test_disassociate(self):
- sg = self._create_sg_template().get('security_group')
- net = self._create_network()
- self._create_port(
- network_id=net['network']['id'], security_groups=[sg['id']],
- device_id=test_security_groups.FAKE_UUID1)
-
- self.stubs.Set(nova.db, 'instance_get',
- test_security_groups.return_server)
- body = dict(removeSecurityGroup=dict(name="test"))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.manager._removeSecurityGroup(req, '1', body)
-
- def test_get_raises_no_unique_match_error(self):
-
- def fake_find_resourceid_by_name_or_id(client, param, name,
- project_id=None):
- raise n_exc.NeutronClientNoUniqueMatch()
-
- self.stubs.Set(neutronv20, 'find_resourceid_by_name_or_id',
- fake_find_resourceid_by_name_or_id)
- security_group_api = self.controller.security_group_api
- self.assertRaises(exception.NoUniqueMatch, security_group_api.get,
- context.get_admin_context(), 'foobar')
-
- def test_get_instances_security_groups_bindings(self):
- servers = [{'id': test_security_groups.FAKE_UUID1},
- {'id': test_security_groups.FAKE_UUID2}]
- sg1 = self._create_sg_template(name='test1').get('security_group')
- sg2 = self._create_sg_template(name='test2').get('security_group')
- # test name='' is replaced with id
- sg3 = self._create_sg_template(name='').get('security_group')
- net = self._create_network()
- self._create_port(
- network_id=net['network']['id'], security_groups=[sg1['id'],
- sg2['id']],
- device_id=test_security_groups.FAKE_UUID1)
- self._create_port(
- network_id=net['network']['id'], security_groups=[sg2['id'],
- sg3['id']],
- device_id=test_security_groups.FAKE_UUID2)
- expected = {test_security_groups.FAKE_UUID1: [{'name': sg1['name']},
- {'name': sg2['name']}],
- test_security_groups.FAKE_UUID2: [{'name': sg2['name']},
- {'name': sg3['id']}]}
- security_group_api = self.controller.security_group_api
- bindings = (
- security_group_api.get_instances_security_groups_bindings(
- context.get_admin_context(), servers))
- self.assertEqual(bindings, expected)
-
- def test_get_instance_security_groups(self):
- sg1 = self._create_sg_template(name='test1').get('security_group')
- sg2 = self._create_sg_template(name='test2').get('security_group')
- # test name='' is replaced with id
- sg3 = self._create_sg_template(name='').get('security_group')
- net = self._create_network()
- self._create_port(
- network_id=net['network']['id'], security_groups=[sg1['id'],
- sg2['id'],
- sg3['id']],
- device_id=test_security_groups.FAKE_UUID1)
-
- expected = [{'name': sg1['name']}, {'name': sg2['name']},
- {'name': sg3['id']}]
- security_group_api = self.controller.security_group_api
- sgs = security_group_api.get_instance_security_groups(
- context.get_admin_context(), test_security_groups.FAKE_UUID1)
- self.assertEqual(sgs, expected)
-
- @mock.patch('nova.network.security_group.neutron_driver.SecurityGroupAPI.'
- 'get_instances_security_groups_bindings')
- def test_get_security_group_empty_for_instance(self, neutron_sg_bind_mock):
- servers = [{'id': test_security_groups.FAKE_UUID1}]
- neutron_sg_bind_mock.return_value = {}
-
- security_group_api = self.controller.security_group_api
- ctx = context.get_admin_context()
- sgs = security_group_api.get_instance_security_groups(ctx,
- test_security_groups.FAKE_UUID1)
-
- neutron_sg_bind_mock.assert_called_once_with(ctx, servers, False)
- self.assertEqual([], sgs)
-
- def test_create_port_with_sg_and_port_security_enabled_true(self):
- sg1 = self._create_sg_template(name='test1').get('security_group')
- net = self._create_network()
- self._create_port(
- network_id=net['network']['id'], security_groups=[sg1['id']],
- port_security_enabled=True,
- device_id=test_security_groups.FAKE_UUID1)
- security_group_api = self.controller.security_group_api
- sgs = security_group_api.get_instance_security_groups(
- context.get_admin_context(), test_security_groups.FAKE_UUID1)
- self.assertEqual(sgs, [{'name': 'test1'}])
-
- def test_create_port_with_sg_and_port_security_enabled_false(self):
- sg1 = self._create_sg_template(name='test1').get('security_group')
- net = self._create_network()
- self.assertRaises(exception.SecurityGroupCannotBeApplied,
- self._create_port,
- network_id=net['network']['id'],
- security_groups=[sg1['id']],
- port_security_enabled=False,
- device_id=test_security_groups.FAKE_UUID1)
-
-
-class TestNeutronSecurityGroupsV2(TestNeutronSecurityGroupsV21):
- controller_cls = security_groups.SecurityGroupController
- server_secgrp_ctl_cls = security_groups.ServerSecurityGroupController
- secgrp_act_ctl_cls = security_groups.SecurityGroupActionController
-
-
-class TestNeutronSecurityGroupRulesTestCase(TestNeutronSecurityGroupsTestCase):
- def setUp(self):
- super(TestNeutronSecurityGroupRulesTestCase, self).setUp()
- id1 = '11111111-1111-1111-1111-111111111111'
- sg_template1 = test_security_groups.security_group_template(
- security_group_rules=[], id=id1)
- id2 = '22222222-2222-2222-2222-222222222222'
- sg_template2 = test_security_groups.security_group_template(
- security_group_rules=[], id=id2)
- self.controller_sg = security_groups.SecurityGroupController()
- neutron = get_client()
- neutron._fake_security_groups[id1] = sg_template1
- neutron._fake_security_groups[id2] = sg_template2
-
- def tearDown(self):
- neutronv2.get_client = self.original_client
- get_client()._reset()
- super(TestNeutronSecurityGroupsTestCase, self).tearDown()
-
-
-class _TestNeutronSecurityGroupRulesBase(object):
-
- def test_create_add_existing_rules_by_cidr(self):
- sg = test_security_groups.security_group_template()
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- self.controller_sg.create(req, {'security_group': sg})
- rule = test_security_groups.security_group_rule_template(
- cidr='15.0.0.0/8', parent_group_id=self.sg2['id'])
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.controller.create(req, {'security_group_rule': rule})
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_add_existing_rules_by_group_id(self):
- sg = test_security_groups.security_group_template()
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- self.controller_sg.create(req, {'security_group': sg})
- rule = test_security_groups.security_group_rule_template(
- group=self.sg1['id'], parent_group_id=self.sg2['id'])
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.controller.create(req, {'security_group_rule': rule})
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_delete(self):
- rule = test_security_groups.security_group_rule_template(
- parent_group_id=self.sg2['id'])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- res_dict = self.controller.create(req, {'security_group_rule': rule})
- security_group_rule = res_dict['security_group_rule']
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
- % security_group_rule['id'])
- self.controller.delete(req, security_group_rule['id'])
-
- def test_create_rule_quota_limit(self):
- # Enforced by neutron
- pass
-
-
-class TestNeutronSecurityGroupRulesV2(
- _TestNeutronSecurityGroupRulesBase,
- test_security_groups.TestSecurityGroupRulesV2,
- TestNeutronSecurityGroupRulesTestCase):
- pass
-
-
-class TestNeutronSecurityGroupRulesV21(
- _TestNeutronSecurityGroupRulesBase,
- test_security_groups.TestSecurityGroupRulesV21,
- TestNeutronSecurityGroupRulesTestCase):
- pass
-
-
-class TestNeutronSecurityGroupsXMLDeserializer(
- test_security_groups.TestSecurityGroupXMLDeserializer,
- TestNeutronSecurityGroupsTestCase):
- pass
-
-
-class TestNeutronSecurityGroupsXMLSerializer(
- test_security_groups.TestSecurityGroupXMLSerializer,
- TestNeutronSecurityGroupsTestCase):
- pass
-
-
-class TestNeutronSecurityGroupsOutputTest(TestNeutronSecurityGroupsTestCase):
- content_type = 'application/json'
-
- def setUp(self):
- super(TestNeutronSecurityGroupsOutputTest, self).setUp()
- fakes.stub_out_nw_api(self.stubs)
- self.controller = security_groups.SecurityGroupController()
- self.stubs.Set(compute.api.API, 'get',
- test_security_groups.fake_compute_get)
- self.stubs.Set(compute.api.API, 'get_all',
- test_security_groups.fake_compute_get_all)
- self.stubs.Set(compute.api.API, 'create',
- test_security_groups.fake_compute_create)
- self.stubs.Set(neutron_driver.SecurityGroupAPI,
- 'get_instances_security_groups_bindings',
- (test_security_groups.
- fake_get_instances_security_groups_bindings))
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Security_groups'])
-
- def _make_request(self, url, body=None):
- req = webob.Request.blank(url)
- if body:
- req.method = 'POST'
- req.body = self._encode_body(body)
- req.content_type = self.content_type
- req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
- return res
-
- def _encode_body(self, body):
- return jsonutils.dumps(body)
-
- def _get_server(self, body):
- return jsonutils.loads(body).get('server')
-
- def _get_servers(self, body):
- return jsonutils.loads(body).get('servers')
-
- def _get_groups(self, server):
- return server.get('security_groups')
-
- def test_create(self):
- url = '/v2/fake/servers'
- image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- security_groups = [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
- for security_group in security_groups:
- sg = test_security_groups.security_group_template(
- name=security_group['name'])
- self.controller.create(req, {'security_group': sg})
-
- server = dict(name='server_test', imageRef=image_uuid, flavorRef=2,
- security_groups=security_groups)
- res = self._make_request(url, {'server': server})
- self.assertEqual(res.status_int, 202)
- server = self._get_server(res.body)
- for i, group in enumerate(self._get_groups(server)):
- name = 'fake-2-%s' % i
- self.assertEqual(group.get('name'), name)
-
- def test_create_server_get_default_security_group(self):
- url = '/v2/fake/servers'
- image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
- server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
- res = self._make_request(url, {'server': server})
- self.assertEqual(res.status_int, 202)
- server = self._get_server(res.body)
- group = self._get_groups(server)[0]
- self.assertEqual(group.get('name'), 'default')
-
- def test_show(self):
- def fake_get_instance_security_groups(inst, context, id):
- return [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
-
- self.stubs.Set(neutron_driver.SecurityGroupAPI,
- 'get_instance_security_groups',
- fake_get_instance_security_groups)
-
- url = '/v2/fake/servers'
- image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- security_groups = [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
- for security_group in security_groups:
- sg = test_security_groups.security_group_template(
- name=security_group['name'])
- self.controller.create(req, {'security_group': sg})
- server = dict(name='server_test', imageRef=image_uuid, flavorRef=2,
- security_groups=security_groups)
-
- res = self._make_request(url, {'server': server})
- self.assertEqual(res.status_int, 202)
- server = self._get_server(res.body)
- for i, group in enumerate(self._get_groups(server)):
- name = 'fake-2-%s' % i
- self.assertEqual(group.get('name'), name)
-
- # Test that show (GET) returns the same information as create (POST)
- url = '/v2/fake/servers/' + test_security_groups.UUID3
- res = self._make_request(url)
- self.assertEqual(res.status_int, 200)
- server = self._get_server(res.body)
-
- for i, group in enumerate(self._get_groups(server)):
- name = 'fake-2-%s' % i
- self.assertEqual(group.get('name'), name)
-
- def test_detail(self):
- url = '/v2/fake/servers/detail'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- for i, server in enumerate(self._get_servers(res.body)):
- for j, group in enumerate(self._get_groups(server)):
- name = 'fake-%s-%s' % (i, j)
- self.assertEqual(group.get('name'), name)
-
- def test_no_instance_passthrough_404(self):
-
- def fake_compute_get(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id='fake')
-
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 404)
-
-
-class TestNeutronSecurityGroupsOutputXMLTest(
- TestNeutronSecurityGroupsOutputTest):
-
- content_type = 'application/xml'
-
- class MinimalCreateServerTemplate(xmlutil.TemplateBuilder):
- def construct(self):
- root = xmlutil.TemplateElement('server', selector='server')
- root.set('name')
- root.set('id')
- root.set('imageRef')
- root.set('flavorRef')
- elem = xmlutil.SubTemplateElement(root, 'security_groups')
- sg = xmlutil.SubTemplateElement(elem, 'security_group',
- selector='security_groups')
- sg.set('name')
- return xmlutil.MasterTemplate(root, 1,
- nsmap={None: xmlutil.XMLNS_V11})
-
- def _encode_body(self, body):
- serializer = self.MinimalCreateServerTemplate()
- return serializer.serialize(body)
-
- def _get_server(self, body):
- return etree.XML(body)
-
- def _get_servers(self, body):
- return etree.XML(body).getchildren()
-
- def _get_groups(self, server):
- # NOTE(vish): we are adding security groups without an extension
- # namespace so we don't break people using the existing
- # functionality, but that means we need to use find with
- # the existing server namespace.
- namespace = server.nsmap[None]
- return server.find('{%s}security_groups' % namespace).getchildren()
-
-
-def get_client(context=None, admin=False):
- return MockClient()
-
-
-class MockClient(object):
-
- # Needs to be global to survive multiple calls to get_client.
- _fake_security_groups = {}
- _fake_ports = {}
- _fake_networks = {}
- _fake_subnets = {}
- _fake_security_group_rules = {}
-
- def __init__(self):
- # add default security group
- if not len(self._fake_security_groups):
- ret = {'name': 'default', 'description': 'default',
- 'tenant_id': 'fake_tenant', 'security_group_rules': [],
- 'id': str(uuid.uuid4())}
- self._fake_security_groups[ret['id']] = ret
-
- def _reset(self):
- self._fake_security_groups.clear()
- self._fake_ports.clear()
- self._fake_networks.clear()
- self._fake_subnets.clear()
- self._fake_security_group_rules.clear()
-
- def create_security_group(self, body=None):
- s = body.get('security_group')
- if len(s.get('name')) > 255 or len(s.get('description')) > 255:
- msg = 'Security Group name great than 255'
- raise n_exc.NeutronClientException(message=msg, status_code=401)
- ret = {'name': s.get('name'), 'description': s.get('description'),
- 'tenant_id': 'fake', 'security_group_rules': [],
- 'id': str(uuid.uuid4())}
-
- self._fake_security_groups[ret['id']] = ret
- return {'security_group': ret}
-
- def create_network(self, body):
- n = body.get('network')
- ret = {'status': 'ACTIVE', 'subnets': [], 'name': n.get('name'),
- 'admin_state_up': n.get('admin_state_up', True),
- 'tenant_id': 'fake_tenant',
- 'id': str(uuid.uuid4())}
- if 'port_security_enabled' in n:
- ret['port_security_enabled'] = n['port_security_enabled']
- self._fake_networks[ret['id']] = ret
- return {'network': ret}
-
- def create_subnet(self, body):
- s = body.get('subnet')
- try:
- net = self._fake_networks[s.get('network_id')]
- except KeyError:
- msg = 'Network %s not found' % s.get('network_id')
- raise n_exc.NeutronClientException(message=msg, status_code=404)
- ret = {'name': s.get('name'), 'network_id': s.get('network_id'),
- 'tenant_id': 'fake_tenant', 'cidr': s.get('cidr'),
- 'id': str(uuid.uuid4()), 'gateway_ip': '10.0.0.1'}
- net['subnets'].append(ret['id'])
- self._fake_networks[net['id']] = net
- self._fake_subnets[ret['id']] = ret
- return {'subnet': ret}
-
- def create_port(self, body):
- p = body.get('port')
- ret = {'status': 'ACTIVE', 'id': str(uuid.uuid4()),
- 'mac_address': p.get('mac_address', 'fa:16:3e:b8:f5:fb'),
- 'device_id': p.get('device_id', str(uuid.uuid4())),
- 'admin_state_up': p.get('admin_state_up', True),
- 'security_groups': p.get('security_groups', []),
- 'network_id': p.get('network_id'),
- 'binding:vnic_type':
- p.get('binding:vnic_type') or model.VNIC_TYPE_NORMAL}
-
- network = self._fake_networks[p['network_id']]
- if 'port_security_enabled' in p:
- ret['port_security_enabled'] = p['port_security_enabled']
- elif 'port_security_enabled' in network:
- ret['port_security_enabled'] = network['port_security_enabled']
-
- port_security = ret.get('port_security_enabled', True)
- # port_security must be True if security groups are present
- if not port_security and ret['security_groups']:
- raise exception.SecurityGroupCannotBeApplied()
-
- if network['subnets']:
- ret['fixed_ips'] = [{'subnet_id': network['subnets'][0],
- 'ip_address': '10.0.0.1'}]
- if not ret['security_groups'] and (port_security is None or
- port_security is True):
- for security_group in self._fake_security_groups.values():
- if security_group['name'] == 'default':
- ret['security_groups'] = [security_group['id']]
- break
- self._fake_ports[ret['id']] = ret
- return {'port': ret}
-
- def create_security_group_rule(self, body):
- # does not handle bulk case so just picks rule[0]
- r = body.get('security_group_rules')[0]
- fields = ['direction', 'protocol', 'port_range_min', 'port_range_max',
- 'ethertype', 'remote_ip_prefix', 'tenant_id',
- 'security_group_id', 'remote_group_id']
- ret = {}
- for field in fields:
- ret[field] = r.get(field)
- ret['id'] = str(uuid.uuid4())
- self._fake_security_group_rules[ret['id']] = ret
- return {'security_group_rules': [ret]}
-
- def show_security_group(self, security_group, **_params):
- try:
- sg = self._fake_security_groups[security_group]
- except KeyError:
- msg = 'Security Group %s not found' % security_group
- raise n_exc.NeutronClientException(message=msg, status_code=404)
- for security_group_rule in self._fake_security_group_rules.values():
- if security_group_rule['security_group_id'] == sg['id']:
- sg['security_group_rules'].append(security_group_rule)
-
- return {'security_group': sg}
-
- def show_security_group_rule(self, security_group_rule, **_params):
- try:
- return {'security_group_rule':
- self._fake_security_group_rules[security_group_rule]}
- except KeyError:
- msg = 'Security Group rule %s not found' % security_group_rule
- raise n_exc.NeutronClientException(message=msg, status_code=404)
-
- def show_network(self, network, **_params):
- try:
- return {'network':
- self._fake_networks[network]}
- except KeyError:
- msg = 'Network %s not found' % network
- raise n_exc.NeutronClientException(message=msg, status_code=404)
-
- def show_port(self, port, **_params):
- try:
- return {'port':
- self._fake_ports[port]}
- except KeyError:
- msg = 'Port %s not found' % port
- raise n_exc.NeutronClientException(message=msg, status_code=404)
-
- def show_subnet(self, subnet, **_params):
- try:
- return {'subnet':
- self._fake_subnets[subnet]}
- except KeyError:
- msg = 'Port %s not found' % subnet
- raise n_exc.NeutronClientException(message=msg, status_code=404)
-
- def list_security_groups(self, **_params):
- ret = []
- for security_group in self._fake_security_groups.values():
- names = _params.get('name')
- if names:
- if not isinstance(names, list):
- names = [names]
- for name in names:
- if security_group.get('name') == name:
- ret.append(security_group)
- ids = _params.get('id')
- if ids:
- if not isinstance(ids, list):
- ids = [ids]
- for id in ids:
- if security_group.get('id') == id:
- ret.append(security_group)
- elif not (names or ids):
- ret.append(security_group)
- return {'security_groups': ret}
-
- def list_networks(self, **_params):
- # neutronv2/api.py _get_available_networks calls this assuming
- # search_opts filter "shared" is implemented and not ignored
- shared = _params.get("shared", None)
- if shared:
- return {'networks': []}
- else:
- return {'networks':
- [network for network in self._fake_networks.values()]}
-
- def list_ports(self, **_params):
- ret = []
- device_id = _params.get('device_id')
- for port in self._fake_ports.values():
- if device_id:
- if port['device_id'] in device_id:
- ret.append(port)
- else:
- ret.append(port)
- return {'ports': ret}
-
- def list_subnets(self, **_params):
- return {'subnets':
- [subnet for subnet in self._fake_subnets.values()]}
-
- def list_floatingips(self, **_params):
- return {'floatingips': []}
-
- def delete_security_group(self, security_group):
- self.show_security_group(security_group)
- ports = self.list_ports()
- for port in ports.get('ports'):
- for sg_port in port['security_groups']:
- if sg_port == security_group:
- msg = ('Unable to delete Security group %s in use'
- % security_group)
- raise n_exc.NeutronClientException(message=msg,
- status_code=409)
- del self._fake_security_groups[security_group]
-
- def delete_security_group_rule(self, security_group_rule):
- self.show_security_group_rule(security_group_rule)
- del self._fake_security_group_rules[security_group_rule]
-
- def delete_network(self, network):
- self.show_network(network)
- self._check_ports_on_network(network)
- for subnet in self._fake_subnets.values():
- if subnet['network_id'] == network:
- del self._fake_subnets[subnet['id']]
- del self._fake_networks[network]
-
- def delete_subnet(self, subnet):
- subnet = self.show_subnet(subnet).get('subnet')
- self._check_ports_on_network(subnet['network_id'])
- del self._fake_subnet[subnet]
-
- def delete_port(self, port):
- self.show_port(port)
- del self._fake_ports[port]
-
- def update_port(self, port, body=None):
- self.show_port(port)
- self._fake_ports[port].update(body['port'])
- return {'port': self._fake_ports[port]}
-
- def list_extensions(self, **_parms):
- return {'extensions': []}
-
- def _check_ports_on_network(self, network):
- ports = self.list_ports()
- for port in ports:
- if port['network_id'] == network:
- msg = ('Unable to complete operation on network %s. There is '
- 'one or more ports still in use on the network'
- % network)
- raise n_exc.NeutronClientException(message=msg, status_code=409)
diff --git a/nova/tests/api/openstack/compute/contrib/test_quota_classes.py b/nova/tests/api/openstack/compute/contrib/test_quota_classes.py
deleted file mode 100644
index 2767d66518..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_quota_classes.py
+++ /dev/null
@@ -1,222 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-import webob
-
-from nova.api.openstack.compute.contrib import quota_classes
-from nova.api.openstack import extensions
-from nova.api.openstack import wsgi
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-def quota_set(class_name):
- return {'quota_class_set': {'id': class_name, 'metadata_items': 128,
- 'ram': 51200, 'floating_ips': 10,
- 'fixed_ips': -1, 'instances': 10,
- 'injected_files': 5, 'cores': 20,
- 'injected_file_content_bytes': 10240,
- 'security_groups': 10,
- 'security_group_rules': 20, 'key_pairs': 100,
- 'injected_file_path_bytes': 255}}
-
-
-class QuotaClassSetsTest(test.TestCase):
-
- def setUp(self):
- super(QuotaClassSetsTest, self).setUp()
- self.ext_mgr = extensions.ExtensionManager()
- self.ext_mgr.extensions = {}
- self.controller = quota_classes.QuotaClassSetsController(self.ext_mgr)
-
- def test_format_quota_set(self):
- raw_quota_set = {
- 'instances': 10,
- 'cores': 20,
- 'ram': 51200,
- 'floating_ips': 10,
- 'fixed_ips': -1,
- 'metadata_items': 128,
- 'injected_files': 5,
- 'injected_file_path_bytes': 255,
- 'injected_file_content_bytes': 10240,
- 'security_groups': 10,
- 'security_group_rules': 20,
- 'key_pairs': 100,
- }
-
- quota_set = self.controller._format_quota_set('test_class',
- raw_quota_set)
- qs = quota_set['quota_class_set']
-
- self.assertEqual(qs['id'], 'test_class')
- self.assertEqual(qs['instances'], 10)
- self.assertEqual(qs['cores'], 20)
- self.assertEqual(qs['ram'], 51200)
- self.assertEqual(qs['floating_ips'], 10)
- self.assertEqual(qs['fixed_ips'], -1)
- self.assertEqual(qs['metadata_items'], 128)
- self.assertEqual(qs['injected_files'], 5)
- self.assertEqual(qs['injected_file_path_bytes'], 255)
- self.assertEqual(qs['injected_file_content_bytes'], 10240)
- self.assertEqual(qs['security_groups'], 10)
- self.assertEqual(qs['security_group_rules'], 20)
- self.assertEqual(qs['key_pairs'], 100)
-
- def test_quotas_show_as_admin(self):
- req = fakes.HTTPRequest.blank(
- '/v2/fake4/os-quota-class-sets/test_class',
- use_admin_context=True)
- res_dict = self.controller.show(req, 'test_class')
-
- self.assertEqual(res_dict, quota_set('test_class'))
-
- def test_quotas_show_as_unauthorized_user(self):
- req = fakes.HTTPRequest.blank(
- '/v2/fake4/os-quota-class-sets/test_class')
- self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
- req, 'test_class')
-
- def test_quotas_update_as_admin(self):
- body = {'quota_class_set': {'instances': 50, 'cores': 50,
- 'ram': 51200, 'floating_ips': 10,
- 'fixed_ips': -1, 'metadata_items': 128,
- 'injected_files': 5,
- 'injected_file_content_bytes': 10240,
- 'injected_file_path_bytes': 255,
- 'security_groups': 10,
- 'security_group_rules': 20,
- 'key_pairs': 100}}
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake4/os-quota-class-sets/test_class',
- use_admin_context=True)
- res_dict = self.controller.update(req, 'test_class', body)
-
- self.assertEqual(res_dict, body)
-
- def test_quotas_update_as_user(self):
- body = {'quota_class_set': {'instances': 50, 'cores': 50,
- 'ram': 51200, 'floating_ips': 10,
- 'fixed_ips': -1, 'metadata_items': 128,
- 'injected_files': 5,
- 'injected_file_content_bytes': 10240,
- 'security_groups': 10,
- 'security_group_rules': 20,
- 'key_pairs': 100,
- }}
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake4/os-quota-class-sets/test_class')
- self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
- req, 'test_class', body)
-
- def test_quotas_update_with_empty_body(self):
- body = {}
- req = fakes.HTTPRequest.blank(
- '/v2/fake4/os-quota-class-sets/test_class',
- use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, 'test_class', body)
-
- def test_quotas_update_with_non_integer(self):
- body = {'quota_class_set': {'instances': "abc"}}
- req = fakes.HTTPRequest.blank(
- '/v2/fake4/os-quota-class-sets/test_class',
- use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, 'test_class', body)
-
- body = {'quota_class_set': {'instances': 50.5}}
- req = fakes.HTTPRequest.blank(
- '/v2/fake4/os-quota-class-sets/test_class',
- use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, 'test_class', body)
-
- body = {'quota_class_set': {
- 'instances': u'\u30aa\u30fc\u30d7\u30f3'}}
- req = fakes.HTTPRequest.blank(
- '/v2/fake4/os-quota-class-sets/test_class',
- use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, 'test_class', body)
-
-
-class QuotaTemplateXMLSerializerTest(test.TestCase):
- def setUp(self):
- super(QuotaTemplateXMLSerializerTest, self).setUp()
- self.serializer = quota_classes.QuotaClassTemplate()
- self.deserializer = wsgi.XMLDeserializer()
-
- def test_serializer(self):
- exemplar = dict(quota_class_set=dict(
- id='test_class',
- metadata_items=10,
- injected_file_path_bytes=255,
- injected_file_content_bytes=20,
- ram=50,
- floating_ips=60,
- fixed_ips=-1,
- instances=70,
- injected_files=80,
- security_groups=10,
- security_group_rules=20,
- key_pairs=100,
- cores=90))
- text = self.serializer.serialize(exemplar)
-
- tree = etree.fromstring(text)
-
- self.assertEqual('quota_class_set', tree.tag)
- self.assertEqual('test_class', tree.get('id'))
- self.assertEqual(len(exemplar['quota_class_set']) - 1, len(tree))
- for child in tree:
- self.assertIn(child.tag, exemplar['quota_class_set'])
- self.assertEqual(int(child.text),
- exemplar['quota_class_set'][child.tag])
-
- def test_deserializer(self):
- exemplar = dict(quota_class_set=dict(
- metadata_items='10',
- injected_file_content_bytes='20',
- ram='50',
- floating_ips='60',
- fixed_ips='-1',
- instances='70',
- injected_files='80',
- security_groups='10',
- security_group_rules='20',
- key_pairs='100',
- cores='90'))
- intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
- '<quota_class_set>'
- '<metadata_items>10</metadata_items>'
- '<injected_file_content_bytes>20'
- '</injected_file_content_bytes>'
- '<ram>50</ram>'
- '<floating_ips>60</floating_ips>'
- '<fixed_ips>-1</fixed_ips>'
- '<instances>70</instances>'
- '<injected_files>80</injected_files>'
- '<cores>90</cores>'
- '<security_groups>10</security_groups>'
- '<security_group_rules>20</security_group_rules>'
- '<key_pairs>100</key_pairs>'
- '</quota_class_set>')
-
- result = self.deserializer.deserialize(intext)['body']
- self.assertEqual(result, exemplar)
diff --git a/nova/tests/api/openstack/compute/contrib/test_quotas.py b/nova/tests/api/openstack/compute/contrib/test_quotas.py
deleted file mode 100644
index 37d3ab2ecb..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_quotas.py
+++ /dev/null
@@ -1,648 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-from lxml import etree
-import mock
-import webob
-
-from nova.api.openstack.compute.contrib import quotas as quotas_v2
-from nova.api.openstack.compute.plugins.v3 import quota_sets as quotas_v21
-from nova.api.openstack import extensions
-from nova.api.openstack import wsgi
-from nova import context as context_maker
-from nova import exception
-from nova import quota
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-def quota_set(id, include_server_group_quotas=True):
- res = {'quota_set': {'id': id, 'metadata_items': 128,
- 'ram': 51200, 'floating_ips': 10, 'fixed_ips': -1,
- 'instances': 10, 'injected_files': 5, 'cores': 20,
- 'injected_file_content_bytes': 10240,
- 'security_groups': 10, 'security_group_rules': 20,
- 'key_pairs': 100, 'injected_file_path_bytes': 255}}
- if include_server_group_quotas:
- res['quota_set']['server_groups'] = 10
- res['quota_set']['server_group_members'] = 10
- return res
-
-
-class BaseQuotaSetsTest(test.TestCase):
-
- def _is_v20_api_test(self):
- # NOTE(oomichi): If a test is for v2.0 API, this method returns
- # True. Otherwise(v2.1 API test), returns False.
- return (self.plugin == quotas_v2)
-
- def get_update_expected_response(self, base_body):
- # NOTE(oomichi): "id" parameter is added to a response of
- # "update quota" API since v2.1 API, because it makes the
- # API consistent and it is not backwards incompatible change.
- # This method adds "id" for an expected body of a response.
- if self._is_v20_api_test():
- expected_body = base_body
- else:
- expected_body = copy.deepcopy(base_body)
- expected_body['quota_set'].update({'id': 'update_me'})
- return expected_body
-
- def setup_mock_for_show(self):
- if self._is_v20_api_test():
- self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
- self.mox.ReplayAll()
-
- def setup_mock_for_update(self):
- if self._is_v20_api_test():
- self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
- self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
- self.mox.ReplayAll()
-
- def get_delete_status_int(self, res):
- if self._is_v20_api_test():
- return res.status_int
- else:
- # NOTE: on v2.1, http status code is set as wsgi_code of API
- # method instead of status_int in a response object.
- return self.controller.delete.wsgi_code
-
-
-class QuotaSetsTestV21(BaseQuotaSetsTest):
- plugin = quotas_v21
- validation_error = exception.ValidationError
- include_server_group_quotas = True
-
- def setUp(self):
- super(QuotaSetsTestV21, self).setUp()
- self._setup_controller()
- self.default_quotas = {
- 'instances': 10,
- 'cores': 20,
- 'ram': 51200,
- 'floating_ips': 10,
- 'fixed_ips': -1,
- 'metadata_items': 128,
- 'injected_files': 5,
- 'injected_file_path_bytes': 255,
- 'injected_file_content_bytes': 10240,
- 'security_groups': 10,
- 'security_group_rules': 20,
- 'key_pairs': 100,
- }
- if self.include_server_group_quotas:
- self.default_quotas['server_groups'] = 10
- self.default_quotas['server_group_members'] = 10
-
- def _setup_controller(self):
- self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
- self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
-
- def test_format_quota_set(self):
- quota_set = self.controller._format_quota_set('1234',
- self.default_quotas)
- qs = quota_set['quota_set']
-
- self.assertEqual(qs['id'], '1234')
- self.assertEqual(qs['instances'], 10)
- self.assertEqual(qs['cores'], 20)
- self.assertEqual(qs['ram'], 51200)
- self.assertEqual(qs['floating_ips'], 10)
- self.assertEqual(qs['fixed_ips'], -1)
- self.assertEqual(qs['metadata_items'], 128)
- self.assertEqual(qs['injected_files'], 5)
- self.assertEqual(qs['injected_file_path_bytes'], 255)
- self.assertEqual(qs['injected_file_content_bytes'], 10240)
- self.assertEqual(qs['security_groups'], 10)
- self.assertEqual(qs['security_group_rules'], 20)
- self.assertEqual(qs['key_pairs'], 100)
- if self.include_server_group_quotas:
- self.assertEqual(qs['server_groups'], 10)
- self.assertEqual(qs['server_group_members'], 10)
-
- def test_quotas_defaults(self):
- uri = '/v2/fake_tenant/os-quota-sets/fake_tenant/defaults'
-
- req = fakes.HTTPRequest.blank(uri)
- res_dict = self.controller.defaults(req, 'fake_tenant')
- self.default_quotas.update({'id': 'fake_tenant'})
- expected = {'quota_set': self.default_quotas}
-
- self.assertEqual(res_dict, expected)
-
- def test_quotas_show_as_admin(self):
- self.setup_mock_for_show()
- req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234',
- use_admin_context=True)
- res_dict = self.controller.show(req, 1234)
-
- ref_quota_set = quota_set('1234', self.include_server_group_quotas)
- self.assertEqual(res_dict, ref_quota_set)
-
- def test_quotas_show_as_unauthorized_user(self):
- self.setup_mock_for_show()
- req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
- self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
- req, 1234)
-
- def test_quotas_update_as_admin(self):
- self.setup_mock_for_update()
- self.default_quotas.update({
- 'instances': 50,
- 'cores': 50
- })
- body = {'quota_set': self.default_quotas}
- expected_body = self.get_update_expected_response(body)
-
- req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
- use_admin_context=True)
- res_dict = self.controller.update(req, 'update_me', body=body)
- self.assertEqual(expected_body, res_dict)
-
- def test_quotas_update_zero_value_as_admin(self):
- self.setup_mock_for_update()
- body = {'quota_set': {'instances': 0, 'cores': 0,
- 'ram': 0, 'floating_ips': 0,
- 'metadata_items': 0,
- 'injected_files': 0,
- 'injected_file_content_bytes': 0,
- 'injected_file_path_bytes': 0,
- 'security_groups': 0,
- 'security_group_rules': 0,
- 'key_pairs': 100, 'fixed_ips': -1}}
- if self.include_server_group_quotas:
- body['quota_set']['server_groups'] = 10
- body['quota_set']['server_group_members'] = 10
- expected_body = self.get_update_expected_response(body)
-
- req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
- use_admin_context=True)
- res_dict = self.controller.update(req, 'update_me', body=body)
- self.assertEqual(expected_body, res_dict)
-
- def test_quotas_update_as_user(self):
- self.setup_mock_for_update()
- self.default_quotas.update({
- 'instances': 50,
- 'cores': 50
- })
- body = {'quota_set': self.default_quotas}
-
- req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me')
- self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
- req, 'update_me', body=body)
-
- def _quotas_update_bad_request_case(self, body):
- self.setup_mock_for_update()
- req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
- use_admin_context=True)
- self.assertRaises(self.validation_error, self.controller.update,
- req, 'update_me', body=body)
-
- def test_quotas_update_invalid_key(self):
- body = {'quota_set': {'instances2': -2, 'cores': -2,
- 'ram': -2, 'floating_ips': -2,
- 'metadata_items': -2, 'injected_files': -2,
- 'injected_file_content_bytes': -2}}
- self._quotas_update_bad_request_case(body)
-
- def test_quotas_update_invalid_limit(self):
- body = {'quota_set': {'instances': -2, 'cores': -2,
- 'ram': -2, 'floating_ips': -2, 'fixed_ips': -2,
- 'metadata_items': -2, 'injected_files': -2,
- 'injected_file_content_bytes': -2}}
- self._quotas_update_bad_request_case(body)
-
- def test_quotas_update_empty_body(self):
- body = {}
- self._quotas_update_bad_request_case(body)
-
- def test_quotas_update_invalid_value_non_int(self):
- # when PUT non integer value
- self.default_quotas.update({
- 'instances': 'test'
- })
- body = {'quota_set': self.default_quotas}
- self._quotas_update_bad_request_case(body)
-
- def test_quotas_update_invalid_value_with_float(self):
- # when PUT non integer value
- self.default_quotas.update({
- 'instances': 50.5
- })
- body = {'quota_set': self.default_quotas}
- self._quotas_update_bad_request_case(body)
-
- def test_quotas_update_invalid_value_with_unicode(self):
- # when PUT non integer value
- self.default_quotas.update({
- 'instances': u'\u30aa\u30fc\u30d7\u30f3'
- })
- body = {'quota_set': self.default_quotas}
- self._quotas_update_bad_request_case(body)
-
- def test_quotas_delete_as_unauthorized_user(self):
- if self._is_v20_api_test():
- self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
- self.mox.ReplayAll()
- req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
- self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
- req, 1234)
-
- def test_quotas_delete_as_admin(self):
- if self._is_v20_api_test():
- self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
- context = context_maker.get_admin_context()
- self.req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
- self.req.environ['nova.context'] = context
- self.mox.StubOutWithMock(quota.QUOTAS,
- "destroy_all_by_project")
- quota.QUOTAS.destroy_all_by_project(context, 1234)
- self.mox.ReplayAll()
- res = self.controller.delete(self.req, 1234)
- self.mox.VerifyAll()
- self.assertEqual(202, self.get_delete_status_int(res))
-
-
-class QuotaXMLSerializerTest(test.TestCase):
- def setUp(self):
- super(QuotaXMLSerializerTest, self).setUp()
- self.serializer = quotas_v2.QuotaTemplate()
- self.deserializer = wsgi.XMLDeserializer()
-
- def test_serializer(self):
- exemplar = dict(quota_set=dict(
- id='project_id',
- metadata_items=10,
- injected_file_path_bytes=255,
- injected_file_content_bytes=20,
- ram=50,
- floating_ips=60,
- fixed_ips=-1,
- instances=70,
- injected_files=80,
- security_groups=10,
- security_group_rules=20,
- key_pairs=100,
- cores=90))
- text = self.serializer.serialize(exemplar)
-
- tree = etree.fromstring(text)
-
- self.assertEqual('quota_set', tree.tag)
- self.assertEqual('project_id', tree.get('id'))
- self.assertEqual(len(exemplar['quota_set']) - 1, len(tree))
- for child in tree:
- self.assertIn(child.tag, exemplar['quota_set'])
- self.assertEqual(int(child.text), exemplar['quota_set'][child.tag])
-
- def test_deserializer(self):
- exemplar = dict(quota_set=dict(
- metadata_items='10',
- injected_file_content_bytes='20',
- ram='50',
- floating_ips='60',
- fixed_ips='-1',
- instances='70',
- injected_files='80',
- security_groups='10',
- security_group_rules='20',
- key_pairs='100',
- cores='90'))
- intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
- '<quota_set>'
- '<metadata_items>10</metadata_items>'
- '<injected_file_content_bytes>20'
- '</injected_file_content_bytes>'
- '<ram>50</ram>'
- '<floating_ips>60</floating_ips>'
- '<fixed_ips>-1</fixed_ips>'
- '<instances>70</instances>'
- '<injected_files>80</injected_files>'
- '<security_groups>10</security_groups>'
- '<security_group_rules>20</security_group_rules>'
- '<key_pairs>100</key_pairs>'
- '<cores>90</cores>'
- '</quota_set>')
-
- result = self.deserializer.deserialize(intext)['body']
- self.assertEqual(result, exemplar)
-
-
-class ExtendedQuotasTestV21(BaseQuotaSetsTest):
- plugin = quotas_v21
-
- def setUp(self):
- super(ExtendedQuotasTestV21, self).setUp()
- self._setup_controller()
- self.setup_mock_for_update()
-
- fake_quotas = {'ram': {'limit': 51200,
- 'in_use': 12800,
- 'reserved': 12800},
- 'cores': {'limit': 20,
- 'in_use': 10,
- 'reserved': 5},
- 'instances': {'limit': 100,
- 'in_use': 0,
- 'reserved': 0}}
-
- def _setup_controller(self):
- self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
- self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
-
- def fake_get_quotas(self, context, id, user_id=None, usages=False):
- if usages:
- return self.fake_quotas
- else:
- return dict((k, v['limit']) for k, v in self.fake_quotas.items())
-
- def fake_get_settable_quotas(self, context, project_id, user_id=None):
- return {
- 'ram': {'minimum': self.fake_quotas['ram']['in_use'] +
- self.fake_quotas['ram']['reserved'],
- 'maximum': -1},
- 'cores': {'minimum': self.fake_quotas['cores']['in_use'] +
- self.fake_quotas['cores']['reserved'],
- 'maximum': -1},
- 'instances': {'minimum': self.fake_quotas['instances']['in_use'] +
- self.fake_quotas['instances']['reserved'],
- 'maximum': -1},
- }
-
- def test_quotas_update_exceed_in_used(self):
- patcher = mock.patch.object(quota.QUOTAS, 'get_settable_quotas')
- get_settable_quotas = patcher.start()
-
- body = {'quota_set': {'cores': 10}}
-
- get_settable_quotas.side_effect = self.fake_get_settable_quotas
- req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
- use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, 'update_me', body=body)
- mock.patch.stopall()
-
- def test_quotas_force_update_exceed_in_used(self):
- patcher = mock.patch.object(quota.QUOTAS, 'get_settable_quotas')
- get_settable_quotas = patcher.start()
- patcher = mock.patch.object(self.plugin.QuotaSetsController,
- '_get_quotas')
- _get_quotas = patcher.start()
-
- body = {'quota_set': {'cores': 10, 'force': 'True'}}
-
- get_settable_quotas.side_effect = self.fake_get_settable_quotas
- _get_quotas.side_effect = self.fake_get_quotas
- req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
- use_admin_context=True)
- self.controller.update(req, 'update_me', body=body)
- mock.patch.stopall()
-
-
-class UserQuotasTestV21(BaseQuotaSetsTest):
- plugin = quotas_v21
- include_server_group_quotas = True
-
- def setUp(self):
- super(UserQuotasTestV21, self).setUp()
- self._setup_controller()
-
- def _setup_controller(self):
- self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
- self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
-
- def test_user_quotas_show_as_admin(self):
- self.setup_mock_for_show()
- req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1',
- use_admin_context=True)
- res_dict = self.controller.show(req, 1234)
- ref_quota_set = quota_set('1234', self.include_server_group_quotas)
- self.assertEqual(res_dict, ref_quota_set)
-
- def test_user_quotas_show_as_unauthorized_user(self):
- self.setup_mock_for_show()
- req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1')
- self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
- req, 1234)
-
- def test_user_quotas_update_as_admin(self):
- self.setup_mock_for_update()
- body = {'quota_set': {'instances': 10, 'cores': 20,
- 'ram': 51200, 'floating_ips': 10,
- 'fixed_ips': -1, 'metadata_items': 128,
- 'injected_files': 5,
- 'injected_file_content_bytes': 10240,
- 'injected_file_path_bytes': 255,
- 'security_groups': 10,
- 'security_group_rules': 20,
- 'key_pairs': 100}}
- if self.include_server_group_quotas:
- body['quota_set']['server_groups'] = 10
- body['quota_set']['server_group_members'] = 10
-
- expected_body = self.get_update_expected_response(body)
-
- url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
- req = fakes.HTTPRequest.blank(url, use_admin_context=True)
- res_dict = self.controller.update(req, 'update_me', body=body)
-
- self.assertEqual(expected_body, res_dict)
-
- def test_user_quotas_update_as_user(self):
- self.setup_mock_for_update()
- body = {'quota_set': {'instances': 10, 'cores': 20,
- 'ram': 51200, 'floating_ips': 10,
- 'fixed_ips': -1, 'metadata_items': 128,
- 'injected_files': 5,
- 'injected_file_content_bytes': 10240,
- 'security_groups': 10,
- 'security_group_rules': 20,
- 'key_pairs': 100,
- 'server_groups': 10,
- 'server_group_members': 10}}
-
- url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
- req = fakes.HTTPRequest.blank(url)
- self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
- req, 'update_me', body=body)
-
- def test_user_quotas_update_exceed_project(self):
- self.setup_mock_for_update()
- body = {'quota_set': {'instances': 20}}
-
- url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
- req = fakes.HTTPRequest.blank(url, use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, 'update_me', body=body)
-
- def test_user_quotas_delete_as_unauthorized_user(self):
- self.setup_mock_for_update()
- req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1')
- self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
- req, 1234)
-
- def test_user_quotas_delete_as_admin(self):
- if self._is_v20_api_test():
- self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
- self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
- context = context_maker.get_admin_context()
- url = '/v2/fake4/os-quota-sets/1234?user_id=1'
- self.req = fakes.HTTPRequest.blank(url)
- self.req.environ['nova.context'] = context
- self.mox.StubOutWithMock(quota.QUOTAS,
- "destroy_all_by_project_and_user")
- quota.QUOTAS.destroy_all_by_project_and_user(context, 1234, '1')
- self.mox.ReplayAll()
- res = self.controller.delete(self.req, 1234)
- self.mox.VerifyAll()
- self.assertEqual(202, self.get_delete_status_int(res))
-
-
-class QuotaSetsTestV2(QuotaSetsTestV21):
- plugin = quotas_v2
- validation_error = webob.exc.HTTPBadRequest
-
- def _setup_controller(self):
- self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
- self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes().\
- AndReturn(self.include_server_group_quotas)
- self.mox.ReplayAll()
- self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
- self.mox.ResetAll()
-
- # NOTE: The following tests are tricky and v2.1 API does not allow
- # this kind of input by strong input validation. Just for test coverage,
- # we keep them now.
- def test_quotas_update_invalid_value_json_fromat_empty_string(self):
- self.setup_mock_for_update()
- self.default_quotas.update({
- 'instances': 50,
- 'cores': 50
- })
- expected_resp = {'quota_set': self.default_quotas}
-
- # when PUT JSON format with empty string for quota
- body = copy.deepcopy(expected_resp)
- body['quota_set']['ram'] = ''
- req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
- use_admin_context=True)
- res_dict = self.controller.update(req, 'update_me', body)
- self.assertEqual(res_dict, expected_resp)
-
- def test_quotas_update_invalid_value_xml_fromat_empty_string(self):
- self.default_quotas.update({
- 'instances': 50,
- 'cores': 50
- })
- expected_resp = {'quota_set': self.default_quotas}
-
- # when PUT XML format with empty string for quota
- body = copy.deepcopy(expected_resp)
- body['quota_set']['ram'] = {}
- req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
- use_admin_context=True)
- self.setup_mock_for_update()
- res_dict = self.controller.update(req, 'update_me', body)
- self.assertEqual(res_dict, expected_resp)
-
- # NOTE: os-extended-quotas and os-user-quotas are only for v2.0.
- # On v2.1, these features are always enable. So we need the following
- # tests only for v2.0.
- def test_delete_quotas_when_extension_not_loaded(self):
- self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(False)
- self.mox.ReplayAll()
- req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- req, 1234)
-
- def test_delete_user_quotas_when_extension_not_loaded(self):
- self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
- self.ext_mgr.is_loaded('os-user-quotas').AndReturn(False)
- self.mox.ReplayAll()
- req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- req, 1234)
-
-
-class QuotaSetsTestV2WithoutServerGroupQuotas(QuotaSetsTestV2):
- include_server_group_quotas = False
-
- # NOTE: os-server-group-quotas is only for v2.0. On v2.1 this feature
- # is always enabled, so this test is only needed for v2.0
- def test_quotas_update_without_server_group_quotas_extenstion(self):
- self.setup_mock_for_update()
- self.default_quotas.update({
- 'server_groups': 50,
- 'sever_group_members': 50
- })
- body = {'quota_set': self.default_quotas}
-
- req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
- use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, 'update_me', body=body)
-
-
-class ExtendedQuotasTestV2(ExtendedQuotasTestV21):
- plugin = quotas_v2
-
- def _setup_controller(self):
- self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
- self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes().\
- AndReturn(False)
- self.mox.ReplayAll()
- self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
- self.mox.ResetAll()
-
-
-class UserQuotasTestV2(UserQuotasTestV21):
- plugin = quotas_v2
-
- def _setup_controller(self):
- self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
- self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes().\
- AndReturn(self.include_server_group_quotas)
- self.mox.ReplayAll()
- self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
- self.mox.ResetAll()
-
-
-class UserQuotasTestV2WithoutServerGroupQuotas(UserQuotasTestV2):
- include_server_group_quotas = False
-
- # NOTE: os-server-group-quotas is only for v2.0. On v2.1 this feature
- # is always enabled, so this test is only needed for v2.0
- def test_user_quotas_update_as_admin_without_sg_quota_extension(self):
- self.setup_mock_for_update()
- body = {'quota_set': {'instances': 10, 'cores': 20,
- 'ram': 51200, 'floating_ips': 10,
- 'fixed_ips': -1, 'metadata_items': 128,
- 'injected_files': 5,
- 'injected_file_content_bytes': 10240,
- 'injected_file_path_bytes': 255,
- 'security_groups': 10,
- 'security_group_rules': 20,
- 'key_pairs': 100,
- 'server_groups': 100,
- 'server_group_members': 200}}
-
- url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
- req = fakes.HTTPRequest.blank(url, use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, 'update_me', body=body)
diff --git a/nova/tests/api/openstack/compute/contrib/test_rescue.py b/nova/tests/api/openstack/compute/contrib/test_rescue.py
deleted file mode 100644
index 4695f8b268..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_rescue.py
+++ /dev/null
@@ -1,270 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-import webob
-
-from nova import compute
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-
-CONF = cfg.CONF
-CONF.import_opt('password_length', 'nova.utils')
-
-
-def rescue(self, context, instance, rescue_password=None,
- rescue_image_ref=None):
- pass
-
-
-def unrescue(self, context, instance):
- pass
-
-
-def fake_compute_get(*args, **kwargs):
- uuid = '70f6db34-de8d-4fbd-aafb-4065bdfa6114'
- return {'id': 1, 'uuid': uuid}
-
-
-class RescueTestV21(test.NoDBTestCase):
- _prefix = '/v2/fake'
-
- def setUp(self):
- super(RescueTestV21, self).setUp()
-
- self.stubs.Set(compute.api.API, "get", fake_compute_get)
- self.stubs.Set(compute.api.API, "rescue", rescue)
- self.stubs.Set(compute.api.API, "unrescue", unrescue)
- self.app = self._get_app()
-
- def _get_app(self):
- return fakes.wsgi_app_v21(init_only=('servers', 'os-rescue'))
-
- def test_rescue_from_locked_server(self):
- def fake_rescue_from_locked_server(self, context,
- instance, rescue_password=None, rescue_image_ref=None):
- raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
-
- self.stubs.Set(compute.api.API,
- 'rescue',
- fake_rescue_from_locked_server)
- body = {"rescue": {"adminPass": "AABBCC112233"}}
- req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 409)
-
- def test_rescue_with_preset_password(self):
- body = {"rescue": {"adminPass": "AABBCC112233"}}
- req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 200)
- resp_json = jsonutils.loads(resp.body)
- self.assertEqual("AABBCC112233", resp_json['adminPass'])
-
- def test_rescue_generates_password(self):
- body = dict(rescue=None)
- req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 200)
- resp_json = jsonutils.loads(resp.body)
- self.assertEqual(CONF.password_length, len(resp_json['adminPass']))
-
- def test_rescue_of_rescued_instance(self):
- body = dict(rescue=None)
-
- def fake_rescue(*args, **kwargs):
- raise exception.InstanceInvalidState('fake message')
-
- self.stubs.Set(compute.api.API, "rescue", fake_rescue)
- req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 409)
-
- def test_unrescue(self):
- body = dict(unrescue=None)
- req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 202)
-
- def test_unrescue_from_locked_server(self):
- def fake_unrescue_from_locked_server(self, context,
- instance):
- raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
-
- self.stubs.Set(compute.api.API,
- 'unrescue',
- fake_unrescue_from_locked_server)
-
- body = dict(unrescue=None)
- req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 409)
-
- def test_unrescue_of_active_instance(self):
- body = dict(unrescue=None)
-
- def fake_unrescue(*args, **kwargs):
- raise exception.InstanceInvalidState('fake message')
-
- self.stubs.Set(compute.api.API, "unrescue", fake_unrescue)
- req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 409)
-
- def test_rescue_raises_unrescuable(self):
- body = dict(rescue=None)
-
- def fake_rescue(*args, **kwargs):
- raise exception.InstanceNotRescuable('fake message')
-
- self.stubs.Set(compute.api.API, "rescue", fake_rescue)
- req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 400)
-
- @mock.patch('nova.compute.api.API.rescue')
- def test_rescue_with_image_specified(self, mock_compute_api_rescue):
- instance = fake_compute_get()
- body = {"rescue": {"adminPass": "ABC123",
- "rescue_image_ref": "img-id"}}
- req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 200)
- resp_json = jsonutils.loads(resp.body)
- self.assertEqual("ABC123", resp_json['adminPass'])
-
- mock_compute_api_rescue.assert_called_with(mock.ANY, instance,
- rescue_password=u'ABC123',
- rescue_image_ref=u'img-id')
-
- @mock.patch('nova.compute.api.API.rescue')
- def test_rescue_without_image_specified(self, mock_compute_api_rescue):
- instance = fake_compute_get()
- body = {"rescue": {"adminPass": "ABC123"}}
-
- req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 200)
- resp_json = jsonutils.loads(resp.body)
- self.assertEqual("ABC123", resp_json['adminPass'])
-
- mock_compute_api_rescue.assert_called_with(mock.ANY, instance,
- rescue_password=u'ABC123',
- rescue_image_ref=None)
-
- def test_rescue_with_none(self):
- body = dict(rescue=None)
- req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- resp = req.get_response(self.app)
- self.assertEqual(200, resp.status_int)
-
- def test_rescue_with_empty_dict(self):
- body = dict(rescue=dict())
- req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- resp = req.get_response(self.app)
- self.assertEqual(200, resp.status_int)
-
- def test_rescue_disable_password(self):
- self.flags(enable_instance_password=False)
- body = dict(rescue=None)
- req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- resp = req.get_response(self.app)
- self.assertEqual(200, resp.status_int)
- resp_json = jsonutils.loads(resp.body)
- self.assertNotIn('adminPass', resp_json)
-
- def test_rescue_with_invalid_property(self):
- body = {"rescue": {"test": "test"}}
- req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- resp = req.get_response(self.app)
- self.assertEqual(400, resp.status_int)
-
-
-class RescueTestV20(RescueTestV21):
-
- def _get_app(self):
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=None)
- return fakes.wsgi_app(init_only=('servers',))
-
- def test_rescue_with_invalid_property(self):
- # NOTE(cyeoh): input validation in original v2 code does not
- # check for invalid properties.
- pass
-
- def test_rescue_disable_password(self):
- # NOTE(cyeoh): Original v2.0 code does not support disabling
- # the admin password being returned through a conf setting
- pass
diff --git a/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py b/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py
deleted file mode 100644
index 9dc9a5a8cd..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py
+++ /dev/null
@@ -1,220 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-
-from nova.api.openstack import compute
-from nova.api.openstack.compute import plugins
-from nova.api.openstack.compute.plugins.v3 import servers as servers_v21
-from nova.api.openstack.compute import servers as servers_v2
-from nova.api.openstack import extensions
-import nova.compute.api
-from nova.compute import flavors
-from nova import db
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-from nova.tests.image import fake
-
-
-UUID = fakes.FAKE_UUID
-
-
-CONF = cfg.CONF
-
-
-class SchedulerHintsTestCaseV21(test.TestCase):
-
- def setUp(self):
- super(SchedulerHintsTestCaseV21, self).setUp()
- self.fake_instance = fakes.stub_instance(1, uuid=UUID)
- self._set_up_router()
-
- def _set_up_router(self):
- self.app = compute.APIRouterV3(init_only=('servers',
- 'os-scheduler-hints'))
-
- def _get_request(self):
- return fakes.HTTPRequestV3.blank('/servers')
-
- def test_create_server_without_hints(self):
-
- def fake_create(*args, **kwargs):
- self.assertEqual(kwargs['scheduler_hints'], {})
- return ([self.fake_instance], '')
-
- self.stubs.Set(nova.compute.api.API, 'create', fake_create)
-
- req = self._get_request()
- req.method = 'POST'
- req.content_type = 'application/json'
- body = {'server': {
- 'name': 'server_test',
- 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'flavorRef': '1',
- }}
-
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- self.assertEqual(202, res.status_int)
-
- def test_create_server_with_hints(self):
-
- def fake_create(*args, **kwargs):
- self.assertEqual(kwargs['scheduler_hints'], {'a': 'b'})
- return ([self.fake_instance], '')
-
- self.stubs.Set(nova.compute.api.API, 'create', fake_create)
-
- req = self._get_request()
- req.method = 'POST'
- req.content_type = 'application/json'
- body = {
- 'server': {
- 'name': 'server_test',
- 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'flavorRef': '1',
- },
- 'os:scheduler_hints': {'a': 'b'},
- }
-
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- self.assertEqual(202, res.status_int)
-
- def test_create_server_bad_hints(self):
- req = self._get_request()
- req.method = 'POST'
- req.content_type = 'application/json'
- body = {
- 'server': {
- 'name': 'server_test',
- 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
- 'flavorRef': '1',
- },
- 'os:scheduler_hints': 'here',
- }
-
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- self.assertEqual(400, res.status_int)
-
-
-class SchedulerHintsTestCaseV2(SchedulerHintsTestCaseV21):
-
- def _set_up_router(self):
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Scheduler_hints'])
- self.app = compute.APIRouter(init_only=('servers',))
-
- def _get_request(self):
- return fakes.HTTPRequest.blank('/fake/servers')
-
-
-class ServersControllerCreateTestV21(test.TestCase):
-
- def setUp(self):
- """Shared implementation for tests below that create instance."""
- super(ServersControllerCreateTestV21, self).setUp()
-
- self.instance_cache_num = 0
- self._set_up_controller()
-
- def instance_create(context, inst):
- inst_type = flavors.get_flavor_by_flavor_id(3)
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- def_image_ref = 'http://localhost/images/%s' % image_uuid
- self.instance_cache_num += 1
- instance = fake_instance.fake_db_instance(**{
- 'id': self.instance_cache_num,
- 'display_name': inst['display_name'] or 'test',
- 'uuid': fakes.FAKE_UUID,
- 'instance_type': inst_type,
- 'access_ip_v4': '1.2.3.4',
- 'access_ip_v6': 'fead::1234',
- 'image_ref': inst.get('image_ref', def_image_ref),
- 'user_id': 'fake',
- 'project_id': 'fake',
- 'reservation_id': inst['reservation_id'],
- "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
- "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
- "progress": 0,
- "fixed_ips": [],
- "task_state": "",
- "vm_state": "",
- "root_device_name": inst.get('root_device_name', 'vda'),
- })
-
- return instance
-
- fake.stub_out_image_service(self.stubs)
- self.stubs.Set(db, 'instance_create', instance_create)
-
- def _set_up_controller(self):
- ext_info = plugins.LoadedExtensionInfo()
- CONF.set_override('extensions_blacklist', 'os-scheduler-hints',
- 'osapi_v3')
- self.no_scheduler_hints_controller = servers_v21.ServersController(
- extension_info=ext_info)
-
- def _verify_availability_zone(self, **kwargs):
- self.assertNotIn('scheduler_hints', kwargs)
-
- def _get_request(self):
- return fakes.HTTPRequestV3.blank('/servers')
-
- def _test_create_extra(self, params):
- image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
- server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
- body = dict(server=server)
- body.update(params)
- req = self._get_request()
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- server = self.no_scheduler_hints_controller.create(
- req, body=body).obj['server']
-
- def test_create_instance_with_scheduler_hints_disabled(self):
- hints = {'same_host': '48e6a9f6-30af-47e0-bc04-acaed113bb4e'}
- params = {'OS-SCH-HNT:scheduler_hints': hints}
- old_create = nova.compute.api.API.create
-
- def create(*args, **kwargs):
- self._verify_availability_zone(**kwargs)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(nova.compute.api.API, 'create', create)
- self._test_create_extra(params)
-
-
-class ServersControllerCreateTestV2(ServersControllerCreateTestV21):
-
- def _set_up_controller(self):
- self.ext_mgr = extensions.ExtensionManager()
- self.ext_mgr.extensions = {}
- self.no_scheduler_hints_controller = servers_v2.Controller(
- self.ext_mgr)
-
- def _verify_availability_zone(self, **kwargs):
- self.assertEqual(kwargs['scheduler_hints'], {})
-
- def _get_request(self):
- return fakes.HTTPRequest.blank('/fake/servers')
diff --git a/nova/tests/api/openstack/compute/contrib/test_security_group_default_rules.py b/nova/tests/api/openstack/compute/contrib/test_security_group_default_rules.py
deleted file mode 100644
index abe0724f78..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_security_group_default_rules.py
+++ /dev/null
@@ -1,515 +0,0 @@
-# Copyright 2013 Metacloud, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-from oslo.config import cfg
-import webob
-
-from nova.api.openstack.compute.contrib import \
- security_group_default_rules as security_group_default_rules_v2
-from nova.api.openstack.compute.plugins.v3 import \
- security_group_default_rules as security_group_default_rules_v21
-from nova.api.openstack import wsgi
-from nova import context
-import nova.db
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-CONF = cfg.CONF
-
-
-class AttrDict(dict):
- def __getattr__(self, k):
- return self[k]
-
-
-def security_group_default_rule_template(**kwargs):
- rule = kwargs.copy()
- rule.setdefault('ip_protocol', 'TCP')
- rule.setdefault('from_port', 22)
- rule.setdefault('to_port', 22)
- rule.setdefault('cidr', '10.10.10.0/24')
- return rule
-
-
-def security_group_default_rule_db(security_group_default_rule, id=None):
- attrs = security_group_default_rule.copy()
- if id is not None:
- attrs['id'] = id
- return AttrDict(attrs)
-
-
-class TestSecurityGroupDefaultRulesNeutronV21(test.TestCase):
- controller_cls = (security_group_default_rules_v21.
- SecurityGroupDefaultRulesController)
-
- def setUp(self):
- self.flags(security_group_api='neutron')
- super(TestSecurityGroupDefaultRulesNeutronV21, self).setUp()
- self.controller = self.controller_cls()
-
- def test_create_security_group_default_rule_not_implemented_neutron(self):
- sgr = security_group_default_rule_template()
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.create,
- req, {'security_group_default_rule': sgr})
-
- def test_security_group_default_rules_list_not_implemented_neturon(self):
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.index,
- req)
-
- def test_security_group_default_rules_show_not_implemented_neturon(self):
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.show,
- req, '602ed77c-a076-4f9b-a617-f93b847b62c5')
-
- def test_security_group_default_rules_delete_not_implemented_neturon(self):
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.delete,
- req, '602ed77c-a076-4f9b-a617-f93b847b62c5')
-
-
-class TestSecurityGroupDefaultRulesNeutronV2(test.TestCase):
- controller_cls = (security_group_default_rules_v2.
- SecurityGroupDefaultRulesController)
-
-
-class TestSecurityGroupDefaultRulesV21(test.TestCase):
- controller_cls = (security_group_default_rules_v21.
- SecurityGroupDefaultRulesController)
-
- def setUp(self):
- super(TestSecurityGroupDefaultRulesV21, self).setUp()
- self.controller = self.controller_cls()
-
- def test_create_security_group_default_rule(self):
- sgr = security_group_default_rule_template()
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- sgr_dict = dict(security_group_default_rule=sgr)
- res_dict = self.controller.create(req, sgr_dict)
- security_group_default_rule = res_dict['security_group_default_rule']
- self.assertEqual(security_group_default_rule['ip_protocol'],
- sgr['ip_protocol'])
- self.assertEqual(security_group_default_rule['from_port'],
- sgr['from_port'])
- self.assertEqual(security_group_default_rule['to_port'],
- sgr['to_port'])
- self.assertEqual(security_group_default_rule['ip_range']['cidr'],
- sgr['cidr'])
-
- def test_create_security_group_default_rule_with_no_to_port(self):
- sgr = security_group_default_rule_template()
- del sgr['to_port']
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_default_rule': sgr})
-
- def test_create_security_group_default_rule_with_no_from_port(self):
- sgr = security_group_default_rule_template()
- del sgr['from_port']
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_default_rule': sgr})
-
- def test_create_security_group_default_rule_with_no_ip_protocol(self):
- sgr = security_group_default_rule_template()
- del sgr['ip_protocol']
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_default_rule': sgr})
-
- def test_create_security_group_default_rule_with_no_cidr(self):
- sgr = security_group_default_rule_template()
- del sgr['cidr']
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- res_dict = self.controller.create(req,
- {'security_group_default_rule': sgr})
- security_group_default_rule = res_dict['security_group_default_rule']
- self.assertNotEqual(security_group_default_rule['id'], 0)
- self.assertEqual(security_group_default_rule['ip_range']['cidr'],
- '0.0.0.0/0')
-
- def test_create_security_group_default_rule_with_blank_to_port(self):
- sgr = security_group_default_rule_template(to_port='')
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_default_rule': sgr})
-
- def test_create_security_group_default_rule_with_blank_from_port(self):
- sgr = security_group_default_rule_template(from_port='')
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_default_rule': sgr})
-
- def test_create_security_group_default_rule_with_blank_ip_protocol(self):
- sgr = security_group_default_rule_template(ip_protocol='')
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_default_rule': sgr})
-
- def test_create_security_group_default_rule_with_blank_cidr(self):
- sgr = security_group_default_rule_template(cidr='')
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- res_dict = self.controller.create(req,
- {'security_group_default_rule': sgr})
- security_group_default_rule = res_dict['security_group_default_rule']
- self.assertNotEqual(security_group_default_rule['id'], 0)
- self.assertEqual(security_group_default_rule['ip_range']['cidr'],
- '0.0.0.0/0')
-
- def test_create_security_group_default_rule_non_numerical_to_port(self):
- sgr = security_group_default_rule_template(to_port='invalid')
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_default_rule': sgr})
-
- def test_create_security_group_default_rule_non_numerical_from_port(self):
- sgr = security_group_default_rule_template(from_port='invalid')
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_default_rule': sgr})
-
- def test_create_security_group_default_rule_invalid_ip_protocol(self):
- sgr = security_group_default_rule_template(ip_protocol='invalid')
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_default_rule': sgr})
-
- def test_create_security_group_default_rule_invalid_cidr(self):
- sgr = security_group_default_rule_template(cidr='10.10.2222.0/24')
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_default_rule': sgr})
-
- def test_create_security_group_default_rule_invalid_to_port(self):
- sgr = security_group_default_rule_template(to_port='666666')
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_default_rule': sgr})
-
- def test_create_security_group_default_rule_invalid_from_port(self):
- sgr = security_group_default_rule_template(from_port='666666')
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_default_rule': sgr})
-
- def test_create_security_group_default_rule_with_no_body(self):
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, req, None)
-
- def test_create_duplicate_security_group_default_rule(self):
- sgr = security_group_default_rule_template()
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.controller.create(req, {'security_group_default_rule': sgr})
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_default_rule': sgr})
-
- def test_security_group_default_rules_list(self):
- self.test_create_security_group_default_rule()
- rules = [dict(id=1,
- ip_protocol='TCP',
- from_port=22,
- to_port=22,
- ip_range=dict(cidr='10.10.10.0/24'))]
- expected = {'security_group_default_rules': rules}
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- res_dict = self.controller.index(req)
- self.assertEqual(res_dict, expected)
-
- def test_default_security_group_default_rule_show(self):
- sgr = security_group_default_rule_template(id=1)
-
- self.test_create_security_group_default_rule()
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- res_dict = self.controller.show(req, '1')
-
- security_group_default_rule = res_dict['security_group_default_rule']
-
- self.assertEqual(security_group_default_rule['ip_protocol'],
- sgr['ip_protocol'])
- self.assertEqual(security_group_default_rule['to_port'],
- sgr['to_port'])
- self.assertEqual(security_group_default_rule['from_port'],
- sgr['from_port'])
- self.assertEqual(security_group_default_rule['ip_range']['cidr'],
- sgr['cidr'])
-
- def test_delete_security_group_default_rule(self):
- sgr = security_group_default_rule_template(id=1)
-
- self.test_create_security_group_default_rule()
-
- self.called = False
-
- def security_group_default_rule_destroy(context, id):
- self.called = True
-
- def return_security_group_default_rule(context, id):
- self.assertEqual(sgr['id'], id)
- return security_group_default_rule_db(sgr)
-
- self.stubs.Set(nova.db, 'security_group_default_rule_destroy',
- security_group_default_rule_destroy)
- self.stubs.Set(nova.db, 'security_group_default_rule_get',
- return_security_group_default_rule)
-
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-security-group-default-rules', use_admin_context=True)
- self.controller.delete(req, '1')
-
- self.assertTrue(self.called)
-
- def test_security_group_ensure_default(self):
- sgr = security_group_default_rule_template(id=1)
- self.test_create_security_group_default_rule()
-
- ctxt = context.get_admin_context()
-
- setattr(ctxt, 'project_id', 'new_project_id')
-
- sg = nova.db.security_group_ensure_default(ctxt)
- rules = nova.db.security_group_rule_get_by_security_group(ctxt, sg.id)
- security_group_rule = rules[0]
- self.assertEqual(sgr['id'], security_group_rule.id)
- self.assertEqual(sgr['ip_protocol'], security_group_rule.protocol)
- self.assertEqual(sgr['from_port'], security_group_rule.from_port)
- self.assertEqual(sgr['to_port'], security_group_rule.to_port)
- self.assertEqual(sgr['cidr'], security_group_rule.cidr)
-
-
-class TestSecurityGroupDefaultRulesV2(test.TestCase):
- controller_cls = (security_group_default_rules_v2.
- SecurityGroupDefaultRulesController)
-
-
-class TestSecurityGroupDefaultRulesXMLDeserializer(test.TestCase):
- def setUp(self):
- super(TestSecurityGroupDefaultRulesXMLDeserializer, self).setUp()
- deserializer = security_group_default_rules_v2.\
- SecurityGroupDefaultRulesXMLDeserializer()
- self.deserializer = deserializer
-
- def test_create_request(self):
- serial_request = """
-<security_group_default_rule>
- <from_port>22</from_port>
- <to_port>22</to_port>
- <ip_protocol>TCP</ip_protocol>
- <cidr>10.10.10.0/24</cidr>
-</security_group_default_rule>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "security_group_default_rule": {
- "from_port": "22",
- "to_port": "22",
- "ip_protocol": "TCP",
- "cidr": "10.10.10.0/24"
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_create_no_to_port_request(self):
- serial_request = """
-<security_group_default_rule>
- <from_port>22</from_port>
- <ip_protocol>TCP</ip_protocol>
- <cidr>10.10.10.0/24</cidr>
-</security_group_default_rule>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "security_group_default_rule": {
- "from_port": "22",
- "ip_protocol": "TCP",
- "cidr": "10.10.10.0/24"
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_create_no_from_port_request(self):
- serial_request = """
-<security_group_default_rule>
- <to_port>22</to_port>
- <ip_protocol>TCP</ip_protocol>
- <cidr>10.10.10.0/24</cidr>
-</security_group_default_rule>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "security_group_default_rule": {
- "to_port": "22",
- "ip_protocol": "TCP",
- "cidr": "10.10.10.0/24"
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_create_no_ip_protocol_request(self):
- serial_request = """
-<security_group_default_rule>
- <from_port>22</from_port>
- <to_port>22</to_port>
- <cidr>10.10.10.0/24</cidr>
-</security_group_default_rule>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "security_group_default_rule": {
- "from_port": "22",
- "to_port": "22",
- "cidr": "10.10.10.0/24"
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_create_no_cidr_request(self):
- serial_request = """
-<security_group_default_rule>
- <from_port>22</from_port>
- <to_port>22</to_port>
- <ip_protocol>TCP</ip_protocol>
-</security_group_default_rule>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "security_group_default_rule": {
- "from_port": "22",
- "to_port": "22",
- "ip_protocol": "TCP",
- },
- }
- self.assertEqual(request['body'], expected)
-
-
-class TestSecurityGroupDefaultRuleXMLSerializer(test.TestCase):
- def setUp(self):
- super(TestSecurityGroupDefaultRuleXMLSerializer, self).setUp()
- self.namespace = wsgi.XMLNS_V11
- self.rule_serializer =\
- security_group_default_rules_v2.SecurityGroupDefaultRuleTemplate()
- self.index_serializer =\
- security_group_default_rules_v2.SecurityGroupDefaultRulesTemplate()
-
- def _tag(self, elem):
- tagname = elem.tag
- self.assertEqual(tagname[0], '{')
- tmp = tagname.partition('}')
- namespace = tmp[0][1:]
- self.assertEqual(namespace, self.namespace)
- return tmp[2]
-
- def _verify_security_group_default_rule(self, raw_rule, tree):
- self.assertEqual(raw_rule['id'], tree.get('id'))
-
- seen = set()
- expected = set(['ip_protocol', 'from_port', 'to_port', 'ip_range',
- 'ip_range/cidr'])
-
- for child in tree:
- child_tag = self._tag(child)
- seen.add(child_tag)
- if child_tag == 'ip_range':
- for gr_child in child:
- gr_child_tag = self._tag(gr_child)
- self.assertIn(gr_child_tag, raw_rule[child_tag])
- seen.add('%s/%s' % (child_tag, gr_child_tag))
- self.assertEqual(gr_child.text,
- raw_rule[child_tag][gr_child_tag])
- else:
- self.assertEqual(child.text, raw_rule[child_tag])
- self.assertEqual(seen, expected)
-
- def test_rule_serializer(self):
- raw_rule = dict(id='123',
- ip_protocol='TCP',
- from_port='22',
- to_port='22',
- ip_range=dict(cidr='10.10.10.0/24'))
- rule = dict(security_group_default_rule=raw_rule)
- text = self.rule_serializer.serialize(rule)
-
- tree = etree.fromstring(text)
-
- self.assertEqual('security_group_default_rule', self._tag(tree))
- self._verify_security_group_default_rule(raw_rule, tree)
-
- def test_index_serializer(self):
- rules = [dict(id='123',
- ip_protocol='TCP',
- from_port='22',
- to_port='22',
- ip_range=dict(cidr='10.10.10.0/24')),
- dict(id='234',
- ip_protocol='UDP',
- from_port='23456',
- to_port='234567',
- ip_range=dict(cidr='10.12.0.0/18')),
- dict(id='345',
- ip_protocol='tcp',
- from_port='3456',
- to_port='4567',
- ip_range=dict(cidr='192.168.1.0/32'))]
-
- rules_dict = dict(security_group_default_rules=rules)
-
- text = self.index_serializer.serialize(rules_dict)
-
- tree = etree.fromstring(text)
- self.assertEqual('security_group_default_rules', self._tag(tree))
- self.assertEqual(len(rules), len(tree))
- for idx, child in enumerate(tree):
- self._verify_security_group_default_rule(rules[idx], child)
diff --git a/nova/tests/api/openstack/compute/contrib/test_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
deleted file mode 100644
index 6911f92d22..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_security_groups.py
+++ /dev/null
@@ -1,1767 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# Copyright 2012 Justin Santa Barbara
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-import mock
-import mox
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute.contrib import security_groups as secgroups_v2
-from nova.api.openstack.compute.plugins.v3 import security_groups as \
- secgroups_v21
-from nova.api.openstack import wsgi
-from nova.api.openstack import xmlutil
-from nova import compute
-from nova.compute import power_state
-from nova import context as context_maker
-import nova.db
-from nova import exception
-from nova import objects
-from nova.objects import instance as instance_obj
-from nova import quota
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-from nova.tests import utils
-
-CONF = cfg.CONF
-FAKE_UUID1 = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
-FAKE_UUID2 = 'c6e6430a-6563-4efa-9542-5e93c9e97d18'
-
-
-class AttrDict(dict):
- def __getattr__(self, k):
- return self[k]
-
-
-def security_group_template(**kwargs):
- sg = kwargs.copy()
- sg.setdefault('tenant_id', '123')
- sg.setdefault('name', 'test')
- sg.setdefault('description', 'test-description')
- return sg
-
-
-def security_group_db(security_group, id=None):
- attrs = security_group.copy()
- if 'tenant_id' in attrs:
- attrs['project_id'] = attrs.pop('tenant_id')
- if id is not None:
- attrs['id'] = id
- attrs.setdefault('rules', [])
- attrs.setdefault('instances', [])
- return AttrDict(attrs)
-
-
-def security_group_rule_template(**kwargs):
- rule = kwargs.copy()
- rule.setdefault('ip_protocol', 'tcp')
- rule.setdefault('from_port', 22)
- rule.setdefault('to_port', 22)
- rule.setdefault('parent_group_id', 2)
- return rule
-
-
-def security_group_rule_db(rule, id=None):
- attrs = rule.copy()
- if 'ip_protocol' in attrs:
- attrs['protocol'] = attrs.pop('ip_protocol')
- return AttrDict(attrs)
-
-
-def return_server(context, server_id,
- columns_to_join=None, use_slave=False):
- return fake_instance.fake_db_instance(
- **{'id': int(server_id),
- 'power_state': 0x01,
- 'host': "localhost",
- 'uuid': FAKE_UUID1,
- 'name': 'asdf'})
-
-
-def return_server_by_uuid(context, server_uuid,
- columns_to_join=None,
- use_slave=False):
- return fake_instance.fake_db_instance(
- **{'id': 1,
- 'power_state': 0x01,
- 'host': "localhost",
- 'uuid': server_uuid,
- 'name': 'asdf'})
-
-
-def return_non_running_server(context, server_id, columns_to_join=None):
- return fake_instance.fake_db_instance(
- **{'id': server_id, 'power_state': power_state.SHUTDOWN,
- 'uuid': FAKE_UUID1, 'host': "localhost", 'name': 'asdf'})
-
-
-def return_security_group_by_name(context, project_id, group_name):
- return {'id': 1, 'name': group_name,
- "instances": [{'id': 1, 'uuid': FAKE_UUID1}]}
-
-
-def return_security_group_without_instances(context, project_id, group_name):
- return {'id': 1, 'name': group_name}
-
-
-def return_server_nonexistent(context, server_id, columns_to_join=None):
- raise exception.InstanceNotFound(instance_id=server_id)
-
-
-class TestSecurityGroupsV21(test.TestCase):
- secgrp_ctl_cls = secgroups_v21.SecurityGroupController
- server_secgrp_ctl_cls = secgroups_v21.ServerSecurityGroupController
- secgrp_act_ctl_cls = secgroups_v21.SecurityGroupActionController
-
- def setUp(self):
- super(TestSecurityGroupsV21, self).setUp()
-
- self.controller = self.secgrp_ctl_cls()
- self.server_controller = self.server_secgrp_ctl_cls()
- self.manager = self.secgrp_act_ctl_cls()
-
- # This needs to be done here to set fake_id because the derived
- # class needs to be called first if it wants to set
- # 'security_group_api' and this setUp method needs to be called.
- if self.controller.security_group_api.id_is_uuid:
- self.fake_id = '11111111-1111-1111-1111-111111111111'
- else:
- self.fake_id = '11111111'
-
- def _assert_no_security_groups_reserved(self, context):
- """Check that no reservations are leaked during tests."""
- result = quota.QUOTAS.get_project_quotas(context, context.project_id)
- self.assertEqual(result['security_groups']['reserved'], 0)
-
- def _assert_security_groups_in_use(self, project_id, user_id, in_use):
- context = context_maker.get_admin_context()
- result = quota.QUOTAS.get_user_quotas(context, project_id, user_id)
- self.assertEqual(result['security_groups']['in_use'], in_use)
-
- def test_create_security_group(self):
- sg = security_group_template()
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- res_dict = self.controller.create(req, {'security_group': sg})
- self.assertEqual(res_dict['security_group']['name'], 'test')
- self.assertEqual(res_dict['security_group']['description'],
- 'test-description')
-
- def test_create_security_group_with_no_name(self):
- sg = security_group_template()
- del sg['name']
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, req, sg)
-
- self._assert_no_security_groups_reserved(req.environ['nova.context'])
-
- def test_create_security_group_with_no_description(self):
- sg = security_group_template()
- del sg['description']
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group': sg})
-
- self._assert_no_security_groups_reserved(req.environ['nova.context'])
-
- def test_create_security_group_with_empty_description(self):
- sg = security_group_template()
- sg['description'] = ""
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- try:
- self.controller.create(req, {'security_group': sg})
- self.fail('Should have raised BadRequest exception')
- except webob.exc.HTTPBadRequest as exc:
- self.assertEqual('description has a minimum character requirement'
- ' of 1.', exc.explanation)
- except exception.InvalidInput as exc:
- self.fail('Should have raised BadRequest exception instead of')
- self._assert_no_security_groups_reserved(req.environ['nova.context'])
-
- def test_create_security_group_with_blank_name(self):
- sg = security_group_template(name='')
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group': sg})
-
- self._assert_no_security_groups_reserved(req.environ['nova.context'])
-
- def test_create_security_group_with_whitespace_name(self):
- sg = security_group_template(name=' ')
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group': sg})
-
- self._assert_no_security_groups_reserved(req.environ['nova.context'])
-
- def test_create_security_group_with_blank_description(self):
- sg = security_group_template(description='')
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group': sg})
-
- self._assert_no_security_groups_reserved(req.environ['nova.context'])
-
- def test_create_security_group_with_whitespace_description(self):
- sg = security_group_template(description=' ')
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group': sg})
-
- self._assert_no_security_groups_reserved(req.environ['nova.context'])
-
- def test_create_security_group_with_duplicate_name(self):
- sg = security_group_template()
-
- # FIXME: Stub out _get instead of creating twice
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- self.controller.create(req, {'security_group': sg})
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group': sg})
-
- self._assert_no_security_groups_reserved(req.environ['nova.context'])
-
- def test_create_security_group_with_no_body(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, req, None)
-
- self._assert_no_security_groups_reserved(req.environ['nova.context'])
-
- def test_create_security_group_with_no_security_group(self):
- body = {'no-securityGroup': None}
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, req, body)
-
- self._assert_no_security_groups_reserved(req.environ['nova.context'])
-
- def test_create_security_group_above_255_characters_name(self):
- sg = security_group_template(name='1234567890' * 26)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group': sg})
-
- self._assert_no_security_groups_reserved(req.environ['nova.context'])
-
- def test_create_security_group_above_255_characters_description(self):
- sg = security_group_template(description='1234567890' * 26)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group': sg})
-
- self._assert_no_security_groups_reserved(req.environ['nova.context'])
-
- def test_create_security_group_non_string_name(self):
- sg = security_group_template(name=12)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group': sg})
-
- self._assert_no_security_groups_reserved(req.environ['nova.context'])
-
- def test_create_security_group_non_string_description(self):
- sg = security_group_template(description=12)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group': sg})
-
- self._assert_no_security_groups_reserved(req.environ['nova.context'])
-
- def test_create_security_group_quota_limit(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- for num in range(1, CONF.quota_security_groups):
- name = 'test%s' % num
- sg = security_group_template(name=name)
- res_dict = self.controller.create(req, {'security_group': sg})
- self.assertEqual(res_dict['security_group']['name'], name)
-
- sg = security_group_template()
- self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
- req, {'security_group': sg})
-
- def test_get_security_group_list(self):
- groups = []
- for i, name in enumerate(['default', 'test']):
- sg = security_group_template(id=i + 1,
- name=name,
- description=name + '-desc',
- rules=[])
- groups.append(sg)
- expected = {'security_groups': groups}
-
- def return_security_groups(context, project_id):
- return [security_group_db(sg) for sg in groups]
-
- self.stubs.Set(nova.db, 'security_group_get_by_project',
- return_security_groups)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- res_dict = self.controller.index(req)
-
- self.assertEqual(res_dict, expected)
-
- def test_get_security_group_list_missing_group_id_rule(self):
- groups = []
- rule1 = security_group_rule_template(cidr='10.2.3.124/24',
- parent_group_id=1,
- group_id={}, id=88,
- protocol='TCP')
- rule2 = security_group_rule_template(cidr='10.2.3.125/24',
- parent_group_id=1,
- id=99, protocol=88,
- group_id='HAS_BEEN_DELETED')
- sg = security_group_template(id=1,
- name='test',
- description='test-desc',
- rules=[rule1, rule2])
-
- groups.append(sg)
- # An expected rule here needs to be created as the api returns
- # different attributes on the rule for a response than what was
- # passed in. For example:
- # "cidr": "0.0.0.0/0" ->"ip_range": {"cidr": "0.0.0.0/0"}
- expected_rule = security_group_rule_template(
- ip_range={'cidr': '10.2.3.124/24'}, parent_group_id=1,
- group={}, id=88, ip_protocol='TCP')
- expected = security_group_template(id=1,
- name='test',
- description='test-desc',
- rules=[expected_rule])
-
- expected = {'security_groups': [expected]}
-
- def return_security_groups(context, project, search_opts):
- return [security_group_db(sg) for sg in groups]
-
- self.stubs.Set(self.controller.security_group_api, 'list',
- return_security_groups)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- res_dict = self.controller.index(req)
-
- self.assertEqual(res_dict, expected)
-
- def test_get_security_group_list_all_tenants(self):
- all_groups = []
- tenant_groups = []
-
- for i, name in enumerate(['default', 'test']):
- sg = security_group_template(id=i + 1,
- name=name,
- description=name + '-desc',
- rules=[])
- all_groups.append(sg)
- if name == 'default':
- tenant_groups.append(sg)
-
- all = {'security_groups': all_groups}
- tenant_specific = {'security_groups': tenant_groups}
-
- def return_all_security_groups(context):
- return [security_group_db(sg) for sg in all_groups]
-
- self.stubs.Set(nova.db, 'security_group_get_all',
- return_all_security_groups)
-
- def return_tenant_security_groups(context, project_id):
- return [security_group_db(sg) for sg in tenant_groups]
-
- self.stubs.Set(nova.db, 'security_group_get_by_project',
- return_tenant_security_groups)
-
- path = '/v2/fake/os-security-groups'
-
- req = fakes.HTTPRequest.blank(path, use_admin_context=True)
- res_dict = self.controller.index(req)
- self.assertEqual(res_dict, tenant_specific)
-
- req = fakes.HTTPRequest.blank('%s?all_tenants=1' % path,
- use_admin_context=True)
- res_dict = self.controller.index(req)
- self.assertEqual(res_dict, all)
-
- def test_get_security_group_by_instance(self):
- groups = []
- for i, name in enumerate(['default', 'test']):
- sg = security_group_template(id=i + 1,
- name=name,
- description=name + '-desc',
- rules=[])
- groups.append(sg)
- expected = {'security_groups': groups}
-
- def return_instance(context, server_id,
- columns_to_join=None, use_slave=False):
- self.assertEqual(server_id, FAKE_UUID1)
- return return_server_by_uuid(context, server_id)
-
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_instance)
-
- def return_security_groups(context, instance_uuid):
- self.assertEqual(instance_uuid, FAKE_UUID1)
- return [security_group_db(sg) for sg in groups]
-
- self.stubs.Set(nova.db, 'security_group_get_by_instance',
- return_security_groups)
-
- req = fakes.HTTPRequest.blank('/v2/%s/servers/%s/os-security-groups' %
- ('fake', FAKE_UUID1))
- res_dict = self.server_controller.index(req, FAKE_UUID1)
-
- self.assertEqual(res_dict, expected)
-
- @mock.patch('nova.db.instance_get_by_uuid')
- @mock.patch('nova.db.security_group_get_by_instance', return_value=[])
- def test_get_security_group_empty_for_instance(self, mock_sec_group,
- mock_db_get_ins):
- expected = {'security_groups': []}
-
- def return_instance(context, server_id,
- columns_to_join=None, use_slave=False):
- self.assertEqual(server_id, FAKE_UUID1)
- return return_server_by_uuid(context, server_id)
- mock_db_get_ins.side_effect = return_instance
- req = fakes.HTTPRequest.blank('/v2/%s/servers/%s/os-security-groups' %
- ('fake', FAKE_UUID1))
- res_dict = self.server_controller.index(req, FAKE_UUID1)
- self.assertEqual(expected, res_dict)
- mock_sec_group.assert_called_once_with(req.environ['nova.context'],
- FAKE_UUID1)
-
- def test_get_security_group_by_instance_non_existing(self):
- self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_nonexistent)
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/os-security-groups')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.server_controller.index, req, '1')
-
- def test_get_security_group_by_instance_invalid_id(self):
- req = fakes.HTTPRequest.blank(
- '/v2/fake/servers/invalid/os-security-groups')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.server_controller.index, req, 'invalid')
-
- def test_get_security_group_by_id(self):
- sg = security_group_template(id=2, rules=[])
-
- def return_security_group(context, group_id):
- self.assertEqual(sg['id'], group_id)
- return security_group_db(sg)
-
- self.stubs.Set(nova.db, 'security_group_get',
- return_security_group)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
- res_dict = self.controller.show(req, '2')
-
- expected = {'security_group': sg}
- self.assertEqual(res_dict, expected)
-
- def test_get_security_group_by_invalid_id(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/invalid')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
- req, 'invalid')
-
- def test_get_security_group_by_non_existing_id(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
- self.fake_id)
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- req, self.fake_id)
-
- def test_update_security_group(self):
- sg = security_group_template(id=2, rules=[])
- sg_update = security_group_template(id=2, rules=[],
- name='update_name', description='update_desc')
-
- def return_security_group(context, group_id):
- self.assertEqual(sg['id'], group_id)
- return security_group_db(sg)
-
- def return_update_security_group(context, group_id, values,
- columns_to_join=None):
- self.assertEqual(sg_update['id'], group_id)
- self.assertEqual(sg_update['name'], values['name'])
- self.assertEqual(sg_update['description'], values['description'])
- return security_group_db(sg_update)
-
- self.stubs.Set(nova.db, 'security_group_update',
- return_update_security_group)
- self.stubs.Set(nova.db, 'security_group_get',
- return_security_group)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
- res_dict = self.controller.update(req, '2',
- {'security_group': sg_update})
-
- expected = {'security_group': sg_update}
- self.assertEqual(res_dict, expected)
-
- def test_update_security_group_name_to_default(self):
- sg = security_group_template(id=2, rules=[], name='default')
-
- def return_security_group(context, group_id):
- self.assertEqual(sg['id'], group_id)
- return security_group_db(sg)
-
- self.stubs.Set(nova.db, 'security_group_get',
- return_security_group)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, '2', {'security_group': sg})
-
- def test_update_default_security_group_fail(self):
- sg = security_group_template()
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, '1', {'security_group': sg})
-
- def test_delete_security_group_by_id(self):
- sg = security_group_template(id=1, project_id='fake_project',
- user_id='fake_user', rules=[])
-
- self.called = False
-
- def security_group_destroy(context, id):
- self.called = True
-
- def return_security_group(context, group_id):
- self.assertEqual(sg['id'], group_id)
- return security_group_db(sg)
-
- self.stubs.Set(nova.db, 'security_group_destroy',
- security_group_destroy)
- self.stubs.Set(nova.db, 'security_group_get',
- return_security_group)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
- self.controller.delete(req, '1')
-
- self.assertTrue(self.called)
-
- def test_delete_security_group_by_admin(self):
- sg = security_group_template(id=2, rules=[])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
- self.controller.create(req, {'security_group': sg})
- context = req.environ['nova.context']
-
- # Ensure quota usage for security group is correct.
- self._assert_security_groups_in_use(context.project_id,
- context.user_id, 2)
-
- # Delete the security group by admin.
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2',
- use_admin_context=True)
- self.controller.delete(req, '2')
-
- # Ensure quota for security group in use is released.
- self._assert_security_groups_in_use(context.project_id,
- context.user_id, 1)
-
- def test_delete_security_group_by_invalid_id(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/invalid')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
- req, 'invalid')
-
- def test_delete_security_group_by_non_existing_id(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
- % self.fake_id)
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- req, self.fake_id)
-
- def test_delete_security_group_in_use(self):
- sg = security_group_template(id=1, rules=[])
-
- def security_group_in_use(context, id):
- return True
-
- def return_security_group(context, group_id):
- self.assertEqual(sg['id'], group_id)
- return security_group_db(sg)
-
- self.stubs.Set(nova.db, 'security_group_in_use',
- security_group_in_use)
- self.stubs.Set(nova.db, 'security_group_get',
- return_security_group)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
- req, '1')
-
- def test_associate_by_non_existing_security_group_name(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.assertEqual(return_server(None, '1'),
- nova.db.instance_get(None, '1'))
- body = dict(addSecurityGroup=dict(name='non-existing'))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.manager._addSecurityGroup, req, '1', body)
-
- def test_associate_by_invalid_server_id(self):
- body = dict(addSecurityGroup=dict(name='test'))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/invalid/action')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.manager._addSecurityGroup, req, 'invalid', body)
-
- def test_associate_without_body(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- body = dict(addSecurityGroup=None)
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.manager._addSecurityGroup, req, '1', body)
-
- def test_associate_no_security_group_name(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- body = dict(addSecurityGroup=dict())
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.manager._addSecurityGroup, req, '1', body)
-
- def test_associate_security_group_name_with_whitespaces(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- body = dict(addSecurityGroup=dict(name=" "))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.manager._addSecurityGroup, req, '1', body)
-
- def test_associate_non_existing_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_nonexistent)
- body = dict(addSecurityGroup=dict(name="test"))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.manager._addSecurityGroup, req, '1', body)
-
- def test_associate_non_running_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_non_running_server)
- self.stubs.Set(nova.db, 'security_group_get_by_name',
- return_security_group_without_instances)
- body = dict(addSecurityGroup=dict(name="test"))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.manager._addSecurityGroup(req, '1', body)
-
- def test_associate_already_associated_security_group_to_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_by_uuid)
- self.stubs.Set(nova.db, 'security_group_get_by_name',
- return_security_group_by_name)
- body = dict(addSecurityGroup=dict(name="test"))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.manager._addSecurityGroup, req, '1', body)
-
- def test_associate(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_by_uuid)
- self.mox.StubOutWithMock(nova.db, 'instance_add_security_group')
- nova.db.instance_add_security_group(mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg())
- self.stubs.Set(nova.db, 'security_group_get_by_name',
- return_security_group_without_instances)
- self.mox.ReplayAll()
-
- body = dict(addSecurityGroup=dict(name="test"))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.manager._addSecurityGroup(req, '1', body)
-
- def test_disassociate_by_non_existing_security_group_name(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.assertEqual(return_server(None, '1'),
- nova.db.instance_get(None, '1'))
- body = dict(removeSecurityGroup=dict(name='non-existing'))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.manager._removeSecurityGroup, req, '1', body)
-
- def test_disassociate_by_invalid_server_id(self):
- self.stubs.Set(nova.db, 'security_group_get_by_name',
- return_security_group_by_name)
- body = dict(removeSecurityGroup=dict(name='test'))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/invalid/action')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.manager._removeSecurityGroup, req, 'invalid',
- body)
-
- def test_disassociate_without_body(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- body = dict(removeSecurityGroup=None)
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.manager._removeSecurityGroup, req, '1', body)
-
- def test_disassociate_no_security_group_name(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- body = dict(removeSecurityGroup=dict())
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.manager._removeSecurityGroup, req, '1', body)
-
- def test_disassociate_security_group_name_with_whitespaces(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- body = dict(removeSecurityGroup=dict(name=" "))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.manager._removeSecurityGroup, req, '1', body)
-
- def test_disassociate_non_existing_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
- self.stubs.Set(nova.db, 'security_group_get_by_name',
- return_security_group_by_name)
- body = dict(removeSecurityGroup=dict(name="test"))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.manager._removeSecurityGroup, req, '1', body)
-
- def test_disassociate_non_running_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_non_running_server)
- self.stubs.Set(nova.db, 'security_group_get_by_name',
- return_security_group_by_name)
- body = dict(removeSecurityGroup=dict(name="test"))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.manager._removeSecurityGroup(req, '1', body)
-
- def test_disassociate_already_associated_security_group_to_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_by_uuid)
- self.stubs.Set(nova.db, 'security_group_get_by_name',
- return_security_group_without_instances)
- body = dict(removeSecurityGroup=dict(name="test"))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.manager._removeSecurityGroup, req, '1', body)
-
- def test_disassociate(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_by_uuid)
- self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group')
- nova.db.instance_remove_security_group(mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg())
- self.stubs.Set(nova.db, 'security_group_get_by_name',
- return_security_group_by_name)
- self.mox.ReplayAll()
-
- body = dict(removeSecurityGroup=dict(name="test"))
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.manager._removeSecurityGroup(req, '1', body)
-
-
-class TestSecurityGroupsV2(TestSecurityGroupsV21):
- secgrp_ctl_cls = secgroups_v2.SecurityGroupController
- server_secgrp_ctl_cls = secgroups_v2.ServerSecurityGroupController
- secgrp_act_ctl_cls = secgroups_v2.SecurityGroupActionController
-
-
-class TestSecurityGroupRulesV21(test.TestCase):
- secgrp_ctl_cls = secgroups_v21.SecurityGroupRulesController
-
- def setUp(self):
- super(TestSecurityGroupRulesV21, self).setUp()
-
- self.controller = self.secgrp_ctl_cls()
- if self.controller.security_group_api.id_is_uuid:
- id1 = '11111111-1111-1111-1111-111111111111'
- id2 = '22222222-2222-2222-2222-222222222222'
- self.invalid_id = '33333333-3333-3333-3333-333333333333'
- else:
- id1 = 1
- id2 = 2
- self.invalid_id = '33333333'
-
- self.sg1 = security_group_template(id=id1)
- self.sg2 = security_group_template(
- id=id2, name='authorize_revoke',
- description='authorize-revoke testing')
-
- db1 = security_group_db(self.sg1)
- db2 = security_group_db(self.sg2)
-
- def return_security_group(context, group_id, columns_to_join=None):
- if group_id == db1['id']:
- return db1
- if group_id == db2['id']:
- return db2
- raise exception.SecurityGroupNotFound(security_group_id=group_id)
-
- self.stubs.Set(nova.db, 'security_group_get',
- return_security_group)
-
- self.parent_security_group = db2
-
- def test_create_by_cidr(self):
- rule = security_group_rule_template(cidr='10.2.3.124/24',
- parent_group_id=self.sg2['id'])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- res_dict = self.controller.create(req, {'security_group_rule': rule})
- security_group_rule = res_dict['security_group_rule']
- self.assertNotEqual(security_group_rule['id'], 0)
- self.assertEqual(security_group_rule['parent_group_id'],
- self.sg2['id'])
- self.assertEqual(security_group_rule['ip_range']['cidr'],
- "10.2.3.124/24")
-
- def test_create_by_group_id(self):
- rule = security_group_rule_template(group_id=self.sg1['id'],
- parent_group_id=self.sg2['id'])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- res_dict = self.controller.create(req, {'security_group_rule': rule})
-
- security_group_rule = res_dict['security_group_rule']
- self.assertNotEqual(security_group_rule['id'], 0)
- self.assertEqual(security_group_rule['parent_group_id'],
- self.sg2['id'])
-
- def test_create_by_same_group_id(self):
- rule1 = security_group_rule_template(group_id=self.sg1['id'],
- from_port=80, to_port=80,
- parent_group_id=self.sg2['id'])
- self.parent_security_group['rules'] = [security_group_rule_db(rule1)]
-
- rule2 = security_group_rule_template(group_id=self.sg1['id'],
- from_port=81, to_port=81,
- parent_group_id=self.sg2['id'])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- res_dict = self.controller.create(req, {'security_group_rule': rule2})
-
- security_group_rule = res_dict['security_group_rule']
- self.assertNotEqual(security_group_rule['id'], 0)
- self.assertEqual(security_group_rule['parent_group_id'],
- self.sg2['id'])
- self.assertEqual(security_group_rule['from_port'], 81)
- self.assertEqual(security_group_rule['to_port'], 81)
-
- def test_create_none_value_from_to_port(self):
- rule = {'parent_group_id': self.sg1['id'],
- 'group_id': self.sg1['id']}
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- res_dict = self.controller.create(req, {'security_group_rule': rule})
- security_group_rule = res_dict['security_group_rule']
- self.assertIsNone(security_group_rule['from_port'])
- self.assertIsNone(security_group_rule['to_port'])
- self.assertEqual(security_group_rule['group']['name'], 'test')
- self.assertEqual(security_group_rule['parent_group_id'],
- self.sg1['id'])
-
- def test_create_none_value_from_to_port_icmp(self):
- rule = {'parent_group_id': self.sg1['id'],
- 'group_id': self.sg1['id'],
- 'ip_protocol': 'ICMP'}
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- res_dict = self.controller.create(req, {'security_group_rule': rule})
- security_group_rule = res_dict['security_group_rule']
- self.assertEqual(security_group_rule['ip_protocol'], 'ICMP')
- self.assertEqual(security_group_rule['from_port'], -1)
- self.assertEqual(security_group_rule['to_port'], -1)
- self.assertEqual(security_group_rule['group']['name'], 'test')
- self.assertEqual(security_group_rule['parent_group_id'],
- self.sg1['id'])
-
- def test_create_none_value_from_to_port_tcp(self):
- rule = {'parent_group_id': self.sg1['id'],
- 'group_id': self.sg1['id'],
- 'ip_protocol': 'TCP'}
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- res_dict = self.controller.create(req, {'security_group_rule': rule})
- security_group_rule = res_dict['security_group_rule']
- self.assertEqual(security_group_rule['ip_protocol'], 'TCP')
- self.assertEqual(security_group_rule['from_port'], 1)
- self.assertEqual(security_group_rule['to_port'], 65535)
- self.assertEqual(security_group_rule['group']['name'], 'test')
- self.assertEqual(security_group_rule['parent_group_id'],
- self.sg1['id'])
-
- def test_create_by_invalid_cidr_json(self):
- rule = security_group_rule_template(
- ip_protocol="tcp",
- from_port=22,
- to_port=22,
- parent_group_id=self.sg2['id'],
- cidr="10.2.3.124/2433")
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_by_invalid_tcp_port_json(self):
- rule = security_group_rule_template(
- ip_protocol="tcp",
- from_port=75534,
- to_port=22,
- parent_group_id=self.sg2['id'],
- cidr="10.2.3.124/24")
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_by_invalid_icmp_port_json(self):
- rule = security_group_rule_template(
- ip_protocol="icmp",
- from_port=1,
- to_port=256,
- parent_group_id=self.sg2['id'],
- cidr="10.2.3.124/24")
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_add_existing_rules_by_cidr(self):
- rule = security_group_rule_template(cidr='10.0.0.0/24',
- parent_group_id=self.sg2['id'])
-
- self.parent_security_group['rules'] = [security_group_rule_db(rule)]
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_add_existing_rules_by_group_id(self):
- rule = security_group_rule_template(group_id=1)
-
- self.parent_security_group['rules'] = [security_group_rule_db(rule)]
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_with_no_body(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, req, None)
-
- def test_create_with_no_security_group_rule_in_body(self):
- rules = {'test': 'test'}
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, req, rules)
-
- def test_create_with_invalid_parent_group_id(self):
- rule = security_group_rule_template(parent_group_id='invalid')
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_with_non_existing_parent_group_id(self):
- rule = security_group_rule_template(group_id=None,
- parent_group_id=self.invalid_id)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_with_non_existing_group_id(self):
- rule = security_group_rule_template(group_id='invalid',
- parent_group_id=self.sg2['id'])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_with_invalid_protocol(self):
- rule = security_group_rule_template(ip_protocol='invalid-protocol',
- cidr='10.2.2.0/24',
- parent_group_id=self.sg2['id'])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_with_no_protocol(self):
- rule = security_group_rule_template(cidr='10.2.2.0/24',
- parent_group_id=self.sg2['id'])
- del rule['ip_protocol']
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_with_invalid_from_port(self):
- rule = security_group_rule_template(from_port='666666',
- cidr='10.2.2.0/24',
- parent_group_id=self.sg2['id'])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_with_invalid_to_port(self):
- rule = security_group_rule_template(to_port='666666',
- cidr='10.2.2.0/24',
- parent_group_id=self.sg2['id'])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_with_non_numerical_from_port(self):
- rule = security_group_rule_template(from_port='invalid',
- cidr='10.2.2.0/24',
- parent_group_id=self.sg2['id'])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_with_non_numerical_to_port(self):
- rule = security_group_rule_template(to_port='invalid',
- cidr='10.2.2.0/24',
- parent_group_id=self.sg2['id'])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_with_no_from_port(self):
- rule = security_group_rule_template(cidr='10.2.2.0/24',
- parent_group_id=self.sg2['id'])
- del rule['from_port']
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_with_no_to_port(self):
- rule = security_group_rule_template(cidr='10.2.2.0/24',
- parent_group_id=self.sg2['id'])
- del rule['to_port']
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_with_invalid_cidr(self):
- rule = security_group_rule_template(cidr='10.2.2222.0/24',
- parent_group_id=self.sg2['id'])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_with_no_cidr_group(self):
- rule = security_group_rule_template(parent_group_id=self.sg2['id'])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- res_dict = self.controller.create(req, {'security_group_rule': rule})
-
- security_group_rule = res_dict['security_group_rule']
- self.assertNotEqual(security_group_rule['id'], 0)
- self.assertEqual(security_group_rule['parent_group_id'],
- self.parent_security_group['id'])
- self.assertEqual(security_group_rule['ip_range']['cidr'],
- "0.0.0.0/0")
-
- def test_create_with_invalid_group_id(self):
- rule = security_group_rule_template(group_id='invalid',
- parent_group_id=self.sg2['id'])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_with_empty_group_id(self):
- rule = security_group_rule_template(group_id='',
- parent_group_id=self.sg2['id'])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_with_nonexist_group_id(self):
- rule = security_group_rule_template(group_id=self.invalid_id,
- parent_group_id=self.sg2['id'])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_with_same_group_parent_id_and_group_id(self):
- rule = security_group_rule_template(group_id=self.sg1['id'],
- parent_group_id=self.sg1['id'])
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- res_dict = self.controller.create(req, {'security_group_rule': rule})
- security_group_rule = res_dict['security_group_rule']
- self.assertNotEqual(security_group_rule['id'], 0)
- self.assertEqual(security_group_rule['parent_group_id'],
- self.sg1['id'])
- self.assertEqual(security_group_rule['group']['name'],
- self.sg1['name'])
-
- def _test_create_with_no_ports_and_no_group(self, proto):
- rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id']}
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
- def _test_create_with_no_ports(self, proto):
- rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id'],
- 'group_id': self.sg1['id']}
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- res_dict = self.controller.create(req, {'security_group_rule': rule})
- security_group_rule = res_dict['security_group_rule']
- expected_rule = {
- 'from_port': 1, 'group': {'tenant_id': '123', 'name': 'test'},
- 'ip_protocol': proto, 'to_port': 65535, 'parent_group_id':
- self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
- }
- if proto == 'icmp':
- expected_rule['to_port'] = -1
- expected_rule['from_port'] = -1
- self.assertEqual(expected_rule, security_group_rule)
-
- def test_create_with_no_ports_icmp(self):
- self._test_create_with_no_ports_and_no_group('icmp')
- self._test_create_with_no_ports('icmp')
-
- def test_create_with_no_ports_tcp(self):
- self._test_create_with_no_ports_and_no_group('tcp')
- self._test_create_with_no_ports('tcp')
-
- def test_create_with_no_ports_udp(self):
- self._test_create_with_no_ports_and_no_group('udp')
- self._test_create_with_no_ports('udp')
-
- def _test_create_with_ports(self, proto, from_port, to_port):
- rule = {
- 'ip_protocol': proto, 'from_port': from_port, 'to_port': to_port,
- 'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
- }
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- res_dict = self.controller.create(req, {'security_group_rule': rule})
-
- security_group_rule = res_dict['security_group_rule']
- expected_rule = {
- 'from_port': from_port,
- 'group': {'tenant_id': '123', 'name': 'test'},
- 'ip_protocol': proto, 'to_port': to_port, 'parent_group_id':
- self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
- }
- self.assertEqual(proto, security_group_rule['ip_protocol'])
- self.assertEqual(from_port, security_group_rule['from_port'])
- self.assertEqual(to_port, security_group_rule['to_port'])
- self.assertEqual(expected_rule, security_group_rule)
-
- def test_create_with_ports_icmp(self):
- self._test_create_with_ports('icmp', 0, 1)
- self._test_create_with_ports('icmp', 0, 0)
- self._test_create_with_ports('icmp', 1, 0)
-
- def test_create_with_ports_tcp(self):
- self._test_create_with_ports('tcp', 1, 1)
- self._test_create_with_ports('tcp', 1, 65535)
- self._test_create_with_ports('tcp', 65535, 65535)
-
- def test_create_with_ports_udp(self):
- self._test_create_with_ports('udp', 1, 1)
- self._test_create_with_ports('udp', 1, 65535)
- self._test_create_with_ports('udp', 65535, 65535)
-
- def test_delete(self):
- rule = security_group_rule_template(id=self.sg2['id'],
- parent_group_id=self.sg2['id'])
-
- def security_group_rule_get(context, id):
- return security_group_rule_db(rule)
-
- def security_group_rule_destroy(context, id):
- pass
-
- self.stubs.Set(nova.db, 'security_group_rule_get',
- security_group_rule_get)
- self.stubs.Set(nova.db, 'security_group_rule_destroy',
- security_group_rule_destroy)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
- % self.sg2['id'])
- self.controller.delete(req, self.sg2['id'])
-
- def test_delete_invalid_rule_id(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules' +
- '/invalid')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
- req, 'invalid')
-
- def test_delete_non_existing_rule_id(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
- % self.invalid_id)
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- req, self.invalid_id)
-
- def test_create_rule_quota_limit(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- for num in range(100, 100 + CONF.quota_security_group_rules):
- rule = {
- 'ip_protocol': 'tcp', 'from_port': num,
- 'to_port': num, 'parent_group_id': self.sg2['id'],
- 'group_id': self.sg1['id']
- }
- self.controller.create(req, {'security_group_rule': rule})
-
- rule = {
- 'ip_protocol': 'tcp', 'from_port': '121', 'to_port': '121',
- 'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
- }
- self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
- req, {'security_group_rule': rule})
-
- def test_create_rule_cidr_allow_all(self):
- rule = security_group_rule_template(cidr='0.0.0.0/0',
- parent_group_id=self.sg2['id'])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- res_dict = self.controller.create(req, {'security_group_rule': rule})
-
- security_group_rule = res_dict['security_group_rule']
- self.assertNotEqual(security_group_rule['id'], 0)
- self.assertEqual(security_group_rule['parent_group_id'],
- self.parent_security_group['id'])
- self.assertEqual(security_group_rule['ip_range']['cidr'],
- "0.0.0.0/0")
-
- def test_create_rule_cidr_ipv6_allow_all(self):
- rule = security_group_rule_template(cidr='::/0',
- parent_group_id=self.sg2['id'])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- res_dict = self.controller.create(req, {'security_group_rule': rule})
-
- security_group_rule = res_dict['security_group_rule']
- self.assertNotEqual(security_group_rule['id'], 0)
- self.assertEqual(security_group_rule['parent_group_id'],
- self.parent_security_group['id'])
- self.assertEqual(security_group_rule['ip_range']['cidr'],
- "::/0")
-
- def test_create_rule_cidr_allow_some(self):
- rule = security_group_rule_template(cidr='15.0.0.0/8',
- parent_group_id=self.sg2['id'])
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- res_dict = self.controller.create(req, {'security_group_rule': rule})
-
- security_group_rule = res_dict['security_group_rule']
- self.assertNotEqual(security_group_rule['id'], 0)
- self.assertEqual(security_group_rule['parent_group_id'],
- self.parent_security_group['id'])
- self.assertEqual(security_group_rule['ip_range']['cidr'],
- "15.0.0.0/8")
-
- def test_create_rule_cidr_bad_netmask(self):
- rule = security_group_rule_template(cidr='15.0.0.0/0')
- req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'security_group_rule': rule})
-
-
-class TestSecurityGroupRulesV2(TestSecurityGroupRulesV21):
- secgrp_ctl_cls = secgroups_v2.SecurityGroupRulesController
-
-
-class TestSecurityGroupRulesXMLDeserializer(test.TestCase):
-
- def setUp(self):
- super(TestSecurityGroupRulesXMLDeserializer, self).setUp()
- self.deserializer = secgroups_v2.SecurityGroupRulesXMLDeserializer()
-
- def test_create_request(self):
- serial_request = """
-<security_group_rule>
- <parent_group_id>12</parent_group_id>
- <from_port>22</from_port>
- <to_port>22</to_port>
- <group_id></group_id>
- <ip_protocol>tcp</ip_protocol>
- <cidr>10.0.0.0/24</cidr>
-</security_group_rule>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "security_group_rule": {
- "parent_group_id": "12",
- "from_port": "22",
- "to_port": "22",
- "ip_protocol": "tcp",
- "group_id": "",
- "cidr": "10.0.0.0/24",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_create_no_protocol_request(self):
- serial_request = """
-<security_group_rule>
- <parent_group_id>12</parent_group_id>
- <from_port>22</from_port>
- <to_port>22</to_port>
- <group_id></group_id>
- <cidr>10.0.0.0/24</cidr>
-</security_group_rule>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "security_group_rule": {
- "parent_group_id": "12",
- "from_port": "22",
- "to_port": "22",
- "group_id": "",
- "cidr": "10.0.0.0/24",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_corrupt_xml(self):
- """Should throw a 400 error on corrupt xml."""
- self.assertRaises(
- exception.MalformedRequestBody,
- self.deserializer.deserialize,
- utils.killer_xml_body())
-
-
-class TestSecurityGroupXMLDeserializer(test.TestCase):
-
- def setUp(self):
- super(TestSecurityGroupXMLDeserializer, self).setUp()
- self.deserializer = secgroups_v2.SecurityGroupXMLDeserializer()
-
- def test_create_request(self):
- serial_request = """
-<security_group name="test">
- <description>test</description>
-</security_group>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "security_group": {
- "name": "test",
- "description": "test",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_create_no_description_request(self):
- serial_request = """
-<security_group name="test">
-</security_group>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "security_group": {
- "name": "test",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_create_no_name_request(self):
- serial_request = """
-<security_group>
-<description>test</description>
-</security_group>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "security_group": {
- "description": "test",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_corrupt_xml(self):
- """Should throw a 400 error on corrupt xml."""
- self.assertRaises(
- exception.MalformedRequestBody,
- self.deserializer.deserialize,
- utils.killer_xml_body())
-
-
-class TestSecurityGroupXMLSerializer(test.TestCase):
- def setUp(self):
- super(TestSecurityGroupXMLSerializer, self).setUp()
- self.namespace = wsgi.XMLNS_V11
- self.rule_serializer = secgroups_v2.SecurityGroupRuleTemplate()
- self.index_serializer = secgroups_v2.SecurityGroupsTemplate()
- self.default_serializer = secgroups_v2.SecurityGroupTemplate()
-
- def _tag(self, elem):
- tagname = elem.tag
- self.assertEqual(tagname[0], '{')
- tmp = tagname.partition('}')
- namespace = tmp[0][1:]
- self.assertEqual(namespace, self.namespace)
- return tmp[2]
-
- def _verify_security_group_rule(self, raw_rule, tree):
- self.assertEqual(raw_rule['id'], tree.get('id'))
- self.assertEqual(raw_rule['parent_group_id'],
- tree.get('parent_group_id'))
-
- seen = set()
- expected = set(['ip_protocol', 'from_port', 'to_port',
- 'group', 'group/name', 'group/tenant_id',
- 'ip_range', 'ip_range/cidr'])
-
- for child in tree:
- child_tag = self._tag(child)
- self.assertIn(child_tag, raw_rule)
- seen.add(child_tag)
- if child_tag in ('group', 'ip_range'):
- for gr_child in child:
- gr_child_tag = self._tag(gr_child)
- self.assertIn(gr_child_tag, raw_rule[child_tag])
- seen.add('%s/%s' % (child_tag, gr_child_tag))
- self.assertEqual(gr_child.text,
- raw_rule[child_tag][gr_child_tag])
- else:
- self.assertEqual(child.text, raw_rule[child_tag])
- self.assertEqual(seen, expected)
-
- def _verify_security_group(self, raw_group, tree):
- rules = raw_group['rules']
- self.assertEqual('security_group', self._tag(tree))
- self.assertEqual(raw_group['id'], tree.get('id'))
- self.assertEqual(raw_group['tenant_id'], tree.get('tenant_id'))
- self.assertEqual(raw_group['name'], tree.get('name'))
- self.assertEqual(2, len(tree))
- for child in tree:
- child_tag = self._tag(child)
- if child_tag == 'rules':
- self.assertEqual(2, len(child))
- for idx, gr_child in enumerate(child):
- self.assertEqual(self._tag(gr_child), 'rule')
- self._verify_security_group_rule(rules[idx], gr_child)
- else:
- self.assertEqual('description', child_tag)
- self.assertEqual(raw_group['description'], child.text)
-
- def test_rule_serializer(self):
- raw_rule = dict(
- id='123',
- parent_group_id='456',
- ip_protocol='tcp',
- from_port='789',
- to_port='987',
- group=dict(name='group', tenant_id='tenant'),
- ip_range=dict(cidr='10.0.0.0/8'))
- rule = dict(security_group_rule=raw_rule)
- text = self.rule_serializer.serialize(rule)
-
- tree = etree.fromstring(text)
-
- self.assertEqual('security_group_rule', self._tag(tree))
- self._verify_security_group_rule(raw_rule, tree)
-
- def test_group_serializer(self):
- rules = [dict(
- id='123',
- parent_group_id='456',
- ip_protocol='tcp',
- from_port='789',
- to_port='987',
- group=dict(name='group1', tenant_id='tenant1'),
- ip_range=dict(cidr='10.55.44.0/24')),
- dict(
- id='654',
- parent_group_id='321',
- ip_protocol='udp',
- from_port='234',
- to_port='567',
- group=dict(name='group2', tenant_id='tenant2'),
- ip_range=dict(cidr='10.44.55.0/24'))]
- raw_group = dict(
- id='890',
- description='description',
- name='name',
- tenant_id='tenant',
- rules=rules)
- sg_group = dict(security_group=raw_group)
- text = self.default_serializer.serialize(sg_group)
-
- tree = etree.fromstring(text)
-
- self._verify_security_group(raw_group, tree)
-
- def test_groups_serializer(self):
- rules = [dict(
- id='123',
- parent_group_id='1234',
- ip_protocol='tcp',
- from_port='12345',
- to_port='123456',
- group=dict(name='group1', tenant_id='tenant1'),
- ip_range=dict(cidr='10.123.0.0/24')),
- dict(
- id='234',
- parent_group_id='2345',
- ip_protocol='udp',
- from_port='23456',
- to_port='234567',
- group=dict(name='group2', tenant_id='tenant2'),
- ip_range=dict(cidr='10.234.0.0/24')),
- dict(
- id='345',
- parent_group_id='3456',
- ip_protocol='tcp',
- from_port='34567',
- to_port='345678',
- group=dict(name='group3', tenant_id='tenant3'),
- ip_range=dict(cidr='10.345.0.0/24')),
- dict(
- id='456',
- parent_group_id='4567',
- ip_protocol='udp',
- from_port='45678',
- to_port='456789',
- group=dict(name='group4', tenant_id='tenant4'),
- ip_range=dict(cidr='10.456.0.0/24'))]
- groups = [dict(
- id='567',
- description='description1',
- name='name1',
- tenant_id='tenant1',
- rules=rules[0:2]),
- dict(
- id='678',
- description='description2',
- name='name2',
- tenant_id='tenant2',
- rules=rules[2:4])]
- sg_groups = dict(security_groups=groups)
- text = self.index_serializer.serialize(sg_groups)
-
- tree = etree.fromstring(text)
-
- self.assertEqual('security_groups', self._tag(tree))
- self.assertEqual(len(groups), len(tree))
- for idx, child in enumerate(tree):
- self._verify_security_group(groups[idx], child)
-
-
-UUID1 = '00000000-0000-0000-0000-000000000001'
-UUID2 = '00000000-0000-0000-0000-000000000002'
-UUID3 = '00000000-0000-0000-0000-000000000003'
-
-
-def fake_compute_get_all(*args, **kwargs):
- base = {'id': 1, 'description': 'foo', 'user_id': 'bar',
- 'project_id': 'baz', 'deleted': False, 'deleted_at': None,
- 'updated_at': None, 'created_at': None}
- db_list = [
- fakes.stub_instance(
- 1, uuid=UUID1,
- security_groups=[dict(base, **{'name': 'fake-0-0'}),
- dict(base, **{'name': 'fake-0-1'})]),
- fakes.stub_instance(
- 2, uuid=UUID2,
- security_groups=[dict(base, **{'name': 'fake-1-0'}),
- dict(base, **{'name': 'fake-1-1'})])
- ]
-
- return instance_obj._make_instance_list(args[1],
- objects.InstanceList(),
- db_list,
- ['metadata', 'system_metadata',
- 'security_groups', 'info_cache'])
-
-
-def fake_compute_get(*args, **kwargs):
- inst = fakes.stub_instance(1, uuid=UUID3,
- security_groups=[{'name': 'fake-2-0'},
- {'name': 'fake-2-1'}])
- return fake_instance.fake_instance_obj(args[1],
- expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, **inst)
-
-
-def fake_compute_create(*args, **kwargs):
- return ([fake_compute_get(*args, **kwargs)], '')
-
-
-def fake_get_instances_security_groups_bindings(inst, context, servers):
- groups = {UUID1: [{'name': 'fake-0-0'}, {'name': 'fake-0-1'}],
- UUID2: [{'name': 'fake-1-0'}, {'name': 'fake-1-1'}],
- UUID3: [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]}
- result = {}
- for server in servers:
- result[server['id']] = groups.get(server['id'])
- return result
-
-
-class SecurityGroupsOutputTestV21(test.TestCase):
- base_url = '/v2/fake/servers'
- content_type = 'application/json'
-
- def setUp(self):
- super(SecurityGroupsOutputTestV21, self).setUp()
- fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
- self.stubs.Set(compute.api.API, 'create', fake_compute_create)
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Security_groups'])
- self.app = self._setup_app()
-
- def _setup_app(self):
- return fakes.wsgi_app_v21(init_only=('os-security-groups', 'servers'))
-
- def _make_request(self, url, body=None):
- req = webob.Request.blank(url)
- if body:
- req.method = 'POST'
- req.body = self._encode_body(body)
- req.content_type = self.content_type
- req.headers['Accept'] = self.content_type
- res = req.get_response(self.app)
- return res
-
- def _encode_body(self, body):
- return jsonutils.dumps(body)
-
- def _get_server(self, body):
- return jsonutils.loads(body).get('server')
-
- def _get_servers(self, body):
- return jsonutils.loads(body).get('servers')
-
- def _get_groups(self, server):
- return server.get('security_groups')
-
- def test_create(self):
- image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
- server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
- res = self._make_request(self.base_url, {'server': server})
- self.assertEqual(res.status_int, 202)
- server = self._get_server(res.body)
- for i, group in enumerate(self._get_groups(server)):
- name = 'fake-2-%s' % i
- self.assertEqual(group.get('name'), name)
-
- def test_show(self):
- url = self.base_url + '/' + UUID3
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- server = self._get_server(res.body)
- for i, group in enumerate(self._get_groups(server)):
- name = 'fake-2-%s' % i
- self.assertEqual(group.get('name'), name)
-
- def test_detail(self):
- url = self.base_url + '/detail'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- for i, server in enumerate(self._get_servers(res.body)):
- for j, group in enumerate(self._get_groups(server)):
- name = 'fake-%s-%s' % (i, j)
- self.assertEqual(group.get('name'), name)
-
- def test_no_instance_passthrough_404(self):
-
- def fake_compute_get(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id='fake')
-
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- url = self.base_url + '/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 404)
-
-
-class SecurityGroupsOutputTestV2(SecurityGroupsOutputTestV21):
-
- def _setup_app(self):
- return fakes.wsgi_app(init_only=('servers',))
-
-
-class SecurityGroupsOutputXmlTest(SecurityGroupsOutputTestV2):
- content_type = 'application/xml'
-
- class MinimalCreateServerTemplate(xmlutil.TemplateBuilder):
- def construct(self):
- root = xmlutil.TemplateElement('server', selector='server')
- root.set('name')
- root.set('id')
- root.set('imageRef')
- root.set('flavorRef')
- return xmlutil.MasterTemplate(root, 1,
- nsmap={None: xmlutil.XMLNS_V11})
-
- def _encode_body(self, body):
- serializer = self.MinimalCreateServerTemplate()
- return serializer.serialize(body)
-
- def _get_server(self, body):
- return etree.XML(body)
-
- def _get_servers(self, body):
- return etree.XML(body).getchildren()
-
- def _get_groups(self, server):
- # NOTE(vish): we are adding security groups without an extension
- # namespace so we don't break people using the existing
- # functionality, but that means we need to use find with
- # the existing server namespace.
- namespace = server.nsmap[None]
- return server.find('{%s}security_groups' % namespace).getchildren()
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py b/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py
deleted file mode 100644
index bf29024f99..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright 2011 Eldar Nugaev
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from lxml import etree
-import mock
-from oslo.serialization import jsonutils
-
-from nova.api.openstack import compute
-from nova.api.openstack.compute.contrib import server_diagnostics
-from nova.api.openstack import wsgi
-from nova.compute import api as compute_api
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-UUID = 'abc'
-
-
-def fake_get_diagnostics(self, _context, instance_uuid):
- return {'data': 'Some diagnostic info'}
-
-
-def fake_instance_get(self, _context, instance_uuid, want_objects=False,
- expected_attrs=None):
- if instance_uuid != UUID:
- raise Exception("Invalid UUID")
- return {'uuid': instance_uuid}
-
-
-class ServerDiagnosticsTestV21(test.NoDBTestCase):
-
- def _setup_router(self):
- self.router = compute.APIRouterV3(init_only=('servers',
- 'os-server-diagnostics'))
-
- def _get_request(self):
- return fakes.HTTPRequestV3.blank(
- '/servers/%s/diagnostics' % UUID)
-
- def setUp(self):
- super(ServerDiagnosticsTestV21, self).setUp()
- self._setup_router()
-
- @mock.patch.object(compute_api.API, 'get_diagnostics',
- fake_get_diagnostics)
- @mock.patch.object(compute_api.API, 'get',
- fake_instance_get)
- def test_get_diagnostics(self):
- req = self._get_request()
- res = req.get_response(self.router)
- output = jsonutils.loads(res.body)
- self.assertEqual(output, {'data': 'Some diagnostic info'})
-
- @mock.patch.object(compute_api.API, 'get_diagnostics',
- fake_get_diagnostics)
- @mock.patch.object(compute_api.API, 'get',
- side_effect=exception.InstanceNotFound(instance_id=UUID))
- def test_get_diagnostics_with_non_existed_instance(self, mock_get):
- req = self._get_request()
- res = req.get_response(self.router)
- self.assertEqual(res.status_int, 404)
-
- @mock.patch.object(compute_api.API, 'get_diagnostics',
- side_effect=exception.InstanceInvalidState('fake message'))
- @mock.patch.object(compute_api.API, 'get', fake_instance_get)
- def test_get_diagnostics_raise_conflict_on_invalid_state(self,
- mock_get_diagnostics):
- req = self._get_request()
- res = req.get_response(self.router)
- self.assertEqual(409, res.status_int)
-
- @mock.patch.object(compute_api.API, 'get_diagnostics',
- side_effect=NotImplementedError)
- @mock.patch.object(compute_api.API, 'get', fake_instance_get)
- def test_get_diagnostics_raise_no_notimplementederror(self,
- mock_get_diagnostics):
- req = self._get_request()
- res = req.get_response(self.router)
- self.assertEqual(501, res.status_int)
-
-
-class ServerDiagnosticsTestV2(ServerDiagnosticsTestV21):
-
- def _setup_router(self):
- self.flags(verbose=True,
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Server_diagnostics'])
-
- self.router = compute.APIRouter(init_only=('servers', 'diagnostics'))
-
- def _get_request(self):
- return fakes.HTTPRequest.blank(
- '/fake/servers/%s/diagnostics' % UUID)
-
-
-class TestServerDiagnosticsXMLSerializer(test.NoDBTestCase):
- namespace = wsgi.XMLNS_V11
-
- def _tag(self, elem):
- tagname = elem.tag
- self.assertEqual(tagname[0], '{')
- tmp = tagname.partition('}')
- namespace = tmp[0][1:]
- self.assertEqual(namespace, self.namespace)
- return tmp[2]
-
- def test_index_serializer(self):
- serializer = server_diagnostics.ServerDiagnosticsTemplate()
- exemplar = dict(diag1='foo', diag2='bar')
- text = serializer.serialize(exemplar)
-
- tree = etree.fromstring(text)
-
- self.assertEqual('diagnostics', self._tag(tree))
- self.assertEqual(len(tree), len(exemplar))
- for child in tree:
- tag = self._tag(child)
- self.assertIn(tag, exemplar)
- self.assertEqual(child.text, exemplar[tag])
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_group_quotas.py b/nova/tests/api/openstack/compute/contrib/test_server_group_quotas.py
deleted file mode 100644
index 1f97c5ec4f..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_server_group_quotas.py
+++ /dev/null
@@ -1,188 +0,0 @@
-# Copyright 2014 Hewlett-Packard Development Company, L.P
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.config import cfg
-import webob
-
-from nova.api.openstack.compute.contrib import server_groups
-from nova.api.openstack.compute.plugins.v3 import server_groups as sg_v3
-from nova.api.openstack import extensions
-from nova import context
-import nova.db
-from nova.openstack.common import uuidutils
-from nova import quota
-from nova import test
-from nova.tests.api.openstack import fakes
-
-CONF = cfg.CONF
-
-
-class AttrDict(dict):
- def __getattr__(self, k):
- return self[k]
-
-
-def server_group_template(**kwargs):
- sgroup = kwargs.copy()
- sgroup.setdefault('name', 'test')
- return sgroup
-
-
-def server_group_db(sg):
- attrs = sg.copy()
- if 'id' in attrs:
- attrs['uuid'] = attrs.pop('id')
- if 'policies' in attrs:
- policies = attrs.pop('policies')
- attrs['policies'] = policies
- else:
- attrs['policies'] = []
- if 'members' in attrs:
- members = attrs.pop('members')
- attrs['members'] = members
- else:
- attrs['members'] = []
- if 'metadata' in attrs:
- attrs['metadetails'] = attrs.pop('metadata')
- else:
- attrs['metadetails'] = {}
- attrs['deleted'] = 0
- attrs['deleted_at'] = None
- attrs['created_at'] = None
- attrs['updated_at'] = None
- if 'user_id' not in attrs:
- attrs['user_id'] = 'user_id'
- if 'project_id' not in attrs:
- attrs['project_id'] = 'project_id'
- attrs['id'] = 7
-
- return AttrDict(attrs)
-
-
-class ServerGroupQuotasTestV21(test.TestCase):
-
- def setUp(self):
- super(ServerGroupQuotasTestV21, self).setUp()
- self._setup_controller()
- self.app = self._get_app()
-
- def _setup_controller(self):
- self.controller = sg_v3.ServerGroupController()
-
- def _get_app(self):
- return fakes.wsgi_app_v21(init_only=('os-server-groups',))
-
- def _get_url(self):
- return '/v2/fake'
-
- def _setup_quotas(self):
- pass
-
- def _assert_server_groups_in_use(self, project_id, user_id, in_use):
- ctxt = context.get_admin_context()
- result = quota.QUOTAS.get_user_quotas(ctxt, project_id, user_id)
- self.assertEqual(result['server_groups']['in_use'], in_use)
-
- def test_create_server_group_normal(self):
- self._setup_quotas()
- req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups')
- sgroup = server_group_template()
- policies = ['anti-affinity']
- sgroup['policies'] = policies
- res_dict = self.controller.create(req, {'server_group': sgroup})
- self.assertEqual(res_dict['server_group']['name'], 'test')
- self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
- self.assertEqual(res_dict['server_group']['policies'], policies)
-
- def test_create_server_group_quota_limit(self):
- self._setup_quotas()
- req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups')
- sgroup = server_group_template()
- policies = ['anti-affinity']
- sgroup['policies'] = policies
- # Start by creating as many server groups as we're allowed to.
- for i in range(CONF.quota_server_groups):
- self.controller.create(req, {'server_group': sgroup})
-
- # Then, creating a server group should fail.
- self.assertRaises(webob.exc.HTTPForbidden,
- self.controller.create,
- req, {'server_group': sgroup})
-
- def test_delete_server_group_by_admin(self):
- self._setup_quotas()
- sgroup = server_group_template()
- policies = ['anti-affinity']
- sgroup['policies'] = policies
- req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups')
- res = self.controller.create(req, {'server_group': sgroup})
- sg_id = res['server_group']['id']
- context = req.environ['nova.context']
-
- self._assert_server_groups_in_use(context.project_id,
- context.user_id, 1)
-
- # Delete the server group we've just created.
- req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups/%s' % sg_id,
- use_admin_context=True)
- self.controller.delete(req, sg_id)
-
- # Make sure the quota in use has been released.
- self._assert_server_groups_in_use(context.project_id,
- context.user_id, 0)
-
- def test_delete_server_group_by_id(self):
- self._setup_quotas()
- sg = server_group_template(id='123')
- self.called = False
-
- def server_group_delete(context, id):
- self.called = True
-
- def return_server_group(context, group_id):
- self.assertEqual(sg['id'], group_id)
- return server_group_db(sg)
-
- self.stubs.Set(nova.db, 'instance_group_delete',
- server_group_delete)
- self.stubs.Set(nova.db, 'instance_group_get',
- return_server_group)
-
- req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups/123')
- resp = self.controller.delete(req, '123')
- self.assertTrue(self.called)
-
- # NOTE: on v2.1, http status code is set as wsgi_code of API
- # method instead of status_int in a response object.
- if isinstance(self.controller, sg_v3.ServerGroupController):
- status_int = self.controller.delete.wsgi_code
- else:
- status_int = resp.status_int
- self.assertEqual(204, status_int)
-
-
-class ServerGroupQuotasTestV2(ServerGroupQuotasTestV21):
-
- def _setup_controller(self):
- self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
- self.controller = server_groups.ServerGroupController(self.ext_mgr)
-
- def _setup_quotas(self):
- self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes()\
- .AndReturn(True)
- self.mox.ReplayAll()
-
- def _get_app(self):
- return fakes.wsgi_app(init_only=('os-server-groups',))
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_groups.py b/nova/tests/api/openstack/compute/contrib/test_server_groups.py
deleted file mode 100644
index 7d4b4d88ca..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_server_groups.py
+++ /dev/null
@@ -1,521 +0,0 @@
-# Copyright (c) 2014 Cisco Systems, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-import webob
-
-from nova.api.openstack.compute.contrib import server_groups
-from nova.api.openstack.compute.plugins.v3 import server_groups as sg_v3
-from nova.api.openstack import extensions
-from nova.api.openstack import wsgi
-from nova import context
-import nova.db
-from nova import exception
-from nova import objects
-from nova.openstack.common import uuidutils
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import utils
-
-FAKE_UUID1 = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
-FAKE_UUID2 = 'c6e6430a-6563-4efa-9542-5e93c9e97d18'
-FAKE_UUID3 = 'b8713410-9ba3-e913-901b-13410ca90121'
-
-
-class AttrDict(dict):
- def __getattr__(self, k):
- return self[k]
-
-
-def server_group_template(**kwargs):
- sgroup = kwargs.copy()
- sgroup.setdefault('name', 'test')
- return sgroup
-
-
-def server_group_resp_template(**kwargs):
- sgroup = kwargs.copy()
- sgroup.setdefault('name', 'test')
- sgroup.setdefault('policies', [])
- sgroup.setdefault('members', [])
- return sgroup
-
-
-def server_group_db(sg):
- attrs = sg.copy()
- if 'id' in attrs:
- attrs['uuid'] = attrs.pop('id')
- if 'policies' in attrs:
- policies = attrs.pop('policies')
- attrs['policies'] = policies
- else:
- attrs['policies'] = []
- if 'members' in attrs:
- members = attrs.pop('members')
- attrs['members'] = members
- else:
- attrs['members'] = []
- attrs['deleted'] = 0
- attrs['deleted_at'] = None
- attrs['created_at'] = None
- attrs['updated_at'] = None
- if 'user_id' not in attrs:
- attrs['user_id'] = 'user_id'
- if 'project_id' not in attrs:
- attrs['project_id'] = 'project_id'
- attrs['id'] = 7
-
- return AttrDict(attrs)
-
-
-class ServerGroupTestV21(test.TestCase):
-
- def setUp(self):
- super(ServerGroupTestV21, self).setUp()
- self._setup_controller()
- self.app = self._get_app()
-
- def _setup_controller(self):
- self.controller = sg_v3.ServerGroupController()
-
- def _get_app(self):
- return fakes.wsgi_app_v21(init_only=('os-server-groups',))
-
- def _get_url(self):
- return '/v2/fake'
-
- def test_create_server_group_with_no_policies(self):
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
- sgroup = server_group_template()
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'server_group': sgroup})
-
- def test_create_server_group_normal(self):
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
- sgroup = server_group_template()
- policies = ['anti-affinity']
- sgroup['policies'] = policies
- res_dict = self.controller.create(req, {'server_group': sgroup})
- self.assertEqual(res_dict['server_group']['name'], 'test')
- self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
- self.assertEqual(res_dict['server_group']['policies'], policies)
-
- def _create_instance(self, context):
- instance = objects.Instance(image_ref=1, node='node1',
- reservation_id='a', host='host1', project_id='fake',
- vm_state='fake', system_metadata={'key': 'value'})
- instance.create(context)
- return instance
-
- def _create_instance_group(self, context, members):
- ig = objects.InstanceGroup(name='fake_name',
- user_id='fake_user', project_id='fake',
- members=members)
- ig.create(context)
- return ig.uuid
-
- def _create_groups_and_instances(self, ctx):
- instances = [self._create_instance(ctx), self._create_instance(ctx)]
- members = [instance.uuid for instance in instances]
- ig_uuid = self._create_instance_group(ctx, members)
- return (ig_uuid, instances, members)
-
- def test_display_members(self):
- ctx = context.RequestContext('fake_user', 'fake')
- (ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
- res_dict = self.controller.show(req, ig_uuid)
- result_members = res_dict['server_group']['members']
- self.assertEqual(2, len(result_members))
- for member in members:
- self.assertIn(member, result_members)
-
- def test_display_active_members_only(self):
- ctx = context.RequestContext('fake_user', 'fake')
- (ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
-
- # delete an instance
- instances[1].destroy(ctx)
- # check that the instance does not exist
- self.assertRaises(exception.InstanceNotFound,
- objects.Instance.get_by_uuid,
- ctx, instances[1].uuid)
- res_dict = self.controller.show(req, ig_uuid)
- result_members = res_dict['server_group']['members']
- # check that only the active instance is displayed
- self.assertEqual(1, len(result_members))
- self.assertIn(instances[0].uuid, result_members)
-
- def test_create_server_group_with_illegal_name(self):
- # blank name
- sgroup = server_group_template(name='', policies=['test_policy'])
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'server_group': sgroup})
-
- # name with length 256
- sgroup = server_group_template(name='1234567890' * 26,
- policies=['test_policy'])
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'server_group': sgroup})
-
- # non-string name
- sgroup = server_group_template(name=12, policies=['test_policy'])
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'server_group': sgroup})
-
- # name with leading spaces
- sgroup = server_group_template(name=' leading spaces',
- policies=['test_policy'])
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'server_group': sgroup})
-
- # name with trailing spaces
- sgroup = server_group_template(name='trailing space ',
- policies=['test_policy'])
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'server_group': sgroup})
-
- # name with all spaces
- sgroup = server_group_template(name=' ',
- policies=['test_policy'])
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'server_group': sgroup})
-
- def test_create_server_group_with_illegal_policies(self):
- # blank policy
- sgroup = server_group_template(name='fake-name', policies='')
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'server_group': sgroup})
-
- # policy as integer
- sgroup = server_group_template(name='fake-name', policies=7)
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'server_group': sgroup})
-
- # policy as string
- sgroup = server_group_template(name='fake-name', policies='invalid')
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'server_group': sgroup})
-
- # policy as None
- sgroup = server_group_template(name='fake-name', policies=None)
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'server_group': sgroup})
-
- def test_create_server_group_conflicting_policies(self):
- sgroup = server_group_template()
- policies = ['anti-affinity', 'affinity']
- sgroup['policies'] = policies
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'server_group': sgroup})
-
- def test_create_server_group_with_duplicate_policies(self):
- sgroup = server_group_template()
- policies = ['affinity', 'affinity']
- sgroup['policies'] = policies
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'server_group': sgroup})
-
- def test_create_server_group_not_supported(self):
- sgroup = server_group_template()
- policies = ['storage-affinity', 'anti-affinity', 'rack-affinity']
- sgroup['policies'] = policies
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, {'server_group': sgroup})
-
- def test_create_server_group_with_no_body(self):
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, req, None)
-
- def test_create_server_group_with_no_server_group(self):
- body = {'no-instanceGroup': None}
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, req, body)
-
- def test_list_server_group_by_tenant(self):
- groups = []
- policies = ['anti-affinity']
- members = []
- metadata = {} # always empty
- names = ['default-x', 'test']
- sg1 = server_group_resp_template(id=str(1345),
- name=names[0],
- policies=policies,
- members=members,
- metadata=metadata)
- sg2 = server_group_resp_template(id=str(891),
- name=names[1],
- policies=policies,
- members=members,
- metadata=metadata)
- groups = [sg1, sg2]
- expected = {'server_groups': groups}
-
- def return_server_groups(context, project_id):
- return [server_group_db(sg) for sg in groups]
-
- self.stubs.Set(nova.db, 'instance_group_get_all_by_project_id',
- return_server_groups)
-
- req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
- res_dict = self.controller.index(req)
- self.assertEqual(res_dict, expected)
-
- def test_list_server_group_all(self):
- all_groups = []
- tenant_groups = []
- policies = ['anti-affinity']
- members = []
- metadata = {} # always empty
- names = ['default-x', 'test']
- sg1 = server_group_resp_template(id=str(1345),
- name=names[0],
- policies=[],
- members=members,
- metadata=metadata)
- sg2 = server_group_resp_template(id=str(891),
- name=names[1],
- policies=policies,
- members=members,
- metadata={})
- tenant_groups = [sg2]
- all_groups = [sg1, sg2]
-
- all = {'server_groups': all_groups}
- tenant_specific = {'server_groups': tenant_groups}
-
- def return_all_server_groups(context):
- return [server_group_db(sg) for sg in all_groups]
-
- self.stubs.Set(nova.db, 'instance_group_get_all',
- return_all_server_groups)
-
- def return_tenant_server_groups(context, project_id):
- return [server_group_db(sg) for sg in tenant_groups]
-
- self.stubs.Set(nova.db, 'instance_group_get_all_by_project_id',
- return_tenant_server_groups)
-
- path = self._get_url() + '/os-server-groups?all_projects=True'
-
- req = fakes.HTTPRequest.blank(path, use_admin_context=True)
- res_dict = self.controller.index(req)
- self.assertEqual(res_dict, all)
- req = fakes.HTTPRequest.blank(path)
- res_dict = self.controller.index(req)
- self.assertEqual(res_dict, tenant_specific)
-
- def test_delete_server_group_by_id(self):
- sg = server_group_template(id='123')
-
- self.called = False
-
- def server_group_delete(context, id):
- self.called = True
-
- def return_server_group(context, group_id):
- self.assertEqual(sg['id'], group_id)
- return server_group_db(sg)
-
- self.stubs.Set(nova.db, 'instance_group_delete',
- server_group_delete)
- self.stubs.Set(nova.db, 'instance_group_get',
- return_server_group)
-
- req = fakes.HTTPRequest.blank(self._get_url() +
- '/os-server-groups/123')
- resp = self.controller.delete(req, '123')
- self.assertTrue(self.called)
-
- # NOTE: on v2.1, http status code is set as wsgi_code of API
- # method instead of status_int in a response object.
- if isinstance(self.controller, sg_v3.ServerGroupController):
- status_int = self.controller.delete.wsgi_code
- else:
- status_int = resp.status_int
- self.assertEqual(204, status_int)
-
- def test_delete_non_existing_server_group(self):
- req = fakes.HTTPRequest.blank(self._get_url() +
- '/os-server-groups/invalid')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- req, 'invalid')
-
-
-class ServerGroupTestV2(ServerGroupTestV21):
-
- def _setup_controller(self):
- ext_mgr = extensions.ExtensionManager()
- ext_mgr.extensions = {}
- self.controller = server_groups.ServerGroupController(ext_mgr)
-
- def _get_app(self):
- return fakes.wsgi_app(init_only=('os-server-groups',))
-
-
-class TestServerGroupXMLDeserializer(test.TestCase):
-
- def setUp(self):
- super(TestServerGroupXMLDeserializer, self).setUp()
- self.deserializer = server_groups.ServerGroupXMLDeserializer()
-
- def test_create_request(self):
- serial_request = """
-<server_group name="test">
-</server_group>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "server_group": {
- "name": "test",
- "policies": []
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_update_request(self):
- serial_request = """
-<server_group name="test">
-<policies>
-<policy>policy-1</policy>
-<policy>policy-2</policy>
-</policies>
-</server_group>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "server_group": {
- "name": 'test',
- "policies": ['policy-1', 'policy-2']
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_create_request_no_name(self):
- serial_request = """
-<server_group>
-</server_group>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "server_group": {
- "policies": []
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_corrupt_xml(self):
- """Should throw a 400 error on corrupt xml."""
- self.assertRaises(
- exception.MalformedRequestBody,
- self.deserializer.deserialize,
- utils.killer_xml_body())
-
-
-class TestServerGroupXMLSerializer(test.TestCase):
- def setUp(self):
- super(TestServerGroupXMLSerializer, self).setUp()
- self.namespace = wsgi.XMLNS_V11
- self.index_serializer = server_groups.ServerGroupsTemplate()
- self.default_serializer = server_groups.ServerGroupTemplate()
-
- def _tag(self, elem):
- tagname = elem.tag
- self.assertEqual(tagname[0], '{')
- tmp = tagname.partition('}')
- namespace = tmp[0][1:]
- self.assertEqual(namespace, self.namespace)
- return tmp[2]
-
- def _verify_server_group(self, raw_group, tree):
- policies = raw_group['policies']
- members = raw_group['members']
- self.assertEqual('server_group', self._tag(tree))
- self.assertEqual(raw_group['id'], tree.get('id'))
- self.assertEqual(raw_group['name'], tree.get('name'))
- self.assertEqual(3, len(tree))
- for child in tree:
- child_tag = self._tag(child)
- if child_tag == 'policies':
- self.assertEqual(len(policies), len(child))
- for idx, gr_child in enumerate(child):
- self.assertEqual(self._tag(gr_child), 'policy')
- self.assertEqual(policies[idx],
- gr_child.text)
- elif child_tag == 'members':
- self.assertEqual(len(members), len(child))
- for idx, gr_child in enumerate(child):
- self.assertEqual(self._tag(gr_child), 'member')
- self.assertEqual(members[idx],
- gr_child.text)
- elif child_tag == 'metadata':
- self.assertEqual(0, len(child))
-
- def _verify_server_group_brief(self, raw_group, tree):
- self.assertEqual('server_group', self._tag(tree))
- self.assertEqual(raw_group['id'], tree.get('id'))
- self.assertEqual(raw_group['name'], tree.get('name'))
-
- def test_group_serializer(self):
- policies = ["policy-1", "policy-2"]
- members = ["1", "2"]
- raw_group = dict(
- id='890',
- name='name',
- policies=policies,
- members=members)
- sg_group = dict(server_group=raw_group)
- text = self.default_serializer.serialize(sg_group)
-
- tree = etree.fromstring(text)
-
- self._verify_server_group(raw_group, tree)
-
- def test_groups_serializer(self):
- policies = ["policy-1", "policy-2",
- "policy-3"]
- members = ["1", "2", "3"]
- groups = [dict(
- id='890',
- name='test',
- policies=policies[0:2],
- members=members[0:2]),
- dict(
- id='123',
- name='default',
- policies=policies[2:],
- members=members[2:])]
- sg_groups = dict(server_groups=groups)
- text = self.index_serializer.serialize(sg_groups)
-
- tree = etree.fromstring(text)
-
- self.assertEqual('server_groups', self._tag(tree))
- self.assertEqual(len(groups), len(tree))
- for idx, child in enumerate(tree):
- self._verify_server_group_brief(groups[idx], child)
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_password.py b/nova/tests/api/openstack/compute/contrib/test_server_password.py
deleted file mode 100644
index d7a6d3f070..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_server_password.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.metadata import password
-from nova import compute
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-
-
-CONF = cfg.CONF
-CONF.import_opt('osapi_compute_ext_list', 'nova.api.openstack.compute.contrib')
-
-
-class ServerPasswordTest(test.TestCase):
- content_type = 'application/json'
-
- def setUp(self):
- super(ServerPasswordTest, self).setUp()
- fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(
- compute.api.API, 'get',
- lambda self, ctxt, *a, **kw:
- fake_instance.fake_instance_obj(
- ctxt,
- system_metadata={},
- expected_attrs=['system_metadata']))
- self.password = 'fakepass'
-
- def fake_extract_password(instance):
- return self.password
-
- def fake_convert_password(context, password):
- self.password = password
- return {}
-
- self.stubs.Set(password, 'extract_password', fake_extract_password)
- self.stubs.Set(password, 'convert_password', fake_convert_password)
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Server_password'])
-
- def _make_request(self, url, method='GET'):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- req.method = method
- res = req.get_response(
- fakes.wsgi_app(init_only=('servers', 'os-server-password')))
- return res
-
- def _get_pass(self, body):
- return jsonutils.loads(body).get('password')
-
- def test_get_password(self):
- url = '/v2/fake/servers/fake/os-server-password'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- self.assertEqual(self._get_pass(res.body), 'fakepass')
-
- def test_reset_password(self):
- url = '/v2/fake/servers/fake/os-server-password'
- res = self._make_request(url, 'DELETE')
- self.assertEqual(res.status_int, 204)
-
- res = self._make_request(url)
- self.assertEqual(res.status_int, 200)
- self.assertEqual(self._get_pass(res.body), '')
-
-
-class ServerPasswordXmlTest(ServerPasswordTest):
- content_type = 'application/xml'
-
- def _get_pass(self, body):
- # NOTE(vish): first element is password
- return etree.XML(body).text or ''
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py b/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
deleted file mode 100644
index ad19010020..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
+++ /dev/null
@@ -1,183 +0,0 @@
-# Copyright (c) 2012 Midokura Japan K.K.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mox
-import webob
-
-from nova.api.openstack.compute.contrib import server_start_stop \
- as server_v2
-from nova.api.openstack.compute import plugins
-from nova.api.openstack.compute.plugins.v3 import servers \
- as server_v21
-from nova.compute import api as compute_api
-from nova import db
-from nova import exception
-from nova.openstack.common import policy as common_policy
-from nova import policy
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-def fake_instance_get(context, instance_id,
- columns_to_join=None, use_slave=False):
- result = fakes.stub_instance(id=1, uuid=instance_id)
- result['created_at'] = None
- result['deleted_at'] = None
- result['updated_at'] = None
- result['deleted'] = 0
- result['info_cache'] = {'network_info': '[]',
- 'instance_uuid': result['uuid']}
- return result
-
-
-def fake_start_stop_not_ready(self, context, instance):
- raise exception.InstanceNotReady(instance_id=instance["uuid"])
-
-
-def fake_start_stop_locked_server(self, context, instance):
- raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
-
-
-def fake_start_stop_invalid_state(self, context, instance):
- raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
-
-
-class ServerStartStopTestV21(test.TestCase):
- start_policy = "compute:v3:servers:start"
- stop_policy = "compute:v3:servers:stop"
-
- def setUp(self):
- super(ServerStartStopTestV21, self).setUp()
- self._setup_controller()
-
- def _setup_controller(self):
- ext_info = plugins.LoadedExtensionInfo()
- self.controller = server_v21.ServersController(
- extension_info=ext_info)
-
- def test_start(self):
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
- self.mox.StubOutWithMock(compute_api.API, 'start')
- compute_api.API.start(mox.IgnoreArg(), mox.IgnoreArg())
- self.mox.ReplayAll()
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
- body = dict(start="")
- self.controller._start_server(req, 'test_inst', body)
-
- def test_start_policy_failed(self):
- rules = {
- self.start_policy:
- common_policy.parse_rule("project_id:non_fake")
- }
- policy.set_rules(rules)
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
- req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
- body = dict(start="")
- exc = self.assertRaises(exception.PolicyNotAuthorized,
- self.controller._start_server,
- req, 'test_inst', body)
- self.assertIn(self.start_policy, exc.format_message())
-
- def test_start_not_ready(self):
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
- self.stubs.Set(compute_api.API, 'start', fake_start_stop_not_ready)
- req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
- body = dict(start="")
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._start_server, req, 'test_inst', body)
-
- def test_start_locked_server(self):
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
- self.stubs.Set(compute_api.API, 'start', fake_start_stop_locked_server)
- req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
- body = dict(start="")
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._start_server, req, 'test_inst', body)
-
- def test_start_invalid_state(self):
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
- self.stubs.Set(compute_api.API, 'start', fake_start_stop_invalid_state)
- req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
- body = dict(start="")
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._start_server, req, 'test_inst', body)
-
- def test_stop(self):
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
- self.mox.StubOutWithMock(compute_api.API, 'stop')
- compute_api.API.stop(mox.IgnoreArg(), mox.IgnoreArg())
- self.mox.ReplayAll()
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
- body = dict(stop="")
- self.controller._stop_server(req, 'test_inst', body)
-
- def test_stop_policy_failed(self):
- rules = {
- self.stop_policy:
- common_policy.parse_rule("project_id:non_fake")
- }
- policy.set_rules(rules)
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
- req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
- body = dict(stop="")
- exc = self.assertRaises(exception.PolicyNotAuthorized,
- self.controller._stop_server,
- req, 'test_inst', body)
- self.assertIn(self.stop_policy, exc.format_message())
-
- def test_stop_not_ready(self):
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
- self.stubs.Set(compute_api.API, 'stop', fake_start_stop_not_ready)
- req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
- body = dict(stop="")
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._stop_server, req, 'test_inst', body)
-
- def test_stop_locked_server(self):
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
- self.stubs.Set(compute_api.API, 'stop', fake_start_stop_locked_server)
- req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
- body = dict(stop="")
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._stop_server, req, 'test_inst', body)
-
- def test_stop_invalid_state(self):
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
- self.stubs.Set(compute_api.API, 'stop', fake_start_stop_invalid_state)
- req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
- body = dict(start="")
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._stop_server, req, 'test_inst', body)
-
- def test_start_with_bogus_id(self):
- req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
- body = dict(start="")
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller._start_server, req, 'test_inst', body)
-
- def test_stop_with_bogus_id(self):
- req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
- body = dict(stop="")
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller._stop_server, req, 'test_inst', body)
-
-
-class ServerStartStopTestV2(ServerStartStopTestV21):
- start_policy = "compute:start"
- stop_policy = "compute:stop"
-
- def _setup_controller(self):
- self.controller = server_v2.ServerStartStopActionController()
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_usage.py b/nova/tests/api/openstack/compute/contrib/test_server_usage.py
deleted file mode 100644
index 52844191f3..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_server_usage.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-from lxml import etree
-from oslo.serialization import jsonutils
-from oslo.utils import timeutils
-
-from nova.api.openstack.compute.contrib import server_usage
-from nova import compute
-from nova import db
-from nova import exception
-from nova import objects
-from nova.objects import instance as instance_obj
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-
-UUID1 = '00000000-0000-0000-0000-000000000001'
-UUID2 = '00000000-0000-0000-0000-000000000002'
-UUID3 = '00000000-0000-0000-0000-000000000003'
-
-DATE1 = datetime.datetime(year=2013, month=4, day=5, hour=12)
-DATE2 = datetime.datetime(year=2013, month=4, day=5, hour=13)
-DATE3 = datetime.datetime(year=2013, month=4, day=5, hour=14)
-
-
-def fake_compute_get(*args, **kwargs):
- inst = fakes.stub_instance(1, uuid=UUID3, launched_at=DATE1,
- terminated_at=DATE2)
- return fake_instance.fake_instance_obj(args[1], **inst)
-
-
-def fake_compute_get_all(*args, **kwargs):
- db_list = [
- fakes.stub_instance(2, uuid=UUID1, launched_at=DATE2,
- terminated_at=DATE3),
- fakes.stub_instance(3, uuid=UUID2, launched_at=DATE1,
- terminated_at=DATE3),
- ]
- fields = instance_obj.INSTANCE_DEFAULT_FIELDS
- return instance_obj._make_instance_list(args[1],
- objects.InstanceList(),
- db_list, fields)
-
-
-class ServerUsageTestV21(test.TestCase):
- content_type = 'application/json'
- prefix = 'OS-SRV-USG:'
- _prefix = "/v2/fake"
-
- def setUp(self):
- super(ServerUsageTestV21, self).setUp()
- fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Server_usage'])
- return_server = fakes.fake_instance_get()
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
-
- def _make_request(self, url):
- req = fakes.HTTPRequest.blank(url)
- req.accept = self.content_type
- res = req.get_response(self._get_app())
- return res
-
- def _get_app(self):
- return fakes.wsgi_app_v21(init_only=('servers', 'os-server-usage'))
-
- def _get_server(self, body):
- return jsonutils.loads(body).get('server')
-
- def _get_servers(self, body):
- return jsonutils.loads(body).get('servers')
-
- def assertServerUsage(self, server, launched_at, terminated_at):
- resp_launched_at = timeutils.parse_isotime(
- server.get('%slaunched_at' % self.prefix))
- self.assertEqual(timeutils.normalize_time(resp_launched_at),
- launched_at)
- resp_terminated_at = timeutils.parse_isotime(
- server.get('%sterminated_at' % self.prefix))
- self.assertEqual(timeutils.normalize_time(resp_terminated_at),
- terminated_at)
-
- def test_show(self):
- url = self._prefix + ('/servers/%s' % UUID3)
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- now = timeutils.utcnow()
- timeutils.set_time_override(now)
- self.assertServerUsage(self._get_server(res.body),
- launched_at=DATE1,
- terminated_at=DATE2)
-
- def test_detail(self):
- url = self._prefix + '/servers/detail'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- servers = self._get_servers(res.body)
- self.assertServerUsage(servers[0],
- launched_at=DATE2,
- terminated_at=DATE3)
- self.assertServerUsage(servers[1],
- launched_at=DATE1,
- terminated_at=DATE3)
-
- def test_no_instance_passthrough_404(self):
-
- def fake_compute_get(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id='fake')
-
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- url = self._prefix + '/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 404)
-
-
-class ServerUsageTestV20(ServerUsageTestV21):
-
- def setUp(self):
- super(ServerUsageTestV20, self).setUp()
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Server_usage'])
-
- def _get_app(self):
- return fakes.wsgi_app(init_only=('servers',))
-
-
-class ServerUsageXmlTest(ServerUsageTestV20):
- content_type = 'application/xml'
- prefix = '{%s}' % server_usage.Server_usage.namespace
-
- def _get_server(self, body):
- return etree.XML(body)
-
- def _get_servers(self, body):
- return etree.XML(body).getchildren()
diff --git a/nova/tests/api/openstack/compute/contrib/test_services.py b/nova/tests/api/openstack/compute/contrib/test_services.py
deleted file mode 100644
index 43fa6c463b..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_services.py
+++ /dev/null
@@ -1,576 +0,0 @@
-# Copyright 2012 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import calendar
-import datetime
-
-import iso8601
-import mock
-from oslo.utils import timeutils
-import webob.exc
-
-from nova.api.openstack.compute.contrib import services
-from nova.api.openstack import extensions
-from nova import availability_zones
-from nova.compute import cells_api
-from nova import context
-from nova import db
-from nova import exception
-from nova.servicegroup.drivers import db as db_driver
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests.objects import test_service
-
-
# Canned DB-layer service records (two hosts x scheduler/compute) shared by
# the tests below.  The updated_at values are chosen relative to the frozen
# fake_utcnow() (2012-10-29 13:42:11) so host1 services report "up" and
# host2 services report "down".
fake_services_list = [
    dict(test_service.fake_service,
         binary='nova-scheduler',
         host='host1',
         id=1,
         disabled=True,
         topic='scheduler',
         updated_at=datetime.datetime(2012, 10, 29, 13, 42, 2),
         created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
         disabled_reason='test1'),
    dict(test_service.fake_service,
         binary='nova-compute',
         host='host1',
         id=2,
         disabled=True,
         topic='compute',
         updated_at=datetime.datetime(2012, 10, 29, 13, 42, 5),
         created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
         disabled_reason='test2'),
    dict(test_service.fake_service,
         binary='nova-scheduler',
         host='host2',
         id=3,
         disabled=False,
         topic='scheduler',
         updated_at=datetime.datetime(2012, 9, 19, 6, 55, 34),
         created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
         disabled_reason=None),
    dict(test_service.fake_service,
         binary='nova-compute',
         host='host2',
         id=4,
         disabled=True,
         topic='compute',
         updated_at=datetime.datetime(2012, 9, 18, 8, 3, 38),
         created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
         disabled_reason='test4'),
    ]
-
-
class FakeRequest(object):
    """Minimal admin request with no query filters."""
    environ = {"nova.context": context.get_admin_context()}
    GET = {}
-
-
class FakeRequestWithService(object):
    """Admin request filtering on binary=nova-compute."""
    environ = {"nova.context": context.get_admin_context()}
    GET = {"binary": "nova-compute"}
-
-
class FakeRequestWithHost(object):
    """Admin request filtering on host=host1."""
    environ = {"nova.context": context.get_admin_context()}
    GET = {"host": "host1"}
-
-
class FakeRequestWithHostService(object):
    """Admin request filtering on both host and binary."""
    environ = {"nova.context": context.get_admin_context()}
    GET = {"host": "host1", "binary": "nova-compute"}
-
-
def fake_service_get_all(services):
    """Return a stub for HostAPI.service_get_all backed by *services*.

    The stub mirrors the real API's availability-zone behaviour: zones are
    filled in only when explicitly requested (``set_zones=True`` or an
    ``availability_zone`` filter); otherwise the raw records are returned.
    """
    def service_get_all(context, filters=None, set_zones=False):
        # Guard against the default filters=None: the original
        # "'availability_zone' in filters" raised TypeError when no filter
        # dict was passed.
        if set_zones or (filters and 'availability_zone' in filters):
            return availability_zones.set_availability_zones(context,
                                                             services)
        return services
    return service_get_all
-
-
def fake_db_api_service_get_all(context, disabled=None):
    """db.service_get_all stand-in: ignores *disabled*, returns the
    canned list."""
    return fake_services_list
-
-
def fake_db_service_get_by_host_binary(services):
    """Build a db.service_get_by_args stub searching *services*.

    The returned callable looks a record up by (host, binary) and raises
    HostBinaryNotFound when nothing matches, mirroring the real DB API.
    """
    def service_get_by_host_binary(context, host, binary):
        match = next((svc for svc in services
                      if svc['host'] == host and svc['binary'] == binary),
                     None)
        if match is None:
            raise exception.HostBinaryNotFound(host=host, binary=binary)
        return match
    return service_get_by_host_binary
-
-
def fake_service_get_by_host_binary(context, host, binary):
    """Module-level convenience wrapper over the canned service list."""
    fake = fake_db_service_get_by_host_binary(fake_services_list)
    return fake(context, host, binary)
-
-
-def _service_get_by_id(services, value):
- for service in services:
- if service['id'] == value:
- return service
- return None
-
-
def fake_db_service_update(services):
    """Build a db.service_update stub operating on *services*.

    The stub returns the matching record untouched (the tests only care
    about lookup/raise semantics) and raises ServiceNotFound for ids that
    are not in the list.
    """
    def service_update(context, service_id, values):
        record = _service_get_by_id(services, service_id)
        if record is None:
            raise exception.ServiceNotFound(service_id=service_id)
        return record
    return service_update
-
-
def fake_service_update(context, service_id, values):
    """Module-level convenience wrapper over the canned service list."""
    fake = fake_db_service_update(fake_services_list)
    return fake(context, service_id, values)
-
-
def fake_utcnow():
    """timeutils.utcnow replacement returning a fixed instant."""
    return datetime.datetime(2012, 10, 29, 13, 42, 11)


# NOTE(review): presumably mirrors the override_time attribute that
# timeutils.utcnow carries, so the stub is a drop-in replacement — confirm
# against oslo.utils.
fake_utcnow.override_time = None


def fake_utcnow_ts():
    """timeutils.utcnow_ts replacement: fake_utcnow() as a Unix timestamp."""
    return calendar.timegm(fake_utcnow().utctimetuple())
-
-
class ServicesTest(test.TestCase):
    """Tests for the v2 os-services extension controller.

    The DB and servicegroup layers are stubbed with the canned
    fake_services_list records; the "state" field in responses derives from
    comparing each record's updated_at with the frozen fake_utcnow().
    """

    def setUp(self):
        super(ServicesTest, self).setUp()

        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = services.ServiceController(self.ext_mgr)

        # Freeze "now" so up/down state computation is deterministic.
        self.stubs.Set(timeutils, "utcnow", fake_utcnow)
        self.stubs.Set(timeutils, "utcnow_ts", fake_utcnow_ts)

        self.stubs.Set(self.controller.host_api, "service_get_all",
                       fake_service_get_all(fake_services_list))

        self.stubs.Set(db, "service_get_by_args",
                       fake_db_service_get_by_host_binary(fake_services_list))
        self.stubs.Set(db, "service_update",
                       fake_db_service_update(fake_services_list))

    def test_services_list(self):
        req = FakeRequest()
        res_dict = self.controller.index(req)

        response = {'services': [
                    {'binary': 'nova-scheduler',
                     'host': 'host1',
                     'zone': 'internal',
                     'status': 'disabled',
                     'state': 'up',
                     'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
                    {'binary': 'nova-compute',
                     'host': 'host1',
                     'zone': 'nova',
                     'status': 'disabled',
                     'state': 'up',
                     'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
                    {'binary': 'nova-scheduler',
                     'host': 'host2',
                     'zone': 'internal',
                     'status': 'enabled',
                     'state': 'down',
                     'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34)},
                    {'binary': 'nova-compute',
                     'host': 'host2',
                     'zone': 'nova',
                     'status': 'disabled',
                     'state': 'down',
                     'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
        self.assertEqual(res_dict, response)

    def test_services_list_with_host(self):
        req = FakeRequestWithHost()
        res_dict = self.controller.index(req)

        response = {'services': [
                    {'binary': 'nova-scheduler',
                     'host': 'host1',
                     'zone': 'internal',
                     'status': 'disabled',
                     'state': 'up',
                     'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
                    {'binary': 'nova-compute',
                     'host': 'host1',
                     'zone': 'nova',
                     'status': 'disabled',
                     'state': 'up',
                     'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]}
        self.assertEqual(res_dict, response)

    def test_services_list_with_service(self):
        req = FakeRequestWithService()
        res_dict = self.controller.index(req)

        response = {'services': [
                    {'binary': 'nova-compute',
                     'host': 'host1',
                     'zone': 'nova',
                     'status': 'disabled',
                     'state': 'up',
                     'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
                    {'binary': 'nova-compute',
                     'host': 'host2',
                     'zone': 'nova',
                     'status': 'disabled',
                     'state': 'down',
                     'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
        self.assertEqual(res_dict, response)

    def test_services_list_with_host_service(self):
        req = FakeRequestWithHostService()
        res_dict = self.controller.index(req)

        response = {'services': [
                    {'binary': 'nova-compute',
                     'host': 'host1',
                     'zone': 'nova',
                     'status': 'disabled',
                     'state': 'up',
                     'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]}
        self.assertEqual(res_dict, response)

    def test_services_detail(self):
        # With os-extended-services the response also carries
        # disabled_reason.
        self.ext_mgr.extensions['os-extended-services'] = True
        req = FakeRequest()
        res_dict = self.controller.index(req)
        response = {'services': [
                    {'binary': 'nova-scheduler',
                     'host': 'host1',
                     'zone': 'internal',
                     'status': 'disabled',
                     'state': 'up',
                     'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
                     'disabled_reason': 'test1'},
                    {'binary': 'nova-compute',
                     'host': 'host1',
                     'zone': 'nova',
                     'status': 'disabled',
                     'state': 'up',
                     'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
                     'disabled_reason': 'test2'},
                    {'binary': 'nova-scheduler',
                     'host': 'host2',
                     'zone': 'internal',
                     'status': 'enabled',
                     'state': 'down',
                     'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
                     'disabled_reason': None},
                    {'binary': 'nova-compute',
                     'host': 'host2',
                     'zone': 'nova',
                     'status': 'disabled',
                     'state': 'down',
                     'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
                     'disabled_reason': 'test4'}]}
        self.assertEqual(res_dict, response)

    def test_service_detail_with_host(self):
        self.ext_mgr.extensions['os-extended-services'] = True
        req = FakeRequestWithHost()
        res_dict = self.controller.index(req)
        response = {'services': [
                    {'binary': 'nova-scheduler',
                     'host': 'host1',
                     'zone': 'internal',
                     'status': 'disabled',
                     'state': 'up',
                     'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
                     'disabled_reason': 'test1'},
                    {'binary': 'nova-compute',
                     'host': 'host1',
                     'zone': 'nova',
                     'status': 'disabled',
                     'state': 'up',
                     'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
                     'disabled_reason': 'test2'}]}
        self.assertEqual(res_dict, response)

    def test_service_detail_with_service(self):
        self.ext_mgr.extensions['os-extended-services'] = True
        req = FakeRequestWithService()
        res_dict = self.controller.index(req)
        response = {'services': [
                    {'binary': 'nova-compute',
                     'host': 'host1',
                     'zone': 'nova',
                     'status': 'disabled',
                     'state': 'up',
                     'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
                     'disabled_reason': 'test2'},
                    {'binary': 'nova-compute',
                     'host': 'host2',
                     'zone': 'nova',
                     'status': 'disabled',
                     'state': 'down',
                     'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
                     'disabled_reason': 'test4'}]}
        self.assertEqual(res_dict, response)

    def test_service_detail_with_host_service(self):
        self.ext_mgr.extensions['os-extended-services'] = True
        req = FakeRequestWithHostService()
        res_dict = self.controller.index(req)
        response = {'services': [
                    {'binary': 'nova-compute',
                     'host': 'host1',
                     'zone': 'nova',
                     'status': 'disabled',
                     'state': 'up',
                     'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
                     'disabled_reason': 'test2'}]}
        self.assertEqual(res_dict, response)

    def test_services_detail_with_delete_extension(self):
        # With os-extended-services-delete the response also carries the
        # service id (needed to address DELETE requests).
        self.ext_mgr.extensions['os-extended-services-delete'] = True
        req = FakeRequest()
        res_dict = self.controller.index(req)
        response = {'services': [
                    {'binary': 'nova-scheduler',
                     'host': 'host1',
                     'id': 1,
                     'zone': 'internal',
                     'status': 'disabled',
                     'state': 'up',
                     'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
                    {'binary': 'nova-compute',
                     'host': 'host1',
                     'id': 2,
                     'zone': 'nova',
                     'status': 'disabled',
                     'state': 'up',
                     'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
                    {'binary': 'nova-scheduler',
                     'host': 'host2',
                     'id': 3,
                     'zone': 'internal',
                     'status': 'enabled',
                     'state': 'down',
                     'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34)},
                    {'binary': 'nova-compute',
                     'host': 'host2',
                     'id': 4,
                     'zone': 'nova',
                     'status': 'disabled',
                     'state': 'down',
                     'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
        self.assertEqual(res_dict, response)

    def test_services_enable(self):
        def _service_update(context, service_id, values):
            self.assertIsNone(values['disabled_reason'])
            return dict(test_service.fake_service, id=service_id, **values)

        self.stubs.Set(db, "service_update", _service_update)

        body = {'host': 'host1', 'binary': 'nova-compute'}
        req = fakes.HTTPRequest.blank('/v2/fake/os-services/enable')

        res_dict = self.controller.update(req, "enable", body)
        self.assertEqual(res_dict['service']['status'], 'enabled')
        self.assertNotIn('disabled_reason', res_dict['service'])

    def test_services_enable_with_invalid_host(self):
        body = {'host': 'invalid', 'binary': 'nova-compute'}
        req = fakes.HTTPRequest.blank('/v2/fake/os-services/enable')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.update,
                          req,
                          "enable",
                          body)

    def test_services_enable_with_invalid_binary(self):
        body = {'host': 'host1', 'binary': 'invalid'}
        req = fakes.HTTPRequest.blank('/v2/fake/os-services/enable')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.update,
                          req,
                          "enable",
                          body)

    # This test is just to verify that the servicegroup API gets used when
    # calling this API.
    def test_services_with_exception(self):
        def dummy_is_up(self, dummy):
            raise KeyError()

        self.stubs.Set(db_driver.DbDriver, 'is_up', dummy_is_up)
        req = FakeRequestWithHostService()
        self.assertRaises(KeyError, self.controller.index, req)

    def test_services_disable(self):
        req = fakes.HTTPRequest.blank('/v2/fake/os-services/disable')
        body = {'host': 'host1', 'binary': 'nova-compute'}
        res_dict = self.controller.update(req, "disable", body)

        self.assertEqual(res_dict['service']['status'], 'disabled')
        self.assertNotIn('disabled_reason', res_dict['service'])

    def test_services_disable_with_invalid_host(self):
        body = {'host': 'invalid', 'binary': 'nova-compute'}
        req = fakes.HTTPRequest.blank('/v2/fake/os-services/disable')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.update,
                          req,
                          "disable",
                          body)

    def test_services_disable_with_invalid_binary(self):
        body = {'host': 'host1', 'binary': 'invalid'}
        # NOTE: this is a v2 test suite, so use the v2 request class like
        # every other test here (previously used fakes.HTTPRequestV3 by
        # mistake).
        req = fakes.HTTPRequest.blank('/v2/fake/os-services/disable')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.update,
                          req,
                          "disable",
                          body)

    def test_services_disable_log_reason(self):
        self.ext_mgr.extensions['os-extended-services'] = True
        req = \
            fakes.HTTPRequest.blank('v2/fakes/os-services/disable-log-reason')
        body = {'host': 'host1',
                'binary': 'nova-compute',
                'disabled_reason': 'test-reason',
                }
        res_dict = self.controller.update(req, "disable-log-reason", body)

        self.assertEqual(res_dict['service']['status'], 'disabled')
        self.assertEqual(res_dict['service']['disabled_reason'], 'test-reason')

    def test_mandatory_reason_field(self):
        self.ext_mgr.extensions['os-extended-services'] = True
        req = \
            fakes.HTTPRequest.blank('v2/fakes/os-services/disable-log-reason')
        body = {'host': 'host1',
                'binary': 'nova-compute',
                }
        self.assertRaises(webob.exc.HTTPBadRequest,
                self.controller.update, req, "disable-log-reason", body)

    def test_invalid_reason_field(self):
        # Blank and over-long (>255 chars) reasons are rejected.
        reason = ' '
        self.assertFalse(self.controller._is_valid_as_reason(reason))
        reason = 'a' * 256
        self.assertFalse(self.controller._is_valid_as_reason(reason))
        reason = 'it\'s a valid reason.'
        self.assertTrue(self.controller._is_valid_as_reason(reason))

    def test_services_delete(self):
        self.ext_mgr.extensions['os-extended-services-delete'] = True

        request = fakes.HTTPRequest.blank('/v2/fakes/os-services/1',
                                          use_admin_context=True)
        request.method = 'DELETE'

        with mock.patch.object(self.controller.host_api,
                               'service_delete') as service_delete:
            self.controller.delete(request, '1')
            service_delete.assert_called_once_with(
                request.environ['nova.context'], '1')
            self.assertEqual(self.controller.delete.wsgi_code, 204)

    def test_services_delete_not_found(self):
        self.ext_mgr.extensions['os-extended-services-delete'] = True

        request = fakes.HTTPRequest.blank('/v2/fakes/os-services/abc',
                                          use_admin_context=True)
        request.method = 'DELETE'
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.delete, request, 'abc')

    def test_services_delete_not_enabled(self):
        # Without os-extended-services-delete the DELETE verb is rejected.
        request = fakes.HTTPRequest.blank('/v2/fakes/os-services/300',
                                          use_admin_context=True)
        request.method = 'DELETE'
        self.assertRaises(webob.exc.HTTPMethodNotAllowed,
                          self.controller.delete, request, '300')
-
-
class ServicesCellsTest(test.TestCase):
    """os-services behaviour when the controller talks to the cells API.

    Service ids coming back from cells are cell-path qualified strings
    ('cell1@<id>') and the expected timestamps here are timezone-aware,
    unlike the plain DB-backed tests above.
    """
    def setUp(self):
        super(ServicesCellsTest, self).setUp()

        host_api = cells_api.HostAPI()

        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = services.ServiceController(self.ext_mgr)
        self.controller.host_api = host_api

        # Freeze "now" so up/down state computation is deterministic.
        self.stubs.Set(timeutils, "utcnow", fake_utcnow)
        self.stubs.Set(timeutils, "utcnow_ts", fake_utcnow_ts)

        # Re-key the canned records with cell-qualified ids.
        services_list = []
        for service in fake_services_list:
            service = service.copy()
            service['id'] = 'cell1@%d' % service['id']
            services_list.append(service)

        self.stubs.Set(host_api.cells_rpcapi, "service_get_all",
                       fake_service_get_all(services_list))

    def test_services_detail(self):
        self.ext_mgr.extensions['os-extended-services-delete'] = True
        req = FakeRequest()
        res_dict = self.controller.index(req)
        utc = iso8601.iso8601.Utc()
        response = {'services': [
                    {'id': 'cell1@1',
                     'binary': 'nova-scheduler',
                     'host': 'host1',
                     'zone': 'internal',
                     'status': 'disabled',
                     'state': 'up',
                     'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2,
                                                     tzinfo=utc)},
                    {'id': 'cell1@2',
                     'binary': 'nova-compute',
                     'host': 'host1',
                     'zone': 'nova',
                     'status': 'disabled',
                     'state': 'up',
                     'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5,
                                                     tzinfo=utc)},
                    {'id': 'cell1@3',
                     'binary': 'nova-scheduler',
                     'host': 'host2',
                     'zone': 'internal',
                     'status': 'enabled',
                     'state': 'down',
                     'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34,
                                                     tzinfo=utc)},
                    {'id': 'cell1@4',
                     'binary': 'nova-compute',
                     'host': 'host2',
                     'zone': 'nova',
                     'status': 'disabled',
                     'state': 'down',
                     'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38,
                                                     tzinfo=utc)}]}
        self.assertEqual(res_dict, response)
diff --git a/nova/tests/api/openstack/compute/contrib/test_shelve.py b/nova/tests/api/openstack/compute/contrib/test_shelve.py
deleted file mode 100644
index 5405a92fec..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_shelve.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import uuid
-
-import webob
-
-from nova.api.openstack.compute.contrib import shelve as shelve_v2
-from nova.api.openstack.compute.plugins.v3 import shelve as shelve_v21
-from nova.compute import api as compute_api
-from nova import db
-from nova import exception
-from nova.openstack.common import policy as common_policy
-from nova import policy
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-
-
def fake_instance_get_by_uuid(context, instance_id,
                              columns_to_join=None, use_slave=False):
    """db.instance_get_by_uuid stub: returns an instance that belongs to a
    project other than the caller's, to exercise policy denial paths."""
    other_project = '%s_unequal' % context.project_id
    return fake_instance.fake_db_instance(name='fake',
                                          project_id=other_project)
-
-
def fake_auth_context(context):
    """Policy-check stub that authorizes everything."""
    return True
-
-
class ShelvePolicyTestV21(test.NoDBTestCase):
    """Policy and locked-server tests for the v2.1 os-shelve plugin.

    Each action is exercised three ways: denied by an admin-only policy
    rule, denied because the instance belongs to another project, and
    rejected with 409 when the server is locked.
    """
    # Subclasses override these to retarget the v2 extension.
    plugin = shelve_v21
    prefix = 'v3:os-shelve:'
    offload = 'shelve_offload'

    def setUp(self):
        super(ShelvePolicyTestV21, self).setUp()
        self.controller = self.plugin.ShelveController()

    def _fake_request(self):
        return fakes.HTTPRequestV3.blank('/servers/12/os-shelve')

    def test_shelve_restricted_by_role(self):
        rules = {'compute_extension:%sshelve' % self.prefix:
                     common_policy.parse_rule('role:admin')}
        policy.set_rules(rules)

        req = self._fake_request()
        self.assertRaises(exception.Forbidden, self.controller._shelve,
                req, str(uuid.uuid4()), {})

    def test_shelve_allowed(self):
        # Extension policy passes, but the instance belongs to a different
        # project, so the compute-level check still forbids the action.
        rules = {'compute:get': common_policy.parse_rule(''),
                 'compute_extension:%sshelve' % self.prefix:
                     common_policy.parse_rule('')}
        policy.set_rules(rules)

        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
        req = self._fake_request()
        self.assertRaises(exception.Forbidden, self.controller._shelve,
                req, str(uuid.uuid4()), {})

    def test_shelve_locked_server(self):
        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
        self.stubs.Set(self.plugin, 'auth_shelve', fake_auth_context)
        self.stubs.Set(compute_api.API, 'shelve',
                       fakes.fake_actions_to_locked_server)
        req = self._fake_request()
        self.assertRaises(webob.exc.HTTPConflict, self.controller._shelve,
                          req, str(uuid.uuid4()), {})

    def test_unshelve_restricted_by_role(self):
        rules = {'compute_extension:%sunshelve' % self.prefix:
                     common_policy.parse_rule('role:admin')}
        policy.set_rules(rules)

        req = self._fake_request()
        self.assertRaises(exception.Forbidden, self.controller._unshelve,
                req, str(uuid.uuid4()), {})

    def test_unshelve_allowed(self):
        rules = {'compute:get': common_policy.parse_rule(''),
                 'compute_extension:%sunshelve' % self.prefix:
                     common_policy.parse_rule('')}
        policy.set_rules(rules)

        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
        req = self._fake_request()
        self.assertRaises(exception.Forbidden, self.controller._unshelve,
                req, str(uuid.uuid4()), {})

    def test_unshelve_locked_server(self):
        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
        self.stubs.Set(self.plugin, 'auth_unshelve', fake_auth_context)
        self.stubs.Set(compute_api.API, 'unshelve',
                       fakes.fake_actions_to_locked_server)
        req = self._fake_request()
        self.assertRaises(webob.exc.HTTPConflict, self.controller._unshelve,
                          req, str(uuid.uuid4()), {})

    def test_shelve_offload_restricted_by_role(self):
        rules = {'compute_extension:%s%s' % (self.prefix, self.offload):
                     common_policy.parse_rule('role:admin')}
        policy.set_rules(rules)

        req = self._fake_request()
        self.assertRaises(exception.Forbidden,
                self.controller._shelve_offload, req, str(uuid.uuid4()), {})

    def test_shelve_offload_allowed(self):
        rules = {'compute:get': common_policy.parse_rule(''),
                 'compute_extension:%s%s' % (self.prefix, self.offload):
                     common_policy.parse_rule('')}
        policy.set_rules(rules)

        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
        req = self._fake_request()
        self.assertRaises(exception.Forbidden,
                self.controller._shelve_offload, req, str(uuid.uuid4()), {})

    def test_shelve_offload_locked_server(self):
        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
        self.stubs.Set(self.plugin, 'auth_shelve_offload', fake_auth_context)
        self.stubs.Set(compute_api.API, 'shelve_offload',
                       fakes.fake_actions_to_locked_server)
        req = self._fake_request()
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller._shelve_offload,
                          req, str(uuid.uuid4()), {})
-
-
class ShelvePolicyTestV2(ShelvePolicyTestV21):
    """Run the shelve policy tests against the legacy v2 extension."""
    plugin = shelve_v2
    # v2 policy keys have no version prefix and camelCase the offload action.
    prefix = ''
    offload = 'shelveOffload'

    def _fake_request(self):
        return fakes.HTTPRequest.blank('/v2/123/servers/12/os-shelve')
diff --git a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
deleted file mode 100644
index 8cde54e575..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
+++ /dev/null
@@ -1,539 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-from lxml import etree
-import mock
-from oslo.serialization import jsonutils
-from oslo.utils import timeutils
-import webob
-
-from nova.api.openstack.compute.contrib import simple_tenant_usage as \
- simple_tenant_usage_v2
-from nova.api.openstack.compute.plugins.v3 import simple_tenant_usage as \
- simple_tenant_usage_v21
-from nova.compute import flavors
-from nova.compute import vm_states
-from nova import context
-from nova import db
-from nova import exception
-from nova import objects
-from nova.openstack.common import policy as common_policy
-from nova import policy
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova import utils
-
# Fixture dimensions: TENANTS tenants with SERVERS instances each, every
# instance running for HOURS hours within the [START, STOP] window.
SERVERS = 5
TENANTS = 2
HOURS = 24
# Per-instance flavor resources used to compute the expected usage totals.
ROOT_GB = 10
EPHEMERAL_GB = 20
MEMORY_MB = 1024
VCPUS = 2
NOW = timeutils.utcnow()
START = NOW - datetime.timedelta(hours=HOURS)
STOP = NOW
-
-
# Flavor record embedded in each fake instance's system_metadata; its
# resource values must match the module-level constants above so the
# expected usage math works out.
FAKE_INST_TYPE = {'id': 1,
                  'vcpus': VCPUS,
                  'root_gb': ROOT_GB,
                  'ephemeral_gb': EPHEMERAL_GB,
                  'memory_mb': MEMORY_MB,
                  'name': 'fakeflavor',
                  'flavorid': 'foo',
                  'rxtx_factor': 1.0,
                  'vcpu_weight': 1,
                  'swap': 0,
                  'created_at': None,
                  'updated_at': None,
                  'deleted_at': None,
                  'deleted': 0,
                  'disabled': False,
                  'is_public': True,
                  'extra_specs': {'foo': 'bar'}}
-
-
def get_fake_db_instance(start, end, instance_id, tenant_id,
                         vm_state=vm_states.ACTIVE):
    """Build one fake DB instance dict with a deterministic uuid and the
    FAKE_INST_TYPE flavor baked into its system_metadata.

    *start*/*end* become launched_at/terminated_at, which drive the usage
    window calculations under test.
    """
    sys_meta = utils.dict_to_metadata(
        flavors.save_flavor_info({}, FAKE_INST_TYPE))
    # NOTE(mriedem): We use fakes.stub_instance since it sets the fields
    # needed on the db instance for converting it to an object, but we still
    # need to override system_metadata to use our fake flavor.
    inst = fakes.stub_instance(
            id=instance_id,
            uuid='00000000-0000-0000-0000-00000000000000%02d' % instance_id,
            image_ref='1',
            project_id=tenant_id,
            user_id='fakeuser',
            display_name='name',
            flavor_id=FAKE_INST_TYPE['id'],
            launched_at=start,
            terminated_at=end,
            vm_state=vm_state,
            memory_mb=MEMORY_MB,
            vcpus=VCPUS,
            root_gb=ROOT_GB,
            ephemeral_gb=EPHEMERAL_GB,)
    inst['system_metadata'] = sys_meta
    return inst
-
-
def fake_instance_get_active_by_window_joined(context, begin, end,
                                              project_id, host):
    """Return TENANTS * SERVERS fake instances bucketed across tenants.

    NOTE: relies on Python 2 semantics — the ``xrange`` builtin and integer
    division in ``x / SERVERS`` (SERVERS consecutive ids per tenant).
    """
    return [get_fake_db_instance(START,
                                 STOP,
                                 x,
                                 "faketenant_%s" % (x / SERVERS))
            for x in xrange(TENANTS * SERVERS)]
-
-
@mock.patch.object(db, 'instance_get_active_by_window_joined',
                   fake_instance_get_active_by_window_joined)
class SimpleTenantUsageTestV21(test.TestCase):
    """End-to-end WSGI tests for the v2.1 os-simple-tenant-usage API.

    The DB window query is patched class-wide to return the canned
    instances, so expected totals are simple products of the module-level
    fixture constants.
    """
    url = '/v2/faketenant_0/os-simple-tenant-usage'
    alt_url = '/v2/faketenant_1/os-simple-tenant-usage'
    policy_rule_prefix = "compute_extension:v3:os-simple-tenant-usage"

    def setUp(self):
        super(SimpleTenantUsageTestV21, self).setUp()
        # Admin, plain user, and a user from a different tenant (for the
        # cross-tenant authorization test).
        self.admin_context = context.RequestContext('fakeadmin_0',
                                                    'faketenant_0',
                                                    is_admin=True)
        self.user_context = context.RequestContext('fakeadmin_0',
                                                   'faketenant_0',
                                                   is_admin=False)
        self.alt_user_context = context.RequestContext('fakeadmin_0',
                                                       'faketenant_1',
                                                       is_admin=False)

    def _get_wsgi_app(self, context):
        return fakes.wsgi_app_v21(fake_auth_context=context,
                                  init_only=('servers',
                                             'os-simple-tenant-usage'))

    def _test_verify_index(self, start, stop):
        req = webob.Request.blank(
                    self.url + '?start=%s&end=%s' %
                    (start.isoformat(), stop.isoformat()))
        req.method = "GET"
        req.headers["content-type"] = "application/json"

        res = req.get_response(self._get_wsgi_app(self.admin_context))

        self.assertEqual(res.status_int, 200)
        res_dict = jsonutils.loads(res.body)
        usages = res_dict['tenant_usages']
        # Per-tenant totals are fixture constants times instance count.
        for i in xrange(TENANTS):
            self.assertEqual(int(usages[i]['total_hours']),
                             SERVERS * HOURS)
            self.assertEqual(int(usages[i]['total_local_gb_usage']),
                             SERVERS * (ROOT_GB + EPHEMERAL_GB) * HOURS)
            self.assertEqual(int(usages[i]['total_memory_mb_usage']),
                             SERVERS * MEMORY_MB * HOURS)
            self.assertEqual(int(usages[i]['total_vcpus_usage']),
                             SERVERS * VCPUS * HOURS)
            self.assertFalse(usages[i].get('server_usages'))

    def test_verify_index(self):
        self._test_verify_index(START, STOP)

    def test_verify_index_future_end_time(self):
        future = NOW + datetime.timedelta(hours=HOURS)
        self._test_verify_index(START, future)

    def test_verify_show(self):
        self._test_verify_show(START, STOP)

    def test_verify_show_future_end_time(self):
        future = NOW + datetime.timedelta(hours=HOURS)
        self._test_verify_show(START, future)

    def _get_tenant_usages(self, detailed=''):
        req = webob.Request.blank(
                    self.url + '?detailed=%s&start=%s&end=%s' %
                    (detailed, START.isoformat(), STOP.isoformat()))
        req.method = "GET"
        req.headers["content-type"] = "application/json"

        res = req.get_response(self._get_wsgi_app(self.admin_context))
        self.assertEqual(res.status_int, 200)
        res_dict = jsonutils.loads(res.body)
        return res_dict['tenant_usages']

    def test_verify_detailed_index(self):
        # detailed=1 adds a per-server breakdown to each tenant entry.
        usages = self._get_tenant_usages('1')
        for i in xrange(TENANTS):
            servers = usages[i]['server_usages']
            for j in xrange(SERVERS):
                self.assertEqual(int(servers[j]['hours']), HOURS)

    def test_verify_simple_index(self):
        usages = self._get_tenant_usages(detailed='0')
        for i in xrange(TENANTS):
            self.assertIsNone(usages[i].get('server_usages'))

    def test_verify_simple_index_empty_param(self):
        # NOTE(lzyeval): 'detailed=&start=..&end=..'
        usages = self._get_tenant_usages()
        for i in xrange(TENANTS):
            self.assertIsNone(usages[i].get('server_usages'))

    def _test_verify_show(self, start, stop):
        tenant_id = 0
        req = webob.Request.blank(
                  self.url + '/faketenant_%s?start=%s&end=%s' %
                  (tenant_id, start.isoformat(), stop.isoformat()))
        req.method = "GET"
        req.headers["content-type"] = "application/json"

        res = req.get_response(self._get_wsgi_app(self.user_context))
        self.assertEqual(res.status_int, 200)
        res_dict = jsonutils.loads(res.body)

        usage = res_dict['tenant_usage']
        servers = usage['server_usages']
        self.assertEqual(len(usage['server_usages']), SERVERS)
        # uuids follow the deterministic pattern from get_fake_db_instance.
        uuids = ['00000000-0000-0000-0000-00000000000000%02d' %
                    (x + (tenant_id * SERVERS)) for x in xrange(SERVERS)]
        for j in xrange(SERVERS):
            delta = STOP - START
            uptime = delta.days * 24 * 3600 + delta.seconds
            self.assertEqual(int(servers[j]['uptime']), uptime)
            self.assertEqual(int(servers[j]['hours']), HOURS)
            self.assertIn(servers[j]['instance_id'], uuids)

    def test_verify_show_cannot_view_other_tenant(self):
        req = webob.Request.blank(
                  self.alt_url + '/faketenant_0?start=%s&end=%s' %
                  (START.isoformat(), STOP.isoformat()))
        req.method = "GET"
        req.headers["content-type"] = "application/json"

        rules = {
            self.policy_rule_prefix + ":show":
                common_policy.parse_rule([
                    ["role:admin"], ["project_id:%(project_id)s"]
                ])
        }
        policy.set_rules(rules)

        try:
            res = req.get_response(self._get_wsgi_app(self.alt_user_context))
            self.assertEqual(res.status_int, 403)
        finally:
            policy.reset()

    def test_get_tenants_usage_with_bad_start_date(self):
        # start after end is a 400.
        future = NOW + datetime.timedelta(hours=HOURS)
        tenant_id = 0
        req = webob.Request.blank(
                  self.url + '/'
                  'faketenant_%s?start=%s&end=%s' %
                  (tenant_id, future.isoformat(), NOW.isoformat()))
        req.method = "GET"
        req.headers["content-type"] = "application/json"

        res = req.get_response(self._get_wsgi_app(self.user_context))
        self.assertEqual(res.status_int, 400)

    def test_get_tenants_usage_with_invalid_start_date(self):
        tenant_id = 0
        req = webob.Request.blank(
                  self.url + '/'
                  'faketenant_%s?start=%s&end=%s' %
                  (tenant_id, "xxxx", NOW.isoformat()))
        req.method = "GET"
        req.headers["content-type"] = "application/json"

        res = req.get_response(self._get_wsgi_app(self.user_context))
        self.assertEqual(res.status_int, 400)

    def _test_get_tenants_usage_with_one_date(self, date_url_param):
        req = webob.Request.blank(
                  self.url + '/'
                  'faketenant_0?%s' % date_url_param)
        req.method = "GET"
        req.headers["content-type"] = "application/json"
        res = req.get_response(self._get_wsgi_app(self.user_context))
        self.assertEqual(200, res.status_int)

    def test_get_tenants_usage_with_no_start_date(self):
        self._test_get_tenants_usage_with_one_date(
            'end=%s' % (NOW + datetime.timedelta(5)).isoformat())

    def test_get_tenants_usage_with_no_end_date(self):
        self._test_get_tenants_usage_with_one_date(
            'start=%s' % (NOW - datetime.timedelta(5)).isoformat())
-
-
-class SimpleTenantUsageTestV2(SimpleTenantUsageTestV21):
- policy_rule_prefix = "compute_extension:simple_tenant_usage"
-
- def _get_wsgi_app(self, context):
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Simple_tenant_usage'])
- return fakes.wsgi_app(fake_auth_context=context,
- init_only=('os-simple-tenant-usage', ))
-
-
-class SimpleTenantUsageSerializerTest(test.TestCase):
- def _verify_server_usage(self, raw_usage, tree):
- self.assertEqual('server_usage', tree.tag)
-
- # Figure out what fields we expect
- not_seen = set(raw_usage.keys())
-
- for child in tree:
- self.assertIn(child.tag, not_seen)
- not_seen.remove(child.tag)
- self.assertEqual(str(raw_usage[child.tag]), child.text)
-
- self.assertEqual(len(not_seen), 0)
-
- def _verify_tenant_usage(self, raw_usage, tree):
- self.assertEqual('tenant_usage', tree.tag)
-
- # Figure out what fields we expect
- not_seen = set(raw_usage.keys())
-
- for child in tree:
- self.assertIn(child.tag, not_seen)
- not_seen.remove(child.tag)
- if child.tag == 'server_usages':
- for idx, gr_child in enumerate(child):
- self._verify_server_usage(raw_usage['server_usages'][idx],
- gr_child)
- else:
- self.assertEqual(str(raw_usage[child.tag]), child.text)
-
- self.assertEqual(len(not_seen), 0)
-
- def test_serializer_show(self):
- serializer = simple_tenant_usage_v2.SimpleTenantUsageTemplate()
- today = timeutils.utcnow()
- yesterday = today - datetime.timedelta(days=1)
- raw_usage = dict(
- tenant_id='tenant',
- total_local_gb_usage=789,
- total_vcpus_usage=456,
- total_memory_mb_usage=123,
- total_hours=24,
- start=yesterday,
- stop=today,
- server_usages=[dict(
- instance_id='00000000-0000-0000-0000-0000000000000000',
- name='test',
- hours=24,
- memory_mb=1024,
- local_gb=50,
- vcpus=1,
- tenant_id='tenant',
- flavor='m1.small',
- started_at=yesterday,
- ended_at=today,
- state='terminated',
- uptime=86400),
- dict(
- instance_id='00000000-0000-0000-0000-0000000000000002',
- name='test2',
- hours=12,
- memory_mb=512,
- local_gb=25,
- vcpus=2,
- tenant_id='tenant',
- flavor='m1.tiny',
- started_at=yesterday,
- ended_at=today,
- state='terminated',
- uptime=43200),
- ],
- )
- tenant_usage = dict(tenant_usage=raw_usage)
- text = serializer.serialize(tenant_usage)
-
- tree = etree.fromstring(text)
-
- self._verify_tenant_usage(raw_usage, tree)
-
- def test_serializer_index(self):
- serializer = simple_tenant_usage_v2.SimpleTenantUsagesTemplate()
- today = timeutils.utcnow()
- yesterday = today - datetime.timedelta(days=1)
- raw_usages = [dict(
- tenant_id='tenant1',
- total_local_gb_usage=1024,
- total_vcpus_usage=23,
- total_memory_mb_usage=512,
- total_hours=24,
- start=yesterday,
- stop=today,
- server_usages=[dict(
- instance_id='00000000-0000-0000-0000-0000000000000001',
- name='test1',
- hours=24,
- memory_mb=1024,
- local_gb=50,
- vcpus=2,
- tenant_id='tenant1',
- flavor='m1.small',
- started_at=yesterday,
- ended_at=today,
- state='terminated',
- uptime=86400),
- dict(
- instance_id='00000000-0000-0000-0000-0000000000000002',
- name='test2',
- hours=42,
- memory_mb=4201,
- local_gb=25,
- vcpus=1,
- tenant_id='tenant1',
- flavor='m1.tiny',
- started_at=today,
- ended_at=yesterday,
- state='terminated',
- uptime=43200),
- ],
- ),
- dict(
- tenant_id='tenant2',
- total_local_gb_usage=512,
- total_vcpus_usage=32,
- total_memory_mb_usage=1024,
- total_hours=42,
- start=today,
- stop=yesterday,
- server_usages=[dict(
- instance_id='00000000-0000-0000-0000-0000000000000003',
- name='test3',
- hours=24,
- memory_mb=1024,
- local_gb=50,
- vcpus=2,
- tenant_id='tenant2',
- flavor='m1.small',
- started_at=yesterday,
- ended_at=today,
- state='terminated',
- uptime=86400),
- dict(
- instance_id='00000000-0000-0000-0000-0000000000000002',
- name='test2',
- hours=42,
- memory_mb=4201,
- local_gb=25,
- vcpus=1,
- tenant_id='tenant4',
- flavor='m1.tiny',
- started_at=today,
- ended_at=yesterday,
- state='terminated',
- uptime=43200),
- ],
- ),
- ]
- tenant_usages = dict(tenant_usages=raw_usages)
- text = serializer.serialize(tenant_usages)
-
- tree = etree.fromstring(text)
-
- self.assertEqual('tenant_usages', tree.tag)
- self.assertEqual(len(raw_usages), len(tree))
- for idx, child in enumerate(tree):
- self._verify_tenant_usage(raw_usages[idx], child)
-
-
-class SimpleTenantUsageControllerTestV21(test.TestCase):
- controller = simple_tenant_usage_v21.SimpleTenantUsageController()
-
- def setUp(self):
- super(SimpleTenantUsageControllerTestV21, self).setUp()
-
- self.context = context.RequestContext('fakeuser', 'fake-project')
-
- self.baseinst = get_fake_db_instance(START, STOP, instance_id=1,
- tenant_id=self.context.project_id,
- vm_state=vm_states.DELETED)
- # convert the fake instance dict to an object
- self.inst_obj = objects.Instance._from_db_object(
- self.context, objects.Instance(), self.baseinst)
-
- def test_get_flavor_from_sys_meta(self):
- # Non-deleted instances get their type information from their
- # system_metadata
- with mock.patch.object(db, 'instance_get_by_uuid',
- return_value=self.baseinst):
- flavor = self.controller._get_flavor(self.context,
- self.inst_obj, {})
- self.assertEqual(objects.Flavor, type(flavor))
- self.assertEqual(FAKE_INST_TYPE['id'], flavor.id)
-
- def test_get_flavor_from_non_deleted_with_id_fails(self):
- # If an instance is not deleted and missing type information from
- # system_metadata, then that's a bug
- self.inst_obj.system_metadata = {}
- self.assertRaises(KeyError,
- self.controller._get_flavor, self.context,
- self.inst_obj, {})
-
- def test_get_flavor_from_deleted_with_id(self):
- # Deleted instances may not have type info in system_metadata,
- # so verify that they get their type from a lookup of their
- # instance_type_id
- self.inst_obj.system_metadata = {}
- self.inst_obj.deleted = 1
- flavor = self.controller._get_flavor(self.context, self.inst_obj, {})
- self.assertEqual(objects.Flavor, type(flavor))
- self.assertEqual(FAKE_INST_TYPE['id'], flavor.id)
-
- def test_get_flavor_from_deleted_with_id_of_deleted(self):
- # Verify the legacy behavior of instance_type_id pointing to a
- # missing type being non-fatal
- self.inst_obj.system_metadata = {}
- self.inst_obj.deleted = 1
- self.inst_obj.instance_type_id = 99
- flavor = self.controller._get_flavor(self.context, self.inst_obj, {})
- self.assertIsNone(flavor)
-
-
-class SimpleTenantUsageControllerTestV2(SimpleTenantUsageControllerTestV21):
- controller = simple_tenant_usage_v2.SimpleTenantUsageController()
-
-
-class SimpleTenantUsageUtilsV21(test.NoDBTestCase):
- simple_tenant_usage = simple_tenant_usage_v21
-
- def test_valid_string(self):
- dt = self.simple_tenant_usage.parse_strtime(
- "2014-02-21T13:47:20.824060", "%Y-%m-%dT%H:%M:%S.%f")
- self.assertEqual(datetime.datetime(
- microsecond=824060, second=20, minute=47, hour=13,
- day=21, month=2, year=2014), dt)
-
- def test_invalid_string(self):
- self.assertRaises(exception.InvalidStrTime,
- self.simple_tenant_usage.parse_strtime,
- "2014-02-21 13:47:20.824060",
- "%Y-%m-%dT%H:%M:%S.%f")
-
-
-class SimpleTenantUsageUtilsV2(SimpleTenantUsageUtilsV21):
- simple_tenant_usage = simple_tenant_usage_v2
diff --git a/nova/tests/api/openstack/compute/contrib/test_snapshots.py b/nova/tests/api/openstack/compute/contrib/test_snapshots.py
deleted file mode 100644
index da3e9262fa..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_snapshots.py
+++ /dev/null
@@ -1,209 +0,0 @@
-# Copyright 2011 Denali Systems, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-from oslo.serialization import jsonutils
-from oslo.utils import timeutils
-import webob
-
-from nova.api.openstack.compute.contrib import volumes
-from nova import context
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.volume import cinder
-
-
-class SnapshotApiTest(test.NoDBTestCase):
- def setUp(self):
- super(SnapshotApiTest, self).setUp()
- fakes.stub_out_networking(self.stubs)
- fakes.stub_out_rate_limiting(self.stubs)
- self.stubs.Set(cinder.API, "create_snapshot",
- fakes.stub_snapshot_create)
- self.stubs.Set(cinder.API, "create_snapshot_force",
- fakes.stub_snapshot_create)
- self.stubs.Set(cinder.API, "delete_snapshot",
- fakes.stub_snapshot_delete)
- self.stubs.Set(cinder.API, "get_snapshot", fakes.stub_snapshot_get)
- self.stubs.Set(cinder.API, "get_all_snapshots",
- fakes.stub_snapshot_get_all)
- self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Volumes'])
-
- self.context = context.get_admin_context()
- self.app = fakes.wsgi_app(init_only=('os-snapshots',))
-
- def test_snapshot_create(self):
- snapshot = {"volume_id": 12,
- "force": False,
- "display_name": "Snapshot Test Name",
- "display_description": "Snapshot Test Desc"}
- body = dict(snapshot=snapshot)
- req = webob.Request.blank('/v2/fake/os-snapshots')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['content-type'] = 'application/json'
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 200)
- resp_dict = jsonutils.loads(resp.body)
- self.assertIn('snapshot', resp_dict)
- self.assertEqual(resp_dict['snapshot']['displayName'],
- snapshot['display_name'])
- self.assertEqual(resp_dict['snapshot']['displayDescription'],
- snapshot['display_description'])
- self.assertEqual(resp_dict['snapshot']['volumeId'],
- snapshot['volume_id'])
-
- def test_snapshot_create_force(self):
- snapshot = {"volume_id": 12,
- "force": True,
- "display_name": "Snapshot Test Name",
- "display_description": "Snapshot Test Desc"}
- body = dict(snapshot=snapshot)
- req = webob.Request.blank('/v2/fake/os-snapshots')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['content-type'] = 'application/json'
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 200)
-
- resp_dict = jsonutils.loads(resp.body)
- self.assertIn('snapshot', resp_dict)
- self.assertEqual(resp_dict['snapshot']['displayName'],
- snapshot['display_name'])
- self.assertEqual(resp_dict['snapshot']['displayDescription'],
- snapshot['display_description'])
- self.assertEqual(resp_dict['snapshot']['volumeId'],
- snapshot['volume_id'])
-
- # Test invalid force paramter
- snapshot = {"volume_id": 12,
- "force": '**&&^^%%$$##@@'}
- body = dict(snapshot=snapshot)
- req = webob.Request.blank('/v2/fake/os-snapshots')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['content-type'] = 'application/json'
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 400)
-
- def test_snapshot_delete(self):
- snapshot_id = 123
- req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
- req.method = 'DELETE'
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 202)
-
- def test_snapshot_delete_invalid_id(self):
- snapshot_id = -1
- req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
- req.method = 'DELETE'
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 404)
-
- def test_snapshot_show(self):
- snapshot_id = 123
- req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
- req.method = 'GET'
- resp = req.get_response(self.app)
-
- self.assertEqual(resp.status_int, 200)
- resp_dict = jsonutils.loads(resp.body)
- self.assertIn('snapshot', resp_dict)
- self.assertEqual(resp_dict['snapshot']['id'], str(snapshot_id))
-
- def test_snapshot_show_invalid_id(self):
- snapshot_id = -1
- req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
- req.method = 'GET'
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 404)
-
- def test_snapshot_detail(self):
- req = webob.Request.blank('/v2/fake/os-snapshots/detail')
- req.method = 'GET'
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 200)
-
- resp_dict = jsonutils.loads(resp.body)
- self.assertIn('snapshots', resp_dict)
- resp_snapshots = resp_dict['snapshots']
- self.assertEqual(len(resp_snapshots), 3)
-
- resp_snapshot = resp_snapshots.pop()
- self.assertEqual(resp_snapshot['id'], 102)
-
-
-class SnapshotSerializerTest(test.NoDBTestCase):
- def _verify_snapshot(self, snap, tree):
- self.assertEqual(tree.tag, 'snapshot')
-
- for attr in ('id', 'status', 'size', 'createdAt',
- 'displayName', 'displayDescription', 'volumeId'):
- self.assertEqual(str(snap[attr]), tree.get(attr))
-
- def test_snapshot_show_create_serializer(self):
- serializer = volumes.SnapshotTemplate()
- raw_snapshot = dict(
- id='snap_id',
- status='snap_status',
- size=1024,
- createdAt=timeutils.utcnow(),
- displayName='snap_name',
- displayDescription='snap_desc',
- volumeId='vol_id',
- )
- text = serializer.serialize(dict(snapshot=raw_snapshot))
-
- tree = etree.fromstring(text)
-
- self._verify_snapshot(raw_snapshot, tree)
-
- def test_snapshot_index_detail_serializer(self):
- serializer = volumes.SnapshotsTemplate()
- raw_snapshots = [dict(
- id='snap1_id',
- status='snap1_status',
- size=1024,
- createdAt=timeutils.utcnow(),
- displayName='snap1_name',
- displayDescription='snap1_desc',
- volumeId='vol1_id',
- ),
- dict(
- id='snap2_id',
- status='snap2_status',
- size=1024,
- createdAt=timeutils.utcnow(),
- displayName='snap2_name',
- displayDescription='snap2_desc',
- volumeId='vol2_id',
- )]
- text = serializer.serialize(dict(snapshots=raw_snapshots))
-
- tree = etree.fromstring(text)
-
- self.assertEqual('snapshots', tree.tag)
- self.assertEqual(len(raw_snapshots), len(tree))
- for idx, child in enumerate(tree):
- self._verify_snapshot(raw_snapshots[idx], child)
diff --git a/nova/tests/api/openstack/compute/contrib/test_tenant_networks.py b/nova/tests/api/openstack/compute/contrib/test_tenant_networks.py
deleted file mode 100644
index 18133e8604..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_tenant_networks.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-import webob
-
-from nova.api.openstack.compute.contrib import os_tenant_networks as networks
-from nova.api.openstack.compute.plugins.v3 import tenant_networks \
- as networks_v21
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-class TenantNetworksTestV21(test.NoDBTestCase):
- ctrlr = networks_v21.TenantNetworkController
-
- def setUp(self):
- super(TenantNetworksTestV21, self).setUp()
- self.controller = self.ctrlr()
- self.flags(enable_network_quota=True)
-
- @mock.patch('nova.network.api.API.delete',
- side_effect=exception.NetworkInUse(network_id=1))
- def test_network_delete_in_use(self, mock_delete):
- req = fakes.HTTPRequest.blank('/v2/1234/os-tenant-networks/1')
-
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller.delete, req, 1)
-
- @mock.patch('nova.quota.QUOTAS.reserve')
- @mock.patch('nova.quota.QUOTAS.rollback')
- @mock.patch('nova.network.api.API.delete')
- def _test_network_delete_exception(self, ex, expex, delete_mock,
- rollback_mock, reserve_mock):
- req = fakes.HTTPRequest.blank('/v2/1234/os-tenant-networks')
- ctxt = req.environ['nova.context']
-
- reserve_mock.return_value = 'rv'
- delete_mock.side_effect = ex
-
- self.assertRaises(expex, self.controller.delete, req, 1)
-
- delete_mock.assert_called_once_with(ctxt, 1)
- rollback_mock.assert_called_once_with(ctxt, 'rv')
- reserve_mock.assert_called_once_with(ctxt, networks=-1)
-
- def test_network_delete_exception_network_not_found(self):
- ex = exception.NetworkNotFound(network_id=1)
- expex = webob.exc.HTTPNotFound
- self._test_network_delete_exception(ex, expex)
-
- def test_network_delete_exception_policy_failed(self):
- ex = exception.PolicyNotAuthorized(action='dummy')
- expex = webob.exc.HTTPForbidden
- self._test_network_delete_exception(ex, expex)
-
- def test_network_delete_exception_network_in_use(self):
- ex = exception.NetworkInUse(network_id=1)
- expex = webob.exc.HTTPConflict
- self._test_network_delete_exception(ex, expex)
-
-
-class TenantNetworksTestV2(TenantNetworksTestV21):
- ctrlr = networks.NetworkController
diff --git a/nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py b/nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py
deleted file mode 100644
index f235513897..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py
+++ /dev/null
@@ -1,127 +0,0 @@
-# Copyright (C) 2011 Midokura KK
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute.contrib import virtual_interfaces
-from nova.api.openstack import wsgi
-from nova import compute
-from nova.compute import api as compute_api
-from nova import context
-from nova import exception
-from nova import network
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-
-
-def compute_api_get(self, context, instance_id, expected_attrs=None,
- want_objects=False):
- return dict(uuid=FAKE_UUID, id=instance_id, instance_type_id=1, host='bob')
-
-
-def get_vifs_by_instance(self, context, instance_id):
- return [{'uuid': '00000000-0000-0000-0000-00000000000000000',
- 'address': '00-00-00-00-00-00'},
- {'uuid': '11111111-1111-1111-1111-11111111111111111',
- 'address': '11-11-11-11-11-11'}]
-
-
-class FakeRequest(object):
- def __init__(self, context):
- self.environ = {'nova.context': context}
-
-
-class ServerVirtualInterfaceTest(test.NoDBTestCase):
-
- def setUp(self):
- super(ServerVirtualInterfaceTest, self).setUp()
- self.stubs.Set(compute.api.API, "get",
- compute_api_get)
- self.stubs.Set(network.api.API, "get_vifs_by_instance",
- get_vifs_by_instance)
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Virtual_interfaces'])
-
- def test_get_virtual_interfaces_list(self):
- url = '/v2/fake/servers/abcd/os-virtual-interfaces'
- req = webob.Request.blank(url)
- res = req.get_response(fakes.wsgi_app(
- init_only=('os-virtual-interfaces',)))
- self.assertEqual(res.status_int, 200)
- res_dict = jsonutils.loads(res.body)
- response = {'virtual_interfaces': [
- {'id': '00000000-0000-0000-0000-00000000000000000',
- 'mac_address': '00-00-00-00-00-00'},
- {'id': '11111111-1111-1111-1111-11111111111111111',
- 'mac_address': '11-11-11-11-11-11'}]}
- self.assertEqual(res_dict, response)
-
- def test_vif_instance_not_found(self):
- self.mox.StubOutWithMock(compute_api.API, 'get')
- fake_context = context.RequestContext('fake', 'fake')
- fake_req = FakeRequest(fake_context)
-
- compute_api.API.get(fake_context, 'fake_uuid',
- expected_attrs=None,
- want_objects=True).AndRaise(
- exception.InstanceNotFound(instance_id='instance-0000'))
-
- self.mox.ReplayAll()
- self.assertRaises(
- webob.exc.HTTPNotFound,
- virtual_interfaces.ServerVirtualInterfaceController().index,
- fake_req, 'fake_uuid')
-
-
-class ServerVirtualInterfaceSerializerTest(test.NoDBTestCase):
- def setUp(self):
- super(ServerVirtualInterfaceSerializerTest, self).setUp()
- self.namespace = wsgi.XMLNS_V11
- self.serializer = virtual_interfaces.VirtualInterfaceTemplate()
-
- def _tag(self, elem):
- tagname = elem.tag
- self.assertEqual(tagname[0], '{')
- tmp = tagname.partition('}')
- namespace = tmp[0][1:]
- self.assertEqual(namespace, self.namespace)
- return tmp[2]
-
- def test_serializer(self):
- raw_vifs = [dict(
- id='uuid1',
- mac_address='aa:bb:cc:dd:ee:ff'),
- dict(
- id='uuid2',
- mac_address='bb:aa:dd:cc:ff:ee')]
- vifs = dict(virtual_interfaces=raw_vifs)
- text = self.serializer.serialize(vifs)
-
- tree = etree.fromstring(text)
-
- self.assertEqual('virtual_interfaces', self._tag(tree))
- self.assertEqual(len(raw_vifs), len(tree))
- for idx, child in enumerate(tree):
- self.assertEqual('virtual_interface', self._tag(child))
- self.assertEqual(raw_vifs[idx]['id'], child.get('id'))
- self.assertEqual(raw_vifs[idx]['mac_address'],
- child.get('mac_address'))
diff --git a/nova/tests/api/openstack/compute/contrib/test_volumes.py b/nova/tests/api/openstack/compute/contrib/test_volumes.py
deleted file mode 100644
index 511f6017c5..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_volumes.py
+++ /dev/null
@@ -1,1083 +0,0 @@
-# Copyright 2013 Josh Durgin
-# Copyright 2013 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-from lxml import etree
-import mock
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-from oslo.utils import timeutils
-import webob
-from webob import exc
-
-from nova.api.openstack.compute.contrib import assisted_volume_snapshots as \
- assisted_snaps
-from nova.api.openstack.compute.contrib import volumes
-from nova.api.openstack.compute.plugins.v3 import volumes as volumes_v3
-from nova.api.openstack import extensions
-from nova.compute import api as compute_api
-from nova.compute import flavors
-from nova import context
-from nova import db
-from nova import exception
-from nova import objects
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_block_device
-from nova.tests import fake_instance
-from nova.volume import cinder
-
-CONF = cfg.CONF
-CONF.import_opt('password_length', 'nova.utils')
-
-FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-FAKE_UUID_A = '00000000-aaaa-aaaa-aaaa-000000000000'
-FAKE_UUID_B = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
-FAKE_UUID_C = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
-FAKE_UUID_D = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
-
-IMAGE_UUID = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
-
-
-def fake_get_instance(self, context, instance_id, want_objects=False,
- expected_attrs=None):
- return fake_instance.fake_instance_obj(context, **{'uuid': instance_id})
-
-
-def fake_get_volume(self, context, id):
- return {'id': 'woot'}
-
-
-def fake_attach_volume(self, context, instance, volume_id, device):
- pass
-
-
-def fake_detach_volume(self, context, instance, volume):
- pass
-
-
-def fake_swap_volume(self, context, instance,
- old_volume_id, new_volume_id):
- pass
-
-
-def fake_create_snapshot(self, context, volume, name, description):
- return {'id': 123,
- 'volume_id': 'fakeVolId',
- 'status': 'available',
- 'volume_size': 123,
- 'created_at': '2013-01-01 00:00:01',
- 'display_name': 'myVolumeName',
- 'display_description': 'myVolumeDescription'}
-
-
-def fake_delete_snapshot(self, context, snapshot_id):
- pass
-
-
-def fake_compute_volume_snapshot_delete(self, context, volume_id, snapshot_id,
- delete_info):
- pass
-
-
-def fake_compute_volume_snapshot_create(self, context, volume_id,
- create_info):
- pass
-
-
-def fake_bdms_get_all_by_instance(context, instance_uuid, use_slave=False):
- return [fake_block_device.FakeDbBlockDeviceDict(
- {'id': 1,
- 'instance_uuid': instance_uuid,
- 'device_name': '/dev/fake0',
- 'delete_on_termination': 'False',
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'snapshot_id': None,
- 'volume_id': FAKE_UUID_A,
- 'volume_size': 1}),
- fake_block_device.FakeDbBlockDeviceDict(
- {'id': 2,
- 'instance_uuid': instance_uuid,
- 'device_name': '/dev/fake1',
- 'delete_on_termination': 'False',
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'snapshot_id': None,
- 'volume_id': FAKE_UUID_B,
- 'volume_size': 1})]
-
-
-class BootFromVolumeTest(test.TestCase):
-
- def setUp(self):
- super(BootFromVolumeTest, self).setUp()
- self.stubs.Set(compute_api.API, 'create',
- self._get_fake_compute_api_create())
- fakes.stub_out_nw_api(self.stubs)
- self._block_device_mapping_seen = None
- self._legacy_bdm_seen = True
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Volumes', 'Block_device_mapping_v2_boot'])
-
- def _get_fake_compute_api_create(self):
- def _fake_compute_api_create(cls, context, instance_type,
- image_href, **kwargs):
- self._block_device_mapping_seen = kwargs.get(
- 'block_device_mapping')
- self._legacy_bdm_seen = kwargs.get('legacy_bdm')
-
- inst_type = flavors.get_flavor_by_flavor_id(2)
- resv_id = None
- return ([{'id': 1,
- 'display_name': 'test_server',
- 'uuid': FAKE_UUID,
- 'instance_type': inst_type,
- 'access_ip_v4': '1.2.3.4',
- 'access_ip_v6': 'fead::1234',
- 'image_ref': IMAGE_UUID,
- 'user_id': 'fake',
- 'project_id': 'fake',
- 'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
- 'updated_at': datetime.datetime(2010, 11, 11, 11, 0, 0),
- 'progress': 0,
- 'fixed_ips': []
- }], resv_id)
- return _fake_compute_api_create
-
- def test_create_root_volume(self):
- body = dict(server=dict(
- name='test_server', imageRef=IMAGE_UUID,
- flavorRef=2, min_count=1, max_count=1,
- block_device_mapping=[dict(
- volume_id=1,
- device_name='/dev/vda',
- virtual='root',
- delete_on_termination=False,
- )]
- ))
- req = webob.Request.blank('/v2/fake/os-volumes_boot')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['content-type'] = 'application/json'
- res = req.get_response(fakes.wsgi_app(
- init_only=('os-volumes_boot', 'servers')))
- self.assertEqual(res.status_int, 202)
- server = jsonutils.loads(res.body)['server']
- self.assertEqual(FAKE_UUID, server['id'])
- self.assertEqual(CONF.password_length, len(server['adminPass']))
- self.assertEqual(len(self._block_device_mapping_seen), 1)
- self.assertTrue(self._legacy_bdm_seen)
- self.assertEqual(self._block_device_mapping_seen[0]['volume_id'], 1)
- self.assertEqual(self._block_device_mapping_seen[0]['device_name'],
- '/dev/vda')
-
- def test_create_root_volume_bdm_v2(self):
- body = dict(server=dict(
- name='test_server', imageRef=IMAGE_UUID,
- flavorRef=2, min_count=1, max_count=1,
- block_device_mapping_v2=[dict(
- source_type='volume',
- uuid=1,
- device_name='/dev/vda',
- boot_index=0,
- delete_on_termination=False,
- )]
- ))
- req = webob.Request.blank('/v2/fake/os-volumes_boot')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['content-type'] = 'application/json'
- res = req.get_response(fakes.wsgi_app(
- init_only=('os-volumes_boot', 'servers')))
- self.assertEqual(res.status_int, 202)
- server = jsonutils.loads(res.body)['server']
- self.assertEqual(FAKE_UUID, server['id'])
- self.assertEqual(CONF.password_length, len(server['adminPass']))
- self.assertEqual(len(self._block_device_mapping_seen), 1)
- self.assertFalse(self._legacy_bdm_seen)
- self.assertEqual(self._block_device_mapping_seen[0]['volume_id'], 1)
- self.assertEqual(self._block_device_mapping_seen[0]['boot_index'],
- 0)
- self.assertEqual(self._block_device_mapping_seen[0]['device_name'],
- '/dev/vda')
-
-
-class VolumeApiTestV21(test.TestCase):
- url_prefix = '/v2/fake'
-
- def setUp(self):
- super(VolumeApiTestV21, self).setUp()
- fakes.stub_out_networking(self.stubs)
- fakes.stub_out_rate_limiting(self.stubs)
-
- self.stubs.Set(cinder.API, "delete", fakes.stub_volume_delete)
- self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
- self.stubs.Set(cinder.API, "get_all", fakes.stub_volume_get_all)
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Volumes'])
-
- self.context = context.get_admin_context()
- self.app = self._get_app()
-
- def _get_app(self):
- return fakes.wsgi_app_v21()
-
- def test_volume_create(self):
- self.stubs.Set(cinder.API, "create", fakes.stub_volume_create)
-
- vol = {"size": 100,
- "display_name": "Volume Test Name",
- "display_description": "Volume Test Desc",
- "availability_zone": "zone1:host1"}
- body = {"volume": vol}
- req = webob.Request.blank(self.url_prefix + '/os-volumes')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['content-type'] = 'application/json'
- resp = req.get_response(self.app)
-
- self.assertEqual(resp.status_int, 200)
-
- resp_dict = jsonutils.loads(resp.body)
- self.assertIn('volume', resp_dict)
- self.assertEqual(resp_dict['volume']['size'],
- vol['size'])
- self.assertEqual(resp_dict['volume']['displayName'],
- vol['display_name'])
- self.assertEqual(resp_dict['volume']['displayDescription'],
- vol['display_description'])
- self.assertEqual(resp_dict['volume']['availabilityZone'],
- vol['availability_zone'])
-
- def test_volume_create_bad(self):
- def fake_volume_create(self, context, size, name, description,
- snapshot, **param):
- raise exception.InvalidInput(reason="bad request data")
-
- self.stubs.Set(cinder.API, "create", fake_volume_create)
-
- vol = {"size": '#$?',
- "display_name": "Volume Test Name",
- "display_description": "Volume Test Desc",
- "availability_zone": "zone1:host1"}
- body = {"volume": vol}
-
- req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
- self.assertRaises(webob.exc.HTTPBadRequest,
- volumes.VolumeController().create, req, body)
-
- def test_volume_index(self):
- req = webob.Request.blank(self.url_prefix + '/os-volumes')
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 200)
-
- def test_volume_detail(self):
- req = webob.Request.blank(self.url_prefix + '/os-volumes/detail')
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 200)
-
- def test_volume_show(self):
- req = webob.Request.blank(self.url_prefix + '/os-volumes/123')
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 200)
-
- def test_volume_show_no_volume(self):
- self.stubs.Set(cinder.API, "get", fakes.stub_volume_notfound)
-
- req = webob.Request.blank(self.url_prefix + '/os-volumes/456')
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 404)
- self.assertIn('Volume 456 could not be found.', resp.body)
-
- def test_volume_delete(self):
- req = webob.Request.blank(self.url_prefix + '/os-volumes/123')
- req.method = 'DELETE'
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 202)
-
- def test_volume_delete_no_volume(self):
- self.stubs.Set(cinder.API, "delete", fakes.stub_volume_notfound)
-
- req = webob.Request.blank(self.url_prefix + '/os-volumes/456')
- req.method = 'DELETE'
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 404)
- self.assertIn('Volume 456 could not be found.', resp.body)
-
-
-class VolumeApiTestV2(VolumeApiTestV21):
-
- def setUp(self):
- super(VolumeApiTestV2, self).setUp()
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Volumes'])
-
- self.context = context.get_admin_context()
- self.app = self._get_app()
-
- def _get_app(self):
- return fakes.wsgi_app()
-
-
-class VolumeAttachTests(test.TestCase):
- def setUp(self):
- super(VolumeAttachTests, self).setUp()
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fake_bdms_get_all_by_instance)
- self.stubs.Set(compute_api.API, 'get', fake_get_instance)
- self.stubs.Set(cinder.API, 'get', fake_get_volume)
- self.context = context.get_admin_context()
- self.expected_show = {'volumeAttachment':
- {'device': '/dev/fake0',
- 'serverId': FAKE_UUID,
- 'id': FAKE_UUID_A,
- 'volumeId': FAKE_UUID_A
- }}
- self.ext_mgr = extensions.ExtensionManager()
- self.ext_mgr.extensions = {}
- self.attachments = volumes.VolumeAttachmentController(self.ext_mgr)
-
- def test_show(self):
- req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- result = self.attachments.show(req, FAKE_UUID, FAKE_UUID_A)
- self.assertEqual(self.expected_show, result)
-
- @mock.patch.object(compute_api.API, 'get',
- side_effect=exception.InstanceNotFound(instance_id=FAKE_UUID))
- def test_show_no_instance(self, mock_mr):
- req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- self.assertRaises(exc.HTTPNotFound,
- self.attachments.show,
- req,
- FAKE_UUID,
- FAKE_UUID_A)
-
- @mock.patch.object(objects.BlockDeviceMappingList,
- 'get_by_instance_uuid', return_value=None)
- def test_show_no_bdms(self, mock_mr):
- req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- self.assertRaises(exc.HTTPNotFound,
- self.attachments.show,
- req,
- FAKE_UUID,
- FAKE_UUID_A)
-
- def test_show_bdms_no_mountpoint(self):
- FAKE_UUID_NOTEXIST = '00000000-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-
- req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- self.assertRaises(exc.HTTPNotFound,
- self.attachments.show,
- req,
- FAKE_UUID,
- FAKE_UUID_NOTEXIST)
-
- def test_detach(self):
- self.stubs.Set(compute_api.API,
- 'detach_volume',
- fake_detach_volume)
- req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
- req.method = 'DELETE'
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- result = self.attachments.delete(req, FAKE_UUID, FAKE_UUID_A)
- self.assertEqual('202 Accepted', result.status)
-
- def test_detach_vol_not_found(self):
- self.stubs.Set(compute_api.API,
- 'detach_volume',
- fake_detach_volume)
- req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
- req.method = 'DELETE'
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- self.assertRaises(exc.HTTPNotFound,
- self.attachments.delete,
- req,
- FAKE_UUID,
- FAKE_UUID_C)
-
- @mock.patch('nova.objects.BlockDeviceMapping.is_root',
- new_callable=mock.PropertyMock)
- def test_detach_vol_root(self, mock_isroot):
- req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
- req.method = 'DELETE'
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
- mock_isroot.return_value = True
- self.assertRaises(exc.HTTPForbidden,
- self.attachments.delete,
- req,
- FAKE_UUID,
- FAKE_UUID_A)
-
- def test_detach_volume_from_locked_server(self):
- def fake_detach_volume_from_locked_server(self, context,
- instance, volume):
- raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
-
- self.stubs.Set(compute_api.API,
- 'detach_volume',
- fake_detach_volume_from_locked_server)
- req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
- req.method = 'DELETE'
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- self.assertRaises(webob.exc.HTTPConflict, self.attachments.delete,
- req, FAKE_UUID, FAKE_UUID_A)
-
- def test_attach_volume(self):
- self.stubs.Set(compute_api.API,
- 'attach_volume',
- fake_attach_volume)
- body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
- 'device': '/dev/fake'}}
- req = webob.Request.blank('/v2/servers/id/os-volume_attachments')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
- result = self.attachments.create(req, FAKE_UUID, body)
- self.assertEqual(result['volumeAttachment']['id'],
- '00000000-aaaa-aaaa-aaaa-000000000000')
-
- def test_attach_volume_to_locked_server(self):
- def fake_attach_volume_to_locked_server(self, context, instance,
- volume_id, device=None):
- raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
-
- self.stubs.Set(compute_api.API,
- 'attach_volume',
- fake_attach_volume_to_locked_server)
- body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
- 'device': '/dev/fake'}}
- req = webob.Request.blank('/v2/servers/id/os-volume_attachments')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- self.assertRaises(webob.exc.HTTPConflict, self.attachments.create,
- req, FAKE_UUID, body)
-
- def test_attach_volume_bad_id(self):
- self.stubs.Set(compute_api.API,
- 'attach_volume',
- fake_attach_volume)
-
- body = {
- 'volumeAttachment': {
- 'device': None,
- 'volumeId': 'TESTVOLUME',
- }
- }
-
- req = webob.Request.blank('/v2/servers/id/os-volume_attachments')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- self.assertRaises(webob.exc.HTTPBadRequest, self.attachments.create,
- req, FAKE_UUID, body)
-
- def test_attach_volume_without_volumeId(self):
- self.stubs.Set(compute_api.API,
- 'attach_volume',
- fake_attach_volume)
-
- body = {
- 'volumeAttachment': {
- 'device': None
- }
- }
-
- req = webob.Request.blank('/v2/servers/id/os-volume_attachments')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- self.assertRaises(webob.exc.HTTPBadRequest, self.attachments.create,
- req, FAKE_UUID, body)
-
- def _test_swap(self, uuid=FAKE_UUID_A, fake_func=None, body=None):
- fake_func = fake_func or fake_swap_volume
- self.stubs.Set(compute_api.API,
- 'swap_volume',
- fake_func)
- body = body or {'volumeAttachment': {'volumeId': FAKE_UUID_B,
- 'device': '/dev/fake'}}
-
- req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
- req.method = 'PUT'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
- return self.attachments.update(req, FAKE_UUID, uuid, body)
-
- def test_swap_volume_for_locked_server(self):
- self.ext_mgr.extensions['os-volume-attachment-update'] = True
-
- def fake_swap_volume_for_locked_server(self, context, instance,
- old_volume, new_volume):
- raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
-
- self.ext_mgr.extensions['os-volume-attachment-update'] = True
- self.assertRaises(webob.exc.HTTPConflict, self._test_swap,
- fake_func=fake_swap_volume_for_locked_server)
-
- def test_swap_volume_no_extension(self):
- self.assertRaises(webob.exc.HTTPBadRequest, self._test_swap)
-
- def test_swap_volume(self):
- self.ext_mgr.extensions['os-volume-attachment-update'] = True
- result = self._test_swap()
- self.assertEqual('202 Accepted', result.status)
-
- def test_swap_volume_no_attachment(self):
- self.ext_mgr.extensions['os-volume-attachment-update'] = True
-
- self.assertRaises(exc.HTTPNotFound, self._test_swap, FAKE_UUID_C)
-
- def test_swap_volume_without_volumeId(self):
- self.ext_mgr.extensions['os-volume-attachment-update'] = True
- body = {'volumeAttachment': {'device': '/dev/fake'}}
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_swap,
- body=body)
-
-
-class VolumeSerializerTest(test.TestCase):
- def _verify_volume_attachment(self, attach, tree):
- for attr in ('id', 'volumeId', 'serverId', 'device'):
- self.assertEqual(str(attach[attr]), tree.get(attr))
-
- def _verify_volume(self, vol, tree):
- self.assertEqual(tree.tag, 'volume')
-
- for attr in ('id', 'status', 'size', 'availabilityZone', 'createdAt',
- 'displayName', 'displayDescription', 'volumeType',
- 'snapshotId'):
- self.assertEqual(str(vol[attr]), tree.get(attr))
-
- for child in tree:
- self.assertIn(child.tag, ('attachments', 'metadata'))
- if child.tag == 'attachments':
- self.assertEqual(1, len(child))
- self.assertEqual('attachment', child[0].tag)
- self._verify_volume_attachment(vol['attachments'][0], child[0])
- elif child.tag == 'metadata':
- not_seen = set(vol['metadata'].keys())
- for gr_child in child:
- self.assertIn(gr_child.get("key"), not_seen)
- self.assertEqual(str(vol['metadata'][gr_child.get("key")]),
- gr_child.text)
- not_seen.remove(gr_child.get("key"))
- self.assertEqual(0, len(not_seen))
-
- def test_attach_show_create_serializer(self):
- serializer = volumes.VolumeAttachmentTemplate()
- raw_attach = dict(
- id='vol_id',
- volumeId='vol_id',
- serverId='instance_uuid',
- device='/foo')
- text = serializer.serialize(dict(volumeAttachment=raw_attach))
-
- tree = etree.fromstring(text)
-
- self.assertEqual('volumeAttachment', tree.tag)
- self._verify_volume_attachment(raw_attach, tree)
-
- def test_attach_index_serializer(self):
- serializer = volumes.VolumeAttachmentsTemplate()
- raw_attaches = [dict(
- id='vol_id1',
- volumeId='vol_id1',
- serverId='instance1_uuid',
- device='/foo1'),
- dict(
- id='vol_id2',
- volumeId='vol_id2',
- serverId='instance2_uuid',
- device='/foo2')]
- text = serializer.serialize(dict(volumeAttachments=raw_attaches))
-
- tree = etree.fromstring(text)
-
- self.assertEqual('volumeAttachments', tree.tag)
- self.assertEqual(len(raw_attaches), len(tree))
- for idx, child in enumerate(tree):
- self.assertEqual('volumeAttachment', child.tag)
- self._verify_volume_attachment(raw_attaches[idx], child)
-
- def test_volume_show_create_serializer(self):
- serializer = volumes.VolumeTemplate()
- raw_volume = dict(
- id='vol_id',
- status='vol_status',
- size=1024,
- availabilityZone='vol_availability',
- createdAt=timeutils.utcnow(),
- attachments=[dict(
- id='vol_id',
- volumeId='vol_id',
- serverId='instance_uuid',
- device='/foo')],
- displayName='vol_name',
- displayDescription='vol_desc',
- volumeType='vol_type',
- snapshotId='snap_id',
- metadata=dict(
- foo='bar',
- baz='quux',
- ),
- )
- text = serializer.serialize(dict(volume=raw_volume))
-
- tree = etree.fromstring(text)
-
- self._verify_volume(raw_volume, tree)
-
- def test_volume_index_detail_serializer(self):
- serializer = volumes.VolumesTemplate()
- raw_volumes = [dict(
- id='vol1_id',
- status='vol1_status',
- size=1024,
- availabilityZone='vol1_availability',
- createdAt=timeutils.utcnow(),
- attachments=[dict(
- id='vol1_id',
- volumeId='vol1_id',
- serverId='instance_uuid',
- device='/foo1')],
- displayName='vol1_name',
- displayDescription='vol1_desc',
- volumeType='vol1_type',
- snapshotId='snap1_id',
- metadata=dict(
- foo='vol1_foo',
- bar='vol1_bar',
- ),
- ),
- dict(
- id='vol2_id',
- status='vol2_status',
- size=1024,
- availabilityZone='vol2_availability',
- createdAt=timeutils.utcnow(),
- attachments=[dict(
- id='vol2_id',
- volumeId='vol2_id',
- serverId='instance_uuid',
- device='/foo2')],
- displayName='vol2_name',
- displayDescription='vol2_desc',
- volumeType='vol2_type',
- snapshotId='snap2_id',
- metadata=dict(
- foo='vol2_foo',
- bar='vol2_bar',
- ),
- )]
- text = serializer.serialize(dict(volumes=raw_volumes))
-
- tree = etree.fromstring(text)
-
- self.assertEqual('volumes', tree.tag)
- self.assertEqual(len(raw_volumes), len(tree))
- for idx, child in enumerate(tree):
- self._verify_volume(raw_volumes[idx], child)
-
-
-class TestVolumeCreateRequestXMLDeserializer(test.TestCase):
-
- def setUp(self):
- super(TestVolumeCreateRequestXMLDeserializer, self).setUp()
- self.deserializer = volumes.CreateDeserializer()
-
- def test_minimal_volume(self):
- self_request = """
-<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
- size="1"></volume>"""
- request = self.deserializer.deserialize(self_request)
- expected = {
- "volume": {
- "size": "1",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_display_name(self):
- self_request = """
-<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
- size="1"
- display_name="Volume-xml"></volume>"""
- request = self.deserializer.deserialize(self_request)
- expected = {
- "volume": {
- "size": "1",
- "display_name": "Volume-xml",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_display_description(self):
- self_request = """
-<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
- size="1"
- display_name="Volume-xml"
- display_description="description"></volume>"""
- request = self.deserializer.deserialize(self_request)
- expected = {
- "volume": {
- "size": "1",
- "display_name": "Volume-xml",
- "display_description": "description",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_volume_type(self):
- self_request = """
-<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
- size="1"
- display_name="Volume-xml"
- display_description="description"
- volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"></volume>"""
- request = self.deserializer.deserialize(self_request)
- expected = {
- "volume": {
- "size": "1",
- "display_name": "Volume-xml",
- "display_description": "description",
- "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_availability_zone(self):
- self_request = """
-<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
- size="1"
- display_name="Volume-xml"
- display_description="description"
- volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
- availability_zone="us-east1"></volume>"""
- request = self.deserializer.deserialize(self_request)
- expected = {
- "volume": {
- "size": "1",
- "display_name": "Volume-xml",
- "display_description": "description",
- "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
- "availability_zone": "us-east1",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_metadata(self):
- self_request = """
-<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
- display_name="Volume-xml"
- size="1">
- <metadata><meta key="Type">work</meta></metadata></volume>"""
- request = self.deserializer.deserialize(self_request)
- expected = {
- "volume": {
- "display_name": "Volume-xml",
- "size": "1",
- "metadata": {
- "Type": "work",
- },
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_full_volume(self):
- self_request = """
-<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
- size="1"
- display_name="Volume-xml"
- display_description="description"
- volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
- availability_zone="us-east1">
- <metadata><meta key="Type">work</meta></metadata></volume>"""
- request = self.deserializer.deserialize(self_request)
- expected = {
- "volume": {
- "size": "1",
- "display_name": "Volume-xml",
- "display_description": "description",
- "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
- "availability_zone": "us-east1",
- "metadata": {
- "Type": "work",
- },
- },
- }
- self.maxDiff = None
- self.assertEqual(request['body'], expected)
-
-
-class CommonBadRequestTestCase(object):
-
- resource = None
- entity_name = None
- controller_cls = None
- kwargs = {}
-
- """
- Tests of places we throw 400 Bad Request from
- """
-
- def setUp(self):
- super(CommonBadRequestTestCase, self).setUp()
- self.controller = self.controller_cls()
-
- def _bad_request_create(self, body):
- req = fakes.HTTPRequest.blank('/v2/fake/' + self.resource)
- req.method = 'POST'
-
- kwargs = self.kwargs.copy()
- kwargs['body'] = body
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, req, **kwargs)
-
- def test_create_no_body(self):
- self._bad_request_create(body=None)
-
- def test_create_missing_volume(self):
- body = {'foo': {'a': 'b'}}
- self._bad_request_create(body=body)
-
- def test_create_malformed_entity(self):
- body = {self.entity_name: 'string'}
- self._bad_request_create(body=body)
-
-
-class BadRequestVolumeTestCaseV21(CommonBadRequestTestCase,
- test.TestCase):
-
- resource = 'os-volumes'
- entity_name = 'volume'
- controller_cls = volumes_v3.VolumeController
-
-
-class BadRequestVolumeTestCaseV2(BadRequestVolumeTestCaseV21):
- controller_cls = volumes.VolumeController
-
-
-class BadRequestAttachmentTestCase(CommonBadRequestTestCase,
- test.TestCase):
- resource = 'servers/' + FAKE_UUID + '/os-volume_attachments'
- entity_name = 'volumeAttachment'
- controller_cls = volumes.VolumeAttachmentController
- kwargs = {'server_id': FAKE_UUID}
-
-
-class BadRequestSnapshotTestCaseV21(CommonBadRequestTestCase,
- test.TestCase):
-
- resource = 'os-snapshots'
- entity_name = 'snapshot'
- controller_cls = volumes.SnapshotController
-
-
-class BadRequestSnapshotTestCaseV2(BadRequestSnapshotTestCaseV21):
- controller_cls = volumes_v3.SnapshotController
-
-
-class ShowSnapshotTestCaseV21(test.TestCase):
- snapshot_cls = volumes_v3.SnapshotController
-
- def setUp(self):
- super(ShowSnapshotTestCaseV21, self).setUp()
- self.controller = self.snapshot_cls()
- self.req = fakes.HTTPRequest.blank('/v2/fake/os-snapshots')
- self.req.method = 'GET'
-
- def test_show_snapshot_not_exist(self):
- def fake_get_snapshot(self, context, id):
- raise exception.SnapshotNotFound(snapshot_id=id)
- self.stubs.Set(cinder.API, 'get_snapshot', fake_get_snapshot)
- self.assertRaises(exc.HTTPNotFound,
- self.controller.show, self.req, FAKE_UUID_A)
-
-
-class ShowSnapshotTestCaseV2(ShowSnapshotTestCaseV21):
- snapshot_cls = volumes.SnapshotController
-
-
-class CreateSnapshotTestCaseV21(test.TestCase):
- snapshot_cls = volumes_v3.SnapshotController
-
- def setUp(self):
- super(CreateSnapshotTestCaseV21, self).setUp()
- self.controller = self.snapshot_cls()
- self.stubs.Set(cinder.API, 'get', fake_get_volume)
- self.stubs.Set(cinder.API, 'create_snapshot_force',
- fake_create_snapshot)
- self.stubs.Set(cinder.API, 'create_snapshot', fake_create_snapshot)
- self.req = fakes.HTTPRequest.blank('/v2/fake/os-snapshots')
- self.req.method = 'POST'
- self.body = {'snapshot': {'volume_id': 1}}
-
- def test_force_true(self):
- self.body['snapshot']['force'] = 'True'
- self.controller.create(self.req, body=self.body)
-
- def test_force_false(self):
- self.body['snapshot']['force'] = 'f'
- self.controller.create(self.req, body=self.body)
-
- def test_force_invalid(self):
- self.body['snapshot']['force'] = 'foo'
- self.assertRaises(exc.HTTPBadRequest,
- self.controller.create, self.req, body=self.body)
-
-
-class CreateSnapshotTestCaseV2(CreateSnapshotTestCaseV21):
- snapshot_cls = volumes.SnapshotController
-
-
-class DeleteSnapshotTestCaseV21(test.TestCase):
- snapshot_cls = volumes_v3.SnapshotController
-
- def setUp(self):
- super(DeleteSnapshotTestCaseV21, self).setUp()
- self.controller = self.snapshot_cls()
- self.stubs.Set(cinder.API, 'get', fake_get_volume)
- self.stubs.Set(cinder.API, 'create_snapshot_force',
- fake_create_snapshot)
- self.stubs.Set(cinder.API, 'create_snapshot', fake_create_snapshot)
- self.stubs.Set(cinder.API, 'delete_snapshot', fake_delete_snapshot)
- self.req = fakes.HTTPRequest.blank('/v2/fake/os-snapshots')
-
- def test_normal_delete(self):
- self.req.method = 'POST'
- self.body = {'snapshot': {'volume_id': 1}}
- result = self.controller.create(self.req, body=self.body)
-
- self.req.method = 'DELETE'
- result = self.controller.delete(self.req, result['snapshot']['id'])
-
- # NOTE: on v2.1, http status code is set as wsgi_code of API
- # method instead of status_int in a response object.
- if isinstance(self.controller, volumes_v3.SnapshotController):
- status_int = self.controller.delete.wsgi_code
- else:
- status_int = result.status_int
- self.assertEqual(202, status_int)
-
- def test_delete_snapshot_not_exists(self):
- def fake_delete_snapshot_not_exist(self, context, snapshot_id):
- raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
-
- self.stubs.Set(cinder.API, 'delete_snapshot',
- fake_delete_snapshot_not_exist)
- self.req.method = 'POST'
- self.body = {'snapshot': {'volume_id': 1}}
- result = self.controller.create(self.req, body=self.body)
-
- self.req.method = 'DELETE'
- self.assertRaises(exc.HTTPNotFound, self.controller.delete,
- self.req, result['snapshot']['id'])
-
-
-class DeleteSnapshotTestCaseV2(DeleteSnapshotTestCaseV21):
- snapshot_cls = volumes.SnapshotController
-
-
-class AssistedSnapshotCreateTestCase(test.TestCase):
- def setUp(self):
- super(AssistedSnapshotCreateTestCase, self).setUp()
-
- self.controller = assisted_snaps.AssistedVolumeSnapshotsController()
- self.stubs.Set(compute_api.API, 'volume_snapshot_create',
- fake_compute_volume_snapshot_create)
-
- def test_assisted_create(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
- body = {'snapshot': {'volume_id': 1, 'create_info': {}}}
- req.method = 'POST'
- self.controller.create(req, body=body)
-
- def test_assisted_create_missing_create_info(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
- body = {'snapshot': {'volume_id': 1}}
- req.method = 'POST'
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, body=body)
-
-
-class AssistedSnapshotDeleteTestCase(test.TestCase):
- def setUp(self):
- super(AssistedSnapshotDeleteTestCase, self).setUp()
-
- self.controller = assisted_snaps.AssistedVolumeSnapshotsController()
- self.stubs.Set(compute_api.API, 'volume_snapshot_delete',
- fake_compute_volume_snapshot_delete)
-
- def test_assisted_delete(self):
- params = {
- 'delete_info': jsonutils.dumps({'volume_id': 1}),
- }
- req = fakes.HTTPRequest.blank(
- '/v2/fake/os-assisted-volume-snapshots?%s' %
- '&'.join(['%s=%s' % (k, v) for k, v in params.iteritems()]))
- req.method = 'DELETE'
- result = self.controller.delete(req, '5')
- self.assertEqual(result.status_int, 204)
-
- def test_assisted_delete_missing_delete_info(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
- req.method = 'DELETE'
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
- req, '5')
diff --git a/nova/tests/api/openstack/compute/plugins/v3/admin_only_action_common.py b/nova/tests/api/openstack/compute/plugins/v3/admin_only_action_common.py
deleted file mode 100644
index b3b234f954..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/admin_only_action_common.py
+++ /dev/null
@@ -1,263 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.serialization import jsonutils
-from oslo.utils import timeutils
-import webob
-
-from nova.compute import vm_states
-import nova.context
-from nova import exception
-from nova.openstack.common import uuidutils
-from nova import test
-from nova.tests import fake_instance
-
-
-class CommonMixin(object):
- def setUp(self):
- super(CommonMixin, self).setUp()
- self.compute_api = None
- self.context = nova.context.RequestContext('fake', 'fake')
-
- def _make_request(self, url, body):
- req = webob.Request.blank('/v2/fake' + url)
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.content_type = 'application/json'
- return req.get_response(self.app)
-
- def _stub_instance_get(self, uuid=None):
- if uuid is None:
- uuid = uuidutils.generate_uuid()
- instance = fake_instance.fake_instance_obj(self.context,
- id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
- task_state=None, launched_at=timeutils.utcnow())
- self.compute_api.get(self.context, uuid, expected_attrs=None,
- want_objects=True).AndReturn(instance)
- return instance
-
- def _stub_instance_get_failure(self, exc_info, uuid=None):
- if uuid is None:
- uuid = uuidutils.generate_uuid()
- self.compute_api.get(self.context, uuid, expected_attrs=None,
- want_objects=True).AndRaise(exc_info)
- return uuid
-
- def _test_non_existing_instance(self, action, body_map=None):
- uuid = uuidutils.generate_uuid()
- self._stub_instance_get_failure(
- exception.InstanceNotFound(instance_id=uuid), uuid=uuid)
-
- self.mox.ReplayAll()
-
- res = self._make_request('/servers/%s/action' % uuid,
- {action: body_map.get(action)})
- self.assertEqual(404, res.status_int)
- # Do these here instead of tearDown because this method is called
- # more than once for the same test case
- self.mox.VerifyAll()
- self.mox.UnsetStubs()
-
- def _test_action(self, action, body=None, method=None,
- compute_api_args_map=None):
- if method is None:
- method = action
-
- compute_api_args_map = compute_api_args_map or {}
-
- instance = self._stub_instance_get()
-
- args, kwargs = compute_api_args_map.get(action, ((), {}))
- getattr(self.compute_api, method)(self.context, instance, *args,
- **kwargs)
-
- self.mox.ReplayAll()
-
- res = self._make_request('/servers/%s/action' % instance.uuid,
- {action: body})
- self.assertEqual(202, res.status_int)
- # Do these here instead of tearDown because this method is called
- # more than once for the same test case
- self.mox.VerifyAll()
- self.mox.UnsetStubs()
-
- def _test_not_implemented_state(self, action, method=None):
- if method is None:
- method = action
-
- instance = self._stub_instance_get()
- body = {}
- compute_api_args_map = {}
- args, kwargs = compute_api_args_map.get(action, ((), {}))
- getattr(self.compute_api, method)(self.context, instance,
- *args, **kwargs).AndRaise(
- NotImplementedError())
-
- self.mox.ReplayAll()
-
- res = self._make_request('/servers/%s/action' % instance.uuid,
- {action: body})
- self.assertEqual(501, res.status_int)
- # Do these here instead of tearDown because this method is called
- # more than once for the same test case
- self.mox.VerifyAll()
- self.mox.UnsetStubs()
-
- def _test_invalid_state(self, action, method=None, body_map=None,
- compute_api_args_map=None):
- if method is None:
- method = action
- if body_map is None:
- body_map = {}
- if compute_api_args_map is None:
- compute_api_args_map = {}
-
- instance = self._stub_instance_get()
-
- args, kwargs = compute_api_args_map.get(action, ((), {}))
-
- getattr(self.compute_api, method)(self.context, instance,
- *args, **kwargs).AndRaise(
- exception.InstanceInvalidState(
- attr='vm_state', instance_uuid=instance.uuid,
- state='foo', method=method))
-
- self.mox.ReplayAll()
-
- res = self._make_request('/servers/%s/action' % instance.uuid,
- {action: body_map.get(action)})
- self.assertEqual(409, res.status_int)
- self.assertIn("Cannot \'%(action)s\' instance %(id)s"
- % {'action': action, 'id': instance.uuid}, res.body)
- # Do these here instead of tearDown because this method is called
- # more than once for the same test case
- self.mox.VerifyAll()
- self.mox.UnsetStubs()
-
- def _test_locked_instance(self, action, method=None, body=None,
- compute_api_args_map=None):
- if method is None:
- method = action
-
- compute_api_args_map = compute_api_args_map or {}
- instance = self._stub_instance_get()
-
- args, kwargs = compute_api_args_map.get(action, ((), {}))
- getattr(self.compute_api, method)(self.context, instance, *args,
- **kwargs).AndRaise(
- exception.InstanceIsLocked(instance_uuid=instance.uuid))
-
- self.mox.ReplayAll()
-
- res = self._make_request('/servers/%s/action' % instance.uuid,
- {action: body})
- self.assertEqual(409, res.status_int)
- # Do these here instead of tearDown because this method is called
- # more than once for the same test case
- self.mox.VerifyAll()
- self.mox.UnsetStubs()
-
- def _test_instance_not_found_in_compute_api(self, action,
- method=None, body=None, compute_api_args_map=None):
- if method is None:
- method = action
-
- compute_api_args_map = compute_api_args_map or {}
-
- instance = self._stub_instance_get()
-
- args, kwargs = compute_api_args_map.get(action, ((), {}))
- getattr(self.compute_api, method)(self.context, instance, *args,
- **kwargs).AndRaise(
- exception.InstanceNotFound(instance_id=instance.uuid))
-
- self.mox.ReplayAll()
-
- res = self._make_request('/servers/%s/action' % instance.uuid,
- {action: body})
- self.assertEqual(404, res.status_int)
- # Do these here instead of tearDown because this method is called
- # more than once for the same test case
- self.mox.VerifyAll()
- self.mox.UnsetStubs()
-
-
-class CommonTests(CommonMixin, test.NoDBTestCase):
- def _test_actions(self, actions, method_translations=None, body_map=None,
- args_map=None):
- method_translations = method_translations or {}
- body_map = body_map or {}
- args_map = args_map or {}
- for action in actions:
- method = method_translations.get(action)
- body = body_map.get(action)
- self.mox.StubOutWithMock(self.compute_api, method or action)
- self._test_action(action, method=method, body=body,
- compute_api_args_map=args_map)
- # Re-mock this.
- self.mox.StubOutWithMock(self.compute_api, 'get')
-
- def _test_actions_instance_not_found_in_compute_api(self,
- actions, method_translations=None, body_map=None,
- args_map=None):
- method_translations = method_translations or {}
- body_map = body_map or {}
- args_map = args_map or {}
- for action in actions:
- method = method_translations.get(action)
- body = body_map.get(action)
- self.mox.StubOutWithMock(self.compute_api, method or action)
- self._test_instance_not_found_in_compute_api(
- action, method=method, body=body,
- compute_api_args_map=args_map)
- # Re-mock this.
- self.mox.StubOutWithMock(self.compute_api, 'get')
-
- def _test_actions_with_non_existed_instance(self, actions, body_map=None):
- body_map = body_map or {}
- for action in actions:
- self._test_non_existing_instance(action,
- body_map=body_map)
- # Re-mock this.
- self.mox.StubOutWithMock(self.compute_api, 'get')
-
- def _test_actions_raise_conflict_on_invalid_state(
- self, actions, method_translations=None, body_map=None,
- args_map=None):
- method_translations = method_translations or {}
- body_map = body_map or {}
- args_map = args_map or {}
- for action in actions:
- method = method_translations.get(action)
- self.mox.StubOutWithMock(self.compute_api, method or action)
- self._test_invalid_state(action, method=method,
- body_map=body_map,
- compute_api_args_map=args_map)
- # Re-mock this.
- self.mox.StubOutWithMock(self.compute_api, 'get')
-
- def _test_actions_with_locked_instance(self, actions,
- method_translations=None,
- body_map=None, args_map=None):
- method_translations = method_translations or {}
- body_map = body_map or {}
- args_map = args_map or {}
- for action in actions:
- method = method_translations.get(action)
- body = body_map.get(action)
- self.mox.StubOutWithMock(self.compute_api, method or action)
- self._test_locked_instance(action, method=method, body=body,
- compute_api_args_map=args_map)
- # Re-mock this.
- self.mox.StubOutWithMock(self.compute_api, 'get')
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_access_ips.py b/nova/tests/api/openstack/compute/plugins/v3/test_access_ips.py
deleted file mode 100644
index 831d708c22..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_access_ips.py
+++ /dev/null
@@ -1,383 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.serialization import jsonutils
-
-from nova.api.openstack.compute import plugins
-from nova.api.openstack.compute.plugins.v3 import access_ips
-from nova.api.openstack.compute.plugins.v3 import servers
-from nova.api.openstack import wsgi
-from nova.compute import api as compute_api
-from nova import db
-from nova import exception
-from nova.objects import instance as instance_obj
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests.image import fake
-
-
-class AccessIPsExtTest(test.NoDBTestCase):
- def setUp(self):
- super(AccessIPsExtTest, self).setUp()
- self.access_ips_ext = access_ips.AccessIPs(None)
-
- def _test(self, func):
- server_dict = {access_ips.AccessIPs.v4_key: '1.1.1.1',
- access_ips.AccessIPs.v6_key: 'fe80::'}
- create_kwargs = {}
- func(server_dict, create_kwargs)
- self.assertEqual(create_kwargs, {'access_ip_v4': '1.1.1.1',
- 'access_ip_v6': 'fe80::'})
-
- def _test_with_ipv4_only(self, func):
- server_dict = {access_ips.AccessIPs.v4_key: '1.1.1.1'}
- create_kwargs = {}
- func(server_dict, create_kwargs)
- self.assertEqual(create_kwargs, {'access_ip_v4': '1.1.1.1'})
-
- def _test_with_ipv6_only(self, func):
- server_dict = {access_ips.AccessIPs.v6_key: 'fe80::'}
- create_kwargs = {}
- func(server_dict, create_kwargs)
- self.assertEqual(create_kwargs, {'access_ip_v6': 'fe80::'})
-
- def _test_without_ipv4_and_ipv6(self, func):
- server_dict = {}
- create_kwargs = {}
- func(server_dict, create_kwargs)
- self.assertEqual(create_kwargs, {})
-
- def _test_with_ipv4_null(self, func):
- server_dict = {access_ips.AccessIPs.v4_key: None}
- create_kwargs = {}
- func(server_dict, create_kwargs)
- self.assertEqual(create_kwargs, {'access_ip_v4': None})
-
- def _test_with_ipv6_null(self, func):
- server_dict = {access_ips.AccessIPs.v6_key: None}
- create_kwargs = {}
- func(server_dict, create_kwargs)
- self.assertEqual(create_kwargs, {'access_ip_v6': None})
-
- def _test_with_ipv4_blank(self, func):
- server_dict = {access_ips.AccessIPs.v4_key: ''}
- create_kwargs = {}
- func(server_dict, create_kwargs)
- self.assertEqual(create_kwargs, {'access_ip_v4': None})
-
- def _test_with_ipv6_blank(self, func):
- server_dict = {access_ips.AccessIPs.v6_key: ''}
- create_kwargs = {}
- func(server_dict, create_kwargs)
- self.assertEqual(create_kwargs, {'access_ip_v6': None})
-
- def test_server_create(self):
- self._test(self.access_ips_ext.server_create)
-
- def test_server_create_with_ipv4_only(self):
- self._test_with_ipv4_only(self.access_ips_ext.server_create)
-
- def test_server_create_with_ipv6_only(self):
- self._test_with_ipv6_only(self.access_ips_ext.server_create)
-
- def test_server_create_without_ipv4_and_ipv6(self):
- self._test_without_ipv4_and_ipv6(self.access_ips_ext.server_create)
-
- def test_server_create_with_ipv4_null(self):
- self._test_with_ipv4_null(self.access_ips_ext.server_create)
-
- def test_server_create_with_ipv6_null(self):
- self._test_with_ipv6_null(self.access_ips_ext.server_create)
-
- def test_server_create_with_ipv4_blank(self):
- self._test_with_ipv4_blank(self.access_ips_ext.server_create)
-
- def test_server_create_with_ipv6_blank(self):
- self._test_with_ipv6_blank(self.access_ips_ext.server_create)
-
- def test_server_update(self):
- self._test(self.access_ips_ext.server_update)
-
- def test_server_update_with_ipv4_only(self):
- self._test_with_ipv4_only(self.access_ips_ext.server_update)
-
- def test_server_update_with_ipv6_only(self):
- self._test_with_ipv6_only(self.access_ips_ext.server_update)
-
- def test_server_update_without_ipv4_and_ipv6(self):
- self._test_without_ipv4_and_ipv6(self.access_ips_ext.server_update)
-
- def test_server_update_with_ipv4_null(self):
- self._test_with_ipv4_null(self.access_ips_ext.server_update)
-
- def test_server_update_with_ipv6_null(self):
- self._test_with_ipv6_null(self.access_ips_ext.server_update)
-
- def test_server_update_with_ipv4_blank(self):
- self._test_with_ipv4_blank(self.access_ips_ext.server_update)
-
- def test_server_update_with_ipv6_blank(self):
- self._test_with_ipv6_blank(self.access_ips_ext.server_update)
-
- def test_server_rebuild(self):
- self._test(self.access_ips_ext.server_rebuild)
-
- def test_server_rebuild_with_ipv4_only(self):
- self._test_with_ipv4_only(self.access_ips_ext.server_rebuild)
-
- def test_server_rebuild_with_ipv6_only(self):
- self._test_with_ipv6_only(self.access_ips_ext.server_rebuild)
-
- def test_server_rebuild_without_ipv4_and_ipv6(self):
- self._test_without_ipv4_and_ipv6(self.access_ips_ext.server_rebuild)
-
- def test_server_rebuild_with_ipv4_null(self):
- self._test_with_ipv4_null(self.access_ips_ext.server_rebuild)
-
- def test_server_rebuild_with_ipv6_null(self):
- self._test_with_ipv6_null(self.access_ips_ext.server_rebuild)
-
- def test_server_rebuild_with_ipv4_blank(self):
- self._test_with_ipv4_blank(self.access_ips_ext.server_rebuild)
-
- def test_server_rebuild_with_ipv6_blank(self):
- self._test_with_ipv6_blank(self.access_ips_ext.server_rebuild)
-
-
-class AccessIPsExtAPIValidationTest(test.TestCase):
- def setUp(self):
- super(AccessIPsExtAPIValidationTest, self).setUp()
-
- def fake_save(context, **kwargs):
- pass
-
- def fake_rebuild(*args, **kwargs):
- pass
-
- ext_info = plugins.LoadedExtensionInfo()
- self.controller = servers.ServersController(extension_info=ext_info)
- fake.stub_out_image_service(self.stubs)
- self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get())
- self.stubs.Set(instance_obj.Instance, 'save', fake_save)
- self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
-
- def _test_create(self, params):
- body = {
- 'server': {
- 'name': 'server_test',
- 'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- 'flavorRef': 'http://localhost/123/flavors/3',
- },
- }
- body['server'].update(params)
-
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.headers['content-type'] = 'application/json'
- req.body = jsonutils.dumps(body)
- self.controller.create(req, body=body)
-
- def _test_update(self, params):
- body = {
- 'server': {
- },
- }
- body['server'].update(params)
-
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'PUT'
- req.headers['content-type'] = 'application/json'
- req.body = jsonutils.dumps(body)
- self.controller.update(req, fakes.FAKE_UUID, body=body)
-
- def _test_rebuild(self, params):
- body = {
- 'rebuild': {
- 'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- },
- }
- body['rebuild'].update(params)
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'PUT'
- req.headers['content-type'] = 'application/json'
- req.body = jsonutils.dumps(body)
- self.controller._action_rebuild(req, fakes.FAKE_UUID, body=body)
-
- def test_create_server_with_access_ipv4(self):
- params = {access_ips.AccessIPs.v4_key: '192.168.0.10'}
- self._test_create(params)
-
- def test_create_server_with_invalid_access_ipv4(self):
- params = {access_ips.AccessIPs.v4_key: '1.1.1.1.1.1'}
- self.assertRaises(exception.ValidationError, self._test_create, params)
-
- def test_create_server_with_access_ipv6(self):
- params = {access_ips.AccessIPs.v6_key: '2001:db8::9abc'}
- self._test_create(params)
-
- def test_create_server_with_invalid_access_ipv6(self):
- params = {access_ips.AccessIPs.v6_key: 'fe80:::::::'}
- self.assertRaises(exception.ValidationError, self._test_create, params)
-
- def test_update_server_with_access_ipv4(self):
- params = {access_ips.AccessIPs.v4_key: '192.168.0.10'}
- self._test_update(params)
-
- def test_update_server_with_invalid_access_ipv4(self):
- params = {access_ips.AccessIPs.v4_key: '1.1.1.1.1.1'}
- self.assertRaises(exception.ValidationError, self._test_update, params)
-
- def test_update_server_with_access_ipv6(self):
- params = {access_ips.AccessIPs.v6_key: '2001:db8::9abc'}
- self._test_update(params)
-
- def test_update_server_with_invalid_access_ipv6(self):
- params = {access_ips.AccessIPs.v6_key: 'fe80:::::::'}
- self.assertRaises(exception.ValidationError, self._test_update, params)
-
- def test_rebuild_server_with_access_ipv4(self):
- params = {access_ips.AccessIPs.v4_key: '192.168.0.10'}
- self._test_rebuild(params)
-
- def test_rebuild_server_with_invalid_access_ipv4(self):
- params = {access_ips.AccessIPs.v4_key: '1.1.1.1.1.1'}
- self.assertRaises(exception.ValidationError, self._test_rebuild,
- params)
-
- def test_rebuild_server_with_access_ipv6(self):
- params = {access_ips.AccessIPs.v6_key: '2001:db8::9abc'}
- self._test_rebuild(params)
-
- def test_rebuild_server_with_invalid_access_ipv6(self):
- params = {access_ips.AccessIPs.v6_key: 'fe80:::::::'}
- self.assertRaises(exception.ValidationError, self._test_rebuild,
- params)
-
-
-class AccessIPsControllerTest(test.NoDBTestCase):
- def setUp(self):
- super(AccessIPsControllerTest, self).setUp()
- self.controller = access_ips.AccessIPsController()
-
- def _test_with_access_ips(self, func, kwargs={'id': 'fake'}):
- req = wsgi.Request({'nova.context':
- fakes.FakeRequestContext('fake_user', 'fake',
- is_admin=True)})
- instance = {'uuid': 'fake',
- 'access_ip_v4': '1.1.1.1',
- 'access_ip_v6': 'fe80::'}
- req.cache_db_instance(instance)
- resp_obj = wsgi.ResponseObject(
- {"server": {'id': 'fake'}})
- func(req, resp_obj, **kwargs)
- self.assertEqual(resp_obj.obj['server'][access_ips.AccessIPs.v4_key],
- '1.1.1.1')
- self.assertEqual(resp_obj.obj['server'][access_ips.AccessIPs.v6_key],
- 'fe80::')
-
- def _test_without_access_ips(self, func, kwargs={'id': 'fake'}):
- req = wsgi.Request({'nova.context':
- fakes.FakeRequestContext('fake_user', 'fake',
- is_admin=True)})
- instance = {'uuid': 'fake',
- 'access_ip_v4': None,
- 'access_ip_v6': None}
- req.cache_db_instance(instance)
- resp_obj = wsgi.ResponseObject(
- {"server": {'id': 'fake'}})
- func(req, resp_obj, **kwargs)
- self.assertEqual(resp_obj.obj['server'][access_ips.AccessIPs.v4_key],
- '')
- self.assertEqual(resp_obj.obj['server'][access_ips.AccessIPs.v6_key],
- '')
-
- def test_create(self):
- self._test_with_access_ips(self.controller.create, {'body': {}})
-
- def test_create_without_access_ips(self):
- self._test_with_access_ips(self.controller.create, {'body': {}})
-
- def test_show(self):
- self._test_with_access_ips(self.controller.show)
-
- def test_show_without_access_ips(self):
- self._test_without_access_ips(self.controller.show)
-
- def test_detail(self):
- req = wsgi.Request({'nova.context':
- fakes.FakeRequestContext('fake_user', 'fake',
- is_admin=True)})
- instance1 = {'uuid': 'fake1',
- 'access_ip_v4': '1.1.1.1',
- 'access_ip_v6': 'fe80::'}
- instance2 = {'uuid': 'fake2',
- 'access_ip_v4': '1.1.1.2',
- 'access_ip_v6': 'fe81::'}
- req.cache_db_instance(instance1)
- req.cache_db_instance(instance2)
- resp_obj = wsgi.ResponseObject(
- {"servers": [{'id': 'fake1'}, {'id': 'fake2'}]})
- self.controller.detail(req, resp_obj)
- self.assertEqual(
- resp_obj.obj['servers'][0][access_ips.AccessIPs.v4_key],
- '1.1.1.1')
- self.assertEqual(
- resp_obj.obj['servers'][0][access_ips.AccessIPs.v6_key],
- 'fe80::')
- self.assertEqual(
- resp_obj.obj['servers'][1][access_ips.AccessIPs.v4_key],
- '1.1.1.2')
- self.assertEqual(
- resp_obj.obj['servers'][1][access_ips.AccessIPs.v6_key],
- 'fe81::')
-
- def test_detail_without_access_ips(self):
- req = wsgi.Request({'nova.context':
- fakes.FakeRequestContext('fake_user', 'fake',
- is_admin=True)})
- instance1 = {'uuid': 'fake1',
- 'access_ip_v4': None,
- 'access_ip_v6': None}
- instance2 = {'uuid': 'fake2',
- 'access_ip_v4': None,
- 'access_ip_v6': None}
- req.cache_db_instance(instance1)
- req.cache_db_instance(instance2)
- resp_obj = wsgi.ResponseObject(
- {"servers": [{'id': 'fake1'}, {'id': 'fake2'}]})
- self.controller.detail(req, resp_obj)
- self.assertEqual(
- resp_obj.obj['servers'][0][access_ips.AccessIPs.v4_key], '')
- self.assertEqual(
- resp_obj.obj['servers'][0][access_ips.AccessIPs.v6_key], '')
- self.assertEqual(
- resp_obj.obj['servers'][1][access_ips.AccessIPs.v4_key], '')
- self.assertEqual(
- resp_obj.obj['servers'][1][access_ips.AccessIPs.v6_key], '')
-
- def test_update(self):
- self._test_with_access_ips(self.controller.update, {'id': 'fake',
- 'body': {}})
-
- def test_update_without_access_ips(self):
- self._test_without_access_ips(self.controller.update, {'id': 'fake',
- 'body': {}})
-
- def test_rebuild(self):
- self._test_with_access_ips(self.controller.rebuild, {'id': 'fake',
- 'body': {}})
-
- def test_rebuild_without_access_ips(self):
- self._test_without_access_ips(self.controller.rebuild, {'id': 'fake',
- 'body': {}})
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_console_auth_tokens.py b/nova/tests/api/openstack/compute/plugins/v3/test_console_auth_tokens.py
deleted file mode 100644
index 1248c39e39..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_console_auth_tokens.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2013 Cloudbase Solutions Srl
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.serialization import jsonutils
-
-from nova.consoleauth import rpcapi as consoleauth_rpcapi
-from nova import context
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-_FAKE_CONNECT_INFO = {'instance_uuid': 'fake_instance_uuid',
- 'host': 'fake_host',
- 'port': 'fake_port',
- 'internal_access_path': 'fake_access_path',
- 'console_type': 'rdp-html5'}
-
-
-def _fake_check_token(self, context, token):
- return _FAKE_CONNECT_INFO
-
-
-def _fake_check_token_not_found(self, context, token):
- return None
-
-
-def _fake_check_token_unauthorized(self, context, token):
- connect_info = _FAKE_CONNECT_INFO
- connect_info['console_type'] = 'unauthorized_console_type'
- return connect_info
-
-
-class ConsoleAuthTokensExtensionTest(test.TestCase):
-
- _FAKE_URL = '/v2/fake/os-console-auth-tokens/1'
-
- _EXPECTED_OUTPUT = {'console': {'instance_uuid': 'fake_instance_uuid',
- 'host': 'fake_host',
- 'port': 'fake_port',
- 'internal_access_path':
- 'fake_access_path'}}
-
- def setUp(self):
- super(ConsoleAuthTokensExtensionTest, self).setUp()
- self.stubs.Set(consoleauth_rpcapi.ConsoleAuthAPI, 'check_token',
- _fake_check_token)
-
- ctxt = self._get_admin_context()
- self.app = fakes.wsgi_app_v21(init_only=('os-console-auth-tokens'),
- fake_auth_context=ctxt)
-
- def _get_admin_context(self):
- ctxt = context.get_admin_context()
- ctxt.user_id = 'fake'
- ctxt.project_id = 'fake'
- return ctxt
-
- def _create_request(self):
- req = fakes.HTTPRequestV3.blank(self._FAKE_URL)
- req.method = "GET"
- req.headers["content-type"] = "application/json"
- return req
-
- def test_get_console_connect_info(self):
- req = self._create_request()
- res = req.get_response(self.app)
- self.assertEqual(200, res.status_int)
- output = jsonutils.loads(res.body)
- self.assertEqual(self._EXPECTED_OUTPUT, output)
-
- def test_get_console_connect_info_token_not_found(self):
- self.stubs.Set(consoleauth_rpcapi.ConsoleAuthAPI, 'check_token',
- _fake_check_token_not_found)
- req = self._create_request()
- res = req.get_response(self.app)
- self.assertEqual(404, res.status_int)
-
- def test_get_console_connect_info_unauthorized_console_type(self):
- self.stubs.Set(consoleauth_rpcapi.ConsoleAuthAPI, 'check_token',
- _fake_check_token_unauthorized)
- req = self._create_request()
- res = req.get_response(self.app)
- self.assertEqual(401, res.status_int)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_consoles.py b/nova/tests/api/openstack/compute/plugins/v3/test_consoles.py
deleted file mode 100644
index addc5396da..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_consoles.py
+++ /dev/null
@@ -1,270 +0,0 @@
-# Copyright 2010-2011 OpenStack Foundation
-# Copyright 2011 Piston Cloud Computing, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-import uuid as stdlib_uuid
-
-from oslo.utils import timeutils
-import webob
-
-from nova.api.openstack.compute.plugins.v3 import consoles
-from nova.compute import vm_states
-from nova import console
-from nova import db
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import matchers
-
-
-FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-
-
-class FakeInstanceDB(object):
-
- def __init__(self):
- self.instances_by_id = {}
- self.ids_by_uuid = {}
- self.max_id = 0
-
- def return_server_by_id(self, context, id):
- if id not in self.instances_by_id:
- self._add_server(id=id)
- return dict(self.instances_by_id[id])
-
- def return_server_by_uuid(self, context, uuid):
- if uuid not in self.ids_by_uuid:
- self._add_server(uuid=uuid)
- return dict(self.instances_by_id[self.ids_by_uuid[uuid]])
-
- def _add_server(self, id=None, uuid=None):
- if id is None:
- id = self.max_id + 1
- if uuid is None:
- uuid = str(stdlib_uuid.uuid4())
- instance = stub_instance(id, uuid=uuid)
- self.instances_by_id[id] = instance
- self.ids_by_uuid[uuid] = id
- if id > self.max_id:
- self.max_id = id
-
-
-def stub_instance(id, user_id='fake', project_id='fake', host=None,
- vm_state=None, task_state=None,
- reservation_id="", uuid=FAKE_UUID, image_ref="10",
- flavor_id="1", name=None, key_name='',
- access_ipv4=None, access_ipv6=None, progress=0):
-
- if host is not None:
- host = str(host)
-
- if key_name:
- key_data = 'FAKE'
- else:
- key_data = ''
-
- # ReservationID isn't sent back, hack it in there.
- server_name = name or "server%s" % id
- if reservation_id != "":
- server_name = "reservation_%s" % (reservation_id, )
-
- instance = {
- "id": int(id),
- "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
- "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
- "admin_password": "",
- "user_id": user_id,
- "project_id": project_id,
- "image_ref": image_ref,
- "kernel_id": "",
- "ramdisk_id": "",
- "launch_index": 0,
- "key_name": key_name,
- "key_data": key_data,
- "vm_state": vm_state or vm_states.BUILDING,
- "task_state": task_state,
- "memory_mb": 0,
- "vcpus": 0,
- "root_gb": 0,
- "hostname": "",
- "host": host,
- "instance_type": {},
- "user_data": "",
- "reservation_id": reservation_id,
- "mac_address": "",
- "scheduled_at": timeutils.utcnow(),
- "launched_at": timeutils.utcnow(),
- "terminated_at": timeutils.utcnow(),
- "availability_zone": "",
- "display_name": server_name,
- "display_description": "",
- "locked": False,
- "metadata": [],
- "access_ip_v4": access_ipv4,
- "access_ip_v6": access_ipv6,
- "uuid": uuid,
- "progress": progress}
-
- return instance
-
-
-class ConsolesControllerTest(test.NoDBTestCase):
- def setUp(self):
- super(ConsolesControllerTest, self).setUp()
- self.flags(verbose=True)
- self.instance_db = FakeInstanceDB()
- self.stubs.Set(db, 'instance_get',
- self.instance_db.return_server_by_id)
- self.stubs.Set(db, 'instance_get_by_uuid',
- self.instance_db.return_server_by_uuid)
- self.uuid = str(stdlib_uuid.uuid4())
- self.url = '/v3/fake/servers/%s/consoles' % self.uuid
- self.controller = consoles.ConsolesController()
-
- def test_create_console(self):
- def fake_create_console(cons_self, context, instance_id):
- self.assertEqual(instance_id, self.uuid)
- return {}
- self.stubs.Set(console.api.API, 'create_console', fake_create_console)
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.controller.create(req, self.uuid, None)
- self.assertEqual(self.controller.create.wsgi_code, 201)
-
- def test_create_console_unknown_instance(self):
- def fake_create_console(cons_self, context, instance_id):
- raise exception.InstanceNotFound(instance_id=instance_id)
- self.stubs.Set(console.api.API, 'create_console', fake_create_console)
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
- req, self.uuid, None)
-
- def test_show_console(self):
- def fake_get_console(cons_self, context, instance_id, console_id):
- self.assertEqual(instance_id, self.uuid)
- self.assertEqual(console_id, 20)
- pool = dict(console_type='fake_type',
- public_hostname='fake_hostname')
- return dict(id=console_id, password='fake_password',
- port='fake_port', pool=pool, instance_name='inst-0001')
-
- expected = {'console': {'id': 20,
- 'port': 'fake_port',
- 'host': 'fake_hostname',
- 'password': 'fake_password',
- 'instance_name': 'inst-0001',
- 'console_type': 'fake_type'}}
-
- self.stubs.Set(console.api.API, 'get_console', fake_get_console)
-
- req = fakes.HTTPRequestV3.blank(self.url + '/20')
- res_dict = self.controller.show(req, self.uuid, '20')
- self.assertThat(res_dict, matchers.DictMatches(expected))
-
- def test_show_console_unknown_console(self):
- def fake_get_console(cons_self, context, instance_id, console_id):
- raise exception.ConsoleNotFound(console_id=console_id)
-
- self.stubs.Set(console.api.API, 'get_console', fake_get_console)
-
- req = fakes.HTTPRequestV3.blank(self.url + '/20')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
- req, self.uuid, '20')
-
- def test_show_console_unknown_instance(self):
- def fake_get_console(cons_self, context, instance_id, console_id):
- raise exception.ConsoleNotFoundForInstance(
- instance_uuid=instance_id)
-
- self.stubs.Set(console.api.API, 'get_console', fake_get_console)
-
- req = fakes.HTTPRequestV3.blank(self.url + '/20')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
- req, self.uuid, '20')
-
- def test_list_consoles(self):
- def fake_get_consoles(cons_self, context, instance_id):
- self.assertEqual(instance_id, self.uuid)
-
- pool1 = dict(console_type='fake_type',
- public_hostname='fake_hostname')
- cons1 = dict(id=10, password='fake_password',
- port='fake_port', pool=pool1)
- pool2 = dict(console_type='fake_type2',
- public_hostname='fake_hostname2')
- cons2 = dict(id=11, password='fake_password2',
- port='fake_port2', pool=pool2)
- return [cons1, cons2]
-
- expected = {'consoles':
- [{'id': 10, 'console_type': 'fake_type'},
- {'id': 11, 'console_type': 'fake_type2'}]}
-
- self.stubs.Set(console.api.API, 'get_consoles', fake_get_consoles)
-
- req = fakes.HTTPRequestV3.blank(self.url)
- res_dict = self.controller.index(req, self.uuid)
- self.assertThat(res_dict, matchers.DictMatches(expected))
-
- def test_list_consoles_unknown_instance(self):
- def fake_get_consoles(cons_self, context, instance_id):
- raise exception.InstanceNotFound(instance_id=instance_id)
- self.stubs.Set(console.api.API, 'get_consoles', fake_get_consoles)
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.index,
- req, self.uuid)
-
- def test_delete_console(self):
- def fake_get_console(cons_self, context, instance_id, console_id):
- self.assertEqual(instance_id, self.uuid)
- self.assertEqual(console_id, 20)
- pool = dict(console_type='fake_type',
- public_hostname='fake_hostname')
- return dict(id=console_id, password='fake_password',
- port='fake_port', pool=pool)
-
- def fake_delete_console(cons_self, context, instance_id, console_id):
- self.assertEqual(instance_id, self.uuid)
- self.assertEqual(console_id, 20)
-
- self.stubs.Set(console.api.API, 'get_console', fake_get_console)
- self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
-
- req = fakes.HTTPRequestV3.blank(self.url + '/20')
- self.controller.delete(req, self.uuid, '20')
-
- def test_delete_console_unknown_console(self):
- def fake_delete_console(cons_self, context, instance_id, console_id):
- raise exception.ConsoleNotFound(console_id=console_id)
-
- self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
-
- req = fakes.HTTPRequestV3.blank(self.url + '/20')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- req, self.uuid, '20')
-
- def test_delete_console_unknown_instance(self):
- def fake_delete_console(cons_self, context, instance_id, console_id):
- raise exception.ConsoleNotFoundForInstance(
- instance_uuid=instance_id)
-
- self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
-
- req = fakes.HTTPRequestV3.blank(self.url + '/20')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- req, self.uuid, '20')
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_create_backup.py b/nova/tests/api/openstack/compute/plugins/v3/test_create_backup.py
deleted file mode 100644
index 332d652195..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_create_backup.py
+++ /dev/null
@@ -1,261 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.api.openstack import common
-from nova.api.openstack.compute.plugins.v3 import create_backup
-from nova.openstack.common import uuidutils
-from nova import test
-from nova.tests.api.openstack.compute.plugins.v3 import \
- admin_only_action_common
-from nova.tests.api.openstack import fakes
-
-
-class CreateBackupTests(admin_only_action_common.CommonMixin,
- test.NoDBTestCase):
- def setUp(self):
- super(CreateBackupTests, self).setUp()
- self.controller = create_backup.CreateBackupController()
- self.compute_api = self.controller.compute_api
-
- def _fake_controller(*args, **kwargs):
- return self.controller
-
- self.stubs.Set(create_backup, 'CreateBackupController',
- _fake_controller)
- self.app = fakes.wsgi_app_v21(init_only=('servers',
- 'os-create-backup'),
- fake_auth_context=self.context)
- self.mox.StubOutWithMock(self.compute_api, 'get')
- self.mox.StubOutWithMock(common,
- 'check_img_metadata_properties_quota')
- self.mox.StubOutWithMock(self.compute_api, 'backup')
-
- def _make_url(self, uuid=None):
- if uuid is None:
- uuid = uuidutils.generate_uuid()
- return '/servers/%s/action' % uuid
-
- def test_create_backup_with_metadata(self):
- metadata = {'123': 'asdf'}
- body = {
- 'createBackup': {
- 'name': 'Backup 1',
- 'backup_type': 'daily',
- 'rotation': 1,
- 'metadata': metadata,
- },
- }
-
- image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
- properties=metadata)
-
- common.check_img_metadata_properties_quota(self.context, metadata)
- instance = self._stub_instance_get()
- self.compute_api.backup(self.context, instance, 'Backup 1',
- 'daily', 1,
- extra_properties=metadata).AndReturn(image)
-
- self.mox.ReplayAll()
-
- res = self._make_request(self._make_url(instance.uuid), body)
- self.assertEqual(202, res.status_int)
- self.assertIn('fake-image-id', res.headers['Location'])
-
- def test_create_backup_no_name(self):
- # Name is required for backups.
- body = {
- 'createBackup': {
- 'backup_type': 'daily',
- 'rotation': 1,
- },
- }
- res = self._make_request(self._make_url(), body)
- self.assertEqual(400, res.status_int)
-
- def test_create_backup_no_rotation(self):
- # Rotation is required for backup requests.
- body = {
- 'createBackup': {
- 'name': 'Backup 1',
- 'backup_type': 'daily',
- },
- }
- res = self._make_request(self._make_url(), body)
- self.assertEqual(400, res.status_int)
-
- def test_create_backup_negative_rotation(self):
- """Rotation must be greater than or equal to zero
- for backup requests
- """
- body = {
- 'createBackup': {
- 'name': 'Backup 1',
- 'backup_type': 'daily',
- 'rotation': -1,
- },
- }
- res = self._make_request(self._make_url(), body)
- self.assertEqual(400, res.status_int)
-
- def test_create_backup_negative_rotation_with_string_number(self):
- body = {
- 'createBackup': {
- 'name': 'Backup 1',
- 'backup_type': 'daily',
- 'rotation': '-1',
- },
- }
- res = self._make_request(self._make_url('fake'), body)
- self.assertEqual(400, res.status_int)
-
- def test_create_backup_no_backup_type(self):
- # Backup Type (daily or weekly) is required for backup requests.
- body = {
- 'createBackup': {
- 'name': 'Backup 1',
- 'rotation': 1,
- },
- }
- res = self._make_request(self._make_url(), body)
- self.assertEqual(400, res.status_int)
-
- def test_create_backup_non_dict_metadata(self):
- body = {
- 'createBackup': {
- 'name': 'Backup 1',
- 'backup_type': 'daily',
- 'rotation': 1,
- 'metadata': 'non_dict',
- },
- }
- res = self._make_request(self._make_url('fake'), body)
- self.assertEqual(400, res.status_int)
-
- def test_create_backup_bad_entity(self):
- body = {'createBackup': 'go'}
- res = self._make_request(self._make_url(), body)
- self.assertEqual(400, res.status_int)
-
- def test_create_backup_rotation_is_zero(self):
- # The happy path for creating backups if rotation is zero.
- body = {
- 'createBackup': {
- 'name': 'Backup 1',
- 'backup_type': 'daily',
- 'rotation': 0,
- },
- }
-
- image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
- properties={})
- common.check_img_metadata_properties_quota(self.context, {})
- instance = self._stub_instance_get()
- self.compute_api.backup(self.context, instance, 'Backup 1',
- 'daily', 0,
- extra_properties={}).AndReturn(image)
-
- self.mox.ReplayAll()
-
- res = self._make_request(self._make_url(instance.uuid), body)
- self.assertEqual(202, res.status_int)
- self.assertNotIn('Location', res.headers)
-
- def test_create_backup_rotation_is_positive(self):
- # The happy path for creating backups if rotation is positive.
- body = {
- 'createBackup': {
- 'name': 'Backup 1',
- 'backup_type': 'daily',
- 'rotation': 1,
- },
- }
-
- image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
- properties={})
- common.check_img_metadata_properties_quota(self.context, {})
- instance = self._stub_instance_get()
- self.compute_api.backup(self.context, instance, 'Backup 1',
- 'daily', 1,
- extra_properties={}).AndReturn(image)
-
- self.mox.ReplayAll()
-
- res = self._make_request(self._make_url(instance.uuid), body)
- self.assertEqual(202, res.status_int)
- self.assertIn('fake-image-id', res.headers['Location'])
-
- def test_create_backup_rotation_is_string_number(self):
- body = {
- 'createBackup': {
- 'name': 'Backup 1',
- 'backup_type': 'daily',
- 'rotation': '1',
- },
- }
-
- image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
- properties={})
- common.check_img_metadata_properties_quota(self.context, {})
- instance = self._stub_instance_get()
- self.compute_api.backup(self.context, instance, 'Backup 1',
- 'daily', 1,
- extra_properties={}).AndReturn(image)
-
- self.mox.ReplayAll()
-
- res = self._make_request(self._make_url(instance['uuid']), body)
- self.assertEqual(202, res.status_int)
- self.assertIn('fake-image-id', res.headers['Location'])
-
- def test_create_backup_raises_conflict_on_invalid_state(self):
- body_map = {
- 'createBackup': {
- 'name': 'Backup 1',
- 'backup_type': 'daily',
- 'rotation': 1,
- },
- }
- args_map = {
- 'createBackup': (
- ('Backup 1', 'daily', 1), {'extra_properties': {}}
- ),
- }
- common.check_img_metadata_properties_quota(self.context, {})
- self._test_invalid_state('createBackup', method='backup',
- body_map=body_map,
- compute_api_args_map=args_map)
-
- def test_create_backup_with_non_existed_instance(self):
- body_map = {
- 'createBackup': {
- 'name': 'Backup 1',
- 'backup_type': 'daily',
- 'rotation': 1,
- },
- }
- common.check_img_metadata_properties_quota(self.context, {})
- self._test_non_existing_instance('createBackup',
- body_map=body_map)
-
- def test_create_backup_with_invalid_create_backup(self):
- body = {
- 'createBackupup': {
- 'name': 'Backup 1',
- 'backup_type': 'daily',
- 'rotation': 1,
- },
- }
- res = self._make_request(self._make_url(), body)
- self.assertEqual(400, res.status_int)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_extended_volumes.py b/nova/tests/api/openstack/compute/plugins/v3/test_extended_volumes.py
deleted file mode 100644
index dda65113e8..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_extended_volumes.py
+++ /dev/null
@@ -1,387 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute.plugins.v3 import extended_volumes
-from nova import compute
-from nova import context
-from nova import db
-from nova import exception
-from nova import objects
-from nova.objects import instance as instance_obj
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_block_device
-from nova.tests import fake_instance
-from nova import volume
-
-UUID1 = '00000000-0000-0000-0000-000000000001'
-UUID2 = '00000000-0000-0000-0000-000000000002'
-UUID3 = '00000000-0000-0000-0000-000000000003'
-
-
-def fake_compute_get(*args, **kwargs):
- inst = fakes.stub_instance(1, uuid=UUID1)
- return fake_instance.fake_instance_obj(args[1], **inst)
-
-
-def fake_compute_get_not_found(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id=UUID1)
-
-
-def fake_compute_get_all(*args, **kwargs):
- db_list = [fakes.stub_instance(1), fakes.stub_instance(2)]
- fields = instance_obj.INSTANCE_DEFAULT_FIELDS
- return instance_obj._make_instance_list(args[1],
- objects.InstanceList(),
- db_list, fields)
-
-
-def fake_bdms_get_all_by_instance(*args, **kwargs):
- return [fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': UUID1, 'source_type': 'volume',
- 'destination_type': 'volume', 'id': 1}),
- fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': UUID2, 'source_type': 'volume',
- 'destination_type': 'volume', 'id': 2})]
-
-
-def fake_attach_volume(self, context, instance, volume_id,
- device, disk_bus, device_type):
- pass
-
-
-def fake_attach_volume_not_found_vol(self, context, instance, volume_id,
- device, disk_bus, device_type):
- raise exception.VolumeNotFound(volume_id=volume_id)
-
-
-def fake_attach_volume_invalid_device_path(self, context, instance,
- volume_id, device, disk_bus,
- device_type):
- raise exception.InvalidDevicePath(path=device)
-
-
-def fake_attach_volume_instance_invalid_state(self, context, instance,
- volume_id, device, disk_bus,
- device_type):
- raise exception.InstanceInvalidState(instance_uuid=UUID1, state='',
- method='', attr='')
-
-
-def fake_attach_volume_invalid_volume(self, context, instance,
- volume_id, device, disk_bus,
- device_type):
- raise exception.InvalidVolume(reason='')
-
-
-def fake_detach_volume(self, context, instance, volume):
- pass
-
-
-def fake_swap_volume(self, context, instance,
- old_volume_id, new_volume_id):
- pass
-
-
-def fake_swap_volume_invalid_volume(self, context, instance,
- volume_id, device):
- raise exception.InvalidVolume(reason='', volume_id=volume_id)
-
-
-def fake_swap_volume_unattached_volume(self, context, instance,
- volume_id, device):
- raise exception.VolumeUnattached(reason='', volume_id=volume_id)
-
-
-def fake_detach_volume_invalid_volume(self, context, instance, volume):
- raise exception.InvalidVolume(reason='')
-
-
-def fake_swap_volume_instance_invalid_state(self, context, instance,
- volume_id, device):
- raise exception.InstanceInvalidState(instance_uuid=UUID1, state='',
- method='', attr='')
-
-
-def fake_volume_get(*args, **kwargs):
- pass
-
-
-def fake_volume_get_not_found(*args, **kwargs):
- raise exception.VolumeNotFound(volume_id=UUID1)
-
-
-class ExtendedVolumesTest(test.TestCase):
- content_type = 'application/json'
- prefix = 'os-extended-volumes:'
-
- def setUp(self):
- super(ExtendedVolumesTest, self).setUp()
- self.Controller = extended_volumes.ExtendedVolumesController()
- fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fake_bdms_get_all_by_instance)
- self.stubs.Set(volume.cinder.API, 'get', fake_volume_get)
- self.stubs.Set(compute.api.API, 'detach_volume', fake_detach_volume)
- self.stubs.Set(compute.api.API, 'attach_volume', fake_attach_volume)
- self.app = fakes.wsgi_app_v21(init_only=('os-extended-volumes',
- 'servers'))
- return_server = fakes.fake_instance_get()
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
-
- def _make_request(self, url, body=None):
- base_url = '/v2/fake/servers'
- req = webob.Request.blank(base_url + url)
- req.headers['Accept'] = self.content_type
- if body:
- req.body = jsonutils.dumps(body)
- req.method = 'POST'
- req.content_type = 'application/json'
- res = req.get_response(self.app)
- return res
-
- def _get_server(self, body):
- return jsonutils.loads(body).get('server')
-
- def _get_servers(self, body):
- return jsonutils.loads(body).get('servers')
-
- def test_show(self):
- url = '/%s' % UUID1
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- server = self._get_server(res.body)
- exp_volumes = [{'id': UUID1}, {'id': UUID2}]
- if self.content_type == 'application/json':
- actual = server.get('%svolumes_attached' % self.prefix)
- self.assertEqual(exp_volumes, actual)
-
- def test_detail(self):
- url = '/detail'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- exp_volumes = [{'id': UUID1}, {'id': UUID2}]
- for i, server in enumerate(self._get_servers(res.body)):
- if self.content_type == 'application/json':
- actual = server.get('%svolumes_attached' % self.prefix)
- self.assertEqual(exp_volumes, actual)
-
- def test_detach(self):
- url = "/%s/action" % UUID1
- res = self._make_request(url, {"detach": {"volume_id": UUID1}})
- self.assertEqual(res.status_int, 202)
-
- def test_detach_volume_from_locked_server(self):
- url = "/%s/action" % UUID1
- self.stubs.Set(compute.api.API, 'detach_volume',
- fakes.fake_actions_to_locked_server)
- res = self._make_request(url, {"detach": {"volume_id": UUID1}})
- self.assertEqual(res.status_int, 409)
-
- def test_detach_with_non_existed_vol(self):
- url = "/%s/action" % UUID1
- self.stubs.Set(volume.cinder.API, 'get', fake_volume_get_not_found)
- res = self._make_request(url, {"detach": {"volume_id": UUID2}})
- self.assertEqual(res.status_int, 404)
-
- def test_detach_with_non_existed_instance(self):
- url = "/%s/action" % UUID1
- self.stubs.Set(compute.api.API, 'get', fake_compute_get_not_found)
- res = self._make_request(url, {"detach": {"volume_id": UUID2}})
- self.assertEqual(res.status_int, 404)
-
- def test_detach_with_invalid_vol(self):
- url = "/%s/action" % UUID1
- self.stubs.Set(compute.api.API, 'detach_volume',
- fake_detach_volume_invalid_volume)
- res = self._make_request(url, {"detach": {"volume_id": UUID2}})
- self.assertEqual(res.status_int, 400)
-
- def test_detach_with_bad_id(self):
- url = "/%s/action" % UUID1
- res = self._make_request(url, {"detach": {"volume_id": 'xxx'}})
- self.assertEqual(res.status_int, 400)
-
- def test_detach_without_id(self):
- url = "/%s/action" % UUID1
- res = self._make_request(url, {"detach": {}})
- self.assertEqual(res.status_int, 400)
-
- def test_detach_volume_with_invalid_request(self):
- url = "/%s/action" % UUID1
- res = self._make_request(url, {"detach": None})
- self.assertEqual(res.status_int, 400)
-
- @mock.patch('nova.objects.BlockDeviceMapping.is_root',
- new_callable=mock.PropertyMock)
- def test_detach_volume_root(self, mock_isroot):
- url = "/%s/action" % UUID1
- mock_isroot.return_value = True
- res = self._make_request(url, {"detach": {"volume_id": UUID1}})
- self.assertEqual(res.status_int, 403)
-
- def test_attach_volume(self):
- url = "/%s/action" % UUID1
- res = self._make_request(url, {"attach": {"volume_id": UUID1}})
- self.assertEqual(res.status_int, 202)
-
- def test_attach_volume_to_locked_server(self):
- url = "/%s/action" % UUID1
- self.stubs.Set(compute.api.API, 'attach_volume',
- fakes.fake_actions_to_locked_server)
- res = self._make_request(url, {"attach": {"volume_id": UUID1}})
- self.assertEqual(res.status_int, 409)
-
- def test_attach_volume_disk_bus_and_disk_dev(self):
- url = "/%s/action" % UUID1
- self._make_request(url, {"attach": {"volume_id": UUID1,
- "device": "/dev/vdb",
- "disk_bus": "ide",
- "device_type": "cdrom"}})
-
- def test_attach_volume_with_bad_id(self):
- url = "/%s/action" % UUID1
- res = self._make_request(url, {"attach": {"volume_id": 'xxx'}})
- self.assertEqual(res.status_int, 400)
-
- def test_attach_volume_without_id(self):
- url = "/%s/action" % UUID1
- res = self._make_request(url, {"attach": {}})
- self.assertEqual(res.status_int, 400)
-
- def test_attach_volume_with_invalid_request(self):
- url = "/%s/action" % UUID1
- res = self._make_request(url, {"attach": None})
- self.assertEqual(res.status_int, 400)
-
- def test_attach_volume_with_non_existe_vol(self):
- url = "/%s/action" % UUID1
- self.stubs.Set(compute.api.API, 'attach_volume',
- fake_attach_volume_not_found_vol)
- res = self._make_request(url, {"attach": {"volume_id": UUID1}})
- self.assertEqual(res.status_int, 404)
-
- def test_attach_volume_with_non_existed_instance(self):
- url = "/%s/action" % UUID1
- self.stubs.Set(compute.api.API, 'get', fake_compute_get_not_found)
- res = self._make_request(url, {"attach": {"volume_id": UUID1}})
- self.assertEqual(res.status_int, 404)
-
- def test_attach_volume_with_invalid_device_path(self):
- url = "/%s/action" % UUID1
- self.stubs.Set(compute.api.API, 'attach_volume',
- fake_attach_volume_invalid_device_path)
- res = self._make_request(url, {"attach": {"volume_id": UUID1,
- 'device': 'xxx'}})
- self.assertEqual(res.status_int, 400)
-
- def test_attach_volume_with_instance_invalid_state(self):
- url = "/%s/action" % UUID1
- self.stubs.Set(compute.api.API, 'attach_volume',
- fake_attach_volume_instance_invalid_state)
- res = self._make_request(url, {"attach": {"volume_id": UUID1}})
- self.assertEqual(res.status_int, 409)
-
- def test_attach_volume_with_invalid_volume(self):
- url = "/%s/action" % UUID1
- self.stubs.Set(compute.api.API, 'attach_volume',
- fake_attach_volume_invalid_volume)
- res = self._make_request(url, {"attach": {"volume_id": UUID1}})
- self.assertEqual(res.status_int, 400)
-
- def test_attach_volume_with_invalid_request_body(self):
- url = "/%s/action" % UUID1
- self.stubs.Set(compute.api.API, 'attach_volume',
- fake_attach_volume_invalid_volume)
- res = self._make_request(url, {"attach": None})
- self.assertEqual(res.status_int, 400)
-
- def _test_swap(self, uuid=UUID1, body=None):
- body = body or {'swap_volume_attachment': {'old_volume_id': uuid,
- 'new_volume_id': UUID2}}
- req = webob.Request.blank('/v2/fake/servers/%s/action' % UUID1)
- req.method = 'PUT'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = context.get_admin_context()
- return self.Controller.swap(req, UUID1, body=body)
-
- def test_swap_volume(self):
- self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
- # Check any exceptions don't happen and status code
- self._test_swap()
- self.assertEqual(202, self.Controller.swap.wsgi_code)
-
- def test_swap_volume_for_locked_server(self):
- def fake_swap_volume_for_locked_server(self, context, instance,
- old_volume, new_volume):
- raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
- self.stubs.Set(compute.api.API, 'swap_volume',
- fake_swap_volume_for_locked_server)
- self.assertRaises(webob.exc.HTTPConflict, self._test_swap)
-
- def test_swap_volume_for_locked_server_new(self):
- self.stubs.Set(compute.api.API, 'swap_volume',
- fakes.fake_actions_to_locked_server)
- self.assertRaises(webob.exc.HTTPConflict, self._test_swap)
-
- def test_swap_volume_instance_not_found(self):
- self.stubs.Set(compute.api.API, 'get', fake_compute_get_not_found)
- self.assertRaises(webob.exc.HTTPNotFound, self._test_swap)
-
- def test_swap_volume_with_bad_action(self):
- self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
- body = {'swap_volume_attachment_bad_action': None}
- self.assertRaises(exception.ValidationError, self._test_swap,
- body=body)
-
- def test_swap_volume_with_invalid_body(self):
- self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
- body = {'swap_volume_attachment': {'bad_volume_id_body': UUID1,
- 'new_volume_id': UUID2}}
- self.assertRaises(exception.ValidationError, self._test_swap,
- body=body)
-
- def test_swap_volume_with_invalid_volume(self):
- self.stubs.Set(compute.api.API, 'swap_volume',
- fake_swap_volume_invalid_volume)
- self.assertRaises(webob.exc.HTTPBadRequest, self._test_swap)
-
- def test_swap_volume_with_unattached_volume(self):
- self.stubs.Set(compute.api.API, 'swap_volume',
- fake_swap_volume_unattached_volume)
- self.assertRaises(webob.exc.HTTPNotFound, self._test_swap)
-
- def test_swap_volume_with_bad_state_instance(self):
- self.stubs.Set(compute.api.API, 'swap_volume',
- fake_swap_volume_instance_invalid_state)
- self.assertRaises(webob.exc.HTTPConflict, self._test_swap)
-
- def test_swap_volume_no_attachment(self):
- self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
- self.assertRaises(webob.exc.HTTPNotFound, self._test_swap, UUID3)
-
- def test_swap_volume_not_found(self):
- self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
- self.stubs.Set(volume.cinder.API, 'get', fake_volume_get_not_found)
- self.assertRaises(webob.exc.HTTPNotFound, self._test_swap)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_extension_info.py b/nova/tests/api/openstack/compute/plugins/v3/test_extension_info.py
deleted file mode 100644
index 38905a4702..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_extension_info.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.api.openstack.compute import plugins
-from nova.api.openstack.compute.plugins.v3 import extension_info
-from nova import exception
-from nova import policy
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-class fake_extension(object):
- def __init__(self, name, alias, description, version):
- self.name = name
- self.alias = alias
- self.__doc__ = description
- self.version = version
-
-
-fake_extensions = {
- 'ext1-alias': fake_extension('ext1', 'ext1-alias', 'ext1 description', 1),
- 'ext2-alias': fake_extension('ext2', 'ext2-alias', 'ext2 description', 2),
- 'ext3-alias': fake_extension('ext3', 'ext3-alias', 'ext3 description', 1)
-}
-
-
-def fake_policy_enforce(context, action, target, do_raise=True):
- return True
-
-
-def fake_policy_enforce_selective(context, action, target, do_raise=True):
- if action == 'compute_extension:v3:ext1-alias:discoverable':
- raise exception.Forbidden
- else:
- return True
-
-
-class ExtensionInfoTest(test.NoDBTestCase):
-
- def setUp(self):
- super(ExtensionInfoTest, self).setUp()
- ext_info = plugins.LoadedExtensionInfo()
- ext_info.extensions = fake_extensions
- self.controller = extension_info.ExtensionInfoController(ext_info)
-
- def test_extension_info_list(self):
- self.stubs.Set(policy, 'enforce', fake_policy_enforce)
- req = fakes.HTTPRequestV3.blank('/extensions')
- res_dict = self.controller.index(req)
- self.assertEqual(3, len(res_dict['extensions']))
- for e in res_dict['extensions']:
- self.assertIn(e['alias'], fake_extensions)
- self.assertEqual(e['name'], fake_extensions[e['alias']].name)
- self.assertEqual(e['alias'], fake_extensions[e['alias']].alias)
- self.assertEqual(e['description'],
- fake_extensions[e['alias']].__doc__)
- self.assertEqual(e['version'],
- fake_extensions[e['alias']].version)
-
- def test_extension_info_show(self):
- self.stubs.Set(policy, 'enforce', fake_policy_enforce)
- req = fakes.HTTPRequestV3.blank('/extensions/ext1-alias')
- res_dict = self.controller.show(req, 'ext1-alias')
- self.assertEqual(1, len(res_dict))
- self.assertEqual(res_dict['extension']['name'],
- fake_extensions['ext1-alias'].name)
- self.assertEqual(res_dict['extension']['alias'],
- fake_extensions['ext1-alias'].alias)
- self.assertEqual(res_dict['extension']['description'],
- fake_extensions['ext1-alias'].__doc__)
- self.assertEqual(res_dict['extension']['version'],
- fake_extensions['ext1-alias'].version)
-
- def test_extension_info_list_not_all_discoverable(self):
- self.stubs.Set(policy, 'enforce', fake_policy_enforce_selective)
- req = fakes.HTTPRequestV3.blank('/extensions')
- res_dict = self.controller.index(req)
- self.assertEqual(2, len(res_dict['extensions']))
- for e in res_dict['extensions']:
- self.assertNotEqual('ext1-alias', e['alias'])
- self.assertIn(e['alias'], fake_extensions)
- self.assertEqual(e['name'], fake_extensions[e['alias']].name)
- self.assertEqual(e['alias'], fake_extensions[e['alias']].alias)
- self.assertEqual(e['description'],
- fake_extensions[e['alias']].__doc__)
- self.assertEqual(e['version'],
- fake_extensions[e['alias']].version)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_lock_server.py b/nova/tests/api/openstack/compute/plugins/v3/test_lock_server.py
deleted file mode 100644
index 5f8e499b0b..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_lock_server.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.api.openstack.compute.plugins.v3 import lock_server
-from nova import exception
-from nova.tests.api.openstack.compute.plugins.v3 import \
- admin_only_action_common
-from nova.tests.api.openstack import fakes
-
-
-class LockServerTests(admin_only_action_common.CommonTests):
- def setUp(self):
- super(LockServerTests, self).setUp()
- self.controller = lock_server.LockServerController()
- self.compute_api = self.controller.compute_api
-
- def _fake_controller(*args, **kwargs):
- return self.controller
-
- self.stubs.Set(lock_server, 'LockServerController',
- _fake_controller)
- self.app = fakes.wsgi_app_v21(init_only=('servers',
- 'os-lock-server'),
- fake_auth_context=self.context)
- self.mox.StubOutWithMock(self.compute_api, 'get')
-
- def test_lock_unlock(self):
- self._test_actions(['lock', 'unlock'])
-
- def test_lock_unlock_with_non_existed_instance(self):
- self._test_actions_with_non_existed_instance(['lock', 'unlock'])
-
- def test_unlock_not_authorized(self):
- self.mox.StubOutWithMock(self.compute_api, 'unlock')
-
- instance = self._stub_instance_get()
-
- self.compute_api.unlock(self.context, instance).AndRaise(
- exception.PolicyNotAuthorized(action='unlock'))
-
- self.mox.ReplayAll()
-
- res = self._make_request('/servers/%s/action' % instance.uuid,
- {'unlock': None})
- self.assertEqual(403, res.status_int)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_multiple_create.py b/nova/tests/api/openstack/compute/plugins/v3/test_multiple_create.py
deleted file mode 100644
index 0c8320bccc..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_multiple_create.py
+++ /dev/null
@@ -1,547 +0,0 @@
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-import uuid
-
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute import plugins
-from nova.api.openstack.compute.plugins.v3 import block_device_mapping
-from nova.api.openstack.compute.plugins.v3 import multiple_create
-from nova.api.openstack.compute.plugins.v3 import servers
-from nova.compute import api as compute_api
-from nova.compute import flavors
-from nova import db
-from nova import exception
-from nova.network import manager
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-from nova.tests.image import fake
-
-CONF = cfg.CONF
-FAKE_UUID = fakes.FAKE_UUID
-
-
-def fake_gen_uuid():
- return FAKE_UUID
-
-
-def return_security_group(context, instance_id, security_group_id):
- pass
-
-
-class ServersControllerCreateTest(test.TestCase):
-
- def setUp(self):
- """Shared implementation for tests below that create instance."""
- super(ServersControllerCreateTest, self).setUp()
-
- self.flags(verbose=True,
- enable_instance_password=True)
- self.instance_cache_num = 0
- self.instance_cache_by_id = {}
- self.instance_cache_by_uuid = {}
-
- ext_info = plugins.LoadedExtensionInfo()
- self.controller = servers.ServersController(extension_info=ext_info)
- CONF.set_override('extensions_blacklist', 'os-multiple-create',
- 'osapi_v3')
- self.no_mult_create_controller = servers.ServersController(
- extension_info=ext_info)
-
- def instance_create(context, inst):
- inst_type = flavors.get_flavor_by_flavor_id(3)
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- def_image_ref = 'http://localhost/images/%s' % image_uuid
- self.instance_cache_num += 1
- instance = fake_instance.fake_db_instance(**{
- 'id': self.instance_cache_num,
- 'display_name': inst['display_name'] or 'test',
- 'uuid': FAKE_UUID,
- 'instance_type': inst_type,
- 'access_ip_v4': '1.2.3.4',
- 'access_ip_v6': 'fead::1234',
- 'image_ref': inst.get('image_ref', def_image_ref),
- 'user_id': 'fake',
- 'project_id': 'fake',
- 'reservation_id': inst['reservation_id'],
- "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
- "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
- "progress": 0,
- "fixed_ips": [],
- "task_state": "",
- "vm_state": "",
- "security_groups": inst['security_groups'],
- })
-
- self.instance_cache_by_id[instance['id']] = instance
- self.instance_cache_by_uuid[instance['uuid']] = instance
- return instance
-
- def instance_get(context, instance_id):
- """Stub for compute/api create() pulling in instance after
- scheduling
- """
- return self.instance_cache_by_id[instance_id]
-
- def instance_update(context, uuid, values):
- instance = self.instance_cache_by_uuid[uuid]
- instance.update(values)
- return instance
-
- def server_update(context, instance_uuid, params, update_cells=True,
- columns_to_join=None):
- inst = self.instance_cache_by_uuid[instance_uuid]
- inst.update(params)
- return (inst, inst)
-
- def fake_method(*args, **kwargs):
- pass
-
- def project_get_networks(context, user_id):
- return dict(id='1', host='localhost')
-
- def queue_get_for(context, *args):
- return 'network_topic'
-
- fakes.stub_out_rate_limiting(self.stubs)
- fakes.stub_out_key_pair_funcs(self.stubs)
- fake.stub_out_image_service(self.stubs)
- fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
- self.stubs.Set(db, 'instance_add_security_group',
- return_security_group)
- self.stubs.Set(db, 'project_get_networks',
- project_get_networks)
- self.stubs.Set(db, 'instance_create', instance_create)
- self.stubs.Set(db, 'instance_system_metadata_update',
- fake_method)
- self.stubs.Set(db, 'instance_get', instance_get)
- self.stubs.Set(db, 'instance_update', instance_update)
- self.stubs.Set(db, 'instance_update_and_get_original',
- server_update)
- self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
- fake_method)
-
- def _test_create_extra(self, params, no_image=False,
- override_controller=None):
- image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
- server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
- if no_image:
- server.pop('imageRef', None)
- server.update(params)
- body = dict(server=server)
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- if override_controller:
- server = override_controller.create(req, body=body).obj['server']
- else:
- server = self.controller.create(req, body=body).obj['server']
-
- def test_create_instance_with_multiple_create_disabled(self):
- min_count = 2
- max_count = 3
- params = {
- multiple_create.MIN_ATTRIBUTE_NAME: min_count,
- multiple_create.MAX_ATTRIBUTE_NAME: max_count,
- }
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertNotIn('min_count', kwargs)
- self.assertNotIn('max_count', kwargs)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(
- params,
- override_controller=self.no_mult_create_controller)
-
- def test_multiple_create_with_string_type_min_and_max(self):
- min_count = '2'
- max_count = '3'
- params = {
- multiple_create.MIN_ATTRIBUTE_NAME: min_count,
- multiple_create.MAX_ATTRIBUTE_NAME: max_count,
- }
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertIsInstance(kwargs['min_count'], int)
- self.assertIsInstance(kwargs['max_count'], int)
- self.assertEqual(kwargs['min_count'], 2)
- self.assertEqual(kwargs['max_count'], 3)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_with_multiple_create_enabled(self):
- min_count = 2
- max_count = 3
- params = {
- multiple_create.MIN_ATTRIBUTE_NAME: min_count,
- multiple_create.MAX_ATTRIBUTE_NAME: max_count,
- }
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['min_count'], 2)
- self.assertEqual(kwargs['max_count'], 3)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_invalid_negative_min(self):
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/123/flavors/3'
-
- body = {
- 'server': {
- multiple_create.MIN_ATTRIBUTE_NAME: -1,
- 'name': 'server_test',
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
- }
- }
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- self.assertRaises(exception.ValidationError,
- self.controller.create,
- req,
- body=body)
-
- def test_create_instance_invalid_negative_max(self):
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/123/flavors/3'
-
- body = {
- 'server': {
- multiple_create.MAX_ATTRIBUTE_NAME: -1,
- 'name': 'server_test',
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
- }
- }
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- self.assertRaises(exception.ValidationError,
- self.controller.create,
- req,
- body=body)
-
- def test_create_instance_with_blank_min(self):
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/123/flavors/3'
-
- body = {
- 'server': {
- multiple_create.MIN_ATTRIBUTE_NAME: '',
- 'name': 'server_test',
- 'image_ref': image_href,
- 'flavor_ref': flavor_ref,
- }
- }
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- self.assertRaises(exception.ValidationError,
- self.controller.create,
- req,
- body=body)
-
- def test_create_instance_with_blank_max(self):
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/123/flavors/3'
-
- body = {
- 'server': {
- multiple_create.MAX_ATTRIBUTE_NAME: '',
- 'name': 'server_test',
- 'image_ref': image_href,
- 'flavor_ref': flavor_ref,
- }
- }
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- self.assertRaises(exception.ValidationError,
- self.controller.create,
- req,
- body=body)
-
- def test_create_instance_invalid_min_greater_than_max(self):
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/123/flavors/3'
-
- body = {
- 'server': {
- multiple_create.MIN_ATTRIBUTE_NAME: 4,
- multiple_create.MAX_ATTRIBUTE_NAME: 2,
- 'name': 'server_test',
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
- }
- }
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create,
- req,
- body=body)
-
- def test_create_instance_invalid_alpha_min(self):
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/123/flavors/3'
-
- body = {
- 'server': {
- multiple_create.MIN_ATTRIBUTE_NAME: 'abcd',
- 'name': 'server_test',
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
- }
- }
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- self.assertRaises(exception.ValidationError,
- self.controller.create,
- req,
- body=body)
-
- def test_create_instance_invalid_alpha_max(self):
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/123/flavors/3'
-
- body = {
- 'server': {
- multiple_create.MAX_ATTRIBUTE_NAME: 'abcd',
- 'name': 'server_test',
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
- }
- }
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- self.assertRaises(exception.ValidationError,
- self.controller.create,
- req,
- body=body)
-
- def test_create_multiple_instances(self):
- """Test creating multiple instances but not asking for
- reservation_id
- """
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/123/flavors/3'
- body = {
- 'server': {
- multiple_create.MIN_ATTRIBUTE_NAME: 2,
- 'name': 'server_test',
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
- 'metadata': {'hello': 'world',
- 'open': 'stack'},
- }
- }
-
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- res = self.controller.create(req, body=body).obj
-
- self.assertEqual(FAKE_UUID, res["server"]["id"])
- self._check_admin_password_len(res["server"])
-
- def test_create_multiple_instances_pass_disabled(self):
- """Test creating multiple instances but not asking for
- reservation_id
- """
- self.flags(enable_instance_password=False)
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/123/flavors/3'
- body = {
- 'server': {
- multiple_create.MIN_ATTRIBUTE_NAME: 2,
- 'name': 'server_test',
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
- 'metadata': {'hello': 'world',
- 'open': 'stack'},
- }
- }
-
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- res = self.controller.create(req, body=body).obj
-
- self.assertEqual(FAKE_UUID, res["server"]["id"])
- self._check_admin_password_missing(res["server"])
-
- def _check_admin_password_len(self, server_dict):
- """utility function - check server_dict for admin_password length."""
- self.assertEqual(CONF.password_length,
- len(server_dict["adminPass"]))
-
- def _check_admin_password_missing(self, server_dict):
- """utility function - check server_dict for admin_password absence."""
- self.assertNotIn("admin_password", server_dict)
-
- def _create_multiple_instances_resv_id_return(self, resv_id_return):
- """Test creating multiple instances with asking for
- reservation_id
- """
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/123/flavors/3'
- body = {
- 'server': {
- multiple_create.MIN_ATTRIBUTE_NAME: 2,
- 'name': 'server_test',
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
- 'metadata': {'hello': 'world',
- 'open': 'stack'},
- multiple_create.RRID_ATTRIBUTE_NAME: resv_id_return
- }
- }
-
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- res = self.controller.create(req, body=body)
- reservation_id = res.obj['reservation_id']
- self.assertNotEqual(reservation_id, "")
- self.assertIsNotNone(reservation_id)
- self.assertTrue(len(reservation_id) > 1)
-
- def test_create_multiple_instances_with_resv_id_return(self):
- self._create_multiple_instances_resv_id_return(True)
-
- def test_create_multiple_instances_with_string_resv_id_return(self):
- self._create_multiple_instances_resv_id_return("True")
-
- def test_create_multiple_instances_with_multiple_volume_bdm(self):
- """Test that a BadRequest is raised if multiple instances
- are requested with a list of block device mappings for volumes.
- """
- min_count = 2
- bdm = [{'source_type': 'volume', 'uuid': 'vol-xxxx'},
- {'source_type': 'volume', 'uuid': 'vol-yyyy'}
- ]
- params = {
- block_device_mapping.ATTRIBUTE_NAME: bdm,
- multiple_create.MIN_ATTRIBUTE_NAME: min_count
- }
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['min_count'], 2)
- self.assertEqual(len(kwargs['block_device_mapping']), 2)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- exc = self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, params, no_image=True)
- self.assertEqual("Cannot attach one or more volumes to multiple "
- "instances", exc.explanation)
-
- def test_create_multiple_instances_with_single_volume_bdm(self):
- """Test that a BadRequest is raised if multiple instances
- are requested to boot from a single volume.
- """
- min_count = 2
- bdm = [{'source_type': 'volume', 'uuid': 'vol-xxxx'}]
- params = {
- block_device_mapping.ATTRIBUTE_NAME: bdm,
- multiple_create.MIN_ATTRIBUTE_NAME: min_count
- }
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['min_count'], 2)
- self.assertEqual(kwargs['block_device_mapping'][0]['volume_id'],
- 'vol-xxxx')
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- exc = self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, params, no_image=True)
- self.assertEqual("Cannot attach one or more volumes to multiple "
- "instances", exc.explanation)
-
- def test_create_multiple_instance_with_non_integer_max_count(self):
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/123/flavors/3'
- body = {
- 'server': {
- multiple_create.MAX_ATTRIBUTE_NAME: 2.5,
- 'name': 'server_test',
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
- 'metadata': {'hello': 'world',
- 'open': 'stack'},
- }
- }
-
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- self.assertRaises(exception.ValidationError,
- self.controller.create, req, body=body)
-
- def test_create_multiple_instance_with_non_integer_min_count(self):
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/123/flavors/3'
- body = {
- 'server': {
- multiple_create.MIN_ATTRIBUTE_NAME: 2.5,
- 'name': 'server_test',
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
- 'metadata': {'hello': 'world',
- 'open': 'stack'},
- }
- }
-
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- self.assertRaises(exception.ValidationError,
- self.controller.create, req, body=body)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_pause_server.py b/nova/tests/api/openstack/compute/plugins/v3/test_pause_server.py
deleted file mode 100644
index e5de55bc30..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_pause_server.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.api.openstack.compute.plugins.v3 import pause_server
-from nova.tests.api.openstack.compute.plugins.v3 import \
- admin_only_action_common
-from nova.tests.api.openstack import fakes
-
-
-class PauseServerTests(admin_only_action_common.CommonTests):
- def setUp(self):
- super(PauseServerTests, self).setUp()
- self.controller = pause_server.PauseServerController()
- self.compute_api = self.controller.compute_api
-
- def _fake_controller(*args, **kwargs):
- return self.controller
-
- self.stubs.Set(pause_server, 'PauseServerController',
- _fake_controller)
- self.app = fakes.wsgi_app_v21(init_only=('servers',
- 'os-pause-server'),
- fake_auth_context=self.context)
- self.mox.StubOutWithMock(self.compute_api, 'get')
-
- def test_pause_unpause(self):
- self._test_actions(['pause', 'unpause'])
-
- def test_actions_raise_on_not_implemented(self):
- for action in ['pause', 'unpause']:
- self.mox.StubOutWithMock(self.compute_api, action)
- self._test_not_implemented_state(action)
- # Re-mock this.
- self.mox.StubOutWithMock(self.compute_api, 'get')
-
- def test_pause_unpause_with_non_existed_instance(self):
- self._test_actions_with_non_existed_instance(['pause', 'unpause'])
-
- def test_pause_unpause_with_non_existed_instance_in_compute_api(self):
- self._test_actions_instance_not_found_in_compute_api(['pause',
- 'unpause'])
-
- def test_pause_unpause_raise_conflict_on_invalid_state(self):
- self._test_actions_raise_conflict_on_invalid_state(['pause',
- 'unpause'])
-
- def test_actions_with_locked_instance(self):
- self._test_actions_with_locked_instance(['pause', 'unpause'])
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_pci.py b/nova/tests/api/openstack/compute/plugins/v3/test_pci.py
deleted file mode 100644
index 5bc2201c49..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_pci.py
+++ /dev/null
@@ -1,236 +0,0 @@
-# Copyright 2013 Intel Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from oslo.serialization import jsonutils
-from webob import exc
-
-from nova.api.openstack.compute.plugins.v3 import pci
-from nova.api.openstack import wsgi
-from nova import context
-from nova import db
-from nova import exception
-from nova import objects
-from nova.pci import device
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests.objects import test_pci_device
-
-
-fake_compute_node = {
- 'pci_stats': [{"count": 3,
- "vendor_id": "8086",
- "product_id": "1520",
- "extra_info": {"phys_function": '[["0x0000", "0x04", '
- '"0x00", "0x1"]]'}}]}
-
-
-class FakeResponse(wsgi.ResponseObject):
- pass
-
-
-class PciServerControllerTest(test.NoDBTestCase):
- def setUp(self):
- super(PciServerControllerTest, self).setUp()
- self.controller = pci.PciServerController()
- self.fake_obj = {'server': {'addresses': {},
- 'id': 'fb08',
- 'name': 'a3',
- 'status': 'ACTIVE',
- 'tenant_id': '9a3af784c',
- 'user_id': 'e992080ac0',
- }}
- self.fake_list = {'servers': [{'addresses': {},
- 'id': 'fb08',
- 'name': 'a3',
- 'status': 'ACTIVE',
- 'tenant_id': '9a3af784c',
- 'user_id': 'e992080ac',
- }]}
- self._create_fake_instance()
- self._create_fake_pci_device()
- device.claim(self.pci_device, self.inst)
- device.allocate(self.pci_device, self.inst)
-
- def _create_fake_instance(self):
- self.inst = objects.Instance()
- self.inst.uuid = 'fake-inst-uuid'
- self.inst.pci_devices = objects.PciDeviceList()
-
- def _create_fake_pci_device(self):
- def fake_pci_device_get_by_addr(ctxt, id, addr):
- return test_pci_device.fake_db_dev
-
- ctxt = context.get_admin_context()
- self.stubs.Set(db, 'pci_device_get_by_addr',
- fake_pci_device_get_by_addr)
- self.pci_device = objects.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
-
- def test_show(self):
- def fake_get_db_instance(id):
- return self.inst
-
- resp = FakeResponse(self.fake_obj, '')
- req = fakes.HTTPRequestV3.blank('/os-pci/1', use_admin_context=True)
- self.stubs.Set(req, 'get_db_instance', fake_get_db_instance)
- self.controller.show(req, resp, '1')
- self.assertEqual([{'id': 1}],
- resp.obj['server']['os-pci:pci_devices'])
-
- def test_detail(self):
- def fake_get_db_instance(id):
- return self.inst
-
- resp = FakeResponse(self.fake_list, '')
- req = fakes.HTTPRequestV3.blank('/os-pci/detail',
- use_admin_context=True)
- self.stubs.Set(req, 'get_db_instance', fake_get_db_instance)
- self.controller.detail(req, resp)
- self.assertEqual([{'id': 1}],
- resp.obj['servers'][0]['os-pci:pci_devices'])
-
-
-class PciHypervisorControllerTest(test.NoDBTestCase):
- def setUp(self):
- super(PciHypervisorControllerTest, self).setUp()
- self.controller = pci.PciHypervisorController()
- self.fake_objs = dict(hypervisors=[
- dict(id=1,
- service=dict(id=1, host="compute1"),
- hypervisor_type="xen",
- hypervisor_version=3,
- hypervisor_hostname="hyper1")])
- self.fake_obj = dict(hypervisor=dict(
- id=1,
- service=dict(id=1, host="compute1"),
- hypervisor_type="xen",
- hypervisor_version=3,
- hypervisor_hostname="hyper1"))
-
- def test_show(self):
- def fake_get_db_compute_node(id):
- fake_compute_node['pci_stats'] = jsonutils.dumps(
- fake_compute_node['pci_stats'])
- return fake_compute_node
-
- req = fakes.HTTPRequestV3.blank('/os-hypervisors/1',
- use_admin_context=True)
- resp = FakeResponse(self.fake_obj, '')
- self.stubs.Set(req, 'get_db_compute_node', fake_get_db_compute_node)
- self.controller.show(req, resp, '1')
- self.assertIn('os-pci:pci_stats', resp.obj['hypervisor'])
- fake_compute_node['pci_stats'] = jsonutils.loads(
- fake_compute_node['pci_stats'])
- self.assertEqual(fake_compute_node['pci_stats'][0],
- resp.obj['hypervisor']['os-pci:pci_stats'][0])
-
- def test_detail(self):
- def fake_get_db_compute_node(id):
- fake_compute_node['pci_stats'] = jsonutils.dumps(
- fake_compute_node['pci_stats'])
- return fake_compute_node
-
- req = fakes.HTTPRequestV3.blank('/os-hypervisors/detail',
- use_admin_context=True)
- resp = FakeResponse(self.fake_objs, '')
- self.stubs.Set(req, 'get_db_compute_node', fake_get_db_compute_node)
- self.controller.detail(req, resp)
- fake_compute_node['pci_stats'] = jsonutils.loads(
- fake_compute_node['pci_stats'])
- self.assertIn('os-pci:pci_stats', resp.obj['hypervisors'][0])
- self.assertEqual(fake_compute_node['pci_stats'][0],
- resp.obj['hypervisors'][0]['os-pci:pci_stats'][0])
-
-
-class PciControlletest(test.NoDBTestCase):
- def setUp(self):
- super(PciControlletest, self).setUp()
- self.controller = pci.PciController()
-
- def test_show(self):
- def fake_pci_device_get_by_id(context, id):
- return test_pci_device.fake_db_dev
-
- self.stubs.Set(db, 'pci_device_get_by_id', fake_pci_device_get_by_id)
- req = fakes.HTTPRequestV3.blank('/os-pci/1', use_admin_context=True)
- result = self.controller.show(req, '1')
- dist = {'pci_device': {'address': 'a',
- 'compute_node_id': 1,
- 'dev_id': 'i',
- 'extra_info': {},
- 'dev_type': 't',
- 'id': 1,
- 'server_uuid': None,
- 'label': 'l',
- 'product_id': 'p',
- 'status': 'available',
- 'vendor_id': 'v'}}
- self.assertEqual(dist, result)
-
- def test_show_error_id(self):
- def fake_pci_device_get_by_id(context, id):
- raise exception.PciDeviceNotFoundById(id=id)
-
- self.stubs.Set(db, 'pci_device_get_by_id', fake_pci_device_get_by_id)
- req = fakes.HTTPRequestV3.blank('/os-pci/0', use_admin_context=True)
- self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '0')
-
- def _fake_compute_node_get_all(self, context):
- return [dict(id=1,
- service_id=1,
- cpu_info='cpu_info',
- disk_available_least=100)]
-
- def _fake_pci_device_get_all_by_node(self, context, node):
- return [test_pci_device.fake_db_dev, test_pci_device.fake_db_dev_1]
-
- def test_index(self):
- self.stubs.Set(db, 'compute_node_get_all',
- self._fake_compute_node_get_all)
- self.stubs.Set(db, 'pci_device_get_all_by_node',
- self._fake_pci_device_get_all_by_node)
-
- req = fakes.HTTPRequestV3.blank('/os-pci', use_admin_context=True)
- result = self.controller.index(req)
- dist = {'pci_devices': [test_pci_device.fake_db_dev,
- test_pci_device.fake_db_dev_1]}
- for i in range(len(result['pci_devices'])):
- self.assertEqual(dist['pci_devices'][i]['vendor_id'],
- result['pci_devices'][i]['vendor_id'])
- self.assertEqual(dist['pci_devices'][i]['id'],
- result['pci_devices'][i]['id'])
- self.assertEqual(dist['pci_devices'][i]['status'],
- result['pci_devices'][i]['status'])
- self.assertEqual(dist['pci_devices'][i]['address'],
- result['pci_devices'][i]['address'])
-
- def test_detail(self):
- self.stubs.Set(db, 'compute_node_get_all',
- self._fake_compute_node_get_all)
- self.stubs.Set(db, 'pci_device_get_all_by_node',
- self._fake_pci_device_get_all_by_node)
- req = fakes.HTTPRequestV3.blank('/os-pci/detail',
- use_admin_context=True)
- result = self.controller.detail(req)
- dist = {'pci_devices': [test_pci_device.fake_db_dev,
- test_pci_device.fake_db_dev_1]}
- for i in range(len(result['pci_devices'])):
- self.assertEqual(dist['pci_devices'][i]['vendor_id'],
- result['pci_devices'][i]['vendor_id'])
- self.assertEqual(dist['pci_devices'][i]['id'],
- result['pci_devices'][i]['id'])
- self.assertEqual(dist['pci_devices'][i]['label'],
- result['pci_devices'][i]['label'])
- self.assertEqual(dist['pci_devices'][i]['dev_id'],
- result['pci_devices'][i]['dev_id'])
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py
deleted file mode 100644
index eca40bdfb7..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py
+++ /dev/null
@@ -1,1131 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import uuid
-
-import mock
-import mox
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute import plugins
-from nova.api.openstack.compute.plugins.v3 import servers
-from nova.compute import api as compute_api
-from nova.compute import task_states
-from nova.compute import vm_states
-from nova import context
-from nova import db
-from nova import exception
-from nova.image import glance
-from nova import objects
-from nova.openstack.common import uuidutils
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_block_device
-from nova.tests import fake_instance
-from nova.tests.image import fake
-
-CONF = cfg.CONF
-CONF.import_opt('password_length', 'nova.utils')
-FAKE_UUID = fakes.FAKE_UUID
-INSTANCE_IDS = {FAKE_UUID: 1}
-
-
-def return_server_not_found(*arg, **kwarg):
- raise exception.InstanceNotFound(instance_id='42')
-
-
-def instance_update_and_get_original(context, instance_uuid, values,
- update_cells=True,
- columns_to_join=None,
- ):
- inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
- inst = dict(inst, **values)
- return (inst, inst)
-
-
-def instance_update(context, instance_uuid, kwargs, update_cells=True):
- inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
- return inst
-
-
-class MockSetAdminPassword(object):
- def __init__(self):
- self.instance_id = None
- self.password = None
-
- def __call__(self, context, instance, password):
- self.instance_id = instance['uuid']
- self.password = password
-
-
-class ServerActionsControllerTest(test.TestCase):
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- image_href = 'http://localhost/v2/fake/images/%s' % image_uuid
-
- def setUp(self):
- super(ServerActionsControllerTest, self).setUp()
-
- CONF.set_override('host', 'localhost', group='glance')
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
- host='fake_host'))
- self.stubs.Set(db, 'instance_update_and_get_original',
- instance_update_and_get_original)
-
- fakes.stub_out_nw_api(self.stubs)
- fakes.stub_out_compute_api_snapshot(self.stubs)
- fake.stub_out_image_service(self.stubs)
- self.flags(allow_instance_snapshots=True,
- enable_instance_password=True)
- self.uuid = FAKE_UUID
- self.url = '/servers/%s/action' % self.uuid
- self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
-
- ext_info = plugins.LoadedExtensionInfo()
- self.controller = servers.ServersController(extension_info=ext_info)
- self.compute_api = self.controller.compute_api
- self.context = context.RequestContext('fake', 'fake')
- self.app = fakes.wsgi_app_v21(init_only=('servers',),
- fake_auth_context=self.context)
-
- def _make_request(self, url, body):
- req = webob.Request.blank('/v2/fake' + url)
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.content_type = 'application/json'
- return req.get_response(self.app)
-
- def _stub_instance_get(self, uuid=None):
- self.mox.StubOutWithMock(compute_api.API, 'get')
- if uuid is None:
- uuid = uuidutils.generate_uuid()
- instance = fake_instance.fake_db_instance(
- id=1, uuid=uuid, vm_state=vm_states.ACTIVE, task_state=None)
- instance = objects.Instance._from_db_object(
- self.context, objects.Instance(), instance)
-
- self.compute_api.get(self.context, uuid, want_objects=True,
- expected_attrs=['pci_devices']).AndReturn(instance)
- return instance
-
- def _test_locked_instance(self, action, method=None, body_map=None,
- compute_api_args_map=None):
- if method is None:
- method = action
- if body_map is None:
- body_map = {}
- if compute_api_args_map is None:
- compute_api_args_map = {}
-
- instance = self._stub_instance_get()
- args, kwargs = compute_api_args_map.get(action, ((), {}))
-
- getattr(compute_api.API, method)(self.context, instance,
- *args, **kwargs).AndRaise(
- exception.InstanceIsLocked(instance_uuid=instance['uuid']))
-
- self.mox.ReplayAll()
-
- res = self._make_request('/servers/%s/action' % instance['uuid'],
- {action: body_map.get(action)})
- self.assertEqual(409, res.status_int)
- # Do these here instead of tearDown because this method is called
- # more than once for the same test case
- self.mox.VerifyAll()
- self.mox.UnsetStubs()
-
- def test_actions_with_locked_instance(self):
- actions = ['resize', 'confirmResize', 'revertResize', 'reboot',
- 'rebuild']
-
- method_translations = {'confirmResize': 'confirm_resize',
- 'revertResize': 'revert_resize'}
-
- body_map = {'resize': {'flavorRef': '2'},
- 'reboot': {'type': 'HARD'},
- 'rebuild': {'imageRef': self.image_uuid,
- 'adminPass': 'TNc53Dr8s7vw'}}
-
- args_map = {'resize': (('2'), {}),
- 'confirmResize': ((), {}),
- 'reboot': (('HARD',), {}),
- 'rebuild': ((self.image_uuid, 'TNc53Dr8s7vw'), {})}
-
- for action in actions:
- method = method_translations.get(action)
- self.mox.StubOutWithMock(compute_api.API, method or action)
- self._test_locked_instance(action, method=method,
- body_map=body_map,
- compute_api_args_map=args_map)
-
- def test_reboot_hard(self):
- body = dict(reboot=dict(type="HARD"))
- req = fakes.HTTPRequestV3.blank(self.url)
- self.controller._action_reboot(req, FAKE_UUID, body)
-
- def test_reboot_soft(self):
- body = dict(reboot=dict(type="SOFT"))
- req = fakes.HTTPRequestV3.blank(self.url)
- self.controller._action_reboot(req, FAKE_UUID, body)
-
- def test_reboot_incorrect_type(self):
- body = dict(reboot=dict(type="NOT_A_TYPE"))
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_reboot,
- req, FAKE_UUID, body)
-
- def test_reboot_missing_type(self):
- body = dict(reboot=dict())
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_reboot,
- req, FAKE_UUID, body)
-
- def test_reboot_none(self):
- body = dict(reboot=dict(type=None))
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_reboot,
- req, FAKE_UUID, body)
-
- def test_reboot_not_found(self):
- self.stubs.Set(db, 'instance_get_by_uuid',
- return_server_not_found)
-
- body = dict(reboot=dict(type="HARD"))
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller._action_reboot,
- req, str(uuid.uuid4()), body)
-
- def test_reboot_raises_conflict_on_invalid_state(self):
- body = dict(reboot=dict(type="HARD"))
-
- def fake_reboot(*args, **kwargs):
- raise exception.InstanceInvalidState(attr='fake_attr',
- state='fake_state', method='fake_method',
- instance_uuid='fake')
-
- self.stubs.Set(compute_api.API, 'reboot', fake_reboot)
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._action_reboot,
- req, FAKE_UUID, body)
-
- def test_reboot_soft_with_soft_in_progress_raises_conflict(self):
- body = dict(reboot=dict(type="SOFT"))
- req = fakes.HTTPRequestV3.blank(self.url)
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
- task_state=task_states.REBOOTING))
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._action_reboot,
- req, FAKE_UUID, body)
-
- def test_reboot_hard_with_soft_in_progress_does_not_raise(self):
- body = dict(reboot=dict(type="HARD"))
- req = fakes.HTTPRequestV3.blank(self.url)
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
- task_state=task_states.REBOOTING))
- self.controller._action_reboot(req, FAKE_UUID, body)
-
- def test_reboot_hard_with_hard_in_progress_raises_conflict(self):
- body = dict(reboot=dict(type="HARD"))
- req = fakes.HTTPRequestV3.blank(self.url)
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
- task_state=task_states.REBOOTING_HARD))
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._action_reboot,
- req, FAKE_UUID, body)
-
- def test_rebuild_accepted_minimum(self):
- return_server = fakes.fake_instance_get(image_ref='2',
- vm_state=vm_states.ACTIVE, host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
- self_href = 'http://localhost/v3/servers/%s' % FAKE_UUID
-
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- },
- }
-
- req = fakes.HTTPRequestV3.blank(self.url)
- robj = self.controller._action_rebuild(req, FAKE_UUID, body=body)
- body = robj.obj
-
- self.assertEqual(body['server']['image']['id'], '2')
- self.assertEqual(len(body['server']['adminPass']),
- CONF.password_length)
-
- self.assertEqual(robj['location'], self_href)
-
- def test_rebuild_instance_with_image_uuid(self):
- info = dict(image_href_in_call=None)
-
- def rebuild(self2, context, instance, image_href, *args, **kwargs):
- info['image_href_in_call'] = image_href
-
- self.stubs.Set(db, 'instance_get',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
- self.stubs.Set(compute_api.API, 'rebuild', rebuild)
-
- body = {
- 'rebuild': {
- 'imageRef': self.image_uuid,
- },
- }
-
- req = fakes.HTTPRequestV3.blank('/v2/fake/servers/a/action')
- self.controller._action_rebuild(req, FAKE_UUID, body=body)
- self.assertEqual(info['image_href_in_call'], self.image_uuid)
-
- def test_rebuild_instance_with_image_href_uses_uuid(self):
- info = dict(image_href_in_call=None)
-
- def rebuild(self2, context, instance, image_href, *args, **kwargs):
- info['image_href_in_call'] = image_href
-
- self.stubs.Set(db, 'instance_get',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
- self.stubs.Set(compute_api.API, 'rebuild', rebuild)
-
- body = {
- 'rebuild': {
- 'imageRef': self.image_href,
- },
- }
-
- req = fakes.HTTPRequestV3.blank('/v2/fake/servers/a/action')
- self.controller._action_rebuild(req, FAKE_UUID, body=body)
- self.assertEqual(info['image_href_in_call'], self.image_uuid)
-
- def test_rebuild_accepted_minimum_pass_disabled(self):
- # run with enable_instance_password disabled to verify admin_password
- # is missing from response. See lp bug 921814
- self.flags(enable_instance_password=False)
-
- return_server = fakes.fake_instance_get(image_ref='2',
- vm_state=vm_states.ACTIVE, host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
- self_href = 'http://localhost/v3/servers/%s' % FAKE_UUID
-
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- },
- }
-
- req = fakes.HTTPRequestV3.blank(self.url)
- robj = self.controller._action_rebuild(req, FAKE_UUID, body=body)
- body = robj.obj
-
- self.assertEqual(body['server']['image']['id'], '2')
- self.assertNotIn("admin_password", body['server'])
-
- self.assertEqual(robj['location'], self_href)
-
- def test_rebuild_raises_conflict_on_invalid_state(self):
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- },
- }
-
- def fake_rebuild(*args, **kwargs):
- raise exception.InstanceInvalidState(attr='fake_attr',
- state='fake_state', method='fake_method',
- instance_uuid='fake')
-
- self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._action_rebuild,
- req, FAKE_UUID, body=body)
-
- def test_rebuild_accepted_with_metadata(self):
- metadata = {'new': 'metadata'}
-
- return_server = fakes.fake_instance_get(metadata=metadata,
- vm_state=vm_states.ACTIVE, host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
-
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- "metadata": metadata,
- },
- }
-
- req = fakes.HTTPRequestV3.blank(self.url)
- body = self.controller._action_rebuild(req, FAKE_UUID, body=body).obj
-
- self.assertEqual(body['server']['metadata'], metadata)
-
- def test_rebuild_accepted_with_bad_metadata(self):
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- "metadata": "stack",
- },
- }
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(exception.ValidationError,
- self.controller._action_rebuild,
- req, FAKE_UUID, body=body)
-
- def test_rebuild_with_too_large_metadata(self):
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- "metadata": {
- 256 * "k": "value"
- }
- }
- }
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(exception.ValidationError,
- self.controller._action_rebuild, req,
- FAKE_UUID, body=body)
-
- def test_rebuild_bad_entity(self):
- body = {
- "rebuild": {
- "imageId": self._image_href,
- },
- }
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(exception.ValidationError,
- self.controller._action_rebuild,
- req, FAKE_UUID, body=body)
-
- def test_rebuild_admin_password(self):
- return_server = fakes.fake_instance_get(image_ref='2',
- vm_state=vm_states.ACTIVE, host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
-
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- "adminPass": "asdf",
- },
- }
-
- req = fakes.HTTPRequestV3.blank(self.url)
- body = self.controller._action_rebuild(req, FAKE_UUID, body=body).obj
-
- self.assertEqual(body['server']['image']['id'], '2')
- self.assertEqual(body['server']['adminPass'], 'asdf')
-
- def test_rebuild_admin_password_pass_disabled(self):
- # run with enable_instance_password disabled to verify admin_password
- # is missing from response. See lp bug 921814
- self.flags(enable_instance_password=False)
-
- return_server = fakes.fake_instance_get(image_ref='2',
- vm_state=vm_states.ACTIVE, host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
-
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- "admin_password": "asdf",
- },
- }
-
- req = fakes.HTTPRequestV3.blank(self.url)
- body = self.controller._action_rebuild(req, FAKE_UUID, body=body).obj
-
- self.assertEqual(body['server']['image']['id'], '2')
- self.assertNotIn('adminPass', body['server'])
-
- def test_rebuild_server_not_found(self):
- def server_not_found(self, instance_id,
- columns_to_join=None, use_slave=False):
- raise exception.InstanceNotFound(instance_id=instance_id)
- self.stubs.Set(db, 'instance_get_by_uuid', server_not_found)
-
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- },
- }
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller._action_rebuild,
- req, FAKE_UUID, body=body)
-
- def test_rebuild_with_bad_image(self):
- body = {
- "rebuild": {
- "imageRef": "foo",
- },
- }
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild,
- req, FAKE_UUID, body=body)
-
- def test_rebuild_when_kernel_not_exists(self):
-
- def return_image_meta(*args, **kwargs):
- image_meta_table = {
- '2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
- '155d900f-4e14-4e4c-a73d-069cbf4541e6':
- {'id': 3, 'status': 'active', 'container_format': 'raw',
- 'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
- }
- image_id = args[2]
- try:
- image_meta = image_meta_table[str(image_id)]
- except KeyError:
- raise exception.ImageNotFound(image_id=image_id)
-
- return image_meta
-
- self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
- body = {
- "rebuild": {
- "imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
- },
- }
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild,
- req, FAKE_UUID, body=body)
-
- def test_rebuild_proper_kernel_ram(self):
- instance_meta = {'kernel_id': None, 'ramdisk_id': None}
-
- orig_get = compute_api.API.get
-
- def wrap_get(*args, **kwargs):
- inst = orig_get(*args, **kwargs)
- instance_meta['instance'] = inst
- return inst
-
- def fake_save(context, **kwargs):
- instance = instance_meta['instance']
- for key in instance_meta.keys():
- if key in instance.obj_what_changed():
- instance_meta[key] = instance[key]
-
- def return_image_meta(*args, **kwargs):
- image_meta_table = {
- '1': {'id': 1, 'status': 'active', 'container_format': 'aki'},
- '2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
- '155d900f-4e14-4e4c-a73d-069cbf4541e6':
- {'id': 3, 'status': 'active', 'container_format': 'raw',
- 'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
- }
- image_id = args[2]
- try:
- image_meta = image_meta_table[str(image_id)]
- except KeyError:
- raise exception.ImageNotFound(image_id=image_id)
-
- return image_meta
-
- self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
- self.stubs.Set(compute_api.API, 'get', wrap_get)
- self.stubs.Set(objects.Instance, 'save', fake_save)
- body = {
- "rebuild": {
- "imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
- },
- }
- req = fakes.HTTPRequestV3.blank(self.url)
- self.controller._action_rebuild(req, FAKE_UUID, body=body).obj
- self.assertEqual(instance_meta['kernel_id'], '1')
- self.assertEqual(instance_meta['ramdisk_id'], '2')
-
- def _test_rebuild_preserve_ephemeral(self, value=None):
- return_server = fakes.fake_instance_get(image_ref='2',
- vm_state=vm_states.ACTIVE,
- host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
-
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- },
- }
- if value is not None:
- body['rebuild']['preserve_ephemeral'] = value
-
- req = fakes.HTTPRequestV3.blank(self.url)
- context = req.environ['nova.context']
-
- self.mox.StubOutWithMock(compute_api.API, 'rebuild')
- if value is not None:
- compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
- mox.IgnoreArg(), preserve_ephemeral=value)
- else:
- compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
- mox.IgnoreArg())
- self.mox.ReplayAll()
-
- self.controller._action_rebuild(req, FAKE_UUID, body=body)
-
- def test_rebuild_preserve_ephemeral_true(self):
- self._test_rebuild_preserve_ephemeral(True)
-
- def test_rebuild_preserve_ephemeral_false(self):
- self._test_rebuild_preserve_ephemeral(False)
-
- def test_rebuild_preserve_ephemeral_default(self):
- self._test_rebuild_preserve_ephemeral()
-
- @mock.patch.object(compute_api.API, 'rebuild',
- side_effect=exception.AutoDiskConfigDisabledByImage(
- image='dummy'))
- def test_rebuild_instance_raise_auto_disk_config_exc(self, mock_rebuild):
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- },
- }
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild,
- req, FAKE_UUID, body=body)
-
- def test_resize_server(self):
-
- body = dict(resize=dict(flavorRef="http://localhost/3"))
-
- self.resize_called = False
-
- def resize_mock(*args):
- self.resize_called = True
-
- self.stubs.Set(compute_api.API, 'resize', resize_mock)
-
- req = fakes.HTTPRequestV3.blank(self.url)
- body = self.controller._action_resize(req, FAKE_UUID, body=body)
-
- self.assertEqual(self.resize_called, True)
-
- def test_resize_server_no_flavor(self):
- body = dict(resize=dict())
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(exception.ValidationError,
- self.controller._action_resize,
- req, FAKE_UUID, body=body)
-
- def test_resize_server_no_flavor_ref(self):
- body = dict(resize=dict(flavorRef=None))
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(exception.ValidationError,
- self.controller._action_resize,
- req, FAKE_UUID, body=body)
-
- def test_resize_with_server_not_found(self):
- body = dict(resize=dict(flavorRef="http://localhost/3"))
-
- self.stubs.Set(compute_api.API, 'get', return_server_not_found)
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller._action_resize,
- req, FAKE_UUID, body=body)
-
- def test_resize_with_image_exceptions(self):
- body = dict(resize=dict(flavorRef="http://localhost/3"))
- self.resize_called = 0
- image_id = 'fake_image_id'
-
- exceptions = [
- (exception.ImageNotAuthorized(image_id=image_id),
- webob.exc.HTTPUnauthorized),
- (exception.ImageNotFound(image_id=image_id),
- webob.exc.HTTPBadRequest),
- (exception.Invalid, webob.exc.HTTPBadRequest),
- (exception.NoValidHost(reason='Bad host'),
- webob.exc.HTTPBadRequest),
- (exception.AutoDiskConfigDisabledByImage(image=image_id),
- webob.exc.HTTPBadRequest),
- ]
-
- raised, expected = map(iter, zip(*exceptions))
-
- def _fake_resize(obj, context, instance, flavor_id):
- self.resize_called += 1
- raise raised.next()
-
- self.stubs.Set(compute_api.API, 'resize', _fake_resize)
-
- for call_no in range(len(exceptions)):
- req = fakes.HTTPRequestV3.blank(self.url)
- next_exception = expected.next()
- actual = self.assertRaises(next_exception,
- self.controller._action_resize,
- req, FAKE_UUID, body=body)
- if (isinstance(exceptions[call_no][0],
- exception.NoValidHost)):
- self.assertEqual(actual.explanation,
- 'No valid host was found. Bad host')
- elif (isinstance(exceptions[call_no][0],
- exception.AutoDiskConfigDisabledByImage)):
- self.assertEqual(actual.explanation,
- 'Requested image fake_image_id has automatic'
- ' disk resize disabled.')
- self.assertEqual(self.resize_called, call_no + 1)
-
- def test_resize_with_too_many_instances(self):
- body = dict(resize=dict(flavorRef="http://localhost/3"))
-
- def fake_resize(*args, **kwargs):
- raise exception.TooManyInstances(message="TooManyInstance")
-
- self.stubs.Set(compute_api.API, 'resize', fake_resize)
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPForbidden,
- self.controller._action_resize,
- req, FAKE_UUID, body=body)
-
- @mock.patch('nova.compute.api.API.resize',
- side_effect=exception.CannotResizeDisk(reason=''))
- def test_resize_raises_cannot_resize_disk(self, mock_resize):
- body = dict(resize=dict(flavorRef="http://localhost/3"))
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_resize,
- req, FAKE_UUID, body=body)
-
- @mock.patch('nova.compute.api.API.resize',
- side_effect=exception.FlavorNotFound(reason='',
- flavor_id='fake_id'))
- def test_resize_raises_flavor_not_found(self, mock_resize):
- body = dict(resize=dict(flavorRef="http://localhost/3"))
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_resize,
- req, FAKE_UUID, body=body)
-
- def test_resize_raises_conflict_on_invalid_state(self):
- body = dict(resize=dict(flavorRef="http://localhost/3"))
-
- def fake_resize(*args, **kwargs):
- raise exception.InstanceInvalidState(attr='fake_attr',
- state='fake_state', method='fake_method',
- instance_uuid='fake')
-
- self.stubs.Set(compute_api.API, 'resize', fake_resize)
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._action_resize,
- req, FAKE_UUID, body=body)
-
- def test_confirm_resize_server(self):
- body = dict(confirmResize=None)
-
- self.confirm_resize_called = False
-
- def cr_mock(*args):
- self.confirm_resize_called = True
-
- self.stubs.Set(compute_api.API, 'confirm_resize', cr_mock)
-
- req = fakes.HTTPRequestV3.blank(self.url)
- body = self.controller._action_confirm_resize(req, FAKE_UUID, body)
-
- self.assertEqual(self.confirm_resize_called, True)
-
- def test_confirm_resize_migration_not_found(self):
- body = dict(confirmResize=None)
-
- def confirm_resize_mock(*args):
- raise exception.MigrationNotFoundByStatus(instance_id=1,
- status='finished')
-
- self.stubs.Set(compute_api.API,
- 'confirm_resize',
- confirm_resize_mock)
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_confirm_resize,
- req, FAKE_UUID, body)
-
- def test_confirm_resize_raises_conflict_on_invalid_state(self):
- body = dict(confirmResize=None)
-
- def fake_confirm_resize(*args, **kwargs):
- raise exception.InstanceInvalidState(attr='fake_attr',
- state='fake_state', method='fake_method',
- instance_uuid='fake')
-
- self.stubs.Set(compute_api.API, 'confirm_resize',
- fake_confirm_resize)
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._action_confirm_resize,
- req, FAKE_UUID, body)
-
- def test_revert_resize_migration_not_found(self):
- body = dict(revertResize=None)
-
- def revert_resize_mock(*args):
- raise exception.MigrationNotFoundByStatus(instance_id=1,
- status='finished')
-
- self.stubs.Set(compute_api.API,
- 'revert_resize',
- revert_resize_mock)
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_revert_resize,
- req, FAKE_UUID, body)
-
- def test_revert_resize_server_not_found(self):
- body = dict(revertResize=None)
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob. exc.HTTPNotFound,
- self.controller._action_revert_resize,
- req, "bad_server_id", body)
-
- def test_revert_resize_server(self):
- body = dict(revertResize=None)
-
- self.revert_resize_called = False
-
- def revert_mock(*args):
- self.revert_resize_called = True
-
- self.stubs.Set(compute_api.API, 'revert_resize', revert_mock)
-
- req = fakes.HTTPRequestV3.blank(self.url)
- body = self.controller._action_revert_resize(req, FAKE_UUID, body)
-
- self.assertEqual(self.revert_resize_called, True)
-
- def test_revert_resize_raises_conflict_on_invalid_state(self):
- body = dict(revertResize=None)
-
- def fake_revert_resize(*args, **kwargs):
- raise exception.InstanceInvalidState(attr='fake_attr',
- state='fake_state', method='fake_method',
- instance_uuid='fake')
-
- self.stubs.Set(compute_api.API, 'revert_resize',
- fake_revert_resize)
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._action_revert_resize,
- req, FAKE_UUID, body)
-
- def test_create_image(self):
- body = {
- 'createImage': {
- 'name': 'Snapshot 1',
- },
- }
-
- req = fakes.HTTPRequestV3.blank(self.url)
- response = self.controller._action_create_image(req, FAKE_UUID, body)
-
- location = response.headers['Location']
- self.assertEqual(glance.generate_image_url('123'), location)
-
- def test_create_image_name_too_long(self):
- long_name = 'a' * 260
- body = {
- 'createImage': {
- 'name': long_name,
- },
- }
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_create_image, req,
- FAKE_UUID, body)
-
- def _do_test_create_volume_backed_image(self, extra_properties):
-
- def _fake_id(x):
- return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
-
- body = dict(createImage=dict(name='snapshot_of_volume_backed'))
-
- if extra_properties:
- body['createImage']['metadata'] = extra_properties
-
- image_service = glance.get_default_image_service()
-
- bdm = [dict(volume_id=_fake_id('a'),
- volume_size=1,
- device_name='vda',
- delete_on_termination=False)]
- props = dict(kernel_id=_fake_id('b'),
- ramdisk_id=_fake_id('c'),
- root_device_name='/dev/vda',
- block_device_mapping=bdm)
- original_image = dict(properties=props,
- container_format='ami',
- status='active',
- is_public=True)
-
- image_service.create(None, original_image)
-
- def fake_block_device_mapping_get_all_by_instance(context, inst_id,
- use_slave=False):
- return [fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': _fake_id('a'),
- 'source_type': 'snapshot',
- 'destination_type': 'volume',
- 'volume_size': 1,
- 'device_name': 'vda',
- 'snapshot_id': 1,
- 'boot_index': 0,
- 'delete_on_termination': False,
- 'no_device': None})]
-
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fake_block_device_mapping_get_all_by_instance)
-
- instance = fakes.fake_instance_get(image_ref=original_image['id'],
- vm_state=vm_states.ACTIVE,
- root_device_name='/dev/vda')
- self.stubs.Set(db, 'instance_get_by_uuid', instance)
-
- volume = dict(id=_fake_id('a'),
- size=1,
- host='fake',
- display_description='fake')
- snapshot = dict(id=_fake_id('d'))
- self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
- volume_api = self.controller.compute_api.volume_api
- volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
- volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
- mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
-
- self.mox.ReplayAll()
-
- req = fakes.HTTPRequestV3.blank(self.url)
- response = self.controller._action_create_image(req, FAKE_UUID, body)
-
- location = response.headers['Location']
- image_id = location.replace(glance.generate_image_url(''), '')
- image = image_service.show(None, image_id)
-
- self.assertEqual(image['name'], 'snapshot_of_volume_backed')
- properties = image['properties']
- self.assertEqual(properties['kernel_id'], _fake_id('b'))
- self.assertEqual(properties['ramdisk_id'], _fake_id('c'))
- self.assertEqual(properties['root_device_name'], '/dev/vda')
- self.assertEqual(properties['bdm_v2'], True)
- bdms = properties['block_device_mapping']
- self.assertEqual(len(bdms), 1)
- self.assertEqual(bdms[0]['boot_index'], 0)
- self.assertEqual(bdms[0]['source_type'], 'snapshot')
- self.assertEqual(bdms[0]['destination_type'], 'volume')
- self.assertEqual(bdms[0]['snapshot_id'], snapshot['id'])
- for fld in ('connection_info', 'id',
- 'instance_uuid', 'device_name'):
- self.assertNotIn(fld, bdms[0])
- for k in extra_properties.keys():
- self.assertEqual(properties[k], extra_properties[k])
-
- def test_create_volume_backed_image_no_metadata(self):
- self._do_test_create_volume_backed_image({})
-
- def test_create_volume_backed_image_with_metadata(self):
- self._do_test_create_volume_backed_image(dict(ImageType='Gold',
- ImageVersion='2.0'))
-
- def _test_create_volume_backed_image_with_metadata_from_volume(
- self, extra_metadata=None):
-
- def _fake_id(x):
- return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
-
- body = dict(createImage=dict(name='snapshot_of_volume_backed'))
- if extra_metadata:
- body['createImage']['metadata'] = extra_metadata
-
- image_service = glance.get_default_image_service()
-
- def fake_block_device_mapping_get_all_by_instance(context, inst_id,
- use_slave=False):
- return [fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': _fake_id('a'),
- 'source_type': 'snapshot',
- 'destination_type': 'volume',
- 'volume_size': 1,
- 'device_name': 'vda',
- 'snapshot_id': 1,
- 'boot_index': 0,
- 'delete_on_termination': False,
- 'no_device': None})]
-
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fake_block_device_mapping_get_all_by_instance)
-
- instance = fakes.fake_instance_get(image_ref='',
- vm_state=vm_states.ACTIVE,
- root_device_name='/dev/vda')
- self.stubs.Set(db, 'instance_get_by_uuid', instance)
-
- fake_metadata = {'test_key1': 'test_value1',
- 'test_key2': 'test_value2'}
- volume = dict(id=_fake_id('a'),
- size=1,
- host='fake',
- display_description='fake',
- volume_image_metadata=fake_metadata)
- snapshot = dict(id=_fake_id('d'))
- self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
- volume_api = self.controller.compute_api.volume_api
- volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
- volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
- volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
- mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
-
- req = fakes.HTTPRequestV3.blank(self.url)
-
- self.mox.ReplayAll()
- response = self.controller._action_create_image(req, FAKE_UUID, body)
- location = response.headers['Location']
- image_id = location.replace('http://localhost:9292/images/', '')
- image = image_service.show(None, image_id)
-
- properties = image['properties']
- self.assertEqual(properties['test_key1'], 'test_value1')
- self.assertEqual(properties['test_key2'], 'test_value2')
- if extra_metadata:
- for key, val in extra_metadata.items():
- self.assertEqual(properties[key], val)
-
- def test_create_vol_backed_img_with_meta_from_vol_without_extra_meta(self):
- self._test_create_volume_backed_image_with_metadata_from_volume()
-
- def test_create_vol_backed_img_with_meta_from_vol_with_extra_meta(self):
- self._test_create_volume_backed_image_with_metadata_from_volume(
- extra_metadata={'a': 'b'})
-
- def test_create_image_snapshots_disabled(self):
- """Don't permit a snapshot if the allow_instance_snapshots flag is
- False
- """
- self.flags(allow_instance_snapshots=False)
- body = {
- 'createImage': {
- 'name': 'Snapshot 1',
- },
- }
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_create_image,
- req, FAKE_UUID, body)
-
- def test_create_image_with_metadata(self):
- body = {
- 'createImage': {
- 'name': 'Snapshot 1',
- 'metadata': {'key': 'asdf'},
- },
- }
-
- req = fakes.HTTPRequestV3.blank(self.url)
- response = self.controller._action_create_image(req, FAKE_UUID, body)
-
- location = response.headers['Location']
- self.assertEqual(glance.generate_image_url('123'), location)
-
- def test_create_image_with_too_much_metadata(self):
- body = {
- 'createImage': {
- 'name': 'Snapshot 1',
- 'metadata': {},
- },
- }
- for num in range(CONF.quota_metadata_items + 1):
- body['createImage']['metadata']['foo%i' % num] = "bar"
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPForbidden,
- self.controller._action_create_image,
- req, FAKE_UUID, body)
-
- def test_create_image_no_name(self):
- body = {
- 'createImage': {},
- }
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_create_image,
- req, FAKE_UUID, body)
-
- def test_create_image_blank_name(self):
- body = {
- 'createImage': {
- 'name': '',
- }
- }
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_create_image,
- req, FAKE_UUID, body)
-
- def test_create_image_bad_metadata(self):
- body = {
- 'createImage': {
- 'name': 'geoff',
- 'metadata': 'henry',
- },
- }
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_create_image,
- req, FAKE_UUID, body)
-
- def test_create_image_raises_conflict_on_invalid_state(self):
- def snapshot(*args, **kwargs):
- raise exception.InstanceInvalidState(attr='fake_attr',
- state='fake_state', method='fake_method',
- instance_uuid='fake')
- self.stubs.Set(compute_api.API, 'snapshot', snapshot)
-
- body = {
- "createImage": {
- "name": "test_snapshot",
- },
- }
-
- req = fakes.HTTPRequestV3.blank(self.url)
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._action_create_image,
- req, FAKE_UUID, body)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_password.py b/nova/tests/api/openstack/compute/plugins/v3/test_server_password.py
deleted file mode 100644
index 969393ec60..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_server_password.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.metadata import password
-from nova import compute
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-
-
-CONF = cfg.CONF
-
-
-class ServerPasswordTest(test.TestCase):
- content_type = 'application/json'
-
- def setUp(self):
- super(ServerPasswordTest, self).setUp()
- fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(
- compute.api.API, 'get',
- lambda self, ctxt, *a, **kw:
- fake_instance.fake_instance_obj(
- ctxt,
- system_metadata={},
- expected_attrs=['system_metadata']))
- self.password = 'fakepass'
-
- def fake_extract_password(instance):
- return self.password
-
- def fake_convert_password(context, password):
- self.password = password
- return {}
-
- self.stubs.Set(password, 'extract_password', fake_extract_password)
- self.stubs.Set(password, 'convert_password', fake_convert_password)
-
- def _make_request(self, url, method='GET'):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- req.method = method
- res = req.get_response(
- fakes.wsgi_app_v21(init_only=('servers', 'os-server-password')))
- return res
-
- def _get_pass(self, body):
- return jsonutils.loads(body).get('password')
-
- def test_get_password(self):
- url = '/v2/fake/servers/fake/os-server-password'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- self.assertEqual(self._get_pass(res.body), 'fakepass')
-
- def test_reset_password(self):
- url = '/v2/fake/servers/fake/os-server-password'
- res = self._make_request(url, 'DELETE')
- self.assertEqual(res.status_int, 204)
-
- res = self._make_request(url)
- self.assertEqual(res.status_int, 200)
- self.assertEqual(self._get_pass(res.body), '')
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py
deleted file mode 100644
index 9972ad8612..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py
+++ /dev/null
@@ -1,3352 +0,0 @@
-# Copyright 2010-2011 OpenStack Foundation
-# Copyright 2011 Piston Cloud Computing, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import base64
-import contextlib
-import copy
-import datetime
-import uuid
-
-import iso8601
-import mock
-import mox
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-from oslo.utils import timeutils
-import six.moves.urllib.parse as urlparse
-import testtools
-import webob
-
-from nova.api.openstack import compute
-from nova.api.openstack.compute import plugins
-from nova.api.openstack.compute.plugins.v3 import disk_config
-from nova.api.openstack.compute.plugins.v3 import ips
-from nova.api.openstack.compute.plugins.v3 import keypairs
-from nova.api.openstack.compute.plugins.v3 import servers
-from nova.api.openstack.compute.schemas.v3 import disk_config as \
- disk_config_schema
-from nova.api.openstack.compute.schemas.v3 import servers as servers_schema
-from nova.api.openstack.compute import views
-from nova.api.openstack import extensions
-from nova.compute import api as compute_api
-from nova.compute import delete_types
-from nova.compute import flavors
-from nova.compute import task_states
-from nova.compute import vm_states
-from nova import context
-from nova import db
-from nova.db.sqlalchemy import models
-from nova import exception
-from nova.i18n import _
-from nova.image import glance
-from nova.network import manager
-from nova.network.neutronv2 import api as neutron_api
-from nova import objects
-from nova.objects import instance as instance_obj
-from nova.openstack.common import policy as common_policy
-from nova import policy
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-from nova.tests import fake_network
-from nova.tests.image import fake
-from nova.tests import matchers
-from nova import utils as nova_utils
-
-CONF = cfg.CONF
-CONF.import_opt('password_length', 'nova.utils')
-
-FAKE_UUID = fakes.FAKE_UUID
-
-INSTANCE_IDS = {FAKE_UUID: 1}
-FIELDS = instance_obj.INSTANCE_DEFAULT_FIELDS
-
-
-def fake_gen_uuid():
- return FAKE_UUID
-
-
-def return_servers_empty(context, *args, **kwargs):
- return []
-
-
-def instance_update_and_get_original(context, instance_uuid, values,
- update_cells=True,
- columns_to_join=None,
- ):
- inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
- name=values.get('display_name'))
- inst = dict(inst, **values)
- return (inst, inst)
-
-
-def instance_update(context, instance_uuid, values, update_cells=True):
- inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
- name=values.get('display_name'))
- inst = dict(inst, **values)
- return inst
-
-
-def fake_compute_api(cls, req, id):
- return True
-
-
-def fake_start_stop_not_ready(self, context, instance):
- raise exception.InstanceNotReady(instance_id=instance["uuid"])
-
-
-def fake_start_stop_invalid_state(self, context, instance):
- raise exception.InstanceInvalidState(
- instance_uuid=instance['uuid'], attr='fake_attr',
- method='fake_method', state='fake_state')
-
-
-def fake_instance_get_by_uuid_not_found(context, uuid,
- columns_to_join, use_slave=False):
- raise exception.InstanceNotFound(instance_id=uuid)
-
-
-class MockSetAdminPassword(object):
- def __init__(self):
- self.instance_id = None
- self.password = None
-
- def __call__(self, context, instance_id, password):
- self.instance_id = instance_id
- self.password = password
-
-
-class Base64ValidationTest(test.TestCase):
- def setUp(self):
- super(Base64ValidationTest, self).setUp()
- ext_info = plugins.LoadedExtensionInfo()
- self.controller = servers.ServersController(extension_info=ext_info)
-
- def test_decode_base64(self):
- value = "A random string"
- result = self.controller._decode_base64(base64.b64encode(value))
- self.assertEqual(result, value)
-
- def test_decode_base64_binary(self):
- value = "\x00\x12\x75\x99"
- result = self.controller._decode_base64(base64.b64encode(value))
- self.assertEqual(result, value)
-
- def test_decode_base64_whitespace(self):
- value = "A random string"
- encoded = base64.b64encode(value)
- white = "\n \n%s\t%s\n" % (encoded[:2], encoded[2:])
- result = self.controller._decode_base64(white)
- self.assertEqual(result, value)
-
- def test_decode_base64_invalid(self):
- invalid = "A random string"
- result = self.controller._decode_base64(invalid)
- self.assertIsNone(result)
-
- def test_decode_base64_illegal_bytes(self):
- value = "A random string"
- encoded = base64.b64encode(value)
- white = ">\x01%s*%s()" % (encoded[:2], encoded[2:])
- result = self.controller._decode_base64(white)
- self.assertIsNone(result)
-
-
-class NeutronV2Subclass(neutron_api.API):
- """Used to ensure that API handles subclasses properly."""
- pass
-
-
-class ControllerTest(test.TestCase):
-
- def setUp(self):
- super(ControllerTest, self).setUp()
- self.flags(verbose=True, use_ipv6=False)
- fakes.stub_out_rate_limiting(self.stubs)
- fakes.stub_out_key_pair_funcs(self.stubs)
- fake.stub_out_image_service(self.stubs)
- return_server = fakes.fake_instance_get()
- return_servers = fakes.fake_instance_get_all_by_filters()
- self.stubs.Set(db, 'instance_get_all_by_filters',
- return_servers)
- self.stubs.Set(db, 'instance_get_by_uuid',
- return_server)
- self.stubs.Set(db, 'instance_update_and_get_original',
- instance_update_and_get_original)
-
- ext_info = plugins.LoadedExtensionInfo()
- self.controller = servers.ServersController(extension_info=ext_info)
- self.ips_controller = ips.IPsController()
- policy.reset()
- policy.init()
- fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
-
-
-class ServersControllerTest(ControllerTest):
-
- def setUp(self):
- super(ServersControllerTest, self).setUp()
- CONF.set_override('host', 'localhost', group='glance')
-
- def test_requested_networks_prefix(self):
- uuid = 'br-00000000-0000-0000-0000-000000000000'
- requested_networks = [{'uuid': uuid}]
- res = self.controller._get_requested_networks(requested_networks)
- self.assertIn((uuid, None), res.as_tuples())
-
- def test_requested_networks_neutronv2_enabled_with_port(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'port': port}]
- res = self.controller._get_requested_networks(requested_networks)
- self.assertEqual([(None, None, port, None)], res.as_tuples())
-
- def test_requested_networks_neutronv2_enabled_with_network(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- requested_networks = [{'uuid': network}]
- res = self.controller._get_requested_networks(requested_networks)
- self.assertEqual([(network, None, None, None)], res.as_tuples())
-
- def test_requested_networks_neutronv2_enabled_with_network_and_port(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'uuid': network, 'port': port}]
- res = self.controller._get_requested_networks(requested_networks)
- self.assertEqual([(None, None, port, None)], res.as_tuples())
-
- def test_requested_networks_neutronv2_enabled_conflict_on_fixed_ip(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- addr = '10.0.0.1'
- requested_networks = [{'uuid': network,
- 'fixed_ip': addr,
- 'port': port}]
- self.assertRaises(
- webob.exc.HTTPBadRequest,
- self.controller._get_requested_networks,
- requested_networks)
-
- def test_requested_networks_neutronv2_disabled_with_port(self):
- port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'port': port}]
- self.assertRaises(
- webob.exc.HTTPBadRequest,
- self.controller._get_requested_networks,
- requested_networks)
-
- def test_requested_networks_api_enabled_with_v2_subclass(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'uuid': network, 'port': port}]
- res = self.controller._get_requested_networks(requested_networks)
- self.assertEqual([(None, None, port, None)], res.as_tuples())
-
- def test_requested_networks_neutronv2_subclass_with_port(self):
- cls = 'nova.tests.api.openstack.compute.test_servers.NeutronV2Subclass'
- self.flags(network_api_class=cls)
- port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'port': port}]
- res = self.controller._get_requested_networks(requested_networks)
- self.assertEqual([(None, None, port, None)], res.as_tuples())
-
- def test_get_server_by_uuid(self):
- req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
- res_dict = self.controller.show(req, FAKE_UUID)
- self.assertEqual(res_dict['server']['id'], FAKE_UUID)
-
- def test_get_server_joins_pci_devices(self):
- self.expected_attrs = None
-
- def fake_get(_self, *args, **kwargs):
- self.expected_attrs = kwargs['expected_attrs']
- ctxt = context.RequestContext('fake', 'fake')
- return fake_instance.fake_instance_obj(ctxt)
-
- self.stubs.Set(compute_api.API, 'get', fake_get)
-
- req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
- self.controller.show(req, FAKE_UUID)
-
- self.assertIn('pci_devices', self.expected_attrs)
-
- def test_unique_host_id(self):
- """Create two servers with the same host and different
- project_ids and check that the host_id's are unique.
- """
- def return_instance_with_host(self, *args, **kwargs):
- project_id = str(uuid.uuid4())
- return fakes.stub_instance(id=1, uuid=FAKE_UUID,
- project_id=project_id,
- host='fake_host')
-
- self.stubs.Set(db, 'instance_get_by_uuid',
- return_instance_with_host)
- self.stubs.Set(db, 'instance_get',
- return_instance_with_host)
-
- req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
- server1 = self.controller.show(req, FAKE_UUID)
- server2 = self.controller.show(req, FAKE_UUID)
-
- self.assertNotEqual(server1['server']['hostId'],
- server2['server']['hostId'])
-
- def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
- status="ACTIVE", progress=100):
- return {
- "server": {
- "id": uuid,
- "user_id": "fake_user",
- "tenant_id": "fake_project",
- "updated": "2010-11-11T11:00:00Z",
- "created": "2010-10-10T12:00:00Z",
- "progress": progress,
- "name": "server1",
- "status": status,
- "hostId": '',
- "image": {
- "id": "10",
- "links": [
- {
- "rel": "bookmark",
- "href": image_bookmark,
- },
- ],
- },
- "flavor": {
- "id": "1",
- "links": [
- {
- "rel": "bookmark",
- "href": flavor_bookmark,
- },
- ],
- },
- "addresses": {
- 'test1': [
- {'version': 4, 'addr': '192.168.1.100',
- 'OS-EXT-IPS:type': 'fixed',
- 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
- {'version': 6, 'addr': '2001:db8:0:1::1',
- 'OS-EXT-IPS:type': 'fixed',
- 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}
- ]
- },
- "metadata": {
- "seq": "1",
- },
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v3/servers/%s" % uuid,
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/servers/%s" % uuid,
- },
- ],
- }
- }
-
- def test_get_server_by_id(self):
- self.flags(use_ipv6=True)
- image_bookmark = "http://localhost/images/10"
- flavor_bookmark = "http://localhost/flavors/1"
-
- uuid = FAKE_UUID
- req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
- res_dict = self.controller.show(req, uuid)
-
- expected_server = self._get_server_data_dict(uuid,
- image_bookmark,
- flavor_bookmark,
- status="BUILD",
- progress=0)
-
- self.assertThat(res_dict, matchers.DictMatches(expected_server))
-
- def test_get_server_with_active_status_by_id(self):
- image_bookmark = "http://localhost/images/10"
- flavor_bookmark = "http://localhost/flavors/1"
-
- new_return_server = fakes.fake_instance_get(
- vm_state=vm_states.ACTIVE, progress=100)
- self.stubs.Set(db, 'instance_get_by_uuid', new_return_server)
-
- uuid = FAKE_UUID
- req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
- res_dict = self.controller.show(req, uuid)
- expected_server = self._get_server_data_dict(uuid,
- image_bookmark,
- flavor_bookmark)
- self.assertThat(res_dict, matchers.DictMatches(expected_server))
-
- def test_get_server_with_id_image_ref_by_id(self):
- image_ref = "10"
- image_bookmark = "http://localhost/images/10"
- flavor_id = "1"
- flavor_bookmark = "http://localhost/flavors/1"
-
- new_return_server = fakes.fake_instance_get(
- vm_state=vm_states.ACTIVE, image_ref=image_ref,
- flavor_id=flavor_id, progress=100)
- self.stubs.Set(db, 'instance_get_by_uuid', new_return_server)
-
- uuid = FAKE_UUID
- req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
- res_dict = self.controller.show(req, uuid)
- expected_server = self._get_server_data_dict(uuid,
- image_bookmark,
- flavor_bookmark)
-
- self.assertThat(res_dict, matchers.DictMatches(expected_server))
-
- def test_get_server_addresses_from_cache(self):
- pub0 = ('172.19.0.1', '172.19.0.2',)
- pub1 = ('1.2.3.4',)
- pub2 = ('b33f::fdee:ddff:fecc:bbaa',)
- priv0 = ('192.168.0.3', '192.168.0.4',)
-
- def _ip(ip):
- return {'address': ip, 'type': 'fixed'}
-
- nw_cache = [
- {'address': 'aa:aa:aa:aa:aa:aa',
- 'id': 1,
- 'network': {'bridge': 'br0',
- 'id': 1,
- 'label': 'public',
- 'subnets': [{'cidr': '172.19.0.0/24',
- 'ips': [_ip(ip) for ip in pub0]},
- {'cidr': '1.2.3.0/16',
- 'ips': [_ip(ip) for ip in pub1]},
- {'cidr': 'b33f::/64',
- 'ips': [_ip(ip) for ip in pub2]}]}},
- {'address': 'bb:bb:bb:bb:bb:bb',
- 'id': 2,
- 'network': {'bridge': 'br1',
- 'id': 2,
- 'label': 'private',
- 'subnets': [{'cidr': '192.168.0.0/24',
- 'ips': [_ip(ip) for ip in priv0]}]}}]
-
- return_server = fakes.fake_instance_get(nw_cache=nw_cache)
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
-
- req = fakes.HTTPRequestV3.blank('/servers/%s/ips' % FAKE_UUID)
- res_dict = self.ips_controller.index(req, FAKE_UUID)
-
- expected = {
- 'addresses': {
- 'private': [
- {'version': 4, 'addr': '192.168.0.3',
- 'OS-EXT-IPS:type': 'fixed',
- 'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'},
- {'version': 4, 'addr': '192.168.0.4',
- 'OS-EXT-IPS:type': 'fixed',
- 'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'},
- ],
- 'public': [
- {'version': 4, 'addr': '172.19.0.1',
- 'OS-EXT-IPS:type': 'fixed',
- 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
- {'version': 4, 'addr': '172.19.0.2',
- 'OS-EXT-IPS:type': 'fixed',
- 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
- {'version': 4, 'addr': '1.2.3.4',
- 'OS-EXT-IPS:type': 'fixed',
- 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
- {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa',
- 'OS-EXT-IPS:type': 'fixed',
- 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
- ],
- },
- }
- self.assertThat(res_dict, matchers.DictMatches(expected))
-
- def test_get_server_addresses_nonexistent_network(self):
- url = '/v3/servers/%s/ips/network_0' % FAKE_UUID
- req = fakes.HTTPRequestV3.blank(url)
- self.assertRaises(webob.exc.HTTPNotFound, self.ips_controller.show,
- req, FAKE_UUID, 'network_0')
-
- def test_get_server_addresses_nonexistent_server(self):
- def fake_instance_get(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id='fake')
-
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
-
- server_id = str(uuid.uuid4())
- req = fakes.HTTPRequestV3.blank('/servers/%s/ips' % server_id)
- self.assertRaises(webob.exc.HTTPNotFound,
- self.ips_controller.index, req, server_id)
-
- def test_get_server_list_empty(self):
- self.stubs.Set(db, 'instance_get_all_by_filters',
- return_servers_empty)
-
- req = fakes.HTTPRequestV3.blank('/servers')
- res_dict = self.controller.index(req)
-
- num_servers = len(res_dict['servers'])
- self.assertEqual(0, num_servers)
-
- def test_get_server_list_with_reservation_id(self):
- req = fakes.HTTPRequestV3.blank('/servers?reservation_id=foo')
- res_dict = self.controller.index(req)
-
- i = 0
- for s in res_dict['servers']:
- self.assertEqual(s.get('name'), 'server%d' % (i + 1))
- i += 1
-
- def test_get_server_list_with_reservation_id_empty(self):
- req = fakes.HTTPRequestV3.blank('/servers/detail?'
- 'reservation_id=foo')
- res_dict = self.controller.detail(req)
-
- i = 0
- for s in res_dict['servers']:
- self.assertEqual(s.get('name'), 'server%d' % (i + 1))
- i += 1
-
- def test_get_server_list_with_reservation_id_details(self):
- req = fakes.HTTPRequestV3.blank('/servers/detail?'
- 'reservation_id=foo')
- res_dict = self.controller.detail(req)
-
- i = 0
- for s in res_dict['servers']:
- self.assertEqual(s.get('name'), 'server%d' % (i + 1))
- i += 1
-
- def test_get_server_list(self):
- req = fakes.HTTPRequestV3.blank('/servers')
- res_dict = self.controller.index(req)
-
- self.assertEqual(len(res_dict['servers']), 5)
- for i, s in enumerate(res_dict['servers']):
- self.assertEqual(s['id'], fakes.get_fake_uuid(i))
- self.assertEqual(s['name'], 'server%d' % (i + 1))
- self.assertIsNone(s.get('image', None))
-
- expected_links = [
- {
- "rel": "self",
- "href": "http://localhost/v3/servers/%s" % s['id'],
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/servers/%s" % s['id'],
- },
- ]
-
- self.assertEqual(s['links'], expected_links)
-
- def test_get_servers_with_limit(self):
- req = fakes.HTTPRequestV3.blank('/servers?limit=3')
- res_dict = self.controller.index(req)
-
- servers = res_dict['servers']
- self.assertEqual([s['id'] for s in servers],
- [fakes.get_fake_uuid(i) for i in xrange(len(servers))])
-
- servers_links = res_dict['servers_links']
- self.assertEqual(servers_links[0]['rel'], 'next')
- href_parts = urlparse.urlparse(servers_links[0]['href'])
- self.assertEqual('/v3/servers', href_parts.path)
- params = urlparse.parse_qs(href_parts.query)
- expected_params = {'limit': ['3'],
- 'marker': [fakes.get_fake_uuid(2)]}
- self.assertThat(params, matchers.DictMatches(expected_params))
-
- def test_get_servers_with_limit_bad_value(self):
- req = fakes.HTTPRequestV3.blank('/servers?limit=aaa')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.index, req)
-
- def test_get_server_details_empty(self):
- self.stubs.Set(db, 'instance_get_all_by_filters',
- return_servers_empty)
-
- req = fakes.HTTPRequestV3.blank('/servers/detail')
- res_dict = self.controller.detail(req)
-
- num_servers = len(res_dict['servers'])
- self.assertEqual(0, num_servers)
-
- def test_get_server_details_with_limit(self):
- req = fakes.HTTPRequestV3.blank('/servers/detail?limit=3')
- res = self.controller.detail(req)
-
- servers = res['servers']
- self.assertEqual([s['id'] for s in servers],
- [fakes.get_fake_uuid(i) for i in xrange(len(servers))])
-
- servers_links = res['servers_links']
- self.assertEqual(servers_links[0]['rel'], 'next')
-
- href_parts = urlparse.urlparse(servers_links[0]['href'])
- self.assertEqual('/v3/servers/detail', href_parts.path)
- params = urlparse.parse_qs(href_parts.query)
- expected = {'limit': ['3'], 'marker': [fakes.get_fake_uuid(2)]}
- self.assertThat(params, matchers.DictMatches(expected))
-
- def test_get_server_details_with_limit_bad_value(self):
- req = fakes.HTTPRequestV3.blank('/servers/detail?limit=aaa')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.detail, req)
-
- def test_get_server_details_with_limit_and_other_params(self):
- req = fakes.HTTPRequestV3.blank('/servers/detail'
- '?limit=3&blah=2:t')
- res = self.controller.detail(req)
-
- servers = res['servers']
- self.assertEqual([s['id'] for s in servers],
- [fakes.get_fake_uuid(i) for i in xrange(len(servers))])
-
- servers_links = res['servers_links']
- self.assertEqual(servers_links[0]['rel'], 'next')
-
- href_parts = urlparse.urlparse(servers_links[0]['href'])
- self.assertEqual('/v3/servers/detail', href_parts.path)
- params = urlparse.parse_qs(href_parts.query)
- expected = {'limit': ['3'], 'blah': ['2:t'],
- 'marker': [fakes.get_fake_uuid(2)]}
- self.assertThat(params, matchers.DictMatches(expected))
-
- def test_get_servers_with_too_big_limit(self):
- req = fakes.HTTPRequestV3.blank('/servers?limit=30')
- res_dict = self.controller.index(req)
- self.assertNotIn('servers_links', res_dict)
-
- def test_get_servers_with_bad_limit(self):
- req = fakes.HTTPRequestV3.blank('/servers?limit=asdf')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.index, req)
-
- def test_get_servers_with_marker(self):
- url = '/v3/servers?marker=%s' % fakes.get_fake_uuid(2)
- req = fakes.HTTPRequestV3.blank(url)
- servers = self.controller.index(req)['servers']
- self.assertEqual([s['name'] for s in servers], ["server4", "server5"])
-
- def test_get_servers_with_limit_and_marker(self):
- url = '/v3/servers?limit=2&marker=%s' % fakes.get_fake_uuid(1)
- req = fakes.HTTPRequestV3.blank(url)
- servers = self.controller.index(req)['servers']
- self.assertEqual([s['name'] for s in servers], ['server3', 'server4'])
-
- def test_get_servers_with_bad_marker(self):
- req = fakes.HTTPRequestV3.blank('/servers?limit=2&marker=asdf')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.index, req)
-
- def test_get_servers_with_bad_option(self):
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False,
- expected_attrs=None):
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers?unknownoption=whee')
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_allows_image(self):
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False,
- expected_attrs=None):
- self.assertIsNotNone(search_opts)
- self.assertIn('image', search_opts)
- self.assertEqual(search_opts['image'], '12345')
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers?image=12345')
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_tenant_id_filter_converts_to_project_id_for_admin(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False,
- expected_attrs=None):
- self.assertIsNotNone(filters)
- self.assertEqual(filters['project_id'], 'newfake')
- self.assertFalse(filters.get('tenant_id'))
- return [fakes.stub_instance(100)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers'
- '?all_tenants=1&tenant_id=newfake',
- use_admin_context=True)
- res = self.controller.index(req)
-
- self.assertIn('servers', res)
-
- def test_tenant_id_filter_no_admin_context(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False,
- expected_attrs=None):
- self.assertNotEqual(filters, None)
- self.assertEqual(filters['project_id'], 'fake')
- return [fakes.stub_instance(100)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers?tenant_id=newfake')
- res = self.controller.index(req)
- self.assertIn('servers', res)
-
- def test_tenant_id_filter_implies_all_tenants(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False,
- expected_attrs=None):
- self.assertNotEqual(filters, None)
- # The project_id assertion checks that the project_id
- # filter is set to that specified in the request url and
- # not that of the context, verifying that the all_tenants
- # flag was enabled
- self.assertEqual(filters['project_id'], 'newfake')
- self.assertFalse(filters.get('tenant_id'))
- return [fakes.stub_instance(100)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers?tenant_id=newfake',
- use_admin_context=True)
- res = self.controller.index(req)
- self.assertIn('servers', res)
-
- def test_all_tenants_param_normal(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False,
- expected_attrs=None):
- self.assertNotIn('project_id', filters)
- return [fakes.stub_instance(100)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers?all_tenants',
- use_admin_context=True)
- res = self.controller.index(req)
-
- self.assertIn('servers', res)
-
- def test_all_tenants_param_one(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False,
- expected_attrs=None):
- self.assertNotIn('project_id', filters)
- return [fakes.stub_instance(100)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers?all_tenants=1',
- use_admin_context=True)
- res = self.controller.index(req)
-
- self.assertIn('servers', res)
-
- def test_all_tenants_param_zero(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False,
- expected_attrs=None):
- self.assertNotIn('all_tenants', filters)
- return [fakes.stub_instance(100)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers?all_tenants=0',
- use_admin_context=True)
- res = self.controller.index(req)
-
- self.assertIn('servers', res)
-
- def test_all_tenants_param_false(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False,
- expected_attrs=None):
- self.assertNotIn('all_tenants', filters)
- return [fakes.stub_instance(100)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers?all_tenants=false',
- use_admin_context=True)
- res = self.controller.index(req)
-
- self.assertIn('servers', res)
-
- def test_all_tenants_param_invalid(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None,
- expected_attrs=None):
- self.assertNotIn('all_tenants', filters)
- return [fakes.stub_instance(100)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers?all_tenants=xxx',
- use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.index, req)
-
- def test_admin_restricted_tenant(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False,
- expected_attrs=None):
- self.assertIsNotNone(filters)
- self.assertEqual(filters['project_id'], 'fake')
- return [fakes.stub_instance(100)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers',
- use_admin_context=True)
- res = self.controller.index(req)
-
- self.assertIn('servers', res)
-
- def test_all_tenants_pass_policy(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False,
- expected_attrs=None):
- self.assertIsNotNone(filters)
- self.assertNotIn('project_id', filters)
- return [fakes.stub_instance(100)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- rules = {
- "compute:get_all_tenants":
- common_policy.parse_rule("project_id:fake"),
- "compute:get_all":
- common_policy.parse_rule("project_id:fake"),
- }
-
- policy.set_rules(rules)
-
- req = fakes.HTTPRequestV3.blank('/servers?all_tenants=1')
- res = self.controller.index(req)
-
- self.assertIn('servers', res)
-
- def test_all_tenants_fail_policy(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None):
- self.assertIsNotNone(filters)
- return [fakes.stub_instance(100)]
-
- rules = {
- "compute:get_all_tenants":
- common_policy.parse_rule("project_id:non_fake"),
- "compute:get_all":
- common_policy.parse_rule("project_id:fake"),
- }
-
- policy.set_rules(rules)
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers?all_tenants=1')
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.index, req)
-
- def test_get_servers_allows_flavor(self):
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False,
- expected_attrs=None):
- self.assertIsNotNone(search_opts)
- self.assertIn('flavor', search_opts)
- # flavor is an integer ID
- self.assertEqual(search_opts['flavor'], '12345')
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers?flavor=12345')
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_with_bad_flavor(self):
- req = fakes.HTTPRequestV3.blank('/servers?flavor=abcde')
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 0)
-
- def test_get_server_details_with_bad_flavor(self):
- req = fakes.HTTPRequestV3.blank('/servers?flavor=abcde')
- servers = self.controller.detail(req)['servers']
-
- self.assertThat(servers, testtools.matchers.HasLength(0))
-
- def test_get_servers_allows_status(self):
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False,
- expected_attrs=None):
- self.assertIsNotNone(search_opts)
- self.assertIn('vm_state', search_opts)
- self.assertEqual(search_opts['vm_state'], [vm_states.ACTIVE])
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers?status=active')
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_allows_task_status(self):
- server_uuid = str(uuid.uuid4())
- task_state = task_states.REBOOTING
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False,
- expected_attrs=None):
- self.assertIsNotNone(search_opts)
- self.assertIn('task_state', search_opts)
- self.assertEqual([task_states.REBOOT_PENDING,
- task_states.REBOOT_STARTED,
- task_states.REBOOTING],
- search_opts['task_state'])
- db_list = [fakes.stub_instance(100, uuid=server_uuid,
- task_state=task_state)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers?status=reboot')
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_resize_status(self):
- # Test when resize status, it maps list of vm states.
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False,
- expected_attrs=None):
- self.assertIn('vm_state', search_opts)
- self.assertEqual(search_opts['vm_state'],
- [vm_states.ACTIVE, vm_states.STOPPED])
-
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers?status=resize')
-
- servers = self.controller.detail(req)['servers']
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_invalid_status(self):
- # Test getting servers by invalid status.
- req = fakes.HTTPRequestV3.blank('/servers?status=baloney',
- use_admin_context=False)
- servers = self.controller.index(req)['servers']
- self.assertEqual(len(servers), 0)
-
- def test_get_servers_deleted_status_as_user(self):
- req = fakes.HTTPRequestV3.blank('/servers?status=deleted',
- use_admin_context=False)
- self.assertRaises(webob.exc.HTTPForbidden,
- self.controller.detail, req)
-
- def test_get_servers_deleted_status_as_admin(self):
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False,
- expected_attrs=None):
- self.assertIn('vm_state', search_opts)
- self.assertEqual(search_opts['vm_state'], ['deleted'])
-
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers?status=deleted',
- use_admin_context=True)
-
- servers = self.controller.detail(req)['servers']
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_allows_name(self):
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False,
- expected_attrs=None):
- self.assertIsNotNone(search_opts)
- self.assertIn('name', search_opts)
- self.assertEqual(search_opts['name'], 'whee.*')
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers?name=whee.*')
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- @mock.patch.object(compute_api.API, 'get_all')
- def test_get_servers_flavor_not_found(self, get_all_mock):
- get_all_mock.side_effect = exception.FlavorNotFound(flavor_id=1)
-
- req = fakes.HTTPRequest.blank(
- '/fake/servers?status=active&flavor=abc')
- servers = self.controller.index(req)['servers']
- self.assertEqual(0, len(servers))
-
- def test_get_servers_allows_changes_since(self):
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False,
- expected_attrs=None):
- self.assertIsNotNone(search_opts)
- self.assertIn('changes-since', search_opts)
- changes_since = datetime.datetime(2011, 1, 24, 17, 8, 1,
- tzinfo=iso8601.iso8601.UTC)
- self.assertEqual(search_opts['changes-since'], changes_since)
- self.assertNotIn('deleted', search_opts)
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- params = 'changes-since=2011-01-24T17:08:01Z'
- req = fakes.HTTPRequestV3.blank('/servers?%s' % params)
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_allows_changes_since_bad_value(self):
- params = 'changes-since=asdf'
- req = fakes.HTTPRequestV3.blank('/servers?%s' % params)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req)
-
- def test_get_servers_admin_filters_as_user(self):
- """Test getting servers by admin-only or unknown options when
- context is not admin. Make sure the admin and unknown options
- are stripped before they get to compute_api.get_all()
- """
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False,
- expected_attrs=None):
- self.assertIsNotNone(search_opts)
- # Allowed by user
- self.assertIn('name', search_opts)
- self.assertIn('ip', search_opts)
- # OSAPI converts status to vm_state
- self.assertIn('vm_state', search_opts)
- # Allowed only by admins with admin API on
- self.assertNotIn('unknown_option', search_opts)
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
- req = fakes.HTTPRequest.blank('/servers?%s' % query_str)
- res = self.controller.index(req)
-
- servers = res['servers']
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_admin_options_as_admin(self):
- """Test getting servers by admin-only or unknown options when
- context is admin. All options should be passed
- """
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False,
- expected_attrs=None):
- self.assertIsNotNone(search_opts)
- # Allowed by user
- self.assertIn('name', search_opts)
- # OSAPI converts status to vm_state
- self.assertIn('vm_state', search_opts)
- # Allowed only by admins with admin API on
- self.assertIn('ip', search_opts)
- self.assertIn('unknown_option', search_opts)
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
- req = fakes.HTTPRequestV3.blank('/servers?%s' % query_str,
- use_admin_context=True)
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_allows_ip(self):
- """Test getting servers by ip."""
-
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False,
- expected_attrs=None):
- self.assertIsNotNone(search_opts)
- self.assertIn('ip', search_opts)
- self.assertEqual(search_opts['ip'], '10\..*')
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers?ip=10\..*')
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_admin_allows_ip6(self):
- """Test getting servers by ip6 with admin_api enabled and
- admin context
- """
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False,
- expected_attrs=None):
- self.assertIsNotNone(search_opts)
- self.assertIn('ip6', search_opts)
- self.assertEqual(search_opts['ip6'], 'ffff.*')
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers?ip6=ffff.*',
- use_admin_context=True)
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_all_server_details(self):
- expected_flavor = {
- "id": "1",
- "links": [
- {
- "rel": "bookmark",
- "href": 'http://localhost/flavors/1',
- },
- ],
- }
- expected_image = {
- "id": "10",
- "links": [
- {
- "rel": "bookmark",
- "href": 'http://localhost/images/10',
- },
- ],
- }
- req = fakes.HTTPRequestV3.blank('/servers/detail')
- res_dict = self.controller.detail(req)
-
- for i, s in enumerate(res_dict['servers']):
- self.assertEqual(s['id'], fakes.get_fake_uuid(i))
- self.assertEqual(s['hostId'], '')
- self.assertEqual(s['name'], 'server%d' % (i + 1))
- self.assertEqual(s['image'], expected_image)
- self.assertEqual(s['flavor'], expected_flavor)
- self.assertEqual(s['status'], 'BUILD')
- self.assertEqual(s['metadata']['seq'], str(i + 1))
-
- def test_get_all_server_details_with_host(self):
- """We want to make sure that if two instances are on the same host,
- then they return the same hostId. If two instances are on different
- hosts, they should return different hostIds. In this test,
- there are 5 instances - 2 on one host and 3 on another.
- """
-
- def return_servers_with_host(context, *args, **kwargs):
- return [fakes.stub_instance(i + 1, 'fake', 'fake', host=i % 2,
- uuid=fakes.get_fake_uuid(i))
- for i in xrange(5)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- return_servers_with_host)
-
- req = fakes.HTTPRequestV3.blank('/servers/detail')
- res_dict = self.controller.detail(req)
-
- server_list = res_dict['servers']
- host_ids = [server_list[0]['hostId'], server_list[1]['hostId']]
- self.assertTrue(host_ids[0] and host_ids[1])
- self.assertNotEqual(host_ids[0], host_ids[1])
-
- for i, s in enumerate(server_list):
- self.assertEqual(s['id'], fakes.get_fake_uuid(i))
- self.assertEqual(s['hostId'], host_ids[i % 2])
- self.assertEqual(s['name'], 'server%d' % (i + 1))
-
- def test_get_servers_joins_pci_devices(self):
- self.expected_attrs = None
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False,
- expected_attrs=None):
- self.expected_attrs = expected_attrs
- return []
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequestV3.blank('/servers', use_admin_context=True)
- self.assertIn('servers', self.controller.index(req))
- self.assertIn('pci_devices', self.expected_attrs)
-
-
-class ServersControllerDeleteTest(ControllerTest):
-
- def setUp(self):
- super(ServersControllerDeleteTest, self).setUp()
- self.server_delete_called = False
-
- def instance_destroy_mock(*args, **kwargs):
- self.server_delete_called = True
- deleted_at = timeutils.utcnow()
- return fake_instance.fake_db_instance(deleted_at=deleted_at)
-
- self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
-
- def _create_delete_request(self, uuid):
- fakes.stub_out_instance_quota(self.stubs, 0, 10)
- req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
- req.method = 'DELETE'
- return req
-
- def _delete_server_instance(self, uuid=FAKE_UUID):
- req = self._create_delete_request(uuid)
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
- self.controller.delete(req, uuid)
-
- def test_delete_server_instance(self):
- self._delete_server_instance()
- self.assertTrue(self.server_delete_called)
-
- def test_delete_server_instance_not_found(self):
- self.assertRaises(webob.exc.HTTPNotFound,
- self._delete_server_instance,
- uuid='non-existent-uuid')
-
- def test_delete_server_instance_while_building(self):
- req = self._create_delete_request(FAKE_UUID)
- self.controller.delete(req, FAKE_UUID)
-
- self.assertTrue(self.server_delete_called)
-
- def test_delete_locked_server(self):
- req = self._create_delete_request(FAKE_UUID)
- self.stubs.Set(compute_api.API, delete_types.SOFT_DELETE,
- fakes.fake_actions_to_locked_server)
- self.stubs.Set(compute_api.API, delete_types.DELETE,
- fakes.fake_actions_to_locked_server)
-
- self.assertRaises(webob.exc.HTTPConflict, self.controller.delete,
- req, FAKE_UUID)
-
- def test_delete_server_instance_while_resize(self):
- req = self._create_delete_request(FAKE_UUID)
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
- task_state=task_states.RESIZE_PREP))
-
- self.controller.delete(req, FAKE_UUID)
- # Delete shoud be allowed in any case, even during resizing,
- # because it may get stuck.
- self.assertTrue(self.server_delete_called)
-
- def test_delete_server_instance_if_not_launched(self):
- self.flags(reclaim_instance_interval=3600)
- req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
- req.method = 'DELETE'
-
- self.server_delete_called = False
-
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(launched_at=None))
-
- def instance_destroy_mock(*args, **kwargs):
- self.server_delete_called = True
- deleted_at = timeutils.utcnow()
- return fake_instance.fake_db_instance(deleted_at=deleted_at)
- self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
-
- self.controller.delete(req, FAKE_UUID)
- # delete() should be called for instance which has never been active,
- # even if reclaim_instance_interval has been set.
- self.assertEqual(self.server_delete_called, True)
-
-
-class ServersControllerRebuildInstanceTest(ControllerTest):
-
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- image_href = 'http://localhost/v3/fake/images/%s' % image_uuid
-
- def setUp(self):
- super(ServersControllerRebuildInstanceTest, self).setUp()
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
- self.body = {
- 'rebuild': {
- 'name': 'new_name',
- 'imageRef': self.image_href,
- 'metadata': {
- 'open': 'stack',
- },
- },
- }
- self.req = fakes.HTTPRequest.blank('/fake/servers/a/action')
- self.req.method = 'POST'
- self.req.headers["content-type"] = "application/json"
-
- def test_rebuild_instance_with_blank_metadata_key(self):
- self.body['rebuild']['metadata'][''] = 'world'
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(exception.ValidationError,
- self.controller._action_rebuild,
- self.req, FAKE_UUID, body=self.body)
-
- def test_rebuild_instance_with_metadata_key_too_long(self):
- self.body['rebuild']['metadata'][('a' * 260)] = 'world'
-
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(exception.ValidationError,
- self.controller._action_rebuild,
- self.req, FAKE_UUID, body=self.body)
-
- def test_rebuild_instance_with_metadata_value_too_long(self):
- self.body['rebuild']['metadata']['key1'] = ('a' * 260)
-
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(exception.ValidationError,
- self.controller._action_rebuild, self.req,
- FAKE_UUID, body=self.body)
-
- def test_rebuild_instance_with_metadata_value_not_string(self):
- self.body['rebuild']['metadata']['key1'] = 1
-
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(exception.ValidationError,
- self.controller._action_rebuild, self.req,
- FAKE_UUID, body=self.body)
-
- def test_rebuild_instance_fails_when_min_ram_too_small(self):
- # make min_ram larger than our instance ram size
- def fake_get_image(self, context, image_href, **kwargs):
- return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- name='public image', is_public=True,
- status='active', properties={'key1': 'value1'},
- min_ram="4096", min_disk="10")
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
-
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild,
- self.req, FAKE_UUID, body=self.body)
-
- def test_rebuild_instance_fails_when_min_disk_too_small(self):
- # make min_disk larger than our instance disk size
- def fake_get_image(self, context, image_href, **kwargs):
- return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- name='public image', is_public=True,
- status='active', properties={'key1': 'value1'},
- min_ram="128", min_disk="100000")
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild, self.req,
- FAKE_UUID, body=self.body)
-
- def test_rebuild_instance_image_too_large(self):
- # make image size larger than our instance disk size
- size = str(1000 * (1024 ** 3))
-
- def fake_get_image(self, context, image_href, **kwargs):
- return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- name='public image', is_public=True,
- status='active', size=size)
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild,
- self.req, FAKE_UUID, body=self.body)
-
- def test_rebuild_instance_name_all_blank(self):
- def fake_get_image(self, context, image_href, **kwargs):
- return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- name='public image', is_public=True, status='active')
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
- self.body['rebuild']['name'] = ' '
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(exception.ValidationError,
- self.controller._action_rebuild,
- self.req, FAKE_UUID, body=self.body)
-
- def test_rebuild_instance_with_deleted_image(self):
- def fake_get_image(self, context, image_href, **kwargs):
- return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- name='public image', is_public=True,
- status='DELETED')
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
-
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild,
- self.req, FAKE_UUID, body=self.body)
-
- def test_rebuild_instance_onset_file_limit_over_quota(self):
- def fake_get_image(self, context, image_href, **kwargs):
- return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- name='public image', is_public=True, status='active')
-
- with contextlib.nested(
- mock.patch.object(fake._FakeImageService, 'show',
- side_effect=fake_get_image),
- mock.patch.object(self.controller.compute_api, 'rebuild',
- side_effect=exception.OnsetFileLimitExceeded)
- ) as (
- show_mock, rebuild_mock
- ):
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPForbidden,
- self.controller._action_rebuild,
- self.req, FAKE_UUID, body=self.body)
-
- def test_start(self):
- self.mox.StubOutWithMock(compute_api.API, 'start')
- compute_api.API.start(mox.IgnoreArg(), mox.IgnoreArg())
- self.mox.ReplayAll()
-
- req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
- body = dict(start="")
- self.controller._start_server(req, FAKE_UUID, body)
-
- def test_start_policy_failed(self):
- rules = {
- "compute:v3:servers:start":
- common_policy.parse_rule("project_id:non_fake")
- }
- policy.set_rules(rules)
- req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
- body = dict(start="")
- exc = self.assertRaises(exception.PolicyNotAuthorized,
- self.controller._start_server,
- req, FAKE_UUID, body)
- self.assertIn("compute:v3:servers:start", exc.format_message())
-
- def test_start_not_ready(self):
- self.stubs.Set(compute_api.API, 'start', fake_start_stop_not_ready)
- req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
- body = dict(start="")
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._start_server, req, FAKE_UUID, body)
-
- def test_start_locked_server(self):
- self.stubs.Set(compute_api.API, 'start',
- fakes.fake_actions_to_locked_server)
- req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
- body = dict(start="")
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._start_server, req, FAKE_UUID, body)
-
- def test_start_invalid(self):
- self.stubs.Set(compute_api.API, 'start', fake_start_stop_invalid_state)
- req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
- body = dict(start="")
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._start_server, req, FAKE_UUID, body)
-
- def test_stop(self):
- self.mox.StubOutWithMock(compute_api.API, 'stop')
- compute_api.API.stop(mox.IgnoreArg(), mox.IgnoreArg())
- self.mox.ReplayAll()
-
- req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
- body = dict(stop="")
- self.controller._stop_server(req, FAKE_UUID, body)
-
- def test_stop_policy_failed(self):
- rules = {
- "compute:v3:servers:stop":
- common_policy.parse_rule("project_id:non_fake")
- }
- policy.set_rules(rules)
- req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
- body = dict(stop='')
- exc = self.assertRaises(exception.PolicyNotAuthorized,
- self.controller._stop_server,
- req, FAKE_UUID, body)
- self.assertIn("compute:v3:servers:stop", exc.format_message())
-
- def test_stop_not_ready(self):
- self.stubs.Set(compute_api.API, 'stop', fake_start_stop_not_ready)
- req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
- body = dict(stop="")
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._stop_server, req, FAKE_UUID, body)
-
- def test_stop_locked_server(self):
- self.stubs.Set(compute_api.API, 'stop',
- fakes.fake_actions_to_locked_server)
- req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
- body = dict(stop="")
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._stop_server, req, FAKE_UUID, body)
-
- def test_stop_invalid_state(self):
- self.stubs.Set(compute_api.API, 'stop', fake_start_stop_invalid_state)
- req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
- body = dict(start="")
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._stop_server, req, FAKE_UUID, body)
-
- def test_start_with_bogus_id(self):
- self.stubs.Set(db, 'instance_get_by_uuid',
- fake_instance_get_by_uuid_not_found)
- req = fakes.HTTPRequestV3.blank('/servers/test_inst/action')
- body = dict(start="")
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller._start_server, req, 'test_inst', body)
-
- def test_stop_with_bogus_id(self):
- self.stubs.Set(db, 'instance_get_by_uuid',
- fake_instance_get_by_uuid_not_found)
- req = fakes.HTTPRequestV3.blank('/servers/test_inst/action')
- body = dict(stop="")
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller._stop_server, req, 'test_inst', body)
-
-
-class ServersControllerUpdateTest(ControllerTest):
-
- def _get_request(self, body=None, options=None):
- if options:
- self.stubs.Set(db, 'instance_get',
- fakes.fake_instance_get(**options))
- req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
- req.method = 'PUT'
- req.content_type = 'application/json'
- req.body = jsonutils.dumps(body)
- return req
-
- def test_update_server_all_attributes(self):
- body = {'server': {
- 'name': 'server_test',
- }}
- req = self._get_request(body, {'name': 'server_test'})
- res_dict = self.controller.update(req, FAKE_UUID, body=body)
-
- self.assertEqual(res_dict['server']['id'], FAKE_UUID)
- self.assertEqual(res_dict['server']['name'], 'server_test')
-
- def test_update_server_name(self):
- body = {'server': {'name': 'server_test'}}
- req = self._get_request(body, {'name': 'server_test'})
- res_dict = self.controller.update(req, FAKE_UUID, body=body)
-
- self.assertEqual(res_dict['server']['id'], FAKE_UUID)
- self.assertEqual(res_dict['server']['name'], 'server_test')
-
- def test_update_server_name_too_long(self):
- body = {'server': {'name': 'x' * 256}}
- req = self._get_request(body, {'name': 'server_test'})
- self.assertRaises(exception.ValidationError, self.controller.update,
- req, FAKE_UUID, body=body)
-
- def test_update_server_name_all_blank_spaces(self):
- self.stubs.Set(db, 'instance_get',
- fakes.fake_instance_get(name='server_test'))
- req = fakes.HTTPRequest.blank('/v3/servers/%s' % FAKE_UUID)
- req.method = 'PUT'
- req.content_type = 'application/json'
- body = {'server': {'name': ' ' * 64}}
- req.body = jsonutils.dumps(body)
- self.assertRaises(exception.ValidationError, self.controller.update,
- req, FAKE_UUID, body=body)
-
- def test_update_server_admin_password_ignored(self):
- inst_dict = dict(name='server_test', admin_password='bacon')
- body = dict(server=inst_dict)
-
- def server_update(context, id, params):
- filtered_dict = {
- 'display_name': 'server_test',
- }
- self.assertEqual(params, filtered_dict)
- filtered_dict['uuid'] = id
- return filtered_dict
-
- self.stubs.Set(db, 'instance_update', server_update)
- # FIXME (comstud)
- # self.stubs.Set(db, 'instance_get',
- # return_server_with_attributes(name='server_test'))
-
- req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
- req.method = 'PUT'
- req.content_type = "application/json"
- req.body = jsonutils.dumps(body)
- res_dict = self.controller.update(req, FAKE_UUID, body=body)
-
- self.assertEqual(res_dict['server']['id'], FAKE_UUID)
- self.assertEqual(res_dict['server']['name'], 'server_test')
-
- def test_update_server_not_found(self):
- def fake_get(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id='fake')
-
- self.stubs.Set(compute_api.API, 'get', fake_get)
- body = {'server': {'name': 'server_test'}}
- req = self._get_request(body)
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
- req, FAKE_UUID, body=body)
-
- def test_update_server_not_found_on_update(self):
- def fake_update(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id='fake')
-
- self.stubs.Set(db, 'instance_update_and_get_original', fake_update)
- body = {'server': {'name': 'server_test'}}
- req = self._get_request(body)
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
- req, FAKE_UUID, body=body)
-
- def test_update_server_policy_fail(self):
- rule = {'compute:update': common_policy.parse_rule('role:admin')}
- policy.set_rules(rule)
- body = {'server': {'name': 'server_test'}}
- req = self._get_request(body, {'name': 'server_test'})
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.update, req, FAKE_UUID, body=body)
-
-
-class ServerStatusTest(test.TestCase):
-
- def setUp(self):
- super(ServerStatusTest, self).setUp()
- fakes.stub_out_nw_api(self.stubs)
-
- ext_info = plugins.LoadedExtensionInfo()
- self.controller = servers.ServersController(extension_info=ext_info)
-
- def _get_with_state(self, vm_state, task_state=None):
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_state,
- task_state=task_state))
-
- request = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
- return self.controller.show(request, FAKE_UUID)
-
- def test_active(self):
- response = self._get_with_state(vm_states.ACTIVE)
- self.assertEqual(response['server']['status'], 'ACTIVE')
-
- def test_reboot(self):
- response = self._get_with_state(vm_states.ACTIVE,
- task_states.REBOOTING)
- self.assertEqual(response['server']['status'], 'REBOOT')
-
- def test_reboot_hard(self):
- response = self._get_with_state(vm_states.ACTIVE,
- task_states.REBOOTING_HARD)
- self.assertEqual(response['server']['status'], 'HARD_REBOOT')
-
- def test_reboot_resize_policy_fail(self):
- def fake_get_server(context, req, id):
- return fakes.stub_instance(id)
-
- self.stubs.Set(self.controller, '_get_server', fake_get_server)
-
- rule = {'compute:reboot':
- common_policy.parse_rule('role:admin')}
- policy.set_rules(rule)
- req = fakes.HTTPRequestV3.blank('/servers/1234/action')
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller._action_reboot, req, '1234',
- {'reboot': {'type': 'HARD'}})
-
- def test_rebuild(self):
- response = self._get_with_state(vm_states.ACTIVE,
- task_states.REBUILDING)
- self.assertEqual(response['server']['status'], 'REBUILD')
-
- def test_rebuild_error(self):
- response = self._get_with_state(vm_states.ERROR)
- self.assertEqual(response['server']['status'], 'ERROR')
-
- def test_resize(self):
- response = self._get_with_state(vm_states.ACTIVE,
- task_states.RESIZE_PREP)
- self.assertEqual(response['server']['status'], 'RESIZE')
-
- def test_confirm_resize_policy_fail(self):
- def fake_get_server(context, req, id):
- return fakes.stub_instance(id)
-
- self.stubs.Set(self.controller, '_get_server', fake_get_server)
-
- rule = {'compute:confirm_resize':
- common_policy.parse_rule('role:admin')}
- policy.set_rules(rule)
- req = fakes.HTTPRequestV3.blank('/servers/1234/action')
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller._action_confirm_resize, req, '1234', {})
-
- def test_verify_resize(self):
- response = self._get_with_state(vm_states.RESIZED, None)
- self.assertEqual(response['server']['status'], 'VERIFY_RESIZE')
-
- def test_revert_resize(self):
- response = self._get_with_state(vm_states.RESIZED,
- task_states.RESIZE_REVERTING)
- self.assertEqual(response['server']['status'], 'REVERT_RESIZE')
-
- def test_revert_resize_policy_fail(self):
- def fake_get_server(context, req, id):
- return fakes.stub_instance(id)
-
- self.stubs.Set(self.controller, '_get_server', fake_get_server)
-
- rule = {'compute:revert_resize':
- common_policy.parse_rule('role:admin')}
- policy.set_rules(rule)
- req = fakes.HTTPRequestV3.blank('/servers/1234/action')
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller._action_revert_resize, req, '1234', {})
-
- def test_password_update(self):
- response = self._get_with_state(vm_states.ACTIVE,
- task_states.UPDATING_PASSWORD)
- self.assertEqual(response['server']['status'], 'PASSWORD')
-
- def test_stopped(self):
- response = self._get_with_state(vm_states.STOPPED)
- self.assertEqual(response['server']['status'], 'SHUTOFF')
-
-
-class ServersControllerCreateTest(test.TestCase):
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/123/flavors/3'
-
- def setUp(self):
- """Shared implementation for tests below that create instance."""
- super(ServersControllerCreateTest, self).setUp()
-
- self.flags(verbose=True,
- enable_instance_password=True)
- self.instance_cache_num = 0
- self.instance_cache_by_id = {}
- self.instance_cache_by_uuid = {}
-
- fakes.stub_out_nw_api(self.stubs)
-
- ext_info = plugins.LoadedExtensionInfo()
- self.controller = servers.ServersController(extension_info=ext_info)
-
- def instance_create(context, inst):
- inst_type = flavors.get_flavor_by_flavor_id(3)
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- def_image_ref = 'http://localhost/images/%s' % image_uuid
- self.instance_cache_num += 1
- instance = fake_instance.fake_db_instance(**{
- 'id': self.instance_cache_num,
- 'display_name': inst['display_name'] or 'test',
- 'uuid': FAKE_UUID,
- 'instance_type': inst_type,
- 'image_ref': inst.get('image_ref', def_image_ref),
- 'user_id': 'fake',
- 'project_id': 'fake',
- 'reservation_id': inst['reservation_id'],
- "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
- "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
- "config_drive": None,
- "progress": 0,
- "fixed_ips": [],
- "task_state": "",
- "vm_state": "",
- "root_device_name": inst.get('root_device_name', 'vda'),
- })
-
- self.instance_cache_by_id[instance['id']] = instance
- self.instance_cache_by_uuid[instance['uuid']] = instance
- return instance
-
- def instance_get(context, instance_id):
- """Stub for compute/api create() pulling in instance after
- scheduling
- """
- return self.instance_cache_by_id[instance_id]
-
- def instance_update(context, uuid, values):
- instance = self.instance_cache_by_uuid[uuid]
- instance.update(values)
- return instance
-
- def server_update(context, instance_uuid, params, update_cells=True):
- inst = self.instance_cache_by_uuid[instance_uuid]
- inst.update(params)
- return inst
-
- def server_update_and_get_original(
- context, instance_uuid, params, update_cells=False,
- columns_to_join=None):
- inst = self.instance_cache_by_uuid[instance_uuid]
- inst.update(params)
- return (inst, inst)
-
- def fake_method(*args, **kwargs):
- pass
-
- def project_get_networks(context, user_id):
- return dict(id='1', host='localhost')
-
- def queue_get_for(context, *args):
- return 'network_topic'
-
- fakes.stub_out_rate_limiting(self.stubs)
- fakes.stub_out_key_pair_funcs(self.stubs)
- fake.stub_out_image_service(self.stubs)
- self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
- self.stubs.Set(db, 'project_get_networks',
- project_get_networks)
- self.stubs.Set(db, 'instance_create', instance_create)
- self.stubs.Set(db, 'instance_system_metadata_update',
- fake_method)
- self.stubs.Set(db, 'instance_get', instance_get)
- self.stubs.Set(db, 'instance_update', instance_update)
- self.stubs.Set(db, 'instance_update_and_get_original',
- server_update_and_get_original)
- self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
- fake_method)
- self.body = {
- 'server': {
- 'name': 'server_test',
- 'imageRef': self.image_uuid,
- 'flavorRef': self.flavor_ref,
- 'metadata': {
- 'hello': 'world',
- 'open': 'stack',
- },
- },
- }
- self.bdm = [{'delete_on_termination': 1,
- 'device_name': 123,
- 'volume_size': 1,
- 'volume_id': '11111111-1111-1111-1111-111111111111'}]
-
- self.req = fakes.HTTPRequest.blank('/fake/servers')
- self.req.method = 'POST'
- self.req.headers["content-type"] = "application/json"
-
- def _check_admin_password_len(self, server_dict):
- """utility function - check server_dict for admin_password length."""
- self.assertEqual(CONF.password_length,
- len(server_dict["adminPass"]))
-
- def _check_admin_password_missing(self, server_dict):
- """utility function - check server_dict for admin_password absence."""
- self.assertNotIn("adminPass", server_dict)
-
- def _test_create_instance(self, flavor=2):
- image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
- self.body['server']['imageRef'] = image_uuid
- self.body['server']['flavorRef'] = flavor
- self.req.body = jsonutils.dumps(self.body)
- server = self.controller.create(self.req, body=self.body).obj['server']
- self._check_admin_password_len(server)
- self.assertEqual(FAKE_UUID, server['id'])
-
- def test_create_instance_private_flavor(self):
- values = {
- 'name': 'fake_name',
- 'memory_mb': 512,
- 'vcpus': 1,
- 'root_gb': 10,
- 'ephemeral_gb': 10,
- 'flavorid': '1324',
- 'swap': 0,
- 'rxtx_factor': 0.5,
- 'vcpu_weight': 1,
- 'disabled': False,
- 'is_public': False,
- }
- db.flavor_create(context.get_admin_context(), values)
- self.assertRaises(webob.exc.HTTPBadRequest, self._test_create_instance,
- flavor=1324)
-
- def test_create_server_bad_image_href(self):
- image_href = 1
- self.body['server']['min_count'] = 1
- self.body['server']['imageRef'] = image_href,
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(exception.ValidationError,
- self.controller.create,
- self.req, body=self.body)
- # TODO(cyeoh): bp-v3-api-unittests
- # This needs to be ported to the os-networks extension tests
- # def test_create_server_with_invalid_networks_parameter(self):
- # self.ext_mgr.extensions = {'os-networks': 'fake'}
- # image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- # flavor_ref = 'http://localhost/123/flavors/3'
- # body = {
- # 'server': {
- # 'name': 'server_test',
- # 'imageRef': image_href,
- # 'flavorRef': flavor_ref,
- # 'networks': {'uuid': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'},
- # }
- # }
- # req = fakes.HTTPRequest.blank('/v2/fake/servers')
- # req.method = 'POST'
- # req.body = jsonutils.dumps(body)
- # req.headers["content-type"] = "application/json"
- # self.assertRaises(webob.exc.HTTPBadRequest,
- # self.controller.create,
- # req,
- # body)
-
- def test_create_server_with_deleted_image(self):
- # Get the fake image service so we can set the status to deleted
- (image_service, image_id) = glance.get_remote_image_service(
- context, '')
- image_service.update(context, self.image_uuid, {'status': 'DELETED'})
- self.addCleanup(image_service.update, context, self.image_uuid,
- {'status': 'active'})
-
- self.body['server']['flavorRef'] = 2
- self.req.body = jsonutils.dumps(self.body)
- with testtools.ExpectedException(
- webob.exc.HTTPBadRequest,
- 'Image 76fa36fc-c930-4bf3-8c8a-ea2a2420deb6 is not active.'):
- self.controller.create(self.req, body=self.body)
-
- def test_create_server_image_too_large(self):
- # Get the fake image service so we can set the status to deleted
- (image_service, image_id) = glance.get_remote_image_service(
- context, self.image_uuid)
-
- image = image_service.show(context, image_id)
-
- orig_size = image['size']
- new_size = str(1000 * (1024 ** 3))
- image_service.update(context, self.image_uuid, {'size': new_size})
-
- self.addCleanup(image_service.update, context, self.image_uuid,
- {'size': orig_size})
-
- self.body['server']['flavorRef'] = 2
- self.req.body = jsonutils.dumps(self.body)
-
- with testtools.ExpectedException(
- webob.exc.HTTPBadRequest,
- "Flavor's disk is too small for requested image."):
- self.controller.create(self.req, body=self.body)
-
- def test_create_instance_image_ref_is_bookmark(self):
- image_href = 'http://localhost/images/%s' % self.image_uuid
- self.body['server']['imageRef'] = image_href
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, body=self.body).obj
-
- server = res['server']
- self.assertEqual(FAKE_UUID, server['id'])
-
- def test_create_instance_image_ref_is_invalid(self):
- image_uuid = 'this_is_not_a_valid_uuid'
- image_href = 'http://localhost/images/%s' % image_uuid
- flavor_ref = 'http://localhost/flavors/3'
- self.body['server']['imageRef'] = image_href
- self.body['server']['flavorRef'] = flavor_ref
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- self.req, body=self.body)
-
- def test_create_instance_no_key_pair(self):
- fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
- self._test_create_instance()
-
- def _test_create_extra(self, params, no_image=False):
- self.body['server']['flavorRef'] = 2
- if no_image:
- self.body['server'].pop('imageRef', None)
- self.body['server'].update(params)
- self.req.body = jsonutils.dumps(self.body)
- self.req.headers["content-type"] = "application/json"
- self.controller.create(self.req, body=self.body).obj['server']
-
- # TODO(cyeoh): bp-v3-api-unittests
- # This needs to be ported to the os-keypairs extension tests
- # def test_create_instance_with_keypairs_enabled(self):
- # self.ext_mgr.extensions = {'os-keypairs': 'fake'}
- # key_name = 'green'
- #
- # params = {'key_name': key_name}
- # old_create = compute_api.API.create
- #
- # # NOTE(sdague): key pair goes back to the database,
- # # so we need to stub it out for tests
- # def key_pair_get(context, user_id, name):
- # return {'public_key': 'FAKE_KEY',
- # 'fingerprint': 'FAKE_FINGERPRINT',
- # 'name': name}
- #
- # def create(*args, **kwargs):
- # self.assertEqual(kwargs['key_name'], key_name)
- # return old_create(*args, **kwargs)
- #
- # self.stubs.Set(db, 'key_pair_get', key_pair_get)
- # self.stubs.Set(compute_api.API, 'create', create)
- # self._test_create_extra(params)
- #
- # TODO(cyeoh): bp-v3-api-unittests
- # This needs to be ported to the os-networks extension tests
- # def test_create_instance_with_networks_enabled(self):
- # self.ext_mgr.extensions = {'os-networks': 'fake'}
- # net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- # requested_networks = [{'uuid': net_uuid}]
- # params = {'networks': requested_networks}
- # old_create = compute_api.API.create
-
- # def create(*args, **kwargs):
- # result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None)]
- # self.assertEqual(kwargs['requested_networks'], result)
- # return old_create(*args, **kwargs)
-
- # self.stubs.Set(compute_api.API, 'create', create)
- # self._test_create_extra(params)
-
- def test_create_instance_with_port_with_no_fixed_ips(self):
- port_id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'port': port_id}]
- params = {'networks': requested_networks}
-
- def fake_create(*args, **kwargs):
- raise exception.PortRequiresFixedIP(port_id=port_id)
-
- self.stubs.Set(compute_api.API, 'create', fake_create)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, params)
-
- @mock.patch.object(compute_api.API, 'create')
- def test_create_instance_raise_user_data_too_large(self, mock_create):
- mock_create.side_effect = exception.InstanceUserDataTooLarge(
- maxsize=1, length=2)
-
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create,
- self.req, body=self.body)
-
- def test_create_instance_with_network_with_no_subnet(self):
- network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'uuid': network}]
- params = {'networks': requested_networks}
-
- def fake_create(*args, **kwargs):
- raise exception.NetworkRequiresSubnet(network_uuid=network)
-
- self.stubs.Set(compute_api.API, 'create', fake_create)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, params)
-
- def test_create_instance_with_non_unique_secgroup_name(self):
- network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'uuid': network}]
- params = {'networks': requested_networks,
- 'security_groups': [{'name': 'dup'}, {'name': 'dup'}]}
-
- def fake_create(*args, **kwargs):
- raise exception.NoUniqueMatch("No Unique match found for ...")
-
- self.stubs.Set(compute_api.API, 'create', fake_create)
- self.assertRaises(webob.exc.HTTPConflict,
- self._test_create_extra, params)
-
- def test_create_instance_with_networks_disabled_neutronv2(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- requested_networks = [{'uuid': net_uuid}]
- params = {'networks': requested_networks}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None,
- None, None)]
- self.assertEqual(result, kwargs['requested_networks'].as_tuples())
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_with_networks_disabled(self):
- net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- requested_networks = [{'uuid': net_uuid}]
- params = {'networks': requested_networks}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertIsNone(kwargs['requested_networks'])
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_with_pass_disabled(self):
- # test with admin passwords disabled See lp bug 921814
- self.flags(enable_instance_password=False)
-
- # proper local hrefs must start with 'http://localhost/v3/'
- self.flags(enable_instance_password=False)
- image_href = 'http://localhost/v2/fake/images/%s' % self.image_uuid
- self.body['server']['imageRef'] = image_href
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, body=self.body).obj
-
- server = res['server']
- self._check_admin_password_missing(server)
- self.assertEqual(FAKE_UUID, server['id'])
-
- def test_create_instance_name_too_long(self):
- # proper local hrefs must start with 'http://localhost/v3/'
- image_href = 'http://localhost/v2/images/%s' % self.image_uuid
- self.body['server']['name'] = 'X' * 256
- self.body['server']['imageRef'] = image_href
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(exception.ValidationError, self.controller.create,
- self.req, body=self.body)
-
- def test_create_instance_name_all_blank_spaces(self):
- # proper local hrefs must start with 'http://localhost/v2/'
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- image_href = 'http://localhost/v3/images/%s' % image_uuid
- flavor_ref = 'http://localhost/flavors/3'
- body = {
- 'server': {
- 'name': ' ' * 64,
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
- 'metadata': {
- 'hello': 'world',
- 'open': 'stack',
- },
- },
- }
-
- req = fakes.HTTPRequest.blank('/v3/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- self.assertRaises(exception.ValidationError,
- self.controller.create, req, body=body)
-
- def test_create_instance(self):
- # proper local hrefs must start with 'http://localhost/v3/'
- image_href = 'http://localhost/v2/images/%s' % self.image_uuid
- self.body['server']['imageRef'] = image_href
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, body=self.body).obj
-
- server = res['server']
- self._check_admin_password_len(server)
- self.assertEqual(FAKE_UUID, server['id'])
-
- def test_create_instance_extension_create_exception(self):
- def fake_keypair_server_create(self, server_dict,
- create_kwargs):
- raise KeyError
-
- self.stubs.Set(keypairs.Keypairs, 'server_create',
- fake_keypair_server_create)
- # proper local hrefs must start with 'http://localhost/v3/'
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- image_href = 'http://localhost/v3/images/%s' % image_uuid
- flavor_ref = 'http://localhost/123/flavors/3'
- body = {
- 'server': {
- 'name': 'server_test',
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
- 'metadata': {
- 'hello': 'world',
- 'open': 'stack',
- },
- },
- }
-
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- self.assertRaises(webob.exc.HTTPInternalServerError,
- self.controller.create, req, body=body)
-
- def test_create_instance_pass_disabled(self):
- self.flags(enable_instance_password=False)
- # proper local hrefs must start with 'http://localhost/v3/'
- image_href = 'http://localhost/v2/images/%s' % self.image_uuid
- self.body['server']['imageRef'] = image_href
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, body=self.body).obj
-
- server = res['server']
- self._check_admin_password_missing(server)
- self.assertEqual(FAKE_UUID, server['id'])
-
- def test_create_instance_too_much_metadata(self):
- self.flags(quota_metadata_items=1)
- image_href = 'http://localhost/v2/images/%s' % self.image_uuid
- self.body['server']['imageRef'] = image_href
- self.body['server']['metadata']['vote'] = 'fiddletown'
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPForbidden,
- self.controller.create, self.req, body=self.body)
-
- def test_create_instance_metadata_key_too_long(self):
- self.flags(quota_metadata_items=1)
- image_href = 'http://localhost/v2/images/%s' % self.image_uuid
- self.body['server']['imageRef'] = image_href
- self.body['server']['metadata'] = {('a' * 260): '12345'}
-
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(exception.ValidationError,
- self.controller.create, self.req, body=self.body)
-
- def test_create_instance_metadata_value_too_long(self):
- self.flags(quota_metadata_items=1)
- image_href = 'http://localhost/v2/images/%s' % self.image_uuid
- self.body['server']['imageRef'] = image_href
- self.body['server']['metadata'] = {'key1': ('a' * 260)}
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(exception.ValidationError,
- self.controller.create, self.req, body=self.body)
-
- def test_create_instance_metadata_key_blank(self):
- self.flags(quota_metadata_items=1)
- image_href = 'http://localhost/v2/images/%s' % self.image_uuid
- self.body['server']['imageRef'] = image_href
- self.body['server']['metadata'] = {'': 'abcd'}
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(exception.ValidationError,
- self.controller.create, self.req, body=self.body)
-
- def test_create_instance_metadata_not_dict(self):
- self.flags(quota_metadata_items=1)
- image_href = 'http://localhost/v2/images/%s' % self.image_uuid
- self.body['server']['imageRef'] = image_href
- self.body['server']['metadata'] = 'string'
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(exception.ValidationError,
- self.controller.create, self.req, body=self.body)
-
- def test_create_instance_metadata_key_not_string(self):
- self.flags(quota_metadata_items=1)
- image_href = 'http://localhost/v2/images/%s' % self.image_uuid
- self.body['server']['imageRef'] = image_href
- self.body['server']['metadata'] = {1: 'test'}
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(exception.ValidationError,
- self.controller.create, self.req, body=self.body)
-
- def test_create_instance_metadata_value_not_string(self):
- self.flags(quota_metadata_items=1)
- image_href = 'http://localhost/v2/images/%s' % self.image_uuid
- self.body['server']['imageRef'] = image_href
- self.body['server']['metadata'] = {'test': ['a', 'list']}
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(exception.ValidationError,
- self.controller.create, self.req, body=self.body)
-
- def test_create_user_data_malformed_bad_request(self):
- params = {'user_data': 'u1234'}
- self.assertRaises(exception.ValidationError,
- self._test_create_extra, params)
-
- def test_create_instance_invalid_key_name(self):
- image_href = 'http://localhost/v2/images/2'
- self.body['server']['imageRef'] = image_href
- self.body['server']['key_name'] = 'nonexistentkey'
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, body=self.body)
-
- def test_create_instance_valid_key_name(self):
- self.body['server']['key_name'] = 'key'
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, body=self.body).obj
-
- self.assertEqual(FAKE_UUID, res["server"]["id"])
- self._check_admin_password_len(res["server"])
-
- def test_create_instance_invalid_flavor_href(self):
- image_href = 'http://localhost/v2/images/2'
- flavor_ref = 'http://localhost/v2/flavors/asdf'
- self.body['server']['imageRef'] = image_href
- self.body['server']['flavorRef'] = flavor_ref
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, body=self.body)
-
- def test_create_instance_invalid_flavor_id_int(self):
- image_href = 'http://localhost/v2/images/2'
- flavor_ref = -1
- self.body['server']['imageRef'] = image_href
- self.body['server']['flavorRef'] = flavor_ref
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, body=self.body)
-
- def test_create_instance_bad_flavor_href(self):
- image_href = 'http://localhost/v2/images/2'
- flavor_ref = 'http://localhost/v2/flavors/17'
- self.body['server']['imageRef'] = image_href
- self.body['server']['flavorRef'] = flavor_ref
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, body=self.body)
-
- def test_create_instance_bad_href(self):
- image_href = 'asdf'
- self.body['server']['imageRef'] = image_href
- self.req.body = jsonutils.dumps(self.body)
-
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, body=self.body)
-
- def test_create_instance_local_href(self):
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, body=self.body).obj
-
- server = res['server']
- self.assertEqual(FAKE_UUID, server['id'])
-
- def test_create_instance_admin_password(self):
- self.body['server']['flavorRef'] = 3
- self.body['server']['adminPass'] = 'testpass'
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, body=self.body).obj
-
- server = res['server']
- self.assertEqual(server['adminPass'],
- self.body['server']['adminPass'])
-
- def test_create_instance_admin_password_pass_disabled(self):
- self.flags(enable_instance_password=False)
- self.body['server']['flavorRef'] = 3
- self.body['server']['adminPass'] = 'testpass'
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, body=self.body).obj
-
- self.assertIn('server', res)
- self.assertIn('adminPass', self.body['server'])
-
- def test_create_instance_admin_password_empty(self):
- self.body['server']['flavorRef'] = 3
- self.body['server']['adminPass'] = ''
- self.req.body = jsonutils.dumps(self.body)
-
- # The fact that the action doesn't raise is enough validation
- self.controller.create(self.req, body=self.body)
-
- def test_create_location(self):
- selfhref = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
- self.req.body = jsonutils.dumps(self.body)
- robj = self.controller.create(self.req, body=self.body)
-
- self.assertEqual(robj['Location'], selfhref)
-
- def _do_test_create_instance_above_quota(self, resource, allowed, quota,
- expected_msg):
- fakes.stub_out_instance_quota(self.stubs, allowed, quota, resource)
- self.body['server']['flavorRef'] = 3
- self.req.body = jsonutils.dumps(self.body)
- try:
- self.controller.create(self.req, body=self.body).obj['server']
- self.fail('expected quota to be exceeded')
- except webob.exc.HTTPForbidden as e:
- self.assertEqual(e.explanation, expected_msg)
-
- def test_create_instance_above_quota_instances(self):
- msg = _('Quota exceeded for instances: Requested 1, but'
- ' already used 10 of 10 instances')
- self._do_test_create_instance_above_quota('instances', 0, 10, msg)
-
- def test_create_instance_above_quota_ram(self):
- msg = _('Quota exceeded for ram: Requested 4096, but'
- ' already used 8192 of 10240 ram')
- self._do_test_create_instance_above_quota('ram', 2048, 10 * 1024, msg)
-
- def test_create_instance_above_quota_cores(self):
- msg = _('Quota exceeded for cores: Requested 2, but'
- ' already used 9 of 10 cores')
- self._do_test_create_instance_above_quota('cores', 1, 10, msg)
-
- def test_create_instance_above_quota_server_group_members(self):
- ctxt = context.get_admin_context()
- fake_group = objects.InstanceGroup(ctxt)
- fake_group.create()
-
- def fake_count(context, name, group, user_id):
- self.assertEqual(name, "server_group_members")
- self.assertEqual(group.uuid, fake_group.uuid)
- self.assertEqual(user_id,
- self.req.environ['nova.context'].user_id)
- return 10
-
- def fake_limit_check(context, **kwargs):
- if 'server_group_members' in kwargs:
- raise exception.OverQuota(overs={})
-
- def fake_instance_destroy(context, uuid, constraint):
- return fakes.stub_instance(1)
-
- self.stubs.Set(fakes.QUOTAS, 'count', fake_count)
- self.stubs.Set(fakes.QUOTAS, 'limit_check', fake_limit_check)
- self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
- self.body['os:scheduler_hints'] = {'group': fake_group.uuid}
- self.req.body = jsonutils.dumps(self.body)
- expected_msg = "Quota exceeded, too many servers in group"
-
- try:
- self.controller.create(self.req, body=self.body).obj
- self.fail('expected quota to be exceeded')
- except webob.exc.HTTPForbidden as e:
- self.assertEqual(e.explanation, expected_msg)
-
- def test_create_instance_above_quota_server_groups(self):
-
- def fake_reserve(contex, **deltas):
- if 'server_groups' in deltas:
- raise exception.OverQuota(overs={})
-
- def fake_instance_destroy(context, uuid, constraint):
- return fakes.stub_instance(1)
-
- self.stubs.Set(fakes.QUOTAS, 'reserve', fake_reserve)
- self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
- self.body['os:scheduler_hints'] = {'group': 'fake_group'}
- self.req.body = jsonutils.dumps(self.body)
-
- expected_msg = "Quota exceeded, too many server groups."
-
- try:
- self.controller.create(self.req, body=self.body).obj
- self.fail('expected quota to be exceeded')
- except webob.exc.HTTPForbidden as e:
- self.assertEqual(e.explanation, expected_msg)
-
- def test_create_instance_with_neutronv2_port_in_use(self):
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'uuid': network, 'port': port}]
- params = {'networks': requested_networks}
-
- def fake_create(*args, **kwargs):
- raise exception.PortInUse(port_id=port)
-
- self.stubs.Set(compute_api.API, 'create', fake_create)
- self.assertRaises(webob.exc.HTTPConflict,
- self._test_create_extra, params)
-
- @mock.patch.object(compute_api.API, 'create')
- def test_create_instance_public_network_non_admin(self, mock_create):
- public_network_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- params = {'networks': [{'uuid': public_network_uuid}]}
- self.req.body = jsonutils.dumps(self.body)
- mock_create.side_effect = exception.ExternalNetworkAttachForbidden(
- network_uuid=public_network_uuid)
- self.assertRaises(webob.exc.HTTPForbidden,
- self._test_create_extra, params)
-
- @mock.patch.object(compute_api.API, 'create')
- def test_create_multiple_instance_with_specified_ip_neutronv2(self,
- _api_mock):
- _api_mock.side_effect = exception.InvalidFixedIpAndMaxCountRequest(
- reason="")
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- address = '10.0.0.1'
- requested_networks = [{'uuid': network, 'fixed_ip': address,
- 'port': port}]
- params = {'networks': requested_networks}
- self.body['server']['max_count'] = 2
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, params)
-
- def test_create_multiple_instance_with_neutronv2_port(self):
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'uuid': network, 'port': port}]
- params = {'networks': requested_networks}
- self.body['server']['max_count'] = 2
-
- def fake_create(*args, **kwargs):
- msg = _("Unable to launch multiple instances with"
- " a single configured port ID. Please launch your"
- " instance one by one with different ports.")
- raise exception.MultiplePortsNotApplicable(reason=msg)
-
- self.stubs.Set(compute_api.API, 'create', fake_create)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, params)
-
- def test_create_instance_with_neturonv2_not_found_network(self):
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- requested_networks = [{'uuid': network}]
- params = {'networks': requested_networks}
-
- def fake_create(*args, **kwargs):
- raise exception.NetworkNotFound(network_id=network)
-
- self.stubs.Set(compute_api.API, 'create', fake_create)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, params)
-
- def test_create_instance_with_neutronv2_port_not_found(self):
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'uuid': network, 'port': port}]
- params = {'networks': requested_networks}
-
- def fake_create(*args, **kwargs):
- raise exception.PortNotFound(port_id=port)
-
- self.stubs.Set(compute_api.API, 'create', fake_create)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, params)
-
- @mock.patch.object(compute_api.API, 'create')
- def test_create_instance_with_network_ambiguous(self, mock_create):
- mock_create.side_effect = exception.NetworkAmbiguous()
- self.assertRaises(webob.exc.HTTPConflict,
- self._test_create_extra, {})
-
- @mock.patch.object(compute_api.API, 'create',
- side_effect=exception.InstanceExists(
- name='instance-name'))
- def test_create_instance_raise_instance_exists(self, mock_create):
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller.create,
- self.req, body=self.body)
-
-
-class ServersControllerCreateTestWithMock(test.TestCase):
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/123/flavors/3'
-
- def setUp(self):
- """Shared implementation for tests below that create instance."""
- super(ServersControllerCreateTestWithMock, self).setUp()
-
- self.flags(verbose=True,
- enable_instance_password=True)
- self.instance_cache_num = 0
- self.instance_cache_by_id = {}
- self.instance_cache_by_uuid = {}
-
- ext_info = plugins.LoadedExtensionInfo()
- self.controller = servers.ServersController(extension_info=ext_info)
-
- self.body = {
- 'server': {
- 'name': 'server_test',
- 'imageRef': self.image_uuid,
- 'flavorRef': self.flavor_ref,
- 'metadata': {
- 'hello': 'world',
- 'open': 'stack',
- },
- },
- }
- self.req = fakes.HTTPRequest.blank('/fake/servers')
- self.req.method = 'POST'
- self.req.headers["content-type"] = "application/json"
-
- def _test_create_extra(self, params, no_image=False):
- self.body['server']['flavorRef'] = 2
- if no_image:
- self.body['server'].pop('imageRef', None)
- self.body['server'].update(params)
- self.req.body = jsonutils.dumps(self.body)
- self.req.headers["content-type"] = "application/json"
- self.controller.create(self.req, body=self.body).obj['server']
-
- @mock.patch.object(compute_api.API, 'create')
- def test_create_instance_with_neutronv2_fixed_ip_already_in_use(self,
- create_mock):
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- address = '10.0.2.3'
- requested_networks = [{'uuid': network, 'fixed_ip': address}]
- params = {'networks': requested_networks}
- create_mock.side_effect = exception.FixedIpAlreadyInUse(
- address=address,
- instance_uuid=network)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, params)
- self.assertEqual(1, len(create_mock.call_args_list))
-
- @mock.patch.object(compute_api.API, 'create',
- side_effect=exception.InvalidVolume(reason='error'))
- def test_create_instance_with_invalid_volume_error(self, create_mock):
- # Tests that InvalidVolume is translated to a 400 error.
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, {})
-
-
-class ServersViewBuilderTest(test.TestCase):
-
- def setUp(self):
- super(ServersViewBuilderTest, self).setUp()
- CONF.set_override('host', 'localhost', group='glance')
- self.flags(use_ipv6=True)
- db_inst = fakes.stub_instance(
- id=1,
- image_ref="5",
- uuid="deadbeef-feed-edee-beef-d0ea7beefedd",
- display_name="test_server",
- include_fake_metadata=False)
-
- privates = ['172.19.0.1']
- publics = ['192.168.0.3']
- public6s = ['b33f::fdee:ddff:fecc:bbaa']
-
- def nw_info(*args, **kwargs):
- return [(None, {'label': 'public',
- 'ips': [dict(ip=ip) for ip in publics],
- 'ip6s': [dict(ip=ip) for ip in public6s]}),
- (None, {'label': 'private',
- 'ips': [dict(ip=ip) for ip in privates]})]
-
- def floaters(*args, **kwargs):
- return []
-
- fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
- fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
- floaters)
-
- self.uuid = db_inst['uuid']
- self.view_builder = views.servers.ViewBuilderV3()
- self.request = fakes.HTTPRequestV3.blank("")
- self.request.context = context.RequestContext('fake', 'fake')
- self.instance = fake_instance.fake_instance_obj(
- self.request.context,
- expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
- **db_inst)
-
- def test_get_flavor_valid_instance_type(self):
- flavor_bookmark = "http://localhost/flavors/1"
- expected = {"id": "1",
- "links": [{"rel": "bookmark",
- "href": flavor_bookmark}]}
- result = self.view_builder._get_flavor(self.request, self.instance)
- self.assertEqual(result, expected)
-
- def test_build_server(self):
- self_link = "http://localhost/v3/servers/%s" % self.uuid
- bookmark_link = "http://localhost/servers/%s" % self.uuid
- expected_server = {
- "server": {
- "id": self.uuid,
- "name": "test_server",
- "links": [
- {
- "rel": "self",
- "href": self_link,
- },
- {
- "rel": "bookmark",
- "href": bookmark_link,
- },
- ],
- }
- }
-
- output = self.view_builder.basic(self.request, self.instance)
- self.assertThat(output, matchers.DictMatches(expected_server))
-
- def test_build_server_with_project_id(self):
- expected_server = {
- "server": {
- "id": self.uuid,
- "name": "test_server",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v3/servers/%s" %
- self.uuid,
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/servers/%s" % self.uuid,
- },
- ],
- }
- }
-
- output = self.view_builder.basic(self.request, self.instance)
- self.assertThat(output, matchers.DictMatches(expected_server))
-
- def test_build_server_detail(self):
- image_bookmark = "http://localhost/images/5"
- flavor_bookmark = "http://localhost/flavors/1"
- self_link = "http://localhost/v3/servers/%s" % self.uuid
- bookmark_link = "http://localhost/servers/%s" % self.uuid
- expected_server = {
- "server": {
- "id": self.uuid,
- "user_id": "fake_user",
- "tenant_id": "fake_project",
- "updated": "2010-11-11T11:00:00Z",
- "created": "2010-10-10T12:00:00Z",
- "progress": 0,
- "name": "test_server",
- "status": "BUILD",
- "hostId": '',
- "image": {
- "id": "5",
- "links": [
- {
- "rel": "bookmark",
- "href": image_bookmark,
- },
- ],
- },
- "flavor": {
- "id": "1",
- "links": [
- {
- "rel": "bookmark",
- "href": flavor_bookmark,
- },
- ],
- },
- "addresses": {
- 'test1': [
- {'version': 4, 'addr': '192.168.1.100',
- 'OS-EXT-IPS:type': 'fixed',
- 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
- {'version': 6, 'addr': '2001:db8:0:1::1',
- 'OS-EXT-IPS:type': 'fixed',
- 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}
- ]
- },
- "metadata": {},
- "links": [
- {
- "rel": "self",
- "href": self_link,
- },
- {
- "rel": "bookmark",
- "href": bookmark_link,
- },
- ],
- }
- }
-
- output = self.view_builder.show(self.request, self.instance)
- self.assertThat(output, matchers.DictMatches(expected_server))
-
- def test_build_server_detail_with_fault(self):
- self.instance['vm_state'] = vm_states.ERROR
- self.instance['fault'] = fake_instance.fake_fault_obj(
- self.request.context, self.uuid)
-
- image_bookmark = "http://localhost/images/5"
- flavor_bookmark = "http://localhost/flavors/1"
- self_link = "http://localhost/v3/servers/%s" % self.uuid
- bookmark_link = "http://localhost/servers/%s" % self.uuid
- expected_server = {
- "server": {
- "id": self.uuid,
- "user_id": "fake_user",
- "tenant_id": "fake_project",
- "updated": "2010-11-11T11:00:00Z",
- "created": "2010-10-10T12:00:00Z",
- "name": "test_server",
- "status": "ERROR",
- "hostId": '',
- "image": {
- "id": "5",
- "links": [
- {
- "rel": "bookmark",
- "href": image_bookmark,
- },
- ],
- },
- "flavor": {
- "id": "1",
- "links": [
- {
- "rel": "bookmark",
- "href": flavor_bookmark,
- },
- ],
- },
- "addresses": {
- 'test1': [
- {'version': 4, 'addr': '192.168.1.100',
- 'OS-EXT-IPS:type': 'fixed',
- 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
- {'version': 6, 'addr': '2001:db8:0:1::1',
- 'OS-EXT-IPS:type': 'fixed',
- 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}
- ]
- },
- "metadata": {},
- "links": [
- {
- "rel": "self",
- "href": self_link,
- },
- {
- "rel": "bookmark",
- "href": bookmark_link,
- },
- ],
- "fault": {
- "code": 404,
- "created": "2010-10-10T12:00:00Z",
- "message": "HTTPNotFound",
- "details": "Stock details for test",
- },
- }
- }
-
- self.request.context = context.RequestContext('fake', 'fake')
- output = self.view_builder.show(self.request, self.instance)
- self.assertThat(output, matchers.DictMatches(expected_server))
-
- def test_build_server_detail_with_fault_that_has_been_deleted(self):
- self.instance['deleted'] = 1
- self.instance['vm_state'] = vm_states.ERROR
- fault = fake_instance.fake_fault_obj(self.request.context,
- self.uuid, code=500,
- message="No valid host was found")
- self.instance['fault'] = fault
-
- expected_fault = {"code": 500,
- "created": "2010-10-10T12:00:00Z",
- "message": "No valid host was found"}
-
- self.request.context = context.RequestContext('fake', 'fake')
- output = self.view_builder.show(self.request, self.instance)
- # Regardless of vm_state deleted servers sholud be DELETED
- self.assertEqual("DELETED", output['server']['status'])
- self.assertThat(output['server']['fault'],
- matchers.DictMatches(expected_fault))
-
- def test_build_server_detail_with_fault_no_details_not_admin(self):
- self.instance['vm_state'] = vm_states.ERROR
- self.instance['fault'] = fake_instance.fake_fault_obj(
- self.request.context,
- self.uuid,
- code=500,
- message='Error')
-
- expected_fault = {"code": 500,
- "created": "2010-10-10T12:00:00Z",
- "message": "Error"}
-
- self.request.context = context.RequestContext('fake', 'fake')
- output = self.view_builder.show(self.request, self.instance)
- self.assertThat(output['server']['fault'],
- matchers.DictMatches(expected_fault))
-
- def test_build_server_detail_with_fault_admin(self):
- self.instance['vm_state'] = vm_states.ERROR
- self.instance['fault'] = fake_instance.fake_fault_obj(
- self.request.context,
- self.uuid,
- code=500,
- message='Error')
-
- expected_fault = {"code": 500,
- "created": "2010-10-10T12:00:00Z",
- "message": "Error",
- 'details': 'Stock details for test'}
-
- self.request.environ['nova.context'].is_admin = True
- output = self.view_builder.show(self.request, self.instance)
- self.assertThat(output['server']['fault'],
- matchers.DictMatches(expected_fault))
-
- def test_build_server_detail_with_fault_no_details_admin(self):
- self.instance['vm_state'] = vm_states.ERROR
- self.instance['fault'] = fake_instance.fake_fault_obj(
- self.request.context,
- self.uuid,
- code=500,
- message='Error',
- details='')
-
- expected_fault = {"code": 500,
- "created": "2010-10-10T12:00:00Z",
- "message": "Error"}
-
- self.request.environ['nova.context'].is_admin = True
- output = self.view_builder.show(self.request, self.instance)
- self.assertThat(output['server']['fault'],
- matchers.DictMatches(expected_fault))
-
- def test_build_server_detail_with_fault_but_active(self):
- self.instance['vm_state'] = vm_states.ACTIVE
- self.instance['progress'] = 100
- self.instance['fault'] = fake_instance.fake_fault_obj(
- self.request.context, self.uuid)
-
- output = self.view_builder.show(self.request, self.instance)
- self.assertNotIn('fault', output['server'])
-
- def test_build_server_detail_active_status(self):
- # set the power state of the instance to running
- self.instance['vm_state'] = vm_states.ACTIVE
- self.instance['progress'] = 100
- image_bookmark = "http://localhost/images/5"
- flavor_bookmark = "http://localhost/flavors/1"
- self_link = "http://localhost/v3/servers/%s" % self.uuid
- bookmark_link = "http://localhost/servers/%s" % self.uuid
- expected_server = {
- "server": {
- "id": self.uuid,
- "user_id": "fake_user",
- "tenant_id": "fake_project",
- "updated": "2010-11-11T11:00:00Z",
- "created": "2010-10-10T12:00:00Z",
- "progress": 100,
- "name": "test_server",
- "status": "ACTIVE",
- "hostId": '',
- "image": {
- "id": "5",
- "links": [
- {
- "rel": "bookmark",
- "href": image_bookmark,
- },
- ],
- },
- "flavor": {
- "id": "1",
- "links": [
- {
- "rel": "bookmark",
- "href": flavor_bookmark,
- },
- ],
- },
- "addresses": {
- 'test1': [
- {'version': 4, 'addr': '192.168.1.100',
- 'OS-EXT-IPS:type': 'fixed',
- 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
- {'version': 6, 'addr': '2001:db8:0:1::1',
- 'OS-EXT-IPS:type': 'fixed',
- 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}
- ]
- },
- "metadata": {},
- "links": [
- {
- "rel": "self",
- "href": self_link,
- },
- {
- "rel": "bookmark",
- "href": bookmark_link,
- },
- ],
- }
- }
-
- output = self.view_builder.show(self.request, self.instance)
- self.assertThat(output, matchers.DictMatches(expected_server))
-
- def test_build_server_detail_with_metadata(self):
-
- metadata = []
- metadata.append(models.InstanceMetadata(key="Open", value="Stack"))
- metadata = nova_utils.metadata_to_dict(metadata)
- self.instance['metadata'] = metadata
-
- image_bookmark = "http://localhost/images/5"
- flavor_bookmark = "http://localhost/flavors/1"
- self_link = "http://localhost/v3/servers/%s" % self.uuid
- bookmark_link = "http://localhost/servers/%s" % self.uuid
- expected_server = {
- "server": {
- "id": self.uuid,
- "user_id": "fake_user",
- "tenant_id": "fake_project",
- "updated": "2010-11-11T11:00:00Z",
- "created": "2010-10-10T12:00:00Z",
- "progress": 0,
- "name": "test_server",
- "status": "BUILD",
- "hostId": '',
- "image": {
- "id": "5",
- "links": [
- {
- "rel": "bookmark",
- "href": image_bookmark,
- },
- ],
- },
- "flavor": {
- "id": "1",
- "links": [
- {
- "rel": "bookmark",
- "href": flavor_bookmark,
- },
- ],
- },
- "addresses": {
- 'test1': [
- {'version': 4, 'addr': '192.168.1.100',
- 'OS-EXT-IPS:type': 'fixed',
- 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
- {'version': 6, 'addr': '2001:db8:0:1::1',
- 'OS-EXT-IPS:type': 'fixed',
- 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
- ]
- },
- "metadata": {"Open": "Stack"},
- "links": [
- {
- "rel": "self",
- "href": self_link,
- },
- {
- "rel": "bookmark",
- "href": bookmark_link,
- },
- ],
- }
- }
-
- output = self.view_builder.show(self.request, self.instance)
- self.assertThat(output, matchers.DictMatches(expected_server))
-
-
-class ServersAllExtensionsTestCase(test.TestCase):
- """Servers tests using default API router with all extensions enabled.
-
- The intent here is to catch cases where extensions end up throwing
- an exception because of a malformed request before the core API
- gets a chance to validate the request and return a 422 response.
-
- For example, AccessIPsController extends servers.Controller::
-
- | @wsgi.extends
- | def create(self, req, resp_obj, body):
- | context = req.environ['nova.context']
- | if authorize(context) and 'server' in resp_obj.obj:
- | resp_obj.attach(xml=AccessIPTemplate())
- | server = resp_obj.obj['server']
- | self._extend_server(req, server)
-
- we want to ensure that the extension isn't barfing on an invalid
- body.
- """
-
- def setUp(self):
- super(ServersAllExtensionsTestCase, self).setUp()
- self.app = compute.APIRouterV3()
-
- def test_create_missing_server(self):
- # Test create with malformed body.
-
- def fake_create(*args, **kwargs):
- raise test.TestingException("Should not reach the compute API.")
-
- self.stubs.Set(compute_api.API, 'create', fake_create)
-
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.content_type = 'application/json'
- body = {'foo': {'a': 'b'}}
-
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- self.assertEqual(400, res.status_int)
-
- def test_update_missing_server(self):
- # Test update with malformed body.
-
- def fake_update(*args, **kwargs):
- raise test.TestingException("Should not reach the compute API.")
-
- self.stubs.Set(compute_api.API, 'update', fake_update)
-
- req = fakes.HTTPRequestV3.blank('/servers/1')
- req.method = 'PUT'
- req.content_type = 'application/json'
- body = {'foo': {'a': 'b'}}
-
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- self.assertEqual(400, res.status_int)
-
-
-class ServersInvalidRequestTestCase(test.TestCase):
- """Tests of places we throw 400 Bad Request from."""
-
- def setUp(self):
- super(ServersInvalidRequestTestCase, self).setUp()
- ext_info = plugins.LoadedExtensionInfo()
- self.controller = servers.ServersController(extension_info=ext_info)
-
- def _invalid_server_create(self, body):
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
-
- self.assertRaises(exception.ValidationError,
- self.controller.create, req, body=body)
-
- def test_create_server_no_body(self):
- self._invalid_server_create(body=None)
-
- def test_create_server_missing_server(self):
- body = {'foo': {'a': 'b'}}
- self._invalid_server_create(body=body)
-
- def test_create_server_malformed_entity(self):
- body = {'server': 'string'}
- self._invalid_server_create(body=body)
-
- def _unprocessable_server_update(self, body):
- req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
- req.method = 'PUT'
-
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.update, req, FAKE_UUID, body=body)
-
- def test_update_server_no_body(self):
- self._invalid_server_create(body=None)
-
- def test_update_server_missing_server(self):
- body = {'foo': {'a': 'b'}}
- self._invalid_server_create(body=body)
-
- def test_create_update_malformed_entity(self):
- body = {'server': 'string'}
- self._invalid_server_create(body=body)
-
-
-class FakeExt(extensions.V3APIExtensionBase):
- name = "DiskConfig"
- alias = 'os-disk-config'
- version = 1
-
- def fake_extension_point(self, *args, **kwargs):
- pass
-
- def get_controller_extensions(self):
- return []
-
- def get_resources(self):
- return []
-
-
-class TestServersExtensionPoint(test.NoDBTestCase):
- def setUp(self):
- super(TestServersExtensionPoint, self).setUp()
- CONF.set_override('extensions_whitelist', ['os-disk-config'],
- 'osapi_v3')
- self.stubs.Set(disk_config, 'DiskConfig', FakeExt)
-
- def _test_load_extension_point(self, name):
- setattr(FakeExt, 'server_%s' % name,
- FakeExt.fake_extension_point)
- ext_info = plugins.LoadedExtensionInfo()
- controller = servers.ServersController(extension_info=ext_info)
- self.assertEqual(
- 'os-disk-config',
- list(getattr(controller,
- '%s_extension_manager' % name))[0].obj.alias)
- delattr(FakeExt, 'server_%s' % name)
-
- def test_load_update_extension_point(self):
- self._test_load_extension_point('update')
-
- def test_load_rebuild_extension_point(self):
- self._test_load_extension_point('rebuild')
-
- def test_load_create_extension_point(self):
- self._test_load_extension_point('create')
-
- def test_load_resize_extension_point(self):
- self._test_load_extension_point('resize')
-
-
-class TestServersExtensionSchema(test.NoDBTestCase):
- def setUp(self):
- super(TestServersExtensionSchema, self).setUp()
- CONF.set_override('extensions_whitelist', ['disk_config'], 'osapi_v3')
-
- def _test_load_extension_schema(self, name):
- setattr(FakeExt, 'get_server_%s_schema' % name,
- FakeExt.fake_extension_point)
- ext_info = plugins.LoadedExtensionInfo()
- controller = servers.ServersController(extension_info=ext_info)
- self.assertTrue(hasattr(controller, '%s_schema_manager' % name))
-
- delattr(FakeExt, 'get_server_%s_schema' % name)
- return getattr(controller, 'schema_server_%s' % name)
-
- def test_load_create_extension_point(self):
- # The expected is the schema combination of base and keypairs
- # because of the above extensions_whitelist.
- expected_schema = copy.deepcopy(servers_schema.base_create)
- expected_schema['properties']['server']['properties'].update(
- disk_config_schema.server_create)
-
- actual_schema = self._test_load_extension_schema('create')
- self.assertEqual(expected_schema, actual_schema)
-
- def test_load_update_extension_point(self):
- # keypair extension does not contain update_server() and
- # here checks that any extension is not added to the schema.
- expected_schema = copy.deepcopy(servers_schema.base_update)
- expected_schema['properties']['server']['properties'].update(
- disk_config_schema.server_create)
-
- actual_schema = self._test_load_extension_schema('update')
- self.assertEqual(expected_schema, actual_schema)
-
- def test_load_rebuild_extension_point(self):
- # keypair extension does not contain rebuild_server() and
- # here checks that any extension is not added to the schema.
- expected_schema = copy.deepcopy(servers_schema.base_rebuild)
- expected_schema['properties']['rebuild']['properties'].update(
- disk_config_schema.server_create)
-
- actual_schema = self._test_load_extension_schema('rebuild')
- self.assertEqual(expected_schema, actual_schema)
-
- def test_load_resize_extension_point(self):
- # keypair extension does not contain resize_server() and
- # here checks that any extension is not added to the schema.
- expected_schema = copy.deepcopy(servers_schema.base_resize)
- expected_schema['properties']['resize']['properties'].update(
- disk_config_schema.server_create)
-
- actual_schema = self._test_load_extension_schema('resize')
- self.assertEqual(expected_schema, actual_schema)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_services.py b/nova/tests/api/openstack/compute/plugins/v3/test_services.py
deleted file mode 100644
index 9e681c6fde..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_services.py
+++ /dev/null
@@ -1,453 +0,0 @@
-# Copyright 2012 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import calendar
-import datetime
-
-import iso8601
-import mock
-from oslo.utils import timeutils
-import webob.exc
-
-from nova.api.openstack.compute.plugins.v3 import services
-from nova import availability_zones
-from nova.compute import cells_api
-from nova import context
-from nova import db
-from nova import exception
-from nova.servicegroup.drivers import db as db_driver
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests.objects import test_service
-
-
-fake_services_list = [
- dict(test_service.fake_service,
- binary='nova-scheduler',
- host='host1',
- id=1,
- disabled=True,
- topic='scheduler',
- updated_at=datetime.datetime(2012, 10, 29, 13, 42, 2),
- created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
- disabled_reason='test1'),
- dict(test_service.fake_service,
- binary='nova-compute',
- host='host1',
- id=2,
- disabled=True,
- topic='compute',
- updated_at=datetime.datetime(2012, 10, 29, 13, 42, 5),
- created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
- disabled_reason='test2'),
- dict(test_service.fake_service,
- binary='nova-scheduler',
- host='host2',
- id=3,
- disabled=False,
- topic='scheduler',
- updated_at=datetime.datetime(2012, 9, 19, 6, 55, 34),
- created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
- disabled_reason=None),
- dict(test_service.fake_service,
- binary='nova-compute',
- host='host2',
- id=4,
- disabled=True,
- topic='compute',
- updated_at=datetime.datetime(2012, 9, 18, 8, 3, 38),
- created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
- disabled_reason='test4'),
- ]
-
-
-class FakeRequest(object):
- environ = {"nova.context": context.get_admin_context()}
- GET = {}
-
-
-class FakeRequestWithService(object):
- environ = {"nova.context": context.get_admin_context()}
- GET = {"binary": "nova-compute"}
-
-
-class FakeRequestWithHost(object):
- environ = {"nova.context": context.get_admin_context()}
- GET = {"host": "host1"}
-
-
-class FakeRequestWithHostService(object):
- environ = {"nova.context": context.get_admin_context()}
- GET = {"host": "host1", "binary": "nova-compute"}
-
-
-def fake_service_get_all(services):
- def service_get_all(context, filters=None, set_zones=False):
- if set_zones or 'availability_zone' in filters:
- return availability_zones.set_availability_zones(context,
- services)
- return services
- return service_get_all
-
-
-def fake_db_api_service_get_all(context, disabled=None):
- return fake_services_list
-
-
-def fake_db_service_get_by_host_binary(services):
- def service_get_by_host_binary(context, host, binary):
- for service in services:
- if service['host'] == host and service['binary'] == binary:
- return service
- raise exception.HostBinaryNotFound(host=host, binary=binary)
- return service_get_by_host_binary
-
-
-def fake_service_get_by_host_binary(context, host, binary):
- fake = fake_db_service_get_by_host_binary(fake_services_list)
- return fake(context, host, binary)
-
-
-def _service_get_by_id(services, value):
- for service in services:
- if service['id'] == value:
- return service
- return None
-
-
-def fake_db_service_update(services):
- def service_update(context, service_id, values):
- service = _service_get_by_id(services, service_id)
- if service is None:
- raise exception.ServiceNotFound(service_id=service_id)
- return service
- return service_update
-
-
-def fake_service_update(context, service_id, values):
- fake = fake_db_service_update(fake_services_list)
- return fake(context, service_id, values)
-
-
-def fake_utcnow():
- return datetime.datetime(2012, 10, 29, 13, 42, 11)
-
-
-fake_utcnow.override_time = None
-
-
-def fake_utcnow_ts():
- d = fake_utcnow()
- return calendar.timegm(d.utctimetuple())
-
-
-class ServicesTest(test.TestCase):
-
- def setUp(self):
- super(ServicesTest, self).setUp()
-
- self.controller = services.ServiceController()
-
- self.stubs.Set(timeutils, "utcnow", fake_utcnow)
- self.stubs.Set(timeutils, "utcnow_ts", fake_utcnow_ts)
-
- self.stubs.Set(self.controller.host_api, "service_get_all",
- fake_service_get_all(fake_services_list))
-
- self.stubs.Set(db, "service_get_by_args",
- fake_db_service_get_by_host_binary(fake_services_list))
- self.stubs.Set(db, "service_update",
- fake_db_service_update(fake_services_list))
-
- def test_services_list(self):
- req = FakeRequest()
- res_dict = self.controller.index(req)
- response = {'services': [
- {'binary': 'nova-scheduler',
- 'id': 1,
- 'host': 'host1',
- 'zone': 'internal',
- 'status': 'disabled',
- 'state': 'up',
- 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
- 'disabled_reason': 'test1'},
- {'binary': 'nova-compute',
- 'host': 'host1',
- 'id': 2,
- 'zone': 'nova',
- 'status': 'disabled',
- 'state': 'up',
- 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
- 'disabled_reason': 'test2'},
- {'binary': 'nova-scheduler',
- 'host': 'host2',
- 'id': 3,
- 'zone': 'internal',
- 'status': 'enabled',
- 'state': 'down',
- 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
- 'disabled_reason': None},
- {'binary': 'nova-compute',
- 'host': 'host2',
- 'id': 4,
- 'zone': 'nova',
- 'status': 'disabled',
- 'state': 'down',
- 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
- 'disabled_reason': 'test4'}]}
- self.assertEqual(res_dict, response)
-
- def test_service_list_with_host(self):
- req = FakeRequestWithHost()
- res_dict = self.controller.index(req)
- response = {'services': [
- {'binary': 'nova-scheduler',
- 'host': 'host1',
- 'id': 1,
- 'zone': 'internal',
- 'status': 'disabled',
- 'state': 'up',
- 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
- 'disabled_reason': 'test1'},
- {'binary': 'nova-compute',
- 'host': 'host1',
- 'id': 2,
- 'zone': 'nova',
- 'status': 'disabled',
- 'state': 'up',
- 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
- 'disabled_reason': 'test2'}]}
- self.assertEqual(res_dict, response)
-
- def test_service_list_with_service(self):
- req = FakeRequestWithService()
- res_dict = self.controller.index(req)
- response = {'services': [
- {'binary': 'nova-compute',
- 'host': 'host1',
- 'id': 2,
- 'zone': 'nova',
- 'status': 'disabled',
- 'state': 'up',
- 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
- 'disabled_reason': 'test2'},
- {'binary': 'nova-compute',
- 'host': 'host2',
- 'id': 4,
- 'zone': 'nova',
- 'status': 'disabled',
- 'state': 'down',
- 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
- 'disabled_reason': 'test4'}]}
- self.assertEqual(res_dict, response)
-
- def test_service_list_with_host_service(self):
- req = FakeRequestWithHostService()
- res_dict = self.controller.index(req)
- response = {'services': [
- {'binary': 'nova-compute',
- 'host': 'host1',
- 'id': 2,
- 'zone': 'nova',
- 'status': 'disabled',
- 'state': 'up',
- 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
- 'disabled_reason': 'test2'}]}
- self.assertEqual(res_dict, response)
-
- def test_services_enable(self):
- def _service_update(context, service_id, values):
- self.assertIsNone(values['disabled_reason'])
- return dict(test_service.fake_service, id=service_id)
-
- self.stubs.Set(db, "service_update", _service_update)
-
- body = {'service': {'host': 'host1',
- 'binary': 'nova-compute'}}
- req = fakes.HTTPRequestV3.blank('/os-services/enable')
- res_dict = self.controller.update(req, "enable", body)
-
- self.assertEqual(res_dict['service']['status'], 'enabled')
- self.assertNotIn('disabled_reason', res_dict['service'])
-
- def test_services_enable_with_invalid_host(self):
- body = {'service': {'host': 'invalid',
- 'binary': 'nova-compute'}}
- req = fakes.HTTPRequestV3.blank('/os-services/enable')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.update,
- req,
- "enable",
- body)
-
- def test_services_enable_with_invalid_binary(self):
- body = {'service': {'host': 'host1',
- 'binary': 'invalid'}}
- req = fakes.HTTPRequestV3.blank('/os-services/enable')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.update,
- req,
- "enable",
- body)
-
- # This test is just to verify that the servicegroup API gets used when
- # calling this API.
- def test_services_with_exception(self):
- def dummy_is_up(self, dummy):
- raise KeyError()
-
- self.stubs.Set(db_driver.DbDriver, 'is_up', dummy_is_up)
- req = FakeRequestWithHostService()
- self.assertRaises(webob.exc.HTTPInternalServerError,
- self.controller.index, req)
-
- def test_services_disable(self):
- req = fakes.HTTPRequestV3.blank('/os-services/disable')
- body = {'service': {'host': 'host1',
- 'binary': 'nova-compute'}}
- res_dict = self.controller.update(req, "disable", body)
-
- self.assertEqual(res_dict['service']['status'], 'disabled')
- self.assertNotIn('disabled_reason', res_dict['service'])
-
- def test_services_disable_with_invalid_host(self):
- body = {'service': {'host': 'invalid',
- 'binary': 'nova-compute'}}
- req = fakes.HTTPRequestV3.blank('/os-services/disable')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.update,
- req,
- "disable",
- body)
-
- def test_services_disable_with_invalid_binary(self):
- body = {'service': {'host': 'host1',
- 'binary': 'invalid'}}
- req = fakes.HTTPRequestV3.blank('/os-services/disable')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.update,
- req,
- "disable",
- body)
-
- def test_services_disable_log_reason(self):
- req = \
- fakes.HTTPRequestV3.blank('/os-services/disable-log-reason')
- body = {'service': {'host': 'host1',
- 'binary': 'nova-compute',
- 'disabled_reason': 'test-reason'}}
- res_dict = self.controller.update(req, "disable-log-reason", body)
-
- self.assertEqual(res_dict['service']['status'], 'disabled')
- self.assertEqual(res_dict['service']['disabled_reason'], 'test-reason')
-
- def test_mandatory_reason_field(self):
- req = \
- fakes.HTTPRequestV3.blank('/os-services/disable-log-reason')
- body = {'service': {'host': 'host1',
- 'binary': 'nova-compute'}}
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.update, req, "disable-log-reason", body)
-
- def test_invalid_reason_field(self):
- reason = ' '
- self.assertFalse(self.controller._is_valid_as_reason(reason))
- reason = 'a' * 256
- self.assertFalse(self.controller._is_valid_as_reason(reason))
- reason = 'it\'s a valid reason.'
- self.assertTrue(self.controller._is_valid_as_reason(reason))
-
- def test_services_delete(self):
- request = fakes.HTTPRequestV3.blank('/v3/os-services/1',
- use_admin_context=True)
- request.method = 'DELETE'
-
- with mock.patch.object(self.controller.host_api,
- 'service_delete') as service_delete:
- self.controller.delete(request, '1')
- service_delete.assert_called_once_with(
- request.environ['nova.context'], '1')
- self.assertEqual(self.controller.delete.wsgi_code, 204)
-
- def test_services_delete_not_found(self):
- request = fakes.HTTPRequestV3.blank('/v3/os-services/abc',
- use_admin_context=True)
- request.method = 'DELETE'
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.delete, request, 'abc')
-
-
-class ServicesCellsTest(test.TestCase):
- def setUp(self):
- super(ServicesCellsTest, self).setUp()
-
- host_api = cells_api.HostAPI()
-
- self.controller = services.ServiceController()
- self.controller.host_api = host_api
-
- self.stubs.Set(timeutils, "utcnow", fake_utcnow)
- self.stubs.Set(timeutils, "utcnow_ts", fake_utcnow_ts)
-
- services_list = []
- for service in fake_services_list:
- service = service.copy()
- service['id'] = 'cell1@%d' % service['id']
- services_list.append(service)
-
- self.stubs.Set(host_api.cells_rpcapi, "service_get_all",
- fake_service_get_all(services_list))
-
- def test_services_detail(self):
- req = FakeRequest()
- res_dict = self.controller.index(req)
- utc = iso8601.iso8601.Utc()
- response = {'services': [
- {'id': 'cell1@1',
- 'binary': 'nova-scheduler',
- 'host': 'host1',
- 'zone': 'internal',
- 'status': 'disabled',
- 'state': 'up',
- 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2,
- tzinfo=utc),
- 'disabled_reason': 'test1'},
- {'id': 'cell1@2',
- 'binary': 'nova-compute',
- 'host': 'host1',
- 'zone': 'nova',
- 'status': 'disabled',
- 'state': 'up',
- 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5,
- tzinfo=utc),
- 'disabled_reason': 'test2'},
- {'id': 'cell1@3',
- 'binary': 'nova-scheduler',
- 'host': 'host2',
- 'zone': 'internal',
- 'status': 'enabled',
- 'state': 'down',
- 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34,
- tzinfo=utc),
- 'disabled_reason': None},
- {'id': 'cell1@4',
- 'binary': 'nova-compute',
- 'host': 'host2',
- 'zone': 'nova',
- 'status': 'disabled',
- 'state': 'down',
- 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38,
- tzinfo=utc),
- 'disabled_reason': 'test4'}]}
- self.assertEqual(res_dict, response)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_suspend_server.py b/nova/tests/api/openstack/compute/plugins/v3/test_suspend_server.py
deleted file mode 100644
index 0b82996e31..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_suspend_server.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.api.openstack.compute.plugins.v3 import suspend_server
-from nova.tests.api.openstack.compute.plugins.v3 import \
- admin_only_action_common
-from nova.tests.api.openstack import fakes
-
-
-class SuspendServerTests(admin_only_action_common.CommonTests):
- def setUp(self):
- super(SuspendServerTests, self).setUp()
- self.controller = suspend_server.SuspendServerController()
- self.compute_api = self.controller.compute_api
-
- def _fake_controller(*args, **kwargs):
- return self.controller
-
- self.stubs.Set(suspend_server, 'SuspendServerController',
- _fake_controller)
- self.app = fakes.wsgi_app_v21(init_only=('servers',
- 'os-suspend-server'),
- fake_auth_context=self.context)
- self.mox.StubOutWithMock(self.compute_api, 'get')
-
- def test_suspend_resume(self):
- self._test_actions(['suspend', 'resume'])
-
- def test_suspend_resume_with_non_existed_instance(self):
- self._test_actions_with_non_existed_instance(['suspend', 'resume'])
-
- def test_suspend_resume_raise_conflict_on_invalid_state(self):
- self._test_actions_raise_conflict_on_invalid_state(['suspend',
- 'resume'])
-
- def test_actions_with_locked_instance(self):
- self._test_actions_with_locked_instance(['suspend', 'resume'])
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_user_data.py b/nova/tests/api/openstack/compute/plugins/v3/test_user_data.py
deleted file mode 100644
index 9ee2c5022a..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_user_data.py
+++ /dev/null
@@ -1,195 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import base64
-import datetime
-import uuid
-
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-
-from nova.api.openstack.compute import plugins
-from nova.api.openstack.compute.plugins.v3 import servers
-from nova.api.openstack.compute.plugins.v3 import user_data
-from nova.compute import api as compute_api
-from nova.compute import flavors
-from nova import db
-from nova import exception
-from nova.network import manager
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-from nova.tests.image import fake
-
-
-CONF = cfg.CONF
-FAKE_UUID = fakes.FAKE_UUID
-
-
-def fake_gen_uuid():
- return FAKE_UUID
-
-
-def return_security_group(context, instance_id, security_group_id):
- pass
-
-
-class ServersControllerCreateTest(test.TestCase):
-
- def setUp(self):
- """Shared implementation for tests below that create instance."""
- super(ServersControllerCreateTest, self).setUp()
-
- self.flags(verbose=True,
- enable_instance_password=True)
- self.instance_cache_num = 0
- self.instance_cache_by_id = {}
- self.instance_cache_by_uuid = {}
-
- ext_info = plugins.LoadedExtensionInfo()
- self.controller = servers.ServersController(extension_info=ext_info)
- CONF.set_override('extensions_blacklist', 'os-user-data',
- 'osapi_v3')
- self.no_user_data_controller = servers.ServersController(
- extension_info=ext_info)
-
- def instance_create(context, inst):
- inst_type = flavors.get_flavor_by_flavor_id(3)
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- def_image_ref = 'http://localhost/images/%s' % image_uuid
- self.instance_cache_num += 1
- instance = fake_instance.fake_db_instance(**{
- 'id': self.instance_cache_num,
- 'display_name': inst['display_name'] or 'test',
- 'uuid': FAKE_UUID,
- 'instance_type': inst_type,
- 'access_ip_v4': '1.2.3.4',
- 'access_ip_v6': 'fead::1234',
- 'image_ref': inst.get('image_ref', def_image_ref),
- 'user_id': 'fake',
- 'project_id': 'fake',
- 'reservation_id': inst['reservation_id'],
- "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
- "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
- user_data.ATTRIBUTE_NAME: None,
- "progress": 0,
- "fixed_ips": [],
- "task_state": "",
- "vm_state": "",
- "root_device_name": inst.get('root_device_name', 'vda'),
- })
-
- self.instance_cache_by_id[instance['id']] = instance
- self.instance_cache_by_uuid[instance['uuid']] = instance
- return instance
-
- def instance_get(context, instance_id):
- """Stub for compute/api create() pulling in instance after
- scheduling
- """
- return self.instance_cache_by_id[instance_id]
-
- def instance_update(context, uuid, values):
- instance = self.instance_cache_by_uuid[uuid]
- instance.update(values)
- return instance
-
- def server_update(context, instance_uuid, params):
- inst = self.instance_cache_by_uuid[instance_uuid]
- inst.update(params)
- return (inst, inst)
-
- def fake_method(*args, **kwargs):
- pass
-
- def project_get_networks(context, user_id):
- return dict(id='1', host='localhost')
-
- def queue_get_for(context, *args):
- return 'network_topic'
-
- fakes.stub_out_rate_limiting(self.stubs)
- fakes.stub_out_key_pair_funcs(self.stubs)
- fake.stub_out_image_service(self.stubs)
- fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
- self.stubs.Set(db, 'instance_add_security_group',
- return_security_group)
- self.stubs.Set(db, 'project_get_networks',
- project_get_networks)
- self.stubs.Set(db, 'instance_create', instance_create)
- self.stubs.Set(db, 'instance_system_metadata_update',
- fake_method)
- self.stubs.Set(db, 'instance_get', instance_get)
- self.stubs.Set(db, 'instance_update', instance_update)
- self.stubs.Set(db, 'instance_update_and_get_original',
- server_update)
- self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
- fake_method)
-
- def _test_create_extra(self, params, no_image=False,
- override_controller=None):
- image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
- server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
- if no_image:
- server.pop('imageRef', None)
- server.update(params)
- body = dict(server=server)
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- if override_controller:
- server = override_controller.create(req, body=body).obj['server']
- else:
- server = self.controller.create(req, body=body).obj['server']
- return server
-
- def test_create_instance_with_user_data_disabled(self):
- params = {user_data.ATTRIBUTE_NAME: base64.b64encode('fake')}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertNotIn('user_data', kwargs)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(
- params,
- override_controller=self.no_user_data_controller)
-
- def test_create_instance_with_user_data_enabled(self):
- params = {user_data.ATTRIBUTE_NAME: base64.b64encode('fake')}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertIn('user_data', kwargs)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_with_user_data(self):
- value = base64.b64encode("A random string")
- params = {user_data.ATTRIBUTE_NAME: value}
- server = self._test_create_extra(params)
- self.assertEqual(FAKE_UUID, server['id'])
-
- def test_create_instance_with_bad_user_data(self):
- value = "A random string"
- params = {user_data.ATTRIBUTE_NAME: value}
- self.assertRaises(exception.ValidationError,
- self._test_create_extra, params)
diff --git a/nova/tests/api/openstack/compute/test_api.py b/nova/tests/api/openstack/compute/test_api.py
deleted file mode 100644
index ecfe3c689a..0000000000
--- a/nova/tests/api/openstack/compute/test_api.py
+++ /dev/null
@@ -1,186 +0,0 @@
-# Copyright 2010 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-from oslo.serialization import jsonutils
-import webob
-import webob.dec
-import webob.exc
-
-from nova.api import openstack as openstack_api
-from nova.api.openstack import wsgi
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-class APITest(test.NoDBTestCase):
-
- def _wsgi_app(self, inner_app):
- # simpler version of the app than fakes.wsgi_app
- return openstack_api.FaultWrapper(inner_app)
-
- def test_malformed_json(self):
- req = webob.Request.blank('/')
- req.method = 'POST'
- req.body = '{'
- req.headers["content-type"] = "application/json"
-
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 400)
-
- def test_malformed_xml(self):
- req = webob.Request.blank('/')
- req.method = 'POST'
- req.body = '<hi im not xml>'
- req.headers["content-type"] = "application/xml"
-
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 400)
-
- def test_vendor_content_type_json(self):
- ctype = 'application/vnd.openstack.compute+json'
-
- req = webob.Request.blank('/')
- req.headers['Accept'] = ctype
-
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, ctype)
-
- jsonutils.loads(res.body)
-
- def test_vendor_content_type_xml(self):
- ctype = 'application/vnd.openstack.compute+xml'
-
- req = webob.Request.blank('/')
- req.headers['Accept'] = ctype
-
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, ctype)
-
- etree.XML(res.body)
-
- def test_exceptions_are_converted_to_faults_webob_exc(self):
- @webob.dec.wsgify
- def raise_webob_exc(req):
- raise webob.exc.HTTPNotFound(explanation='Raised a webob.exc')
-
- # api.application = raise_webob_exc
- api = self._wsgi_app(raise_webob_exc)
- resp = webob.Request.blank('/').get_response(api)
- self.assertEqual(resp.status_int, 404, resp.body)
-
- def test_exceptions_are_converted_to_faults_api_fault(self):
- @webob.dec.wsgify
- def raise_api_fault(req):
- exc = webob.exc.HTTPNotFound(explanation='Raised a webob.exc')
- return wsgi.Fault(exc)
-
- # api.application = raise_api_fault
- api = self._wsgi_app(raise_api_fault)
- resp = webob.Request.blank('/').get_response(api)
- self.assertIn('itemNotFound', resp.body)
- self.assertEqual(resp.status_int, 404, resp.body)
-
- def test_exceptions_are_converted_to_faults_exception(self):
- @webob.dec.wsgify
- def fail(req):
- raise Exception("Threw an exception")
-
- # api.application = fail
- api = self._wsgi_app(fail)
- resp = webob.Request.blank('/').get_response(api)
- self.assertIn('{"computeFault', resp.body)
- self.assertEqual(resp.status_int, 500, resp.body)
-
- def test_exceptions_are_converted_to_faults_exception_xml(self):
- @webob.dec.wsgify
- def fail(req):
- raise Exception("Threw an exception")
-
- # api.application = fail
- api = self._wsgi_app(fail)
- resp = webob.Request.blank('/.xml').get_response(api)
- self.assertIn('<computeFault', resp.body)
- self.assertEqual(resp.status_int, 500, resp.body)
-
- def _do_test_exception_safety_reflected_in_faults(self, expose):
- class ExceptionWithSafety(exception.NovaException):
- safe = expose
-
- @webob.dec.wsgify
- def fail(req):
- raise ExceptionWithSafety('some explanation')
-
- api = self._wsgi_app(fail)
- resp = webob.Request.blank('/').get_response(api)
- self.assertIn('{"computeFault', resp.body)
- expected = ('ExceptionWithSafety: some explanation' if expose else
- 'The server has either erred or is incapable '
- 'of performing the requested operation.')
- self.assertIn(expected, resp.body)
- self.assertEqual(resp.status_int, 500, resp.body)
-
- def test_safe_exceptions_are_described_in_faults(self):
- self._do_test_exception_safety_reflected_in_faults(True)
-
- def test_unsafe_exceptions_are_not_described_in_faults(self):
- self._do_test_exception_safety_reflected_in_faults(False)
-
- def _do_test_exception_mapping(self, exception_type, msg):
- @webob.dec.wsgify
- def fail(req):
- raise exception_type(msg)
-
- api = self._wsgi_app(fail)
- resp = webob.Request.blank('/').get_response(api)
- self.assertIn(msg, resp.body)
- self.assertEqual(resp.status_int, exception_type.code, resp.body)
-
- if hasattr(exception_type, 'headers'):
- for (key, value) in exception_type.headers.iteritems():
- self.assertIn(key, resp.headers)
- self.assertEqual(resp.headers[key], str(value))
-
- def test_quota_error_mapping(self):
- self._do_test_exception_mapping(exception.QuotaError, 'too many used')
-
- def test_non_nova_notfound_exception_mapping(self):
- class ExceptionWithCode(Exception):
- code = 404
-
- self._do_test_exception_mapping(ExceptionWithCode,
- 'NotFound')
-
- def test_non_nova_exception_mapping(self):
- class ExceptionWithCode(Exception):
- code = 417
-
- self._do_test_exception_mapping(ExceptionWithCode,
- 'Expectation failed')
-
- def test_exception_with_none_code_throws_500(self):
- class ExceptionWithNoneCode(Exception):
- code = None
-
- @webob.dec.wsgify
- def fail(req):
- raise ExceptionWithNoneCode()
-
- api = self._wsgi_app(fail)
- resp = webob.Request.blank('/').get_response(api)
- self.assertEqual(500, resp.status_int)
diff --git a/nova/tests/api/openstack/compute/test_auth.py b/nova/tests/api/openstack/compute/test_auth.py
deleted file mode 100644
index a8b48f6852..0000000000
--- a/nova/tests/api/openstack/compute/test_auth.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2010 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import webob
-import webob.dec
-
-from nova import context
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-class TestNoAuthMiddleware(test.NoDBTestCase):
-
- def setUp(self):
- super(TestNoAuthMiddleware, self).setUp()
- self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext)
- fakes.stub_out_rate_limiting(self.stubs)
- fakes.stub_out_networking(self.stubs)
-
- def test_authorize_user(self):
- req = webob.Request.blank('/v2')
- req.headers['X-Auth-User'] = 'user1'
- req.headers['X-Auth-Key'] = 'user1_key'
- req.headers['X-Auth-Project-Id'] = 'user1_project'
- result = req.get_response(fakes.wsgi_app(use_no_auth=True))
- self.assertEqual(result.status, '204 No Content')
- self.assertEqual(result.headers['X-Server-Management-Url'],
- "http://localhost/v2/user1_project")
-
- def test_authorize_user_trailing_slash(self):
- # make sure it works with trailing slash on the request
- req = webob.Request.blank('/v2/')
- req.headers['X-Auth-User'] = 'user1'
- req.headers['X-Auth-Key'] = 'user1_key'
- req.headers['X-Auth-Project-Id'] = 'user1_project'
- result = req.get_response(fakes.wsgi_app(use_no_auth=True))
- self.assertEqual(result.status, '204 No Content')
- self.assertEqual(result.headers['X-Server-Management-Url'],
- "http://localhost/v2/user1_project")
-
- def test_auth_token_no_empty_headers(self):
- req = webob.Request.blank('/v2')
- req.headers['X-Auth-User'] = 'user1'
- req.headers['X-Auth-Key'] = 'user1_key'
- req.headers['X-Auth-Project-Id'] = 'user1_project'
- result = req.get_response(fakes.wsgi_app(use_no_auth=True))
- self.assertEqual(result.status, '204 No Content')
- self.assertNotIn('X-CDN-Management-Url', result.headers)
- self.assertNotIn('X-Storage-Url', result.headers)
diff --git a/nova/tests/api/openstack/compute/test_consoles.py b/nova/tests/api/openstack/compute/test_consoles.py
deleted file mode 100644
index df90927060..0000000000
--- a/nova/tests/api/openstack/compute/test_consoles.py
+++ /dev/null
@@ -1,293 +0,0 @@
-# Copyright 2010-2011 OpenStack Foundation
-# Copyright 2011 Piston Cloud Computing, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-import uuid as stdlib_uuid
-
-from lxml import etree
-from oslo.utils import timeutils
-import webob
-
-from nova.api.openstack.compute import consoles
-from nova.compute import vm_states
-from nova import console
-from nova import db
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import matchers
-
-
-FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-
-
-class FakeInstanceDB(object):
-
- def __init__(self):
- self.instances_by_id = {}
- self.ids_by_uuid = {}
- self.max_id = 0
-
- def return_server_by_id(self, context, id):
- if id not in self.instances_by_id:
- self._add_server(id=id)
- return dict(self.instances_by_id[id])
-
- def return_server_by_uuid(self, context, uuid):
- if uuid not in self.ids_by_uuid:
- self._add_server(uuid=uuid)
- return dict(self.instances_by_id[self.ids_by_uuid[uuid]])
-
- def _add_server(self, id=None, uuid=None):
- if id is None:
- id = self.max_id + 1
- if uuid is None:
- uuid = str(stdlib_uuid.uuid4())
- instance = stub_instance(id, uuid=uuid)
- self.instances_by_id[id] = instance
- self.ids_by_uuid[uuid] = id
- if id > self.max_id:
- self.max_id = id
-
-
-def stub_instance(id, user_id='fake', project_id='fake', host=None,
- vm_state=None, task_state=None,
- reservation_id="", uuid=FAKE_UUID, image_ref="10",
- flavor_id="1", name=None, key_name='',
- access_ipv4=None, access_ipv6=None, progress=0):
-
- if host is not None:
- host = str(host)
-
- if key_name:
- key_data = 'FAKE'
- else:
- key_data = ''
-
- # ReservationID isn't sent back, hack it in there.
- server_name = name or "server%s" % id
- if reservation_id != "":
- server_name = "reservation_%s" % (reservation_id, )
-
- instance = {
- "id": int(id),
- "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
- "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
- "admin_pass": "",
- "user_id": user_id,
- "project_id": project_id,
- "image_ref": image_ref,
- "kernel_id": "",
- "ramdisk_id": "",
- "launch_index": 0,
- "key_name": key_name,
- "key_data": key_data,
- "vm_state": vm_state or vm_states.BUILDING,
- "task_state": task_state,
- "memory_mb": 0,
- "vcpus": 0,
- "root_gb": 0,
- "hostname": "",
- "host": host,
- "instance_type": {},
- "user_data": "",
- "reservation_id": reservation_id,
- "mac_address": "",
- "scheduled_at": timeutils.utcnow(),
- "launched_at": timeutils.utcnow(),
- "terminated_at": timeutils.utcnow(),
- "availability_zone": "",
- "display_name": server_name,
- "display_description": "",
- "locked": False,
- "metadata": [],
- "access_ip_v4": access_ipv4,
- "access_ip_v6": access_ipv6,
- "uuid": uuid,
- "progress": progress}
-
- return instance
-
-
-class ConsolesControllerTest(test.NoDBTestCase):
- def setUp(self):
- super(ConsolesControllerTest, self).setUp()
- self.flags(verbose=True)
- self.instance_db = FakeInstanceDB()
- self.stubs.Set(db, 'instance_get',
- self.instance_db.return_server_by_id)
- self.stubs.Set(db, 'instance_get_by_uuid',
- self.instance_db.return_server_by_uuid)
- self.uuid = str(stdlib_uuid.uuid4())
- self.url = '/v2/fake/servers/%s/consoles' % self.uuid
- self.controller = consoles.Controller()
-
- def test_create_console(self):
- def fake_create_console(cons_self, context, instance_id):
- self.assertEqual(instance_id, self.uuid)
- return {}
- self.stubs.Set(console.api.API, 'create_console', fake_create_console)
-
- req = fakes.HTTPRequest.blank(self.url)
- self.controller.create(req, self.uuid, None)
-
- def test_show_console(self):
- def fake_get_console(cons_self, context, instance_id, console_id):
- self.assertEqual(instance_id, self.uuid)
- self.assertEqual(console_id, 20)
- pool = dict(console_type='fake_type',
- public_hostname='fake_hostname')
- return dict(id=console_id, password='fake_password',
- port='fake_port', pool=pool, instance_name='inst-0001')
-
- expected = {'console': {'id': 20,
- 'port': 'fake_port',
- 'host': 'fake_hostname',
- 'password': 'fake_password',
- 'instance_name': 'inst-0001',
- 'console_type': 'fake_type'}}
-
- self.stubs.Set(console.api.API, 'get_console', fake_get_console)
-
- req = fakes.HTTPRequest.blank(self.url + '/20')
- res_dict = self.controller.show(req, self.uuid, '20')
- self.assertThat(res_dict, matchers.DictMatches(expected))
-
- def test_show_console_unknown_console(self):
- def fake_get_console(cons_self, context, instance_id, console_id):
- raise exception.ConsoleNotFound(console_id=console_id)
-
- self.stubs.Set(console.api.API, 'get_console', fake_get_console)
-
- req = fakes.HTTPRequest.blank(self.url + '/20')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
- req, self.uuid, '20')
-
- def test_show_console_unknown_instance(self):
- def fake_get_console(cons_self, context, instance_id, console_id):
- raise exception.InstanceNotFound(instance_id=instance_id)
-
- self.stubs.Set(console.api.API, 'get_console', fake_get_console)
-
- req = fakes.HTTPRequest.blank(self.url + '/20')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
- req, self.uuid, '20')
-
- def test_list_consoles(self):
- def fake_get_consoles(cons_self, context, instance_id):
- self.assertEqual(instance_id, self.uuid)
-
- pool1 = dict(console_type='fake_type',
- public_hostname='fake_hostname')
- cons1 = dict(id=10, password='fake_password',
- port='fake_port', pool=pool1)
- pool2 = dict(console_type='fake_type2',
- public_hostname='fake_hostname2')
- cons2 = dict(id=11, password='fake_password2',
- port='fake_port2', pool=pool2)
- return [cons1, cons2]
-
- expected = {'consoles':
- [{'console': {'id': 10, 'console_type': 'fake_type'}},
- {'console': {'id': 11, 'console_type': 'fake_type2'}}]}
-
- self.stubs.Set(console.api.API, 'get_consoles', fake_get_consoles)
-
- req = fakes.HTTPRequest.blank(self.url)
- res_dict = self.controller.index(req, self.uuid)
- self.assertThat(res_dict, matchers.DictMatches(expected))
-
- def test_delete_console(self):
- def fake_get_console(cons_self, context, instance_id, console_id):
- self.assertEqual(instance_id, self.uuid)
- self.assertEqual(console_id, 20)
- pool = dict(console_type='fake_type',
- public_hostname='fake_hostname')
- return dict(id=console_id, password='fake_password',
- port='fake_port', pool=pool)
-
- def fake_delete_console(cons_self, context, instance_id, console_id):
- self.assertEqual(instance_id, self.uuid)
- self.assertEqual(console_id, 20)
-
- self.stubs.Set(console.api.API, 'get_console', fake_get_console)
- self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
-
- req = fakes.HTTPRequest.blank(self.url + '/20')
- self.controller.delete(req, self.uuid, '20')
-
- def test_delete_console_unknown_console(self):
- def fake_delete_console(cons_self, context, instance_id, console_id):
- raise exception.ConsoleNotFound(console_id=console_id)
-
- self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
-
- req = fakes.HTTPRequest.blank(self.url + '/20')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- req, self.uuid, '20')
-
- def test_delete_console_unknown_instance(self):
- def fake_delete_console(cons_self, context, instance_id, console_id):
- raise exception.InstanceNotFound(instance_id=instance_id)
-
- self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
-
- req = fakes.HTTPRequest.blank(self.url + '/20')
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- req, self.uuid, '20')
-
-
-class TestConsolesXMLSerializer(test.NoDBTestCase):
- def test_show(self):
- fixture = {'console': {'id': 20,
- 'password': 'fake_password',
- 'port': 'fake_port',
- 'host': 'fake_hostname',
- 'console_type': 'fake_type'}}
-
- output = consoles.ConsoleTemplate().serialize(fixture)
- res_tree = etree.XML(output)
-
- self.assertEqual(res_tree.tag, 'console')
- self.assertEqual(res_tree.xpath('id')[0].text, '20')
- self.assertEqual(res_tree.xpath('port')[0].text, 'fake_port')
- self.assertEqual(res_tree.xpath('host')[0].text, 'fake_hostname')
- self.assertEqual(res_tree.xpath('password')[0].text, 'fake_password')
- self.assertEqual(res_tree.xpath('console_type')[0].text, 'fake_type')
-
- def test_index(self):
- fixture = {'consoles': [{'console': {'id': 10,
- 'console_type': 'fake_type'}},
- {'console': {'id': 11,
- 'console_type': 'fake_type2'}}]}
-
- output = consoles.ConsolesTemplate().serialize(fixture)
- res_tree = etree.XML(output)
-
- self.assertEqual(res_tree.tag, 'consoles')
- self.assertEqual(len(res_tree), 2)
- self.assertEqual(res_tree[0].tag, 'console')
- self.assertEqual(res_tree[1].tag, 'console')
- self.assertEqual(len(res_tree[0]), 1)
- self.assertEqual(res_tree[0][0].tag, 'console')
- self.assertEqual(len(res_tree[1]), 1)
- self.assertEqual(res_tree[1][0].tag, 'console')
- self.assertEqual(res_tree[0][0].xpath('id')[0].text, '10')
- self.assertEqual(res_tree[1][0].xpath('id')[0].text, '11')
- self.assertEqual(res_tree[0][0].xpath('console_type')[0].text,
- 'fake_type')
- self.assertEqual(res_tree[1][0].xpath('console_type')[0].text,
- 'fake_type2')
diff --git a/nova/tests/api/openstack/compute/test_extensions.py b/nova/tests/api/openstack/compute/test_extensions.py
deleted file mode 100644
index 49670087c8..0000000000
--- a/nova/tests/api/openstack/compute/test_extensions.py
+++ /dev/null
@@ -1,747 +0,0 @@
-# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import iso8601
-from lxml import etree
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack import compute
-from nova.api.openstack.compute import extensions as compute_extensions
-from nova.api.openstack import extensions as base_extensions
-from nova.api.openstack import wsgi
-from nova.api.openstack import xmlutil
-from nova import exception
-import nova.policy
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import matchers
-
-CONF = cfg.CONF
-
-NS = "{http://docs.openstack.org/common/api/v1.0}"
-ATOMNS = "{http://www.w3.org/2005/Atom}"
-response_body = "Try to say this Mr. Knox, sir..."
-extension_body = "I am not a fox!"
-
-
-class StubController(object):
-
- def __init__(self, body):
- self.body = body
-
- def index(self, req):
- return self.body
-
- def create(self, req, body):
- msg = 'All aboard the fail train!'
- raise webob.exc.HTTPBadRequest(explanation=msg)
-
- def show(self, req, id):
- raise webob.exc.HTTPNotFound()
-
-
-class StubActionController(wsgi.Controller):
- def __init__(self, body):
- self.body = body
-
- @wsgi.action('fooAction')
- def _action_foo(self, req, id, body):
- return self.body
-
-
-class StubControllerExtension(base_extensions.ExtensionDescriptor):
- name = 'twaadle'
-
- def __init__(self):
- pass
-
-
-class StubEarlyExtensionController(wsgi.Controller):
- def __init__(self, body):
- self.body = body
-
- @wsgi.extends
- def index(self, req):
- yield self.body
-
- @wsgi.extends(action='fooAction')
- def _action_foo(self, req, id, body):
- yield self.body
-
-
-class StubLateExtensionController(wsgi.Controller):
- def __init__(self, body):
- self.body = body
-
- @wsgi.extends
- def index(self, req, resp_obj):
- return self.body
-
- @wsgi.extends(action='fooAction')
- def _action_foo(self, req, resp_obj, id, body):
- return self.body
-
-
-class StubExtensionManager(object):
- """Provides access to Tweedle Beetles."""
-
- name = "Tweedle Beetle Extension"
- alias = "TWDLBETL"
-
- def __init__(self, resource_ext=None, action_ext=None, request_ext=None,
- controller_ext=None):
- self.resource_ext = resource_ext
- self.action_ext = action_ext
- self.request_ext = request_ext
- self.controller_ext = controller_ext
- self.extra_resource_ext = None
-
- def get_resources(self):
- resource_exts = []
- if self.resource_ext:
- resource_exts.append(self.resource_ext)
- if self.extra_resource_ext:
- resource_exts.append(self.extra_resource_ext)
- return resource_exts
-
- def get_actions(self):
- action_exts = []
- if self.action_ext:
- action_exts.append(self.action_ext)
- return action_exts
-
- def get_request_extensions(self):
- request_extensions = []
- if self.request_ext:
- request_extensions.append(self.request_ext)
- return request_extensions
-
- def get_controller_extensions(self):
- controller_extensions = []
- if self.controller_ext:
- controller_extensions.append(self.controller_ext)
- return controller_extensions
-
-
-class ExtensionTestCase(test.TestCase):
- def setUp(self):
- super(ExtensionTestCase, self).setUp()
- ext_list = CONF.osapi_compute_extension[:]
- fox = ('nova.tests.api.openstack.compute.extensions.'
- 'foxinsocks.Foxinsocks')
- if fox not in ext_list:
- ext_list.append(fox)
- self.flags(osapi_compute_extension=ext_list)
- self.fake_context = nova.context.RequestContext('fake', 'fake')
-
- def test_extension_authorizer_throws_exception_if_policy_fails(self):
- target = {'project_id': '1234',
- 'user_id': '5678'}
- self.mox.StubOutWithMock(nova.policy, 'enforce')
- nova.policy.enforce(self.fake_context,
- "compute_extension:used_limits_for_admin",
- target).AndRaise(
- exception.PolicyNotAuthorized(
- action="compute_extension:used_limits_for_admin"))
- self.mox.ReplayAll()
- authorize = base_extensions.extension_authorizer('compute',
- 'used_limits_for_admin'
- )
- self.assertRaises(exception.PolicyNotAuthorized, authorize,
- self.fake_context, target=target)
-
- def test_core_authorizer_throws_exception_if_policy_fails(self):
- target = {'project_id': '1234',
- 'user_id': '5678'}
- self.mox.StubOutWithMock(nova.policy, 'enforce')
- nova.policy.enforce(self.fake_context,
- "compute:used_limits_for_admin",
- target).AndRaise(
- exception.PolicyNotAuthorized(
- action="compute:used_limits_for_admin"))
- self.mox.ReplayAll()
- authorize = base_extensions.core_authorizer('compute',
- 'used_limits_for_admin'
- )
- self.assertRaises(exception.PolicyNotAuthorized, authorize,
- self.fake_context, target=target)
-
-
-class ExtensionControllerTest(ExtensionTestCase):
-
- def setUp(self):
- super(ExtensionControllerTest, self).setUp()
- self.ext_list = [
- "AdminActions",
- "Aggregates",
- "AssistedVolumeSnapshots",
- "AvailabilityZone",
- "Agents",
- "Certificates",
- "Cloudpipe",
- "CloudpipeUpdate",
- "ConsoleOutput",
- "Consoles",
- "Createserverext",
- "DeferredDelete",
- "DiskConfig",
- "ExtendedAvailabilityZone",
- "ExtendedFloatingIps",
- "ExtendedIps",
- "ExtendedIpsMac",
- "ExtendedVIFNet",
- "Evacuate",
- "ExtendedStatus",
- "ExtendedVolumes",
- "ExtendedServerAttributes",
- "FixedIPs",
- "FlavorAccess",
- "FlavorDisabled",
- "FlavorExtraSpecs",
- "FlavorExtraData",
- "FlavorManage",
- "FlavorRxtx",
- "FlavorSwap",
- "FloatingIps",
- "FloatingIpDns",
- "FloatingIpPools",
- "FloatingIpsBulk",
- "Fox In Socks",
- "Hosts",
- "ImageSize",
- "InstanceActions",
- "Keypairs",
- "Multinic",
- "MultipleCreate",
- "QuotaClasses",
- "Quotas",
- "ExtendedQuotas",
- "Rescue",
- "SchedulerHints",
- "SecurityGroupDefaultRules",
- "SecurityGroups",
- "ServerDiagnostics",
- "ServerListMultiStatus",
- "ServerPassword",
- "ServerStartStop",
- "Services",
- "SimpleTenantUsage",
- "UsedLimits",
- "UserData",
- "VirtualInterfaces",
- "VolumeAttachmentUpdate",
- "Volumes",
- ]
- self.ext_list.sort()
-
- def test_list_extensions_json(self):
- app = compute.APIRouter(init_only=('extensions',))
- request = webob.Request.blank("/fake/extensions")
- response = request.get_response(app)
- self.assertEqual(200, response.status_int)
-
- # Make sure we have all the extensions, extra extensions being OK.
- data = jsonutils.loads(response.body)
- names = [str(x['name']) for x in data['extensions']
- if str(x['name']) in self.ext_list]
- names.sort()
- self.assertEqual(names, self.ext_list)
-
- # Ensure all the timestamps are valid according to iso8601
- for ext in data['extensions']:
- iso8601.parse_date(ext['updated'])
-
- # Make sure that at least Fox in Sox is correct.
- (fox_ext, ) = [
- x for x in data['extensions'] if x['alias'] == 'FOXNSOX']
- self.assertEqual(fox_ext, {
- 'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
- 'name': 'Fox In Socks',
- 'updated': '2011-01-22T13:25:27-06:00',
- 'description': 'The Fox In Socks Extension.',
- 'alias': 'FOXNSOX',
- 'links': []
- },
- )
-
- for ext in data['extensions']:
- url = '/fake/extensions/%s' % ext['alias']
- request = webob.Request.blank(url)
- response = request.get_response(app)
- output = jsonutils.loads(response.body)
- self.assertEqual(output['extension']['alias'], ext['alias'])
-
- def test_get_extension_json(self):
- app = compute.APIRouter(init_only=('extensions',))
- request = webob.Request.blank("/fake/extensions/FOXNSOX")
- response = request.get_response(app)
- self.assertEqual(200, response.status_int)
-
- data = jsonutils.loads(response.body)
- self.assertEqual(data['extension'], {
- "namespace": "http://www.fox.in.socks/api/ext/pie/v1.0",
- "name": "Fox In Socks",
- "updated": "2011-01-22T13:25:27-06:00",
- "description": "The Fox In Socks Extension.",
- "alias": "FOXNSOX",
- "links": []})
-
- def test_get_non_existing_extension_json(self):
- app = compute.APIRouter(init_only=('extensions',))
- request = webob.Request.blank("/fake/extensions/4")
- response = request.get_response(app)
- self.assertEqual(404, response.status_int)
-
- def test_list_extensions_xml(self):
- app = compute.APIRouter(init_only=('servers', 'flavors', 'extensions'))
- request = webob.Request.blank("/fake/extensions")
- request.accept = "application/xml"
- response = request.get_response(app)
- self.assertEqual(200, response.status_int)
-
- root = etree.XML(response.body)
- self.assertEqual(root.tag.split('extensions')[0], NS)
-
- # Make sure we have all the extensions, extras extensions being OK.
- exts = root.findall('{0}extension'.format(NS))
- self.assertTrue(len(exts) >= len(self.ext_list))
-
- # Make sure that at least Fox in Sox is correct.
- (fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX']
- self.assertEqual(fox_ext.get('name'), 'Fox In Socks')
- self.assertEqual(fox_ext.get('namespace'),
- 'http://www.fox.in.socks/api/ext/pie/v1.0')
- self.assertEqual(fox_ext.get('updated'), '2011-01-22T13:25:27-06:00')
- self.assertEqual(fox_ext.findtext('{0}description'.format(NS)),
- 'The Fox In Socks Extension.')
-
- xmlutil.validate_schema(root, 'extensions')
-
- def test_get_extension_xml(self):
- app = compute.APIRouter(init_only=('servers', 'flavors', 'extensions'))
- request = webob.Request.blank("/fake/extensions/FOXNSOX")
- request.accept = "application/xml"
- response = request.get_response(app)
- self.assertEqual(200, response.status_int)
- xml = response.body
-
- root = etree.XML(xml)
- self.assertEqual(root.tag.split('extension')[0], NS)
- self.assertEqual(root.get('alias'), 'FOXNSOX')
- self.assertEqual(root.get('name'), 'Fox In Socks')
- self.assertEqual(root.get('namespace'),
- 'http://www.fox.in.socks/api/ext/pie/v1.0')
- self.assertEqual(root.get('updated'), '2011-01-22T13:25:27-06:00')
- self.assertEqual(root.findtext('{0}description'.format(NS)),
- 'The Fox In Socks Extension.')
-
- xmlutil.validate_schema(root, 'extension')
-
-
-class ResourceExtensionTest(ExtensionTestCase):
-
- def test_no_extension_present(self):
- manager = StubExtensionManager(None)
- app = compute.APIRouter(manager)
- request = webob.Request.blank("/blah")
- response = request.get_response(app)
- self.assertEqual(404, response.status_int)
-
- def test_get_resources(self):
- res_ext = base_extensions.ResourceExtension('tweedles',
- StubController(response_body))
- manager = StubExtensionManager(res_ext)
- app = compute.APIRouter(manager)
- request = webob.Request.blank("/fake/tweedles")
- response = request.get_response(app)
- self.assertEqual(200, response.status_int)
- self.assertEqual(response_body, response.body)
-
- def test_get_resources_with_controller(self):
- res_ext = base_extensions.ResourceExtension('tweedles',
- StubController(response_body))
- manager = StubExtensionManager(res_ext)
- app = compute.APIRouter(manager)
- request = webob.Request.blank("/fake/tweedles")
- response = request.get_response(app)
- self.assertEqual(200, response.status_int)
- self.assertEqual(response_body, response.body)
-
- def test_bad_request(self):
- res_ext = base_extensions.ResourceExtension('tweedles',
- StubController(response_body))
- manager = StubExtensionManager(res_ext)
- app = compute.APIRouter(manager)
- request = webob.Request.blank("/fake/tweedles")
- request.method = "POST"
- response = request.get_response(app)
- self.assertEqual(400, response.status_int)
- self.assertEqual('application/json', response.content_type)
- body = jsonutils.loads(response.body)
- expected = {
- "badRequest": {
- "message": "All aboard the fail train!",
- "code": 400
- }
- }
- self.assertThat(expected, matchers.DictMatches(body))
-
- def test_non_exist_resource(self):
- res_ext = base_extensions.ResourceExtension('tweedles',
- StubController(response_body))
- manager = StubExtensionManager(res_ext)
- app = compute.APIRouter(manager)
- request = webob.Request.blank("/fake/tweedles/1")
- response = request.get_response(app)
- self.assertEqual(404, response.status_int)
- self.assertEqual('application/json', response.content_type)
- body = jsonutils.loads(response.body)
- expected = {
- "itemNotFound": {
- "message": "The resource could not be found.",
- "code": 404
- }
- }
- self.assertThat(expected, matchers.DictMatches(body))
-
-
-class InvalidExtension(object):
-
- alias = "THIRD"
-
-
-class ExtensionManagerTest(ExtensionTestCase):
-
- response_body = "Try to say this Mr. Knox, sir..."
-
- def test_get_resources(self):
- app = compute.APIRouter()
- request = webob.Request.blank("/fake/foxnsocks")
- response = request.get_response(app)
- self.assertEqual(200, response.status_int)
- self.assertEqual(response_body, response.body)
-
- def test_invalid_extensions(self):
- # Don't need the serialization middleware here because we're
- # not testing any serialization
- compute.APIRouter()
- ext_mgr = compute_extensions.ExtensionManager()
- ext_mgr.register(InvalidExtension())
- self.assertTrue(ext_mgr.is_loaded('FOXNSOX'))
- self.assertFalse(ext_mgr.is_loaded('THIRD'))
-
-
-class ActionExtensionTest(ExtensionTestCase):
-
- def _send_server_action_request(self, url, body):
- app = compute.APIRouter(init_only=('servers',))
- request = webob.Request.blank(url)
- request.method = 'POST'
- request.content_type = 'application/json'
- request.body = jsonutils.dumps(body)
- response = request.get_response(app)
- return response
-
- def test_extended_action(self):
- body = dict(add_tweedle=dict(name="test"))
- url = "/fake/servers/abcd/action"
- response = self._send_server_action_request(url, body)
- self.assertEqual(200, response.status_int)
- self.assertEqual("Tweedle Beetle Added.", response.body)
-
- body = dict(delete_tweedle=dict(name="test"))
- response = self._send_server_action_request(url, body)
- self.assertEqual(200, response.status_int)
- self.assertEqual("Tweedle Beetle Deleted.", response.body)
-
- def test_invalid_action(self):
- body = dict(blah=dict(name="test")) # Doesn't exist
- url = "/fake/servers/abcd/action"
- response = self._send_server_action_request(url, body)
- self.assertEqual(400, response.status_int)
- self.assertEqual('application/json', response.content_type)
- body = jsonutils.loads(response.body)
- expected = {
- "badRequest": {
- "message": "There is no such action: blah",
- "code": 400
- }
- }
- self.assertThat(expected, matchers.DictMatches(body))
-
- def test_non_exist_action(self):
- body = dict(blah=dict(name="test"))
- url = "/fake/fdsa/1/action"
- response = self._send_server_action_request(url, body)
- self.assertEqual(404, response.status_int)
-
- def test_failed_action(self):
- body = dict(fail=dict(name="test"))
- url = "/fake/servers/abcd/action"
- response = self._send_server_action_request(url, body)
- self.assertEqual(400, response.status_int)
- self.assertEqual('application/json', response.content_type)
- body = jsonutils.loads(response.body)
- expected = {
- "badRequest": {
- "message": "Tweedle fail",
- "code": 400
- }
- }
- self.assertThat(expected, matchers.DictMatches(body))
-
-
-class RequestExtensionTest(ExtensionTestCase):
-
- def test_get_resources_with_stub_mgr(self):
- class GooGoose(wsgi.Controller):
- @wsgi.extends
- def show(self, req, resp_obj, id):
- # only handle JSON responses
- resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing')
-
- req_ext = base_extensions.ControllerExtension(
- StubControllerExtension(), 'flavors', GooGoose())
-
- manager = StubExtensionManager(None, None, None, req_ext)
- app = fakes.wsgi_app(ext_mgr=manager)
- request = webob.Request.blank("/v2/fake/flavors/1?chewing=bluegoo")
- request.environ['api.version'] = '2'
- response = request.get_response(app)
- self.assertEqual(200, response.status_int)
- response_data = jsonutils.loads(response.body)
- self.assertEqual('bluegoo', response_data['flavor']['googoose'])
-
- def test_get_resources_with_mgr(self):
-
- app = fakes.wsgi_app(init_only=('flavors',))
- request = webob.Request.blank("/v2/fake/flavors/1?chewing=newblue")
- request.environ['api.version'] = '2'
- response = request.get_response(app)
- self.assertEqual(200, response.status_int)
- response_data = jsonutils.loads(response.body)
- self.assertEqual('newblue', response_data['flavor']['googoose'])
- self.assertEqual("Pig Bands!", response_data['big_bands'])
-
-
-class ControllerExtensionTest(ExtensionTestCase):
- def test_controller_extension_early(self):
- controller = StubController(response_body)
- res_ext = base_extensions.ResourceExtension('tweedles', controller)
- ext_controller = StubEarlyExtensionController(extension_body)
- extension = StubControllerExtension()
- cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
- ext_controller)
- manager = StubExtensionManager(resource_ext=res_ext,
- controller_ext=cont_ext)
- app = compute.APIRouter(manager)
- request = webob.Request.blank("/fake/tweedles")
- response = request.get_response(app)
- self.assertEqual(200, response.status_int)
- self.assertEqual(extension_body, response.body)
-
- def test_controller_extension_late(self):
- # Need a dict for the body to convert to a ResponseObject
- controller = StubController(dict(foo=response_body))
- res_ext = base_extensions.ResourceExtension('tweedles', controller)
-
- ext_controller = StubLateExtensionController(extension_body)
- extension = StubControllerExtension()
- cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
- ext_controller)
-
- manager = StubExtensionManager(resource_ext=res_ext,
- controller_ext=cont_ext)
- app = compute.APIRouter(manager)
- request = webob.Request.blank("/fake/tweedles")
- response = request.get_response(app)
- self.assertEqual(200, response.status_int)
- self.assertEqual(extension_body, response.body)
-
- def test_controller_extension_late_inherited_resource(self):
- # Need a dict for the body to convert to a ResponseObject
- controller = StubController(dict(foo=response_body))
- parent_ext = base_extensions.ResourceExtension('tweedles', controller)
-
- ext_controller = StubLateExtensionController(extension_body)
- extension = StubControllerExtension()
- cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
- ext_controller)
-
- manager = StubExtensionManager(resource_ext=parent_ext,
- controller_ext=cont_ext)
- child_ext = base_extensions.ResourceExtension('beetles', controller,
- inherits='tweedles')
- manager.extra_resource_ext = child_ext
- app = compute.APIRouter(manager)
- request = webob.Request.blank("/fake/beetles")
- response = request.get_response(app)
- self.assertEqual(200, response.status_int)
- self.assertEqual(extension_body, response.body)
-
- def test_controller_action_extension_early(self):
- controller = StubActionController(response_body)
- actions = dict(action='POST')
- res_ext = base_extensions.ResourceExtension('tweedles', controller,
- member_actions=actions)
- ext_controller = StubEarlyExtensionController(extension_body)
- extension = StubControllerExtension()
- cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
- ext_controller)
- manager = StubExtensionManager(resource_ext=res_ext,
- controller_ext=cont_ext)
- app = compute.APIRouter(manager)
- request = webob.Request.blank("/fake/tweedles/foo/action")
- request.method = 'POST'
- request.headers['Content-Type'] = 'application/json'
- request.body = jsonutils.dumps(dict(fooAction=True))
- response = request.get_response(app)
- self.assertEqual(200, response.status_int)
- self.assertEqual(extension_body, response.body)
-
- def test_controller_action_extension_late(self):
- # Need a dict for the body to convert to a ResponseObject
- controller = StubActionController(dict(foo=response_body))
- actions = dict(action='POST')
- res_ext = base_extensions.ResourceExtension('tweedles', controller,
- member_actions=actions)
-
- ext_controller = StubLateExtensionController(extension_body)
- extension = StubControllerExtension()
- cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
- ext_controller)
-
- manager = StubExtensionManager(resource_ext=res_ext,
- controller_ext=cont_ext)
- app = compute.APIRouter(manager)
- request = webob.Request.blank("/fake/tweedles/foo/action")
- request.method = 'POST'
- request.headers['Content-Type'] = 'application/json'
- request.body = jsonutils.dumps(dict(fooAction=True))
- response = request.get_response(app)
- self.assertEqual(200, response.status_int)
- self.assertEqual(extension_body, response.body)
-
-
-class ExtensionsXMLSerializerTest(test.TestCase):
-
- def test_serialize_extension(self):
- serializer = base_extensions.ExtensionTemplate()
- data = {'extension': {
- 'name': 'ext1',
- 'namespace': 'http://docs.rack.com/servers/api/ext/pie/v1.0',
- 'alias': 'RS-PIE',
- 'updated': '2011-01-22T13:25:27-06:00',
- 'description': 'Adds the capability to share an image.',
- 'links': [{'rel': 'describedby',
- 'type': 'application/pdf',
- 'href': 'http://docs.rack.com/servers/api/ext/cs.pdf'},
- {'rel': 'describedby',
- 'type': 'application/vnd.sun.wadl+xml',
- 'href': 'http://docs.rack.com/servers/api/ext/cs.wadl'}]}}
-
- xml = serializer.serialize(data)
- root = etree.XML(xml)
- ext_dict = data['extension']
- self.assertEqual(root.findtext('{0}description'.format(NS)),
- ext_dict['description'])
-
- for key in ['name', 'namespace', 'alias', 'updated']:
- self.assertEqual(root.get(key), ext_dict[key])
-
- link_nodes = root.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(ext_dict['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- xmlutil.validate_schema(root, 'extension')
-
- def test_serialize_extensions(self):
- serializer = base_extensions.ExtensionsTemplate()
- data = {"extensions": [{
- "name": "Public Image Extension",
- "namespace": "http://foo.com/api/ext/pie/v1.0",
- "alias": "RS-PIE",
- "updated": "2011-01-22T13:25:27-06:00",
- "description": "Adds the capability to share an image.",
- "links": [{"rel": "describedby",
- "type": "application/pdf",
- "href": "http://foo.com/api/ext/cs-pie.pdf"},
- {"rel": "describedby",
- "type": "application/vnd.sun.wadl+xml",
- "href": "http://foo.com/api/ext/cs-pie.wadl"}]},
- {"name": "Cloud Block Storage",
- "namespace": "http://foo.com/api/ext/cbs/v1.0",
- "alias": "RS-CBS",
- "updated": "2011-01-12T11:22:33-06:00",
- "description": "Allows mounting cloud block storage.",
- "links": [{"rel": "describedby",
- "type": "application/pdf",
- "href": "http://foo.com/api/ext/cs-cbs.pdf"},
- {"rel": "describedby",
- "type": "application/vnd.sun.wadl+xml",
- "href": "http://foo.com/api/ext/cs-cbs.wadl"}]}]}
-
- xml = serializer.serialize(data)
- root = etree.XML(xml)
- ext_elems = root.findall('{0}extension'.format(NS))
- self.assertEqual(len(ext_elems), 2)
- for i, ext_elem in enumerate(ext_elems):
- ext_dict = data['extensions'][i]
- self.assertEqual(ext_elem.findtext('{0}description'.format(NS)),
- ext_dict['description'])
-
- for key in ['name', 'namespace', 'alias', 'updated']:
- self.assertEqual(ext_elem.get(key), ext_dict[key])
-
- link_nodes = ext_elem.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(ext_dict['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- xmlutil.validate_schema(root, 'extensions')
-
-
-class ExtensionControllerIdFormatTest(test.TestCase):
-
- def _bounce_id(self, test_id):
-
- class BounceController(object):
- def show(self, req, id):
- return id
- res_ext = base_extensions.ResourceExtension('bounce',
- BounceController())
- manager = StubExtensionManager(res_ext)
- app = compute.APIRouter(manager)
- request = webob.Request.blank("/fake/bounce/%s" % test_id)
- response = request.get_response(app)
- return response.body
-
- def test_id_with_xml_format(self):
- result = self._bounce_id('foo.xml')
- self.assertEqual(result, 'foo')
-
- def test_id_with_json_format(self):
- result = self._bounce_id('foo.json')
- self.assertEqual(result, 'foo')
-
- def test_id_with_bad_format(self):
- result = self._bounce_id('foo.bad')
- self.assertEqual(result, 'foo.bad')
diff --git a/nova/tests/api/openstack/compute/test_flavors.py b/nova/tests/api/openstack/compute/test_flavors.py
deleted file mode 100644
index 71a844f153..0000000000
--- a/nova/tests/api/openstack/compute/test_flavors.py
+++ /dev/null
@@ -1,943 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-import six.moves.urllib.parse as urlparse
-import webob
-
-from nova.api.openstack import common
-from nova.api.openstack.compute import flavors as flavors_v2
-from nova.api.openstack.compute.plugins.v3 import flavors as flavors_v3
-from nova.api.openstack import xmlutil
-import nova.compute.flavors
-from nova import context
-from nova import db
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import matchers
-
-NS = "{http://docs.openstack.org/compute/api/v1.1}"
-ATOMNS = "{http://www.w3.org/2005/Atom}"
-
-
-FAKE_FLAVORS = {
- 'flavor 1': {
- "flavorid": '1',
- "name": 'flavor 1',
- "memory_mb": '256',
- "root_gb": '10',
- "ephemeral_gb": '20',
- "swap": '10',
- "disabled": False,
- "vcpus": '',
- },
- 'flavor 2': {
- "flavorid": '2',
- "name": 'flavor 2',
- "memory_mb": '512',
- "root_gb": '20',
- "ephemeral_gb": '10',
- "swap": '5',
- "disabled": False,
- "vcpus": '',
- },
-}
-
-
-def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
- return FAKE_FLAVORS['flavor %s' % flavorid]
-
-
-def fake_get_all_flavors_sorted_list(context=None, inactive=False,
- filters=None, sort_key='flavorid',
- sort_dir='asc', limit=None, marker=None):
- if marker in ['99999']:
- raise exception.MarkerNotFound(marker)
-
- def reject_min(db_attr, filter_attr):
- return (filter_attr in filters and
- int(flavor[db_attr]) < int(filters[filter_attr]))
-
- filters = filters or {}
- res = []
- for (flavor_name, flavor) in FAKE_FLAVORS.items():
- if reject_min('memory_mb', 'min_memory_mb'):
- continue
- elif reject_min('root_gb', 'min_root_gb'):
- continue
-
- res.append(flavor)
-
- res = sorted(res, key=lambda item: item[sort_key])
- output = []
- marker_found = True if marker is None else False
- for flavor in res:
- if not marker_found and marker == flavor['flavorid']:
- marker_found = True
- elif marker_found:
- if limit is None or len(output) < int(limit):
- output.append(flavor)
-
- return output
-
-
-def fake_get_limit_and_marker(request, max_limit=1):
- params = common.get_pagination_params(request)
- limit = params.get('limit', max_limit)
- limit = min(max_limit, limit)
- marker = params.get('marker')
-
- return limit, marker
-
-
-def empty_get_all_flavors_sorted_list(context=None, inactive=False,
- filters=None, sort_key='flavorid',
- sort_dir='asc', limit=None, marker=None):
- return []
-
-
-def return_flavor_not_found(flavor_id, ctxt=None):
- raise exception.FlavorNotFound(flavor_id=flavor_id)
-
-
-class FlavorsTestV21(test.TestCase):
- _prefix = "/v3"
- Controller = flavors_v3.FlavorsController
- fake_request = fakes.HTTPRequestV3
- _rspv = "v3"
- _fake = ""
-
- def setUp(self):
- super(FlavorsTestV21, self).setUp()
- self.flags(osapi_compute_extension=[])
- fakes.stub_out_networking(self.stubs)
- fakes.stub_out_rate_limiting(self.stubs)
- self.stubs.Set(nova.compute.flavors, "get_all_flavors_sorted_list",
- fake_get_all_flavors_sorted_list)
- self.stubs.Set(nova.compute.flavors,
- "get_flavor_by_flavor_id",
- fake_flavor_get_by_flavor_id)
- self.controller = self.Controller()
-
- def _set_expected_body(self, expected, ephemeral, swap, disabled):
- # NOTE(oomichi): On v2.1 API, some extensions of v2.0 are merged
- # as core features and we can get the following parameters as the
- # default.
- expected['OS-FLV-EXT-DATA:ephemeral'] = ephemeral
- expected['OS-FLV-DISABLED:disabled'] = disabled
- expected['swap'] = swap
-
- def test_get_flavor_by_invalid_id(self):
- self.stubs.Set(nova.compute.flavors,
- "get_flavor_by_flavor_id",
- return_flavor_not_found)
- req = self.fake_request.blank(self._prefix + '/flavors/asdf')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.show, req, 'asdf')
-
- def test_get_flavor_by_id(self):
- req = self.fake_request.blank(self._prefix + '/flavors/1')
- flavor = self.controller.show(req, '1')
- expected = {
- "flavor": {
- "id": "1",
- "name": "flavor 1",
- "ram": "256",
- "disk": "10",
- "vcpus": "",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/" + self._rspv +
- "/flavors/1",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost" + self._fake +
- "/flavors/1",
- },
- ],
- },
- }
- self._set_expected_body(expected['flavor'], ephemeral='20',
- swap='10', disabled=False)
- self.assertEqual(flavor, expected)
-
- def test_get_flavor_with_custom_link_prefix(self):
- self.flags(osapi_compute_link_prefix='http://zoo.com:42',
- osapi_glance_link_prefix='http://circus.com:34')
- req = self.fake_request.blank(self._prefix + '/flavors/1')
- flavor = self.controller.show(req, '1')
- expected = {
- "flavor": {
- "id": "1",
- "name": "flavor 1",
- "ram": "256",
- "disk": "10",
- "vcpus": "",
- "links": [
- {
- "rel": "self",
- "href": "http://zoo.com:42/" + self._rspv +
- "/flavors/1",
- },
- {
- "rel": "bookmark",
- "href": "http://zoo.com:42" + self._fake +
- "/flavors/1",
- },
- ],
- },
- }
- self._set_expected_body(expected['flavor'], ephemeral='20',
- swap='10', disabled=False)
- self.assertEqual(expected, flavor)
-
- def test_get_flavor_list(self):
- req = self.fake_request.blank(self._prefix + '/flavors')
- flavor = self.controller.index(req)
- expected = {
- "flavors": [
- {
- "id": "1",
- "name": "flavor 1",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/" + self._rspv +
- "/flavors/1",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost" + self._fake +
- "/flavors/1",
- },
- ],
- },
- {
- "id": "2",
- "name": "flavor 2",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/" + self._rspv +
- "/flavors/2",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost" + self._fake +
- "/flavors/2",
- },
- ],
- },
- ],
- }
- self.assertEqual(flavor, expected)
-
- def test_get_flavor_list_with_marker(self):
- self.maxDiff = None
- url = self._prefix + '/flavors?limit=1&marker=1'
- req = self.fake_request.blank(url)
- flavor = self.controller.index(req)
- expected = {
- "flavors": [
- {
- "id": "2",
- "name": "flavor 2",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/" + self._rspv +
- "/flavors/2",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost" + self._fake +
- "/flavors/2",
- },
- ],
- },
- ],
- 'flavors_links': [
- {'href': 'http://localhost/' + self._rspv +
- '/flavors?limit=1&marker=2',
- 'rel': 'next'}
- ]
- }
- self.assertThat(flavor, matchers.DictMatches(expected))
-
- def test_get_flavor_list_with_invalid_marker(self):
- req = self.fake_request.blank(self._prefix + '/flavors?marker=99999')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.index, req)
-
- def test_get_flavor_detail_with_limit(self):
- url = self._prefix + '/flavors/detail?limit=1'
- req = self.fake_request.blank(url)
- response = self.controller.index(req)
- response_list = response["flavors"]
- response_links = response["flavors_links"]
-
- expected_flavors = [
- {
- "id": "1",
- "name": "flavor 1",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/" + self._rspv +
- "/flavors/1",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost" + self._fake +
- "/flavors/1",
- },
- ],
- },
- ]
- self.assertEqual(response_list, expected_flavors)
- self.assertEqual(response_links[0]['rel'], 'next')
-
- href_parts = urlparse.urlparse(response_links[0]['href'])
- self.assertEqual('/' + self._rspv + '/flavors', href_parts.path)
- params = urlparse.parse_qs(href_parts.query)
- self.assertThat({'limit': ['1'], 'marker': ['1']},
- matchers.DictMatches(params))
-
- def test_get_flavor_with_limit(self):
- req = self.fake_request.blank(self._prefix + '/flavors?limit=2')
- response = self.controller.index(req)
- response_list = response["flavors"]
- response_links = response["flavors_links"]
-
- expected_flavors = [
- {
- "id": "1",
- "name": "flavor 1",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/" + self._rspv +
- "/flavors/1",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost" + self._fake +
- "/flavors/1",
- },
- ],
- },
- {
- "id": "2",
- "name": "flavor 2",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/" + self._rspv +
- "/flavors/2",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost" + self._fake +
- "/flavors/2",
- },
- ],
- }
- ]
- self.assertEqual(response_list, expected_flavors)
- self.assertEqual(response_links[0]['rel'], 'next')
-
- href_parts = urlparse.urlparse(response_links[0]['href'])
- self.assertEqual('/' + self._rspv + '/flavors', href_parts.path)
- params = urlparse.parse_qs(href_parts.query)
- self.assertThat({'limit': ['2'], 'marker': ['2']},
- matchers.DictMatches(params))
-
- def test_get_flavor_with_default_limit(self):
- self.stubs.Set(common, "get_limit_and_marker",
- fake_get_limit_and_marker)
- self.flags(osapi_max_limit=1)
- req = fakes.HTTPRequest.blank('/v2/fake/flavors?limit=2')
- response = self.controller.index(req)
- response_list = response["flavors"]
- response_links = response["flavors_links"]
-
- expected_flavors = [
- {
- "id": "1",
- "name": "flavor 1",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/flavors/1",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/flavors/1",
- }
- ]
- }
- ]
-
- self.assertEqual(response_list, expected_flavors)
- self.assertEqual(response_links[0]['rel'], 'next')
- href_parts = urlparse.urlparse(response_links[0]['href'])
- self.assertEqual('/v2/fake/flavors', href_parts.path)
- params = urlparse.parse_qs(href_parts.query)
- self.assertThat({'limit': ['2'], 'marker': ['1']},
- matchers.DictMatches(params))
-
- def test_get_flavor_list_detail(self):
- req = self.fake_request.blank(self._prefix + '/flavors/detail')
- flavor = self.controller.detail(req)
- expected = {
- "flavors": [
- {
- "id": "1",
- "name": "flavor 1",
- "ram": "256",
- "disk": "10",
- "vcpus": "",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/" + self._rspv +
- "/flavors/1",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost" + self._fake +
- "/flavors/1",
- },
- ],
- },
- {
- "id": "2",
- "name": "flavor 2",
- "ram": "512",
- "disk": "20",
- "vcpus": "",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/" + self._rspv +
- "/flavors/2",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost" + self._fake +
- "/flavors/2",
- },
- ],
- },
- ],
- }
- self._set_expected_body(expected['flavors'][0], ephemeral='20',
- swap='10', disabled=False)
- self._set_expected_body(expected['flavors'][1], ephemeral='10',
- swap='5', disabled=False)
- self.assertEqual(expected, flavor)
-
- def test_get_empty_flavor_list(self):
- self.stubs.Set(nova.compute.flavors, "get_all_flavors_sorted_list",
- empty_get_all_flavors_sorted_list)
-
- req = self.fake_request.blank(self._prefix + '/flavors')
- flavors = self.controller.index(req)
- expected = {'flavors': []}
- self.assertEqual(flavors, expected)
-
- def test_get_flavor_list_filter_min_ram(self):
- # Flavor lists may be filtered by minRam.
- req = self.fake_request.blank(self._prefix + '/flavors?minRam=512')
- flavor = self.controller.index(req)
- expected = {
- "flavors": [
- {
- "id": "2",
- "name": "flavor 2",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/" + self._rspv +
- "/flavors/2",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost" + self._fake +
- "/flavors/2",
- },
- ],
- },
- ],
- }
- self.assertEqual(flavor, expected)
-
- def test_get_flavor_list_filter_invalid_min_ram(self):
- # Ensure you cannot list flavors with invalid minRam param.
- req = self.fake_request.blank(self._prefix + '/flavors?minRam=NaN')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.index, req)
-
- def test_get_flavor_list_filter_min_disk(self):
- # Flavor lists may be filtered by minDisk.
- req = self.fake_request.blank(self._prefix + '/flavors?minDisk=20')
- flavor = self.controller.index(req)
- expected = {
- "flavors": [
- {
- "id": "2",
- "name": "flavor 2",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/" + self._rspv +
- "/flavors/2",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost" + self._fake +
- "/flavors/2",
- },
- ],
- },
- ],
- }
- self.assertEqual(flavor, expected)
-
- def test_get_flavor_list_filter_invalid_min_disk(self):
- # Ensure you cannot list flavors with invalid minDisk param.
- req = self.fake_request.blank(self._prefix + '/flavors?minDisk=NaN')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.index, req)
-
- def test_get_flavor_list_detail_min_ram_and_min_disk(self):
- """Tests that filtering work on flavor details and that minRam and
- minDisk filters can be combined
- """
- req = self.fake_request.blank(self._prefix + '/flavors/detail'
- '?minRam=256&minDisk=20')
- flavor = self.controller.detail(req)
- expected = {
- "flavors": [
- {
- "id": "2",
- "name": "flavor 2",
- "ram": "512",
- "disk": "20",
- "vcpus": "",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/" + self._rspv +
- "/flavors/2",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost" + self._fake +
- "/flavors/2",
- },
- ],
- },
- ],
- }
- self._set_expected_body(expected['flavors'][0], ephemeral='10',
- swap='5', disabled=False)
- self.assertEqual(expected, flavor)
-
-
-class FlavorsTestV20(FlavorsTestV21):
- _prefix = "/v2/fake"
- Controller = flavors_v2.Controller
- fake_request = fakes.HTTPRequest
- _rspv = "v2/fake"
- _fake = "/fake"
-
- def _set_expected_body(self, expected, ephemeral, swap, disabled):
- pass
-
-
-class FlavorsXMLSerializationTest(test.TestCase):
-
- def test_xml_declaration(self):
- serializer = flavors_v2.FlavorTemplate()
-
- fixture = {
- "flavor": {
- "id": "12",
- "name": "asdf",
- "ram": "256",
- "disk": "10",
- "vcpus": "",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/flavors/12",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/flavors/12",
- },
- ],
- },
- }
-
- output = serializer.serialize(fixture)
- has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
- self.assertTrue(has_dec)
-
- def test_show(self):
- serializer = flavors_v2.FlavorTemplate()
-
- fixture = {
- "flavor": {
- "id": "12",
- "name": "asdf",
- "ram": "256",
- "disk": "10",
- "vcpus": "",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/flavors/12",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/flavors/12",
- },
- ],
- },
- }
-
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'flavor')
- flavor_dict = fixture['flavor']
-
- for key in ['name', 'id', 'ram', 'disk']:
- self.assertEqual(root.get(key), str(flavor_dict[key]))
-
- link_nodes = root.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(flavor_dict['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- def test_show_handles_integers(self):
- serializer = flavors_v2.FlavorTemplate()
-
- fixture = {
- "flavor": {
- "id": 12,
- "name": "asdf",
- "ram": 256,
- "disk": 10,
- "vcpus": "",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/flavors/12",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/flavors/12",
- },
- ],
- },
- }
-
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'flavor')
- flavor_dict = fixture['flavor']
-
- for key in ['name', 'id', 'ram', 'disk']:
- self.assertEqual(root.get(key), str(flavor_dict[key]))
-
- link_nodes = root.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(flavor_dict['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- def test_detail(self):
- serializer = flavors_v2.FlavorsTemplate()
-
- fixture = {
- "flavors": [
- {
- "id": "23",
- "name": "flavor 23",
- "ram": "512",
- "disk": "20",
- "vcpus": "",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/flavors/23",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/flavors/23",
- },
- ],
- },
- {
- "id": "13",
- "name": "flavor 13",
- "ram": "256",
- "disk": "10",
- "vcpus": "",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/flavors/13",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/flavors/13",
- },
- ],
- },
- ],
- }
-
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'flavors')
- flavor_elems = root.findall('{0}flavor'.format(NS))
- self.assertEqual(len(flavor_elems), 2)
- for i, flavor_elem in enumerate(flavor_elems):
- flavor_dict = fixture['flavors'][i]
-
- for key in ['name', 'id', 'ram', 'disk']:
- self.assertEqual(flavor_elem.get(key), str(flavor_dict[key]))
-
- link_nodes = flavor_elem.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(flavor_dict['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- def test_index(self):
- serializer = flavors_v2.MinimalFlavorsTemplate()
-
- fixture = {
- "flavors": [
- {
- "id": "23",
- "name": "flavor 23",
- "ram": "512",
- "disk": "20",
- "vcpus": "",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/flavors/23",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/flavors/23",
- },
- ],
- },
- {
- "id": "13",
- "name": "flavor 13",
- "ram": "256",
- "disk": "10",
- "vcpus": "",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/flavors/13",
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/flavors/13",
- },
- ],
- },
- ],
- }
-
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'flavors')
- flavor_elems = root.findall('{0}flavor'.format(NS))
- self.assertEqual(len(flavor_elems), 2)
- for i, flavor_elem in enumerate(flavor_elems):
- flavor_dict = fixture['flavors'][i]
-
- for key in ['name', 'id']:
- self.assertEqual(flavor_elem.get(key), str(flavor_dict[key]))
-
- link_nodes = flavor_elem.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(flavor_dict['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- def test_index_empty(self):
- serializer = flavors_v2.MinimalFlavorsTemplate()
-
- fixture = {
- "flavors": [],
- }
-
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'flavors')
- flavor_elems = root.findall('{0}flavor'.format(NS))
- self.assertEqual(len(flavor_elems), 0)
-
-
-class DisabledFlavorsWithRealDBTestV21(test.TestCase):
- """Tests that disabled flavors should not be shown nor listed."""
- Controller = flavors_v3.FlavorsController
- _prefix = "/v3"
- fake_request = fakes.HTTPRequestV3
-
- def setUp(self):
- super(DisabledFlavorsWithRealDBTestV21, self).setUp()
-
- # Add a new disabled type to the list of flavors
- self.req = self.fake_request.blank(self._prefix + '/flavors')
- self.context = self.req.environ['nova.context']
- self.admin_context = context.get_admin_context()
-
- self.disabled_type = self._create_disabled_instance_type()
- self.inst_types = db.flavor_get_all(
- self.admin_context)
- self.controller = self.Controller()
-
- def tearDown(self):
- db.flavor_destroy(
- self.admin_context, self.disabled_type['name'])
-
- super(DisabledFlavorsWithRealDBTestV21, self).tearDown()
-
- def _create_disabled_instance_type(self):
- inst_types = db.flavor_get_all(self.admin_context)
-
- inst_type = inst_types[0]
-
- del inst_type['id']
- inst_type['name'] += '.disabled'
- inst_type['flavorid'] = unicode(max(
- [int(flavor['flavorid']) for flavor in inst_types]) + 1)
- inst_type['disabled'] = True
-
- disabled_type = db.flavor_create(
- self.admin_context, inst_type)
-
- return disabled_type
-
- def test_index_should_not_list_disabled_flavors_to_user(self):
- self.context.is_admin = False
-
- flavor_list = self.controller.index(self.req)['flavors']
- api_flavorids = set(f['id'] for f in flavor_list)
-
- db_flavorids = set(i['flavorid'] for i in self.inst_types)
- disabled_flavorid = str(self.disabled_type['flavorid'])
-
- self.assertIn(disabled_flavorid, db_flavorids)
- self.assertEqual(db_flavorids - set([disabled_flavorid]),
- api_flavorids)
-
- def test_index_should_list_disabled_flavors_to_admin(self):
- self.context.is_admin = True
-
- flavor_list = self.controller.index(self.req)['flavors']
- api_flavorids = set(f['id'] for f in flavor_list)
-
- db_flavorids = set(i['flavorid'] for i in self.inst_types)
- disabled_flavorid = str(self.disabled_type['flavorid'])
-
- self.assertIn(disabled_flavorid, db_flavorids)
- self.assertEqual(db_flavorids, api_flavorids)
-
- def test_show_should_include_disabled_flavor_for_user(self):
- """Counterintuitively we should show disabled flavors to all users and
- not just admins. The reason is that, when a user performs a server-show
- request, we want to be able to display the pretty flavor name ('512 MB
- Instance') and not just the flavor-id even if the flavor id has been
- marked disabled.
- """
- self.context.is_admin = False
-
- flavor = self.controller.show(
- self.req, self.disabled_type['flavorid'])['flavor']
-
- self.assertEqual(flavor['name'], self.disabled_type['name'])
-
- def test_show_should_include_disabled_flavor_for_admin(self):
- self.context.is_admin = True
-
- flavor = self.controller.show(
- self.req, self.disabled_type['flavorid'])['flavor']
-
- self.assertEqual(flavor['name'], self.disabled_type['name'])
-
-
-class DisabledFlavorsWithRealDBTestV20(DisabledFlavorsWithRealDBTestV21):
- """Tests that disabled flavors should not be shown nor listed."""
- Controller = flavors_v2.Controller
- _prefix = "/v2/fake"
- fake_request = fakes.HTTPRequest
-
-
-class ParseIsPublicTestV21(test.TestCase):
- Controller = flavors_v3.FlavorsController
-
- def setUp(self):
- super(ParseIsPublicTestV21, self).setUp()
- self.controller = self.Controller()
-
- def assertPublic(self, expected, is_public):
- self.assertIs(expected, self.controller._parse_is_public(is_public),
- '%s did not return %s' % (is_public, expected))
-
- def test_None(self):
- self.assertPublic(True, None)
-
- def test_truthy(self):
- self.assertPublic(True, True)
- self.assertPublic(True, 't')
- self.assertPublic(True, 'true')
- self.assertPublic(True, 'yes')
- self.assertPublic(True, '1')
-
- def test_falsey(self):
- self.assertPublic(False, False)
- self.assertPublic(False, 'f')
- self.assertPublic(False, 'false')
- self.assertPublic(False, 'no')
- self.assertPublic(False, '0')
-
- def test_string_none(self):
- self.assertPublic(None, 'none')
- self.assertPublic(None, 'None')
-
- def test_other(self):
- self.assertRaises(
- webob.exc.HTTPBadRequest, self.assertPublic, None, 'other')
-
-
-class ParseIsPublicTestV20(ParseIsPublicTestV21):
- Controller = flavors_v2.Controller
diff --git a/nova/tests/api/openstack/compute/test_image_metadata.py b/nova/tests/api/openstack/compute/test_image_metadata.py
deleted file mode 100644
index 194dfd3b1c..0000000000
--- a/nova/tests/api/openstack/compute/test_image_metadata.py
+++ /dev/null
@@ -1,366 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-import mock
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute import image_metadata
-from nova.api.openstack.compute.plugins.v3 import image_metadata \
- as image_metadata_v21
-from nova import exception
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import image_fixtures
-
-IMAGE_FIXTURES = image_fixtures.get_image_fixtures()
-CHK_QUOTA_STR = 'nova.api.openstack.common.check_img_metadata_properties_quota'
-
-
-def get_image_123():
- return copy.deepcopy(IMAGE_FIXTURES)[0]
-
-
-class ImageMetaDataTestV21(test.NoDBTestCase):
- controller_class = image_metadata_v21.ImageMetadataController
-
- def setUp(self):
- super(ImageMetaDataTestV21, self).setUp()
- self.controller = self.controller_class()
-
- @mock.patch('nova.image.api.API.get', return_value=get_image_123())
- def test_index(self, get_all_mocked):
- req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
- res_dict = self.controller.index(req, '123')
- expected = {'metadata': {'key1': 'value1'}}
- self.assertEqual(res_dict, expected)
- get_all_mocked.assert_called_once_with(mock.ANY, '123')
-
- @mock.patch('nova.image.api.API.get', return_value=get_image_123())
- def test_show(self, get_mocked):
- req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
- res_dict = self.controller.show(req, '123', 'key1')
- self.assertIn('meta', res_dict)
- self.assertEqual(len(res_dict['meta']), 1)
- self.assertEqual('value1', res_dict['meta']['key1'])
- get_mocked.assert_called_once_with(mock.ANY, '123')
-
- @mock.patch('nova.image.api.API.get', return_value=get_image_123())
- def test_show_not_found(self, _get_mocked):
- req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key9')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.show, req, '123', 'key9')
-
- @mock.patch('nova.image.api.API.get',
- side_effect=exception.ImageNotFound(image_id='100'))
- def test_show_image_not_found(self, _get_mocked):
- req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.show, req, '100', 'key9')
-
- @mock.patch(CHK_QUOTA_STR)
- @mock.patch('nova.image.api.API.update')
- @mock.patch('nova.image.api.API.get', return_value=get_image_123())
- def test_create(self, get_mocked, update_mocked, quota_mocked):
- mock_result = copy.deepcopy(get_image_123())
- mock_result['properties']['key7'] = 'value7'
- update_mocked.return_value = mock_result
- req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
- req.method = 'POST'
- body = {"metadata": {"key7": "value7"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- res = self.controller.create(req, '123', body)
- get_mocked.assert_called_once_with(mock.ANY, '123')
- expected = copy.deepcopy(get_image_123())
- expected['properties'] = {
- 'key1': 'value1', # existing meta
- 'key7': 'value7' # new meta
- }
- quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
- update_mocked.assert_called_once_with(mock.ANY, '123', expected,
- data=None, purge_props=True)
-
- expected_output = {'metadata': {'key1': 'value1', 'key7': 'value7'}}
- self.assertEqual(expected_output, res)
-
- @mock.patch(CHK_QUOTA_STR)
- @mock.patch('nova.image.api.API.update')
- @mock.patch('nova.image.api.API.get',
- side_effect=exception.ImageNotFound(image_id='100'))
- def test_create_image_not_found(self, _get_mocked, update_mocked,
- quota_mocked):
- req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
- req.method = 'POST'
- body = {"metadata": {"key7": "value7"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.create, req, '100', body)
- self.assertFalse(quota_mocked.called)
- self.assertFalse(update_mocked.called)
-
- @mock.patch(CHK_QUOTA_STR)
- @mock.patch('nova.image.api.API.update')
- @mock.patch('nova.image.api.API.get', return_value=get_image_123())
- def test_update_all(self, get_mocked, update_mocked, quota_mocked):
- req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
- req.method = 'PUT'
- body = {"metadata": {"key9": "value9"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- res = self.controller.update_all(req, '123', body)
- get_mocked.assert_called_once_with(mock.ANY, '123')
- expected = copy.deepcopy(get_image_123())
- expected['properties'] = {
- 'key9': 'value9' # replace meta
- }
- quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
- update_mocked.assert_called_once_with(mock.ANY, '123', expected,
- data=None, purge_props=True)
-
- expected_output = {'metadata': {'key9': 'value9'}}
- self.assertEqual(expected_output, res)
-
- @mock.patch(CHK_QUOTA_STR)
- @mock.patch('nova.image.api.API.get',
- side_effect=exception.ImageNotFound(image_id='100'))
- def test_update_all_image_not_found(self, _get_mocked, quota_mocked):
- req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
- req.method = 'PUT'
- body = {"metadata": {"key9": "value9"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.update_all, req, '100', body)
- self.assertFalse(quota_mocked.called)
-
- @mock.patch(CHK_QUOTA_STR)
- @mock.patch('nova.image.api.API.update')
- @mock.patch('nova.image.api.API.get', return_value=get_image_123())
- def test_update_item(self, _get_mocked, update_mocked, quota_mocked):
- req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
- req.method = 'PUT'
- body = {"meta": {"key1": "zz"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- res = self.controller.update(req, '123', 'key1', body)
- expected = copy.deepcopy(get_image_123())
- expected['properties'] = {
- 'key1': 'zz' # changed meta
- }
- quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
- update_mocked.assert_called_once_with(mock.ANY, '123', expected,
- data=None, purge_props=True)
-
- expected_output = {'meta': {'key1': 'zz'}}
- self.assertEqual(res, expected_output)
-
- @mock.patch(CHK_QUOTA_STR)
- @mock.patch('nova.image.api.API.get',
- side_effect=exception.ImageNotFound(image_id='100'))
- def test_update_item_image_not_found(self, _get_mocked, quota_mocked):
- req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
- req.method = 'PUT'
- body = {"meta": {"key1": "zz"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.update, req, '100', 'key1', body)
- self.assertFalse(quota_mocked.called)
-
- @mock.patch(CHK_QUOTA_STR)
- @mock.patch('nova.image.api.API.update')
- @mock.patch('nova.image.api.API.get')
- def test_update_item_bad_body(self, get_mocked, update_mocked,
- quota_mocked):
- req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
- req.method = 'PUT'
- body = {"key1": "zz"}
- req.body = ''
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.update, req, '123', 'key1', body)
- self.assertFalse(get_mocked.called)
- self.assertFalse(quota_mocked.called)
- self.assertFalse(update_mocked.called)
-
- @mock.patch(CHK_QUOTA_STR,
- side_effect=webob.exc.HTTPRequestEntityTooLarge(
- explanation='', headers={'Retry-After': 0}))
- @mock.patch('nova.image.api.API.update')
- @mock.patch('nova.image.api.API.get')
- def test_update_item_too_many_keys(self, get_mocked, update_mocked,
- _quota_mocked):
- req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
- req.method = 'PUT'
- body = {"metadata": {"foo": "bar"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.update, req, '123', 'key1', body)
- self.assertFalse(get_mocked.called)
- self.assertFalse(update_mocked.called)
-
- @mock.patch(CHK_QUOTA_STR)
- @mock.patch('nova.image.api.API.update')
- @mock.patch('nova.image.api.API.get', return_value=get_image_123())
- def test_update_item_body_uri_mismatch(self, _get_mocked, update_mocked,
- quota_mocked):
- req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/bad')
- req.method = 'PUT'
- body = {"meta": {"key1": "value1"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.update, req, '123', 'bad', body)
- self.assertFalse(quota_mocked.called)
- self.assertFalse(update_mocked.called)
-
- @mock.patch('nova.image.api.API.update')
- @mock.patch('nova.image.api.API.get', return_value=get_image_123())
- def test_delete(self, _get_mocked, update_mocked):
- req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
- req.method = 'DELETE'
- res = self.controller.delete(req, '123', 'key1')
- expected = copy.deepcopy(get_image_123())
- expected['properties'] = {}
- update_mocked.assert_called_once_with(mock.ANY, '123', expected,
- data=None, purge_props=True)
-
- self.assertIsNone(res)
-
- @mock.patch('nova.image.api.API.get', return_value=get_image_123())
- def test_delete_not_found(self, _get_mocked):
- req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
- req.method = 'DELETE'
-
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.delete, req, '123', 'blah')
-
- @mock.patch('nova.image.api.API.get',
- side_effect=exception.ImageNotFound(image_id='100'))
- def test_delete_image_not_found(self, _get_mocked):
- req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
- req.method = 'DELETE'
-
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.delete, req, '100', 'key1')
-
- @mock.patch(CHK_QUOTA_STR,
- side_effect=webob.exc.HTTPForbidden(
- explanation='', headers={'Retry-After': 0}))
- @mock.patch('nova.image.api.API.update')
- @mock.patch('nova.image.api.API.get', return_value=get_image_123())
- def test_too_many_metadata_items_on_create(self, _get_mocked,
- update_mocked, _quota_mocked):
- body = {"metadata": {"foo": "bar"}}
- req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(webob.exc.HTTPForbidden,
- self.controller.create, req, '123', body)
- self.assertFalse(update_mocked.called)
-
- @mock.patch(CHK_QUOTA_STR,
- side_effect=webob.exc.HTTPForbidden(
- explanation='', headers={'Retry-After': 0}))
- @mock.patch('nova.image.api.API.update')
- @mock.patch('nova.image.api.API.get', return_value=get_image_123())
- def test_too_many_metadata_items_on_put(self, _get_mocked,
- update_mocked, _quota_mocked):
- body = {"metadata": {"foo": "bar"}}
- req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
- req.method = 'PUT'
- body = {"meta": {"blah": "blah"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(webob.exc.HTTPForbidden,
- self.controller.update, req, '123', 'blah', body)
- self.assertFalse(update_mocked.called)
-
- @mock.patch('nova.image.api.API.get',
- side_effect=exception.ImageNotAuthorized(image_id='123'))
- def test_image_not_authorized_update(self, _get_mocked):
- req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
- req.method = 'PUT'
- body = {"meta": {"key1": "value1"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(webob.exc.HTTPForbidden,
- self.controller.update, req, '123', 'key1', body)
-
- @mock.patch('nova.image.api.API.get',
- side_effect=exception.ImageNotAuthorized(image_id='123'))
- def test_image_not_authorized_update_all(self, _get_mocked):
- image_id = 131
- # see nova.tests.api.openstack.fakes:_make_image_fixtures
-
- req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
- % image_id)
- req.method = 'PUT'
- body = {"meta": {"key1": "value1"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(webob.exc.HTTPForbidden,
- self.controller.update_all, req, image_id, body)
-
- @mock.patch('nova.image.api.API.get',
- side_effect=exception.ImageNotAuthorized(image_id='123'))
- def test_image_not_authorized_create(self, _get_mocked):
- image_id = 131
- # see nova.tests.api.openstack.fakes:_make_image_fixtures
-
- req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
- % image_id)
- req.method = 'POST'
- body = {"meta": {"key1": "value1"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(webob.exc.HTTPForbidden,
- self.controller.create, req, image_id, body)
-
-
-class ImageMetaDataTestV2(ImageMetaDataTestV21):
- controller_class = image_metadata.Controller
-
- # NOTE(cyeoh): This duplicate unittest is necessary for a race condition
- # with the V21 unittests. It's mock issue.
- @mock.patch('nova.image.api.API.update')
- @mock.patch('nova.image.api.API.get', return_value=get_image_123())
- def test_delete(self, _get_mocked, update_mocked):
- req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
- req.method = 'DELETE'
- res = self.controller.delete(req, '123', 'key1')
- expected = copy.deepcopy(get_image_123())
- expected['properties'] = {}
- update_mocked.assert_called_once_with(mock.ANY, '123', expected,
- data=None, purge_props=True)
-
- self.assertIsNone(res)
diff --git a/nova/tests/api/openstack/compute/test_images.py b/nova/tests/api/openstack/compute/test_images.py
deleted file mode 100644
index 62dc3d8218..0000000000
--- a/nova/tests/api/openstack/compute/test_images.py
+++ /dev/null
@@ -1,1046 +0,0 @@
-# Copyright 2010 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Tests of the new image services, both as a service layer,
-and as a WSGI layer
-"""
-
-import copy
-
-from lxml import etree
-import mock
-import webob
-
-from nova.api.openstack.compute import images
-from nova.api.openstack.compute.plugins.v3 import images as images_v21
-from nova.api.openstack.compute.views import images as images_view
-from nova.api.openstack import xmlutil
-from nova import exception
-from nova.image import glance
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import image_fixtures
-from nova.tests import matchers
-
-NS = "{http://docs.openstack.org/compute/api/v1.1}"
-ATOMNS = "{http://www.w3.org/2005/Atom}"
-NOW_API_FORMAT = "2010-10-11T10:30:22Z"
-IMAGE_FIXTURES = image_fixtures.get_image_fixtures()
-
-
-class ImagesControllerTestV21(test.NoDBTestCase):
- """Test of the OpenStack API /images application controller w/Glance.
- """
- image_controller_class = images_v21.ImagesController
- url_base = '/v3'
- bookmark_base = ''
- http_request = fakes.HTTPRequestV3
-
- def setUp(self):
- """Run before each test."""
- super(ImagesControllerTestV21, self).setUp()
- fakes.stub_out_networking(self.stubs)
- fakes.stub_out_rate_limiting(self.stubs)
- fakes.stub_out_key_pair_funcs(self.stubs)
- fakes.stub_out_compute_api_snapshot(self.stubs)
- fakes.stub_out_compute_api_backup(self.stubs)
-
- self.controller = self.image_controller_class()
- self.url_prefix = "http://localhost%s/images" % self.url_base
- self.bookmark_prefix = "http://localhost%s/images" % self.bookmark_base
- self.uuid = 'fa95aaf5-ab3b-4cd8-88c0-2be7dd051aaf'
- self.server_uuid = "aa640691-d1a7-4a67-9d3c-d35ee6b3cc74"
- self.server_href = (
- "http://localhost%s/servers/%s" % (self.url_base,
- self.server_uuid))
- self.server_bookmark = (
- "http://localhost%s/servers/%s" % (self.bookmark_base,
- self.server_uuid))
- self.alternate = "%s/images/%s"
-
- self.expected_image_123 = {
- "image": {'id': '123',
- 'name': 'public image',
- 'metadata': {'key1': 'value1'},
- 'updated': NOW_API_FORMAT,
- 'created': NOW_API_FORMAT,
- 'status': 'ACTIVE',
- 'minDisk': 10,
- 'progress': 100,
- 'minRam': 128,
- "links": [{
- "rel": "self",
- "href": "%s/123" % self.url_prefix
- },
- {
- "rel": "bookmark",
- "href":
- "%s/123" % self.bookmark_prefix
- },
- {
- "rel": "alternate",
- "type": "application/vnd.openstack.image",
- "href": self.alternate %
- (glance.generate_glance_url(),
- 123),
- }],
- },
- }
-
- self.expected_image_124 = {
- "image": {'id': '124',
- 'name': 'queued snapshot',
- 'metadata': {
- u'instance_uuid': self.server_uuid,
- u'user_id': u'fake',
- },
- 'updated': NOW_API_FORMAT,
- 'created': NOW_API_FORMAT,
- 'status': 'SAVING',
- 'progress': 25,
- 'minDisk': 0,
- 'minRam': 0,
- 'server': {
- 'id': self.server_uuid,
- "links": [{
- "rel": "self",
- "href": self.server_href,
- },
- {
- "rel": "bookmark",
- "href": self.server_bookmark,
- }],
- },
- "links": [{
- "rel": "self",
- "href": "%s/124" % self.url_prefix
- },
- {
- "rel": "bookmark",
- "href":
- "%s/124" % self.bookmark_prefix
- },
- {
- "rel": "alternate",
- "type":
- "application/vnd.openstack.image",
- "href": self.alternate %
- (glance.generate_glance_url(),
- 124),
- }],
- },
- }
-
- @mock.patch('nova.image.api.API.get', return_value=IMAGE_FIXTURES[0])
- def test_get_image(self, get_mocked):
- request = self.http_request.blank(self.url_base + 'images/123')
- actual_image = self.controller.show(request, '123')
- self.assertThat(actual_image,
- matchers.DictMatches(self.expected_image_123))
- get_mocked.assert_called_once_with(mock.ANY, '123')
-
- @mock.patch('nova.image.api.API.get', return_value=IMAGE_FIXTURES[1])
- def test_get_image_with_custom_prefix(self, _get_mocked):
- self.flags(osapi_compute_link_prefix='https://zoo.com:42',
- osapi_glance_link_prefix='http://circus.com:34')
- fake_req = self.http_request.blank(self.url_base + 'images/124')
- actual_image = self.controller.show(fake_req, '124')
-
- expected_image = self.expected_image_124
- expected_image["image"]["links"][0]["href"] = (
- "https://zoo.com:42%s/images/124" % self.url_base)
- expected_image["image"]["links"][1]["href"] = (
- "https://zoo.com:42%s/images/124" % self.bookmark_base)
- expected_image["image"]["links"][2]["href"] = (
- "http://circus.com:34/images/124")
- expected_image["image"]["server"]["links"][0]["href"] = (
- "https://zoo.com:42%s/servers/%s" % (self.url_base,
- self.server_uuid))
- expected_image["image"]["server"]["links"][1]["href"] = (
- "https://zoo.com:42%s/servers/%s" % (self.bookmark_base,
- self.server_uuid))
-
- self.assertThat(actual_image, matchers.DictMatches(expected_image))
-
- @mock.patch('nova.image.api.API.get', side_effect=exception.NotFound)
- def test_get_image_404(self, _get_mocked):
- fake_req = self.http_request.blank(self.url_base + 'images/unknown')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.show, fake_req, 'unknown')
-
- @mock.patch('nova.image.api.API.get_all', return_value=IMAGE_FIXTURES)
- def test_get_image_details(self, get_all_mocked):
- request = self.http_request.blank(self.url_base + 'images/detail')
- response = self.controller.detail(request)
-
- get_all_mocked.assert_called_once_with(mock.ANY, filters={})
- response_list = response["images"]
-
- image_125 = copy.deepcopy(self.expected_image_124["image"])
- image_125['id'] = '125'
- image_125['name'] = 'saving snapshot'
- image_125['progress'] = 50
- image_125["links"][0]["href"] = "%s/125" % self.url_prefix
- image_125["links"][1]["href"] = "%s/125" % self.bookmark_prefix
- image_125["links"][2]["href"] = (
- "%s/images/125" % glance.generate_glance_url())
-
- image_126 = copy.deepcopy(self.expected_image_124["image"])
- image_126['id'] = '126'
- image_126['name'] = 'active snapshot'
- image_126['status'] = 'ACTIVE'
- image_126['progress'] = 100
- image_126["links"][0]["href"] = "%s/126" % self.url_prefix
- image_126["links"][1]["href"] = "%s/126" % self.bookmark_prefix
- image_126["links"][2]["href"] = (
- "%s/images/126" % glance.generate_glance_url())
-
- image_127 = copy.deepcopy(self.expected_image_124["image"])
- image_127['id'] = '127'
- image_127['name'] = 'killed snapshot'
- image_127['status'] = 'ERROR'
- image_127['progress'] = 0
- image_127["links"][0]["href"] = "%s/127" % self.url_prefix
- image_127["links"][1]["href"] = "%s/127" % self.bookmark_prefix
- image_127["links"][2]["href"] = (
- "%s/images/127" % glance.generate_glance_url())
-
- image_128 = copy.deepcopy(self.expected_image_124["image"])
- image_128['id'] = '128'
- image_128['name'] = 'deleted snapshot'
- image_128['status'] = 'DELETED'
- image_128['progress'] = 0
- image_128["links"][0]["href"] = "%s/128" % self.url_prefix
- image_128["links"][1]["href"] = "%s/128" % self.bookmark_prefix
- image_128["links"][2]["href"] = (
- "%s/images/128" % glance.generate_glance_url())
-
- image_129 = copy.deepcopy(self.expected_image_124["image"])
- image_129['id'] = '129'
- image_129['name'] = 'pending_delete snapshot'
- image_129['status'] = 'DELETED'
- image_129['progress'] = 0
- image_129["links"][0]["href"] = "%s/129" % self.url_prefix
- image_129["links"][1]["href"] = "%s/129" % self.bookmark_prefix
- image_129["links"][2]["href"] = (
- "%s/images/129" % glance.generate_glance_url())
-
- image_130 = copy.deepcopy(self.expected_image_123["image"])
- image_130['id'] = '130'
- image_130['name'] = None
- image_130['metadata'] = {}
- image_130['minDisk'] = 0
- image_130['minRam'] = 0
- image_130["links"][0]["href"] = "%s/130" % self.url_prefix
- image_130["links"][1]["href"] = "%s/130" % self.bookmark_prefix
- image_130["links"][2]["href"] = (
- "%s/images/130" % glance.generate_glance_url())
-
- image_131 = copy.deepcopy(self.expected_image_123["image"])
- image_131['id'] = '131'
- image_131['name'] = None
- image_131['metadata'] = {}
- image_131['minDisk'] = 0
- image_131['minRam'] = 0
- image_131["links"][0]["href"] = "%s/131" % self.url_prefix
- image_131["links"][1]["href"] = "%s/131" % self.bookmark_prefix
- image_131["links"][2]["href"] = (
- "%s/images/131" % glance.generate_glance_url())
-
- expected = [self.expected_image_123["image"],
- self.expected_image_124["image"],
- image_125, image_126, image_127,
- image_128, image_129, image_130,
- image_131]
-
- self.assertThat(expected, matchers.DictListMatches(response_list))
-
- @mock.patch('nova.image.api.API.get_all')
- def test_get_image_details_with_limit(self, get_all_mocked):
- request = self.http_request.blank(self.url_base +
- 'images/detail?limit=2')
- self.controller.detail(request)
- get_all_mocked.assert_called_once_with(mock.ANY, limit=2, filters={})
-
- @mock.patch('nova.image.api.API.get_all')
- def test_get_image_details_with_limit_and_page_size(self, get_all_mocked):
- request = self.http_request.blank(
- self.url_base + 'images/detail?limit=2&page_size=1')
- self.controller.detail(request)
- get_all_mocked.assert_called_once_with(mock.ANY, limit=2, filters={},
- page_size=1)
-
- @mock.patch('nova.image.api.API.get_all')
- def _detail_request(self, filters, request, get_all_mocked):
- self.controller.detail(request)
- get_all_mocked.assert_called_once_with(mock.ANY, filters=filters)
-
- def test_image_detail_filter_with_name(self):
- filters = {'name': 'testname'}
- request = self.http_request.blank(self.url_base + 'images/detail'
- '?name=testname')
- self._detail_request(filters, request)
-
- def test_image_detail_filter_with_status(self):
- filters = {'status': 'active'}
- request = self.http_request.blank(self.url_base + 'images/detail'
- '?status=ACTIVE')
- self._detail_request(filters, request)
-
- def test_image_detail_filter_with_property(self):
- filters = {'property-test': '3'}
- request = self.http_request.blank(self.url_base + 'images/detail'
- '?property-test=3')
- self._detail_request(filters, request)
-
- def test_image_detail_filter_server_href(self):
- filters = {'property-instance_uuid': self.uuid}
- request = self.http_request.blank(
- self.url_base + 'images/detail?server=' + self.uuid)
- self._detail_request(filters, request)
-
- def test_image_detail_filter_server_uuid(self):
- filters = {'property-instance_uuid': self.uuid}
- request = self.http_request.blank(
- self.url_base + 'images/detail?server=' + self.uuid)
- self._detail_request(filters, request)
-
- def test_image_detail_filter_changes_since(self):
- filters = {'changes-since': '2011-01-24T17:08Z'}
- request = self.http_request.blank(self.url_base + 'images/detail'
- '?changes-since=2011-01-24T17:08Z')
- self._detail_request(filters, request)
-
- def test_image_detail_filter_with_type(self):
- filters = {'property-image_type': 'BASE'}
- request = self.http_request.blank(
- self.url_base + 'images/detail?type=BASE')
- self._detail_request(filters, request)
-
- def test_image_detail_filter_not_supported(self):
- filters = {'status': 'active'}
- request = self.http_request.blank(
- self.url_base + 'images/detail?status='
- 'ACTIVE&UNSUPPORTEDFILTER=testname')
- self._detail_request(filters, request)
-
- def test_image_detail_no_filters(self):
- filters = {}
- request = self.http_request.blank(self.url_base + 'images/detail')
- self._detail_request(filters, request)
-
- @mock.patch('nova.image.api.API.get_all', side_effect=exception.Invalid)
- def test_image_detail_invalid_marker(self, _get_all_mocked):
- request = self.http_request.blank(self.url_base + '?marker=invalid')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail,
- request)
-
- def test_generate_alternate_link(self):
- view = images_view.ViewBuilder()
- request = self.http_request.blank(self.url_base + 'images/1')
- generated_url = view._get_alternate_link(request, 1)
- actual_url = "%s/images/1" % glance.generate_glance_url()
- self.assertEqual(generated_url, actual_url)
-
- def _check_response(self, controller_method, response, expected_code):
- self.assertEqual(expected_code, controller_method.wsgi_code)
-
- @mock.patch('nova.image.api.API.delete')
- def test_delete_image(self, delete_mocked):
- request = self.http_request.blank(self.url_base + 'images/124')
- request.method = 'DELETE'
- response = self.controller.delete(request, '124')
- self._check_response(self.controller.delete, response, 204)
- delete_mocked.assert_called_once_with(mock.ANY, '124')
-
- @mock.patch('nova.image.api.API.delete',
- side_effect=exception.ImageNotAuthorized(image_id='123'))
- def test_delete_deleted_image(self, _delete_mocked):
- # If you try to delete a deleted image, you get back 403 Forbidden.
- request = self.http_request.blank(self.url_base + 'images/123')
- request.method = 'DELETE'
- self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
- request, '123')
-
- @mock.patch('nova.image.api.API.delete',
- side_effect=exception.ImageNotFound(image_id='123'))
- def test_delete_image_not_found(self, _delete_mocked):
- request = self.http_request.blank(self.url_base + 'images/300')
- request.method = 'DELETE'
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.delete, request, '300')
-
-
-class ImagesControllerTestV2(ImagesControllerTestV21):
- image_controller_class = images.Controller
- url_base = '/v2/fake'
- bookmark_base = '/fake'
- http_request = fakes.HTTPRequest
-
- def _check_response(self, controller_method, response, expected_code):
- self.assertEqual(expected_code, response.status_int)
-
-
-class ImageXMLSerializationTest(test.NoDBTestCase):
-
- TIMESTAMP = "2010-10-11T10:30:22Z"
- SERVER_UUID = 'aa640691-d1a7-4a67-9d3c-d35ee6b3cc74'
- SERVER_HREF = 'http://localhost/v2/fake/servers/' + SERVER_UUID
- SERVER_BOOKMARK = 'http://localhost/fake/servers/' + SERVER_UUID
- IMAGE_HREF = 'http://localhost/v2/fake/images/%s'
- IMAGE_NEXT = 'http://localhost/v2/fake/images?limit=%s&marker=%s'
- IMAGE_BOOKMARK = 'http://localhost/fake/images/%s'
-
- def test_xml_declaration(self):
- serializer = images.ImageTemplate()
-
- fixture = {
- 'image': {
- 'id': 1,
- 'name': 'Image1',
- 'created': self.TIMESTAMP,
- 'updated': self.TIMESTAMP,
- 'status': 'ACTIVE',
- 'progress': 80,
- 'server': {
- 'id': self.SERVER_UUID,
- 'links': [
- {
- 'href': self.SERVER_HREF,
- 'rel': 'self',
- },
- {
- 'href': self.SERVER_BOOKMARK,
- 'rel': 'bookmark',
- },
- ],
- },
- 'metadata': {
- 'key1': 'value1',
- },
- 'links': [
- {
- 'href': self.IMAGE_HREF % 1,
- 'rel': 'self',
- },
- {
- 'href': self.IMAGE_BOOKMARK % 1,
- 'rel': 'bookmark',
- },
- ],
- },
- }
-
- output = serializer.serialize(fixture)
- has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
- self.assertTrue(has_dec)
-
- def test_show(self):
- serializer = images.ImageTemplate()
-
- fixture = {
- 'image': {
- 'id': 1,
- 'name': 'Image1',
- 'created': self.TIMESTAMP,
- 'updated': self.TIMESTAMP,
- 'status': 'ACTIVE',
- 'progress': 80,
- 'minRam': 10,
- 'minDisk': 100,
- 'server': {
- 'id': self.SERVER_UUID,
- 'links': [
- {
- 'href': self.SERVER_HREF,
- 'rel': 'self',
- },
- {
- 'href': self.SERVER_BOOKMARK,
- 'rel': 'bookmark',
- },
- ],
- },
- 'metadata': {
- 'key1': 'value1',
- },
- 'links': [
- {
- 'href': self.IMAGE_HREF % 1,
- 'rel': 'self',
- },
- {
- 'href': self.IMAGE_BOOKMARK % 1,
- 'rel': 'bookmark',
- },
- ],
- },
- }
-
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'image')
- image_dict = fixture['image']
-
- for key in ['name', 'id', 'updated', 'created', 'status', 'progress']:
- self.assertEqual(root.get(key), str(image_dict[key]))
-
- link_nodes = root.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(image_dict['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- metadata_root = root.find('{0}metadata'.format(NS))
- metadata_elems = metadata_root.findall('{0}meta'.format(NS))
- self.assertEqual(len(metadata_elems), 1)
- for i, metadata_elem in enumerate(metadata_elems):
- (meta_key, meta_value) = image_dict['metadata'].items()[i]
- self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
- self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
-
- server_root = root.find('{0}server'.format(NS))
- self.assertEqual(server_root.get('id'), image_dict['server']['id'])
- link_nodes = server_root.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(image_dict['server']['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- def test_show_zero_metadata(self):
- serializer = images.ImageTemplate()
-
- fixture = {
- 'image': {
- 'id': 1,
- 'name': 'Image1',
- 'created': self.TIMESTAMP,
- 'updated': self.TIMESTAMP,
- 'status': 'ACTIVE',
- 'server': {
- 'id': self.SERVER_UUID,
- 'links': [
- {
- 'href': self.SERVER_HREF,
- 'rel': 'self',
- },
- {
- 'href': self.SERVER_BOOKMARK,
- 'rel': 'bookmark',
- },
- ],
- },
- 'metadata': {},
- 'links': [
- {
- 'href': self.IMAGE_HREF % 1,
- 'rel': 'self',
- },
- {
- 'href': self.IMAGE_BOOKMARK % 1,
- 'rel': 'bookmark',
- },
- ],
- },
- }
-
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'image')
- image_dict = fixture['image']
-
- for key in ['name', 'id', 'updated', 'created', 'status']:
- self.assertEqual(root.get(key), str(image_dict[key]))
-
- link_nodes = root.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(image_dict['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- meta_nodes = root.findall('{0}meta'.format(ATOMNS))
- self.assertEqual(len(meta_nodes), 0)
-
- server_root = root.find('{0}server'.format(NS))
- self.assertEqual(server_root.get('id'), image_dict['server']['id'])
- link_nodes = server_root.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(image_dict['server']['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- def test_show_image_no_metadata_key(self):
- serializer = images.ImageTemplate()
-
- fixture = {
- 'image': {
- 'id': 1,
- 'name': 'Image1',
- 'created': self.TIMESTAMP,
- 'updated': self.TIMESTAMP,
- 'status': 'ACTIVE',
- 'server': {
- 'id': self.SERVER_UUID,
- 'links': [
- {
- 'href': self.SERVER_HREF,
- 'rel': 'self',
- },
- {
- 'href': self.SERVER_BOOKMARK,
- 'rel': 'bookmark',
- },
- ],
- },
- 'links': [
- {
- 'href': self.IMAGE_HREF % 1,
- 'rel': 'self',
- },
- {
- 'href': self.IMAGE_BOOKMARK % 1,
- 'rel': 'bookmark',
- },
- ],
- },
- }
-
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'image')
- image_dict = fixture['image']
-
- for key in ['name', 'id', 'updated', 'created', 'status']:
- self.assertEqual(root.get(key), str(image_dict[key]))
-
- link_nodes = root.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(image_dict['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- meta_nodes = root.findall('{0}meta'.format(ATOMNS))
- self.assertEqual(len(meta_nodes), 0)
-
- server_root = root.find('{0}server'.format(NS))
- self.assertEqual(server_root.get('id'), image_dict['server']['id'])
- link_nodes = server_root.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(image_dict['server']['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- def test_show_no_server(self):
- serializer = images.ImageTemplate()
-
- fixture = {
- 'image': {
- 'id': 1,
- 'name': 'Image1',
- 'created': self.TIMESTAMP,
- 'updated': self.TIMESTAMP,
- 'status': 'ACTIVE',
- 'metadata': {
- 'key1': 'value1',
- },
- 'links': [
- {
- 'href': self.IMAGE_HREF % 1,
- 'rel': 'self',
- },
- {
- 'href': self.IMAGE_BOOKMARK % 1,
- 'rel': 'bookmark',
- },
- ],
- },
- }
-
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'image')
- image_dict = fixture['image']
-
- for key in ['name', 'id', 'updated', 'created', 'status']:
- self.assertEqual(root.get(key), str(image_dict[key]))
-
- link_nodes = root.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(image_dict['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- metadata_root = root.find('{0}metadata'.format(NS))
- metadata_elems = metadata_root.findall('{0}meta'.format(NS))
- self.assertEqual(len(metadata_elems), 1)
- for i, metadata_elem in enumerate(metadata_elems):
- (meta_key, meta_value) = image_dict['metadata'].items()[i]
- self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
- self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
-
- server_root = root.find('{0}server'.format(NS))
- self.assertIsNone(server_root)
-
- def test_show_with_min_ram(self):
- serializer = images.ImageTemplate()
-
- fixture = {
- 'image': {
- 'id': 1,
- 'name': 'Image1',
- 'created': self.TIMESTAMP,
- 'updated': self.TIMESTAMP,
- 'status': 'ACTIVE',
- 'progress': 80,
- 'minRam': 256,
- 'server': {
- 'id': self.SERVER_UUID,
- 'links': [
- {
- 'href': self.SERVER_HREF,
- 'rel': 'self',
- },
- {
- 'href': self.SERVER_BOOKMARK,
- 'rel': 'bookmark',
- },
- ],
- },
- 'metadata': {
- 'key1': 'value1',
- },
- 'links': [
- {
- 'href': self.IMAGE_HREF % 1,
- 'rel': 'self',
- },
- {
- 'href': self.IMAGE_BOOKMARK % 1,
- 'rel': 'bookmark',
- },
- ],
- },
- }
-
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'image')
- image_dict = fixture['image']
-
- for key in ['name', 'id', 'updated', 'created', 'status', 'progress',
- 'minRam']:
- self.assertEqual(root.get(key), str(image_dict[key]))
-
- link_nodes = root.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(image_dict['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- metadata_root = root.find('{0}metadata'.format(NS))
- metadata_elems = metadata_root.findall('{0}meta'.format(NS))
- self.assertEqual(len(metadata_elems), 1)
- for i, metadata_elem in enumerate(metadata_elems):
- (meta_key, meta_value) = image_dict['metadata'].items()[i]
- self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
- self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
-
- server_root = root.find('{0}server'.format(NS))
- self.assertEqual(server_root.get('id'), image_dict['server']['id'])
- link_nodes = server_root.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(image_dict['server']['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- def test_show_with_min_disk(self):
- serializer = images.ImageTemplate()
-
- fixture = {
- 'image': {
- 'id': 1,
- 'name': 'Image1',
- 'created': self.TIMESTAMP,
- 'updated': self.TIMESTAMP,
- 'status': 'ACTIVE',
- 'progress': 80,
- 'minDisk': 5,
- 'server': {
- 'id': self.SERVER_UUID,
- 'links': [
- {
- 'href': self.SERVER_HREF,
- 'rel': 'self',
- },
- {
- 'href': self.SERVER_BOOKMARK,
- 'rel': 'bookmark',
- },
- ],
- },
- 'metadata': {
- 'key1': 'value1',
- },
- 'links': [
- {
- 'href': self.IMAGE_HREF % 1,
- 'rel': 'self',
- },
- {
- 'href': self.IMAGE_BOOKMARK % 1,
- 'rel': 'bookmark',
- },
- ],
- },
- }
-
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'image')
- image_dict = fixture['image']
-
- for key in ['name', 'id', 'updated', 'created', 'status', 'progress',
- 'minDisk']:
- self.assertEqual(root.get(key), str(image_dict[key]))
-
- link_nodes = root.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(image_dict['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- metadata_root = root.find('{0}metadata'.format(NS))
- metadata_elems = metadata_root.findall('{0}meta'.format(NS))
- self.assertEqual(len(metadata_elems), 1)
- for i, metadata_elem in enumerate(metadata_elems):
- (meta_key, meta_value) = image_dict['metadata'].items()[i]
- self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
- self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
-
- server_root = root.find('{0}server'.format(NS))
- self.assertEqual(server_root.get('id'), image_dict['server']['id'])
- link_nodes = server_root.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(image_dict['server']['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- def test_index(self):
- serializer = images.MinimalImagesTemplate()
-
- fixture = {
- 'images': [
- {
- 'id': 1,
- 'name': 'Image1',
- 'links': [
- {
- 'href': self.IMAGE_HREF % 1,
- 'rel': 'self',
- },
- {
- 'href': self.IMAGE_BOOKMARK % 1,
- 'rel': 'bookmark',
- },
- ],
- },
- {
- 'id': 2,
- 'name': 'Image2',
- 'links': [
- {
- 'href': self.IMAGE_HREF % 2,
- 'rel': 'self',
- },
- {
- 'href': self.IMAGE_BOOKMARK % 2,
- 'rel': 'bookmark',
- },
- ],
- },
- ]
- }
-
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'images')
- image_elems = root.findall('{0}image'.format(NS))
- self.assertEqual(len(image_elems), 2)
- for i, image_elem in enumerate(image_elems):
- image_dict = fixture['images'][i]
-
- for key in ['name', 'id']:
- self.assertEqual(image_elem.get(key), str(image_dict[key]))
-
- link_nodes = image_elem.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(image_dict['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- def test_index_with_links(self):
- serializer = images.MinimalImagesTemplate()
-
- fixture = {
- 'images': [
- {
- 'id': 1,
- 'name': 'Image1',
- 'links': [
- {
- 'href': self.IMAGE_HREF % 1,
- 'rel': 'self',
- },
- {
- 'href': self.IMAGE_BOOKMARK % 1,
- 'rel': 'bookmark',
- },
- ],
- },
- {
- 'id': 2,
- 'name': 'Image2',
- 'links': [
- {
- 'href': self.IMAGE_HREF % 2,
- 'rel': 'self',
- },
- {
- 'href': self.IMAGE_BOOKMARK % 2,
- 'rel': 'bookmark',
- },
- ],
- },
- ],
- 'images_links': [
- {
- 'rel': 'next',
- 'href': self.IMAGE_NEXT % (2, 2),
- }
- ],
- }
-
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'images')
- image_elems = root.findall('{0}image'.format(NS))
- self.assertEqual(len(image_elems), 2)
- for i, image_elem in enumerate(image_elems):
- image_dict = fixture['images'][i]
-
- for key in ['name', 'id']:
- self.assertEqual(image_elem.get(key), str(image_dict[key]))
-
- link_nodes = image_elem.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(image_dict['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- # Check images_links
- images_links = root.findall('{0}link'.format(ATOMNS))
- for i, link in enumerate(fixture['images_links']):
- for key, value in link.items():
- self.assertEqual(images_links[i].get(key), value)
-
- def test_index_zero_images(self):
- serializer = images.MinimalImagesTemplate()
-
- fixtures = {
- 'images': [],
- }
-
- output = serializer.serialize(fixtures)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'images')
- image_elems = root.findall('{0}image'.format(NS))
- self.assertEqual(len(image_elems), 0)
-
- def test_detail(self):
- serializer = images.ImagesTemplate()
-
- fixture = {
- 'images': [
- {
- 'id': 1,
- 'name': 'Image1',
- 'created': self.TIMESTAMP,
- 'updated': self.TIMESTAMP,
- 'status': 'ACTIVE',
- 'server': {
- 'id': self.SERVER_UUID,
- 'links': [
- {
- 'href': self.SERVER_HREF,
- 'rel': 'self',
- },
- {
- 'href': self.SERVER_BOOKMARK,
- 'rel': 'bookmark',
- },
- ],
- },
- 'links': [
- {
- 'href': self.IMAGE_HREF % 1,
- 'rel': 'self',
- },
- {
- 'href': self.IMAGE_BOOKMARK % 1,
- 'rel': 'bookmark',
- },
- ],
- },
- {
- 'id': '2',
- 'name': 'Image2',
- 'created': self.TIMESTAMP,
- 'updated': self.TIMESTAMP,
- 'status': 'SAVING',
- 'progress': 80,
- 'metadata': {
- 'key1': 'value1',
- },
- 'links': [
- {
- 'href': self.IMAGE_HREF % 2,
- 'rel': 'self',
- },
- {
- 'href': self.IMAGE_BOOKMARK % 2,
- 'rel': 'bookmark',
- },
- ],
- },
- ]
- }
-
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'images')
- image_elems = root.findall('{0}image'.format(NS))
- self.assertEqual(len(image_elems), 2)
- for i, image_elem in enumerate(image_elems):
- image_dict = fixture['images'][i]
-
- for key in ['name', 'id', 'updated', 'created', 'status']:
- self.assertEqual(image_elem.get(key), str(image_dict[key]))
-
- link_nodes = image_elem.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(image_dict['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
diff --git a/nova/tests/api/openstack/compute/test_limits.py b/nova/tests/api/openstack/compute/test_limits.py
deleted file mode 100644
index 73885ad2cc..0000000000
--- a/nova/tests/api/openstack/compute/test_limits.py
+++ /dev/null
@@ -1,1016 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Tests dealing with HTTP rate-limiting.
-"""
-
-import httplib
-import StringIO
-from xml.dom import minidom
-
-from lxml import etree
-import mock
-from oslo.serialization import jsonutils
-import six
-import webob
-
-from nova.api.openstack.compute import limits
-from nova.api.openstack.compute.plugins.v3 import limits as limits_v3
-from nova.api.openstack.compute import views
-from nova.api.openstack import wsgi
-from nova.api.openstack import xmlutil
-import nova.context
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import matchers
-from nova import utils
-
-
-TEST_LIMITS = [
- limits.Limit("GET", "/delayed", "^/delayed", 1,
- utils.TIME_UNITS['MINUTE']),
- limits.Limit("POST", "*", ".*", 7, utils.TIME_UNITS['MINUTE']),
- limits.Limit("POST", "/servers", "^/servers", 3,
- utils.TIME_UNITS['MINUTE']),
- limits.Limit("PUT", "*", "", 10, utils.TIME_UNITS['MINUTE']),
- limits.Limit("PUT", "/servers", "^/servers", 5,
- utils.TIME_UNITS['MINUTE']),
-]
-NS = {
- 'atom': 'http://www.w3.org/2005/Atom',
- 'ns': 'http://docs.openstack.org/common/api/v1.0'
-}
-
-
-class BaseLimitTestSuite(test.NoDBTestCase):
- """Base test suite which provides relevant stubs and time abstraction."""
-
- def setUp(self):
- super(BaseLimitTestSuite, self).setUp()
- self.time = 0.0
- self.stubs.Set(limits.Limit, "_get_time", self._get_time)
- self.absolute_limits = {}
-
- def stub_get_project_quotas(context, project_id, usages=True):
- return dict((k, dict(limit=v))
- for k, v in self.absolute_limits.items())
-
- self.stubs.Set(nova.quota.QUOTAS, "get_project_quotas",
- stub_get_project_quotas)
-
- def _get_time(self):
- """Return the "time" according to this test suite."""
- return self.time
-
-
-class LimitsControllerTestV21(BaseLimitTestSuite):
- """Tests for `limits.LimitsController` class."""
- limits_controller = limits_v3.LimitsController
-
- def setUp(self):
- """Run before each test."""
- super(LimitsControllerTestV21, self).setUp()
- self.controller = wsgi.Resource(self.limits_controller())
- self.ctrler = self.limits_controller()
-
- def _get_index_request(self, accept_header="application/json",
- tenant_id=None):
- """Helper to set routing arguments."""
- request = webob.Request.blank("/")
- if tenant_id:
- request = webob.Request.blank("/?tenant_id=%s" % tenant_id)
-
- request.accept = accept_header
- request.environ["wsgiorg.routing_args"] = (None, {
- "action": "index",
- "controller": "",
- })
- context = nova.context.RequestContext('testuser', 'testproject')
- request.environ["nova.context"] = context
- return request
-
- def _populate_limits(self, request):
- """Put limit info into a request."""
- _limits = [
- limits.Limit("GET", "*", ".*", 10, 60).display(),
- limits.Limit("POST", "*", ".*", 5, 60 * 60).display(),
- limits.Limit("GET", "changes-since*", "changes-since",
- 5, 60).display(),
- ]
- request.environ["nova.limits"] = _limits
- return request
-
- def test_empty_index_json(self):
- # Test getting empty limit details in JSON.
- request = self._get_index_request()
- response = request.get_response(self.controller)
- expected = {
- "limits": {
- "rate": [],
- "absolute": {},
- },
- }
- body = jsonutils.loads(response.body)
- self.assertEqual(expected, body)
-
- def test_index_json(self):
- self._test_index_json()
-
- def test_index_json_by_tenant(self):
- self._test_index_json('faketenant')
-
- def _test_index_json(self, tenant_id=None):
- # Test getting limit details in JSON.
- request = self._get_index_request(tenant_id=tenant_id)
- context = request.environ["nova.context"]
- if tenant_id is None:
- tenant_id = context.project_id
-
- request = self._populate_limits(request)
- self.absolute_limits = {
- 'ram': 512,
- 'instances': 5,
- 'cores': 21,
- 'key_pairs': 10,
- 'floating_ips': 10,
- 'security_groups': 10,
- 'security_group_rules': 20,
- }
- expected = {
- "limits": {
- "rate": [
- {
- "regex": ".*",
- "uri": "*",
- "limit": [
- {
- "verb": "GET",
- "next-available": "1970-01-01T00:00:00Z",
- "unit": "MINUTE",
- "value": 10,
- "remaining": 10,
- },
- {
- "verb": "POST",
- "next-available": "1970-01-01T00:00:00Z",
- "unit": "HOUR",
- "value": 5,
- "remaining": 5,
- },
- ],
- },
- {
- "regex": "changes-since",
- "uri": "changes-since*",
- "limit": [
- {
- "verb": "GET",
- "next-available": "1970-01-01T00:00:00Z",
- "unit": "MINUTE",
- "value": 5,
- "remaining": 5,
- },
- ],
- },
-
- ],
- "absolute": {
- "maxTotalRAMSize": 512,
- "maxTotalInstances": 5,
- "maxTotalCores": 21,
- "maxTotalKeypairs": 10,
- "maxTotalFloatingIps": 10,
- "maxSecurityGroups": 10,
- "maxSecurityGroupRules": 20,
- },
- },
- }
-
- def _get_project_quotas(context, project_id, usages=True):
- return dict((k, dict(limit=v))
- for k, v in self.absolute_limits.items())
-
- with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
- get_project_quotas:
- get_project_quotas.side_effect = _get_project_quotas
-
- response = request.get_response(self.controller)
-
- body = jsonutils.loads(response.body)
- self.assertEqual(expected, body)
- get_project_quotas.assert_called_once_with(context, tenant_id,
- usages=False)
-
-
-class LimitsControllerTestV2(LimitsControllerTestV21):
- limits_controller = limits.LimitsController
-
- def _populate_limits_diff_regex(self, request):
- """Put limit info into a request."""
- _limits = [
- limits.Limit("GET", "*", ".*", 10, 60).display(),
- limits.Limit("GET", "*", "*.*", 10, 60).display(),
- ]
- request.environ["nova.limits"] = _limits
- return request
-
- def test_index_diff_regex(self):
- # Test getting limit details in JSON.
- request = self._get_index_request()
- request = self._populate_limits_diff_regex(request)
- response = request.get_response(self.controller)
- expected = {
- "limits": {
- "rate": [
- {
- "regex": ".*",
- "uri": "*",
- "limit": [
- {
- "verb": "GET",
- "next-available": "1970-01-01T00:00:00Z",
- "unit": "MINUTE",
- "value": 10,
- "remaining": 10,
- },
- ],
- },
- {
- "regex": "*.*",
- "uri": "*",
- "limit": [
- {
- "verb": "GET",
- "next-available": "1970-01-01T00:00:00Z",
- "unit": "MINUTE",
- "value": 10,
- "remaining": 10,
- },
- ],
- },
-
- ],
- "absolute": {},
- },
- }
- body = jsonutils.loads(response.body)
- self.assertEqual(expected, body)
-
- def _test_index_absolute_limits_json(self, expected):
- request = self._get_index_request()
- response = request.get_response(self.controller)
- body = jsonutils.loads(response.body)
- self.assertEqual(expected, body['limits']['absolute'])
-
- def test_index_ignores_extra_absolute_limits_json(self):
- self.absolute_limits = {'unknown_limit': 9001}
- self._test_index_absolute_limits_json({})
-
- def test_index_absolute_ram_json(self):
- self.absolute_limits = {'ram': 1024}
- self._test_index_absolute_limits_json({'maxTotalRAMSize': 1024})
-
- def test_index_absolute_cores_json(self):
- self.absolute_limits = {'cores': 17}
- self._test_index_absolute_limits_json({'maxTotalCores': 17})
-
- def test_index_absolute_instances_json(self):
- self.absolute_limits = {'instances': 19}
- self._test_index_absolute_limits_json({'maxTotalInstances': 19})
-
- def test_index_absolute_metadata_json(self):
- # NOTE: both server metadata and image metadata are overloaded
- # into metadata_items
- self.absolute_limits = {'metadata_items': 23}
- expected = {
- 'maxServerMeta': 23,
- 'maxImageMeta': 23,
- }
- self._test_index_absolute_limits_json(expected)
-
- def test_index_absolute_injected_files(self):
- self.absolute_limits = {
- 'injected_files': 17,
- 'injected_file_content_bytes': 86753,
- }
- expected = {
- 'maxPersonality': 17,
- 'maxPersonalitySize': 86753,
- }
- self._test_index_absolute_limits_json(expected)
-
- def test_index_absolute_security_groups(self):
- self.absolute_limits = {
- 'security_groups': 8,
- 'security_group_rules': 16,
- }
- expected = {
- 'maxSecurityGroups': 8,
- 'maxSecurityGroupRules': 16,
- }
- self._test_index_absolute_limits_json(expected)
-
- def test_limit_create(self):
- req = fakes.HTTPRequest.blank('/v2/fake/limits')
- self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.create,
- req, {})
-
- def test_limit_delete(self):
- req = fakes.HTTPRequest.blank('/v2/fake/limits')
- self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.delete,
- req, 1)
-
- def test_limit_detail(self):
- req = fakes.HTTPRequest.blank('/v2/fake/limits')
- self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.detail,
- req)
-
- def test_limit_show(self):
- req = fakes.HTTPRequest.blank('/v2/fake/limits')
- self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.show,
- req, 1)
-
- def test_limit_update(self):
- req = fakes.HTTPRequest.blank('/v2/fake/limits')
- self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.update,
- req, 1, {})
-
-
-class MockLimiter(limits.Limiter):
- pass
-
-
-class LimitMiddlewareTest(BaseLimitTestSuite):
- """Tests for the `limits.RateLimitingMiddleware` class."""
-
- @webob.dec.wsgify
- def _empty_app(self, request):
- """Do-nothing WSGI app."""
- pass
-
- def setUp(self):
- """Prepare middleware for use through fake WSGI app."""
- super(LimitMiddlewareTest, self).setUp()
- _limits = '(GET, *, .*, 1, MINUTE)'
- self.app = limits.RateLimitingMiddleware(self._empty_app, _limits,
- "%s.MockLimiter" %
- self.__class__.__module__)
-
- def test_limit_class(self):
- # Test that middleware selected correct limiter class.
- self.assertIsInstance(self.app._limiter, MockLimiter)
-
- def test_good_request(self):
- # Test successful GET request through middleware.
- request = webob.Request.blank("/")
- response = request.get_response(self.app)
- self.assertEqual(200, response.status_int)
-
- def test_limited_request_json(self):
- # Test a rate-limited (429) GET request through middleware.
- request = webob.Request.blank("/")
- response = request.get_response(self.app)
- self.assertEqual(200, response.status_int)
-
- request = webob.Request.blank("/")
- response = request.get_response(self.app)
- self.assertEqual(response.status_int, 429)
-
- self.assertIn('Retry-After', response.headers)
- retry_after = int(response.headers['Retry-After'])
- self.assertAlmostEqual(retry_after, 60, 1)
-
- body = jsonutils.loads(response.body)
- expected = "Only 1 GET request(s) can be made to * every minute."
- value = body["overLimit"]["details"].strip()
- self.assertEqual(value, expected)
-
- self.assertIn("retryAfter", body["overLimit"])
- retryAfter = body["overLimit"]["retryAfter"]
- self.assertEqual(retryAfter, "60")
-
- def test_limited_request_xml(self):
- # Test a rate-limited (429) response as XML.
- request = webob.Request.blank("/")
- response = request.get_response(self.app)
- self.assertEqual(200, response.status_int)
-
- request = webob.Request.blank("/")
- request.accept = "application/xml"
- response = request.get_response(self.app)
- self.assertEqual(response.status_int, 429)
-
- root = minidom.parseString(response.body).childNodes[0]
- expected = "Only 1 GET request(s) can be made to * every minute."
-
- self.assertIsNotNone(root.attributes.getNamedItem("retryAfter"))
- retryAfter = root.attributes.getNamedItem("retryAfter").value
- self.assertEqual(retryAfter, "60")
-
- details = root.getElementsByTagName("details")
- self.assertEqual(details.length, 1)
-
- value = details.item(0).firstChild.data.strip()
- self.assertEqual(value, expected)
-
-
-class LimitTest(BaseLimitTestSuite):
- """Tests for the `limits.Limit` class."""
-
- def test_GET_no_delay(self):
- # Test a limit handles 1 GET per second.
- limit = limits.Limit("GET", "*", ".*", 1, 1)
- delay = limit("GET", "/anything")
- self.assertIsNone(delay)
- self.assertEqual(0, limit.next_request)
- self.assertEqual(0, limit.last_request)
-
- def test_GET_delay(self):
- # Test two calls to 1 GET per second limit.
- limit = limits.Limit("GET", "*", ".*", 1, 1)
- delay = limit("GET", "/anything")
- self.assertIsNone(delay)
-
- delay = limit("GET", "/anything")
- self.assertEqual(1, delay)
- self.assertEqual(1, limit.next_request)
- self.assertEqual(0, limit.last_request)
-
- self.time += 4
-
- delay = limit("GET", "/anything")
- self.assertIsNone(delay)
- self.assertEqual(4, limit.next_request)
- self.assertEqual(4, limit.last_request)
-
-
-class ParseLimitsTest(BaseLimitTestSuite):
- """Tests for the default limits parser in the in-memory
- `limits.Limiter` class.
- """
-
- def test_invalid(self):
- # Test that parse_limits() handles invalid input correctly.
- self.assertRaises(ValueError, limits.Limiter.parse_limits,
- ';;;;;')
-
- def test_bad_rule(self):
- # Test that parse_limits() handles bad rules correctly.
- self.assertRaises(ValueError, limits.Limiter.parse_limits,
- 'GET, *, .*, 20, minute')
-
- def test_missing_arg(self):
- # Test that parse_limits() handles missing args correctly.
- self.assertRaises(ValueError, limits.Limiter.parse_limits,
- '(GET, *, .*, 20)')
-
- def test_bad_value(self):
- # Test that parse_limits() handles bad values correctly.
- self.assertRaises(ValueError, limits.Limiter.parse_limits,
- '(GET, *, .*, foo, minute)')
-
- def test_bad_unit(self):
- # Test that parse_limits() handles bad units correctly.
- self.assertRaises(ValueError, limits.Limiter.parse_limits,
- '(GET, *, .*, 20, lightyears)')
-
- def test_multiple_rules(self):
- # Test that parse_limits() handles multiple rules correctly.
- try:
- l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);'
- '(PUT, /foo*, /foo.*, 10, hour);'
- '(POST, /bar*, /bar.*, 5, second);'
- '(Say, /derp*, /derp.*, 1, day)')
- except ValueError as e:
- assert False, six.text_type(e)
-
- # Make sure the number of returned limits are correct
- self.assertEqual(len(l), 4)
-
- # Check all the verbs...
- expected = ['GET', 'PUT', 'POST', 'SAY']
- self.assertEqual([t.verb for t in l], expected)
-
- # ...the URIs...
- expected = ['*', '/foo*', '/bar*', '/derp*']
- self.assertEqual([t.uri for t in l], expected)
-
- # ...the regexes...
- expected = ['.*', '/foo.*', '/bar.*', '/derp.*']
- self.assertEqual([t.regex for t in l], expected)
-
- # ...the values...
- expected = [20, 10, 5, 1]
- self.assertEqual([t.value for t in l], expected)
-
- # ...and the units...
- expected = [utils.TIME_UNITS['MINUTE'], utils.TIME_UNITS['HOUR'],
- utils.TIME_UNITS['SECOND'], utils.TIME_UNITS['DAY']]
- self.assertEqual([t.unit for t in l], expected)
-
-
-class LimiterTest(BaseLimitTestSuite):
- """Tests for the in-memory `limits.Limiter` class."""
-
- def setUp(self):
- """Run before each test."""
- super(LimiterTest, self).setUp()
- userlimits = {'limits.user3': '',
- 'limits.user0': '(get, *, .*, 4, minute);'
- '(put, *, .*, 2, minute)'}
- self.limiter = limits.Limiter(TEST_LIMITS, **userlimits)
-
- def _check(self, num, verb, url, username=None):
- """Check and yield results from checks."""
- for x in xrange(num):
- yield self.limiter.check_for_delay(verb, url, username)[0]
-
- def _check_sum(self, num, verb, url, username=None):
- """Check and sum results from checks."""
- results = self._check(num, verb, url, username)
- return sum(item for item in results if item)
-
- def test_no_delay_GET(self):
- """Simple test to ensure no delay on a single call for a limit verb we
- didn"t set.
- """
- delay = self.limiter.check_for_delay("GET", "/anything")
- self.assertEqual(delay, (None, None))
-
- def test_no_delay_PUT(self):
- # Simple test to ensure no delay on a single call for a known limit.
- delay = self.limiter.check_for_delay("PUT", "/anything")
- self.assertEqual(delay, (None, None))
-
- def test_delay_PUT(self):
- """Ensure the 11th PUT will result in a delay of 6.0 seconds until
- the next request will be granced.
- """
- expected = [None] * 10 + [6.0]
- results = list(self._check(11, "PUT", "/anything"))
-
- self.assertEqual(expected, results)
-
- def test_delay_POST(self):
- """Ensure the 8th POST will result in a delay of 6.0 seconds until
- the next request will be granced.
- """
- expected = [None] * 7
- results = list(self._check(7, "POST", "/anything"))
- self.assertEqual(expected, results)
-
- expected = 60.0 / 7.0
- results = self._check_sum(1, "POST", "/anything")
- self.assertAlmostEqual(expected, results, 8)
-
- def test_delay_GET(self):
- # Ensure the 11th GET will result in NO delay.
- expected = [None] * 11
- results = list(self._check(11, "GET", "/anything"))
- self.assertEqual(expected, results)
-
- expected = [None] * 4 + [15.0]
- results = list(self._check(5, "GET", "/foo", "user0"))
- self.assertEqual(expected, results)
-
- def test_delay_PUT_servers(self):
- """Ensure PUT on /servers limits at 5 requests, and PUT elsewhere is
- still OK after 5 requests...but then after 11 total requests, PUT
- limiting kicks in.
- """
- # First 6 requests on PUT /servers
- expected = [None] * 5 + [12.0]
- results = list(self._check(6, "PUT", "/servers"))
- self.assertEqual(expected, results)
-
- # Next 5 request on PUT /anything
- expected = [None] * 4 + [6.0]
- results = list(self._check(5, "PUT", "/anything"))
- self.assertEqual(expected, results)
-
- def test_delay_PUT_wait(self):
- """Ensure after hitting the limit and then waiting for the correct
- amount of time, the limit will be lifted.
- """
- expected = [None] * 10 + [6.0]
- results = list(self._check(11, "PUT", "/anything"))
- self.assertEqual(expected, results)
-
- # Advance time
- self.time += 6.0
-
- expected = [None, 6.0]
- results = list(self._check(2, "PUT", "/anything"))
- self.assertEqual(expected, results)
-
- def test_multiple_delays(self):
- # Ensure multiple requests still get a delay.
- expected = [None] * 10 + [6.0] * 10
- results = list(self._check(20, "PUT", "/anything"))
- self.assertEqual(expected, results)
-
- self.time += 1.0
-
- expected = [5.0] * 10
- results = list(self._check(10, "PUT", "/anything"))
- self.assertEqual(expected, results)
-
- expected = [None] * 2 + [30.0] * 8
- results = list(self._check(10, "PUT", "/anything", "user0"))
- self.assertEqual(expected, results)
-
- def test_user_limit(self):
- # Test user-specific limits.
- self.assertEqual(self.limiter.levels['user3'], [])
- self.assertEqual(len(self.limiter.levels['user0']), 2)
-
- def test_multiple_users(self):
- # Tests involving multiple users.
- # User0
- expected = [None] * 2 + [30.0] * 8
- results = list(self._check(10, "PUT", "/anything", "user0"))
- self.assertEqual(expected, results)
-
- # User1
- expected = [None] * 10 + [6.0] * 10
- results = list(self._check(20, "PUT", "/anything", "user1"))
- self.assertEqual(expected, results)
-
- # User2
- expected = [None] * 10 + [6.0] * 5
- results = list(self._check(15, "PUT", "/anything", "user2"))
- self.assertEqual(expected, results)
-
- # User3
- expected = [None] * 20
- results = list(self._check(20, "PUT", "/anything", "user3"))
- self.assertEqual(expected, results)
-
- self.time += 1.0
-
- # User1 again
- expected = [5.0] * 10
- results = list(self._check(10, "PUT", "/anything", "user1"))
- self.assertEqual(expected, results)
-
- self.time += 1.0
-
- # User1 again
- expected = [4.0] * 5
- results = list(self._check(5, "PUT", "/anything", "user2"))
- self.assertEqual(expected, results)
-
- # User0 again
- expected = [28.0]
- results = list(self._check(1, "PUT", "/anything", "user0"))
- self.assertEqual(expected, results)
-
- self.time += 28.0
-
- expected = [None, 30.0]
- results = list(self._check(2, "PUT", "/anything", "user0"))
- self.assertEqual(expected, results)
-
-
-class WsgiLimiterTest(BaseLimitTestSuite):
- """Tests for `limits.WsgiLimiter` class."""
-
- def setUp(self):
- """Run before each test."""
- super(WsgiLimiterTest, self).setUp()
- self.app = limits.WsgiLimiter(TEST_LIMITS)
-
- def _request_data(self, verb, path):
- """Get data describing a limit request verb/path."""
- return jsonutils.dumps({"verb": verb, "path": path})
-
- def _request(self, verb, url, username=None):
- """Make sure that POSTing to the given url causes the given username
- to perform the given action. Make the internal rate limiter return
- delay and make sure that the WSGI app returns the correct response.
- """
- if username:
- request = webob.Request.blank("/%s" % username)
- else:
- request = webob.Request.blank("/")
-
- request.method = "POST"
- request.body = self._request_data(verb, url)
- response = request.get_response(self.app)
-
- if "X-Wait-Seconds" in response.headers:
- self.assertEqual(response.status_int, 403)
- return response.headers["X-Wait-Seconds"]
-
- self.assertEqual(response.status_int, 204)
-
- def test_invalid_methods(self):
- # Only POSTs should work.
- for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
- request = webob.Request.blank("/", method=method)
- response = request.get_response(self.app)
- self.assertEqual(response.status_int, 405)
-
- def test_good_url(self):
- delay = self._request("GET", "/something")
- self.assertIsNone(delay)
-
- def test_escaping(self):
- delay = self._request("GET", "/something/jump%20up")
- self.assertIsNone(delay)
-
- def test_response_to_delays(self):
- delay = self._request("GET", "/delayed")
- self.assertIsNone(delay)
-
- delay = self._request("GET", "/delayed")
- self.assertEqual(delay, '60.00')
-
- def test_response_to_delays_usernames(self):
- delay = self._request("GET", "/delayed", "user1")
- self.assertIsNone(delay)
-
- delay = self._request("GET", "/delayed", "user2")
- self.assertIsNone(delay)
-
- delay = self._request("GET", "/delayed", "user1")
- self.assertEqual(delay, '60.00')
-
- delay = self._request("GET", "/delayed", "user2")
- self.assertEqual(delay, '60.00')
-
-
-class FakeHttplibSocket(object):
- """Fake `httplib.HTTPResponse` replacement."""
-
- def __init__(self, response_string):
- """Initialize new `FakeHttplibSocket`."""
- self._buffer = StringIO.StringIO(response_string)
-
- def makefile(self, _mode, _other):
- """Returns the socket's internal buffer."""
- return self._buffer
-
-
-class FakeHttplibConnection(object):
- """Fake `httplib.HTTPConnection`."""
-
- def __init__(self, app, host):
- """Initialize `FakeHttplibConnection`."""
- self.app = app
- self.host = host
-
- def request(self, method, path, body="", headers=None):
- """Requests made via this connection actually get translated and routed
- into our WSGI app, we then wait for the response and turn it back into
- an `httplib.HTTPResponse`.
- """
- if not headers:
- headers = {}
-
- req = webob.Request.blank(path)
- req.method = method
- req.headers = headers
- req.host = self.host
- req.body = body
-
- resp = str(req.get_response(self.app))
- resp = "HTTP/1.0 %s" % resp
- sock = FakeHttplibSocket(resp)
- self.http_response = httplib.HTTPResponse(sock)
- self.http_response.begin()
-
- def getresponse(self):
- """Return our generated response from the request."""
- return self.http_response
-
-
-def wire_HTTPConnection_to_WSGI(host, app):
- """Monkeypatches HTTPConnection so that if you try to connect to host, you
- are instead routed straight to the given WSGI app.
-
- After calling this method, when any code calls
-
- httplib.HTTPConnection(host)
-
- the connection object will be a fake. Its requests will be sent directly
- to the given WSGI app rather than through a socket.
-
- Code connecting to hosts other than host will not be affected.
-
- This method may be called multiple times to map different hosts to
- different apps.
-
- This method returns the original HTTPConnection object, so that the caller
- can restore the default HTTPConnection interface (for all hosts).
- """
- class HTTPConnectionDecorator(object):
- """Wraps the real HTTPConnection class so that when you instantiate
- the class you might instead get a fake instance.
- """
-
- def __init__(self, wrapped):
- self.wrapped = wrapped
-
- def __call__(self, connection_host, *args, **kwargs):
- if connection_host == host:
- return FakeHttplibConnection(app, host)
- else:
- return self.wrapped(connection_host, *args, **kwargs)
-
- oldHTTPConnection = httplib.HTTPConnection
- httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection)
- return oldHTTPConnection
-
-
-class WsgiLimiterProxyTest(BaseLimitTestSuite):
- """Tests for the `limits.WsgiLimiterProxy` class."""
-
- def setUp(self):
- """Do some nifty HTTP/WSGI magic which allows for WSGI to be called
- directly by something like the `httplib` library.
- """
- super(WsgiLimiterProxyTest, self).setUp()
- self.app = limits.WsgiLimiter(TEST_LIMITS)
- self.oldHTTPConnection = (
- wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app))
- self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80")
-
- def test_200(self):
- # Successful request test.
- delay = self.proxy.check_for_delay("GET", "/anything")
- self.assertEqual(delay, (None, None))
-
- def test_403(self):
- # Forbidden request test.
- delay = self.proxy.check_for_delay("GET", "/delayed")
- self.assertEqual(delay, (None, None))
-
- delay, error = self.proxy.check_for_delay("GET", "/delayed")
- error = error.strip()
-
- expected = ("60.00", "403 Forbidden\n\nOnly 1 GET request(s) can be "
- "made to /delayed every minute.")
-
- self.assertEqual((delay, error), expected)
-
- def tearDown(self):
- # restore original HTTPConnection object
- httplib.HTTPConnection = self.oldHTTPConnection
- super(WsgiLimiterProxyTest, self).tearDown()
-
-
-class LimitsViewBuilderTest(test.NoDBTestCase):
- def setUp(self):
- super(LimitsViewBuilderTest, self).setUp()
- self.view_builder = views.limits.ViewBuilder()
- self.rate_limits = [{"URI": "*",
- "regex": ".*",
- "value": 10,
- "verb": "POST",
- "remaining": 2,
- "unit": "MINUTE",
- "resetTime": 1311272226},
- {"URI": "*/servers",
- "regex": "^/servers",
- "value": 50,
- "verb": "POST",
- "remaining": 10,
- "unit": "DAY",
- "resetTime": 1311272226}]
- self.absolute_limits = {"metadata_items": 1,
- "injected_files": 5,
- "injected_file_content_bytes": 5}
-
- def test_build_limits(self):
- expected_limits = {"limits": {
- "rate": [{
- "uri": "*",
- "regex": ".*",
- "limit": [{"value": 10,
- "verb": "POST",
- "remaining": 2,
- "unit": "MINUTE",
- "next-available": "2011-07-21T18:17:06Z"}]},
- {"uri": "*/servers",
- "regex": "^/servers",
- "limit": [{"value": 50,
- "verb": "POST",
- "remaining": 10,
- "unit": "DAY",
- "next-available": "2011-07-21T18:17:06Z"}]}],
- "absolute": {"maxServerMeta": 1,
- "maxImageMeta": 1,
- "maxPersonality": 5,
- "maxPersonalitySize": 5}}}
-
- output = self.view_builder.build(self.rate_limits,
- self.absolute_limits)
- self.assertThat(output, matchers.DictMatches(expected_limits))
-
- def test_build_limits_empty_limits(self):
- expected_limits = {"limits": {"rate": [],
- "absolute": {}}}
-
- abs_limits = {}
- rate_limits = []
- output = self.view_builder.build(rate_limits, abs_limits)
- self.assertThat(output, matchers.DictMatches(expected_limits))
-
-
-class LimitsXMLSerializationTest(test.NoDBTestCase):
- def test_xml_declaration(self):
- serializer = limits.LimitsTemplate()
-
- fixture = {"limits": {
- "rate": [],
- "absolute": {}}}
-
- output = serializer.serialize(fixture)
- has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
- self.assertTrue(has_dec)
-
- def test_index(self):
- serializer = limits.LimitsTemplate()
- fixture = {
- "limits": {
- "rate": [{
- "uri": "*",
- "regex": ".*",
- "limit": [{
- "value": 10,
- "verb": "POST",
- "remaining": 2,
- "unit": "MINUTE",
- "next-available": "2011-12-15T22:42:45Z"}]},
- {"uri": "*/servers",
- "regex": "^/servers",
- "limit": [{
- "value": 50,
- "verb": "POST",
- "remaining": 10,
- "unit": "DAY",
- "next-available": "2011-12-15T22:42:45Z"}]}],
- "absolute": {"maxServerMeta": 1,
- "maxImageMeta": 1,
- "maxPersonality": 5,
- "maxPersonalitySize": 10240}}}
-
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'limits')
-
- # verify absolute limits
- absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
- self.assertEqual(len(absolutes), 4)
- for limit in absolutes:
- name = limit.get('name')
- value = limit.get('value')
- self.assertEqual(value, str(fixture['limits']['absolute'][name]))
-
- # verify rate limits
- rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
- self.assertEqual(len(rates), 2)
- for i, rate in enumerate(rates):
- for key in ['uri', 'regex']:
- self.assertEqual(rate.get(key),
- str(fixture['limits']['rate'][i][key]))
- rate_limits = rate.xpath('ns:limit', namespaces=NS)
- self.assertEqual(len(rate_limits), 1)
- for j, limit in enumerate(rate_limits):
- for key in ['verb', 'value', 'remaining', 'unit',
- 'next-available']:
- self.assertEqual(limit.get(key),
- str(fixture['limits']['rate'][i]['limit'][j][key]))
-
- def test_index_no_limits(self):
- serializer = limits.LimitsTemplate()
-
- fixture = {"limits": {
- "rate": [],
- "absolute": {}}}
-
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'limits')
-
- # verify absolute limits
- absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
- self.assertEqual(len(absolutes), 0)
-
- # verify rate limits
- rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
- self.assertEqual(len(rates), 0)
diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py
deleted file mode 100644
index 97b33dc99a..0000000000
--- a/nova/tests/api/openstack/compute/test_server_actions.py
+++ /dev/null
@@ -1,1556 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import base64
-import uuid
-
-import mock
-import mox
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute import servers
-from nova.compute import api as compute_api
-from nova.compute import task_states
-from nova.compute import vm_states
-from nova import context
-from nova import db
-from nova import exception
-from nova.image import glance
-from nova import objects
-from nova.openstack.common import uuidutils
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_block_device
-from nova.tests import fake_instance
-from nova.tests.image import fake
-from nova.tests import matchers
-from nova.tests import utils
-
-CONF = cfg.CONF
-CONF.import_opt('password_length', 'nova.utils')
-FAKE_UUID = fakes.FAKE_UUID
-INSTANCE_IDS = {FAKE_UUID: 1}
-
-
-def return_server_not_found(*arg, **kwarg):
- raise exception.NotFound()
-
-
-def instance_update_and_get_original(context, instance_uuid, values,
- update_cells=True,
- columns_to_join=None,
- ):
- inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
- inst = dict(inst, **values)
- return (inst, inst)
-
-
-def instance_update(context, instance_uuid, kwargs, update_cells=True):
- inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
- return inst
-
-
-class MockSetAdminPassword(object):
- def __init__(self):
- self.instance_id = None
- self.password = None
-
- def __call__(self, context, instance, password):
- self.instance_id = instance['uuid']
- self.password = password
-
-
-class ServerActionsControllerTest(test.TestCase):
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- image_href = 'http://localhost/v2/fake/images/%s' % image_uuid
-
- def setUp(self):
- super(ServerActionsControllerTest, self).setUp()
-
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
- host='fake_host'))
- self.stubs.Set(db, 'instance_update_and_get_original',
- instance_update_and_get_original)
-
- fakes.stub_out_nw_api(self.stubs)
- fakes.stub_out_compute_api_snapshot(self.stubs)
- fake.stub_out_image_service(self.stubs)
- self.flags(allow_instance_snapshots=True,
- enable_instance_password=True)
- self.uuid = FAKE_UUID
- self.url = '/v2/fake/servers/%s/action' % self.uuid
- self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
-
- class FakeExtManager(object):
- def is_loaded(self, ext):
- return False
-
- self.controller = servers.Controller(ext_mgr=FakeExtManager())
- self.compute_api = self.controller.compute_api
- self.context = context.RequestContext('fake', 'fake')
- self.app = fakes.wsgi_app(init_only=('servers',),
- fake_auth_context=self.context)
-
- def _make_request(self, url, body):
- req = webob.Request.blank('/v2/fake' + url)
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.content_type = 'application/json'
- return req.get_response(self.app)
-
- def _stub_instance_get(self, uuid=None):
- self.mox.StubOutWithMock(compute_api.API, 'get')
- if uuid is None:
- uuid = uuidutils.generate_uuid()
- instance = fake_instance.fake_db_instance(
- id=1, uuid=uuid, vm_state=vm_states.ACTIVE, task_state=None)
- instance = objects.Instance._from_db_object(
- self.context, objects.Instance(), instance)
-
- self.compute_api.get(self.context, uuid,
- want_objects=True).AndReturn(instance)
- return instance
-
- def _test_locked_instance(self, action, method=None, body_map=None,
- compute_api_args_map=None):
- if method is None:
- method = action
- if body_map is None:
- body_map = {}
- if compute_api_args_map is None:
- compute_api_args_map = {}
-
- instance = self._stub_instance_get()
- args, kwargs = compute_api_args_map.get(action, ((), {}))
-
- getattr(compute_api.API, method)(self.context, instance,
- *args, **kwargs).AndRaise(
- exception.InstanceIsLocked(instance_uuid=instance['uuid']))
-
- self.mox.ReplayAll()
-
- res = self._make_request('/servers/%s/action' % instance['uuid'],
- {action: body_map.get(action)})
- self.assertEqual(409, res.status_int)
- # Do these here instead of tearDown because this method is called
- # more than once for the same test case
- self.mox.VerifyAll()
- self.mox.UnsetStubs()
-
- def test_actions_with_locked_instance(self):
- actions = ['resize', 'confirmResize', 'revertResize', 'reboot',
- 'rebuild']
-
- method_translations = {'confirmResize': 'confirm_resize',
- 'revertResize': 'revert_resize'}
-
- body_map = {'resize': {'flavorRef': '2'},
- 'reboot': {'type': 'HARD'},
- 'rebuild': {'imageRef': self.image_uuid,
- 'adminPass': 'TNc53Dr8s7vw'}}
-
- args_map = {'resize': (('2'), {}),
- 'confirmResize': ((), {}),
- 'reboot': (('HARD',), {}),
- 'rebuild': ((self.image_uuid, 'TNc53Dr8s7vw'),
- {'files_to_inject': None})}
-
- for action in actions:
- method = method_translations.get(action)
- self.mox.StubOutWithMock(compute_api.API, method or action)
- self._test_locked_instance(action, method=method,
- body_map=body_map,
- compute_api_args_map=args_map)
-
- def test_server_change_password(self):
- mock_method = MockSetAdminPassword()
- self.stubs.Set(compute_api.API, 'set_admin_password', mock_method)
- body = {'changePassword': {'adminPass': '1234pass'}}
-
- req = fakes.HTTPRequest.blank(self.url)
- self.controller._action_change_password(req, FAKE_UUID, body)
-
- self.assertEqual(mock_method.instance_id, self.uuid)
- self.assertEqual(mock_method.password, '1234pass')
-
- def test_server_change_password_pass_disabled(self):
- # run with enable_instance_password disabled to verify adminPass
- # is missing from response. See lp bug 921814
- self.flags(enable_instance_password=False)
-
- mock_method = MockSetAdminPassword()
- self.stubs.Set(compute_api.API, 'set_admin_password', mock_method)
- body = {'changePassword': {'adminPass': '1234pass'}}
-
- req = fakes.HTTPRequest.blank(self.url)
- self.controller._action_change_password(req, FAKE_UUID, body)
-
- self.assertEqual(mock_method.instance_id, self.uuid)
- # note,the mock still contains the password.
- self.assertEqual(mock_method.password, '1234pass')
-
- def test_server_change_password_not_a_string(self):
- body = {'changePassword': {'adminPass': 1234}}
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_change_password,
- req, FAKE_UUID, body)
-
- def test_server_change_password_bad_request(self):
- body = {'changePassword': {'pass': '12345'}}
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_change_password,
- req, FAKE_UUID, body)
-
- def test_server_change_password_empty_string(self):
- mock_method = MockSetAdminPassword()
- self.stubs.Set(compute_api.API, 'set_admin_password', mock_method)
- body = {'changePassword': {'adminPass': ''}}
-
- req = fakes.HTTPRequest.blank(self.url)
- self.controller._action_change_password(req, FAKE_UUID, body)
-
- self.assertEqual(mock_method.instance_id, self.uuid)
- self.assertEqual(mock_method.password, '')
-
- def test_server_change_password_none(self):
- body = {'changePassword': {'adminPass': None}}
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_change_password,
- req, FAKE_UUID, body)
-
- def test_reboot_hard(self):
- body = dict(reboot=dict(type="HARD"))
- req = fakes.HTTPRequest.blank(self.url)
- self.controller._action_reboot(req, FAKE_UUID, body)
-
- def test_reboot_soft(self):
- body = dict(reboot=dict(type="SOFT"))
- req = fakes.HTTPRequest.blank(self.url)
- self.controller._action_reboot(req, FAKE_UUID, body)
-
- def test_reboot_incorrect_type(self):
- body = dict(reboot=dict(type="NOT_A_TYPE"))
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_reboot,
- req, FAKE_UUID, body)
-
- def test_reboot_missing_type(self):
- body = dict(reboot=dict())
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_reboot,
- req, FAKE_UUID, body)
-
- def test_reboot_none(self):
- body = dict(reboot=dict(type=None))
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_reboot,
- req, FAKE_UUID, body)
-
- def test_reboot_not_found(self):
- self.stubs.Set(db, 'instance_get_by_uuid',
- return_server_not_found)
-
- body = dict(reboot=dict(type="HARD"))
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller._action_reboot,
- req, str(uuid.uuid4()), body)
-
- def test_reboot_raises_conflict_on_invalid_state(self):
- body = dict(reboot=dict(type="HARD"))
-
- def fake_reboot(*args, **kwargs):
- raise exception.InstanceInvalidState(attr='fake_attr',
- state='fake_state', method='fake_method',
- instance_uuid='fake')
-
- self.stubs.Set(compute_api.API, 'reboot', fake_reboot)
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._action_reboot,
- req, FAKE_UUID, body)
-
- def test_reboot_soft_with_soft_in_progress_raises_conflict(self):
- body = dict(reboot=dict(type="SOFT"))
- req = fakes.HTTPRequest.blank(self.url)
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
- task_state=task_states.REBOOTING))
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._action_reboot,
- req, FAKE_UUID, body)
-
- def test_reboot_hard_with_soft_in_progress_does_not_raise(self):
- body = dict(reboot=dict(type="HARD"))
- req = fakes.HTTPRequest.blank(self.url)
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
- task_state=task_states.REBOOTING))
- self.controller._action_reboot(req, FAKE_UUID, body)
-
- def test_reboot_hard_with_hard_in_progress_raises_conflict(self):
- body = dict(reboot=dict(type="HARD"))
- req = fakes.HTTPRequest.blank(self.url)
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
- task_state=task_states.REBOOTING_HARD))
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._action_reboot,
- req, FAKE_UUID, body)
-
- def test_rebuild_preserve_ephemeral_is_ignored_when_ext_not_loaded(self):
- return_server = fakes.fake_instance_get(image_ref='2',
- vm_state=vm_states.ACTIVE,
- host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
-
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- "preserve_ephemeral": False,
- },
- }
- req = fakes.HTTPRequest.blank(self.url)
- context = req.environ['nova.context']
-
- self.mox.StubOutWithMock(compute_api.API, 'rebuild')
- compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
- mox.IgnoreArg(), files_to_inject=None)
- self.mox.ReplayAll()
-
- self.controller._action_rebuild(req, FAKE_UUID, body)
-
- def _test_rebuild_preserve_ephemeral(self, value=None):
- def fake_is_loaded(ext):
- return ext == 'os-preserve-ephemeral-rebuild'
- self.stubs.Set(self.controller.ext_mgr, 'is_loaded', fake_is_loaded)
-
- return_server = fakes.fake_instance_get(image_ref='2',
- vm_state=vm_states.ACTIVE,
- host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
-
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- },
- }
- if value is not None:
- body['rebuild']['preserve_ephemeral'] = value
-
- req = fakes.HTTPRequest.blank(self.url)
- context = req.environ['nova.context']
-
- self.mox.StubOutWithMock(compute_api.API, 'rebuild')
-
- if value is not None:
- compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
- mox.IgnoreArg(), preserve_ephemeral=value,
- files_to_inject=None)
- else:
- compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
- mox.IgnoreArg(), files_to_inject=None)
- self.mox.ReplayAll()
-
- self.controller._action_rebuild(req, FAKE_UUID, body)
-
- def test_rebuild_preserve_ephemeral_true(self):
- self._test_rebuild_preserve_ephemeral(True)
-
- def test_rebuild_preserve_ephemeral_false(self):
- self._test_rebuild_preserve_ephemeral(False)
-
- def test_rebuild_preserve_ephemeral_default(self):
- self._test_rebuild_preserve_ephemeral()
-
- def test_rebuild_accepted_minimum(self):
- return_server = fakes.fake_instance_get(image_ref='2',
- vm_state=vm_states.ACTIVE, host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
- self_href = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
-
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- },
- }
-
- req = fakes.HTTPRequest.blank(self.url)
- robj = self.controller._action_rebuild(req, FAKE_UUID, body)
- body = robj.obj
-
- self.assertEqual(body['server']['image']['id'], '2')
- self.assertEqual(len(body['server']['adminPass']),
- CONF.password_length)
-
- self.assertEqual(robj['location'], self_href)
-
- def test_rebuild_instance_with_image_uuid(self):
- info = dict(image_href_in_call=None)
-
- def rebuild(self2, context, instance, image_href, *args, **kwargs):
- info['image_href_in_call'] = image_href
-
- self.stubs.Set(db, 'instance_get',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
- self.stubs.Set(compute_api.API, 'rebuild', rebuild)
-
- # proper local hrefs must start with 'http://localhost/v2/'
- body = {
- 'rebuild': {
- 'imageRef': self.image_uuid,
- },
- }
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/a/action')
- self.controller._action_rebuild(req, FAKE_UUID, body)
- self.assertEqual(info['image_href_in_call'], self.image_uuid)
-
- def test_rebuild_instance_with_image_href_uses_uuid(self):
- info = dict(image_href_in_call=None)
-
- def rebuild(self2, context, instance, image_href, *args, **kwargs):
- info['image_href_in_call'] = image_href
-
- self.stubs.Set(db, 'instance_get',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
- self.stubs.Set(compute_api.API, 'rebuild', rebuild)
-
- # proper local hrefs must start with 'http://localhost/v2/'
- body = {
- 'rebuild': {
- 'imageRef': self.image_href,
- },
- }
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/a/action')
- self.controller._action_rebuild(req, FAKE_UUID, body)
- self.assertEqual(info['image_href_in_call'], self.image_uuid)
-
- def test_rebuild_accepted_minimum_pass_disabled(self):
- # run with enable_instance_password disabled to verify adminPass
- # is missing from response. See lp bug 921814
- self.flags(enable_instance_password=False)
-
- return_server = fakes.fake_instance_get(image_ref='2',
- vm_state=vm_states.ACTIVE, host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
- self_href = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
-
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- },
- }
-
- req = fakes.HTTPRequest.blank(self.url)
- robj = self.controller._action_rebuild(req, FAKE_UUID, body)
- body = robj.obj
-
- self.assertEqual(body['server']['image']['id'], '2')
- self.assertNotIn("adminPass", body['server'])
-
- self.assertEqual(robj['location'], self_href)
-
- def test_rebuild_raises_conflict_on_invalid_state(self):
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- },
- }
-
- def fake_rebuild(*args, **kwargs):
- raise exception.InstanceInvalidState(attr='fake_attr',
- state='fake_state', method='fake_method',
- instance_uuid='fake')
-
- self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._action_rebuild,
- req, FAKE_UUID, body)
-
- def test_rebuild_accepted_with_metadata(self):
- metadata = {'new': 'metadata'}
-
- return_server = fakes.fake_instance_get(metadata=metadata,
- vm_state=vm_states.ACTIVE, host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
-
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- "metadata": metadata,
- },
- }
-
- req = fakes.HTTPRequest.blank(self.url)
- body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
-
- self.assertEqual(body['server']['metadata'], metadata)
-
- def test_rebuild_accepted_with_bad_metadata(self):
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- "metadata": "stack",
- },
- }
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild,
- req, FAKE_UUID, body)
-
- def test_rebuild_with_too_large_metadata(self):
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- "metadata": {
- 256 * "k": "value"
- }
- }
- }
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
- self.controller._action_rebuild, req,
- FAKE_UUID, body)
-
- def test_rebuild_bad_entity(self):
- body = {
- "rebuild": {
- "imageId": self._image_href,
- },
- }
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild,
- req, FAKE_UUID, body)
-
- def test_rebuild_bad_personality(self):
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- "personality": [{
- "path": "/path/to/file",
- "contents": "INVALID b64",
- }]
- },
- }
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild,
- req, FAKE_UUID, body)
-
- def test_rebuild_personality(self):
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- "personality": [{
- "path": "/path/to/file",
- "contents": base64.b64encode("Test String"),
- }]
- },
- }
-
- req = fakes.HTTPRequest.blank(self.url)
- body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
-
- self.assertNotIn('personality', body['server'])
-
- def test_rebuild_admin_pass(self):
- return_server = fakes.fake_instance_get(image_ref='2',
- vm_state=vm_states.ACTIVE, host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
-
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- "adminPass": "asdf",
- },
- }
-
- req = fakes.HTTPRequest.blank(self.url)
- body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
-
- self.assertEqual(body['server']['image']['id'], '2')
- self.assertEqual(body['server']['adminPass'], 'asdf')
-
- def test_rebuild_admin_pass_pass_disabled(self):
- # run with enable_instance_password disabled to verify adminPass
- # is missing from response. See lp bug 921814
- self.flags(enable_instance_password=False)
-
- return_server = fakes.fake_instance_get(image_ref='2',
- vm_state=vm_states.ACTIVE, host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
-
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- "adminPass": "asdf",
- },
- }
-
- req = fakes.HTTPRequest.blank(self.url)
- body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
-
- self.assertEqual(body['server']['image']['id'], '2')
- self.assertNotIn('adminPass', body['server'])
-
- def test_rebuild_server_not_found(self):
- def server_not_found(self, instance_id,
- columns_to_join=None, use_slave=False):
- raise exception.InstanceNotFound(instance_id=instance_id)
- self.stubs.Set(db, 'instance_get_by_uuid', server_not_found)
-
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- },
- }
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller._action_rebuild,
- req, FAKE_UUID, body)
-
- def test_rebuild_with_bad_image(self):
- body = {
- "rebuild": {
- "imageRef": "foo",
- },
- }
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild,
- req, FAKE_UUID, body)
-
- def test_rebuild_accessIP(self):
- attributes = {
- 'access_ip_v4': '172.19.0.1',
- 'access_ip_v6': 'fe80::1',
- }
-
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- "accessIPv4": "172.19.0.1",
- "accessIPv6": "fe80::1",
- },
- }
-
- data = {'changes': {}}
- orig_get = compute_api.API.get
-
- def wrap_get(*args, **kwargs):
- data['instance'] = orig_get(*args, **kwargs)
- return data['instance']
-
- def fake_save(context, **kwargs):
- data['changes'].update(data['instance'].obj_get_changes())
-
- self.stubs.Set(compute_api.API, 'get', wrap_get)
- self.stubs.Set(objects.Instance, 'save', fake_save)
- req = fakes.HTTPRequest.blank(self.url)
-
- self.controller._action_rebuild(req, FAKE_UUID, body)
-
- self.assertEqual(self._image_href, data['changes']['image_ref'])
- self.assertEqual("", data['changes']['kernel_id'])
- self.assertEqual("", data['changes']['ramdisk_id'])
- self.assertEqual(task_states.REBUILDING, data['changes']['task_state'])
- self.assertEqual(0, data['changes']['progress'])
- for attr, value in attributes.items():
- self.assertEqual(value, str(data['changes'][attr]))
-
- def test_rebuild_when_kernel_not_exists(self):
-
- def return_image_meta(*args, **kwargs):
- image_meta_table = {
- '2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
- '155d900f-4e14-4e4c-a73d-069cbf4541e6':
- {'id': 3, 'status': 'active', 'container_format': 'raw',
- 'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
- }
- image_id = args[2]
- try:
- image_meta = image_meta_table[str(image_id)]
- except KeyError:
- raise exception.ImageNotFound(image_id=image_id)
-
- return image_meta
-
- self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
- body = {
- "rebuild": {
- "imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
- },
- }
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild,
- req, FAKE_UUID, body)
-
- def test_rebuild_proper_kernel_ram(self):
- instance_meta = {'kernel_id': None, 'ramdisk_id': None}
-
- orig_get = compute_api.API.get
-
- def wrap_get(*args, **kwargs):
- inst = orig_get(*args, **kwargs)
- instance_meta['instance'] = inst
- return inst
-
- def fake_save(context, **kwargs):
- instance = instance_meta['instance']
- for key in instance_meta.keys():
- if key in instance.obj_what_changed():
- instance_meta[key] = instance[key]
-
- def return_image_meta(*args, **kwargs):
- image_meta_table = {
- '1': {'id': 1, 'status': 'active', 'container_format': 'aki'},
- '2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
- '155d900f-4e14-4e4c-a73d-069cbf4541e6':
- {'id': 3, 'status': 'active', 'container_format': 'raw',
- 'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
- }
- image_id = args[2]
- try:
- image_meta = image_meta_table[str(image_id)]
- except KeyError:
- raise exception.ImageNotFound(image_id=image_id)
-
- return image_meta
-
- self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
- self.stubs.Set(compute_api.API, 'get', wrap_get)
- self.stubs.Set(objects.Instance, 'save', fake_save)
- body = {
- "rebuild": {
- "imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
- },
- }
- req = fakes.HTTPRequest.blank(self.url)
- self.controller._action_rebuild(req, FAKE_UUID, body).obj
- self.assertEqual(instance_meta['kernel_id'], '1')
- self.assertEqual(instance_meta['ramdisk_id'], '2')
-
- @mock.patch.object(compute_api.API, 'rebuild')
- def test_rebuild_instance_raise_auto_disk_config_exc(self, mock_rebuild):
- body = {
- "rebuild": {
- "imageRef": self._image_href,
- },
- }
-
- req = fakes.HTTPRequest.blank(self.url)
- mock_rebuild.side_effect = exception.AutoDiskConfigDisabledByImage(
- image='dummy')
-
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild,
- req, FAKE_UUID, body)
-
- def test_resize_server(self):
-
- body = dict(resize=dict(flavorRef="http://localhost/3"))
-
- self.resize_called = False
-
- def resize_mock(*args):
- self.resize_called = True
-
- self.stubs.Set(compute_api.API, 'resize', resize_mock)
-
- req = fakes.HTTPRequest.blank(self.url)
- body = self.controller._action_resize(req, FAKE_UUID, body)
-
- self.assertEqual(self.resize_called, True)
-
- def test_resize_server_no_flavor(self):
- body = dict(resize=dict())
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_resize,
- req, FAKE_UUID, body)
-
- def test_resize_server_no_flavor_ref(self):
- body = dict(resize=dict(flavorRef=None))
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_resize,
- req, FAKE_UUID, body)
-
- def test_resize_with_server_not_found(self):
- body = dict(resize=dict(flavorRef="http://localhost/3"))
-
- self.stubs.Set(compute_api.API, 'get', return_server_not_found)
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller._action_resize,
- req, FAKE_UUID, body)
-
- def test_resize_with_image_exceptions(self):
- body = dict(resize=dict(flavorRef="http://localhost/3"))
- self.resize_called = 0
- image_id = 'fake_image_id'
-
- exceptions = [
- (exception.ImageNotAuthorized(image_id=image_id),
- webob.exc.HTTPUnauthorized),
- (exception.ImageNotFound(image_id=image_id),
- webob.exc.HTTPBadRequest),
- (exception.Invalid, webob.exc.HTTPBadRequest),
- (exception.NoValidHost(reason='Bad host'),
- webob.exc.HTTPBadRequest),
- (exception.AutoDiskConfigDisabledByImage(image=image_id),
- webob.exc.HTTPBadRequest),
- ]
-
- raised, expected = map(iter, zip(*exceptions))
-
- def _fake_resize(obj, context, instance, flavor_id):
- self.resize_called += 1
- raise raised.next()
-
- self.stubs.Set(compute_api.API, 'resize', _fake_resize)
-
- for call_no in range(len(exceptions)):
- req = fakes.HTTPRequest.blank(self.url)
- next_exception = expected.next()
- actual = self.assertRaises(next_exception,
- self.controller._action_resize,
- req, FAKE_UUID, body)
- if (isinstance(exceptions[call_no][0],
- exception.NoValidHost)):
- self.assertEqual(actual.explanation,
- 'No valid host was found. Bad host')
- elif (isinstance(exceptions[call_no][0],
- exception.AutoDiskConfigDisabledByImage)):
- self.assertEqual(actual.explanation,
- 'Requested image fake_image_id has automatic'
- ' disk resize disabled.')
- self.assertEqual(self.resize_called, call_no + 1)
-
- @mock.patch('nova.compute.api.API.resize',
- side_effect=exception.CannotResizeDisk(reason=''))
- def test_resize_raises_cannot_resize_disk(self, mock_resize):
- body = dict(resize=dict(flavorRef="http://localhost/3"))
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_resize,
- req, FAKE_UUID, body)
-
- @mock.patch('nova.compute.api.API.resize',
- side_effect=exception.FlavorNotFound(reason='',
- flavor_id='fake_id'))
- def test_resize_raises_flavor_not_found(self, mock_resize):
- body = dict(resize=dict(flavorRef="http://localhost/3"))
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_resize,
- req, FAKE_UUID, body)
-
- def test_resize_with_too_many_instances(self):
- body = dict(resize=dict(flavorRef="http://localhost/3"))
-
- def fake_resize(*args, **kwargs):
- raise exception.TooManyInstances(message="TooManyInstance")
-
- self.stubs.Set(compute_api.API, 'resize', fake_resize)
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPForbidden,
- self.controller._action_resize,
- req, FAKE_UUID, body)
-
- def test_resize_raises_conflict_on_invalid_state(self):
- body = dict(resize=dict(flavorRef="http://localhost/3"))
-
- def fake_resize(*args, **kwargs):
- raise exception.InstanceInvalidState(attr='fake_attr',
- state='fake_state', method='fake_method',
- instance_uuid='fake')
-
- self.stubs.Set(compute_api.API, 'resize', fake_resize)
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._action_resize,
- req, FAKE_UUID, body)
-
- @mock.patch('nova.compute.api.API.resize',
- side_effect=exception.NoValidHost(reason=''))
- def test_resize_raises_no_valid_host(self, mock_resize):
- body = dict(resize=dict(flavorRef="http://localhost/3"))
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_resize,
- req, FAKE_UUID, body)
-
- @mock.patch.object(compute_api.API, 'resize')
- def test_resize_instance_raise_auto_disk_config_exc(self, mock_resize):
- mock_resize.side_effect = exception.AutoDiskConfigDisabledByImage(
- image='dummy')
-
- body = dict(resize=dict(flavorRef="http://localhost/3"))
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_resize,
- req, FAKE_UUID, body)
-
- def test_confirm_resize_server(self):
- body = dict(confirmResize=None)
-
- self.confirm_resize_called = False
-
- def cr_mock(*args):
- self.confirm_resize_called = True
-
- self.stubs.Set(compute_api.API, 'confirm_resize', cr_mock)
-
- req = fakes.HTTPRequest.blank(self.url)
- body = self.controller._action_confirm_resize(req, FAKE_UUID, body)
-
- self.assertEqual(self.confirm_resize_called, True)
-
- def test_confirm_resize_migration_not_found(self):
- body = dict(confirmResize=None)
-
- def confirm_resize_mock(*args):
- raise exception.MigrationNotFoundByStatus(instance_id=1,
- status='finished')
-
- self.stubs.Set(compute_api.API,
- 'confirm_resize',
- confirm_resize_mock)
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_confirm_resize,
- req, FAKE_UUID, body)
-
- def test_confirm_resize_raises_conflict_on_invalid_state(self):
- body = dict(confirmResize=None)
-
- def fake_confirm_resize(*args, **kwargs):
- raise exception.InstanceInvalidState(attr='fake_attr',
- state='fake_state', method='fake_method',
- instance_uuid='fake')
-
- self.stubs.Set(compute_api.API, 'confirm_resize',
- fake_confirm_resize)
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._action_confirm_resize,
- req, FAKE_UUID, body)
-
- def test_revert_resize_migration_not_found(self):
- body = dict(revertResize=None)
-
- def revert_resize_mock(*args):
- raise exception.MigrationNotFoundByStatus(instance_id=1,
- status='finished')
-
- self.stubs.Set(compute_api.API,
- 'revert_resize',
- revert_resize_mock)
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_revert_resize,
- req, FAKE_UUID, body)
-
- def test_revert_resize_server_not_found(self):
- body = dict(revertResize=None)
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob. exc.HTTPNotFound,
- self.controller._action_revert_resize,
- req, "bad_server_id", body)
-
- def test_revert_resize_server(self):
- body = dict(revertResize=None)
-
- self.revert_resize_called = False
-
- def revert_mock(*args):
- self.revert_resize_called = True
-
- self.stubs.Set(compute_api.API, 'revert_resize', revert_mock)
-
- req = fakes.HTTPRequest.blank(self.url)
- body = self.controller._action_revert_resize(req, FAKE_UUID, body)
-
- self.assertEqual(self.revert_resize_called, True)
-
- def test_revert_resize_raises_conflict_on_invalid_state(self):
- body = dict(revertResize=None)
-
- def fake_revert_resize(*args, **kwargs):
- raise exception.InstanceInvalidState(attr='fake_attr',
- state='fake_state', method='fake_method',
- instance_uuid='fake')
-
- self.stubs.Set(compute_api.API, 'revert_resize',
- fake_revert_resize)
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._action_revert_resize,
- req, FAKE_UUID, body)
-
- def test_create_image(self):
- body = {
- 'createImage': {
- 'name': 'Snapshot 1',
- },
- }
-
- req = fakes.HTTPRequest.blank(self.url)
- response = self.controller._action_create_image(req, FAKE_UUID, body)
-
- location = response.headers['Location']
- self.assertEqual('http://localhost/v2/fake/images/123', location)
-
- def test_create_image_glance_link_prefix(self):
- self.flags(osapi_glance_link_prefix='https://glancehost')
- body = {
- 'createImage': {
- 'name': 'Snapshot 1',
- },
- }
-
- req = fakes.HTTPRequest.blank(self.url)
- response = self.controller._action_create_image(req, FAKE_UUID, body)
-
- location = response.headers['Location']
- self.assertEqual('https://glancehost/v2/fake/images/123', location)
-
- def test_create_image_name_too_long(self):
- long_name = 'a' * 260
- body = {
- 'createImage': {
- 'name': long_name,
- },
- }
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_create_image, req,
- FAKE_UUID, body)
-
- def _do_test_create_volume_backed_image(self, extra_properties):
-
- def _fake_id(x):
- return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
-
- body = dict(createImage=dict(name='snapshot_of_volume_backed'))
-
- if extra_properties:
- body['createImage']['metadata'] = extra_properties
-
- image_service = glance.get_default_image_service()
-
- bdm = [dict(volume_id=_fake_id('a'),
- volume_size=1,
- device_name='vda',
- delete_on_termination=False)]
- props = dict(kernel_id=_fake_id('b'),
- ramdisk_id=_fake_id('c'),
- root_device_name='/dev/vda',
- block_device_mapping=bdm)
- original_image = dict(properties=props,
- container_format='ami',
- status='active',
- is_public=True)
-
- image_service.create(None, original_image)
-
- def fake_block_device_mapping_get_all_by_instance(context, inst_id,
- use_slave=False):
- return [fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': _fake_id('a'),
- 'source_type': 'snapshot',
- 'destination_type': 'volume',
- 'volume_size': 1,
- 'device_name': 'vda',
- 'snapshot_id': 1,
- 'boot_index': 0,
- 'delete_on_termination': False,
- 'no_device': None})]
-
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fake_block_device_mapping_get_all_by_instance)
-
- instance = fakes.fake_instance_get(image_ref=original_image['id'],
- vm_state=vm_states.ACTIVE,
- root_device_name='/dev/vda')
- self.stubs.Set(db, 'instance_get_by_uuid', instance)
-
- volume = dict(id=_fake_id('a'),
- size=1,
- host='fake',
- display_description='fake')
- snapshot = dict(id=_fake_id('d'))
- self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
- volume_api = self.controller.compute_api.volume_api
- volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
- volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
- mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
-
- self.mox.ReplayAll()
-
- req = fakes.HTTPRequest.blank(self.url)
- response = self.controller._action_create_image(req, FAKE_UUID, body)
-
- location = response.headers['Location']
- image_id = location.replace('http://localhost/v2/fake/images/', '')
- image = image_service.show(None, image_id)
-
- self.assertEqual(image['name'], 'snapshot_of_volume_backed')
- properties = image['properties']
- self.assertEqual(properties['kernel_id'], _fake_id('b'))
- self.assertEqual(properties['ramdisk_id'], _fake_id('c'))
- self.assertEqual(properties['root_device_name'], '/dev/vda')
- self.assertEqual(properties['bdm_v2'], True)
- bdms = properties['block_device_mapping']
- self.assertEqual(len(bdms), 1)
- self.assertEqual(bdms[0]['boot_index'], 0)
- self.assertEqual(bdms[0]['source_type'], 'snapshot')
- self.assertEqual(bdms[0]['destination_type'], 'volume')
- self.assertEqual(bdms[0]['snapshot_id'], snapshot['id'])
- for fld in ('connection_info', 'id',
- 'instance_uuid', 'device_name'):
- self.assertNotIn(fld, bdms[0])
- for k in extra_properties.keys():
- self.assertEqual(properties[k], extra_properties[k])
-
- def test_create_volume_backed_image_no_metadata(self):
- self._do_test_create_volume_backed_image({})
-
- def test_create_volume_backed_image_with_metadata(self):
- self._do_test_create_volume_backed_image(dict(ImageType='Gold',
- ImageVersion='2.0'))
-
- def _test_create_volume_backed_image_with_metadata_from_volume(
- self, extra_metadata=None):
-
- def _fake_id(x):
- return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
-
- body = dict(createImage=dict(name='snapshot_of_volume_backed'))
- if extra_metadata:
- body['createImage']['metadata'] = extra_metadata
-
- image_service = glance.get_default_image_service()
-
- def fake_block_device_mapping_get_all_by_instance(context, inst_id,
- use_slave=False):
- return [fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': _fake_id('a'),
- 'source_type': 'snapshot',
- 'destination_type': 'volume',
- 'volume_size': 1,
- 'device_name': 'vda',
- 'snapshot_id': 1,
- 'boot_index': 0,
- 'delete_on_termination': False,
- 'no_device': None})]
-
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fake_block_device_mapping_get_all_by_instance)
-
- instance = fakes.fake_instance_get(image_ref='',
- vm_state=vm_states.ACTIVE,
- root_device_name='/dev/vda')
- self.stubs.Set(db, 'instance_get_by_uuid', instance)
-
- fake_metadata = {'test_key1': 'test_value1',
- 'test_key2': 'test_value2'}
- volume = dict(id=_fake_id('a'),
- size=1,
- host='fake',
- display_description='fake',
- volume_image_metadata=fake_metadata)
- snapshot = dict(id=_fake_id('d'))
- self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
- volume_api = self.controller.compute_api.volume_api
- volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
- volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
- volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
- mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
-
- req = fakes.HTTPRequest.blank(self.url)
-
- self.mox.ReplayAll()
- response = self.controller._action_create_image(req, FAKE_UUID, body)
- location = response.headers['Location']
- image_id = location.replace('http://localhost/v2/fake/images/', '')
- image = image_service.show(None, image_id)
-
- properties = image['properties']
- self.assertEqual(properties['test_key1'], 'test_value1')
- self.assertEqual(properties['test_key2'], 'test_value2')
- if extra_metadata:
- for key, val in extra_metadata.items():
- self.assertEqual(properties[key], val)
-
- def test_create_vol_backed_img_with_meta_from_vol_without_extra_meta(self):
- self._test_create_volume_backed_image_with_metadata_from_volume()
-
- def test_create_vol_backed_img_with_meta_from_vol_with_extra_meta(self):
- self._test_create_volume_backed_image_with_metadata_from_volume(
- extra_metadata={'a': 'b'})
-
- def test_create_image_snapshots_disabled(self):
- """Don't permit a snapshot if the allow_instance_snapshots flag is
- False
- """
- self.flags(allow_instance_snapshots=False)
- body = {
- 'createImage': {
- 'name': 'Snapshot 1',
- },
- }
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_create_image,
- req, FAKE_UUID, body)
-
- def test_create_image_with_metadata(self):
- body = {
- 'createImage': {
- 'name': 'Snapshot 1',
- 'metadata': {'key': 'asdf'},
- },
- }
-
- req = fakes.HTTPRequest.blank(self.url)
- response = self.controller._action_create_image(req, FAKE_UUID, body)
-
- location = response.headers['Location']
- self.assertEqual('http://localhost/v2/fake/images/123', location)
-
- def test_create_image_with_too_much_metadata(self):
- body = {
- 'createImage': {
- 'name': 'Snapshot 1',
- 'metadata': {},
- },
- }
- for num in range(CONF.quota_metadata_items + 1):
- body['createImage']['metadata']['foo%i' % num] = "bar"
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPForbidden,
- self.controller._action_create_image,
- req, FAKE_UUID, body)
-
- def test_create_image_no_name(self):
- body = {
- 'createImage': {},
- }
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_create_image,
- req, FAKE_UUID, body)
-
- def test_create_image_blank_name(self):
- body = {
- 'createImage': {
- 'name': '',
- }
- }
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_create_image,
- req, FAKE_UUID, body)
-
- def test_create_image_bad_metadata(self):
- body = {
- 'createImage': {
- 'name': 'geoff',
- 'metadata': 'henry',
- },
- }
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_create_image,
- req, FAKE_UUID, body)
-
- def test_create_image_raises_conflict_on_invalid_state(self):
- def snapshot(*args, **kwargs):
- raise exception.InstanceInvalidState(attr='fake_attr',
- state='fake_state', method='fake_method',
- instance_uuid='fake')
- self.stubs.Set(compute_api.API, 'snapshot', snapshot)
-
- body = {
- "createImage": {
- "name": "test_snapshot",
- },
- }
-
- req = fakes.HTTPRequest.blank(self.url)
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller._action_create_image,
- req, FAKE_UUID, body)
-
-
-class TestServerActionXMLDeserializer(test.TestCase):
-
- def setUp(self):
- super(TestServerActionXMLDeserializer, self).setUp()
- self.deserializer = servers.ActionDeserializer()
-
- def test_create_image(self):
- serial_request = """
-<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
- name="new-server-test"/>"""
- request = self.deserializer.deserialize(serial_request, 'action')
- expected = {
- "createImage": {
- "name": "new-server-test",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_create_image_with_metadata(self):
- serial_request = """
-<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
- name="new-server-test">
- <metadata>
- <meta key="key1">value1</meta>
- </metadata>
-</createImage>"""
- request = self.deserializer.deserialize(serial_request, 'action')
- expected = {
- "createImage": {
- "name": "new-server-test",
- "metadata": {"key1": "value1"},
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_change_pass(self):
- serial_request = """<?xml version="1.0" encoding="UTF-8"?>
- <changePassword
- xmlns="http://docs.openstack.org/compute/api/v1.1"
- adminPass="1234pass"/> """
- request = self.deserializer.deserialize(serial_request, 'action')
- expected = {
- "changePassword": {
- "adminPass": "1234pass",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_change_pass_no_pass(self):
- serial_request = """<?xml version="1.0" encoding="UTF-8"?>
- <changePassword
- xmlns="http://docs.openstack.org/compute/api/v1.1"/> """
- self.assertRaises(AttributeError,
- self.deserializer.deserialize,
- serial_request,
- 'action')
-
- def test_change_pass_empty_pass(self):
- serial_request = """<?xml version="1.0" encoding="UTF-8"?>
- <changePassword
- xmlns="http://docs.openstack.org/compute/api/v1.1"
- adminPass=""/> """
- request = self.deserializer.deserialize(serial_request, 'action')
- expected = {
- "changePassword": {
- "adminPass": "",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_reboot(self):
- serial_request = """<?xml version="1.0" encoding="UTF-8"?>
- <reboot
- xmlns="http://docs.openstack.org/compute/api/v1.1"
- type="HARD"/>"""
- request = self.deserializer.deserialize(serial_request, 'action')
- expected = {
- "reboot": {
- "type": "HARD",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_reboot_no_type(self):
- serial_request = """<?xml version="1.0" encoding="UTF-8"?>
- <reboot
- xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
- self.assertRaises(AttributeError,
- self.deserializer.deserialize,
- serial_request,
- 'action')
-
- def test_resize(self):
- serial_request = """<?xml version="1.0" encoding="UTF-8"?>
- <resize
- xmlns="http://docs.openstack.org/compute/api/v1.1"
- flavorRef="http://localhost/flavors/3"/>"""
- request = self.deserializer.deserialize(serial_request, 'action')
- expected = {
- "resize": {"flavorRef": "http://localhost/flavors/3"},
- }
- self.assertEqual(request['body'], expected)
-
- def test_resize_no_flavor_ref(self):
- serial_request = """<?xml version="1.0" encoding="UTF-8"?>
- <resize
- xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
- self.assertRaises(AttributeError,
- self.deserializer.deserialize,
- serial_request,
- 'action')
-
- def test_confirm_resize(self):
- serial_request = """<?xml version="1.0" encoding="UTF-8"?>
- <confirmResize
- xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
- request = self.deserializer.deserialize(serial_request, 'action')
- expected = {
- "confirmResize": None,
- }
- self.assertEqual(request['body'], expected)
-
- def test_revert_resize(self):
- serial_request = """<?xml version="1.0" encoding="UTF-8"?>
- <revertResize
- xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
- request = self.deserializer.deserialize(serial_request, 'action')
- expected = {
- "revertResize": None,
- }
- self.assertEqual(request['body'], expected)
-
- def test_rebuild(self):
- serial_request = """<?xml version="1.0" encoding="UTF-8"?>
- <rebuild
- xmlns="http://docs.openstack.org/compute/api/v1.1"
- name="new-server-test"
- imageRef="http://localhost/images/1">
- <metadata>
- <meta key="My Server Name">Apache1</meta>
- </metadata>
- <personality>
- <file path="/etc/banner.txt">Mg==</file>
- </personality>
- </rebuild>"""
- request = self.deserializer.deserialize(serial_request, 'action')
- expected = {
- "rebuild": {
- "name": "new-server-test",
- "imageRef": "http://localhost/images/1",
- "metadata": {
- "My Server Name": "Apache1",
- },
- "personality": [
- {"path": "/etc/banner.txt", "contents": "Mg=="},
- ],
- },
- }
- self.assertThat(request['body'], matchers.DictMatches(expected))
-
- def test_rebuild_minimum(self):
- serial_request = """<?xml version="1.0" encoding="UTF-8"?>
- <rebuild
- xmlns="http://docs.openstack.org/compute/api/v1.1"
- imageRef="http://localhost/images/1"/>"""
- request = self.deserializer.deserialize(serial_request, 'action')
- expected = {
- "rebuild": {
- "imageRef": "http://localhost/images/1",
- },
- }
- self.assertThat(request['body'], matchers.DictMatches(expected))
-
- def test_rebuild_no_imageRef(self):
- serial_request = """<?xml version="1.0" encoding="UTF-8"?>
- <rebuild
- xmlns="http://docs.openstack.org/compute/api/v1.1"
- name="new-server-test">
- <metadata>
- <meta key="My Server Name">Apache1</meta>
- </metadata>
- <personality>
- <file path="/etc/banner.txt">Mg==</file>
- </personality>
- </rebuild>"""
- self.assertRaises(AttributeError,
- self.deserializer.deserialize,
- serial_request,
- 'action')
-
- def test_rebuild_blank_name(self):
- serial_request = """<?xml version="1.0" encoding="UTF-8"?>
- <rebuild
- xmlns="http://docs.openstack.org/compute/api/v1.1"
- imageRef="http://localhost/images/1"
- name=""/>"""
- self.assertRaises(AttributeError,
- self.deserializer.deserialize,
- serial_request,
- 'action')
-
- def test_rebuild_preserve_ephemeral_passed(self):
- serial_request = """<?xml version="1.0" encoding="UTF-8"?>
- <rebuild
- xmlns="http://docs.openstack.org/compute/api/v1.1"
- imageRef="http://localhost/images/1"
- preserve_ephemeral="true"/>"""
- request = self.deserializer.deserialize(serial_request, 'action')
- expected = {
- "rebuild": {
- "imageRef": "http://localhost/images/1",
- "preserve_ephemeral": True,
- },
- }
- self.assertThat(request['body'], matchers.DictMatches(expected))
-
- def test_corrupt_xml(self):
- """Should throw a 400 error on corrupt xml."""
- self.assertRaises(
- exception.MalformedRequestBody,
- self.deserializer.deserialize,
- utils.killer_xml_body())
diff --git a/nova/tests/api/openstack/compute/test_server_metadata.py b/nova/tests/api/openstack/compute/test_server_metadata.py
deleted file mode 100644
index 330786142d..0000000000
--- a/nova/tests/api/openstack/compute/test_server_metadata.py
+++ /dev/null
@@ -1,771 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import uuid
-
-import mock
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-from oslo.utils import timeutils
-import six
-import webob
-
-from nova.api.openstack.compute.plugins.v3 import server_metadata \
- as server_metadata_v21
-from nova.api.openstack.compute import server_metadata as server_metadata_v2
-from nova.compute import rpcapi as compute_rpcapi
-from nova.compute import vm_states
-import nova.db
-from nova import exception
-from nova import objects
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-
-
-CONF = cfg.CONF
-
-
-def return_create_instance_metadata_max(context, server_id, metadata, delete):
- return stub_max_server_metadata()
-
-
-def return_create_instance_metadata(context, server_id, metadata, delete):
- return stub_server_metadata()
-
-
-def fake_instance_save(inst, **kwargs):
- inst.metadata = stub_server_metadata()
- inst.obj_reset_changes()
-
-
-def return_server_metadata(context, server_id):
- if not isinstance(server_id, six.string_types) or not len(server_id) == 36:
- msg = 'id %s must be a uuid in return server metadata' % server_id
- raise Exception(msg)
- return stub_server_metadata()
-
-
-def return_empty_server_metadata(context, server_id):
- return {}
-
-
-def delete_server_metadata(context, server_id, key):
- pass
-
-
-def stub_server_metadata():
- metadata = {
- "key1": "value1",
- "key2": "value2",
- "key3": "value3",
- }
- return metadata
-
-
-def stub_max_server_metadata():
- metadata = {"metadata": {}}
- for num in range(CONF.quota_metadata_items):
- metadata['metadata']['key%i' % num] = "blah"
- return metadata
-
-
-def return_server(context, server_id, columns_to_join=None):
- return fake_instance.fake_db_instance(
- **{'id': server_id,
- 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
- 'name': 'fake',
- 'locked': False,
- 'launched_at': timeutils.utcnow(),
- 'vm_state': vm_states.ACTIVE})
-
-
-def return_server_by_uuid(context, server_uuid,
- columns_to_join=None, use_slave=False):
- return fake_instance.fake_db_instance(
- **{'id': 1,
- 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
- 'name': 'fake',
- 'locked': False,
- 'launched_at': timeutils.utcnow(),
- 'metadata': stub_server_metadata(),
- 'vm_state': vm_states.ACTIVE})
-
-
-def return_server_nonexistent(context, server_id,
- columns_to_join=None, use_slave=False):
- raise exception.InstanceNotFound(instance_id=server_id)
-
-
-def fake_change_instance_metadata(self, context, instance, diff):
- pass
-
-
-class ServerMetaDataTestV21(test.TestCase):
- validation_ex = exception.ValidationError
- validation_ex_large = validation_ex
-
- def setUp(self):
- super(ServerMetaDataTestV21, self).setUp()
- fakes.stub_out_key_pair_funcs(self.stubs)
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_by_uuid)
-
- self.stubs.Set(nova.db, 'instance_metadata_get',
- return_server_metadata)
-
- self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
- fake_change_instance_metadata)
- self._set_up_resources()
-
- def _set_up_resources(self):
- self.controller = server_metadata_v21.ServerMetadataController()
- self.uuid = str(uuid.uuid4())
- self.url = '/fake/servers/%s/metadata' % self.uuid
-
- def _get_request(self, param_url=''):
- return fakes.HTTPRequestV3.blank(self.url + param_url)
-
- def test_index(self):
- req = self._get_request()
- res_dict = self.controller.index(req, self.uuid)
-
- expected = {
- 'metadata': {
- 'key1': 'value1',
- 'key2': 'value2',
- 'key3': 'value3',
- },
- }
- self.assertEqual(expected, res_dict)
-
- def test_index_nonexistent_server(self):
- self.stubs.Set(nova.db, 'instance_metadata_get',
- return_server_nonexistent)
- req = self._get_request()
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.index, req, self.url)
-
- def test_index_no_data(self):
- self.stubs.Set(nova.db, 'instance_metadata_get',
- return_empty_server_metadata)
- req = self._get_request()
- res_dict = self.controller.index(req, self.uuid)
- expected = {'metadata': {}}
- self.assertEqual(expected, res_dict)
-
- def test_show(self):
- req = self._get_request('/key2')
- res_dict = self.controller.show(req, self.uuid, 'key2')
- expected = {"meta": {'key2': 'value2'}}
- self.assertEqual(expected, res_dict)
-
- def test_show_nonexistent_server(self):
- self.stubs.Set(nova.db, 'instance_metadata_get',
- return_server_nonexistent)
- req = self._get_request('/key2')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.show, req, self.uuid, 'key2')
-
- def test_show_meta_not_found(self):
- self.stubs.Set(nova.db, 'instance_metadata_get',
- return_empty_server_metadata)
- req = self._get_request('/key6')
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.show, req, self.uuid, 'key6')
-
- def test_delete(self):
- self.stubs.Set(nova.db, 'instance_metadata_get',
- return_server_metadata)
- self.stubs.Set(nova.db, 'instance_metadata_delete',
- delete_server_metadata)
- req = self._get_request('/key2')
- req.method = 'DELETE'
- res = self.controller.delete(req, self.uuid, 'key2')
-
- self.assertIsNone(res)
-
- def test_delete_nonexistent_server(self):
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_nonexistent)
- req = self._get_request('/key1')
- req.method = 'DELETE'
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.delete, req, self.uuid, 'key1')
-
- def test_delete_meta_not_found(self):
- self.stubs.Set(nova.db, 'instance_metadata_get',
- return_empty_server_metadata)
- req = self._get_request('/key6')
- req.method = 'DELETE'
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.delete, req, self.uuid, 'key6')
-
- def test_create(self):
- self.stubs.Set(objects.Instance, 'save', fake_instance_save)
- req = self._get_request()
- req.method = 'POST'
- req.content_type = "application/json"
- body = {"metadata": {"key9": "value9"}}
- req.body = jsonutils.dumps(body)
- res_dict = self.controller.create(req, self.uuid, body=body)
-
- body['metadata'].update({
- "key1": "value1",
- "key2": "value2",
- "key3": "value3",
- })
- self.assertEqual(body, res_dict)
-
- def test_create_empty_body(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = self._get_request()
- req.method = 'POST'
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(self.validation_ex,
- self.controller.create, req, self.uuid, body=None)
-
- def test_create_item_empty_key(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = self._get_request('/key1')
- req.method = 'PUT'
- body = {"metadata": {"": "value1"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(self.validation_ex,
- self.controller.create, req, self.uuid, body=body)
-
- def test_create_item_non_dict(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = self._get_request('/key1')
- req.method = 'PUT'
- body = {"metadata": None}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(self.validation_ex,
- self.controller.create, req, self.uuid, body=body)
-
- def test_create_item_key_too_long(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = self._get_request('/key1')
- req.method = 'PUT'
- body = {"metadata": {("a" * 260): "value1"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(self.validation_ex_large,
- self.controller.create,
- req, self.uuid, body=body)
-
- def test_create_malformed_container(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = fakes.HTTPRequest.blank(self.url + '/key1')
- req.method = 'PUT'
- body = {"meta": {}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(self.validation_ex,
- self.controller.create, req, self.uuid, body=body)
-
- def test_create_malformed_data(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = fakes.HTTPRequest.blank(self.url + '/key1')
- req.method = 'PUT'
- body = {"metadata": ['asdf']}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(self.validation_ex,
- self.controller.create, req, self.uuid, body=body)
-
- def test_create_nonexistent_server(self):
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_nonexistent)
- req = self._get_request()
- req.method = 'POST'
- body = {"metadata": {"key1": "value1"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.create, req, self.uuid, body=body)
-
- def test_update_metadata(self):
- self.stubs.Set(objects.Instance, 'save', fake_instance_save)
- req = self._get_request()
- req.method = 'POST'
- req.content_type = 'application/json'
- expected = {
- 'metadata': {
- 'key1': 'updatedvalue',
- 'key29': 'newkey',
- }
- }
- req.body = jsonutils.dumps(expected)
- response = self.controller.update_all(req, self.uuid, body=expected)
- self.assertEqual(expected, response)
-
- def test_update_all(self):
- self.stubs.Set(objects.Instance, 'save', fake_instance_save)
- req = self._get_request()
- req.method = 'PUT'
- req.content_type = "application/json"
- expected = {
- 'metadata': {
- 'key10': 'value10',
- 'key99': 'value99',
- },
- }
- req.body = jsonutils.dumps(expected)
- res_dict = self.controller.update_all(req, self.uuid, body=expected)
-
- self.assertEqual(expected, res_dict)
-
- def test_update_all_empty_container(self):
- self.stubs.Set(objects.Instance, 'save', fake_instance_save)
- req = self._get_request()
- req.method = 'PUT'
- req.content_type = "application/json"
- expected = {'metadata': {}}
- req.body = jsonutils.dumps(expected)
- res_dict = self.controller.update_all(req, self.uuid, body=expected)
-
- self.assertEqual(expected, res_dict)
-
- def test_update_all_empty_body_item(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = fakes.HTTPRequest.blank(self.url + '/key1')
- req.method = 'PUT'
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(self.validation_ex,
- self.controller.update_all, req, self.uuid,
- body=None)
-
- def test_update_all_with_non_dict_item(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = fakes.HTTPRequest.blank(self.url + '/bad')
- req.method = 'PUT'
- body = {"metadata": None}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(self.validation_ex,
- self.controller.update_all, req, self.uuid,
- body=body)
-
- def test_update_all_malformed_container(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = self._get_request()
- req.method = 'PUT'
- req.content_type = "application/json"
- expected = {'meta': {}}
- req.body = jsonutils.dumps(expected)
-
- self.assertRaises(self.validation_ex,
- self.controller.update_all, req, self.uuid,
- body=expected)
-
- def test_update_all_malformed_data(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = self._get_request()
- req.method = 'PUT'
- req.content_type = "application/json"
- expected = {'metadata': ['asdf']}
- req.body = jsonutils.dumps(expected)
-
- self.assertRaises(self.validation_ex,
- self.controller.update_all, req, self.uuid,
- body=expected)
-
- def test_update_all_nonexistent_server(self):
- self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
- req = self._get_request()
- req.method = 'PUT'
- req.content_type = "application/json"
- body = {'metadata': {'key10': 'value10'}}
- req.body = jsonutils.dumps(body)
-
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.update_all, req, '100', body=body)
-
- def test_update_all_non_dict(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = self._get_request()
- req.method = 'PUT'
- body = {"metadata": None}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(self.validation_ex, self.controller.update_all,
- req, self.uuid, body=body)
-
- def test_update_item(self):
- self.stubs.Set(objects.Instance, 'save', fake_instance_save)
- req = self._get_request('/key1')
- req.method = 'PUT'
- body = {"meta": {"key1": "value1"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- res_dict = self.controller.update(req, self.uuid, 'key1', body=body)
- expected = {"meta": {'key1': 'value1'}}
- self.assertEqual(expected, res_dict)
-
- def test_update_item_nonexistent_server(self):
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_nonexistent)
- req = self._get_request('/key1')
- req.method = 'PUT'
- body = {"meta": {"key1": "value1"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.update, req, self.uuid, 'key1',
- body=body)
-
- def test_update_item_empty_body(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = self._get_request('/key1')
- req.method = 'PUT'
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(self.validation_ex,
- self.controller.update, req, self.uuid, 'key1',
- body=None)
-
- def test_update_malformed_container(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = fakes.HTTPRequest.blank(self.url)
- req.method = 'PUT'
- expected = {'meta': {}}
- req.body = jsonutils.dumps(expected)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(self.validation_ex,
- self.controller.update, req, self.uuid, 'key1',
- body=expected)
-
- def test_update_malformed_data(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = fakes.HTTPRequest.blank(self.url)
- req.method = 'PUT'
- expected = {'metadata': ['asdf']}
- req.body = jsonutils.dumps(expected)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(self.validation_ex,
- self.controller.update, req, self.uuid, 'key1',
- body=expected)
-
- def test_update_item_empty_key(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = self._get_request('/key1')
- req.method = 'PUT'
- body = {"meta": {"": "value1"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(self.validation_ex,
- self.controller.update, req, self.uuid, '',
- body=body)
-
- def test_update_item_key_too_long(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = self._get_request('/key1')
- req.method = 'PUT'
- body = {"meta": {("a" * 260): "value1"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(self.validation_ex_large,
- self.controller.update,
- req, self.uuid, ("a" * 260), body=body)
-
- def test_update_item_value_too_long(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = self._get_request('/key1')
- req.method = 'PUT'
- body = {"meta": {"key1": ("a" * 260)}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(self.validation_ex_large,
- self.controller.update,
- req, self.uuid, "key1", body=body)
-
- def test_update_item_too_many_keys(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = self._get_request('/key1')
- req.method = 'PUT'
- body = {"meta": {"key1": "value1", "key2": "value2"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(self.validation_ex,
- self.controller.update, req, self.uuid, 'key1',
- body=body)
-
- def test_update_item_body_uri_mismatch(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = self._get_request('/bad')
- req.method = 'PUT'
- body = {"meta": {"key1": "value1"}}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.update, req, self.uuid, 'bad',
- body=body)
-
- def test_update_item_non_dict(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = self._get_request('/bad')
- req.method = 'PUT'
- body = {"meta": None}
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(self.validation_ex,
- self.controller.update, req, self.uuid, 'bad',
- body=body)
-
- def test_update_empty_container(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = fakes.HTTPRequest.blank(self.url)
- req.method = 'PUT'
- expected = {'metadata': {}}
- req.body = jsonutils.dumps(expected)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(self.validation_ex,
- self.controller.update, req, self.uuid, 'bad',
- body=expected)
-
- def test_too_many_metadata_items_on_create(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- data = {"metadata": {}}
- for num in range(CONF.quota_metadata_items + 1):
- data['metadata']['key%i' % num] = "blah"
- req = self._get_request()
- req.method = 'POST'
- req.body = jsonutils.dumps(data)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(webob.exc.HTTPForbidden,
- self.controller.create, req, self.uuid, body=data)
-
- def test_invalid_metadata_items_on_create(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = self._get_request()
- req.method = 'POST'
- req.headers["content-type"] = "application/json"
-
- # test for long key
- data = {"metadata": {"a" * 260: "value1"}}
- req.body = jsonutils.dumps(data)
- self.assertRaises(self.validation_ex_large,
- self.controller.create, req, self.uuid, body=data)
-
- # test for long value
- data = {"metadata": {"key": "v" * 260}}
- req.body = jsonutils.dumps(data)
- self.assertRaises(self.validation_ex_large,
- self.controller.create, req, self.uuid, body=data)
-
- # test for empty key.
- data = {"metadata": {"": "value1"}}
- req.body = jsonutils.dumps(data)
- self.assertRaises(self.validation_ex,
- self.controller.create, req, self.uuid, body=data)
-
- def test_too_many_metadata_items_on_update_item(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- data = {"metadata": {}}
- for num in range(CONF.quota_metadata_items + 1):
- data['metadata']['key%i' % num] = "blah"
- req = self._get_request()
- req.method = 'PUT'
- req.body = jsonutils.dumps(data)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(webob.exc.HTTPForbidden, self.controller.update_all,
- req, self.uuid, body=data)
-
- def test_invalid_metadata_items_on_update_item(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- data = {"metadata": {}}
- for num in range(CONF.quota_metadata_items + 1):
- data['metadata']['key%i' % num] = "blah"
- req = self._get_request()
- req.method = 'PUT'
- req.body = jsonutils.dumps(data)
- req.headers["content-type"] = "application/json"
-
- # test for long key
- data = {"metadata": {"a" * 260: "value1"}}
- req.body = jsonutils.dumps(data)
- self.assertRaises(self.validation_ex_large,
- self.controller.update_all, req, self.uuid,
- body=data)
-
- # test for long value
- data = {"metadata": {"key": "v" * 260}}
- req.body = jsonutils.dumps(data)
- self.assertRaises(self.validation_ex_large,
- self.controller.update_all, req, self.uuid,
- body=data)
-
- # test for empty key.
- data = {"metadata": {"": "value1"}}
- req.body = jsonutils.dumps(data)
- self.assertRaises(self.validation_ex,
- self.controller.update_all, req, self.uuid,
- body=data)
-
-
-class ServerMetaDataTestV2(ServerMetaDataTestV21):
- validation_ex = webob.exc.HTTPBadRequest
- validation_ex_large = webob.exc.HTTPRequestEntityTooLarge
-
- def _set_up_resources(self):
- self.controller = server_metadata_v2.Controller()
- self.uuid = str(uuid.uuid4())
- self.url = '/v1.1/fake/servers/%s/metadata' % self.uuid
-
- def _get_request(self, param_url=''):
- return fakes.HTTPRequest.blank(self.url + param_url)
-
-
-class BadStateServerMetaDataTestV21(test.TestCase):
-
- def setUp(self):
- super(BadStateServerMetaDataTestV21, self).setUp()
- fakes.stub_out_key_pair_funcs(self.stubs)
- self.stubs.Set(nova.db, 'instance_metadata_get',
- return_server_metadata)
- self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
- fake_change_instance_metadata)
- self.stubs.Set(nova.db, 'instance_get', self._return_server_in_build)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- self._return_server_in_build_by_uuid)
- self.stubs.Set(nova.db, 'instance_metadata_delete',
- delete_server_metadata)
- self._set_up_resources()
-
- def _set_up_resources(self):
- self.controller = server_metadata_v21.ServerMetadataController()
- self.uuid = str(uuid.uuid4())
- self.url = '/fake/servers/%s/metadata' % self.uuid
-
- def _get_request(self, param_url=''):
- return fakes.HTTPRequestV3.blank(self.url + param_url)
-
- def test_invalid_state_on_delete(self):
- req = self._get_request('/key2')
- req.method = 'DELETE'
- self.assertRaises(webob.exc.HTTPConflict, self.controller.delete,
- req, self.uuid, 'key2')
-
- def test_invalid_state_on_update_metadata(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- req = self._get_request()
- req.method = 'POST'
- req.content_type = 'application/json'
- expected = {
- 'metadata': {
- 'key1': 'updatedvalue',
- 'key29': 'newkey',
- }
- }
- req.body = jsonutils.dumps(expected)
- self.assertRaises(webob.exc.HTTPConflict, self.controller.update_all,
- req, self.uuid, body=expected)
-
- def _return_server_in_build(self, context, server_id,
- columns_to_join=None):
- return fake_instance.fake_db_instance(
- **{'id': server_id,
- 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
- 'name': 'fake',
- 'locked': False,
- 'vm_state': vm_states.BUILDING})
-
- def _return_server_in_build_by_uuid(self, context, server_uuid,
- columns_to_join=None, use_slave=False):
- return fake_instance.fake_db_instance(
- **{'id': 1,
- 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
- 'name': 'fake',
- 'locked': False,
- 'vm_state': vm_states.BUILDING})
-
- @mock.patch.object(nova.compute.api.API, 'update_instance_metadata',
- side_effect=exception.InstanceIsLocked(instance_uuid=0))
- def test_instance_lock_update_metadata(self, mock_update):
- req = self._get_request()
- req.method = 'POST'
- req.content_type = 'application/json'
- expected = {
- 'metadata': {
- 'keydummy': 'newkey',
- }
- }
- req.body = jsonutils.dumps(expected)
- self.assertRaises(webob.exc.HTTPConflict, self.controller.update_all,
- req, self.uuid, body=expected)
-
-
-class BadStateServerMetaDataTestV2(BadStateServerMetaDataTestV21):
- def _set_up_resources(self):
- self.controller = server_metadata_v2.Controller()
- self.uuid = str(uuid.uuid4())
- self.url = '/v1.1/fake/servers/%s/metadata' % self.uuid
-
- def _get_request(self, param_url=''):
- return fakes.HTTPRequest.blank(self.url + param_url)
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
deleted file mode 100644
index f08821e101..0000000000
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ /dev/null
@@ -1,4624 +0,0 @@
-# Copyright 2010-2011 OpenStack Foundation
-# Copyright 2011 Piston Cloud Computing, Inc.
-# All Rights Reserved.
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import base64
-import contextlib
-import datetime
-import urllib
-import uuid
-
-import iso8601
-from lxml import etree
-import mock
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-from oslo.utils import timeutils
-import six
-import six.moves.urllib.parse as urlparse
-import testtools
-import webob
-
-from nova.api.openstack import compute
-from nova.api.openstack.compute import ips
-from nova.api.openstack.compute import servers
-from nova.api.openstack.compute import views
-from nova.api.openstack import extensions
-from nova.api.openstack import xmlutil
-from nova.compute import api as compute_api
-from nova.compute import delete_types
-from nova.compute import flavors
-from nova.compute import task_states
-from nova.compute import vm_states
-from nova import context
-from nova import db
-from nova.db.sqlalchemy import models
-from nova import exception
-from nova.i18n import _
-from nova.image import glance
-from nova.network import manager
-from nova.network.neutronv2 import api as neutron_api
-from nova import objects
-from nova.objects import instance as instance_obj
-from nova.openstack.common import policy as common_policy
-from nova import policy
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-from nova.tests import fake_network
-from nova.tests.image import fake
-from nova.tests import matchers
-from nova.tests.objects import test_keypair
-from nova.tests import utils
-from nova import utils as nova_utils
-
-CONF = cfg.CONF
-CONF.import_opt('password_length', 'nova.utils')
-
-FAKE_UUID = fakes.FAKE_UUID
-NS = "{http://docs.openstack.org/compute/api/v1.1}"
-ATOMNS = "{http://www.w3.org/2005/Atom}"
-XPATH_NS = {
- 'atom': 'http://www.w3.org/2005/Atom',
- 'ns': 'http://docs.openstack.org/compute/api/v1.1'
-}
-
-INSTANCE_IDS = {FAKE_UUID: 1}
-
-FIELDS = instance_obj.INSTANCE_DEFAULT_FIELDS
-
-
-def fake_gen_uuid():
- return FAKE_UUID
-
-
-def return_servers_empty(context, *args, **kwargs):
- return []
-
-
-def return_security_group(context, instance_id, security_group_id):
- pass
-
-
-def instance_update_and_get_original(context, instance_uuid, values,
- update_cells=True,
- columns_to_join=None,
- ):
- inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
- name=values.get('display_name'))
- inst = dict(inst, **values)
- return (inst, inst)
-
-
-def instance_update(context, instance_uuid, values, update_cells=True):
- inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
- name=values.get('display_name'))
- inst = dict(inst, **values)
- return inst
-
-
-def fake_compute_api(cls, req, id):
- return True
-
-
-class MockSetAdminPassword(object):
- def __init__(self):
- self.instance_id = None
- self.password = None
-
- def __call__(self, context, instance_id, password):
- self.instance_id = instance_id
- self.password = password
-
-
-class Base64ValidationTest(test.TestCase):
- def setUp(self):
- super(Base64ValidationTest, self).setUp()
- self.ext_mgr = extensions.ExtensionManager()
- self.ext_mgr.extensions = {}
- self.controller = servers.Controller(self.ext_mgr)
-
- def test_decode_base64(self):
- value = "A random string"
- result = self.controller._decode_base64(base64.b64encode(value))
- self.assertEqual(result, value)
-
- def test_decode_base64_binary(self):
- value = "\x00\x12\x75\x99"
- result = self.controller._decode_base64(base64.b64encode(value))
- self.assertEqual(result, value)
-
- def test_decode_base64_whitespace(self):
- value = "A random string"
- encoded = base64.b64encode(value)
- white = "\n \n%s\t%s\n" % (encoded[:2], encoded[2:])
- result = self.controller._decode_base64(white)
- self.assertEqual(result, value)
-
- def test_decode_base64_invalid(self):
- invalid = "A random string"
- result = self.controller._decode_base64(invalid)
- self.assertIsNone(result)
-
- def test_decode_base64_illegal_bytes(self):
- value = "A random string"
- encoded = base64.b64encode(value)
- white = ">\x01%s*%s()" % (encoded[:2], encoded[2:])
- result = self.controller._decode_base64(white)
- self.assertIsNone(result)
-
-
-class NeutronV2Subclass(neutron_api.API):
- """Used to ensure that API handles subclasses properly."""
- pass
-
-
-class ControllerTest(test.TestCase):
-
- def setUp(self):
- super(ControllerTest, self).setUp()
- self.flags(verbose=True, use_ipv6=False)
- fakes.stub_out_rate_limiting(self.stubs)
- fakes.stub_out_key_pair_funcs(self.stubs)
- fake.stub_out_image_service(self.stubs)
- return_server = fakes.fake_instance_get()
- return_servers = fakes.fake_instance_get_all_by_filters()
- self.stubs.Set(db, 'instance_get_all_by_filters',
- return_servers)
- self.stubs.Set(db, 'instance_get_by_uuid',
- return_server)
- self.stubs.Set(db, 'instance_add_security_group',
- return_security_group)
- self.stubs.Set(db, 'instance_update_and_get_original',
- instance_update_and_get_original)
-
- self.ext_mgr = extensions.ExtensionManager()
- self.ext_mgr.extensions = {}
- self.controller = servers.Controller(self.ext_mgr)
- self.ips_controller = ips.Controller()
- policy.reset()
- policy.init()
- fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
-
-
-class ServersControllerTest(ControllerTest):
- def test_can_check_loaded_extensions(self):
- self.ext_mgr.extensions = {'os-fake': None}
- self.assertTrue(self.controller.ext_mgr.is_loaded('os-fake'))
- self.assertFalse(self.controller.ext_mgr.is_loaded('os-not-loaded'))
-
- def test_requested_networks_prefix(self):
- uuid = 'br-00000000-0000-0000-0000-000000000000'
- requested_networks = [{'uuid': uuid}]
- res = self.controller._get_requested_networks(requested_networks)
- self.assertIn((uuid, None), res.as_tuples())
-
- def test_requested_networks_neutronv2_enabled_with_port(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'port': port}]
- res = self.controller._get_requested_networks(requested_networks)
- self.assertEqual([(None, None, port, None)], res.as_tuples())
-
- def test_requested_networks_neutronv2_enabled_with_network(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- requested_networks = [{'uuid': network}]
- res = self.controller._get_requested_networks(requested_networks)
- self.assertEqual([(network, None, None, None)], res.as_tuples())
-
- def test_requested_networks_neutronv2_enabled_with_network_and_port(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'uuid': network, 'port': port}]
- res = self.controller._get_requested_networks(requested_networks)
- self.assertEqual([(None, None, port, None)], res.as_tuples())
-
- def test_requested_networks_neutronv2_disabled_with_port(self):
- port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'port': port}]
- self.assertRaises(
- webob.exc.HTTPBadRequest,
- self.controller._get_requested_networks,
- requested_networks)
-
- def test_requested_networks_api_enabled_with_v2_subclass(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'uuid': network, 'port': port}]
- res = self.controller._get_requested_networks(requested_networks)
- self.assertEqual([(None, None, port, None)], res.as_tuples())
-
- def test_requested_networks_neutronv2_subclass_with_port(self):
- cls = 'nova.tests.api.openstack.compute.test_servers.NeutronV2Subclass'
- self.flags(network_api_class=cls)
- port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'port': port}]
- res = self.controller._get_requested_networks(requested_networks)
- self.assertEqual([(None, None, port, None)], res.as_tuples())
-
- def test_get_server_by_uuid(self):
- req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
- res_dict = self.controller.show(req, FAKE_UUID)
- self.assertEqual(res_dict['server']['id'], FAKE_UUID)
-
- def test_unique_host_id(self):
- """Create two servers with the same host and different
- project_ids and check that the hostId's are unique.
- """
- def return_instance_with_host(self, *args, **kwargs):
- project_id = str(uuid.uuid4())
- return fakes.stub_instance(id=1, uuid=FAKE_UUID,
- project_id=project_id,
- host='fake_host')
-
- self.stubs.Set(db, 'instance_get_by_uuid',
- return_instance_with_host)
- self.stubs.Set(db, 'instance_get',
- return_instance_with_host)
-
- req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
- server1 = self.controller.show(req, FAKE_UUID)
- server2 = self.controller.show(req, FAKE_UUID)
-
- self.assertNotEqual(server1['server']['hostId'],
- server2['server']['hostId'])
-
- def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
- status="ACTIVE", progress=100):
- return {
- "server": {
- "id": uuid,
- "user_id": "fake_user",
- "tenant_id": "fake_project",
- "updated": "2010-11-11T11:00:00Z",
- "created": "2010-10-10T12:00:00Z",
- "progress": progress,
- "name": "server1",
- "status": status,
- "accessIPv4": "",
- "accessIPv6": "",
- "hostId": '',
- "image": {
- "id": "10",
- "links": [
- {
- "rel": "bookmark",
- "href": image_bookmark,
- },
- ],
- },
- "flavor": {
- "id": "1",
- "links": [
- {
- "rel": "bookmark",
- "href": flavor_bookmark,
- },
- ],
- },
- "addresses": {
- 'test1': [
- {'version': 4, 'addr': '192.168.1.100'},
- {'version': 6, 'addr': '2001:db8:0:1::1'}
- ]
- },
- "metadata": {
- "seq": "1",
- },
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/servers/%s" % uuid,
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/servers/%s" % uuid,
- },
- ],
- }
- }
-
- def test_get_server_by_id(self):
- self.flags(use_ipv6=True)
- image_bookmark = "http://localhost/fake/images/10"
- flavor_bookmark = "http://localhost/fake/flavors/1"
-
- uuid = FAKE_UUID
- req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % uuid)
- res_dict = self.controller.show(req, uuid)
-
- expected_server = self._get_server_data_dict(uuid,
- image_bookmark,
- flavor_bookmark,
- status="BUILD",
- progress=0)
- self.assertThat(res_dict, matchers.DictMatches(expected_server))
-
- def test_get_server_with_active_status_by_id(self):
- image_bookmark = "http://localhost/fake/images/10"
- flavor_bookmark = "http://localhost/fake/flavors/1"
-
- new_return_server = fakes.fake_instance_get(
- vm_state=vm_states.ACTIVE, progress=100)
- self.stubs.Set(db, 'instance_get_by_uuid', new_return_server)
-
- uuid = FAKE_UUID
- req = fakes.HTTPRequest.blank('/fake/servers/%s' % uuid)
- res_dict = self.controller.show(req, uuid)
- expected_server = self._get_server_data_dict(uuid,
- image_bookmark,
- flavor_bookmark)
- self.assertThat(res_dict, matchers.DictMatches(expected_server))
-
- def test_get_server_with_id_image_ref_by_id(self):
- image_ref = "10"
- image_bookmark = "http://localhost/fake/images/10"
- flavor_id = "1"
- flavor_bookmark = "http://localhost/fake/flavors/1"
-
- new_return_server = fakes.fake_instance_get(
- vm_state=vm_states.ACTIVE, image_ref=image_ref,
- flavor_id=flavor_id, progress=100)
- self.stubs.Set(db, 'instance_get_by_uuid', new_return_server)
-
- uuid = FAKE_UUID
- req = fakes.HTTPRequest.blank('/fake/servers/%s' % uuid)
- res_dict = self.controller.show(req, uuid)
- expected_server = self._get_server_data_dict(uuid,
- image_bookmark,
- flavor_bookmark)
- self.assertThat(res_dict, matchers.DictMatches(expected_server))
-
- def test_get_server_addresses_from_cache(self):
- pub0 = ('172.19.0.1', '172.19.0.2',)
- pub1 = ('1.2.3.4',)
- pub2 = ('b33f::fdee:ddff:fecc:bbaa',)
- priv0 = ('192.168.0.3', '192.168.0.4',)
-
- def _ip(ip):
- return {'address': ip, 'type': 'fixed'}
-
- nw_cache = [
- {'address': 'aa:aa:aa:aa:aa:aa',
- 'id': 1,
- 'network': {'bridge': 'br0',
- 'id': 1,
- 'label': 'public',
- 'subnets': [{'cidr': '172.19.0.0/24',
- 'ips': [_ip(ip) for ip in pub0]},
- {'cidr': '1.2.3.0/16',
- 'ips': [_ip(ip) for ip in pub1]},
- {'cidr': 'b33f::/64',
- 'ips': [_ip(ip) for ip in pub2]}]}},
- {'address': 'bb:bb:bb:bb:bb:bb',
- 'id': 2,
- 'network': {'bridge': 'br1',
- 'id': 2,
- 'label': 'private',
- 'subnets': [{'cidr': '192.168.0.0/24',
- 'ips': [_ip(ip) for ip in priv0]}]}}]
-
- return_server = fakes.fake_instance_get(nw_cache=nw_cache)
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
-
- req = fakes.HTTPRequest.blank('/fake/servers/%s/ips' % FAKE_UUID)
- res_dict = self.ips_controller.index(req, FAKE_UUID)
-
- expected = {
- 'addresses': {
- 'private': [
- {'version': 4, 'addr': '192.168.0.3'},
- {'version': 4, 'addr': '192.168.0.4'},
- ],
- 'public': [
- {'version': 4, 'addr': '172.19.0.1'},
- {'version': 4, 'addr': '172.19.0.2'},
- {'version': 4, 'addr': '1.2.3.4'},
- {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
- ],
- },
- }
- self.assertThat(res_dict, matchers.DictMatches(expected))
-
- def test_get_server_addresses_nonexistent_network(self):
- url = '/fake/servers/%s/ips/network_0' % FAKE_UUID
- req = fakes.HTTPRequest.blank(url)
- self.assertRaises(webob.exc.HTTPNotFound, self.ips_controller.show,
- req, FAKE_UUID, 'network_0')
-
- def test_get_server_addresses_nonexistent_server(self):
- def fake_instance_get(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id='fake')
-
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
-
- server_id = str(uuid.uuid4())
- req = fakes.HTTPRequest.blank('/fake/servers/%s/ips' % server_id)
- self.assertRaises(webob.exc.HTTPNotFound,
- self.ips_controller.index, req, server_id)
-
- def test_get_server_list_empty(self):
- self.stubs.Set(db, 'instance_get_all_by_filters',
- return_servers_empty)
-
- req = fakes.HTTPRequest.blank('/fake/servers')
- res_dict = self.controller.index(req)
-
- num_servers = len(res_dict['servers'])
- self.assertEqual(0, num_servers)
-
- def test_get_server_list_with_reservation_id(self):
- req = fakes.HTTPRequest.blank('/fake/servers?reservation_id=foo')
- res_dict = self.controller.index(req)
-
- i = 0
- for s in res_dict['servers']:
- self.assertEqual(s.get('name'), 'server%d' % (i + 1))
- i += 1
-
- def test_get_server_list_with_reservation_id_empty(self):
- req = fakes.HTTPRequest.blank('/fake/servers/detail?'
- 'reservation_id=foo')
- res_dict = self.controller.detail(req)
-
- i = 0
- for s in res_dict['servers']:
- self.assertEqual(s.get('name'), 'server%d' % (i + 1))
- i += 1
-
- def test_get_server_list_with_reservation_id_details(self):
- req = fakes.HTTPRequest.blank('/fake/servers/detail?'
- 'reservation_id=foo')
- res_dict = self.controller.detail(req)
-
- i = 0
- for s in res_dict['servers']:
- self.assertEqual(s.get('name'), 'server%d' % (i + 1))
- i += 1
-
- def test_get_server_list(self):
- req = fakes.HTTPRequest.blank('/fake/servers')
- res_dict = self.controller.index(req)
-
- self.assertEqual(len(res_dict['servers']), 5)
- for i, s in enumerate(res_dict['servers']):
- self.assertEqual(s['id'], fakes.get_fake_uuid(i))
- self.assertEqual(s['name'], 'server%d' % (i + 1))
- self.assertIsNone(s.get('image', None))
-
- expected_links = [
- {
- "rel": "self",
- "href": "http://localhost/v2/fake/servers/%s" % s['id'],
- },
- {
- "rel": "bookmark",
- "href": "http://localhost/fake/servers/%s" % s['id'],
- },
- ]
-
- self.assertEqual(s['links'], expected_links)
-
- def test_get_servers_with_limit(self):
- req = fakes.HTTPRequest.blank('/fake/servers?limit=3')
- res_dict = self.controller.index(req)
-
- servers = res_dict['servers']
- self.assertEqual([s['id'] for s in servers],
- [fakes.get_fake_uuid(i) for i in xrange(len(servers))])
-
- servers_links = res_dict['servers_links']
- self.assertEqual(servers_links[0]['rel'], 'next')
- href_parts = urlparse.urlparse(servers_links[0]['href'])
- self.assertEqual('/v2/fake/servers', href_parts.path)
- params = urlparse.parse_qs(href_parts.query)
- expected_params = {'limit': ['3'],
- 'marker': [fakes.get_fake_uuid(2)]}
- self.assertThat(params, matchers.DictMatches(expected_params))
-
- def test_get_servers_with_limit_bad_value(self):
- req = fakes.HTTPRequest.blank('/fake/servers?limit=aaa')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.index, req)
-
- def test_get_server_details_empty(self):
- self.stubs.Set(db, 'instance_get_all_by_filters',
- return_servers_empty)
-
- req = fakes.HTTPRequest.blank('/fake/servers/detail')
- res_dict = self.controller.detail(req)
-
- num_servers = len(res_dict['servers'])
- self.assertEqual(0, num_servers)
-
- def test_get_server_details_with_limit(self):
- req = fakes.HTTPRequest.blank('/fake/servers/detail?limit=3')
- res = self.controller.detail(req)
-
- servers = res['servers']
- self.assertEqual([s['id'] for s in servers],
- [fakes.get_fake_uuid(i) for i in xrange(len(servers))])
-
- servers_links = res['servers_links']
- self.assertEqual(servers_links[0]['rel'], 'next')
-
- href_parts = urlparse.urlparse(servers_links[0]['href'])
- self.assertEqual('/v2/fake/servers/detail', href_parts.path)
- params = urlparse.parse_qs(href_parts.query)
- expected = {'limit': ['3'], 'marker': [fakes.get_fake_uuid(2)]}
- self.assertThat(params, matchers.DictMatches(expected))
-
- def test_get_server_details_with_limit_bad_value(self):
- req = fakes.HTTPRequest.blank('/fake/servers/detail?limit=aaa')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.detail, req)
-
- def test_get_server_details_with_limit_and_other_params(self):
- req = fakes.HTTPRequest.blank('/fake/servers/detail'
- '?limit=3&blah=2:t')
- res = self.controller.detail(req)
-
- servers = res['servers']
- self.assertEqual([s['id'] for s in servers],
- [fakes.get_fake_uuid(i) for i in xrange(len(servers))])
-
- servers_links = res['servers_links']
- self.assertEqual(servers_links[0]['rel'], 'next')
-
- href_parts = urlparse.urlparse(servers_links[0]['href'])
- self.assertEqual('/v2/fake/servers/detail', href_parts.path)
- params = urlparse.parse_qs(href_parts.query)
- expected = {'limit': ['3'], 'blah': ['2:t'],
- 'marker': [fakes.get_fake_uuid(2)]}
- self.assertThat(params, matchers.DictMatches(expected))
-
- def test_get_servers_with_too_big_limit(self):
- req = fakes.HTTPRequest.blank('/fake/servers?limit=30')
- res_dict = self.controller.index(req)
- self.assertNotIn('servers_links', res_dict)
-
- def test_get_servers_with_bad_limit(self):
- req = fakes.HTTPRequest.blank('/fake/servers?limit=asdf')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.index, req)
-
- def test_get_servers_with_marker(self):
- url = '/v2/fake/servers?marker=%s' % fakes.get_fake_uuid(2)
- req = fakes.HTTPRequest.blank(url)
- servers = self.controller.index(req)['servers']
- self.assertEqual([s['name'] for s in servers], ["server4", "server5"])
-
- def test_get_servers_with_limit_and_marker(self):
- url = '/v2/fake/servers?limit=2&marker=%s' % fakes.get_fake_uuid(1)
- req = fakes.HTTPRequest.blank(url)
- servers = self.controller.index(req)['servers']
- self.assertEqual([s['name'] for s in servers], ['server3', 'server4'])
-
- def test_get_servers_with_bad_marker(self):
- req = fakes.HTTPRequest.blank('/fake/servers?limit=2&marker=asdf')
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.index, req)
-
- def test_get_servers_with_bad_option(self):
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False):
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequest.blank('/fake/servers?unknownoption=whee')
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_allows_image(self):
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False):
- self.assertIsNotNone(search_opts)
- self.assertIn('image', search_opts)
- self.assertEqual(search_opts['image'], '12345')
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequest.blank('/fake/servers?image=12345')
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_tenant_id_filter_converts_to_project_id_for_admin(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False):
- self.assertIsNotNone(filters)
- self.assertEqual(filters['project_id'], 'newfake')
- self.assertFalse(filters.get('tenant_id'))
- return [fakes.stub_instance(100)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- req = fakes.HTTPRequest.blank('/fake/servers'
- '?all_tenants=1&tenant_id=newfake',
- use_admin_context=True)
- res = self.controller.index(req)
-
- self.assertIn('servers', res)
-
- def test_all_tenants_param_normal(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False):
- self.assertNotIn('project_id', filters)
- return [fakes.stub_instance(100)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- req = fakes.HTTPRequest.blank('/fake/servers?all_tenants',
- use_admin_context=True)
- res = self.controller.index(req)
-
- self.assertIn('servers', res)
-
- def test_all_tenants_param_one(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False):
- self.assertNotIn('project_id', filters)
- return [fakes.stub_instance(100)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- req = fakes.HTTPRequest.blank('/fake/servers?all_tenants=1',
- use_admin_context=True)
- res = self.controller.index(req)
-
- self.assertIn('servers', res)
-
- def test_all_tenants_param_zero(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False):
- self.assertNotIn('all_tenants', filters)
- return [fakes.stub_instance(100)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- req = fakes.HTTPRequest.blank('/fake/servers?all_tenants=0',
- use_admin_context=True)
- res = self.controller.index(req)
-
- self.assertIn('servers', res)
-
- def test_all_tenants_param_false(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False):
- self.assertNotIn('all_tenants', filters)
- return [fakes.stub_instance(100)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- req = fakes.HTTPRequest.blank('/fake/servers?all_tenants=false',
- use_admin_context=True)
- res = self.controller.index(req)
-
- self.assertIn('servers', res)
-
- def test_all_tenants_param_invalid(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None):
- self.assertNotIn('all_tenants', filters)
- return [fakes.stub_instance(100)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- req = fakes.HTTPRequest.blank('/fake/servers?all_tenants=xxx',
- use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.index, req)
-
- def test_admin_restricted_tenant(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False):
- self.assertIsNotNone(filters)
- self.assertEqual(filters['project_id'], 'fake')
- return [fakes.stub_instance(100)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- req = fakes.HTTPRequest.blank('/fake/servers',
- use_admin_context=True)
- res = self.controller.index(req)
-
- self.assertIn('servers', res)
-
- def test_all_tenants_pass_policy(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False):
- self.assertIsNotNone(filters)
- self.assertNotIn('project_id', filters)
- return [fakes.stub_instance(100)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- rules = {
- "compute:get_all_tenants":
- common_policy.parse_rule("project_id:fake"),
- "compute:get_all":
- common_policy.parse_rule("project_id:fake"),
- }
-
- policy.set_rules(rules)
-
- req = fakes.HTTPRequest.blank('/fake/servers?all_tenants=1')
- res = self.controller.index(req)
-
- self.assertIn('servers', res)
-
- def test_all_tenants_fail_policy(self):
- def fake_get_all(context, filters=None, sort_key=None,
- sort_dir='desc', limit=None, marker=None,
- columns_to_join=None):
- self.assertIsNotNone(filters)
- return [fakes.stub_instance(100)]
-
- rules = {
- "compute:get_all_tenants":
- common_policy.parse_rule("project_id:non_fake"),
- "compute:get_all":
- common_policy.parse_rule("project_id:fake"),
- }
-
- policy.set_rules(rules)
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all)
-
- req = fakes.HTTPRequest.blank('/fake/servers?all_tenants=1')
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.index, req)
-
- def test_get_servers_allows_flavor(self):
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False):
- self.assertIsNotNone(search_opts)
- self.assertIn('flavor', search_opts)
- # flavor is an integer ID
- self.assertEqual(search_opts['flavor'], '12345')
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequest.blank('/fake/servers?flavor=12345')
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_with_bad_flavor(self):
- req = fakes.HTTPRequest.blank('/fake/servers?flavor=abcde')
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 0)
-
- def test_get_server_details_with_bad_flavor(self):
- req = fakes.HTTPRequest.blank('/fake/servers/detail?flavor=abcde')
- servers = self.controller.detail(req)['servers']
-
- self.assertThat(servers, testtools.matchers.HasLength(0))
-
- def test_get_servers_allows_status(self):
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False):
- self.assertIsNotNone(search_opts)
- self.assertIn('vm_state', search_opts)
- self.assertEqual(search_opts['vm_state'], [vm_states.ACTIVE])
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequest.blank('/fake/servers?status=active')
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- @mock.patch.object(compute_api.API, 'get_all')
- def test_get_servers_allows_multi_status(self, get_all_mock):
- server_uuid0 = str(uuid.uuid4())
- server_uuid1 = str(uuid.uuid4())
- db_list = [fakes.stub_instance(100, uuid=server_uuid0),
- fakes.stub_instance(101, uuid=server_uuid1)]
- get_all_mock.return_value = instance_obj._make_instance_list(
- context, instance_obj.InstanceList(), db_list, FIELDS)
-
- req = fakes.HTTPRequest.blank(
- '/fake/servers?status=active&status=error')
- servers = self.controller.index(req)['servers']
- self.assertEqual(2, len(servers))
- self.assertEqual(server_uuid0, servers[0]['id'])
- self.assertEqual(server_uuid1, servers[1]['id'])
- expected_search_opts = dict(deleted=False,
- vm_state=[vm_states.ACTIVE,
- vm_states.ERROR],
- project_id='fake')
- get_all_mock.assert_called_once_with(mock.ANY,
- search_opts=expected_search_opts, limit=mock.ANY,
- marker=mock.ANY, want_objects=mock.ANY)
-
- @mock.patch.object(compute_api.API, 'get_all')
- def test_get_servers_system_metadata_filter(self, get_all_mock):
- server_uuid0 = str(uuid.uuid4())
- server_uuid1 = str(uuid.uuid4())
- expected_system_metadata = u'{"some_value": "some_key"}'
- db_list = [fakes.stub_instance(100, uuid=server_uuid0),
- fakes.stub_instance(101, uuid=server_uuid1)]
- get_all_mock.return_value = instance_obj._make_instance_list(
- context, instance_obj.InstanceList(), db_list, FIELDS)
-
- req = fakes.HTTPRequest.blank(
- '/fake/servers?status=active&status=error&system_metadata=' +
- urllib.quote(expected_system_metadata),
- use_admin_context=True)
- servers = self.controller.index(req)['servers']
- self.assertEqual(2, len(servers))
- self.assertEqual(server_uuid0, servers[0]['id'])
- self.assertEqual(server_uuid1, servers[1]['id'])
- expected_search_opts = dict(
- deleted=False, vm_state=[vm_states.ACTIVE, vm_states.ERROR],
- system_metadata=expected_system_metadata, project_id='fake')
- get_all_mock.assert_called_once_with(mock.ANY,
- search_opts=expected_search_opts, limit=mock.ANY,
- marker=mock.ANY, want_objects=mock.ANY)
-
- @mock.patch.object(compute_api.API, 'get_all')
- def test_get_servers_flavor_not_found(self, get_all_mock):
- get_all_mock.side_effect = exception.FlavorNotFound(flavor_id=1)
-
- req = fakes.HTTPRequest.blank(
- '/fake/servers?status=active&flavor=abc')
- servers = self.controller.index(req)['servers']
- self.assertEqual(0, len(servers))
-
- @mock.patch.object(compute_api.API, 'get_all')
- def test_get_servers_allows_invalid_status(self, get_all_mock):
- server_uuid0 = str(uuid.uuid4())
- server_uuid1 = str(uuid.uuid4())
- db_list = [fakes.stub_instance(100, uuid=server_uuid0),
- fakes.stub_instance(101, uuid=server_uuid1)]
- get_all_mock.return_value = instance_obj._make_instance_list(
- context, instance_obj.InstanceList(), db_list, FIELDS)
-
- req = fakes.HTTPRequest.blank(
- '/fake/servers?status=active&status=invalid')
- servers = self.controller.index(req)['servers']
- self.assertEqual(2, len(servers))
- self.assertEqual(server_uuid0, servers[0]['id'])
- self.assertEqual(server_uuid1, servers[1]['id'])
- expected_search_opts = dict(deleted=False,
- vm_state=[vm_states.ACTIVE],
- project_id='fake')
- get_all_mock.assert_called_once_with(mock.ANY,
- search_opts=expected_search_opts, limit=mock.ANY,
- marker=mock.ANY, want_objects=mock.ANY)
-
- def test_get_servers_allows_task_status(self):
- server_uuid = str(uuid.uuid4())
- task_state = task_states.REBOOTING
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False):
- self.assertIsNotNone(search_opts)
- self.assertIn('task_state', search_opts)
- self.assertEqual([task_states.REBOOT_PENDING,
- task_states.REBOOT_STARTED,
- task_states.REBOOTING],
- search_opts['task_state'])
- db_list = [fakes.stub_instance(100, uuid=server_uuid,
- task_state=task_state)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequest.blank('/servers?status=reboot')
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_resize_status(self):
- # Test when resize status, it maps list of vm states.
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False):
- self.assertIn('vm_state', search_opts)
- self.assertEqual(search_opts['vm_state'],
- [vm_states.ACTIVE, vm_states.STOPPED])
-
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequest.blank('/fake/servers?status=resize')
-
- servers = self.controller.detail(req)['servers']
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_invalid_status(self):
- # Test getting servers by invalid status.
- req = fakes.HTTPRequest.blank('/fake/servers?status=baloney',
- use_admin_context=False)
- servers = self.controller.index(req)['servers']
- self.assertEqual(len(servers), 0)
-
- def test_get_servers_deleted_status_as_user(self):
- req = fakes.HTTPRequest.blank('/fake/servers?status=deleted',
- use_admin_context=False)
- self.assertRaises(webob.exc.HTTPForbidden,
- self.controller.detail, req)
-
- def test_get_servers_deleted_status_as_admin(self):
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False):
- self.assertIn('vm_state', search_opts)
- self.assertEqual(search_opts['vm_state'], ['deleted'])
-
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequest.blank('/fake/servers?status=deleted',
- use_admin_context=True)
-
- servers = self.controller.detail(req)['servers']
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_allows_name(self):
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False):
- self.assertIsNotNone(search_opts)
- self.assertIn('name', search_opts)
- self.assertEqual(search_opts['name'], 'whee.*')
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequest.blank('/fake/servers?name=whee.*')
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_allows_changes_since(self):
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False):
- self.assertIsNotNone(search_opts)
- self.assertIn('changes-since', search_opts)
- changes_since = datetime.datetime(2011, 1, 24, 17, 8, 1,
- tzinfo=iso8601.iso8601.UTC)
- self.assertEqual(search_opts['changes-since'], changes_since)
- self.assertNotIn('deleted', search_opts)
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- params = 'changes-since=2011-01-24T17:08:01Z'
- req = fakes.HTTPRequest.blank('/fake/servers?%s' % params)
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_allows_changes_since_bad_value(self):
- params = 'changes-since=asdf'
- req = fakes.HTTPRequest.blank('/fake/servers?%s' % params)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req)
-
- def test_get_servers_admin_filters_as_user(self):
- """Test getting servers by admin-only or unknown options when
- context is not admin. Make sure the admin and unknown options
- are stripped before they get to compute_api.get_all()
- """
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False):
- self.assertIsNotNone(search_opts)
- # Allowed by user
- self.assertIn('name', search_opts)
- self.assertIn('ip', search_opts)
- # OSAPI converts status to vm_state
- self.assertIn('vm_state', search_opts)
- # Allowed only by admins with admin API on
- self.assertNotIn('unknown_option', search_opts)
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
- req = fakes.HTTPRequest.blank('/fake/servers?%s' % query_str)
- res = self.controller.index(req)
-
- servers = res['servers']
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_admin_options_as_admin(self):
- """Test getting servers by admin-only or unknown options when
- context is admin. All options should be passed
- """
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False):
- self.assertIsNotNone(search_opts)
- # Allowed by user
- self.assertIn('name', search_opts)
- # OSAPI converts status to vm_state
- self.assertIn('vm_state', search_opts)
- # Allowed only by admins with admin API on
- self.assertIn('ip', search_opts)
- self.assertIn('unknown_option', search_opts)
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
- req = fakes.HTTPRequest.blank('/fake/servers?%s' % query_str,
- use_admin_context=True)
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_allows_ip(self):
- """Test getting servers by ip."""
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False):
- self.assertIsNotNone(search_opts)
- self.assertIn('ip', search_opts)
- self.assertEqual(search_opts['ip'], '10\..*')
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequest.blank('/fake/servers?ip=10\..*')
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_servers_admin_allows_ip6(self):
- """Test getting servers by ip6 with admin_api enabled and
- admin context
- """
- server_uuid = str(uuid.uuid4())
-
- def fake_get_all(compute_self, context, search_opts=None,
- sort_key=None, sort_dir='desc',
- limit=None, marker=None, want_objects=False):
- self.assertIsNotNone(search_opts)
- self.assertIn('ip6', search_opts)
- self.assertEqual(search_opts['ip6'], 'ffff.*')
- db_list = [fakes.stub_instance(100, uuid=server_uuid)]
- return instance_obj._make_instance_list(
- context, objects.InstanceList(), db_list, FIELDS)
-
- self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
-
- req = fakes.HTTPRequest.blank('/fake/servers?ip6=ffff.*',
- use_admin_context=True)
- servers = self.controller.index(req)['servers']
-
- self.assertEqual(len(servers), 1)
- self.assertEqual(servers[0]['id'], server_uuid)
-
- def test_get_all_server_details(self):
- expected_flavor = {
- "id": "1",
- "links": [
- {
- "rel": "bookmark",
- "href": 'http://localhost/fake/flavors/1',
- },
- ],
- }
- expected_image = {
- "id": "10",
- "links": [
- {
- "rel": "bookmark",
- "href": 'http://localhost/fake/images/10',
- },
- ],
- }
- req = fakes.HTTPRequest.blank('/fake/servers/detail')
- res_dict = self.controller.detail(req)
-
- for i, s in enumerate(res_dict['servers']):
- self.assertEqual(s['id'], fakes.get_fake_uuid(i))
- self.assertEqual(s['hostId'], '')
- self.assertEqual(s['name'], 'server%d' % (i + 1))
- self.assertEqual(s['image'], expected_image)
- self.assertEqual(s['flavor'], expected_flavor)
- self.assertEqual(s['status'], 'BUILD')
- self.assertEqual(s['metadata']['seq'], str(i + 1))
-
- def test_get_all_server_details_with_host(self):
- """We want to make sure that if two instances are on the same host,
- then they return the same hostId. If two instances are on different
- hosts, they should return different hostId's. In this test, there
- are 5 instances - 2 on one host and 3 on another.
- """
-
- def return_servers_with_host(context, *args, **kwargs):
- return [fakes.stub_instance(i + 1, 'fake', 'fake', host=i % 2,
- uuid=fakes.get_fake_uuid(i))
- for i in xrange(5)]
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- return_servers_with_host)
-
- req = fakes.HTTPRequest.blank('/fake/servers/detail')
- res_dict = self.controller.detail(req)
-
- server_list = res_dict['servers']
- host_ids = [server_list[0]['hostId'], server_list[1]['hostId']]
- self.assertTrue(host_ids[0] and host_ids[1])
- self.assertNotEqual(host_ids[0], host_ids[1])
-
- for i, s in enumerate(server_list):
- self.assertEqual(s['id'], fakes.get_fake_uuid(i))
- self.assertEqual(s['hostId'], host_ids[i % 2])
- self.assertEqual(s['name'], 'server%d' % (i + 1))
-
-
-class ServersControllerUpdateTest(ControllerTest):
-
- def _get_request(self, body=None, content_type='json', options=None):
- if options:
- self.stubs.Set(db, 'instance_get',
- fakes.fake_instance_get(**options))
- req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
- req.method = 'PUT'
- req.content_type = 'application/%s' % content_type
- req.body = jsonutils.dumps(body)
- return req
-
- def test_update_server_all_attributes(self):
- body = {'server': {
- 'name': 'server_test',
- 'accessIPv4': '0.0.0.0',
- 'accessIPv6': 'beef::0123',
- }}
- req = self._get_request(body, {'name': 'server_test',
- 'access_ipv4': '0.0.0.0',
- 'access_ipv6': 'beef::0123'})
- res_dict = self.controller.update(req, FAKE_UUID, body)
-
- self.assertEqual(res_dict['server']['id'], FAKE_UUID)
- self.assertEqual(res_dict['server']['name'], 'server_test')
- self.assertEqual(res_dict['server']['accessIPv4'], '0.0.0.0')
- self.assertEqual(res_dict['server']['accessIPv6'], 'beef::123')
-
- def test_update_server_invalid_xml_raises_lookup(self):
- body = """<?xml version="1.0" encoding="TF-8"?>
- <metadata
- xmlns="http://docs.openstack.org/compute/api/v1.1"
- key="Label"></meta>"""
- req = self._get_request(body, content_type='xml')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 400)
-
- def test_update_server_invalid_xml_raises_expat(self):
- body = """<?xml version="1.0" encoding="UTF-8"?>
- <metadata
- xmlns="http://docs.openstack.org/compute/api/v1.1"
- key="Label"></meta>"""
- req = self._get_request(body, content_type='xml')
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 400)
-
- def test_update_server_name(self):
- body = {'server': {'name': 'server_test'}}
- req = self._get_request(body, {'name': 'server_test'})
- res_dict = self.controller.update(req, FAKE_UUID, body)
-
- self.assertEqual(res_dict['server']['id'], FAKE_UUID)
- self.assertEqual(res_dict['server']['name'], 'server_test')
-
- def test_update_server_name_too_long(self):
- body = {'server': {'name': 'x' * 256}}
- req = self._get_request(body, {'name': 'server_test'})
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, FAKE_UUID, body)
-
- def test_update_server_name_all_blank_spaces(self):
- body = {'server': {'name': ' ' * 64}}
- req = self._get_request(body, {'name': 'server_test'})
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, FAKE_UUID, body)
-
- def test_update_server_access_ipv4(self):
- body = {'server': {'accessIPv4': '0.0.0.0'}}
- req = self._get_request(body, {'access_ipv4': '0.0.0.0'})
- res_dict = self.controller.update(req, FAKE_UUID, body)
-
- self.assertEqual(res_dict['server']['id'], FAKE_UUID)
- self.assertEqual(res_dict['server']['accessIPv4'], '0.0.0.0')
-
- def test_update_server_access_ipv4_bad_format(self):
- body = {'server': {'accessIPv4': 'bad_format'}}
- req = self._get_request(body, {'access_ipv4': '0.0.0.0'})
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, FAKE_UUID, body)
-
- def test_update_server_access_ipv4_none(self):
- body = {'server': {'accessIPv4': None}}
- req = self._get_request(body, {'access_ipv4': '0.0.0.0'})
- res_dict = self.controller.update(req, FAKE_UUID, body)
-
- self.assertEqual(res_dict['server']['id'], FAKE_UUID)
- self.assertEqual(res_dict['server']['accessIPv4'], '')
-
- def test_update_server_access_ipv4_blank(self):
- body = {'server': {'accessIPv4': ''}}
- req = self._get_request(body, {'access_ipv4': '0.0.0.0'})
- res_dict = self.controller.update(req, FAKE_UUID, body)
-
- self.assertEqual(res_dict['server']['id'], FAKE_UUID)
- self.assertEqual(res_dict['server']['accessIPv4'], '')
-
- def test_update_server_access_ipv6(self):
- body = {'server': {'accessIPv6': 'beef::0123'}}
- req = self._get_request(body, {'access_ipv6': 'beef::0123'})
- res_dict = self.controller.update(req, FAKE_UUID, body)
-
- self.assertEqual(res_dict['server']['id'], FAKE_UUID)
- self.assertEqual(res_dict['server']['accessIPv6'], 'beef::123')
-
- def test_update_server_access_ipv6_bad_format(self):
- body = {'server': {'accessIPv6': 'bad_format'}}
- req = self._get_request(body, {'access_ipv6': 'beef::0123'})
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, FAKE_UUID, body)
-
- def test_update_server_access_ipv6_none(self):
- body = {'server': {'accessIPv6': None}}
- req = self._get_request(body, {'access_ipv6': 'beef::0123'})
- res_dict = self.controller.update(req, FAKE_UUID, body)
-
- self.assertEqual(res_dict['server']['id'], FAKE_UUID)
- self.assertEqual(res_dict['server']['accessIPv6'], '')
-
- def test_update_server_access_ipv6_blank(self):
- body = {'server': {'accessIPv6': ''}}
- req = self._get_request(body, {'access_ipv6': 'beef::0123'})
- res_dict = self.controller.update(req, FAKE_UUID, body)
-
- self.assertEqual(res_dict['server']['id'], FAKE_UUID)
- self.assertEqual(res_dict['server']['accessIPv6'], '')
-
- def test_update_server_personality(self):
- body = {
- 'server': {
- 'personality': []
- }
- }
- req = self._get_request(body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.update, req, FAKE_UUID, body)
-
- def test_update_server_adminPass_ignored(self):
- inst_dict = dict(name='server_test', adminPass='bacon')
- body = dict(server=inst_dict)
-
- def server_update(context, id, params):
- filtered_dict = {
- 'display_name': 'server_test',
- }
- self.assertEqual(params, filtered_dict)
- filtered_dict['uuid'] = id
- return filtered_dict
-
- self.stubs.Set(db, 'instance_update', server_update)
- # FIXME (comstud)
- # self.stubs.Set(db, 'instance_get',
- # return_server_with_attributes(name='server_test'))
-
- req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
- req.method = 'PUT'
- req.content_type = "application/json"
- req.body = jsonutils.dumps(body)
- res_dict = self.controller.update(req, FAKE_UUID, body)
-
- self.assertEqual(res_dict['server']['id'], FAKE_UUID)
- self.assertEqual(res_dict['server']['name'], 'server_test')
-
- def test_update_server_not_found(self):
- def fake_get(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id='fake')
-
- self.stubs.Set(compute_api.API, 'get', fake_get)
- body = {'server': {'name': 'server_test'}}
- req = self._get_request(body)
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
- req, FAKE_UUID, body)
-
- def test_update_server_not_found_on_update(self):
- def fake_update(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id='fake')
-
- self.stubs.Set(db, 'instance_update_and_get_original', fake_update)
- body = {'server': {'name': 'server_test'}}
- req = self._get_request(body)
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
- req, FAKE_UUID, body)
-
- def test_update_server_policy_fail(self):
- rule = {'compute:update': common_policy.parse_rule('role:admin')}
- policy.set_rules(rule)
- body = {'server': {'name': 'server_test'}}
- req = self._get_request(body, {'name': 'server_test'})
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.update, req, FAKE_UUID, body)
-
-
-class ServersControllerDeleteTest(ControllerTest):
-
- def setUp(self):
- super(ServersControllerDeleteTest, self).setUp()
- self.server_delete_called = False
-
- def instance_destroy_mock(*args, **kwargs):
- self.server_delete_called = True
- deleted_at = timeutils.utcnow()
- return fake_instance.fake_db_instance(deleted_at=deleted_at)
-
- self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
-
- def _create_delete_request(self, uuid):
- fakes.stub_out_instance_quota(self.stubs, 0, 10)
- req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % uuid)
- req.method = 'DELETE'
- return req
-
- def _delete_server_instance(self, uuid=FAKE_UUID):
- req = self._create_delete_request(uuid)
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
- self.controller.delete(req, uuid)
-
- def test_delete_server_instance(self):
- self._delete_server_instance()
- self.assertTrue(self.server_delete_called)
-
- def test_delete_server_instance_not_found(self):
- self.assertRaises(webob.exc.HTTPNotFound,
- self._delete_server_instance,
- uuid='non-existent-uuid')
-
- def test_delete_locked_server(self):
- req = self._create_delete_request(FAKE_UUID)
- self.stubs.Set(compute_api.API, delete_types.SOFT_DELETE,
- fakes.fake_actions_to_locked_server)
- self.stubs.Set(compute_api.API, delete_types.DELETE,
- fakes.fake_actions_to_locked_server)
-
- self.assertRaises(webob.exc.HTTPConflict, self.controller.delete,
- req, FAKE_UUID)
-
- def test_delete_server_instance_while_building(self):
- fakes.stub_out_instance_quota(self.stubs, 0, 10)
- request = self._create_delete_request(FAKE_UUID)
- self.controller.delete(request, FAKE_UUID)
-
- self.assertTrue(self.server_delete_called)
-
- def test_delete_server_instance_while_deleting_host_up(self):
- req = self._create_delete_request(FAKE_UUID)
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
- task_state=task_states.DELETING,
- host='fake_host'))
- self.stubs.Set(objects.Instance, 'save',
- lambda *args, **kwargs: None)
-
- @classmethod
- def fake_get_by_compute_host(cls, context, host):
- return {'updated_at': timeutils.utcnow()}
- self.stubs.Set(objects.Service, 'get_by_compute_host',
- fake_get_by_compute_host)
-
- self.controller.delete(req, FAKE_UUID)
- # Delete request can be ignored, because it's been accepted and
- # forwarded to the compute service already.
- self.assertFalse(self.server_delete_called)
-
- def test_delete_server_instance_while_deleting_host_down(self):
- fake_network.stub_out_network_cleanup(self.stubs)
- req = self._create_delete_request(FAKE_UUID)
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
- task_state=task_states.DELETING,
- host='fake_host'))
- self.stubs.Set(objects.Instance, 'save',
- lambda *args, **kwargs: None)
-
- @classmethod
- def fake_get_by_compute_host(cls, context, host):
- return {'updated_at': datetime.datetime.min}
- self.stubs.Set(objects.Service, 'get_by_compute_host',
- fake_get_by_compute_host)
-
- self.controller.delete(req, FAKE_UUID)
- # Delete request would be ignored, because it's been accepted before
- # but since the host is down, api should remove the instance anyway.
- self.assertTrue(self.server_delete_called)
-
- def test_delete_server_instance_while_resize(self):
- req = self._create_delete_request(FAKE_UUID)
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
- task_state=task_states.RESIZE_PREP))
-
- self.controller.delete(req, FAKE_UUID)
- # Delete shoud be allowed in any case, even during resizing,
- # because it may get stuck.
- self.assertTrue(self.server_delete_called)
-
- def test_delete_server_instance_if_not_launched(self):
- self.flags(reclaim_instance_interval=3600)
- req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
- req.method = 'DELETE'
-
- self.server_delete_called = False
-
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(launched_at=None))
-
- def instance_destroy_mock(*args, **kwargs):
- self.server_delete_called = True
- deleted_at = timeutils.utcnow()
- return fake_instance.fake_db_instance(deleted_at=deleted_at)
- self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
-
- self.controller.delete(req, FAKE_UUID)
- # delete() should be called for instance which has never been active,
- # even if reclaim_instance_interval has been set.
- self.assertEqual(self.server_delete_called, True)
-
-
-class ServersControllerRebuildInstanceTest(ControllerTest):
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- image_href = 'http://localhost/v2/fake/images/%s' % image_uuid
-
- def setUp(self):
- super(ServersControllerRebuildInstanceTest, self).setUp()
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
- self.body = {
- 'rebuild': {
- 'name': 'new_name',
- 'imageRef': self.image_href,
- 'metadata': {
- 'open': 'stack',
- },
- 'personality': [
- {
- "path": "/etc/banner.txt",
- "contents": "MQ==",
- },
- ],
- },
- }
- self.req = fakes.HTTPRequest.blank('/fake/servers/a/action')
- self.req.method = 'POST'
- self.req.headers["content-type"] = "application/json"
-
- def test_rebuild_instance_with_access_ipv4_bad_format(self):
- # proper local hrefs must start with 'http://localhost/v2/'
- self.body['rebuild']['accessIPv4'] = 'bad_format'
- self.body['rebuild']['accessIPv6'] = 'fead::1234'
- self.body['rebuild']['metadata']['hello'] = 'world'
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild,
- self.req, FAKE_UUID, self.body)
-
- def test_rebuild_instance_with_blank_metadata_key(self):
- self.body['rebuild']['accessIPv4'] = '0.0.0.0'
- self.body['rebuild']['accessIPv6'] = 'fead::1234'
- self.body['rebuild']['metadata'][''] = 'world'
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild,
- self.req, FAKE_UUID, self.body)
-
- def test_rebuild_instance_with_metadata_key_too_long(self):
- self.body['rebuild']['accessIPv4'] = '0.0.0.0'
- self.body['rebuild']['accessIPv6'] = 'fead::1234'
- self.body['rebuild']['metadata'][('a' * 260)] = 'world'
-
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
- self.controller._action_rebuild,
- self.req, FAKE_UUID, self.body)
-
- def test_rebuild_instance_with_metadata_value_too_long(self):
- self.body['rebuild']['accessIPv4'] = '0.0.0.0'
- self.body['rebuild']['accessIPv6'] = 'fead::1234'
- self.body['rebuild']['metadata']['key1'] = ('a' * 260)
-
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
- self.controller._action_rebuild,
- self.req, FAKE_UUID, self.body)
-
- def test_rebuild_instance_fails_when_min_ram_too_small(self):
- # make min_ram larger than our instance ram size
- def fake_get_image(self, context, image_href, **kwargs):
- return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- name='public image', is_public=True,
- status='active', properties={'key1': 'value1'},
- min_ram="4096", min_disk="10")
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
-
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild,
- self.req, FAKE_UUID, self.body)
-
- def test_rebuild_instance_fails_when_min_disk_too_small(self):
- # make min_disk larger than our instance disk size
- def fake_get_image(self, context, image_href, **kwargs):
- return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- name='public image', is_public=True,
- status='active', properties={'key1': 'value1'},
- min_ram="128", min_disk="100000")
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild, self.req,
- FAKE_UUID, self.body)
-
- def test_rebuild_instance_image_too_large(self):
- # make image size larger than our instance disk size
- size = str(1000 * (1024 ** 3))
-
- def fake_get_image(self, context, image_href, **kwargs):
- return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- name='public image', is_public=True,
- status='active', size=size)
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild, self.req, FAKE_UUID, self.body)
-
- def test_rebuild_instance_with_deleted_image(self):
- def fake_get_image(self, context, image_href, **kwargs):
- return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- name='public image', is_public=True,
- status='DELETED')
-
- self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
-
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild, self.req, FAKE_UUID, self.body)
-
- def test_rebuild_instance_onset_file_limit_over_quota(self):
- def fake_get_image(self, context, image_href, **kwargs):
- return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- name='public image', is_public=True, status='active')
-
- with contextlib.nested(
- mock.patch.object(fake._FakeImageService, 'show',
- side_effect=fake_get_image),
- mock.patch.object(self.controller.compute_api, 'rebuild',
- side_effect=exception.OnsetFileLimitExceeded)
- ) as (
- show_mock, rebuild_mock
- ):
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPForbidden,
- self.controller._action_rebuild,
- self.req, FAKE_UUID, body=self.body)
-
- def test_rebuild_instance_with_access_ipv6_bad_format(self):
- # proper local hrefs must start with 'http://localhost/v2/'
- self.body['rebuild']['accessIPv4'] = '1.2.3.4'
- self.body['rebuild']['accessIPv6'] = 'bad_format'
- self.body['rebuild']['metadata']['hello'] = 'world'
- self.req.body = jsonutils.dumps(self.body)
- self.req.headers["content-type"] = "application/json"
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild,
- self.req, FAKE_UUID, self.body)
-
- def test_rebuild_instance_with_null_image_ref(self):
- self.body['rebuild']['imageRef'] = None
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller._action_rebuild, self.req, FAKE_UUID,
- self.body)
-
-
-class ServerStatusTest(test.TestCase):
-
- def setUp(self):
- super(ServerStatusTest, self).setUp()
- fakes.stub_out_nw_api(self.stubs)
-
- self.ext_mgr = extensions.ExtensionManager()
- self.ext_mgr.extensions = {}
- self.controller = servers.Controller(self.ext_mgr)
-
- def _fake_get_server(context, req, id):
- return fakes.stub_instance(id)
-
- self.stubs.Set(self.controller, '_get_server', _fake_get_server)
-
- def _get_with_state(self, vm_state, task_state=None):
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_state,
- task_state=task_state))
-
- request = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
- return self.controller.show(request, FAKE_UUID)
-
- def _req_with_policy_fail(self, policy_rule_name):
- rule = {'compute:%s' % policy_rule_name:
- common_policy.parse_rule('role:admin')}
- policy.set_rules(rule)
- return fakes.HTTPRequest.blank('/fake/servers/1234/action')
-
- def test_active(self):
- response = self._get_with_state(vm_states.ACTIVE)
- self.assertEqual(response['server']['status'], 'ACTIVE')
-
- def test_reboot(self):
- response = self._get_with_state(vm_states.ACTIVE,
- task_states.REBOOTING)
- self.assertEqual(response['server']['status'], 'REBOOT')
-
- def test_reboot_hard(self):
- response = self._get_with_state(vm_states.ACTIVE,
- task_states.REBOOTING_HARD)
- self.assertEqual(response['server']['status'], 'HARD_REBOOT')
-
- def test_reboot_resize_policy_fail(self):
- req = self._req_with_policy_fail('reboot')
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller._action_reboot, req, '1234',
- {'reboot': {'type': 'HARD'}})
-
- def test_rebuild(self):
- response = self._get_with_state(vm_states.ACTIVE,
- task_states.REBUILDING)
- self.assertEqual(response['server']['status'], 'REBUILD')
-
- def test_rebuild_error(self):
- response = self._get_with_state(vm_states.ERROR)
- self.assertEqual(response['server']['status'], 'ERROR')
-
- def test_resize(self):
- response = self._get_with_state(vm_states.ACTIVE,
- task_states.RESIZE_PREP)
- self.assertEqual(response['server']['status'], 'RESIZE')
-
- def test_confirm_resize_policy_fail(self):
- req = self._req_with_policy_fail('confirm_resize')
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller._action_confirm_resize, req, '1234', {})
-
- def test_verify_resize(self):
- response = self._get_with_state(vm_states.RESIZED, None)
- self.assertEqual(response['server']['status'], 'VERIFY_RESIZE')
-
- def test_revert_resize(self):
- response = self._get_with_state(vm_states.RESIZED,
- task_states.RESIZE_REVERTING)
- self.assertEqual(response['server']['status'], 'REVERT_RESIZE')
-
- def test_revert_resize_policy_fail(self):
- req = self._req_with_policy_fail('revert_resize')
- self.assertRaises(exception.PolicyNotAuthorized,
- self.controller._action_revert_resize, req, '1234', {})
-
- def test_password_update(self):
- response = self._get_with_state(vm_states.ACTIVE,
- task_states.UPDATING_PASSWORD)
- self.assertEqual(response['server']['status'], 'PASSWORD')
-
- def test_stopped(self):
- response = self._get_with_state(vm_states.STOPPED)
- self.assertEqual(response['server']['status'], 'SHUTOFF')
-
-
-class ServersControllerCreateTest(test.TestCase):
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/123/flavors/3'
-
- def setUp(self):
- """Shared implementation for tests below that create instance."""
- super(ServersControllerCreateTest, self).setUp()
-
- self.flags(verbose=True,
- enable_instance_password=True)
- self.instance_cache_num = 0
- self.instance_cache_by_id = {}
- self.instance_cache_by_uuid = {}
-
- fakes.stub_out_nw_api(self.stubs)
-
- self.ext_mgr = extensions.ExtensionManager()
- self.ext_mgr.extensions = {}
- self.controller = servers.Controller(self.ext_mgr)
-
- self.volume_id = 'fake'
-
- def instance_create(context, inst):
- inst_type = flavors.get_flavor_by_flavor_id(3)
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- def_image_ref = 'http://localhost/images/%s' % image_uuid
- self.instance_cache_num += 1
- instance = fake_instance.fake_db_instance(**{
- 'id': self.instance_cache_num,
- 'display_name': inst['display_name'] or 'test',
- 'uuid': FAKE_UUID,
- 'instance_type': inst_type,
- 'access_ip_v4': '1.2.3.4',
- 'access_ip_v6': 'fead::1234',
- 'image_ref': inst.get('image_ref', def_image_ref),
- 'user_id': 'fake',
- 'project_id': 'fake',
- 'reservation_id': inst['reservation_id'],
- "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
- "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
- "config_drive": None,
- "progress": 0,
- "fixed_ips": [],
- "task_state": "",
- "vm_state": "",
- "root_device_name": inst.get('root_device_name', 'vda'),
- "security_groups": inst['security_groups'],
- })
-
- self.instance_cache_by_id[instance['id']] = instance
- self.instance_cache_by_uuid[instance['uuid']] = instance
- return instance
-
- def instance_get(context, instance_id):
- """Stub for compute/api create() pulling in instance after
- scheduling
- """
- return self.instance_cache_by_id[instance_id]
-
- def instance_update(context, uuid, values):
- instance = self.instance_cache_by_uuid[uuid]
- instance.update(values)
- return instance
-
- def server_update(context, instance_uuid, params, update_cells=False):
- inst = self.instance_cache_by_uuid[instance_uuid]
- inst.update(params)
- return inst
-
- def server_update_and_get_original(
- context, instance_uuid, params, update_cells=False,
- columns_to_join=None):
- inst = self.instance_cache_by_uuid[instance_uuid]
- inst.update(params)
- return (inst, inst)
-
- def fake_method(*args, **kwargs):
- pass
-
- def project_get_networks(context, user_id):
- return dict(id='1', host='localhost')
-
- def queue_get_for(context, *args):
- return 'network_topic'
-
- fakes.stub_out_rate_limiting(self.stubs)
- fakes.stub_out_key_pair_funcs(self.stubs)
- fake.stub_out_image_service(self.stubs)
- self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
- self.stubs.Set(db, 'instance_add_security_group',
- return_security_group)
- self.stubs.Set(db, 'project_get_networks',
- project_get_networks)
- self.stubs.Set(db, 'instance_create', instance_create)
- self.stubs.Set(db, 'instance_system_metadata_update',
- fake_method)
- self.stubs.Set(db, 'instance_get', instance_get)
- self.stubs.Set(db, 'instance_update', instance_update)
- self.stubs.Set(db, 'instance_update_and_get_original',
- server_update_and_get_original)
- self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
- fake_method)
- self.body = {
- 'server': {
- 'min_count': 2,
- 'name': 'server_test',
- 'imageRef': self.image_uuid,
- 'flavorRef': self.flavor_ref,
- 'metadata': {
- 'hello': 'world',
- 'open': 'stack',
- },
- 'personality': [
- {
- "path": "/etc/banner.txt",
- "contents": "MQ==",
- },
- ],
- },
- }
- self.bdm = [{'delete_on_termination': 1,
- 'device_name': 123,
- 'volume_size': 1,
- 'volume_id': '11111111-1111-1111-1111-111111111111'}]
-
- self.req = fakes.HTTPRequest.blank('/fake/servers')
- self.req.method = 'POST'
- self.req.headers["content-type"] = "application/json"
-
- def _check_admin_pass_len(self, server_dict):
- """utility function - check server_dict for adminPass length."""
- self.assertEqual(CONF.password_length,
- len(server_dict["adminPass"]))
-
- def _check_admin_pass_missing(self, server_dict):
- """utility function - check server_dict for absence of adminPass."""
- self.assertNotIn("adminPass", server_dict)
-
- def _test_create_instance(self, flavor=2):
- image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
- self.body['server']['imageRef'] = image_uuid
- self.body['server']['flavorRef'] = flavor
- self.req.body = jsonutils.dumps(self.body)
- server = self.controller.create(self.req, self.body).obj['server']
- self._check_admin_pass_len(server)
- self.assertEqual(FAKE_UUID, server['id'])
-
- def test_create_instance_private_flavor(self):
- values = {
- 'name': 'fake_name',
- 'memory_mb': 512,
- 'vcpus': 1,
- 'root_gb': 10,
- 'ephemeral_gb': 10,
- 'flavorid': '1324',
- 'swap': 0,
- 'rxtx_factor': 0.5,
- 'vcpu_weight': 1,
- 'disabled': False,
- 'is_public': False,
- }
- db.flavor_create(context.get_admin_context(), values)
- self.assertRaises(webob.exc.HTTPBadRequest, self._test_create_instance,
- flavor=1324)
-
- def test_create_server_bad_image_href(self):
- image_href = 1
- self.body['server']['imageRef'] = image_href,
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create,
- self.req, self.body)
-
- def test_create_server_with_invalid_networks_parameter(self):
- self.ext_mgr.extensions = {'os-networks': 'fake'}
- self.body['server']['networks'] = {
- 'uuid': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'}
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create,
- self.req,
- self.body)
-
- def test_create_server_with_deleted_image(self):
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- # Get the fake image service so we can set the status to deleted
- (image_service, image_id) = glance.get_remote_image_service(
- context, '')
- image_service.update(context, image_uuid, {'status': 'DELETED'})
- self.addCleanup(image_service.update, context, image_uuid,
- {'status': 'active'})
-
- self.body['server']['flavorRef'] = 2
- self.req.body = jsonutils.dumps(self.body)
- with testtools.ExpectedException(
- webob.exc.HTTPBadRequest,
- 'Image 76fa36fc-c930-4bf3-8c8a-ea2a2420deb6 is not active.'):
- self.controller.create(self.req, self.body)
-
- def test_create_server_image_too_large(self):
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- # Get the fake image service so we can set the status to deleted
- (image_service, image_id) = glance.get_remote_image_service(context,
- image_uuid)
- image = image_service.show(context, image_id)
- orig_size = image['size']
- new_size = str(1000 * (1024 ** 3))
- image_service.update(context, image_uuid, {'size': new_size})
-
- self.addCleanup(image_service.update, context, image_uuid,
- {'size': orig_size})
-
- self.body['server']['flavorRef'] = 2
- self.req.body = jsonutils.dumps(self.body)
- with testtools.ExpectedException(
- webob.exc.HTTPBadRequest,
- "Flavor's disk is too small for requested image."):
- self.controller.create(self.req, self.body)
-
- def test_create_instance_invalid_negative_min(self):
- self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
- self.body['server']['min_count'] = -1
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create,
- self.req,
- self.body)
-
- def test_create_instance_invalid_negative_max(self):
- self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
- self.body['server']['max_count'] = -1
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create,
- self.req,
- self.body)
-
- def test_create_instance_invalid_alpha_min(self):
- self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
- self.body['server']['min_count'] = 'abcd',
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create,
- self.req,
- self.body)
-
- def test_create_instance_invalid_alpha_max(self):
- self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
- self.body['server']['max_count'] = 'abcd',
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create,
- self.req,
- self.body)
-
- def test_create_multiple_instances(self):
- """Test creating multiple instances but not asking for
- reservation_id
- """
- self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, self.body).obj
- self.assertEqual(FAKE_UUID, res["server"]["id"])
- self._check_admin_pass_len(res["server"])
-
- def test_create_multiple_instances_pass_disabled(self):
- """Test creating multiple instances but not asking for
- reservation_id
- """
- self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
- self.flags(enable_instance_password=False)
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, self.body).obj
- self.assertEqual(FAKE_UUID, res["server"]["id"])
- self._check_admin_pass_missing(res["server"])
-
- def test_create_multiple_instances_resv_id_return(self):
- """Test creating multiple instances with asking for
- reservation_id
- """
- self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
- self.body['server']['return_reservation_id'] = True
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, self.body)
- reservation_id = res.obj.get('reservation_id')
- self.assertNotEqual(reservation_id, "")
- self.assertIsNotNone(reservation_id)
- self.assertTrue(len(reservation_id) > 1)
-
- def test_create_multiple_instances_with_multiple_volume_bdm(self):
- """Test that a BadRequest is raised if multiple instances
- are requested with a list of block device mappings for volumes.
- """
- self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
- min_count = 2
- bdm = [{'device_name': 'foo1', 'volume_id': 'vol-xxxx'},
- {'device_name': 'foo2', 'volume_id': 'vol-yyyy'}
- ]
- params = {
- 'block_device_mapping': bdm,
- 'min_count': min_count
- }
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['min_count'], 2)
- self.assertEqual(len(kwargs['block_device_mapping']), 2)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, params, no_image=True)
-
- def test_create_multiple_instances_with_single_volume_bdm(self):
- """Test that a BadRequest is raised if multiple instances
- are requested to boot from a single volume.
- """
- self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
- min_count = 2
- bdm = [{'device_name': 'foo1', 'volume_id': 'vol-xxxx'}]
- params = {
- 'block_device_mapping': bdm,
- 'min_count': min_count
- }
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['min_count'], 2)
- self.assertEqual(kwargs['block_device_mapping']['volume_id'],
- 'vol-xxxx')
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, params, no_image=True)
-
- def test_create_multiple_instance_with_non_integer_max_count(self):
- self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
- self.body['server']['max_count'] = 2.5
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, self.body)
-
- def test_create_multiple_instance_with_non_integer_min_count(self):
- self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
- self.body['server']['min_count'] = 2.5
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, self.body)
-
- def test_create_instance_image_ref_is_bookmark(self):
- image_href = 'http://localhost/fake/images/%s' % self.image_uuid
- self.body['server']['imageRef'] = image_href
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, self.body).obj
- server = res['server']
- self.assertEqual(FAKE_UUID, server['id'])
-
- def test_create_instance_image_ref_is_invalid(self):
- image_uuid = 'this_is_not_a_valid_uuid'
- image_href = 'http://localhost/fake/images/%s' % image_uuid
- self.body['server']['imageRef'] = image_href
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- self.req, self.body)
-
- def test_create_instance_no_key_pair(self):
- fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
- self._test_create_instance()
-
- def _test_create_extra(self, params, no_image=False):
- self.body['server']['flavorRef'] = 2
- if no_image:
- self.body['server'].pop('imageRef', None)
- self.body['server'].update(params)
- self.req.body = jsonutils.dumps(self.body)
- self.assertIn('server',
- self.controller.create(self.req, self.body).obj)
-
- def test_create_instance_with_security_group_enabled(self):
- self.ext_mgr.extensions = {'os-security-groups': 'fake'}
- group = 'foo'
- old_create = compute_api.API.create
-
- def sec_group_get(ctx, proj, name):
- if name == group:
- return True
- else:
- raise exception.SecurityGroupNotFoundForProject(
- project_id=proj, security_group_id=name)
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['security_group'], [group])
- return old_create(*args, **kwargs)
-
- self.stubs.Set(db, 'security_group_get_by_name', sec_group_get)
- # negative test
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra,
- {'security_groups': [{'name': 'bogus'}]})
- # positive test - extra assert in create path
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra({'security_groups': [{'name': group}]})
-
- def test_create_instance_with_non_unique_secgroup_name(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'uuid': network}]
- params = {'networks': requested_networks,
- 'security_groups': [{'name': 'dup'}, {'name': 'dup'}]}
-
- def fake_create(*args, **kwargs):
- raise exception.NoUniqueMatch("No Unique match found for ...")
-
- self.stubs.Set(compute_api.API, 'create', fake_create)
- self.assertRaises(webob.exc.HTTPConflict,
- self._test_create_extra, params)
-
- def test_create_instance_with_port_with_no_fixed_ips(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- port_id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'port': port_id}]
- params = {'networks': requested_networks}
-
- def fake_create(*args, **kwargs):
- raise exception.PortRequiresFixedIP(port_id=port_id)
-
- self.stubs.Set(compute_api.API, 'create', fake_create)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, params)
-
- @mock.patch.object(compute_api.API, 'create')
- def test_create_instance_raise_user_data_too_large(self, mock_create):
- mock_create.side_effect = exception.InstanceUserDataTooLarge(
- maxsize=1, length=2)
-
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create,
- self.req, self.body)
-
- @mock.patch.object(compute_api.API, 'create')
- def test_create_instance_raise_auto_disk_config_exc(self, mock_create):
- mock_create.side_effect = exception.AutoDiskConfigDisabledByImage(
- image='dummy')
-
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create,
- self.req, self.body)
-
- @mock.patch.object(compute_api.API, 'create',
- side_effect=exception.InstanceExists(
- name='instance-name'))
- def test_create_instance_raise_instance_exists(self, mock_create):
- self.assertRaises(webob.exc.HTTPConflict,
- self.controller.create,
- self.req, self.body)
-
- def test_create_instance_with_network_with_no_subnet(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'uuid': network}]
- params = {'networks': requested_networks}
-
- def fake_create(*args, **kwargs):
- raise exception.NetworkRequiresSubnet(network_uuid=network)
-
- self.stubs.Set(compute_api.API, 'create', fake_create)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, params)
-
- def test_create_instance_with_access_ip(self):
- self.body['server']['accessIPv4'] = '1.2.3.4'
- self.body['server']['accessIPv6'] = 'fead::1234'
-
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, self.body).obj
- server = res['server']
- self._check_admin_pass_len(server)
- self.assertEqual(FAKE_UUID, server['id'])
-
- def test_create_instance_with_access_ip_pass_disabled(self):
- # test with admin passwords disabled See lp bug 921814
- self.flags(enable_instance_password=False)
- self.body['server']['accessIPv4'] = '1.2.3.4'
- self.body['server']['accessIPv6'] = 'fead::1234'
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, self.body).obj
-
- server = res['server']
- self._check_admin_pass_missing(server)
- self.assertEqual(FAKE_UUID, server['id'])
-
- def test_create_instance_bad_format_access_ip_v4(self):
- self.body['server']['accessIPv4'] = 'bad_format'
- self.body['server']['accessIPv6'] = 'fead::1234'
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- self.req, self.body)
-
- def test_create_instance_bad_format_access_ip_v6(self):
- self.body['server']['accessIPv4'] = '1.2.3.4'
- self.body['server']['accessIPv6'] = 'bad_format'
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- self.req, self.body)
-
- def test_create_instance_name_all_blank_spaces(self):
- self.body['server']['name'] = ' ' * 64
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, self.body)
-
- def test_create_instance_name_too_long(self):
- self.body['server']['name'] = 'X' * 256
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- self.req, self.body)
-
- def test_create_instance(self):
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, self.body).obj
-
- server = res['server']
- self._check_admin_pass_len(server)
- self.assertEqual(FAKE_UUID, server['id'])
-
- def test_create_instance_pass_disabled(self):
- self.flags(enable_instance_password=False)
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, self.body).obj
-
- server = res['server']
- self._check_admin_pass_missing(server)
- self.assertEqual(FAKE_UUID, server['id'])
-
- @mock.patch('nova.virt.hardware.VirtNUMAInstanceTopology.get_constraints')
- def test_create_instance_numa_topology_wrong(self, numa_constraints_mock):
- numa_constraints_mock.side_effect = (
- exception.ImageNUMATopologyIncomplete)
- image_href = 'http://localhost/v2/images/%s' % self.image_uuid
- self.body['server']['imageRef'] = image_href
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, self.body)
-
- def test_create_instance_too_much_metadata(self):
- self.flags(quota_metadata_items=1)
- self.body['server']['metadata']['vote'] = 'fiddletown'
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPForbidden,
- self.controller.create, self.req, self.body)
-
- def test_create_instance_metadata_key_too_long(self):
- self.flags(quota_metadata_items=1)
- self.body['server']['metadata'] = {('a' * 260): '12345'}
-
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
- self.controller.create, self.req, self.body)
-
- def test_create_instance_metadata_value_too_long(self):
- self.flags(quota_metadata_items=1)
- self.body['server']['metadata'] = {'key1': ('a' * 260)}
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
- self.controller.create, self.req, self.body)
-
- def test_create_instance_metadata_key_blank(self):
- self.flags(quota_metadata_items=1)
- self.body['server']['metadata'] = {'': 'abcd'}
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, self.body)
-
- def test_create_instance_metadata_not_dict(self):
- self.flags(quota_metadata_items=1)
- self.body['server']['metadata'] = 'string'
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, self.body)
-
- def test_create_instance_metadata_key_not_string(self):
- self.flags(quota_metadata_items=1)
- self.body['server']['metadata'] = {1: 'test'}
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, self.body)
-
- def test_create_instance_metadata_value_not_string(self):
- self.flags(quota_metadata_items=1)
- self.body['server']['metadata'] = {'test': ['a', 'list']}
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, self.body)
-
- def test_create_user_data_malformed_bad_request(self):
- self.ext_mgr.extensions = {'os-user-data': 'fake'}
- params = {'user_data': 'u1234!'}
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, params)
-
- @mock.patch('nova.compute.api.API.create',
- side_effect=exception.KeypairNotFound(name='nonexistentkey',
- user_id=1))
- def test_create_instance_invalid_key_name(self, mock_create):
- self.body['server']['key_name'] = 'nonexistentkey'
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, self.body)
-
- def test_create_instance_valid_key_name(self):
- self.body['server']['key_name'] = 'key'
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, self.body).obj
-
- self.assertEqual(FAKE_UUID, res["server"]["id"])
- self._check_admin_pass_len(res["server"])
-
- def test_create_instance_invalid_flavor_href(self):
- flavor_ref = 'http://localhost/v2/flavors/asdf'
- self.body['server']['flavorRef'] = flavor_ref
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, self.body)
-
- def test_create_instance_invalid_flavor_id_int(self):
- flavor_ref = -1
- self.body['server']['flavorRef'] = flavor_ref
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, self.body)
-
- def test_create_instance_bad_flavor_href(self):
- flavor_ref = 'http://localhost/v2/flavors/17'
- self.body['server']['flavorRef'] = flavor_ref
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, self.body)
-
- def test_create_instance_with_config_drive(self):
- self.ext_mgr.extensions = {'os-config-drive': 'fake'}
- self.body['server']['config_drive'] = "true"
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, self.body).obj
- server = res['server']
- self.assertEqual(FAKE_UUID, server['id'])
-
- def test_create_instance_with_bad_config_drive(self):
- self.ext_mgr.extensions = {'os-config-drive': 'fake'}
- self.body['server']['config_drive'] = 'adcd'
- self.req.body = jsonutils.dumps(self.body)
-
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, self.body)
-
- def test_create_instance_without_config_drive(self):
- self.ext_mgr.extensions = {'os-config-drive': 'fake'}
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, self.body).obj
- server = res['server']
- self.assertEqual(FAKE_UUID, server['id'])
-
- def test_create_instance_with_config_drive_disabled(self):
- config_drive = [{'config_drive': 'foo'}]
- params = {'config_drive': config_drive}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertIsNone(kwargs['config_drive'])
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_bad_href(self):
- image_href = 'asdf'
- self.body['server']['imageRef'] = image_href
- self.req.body = jsonutils.dumps(self.body)
-
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, self.body)
-
- def test_create_instance_local_href(self):
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, self.body).obj
-
- server = res['server']
- self.assertEqual(FAKE_UUID, server['id'])
-
- def test_create_instance_admin_pass(self):
- self.body['server']['flavorRef'] = 3,
- self.body['server']['adminPass'] = 'testpass'
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, self.body).obj
-
- server = res['server']
- self.assertEqual(server['adminPass'], self.body['server']['adminPass'])
-
- def test_create_instance_admin_pass_pass_disabled(self):
- self.flags(enable_instance_password=False)
- self.body['server']['flavorRef'] = 3,
- self.body['server']['adminPass'] = 'testpass'
- self.req.body = jsonutils.dumps(self.body)
- res = self.controller.create(self.req, self.body).obj
-
- server = res['server']
- self.assertIn('adminPass', self.body['server'])
- self.assertNotIn('adminPass', server)
-
- def test_create_instance_admin_pass_empty(self):
- self.body['server']['flavorRef'] = 3,
- self.body['server']['adminPass'] = ''
- self.req.body = jsonutils.dumps(self.body)
-
- # The fact that the action doesn't raise is enough validation
- self.controller.create(self.req, self.body)
-
- def test_create_instance_with_security_group_disabled(self):
- group = 'foo'
- params = {'security_groups': [{'name': group}]}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- # NOTE(vish): if the security groups extension is not
- # enabled, then security groups passed in
- # are ignored.
- self.assertEqual(kwargs['security_group'], ['default'])
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_with_disk_config_enabled(self):
- self.ext_mgr.extensions = {'OS-DCF': 'fake'}
- # NOTE(vish): the extension converts OS-DCF:disk_config into
- # auto_disk_config, so we are testing with
- # the_internal_value
- params = {'auto_disk_config': 'AUTO'}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['auto_disk_config'], 'AUTO')
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_with_disk_config_disabled(self):
- params = {'auto_disk_config': True}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['auto_disk_config'], False)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_with_scheduler_hints_enabled(self):
- self.ext_mgr.extensions = {'OS-SCH-HNT': 'fake'}
- hints = {'a': 'b'}
- params = {'scheduler_hints': hints}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['scheduler_hints'], hints)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_with_scheduler_hints_disabled(self):
- hints = {'a': 'b'}
- params = {'scheduler_hints': hints}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['scheduler_hints'], {})
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_with_volumes_enabled_no_image(self):
- """Test that the create will fail if there is no image
- and no bdms supplied in the request
- """
- self.ext_mgr.extensions = {'os-volumes': 'fake'}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertNotIn('imageRef', kwargs)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, {}, no_image=True)
-
- def test_create_instance_with_bdm_v2_enabled_no_image(self):
- self.ext_mgr.extensions = {'os-block-device-mapping-v2-boot': 'fake'}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertNotIn('imageRef', kwargs)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, {}, no_image=True)
-
- def test_create_instance_with_user_data_enabled(self):
- self.ext_mgr.extensions = {'os-user-data': 'fake'}
- user_data = 'fake'
- params = {'user_data': user_data}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['user_data'], user_data)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_with_user_data_disabled(self):
- user_data = 'fake'
- params = {'user_data': user_data}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertIsNone(kwargs['user_data'])
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_with_keypairs_enabled(self):
- self.ext_mgr.extensions = {'os-keypairs': 'fake'}
- key_name = 'green'
-
- params = {'key_name': key_name}
- old_create = compute_api.API.create
-
- # NOTE(sdague): key pair goes back to the database,
- # so we need to stub it out for tests
- def key_pair_get(context, user_id, name):
- return dict(test_keypair.fake_keypair,
- public_key='FAKE_KEY',
- fingerprint='FAKE_FINGERPRINT',
- name=name)
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['key_name'], key_name)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(db, 'key_pair_get', key_pair_get)
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_with_keypairs_disabled(self):
- key_name = 'green'
-
- params = {'key_name': key_name}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertIsNone(kwargs['key_name'])
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_with_availability_zone_enabled(self):
- self.ext_mgr.extensions = {'os-availability-zone': 'fake'}
- availability_zone = 'fake'
- params = {'availability_zone': availability_zone}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['availability_zone'], availability_zone)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
-
- try:
- self._test_create_extra(params)
- except webob.exc.HTTPBadRequest as e:
- expected = 'The requested availability zone is not available'
- self.assertEqual(e.explanation, expected)
- admin_context = context.get_admin_context()
- db.service_create(admin_context, {'host': 'host1_zones',
- 'binary': "nova-compute",
- 'topic': 'compute',
- 'report_count': 0})
- agg = db.aggregate_create(admin_context,
- {'name': 'agg1'}, {'availability_zone': availability_zone})
- db.aggregate_host_add(admin_context, agg['id'], 'host1_zones')
- self._test_create_extra(params)
-
- def test_create_instance_with_availability_zone_disabled(self):
- availability_zone = 'fake'
- params = {'availability_zone': availability_zone}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertIsNone(kwargs['availability_zone'])
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_with_multiple_create_enabled(self):
- self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
- min_count = 2
- max_count = 3
- params = {
- 'min_count': min_count,
- 'max_count': max_count,
- }
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['min_count'], 2)
- self.assertEqual(kwargs['max_count'], 3)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_with_multiple_create_disabled(self):
- min_count = 2
- max_count = 3
- params = {
- 'min_count': min_count,
- 'max_count': max_count,
- }
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertEqual(kwargs['min_count'], 1)
- self.assertEqual(kwargs['max_count'], 1)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_with_networks_enabled(self):
- self.ext_mgr.extensions = {'os-networks': 'fake'}
- net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- requested_networks = [{'uuid': net_uuid}]
- params = {'networks': requested_networks}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None)]
- self.assertEqual(result, kwargs['requested_networks'].as_tuples())
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_with_neutronv2_port_in_use(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'uuid': network, 'port': port}]
- params = {'networks': requested_networks}
-
- def fake_create(*args, **kwargs):
- raise exception.PortInUse(port_id=port)
-
- self.stubs.Set(compute_api.API, 'create', fake_create)
- self.assertRaises(webob.exc.HTTPConflict,
- self._test_create_extra, params)
-
- def test_create_instance_with_neturonv2_not_found_network(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- requested_networks = [{'uuid': network}]
- params = {'networks': requested_networks}
-
- def fake_create(*args, **kwargs):
- raise exception.NetworkNotFound(network_id=network)
-
- self.stubs.Set(compute_api.API, 'create', fake_create)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, params)
-
- def test_create_instance_with_neutronv2_port_not_found(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- requested_networks = [{'uuid': network, 'port': port}]
- params = {'networks': requested_networks}
-
- def fake_create(*args, **kwargs):
- raise exception.PortNotFound(port_id=port)
-
- self.stubs.Set(compute_api.API, 'create', fake_create)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, params)
-
- @mock.patch.object(compute_api.API, 'create')
- def test_create_multiple_instance_with_specified_ip_neutronv2(self,
- _api_mock):
- _api_mock.side_effect = exception.InvalidFixedIpAndMaxCountRequest(
- reason="")
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- address = '10.0.0.1'
- self.body['server']['max_count'] = 2
- requested_networks = [{'uuid': network, 'fixed_ip': address,
- 'port': port}]
- params = {'networks': requested_networks}
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, params)
-
- def test_create_multiple_instance_with_neutronv2_port(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- self.body['server']['max_count'] = 2
- requested_networks = [{'uuid': network, 'port': port}]
- params = {'networks': requested_networks}
-
- def fake_create(*args, **kwargs):
- msg = _("Unable to launch multiple instances with"
- " a single configured port ID. Please launch your"
- " instance one by one with different ports.")
- raise exception.MultiplePortsNotApplicable(reason=msg)
-
- self.stubs.Set(compute_api.API, 'create', fake_create)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, params)
-
- def test_create_instance_with_networks_disabled_neutronv2(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- requested_networks = [{'uuid': net_uuid}]
- params = {'networks': requested_networks}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None,
- None, None)]
- self.assertEqual(result, kwargs['requested_networks'].as_tuples())
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_with_networks_disabled(self):
- self.ext_mgr.extensions = {}
- net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- requested_networks = [{'uuid': net_uuid}]
- params = {'networks': requested_networks}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertIsNone(kwargs['requested_networks'])
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params)
-
- def test_create_instance_invalid_personality(self):
-
- def fake_create(*args, **kwargs):
- codec = 'utf8'
- content = 'b25zLiINCg0KLVJpY2hhcmQgQ$$%QQmFjaA=='
- start_position = 19
- end_position = 20
- msg = 'invalid start byte'
- raise UnicodeDecodeError(codec, content, start_position,
- end_position, msg)
- self.stubs.Set(compute_api.API, 'create', fake_create)
- self.body['server']['personality'] = [
- {
- "path": "/etc/banner.txt",
- "contents": "b25zLiINCg0KLVJpY2hhcmQgQ$$%QQmFjaA==",
- },
- ]
- self.req.body = jsonutils.dumps(self.body)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, self.body)
-
- def test_create_location(self):
- selfhref = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
- image_href = 'http://localhost/v2/images/%s' % self.image_uuid
- self.body['server']['imageRef'] = image_href
- self.req.body = jsonutils.dumps(self.body)
- robj = self.controller.create(self.req, self.body)
- self.assertEqual(robj['Location'], selfhref)
-
- def _do_test_create_instance_above_quota(self, resource, allowed, quota,
- expected_msg):
- fakes.stub_out_instance_quota(self.stubs, allowed, quota, resource)
- self.body['server']['flavorRef'] = 3
- self.req.body = jsonutils.dumps(self.body)
- try:
- self.controller.create(self.req, self.body).obj['server']
- self.fail('expected quota to be exceeded')
- except webob.exc.HTTPForbidden as e:
- self.assertEqual(e.explanation, expected_msg)
-
- def test_create_instance_above_quota_instances(self):
- msg = _('Quota exceeded for instances: Requested 1, but'
- ' already used 10 of 10 instances')
- self._do_test_create_instance_above_quota('instances', 0, 10, msg)
-
- def test_create_instance_above_quota_ram(self):
- msg = _('Quota exceeded for ram: Requested 4096, but'
- ' already used 8192 of 10240 ram')
- self._do_test_create_instance_above_quota('ram', 2048, 10 * 1024, msg)
-
- def test_create_instance_above_quota_cores(self):
- msg = _('Quota exceeded for cores: Requested 2, but'
- ' already used 9 of 10 cores')
- self._do_test_create_instance_above_quota('cores', 1, 10, msg)
-
- def test_create_instance_above_quota_group_members(self):
- ctxt = context.get_admin_context()
- fake_group = objects.InstanceGroup(ctxt)
- fake_group.create()
-
- def fake_count(context, name, group, user_id):
- self.assertEqual(name, "server_group_members")
- self.assertEqual(group.uuid, fake_group.uuid)
- self.assertEqual(user_id,
- self.req.environ['nova.context'].user_id)
- return 10
-
- def fake_limit_check(context, **kwargs):
- if 'server_group_members' in kwargs:
- raise exception.OverQuota(overs={})
-
- def fake_instance_destroy(context, uuid, constraint):
- return fakes.stub_instance(1)
-
- self.stubs.Set(fakes.QUOTAS, 'count', fake_count)
- self.stubs.Set(fakes.QUOTAS, 'limit_check', fake_limit_check)
- self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
- self.ext_mgr.extensions = {'OS-SCH-HNT': 'fake',
- 'os-server-group-quotas': 'fake'}
- self.body['server']['scheduler_hints'] = {'group': fake_group.uuid}
- self.req.body = jsonutils.dumps(self.body)
-
- expected_msg = "Quota exceeded, too many servers in group"
-
- try:
- self.controller.create(self.req, self.body).obj['server']
- self.fail('expected quota to be exceeded')
- except webob.exc.HTTPForbidden as e:
- self.assertEqual(e.explanation, expected_msg)
-
- def test_create_instance_above_quota_server_groups(self):
-
- def fake_reserve(contex, **deltas):
- if 'server_groups' in deltas:
- raise exception.OverQuota(overs={})
-
- def fake_instance_destroy(context, uuid, constraint):
- return fakes.stub_instance(1)
-
- self.stubs.Set(fakes.QUOTAS, 'reserve', fake_reserve)
- self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
- self.ext_mgr.extensions = {'OS-SCH-HNT': 'fake',
- 'os-server-group-quotas': 'fake'}
- self.body['server']['scheduler_hints'] = {'group': 'fake-group'}
- self.req.body = jsonutils.dumps(self.body)
-
- expected_msg = "Quota exceeded, too many server groups."
-
- try:
- self.controller.create(self.req, self.body).obj['server']
- self.fail('expected quota to be exceeded')
- except webob.exc.HTTPForbidden as e:
- self.assertEqual(e.explanation, expected_msg)
-
-
-class ServersControllerCreateTestWithMock(test.TestCase):
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/123/flavors/3'
-
- def setUp(self):
- """Shared implementation for tests below that create instance."""
- super(ServersControllerCreateTestWithMock, self).setUp()
-
- self.flags(verbose=True,
- enable_instance_password=True)
- self.instance_cache_num = 0
- self.instance_cache_by_id = {}
- self.instance_cache_by_uuid = {}
-
- self.ext_mgr = extensions.ExtensionManager()
- self.ext_mgr.extensions = {}
- self.controller = servers.Controller(self.ext_mgr)
-
- self.volume_id = 'fake'
-
- self.body = {
- 'server': {
- 'min_count': 2,
- 'name': 'server_test',
- 'imageRef': self.image_uuid,
- 'flavorRef': self.flavor_ref,
- 'metadata': {
- 'hello': 'world',
- 'open': 'stack',
- },
- 'personality': [
- {
- "path": "/etc/banner.txt",
- "contents": "MQ==",
- },
- ],
- },
- }
-
- self.req = fakes.HTTPRequest.blank('/fake/servers')
- self.req.method = 'POST'
- self.req.headers["content-type"] = "application/json"
-
- def _test_create_extra(self, params, no_image=False):
- self.body['server']['flavorRef'] = 2
- if no_image:
- self.body['server'].pop('imageRef', None)
- self.body['server'].update(params)
- self.req.body = jsonutils.dumps(self.body)
- self.controller.create(self.req, self.body).obj['server']
-
- @mock.patch.object(compute_api.API, 'create')
- def test_create_instance_with_neutronv2_fixed_ip_already_in_use(self,
- create_mock):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- address = '10.0.2.3'
- requested_networks = [{'uuid': network, 'fixed_ip': address}]
- params = {'networks': requested_networks}
- create_mock.side_effect = exception.FixedIpAlreadyInUse(
- address=address,
- instance_uuid=network)
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, params)
- self.assertEqual(1, len(create_mock.call_args_list))
-
- @mock.patch.object(compute_api.API, 'create',
- side_effect=exception.InvalidVolume(reason='error'))
- def test_create_instance_with_invalid_volume_error(self, create_mock):
- # Tests that InvalidVolume is translated to a 400 error.
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._test_create_extra, {})
-
-
-class TestServerCreateRequestXMLDeserializer(test.TestCase):
-
- def setUp(self):
- super(TestServerCreateRequestXMLDeserializer, self).setUp()
- self.deserializer = servers.CreateDeserializer()
-
- def test_minimal_request(self):
- serial_request = """
-<server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test"
- imageRef="1"
- flavorRef="2"/>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "2",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_request_with_alternate_namespace_prefix(self):
- serial_request = """
-<ns2:server xmlns:ns2="http://docs.openstack.org/compute/api/v2"
- name="new-server-test"
- imageRef="1"
- flavorRef="2">
- <ns2:metadata><ns2:meta key="hello">world</ns2:meta></ns2:metadata>
- </ns2:server>
- """
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "2",
- 'metadata': {"hello": "world"},
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_request_with_scheduler_hints_and_alternate_namespace_prefix(self):
- serial_request = """
-<ns2:server xmlns:ns2="http://docs.openstack.org/compute/api/v2"
- name="new-server-test"
- imageRef="1"
- flavorRef="2">
- <ns2:metadata><ns2:meta key="hello">world</ns2:meta></ns2:metadata>
- <os:scheduler_hints
- xmlns:os="http://docs.openstack.org/compute/ext/scheduler-hints/api/v2">
- <hypervisor>xen</hypervisor>
- <near>eb999657-dd6b-464e-8713-95c532ac3b18</near>
- </os:scheduler_hints>
- </ns2:server>
- """
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "server": {
- 'OS-SCH-HNT:scheduler_hints': {
- 'hypervisor': ['xen'],
- 'near': ['eb999657-dd6b-464e-8713-95c532ac3b18']
- },
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "2",
- "metadata": {
- "hello": "world"
- }
- }
- }
- self.assertEqual(request['body'], expected)
-
- def test_access_ipv4(self):
- serial_request = """
-<server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test"
- imageRef="1"
- flavorRef="2"
- accessIPv4="1.2.3.4"/>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "2",
- "accessIPv4": "1.2.3.4",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_access_ipv6(self):
- serial_request = """
-<server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test"
- imageRef="1"
- flavorRef="2"
- accessIPv6="fead::1234"/>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "2",
- "accessIPv6": "fead::1234",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_access_ip(self):
- serial_request = """
-<server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test"
- imageRef="1"
- flavorRef="2"
- accessIPv4="1.2.3.4"
- accessIPv6="fead::1234"/>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "2",
- "accessIPv4": "1.2.3.4",
- "accessIPv6": "fead::1234",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_admin_pass(self):
- serial_request = """
-<server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test"
- imageRef="1"
- flavorRef="2"
- adminPass="1234"/>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "2",
- "adminPass": "1234",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_image_link(self):
- serial_request = """
-<server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test"
- imageRef="http://localhost:8774/v2/images/2"
- flavorRef="3"/>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "server": {
- "name": "new-server-test",
- "imageRef": "http://localhost:8774/v2/images/2",
- "flavorRef": "3",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_flavor_link(self):
- serial_request = """
-<server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test"
- imageRef="1"
- flavorRef="http://localhost:8774/v2/flavors/3"/>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "http://localhost:8774/v2/flavors/3",
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_empty_metadata_personality(self):
- serial_request = """
-<server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test"
- imageRef="1"
- flavorRef="2">
- <metadata/>
- <personality/>
-</server>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "2",
- "metadata": {},
- "personality": [],
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_multiple_metadata_items(self):
- serial_request = """
-<server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test"
- imageRef="1"
- flavorRef="2">
- <metadata>
- <meta key="one">two</meta>
- <meta key="open">snack</meta>
- </metadata>
-</server>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "2",
- "metadata": {"one": "two", "open": "snack"},
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_multiple_personality_files(self):
- serial_request = """
-<server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test"
- imageRef="1"
- flavorRef="2">
- <personality>
- <file path="/etc/banner.txt">MQ==</file>
- <file path="/etc/hosts">Mg==</file>
- </personality>
-</server>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "2",
- "personality": [
- {"path": "/etc/banner.txt", "contents": "MQ=="},
- {"path": "/etc/hosts", "contents": "Mg=="},
- ],
- },
- }
- self.assertThat(request['body'], matchers.DictMatches(expected))
-
- def test_spec_request(self):
- image_bookmark_link = ("http://servers.api.openstack.org/1234/"
- "images/52415800-8b69-11e0-9b19-734f6f006e54")
- serial_request = """
-<server xmlns="http://docs.openstack.org/compute/api/v2"
- imageRef="%s"
- flavorRef="52415800-8b69-11e0-9b19-734f1195ff37"
- name="new-server-test">
- <metadata>
- <meta key="My Server Name">Apache1</meta>
- </metadata>
- <personality>
- <file path="/etc/banner.txt">Mg==</file>
- </personality>
-</server>""" % (image_bookmark_link)
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "server": {
- "name": "new-server-test",
- "imageRef": ("http://servers.api.openstack.org/1234/"
- "images/52415800-8b69-11e0-9b19-734f6f006e54"),
- "flavorRef": "52415800-8b69-11e0-9b19-734f1195ff37",
- "metadata": {"My Server Name": "Apache1"},
- "personality": [
- {
- "path": "/etc/banner.txt",
- "contents": "Mg==",
- },
- ],
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_request_with_empty_networks(self):
- serial_request = """
-<server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test" imageRef="1" flavorRef="1">
- <networks/>
-</server>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {"server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "1",
- "networks": [],
- }}
- self.assertEqual(request['body'], expected)
-
- def test_request_with_one_network(self):
- serial_request = """
-<server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test" imageRef="1" flavorRef="1">
- <networks>
- <network uuid="1" fixed_ip="10.0.1.12"/>
- </networks>
-</server>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {"server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "1",
- "networks": [{"uuid": "1", "fixed_ip": "10.0.1.12"}],
- }}
- self.assertEqual(request['body'], expected)
-
- def test_request_with_two_networks(self):
- serial_request = """
-<server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test" imageRef="1" flavorRef="1">
- <networks>
- <network uuid="1" fixed_ip="10.0.1.12"/>
- <network uuid="2" fixed_ip="10.0.2.12"/>
- </networks>
-</server>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {"server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "1",
- "networks": [{"uuid": "1", "fixed_ip": "10.0.1.12"},
- {"uuid": "2", "fixed_ip": "10.0.2.12"}],
- }}
- self.assertEqual(request['body'], expected)
-
- def test_request_with_second_network_node_ignored(self):
- serial_request = """
-<server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test" imageRef="1" flavorRef="1">
- <networks>
- <network uuid="1" fixed_ip="10.0.1.12"/>
- </networks>
- <networks>
- <network uuid="2" fixed_ip="10.0.2.12"/>
- </networks>
-</server>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {"server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "1",
- "networks": [{"uuid": "1", "fixed_ip": "10.0.1.12"}],
- }}
- self.assertEqual(request['body'], expected)
-
- def test_request_with_one_network_missing_id(self):
- serial_request = """
-<server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test" imageRef="1" flavorRef="1">
- <networks>
- <network fixed_ip="10.0.1.12"/>
- </networks>
-</server>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {"server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "1",
- "networks": [{"fixed_ip": "10.0.1.12"}],
- }}
- self.assertEqual(request['body'], expected)
-
- def test_request_with_one_network_missing_fixed_ip(self):
- serial_request = """
-<server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test" imageRef="1" flavorRef="1">
- <networks>
- <network uuid="1"/>
- </networks>
-</server>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {"server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "1",
- "networks": [{"uuid": "1"}],
- }}
- self.assertEqual(request['body'], expected)
-
- def test_request_with_one_network_empty_id(self):
- serial_request = """
- <server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test" imageRef="1" flavorRef="1">
- <networks>
- <network uuid="" fixed_ip="10.0.1.12"/>
- </networks>
- </server>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {"server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "1",
- "networks": [{"uuid": "", "fixed_ip": "10.0.1.12"}],
- }}
- self.assertEqual(request['body'], expected)
-
- def test_request_with_one_network_empty_fixed_ip(self):
- serial_request = """
- <server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test" imageRef="1" flavorRef="1">
- <networks>
- <network uuid="1" fixed_ip=""/>
- </networks>
- </server>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {"server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "1",
- "networks": [{"uuid": "1", "fixed_ip": ""}],
- }}
- self.assertEqual(request['body'], expected)
-
- def test_request_with_networks_duplicate_ids(self):
- serial_request = """
- <server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test" imageRef="1" flavorRef="1">
- <networks>
- <network uuid="1" fixed_ip="10.0.1.12"/>
- <network uuid="1" fixed_ip="10.0.2.12"/>
- </networks>
- </server>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {"server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "1",
- "networks": [{"uuid": "1", "fixed_ip": "10.0.1.12"},
- {"uuid": "1", "fixed_ip": "10.0.2.12"}],
- }}
- self.assertEqual(request['body'], expected)
-
- def test_request_with_availability_zone(self):
- serial_request = """
- <server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test" imageRef="1" flavorRef="1"
- availability_zone="some_zone:some_host">
- </server>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {"server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "1",
- "availability_zone": "some_zone:some_host",
- }}
- self.assertEqual(request['body'], expected)
-
- def test_request_with_multiple_create_args(self):
- serial_request = """
- <server xmlns="http://docs.openstack.org/compute/api/v2"
- name="new-server-test" imageRef="1" flavorRef="1"
- min_count="1" max_count="3" return_reservation_id="True">
- </server>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {"server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "1",
- "min_count": "1",
- "max_count": "3",
- "return_reservation_id": True,
- }}
- self.assertEqual(request['body'], expected)
-
- def test_request_with_disk_config(self):
- serial_request = """
- <server xmlns="http://docs.openstack.org/compute/api/v2"
- xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1"
- name="new-server-test" imageRef="1" flavorRef="1"
- OS-DCF:diskConfig="AUTO">
- </server>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {"server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "1",
- "OS-DCF:diskConfig": "AUTO",
- }}
- self.assertEqual(request['body'], expected)
-
- def test_request_with_scheduler_hints(self):
- serial_request = """
- <server xmlns="http://docs.openstack.org/compute/api/v2"
- xmlns:OS-SCH-HNT=
- "http://docs.openstack.org/compute/ext/scheduler-hints/api/v2"
- name="new-server-test" imageRef="1" flavorRef="1">
- <OS-SCH-HNT:scheduler_hints>
- <different_host>
- 7329b667-50c7-46a6-b913-cb2a09dfeee0
- </different_host>
- <different_host>
- f31efb24-34d2-43e1-8b44-316052956a39
- </different_host>
- </OS-SCH-HNT:scheduler_hints>
- </server>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {"server": {
- "name": "new-server-test",
- "imageRef": "1",
- "flavorRef": "1",
- "OS-SCH-HNT:scheduler_hints": {
- "different_host": [
- "7329b667-50c7-46a6-b913-cb2a09dfeee0",
- "f31efb24-34d2-43e1-8b44-316052956a39",
- ]
- }
- }}
- self.assertEqual(request['body'], expected)
-
- def test_request_with_config_drive(self):
- serial_request = """
- <server xmlns="http://docs.openstack.org/compute/api/v2"
- name="config_drive_test"
- imageRef="1"
- flavorRef="1"
- config_drive="true"/>"""
- request = self.deserializer.deserialize(serial_request)
- expected = {
- "server": {
- "name": "config_drive_test",
- "imageRef": "1",
- "flavorRef": "1",
- "config_drive": "true"
- },
- }
- self.assertEqual(request['body'], expected)
-
- def test_corrupt_xml(self):
- """Should throw a 400 error on corrupt xml."""
- self.assertRaises(
- exception.MalformedRequestBody,
- self.deserializer.deserialize,
- utils.killer_xml_body())
-
-
-class TestServerActionRequestXMLDeserializer(test.TestCase):
-
- def setUp(self):
- super(TestServerActionRequestXMLDeserializer, self).setUp()
- self.deserializer = servers.ActionDeserializer()
-
- def _generate_request(self, action, disk_cfg, ref):
- return """
-<%(action)s xmlns="http://docs.openstack.org/compute/api/v1.1"
- xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1"
- %(disk_config)s="MANUAL" %(ref)s="1"/>""" % (
- {'action': action, 'disk_config': disk_cfg, 'ref': ref})
-
- def _generate_expected(self, action, ref):
- return {
- "%s" % action: {
- "%s" % ref: "1",
- "OS-DCF:diskConfig": "MANUAL",
- },
- }
-
- def test_rebuild_request(self):
- serial_request = self._generate_request("rebuild", "OS-DCF:diskConfig",
- "imageRef")
- request = self.deserializer.deserialize(serial_request)
- expected = self._generate_expected("rebuild", "imageRef")
- self.assertEqual(request['body'], expected)
-
- def test_rebuild_request_auto_disk_config_compat(self):
- serial_request = self._generate_request("rebuild", "auto_disk_config",
- "imageRef")
- request = self.deserializer.deserialize(serial_request)
- expected = self._generate_expected("rebuild", "imageRef")
- self.assertEqual(request['body'], expected)
-
- def test_resize_request(self):
- serial_request = self._generate_request("resize", "OS-DCF:diskConfig",
- "flavorRef")
- request = self.deserializer.deserialize(serial_request)
- expected = self._generate_expected("resize", "flavorRef")
- self.assertEqual(request['body'], expected)
-
- def test_resize_request_auto_disk_config_compat(self):
- serial_request = self._generate_request("resize", "auto_disk_config",
- "flavorRef")
- request = self.deserializer.deserialize(serial_request)
- expected = self._generate_expected("resize", "flavorRef")
- self.assertEqual(request['body'], expected)
-
-
-class TestAddressesXMLSerialization(test.TestCase):
-
- index_serializer = ips.AddressesTemplate()
- show_serializer = ips.NetworkTemplate()
-
- def _serializer_test_data(self):
- return {
- 'network_2': [
- {'addr': '192.168.0.1', 'version': 4},
- {'addr': 'fe80::beef', 'version': 6},
- ],
- }
-
- def test_xml_declaration(self):
- output = self.show_serializer.serialize(self._serializer_test_data())
- has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
- self.assertTrue(has_dec)
-
- def test_show(self):
- output = self.show_serializer.serialize(self._serializer_test_data())
- root = etree.XML(output)
- network = self._serializer_test_data()['network_2']
- self.assertEqual(str(root.get('id')), 'network_2')
- ip_elems = root.findall('{0}ip'.format(NS))
- for z, ip_elem in enumerate(ip_elems):
- ip = network[z]
- self.assertEqual(str(ip_elem.get('version')),
- str(ip['version']))
- self.assertEqual(str(ip_elem.get('addr')),
- str(ip['addr']))
-
- def test_index(self):
- fixture = {
- 'addresses': {
- 'network_1': [
- {'addr': '192.168.0.3', 'version': 4},
- {'addr': '192.168.0.5', 'version': 4},
- ],
- 'network_2': [
- {'addr': '192.168.0.1', 'version': 4},
- {'addr': 'fe80::beef', 'version': 6},
- ],
- },
- }
- output = self.index_serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'addresses')
- addresses_dict = fixture['addresses']
- network_elems = root.findall('{0}network'.format(NS))
- self.assertEqual(len(network_elems), 2)
- for i, network_elem in enumerate(network_elems):
- network = addresses_dict.items()[i]
- self.assertEqual(str(network_elem.get('id')), str(network[0]))
- ip_elems = network_elem.findall('{0}ip'.format(NS))
- for z, ip_elem in enumerate(ip_elems):
- ip = network[1][z]
- self.assertEqual(str(ip_elem.get('version')),
- str(ip['version']))
- self.assertEqual(str(ip_elem.get('addr')),
- str(ip['addr']))
-
-
-class ServersViewBuilderTest(test.TestCase):
-
- image_bookmark = "http://localhost/fake/images/5"
- flavor_bookmark = "http://localhost/fake/flavors/1"
-
- def setUp(self):
- super(ServersViewBuilderTest, self).setUp()
- self.flags(use_ipv6=True)
- db_inst = fakes.stub_instance(
- id=1,
- image_ref="5",
- uuid="deadbeef-feed-edee-beef-d0ea7beefedd",
- display_name="test_server",
- include_fake_metadata=False)
-
- privates = ['172.19.0.1']
- publics = ['192.168.0.3']
- public6s = ['b33f::fdee:ddff:fecc:bbaa']
-
- def nw_info(*args, **kwargs):
- return [(None, {'label': 'public',
- 'ips': [dict(ip=ip) for ip in publics],
- 'ip6s': [dict(ip=ip) for ip in public6s]}),
- (None, {'label': 'private',
- 'ips': [dict(ip=ip) for ip in privates]})]
-
- def floaters(*args, **kwargs):
- return []
-
- fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
- fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
- floaters)
-
- self.uuid = db_inst['uuid']
- self.view_builder = views.servers.ViewBuilder()
- self.request = fakes.HTTPRequest.blank("/v2/fake")
- self.request.context = context.RequestContext('fake', 'fake')
- self.instance = fake_instance.fake_instance_obj(
- self.request.context,
- expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
- **db_inst)
- self.self_link = "http://localhost/v2/fake/servers/%s" % self.uuid
- self.bookmark_link = "http://localhost/fake/servers/%s" % self.uuid
- self.expected_detailed_server = {
- "server": {
- "id": self.uuid,
- "user_id": "fake_user",
- "tenant_id": "fake_project",
- "updated": "2010-11-11T11:00:00Z",
- "created": "2010-10-10T12:00:00Z",
- "progress": 0,
- "name": "test_server",
- "status": "BUILD",
- "accessIPv4": "",
- "accessIPv6": "",
- "hostId": '',
- "image": {
- "id": "5",
- "links": [
- {
- "rel": "bookmark",
- "href": self.image_bookmark,
- },
- ],
- },
- "flavor": {
- "id": "1",
- "links": [
- {
- "rel": "bookmark",
- "href": self.flavor_bookmark,
- },
- ],
- },
- "addresses": {
- 'test1': [
- {'version': 4, 'addr': '192.168.1.100'},
- {'version': 6, 'addr': '2001:db8:0:1::1'}
- ]
- },
- "metadata": {},
- "links": [
- {
- "rel": "self",
- "href": self.self_link,
- },
- {
- "rel": "bookmark",
- "href": self.bookmark_link,
- },
- ],
- }
- }
-
- self.expected_server = {
- "server": {
- "id": self.uuid,
- "name": "test_server",
- "links": [
- {
- "rel": "self",
- "href": self.self_link,
- },
- {
- "rel": "bookmark",
- "href": self.bookmark_link,
- },
- ],
- }
- }
-
- def test_get_flavor_valid_flavor(self):
- expected = {"id": "1",
- "links": [{"rel": "bookmark",
- "href": self.flavor_bookmark}]}
- result = self.view_builder._get_flavor(self.request, self.instance)
- self.assertEqual(result, expected)
-
- def test_build_server(self):
- output = self.view_builder.basic(self.request, self.instance)
- self.assertThat(output,
- matchers.DictMatches(self.expected_server))
-
- def test_build_server_with_project_id(self):
-
- output = self.view_builder.basic(self.request, self.instance)
- self.assertThat(output,
- matchers.DictMatches(self.expected_server))
-
- def test_build_server_detail(self):
-
- output = self.view_builder.show(self.request, self.instance)
- self.assertThat(output,
- matchers.DictMatches(self.expected_detailed_server))
-
- def test_build_server_no_image(self):
- self.instance["image_ref"] = ""
- output = self.view_builder.show(self.request, self.instance)
- self.assertEqual(output['server']['image'], "")
-
- def test_build_server_detail_with_fault(self):
- self.instance['vm_state'] = vm_states.ERROR
- self.instance['fault'] = fake_instance.fake_fault_obj(
- self.request.context, self.uuid)
-
- self.expected_detailed_server["server"]["status"] = "ERROR"
- self.expected_detailed_server["server"]["fault"] = {
- "code": 404,
- "created": "2010-10-10T12:00:00Z",
- "message": "HTTPNotFound",
- "details": "Stock details for test",
- }
- del self.expected_detailed_server["server"]["progress"]
-
- self.request.context = context.RequestContext('fake', 'fake')
- output = self.view_builder.show(self.request, self.instance)
- self.assertThat(output,
- matchers.DictMatches(self.expected_detailed_server))
-
- def test_build_server_detail_with_fault_that_has_been_deleted(self):
- self.instance['deleted'] = 1
- self.instance['vm_state'] = vm_states.ERROR
- fault = fake_instance.fake_fault_obj(self.request.context,
- self.uuid, code=500,
- message="No valid host was found")
- self.instance['fault'] = fault
-
-        # Regardless of the vm_state deleted servers should have DELETED status
- self.expected_detailed_server["server"]["status"] = "DELETED"
- self.expected_detailed_server["server"]["fault"] = {
- "code": 500,
- "created": "2010-10-10T12:00:00Z",
- "message": "No valid host was found",
- }
- del self.expected_detailed_server["server"]["progress"]
-
- self.request.context = context.RequestContext('fake', 'fake')
- output = self.view_builder.show(self.request, self.instance)
- self.assertThat(output,
- matchers.DictMatches(self.expected_detailed_server))
-
- def test_build_server_detail_with_fault_no_details_not_admin(self):
- self.instance['vm_state'] = vm_states.ERROR
- self.instance['fault'] = fake_instance.fake_fault_obj(
- self.request.context,
- self.uuid,
- code=500,
- message='Error')
-
- expected_fault = {"code": 500,
- "created": "2010-10-10T12:00:00Z",
- "message": "Error"}
-
- self.request.context = context.RequestContext('fake', 'fake')
- output = self.view_builder.show(self.request, self.instance)
- self.assertThat(output['server']['fault'],
- matchers.DictMatches(expected_fault))
-
- def test_build_server_detail_with_fault_admin(self):
- self.instance['vm_state'] = vm_states.ERROR
- self.instance['fault'] = fake_instance.fake_fault_obj(
- self.request.context,
- self.uuid,
- code=500,
- message='Error')
-
- expected_fault = {"code": 500,
- "created": "2010-10-10T12:00:00Z",
- "message": "Error",
- 'details': 'Stock details for test'}
-
- self.request.environ['nova.context'].is_admin = True
- output = self.view_builder.show(self.request, self.instance)
- self.assertThat(output['server']['fault'],
- matchers.DictMatches(expected_fault))
-
- def test_build_server_detail_with_fault_no_details_admin(self):
- self.instance['vm_state'] = vm_states.ERROR
- self.instance['fault'] = fake_instance.fake_fault_obj(
- self.request.context,
- self.uuid,
- code=500,
- message='Error',
- details='')
-
- expected_fault = {"code": 500,
- "created": "2010-10-10T12:00:00Z",
- "message": "Error"}
-
- self.request.environ['nova.context'].is_admin = True
- output = self.view_builder.show(self.request, self.instance)
- self.assertThat(output['server']['fault'],
- matchers.DictMatches(expected_fault))
-
- def test_build_server_detail_with_fault_but_active(self):
- self.instance['vm_state'] = vm_states.ACTIVE
- self.instance['progress'] = 100
- self.instance['fault'] = fake_instance.fake_fault_obj(
- self.request.context, self.uuid)
-
- output = self.view_builder.show(self.request, self.instance)
- self.assertNotIn('fault', output['server'])
-
- def test_build_server_detail_active_status(self):
- # set the power state of the instance to running
- self.instance['vm_state'] = vm_states.ACTIVE
- self.instance['progress'] = 100
-
- self.expected_detailed_server["server"]["status"] = "ACTIVE"
- self.expected_detailed_server["server"]["progress"] = 100
-
- output = self.view_builder.show(self.request, self.instance)
- self.assertThat(output,
- matchers.DictMatches(self.expected_detailed_server))
-
- def test_build_server_detail_with_accessipv4(self):
-
- access_ip_v4 = '1.2.3.4'
- self.instance['access_ip_v4'] = access_ip_v4
-
- self.expected_detailed_server["server"]["accessIPv4"] = access_ip_v4
- output = self.view_builder.show(self.request, self.instance)
- self.assertThat(output,
- matchers.DictMatches(self.expected_detailed_server))
-
- def test_build_server_detail_with_accessipv6(self):
-
- access_ip_v6 = 'fead::1234'
- self.instance['access_ip_v6'] = access_ip_v6
-
- self.expected_detailed_server["server"]["accessIPv6"] = access_ip_v6
-
- output = self.view_builder.show(self.request, self.instance)
- self.assertThat(output,
- matchers.DictMatches(self.expected_detailed_server))
-
- def test_build_server_detail_with_metadata(self):
-
- metadata = []
- metadata.append(models.InstanceMetadata(key="Open", value="Stack"))
- metadata = nova_utils.metadata_to_dict(metadata)
- self.instance['metadata'] = metadata
-
- self.expected_detailed_server["server"]["metadata"] = {"Open": "Stack"}
- output = self.view_builder.show(self.request, self.instance)
- self.assertThat(output,
- matchers.DictMatches(self.expected_detailed_server))
-
-
-class ServerXMLSerializationTest(test.TestCase):
-
- TIMESTAMP = "2010-10-11T10:30:22Z"
- SERVER_HREF = 'http://localhost/v2/servers/%s' % FAKE_UUID
- SERVER_NEXT = 'http://localhost/v2/servers?limit=%s&marker=%s'
- SERVER_BOOKMARK = 'http://localhost/servers/%s' % FAKE_UUID
- IMAGE_BOOKMARK = 'http://localhost/images/5'
- FLAVOR_BOOKMARK = 'http://localhost/flavors/1'
- USERS_ATTRIBUTES = ['name', 'id', 'created', 'accessIPv4',
- 'updated', 'progress', 'status', 'hostId',
- 'accessIPv6']
- ADMINS_ATTRIBUTES = USERS_ATTRIBUTES + ['adminPass']
-
- def setUp(self):
- super(ServerXMLSerializationTest, self).setUp()
- self.body = {
- "server": {
- 'id': FAKE_UUID,
- 'user_id': 'fake_user_id',
- 'tenant_id': 'fake_tenant_id',
- 'created': self.TIMESTAMP,
- 'updated': self.TIMESTAMP,
- "progress": 0,
- "name": "test_server-" + u'\u89e3\u7801',
- "status": "BUILD",
- "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0',
- "accessIPv4": "1.2.3.4",
- "accessIPv6": "fead::1234",
- "image": {
- "id": "5",
- "links": [
- {
- "rel": "bookmark",
- "href": self.IMAGE_BOOKMARK,
- },
- ],
- },
- "flavor": {
- "id": "1",
- "links": [
- {
- "rel": "bookmark",
- "href": self.FLAVOR_BOOKMARK,
- },
- ],
- },
- "addresses": {
- "network_one": [
- {
- "version": 4,
- "addr": "67.23.10.138",
- },
- {
- "version": 6,
- "addr": "::babe:67.23.10.138",
- },
- ],
- "network_two": [
- {
- "version": 4,
- "addr": "67.23.10.139",
- },
- {
- "version": 6,
- "addr": "::babe:67.23.10.139",
- },
- ],
- },
- "metadata": {
- "Open": "Stack",
- "Number": "1",
- },
- 'links': [
- {
- 'href': self.SERVER_HREF,
- 'rel': 'self',
- },
- {
- 'href': self.SERVER_BOOKMARK,
- 'rel': 'bookmark',
- },
- ],
- }
- }
-
- def _validate_xml(self, root, server_dict):
-
- link_nodes = root.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(server_dict['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- metadata_root = root.find('{0}metadata'.format(NS))
- metadata_elems = metadata_root.findall('{0}meta'.format(NS))
- self.assertEqual(len(metadata_elems), 2)
- for i, metadata_elem in enumerate(metadata_elems):
- (meta_key, meta_value) = server_dict['metadata'].items()[i]
- self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
- self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
-
- image_root = root.find('{0}image'.format(NS))
- self.assertEqual(image_root.get('id'), server_dict['image']['id'])
- link_nodes = image_root.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 1)
- for i, link in enumerate(server_dict['image']['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- flavor_root = root.find('{0}flavor'.format(NS))
- self.assertEqual(flavor_root.get('id'), server_dict['flavor']['id'])
- link_nodes = flavor_root.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 1)
- for i, link in enumerate(server_dict['flavor']['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- addresses_root = root.find('{0}addresses'.format(NS))
- addresses_dict = server_dict['addresses']
- network_elems = addresses_root.findall('{0}network'.format(NS))
- self.assertEqual(len(network_elems), 2)
- for i, network_elem in enumerate(network_elems):
- network = addresses_dict.items()[i]
- self.assertEqual(str(network_elem.get('id')), str(network[0]))
- ip_elems = network_elem.findall('{0}ip'.format(NS))
- for z, ip_elem in enumerate(ip_elems):
- ip = network[1][z]
- self.assertEqual(str(ip_elem.get('version')),
- str(ip['version']))
- self.assertEqual(str(ip_elem.get('addr')),
- str(ip['addr']))
-
- def _validate_required_attributes(self, root, server_dict, attributes):
- for key in attributes:
- expected = server_dict[key]
- if not isinstance(expected, six.text_type):
- expected = str(expected)
- self.assertEqual(expected, root.get(key))
-
- def test_xml_declaration(self):
- serializer = servers.ServerTemplate()
-
- output = serializer.serialize(self.body)
- has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
- self.assertTrue(has_dec)
-
- def test_show(self):
- serializer = servers.ServerTemplate()
-
- output = serializer.serialize(self.body)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'server')
-
- server_dict = self.body['server']
-
- self._validate_required_attributes(root, server_dict,
- self.USERS_ATTRIBUTES)
- self._validate_xml(root, server_dict)
-
- def test_create(self):
- serializer = servers.FullServerTemplate()
-
- self.body["server"]["adminPass"] = "test_password"
-
- output = serializer.serialize(self.body)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'server')
-
- server_dict = self.body['server']
-
- self._validate_required_attributes(root, server_dict,
- self.ADMINS_ATTRIBUTES)
- self._validate_xml(root, server_dict)
-
- def test_index(self):
- serializer = servers.MinimalServersTemplate()
-
- uuid1 = fakes.get_fake_uuid(1)
- uuid2 = fakes.get_fake_uuid(2)
- expected_server_href = 'http://localhost/v2/servers/%s' % uuid1
- expected_server_bookmark = 'http://localhost/servers/%s' % uuid1
- expected_server_href_2 = 'http://localhost/v2/servers/%s' % uuid2
- expected_server_bookmark_2 = 'http://localhost/servers/%s' % uuid2
- fixture = {"servers": [
- {
- "id": fakes.get_fake_uuid(1),
- "name": "test_server",
- 'links': [
- {
- 'href': expected_server_href,
- 'rel': 'self',
- },
- {
- 'href': expected_server_bookmark,
- 'rel': 'bookmark',
- },
- ],
- },
- {
- "id": fakes.get_fake_uuid(2),
- "name": "test_server_2",
- 'links': [
- {
- 'href': expected_server_href_2,
- 'rel': 'self',
- },
- {
- 'href': expected_server_bookmark_2,
- 'rel': 'bookmark',
- },
- ],
- },
- ]}
-
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'servers')
- server_elems = root.findall('{0}server'.format(NS))
- self.assertEqual(len(server_elems), 2)
- for i, server_elem in enumerate(server_elems):
- server_dict = fixture['servers'][i]
- for key in ['name', 'id']:
- self.assertEqual(server_elem.get(key), str(server_dict[key]))
-
- link_nodes = server_elem.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(server_dict['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- def test_index_with_servers_links(self):
- serializer = servers.MinimalServersTemplate()
-
- uuid1 = fakes.get_fake_uuid(1)
- uuid2 = fakes.get_fake_uuid(2)
- expected_server_href = 'http://localhost/v2/servers/%s' % uuid1
- expected_server_next = self.SERVER_NEXT % (2, 2)
- expected_server_bookmark = 'http://localhost/servers/%s' % uuid1
- expected_server_href_2 = 'http://localhost/v2/servers/%s' % uuid2
- expected_server_bookmark_2 = 'http://localhost/servers/%s' % uuid2
- fixture = {"servers": [
- {
- "id": fakes.get_fake_uuid(1),
- "name": "test_server",
- 'links': [
- {
- 'href': expected_server_href,
- 'rel': 'self',
- },
- {
- 'href': expected_server_bookmark,
- 'rel': 'bookmark',
- },
- ],
- },
- {
- "id": fakes.get_fake_uuid(2),
- "name": "test_server_2",
- 'links': [
- {
- 'href': expected_server_href_2,
- 'rel': 'self',
- },
- {
- 'href': expected_server_bookmark_2,
- 'rel': 'bookmark',
- },
- ],
- },
- ],
- "servers_links": [
- {
- 'rel': 'next',
- 'href': expected_server_next,
- },
- ]}
-
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'servers')
- server_elems = root.findall('{0}server'.format(NS))
- self.assertEqual(len(server_elems), 2)
- for i, server_elem in enumerate(server_elems):
- server_dict = fixture['servers'][i]
- for key in ['name', 'id']:
- self.assertEqual(server_elem.get(key), str(server_dict[key]))
-
- link_nodes = server_elem.findall('{0}link'.format(ATOMNS))
- self.assertEqual(len(link_nodes), 2)
- for i, link in enumerate(server_dict['links']):
- for key, value in link.items():
- self.assertEqual(link_nodes[i].get(key), value)
-
- # Check servers_links
- servers_links = root.findall('{0}link'.format(ATOMNS))
- for i, link in enumerate(fixture['servers_links']):
- for key, value in link.items():
- self.assertEqual(servers_links[i].get(key), value)
-
- def test_detail(self):
- serializer = servers.ServersTemplate()
-
- uuid1 = fakes.get_fake_uuid(1)
- expected_server_href = 'http://localhost/v2/servers/%s' % uuid1
- expected_server_bookmark = 'http://localhost/servers/%s' % uuid1
- expected_image_bookmark = self.IMAGE_BOOKMARK
- expected_flavor_bookmark = self.FLAVOR_BOOKMARK
-
- uuid2 = fakes.get_fake_uuid(2)
- expected_server_href_2 = 'http://localhost/v2/servers/%s' % uuid2
- expected_server_bookmark_2 = 'http://localhost/servers/%s' % uuid2
- fixture = {"servers": [
- {
- "id": fakes.get_fake_uuid(1),
- "user_id": "fake",
- "tenant_id": "fake",
- 'created': self.TIMESTAMP,
- 'updated': self.TIMESTAMP,
- "progress": 0,
- "name": "test_server",
- "status": "BUILD",
- "accessIPv4": "1.2.3.4",
- "accessIPv6": "fead::1234",
- "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0',
- "image": {
- "id": "5",
- "links": [
- {
- "rel": "bookmark",
- "href": expected_image_bookmark,
- },
- ],
- },
- "flavor": {
- "id": "1",
- "links": [
- {
- "rel": "bookmark",
- "href": expected_flavor_bookmark,
- },
- ],
- },
- "addresses": {
- "network_one": [
- {
- "version": 4,
- "addr": "67.23.10.138",
- },
- {
- "version": 6,
- "addr": "::babe:67.23.10.138",
- },
- ],
- "network_two": [
- {
- "version": 4,
- "addr": "67.23.10.139",
- },
- {
- "version": 6,
- "addr": "::babe:67.23.10.139",
- },
- ],
- },
- "metadata": {
- "Open": "Stack",
- "Number": "1",
- },
- "links": [
- {
- "href": expected_server_href,
- "rel": "self",
- },
- {
- "href": expected_server_bookmark,
- "rel": "bookmark",
- },
- ],
- },
- {
- "id": fakes.get_fake_uuid(2),
- "user_id": 'fake',
- "tenant_id": 'fake',
- 'created': self.TIMESTAMP,
- 'updated': self.TIMESTAMP,
- "progress": 100,
- "name": "test_server_2",
- "status": "ACTIVE",
- "accessIPv4": "1.2.3.4",
- "accessIPv6": "fead::1234",
- "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0',
- "image": {
- "id": "5",
- "links": [
- {
- "rel": "bookmark",
- "href": expected_image_bookmark,
- },
- ],
- },
- "flavor": {
- "id": "1",
- "links": [
- {
- "rel": "bookmark",
- "href": expected_flavor_bookmark,
- },
- ],
- },
- "addresses": {
- "network_one": [
- {
- "version": 4,
- "addr": "67.23.10.138",
- },
- {
- "version": 6,
- "addr": "::babe:67.23.10.138",
- },
- ],
- "network_two": [
- {
- "version": 4,
- "addr": "67.23.10.139",
- },
- {
- "version": 6,
- "addr": "::babe:67.23.10.139",
- },
- ],
- },
- "metadata": {
- "Open": "Stack",
- "Number": "2",
- },
- "links": [
- {
- "href": expected_server_href_2,
- "rel": "self",
- },
- {
- "href": expected_server_bookmark_2,
- "rel": "bookmark",
- },
- ],
- },
- ]}
-
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'servers')
- server_elems = root.findall('{0}server'.format(NS))
- self.assertEqual(len(server_elems), 2)
- for i, server_elem in enumerate(server_elems):
- server_dict = fixture['servers'][i]
- self._validate_required_attributes(server_elem, server_dict,
- self.USERS_ATTRIBUTES)
- self._validate_xml(server_elem, server_dict)
-
- def test_update(self):
- serializer = servers.ServerTemplate()
-
- self.body["server"]["fault"] = {
- "code": 500,
- "created": self.TIMESTAMP,
- "message": "Error Message",
- "details": "Fault details",
- }
- output = serializer.serialize(self.body)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'server')
-
- server_dict = self.body['server']
-
- self._validate_required_attributes(root, server_dict,
- self.USERS_ATTRIBUTES)
-
- self._validate_xml(root, server_dict)
- fault_root = root.find('{0}fault'.format(NS))
- fault_dict = server_dict['fault']
- self.assertEqual(fault_root.get("code"), str(fault_dict["code"]))
- self.assertEqual(fault_root.get("created"), fault_dict["created"])
- msg_elem = fault_root.find('{0}message'.format(NS))
- self.assertEqual(msg_elem.text, fault_dict["message"])
- det_elem = fault_root.find('{0}details'.format(NS))
- self.assertEqual(det_elem.text, fault_dict["details"])
-
- def test_action(self):
- serializer = servers.FullServerTemplate()
-
- self.body["server"]["adminPass"] = u'\u89e3\u7801'
- output = serializer.serialize(self.body)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'server')
-
- server_dict = self.body['server']
-
- self._validate_required_attributes(root, server_dict,
- self.ADMINS_ATTRIBUTES)
-
- self._validate_xml(root, server_dict)
-
-
-class ServersAllExtensionsTestCase(test.TestCase):
- """Servers tests using default API router with all extensions enabled.
-
- The intent here is to catch cases where extensions end up throwing
- an exception because of a malformed request before the core API
- gets a chance to validate the request and return a 422 response.
-
- For example, ServerDiskConfigController extends servers.Controller::
-
- | @wsgi.extends
- | def create(self, req, body):
- | if 'server' in body:
- | self._set_disk_config(body['server'])
- | resp_obj = (yield)
- | self._show(req, resp_obj)
-
- we want to ensure that the extension isn't barfing on an invalid
- body.
- """
-
- def setUp(self):
- super(ServersAllExtensionsTestCase, self).setUp()
- self.app = compute.APIRouter()
-
- def test_create_missing_server(self):
- # Test create with malformed body.
-
- def fake_create(*args, **kwargs):
- raise test.TestingException("Should not reach the compute API.")
-
- self.stubs.Set(compute_api.API, 'create', fake_create)
-
- req = fakes.HTTPRequest.blank('/fake/servers')
- req.method = 'POST'
- req.content_type = 'application/json'
- body = {'foo': {'a': 'b'}}
-
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- self.assertEqual(422, res.status_int)
-
- def test_update_missing_server(self):
- # Test update with malformed body.
-
- def fake_update(*args, **kwargs):
- raise test.TestingException("Should not reach the compute API.")
-
- self.stubs.Set(compute_api.API, 'update', fake_update)
-
- req = fakes.HTTPRequest.blank('/fake/servers/1')
- req.method = 'PUT'
- req.content_type = 'application/json'
- body = {'foo': {'a': 'b'}}
-
- req.body = jsonutils.dumps(body)
- res = req.get_response(self.app)
- self.assertEqual(422, res.status_int)
-
-
-class ServersUnprocessableEntityTestCase(test.TestCase):
- """Tests of places we throw 422 Unprocessable Entity from."""
-
- def setUp(self):
- super(ServersUnprocessableEntityTestCase, self).setUp()
- self.ext_mgr = extensions.ExtensionManager()
- self.ext_mgr.extensions = {}
- self.controller = servers.Controller(self.ext_mgr)
-
- def _unprocessable_server_create(self, body):
- req = fakes.HTTPRequest.blank('/fake/servers')
- req.method = 'POST'
-
- self.assertRaises(webob.exc.HTTPUnprocessableEntity,
- self.controller.create, req, body)
-
- def test_create_server_no_body(self):
- self._unprocessable_server_create(body=None)
-
- def test_create_server_missing_server(self):
- body = {'foo': {'a': 'b'}}
- self._unprocessable_server_create(body=body)
-
- def test_create_server_malformed_entity(self):
- body = {'server': 'string'}
- self._unprocessable_server_create(body=body)
-
- def _unprocessable_server_update(self, body):
- req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
- req.method = 'PUT'
-
- self.assertRaises(webob.exc.HTTPUnprocessableEntity,
- self.controller.update, req, FAKE_UUID, body)
-
- def test_update_server_no_body(self):
- self._unprocessable_server_update(body=None)
-
- def test_update_server_missing_server(self):
- body = {'foo': {'a': 'b'}}
- self._unprocessable_server_update(body=body)
-
- def test_create_update_malformed_entity(self):
- body = {'server': 'string'}
- self._unprocessable_server_update(body=body)
diff --git a/nova/tests/api/openstack/compute/test_urlmap.py b/nova/tests/api/openstack/compute/test_urlmap.py
deleted file mode 100644
index f1f5f60cfe..0000000000
--- a/nova/tests/api/openstack/compute/test_urlmap.py
+++ /dev/null
@@ -1,171 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.serialization import jsonutils
-import webob
-
-from nova import test
-from nova.tests.api.openstack import fakes
-import nova.tests.image.fake
-
-
-class UrlmapTest(test.NoDBTestCase):
- def setUp(self):
- super(UrlmapTest, self).setUp()
- fakes.stub_out_rate_limiting(self.stubs)
- nova.tests.image.fake.stub_out_image_service(self.stubs)
-
- def tearDown(self):
- super(UrlmapTest, self).tearDown()
- nova.tests.image.fake.FakeImageService_reset()
-
- def test_path_version_v1_1(self):
- # Test URL path specifying v1.1 returns v2 content.
- req = webob.Request.blank('/v1.1/')
- req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/json")
- body = jsonutils.loads(res.body)
- self.assertEqual(body['version']['id'], 'v2.0')
-
- def test_content_type_version_v1_1(self):
- # Test Content-Type specifying v1.1 returns v2 content.
- req = webob.Request.blank('/')
- req.content_type = "application/json;version=1.1"
- req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/json")
- body = jsonutils.loads(res.body)
- self.assertEqual(body['version']['id'], 'v2.0')
-
- def test_accept_version_v1_1(self):
- # Test Accept header specifying v1.1 returns v2 content.
- req = webob.Request.blank('/')
- req.accept = "application/json;version=1.1"
- res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/json")
- body = jsonutils.loads(res.body)
- self.assertEqual(body['version']['id'], 'v2.0')
-
- def test_path_version_v2(self):
- # Test URL path specifying v2 returns v2 content.
- req = webob.Request.blank('/v2/')
- req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/json")
- body = jsonutils.loads(res.body)
- self.assertEqual(body['version']['id'], 'v2.0')
-
- def test_content_type_version_v2(self):
- # Test Content-Type specifying v2 returns v2 content.
- req = webob.Request.blank('/')
- req.content_type = "application/json;version=2"
- req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/json")
- body = jsonutils.loads(res.body)
- self.assertEqual(body['version']['id'], 'v2.0')
-
- def test_accept_version_v2(self):
- # Test Accept header specifying v2 returns v2 content.
- req = webob.Request.blank('/')
- req.accept = "application/json;version=2"
- res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/json")
- body = jsonutils.loads(res.body)
- self.assertEqual(body['version']['id'], 'v2.0')
-
- def test_path_content_type(self):
- # Test URL path specifying JSON returns JSON content.
- url = '/v2/fake/images/cedef40a-ed67-4d10-800e-17455edce175.json'
- req = webob.Request.blank(url)
- req.accept = "application/xml"
- res = req.get_response(fakes.wsgi_app(init_only=('images',)))
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/json")
- body = jsonutils.loads(res.body)
- self.assertEqual(body['image']['id'],
- 'cedef40a-ed67-4d10-800e-17455edce175')
-
- def test_accept_content_type(self):
- # Test Accept header specifying JSON returns JSON content.
- url = '/v2/fake/images/cedef40a-ed67-4d10-800e-17455edce175'
- req = webob.Request.blank(url)
- req.accept = "application/xml;q=0.8, application/json"
- res = req.get_response(fakes.wsgi_app(init_only=('images',)))
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/json")
- body = jsonutils.loads(res.body)
- self.assertEqual(body['image']['id'],
- 'cedef40a-ed67-4d10-800e-17455edce175')
-
- def test_path_version_v21(self):
- # Test URL path specifying v2.1 returns v2.1 content.
- req = webob.Request.blank('/v2.1/')
- req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app_v21(init_only=('versions',)))
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/json")
- body = jsonutils.loads(res.body)
- self.assertEqual(body['version']['id'], 'v2.1')
-
- def test_content_type_version_v21(self):
- # Test Content-Type specifying v2.1 returns v2 content.
- req = webob.Request.blank('/')
- req.content_type = "application/json;version=2.1"
- req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app_v21(init_only=('versions',)))
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/json")
- body = jsonutils.loads(res.body)
- self.assertEqual(body['version']['id'], 'v2.1')
-
- def test_accept_version_v21(self):
- # Test Accept header specifying v2.1 returns v2.1 content.
- req = webob.Request.blank('/')
- req.accept = "application/json;version=2.1"
- res = req.get_response(fakes.wsgi_app_v21(init_only=('versions',)))
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/json")
- body = jsonutils.loads(res.body)
- self.assertEqual(body['version']['id'], 'v2.1')
-
- def test_path_content_type_v21(self):
- # Test URL path specifying JSON returns JSON content.
- url = '/v2.1/fake/extensions/extensions.json'
- req = webob.Request.blank(url)
- req.accept = "application/xml"
- res = req.get_response(fakes.wsgi_app_v21())
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/json")
- body = jsonutils.loads(res.body)
- self.assertEqual(body['extension']['name'], 'Extensions')
-
- def test_accept_content_type_v21(self):
- # Test Accept header specifying JSON returns JSON content.
- url = '/v2.1/fake/extensions/extensions'
- req = webob.Request.blank(url)
- req.accept = "application/xml;q=0.8, application/json"
- res = req.get_response(fakes.wsgi_app_v21(init_only=('extensions',)))
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/json")
- body = jsonutils.loads(res.body)
- self.assertEqual(body['extension']['name'], 'Extensions')
diff --git a/nova/tests/api/openstack/compute/test_v3_auth.py b/nova/tests/api/openstack/compute/test_v3_auth.py
deleted file mode 100644
index 5a42b758f5..0000000000
--- a/nova/tests/api/openstack/compute/test_v3_auth.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2013 IBM Corp.
-# Copyright 2010 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import webob
-import webob.dec
-
-from nova import context
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-class TestNoAuthMiddlewareV3(test.NoDBTestCase):
-
- def setUp(self):
- super(TestNoAuthMiddlewareV3, self).setUp()
- self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext)
- fakes.stub_out_rate_limiting(self.stubs)
- fakes.stub_out_networking(self.stubs)
-
- def test_authorize_user(self):
- req = webob.Request.blank('/v2/fake')
- req.headers['X-Auth-User'] = 'user1'
- req.headers['X-Auth-Key'] = 'user1_key'
- req.headers['X-Auth-Project-Id'] = 'user1_project'
- result = req.get_response(fakes.wsgi_app_v21(use_no_auth=True))
- self.assertEqual(result.status, '204 No Content')
- self.assertEqual(result.headers['X-Server-Management-Url'],
- "http://localhost/v2/fake")
-
- def test_authorize_user_trailing_slash(self):
- # make sure it works with trailing slash on the request
- req = webob.Request.blank('/v2/fake/')
- req.headers['X-Auth-User'] = 'user1'
- req.headers['X-Auth-Key'] = 'user1_key'
- req.headers['X-Auth-Project-Id'] = 'user1_project'
- result = req.get_response(fakes.wsgi_app_v21(use_no_auth=True))
- self.assertEqual(result.status, '204 No Content')
- self.assertEqual(result.headers['X-Server-Management-Url'],
- "http://localhost/v2/fake")
-
- def test_auth_token_no_empty_headers(self):
- req = webob.Request.blank('/v2/fake')
- req.headers['X-Auth-User'] = 'user1'
- req.headers['X-Auth-Key'] = 'user1_key'
- req.headers['X-Auth-Project-Id'] = 'user1_project'
- result = req.get_response(fakes.wsgi_app_v21(use_no_auth=True))
- self.assertEqual(result.status, '204 No Content')
- self.assertNotIn('X-CDN-Management-Url', result.headers)
- self.assertNotIn('X-Storage-Url', result.headers)
diff --git a/nova/tests/api/openstack/compute/test_versions.py b/nova/tests/api/openstack/compute/test_versions.py
deleted file mode 100644
index baec98b9ee..0000000000
--- a/nova/tests/api/openstack/compute/test_versions.py
+++ /dev/null
@@ -1,797 +0,0 @@
-# Copyright 2010-2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import uuid as stdlib_uuid
-
-import feedparser
-from lxml import etree
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.openstack.compute import versions
-from nova.api.openstack.compute import views
-from nova.api.openstack import xmlutil
-from nova import test
-from nova.tests.api.openstack import common
-from nova.tests.api.openstack import fakes
-from nova.tests import matchers
-
-
-NS = {
- 'atom': 'http://www.w3.org/2005/Atom',
- 'ns': 'http://docs.openstack.org/common/api/v1.0'
-}
-
-
-EXP_LINKS = {
- 'v2.0': {
- 'html': 'http://docs.openstack.org/',
- },
- 'v2.1': {
- 'html': 'http://docs.openstack.org/'
- },
-}
-
-
-EXP_VERSIONS = {
- "v2.0": {
- "id": "v2.0",
- "status": "CURRENT",
- "updated": "2011-01-21T11:33:21Z",
- "links": [
- {
- "rel": "describedby",
- "type": "text/html",
- "href": EXP_LINKS['v2.0']['html'],
- },
- ],
- "media-types": [
- {
- "base": "application/xml",
- "type": "application/vnd.openstack.compute+xml;version=2",
- },
- {
- "base": "application/json",
- "type": "application/vnd.openstack.compute+json;version=2",
- },
- ],
- },
- "v2.1": {
- "id": "v2.1",
- "status": "EXPERIMENTAL",
- "updated": "2013-07-23T11:33:21Z",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2.1/",
- },
- {
- "rel": "describedby",
- "type": "text/html",
- "href": EXP_LINKS['v2.1']['html'],
- },
- ],
- "media-types": [
- {
- "base": "application/json",
- "type": "application/vnd.openstack.compute+json;version=2.1",
- }
- ],
- }
-}
-
-
-class VersionsTestV20(test.NoDBTestCase):
-
- def test_get_version_list(self):
- req = webob.Request.blank('/')
- req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/json")
- versions = jsonutils.loads(res.body)["versions"]
- expected = [
- {
- "id": "v2.0",
- "status": "CURRENT",
- "updated": "2011-01-21T11:33:21Z",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/",
- }],
- },
- {
- "id": "v2.1",
- "status": "EXPERIMENTAL",
- "updated": "2013-07-23T11:33:21Z",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/",
- }],
- },
- ]
- self.assertEqual(versions, expected)
-
- def test_get_version_list_302(self):
- req = webob.Request.blank('/v2')
- req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 302)
- redirect_req = webob.Request.blank('/v2/')
- self.assertEqual(res.location, redirect_req.url)
-
- def _test_get_version_2_detail(self, url, accept=None):
- if accept is None:
- accept = "application/json"
- req = webob.Request.blank(url)
- req.accept = accept
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/json")
- version = jsonutils.loads(res.body)
- expected = {
- "version": {
- "id": "v2.0",
- "status": "CURRENT",
- "updated": "2011-01-21T11:33:21Z",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/",
- },
- {
- "rel": "describedby",
- "type": "text/html",
- "href": EXP_LINKS['v2.0']['html'],
- },
- ],
- "media-types": [
- {
- "base": "application/xml",
- "type": "application/"
- "vnd.openstack.compute+xml;version=2",
- },
- {
- "base": "application/json",
- "type": "application/"
- "vnd.openstack.compute+json;version=2",
- },
- ],
- },
- }
- self.assertEqual(expected, version)
-
- def test_get_version_2_detail(self):
- self._test_get_version_2_detail('/v2/')
-
- def test_get_version_2_detail_content_type(self):
- accept = "application/json;version=2"
- self._test_get_version_2_detail('/', accept=accept)
-
- def test_get_version_2_versions_invalid(self):
- req = webob.Request.blank('/v2/versions/1234')
- req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(404, res.status_int)
-
- def test_get_version_2_detail_xml(self):
- req = webob.Request.blank('/v2/')
- req.accept = "application/xml"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/xml")
-
- version = etree.XML(res.body)
- xmlutil.validate_schema(version, 'version')
-
- expected = EXP_VERSIONS['v2.0']
- self.assertTrue(version.xpath('/ns:version', namespaces=NS))
- media_types = version.xpath('ns:media-types/ns:media-type',
- namespaces=NS)
- self.assertTrue(common.compare_media_types(media_types,
- expected['media-types']))
- for key in ['id', 'status', 'updated']:
- self.assertEqual(version.get(key), expected[key])
- links = version.xpath('atom:link', namespaces=NS)
- self.assertTrue(common.compare_links(links,
- [{'rel': 'self', 'href': 'http://localhost/v2/'}]
- + expected['links']))
-
- def test_get_version_list_xml(self):
- req = webob.Request.blank('/')
- req.accept = "application/xml"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/xml")
-
- root = etree.XML(res.body)
- xmlutil.validate_schema(root, 'versions')
-
- self.assertTrue(root.xpath('/ns:versions', namespaces=NS))
- versions = root.xpath('ns:version', namespaces=NS)
- self.assertEqual(len(versions), 2)
-
- for i, v in enumerate(['v2.0', 'v2.1']):
- version = versions[i]
- expected = EXP_VERSIONS[v]
- for key in ['id', 'status', 'updated']:
- self.assertEqual(version.get(key), expected[key])
- (link,) = version.xpath('atom:link', namespaces=NS)
- self.assertTrue(common.compare_links(link,
- [{'rel': 'self', 'href': 'http://localhost/%s/' % v}]))
-
- def test_get_version_2_detail_atom(self):
- req = webob.Request.blank('/v2/')
- req.accept = "application/atom+xml"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- self.assertEqual("application/atom+xml", res.content_type)
-
- xmlutil.validate_schema(etree.XML(res.body), 'atom')
-
- f = feedparser.parse(res.body)
- self.assertEqual(f.feed.title, 'About This Version')
- self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z')
- self.assertEqual(f.feed.id, 'http://localhost/v2/')
- self.assertEqual(f.feed.author, 'Rackspace')
- self.assertEqual(f.feed.author_detail.href,
- 'http://www.rackspace.com/')
- self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v2/')
- self.assertEqual(f.feed.links[0]['rel'], 'self')
-
- self.assertEqual(len(f.entries), 1)
- entry = f.entries[0]
- self.assertEqual(entry.id, 'http://localhost/v2/')
- self.assertEqual(entry.title, 'Version v2.0')
- self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
- self.assertEqual(len(entry.content), 1)
- self.assertEqual(entry.content[0].value,
- 'Version v2.0 CURRENT (2011-01-21T11:33:21Z)')
- self.assertEqual(len(entry.links), 2)
- self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
- self.assertEqual(entry.links[0]['rel'], 'self')
- self.assertEqual(entry.links[1], {
- 'href': EXP_LINKS['v2.0']['html'],
- 'type': 'text/html',
- 'rel': 'describedby'})
-
- def test_get_version_list_atom(self):
- req = webob.Request.blank('/')
- req.accept = "application/atom+xml"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/atom+xml")
-
- f = feedparser.parse(res.body)
- self.assertEqual(f.feed.title, 'Available API Versions')
- self.assertEqual(f.feed.updated, '2013-07-23T11:33:21Z')
- self.assertEqual(f.feed.id, 'http://localhost/')
- self.assertEqual(f.feed.author, 'Rackspace')
- self.assertEqual(f.feed.author_detail.href,
- 'http://www.rackspace.com/')
- self.assertEqual(f.feed.links[0]['href'], 'http://localhost/')
- self.assertEqual(f.feed.links[0]['rel'], 'self')
-
- self.assertEqual(len(f.entries), 2)
- entry = f.entries[0]
- self.assertEqual(entry.id, 'http://localhost/v2/')
- self.assertEqual(entry.title, 'Version v2.0')
- self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
- self.assertEqual(len(entry.content), 1)
- self.assertEqual(entry.content[0].value,
- 'Version v2.0 CURRENT (2011-01-21T11:33:21Z)')
- self.assertEqual(len(entry.links), 1)
- self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
- self.assertEqual(entry.links[0]['rel'], 'self')
-
- entry = f.entries[1]
- self.assertEqual(entry.id, 'http://localhost/v2/')
- self.assertEqual(entry.title, 'Version v2.1')
- self.assertEqual(entry.updated, '2013-07-23T11:33:21Z')
- self.assertEqual(len(entry.content), 1)
- self.assertEqual(entry.content[0].value,
- 'Version v2.1 EXPERIMENTAL (2013-07-23T11:33:21Z)')
- self.assertEqual(len(entry.links), 1)
- self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
- self.assertEqual(entry.links[0]['rel'], 'self')
-
- def test_multi_choice_image(self):
- req = webob.Request.blank('/images/1')
- req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 300)
- self.assertEqual(res.content_type, "application/json")
-
- expected = {
- "choices": [
- {
- "id": "v2.0",
- "status": "CURRENT",
- "links": [
- {
- "href": "http://localhost/v2/images/1",
- "rel": "self",
- },
- ],
- "media-types": [
- {
- "base": "application/xml",
- "type": "application/vnd.openstack.compute+xml"
- ";version=2"
- },
- {
- "base": "application/json",
- "type": "application/vnd.openstack.compute+json"
- ";version=2"
- },
- ],
- },
- {
- "id": "v2.1",
- "status": "EXPERIMENTAL",
- "links": [
- {
- "href": "http://localhost/v2/images/1",
- "rel": "self",
- },
- ],
- "media-types": [
- {
- "base": "application/json",
- "type":
- "application/vnd.openstack.compute+json;version=2.1",
- }
- ],
- },
- ], }
-
- self.assertThat(jsonutils.loads(res.body),
- matchers.DictMatches(expected))
-
- def test_multi_choice_image_xml(self):
- req = webob.Request.blank('/images/1')
- req.accept = "application/xml"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 300)
- self.assertEqual(res.content_type, "application/xml")
-
- root = etree.XML(res.body)
- self.assertTrue(root.xpath('/ns:choices', namespaces=NS))
- versions = root.xpath('ns:version', namespaces=NS)
- self.assertEqual(len(versions), 2)
-
- version = versions[0]
- self.assertEqual(version.get('id'), 'v2.0')
- self.assertEqual(version.get('status'), 'CURRENT')
- media_types = version.xpath('ns:media-types/ns:media-type',
- namespaces=NS)
- self.assertTrue(common.
- compare_media_types(media_types,
- EXP_VERSIONS['v2.0']['media-types']
- ))
-
- links = version.xpath('atom:link', namespaces=NS)
- self.assertTrue(common.compare_links(links,
- [{'rel': 'self', 'href': 'http://localhost/v2/images/1'}]))
-
- version = versions[1]
- self.assertEqual(version.get('id'), 'v2.1')
- self.assertEqual(version.get('status'), 'EXPERIMENTAL')
- media_types = version.xpath('ns:media-types/ns:media-type',
- namespaces=NS)
- self.assertTrue(common.
- compare_media_types(media_types,
- EXP_VERSIONS['v2.1']['media-types']
- ))
-
- links = version.xpath('atom:link', namespaces=NS)
- self.assertTrue(common.compare_links(links,
- [{'rel': 'self', 'href': 'http://localhost/v2/images/1'}]))
-
- def test_multi_choice_server_atom(self):
- """Make sure multi choice responses do not have content-type
- application/atom+xml (should use default of json)
- """
- req = webob.Request.blank('/servers')
- req.accept = "application/atom+xml"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 300)
- self.assertEqual(res.content_type, "application/json")
-
- def test_multi_choice_server(self):
- uuid = str(stdlib_uuid.uuid4())
- req = webob.Request.blank('/servers/' + uuid)
- req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 300)
- self.assertEqual(res.content_type, "application/json")
-
- expected = {
- "choices": [
- {
- "id": "v2.0",
- "status": "CURRENT",
- "links": [
- {
- "href": "http://localhost/v2/servers/" + uuid,
- "rel": "self",
- },
- ],
- "media-types": [
- {
- "base": "application/xml",
- "type": "application/vnd.openstack.compute+xml"
- ";version=2"
- },
- {
- "base": "application/json",
- "type": "application/vnd.openstack.compute+json"
- ";version=2"
- },
- ],
- },
- {
- "id": "v2.1",
- "status": "EXPERIMENTAL",
- "links": [
- {
- "href": "http://localhost/v2/servers/" + uuid,
- "rel": "self",
- },
- ],
- "media-types": [
- {
- "base": "application/json",
- "type":
- "application/vnd.openstack.compute+json;version=2.1",
- }
- ],
- },
- ], }
-
- self.assertThat(jsonutils.loads(res.body),
- matchers.DictMatches(expected))
-
-
-class VersionsViewBuilderTests(test.NoDBTestCase):
- def test_view_builder(self):
- base_url = "http://example.org/"
-
- version_data = {
- "v3.2.1": {
- "id": "3.2.1",
- "status": "CURRENT",
- "updated": "2011-07-18T11:30:00Z",
- }
- }
-
- expected = {
- "versions": [
- {
- "id": "3.2.1",
- "status": "CURRENT",
- "updated": "2011-07-18T11:30:00Z",
- "links": [
- {
- "rel": "self",
- "href": "http://example.org/v2/",
- },
- ],
- }
- ]
- }
-
- builder = views.versions.ViewBuilder(base_url)
- output = builder.build_versions(version_data)
-
- self.assertEqual(output, expected)
-
- def test_generate_href(self):
- base_url = "http://example.org/app/"
-
- expected = "http://example.org/app/v2/"
-
- builder = views.versions.ViewBuilder(base_url)
- actual = builder.generate_href('v2')
-
- self.assertEqual(actual, expected)
-
- def test_generate_href_v21(self):
- base_url = "http://example.org/app/"
-
- expected = "http://example.org/app/v2/"
-
- builder = views.versions.ViewBuilder(base_url)
- actual = builder.generate_href('v2.1')
-
- self.assertEqual(actual, expected)
-
- def test_generate_href_unknown(self):
- base_url = "http://example.org/app/"
-
- expected = "http://example.org/app/v2/"
-
- builder = views.versions.ViewBuilder(base_url)
- actual = builder.generate_href('foo')
-
- self.assertEqual(actual, expected)
-
-
-class VersionsSerializerTests(test.NoDBTestCase):
- def test_versions_list_xml_serializer(self):
- versions_data = {
- 'versions': [
- {
- "id": "2.7",
- "updated": "2011-07-18T11:30:00Z",
- "status": "DEPRECATED",
- "links": [
- {
- "rel": "self",
- "href": "http://test/v2",
- },
- ],
- },
- ]
- }
-
- serializer = versions.VersionsTemplate()
- response = serializer.serialize(versions_data)
-
- root = etree.XML(response)
- xmlutil.validate_schema(root, 'versions')
-
- self.assertTrue(root.xpath('/ns:versions', namespaces=NS))
- version_elems = root.xpath('ns:version', namespaces=NS)
- self.assertEqual(len(version_elems), 1)
- version = version_elems[0]
- self.assertEqual(version.get('id'), versions_data['versions'][0]['id'])
- self.assertEqual(version.get('status'),
- versions_data['versions'][0]['status'])
-
- (link,) = version.xpath('atom:link', namespaces=NS)
- self.assertTrue(common.compare_links(link, [{
- 'rel': 'self',
- 'href': 'http://test/v2',
- 'type': 'application/atom+xml'}]))
-
- def test_versions_multi_xml_serializer(self):
- versions_data = {
- 'choices': [
- {
- "id": "2.7",
- "updated": "2011-07-18T11:30:00Z",
- "status": "DEPRECATED",
- "media-types": EXP_VERSIONS['v2.0']['media-types'],
- "links": [
- {
- "rel": "self",
- "href": "http://test/v2/images",
- },
- ],
- },
- ]
- }
-
- serializer = versions.ChoicesTemplate()
- response = serializer.serialize(versions_data)
-
- root = etree.XML(response)
- self.assertTrue(root.xpath('/ns:choices', namespaces=NS))
- (version,) = root.xpath('ns:version', namespaces=NS)
- self.assertEqual(version.get('id'), versions_data['choices'][0]['id'])
- self.assertEqual(version.get('status'),
- versions_data['choices'][0]['status'])
-
- media_types = list(version)[0]
- self.assertEqual(media_types.tag.split('}')[1], "media-types")
-
- media_types = version.xpath('ns:media-types/ns:media-type',
- namespaces=NS)
- self.assertTrue(common.compare_media_types(media_types,
- versions_data['choices'][0]['media-types']))
-
- (link,) = version.xpath('atom:link', namespaces=NS)
- self.assertTrue(common.compare_links(link,
- versions_data['choices'][0]['links']))
-
- def test_versions_list_atom_serializer(self):
- versions_data = {
- 'versions': [
- {
- "id": "2.9.8",
- "updated": "2011-07-20T11:40:00Z",
- "status": "CURRENT",
- "links": [
- {
- "rel": "self",
- "href": "http://test/2.9.8",
- },
- ],
- },
- ]
- }
-
- serializer = versions.VersionsAtomSerializer()
- response = serializer.serialize(versions_data)
- f = feedparser.parse(response)
-
- self.assertEqual(f.feed.title, 'Available API Versions')
- self.assertEqual(f.feed.updated, '2011-07-20T11:40:00Z')
- self.assertEqual(f.feed.id, 'http://test/')
- self.assertEqual(f.feed.author, 'Rackspace')
- self.assertEqual(f.feed.author_detail.href,
- 'http://www.rackspace.com/')
- self.assertEqual(f.feed.links[0]['href'], 'http://test/')
- self.assertEqual(f.feed.links[0]['rel'], 'self')
-
- self.assertEqual(len(f.entries), 1)
- entry = f.entries[0]
- self.assertEqual(entry.id, 'http://test/2.9.8')
- self.assertEqual(entry.title, 'Version 2.9.8')
- self.assertEqual(entry.updated, '2011-07-20T11:40:00Z')
- self.assertEqual(len(entry.content), 1)
- self.assertEqual(entry.content[0].value,
- 'Version 2.9.8 CURRENT (2011-07-20T11:40:00Z)')
- self.assertEqual(len(entry.links), 1)
- self.assertEqual(entry.links[0]['href'], 'http://test/2.9.8')
- self.assertEqual(entry.links[0]['rel'], 'self')
-
- def test_version_detail_atom_serializer(self):
- versions_data = {
- "version": {
- "id": "v2.0",
- "status": "CURRENT",
- "updated": "2011-01-21T11:33:21Z",
- "links": [
- {
- "rel": "self",
- "href": "http://localhost/v2/",
- },
- {
- "rel": "describedby",
- "type": "text/html",
- "href": EXP_LINKS['v2.0']['html'],
- },
- ],
- "media-types": [
- {
- "base": "application/xml",
- "type": "application/vnd.openstack.compute+xml"
- ";version=2",
- },
- {
- "base": "application/json",
- "type": "application/vnd.openstack.compute+json"
- ";version=2",
- }
- ],
- },
- }
-
- serializer = versions.VersionAtomSerializer()
- response = serializer.serialize(versions_data)
- f = feedparser.parse(response)
-
- self.assertEqual(f.feed.title, 'About This Version')
- self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z')
- self.assertEqual(f.feed.id, 'http://localhost/v2/')
- self.assertEqual(f.feed.author, 'Rackspace')
- self.assertEqual(f.feed.author_detail.href,
- 'http://www.rackspace.com/')
- self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v2/')
- self.assertEqual(f.feed.links[0]['rel'], 'self')
-
- self.assertEqual(len(f.entries), 1)
- entry = f.entries[0]
- self.assertEqual(entry.id, 'http://localhost/v2/')
- self.assertEqual(entry.title, 'Version v2.0')
- self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
- self.assertEqual(len(entry.content), 1)
- self.assertEqual(entry.content[0].value,
- 'Version v2.0 CURRENT (2011-01-21T11:33:21Z)')
- self.assertEqual(len(entry.links), 2)
- self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
- self.assertEqual(entry.links[0]['rel'], 'self')
- self.assertEqual(entry.links[1], {
- 'rel': 'describedby',
- 'type': 'text/html',
- 'href': EXP_LINKS['v2.0']['html']})
-
- def test_multi_choice_image_with_body(self):
- req = webob.Request.blank('/images/1')
- req.accept = "application/json"
- req.method = 'POST'
- req.content_type = "application/json"
- req.body = "{\"foo\": \"bar\"}"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(300, res.status_int)
- self.assertEqual("application/json", res.content_type)
-
- def test_get_version_list_with_body(self):
- req = webob.Request.blank('/')
- req.accept = "application/json"
- req.method = 'POST'
- req.content_type = "application/json"
- req.body = "{\"foo\": \"bar\"}"
- res = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, res.status_int)
- self.assertEqual("application/json", res.content_type)
-
-
-# NOTE(oomichi): Now version API of v2.0 covers "/"(root).
-# So this class tests "/v2.1" only for v2.1 API.
-class VersionsTestV21(test.NoDBTestCase):
- exp_versions = copy.deepcopy(EXP_VERSIONS)
- exp_versions['v2.0']['links'].insert(0,
- {'href': 'http://localhost/v2.1/', 'rel': 'self'},
- )
-
- def test_get_version_list_302(self):
- req = webob.Request.blank('/v2.1')
- req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app_v21())
- self.assertEqual(res.status_int, 302)
- redirect_req = webob.Request.blank('/v2.1/')
- self.assertEqual(res.location, redirect_req.url)
-
- def test_get_version_21_detail(self):
- req = webob.Request.blank('/v2.1/')
- req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app_v21())
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/json")
- version = jsonutils.loads(res.body)
- expected = {"version": self.exp_versions['v2.1']}
- self.assertEqual(expected, version)
-
- def test_get_version_21_versions_v21_detail(self):
- req = webob.Request.blank('/v2.1/fake/versions/v2.1')
- req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app_v21())
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/json")
- version = jsonutils.loads(res.body)
- expected = {"version": self.exp_versions['v2.1']}
- self.assertEqual(expected, version)
-
- def test_get_version_21_versions_v20_detail(self):
- req = webob.Request.blank('/v2.1/fake/versions/v2.0')
- req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app_v21())
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/json")
- version = jsonutils.loads(res.body)
- expected = {"version": self.exp_versions['v2.0']}
- self.assertEqual(expected, version)
-
- def test_get_version_21_versions_invalid(self):
- req = webob.Request.blank('/v2.1/versions/1234')
- req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app_v21())
- self.assertEqual(res.status_int, 404)
-
- def test_get_version_21_detail_content_type(self):
- req = webob.Request.blank('/')
- req.accept = "application/json;version=2.1"
- res = req.get_response(fakes.wsgi_app_v21())
- self.assertEqual(res.status_int, 200)
- self.assertEqual(res.content_type, "application/json")
- version = jsonutils.loads(res.body)
- expected = {"version": self.exp_versions['v2.1']}
- self.assertEqual(expected, version)
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
deleted file mode 100644
index 6b70638322..0000000000
--- a/nova/tests/api/openstack/fakes.py
+++ /dev/null
@@ -1,662 +0,0 @@
-# Copyright 2010 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-import uuid
-
-from oslo.serialization import jsonutils
-from oslo.utils import timeutils
-import routes
-import six
-import webob
-import webob.dec
-import webob.request
-
-from nova.api import auth as api_auth
-from nova.api import openstack as openstack_api
-from nova.api.openstack import auth
-from nova.api.openstack import compute
-from nova.api.openstack.compute import limits
-from nova.api.openstack.compute import versions
-from nova.api.openstack import urlmap
-from nova.api.openstack import wsgi as os_wsgi
-from nova.compute import api as compute_api
-from nova.compute import flavors
-from nova.compute import vm_states
-from nova import context
-from nova.db.sqlalchemy import models
-from nova import exception as exc
-import nova.netconf
-from nova.network import api as network_api
-from nova import quota
-from nova.tests import fake_block_device
-from nova.tests import fake_network
-from nova.tests.objects import test_keypair
-from nova import utils
-from nova import wsgi
-
-
-QUOTAS = quota.QUOTAS
-
-
-FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-FAKE_UUIDS = {}
-
-
-class Context(object):
- pass
-
-
-class FakeRouter(wsgi.Router):
- def __init__(self, ext_mgr=None):
- pass
-
- @webob.dec.wsgify
- def __call__(self, req):
- res = webob.Response()
- res.status = '200'
- res.headers['X-Test-Success'] = 'True'
- return res
-
-
-@webob.dec.wsgify
-def fake_wsgi(self, req):
- return self.application
-
-
-def wsgi_app(inner_app_v2=None, fake_auth_context=None,
- use_no_auth=False, ext_mgr=None, init_only=None):
- if not inner_app_v2:
- inner_app_v2 = compute.APIRouter(ext_mgr, init_only)
-
- if use_no_auth:
- api_v2 = openstack_api.FaultWrapper(auth.NoAuthMiddleware(
- limits.RateLimitingMiddleware(inner_app_v2)))
- else:
- if fake_auth_context is not None:
- ctxt = fake_auth_context
- else:
- ctxt = context.RequestContext('fake', 'fake', auth_token=True)
- api_v2 = openstack_api.FaultWrapper(api_auth.InjectContext(ctxt,
- limits.RateLimitingMiddleware(inner_app_v2)))
-
- mapper = urlmap.URLMap()
- mapper['/v2'] = api_v2
- mapper['/v1.1'] = api_v2
- mapper['/'] = openstack_api.FaultWrapper(versions.Versions())
- return mapper
-
-
-def wsgi_app_v21(inner_app_v21=None, fake_auth_context=None,
- use_no_auth=False, ext_mgr=None, init_only=None):
- if not inner_app_v21:
- inner_app_v21 = compute.APIRouterV21(init_only)
-
- if use_no_auth:
- api_v21 = openstack_api.FaultWrapper(auth.NoAuthMiddlewareV3(
- limits.RateLimitingMiddleware(inner_app_v21)))
- else:
- if fake_auth_context is not None:
- ctxt = fake_auth_context
- else:
- ctxt = context.RequestContext('fake', 'fake', auth_token=True)
- api_v21 = openstack_api.FaultWrapper(api_auth.InjectContext(ctxt,
- limits.RateLimitingMiddleware(inner_app_v21)))
-
- mapper = urlmap.URLMap()
- mapper['/v2'] = api_v21
- mapper['/v2.1'] = api_v21
- return mapper
-
-
-def stub_out_key_pair_funcs(stubs, have_key_pair=True):
- def key_pair(context, user_id):
- return [dict(test_keypair.fake_keypair,
- name='key', public_key='public_key')]
-
- def one_key_pair(context, user_id, name):
- if name == 'key':
- return dict(test_keypair.fake_keypair,
- name='key', public_key='public_key')
- else:
- raise exc.KeypairNotFound(user_id=user_id, name=name)
-
- def no_key_pair(context, user_id):
- return []
-
- if have_key_pair:
- stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair)
- stubs.Set(nova.db, 'key_pair_get', one_key_pair)
- else:
- stubs.Set(nova.db, 'key_pair_get_all_by_user', no_key_pair)
-
-
-def stub_out_rate_limiting(stubs):
- def fake_rate_init(self, app):
- super(limits.RateLimitingMiddleware, self).__init__(app)
- self.application = app
-
- stubs.Set(nova.api.openstack.compute.limits.RateLimitingMiddleware,
- '__init__', fake_rate_init)
-
- stubs.Set(nova.api.openstack.compute.limits.RateLimitingMiddleware,
- '__call__', fake_wsgi)
-
-
-def stub_out_instance_quota(stubs, allowed, quota, resource='instances'):
- def fake_reserve(context, **deltas):
- requested = deltas.pop(resource, 0)
- if requested > allowed:
- quotas = dict(instances=1, cores=1, ram=1)
- quotas[resource] = quota
- usages = dict(instances=dict(in_use=0, reserved=0),
- cores=dict(in_use=0, reserved=0),
- ram=dict(in_use=0, reserved=0))
- usages[resource]['in_use'] = (quotas[resource] * 0.9 -
- allowed)
- usages[resource]['reserved'] = quotas[resource] * 0.1
- headroom = dict(
- (res, value - (usages[res]['in_use'] + usages[res]['reserved']))
- for res, value in quotas.iteritems()
- )
- raise exc.OverQuota(overs=[resource], quotas=quotas,
- usages=usages, headroom=headroom)
- stubs.Set(QUOTAS, 'reserve', fake_reserve)
-
-
-def stub_out_networking(stubs):
- def get_my_ip():
- return '127.0.0.1'
- stubs.Set(nova.netconf, '_get_my_ip', get_my_ip)
-
-
-def stub_out_compute_api_snapshot(stubs):
-
- def snapshot(self, context, instance, name, extra_properties=None):
- # emulate glance rejecting image names which are too long
- if len(name) > 256:
- raise exc.Invalid
- return dict(id='123', status='ACTIVE', name=name,
- properties=extra_properties)
-
- stubs.Set(compute_api.API, 'snapshot', snapshot)
-
-
-class stub_out_compute_api_backup(object):
-
- def __init__(self, stubs):
- self.stubs = stubs
- self.extra_props_last_call = None
- stubs.Set(compute_api.API, 'backup', self.backup)
-
- def backup(self, context, instance, name, backup_type, rotation,
- extra_properties=None):
- self.extra_props_last_call = extra_properties
- props = dict(backup_type=backup_type,
- rotation=rotation)
- props.update(extra_properties or {})
- return dict(id='123', status='ACTIVE', name=name, properties=props)
-
-
-def stub_out_nw_api_get_instance_nw_info(stubs, num_networks=1, func=None):
- fake_network.stub_out_nw_api_get_instance_nw_info(stubs)
-
-
-def stub_out_nw_api_get_floating_ips_by_fixed_address(stubs, func=None):
- def get_floating_ips_by_fixed_address(self, context, fixed_ip):
- return ['1.2.3.4']
-
- if func is None:
- func = get_floating_ips_by_fixed_address
- stubs.Set(network_api.API, 'get_floating_ips_by_fixed_address', func)
-
-
-def stub_out_nw_api(stubs, cls=None, private=None, publics=None):
- if not private:
- private = '192.168.0.3'
- if not publics:
- publics = ['1.2.3.4']
-
- class Fake:
- def get_instance_nw_info(*args, **kwargs):
- pass
-
- def get_floating_ips_by_fixed_address(*args, **kwargs):
- return publics
-
- def validate_networks(self, context, networks, max_count):
- return max_count
-
- def create_pci_requests_for_sriov_ports(self, context,
- system_metadata,
- requested_networks):
- pass
-
- if cls is None:
- cls = Fake
- stubs.Set(network_api, 'API', cls)
- fake_network.stub_out_nw_api_get_instance_nw_info(stubs)
-
-
-class FakeToken(object):
- id_count = 0
-
- def __getitem__(self, key):
- return getattr(self, key)
-
- def __init__(self, **kwargs):
- FakeToken.id_count += 1
- self.id = FakeToken.id_count
- for k, v in kwargs.iteritems():
- setattr(self, k, v)
-
-
-class FakeRequestContext(context.RequestContext):
- def __init__(self, *args, **kwargs):
- kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token')
- return super(FakeRequestContext, self).__init__(*args, **kwargs)
-
-
-class HTTPRequest(os_wsgi.Request):
-
- @staticmethod
- def blank(*args, **kwargs):
- kwargs['base_url'] = 'http://localhost/v2'
- use_admin_context = kwargs.pop('use_admin_context', False)
- out = os_wsgi.Request.blank(*args, **kwargs)
- out.environ['nova.context'] = FakeRequestContext('fake_user', 'fake',
- is_admin=use_admin_context)
- return out
-
-
-class HTTPRequestV3(os_wsgi.Request):
-
- @staticmethod
- def blank(*args, **kwargs):
- kwargs['base_url'] = 'http://localhost/v3'
- use_admin_context = kwargs.pop('use_admin_context', False)
- out = os_wsgi.Request.blank(*args, **kwargs)
- out.environ['nova.context'] = FakeRequestContext('fake_user', 'fake',
- is_admin=use_admin_context)
- return out
-
-
-class TestRouter(wsgi.Router):
- def __init__(self, controller, mapper=None):
- if not mapper:
- mapper = routes.Mapper()
- mapper.resource("test", "tests",
- controller=os_wsgi.Resource(controller))
- super(TestRouter, self).__init__(mapper)
-
-
-class FakeAuthDatabase(object):
- data = {}
-
- @staticmethod
- def auth_token_get(context, token_hash):
- return FakeAuthDatabase.data.get(token_hash, None)
-
- @staticmethod
- def auth_token_create(context, token):
- fake_token = FakeToken(created_at=timeutils.utcnow(), **token)
- FakeAuthDatabase.data[fake_token.token_hash] = fake_token
- FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token
- return fake_token
-
- @staticmethod
- def auth_token_destroy(context, token_id):
- token = FakeAuthDatabase.data.get('id_%i' % token_id)
- if token and token.token_hash in FakeAuthDatabase.data:
- del FakeAuthDatabase.data[token.token_hash]
- del FakeAuthDatabase.data['id_%i' % token_id]
-
-
-class FakeRateLimiter(object):
- def __init__(self, application):
- self.application = application
-
- @webob.dec.wsgify
- def __call__(self, req):
- return self.application
-
-
-def create_info_cache(nw_cache):
- if nw_cache is None:
- pub0 = ('192.168.1.100',)
- pub1 = ('2001:db8:0:1::1',)
-
- def _ip(ip):
- return {'address': ip, 'type': 'fixed'}
-
- nw_cache = [
- {'address': 'aa:aa:aa:aa:aa:aa',
- 'id': 1,
- 'network': {'bridge': 'br0',
- 'id': 1,
- 'label': 'test1',
- 'subnets': [{'cidr': '192.168.1.0/24',
- 'ips': [_ip(ip) for ip in pub0]},
- {'cidr': 'b33f::/64',
- 'ips': [_ip(ip) for ip in pub1]}]}}]
-
- if not isinstance(nw_cache, six.string_types):
- nw_cache = jsonutils.dumps(nw_cache)
-
- return {
- "info_cache": {
- "network_info": nw_cache,
- "deleted": False,
- "created_at": None,
- "deleted_at": None,
- "updated_at": None,
- }
- }
-
-
-def get_fake_uuid(token=0):
- if token not in FAKE_UUIDS:
- FAKE_UUIDS[token] = str(uuid.uuid4())
- return FAKE_UUIDS[token]
-
-
-def fake_instance_get(**kwargs):
- def _return_server(context, uuid, columns_to_join=None, use_slave=False):
- return stub_instance(1, **kwargs)
- return _return_server
-
-
-def fake_actions_to_locked_server(self, context, instance, *args, **kwargs):
- raise exc.InstanceIsLocked(instance_uuid=instance['uuid'])
-
-
-def fake_instance_get_all_by_filters(num_servers=5, **kwargs):
- def _return_servers(context, *args, **kwargs):
- servers_list = []
- marker = None
- limit = None
- found_marker = False
- if "marker" in kwargs:
- marker = kwargs["marker"]
- if "limit" in kwargs:
- limit = kwargs["limit"]
-
- if 'columns_to_join' in kwargs:
- kwargs.pop('columns_to_join')
-
- if 'use_slave' in kwargs:
- kwargs.pop('use_slave')
-
- for i in xrange(num_servers):
- uuid = get_fake_uuid(i)
- server = stub_instance(id=i + 1, uuid=uuid,
- **kwargs)
- servers_list.append(server)
- if marker is not None and uuid == marker:
- found_marker = True
- servers_list = []
- if marker is not None and not found_marker:
- raise exc.MarkerNotFound(marker=marker)
- if limit is not None:
- servers_list = servers_list[:limit]
- return servers_list
- return _return_servers
-
-
-def stub_instance(id, user_id=None, project_id=None, host=None,
- node=None, vm_state=None, task_state=None,
- reservation_id="", uuid=FAKE_UUID, image_ref="10",
- flavor_id="1", name=None, key_name='',
- access_ipv4=None, access_ipv6=None, progress=0,
- auto_disk_config=False, display_name=None,
- include_fake_metadata=True, config_drive=None,
- power_state=None, nw_cache=None, metadata=None,
- security_groups=None, root_device_name=None,
- limit=None, marker=None,
- launched_at=timeutils.utcnow(),
- terminated_at=timeutils.utcnow(),
- availability_zone='', locked_by=None, cleaned=False,
- memory_mb=0, vcpus=0, root_gb=0, ephemeral_gb=0):
- if user_id is None:
- user_id = 'fake_user'
- if project_id is None:
- project_id = 'fake_project'
-
- if metadata:
- metadata = [{'key': k, 'value': v} for k, v in metadata.items()]
- elif include_fake_metadata:
- metadata = [models.InstanceMetadata(key='seq', value=str(id))]
- else:
- metadata = []
-
- inst_type = flavors.get_flavor_by_flavor_id(int(flavor_id))
- sys_meta = flavors.save_flavor_info({}, inst_type)
-
- if host is not None:
- host = str(host)
-
- if key_name:
- key_data = 'FAKE'
- else:
- key_data = ''
-
- if security_groups is None:
- security_groups = [{"id": 1, "name": "test", "description": "Foo:",
- "project_id": "project", "user_id": "user",
- "created_at": None, "updated_at": None,
- "deleted_at": None, "deleted": False}]
-
- # ReservationID isn't sent back, hack it in there.
- server_name = name or "server%s" % id
- if reservation_id != "":
- server_name = "reservation_%s" % (reservation_id, )
-
- info_cache = create_info_cache(nw_cache)
-
- instance = {
- "id": int(id),
- "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
- "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
- "deleted_at": datetime.datetime(2010, 12, 12, 10, 0, 0),
- "deleted": None,
- "user_id": user_id,
- "project_id": project_id,
- "image_ref": image_ref,
- "kernel_id": "",
- "ramdisk_id": "",
- "launch_index": 0,
- "key_name": key_name,
- "key_data": key_data,
- "config_drive": config_drive,
- "vm_state": vm_state or vm_states.BUILDING,
- "task_state": task_state,
- "power_state": power_state,
- "memory_mb": memory_mb,
- "vcpus": vcpus,
- "root_gb": root_gb,
- "ephemeral_gb": ephemeral_gb,
- "ephemeral_key_uuid": None,
- "hostname": display_name or server_name,
- "host": host,
- "node": node,
- "instance_type_id": 1,
- "instance_type": inst_type,
- "user_data": "",
- "reservation_id": reservation_id,
- "mac_address": "",
- "scheduled_at": timeutils.utcnow(),
- "launched_at": launched_at,
- "terminated_at": terminated_at,
- "availability_zone": availability_zone,
- "display_name": display_name or server_name,
- "display_description": "",
- "locked": locked_by is not None,
- "locked_by": locked_by,
- "metadata": metadata,
- "access_ip_v4": access_ipv4,
- "access_ip_v6": access_ipv6,
- "uuid": uuid,
- "progress": progress,
- "auto_disk_config": auto_disk_config,
- "name": "instance-%s" % id,
- "shutdown_terminate": True,
- "disable_terminate": False,
- "security_groups": security_groups,
- "root_device_name": root_device_name,
- "system_metadata": utils.dict_to_metadata(sys_meta),
- "pci_devices": [],
- "vm_mode": "",
- "default_swap_device": "",
- "default_ephemeral_device": "",
- "launched_on": "",
- "cell_name": "",
- "architecture": "",
- "os_type": "",
- "cleaned": cleaned}
-
- instance.update(info_cache)
- instance['info_cache']['instance_uuid'] = instance['uuid']
-
- return instance
-
-
-def stub_volume(id, **kwargs):
- volume = {
- 'id': id,
- 'user_id': 'fakeuser',
- 'project_id': 'fakeproject',
- 'host': 'fakehost',
- 'size': 1,
- 'availability_zone': 'fakeaz',
- 'instance_uuid': 'fakeuuid',
- 'mountpoint': '/',
- 'status': 'fakestatus',
- 'attach_status': 'attached',
- 'name': 'vol name',
- 'display_name': 'displayname',
- 'display_description': 'displaydesc',
- 'created_at': datetime.datetime(1999, 1, 1, 1, 1, 1),
- 'snapshot_id': None,
- 'volume_type_id': 'fakevoltype',
- 'volume_metadata': [],
- 'volume_type': {'name': 'vol_type_name'}}
-
- volume.update(kwargs)
- return volume
-
-
-def stub_volume_create(self, context, size, name, description, snapshot,
- **param):
- vol = stub_volume('1')
- vol['size'] = size
- vol['display_name'] = name
- vol['display_description'] = description
- try:
- vol['snapshot_id'] = snapshot['id']
- except (KeyError, TypeError):
- vol['snapshot_id'] = None
- vol['availability_zone'] = param.get('availability_zone', 'fakeaz')
- return vol
-
-
-def stub_volume_update(self, context, *args, **param):
- pass
-
-
-def stub_volume_delete(self, context, *args, **param):
- pass
-
-
-def stub_volume_get(self, context, volume_id):
- return stub_volume(volume_id)
-
-
-def stub_volume_notfound(self, context, volume_id):
- raise exc.VolumeNotFound(volume_id=volume_id)
-
-
-def stub_volume_get_all(context, search_opts=None):
- return [stub_volume(100, project_id='fake'),
- stub_volume(101, project_id='superfake'),
- stub_volume(102, project_id='superduperfake')]
-
-
-def stub_volume_check_attach(self, context, *args, **param):
- pass
-
-
-def stub_snapshot(id, **kwargs):
- snapshot = {
- 'id': id,
- 'volume_id': 12,
- 'status': 'available',
- 'volume_size': 100,
- 'created_at': timeutils.utcnow(),
- 'display_name': 'Default name',
- 'display_description': 'Default description',
- 'project_id': 'fake'
- }
-
- snapshot.update(kwargs)
- return snapshot
-
-
-def stub_snapshot_create(self, context, volume_id, name, description):
- return stub_snapshot(100, volume_id=volume_id, display_name=name,
- display_description=description)
-
-
-def stub_compute_volume_snapshot_create(self, context, volume_id, create_info):
- return {'snapshot': {'id': 100, 'volumeId': volume_id}}
-
-
-def stub_snapshot_delete(self, context, snapshot_id):
- if snapshot_id == '-1':
- raise exc.NotFound
-
-
-def stub_compute_volume_snapshot_delete(self, context, volume_id, snapshot_id,
- delete_info):
- pass
-
-
-def stub_snapshot_get(self, context, snapshot_id):
- if snapshot_id == '-1':
- raise exc.NotFound
- return stub_snapshot(snapshot_id)
-
-
-def stub_snapshot_get_all(self, context):
- return [stub_snapshot(100, project_id='fake'),
- stub_snapshot(101, project_id='superfake'),
- stub_snapshot(102, project_id='superduperfake')]
-
-
-def stub_bdm_get_all_by_instance(context, instance_uuid, use_slave=False):
- return [fake_block_device.FakeDbBlockDeviceDict(
- {'id': 1, 'source_type': 'volume', 'destination_type': 'volume',
- 'volume_id': 'volume_id1', 'instance_uuid': instance_uuid}),
- fake_block_device.FakeDbBlockDeviceDict(
- {'id': 2, 'source_type': 'volume', 'destination_type': 'volume',
- 'volume_id': 'volume_id2', 'instance_uuid': instance_uuid})]
-
-
-def fake_get_available_languages():
- existing_translations = ['en_GB', 'en_AU', 'de', 'zh_CN', 'en_US']
- return existing_translations
-
-
-def fake_not_implemented(*args, **kwargs):
- raise NotImplementedError()
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
deleted file mode 100644
index 37ead3e73d..0000000000
--- a/nova/tests/api/openstack/test_common.py
+++ /dev/null
@@ -1,764 +0,0 @@
-# Copyright 2010 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Test suites for 'common' code used throughout the OpenStack HTTP API.
-"""
-
-import xml.dom.minidom as minidom
-
-from lxml import etree
-import mock
-import six
-from testtools import matchers
-import webob
-import webob.exc
-import webob.multidict
-
-from nova.api.openstack import common
-from nova.api.openstack import xmlutil
-from nova.compute import task_states
-from nova.compute import vm_states
-from nova import exception
-from nova import test
-from nova.tests import utils
-
-
-NS = "{http://docs.openstack.org/compute/api/v1.1}"
-ATOMNS = "{http://www.w3.org/2005/Atom}"
-
-
-class LimiterTest(test.TestCase):
- """Unit tests for the `nova.api.openstack.common.limited` method which
- takes in a list of items and, depending on the 'offset' and 'limit' GET
- params, returns a subset or complete set of the given items.
- """
-
- def setUp(self):
- """Run before each test."""
- super(LimiterTest, self).setUp()
- self.tiny = range(1)
- self.small = range(10)
- self.medium = range(1000)
- self.large = range(10000)
-
- def test_limiter_offset_zero(self):
- # Test offset key works with 0.
- req = webob.Request.blank('/?offset=0')
- self.assertEqual(common.limited(self.tiny, req), self.tiny)
- self.assertEqual(common.limited(self.small, req), self.small)
- self.assertEqual(common.limited(self.medium, req), self.medium)
- self.assertEqual(common.limited(self.large, req), self.large[:1000])
-
- def test_limiter_offset_medium(self):
- # Test offset key works with a medium sized number.
- req = webob.Request.blank('/?offset=10')
- self.assertEqual(common.limited(self.tiny, req), [])
- self.assertEqual(common.limited(self.small, req), self.small[10:])
- self.assertEqual(common.limited(self.medium, req), self.medium[10:])
- self.assertEqual(common.limited(self.large, req), self.large[10:1010])
-
- def test_limiter_offset_over_max(self):
- # Test offset key works with a number over 1000 (max_limit).
- req = webob.Request.blank('/?offset=1001')
- self.assertEqual(common.limited(self.tiny, req), [])
- self.assertEqual(common.limited(self.small, req), [])
- self.assertEqual(common.limited(self.medium, req), [])
- self.assertEqual(
- common.limited(self.large, req), self.large[1001:2001])
-
- def test_limiter_offset_blank(self):
- # Test offset key works with a blank offset.
- req = webob.Request.blank('/?offset=')
- self.assertRaises(
- webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
-
- def test_limiter_offset_bad(self):
- # Test offset key works with a BAD offset.
- req = webob.Request.blank(u'/?offset=\u0020aa')
- self.assertRaises(
- webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
-
- def test_limiter_nothing(self):
- # Test request with no offset or limit.
- req = webob.Request.blank('/')
- self.assertEqual(common.limited(self.tiny, req), self.tiny)
- self.assertEqual(common.limited(self.small, req), self.small)
- self.assertEqual(common.limited(self.medium, req), self.medium)
- self.assertEqual(common.limited(self.large, req), self.large[:1000])
-
- def test_limiter_limit_zero(self):
- # Test limit of zero.
- req = webob.Request.blank('/?limit=0')
- self.assertEqual(common.limited(self.tiny, req), self.tiny)
- self.assertEqual(common.limited(self.small, req), self.small)
- self.assertEqual(common.limited(self.medium, req), self.medium)
- self.assertEqual(common.limited(self.large, req), self.large[:1000])
-
- def test_limiter_limit_medium(self):
- # Test limit of 10.
- req = webob.Request.blank('/?limit=10')
- self.assertEqual(common.limited(self.tiny, req), self.tiny)
- self.assertEqual(common.limited(self.small, req), self.small)
- self.assertEqual(common.limited(self.medium, req), self.medium[:10])
- self.assertEqual(common.limited(self.large, req), self.large[:10])
-
- def test_limiter_limit_over_max(self):
- # Test limit of 3000.
- req = webob.Request.blank('/?limit=3000')
- self.assertEqual(common.limited(self.tiny, req), self.tiny)
- self.assertEqual(common.limited(self.small, req), self.small)
- self.assertEqual(common.limited(self.medium, req), self.medium)
- self.assertEqual(common.limited(self.large, req), self.large[:1000])
-
- def test_limiter_limit_and_offset(self):
- # Test request with both limit and offset.
- items = range(2000)
- req = webob.Request.blank('/?offset=1&limit=3')
- self.assertEqual(common.limited(items, req), items[1:4])
- req = webob.Request.blank('/?offset=3&limit=0')
- self.assertEqual(common.limited(items, req), items[3:1003])
- req = webob.Request.blank('/?offset=3&limit=1500')
- self.assertEqual(common.limited(items, req), items[3:1003])
- req = webob.Request.blank('/?offset=3000&limit=10')
- self.assertEqual(common.limited(items, req), [])
-
- def test_limiter_custom_max_limit(self):
- # Test a max_limit other than 1000.
- items = range(2000)
- req = webob.Request.blank('/?offset=1&limit=3')
- self.assertEqual(
- common.limited(items, req, max_limit=2000), items[1:4])
- req = webob.Request.blank('/?offset=3&limit=0')
- self.assertEqual(
- common.limited(items, req, max_limit=2000), items[3:])
- req = webob.Request.blank('/?offset=3&limit=2500')
- self.assertEqual(
- common.limited(items, req, max_limit=2000), items[3:])
- req = webob.Request.blank('/?offset=3000&limit=10')
- self.assertEqual(common.limited(items, req, max_limit=2000), [])
-
- def test_limiter_negative_limit(self):
- # Test a negative limit.
- req = webob.Request.blank('/?limit=-3000')
- self.assertRaises(
- webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
-
- def test_limiter_negative_offset(self):
- # Test a negative offset.
- req = webob.Request.blank('/?offset=-30')
- self.assertRaises(
- webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
-
-
-class SortParamUtilsTest(test.TestCase):
-
- def test_get_sort_params_defaults(self):
- '''Verifies the default sort key and direction.'''
- sort_keys, sort_dirs = common.get_sort_params({})
- self.assertEqual(['created_at'], sort_keys)
- self.assertEqual(['desc'], sort_dirs)
-
- def test_get_sort_params_override_defaults(self):
- '''Verifies that the defaults can be overriden.'''
- sort_keys, sort_dirs = common.get_sort_params({}, default_key='key1',
- default_dir='dir1')
- self.assertEqual(['key1'], sort_keys)
- self.assertEqual(['dir1'], sort_dirs)
-
- sort_keys, sort_dirs = common.get_sort_params({}, default_key=None,
- default_dir=None)
- self.assertEqual([], sort_keys)
- self.assertEqual([], sort_dirs)
-
- def test_get_sort_params_single_value(self):
- '''Verifies a single sort key and direction.'''
- params = webob.multidict.MultiDict()
- params.add('sort_key', 'key1')
- params.add('sort_dir', 'dir1')
- sort_keys, sort_dirs = common.get_sort_params(params)
- self.assertEqual(['key1'], sort_keys)
- self.assertEqual(['dir1'], sort_dirs)
-
- def test_get_sort_params_single_with_default(self):
- '''Verifies a single sort value with a default.'''
- params = webob.multidict.MultiDict()
- params.add('sort_key', 'key1')
- sort_keys, sort_dirs = common.get_sort_params(params)
- self.assertEqual(['key1'], sort_keys)
- # sort_key was supplied, sort_dir should be defaulted
- self.assertEqual(['desc'], sort_dirs)
-
- params = webob.multidict.MultiDict()
- params.add('sort_dir', 'dir1')
- sort_keys, sort_dirs = common.get_sort_params(params)
- self.assertEqual(['created_at'], sort_keys)
- # sort_dir was supplied, sort_key should be defaulted
- self.assertEqual(['dir1'], sort_dirs)
-
- def test_get_sort_params_multiple_values(self):
- '''Verifies multiple sort parameter values.'''
- params = webob.multidict.MultiDict()
- params.add('sort_key', 'key1')
- params.add('sort_key', 'key2')
- params.add('sort_key', 'key3')
- params.add('sort_dir', 'dir1')
- params.add('sort_dir', 'dir2')
- params.add('sort_dir', 'dir3')
- sort_keys, sort_dirs = common.get_sort_params(params)
- self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
- self.assertEqual(['dir1', 'dir2', 'dir3'], sort_dirs)
- # Also ensure that the input parameters are not modified
- sort_key_vals = []
- sort_dir_vals = []
- while 'sort_key' in params:
- sort_key_vals.append(params.pop('sort_key'))
- while 'sort_dir' in params:
- sort_dir_vals.append(params.pop('sort_dir'))
- self.assertEqual(['key1', 'key2', 'key3'], sort_key_vals)
- self.assertEqual(['dir1', 'dir2', 'dir3'], sort_dir_vals)
- self.assertEqual(0, len(params))
-
-
-class PaginationParamsTest(test.TestCase):
- """Unit tests for the `nova.api.openstack.common.get_pagination_params`
- method which takes in a request object and returns 'marker' and 'limit'
- GET params.
- """
-
- def test_no_params(self):
- # Test no params.
- req = webob.Request.blank('/')
- self.assertEqual(common.get_pagination_params(req), {})
-
- def test_valid_marker(self):
- # Test valid marker param.
- req = webob.Request.blank(
- '/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
- self.assertEqual(common.get_pagination_params(req),
- {'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'})
-
- def test_valid_limit(self):
- # Test valid limit param.
- req = webob.Request.blank('/?limit=10')
- self.assertEqual(common.get_pagination_params(req), {'limit': 10})
-
- def test_invalid_limit(self):
- # Test invalid limit param.
- req = webob.Request.blank('/?limit=-2')
- self.assertRaises(
- webob.exc.HTTPBadRequest, common.get_pagination_params, req)
-
- def test_valid_limit_and_marker(self):
- # Test valid limit and marker parameters.
- marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
- req = webob.Request.blank('/?limit=20&marker=%s' % marker)
- self.assertEqual(common.get_pagination_params(req),
- {'marker': marker, 'limit': 20})
-
- def test_valid_page_size(self):
- # Test valid page_size param.
- req = webob.Request.blank('/?page_size=10')
- self.assertEqual(common.get_pagination_params(req),
- {'page_size': 10})
-
- def test_invalid_page_size(self):
- # Test invalid page_size param.
- req = webob.Request.blank('/?page_size=-2')
- self.assertRaises(
- webob.exc.HTTPBadRequest, common.get_pagination_params, req)
-
- def test_valid_limit_and_page_size(self):
- # Test valid limit and page_size parameters.
- req = webob.Request.blank('/?limit=20&page_size=5')
- self.assertEqual(common.get_pagination_params(req),
- {'page_size': 5, 'limit': 20})
-
-
-class MiscFunctionsTest(test.TestCase):
-
- def test_remove_major_version_from_href(self):
- fixture = 'http://www.testsite.com/v1/images'
- expected = 'http://www.testsite.com/images'
- actual = common.remove_version_from_href(fixture)
- self.assertEqual(actual, expected)
-
- def test_remove_version_from_href(self):
- fixture = 'http://www.testsite.com/v1.1/images'
- expected = 'http://www.testsite.com/images'
- actual = common.remove_version_from_href(fixture)
- self.assertEqual(actual, expected)
-
- def test_remove_version_from_href_2(self):
- fixture = 'http://www.testsite.com/v1.1/'
- expected = 'http://www.testsite.com/'
- actual = common.remove_version_from_href(fixture)
- self.assertEqual(actual, expected)
-
- def test_remove_version_from_href_3(self):
- fixture = 'http://www.testsite.com/v10.10'
- expected = 'http://www.testsite.com'
- actual = common.remove_version_from_href(fixture)
- self.assertEqual(actual, expected)
-
- def test_remove_version_from_href_4(self):
- fixture = 'http://www.testsite.com/v1.1/images/v10.5'
- expected = 'http://www.testsite.com/images/v10.5'
- actual = common.remove_version_from_href(fixture)
- self.assertEqual(actual, expected)
-
- def test_remove_version_from_href_bad_request(self):
- fixture = 'http://www.testsite.com/1.1/images'
- self.assertRaises(ValueError,
- common.remove_version_from_href,
- fixture)
-
- def test_remove_version_from_href_bad_request_2(self):
- fixture = 'http://www.testsite.com/v/images'
- self.assertRaises(ValueError,
- common.remove_version_from_href,
- fixture)
-
- def test_remove_version_from_href_bad_request_3(self):
- fixture = 'http://www.testsite.com/v1.1images'
- self.assertRaises(ValueError,
- common.remove_version_from_href,
- fixture)
-
- def test_get_id_from_href_with_int_url(self):
- fixture = 'http://www.testsite.com/dir/45'
- actual = common.get_id_from_href(fixture)
- expected = '45'
- self.assertEqual(actual, expected)
-
- def test_get_id_from_href_with_int(self):
- fixture = '45'
- actual = common.get_id_from_href(fixture)
- expected = '45'
- self.assertEqual(actual, expected)
-
- def test_get_id_from_href_with_int_url_query(self):
- fixture = 'http://www.testsite.com/dir/45?asdf=jkl'
- actual = common.get_id_from_href(fixture)
- expected = '45'
- self.assertEqual(actual, expected)
-
- def test_get_id_from_href_with_uuid_url(self):
- fixture = 'http://www.testsite.com/dir/abc123'
- actual = common.get_id_from_href(fixture)
- expected = "abc123"
- self.assertEqual(actual, expected)
-
- def test_get_id_from_href_with_uuid_url_query(self):
- fixture = 'http://www.testsite.com/dir/abc123?asdf=jkl'
- actual = common.get_id_from_href(fixture)
- expected = "abc123"
- self.assertEqual(actual, expected)
-
- def test_get_id_from_href_with_uuid(self):
- fixture = 'abc123'
- actual = common.get_id_from_href(fixture)
- expected = 'abc123'
- self.assertEqual(actual, expected)
-
- def test_raise_http_conflict_for_instance_invalid_state(self):
- exc = exception.InstanceInvalidState(attr='fake_attr',
- state='fake_state', method='fake_method',
- instance_uuid='fake')
- try:
- common.raise_http_conflict_for_instance_invalid_state(exc,
- 'meow', 'fake_server_id')
- except webob.exc.HTTPConflict as e:
- self.assertEqual(six.text_type(e),
- "Cannot 'meow' instance fake_server_id while it is in "
- "fake_attr fake_state")
- else:
- self.fail("webob.exc.HTTPConflict was not raised")
-
- def test_check_img_metadata_properties_quota_valid_metadata(self):
- ctxt = utils.get_test_admin_context()
- metadata1 = {"key": "value"}
- actual = common.check_img_metadata_properties_quota(ctxt, metadata1)
- self.assertIsNone(actual)
-
- metadata2 = {"key": "v" * 260}
- actual = common.check_img_metadata_properties_quota(ctxt, metadata2)
- self.assertIsNone(actual)
-
- metadata3 = {"key": ""}
- actual = common.check_img_metadata_properties_quota(ctxt, metadata3)
- self.assertIsNone(actual)
-
- def test_check_img_metadata_properties_quota_inv_metadata(self):
- ctxt = utils.get_test_admin_context()
- metadata1 = {"a" * 260: "value"}
- self.assertRaises(webob.exc.HTTPBadRequest,
- common.check_img_metadata_properties_quota, ctxt, metadata1)
-
- metadata2 = {"": "value"}
- self.assertRaises(webob.exc.HTTPBadRequest,
- common.check_img_metadata_properties_quota, ctxt, metadata2)
-
- metadata3 = "invalid metadata"
- self.assertRaises(webob.exc.HTTPBadRequest,
- common.check_img_metadata_properties_quota, ctxt, metadata3)
-
- metadata4 = None
- self.assertIsNone(common.check_img_metadata_properties_quota(ctxt,
- metadata4))
- metadata5 = {}
- self.assertIsNone(common.check_img_metadata_properties_quota(ctxt,
- metadata5))
-
- def test_status_from_state(self):
- for vm_state in (vm_states.ACTIVE, vm_states.STOPPED):
- for task_state in (task_states.RESIZE_PREP,
- task_states.RESIZE_MIGRATING,
- task_states.RESIZE_MIGRATED,
- task_states.RESIZE_FINISH):
- actual = common.status_from_state(vm_state, task_state)
- expected = 'RESIZE'
- self.assertEqual(expected, actual)
-
- def test_status_rebuild_from_state(self):
- for vm_state in (vm_states.ACTIVE, vm_states.STOPPED,
- vm_states.ERROR):
- for task_state in (task_states.REBUILDING,
- task_states.REBUILD_BLOCK_DEVICE_MAPPING,
- task_states.REBUILD_SPAWNING):
- actual = common.status_from_state(vm_state, task_state)
- expected = 'REBUILD'
- self.assertEqual(expected, actual)
-
- def test_task_and_vm_state_from_status(self):
- fixture1 = ['reboot']
- actual = common.task_and_vm_state_from_status(fixture1)
- expected = [vm_states.ACTIVE], [task_states.REBOOT_PENDING,
- task_states.REBOOT_STARTED,
- task_states.REBOOTING]
- self.assertEqual(expected, actual)
-
- fixture2 = ['resize']
- actual = common.task_and_vm_state_from_status(fixture2)
- expected = ([vm_states.ACTIVE, vm_states.STOPPED],
- [task_states.RESIZE_FINISH,
- task_states.RESIZE_MIGRATED,
- task_states.RESIZE_MIGRATING,
- task_states.RESIZE_PREP])
- self.assertEqual(expected, actual)
-
- fixture3 = ['resize', 'reboot']
- actual = common.task_and_vm_state_from_status(fixture3)
- expected = ([vm_states.ACTIVE, vm_states.STOPPED],
- [task_states.REBOOT_PENDING,
- task_states.REBOOT_STARTED,
- task_states.REBOOTING,
- task_states.RESIZE_FINISH,
- task_states.RESIZE_MIGRATED,
- task_states.RESIZE_MIGRATING,
- task_states.RESIZE_PREP])
- self.assertEqual(expected, actual)
-
-
-class TestCollectionLinks(test.NoDBTestCase):
- """Tests the _get_collection_links method."""
-
- @mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
- def test_items_less_than_limit(self, href_link_mock):
- items = [
- {"uuid": "123"}
- ]
- req = mock.MagicMock()
- params = mock.PropertyMock(return_value=dict(limit=10))
- type(req).params = params
-
- builder = common.ViewBuilder()
- results = builder._get_collection_links(req, items, "ignored", "uuid")
-
- self.assertFalse(href_link_mock.called)
- self.assertThat(results, matchers.HasLength(0))
-
- @mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
- def test_items_equals_given_limit(self, href_link_mock):
- items = [
- {"uuid": "123"}
- ]
- req = mock.MagicMock()
- params = mock.PropertyMock(return_value=dict(limit=1))
- type(req).params = params
-
- builder = common.ViewBuilder()
- results = builder._get_collection_links(req, items,
- mock.sentinel.coll_key,
- "uuid")
-
- href_link_mock.assert_called_once_with(req, "123",
- mock.sentinel.coll_key)
- self.assertThat(results, matchers.HasLength(1))
-
- @mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
- def test_items_equals_default_limit(self, href_link_mock):
- items = [
- {"uuid": "123"}
- ]
- req = mock.MagicMock()
- params = mock.PropertyMock(return_value=dict())
- type(req).params = params
- self.flags(osapi_max_limit=1)
-
- builder = common.ViewBuilder()
- results = builder._get_collection_links(req, items,
- mock.sentinel.coll_key,
- "uuid")
-
- href_link_mock.assert_called_once_with(req, "123",
- mock.sentinel.coll_key)
- self.assertThat(results, matchers.HasLength(1))
-
- @mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
- def test_items_equals_default_limit_with_given(self, href_link_mock):
- items = [
- {"uuid": "123"}
- ]
- req = mock.MagicMock()
- # Given limit is greater than default max, only return default max
- params = mock.PropertyMock(return_value=dict(limit=2))
- type(req).params = params
- self.flags(osapi_max_limit=1)
-
- builder = common.ViewBuilder()
- results = builder._get_collection_links(req, items,
- mock.sentinel.coll_key,
- "uuid")
-
- href_link_mock.assert_called_once_with(req, "123",
- mock.sentinel.coll_key)
- self.assertThat(results, matchers.HasLength(1))
-
-
-class MetadataXMLDeserializationTest(test.TestCase):
-
- deserializer = common.MetadataXMLDeserializer()
-
- def test_create(self):
- request_body = """
- <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
- <meta key='123'>asdf</meta>
- <meta key='567'>jkl;</meta>
- </metadata>"""
- output = self.deserializer.deserialize(request_body, 'create')
- expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}}
- self.assertEqual(output, expected)
-
- def test_create_empty(self):
- request_body = """
- <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
- output = self.deserializer.deserialize(request_body, 'create')
- expected = {"body": {"metadata": {}}}
- self.assertEqual(output, expected)
-
- def test_update_all(self):
- request_body = """
- <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
- <meta key='123'>asdf</meta>
- <meta key='567'>jkl;</meta>
- </metadata>"""
- output = self.deserializer.deserialize(request_body, 'update_all')
- expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}}
- self.assertEqual(output, expected)
-
- def test_update(self):
- request_body = """
- <meta xmlns="http://docs.openstack.org/compute/api/v1.1"
- key='123'>asdf</meta>"""
- output = self.deserializer.deserialize(request_body, 'update')
- expected = {"body": {"meta": {"123": "asdf"}}}
- self.assertEqual(output, expected)
-
-
-class MetadataXMLSerializationTest(test.TestCase):
-
- def test_xml_declaration(self):
- serializer = common.MetadataTemplate()
- fixture = {
- 'metadata': {
- 'one': 'two',
- 'three': 'four',
- },
- }
-
- output = serializer.serialize(fixture)
- has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
- self.assertTrue(has_dec)
-
- def test_index(self):
- serializer = common.MetadataTemplate()
- fixture = {
- 'metadata': {
- 'one': 'two',
- 'three': 'four',
- },
- }
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'metadata')
- metadata_dict = fixture['metadata']
- metadata_elems = root.findall('{0}meta'.format(NS))
- self.assertEqual(len(metadata_elems), 2)
- for i, metadata_elem in enumerate(metadata_elems):
- (meta_key, meta_value) = metadata_dict.items()[i]
- self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
- self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
-
- def test_index_null(self):
- serializer = common.MetadataTemplate()
- fixture = {
- 'metadata': {
- None: None,
- },
- }
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'metadata')
- metadata_dict = fixture['metadata']
- metadata_elems = root.findall('{0}meta'.format(NS))
- self.assertEqual(len(metadata_elems), 1)
- for i, metadata_elem in enumerate(metadata_elems):
- (meta_key, meta_value) = metadata_dict.items()[i]
- self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
- self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
-
- def test_index_unicode(self):
- serializer = common.MetadataTemplate()
- fixture = {
- 'metadata': {
- u'three': u'Jos\xe9',
- },
- }
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'metadata')
- metadata_dict = fixture['metadata']
- metadata_elems = root.findall('{0}meta'.format(NS))
- self.assertEqual(len(metadata_elems), 1)
- for i, metadata_elem in enumerate(metadata_elems):
- (meta_key, meta_value) = metadata_dict.items()[i]
- self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
- self.assertEqual(metadata_elem.text.strip(), meta_value)
-
- def test_show(self):
- serializer = common.MetaItemTemplate()
- fixture = {
- 'meta': {
- 'one': 'two',
- },
- }
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- meta_dict = fixture['meta']
- (meta_key, meta_value) = meta_dict.items()[0]
- self.assertEqual(str(root.get('key')), str(meta_key))
- self.assertEqual(root.text.strip(), meta_value)
-
- def test_update_all(self):
- serializer = common.MetadataTemplate()
- fixture = {
- 'metadata': {
- 'key6': 'value6',
- 'key4': 'value4',
- },
- }
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'metadata')
- metadata_dict = fixture['metadata']
- metadata_elems = root.findall('{0}meta'.format(NS))
- self.assertEqual(len(metadata_elems), 2)
- for i, metadata_elem in enumerate(metadata_elems):
- (meta_key, meta_value) = metadata_dict.items()[i]
- self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
- self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
-
- def test_update_item(self):
- serializer = common.MetaItemTemplate()
- fixture = {
- 'meta': {
- 'one': 'two',
- },
- }
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- meta_dict = fixture['meta']
- (meta_key, meta_value) = meta_dict.items()[0]
- self.assertEqual(str(root.get('key')), str(meta_key))
- self.assertEqual(root.text.strip(), meta_value)
-
- def test_create(self):
- serializer = common.MetadataTemplate()
- fixture = {
- 'metadata': {
- 'key9': 'value9',
- 'key2': 'value2',
- 'key1': 'value1',
- },
- }
- output = serializer.serialize(fixture)
- root = etree.XML(output)
- xmlutil.validate_schema(root, 'metadata')
- metadata_dict = fixture['metadata']
- metadata_elems = root.findall('{0}meta'.format(NS))
- self.assertEqual(len(metadata_elems), 3)
- for i, metadata_elem in enumerate(metadata_elems):
- (meta_key, meta_value) = metadata_dict.items()[i]
- self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
- self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
- actual = minidom.parseString(output.replace(" ", ""))
-
- expected = minidom.parseString("""
- <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
- <meta key="key2">value2</meta>
- <meta key="key9">value9</meta>
- <meta key="key1">value1</meta>
- </metadata>
- """.replace(" ", "").replace("\n", ""))
-
- self.assertEqual(expected.toxml(), actual.toxml())
-
- def test_metadata_deserializer(self):
- """Should throw a 400 error on corrupt xml."""
- deserializer = common.MetadataXMLDeserializer()
- self.assertRaises(
- exception.MalformedRequestBody,
- deserializer.deserialize,
- utils.killer_xml_body())
-
-
-class LinkPrefixTest(test.NoDBTestCase):
-
- def test_update_link_prefix(self):
- vb = common.ViewBuilder()
- result = vb._update_link_prefix("http://192.168.0.243:24/",
- "http://127.0.0.1/compute")
- self.assertEqual("http://127.0.0.1/compute", result)
-
- result = vb._update_link_prefix("http://foo.x.com/v1",
- "http://new.prefix.com")
- self.assertEqual("http://new.prefix.com/v1", result)
-
- result = vb._update_link_prefix(
- "http://foo.x.com/v1",
- "http://new.prefix.com:20455/new_extra_prefix")
- self.assertEqual("http://new.prefix.com:20455/new_extra_prefix/v1",
- result)
diff --git a/nova/tests/api/openstack/test_mapper.py b/nova/tests/api/openstack/test_mapper.py
deleted file mode 100644
index 99b53f384a..0000000000
--- a/nova/tests/api/openstack/test_mapper.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import webob
-
-from nova.api import openstack as openstack_api
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-class MapperTest(test.NoDBTestCase):
- def test_resource_project_prefix(self):
- class Controller(object):
- def index(self, req):
- return 'foo'
-
- app = fakes.TestRouter(Controller(),
- openstack_api.ProjectMapper())
- req = webob.Request.blank('/1234/tests')
- resp = req.get_response(app)
- self.assertEqual(resp.body, 'foo')
- self.assertEqual(resp.status_int, 200)
-
- def test_resource_no_project_prefix(self):
- class Controller(object):
- def index(self, req):
- return 'foo'
-
- app = fakes.TestRouter(Controller(),
- openstack_api.PlainMapper())
- req = webob.Request.blank('/tests')
- resp = req.get_response(app)
- self.assertEqual(resp.body, 'foo')
- self.assertEqual(resp.status_int, 200)
diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py
deleted file mode 100644
index 71ba710bce..0000000000
--- a/nova/tests/api/openstack/test_wsgi.py
+++ /dev/null
@@ -1,1244 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import inspect
-
-import webob
-
-from nova.api.openstack import extensions
-from nova.api.openstack import wsgi
-from nova import exception
-from nova import i18n
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import utils
-
-
-class RequestTest(test.NoDBTestCase):
- def test_content_type_missing(self):
- request = wsgi.Request.blank('/tests/123', method='POST')
- request.body = "<body />"
- self.assertIsNone(request.get_content_type())
-
- def test_content_type_unsupported(self):
- request = wsgi.Request.blank('/tests/123', method='POST')
- request.headers["Content-Type"] = "text/html"
- request.body = "asdf<br />"
- self.assertRaises(exception.InvalidContentType,
- request.get_content_type)
-
- def test_content_type_with_charset(self):
- request = wsgi.Request.blank('/tests/123')
- request.headers["Content-Type"] = "application/json; charset=UTF-8"
- result = request.get_content_type()
- self.assertEqual(result, "application/json")
-
- def test_content_type_from_accept(self):
- for content_type in ('application/xml',
- 'application/vnd.openstack.compute+xml',
- 'application/json',
- 'application/vnd.openstack.compute+json'):
- request = wsgi.Request.blank('/tests/123')
- request.headers["Accept"] = content_type
- result = request.best_match_content_type()
- self.assertEqual(result, content_type)
-
- def test_content_type_from_accept_best(self):
- request = wsgi.Request.blank('/tests/123')
- request.headers["Accept"] = "application/xml, application/json"
- result = request.best_match_content_type()
- self.assertEqual(result, "application/json")
-
- request = wsgi.Request.blank('/tests/123')
- request.headers["Accept"] = ("application/json; q=0.3, "
- "application/xml; q=0.9")
- result = request.best_match_content_type()
- self.assertEqual(result, "application/xml")
-
- def test_content_type_from_query_extension(self):
- request = wsgi.Request.blank('/tests/123.xml')
- result = request.best_match_content_type()
- self.assertEqual(result, "application/xml")
-
- request = wsgi.Request.blank('/tests/123.json')
- result = request.best_match_content_type()
- self.assertEqual(result, "application/json")
-
- request = wsgi.Request.blank('/tests/123.invalid')
- result = request.best_match_content_type()
- self.assertEqual(result, "application/json")
-
- def test_content_type_accept_and_query_extension(self):
- request = wsgi.Request.blank('/tests/123.xml')
- request.headers["Accept"] = "application/json"
- result = request.best_match_content_type()
- self.assertEqual(result, "application/xml")
-
- def test_content_type_accept_default(self):
- request = wsgi.Request.blank('/tests/123.unsupported')
- request.headers["Accept"] = "application/unsupported1"
- result = request.best_match_content_type()
- self.assertEqual(result, "application/json")
-
- def test_cache_and_retrieve_instances(self):
- request = wsgi.Request.blank('/foo')
- instances = []
- for x in xrange(3):
- instances.append({'uuid': 'uuid%s' % x})
- # Store 2
- request.cache_db_instances(instances[:2])
- # Store 1
- request.cache_db_instance(instances[2])
- self.assertEqual(request.get_db_instance('uuid0'),
- instances[0])
- self.assertEqual(request.get_db_instance('uuid1'),
- instances[1])
- self.assertEqual(request.get_db_instance('uuid2'),
- instances[2])
- self.assertIsNone(request.get_db_instance('uuid3'))
- self.assertEqual(request.get_db_instances(),
- {'uuid0': instances[0],
- 'uuid1': instances[1],
- 'uuid2': instances[2]})
-
- def test_cache_and_retrieve_compute_nodes(self):
- request = wsgi.Request.blank('/foo')
- compute_nodes = []
- for x in xrange(3):
- compute_nodes.append({'id': 'id%s' % x})
- # Store 2
- request.cache_db_compute_nodes(compute_nodes[:2])
- # Store 1
- request.cache_db_compute_node(compute_nodes[2])
- self.assertEqual(request.get_db_compute_node('id0'),
- compute_nodes[0])
- self.assertEqual(request.get_db_compute_node('id1'),
- compute_nodes[1])
- self.assertEqual(request.get_db_compute_node('id2'),
- compute_nodes[2])
- self.assertIsNone(request.get_db_compute_node('id3'))
- self.assertEqual(request.get_db_compute_nodes(),
- {'id0': compute_nodes[0],
- 'id1': compute_nodes[1],
- 'id2': compute_nodes[2]})
-
- def test_from_request(self):
- self.stubs.Set(i18n, 'get_available_languages',
- fakes.fake_get_available_languages)
-
- request = wsgi.Request.blank('/')
- accepted = 'bogus;q=1.1, en-gb;q=0.7,en-us,en;q=.5,*;q=.7'
- request.headers = {'Accept-Language': accepted}
- self.assertEqual(request.best_match_language(), 'en_US')
-
- def test_asterisk(self):
- # asterisk should match first available if there
- # are not any other available matches
- self.stubs.Set(i18n, 'get_available_languages',
- fakes.fake_get_available_languages)
-
- request = wsgi.Request.blank('/')
- accepted = '*,es;q=.5'
- request.headers = {'Accept-Language': accepted}
- self.assertEqual(request.best_match_language(), 'en_GB')
-
- def test_prefix(self):
- self.stubs.Set(i18n, 'get_available_languages',
- fakes.fake_get_available_languages)
-
- request = wsgi.Request.blank('/')
- accepted = 'zh'
- request.headers = {'Accept-Language': accepted}
- self.assertEqual(request.best_match_language(), 'zh_CN')
-
- def test_secondary(self):
- self.stubs.Set(i18n, 'get_available_languages',
- fakes.fake_get_available_languages)
-
- request = wsgi.Request.blank('/')
- accepted = 'nn,en-gb;q=.5'
- request.headers = {'Accept-Language': accepted}
- self.assertEqual(request.best_match_language(), 'en_GB')
-
- def test_none_found(self):
- self.stubs.Set(i18n, 'get_available_languages',
- fakes.fake_get_available_languages)
-
- request = wsgi.Request.blank('/')
- accepted = 'nb-no'
- request.headers = {'Accept-Language': accepted}
- self.assertIs(request.best_match_language(), None)
-
- def test_no_lang_header(self):
- self.stubs.Set(i18n, 'get_available_languages',
- fakes.fake_get_available_languages)
-
- request = wsgi.Request.blank('/')
- accepted = ''
- request.headers = {'Accept-Language': accepted}
- self.assertIs(request.best_match_language(), None)
-
-
-class ActionDispatcherTest(test.NoDBTestCase):
- def test_dispatch(self):
- serializer = wsgi.ActionDispatcher()
- serializer.create = lambda x: 'pants'
- self.assertEqual(serializer.dispatch({}, action='create'), 'pants')
-
- def test_dispatch_action_None(self):
- serializer = wsgi.ActionDispatcher()
- serializer.create = lambda x: 'pants'
- serializer.default = lambda x: 'trousers'
- self.assertEqual(serializer.dispatch({}, action=None), 'trousers')
-
- def test_dispatch_default(self):
- serializer = wsgi.ActionDispatcher()
- serializer.create = lambda x: 'pants'
- serializer.default = lambda x: 'trousers'
- self.assertEqual(serializer.dispatch({}, action='update'), 'trousers')
-
-
-class DictSerializerTest(test.NoDBTestCase):
- def test_dispatch_default(self):
- serializer = wsgi.DictSerializer()
- self.assertEqual(serializer.serialize({}, 'update'), '')
-
-
-class XMLDictSerializerTest(test.NoDBTestCase):
- def test_xml(self):
- input_dict = dict(servers=dict(a=(2, 3)))
- expected_xml = '<serversxmlns="asdf"><a>(2,3)</a></servers>'
- serializer = wsgi.XMLDictSerializer(xmlns="asdf")
- result = serializer.serialize(input_dict)
- result = result.replace('\n', '').replace(' ', '')
- self.assertEqual(result, expected_xml)
-
- def test_xml_contains_unicode(self):
- input_dict = dict(test=u'\u89e3\u7801')
- expected_xml = '<test>\xe8\xa7\xa3\xe7\xa0\x81</test>'
- serializer = wsgi.XMLDictSerializer()
- result = serializer.serialize(input_dict)
- result = result.replace('\n', '').replace(' ', '')
- self.assertEqual(expected_xml, result)
-
-
-class JSONDictSerializerTest(test.NoDBTestCase):
- def test_json(self):
- input_dict = dict(servers=dict(a=(2, 3)))
- expected_json = '{"servers":{"a":[2,3]}}'
- serializer = wsgi.JSONDictSerializer()
- result = serializer.serialize(input_dict)
- result = result.replace('\n', '').replace(' ', '')
- self.assertEqual(result, expected_json)
-
-
-class TextDeserializerTest(test.NoDBTestCase):
- def test_dispatch_default(self):
- deserializer = wsgi.TextDeserializer()
- self.assertEqual(deserializer.deserialize({}, 'update'), {})
-
-
-class JSONDeserializerTest(test.NoDBTestCase):
- def test_json(self):
- data = """{"a": {
- "a1": "1",
- "a2": "2",
- "bs": ["1", "2", "3", {"c": {"c1": "1"}}],
- "d": {"e": "1"},
- "f": "1"}}"""
- as_dict = {
- 'body': {
- 'a': {
- 'a1': '1',
- 'a2': '2',
- 'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
- 'd': {'e': '1'},
- 'f': '1',
- },
- },
- }
- deserializer = wsgi.JSONDeserializer()
- self.assertEqual(deserializer.deserialize(data), as_dict)
-
- def test_json_valid_utf8(self):
- data = """{"server": {"min_count": 1, "flavorRef": "1",
- "name": "\xe6\xa6\x82\xe5\xbf\xb5",
- "imageRef": "10bab10c-1304-47d",
- "max_count": 1}} """
- as_dict = {
- 'body': {
- u'server': {
- u'min_count': 1, u'flavorRef': u'1',
- u'name': u'\u6982\u5ff5',
- u'imageRef': u'10bab10c-1304-47d',
- u'max_count': 1
- }
- }
- }
- deserializer = wsgi.JSONDeserializer()
- self.assertEqual(deserializer.deserialize(data), as_dict)
-
- def test_json_invalid_utf8(self):
- """Send invalid utf-8 to JSONDeserializer."""
- data = """{"server": {"min_count": 1, "flavorRef": "1",
- "name": "\xf0\x28\x8c\x28",
- "imageRef": "10bab10c-1304-47d",
- "max_count": 1}} """
-
- deserializer = wsgi.JSONDeserializer()
- self.assertRaises(exception.MalformedRequestBody,
- deserializer.deserialize, data)
-
-
-class XMLDeserializerTest(test.NoDBTestCase):
- def test_xml(self):
- xml = """
- <a a1="1" a2="2">
- <bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
- <d><e>1</e></d>
- <f>1</f>
- </a>
- """.strip()
- as_dict = {
- 'body': {
- 'a': {
- 'a1': '1',
- 'a2': '2',
- 'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
- 'd': {'e': '1'},
- 'f': '1',
- },
- },
- }
- metadata = {'plurals': {'bs': 'b', 'ts': 't'}}
- deserializer = wsgi.XMLDeserializer(metadata=metadata)
- self.assertEqual(deserializer.deserialize(xml), as_dict)
-
- def test_xml_empty(self):
- xml = '<a></a>'
- as_dict = {"body": {"a": {}}}
- deserializer = wsgi.XMLDeserializer()
- self.assertEqual(deserializer.deserialize(xml), as_dict)
-
- def test_xml_valid_utf8(self):
- xml = """ <a><name>\xe6\xa6\x82\xe5\xbf\xb5</name></a> """
- deserializer = wsgi.XMLDeserializer()
- as_dict = {'body': {u'a': {u'name': u'\u6982\u5ff5'}}}
- self.assertEqual(deserializer.deserialize(xml), as_dict)
-
- def test_xml_invalid_utf8(self):
- """Send invalid utf-8 to XMLDeserializer."""
- xml = """ <a><name>\xf0\x28\x8c\x28</name></a> """
- deserializer = wsgi.XMLDeserializer()
- self.assertRaises(exception.MalformedRequestBody,
- deserializer.deserialize, xml)
-
-
-class ResourceTest(test.NoDBTestCase):
-
- def get_req_id_header_name(self, request):
- header_name = 'x-openstack-request-id'
- if utils.get_api_version(request) < 3:
- header_name = 'x-compute-request-id'
-
- return header_name
-
- def test_resource_call_with_method_get(self):
- class Controller(object):
- def index(self, req):
- return 'success'
-
- app = fakes.TestRouter(Controller())
- # the default method is GET
- req = webob.Request.blank('/tests')
- response = req.get_response(app)
- self.assertEqual(response.body, 'success')
- self.assertEqual(response.status_int, 200)
- req.body = '{"body": {"key": "value"}}'
- response = req.get_response(app)
- self.assertEqual(response.body, 'success')
- self.assertEqual(response.status_int, 200)
- req.content_type = 'application/json'
- response = req.get_response(app)
- self.assertEqual(response.body, 'success')
- self.assertEqual(response.status_int, 200)
-
- def test_resource_call_with_method_post(self):
- class Controller(object):
- @extensions.expected_errors(400)
- def create(self, req, body):
- if expected_body != body:
- msg = "The request body invalid"
- raise webob.exc.HTTPBadRequest(explanation=msg)
- return "success"
- # verify the method: POST
- app = fakes.TestRouter(Controller())
- req = webob.Request.blank('/tests', method="POST",
- content_type='application/json')
- req.body = '{"body": {"key": "value"}}'
- expected_body = {'body': {
- "key": "value"
- }
- }
- response = req.get_response(app)
- self.assertEqual(response.status_int, 200)
- self.assertEqual(response.body, 'success')
- # verify without body
- expected_body = None
- req.body = None
- response = req.get_response(app)
- self.assertEqual(response.status_int, 200)
- self.assertEqual(response.body, 'success')
- # the body is validated in the controller
- expected_body = {'body': None}
- response = req.get_response(app)
- expected_unsupported_type_body = ('{"badRequest": '
- '{"message": "The request body invalid", "code": 400}}')
- self.assertEqual(response.status_int, 400)
- self.assertEqual(expected_unsupported_type_body, response.body)
-
- def test_resource_call_with_method_put(self):
- class Controller(object):
- def update(self, req, id, body):
- if expected_body != body:
- msg = "The request body invalid"
- raise webob.exc.HTTPBadRequest(explanation=msg)
- return "success"
- # verify the method: PUT
- app = fakes.TestRouter(Controller())
- req = webob.Request.blank('/tests/test_id', method="PUT",
- content_type='application/json')
- req.body = '{"body": {"key": "value"}}'
- expected_body = {'body': {
- "key": "value"
- }
- }
- response = req.get_response(app)
- self.assertEqual(response.body, 'success')
- self.assertEqual(response.status_int, 200)
- req.body = None
- expected_body = None
- response = req.get_response(app)
- self.assertEqual(response.status_int, 200)
- # verify no content_type is contained in the request
- req.content_type = None
- req.body = '{"body": {"key": "value"}}'
- response = req.get_response(app)
- expected_unsupported_type_body = ('{"badRequest": '
- '{"message": "Unsupported Content-Type", "code": 400}}')
- self.assertEqual(response.status_int, 400)
- self.assertEqual(expected_unsupported_type_body, response.body)
-
- def test_resource_call_with_method_delete(self):
- class Controller(object):
- def delete(self, req, id):
- return "success"
-
- # verify the method: DELETE
- app = fakes.TestRouter(Controller())
- req = webob.Request.blank('/tests/test_id', method="DELETE")
- response = req.get_response(app)
- self.assertEqual(response.status_int, 200)
- self.assertEqual(response.body, 'success')
- # ignore the body
- req.body = '{"body": {"key": "value"}}'
- response = req.get_response(app)
- self.assertEqual(response.status_int, 200)
- self.assertEqual(response.body, 'success')
-
- def test_resource_not_authorized(self):
- class Controller(object):
- def index(self, req):
- raise exception.Forbidden()
-
- req = webob.Request.blank('/tests')
- app = fakes.TestRouter(Controller())
- response = req.get_response(app)
- self.assertEqual(response.status_int, 403)
-
- def test_dispatch(self):
- class Controller(object):
- def index(self, req, pants=None):
- return pants
-
- controller = Controller()
- resource = wsgi.Resource(controller)
- method, extensions = resource.get_method(None, 'index', None, '')
- actual = resource.dispatch(method, None, {'pants': 'off'})
- expected = 'off'
- self.assertEqual(actual, expected)
-
- def test_get_method_unknown_controller_method(self):
- class Controller(object):
- def index(self, req, pants=None):
- return pants
-
- controller = Controller()
- resource = wsgi.Resource(controller)
- self.assertRaises(AttributeError, resource.get_method,
- None, 'create', None, '')
-
- def test_get_method_action_json(self):
- class Controller(wsgi.Controller):
- @wsgi.action('fooAction')
- def _action_foo(self, req, id, body):
- return body
-
- controller = Controller()
- resource = wsgi.Resource(controller)
- method, extensions = resource.get_method(None, 'action',
- 'application/json',
- '{"fooAction": true}')
- self.assertEqual(controller._action_foo, method)
-
- def test_get_method_action_xml(self):
- class Controller(wsgi.Controller):
- @wsgi.action('fooAction')
- def _action_foo(self, req, id, body):
- return body
-
- controller = Controller()
- resource = wsgi.Resource(controller)
- method, extensions = resource.get_method(None, 'action',
- 'application/xml',
- '<fooAction>true</fooAction>')
- self.assertEqual(controller._action_foo, method)
-
- def test_get_method_action_corrupt_xml(self):
- class Controller(wsgi.Controller):
- @wsgi.action('fooAction')
- def _action_foo(self, req, id, body):
- return body
-
- controller = Controller()
- resource = wsgi.Resource(controller)
- self.assertRaises(
- exception.MalformedRequestBody,
- resource.get_method,
- None, 'action',
- 'application/xml',
- utils.killer_xml_body())
-
- def test_get_method_action_bad_body(self):
- class Controller(wsgi.Controller):
- @wsgi.action('fooAction')
- def _action_foo(self, req, id, body):
- return body
-
- controller = Controller()
- resource = wsgi.Resource(controller)
- self.assertRaises(exception.MalformedRequestBody, resource.get_method,
- None, 'action', 'application/json', '{}')
-
- def test_get_method_unknown_controller_action(self):
- class Controller(wsgi.Controller):
- @wsgi.action('fooAction')
- def _action_foo(self, req, id, body):
- return body
-
- controller = Controller()
- resource = wsgi.Resource(controller)
- self.assertRaises(KeyError, resource.get_method,
- None, 'action', 'application/json',
- '{"barAction": true}')
-
- def test_get_method_action_method(self):
- class Controller():
- def action(self, req, pants=None):
- return pants
-
- controller = Controller()
- resource = wsgi.Resource(controller)
- method, extensions = resource.get_method(None, 'action',
- 'application/xml',
- '<fooAction>true</fooAction')
- self.assertEqual(controller.action, method)
-
- def test_get_action_args(self):
- class Controller(object):
- def index(self, req, pants=None):
- return pants
-
- controller = Controller()
- resource = wsgi.Resource(controller)
-
- env = {
- 'wsgiorg.routing_args': [None, {
- 'controller': None,
- 'format': None,
- 'action': 'update',
- 'id': 12,
- }],
- }
-
- expected = {'action': 'update', 'id': 12}
-
- self.assertEqual(resource.get_action_args(env), expected)
-
- def test_get_body_bad_content(self):
- class Controller(object):
- def index(self, req, pants=None):
- return pants
-
- controller = Controller()
- resource = wsgi.Resource(controller)
-
- request = wsgi.Request.blank('/', method='POST')
- request.headers['Content-Type'] = 'application/none'
- request.body = 'foo'
-
- content_type, body = resource.get_body(request)
- self.assertIsNone(content_type)
- self.assertEqual(body, '')
-
- def test_get_body_no_content_type(self):
- class Controller(object):
- def index(self, req, pants=None):
- return pants
-
- controller = Controller()
- resource = wsgi.Resource(controller)
-
- request = wsgi.Request.blank('/', method='POST')
- request.body = 'foo'
-
- content_type, body = resource.get_body(request)
- self.assertIsNone(content_type)
- self.assertEqual(body, 'foo')
-
- def test_get_body_no_content_body(self):
- class Controller(object):
- def index(self, req, pants=None):
- return pants
-
- controller = Controller()
- resource = wsgi.Resource(controller)
-
- request = wsgi.Request.blank('/', method='POST')
- request.headers['Content-Type'] = 'application/json'
- request.body = ''
-
- content_type, body = resource.get_body(request)
- self.assertEqual('application/json', content_type)
- self.assertEqual(body, '')
-
- def test_get_body(self):
- class Controller(object):
- def index(self, req, pants=None):
- return pants
-
- controller = Controller()
- resource = wsgi.Resource(controller)
-
- request = wsgi.Request.blank('/', method='POST')
- request.headers['Content-Type'] = 'application/json'
- request.body = 'foo'
-
- content_type, body = resource.get_body(request)
- self.assertEqual(content_type, 'application/json')
- self.assertEqual(body, 'foo')
-
- def test_get_request_id_with_dict_response_body(self):
- class Controller(wsgi.Controller):
- def index(self, req):
- return {'foo': 'bar'}
-
- req = fakes.HTTPRequest.blank('/tests')
- app = fakes.TestRouter(Controller())
- response = req.get_response(app)
- self.assertIn('nova.context', req.environ)
- self.assertEqual(response.body, '{"foo": "bar"}')
- self.assertEqual(response.status_int, 200)
-
- def test_no_request_id_with_str_response_body(self):
- class Controller(wsgi.Controller):
- def index(self, req):
- return 'foo'
-
- req = fakes.HTTPRequest.blank('/tests')
- app = fakes.TestRouter(Controller())
- response = req.get_response(app)
- # NOTE(alaski): This test is really to ensure that a str response
- # doesn't error. Not having a request_id header is a side effect of
- # our wsgi setup, ideally it would be there.
- expected_header = self.get_req_id_header_name(req)
- self.assertFalse(hasattr(response.headers, expected_header))
- self.assertEqual(response.body, 'foo')
- self.assertEqual(response.status_int, 200)
-
- def test_get_request_id_no_response_body(self):
- class Controller(object):
- def index(self, req):
- pass
-
- req = fakes.HTTPRequest.blank('/tests')
- app = fakes.TestRouter(Controller())
- response = req.get_response(app)
- self.assertIn('nova.context', req.environ)
- self.assertEqual(response.body, '')
- self.assertEqual(response.status_int, 200)
-
- def test_deserialize_badtype(self):
- class Controller(object):
- def index(self, req, pants=None):
- return pants
-
- controller = Controller()
- resource = wsgi.Resource(controller)
- self.assertRaises(exception.InvalidContentType,
- resource.deserialize,
- controller.index, 'application/none', 'foo')
-
- def test_deserialize_default(self):
- class JSONDeserializer(object):
- def deserialize(self, body):
- return 'json'
-
- class XMLDeserializer(object):
- def deserialize(self, body):
- return 'xml'
-
- class Controller(object):
- @wsgi.deserializers(xml=XMLDeserializer)
- def index(self, req, pants=None):
- return pants
-
- controller = Controller()
- resource = wsgi.Resource(controller, json=JSONDeserializer)
-
- obj = resource.deserialize(controller.index, 'application/json', 'foo')
- self.assertEqual(obj, 'json')
-
- def test_deserialize_decorator(self):
- class JSONDeserializer(object):
- def deserialize(self, body):
- return 'json'
-
- class XMLDeserializer(object):
- def deserialize(self, body):
- return 'xml'
-
- class Controller(object):
- @wsgi.deserializers(xml=XMLDeserializer)
- def index(self, req, pants=None):
- return pants
-
- controller = Controller()
- resource = wsgi.Resource(controller, json=JSONDeserializer)
-
- obj = resource.deserialize(controller.index, 'application/xml', 'foo')
- self.assertEqual(obj, 'xml')
-
- def test_register_actions(self):
- class Controller(object):
- def index(self, req, pants=None):
- return pants
-
- class ControllerExtended(wsgi.Controller):
- @wsgi.action('fooAction')
- def _action_foo(self, req, id, body):
- return body
-
- @wsgi.action('barAction')
- def _action_bar(self, req, id, body):
- return body
-
- controller = Controller()
- resource = wsgi.Resource(controller)
- self.assertEqual({}, resource.wsgi_actions)
-
- extended = ControllerExtended()
- resource.register_actions(extended)
- self.assertEqual({
- 'fooAction': extended._action_foo,
- 'barAction': extended._action_bar,
- }, resource.wsgi_actions)
-
- def test_register_extensions(self):
- class Controller(object):
- def index(self, req, pants=None):
- return pants
-
- class ControllerExtended(wsgi.Controller):
- @wsgi.extends
- def index(self, req, resp_obj, pants=None):
- return None
-
- @wsgi.extends(action='fooAction')
- def _action_foo(self, req, resp, id, body):
- return None
-
- controller = Controller()
- resource = wsgi.Resource(controller)
- self.assertEqual({}, resource.wsgi_extensions)
- self.assertEqual({}, resource.wsgi_action_extensions)
-
- extended = ControllerExtended()
- resource.register_extensions(extended)
- self.assertEqual({'index': [extended.index]}, resource.wsgi_extensions)
- self.assertEqual({'fooAction': [extended._action_foo]},
- resource.wsgi_action_extensions)
-
- def test_get_method_extensions(self):
- class Controller(object):
- def index(self, req, pants=None):
- return pants
-
- class ControllerExtended(wsgi.Controller):
- @wsgi.extends
- def index(self, req, resp_obj, pants=None):
- return None
-
- controller = Controller()
- extended = ControllerExtended()
- resource = wsgi.Resource(controller)
- resource.register_extensions(extended)
- method, extensions = resource.get_method(None, 'index', None, '')
- self.assertEqual(method, controller.index)
- self.assertEqual(extensions, [extended.index])
-
- def test_get_method_action_extensions(self):
- class Controller(wsgi.Controller):
- def index(self, req, pants=None):
- return pants
-
- @wsgi.action('fooAction')
- def _action_foo(self, req, id, body):
- return body
-
- class ControllerExtended(wsgi.Controller):
- @wsgi.extends(action='fooAction')
- def _action_foo(self, req, resp_obj, id, body):
- return None
-
- controller = Controller()
- extended = ControllerExtended()
- resource = wsgi.Resource(controller)
- resource.register_extensions(extended)
- method, extensions = resource.get_method(None, 'action',
- 'application/json',
- '{"fooAction": true}')
- self.assertEqual(method, controller._action_foo)
- self.assertEqual(extensions, [extended._action_foo])
-
- def test_get_method_action_whitelist_extensions(self):
- class Controller(wsgi.Controller):
- def index(self, req, pants=None):
- return pants
-
- class ControllerExtended(wsgi.Controller):
- @wsgi.action('create')
- def _create(self, req, body):
- pass
-
- @wsgi.action('delete')
- def _delete(self, req, id):
- pass
-
- controller = Controller()
- extended = ControllerExtended()
- resource = wsgi.Resource(controller)
- resource.register_actions(extended)
-
- method, extensions = resource.get_method(None, 'create',
- 'application/json',
- '{"create": true}')
- self.assertEqual(method, extended._create)
- self.assertEqual(extensions, [])
-
- method, extensions = resource.get_method(None, 'delete', None, None)
- self.assertEqual(method, extended._delete)
- self.assertEqual(extensions, [])
-
- def test_pre_process_extensions_regular(self):
- class Controller(object):
- def index(self, req, pants=None):
- return pants
-
- controller = Controller()
- resource = wsgi.Resource(controller)
-
- called = []
-
- def extension1(req, resp_obj):
- called.append(1)
- return None
-
- def extension2(req, resp_obj):
- called.append(2)
- return None
-
- extensions = [extension1, extension2]
- response, post = resource.pre_process_extensions(extensions, None, {})
- self.assertEqual(called, [])
- self.assertIsNone(response)
- self.assertEqual(list(post), [extension2, extension1])
-
- def test_pre_process_extensions_generator(self):
- class Controller(object):
- def index(self, req, pants=None):
- return pants
-
- controller = Controller()
- resource = wsgi.Resource(controller)
-
- called = []
-
- def extension1(req):
- called.append('pre1')
- yield
- called.append('post1')
-
- def extension2(req):
- called.append('pre2')
- yield
- called.append('post2')
-
- extensions = [extension1, extension2]
- response, post = resource.pre_process_extensions(extensions, None, {})
- post = list(post)
- self.assertEqual(called, ['pre1', 'pre2'])
- self.assertIsNone(response)
- self.assertEqual(len(post), 2)
- self.assertTrue(inspect.isgenerator(post[0]))
- self.assertTrue(inspect.isgenerator(post[1]))
-
- for gen in post:
- try:
- gen.send(None)
- except StopIteration:
- continue
-
- self.assertEqual(called, ['pre1', 'pre2', 'post2', 'post1'])
-
- def test_pre_process_extensions_generator_response(self):
- class Controller(object):
- def index(self, req, pants=None):
- return pants
-
- controller = Controller()
- resource = wsgi.Resource(controller)
-
- called = []
-
- def extension1(req):
- called.append('pre1')
- yield 'foo'
-
- def extension2(req):
- called.append('pre2')
-
- extensions = [extension1, extension2]
- response, post = resource.pre_process_extensions(extensions, None, {})
- self.assertEqual(called, ['pre1'])
- self.assertEqual(response, 'foo')
- self.assertEqual(post, [])
-
- def test_post_process_extensions_regular(self):
- class Controller(object):
- def index(self, req, pants=None):
- return pants
-
- controller = Controller()
- resource = wsgi.Resource(controller)
-
- called = []
-
- def extension1(req, resp_obj):
- called.append(1)
- return None
-
- def extension2(req, resp_obj):
- called.append(2)
- return None
-
- response = resource.post_process_extensions([extension2, extension1],
- None, None, {})
- self.assertEqual(called, [2, 1])
- self.assertIsNone(response)
-
- def test_post_process_extensions_regular_response(self):
- class Controller(object):
- def index(self, req, pants=None):
- return pants
-
- controller = Controller()
- resource = wsgi.Resource(controller)
-
- called = []
-
- def extension1(req, resp_obj):
- called.append(1)
- return None
-
- def extension2(req, resp_obj):
- called.append(2)
- return 'foo'
-
- response = resource.post_process_extensions([extension2, extension1],
- None, None, {})
- self.assertEqual(called, [2])
- self.assertEqual(response, 'foo')
-
- def test_post_process_extensions_generator(self):
- class Controller(object):
- def index(self, req, pants=None):
- return pants
-
- controller = Controller()
- resource = wsgi.Resource(controller)
-
- called = []
-
- def extension1(req):
- yield
- called.append(1)
-
- def extension2(req):
- yield
- called.append(2)
-
- ext1 = extension1(None)
- ext1.next()
- ext2 = extension2(None)
- ext2.next()
-
- response = resource.post_process_extensions([ext2, ext1],
- None, None, {})
-
- self.assertEqual(called, [2, 1])
- self.assertIsNone(response)
-
- def test_post_process_extensions_generator_response(self):
- class Controller(object):
- def index(self, req, pants=None):
- return pants
-
- controller = Controller()
- resource = wsgi.Resource(controller)
-
- called = []
-
- def extension1(req):
- yield
- called.append(1)
-
- def extension2(req):
- yield
- called.append(2)
- yield 'foo'
-
- ext1 = extension1(None)
- ext1.next()
- ext2 = extension2(None)
- ext2.next()
-
- response = resource.post_process_extensions([ext2, ext1],
- None, None, {})
-
- self.assertEqual(called, [2])
- self.assertEqual(response, 'foo')
-
- def test_resource_exception_handler_type_error(self):
- # A TypeError should be translated to a Fault/HTTP 400.
- def foo(a,):
- return a
-
- try:
- with wsgi.ResourceExceptionHandler():
- foo() # generate a TypeError
- self.fail("Should have raised a Fault (HTTP 400)")
- except wsgi.Fault as fault:
- self.assertEqual(400, fault.status_int)
-
- def test_resource_headers_are_utf8(self):
- resp = webob.Response(status_int=202)
- resp.headers['x-header1'] = 1
- resp.headers['x-header2'] = u'header2'
- resp.headers['x-header3'] = u'header3'
-
- class Controller(object):
- def index(self, req):
- return resp
-
- req = webob.Request.blank('/tests')
- app = fakes.TestRouter(Controller())
- response = req.get_response(app)
-
- for hdr, val in response.headers.iteritems():
- # All headers must be utf8
- self.assertIsInstance(hdr, str)
- self.assertIsInstance(val, str)
- self.assertEqual(response.headers['x-header1'], '1')
- self.assertEqual(response.headers['x-header2'], 'header2')
- self.assertEqual(response.headers['x-header3'], 'header3')
-
- def test_resource_valid_utf8_body(self):
- class Controller(object):
- def update(self, req, id, body):
- return body
-
- req = webob.Request.blank('/tests/test_id', method="PUT")
- body = """ {"name": "\xe6\xa6\x82\xe5\xbf\xb5" } """
- expected_body = '{"name": "\\u6982\\u5ff5"}'
- req.body = body
- req.headers['Content-Type'] = 'application/json'
- app = fakes.TestRouter(Controller())
- response = req.get_response(app)
- self.assertEqual(response.body, expected_body)
- self.assertEqual(response.status_int, 200)
-
- def test_resource_invalid_utf8(self):
- class Controller(object):
- def update(self, req, id, body):
- return body
-
- req = webob.Request.blank('/tests/test_id', method="PUT")
- body = """ {"name": "\xf0\x28\x8c\x28" } """
- req.body = body
- req.headers['Content-Type'] = 'application/json'
- app = fakes.TestRouter(Controller())
- self.assertRaises(UnicodeDecodeError, req.get_response, app)
-
-
-class ResponseObjectTest(test.NoDBTestCase):
- def test_default_code(self):
- robj = wsgi.ResponseObject({})
- self.assertEqual(robj.code, 200)
-
- def test_modified_code(self):
- robj = wsgi.ResponseObject({})
- robj._default_code = 202
- self.assertEqual(robj.code, 202)
-
- def test_override_default_code(self):
- robj = wsgi.ResponseObject({}, code=404)
- self.assertEqual(robj.code, 404)
-
- def test_override_modified_code(self):
- robj = wsgi.ResponseObject({}, code=404)
- robj._default_code = 202
- self.assertEqual(robj.code, 404)
-
- def test_set_header(self):
- robj = wsgi.ResponseObject({})
- robj['Header'] = 'foo'
- self.assertEqual(robj.headers, {'header': 'foo'})
-
- def test_get_header(self):
- robj = wsgi.ResponseObject({})
- robj['Header'] = 'foo'
- self.assertEqual(robj['hEADER'], 'foo')
-
- def test_del_header(self):
- robj = wsgi.ResponseObject({})
- robj['Header'] = 'foo'
- del robj['hEADER']
- self.assertNotIn('header', robj.headers)
-
- def test_header_isolation(self):
- robj = wsgi.ResponseObject({})
- robj['Header'] = 'foo'
- hdrs = robj.headers
- hdrs['hEADER'] = 'bar'
- self.assertEqual(robj['hEADER'], 'foo')
-
- def test_default_serializers(self):
- robj = wsgi.ResponseObject({})
- self.assertEqual(robj.serializers, {})
-
- def test_bind_serializers(self):
- robj = wsgi.ResponseObject({}, json='foo')
- robj._bind_method_serializers(dict(xml='bar', json='baz'))
- self.assertEqual(robj.serializers, dict(xml='bar', json='foo'))
-
- def test_get_serializer(self):
- robj = wsgi.ResponseObject({}, json='json', xml='xml', atom='atom')
- for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
- _mtype, serializer = robj.get_serializer(content_type)
- self.assertEqual(serializer, mtype)
-
- def test_get_serializer_defaults(self):
- robj = wsgi.ResponseObject({})
- default_serializers = dict(json='json', xml='xml', atom='atom')
- for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
- self.assertRaises(exception.InvalidContentType,
- robj.get_serializer, content_type)
- _mtype, serializer = robj.get_serializer(content_type,
- default_serializers)
- self.assertEqual(serializer, mtype)
-
- def test_serialize(self):
- class JSONSerializer(object):
- def serialize(self, obj):
- return 'json'
-
- class XMLSerializer(object):
- def serialize(self, obj):
- return 'xml'
-
- class AtomSerializer(object):
- def serialize(self, obj):
- return 'atom'
-
- robj = wsgi.ResponseObject({}, code=202,
- json=JSONSerializer,
- xml=XMLSerializer,
- atom=AtomSerializer)
- robj['X-header1'] = 'header1'
- robj['X-header2'] = 'header2'
- robj['X-header3'] = 3
- robj['X-header-unicode'] = u'header-unicode'
-
- for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
- request = wsgi.Request.blank('/tests/123')
- response = robj.serialize(request, content_type)
-
- self.assertEqual(response.headers['Content-Type'], content_type)
- for hdr, val in response.headers.iteritems():
- # All headers must be utf8
- self.assertIsInstance(hdr, str)
- self.assertIsInstance(val, str)
- self.assertEqual(response.headers['X-header1'], 'header1')
- self.assertEqual(response.headers['X-header2'], 'header2')
- self.assertEqual(response.headers['X-header3'], '3')
- self.assertEqual(response.status_int, 202)
- self.assertEqual(response.body, mtype)
-
-
-class ValidBodyTest(test.NoDBTestCase):
-
- def setUp(self):
- super(ValidBodyTest, self).setUp()
- self.controller = wsgi.Controller()
-
- def test_is_valid_body(self):
- body = {'foo': {}}
- self.assertTrue(self.controller.is_valid_body(body, 'foo'))
-
- def test_is_valid_body_none(self):
- wsgi.Resource(controller=None)
- self.assertFalse(self.controller.is_valid_body(None, 'foo'))
-
- def test_is_valid_body_empty(self):
- wsgi.Resource(controller=None)
- self.assertFalse(self.controller.is_valid_body({}, 'foo'))
-
- def test_is_valid_body_no_entity(self):
- wsgi.Resource(controller=None)
- body = {'bar': {}}
- self.assertFalse(self.controller.is_valid_body(body, 'foo'))
-
- def test_is_valid_body_malformed_entity(self):
- wsgi.Resource(controller=None)
- body = {'foo': 'bar'}
- self.assertFalse(self.controller.is_valid_body(body, 'foo'))
diff --git a/nova/tests/api/openstack/test_xmlutil.py b/nova/tests/api/openstack/test_xmlutil.py
deleted file mode 100644
index 903340c8d6..0000000000
--- a/nova/tests/api/openstack/test_xmlutil.py
+++ /dev/null
@@ -1,948 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from xml.dom import minidom
-
-from lxml import etree
-
-from nova.api.openstack import xmlutil
-from nova import exception
-from nova import test
-from nova.tests import utils as tests_utils
-
-
-class SelectorTest(test.NoDBTestCase):
- obj_for_test = {
- 'test': {
- 'name': 'test',
- 'values': [1, 2, 3],
- 'attrs': {
- 'foo': 1,
- 'bar': 2,
- 'baz': 3,
- },
- },
- }
-
- def test_repr(self):
- sel = xmlutil.Selector()
- self.assertEqual(repr(sel), "Selector()")
-
- def test_empty_selector(self):
- sel = xmlutil.EmptyStringSelector()
- self.assertEqual(len(sel.chain), 0)
- self.assertEqual(sel(self.obj_for_test), self.obj_for_test)
- self.assertEqual(
- repr(self.obj_for_test),
- "{'test': {'values': [1, 2, 3], 'name': 'test', 'attrs': "
- "{'baz': 3, 'foo': 1, 'bar': 2}}}")
-
- def test_dict_selector(self):
- sel = xmlutil.Selector('test')
- self.assertEqual(len(sel.chain), 1)
- self.assertEqual(sel.chain[0], 'test')
- self.assertEqual(sel(self.obj_for_test),
- self.obj_for_test['test'])
-
- def test_datum_selector(self):
- sel = xmlutil.Selector('test', 'name')
- self.assertEqual(len(sel.chain), 2)
- self.assertEqual(sel.chain[0], 'test')
- self.assertEqual(sel.chain[1], 'name')
- self.assertEqual(sel(self.obj_for_test), 'test')
-
- def test_list_selector(self):
- sel = xmlutil.Selector('test', 'values', 0)
- self.assertEqual(len(sel.chain), 3)
- self.assertEqual(sel.chain[0], 'test')
- self.assertEqual(sel.chain[1], 'values')
- self.assertEqual(sel.chain[2], 0)
- self.assertEqual(sel(self.obj_for_test), 1)
-
- def test_items_selector(self):
- sel = xmlutil.Selector('test', 'attrs', xmlutil.get_items)
- self.assertEqual(len(sel.chain), 3)
- self.assertEqual(sel.chain[2], xmlutil.get_items)
- for key, val in sel(self.obj_for_test):
- self.assertEqual(self.obj_for_test['test']['attrs'][key], val)
-
- def test_missing_key_selector(self):
- sel = xmlutil.Selector('test2', 'attrs')
- self.assertIsNone(sel(self.obj_for_test))
- self.assertRaises(KeyError, sel, self.obj_for_test, True)
-
- def test_constant_selector(self):
- sel = xmlutil.ConstantSelector('Foobar')
- self.assertEqual(sel.value, 'Foobar')
- self.assertEqual(sel(self.obj_for_test), 'Foobar')
- self.assertEqual(repr(sel), "'Foobar'")
-
-
-class TemplateElementTest(test.NoDBTestCase):
- def test_element_initial_attributes(self):
- # Create a template element with some attributes
- elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3),
- c=4, d=5, e=6)
-
- # Verify all the attributes are as expected
- expected = dict(a=1, b=2, c=4, d=5, e=6)
- for k, v in expected.items():
- self.assertEqual(elem.attrib[k].chain[0], v)
- self.assertTrue(repr(elem))
-
- def test_element_get_attributes(self):
- expected = dict(a=1, b=2, c=3)
-
- # Create a template element with some attributes
- elem = xmlutil.TemplateElement('test', attrib=expected)
-
- # Verify that get() retrieves the attributes
- for k, v in expected.items():
- self.assertEqual(elem.get(k).chain[0], v)
-
- def test_element_set_attributes(self):
- attrs = dict(a=None, b='foo', c=xmlutil.Selector('foo', 'bar'))
-
- # Create a bare template element with no attributes
- elem = xmlutil.TemplateElement('test')
-
- # Set the attribute values
- for k, v in attrs.items():
- elem.set(k, v)
-
- # Now verify what got set
- self.assertEqual(len(elem.attrib['a'].chain), 1)
- self.assertEqual(elem.attrib['a'].chain[0], 'a')
- self.assertEqual(len(elem.attrib['b'].chain), 1)
- self.assertEqual(elem.attrib['b'].chain[0], 'foo')
- self.assertEqual(elem.attrib['c'], attrs['c'])
-
- def test_element_attribute_keys(self):
- attrs = dict(a=1, b=2, c=3, d=4)
- expected = set(attrs.keys())
-
- # Create a template element with some attributes
- elem = xmlutil.TemplateElement('test', attrib=attrs)
-
- # Now verify keys
- self.assertEqual(set(elem.keys()), expected)
-
- def test_element_attribute_items(self):
- expected = dict(a=xmlutil.Selector(1),
- b=xmlutil.Selector(2),
- c=xmlutil.Selector(3))
- keys = set(expected.keys())
-
- # Create a template element with some attributes
- elem = xmlutil.TemplateElement('test', attrib=expected)
-
- # Now verify items
- for k, v in elem.items():
- self.assertEqual(expected[k], v)
- keys.remove(k)
-
- # Did we visit all keys?
- self.assertEqual(len(keys), 0)
-
- def test_element_selector_none(self):
- # Create a template element with no selector
- elem = xmlutil.TemplateElement('test')
-
- self.assertEqual(len(elem.selector.chain), 0)
-
- def test_element_selector_string(self):
- # Create a template element with a string selector
- elem = xmlutil.TemplateElement('test', selector='test')
-
- self.assertEqual(len(elem.selector.chain), 1)
- self.assertEqual(elem.selector.chain[0], 'test')
-
- def test_element_selector(self):
- sel = xmlutil.Selector('a', 'b')
-
- # Create a template element with an explicit selector
- elem = xmlutil.TemplateElement('test', selector=sel)
-
- self.assertEqual(elem.selector, sel)
-
- def test_element_subselector_none(self):
- # Create a template element with no subselector
- elem = xmlutil.TemplateElement('test')
-
- self.assertIsNone(elem.subselector)
-
- def test_element_subselector_string(self):
- # Create a template element with a string subselector
- elem = xmlutil.TemplateElement('test', subselector='test')
-
- self.assertEqual(len(elem.subselector.chain), 1)
- self.assertEqual(elem.subselector.chain[0], 'test')
-
- def test_element_subselector(self):
- sel = xmlutil.Selector('a', 'b')
-
- # Create a template element with an explicit subselector
- elem = xmlutil.TemplateElement('test', subselector=sel)
-
- self.assertEqual(elem.subselector, sel)
-
- def test_element_append_child(self):
- # Create an element
- elem = xmlutil.TemplateElement('test')
-
- # Make sure the element starts off empty
- self.assertEqual(len(elem), 0)
-
- # Create a child element
- child = xmlutil.TemplateElement('child')
-
- # Append the child to the parent
- elem.append(child)
-
- # Verify that the child was added
- self.assertEqual(len(elem), 1)
- self.assertEqual(elem[0], child)
- self.assertIn('child', elem)
- self.assertEqual(elem['child'], child)
-
- # Ensure that multiple children of the same name are rejected
- child2 = xmlutil.TemplateElement('child')
- self.assertRaises(KeyError, elem.append, child2)
-
- def test_element_extend_children(self):
- # Create an element
- elem = xmlutil.TemplateElement('test')
-
- # Make sure the element starts off empty
- self.assertEqual(len(elem), 0)
-
- # Create a few children
- children = [
- xmlutil.TemplateElement('child1'),
- xmlutil.TemplateElement('child2'),
- xmlutil.TemplateElement('child3'),
- ]
-
- # Extend the parent by those children
- elem.extend(children)
-
- # Verify that the children were added
- self.assertEqual(len(elem), 3)
- for idx in range(len(elem)):
- self.assertEqual(children[idx], elem[idx])
- self.assertIn(children[idx].tag, elem)
- self.assertEqual(elem[children[idx].tag], children[idx])
-
- # Ensure that multiple children of the same name are rejected
- children2 = [
- xmlutil.TemplateElement('child4'),
- xmlutil.TemplateElement('child1'),
- ]
- self.assertRaises(KeyError, elem.extend, children2)
-
- # Also ensure that child4 was not added
- self.assertEqual(len(elem), 3)
- self.assertEqual(elem[-1].tag, 'child3')
-
- def test_element_insert_child(self):
- # Create an element
- elem = xmlutil.TemplateElement('test')
-
- # Make sure the element starts off empty
- self.assertEqual(len(elem), 0)
-
- # Create a few children
- children = [
- xmlutil.TemplateElement('child1'),
- xmlutil.TemplateElement('child2'),
- xmlutil.TemplateElement('child3'),
- ]
-
- # Extend the parent by those children
- elem.extend(children)
-
- # Create a child to insert
- child = xmlutil.TemplateElement('child4')
-
- # Insert it
- elem.insert(1, child)
-
- # Ensure the child was inserted in the right place
- self.assertEqual(len(elem), 4)
- children.insert(1, child)
- for idx in range(len(elem)):
- self.assertEqual(children[idx], elem[idx])
- self.assertIn(children[idx].tag, elem)
- self.assertEqual(elem[children[idx].tag], children[idx])
-
- # Ensure that multiple children of the same name are rejected
- child2 = xmlutil.TemplateElement('child2')
- self.assertRaises(KeyError, elem.insert, 2, child2)
-
- def test_element_remove_child(self):
- # Create an element
- elem = xmlutil.TemplateElement('test')
-
- # Make sure the element starts off empty
- self.assertEqual(len(elem), 0)
-
- # Create a few children
- children = [
- xmlutil.TemplateElement('child1'),
- xmlutil.TemplateElement('child2'),
- xmlutil.TemplateElement('child3'),
- ]
-
- # Extend the parent by those children
- elem.extend(children)
-
- # Create a test child to remove
- child = xmlutil.TemplateElement('child2')
-
- # Try to remove it
- self.assertRaises(ValueError, elem.remove, child)
-
- # Ensure that no child was removed
- self.assertEqual(len(elem), 3)
-
- # Now remove a legitimate child
- elem.remove(children[1])
-
- # Ensure that the child was removed
- self.assertEqual(len(elem), 2)
- self.assertEqual(elem[0], children[0])
- self.assertEqual(elem[1], children[2])
- self.assertEqual('child2' in elem, False)
-
- # Ensure the child cannot be retrieved by name
- def get_key(elem, key):
- return elem[key]
- self.assertRaises(KeyError, get_key, elem, 'child2')
-
- def test_element_text(self):
- # Create an element
- elem = xmlutil.TemplateElement('test')
-
- # Ensure that it has no text
- self.assertIsNone(elem.text)
-
- # Try setting it to a string and ensure it becomes a selector
- elem.text = 'test'
- self.assertEqual(hasattr(elem.text, 'chain'), True)
- self.assertEqual(len(elem.text.chain), 1)
- self.assertEqual(elem.text.chain[0], 'test')
-
- # Try resetting the text to None
- elem.text = None
- self.assertIsNone(elem.text)
-
- # Now make up a selector and try setting the text to that
- sel = xmlutil.Selector()
- elem.text = sel
- self.assertEqual(elem.text, sel)
-
- # Finally, try deleting the text and see what happens
- del elem.text
- self.assertIsNone(elem.text)
-
- def test_apply_attrs(self):
- # Create a template element
- attrs = dict(attr1=xmlutil.ConstantSelector(1),
- attr2=xmlutil.ConstantSelector(2))
- tmpl_elem = xmlutil.TemplateElement('test', attrib=attrs)
-
- # Create an etree element
- elem = etree.Element('test')
-
- # Apply the template to the element
- tmpl_elem.apply(elem, None)
-
- # Now, verify the correct attributes were set
- for k, v in elem.items():
- self.assertEqual(str(attrs[k].value), v)
-
- def test_apply_text(self):
- # Create a template element
- tmpl_elem = xmlutil.TemplateElement('test')
- tmpl_elem.text = xmlutil.ConstantSelector(1)
-
- # Create an etree element
- elem = etree.Element('test')
-
- # Apply the template to the element
- tmpl_elem.apply(elem, None)
-
- # Now, verify the text was set
- self.assertEqual(str(tmpl_elem.text.value), elem.text)
-
- def test__render(self):
- attrs = dict(attr1=xmlutil.ConstantSelector(1),
- attr2=xmlutil.ConstantSelector(2),
- attr3=xmlutil.ConstantSelector(3))
-
- # Create a master template element
- master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
-
- # Create a couple of slave template element
- slave_elems = [
- xmlutil.TemplateElement('test', attr2=attrs['attr2']),
- xmlutil.TemplateElement('test', attr3=attrs['attr3']),
- ]
-
- # Try the render
- elem = master_elem._render(None, None, slave_elems, None)
-
- # Verify the particulars of the render
- self.assertEqual(elem.tag, 'test')
- self.assertEqual(len(elem.nsmap), 0)
- for k, v in elem.items():
- self.assertEqual(str(attrs[k].value), v)
-
- # Create a parent for the element to be rendered
- parent = etree.Element('parent')
-
- # Try the render again...
- elem = master_elem._render(parent, None, slave_elems, dict(a='foo'))
-
- # Verify the particulars of the render
- self.assertEqual(len(parent), 1)
- self.assertEqual(parent[0], elem)
- self.assertEqual(len(elem.nsmap), 1)
- self.assertEqual(elem.nsmap['a'], 'foo')
-
- def test_render(self):
- # Create a template element
- tmpl_elem = xmlutil.TemplateElement('test')
- tmpl_elem.text = xmlutil.Selector()
-
- # Create the object we're going to render
- obj = ['elem1', 'elem2', 'elem3', 'elem4']
-
- # Try a render with no object
- elems = tmpl_elem.render(None, None)
- self.assertEqual(len(elems), 0)
-
- # Try a render with one object
- elems = tmpl_elem.render(None, 'foo')
- self.assertEqual(len(elems), 1)
- self.assertEqual(elems[0][0].text, 'foo')
- self.assertEqual(elems[0][1], 'foo')
-
- # Now, try rendering an object with multiple entries
- parent = etree.Element('parent')
- elems = tmpl_elem.render(parent, obj)
- self.assertEqual(len(elems), 4)
-
- # Check the results
- for idx in range(len(obj)):
- self.assertEqual(elems[idx][0].text, obj[idx])
- self.assertEqual(elems[idx][1], obj[idx])
-
- # Check with a subselector
- tmpl_elem = xmlutil.TemplateElement(
- 'test',
- subselector=xmlutil.ConstantSelector('foo'))
- parent = etree.Element('parent')
-
- # Try a render with no object
- elems = tmpl_elem.render(parent, obj)
- self.assertEqual(len(elems), 4)
-
- def test_subelement(self):
- # Try the SubTemplateElement constructor
- parent = xmlutil.SubTemplateElement(None, 'parent')
- self.assertEqual(parent.tag, 'parent')
- self.assertEqual(len(parent), 0)
-
- # Now try it with a parent element
- child = xmlutil.SubTemplateElement(parent, 'child')
- self.assertEqual(child.tag, 'child')
- self.assertEqual(len(parent), 1)
- self.assertEqual(parent[0], child)
-
- def test_wrap(self):
- # These are strange methods, but they make things easier
- elem = xmlutil.TemplateElement('test')
- self.assertEqual(elem.unwrap(), elem)
- self.assertEqual(elem.wrap().root, elem)
-
- def test_dyntag(self):
- obj = ['a', 'b', 'c']
-
- # Create a template element with a dynamic tag
- tmpl_elem = xmlutil.TemplateElement(xmlutil.Selector())
-
- # Try the render
- parent = etree.Element('parent')
- elems = tmpl_elem.render(parent, obj)
-
- # Verify the particulars of the render
- self.assertEqual(len(elems), len(obj))
- for idx in range(len(obj)):
- self.assertEqual(elems[idx][0].tag, obj[idx])
-
- def test_tree(self):
- # Create a template element
- elem = xmlutil.TemplateElement('test', attr3='attr3')
- elem.text = 'test'
- self.assertEqual(elem.tree(),
- "<test !selector=Selector() "
- "!text=Selector('test',) "
- "attr3=Selector('attr3',)"
- "/>")
-
- # Create a template element
- elem = xmlutil.TemplateElement('test2')
-
- # Create a child element
- child = xmlutil.TemplateElement('child')
-
- # Append the child to the parent
- elem.append(child)
-
- self.assertEqual(elem.tree(),
- "<test2 !selector=Selector()>"
- "<child !selector=Selector()/></test2>")
-
-
-class TemplateTest(test.NoDBTestCase):
- def test_tree(self):
- elem = xmlutil.TemplateElement('test')
- tmpl = xmlutil.Template(elem)
- self.assertTrue(tmpl.tree())
-
- def test_wrap(self):
- # These are strange methods, but they make things easier
- elem = xmlutil.TemplateElement('test')
- tmpl = xmlutil.Template(elem)
- self.assertEqual(tmpl.unwrap(), elem)
- self.assertEqual(tmpl.wrap(), tmpl)
-
- def test__siblings(self):
- # Set up a basic template
- elem = xmlutil.TemplateElement('test')
- tmpl = xmlutil.Template(elem)
-
- # Check that we get the right siblings
- siblings = tmpl._siblings()
- self.assertEqual(len(siblings), 1)
- self.assertEqual(siblings[0], elem)
-
- def test__nsmap(self):
- # Set up a basic template
- elem = xmlutil.TemplateElement('test')
- tmpl = xmlutil.Template(elem, nsmap=dict(a="foo"))
-
- # Check out that we get the right namespace dictionary
- nsmap = tmpl._nsmap()
- self.assertNotEqual(id(nsmap), id(tmpl.nsmap))
- self.assertEqual(len(nsmap), 1)
- self.assertEqual(nsmap['a'], 'foo')
-
- def test_master_attach(self):
- # Set up a master template
- elem = xmlutil.TemplateElement('test')
- tmpl = xmlutil.MasterTemplate(elem, 1)
-
- # Make sure it has a root but no slaves
- self.assertEqual(tmpl.root, elem)
- self.assertEqual(len(tmpl.slaves), 0)
- self.assertTrue(repr(tmpl))
-
- # Try to attach an invalid slave
- bad_elem = xmlutil.TemplateElement('test2')
- self.assertRaises(ValueError, tmpl.attach, bad_elem)
- self.assertEqual(len(tmpl.slaves), 0)
-
- # Try to attach an invalid and a valid slave
- good_elem = xmlutil.TemplateElement('test')
- self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem)
- self.assertEqual(len(tmpl.slaves), 0)
-
- # Try to attach an inapplicable template
- class InapplicableTemplate(xmlutil.Template):
- def apply(self, master):
- return False
- inapp_tmpl = InapplicableTemplate(good_elem)
- tmpl.attach(inapp_tmpl)
- self.assertEqual(len(tmpl.slaves), 0)
-
- # Now try attaching an applicable template
- tmpl.attach(good_elem)
- self.assertEqual(len(tmpl.slaves), 1)
- self.assertEqual(tmpl.slaves[0].root, good_elem)
-
- def test_master_copy(self):
- # Construct a master template
- elem = xmlutil.TemplateElement('test')
- tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo'))
-
- # Give it a slave
- slave = xmlutil.TemplateElement('test')
- tmpl.attach(slave)
-
- # Construct a copy
- copy = tmpl.copy()
-
- # Check to see if we actually managed a copy
- self.assertNotEqual(tmpl, copy)
- self.assertEqual(tmpl.root, copy.root)
- self.assertEqual(tmpl.version, copy.version)
- self.assertEqual(id(tmpl.nsmap), id(copy.nsmap))
- self.assertNotEqual(id(tmpl.slaves), id(copy.slaves))
- self.assertEqual(len(tmpl.slaves), len(copy.slaves))
- self.assertEqual(tmpl.slaves[0], copy.slaves[0])
-
- def test_slave_apply(self):
- # Construct a master template
- elem = xmlutil.TemplateElement('test')
- master = xmlutil.MasterTemplate(elem, 3)
-
- # Construct a slave template with applicable minimum version
- slave = xmlutil.SlaveTemplate(elem, 2)
- self.assertEqual(slave.apply(master), True)
- self.assertTrue(repr(slave))
-
- # Construct a slave template with equal minimum version
- slave = xmlutil.SlaveTemplate(elem, 3)
- self.assertEqual(slave.apply(master), True)
-
- # Construct a slave template with inapplicable minimum version
- slave = xmlutil.SlaveTemplate(elem, 4)
- self.assertEqual(slave.apply(master), False)
-
- # Construct a slave template with applicable version range
- slave = xmlutil.SlaveTemplate(elem, 2, 4)
- self.assertEqual(slave.apply(master), True)
-
- # Construct a slave template with low version range
- slave = xmlutil.SlaveTemplate(elem, 1, 2)
- self.assertEqual(slave.apply(master), False)
-
- # Construct a slave template with high version range
- slave = xmlutil.SlaveTemplate(elem, 4, 5)
- self.assertEqual(slave.apply(master), False)
-
- # Construct a slave template with matching version range
- slave = xmlutil.SlaveTemplate(elem, 3, 3)
- self.assertEqual(slave.apply(master), True)
-
- def test__serialize(self):
- # Our test object to serialize
- obj = {
- 'test': {
- 'name': 'foobar',
- 'values': [1, 2, 3, 4],
- 'attrs': {
- 'a': 1,
- 'b': 2,
- 'c': 3,
- 'd': 4,
- },
- 'image': {
- 'name': 'image_foobar',
- 'id': 42,
- },
- },
- }
-
- # Set up our master template
- root = xmlutil.TemplateElement('test', selector='test',
- name='name')
- value = xmlutil.SubTemplateElement(root, 'value', selector='values')
- value.text = xmlutil.Selector()
- attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs')
- xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items,
- key=0, value=1)
- master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo'))
-
- # Set up our slave template
- root_slave = xmlutil.TemplateElement('test', selector='test')
- image = xmlutil.SubTemplateElement(root_slave, 'image',
- selector='image', id='id')
- image.text = xmlutil.Selector('name')
- slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar'))
-
- # Attach the slave to the master...
- master.attach(slave)
-
- # Try serializing our object
- siblings = master._siblings()
- nsmap = master._nsmap()
- result = master._serialize(None, obj, siblings, nsmap)
-
- # Now we get to manually walk the element tree...
- self.assertEqual(result.tag, 'test')
- self.assertEqual(len(result.nsmap), 2)
- self.assertEqual(result.nsmap['f'], 'foo')
- self.assertEqual(result.nsmap['b'], 'bar')
- self.assertEqual(result.get('name'), obj['test']['name'])
- for idx, val in enumerate(obj['test']['values']):
- self.assertEqual(result[idx].tag, 'value')
- self.assertEqual(result[idx].text, str(val))
- idx += 1
- self.assertEqual(result[idx].tag, 'attrs')
- for attr in result[idx]:
- self.assertEqual(attr.tag, 'attr')
- self.assertEqual(attr.get('value'),
- str(obj['test']['attrs'][attr.get('key')]))
- idx += 1
- self.assertEqual(result[idx].tag, 'image')
- self.assertEqual(result[idx].get('id'),
- str(obj['test']['image']['id']))
- self.assertEqual(result[idx].text, obj['test']['image']['name'])
-
- templ = xmlutil.Template(None)
- self.assertEqual(templ.serialize(None), '')
-
- def test_serialize_with_colon_tagname_support(self):
- # Our test object to serialize
- obj = {'extra_specs': {'foo:bar': '999'}}
- expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
- '<extra_specs><foo:bar xmlns:foo="foo">999</foo:bar>'
- '</extra_specs>'))
- # Set up our master template
- root = xmlutil.TemplateElement('extra_specs', selector='extra_specs',
- colon_ns=True)
- value = xmlutil.SubTemplateElement(root, 'foo:bar', selector='foo:bar',
- colon_ns=True)
- value.text = xmlutil.Selector()
- master = xmlutil.MasterTemplate(root, 1)
- result = master.serialize(obj)
- self.assertEqual(expected_xml, result)
-
- def test__serialize_with_empty_datum_selector(self):
- # Our test object to serialize
- obj = {
- 'test': {
- 'name': 'foobar',
- 'image': ''
- },
- }
-
- root = xmlutil.TemplateElement('test', selector='test',
- name='name')
- master = xmlutil.MasterTemplate(root, 1)
- root_slave = xmlutil.TemplateElement('test', selector='test')
- image = xmlutil.SubTemplateElement(root_slave, 'image',
- selector='image')
- image.set('id')
- xmlutil.make_links(image, 'links')
- slave = xmlutil.SlaveTemplate(root_slave, 1)
- master.attach(slave)
-
- siblings = master._siblings()
- result = master._serialize(None, obj, siblings)
- self.assertEqual(result.tag, 'test')
- self.assertEqual(result[0].tag, 'image')
- self.assertEqual(result[0].get('id'), str(obj['test']['image']))
-
-
-class MasterTemplateBuilder(xmlutil.TemplateBuilder):
- def construct(self):
- elem = xmlutil.TemplateElement('test')
- return xmlutil.MasterTemplate(elem, 1)
-
-
-class SlaveTemplateBuilder(xmlutil.TemplateBuilder):
- def construct(self):
- elem = xmlutil.TemplateElement('test')
- return xmlutil.SlaveTemplate(elem, 1)
-
-
-class TemplateBuilderTest(test.NoDBTestCase):
- def test_master_template_builder(self):
- # Make sure the template hasn't been built yet
- self.assertIsNone(MasterTemplateBuilder._tmpl)
-
- # Now, construct the template
- tmpl1 = MasterTemplateBuilder()
-
- # Make sure that there is a template cached...
- self.assertIsNotNone(MasterTemplateBuilder._tmpl)
-
- # Make sure it wasn't what was returned...
- self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1)
-
- # Make sure it doesn't get rebuilt
- cached = MasterTemplateBuilder._tmpl
- tmpl2 = MasterTemplateBuilder()
- self.assertEqual(MasterTemplateBuilder._tmpl, cached)
-
- # Make sure we're always getting fresh copies
- self.assertNotEqual(tmpl1, tmpl2)
-
- # Make sure we can override the copying behavior
- tmpl3 = MasterTemplateBuilder(False)
- self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3)
-
- def test_slave_template_builder(self):
- # Make sure the template hasn't been built yet
- self.assertIsNone(SlaveTemplateBuilder._tmpl)
-
- # Now, construct the template
- tmpl1 = SlaveTemplateBuilder()
-
- # Make sure there is a template cached...
- self.assertIsNotNone(SlaveTemplateBuilder._tmpl)
-
- # Make sure it was what was returned...
- self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
-
- # Make sure it doesn't get rebuilt
- tmpl2 = SlaveTemplateBuilder()
- self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
-
- # Make sure we're always getting the cached copy
- self.assertEqual(tmpl1, tmpl2)
-
-
-class MiscellaneousXMLUtilTests(test.NoDBTestCase):
- def test_validate_schema(self):
- xml = '''<?xml version='1.0' encoding='UTF-8'?>
-<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
-<meta key="key6">value6</meta><meta key="key4">value4</meta>
-</metadata>
-'''
- xmlutil.validate_schema(xml, 'metadata')
- # No way to test the return value of validate_schema.
- # It just raises an exception when something is wrong.
- self.assertTrue(True)
-
- def test_make_links(self):
- elem = xmlutil.TemplateElement('image', selector='image')
- self.assertTrue(repr(xmlutil.make_links(elem, 'links')))
-
- def test_make_flat_dict(self):
- expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
- '<wrapper><a>foo</a><b>bar</b></wrapper>')
- root = xmlutil.make_flat_dict('wrapper')
- tmpl = xmlutil.MasterTemplate(root, 1)
- result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
- self.assertEqual(result, expected_xml)
-
- expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
-'<ns0:wrapper xmlns:ns0="ns"><ns0:a>foo</ns0:a><ns0:b>bar</ns0:b>'
-"</ns0:wrapper>")
- root = xmlutil.make_flat_dict('wrapper', ns='ns')
- tmpl = xmlutil.MasterTemplate(root, 1)
- result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
- self.assertEqual(result, expected_xml)
-
- def test_make_flat_dict_with_colon_tagname_support(self):
- # Our test object to serialize
- obj = {'extra_specs': {'foo:bar': '999'}}
- expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
- '<extra_specs><foo:bar xmlns:foo="foo">999</foo:bar>'
- '</extra_specs>'))
- # Set up our master template
- root = xmlutil.make_flat_dict('extra_specs', colon_ns=True)
- master = xmlutil.MasterTemplate(root, 1)
- result = master.serialize(obj)
- self.assertEqual(expected_xml, result)
-
- def test_make_flat_dict_with_parent(self):
- # Our test object to serialize
- obj = {"device": {"id": 1,
- "extra_info": {"key1": "value1",
- "key2": "value2"}}}
-
- expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
- '<device id="1"><extra_info><key2>value2</key2>'
- '<key1>value1</key1></extra_info></device>'))
-
- root = xmlutil.TemplateElement('device', selector='device')
- root.set('id')
- extra = xmlutil.make_flat_dict('extra_info', root=root)
- root.append(extra)
- master = xmlutil.MasterTemplate(root, 1)
- result = master.serialize(obj)
- self.assertEqual(expected_xml, result)
-
- def test_make_flat_dict_with_dicts(self):
- # Our test object to serialize
- obj = {"device": {"id": 1,
- "extra_info": {"key1": "value1",
- "key2": "value2"}}}
-
- expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
- '<device><id>1</id><extra_info><key2>value2</key2>'
- '<key1>value1</key1></extra_info></device>'))
-
- root = xmlutil.make_flat_dict('device', selector='device',
- ignore_sub_dicts=True)
- extra = xmlutil.make_flat_dict('extra_info', selector='extra_info')
- root.append(extra)
- master = xmlutil.MasterTemplate(root, 1)
- result = master.serialize(obj)
- self.assertEqual(expected_xml, result)
-
- def test_safe_parse_xml(self):
-
- normal_body = ('<?xml version="1.0" ?>'
- '<foo><bar><v1>hey</v1><v2>there</v2></bar></foo>')
-
- dom = xmlutil.safe_minidom_parse_string(normal_body)
- # Some versions of minidom inject extra newlines so we ignore them
- result = str(dom.toxml()).replace('\n', '')
- self.assertEqual(normal_body, result)
-
- self.assertRaises(exception.MalformedRequestBody,
- xmlutil.safe_minidom_parse_string,
- tests_utils.killer_xml_body())
-
-
-class SafeParserTestCase(test.NoDBTestCase):
- def test_external_dtd(self):
- xml_string = ("""<?xml version="1.0" encoding="utf-8"?>
- <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
- <html>
- <head/>
- <body>html with dtd</body>
- </html>""")
-
- parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
- forbid_entities=True)
- self.assertRaises(ValueError,
- minidom.parseString,
- xml_string, parser)
-
- def test_external_file(self):
- xml_string = """<!DOCTYPE external [
- <!ENTITY ee SYSTEM "file:///PATH/TO/root.xml">
- ]>
- <root>&ee;</root>"""
-
- parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
- forbid_entities=True)
- self.assertRaises(ValueError,
- minidom.parseString,
- xml_string, parser)
-
- def test_notation(self):
- xml_string = """<?xml version="1.0" standalone="no"?>
- <!-- comment data -->
- <!DOCTYPE x [
- <!NOTATION notation SYSTEM "notation.jpeg">
- ]>
- <root attr1="value1">
- </root>"""
-
- parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
- forbid_entities=True)
- self.assertRaises(ValueError,
- minidom.parseString,
- xml_string, parser)
diff --git a/nova/tests/cells/fakes.py b/nova/tests/cells/fakes.py
deleted file mode 100644
index 8a27638500..0000000000
--- a/nova/tests/cells/fakes.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# Copyright (c) 2012 Rackspace Hosting
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Fakes For Cells tests.
-"""
-
-from oslo.config import cfg
-
-from nova.cells import driver
-from nova.cells import manager as cells_manager
-from nova.cells import state as cells_state
-from nova.cells import utils as cells_utils
-import nova.db
-from nova.db import base
-from nova import exception
-
-CONF = cfg.CONF
-CONF.import_opt('name', 'nova.cells.opts', group='cells')
-
-
-# Fake Cell Hierarchy
-FAKE_TOP_LEVEL_CELL_NAME = 'api-cell'
-FAKE_CELL_LAYOUT = [{'child-cell1': []},
- {'child-cell2': [{'grandchild-cell1': []}]},
- {'child-cell3': [{'grandchild-cell2': []},
- {'grandchild-cell3': []}]},
- {'child-cell4': []}]
-
-# build_cell_stub_infos() below will take the above layout and create
-# a fake view of the DB from the perspective of each of the cells.
-# For each cell, a CellStubInfo will be created with this info.
-CELL_NAME_TO_STUB_INFO = {}
-
-
-class FakeDBApi(object):
- """Cells uses a different DB in each cell. This means in order to
- stub out things differently per cell, I need to create a fake DBApi
- object that is instantiated by each fake cell.
- """
- def __init__(self, cell_db_entries):
- self.cell_db_entries = cell_db_entries
-
- def __getattr__(self, key):
- return getattr(nova.db, key)
-
- def cell_get_all(self, ctxt):
- return self.cell_db_entries
-
- def compute_node_get_all(self, ctxt):
- return []
-
- def instance_get_all_by_filters(self, ctxt, *args, **kwargs):
- return []
-
- def instance_get_by_uuid(self, ctxt, instance_uuid):
- raise exception.InstanceNotFound(instance_id=instance_uuid)
-
-
-class FakeCellsDriver(driver.BaseCellsDriver):
- pass
-
-
-class FakeCellState(cells_state.CellState):
- def send_message(self, message):
- message_runner = get_message_runner(self.name)
- orig_ctxt = message.ctxt
- json_message = message.to_json()
- message = message_runner.message_from_json(json_message)
- # Restore this so we can use mox and verify same context
- message.ctxt = orig_ctxt
- message.process()
-
-
-class FakeCellStateManager(cells_state.CellStateManagerDB):
- def __init__(self, *args, **kwargs):
- super(FakeCellStateManager, self).__init__(*args,
- cell_state_cls=FakeCellState, **kwargs)
-
-
-class FakeCellsManager(cells_manager.CellsManager):
- def __init__(self, *args, **kwargs):
- super(FakeCellsManager, self).__init__(*args,
- cell_state_manager=FakeCellStateManager,
- **kwargs)
-
-
-class CellStubInfo(object):
- def __init__(self, test_case, cell_name, db_entries):
- self.test_case = test_case
- self.cell_name = cell_name
- self.db_entries = db_entries
-
- def fake_base_init(_self, *args, **kwargs):
- _self.db = FakeDBApi(db_entries)
-
- test_case.stubs.Set(base.Base, '__init__', fake_base_init)
- self.cells_manager = FakeCellsManager()
- # Fix the cell name, as it normally uses CONF.cells.name
- msg_runner = self.cells_manager.msg_runner
- msg_runner.our_name = self.cell_name
- self.cells_manager.state_manager.my_cell_state.name = self.cell_name
-
-
-def _build_cell_transport_url(cur_db_id):
- username = 'username%s' % cur_db_id
- password = 'password%s' % cur_db_id
- hostname = 'rpc_host%s' % cur_db_id
- port = 3090 + cur_db_id
- virtual_host = 'rpc_vhost%s' % cur_db_id
-
- return 'rabbit://%s:%s@%s:%s/%s' % (username, password, hostname, port,
- virtual_host)
-
-
-def _build_cell_stub_info(test_case, our_name, parent_path, children):
- cell_db_entries = []
- cur_db_id = 1
- sep_char = cells_utils.PATH_CELL_SEP
- if parent_path:
- cell_db_entries.append(
- dict(id=cur_db_id,
- name=parent_path.split(sep_char)[-1],
- is_parent=True,
- transport_url=_build_cell_transport_url(cur_db_id)))
- cur_db_id += 1
- our_path = parent_path + sep_char + our_name
- else:
- our_path = our_name
- for child in children:
- for child_name, grandchildren in child.items():
- _build_cell_stub_info(test_case, child_name, our_path,
- grandchildren)
- cell_entry = dict(id=cur_db_id,
- name=child_name,
- transport_url=_build_cell_transport_url(
- cur_db_id),
- is_parent=False)
- cell_db_entries.append(cell_entry)
- cur_db_id += 1
- stub_info = CellStubInfo(test_case, our_name, cell_db_entries)
- CELL_NAME_TO_STUB_INFO[our_name] = stub_info
-
-
-def _build_cell_stub_infos(test_case):
- _build_cell_stub_info(test_case, FAKE_TOP_LEVEL_CELL_NAME, '',
- FAKE_CELL_LAYOUT)
-
-
-def init(test_case):
- global CELL_NAME_TO_STUB_INFO
- test_case.flags(driver='nova.tests.cells.fakes.FakeCellsDriver',
- group='cells')
- CELL_NAME_TO_STUB_INFO = {}
- _build_cell_stub_infos(test_case)
-
-
-def _get_cell_stub_info(cell_name):
- return CELL_NAME_TO_STUB_INFO[cell_name]
-
-
-def get_state_manager(cell_name):
- return _get_cell_stub_info(cell_name).cells_manager.state_manager
-
-
-def get_cell_state(cur_cell_name, tgt_cell_name):
- state_manager = get_state_manager(cur_cell_name)
- cell = state_manager.child_cells.get(tgt_cell_name)
- if cell is None:
- cell = state_manager.parent_cells.get(tgt_cell_name)
- return cell
-
-
-def get_cells_manager(cell_name):
- return _get_cell_stub_info(cell_name).cells_manager
-
-
-def get_message_runner(cell_name):
- return _get_cell_stub_info(cell_name).cells_manager.msg_runner
-
-
-def stub_tgt_method(test_case, cell_name, method_name, method):
- msg_runner = get_message_runner(cell_name)
- tgt_msg_methods = msg_runner.methods_by_type['targeted']
- setattr(tgt_msg_methods, method_name, method)
-
-
-def stub_bcast_method(test_case, cell_name, method_name, method):
- msg_runner = get_message_runner(cell_name)
- tgt_msg_methods = msg_runner.methods_by_type['broadcast']
- setattr(tgt_msg_methods, method_name, method)
-
-
-def stub_bcast_methods(test_case, method_name, method):
- for cell_name in CELL_NAME_TO_STUB_INFO.keys():
- stub_bcast_method(test_case, cell_name, method_name, method)
diff --git a/nova/tests/cells/test_cells_filters.py b/nova/tests/cells/test_cells_filters.py
deleted file mode 100644
index 01ad3580ff..0000000000
--- a/nova/tests/cells/test_cells_filters.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# Copyright (c) 2012-2013 Rackspace Hosting
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Unit Tests for cells scheduler filters.
-"""
-
-from nova.cells import filters
-from nova import context
-from nova.db.sqlalchemy import models
-from nova import test
-from nova.tests.cells import fakes
-
-
-class FiltersTestCase(test.NoDBTestCase):
- """Makes sure the proper filters are in the directory."""
-
- def test_all_filters(self):
- filter_classes = filters.all_filters()
- class_names = [cls.__name__ for cls in filter_classes]
- self.assertIn("TargetCellFilter", class_names)
-
-
-class _FilterTestClass(test.NoDBTestCase):
- """Base class for testing individual filter plugins."""
- filter_cls_name = None
-
- def setUp(self):
- super(_FilterTestClass, self).setUp()
- fakes.init(self)
- self.msg_runner = fakes.get_message_runner('api-cell')
- self.scheduler = self.msg_runner.scheduler
- self.my_cell_state = self.msg_runner.state_manager.get_my_state()
- self.filter_handler = filters.CellFilterHandler()
- self.filter_classes = self.filter_handler.get_matching_classes(
- [self.filter_cls_name])
- self.context = context.RequestContext('fake', 'fake',
- is_admin=True)
-
- def _filter_cells(self, cells, filter_properties):
- return self.filter_handler.get_filtered_objects(self.filter_classes,
- cells,
- filter_properties)
-
-
-class ImagePropertiesFilter(_FilterTestClass):
- filter_cls_name = \
- 'nova.cells.filters.image_properties.ImagePropertiesFilter'
-
- def setUp(self):
- super(ImagePropertiesFilter, self).setUp()
- self.cell1 = models.Cell()
- self.cell2 = models.Cell()
- self.cell3 = models.Cell()
- self.cells = [self.cell1, self.cell2, self.cell3]
- for cell in self.cells:
- cell.capabilities = {}
- self.filter_props = {'context': self.context, 'request_spec': {}}
-
- def test_missing_image_properties(self):
- self.assertEqual(self.cells,
- self._filter_cells(self.cells, self.filter_props))
-
- def test_missing_hypervisor_version_requires(self):
- self.filter_props['request_spec'] = {'image': {'properties': {}}}
- for cell in self.cells:
- cell.capabilities = {"prominent_hypervisor_version": set([u"6.2"])}
- self.assertEqual(self.cells,
- self._filter_cells(self.cells, self.filter_props))
-
- def test_missing_hypervisor_version_in_cells(self):
- image = {'properties': {'hypervisor_version_requires': '>6.2.1'}}
- self.filter_props['request_spec'] = {'image': image}
- self.cell1.capabilities = {"prominent_hypervisor_version": set([])}
- self.assertEqual(self.cells,
- self._filter_cells(self.cells, self.filter_props))
-
- def test_cells_matching_hypervisor_version(self):
- image = {'properties': {'hypervisor_version_requires': '>6.0, <=6.3'}}
- self.filter_props['request_spec'] = {'image': image}
-
- self.cell1.capabilities = {"prominent_hypervisor_version":
- set([u"6.2"])}
- self.cell2.capabilities = {"prominent_hypervisor_version":
- set([u"6.3"])}
- self.cell3.capabilities = {"prominent_hypervisor_version":
- set([u"6.0"])}
-
- self.assertEqual([self.cell1, self.cell2],
- self._filter_cells(self.cells, self.filter_props))
-
- # assert again to verify filter doesn't mutate state
- # LP bug #1325705
- self.assertEqual([self.cell1, self.cell2],
- self._filter_cells(self.cells, self.filter_props))
-
-
-class TestTargetCellFilter(_FilterTestClass):
- filter_cls_name = 'nova.cells.filters.target_cell.TargetCellFilter'
-
- def test_missing_scheduler_hints(self):
- cells = [1, 2, 3]
- # No filtering
- filter_props = {'context': self.context}
- self.assertEqual(cells, self._filter_cells(cells, filter_props))
-
- def test_no_target_cell_hint(self):
- cells = [1, 2, 3]
- filter_props = {'scheduler_hints': {},
- 'context': self.context}
- # No filtering
- self.assertEqual(cells, self._filter_cells(cells, filter_props))
-
- def test_target_cell_specified_me(self):
- cells = [1, 2, 3]
- target_cell = 'fake!cell!path'
- current_cell = 'fake!cell!path'
- filter_props = {'scheduler_hints': {'target_cell': target_cell},
- 'routing_path': current_cell,
- 'scheduler': self.scheduler,
- 'context': self.context}
- # Only myself in the list.
- self.assertEqual([self.my_cell_state],
- self._filter_cells(cells, filter_props))
-
- def test_target_cell_specified_me_but_not_admin(self):
- ctxt = context.RequestContext('fake', 'fake')
- cells = [1, 2, 3]
- target_cell = 'fake!cell!path'
- current_cell = 'fake!cell!path'
- filter_props = {'scheduler_hints': {'target_cell': target_cell},
- 'routing_path': current_cell,
- 'scheduler': self.scheduler,
- 'context': ctxt}
- # No filtering, because not an admin.
- self.assertEqual(cells, self._filter_cells(cells, filter_props))
-
- def test_target_cell_specified_not_me(self):
- info = {}
-
- def _fake_build_instances(ctxt, cell, sched_kwargs):
- info['ctxt'] = ctxt
- info['cell'] = cell
- info['sched_kwargs'] = sched_kwargs
-
- self.stubs.Set(self.msg_runner, 'build_instances',
- _fake_build_instances)
- cells = [1, 2, 3]
- target_cell = 'fake!cell!path'
- current_cell = 'not!the!same'
- filter_props = {'scheduler_hints': {'target_cell': target_cell},
- 'routing_path': current_cell,
- 'scheduler': self.scheduler,
- 'context': self.context,
- 'host_sched_kwargs': 'meow'}
- # None is returned to bypass further scheduling.
- self.assertIsNone(self._filter_cells(cells, filter_props))
- # The filter should have re-scheduled to the child cell itself.
- expected_info = {'ctxt': self.context,
- 'cell': 'fake!cell!path',
- 'sched_kwargs': 'meow'}
- self.assertEqual(expected_info, info)
diff --git a/nova/tests/cells/test_cells_manager.py b/nova/tests/cells/test_cells_manager.py
deleted file mode 100644
index e540bf5793..0000000000
--- a/nova/tests/cells/test_cells_manager.py
+++ /dev/null
@@ -1,808 +0,0 @@
-# Copyright (c) 2012 Rackspace Hosting
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For CellsManager
-"""
-import copy
-import datetime
-
-import mock
-from oslo.config import cfg
-from oslo.utils import timeutils
-
-from nova.cells import messaging
-from nova.cells import utils as cells_utils
-from nova import context
-from nova import test
-from nova.tests.cells import fakes
-from nova.tests import fake_server_actions
-
-CONF = cfg.CONF
-CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
-
-
-FAKE_COMPUTE_NODES = [dict(id=1), dict(id=2)]
-FAKE_SERVICES = [dict(id=1, host='host1',
- compute_node=[FAKE_COMPUTE_NODES[0]]),
- dict(id=2, host='host2',
- compute_node=[FAKE_COMPUTE_NODES[1]]),
- dict(id=3, host='host3', compute_node=[])]
-FAKE_TASK_LOGS = [dict(id=1, host='host1'),
- dict(id=2, host='host2')]
-
-
-class CellsManagerClassTestCase(test.NoDBTestCase):
- """Test case for CellsManager class."""
-
- def setUp(self):
- super(CellsManagerClassTestCase, self).setUp()
- fakes.init(self)
- # pick a child cell to use for tests.
- self.our_cell = 'grandchild-cell1'
- self.cells_manager = fakes.get_cells_manager(self.our_cell)
- self.msg_runner = self.cells_manager.msg_runner
- self.state_manager = fakes.get_state_manager(self.our_cell)
- self.driver = self.cells_manager.driver
- self.ctxt = 'fake_context'
-
- def _get_fake_response(self, raw_response=None, exc=False):
- if exc:
- return messaging.Response('fake', test.TestingException(),
- True)
- if raw_response is None:
- raw_response = 'fake-response'
- return messaging.Response('fake', raw_response, False)
-
- def test_get_cell_info_for_neighbors(self):
- self.mox.StubOutWithMock(self.cells_manager.state_manager,
- 'get_cell_info_for_neighbors')
- self.cells_manager.state_manager.get_cell_info_for_neighbors()
- self.mox.ReplayAll()
- self.cells_manager.get_cell_info_for_neighbors(self.ctxt)
-
- def test_post_start_hook_child_cell(self):
- self.mox.StubOutWithMock(self.driver, 'start_servers')
- self.mox.StubOutWithMock(context, 'get_admin_context')
- self.mox.StubOutWithMock(self.cells_manager, '_update_our_parents')
-
- self.driver.start_servers(self.msg_runner)
- context.get_admin_context().AndReturn(self.ctxt)
- self.cells_manager._update_our_parents(self.ctxt)
- self.mox.ReplayAll()
- self.cells_manager.post_start_hook()
-
- def test_post_start_hook_middle_cell(self):
- cells_manager = fakes.get_cells_manager('child-cell2')
- msg_runner = cells_manager.msg_runner
- driver = cells_manager.driver
-
- self.mox.StubOutWithMock(driver, 'start_servers')
- self.mox.StubOutWithMock(context, 'get_admin_context')
- self.mox.StubOutWithMock(msg_runner,
- 'ask_children_for_capabilities')
- self.mox.StubOutWithMock(msg_runner,
- 'ask_children_for_capacities')
-
- driver.start_servers(msg_runner)
- context.get_admin_context().AndReturn(self.ctxt)
- msg_runner.ask_children_for_capabilities(self.ctxt)
- msg_runner.ask_children_for_capacities(self.ctxt)
- self.mox.ReplayAll()
- cells_manager.post_start_hook()
-
- def test_update_our_parents(self):
- self.mox.StubOutWithMock(self.msg_runner,
- 'tell_parents_our_capabilities')
- self.mox.StubOutWithMock(self.msg_runner,
- 'tell_parents_our_capacities')
-
- self.msg_runner.tell_parents_our_capabilities(self.ctxt)
- self.msg_runner.tell_parents_our_capacities(self.ctxt)
- self.mox.ReplayAll()
- self.cells_manager._update_our_parents(self.ctxt)
-
- def test_build_instances(self):
- build_inst_kwargs = {'instances': [1, 2]}
- self.mox.StubOutWithMock(self.msg_runner, 'build_instances')
- our_cell = self.msg_runner.state_manager.get_my_state()
- self.msg_runner.build_instances(self.ctxt, our_cell, build_inst_kwargs)
- self.mox.ReplayAll()
- self.cells_manager.build_instances(self.ctxt,
- build_inst_kwargs=build_inst_kwargs)
-
- def test_run_compute_api_method(self):
- # Args should just be silently passed through
- cell_name = 'fake-cell-name'
- method_info = 'fake-method-info'
-
- self.mox.StubOutWithMock(self.msg_runner,
- 'run_compute_api_method')
- fake_response = self._get_fake_response()
- self.msg_runner.run_compute_api_method(self.ctxt,
- cell_name,
- method_info,
- True).AndReturn(fake_response)
- self.mox.ReplayAll()
- response = self.cells_manager.run_compute_api_method(
- self.ctxt, cell_name=cell_name, method_info=method_info,
- call=True)
- self.assertEqual('fake-response', response)
-
- def test_instance_update_at_top(self):
- self.mox.StubOutWithMock(self.msg_runner, 'instance_update_at_top')
- self.msg_runner.instance_update_at_top(self.ctxt, 'fake-instance')
- self.mox.ReplayAll()
- self.cells_manager.instance_update_at_top(self.ctxt,
- instance='fake-instance')
-
- def test_instance_destroy_at_top(self):
- self.mox.StubOutWithMock(self.msg_runner, 'instance_destroy_at_top')
- self.msg_runner.instance_destroy_at_top(self.ctxt, 'fake-instance')
- self.mox.ReplayAll()
- self.cells_manager.instance_destroy_at_top(self.ctxt,
- instance='fake-instance')
-
- def test_instance_delete_everywhere(self):
- self.mox.StubOutWithMock(self.msg_runner,
- 'instance_delete_everywhere')
- self.msg_runner.instance_delete_everywhere(self.ctxt,
- 'fake-instance',
- 'fake-type')
- self.mox.ReplayAll()
- self.cells_manager.instance_delete_everywhere(
- self.ctxt, instance='fake-instance',
- delete_type='fake-type')
-
- def test_instance_fault_create_at_top(self):
- self.mox.StubOutWithMock(self.msg_runner,
- 'instance_fault_create_at_top')
- self.msg_runner.instance_fault_create_at_top(self.ctxt,
- 'fake-fault')
- self.mox.ReplayAll()
- self.cells_manager.instance_fault_create_at_top(
- self.ctxt, instance_fault='fake-fault')
-
- def test_bw_usage_update_at_top(self):
- self.mox.StubOutWithMock(self.msg_runner,
- 'bw_usage_update_at_top')
- self.msg_runner.bw_usage_update_at_top(self.ctxt,
- 'fake-bw-info')
- self.mox.ReplayAll()
- self.cells_manager.bw_usage_update_at_top(
- self.ctxt, bw_update_info='fake-bw-info')
-
- def test_heal_instances(self):
- self.flags(instance_updated_at_threshold=1000,
- instance_update_num_instances=2,
- group='cells')
-
- fake_context = context.RequestContext('fake', 'fake')
- stalled_time = timeutils.utcnow()
- updated_since = stalled_time - datetime.timedelta(seconds=1000)
-
- def utcnow():
- return stalled_time
-
- call_info = {'get_instances': 0, 'sync_instances': []}
-
- instances = ['instance1', 'instance2', 'instance3']
-
- def get_instances_to_sync(context, **kwargs):
- self.assertEqual(context, fake_context)
- call_info['shuffle'] = kwargs.get('shuffle')
- call_info['project_id'] = kwargs.get('project_id')
- call_info['updated_since'] = kwargs.get('updated_since')
- call_info['get_instances'] += 1
- return iter(instances)
-
- def instance_get_by_uuid(context, uuid):
- return instances[int(uuid[-1]) - 1]
-
- def sync_instance(context, instance):
- self.assertEqual(context, fake_context)
- call_info['sync_instances'].append(instance)
-
- self.stubs.Set(cells_utils, 'get_instances_to_sync',
- get_instances_to_sync)
- self.stubs.Set(self.cells_manager.db, 'instance_get_by_uuid',
- instance_get_by_uuid)
- self.stubs.Set(self.cells_manager, '_sync_instance',
- sync_instance)
- self.stubs.Set(timeutils, 'utcnow', utcnow)
-
- self.cells_manager._heal_instances(fake_context)
- self.assertEqual(call_info['shuffle'], True)
- self.assertIsNone(call_info['project_id'])
- self.assertEqual(call_info['updated_since'], updated_since)
- self.assertEqual(call_info['get_instances'], 1)
- # Only first 2
- self.assertEqual(call_info['sync_instances'],
- instances[:2])
-
- call_info['sync_instances'] = []
- self.cells_manager._heal_instances(fake_context)
- self.assertEqual(call_info['shuffle'], True)
- self.assertIsNone(call_info['project_id'])
- self.assertEqual(call_info['updated_since'], updated_since)
- self.assertEqual(call_info['get_instances'], 2)
- # Now the last 1 and the first 1
- self.assertEqual(call_info['sync_instances'],
- [instances[-1], instances[0]])
-
- def test_sync_instances(self):
- self.mox.StubOutWithMock(self.msg_runner,
- 'sync_instances')
- self.msg_runner.sync_instances(self.ctxt, 'fake-project',
- 'fake-time', 'fake-deleted')
- self.mox.ReplayAll()
- self.cells_manager.sync_instances(self.ctxt,
- project_id='fake-project',
- updated_since='fake-time',
- deleted='fake-deleted')
-
- def test_service_get_all(self):
- responses = []
- expected_response = []
- # 3 cells... so 3 responses. Each response is a list of services.
- # Manager should turn these into a single list of responses.
- for i in xrange(3):
- cell_name = 'path!to!cell%i' % i
- services = []
- for service in FAKE_SERVICES:
- services.append(copy.deepcopy(service))
- expected_service = copy.deepcopy(service)
- cells_utils.add_cell_to_service(expected_service, cell_name)
- expected_response.append(expected_service)
- response = messaging.Response(cell_name, services, False)
- responses.append(response)
-
- self.mox.StubOutWithMock(self.msg_runner,
- 'service_get_all')
- self.msg_runner.service_get_all(self.ctxt,
- 'fake-filters').AndReturn(responses)
- self.mox.ReplayAll()
- response = self.cells_manager.service_get_all(self.ctxt,
- filters='fake-filters')
- self.assertEqual(expected_response, response)
-
- def test_service_get_by_compute_host(self):
- self.mox.StubOutWithMock(self.msg_runner,
- 'service_get_by_compute_host')
- fake_cell = 'fake-cell'
- fake_response = messaging.Response(fake_cell, FAKE_SERVICES[0],
- False)
- expected_response = copy.deepcopy(FAKE_SERVICES[0])
- cells_utils.add_cell_to_service(expected_response, fake_cell)
-
- cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
- self.msg_runner.service_get_by_compute_host(self.ctxt,
- fake_cell, 'fake-host').AndReturn(fake_response)
- self.mox.ReplayAll()
- response = self.cells_manager.service_get_by_compute_host(self.ctxt,
- host_name=cell_and_host)
- self.assertEqual(expected_response, response)
-
- def test_get_host_uptime(self):
- fake_cell = 'parent!fake-cell'
- fake_host = 'fake-host'
- fake_cell_and_host = cells_utils.cell_with_item(fake_cell, fake_host)
- host_uptime = (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
- " 0.20, 0.12, 0.14")
- fake_response = messaging.Response(fake_cell, host_uptime, False)
-
- self.mox.StubOutWithMock(self.msg_runner,
- 'get_host_uptime')
- self.msg_runner.get_host_uptime(self.ctxt, fake_cell, fake_host).\
- AndReturn(fake_response)
- self.mox.ReplayAll()
-
- response = self.cells_manager.get_host_uptime(self.ctxt,
- fake_cell_and_host)
- self.assertEqual(host_uptime, response)
-
- def test_service_update(self):
- fake_cell = 'fake-cell'
- fake_response = messaging.Response(
- fake_cell, FAKE_SERVICES[0], False)
- expected_response = copy.deepcopy(FAKE_SERVICES[0])
- cells_utils.add_cell_to_service(expected_response, fake_cell)
- cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
- params_to_update = {'disabled': True}
-
- self.mox.StubOutWithMock(self.msg_runner, 'service_update')
- self.msg_runner.service_update(self.ctxt,
- fake_cell, 'fake-host', 'nova-api',
- params_to_update).AndReturn(fake_response)
- self.mox.ReplayAll()
-
- response = self.cells_manager.service_update(
- self.ctxt, host_name=cell_and_host, binary='nova-api',
- params_to_update=params_to_update)
- self.assertEqual(expected_response, response)
-
- def test_service_delete(self):
- fake_cell = 'fake-cell'
- service_id = '1'
- cell_service_id = cells_utils.cell_with_item(fake_cell, service_id)
-
- with mock.patch.object(self.msg_runner,
- 'service_delete') as service_delete:
- self.cells_manager.service_delete(self.ctxt, cell_service_id)
- service_delete.assert_called_once_with(
- self.ctxt, fake_cell, service_id)
-
- def test_proxy_rpc_to_manager(self):
- self.mox.StubOutWithMock(self.msg_runner,
- 'proxy_rpc_to_manager')
- fake_response = self._get_fake_response()
- cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
- topic = "%s.%s" % (CONF.compute_topic, cell_and_host)
- self.msg_runner.proxy_rpc_to_manager(self.ctxt, 'fake-cell',
- 'fake-host', topic, 'fake-rpc-msg',
- True, -1).AndReturn(fake_response)
- self.mox.ReplayAll()
- response = self.cells_manager.proxy_rpc_to_manager(self.ctxt,
- topic=topic, rpc_message='fake-rpc-msg', call=True,
- timeout=-1)
- self.assertEqual('fake-response', response)
-
- def _build_task_log_responses(self, num):
- responses = []
- expected_response = []
- # 3 cells... so 3 responses. Each response is a list of task log
- # entries. Manager should turn these into a single list of
- # task log entries.
- for i in xrange(num):
- cell_name = 'path!to!cell%i' % i
- task_logs = []
- for task_log in FAKE_TASK_LOGS:
- task_logs.append(copy.deepcopy(task_log))
- expected_task_log = copy.deepcopy(task_log)
- cells_utils.add_cell_to_task_log(expected_task_log,
- cell_name)
- expected_response.append(expected_task_log)
- response = messaging.Response(cell_name, task_logs, False)
- responses.append(response)
- return expected_response, responses
-
- def test_task_log_get_all(self):
- expected_response, responses = self._build_task_log_responses(3)
- self.mox.StubOutWithMock(self.msg_runner,
- 'task_log_get_all')
- self.msg_runner.task_log_get_all(self.ctxt, None,
- 'fake-name', 'fake-begin',
- 'fake-end', host=None, state=None).AndReturn(responses)
- self.mox.ReplayAll()
- response = self.cells_manager.task_log_get_all(self.ctxt,
- task_name='fake-name',
- period_beginning='fake-begin', period_ending='fake-end')
- self.assertEqual(expected_response, response)
-
- def test_task_log_get_all_with_filters(self):
- expected_response, responses = self._build_task_log_responses(1)
- cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
- self.mox.StubOutWithMock(self.msg_runner,
- 'task_log_get_all')
- self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
- 'fake-name', 'fake-begin', 'fake-end', host='fake-host',
- state='fake-state').AndReturn(responses)
- self.mox.ReplayAll()
- response = self.cells_manager.task_log_get_all(self.ctxt,
- task_name='fake-name',
- period_beginning='fake-begin', period_ending='fake-end',
- host=cell_and_host, state='fake-state')
- self.assertEqual(expected_response, response)
-
- def test_task_log_get_all_with_cell_but_no_host_filters(self):
- expected_response, responses = self._build_task_log_responses(1)
- # Host filter only has cell name.
- cell_and_host = 'fake-cell'
- self.mox.StubOutWithMock(self.msg_runner,
- 'task_log_get_all')
- self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
- 'fake-name', 'fake-begin', 'fake-end', host=None,
- state='fake-state').AndReturn(responses)
- self.mox.ReplayAll()
- response = self.cells_manager.task_log_get_all(self.ctxt,
- task_name='fake-name',
- period_beginning='fake-begin', period_ending='fake-end',
- host=cell_and_host, state='fake-state')
- self.assertEqual(expected_response, response)
-
- def test_compute_node_get_all(self):
- responses = []
- expected_response = []
- # 3 cells... so 3 responses. Each response is a list of computes.
- # Manager should turn these into a single list of responses.
- for i in xrange(3):
- cell_name = 'path!to!cell%i' % i
- compute_nodes = []
- for compute_node in FAKE_COMPUTE_NODES:
- compute_nodes.append(copy.deepcopy(compute_node))
- expected_compute_node = copy.deepcopy(compute_node)
- cells_utils.add_cell_to_compute_node(expected_compute_node,
- cell_name)
- expected_response.append(expected_compute_node)
- response = messaging.Response(cell_name, compute_nodes, False)
- responses.append(response)
- self.mox.StubOutWithMock(self.msg_runner,
- 'compute_node_get_all')
- self.msg_runner.compute_node_get_all(self.ctxt,
- hypervisor_match='fake-match').AndReturn(responses)
- self.mox.ReplayAll()
- response = self.cells_manager.compute_node_get_all(self.ctxt,
- hypervisor_match='fake-match')
- self.assertEqual(expected_response, response)
-
- def test_compute_node_stats(self):
- raw_resp1 = {'key1': 1, 'key2': 2}
- raw_resp2 = {'key2': 1, 'key3': 2}
- raw_resp3 = {'key3': 1, 'key4': 2}
- responses = [messaging.Response('cell1', raw_resp1, False),
- messaging.Response('cell2', raw_resp2, False),
- messaging.Response('cell2', raw_resp3, False)]
- expected_resp = {'key1': 1, 'key2': 3, 'key3': 3, 'key4': 2}
-
- self.mox.StubOutWithMock(self.msg_runner,
- 'compute_node_stats')
- self.msg_runner.compute_node_stats(self.ctxt).AndReturn(responses)
- self.mox.ReplayAll()
- response = self.cells_manager.compute_node_stats(self.ctxt)
- self.assertEqual(expected_resp, response)
-
- def test_compute_node_get(self):
- fake_cell = 'fake-cell'
- fake_response = messaging.Response(fake_cell,
- FAKE_COMPUTE_NODES[0],
- False)
- expected_response = copy.deepcopy(FAKE_COMPUTE_NODES[0])
- cells_utils.add_cell_to_compute_node(expected_response, fake_cell)
- cell_and_id = cells_utils.cell_with_item(fake_cell, 'fake-id')
- self.mox.StubOutWithMock(self.msg_runner,
- 'compute_node_get')
- self.msg_runner.compute_node_get(self.ctxt,
- 'fake-cell', 'fake-id').AndReturn(fake_response)
- self.mox.ReplayAll()
- response = self.cells_manager.compute_node_get(self.ctxt,
- compute_id=cell_and_id)
- self.assertEqual(expected_response, response)
-
- def test_actions_get(self):
- fake_uuid = fake_server_actions.FAKE_UUID
- fake_req_id = fake_server_actions.FAKE_REQUEST_ID1
- fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
- fake_response = messaging.Response('fake-cell', [fake_act], False)
- expected_response = [fake_act]
- self.mox.StubOutWithMock(self.msg_runner, 'actions_get')
- self.msg_runner.actions_get(self.ctxt, 'fake-cell',
- 'fake-uuid').AndReturn(fake_response)
- self.mox.ReplayAll()
- response = self.cells_manager.actions_get(self.ctxt, 'fake-cell',
- 'fake-uuid')
- self.assertEqual(expected_response, response)
-
- def test_action_get_by_request_id(self):
- fake_uuid = fake_server_actions.FAKE_UUID
- fake_req_id = fake_server_actions.FAKE_REQUEST_ID1
- fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
- fake_response = messaging.Response('fake-cell', fake_act, False)
- expected_response = fake_act
- self.mox.StubOutWithMock(self.msg_runner, 'action_get_by_request_id')
- self.msg_runner.action_get_by_request_id(self.ctxt, 'fake-cell',
- 'fake-uuid', 'req-fake').AndReturn(fake_response)
- self.mox.ReplayAll()
- response = self.cells_manager.action_get_by_request_id(self.ctxt,
- 'fake-cell',
- 'fake-uuid',
- 'req-fake')
- self.assertEqual(expected_response, response)
-
- def test_action_events_get(self):
- fake_action_id = fake_server_actions.FAKE_ACTION_ID1
- fake_events = fake_server_actions.FAKE_EVENTS[fake_action_id]
- fake_response = messaging.Response('fake-cell', fake_events, False)
- expected_response = fake_events
- self.mox.StubOutWithMock(self.msg_runner, 'action_events_get')
- self.msg_runner.action_events_get(self.ctxt, 'fake-cell',
- 'fake-action').AndReturn(fake_response)
- self.mox.ReplayAll()
- response = self.cells_manager.action_events_get(self.ctxt, 'fake-cell',
- 'fake-action')
- self.assertEqual(expected_response, response)
-
- def test_consoleauth_delete_tokens(self):
- instance_uuid = 'fake-instance-uuid'
-
- self.mox.StubOutWithMock(self.msg_runner,
- 'consoleauth_delete_tokens')
- self.msg_runner.consoleauth_delete_tokens(self.ctxt, instance_uuid)
- self.mox.ReplayAll()
- self.cells_manager.consoleauth_delete_tokens(self.ctxt,
- instance_uuid=instance_uuid)
-
- def test_get_capacities(self):
- cell_name = 'cell_name'
- response = {"ram_free":
- {"units_by_mb": {"64": 20, "128": 10}, "total_mb": 1491}}
- self.mox.StubOutWithMock(self.state_manager,
- 'get_capacities')
- self.state_manager.get_capacities(cell_name).AndReturn(response)
- self.mox.ReplayAll()
- self.assertEqual(response,
- self.cells_manager.get_capacities(self.ctxt, cell_name))
-
- def test_validate_console_port(self):
- instance_uuid = 'fake-instance-uuid'
- cell_name = 'fake-cell-name'
- instance = {'cell_name': cell_name}
- console_port = 'fake-console-port'
- console_type = 'fake-console-type'
-
- self.mox.StubOutWithMock(self.msg_runner,
- 'validate_console_port')
- self.mox.StubOutWithMock(self.cells_manager.db,
- 'instance_get_by_uuid')
- fake_response = self._get_fake_response()
-
- self.cells_manager.db.instance_get_by_uuid(self.ctxt,
- instance_uuid).AndReturn(instance)
- self.msg_runner.validate_console_port(self.ctxt, cell_name,
- instance_uuid, console_port,
- console_type).AndReturn(fake_response)
- self.mox.ReplayAll()
- response = self.cells_manager.validate_console_port(self.ctxt,
- instance_uuid=instance_uuid, console_port=console_port,
- console_type=console_type)
- self.assertEqual('fake-response', response)
-
- def test_bdm_update_or_create_at_top(self):
- self.mox.StubOutWithMock(self.msg_runner,
- 'bdm_update_or_create_at_top')
- self.msg_runner.bdm_update_or_create_at_top(self.ctxt,
- 'fake-bdm',
- create='foo')
- self.mox.ReplayAll()
- self.cells_manager.bdm_update_or_create_at_top(self.ctxt,
- 'fake-bdm',
- create='foo')
-
- def test_bdm_destroy_at_top(self):
- self.mox.StubOutWithMock(self.msg_runner, 'bdm_destroy_at_top')
- self.msg_runner.bdm_destroy_at_top(self.ctxt,
- 'fake_instance_uuid',
- device_name='fake_device_name',
- volume_id='fake_volume_id')
-
- self.mox.ReplayAll()
- self.cells_manager.bdm_destroy_at_top(self.ctxt,
- 'fake_instance_uuid',
- device_name='fake_device_name',
- volume_id='fake_volume_id')
-
- def test_get_migrations(self):
- filters = {'status': 'confirmed'}
- cell1_migrations = [{'id': 123}]
- cell2_migrations = [{'id': 456}]
- fake_responses = [self._get_fake_response(cell1_migrations),
- self._get_fake_response(cell2_migrations)]
- self.mox.StubOutWithMock(self.msg_runner,
- 'get_migrations')
- self.msg_runner.get_migrations(self.ctxt, None, False, filters).\
- AndReturn(fake_responses)
- self.mox.ReplayAll()
-
- response = self.cells_manager.get_migrations(self.ctxt, filters)
-
- self.assertEqual([cell1_migrations[0], cell2_migrations[0]], response)
-
- def test_get_migrations_for_a_given_cell(self):
- filters = {'status': 'confirmed', 'cell_name': 'ChildCell1'}
- target_cell = '%s%s%s' % (CONF.cells.name, '!', filters['cell_name'])
- migrations = [{'id': 123}]
- fake_responses = [self._get_fake_response(migrations)]
- self.mox.StubOutWithMock(self.msg_runner,
- 'get_migrations')
- self.msg_runner.get_migrations(self.ctxt, target_cell, False,
- filters).AndReturn(fake_responses)
- self.mox.ReplayAll()
-
- response = self.cells_manager.get_migrations(self.ctxt, filters)
- self.assertEqual(migrations, response)
-
- def test_instance_update_from_api(self):
- self.mox.StubOutWithMock(self.msg_runner,
- 'instance_update_from_api')
- self.msg_runner.instance_update_from_api(self.ctxt,
- 'fake-instance',
- 'exp_vm', 'exp_task',
- 'admin_reset')
- self.mox.ReplayAll()
- self.cells_manager.instance_update_from_api(
- self.ctxt, instance='fake-instance',
- expected_vm_state='exp_vm',
- expected_task_state='exp_task',
- admin_state_reset='admin_reset')
-
- def test_start_instance(self):
- self.mox.StubOutWithMock(self.msg_runner, 'start_instance')
- self.msg_runner.start_instance(self.ctxt, 'fake-instance')
- self.mox.ReplayAll()
- self.cells_manager.start_instance(self.ctxt, instance='fake-instance')
-
- def test_stop_instance(self):
- self.mox.StubOutWithMock(self.msg_runner, 'stop_instance')
- self.msg_runner.stop_instance(self.ctxt, 'fake-instance',
- do_cast='meow')
- self.mox.ReplayAll()
- self.cells_manager.stop_instance(self.ctxt,
- instance='fake-instance',
- do_cast='meow')
-
- def test_cell_create(self):
- values = 'values'
- response = 'created_cell'
- self.mox.StubOutWithMock(self.state_manager,
- 'cell_create')
- self.state_manager.cell_create(self.ctxt, values).\
- AndReturn(response)
- self.mox.ReplayAll()
- self.assertEqual(response,
- self.cells_manager.cell_create(self.ctxt, values))
-
- def test_cell_update(self):
- cell_name = 'cell_name'
- values = 'values'
- response = 'updated_cell'
- self.mox.StubOutWithMock(self.state_manager,
- 'cell_update')
- self.state_manager.cell_update(self.ctxt, cell_name, values).\
- AndReturn(response)
- self.mox.ReplayAll()
- self.assertEqual(response,
- self.cells_manager.cell_update(self.ctxt, cell_name,
- values))
-
- def test_cell_delete(self):
- cell_name = 'cell_name'
- response = 1
- self.mox.StubOutWithMock(self.state_manager,
- 'cell_delete')
- self.state_manager.cell_delete(self.ctxt, cell_name).\
- AndReturn(response)
- self.mox.ReplayAll()
- self.assertEqual(response,
- self.cells_manager.cell_delete(self.ctxt, cell_name))
-
- def test_cell_get(self):
- cell_name = 'cell_name'
- response = 'cell_info'
- self.mox.StubOutWithMock(self.state_manager,
- 'cell_get')
- self.state_manager.cell_get(self.ctxt, cell_name).\
- AndReturn(response)
- self.mox.ReplayAll()
- self.assertEqual(response,
- self.cells_manager.cell_get(self.ctxt, cell_name))
-
- def test_reboot_instance(self):
- self.mox.StubOutWithMock(self.msg_runner, 'reboot_instance')
- self.msg_runner.reboot_instance(self.ctxt, 'fake-instance',
- 'HARD')
- self.mox.ReplayAll()
- self.cells_manager.reboot_instance(self.ctxt,
- instance='fake-instance',
- reboot_type='HARD')
-
- def test_suspend_instance(self):
- self.mox.StubOutWithMock(self.msg_runner, 'suspend_instance')
- self.msg_runner.suspend_instance(self.ctxt, 'fake-instance')
- self.mox.ReplayAll()
- self.cells_manager.suspend_instance(self.ctxt,
- instance='fake-instance')
-
- def test_resume_instance(self):
- self.mox.StubOutWithMock(self.msg_runner, 'resume_instance')
- self.msg_runner.resume_instance(self.ctxt, 'fake-instance')
- self.mox.ReplayAll()
- self.cells_manager.resume_instance(self.ctxt,
- instance='fake-instance')
-
- def test_terminate_instance(self):
- self.mox.StubOutWithMock(self.msg_runner, 'terminate_instance')
- self.msg_runner.terminate_instance(self.ctxt, 'fake-instance')
- self.mox.ReplayAll()
- self.cells_manager.terminate_instance(self.ctxt,
- instance='fake-instance')
-
- def test_soft_delete_instance(self):
- self.mox.StubOutWithMock(self.msg_runner, 'soft_delete_instance')
- self.msg_runner.soft_delete_instance(self.ctxt, 'fake-instance')
- self.mox.ReplayAll()
- self.cells_manager.soft_delete_instance(self.ctxt,
- instance='fake-instance')
-
- def test_resize_instance(self):
- self.mox.StubOutWithMock(self.msg_runner, 'resize_instance')
- self.msg_runner.resize_instance(self.ctxt, 'fake-instance',
- 'fake-flavor', 'fake-updates')
- self.mox.ReplayAll()
- self.cells_manager.resize_instance(
- self.ctxt, instance='fake-instance', flavor='fake-flavor',
- extra_instance_updates='fake-updates')
-
- def test_live_migrate_instance(self):
- self.mox.StubOutWithMock(self.msg_runner, 'live_migrate_instance')
- self.msg_runner.live_migrate_instance(self.ctxt, 'fake-instance',
- 'fake-block', 'fake-commit',
- 'fake-host')
- self.mox.ReplayAll()
- self.cells_manager.live_migrate_instance(
- self.ctxt, instance='fake-instance',
- block_migration='fake-block', disk_over_commit='fake-commit',
- host_name='fake-host')
-
- def test_revert_resize(self):
- self.mox.StubOutWithMock(self.msg_runner, 'revert_resize')
- self.msg_runner.revert_resize(self.ctxt, 'fake-instance')
- self.mox.ReplayAll()
- self.cells_manager.revert_resize(self.ctxt, instance='fake-instance')
-
- def test_confirm_resize(self):
- self.mox.StubOutWithMock(self.msg_runner, 'confirm_resize')
- self.msg_runner.confirm_resize(self.ctxt, 'fake-instance')
- self.mox.ReplayAll()
- self.cells_manager.confirm_resize(self.ctxt, instance='fake-instance')
-
- def test_reset_network(self):
- self.mox.StubOutWithMock(self.msg_runner, 'reset_network')
- self.msg_runner.reset_network(self.ctxt, 'fake-instance')
- self.mox.ReplayAll()
- self.cells_manager.reset_network(self.ctxt, instance='fake-instance')
-
- def test_inject_network_info(self):
- self.mox.StubOutWithMock(self.msg_runner, 'inject_network_info')
- self.msg_runner.inject_network_info(self.ctxt, 'fake-instance')
- self.mox.ReplayAll()
- self.cells_manager.inject_network_info(self.ctxt,
- instance='fake-instance')
-
- def test_snapshot_instance(self):
- self.mox.StubOutWithMock(self.msg_runner, 'snapshot_instance')
- self.msg_runner.snapshot_instance(self.ctxt, 'fake-instance',
- 'fake-id')
- self.mox.ReplayAll()
- self.cells_manager.snapshot_instance(self.ctxt,
- instance='fake-instance',
- image_id='fake-id')
-
- def test_backup_instance(self):
- self.mox.StubOutWithMock(self.msg_runner, 'backup_instance')
- self.msg_runner.backup_instance(self.ctxt, 'fake-instance',
- 'fake-id', 'backup-type',
- 'rotation')
- self.mox.ReplayAll()
- self.cells_manager.backup_instance(self.ctxt,
- instance='fake-instance',
- image_id='fake-id',
- backup_type='backup-type',
- rotation='rotation')
-
- def test_set_admin_password(self):
- with mock.patch.object(self.msg_runner,
- 'set_admin_password') as set_admin_password:
- self.cells_manager.set_admin_password(self.ctxt,
- instance='fake-instance', new_pass='fake-password')
- set_admin_password.assert_called_once_with(self.ctxt,
- 'fake-instance', 'fake-password')
diff --git a/nova/tests/cells/test_cells_messaging.py b/nova/tests/cells/test_cells_messaging.py
deleted file mode 100644
index 079f97e089..0000000000
--- a/nova/tests/cells/test_cells_messaging.py
+++ /dev/null
@@ -1,2129 +0,0 @@
-# Copyright (c) 2012 Rackspace Hosting
-# All Rights Reserved.
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For Cells Messaging module
-"""
-
-import contextlib
-
-import mock
-import mox
-from oslo.config import cfg
-from oslo import messaging as oslo_messaging
-from oslo.serialization import jsonutils
-from oslo.utils import timeutils
-
-from nova.cells import messaging
-from nova.cells import utils as cells_utils
-from nova.compute import delete_types
-from nova.compute import task_states
-from nova.compute import vm_states
-from nova import context
-from nova import db
-from nova import exception
-from nova.network import model as network_model
-from nova import objects
-from nova.objects import base as objects_base
-from nova.objects import fields as objects_fields
-from nova.openstack.common import uuidutils
-from nova import rpc
-from nova import test
-from nova.tests.cells import fakes
-from nova.tests import fake_server_actions
-
-CONF = cfg.CONF
-CONF.import_opt('name', 'nova.cells.opts', group='cells')
-
-
-class CellsMessageClassesTestCase(test.TestCase):
- """Test case for the main Cells Message classes."""
- def setUp(self):
- super(CellsMessageClassesTestCase, self).setUp()
- fakes.init(self)
- self.ctxt = context.RequestContext('fake', 'fake')
- self.our_name = 'api-cell'
- self.msg_runner = fakes.get_message_runner(self.our_name)
- self.state_manager = self.msg_runner.state_manager
-
- def test_reverse_path(self):
- path = 'a!b!c!d'
- expected = 'd!c!b!a'
- rev_path = messaging._reverse_path(path)
- self.assertEqual(rev_path, expected)
-
- def test_response_cell_name_from_path(self):
- # test array with tuples of inputs/expected outputs
- test_paths = [('cell1', 'cell1'),
- ('cell1!cell2', 'cell2!cell1'),
- ('cell1!cell2!cell3', 'cell3!cell2!cell1')]
-
- for test_input, expected_output in test_paths:
- self.assertEqual(expected_output,
- messaging._response_cell_name_from_path(test_input))
-
- def test_response_cell_name_from_path_neighbor_only(self):
- # test array with tuples of inputs/expected outputs
- test_paths = [('cell1', 'cell1'),
- ('cell1!cell2', 'cell2!cell1'),
- ('cell1!cell2!cell3', 'cell3!cell2')]
-
- for test_input, expected_output in test_paths:
- self.assertEqual(expected_output,
- messaging._response_cell_name_from_path(test_input,
- neighbor_only=True))
-
- def test_targeted_message(self):
- self.flags(max_hop_count=99, group='cells')
- target_cell = 'api-cell!child-cell2!grandchild-cell1'
- method = 'fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'down'
- tgt_message = messaging._TargetedMessage(self.msg_runner,
- self.ctxt, method,
- method_kwargs, direction,
- target_cell)
- self.assertEqual(self.ctxt, tgt_message.ctxt)
- self.assertEqual(method, tgt_message.method_name)
- self.assertEqual(method_kwargs, tgt_message.method_kwargs)
- self.assertEqual(direction, tgt_message.direction)
- self.assertEqual(target_cell, target_cell)
- self.assertFalse(tgt_message.fanout)
- self.assertFalse(tgt_message.need_response)
- self.assertEqual(self.our_name, tgt_message.routing_path)
- self.assertEqual(1, tgt_message.hop_count)
- self.assertEqual(99, tgt_message.max_hop_count)
- self.assertFalse(tgt_message.is_broadcast)
- # Correct next hop?
- next_hop = tgt_message._get_next_hop()
- child_cell = self.state_manager.get_child_cell('child-cell2')
- self.assertEqual(child_cell, next_hop)
-
- def test_create_targeted_message_with_response(self):
- self.flags(max_hop_count=99, group='cells')
- our_name = 'child-cell1'
- target_cell = 'child-cell1!api-cell'
- msg_runner = fakes.get_message_runner(our_name)
- method = 'fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'up'
- tgt_message = messaging._TargetedMessage(msg_runner,
- self.ctxt, method,
- method_kwargs, direction,
- target_cell,
- need_response=True)
- self.assertEqual(self.ctxt, tgt_message.ctxt)
- self.assertEqual(method, tgt_message.method_name)
- self.assertEqual(method_kwargs, tgt_message.method_kwargs)
- self.assertEqual(direction, tgt_message.direction)
- self.assertEqual(target_cell, target_cell)
- self.assertFalse(tgt_message.fanout)
- self.assertTrue(tgt_message.need_response)
- self.assertEqual(our_name, tgt_message.routing_path)
- self.assertEqual(1, tgt_message.hop_count)
- self.assertEqual(99, tgt_message.max_hop_count)
- self.assertFalse(tgt_message.is_broadcast)
- # Correct next hop?
- next_hop = tgt_message._get_next_hop()
- parent_cell = msg_runner.state_manager.get_parent_cell('api-cell')
- self.assertEqual(parent_cell, next_hop)
-
- def test_targeted_message_when_target_is_cell_state(self):
- method = 'fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'down'
- target_cell = self.state_manager.get_child_cell('child-cell2')
- tgt_message = messaging._TargetedMessage(self.msg_runner,
- self.ctxt, method,
- method_kwargs, direction,
- target_cell)
- self.assertEqual('api-cell!child-cell2', tgt_message.target_cell)
- # Correct next hop?
- next_hop = tgt_message._get_next_hop()
- self.assertEqual(target_cell, next_hop)
-
- def test_targeted_message_when_target_cell_state_is_me(self):
- method = 'fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'down'
- target_cell = self.state_manager.get_my_state()
- tgt_message = messaging._TargetedMessage(self.msg_runner,
- self.ctxt, method,
- method_kwargs, direction,
- target_cell)
- self.assertEqual('api-cell', tgt_message.target_cell)
- # Correct next hop?
- next_hop = tgt_message._get_next_hop()
- self.assertEqual(target_cell, next_hop)
-
- def test_create_broadcast_message(self):
- self.flags(max_hop_count=99, group='cells')
- self.flags(name='api-cell', max_hop_count=99, group='cells')
- method = 'fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'down'
- bcast_message = messaging._BroadcastMessage(self.msg_runner,
- self.ctxt, method,
- method_kwargs, direction)
- self.assertEqual(self.ctxt, bcast_message.ctxt)
- self.assertEqual(method, bcast_message.method_name)
- self.assertEqual(method_kwargs, bcast_message.method_kwargs)
- self.assertEqual(direction, bcast_message.direction)
- self.assertFalse(bcast_message.fanout)
- self.assertFalse(bcast_message.need_response)
- self.assertEqual(self.our_name, bcast_message.routing_path)
- self.assertEqual(1, bcast_message.hop_count)
- self.assertEqual(99, bcast_message.max_hop_count)
- self.assertTrue(bcast_message.is_broadcast)
- # Correct next hops?
- next_hops = bcast_message._get_next_hops()
- child_cells = self.state_manager.get_child_cells()
- self.assertEqual(child_cells, next_hops)
-
- def test_create_broadcast_message_with_response(self):
- self.flags(max_hop_count=99, group='cells')
- our_name = 'child-cell1'
- msg_runner = fakes.get_message_runner(our_name)
- method = 'fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'up'
- bcast_message = messaging._BroadcastMessage(msg_runner, self.ctxt,
- method, method_kwargs, direction, need_response=True)
- self.assertEqual(self.ctxt, bcast_message.ctxt)
- self.assertEqual(method, bcast_message.method_name)
- self.assertEqual(method_kwargs, bcast_message.method_kwargs)
- self.assertEqual(direction, bcast_message.direction)
- self.assertFalse(bcast_message.fanout)
- self.assertTrue(bcast_message.need_response)
- self.assertEqual(our_name, bcast_message.routing_path)
- self.assertEqual(1, bcast_message.hop_count)
- self.assertEqual(99, bcast_message.max_hop_count)
- self.assertTrue(bcast_message.is_broadcast)
- # Correct next hops?
- next_hops = bcast_message._get_next_hops()
- parent_cells = msg_runner.state_manager.get_parent_cells()
- self.assertEqual(parent_cells, next_hops)
-
- def test_self_targeted_message(self):
- target_cell = 'api-cell'
- method = 'our_fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'down'
-
- call_info = {}
-
- def our_fake_method(message, **kwargs):
- call_info['context'] = message.ctxt
- call_info['routing_path'] = message.routing_path
- call_info['kwargs'] = kwargs
-
- fakes.stub_tgt_method(self, 'api-cell', 'our_fake_method',
- our_fake_method)
-
- tgt_message = messaging._TargetedMessage(self.msg_runner,
- self.ctxt, method,
- method_kwargs, direction,
- target_cell)
- tgt_message.process()
-
- self.assertEqual(self.ctxt, call_info['context'])
- self.assertEqual(method_kwargs, call_info['kwargs'])
- self.assertEqual(target_cell, call_info['routing_path'])
-
- def test_child_targeted_message(self):
- target_cell = 'api-cell!child-cell1'
- method = 'our_fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'down'
-
- call_info = {}
-
- def our_fake_method(message, **kwargs):
- call_info['context'] = message.ctxt
- call_info['routing_path'] = message.routing_path
- call_info['kwargs'] = kwargs
-
- fakes.stub_tgt_method(self, 'child-cell1', 'our_fake_method',
- our_fake_method)
-
- tgt_message = messaging._TargetedMessage(self.msg_runner,
- self.ctxt, method,
- method_kwargs, direction,
- target_cell)
- tgt_message.process()
-
- self.assertEqual(self.ctxt, call_info['context'])
- self.assertEqual(method_kwargs, call_info['kwargs'])
- self.assertEqual(target_cell, call_info['routing_path'])
-
- def test_child_targeted_message_with_object(self):
- target_cell = 'api-cell!child-cell1'
- method = 'our_fake_method'
- direction = 'down'
-
- call_info = {}
-
- class CellsMsgingTestObject(objects_base.NovaObject):
- """Test object. We just need 1 field in order to test
- that this gets serialized properly.
- """
- fields = {'test': objects_fields.StringField()}
-
- test_obj = CellsMsgingTestObject()
- test_obj.test = 'meow'
-
- method_kwargs = dict(obj=test_obj, arg1=1, arg2=2)
-
- def our_fake_method(message, **kwargs):
- call_info['context'] = message.ctxt
- call_info['routing_path'] = message.routing_path
- call_info['kwargs'] = kwargs
-
- fakes.stub_tgt_method(self, 'child-cell1', 'our_fake_method',
- our_fake_method)
-
- tgt_message = messaging._TargetedMessage(self.msg_runner,
- self.ctxt, method,
- method_kwargs, direction,
- target_cell)
- tgt_message.process()
-
- self.assertEqual(self.ctxt, call_info['context'])
- self.assertEqual(target_cell, call_info['routing_path'])
- self.assertEqual(3, len(call_info['kwargs']))
- self.assertEqual(1, call_info['kwargs']['arg1'])
- self.assertEqual(2, call_info['kwargs']['arg2'])
- # Verify we get a new object with what we expect.
- obj = call_info['kwargs']['obj']
- self.assertIsInstance(obj, CellsMsgingTestObject)
- self.assertNotEqual(id(test_obj), id(obj))
- self.assertEqual(test_obj.test, obj.test)
-
- def test_grandchild_targeted_message(self):
- target_cell = 'api-cell!child-cell2!grandchild-cell1'
- method = 'our_fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'down'
-
- call_info = {}
-
- def our_fake_method(message, **kwargs):
- call_info['context'] = message.ctxt
- call_info['routing_path'] = message.routing_path
- call_info['kwargs'] = kwargs
-
- fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
- our_fake_method)
-
- tgt_message = messaging._TargetedMessage(self.msg_runner,
- self.ctxt, method,
- method_kwargs, direction,
- target_cell)
- tgt_message.process()
-
- self.assertEqual(self.ctxt, call_info['context'])
- self.assertEqual(method_kwargs, call_info['kwargs'])
- self.assertEqual(target_cell, call_info['routing_path'])
-
- def test_grandchild_targeted_message_with_response(self):
- target_cell = 'api-cell!child-cell2!grandchild-cell1'
- method = 'our_fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'down'
-
- call_info = {}
-
- def our_fake_method(message, **kwargs):
- call_info['context'] = message.ctxt
- call_info['routing_path'] = message.routing_path
- call_info['kwargs'] = kwargs
- return 'our_fake_response'
-
- fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
- our_fake_method)
-
- tgt_message = messaging._TargetedMessage(self.msg_runner,
- self.ctxt, method,
- method_kwargs, direction,
- target_cell,
- need_response=True)
- response = tgt_message.process()
-
- self.assertEqual(self.ctxt, call_info['context'])
- self.assertEqual(method_kwargs, call_info['kwargs'])
- self.assertEqual(target_cell, call_info['routing_path'])
- self.assertFalse(response.failure)
- self.assertEqual(response.value_or_raise(), 'our_fake_response')
-
- def test_grandchild_targeted_message_with_error(self):
- target_cell = 'api-cell!child-cell2!grandchild-cell1'
- method = 'our_fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'down'
-
- def our_fake_method(message, **kwargs):
- raise test.TestingException('this should be returned')
-
- fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
- our_fake_method)
-
- tgt_message = messaging._TargetedMessage(self.msg_runner,
- self.ctxt, method,
- method_kwargs, direction,
- target_cell,
- need_response=True)
- response = tgt_message.process()
- self.assertTrue(response.failure)
- self.assertRaises(test.TestingException, response.value_or_raise)
-
- def test_grandchild_targeted_message_max_hops(self):
- self.flags(max_hop_count=2, group='cells')
- target_cell = 'api-cell!child-cell2!grandchild-cell1'
- method = 'our_fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'down'
-
- def our_fake_method(message, **kwargs):
- raise test.TestingException('should not be reached')
-
- fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
- our_fake_method)
-
- tgt_message = messaging._TargetedMessage(self.msg_runner,
- self.ctxt, method,
- method_kwargs, direction,
- target_cell,
- need_response=True)
- response = tgt_message.process()
- self.assertTrue(response.failure)
- self.assertRaises(exception.CellMaxHopCountReached,
- response.value_or_raise)
-
- def test_targeted_message_invalid_cell(self):
- target_cell = 'api-cell!child-cell2!grandchild-cell4'
- method = 'our_fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'down'
-
- tgt_message = messaging._TargetedMessage(self.msg_runner,
- self.ctxt, method,
- method_kwargs, direction,
- target_cell,
- need_response=True)
- response = tgt_message.process()
- self.assertTrue(response.failure)
- self.assertRaises(exception.CellRoutingInconsistency,
- response.value_or_raise)
-
- def test_targeted_message_invalid_cell2(self):
- target_cell = 'unknown-cell!child-cell2'
- method = 'our_fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'down'
-
- tgt_message = messaging._TargetedMessage(self.msg_runner,
- self.ctxt, method,
- method_kwargs, direction,
- target_cell,
- need_response=True)
- response = tgt_message.process()
- self.assertTrue(response.failure)
- self.assertRaises(exception.CellRoutingInconsistency,
- response.value_or_raise)
-
- def test_broadcast_routing(self):
- method = 'our_fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'down'
-
- cells = set()
-
- def our_fake_method(message, **kwargs):
- cells.add(message.routing_path)
-
- fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
-
- bcast_message = messaging._BroadcastMessage(self.msg_runner,
- self.ctxt, method,
- method_kwargs,
- direction,
- run_locally=True)
- bcast_message.process()
- # fakes creates 8 cells (including ourself).
- self.assertEqual(len(cells), 8)
-
- def test_broadcast_routing_up(self):
- method = 'our_fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'up'
- msg_runner = fakes.get_message_runner('grandchild-cell3')
-
- cells = set()
-
- def our_fake_method(message, **kwargs):
- cells.add(message.routing_path)
-
- fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
-
- bcast_message = messaging._BroadcastMessage(msg_runner, self.ctxt,
- method, method_kwargs,
- direction,
- run_locally=True)
- bcast_message.process()
- # Paths are reversed, since going 'up'
- expected = set(['grandchild-cell3', 'grandchild-cell3!child-cell3',
- 'grandchild-cell3!child-cell3!api-cell'])
- self.assertEqual(expected, cells)
-
- def test_broadcast_routing_without_ourselves(self):
- method = 'our_fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'down'
-
- cells = set()
-
- def our_fake_method(message, **kwargs):
- cells.add(message.routing_path)
-
- fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
-
- bcast_message = messaging._BroadcastMessage(self.msg_runner,
- self.ctxt, method,
- method_kwargs,
- direction,
- run_locally=False)
- bcast_message.process()
- # fakes creates 8 cells (including ourself). So we should see
- # only 7 here.
- self.assertEqual(len(cells), 7)
-
- def test_broadcast_routing_with_response(self):
- method = 'our_fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'down'
-
- def our_fake_method(message, **kwargs):
- return 'response-%s' % message.routing_path
-
- fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
-
- bcast_message = messaging._BroadcastMessage(self.msg_runner,
- self.ctxt, method,
- method_kwargs,
- direction,
- run_locally=True,
- need_response=True)
- responses = bcast_message.process()
- self.assertEqual(len(responses), 8)
- for response in responses:
- self.assertFalse(response.failure)
- self.assertEqual('response-%s' % response.cell_name,
- response.value_or_raise())
-
- def test_broadcast_routing_with_response_max_hops(self):
- self.flags(max_hop_count=2, group='cells')
- method = 'our_fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'down'
-
- def our_fake_method(message, **kwargs):
- return 'response-%s' % message.routing_path
-
- fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
-
- bcast_message = messaging._BroadcastMessage(self.msg_runner,
- self.ctxt, method,
- method_kwargs,
- direction,
- run_locally=True,
- need_response=True)
- responses = bcast_message.process()
- # Should only get responses from our immediate children (and
- # ourselves)
- self.assertEqual(len(responses), 5)
- for response in responses:
- self.assertFalse(response.failure)
- self.assertEqual('response-%s' % response.cell_name,
- response.value_or_raise())
-
- def test_broadcast_routing_with_all_erroring(self):
- method = 'our_fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'down'
-
- def our_fake_method(message, **kwargs):
- raise test.TestingException('fake failure')
-
- fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
-
- bcast_message = messaging._BroadcastMessage(self.msg_runner,
- self.ctxt, method,
- method_kwargs,
- direction,
- run_locally=True,
- need_response=True)
- responses = bcast_message.process()
- self.assertEqual(len(responses), 8)
- for response in responses:
- self.assertTrue(response.failure)
- self.assertRaises(test.TestingException, response.value_or_raise)
-
- def test_broadcast_routing_with_two_erroring(self):
- method = 'our_fake_method'
- method_kwargs = dict(arg1=1, arg2=2)
- direction = 'down'
-
- def our_fake_method_failing(message, **kwargs):
- raise test.TestingException('fake failure')
-
- def our_fake_method(message, **kwargs):
- return 'response-%s' % message.routing_path
-
- fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
- fakes.stub_bcast_method(self, 'child-cell2', 'our_fake_method',
- our_fake_method_failing)
- fakes.stub_bcast_method(self, 'grandchild-cell3', 'our_fake_method',
- our_fake_method_failing)
-
- bcast_message = messaging._BroadcastMessage(self.msg_runner,
- self.ctxt, method,
- method_kwargs,
- direction,
- run_locally=True,
- need_response=True)
- responses = bcast_message.process()
- self.assertEqual(len(responses), 8)
- failure_responses = [resp for resp in responses if resp.failure]
- success_responses = [resp for resp in responses if not resp.failure]
- self.assertEqual(len(failure_responses), 2)
- self.assertEqual(len(success_responses), 6)
-
- for response in success_responses:
- self.assertFalse(response.failure)
- self.assertEqual('response-%s' % response.cell_name,
- response.value_or_raise())
-
- for response in failure_responses:
- self.assertIn(response.cell_name, ['api-cell!child-cell2',
- 'api-cell!child-cell3!grandchild-cell3'])
- self.assertTrue(response.failure)
- self.assertRaises(test.TestingException, response.value_or_raise)
-
-
-class CellsTargetedMethodsTestCase(test.TestCase):
- """Test case for _TargetedMessageMethods class. Most of these
- tests actually test the full path from the MessageRunner through
- to the functionality of the message method. Hits 2 birds with 1
- stone, even though it's a little more than a unit test.
- """
- def setUp(self):
- super(CellsTargetedMethodsTestCase, self).setUp()
- fakes.init(self)
- self.ctxt = context.RequestContext('fake', 'fake')
- self._setup_attrs('api-cell', 'api-cell!child-cell2')
-
- def _setup_attrs(self, source_cell, target_cell):
- self.tgt_cell_name = target_cell
- self.src_msg_runner = fakes.get_message_runner(source_cell)
- self.src_state_manager = self.src_msg_runner.state_manager
- tgt_shortname = target_cell.split('!')[-1]
- self.tgt_cell_mgr = fakes.get_cells_manager(tgt_shortname)
- self.tgt_msg_runner = self.tgt_cell_mgr.msg_runner
- self.tgt_scheduler = self.tgt_msg_runner.scheduler
- self.tgt_state_manager = self.tgt_msg_runner.state_manager
- methods_cls = self.tgt_msg_runner.methods_by_type['targeted']
- self.tgt_methods_cls = methods_cls
- self.tgt_compute_api = methods_cls.compute_api
- self.tgt_host_api = methods_cls.host_api
- self.tgt_db_inst = methods_cls.db
- self.tgt_c_rpcapi = methods_cls.compute_rpcapi
-
- def test_build_instances(self):
- build_inst_kwargs = {'filter_properties': {},
- 'key1': 'value1',
- 'key2': 'value2'}
- self.mox.StubOutWithMock(self.tgt_scheduler, 'build_instances')
- self.tgt_scheduler.build_instances(self.ctxt, build_inst_kwargs)
- self.mox.ReplayAll()
- self.src_msg_runner.build_instances(self.ctxt, self.tgt_cell_name,
- build_inst_kwargs)
-
- def test_run_compute_api_method(self):
-
- instance_uuid = 'fake_instance_uuid'
- method_info = {'method': 'backup',
- 'method_args': (instance_uuid, 2, 3),
- 'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}}
- self.mox.StubOutWithMock(self.tgt_compute_api, 'backup')
- self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
-
- self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
- instance_uuid).AndReturn('fake_instance')
- self.tgt_compute_api.backup(self.ctxt, 'fake_instance', 2, 3,
- arg1='val1', arg2='val2').AndReturn('fake_result')
- self.mox.ReplayAll()
-
- response = self.src_msg_runner.run_compute_api_method(
- self.ctxt,
- self.tgt_cell_name,
- method_info,
- True)
- result = response.value_or_raise()
- self.assertEqual('fake_result', result)
-
- def _run_compute_api_method_expects_object(self, tgt_compute_api_function,
- method_name,
- expected_attrs=None):
- # runs compute api methods which expects instance to be an object
- instance_uuid = 'fake_instance_uuid'
- method_info = {'method': method_name,
- 'method_args': (instance_uuid, 2, 3),
- 'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}}
- self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
-
- self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
- instance_uuid).AndReturn('fake_instance')
-
- def get_instance_mock():
- # NOTE(comstud): This block of code simulates the following
- # mox code:
- #
- # self.mox.StubOutWithMock(objects, 'Instance',
- # use_mock_anything=True)
- # self.mox.StubOutWithMock(objects.Instance,
- # '_from_db_object')
- # instance_mock = self.mox.CreateMock(objects.Instance)
- # objects.Instance().AndReturn(instance_mock)
- #
- # Unfortunately, the above code fails on py27 do to some
- # issue with the Mock object do to similar issue as this:
- # https://code.google.com/p/pymox/issues/detail?id=35
- #
- class FakeInstance(object):
- @classmethod
- def _from_db_object(cls, ctxt, obj, db_obj, **kwargs):
- pass
-
- instance_mock = FakeInstance()
-
- def fake_instance():
- return instance_mock
-
- self.stubs.Set(objects, 'Instance', fake_instance)
- self.mox.StubOutWithMock(instance_mock, '_from_db_object')
- return instance_mock
-
- instance = get_instance_mock()
- instance._from_db_object(self.ctxt,
- instance,
- 'fake_instance',
- expected_attrs=expected_attrs
- ).AndReturn(instance)
- tgt_compute_api_function(self.ctxt, instance, 2, 3,
- arg1='val1', arg2='val2').AndReturn('fake_result')
- self.mox.ReplayAll()
-
- response = self.src_msg_runner.run_compute_api_method(
- self.ctxt,
- self.tgt_cell_name,
- method_info,
- True)
- result = response.value_or_raise()
- self.assertEqual('fake_result', result)
-
- def test_run_compute_api_method_expects_obj(self):
- # Run compute_api start method
- self.mox.StubOutWithMock(self.tgt_compute_api, 'start')
- self._run_compute_api_method_expects_object(self.tgt_compute_api.start,
- 'start')
-
- def test_run_compute_api_method_expects_obj_with_info_cache(self):
- # Run compute_api shelve method as it requires info_cache and
- # metadata to be present in instance object
- self.mox.StubOutWithMock(self.tgt_compute_api, 'shelve')
- self._run_compute_api_method_expects_object(
- self.tgt_compute_api.shelve, 'shelve',
- expected_attrs=['metadata', 'info_cache'])
-
- def test_run_compute_api_method_unknown_instance(self):
- # Unknown instance should send a broadcast up that instance
- # is gone.
- instance_uuid = 'fake_instance_uuid'
- instance = {'uuid': instance_uuid}
- method_info = {'method': 'reboot',
- 'method_args': (instance_uuid, 2, 3),
- 'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}}
-
- self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
- self.mox.StubOutWithMock(self.tgt_msg_runner,
- 'instance_destroy_at_top')
-
- self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
- 'fake_instance_uuid').AndRaise(
- exception.InstanceNotFound(instance_id=instance_uuid))
- self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, instance)
-
- self.mox.ReplayAll()
-
- response = self.src_msg_runner.run_compute_api_method(
- self.ctxt,
- self.tgt_cell_name,
- method_info,
- True)
- self.assertRaises(exception.InstanceNotFound,
- response.value_or_raise)
-
- def test_update_capabilities(self):
- # Route up to API
- self._setup_attrs('child-cell2', 'child-cell2!api-cell')
- capabs = {'cap1': set(['val1', 'val2']),
- 'cap2': set(['val3'])}
- # The list(set([])) seems silly, but we can't assume the order
- # of the list... This behavior should match the code we're
- # testing... which is check that a set was converted to a list.
- expected_capabs = {'cap1': list(set(['val1', 'val2'])),
- 'cap2': ['val3']}
- self.mox.StubOutWithMock(self.src_state_manager,
- 'get_our_capabilities')
- self.mox.StubOutWithMock(self.tgt_state_manager,
- 'update_cell_capabilities')
- self.mox.StubOutWithMock(self.tgt_msg_runner,
- 'tell_parents_our_capabilities')
- self.src_state_manager.get_our_capabilities().AndReturn(capabs)
- self.tgt_state_manager.update_cell_capabilities('child-cell2',
- expected_capabs)
- self.tgt_msg_runner.tell_parents_our_capabilities(self.ctxt)
-
- self.mox.ReplayAll()
-
- self.src_msg_runner.tell_parents_our_capabilities(self.ctxt)
-
- def test_update_capacities(self):
- self._setup_attrs('child-cell2', 'child-cell2!api-cell')
- capacs = 'fake_capacs'
- self.mox.StubOutWithMock(self.src_state_manager,
- 'get_our_capacities')
- self.mox.StubOutWithMock(self.tgt_state_manager,
- 'update_cell_capacities')
- self.mox.StubOutWithMock(self.tgt_msg_runner,
- 'tell_parents_our_capacities')
- self.src_state_manager.get_our_capacities().AndReturn(capacs)
- self.tgt_state_manager.update_cell_capacities('child-cell2',
- capacs)
- self.tgt_msg_runner.tell_parents_our_capacities(self.ctxt)
-
- self.mox.ReplayAll()
-
- self.src_msg_runner.tell_parents_our_capacities(self.ctxt)
-
- def test_announce_capabilities(self):
- self._setup_attrs('api-cell', 'api-cell!child-cell1')
- # To make this easier to test, make us only have 1 child cell.
- cell_state = self.src_state_manager.child_cells['child-cell1']
- self.src_state_manager.child_cells = {'child-cell1': cell_state}
-
- self.mox.StubOutWithMock(self.tgt_msg_runner,
- 'tell_parents_our_capabilities')
- self.tgt_msg_runner.tell_parents_our_capabilities(self.ctxt)
-
- self.mox.ReplayAll()
-
- self.src_msg_runner.ask_children_for_capabilities(self.ctxt)
-
- def test_announce_capacities(self):
- self._setup_attrs('api-cell', 'api-cell!child-cell1')
- # To make this easier to test, make us only have 1 child cell.
- cell_state = self.src_state_manager.child_cells['child-cell1']
- self.src_state_manager.child_cells = {'child-cell1': cell_state}
-
- self.mox.StubOutWithMock(self.tgt_msg_runner,
- 'tell_parents_our_capacities')
- self.tgt_msg_runner.tell_parents_our_capacities(self.ctxt)
-
- self.mox.ReplayAll()
-
- self.src_msg_runner.ask_children_for_capacities(self.ctxt)
-
- def test_service_get_by_compute_host(self):
- fake_host_name = 'fake-host-name'
-
- self.mox.StubOutWithMock(self.tgt_db_inst,
- 'service_get_by_compute_host')
-
- self.tgt_db_inst.service_get_by_compute_host(self.ctxt,
- fake_host_name).AndReturn('fake-service')
- self.mox.ReplayAll()
-
- response = self.src_msg_runner.service_get_by_compute_host(
- self.ctxt,
- self.tgt_cell_name,
- fake_host_name)
- result = response.value_or_raise()
- self.assertEqual('fake-service', result)
-
- def test_service_update(self):
- binary = 'nova-compute'
- fake_service = dict(id=42, host='fake_host', binary='nova-compute',
- topic='compute')
- fake_compute = dict(
- id=7116, service_id=42, host='fake_host', vcpus=0, memory_mb=0,
- local_gb=0, vcpus_used=0, memory_mb_used=0, local_gb_used=0,
- hypervisor_type=0, hypervisor_version=0, hypervisor_hostname=0,
- free_ram_mb=0, free_disk_gb=0, current_workload=0, running_vms=0,
- cpu_info='HAL', disk_available_least=0)
- params_to_update = {'disabled': True, 'report_count': 13}
-
- ctxt = context.RequestContext('fake_user', 'fake_project',
- is_admin=True)
- # We use the real DB for this test, as it's too hard to reach the
- # host_api to mock out its DB methods
- db.service_create(ctxt, fake_service)
- db.compute_node_create(ctxt, fake_compute)
-
- self.mox.ReplayAll()
-
- response = self.src_msg_runner.service_update(
- ctxt, self.tgt_cell_name,
- 'fake_host', binary, params_to_update)
- result = response.value_or_raise()
- result.pop('created_at', None)
- result.pop('updated_at', None)
- result.pop('disabled_reason', None)
- expected_result = dict(
- deleted=0, deleted_at=None,
- binary=fake_service['binary'],
- disabled=True, # We just updated this..
- report_count=13, # ..and this
- host='fake_host', id=42,
- topic='compute')
- self.assertEqual(expected_result, result)
-
- def test_service_delete(self):
- fake_service = dict(id=42, host='fake_host', binary='nova-compute',
- topic='compute')
-
- ctxt = self.ctxt.elevated()
- db.service_create(ctxt, fake_service)
-
- self.src_msg_runner.service_delete(
- ctxt, self.tgt_cell_name, fake_service['id'])
- self.assertRaises(exception.ServiceNotFound,
- db.service_get, ctxt, fake_service['id'])
-
- def test_proxy_rpc_to_manager_call(self):
- fake_topic = 'fake-topic'
- fake_rpc_message = {'method': 'fake_rpc_method', 'args': {}}
- fake_host_name = 'fake-host-name'
-
- self.mox.StubOutWithMock(self.tgt_db_inst,
- 'service_get_by_compute_host')
- self.tgt_db_inst.service_get_by_compute_host(self.ctxt,
- fake_host_name)
-
- target = oslo_messaging.Target(topic='fake-topic')
- rpcclient = self.mox.CreateMockAnything()
-
- self.mox.StubOutWithMock(rpc, 'get_client')
- rpc.get_client(target).AndReturn(rpcclient)
- rpcclient.prepare(timeout=5).AndReturn(rpcclient)
- rpcclient.call(mox.IgnoreArg(),
- 'fake_rpc_method').AndReturn('fake_result')
-
- self.mox.ReplayAll()
-
- response = self.src_msg_runner.proxy_rpc_to_manager(
- self.ctxt,
- self.tgt_cell_name,
- fake_host_name,
- fake_topic,
- fake_rpc_message, True, timeout=5)
- result = response.value_or_raise()
- self.assertEqual('fake_result', result)
-
- def test_proxy_rpc_to_manager_cast(self):
- fake_topic = 'fake-topic'
- fake_rpc_message = {'method': 'fake_rpc_method', 'args': {}}
- fake_host_name = 'fake-host-name'
-
- self.mox.StubOutWithMock(self.tgt_db_inst,
- 'service_get_by_compute_host')
- self.tgt_db_inst.service_get_by_compute_host(self.ctxt,
- fake_host_name)
-
- target = oslo_messaging.Target(topic='fake-topic')
- rpcclient = self.mox.CreateMockAnything()
-
- self.mox.StubOutWithMock(rpc, 'get_client')
- rpc.get_client(target).AndReturn(rpcclient)
- rpcclient.cast(mox.IgnoreArg(), 'fake_rpc_method')
-
- self.mox.ReplayAll()
-
- self.src_msg_runner.proxy_rpc_to_manager(
- self.ctxt,
- self.tgt_cell_name,
- fake_host_name,
- fake_topic,
- fake_rpc_message, False, timeout=None)
-
- def test_task_log_get_all_targeted(self):
- task_name = 'fake_task_name'
- begin = 'fake_begin'
- end = 'fake_end'
- host = 'fake_host'
- state = 'fake_state'
-
- self.mox.StubOutWithMock(self.tgt_db_inst, 'task_log_get_all')
- self.tgt_db_inst.task_log_get_all(self.ctxt, task_name,
- begin, end, host=host,
- state=state).AndReturn(['fake_result'])
-
- self.mox.ReplayAll()
-
- response = self.src_msg_runner.task_log_get_all(self.ctxt,
- self.tgt_cell_name, task_name, begin, end, host=host,
- state=state)
- self.assertIsInstance(response, list)
- self.assertEqual(1, len(response))
- result = response[0].value_or_raise()
- self.assertEqual(['fake_result'], result)
-
- def test_compute_node_get(self):
- compute_id = 'fake-id'
- self.mox.StubOutWithMock(self.tgt_db_inst, 'compute_node_get')
- self.tgt_db_inst.compute_node_get(self.ctxt,
- compute_id).AndReturn('fake_result')
-
- self.mox.ReplayAll()
-
- response = self.src_msg_runner.compute_node_get(self.ctxt,
- self.tgt_cell_name, compute_id)
- result = response.value_or_raise()
- self.assertEqual('fake_result', result)
-
- def test_actions_get(self):
- fake_uuid = fake_server_actions.FAKE_UUID
- fake_req_id = fake_server_actions.FAKE_REQUEST_ID1
- fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
-
- self.mox.StubOutWithMock(self.tgt_db_inst, 'actions_get')
- self.tgt_db_inst.actions_get(self.ctxt,
- 'fake-uuid').AndReturn([fake_act])
- self.mox.ReplayAll()
-
- response = self.src_msg_runner.actions_get(self.ctxt,
- self.tgt_cell_name,
- 'fake-uuid')
- result = response.value_or_raise()
- self.assertEqual([jsonutils.to_primitive(fake_act)], result)
-
- def test_action_get_by_request_id(self):
- fake_uuid = fake_server_actions.FAKE_UUID
- fake_req_id = fake_server_actions.FAKE_REQUEST_ID1
- fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
-
- self.mox.StubOutWithMock(self.tgt_db_inst, 'action_get_by_request_id')
- self.tgt_db_inst.action_get_by_request_id(self.ctxt,
- 'fake-uuid', 'req-fake').AndReturn(fake_act)
- self.mox.ReplayAll()
-
- response = self.src_msg_runner.action_get_by_request_id(self.ctxt,
- self.tgt_cell_name, 'fake-uuid', 'req-fake')
- result = response.value_or_raise()
- self.assertEqual(jsonutils.to_primitive(fake_act), result)
-
- def test_action_events_get(self):
- fake_action_id = fake_server_actions.FAKE_ACTION_ID1
- fake_events = fake_server_actions.FAKE_EVENTS[fake_action_id]
-
- self.mox.StubOutWithMock(self.tgt_db_inst, 'action_events_get')
- self.tgt_db_inst.action_events_get(self.ctxt,
- 'fake-action').AndReturn(fake_events)
- self.mox.ReplayAll()
-
- response = self.src_msg_runner.action_events_get(self.ctxt,
- self.tgt_cell_name,
- 'fake-action')
- result = response.value_or_raise()
- self.assertEqual(jsonutils.to_primitive(fake_events), result)
-
- def test_validate_console_port(self):
- instance_uuid = 'fake_instance_uuid'
- instance = {'uuid': instance_uuid}
- console_port = 'fake-port'
- console_type = 'fake-type'
-
- self.mox.StubOutWithMock(self.tgt_c_rpcapi, 'validate_console_port')
- self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
-
- self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
- instance_uuid).AndReturn(instance)
- self.tgt_c_rpcapi.validate_console_port(self.ctxt,
- instance, console_port, console_type).AndReturn('fake_result')
-
- self.mox.ReplayAll()
-
- response = self.src_msg_runner.validate_console_port(self.ctxt,
- self.tgt_cell_name, instance_uuid, console_port,
- console_type)
- result = response.value_or_raise()
- self.assertEqual('fake_result', result)
-
- def test_get_migrations_for_a_given_cell(self):
- filters = {'cell_name': 'child-cell2', 'status': 'confirmed'}
- migrations_in_progress = [{'id': 123}]
- self.mox.StubOutWithMock(self.tgt_compute_api,
- 'get_migrations')
-
- self.tgt_compute_api.get_migrations(self.ctxt, filters).\
- AndReturn(migrations_in_progress)
- self.mox.ReplayAll()
-
- responses = self.src_msg_runner.get_migrations(
- self.ctxt,
- self.tgt_cell_name, False, filters)
- result = responses[0].value_or_raise()
- self.assertEqual(migrations_in_progress, result)
-
- def test_get_migrations_for_an_invalid_cell(self):
- filters = {'cell_name': 'invalid_Cell', 'status': 'confirmed'}
-
- responses = self.src_msg_runner.get_migrations(
- self.ctxt,
- 'api_cell!invalid_cell', False, filters)
-
- self.assertEqual(0, len(responses))
-
- def test_call_compute_api_with_obj(self):
- instance = objects.Instance()
- instance.uuid = uuidutils.generate_uuid()
- self.mox.StubOutWithMock(instance, 'refresh')
- # Using 'snapshot' for this test, because it
- # takes args and kwargs.
- self.mox.StubOutWithMock(self.tgt_compute_api, 'snapshot')
- instance.refresh(self.ctxt)
- self.tgt_compute_api.snapshot(
- self.ctxt, instance, 'name',
- extra_properties='props').AndReturn('foo')
-
- self.mox.ReplayAll()
- result = self.tgt_methods_cls._call_compute_api_with_obj(
- self.ctxt, instance, 'snapshot', 'name',
- extra_properties='props')
- self.assertEqual('foo', result)
-
- def test_call_compute_api_with_obj_no_cache(self):
- instance = objects.Instance()
- instance.uuid = uuidutils.generate_uuid()
- error = exception.InstanceInfoCacheNotFound(
- instance_uuid=instance.uuid)
- with mock.patch.object(instance, 'refresh', side_effect=error):
- self.assertRaises(exception.InstanceInfoCacheNotFound,
- self.tgt_methods_cls._call_compute_api_with_obj,
- self.ctxt, instance, 'snapshot')
-
- def test_call_delete_compute_api_with_obj_no_cache(self):
- instance = objects.Instance()
- instance.uuid = uuidutils.generate_uuid()
- error = exception.InstanceInfoCacheNotFound(
- instance_uuid=instance.uuid)
- with contextlib.nested(
- mock.patch.object(instance, 'refresh',
- side_effect=error),
- mock.patch.object(self.tgt_compute_api, 'delete')) as (inst,
- delete):
- self.tgt_methods_cls._call_compute_api_with_obj(self.ctxt,
- instance,
- 'delete')
- delete.assert_called_once_with(self.ctxt, instance)
-
- def test_call_compute_with_obj_unknown_instance(self):
- instance = objects.Instance()
- instance.uuid = uuidutils.generate_uuid()
- instance.vm_state = vm_states.ACTIVE
- instance.task_state = None
- self.mox.StubOutWithMock(instance, 'refresh')
- self.mox.StubOutWithMock(self.tgt_msg_runner,
- 'instance_destroy_at_top')
-
- instance.refresh(self.ctxt).AndRaise(
- exception.InstanceNotFound(instance_id=instance.uuid))
-
- self.tgt_msg_runner.instance_destroy_at_top(self.ctxt,
- {'uuid': instance.uuid})
-
- self.mox.ReplayAll()
- self.assertRaises(exception.InstanceNotFound,
- self.tgt_methods_cls._call_compute_api_with_obj,
- self.ctxt, instance, 'snapshot', 'name')
-
- def _instance_update_helper(self, admin_state_reset):
- class FakeMessage(object):
- pass
-
- message = FakeMessage()
- message.ctxt = self.ctxt
-
- instance = objects.Instance()
- instance.cell_name = self.tgt_cell_name
- instance.obj_reset_changes()
- instance.task_state = 'meow'
- instance.vm_state = 'wuff'
- instance.user_data = 'foo'
- instance.metadata = {'meta': 'data'}
- instance.system_metadata = {'system': 'metadata'}
- self.assertEqual(set(['user_data', 'vm_state', 'task_state',
- 'metadata', 'system_metadata']),
- instance.obj_what_changed())
-
- self.mox.StubOutWithMock(instance, 'save')
-
- def _check_object(*args, **kwargs):
- # task_state and vm_state changes should have been cleared
- # before calling save()
- if admin_state_reset:
- self.assertEqual(
- set(['user_data', 'vm_state', 'task_state']),
- instance.obj_what_changed())
- else:
- self.assertEqual(set(['user_data']),
- instance.obj_what_changed())
-
- instance.save(self.ctxt, expected_task_state='exp_task',
- expected_vm_state='exp_vm').WithSideEffects(
- _check_object)
-
- self.mox.ReplayAll()
-
- self.tgt_methods_cls.instance_update_from_api(
- message,
- instance,
- expected_vm_state='exp_vm',
- expected_task_state='exp_task',
- admin_state_reset=admin_state_reset)
-
- def test_instance_update_from_api(self):
- self._instance_update_helper(False)
-
- def test_instance_update_from_api_admin_state_reset(self):
- self._instance_update_helper(True)
-
- def _test_instance_action_method(self, method, args, kwargs,
- expected_args, expected_kwargs,
- expect_result):
- class FakeMessage(object):
- pass
-
- message = FakeMessage()
- message.ctxt = self.ctxt
- message.need_response = expect_result
-
- meth_cls = self.tgt_methods_cls
- self.mox.StubOutWithMock(meth_cls, '_call_compute_api_with_obj')
-
- method_corrections = {
- 'terminate': 'delete',
- }
- api_method = method_corrections.get(method, method)
-
- meth_cls._call_compute_api_with_obj(
- self.ctxt, 'fake-instance', api_method,
- *expected_args, **expected_kwargs).AndReturn('meow')
-
- self.mox.ReplayAll()
-
- method_translations = {'revert_resize': 'revert_resize',
- 'confirm_resize': 'confirm_resize',
- 'reset_network': 'reset_network',
- 'inject_network_info': 'inject_network_info',
- 'set_admin_password': 'set_admin_password',
- }
- tgt_method = method_translations.get(method,
- '%s_instance' % method)
- result = getattr(meth_cls, tgt_method)(
- message, 'fake-instance', *args, **kwargs)
- if expect_result:
- self.assertEqual('meow', result)
-
- def test_start_instance(self):
- self._test_instance_action_method('start', (), {}, (), {}, False)
-
- def test_stop_instance_cast(self):
- self._test_instance_action_method('stop', (), {}, (),
- {'do_cast': True}, False)
-
- def test_stop_instance_call(self):
- self._test_instance_action_method('stop', (), {}, (),
- {'do_cast': False}, True)
-
- def test_reboot_instance(self):
- kwargs = dict(reboot_type='HARD')
- self._test_instance_action_method('reboot', (), kwargs, (),
- kwargs, False)
-
- def test_suspend_instance(self):
- self._test_instance_action_method('suspend', (), {}, (), {}, False)
-
- def test_resume_instance(self):
- self._test_instance_action_method('resume', (), {}, (), {}, False)
-
- def test_get_host_uptime(self):
- host_name = "fake-host"
- host_uptime = (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
- " 0.20, 0.12, 0.14")
- self.mox.StubOutWithMock(self.tgt_host_api, 'get_host_uptime')
- self.tgt_host_api.get_host_uptime(self.ctxt, host_name).\
- AndReturn(host_uptime)
- self.mox.ReplayAll()
- response = self.src_msg_runner.get_host_uptime(self.ctxt,
- self.tgt_cell_name,
- host_name)
- expected_host_uptime = response.value_or_raise()
- self.assertEqual(host_uptime, expected_host_uptime)
-
- def test_terminate_instance(self):
- self._test_instance_action_method('terminate',
- (), {}, (), {}, False)
-
- def test_soft_delete_instance(self):
- self._test_instance_action_method(delete_types.SOFT_DELETE,
- (), {}, (), {}, False)
-
- def test_pause_instance(self):
- self._test_instance_action_method('pause', (), {}, (), {}, False)
-
- def test_unpause_instance(self):
- self._test_instance_action_method('unpause', (), {}, (), {}, False)
-
- def test_resize_instance(self):
- kwargs = dict(flavor=dict(id=42, flavorid='orangemocchafrappuccino'),
- extra_instance_updates=dict(cow='moo'))
- expected_kwargs = dict(flavor_id='orangemocchafrappuccino', cow='moo')
- self._test_instance_action_method('resize', (), kwargs,
- (), expected_kwargs,
- False)
-
- def test_live_migrate_instance(self):
- kwargs = dict(block_migration='fake-block-mig',
- disk_over_commit='fake-commit',
- host_name='fake-host')
- expected_args = ('fake-block-mig', 'fake-commit', 'fake-host')
- self._test_instance_action_method('live_migrate', (), kwargs,
- expected_args, {}, False)
-
- def test_revert_resize(self):
- self._test_instance_action_method('revert_resize',
- (), {}, (), {}, False)
-
- def test_confirm_resize(self):
- self._test_instance_action_method('confirm_resize',
- (), {}, (), {}, False)
-
- def test_reset_network(self):
- self._test_instance_action_method('reset_network',
- (), {}, (), {}, False)
-
- def test_inject_network_info(self):
- self._test_instance_action_method('inject_network_info',
- (), {}, (), {}, False)
-
- def test_snapshot_instance(self):
- inst = objects.Instance()
- meth_cls = self.tgt_methods_cls
-
- self.mox.StubOutWithMock(inst, 'refresh')
- self.mox.StubOutWithMock(inst, 'save')
- self.mox.StubOutWithMock(meth_cls.compute_rpcapi, 'snapshot_instance')
-
- def check_state(expected_task_state=None):
- self.assertEqual(task_states.IMAGE_SNAPSHOT_PENDING,
- inst.task_state)
-
- inst.refresh()
- inst.save(expected_task_state=[None]).WithSideEffects(check_state)
-
- meth_cls.compute_rpcapi.snapshot_instance(self.ctxt,
- inst, 'image-id')
-
- self.mox.ReplayAll()
-
- class FakeMessage(object):
- pass
-
- message = FakeMessage()
- message.ctxt = self.ctxt
- message.need_response = False
-
- meth_cls.snapshot_instance(message, inst, image_id='image-id')
-
- def test_backup_instance(self):
- inst = objects.Instance()
- meth_cls = self.tgt_methods_cls
-
- self.mox.StubOutWithMock(inst, 'refresh')
- self.mox.StubOutWithMock(inst, 'save')
- self.mox.StubOutWithMock(meth_cls.compute_rpcapi, 'backup_instance')
-
- def check_state(expected_task_state=None):
- self.assertEqual(task_states.IMAGE_BACKUP, inst.task_state)
-
- inst.refresh()
- inst.save(expected_task_state=[None]).WithSideEffects(check_state)
-
- meth_cls.compute_rpcapi.backup_instance(self.ctxt,
- inst,
- 'image-id',
- 'backup-type',
- 'rotation')
-
- self.mox.ReplayAll()
-
- class FakeMessage(object):
- pass
-
- message = FakeMessage()
- message.ctxt = self.ctxt
- message.need_response = False
-
- meth_cls.backup_instance(message, inst,
- image_id='image-id',
- backup_type='backup-type',
- rotation='rotation')
-
- def test_set_admin_password(self):
- args = ['fake-password']
- self._test_instance_action_method('set_admin_password', args, {}, args,
- {}, False)
-
-
-class CellsBroadcastMethodsTestCase(test.TestCase):
- """Test case for _BroadcastMessageMethods class. Most of these
- tests actually test the full path from the MessageRunner through
- to the functionality of the message method. Hits 2 birds with 1
- stone, even though it's a little more than a unit test.
- """
-
- def setUp(self):
- super(CellsBroadcastMethodsTestCase, self).setUp()
- fakes.init(self)
- self.ctxt = context.RequestContext('fake', 'fake')
- self._setup_attrs()
-
- def _setup_attrs(self, up=True):
- mid_cell = 'child-cell2'
- if up:
- src_cell = 'grandchild-cell1'
- tgt_cell = 'api-cell'
- else:
- src_cell = 'api-cell'
- tgt_cell = 'grandchild-cell1'
-
- self.src_msg_runner = fakes.get_message_runner(src_cell)
- methods_cls = self.src_msg_runner.methods_by_type['broadcast']
- self.src_methods_cls = methods_cls
- self.src_db_inst = methods_cls.db
- self.src_compute_api = methods_cls.compute_api
- self.src_ca_rpcapi = methods_cls.consoleauth_rpcapi
-
- if not up:
- # fudge things so we only have 1 child to broadcast to
- state_manager = self.src_msg_runner.state_manager
- for cell in state_manager.get_child_cells():
- if cell.name != 'child-cell2':
- del state_manager.child_cells[cell.name]
-
- self.mid_msg_runner = fakes.get_message_runner(mid_cell)
- methods_cls = self.mid_msg_runner.methods_by_type['broadcast']
- self.mid_methods_cls = methods_cls
- self.mid_db_inst = methods_cls.db
- self.mid_compute_api = methods_cls.compute_api
- self.mid_ca_rpcapi = methods_cls.consoleauth_rpcapi
-
- self.tgt_msg_runner = fakes.get_message_runner(tgt_cell)
- methods_cls = self.tgt_msg_runner.methods_by_type['broadcast']
- self.tgt_methods_cls = methods_cls
- self.tgt_db_inst = methods_cls.db
- self.tgt_compute_api = methods_cls.compute_api
- self.tgt_ca_rpcapi = methods_cls.consoleauth_rpcapi
-
- def test_at_the_top(self):
- self.assertTrue(self.tgt_methods_cls._at_the_top())
- self.assertFalse(self.mid_methods_cls._at_the_top())
- self.assertFalse(self.src_methods_cls._at_the_top())
-
- def test_apply_expected_states_building(self):
- instance_info = {'vm_state': vm_states.BUILDING}
- expected = dict(instance_info,
- expected_vm_state=[vm_states.BUILDING, None])
- self.src_methods_cls._apply_expected_states(instance_info)
- self.assertEqual(expected, instance_info)
-
- def test_apply_expected_states_resize_finish(self):
- instance_info = {'task_state': task_states.RESIZE_FINISH}
- exp_states = [task_states.RESIZE_FINISH,
- task_states.RESIZE_MIGRATED,
- task_states.RESIZE_MIGRATING,
- task_states.RESIZE_PREP]
- expected = dict(instance_info, expected_task_state=exp_states)
- self.src_methods_cls._apply_expected_states(instance_info)
- self.assertEqual(expected, instance_info)
-
- def _test_instance_update_at_top(self, net_info, exists=True):
- fake_info_cache = {'id': 1,
- 'instance': 'fake_instance',
- 'network_info': net_info}
- fake_sys_metadata = [{'id': 1,
- 'key': 'key1',
- 'value': 'value1'},
- {'id': 2,
- 'key': 'key2',
- 'value': 'value2'}]
- fake_instance = {'id': 2,
- 'uuid': 'fake_uuid',
- 'security_groups': 'fake',
- 'volumes': 'fake',
- 'cell_name': 'fake',
- 'name': 'fake',
- 'metadata': 'fake',
- 'info_cache': fake_info_cache,
- 'system_metadata': fake_sys_metadata,
- 'other': 'meow'}
- expected_sys_metadata = {'key1': 'value1',
- 'key2': 'value2'}
- expected_info_cache = {'network_info': "[]"}
- expected_cell_name = 'api-cell!child-cell2!grandchild-cell1'
- expected_instance = {'system_metadata': expected_sys_metadata,
- 'cell_name': expected_cell_name,
- 'other': 'meow',
- 'uuid': 'fake_uuid'}
-
- # To show these should not be called in src/mid-level cell
- self.mox.StubOutWithMock(self.src_db_inst, 'instance_update')
- self.mox.StubOutWithMock(self.src_db_inst,
- 'instance_info_cache_update')
- self.mox.StubOutWithMock(self.mid_db_inst, 'instance_update')
- self.mox.StubOutWithMock(self.mid_db_inst,
- 'instance_info_cache_update')
-
- self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_update')
- self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_create')
- self.mox.StubOutWithMock(self.tgt_db_inst,
- 'instance_info_cache_update')
- mock = self.tgt_db_inst.instance_update(self.ctxt, 'fake_uuid',
- expected_instance,
- update_cells=False)
- if not exists:
- mock.AndRaise(exception.InstanceNotFound(instance_id='fake_uuid'))
- self.tgt_db_inst.instance_create(self.ctxt,
- expected_instance)
- self.tgt_db_inst.instance_info_cache_update(self.ctxt, 'fake_uuid',
- expected_info_cache)
- self.mox.ReplayAll()
-
- self.src_msg_runner.instance_update_at_top(self.ctxt, fake_instance)
-
- def test_instance_update_at_top(self):
- self._test_instance_update_at_top("[]")
-
- def test_instance_update_at_top_netinfo_list(self):
- self._test_instance_update_at_top([])
-
- def test_instance_update_at_top_netinfo_model(self):
- self._test_instance_update_at_top(network_model.NetworkInfo())
-
- def test_instance_update_at_top_does_not_already_exist(self):
- self._test_instance_update_at_top([], exists=False)
-
- def test_instance_update_at_top_with_building_state(self):
- fake_info_cache = {'id': 1,
- 'instance': 'fake_instance',
- 'other': 'moo'}
- fake_sys_metadata = [{'id': 1,
- 'key': 'key1',
- 'value': 'value1'},
- {'id': 2,
- 'key': 'key2',
- 'value': 'value2'}]
- fake_instance = {'id': 2,
- 'uuid': 'fake_uuid',
- 'security_groups': 'fake',
- 'volumes': 'fake',
- 'cell_name': 'fake',
- 'name': 'fake',
- 'metadata': 'fake',
- 'info_cache': fake_info_cache,
- 'system_metadata': fake_sys_metadata,
- 'vm_state': vm_states.BUILDING,
- 'other': 'meow'}
- expected_sys_metadata = {'key1': 'value1',
- 'key2': 'value2'}
- expected_info_cache = {'other': 'moo'}
- expected_cell_name = 'api-cell!child-cell2!grandchild-cell1'
- expected_instance = {'system_metadata': expected_sys_metadata,
- 'cell_name': expected_cell_name,
- 'other': 'meow',
- 'vm_state': vm_states.BUILDING,
- 'expected_vm_state': [vm_states.BUILDING, None],
- 'uuid': 'fake_uuid'}
-
- # To show these should not be called in src/mid-level cell
- self.mox.StubOutWithMock(self.src_db_inst, 'instance_update')
- self.mox.StubOutWithMock(self.src_db_inst,
- 'instance_info_cache_update')
- self.mox.StubOutWithMock(self.mid_db_inst, 'instance_update')
- self.mox.StubOutWithMock(self.mid_db_inst,
- 'instance_info_cache_update')
-
- self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_update')
- self.mox.StubOutWithMock(self.tgt_db_inst,
- 'instance_info_cache_update')
- self.tgt_db_inst.instance_update(self.ctxt, 'fake_uuid',
- expected_instance,
- update_cells=False)
- self.tgt_db_inst.instance_info_cache_update(self.ctxt, 'fake_uuid',
- expected_info_cache)
- self.mox.ReplayAll()
-
- self.src_msg_runner.instance_update_at_top(self.ctxt, fake_instance)
-
- def test_instance_destroy_at_top(self):
- fake_instance = {'uuid': 'fake_uuid'}
-
- # To show these should not be called in src/mid-level cell
- self.mox.StubOutWithMock(self.src_db_inst, 'instance_destroy')
-
- self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_destroy')
- self.tgt_db_inst.instance_destroy(self.ctxt, 'fake_uuid',
- update_cells=False)
- self.mox.ReplayAll()
-
- self.src_msg_runner.instance_destroy_at_top(self.ctxt, fake_instance)
-
- def test_instance_hard_delete_everywhere(self):
- # Reset this, as this is a broadcast down.
- self._setup_attrs(up=False)
- instance = {'uuid': 'meow'}
-
- # Should not be called in src (API cell)
- self.mox.StubOutWithMock(self.src_compute_api, delete_types.DELETE)
-
- self.mox.StubOutWithMock(self.mid_compute_api, delete_types.DELETE)
- self.mox.StubOutWithMock(self.tgt_compute_api, delete_types.DELETE)
-
- self.mid_compute_api.delete(self.ctxt, instance)
- self.tgt_compute_api.delete(self.ctxt, instance)
-
- self.mox.ReplayAll()
-
- self.src_msg_runner.instance_delete_everywhere(self.ctxt,
- instance, delete_types.DELETE)
-
- def test_instance_soft_delete_everywhere(self):
- # Reset this, as this is a broadcast down.
- self._setup_attrs(up=False)
- instance = {'uuid': 'meow'}
-
- # Should not be called in src (API cell)
- self.mox.StubOutWithMock(self.src_compute_api,
- delete_types.SOFT_DELETE)
-
- self.mox.StubOutWithMock(self.mid_compute_api,
- delete_types.SOFT_DELETE)
- self.mox.StubOutWithMock(self.tgt_compute_api,
- delete_types.SOFT_DELETE)
-
- self.mid_compute_api.soft_delete(self.ctxt, instance)
- self.tgt_compute_api.soft_delete(self.ctxt, instance)
-
- self.mox.ReplayAll()
-
- self.src_msg_runner.instance_delete_everywhere(self.ctxt,
- instance, delete_types.SOFT_DELETE)
-
- def test_instance_fault_create_at_top(self):
- fake_instance_fault = {'id': 1,
- 'message': 'fake-message',
- 'details': 'fake-details'}
-
- if_mock = mock.Mock(spec_set=objects.InstanceFault)
-
- def _check_create():
- self.assertEqual('fake-message', if_mock.message)
- self.assertEqual('fake-details', if_mock.details)
- # Should not be set
- self.assertNotEqual(1, if_mock.id)
-
- if_mock.create.side_effect = _check_create
-
- with mock.patch.object(objects, 'InstanceFault') as if_obj_mock:
- if_obj_mock.return_value = if_mock
- self.src_msg_runner.instance_fault_create_at_top(
- self.ctxt, fake_instance_fault)
-
- if_obj_mock.assert_called_once_with(context=self.ctxt)
- if_mock.create.assert_called_once_with()
-
- def test_bw_usage_update_at_top(self):
- fake_bw_update_info = {'uuid': 'fake_uuid',
- 'mac': 'fake_mac',
- 'start_period': 'fake_start_period',
- 'bw_in': 'fake_bw_in',
- 'bw_out': 'fake_bw_out',
- 'last_ctr_in': 'fake_last_ctr_in',
- 'last_ctr_out': 'fake_last_ctr_out',
- 'last_refreshed': 'fake_last_refreshed'}
-
- # Shouldn't be called for these 2 cells
- self.mox.StubOutWithMock(self.src_db_inst, 'bw_usage_update')
- self.mox.StubOutWithMock(self.mid_db_inst, 'bw_usage_update')
-
- self.mox.StubOutWithMock(self.tgt_db_inst, 'bw_usage_update')
- self.tgt_db_inst.bw_usage_update(self.ctxt, **fake_bw_update_info)
-
- self.mox.ReplayAll()
-
- self.src_msg_runner.bw_usage_update_at_top(self.ctxt,
- fake_bw_update_info)
-
- def test_sync_instances(self):
- # Reset this, as this is a broadcast down.
- self._setup_attrs(up=False)
- project_id = 'fake_project_id'
- updated_since_raw = 'fake_updated_since_raw'
- updated_since_parsed = 'fake_updated_since_parsed'
- deleted = 'fake_deleted'
-
- instance1 = dict(uuid='fake_uuid1', deleted=False)
- instance2 = dict(uuid='fake_uuid2', deleted=True)
- fake_instances = [instance1, instance2]
-
- self.mox.StubOutWithMock(self.tgt_msg_runner,
- 'instance_update_at_top')
- self.mox.StubOutWithMock(self.tgt_msg_runner,
- 'instance_destroy_at_top')
-
- self.mox.StubOutWithMock(timeutils, 'parse_isotime')
- self.mox.StubOutWithMock(cells_utils, 'get_instances_to_sync')
-
- # Middle cell.
- timeutils.parse_isotime(updated_since_raw).AndReturn(
- updated_since_parsed)
- cells_utils.get_instances_to_sync(self.ctxt,
- updated_since=updated_since_parsed,
- project_id=project_id,
- deleted=deleted).AndReturn([])
-
- # Bottom/Target cell
- timeutils.parse_isotime(updated_since_raw).AndReturn(
- updated_since_parsed)
- cells_utils.get_instances_to_sync(self.ctxt,
- updated_since=updated_since_parsed,
- project_id=project_id,
- deleted=deleted).AndReturn(fake_instances)
- self.tgt_msg_runner.instance_update_at_top(self.ctxt, instance1)
- self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, instance2)
-
- self.mox.ReplayAll()
-
- self.src_msg_runner.sync_instances(self.ctxt,
- project_id, updated_since_raw, deleted)
-
- def test_service_get_all_with_disabled(self):
- # Reset this, as this is a broadcast down.
- self._setup_attrs(up=False)
-
- ctxt = self.ctxt.elevated()
-
- self.mox.StubOutWithMock(self.src_db_inst, 'service_get_all')
- self.mox.StubOutWithMock(self.mid_db_inst, 'service_get_all')
- self.mox.StubOutWithMock(self.tgt_db_inst, 'service_get_all')
-
- self.src_db_inst.service_get_all(ctxt,
- disabled=None).AndReturn([1, 2])
- self.mid_db_inst.service_get_all(ctxt,
- disabled=None).AndReturn([3])
- self.tgt_db_inst.service_get_all(ctxt,
- disabled=None).AndReturn([4, 5])
-
- self.mox.ReplayAll()
-
- responses = self.src_msg_runner.service_get_all(ctxt,
- filters={})
- response_values = [(resp.cell_name, resp.value_or_raise())
- for resp in responses]
- expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
- ('api-cell!child-cell2', [3]),
- ('api-cell', [1, 2])]
- self.assertEqual(expected, response_values)
-
- def test_service_get_all_without_disabled(self):
- # Reset this, as this is a broadcast down.
- self._setup_attrs(up=False)
- disabled = False
- filters = {'disabled': disabled}
-
- ctxt = self.ctxt.elevated()
-
- self.mox.StubOutWithMock(self.src_db_inst, 'service_get_all')
- self.mox.StubOutWithMock(self.mid_db_inst, 'service_get_all')
- self.mox.StubOutWithMock(self.tgt_db_inst, 'service_get_all')
-
- self.src_db_inst.service_get_all(ctxt,
- disabled=disabled).AndReturn([1, 2])
- self.mid_db_inst.service_get_all(ctxt,
- disabled=disabled).AndReturn([3])
- self.tgt_db_inst.service_get_all(ctxt,
- disabled=disabled).AndReturn([4, 5])
-
- self.mox.ReplayAll()
-
- responses = self.src_msg_runner.service_get_all(ctxt,
- filters=filters)
- response_values = [(resp.cell_name, resp.value_or_raise())
- for resp in responses]
- expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
- ('api-cell!child-cell2', [3]),
- ('api-cell', [1, 2])]
- self.assertEqual(expected, response_values)
-
- def test_task_log_get_all_broadcast(self):
- # Reset this, as this is a broadcast down.
- self._setup_attrs(up=False)
- task_name = 'fake_task_name'
- begin = 'fake_begin'
- end = 'fake_end'
- host = 'fake_host'
- state = 'fake_state'
-
- ctxt = self.ctxt.elevated()
-
- self.mox.StubOutWithMock(self.src_db_inst, 'task_log_get_all')
- self.mox.StubOutWithMock(self.mid_db_inst, 'task_log_get_all')
- self.mox.StubOutWithMock(self.tgt_db_inst, 'task_log_get_all')
-
- self.src_db_inst.task_log_get_all(ctxt, task_name,
- begin, end, host=host, state=state).AndReturn([1, 2])
- self.mid_db_inst.task_log_get_all(ctxt, task_name,
- begin, end, host=host, state=state).AndReturn([3])
- self.tgt_db_inst.task_log_get_all(ctxt, task_name,
- begin, end, host=host, state=state).AndReturn([4, 5])
-
- self.mox.ReplayAll()
-
- responses = self.src_msg_runner.task_log_get_all(ctxt, None,
- task_name, begin, end, host=host, state=state)
- response_values = [(resp.cell_name, resp.value_or_raise())
- for resp in responses]
- expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
- ('api-cell!child-cell2', [3]),
- ('api-cell', [1, 2])]
- self.assertEqual(expected, response_values)
-
- def test_compute_node_get_all(self):
- # Reset this, as this is a broadcast down.
- self._setup_attrs(up=False)
-
- ctxt = self.ctxt.elevated()
-
- self.mox.StubOutWithMock(self.src_db_inst, 'compute_node_get_all')
- self.mox.StubOutWithMock(self.mid_db_inst, 'compute_node_get_all')
- self.mox.StubOutWithMock(self.tgt_db_inst, 'compute_node_get_all')
-
- self.src_db_inst.compute_node_get_all(ctxt).AndReturn([1, 2])
- self.mid_db_inst.compute_node_get_all(ctxt).AndReturn([3])
- self.tgt_db_inst.compute_node_get_all(ctxt).AndReturn([4, 5])
-
- self.mox.ReplayAll()
-
- responses = self.src_msg_runner.compute_node_get_all(ctxt)
- response_values = [(resp.cell_name, resp.value_or_raise())
- for resp in responses]
- expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
- ('api-cell!child-cell2', [3]),
- ('api-cell', [1, 2])]
- self.assertEqual(expected, response_values)
-
- def test_compute_node_get_all_with_hyp_match(self):
- # Reset this, as this is a broadcast down.
- self._setup_attrs(up=False)
- hypervisor_match = 'meow'
-
- ctxt = self.ctxt.elevated()
-
- self.mox.StubOutWithMock(self.src_db_inst,
- 'compute_node_search_by_hypervisor')
- self.mox.StubOutWithMock(self.mid_db_inst,
- 'compute_node_search_by_hypervisor')
- self.mox.StubOutWithMock(self.tgt_db_inst,
- 'compute_node_search_by_hypervisor')
-
- self.src_db_inst.compute_node_search_by_hypervisor(ctxt,
- hypervisor_match).AndReturn([1, 2])
- self.mid_db_inst.compute_node_search_by_hypervisor(ctxt,
- hypervisor_match).AndReturn([3])
- self.tgt_db_inst.compute_node_search_by_hypervisor(ctxt,
- hypervisor_match).AndReturn([4, 5])
-
- self.mox.ReplayAll()
-
- responses = self.src_msg_runner.compute_node_get_all(ctxt,
- hypervisor_match=hypervisor_match)
- response_values = [(resp.cell_name, resp.value_or_raise())
- for resp in responses]
- expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
- ('api-cell!child-cell2', [3]),
- ('api-cell', [1, 2])]
- self.assertEqual(expected, response_values)
-
- def test_compute_node_stats(self):
- # Reset this, as this is a broadcast down.
- self._setup_attrs(up=False)
-
- ctxt = self.ctxt.elevated()
-
- self.mox.StubOutWithMock(self.src_db_inst,
- 'compute_node_statistics')
- self.mox.StubOutWithMock(self.mid_db_inst,
- 'compute_node_statistics')
- self.mox.StubOutWithMock(self.tgt_db_inst,
- 'compute_node_statistics')
-
- self.src_db_inst.compute_node_statistics(ctxt).AndReturn([1, 2])
- self.mid_db_inst.compute_node_statistics(ctxt).AndReturn([3])
- self.tgt_db_inst.compute_node_statistics(ctxt).AndReturn([4, 5])
-
- self.mox.ReplayAll()
-
- responses = self.src_msg_runner.compute_node_stats(ctxt)
- response_values = [(resp.cell_name, resp.value_or_raise())
- for resp in responses]
- expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
- ('api-cell!child-cell2', [3]),
- ('api-cell', [1, 2])]
- self.assertEqual(expected, response_values)
-
- def test_consoleauth_delete_tokens(self):
- fake_uuid = 'fake-instance-uuid'
-
- # To show these should not be called in src/mid-level cell
- self.mox.StubOutWithMock(self.src_ca_rpcapi,
- 'delete_tokens_for_instance')
- self.mox.StubOutWithMock(self.mid_ca_rpcapi,
- 'delete_tokens_for_instance')
-
- self.mox.StubOutWithMock(self.tgt_ca_rpcapi,
- 'delete_tokens_for_instance')
- self.tgt_ca_rpcapi.delete_tokens_for_instance(self.ctxt, fake_uuid)
-
- self.mox.ReplayAll()
-
- self.src_msg_runner.consoleauth_delete_tokens(self.ctxt, fake_uuid)
-
- def test_bdm_update_or_create_with_none_create(self):
- fake_bdm = {'id': 'fake_id',
- 'volume_id': 'fake_volume_id'}
- expected_bdm = fake_bdm.copy()
- expected_bdm.pop('id')
-
- # Shouldn't be called for these 2 cells
- self.mox.StubOutWithMock(self.src_db_inst,
- 'block_device_mapping_update_or_create')
- self.mox.StubOutWithMock(self.mid_db_inst,
- 'block_device_mapping_update_or_create')
-
- self.mox.StubOutWithMock(self.tgt_db_inst,
- 'block_device_mapping_update_or_create')
- self.tgt_db_inst.block_device_mapping_update_or_create(
- self.ctxt, expected_bdm, legacy=False)
-
- self.mox.ReplayAll()
-
- self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
- fake_bdm,
- create=None)
-
- def test_bdm_update_or_create_with_true_create(self):
- fake_bdm = {'id': 'fake_id',
- 'volume_id': 'fake_volume_id'}
- expected_bdm = fake_bdm.copy()
- expected_bdm.pop('id')
-
- # Shouldn't be called for these 2 cells
- self.mox.StubOutWithMock(self.src_db_inst,
- 'block_device_mapping_create')
- self.mox.StubOutWithMock(self.mid_db_inst,
- 'block_device_mapping_create')
-
- self.mox.StubOutWithMock(self.tgt_db_inst,
- 'block_device_mapping_create')
- self.tgt_db_inst.block_device_mapping_create(
- self.ctxt, fake_bdm, legacy=False)
-
- self.mox.ReplayAll()
-
- self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
- fake_bdm,
- create=True)
-
- def test_bdm_update_or_create_with_false_create_vol_id(self):
- fake_bdm = {'id': 'fake_id',
- 'instance_uuid': 'fake_instance_uuid',
- 'device_name': 'fake_device_name',
- 'volume_id': 'fake_volume_id'}
- expected_bdm = fake_bdm.copy()
- expected_bdm.pop('id')
-
- fake_inst_bdms = [{'id': 1,
- 'volume_id': 'not-a-match',
- 'device_name': 'not-a-match'},
- {'id': 2,
- 'volume_id': 'fake_volume_id',
- 'device_name': 'not-a-match'},
- {'id': 3,
- 'volume_id': 'not-a-match',
- 'device_name': 'not-a-match'}]
-
- # Shouldn't be called for these 2 cells
- self.mox.StubOutWithMock(self.src_db_inst,
- 'block_device_mapping_update')
- self.mox.StubOutWithMock(self.mid_db_inst,
- 'block_device_mapping_update')
-
- self.mox.StubOutWithMock(self.tgt_db_inst,
- 'block_device_mapping_get_all_by_instance')
- self.mox.StubOutWithMock(self.tgt_db_inst,
- 'block_device_mapping_update')
-
- self.tgt_db_inst.block_device_mapping_get_all_by_instance(
- self.ctxt, 'fake_instance_uuid').AndReturn(
- fake_inst_bdms)
- # Should try to update ID 2.
- self.tgt_db_inst.block_device_mapping_update(
- self.ctxt, 2, expected_bdm, legacy=False)
-
- self.mox.ReplayAll()
-
- self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
- fake_bdm,
- create=False)
-
- def test_bdm_update_or_create_with_false_create_dev_name(self):
- fake_bdm = {'id': 'fake_id',
- 'instance_uuid': 'fake_instance_uuid',
- 'device_name': 'fake_device_name',
- 'volume_id': 'fake_volume_id'}
- expected_bdm = fake_bdm.copy()
- expected_bdm.pop('id')
-
- fake_inst_bdms = [{'id': 1,
- 'volume_id': 'not-a-match',
- 'device_name': 'not-a-match'},
- {'id': 2,
- 'volume_id': 'not-a-match',
- 'device_name': 'fake_device_name'},
- {'id': 3,
- 'volume_id': 'not-a-match',
- 'device_name': 'not-a-match'}]
-
- # Shouldn't be called for these 2 cells
- self.mox.StubOutWithMock(self.src_db_inst,
- 'block_device_mapping_update')
- self.mox.StubOutWithMock(self.mid_db_inst,
- 'block_device_mapping_update')
-
- self.mox.StubOutWithMock(self.tgt_db_inst,
- 'block_device_mapping_get_all_by_instance')
- self.mox.StubOutWithMock(self.tgt_db_inst,
- 'block_device_mapping_update')
-
- self.tgt_db_inst.block_device_mapping_get_all_by_instance(
- self.ctxt, 'fake_instance_uuid').AndReturn(
- fake_inst_bdms)
- # Should try to update ID 2.
- self.tgt_db_inst.block_device_mapping_update(
- self.ctxt, 2, expected_bdm, legacy=False)
-
- self.mox.ReplayAll()
-
- self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
- fake_bdm,
- create=False)
-
- def test_bdm_destroy_by_volume(self):
- fake_instance_uuid = 'fake-instance-uuid'
- fake_volume_id = 'fake-volume-name'
-
- # Shouldn't be called for these 2 cells
- self.mox.StubOutWithMock(self.src_db_inst,
- 'block_device_mapping_destroy_by_instance_and_volume')
- self.mox.StubOutWithMock(self.mid_db_inst,
- 'block_device_mapping_destroy_by_instance_and_volume')
-
- self.mox.StubOutWithMock(self.tgt_db_inst,
- 'block_device_mapping_destroy_by_instance_and_volume')
- self.tgt_db_inst.block_device_mapping_destroy_by_instance_and_volume(
- self.ctxt, fake_instance_uuid, fake_volume_id)
-
- self.mox.ReplayAll()
-
- self.src_msg_runner.bdm_destroy_at_top(self.ctxt, fake_instance_uuid,
- volume_id=fake_volume_id)
-
- def test_bdm_destroy_by_device(self):
- fake_instance_uuid = 'fake-instance-uuid'
- fake_device_name = 'fake-device-name'
-
- # Shouldn't be called for these 2 cells
- self.mox.StubOutWithMock(self.src_db_inst,
- 'block_device_mapping_destroy_by_instance_and_device')
- self.mox.StubOutWithMock(self.mid_db_inst,
- 'block_device_mapping_destroy_by_instance_and_device')
-
- self.mox.StubOutWithMock(self.tgt_db_inst,
- 'block_device_mapping_destroy_by_instance_and_device')
- self.tgt_db_inst.block_device_mapping_destroy_by_instance_and_device(
- self.ctxt, fake_instance_uuid, fake_device_name)
-
- self.mox.ReplayAll()
-
- self.src_msg_runner.bdm_destroy_at_top(self.ctxt, fake_instance_uuid,
- device_name=fake_device_name)
-
- def test_get_migrations(self):
- self._setup_attrs(up=False)
- filters = {'status': 'confirmed'}
- migrations_from_cell1 = [{'id': 123}]
- migrations_from_cell2 = [{'id': 456}]
- self.mox.StubOutWithMock(self.mid_compute_api,
- 'get_migrations')
-
- self.mid_compute_api.get_migrations(self.ctxt, filters).\
- AndReturn(migrations_from_cell1)
-
- self.mox.StubOutWithMock(self.tgt_compute_api,
- 'get_migrations')
-
- self.tgt_compute_api.get_migrations(self.ctxt, filters).\
- AndReturn(migrations_from_cell2)
-
- self.mox.ReplayAll()
-
- responses = self.src_msg_runner.get_migrations(
- self.ctxt,
- None, False, filters)
- self.assertEqual(2, len(responses))
- for response in responses:
- self.assertIn(response.value_or_raise(), [migrations_from_cell1,
- migrations_from_cell2])
diff --git a/nova/tests/cells/test_cells_rpc_driver.py b/nova/tests/cells/test_cells_rpc_driver.py
deleted file mode 100644
index 1414adfb1e..0000000000
--- a/nova/tests/cells/test_cells_rpc_driver.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# Copyright (c) 2012 Rackspace Hosting
-# All Rights Reserved.
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For Cells RPC Communication Driver
-"""
-
-import mox
-from oslo.config import cfg
-from oslo import messaging as oslo_messaging
-
-from nova.cells import messaging
-from nova.cells import rpc_driver
-from nova import context
-from nova import rpc
-from nova import test
-from nova.tests.cells import fakes
-
-CONF = cfg.CONF
-CONF.import_opt('rpc_driver_queue_base', 'nova.cells.rpc_driver',
- group='cells')
-
-
-class CellsRPCDriverTestCase(test.NoDBTestCase):
- """Test case for Cells communication via RPC."""
-
- def setUp(self):
- super(CellsRPCDriverTestCase, self).setUp()
- fakes.init(self)
- self.ctxt = context.RequestContext('fake', 'fake')
- self.driver = rpc_driver.CellsRPCDriver()
-
- def test_start_servers(self):
- self.flags(rpc_driver_queue_base='cells.intercell42', group='cells')
- fake_msg_runner = fakes.get_message_runner('api-cell')
-
- class FakeInterCellRPCDispatcher(object):
- def __init__(_self, msg_runner):
- self.assertEqual(fake_msg_runner, msg_runner)
-
- self.stubs.Set(rpc_driver, 'InterCellRPCDispatcher',
- FakeInterCellRPCDispatcher)
- self.mox.StubOutWithMock(rpc, 'get_server')
-
- for message_type in messaging.MessageRunner.get_message_types():
- topic = 'cells.intercell42.' + message_type
- target = oslo_messaging.Target(topic=topic, server=CONF.host)
- endpoints = [mox.IsA(FakeInterCellRPCDispatcher)]
-
- rpcserver = self.mox.CreateMockAnything()
- rpc.get_server(target, endpoints=endpoints).AndReturn(rpcserver)
- rpcserver.start()
-
- self.mox.ReplayAll()
-
- self.driver.start_servers(fake_msg_runner)
-
- def test_stop_servers(self):
- call_info = {'stopped': []}
-
- class FakeRPCServer(object):
- def stop(self):
- call_info['stopped'].append(self)
-
- fake_servers = [FakeRPCServer() for x in xrange(5)]
- self.driver.rpc_servers = fake_servers
- self.driver.stop_servers()
- self.assertEqual(fake_servers, call_info['stopped'])
-
- def test_send_message_to_cell_cast(self):
- msg_runner = fakes.get_message_runner('api-cell')
- cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
- message = messaging._TargetedMessage(msg_runner,
- self.ctxt, 'fake', {}, 'down', cell_state, fanout=False)
-
- expected_server_params = {'hostname': 'rpc_host2',
- 'password': 'password2',
- 'port': 3092,
- 'username': 'username2',
- 'virtual_host': 'rpc_vhost2'}
- expected_url = ('rabbit://%(username)s:%(password)s@'
- '%(hostname)s:%(port)d/%(virtual_host)s' %
- expected_server_params)
-
- def check_transport_url(cell_state):
- return cell_state.db_info['transport_url'] == expected_url
-
- rpcapi = self.driver.intercell_rpcapi
- rpcclient = self.mox.CreateMockAnything()
-
- self.mox.StubOutWithMock(rpcapi, '_get_client')
- rpcapi._get_client(
- mox.Func(check_transport_url),
- 'cells.intercell.targeted').AndReturn(rpcclient)
-
- rpcclient.cast(mox.IgnoreArg(), 'process_message',
- message=message.to_json())
-
- self.mox.ReplayAll()
-
- self.driver.send_message_to_cell(cell_state, message)
-
- def test_send_message_to_cell_fanout_cast(self):
- msg_runner = fakes.get_message_runner('api-cell')
- cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
- message = messaging._TargetedMessage(msg_runner,
- self.ctxt, 'fake', {}, 'down', cell_state, fanout=True)
-
- expected_server_params = {'hostname': 'rpc_host2',
- 'password': 'password2',
- 'port': 3092,
- 'username': 'username2',
- 'virtual_host': 'rpc_vhost2'}
- expected_url = ('rabbit://%(username)s:%(password)s@'
- '%(hostname)s:%(port)d/%(virtual_host)s' %
- expected_server_params)
-
- def check_transport_url(cell_state):
- return cell_state.db_info['transport_url'] == expected_url
-
- rpcapi = self.driver.intercell_rpcapi
- rpcclient = self.mox.CreateMockAnything()
-
- self.mox.StubOutWithMock(rpcapi, '_get_client')
- rpcapi._get_client(
- mox.Func(check_transport_url),
- 'cells.intercell.targeted').AndReturn(rpcclient)
-
- rpcclient.prepare(fanout=True).AndReturn(rpcclient)
- rpcclient.cast(mox.IgnoreArg(), 'process_message',
- message=message.to_json())
-
- self.mox.ReplayAll()
-
- self.driver.send_message_to_cell(cell_state, message)
-
- def test_rpc_topic_uses_message_type(self):
- self.flags(rpc_driver_queue_base='cells.intercell42', group='cells')
- msg_runner = fakes.get_message_runner('api-cell')
- cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
- message = messaging._BroadcastMessage(msg_runner,
- self.ctxt, 'fake', {}, 'down', fanout=True)
- message.message_type = 'fake-message-type'
-
- expected_server_params = {'hostname': 'rpc_host2',
- 'password': 'password2',
- 'port': 3092,
- 'username': 'username2',
- 'virtual_host': 'rpc_vhost2'}
- expected_url = ('rabbit://%(username)s:%(password)s@'
- '%(hostname)s:%(port)d/%(virtual_host)s' %
- expected_server_params)
-
- def check_transport_url(cell_state):
- return cell_state.db_info['transport_url'] == expected_url
-
- rpcapi = self.driver.intercell_rpcapi
- rpcclient = self.mox.CreateMockAnything()
-
- self.mox.StubOutWithMock(rpcapi, '_get_client')
- rpcapi._get_client(
- mox.Func(check_transport_url),
- 'cells.intercell42.fake-message-type').AndReturn(rpcclient)
-
- rpcclient.prepare(fanout=True).AndReturn(rpcclient)
- rpcclient.cast(mox.IgnoreArg(), 'process_message',
- message=message.to_json())
-
- self.mox.ReplayAll()
-
- self.driver.send_message_to_cell(cell_state, message)
-
- def test_process_message(self):
- msg_runner = fakes.get_message_runner('api-cell')
- dispatcher = rpc_driver.InterCellRPCDispatcher(msg_runner)
- message = messaging._BroadcastMessage(msg_runner,
- self.ctxt, 'fake', {}, 'down', fanout=True)
-
- call_info = {}
-
- def _fake_message_from_json(json_message):
- call_info['json_message'] = json_message
- self.assertEqual(message.to_json(), json_message)
- return message
-
- def _fake_process():
- call_info['process_called'] = True
-
- self.stubs.Set(msg_runner, 'message_from_json',
- _fake_message_from_json)
- self.stubs.Set(message, 'process', _fake_process)
-
- dispatcher.process_message(self.ctxt, message.to_json())
- self.assertEqual(message.to_json(), call_info['json_message'])
- self.assertTrue(call_info['process_called'])
diff --git a/nova/tests/cells/test_cells_rpcapi.py b/nova/tests/cells/test_cells_rpcapi.py
deleted file mode 100644
index 7292ee03fc..0000000000
--- a/nova/tests/cells/test_cells_rpcapi.py
+++ /dev/null
@@ -1,760 +0,0 @@
-# Copyright (c) 2012 Rackspace Hosting
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For Cells RPCAPI
-"""
-
-from oslo.config import cfg
-import six
-
-from nova.cells import rpcapi as cells_rpcapi
-from nova import exception
-from nova import test
-from nova.tests import fake_instance
-
-CONF = cfg.CONF
-CONF.import_opt('topic', 'nova.cells.opts', group='cells')
-
-
-class CellsAPITestCase(test.NoDBTestCase):
- """Test case for cells.api interfaces."""
-
- def setUp(self):
- super(CellsAPITestCase, self).setUp()
- self.fake_topic = 'fake_topic'
- self.fake_context = 'fake_context'
- self.flags(topic=self.fake_topic, enable=True, group='cells')
- self.cells_rpcapi = cells_rpcapi.CellsAPI()
-
- def _stub_rpc_method(self, rpc_method, result):
- call_info = {}
-
- orig_prepare = self.cells_rpcapi.client.prepare
-
- def fake_rpc_prepare(**kwargs):
- if 'version' in kwargs:
- call_info['version'] = kwargs.pop('version')
- return self.cells_rpcapi.client
-
- def fake_csv(version):
- return orig_prepare(version).can_send_version()
-
- def fake_rpc_method(ctxt, method, **kwargs):
- call_info['context'] = ctxt
- call_info['method'] = method
- call_info['args'] = kwargs
- return result
-
- self.stubs.Set(self.cells_rpcapi.client, 'prepare', fake_rpc_prepare)
- self.stubs.Set(self.cells_rpcapi.client, 'can_send_version', fake_csv)
- self.stubs.Set(self.cells_rpcapi.client, rpc_method, fake_rpc_method)
-
- return call_info
-
- def _check_result(self, call_info, method, args, version=None):
- self.assertEqual(self.cells_rpcapi.client.target.topic,
- self.fake_topic)
- self.assertEqual(self.fake_context, call_info['context'])
- self.assertEqual(method, call_info['method'])
- self.assertEqual(args, call_info['args'])
- if version is not None:
- self.assertIn('version', call_info)
- self.assertIsInstance(call_info['version'], six.string_types,
- msg="Message version %s is not a string" %
- call_info['version'])
- self.assertEqual(version, call_info['version'])
- else:
- self.assertNotIn('version', call_info)
-
- def test_cast_compute_api_method(self):
- fake_cell_name = 'fake_cell_name'
- fake_method = 'fake_method'
- fake_method_args = (1, 2)
- fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
-
- expected_method_info = {'method': fake_method,
- 'method_args': fake_method_args,
- 'method_kwargs': fake_method_kwargs}
- expected_args = {'method_info': expected_method_info,
- 'cell_name': fake_cell_name,
- 'call': False}
-
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.cast_compute_api_method(self.fake_context,
- fake_cell_name, fake_method,
- *fake_method_args, **fake_method_kwargs)
- self._check_result(call_info, 'run_compute_api_method',
- expected_args)
-
- def test_call_compute_api_method(self):
- fake_cell_name = 'fake_cell_name'
- fake_method = 'fake_method'
- fake_method_args = (1, 2)
- fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
- fake_response = 'fake_response'
-
- expected_method_info = {'method': fake_method,
- 'method_args': fake_method_args,
- 'method_kwargs': fake_method_kwargs}
- expected_args = {'method_info': expected_method_info,
- 'cell_name': fake_cell_name,
- 'call': True}
-
- call_info = self._stub_rpc_method('call', fake_response)
-
- result = self.cells_rpcapi.call_compute_api_method(self.fake_context,
- fake_cell_name, fake_method,
- *fake_method_args, **fake_method_kwargs)
- self._check_result(call_info, 'run_compute_api_method',
- expected_args)
- self.assertEqual(fake_response, result)
-
- def test_build_instances(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.build_instances(
- self.fake_context, instances=['1', '2'],
- image={'fake': 'image'}, arg1=1, arg2=2, arg3=3)
-
- expected_args = {'build_inst_kwargs': {'instances': ['1', '2'],
- 'image': {'fake': 'image'},
- 'arg1': 1,
- 'arg2': 2,
- 'arg3': 3}}
- self._check_result(call_info, 'build_instances',
- expected_args, version='1.8')
-
- def test_get_capacities(self):
- capacity_info = {"capacity": "info"}
- call_info = self._stub_rpc_method('call',
- result=capacity_info)
- result = self.cells_rpcapi.get_capacities(self.fake_context,
- cell_name="name")
- self._check_result(call_info, 'get_capacities',
- {'cell_name': 'name'}, version='1.9')
- self.assertEqual(capacity_info, result)
-
- def test_instance_update_at_top(self):
- fake_info_cache = {'id': 1,
- 'instance': 'fake_instance',
- 'other': 'moo'}
- fake_sys_metadata = [{'id': 1,
- 'key': 'key1',
- 'value': 'value1'},
- {'id': 2,
- 'key': 'key2',
- 'value': 'value2'}]
- fake_instance = {'id': 2,
- 'security_groups': 'fake',
- 'instance_type': 'fake',
- 'volumes': 'fake',
- 'cell_name': 'fake',
- 'name': 'fake',
- 'metadata': 'fake',
- 'info_cache': fake_info_cache,
- 'system_metadata': fake_sys_metadata,
- 'other': 'meow'}
-
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.instance_update_at_top(
- self.fake_context, fake_instance)
-
- expected_args = {'instance': fake_instance}
- self._check_result(call_info, 'instance_update_at_top',
- expected_args)
-
- def test_instance_destroy_at_top(self):
- fake_instance = {'uuid': 'fake-uuid'}
-
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.instance_destroy_at_top(
- self.fake_context, fake_instance)
-
- expected_args = {'instance': fake_instance}
- self._check_result(call_info, 'instance_destroy_at_top',
- expected_args)
-
- def test_instance_delete_everywhere(self):
- instance = fake_instance.fake_instance_obj(self.fake_context)
-
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.instance_delete_everywhere(
- self.fake_context, instance,
- 'fake-type')
-
- expected_args = {'instance': instance,
- 'delete_type': 'fake-type'}
- self._check_result(call_info, 'instance_delete_everywhere',
- expected_args, version='1.27')
-
- def test_instance_fault_create_at_top(self):
- fake_instance_fault = {'id': 2,
- 'other': 'meow'}
-
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.instance_fault_create_at_top(
- self.fake_context, fake_instance_fault)
-
- expected_args = {'instance_fault': fake_instance_fault}
- self._check_result(call_info, 'instance_fault_create_at_top',
- expected_args)
-
- def test_bw_usage_update_at_top(self):
- update_args = ('fake_uuid', 'fake_mac', 'fake_start_period',
- 'fake_bw_in', 'fake_bw_out', 'fake_ctr_in',
- 'fake_ctr_out')
- update_kwargs = {'last_refreshed': 'fake_refreshed'}
-
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.bw_usage_update_at_top(
- self.fake_context, *update_args, **update_kwargs)
-
- bw_update_info = {'uuid': 'fake_uuid',
- 'mac': 'fake_mac',
- 'start_period': 'fake_start_period',
- 'bw_in': 'fake_bw_in',
- 'bw_out': 'fake_bw_out',
- 'last_ctr_in': 'fake_ctr_in',
- 'last_ctr_out': 'fake_ctr_out',
- 'last_refreshed': 'fake_refreshed'}
-
- expected_args = {'bw_update_info': bw_update_info}
- self._check_result(call_info, 'bw_usage_update_at_top',
- expected_args)
-
- def test_get_cell_info_for_neighbors(self):
- call_info = self._stub_rpc_method('call', 'fake_response')
- result = self.cells_rpcapi.get_cell_info_for_neighbors(
- self.fake_context)
- self._check_result(call_info, 'get_cell_info_for_neighbors', {},
- version='1.1')
- self.assertEqual(result, 'fake_response')
-
- def test_sync_instances(self):
- call_info = self._stub_rpc_method('cast', None)
- self.cells_rpcapi.sync_instances(self.fake_context,
- project_id='fake_project', updated_since='fake_time',
- deleted=True)
-
- expected_args = {'project_id': 'fake_project',
- 'updated_since': 'fake_time',
- 'deleted': True}
- self._check_result(call_info, 'sync_instances', expected_args,
- version='1.1')
-
- def test_service_get_all(self):
- call_info = self._stub_rpc_method('call', 'fake_response')
- fake_filters = {'key1': 'val1', 'key2': 'val2'}
- result = self.cells_rpcapi.service_get_all(self.fake_context,
- filters=fake_filters)
-
- expected_args = {'filters': fake_filters}
- self._check_result(call_info, 'service_get_all', expected_args,
- version='1.2')
- self.assertEqual(result, 'fake_response')
-
- def test_service_get_by_compute_host(self):
- call_info = self._stub_rpc_method('call', 'fake_response')
- result = self.cells_rpcapi.service_get_by_compute_host(
- self.fake_context, host_name='fake-host-name')
- expected_args = {'host_name': 'fake-host-name'}
- self._check_result(call_info, 'service_get_by_compute_host',
- expected_args,
- version='1.2')
- self.assertEqual(result, 'fake_response')
-
- def test_get_host_uptime(self):
- call_info = self._stub_rpc_method('call', 'fake_response')
- result = self.cells_rpcapi.get_host_uptime(
- self.fake_context, host_name='fake-host-name')
- expected_args = {'host_name': 'fake-host-name'}
- self._check_result(call_info, 'get_host_uptime',
- expected_args,
- version='1.17')
- self.assertEqual(result, 'fake_response')
-
- def test_service_update(self):
- call_info = self._stub_rpc_method('call', 'fake_response')
- result = self.cells_rpcapi.service_update(
- self.fake_context, host_name='fake-host-name',
- binary='nova-api', params_to_update={'disabled': True})
- expected_args = {
- 'host_name': 'fake-host-name',
- 'binary': 'nova-api',
- 'params_to_update': {'disabled': True}}
- self._check_result(call_info, 'service_update',
- expected_args,
- version='1.7')
- self.assertEqual(result, 'fake_response')
-
- def test_service_delete(self):
- call_info = self._stub_rpc_method('call', None)
- cell_service_id = 'cell@id'
- result = self.cells_rpcapi.service_delete(
- self.fake_context, cell_service_id=cell_service_id)
- expected_args = {'cell_service_id': cell_service_id}
- self._check_result(call_info, 'service_delete',
- expected_args, version='1.26')
- self.assertIsNone(result)
-
- def test_proxy_rpc_to_manager(self):
- call_info = self._stub_rpc_method('call', 'fake_response')
- result = self.cells_rpcapi.proxy_rpc_to_manager(
- self.fake_context, rpc_message='fake-msg',
- topic='fake-topic', call=True, timeout=-1)
- expected_args = {'rpc_message': 'fake-msg',
- 'topic': 'fake-topic',
- 'call': True,
- 'timeout': -1}
- self._check_result(call_info, 'proxy_rpc_to_manager',
- expected_args,
- version='1.2')
- self.assertEqual(result, 'fake_response')
-
- def test_task_log_get_all(self):
- call_info = self._stub_rpc_method('call', 'fake_response')
- result = self.cells_rpcapi.task_log_get_all(self.fake_context,
- task_name='fake_name',
- period_beginning='fake_begin',
- period_ending='fake_end',
- host='fake_host',
- state='fake_state')
-
- expected_args = {'task_name': 'fake_name',
- 'period_beginning': 'fake_begin',
- 'period_ending': 'fake_end',
- 'host': 'fake_host',
- 'state': 'fake_state'}
- self._check_result(call_info, 'task_log_get_all', expected_args,
- version='1.3')
- self.assertEqual(result, 'fake_response')
-
- def test_compute_node_get_all(self):
- call_info = self._stub_rpc_method('call', 'fake_response')
- result = self.cells_rpcapi.compute_node_get_all(self.fake_context,
- hypervisor_match='fake-match')
-
- expected_args = {'hypervisor_match': 'fake-match'}
- self._check_result(call_info, 'compute_node_get_all', expected_args,
- version='1.4')
- self.assertEqual(result, 'fake_response')
-
- def test_compute_node_stats(self):
- call_info = self._stub_rpc_method('call', 'fake_response')
- result = self.cells_rpcapi.compute_node_stats(self.fake_context)
- expected_args = {}
- self._check_result(call_info, 'compute_node_stats',
- expected_args, version='1.4')
- self.assertEqual(result, 'fake_response')
-
- def test_compute_node_get(self):
- call_info = self._stub_rpc_method('call', 'fake_response')
- result = self.cells_rpcapi.compute_node_get(self.fake_context,
- 'fake_compute_id')
- expected_args = {'compute_id': 'fake_compute_id'}
- self._check_result(call_info, 'compute_node_get',
- expected_args, version='1.4')
- self.assertEqual(result, 'fake_response')
-
- def test_actions_get(self):
- fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
- call_info = self._stub_rpc_method('call', 'fake_response')
- result = self.cells_rpcapi.actions_get(self.fake_context,
- fake_instance)
- expected_args = {'cell_name': 'region!child',
- 'instance_uuid': fake_instance['uuid']}
- self._check_result(call_info, 'actions_get', expected_args,
- version='1.5')
- self.assertEqual(result, 'fake_response')
-
- def test_actions_get_no_cell(self):
- fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
- self.assertRaises(exception.InstanceUnknownCell,
- self.cells_rpcapi.actions_get, self.fake_context,
- fake_instance)
-
- def test_action_get_by_request_id(self):
- fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
- call_info = self._stub_rpc_method('call', 'fake_response')
- result = self.cells_rpcapi.action_get_by_request_id(self.fake_context,
- fake_instance,
- 'req-fake')
- expected_args = {'cell_name': 'region!child',
- 'instance_uuid': fake_instance['uuid'],
- 'request_id': 'req-fake'}
- self._check_result(call_info, 'action_get_by_request_id',
- expected_args, version='1.5')
- self.assertEqual(result, 'fake_response')
-
- def test_action_get_by_request_id_no_cell(self):
- fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
- self.assertRaises(exception.InstanceUnknownCell,
- self.cells_rpcapi.action_get_by_request_id,
- self.fake_context, fake_instance, 'req-fake')
-
- def test_action_events_get(self):
- fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
- call_info = self._stub_rpc_method('call', 'fake_response')
- result = self.cells_rpcapi.action_events_get(self.fake_context,
- fake_instance,
- 'fake-action')
- expected_args = {'cell_name': 'region!child',
- 'action_id': 'fake-action'}
- self._check_result(call_info, 'action_events_get', expected_args,
- version='1.5')
- self.assertEqual(result, 'fake_response')
-
- def test_action_events_get_no_cell(self):
- fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
- self.assertRaises(exception.InstanceUnknownCell,
- self.cells_rpcapi.action_events_get,
- self.fake_context, fake_instance, 'fake-action')
-
- def test_consoleauth_delete_tokens(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.consoleauth_delete_tokens(self.fake_context,
- 'fake-uuid')
-
- expected_args = {'instance_uuid': 'fake-uuid'}
- self._check_result(call_info, 'consoleauth_delete_tokens',
- expected_args, version='1.6')
-
- def test_validate_console_port(self):
- call_info = self._stub_rpc_method('call', 'fake_response')
-
- result = self.cells_rpcapi.validate_console_port(self.fake_context,
- 'fake-uuid', 'fake-port', 'fake-type')
-
- expected_args = {'instance_uuid': 'fake-uuid',
- 'console_port': 'fake-port',
- 'console_type': 'fake-type'}
- self._check_result(call_info, 'validate_console_port',
- expected_args, version='1.6')
- self.assertEqual(result, 'fake_response')
-
- def test_bdm_update_or_create_at_top(self):
- fake_bdm = {'id': 2, 'other': 'meow'}
-
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.bdm_update_or_create_at_top(
- self.fake_context, fake_bdm, create='fake-create')
-
- expected_args = {'bdm': fake_bdm, 'create': 'fake-create'}
- self._check_result(call_info, 'bdm_update_or_create_at_top',
- expected_args, version='1.28')
-
- def test_bdm_destroy_at_top(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.bdm_destroy_at_top(self.fake_context,
- 'fake-uuid',
- device_name='fake-device',
- volume_id='fake-vol')
-
- expected_args = {'instance_uuid': 'fake-uuid',
- 'device_name': 'fake-device',
- 'volume_id': 'fake-vol'}
- self._check_result(call_info, 'bdm_destroy_at_top',
- expected_args, version='1.10')
-
- def test_get_migrations(self):
- call_info = self._stub_rpc_method('call', None)
- filters = {'cell_name': 'ChildCell', 'status': 'confirmed'}
-
- self.cells_rpcapi.get_migrations(self.fake_context, filters)
-
- expected_args = {'filters': filters}
- self._check_result(call_info, 'get_migrations', expected_args,
- version="1.11")
-
- def test_instance_update_from_api(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.instance_update_from_api(
- self.fake_context, 'fake-instance',
- expected_vm_state='exp_vm',
- expected_task_state='exp_task',
- admin_state_reset='admin_reset')
-
- expected_args = {'instance': 'fake-instance',
- 'expected_vm_state': 'exp_vm',
- 'expected_task_state': 'exp_task',
- 'admin_state_reset': 'admin_reset'}
- self._check_result(call_info, 'instance_update_from_api',
- expected_args, version='1.16')
-
- def test_start_instance(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.start_instance(
- self.fake_context, 'fake-instance')
-
- expected_args = {'instance': 'fake-instance'}
- self._check_result(call_info, 'start_instance',
- expected_args, version='1.12')
-
- def test_stop_instance_cast(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.stop_instance(
- self.fake_context, 'fake-instance', do_cast=True)
-
- expected_args = {'instance': 'fake-instance',
- 'do_cast': True}
- self._check_result(call_info, 'stop_instance',
- expected_args, version='1.12')
-
- def test_stop_instance_call(self):
- call_info = self._stub_rpc_method('call', 'fake_response')
-
- result = self.cells_rpcapi.stop_instance(
- self.fake_context, 'fake-instance', do_cast=False)
-
- expected_args = {'instance': 'fake-instance',
- 'do_cast': False}
- self._check_result(call_info, 'stop_instance',
- expected_args, version='1.12')
- self.assertEqual(result, 'fake_response')
-
- def test_cell_create(self):
- call_info = self._stub_rpc_method('call', 'fake_response')
-
- result = self.cells_rpcapi.cell_create(self.fake_context, 'values')
-
- expected_args = {'values': 'values'}
- self._check_result(call_info, 'cell_create',
- expected_args, version='1.13')
- self.assertEqual(result, 'fake_response')
-
- def test_cell_update(self):
- call_info = self._stub_rpc_method('call', 'fake_response')
-
- result = self.cells_rpcapi.cell_update(self.fake_context,
- 'cell_name', 'values')
-
- expected_args = {'cell_name': 'cell_name',
- 'values': 'values'}
- self._check_result(call_info, 'cell_update',
- expected_args, version='1.13')
- self.assertEqual(result, 'fake_response')
-
- def test_cell_delete(self):
- call_info = self._stub_rpc_method('call', 'fake_response')
-
- result = self.cells_rpcapi.cell_delete(self.fake_context,
- 'cell_name')
-
- expected_args = {'cell_name': 'cell_name'}
- self._check_result(call_info, 'cell_delete',
- expected_args, version='1.13')
- self.assertEqual(result, 'fake_response')
-
- def test_cell_get(self):
- call_info = self._stub_rpc_method('call', 'fake_response')
-
- result = self.cells_rpcapi.cell_get(self.fake_context,
- 'cell_name')
-
- expected_args = {'cell_name': 'cell_name'}
- self._check_result(call_info, 'cell_get',
- expected_args, version='1.13')
- self.assertEqual(result, 'fake_response')
-
- def test_reboot_instance(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.reboot_instance(
- self.fake_context, 'fake-instance',
- block_device_info='ignored', reboot_type='HARD')
-
- expected_args = {'instance': 'fake-instance',
- 'reboot_type': 'HARD'}
- self._check_result(call_info, 'reboot_instance',
- expected_args, version='1.14')
-
- def test_pause_instance(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.pause_instance(
- self.fake_context, 'fake-instance')
-
- expected_args = {'instance': 'fake-instance'}
- self._check_result(call_info, 'pause_instance',
- expected_args, version='1.19')
-
- def test_unpause_instance(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.unpause_instance(
- self.fake_context, 'fake-instance')
-
- expected_args = {'instance': 'fake-instance'}
- self._check_result(call_info, 'unpause_instance',
- expected_args, version='1.19')
-
- def test_suspend_instance(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.suspend_instance(
- self.fake_context, 'fake-instance')
-
- expected_args = {'instance': 'fake-instance'}
- self._check_result(call_info, 'suspend_instance',
- expected_args, version='1.15')
-
- def test_resume_instance(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.resume_instance(
- self.fake_context, 'fake-instance')
-
- expected_args = {'instance': 'fake-instance'}
- self._check_result(call_info, 'resume_instance',
- expected_args, version='1.15')
-
- def test_terminate_instance(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.terminate_instance(self.fake_context,
- 'fake-instance', [])
- expected_args = {'instance': 'fake-instance'}
- self._check_result(call_info, 'terminate_instance',
- expected_args, version='1.18')
-
- def test_soft_delete_instance(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.soft_delete_instance(self.fake_context,
- 'fake-instance')
- expected_args = {'instance': 'fake-instance'}
- self._check_result(call_info, 'soft_delete_instance',
- expected_args, version='1.18')
-
- def test_resize_instance(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.resize_instance(self.fake_context,
- 'fake-instance',
- dict(cow='moo'),
- 'fake-hint',
- 'fake-flavor',
- 'fake-reservations')
- expected_args = {'instance': 'fake-instance',
- 'flavor': 'fake-flavor',
- 'extra_instance_updates': dict(cow='moo')}
- self._check_result(call_info, 'resize_instance',
- expected_args, version='1.20')
-
- def test_live_migrate_instance(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.live_migrate_instance(self.fake_context,
- 'fake-instance',
- 'fake-host',
- 'fake-block',
- 'fake-commit')
- expected_args = {'instance': 'fake-instance',
- 'block_migration': 'fake-block',
- 'disk_over_commit': 'fake-commit',
- 'host_name': 'fake-host'}
- self._check_result(call_info, 'live_migrate_instance',
- expected_args, version='1.20')
-
- def test_revert_resize(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.revert_resize(self.fake_context,
- 'fake-instance',
- 'fake-migration',
- 'fake-dest',
- 'resvs')
- expected_args = {'instance': 'fake-instance'}
- self._check_result(call_info, 'revert_resize',
- expected_args, version='1.21')
-
- def test_confirm_resize(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.confirm_resize(self.fake_context,
- 'fake-instance',
- 'fake-migration',
- 'fake-source',
- 'resvs')
- expected_args = {'instance': 'fake-instance'}
- self._check_result(call_info, 'confirm_resize',
- expected_args, version='1.21')
-
- def test_reset_network(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.reset_network(self.fake_context,
- 'fake-instance')
- expected_args = {'instance': 'fake-instance'}
- self._check_result(call_info, 'reset_network',
- expected_args, version='1.22')
-
- def test_inject_network_info(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.inject_network_info(self.fake_context,
- 'fake-instance')
- expected_args = {'instance': 'fake-instance'}
- self._check_result(call_info, 'inject_network_info',
- expected_args, version='1.23')
-
- def test_snapshot_instance(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.snapshot_instance(self.fake_context,
- 'fake-instance',
- 'image-id')
- expected_args = {'instance': 'fake-instance',
- 'image_id': 'image-id'}
- self._check_result(call_info, 'snapshot_instance',
- expected_args, version='1.24')
-
- def test_backup_instance(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.backup_instance(self.fake_context,
- 'fake-instance',
- 'image-id',
- 'backup-type',
- 'rotation')
- expected_args = {'instance': 'fake-instance',
- 'image_id': 'image-id',
- 'backup_type': 'backup-type',
- 'rotation': 'rotation'}
- self._check_result(call_info, 'backup_instance',
- expected_args, version='1.24')
-
- def test_set_admin_password(self):
- call_info = self._stub_rpc_method('cast', None)
-
- self.cells_rpcapi.set_admin_password(self.fake_context,
- 'fake-instance', 'fake-password')
-
- expected_args = {'instance': 'fake-instance',
- 'new_pass': 'fake-password'}
- self._check_result(call_info, 'set_admin_password',
- expected_args, version='1.29')
diff --git a/nova/tests/cells/test_cells_scheduler.py b/nova/tests/cells/test_cells_scheduler.py
deleted file mode 100644
index 1a60c7394f..0000000000
--- a/nova/tests/cells/test_cells_scheduler.py
+++ /dev/null
@@ -1,530 +0,0 @@
-# Copyright (c) 2012 Rackspace Hosting
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For CellsScheduler
-"""
-import copy
-import time
-
-from oslo.config import cfg
-
-from nova import block_device
-from nova.cells import filters
-from nova.cells import weights
-from nova.compute import vm_states
-from nova import context
-from nova import db
-from nova import exception
-from nova.openstack.common import uuidutils
-from nova.scheduler import utils as scheduler_utils
-from nova import test
-from nova.tests.cells import fakes
-from nova.tests import fake_instance
-from nova import utils
-
-CONF = cfg.CONF
-CONF.import_opt('scheduler_retries', 'nova.cells.scheduler', group='cells')
-CONF.import_opt('scheduler_filter_classes', 'nova.cells.scheduler',
- group='cells')
-CONF.import_opt('scheduler_weight_classes', 'nova.cells.scheduler',
- group='cells')
-
-
-class FakeFilterClass1(filters.BaseCellFilter):
- pass
-
-
-class FakeFilterClass2(filters.BaseCellFilter):
- pass
-
-
-class FakeWeightClass1(weights.BaseCellWeigher):
- pass
-
-
-class FakeWeightClass2(weights.BaseCellWeigher):
- pass
-
-
-class CellsSchedulerTestCase(test.TestCase):
- """Test case for CellsScheduler class."""
-
- def setUp(self):
- super(CellsSchedulerTestCase, self).setUp()
- self.flags(scheduler_filter_classes=[], scheduler_weight_classes=[],
- group='cells')
- self._init_cells_scheduler()
-
- def _init_cells_scheduler(self):
- fakes.init(self)
- self.msg_runner = fakes.get_message_runner('api-cell')
- self.scheduler = self.msg_runner.scheduler
- self.state_manager = self.msg_runner.state_manager
- self.my_cell_state = self.state_manager.get_my_state()
- self.ctxt = context.RequestContext('fake', 'fake')
- instance_uuids = []
- for x in xrange(3):
- instance_uuids.append(uuidutils.generate_uuid())
- self.instance_uuids = instance_uuids
- self.instances = [{'uuid': uuid} for uuid in instance_uuids]
- self.request_spec = {
- 'instance_uuids': instance_uuids,
- 'instance_properties': self.instances[0],
- 'instance_type': 'fake_type',
- 'image': 'fake_image'}
- self.build_inst_kwargs = {
- 'instances': self.instances,
- 'image': 'fake_image',
- 'filter_properties': {'instance_type': 'fake_type'},
- 'security_groups': 'fake_sec_groups',
- 'block_device_mapping': 'fake_bdm'}
-
- def test_create_instances_here(self):
- # Just grab the first instance type
- inst_type = db.flavor_get(self.ctxt, 1)
- image = {'properties': {}}
- instance_uuids = self.instance_uuids
- instance_props = {'id': 'removed',
- 'security_groups': 'removed',
- 'info_cache': 'removed',
- 'name': 'instance-00000001',
- 'hostname': 'meow',
- 'display_name': 'moo',
- 'image_ref': 'fake_image_ref',
- 'user_id': self.ctxt.user_id,
- # Test these as lists
- 'metadata': [{'key': 'moo', 'value': 'cow'}],
- 'system_metadata': [{'key': 'meow', 'value': 'cat'}],
- 'project_id': self.ctxt.project_id}
-
- call_info = {'uuids': []}
- block_device_mapping = [block_device.create_image_bdm(
- 'fake_image_ref')]
-
- def _fake_instance_update_at_top(_ctxt, instance):
- call_info['uuids'].append(instance['uuid'])
-
- self.stubs.Set(self.msg_runner, 'instance_update_at_top',
- _fake_instance_update_at_top)
-
- self.scheduler._create_instances_here(self.ctxt, instance_uuids,
- instance_props, inst_type, image,
- ['default'], block_device_mapping)
- self.assertEqual(instance_uuids, call_info['uuids'])
-
- for instance_uuid in instance_uuids:
- instance = db.instance_get_by_uuid(self.ctxt, instance_uuid)
- meta = utils.instance_meta(instance)
- self.assertEqual('cow', meta['moo'])
- sys_meta = utils.instance_sys_meta(instance)
- self.assertEqual('cat', sys_meta['meow'])
- self.assertEqual('meow', instance['hostname'])
- self.assertEqual('moo-%s' % instance['uuid'],
- instance['display_name'])
- self.assertEqual('fake_image_ref', instance['image_ref'])
-
- def test_build_instances_selects_child_cell(self):
- # Make sure there's no capacity info so we're sure to
- # select a child cell
- our_cell_info = self.state_manager.get_my_state()
- our_cell_info.capacities = {}
-
- call_info = {'times': 0}
-
- orig_fn = self.msg_runner.build_instances
-
- def msg_runner_build_instances(ctxt, target_cell, build_inst_kwargs):
- # This gets called twice. Once for our running it
- # in this cell.. and then it'll get called when the
- # child cell is picked. So, first time.. just run it
- # like normal.
- if not call_info['times']:
- call_info['times'] += 1
- return orig_fn(ctxt, target_cell, build_inst_kwargs)
- call_info['ctxt'] = ctxt
- call_info['target_cell'] = target_cell
- call_info['build_inst_kwargs'] = build_inst_kwargs
-
- def fake_build_request_spec(ctxt, image, instances):
- request_spec = {
- 'instance_uuids': [inst['uuid'] for inst in instances],
- 'image': image}
- return request_spec
-
- self.stubs.Set(self.msg_runner, 'build_instances',
- msg_runner_build_instances)
- self.stubs.Set(scheduler_utils, 'build_request_spec',
- fake_build_request_spec)
-
- self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
- self.build_inst_kwargs)
-
- self.assertEqual(self.ctxt, call_info['ctxt'])
- self.assertEqual(self.build_inst_kwargs,
- call_info['build_inst_kwargs'])
- child_cells = self.state_manager.get_child_cells()
- self.assertIn(call_info['target_cell'], child_cells)
-
- def test_build_instances_selects_current_cell(self):
- # Make sure there's no child cells so that we will be
- # selected
- self.state_manager.child_cells = {}
-
- call_info = {}
- build_inst_kwargs = copy.deepcopy(self.build_inst_kwargs)
-
- def fake_create_instances_here(ctxt, instance_uuids,
- instance_properties, instance_type, image, security_groups,
- block_device_mapping):
- call_info['ctxt'] = ctxt
- call_info['instance_uuids'] = instance_uuids
- call_info['instance_properties'] = instance_properties
- call_info['instance_type'] = instance_type
- call_info['image'] = image
- call_info['security_groups'] = security_groups
- call_info['block_device_mapping'] = block_device_mapping
- instances = [fake_instance.fake_instance_obj(ctxt, **instance)
- for instance in self.instances]
- return instances
-
- def fake_rpc_build_instances(ctxt, **build_inst_kwargs):
- call_info['build_inst_kwargs'] = build_inst_kwargs
-
- def fake_build_request_spec(ctxt, image, instances):
- request_spec = {
- 'instance_uuids': [inst['uuid'] for inst in instances],
- 'image': image}
- return request_spec
-
- self.stubs.Set(self.scheduler, '_create_instances_here',
- fake_create_instances_here)
- self.stubs.Set(self.scheduler.compute_task_api,
- 'build_instances', fake_rpc_build_instances)
- self.stubs.Set(scheduler_utils, 'build_request_spec',
- fake_build_request_spec)
-
- self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
- build_inst_kwargs)
-
- self.assertEqual(self.ctxt, call_info['ctxt'])
- self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
- self.assertEqual(self.build_inst_kwargs['instances'][0],
- call_info['instance_properties'])
- self.assertEqual(
- self.build_inst_kwargs['filter_properties']['instance_type'],
- call_info['instance_type'])
- self.assertEqual(self.build_inst_kwargs['image'], call_info['image'])
- self.assertEqual(self.build_inst_kwargs['security_groups'],
- call_info['security_groups'])
- self.assertEqual(self.build_inst_kwargs['block_device_mapping'],
- call_info['block_device_mapping'])
- self.assertEqual(build_inst_kwargs,
- call_info['build_inst_kwargs'])
- self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
-
- def test_build_instances_retries_when_no_cells_avail(self):
- self.flags(scheduler_retries=7, group='cells')
-
- call_info = {'num_tries': 0, 'errored_uuids': []}
-
- def fake_grab_target_cells(filter_properties):
- call_info['num_tries'] += 1
- raise exception.NoCellsAvailable()
-
- def fake_sleep(_secs):
- return
-
- def fake_instance_update(ctxt, instance_uuid, values):
- self.assertEqual(vm_states.ERROR, values['vm_state'])
- call_info['errored_uuids'].append(instance_uuid)
-
- def fake_build_request_spec(ctxt, image, instances):
- request_spec = {
- 'instance_uuids': [inst['uuid'] for inst in instances],
- 'image': image}
- return request_spec
-
- self.stubs.Set(self.scheduler, '_grab_target_cells',
- fake_grab_target_cells)
- self.stubs.Set(time, 'sleep', fake_sleep)
- self.stubs.Set(db, 'instance_update', fake_instance_update)
- self.stubs.Set(scheduler_utils, 'build_request_spec',
- fake_build_request_spec)
-
- self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
- self.build_inst_kwargs)
-
- self.assertEqual(8, call_info['num_tries'])
- self.assertEqual(self.instance_uuids, call_info['errored_uuids'])
-
- def test_schedule_method_on_random_exception(self):
- self.flags(scheduler_retries=7, group='cells')
-
- instances = [{'uuid': uuid} for uuid in self.instance_uuids]
- method_kwargs = {
- 'image': 'fake_image',
- 'instances': instances,
- 'filter_properties': {}}
-
- call_info = {'num_tries': 0,
- 'errored_uuids1': [],
- 'errored_uuids2': []}
-
- def fake_grab_target_cells(filter_properties):
- call_info['num_tries'] += 1
- raise test.TestingException()
-
- def fake_instance_update(ctxt, instance_uuid, values):
- self.assertEqual(vm_states.ERROR, values['vm_state'])
- call_info['errored_uuids1'].append(instance_uuid)
-
- def fake_instance_update_at_top(ctxt, instance):
- self.assertEqual(vm_states.ERROR, instance['vm_state'])
- call_info['errored_uuids2'].append(instance['uuid'])
-
- def fake_build_request_spec(ctxt, image, instances):
- request_spec = {
- 'instance_uuids': [inst['uuid'] for inst in instances],
- 'image': image}
- return request_spec
-
- self.stubs.Set(self.scheduler, '_grab_target_cells',
- fake_grab_target_cells)
- self.stubs.Set(db, 'instance_update', fake_instance_update)
- self.stubs.Set(self.msg_runner, 'instance_update_at_top',
- fake_instance_update_at_top)
- self.stubs.Set(scheduler_utils, 'build_request_spec',
- fake_build_request_spec)
-
- self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
- method_kwargs)
- # Shouldn't retry
- self.assertEqual(1, call_info['num_tries'])
- self.assertEqual(self.instance_uuids, call_info['errored_uuids1'])
- self.assertEqual(self.instance_uuids, call_info['errored_uuids2'])
-
- def test_filter_schedule_skipping(self):
- # if a filter handles scheduling, short circuit
-
- def _grab(filter_properties):
- return None
-
- self.stubs.Set(self.scheduler, '_grab_target_cells', _grab)
-
- def _test(self, *args):
- raise test.TestingException("shouldn't be called")
-
- try:
- self.scheduler._schedule_build_to_cells(None, None, None, _test,
- None)
- except test.TestingException:
- self.fail("Scheduling did not properly short circuit")
-
- def test_cells_filter_args_correct(self):
- # Re-init our fakes with some filters.
- our_path = 'nova.tests.cells.test_cells_scheduler'
- cls_names = [our_path + '.' + 'FakeFilterClass1',
- our_path + '.' + 'FakeFilterClass2']
- self.flags(scheduler_filter_classes=cls_names, group='cells')
- self._init_cells_scheduler()
-
- # Make sure there's no child cells so that we will be
- # selected. Makes stubbing easier.
- self.state_manager.child_cells = {}
-
- call_info = {}
-
- def fake_create_instances_here(ctxt, instance_uuids,
- instance_properties, instance_type, image, security_groups,
- block_device_mapping):
- call_info['ctxt'] = ctxt
- call_info['instance_uuids'] = instance_uuids
- call_info['instance_properties'] = instance_properties
- call_info['instance_type'] = instance_type
- call_info['image'] = image
- call_info['security_groups'] = security_groups
- call_info['block_device_mapping'] = block_device_mapping
-
- def fake_rpc_build_instances(ctxt, **host_sched_kwargs):
- call_info['host_sched_kwargs'] = host_sched_kwargs
-
- def fake_get_filtered_objs(filter_classes, cells, filt_properties):
- call_info['filt_classes'] = filter_classes
- call_info['filt_cells'] = cells
- call_info['filt_props'] = filt_properties
- return cells
-
- def fake_build_request_spec(ctxt, image, instances):
- request_spec = {
- 'instance_uuids': [inst['uuid'] for inst in instances],
- 'instance_properties': instances[0],
- 'image': image,
- 'instance_type': 'fake_type'}
- return request_spec
-
- self.stubs.Set(self.scheduler, '_create_instances_here',
- fake_create_instances_here)
- self.stubs.Set(self.scheduler.compute_task_api,
- 'build_instances', fake_rpc_build_instances)
- self.stubs.Set(scheduler_utils, 'build_request_spec',
- fake_build_request_spec)
- filter_handler = self.scheduler.filter_handler
- self.stubs.Set(filter_handler, 'get_filtered_objects',
- fake_get_filtered_objs)
-
- host_sched_kwargs = {'image': 'fake_image',
- 'instances': self.instances,
- 'filter_properties':
- {'instance_type': 'fake_type'},
- 'security_groups': 'fake_sec_groups',
- 'block_device_mapping': 'fake_bdm'}
-
- self.msg_runner.build_instances(self.ctxt,
- self.my_cell_state, host_sched_kwargs)
- # Our cell was selected.
- self.assertEqual(self.ctxt, call_info['ctxt'])
- self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
- self.assertEqual(self.request_spec['instance_properties'],
- call_info['instance_properties'])
- self.assertEqual(self.request_spec['instance_type'],
- call_info['instance_type'])
- self.assertEqual(self.request_spec['image'], call_info['image'])
- self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])
- # Filter args are correct
- expected_filt_props = {'context': self.ctxt,
- 'scheduler': self.scheduler,
- 'routing_path': self.my_cell_state.name,
- 'host_sched_kwargs': host_sched_kwargs,
- 'request_spec': self.request_spec,
- 'instance_type': 'fake_type'}
- self.assertEqual(expected_filt_props, call_info['filt_props'])
- self.assertEqual([FakeFilterClass1, FakeFilterClass2],
- call_info['filt_classes'])
- self.assertEqual([self.my_cell_state], call_info['filt_cells'])
-
- def test_cells_filter_returning_none(self):
- # Re-init our fakes with some filters.
- our_path = 'nova.tests.cells.test_cells_scheduler'
- cls_names = [our_path + '.' + 'FakeFilterClass1',
- our_path + '.' + 'FakeFilterClass2']
- self.flags(scheduler_filter_classes=cls_names, group='cells')
- self._init_cells_scheduler()
-
- # Make sure there's no child cells so that we will be
- # selected. Makes stubbing easier.
- self.state_manager.child_cells = {}
-
- call_info = {'scheduled': False}
-
- def fake_create_instances_here(ctxt, request_spec):
- # Should not be called
- call_info['scheduled'] = True
-
- def fake_get_filtered_objs(filter_classes, cells, filt_properties):
- # Should cause scheduling to be skipped. Means that the
- # filter did it.
- return None
-
- self.stubs.Set(self.scheduler, '_create_instances_here',
- fake_create_instances_here)
- filter_handler = self.scheduler.filter_handler
- self.stubs.Set(filter_handler, 'get_filtered_objects',
- fake_get_filtered_objs)
-
- self.msg_runner.build_instances(self.ctxt,
- self.my_cell_state, {})
- self.assertFalse(call_info['scheduled'])
-
- def test_cells_weight_args_correct(self):
- # Re-init our fakes with some filters.
- our_path = 'nova.tests.cells.test_cells_scheduler'
- cls_names = [our_path + '.' + 'FakeWeightClass1',
- our_path + '.' + 'FakeWeightClass2']
- self.flags(scheduler_weight_classes=cls_names, group='cells')
- self._init_cells_scheduler()
-
- # Make sure there's no child cells so that we will be
- # selected. Makes stubbing easier.
- self.state_manager.child_cells = {}
-
- call_info = {}
-
- def fake_create_instances_here(ctxt, instance_uuids,
- instance_properties, instance_type, image, security_groups,
- block_device_mapping):
- call_info['ctxt'] = ctxt
- call_info['instance_uuids'] = instance_uuids
- call_info['instance_properties'] = instance_properties
- call_info['instance_type'] = instance_type
- call_info['image'] = image
- call_info['security_groups'] = security_groups
- call_info['block_device_mapping'] = block_device_mapping
-
- def fake_rpc_build_instances(ctxt, **host_sched_kwargs):
- call_info['host_sched_kwargs'] = host_sched_kwargs
-
- def fake_get_weighed_objs(weight_classes, cells, filt_properties):
- call_info['weight_classes'] = weight_classes
- call_info['weight_cells'] = cells
- call_info['weight_props'] = filt_properties
- return [weights.WeightedCell(cells[0], 0.0)]
-
- def fake_build_request_spec(ctxt, image, instances):
- request_spec = {
- 'instance_uuids': [inst['uuid'] for inst in instances],
- 'instance_properties': instances[0],
- 'image': image,
- 'instance_type': 'fake_type'}
- return request_spec
-
- self.stubs.Set(self.scheduler, '_create_instances_here',
- fake_create_instances_here)
- self.stubs.Set(scheduler_utils, 'build_request_spec',
- fake_build_request_spec)
- self.stubs.Set(self.scheduler.compute_task_api,
- 'build_instances', fake_rpc_build_instances)
- weight_handler = self.scheduler.weight_handler
- self.stubs.Set(weight_handler, 'get_weighed_objects',
- fake_get_weighed_objs)
-
- host_sched_kwargs = {'image': 'fake_image',
- 'instances': self.instances,
- 'filter_properties':
- {'instance_type': 'fake_type'},
- 'security_groups': 'fake_sec_groups',
- 'block_device_mapping': 'fake_bdm'}
-
- self.msg_runner.build_instances(self.ctxt,
- self.my_cell_state, host_sched_kwargs)
- # Our cell was selected.
- self.assertEqual(self.ctxt, call_info['ctxt'])
- self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
- self.assertEqual(self.request_spec['instance_properties'],
- call_info['instance_properties'])
- self.assertEqual(self.request_spec['instance_type'],
- call_info['instance_type'])
- self.assertEqual(self.request_spec['image'], call_info['image'])
- self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])
- # Weight args are correct
- expected_filt_props = {'context': self.ctxt,
- 'scheduler': self.scheduler,
- 'routing_path': self.my_cell_state.name,
- 'host_sched_kwargs': host_sched_kwargs,
- 'request_spec': self.request_spec,
- 'instance_type': 'fake_type'}
- self.assertEqual(expected_filt_props, call_info['weight_props'])
- self.assertEqual([FakeWeightClass1, FakeWeightClass2],
- call_info['weight_classes'])
- self.assertEqual([self.my_cell_state], call_info['weight_cells'])
diff --git a/nova/tests/compute/monitors/test_monitors.py b/nova/tests/compute/monitors/test_monitors.py
deleted file mode 100644
index 929e149bf6..0000000000
--- a/nova/tests/compute/monitors/test_monitors.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# Copyright 2013 Intel Corporation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Tests for resource monitors."""
-
-from nova.compute import monitors
-from nova import test
-
-
-class FakeResourceMonitor(monitors.ResourceMonitorBase):
- def _update_data(self):
- self._data['foo.metric1'] = '1000'
- self._data['foo.metric2'] = '99.999'
- self._data['timestamp'] = '123'
-
- @monitors.ResourceMonitorBase.add_timestamp
- def _get_foo_metric1(self, **kwargs):
- return self._data.get("foo.metric1")
-
- @monitors.ResourceMonitorBase.add_timestamp
- def _get_foo_metric2(self, **kwargs):
- return self._data.get("foo.metric2")
-
-
-class FakeMonitorClass1(monitors.ResourceMonitorBase):
- def get_metrics(self, **kwargs):
- data = [{'timestamp': 1232,
- 'name': 'key1',
- 'value': 2600,
- 'source': 'libvirt'}]
- return data
-
- def get_metric_names(self):
- return ['key1']
-
-
-class FakeMonitorClass2(monitors.ResourceMonitorBase):
- def get_metrics(self, **kwargs):
- data = [{'timestamp': 123,
- 'name': 'key2',
- 'value': 1600,
- 'source': 'libvirt'}]
- return data
-
- def get_metric_names(self):
- return ['key2']
-
-
-class FakeMonitorClass3(monitors.ResourceMonitorBase):
- def get_metrics(self, **kwargs):
- data = [{'timestamp': 1234,
- 'name': 'key1',
- 'value': 1200,
- 'source': 'libvirt'}]
- return data
-
- def get_metric_names(self):
- return ['key1']
-
-
-class FakeMonitorClass4(monitors.ResourceMonitorBase):
- def get_metrics(self, **kwargs):
- raise test.TestingException()
-
- def get_metric_names(self):
- raise test.TestingException()
-
-
-class ResourceMonitorBaseTestCase(test.TestCase):
- def setUp(self):
- super(ResourceMonitorBaseTestCase, self).setUp()
- self.monitor = FakeResourceMonitor(None)
-
- def test_get_metric_names(self):
- names = self.monitor.get_metric_names()
- self.assertEqual(2, len(names))
- self.assertIn("foo.metric1", names)
- self.assertIn("foo.metric2", names)
-
- def test_get_metrics(self):
- metrics_raw = self.monitor.get_metrics()
- names = self.monitor.get_metric_names()
- metrics = {}
- for metric in metrics_raw:
- self.assertIn(metric['name'], names)
- self.assertEqual(metric["timestamp"], '123')
- metrics[metric['name']] = metric['value']
-
- self.assertEqual(metrics["foo.metric1"], '1000')
- self.assertEqual(metrics["foo.metric2"], '99.999')
-
-
-class ResourceMonitorsTestCase(test.TestCase):
- """Test case for monitors."""
-
- def setUp(self):
- super(ResourceMonitorsTestCase, self).setUp()
- self.monitor_handler = monitors.ResourceMonitorHandler()
- fake_monitors = [
- 'nova.tests.compute.monitors.test_monitors.FakeMonitorClass1',
- 'nova.tests.compute.monitors.test_monitors.FakeMonitorClass2']
- self.flags(compute_available_monitors=fake_monitors)
-
- classes = self.monitor_handler.get_matching_classes(
- ['nova.compute.monitors.all_monitors'])
- self.class_map = {}
- for cls in classes:
- self.class_map[cls.__name__] = cls
-
- def test_choose_monitors_not_found(self):
- self.flags(compute_monitors=['FakeMonitorClass5', 'FakeMonitorClass4'])
- monitor_classes = self.monitor_handler.choose_monitors(self)
- self.assertEqual(len(monitor_classes), 0)
-
- def test_choose_monitors_bad(self):
- self.flags(compute_monitors=['FakeMonitorClass1', 'FakePluginClass3'])
- monitor_classes = self.monitor_handler.choose_monitors(self)
- self.assertEqual(len(monitor_classes), 1)
-
- def test_choose_monitors(self):
- self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass2'])
- monitor_classes = self.monitor_handler.choose_monitors(self)
- self.assertEqual(len(monitor_classes), 2)
-
- def test_choose_monitors_none(self):
- self.flags(compute_monitors=[])
- monitor_classes = self.monitor_handler.choose_monitors(self)
- self.assertEqual(len(monitor_classes), 0)
-
- def test_all_monitors(self):
- # Double check at least a couple of known monitors exist
- self.assertIn('ComputeDriverCPUMonitor', self.class_map)
diff --git a/nova/tests/compute/test_claims.py b/nova/tests/compute/test_claims.py
deleted file mode 100644
index 8f94589167..0000000000
--- a/nova/tests/compute/test_claims.py
+++ /dev/null
@@ -1,320 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Tests for resource tracker claims."""
-
-import uuid
-
-import mock
-from oslo.serialization import jsonutils
-
-from nova.compute import claims
-from nova import db
-from nova import exception
-from nova import objects
-from nova.pci import manager as pci_manager
-from nova import test
-from nova.tests.pci import fakes as pci_fakes
-from nova.virt import hardware
-
-
-class FakeResourceHandler(object):
- test_called = False
- usage_is_instance = False
-
- def test_resources(self, usage, limits):
- self.test_called = True
- self.usage_is_itype = usage.get('name') is 'fakeitype'
- return []
-
-
-class DummyTracker(object):
- icalled = False
- rcalled = False
- pci_tracker = pci_manager.PciDevTracker()
- ext_resources_handler = FakeResourceHandler()
-
- def abort_instance_claim(self, *args, **kwargs):
- self.icalled = True
-
- def drop_resize_claim(self, *args, **kwargs):
- self.rcalled = True
-
- def new_pci_tracker(self):
- self.pci_tracker = pci_manager.PciDevTracker()
-
-
-@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
-class ClaimTestCase(test.NoDBTestCase):
-
- def setUp(self):
- super(ClaimTestCase, self).setUp()
- self.resources = self._fake_resources()
- self.tracker = DummyTracker()
-
- def _claim(self, limits=None, overhead=None, **kwargs):
- numa_topology = kwargs.pop('numa_topology', None)
- instance = self._fake_instance(**kwargs)
- if numa_topology:
- db_numa_topology = {
- 'id': 1, 'created_at': None, 'updated_at': None,
- 'deleted_at': None, 'deleted': None,
- 'instance_uuid': instance['uuid'],
- 'numa_topology': numa_topology.to_json()
- }
- else:
- db_numa_topology = None
- if overhead is None:
- overhead = {'memory_mb': 0}
- with mock.patch.object(
- db, 'instance_extra_get_by_instance_uuid',
- return_value=db_numa_topology):
- return claims.Claim('context', instance, self.tracker,
- self.resources, overhead=overhead,
- limits=limits)
-
- def _fake_instance(self, **kwargs):
- instance = {
- 'uuid': str(uuid.uuid1()),
- 'memory_mb': 1024,
- 'root_gb': 10,
- 'ephemeral_gb': 5,
- 'vcpus': 1,
- 'system_metadata': {},
- 'numa_topology': None
- }
- instance.update(**kwargs)
- return instance
-
- def _fake_instance_type(self, **kwargs):
- instance_type = {
- 'id': 1,
- 'name': 'fakeitype',
- 'memory_mb': 1,
- 'vcpus': 1,
- 'root_gb': 1,
- 'ephemeral_gb': 2
- }
- instance_type.update(**kwargs)
- return instance_type
-
- def _fake_resources(self, values=None):
- resources = {
- 'memory_mb': 2048,
- 'memory_mb_used': 0,
- 'free_ram_mb': 2048,
- 'local_gb': 20,
- 'local_gb_used': 0,
- 'free_disk_gb': 20,
- 'vcpus': 2,
- 'vcpus_used': 0,
- 'numa_topology': hardware.VirtNUMAHostTopology(
- cells=[hardware.VirtNUMATopologyCellUsage(1, [1, 2], 512),
- hardware.VirtNUMATopologyCellUsage(2, [3, 4], 512)]
- ).to_json()
- }
- if values:
- resources.update(values)
- return resources
-
- def test_memory_unlimited(self, mock_get):
- self._claim(memory_mb=99999999)
-
- def test_disk_unlimited_root(self, mock_get):
- self._claim(root_gb=999999)
-
- def test_disk_unlimited_ephemeral(self, mock_get):
- self._claim(ephemeral_gb=999999)
-
- def test_memory_with_overhead(self, mock_get):
- overhead = {'memory_mb': 8}
- limits = {'memory_mb': 2048}
- self._claim(memory_mb=2040, limits=limits,
- overhead=overhead)
-
- def test_memory_with_overhead_insufficient(self, mock_get):
- overhead = {'memory_mb': 9}
- limits = {'memory_mb': 2048}
-
- self.assertRaises(exception.ComputeResourcesUnavailable,
- self._claim, limits=limits, overhead=overhead,
- memory_mb=2040)
-
- def test_memory_oversubscription(self, mock_get):
- self._claim(memory_mb=4096)
-
- def test_memory_insufficient(self, mock_get):
- limits = {'memory_mb': 8192}
- self.assertRaises(exception.ComputeResourcesUnavailable,
- self._claim, limits=limits, memory_mb=16384)
-
- def test_disk_oversubscription(self, mock_get):
- limits = {'disk_gb': 60}
- self._claim(root_gb=10, ephemeral_gb=40,
- limits=limits)
-
- def test_disk_insufficient(self, mock_get):
- limits = {'disk_gb': 45}
- self.assertRaisesRegexp(
- exception.ComputeResourcesUnavailable,
- "disk",
- self._claim, limits=limits, root_gb=10, ephemeral_gb=40)
-
- def test_disk_and_memory_insufficient(self, mock_get):
- limits = {'disk_gb': 45, 'memory_mb': 8192}
- self.assertRaisesRegexp(
- exception.ComputeResourcesUnavailable,
- "memory.*disk",
- self._claim, limits=limits, root_gb=10, ephemeral_gb=40,
- memory_mb=16384)
-
- @pci_fakes.patch_pci_whitelist
- def test_pci_pass(self, mock_get):
- dev_dict = {
- 'compute_node_id': 1,
- 'address': 'a',
- 'product_id': 'p',
- 'vendor_id': 'v',
- 'status': 'available'}
- self.tracker.new_pci_tracker()
- self.tracker.pci_tracker.set_hvdevs([dev_dict])
- claim = self._claim()
- request = objects.InstancePCIRequest(count=1,
- spec=[{'vendor_id': 'v', 'product_id': 'p'}])
- mock_get.return_value = objects.InstancePCIRequests(
- requests=[request])
- self.assertIsNone(claim._test_pci())
-
- @pci_fakes.patch_pci_whitelist
- def test_pci_fail(self, mock_get):
- dev_dict = {
- 'compute_node_id': 1,
- 'address': 'a',
- 'product_id': 'p',
- 'vendor_id': 'v1',
- 'status': 'available'}
- self.tracker.new_pci_tracker()
- self.tracker.pci_tracker.set_hvdevs([dev_dict])
- claim = self._claim()
- request = objects.InstancePCIRequest(count=1,
- spec=[{'vendor_id': 'v', 'product_id': 'p'}])
- mock_get.return_value = objects.InstancePCIRequests(
- requests=[request])
- claim._test_pci()
-
- @pci_fakes.patch_pci_whitelist
- def test_pci_pass_no_requests(self, mock_get):
- dev_dict = {
- 'compute_node_id': 1,
- 'address': 'a',
- 'product_id': 'p',
- 'vendor_id': 'v',
- 'status': 'available'}
- self.tracker.new_pci_tracker()
- self.tracker.pci_tracker.set_hvdevs([dev_dict])
- claim = self._claim()
- self.assertIsNone(claim._test_pci())
-
- def test_ext_resources(self, mock_get):
- self._claim()
- self.assertTrue(self.tracker.ext_resources_handler.test_called)
- self.assertFalse(self.tracker.ext_resources_handler.usage_is_itype)
-
- def test_numa_topology_no_limit(self, mock_get):
- huge_instance = hardware.VirtNUMAInstanceTopology(
- cells=[hardware.VirtNUMATopologyCellInstance(
- 1, set([1, 2, 3, 4, 5]), 2048)])
- self._claim(numa_topology=huge_instance)
-
- def test_numa_topology_fails(self, mock_get):
- huge_instance = hardware.VirtNUMAInstanceTopology(
- cells=[hardware.VirtNUMATopologyCellInstance(
- 1, set([1, 2, 3, 4, 5]), 2048)])
- limit_topo = hardware.VirtNUMALimitTopology(
- cells=[hardware.VirtNUMATopologyCellLimit(
- 1, [1, 2], 512, cpu_limit=2, memory_limit=512),
- hardware.VirtNUMATopologyCellLimit(
- 1, [3, 4], 512, cpu_limit=2, memory_limit=512)])
- self.assertRaises(exception.ComputeResourcesUnavailable,
- self._claim,
- limits={'numa_topology': limit_topo.to_json()},
- numa_topology=huge_instance)
-
- def test_numa_topology_passes(self, mock_get):
- huge_instance = hardware.VirtNUMAInstanceTopology(
- cells=[hardware.VirtNUMATopologyCellInstance(
- 1, set([1, 2, 3, 4, 5]), 2048)])
- limit_topo = hardware.VirtNUMALimitTopology(
- cells=[hardware.VirtNUMATopologyCellLimit(
- 1, [1, 2], 512, cpu_limit=5, memory_limit=4096),
- hardware.VirtNUMATopologyCellLimit(
- 1, [3, 4], 512, cpu_limit=5, memory_limit=4096)])
- self._claim(limits={'numa_topology': limit_topo.to_json()},
- numa_topology=huge_instance)
-
- def test_abort(self, mock_get):
- claim = self._abort()
- self.assertTrue(claim.tracker.icalled)
-
- def _abort(self):
- claim = None
- try:
- with self._claim(memory_mb=4096) as claim:
- raise test.TestingException("abort")
- except test.TestingException:
- pass
-
- return claim
-
-
-class ResizeClaimTestCase(ClaimTestCase):
-
- def setUp(self):
- super(ResizeClaimTestCase, self).setUp()
- self.instance = self._fake_instance()
- self.get_numa_constraint_patch = None
-
- def _claim(self, limits=None, overhead=None, **kwargs):
- instance_type = self._fake_instance_type(**kwargs)
- numa_constraint = kwargs.pop('numa_topology', None)
- if overhead is None:
- overhead = {'memory_mb': 0}
- with mock.patch.object(
- hardware.VirtNUMAInstanceTopology, 'get_constraints',
- return_value=numa_constraint):
- return claims.ResizeClaim('context', self.instance, instance_type,
- {}, self.tracker, self.resources,
- overhead=overhead, limits=limits)
-
- def _set_pci_request(self, claim):
- request = [{'count': 1,
- 'spec': [{'vendor_id': 'v', 'product_id': 'p'}],
- }]
- claim.instance.update(
- system_metadata={'new_pci_requests': jsonutils.dumps(request)})
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
- def test_ext_resources(self, mock_get):
- self._claim()
- self.assertTrue(self.tracker.ext_resources_handler.test_called)
- self.assertTrue(self.tracker.ext_resources_handler.usage_is_itype)
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
- def test_abort(self, mock_get):
- claim = self._abort()
- self.assertTrue(claim.tracker.rcalled)
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
deleted file mode 100644
index dcb0895fe4..0000000000
--- a/nova/tests/compute/test_compute.py
+++ /dev/null
@@ -1,11415 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# Copyright 2011 Piston Cloud Computing, Inc.
-# All Rights Reserved.
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Tests for compute service."""
-
-import base64
-import contextlib
-import datetime
-import operator
-import sys
-import time
-import traceback
-import uuid
-
-from eventlet import greenthread
-import mock
-import mox
-from oslo.config import cfg
-from oslo import messaging
-from oslo.serialization import jsonutils
-from oslo.utils import importutils
-from oslo.utils import timeutils
-from oslo.utils import units
-import six
-import testtools
-from testtools import matchers as testtools_matchers
-
-import nova
-from nova import availability_zones
-from nova import block_device
-from nova import compute
-from nova.compute import api as compute_api
-from nova.compute import arch
-from nova.compute import delete_types
-from nova.compute import flavors
-from nova.compute import manager as compute_manager
-from nova.compute import power_state
-from nova.compute import rpcapi as compute_rpcapi
-from nova.compute import task_states
-from nova.compute import utils as compute_utils
-from nova.compute import vm_states
-from nova.conductor import manager as conductor_manager
-from nova.console import type as ctype
-from nova import context
-from nova import db
-from nova import exception
-from nova.i18n import _
-from nova.image import glance
-from nova.network import api as network_api
-from nova.network import model as network_model
-from nova.network.security_group import openstack_driver
-from nova import objects
-from nova.objects import base as obj_base
-from nova.objects import block_device as block_device_obj
-from nova.objects import instance as instance_obj
-from nova.openstack.common import log as logging
-from nova.openstack.common import uuidutils
-from nova import policy
-from nova import quota
-from nova import test
-from nova.tests.compute import eventlet_utils
-from nova.tests.compute import fake_resource_tracker
-from nova.tests.db import fakes as db_fakes
-from nova.tests import fake_block_device
-from nova.tests import fake_instance
-from nova.tests import fake_network
-from nova.tests import fake_network_cache_model
-from nova.tests import fake_notifier
-from nova.tests import fake_server_actions
-from nova.tests.image import fake as fake_image
-from nova.tests import matchers
-from nova.tests.objects import test_flavor
-from nova.tests.objects import test_migration
-from nova import utils
-from nova.virt import block_device as driver_block_device
-from nova.virt import event
-from nova.virt import fake
-from nova.virt import hardware
-from nova.volume import cinder
-
-QUOTAS = quota.QUOTAS
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-CONF.import_opt('compute_manager', 'nova.service')
-CONF.import_opt('host', 'nova.netconf')
-CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
-CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
-
-
-FAKE_IMAGE_REF = 'fake-image-ref'
-
-NODENAME = 'fakenode1'
-
-
-def fake_not_implemented(*args, **kwargs):
- raise NotImplementedError()
-
-
-def get_primitive_instance_by_uuid(context, instance_uuid):
- """Helper method to get an instance and then convert it to
- a primitive form using jsonutils.
- """
- instance = db.instance_get_by_uuid(context, instance_uuid)
- return jsonutils.to_primitive(instance)
-
-
-def unify_instance(instance):
- """Return a dict-like instance for both object-initiated and
- model-initiated sources that can reasonably be compared.
- """
- newdict = dict()
- for k, v in instance.iteritems():
- if isinstance(v, datetime.datetime):
- # NOTE(danms): DB models and Instance objects have different
- # timezone expectations
- v = v.replace(tzinfo=None)
- elif k == 'fault':
- # NOTE(danms): DB models don't have 'fault'
- continue
- elif k == 'pci_devices':
- # NOTE(yonlig.he) pci devices need lazy loading
- # fake db does not support it yet.
- continue
- newdict[k] = v
- return newdict
-
-
-class FakeSchedulerAPI(object):
-
- def run_instance(self, ctxt, request_spec, admin_password,
- injected_files, requested_networks, is_first_time,
- filter_properties):
- pass
-
- def live_migration(self, ctxt, block_migration, disk_over_commit,
- instance, dest):
- pass
-
- def prep_resize(self, ctxt, instance, instance_type, image, request_spec,
- filter_properties, reservations):
- pass
-
-
-class FakeComputeTaskAPI(object):
-
- def resize_instance(self, context, instance, extra_instance_updates,
- scheduler_hint, flavor, reservations):
- pass
-
-
-class BaseTestCase(test.TestCase):
-
- def setUp(self):
- super(BaseTestCase, self).setUp()
- self.flags(network_manager='nova.network.manager.FlatManager')
- fake.set_nodes([NODENAME])
- self.flags(use_local=True, group='conductor')
-
- fake_notifier.stub_notifier(self.stubs)
- self.addCleanup(fake_notifier.reset)
-
- self.compute = importutils.import_object(CONF.compute_manager)
- # execute power syncing synchronously for testing:
- self.compute._sync_power_pool = eventlet_utils.SyncPool()
-
- # override tracker with a version that doesn't need the database:
- fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
- self.compute.driver, NODENAME)
- self.compute._resource_tracker_dict[NODENAME] = fake_rt
-
- def fake_get_compute_nodes_in_db(context, use_slave=False):
- fake_compute_nodes = [{'local_gb': 259,
- 'vcpus_used': 0,
- 'deleted': 0,
- 'hypervisor_type': 'powervm',
- 'created_at': '2013-04-01T00:27:06.000000',
- 'local_gb_used': 0,
- 'updated_at': '2013-04-03T00:35:41.000000',
- 'hypervisor_hostname': 'fake_phyp1',
- 'memory_mb_used': 512,
- 'memory_mb': 131072,
- 'current_workload': 0,
- 'vcpus': 16,
- 'cpu_info': 'ppc64,powervm,3940',
- 'running_vms': 0,
- 'free_disk_gb': 259,
- 'service_id': 7,
- 'hypervisor_version': 7,
- 'disk_available_least': 265856,
- 'deleted_at': None,
- 'free_ram_mb': 130560,
- 'metrics': '',
- 'stats': '',
- 'numa_topology': '',
- 'id': 2,
- 'host_ip': '127.0.0.1'}]
- return [objects.ComputeNode._from_db_object(
- context, objects.ComputeNode(), cn)
- for cn in fake_compute_nodes]
-
- def fake_compute_node_delete(context, compute_node_id):
- self.assertEqual(2, compute_node_id)
-
- self.stubs.Set(self.compute, '_get_compute_nodes_in_db',
- fake_get_compute_nodes_in_db)
- self.stubs.Set(db, 'compute_node_delete',
- fake_compute_node_delete)
-
- self.compute.update_available_resource(
- context.get_admin_context())
-
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id,
- self.project_id)
- self.none_quotas = objects.Quotas.from_reservations(
- self.context, None)
-
- def fake_show(meh, context, id, **kwargs):
- if id:
- return {'id': id, 'min_disk': None, 'min_ram': None,
- 'name': 'fake_name',
- 'status': 'active',
- 'properties': {'kernel_id': 'fake_kernel_id',
- 'ramdisk_id': 'fake_ramdisk_id',
- 'something_else': 'meow'}}
- else:
- raise exception.ImageNotFound(image_id=id)
-
- fake_image.stub_out_image_service(self.stubs)
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
-
- fake_rpcapi = FakeSchedulerAPI()
- fake_taskapi = FakeComputeTaskAPI()
- self.stubs.Set(self.compute, 'scheduler_rpcapi', fake_rpcapi)
- self.stubs.Set(self.compute, 'compute_task_api', fake_taskapi)
-
- fake_network.set_stub_network_methods(self.stubs)
- fake_server_actions.stub_out_action_events(self.stubs)
-
- def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs):
- self.assertTrue(ctxt.is_admin)
- return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
-
- self.stubs.Set(network_api.API, 'get_instance_nw_info',
- fake_get_nw_info)
-
- def fake_allocate_for_instance(cls, ctxt, instance, *args, **kwargs):
- self.assertFalse(ctxt.is_admin)
- return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
-
- self.stubs.Set(network_api.API, 'allocate_for_instance',
- fake_allocate_for_instance)
- self.compute_api = compute.API()
-
- # Just to make long lines short
- self.rt = self.compute._get_resource_tracker(NODENAME)
-
- def tearDown(self):
- timeutils.clear_time_override()
- ctxt = context.get_admin_context()
- fake_image.FakeImageService_reset()
- instances = db.instance_get_all(ctxt)
- for instance in instances:
- db.instance_destroy(ctxt, instance['uuid'])
- fake.restore_nodes()
- super(BaseTestCase, self).tearDown()
-
- def _create_fake_instance(self, params=None, type_name='m1.tiny',
- services=False):
- """Create a test instance."""
- if not params:
- params = {}
-
- def make_fake_sys_meta():
- sys_meta = params.pop("system_metadata", {})
- inst_type = flavors.get_flavor_by_name(type_name)
- for key in flavors.system_metadata_flavor_props:
- sys_meta['instance_type_%s' % key] = inst_type[key]
- return sys_meta
-
- inst = {}
- inst['vm_state'] = vm_states.ACTIVE
- inst['task_state'] = None
- inst['image_ref'] = FAKE_IMAGE_REF
- inst['reservation_id'] = 'r-fakeres'
- inst['user_id'] = self.user_id
- inst['project_id'] = self.project_id
- inst['host'] = 'fake_host'
- inst['node'] = NODENAME
- type_id = flavors.get_flavor_by_name(type_name)['id']
- inst['instance_type_id'] = type_id
- inst['ami_launch_index'] = 0
- inst['memory_mb'] = 0
- inst['vcpus'] = 0
- inst['root_gb'] = 0
- inst['ephemeral_gb'] = 0
- inst['architecture'] = arch.X86_64
- inst['os_type'] = 'Linux'
- inst['system_metadata'] = make_fake_sys_meta()
- inst['locked'] = False
- inst['created_at'] = timeutils.utcnow()
- inst['updated_at'] = timeutils.utcnow()
- inst['launched_at'] = timeutils.utcnow()
- inst['security_groups'] = []
- inst.update(params)
- if services:
- _create_service_entries(self.context.elevated(),
- [['fake_zone', [inst['host']]]])
- return db.instance_create(self.context, inst)
-
- def _create_fake_instance_obj(self, params=None, type_name='m1.tiny',
- services=False):
- db_inst = self._create_fake_instance(params, type_name=type_name,
- services=services)
- return objects.Instance._from_db_object(
- self.context, objects.Instance(), db_inst,
- expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
-
- def _create_instance_type(self, params=None):
- """Create a test instance type."""
- if not params:
- params = {}
-
- context = self.context.elevated()
- inst = {}
- inst['name'] = 'm1.small'
- inst['memory_mb'] = 1024
- inst['vcpus'] = 1
- inst['root_gb'] = 20
- inst['ephemeral_gb'] = 10
- inst['flavorid'] = '1'
- inst['swap'] = 2048
- inst['rxtx_factor'] = 1
- inst.update(params)
- return db.flavor_create(context, inst)['id']
-
- def _create_group(self):
- values = {'name': 'testgroup',
- 'description': 'testgroup',
- 'user_id': self.user_id,
- 'project_id': self.project_id}
- return db.security_group_create(self.context, values)
-
- def _stub_migrate_server(self):
- def _fake_migrate_server(*args, **kwargs):
- pass
-
- self.stubs.Set(conductor_manager.ComputeTaskManager,
- 'migrate_server', _fake_migrate_server)
-
- def _init_aggregate_with_host(self, aggr, aggr_name, zone, host):
- if not aggr:
- aggr = self.api.create_aggregate(self.context, aggr_name, zone)
- aggr = self.api.add_host_to_aggregate(self.context, aggr['id'], host)
- return aggr
-
-
-class ComputeVolumeTestCase(BaseTestCase):
-
- def setUp(self):
- super(ComputeVolumeTestCase, self).setUp()
- self.volume_id = 'fake'
- self.fetched_attempts = 0
- self.instance = {
- 'id': 'fake',
- 'uuid': 'fake',
- 'name': 'fake',
- 'root_device_name': '/dev/vda',
- }
- self.fake_volume = fake_block_device.FakeDbBlockDeviceDict(
- {'source_type': 'volume', 'destination_type': 'volume',
- 'volume_id': self.volume_id, 'device_name': '/dev/vdb'})
- self.instance_object = objects.Instance._from_db_object(
- self.context, objects.Instance(),
- fake_instance.fake_db_instance())
- self.stubs.Set(self.compute.volume_api, 'get', lambda *a, **kw:
- {'id': self.volume_id,
- 'attach_status': 'detached'})
- self.stubs.Set(self.compute.driver, 'get_volume_connector',
- lambda *a, **kw: None)
- self.stubs.Set(self.compute.volume_api, 'initialize_connection',
- lambda *a, **kw: {})
- self.stubs.Set(self.compute.volume_api, 'terminate_connection',
- lambda *a, **kw: None)
- self.stubs.Set(self.compute.volume_api, 'attach',
- lambda *a, **kw: None)
- self.stubs.Set(self.compute.volume_api, 'detach',
- lambda *a, **kw: None)
- self.stubs.Set(self.compute.volume_api, 'check_attach',
- lambda *a, **kw: None)
- self.stubs.Set(greenthread, 'sleep',
- lambda *a, **kw: None)
-
- def store_cinfo(context, *args, **kwargs):
- self.cinfo = jsonutils.loads(args[-1].get('connection_info'))
- return self.fake_volume
-
- self.stubs.Set(self.compute.conductor_api,
- 'block_device_mapping_update',
- store_cinfo)
- self.stubs.Set(self.compute.conductor_api,
- 'block_device_mapping_update_or_create',
- store_cinfo)
- self.stubs.Set(db, 'block_device_mapping_create', store_cinfo)
- self.stubs.Set(db, 'block_device_mapping_update', store_cinfo)
-
- def test_attach_volume_serial(self):
- fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
- with (mock.patch.object(cinder.API, 'get_volume_encryption_metadata',
- return_value={})):
- instance = self._create_fake_instance_obj()
- self.compute.attach_volume(self.context, self.volume_id,
- '/dev/vdb', instance, bdm=fake_bdm)
- self.assertEqual(self.cinfo.get('serial'), self.volume_id)
-
- def test_attach_volume_raises(self):
- fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
- instance = self._create_fake_instance_obj()
-
- def fake_attach(*args, **kwargs):
- raise test.TestingException
-
- with contextlib.nested(
- mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
- 'attach'),
- mock.patch.object(cinder.API, 'unreserve_volume'),
- mock.patch.object(objects.BlockDeviceMapping,
- 'destroy')
- ) as (mock_attach, mock_unreserve, mock_destroy):
- mock_attach.side_effect = fake_attach
- self.assertRaises(
- test.TestingException, self.compute.attach_volume,
- self.context, 'fake', '/dev/vdb',
- instance, bdm=fake_bdm)
- self.assertTrue(mock_unreserve.called)
- self.assertTrue(mock_destroy.called)
-
- def test_detach_volume_api_raises(self):
- fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
- instance = self._create_fake_instance()
-
- with contextlib.nested(
- mock.patch.object(self.compute, '_detach_volume'),
- mock.patch.object(self.compute.volume_api, 'detach'),
- mock.patch.object(objects.BlockDeviceMapping,
- 'get_by_volume_id'),
- mock.patch.object(fake_bdm, 'destroy')
- ) as (mock_internal_detach, mock_detach, mock_get, mock_destroy):
- mock_detach.side_effect = test.TestingException
- mock_get.return_value = fake_bdm
- self.assertRaises(
- test.TestingException, self.compute.detach_volume,
- self.context, 'fake', instance)
- mock_internal_detach.assert_called_once_with(self.context,
- instance,
- fake_bdm)
- self.assertTrue(mock_destroy.called)
-
- def test_attach_volume_no_bdm(self):
- fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
- instance = self._create_fake_instance_obj()
-
- with contextlib.nested(
- mock.patch.object(objects.BlockDeviceMapping,
- 'get_by_volume_id', return_value=fake_bdm),
- mock.patch.object(self.compute, '_attach_volume')
- ) as (mock_get_by_id, mock_attach):
- self.compute.attach_volume(self.context, 'fake', '/dev/vdb',
- instance, bdm=None)
- mock_get_by_id.assert_called_once_with(self.context, 'fake')
- self.assertTrue(mock_attach.called)
-
- def test_await_block_device_created_too_slow(self):
- self.flags(block_device_allocate_retries=2)
- self.flags(block_device_allocate_retries_interval=0.1)
-
- def never_get(context, vol_id):
- return {
- 'status': 'creating',
- 'id': 'blah',
- }
-
- self.stubs.Set(self.compute.volume_api, 'get', never_get)
- self.assertRaises(exception.VolumeNotCreated,
- self.compute._await_block_device_map_created,
- self.context, '1')
-
- def test_await_block_device_created_slow(self):
- c = self.compute
- self.flags(block_device_allocate_retries=4)
- self.flags(block_device_allocate_retries_interval=0.1)
-
- def slow_get(context, vol_id):
- if self.fetched_attempts < 2:
- self.fetched_attempts += 1
- return {
- 'status': 'creating',
- 'id': 'blah',
- }
- return {
- 'status': 'available',
- 'id': 'blah',
- }
-
- self.stubs.Set(c.volume_api, 'get', slow_get)
- attempts = c._await_block_device_map_created(self.context, '1')
- self.assertEqual(attempts, 3)
-
- def test_await_block_device_created_retries_negative(self):
- c = self.compute
- self.flags(block_device_allocate_retries=-1)
- self.flags(block_device_allocate_retries_interval=0.1)
-
- def volume_get(context, vol_id):
- return {
- 'status': 'available',
- 'id': 'blah',
- }
-
- self.stubs.Set(c.volume_api, 'get', volume_get)
- attempts = c._await_block_device_map_created(self.context, '1')
- self.assertEqual(1, attempts)
-
- def test_await_block_device_created_retries_zero(self):
- c = self.compute
- self.flags(block_device_allocate_retries=0)
- self.flags(block_device_allocate_retries_interval=0.1)
-
- def volume_get(context, vol_id):
- return {
- 'status': 'available',
- 'id': 'blah',
- }
-
- self.stubs.Set(c.volume_api, 'get', volume_get)
- attempts = c._await_block_device_map_created(self.context, '1')
- self.assertEqual(1, attempts)
-
- def test_boot_volume_serial(self):
- with (
- mock.patch.object(objects.BlockDeviceMapping, 'save')
- ) as mock_save:
- block_device_mapping = [
- block_device.BlockDeviceDict({
- 'id': 1,
- 'no_device': None,
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'snapshot_id': None,
- 'volume_id': self.volume_id,
- 'device_name': '/dev/vdb',
- 'delete_on_termination': False,
- })]
- prepped_bdm = self.compute._prep_block_device(
- self.context, self.instance, block_device_mapping)
- mock_save.assert_called_once_with(self.context)
- volume_driver_bdm = prepped_bdm['block_device_mapping'][0]
- self.assertEqual(volume_driver_bdm['connection_info']['serial'],
- self.volume_id)
-
- def test_boot_volume_metadata(self, metadata=True):
- def volume_api_get(*args, **kwargs):
- if metadata:
- return {
- 'size': 1,
- 'volume_image_metadata': {'vol_test_key': 'vol_test_value',
- 'min_ram': u'128',
- 'min_disk': u'256',
- 'size': u'536870912'
- },
- }
- else:
- return {}
-
- self.stubs.Set(self.compute_api.volume_api, 'get', volume_api_get)
-
- expected_no_metadata = {'min_disk': 0, 'min_ram': 0, 'properties': {},
- 'size': 0, 'status': 'active'}
-
- block_device_mapping = [{
- 'id': 1,
- 'device_name': 'vda',
- 'no_device': None,
- 'virtual_name': None,
- 'snapshot_id': None,
- 'volume_id': self.volume_id,
- 'delete_on_termination': False,
- }]
-
- image_meta = self.compute_api._get_bdm_image_metadata(
- self.context, block_device_mapping)
- if metadata:
- self.assertEqual(image_meta['properties']['vol_test_key'],
- 'vol_test_value')
- self.assertEqual(128, image_meta['min_ram'])
- self.assertEqual(256, image_meta['min_disk'])
- self.assertEqual(units.Gi, image_meta['size'])
- else:
- self.assertEqual(expected_no_metadata, image_meta)
-
- # Test it with new-style BDMs
- block_device_mapping = [{
- 'boot_index': 0,
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'volume_id': self.volume_id,
- 'delete_on_termination': False,
- }]
-
- image_meta = self.compute_api._get_bdm_image_metadata(
- self.context, block_device_mapping, legacy_bdm=False)
- if metadata:
- self.assertEqual(image_meta['properties']['vol_test_key'],
- 'vol_test_value')
- self.assertEqual(128, image_meta['min_ram'])
- self.assertEqual(256, image_meta['min_disk'])
- self.assertEqual(units.Gi, image_meta['size'])
- else:
- self.assertEqual(expected_no_metadata, image_meta)
-
- def test_boot_volume_no_metadata(self):
- self.test_boot_volume_metadata(metadata=False)
-
- def test_boot_image_metadata(self, metadata=True):
- def image_api_get(*args, **kwargs):
- if metadata:
- return {
- 'properties': {'img_test_key': 'img_test_value'}
- }
- else:
- return {}
-
- self.stubs.Set(self.compute_api.image_api, 'get', image_api_get)
-
- block_device_mapping = [{
- 'boot_index': 0,
- 'source_type': 'image',
- 'destination_type': 'local',
- 'image_id': "fake-image",
- 'delete_on_termination': True,
- }]
-
- image_meta = self.compute_api._get_bdm_image_metadata(
- self.context, block_device_mapping, legacy_bdm=False)
-
- if metadata:
- self.assertEqual('img_test_value',
- image_meta['properties']['img_test_key'])
- else:
- self.assertEqual(image_meta, {})
-
- def test_boot_image_no_metadata(self):
- self.test_boot_image_metadata(metadata=False)
-
- def test_poll_bandwidth_usage_not_implemented(self):
- ctxt = context.get_admin_context()
-
- self.mox.StubOutWithMock(self.compute.driver, 'get_all_bw_counters')
- self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
- self.mox.StubOutWithMock(time, 'time')
- self.mox.StubOutWithMock(objects.InstanceList, 'get_by_host')
- # Following methods will be called
- utils.last_completed_audit_period().AndReturn((0, 0))
- time.time().AndReturn(10)
- # Note - time called two more times from Log
- time.time().AndReturn(20)
- time.time().AndReturn(21)
- objects.InstanceList.get_by_host(ctxt, 'fake-mini',
- use_slave=True).AndReturn([])
- self.compute.driver.get_all_bw_counters([]).AndRaise(
- NotImplementedError)
- self.mox.ReplayAll()
-
- self.flags(bandwidth_poll_interval=1)
- self.compute._poll_bandwidth_usage(ctxt)
- # A second call won't call the stubs again as the bandwidth
- # poll is now disabled
- self.compute._poll_bandwidth_usage(ctxt)
- self.mox.UnsetStubs()
-
- @mock.patch.object(objects.InstanceList, 'get_by_host')
- @mock.patch.object(objects.BlockDeviceMappingList,
- 'get_by_instance_uuid')
- def test_get_host_volume_bdms(self, mock_get_by_inst, mock_get_by_host):
- fake_instance = mock.Mock(uuid='fake-instance-uuid')
- mock_get_by_host.return_value = [fake_instance]
-
- volume_bdm = mock.Mock(id=1, is_volume=True)
- not_volume_bdm = mock.Mock(id=2, is_volume=False)
- mock_get_by_inst.return_value = [volume_bdm, not_volume_bdm]
-
- expected_host_bdms = [{'instance': fake_instance,
- 'instance_bdms': [volume_bdm]}]
-
- got_host_bdms = self.compute._get_host_volume_bdms('fake-context')
- mock_get_by_host.assert_called_once_with('fake-context',
- self.compute.host)
- mock_get_by_inst.assert_called_once_with('fake-context',
- 'fake-instance-uuid',
- use_slave=False)
- self.assertEqual(expected_host_bdms, got_host_bdms)
-
- def test_poll_volume_usage_disabled(self):
- ctxt = 'MockContext'
- self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
- self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
- # None of the mocks should be called.
- self.mox.ReplayAll()
-
- self.flags(volume_usage_poll_interval=0)
- self.compute._poll_volume_usage(ctxt)
- self.mox.UnsetStubs()
-
- def test_poll_volume_usage_returns_no_vols(self):
- ctxt = 'MockContext'
- self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
- self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
- self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
- # Following methods are called.
- utils.last_completed_audit_period().AndReturn((0, 0))
- self.compute._get_host_volume_bdms(ctxt, use_slave=True).AndReturn([])
- self.mox.ReplayAll()
-
- self.flags(volume_usage_poll_interval=10)
- self.compute._poll_volume_usage(ctxt)
- self.mox.UnsetStubs()
-
- def test_poll_volume_usage_with_data(self):
- ctxt = 'MockContext'
- self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
- self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
- self.mox.StubOutWithMock(self.compute, '_update_volume_usage_cache')
- self.stubs.Set(self.compute.driver, 'get_all_volume_usage',
- lambda x, y: [3, 4])
- # All the mocks are called
- utils.last_completed_audit_period().AndReturn((10, 20))
- self.compute._get_host_volume_bdms(ctxt,
- use_slave=True).AndReturn([1, 2])
- self.compute._update_volume_usage_cache(ctxt, [3, 4])
- self.mox.ReplayAll()
- self.flags(volume_usage_poll_interval=10)
- self.compute._poll_volume_usage(ctxt)
- self.mox.UnsetStubs()
-
- def test_detach_volume_usage(self):
- # Test that detach volume update the volume usage cache table correctly
- instance = self._create_fake_instance_obj()
- bdm = fake_block_device.FakeDbBlockDeviceDict(
- {'id': 1, 'device_name': '/dev/vdb',
- 'connection_info': '{}', 'instance_uuid': instance['uuid'],
- 'source_type': 'volume', 'destination_type': 'volume',
- 'volume_id': 1})
- host_volume_bdms = {'id': 1, 'device_name': '/dev/vdb',
- 'connection_info': '{}', 'instance_uuid': instance['uuid'],
- 'volume_id': 1}
-
- self.mox.StubOutWithMock(db, 'block_device_mapping_get_by_volume_id')
- self.mox.StubOutWithMock(self.compute.driver, 'block_stats')
- self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
- self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
-
- # The following methods will be called
- db.block_device_mapping_get_by_volume_id(self.context, 1, []).\
- AndReturn(bdm)
- self.compute.driver.block_stats(instance['name'], 'vdb').\
- AndReturn([1L, 30L, 1L, 20L, None])
- self.compute._get_host_volume_bdms(self.context,
- use_slave=True).AndReturn(
- host_volume_bdms)
- self.compute.driver.get_all_volume_usage(
- self.context, host_volume_bdms).AndReturn(
- [{'volume': 1,
- 'rd_req': 1,
- 'rd_bytes': 10,
- 'wr_req': 1,
- 'wr_bytes': 5,
- 'instance': instance}])
- db.block_device_mapping_get_by_volume_id(self.context, 1, []).\
- AndReturn(bdm)
-
- self.mox.ReplayAll()
-
- def fake_get_volume_encryption_metadata(self, context, volume_id):
- return {}
- self.stubs.Set(cinder.API, 'get_volume_encryption_metadata',
- fake_get_volume_encryption_metadata)
-
- self.compute.attach_volume(self.context, 1, '/dev/vdb', instance)
-
- # Poll volume usage & then detach the volume. This will update the
- # total fields in the volume usage cache.
- self.flags(volume_usage_poll_interval=10)
- self.compute._poll_volume_usage(self.context)
- # Check that a volume.usage and volume.attach notification was sent
- self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
-
- self.compute.detach_volume(self.context, 1, instance)
-
- # Check that volume.attach, 2 volume.usage, and volume.detach
- # notifications were sent
- self.assertEqual(4, len(fake_notifier.NOTIFICATIONS))
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual('compute.instance.volume.attach', msg.event_type)
- msg = fake_notifier.NOTIFICATIONS[2]
- self.assertEqual('volume.usage', msg.event_type)
- payload = msg.payload
- self.assertEqual(instance['uuid'], payload['instance_id'])
- self.assertEqual('fake', payload['user_id'])
- self.assertEqual('fake', payload['tenant_id'])
- self.assertEqual(1, payload['reads'])
- self.assertEqual(30, payload['read_bytes'])
- self.assertEqual(1, payload['writes'])
- self.assertEqual(20, payload['write_bytes'])
- self.assertIsNone(payload['availability_zone'])
- msg = fake_notifier.NOTIFICATIONS[3]
- self.assertEqual('compute.instance.volume.detach', msg.event_type)
-
- # Check the database for the
- volume_usages = db.vol_get_usage_by_time(self.context, 0)
- self.assertEqual(1, len(volume_usages))
- volume_usage = volume_usages[0]
- self.assertEqual(0, volume_usage['curr_reads'])
- self.assertEqual(0, volume_usage['curr_read_bytes'])
- self.assertEqual(0, volume_usage['curr_writes'])
- self.assertEqual(0, volume_usage['curr_write_bytes'])
- self.assertEqual(1, volume_usage['tot_reads'])
- self.assertEqual(30, volume_usage['tot_read_bytes'])
- self.assertEqual(1, volume_usage['tot_writes'])
- self.assertEqual(20, volume_usage['tot_write_bytes'])
-
- def test_prepare_image_mapping(self):
- swap_size = 1
- ephemeral_size = 1
- instance_type = {'swap': swap_size,
- 'ephemeral_gb': ephemeral_size}
- mappings = [
- {'virtual': 'ami', 'device': 'sda1'},
- {'virtual': 'root', 'device': '/dev/sda1'},
-
- {'virtual': 'swap', 'device': 'sdb4'},
-
- {'virtual': 'ephemeral0', 'device': 'sdc1'},
- {'virtual': 'ephemeral1', 'device': 'sdc2'},
- ]
-
- preped_bdm = self.compute_api._prepare_image_mapping(
- instance_type, mappings)
-
- expected_result = [
- {
- 'device_name': '/dev/sdb4',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'device_type': 'disk',
- 'guest_format': 'swap',
- 'boot_index': -1,
- 'volume_size': swap_size
- },
- {
- 'device_name': '/dev/sdc1',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'device_type': 'disk',
- 'guest_format': CONF.default_ephemeral_format,
- 'boot_index': -1,
- 'volume_size': ephemeral_size
- },
- {
- 'device_name': '/dev/sdc2',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'device_type': 'disk',
- 'guest_format': CONF.default_ephemeral_format,
- 'boot_index': -1,
- 'volume_size': ephemeral_size
- }
- ]
-
- for expected, got in zip(expected_result, preped_bdm):
- self.assertThat(expected, matchers.IsSubDictOf(got))
-
- def test_validate_bdm(self):
- def fake_get(self, context, res_id):
- return {'id': res_id}
-
- def fake_check_attach(*args, **kwargs):
- pass
-
- self.stubs.Set(cinder.API, 'get', fake_get)
- self.stubs.Set(cinder.API, 'get_snapshot', fake_get)
- self.stubs.Set(cinder.API, 'check_attach',
- fake_check_attach)
-
- volume_id = '55555555-aaaa-bbbb-cccc-555555555555'
- snapshot_id = '66666666-aaaa-bbbb-cccc-555555555555'
- image_id = '77777777-aaaa-bbbb-cccc-555555555555'
-
- instance = self._create_fake_instance()
- instance_type = {'swap': 1, 'ephemeral_gb': 2}
- mappings = [
- {
- 'device_name': '/dev/sdb4',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'device_type': 'disk',
- 'guest_format': 'swap',
- 'boot_index': -1,
- 'volume_size': 1
- },
- {
- 'device_name': '/dev/sda1',
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'device_type': 'disk',
- 'volume_id': volume_id,
- 'guest_format': None,
- 'boot_index': 1,
- 'volume_size': 6
- },
- {
- 'device_name': '/dev/sda2',
- 'source_type': 'snapshot',
- 'destination_type': 'volume',
- 'snapshot_id': snapshot_id,
- 'device_type': 'disk',
- 'guest_format': None,
- 'boot_index': 0,
- 'volume_size': 4
- },
- {
- 'device_name': '/dev/sda3',
- 'source_type': 'image',
- 'destination_type': 'local',
- 'device_type': 'disk',
- 'guest_format': None,
- 'boot_index': 2,
- 'volume_size': 1
- }
- ]
-
- # Make sure it passes at first
- self.compute_api._validate_bdm(self.context, instance,
- instance_type, mappings)
-
- # Boot sequence
- mappings[2]['boot_index'] = 2
- self.assertRaises(exception.InvalidBDMBootSequence,
- self.compute_api._validate_bdm,
- self.context, instance, instance_type,
- mappings)
- mappings[2]['boot_index'] = 0
-
- # number of local block_devices
- self.flags(max_local_block_devices=1)
- self.assertRaises(exception.InvalidBDMLocalsLimit,
- self.compute_api._validate_bdm,
- self.context, instance, instance_type,
- mappings)
- ephemerals = [
- {
- 'device_name': '/dev/vdb',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'device_type': 'disk',
- 'volume_id': volume_id,
- 'guest_format': None,
- 'boot_index': -1,
- 'volume_size': 1
- },
- {
- 'device_name': '/dev/vdc',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'device_type': 'disk',
- 'volume_id': volume_id,
- 'guest_format': None,
- 'boot_index': -1,
- 'volume_size': 1
- }]
-
- self.flags(max_local_block_devices=4)
- # More ephemerals are OK as long as they are not over the size limit
- self.compute_api._validate_bdm(self.context, instance,
- instance_type, mappings + ephemerals)
-
- # Ephemerals over the size limit
- ephemerals[0]['volume_size'] = 3
- self.assertRaises(exception.InvalidBDMEphemeralSize,
- self.compute_api._validate_bdm,
- self.context, instance, instance_type,
- mappings + ephemerals)
- self.assertRaises(exception.InvalidBDMEphemeralSize,
- self.compute_api._validate_bdm,
- self.context, instance, instance_type,
- mappings + [ephemerals[0]])
-
- # Swap over the size limit
- mappings[0]['volume_size'] = 3
- self.assertRaises(exception.InvalidBDMSwapSize,
- self.compute_api._validate_bdm,
- self.context, instance, instance_type,
- mappings)
- mappings[0]['volume_size'] = 1
-
- additional_swap = [
- {
- 'device_name': '/dev/vdb',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'device_type': 'disk',
- 'guest_format': 'swap',
- 'boot_index': -1,
- 'volume_size': 1
- }]
-
- # More than one swap
- self.assertRaises(exception.InvalidBDMFormat,
- self.compute_api._validate_bdm,
- self.context, instance, instance_type,
- mappings + additional_swap)
-
- image_no_size = [
- {
- 'device_name': '/dev/sda4',
- 'source_type': 'image',
- 'image_id': image_id,
- 'destination_type': 'volume',
- 'boot_index': -1,
- 'volume_size': None,
- }]
- self.assertRaises(exception.InvalidBDM,
- self.compute_api._validate_bdm,
- self.context, instance, instance_type,
- mappings + image_no_size)
-
- def test_validate_bdm_media_service_exceptions(self):
- instance_type = {'swap': 1, 'ephemeral_gb': 1}
- all_mappings = [{'id': 1,
- 'no_device': None,
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'snapshot_id': None,
- 'volume_id': self.volume_id,
- 'device_name': 'vda',
- 'boot_index': 0,
- 'delete_on_termination': False}]
-
- # First we test a list of invalid status values that should result
- # in an InvalidVolume exception being raised.
- status_values = (
- # First two check that the status is 'available'.
- ('creating', 'detached'),
- ('error', 'detached'),
- # Checks that the attach_status is 'detached'.
- ('available', 'attached')
- )
-
- for status, attach_status in status_values:
- def fake_volume_get(self, ctxt, volume_id):
- return {'id': volume_id,
- 'status': status,
- 'attach_status': attach_status}
- self.stubs.Set(cinder.API, 'get', fake_volume_get)
- self.assertRaises(exception.InvalidVolume,
- self.compute_api._validate_bdm,
- self.context, self.instance,
- instance_type, all_mappings)
-
- # Now we test a 404 case that results in InvalidBDMVolume.
- def fake_volume_get_not_found(self, context, volume_id):
- raise exception.VolumeNotFound(volume_id)
-
- self.stubs.Set(cinder.API, 'get', fake_volume_get_not_found)
- self.assertRaises(exception.InvalidBDMVolume,
- self.compute_api._validate_bdm,
- self.context, self.instance,
- instance_type, all_mappings)
-
- # Check that the volume status is 'available' and attach_status is
- # 'detached' and accept the request if so
- def fake_volume_get_ok(self, context, volume_id):
- return {'id': volume_id,
- 'status': 'available',
- 'attach_status': 'detached'}
- self.stubs.Set(cinder.API, 'get', fake_volume_get_ok)
-
- self.compute_api._validate_bdm(self.context, self.instance,
- instance_type, all_mappings)
-
- def test_volume_snapshot_create(self):
- self.assertRaises(messaging.ExpectedException,
- self.compute.volume_snapshot_create, self.context,
- self.instance_object, 'fake_id', {})
-
- self.compute = utils.ExceptionHelper(self.compute)
-
- self.assertRaises(NotImplementedError,
- self.compute.volume_snapshot_create, self.context,
- self.instance_object, 'fake_id', {})
-
- def test_volume_snapshot_delete(self):
- self.assertRaises(messaging.ExpectedException,
- self.compute.volume_snapshot_delete, self.context,
- self.instance_object, 'fake_id', 'fake_id2', {})
-
- self.compute = utils.ExceptionHelper(self.compute)
-
- self.assertRaises(NotImplementedError,
- self.compute.volume_snapshot_delete, self.context,
- self.instance_object, 'fake_id', 'fake_id2', {})
-
- @mock.patch.object(cinder.API, 'create',
- side_effect=exception.OverQuota(overs='volumes'))
- def test_prep_block_device_over_quota_failure(self, mock_create):
- instance = self._create_fake_instance()
- bdms = [
- block_device.BlockDeviceDict({
- 'boot_index': 0,
- 'guest_format': None,
- 'connection_info': None,
- 'device_type': u'disk',
- 'source_type': 'image',
- 'destination_type': 'volume',
- 'volume_size': 1,
- 'image_id': 1,
- 'device_name': '/dev/vdb',
- })]
- self.assertRaises(exception.InvalidBDM,
- compute_manager.ComputeManager()._prep_block_device,
- self.context, instance, bdms)
- self.assertTrue(mock_create.called)
-
- @mock.patch.object(nova.virt.block_device, 'get_swap')
- @mock.patch.object(nova.virt.block_device, 'convert_blanks')
- @mock.patch.object(nova.virt.block_device, 'convert_images')
- @mock.patch.object(nova.virt.block_device, 'convert_snapshots')
- @mock.patch.object(nova.virt.block_device, 'convert_volumes')
- @mock.patch.object(nova.virt.block_device, 'convert_ephemerals')
- @mock.patch.object(nova.virt.block_device, 'convert_swap')
- @mock.patch.object(nova.virt.block_device, 'attach_block_devices')
- def test_prep_block_device_with_blanks(self, attach_block_devices,
- convert_swap, convert_ephemerals,
- convert_volumes, convert_snapshots,
- convert_images, convert_blanks,
- get_swap):
- instance = self._create_fake_instance()
- instance['root_device_name'] = '/dev/vda'
- root_volume = objects.BlockDeviceMapping(
- **fake_block_device.FakeDbBlockDeviceDict({
- 'instance_uuid': 'fake-instance',
- 'source_type': 'image',
- 'destination_type': 'volume',
- 'image_id': 'fake-image-id-1',
- 'volume_size': 1,
- 'boot_index': 0}))
- blank_volume1 = objects.BlockDeviceMapping(
- **fake_block_device.FakeDbBlockDeviceDict({
- 'instance_uuid': 'fake-instance',
- 'source_type': 'blank',
- 'destination_type': 'volume',
- 'volume_size': 1,
- 'boot_index': 1}))
- blank_volume2 = objects.BlockDeviceMapping(
- **fake_block_device.FakeDbBlockDeviceDict({
- 'instance_uuid': 'fake-instance',
- 'source_type': 'blank',
- 'destination_type': 'volume',
- 'volume_size': 1,
- 'boot_index': 2}))
- bdms = [blank_volume1, blank_volume2, root_volume]
-
- def fake_attach_block_devices(bdm, *args, **kwargs):
- return bdm
-
- convert_swap.return_value = []
- convert_ephemerals.return_value = []
- convert_volumes.return_value = [blank_volume1, blank_volume2]
- convert_snapshots.return_value = []
- convert_images.return_value = [root_volume]
- convert_blanks.return_value = []
- attach_block_devices.side_effect = fake_attach_block_devices
- get_swap.return_value = []
-
- expected_block_device_info = {
- 'root_device_name': '/dev/vda',
- 'swap': [],
- 'ephemerals': [],
- 'block_device_mapping': bdms
- }
-
- manager = compute_manager.ComputeManager()
- manager.use_legacy_block_device_info = False
- block_device_info = manager._prep_block_device(self.context, instance,
- bdms)
-
- convert_swap.assert_called_once_with(bdms)
- convert_ephemerals.assert_called_once_with(bdms)
- convert_volumes.assert_called_once_with(bdms)
- convert_snapshots.assert_called_once_with(bdms)
- convert_images.assert_called_once_with(bdms)
- convert_blanks.assert_called_once_with(bdms)
-
- self.assertEqual(expected_block_device_info, block_device_info)
- self.assertEqual(4, attach_block_devices.call_count)
- get_swap.assert_called_once_with([])
-
-
-class ComputeTestCase(BaseTestCase):
- def test_wrap_instance_fault(self):
- inst = {"uuid": "fake_uuid"}
-
- called = {'fault_added': False}
-
- def did_it_add_fault(*args):
- called['fault_added'] = True
-
- self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
- did_it_add_fault)
-
- @compute_manager.wrap_instance_fault
- def failer(self2, context, instance):
- raise NotImplementedError()
-
- self.assertRaises(NotImplementedError, failer,
- self.compute, self.context, instance=inst)
-
- self.assertTrue(called['fault_added'])
-
- def test_wrap_instance_fault_instance_in_args(self):
- inst = {"uuid": "fake_uuid"}
-
- called = {'fault_added': False}
-
- def did_it_add_fault(*args):
- called['fault_added'] = True
-
- self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
- did_it_add_fault)
-
- @compute_manager.wrap_instance_fault
- def failer(self2, context, instance):
- raise NotImplementedError()
-
- self.assertRaises(NotImplementedError, failer,
- self.compute, self.context, inst)
-
- self.assertTrue(called['fault_added'])
-
- def test_wrap_instance_fault_no_instance(self):
- inst = {"uuid": "fake_uuid"}
-
- called = {'fault_added': False}
-
- def did_it_add_fault(*args):
- called['fault_added'] = True
-
- self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
- did_it_add_fault)
-
- @compute_manager.wrap_instance_fault
- def failer(self2, context, instance):
- raise exception.InstanceNotFound(instance_id=instance['uuid'])
-
- self.assertRaises(exception.InstanceNotFound, failer,
- self.compute, self.context, inst)
-
- self.assertFalse(called['fault_added'])
-
- @mock.patch.object(objects.InstanceActionEvent, 'event_start')
- @mock.patch.object(objects.InstanceActionEvent,
- 'event_finish_with_failure')
- def test_wrap_instance_event(self, mock_finish, mock_start):
- inst = {"uuid": "fake_uuid"}
-
- @compute_manager.wrap_instance_event
- def fake_event(self, context, instance):
- pass
-
- fake_event(self.compute, self.context, instance=inst)
-
- self.assertTrue(mock_start.called)
- self.assertTrue(mock_finish.called)
-
- @mock.patch.object(objects.InstanceActionEvent, 'event_start')
- @mock.patch.object(objects.InstanceActionEvent,
- 'event_finish_with_failure')
- def test_wrap_instance_event_return(self, mock_finish, mock_start):
- inst = {"uuid": "fake_uuid"}
-
- @compute_manager.wrap_instance_event
- def fake_event(self, context, instance):
- return True
-
- retval = fake_event(self.compute, self.context, instance=inst)
-
- self.assertTrue(retval)
- self.assertTrue(mock_start.called)
- self.assertTrue(mock_finish.called)
-
- @mock.patch.object(objects.InstanceActionEvent, 'event_start')
- @mock.patch.object(objects.InstanceActionEvent,
- 'event_finish_with_failure')
- def test_wrap_instance_event_log_exception(self, mock_finish, mock_start):
- inst = {"uuid": "fake_uuid"}
-
- @compute_manager.wrap_instance_event
- def fake_event(self2, context, instance):
- raise exception.NovaException()
-
- self.assertRaises(exception.NovaException, fake_event,
- self.compute, self.context, instance=inst)
-
- self.assertTrue(mock_start.called)
- self.assertTrue(mock_finish.called)
- args, kwargs = mock_finish.call_args
- self.assertIsInstance(kwargs['exc_val'], exception.NovaException)
-
- def test_object_compat(self):
- db_inst = fake_instance.fake_db_instance()
-
- @compute_manager.object_compat
- def test_fn(_self, context, instance):
- self.assertIsInstance(instance, objects.Instance)
- self.assertEqual(instance.uuid, db_inst['uuid'])
- test_fn(None, self.context, instance=db_inst)
-
- def test_object_compat_more_positional_args(self):
- db_inst = fake_instance.fake_db_instance()
-
- @compute_manager.object_compat
- def test_fn(_self, context, instance, pos_arg_1, pos_arg_2):
- self.assertIsInstance(instance, objects.Instance)
- self.assertEqual(instance.uuid, db_inst['uuid'])
- self.assertEqual(pos_arg_1, 'fake_pos_arg1')
- self.assertEqual(pos_arg_2, 'fake_pos_arg2')
-
- test_fn(None, self.context, db_inst, 'fake_pos_arg1', 'fake_pos_arg2')
-
- def test_create_instance_with_img_ref_associates_config_drive(self):
- # Make sure create associates a config drive.
-
- instance = self._create_fake_instance_obj(
- params={'config_drive': '1234', })
-
- try:
- self.compute.run_instance(self.context, instance, {}, {},
- [], None, None, True, None, False)
- instances = db.instance_get_all(self.context)
- instance = instances[0]
-
- self.assertTrue(instance['config_drive'])
- finally:
- db.instance_destroy(self.context, instance['uuid'])
-
- def test_create_instance_associates_config_drive(self):
- # Make sure create associates a config drive.
-
- instance = self._create_fake_instance_obj(
- params={'config_drive': '1234', })
-
- try:
- self.compute.run_instance(self.context, instance, {}, {},
- [], None, None, True, None, False)
- instances = db.instance_get_all(self.context)
- instance = instances[0]
-
- self.assertTrue(instance['config_drive'])
- finally:
- db.instance_destroy(self.context, instance['uuid'])
-
- def test_create_instance_unlimited_memory(self):
- # Default of memory limit=None is unlimited.
- self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.rt.update_available_resource(self.context.elevated())
- params = {"memory_mb": 999999999999}
- filter_properties = {'limits': {'memory_mb': None}}
- instance = self._create_fake_instance_obj(params)
- self.compute.run_instance(self.context, instance, {},
- filter_properties, [], None, None, True, None, False)
- self.assertEqual(999999999999, self.rt.compute_node['memory_mb_used'])
-
- def test_create_instance_unlimited_disk(self):
- self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.rt.update_available_resource(self.context.elevated())
- params = {"root_gb": 999999999999,
- "ephemeral_gb": 99999999999}
- filter_properties = {'limits': {'disk_gb': None}}
- instance = self._create_fake_instance_obj(params)
- self.compute.run_instance(self.context, instance, {},
- filter_properties, [], None, None, True, None, False)
-
- def test_create_multiple_instances_then_starve(self):
- self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.rt.update_available_resource(self.context.elevated())
- filter_properties = {'limits': {'memory_mb': 4096, 'disk_gb': 1000}}
- params = {"memory_mb": 1024, "root_gb": 128, "ephemeral_gb": 128}
- instance = self._create_fake_instance_obj(params)
- self.compute.run_instance(self.context, instance, {},
- filter_properties, [], None, None, True, None, False)
- self.assertEqual(1024, self.rt.compute_node['memory_mb_used'])
- self.assertEqual(256, self.rt.compute_node['local_gb_used'])
-
- params = {"memory_mb": 2048, "root_gb": 256, "ephemeral_gb": 256}
- instance = self._create_fake_instance_obj(params)
- self.compute.run_instance(self.context, instance, {},
- filter_properties, [], None, None, True, None, False)
- self.assertEqual(3072, self.rt.compute_node['memory_mb_used'])
- self.assertEqual(768, self.rt.compute_node['local_gb_used'])
-
- params = {"memory_mb": 8192, "root_gb": 8192, "ephemeral_gb": 8192}
- instance = self._create_fake_instance_obj(params)
- self.assertRaises(exception.ComputeResourcesUnavailable,
- self.compute.run_instance, self.context, instance,
- {}, filter_properties, [], None, None, True, None, False)
-
- def test_create_multiple_instance_with_neutron_port(self):
- instance_type = flavors.get_default_flavor()
-
- def fake_is_neutron():
- return True
- self.stubs.Set(utils, 'is_neutron', fake_is_neutron)
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(port_id='adadds')])
- self.assertRaises(exception.MultiplePortsNotApplicable,
- self.compute_api.create,
- self.context,
- instance_type=instance_type,
- image_href=None,
- max_count=2,
- requested_networks=requested_networks)
-
- def test_create_instance_with_oversubscribed_ram(self):
- # Test passing of oversubscribed ram policy from the scheduler.
-
- self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.rt.update_available_resource(self.context.elevated())
-
- # get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource(NODENAME)
- total_mem_mb = resources['memory_mb']
-
- oversub_limit_mb = total_mem_mb * 1.5
- instance_mb = int(total_mem_mb * 1.45)
-
- # build an instance, specifying an amount of memory that exceeds
- # total_mem_mb, but is less than the oversubscribed limit:
- params = {"memory_mb": instance_mb, "root_gb": 128,
- "ephemeral_gb": 128}
- instance = self._create_fake_instance_obj(params)
-
- limits = {'memory_mb': oversub_limit_mb}
- filter_properties = {'limits': limits}
- self.compute.run_instance(self.context, instance, {},
- filter_properties, [], None, None, True, None, False)
-
- self.assertEqual(instance_mb, self.rt.compute_node['memory_mb_used'])
-
- def test_create_instance_with_oversubscribed_ram_fail(self):
- """Test passing of oversubscribed ram policy from the scheduler, but
- with insufficient memory.
- """
- self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.rt.update_available_resource(self.context.elevated())
-
- # get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource(NODENAME)
- total_mem_mb = resources['memory_mb']
-
- oversub_limit_mb = total_mem_mb * 1.5
- instance_mb = int(total_mem_mb * 1.55)
-
- # build an instance, specifying an amount of memory that exceeds
- # both total_mem_mb and the oversubscribed limit:
- params = {"memory_mb": instance_mb, "root_gb": 128,
- "ephemeral_gb": 128}
- instance = self._create_fake_instance(params)
-
- filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}
-
- self.assertRaises(exception.ComputeResourcesUnavailable,
- self.compute.run_instance, self.context, instance, {},
- filter_properties, [], None, None, True, None, False)
-
- def test_create_instance_with_oversubscribed_cpu(self):
- # Test passing of oversubscribed cpu policy from the scheduler.
-
- self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.rt.update_available_resource(self.context.elevated())
- limits = {'vcpu': 3}
- filter_properties = {'limits': limits}
-
- # get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource(NODENAME)
- self.assertEqual(1, resources['vcpus'])
-
- # build an instance, specifying an amount of memory that exceeds
- # total_mem_mb, but is less than the oversubscribed limit:
- params = {"memory_mb": 10, "root_gb": 1,
- "ephemeral_gb": 1, "vcpus": 2}
- instance = self._create_fake_instance_obj(params)
- self.compute.run_instance(self.context, instance, {},
- filter_properties, [], None, None, True, None, False)
-
- self.assertEqual(2, self.rt.compute_node['vcpus_used'])
-
- # create one more instance:
- params = {"memory_mb": 10, "root_gb": 1,
- "ephemeral_gb": 1, "vcpus": 1}
- instance = self._create_fake_instance_obj(params)
- self.compute.run_instance(self.context, instance, {},
- filter_properties, [], None, None, True, None, False)
-
- self.assertEqual(3, self.rt.compute_node['vcpus_used'])
-
- # delete the instance:
- instance['vm_state'] = vm_states.DELETED
- self.rt.update_usage(self.context,
- instance=instance)
-
- self.assertEqual(2, self.rt.compute_node['vcpus_used'])
-
- # now oversubscribe vcpus and fail:
- params = {"memory_mb": 10, "root_gb": 1,
- "ephemeral_gb": 1, "vcpus": 2}
- instance = self._create_fake_instance_obj(params)
-
- limits = {'vcpu': 3}
- filter_properties = {'limits': limits}
- self.assertRaises(exception.ComputeResourcesUnavailable,
- self.compute.run_instance, self.context, instance, {},
- filter_properties, [], None, None, True, None, False)
-
- def test_create_instance_with_oversubscribed_disk(self):
- # Test passing of oversubscribed disk policy from the scheduler.
-
- self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.rt.update_available_resource(self.context.elevated())
-
- # get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource(NODENAME)
- total_disk_gb = resources['local_gb']
-
- oversub_limit_gb = total_disk_gb * 1.5
- instance_gb = int(total_disk_gb * 1.45)
-
- # build an instance, specifying an amount of disk that exceeds
- # total_disk_gb, but is less than the oversubscribed limit:
- params = {"root_gb": instance_gb, "memory_mb": 10}
- instance = self._create_fake_instance_obj(params)
-
- limits = {'disk_gb': oversub_limit_gb}
- filter_properties = {'limits': limits}
- self.compute.run_instance(self.context, instance, {},
- filter_properties, [], None, None, True, None, False)
-
- self.assertEqual(instance_gb, self.rt.compute_node['local_gb_used'])
-
- def test_create_instance_with_oversubscribed_disk_fail(self):
- """Test passing of oversubscribed disk policy from the scheduler, but
- with insufficient disk.
- """
- self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
- self.rt.update_available_resource(self.context.elevated())
-
- # get total memory as reported by virt driver:
- resources = self.compute.driver.get_available_resource(NODENAME)
- total_disk_gb = resources['local_gb']
-
- oversub_limit_gb = total_disk_gb * 1.5
- instance_gb = int(total_disk_gb * 1.55)
-
- # build an instance, specifying an amount of disk that exceeds
- # total_disk_gb, but is less than the oversubscribed limit:
- params = {"root_gb": instance_gb, "memory_mb": 10}
- instance = self._create_fake_instance(params)
-
- limits = {'disk_gb': oversub_limit_gb}
- filter_properties = {'limits': limits}
- self.assertRaises(exception.ComputeResourcesUnavailable,
- self.compute.run_instance, self.context, instance, {},
- filter_properties, [], None, None, True, None, False)
-
- def test_create_instance_without_node_param(self):
- instance = self._create_fake_instance_obj({'node': None})
-
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- instances = db.instance_get_all(self.context)
- instance = instances[0]
-
- self.assertEqual(NODENAME, instance['node'])
-
- def test_create_instance_no_image(self):
- # Create instance with no image provided.
- params = {'image_ref': ''}
- instance = self._create_fake_instance_obj(params)
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- self._assert_state({'vm_state': vm_states.ACTIVE,
- 'task_state': None})
-
- def test_default_access_ip(self):
- self.flags(default_access_ip_network_name='test1')
- fake_network.unset_stub_network_methods(self.stubs)
- instance = self._create_fake_instance_obj()
-
- orig_update = self.compute._instance_update
-
- # Make sure the access_ip_* updates happen in the same DB
- # update as the set to ACTIVE.
- def _instance_update(ctxt, instance_uuid, **kwargs):
- if kwargs.get('vm_state', None) == vm_states.ACTIVE:
- self.assertEqual(kwargs['access_ip_v4'], '192.168.1.100')
- self.assertEqual(kwargs['access_ip_v6'], '2001:db8:0:1::1')
- return orig_update(ctxt, instance_uuid, **kwargs)
-
- self.stubs.Set(self.compute, '_instance_update', _instance_update)
-
- try:
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- instances = db.instance_get_all(self.context)
- instance = instances[0]
-
- self.assertEqual(instance['access_ip_v4'], '192.168.1.100')
- self.assertEqual(instance['access_ip_v6'],
- '2001:db8:0:1:dcad:beff:feef:1')
- finally:
- db.instance_destroy(self.context, instance['uuid'])
-
- def test_no_default_access_ip(self):
- instance = self._create_fake_instance_obj()
-
- try:
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- instances = db.instance_get_all(self.context)
- instance = instances[0]
-
- self.assertFalse(instance['access_ip_v4'])
- self.assertFalse(instance['access_ip_v6'])
- finally:
- db.instance_destroy(self.context, instance['uuid'])
-
- def test_fail_to_schedule_persists(self):
- # check the persistence of the ERROR(scheduling) state.
- params = {'vm_state': vm_states.ERROR,
- 'task_state': task_states.SCHEDULING}
- self._create_fake_instance(params=params)
- # check state is failed even after the periodic poll
- self.compute.periodic_tasks(context.get_admin_context())
- self._assert_state({'vm_state': vm_states.ERROR,
- 'task_state': task_states.SCHEDULING})
-
- def test_run_instance_setup_block_device_mapping_fail(self):
- """block device mapping failure test.
-
- Make sure that when there is a block device mapping problem,
- the instance goes to ERROR state, keeping the task state
- """
- def fake(*args, **kwargs):
- raise exception.InvalidBDM()
- self.stubs.Set(nova.compute.manager.ComputeManager,
- '_prep_block_device', fake)
- instance = self._create_fake_instance()
- self.assertRaises(exception.InvalidBDM, self.compute.run_instance,
- self.context, instance=instance, request_spec={},
- filter_properties={}, requested_networks=[],
- injected_files=None, admin_password=None,
- is_first_time=True, node=None,
- legacy_bdm_in_spec=False)
- # check state is failed even after the periodic poll
- self._assert_state({'vm_state': vm_states.ERROR,
- 'task_state': None})
- self.compute.periodic_tasks(context.get_admin_context())
- self._assert_state({'vm_state': vm_states.ERROR,
- 'task_state': None})
-
- @mock.patch('nova.compute.manager.ComputeManager._prep_block_device',
- side_effect=exception.OverQuota(overs='volumes'))
- def test_setup_block_device_over_quota_fail(self, mock_prep_block_dev):
- """block device mapping over quota failure test.
-
- Make sure when we're over volume quota according to Cinder client, the
- appropriate exception is raised and the instances to ERROR state, keep
- the task state.
- """
- instance = self._create_fake_instance()
- self.assertRaises(exception.OverQuota, self.compute.run_instance,
- self.context, instance=instance, request_spec={},
- filter_properties={}, requested_networks=[],
- injected_files=None, admin_password=None,
- is_first_time=True, node=None,
- legacy_bdm_in_spec=False)
- # check state is failed even after the periodic poll
- self._assert_state({'vm_state': vm_states.ERROR,
- 'task_state': None})
- self.compute.periodic_tasks(context.get_admin_context())
- self._assert_state({'vm_state': vm_states.ERROR,
- 'task_state': None})
- self.assertTrue(mock_prep_block_dev.called)
-
- def test_run_instance_spawn_fail(self):
- """spawn failure test.
-
- Make sure that when there is a spawning problem,
- the instance goes to ERROR state, keeping the task state.
- """
- def fake(*args, **kwargs):
- raise test.TestingException()
- self.stubs.Set(self.compute.driver, 'spawn', fake)
- instance = self._create_fake_instance_obj()
- self.assertRaises(test.TestingException, self.compute.run_instance,
- self.context, instance=instance, request_spec={},
- filter_properties={}, requested_networks=[],
- injected_files=None, admin_password=None,
- is_first_time=True, node=None,
- legacy_bdm_in_spec=False)
- # check state is failed even after the periodic poll
- self._assert_state({'vm_state': vm_states.ERROR,
- 'task_state': None})
- self.compute.periodic_tasks(context.get_admin_context())
- self._assert_state({'vm_state': vm_states.ERROR,
- 'task_state': None})
-
- def test_run_instance_dealloc_network_instance_not_found(self):
- """spawn network deallocate test.
-
- Make sure that when an instance is not found during spawn
- that the network is deallocated
- """
- instance = self._create_fake_instance_obj()
-
- def fake(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id="fake")
-
- self.stubs.Set(self.compute.driver, 'spawn', fake)
- self.mox.StubOutWithMock(self.compute, '_deallocate_network')
- self.compute._deallocate_network(mox.IgnoreArg(), mox.IgnoreArg())
- self.mox.ReplayAll()
-
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
-
- def test_run_instance_bails_on_missing_instance(self):
- # Make sure that run_instance() will quickly ignore a deleted instance
- called = {}
- instance = self._create_fake_instance()
-
- def fake_instance_update(self, *a, **args):
- called['instance_update'] = True
- raise exception.InstanceNotFound(instance_id='foo')
- self.stubs.Set(self.compute, '_instance_update', fake_instance_update)
-
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- self.assertIn('instance_update', called)
-
- def test_run_instance_bails_on_deleting_instance(self):
- # Make sure that run_instance() will quickly ignore a deleting instance
- called = {}
- instance = self._create_fake_instance()
-
- def fake_instance_update(self, *a, **args):
- called['instance_update'] = True
- raise exception.UnexpectedDeletingTaskStateError(
- expected='scheduling', actual='deleting')
- self.stubs.Set(self.compute, '_instance_update', fake_instance_update)
-
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- self.assertIn('instance_update', called)
-
- def test_run_instance_bails_on_missing_instance_2(self):
- # Make sure that run_instance() will quickly ignore a deleted instance
- called = {}
- instance = self._create_fake_instance()
-
- def fake_default_block_device_names(self, *a, **args):
- called['default_block_device_names'] = True
- raise exception.InstanceNotFound(instance_id='foo')
- self.stubs.Set(self.compute, '_default_block_device_names',
- fake_default_block_device_names)
-
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- self.assertIn('default_block_device_names', called)
-
- def test_can_terminate_on_error_state(self):
- # Make sure that the instance can be terminated in ERROR state.
- # check failed to schedule --> terminate
- params = {'vm_state': vm_states.ERROR}
- instance = self._create_fake_instance_obj(params=params)
- self.compute.terminate_instance(self.context, instance, [], [])
- self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
- self.context, instance['uuid'])
- # Double check it's not there for admins, either.
- self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
- self.context.elevated(), instance['uuid'])
-
- def test_run_terminate(self):
- # Make sure it is possible to run and terminate instance.
- instance = self._create_fake_instance_obj()
-
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
-
- instances = db.instance_get_all(self.context)
- LOG.info("Running instances: %s", instances)
- self.assertEqual(len(instances), 1)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- instances = db.instance_get_all(self.context)
- LOG.info("After terminating instances: %s", instances)
- self.assertEqual(len(instances), 0)
-
- admin_deleted_context = context.get_admin_context(
- read_deleted="only")
- instance = db.instance_get_by_uuid(admin_deleted_context,
- instance['uuid'])
- self.assertEqual(instance['vm_state'], vm_states.DELETED)
- self.assertIsNone(instance['task_state'])
-
- def test_run_terminate_with_vol_attached(self):
- """Make sure it is possible to run and terminate instance with volume
- attached
- """
- instance = self._create_fake_instance_obj()
-
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
-
- instances = db.instance_get_all(self.context)
- LOG.info("Running instances: %s", instances)
- self.assertEqual(len(instances), 1)
-
- def fake_check_attach(*args, **kwargs):
- pass
-
- def fake_reserve_volume(*args, **kwargs):
- pass
-
- def fake_volume_get(self, context, volume_id):
- return {'id': volume_id}
-
- def fake_terminate_connection(self, context, volume_id, connector):
- pass
-
- def fake_detach(self, context, volume_id):
- pass
-
- bdms = []
-
- def fake_rpc_reserve_block_device_name(self, context, instance, device,
- volume_id, **kwargs):
- bdm = objects.BlockDeviceMapping(
- **{'source_type': 'volume',
- 'destination_type': 'volume',
- 'volume_id': 1,
- 'instance_uuid': instance['uuid'],
- 'device_name': '/dev/vdc'})
- bdm.create(context)
- bdms.append(bdm)
- return bdm
-
- self.stubs.Set(cinder.API, 'get', fake_volume_get)
- self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
- self.stubs.Set(cinder.API, 'reserve_volume',
- fake_reserve_volume)
- self.stubs.Set(cinder.API, 'terminate_connection',
- fake_terminate_connection)
- self.stubs.Set(cinder.API, 'detach', fake_detach)
- self.stubs.Set(compute_rpcapi.ComputeAPI,
- 'reserve_block_device_name',
- fake_rpc_reserve_block_device_name)
-
- self.compute_api.attach_volume(self.context, instance, 1,
- '/dev/vdc')
-
- self.compute.terminate_instance(self.context,
- instance, bdms, [])
-
- instances = db.instance_get_all(self.context)
- LOG.info("After terminating instances: %s", instances)
- self.assertEqual(len(instances), 0)
- bdms = db.block_device_mapping_get_all_by_instance(self.context,
- instance['uuid'])
- self.assertEqual(len(bdms), 0)
-
- def test_run_terminate_no_image(self):
- """Make sure instance started without image (from volume)
- can be termintad without issues
- """
- params = {'image_ref': ''}
- instance = self._create_fake_instance_obj(params)
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- self._assert_state({'vm_state': vm_states.ACTIVE,
- 'task_state': None})
-
- self.compute.terminate_instance(self.context, instance, [], [])
- instances = db.instance_get_all(self.context)
- self.assertEqual(len(instances), 0)
-
- def test_terminate_no_network(self):
- # This is as reported in LP bug 1008875
- instance = self._create_fake_instance_obj()
-
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
-
- instances = db.instance_get_all(self.context)
- LOG.info("Running instances: %s", instances)
- self.assertEqual(len(instances), 1)
- self.mox.ReplayAll()
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- instances = db.instance_get_all(self.context)
- LOG.info("After terminating instances: %s", instances)
- self.assertEqual(len(instances), 0)
-
- def test_run_terminate_timestamps(self):
- # Make sure timestamps are set for launched and destroyed.
- instance = self._create_fake_instance_obj()
- instance['launched_at'] = None
- self.assertIsNone(instance['launched_at'])
- self.assertIsNone(instance['deleted_at'])
- launch = timeutils.utcnow()
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- instance.refresh()
- self.assertTrue(instance['launched_at'].replace(tzinfo=None) > launch)
- self.assertIsNone(instance['deleted_at'])
- terminate = timeutils.utcnow()
- self.compute.terminate_instance(self.context, instance, [], [])
-
- with utils.temporary_mutation(self.context, read_deleted='only'):
- instance = db.instance_get_by_uuid(self.context,
- instance['uuid'])
- self.assertTrue(instance['launched_at'].replace(
- tzinfo=None) < terminate)
- self.assertTrue(instance['deleted_at'].replace(
- tzinfo=None) > terminate)
-
- def test_run_terminate_deallocate_net_failure_sets_error_state(self):
- instance = self._create_fake_instance_obj()
-
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
-
- instances = db.instance_get_all(self.context)
- LOG.info("Running instances: %s", instances)
- self.assertEqual(len(instances), 1)
-
- def _fake_deallocate_network(*args, **kwargs):
- raise test.TestingException()
-
- self.stubs.Set(self.compute, '_deallocate_network',
- _fake_deallocate_network)
-
- try:
- self.compute.terminate_instance(self.context, instance, [], [])
- except test.TestingException:
- pass
-
- instance = db.instance_get_by_uuid(self.context, instance['uuid'])
- self.assertEqual(instance['vm_state'], vm_states.ERROR)
-
- def test_stop(self):
- # Ensure instance can be stopped.
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- db.instance_update(self.context, instance['uuid'],
- {"task_state": task_states.POWERING_OFF})
- inst_uuid = instance['uuid']
- extra = ['system_metadata', 'metadata']
- inst_obj = objects.Instance.get_by_uuid(self.context,
- inst_uuid,
- expected_attrs=extra)
- self.compute.stop_instance(self.context, instance=inst_obj)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_start(self):
- # Ensure instance can be started.
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- db.instance_update(self.context, instance['uuid'],
- {"task_state": task_states.POWERING_OFF})
- extra = ['system_metadata', 'metadata']
- inst_uuid = instance['uuid']
- inst_obj = objects.Instance.get_by_uuid(self.context,
- inst_uuid,
- expected_attrs=extra)
- self.compute.stop_instance(self.context, instance=inst_obj)
- inst_obj.task_state = task_states.POWERING_ON
- inst_obj.save(self.context)
- self.compute.start_instance(self.context, instance=inst_obj)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_stop_start_no_image(self):
- params = {'image_ref': ''}
- instance = self._create_fake_instance_obj(params)
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- db.instance_update(self.context, instance['uuid'],
- {"task_state": task_states.POWERING_OFF})
- extra = ['system_metadata', 'metadata']
- inst_uuid = instance['uuid']
- inst_obj = objects.Instance.get_by_uuid(self.context,
- inst_uuid,
- expected_attrs=extra)
- self.compute.stop_instance(self.context, instance=inst_obj)
- inst_obj.task_state = task_states.POWERING_ON
- inst_obj.save(self.context)
- self.compute.start_instance(self.context, instance=inst_obj)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_rescue(self):
- # Ensure instance can be rescued and unrescued.
-
- called = {'rescued': False,
- 'unrescued': False}
-
- def fake_rescue(self, context, instance_ref, network_info, image_meta,
- rescue_password):
- called['rescued'] = True
-
- self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)
-
- def fake_unrescue(self, instance_ref, network_info):
- called['unrescued'] = True
-
- self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
- fake_unrescue)
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
-
- instance.task_state = task_states.RESCUING
- instance.save()
- self.compute.rescue_instance(self.context, instance, None)
- self.assertTrue(called['rescued'])
- instance.task_state = task_states.UNRESCUING
- instance.save()
- self.compute.unrescue_instance(self.context, instance)
- self.assertTrue(called['unrescued'])
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_rescue_notifications(self):
- # Ensure notifications on instance rescue.
- def fake_rescue(self, context, instance_ref, network_info, image_meta,
- rescue_password):
- pass
- self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
-
- fake_notifier.NOTIFICATIONS = []
- instance.task_state = task_states.RESCUING
- instance.save()
- self.compute.rescue_instance(self.context, instance, None)
-
- expected_notifications = ['compute.instance.rescue.start',
- 'compute.instance.exists',
- 'compute.instance.rescue.end']
- self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS],
- expected_notifications)
- for n, msg in enumerate(fake_notifier.NOTIFICATIONS):
- self.assertEqual(msg.event_type, expected_notifications[n])
- self.assertEqual(msg.priority, 'INFO')
- payload = msg.payload
- self.assertEqual(payload['tenant_id'], self.project_id)
- self.assertEqual(payload['user_id'], self.user_id)
- self.assertEqual(payload['instance_id'], instance.uuid)
- self.assertEqual(payload['instance_type'], 'm1.tiny')
- type_id = flavors.get_flavor_by_name('m1.tiny')['id']
- self.assertEqual(str(payload['instance_type_id']), str(type_id))
- self.assertIn('display_name', payload)
- self.assertIn('created_at', payload)
- self.assertIn('launched_at', payload)
- image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
- self.assertEqual(payload['image_ref_url'], image_ref_url)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertIn('rescue_image_name', msg.payload)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_unrescue_notifications(self):
- # Ensure notifications on instance rescue.
- def fake_unrescue(self, instance_ref, network_info):
- pass
- self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
- fake_unrescue)
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
-
- fake_notifier.NOTIFICATIONS = []
- instance.task_state = task_states.UNRESCUING
- instance.save()
- self.compute.unrescue_instance(self.context, instance)
-
- expected_notifications = ['compute.instance.unrescue.start',
- 'compute.instance.unrescue.end']
- self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS],
- expected_notifications)
- for n, msg in enumerate(fake_notifier.NOTIFICATIONS):
- self.assertEqual(msg.event_type, expected_notifications[n])
- self.assertEqual(msg.priority, 'INFO')
- payload = msg.payload
- self.assertEqual(payload['tenant_id'], self.project_id)
- self.assertEqual(payload['user_id'], self.user_id)
- self.assertEqual(payload['instance_id'], instance.uuid)
- self.assertEqual(payload['instance_type'], 'm1.tiny')
- type_id = flavors.get_flavor_by_name('m1.tiny')['id']
- self.assertEqual(str(payload['instance_type_id']), str(type_id))
- self.assertIn('display_name', payload)
- self.assertIn('created_at', payload)
- self.assertIn('launched_at', payload)
- image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
- self.assertEqual(payload['image_ref_url'], image_ref_url)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_rescue_handle_err(self):
- # If the driver fails to rescue, instance state should remain the same
- # and the exception should be converted to InstanceNotRescuable
- inst_obj = self._create_fake_instance_obj()
- self.mox.StubOutWithMock(self.compute, '_get_rescue_image')
- self.mox.StubOutWithMock(nova.virt.fake.FakeDriver, 'rescue')
-
- self.compute._get_rescue_image(
- mox.IgnoreArg(), inst_obj, mox.IgnoreArg()).AndReturn({})
- nova.virt.fake.FakeDriver.rescue(
- mox.IgnoreArg(), inst_obj, [], mox.IgnoreArg(), 'password'
- ).AndRaise(RuntimeError("Try again later"))
-
- self.mox.ReplayAll()
-
- expected_message = ('Instance %s cannot be rescued: '
- 'Driver Error: Try again later' % inst_obj.uuid)
- inst_obj.vm_state = 'some_random_state'
-
- with testtools.ExpectedException(
- exception.InstanceNotRescuable, expected_message):
- self.compute.rescue_instance(
- self.context, instance=inst_obj,
- rescue_password='password')
-
- self.assertEqual('some_random_state', inst_obj.vm_state)
-
- @mock.patch.object(nova.compute.utils, "get_image_metadata")
- @mock.patch.object(nova.virt.fake.FakeDriver, "rescue")
- def test_rescue_with_image_specified(self, mock_rescue,
- mock_get_image_metadata):
-
- image_ref = "image-ref"
- rescue_image_meta = {}
- params = {"task_state": task_states.RESCUING}
- instance = self._create_fake_instance_obj(params=params)
-
- ctxt = context.get_admin_context()
- mock_context = mock.Mock()
- mock_context.elevated.return_value = ctxt
-
- mock_get_image_metadata.return_value = rescue_image_meta
-
- self.compute.rescue_instance(mock_context, instance=instance,
- rescue_password="password", rescue_image_ref=image_ref)
-
- mock_get_image_metadata.assert_called_with(ctxt,
- self.compute.image_api,
- image_ref, instance)
- mock_rescue.assert_called_with(ctxt, instance, [],
- rescue_image_meta, 'password')
- self.compute.terminate_instance(ctxt, instance, [], [])
-
- @mock.patch.object(nova.compute.utils, "get_image_metadata")
- @mock.patch.object(nova.virt.fake.FakeDriver, "rescue")
- def test_rescue_with_base_image_when_image_not_specified(self,
- mock_rescue, mock_get_image_metadata):
-
- image_ref = "image-ref"
- system_meta = {"image_base_image_ref": image_ref}
- rescue_image_meta = {}
- params = {"task_state": task_states.RESCUING,
- "system_metadata": system_meta}
- instance = self._create_fake_instance_obj(params=params)
-
- ctxt = context.get_admin_context()
- mock_context = mock.Mock()
- mock_context.elevated.return_value = ctxt
-
- mock_get_image_metadata.return_value = rescue_image_meta
-
- self.compute.rescue_instance(mock_context, instance=instance,
- rescue_password="password")
-
- mock_get_image_metadata.assert_called_with(ctxt,
- self.compute.image_api,
- image_ref, instance)
- mock_rescue.assert_called_with(ctxt, instance, [],
- rescue_image_meta, 'password')
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_power_on(self):
- # Ensure instance can be powered on.
-
- called = {'power_on': False}
-
- def fake_driver_power_on(self, context, instance, network_info,
- block_device_info):
- called['power_on'] = True
-
- self.stubs.Set(nova.virt.fake.FakeDriver, 'power_on',
- fake_driver_power_on)
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- extra = ['system_metadata', 'metadata']
- inst_obj = objects.Instance.get_by_uuid(self.context,
- instance['uuid'],
- expected_attrs=extra)
- inst_obj.task_state = task_states.POWERING_ON
- inst_obj.save(self.context)
- self.compute.start_instance(self.context, instance=inst_obj)
- self.assertTrue(called['power_on'])
- self.compute.terminate_instance(self.context, inst_obj, [], [])
-
- def test_power_off(self):
- # Ensure instance can be powered off.
-
- called = {'power_off': False}
-
- def fake_driver_power_off(self, instance,
- shutdown_timeout, shutdown_attempts):
- called['power_off'] = True
-
- self.stubs.Set(nova.virt.fake.FakeDriver, 'power_off',
- fake_driver_power_off)
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- extra = ['system_metadata', 'metadata']
- inst_obj = objects.Instance.get_by_uuid(self.context,
- instance['uuid'],
- expected_attrs=extra)
- inst_obj.task_state = task_states.POWERING_OFF
- inst_obj.save(self.context)
- self.compute.stop_instance(self.context, instance=inst_obj)
- self.assertTrue(called['power_off'])
- self.compute.terminate_instance(self.context, inst_obj, [], [])
-
- def test_pause(self):
- # Ensure instance can be paused and unpaused.
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context,
- instance, {}, {}, [], None, None, True,
- None, False)
- instance.task_state = task_states.PAUSING
- instance.save()
- fake_notifier.NOTIFICATIONS = []
- self.compute.pause_instance(self.context, instance=instance)
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type,
- 'compute.instance.pause.start')
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.event_type,
- 'compute.instance.pause.end')
- instance.task_state = task_states.UNPAUSING
- instance.save()
- fake_notifier.NOTIFICATIONS = []
- self.compute.unpause_instance(self.context, instance=instance)
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type,
- 'compute.instance.unpause.start')
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.event_type,
- 'compute.instance.unpause.end')
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_suspend(self):
- # ensure instance can be suspended and resumed.
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- instance.task_state = task_states.SUSPENDING
- instance.save()
- self.compute.suspend_instance(self.context, instance)
- instance.task_state = task_states.RESUMING
- instance.save()
- self.compute.resume_instance(self.context, instance)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_suspend_error(self):
- # Ensure vm_state is ERROR when suspend error occurs.
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
-
- with mock.patch.object(self.compute.driver, 'suspend',
- side_effect=test.TestingException):
- self.assertRaises(test.TestingException,
- self.compute.suspend_instance,
- self.context,
- instance=instance)
-
- instance = db.instance_get_by_uuid(self.context, instance.uuid)
- self.assertEqual(vm_states.ERROR, instance.vm_state)
-
- def test_suspend_not_implemented(self):
- # Ensure expected exception is raised and the vm_state of instance
- # restore to original value if suspend is not implemented by driver
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
-
- with mock.patch.object(self.compute.driver, 'suspend',
- side_effect=NotImplementedError('suspend test')):
- self.assertRaises(NotImplementedError,
- self.compute.suspend_instance,
- self.context,
- instance=instance)
-
- instance = db.instance_get_by_uuid(self.context, instance.uuid)
- self.assertEqual(vm_states.ACTIVE, instance.vm_state)
-
- def test_suspend_rescued(self):
- # ensure rescued instance can be suspended and resumed.
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
-
- instance.vm_state = vm_states.RESCUED
- instance.task_state = task_states.SUSPENDING
- instance.save()
-
- self.compute.suspend_instance(self.context, instance)
- self.assertEqual(instance.vm_state, vm_states.SUSPENDED)
-
- instance.task_state = task_states.RESUMING
- instance.save()
- self.compute.resume_instance(self.context, instance)
- self.assertEqual(instance.vm_state, vm_states.RESCUED)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_resume_no_old_state(self):
- # ensure a suspended instance with no old_vm_state is resumed to the
- # ACTIVE state
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
-
- instance.vm_state = vm_states.SUSPENDED
- instance.task_state = task_states.RESUMING
- instance.save()
-
- self.compute.resume_instance(self.context, instance)
- self.assertEqual(instance.vm_state, vm_states.ACTIVE)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_rebuild(self):
- # Ensure instance can be rebuilt.
- instance = self._create_fake_instance_obj()
- image_ref = instance['image_ref']
- sys_metadata = db.instance_system_metadata_get(self.context,
- instance['uuid'])
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- db.instance_update(self.context, instance['uuid'],
- {"task_state": task_states.REBUILDING})
- self.compute.rebuild_instance(self.context, instance,
- image_ref, image_ref,
- injected_files=[],
- new_pass="new_password",
- orig_sys_metadata=sys_metadata,
- bdms=[], recreate=False,
- on_shared_storage=False)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_rebuild_driver(self):
- # Make sure virt drivers can override default rebuild
- called = {'rebuild': False}
-
- def fake(**kwargs):
- instance = kwargs['instance']
- instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
- instance.save(expected_task_state=[task_states.REBUILDING])
- instance.task_state = task_states.REBUILD_SPAWNING
- instance.save(
- expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])
- called['rebuild'] = True
-
- self.stubs.Set(self.compute.driver, 'rebuild', fake)
- instance = self._create_fake_instance_obj()
- image_ref = instance['image_ref']
- sys_metadata = db.instance_system_metadata_get(self.context,
- instance['uuid'])
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- db.instance_update(self.context, instance['uuid'],
- {"task_state": task_states.REBUILDING})
- self.compute.rebuild_instance(self.context, instance,
- image_ref, image_ref,
- injected_files=[],
- new_pass="new_password",
- orig_sys_metadata=sys_metadata,
- bdms=[], recreate=False,
- on_shared_storage=False)
- self.assertTrue(called['rebuild'])
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_rebuild_no_image(self):
- # Ensure instance can be rebuilt when started with no image.
- params = {'image_ref': ''}
- instance = self._create_fake_instance_obj(params)
- sys_metadata = db.instance_system_metadata_get(self.context,
- instance['uuid'])
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- db.instance_update(self.context, instance['uuid'],
- {"task_state": task_states.REBUILDING})
- self.compute.rebuild_instance(self.context, instance,
- '', '', injected_files=[],
- new_pass="new_password",
- orig_sys_metadata=sys_metadata, bdms=[],
- recreate=False, on_shared_storage=False)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_rebuild_launched_at_time(self):
- # Ensure instance can be rebuilt.
- old_time = datetime.datetime(2012, 4, 1)
- cur_time = datetime.datetime(2012, 12, 21, 12, 21)
- timeutils.set_time_override(old_time)
- instance = self._create_fake_instance_obj()
- image_ref = instance['image_ref']
-
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- timeutils.set_time_override(cur_time)
- db.instance_update(self.context, instance['uuid'],
- {"task_state": task_states.REBUILDING})
- self.compute.rebuild_instance(self.context, instance,
- image_ref, image_ref,
- injected_files=[],
- new_pass="new_password",
- orig_sys_metadata={},
- bdms=[], recreate=False,
- on_shared_storage=False)
- instance.refresh()
- self.assertEqual(cur_time,
- instance['launched_at'].replace(tzinfo=None))
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_rebuild_with_injected_files(self):
- # Ensure instance can be rebuilt with injected files.
- injected_files = [
- ('/a/b/c', base64.b64encode('foobarbaz')),
- ]
-
- self.decoded_files = [
- ('/a/b/c', 'foobarbaz'),
- ]
-
- def _spawn(context, instance, image_meta, injected_files,
- admin_password, network_info, block_device_info):
- self.assertEqual(self.decoded_files, injected_files)
-
- self.stubs.Set(self.compute.driver, 'spawn', _spawn)
- instance = self._create_fake_instance_obj()
- image_ref = instance['image_ref']
- sys_metadata = db.instance_system_metadata_get(self.context,
- instance['uuid'])
- db.instance_update(self.context, instance['uuid'],
- {"task_state": task_states.REBUILDING})
- self.compute.rebuild_instance(self.context, instance,
- image_ref, image_ref,
- injected_files=injected_files,
- new_pass="new_password",
- orig_sys_metadata=sys_metadata,
- bdms=[], recreate=False,
- on_shared_storage=False)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def _test_reboot(self, soft,
- test_delete=False, test_unrescue=False,
- fail_reboot=False, fail_running=False):
-
- reboot_type = soft and 'SOFT' or 'HARD'
- task_pending = (soft and task_states.REBOOT_PENDING
- or task_states.REBOOT_PENDING_HARD)
- task_started = (soft and task_states.REBOOT_STARTED
- or task_states.REBOOT_STARTED_HARD)
- expected_task = (soft and task_states.REBOOTING
- or task_states.REBOOTING_HARD)
- expected_tasks = (soft and (task_states.REBOOTING,
- task_states.REBOOT_PENDING,
- task_states.REBOOT_STARTED)
- or (task_states.REBOOTING_HARD,
- task_states.REBOOT_PENDING_HARD,
- task_states.REBOOT_STARTED_HARD))
-
- # This is a true unit test, so we don't need the network stubs.
- fake_network.unset_stub_network_methods(self.stubs)
-
- self.mox.StubOutWithMock(self.compute,
- '_get_instance_block_device_info')
- self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
- self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
- self.mox.StubOutWithMock(self.compute, '_instance_update')
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
- self.mox.StubOutWithMock(self.compute, '_get_power_state')
- self.mox.StubOutWithMock(self.compute.driver, 'reboot')
-
- # FIXME(comstud): I don't feel like the context needs to
- # be elevated at all. Hopefully remove elevated from
- # reboot_instance and remove the stub here in a future patch.
- # econtext would just become self.context below then.
- econtext = self.context.elevated()
-
- db_instance = fake_instance.fake_db_instance(
- **dict(uuid='fake-instance',
- power_state=power_state.NOSTATE,
- vm_state=vm_states.ACTIVE,
- task_state=expected_task,
- launched_at=timeutils.utcnow()))
- instance = objects.Instance._from_db_object(econtext,
- objects.Instance(),
- db_instance)
-
- updated_dbinstance1 = fake_instance.fake_db_instance(
- **dict(uuid='updated-instance1',
- power_state=10003,
- vm_state=vm_states.ACTIVE,
- task_state=expected_task,
- launched_at=timeutils.utcnow()))
- updated_dbinstance2 = fake_instance.fake_db_instance(
- **dict(uuid='updated-instance2',
- power_state=10003,
- vm_state=vm_states.ACTIVE,
- task_state=expected_task,
- launched_at=timeutils.utcnow()))
-
- if test_unrescue:
- instance.vm_state = vm_states.RESCUED
- instance.obj_reset_changes()
-
- fake_nw_model = network_model.NetworkInfo()
-
- fake_block_dev_info = 'fake_block_dev_info'
- fake_power_state1 = 10001
- fake_power_state2 = power_state.RUNNING
- fake_power_state3 = 10002
-
- # Beginning of calls we expect.
-
- self.mox.StubOutWithMock(self.context, 'elevated')
- self.context.elevated().AndReturn(econtext)
-
- self.compute._get_instance_block_device_info(
- econtext, instance).AndReturn(fake_block_dev_info)
- self.compute._get_instance_nw_info(econtext,
- instance).AndReturn(
- fake_nw_model)
- self.compute._notify_about_instance_usage(econtext,
- instance,
- 'reboot.start')
- self.compute._get_power_state(econtext,
- instance).AndReturn(fake_power_state1)
- db.instance_update_and_get_original(econtext, instance['uuid'],
- {'task_state': task_pending,
- 'expected_task_state': expected_tasks,
- 'power_state': fake_power_state1},
- update_cells=False,
- columns_to_join=['system_metadata']
- ).AndReturn((None,
- updated_dbinstance1))
- expected_nw_info = fake_nw_model
- db.instance_update_and_get_original(econtext,
- updated_dbinstance1['uuid'],
- {'task_state': task_started,
- 'expected_task_state': task_pending},
- update_cells=False,
- columns_to_join=['system_metadata']
- ).AndReturn((None,
- updated_dbinstance1))
-
- # Annoying. driver.reboot is wrapped in a try/except, and
- # doesn't re-raise. It eats exception generated by mox if
- # this is called with the wrong args, so we have to hack
- # around it.
- reboot_call_info = {}
- expected_call_info = {
- 'args': (econtext, instance, expected_nw_info,
- reboot_type),
- 'kwargs': {'block_device_info': fake_block_dev_info}}
- fault = exception.InstanceNotFound(instance_id='instance-0000')
-
- def fake_reboot(*args, **kwargs):
- reboot_call_info['args'] = args
- reboot_call_info['kwargs'] = kwargs
-
- # NOTE(sirp): Since `bad_volumes_callback` is a function defined
- # within `reboot_instance`, we don't have access to its value and
- # can't stub it out, thus we skip that comparison.
- kwargs.pop('bad_volumes_callback')
- if fail_reboot:
- raise fault
-
- self.stubs.Set(self.compute.driver, 'reboot', fake_reboot)
-
- # Power state should be updated again
- if not fail_reboot or fail_running:
- new_power_state = fake_power_state2
- self.compute._get_power_state(econtext,
- instance).AndReturn(fake_power_state2)
- else:
- new_power_state = fake_power_state3
- self.compute._get_power_state(econtext,
- instance).AndReturn(fake_power_state3)
-
- if test_delete:
- fault = exception.InstanceNotFound(
- instance_id=instance['uuid'])
- db.instance_update_and_get_original(
- econtext, updated_dbinstance1['uuid'],
- {'power_state': new_power_state,
- 'task_state': None,
- 'vm_state': vm_states.ACTIVE},
- update_cells=False,
- columns_to_join=['system_metadata'],
- ).AndRaise(fault)
- self.compute._notify_about_instance_usage(
- econtext,
- instance,
- 'reboot.end')
- elif fail_reboot and not fail_running:
- db.instance_update_and_get_original(
- econtext, updated_dbinstance1['uuid'],
- {'vm_state': vm_states.ERROR},
- update_cells=False,
- columns_to_join=['system_metadata'],
- ).AndRaise(fault)
- else:
- db.instance_update_and_get_original(
- econtext, updated_dbinstance1['uuid'],
- {'power_state': new_power_state,
- 'task_state': None,
- 'vm_state': vm_states.ACTIVE},
- update_cells=False,
- columns_to_join=['system_metadata'],
- ).AndReturn((None, updated_dbinstance2))
- if fail_running:
- self.compute._notify_about_instance_usage(econtext, instance,
- 'reboot.error', fault=fault)
- self.compute._notify_about_instance_usage(
- econtext,
- instance,
- 'reboot.end')
-
- self.mox.ReplayAll()
-
- if not fail_reboot or fail_running:
- self.compute.reboot_instance(self.context, instance=instance,
- block_device_info=None,
- reboot_type=reboot_type)
- else:
- self.assertRaises(exception.InstanceNotFound,
- self.compute.reboot_instance,
- self.context, instance=instance,
- block_device_info=None,
- reboot_type=reboot_type)
-
- self.assertEqual(expected_call_info, reboot_call_info)
-
- def test_reboot_soft(self):
- self._test_reboot(True)
-
- def test_reboot_soft_and_delete(self):
- self._test_reboot(True, True)
-
- def test_reboot_soft_and_rescued(self):
- self._test_reboot(True, False, True)
-
- def test_reboot_soft_and_delete_and_rescued(self):
- self._test_reboot(True, True, True)
-
- def test_reboot_hard(self):
- self._test_reboot(False)
-
- def test_reboot_hard_and_delete(self):
- self._test_reboot(False, True)
-
- def test_reboot_hard_and_rescued(self):
- self._test_reboot(False, False, True)
-
- def test_reboot_hard_and_delete_and_rescued(self):
- self._test_reboot(False, True, True)
-
- def test_reboot_fail(self):
- self._test_reboot(False, fail_reboot=True)
-
- def test_reboot_fail_running(self):
- self._test_reboot(False, fail_reboot=True,
- fail_running=True)
-
- def test_get_instance_block_device_info_source_image(self):
- bdms = block_device_obj.block_device_make_list(self.context,
- [fake_block_device.FakeDbBlockDeviceDict({
- 'id': 3,
- 'volume_id': u'4cbc9e62-6ba0-45dd-b647-934942ead7d6',
- 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/vda',
- 'connection_info': '{"driver_volume_type": "rbd"}',
- 'source_type': 'image',
- 'destination_type': 'volume',
- 'image_id': 'fake-image-id-1',
- 'boot_index': 0
- })])
-
- with (mock.patch.object(
- objects.BlockDeviceMappingList,
- 'get_by_instance_uuid',
- return_value=bdms)
- ) as mock_get_by_instance:
- block_device_info = (
- self.compute._get_instance_block_device_info(
- self.context, self._create_fake_instance())
- )
- expected = {
- 'swap': None,
- 'ephemerals': [],
- 'block_device_mapping': [{
- 'connection_info': {
- 'driver_volume_type': 'rbd'
- },
- 'mount_device': '/dev/vda',
- 'delete_on_termination': False
- }]
- }
- self.assertTrue(mock_get_by_instance.called)
- self.assertEqual(block_device_info, expected)
-
- def test_get_instance_block_device_info_passed_bdms(self):
- bdms = block_device_obj.block_device_make_list(self.context,
- [fake_block_device.FakeDbBlockDeviceDict({
- 'id': 3,
- 'volume_id': u'4cbc9e62-6ba0-45dd-b647-934942ead7d6',
- 'device_name': '/dev/vdd',
- 'connection_info': '{"driver_volume_type": "rbd"}',
- 'source_type': 'volume',
- 'destination_type': 'volume'})
- ])
- with (mock.patch.object(
- objects.BlockDeviceMappingList,
- 'get_by_instance_uuid')) as mock_get_by_instance:
- block_device_info = (
- self.compute._get_instance_block_device_info(
- self.context, self._create_fake_instance(), bdms=bdms)
- )
- expected = {
- 'swap': None,
- 'ephemerals': [],
- 'block_device_mapping': [{
- 'connection_info': {
- 'driver_volume_type': 'rbd'
- },
- 'mount_device': '/dev/vdd',
- 'delete_on_termination': False
- }]
- }
- self.assertFalse(mock_get_by_instance.called)
- self.assertEqual(block_device_info, expected)
-
- def test_get_instance_block_device_info_swap_and_ephemerals(self):
- instance = self._create_fake_instance()
-
- ephemeral0 = fake_block_device.FakeDbBlockDeviceDict({
- 'id': 1, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/vdb',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'device_type': 'disk',
- 'disk_bus': 'virtio',
- 'delete_on_termination': True,
- 'guest_format': None,
- 'volume_size': 1,
- 'boot_index': -1
- })
- ephemeral1 = fake_block_device.FakeDbBlockDeviceDict({
- 'id': 2, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/vdc',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'device_type': 'disk',
- 'disk_bus': 'virtio',
- 'delete_on_termination': True,
- 'guest_format': None,
- 'volume_size': 2,
- 'boot_index': -1
- })
- swap = fake_block_device.FakeDbBlockDeviceDict({
- 'id': 3, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/vdd',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'device_type': 'disk',
- 'disk_bus': 'virtio',
- 'delete_on_termination': True,
- 'guest_format': 'swap',
- 'volume_size': 1,
- 'boot_index': -1
- })
-
- bdms = block_device_obj.block_device_make_list(self.context,
- [swap, ephemeral0, ephemeral1])
-
- with (
- mock.patch.object(objects.BlockDeviceMappingList,
- 'get_by_instance_uuid', return_value=bdms)
- ) as mock_get_by_instance_uuid:
- expected_block_device_info = {
- 'swap': {'device_name': '/dev/vdd', 'swap_size': 1},
- 'ephemerals': [{'device_name': '/dev/vdb', 'num': 0, 'size': 1,
- 'virtual_name': 'ephemeral0'},
- {'device_name': '/dev/vdc', 'num': 1, 'size': 2,
- 'virtual_name': 'ephemeral1'}],
- 'block_device_mapping': []
- }
-
- block_device_info = (
- self.compute._get_instance_block_device_info(
- self.context, instance)
- )
-
- mock_get_by_instance_uuid.assert_called_once_with(self.context,
- instance['uuid'])
- self.assertEqual(expected_block_device_info, block_device_info)
-
- def test_inject_network_info(self):
- # Ensure we can inject network info.
- called = {'inject': False}
-
- def fake_driver_inject_network(self, instance, network_info):
- called['inject'] = True
-
- self.stubs.Set(nova.virt.fake.FakeDriver, 'inject_network_info',
- fake_driver_inject_network)
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- self.compute.inject_network_info(self.context, instance=instance)
- self.assertTrue(called['inject'])
- self.compute.terminate_instance(self.context,
- instance, [], [])
-
- def test_reset_network(self):
- # Ensure we can reset networking on an instance.
- called = {'count': 0}
-
- def fake_driver_reset_network(self, instance):
- called['count'] += 1
-
- self.stubs.Set(nova.virt.fake.FakeDriver, 'reset_network',
- fake_driver_reset_network)
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
-
- self.compute.reset_network(self.context, instance)
-
- self.assertEqual(called['count'], 1)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def _get_snapshotting_instance(self):
- # Ensure instance can be snapshotted.
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
- instance.save()
- return instance
-
- def test_snapshot(self):
- inst_obj = self._get_snapshotting_instance()
- self.compute.snapshot_instance(self.context, image_id='fakesnap',
- instance=inst_obj)
-
- def test_snapshot_no_image(self):
- inst_obj = self._get_snapshotting_instance()
- inst_obj.image_ref = ''
- inst_obj.save()
- self.compute.snapshot_instance(self.context, image_id='fakesnap',
- instance=inst_obj)
-
- def _test_snapshot_fails(self, raise_during_cleanup, method,
- expected_state=True):
- def fake_snapshot(*args, **kwargs):
- raise test.TestingException()
-
- self.fake_image_delete_called = False
-
- def fake_delete(self_, context, image_id):
- self.fake_image_delete_called = True
- if raise_during_cleanup:
- raise Exception()
-
- self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot)
- fake_image.stub_out_image_service(self.stubs)
- self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
-
- inst_obj = self._get_snapshotting_instance()
- if method == 'snapshot':
- self.assertRaises(test.TestingException,
- self.compute.snapshot_instance,
- self.context, image_id='fakesnap',
- instance=inst_obj)
- else:
- self.assertRaises(test.TestingException,
- self.compute.backup_instance,
- self.context, image_id='fakesnap',
- instance=inst_obj, backup_type='fake',
- rotation=1)
-
- self.assertEqual(expected_state, self.fake_image_delete_called)
- self._assert_state({'task_state': None})
-
- @mock.patch.object(nova.compute.manager.ComputeManager, '_rotate_backups')
- def test_backup_fails(self, mock_rotate):
- self._test_snapshot_fails(False, 'backup')
-
- @mock.patch.object(nova.compute.manager.ComputeManager, '_rotate_backups')
- def test_backup_fails_cleanup_ignores_exception(self, mock_rotate):
- self._test_snapshot_fails(True, 'backup')
-
- @mock.patch.object(nova.compute.manager.ComputeManager, '_rotate_backups')
- @mock.patch.object(nova.compute.manager.ComputeManager,
- '_do_snapshot_instance')
- def test_backup_fails_rotate_backup(self, mock_snap, mock_rotate):
- mock_rotate.side_effect = test.TestingException()
- self._test_snapshot_fails(True, 'backup', False)
-
- def test_snapshot_fails(self):
- self._test_snapshot_fails(False, 'snapshot')
-
- def test_snapshot_fails_cleanup_ignores_exception(self):
- self._test_snapshot_fails(True, 'snapshot')
-
- def _test_snapshot_deletes_image_on_failure(self, status, exc):
- self.fake_image_delete_called = False
-
- def fake_show(self_, context, image_id, **kwargs):
- self.assertEqual('fakesnap', image_id)
- image = {'id': image_id,
- 'status': status}
- return image
-
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
-
- def fake_delete(self_, context, image_id):
- self.fake_image_delete_called = True
- self.assertEqual('fakesnap', image_id)
-
- self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
-
- def fake_snapshot(*args, **kwargs):
- raise exc
-
- self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot)
-
- fake_image.stub_out_image_service(self.stubs)
-
- inst_obj = self._get_snapshotting_instance()
-
- self.compute.snapshot_instance(self.context, image_id='fakesnap',
- instance=inst_obj)
-
- def test_snapshot_fails_with_glance_error(self):
- image_not_found = exception.ImageNotFound(image_id='fakesnap')
- self._test_snapshot_deletes_image_on_failure('error', image_not_found)
- self.assertFalse(self.fake_image_delete_called)
- self._assert_state({'task_state': None})
-
- def test_snapshot_fails_with_task_state_error(self):
- deleting_state_error = exception.UnexpectedDeletingTaskStateError(
- expected=task_states.IMAGE_SNAPSHOT, actual=task_states.DELETING)
- self._test_snapshot_deletes_image_on_failure(
- 'error', deleting_state_error)
- self.assertTrue(self.fake_image_delete_called)
- self._test_snapshot_deletes_image_on_failure(
- 'active', deleting_state_error)
- self.assertFalse(self.fake_image_delete_called)
-
- def test_snapshot_fails_with_instance_not_found(self):
- instance_not_found = exception.InstanceNotFound(instance_id='uuid')
- self._test_snapshot_deletes_image_on_failure(
- 'error', instance_not_found)
- self.assertTrue(self.fake_image_delete_called)
- self._test_snapshot_deletes_image_on_failure(
- 'active', instance_not_found)
- self.assertFalse(self.fake_image_delete_called)
-
- def test_snapshot_handles_cases_when_instance_is_deleted(self):
- inst_obj = self._get_snapshotting_instance()
- inst_obj.task_state = task_states.DELETING
- inst_obj.save()
- self.compute.snapshot_instance(self.context, image_id='fakesnap',
- instance=inst_obj)
-
- def test_snapshot_handles_cases_when_instance_is_not_found(self):
- inst_obj = self._get_snapshotting_instance()
- inst_obj2 = objects.Instance.get_by_uuid(self.context, inst_obj.uuid)
- inst_obj2.destroy()
- self.compute.snapshot_instance(self.context, image_id='fakesnap',
- instance=inst_obj)
-
- def _assert_state(self, state_dict):
- """Assert state of VM is equal to state passed as parameter."""
- instances = db.instance_get_all(self.context)
- self.assertEqual(len(instances), 1)
-
- if 'vm_state' in state_dict:
- self.assertEqual(state_dict['vm_state'], instances[0]['vm_state'])
- if 'task_state' in state_dict:
- self.assertEqual(state_dict['task_state'],
- instances[0]['task_state'])
- if 'power_state' in state_dict:
- self.assertEqual(state_dict['power_state'],
- instances[0]['power_state'])
-
- def test_console_output(self):
- # Make sure we can get console output from instance.
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context,
- instance, {}, {}, [], None,
- None, True, None, False)
-
- output = self.compute.get_console_output(self.context,
- instance=instance, tail_length=None)
- self.assertEqual(output, 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE')
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_console_output_tail(self):
- # Make sure we can get console output from instance.
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context,
- instance, {}, {}, [], None,
- None, True, None, False)
-
- output = self.compute.get_console_output(self.context,
- instance=instance, tail_length=2)
- self.assertEqual(output, 'ANOTHER\nLAST LINE')
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_console_output_not_implemented(self):
- def fake_not_implemented(*args, **kwargs):
- raise NotImplementedError()
-
- self.stubs.Set(self.compute.driver, 'get_console_output',
- fake_not_implemented)
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context,
- instance, {}, {}, [], None,
- None, True, None, False)
-
- self.assertRaises(messaging.ExpectedException,
- self.compute.get_console_output, self.context,
- instance, 0)
-
- self.compute = utils.ExceptionHelper(self.compute)
-
- self.assertRaises(NotImplementedError,
- self.compute.get_console_output, self.context,
- instance, 0)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_console_output_instance_not_found(self):
- def fake_not_found(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id='fake-instance')
-
- self.stubs.Set(self.compute.driver, 'get_console_output',
- fake_not_found)
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context,
- instance, {}, {}, [], None,
- None, True, None, False)
-
- self.assertRaises(messaging.ExpectedException,
- self.compute.get_console_output, self.context,
- instance, 0)
-
- self.compute = utils.ExceptionHelper(self.compute)
-
- self.assertRaises(exception.InstanceNotFound,
- self.compute.get_console_output, self.context,
- instance, 0)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_novnc_vnc_console(self):
- # Make sure we can a vnc console for an instance.
- self.flags(vnc_enabled=True)
- self.flags(enabled=False, group='spice')
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context,
- instance, {}, {}, [], None,
- None, True, None, False)
-
- # Try with the full instance
- console = self.compute.get_vnc_console(self.context, 'novnc',
- instance=instance)
- self.assertTrue(console)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_validate_console_port_vnc(self):
- self.flags(vnc_enabled=True)
- self.flags(enabled=True, group='spice')
- instance = self._create_fake_instance_obj()
-
- def fake_driver_get_console(*args, **kwargs):
- return ctype.ConsoleVNC(host="fake_host", port=5900)
-
- self.stubs.Set(self.compute.driver, "get_vnc_console",
- fake_driver_get_console)
-
- self.assertTrue(self.compute.validate_console_port(
- context=self.context, instance=instance, port=5900,
- console_type="novnc"))
-
- def test_validate_console_port_spice(self):
- self.flags(vnc_enabled=True)
- self.flags(enabled=True, group='spice')
- instance = self._create_fake_instance_obj()
-
- def fake_driver_get_console(*args, **kwargs):
- return ctype.ConsoleSpice(host="fake_host", port=5900, tlsPort=88)
-
- self.stubs.Set(self.compute.driver, "get_spice_console",
- fake_driver_get_console)
-
- self.assertTrue(self.compute.validate_console_port(
- context=self.context, instance=instance, port=5900,
- console_type="spice-html5"))
-
- def test_validate_console_port_rdp(self):
- self.flags(enabled=True, group='rdp')
- instance = self._create_fake_instance_obj()
-
- def fake_driver_get_console(*args, **kwargs):
- return ctype.ConsoleRDP(host="fake_host", port=5900)
-
- self.stubs.Set(self.compute.driver, "get_rdp_console",
- fake_driver_get_console)
-
- self.assertTrue(self.compute.validate_console_port(
- context=self.context, instance=instance, port=5900,
- console_type="rdp-html5"))
-
- def test_validate_console_port_wrong_port(self):
- self.flags(vnc_enabled=True)
- self.flags(enabled=True, group='spice')
- instance = self._create_fake_instance_obj()
-
- def fake_driver_get_console(*args, **kwargs):
- return ctype.ConsoleSpice(host="fake_host", port=5900, tlsPort=88)
-
- self.stubs.Set(self.compute.driver, "get_vnc_console",
- fake_driver_get_console)
-
- self.assertFalse(self.compute.validate_console_port(
- context=self.context, instance=instance, port="wrongport",
- console_type="spice-html5"))
-
- def test_xvpvnc_vnc_console(self):
- # Make sure we can a vnc console for an instance.
- self.flags(vnc_enabled=True)
- self.flags(enabled=False, group='spice')
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context,
- instance, {}, {}, [], None,
- None, True, None, False)
-
- console = self.compute.get_vnc_console(self.context, 'xvpvnc',
- instance=instance)
- self.assertTrue(console)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_invalid_vnc_console_type(self):
- # Raise useful error if console type is an unrecognised string.
- self.flags(vnc_enabled=True)
- self.flags(enabled=False, group='spice')
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context,
- instance, {}, {}, [], None,
- None, True, None, False)
-
- self.assertRaises(messaging.ExpectedException,
- self.compute.get_vnc_console,
- self.context, 'invalid', instance=instance)
-
- self.compute = utils.ExceptionHelper(self.compute)
-
- self.assertRaises(exception.ConsoleTypeInvalid,
- self.compute.get_vnc_console,
- self.context, 'invalid', instance=instance)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_missing_vnc_console_type(self):
- # Raise useful error is console type is None.
- self.flags(vnc_enabled=True)
- self.flags(enabled=False, group='spice')
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context,
- instance, {}, {}, [], None,
- None, True, None, False)
-
- self.assertRaises(messaging.ExpectedException,
- self.compute.get_vnc_console,
- self.context, None, instance=instance)
-
- self.compute = utils.ExceptionHelper(self.compute)
-
- self.assertRaises(exception.ConsoleTypeInvalid,
- self.compute.get_vnc_console,
- self.context, None, instance=instance)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_get_vnc_console_not_implemented(self):
- self.stubs.Set(self.compute.driver, 'get_vnc_console',
- fake_not_implemented)
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context,
- instance, {}, {}, [], None,
- None, True, None, False)
-
- self.assertRaises(messaging.ExpectedException,
- self.compute.get_vnc_console,
- self.context, 'novnc', instance=instance)
-
- self.compute = utils.ExceptionHelper(self.compute)
-
- self.assertRaises(NotImplementedError,
- self.compute.get_vnc_console,
- self.context, 'novnc', instance=instance)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_spicehtml5_spice_console(self):
- # Make sure we can a spice console for an instance.
- self.flags(vnc_enabled=False)
- self.flags(enabled=True, group='spice')
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context,
- instance, {}, {}, [], None,
- None, True, None, False)
-
- # Try with the full instance
- console = self.compute.get_spice_console(self.context, 'spice-html5',
- instance=instance)
- self.assertTrue(console)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_invalid_spice_console_type(self):
- # Raise useful error if console type is an unrecognised string
- self.flags(vnc_enabled=False)
- self.flags(enabled=True, group='spice')
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context,
- instance, {}, {}, [], None,
- None, True, None, False)
-
- self.assertRaises(messaging.ExpectedException,
- self.compute.get_spice_console,
- self.context, 'invalid', instance=instance)
-
- self.compute = utils.ExceptionHelper(self.compute)
-
- self.assertRaises(exception.ConsoleTypeInvalid,
- self.compute.get_spice_console,
- self.context, 'invalid', instance=instance)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_missing_spice_console_type(self):
- # Raise useful error is console type is None
- self.flags(vnc_enabled=False)
- self.flags(enabled=True, group='spice')
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context,
- instance, {}, {}, [], None,
- None, True, None, False)
-
- self.assertRaises(messaging.ExpectedException,
- self.compute.get_spice_console,
- self.context, None, instance=instance)
-
- self.compute = utils.ExceptionHelper(self.compute)
-
- self.assertRaises(exception.ConsoleTypeInvalid,
- self.compute.get_spice_console,
- self.context, None, instance=instance)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_rdphtml5_rdp_console(self):
- # Make sure we can a rdp console for an instance.
- self.flags(vnc_enabled=False)
- self.flags(enabled=True, group='rdp')
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context,
- instance, {}, {}, [], None,
- None, True, None, False)
-
- # Try with the full instance
- console = self.compute.get_rdp_console(self.context, 'rdp-html5',
- instance=instance)
- self.assertTrue(console)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_invalid_rdp_console_type(self):
- # Raise useful error if console type is an unrecognised string
- self.flags(vnc_enabled=False)
- self.flags(enabled=True, group='rdp')
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context,
- instance, {}, {}, [], None,
- None, True, None, False)
-
- self.assertRaises(messaging.ExpectedException,
- self.compute.get_rdp_console,
- self.context, 'invalid', instance=instance)
-
- self.compute = utils.ExceptionHelper(self.compute)
-
- self.assertRaises(exception.ConsoleTypeInvalid,
- self.compute.get_rdp_console,
- self.context, 'invalid', instance=instance)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_missing_rdp_console_type(self):
- # Raise useful error is console type is None
- self.flags(vnc_enabled=False)
- self.flags(enabled=True, group='rdp')
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context,
- instance, {}, {}, [], None,
- None, True, None, False)
-
- self.assertRaises(messaging.ExpectedException,
- self.compute.get_rdp_console,
- self.context, None, instance=instance)
-
- self.compute = utils.ExceptionHelper(self.compute)
-
- self.assertRaises(exception.ConsoleTypeInvalid,
- self.compute.get_rdp_console,
- self.context, None, instance=instance)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_vnc_console_instance_not_ready(self):
- self.flags(vnc_enabled=True)
- self.flags(enabled=False, group='spice')
- instance = self._create_fake_instance_obj(
- params={'vm_state': vm_states.BUILDING})
-
- def fake_driver_get_console(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id=instance['uuid'])
-
- self.stubs.Set(self.compute.driver, "get_vnc_console",
- fake_driver_get_console)
-
- self.compute = utils.ExceptionHelper(self.compute)
-
- self.assertRaises(exception.InstanceNotReady,
- self.compute.get_vnc_console, self.context, 'novnc',
- instance=instance)
-
- def test_spice_console_instance_not_ready(self):
- self.flags(vnc_enabled=False)
- self.flags(enabled=True, group='spice')
- instance = self._create_fake_instance_obj(
- params={'vm_state': vm_states.BUILDING})
-
- def fake_driver_get_console(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id=instance['uuid'])
-
- self.stubs.Set(self.compute.driver, "get_spice_console",
- fake_driver_get_console)
-
- self.compute = utils.ExceptionHelper(self.compute)
-
- self.assertRaises(exception.InstanceNotReady,
- self.compute.get_spice_console, self.context, 'spice-html5',
- instance=instance)
-
- def test_rdp_console_instance_not_ready(self):
- self.flags(vnc_enabled=False)
- self.flags(enabled=True, group='rdp')
- instance = self._create_fake_instance_obj(
- params={'vm_state': vm_states.BUILDING})
-
- def fake_driver_get_console(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id=instance['uuid'])
-
- self.stubs.Set(self.compute.driver, "get_rdp_console",
- fake_driver_get_console)
-
- self.compute = utils.ExceptionHelper(self.compute)
-
- self.assertRaises(exception.InstanceNotReady,
- self.compute.get_rdp_console, self.context, 'rdp-html5',
- instance=instance)
-
- def test_vnc_console_disabled(self):
- self.flags(vnc_enabled=False)
- instance = self._create_fake_instance_obj(
- params={'vm_state': vm_states.BUILDING})
-
- self.compute = utils.ExceptionHelper(self.compute)
-
- self.assertRaises(exception.ConsoleTypeUnavailable,
- self.compute.get_vnc_console, self.context, 'novnc',
- instance=instance)
-
- def test_spice_console_disabled(self):
- self.flags(enabled=False, group='spice')
- instance = self._create_fake_instance_obj(
- params={'vm_state': vm_states.BUILDING})
-
- self.compute = utils.ExceptionHelper(self.compute)
-
- self.assertRaises(exception.ConsoleTypeUnavailable,
- self.compute.get_spice_console, self.context, 'spice-html5',
- instance=instance)
-
- def test_rdp_console_disabled(self):
- self.flags(enabled=False, group='rdp')
- instance = self._create_fake_instance_obj(
- params={'vm_state': vm_states.BUILDING})
-
- self.compute = utils.ExceptionHelper(self.compute)
-
- self.assertRaises(exception.ConsoleTypeUnavailable,
- self.compute.get_rdp_console, self.context, 'rdp-html5',
- instance=instance)
-
- def test_diagnostics(self):
- # Make sure we can get diagnostics for an instance.
- expected_diagnostic = {'cpu0_time': 17300000000,
- 'memory': 524288,
- 'vda_errors': -1,
- 'vda_read': 262144,
- 'vda_read_req': 112,
- 'vda_write': 5778432,
- 'vda_write_req': 488,
- 'vnet1_rx': 2070139,
- 'vnet1_rx_drop': 0,
- 'vnet1_rx_errors': 0,
- 'vnet1_rx_packets': 26701,
- 'vnet1_tx': 140208,
- 'vnet1_tx_drop': 0,
- 'vnet1_tx_errors': 0,
- 'vnet1_tx_packets': 662,
- }
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context,
- instance, {}, {}, [], None,
- None, True, None, False)
-
- diagnostics = self.compute.get_diagnostics(self.context,
- instance=instance)
- self.assertEqual(diagnostics, expected_diagnostic)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_instance_diagnostics(self):
- # Make sure we can get diagnostics for an instance.
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
-
- diagnostics = self.compute.get_instance_diagnostics(self.context,
- instance=instance)
- expected = {'config_drive': True,
- 'cpu_details': [{'time': 17300000000}],
- 'disk_details': [{'errors_count': 0,
- 'id': 'fake-disk-id',
- 'read_bytes': 262144,
- 'read_requests': 112,
- 'write_bytes': 5778432,
- 'write_requests': 488}],
- 'driver': 'fake',
- 'hypervisor_os': 'fake-os',
- 'memory_details': {'maximum': 524288, 'used': 0},
- 'nic_details': [{'mac_address': '01:23:45:67:89:ab',
- 'rx_drop': 0,
- 'rx_errors': 0,
- 'rx_octets': 2070139,
- 'rx_packets': 26701,
- 'tx_drop': 0,
- 'tx_errors': 0,
- 'tx_octets': 140208,
- 'tx_packets': 662}],
- 'state': 'running',
- 'uptime': 46664,
- 'version': '1.0'}
- self.assertEqual(expected, diagnostics)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_add_fixed_ip_usage_notification(self):
- def dummy(*args, **kwargs):
- pass
-
- self.stubs.Set(network_api.API, 'add_fixed_ip_to_instance',
- dummy)
- self.stubs.Set(nova.compute.manager.ComputeManager,
- 'inject_network_info', dummy)
- self.stubs.Set(nova.compute.manager.ComputeManager,
- 'reset_network', dummy)
-
- instance = self._create_fake_instance_obj()
-
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
- self.compute.add_fixed_ip_to_instance(self.context, network_id=1,
- instance=instance)
-
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_remove_fixed_ip_usage_notification(self):
- def dummy(*args, **kwargs):
- pass
-
- self.stubs.Set(network_api.API, 'remove_fixed_ip_from_instance',
- dummy)
- self.stubs.Set(nova.compute.manager.ComputeManager,
- 'inject_network_info', dummy)
- self.stubs.Set(nova.compute.manager.ComputeManager,
- 'reset_network', dummy)
-
- instance = self._create_fake_instance_obj()
-
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
- self.compute.remove_fixed_ip_from_instance(self.context, 1,
- instance=instance)
-
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_run_instance_usage_notification(self, request_spec=None):
- # Ensure run instance generates appropriate usage notification.
- request_spec = request_spec or {}
- instance = self._create_fake_instance_obj()
- expected_image_name = request_spec.get('image', {}).get('name', '')
- self.compute.run_instance(self.context, instance, request_spec,
- {}, [], None, None, True, None, False)
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- instance.refresh()
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type, 'compute.instance.create.start')
- # The last event is the one with the sugar in it.
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.priority, 'INFO')
- self.assertEqual(msg.event_type, 'compute.instance.create.end')
- payload = msg.payload
- self.assertEqual(payload['tenant_id'], self.project_id)
- self.assertEqual(expected_image_name, payload['image_name'])
- self.assertEqual(payload['user_id'], self.user_id)
- self.assertEqual(payload['instance_id'], instance['uuid'])
- self.assertEqual(payload['instance_type'], 'm1.tiny')
- type_id = flavors.get_flavor_by_name('m1.tiny')['id']
- self.assertEqual(str(payload['instance_type_id']), str(type_id))
- flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
- self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
- self.assertEqual(payload['state'], 'active')
- self.assertIn('display_name', payload)
- self.assertIn('created_at', payload)
- self.assertIn('launched_at', payload)
- self.assertIn('fixed_ips', payload)
- self.assertTrue(payload['launched_at'])
- image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
- self.assertEqual(payload['image_ref_url'], image_ref_url)
- self.assertEqual('Success', payload['message'])
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_run_instance_image_usage_notification(self):
- request_spec = {'image': {'name': 'fake_name', 'key': 'value'}}
- self.test_run_instance_usage_notification(request_spec=request_spec)
-
- def test_run_instance_usage_notification_volume_meta(self):
- # Volume's image metadata won't contain the image name
- request_spec = {'image': {'key': 'value'}}
- self.test_run_instance_usage_notification(request_spec=request_spec)
-
- def test_run_instance_end_notification_on_abort(self):
- # Test that an end notif is sent if the build is aborted
- instance = self._create_fake_instance_obj()
- instance_uuid = instance['uuid']
-
- def build_inst_abort(*args, **kwargs):
- raise exception.BuildAbortException(reason="already deleted",
- instance_uuid=instance_uuid)
-
- self.stubs.Set(self.compute, '_build_instance', build_inst_abort)
-
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type, 'compute.instance.create.start')
- msg = fake_notifier.NOTIFICATIONS[1]
-
- self.assertEqual(msg.event_type, 'compute.instance.create.end')
- self.assertEqual('INFO', msg.priority)
- payload = msg.payload
- message = payload['message']
- self.assertNotEqual(-1, message.find("already deleted"))
-
- def test_run_instance_error_notification_on_reschedule(self):
- # Test that error notif is sent if the build got rescheduled
- instance = self._create_fake_instance_obj()
- instance_uuid = instance['uuid']
-
- def build_inst_fail(*args, **kwargs):
- raise exception.RescheduledException(instance_uuid=instance_uuid,
- reason="something bad happened")
-
- self.stubs.Set(self.compute, '_build_instance', build_inst_fail)
-
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
-
- self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type, 'compute.instance.create.start')
- msg = fake_notifier.NOTIFICATIONS[1]
-
- self.assertEqual(msg.event_type, 'compute.instance.create.error')
- self.assertEqual('ERROR', msg.priority)
- payload = msg.payload
- message = payload['message']
- self.assertNotEqual(-1, message.find("something bad happened"))
-
- def test_run_instance_error_notification_on_failure(self):
- # Test that error notif is sent if build fails hard
- instance = self._create_fake_instance_obj()
-
- def build_inst_fail(*args, **kwargs):
- raise test.TestingException("i'm dying")
-
- self.stubs.Set(self.compute, '_build_instance', build_inst_fail)
-
- self.assertRaises(test.TestingException, self.compute.run_instance,
- self.context, instance, {}, {}, [], None, None, True, None,
- False)
-
- self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type, 'compute.instance.create.start')
- msg = fake_notifier.NOTIFICATIONS[1]
-
- self.assertEqual(msg.event_type, 'compute.instance.create.error')
- self.assertEqual('ERROR', msg.priority)
- payload = msg.payload
- message = payload['message']
- self.assertNotEqual(-1, message.find("i'm dying"))
-
- def test_terminate_usage_notification(self):
- # Ensure terminate_instance generates correct usage notification.
- old_time = datetime.datetime(2012, 4, 1)
- cur_time = datetime.datetime(2012, 12, 21, 12, 21)
-
- timeutils.set_time_override(old_time)
-
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- fake_notifier.NOTIFICATIONS = []
- timeutils.set_time_override(cur_time)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 4)
-
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.priority, 'INFO')
- self.assertEqual(msg.event_type, 'compute.instance.delete.start')
- msg1 = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg1.event_type, 'compute.instance.shutdown.start')
- msg1 = fake_notifier.NOTIFICATIONS[2]
- self.assertEqual(msg1.event_type, 'compute.instance.shutdown.end')
- msg1 = fake_notifier.NOTIFICATIONS[3]
- self.assertEqual(msg1.event_type, 'compute.instance.delete.end')
- payload = msg1.payload
- self.assertEqual(payload['tenant_id'], self.project_id)
- self.assertEqual(payload['user_id'], self.user_id)
- self.assertEqual(payload['instance_id'], instance['uuid'])
- self.assertEqual(payload['instance_type'], 'm1.tiny')
- type_id = flavors.get_flavor_by_name('m1.tiny')['id']
- self.assertEqual(str(payload['instance_type_id']), str(type_id))
- flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
- self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
- self.assertIn('display_name', payload)
- self.assertIn('created_at', payload)
- self.assertIn('launched_at', payload)
- self.assertIn('terminated_at', payload)
- self.assertIn('deleted_at', payload)
- self.assertEqual(payload['terminated_at'], timeutils.strtime(cur_time))
- self.assertEqual(payload['deleted_at'], timeutils.strtime(cur_time))
- image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
- self.assertEqual(payload['image_ref_url'], image_ref_url)
-
- def test_run_instance_existing(self):
- # Ensure failure when running an instance that already exists.
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
- self.assertRaises(exception.InstanceExists,
- self.compute.run_instance,
- self.context, instance, {}, {}, [], None, None, True,
- None, False)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_run_instance_queries_macs(self):
- # run_instance should ask the driver for node mac addresses and pass
- # that to the network_api in use.
- fake_network.unset_stub_network_methods(self.stubs)
- instance = self._create_fake_instance_obj()
-
- macs = set(['01:23:45:67:89:ab'])
- self.mox.StubOutWithMock(self.compute.network_api,
- "allocate_for_instance")
- self.compute.network_api.allocate_for_instance(
- mox.IgnoreArg(),
- mox.IgnoreArg(),
- requested_networks=None,
- vpn=False, macs=macs,
- security_groups=[], dhcp_options=None).AndReturn(
- fake_network.fake_get_instance_nw_info(self.stubs, 1, 1))
-
- self.mox.StubOutWithMock(self.compute.driver, "macs_for_instance")
- self.compute.driver.macs_for_instance(
- mox.IsA(instance_obj.Instance)).AndReturn(macs)
- self.mox.ReplayAll()
- self.compute.run_instance(self.context, instance, {}, {}, None, None,
- None, True, None, False)
-
- def _create_server_group(self):
- group_instance = self._create_fake_instance_obj(
- params=dict(host=self.compute.host))
-
- instance_group = objects.InstanceGroup(self.context)
- instance_group.user_id = self.user_id
- instance_group.project_id = self.project_id
- instance_group.name = 'messi'
- instance_group.uuid = str(uuid.uuid4())
- instance_group.members = [group_instance.uuid]
- instance_group.policies = ['anti-affinity']
- fake_notifier.NOTIFICATIONS = []
- instance_group.create()
- self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(instance_group.name, msg.payload['name'])
- self.assertEqual(instance_group.members, msg.payload['members'])
- self.assertEqual(instance_group.policies, msg.payload['policies'])
- self.assertEqual(instance_group.project_id, msg.payload['project_id'])
- self.assertEqual(instance_group.uuid, msg.payload['uuid'])
- self.assertEqual('servergroup.create', msg.event_type)
- return instance_group
-
- def _run_instance_reschedules_on_anti_affinity_violation(self, group,
- hint):
- instance = self._create_fake_instance_obj()
- filter_properties = {'scheduler_hints': {'group': hint}}
- self.assertRaises(exception.RescheduledException,
- self.compute._build_instance,
- self.context, {}, filter_properties,
- [], None, None, True, None, instance,
- None, False)
-
- def test_run_instance_reschedules_on_anti_affinity_violation_by_name(self):
- group = self._create_server_group()
- self._run_instance_reschedules_on_anti_affinity_violation(group,
- group.name)
-
- def test_run_instance_reschedules_on_anti_affinity_violation_by_uuid(self):
- group = self._create_server_group()
- self._run_instance_reschedules_on_anti_affinity_violation(group,
- group.uuid)
-
- def test_instance_set_to_error_on_uncaught_exception(self):
- # Test that instance is set to error state when exception is raised.
- instance = self._create_fake_instance_obj()
-
- self.mox.StubOutWithMock(self.compute.network_api,
- "allocate_for_instance")
- self.mox.StubOutWithMock(self.compute.network_api,
- "deallocate_for_instance")
- self.compute.network_api.allocate_for_instance(
- mox.IgnoreArg(),
- mox.IgnoreArg(),
- requested_networks=None,
- vpn=False, macs=None,
- security_groups=[], dhcp_options=None
- ).AndRaise(messaging.RemoteError())
- self.compute.network_api.deallocate_for_instance(
- mox.IgnoreArg(),
- mox.IgnoreArg(),
- requested_networks=None).MultipleTimes()
-
- fake_network.unset_stub_network_methods(self.stubs)
-
- self.mox.ReplayAll()
-
- self.assertRaises(messaging.RemoteError,
- self.compute.run_instance,
- self.context, instance, {}, {}, None, None, None,
- True, None, False)
-
- instance.refresh()
- self.assertEqual(vm_states.ERROR, instance.vm_state)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_delete_instance_keeps_net_on_power_off_fail(self):
- self.mox.StubOutWithMock(self.compute.driver, 'destroy')
- self.mox.StubOutWithMock(self.compute, '_deallocate_network')
- exp = exception.InstancePowerOffFailure(reason='')
- self.compute.driver.destroy(mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg()).AndRaise(exp)
- # mox will detect if _deallocate_network gets called unexpectedly
- self.mox.ReplayAll()
- instance = self._create_fake_instance_obj()
- self.assertRaises(exception.InstancePowerOffFailure,
- self.compute._delete_instance,
- self.context,
- instance,
- [],
- self.none_quotas)
-
- def test_delete_instance_loses_net_on_other_fail(self):
- self.mox.StubOutWithMock(self.compute.driver, 'destroy')
- self.mox.StubOutWithMock(self.compute, '_deallocate_network')
- exp = test.TestingException()
- self.compute.driver.destroy(mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg()).AndRaise(exp)
- self.compute._deallocate_network(mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg())
- self.mox.ReplayAll()
- instance = self._create_fake_instance_obj()
- self.assertRaises(test.TestingException,
- self.compute._delete_instance,
- self.context,
- instance,
- [],
- self.none_quotas)
-
- def test_delete_instance_deletes_console_auth_tokens(self):
- instance = self._create_fake_instance_obj()
- self.flags(vnc_enabled=True)
-
- self.tokens_deleted = False
-
- def fake_delete_tokens(*args, **kwargs):
- self.tokens_deleted = True
-
- cauth_rpcapi = self.compute.consoleauth_rpcapi
- self.stubs.Set(cauth_rpcapi, 'delete_tokens_for_instance',
- fake_delete_tokens)
-
- self.compute._delete_instance(self.context, instance, [],
- self.none_quotas)
-
- self.assertTrue(self.tokens_deleted)
-
- def test_delete_instance_deletes_console_auth_tokens_cells(self):
- instance = self._create_fake_instance_obj()
- self.flags(vnc_enabled=True)
- self.flags(enable=True, group='cells')
-
- self.tokens_deleted = False
-
- def fake_delete_tokens(*args, **kwargs):
- self.tokens_deleted = True
-
- cells_rpcapi = self.compute.cells_rpcapi
- self.stubs.Set(cells_rpcapi, 'consoleauth_delete_tokens',
- fake_delete_tokens)
-
- self.compute._delete_instance(self.context, instance,
- [], self.none_quotas)
-
- self.assertTrue(self.tokens_deleted)
-
- def test_instance_termination_exception_sets_error(self):
- """Test that we handle InstanceTerminationFailure
- which is propagated up from the underlying driver.
- """
- instance = self._create_fake_instance_obj()
-
- def fake_delete_instance(context, instance, bdms,
- reservations=None):
- raise exception.InstanceTerminationFailure(reason='')
-
- self.stubs.Set(self.compute, '_delete_instance',
- fake_delete_instance)
-
- self.assertRaises(exception.InstanceTerminationFailure,
- self.compute.terminate_instance,
- self.context,
- instance, [], [])
- instance = db.instance_get_by_uuid(self.context, instance['uuid'])
- self.assertEqual(instance['vm_state'], vm_states.ERROR)
-
- def test_network_is_deallocated_on_spawn_failure(self):
- # When a spawn fails the network must be deallocated.
- instance = self._create_fake_instance_obj()
-
- self.mox.StubOutWithMock(self.compute, "_prep_block_device")
- self.compute._prep_block_device(
- mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).AndRaise(messaging.RemoteError('', '', ''))
-
- self.mox.ReplayAll()
-
- self.assertRaises(messaging.RemoteError,
- self.compute.run_instance,
- self.context, instance, {}, {}, None, None, None,
- True, None, False)
-
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_lock(self):
- # FIXME(comstud): This test is such crap. This is testing
- # compute API lock functionality in a test class for the compute
- # manager by running an instance. Hello? We should just have
- # unit tests in test_compute_api that test the check_instance_lock
- # decorator and make sure that appropriate compute_api methods
- # have the decorator.
- instance = self._create_fake_instance_obj()
- instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance, {}, {}, None, None,
- None, True, None, False)
-
- non_admin_context = context.RequestContext(None,
- None,
- is_admin=False)
-
- def check_task_state(task_state):
- instance = db.instance_get_by_uuid(self.context, instance_uuid)
- self.assertEqual(instance['task_state'], task_state)
-
- instance.refresh()
-
- # should fail with locked nonadmin context
- self.compute_api.lock(self.context, instance)
- self.assertRaises(exception.InstanceIsLocked,
- self.compute_api.reboot,
- non_admin_context, instance, 'SOFT')
- check_task_state(None)
-
- # should fail with invalid task state
- self.compute_api.unlock(self.context, instance)
- instance.task_state = task_states.REBOOTING
- instance.save()
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.reboot,
- non_admin_context, instance, 'SOFT')
- check_task_state(task_states.REBOOTING)
-
- # should succeed with admin context
- instance.task_state = None
- instance.save()
- self.compute_api.reboot(self.context, instance, 'SOFT')
- check_task_state(task_states.REBOOTING)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def _check_locked_by(self, instance_uuid, locked_by):
- instance = db.instance_get_by_uuid(self.context, instance_uuid)
- self.assertEqual(instance['locked'], locked_by is not None)
- self.assertEqual(instance['locked_by'], locked_by)
- return instance
-
- def test_override_owner_lock(self):
- # FIXME(comstud): This test is such crap. This is testing
- # compute API lock functionality in a test class for the compute
- # manager by running an instance. Hello? We should just have
- # unit tests in test_compute_api that test the check_instance_lock
- # decorator and make sure that appropriate compute_api methods
- # have the decorator.
- admin_context = context.RequestContext('admin-user',
- 'admin-project',
- is_admin=True)
-
- instance = self._create_fake_instance_obj()
- instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance, {}, {}, None, None,
- None, True, None, False)
-
- # Ensure that an admin can override the owner lock
- self.compute_api.lock(self.context, instance)
- self._check_locked_by(instance_uuid, 'owner')
- self.compute_api.unlock(admin_context, instance)
- self._check_locked_by(instance_uuid, None)
-
- def test_upgrade_owner_lock(self):
- # FIXME(comstud): This test is such crap. This is testing
- # compute API lock functionality in a test class for the compute
- # manager by running an instance. Hello? We should just have
- # unit tests in test_compute_api that test the check_instance_lock
- # decorator and make sure that appropriate compute_api methods
- # have the decorator.
- admin_context = context.RequestContext('admin-user',
- 'admin-project',
- is_admin=True)
-
- instance = self._create_fake_instance_obj()
- instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance, {}, {}, None, None,
- None, True, None, False)
-
- # Ensure that an admin can upgrade the lock and that
- # the owner can no longer unlock
- self.compute_api.lock(self.context, instance)
- self.compute_api.lock(admin_context, instance)
- self._check_locked_by(instance_uuid, 'admin')
- instance.refresh()
- self.assertRaises(exception.PolicyNotAuthorized,
- self.compute_api.unlock,
- self.context, instance)
- self._check_locked_by(instance_uuid, 'admin')
- self.compute_api.unlock(admin_context, instance)
- self._check_locked_by(instance_uuid, None)
-
- def _test_state_revert(self, instance, operation, pre_task_state,
- kwargs=None, vm_state=None):
- if kwargs is None:
- kwargs = {}
-
- # The API would have set task_state, so do that here to test
- # that the state gets reverted on failure
- db.instance_update(self.context, instance['uuid'],
- {"task_state": pre_task_state})
-
- orig_elevated = self.context.elevated
- orig_notify = self.compute._notify_about_instance_usage
-
- def _get_an_exception(*args, **kwargs):
- raise test.TestingException()
-
- self.stubs.Set(self.context, 'elevated', _get_an_exception)
- self.stubs.Set(self.compute,
- '_notify_about_instance_usage', _get_an_exception)
-
- func = getattr(self.compute, operation)
-
- self.assertRaises(test.TestingException,
- func, self.context, instance=instance, **kwargs)
- # self.context.elevated() is called in tearDown()
- self.stubs.Set(self.context, 'elevated', orig_elevated)
- self.stubs.Set(self.compute,
- '_notify_about_instance_usage', orig_notify)
-
- # Fetch the instance's task_state and make sure it reverted to None.
- instance = db.instance_get_by_uuid(self.context, instance['uuid'])
- if vm_state:
- self.assertEqual(instance.vm_state, vm_state)
- self.assertIsNone(instance["task_state"])
-
- def test_state_revert(self):
- # ensure that task_state is reverted after a failed operation.
- migration = objects.Migration()
- migration.instance_uuid = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
- migration.new_instance_type_id = '1'
-
- actions = [
- ("reboot_instance", task_states.REBOOTING,
- {'block_device_info': [],
- 'reboot_type': 'SOFT'}),
- ("stop_instance", task_states.POWERING_OFF),
- ("start_instance", task_states.POWERING_ON),
- ("terminate_instance", task_states.DELETING,
- {'bdms': [],
- 'reservations': []},
- vm_states.ERROR),
- ("soft_delete_instance", task_states.SOFT_DELETING,
- {'reservations': []}),
- ("restore_instance", task_states.RESTORING),
- ("rebuild_instance", task_states.REBUILDING,
- {'orig_image_ref': None,
- 'image_ref': None,
- 'injected_files': [],
- 'new_pass': '',
- 'orig_sys_metadata': {},
- 'bdms': [],
- 'recreate': False,
- 'on_shared_storage': False}),
- ("set_admin_password", task_states.UPDATING_PASSWORD,
- {'new_pass': None}),
- ("rescue_instance", task_states.RESCUING,
- {'rescue_password': None}),
- ("unrescue_instance", task_states.UNRESCUING),
- ("revert_resize", task_states.RESIZE_REVERTING,
- {'migration': migration,
- 'reservations': []}),
- ("prep_resize", task_states.RESIZE_PREP,
- {'image': {},
- 'instance_type': {},
- 'reservations': [],
- 'request_spec': {},
- 'filter_properties': {},
- 'node': None}),
- ("resize_instance", task_states.RESIZE_PREP,
- {'migration': migration,
- 'image': {},
- 'reservations': [],
- 'instance_type': {}}),
- ("pause_instance", task_states.PAUSING),
- ("unpause_instance", task_states.UNPAUSING),
- ("suspend_instance", task_states.SUSPENDING),
- ("resume_instance", task_states.RESUMING),
- ]
-
- self._stub_out_resize_network_methods()
- instance = self._create_fake_instance_obj()
- for operation in actions:
- self._test_state_revert(instance, *operation)
-
- def _ensure_quota_reservations_committed(self, instance):
- """Mock up commit of quota reservations."""
- reservations = list('fake_res')
- self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
- nova.quota.QUOTAS.commit(mox.IgnoreArg(), reservations,
- project_id=instance['project_id'],
- user_id=instance['user_id'])
- self.mox.ReplayAll()
- return reservations
-
- def _ensure_quota_reservations_rolledback(self, instance):
- """Mock up rollback of quota reservations."""
- reservations = list('fake_res')
- self.mox.StubOutWithMock(nova.quota.QUOTAS, 'rollback')
- nova.quota.QUOTAS.rollback(mox.IgnoreArg(), reservations,
- project_id=instance['project_id'],
- user_id=instance['user_id'])
- self.mox.ReplayAll()
- return reservations
-
- def test_quotas_successful_delete(self):
- instance = self._create_fake_instance_obj()
- resvs = self._ensure_quota_reservations_committed(instance)
- self.compute.terminate_instance(self.context, instance,
- bdms=[], reservations=resvs)
-
- def test_quotas_failed_delete(self):
- instance = self._create_fake_instance_obj()
-
- def fake_shutdown_instance(*args, **kwargs):
- raise test.TestingException()
-
- self.stubs.Set(self.compute, '_shutdown_instance',
- fake_shutdown_instance)
-
- resvs = self._ensure_quota_reservations_rolledback(instance)
- self.assertRaises(test.TestingException,
- self.compute.terminate_instance,
- self.context, instance,
- bdms=[], reservations=resvs)
-
- def test_quotas_successful_soft_delete(self):
- instance = self._create_fake_instance_obj(
- params=dict(task_state=task_states.SOFT_DELETING))
- resvs = self._ensure_quota_reservations_committed(instance)
- self.compute.soft_delete_instance(self.context, instance,
- reservations=resvs)
-
- def test_quotas_failed_soft_delete(self):
- instance = self._create_fake_instance_obj(
- params=dict(task_state=task_states.SOFT_DELETING))
-
- def fake_soft_delete(*args, **kwargs):
- raise test.TestingException()
-
- self.stubs.Set(self.compute.driver, delete_types.SOFT_DELETE,
- fake_soft_delete)
-
- resvs = self._ensure_quota_reservations_rolledback(instance)
- self.assertRaises(test.TestingException,
- self.compute.soft_delete_instance,
- self.context, instance,
- reservations=resvs)
-
- def test_quotas_destroy_of_soft_deleted_instance(self):
- instance = self._create_fake_instance_obj(
- params=dict(vm_state=vm_states.SOFT_DELETED))
- # Termination should be successful, but quota reservations
- # rolled back because the instance was in SOFT_DELETED state.
- resvs = self._ensure_quota_reservations_rolledback(instance)
- self.compute.terminate_instance(self.context, instance,
- bdms=[], reservations=resvs)
-
- def _stub_out_resize_network_methods(self):
- def fake(cls, ctxt, instance, *args, **kwargs):
- pass
-
- self.stubs.Set(network_api.API, 'setup_networks_on_host', fake)
- self.stubs.Set(network_api.API, 'migrate_instance_start', fake)
- self.stubs.Set(network_api.API, 'migrate_instance_finish', fake)
-
- def _test_finish_resize(self, power_on):
- # Contrived test to ensure finish_resize doesn't raise anything and
- # also tests resize from ACTIVE or STOPPED state which determines
- # if the resized instance is powered on or not.
- vm_state = None
- if power_on:
- vm_state = vm_states.ACTIVE
- else:
- vm_state = vm_states.STOPPED
- params = {'vm_state': vm_state}
- instance = self._create_fake_instance_obj(params)
- image = 'fake-image'
- disk_info = 'fake-disk-info'
- instance_type = flavors.get_default_flavor()
- instance.task_state = task_states.RESIZE_PREP
- instance.save()
- self.compute.prep_resize(self.context, instance=instance,
- instance_type=instance_type,
- image={}, reservations=[], request_spec={},
- filter_properties={}, node=None)
- instance.task_state = task_states.RESIZE_MIGRATED
- instance.save()
-
- # NOTE(mriedem): make sure prep_resize set old_vm_state correctly
- sys_meta = instance.system_metadata
- self.assertIn('old_vm_state', sys_meta)
- if power_on:
- self.assertEqual(vm_states.ACTIVE, sys_meta['old_vm_state'])
- else:
- self.assertEqual(vm_states.STOPPED, sys_meta['old_vm_state'])
- migration = objects.Migration.get_by_instance_and_status(
- self.context.elevated(),
- instance.uuid, 'pre-migrating')
-
- orig_mig_save = migration.save
- orig_inst_save = instance.save
- network_api = self.compute.network_api
-
- self.mox.StubOutWithMock(network_api, 'setup_networks_on_host')
- self.mox.StubOutWithMock(network_api,
- 'migrate_instance_finish')
- self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
- self.mox.StubOutWithMock(self.compute,
- '_notify_about_instance_usage')
- self.mox.StubOutWithMock(self.compute.driver, 'finish_migration')
- self.mox.StubOutWithMock(self.compute,
- '_get_instance_block_device_info')
- self.mox.StubOutWithMock(migration, 'save')
- self.mox.StubOutWithMock(instance, 'save')
- self.mox.StubOutWithMock(self.context, 'elevated')
-
- def _mig_save(context):
- self.assertEqual(migration.status, 'finished')
- self.assertEqual(vm_state, instance.vm_state)
- self.assertEqual(task_states.RESIZE_FINISH, instance.task_state)
- orig_mig_save()
-
- def _instance_save1():
- self.assertEqual(instance_type['id'],
- instance.instance_type_id)
- orig_inst_save()
-
- def _instance_save2(expected_task_state=None):
- self.assertEqual(task_states.RESIZE_MIGRATED,
- expected_task_state)
- self.assertEqual(task_states.RESIZE_FINISH, instance.task_state)
- orig_inst_save(expected_task_state=expected_task_state)
-
- def _instance_save3(expected_task_state=None):
- self.assertEqual(task_states.RESIZE_FINISH,
- expected_task_state)
- self.assertEqual(vm_states.RESIZED, instance.vm_state)
- self.assertIsNone(instance.task_state)
- self.assertIn('launched_at', instance.obj_what_changed())
- orig_inst_save(expected_task_state=expected_task_state)
-
- # First save to update flavor
- instance.save().WithSideEffects(_instance_save1)
-
- network_api.setup_networks_on_host(self.context, instance,
- 'fake-mini')
- network_api.migrate_instance_finish(self.context,
- mox.IsA(dict),
- mox.IsA(dict))
-
- self.compute._get_instance_nw_info(
- self.context, instance).AndReturn('fake-nwinfo1')
-
- # 2nd save to update task state
- exp_kwargs = dict(expected_task_state=task_states.RESIZE_MIGRATED)
- instance.save(**exp_kwargs).WithSideEffects(_instance_save2)
-
- self.compute._notify_about_instance_usage(
- self.context, instance, 'finish_resize.start',
- network_info='fake-nwinfo1')
-
- self.compute._get_instance_block_device_info(
- self.context, instance,
- refresh_conn_info=True).AndReturn('fake-bdminfo')
- # nova.conf sets the default flavor to m1.small and the test
- # sets the default flavor to m1.tiny so they should be different
- # which makes this a resize
- self.compute.driver.finish_migration(self.context, migration,
- instance, disk_info,
- 'fake-nwinfo1',
- image, True,
- 'fake-bdminfo', power_on)
- # Ensure instance status updates is after the migration finish
- self.context.elevated().AndReturn(self.context)
- migration.save(self.context).WithSideEffects(_mig_save)
- exp_kwargs = dict(expected_task_state=task_states.RESIZE_FINISH)
- instance.save(**exp_kwargs).WithSideEffects(_instance_save3)
- self.compute._notify_about_instance_usage(
- self.context, instance, 'finish_resize.end',
- network_info='fake-nwinfo1')
- # NOTE(comstud): This actually does the mox.ReplayAll()
- reservations = self._ensure_quota_reservations_committed(instance)
-
- self.compute.finish_resize(self.context,
- migration=migration,
- disk_info=disk_info, image=image, instance=instance,
- reservations=reservations)
-
- def test_finish_resize_from_active(self):
- self._test_finish_resize(power_on=True)
-
- def test_finish_resize_from_stopped(self):
- self._test_finish_resize(power_on=False)
-
- def test_finish_resize_with_volumes(self):
- """Contrived test to ensure finish_resize doesn't raise anything."""
-
- # create instance
- instance = self._create_fake_instance_obj()
-
- # create volume
- volume_id = 'fake'
- volume = {'instance_uuid': None,
- 'device_name': None,
- 'id': volume_id,
- 'attach_status': 'detached'}
- bdm = objects.BlockDeviceMapping(
- **{'source_type': 'volume',
- 'destination_type': 'volume',
- 'volume_id': volume_id,
- 'instance_uuid': instance['uuid'],
- 'device_name': '/dev/vdc'})
- bdm.create(self.context)
-
- # stub out volume attach
- def fake_volume_get(self, context, volume_id):
- return volume
- self.stubs.Set(cinder.API, "get", fake_volume_get)
-
- def fake_volume_check_attach(self, context, volume_id, instance):
- pass
- self.stubs.Set(cinder.API, "check_attach", fake_volume_check_attach)
-
- def fake_get_volume_encryption_metadata(self, context, volume_id):
- return {}
- self.stubs.Set(cinder.API, 'get_volume_encryption_metadata',
- fake_get_volume_encryption_metadata)
-
- orig_connection_data = {
- 'target_discovered': True,
- 'target_iqn': 'iqn.2010-10.org.openstack:%s.1' % volume_id,
- 'target_portal': '127.0.0.0.1:3260',
- 'volume_id': volume_id,
- }
- connection_info = {
- 'driver_volume_type': 'iscsi',
- 'data': orig_connection_data,
- }
-
- def fake_init_conn(self, context, volume_id, session):
- return connection_info
- self.stubs.Set(cinder.API, "initialize_connection", fake_init_conn)
-
- def fake_attach(self, context, volume_id, instance_uuid, device_name,
- mode='rw'):
- volume['instance_uuid'] = instance_uuid
- volume['device_name'] = device_name
- self.stubs.Set(cinder.API, "attach", fake_attach)
-
- # stub out virt driver attach
- def fake_get_volume_connector(*args, **kwargs):
- return {}
- self.stubs.Set(self.compute.driver, 'get_volume_connector',
- fake_get_volume_connector)
-
- def fake_attach_volume(*args, **kwargs):
- pass
- self.stubs.Set(self.compute.driver, 'attach_volume',
- fake_attach_volume)
-
- # attach volume to instance
- self.compute.attach_volume(self.context, volume['id'],
- '/dev/vdc', instance, bdm=bdm)
-
- # assert volume attached correctly
- self.assertEqual(volume['device_name'], '/dev/vdc')
- disk_info = db.block_device_mapping_get_all_by_instance(
- self.context, instance.uuid)
- self.assertEqual(len(disk_info), 1)
- for bdm in disk_info:
- self.assertEqual(bdm['device_name'], volume['device_name'])
- self.assertEqual(bdm['connection_info'],
- jsonutils.dumps(connection_info))
-
- # begin resize
- instance_type = flavors.get_default_flavor()
- instance.task_state = task_states.RESIZE_PREP
- instance.save()
- self.compute.prep_resize(self.context, instance=instance,
- instance_type=instance_type,
- image={}, reservations=[], request_spec={},
- filter_properties={}, node=None)
-
- # fake out detach for prep_resize (and later terminate)
- def fake_terminate_connection(self, context, volume, connector):
- connection_info['data'] = None
- self.stubs.Set(cinder.API, "terminate_connection",
- fake_terminate_connection)
-
- self._stub_out_resize_network_methods()
-
- migration = objects.Migration.get_by_instance_and_status(
- self.context.elevated(),
- instance.uuid, 'pre-migrating')
- self.compute.resize_instance(self.context, instance=instance,
- migration=migration, image={}, reservations=[],
- instance_type=jsonutils.to_primitive(instance_type))
-
- # assert bdm is unchanged
- disk_info = db.block_device_mapping_get_all_by_instance(
- self.context, instance.uuid)
- self.assertEqual(len(disk_info), 1)
- for bdm in disk_info:
- self.assertEqual(bdm['device_name'], volume['device_name'])
- cached_connection_info = jsonutils.loads(bdm['connection_info'])
- self.assertEqual(cached_connection_info['data'],
- orig_connection_data)
- # but connection was terminated
- self.assertIsNone(connection_info['data'])
-
- # stub out virt driver finish_migration
- def fake(*args, **kwargs):
- pass
- self.stubs.Set(self.compute.driver, 'finish_migration', fake)
-
- instance.task_state = task_states.RESIZE_MIGRATED
- instance.save()
-
- reservations = self._ensure_quota_reservations_committed(instance)
-
- # new initialize connection
- new_connection_data = dict(orig_connection_data)
- new_iqn = 'iqn.2010-10.org.openstack:%s.2' % volume_id,
- new_connection_data['target_iqn'] = new_iqn
-
- def fake_init_conn_with_data(self, context, volume, session):
- connection_info['data'] = new_connection_data
- return connection_info
- self.stubs.Set(cinder.API, "initialize_connection",
- fake_init_conn_with_data)
-
- self.compute.finish_resize(self.context,
- migration=migration,
- disk_info={}, image={}, instance=instance,
- reservations=reservations)
-
- # assert volume attached correctly
- disk_info = db.block_device_mapping_get_all_by_instance(
- self.context, instance['uuid'])
- self.assertEqual(len(disk_info), 1)
- for bdm in disk_info:
- self.assertEqual(bdm['connection_info'],
- jsonutils.dumps(connection_info))
-
- # stub out detach
- def fake_detach(self, context, volume_uuid):
- volume['device_path'] = None
- volume['instance_uuid'] = None
- self.stubs.Set(cinder.API, "detach", fake_detach)
-
- # clean up
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_finish_resize_handles_error(self):
- # Make sure we don't leave the instance in RESIZE on error.
-
- def throw_up(*args, **kwargs):
- raise test.TestingException()
-
- def fake(*args, **kwargs):
- pass
-
- self.stubs.Set(self.compute.driver, 'finish_migration', throw_up)
-
- self._stub_out_resize_network_methods()
-
- old_flavor_name = 'm1.tiny'
- instance = self._create_fake_instance_obj(type_name=old_flavor_name)
- reservations = self._ensure_quota_reservations_rolledback(instance)
-
- instance_type = flavors.get_flavor_by_name('m1.small')
-
- self.compute.prep_resize(self.context, instance=instance,
- instance_type=instance_type,
- image={}, reservations=reservations,
- request_spec={}, filter_properties={},
- node=None)
-
- migration = objects.Migration.get_by_instance_and_status(
- self.context.elevated(),
- instance.uuid, 'pre-migrating')
-
- instance.refresh()
- instance.task_state = task_states.RESIZE_MIGRATED
- instance.save()
- self.assertRaises(test.TestingException, self.compute.finish_resize,
- self.context,
- migration=migration,
- disk_info={}, image={}, instance=instance,
- reservations=reservations)
- instance.refresh()
- self.assertEqual(vm_states.ERROR, instance.vm_state)
-
- old_flavor = flavors.get_flavor_by_name(old_flavor_name)
- self.assertEqual(old_flavor['memory_mb'], instance.memory_mb)
- self.assertEqual(old_flavor['vcpus'], instance.vcpus)
- self.assertEqual(old_flavor['root_gb'], instance.root_gb)
- self.assertEqual(old_flavor['ephemeral_gb'], instance.ephemeral_gb)
- self.assertEqual(old_flavor['id'], instance.instance_type_id)
- self.assertNotEqual(instance_type['id'], instance.instance_type_id)
-
- def test_save_instance_info(self):
- old_flavor_name = 'm1.tiny'
- new_flavor_name = 'm1.small'
- instance = self._create_fake_instance_obj(type_name=old_flavor_name)
- new_flavor = flavors.get_flavor_by_name(new_flavor_name)
-
- self.compute._save_instance_info(instance, new_flavor,
- instance.system_metadata)
-
- self.assertEqual(new_flavor['memory_mb'], instance.memory_mb)
- self.assertEqual(new_flavor['vcpus'], instance.vcpus)
- self.assertEqual(new_flavor['root_gb'], instance.root_gb)
- self.assertEqual(new_flavor['ephemeral_gb'], instance.ephemeral_gb)
- self.assertEqual(new_flavor['id'], instance.instance_type_id)
- self.assertEqual(new_flavor['id'], instance.instance_type_id)
-
- def test_rebuild_instance_notification(self):
- # Ensure notifications on instance migrate/resize.
- old_time = datetime.datetime(2012, 4, 1)
- cur_time = datetime.datetime(2012, 12, 21, 12, 21)
- timeutils.set_time_override(old_time)
- inst_ref = self._create_fake_instance_obj()
- self.compute.run_instance(self.context, inst_ref, {}, {}, None, None,
- None, True, None, False)
- timeutils.set_time_override(cur_time)
-
- fake_notifier.NOTIFICATIONS = []
- instance = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
- orig_sys_metadata = db.instance_system_metadata_get(self.context,
- inst_ref['uuid'])
- image_ref = instance["image_ref"]
- new_image_ref = image_ref + '-new_image_ref'
- db.instance_update(self.context, inst_ref['uuid'],
- {'image_ref': new_image_ref})
-
- password = "new_password"
-
- inst_ref.task_state = task_states.REBUILDING
- inst_ref.save()
- self.compute.rebuild_instance(self.context,
- inst_ref,
- image_ref, new_image_ref,
- injected_files=[],
- new_pass=password,
- orig_sys_metadata=orig_sys_metadata,
- bdms=[], recreate=False,
- on_shared_storage=False)
-
- inst_ref.refresh()
-
- image_ref_url = glance.generate_image_url(image_ref)
- new_image_ref_url = glance.generate_image_url(new_image_ref)
-
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type,
- 'compute.instance.exists')
- self.assertEqual(msg.payload['image_ref_url'], image_ref_url)
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.event_type,
- 'compute.instance.rebuild.start')
- self.assertEqual(msg.payload['image_ref_url'], new_image_ref_url)
- self.assertEqual(msg.payload['image_name'], 'fake_name')
- msg = fake_notifier.NOTIFICATIONS[2]
- self.assertEqual(msg.event_type,
- 'compute.instance.rebuild.end')
- self.assertEqual(msg.priority, 'INFO')
- payload = msg.payload
- self.assertEqual(payload['image_name'], 'fake_name')
- self.assertEqual(payload['tenant_id'], self.project_id)
- self.assertEqual(payload['user_id'], self.user_id)
- self.assertEqual(payload['instance_id'], inst_ref['uuid'])
- self.assertEqual(payload['instance_type'], 'm1.tiny')
- type_id = flavors.get_flavor_by_name('m1.tiny')['id']
- self.assertEqual(str(payload['instance_type_id']), str(type_id))
- flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
- self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
- self.assertIn('display_name', payload)
- self.assertIn('created_at', payload)
- self.assertIn('launched_at', payload)
- self.assertEqual(payload['launched_at'], timeutils.strtime(cur_time))
- self.assertEqual(payload['image_ref_url'], new_image_ref_url)
- self.compute.terminate_instance(self.context, inst_ref, [], [])
-
- def test_finish_resize_instance_notification(self):
- # Ensure notifications on instance migrate/resize.
- old_time = datetime.datetime(2012, 4, 1)
- cur_time = datetime.datetime(2012, 12, 21, 12, 21)
- timeutils.set_time_override(old_time)
- instance = self._create_fake_instance_obj()
- new_type = flavors.get_flavor_by_name('m1.small')
- new_type = jsonutils.to_primitive(new_type)
- new_type_id = new_type['id']
- flavor_id = new_type['flavorid']
- instance_p = obj_base.obj_to_primitive(instance)
- self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
- None, True, None, False)
-
- instance.host = 'foo'
- instance.task_state = task_states.RESIZE_PREP
- instance.save()
-
- self.compute.prep_resize(self.context, instance=instance,
- instance_type=new_type, image={}, reservations=[],
- request_spec={}, filter_properties={}, node=None)
-
- self._stub_out_resize_network_methods()
-
- migration = objects.Migration.get_by_instance_and_status(
- self.context.elevated(),
- instance.uuid, 'pre-migrating')
- self.compute.resize_instance(self.context, instance=instance,
- migration=migration, image={}, instance_type=new_type,
- reservations=[])
- timeutils.set_time_override(cur_time)
- fake_notifier.NOTIFICATIONS = []
-
- self.compute.finish_resize(self.context,
- migration=migration, reservations=[],
- disk_info={}, image={}, instance=instance)
-
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type,
- 'compute.instance.finish_resize.start')
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.event_type,
- 'compute.instance.finish_resize.end')
- self.assertEqual(msg.priority, 'INFO')
- payload = msg.payload
- self.assertEqual(payload['tenant_id'], self.project_id)
- self.assertEqual(payload['user_id'], self.user_id)
- self.assertEqual(payload['instance_id'], instance.uuid)
- self.assertEqual(payload['instance_type'], 'm1.small')
- self.assertEqual(str(payload['instance_type_id']), str(new_type_id))
- self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
- self.assertIn('display_name', payload)
- self.assertIn('created_at', payload)
- self.assertIn('launched_at', payload)
- self.assertEqual(payload['launched_at'], timeutils.strtime(cur_time))
- image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
- self.assertEqual(payload['image_ref_url'], image_ref_url)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_resize_instance_notification(self):
- # Ensure notifications on instance migrate/resize.
- old_time = datetime.datetime(2012, 4, 1)
- cur_time = datetime.datetime(2012, 12, 21, 12, 21)
- timeutils.set_time_override(old_time)
- instance = self._create_fake_instance_obj()
-
- self.compute.run_instance(self.context, instance, {}, {}, None, None,
- None, True, None, False)
- timeutils.set_time_override(cur_time)
- fake_notifier.NOTIFICATIONS = []
-
- instance.host = 'foo'
- instance.task_state = task_states.RESIZE_PREP
- instance.save()
-
- instance_type = flavors.get_default_flavor()
- self.compute.prep_resize(self.context, instance=instance,
- instance_type=instance_type, image={}, reservations=[],
- request_spec={}, filter_properties={}, node=None)
- db.migration_get_by_instance_and_status(self.context.elevated(),
- instance.uuid,
- 'pre-migrating')
-
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type,
- 'compute.instance.exists')
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.event_type,
- 'compute.instance.resize.prep.start')
- msg = fake_notifier.NOTIFICATIONS[2]
- self.assertEqual(msg.event_type,
- 'compute.instance.resize.prep.end')
- self.assertEqual(msg.priority, 'INFO')
- payload = msg.payload
- self.assertEqual(payload['tenant_id'], self.project_id)
- self.assertEqual(payload['user_id'], self.user_id)
- self.assertEqual(payload['instance_id'], instance.uuid)
- self.assertEqual(payload['instance_type'], 'm1.tiny')
- type_id = flavors.get_flavor_by_name('m1.tiny')['id']
- self.assertEqual(str(payload['instance_type_id']), str(type_id))
- flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
- self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
- self.assertIn('display_name', payload)
- self.assertIn('created_at', payload)
- self.assertIn('launched_at', payload)
- image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
- self.assertEqual(payload['image_ref_url'], image_ref_url)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_prep_resize_instance_migration_error_on_same_host(self):
- """Ensure prep_resize raise a migration error if destination is set on
- the same source host and allow_resize_to_same_host is false
- """
- self.flags(host="foo", allow_resize_to_same_host=False)
-
- instance = self._create_fake_instance_obj()
-
- reservations = self._ensure_quota_reservations_rolledback(instance)
-
- instance_p = obj_base.obj_to_primitive(instance)
- self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
- None, True, None, False)
- instance.host = self.compute.host
- instance.save()
- instance_type = flavors.get_default_flavor()
-
- self.assertRaises(exception.MigrationError, self.compute.prep_resize,
- self.context, instance=instance,
- instance_type=instance_type, image={},
- reservations=reservations, request_spec={},
- filter_properties={}, node=None)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_prep_resize_instance_migration_error_on_none_host(self):
- """Ensure prep_resize raises a migration error if destination host is
- not defined
- """
- instance = self._create_fake_instance_obj()
-
- reservations = self._ensure_quota_reservations_rolledback(instance)
-
- instance_p = obj_base.obj_to_primitive(instance)
- self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
- None, True, None, False)
- instance.host = None
- instance.save()
- instance_type = flavors.get_default_flavor()
-
- self.assertRaises(exception.MigrationError, self.compute.prep_resize,
- self.context, instance=instance,
- instance_type=instance_type, image={},
- reservations=reservations, request_spec={},
- filter_properties={}, node=None)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_resize_instance_driver_error(self):
- # Ensure instance status set to Error on resize error.
-
- def throw_up(*args, **kwargs):
- raise test.TestingException()
-
- self.stubs.Set(self.compute.driver, 'migrate_disk_and_power_off',
- throw_up)
-
- instance = self._create_fake_instance_obj()
- instance_type = flavors.get_default_flavor()
-
- reservations = self._ensure_quota_reservations_rolledback(instance)
-
- instance_p = obj_base.obj_to_primitive(instance)
- self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
- None, True, None, False)
- instance.host = 'foo'
- instance.save()
- self.compute.prep_resize(self.context, instance=instance,
- instance_type=instance_type, image={},
- reservations=reservations, request_spec={},
- filter_properties={}, node=None)
- instance.task_state = task_states.RESIZE_PREP
- instance.save()
- migration = objects.Migration.get_by_instance_and_status(
- self.context.elevated(),
- instance.uuid, 'pre-migrating')
-
- # verify
- self.assertRaises(test.TestingException, self.compute.resize_instance,
- self.context, instance=instance,
- migration=migration, image={},
- reservations=reservations,
- instance_type=jsonutils.to_primitive(instance_type))
- # NOTE(comstud): error path doesn't use objects, so our object
- # is not updated. Refresh and compare against the DB.
- instance.refresh()
- self.assertEqual(instance.vm_state, vm_states.ERROR)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_resize_instance_driver_rollback(self):
- # Ensure instance status set to Running after rollback.
-
- def throw_up(*args, **kwargs):
- raise exception.InstanceFaultRollback(test.TestingException())
-
- self.stubs.Set(self.compute.driver, 'migrate_disk_and_power_off',
- throw_up)
-
- instance = self._create_fake_instance_obj()
- instance_type = flavors.get_default_flavor()
- reservations = self._ensure_quota_reservations_rolledback(instance)
- instance_p = obj_base.obj_to_primitive(instance)
- self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
- None, True, None, False)
- instance.host = 'foo'
- instance.save()
- self.compute.prep_resize(self.context, instance=instance,
- instance_type=instance_type, image={},
- reservations=reservations, request_spec={},
- filter_properties={}, node=None)
- instance.task_state = task_states.RESIZE_PREP
- instance.save()
-
- migration = objects.Migration.get_by_instance_and_status(
- self.context.elevated(),
- instance.uuid, 'pre-migrating')
-
- self.assertRaises(test.TestingException, self.compute.resize_instance,
- self.context, instance=instance,
- migration=migration, image={},
- reservations=reservations,
- instance_type=jsonutils.to_primitive(instance_type))
- # NOTE(comstud): error path doesn't use objects, so our object
- # is not updated. Refresh and compare against the DB.
- instance.refresh()
- self.assertEqual(instance.vm_state, vm_states.ACTIVE)
- self.assertIsNone(instance.task_state)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def _test_resize_instance(self, clean_shutdown=True):
- # Ensure instance can be migrated/resized.
- instance = self._create_fake_instance_obj()
- instance_type = flavors.get_default_flavor()
-
- instance_p = obj_base.obj_to_primitive(instance)
- self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
- None, True, None, False)
- instance.host = 'foo'
- instance.save()
- self.compute.prep_resize(self.context, instance=instance,
- instance_type=instance_type, image={}, reservations=[],
- request_spec={}, filter_properties={}, node=None)
-
- # verify 'old_vm_state' was set on system_metadata
- instance.refresh()
- sys_meta = instance.system_metadata
- self.assertEqual(vm_states.ACTIVE, sys_meta['old_vm_state'])
-
- self._stub_out_resize_network_methods()
-
- instance.task_state = task_states.RESIZE_PREP
- instance.save()
-
- migration = objects.Migration.get_by_instance_and_status(
- self.context.elevated(),
- instance.uuid, 'pre-migrating')
-
- with contextlib.nested(
- mock.patch.object(objects.BlockDeviceMappingList,
- 'get_by_instance_uuid', return_value='fake_bdms'),
- mock.patch.object(
- self.compute, '_get_instance_block_device_info',
- return_value='fake_bdinfo'),
- mock.patch.object(self.compute, '_terminate_volume_connections'),
- mock.patch.object(self.compute, '_get_power_off_values',
- return_value=(1, 2))
- ) as (mock_get_by_inst_uuid, mock_get_instance_vol_bdinfo,
- mock_terminate_vol_conn, mock_get_power_off_values):
- self.compute.resize_instance(self.context, instance=instance,
- migration=migration, image={}, reservations=[],
- instance_type=jsonutils.to_primitive(instance_type),
- clean_shutdown=clean_shutdown)
- mock_get_instance_vol_bdinfo.assert_called_once_with(
- self.context, instance, bdms='fake_bdms')
- mock_terminate_vol_conn.assert_called_once_with(self.context,
- instance, 'fake_bdms')
-            mock_get_power_off_values.assert_called_once_with(self.context,
-                instance, clean_shutdown)
- self.assertEqual(migration.dest_compute, instance.host)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_resize_instance(self):
- self._test_resize_instance()
-
- def test_resize_instance_forced_shutdown(self):
- self._test_resize_instance(clean_shutdown=False)
-
- def _test_confirm_resize(self, power_on):
- # Common test case method for confirm_resize
- def fake(*args, **kwargs):
- pass
-
- def fake_confirm_migration_driver(*args, **kwargs):
- # Confirm the instance uses the new type in finish_resize
- inst = args[1]
- sys_meta = inst['system_metadata']
- self.assertEqual(sys_meta['instance_type_flavorid'], '3')
-
- old_vm_state = None
- p_state = None
- if power_on:
- old_vm_state = vm_states.ACTIVE
- p_state = power_state.RUNNING
- else:
- old_vm_state = vm_states.STOPPED
- p_state = power_state.SHUTDOWN
- params = {'vm_state': old_vm_state, 'power_state': p_state}
- instance = self._create_fake_instance_obj(params)
-
- self.flags(allow_resize_to_same_host=True)
- self.stubs.Set(self.compute.driver, 'finish_migration', fake)
- self.stubs.Set(self.compute.driver, 'confirm_migration',
- fake_confirm_migration_driver)
-
- self._stub_out_resize_network_methods()
-
- reservations = self._ensure_quota_reservations_committed(instance)
-
- instance_p = obj_base.obj_to_primitive(instance)
- self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
- None, True, None, False)
-
- # Confirm the instance size before the resize starts
- instance.refresh()
- instance_type_ref = db.flavor_get(self.context,
- instance.instance_type_id)
- self.assertEqual(instance_type_ref['flavorid'], '1')
-
- instance.vm_state = old_vm_state
- instance.power_state = p_state
- instance.save()
-
- new_instance_type_ref = db.flavor_get_by_flavor_id(
- self.context, 3)
- new_instance_type_p = jsonutils.to_primitive(new_instance_type_ref)
- self.compute.prep_resize(self.context,
- instance=instance,
- instance_type=new_instance_type_p,
- image={}, reservations=reservations, request_spec={},
- filter_properties={}, node=None)
-
- migration = objects.Migration.get_by_instance_and_status(
- self.context.elevated(),
- instance.uuid, 'pre-migrating')
-
- # NOTE(mriedem): ensure prep_resize set old_vm_state in system_metadata
- sys_meta = instance.system_metadata
- self.assertEqual(old_vm_state, sys_meta['old_vm_state'])
- instance.task_state = task_states.RESIZE_PREP
- instance.save()
- self.compute.resize_instance(self.context, instance=instance,
- migration=migration,
- image={},
- reservations=[],
- instance_type=new_instance_type_p)
- self.compute.finish_resize(self.context,
- migration=migration, reservations=[],
- disk_info={}, image={}, instance=instance)
-
- # Prove that the instance size is now the new size
- instance_type_ref = db.flavor_get(self.context,
- instance.instance_type_id)
- self.assertEqual(instance_type_ref['flavorid'], '3')
-
- # Finally, confirm the resize and verify the new flavor is applied
- instance.task_state = None
- instance.save()
- self.compute.confirm_resize(self.context, instance=instance,
- reservations=reservations,
- migration=migration)
-
- instance.refresh()
-
- instance_type_ref = db.flavor_get(self.context,
- instance.instance_type_id)
- self.assertEqual(instance_type_ref['flavorid'], '3')
- self.assertEqual('fake-mini', migration.source_compute)
- self.assertEqual(old_vm_state, instance.vm_state)
- self.assertIsNone(instance.task_state)
- self.assertEqual(p_state, instance.power_state)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_confirm_resize_from_active(self):
- self._test_confirm_resize(power_on=True)
-
- def test_confirm_resize_from_stopped(self):
- self._test_confirm_resize(power_on=False)
-
- def _test_finish_revert_resize(self, power_on,
- remove_old_vm_state=False):
-        """Convenience method that does most of the work for the
-        test_finish_revert_resize tests.
-
-        :param power_on: True if testing resize from ACTIVE state, False if
-            testing resize from STOPPED state.
-        :param remove_old_vm_state: True if testing a case where the
-            'old_vm_state' system_metadata is not present when the
-            finish_revert_resize method is called.
-        """
- def fake(*args, **kwargs):
- pass
-
- def fake_finish_revert_migration_driver(*args, **kwargs):
- # Confirm the instance uses the old type in finish_revert_resize
- inst = args[1]
- sys_meta = inst.system_metadata
- self.assertEqual(sys_meta['instance_type_flavorid'], '1')
-
- old_vm_state = None
- if power_on:
- old_vm_state = vm_states.ACTIVE
- else:
- old_vm_state = vm_states.STOPPED
- params = {'vm_state': old_vm_state}
- instance = self._create_fake_instance_obj(params)
-
- self.stubs.Set(self.compute.driver, 'finish_migration', fake)
- self.stubs.Set(self.compute.driver, 'finish_revert_migration',
- fake_finish_revert_migration_driver)
-
- self._stub_out_resize_network_methods()
-
- reservations = self._ensure_quota_reservations_committed(instance)
-
- instance_p = obj_base.obj_to_primitive(instance)
- self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
- None, True, None, False)
-
- instance.refresh()
- instance_type_ref = db.flavor_get(self.context,
- instance.instance_type_id)
- self.assertEqual(instance_type_ref['flavorid'], '1')
-
- old_vm_state = instance['vm_state']
-
- instance.host = 'foo'
- instance.vm_state = old_vm_state
- instance.save()
-
- new_instance_type_ref = db.flavor_get_by_flavor_id(
- self.context, 3)
- new_instance_type_p = jsonutils.to_primitive(new_instance_type_ref)
- self.compute.prep_resize(self.context,
- instance=instance,
- instance_type=new_instance_type_p,
- image={}, reservations=reservations, request_spec={},
- filter_properties={}, node=None)
-
- migration = objects.Migration.get_by_instance_and_status(
- self.context.elevated(),
- instance.uuid, 'pre-migrating')
-
- # NOTE(mriedem): ensure prep_resize set old_vm_state in system_metadata
- sys_meta = instance.system_metadata
- self.assertEqual(old_vm_state, sys_meta['old_vm_state'])
- instance.task_state = task_states.RESIZE_PREP
- instance.save()
- self.compute.resize_instance(self.context, instance=instance,
- migration=migration,
- image={},
- reservations=[],
- instance_type=new_instance_type_p)
- self.compute.finish_resize(self.context,
- migration=migration, reservations=[],
- disk_info={}, image={}, instance=instance)
-
- # Prove that the instance size is now the new size
- instance_type_ref = db.flavor_get(self.context,
- instance['instance_type_id'])
- self.assertEqual(instance_type_ref['flavorid'], '3')
-
- instance.task_state = task_states.RESIZE_REVERTING
- instance.save()
-
- self.compute.revert_resize(self.context,
- migration=migration, instance=instance,
- reservations=reservations)
-
- instance.refresh()
- if remove_old_vm_state:
- # need to wipe out the old_vm_state from system_metadata
- # before calling finish_revert_resize
- sys_meta = instance.system_metadata
- sys_meta.pop('old_vm_state')
- # Have to reset for save() to work
- instance.system_metadata = sys_meta
- instance.save()
-
- self.compute.finish_revert_resize(self.context,
- migration=migration,
- instance=instance, reservations=reservations)
-
- self.assertIsNone(instance.task_state)
-
- instance_type_ref = db.flavor_get(self.context,
- instance['instance_type_id'])
- self.assertEqual(instance_type_ref['flavorid'], '1')
- self.assertEqual(instance.host, migration.source_compute)
- if remove_old_vm_state:
- self.assertEqual(vm_states.ACTIVE, instance.vm_state)
- else:
- self.assertEqual(old_vm_state, instance.vm_state)
-
- def test_finish_revert_resize_from_active(self):
- self._test_finish_revert_resize(power_on=True)
-
- def test_finish_revert_resize_from_stopped(self):
- self._test_finish_revert_resize(power_on=False)
-
- def test_finish_revert_resize_from_stopped_remove_old_vm_state(self):
- # in this case we resize from STOPPED but end up with ACTIVE
- # because the old_vm_state value is not present in
- # finish_revert_resize
- self._test_finish_revert_resize(power_on=False,
- remove_old_vm_state=True)
-
- def _test_cleanup_stored_instance_types(self, old, new, revert=False):
- instance = self._create_fake_instance_obj()
- migration = dict(old_instance_type_id=old,
- new_instance_type_id=new)
- instance.system_metadata = dict(instance_type_id=old)
- sys_meta = dict(instance.system_metadata)
- self.mox.StubOutWithMock(flavors, 'extract_flavor')
- self.mox.StubOutWithMock(flavors, 'delete_flavor_info')
- self.mox.StubOutWithMock(flavors, 'save_flavor_info')
- if revert:
- flavors.extract_flavor(instance, 'old_').AndReturn(
- {'instance_type_id': old})
- flavors.extract_flavor(instance).AndReturn(
- {'instance_type_id': new})
- flavors.save_flavor_info(
- sys_meta, {'instance_type_id': old}).AndReturn(sys_meta)
- else:
- flavors.extract_flavor(instance).AndReturn(
- {'instance_type_id': new})
- flavors.extract_flavor(instance, 'old_').AndReturn(
- {'instance_type_id': old})
- flavors.delete_flavor_info(
- sys_meta, 'old_').AndReturn(sys_meta)
- flavors.delete_flavor_info(
- sys_meta, 'new_').AndReturn(sys_meta)
-
- self.mox.ReplayAll()
- res = self.compute._cleanup_stored_instance_types(migration, instance,
- revert)
- self.assertEqual(res,
- (sys_meta,
- {'instance_type_id': revert and old or new},
- {'instance_type_id': revert and new or old}))
-
- def test_cleanup_stored_instance_types_for_resize(self):
- self._test_cleanup_stored_instance_types('1', '2')
-
- def test_cleanup_stored_instance_types_for_resize_with_update(self):
- self._test_cleanup_stored_instance_types('1', '2', True)
-
- def test_cleanup_stored_instance_types_for_migration(self):
- self._test_cleanup_stored_instance_types('1', '1')
-
- def test_cleanup_stored_instance_types_for_migration_with_update(self):
- self._test_cleanup_stored_instance_types('1', '1', True)
-
- def test_get_by_flavor_id(self):
- flavor_type = flavors.get_flavor_by_flavor_id(1)
- self.assertEqual(flavor_type['name'], 'm1.tiny')
-
- def test_resize_same_source_fails(self):
- """Ensure instance fails to migrate when source and destination are
- the same host.
- """
- instance = self._create_fake_instance_obj()
- reservations = self._ensure_quota_reservations_rolledback(instance)
- instance_p = obj_base.obj_to_primitive(instance)
- self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
- None, True, None, False)
- instance.refresh()
- instance_type = flavors.get_default_flavor()
- self.assertRaises(exception.MigrationError, self.compute.prep_resize,
- self.context, instance=instance,
- instance_type=instance_type, image={},
- reservations=reservations, request_spec={},
- filter_properties={}, node=None)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_resize_instance_handles_migration_error(self):
- # Ensure vm_state is ERROR when error occurs.
- def raise_migration_failure(*args):
- raise test.TestingException()
- self.stubs.Set(self.compute.driver,
- 'migrate_disk_and_power_off',
- raise_migration_failure)
-
- instance = self._create_fake_instance_obj()
- reservations = self._ensure_quota_reservations_rolledback(instance)
-
- instance_type = flavors.get_default_flavor()
-
- instance_p = obj_base.obj_to_primitive(instance)
- self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
- None, True, None, False)
- instance.host = 'foo'
- instance.save()
- self.compute.prep_resize(self.context, instance=instance,
- instance_type=instance_type,
- image={}, reservations=reservations,
- request_spec={}, filter_properties={},
- node=None)
- migration = objects.Migration.get_by_instance_and_status(
- self.context.elevated(),
- instance.uuid, 'pre-migrating')
- instance.task_state = task_states.RESIZE_PREP
- instance.save()
- self.assertRaises(test.TestingException, self.compute.resize_instance,
- self.context, instance=instance,
- migration=migration, image={},
- reservations=reservations,
- instance_type=jsonutils.to_primitive(instance_type))
- # NOTE(comstud): error path doesn't use objects, so our object
- # is not updated. Refresh and compare against the DB.
- instance.refresh()
- self.assertEqual(instance.vm_state, vm_states.ERROR)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_pre_live_migration_instance_has_no_fixed_ip(self):
- # Confirm that no exception is raised if there is no fixed ip on
- # pre_live_migration
- instance = self._create_fake_instance_obj()
- c = context.get_admin_context()
-
- self.mox.ReplayAll()
- self.compute.driver.pre_live_migration(mox.IsA(c), mox.IsA(instance),
- {'block_device_mapping': []},
- mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg())
-
- def test_pre_live_migration_works_correctly(self):
- # Confirm setup_compute_volume is called when volume is mounted.
- def stupid(*args, **kwargs):
- return fake_network.fake_get_instance_nw_info(self.stubs)
- self.stubs.Set(nova.compute.manager.ComputeManager,
- '_get_instance_nw_info', stupid)
-
- # creating instance testdata
- instance = self._create_fake_instance_obj({'host': 'dummy'})
- c = context.get_admin_context()
- nw_info = fake_network.fake_get_instance_nw_info(self.stubs)
-
- # creating mocks
- self.mox.StubOutWithMock(self.compute.driver, 'pre_live_migration')
- self.compute.driver.pre_live_migration(mox.IsA(c), mox.IsA(instance),
- {'swap': None, 'ephemerals': [],
- 'block_device_mapping': []},
- mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg())
- self.mox.StubOutWithMock(self.compute.driver,
- 'ensure_filtering_rules_for_instance')
- self.compute.driver.ensure_filtering_rules_for_instance(
- mox.IsA(instance), nw_info)
-
- self.mox.StubOutWithMock(self.compute.network_api,
- 'setup_networks_on_host')
- self.compute.network_api.setup_networks_on_host(c, instance,
- self.compute.host)
-
- fake_notifier.NOTIFICATIONS = []
- # start test
- self.mox.ReplayAll()
- migrate_data = {'is_shared_instance_path': False}
- ret = self.compute.pre_live_migration(c, instance=instance,
- block_migration=False, disk=None,
- migrate_data=migrate_data)
- self.assertIsNone(ret)
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type,
- 'compute.instance.live_migration.pre.start')
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.event_type,
- 'compute.instance.live_migration.pre.end')
-
- # cleanup
- db.instance_destroy(c, instance['uuid'])
-
- def test_live_migration_exception_rolls_back(self):
- # Confirm exception when pre_live_migration fails.
- c = context.get_admin_context()
-
- instance = self._create_fake_instance_obj(
- {'host': 'src_host',
- 'task_state': task_states.MIGRATING})
- updated_instance = self._create_fake_instance_obj(
- {'host': 'fake-dest-host'})
- dest_host = updated_instance['host']
- fake_bdms = [
- objects.BlockDeviceMapping(
- **fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': 'vol1-id', 'source_type': 'volume',
- 'destination_type': 'volume'})),
- objects.BlockDeviceMapping(
- **fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': 'vol2-id', 'source_type': 'volume',
- 'destination_type': 'volume'}))
- ]
-
- # creating mocks
- self.mox.StubOutWithMock(self.compute.driver,
- 'get_instance_disk_info')
- self.mox.StubOutWithMock(self.compute.compute_rpcapi,
- 'pre_live_migration')
- self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
- 'get_by_instance_uuid')
- self.mox.StubOutWithMock(self.compute.network_api,
- 'setup_networks_on_host')
- self.mox.StubOutWithMock(self.compute.compute_rpcapi,
- 'remove_volume_connection')
- self.mox.StubOutWithMock(self.compute.compute_rpcapi,
- 'rollback_live_migration_at_destination')
-
- block_device_info = {
- 'swap': None, 'ephemerals': [], 'block_device_mapping': []}
- self.compute.driver.get_instance_disk_info(
- instance.name,
- block_device_info=block_device_info).AndReturn('fake_disk')
- self.compute.compute_rpcapi.pre_live_migration(c,
- instance, True, 'fake_disk', dest_host,
- {}).AndRaise(test.TestingException())
-
- self.compute.network_api.setup_networks_on_host(c,
- instance, self.compute.host)
- objects.BlockDeviceMappingList.get_by_instance_uuid(c,
- instance.uuid).MultipleTimes().AndReturn(fake_bdms)
- self.compute.compute_rpcapi.remove_volume_connection(
- c, instance, 'vol1-id', dest_host)
- self.compute.compute_rpcapi.remove_volume_connection(
- c, instance, 'vol2-id', dest_host)
- self.compute.compute_rpcapi.rollback_live_migration_at_destination(
- c, instance, dest_host, destroy_disks=True, migrate_data={})
-
- # start test
- self.mox.ReplayAll()
- self.assertRaises(test.TestingException,
- self.compute.live_migration,
- c, dest=dest_host, block_migration=True,
- instance=instance, migrate_data={})
- instance.refresh()
- self.assertEqual('src_host', instance.host)
- self.assertEqual(vm_states.ACTIVE, instance.vm_state)
- self.assertIsNone(instance.task_state)
-
- def test_live_migration_works_correctly(self):
- # Confirm live_migration() works as expected correctly.
- # creating instance testdata
- c = context.get_admin_context()
- instance = self._create_fake_instance_obj()
- instance.host = self.compute.host
- dest = 'desthost'
-
- migrate_data = {'is_shared_instance_path': False}
-
- self.mox.StubOutWithMock(self.compute.compute_rpcapi,
- 'pre_live_migration')
- self.compute.compute_rpcapi.pre_live_migration(
- c, instance, False, None, dest, migrate_data)
-
- self.mox.StubOutWithMock(self.compute.network_api,
- 'migrate_instance_start')
- migration = {'source_compute': instance['host'], 'dest_compute': dest}
- self.compute.network_api.migrate_instance_start(c, instance,
- migration)
- self.mox.StubOutWithMock(self.compute.compute_rpcapi,
- 'post_live_migration_at_destination')
- self.compute.compute_rpcapi.post_live_migration_at_destination(
- c, instance, False, dest)
-
- self.mox.StubOutWithMock(self.compute.network_api,
- 'setup_networks_on_host')
- self.compute.network_api.setup_networks_on_host(c, instance,
- instance['host'],
- teardown=True)
- self.mox.StubOutWithMock(self.compute.instance_events,
- 'clear_events_for_instance')
- self.compute.instance_events.clear_events_for_instance(
- mox.IgnoreArg())
-
- # start test
- self.mox.ReplayAll()
-
- ret = self.compute.live_migration(c, dest=dest,
- instance=instance,
- block_migration=False,
- migrate_data=migrate_data)
- self.assertIsNone(ret)
-
- # cleanup
- instance.destroy(c)
-
- def test_post_live_migration_no_shared_storage_working_correctly(self):
- """Confirm post_live_migration() works correctly as expected
- for non shared storage migration.
- """
- # Create stubs
- result = {}
-        # Non-shared-storage live migration does not need to destroy the
-        # instance at the source server (it has already been migrated to the
-        # destination), but cleanup of block devices and networking is needed.
-
- def fakecleanup(*args, **kwargs):
- result['cleanup'] = True
-
- self.stubs.Set(self.compute.driver, 'cleanup', fakecleanup)
- dest = 'desthost'
- srchost = self.compute.host
-
- # creating testdata
- c = context.get_admin_context()
- instance = self._create_fake_instance_obj({
- 'host': srchost,
- 'state_description': 'migrating',
- 'state': power_state.PAUSED,
- 'task_state': task_states.MIGRATING,
- 'power_state': power_state.PAUSED})
-
- # creating mocks
- self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
- self.compute.driver.unfilter_instance(instance, [])
- self.mox.StubOutWithMock(self.compute.network_api,
- 'migrate_instance_start')
- migration = {'source_compute': srchost, 'dest_compute': dest, }
- self.compute.network_api.migrate_instance_start(c, instance,
- migration)
-
- self.mox.StubOutWithMock(self.compute.compute_rpcapi,
- 'post_live_migration_at_destination')
- self.compute.compute_rpcapi.post_live_migration_at_destination(
- c, instance, False, dest)
-
- self.mox.StubOutWithMock(self.compute.network_api,
- 'setup_networks_on_host')
- self.compute.network_api.setup_networks_on_host(c, instance,
- self.compute.host,
- teardown=True)
- self.mox.StubOutWithMock(self.compute.instance_events,
- 'clear_events_for_instance')
- self.compute.instance_events.clear_events_for_instance(
- mox.IgnoreArg())
-
- # start test
- self.mox.ReplayAll()
- migrate_data = {'is_shared_instance_path': False}
- self.compute._post_live_migration(c, instance, dest,
- migrate_data=migrate_data)
- self.assertIn('cleanup', result)
- self.assertEqual(result['cleanup'], True)
-
- def test_post_live_migration_working_correctly(self):
- # Confirm post_live_migration() works as expected correctly.
- dest = 'desthost'
- srchost = self.compute.host
-
- # creating testdata
- c = context.get_admin_context()
- instance = self._create_fake_instance_obj({
- 'host': srchost,
- 'state_description': 'migrating',
- 'state': power_state.PAUSED})
-
- instance.update({'task_state': task_states.MIGRATING,
- 'power_state': power_state.PAUSED})
- instance.save(c)
-
- # creating mocks
- with contextlib.nested(
- mock.patch.object(self.compute.driver, 'post_live_migration'),
- mock.patch.object(self.compute.driver, 'unfilter_instance'),
- mock.patch.object(self.compute.network_api,
- 'migrate_instance_start'),
- mock.patch.object(self.compute.compute_rpcapi,
- 'post_live_migration_at_destination'),
- mock.patch.object(self.compute.driver,
- 'post_live_migration_at_source'),
- mock.patch.object(self.compute.network_api,
- 'setup_networks_on_host'),
- mock.patch.object(self.compute.instance_events,
- 'clear_events_for_instance'),
- mock.patch.object(self.compute, 'update_available_resource')
- ) as (
- post_live_migration, unfilter_instance,
- migrate_instance_start, post_live_migration_at_destination,
- post_live_migration_at_source, setup_networks_on_host,
- clear_events, update_available_resource
- ):
- self.compute._post_live_migration(c, instance, dest)
-
- post_live_migration.assert_has_calls([
- mock.call(c, instance, {'swap': None, 'ephemerals': [],
- 'block_device_mapping': []}, None)])
- unfilter_instance.assert_has_calls([mock.call(instance, [])])
- migration = {'source_compute': srchost,
- 'dest_compute': dest, }
- migrate_instance_start.assert_has_calls([
- mock.call(c, instance, migration)])
- post_live_migration_at_destination.assert_has_calls([
- mock.call(c, instance, False, dest)])
- post_live_migration_at_source.assert_has_calls(
- [mock.call(c, instance, [])])
- setup_networks_on_host.assert_has_calls([
- mock.call(c, instance, self.compute.host, teardown=True)])
- clear_events.assert_called_once_with(instance)
- update_available_resource.assert_has_calls([mock.call(c)])
-
- def test_post_live_migration_terminate_volume_connections(self):
- c = context.get_admin_context()
- instance = self._create_fake_instance_obj({
- 'host': self.compute.host,
- 'state_description': 'migrating',
- 'state': power_state.PAUSED})
- instance.update({'task_state': task_states.MIGRATING,
- 'power_state': power_state.PAUSED})
- instance.save(c)
-
- bdms = block_device_obj.block_device_make_list(c,
- [fake_block_device.FakeDbBlockDeviceDict({
- 'source_type': 'blank', 'guest_format': None,
- 'destination_type': 'local'}),
- fake_block_device.FakeDbBlockDeviceDict({
- 'source_type': 'volume', 'destination_type': 'volume',
- 'volume_id': 'fake-volume-id'}),
- ])
-
- with contextlib.nested(
- mock.patch.object(self.compute.network_api,
- 'migrate_instance_start'),
- mock.patch.object(self.compute.compute_rpcapi,
- 'post_live_migration_at_destination'),
- mock.patch.object(self.compute.network_api,
- 'setup_networks_on_host'),
- mock.patch.object(self.compute.instance_events,
- 'clear_events_for_instance'),
- mock.patch.object(self.compute,
- '_get_instance_block_device_info'),
- mock.patch.object(objects.BlockDeviceMappingList,
- 'get_by_instance_uuid'),
- mock.patch.object(self.compute.driver, 'get_volume_connector'),
- mock.patch.object(cinder.API, 'terminate_connection')
- ) as (
- migrate_instance_start, post_live_migration_at_destination,
- setup_networks_on_host, clear_events_for_instance,
- get_instance_volume_block_device_info, get_by_instance_uuid,
- get_volume_connector, terminate_connection
- ):
- get_by_instance_uuid.return_value = bdms
- get_volume_connector.return_value = 'fake-connector'
-
- self.compute._post_live_migration(c, instance, 'dest_host')
-
- terminate_connection.assert_called_once_with(
- c, 'fake-volume-id', 'fake-connector')
-
- def _begin_post_live_migration_at_destination(self):
- self.mox.StubOutWithMock(self.compute.network_api,
- 'setup_networks_on_host')
- self.mox.StubOutWithMock(self.compute.network_api,
- 'migrate_instance_finish')
- self.mox.StubOutWithMock(self.compute, '_get_power_state')
- self.mox.StubOutWithMock(self.compute, '_get_compute_info')
-
- params = {'task_state': task_states.MIGRATING,
- 'power_state': power_state.PAUSED, }
- self.instance = self._create_fake_instance_obj(params)
-
- self.admin_ctxt = context.get_admin_context()
- self.instance = objects.Instance._from_db_object(self.context,
- objects.Instance(),
- db.instance_get_by_uuid(self.admin_ctxt, self.instance['uuid']))
-
- self.compute.network_api.setup_networks_on_host(self.admin_ctxt,
- self.instance,
- self.compute.host)
- migration = {'source_compute': self.instance['host'],
- 'dest_compute': self.compute.host, }
- self.compute.network_api.migrate_instance_finish(
- self.admin_ctxt, self.instance, migration)
- fake_net_info = []
- fake_block_dev_info = {'foo': 'bar'}
- self.compute.driver.post_live_migration_at_destination(self.admin_ctxt,
- self.instance,
- fake_net_info,
- False,
- fake_block_dev_info)
- self.compute._get_power_state(self.admin_ctxt,
- self.instance).AndReturn(10001)
-
- def _finish_post_live_migration_at_destination(self):
- self.compute.network_api.setup_networks_on_host(self.admin_ctxt,
- mox.IgnoreArg(), self.compute.host)
-
- fake_notifier.NOTIFICATIONS = []
- self.mox.ReplayAll()
-
- self.compute.post_live_migration_at_destination(self.admin_ctxt,
- self.instance, False)
-
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type,
- 'compute.instance.live_migration.post.dest.start')
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.event_type,
- 'compute.instance.live_migration.post.dest.end')
-
- return objects.Instance.get_by_uuid(self.admin_ctxt,
- self.instance['uuid'])
-
- def test_post_live_migration_at_destination_with_compute_info(self):
- """The instance's node property should be updated correctly."""
- self._begin_post_live_migration_at_destination()
- hypervisor_hostname = 'fake_hypervisor_hostname'
- fake_compute_info = objects.ComputeNode(
- hypervisor_hostname=hypervisor_hostname)
- self.compute._get_compute_info(mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(
- fake_compute_info)
- updated = self._finish_post_live_migration_at_destination()
- self.assertEqual(updated['node'], hypervisor_hostname)
-
- def test_post_live_migration_at_destination_without_compute_info(self):
- """The instance's node property should be set to None if we fail to
- get compute_info.
- """
- self._begin_post_live_migration_at_destination()
- self.compute._get_compute_info(mox.IgnoreArg(),
- mox.IgnoreArg()).AndRaise(
- exception.NotFound())
- updated = self._finish_post_live_migration_at_destination()
- self.assertIsNone(updated['node'])
-
- def test_rollback_live_migration_at_destination_correctly(self):
- # creating instance testdata
- c = context.get_admin_context()
- instance = self._create_fake_instance_obj({'host': 'dummy'})
-
- fake_notifier.NOTIFICATIONS = []
-
- self.mox.StubOutWithMock(self.compute.network_api,
- 'setup_networks_on_host')
- self.compute.network_api.setup_networks_on_host(c, instance,
- self.compute.host,
- teardown=True)
- self.mox.StubOutWithMock(self.compute.driver,
- 'rollback_live_migration_at_destination')
- self.compute.driver.rollback_live_migration_at_destination(c,
- instance, [], {'swap': None, 'ephemerals': [],
- 'block_device_mapping': []},
- destroy_disks=True, migrate_data=None)
-
- # start test
- self.mox.ReplayAll()
- ret = self.compute.rollback_live_migration_at_destination(c,
- instance=instance)
- self.assertIsNone(ret)
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type,
- 'compute.instance.live_migration.rollback.dest.start')
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.event_type,
- 'compute.instance.live_migration.rollback.dest.end')
-
- def test_run_kill_vm(self):
- # Detect when a vm is terminated behind the scenes.
- instance = self._create_fake_instance_obj()
-
- self.compute.run_instance(self.context, instance, {}, {}, None, None,
- None, True, None, False)
-
- instances = db.instance_get_all(self.context)
- LOG.info("Running instances: %s", instances)
- self.assertEqual(len(instances), 1)
-
- instance_name = instances[0]['name']
- self.compute.driver.test_remove_vm(instance_name)
-
- # Force the compute manager to do its periodic poll
- ctxt = context.get_admin_context()
- self.compute._sync_power_states(ctxt)
-
- instances = db.instance_get_all(self.context)
- LOG.info("After force-killing instances: %s", instances)
- self.assertEqual(len(instances), 1)
- self.assertIsNone(instances[0]['task_state'])
-
- def _fill_fault(self, values):
- extra = dict([(x, None) for x in ['created_at',
- 'deleted_at',
- 'updated_at',
- 'deleted']])
- extra['id'] = 1
- extra['details'] = ''
- extra.update(values)
- return extra
-
- def test_add_instance_fault(self):
- instance = self._create_fake_instance()
- exc_info = None
-
- def fake_db_fault_create(ctxt, values):
- self.assertIn('raise NotImplementedError', values['details'])
- del values['details']
-
- expected = {
- 'code': 500,
- 'message': 'test',
- 'instance_uuid': instance['uuid'],
- 'host': self.compute.host
- }
- self.assertEqual(expected, values)
- return self._fill_fault(expected)
-
- try:
- raise NotImplementedError('test')
- except NotImplementedError:
- exc_info = sys.exc_info()
-
- self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
-
- ctxt = context.get_admin_context()
- compute_utils.add_instance_fault_from_exc(ctxt,
- instance,
- NotImplementedError('test'),
- exc_info)
-
- def test_add_instance_fault_with_remote_error(self):
- instance = self._create_fake_instance()
- exc_info = None
-
- def fake_db_fault_create(ctxt, values):
- self.assertIn('raise messaging.RemoteError', values['details'])
- del values['details']
-
- expected = {
- 'code': 500,
- 'instance_uuid': instance['uuid'],
- 'message': 'Remote error: test My Test Message\nNone.',
- 'host': self.compute.host
- }
- self.assertEqual(expected, values)
- return self._fill_fault(expected)
-
- try:
- raise messaging.RemoteError('test', 'My Test Message')
- except messaging.RemoteError as exc:
- exc_info = sys.exc_info()
-
- self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
-
- ctxt = context.get_admin_context()
- compute_utils.add_instance_fault_from_exc(ctxt,
- instance, exc, exc_info)
-
- def test_add_instance_fault_user_error(self):
- instance = self._create_fake_instance()
- exc_info = None
-
- def fake_db_fault_create(ctxt, values):
-
- expected = {
- 'code': 400,
- 'message': 'fake details',
- 'details': '',
- 'instance_uuid': instance['uuid'],
- 'host': self.compute.host
- }
- self.assertEqual(expected, values)
- return self._fill_fault(expected)
-
- user_exc = exception.Invalid('fake details', code=400)
-
- try:
- raise user_exc
- except exception.Invalid:
- exc_info = sys.exc_info()
-
- self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
-
- ctxt = context.get_admin_context()
- compute_utils.add_instance_fault_from_exc(ctxt,
- instance, user_exc, exc_info)
-
- def test_add_instance_fault_no_exc_info(self):
- instance = self._create_fake_instance()
-
- def fake_db_fault_create(ctxt, values):
- expected = {
- 'code': 500,
- 'message': 'test',
- 'details': '',
- 'instance_uuid': instance['uuid'],
- 'host': self.compute.host
- }
- self.assertEqual(expected, values)
- return self._fill_fault(expected)
-
- self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
-
- ctxt = context.get_admin_context()
- compute_utils.add_instance_fault_from_exc(ctxt,
- instance,
- NotImplementedError('test'))
-
- def test_add_instance_fault_long_message(self):
- instance = self._create_fake_instance()
-
- message = 300 * 'a'
-
- def fake_db_fault_create(ctxt, values):
- expected = {
- 'code': 500,
- 'message': message[:255],
- 'details': '',
- 'instance_uuid': instance['uuid'],
- 'host': self.compute.host
- }
- self.assertEqual(expected, values)
- return self._fill_fault(expected)
-
- self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
-
- ctxt = context.get_admin_context()
- compute_utils.add_instance_fault_from_exc(ctxt,
- instance,
- NotImplementedError(message))
-
- def _test_cleanup_running(self, action):
- admin_context = context.get_admin_context()
- deleted_at = (timeutils.utcnow() -
- datetime.timedelta(hours=1, minutes=5))
- instance1 = self._create_fake_instance_obj({"deleted_at": deleted_at,
- "deleted": True})
- instance2 = self._create_fake_instance_obj({"deleted_at": deleted_at,
- "deleted": True})
-
- self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
- self.compute._get_instances_on_driver(
- admin_context, {'deleted': True,
- 'soft_deleted': False,
- 'host': self.compute.host}).AndReturn([instance1,
- instance2])
- self.flags(running_deleted_instance_timeout=3600,
- running_deleted_instance_action=action)
-
- return admin_context, instance1, instance2
-
- def test_cleanup_running_deleted_instances_unrecognized_value(self):
- admin_context = context.get_admin_context()
- deleted_at = (timeutils.utcnow() -
- datetime.timedelta(hours=1, minutes=5))
- instance = self._create_fake_instance_obj({"deleted_at": deleted_at,
- "deleted": True})
- self.flags(running_deleted_instance_action='foo-action')
-
- with mock.patch.object(
- self.compute, '_get_instances_on_driver',
- return_value=[instance]):
- try:
- # We cannot simply use an assertRaises here because the
- # exception raised is too generally "Exception". To be sure
- # that the exception raised is the expected one, we check
- # the message.
- self.compute._cleanup_running_deleted_instances(admin_context)
- self.fail("Be sure this will never be executed.")
- except Exception as e:
- self.assertIn("Unrecognized value", six.text_type(e))
-
- def test_cleanup_running_deleted_instances_reap(self):
- ctxt, inst1, inst2 = self._test_cleanup_running('reap')
- bdms = block_device_obj.block_device_make_list(ctxt, [])
-
- self.mox.StubOutWithMock(self.compute, "_shutdown_instance")
- self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
- "get_by_instance_uuid")
- # Simulate an error and make sure cleanup proceeds with next instance.
- self.compute._shutdown_instance(ctxt, inst1, bdms, notify=False).\
- AndRaise(test.TestingException)
- objects.BlockDeviceMappingList.get_by_instance_uuid(ctxt,
- inst1.uuid, use_slave=True).AndReturn(bdms)
- objects.BlockDeviceMappingList.get_by_instance_uuid(ctxt,
- inst2.uuid, use_slave=True).AndReturn(bdms)
- self.compute._shutdown_instance(ctxt, inst2, bdms, notify=False).\
- AndReturn(None)
-
- self.mox.StubOutWithMock(self.compute, "_cleanup_volumes")
- self.compute._cleanup_volumes(ctxt, inst1['uuid'], bdms).\
- AndReturn(None)
-
- self.mox.ReplayAll()
- self.compute._cleanup_running_deleted_instances(ctxt)
-
- def test_cleanup_running_deleted_instances_shutdown(self):
- ctxt, inst1, inst2 = self._test_cleanup_running('shutdown')
-
- self.mox.StubOutWithMock(self.compute.driver, 'set_bootable')
- self.mox.StubOutWithMock(self.compute.driver, 'power_off')
-
- self.compute.driver.set_bootable(inst1, False)
- self.compute.driver.power_off(inst1)
- self.compute.driver.set_bootable(inst2, False)
- self.compute.driver.power_off(inst2)
-
- self.mox.ReplayAll()
- self.compute._cleanup_running_deleted_instances(ctxt)
-
- def test_cleanup_running_deleted_instances_shutdown_notimpl(self):
- ctxt, inst1, inst2 = self._test_cleanup_running('shutdown')
-
- self.mox.StubOutWithMock(self.compute.driver, 'set_bootable')
- self.mox.StubOutWithMock(self.compute.driver, 'power_off')
-
- self.compute.driver.set_bootable(inst1, False).AndRaise(
- NotImplementedError)
- compute_manager.LOG.warn(mox.IgnoreArg())
- self.compute.driver.power_off(inst1)
- self.compute.driver.set_bootable(inst2, False).AndRaise(
- NotImplementedError)
- compute_manager.LOG.warn(mox.IgnoreArg())
- self.compute.driver.power_off(inst2)
-
- self.mox.ReplayAll()
- self.compute._cleanup_running_deleted_instances(ctxt)
-
- def test_cleanup_running_deleted_instances_shutdown_error(self):
- ctxt, inst1, inst2 = self._test_cleanup_running('shutdown')
-
- self.mox.StubOutWithMock(self.compute.driver, 'set_bootable')
- self.mox.StubOutWithMock(self.compute.driver, 'power_off')
-
- self.mox.StubOutWithMock(compute_manager.LOG, 'exception')
- e = test.TestingException('bad')
-
- self.compute.driver.set_bootable(inst1, False)
- self.compute.driver.power_off(inst1).AndRaise(e)
- compute_manager.LOG.warn(mox.IgnoreArg())
-
- self.compute.driver.set_bootable(inst2, False)
- self.compute.driver.power_off(inst2).AndRaise(e)
- compute_manager.LOG.warn(mox.IgnoreArg())
-
- self.mox.ReplayAll()
- self.compute._cleanup_running_deleted_instances(ctxt)
-
- def test_running_deleted_instances(self):
- admin_context = context.get_admin_context()
-
- self.compute.host = 'host'
-
- instance1 = {}
- instance1['deleted'] = True
- instance1['deleted_at'] = "sometimeago"
-
- self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
- self.compute._get_instances_on_driver(
- admin_context, {'deleted': True,
- 'soft_deleted': False,
- 'host': self.compute.host}).AndReturn([instance1])
-
- self.mox.StubOutWithMock(timeutils, 'is_older_than')
- timeutils.is_older_than('sometimeago',
- CONF.running_deleted_instance_timeout).AndReturn(True)
-
- self.mox.ReplayAll()
- val = self.compute._running_deleted_instances(admin_context)
- self.assertEqual(val, [instance1])
-
- def test_get_instance_nw_info(self):
- fake_network.unset_stub_network_methods(self.stubs)
-
- fake_inst = fake_instance.fake_db_instance(uuid='fake-instance')
- fake_nw_info = network_model.NetworkInfo()
-
- self.mox.StubOutWithMock(self.compute.network_api,
- 'get_instance_nw_info')
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
-
- db.instance_get_by_uuid(self.context, fake_inst['uuid']
- ).AndReturn(fake_inst)
- # NOTE(danms): compute manager will re-query since we're not giving
- # it an instance with system_metadata. We're stubbing out the
- # subsequent call so we don't need it, but keep this to make sure it
- # does the right thing.
- db.instance_get_by_uuid(self.context, fake_inst['uuid'],
- columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
- ).AndReturn(fake_inst)
- self.compute.network_api.get_instance_nw_info(self.context,
- mox.IsA(objects.Instance)).AndReturn(fake_nw_info)
-
- self.mox.ReplayAll()
-
- fake_inst_obj = objects.Instance._from_db_object(
- self.context, objects.Instance(), fake_inst, [])
- result = self.compute._get_instance_nw_info(self.context,
- fake_inst_obj)
- self.assertEqual(fake_nw_info, result)
-
- def _heal_instance_info_cache(self, _get_instance_nw_info_raise=False):
- # Update on every call for the test
- self.flags(heal_instance_info_cache_interval=-1)
- ctxt = context.get_admin_context()
-
- instance_map = {}
- instances = []
- for x in xrange(8):
- inst_uuid = 'fake-uuid-%s' % x
- instance_map[inst_uuid] = fake_instance.fake_db_instance(
- uuid=inst_uuid, host=CONF.host, created_at=None)
- # These won't be in our instance since they're not requested
- instances.append(instance_map[inst_uuid])
-
- call_info = {'get_all_by_host': 0, 'get_by_uuid': 0,
- 'get_nw_info': 0, 'expected_instance': None}
-
- def fake_instance_get_all_by_host(context, host,
- columns_to_join, use_slave=False):
- call_info['get_all_by_host'] += 1
- self.assertEqual([], columns_to_join)
- return instances[:]
-
- def fake_instance_get_by_uuid(context, instance_uuid,
- columns_to_join, use_slave=False):
- if instance_uuid not in instance_map:
- raise exception.InstanceNotFound(instance_id=instance_uuid)
- call_info['get_by_uuid'] += 1
- self.assertEqual(['system_metadata', 'info_cache'],
- columns_to_join)
- return instance_map[instance_uuid]
-
- # NOTE(comstud): Override the stub in setUp()
- def fake_get_instance_nw_info(context, instance, use_slave=False):
- # Note that this exception gets caught in compute/manager
- # and is ignored. However, the below increment of
- # 'get_nw_info' won't happen, and you'll get an assert
- # failure checking it below.
- self.assertEqual(call_info['expected_instance']['uuid'],
- instance['uuid'])
- call_info['get_nw_info'] += 1
- if _get_instance_nw_info_raise:
- raise exception.InstanceNotFound(instance_id=instance['uuid'])
-
- self.stubs.Set(db, 'instance_get_all_by_host',
- fake_instance_get_all_by_host)
- self.stubs.Set(db, 'instance_get_by_uuid',
- fake_instance_get_by_uuid)
- self.stubs.Set(self.compute, '_get_instance_nw_info',
- fake_get_instance_nw_info)
-
- # Make an instance appear to be still Building
- instances[0]['vm_state'] = vm_states.BUILDING
- # Make an instance appear to be Deleting
- instances[1]['task_state'] = task_states.DELETING
- # '0', '1' should be skipped..
- call_info['expected_instance'] = instances[2]
- self.compute._heal_instance_info_cache(ctxt)
- self.assertEqual(1, call_info['get_all_by_host'])
- self.assertEqual(0, call_info['get_by_uuid'])
- self.assertEqual(1, call_info['get_nw_info'])
-
- call_info['expected_instance'] = instances[3]
- self.compute._heal_instance_info_cache(ctxt)
- self.assertEqual(1, call_info['get_all_by_host'])
- self.assertEqual(1, call_info['get_by_uuid'])
- self.assertEqual(2, call_info['get_nw_info'])
-
- # Make an instance switch hosts
- instances[4]['host'] = 'not-me'
- # Make an instance disappear
- instance_map.pop(instances[5]['uuid'])
- # Make an instance switch to be Deleting
- instances[6]['task_state'] = task_states.DELETING
- # '4', '5', and '6' should be skipped..
- call_info['expected_instance'] = instances[7]
- self.compute._heal_instance_info_cache(ctxt)
- self.assertEqual(1, call_info['get_all_by_host'])
- self.assertEqual(4, call_info['get_by_uuid'])
- self.assertEqual(3, call_info['get_nw_info'])
- # Should be no more left.
- self.assertEqual(0, len(self.compute._instance_uuids_to_heal))
-
- # This should cause a DB query now, so get a list of instances
- # where none can be processed to make sure we handle that case
- # cleanly. Use just '0' (Building) and '1' (Deleting)
- instances = instances[0:2]
-
- self.compute._heal_instance_info_cache(ctxt)
- # Should have called the list once more
- self.assertEqual(2, call_info['get_all_by_host'])
- # Stays the same because we remove invalid entries from the list
- self.assertEqual(4, call_info['get_by_uuid'])
- # Stays the same because we didn't find anything to process
- self.assertEqual(3, call_info['get_nw_info'])
-
- def test_heal_instance_info_cache(self):
- self._heal_instance_info_cache()
-
- def test_heal_instance_info_cache_with_exception(self):
- self._heal_instance_info_cache(_get_instance_nw_info_raise=True)
-
- @mock.patch('nova.objects.InstanceList.get_by_filters')
- @mock.patch('nova.compute.api.API.unrescue')
- def test_poll_rescued_instances(self, unrescue, get):
- timed_out_time = timeutils.utcnow() - datetime.timedelta(minutes=5)
- not_timed_out_time = timeutils.utcnow()
-
- instances = [objects.Instance(uuid='fake_uuid1',
- vm_state=vm_states.RESCUED,
- launched_at=timed_out_time),
- objects.Instance(uuid='fake_uuid2',
- vm_state=vm_states.RESCUED,
- launched_at=timed_out_time),
- objects.Instance(uuid='fake_uuid3',
- vm_state=vm_states.RESCUED,
- launched_at=not_timed_out_time)]
- unrescued_instances = {'fake_uuid1': False, 'fake_uuid2': False}
-
- def fake_instance_get_all_by_filters(context, filters,
- expected_attrs=None,
- use_slave=False):
- self.assertEqual(["system_metadata"], expected_attrs)
- return instances
-
- get.side_effect = fake_instance_get_all_by_filters
-
- def fake_unrescue(context, instance):
- unrescued_instances[instance['uuid']] = True
-
- unrescue.side_effect = fake_unrescue
-
- self.flags(rescue_timeout=60)
- ctxt = context.get_admin_context()
-
- self.compute._poll_rescued_instances(ctxt)
-
- for instance in unrescued_instances.values():
- self.assertTrue(instance)
-
- def test_poll_unconfirmed_resizes(self):
- instances = [
- fake_instance.fake_db_instance(uuid='fake_uuid1',
- vm_state=vm_states.RESIZED,
- task_state=None),
- fake_instance.fake_db_instance(uuid='noexist'),
- fake_instance.fake_db_instance(uuid='fake_uuid2',
- vm_state=vm_states.ERROR,
- task_state=None),
- fake_instance.fake_db_instance(uuid='fake_uuid3',
- vm_state=vm_states.ACTIVE,
- task_state=
- task_states.REBOOTING),
- fake_instance.fake_db_instance(uuid='fake_uuid4',
- vm_state=vm_states.RESIZED,
- task_state=None),
- fake_instance.fake_db_instance(uuid='fake_uuid5',
- vm_state=vm_states.ACTIVE,
- task_state=None),
-                # The expected migration result will be None instead of error
- # since _poll_unconfirmed_resizes will not change it
- # when the instance vm state is RESIZED and task state
- # is deleting, see bug 1301696 for more detail
- fake_instance.fake_db_instance(uuid='fake_uuid6',
- vm_state=vm_states.RESIZED,
- task_state='deleting'),
- fake_instance.fake_db_instance(uuid='fake_uuid7',
- vm_state=vm_states.RESIZED,
- task_state='soft-deleting'),
- fake_instance.fake_db_instance(uuid='fake_uuid8',
- vm_state=vm_states.ACTIVE,
- task_state='resize_finish')]
- expected_migration_status = {'fake_uuid1': 'confirmed',
- 'noexist': 'error',
- 'fake_uuid2': 'error',
- 'fake_uuid3': 'error',
- 'fake_uuid4': None,
- 'fake_uuid5': 'error',
- 'fake_uuid6': None,
- 'fake_uuid7': None,
- 'fake_uuid8': None}
- migrations = []
- for i, instance in enumerate(instances, start=1):
- fake_mig = test_migration.fake_db_migration()
- fake_mig.update({'id': i,
- 'instance_uuid': instance['uuid'],
- 'status': None})
- migrations.append(fake_mig)
-
- def fake_instance_get_by_uuid(context, instance_uuid,
- columns_to_join=None, use_slave=False):
- self.assertIn('metadata', columns_to_join)
- self.assertIn('system_metadata', columns_to_join)
- # raise InstanceNotFound exception for uuid 'noexist'
- if instance_uuid == 'noexist':
- raise exception.InstanceNotFound(instance_id=instance_uuid)
- for instance in instances:
- if instance['uuid'] == instance_uuid:
- return instance
-
- def fake_migration_get_unconfirmed_by_dest_compute(context,
- resize_confirm_window, dest_compute, use_slave=False):
- self.assertEqual(dest_compute, CONF.host)
- return migrations
-
- def fake_migration_update(context, mid, updates):
- for migration in migrations:
- if migration['id'] == mid:
- migration.update(updates)
- return migration
-
- def fake_confirm_resize(context, instance, migration=None):
- # raise exception for 'fake_uuid4' to check migration status
- # does not get set to 'error' on confirm_resize failure.
- if instance['uuid'] == 'fake_uuid4':
- raise test.TestingException('bomb')
- self.assertIsNotNone(migration)
- for migration2 in migrations:
- if (migration2['instance_uuid'] ==
- migration['instance_uuid']):
- migration2['status'] = 'confirmed'
-
- self.stubs.Set(db, 'instance_get_by_uuid',
- fake_instance_get_by_uuid)
- self.stubs.Set(db, 'migration_get_unconfirmed_by_dest_compute',
- fake_migration_get_unconfirmed_by_dest_compute)
- self.stubs.Set(db, 'migration_update', fake_migration_update)
- self.stubs.Set(self.compute.compute_api, 'confirm_resize',
- fake_confirm_resize)
-
- def fetch_instance_migration_status(instance_uuid):
- for migration in migrations:
- if migration['instance_uuid'] == instance_uuid:
- return migration['status']
-
- self.flags(resize_confirm_window=60)
- ctxt = context.get_admin_context()
-
- self.compute._poll_unconfirmed_resizes(ctxt)
-
- for instance_uuid, status in expected_migration_status.iteritems():
- self.assertEqual(status,
- fetch_instance_migration_status(instance_uuid))
-
- def test_instance_build_timeout_mixed_instances(self):
- # Tests that instances which failed to build within the configured
- # instance_build_timeout value are set to error state.
- self.flags(instance_build_timeout=30)
- ctxt = context.get_admin_context()
- created_at = timeutils.utcnow() + datetime.timedelta(seconds=-60)
-
- filters = {'vm_state': vm_states.BUILDING, 'host': CONF.host}
- # these are the ones that are expired
- old_instances = []
- for x in xrange(4):
- instance = {'uuid': str(uuid.uuid4()), 'created_at': created_at}
- instance.update(filters)
- old_instances.append(fake_instance.fake_db_instance(**instance))
-
- # not expired
- instances = list(old_instances) # copy the contents of old_instances
- new_instance = {
- 'uuid': str(uuid.uuid4()),
- 'created_at': timeutils.utcnow(),
- }
- sort_key = 'created_at'
- sort_dir = 'desc'
- new_instance.update(filters)
- instances.append(fake_instance.fake_db_instance(**new_instance))
-
- # need something to return from conductor_api.instance_update
- # that is defined outside the for loop and can be used in the mock
- # context
- fake_instance_ref = {'host': CONF.host, 'node': 'fake'}
-
- # creating mocks
- with contextlib.nested(
- mock.patch.object(self.compute.db.sqlalchemy.api,
- 'instance_get_all_by_filters',
- return_value=instances),
- mock.patch.object(self.compute.conductor_api, 'instance_update',
- return_value=fake_instance_ref),
- mock.patch.object(self.compute.driver, 'node_is_available',
- return_value=False)
- ) as (
- instance_get_all_by_filters,
- conductor_instance_update,
- node_is_available
- ):
- # run the code
- self.compute._check_instance_build_time(ctxt)
- # check our assertions
- instance_get_all_by_filters.assert_called_once_with(
- ctxt, filters,
- sort_key,
- sort_dir,
- marker=None,
- columns_to_join=[],
- use_slave=True,
- limit=None)
- self.assertThat(conductor_instance_update.mock_calls,
- testtools_matchers.HasLength(len(old_instances)))
- self.assertThat(node_is_available.mock_calls,
- testtools_matchers.HasLength(len(old_instances)))
- for inst in old_instances:
- conductor_instance_update.assert_has_calls([
- mock.call(ctxt, inst['uuid'],
- vm_state=vm_states.ERROR)])
- node_is_available.assert_has_calls([
- mock.call(fake_instance_ref['node'])])
-
- def test_get_resource_tracker_fail(self):
- self.assertRaises(exception.NovaException,
- self.compute._get_resource_tracker,
- 'invalidnodename')
-
- def test_instance_update_host_check(self):
- # make sure rt usage doesn't happen if the host or node is different
- def fail_get(nodename):
- raise test.TestingException(_("wrong host/node"))
- self.stubs.Set(self.compute, '_get_resource_tracker', fail_get)
-
- instance = self._create_fake_instance({'host': 'someotherhost'})
- self.compute._instance_update(self.context, instance['uuid'])
-
- instance = self._create_fake_instance({'node': 'someothernode'})
- self.compute._instance_update(self.context, instance['uuid'])
-
- params = {'host': 'someotherhost', 'node': 'someothernode'}
- instance = self._create_fake_instance(params)
- self.compute._instance_update(self.context, instance['uuid'])
-
- def test_destroy_evacuated_instance_on_shared_storage(self):
- fake_context = context.get_admin_context()
-
- # instances in central db
- instances = [
- # those are still related to this host
- self._create_fake_instance_obj(
- {'host': self.compute.host}),
- self._create_fake_instance_obj(
- {'host': self.compute.host}),
- self._create_fake_instance_obj(
- {'host': self.compute.host})
- ]
-
-        # these have already been evacuated to another host
- evacuated_instance = self._create_fake_instance_obj(
- {'host': 'otherhost'})
-
- instances.append(evacuated_instance)
-
- self.mox.StubOutWithMock(self.compute,
- '_get_instances_on_driver')
- self.mox.StubOutWithMock(self.compute,
- '_get_instance_nw_info')
- self.mox.StubOutWithMock(self.compute,
- '_get_instance_block_device_info')
- self.mox.StubOutWithMock(self.compute,
- '_is_instance_storage_shared')
- self.mox.StubOutWithMock(self.compute.driver, 'destroy')
-
- self.compute._get_instances_on_driver(
- fake_context, {'deleted': False}).AndReturn(instances)
- self.compute._get_instance_nw_info(fake_context,
- evacuated_instance).AndReturn(
- 'fake_network_info')
- self.compute._get_instance_block_device_info(
- fake_context, evacuated_instance).AndReturn('fake_bdi')
- self.compute._is_instance_storage_shared(fake_context,
- evacuated_instance).AndReturn(True)
- self.compute.driver.destroy(fake_context, evacuated_instance,
- 'fake_network_info',
- 'fake_bdi',
- False)
-
- self.mox.ReplayAll()
- self.compute._destroy_evacuated_instances(fake_context)
-
- def test_destroy_evacuated_instance_with_disks(self):
- fake_context = context.get_admin_context()
-
- # instances in central db
- instances = [
- # those are still related to this host
- self._create_fake_instance_obj(
- {'host': self.compute.host}),
- self._create_fake_instance_obj(
- {'host': self.compute.host}),
- self._create_fake_instance_obj(
- {'host': self.compute.host})
- ]
-
-        # these have already been evacuated to another host
- evacuated_instance = self._create_fake_instance_obj(
- {'host': 'otherhost'})
-
- instances.append(evacuated_instance)
-
- self.mox.StubOutWithMock(self.compute,
- '_get_instances_on_driver')
- self.mox.StubOutWithMock(self.compute,
- '_get_instance_nw_info')
- self.mox.StubOutWithMock(self.compute,
- '_get_instance_block_device_info')
- self.mox.StubOutWithMock(self.compute.driver,
- 'check_instance_shared_storage_local')
- self.mox.StubOutWithMock(self.compute.compute_rpcapi,
- 'check_instance_shared_storage')
- self.mox.StubOutWithMock(self.compute.driver,
- 'check_instance_shared_storage_cleanup')
- self.mox.StubOutWithMock(self.compute.driver, 'destroy')
-
- self.compute._get_instances_on_driver(
- fake_context, {'deleted': False}).AndReturn(instances)
- self.compute._get_instance_nw_info(fake_context,
- evacuated_instance).AndReturn(
- 'fake_network_info')
- self.compute._get_instance_block_device_info(
- fake_context, evacuated_instance).AndReturn('fake_bdi')
- self.compute.driver.check_instance_shared_storage_local(fake_context,
- evacuated_instance).AndReturn({'filename': 'tmpfilename'})
- self.compute.compute_rpcapi.check_instance_shared_storage(fake_context,
- evacuated_instance,
- {'filename': 'tmpfilename'}).AndReturn(False)
- self.compute.driver.check_instance_shared_storage_cleanup(fake_context,
- {'filename': 'tmpfilename'})
- self.compute.driver.destroy(fake_context, evacuated_instance,
- 'fake_network_info',
- 'fake_bdi',
- True)
-
- self.mox.ReplayAll()
- self.compute._destroy_evacuated_instances(fake_context)
-
- def test_destroy_evacuated_instance_not_implemented(self):
- fake_context = context.get_admin_context()
-
- # instances in central db
- instances = [
- # those are still related to this host
- self._create_fake_instance_obj(
- {'host': self.compute.host}),
- self._create_fake_instance_obj(
- {'host': self.compute.host}),
- self._create_fake_instance_obj(
- {'host': self.compute.host})
- ]
-
-        # these have already been evacuated to another host
- evacuated_instance = self._create_fake_instance_obj(
- {'host': 'otherhost'})
-
- instances.append(evacuated_instance)
-
- self.mox.StubOutWithMock(self.compute,
- '_get_instances_on_driver')
- self.mox.StubOutWithMock(self.compute,
- '_get_instance_nw_info')
- self.mox.StubOutWithMock(self.compute,
- '_get_instance_block_device_info')
- self.mox.StubOutWithMock(self.compute.driver,
- 'check_instance_shared_storage_local')
- self.mox.StubOutWithMock(self.compute.compute_rpcapi,
- 'check_instance_shared_storage')
- self.mox.StubOutWithMock(self.compute.driver,
- 'check_instance_shared_storage_cleanup')
- self.mox.StubOutWithMock(self.compute.driver, 'destroy')
-
- self.compute._get_instances_on_driver(
- fake_context, {'deleted': False}).AndReturn(instances)
- self.compute._get_instance_nw_info(fake_context,
- evacuated_instance).AndReturn(
- 'fake_network_info')
- self.compute._get_instance_block_device_info(
- fake_context, evacuated_instance).AndReturn('fake_bdi')
- self.compute.driver.check_instance_shared_storage_local(fake_context,
- evacuated_instance).AndRaise(NotImplementedError())
- self.compute.driver.destroy(fake_context, evacuated_instance,
- 'fake_network_info',
- 'fake_bdi',
- True)
-
- self.mox.ReplayAll()
- self.compute._destroy_evacuated_instances(fake_context)
-
- def test_complete_partial_deletion(self):
- admin_context = context.get_admin_context()
- instance = objects.Instance()
- instance.id = 1
- instance.uuid = 'fake-uuid'
- instance.vm_state = vm_states.DELETED
- instance.task_state = None
- instance.system_metadata = {'fake_key': 'fake_value'}
- instance.vcpus = 1
- instance.memory_mb = 1
- instance.project_id = 'fake-prj'
- instance.user_id = 'fake-user'
- instance.deleted = False
-
- def fake_destroy():
- instance.deleted = True
-
- self.stubs.Set(instance, 'destroy', fake_destroy)
-
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- lambda *a, **k: None)
-
- self.stubs.Set(self.compute,
- '_complete_deletion',
- lambda *a, **k: None)
-
- self.stubs.Set(objects.Quotas, 'reserve', lambda *a, **k: None)
-
- self.compute._complete_partial_deletion(admin_context, instance)
-
- self.assertNotEqual(0, instance.deleted)
-
- def test_init_instance_for_partial_deletion(self):
- admin_context = context.get_admin_context()
- instance = objects.Instance(admin_context)
- instance.id = 1
- instance.vm_state = vm_states.DELETED
- instance.deleted = False
-
- def fake_partial_deletion(context, instance):
- instance['deleted'] = instance['id']
-
- self.stubs.Set(self.compute,
- '_complete_partial_deletion',
- fake_partial_deletion)
- self.compute._init_instance(admin_context, instance)
-
- self.assertNotEqual(0, instance['deleted'])
-
- def test_partial_deletion_raise_exception(self):
- admin_context = context.get_admin_context()
- instance = objects.Instance(admin_context)
- instance.uuid = str(uuid.uuid4())
- instance.vm_state = vm_states.DELETED
- instance.deleted = False
-
- self.mox.StubOutWithMock(self.compute, '_complete_partial_deletion')
- self.compute._complete_partial_deletion(
- admin_context, instance).AndRaise(ValueError)
- self.mox.ReplayAll()
-
- self.compute._init_instance(admin_context, instance)
-
- def test_add_remove_fixed_ip_updates_instance_updated_at(self):
- def _noop(*args, **kwargs):
- pass
-
- self.stubs.Set(self.compute.network_api,
- 'add_fixed_ip_to_instance', _noop)
- self.stubs.Set(self.compute.network_api,
- 'remove_fixed_ip_from_instance', _noop)
-
- instance = self._create_fake_instance_obj()
- updated_at_1 = instance['updated_at']
-
- self.compute.add_fixed_ip_to_instance(self.context, 'fake', instance)
- updated_at_2 = db.instance_get_by_uuid(self.context,
- instance['uuid'])['updated_at']
-
- self.compute.remove_fixed_ip_from_instance(self.context, 'fake',
- instance)
- updated_at_3 = db.instance_get_by_uuid(self.context,
- instance['uuid'])['updated_at']
-
- updated_ats = (updated_at_1, updated_at_2, updated_at_3)
- self.assertEqual(len(updated_ats), len(set(updated_ats)))
-
- def test_no_pending_deletes_for_soft_deleted_instances(self):
- self.flags(reclaim_instance_interval=0)
- ctxt = context.get_admin_context()
-
- instance = self._create_fake_instance(
- params={'host': CONF.host,
- 'vm_state': vm_states.SOFT_DELETED,
- 'deleted_at': timeutils.utcnow()})
-
- self.compute._run_pending_deletes(ctxt)
- instance = db.instance_get_by_uuid(self.context, instance['uuid'])
- self.assertFalse(instance['cleaned'])
-
- def test_reclaim_queued_deletes(self):
- self.flags(reclaim_instance_interval=3600)
- ctxt = context.get_admin_context()
-
- # Active
- self._create_fake_instance(params={'host': CONF.host})
-
- # Deleted not old enough
- self._create_fake_instance(params={'host': CONF.host,
- 'vm_state': vm_states.SOFT_DELETED,
- 'deleted_at': timeutils.utcnow()})
-
- # Deleted old enough (only this one should be reclaimed)
- deleted_at = (timeutils.utcnow() -
- datetime.timedelta(hours=1, minutes=5))
- self._create_fake_instance(
- params={'host': CONF.host,
- 'vm_state': vm_states.SOFT_DELETED,
- 'deleted_at': deleted_at})
-
- # Restoring
- # NOTE(hanlind): This specifically tests for a race condition
- # where restoring a previously soft deleted instance sets
- # deleted_at back to None, causing reclaim to think it can be
- # deleted, see LP #1186243.
- self._create_fake_instance(
- params={'host': CONF.host,
- 'vm_state': vm_states.SOFT_DELETED,
- 'task_state': task_states.RESTORING})
-
- self.mox.StubOutWithMock(self.compute, '_delete_instance')
- self.compute._delete_instance(
- ctxt, mox.IsA(objects.Instance), [],
- mox.IsA(objects.Quotas))
-
- self.mox.ReplayAll()
-
- self.compute._reclaim_queued_deletes(ctxt)
-
- def test_reclaim_queued_deletes_continue_on_error(self):
- # Verify that reclaim continues on error.
- self.flags(reclaim_instance_interval=3600)
- ctxt = context.get_admin_context()
-
- deleted_at = (timeutils.utcnow() -
- datetime.timedelta(hours=1, minutes=5))
- instance1 = self._create_fake_instance_obj(
- params={'host': CONF.host,
- 'vm_state': vm_states.SOFT_DELETED,
- 'deleted_at': deleted_at})
- instance2 = self._create_fake_instance_obj(
- params={'host': CONF.host,
- 'vm_state': vm_states.SOFT_DELETED,
- 'deleted_at': deleted_at})
- instances = []
- instances.append(instance1)
- instances.append(instance2)
-
- self.mox.StubOutWithMock(objects.InstanceList,
- 'get_by_filters')
- self.mox.StubOutWithMock(self.compute, '_deleted_old_enough')
- self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
- 'get_by_instance_uuid')
- self.mox.StubOutWithMock(self.compute, '_delete_instance')
-
- objects.InstanceList.get_by_filters(
- ctxt, mox.IgnoreArg(),
- expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
- use_slave=True
- ).AndReturn(instances)
-
- # The first instance delete fails.
- self.compute._deleted_old_enough(instance1, 3600).AndReturn(True)
- objects.BlockDeviceMappingList.get_by_instance_uuid(
- ctxt, instance1.uuid).AndReturn([])
- self.compute._delete_instance(ctxt, instance1,
- [], self.none_quotas).AndRaise(
- test.TestingException)
-
- # The second instance delete that follows.
- self.compute._deleted_old_enough(instance2, 3600).AndReturn(True)
- objects.BlockDeviceMappingList.get_by_instance_uuid(
- ctxt, instance2.uuid).AndReturn([])
- self.compute._delete_instance(ctxt, instance2,
- [], self.none_quotas)
-
- self.mox.ReplayAll()
-
- self.compute._reclaim_queued_deletes(ctxt)
-
- def test_sync_power_states(self):
- ctxt = self.context.elevated()
- self._create_fake_instance({'host': self.compute.host})
- self._create_fake_instance({'host': self.compute.host})
- self._create_fake_instance({'host': self.compute.host})
- self.mox.StubOutWithMock(self.compute.driver, 'get_info')
- self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')
-
- # Check to make sure task continues on error.
- self.compute.driver.get_info(mox.IgnoreArg()).AndRaise(
- exception.InstanceNotFound(instance_id='fake-uuid'))
- self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
- power_state.NOSTATE).AndRaise(
- exception.InstanceNotFound(instance_id='fake-uuid'))
-
- self.compute.driver.get_info(mox.IgnoreArg()).AndReturn(
- {'state': power_state.RUNNING})
- self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
- power_state.RUNNING,
- use_slave=True)
- self.compute.driver.get_info(mox.IgnoreArg()).AndReturn(
- {'state': power_state.SHUTDOWN})
- self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
- power_state.SHUTDOWN,
- use_slave=True)
- self.mox.ReplayAll()
- self.compute._sync_power_states(ctxt)
-
- def _test_lifecycle_event(self, lifecycle_event, power_state):
- instance = self._create_fake_instance()
- uuid = instance['uuid']
-
- self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')
- if power_state is not None:
- self.compute._sync_instance_power_state(
- mox.IgnoreArg(),
- mox.ContainsKeyValue('uuid', uuid),
- power_state)
- self.mox.ReplayAll()
- self.compute.handle_events(event.LifecycleEvent(uuid, lifecycle_event))
- self.mox.VerifyAll()
- self.mox.UnsetStubs()
-
- def test_lifecycle_events(self):
- self._test_lifecycle_event(event.EVENT_LIFECYCLE_STOPPED,
- power_state.SHUTDOWN)
- self._test_lifecycle_event(event.EVENT_LIFECYCLE_STARTED,
- power_state.RUNNING)
- self._test_lifecycle_event(event.EVENT_LIFECYCLE_PAUSED,
- power_state.PAUSED)
- self._test_lifecycle_event(event.EVENT_LIFECYCLE_RESUMED,
- power_state.RUNNING)
- self._test_lifecycle_event(-1, None)
-
- def test_lifecycle_event_non_existent_instance(self):
- # No error raised for non-existent instance because of inherent race
- # between database updates and hypervisor events. See bug #1180501.
- event_instance = event.LifecycleEvent('does-not-exist',
- event.EVENT_LIFECYCLE_STOPPED)
- self.compute.handle_events(event_instance)
-
- @mock.patch.object(objects.Migration, 'get_by_id')
- @mock.patch.object(objects.Quotas, 'rollback')
- def test_confirm_resize_roll_back_quota_migration_not_found(self,
- mock_rollback, mock_get_by_id):
- instance = self._create_fake_instance_obj()
-
- migration = objects.Migration()
- migration.instance_uuid = instance.uuid
- migration.status = 'finished'
- migration.id = 0
-
- mock_get_by_id.side_effect = exception.MigrationNotFound(
- migration_id=0)
- self.compute.confirm_resize(self.context, instance=instance,
- migration=migration, reservations=[])
- self.assertTrue(mock_rollback.called)
-
- @mock.patch.object(instance_obj.Instance, 'get_by_uuid')
- @mock.patch.object(objects.Quotas, 'rollback')
- def test_confirm_resize_roll_back_quota_instance_not_found(self,
- mock_rollback, mock_get_by_id):
- instance = self._create_fake_instance_obj()
-
- migration = objects.Migration()
- migration.instance_uuid = instance.uuid
- migration.status = 'finished'
- migration.id = 0
-
- mock_get_by_id.side_effect = exception.InstanceNotFound(
- instance_id=instance.uuid)
- self.compute.confirm_resize(self.context, instance=instance,
- migration=migration, reservations=[])
- self.assertTrue(mock_rollback.called)
-
- @mock.patch.object(objects.Migration, 'get_by_id')
- @mock.patch.object(objects.Quotas, 'rollback')
- def test_confirm_resize_roll_back_quota_status_confirmed(self,
- mock_rollback, mock_get_by_id):
- instance = self._create_fake_instance_obj()
-
- migration = objects.Migration()
- migration.instance_uuid = instance.uuid
- migration.status = 'confirmed'
- migration.id = 0
-
- mock_get_by_id.return_value = migration
- self.compute.confirm_resize(self.context, instance=instance,
- migration=migration, reservations=[])
- self.assertTrue(mock_rollback.called)
-
- @mock.patch.object(objects.Migration, 'get_by_id')
- @mock.patch.object(objects.Quotas, 'rollback')
- def test_confirm_resize_roll_back_quota_status_dummy(self,
- mock_rollback, mock_get_by_id):
- instance = self._create_fake_instance_obj()
-
- migration = objects.Migration()
- migration.instance_uuid = instance.uuid
- migration.status = 'dummy'
- migration.id = 0
-
- mock_get_by_id.return_value = migration
- self.compute.confirm_resize(self.context, instance=instance,
- migration=migration, reservations=[])
- self.assertTrue(mock_rollback.called)
-
- def test_allow_confirm_resize_on_instance_in_deleting_task_state(self):
- instance = self._create_fake_instance_obj()
- old_type = flavors.extract_flavor(instance)
- new_type = flavors.get_flavor_by_flavor_id('4')
- sys_meta = instance.system_metadata
- sys_meta = flavors.save_flavor_info(sys_meta,
- old_type, 'old_')
- sys_meta = flavors.save_flavor_info(sys_meta,
- new_type, 'new_')
- sys_meta = flavors.save_flavor_info(sys_meta,
- new_type)
-
- fake_rt = self.mox.CreateMockAnything()
-
- def fake_drop_resize_claim(*args, **kwargs):
- pass
-
- def fake_get_resource_tracker(self):
- return fake_rt
-
- def fake_setup_networks_on_host(self, *args, **kwargs):
- pass
-
- self.stubs.Set(fake_rt, 'drop_resize_claim', fake_drop_resize_claim)
- self.stubs.Set(self.compute, '_get_resource_tracker',
- fake_get_resource_tracker)
- self.stubs.Set(self.compute.network_api, 'setup_networks_on_host',
- fake_setup_networks_on_host)
-
- migration = objects.Migration()
- migration.instance_uuid = instance.uuid
- migration.status = 'finished'
- migration.create(self.context.elevated())
-
- instance.task_state = task_states.DELETING
- instance.vm_state = vm_states.RESIZED
- instance.system_metadata = sys_meta
- instance.save()
-
- self.compute.confirm_resize(self.context, instance=instance,
- migration=migration, reservations=[])
- instance.refresh()
- self.assertEqual(vm_states.ACTIVE, instance['vm_state'])
-
- def _get_instance_and_bdm_for_dev_defaults_tests(self):
- instance = self._create_fake_instance_obj(
- params={'root_device_name': '/dev/vda'})
- block_device_mapping = block_device_obj.block_device_make_list(
- self.context, [fake_block_device.FakeDbBlockDeviceDict(
- {'id': 3, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/vda',
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'image_id': 'fake-image-id-1',
- 'boot_index': 0})])
-
- return instance, block_device_mapping
-
- def test_default_block_device_names_empty_instance_root_dev(self):
- instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
- instance.root_device_name = None
- self.mox.StubOutWithMock(objects.Instance, 'save')
- self.mox.StubOutWithMock(self.compute,
- '_default_device_names_for_instance')
- self.compute._default_device_names_for_instance(instance,
- '/dev/vda', [], [],
- [bdm for bdm in bdms])
- self.mox.ReplayAll()
- self.compute._default_block_device_names(self.context,
- instance,
- {}, bdms)
- self.assertEqual('/dev/vda', instance.root_device_name)
-
- def test_default_block_device_names_empty_root_device(self):
- instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
- bdms[0]['device_name'] = None
- self.mox.StubOutWithMock(self.compute,
- '_default_device_names_for_instance')
- self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'save')
- bdms[0].save().AndReturn(None)
- self.compute._default_device_names_for_instance(instance,
- '/dev/vda', [], [],
- [bdm for bdm in bdms])
- self.mox.ReplayAll()
- self.compute._default_block_device_names(self.context,
- instance,
- {}, bdms)
-
- def test_default_block_device_names_no_root_device(self):
- instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
- instance.root_device_name = None
- bdms[0]['device_name'] = None
- self.mox.StubOutWithMock(objects.Instance, 'save')
- self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'save')
- self.mox.StubOutWithMock(self.compute,
- '_default_root_device_name')
- self.mox.StubOutWithMock(self.compute,
- '_default_device_names_for_instance')
-
- self.compute._default_root_device_name(instance, mox.IgnoreArg(),
- bdms[0]).AndReturn('/dev/vda')
- bdms[0].save().AndReturn(None)
- self.compute._default_device_names_for_instance(instance,
- '/dev/vda', [], [],
- [bdm for bdm in bdms])
- self.mox.ReplayAll()
- self.compute._default_block_device_names(self.context,
- instance,
- {}, bdms)
- self.assertEqual('/dev/vda', instance.root_device_name)
-
- def test_default_block_device_names_with_blank_volumes(self):
- instance = self._create_fake_instance_obj()
- image_meta = {}
- root_volume = objects.BlockDeviceMapping(
- **fake_block_device.FakeDbBlockDeviceDict({
- 'id': 1, 'instance_uuid': 'fake-instance',
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'image_id': 'fake-image-id-1',
- 'boot_index': 0}))
- blank_volume1 = objects.BlockDeviceMapping(
- **fake_block_device.FakeDbBlockDeviceDict({
- 'id': 2, 'instance_uuid': 'fake-instance',
- 'source_type': 'blank',
- 'destination_type': 'volume',
- 'boot_index': -1}))
- blank_volume2 = objects.BlockDeviceMapping(
- **fake_block_device.FakeDbBlockDeviceDict({
- 'id': 3, 'instance_uuid': 'fake-instance',
- 'source_type': 'blank',
- 'destination_type': 'volume',
- 'boot_index': -1}))
- ephemeral = objects.BlockDeviceMapping(
- **fake_block_device.FakeDbBlockDeviceDict({
- 'id': 4, 'instance_uuid': 'fake-instance',
- 'source_type': 'blank',
- 'destination_type': 'local'}))
- swap = objects.BlockDeviceMapping(
- **fake_block_device.FakeDbBlockDeviceDict({
- 'id': 5, 'instance_uuid': 'fake-instance',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'guest_format': 'swap'
- }))
- bdms = block_device_obj.block_device_make_list(
- self.context, [root_volume, blank_volume1, blank_volume2,
- ephemeral, swap])
-
- with contextlib.nested(
- mock.patch.object(self.compute, '_default_root_device_name',
- return_value='/dev/vda'),
- mock.patch.object(objects.BlockDeviceMapping, 'save'),
- mock.patch.object(self.compute,
- '_default_device_names_for_instance')
- ) as (default_root_device, object_save,
- default_device_names):
- self.compute._default_block_device_names(self.context, instance,
- image_meta, bdms)
- default_root_device.assert_called_once_with(instance, image_meta,
- bdms[0])
- self.assertEqual('/dev/vda', instance.root_device_name)
- self.assertTrue(object_save.called)
- default_device_names.assert_called_once_with(instance,
- '/dev/vda', [bdms[-2]], [bdms[-1]],
- [bdm for bdm in bdms[:-2]])
-
- def test_reserve_block_device_name(self):
- instance = self._create_fake_instance_obj(
- params={'root_device_name': '/dev/vda'})
- bdm = objects.BlockDeviceMapping(
- **{'source_type': 'image', 'destination_type': 'local',
- 'image_id': 'fake-image-id', 'device_name': '/dev/vda',
- 'instance_uuid': instance.uuid})
- bdm.create(self.context)
-
- self.compute.reserve_block_device_name(self.context, instance,
- '/dev/vdb', 'fake-volume-id',
- 'virtio', 'disk')
-
- bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
- self.context, instance.uuid)
- bdms = list(bdms)
- self.assertEqual(len(bdms), 2)
- bdms.sort(key=operator.attrgetter('device_name'))
- vol_bdm = bdms[1]
- self.assertEqual(vol_bdm.source_type, 'volume')
- self.assertEqual(vol_bdm.destination_type, 'volume')
- self.assertEqual(vol_bdm.device_name, '/dev/vdb')
- self.assertEqual(vol_bdm.volume_id, 'fake-volume-id')
- self.assertEqual(vol_bdm.disk_bus, 'virtio')
- self.assertEqual(vol_bdm.device_type, 'disk')
-
-
-class ComputeAPITestCase(BaseTestCase):
- def setUp(self):
- def fake_get_nw_info(cls, ctxt, instance):
- self.assertTrue(ctxt.is_admin)
- return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
-
- super(ComputeAPITestCase, self).setUp()
- self.stubs.Set(network_api.API, 'get_instance_nw_info',
- fake_get_nw_info)
- self.security_group_api = (
- openstack_driver.get_openstack_security_group_driver())
-
- self.compute_api = compute.API(
- security_group_api=self.security_group_api)
- self.fake_image = {
- 'id': 1,
- 'name': 'fake_name',
- 'status': 'active',
- 'properties': {'kernel_id': 'fake_kernel_id',
- 'ramdisk_id': 'fake_ramdisk_id'},
- }
-
- def fake_show(obj, context, image_id, **kwargs):
- if image_id:
- return self.fake_image
- else:
- raise exception.ImageNotFound(image_id=image_id)
-
- self.fake_show = fake_show
-
- def _run_instance(self, params=None):
- instance = self._create_fake_instance_obj(params, services=True)
- instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance, {}, {}, None, None,
- None, True, None, False)
-
- instance.refresh()
- self.assertIsNone(instance['task_state'])
- return instance, instance_uuid
-
- def test_ip_filtering(self):
- info = [{
- 'address': 'aa:bb:cc:dd:ee:ff',
- 'id': 1,
- 'network': {
- 'bridge': 'br0',
- 'id': 1,
- 'label': 'private',
- 'subnets': [{
- 'cidr': '192.168.0.0/24',
- 'ips': [{
- 'address': '192.168.0.10',
- 'type': 'fixed',
- }]
- }]
- }
- }]
-
- info1 = objects.InstanceInfoCache(network_info=jsonutils.dumps(info))
- inst1 = objects.Instance(id=1, info_cache=info1)
- info[0]['network']['subnets'][0]['ips'][0]['address'] = '192.168.0.20'
- info2 = objects.InstanceInfoCache(network_info=jsonutils.dumps(info))
- inst2 = objects.Instance(id=2, info_cache=info2)
- instances = objects.InstanceList(objects=[inst1, inst2])
-
- instances = self.compute_api._ip_filter(instances, {'ip': '.*10'})
- self.assertEqual(len(instances), 1)
- self.assertEqual(instances[0].id, 1)
-
- def test_create_with_too_little_ram(self):
- # Test an instance type with too little memory.
-
- inst_type = flavors.get_default_flavor()
- inst_type['memory_mb'] = 1
-
- self.fake_image['min_ram'] = 2
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
-
- self.assertRaises(exception.FlavorMemoryTooSmall,
- self.compute_api.create, self.context,
- inst_type, self.fake_image['id'])
-
- # Now increase the inst_type memory and make sure all is fine.
- inst_type['memory_mb'] = 2
- (refs, resv_id) = self.compute_api.create(self.context,
- inst_type, self.fake_image['id'])
- db.instance_destroy(self.context, refs[0]['uuid'])
-
- def test_create_with_too_little_disk(self):
- # Test an instance type with too little disk space.
-
- inst_type = flavors.get_default_flavor()
- inst_type['root_gb'] = 1
-
- self.fake_image['min_disk'] = 2
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
-
- self.assertRaises(exception.FlavorDiskTooSmall,
- self.compute_api.create, self.context,
- inst_type, self.fake_image['id'])
-
- # Now increase the inst_type disk space and make sure all is fine.
- inst_type['root_gb'] = 2
- (refs, resv_id) = self.compute_api.create(self.context,
- inst_type, self.fake_image['id'])
- db.instance_destroy(self.context, refs[0]['uuid'])
-
- def test_create_with_too_large_image(self):
- # Test an instance type with too little disk space.
-
- inst_type = flavors.get_default_flavor()
- inst_type['root_gb'] = 1
-
- self.fake_image['size'] = '1073741825'
-
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
-
- self.assertRaises(exception.FlavorDiskTooSmall,
- self.compute_api.create, self.context,
- inst_type, self.fake_image['id'])
-
- # Reduce image to 1 GB limit and ensure it works
- self.fake_image['size'] = '1073741824'
- (refs, resv_id) = self.compute_api.create(self.context,
- inst_type, self.fake_image['id'])
- db.instance_destroy(self.context, refs[0]['uuid'])
-
- def test_create_just_enough_ram_and_disk(self):
- # Test an instance type with just enough ram and disk space.
-
- inst_type = flavors.get_default_flavor()
- inst_type['root_gb'] = 2
- inst_type['memory_mb'] = 2
-
- self.fake_image['min_ram'] = 2
- self.fake_image['min_disk'] = 2
- self.fake_image['name'] = 'fake_name'
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
-
- (refs, resv_id) = self.compute_api.create(self.context,
- inst_type, self.fake_image['id'])
- db.instance_destroy(self.context, refs[0]['uuid'])
-
- def test_create_with_no_ram_and_disk_reqs(self):
- # Test an instance type with no min_ram or min_disk.
-
- inst_type = flavors.get_default_flavor()
- inst_type['root_gb'] = 1
- inst_type['memory_mb'] = 1
-
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
-
- (refs, resv_id) = self.compute_api.create(self.context,
- inst_type, self.fake_image['id'])
- db.instance_destroy(self.context, refs[0]['uuid'])
-
- def test_create_with_deleted_image(self):
- # If we're given a deleted image by glance, we should not be able to
- # build from it
- inst_type = flavors.get_default_flavor()
-
- self.fake_image['name'] = 'fake_name'
- self.fake_image['status'] = 'DELETED'
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
-
- expected_message = (
- exception.ImageNotActive.msg_fmt % {'image_id':
- self.fake_image['id']})
- with testtools.ExpectedException(exception.ImageNotActive,
- expected_message):
- self.compute_api.create(self.context, inst_type,
- self.fake_image['id'])
-
- @mock.patch('nova.virt.hardware.VirtNUMAInstanceTopology.get_constraints')
- def test_create_with_numa_topology(self, numa_constraints_mock):
- inst_type = flavors.get_default_flavor()
- # This is what the stubbed out method will return
- fake_image_props = {'kernel_id': 'fake_kernel_id',
- 'ramdisk_id': 'fake_ramdisk_id',
- 'something_else': 'meow'}
-
- numa_topology = hardware.VirtNUMAInstanceTopology(
- cells=[hardware.VirtNUMATopologyCellInstance(0, set([1, 2]), 512),
- hardware.VirtNUMATopologyCellInstance(1, set([3, 4]), 512)])
- numa_constraints_mock.return_value = numa_topology
-
- instances, resv_id = self.compute_api.create(self.context, inst_type,
- self.fake_image['id'])
- numa_constraints_mock.assert_called_once_with(
- inst_type, fake_image_props)
- self.assertThat(numa_topology._to_dict(),
- matchers.DictMatches(
- instances[0].numa_topology
- .topology_from_obj()._to_dict()))
-
- def test_create_instance_defaults_display_name(self):
- # Verify that an instance cannot be created without a display_name.
- cases = [dict(), dict(display_name=None)]
- for instance in cases:
- (ref, resv_id) = self.compute_api.create(self.context,
- flavors.get_default_flavor(),
- 'fake-image-uuid', **instance)
- try:
- self.assertIsNotNone(ref[0]['display_name'])
- finally:
- db.instance_destroy(self.context, ref[0]['uuid'])
-
- def test_create_instance_sets_system_metadata(self):
- # Make sure image properties are copied into system metadata.
- (ref, resv_id) = self.compute_api.create(
- self.context,
- instance_type=flavors.get_default_flavor(),
- image_href='fake-image-uuid')
- try:
- sys_metadata = db.instance_system_metadata_get(self.context,
- ref[0]['uuid'])
-
- image_props = {'image_kernel_id': 'fake_kernel_id',
- 'image_ramdisk_id': 'fake_ramdisk_id',
- 'image_something_else': 'meow', }
- for key, value in image_props.iteritems():
- self.assertIn(key, sys_metadata)
- self.assertEqual(value, sys_metadata[key])
-
- finally:
- db.instance_destroy(self.context, ref[0]['uuid'])
-
- def test_create_saves_type_in_system_metadata(self):
- instance_type = flavors.get_default_flavor()
- (ref, resv_id) = self.compute_api.create(
- self.context,
- instance_type=instance_type,
- image_href='some-fake-image')
- try:
- sys_metadata = db.instance_system_metadata_get(self.context,
- ref[0]['uuid'])
-
- instance_type_props = ['name', 'memory_mb', 'vcpus', 'root_gb',
- 'ephemeral_gb', 'flavorid', 'swap',
- 'rxtx_factor', 'vcpu_weight']
- for key in instance_type_props:
- sys_meta_key = "instance_type_%s" % key
- self.assertIn(sys_meta_key, sys_metadata)
- self.assertEqual(str(instance_type[key]),
- str(sys_metadata[sys_meta_key]))
-
- finally:
- db.instance_destroy(self.context, ref[0]['uuid'])
-
- def test_create_instance_associates_security_groups(self):
- # Make sure create associates security groups.
- group = self._create_group()
- (ref, resv_id) = self.compute_api.create(
- self.context,
- instance_type=flavors.get_default_flavor(),
- image_href='some-fake-image',
- security_group=['testgroup'])
- try:
- self.assertEqual(len(db.security_group_get_by_instance(
- self.context, ref[0]['uuid'])), 1)
- group = db.security_group_get(self.context, group['id'])
- self.assertEqual(1, len(group['instances']))
- finally:
- db.security_group_destroy(self.context, group['id'])
- db.instance_destroy(self.context, ref[0]['uuid'])
-
- def test_create_instance_with_invalid_security_group_raises(self):
- instance_type = flavors.get_default_flavor()
-
- pre_build_len = len(db.instance_get_all(self.context))
- self.assertRaises(exception.SecurityGroupNotFoundForProject,
- self.compute_api.create,
- self.context,
- instance_type=instance_type,
- image_href=None,
- security_group=['this_is_a_fake_sec_group'])
- self.assertEqual(pre_build_len,
- len(db.instance_get_all(self.context)))
-
- def test_create_with_large_user_data(self):
- # Test an instance type with too much user data.
-
- inst_type = flavors.get_default_flavor()
-
- self.fake_image['min_ram'] = 2
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
-
- self.assertRaises(exception.InstanceUserDataTooLarge,
- self.compute_api.create, self.context, inst_type,
- self.fake_image['id'], user_data=('1' * 65536))
-
- def test_create_with_malformed_user_data(self):
- # Test an instance type with malformed user data.
-
- inst_type = flavors.get_default_flavor()
-
- self.fake_image['min_ram'] = 2
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
-
- self.assertRaises(exception.InstanceUserDataMalformed,
- self.compute_api.create, self.context, inst_type,
- self.fake_image['id'], user_data='banana')
-
- def test_create_with_base64_user_data(self):
- # Test an instance type with ok much user data.
-
- inst_type = flavors.get_default_flavor()
-
- self.fake_image['min_ram'] = 2
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
-
- # NOTE(mikal): a string of length 48510 encodes to 65532 characters of
- # base64
- (refs, resv_id) = self.compute_api.create(
- self.context, inst_type, self.fake_image['id'],
- user_data=base64.encodestring('1' * 48510))
- db.instance_destroy(self.context, refs[0]['uuid'])
-
- def test_populate_instance_for_create(self):
- base_options = {'image_ref': self.fake_image['id'],
- 'system_metadata': {'fake': 'value'}}
- instance = objects.Instance()
- instance.update(base_options)
- inst_type = flavors.get_flavor_by_name("m1.tiny")
- instance = self.compute_api._populate_instance_for_create(
- self.context,
- instance,
- self.fake_image,
- 1,
- security_groups=None,
- instance_type=inst_type)
- self.assertEqual(str(base_options['image_ref']),
- instance['system_metadata']['image_base_image_ref'])
- self.assertEqual(vm_states.BUILDING, instance['vm_state'])
- self.assertEqual(task_states.SCHEDULING, instance['task_state'])
- self.assertEqual(1, instance['launch_index'])
- self.assertIsNotNone(instance.get('uuid'))
- self.assertEqual([], instance.security_groups.objects)
-
- def test_default_hostname_generator(self):
- fake_uuids = [str(uuid.uuid4()) for x in xrange(4)]
-
- orig_populate = self.compute_api._populate_instance_for_create
-
- def _fake_populate(context, base_options, *args, **kwargs):
- base_options['uuid'] = fake_uuids.pop(0)
- return orig_populate(context, base_options, *args, **kwargs)
-
- self.stubs.Set(self.compute_api,
- '_populate_instance_for_create',
- _fake_populate)
-
- cases = [(None, 'server-%s' % fake_uuids[0]),
- ('Hello, Server!', 'hello-server'),
- ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello'),
- ('hello_server', 'hello-server')]
- for display_name, hostname in cases:
- (ref, resv_id) = self.compute_api.create(self.context,
- flavors.get_default_flavor(), image_href='some-fake-image',
- display_name=display_name)
- try:
- self.assertEqual(ref[0]['hostname'], hostname)
- finally:
- db.instance_destroy(self.context, ref[0]['uuid'])
-
- def test_instance_create_adds_to_instance_group(self):
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
-
- group = objects.InstanceGroup(self.context)
- group.uuid = str(uuid.uuid4())
- group.create()
-
- inst_type = flavors.get_default_flavor()
- (refs, resv_id) = self.compute_api.create(
- self.context, inst_type, self.fake_image['id'],
- scheduler_hints={'group': group.uuid})
-
- group = objects.InstanceGroup.get_by_uuid(self.context, group.uuid)
- self.assertIn(refs[0]['uuid'], group.members)
-
- db.instance_destroy(self.context, refs[0]['uuid'])
-
- def test_instance_create_auto_creates_group(self):
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
-
- inst_type = flavors.get_default_flavor()
- (refs, resv_id) = self.compute_api.create(
- self.context, inst_type, self.fake_image['id'],
- scheduler_hints={'group': 'groupname'})
-
- group = objects.InstanceGroup.get_by_name(self.context, 'groupname')
- self.assertEqual('groupname', group.name)
- self.assertIn('legacy', group.policies)
- self.assertEqual(1, len(group.members))
- self.assertIn(refs[0]['uuid'], group.members)
-
- # On a second instance, make sure it gets added to the group that was
- # auto-created above
- (refs2, resv_id) = self.compute_api.create(
- self.context, inst_type, self.fake_image['id'],
- scheduler_hints={'group': 'groupname'})
- group = objects.InstanceGroup.get_by_name(self.context, 'groupname')
- self.assertEqual('groupname', group.name)
- self.assertIn('legacy', group.policies)
- self.assertEqual(2, len(group.members))
- self.assertIn(refs[0]['uuid'], group.members)
- self.assertIn(refs2[0]['uuid'], group.members)
-
- db.instance_destroy(self.context, refs[0]['uuid'])
-
- def test_destroy_instance_disassociates_security_groups(self):
- # Make sure destroying disassociates security groups.
- group = self._create_group()
-
- (ref, resv_id) = self.compute_api.create(
- self.context,
- instance_type=flavors.get_default_flavor(),
- image_href='some-fake-image',
- security_group=['testgroup'])
- try:
- db.instance_destroy(self.context, ref[0]['uuid'])
- group = db.security_group_get(self.context, group['id'])
- self.assertEqual(0, len(group['instances']))
- finally:
- db.security_group_destroy(self.context, group['id'])
-
- def test_destroy_security_group_disassociates_instances(self):
- # Make sure destroying security groups disassociates instances.
- group = self._create_group()
-
- (ref, resv_id) = self.compute_api.create(
- self.context,
- instance_type=flavors.get_default_flavor(),
- image_href='some-fake-image',
- security_group=['testgroup'])
-
- try:
- db.security_group_destroy(self.context, group['id'])
- admin_deleted_context = context.get_admin_context(
- read_deleted="only")
- group = db.security_group_get(admin_deleted_context, group['id'])
- self.assertEqual(0, len(group['instances']))
- finally:
- db.instance_destroy(self.context, ref[0]['uuid'])
-
- def _test_rebuild(self, vm_state):
- instance = self._create_fake_instance_obj()
- instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance, {}, {}, None, None,
- None, True, None, False)
-
- instance = objects.Instance.get_by_uuid(self.context,
- instance_uuid)
- self.assertIsNone(instance.task_state)
- # Set some image metadata that should get wiped out and reset
- # as well as some other metadata that should be preserved.
- instance.system_metadata.update({
- 'image_kernel_id': 'old-data',
- 'image_ramdisk_id': 'old_data',
- 'image_something_else': 'old-data',
- 'image_should_remove': 'bye-bye',
- 'preserved': 'preserve this!'})
-
- instance.save()
-
- # Make sure Compute API updates the image_ref before casting to
- # compute manager.
- info = {'image_ref': None, 'clean': False}
-
- def fake_rpc_rebuild(context, **kwargs):
- info['image_ref'] = kwargs['instance'].image_ref
- info['clean'] = kwargs['instance'].obj_what_changed() == set()
-
- self.stubs.Set(self.compute_api.compute_task_api, 'rebuild_instance',
- fake_rpc_rebuild)
-
- image_ref = instance["image_ref"] + '-new_image_ref'
- password = "new_password"
-
- instance.vm_state = vm_state
- instance.save()
-
- self.compute_api.rebuild(self.context, instance, image_ref, password)
- self.assertEqual(info['image_ref'], image_ref)
- self.assertTrue(info['clean'])
-
- instance.refresh()
- self.assertEqual(instance.task_state, task_states.REBUILDING)
- sys_meta = dict([(k, v) for k, v in instance.system_metadata.items()
- if not k.startswith('instance_type')])
- self.assertEqual(sys_meta,
- {'image_kernel_id': 'fake_kernel_id',
- 'image_min_disk': '1',
- 'image_ramdisk_id': 'fake_ramdisk_id',
- 'image_something_else': 'meow',
- 'preserved': 'preserve this!'})
- instance.destroy()
-
- def test_rebuild(self):
- self._test_rebuild(vm_state=vm_states.ACTIVE)
-
- def test_rebuild_in_error_state(self):
- self._test_rebuild(vm_state=vm_states.ERROR)
-
- def test_rebuild_in_error_not_launched(self):
- instance = self._create_fake_instance_obj(params={'image_ref': ''})
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
- self.compute.run_instance(self.context, instance, {}, {}, None, None,
- None, True, None, False)
-
- db.instance_update(self.context, instance['uuid'],
- {"vm_state": vm_states.ERROR,
- "launched_at": None})
-
- instance = db.instance_get_by_uuid(self.context, instance['uuid'])
-
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.rebuild,
- self.context,
- instance,
- instance['image_ref'],
- "new password")
-
- def test_rebuild_no_image(self):
- instance = self._create_fake_instance_obj(params={'image_ref': ''})
- instance_uuid = instance.uuid
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
- self.compute.run_instance(self.context, instance, {}, {}, None, None,
- None, True, None, False)
- self.compute_api.rebuild(self.context, instance, '', 'new_password')
-
- instance = db.instance_get_by_uuid(self.context, instance_uuid)
- self.assertEqual(instance['task_state'], task_states.REBUILDING)
-
- def test_rebuild_with_deleted_image(self):
- # If we're given a deleted image by glance, we should not be able to
- # rebuild from it
- instance = self._create_fake_instance_obj(params={'image_ref': '1'})
- self.fake_image['name'] = 'fake_name'
- self.fake_image['status'] = 'DELETED'
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
-
- expected_message = (
- exception.ImageNotActive.msg_fmt % {'image_id':
- self.fake_image['id']})
- with testtools.ExpectedException(exception.ImageNotActive,
- expected_message):
- self.compute_api.rebuild(self.context, instance,
- self.fake_image['id'], 'new_password')
-
- def test_rebuild_with_too_little_ram(self):
- instance = self._create_fake_instance_obj(params={'image_ref': '1'})
-
- def fake_extract_flavor(_inst, prefix):
- self.assertEqual('', prefix)
- return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
-
- self.stubs.Set(flavors, 'extract_flavor',
- fake_extract_flavor)
-
- self.fake_image['min_ram'] = 128
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
-
- self.assertRaises(exception.FlavorMemoryTooSmall,
- self.compute_api.rebuild, self.context,
- instance, self.fake_image['id'], 'new_password')
-
- # Reduce image memory requirements and make sure it works
- self.fake_image['min_ram'] = 64
-
- self.compute_api.rebuild(self.context,
- instance, self.fake_image['id'], 'new_password')
- db.instance_destroy(self.context, instance['uuid'])
-
- def test_rebuild_with_too_little_disk(self):
- instance = self._create_fake_instance_obj(params={'image_ref': '1'})
-
- def fake_extract_flavor(_inst, prefix):
- self.assertEqual('', prefix)
- return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
-
- self.stubs.Set(flavors, 'extract_flavor',
- fake_extract_flavor)
-
- self.fake_image['min_disk'] = 2
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
-
- self.assertRaises(exception.FlavorDiskTooSmall,
- self.compute_api.rebuild, self.context,
- instance, self.fake_image['id'], 'new_password')
-
- # Reduce image disk requirements and make sure it works
- self.fake_image['min_disk'] = 1
-
- self.compute_api.rebuild(self.context,
- instance, self.fake_image['id'], 'new_password')
- db.instance_destroy(self.context, instance['uuid'])
-
- def test_rebuild_with_just_enough_ram_and_disk(self):
- instance = self._create_fake_instance_obj(params={'image_ref': '1'})
-
- def fake_extract_flavor(_inst, prefix):
- self.assertEqual('', prefix)
- return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
-
- self.stubs.Set(flavors, 'extract_flavor',
- fake_extract_flavor)
-
- self.fake_image['min_ram'] = 64
- self.fake_image['min_disk'] = 1
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
-
- self.compute_api.rebuild(self.context,
- instance, self.fake_image['id'], 'new_password')
- db.instance_destroy(self.context, instance['uuid'])
-
- def test_rebuild_with_no_ram_and_disk_reqs(self):
- instance = self._create_fake_instance_obj(params={'image_ref': '1'})
-
- def fake_extract_flavor(_inst, prefix):
- self.assertEqual('', prefix)
- return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
-
- self.stubs.Set(flavors, 'extract_flavor',
- fake_extract_flavor)
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
-
- self.compute_api.rebuild(self.context,
- instance, self.fake_image['id'], 'new_password')
- db.instance_destroy(self.context, instance['uuid'])
-
- def test_rebuild_with_too_large_image(self):
- instance = self._create_fake_instance_obj(params={'image_ref': '1'})
-
- def fake_extract_flavor(_inst, prefix):
- self.assertEqual('', prefix)
- return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
-
- self.stubs.Set(flavors, 'extract_flavor',
- fake_extract_flavor)
-
- self.fake_image['size'] = '1073741825'
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
-
- self.assertRaises(exception.FlavorDiskTooSmall,
- self.compute_api.rebuild, self.context,
- instance, self.fake_image['id'], 'new_password')
-
- # Reduce image to 1 GB limit and ensure it works
- self.fake_image['size'] = '1073741824'
- self.compute_api.rebuild(self.context,
- instance, self.fake_image['id'], 'new_password')
- db.instance_destroy(self.context, instance['uuid'])
-
- def test_hostname_create(self):
- # Ensure instance hostname is set during creation.
- inst_type = flavors.get_flavor_by_name('m1.tiny')
- (instances, _) = self.compute_api.create(self.context,
- inst_type,
- image_href='some-fake-image',
- display_name='test host')
-
- self.assertEqual('test-host', instances[0]['hostname'])
-
- def _fake_rescue_block_devices(self, instance, status="in-use"):
- fake_bdms = block_device_obj.block_device_make_list(self.context,
- [fake_block_device.FakeDbBlockDeviceDict(
- {'device_name': '/dev/vda',
- 'source_type': 'volume',
- 'boot_index': 0,
- 'destination_type': 'volume',
- 'volume_id': 'bf0b6b00-a20c-11e2-9e96-0800200c9a66'})])
-
- volume = {'id': 'bf0b6b00-a20c-11e2-9e96-0800200c9a66',
- 'state': 'active', 'instance_uuid': instance['uuid']}
-
- return fake_bdms, volume
-
    @mock.patch.object(objects.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    @mock.patch.object(cinder.API, 'get')
    def test_rescue_volume_backed_no_image(self, mock_get_vol, mock_get_bdms):
        """Rescue of a volume-backed instance with no image must fail.

        Decorators apply bottom-up: mock_get_vol patches cinder.API.get,
        mock_get_bdms patches BlockDeviceMappingList.get_by_instance_uuid.
        """
        # Instance started without an image
        params = {'image_ref': ''}
        volume_backed_inst_1 = self._create_fake_instance_obj(params=params)
        bdms, volume = self._fake_rescue_block_devices(volume_backed_inst_1)

        # Make the instance look volume-backed: one in-use root volume BDM.
        mock_get_vol.return_value = {'id': volume['id'], 'status': "in-use"}
        mock_get_bdms.return_value = bdms

        # Boot the instance without actually preparing block devices.
        with mock.patch.object(self.compute, '_prep_block_device'):
            self.compute.run_instance(self.context,
                    volume_backed_inst_1, {}, {}, None, None,
                    None, True, None, False)

        # No image to rescue with -> not rescuable.
        self.assertRaises(exception.InstanceNotRescuable,
                          self.compute_api.rescue, self.context,
                          volume_backed_inst_1)

        self.compute.terminate_instance(self.context, volume_backed_inst_1,
                                        [], [])
-
    @mock.patch.object(objects.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    @mock.patch.object(cinder.API, 'get')
    def test_rescue_volume_backed_placeholder_image(self,
                                                    mock_get_vol,
                                                    mock_get_bdms):
        """Rescue must also fail when image_ref is only a metadata placeholder.

        Decorators apply bottom-up: mock_get_vol patches cinder.API.get,
        mock_get_bdms patches BlockDeviceMappingList.get_by_instance_uuid.
        """
        # Instance started with a placeholder image (for metadata)
        volume_backed_inst_2 = self._create_fake_instance_obj(
            {'image_ref': 'my_placeholder_img',
             'root_device_name': '/dev/vda'})
        bdms, volume = self._fake_rescue_block_devices(volume_backed_inst_2)

        # Make the instance look volume-backed: one in-use root volume BDM.
        mock_get_vol.return_value = {'id': volume['id'], 'status': "in-use"}
        mock_get_bdms.return_value = bdms

        # Boot the instance without actually preparing block devices.
        with mock.patch.object(self.compute, '_prep_block_device'):
            self.compute.run_instance(self.context,
                    volume_backed_inst_2, {}, {}, None, None,
                    None, True, None, False)

        # Root device is the volume, so the placeholder image is unusable.
        self.assertRaises(exception.InstanceNotRescuable,
                          self.compute_api.rescue, self.context,
                          volume_backed_inst_2)

        self.compute.terminate_instance(self.context, volume_backed_inst_2,
                                        [], [])
-
- def test_get(self):
- # Test get instance.
- exp_instance = self._create_fake_instance()
- # NOTE(danms): Transform the db object in a similar way as
- # the API method will do.
- expected = obj_base.obj_to_primitive(
- objects.Instance._from_db_object(
- self.context, objects.Instance(), exp_instance,
- instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))
-
- def fake_db_get(_context, _instance_uuid,
- columns_to_join=None, use_slave=False):
- return exp_instance
-
- self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get)
-
- instance = self.compute_api.get(self.context, exp_instance['uuid'])
- self.assertEqual(unify_instance(expected),
- unify_instance(instance))
-
- def test_get_with_admin_context(self):
- # Test get instance.
- c = context.get_admin_context()
- exp_instance = self._create_fake_instance()
- # NOTE(danms): Transform the db object in a similar way as
- # the API method will do.
- expected = obj_base.obj_to_primitive(
- objects.Instance._from_db_object(
- c, objects.Instance(), exp_instance,
- instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))
-
- def fake_db_get(context, instance_uuid,
- columns_to_join=None, use_slave=False):
- return exp_instance
-
- self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get)
-
- instance = self.compute_api.get(c, exp_instance['uuid'])
- self.assertEqual(unify_instance(expected),
- unify_instance(instance))
-
- def test_get_with_integer_id(self):
- # Test get instance with an integer id.
- exp_instance = self._create_fake_instance()
- # NOTE(danms): Transform the db object in a similar way as
- # the API method will do.
- expected = obj_base.obj_to_primitive(
- objects.Instance._from_db_object(
- self.context, objects.Instance(), exp_instance,
- instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))
-
- def fake_db_get(_context, _instance_id, columns_to_join=None):
- return exp_instance
-
- self.stubs.Set(db, 'instance_get', fake_db_get)
-
- instance = self.compute_api.get(self.context, exp_instance['id'])
- self.assertEqual(unify_instance(expected),
- unify_instance(instance))
-
- def test_get_all_by_name_regexp(self):
- # Test searching instances by name (display_name).
- c = context.get_admin_context()
- instance1 = self._create_fake_instance({'display_name': 'woot'})
- instance2 = self._create_fake_instance({
- 'display_name': 'woo'})
- instance3 = self._create_fake_instance({
- 'display_name': 'not-woot'})
-
- instances = self.compute_api.get_all(c,
- search_opts={'name': '^woo.*'})
- self.assertEqual(len(instances), 2)
- instance_uuids = [instance['uuid'] for instance in instances]
- self.assertIn(instance1['uuid'], instance_uuids)
- self.assertIn(instance2['uuid'], instance_uuids)
-
- instances = self.compute_api.get_all(c,
- search_opts={'name': '^woot.*'})
- instance_uuids = [instance['uuid'] for instance in instances]
- self.assertEqual(len(instances), 1)
- self.assertIn(instance1['uuid'], instance_uuids)
-
- instances = self.compute_api.get_all(c,
- search_opts={'name': '.*oot.*'})
- self.assertEqual(len(instances), 2)
- instance_uuids = [instance['uuid'] for instance in instances]
- self.assertIn(instance1['uuid'], instance_uuids)
- self.assertIn(instance3['uuid'], instance_uuids)
-
- instances = self.compute_api.get_all(c,
- search_opts={'name': '^n.*'})
- self.assertEqual(len(instances), 1)
- instance_uuids = [instance['uuid'] for instance in instances]
- self.assertIn(instance3['uuid'], instance_uuids)
-
- instances = self.compute_api.get_all(c,
- search_opts={'name': 'noth.*'})
- self.assertEqual(len(instances), 0)
-
- db.instance_destroy(c, instance1['uuid'])
- db.instance_destroy(c, instance2['uuid'])
- db.instance_destroy(c, instance3['uuid'])
-
    def test_get_all_by_multiple_options_at_once(self):
        """Multiple search options must all match (AND semantics)."""
        # Test searching by multiple options at once.
        c = context.get_admin_context()

        def fake_network_info(ip):
            # Minimal serialized info-cache blob carrying one fixed IP, which
            # is what the 'ip' search option regexp-matches against.
            info = [{
                'address': 'aa:bb:cc:dd:ee:ff',
                'id': 1,
                'network': {
                    'bridge': 'br0',
                    'id': 1,
                    'label': 'private',
                    'subnets': [{
                        'cidr': '192.168.0.0/24',
                        'ips': [{
                            'address': ip,
                            'type': 'fixed',
                        }]
                    }]
                }
            }]
            return jsonutils.dumps(info)

        instance1 = self._create_fake_instance({
            'display_name': 'woot',
            'id': 1,
            'uuid': '00000000-0000-0000-0000-000000000010',
            'info_cache': {'network_info':
                           fake_network_info('192.168.0.1')}})
        instance2 = self._create_fake_instance({
            'display_name': 'woo',
            'id': 20,
            'uuid': '00000000-0000-0000-0000-000000000020',
            'info_cache': {'network_info':
                           fake_network_info('192.168.0.2')}})
        instance3 = self._create_fake_instance({
            'display_name': 'not-woot',
            'id': 30,
            'uuid': '00000000-0000-0000-0000-000000000030',
            'info_cache': {'network_info':
                           fake_network_info('192.168.0.3')}})

        # ip ends up matching 2nd octet here.. so all 3 match ip
        # but 'name' only matches one
        instances = self.compute_api.get_all(c,
                search_opts={'ip': '.*\.1', 'name': 'not.*'})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0]['uuid'], instance3['uuid'])

        # ip ends up matching any ip with a '1' in the last octet..
        # so instance 1 and 3.. but name should only match #1
        # but 'name' only matches one
        instances = self.compute_api.get_all(c,
                search_opts={'ip': '.*\.1$', 'name': '^woo.*'})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0]['uuid'], instance1['uuid'])

        # same as above but no match on name (name matches instance1
        # but the ip query doesn't
        instances = self.compute_api.get_all(c,
                search_opts={'ip': '.*\.2$', 'name': '^woot.*'})
        self.assertEqual(len(instances), 0)

        # ip matches all 3... ipv6 matches #2+#3...name matches #3
        instances = self.compute_api.get_all(c,
                search_opts={'ip': '.*\.1',
                             'name': 'not.*',
                             'ip6': '^.*12.*34.*'})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0]['uuid'], instance3['uuid'])

        db.instance_destroy(c, instance1['uuid'])
        db.instance_destroy(c, instance2['uuid'])
        db.instance_destroy(c, instance3['uuid'])
-
- def test_get_all_by_image(self):
- # Test searching instances by image.
-
- c = context.get_admin_context()
- instance1 = self._create_fake_instance({'image_ref': '1234'})
- instance2 = self._create_fake_instance({'image_ref': '4567'})
- instance3 = self._create_fake_instance({'image_ref': '4567'})
-
- instances = self.compute_api.get_all(c, search_opts={'image': '123'})
- self.assertEqual(len(instances), 0)
-
- instances = self.compute_api.get_all(c, search_opts={'image': '1234'})
- self.assertEqual(len(instances), 1)
- self.assertEqual(instances[0]['uuid'], instance1['uuid'])
-
- instances = self.compute_api.get_all(c, search_opts={'image': '4567'})
- self.assertEqual(len(instances), 2)
- instance_uuids = [instance['uuid'] for instance in instances]
- self.assertIn(instance2['uuid'], instance_uuids)
- self.assertIn(instance3['uuid'], instance_uuids)
-
- # Test passing a list as search arg
- instances = self.compute_api.get_all(c,
- search_opts={'image': ['1234', '4567']})
- self.assertEqual(len(instances), 3)
-
- db.instance_destroy(c, instance1['uuid'])
- db.instance_destroy(c, instance2['uuid'])
- db.instance_destroy(c, instance3['uuid'])
-
- def test_get_all_by_flavor(self):
- # Test searching instances by image.
-
- c = context.get_admin_context()
- instance1 = self._create_fake_instance({'instance_type_id': 1})
- instance2 = self._create_fake_instance({'instance_type_id': 2})
- instance3 = self._create_fake_instance({'instance_type_id': 2})
-
- # NOTE(comstud): Migrations set up the instance_types table
- # for us. Therefore, we assume the following is true for
- # these tests:
- # instance_type_id 1 == flavor 3
- # instance_type_id 2 == flavor 1
- # instance_type_id 3 == flavor 4
- # instance_type_id 4 == flavor 5
- # instance_type_id 5 == flavor 2
-
- instances = self.compute_api.get_all(c,
- search_opts={'flavor': 5})
- self.assertEqual(len(instances), 0)
-
- # ensure unknown filter maps to an exception
- self.assertRaises(exception.FlavorNotFound,
- self.compute_api.get_all, c,
- search_opts={'flavor': 99})
-
- instances = self.compute_api.get_all(c, search_opts={'flavor': 3})
- self.assertEqual(len(instances), 1)
- self.assertEqual(instances[0]['id'], instance1['id'])
-
- instances = self.compute_api.get_all(c, search_opts={'flavor': 1})
- self.assertEqual(len(instances), 2)
- instance_uuids = [instance['uuid'] for instance in instances]
- self.assertIn(instance2['uuid'], instance_uuids)
- self.assertIn(instance3['uuid'], instance_uuids)
-
- db.instance_destroy(c, instance1['uuid'])
- db.instance_destroy(c, instance2['uuid'])
- db.instance_destroy(c, instance3['uuid'])
-
- def test_get_all_by_state(self):
- # Test searching instances by state.
-
- c = context.get_admin_context()
- instance1 = self._create_fake_instance({
- 'power_state': power_state.SHUTDOWN,
- })
- instance2 = self._create_fake_instance({
- 'power_state': power_state.RUNNING,
- })
- instance3 = self._create_fake_instance({
- 'power_state': power_state.RUNNING,
- })
-
- instances = self.compute_api.get_all(c,
- search_opts={'power_state': power_state.SUSPENDED})
- self.assertEqual(len(instances), 0)
-
- instances = self.compute_api.get_all(c,
- search_opts={'power_state': power_state.SHUTDOWN})
- self.assertEqual(len(instances), 1)
- self.assertEqual(instances[0]['uuid'], instance1['uuid'])
-
- instances = self.compute_api.get_all(c,
- search_opts={'power_state': power_state.RUNNING})
- self.assertEqual(len(instances), 2)
- instance_uuids = [instance['uuid'] for instance in instances]
- self.assertIn(instance2['uuid'], instance_uuids)
- self.assertIn(instance3['uuid'], instance_uuids)
-
- # Test passing a list as search arg
- instances = self.compute_api.get_all(c,
- search_opts={'power_state': [power_state.SHUTDOWN,
- power_state.RUNNING]})
- self.assertEqual(len(instances), 3)
-
- db.instance_destroy(c, instance1['uuid'])
- db.instance_destroy(c, instance2['uuid'])
- db.instance_destroy(c, instance3['uuid'])
-
    def test_get_all_by_metadata(self):
        """The 'metadata' search option filters by JSON-encoded key/values."""
        # Test searching instances by metadata.

        c = context.get_admin_context()
        instance0 = self._create_fake_instance()
        instance1 = self._create_fake_instance({
                'metadata': {'key1': 'value1'}})
        instance2 = self._create_fake_instance({
                'metadata': {'key2': 'value2'}})
        instance3 = self._create_fake_instance({
                'metadata': {'key3': 'value3'}})
        instance4 = self._create_fake_instance({
                'metadata': {'key3': 'value3',
                             'key4': 'value4'}})

        # get all instances
        instances = self.compute_api.get_all(c,
                search_opts={'metadata': u"{}"})
        self.assertEqual(len(instances), 5)

        # wrong key/value combination
        instances = self.compute_api.get_all(c,
                search_opts={'metadata': u'{"key1": "value3"}'})
        self.assertEqual(len(instances), 0)

        # non-existing keys
        instances = self.compute_api.get_all(c,
                search_opts={'metadata': u'{"key5": "value1"}'})
        self.assertEqual(len(instances), 0)

        # find existing instance
        instances = self.compute_api.get_all(c,
                search_opts={'metadata': u'{"key2": "value2"}'})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0]['uuid'], instance2['uuid'])

        instances = self.compute_api.get_all(c,
                search_opts={'metadata': u'{"key3": "value3"}'})
        self.assertEqual(len(instances), 2)
        instance_uuids = [instance['uuid'] for instance in instances]
        self.assertIn(instance3['uuid'], instance_uuids)
        self.assertIn(instance4['uuid'], instance_uuids)

        # multiple criteria as a dict
        instances = self.compute_api.get_all(c,
                search_opts={'metadata': u'{"key3": "value3","key4": "value4"}'})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0]['uuid'], instance4['uuid'])

        # multiple criteria as a list
        instances = self.compute_api.get_all(c,
                search_opts=
                    {'metadata': u'[{"key4": "value4"},{"key3": "value3"}]'})
        self.assertEqual(len(instances), 1)
        self.assertEqual(instances[0]['uuid'], instance4['uuid'])

        db.instance_destroy(c, instance0['uuid'])
        db.instance_destroy(c, instance1['uuid'])
        db.instance_destroy(c, instance2['uuid'])
        db.instance_destroy(c, instance3['uuid'])
        db.instance_destroy(c, instance4['uuid'])
-
- def test_get_all_by_system_metadata(self):
- # Test searching instances by system metadata.
-
- c = context.get_admin_context()
- instance1 = self._create_fake_instance({
- 'system_metadata': {'key1': 'value1'}})
-
- # find existing instance
- instances = self.compute_api.get_all(c,
- search_opts={'system_metadata': u'{"key1": "value1"}'})
- self.assertEqual(len(instances), 1)
- self.assertEqual(instances[0]['uuid'], instance1['uuid'])
-
- def test_all_instance_metadata(self):
- self._create_fake_instance({'metadata': {'key1': 'value1'},
- 'user_id': 'user1',
- 'project_id': 'project1'})
-
- self._create_fake_instance({'metadata': {'key2': 'value2'},
- 'user_id': 'user2',
- 'project_id': 'project2'})
-
- _context = self.context
- _context.user_id = 'user1'
- _context.project_id = 'project1'
- metadata = self.compute_api.get_all_instance_metadata(_context,
- search_filts=[])
- self.assertEqual(1, len(metadata))
- self.assertEqual(metadata[0]['key'], 'key1')
-
- _context.user_id = 'user2'
- _context.project_id = 'project2'
- metadata = self.compute_api.get_all_instance_metadata(_context,
- search_filts=[])
- self.assertEqual(1, len(metadata))
- self.assertEqual(metadata[0]['key'], 'key2')
-
- _context = context.get_admin_context()
- metadata = self.compute_api.get_all_instance_metadata(_context,
- search_filts=[])
- self.assertEqual(2, len(metadata))
-
    def test_instance_metadata(self):
        """Metadata CRUD pushes diffs over RPC and emits notifications.

        Captures each diff passed to change_instance_metadata and checks
        one notification (with the resulting metadata) per API call.
        """
        meta_changes = [None]
        self.flags(notify_on_state_change='vm_state')

        def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
                                          instance_uuid=None):
            # Record the metadata diff the API computed.
            meta_changes[0] = diff
        self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
                       fake_change_instance_metadata)

        _context = context.get_admin_context()
        instance = self._create_fake_instance_obj({'metadata':
                                                       {'key1': 'value1'}})

        metadata = self.compute_api.get_instance_metadata(_context, instance)
        self.assertEqual(metadata, {'key1': 'value1'})

        # Plain update: adds key2, keeps key1.
        self.compute_api.update_instance_metadata(_context, instance,
                                                  {'key2': 'value2'})
        metadata = self.compute_api.get_instance_metadata(_context, instance)
        self.assertEqual(metadata, {'key1': 'value1', 'key2': 'value2'})
        self.assertEqual(meta_changes, [{'key2': ['+', 'value2']}])

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
        msg = fake_notifier.NOTIFICATIONS[0]
        payload = msg.payload
        self.assertIn('metadata', payload)
        self.assertEqual(payload['metadata'], metadata)

        # delete=True replaces the whole dict: key1 removed, key2 changed.
        new_metadata = {'key2': 'bah', 'key3': 'value3'}
        self.compute_api.update_instance_metadata(_context, instance,
                                                  new_metadata, delete=True)
        metadata = self.compute_api.get_instance_metadata(_context, instance)
        self.assertEqual(metadata, new_metadata)
        self.assertEqual(meta_changes, [{
                    'key1': ['-'],
                    'key2': ['+', 'bah'],
                    'key3': ['+', 'value3'],
                    }])

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[1]
        payload = msg.payload
        self.assertIn('metadata', payload)
        self.assertEqual(payload['metadata'], metadata)

        # Single-key delete.
        self.compute_api.delete_instance_metadata(_context, instance, 'key2')
        metadata = self.compute_api.get_instance_metadata(_context, instance)
        self.assertEqual(metadata, {'key3': 'value3'})
        self.assertEqual(meta_changes, [{'key2': ['-']}])

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3)
        msg = fake_notifier.NOTIFICATIONS[2]
        payload = msg.payload
        self.assertIn('metadata', payload)
        self.assertEqual(payload['metadata'], {'key3': 'value3'})

        db.instance_destroy(_context, instance['uuid'])
-
- def test_disallow_metadata_changes_during_building(self):
- def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
- instance_uuid=None):
- pass
- self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
- fake_change_instance_metadata)
-
- instance = self._create_fake_instance({'vm_state': vm_states.BUILDING})
- instance = dict(instance)
-
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.delete_instance_metadata, self.context,
- instance, "key")
-
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.update_instance_metadata, self.context,
- instance, "key")
-
- def test_get_instance_faults(self):
- # Get an instances latest fault.
- instance = self._create_fake_instance()
-
- fault_fixture = {
- 'code': 404,
- 'instance_uuid': instance['uuid'],
- 'message': "HTTPNotFound",
- 'details': "Stock details for test",
- 'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
- }
-
- def return_fault(_ctxt, instance_uuids):
- return dict.fromkeys(instance_uuids, [fault_fixture])
-
- self.stubs.Set(nova.db,
- 'instance_fault_get_by_instance_uuids',
- return_fault)
-
- _context = context.get_admin_context()
- output = self.compute_api.get_instance_faults(_context, [instance])
- expected = {instance['uuid']: [fault_fixture]}
- self.assertEqual(output, expected)
-
- db.instance_destroy(_context, instance['uuid'])
-
- @staticmethod
- def _parse_db_block_device_mapping(bdm_ref):
- attr_list = ('delete_on_termination', 'device_name', 'no_device',
- 'virtual_name', 'volume_id', 'volume_size', 'snapshot_id')
- bdm = {}
- for attr in attr_list:
- val = bdm_ref.get(attr, None)
- if val:
- bdm[attr] = val
-
- return bdm
-
    def test_update_block_device_mapping(self):
        """Image mappings are persisted, then selectively overridden by BDMs.

        First applies the image 'mappings' (swap/ephemeral placeholders) and
        checks the persisted rows; then applies explicit block device
        mappings and checks that they overwrite/extend the earlier rows.
        """
        swap_size = ephemeral_size = 1
        instance_type = {'swap': swap_size, 'ephemeral_gb': ephemeral_size}
        instance = self._create_fake_instance_obj()
        mappings = [
                {'virtual': 'ami', 'device': 'sda1'},
                {'virtual': 'root', 'device': '/dev/sda1'},

                {'virtual': 'swap', 'device': 'sdb4'},
                {'virtual': 'swap', 'device': 'sdb3'},
                {'virtual': 'swap', 'device': 'sdb2'},
                {'virtual': 'swap', 'device': 'sdb1'},

                {'virtual': 'ephemeral0', 'device': 'sdc1'},
                {'virtual': 'ephemeral1', 'device': 'sdc2'},
                {'virtual': 'ephemeral2', 'device': 'sdc3'}]
        block_device_mapping = [
                # root
                {'device_name': '/dev/sda1',
                 'source_type': 'snapshot', 'destination_type': 'volume',
                 'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
                 'delete_on_termination': False},

                # overwrite swap
                {'device_name': '/dev/sdb2',
                 'source_type': 'snapshot', 'destination_type': 'volume',
                 'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111',
                 'delete_on_termination': False},
                {'device_name': '/dev/sdb3',
                 'source_type': 'snapshot', 'destination_type': 'volume',
                 'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222'},
                {'device_name': '/dev/sdb4',
                 'no_device': True},

                # overwrite ephemeral
                {'device_name': '/dev/sdc1',
                 'source_type': 'snapshot', 'destination_type': 'volume',
                 'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333',
                 'delete_on_termination': False},
                {'device_name': '/dev/sdc2',
                 'source_type': 'snapshot', 'destination_type': 'volume',
                 'snapshot_id': '33333333-aaaa-bbbb-cccc-444444444444',
                 'delete_on_termination': False},
                {'device_name': '/dev/sdc3',
                 'source_type': 'snapshot', 'destination_type': 'volume',
                 'snapshot_id': '44444444-aaaa-bbbb-cccc-555555555555'},
                {'device_name': '/dev/sdc4',
                 'no_device': True},

                # volume
                {'device_name': '/dev/sdd1',
                 'source_type': 'snapshot', 'destination_type': 'volume',
                 'snapshot_id': '55555555-aaaa-bbbb-cccc-666666666666',
                 'delete_on_termination': False},
                {'device_name': '/dev/sdd2',
                 'source_type': 'snapshot', 'destination_type': 'volume',
                 'snapshot_id': '66666666-aaaa-bbbb-cccc-777777777777'},
                {'device_name': '/dev/sdd3',
                 'source_type': 'snapshot', 'destination_type': 'volume',
                 'snapshot_id': '77777777-aaaa-bbbb-cccc-888888888888'},
                {'device_name': '/dev/sdd4',
                 'no_device': True}]

        # Phase 1: persist the image-defined mappings only.
        image_mapping = self.compute_api._prepare_image_mapping(
            instance_type, mappings)
        self.compute_api._update_block_device_mapping(
            self.context, instance_type, instance['uuid'], image_mapping)

        bdms = [block_device.BlockDeviceDict(bdm) for bdm in
                    db.block_device_mapping_get_all_by_instance(
                        self.context, instance['uuid'])]
        expected_result = [
            {'source_type': 'blank', 'destination_type': 'local',
             'guest_format': 'swap', 'device_name': '/dev/sdb1',
             'volume_size': swap_size, 'delete_on_termination': True},
            {'source_type': 'blank', 'destination_type': 'local',
             'guest_format': CONF.default_ephemeral_format,
             'device_name': '/dev/sdc3', 'delete_on_termination': True},
            {'source_type': 'blank', 'destination_type': 'local',
             'guest_format': CONF.default_ephemeral_format,
             'device_name': '/dev/sdc1', 'delete_on_termination': True},
            {'source_type': 'blank', 'destination_type': 'local',
             'guest_format': CONF.default_ephemeral_format,
             'device_name': '/dev/sdc2', 'delete_on_termination': True},
        ]
        # Compare order-independently by sorting on device_name.
        bdms.sort(key=operator.itemgetter('device_name'))
        expected_result.sort(key=operator.itemgetter('device_name'))
        self.assertEqual(len(bdms), len(expected_result))
        for expected, got in zip(expected_result, bdms):
            self.assertThat(expected, matchers.IsSubDictOf(got))

        # Phase 2: apply the explicit BDMs, which override image mappings.
        self.compute_api._update_block_device_mapping(
            self.context, flavors.get_default_flavor(),
            instance['uuid'], block_device_mapping)
        bdms = [block_device.BlockDeviceDict(bdm) for bdm in
                    db.block_device_mapping_get_all_by_instance(
                            self.context, instance['uuid'])]
        expected_result = [
            {'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
                'device_name': '/dev/sda1'},

            {'source_type': 'blank', 'destination_type': 'local',
             'guest_format': 'swap', 'device_name': '/dev/sdb1',
             'volume_size': swap_size, 'delete_on_termination': True},
            {'device_name': '/dev/sdb2',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111',
             'delete_on_termination': False},
            {'device_name': '/dev/sdb3',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222'},
            {'device_name': '/dev/sdb4', 'no_device': True},

            {'device_name': '/dev/sdc1',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333',
             'delete_on_termination': False},
            {'device_name': '/dev/sdc2',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '33333333-aaaa-bbbb-cccc-444444444444',
             'delete_on_termination': False},
            {'device_name': '/dev/sdc3',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '44444444-aaaa-bbbb-cccc-555555555555'},
            {'no_device': True, 'device_name': '/dev/sdc4'},

            {'device_name': '/dev/sdd1',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '55555555-aaaa-bbbb-cccc-666666666666',
             'delete_on_termination': False},
            {'device_name': '/dev/sdd2',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '66666666-aaaa-bbbb-cccc-777777777777'},
            {'device_name': '/dev/sdd3',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '77777777-aaaa-bbbb-cccc-888888888888'},
            {'no_device': True, 'device_name': '/dev/sdd4'}]
        bdms.sort(key=operator.itemgetter('device_name'))
        expected_result.sort(key=operator.itemgetter('device_name'))
        self.assertEqual(len(bdms), len(expected_result))
        for expected, got in zip(expected_result, bdms):
            self.assertThat(expected, matchers.IsSubDictOf(got))

        # Cleanup: remove BDM rows before terminating the instance.
        for bdm in db.block_device_mapping_get_all_by_instance(
                self.context, instance['uuid']):
            db.block_device_mapping_destroy(self.context, bdm['id'])
        instance.refresh()
        self.compute.terminate_instance(self.context, instance, [], [])
-
- def _test_check_and_transform_bdm(self, bdms, expected_bdms,
- image_bdms=None, base_options=None,
- legacy_bdms=False,
- legacy_image_bdms=False):
- image_bdms = image_bdms or []
- image_meta = {}
- if image_bdms:
- image_meta = {'properties': {'block_device_mapping': image_bdms}}
- if not legacy_image_bdms:
- image_meta['properties']['bdm_v2'] = True
- base_options = base_options or {'root_device_name': 'vda',
- 'image_ref': FAKE_IMAGE_REF}
- transformed_bdm = self.compute_api._check_and_transform_bdm(
- base_options, {}, image_meta, 1, 1, bdms, legacy_bdms)
- self.assertThat(expected_bdms,
- matchers.DictListMatches(transformed_bdm))
-
- def test_check_and_transform_legacy_bdm_no_image_bdms(self):
- legacy_bdms = [
- {'device_name': '/dev/vda',
- 'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
- 'delete_on_termination': False}]
- expected_bdms = [block_device.BlockDeviceDict.from_legacy(
- legacy_bdms[0])]
- expected_bdms[0]['boot_index'] = 0
- self._test_check_and_transform_bdm(legacy_bdms, expected_bdms,
- legacy_bdms=True)
-
- def test_check_and_transform_legacy_bdm_legacy_image_bdms(self):
- image_bdms = [
- {'device_name': '/dev/vda',
- 'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
- 'delete_on_termination': False}]
- legacy_bdms = [
- {'device_name': '/dev/vdb',
- 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444',
- 'delete_on_termination': False}]
- expected_bdms = [
- block_device.BlockDeviceDict.from_legacy(legacy_bdms[0]),
- block_device.BlockDeviceDict.from_legacy(image_bdms[0])]
- expected_bdms[0]['boot_index'] = -1
- expected_bdms[1]['boot_index'] = 0
- self._test_check_and_transform_bdm(legacy_bdms, expected_bdms,
- image_bdms=image_bdms,
- legacy_bdms=True,
- legacy_image_bdms=True)
-
- def test_check_and_transform_legacy_bdm_image_bdms(self):
- legacy_bdms = [
- {'device_name': '/dev/vdb',
- 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444',
- 'delete_on_termination': False}]
- image_bdms = [block_device.BlockDeviceDict(
- {'source_type': 'volume', 'destination_type': 'volume',
- 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444',
- 'boot_index': 0})]
- expected_bdms = [
- block_device.BlockDeviceDict.from_legacy(legacy_bdms[0]),
- image_bdms[0]]
- expected_bdms[0]['boot_index'] = -1
- self._test_check_and_transform_bdm(legacy_bdms, expected_bdms,
- image_bdms=image_bdms,
- legacy_bdms=True)
-
- def test_check_and_transform_bdm_no_image_bdms(self):
- bdms = [block_device.BlockDeviceDict({'source_type': 'image',
- 'destination_type': 'local',
- 'image_id': FAKE_IMAGE_REF,
- 'boot_index': 0})]
- expected_bdms = bdms
- self._test_check_and_transform_bdm(bdms, expected_bdms)
-
- def test_check_and_transform_bdm_image_bdms(self):
- bdms = [block_device.BlockDeviceDict({'source_type': 'image',
- 'destination_type': 'local',
- 'image_id': FAKE_IMAGE_REF,
- 'boot_index': 0})]
- image_bdms = [block_device.BlockDeviceDict(
- {'source_type': 'volume', 'destination_type': 'volume',
- 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444'})]
- expected_bdms = bdms + image_bdms
- self._test_check_and_transform_bdm(bdms, expected_bdms,
- image_bdms=image_bdms)
-
- def test_check_and_transform_bdm_legacy_image_bdms(self):
- bdms = [block_device.BlockDeviceDict({'source_type': 'image',
- 'destination_type': 'local',
- 'image_id': FAKE_IMAGE_REF,
- 'boot_index': 0})]
- image_bdms = [{'device_name': '/dev/vda',
- 'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
- 'delete_on_termination': False}]
- expected_bdms = [block_device.BlockDeviceDict.from_legacy(
- image_bdms[0])]
- expected_bdms[0]['boot_index'] = 0
- self._test_check_and_transform_bdm(bdms, expected_bdms,
- image_bdms=image_bdms,
- legacy_image_bdms=True)
-
    def test_check_and_transform_image(self):
        """_check_and_transform_bdm adds an image BDM only when needed.

        Note: base_options is mutated between calls ('vdb' -> 'vda'),
        which changes whether the root device is already covered.
        """
        base_options = {'root_device_name': 'vdb',
                        'image_ref': FAKE_IMAGE_REF}
        fake_legacy_bdms = [
            {'device_name': '/dev/vda',
             'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
             'delete_on_termination': False}]

        image_meta = {'properties': {'block_device_mapping': [
            {'device_name': '/dev/vda',
             'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333'}]}}

        # We get an image BDM
        transformed_bdm = self.compute_api._check_and_transform_bdm(
            base_options, {}, {}, 1, 1, fake_legacy_bdms, True)
        self.assertEqual(len(transformed_bdm), 2)

        # No image BDM created if image already defines a root BDM
        base_options['root_device_name'] = 'vda'
        transformed_bdm = self.compute_api._check_and_transform_bdm(
            base_options, {}, image_meta, 1, 1, [], True)
        self.assertEqual(len(transformed_bdm), 1)

        # No image BDM created
        transformed_bdm = self.compute_api._check_and_transform_bdm(
            base_options, {}, {}, 1, 1, fake_legacy_bdms, True)
        self.assertEqual(len(transformed_bdm), 1)

        # Volumes with multiple instances fails
        self.assertRaises(exception.InvalidRequest,
                          self.compute_api._check_and_transform_bdm,
                          base_options, {}, {}, 1, 2, fake_legacy_bdms, True)

        # Re-checking already-transformed BDMs is a no-op.
        checked_bdm = self.compute_api._check_and_transform_bdm(
            base_options, {}, {}, 1, 1, transformed_bdm, True)
        self.assertEqual(checked_bdm, transformed_bdm)
-
- def test_volume_size(self):
- ephemeral_size = 2
- swap_size = 3
- volume_size = 5
-
- swap_bdm = {'source_type': 'blank', 'guest_format': 'swap'}
- ephemeral_bdm = {'source_type': 'blank', 'guest_format': None}
- volume_bdm = {'source_type': 'volume', 'volume_size': volume_size}
-
- inst_type = {'ephemeral_gb': ephemeral_size, 'swap': swap_size}
- self.assertEqual(
- self.compute_api._volume_size(inst_type, ephemeral_bdm),
- ephemeral_size)
- ephemeral_bdm['volume_size'] = 42
- self.assertEqual(
- self.compute_api._volume_size(inst_type, ephemeral_bdm), 42)
- self.assertEqual(
- self.compute_api._volume_size(inst_type, swap_bdm),
- swap_size)
- swap_bdm['volume_size'] = 42
- self.assertEqual(
- self.compute_api._volume_size(inst_type, swap_bdm), 42)
- self.assertEqual(
- self.compute_api._volume_size(inst_type, volume_bdm),
- volume_size)
-
- def test_is_volume_backed_instance(self):
- ctxt = self.context
-
- instance = self._create_fake_instance({'image_ref': ''})
- self.assertTrue(
- self.compute_api.is_volume_backed_instance(ctxt, instance, None))
-
- instance = self._create_fake_instance({'root_device_name': 'vda'})
- self.assertFalse(
- self.compute_api.is_volume_backed_instance(
- ctxt, instance,
- block_device_obj.block_device_make_list(ctxt, [])))
-
- bdms = block_device_obj.block_device_make_list(ctxt,
- [fake_block_device.FakeDbBlockDeviceDict(
- {'source_type': 'volume',
- 'device_name': '/dev/vda',
- 'volume_id': 'fake_volume_id',
- 'boot_index': 0,
- 'destination_type': 'volume'})])
- self.assertTrue(
- self.compute_api.is_volume_backed_instance(ctxt, instance, bdms))
-
- bdms = block_device_obj.block_device_make_list(ctxt,
- [fake_block_device.FakeDbBlockDeviceDict(
- {'source_type': 'volume',
- 'device_name': '/dev/vda',
- 'volume_id': 'fake_volume_id',
- 'destination_type': 'local',
- 'boot_index': 0,
- 'snapshot_id': None}),
- fake_block_device.FakeDbBlockDeviceDict(
- {'source_type': 'volume',
- 'device_name': '/dev/vdb',
- 'boot_index': 1,
- 'destination_type': 'volume',
- 'volume_id': 'c2ec2156-d75e-11e2-985b-5254009297d6',
- 'snapshot_id': None})])
- self.assertFalse(
- self.compute_api.is_volume_backed_instance(ctxt, instance, bdms))
-
- bdms = block_device_obj.block_device_make_list(ctxt,
- [fake_block_device.FakeDbBlockDeviceDict(
- {'source_type': 'volume',
- 'device_name': '/dev/vda',
- 'snapshot_id': 'de8836ac-d75e-11e2-8271-5254009297d6',
- 'destination_type': 'volume',
- 'boot_index': 0,
- 'volume_id': None})])
- self.assertTrue(
- self.compute_api.is_volume_backed_instance(ctxt, instance, bdms))
-
- def test_is_volume_backed_instance_no_bdms(self):
- ctxt = self.context
- instance = self._create_fake_instance()
-
- self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
- 'get_by_instance_uuid')
- objects.BlockDeviceMappingList.get_by_instance_uuid(
- ctxt, instance['uuid']).AndReturn(
- block_device_obj.block_device_make_list(ctxt, []))
- self.mox.ReplayAll()
-
- self.compute_api.is_volume_backed_instance(ctxt, instance, None)
-
- def test_reservation_id_one_instance(self):
- """Verify building an instance has a reservation_id that
- matches return value from create.
- """
- (refs, resv_id) = self.compute_api.create(self.context,
- flavors.get_default_flavor(), image_href='some-fake-image')
- try:
- self.assertEqual(len(refs), 1)
- self.assertEqual(refs[0]['reservation_id'], resv_id)
- finally:
- db.instance_destroy(self.context, refs[0]['uuid'])
-
- def test_reservation_ids_two_instances(self):
- """Verify building 2 instances at once results in a
- reservation_id being returned equal to reservation id set
- in both instances.
- """
- (refs, resv_id) = self.compute_api.create(self.context,
- flavors.get_default_flavor(), image_href='some-fake-image',
- min_count=2, max_count=2)
- try:
- self.assertEqual(len(refs), 2)
- self.assertIsNotNone(resv_id)
- finally:
- for instance in refs:
- self.assertEqual(instance['reservation_id'], resv_id)
-
- db.instance_destroy(self.context, refs[0]['uuid'])
-
- def test_multi_instance_display_name_template(self):
- self.flags(multi_instance_display_name_template='%(name)s')
- (refs, resv_id) = self.compute_api.create(self.context,
- flavors.get_default_flavor(), image_href='some-fake-image',
- min_count=2, max_count=2, display_name='x')
- self.assertEqual(refs[0]['display_name'], 'x')
- self.assertEqual(refs[0]['hostname'], 'x')
- self.assertEqual(refs[1]['display_name'], 'x')
- self.assertEqual(refs[1]['hostname'], 'x')
-
- self.flags(multi_instance_display_name_template='%(name)s-%(count)s')
- (refs, resv_id) = self.compute_api.create(self.context,
- flavors.get_default_flavor(), image_href='some-fake-image',
- min_count=2, max_count=2, display_name='x')
- self.assertEqual(refs[0]['display_name'], 'x-1')
- self.assertEqual(refs[0]['hostname'], 'x-1')
- self.assertEqual(refs[1]['display_name'], 'x-2')
- self.assertEqual(refs[1]['hostname'], 'x-2')
-
- self.flags(multi_instance_display_name_template='%(name)s-%(uuid)s')
- (refs, resv_id) = self.compute_api.create(self.context,
- flavors.get_default_flavor(), image_href='some-fake-image',
- min_count=2, max_count=2, display_name='x')
- self.assertEqual(refs[0]['display_name'], 'x-%s' % refs[0]['uuid'])
- self.assertEqual(refs[0]['hostname'], 'x-%s' % refs[0]['uuid'])
- self.assertEqual(refs[1]['display_name'], 'x-%s' % refs[1]['uuid'])
- self.assertEqual(refs[1]['hostname'], 'x-%s' % refs[1]['uuid'])
-
- def test_instance_architecture(self):
- # Test the instance architecture.
- i_ref = self._create_fake_instance()
- self.assertEqual(i_ref['architecture'], arch.X86_64)
- db.instance_destroy(self.context, i_ref['uuid'])
-
- def test_instance_unknown_architecture(self):
- # Test if the architecture is unknown.
- instance = self._create_fake_instance_obj(
- params={'architecture': ''})
- try:
- self.compute.run_instance(self.context, instance, {}, {}, None,
- None, None, True, None, False)
- instance = db.instance_get_by_uuid(self.context,
- instance['uuid'])
- self.assertNotEqual(instance['architecture'], 'Unknown')
- finally:
- db.instance_destroy(self.context, instance['uuid'])
-
- def test_instance_name_template(self):
- # Test the instance_name template.
- self.flags(instance_name_template='instance-%d')
- i_ref = self._create_fake_instance()
- self.assertEqual(i_ref['name'], 'instance-%d' % i_ref['id'])
- db.instance_destroy(self.context, i_ref['uuid'])
-
- self.flags(instance_name_template='instance-%(uuid)s')
- i_ref = self._create_fake_instance()
- self.assertEqual(i_ref['name'], 'instance-%s' % i_ref['uuid'])
- db.instance_destroy(self.context, i_ref['uuid'])
-
- self.flags(instance_name_template='%(id)d-%(uuid)s')
- i_ref = self._create_fake_instance()
- self.assertEqual(i_ref['name'], '%d-%s' %
- (i_ref['id'], i_ref['uuid']))
- db.instance_destroy(self.context, i_ref['uuid'])
-
- # not allowed.. default is uuid
- self.flags(instance_name_template='%(name)s')
- i_ref = self._create_fake_instance()
- self.assertEqual(i_ref['name'], i_ref['uuid'])
- db.instance_destroy(self.context, i_ref['uuid'])
-
- def test_add_remove_fixed_ip(self):
- instance = self._create_fake_instance_obj(params={'host': CONF.host})
- self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
- lambda *a, **kw: None)
- self.compute_api.add_fixed_ip(self.context, instance, '1')
- self.compute_api.remove_fixed_ip(self.context,
- instance, '192.168.1.1')
- self.compute_api.delete(self.context, instance)
-
- def test_attach_volume_invalid(self):
- self.assertRaises(exception.InvalidDevicePath,
- self.compute_api.attach_volume,
- self.context,
- {'locked': False, 'vm_state': vm_states.ACTIVE,
- 'task_state': None,
- 'launched_at': timeutils.utcnow()},
- None,
- '/invalid')
-
- def test_no_attach_volume_in_rescue_state(self):
- def fake(*args, **kwargs):
- pass
-
- def fake_volume_get(self, context, volume_id):
- return {'id': volume_id}
-
- self.stubs.Set(cinder.API, 'get', fake_volume_get)
- self.stubs.Set(cinder.API, 'check_attach', fake)
- self.stubs.Set(cinder.API, 'reserve_volume', fake)
-
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.attach_volume,
- self.context,
- {'uuid': 'fake_uuid', 'locked': False,
- 'vm_state': vm_states.RESCUED},
- None,
- '/dev/vdb')
-
- def test_no_attach_volume_in_suspended_state(self):
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.attach_volume,
- self.context,
- {'uuid': 'fake_uuid', 'locked': False,
- 'vm_state': vm_states.SUSPENDED},
- {'id': 'fake-volume-id'},
- '/dev/vdb')
-
- def test_no_detach_volume_in_rescue_state(self):
- # Ensure volume can be detached from instance
-
- params = {'vm_state': vm_states.RESCUED}
- instance = self._create_fake_instance(params=params)
-
- volume = {'id': 1, 'attach_status': 'in-use',
- 'instance_uuid': instance['uuid']}
-
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.detach_volume,
- self.context, instance, volume)
-
- @mock.patch.object(objects.BlockDeviceMappingList,
- 'get_by_instance_uuid')
- @mock.patch.object(cinder.API, 'get')
- def test_no_rescue_in_volume_state_attaching(self,
- mock_get_vol,
- mock_get_bdms):
- # Make sure a VM cannot be rescued while volume is being attached
- instance = self._create_fake_instance_obj()
- bdms, volume = self._fake_rescue_block_devices(instance)
-
- mock_get_vol.return_value = {'id': volume['id'],
- 'status': "attaching"}
- mock_get_bdms.return_value = bdms
-
- self.assertRaises(exception.InvalidVolume,
- self.compute_api.rescue, self.context, instance)
-
- def test_vnc_console(self):
- # Make sure we can a vnc console for an instance.
-
- fake_instance = {'uuid': 'fake_uuid',
- 'host': 'fake_compute_host'}
- fake_console_type = "novnc"
- fake_connect_info = {'token': 'fake_token',
- 'console_type': fake_console_type,
- 'host': 'fake_console_host',
- 'port': 'fake_console_port',
- 'internal_access_path': 'fake_access_path',
- 'instance_uuid': fake_instance['uuid'],
- 'access_url': 'fake_console_url'}
-
- rpcapi = compute_rpcapi.ComputeAPI
- self.mox.StubOutWithMock(rpcapi, 'get_vnc_console')
- rpcapi.get_vnc_console(
- self.context, instance=fake_instance,
- console_type=fake_console_type).AndReturn(fake_connect_info)
-
- self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
- 'authorize_console')
- self.compute_api.consoleauth_rpcapi.authorize_console(
- self.context, 'fake_token', fake_console_type, 'fake_console_host',
- 'fake_console_port', 'fake_access_path', 'fake_uuid')
-
- self.mox.ReplayAll()
-
- console = self.compute_api.get_vnc_console(self.context,
- fake_instance, fake_console_type)
- self.assertEqual(console, {'url': 'fake_console_url'})
-
- def test_get_vnc_console_no_host(self):
- instance = self._create_fake_instance(params={'host': ''})
-
- self.assertRaises(exception.InstanceNotReady,
- self.compute_api.get_vnc_console,
- self.context, instance, 'novnc')
-
- db.instance_destroy(self.context, instance['uuid'])
-
- def test_spice_console(self):
- # Make sure we can a spice console for an instance.
-
- fake_instance = {'uuid': 'fake_uuid',
- 'host': 'fake_compute_host'}
- fake_console_type = "spice-html5"
- fake_connect_info = {'token': 'fake_token',
- 'console_type': fake_console_type,
- 'host': 'fake_console_host',
- 'port': 'fake_console_port',
- 'internal_access_path': 'fake_access_path',
- 'instance_uuid': fake_instance['uuid'],
- 'access_url': 'fake_console_url'}
-
- rpcapi = compute_rpcapi.ComputeAPI
- self.mox.StubOutWithMock(rpcapi, 'get_spice_console')
- rpcapi.get_spice_console(
- self.context, instance=fake_instance,
- console_type=fake_console_type).AndReturn(fake_connect_info)
-
- self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
- 'authorize_console')
- self.compute_api.consoleauth_rpcapi.authorize_console(
- self.context, 'fake_token', fake_console_type, 'fake_console_host',
- 'fake_console_port', 'fake_access_path', 'fake_uuid')
-
- self.mox.ReplayAll()
-
- console = self.compute_api.get_spice_console(self.context,
- fake_instance, fake_console_type)
- self.assertEqual(console, {'url': 'fake_console_url'})
-
- def test_get_spice_console_no_host(self):
- instance = self._create_fake_instance(params={'host': ''})
-
- self.assertRaises(exception.InstanceNotReady,
- self.compute_api.get_spice_console,
- self.context, instance, 'spice')
-
- db.instance_destroy(self.context, instance['uuid'])
-
- def test_rdp_console(self):
- # Make sure we can a rdp console for an instance.
-
- fake_instance = {'uuid': 'fake_uuid',
- 'host': 'fake_compute_host'}
- fake_console_type = "rdp-html5"
- fake_connect_info = {'token': 'fake_token',
- 'console_type': fake_console_type,
- 'host': 'fake_console_host',
- 'port': 'fake_console_port',
- 'internal_access_path': 'fake_access_path',
- 'instance_uuid': fake_instance['uuid'],
- 'access_url': 'fake_console_url'}
-
- rpcapi = compute_rpcapi.ComputeAPI
- self.mox.StubOutWithMock(rpcapi, 'get_rdp_console')
- rpcapi.get_rdp_console(
- self.context, instance=fake_instance,
- console_type=fake_console_type).AndReturn(fake_connect_info)
-
- self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
- 'authorize_console')
- self.compute_api.consoleauth_rpcapi.authorize_console(
- self.context, 'fake_token', fake_console_type, 'fake_console_host',
- 'fake_console_port', 'fake_access_path', 'fake_uuid')
-
- self.mox.ReplayAll()
-
- console = self.compute_api.get_rdp_console(self.context,
- fake_instance, fake_console_type)
- self.assertEqual(console, {'url': 'fake_console_url'})
-
- def test_get_rdp_console_no_host(self):
- instance = self._create_fake_instance(params={'host': ''})
-
- self.assertRaises(exception.InstanceNotReady,
- self.compute_api.get_rdp_console,
- self.context, instance, 'rdp')
-
- db.instance_destroy(self.context, instance['uuid'])
-
- def test_serial_console(self):
- # Make sure we can get a serial proxy url for an instance.
-
- fake_instance = {'uuid': 'fake_uuid',
- 'host': 'fake_compute_host'}
- fake_console_type = 'serial'
- fake_connect_info = {'token': 'fake_token',
- 'console_type': fake_console_type,
- 'host': 'fake_serial_host',
- 'port': 'fake_tcp_port',
- 'internal_access_path': 'fake_access_path',
- 'instance_uuid': fake_instance['uuid'],
- 'access_url': 'fake_access_url'}
-
- rpcapi = compute_rpcapi.ComputeAPI
-
- with contextlib.nested(
- mock.patch.object(rpcapi, 'get_serial_console',
- return_value=fake_connect_info),
- mock.patch.object(self.compute_api.consoleauth_rpcapi,
- 'authorize_console')
- ) as (mock_get_serial_console, mock_authorize_console):
- self.compute_api.consoleauth_rpcapi.authorize_console(
- self.context, 'fake_token', fake_console_type,
- 'fake_serial_host', 'fake_tcp_port',
- 'fake_access_path', 'fake_uuid')
-
- console = self.compute_api.get_serial_console(self.context,
- fake_instance,
- fake_console_type)
- self.assertEqual(console, {'url': 'fake_access_url'})
-
- def test_get_serial_console_no_host(self):
- # Make sure an exception is raised when instance is not Active.
- instance = self._create_fake_instance(params={'host': ''})
-
- self.assertRaises(exception.InstanceNotReady,
- self.compute_api.get_serial_console,
- self.context, instance, 'serial')
-
- db.instance_destroy(self.context, instance['uuid'])
-
- def test_console_output(self):
- fake_instance = {'uuid': 'fake_uuid',
- 'host': 'fake_compute_host'}
- fake_tail_length = 699
- fake_console_output = 'fake console output'
-
- rpcapi = compute_rpcapi.ComputeAPI
- self.mox.StubOutWithMock(rpcapi, 'get_console_output')
- rpcapi.get_console_output(
- self.context, instance=fake_instance,
- tail_length=fake_tail_length).AndReturn(fake_console_output)
-
- self.mox.ReplayAll()
-
- output = self.compute_api.get_console_output(self.context,
- fake_instance, tail_length=fake_tail_length)
- self.assertEqual(output, fake_console_output)
-
- def test_console_output_no_host(self):
- instance = self._create_fake_instance(params={'host': ''})
-
- self.assertRaises(exception.InstanceNotReady,
- self.compute_api.get_console_output,
- self.context, instance)
-
- db.instance_destroy(self.context, instance['uuid'])
-
- def test_attach_interface(self):
- new_type = flavors.get_flavor_by_flavor_id('4')
- sys_meta = flavors.save_flavor_info({}, new_type)
-
- instance = objects.Instance(image_ref='foo',
- system_metadata=sys_meta)
- self.mox.StubOutWithMock(self.compute.network_api,
- 'allocate_port_for_instance')
- nwinfo = [fake_network_cache_model.new_vif()]
- network_id = nwinfo[0]['network']['id']
- port_id = nwinfo[0]['id']
- req_ip = '1.2.3.4'
- self.compute.network_api.allocate_port_for_instance(
- self.context, instance, port_id, network_id, req_ip
- ).AndReturn(nwinfo)
- self.mox.ReplayAll()
- vif = self.compute.attach_interface(self.context,
- instance,
- network_id,
- port_id,
- req_ip)
- self.assertEqual(vif['id'], network_id)
- return nwinfo, port_id
-
- def test_detach_interface(self):
- nwinfo, port_id = self.test_attach_interface()
- self.stubs.Set(self.compute.network_api,
- 'deallocate_port_for_instance',
- lambda a, b, c: [])
- instance = objects.Instance()
- instance.info_cache = objects.InstanceInfoCache.new(
- self.context, 'fake-uuid')
- instance.info_cache.network_info = network_model.NetworkInfo.hydrate(
- nwinfo)
- self.compute.detach_interface(self.context, instance, port_id)
- self.assertEqual(self.compute.driver._interfaces, {})
-
- def test_attach_volume(self):
- fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
- {'source_type': 'volume', 'destination_type': 'volume',
- 'volume_id': 'fake-volume-id', 'device_name': '/dev/vdb'})
- bdm = block_device_obj.BlockDeviceMapping()._from_db_object(
- self.context,
- block_device_obj.BlockDeviceMapping(),
- fake_bdm)
- instance = self._create_fake_instance()
- fake_volume = {'id': 'fake-volume-id'}
-
- with contextlib.nested(
- mock.patch.object(cinder.API, 'get', return_value=fake_volume),
- mock.patch.object(cinder.API, 'check_attach'),
- mock.patch.object(cinder.API, 'reserve_volume'),
- mock.patch.object(compute_rpcapi.ComputeAPI,
- 'reserve_block_device_name', return_value=bdm),
- mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume')
- ) as (mock_get, mock_check_attach, mock_reserve_vol, mock_reserve_bdm,
- mock_attach):
-
- self.compute_api.attach_volume(
- self.context, instance, 'fake-volume-id',
- '/dev/vdb', 'ide', 'cdrom')
-
- mock_reserve_bdm.assert_called_once_with(
- self.context, instance, '/dev/vdb', 'fake-volume-id',
- disk_bus='ide', device_type='cdrom')
- self.assertEqual(mock_get.call_args,
- mock.call(self.context, 'fake-volume-id'))
- self.assertEqual(mock_check_attach.call_args,
- mock.call(
- self.context, fake_volume, instance=instance))
- mock_reserve_vol.assert_called_once_with(
- self.context, 'fake-volume-id')
- a, kw = mock_attach.call_args
- self.assertEqual(kw['volume_id'], 'fake-volume-id')
- self.assertEqual(kw['mountpoint'], '/dev/vdb')
- self.assertEqual(kw['bdm'].device_name, '/dev/vdb')
- self.assertEqual(kw['bdm'].volume_id, 'fake-volume-id')
-
- def test_attach_volume_no_device(self):
-
- called = {}
-
- def fake_check_attach(*args, **kwargs):
- called['fake_check_attach'] = True
-
- def fake_reserve_volume(*args, **kwargs):
- called['fake_reserve_volume'] = True
-
- def fake_volume_get(self, context, volume_id):
- called['fake_volume_get'] = True
- return {'id': volume_id}
-
- def fake_rpc_attach_volume(self, context, **kwargs):
- called['fake_rpc_attach_volume'] = True
-
- def fake_rpc_reserve_block_device_name(self, context, instance, device,
- volume_id, **kwargs):
- called['fake_rpc_reserve_block_device_name'] = True
- bdm = block_device_obj.BlockDeviceMapping()
- bdm['device_name'] = '/dev/vdb'
- return bdm
-
- self.stubs.Set(cinder.API, 'get', fake_volume_get)
- self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
- self.stubs.Set(cinder.API, 'reserve_volume',
- fake_reserve_volume)
- self.stubs.Set(compute_rpcapi.ComputeAPI,
- 'reserve_block_device_name',
- fake_rpc_reserve_block_device_name)
- self.stubs.Set(compute_rpcapi.ComputeAPI, 'attach_volume',
- fake_rpc_attach_volume)
-
- instance = self._create_fake_instance()
- self.compute_api.attach_volume(self.context, instance, 1, device=None)
- self.assertTrue(called.get('fake_check_attach'))
- self.assertTrue(called.get('fake_reserve_volume'))
- self.assertTrue(called.get('fake_volume_get'))
- self.assertTrue(called.get('fake_rpc_reserve_block_device_name'))
- self.assertTrue(called.get('fake_rpc_attach_volume'))
-
- def test_detach_volume(self):
- # Ensure volume can be detached from instance
- called = {}
- instance = self._create_fake_instance()
- volume = {'id': 1, 'attach_status': 'in-use',
- 'instance_uuid': instance['uuid']}
-
- def fake_check_detach(*args, **kwargs):
- called['fake_check_detach'] = True
-
- def fake_begin_detaching(*args, **kwargs):
- called['fake_begin_detaching'] = True
-
- def fake_rpc_detach_volume(self, context, **kwargs):
- called['fake_rpc_detach_volume'] = True
-
- self.stubs.Set(cinder.API, 'check_detach', fake_check_detach)
- self.stubs.Set(cinder.API, 'begin_detaching', fake_begin_detaching)
- self.stubs.Set(compute_rpcapi.ComputeAPI, 'detach_volume',
- fake_rpc_detach_volume)
-
- self.compute_api.detach_volume(self.context,
- instance, volume)
- self.assertTrue(called.get('fake_check_detach'))
- self.assertTrue(called.get('fake_begin_detaching'))
- self.assertTrue(called.get('fake_rpc_detach_volume'))
-
- def test_detach_invalid_volume(self):
- # Ensure exception is raised while detaching an un-attached volume
- instance = {'uuid': 'uuid1',
- 'locked': False,
- 'launched_at': timeutils.utcnow(),
- 'vm_state': vm_states.ACTIVE,
- 'task_state': None}
- volume = {'id': 1, 'attach_status': 'detached'}
-
- self.assertRaises(exception.InvalidVolume,
- self.compute_api.detach_volume, self.context,
- instance, volume)
-
- def test_detach_unattached_volume(self):
- # Ensure exception is raised when volume's idea of attached
- # instance doesn't match.
- instance = {'uuid': 'uuid1',
- 'locked': False,
- 'launched_at': timeutils.utcnow(),
- 'vm_state': vm_states.ACTIVE,
- 'task_state': None}
- volume = {'id': 1, 'attach_status': 'in-use',
- 'instance_uuid': 'uuid2'}
-
- self.assertRaises(exception.VolumeUnattached,
- self.compute_api.detach_volume, self.context,
- instance, volume)
-
- def test_detach_suspended_instance_fails(self):
- instance = {'uuid': 'uuid1',
- 'locked': False,
- 'launched_at': timeutils.utcnow(),
- 'vm_state': vm_states.SUSPENDED,
- 'task_state': None}
- volume = {'id': 1, 'attach_status': 'in-use',
- 'instance_uuid': 'uuid2'}
-
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.detach_volume, self.context,
- instance, volume)
-
- def test_detach_volume_libvirt_is_down(self):
- # Ensure rollback during detach if libvirt goes down
-
- called = {}
- instance = self._create_fake_instance()
-
- fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
- {'device_name': '/dev/vdb', 'volume_id': 1,
- 'source_type': 'snapshot', 'destination_type': 'volume',
- 'connection_info': '{"test": "test"}'})
-
- def fake_libvirt_driver_instance_exists(_instance):
- called['fake_libvirt_driver_instance_exists'] = True
- return False
-
- def fake_libvirt_driver_detach_volume_fails(*args, **kwargs):
- called['fake_libvirt_driver_detach_volume_fails'] = True
- raise AttributeError()
-
- def fake_roll_detaching(*args, **kwargs):
- called['fake_roll_detaching'] = True
-
- self.stubs.Set(cinder.API, 'roll_detaching', fake_roll_detaching)
- self.stubs.Set(self.compute.driver, "instance_exists",
- fake_libvirt_driver_instance_exists)
- self.stubs.Set(self.compute.driver, "detach_volume",
- fake_libvirt_driver_detach_volume_fails)
-
- self.mox.StubOutWithMock(objects.BlockDeviceMapping,
- 'get_by_volume_id')
- objects.BlockDeviceMapping.get_by_volume_id(
- self.context, 1).AndReturn(objects.BlockDeviceMapping(
- **fake_bdm))
- self.mox.ReplayAll()
-
- self.assertRaises(AttributeError, self.compute.detach_volume,
- self.context, 1, instance)
- self.assertTrue(called.get('fake_libvirt_driver_instance_exists'))
- self.assertTrue(called.get('fake_roll_detaching'))
-
- def test_detach_volume_not_found(self):
- # Ensure that a volume can be detached even when it is removed
- # from an instance but remaining in bdm. See bug #1367964.
-
- instance = self._create_fake_instance()
- fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
- {'source_type': 'volume', 'destination_type': 'volume',
- 'volume_id': 'fake-id', 'device_name': '/dev/vdb',
- 'connection_info': '{"test": "test"}'})
- bdm = objects.BlockDeviceMapping(**fake_bdm)
-
- with contextlib.nested(
- mock.patch.object(self.compute.driver, 'detach_volume',
- side_effect=exception.DiskNotFound('sdb')),
- mock.patch.object(objects.BlockDeviceMapping,
- 'get_by_volume_id', return_value=bdm),
- mock.patch.object(cinder.API, 'terminate_connection'),
- mock.patch.object(bdm, 'destroy'),
- mock.patch.object(self.compute, '_notify_about_instance_usage'),
- mock.patch.object(self.compute.volume_api, 'detach'),
- mock.patch.object(self.compute.driver, 'get_volume_connector',
- return_value='fake-connector')
- ) as (mock_detach_volume, mock_volume, mock_terminate_connection,
- mock_destroy, mock_notify, mock_detach, mock_volume_connector):
- self.compute.detach_volume(self.context, 'fake-id', instance)
- self.assertTrue(mock_detach_volume.called)
- mock_terminate_connection.assert_called_once_with(self.context,
- 'fake-id',
- 'fake-connector')
- mock_destroy.assert_called_once_with()
- mock_detach.assert_called_once_with(mock.ANY, 'fake-id')
-
- def test_terminate_with_volumes(self):
- # Make sure that volumes get detached during instance termination.
- admin = context.get_admin_context()
- instance = self._create_fake_instance_obj()
-
- volume_id = 'fake'
- values = {'instance_uuid': instance['uuid'],
- 'device_name': '/dev/vdc',
- 'delete_on_termination': False,
- 'volume_id': volume_id,
- }
- db.block_device_mapping_create(admin, values)
-
- def fake_volume_get(self, context, volume_id):
- return {'id': volume_id}
- self.stubs.Set(cinder.API, "get", fake_volume_get)
-
- # Stub out and record whether it gets detached
- result = {"detached": False}
-
- def fake_detach(self, context, volume_id_param):
- result["detached"] = volume_id_param == volume_id
- self.stubs.Set(cinder.API, "detach", fake_detach)
-
- def fake_terminate_connection(self, context, volume_id, connector):
- return {}
- self.stubs.Set(cinder.API, "terminate_connection",
- fake_terminate_connection)
-
- # Kill the instance and check that it was detached
- bdms = db.block_device_mapping_get_all_by_instance(admin,
- instance['uuid'])
- self.compute.terminate_instance(admin, instance, bdms, [])
-
- self.assertTrue(result["detached"])
-
- def test_terminate_deletes_all_bdms(self):
- admin = context.get_admin_context()
- instance = self._create_fake_instance_obj()
-
- img_bdm = {'instance_uuid': instance['uuid'],
- 'device_name': '/dev/vda',
- 'source_type': 'image',
- 'destination_type': 'local',
- 'delete_on_termination': False,
- 'boot_index': 0,
- 'image_id': 'fake_image'}
- vol_bdm = {'instance_uuid': instance['uuid'],
- 'device_name': '/dev/vdc',
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'delete_on_termination': False,
- 'volume_id': 'fake_vol'}
- bdms = []
- for bdm in img_bdm, vol_bdm:
- bdm_obj = objects.BlockDeviceMapping(**bdm)
- bdm_obj.create(admin)
- bdms.append(bdm_obj)
-
- self.stubs.Set(self.compute, 'volume_api', mox.MockAnything())
- self.stubs.Set(self.compute, '_prep_block_device', mox.MockAnything())
- self.compute.run_instance(self.context, instance, {}, {}, None, None,
- None, True, None, False)
-
- self.compute.terminate_instance(self.context, instance, bdms, [])
-
- bdms = db.block_device_mapping_get_all_by_instance(admin,
- instance['uuid'])
- self.assertEqual(len(bdms), 0)
-
- def test_inject_network_info(self):
- instance = self._create_fake_instance_obj(params={'host': CONF.host})
- self.compute.run_instance(self.context,
- instance, {}, {}, None, None,
- None, True, None, False)
- instance = self.compute_api.get(self.context, instance['uuid'],
- want_objects=True)
- self.compute_api.inject_network_info(self.context, instance)
- self.stubs.Set(self.compute_api.network_api,
- 'deallocate_for_instance',
- lambda *a, **kw: None)
- self.compute_api.delete(self.context, instance)
-
- def test_reset_network(self):
- instance = self._create_fake_instance_obj()
- self.compute.run_instance(self.context,
- instance, {}, {}, None, None,
- None, True, None, False)
- instance = self.compute_api.get(self.context, instance['uuid'],
- want_objects=True)
- self.compute_api.reset_network(self.context, instance)
-
- def test_lock(self):
- instance = self._create_fake_instance_obj()
- self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
- lambda *a, **kw: None)
- self.compute_api.lock(self.context, instance)
-
- def test_unlock(self):
- instance = self._create_fake_instance_obj()
- self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
- lambda *a, **kw: None)
- self.compute_api.unlock(self.context, instance)
-
- def test_get_lock(self):
- instance = self._create_fake_instance()
- self.assertFalse(self.compute_api.get_lock(self.context, instance))
- db.instance_update(self.context, instance['uuid'], {'locked': True})
- self.assertTrue(self.compute_api.get_lock(self.context, instance))
-
- def test_add_remove_security_group(self):
- instance = self._create_fake_instance_obj()
-
- self.compute.run_instance(self.context,
- instance, {}, {}, None, None,
- None, True, None, False)
- instance = self.compute_api.get(self.context, instance['uuid'])
- security_group_name = self._create_group()['name']
-
- self.security_group_api.add_to_instance(self.context,
- instance,
- security_group_name)
- self.security_group_api.remove_from_instance(self.context,
- instance,
- security_group_name)
-
- def test_get_diagnostics(self):
- instance = self._create_fake_instance_obj()
-
- rpcapi = compute_rpcapi.ComputeAPI
- self.mox.StubOutWithMock(rpcapi, 'get_diagnostics')
- rpcapi.get_diagnostics(self.context, instance=instance)
- self.mox.ReplayAll()
-
- self.compute_api.get_diagnostics(self.context, instance)
-
- self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
- lambda *a, **kw: None)
- self.compute_api.delete(self.context, instance)
-
- def test_get_instance_diagnostics(self):
- instance = self._create_fake_instance_obj()
-
- rpcapi = compute_rpcapi.ComputeAPI
- self.mox.StubOutWithMock(rpcapi, 'get_instance_diagnostics')
- rpcapi.get_instance_diagnostics(self.context, instance=instance)
- self.mox.ReplayAll()
-
- self.compute_api.get_instance_diagnostics(self.context, instance)
-
- self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
- lambda *a, **kw: None)
- self.compute_api.delete(self.context, instance)
-
- def test_secgroup_refresh(self):
- instance = self._create_fake_instance()
-
- def rule_get(*args, **kwargs):
- mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
- return [mock_rule]
-
- def group_get(*args, **kwargs):
- mock_group = db_fakes.FakeModel({'instances': [instance]})
- return mock_group
-
- self.stubs.Set(
- self.compute_api.db,
- 'security_group_rule_get_by_security_group_grantee',
- rule_get)
- self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
-
- rpcapi = self.security_group_api.security_group_rpcapi
- self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
- rpcapi.refresh_instance_security_rules(self.context,
- instance['host'],
- instance)
- self.mox.ReplayAll()
-
- self.security_group_api.trigger_members_refresh(self.context, [1])
-
- def test_secgroup_refresh_once(self):
- instance = self._create_fake_instance()
-
- def rule_get(*args, **kwargs):
- mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
- return [mock_rule]
-
- def group_get(*args, **kwargs):
- mock_group = db_fakes.FakeModel({'instances': [instance]})
- return mock_group
-
- self.stubs.Set(
- self.compute_api.db,
- 'security_group_rule_get_by_security_group_grantee',
- rule_get)
- self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
-
- rpcapi = self.security_group_api.security_group_rpcapi
- self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
- rpcapi.refresh_instance_security_rules(self.context,
- instance['host'],
- instance)
- self.mox.ReplayAll()
-
- self.security_group_api.trigger_members_refresh(self.context, [1, 2])
-
- def test_secgroup_refresh_none(self):
- def rule_get(*args, **kwargs):
- mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
- return [mock_rule]
-
- def group_get(*args, **kwargs):
- mock_group = db_fakes.FakeModel({'instances': []})
- return mock_group
-
- self.stubs.Set(
- self.compute_api.db,
- 'security_group_rule_get_by_security_group_grantee',
- rule_get)
- self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
-
- rpcapi = self.security_group_api.security_group_rpcapi
- self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
-
- self.mox.ReplayAll()
-
- self.security_group_api.trigger_members_refresh(self.context, [1])
-
- def test_secrule_refresh(self):
- instance = self._create_fake_instance()
-
- def group_get(*args, **kwargs):
- mock_group = db_fakes.FakeModel({'instances': [instance]})
- return mock_group
-
- self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
-
- rpcapi = self.security_group_api.security_group_rpcapi
- self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
- rpcapi.refresh_instance_security_rules(self.context,
- instance['host'],
- instance)
- self.mox.ReplayAll()
-
- self.security_group_api.trigger_rules_refresh(self.context, [1])
-
- def test_secrule_refresh_once(self):
- instance = self._create_fake_instance()
-
- def group_get(*args, **kwargs):
- mock_group = db_fakes.FakeModel({'instances': [instance]})
- return mock_group
-
- self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
-
- rpcapi = self.security_group_api.security_group_rpcapi
- self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
- rpcapi.refresh_instance_security_rules(self.context,
- instance['host'],
- instance)
- self.mox.ReplayAll()
-
- self.security_group_api.trigger_rules_refresh(self.context, [1, 2])
-
- def test_secrule_refresh_none(self):
- def group_get(*args, **kwargs):
- mock_group = db_fakes.FakeModel({'instances': []})
- return mock_group
-
- self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
-
- rpcapi = self.security_group_api.security_group_rpcapi
- self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
- self.mox.ReplayAll()
-
- self.security_group_api.trigger_rules_refresh(self.context, [1, 2])
-
- def test_live_migrate(self):
- instance, instance_uuid = self._run_instance()
-
- rpcapi = self.compute_api.compute_task_api
- self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
- self.mox.StubOutWithMock(rpcapi, 'live_migrate_instance')
- self.compute_api._record_action_start(self.context, instance,
- 'live-migration')
- rpcapi.live_migrate_instance(self.context, instance, 'fake_dest_host',
- block_migration=True,
- disk_over_commit=True)
-
- self.mox.ReplayAll()
-
- self.compute_api.live_migrate(self.context, instance,
- block_migration=True,
- disk_over_commit=True,
- host_name='fake_dest_host')
-
- instance.refresh()
- self.assertEqual(instance['task_state'], task_states.MIGRATING)
-
- def test_evacuate(self):
- instance = self._create_fake_instance_obj(services=True)
- self.assertIsNone(instance.task_state)
-
- def fake_service_is_up(*args, **kwargs):
- return False
-
- def fake_rebuild_instance(*args, **kwargs):
- instance.host = kwargs['host']
- instance.save()
-
- self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up',
- fake_service_is_up)
- self.stubs.Set(self.compute_api.compute_task_api, 'rebuild_instance',
- fake_rebuild_instance)
- self.compute_api.evacuate(self.context.elevated(),
- instance,
- host='fake_dest_host',
- on_shared_storage=True,
- admin_password=None)
-
- instance.refresh()
- self.assertEqual(instance.task_state, task_states.REBUILDING)
- self.assertEqual(instance.host, 'fake_dest_host')
- instance.destroy()
-
- def test_fail_evacuate_from_non_existing_host(self):
- inst = {}
- inst['vm_state'] = vm_states.ACTIVE
- inst['launched_at'] = timeutils.utcnow()
- inst['image_ref'] = FAKE_IMAGE_REF
- inst['reservation_id'] = 'r-fakeres'
- inst['user_id'] = self.user_id
- inst['project_id'] = self.project_id
- inst['host'] = 'fake_host'
- inst['node'] = NODENAME
- type_id = flavors.get_flavor_by_name('m1.tiny')['id']
- inst['instance_type_id'] = type_id
- inst['ami_launch_index'] = 0
- inst['memory_mb'] = 0
- inst['vcpus'] = 0
- inst['root_gb'] = 0
- inst['ephemeral_gb'] = 0
- inst['architecture'] = arch.X86_64
- inst['os_type'] = 'Linux'
- instance = self._create_fake_instance_obj(inst)
-
- self.assertIsNone(instance.task_state)
- self.assertRaises(exception.ComputeHostNotFound,
- self.compute_api.evacuate, self.context.elevated(), instance,
- host='fake_dest_host', on_shared_storage=True,
- admin_password=None)
- instance.destroy()
-
- def test_fail_evacuate_from_running_host(self):
- instance = self._create_fake_instance_obj(services=True)
- self.assertIsNone(instance.task_state)
-
- def fake_service_is_up(*args, **kwargs):
- return True
-
- self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up',
- fake_service_is_up)
-
- self.assertRaises(exception.ComputeServiceInUse,
- self.compute_api.evacuate, self.context.elevated(), instance,
- host='fake_dest_host', on_shared_storage=True,
- admin_password=None)
- instance.destroy()
-
- def test_fail_evacuate_instance_in_wrong_state(self):
- states = [vm_states.BUILDING, vm_states.PAUSED, vm_states.SUSPENDED,
- vm_states.RESCUED, vm_states.RESIZED, vm_states.SOFT_DELETED,
- vm_states.DELETED]
- instances = [self._create_fake_instance_obj({'vm_state': state})
- for state in states]
-
- for instance in instances:
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.evacuate, self.context, instance,
- host='fake_dest_host', on_shared_storage=True,
- admin_password=None)
- instance.destroy()
-
- def test_get_migrations(self):
- migration = test_migration.fake_db_migration(uuid="1234")
- filters = {'host': 'host1'}
- self.mox.StubOutWithMock(db, "migration_get_all_by_filters")
- db.migration_get_all_by_filters(self.context,
- filters).AndReturn([migration])
- self.mox.ReplayAll()
-
- migrations = self.compute_api.get_migrations(self.context,
- filters)
- self.assertEqual(1, len(migrations))
- self.assertEqual(migrations[0].id, migration['id'])
-
-
-def fake_rpc_method(context, method, **kwargs):
- pass
-
-
-def _create_service_entries(context, values=[['avail_zone1', ['fake_host1',
- 'fake_host2']],
- ['avail_zone2', ['fake_host3']]]):
- for (avail_zone, hosts) in values:
- for host in hosts:
- db.service_create(context,
- {'host': host,
- 'binary': 'nova-compute',
- 'topic': 'compute',
- 'report_count': 0})
- return values
-
-
-class ComputeAPIAggrTestCase(BaseTestCase):
- """This is for unit coverage of aggregate-related methods
- defined in nova.compute.api.
- """
-
- def setUp(self):
- super(ComputeAPIAggrTestCase, self).setUp()
- self.api = compute_api.AggregateAPI()
- self.context = context.get_admin_context()
- self.stubs.Set(self.api.compute_rpcapi.client, 'call', fake_rpc_method)
- self.stubs.Set(self.api.compute_rpcapi.client, 'cast', fake_rpc_method)
-
- def test_aggregate_no_zone(self):
- # Ensure we can create an aggregate without an availability zone
- aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
- None)
- self.api.delete_aggregate(self.context, aggr['id'])
- db.aggregate_get(self.context.elevated(read_deleted='yes'),
- aggr['id'])
- self.assertRaises(exception.AggregateNotFound,
- self.api.delete_aggregate, self.context, aggr['id'])
-
- def test_check_az_for_aggregate(self):
- # Ensure all conflict hosts can be returned
- values = _create_service_entries(self.context)
- fake_zone = values[0][0]
- fake_host1 = values[0][1][0]
- fake_host2 = values[0][1][1]
- aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
- fake_zone, fake_host1)
- aggr1 = self._init_aggregate_with_host(aggr1, None, None, fake_host2)
- aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
- fake_host2)
- aggr2 = self._init_aggregate_with_host(aggr2, None, None, fake_host1)
- metadata = {'availability_zone': 'another_zone'}
- self.assertRaises(exception.InvalidAggregateAction,
- self.api.update_aggregate,
- self.context, aggr2['id'], metadata)
-
- def test_update_aggregate(self):
- # Ensure metadata can be updated.
- aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
- 'fake_zone')
- fake_notifier.NOTIFICATIONS = []
- aggr = self.api.update_aggregate(self.context, aggr['id'],
- {'name': 'new_fake_aggregate'})
- self.assertIsNone(availability_zones._get_cache().get('cache'))
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type,
- 'aggregate.updateprop.start')
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.event_type,
- 'aggregate.updateprop.end')
-
- def test_update_aggregate_no_az(self):
- # Ensure metadata without availability zone can be
- # updated,even the aggregate contains hosts belong
- # to another availability zone
- values = _create_service_entries(self.context)
- fake_zone = values[0][0]
- fake_host = values[0][1][0]
- self._init_aggregate_with_host(None, 'fake_aggregate1',
- fake_zone, fake_host)
- aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
- fake_host)
- metadata = {'name': 'new_fake_aggregate'}
- fake_notifier.NOTIFICATIONS = []
- aggr2 = self.api.update_aggregate(self.context, aggr2['id'],
- metadata)
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type,
- 'aggregate.updateprop.start')
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.event_type,
- 'aggregate.updateprop.end')
-
- def test_update_aggregate_az_change(self):
- # Ensure availability zone can be updated,
- # when the aggregate is the only one with
- # availability zone
- values = _create_service_entries(self.context)
- fake_zone = values[0][0]
- fake_host = values[0][1][0]
- aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
- fake_zone, fake_host)
- self._init_aggregate_with_host(None, 'fake_aggregate2', None,
- fake_host)
- metadata = {'availability_zone': 'new_fake_zone'}
- fake_notifier.NOTIFICATIONS = []
- aggr1 = self.api.update_aggregate(self.context, aggr1['id'],
- metadata)
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type,
- 'aggregate.updatemetadata.start')
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.event_type,
- 'aggregate.updatemetadata.end')
-
- def test_update_aggregate_az_fails(self):
- # Ensure aggregate's availability zone can't be updated,
- # when aggregate has hosts in other availability zone
- fake_notifier.NOTIFICATIONS = []
- values = _create_service_entries(self.context)
- fake_zone = values[0][0]
- fake_host = values[0][1][0]
- self._init_aggregate_with_host(None, 'fake_aggregate1',
- fake_zone, fake_host)
- aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
- fake_host)
- metadata = {'availability_zone': 'another_zone'}
- self.assertRaises(exception.InvalidAggregateAction,
- self.api.update_aggregate,
- self.context, aggr2['id'], metadata)
- fake_host2 = values[0][1][1]
- aggr3 = self._init_aggregate_with_host(None, 'fake_aggregate3',
- None, fake_host2)
- metadata = {'availability_zone': fake_zone}
- aggr3 = self.api.update_aggregate(self.context, aggr3['id'],
- metadata)
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 15)
- msg = fake_notifier.NOTIFICATIONS[13]
- self.assertEqual(msg.event_type,
- 'aggregate.updatemetadata.start')
- msg = fake_notifier.NOTIFICATIONS[14]
- self.assertEqual(msg.event_type,
- 'aggregate.updatemetadata.end')
-
- def test_update_aggregate_az_fails_with_nova_az(self):
- # Ensure aggregate's availability zone can't be updated,
- # when aggregate has hosts in other availability zone
- fake_notifier.NOTIFICATIONS = []
- values = _create_service_entries(self.context)
- fake_host = values[0][1][0]
- self._init_aggregate_with_host(None, 'fake_aggregate1',
- CONF.default_availability_zone,
- fake_host)
- aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
- fake_host)
- metadata = {'availability_zone': 'another_zone'}
- self.assertRaises(exception.InvalidAggregateAction,
- self.api.update_aggregate,
- self.context, aggr2['id'], metadata)
-
- def test_update_aggregate_metadata(self):
- # Ensure metadata can be updated.
- aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
- 'fake_zone')
- metadata = {'foo_key1': 'foo_value1',
- 'foo_key2': 'foo_value2',
- 'availability_zone': 'fake_zone'}
- fake_notifier.NOTIFICATIONS = []
- availability_zones._get_cache().add('fake_key', 'fake_value')
- aggr = self.api.update_aggregate_metadata(self.context, aggr['id'],
- metadata)
- self.assertIsNone(availability_zones._get_cache().get('fake_key'))
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type,
- 'aggregate.updatemetadata.start')
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.event_type,
- 'aggregate.updatemetadata.end')
- fake_notifier.NOTIFICATIONS = []
- metadata['foo_key1'] = None
- expected_payload_meta_data = {'foo_key1': None,
- 'foo_key2': 'foo_value2',
- 'availability_zone': 'fake_zone'}
- expected = self.api.update_aggregate_metadata(self.context,
- aggr['id'], metadata)
- self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual('aggregate.updatemetadata.start', msg.event_type)
- self.assertEqual(expected_payload_meta_data, msg.payload['meta_data'])
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual('aggregate.updatemetadata.end', msg.event_type)
- self.assertEqual(expected_payload_meta_data, msg.payload['meta_data'])
- self.assertThat(expected['metadata'],
- matchers.DictMatches({'availability_zone': 'fake_zone',
- 'foo_key2': 'foo_value2'}))
-
- def test_update_aggregate_metadata_no_az(self):
- # Ensure metadata without availability zone can be
- # updated,even the aggregate contains hosts belong
- # to another availability zone
- values = _create_service_entries(self.context)
- fake_zone = values[0][0]
- fake_host = values[0][1][0]
- self._init_aggregate_with_host(None, 'fake_aggregate1',
- fake_zone, fake_host)
- aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
- fake_host)
- metadata = {'foo_key2': 'foo_value3'}
- fake_notifier.NOTIFICATIONS = []
- aggr2 = self.api.update_aggregate_metadata(self.context, aggr2['id'],
- metadata)
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type,
- 'aggregate.updatemetadata.start')
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.event_type,
- 'aggregate.updatemetadata.end')
- self.assertThat(aggr2['metadata'],
- matchers.DictMatches({'foo_key2': 'foo_value3'}))
-
- def test_update_aggregate_metadata_az_change(self):
- # Ensure availability zone can be updated,
- # when the aggregate is the only one with
- # availability zone
- values = _create_service_entries(self.context)
- fake_zone = values[0][0]
- fake_host = values[0][1][0]
- aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
- fake_zone, fake_host)
- self._init_aggregate_with_host(None, 'fake_aggregate2', None,
- fake_host)
- metadata = {'availability_zone': 'new_fake_zone'}
- fake_notifier.NOTIFICATIONS = []
- aggr1 = self.api.update_aggregate_metadata(self.context,
- aggr1['id'], metadata)
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type,
- 'aggregate.updatemetadata.start')
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.event_type,
- 'aggregate.updatemetadata.end')
-
- def test_update_aggregate_az_do_not_replace_existing_metadata(self):
- # Ensure that that update of the aggregate availability zone
- # does not replace the aggregate existing metadata
- aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
- 'fake_zone')
- metadata = {'foo_key1': 'foo_value1'}
- aggr = self.api.update_aggregate_metadata(self.context,
- aggr['id'],
- metadata)
- metadata = {'availability_zone': 'new_fake_zone'}
- aggr = self.api.update_aggregate(self.context,
- aggr['id'],
- metadata)
- self.assertThat(aggr['metadata'], matchers.DictMatches(
- {'availability_zone': 'new_fake_zone', 'foo_key1': 'foo_value1'}))
-
- def test_update_aggregate_metadata_az_fails(self):
- # Ensure aggregate's availability zone can't be updated,
- # when aggregate has hosts in other availability zone
- fake_notifier.NOTIFICATIONS = []
- values = _create_service_entries(self.context)
- fake_zone = values[0][0]
- fake_host = values[0][1][0]
- self._init_aggregate_with_host(None, 'fake_aggregate1',
- fake_zone, fake_host)
- aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
- fake_host)
- metadata = {'availability_zone': 'another_zone'}
- self.assertRaises(exception.InvalidAggregateAction,
- self.api.update_aggregate_metadata,
- self.context, aggr2['id'], metadata)
- aggr3 = self._init_aggregate_with_host(None, 'fake_aggregate3',
- None, fake_host)
- metadata = {'availability_zone': fake_zone}
- aggr3 = self.api.update_aggregate_metadata(self.context,
- aggr3['id'],
- metadata)
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 15)
- msg = fake_notifier.NOTIFICATIONS[13]
- self.assertEqual(msg.event_type,
- 'aggregate.updatemetadata.start')
- msg = fake_notifier.NOTIFICATIONS[14]
- self.assertEqual(msg.event_type,
- 'aggregate.updatemetadata.end')
-
- def test_delete_aggregate(self):
- # Ensure we can delete an aggregate.
- fake_notifier.NOTIFICATIONS = []
- aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
- 'fake_zone')
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type,
- 'aggregate.create.start')
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.event_type,
- 'aggregate.create.end')
- fake_notifier.NOTIFICATIONS = []
- self.api.delete_aggregate(self.context, aggr['id'])
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type,
- 'aggregate.delete.start')
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.event_type,
- 'aggregate.delete.end')
- db.aggregate_get(self.context.elevated(read_deleted='yes'),
- aggr['id'])
- self.assertRaises(exception.AggregateNotFound,
- self.api.delete_aggregate, self.context, aggr['id'])
-
- def test_delete_non_empty_aggregate(self):
- # Ensure InvalidAggregateAction is raised when non empty aggregate.
- _create_service_entries(self.context,
- [['fake_availability_zone', ['fake_host']]])
- aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
- 'fake_availability_zone')
- self.api.add_host_to_aggregate(self.context, aggr['id'], 'fake_host')
- self.assertRaises(exception.InvalidAggregateAction,
- self.api.delete_aggregate, self.context, aggr['id'])
-
- def test_add_host_to_aggregate(self):
- # Ensure we can add a host to an aggregate.
- values = _create_service_entries(self.context)
- fake_zone = values[0][0]
- fake_host = values[0][1][0]
- aggr = self.api.create_aggregate(self.context,
- 'fake_aggregate', fake_zone)
-
- def fake_add_aggregate_host(*args, **kwargs):
- hosts = kwargs["aggregate"]["hosts"]
- self.assertIn(fake_host, hosts)
-
- self.stubs.Set(self.api.compute_rpcapi, 'add_aggregate_host',
- fake_add_aggregate_host)
-
- self.mox.StubOutWithMock(availability_zones,
- 'update_host_availability_zone_cache')
-
- def _stub_update_host_avail_zone_cache(host, az=None):
- if az is not None:
- availability_zones.update_host_availability_zone_cache(
- self.context, host, az)
- else:
- availability_zones.update_host_availability_zone_cache(
- self.context, host)
-
- for (avail_zone, hosts) in values:
- for host in hosts:
- _stub_update_host_avail_zone_cache(
- host, CONF.default_availability_zone)
- _stub_update_host_avail_zone_cache(fake_host)
- self.mox.ReplayAll()
-
- fake_notifier.NOTIFICATIONS = []
- aggr = self.api.add_host_to_aggregate(self.context,
- aggr['id'], fake_host)
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type,
- 'aggregate.addhost.start')
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.event_type,
- 'aggregate.addhost.end')
- self.assertEqual(len(aggr['hosts']), 1)
-
- def test_add_host_to_aggr_with_no_az(self):
- values = _create_service_entries(self.context)
- fake_zone = values[0][0]
- fake_host = values[0][1][0]
- aggr = self.api.create_aggregate(self.context,
- 'fake_aggregate', fake_zone)
- aggr = self.api.add_host_to_aggregate(self.context, aggr['id'],
- fake_host)
- aggr_no_az = self.api.create_aggregate(self.context, 'fake_aggregate2',
- None)
- aggr_no_az = self.api.add_host_to_aggregate(self.context,
- aggr_no_az['id'],
- fake_host)
- self.assertIn(fake_host, aggr['hosts'])
- self.assertIn(fake_host, aggr_no_az['hosts'])
-
- def test_add_host_no_az_metadata(self):
- # NOTE(mtreinish) based on how create works this is not how the
- # the metadata is supposed to end up in the database but it has
- # been seen. See lp bug #1209007. This test just confirms that
- # the host is still added to the aggregate if there is no
- # availability zone metadata.
- def fake_aggregate_metadata_get_by_metadata_key(*args, **kwargs):
- return {'meta_key': 'fake_value'}
- self.stubs.Set(self.compute.db,
- 'aggregate_metadata_get_by_metadata_key',
- fake_aggregate_metadata_get_by_metadata_key)
- values = _create_service_entries(self.context)
- fake_zone = values[0][0]
- fake_host = values[0][1][0]
- aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
- fake_zone)
- aggr = self.api.add_host_to_aggregate(self.context, aggr['id'],
- fake_host)
- self.assertIn(fake_host, aggr['hosts'])
-
- def test_add_host_to_multi_az(self):
- # Ensure we can't add a host to different availability zone
- values = _create_service_entries(self.context)
- fake_zone = values[0][0]
- fake_host = values[0][1][0]
- aggr = self.api.create_aggregate(self.context,
- 'fake_aggregate', fake_zone)
- aggr = self.api.add_host_to_aggregate(self.context,
- aggr['id'], fake_host)
- self.assertEqual(len(aggr['hosts']), 1)
- fake_zone2 = "another_zone"
- aggr2 = self.api.create_aggregate(self.context,
- 'fake_aggregate2', fake_zone2)
- self.assertRaises(exception.InvalidAggregateAction,
- self.api.add_host_to_aggregate,
- self.context, aggr2['id'], fake_host)
-
- def test_add_host_to_multi_az_with_nova_agg(self):
- # Ensure we can't add a host if already existing in an agg with AZ set
- # to default
- values = _create_service_entries(self.context)
- fake_host = values[0][1][0]
- aggr = self.api.create_aggregate(self.context,
- 'fake_aggregate',
- CONF.default_availability_zone)
- aggr = self.api.add_host_to_aggregate(self.context,
- aggr['id'], fake_host)
- self.assertEqual(len(aggr['hosts']), 1)
- fake_zone2 = "another_zone"
- aggr2 = self.api.create_aggregate(self.context,
- 'fake_aggregate2', fake_zone2)
- self.assertRaises(exception.InvalidAggregateAction,
- self.api.add_host_to_aggregate,
- self.context, aggr2['id'], fake_host)
-
- def test_add_host_to_aggregate_multiple(self):
- # Ensure we can add multiple hosts to an aggregate.
- values = _create_service_entries(self.context)
- fake_zone = values[0][0]
- aggr = self.api.create_aggregate(self.context,
- 'fake_aggregate', fake_zone)
- for host in values[0][1]:
- aggr = self.api.add_host_to_aggregate(self.context,
- aggr['id'], host)
- self.assertEqual(len(aggr['hosts']), len(values[0][1]))
-
- def test_add_host_to_aggregate_raise_not_found(self):
- # Ensure ComputeHostNotFound is raised when adding invalid host.
- aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
- 'fake_zone')
- fake_notifier.NOTIFICATIONS = []
- self.assertRaises(exception.ComputeHostNotFound,
- self.api.add_host_to_aggregate,
- self.context, aggr['id'], 'invalid_host')
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- self.assertEqual(fake_notifier.NOTIFICATIONS[1].publisher_id,
- 'compute.fake-mini')
-
- def test_remove_host_from_aggregate_active(self):
- # Ensure we can remove a host from an aggregate.
- values = _create_service_entries(self.context)
- fake_zone = values[0][0]
- aggr = self.api.create_aggregate(self.context,
- 'fake_aggregate', fake_zone)
- for host in values[0][1]:
- aggr = self.api.add_host_to_aggregate(self.context,
- aggr['id'], host)
- host_to_remove = values[0][1][0]
-
- def fake_remove_aggregate_host(*args, **kwargs):
- hosts = kwargs["aggregate"]["hosts"]
- self.assertNotIn(host_to_remove, hosts)
-
- self.stubs.Set(self.api.compute_rpcapi, 'remove_aggregate_host',
- fake_remove_aggregate_host)
-
- self.mox.StubOutWithMock(availability_zones,
- 'update_host_availability_zone_cache')
- availability_zones.update_host_availability_zone_cache(self.context,
- host_to_remove)
- self.mox.ReplayAll()
-
- fake_notifier.NOTIFICATIONS = []
- expected = self.api.remove_host_from_aggregate(self.context,
- aggr['id'],
- host_to_remove)
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type,
- 'aggregate.removehost.start')
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.event_type,
- 'aggregate.removehost.end')
- self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
-
- def test_remove_host_from_aggregate_raise_not_found(self):
- # Ensure ComputeHostNotFound is raised when removing invalid host.
- _create_service_entries(self.context, [['fake_zone', ['fake_host']]])
- aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
- 'fake_zone')
- self.assertRaises(exception.ComputeHostNotFound,
- self.api.remove_host_from_aggregate,
- self.context, aggr['id'], 'invalid_host')
-
- def test_aggregate_list(self):
- aggregate = self.api.create_aggregate(self.context,
- 'fake_aggregate',
- 'fake_zone')
- metadata = {'foo_key1': 'foo_value1',
- 'foo_key2': 'foo_value2'}
- meta_aggregate = self.api.create_aggregate(self.context,
- 'fake_aggregate2',
- 'fake_zone2')
- self.api.update_aggregate_metadata(self.context, meta_aggregate['id'],
- metadata)
- aggregate_list = self.api.get_aggregate_list(self.context)
- self.assertIn(aggregate['id'],
- map(lambda x: x['id'], aggregate_list))
- self.assertIn(meta_aggregate['id'],
- map(lambda x: x['id'], aggregate_list))
- self.assertIn('fake_aggregate',
- map(lambda x: x['name'], aggregate_list))
- self.assertIn('fake_aggregate2',
- map(lambda x: x['name'], aggregate_list))
- self.assertIn('fake_zone',
- map(lambda x: x['availability_zone'], aggregate_list))
- self.assertIn('fake_zone2',
- map(lambda x: x['availability_zone'], aggregate_list))
- test_meta_aggregate = aggregate_list[1]
- self.assertIn('foo_key1', test_meta_aggregate.get('metadata'))
- self.assertIn('foo_key2', test_meta_aggregate.get('metadata'))
- self.assertEqual('foo_value1',
- test_meta_aggregate.get('metadata')['foo_key1'])
- self.assertEqual('foo_value2',
- test_meta_aggregate.get('metadata')['foo_key2'])
-
- def test_aggregate_list_with_hosts(self):
- values = _create_service_entries(self.context)
- fake_zone = values[0][0]
- host_aggregate = self.api.create_aggregate(self.context,
- 'fake_aggregate',
- fake_zone)
- self.api.add_host_to_aggregate(self.context, host_aggregate['id'],
- values[0][1][0])
- aggregate_list = self.api.get_aggregate_list(self.context)
- aggregate = aggregate_list[0]
- self.assertIn(values[0][1][0], aggregate.get('hosts'))
-
-
-class ComputeAggrTestCase(BaseTestCase):
- """This is for unit coverage of aggregate-related methods
- defined in nova.compute.manager.
- """
-
- def setUp(self):
- super(ComputeAggrTestCase, self).setUp()
- self.context = context.get_admin_context()
- values = {'name': 'test_aggr'}
- az = {'availability_zone': 'test_zone'}
- self.aggr = db.aggregate_create(self.context, values, metadata=az)
-
- def test_add_aggregate_host(self):
- def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
- fake_driver_add_to_aggregate.called = True
- return {"foo": "bar"}
- self.stubs.Set(self.compute.driver, "add_to_aggregate",
- fake_driver_add_to_aggregate)
-
- self.compute.add_aggregate_host(self.context, host="host",
- aggregate=jsonutils.to_primitive(self.aggr), slave_info=None)
- self.assertTrue(fake_driver_add_to_aggregate.called)
-
- def test_remove_aggregate_host(self):
- def fake_driver_remove_from_aggregate(context, aggregate, host,
- **_ignore):
- fake_driver_remove_from_aggregate.called = True
- self.assertEqual("host", host, "host")
- return {"foo": "bar"}
- self.stubs.Set(self.compute.driver, "remove_from_aggregate",
- fake_driver_remove_from_aggregate)
-
- self.compute.remove_aggregate_host(self.context,
- aggregate=jsonutils.to_primitive(self.aggr), host="host",
- slave_info=None)
- self.assertTrue(fake_driver_remove_from_aggregate.called)
-
- def test_add_aggregate_host_passes_slave_info_to_driver(self):
- def driver_add_to_aggregate(context, aggregate, host, **kwargs):
- self.assertEqual(self.context, context)
- self.assertEqual(aggregate['id'], self.aggr['id'])
- self.assertEqual(host, "the_host")
- self.assertEqual("SLAVE_INFO", kwargs.get("slave_info"))
-
- self.stubs.Set(self.compute.driver, "add_to_aggregate",
- driver_add_to_aggregate)
-
- self.compute.add_aggregate_host(self.context, host="the_host",
- slave_info="SLAVE_INFO",
- aggregate=jsonutils.to_primitive(self.aggr))
-
- def test_remove_from_aggregate_passes_slave_info_to_driver(self):
- def driver_remove_from_aggregate(context, aggregate, host, **kwargs):
- self.assertEqual(self.context, context)
- self.assertEqual(aggregate['id'], self.aggr['id'])
- self.assertEqual(host, "the_host")
- self.assertEqual("SLAVE_INFO", kwargs.get("slave_info"))
-
- self.stubs.Set(self.compute.driver, "remove_from_aggregate",
- driver_remove_from_aggregate)
-
- self.compute.remove_aggregate_host(self.context,
- aggregate=jsonutils.to_primitive(self.aggr), host="the_host",
- slave_info="SLAVE_INFO")
-
-
-class ComputePolicyTestCase(BaseTestCase):
-
- def setUp(self):
- super(ComputePolicyTestCase, self).setUp()
-
- self.compute_api = compute.API()
-
- def test_actions_are_prefixed(self):
- self.mox.StubOutWithMock(policy, 'enforce')
- nova.policy.enforce(self.context, 'compute:reboot', {})
- self.mox.ReplayAll()
- compute_api.check_policy(self.context, 'reboot', {})
-
- def test_wrapped_method(self):
- instance = self._create_fake_instance_obj(params={'host': None,
- 'cell_name': 'foo'})
-
- # force delete to fail
- rules = {"compute:delete": [["false:false"]]}
- self.policy.set_rules(rules)
-
- self.assertRaises(exception.PolicyNotAuthorized,
- self.compute_api.delete, self.context, instance)
-
- # reset rules to allow deletion
- rules = {"compute:delete": []}
- self.policy.set_rules(rules)
-
- self.compute_api.delete(self.context, instance)
-
- def test_create_fail(self):
- rules = {"compute:create": [["false:false"]]}
- self.policy.set_rules(rules)
-
- self.assertRaises(exception.PolicyNotAuthorized,
- self.compute_api.create, self.context, '1', '1')
-
- def test_create_attach_volume_fail(self):
- rules = {
- "compute:create": [],
- "compute:create:attach_network": [["false:false"]],
- "compute:create:attach_volume": [],
- }
- self.policy.set_rules(rules)
-
- self.assertRaises(exception.PolicyNotAuthorized,
- self.compute_api.create, self.context, '1', '1',
- requested_networks='blah',
- block_device_mapping='blah')
-
- def test_create_attach_network_fail(self):
- rules = {
- "compute:create": [],
- "compute:create:attach_network": [],
- "compute:create:attach_volume": [["false:false"]],
- }
- self.policy.set_rules(rules)
-
- self.assertRaises(exception.PolicyNotAuthorized,
- self.compute_api.create, self.context, '1', '1',
- requested_networks='blah',
- block_device_mapping='blah')
-
- def test_get_fail(self):
- instance = self._create_fake_instance()
-
- rules = {
- "compute:get": [["false:false"]],
- }
- self.policy.set_rules(rules)
-
- self.assertRaises(exception.PolicyNotAuthorized,
- self.compute_api.get, self.context, instance['uuid'])
-
- def test_get_all_fail(self):
- rules = {
- "compute:get_all": [["false:false"]],
- }
- self.policy.set_rules(rules)
-
- self.assertRaises(exception.PolicyNotAuthorized,
- self.compute_api.get_all, self.context)
-
- def test_get_instance_faults(self):
- instance1 = self._create_fake_instance()
- instance2 = self._create_fake_instance()
- instances = [instance1, instance2]
-
- rules = {
- "compute:get_instance_faults": [["false:false"]],
- }
- self.policy.set_rules(rules)
-
- self.assertRaises(exception.PolicyNotAuthorized,
- self.compute_api.get_instance_faults,
- context.get_admin_context(), instances)
-
- def test_force_host_fail(self):
- rules = {"compute:create": [],
- "compute:create:forced_host": [["role:fake"]],
- "network:validate_networks": []}
- self.policy.set_rules(rules)
-
- self.assertRaises(exception.PolicyNotAuthorized,
- self.compute_api.create, self.context, None, '1',
- availability_zone='1:1')
-
- def test_force_host_pass(self):
- rules = {"compute:create": [],
- "compute:create:forced_host": [],
- "network:validate_networks": []}
- self.policy.set_rules(rules)
-
- self.compute_api.create(self.context, None, '1',
- availability_zone='1:1')
-
-
-class DisabledInstanceTypesTestCase(BaseTestCase):
- """Some instance-types are marked 'disabled' which means that they will not
- show up in customer-facing listings. We do, however, want those
- instance-types to be available for emergency migrations and for rebuilding
- of existing instances.
-
- One legitimate use of the 'disabled' field would be when phasing out a
- particular instance-type. We still want customers to be able to use an
- instance that of the old type, and we want Ops to be able perform
- migrations against it, but we *don't* want customers building new
- instances with the phased-out instance-type.
- """
- def setUp(self):
- super(DisabledInstanceTypesTestCase, self).setUp()
- self.compute_api = compute.API()
- self.inst_type = flavors.get_default_flavor()
-
- def test_can_build_instance_from_visible_instance_type(self):
- self.inst_type['disabled'] = False
- # Assert that exception.FlavorNotFound is not raised
- self.compute_api.create(self.context, self.inst_type,
- image_href='some-fake-image')
-
- def test_cannot_build_instance_from_disabled_instance_type(self):
- self.inst_type['disabled'] = True
- self.assertRaises(exception.FlavorNotFound,
- self.compute_api.create, self.context, self.inst_type, None)
-
- def test_can_resize_to_visible_instance_type(self):
- instance = self._create_fake_instance_obj()
- orig_get_flavor_by_flavor_id =\
- flavors.get_flavor_by_flavor_id
-
- def fake_get_flavor_by_flavor_id(flavor_id, ctxt=None,
- read_deleted="yes"):
- instance_type = orig_get_flavor_by_flavor_id(flavor_id,
- ctxt,
- read_deleted)
- instance_type['disabled'] = False
- return instance_type
-
- self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
- fake_get_flavor_by_flavor_id)
-
- self._stub_migrate_server()
- self.compute_api.resize(self.context, instance, '4')
-
- def test_cannot_resize_to_disabled_instance_type(self):
- instance = self._create_fake_instance_obj()
- orig_get_flavor_by_flavor_id = \
- flavors.get_flavor_by_flavor_id
-
- def fake_get_flavor_by_flavor_id(flavor_id, ctxt=None,
- read_deleted="yes"):
- instance_type = orig_get_flavor_by_flavor_id(flavor_id,
- ctxt,
- read_deleted)
- instance_type['disabled'] = True
- return instance_type
-
- self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
- fake_get_flavor_by_flavor_id)
-
- self.assertRaises(exception.FlavorNotFound,
- self.compute_api.resize, self.context, instance, '4')
-
-
-class ComputeReschedulingTestCase(BaseTestCase):
- """Tests re-scheduling logic for new build requests."""
-
- def setUp(self):
- super(ComputeReschedulingTestCase, self).setUp()
-
- self.expected_task_state = task_states.SCHEDULING
-
- def fake_update(*args, **kwargs):
- self.updated_task_state = kwargs.get('task_state')
- self.stubs.Set(self.compute, '_instance_update', fake_update)
-
- def _reschedule(self, request_spec=None, filter_properties=None,
- exc_info=None):
- if not filter_properties:
- filter_properties = {}
-
- instance = self._create_fake_instance_obj()
-
- admin_password = None
- injected_files = None
- requested_networks = None
- is_first_time = False
-
- scheduler_method = self.compute.scheduler_rpcapi.run_instance
- method_args = (request_spec, admin_password, injected_files,
- requested_networks, is_first_time, filter_properties)
- return self.compute._reschedule(self.context, request_spec,
- filter_properties, instance, scheduler_method,
- method_args, self.expected_task_state, exc_info=exc_info)
-
- def test_reschedule_no_filter_properties(self):
- # no filter_properties will disable re-scheduling.
- self.assertFalse(self._reschedule())
-
- def test_reschedule_no_retry_info(self):
- # no retry info will also disable re-scheduling.
- filter_properties = {}
- self.assertFalse(self._reschedule(filter_properties=filter_properties))
-
- def test_reschedule_no_request_spec(self):
- # no request spec will also disable re-scheduling.
- retry = dict(num_attempts=1)
- filter_properties = dict(retry=retry)
- self.assertFalse(self._reschedule(filter_properties=filter_properties))
-
- def test_reschedule_success(self):
- retry = dict(num_attempts=1)
- filter_properties = dict(retry=retry)
- request_spec = {'instance_uuids': ['foo', 'bar']}
- try:
- raise test.TestingException("just need an exception")
- except test.TestingException:
- exc_info = sys.exc_info()
- exc_str = traceback.format_exception_only(exc_info[0],
- exc_info[1])
-
- self.assertTrue(self._reschedule(filter_properties=filter_properties,
- request_spec=request_spec, exc_info=exc_info))
- self.assertEqual(1, len(request_spec['instance_uuids']))
- self.assertEqual(self.updated_task_state, self.expected_task_state)
- self.assertEqual(exc_str, filter_properties['retry']['exc'])
-
-
-class ComputeReschedulingResizeTestCase(ComputeReschedulingTestCase):
- """Test re-scheduling logic for prep_resize requests."""
-
- def setUp(self):
- super(ComputeReschedulingResizeTestCase, self).setUp()
- self.expected_task_state = task_states.RESIZE_PREP
-
- def _reschedule(self, request_spec=None, filter_properties=None,
- exc_info=None):
- if not filter_properties:
- filter_properties = {}
-
- instance_uuid = str(uuid.uuid4())
- instance = self._create_fake_instance_obj(
- params={'uuid': instance_uuid})
- instance_type = {}
- reservations = None
-
- scheduler_method = self.compute.compute_task_api.resize_instance
- scheduler_hint = dict(filter_properties=filter_properties)
- method_args = (instance, None, scheduler_hint, instance_type,
- reservations)
-
- return self.compute._reschedule(self.context, request_spec,
- filter_properties, instance, scheduler_method,
- method_args, self.expected_task_state, exc_info=exc_info)
-
-
-class InnerTestingException(Exception):
- pass
-
-
-class ComputeRescheduleOrErrorTestCase(BaseTestCase):
- """Test logic and exception handling around rescheduling or re-raising
- original exceptions when builds fail.
- """
-
- def setUp(self):
- super(ComputeRescheduleOrErrorTestCase, self).setUp()
- self.instance = self._create_fake_instance_obj()
-
- def test_reschedule_or_error_called(self):
- """Basic sanity check to make sure _reschedule_or_error is called
- when a build fails.
- """
- self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
- 'get_by_instance_uuid')
- self.mox.StubOutWithMock(self.compute, '_spawn')
- self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
-
- bdms = block_device_obj.block_device_make_list(self.context, [])
-
- objects.BlockDeviceMappingList.get_by_instance_uuid(
- mox.IgnoreArg(), self.instance.uuid).AndReturn(bdms)
- self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
- [], mox.IgnoreArg(), [], None, set_access_ip=False).AndRaise(
- test.TestingException("BuildError"))
- self.compute._reschedule_or_error(mox.IgnoreArg(), self.instance,
- mox.IgnoreArg(), None, None, None,
- False, None, {}, bdms, False).AndReturn(True)
-
- self.mox.ReplayAll()
- self.compute._run_instance(self.context, None, {}, None, None, None,
- False, None, self.instance, False)
-
- def test_shutdown_instance_fail(self):
- """Test shutdown instance failing before re-scheduling logic can even
- run.
- """
- instance_uuid = self.instance['uuid']
- self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
-
- try:
- raise test.TestingException("Original")
- except Exception:
- exc_info = sys.exc_info()
-
- compute_utils.add_instance_fault_from_exc(self.context,
- self.instance, exc_info[0], exc_info=exc_info)
- self.compute._shutdown_instance(mox.IgnoreArg(), self.instance,
- mox.IgnoreArg(),
- mox.IgnoreArg()).AndRaise(InnerTestingException("Error"))
- self.compute._log_original_error(exc_info, instance_uuid)
-
- self.mox.ReplayAll()
-
- # should raise the deallocation exception, not the original build
- # error:
- self.assertRaises(InnerTestingException,
- self.compute._reschedule_or_error, self.context,
- self.instance, exc_info, None, None, None, False, None, {})
-
- def test_shutdown_instance_fail_instance_info_cache_not_found(self):
- # Covers the case that _shutdown_instance fails with an
- # InstanceInfoCacheNotFound exception when getting instance network
- # information prior to calling driver.destroy.
- elevated_context = self.context.elevated()
- error = exception.InstanceInfoCacheNotFound(
- instance_uuid=self.instance['uuid'])
- with contextlib.nested(
- mock.patch.object(self.context, 'elevated',
- return_value=elevated_context),
- mock.patch.object(self.compute, '_get_instance_nw_info',
- side_effect=error),
- mock.patch.object(self.compute,
- '_get_instance_block_device_info'),
- mock.patch.object(self.compute.driver, 'destroy'),
- mock.patch.object(self.compute, '_try_deallocate_network')
- ) as (
- elevated_mock,
- _get_instance_nw_info_mock,
- _get_instance_block_device_info_mock,
- destroy_mock,
- _try_deallocate_network_mock
- ):
- inst_obj = self.instance
- self.compute._shutdown_instance(self.context, inst_obj,
- bdms=[], notify=False)
- # By asserting that _try_deallocate_network_mock was called
- # exactly once, we know that _get_instance_nw_info raising
- # InstanceInfoCacheNotFound did not make _shutdown_instance error
- # out and driver.destroy was still called.
- _try_deallocate_network_mock.assert_called_once_with(
- elevated_context, inst_obj, None)
-
- def test_reschedule_fail(self):
- # Test handling of exception from _reschedule.
- try:
- raise test.TestingException("Original")
- except Exception:
- exc_info = sys.exc_info()
-
- instance_uuid = self.instance['uuid']
- method_args = (None, None, None, None, False, {})
- self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
- self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
- self.mox.StubOutWithMock(self.compute, '_reschedule')
-
- self.compute._shutdown_instance(mox.IgnoreArg(), self.instance,
- mox.IgnoreArg(),
- mox.IgnoreArg())
- self.compute._cleanup_volumes(mox.IgnoreArg(), instance_uuid,
- mox.IgnoreArg())
- self.compute._reschedule(self.context, None, self.instance,
- {}, self.compute.scheduler_rpcapi.run_instance,
- method_args, task_states.SCHEDULING, exc_info).AndRaise(
- InnerTestingException("Inner"))
-
- self.mox.ReplayAll()
-
- self.assertFalse(self.compute._reschedule_or_error(self.context,
- self.instance, exc_info, None, None, None, False, None, {}))
-
- def test_reschedule_false(self):
- # Test not-rescheduling, but no nested exception.
- instance_uuid = self.instance['uuid']
- method_args = (None, None, None, None, False, {})
- self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
- self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
- self.mox.StubOutWithMock(self.compute, '_reschedule')
-
- try:
- raise test.TestingException("Original")
- except test.TestingException:
- exc_info = sys.exc_info()
- compute_utils.add_instance_fault_from_exc(self.context,
- self.instance, exc_info[0], exc_info=exc_info)
-
- self.compute._shutdown_instance(mox.IgnoreArg(), self.instance,
- mox.IgnoreArg(),
- mox.IgnoreArg())
- self.compute._cleanup_volumes(mox.IgnoreArg(), instance_uuid,
- mox.IgnoreArg())
- self.compute._reschedule(self.context, None, {}, self.instance,
- self.compute.scheduler_rpcapi.run_instance, method_args,
- task_states.SCHEDULING, exc_info).AndReturn(False)
-
- self.mox.ReplayAll()
-
- # re-scheduling is False, the original build error should be
- # raised here:
- self.assertFalse(self.compute._reschedule_or_error(self.context,
- self.instance, exc_info, None, None, None, False, None, {}))
-
- def test_reschedule_true(self):
- # Test behavior when re-scheduling happens.
- instance_uuid = self.instance['uuid']
- method_args = (None, None, None, None, False, {})
- self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
- self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
- self.mox.StubOutWithMock(self.compute, '_reschedule')
-
- try:
- raise test.TestingException("Original")
- except Exception:
- exc_info = sys.exc_info()
-
- compute_utils.add_instance_fault_from_exc(self.context,
- self.instance, exc_info[0], exc_info=exc_info)
- self.compute._shutdown_instance(mox.IgnoreArg(), self.instance,
- mox.IgnoreArg(),
- mox.IgnoreArg())
- self.compute._cleanup_volumes(mox.IgnoreArg(), instance_uuid,
- mox.IgnoreArg())
- self.compute._reschedule(self.context, None, {}, self.instance,
- self.compute.scheduler_rpcapi.run_instance,
- method_args, task_states.SCHEDULING, exc_info).AndReturn(
- True)
- self.compute._log_original_error(exc_info, instance_uuid)
-
- self.mox.ReplayAll()
-
- # re-scheduling is True, original error is logged, but nothing
- # is raised:
- self.compute._reschedule_or_error(self.context, self.instance,
- exc_info, None, None, None, False, None, {})
-
- def test_no_reschedule_on_delete_during_spawn(self):
- # instance should not be rescheduled if instance is deleted
- # during the build
- self.mox.StubOutWithMock(self.compute, '_spawn')
- self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
-
- exc = exception.UnexpectedDeletingTaskStateError(
- expected=task_states.SPAWNING, actual=task_states.DELETING)
- self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
- mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg(), set_access_ip=False).AndRaise(exc)
-
- self.mox.ReplayAll()
- # test succeeds if mocked method '_reschedule_or_error' is not
- # called.
- self.compute._run_instance(self.context, None, {}, None, None, None,
- False, None, self.instance, False)
-
- def test_no_reschedule_on_unexpected_task_state(self):
- # instance shouldn't be rescheduled if unexpected task state arises.
- # the exception should get reraised.
- self.mox.StubOutWithMock(self.compute, '_spawn')
- self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
-
- exc = exception.UnexpectedTaskStateError(expected=task_states.SPAWNING,
- actual=task_states.SCHEDULING)
- self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
- mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg(), set_access_ip=False).AndRaise(exc)
-
- self.mox.ReplayAll()
- self.assertRaises(exception.UnexpectedTaskStateError,
- self.compute._run_instance, self.context, None, {}, None, None,
- None, False, None, self.instance, False)
-
- def test_no_reschedule_on_block_device_fail(self):
- self.mox.StubOutWithMock(self.compute, '_prep_block_device')
- self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
-
- exc = exception.InvalidBDM()
-
- self.compute._prep_block_device(mox.IgnoreArg(), self.instance,
- mox.IgnoreArg()).AndRaise(exc)
-
- self.mox.ReplayAll()
- self.assertRaises(exception.InvalidBDM, self.compute._run_instance,
- self.context, None, {}, None, None, None, False,
- None, self.instance, False)
-
-
-class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
- """Test logic and exception handling around rescheduling prep resize
- requests
- """
- def setUp(self):
- super(ComputeRescheduleResizeOrReraiseTestCase, self).setUp()
- self.instance = self._create_fake_instance()
- self.instance_uuid = self.instance['uuid']
- self.instance_type = flavors.get_flavor_by_name(
- "m1.tiny")
-
- def test_reschedule_resize_or_reraise_called(self):
- """Verify the rescheduling logic gets called when there is an error
- during prep_resize.
- """
- inst_obj = self._create_fake_instance_obj()
-
- self.mox.StubOutWithMock(self.compute.db, 'migration_create')
- self.mox.StubOutWithMock(self.compute, '_reschedule_resize_or_reraise')
-
- self.compute.db.migration_create(mox.IgnoreArg(),
- mox.IgnoreArg()).AndRaise(test.TestingException("Original"))
-
- self.compute._reschedule_resize_or_reraise(mox.IgnoreArg(), None,
- inst_obj, mox.IgnoreArg(), self.instance_type,
- mox.IgnoreArg(), {},
- {})
-
- self.mox.ReplayAll()
-
- self.compute.prep_resize(self.context, image=None,
- instance=inst_obj,
- instance_type=self.instance_type,
- reservations=[], request_spec={},
- filter_properties={}, node=None)
-
- def test_reschedule_fails_with_exception(self):
- """Original exception should be raised if the _reschedule method
- raises another exception
- """
- instance = self._create_fake_instance_obj()
- scheduler_hint = dict(filter_properties={})
- method_args = (instance, None, scheduler_hint, self.instance_type,
- None)
- self.mox.StubOutWithMock(self.compute, "_reschedule")
-
- self.compute._reschedule(
- self.context, None, None, instance,
- self.compute.compute_task_api.resize_instance, method_args,
- task_states.RESIZE_PREP).AndRaise(
- InnerTestingException("Inner"))
- self.mox.ReplayAll()
-
- try:
- raise test.TestingException("Original")
- except Exception:
- exc_info = sys.exc_info()
- self.assertRaises(test.TestingException,
- self.compute._reschedule_resize_or_reraise, self.context,
- None, instance, exc_info, self.instance_type,
- self.none_quotas, {}, {})
-
- def test_reschedule_false(self):
- """Original exception should be raised if the resize is not
- rescheduled.
- """
- instance = self._create_fake_instance_obj()
- scheduler_hint = dict(filter_properties={})
- method_args = (instance, None, scheduler_hint, self.instance_type,
- None)
- self.mox.StubOutWithMock(self.compute, "_reschedule")
-
- self.compute._reschedule(
- self.context, None, None, instance,
- self.compute.compute_task_api.resize_instance, method_args,
- task_states.RESIZE_PREP).AndReturn(False)
- self.mox.ReplayAll()
-
- try:
- raise test.TestingException("Original")
- except Exception:
- exc_info = sys.exc_info()
- self.assertRaises(test.TestingException,
- self.compute._reschedule_resize_or_reraise, self.context,
- None, instance, exc_info, self.instance_type,
- self.none_quotas, {}, {})
-
- def test_reschedule_true(self):
- # If rescheduled, the original resize exception should be logged.
- instance = self._create_fake_instance_obj()
- scheduler_hint = dict(filter_properties={})
- method_args = (instance, None, scheduler_hint, self.instance_type,
- None)
-
- try:
- raise test.TestingException("Original")
- except Exception:
- exc_info = sys.exc_info()
-
- self.mox.StubOutWithMock(self.compute, "_reschedule")
- self.mox.StubOutWithMock(self.compute, "_log_original_error")
- self.compute._reschedule(self.context, {}, {},
- instance,
- self.compute.compute_task_api.resize_instance, method_args,
- task_states.RESIZE_PREP, exc_info).AndReturn(True)
-
- self.compute._log_original_error(exc_info, instance.uuid)
- self.mox.ReplayAll()
-
- self.compute._reschedule_resize_or_reraise(
- self.context, None, instance, exc_info,
- self.instance_type, self.none_quotas, {}, {})
-
-
-class ComputeInactiveImageTestCase(BaseTestCase):
- def setUp(self):
- super(ComputeInactiveImageTestCase, self).setUp()
-
- def fake_show(meh, context, id, **kwargs):
- return {'id': id, 'min_disk': None, 'min_ram': None,
- 'name': 'fake_name',
- 'status': 'deleted',
- 'properties': {'kernel_id': 'fake_kernel_id',
- 'ramdisk_id': 'fake_ramdisk_id',
- 'something_else': 'meow'}}
-
- fake_image.stub_out_image_service(self.stubs)
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
- self.compute_api = compute.API()
-
- def test_create_instance_with_deleted_image(self):
- # Make sure we can't start an instance with a deleted image.
- inst_type = flavors.get_flavor_by_name('m1.tiny')
- self.assertRaises(exception.ImageNotActive,
- self.compute_api.create,
- self.context, inst_type, 'fake-image-uuid')
-
-
-class EvacuateHostTestCase(BaseTestCase):
- def setUp(self):
- super(EvacuateHostTestCase, self).setUp()
- self.inst = self._create_fake_instance_obj(
- {'host': 'fake_host_2', 'node': 'fakenode2'})
- self.inst.task_state = task_states.REBUILDING
- self.inst.save()
-
- def tearDown(self):
- db.instance_destroy(self.context, self.inst.uuid)
- super(EvacuateHostTestCase, self).tearDown()
-
- def _rebuild(self, on_shared_storage=True):
- def fake(cls, ctxt, instance, *args, **kwargs):
- pass
-
- self.stubs.Set(network_api.API, 'setup_networks_on_host', fake)
-
- orig_image_ref = None
- image_ref = None
- injected_files = None
- bdms = db.block_device_mapping_get_all_by_instance(self.context,
- self.inst.uuid)
- self.compute.rebuild_instance(
- self.context, self.inst, orig_image_ref,
- image_ref, injected_files, 'newpass', {}, bdms, recreate=True,
- on_shared_storage=on_shared_storage)
-
- def test_rebuild_on_host_updated_target(self):
- """Confirm evacuate scenario updates host and node."""
- self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
-
- def fake_get_compute_info(context, host):
- self.assertTrue(context.is_admin)
- self.assertEqual('fake-mini', host)
- cn = objects.ComputeNode(hypervisor_hostname=self.rt.nodename)
- return cn
-
- self.stubs.Set(self.compute, '_get_compute_info',
- fake_get_compute_info)
- self.mox.ReplayAll()
-
- self._rebuild()
-
- # Should be on destination host
- instance = db.instance_get(self.context, self.inst.id)
- self.assertEqual(instance['host'], self.compute.host)
- self.assertEqual(NODENAME, instance['node'])
-
- def test_rebuild_on_host_updated_target_node_not_found(self):
- """Confirm evacuate scenario where compute_node isn't found."""
- self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
-
- def fake_get_compute_info(context, host):
- raise exception.NotFound(_("Host %s not found") % host)
-
- self.stubs.Set(self.compute, '_get_compute_info',
- fake_get_compute_info)
- self.mox.ReplayAll()
-
- self._rebuild()
-
- # Should be on destination host
- instance = db.instance_get(self.context, self.inst.id)
- self.assertEqual(instance['host'], self.compute.host)
- self.assertIsNone(instance['node'])
-
- def test_rebuild_with_instance_in_stopped_state(self):
- """Confirm evacuate scenario updates vm_state to stopped
- if instance is in stopped state
- """
- # Initialize the VM to stopped state
- db.instance_update(self.context, self.inst.uuid,
- {"vm_state": vm_states.STOPPED})
- self.inst.vm_state = vm_states.STOPPED
-
- self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
- self.mox.ReplayAll()
-
- self._rebuild()
-
- # Check the vm state is reset to stopped
- instance = db.instance_get(self.context, self.inst.id)
- self.assertEqual(instance['vm_state'], vm_states.STOPPED)
-
- def test_rebuild_with_wrong_shared_storage(self):
- """Confirm evacuate scenario does not update host."""
- self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
- self.mox.ReplayAll()
-
- self.assertRaises(exception.InvalidSharedStorage,
- lambda: self._rebuild(on_shared_storage=False))
-
- # Should remain on original host
- instance = db.instance_get(self.context, self.inst.id)
- self.assertEqual(instance['host'], 'fake_host_2')
-
- def test_rebuild_on_host_with_volumes(self):
- """Confirm evacuate scenario reconnects volumes."""
- values = {'instance_uuid': self.inst.uuid,
- 'source_type': 'volume',
- 'device_name': '/dev/vdc',
- 'delete_on_termination': False,
- 'volume_id': 'fake_volume_id'}
-
- db.block_device_mapping_create(self.context, values)
-
- def fake_volume_get(self, context, volume):
- return {'id': 'fake_volume_id'}
- self.stubs.Set(cinder.API, "get", fake_volume_get)
-
- # Stub out and record whether it gets detached
- result = {"detached": False}
-
- def fake_detach(self, context, volume):
- result["detached"] = volume["id"] == 'fake_volume_id'
- self.stubs.Set(cinder.API, "detach", fake_detach)
-
- def fake_terminate_connection(self, context, volume, connector):
- return {}
- self.stubs.Set(cinder.API, "terminate_connection",
- fake_terminate_connection)
-
- # make sure volumes attach, detach are called
- self.mox.StubOutWithMock(self.compute.volume_api, 'detach')
- self.compute.volume_api.detach(mox.IsA(self.context), mox.IgnoreArg())
-
- self.mox.StubOutWithMock(self.compute, '_prep_block_device')
- self.compute._prep_block_device(mox.IsA(self.context),
- mox.IsA(objects.Instance),
- mox.IgnoreArg())
-
- self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
- self.mox.ReplayAll()
-
- self._rebuild()
-
- # cleanup
- for bdms in db.block_device_mapping_get_all_by_instance(
- self.context, self.inst.uuid):
- db.block_device_mapping_destroy(self.context, bdms['id'])
-
- def test_rebuild_on_host_with_shared_storage(self):
- """Confirm evacuate scenario on shared storage."""
- self.mox.StubOutWithMock(self.compute.driver, 'spawn')
- self.compute.driver.spawn(mox.IsA(self.context),
- mox.IsA(objects.Instance), {}, mox.IgnoreArg(), 'newpass',
- network_info=mox.IgnoreArg(),
- block_device_info=mox.IgnoreArg())
-
- self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
- self.mox.ReplayAll()
-
- self._rebuild()
-
- def test_rebuild_on_host_without_shared_storage(self):
- """Confirm evacuate scenario without shared storage
- (rebuild from image)
- """
- fake_image = {'id': 1,
- 'name': 'fake_name',
- 'properties': {'kernel_id': 'fake_kernel_id',
- 'ramdisk_id': 'fake_ramdisk_id'}}
-
- self.mox.StubOutWithMock(self.compute.driver, 'spawn')
- self.compute.driver.spawn(mox.IsA(self.context),
- mox.IsA(objects.Instance), mox.IsA(fake_image),
- mox.IgnoreArg(), mox.IsA('newpass'),
- network_info=mox.IgnoreArg(),
- block_device_info=mox.IgnoreArg())
-
- self.stubs.Set(self.compute.driver, 'instance_on_disk',
- lambda x: False)
- self.mox.ReplayAll()
-
- self._rebuild(on_shared_storage=False)
-
- def test_rebuild_on_host_instance_exists(self):
- """Rebuild if instance exists raises an exception."""
- db.instance_update(self.context, self.inst.uuid,
- {"task_state": task_states.SCHEDULING})
- self.compute.run_instance(self.context,
- self.inst, {}, {},
- [], None, None, True, None, False)
-
- self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
- self.assertRaises(exception.InstanceExists,
- lambda: self._rebuild(on_shared_storage=True))
-
- def test_driver_does_not_support_recreate(self):
- with utils.temporary_mutation(self.compute.driver.capabilities,
- supports_recreate=False):
- self.stubs.Set(self.compute.driver, 'instance_on_disk',
- lambda x: True)
- self.assertRaises(exception.InstanceRecreateNotSupported,
- lambda: self._rebuild(on_shared_storage=True))
-
-
-class ComputeInjectedFilesTestCase(BaseTestCase):
- # Test that running instances with injected_files decodes files correctly
-
- def setUp(self):
- super(ComputeInjectedFilesTestCase, self).setUp()
- self.instance = self._create_fake_instance_obj()
- self.stubs.Set(self.compute.driver, 'spawn', self._spawn)
-
- def _spawn(self, context, instance, image_meta, injected_files,
- admin_password, nw_info, block_device_info, db_api=None):
- self.assertEqual(self.expected, injected_files)
-
- def _test(self, injected_files, decoded_files):
- self.expected = decoded_files
- self.compute.run_instance(self.context, self.instance, {}, {}, [],
- injected_files, None, True, None, False)
-
- def test_injected_none(self):
- # test an input of None for injected_files
- self._test(None, [])
-
- def test_injected_empty(self):
- # test an input of [] for injected_files
- self._test([], [])
-
- def test_injected_success(self):
- # test with valid b64 encoded content.
- injected_files = [
- ('/a/b/c', base64.b64encode('foobarbaz')),
- ('/d/e/f', base64.b64encode('seespotrun')),
- ]
-
- decoded_files = [
- ('/a/b/c', 'foobarbaz'),
- ('/d/e/f', 'seespotrun'),
- ]
- self._test(injected_files, decoded_files)
-
- def test_injected_invalid(self):
- # test with invalid b64 encoded content
- injected_files = [
- ('/a/b/c', base64.b64encode('foobarbaz')),
- ('/d/e/f', 'seespotrun'),
- ]
-
- self.assertRaises(exception.Base64Exception, self.compute.run_instance,
- self.context, self.instance, {}, {}, [], injected_files, None,
- True, None, False)
-
- def test_reschedule(self):
- # test that rescheduling is done with original encoded files
- expected = [
- ('/a/b/c', base64.b64encode('foobarbaz')),
- ('/d/e/f', base64.b64encode('seespotrun')),
- ]
-
- def _roe(context, instance, exc_info, requested_networks,
- admin_password, injected_files, is_first_time, request_spec,
- filter_properties, bdms=None, legacy_bdm_in_spec=False):
- self.assertEqual(expected, injected_files)
- return True
-
- def spawn_explode(context, instance, image_meta, injected_files,
- admin_password, nw_info, block_device_info):
- # force reschedule logic to execute
- raise test.TestingException(_("spawn error"))
-
- self.stubs.Set(self.compute.driver, 'spawn', spawn_explode)
- self.stubs.Set(self.compute, '_reschedule_or_error', _roe)
-
- self.compute.run_instance(self.context, self.instance, {}, {}, [],
- expected, None, True, None, False)
-
-
-class CheckConfigDriveTestCase(test.TestCase):
- # NOTE(sirp): `TestCase` is far too heavyweight for this test, this should
- # probably derive from a `test.FastTestCase` that omits DB and env
- # handling
- def setUp(self):
- super(CheckConfigDriveTestCase, self).setUp()
- self.compute_api = compute.API()
-
- def _assertCheck(self, expected, config_drive):
- self.assertEqual(expected,
- self.compute_api._check_config_drive(config_drive))
-
- def _assertInvalid(self, config_drive):
- self.assertRaises(exception.ConfigDriveInvalidValue,
- self.compute_api._check_config_drive,
- config_drive)
-
- def test_config_drive_false_values(self):
- self._assertCheck('', None)
- self._assertCheck('', '')
- self._assertCheck('', 'False')
- self._assertCheck('', 'f')
- self._assertCheck('', '0')
-
- def test_config_drive_true_values(self):
- self._assertCheck(True, 'True')
- self._assertCheck(True, 't')
- self._assertCheck(True, '1')
-
- def test_config_drive_bogus_values_raise(self):
- self._assertInvalid('asd')
- self._assertInvalid(uuidutils.generate_uuid())
-
-
-class CheckRequestedImageTestCase(test.TestCase):
- def setUp(self):
- super(CheckRequestedImageTestCase, self).setUp()
- self.compute_api = compute.API()
- self.context = context.RequestContext(
- 'fake_user_id', 'fake_project_id')
-
- self.instance_type = flavors.get_default_flavor()
- self.instance_type['memory_mb'] = 64
- self.instance_type['root_gb'] = 1
-
- def test_no_image_specified(self):
- self.compute_api._check_requested_image(self.context, None, None,
- self.instance_type)
-
- def test_image_status_must_be_active(self):
- image = dict(id='123', status='foo')
-
- self.assertRaises(exception.ImageNotActive,
- self.compute_api._check_requested_image, self.context,
- image['id'], image, self.instance_type)
-
- image['status'] = 'active'
- self.compute_api._check_requested_image(self.context, image['id'],
- image, self.instance_type)
-
- def test_image_min_ram_check(self):
- image = dict(id='123', status='active', min_ram='65')
-
- self.assertRaises(exception.FlavorMemoryTooSmall,
- self.compute_api._check_requested_image, self.context,
- image['id'], image, self.instance_type)
-
- image['min_ram'] = '64'
- self.compute_api._check_requested_image(self.context, image['id'],
- image, self.instance_type)
-
- def test_image_min_disk_check(self):
- image = dict(id='123', status='active', min_disk='2')
-
- self.assertRaises(exception.FlavorDiskTooSmall,
- self.compute_api._check_requested_image, self.context,
- image['id'], image, self.instance_type)
-
- image['min_disk'] = '1'
- self.compute_api._check_requested_image(self.context, image['id'],
- image, self.instance_type)
-
- def test_image_too_large(self):
- image = dict(id='123', status='active', size='1073741825')
-
- self.assertRaises(exception.FlavorDiskTooSmall,
- self.compute_api._check_requested_image, self.context,
- image['id'], image, self.instance_type)
-
- image['size'] = '1073741824'
- self.compute_api._check_requested_image(self.context, image['id'],
- image, self.instance_type)
-
- def test_root_gb_zero_disables_size_check(self):
- self.instance_type['root_gb'] = 0
- image = dict(id='123', status='active', size='1073741825')
-
- self.compute_api._check_requested_image(self.context, image['id'],
- image, self.instance_type)
-
- def test_root_gb_zero_disables_min_disk(self):
- self.instance_type['root_gb'] = 0
- image = dict(id='123', status='active', min_disk='2')
-
- self.compute_api._check_requested_image(self.context, image['id'],
- image, self.instance_type)
-
- def test_config_drive_option(self):
- image = {'id': 1, 'status': 'active'}
- image['properties'] = {'img_config_drive': 'optional'}
- self.compute_api._check_requested_image(self.context, image['id'],
- image, self.instance_type)
- image['properties'] = {'img_config_drive': 'mandatory'}
- self.compute_api._check_requested_image(self.context, image['id'],
- image, self.instance_type)
- image['properties'] = {'img_config_drive': 'bar'}
- self.assertRaises(exception.InvalidImageConfigDrive,
- self.compute_api._check_requested_image,
- self.context, image['id'], image, self.instance_type)
-
-
-class ComputeHooksTestCase(test.BaseHookTestCase):
- def test_delete_instance_has_hook(self):
- delete_func = compute_manager.ComputeManager._delete_instance
- self.assert_has_hook('delete_instance', delete_func)
-
- def test_create_instance_has_hook(self):
- create_func = compute_api.API.create
- self.assert_has_hook('create_instance', create_func)
diff --git a/nova/tests/compute/test_compute_api.py b/nova/tests/compute/test_compute_api.py
deleted file mode 100644
index 932c6af3ae..0000000000
--- a/nova/tests/compute/test_compute_api.py
+++ /dev/null
@@ -1,2635 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Unit tests for compute API."""
-
-import contextlib
-import copy
-import datetime
-
-import iso8601
-import mock
-import mox
-from oslo.utils import timeutils
-
-from nova.compute import api as compute_api
-from nova.compute import arch
-from nova.compute import cells_api as compute_cells_api
-from nova.compute import delete_types
-from nova.compute import flavors
-from nova.compute import instance_actions
-from nova.compute import task_states
-from nova.compute import utils as compute_utils
-from nova.compute import vm_mode
-from nova.compute import vm_states
-from nova import context
-from nova import db
-from nova import exception
-from nova import objects
-from nova.objects import base as obj_base
-from nova.objects import quotas as quotas_obj
-from nova.openstack.common import uuidutils
-from nova import quota
-from nova import test
-from nova.tests import fake_block_device
-from nova.tests import fake_instance
-from nova.tests.image import fake as fake_image
-from nova.tests import matchers
-from nova.tests.objects import test_flavor
-from nova.tests.objects import test_migration
-from nova.tests.objects import test_service
-from nova.volume import cinder
-
-
-FAKE_IMAGE_REF = 'fake-image-ref'
-NODENAME = 'fakenode1'
-SHELVED_IMAGE = 'fake-shelved-image'
-SHELVED_IMAGE_NOT_FOUND = 'fake-shelved-image-notfound'
-SHELVED_IMAGE_NOT_AUTHORIZED = 'fake-shelved-image-not-authorized'
-SHELVED_IMAGE_EXCEPTION = 'fake-shelved-image-exception'
-
-
-class _ComputeAPIUnitTestMixIn(object):
- def setUp(self):
- super(_ComputeAPIUnitTestMixIn, self).setUp()
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id,
- self.project_id)
-
- def _get_vm_states(self, exclude_states=None):
- vm_state = set([vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED,
- vm_states.SUSPENDED, vm_states.RESCUED, vm_states.STOPPED,
- vm_states.RESIZED, vm_states.SOFT_DELETED,
- vm_states.DELETED, vm_states.ERROR, vm_states.SHELVED,
- vm_states.SHELVED_OFFLOADED])
- if not exclude_states:
- exclude_states = set()
- return vm_state - exclude_states
-
- def _create_flavor(self, params=None):
- flavor = {'id': 1,
- 'flavorid': 1,
- 'name': 'm1.tiny',
- 'memory_mb': 512,
- 'vcpus': 1,
- 'vcpu_weight': None,
- 'root_gb': 1,
- 'ephemeral_gb': 0,
- 'rxtx_factor': 1,
- 'swap': 0,
- 'deleted': 0,
- 'disabled': False,
- 'is_public': True,
- }
- if params:
- flavor.update(params)
- return flavor
-
- def _create_instance_obj(self, params=None, flavor=None):
- """Create a test instance."""
- if not params:
- params = {}
-
- if flavor is None:
- flavor = self._create_flavor()
-
- def make_fake_sys_meta():
- sys_meta = params.pop("system_metadata", {})
- for key in flavors.system_metadata_flavor_props:
- sys_meta['instance_type_%s' % key] = flavor[key]
- return sys_meta
-
- now = timeutils.utcnow()
-
- instance = objects.Instance()
- instance.metadata = {}
- instance.metadata.update(params.pop('metadata', {}))
- instance.system_metadata = make_fake_sys_meta()
- instance.system_metadata.update(params.pop('system_metadata', {}))
- instance._context = self.context
- instance.id = 1
- instance.uuid = uuidutils.generate_uuid()
- instance.cell_name = 'api!child'
- instance.vm_state = vm_states.ACTIVE
- instance.task_state = None
- instance.image_ref = FAKE_IMAGE_REF
- instance.reservation_id = 'r-fakeres'
- instance.user_id = self.user_id
- instance.project_id = self.project_id
- instance.host = 'fake_host'
- instance.node = NODENAME
- instance.instance_type_id = flavor['id']
- instance.ami_launch_index = 0
- instance.memory_mb = 0
- instance.vcpus = 0
- instance.root_gb = 0
- instance.ephemeral_gb = 0
- instance.architecture = arch.X86_64
- instance.os_type = 'Linux'
- instance.locked = False
- instance.created_at = now
- instance.updated_at = now
- instance.launched_at = now
- instance.disable_terminate = False
- instance.info_cache = objects.InstanceInfoCache()
-
- if params:
- instance.update(params)
- instance.obj_reset_changes()
- return instance
-
- def test_create_quota_exceeded_messages(self):
- image_href = "image_href"
- image_id = 0
- instance_type = self._create_flavor()
-
- self.mox.StubOutWithMock(self.compute_api, "_get_image")
- self.mox.StubOutWithMock(quota.QUOTAS, "limit_check")
- self.mox.StubOutWithMock(quota.QUOTAS, "reserve")
-
- quotas = {'instances': 1, 'cores': 1, 'ram': 1}
- usages = dict((r, {'in_use': 1, 'reserved': 1}) for r in
- ['instances', 'cores', 'ram'])
- headroom = dict((res, quotas[res] -
- (usages[res]['in_use'] + usages[res]['reserved']))
- for res in quotas.keys())
- quota_exception = exception.OverQuota(quotas=quotas,
- usages=usages, overs=['instances'], headroom=headroom)
-
- for _unused in range(2):
- self.compute_api._get_image(self.context, image_href).AndReturn(
- (image_id, {}))
- quota.QUOTAS.limit_check(self.context, metadata_items=mox.IsA(int))
- quota.QUOTAS.reserve(self.context, instances=40,
- cores=mox.IsA(int),
- expire=mox.IgnoreArg(),
- project_id=mox.IgnoreArg(),
- user_id=mox.IgnoreArg(),
- ram=mox.IsA(int)).AndRaise(quota_exception)
-
- self.mox.ReplayAll()
-
- for min_count, message in [(20, '20-40'), (40, '40')]:
- try:
- self.compute_api.create(self.context, instance_type,
- "image_href", min_count=min_count,
- max_count=40)
- except exception.TooManyInstances as e:
- self.assertEqual(message, e.kwargs['req'])
- else:
- self.fail("Exception not raised")
-
- def test_specified_port_and_multiple_instances_neutronv2(self):
- # Tests that if port is specified there is only one instance booting
- # (i.e max_count == 1) as we can't share the same port across multiple
- # instances.
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- port = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- address = '10.0.0.1'
- min_count = 1
- max_count = 2
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(address=address,
- port_id=port)])
-
- self.assertRaises(exception.MultiplePortsNotApplicable,
- self.compute_api.create, self.context, 'fake_flavor', 'image_id',
- min_count=min_count, max_count=max_count,
- requested_networks=requested_networks)
-
- def _test_specified_ip_and_multiple_instances_helper(self,
- requested_networks):
- # Tests that if ip is specified there is only one instance booting
- # (i.e max_count == 1)
- min_count = 1
- max_count = 2
- self.assertRaises(exception.InvalidFixedIpAndMaxCountRequest,
- self.compute_api.create, self.context, "fake_flavor", 'image_id',
- min_count=min_count, max_count=max_count,
- requested_networks=requested_networks)
-
- def test_specified_ip_and_multiple_instances(self):
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- address = '10.0.0.1'
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id=network,
- address=address)])
- self._test_specified_ip_and_multiple_instances_helper(
- requested_networks)
-
- def test_specified_ip_and_multiple_instances_neutronv2(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- address = '10.0.0.1'
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id=network,
- address=address)])
- self._test_specified_ip_and_multiple_instances_helper(
- requested_networks)
-
- def test_suspend(self):
- # Ensure instance can be suspended.
- instance = self._create_instance_obj()
- self.assertEqual(instance.vm_state, vm_states.ACTIVE)
- self.assertIsNone(instance.task_state)
-
- self.mox.StubOutWithMock(instance, 'save')
- self.mox.StubOutWithMock(self.compute_api,
- '_record_action_start')
- if self.cell_type == 'api':
- rpcapi = self.compute_api.cells_rpcapi
- else:
- rpcapi = self.compute_api.compute_rpcapi
- self.mox.StubOutWithMock(rpcapi, 'suspend_instance')
-
- instance.save(expected_task_state=[None])
- self.compute_api._record_action_start(self.context,
- instance, instance_actions.SUSPEND)
- rpcapi.suspend_instance(self.context, instance)
-
- self.mox.ReplayAll()
-
- self.compute_api.suspend(self.context, instance)
- self.assertEqual(vm_states.ACTIVE, instance.vm_state)
- self.assertEqual(task_states.SUSPENDING,
- instance.task_state)
-
- def _test_suspend_fails(self, vm_state):
- params = dict(vm_state=vm_state)
- instance = self._create_instance_obj(params=params)
- self.assertIsNone(instance.task_state)
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.suspend,
- self.context, instance)
-
- def test_suspend_fails_invalid_states(self):
- invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE]))
- for state in invalid_vm_states:
- self._test_suspend_fails(state)
-
- def test_resume(self):
- # Ensure instance can be resumed (if suspended).
- instance = self._create_instance_obj(
- params=dict(vm_state=vm_states.SUSPENDED))
- self.assertEqual(instance.vm_state, vm_states.SUSPENDED)
- self.assertIsNone(instance.task_state)
-
- self.mox.StubOutWithMock(instance, 'save')
- self.mox.StubOutWithMock(self.compute_api,
- '_record_action_start')
- if self.cell_type == 'api':
- rpcapi = self.compute_api.cells_rpcapi
- else:
- rpcapi = self.compute_api.compute_rpcapi
- self.mox.StubOutWithMock(rpcapi, 'resume_instance')
-
- instance.save(expected_task_state=[None])
- self.compute_api._record_action_start(self.context,
- instance, instance_actions.RESUME)
- rpcapi.resume_instance(self.context, instance)
-
- self.mox.ReplayAll()
-
- self.compute_api.resume(self.context, instance)
- self.assertEqual(vm_states.SUSPENDED, instance.vm_state)
- self.assertEqual(task_states.RESUMING,
- instance.task_state)
-
- def test_start(self):
- params = dict(vm_state=vm_states.STOPPED)
- instance = self._create_instance_obj(params=params)
-
- self.mox.StubOutWithMock(instance, 'save')
- self.mox.StubOutWithMock(self.compute_api,
- '_record_action_start')
-
- instance.save(expected_task_state=[None])
- self.compute_api._record_action_start(self.context,
- instance, instance_actions.START)
-
- if self.cell_type == 'api':
- rpcapi = self.compute_api.cells_rpcapi
- else:
- rpcapi = self.compute_api.compute_rpcapi
-
- self.mox.StubOutWithMock(rpcapi, 'start_instance')
- rpcapi.start_instance(self.context, instance)
-
- self.mox.ReplayAll()
-
- self.compute_api.start(self.context, instance)
- self.assertEqual(task_states.POWERING_ON,
- instance.task_state)
-
- def test_start_invalid_state(self):
- instance = self._create_instance_obj()
- self.assertEqual(instance.vm_state, vm_states.ACTIVE)
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.start,
- self.context, instance)
-
- def test_start_no_host(self):
- params = dict(vm_state=vm_states.STOPPED, host='')
- instance = self._create_instance_obj(params=params)
- self.assertRaises(exception.InstanceNotReady,
- self.compute_api.start,
- self.context, instance)
-
- def _test_stop(self, vm_state, force=False):
- # Make sure 'progress' gets reset
- params = dict(task_state=None, progress=99, vm_state=vm_state)
- instance = self._create_instance_obj(params=params)
-
- self.mox.StubOutWithMock(instance, 'save')
- self.mox.StubOutWithMock(self.compute_api,
- '_record_action_start')
-
- instance.save(expected_task_state=[None])
- self.compute_api._record_action_start(self.context,
- instance, instance_actions.STOP)
-
- if self.cell_type == 'api':
- rpcapi = self.compute_api.cells_rpcapi
- else:
- rpcapi = self.compute_api.compute_rpcapi
-
- self.mox.StubOutWithMock(rpcapi, 'stop_instance')
- rpcapi.stop_instance(self.context, instance, do_cast=True)
-
- self.mox.ReplayAll()
-
- if force:
- self.compute_api.force_stop(self.context, instance)
- else:
- self.compute_api.stop(self.context, instance)
- self.assertEqual(task_states.POWERING_OFF,
- instance.task_state)
- self.assertEqual(0, instance.progress)
-
- def test_stop(self):
- self._test_stop(vm_states.ACTIVE)
-
- def test_stop_stopped_instance_with_bypass(self):
- self._test_stop(vm_states.STOPPED, force=True)
-
- def _test_stop_invalid_state(self, vm_state):
- params = dict(vm_state=vm_state)
- instance = self._create_instance_obj(params=params)
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.stop,
- self.context, instance)
-
- def test_stop_fails_invalid_states(self):
- invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE,
- vm_states.ERROR]))
- for state in invalid_vm_states:
- self._test_stop_invalid_state(state)
-
- def test_stop_a_stopped_inst(self):
- params = {'vm_state': vm_states.STOPPED}
- instance = self._create_instance_obj(params=params)
-
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.stop,
- self.context, instance)
-
- def test_stop_no_host(self):
- params = {'host': ''}
- instance = self._create_instance_obj(params=params)
- self.assertRaises(exception.InstanceNotReady,
- self.compute_api.stop,
- self.context, instance)
-
- def _test_reboot_type(self, vm_state, reboot_type, task_state=None):
- # Ensure instance can be soft rebooted.
- inst = self._create_instance_obj()
- inst.vm_state = vm_state
- inst.task_state = task_state
-
- self.mox.StubOutWithMock(self.context, 'elevated')
- self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
- self.mox.StubOutWithMock(self.compute_api, 'update')
- self.mox.StubOutWithMock(inst, 'save')
- inst.save(expected_task_state=[None, task_states.REBOOTING,
- task_states.REBOOT_PENDING,
- task_states.REBOOT_STARTED])
- self.compute_api._record_action_start(self.context, inst,
- instance_actions.REBOOT)
-
- if self.cell_type == 'api':
- rpcapi = self.compute_api.cells_rpcapi
- else:
- rpcapi = self.compute_api.compute_rpcapi
-
- self.mox.StubOutWithMock(rpcapi, 'reboot_instance')
- rpcapi.reboot_instance(self.context, instance=inst,
- block_device_info=None,
- reboot_type=reboot_type)
- self.mox.ReplayAll()
-
- self.compute_api.reboot(self.context, inst, reboot_type)
-
- def _test_reboot_type_fails(self, reboot_type, **updates):
- inst = self._create_instance_obj()
- inst.update(updates)
-
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.reboot,
- self.context, inst, reboot_type)
-
- def test_reboot_hard_active(self):
- self._test_reboot_type(vm_states.ACTIVE, 'HARD')
-
- def test_reboot_hard_error(self):
- self._test_reboot_type(vm_states.ERROR, 'HARD')
-
- def test_reboot_hard_rebooting(self):
- self._test_reboot_type(vm_states.ACTIVE, 'HARD',
- task_state=task_states.REBOOTING)
-
- def test_reboot_hard_reboot_started(self):
- self._test_reboot_type(vm_states.ACTIVE, 'HARD',
- task_state=task_states.REBOOT_STARTED)
-
- def test_reboot_hard_reboot_pending(self):
- self._test_reboot_type(vm_states.ACTIVE, 'HARD',
- task_state=task_states.REBOOT_PENDING)
-
- def test_reboot_hard_rescued(self):
- self._test_reboot_type_fails('HARD', vm_state=vm_states.RESCUED)
-
- def test_reboot_hard_error_not_launched(self):
- self._test_reboot_type_fails('HARD', vm_state=vm_states.ERROR,
- launched_at=None)
-
- def test_reboot_soft(self):
- self._test_reboot_type(vm_states.ACTIVE, 'SOFT')
-
- def test_reboot_soft_error(self):
- self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR)
-
- def test_reboot_soft_paused(self):
- self._test_reboot_type_fails('SOFT', vm_state=vm_states.PAUSED)
-
- def test_reboot_soft_stopped(self):
- self._test_reboot_type_fails('SOFT', vm_state=vm_states.STOPPED)
-
- def test_reboot_soft_suspended(self):
- self._test_reboot_type_fails('SOFT', vm_state=vm_states.SUSPENDED)
-
- def test_reboot_soft_rebooting(self):
- self._test_reboot_type_fails('SOFT', task_state=task_states.REBOOTING)
-
- def test_reboot_soft_rebooting_hard(self):
- self._test_reboot_type_fails('SOFT',
- task_state=task_states.REBOOTING_HARD)
-
- def test_reboot_soft_reboot_started(self):
- self._test_reboot_type_fails('SOFT',
- task_state=task_states.REBOOT_STARTED)
-
- def test_reboot_soft_reboot_pending(self):
- self._test_reboot_type_fails('SOFT',
- task_state=task_states.REBOOT_PENDING)
-
- def test_reboot_soft_rescued(self):
- self._test_reboot_type_fails('SOFT', vm_state=vm_states.RESCUED)
-
- def test_reboot_soft_error_not_launched(self):
- self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR,
- launched_at=None)
-
- def _test_delete_resizing_part(self, inst, deltas):
- fake_db_migration = test_migration.fake_db_migration()
- migration = objects.Migration._from_db_object(
- self.context, objects.Migration(),
- fake_db_migration)
- inst.instance_type_id = migration.new_instance_type_id
- old_flavor = {'vcpus': 1,
- 'memory_mb': 512}
- deltas['cores'] = -old_flavor['vcpus']
- deltas['ram'] = -old_flavor['memory_mb']
-
- self.mox.StubOutWithMock(objects.Migration,
- 'get_by_instance_and_status')
- self.mox.StubOutWithMock(flavors, 'get_flavor')
-
- self.context.elevated().AndReturn(self.context)
- objects.Migration.get_by_instance_and_status(
- self.context, inst.uuid, 'post-migrating').AndReturn(migration)
- flavors.get_flavor(migration.old_instance_type_id).AndReturn(
- old_flavor)
-
- def _test_delete_resized_part(self, inst):
- migration = objects.Migration._from_db_object(
- self.context, objects.Migration(),
- test_migration.fake_db_migration())
-
- self.mox.StubOutWithMock(objects.Migration,
- 'get_by_instance_and_status')
-
- self.context.elevated().AndReturn(self.context)
- objects.Migration.get_by_instance_and_status(
- self.context, inst.uuid, 'finished').AndReturn(migration)
- self.compute_api._downsize_quota_delta(self.context, inst
- ).AndReturn('deltas')
- fake_quotas = objects.Quotas.from_reservations(self.context,
- ['rsvs'])
- self.compute_api._reserve_quota_delta(self.context, 'deltas', inst,
- ).AndReturn(fake_quotas)
- self.compute_api._record_action_start(
- self.context, inst, instance_actions.CONFIRM_RESIZE)
- self.compute_api.compute_rpcapi.confirm_resize(
- self.context, inst, migration,
- migration['source_compute'], fake_quotas.reservations, cast=False)
-
- def _test_delete_shelved_part(self, inst):
- image_api = self.compute_api.image_api
- self.mox.StubOutWithMock(image_api, 'delete')
-
- snapshot_id = inst.system_metadata.get('shelved_image_id')
- if snapshot_id == SHELVED_IMAGE:
- image_api.delete(self.context, snapshot_id).AndReturn(True)
- elif snapshot_id == SHELVED_IMAGE_NOT_FOUND:
- image_api.delete(self.context, snapshot_id).AndRaise(
- exception.ImageNotFound(image_id=snapshot_id))
- elif snapshot_id == SHELVED_IMAGE_NOT_AUTHORIZED:
- image_api.delete(self.context, snapshot_id).AndRaise(
- exception.ImageNotAuthorized(image_id=snapshot_id))
- elif snapshot_id == SHELVED_IMAGE_EXCEPTION:
- image_api.delete(self.context, snapshot_id).AndRaise(
- test.TestingException("Unexpected error"))
-
- def _test_downed_host_part(self, inst, updates, delete_time, delete_type):
- inst.info_cache.delete()
- compute_utils.notify_about_instance_usage(
- self.compute_api.notifier, self.context, inst,
- '%s.start' % delete_type)
- self.context.elevated().AndReturn(self.context)
- self.compute_api.network_api.deallocate_for_instance(
- self.context, inst)
- state = (delete_types.SOFT_DELETE in delete_type and
- vm_states.SOFT_DELETED or
- vm_states.DELETED)
- updates.update({'vm_state': state,
- 'task_state': None,
- 'terminated_at': delete_time})
- inst.save()
-
- updates.update({'deleted_at': delete_time,
- 'deleted': True})
- fake_inst = fake_instance.fake_db_instance(**updates)
- db.instance_destroy(self.context, inst.uuid,
- constraint=None).AndReturn(fake_inst)
- compute_utils.notify_about_instance_usage(
- self.compute_api.notifier,
- self.context, inst, '%s.end' % delete_type,
- system_metadata=inst.system_metadata)
-
- def _test_delete(self, delete_type, **attrs):
- reservations = ['fake-resv']
- inst = self._create_instance_obj()
- inst.update(attrs)
- inst._context = self.context
- deltas = {'instances': -1,
- 'cores': -inst.vcpus,
- 'ram': -inst.memory_mb}
- delete_time = datetime.datetime(1955, 11, 5, 9, 30,
- tzinfo=iso8601.iso8601.Utc())
- timeutils.set_time_override(delete_time)
- task_state = (delete_type == delete_types.SOFT_DELETE and
- task_states.SOFT_DELETING or task_states.DELETING)
- updates = {'progress': 0, 'task_state': task_state}
- if delete_type == delete_types.SOFT_DELETE:
- updates['deleted_at'] = delete_time
- self.mox.StubOutWithMock(inst, 'save')
- self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
- 'get_by_instance_uuid')
- self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
- self.mox.StubOutWithMock(self.context, 'elevated')
- self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
- self.mox.StubOutWithMock(self.compute_api.servicegroup_api,
- 'service_is_up')
- self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta')
- self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
- self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
- self.mox.StubOutWithMock(inst.info_cache, 'delete')
- self.mox.StubOutWithMock(self.compute_api.network_api,
- 'deallocate_for_instance')
- self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
- self.mox.StubOutWithMock(db, 'instance_destroy')
- self.mox.StubOutWithMock(compute_utils,
- 'notify_about_instance_usage')
- self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
- rpcapi = self.compute_api.compute_rpcapi
- self.mox.StubOutWithMock(rpcapi, 'confirm_resize')
-
- if (inst.vm_state in
- (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED)):
- self._test_delete_shelved_part(inst)
-
- if self.cell_type == 'api':
- rpcapi = self.compute_api.cells_rpcapi
- self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
- self.mox.StubOutWithMock(rpcapi, 'soft_delete_instance')
-
- objects.BlockDeviceMappingList.get_by_instance_uuid(
- self.context, inst.uuid).AndReturn([])
- inst.save()
- if inst.task_state == task_states.RESIZE_FINISH:
- self._test_delete_resizing_part(inst, deltas)
- quota.QUOTAS.reserve(self.context, project_id=inst.project_id,
- user_id=inst.user_id,
- expire=mox.IgnoreArg(),
- **deltas).AndReturn(reservations)
-
- # NOTE(comstud): This is getting messy. But what we are wanting
- # to test is:
- # If cells is enabled and we're the API cell:
- # * Cast to cells_rpcapi.<method> with reservations=None
- # * Commit reservations
- # Otherwise:
- # * Check for downed host
- # * If downed host:
- # * Clean up instance, destroying it, sending notifications.
- # (Tested in _test_downed_host_part())
- # * Commit reservations
- # * If not downed host:
- # * Record the action start.
- # * Cast to compute_rpcapi.<method> with the reservations
-
- cast = True
- commit_quotas = True
- if self.cell_type != 'api':
- if inst.vm_state == vm_states.RESIZED:
- self._test_delete_resized_part(inst)
-
- self.context.elevated().AndReturn(self.context)
- db.service_get_by_compute_host(
- self.context, inst.host).AndReturn(
- test_service.fake_service)
- self.compute_api.servicegroup_api.service_is_up(
- mox.IsA(objects.Service)).AndReturn(
- inst.host != 'down-host')
-
- if inst.host == 'down-host':
- self._test_downed_host_part(inst, updates, delete_time,
- delete_type)
- cast = False
- else:
- # Happens on the manager side
- commit_quotas = False
-
- if cast:
- if self.cell_type != 'api':
- self.compute_api._record_action_start(self.context, inst,
- instance_actions.DELETE)
- if commit_quotas:
- cast_reservations = None
- else:
- cast_reservations = reservations
- if delete_type == delete_types.SOFT_DELETE:
- rpcapi.soft_delete_instance(self.context, inst,
- reservations=cast_reservations)
- elif delete_type in [delete_types.DELETE,
- delete_types.FORCE_DELETE]:
- rpcapi.terminate_instance(self.context, inst, [],
- reservations=cast_reservations)
-
- if commit_quotas:
- # Local delete or when we're testing API cell.
- quota.QUOTAS.commit(self.context, reservations,
- project_id=inst.project_id,
- user_id=inst.user_id)
-
- self.mox.ReplayAll()
-
- getattr(self.compute_api, delete_type)(self.context, inst)
- for k, v in updates.items():
- self.assertEqual(inst[k], v)
-
- self.mox.UnsetStubs()
-
- def test_delete(self):
- self._test_delete(delete_types.DELETE)
-
- def test_delete_if_not_launched(self):
- self._test_delete(delete_types.DELETE, launched_at=None)
-
- def test_delete_in_resizing(self):
- self._test_delete(delete_types.DELETE,
- task_state=task_states.RESIZE_FINISH)
-
- def test_delete_in_resized(self):
- self._test_delete(delete_types.DELETE, vm_state=vm_states.RESIZED)
-
- def test_delete_shelved(self):
- fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
- self._test_delete(delete_types.DELETE,
- vm_state=vm_states.SHELVED,
- system_metadata=fake_sys_meta)
-
- def test_delete_shelved_offloaded(self):
- fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
- self._test_delete(delete_types.DELETE,
- vm_state=vm_states.SHELVED_OFFLOADED,
- system_metadata=fake_sys_meta)
-
- def test_delete_shelved_image_not_found(self):
- fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_NOT_FOUND}
- self._test_delete(delete_types.DELETE,
- vm_state=vm_states.SHELVED_OFFLOADED,
- system_metadata=fake_sys_meta)
-
- def test_delete_shelved_image_not_authorized(self):
- fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_NOT_AUTHORIZED}
- self._test_delete(delete_types.DELETE,
- vm_state=vm_states.SHELVED_OFFLOADED,
- system_metadata=fake_sys_meta)
-
- def test_delete_shelved_exception(self):
- fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_EXCEPTION}
- self._test_delete(delete_types.DELETE,
- vm_state=vm_states.SHELVED,
- system_metadata=fake_sys_meta)
-
- def test_delete_with_down_host(self):
- self._test_delete(delete_types.DELETE, host='down-host')
-
- def test_delete_soft_with_down_host(self):
- self._test_delete(delete_types.SOFT_DELETE, host='down-host')
-
- def test_delete_soft(self):
- self._test_delete(delete_types.SOFT_DELETE)
-
- def test_delete_forced(self):
- for vm_state in self._get_vm_states():
- self._test_delete(delete_types.FORCE_DELETE, vm_state=vm_state)
-
- def test_delete_forced_when_task_state_deleting(self):
- for vm_state in self._get_vm_states():
- self._test_delete(delete_types.FORCE_DELETE, vm_state=vm_state,
- task_state=task_states.DELETING)
-
- def test_no_delete_when_task_state_deleting(self):
- if self.cell_type == 'api':
- # In 'api' cell, the callback terminate_instance will
- # get called, and quota will be committed before returning.
- # It doesn't check for below condition, hence skipping the test.
- """
- if original_task_state in (task_states.DELETING,
- task_states.SOFT_DELETING):
- LOG.info(_('Instance is already in deleting state, '
- 'ignoring this request'), instance=instance)
- quotas.rollback()
- return
- """
- self.skipTest("API cell doesn't delete instance directly.")
-
- attrs = {}
- fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
-
- for vm_state in self._get_vm_states():
- if vm_state in (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED):
- attrs.update({'system_metadata': fake_sys_meta})
-
- attrs.update({'vm_state': vm_state, 'task_state': 'deleting'})
- reservations = ['fake-resv']
- inst = self._create_instance_obj()
- inst.update(attrs)
- inst._context = self.context
- deltas = {'instances': -1,
- 'cores': -inst.vcpus,
- 'ram': -inst.memory_mb}
- delete_time = datetime.datetime(1955, 11, 5, 9, 30,
- tzinfo=iso8601.iso8601.Utc())
- timeutils.set_time_override(delete_time)
- bdms = []
- migration = objects.Migration._from_db_object(
- self.context, objects.Migration(),
- test_migration.fake_db_migration())
-
- fake_quotas = objects.Quotas.from_reservations(self.context,
- ['rsvs'])
-
- image_api = self.compute_api.image_api
- rpcapi = self.compute_api.compute_rpcapi
-
- with contextlib.nested(
- mock.patch.object(image_api, 'delete'),
- mock.patch.object(inst, 'save'),
- mock.patch.object(objects.BlockDeviceMappingList,
- 'get_by_instance_uuid',
- return_value=bdms),
- mock.patch.object(objects.Migration,
- 'get_by_instance_and_status'),
- mock.patch.object(quota.QUOTAS, 'reserve',
- return_value=reservations),
- mock.patch.object(self.context, 'elevated',
- return_value=self.context),
- mock.patch.object(db, 'service_get_by_compute_host',
- return_value=test_service.fake_service),
- mock.patch.object(self.compute_api.servicegroup_api,
- 'service_is_up',
- return_value=inst.host != 'down-host'),
- mock.patch.object(self.compute_api,
- '_downsize_quota_delta',
- return_value=fake_quotas),
- mock.patch.object(self.compute_api,
- '_reserve_quota_delta'),
- mock.patch.object(self.compute_api,
- '_record_action_start'),
- mock.patch.object(db, 'instance_update_and_get_original'),
- mock.patch.object(inst.info_cache, 'delete'),
- mock.patch.object(self.compute_api.network_api,
- 'deallocate_for_instance'),
- mock.patch.object(db, 'instance_system_metadata_get'),
- mock.patch.object(db, 'instance_destroy'),
- mock.patch.object(compute_utils,
- 'notify_about_instance_usage'),
- mock.patch.object(quota.QUOTAS, 'commit'),
- mock.patch.object(quota.QUOTAS, 'rollback'),
- mock.patch.object(rpcapi, 'confirm_resize'),
- mock.patch.object(rpcapi, 'terminate_instance')
- ) as (
- image_delete,
- save,
- get_by_instance_uuid,
- get_by_instance_and_status,
- reserve,
- elevated,
- service_get_by_compute_host,
- service_is_up,
- _downsize_quota_delta,
- _reserve_quota_delta,
- _record_action_start,
- instance_update_and_get_original,
- delete,
- deallocate_for_instance,
- instance_system_metadata_get,
- instance_destroy,
- notify_about_instance_usage,
- commit,
- rollback,
- confirm_resize,
- terminate_instance
- ):
- if (inst.vm_state in (vm_states.SHELVED,
- vm_states.SHELVED_OFFLOADED)):
- image_delete.return_value = True
-
- if inst.vm_state == vm_states.RESIZED:
- get_by_instance_and_status.return_value = migration
- _downsize_quota_delta.return_value = deltas
-
- self.compute_api.delete(self.context, inst)
- self.assertEqual(1, rollback.call_count)
- self.assertEqual(0, terminate_instance.call_count)
-
- def test_delete_fast_if_host_not_set(self):
- inst = self._create_instance_obj()
- inst.host = ''
- quotas = quotas_obj.Quotas(self.context)
- updates = {'progress': 0, 'task_state': task_states.DELETING}
-
- self.mox.StubOutWithMock(inst, 'save')
- self.mox.StubOutWithMock(db,
- 'block_device_mapping_get_all_by_instance')
-
- self.mox.StubOutWithMock(db, 'constraint')
- self.mox.StubOutWithMock(db, 'instance_destroy')
- self.mox.StubOutWithMock(self.compute_api, '_create_reservations')
- self.mox.StubOutWithMock(compute_utils,
- 'notify_about_instance_usage')
- if self.cell_type == 'api':
- rpcapi = self.compute_api.cells_rpcapi
- else:
- rpcapi = self.compute_api.compute_rpcapi
- self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
-
- db.block_device_mapping_get_all_by_instance(self.context,
- inst.uuid,
- use_slave=False).AndReturn([])
- inst.save()
- self.compute_api._create_reservations(self.context,
- inst, inst.task_state,
- inst.project_id, inst.user_id
- ).AndReturn(quotas)
-
- if self.cell_type == 'api':
- rpcapi.terminate_instance(
- self.context, inst,
- mox.IsA(objects.BlockDeviceMappingList),
- reservations=None)
- else:
- compute_utils.notify_about_instance_usage(
- self.compute_api.notifier, self.context,
- inst, 'delete.start')
- db.constraint(host=mox.IgnoreArg()).AndReturn('constraint')
- delete_time = datetime.datetime(1955, 11, 5, 9, 30,
- tzinfo=iso8601.iso8601.Utc())
- updates['deleted_at'] = delete_time
- updates['deleted'] = True
- fake_inst = fake_instance.fake_db_instance(**updates)
- db.instance_destroy(self.context, inst.uuid,
- constraint='constraint').AndReturn(fake_inst)
- compute_utils.notify_about_instance_usage(
- self.compute_api.notifier, self.context,
- inst, 'delete.end',
- system_metadata=inst.system_metadata)
-
- self.mox.ReplayAll()
-
- self.compute_api.delete(self.context, inst)
- for k, v in updates.items():
- self.assertEqual(inst[k], v)
-
- def test_local_delete_with_deleted_volume(self):
- bdms = [objects.BlockDeviceMapping(
- **fake_block_device.FakeDbBlockDeviceDict(
- {'id': 42, 'volume_id': 'volume_id',
- 'source_type': 'volume', 'destination_type': 'volume',
- 'delete_on_termination': False}))]
-
- def _fake_do_delete(context, instance, bdms,
- rservations=None, local=False):
- pass
-
- inst = self._create_instance_obj()
- inst._context = self.context
-
- self.mox.StubOutWithMock(inst, 'destroy')
- self.mox.StubOutWithMock(self.context, 'elevated')
- self.mox.StubOutWithMock(inst.info_cache, 'delete')
- self.mox.StubOutWithMock(self.compute_api.network_api,
- 'deallocate_for_instance')
- self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
- self.mox.StubOutWithMock(compute_utils,
- 'notify_about_instance_usage')
- self.mox.StubOutWithMock(self.compute_api.volume_api,
- 'terminate_connection')
- self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'destroy')
-
- inst.info_cache.delete()
- compute_utils.notify_about_instance_usage(
- self.compute_api.notifier, self.context,
- inst, 'delete.start')
- self.context.elevated().MultipleTimes().AndReturn(self.context)
- if self.cell_type != 'api':
- self.compute_api.network_api.deallocate_for_instance(
- self.context, inst)
-
- self.compute_api.volume_api.terminate_connection(
- mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).\
- AndRaise(exception. VolumeNotFound('volume_id'))
- bdms[0].destroy(self.context)
-
- inst.destroy()
- compute_utils.notify_about_instance_usage(
- self.compute_api.notifier, self.context,
- inst, 'delete.end',
- system_metadata=inst.system_metadata)
-
- self.mox.ReplayAll()
- self.compute_api._local_delete(self.context, inst, bdms,
- delete_types.DELETE,
- _fake_do_delete)
-
- def test_delete_disabled(self):
- inst = self._create_instance_obj()
- inst.disable_terminate = True
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
- self.mox.ReplayAll()
- self.compute_api.delete(self.context, inst)
-
- def test_delete_soft_rollback(self):
- inst = self._create_instance_obj()
- self.mox.StubOutWithMock(db,
- 'block_device_mapping_get_all_by_instance')
- self.mox.StubOutWithMock(inst, 'save')
-
- delete_time = datetime.datetime(1955, 11, 5)
- timeutils.set_time_override(delete_time)
-
- db.block_device_mapping_get_all_by_instance(
- self.context, inst.uuid, use_slave=False).AndReturn([])
- inst.save().AndRaise(test.TestingException)
-
- self.mox.ReplayAll()
-
- self.assertRaises(test.TestingException,
- self.compute_api.soft_delete, self.context, inst)
-
- def _test_confirm_resize(self, mig_ref_passed=False):
- params = dict(vm_state=vm_states.RESIZED)
- fake_inst = self._create_instance_obj(params=params)
- fake_mig = objects.Migration._from_db_object(
- self.context, objects.Migration(),
- test_migration.fake_db_migration())
-
- self.mox.StubOutWithMock(self.context, 'elevated')
- self.mox.StubOutWithMock(objects.Migration,
- 'get_by_instance_and_status')
- self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta')
- self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
- self.mox.StubOutWithMock(fake_mig, 'save')
- self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
- self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
- 'confirm_resize')
-
- self.context.elevated().AndReturn(self.context)
- if not mig_ref_passed:
- objects.Migration.get_by_instance_and_status(
- self.context, fake_inst['uuid'], 'finished').AndReturn(
- fake_mig)
- self.compute_api._downsize_quota_delta(self.context,
- fake_inst).AndReturn('deltas')
-
- resvs = ['resvs']
- fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
-
- self.compute_api._reserve_quota_delta(self.context, 'deltas',
- fake_inst).AndReturn(fake_quotas)
-
- def _check_mig(expected_task_state=None):
- self.assertEqual('confirming', fake_mig.status)
-
- fake_mig.save().WithSideEffects(_check_mig)
-
- if self.cell_type:
- fake_quotas.commit(self.context)
-
- self.compute_api._record_action_start(self.context, fake_inst,
- 'confirmResize')
-
- self.compute_api.compute_rpcapi.confirm_resize(
- self.context, fake_inst, fake_mig, 'compute-source',
- [] if self.cell_type else fake_quotas.reservations)
-
- self.mox.ReplayAll()
-
- if mig_ref_passed:
- self.compute_api.confirm_resize(self.context, fake_inst,
- migration=fake_mig)
- else:
- self.compute_api.confirm_resize(self.context, fake_inst)
-
- def test_confirm_resize(self):
- self._test_confirm_resize()
-
- def test_confirm_resize_with_migration_ref(self):
- self._test_confirm_resize(mig_ref_passed=True)
-
- def _test_revert_resize(self):
- params = dict(vm_state=vm_states.RESIZED)
- fake_inst = self._create_instance_obj(params=params)
- fake_mig = objects.Migration._from_db_object(
- self.context, objects.Migration(),
- test_migration.fake_db_migration())
-
- self.mox.StubOutWithMock(self.context, 'elevated')
- self.mox.StubOutWithMock(objects.Migration,
- 'get_by_instance_and_status')
- self.mox.StubOutWithMock(self.compute_api,
- '_reverse_upsize_quota_delta')
- self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
- self.mox.StubOutWithMock(fake_inst, 'save')
- self.mox.StubOutWithMock(fake_mig, 'save')
- self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
- self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
- 'revert_resize')
-
- self.context.elevated().AndReturn(self.context)
- objects.Migration.get_by_instance_and_status(
- self.context, fake_inst['uuid'], 'finished').AndReturn(
- fake_mig)
- self.compute_api._reverse_upsize_quota_delta(
- self.context, fake_mig).AndReturn('deltas')
-
- resvs = ['resvs']
- fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
-
- self.compute_api._reserve_quota_delta(self.context, 'deltas',
- fake_inst).AndReturn(fake_quotas)
-
- def _check_state(expected_task_state=None):
- self.assertEqual(task_states.RESIZE_REVERTING,
- fake_inst.task_state)
-
- fake_inst.save(expected_task_state=[None]).WithSideEffects(
- _check_state)
-
- def _check_mig(expected_task_state=None):
- self.assertEqual('reverting', fake_mig.status)
-
- fake_mig.save().WithSideEffects(_check_mig)
-
- if self.cell_type:
- fake_quotas.commit(self.context)
-
- self.compute_api._record_action_start(self.context, fake_inst,
- 'revertResize')
-
- self.compute_api.compute_rpcapi.revert_resize(
- self.context, fake_inst, fake_mig, 'compute-dest',
- [] if self.cell_type else fake_quotas.reservations)
-
- self.mox.ReplayAll()
-
- self.compute_api.revert_resize(self.context, fake_inst)
-
- def test_revert_resize(self):
- self._test_revert_resize()
-
- def test_revert_resize_concurent_fail(self):
- params = dict(vm_state=vm_states.RESIZED)
- fake_inst = self._create_instance_obj(params=params)
- fake_mig = objects.Migration._from_db_object(
- self.context, objects.Migration(),
- test_migration.fake_db_migration())
-
- self.mox.StubOutWithMock(self.context, 'elevated')
- self.mox.StubOutWithMock(objects.Migration,
- 'get_by_instance_and_status')
- self.mox.StubOutWithMock(self.compute_api,
- '_reverse_upsize_quota_delta')
- self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
- self.mox.StubOutWithMock(fake_inst, 'save')
-
- self.context.elevated().AndReturn(self.context)
- objects.Migration.get_by_instance_and_status(
- self.context, fake_inst['uuid'], 'finished').AndReturn(fake_mig)
-
- delta = ['delta']
- self.compute_api._reverse_upsize_quota_delta(
- self.context, fake_mig).AndReturn(delta)
- resvs = ['resvs']
- fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
- self.compute_api._reserve_quota_delta(
- self.context, delta, fake_inst).AndReturn(fake_quotas)
-
- exc = exception.UnexpectedTaskStateError(
- actual=task_states.RESIZE_REVERTING, expected=None)
- fake_inst.save(expected_task_state=[None]).AndRaise(exc)
-
- fake_quotas.rollback(self.context)
-
- self.mox.ReplayAll()
- self.assertRaises(exception.UnexpectedTaskStateError,
- self.compute_api.revert_resize,
- self.context,
- fake_inst)
-
- def _test_resize(self, flavor_id_passed=True,
- same_host=False, allow_same_host=False,
- allow_mig_same_host=False,
- project_id=None,
- extra_kwargs=None,
- same_flavor=False):
- if extra_kwargs is None:
- extra_kwargs = {}
-
- self.flags(allow_resize_to_same_host=allow_same_host,
- allow_migrate_to_same_host=allow_mig_same_host)
-
- params = {}
- if project_id is not None:
- # To test instance w/ different project id than context (admin)
- params['project_id'] = project_id
- fake_inst = self._create_instance_obj(params=params)
-
- self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
- self.mox.StubOutWithMock(self.compute_api, '_upsize_quota_delta')
- self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
- self.mox.StubOutWithMock(fake_inst, 'save')
- self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
- self.mox.StubOutWithMock(self.compute_api.compute_task_api,
- 'resize_instance')
-
- current_flavor = flavors.extract_flavor(fake_inst)
- if flavor_id_passed:
- new_flavor = dict(id=200, flavorid='new-flavor-id',
- name='new_flavor', disabled=False)
- if same_flavor:
- cur_flavor = flavors.extract_flavor(fake_inst)
- new_flavor['id'] = cur_flavor['id']
- flavors.get_flavor_by_flavor_id(
- 'new-flavor-id',
- read_deleted='no').AndReturn(new_flavor)
- else:
- new_flavor = current_flavor
-
- if (self.cell_type == 'compute' or
- not (flavor_id_passed and same_flavor)):
- resvs = ['resvs']
- project_id, user_id = quotas_obj.ids_from_instance(self.context,
- fake_inst)
- fake_quotas = objects.Quotas.from_reservations(self.context,
- resvs)
-
- self.compute_api._upsize_quota_delta(
- self.context, new_flavor,
- current_flavor).AndReturn('deltas')
- self.compute_api._reserve_quota_delta(self.context, 'deltas',
- fake_inst).AndReturn(fake_quotas)
-
- def _check_state(expected_task_state=None):
- self.assertEqual(task_states.RESIZE_PREP,
- fake_inst.task_state)
- self.assertEqual(fake_inst.progress, 0)
- for key, value in extra_kwargs.items():
- self.assertEqual(value, getattr(fake_inst, key))
-
- fake_inst.save(expected_task_state=[None]).WithSideEffects(
- _check_state)
-
- if allow_same_host:
- filter_properties = {'ignore_hosts': []}
- else:
- filter_properties = {'ignore_hosts': [fake_inst['host']]}
-
- if not flavor_id_passed and not allow_mig_same_host:
- filter_properties['ignore_hosts'].append(fake_inst['host'])
-
- expected_reservations = fake_quotas.reservations
- if self.cell_type == 'api':
- fake_quotas.commit(self.context)
- expected_reservations = []
- mig = objects.Migration()
-
- def _get_migration():
- return mig
-
- def _check_mig(ctxt):
- self.assertEqual(fake_inst.uuid, mig.instance_uuid)
- self.assertEqual(current_flavor['id'],
- mig.old_instance_type_id)
- self.assertEqual(new_flavor['id'],
- mig.new_instance_type_id)
- self.assertEqual('finished', mig.status)
-
- self.stubs.Set(objects, 'Migration', _get_migration)
- self.mox.StubOutWithMock(self.context, 'elevated')
- self.mox.StubOutWithMock(mig, 'create')
-
- self.context.elevated().AndReturn(self.context)
- mig.create(self.context).WithSideEffects(_check_mig)
-
- if flavor_id_passed:
- self.compute_api._record_action_start(self.context, fake_inst,
- 'resize')
- else:
- self.compute_api._record_action_start(self.context, fake_inst,
- 'migrate')
-
- scheduler_hint = {'filter_properties': filter_properties}
-
- self.compute_api.compute_task_api.resize_instance(
- self.context, fake_inst, extra_kwargs,
- scheduler_hint=scheduler_hint,
- flavor=new_flavor, reservations=expected_reservations)
-
- self.mox.ReplayAll()
-
- if flavor_id_passed:
- self.compute_api.resize(self.context, fake_inst,
- flavor_id='new-flavor-id',
- **extra_kwargs)
- else:
- self.compute_api.resize(self.context, fake_inst, **extra_kwargs)
-
- def _test_migrate(self, *args, **kwargs):
- self._test_resize(*args, flavor_id_passed=False, **kwargs)
-
- def test_resize(self):
- self._test_resize()
-
- def test_resize_with_kwargs(self):
- self._test_resize(extra_kwargs=dict(cow='moo'))
-
- def test_resize_same_host_and_allowed(self):
- self._test_resize(same_host=True, allow_same_host=True)
-
- def test_resize_same_host_and_not_allowed(self):
- self._test_resize(same_host=True, allow_same_host=False)
-
- def test_resize_different_project_id(self):
- self._test_resize(project_id='different')
-
- def test_migrate(self):
- self._test_migrate()
-
- def test_migrate_with_kwargs(self):
- self._test_migrate(extra_kwargs=dict(cow='moo'))
-
- def test_migrate_same_host_and_allowed(self):
- self._test_migrate(same_host=True, allow_same_host=True)
-
- def test_migrate_same_host_and_not_allowed(self):
- self._test_migrate(same_host=True, allow_same_host=False)
-
- def test_migrate_different_project_id(self):
- self._test_migrate(project_id='different')
-
- def test_resize_invalid_flavor_fails(self):
- self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
- # Should never reach these.
- self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
- self.mox.StubOutWithMock(self.compute_api, 'update')
- self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
- self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
- self.mox.StubOutWithMock(self.compute_api.compute_task_api,
- 'resize_instance')
-
- fake_inst = obj_base.obj_to_primitive(self._create_instance_obj())
- exc = exception.FlavorNotFound(flavor_id='flavor-id')
-
- flavors.get_flavor_by_flavor_id('flavor-id',
- read_deleted='no').AndRaise(exc)
-
- self.mox.ReplayAll()
-
- self.assertRaises(exception.FlavorNotFound,
- self.compute_api.resize, self.context,
- fake_inst, flavor_id='flavor-id')
-
- def test_resize_disabled_flavor_fails(self):
- self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
- # Should never reach these.
- self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
- self.mox.StubOutWithMock(self.compute_api, 'update')
- self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
- self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
- self.mox.StubOutWithMock(self.compute_api.compute_task_api,
- 'resize_instance')
-
- fake_inst = obj_base.obj_to_primitive(self._create_instance_obj())
- fake_flavor = dict(id=200, flavorid='flavor-id', name='foo',
- disabled=True)
-
- flavors.get_flavor_by_flavor_id(
- 'flavor-id', read_deleted='no').AndReturn(fake_flavor)
-
- self.mox.ReplayAll()
-
- self.assertRaises(exception.FlavorNotFound,
- self.compute_api.resize, self.context,
- fake_inst, flavor_id='flavor-id')
-
- @mock.patch.object(flavors, 'get_flavor_by_flavor_id')
- def test_resize_to_zero_disk_flavor_fails(self, get_flavor_by_flavor_id):
- fake_inst = self._create_instance_obj()
- fake_flavor = dict(id=200, flavorid='flavor-id', name='foo',
- root_gb=0)
-
- get_flavor_by_flavor_id.return_value = fake_flavor
-
- self.assertRaises(exception.CannotResizeDisk,
- self.compute_api.resize, self.context,
- fake_inst, flavor_id='flavor-id')
-
- def test_resize_quota_exceeds_fails(self):
- self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
- self.mox.StubOutWithMock(self.compute_api, '_upsize_quota_delta')
- self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
- # Should never reach these.
- self.mox.StubOutWithMock(self.compute_api, 'update')
- self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
- self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
- self.mox.StubOutWithMock(self.compute_api.compute_task_api,
- 'resize_instance')
-
- fake_inst = obj_base.obj_to_primitive(self._create_instance_obj())
- current_flavor = flavors.extract_flavor(fake_inst)
- fake_flavor = dict(id=200, flavorid='flavor-id', name='foo',
- disabled=False)
- flavors.get_flavor_by_flavor_id(
- 'flavor-id', read_deleted='no').AndReturn(fake_flavor)
- deltas = dict(resource=0)
- self.compute_api._upsize_quota_delta(
- self.context, fake_flavor,
- current_flavor).AndReturn(deltas)
- usage = dict(in_use=0, reserved=0)
- quotas = {'resource': 0}
- usages = {'resource': usage}
- overs = ['resource']
- headroom = {'resource': quotas['resource'] -
- (usages['resource']['in_use'] + usages['resource']['reserved'])}
- over_quota_args = dict(quotas=quotas,
- usages=usages,
- overs=overs,
- headroom=headroom)
-
- self.compute_api._reserve_quota_delta(self.context, deltas,
- fake_inst).AndRaise(
- exception.OverQuota(**over_quota_args))
-
- self.mox.ReplayAll()
-
- self.assertRaises(exception.TooManyInstances,
- self.compute_api.resize, self.context,
- fake_inst, flavor_id='flavor-id')
-
- def test_pause(self):
- # Ensure instance can be paused.
- instance = self._create_instance_obj()
- self.assertEqual(instance.vm_state, vm_states.ACTIVE)
- self.assertIsNone(instance.task_state)
-
- self.mox.StubOutWithMock(instance, 'save')
- self.mox.StubOutWithMock(self.compute_api,
- '_record_action_start')
- if self.cell_type == 'api':
- rpcapi = self.compute_api.cells_rpcapi
- else:
- rpcapi = self.compute_api.compute_rpcapi
- self.mox.StubOutWithMock(rpcapi, 'pause_instance')
-
- instance.save(expected_task_state=[None])
- self.compute_api._record_action_start(self.context,
- instance, instance_actions.PAUSE)
- rpcapi.pause_instance(self.context, instance)
-
- self.mox.ReplayAll()
-
- self.compute_api.pause(self.context, instance)
- self.assertEqual(vm_states.ACTIVE, instance.vm_state)
- self.assertEqual(task_states.PAUSING,
- instance.task_state)
-
- def _test_pause_fails(self, vm_state):
- params = dict(vm_state=vm_state)
- instance = self._create_instance_obj(params=params)
- self.assertIsNone(instance.task_state)
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.pause,
- self.context, instance)
-
- def test_pause_fails_invalid_states(self):
- invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE]))
- for state in invalid_vm_states:
- self._test_pause_fails(state)
-
- def test_unpause(self):
- # Ensure instance can be unpaused.
- params = dict(vm_state=vm_states.PAUSED)
- instance = self._create_instance_obj(params=params)
- self.assertEqual(instance.vm_state, vm_states.PAUSED)
- self.assertIsNone(instance.task_state)
-
- self.mox.StubOutWithMock(instance, 'save')
- self.mox.StubOutWithMock(self.compute_api,
- '_record_action_start')
- if self.cell_type == 'api':
- rpcapi = self.compute_api.cells_rpcapi
- else:
- rpcapi = self.compute_api.compute_rpcapi
- self.mox.StubOutWithMock(rpcapi, 'unpause_instance')
-
- instance.save(expected_task_state=[None])
- self.compute_api._record_action_start(self.context,
- instance, instance_actions.UNPAUSE)
- rpcapi.unpause_instance(self.context, instance)
-
- self.mox.ReplayAll()
-
- self.compute_api.unpause(self.context, instance)
- self.assertEqual(vm_states.PAUSED, instance.vm_state)
- self.assertEqual(task_states.UNPAUSING, instance.task_state)
-
- def test_swap_volume_volume_api_usage(self):
- # This test ensures that volume_id arguments are passed to volume_api
- # and that volumes return to previous states in case of error.
- def fake_vol_api_begin_detaching(context, volume_id):
- self.assertTrue(uuidutils.is_uuid_like(volume_id))
- volumes[volume_id]['status'] = 'detaching'
-
- def fake_vol_api_roll_detaching(context, volume_id):
- self.assertTrue(uuidutils.is_uuid_like(volume_id))
- if volumes[volume_id]['status'] == 'detaching':
- volumes[volume_id]['status'] = 'in-use'
-
- def fake_vol_api_reserve(context, volume_id):
- self.assertTrue(uuidutils.is_uuid_like(volume_id))
- self.assertEqual(volumes[volume_id]['status'], 'available')
- volumes[volume_id]['status'] = 'attaching'
-
- def fake_vol_api_unreserve(context, volume_id):
- self.assertTrue(uuidutils.is_uuid_like(volume_id))
- if volumes[volume_id]['status'] == 'attaching':
- volumes[volume_id]['status'] = 'available'
-
- def fake_swap_volume_exc(context, instance, old_volume_id,
- new_volume_id):
- raise AttributeError # Random exception
-
- # Should fail if VM state is not valid
- instance = {'vm_state': vm_states.BUILDING,
- 'launched_at': timeutils.utcnow(),
- 'locked': False,
- 'availability_zone': 'fake_az',
- 'uuid': 'fake'}
- volumes = {}
- old_volume_id = uuidutils.generate_uuid()
- volumes[old_volume_id] = {'id': old_volume_id,
- 'display_name': 'old_volume',
- 'attach_status': 'attached',
- 'instance_uuid': 'fake',
- 'size': 5,
- 'status': 'in-use'}
- new_volume_id = uuidutils.generate_uuid()
- volumes[new_volume_id] = {'id': new_volume_id,
- 'display_name': 'new_volume',
- 'attach_status': 'detached',
- 'instance_uuid': None,
- 'size': 5,
- 'status': 'available'}
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.swap_volume, self.context, instance,
- volumes[old_volume_id], volumes[new_volume_id])
- instance['vm_state'] = vm_states.ACTIVE
- instance['task_state'] = None
-
- # Should fail if old volume is not attached
- volumes[old_volume_id]['attach_status'] = 'detached'
- self.assertRaises(exception.VolumeUnattached,
- self.compute_api.swap_volume, self.context, instance,
- volumes[old_volume_id], volumes[new_volume_id])
- self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
- self.assertEqual(volumes[new_volume_id]['status'], 'available')
- volumes[old_volume_id]['attach_status'] = 'attached'
-
- # Should fail if old volume's instance_uuid is not that of the instance
- volumes[old_volume_id]['instance_uuid'] = 'fake2'
- self.assertRaises(exception.InvalidVolume,
- self.compute_api.swap_volume, self.context, instance,
- volumes[old_volume_id], volumes[new_volume_id])
- self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
- self.assertEqual(volumes[new_volume_id]['status'], 'available')
- volumes[old_volume_id]['instance_uuid'] = 'fake'
-
- # Should fail if new volume is attached
- volumes[new_volume_id]['attach_status'] = 'attached'
- self.assertRaises(exception.InvalidVolume,
- self.compute_api.swap_volume, self.context, instance,
- volumes[old_volume_id], volumes[new_volume_id])
- self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
- self.assertEqual(volumes[new_volume_id]['status'], 'available')
- volumes[new_volume_id]['attach_status'] = 'detached'
-
- # Should fail if new volume is smaller than the old volume
- volumes[new_volume_id]['size'] = 4
- self.assertRaises(exception.InvalidVolume,
- self.compute_api.swap_volume, self.context, instance,
- volumes[old_volume_id], volumes[new_volume_id])
- self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
- self.assertEqual(volumes[new_volume_id]['status'], 'available')
- volumes[new_volume_id]['size'] = 5
-
- # Fail call to swap_volume
- self.stubs.Set(self.compute_api.volume_api, 'begin_detaching',
- fake_vol_api_begin_detaching)
- self.stubs.Set(self.compute_api.volume_api, 'roll_detaching',
- fake_vol_api_roll_detaching)
- self.stubs.Set(self.compute_api.volume_api, 'reserve_volume',
- fake_vol_api_reserve)
- self.stubs.Set(self.compute_api.volume_api, 'unreserve_volume',
- fake_vol_api_unreserve)
- self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume',
- fake_swap_volume_exc)
- self.assertRaises(AttributeError,
- self.compute_api.swap_volume, self.context, instance,
- volumes[old_volume_id], volumes[new_volume_id])
- self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
- self.assertEqual(volumes[new_volume_id]['status'], 'available')
-
- # Should succeed
- self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume',
- lambda c, instance, old_volume_id, new_volume_id: True)
- self.compute_api.swap_volume(self.context, instance,
- volumes[old_volume_id],
- volumes[new_volume_id])
-
- def _test_snapshot_and_backup(self, is_snapshot=True,
- with_base_ref=False, min_ram=None,
- min_disk=None,
- create_fails=False,
- instance_vm_state=vm_states.ACTIVE):
- # 'cache_in_nova' is for testing non-inheritable properties
- # 'user_id' should also not be carried from sys_meta into
- # image property...since it should be set explicitly by
- # _create_image() in compute api.
- fake_sys_meta = dict(image_foo='bar', blah='bug?',
- image_cache_in_nova='dropped',
- cache_in_nova='dropped',
- user_id='meow')
- if with_base_ref:
- fake_sys_meta['image_base_image_ref'] = 'fake-base-ref'
- params = dict(system_metadata=fake_sys_meta, locked=True)
- instance = self._create_instance_obj(params=params)
- instance.vm_state = instance_vm_state
- fake_sys_meta.update(instance.system_metadata)
- extra_props = dict(cow='moo', cat='meow')
-
- self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
- self.mox.StubOutWithMock(self.compute_api.image_api,
- 'create')
- self.mox.StubOutWithMock(instance, 'save')
- self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
- 'snapshot_instance')
- self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
- 'backup_instance')
-
- image_type = is_snapshot and 'snapshot' or 'backup'
-
- expected_sys_meta = dict(fake_sys_meta)
- expected_sys_meta.pop('cache_in_nova')
- expected_sys_meta.pop('image_cache_in_nova')
- expected_sys_meta.pop('user_id')
- expected_sys_meta['foo'] = expected_sys_meta.pop('image_foo')
- if with_base_ref:
- expected_sys_meta['base_image_ref'] = expected_sys_meta.pop(
- 'image_base_image_ref')
-
- expected_props = {'instance_uuid': instance.uuid,
- 'user_id': self.context.user_id,
- 'image_type': image_type}
- expected_props.update(extra_props)
- expected_props.update(expected_sys_meta)
- expected_meta = {'name': 'fake-name',
- 'is_public': False,
- 'properties': expected_props}
- if is_snapshot:
- if min_ram is not None:
- expected_meta['min_ram'] = min_ram
- if min_disk is not None:
- expected_meta['min_disk'] = min_disk
- else:
- expected_props['backup_type'] = 'fake-backup-type'
-
- compute_utils.get_image_metadata(
- self.context, self.compute_api.image_api,
- FAKE_IMAGE_REF, instance).AndReturn(expected_meta)
-
- fake_image = dict(id='fake-image-id')
- mock_method = self.compute_api.image_api.create(
- self.context, expected_meta)
- if create_fails:
- mock_method.AndRaise(test.TestingException())
- else:
- mock_method.AndReturn(fake_image)
-
- def check_state(expected_task_state=None):
- expected_state = (is_snapshot and
- task_states.IMAGE_SNAPSHOT_PENDING or
- task_states.IMAGE_BACKUP)
- self.assertEqual(expected_state, instance.task_state)
-
- if not create_fails:
- instance.save(expected_task_state=[None]).WithSideEffects(
- check_state)
- if is_snapshot:
- self.compute_api.compute_rpcapi.snapshot_instance(
- self.context, instance, fake_image['id'])
- else:
- self.compute_api.compute_rpcapi.backup_instance(
- self.context, instance, fake_image['id'],
- 'fake-backup-type', 'fake-rotation')
-
- self.mox.ReplayAll()
-
- got_exc = False
- try:
- if is_snapshot:
- res = self.compute_api.snapshot(self.context, instance,
- 'fake-name',
- extra_properties=extra_props)
- else:
- res = self.compute_api.backup(self.context, instance,
- 'fake-name',
- 'fake-backup-type',
- 'fake-rotation',
- extra_properties=extra_props)
- self.assertEqual(fake_image, res)
- except test.TestingException:
- got_exc = True
- self.assertEqual(create_fails, got_exc)
- self.mox.UnsetStubs()
-
- def test_snapshot(self):
- self._test_snapshot_and_backup()
-
- def test_snapshot_fails(self):
- self._test_snapshot_and_backup(create_fails=True)
-
- def test_snapshot_invalid_state(self):
- instance = self._create_instance_obj()
- instance.vm_state = vm_states.ACTIVE
- instance.task_state = task_states.IMAGE_SNAPSHOT
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.snapshot,
- self.context, instance, 'fake-name')
- instance.vm_state = vm_states.ACTIVE
- instance.task_state = task_states.IMAGE_BACKUP
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.snapshot,
- self.context, instance, 'fake-name')
- instance.vm_state = vm_states.BUILDING
- instance.task_state = None
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.snapshot,
- self.context, instance, 'fake-name')
-
- def test_snapshot_with_base_image_ref(self):
- self._test_snapshot_and_backup(with_base_ref=True)
-
- def test_snapshot_min_ram(self):
- self._test_snapshot_and_backup(min_ram=42)
-
- def test_snapshot_min_disk(self):
- self._test_snapshot_and_backup(min_disk=42)
-
- def test_backup(self):
- for state in [vm_states.ACTIVE, vm_states.STOPPED,
- vm_states.PAUSED, vm_states.SUSPENDED]:
- self._test_snapshot_and_backup(is_snapshot=False,
- instance_vm_state=state)
-
- def test_backup_fails(self):
- self._test_snapshot_and_backup(is_snapshot=False, create_fails=True)
-
- def test_backup_invalid_state(self):
- instance = self._create_instance_obj()
- instance.vm_state = vm_states.ACTIVE
- instance.task_state = task_states.IMAGE_SNAPSHOT
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.backup,
- self.context, instance, 'fake-name',
- 'fake', 'fake')
- instance.vm_state = vm_states.ACTIVE
- instance.task_state = task_states.IMAGE_BACKUP
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.backup,
- self.context, instance, 'fake-name',
- 'fake', 'fake')
- instance.vm_state = vm_states.BUILDING
- instance.task_state = None
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.backup,
- self.context, instance, 'fake-name',
- 'fake', 'fake')
-
- def test_backup_with_base_image_ref(self):
- self._test_snapshot_and_backup(is_snapshot=False,
- with_base_ref=True)
-
- def test_snapshot_volume_backed(self):
- params = dict(locked=True)
- instance = self._create_instance_obj(params=params)
- instance['root_device_name'] = 'vda'
-
- instance_bdms = []
-
- image_meta = {
- 'id': 'fake-image-id',
- 'properties': {'mappings': []},
- 'status': 'fake-status',
- 'location': 'far-away',
- 'owner': 'fake-tenant',
- }
-
- expect_meta = {
- 'name': 'test-snapshot',
- 'properties': {'root_device_name': 'vda',
- 'mappings': 'DONTCARE'},
- 'size': 0,
- 'is_public': False
- }
-
- def fake_get_all_by_instance(context, instance, use_slave=False):
- return copy.deepcopy(instance_bdms)
-
- def fake_image_create(context, image_meta, data=None):
- self.assertThat(image_meta, matchers.DictMatches(expect_meta))
-
- def fake_volume_get(context, volume_id):
- return {'id': volume_id, 'display_description': ''}
-
- def fake_volume_create_snapshot(context, volume_id, name, description):
- return {'id': '%s-snapshot' % volume_id}
-
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fake_get_all_by_instance)
- self.stubs.Set(self.compute_api.image_api, 'create',
- fake_image_create)
- self.stubs.Set(self.compute_api.volume_api, 'get',
- fake_volume_get)
- self.stubs.Set(self.compute_api.volume_api, 'create_snapshot_force',
- fake_volume_create_snapshot)
-
- # No block devices defined
- self.compute_api.snapshot_volume_backed(
- self.context, instance, copy.deepcopy(image_meta), 'test-snapshot')
-
- bdm = fake_block_device.FakeDbBlockDeviceDict(
- {'no_device': False, 'volume_id': '1', 'boot_index': 0,
- 'connection_info': 'inf', 'device_name': '/dev/vda',
- 'source_type': 'volume', 'destination_type': 'volume'})
- instance_bdms.append(bdm)
-
- expect_meta['properties']['bdm_v2'] = True
- expect_meta['properties']['block_device_mapping'] = []
- expect_meta['properties']['block_device_mapping'].append(
- {'guest_format': None, 'boot_index': 0, 'no_device': None,
- 'image_id': None, 'volume_id': None, 'disk_bus': None,
- 'volume_size': None, 'source_type': 'snapshot',
- 'device_type': None, 'snapshot_id': '1-snapshot',
- 'destination_type': 'volume', 'delete_on_termination': None})
-
- # All the db_only fields and the volume ones are removed
- self.compute_api.snapshot_volume_backed(
- self.context, instance, copy.deepcopy(image_meta), 'test-snapshot')
-
- image_mappings = [{'virtual': 'ami', 'device': 'vda'},
- {'device': 'vda', 'virtual': 'ephemeral0'},
- {'device': 'vdb', 'virtual': 'swap'},
- {'device': 'vdc', 'virtual': 'ephemeral1'}]
-
- image_meta['properties']['mappings'] = image_mappings
-
- expect_meta['properties']['mappings'] = [
- {'virtual': 'ami', 'device': 'vda'}]
-
-        # Check that the mappings from the image properties are included
- self.compute_api.snapshot_volume_backed(
- self.context, instance, copy.deepcopy(image_meta), 'test-snapshot')
-
- def test_volume_snapshot_create(self):
- volume_id = '1'
- create_info = {'id': 'eyedee'}
- fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
- 'id': 123,
- 'device_name': '/dev/sda2',
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'connection_info': "{'fake': 'connection_info'}",
- 'volume_id': 1,
- 'boot_index': -1})
- fake_bdm['instance'] = fake_instance.fake_db_instance()
- fake_bdm['instance_uuid'] = fake_bdm['instance']['uuid']
- fake_bdm = objects.BlockDeviceMapping._from_db_object(
- self.context, objects.BlockDeviceMapping(),
- fake_bdm, expected_attrs=['instance'])
-
- self.mox.StubOutWithMock(objects.BlockDeviceMapping,
- 'get_by_volume_id')
- self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
- 'volume_snapshot_create')
-
- objects.BlockDeviceMapping.get_by_volume_id(
- self.context, volume_id,
- expected_attrs=['instance']).AndReturn(fake_bdm)
- self.compute_api.compute_rpcapi.volume_snapshot_create(self.context,
- fake_bdm['instance'], volume_id, create_info)
-
- self.mox.ReplayAll()
-
- snapshot = self.compute_api.volume_snapshot_create(self.context,
- volume_id, create_info)
-
- expected_snapshot = {
- 'snapshot': {
- 'id': create_info['id'],
- 'volumeId': volume_id,
- },
- }
- self.assertEqual(snapshot, expected_snapshot)
-
- def test_volume_snapshot_delete(self):
- volume_id = '1'
- snapshot_id = '2'
- fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
- 'id': 123,
- 'device_name': '/dev/sda2',
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'connection_info': "{'fake': 'connection_info'}",
- 'volume_id': 1,
- 'boot_index': -1})
- fake_bdm['instance'] = fake_instance.fake_db_instance()
- fake_bdm['instance_uuid'] = fake_bdm['instance']['uuid']
- fake_bdm = objects.BlockDeviceMapping._from_db_object(
- self.context, objects.BlockDeviceMapping(),
- fake_bdm, expected_attrs=['instance'])
-
- self.mox.StubOutWithMock(objects.BlockDeviceMapping,
- 'get_by_volume_id')
- self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
- 'volume_snapshot_delete')
-
- objects.BlockDeviceMapping.get_by_volume_id(
- self.context, volume_id,
- expected_attrs=['instance']).AndReturn(fake_bdm)
- self.compute_api.compute_rpcapi.volume_snapshot_delete(self.context,
- fake_bdm['instance'], volume_id, snapshot_id, {})
-
- self.mox.ReplayAll()
-
- self.compute_api.volume_snapshot_delete(self.context, volume_id,
- snapshot_id, {})
-
- def _test_boot_volume_bootable(self, is_bootable=False):
- def get_vol_data(*args, **kwargs):
- return {'bootable': is_bootable}
- block_device_mapping = [{
- 'id': 1,
- 'device_name': 'vda',
- 'no_device': None,
- 'virtual_name': None,
- 'snapshot_id': None,
- 'volume_id': '1',
- 'delete_on_termination': False,
- }]
-
- expected_meta = {'min_disk': 0, 'min_ram': 0, 'properties': {},
- 'size': 0, 'status': 'active'}
-
- with mock.patch.object(self.compute_api.volume_api, 'get',
- side_effect=get_vol_data):
- if not is_bootable:
- self.assertRaises(exception.InvalidBDMVolumeNotBootable,
- self.compute_api._get_bdm_image_metadata,
- self.context, block_device_mapping)
- else:
- meta = self.compute_api._get_bdm_image_metadata(self.context,
- block_device_mapping)
- self.assertEqual(expected_meta, meta)
-
- def test_boot_volume_non_bootable(self):
- self._test_boot_volume_bootable(False)
-
- def test_boot_volume_bootable(self):
- self._test_boot_volume_bootable(True)
-
- def test_boot_volume_basic_property(self):
- block_device_mapping = [{
- 'id': 1,
- 'device_name': 'vda',
- 'no_device': None,
- 'virtual_name': None,
- 'snapshot_id': None,
- 'volume_id': '1',
- 'delete_on_termination': False,
- }]
- fake_volume = {"volume_image_metadata":
- {"min_ram": 256, "min_disk": 128, "foo": "bar"}}
- with mock.patch.object(self.compute_api.volume_api, 'get',
- return_value=fake_volume):
- meta = self.compute_api._get_bdm_image_metadata(
- self.context, block_device_mapping)
- self.assertEqual(256, meta['min_ram'])
- self.assertEqual(128, meta['min_disk'])
- self.assertEqual('active', meta['status'])
- self.assertEqual('bar', meta['properties']['foo'])
-
- def test_boot_volume_snapshot_basic_property(self):
- block_device_mapping = [{
- 'id': 1,
- 'device_name': 'vda',
- 'no_device': None,
- 'virtual_name': None,
- 'snapshot_id': '2',
- 'volume_id': None,
- 'delete_on_termination': False,
- }]
- fake_volume = {"volume_image_metadata":
- {"min_ram": 256, "min_disk": 128, "foo": "bar"}}
- fake_snapshot = {"volume_id": "1"}
- with contextlib.nested(
- mock.patch.object(self.compute_api.volume_api, 'get',
- return_value=fake_volume),
- mock.patch.object(self.compute_api.volume_api, 'get_snapshot',
- return_value=fake_snapshot)) as (
- volume_get, volume_get_snapshot):
- meta = self.compute_api._get_bdm_image_metadata(
- self.context, block_device_mapping)
- self.assertEqual(256, meta['min_ram'])
- self.assertEqual(128, meta['min_disk'])
- self.assertEqual('active', meta['status'])
- self.assertEqual('bar', meta['properties']['foo'])
- volume_get_snapshot.assert_called_once_with(self.context,
- block_device_mapping[0]['snapshot_id'])
- volume_get.assert_called_once_with(self.context,
- fake_snapshot['volume_id'])
-
- def _create_instance_with_disabled_disk_config(self, object=False):
- sys_meta = {"image_auto_disk_config": "Disabled"}
- params = {"system_metadata": sys_meta}
- instance = self._create_instance_obj(params=params)
- if object:
- return instance
- return obj_base.obj_to_primitive(instance)
-
- def _setup_fake_image_with_disabled_disk_config(self):
- self.fake_image = {
- 'id': 1,
- 'name': 'fake_name',
- 'status': 'active',
- 'properties': {"auto_disk_config": "Disabled"},
- }
-
- def fake_show(obj, context, image_id, **kwargs):
- return self.fake_image
- fake_image.stub_out_image_service(self.stubs)
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
- return self.fake_image['id']
-
- def test_resize_with_disabled_auto_disk_config_fails(self):
- fake_inst = self._create_instance_with_disabled_disk_config()
-
- self.assertRaises(exception.AutoDiskConfigDisabledByImage,
- self.compute_api.resize,
- self.context, fake_inst,
- auto_disk_config=True)
-
- def test_create_with_disabled_auto_disk_config_fails(self):
- image_id = self._setup_fake_image_with_disabled_disk_config()
-
- self.assertRaises(exception.AutoDiskConfigDisabledByImage,
- self.compute_api.create, self.context,
- "fake_flavor", image_id, auto_disk_config=True)
-
- def test_rebuild_with_disabled_auto_disk_config_fails(self):
- fake_inst = self._create_instance_with_disabled_disk_config(
- object=True)
- image_id = self._setup_fake_image_with_disabled_disk_config()
- self.assertRaises(exception.AutoDiskConfigDisabledByImage,
- self.compute_api.rebuild,
- self.context,
- fake_inst,
- image_id,
- "new password",
- auto_disk_config=True)
-
- @mock.patch.object(objects.Instance, 'save')
- @mock.patch.object(objects.Instance, 'get_flavor')
- @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
- @mock.patch.object(compute_api.API, '_get_image')
- @mock.patch.object(compute_api.API, '_check_auto_disk_config')
- @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
- @mock.patch.object(compute_api.API, '_record_action_start')
- def test_rebuild(self, _record_action_start,
- _checks_for_create_and_rebuild, _check_auto_disk_config,
- _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save):
- orig_system_metadata = {}
- instance = fake_instance.fake_instance_obj(self.context,
- vm_state=vm_states.ACTIVE, cell_name='fake-cell',
- launched_at=timeutils.utcnow(),
- system_metadata=orig_system_metadata,
- expected_attrs=['system_metadata'])
- get_flavor.return_value = test_flavor.fake_flavor
- flavor = instance.get_flavor()
- image_href = ''
- image = {"min_ram": 10, "min_disk": 1,
- "properties": {'architecture': arch.X86_64}}
- admin_pass = ''
- files_to_inject = []
- bdms = []
-
- _get_image.return_value = (None, image)
- bdm_get_by_instance_uuid.return_value = bdms
-
- with mock.patch.object(self.compute_api.compute_task_api,
- 'rebuild_instance') as rebuild_instance:
- self.compute_api.rebuild(self.context, instance, image_href,
- admin_pass, files_to_inject)
-
- rebuild_instance.assert_called_once_with(self.context,
- instance=instance, new_pass=admin_pass,
- injected_files=files_to_inject, image_ref=image_href,
- orig_image_ref=image_href,
- orig_sys_metadata=orig_system_metadata, bdms=bdms,
- preserve_ephemeral=False, host=instance.host, kwargs={})
-
- _check_auto_disk_config.assert_called_once_with(image=image)
- _checks_for_create_and_rebuild.assert_called_once_with(self.context,
- None, image, flavor, {}, [])
- self.assertNotEqual(orig_system_metadata, instance.system_metadata)
-
- @mock.patch.object(objects.Instance, 'save')
- @mock.patch.object(objects.Instance, 'get_flavor')
- @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
- @mock.patch.object(compute_api.API, '_get_image')
- @mock.patch.object(compute_api.API, '_check_auto_disk_config')
- @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
- @mock.patch.object(compute_api.API, '_record_action_start')
- def test_rebuild_change_image(self, _record_action_start,
- _checks_for_create_and_rebuild, _check_auto_disk_config,
- _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save):
- orig_system_metadata = {}
- get_flavor.return_value = test_flavor.fake_flavor
- orig_image_href = 'orig_image'
- orig_image = {"min_ram": 10, "min_disk": 1,
- "properties": {'architecture': arch.X86_64,
- 'vm_mode': 'hvm'}}
- new_image_href = 'new_image'
- new_image = {"min_ram": 10, "min_disk": 1,
- "properties": {'architecture': arch.X86_64,
- 'vm_mode': 'xen'}}
- admin_pass = ''
- files_to_inject = []
- bdms = []
-
- instance = fake_instance.fake_instance_obj(self.context,
- vm_state=vm_states.ACTIVE, cell_name='fake-cell',
- launched_at=timeutils.utcnow(),
- system_metadata=orig_system_metadata,
- expected_attrs=['system_metadata'],
- image_ref=orig_image_href,
- vm_mode=vm_mode.HVM)
- flavor = instance.get_flavor()
-
- def get_image(context, image_href):
- if image_href == new_image_href:
- return (None, new_image)
- if image_href == orig_image_href:
- return (None, orig_image)
- _get_image.side_effect = get_image
- bdm_get_by_instance_uuid.return_value = bdms
-
- with mock.patch.object(self.compute_api.compute_task_api,
- 'rebuild_instance') as rebuild_instance:
- self.compute_api.rebuild(self.context, instance, new_image_href,
- admin_pass, files_to_inject)
-
- rebuild_instance.assert_called_once_with(self.context,
- instance=instance, new_pass=admin_pass,
- injected_files=files_to_inject, image_ref=new_image_href,
- orig_image_ref=orig_image_href,
- orig_sys_metadata=orig_system_metadata, bdms=bdms,
- preserve_ephemeral=False, host=instance.host, kwargs={})
-
- _check_auto_disk_config.assert_called_once_with(image=new_image)
- _checks_for_create_and_rebuild.assert_called_once_with(self.context,
- None, new_image, flavor, {}, [])
- self.assertEqual(vm_mode.XEN, instance.vm_mode)
-
- def _test_check_injected_file_quota_onset_file_limit_exceeded(self,
- side_effect):
- injected_files = [
- {
- "path": "/etc/banner.txt",
- "contents": "foo"
- }
- ]
- with mock.patch.object(quota.QUOTAS, 'limit_check',
- side_effect=side_effect):
- self.compute_api._check_injected_file_quota(
- self.context, injected_files)
-
- def test_check_injected_file_quota_onset_file_limit_exceeded(self):
- # This is the first call to limit_check.
- side_effect = exception.OverQuota(overs='injected_files')
- self.assertRaises(exception.OnsetFileLimitExceeded,
- self._test_check_injected_file_quota_onset_file_limit_exceeded,
- side_effect)
-
- def test_check_injected_file_quota_onset_file_path_limit(self):
- # This is the second call to limit_check.
- side_effect = (mock.DEFAULT,
- exception.OverQuota(overs='injected_file_path_bytes'))
- self.assertRaises(exception.OnsetFilePathLimitExceeded,
- self._test_check_injected_file_quota_onset_file_limit_exceeded,
- side_effect)
-
- def test_check_injected_file_quota_onset_file_content_limit(self):
- # This is the second call to limit_check but with different overs.
- side_effect = (mock.DEFAULT,
- exception.OverQuota(overs='injected_file_content_bytes'))
- self.assertRaises(exception.OnsetFileContentLimitExceeded,
- self._test_check_injected_file_quota_onset_file_limit_exceeded,
- side_effect)
-
- @mock.patch('nova.objects.Quotas.commit')
- @mock.patch('nova.objects.Quotas.reserve')
- @mock.patch('nova.objects.Instance.save')
- @mock.patch('nova.objects.InstanceAction.action_start')
- def test_restore(self, action_start, instance_save, quota_reserve,
- quota_commit):
- instance = self._create_instance_obj()
- instance.vm_state = vm_states.SOFT_DELETED
- instance.task_state = None
- instance.save()
- with mock.patch.object(self.compute_api, 'compute_rpcapi') as rpc:
- self.compute_api.restore(self.context, instance)
- rpc.restore_instance.assert_called_once_with(self.context,
- instance)
- self.assertEqual(instance.task_state, task_states.RESTORING)
- self.assertEqual(1, quota_commit.call_count)
-
- def test_external_instance_event(self):
- instances = [
- objects.Instance(uuid='uuid1', host='host1'),
- objects.Instance(uuid='uuid2', host='host1'),
- objects.Instance(uuid='uuid3', host='host2'),
- ]
- events = [
- objects.InstanceExternalEvent(instance_uuid='uuid1'),
- objects.InstanceExternalEvent(instance_uuid='uuid2'),
- objects.InstanceExternalEvent(instance_uuid='uuid3'),
- ]
- self.compute_api.compute_rpcapi = mock.MagicMock()
- self.compute_api.external_instance_event(self.context,
- instances, events)
- method = self.compute_api.compute_rpcapi.external_instance_event
- method.assert_any_call(self.context, instances[0:2], events[0:2])
- method.assert_any_call(self.context, instances[2:], events[2:])
- self.assertEqual(2, method.call_count)
-
- def test_volume_ops_invalid_task_state(self):
- instance = self._create_instance_obj()
- self.assertEqual(instance.vm_state, vm_states.ACTIVE)
- instance.task_state = 'Any'
- volume_id = uuidutils.generate_uuid()
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.attach_volume,
- self.context, instance, volume_id)
-
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.detach_volume,
- self.context, instance, volume_id)
-
- new_volume_id = uuidutils.generate_uuid()
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.swap_volume,
- self.context, instance,
- volume_id, new_volume_id)
-
- @mock.patch.object(cinder.API, 'get',
- side_effect=exception.CinderConnectionFailed(reason='error'))
- def test_get_bdm_image_metadata_with_cinder_down(self, mock_get):
- bdms = [objects.BlockDeviceMapping(
- **fake_block_device.FakeDbBlockDeviceDict(
- {
- 'id': 1,
- 'volume_id': 1,
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'device_name': 'vda',
- }))]
- self.assertRaises(exception.CinderConnectionFailed,
- self.compute_api._get_bdm_image_metadata,
- self.context,
- bdms, legacy_bdm=True)
-
- @mock.patch.object(cinder.API, 'get')
- @mock.patch.object(cinder.API, 'check_attach',
- side_effect=exception.InvalidVolume(reason='error'))
- def test_validate_bdm_with_error_volume(self, mock_check_attach, mock_get):
- # Tests that an InvalidVolume exception raised from
- # volume_api.check_attach due to the volume status not being
- # 'available' results in _validate_bdm re-raising InvalidVolume.
- instance = self._create_instance_obj()
- instance_type = self._create_flavor()
- volume_id = 'e856840e-9f5b-4894-8bde-58c6e29ac1e8'
- volume_info = {'status': 'error',
- 'attach_status': 'detached',
- 'id': volume_id}
- mock_get.return_value = volume_info
- bdms = [objects.BlockDeviceMapping(
- **fake_block_device.FakeDbBlockDeviceDict(
- {
- 'boot_index': 0,
- 'volume_id': volume_id,
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'device_name': 'vda',
- }))]
-
- self.assertRaises(exception.InvalidVolume,
- self.compute_api._validate_bdm,
- self.context,
- instance, instance_type, bdms)
-
- mock_get.assert_called_once_with(self.context, volume_id)
- mock_check_attach.assert_called_once_with(
- self.context, volume_info, instance=instance)
-
- @mock.patch.object(cinder.API, 'get_snapshot',
- side_effect=exception.CinderConnectionFailed(reason='error'))
- @mock.patch.object(cinder.API, 'get',
- side_effect=exception.CinderConnectionFailed(reason='error'))
- def test_validate_bdm_with_cinder_down(self, mock_get, mock_get_snapshot):
- instance = self._create_instance_obj()
- instance_type = self._create_flavor()
- bdm = [objects.BlockDeviceMapping(
- **fake_block_device.FakeDbBlockDeviceDict(
- {
- 'id': 1,
- 'volume_id': 1,
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'device_name': 'vda',
- 'boot_index': 0,
- }))]
- bdms = [objects.BlockDeviceMapping(
- **fake_block_device.FakeDbBlockDeviceDict(
- {
- 'id': 1,
- 'snapshot_id': 1,
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'device_name': 'vda',
- 'boot_index': 0,
- }))]
- self.assertRaises(exception.CinderConnectionFailed,
- self.compute_api._validate_bdm,
- self.context,
- instance, instance_type, bdm)
- self.assertRaises(exception.CinderConnectionFailed,
- self.compute_api._validate_bdm,
- self.context,
- instance, instance_type, bdms)
-
- def _test_create_db_entry_for_new_instance_with_cinder_error(self,
- expected_exception):
-
- @mock.patch.object(objects.Instance, 'create')
- @mock.patch.object(compute_api.SecurityGroupAPI, 'ensure_default')
- @mock.patch.object(compute_api.API, '_populate_instance_names')
- @mock.patch.object(compute_api.API, '_populate_instance_for_create')
- def do_test(self, mock_create, mock_names, mock_ensure,
- mock_inst_create):
- instance = self._create_instance_obj()
- instance['display_name'] = 'FAKE_DISPLAY_NAME'
- instance['shutdown_terminate'] = False
- instance_type = self._create_flavor()
- fake_image = {
- 'id': 'fake-image-id',
- 'properties': {'mappings': []},
- 'status': 'fake-status',
- 'location': 'far-away'}
- fake_security_group = None
- fake_num_instances = 1
- fake_index = 1
- bdm = [objects.BlockDeviceMapping(
- **fake_block_device.FakeDbBlockDeviceDict(
- {
- 'id': 1,
- 'volume_id': 1,
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'device_name': 'vda',
- 'boot_index': 0,
- }))]
- with mock.patch.object(instance, "destroy") as destroy:
- self.assertRaises(expected_exception,
- self.compute_api.
- create_db_entry_for_new_instance,
- self.context,
- instance_type,
- fake_image,
- instance,
- fake_security_group,
- bdm,
- fake_num_instances,
- fake_index)
- destroy.assert_called_once_with(self.context)
-
- # We use a nested method so we can decorate with the mocks.
- do_test(self)
-
- @mock.patch.object(cinder.API, 'get',
- side_effect=exception.CinderConnectionFailed(reason='error'))
- def test_create_db_entry_for_new_instancewith_cinder_down(self, mock_get):
- self._test_create_db_entry_for_new_instance_with_cinder_error(
- expected_exception=exception.CinderConnectionFailed)
-
- @mock.patch.object(cinder.API, 'get',
- return_value={'id': 1, 'status': 'error',
- 'attach_status': 'detached'})
- def test_create_db_entry_for_new_instancewith_error_volume(self, mock_get):
- self._test_create_db_entry_for_new_instance_with_cinder_error(
- expected_exception=exception.InvalidVolume)
-
- def _test_rescue(self, vm_state):
- instance = self._create_instance_obj(params={'vm_state': vm_state})
- bdms = []
- with contextlib.nested(
- mock.patch.object(objects.BlockDeviceMappingList,
- 'get_by_instance_uuid', return_value=bdms),
- mock.patch.object(self.compute_api, 'is_volume_backed_instance',
- return_value=False),
- mock.patch.object(instance, 'save'),
- mock.patch.object(self.compute_api, '_record_action_start'),
- mock.patch.object(self.compute_api.compute_rpcapi,
- 'rescue_instance')
- ) as (
- bdm_get_by_instance_uuid, volume_backed_inst, instance_save,
- record_action_start, rpcapi_rescue_instance
- ):
- self.compute_api.rescue(self.context, instance)
- # assert field values set on the instance object
- self.assertEqual(task_states.RESCUING, instance.task_state)
- # assert our mock calls
- bdm_get_by_instance_uuid.assert_called_once_with(
- self.context, instance.uuid)
- volume_backed_inst.assert_called_once_with(
- self.context, instance, bdms)
- instance_save.assert_called_once_with(expected_task_state=[None])
- record_action_start.assert_called_once_with(
- self.context, instance, instance_actions.RESCUE)
- rpcapi_rescue_instance.assert_called_once_with(
- self.context, instance=instance, rescue_password=None,
- rescue_image_ref=None)
-
- def test_rescue_active(self):
- self._test_rescue(vm_state=vm_states.ACTIVE)
-
- def test_rescue_stopped(self):
- self._test_rescue(vm_state=vm_states.STOPPED)
-
- def test_rescue_error(self):
- self._test_rescue(vm_state=vm_states.ERROR)
-
- def test_unrescue(self):
- instance = self._create_instance_obj(
- params={'vm_state': vm_states.RESCUED})
- with contextlib.nested(
- mock.patch.object(instance, 'save'),
- mock.patch.object(self.compute_api, '_record_action_start'),
- mock.patch.object(self.compute_api.compute_rpcapi,
- 'unrescue_instance')
- ) as (
- instance_save, record_action_start, rpcapi_unrescue_instance
- ):
- self.compute_api.unrescue(self.context, instance)
- # assert field values set on the instance object
- self.assertEqual(task_states.UNRESCUING, instance.task_state)
- # assert our mock calls
- instance_save.assert_called_once_with(expected_task_state=[None])
- record_action_start.assert_called_once_with(
- self.context, instance, instance_actions.UNRESCUE)
- rpcapi_unrescue_instance.assert_called_once_with(
- self.context, instance=instance)
-
- def test_set_admin_password_invalid_state(self):
- # Tests that InstanceInvalidState is raised when not ACTIVE.
- instance = self._create_instance_obj({'vm_state': vm_states.STOPPED})
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.set_admin_password,
- self.context, instance)
-
- def test_set_admin_password(self):
- # Ensure instance can have its admin password set.
- instance = self._create_instance_obj()
-
- @mock.patch.object(objects.Instance, 'save')
- @mock.patch.object(self.compute_api, '_record_action_start')
- @mock.patch.object(self.compute_api.compute_rpcapi,
- 'set_admin_password')
- def do_test(compute_rpcapi_mock, record_mock, instance_save_mock):
- # call the API
- self.compute_api.set_admin_password(self.context, instance)
- # make our assertions
- instance_save_mock.assert_called_once_with(
- expected_task_state=[None])
- record_mock.assert_called_once_with(
- self.context, instance, instance_actions.CHANGE_PASSWORD)
- compute_rpcapi_mock.assert_called_once_with(
- self.context, instance=instance, new_pass=None)
-
- do_test()
-
- def _test_attach_interface_invalid_state(self, state):
- instance = self._create_instance_obj(
- params={'vm_state': state})
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.attach_interface,
- self.context, instance, '', '', '', [])
-
- def test_attach_interface_invalid_state(self):
- for state in [vm_states.BUILDING, vm_states.DELETED,
- vm_states.ERROR, vm_states.RESCUED,
- vm_states.RESIZED, vm_states.SOFT_DELETED,
- vm_states.SUSPENDED, vm_states.SHELVED,
- vm_states.SHELVED_OFFLOADED]:
- self._test_attach_interface_invalid_state(state)
-
- def _test_detach_interface_invalid_state(self, state):
- instance = self._create_instance_obj(
- params={'vm_state': state})
- self.assertRaises(exception.InstanceInvalidState,
- self.compute_api.detach_interface,
- self.context, instance, '', '', '', [])
-
- def test_detach_interface_invalid_state(self):
- for state in [vm_states.BUILDING, vm_states.DELETED,
- vm_states.ERROR, vm_states.RESCUED,
- vm_states.RESIZED, vm_states.SOFT_DELETED,
- vm_states.SUSPENDED, vm_states.SHELVED,
- vm_states.SHELVED_OFFLOADED]:
- self._test_detach_interface_invalid_state(state)
-
-
-class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
- def setUp(self):
- super(ComputeAPIUnitTestCase, self).setUp()
- self.compute_api = compute_api.API()
- self.cell_type = None
-
- def test_resize_same_flavor_fails(self):
- self.assertRaises(exception.CannotResizeToSameFlavor,
- self._test_resize, same_flavor=True)
-
-
-class ComputeAPIAPICellUnitTestCase(_ComputeAPIUnitTestMixIn,
- test.NoDBTestCase):
- def setUp(self):
- super(ComputeAPIAPICellUnitTestCase, self).setUp()
- self.flags(cell_type='api', enable=True, group='cells')
- self.compute_api = compute_cells_api.ComputeCellsAPI()
- self.cell_type = 'api'
-
- def test_resize_same_flavor_fails(self):
- self.assertRaises(exception.CannotResizeToSameFlavor,
- self._test_resize, same_flavor=True)
-
-
-class ComputeAPIComputeCellUnitTestCase(_ComputeAPIUnitTestMixIn,
- test.NoDBTestCase):
- def setUp(self):
- super(ComputeAPIComputeCellUnitTestCase, self).setUp()
- self.flags(cell_type='compute', enable=True, group='cells')
- self.compute_api = compute_api.API()
- self.cell_type = 'compute'
-
- def test_resize_same_flavor_passes(self):
- self._test_resize(same_flavor=True)
-
-
-class DiffDictTestCase(test.NoDBTestCase):
- """Unit tests for _diff_dict()."""
-
- def test_no_change(self):
- old = dict(a=1, b=2, c=3)
- new = dict(a=1, b=2, c=3)
- diff = compute_api._diff_dict(old, new)
-
- self.assertEqual(diff, {})
-
- def test_new_key(self):
- old = dict(a=1, b=2, c=3)
- new = dict(a=1, b=2, c=3, d=4)
- diff = compute_api._diff_dict(old, new)
-
- self.assertEqual(diff, dict(d=['+', 4]))
-
- def test_changed_key(self):
- old = dict(a=1, b=2, c=3)
- new = dict(a=1, b=4, c=3)
- diff = compute_api._diff_dict(old, new)
-
- self.assertEqual(diff, dict(b=['+', 4]))
-
- def test_removed_key(self):
- old = dict(a=1, b=2, c=3)
- new = dict(a=1, c=3)
- diff = compute_api._diff_dict(old, new)
-
- self.assertEqual(diff, dict(b=['-']))
-
-
-class SecurityGroupAPITest(test.NoDBTestCase):
- def setUp(self):
- super(SecurityGroupAPITest, self).setUp()
- self.secgroup_api = compute_api.SecurityGroupAPI()
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id,
- self.project_id)
-
- @mock.patch('nova.objects.security_group.SecurityGroupList.'
- 'get_by_instance')
- def test_get_instance_security_groups(self, mock_get):
- groups = objects.SecurityGroupList()
- groups.objects = [objects.SecurityGroup(name='foo'),
- objects.SecurityGroup(name='bar')]
- mock_get.return_value = groups
- names = self.secgroup_api.get_instance_security_groups(self.context,
- 'fake-uuid')
- self.assertEqual([{'name': 'bar'}, {'name': 'foo'}], sorted(names))
- self.assertEqual(1, mock_get.call_count)
- self.assertEqual('fake-uuid', mock_get.call_args_list[0][0][1].uuid)
diff --git a/nova/tests/compute/test_compute_cells.py b/nova/tests/compute/test_compute_cells.py
deleted file mode 100644
index b96a47a926..0000000000
--- a/nova/tests/compute/test_compute_cells.py
+++ /dev/null
@@ -1,332 +0,0 @@
-# Copyright (c) 2012 Rackspace Hosting
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For Compute w/ Cells
-"""
-import functools
-import inspect
-
-import mock
-from oslo.config import cfg
-from oslo.utils import timeutils
-
-from nova.cells import manager
-from nova.compute import api as compute_api
-from nova.compute import cells_api as compute_cells_api
-from nova.compute import delete_types
-from nova.compute import flavors
-from nova.compute import vm_states
-from nova import context
-from nova import db
-from nova import objects
-from nova import quota
-from nova import test
-from nova.tests.compute import test_compute
-from nova.tests import fake_instance
-
-
-ORIG_COMPUTE_API = None
-cfg.CONF.import_opt('enable', 'nova.cells.opts', group='cells')
-
-
-def stub_call_to_cells(context, instance, method, *args, **kwargs):
- fn = getattr(ORIG_COMPUTE_API, method)
- original_instance = kwargs.pop('original_instance', None)
- if original_instance:
- instance = original_instance
- # Restore this in 'child cell DB'
- db.instance_update(context, instance['uuid'],
- dict(vm_state=instance['vm_state'],
- task_state=instance['task_state']))
-
- # Use NoopQuotaDriver in child cells.
- saved_quotas = quota.QUOTAS
- quota.QUOTAS = quota.QuotaEngine(
- quota_driver_class=quota.NoopQuotaDriver())
- compute_api.QUOTAS = quota.QUOTAS
- try:
- return fn(context, instance, *args, **kwargs)
- finally:
- quota.QUOTAS = saved_quotas
- compute_api.QUOTAS = saved_quotas
-
-
-def stub_cast_to_cells(context, instance, method, *args, **kwargs):
- fn = getattr(ORIG_COMPUTE_API, method)
- original_instance = kwargs.pop('original_instance', None)
- if original_instance:
- instance = original_instance
- # Restore this in 'child cell DB'
- db.instance_update(context, instance['uuid'],
- dict(vm_state=instance['vm_state'],
- task_state=instance['task_state']))
-
- # Use NoopQuotaDriver in child cells.
- saved_quotas = quota.QUOTAS
- quota.QUOTAS = quota.QuotaEngine(
- quota_driver_class=quota.NoopQuotaDriver())
- compute_api.QUOTAS = quota.QUOTAS
- try:
- fn(context, instance, *args, **kwargs)
- finally:
- quota.QUOTAS = saved_quotas
- compute_api.QUOTAS = saved_quotas
-
-
-def deploy_stubs(stubs, api, original_instance=None):
- call = stub_call_to_cells
- cast = stub_cast_to_cells
-
- if original_instance:
- kwargs = dict(original_instance=original_instance)
- call = functools.partial(stub_call_to_cells, **kwargs)
- cast = functools.partial(stub_cast_to_cells, **kwargs)
-
- stubs.Set(api, '_call_to_cells', call)
- stubs.Set(api, '_cast_to_cells', cast)
-
-
-class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
- def setUp(self):
- super(CellsComputeAPITestCase, self).setUp()
- global ORIG_COMPUTE_API
- ORIG_COMPUTE_API = self.compute_api
- self.flags(enable=True, group='cells')
-
- def _fake_cell_read_only(*args, **kwargs):
- return False
-
- def _fake_validate_cell(*args, **kwargs):
- return
-
- def _nop_update(context, instance, **kwargs):
- return instance
-
- self.compute_api = compute_cells_api.ComputeCellsAPI()
- self.stubs.Set(self.compute_api, '_cell_read_only',
- _fake_cell_read_only)
- self.stubs.Set(self.compute_api, '_validate_cell',
- _fake_validate_cell)
-
- # NOTE(belliott) Don't update the instance state
- # for the tests at the API layer. Let it happen after
- # the stub cast to cells so that expected_task_states
- # match.
- self.stubs.Set(self.compute_api, 'update', _nop_update)
-
- deploy_stubs(self.stubs, self.compute_api)
-
- def tearDown(self):
- global ORIG_COMPUTE_API
- self.compute_api = ORIG_COMPUTE_API
- super(CellsComputeAPITestCase, self).tearDown()
-
- def test_instance_metadata(self):
- self.skipTest("Test is incompatible with cells.")
-
- def test_evacuate(self):
- self.skipTest("Test is incompatible with cells.")
-
- def test_error_evacuate(self):
- self.skipTest("Test is incompatible with cells.")
-
- def test_delete_instance_no_cell(self):
- cells_rpcapi = self.compute_api.cells_rpcapi
- self.mox.StubOutWithMock(cells_rpcapi,
- 'instance_delete_everywhere')
- inst = self._create_fake_instance_obj()
- cells_rpcapi.instance_delete_everywhere(self.context,
- inst, delete_types.DELETE)
- self.mox.ReplayAll()
- self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
- lambda *a, **kw: None)
- self.compute_api.delete(self.context, inst)
-
- def test_soft_delete_instance_no_cell(self):
- cells_rpcapi = self.compute_api.cells_rpcapi
- self.mox.StubOutWithMock(cells_rpcapi,
- 'instance_delete_everywhere')
- inst = self._create_fake_instance_obj()
- cells_rpcapi.instance_delete_everywhere(self.context,
- inst, delete_types.SOFT_DELETE)
- self.mox.ReplayAll()
- self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
- lambda *a, **kw: None)
- self.compute_api.soft_delete(self.context, inst)
-
- def test_get_migrations(self):
- filters = {'cell_name': 'ChildCell', 'status': 'confirmed'}
- migrations = {'migrations': [{'id': 1234}]}
- cells_rpcapi = self.compute_api.cells_rpcapi
- self.mox.StubOutWithMock(cells_rpcapi, 'get_migrations')
- cells_rpcapi.get_migrations(self.context,
- filters).AndReturn(migrations)
- self.mox.ReplayAll()
-
- response = self.compute_api.get_migrations(self.context, filters)
-
- self.assertEqual(migrations, response)
-
- @mock.patch('nova.cells.messaging._TargetedMessage')
- def test_rebuild_sig(self, mock_msg):
- # TODO(belliott) Cells could benefit from better testing to ensure API
- # and manager signatures stay up to date
-
- def wire(version):
- # wire the rpc cast directly to the manager method to make sure
- # the signature matches
- cells_mgr = manager.CellsManager()
-
- def cast(context, method, *args, **kwargs):
- fn = getattr(cells_mgr, method)
- fn(context, *args, **kwargs)
-
- cells_mgr.cast = cast
- return cells_mgr
-
- cells_rpcapi = self.compute_api.cells_rpcapi
- client = cells_rpcapi.client
-
- with mock.patch.object(client, 'prepare', side_effect=wire):
- inst = self._create_fake_instance_obj()
- inst.cell_name = 'mycell'
-
- cells_rpcapi.rebuild_instance(self.context, inst, 'pass', None,
- None, None, None, None,
- recreate=False,
- on_shared_storage=False, host='host',
- preserve_ephemeral=True, kwargs=None)
-
- # one targeted message should have been created
- self.assertEqual(1, mock_msg.call_count)
-
-
-class CellsConductorAPIRPCRedirect(test.NoDBTestCase):
- def setUp(self):
- super(CellsConductorAPIRPCRedirect, self).setUp()
-
- self.compute_api = compute_cells_api.ComputeCellsAPI()
- self.cells_rpcapi = mock.MagicMock()
- self.compute_api._compute_task_api.cells_rpcapi = self.cells_rpcapi
-
- self.context = context.RequestContext('fake', 'fake')
-
- @mock.patch.object(compute_api.API, '_record_action_start')
- @mock.patch.object(compute_api.API, '_provision_instances')
- @mock.patch.object(compute_api.API, '_check_and_transform_bdm')
- @mock.patch.object(compute_api.API, '_get_image')
- @mock.patch.object(compute_api.API, '_validate_and_build_base_options')
- def test_build_instances(self, _validate, _get_image, _check_bdm,
- _provision, _record_action_start):
- _get_image.return_value = (None, 'fake-image')
- _validate.return_value = ({}, 1)
- _check_bdm.return_value = 'bdms'
- _provision.return_value = 'instances'
-
- self.compute_api.create(self.context, 'fake-flavor', 'fake-image')
-
- # Subsequent tests in class are verifying the hooking. We don't check
- # args since this is verified in compute test code.
- self.assertTrue(self.cells_rpcapi.build_instances.called)
-
- @mock.patch.object(compute_api.API, '_record_action_start')
- @mock.patch.object(compute_api.API, '_resize_cells_support')
- @mock.patch.object(compute_api.API, '_reserve_quota_delta')
- @mock.patch.object(compute_api.API, '_upsize_quota_delta')
- @mock.patch.object(objects.Instance, 'save')
- @mock.patch.object(flavors, 'extract_flavor')
- @mock.patch.object(compute_api.API, '_check_auto_disk_config')
- def test_resize_instance(self, _check, _extract, _save, _upsize, _reserve,
- _cells, _record):
- _extract.return_value = {'name': 'fake', 'id': 'fake'}
- orig_system_metadata = {}
- instance = fake_instance.fake_instance_obj(self.context,
- vm_state=vm_states.ACTIVE, cell_name='fake-cell',
- launched_at=timeutils.utcnow(),
- system_metadata=orig_system_metadata,
- expected_attrs=['system_metadata'])
-
- self.compute_api.resize(self.context, instance)
- self.assertTrue(self.cells_rpcapi.resize_instance.called)
-
- @mock.patch.object(compute_api.API, '_record_action_start')
- @mock.patch.object(objects.Instance, 'save')
- def test_live_migrate_instance(self, instance_save, _record):
- orig_system_metadata = {}
- instance = fake_instance.fake_instance_obj(self.context,
- vm_state=vm_states.ACTIVE, cell_name='fake-cell',
- launched_at=timeutils.utcnow(),
- system_metadata=orig_system_metadata,
- expected_attrs=['system_metadata'])
-
- self.compute_api.live_migrate(self.context, instance,
- True, True, 'fake_dest_host')
-
- self.assertTrue(self.cells_rpcapi.live_migrate_instance.called)
-
- @mock.patch.object(objects.Instance, 'save')
- @mock.patch.object(objects.Instance, 'get_flavor')
- @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
- @mock.patch.object(compute_api.API, '_get_image')
- @mock.patch.object(compute_api.API, '_check_auto_disk_config')
- @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
- @mock.patch.object(compute_api.API, '_record_action_start')
- def test_rebuild_instance(self, _record_action_start,
- _checks_for_create_and_rebuild, _check_auto_disk_config,
- _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save):
- orig_system_metadata = {}
- instance = fake_instance.fake_instance_obj(self.context,
- vm_state=vm_states.ACTIVE, cell_name='fake-cell',
- launched_at=timeutils.utcnow(),
- system_metadata=orig_system_metadata,
- expected_attrs=['system_metadata'])
- get_flavor.return_value = ''
- image_href = ''
- image = {"min_ram": 10, "min_disk": 1,
- "properties": {'architecture': 'x86_64'}}
- admin_pass = ''
- files_to_inject = []
- bdms = []
-
- _get_image.return_value = (None, image)
- bdm_get_by_instance_uuid.return_value = bdms
-
- self.compute_api.rebuild(self.context, instance, image_href,
- admin_pass, files_to_inject)
-
- self.assertTrue(self.cells_rpcapi.rebuild_instance.called)
-
- def test_check_equal(self):
- task_api = self.compute_api.compute_task_api
- tests = set()
- for (name, value) in inspect.getmembers(self, inspect.ismethod):
- if name.startswith('test_') and name != 'test_check_equal':
- tests.add(name[5:])
- if tests != set(task_api.cells_compatible):
- self.fail("Testcases not equivalent to cells_compatible list")
-
-
-class CellsComputePolicyTestCase(test_compute.ComputePolicyTestCase):
- def setUp(self):
- super(CellsComputePolicyTestCase, self).setUp()
- global ORIG_COMPUTE_API
- ORIG_COMPUTE_API = self.compute_api
- self.compute_api = compute_cells_api.ComputeCellsAPI()
- deploy_stubs(self.stubs, self.compute_api)
-
- def tearDown(self):
- global ORIG_COMPUTE_API
- self.compute_api = ORIG_COMPUTE_API
- super(CellsComputePolicyTestCase, self).tearDown()
diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py
deleted file mode 100644
index 0a804ea2a9..0000000000
--- a/nova/tests/compute/test_compute_mgr.py
+++ /dev/null
@@ -1,3053 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Unit tests for ComputeManager()."""
-
-import contextlib
-import time
-
-from cinderclient import exceptions as cinder_exception
-from eventlet import event as eventlet_event
-import mock
-import mox
-from oslo.config import cfg
-from oslo import messaging
-from oslo.utils import importutils
-
-from nova.compute import manager
-from nova.compute import power_state
-from nova.compute import task_states
-from nova.compute import utils as compute_utils
-from nova.compute import vm_states
-from nova.conductor import rpcapi as conductor_rpcapi
-from nova import context
-from nova import db
-from nova import exception
-from nova.network import api as network_api
-from nova.network import model as network_model
-from nova import objects
-from nova.objects import block_device as block_device_obj
-from nova.openstack.common import uuidutils
-from nova import test
-from nova.tests.compute import fake_resource_tracker
-from nova.tests import fake_block_device
-from nova.tests import fake_instance
-from nova.tests.objects import test_instance_fault
-from nova.tests.objects import test_instance_info_cache
-from nova import utils
-
-
-CONF = cfg.CONF
-CONF.import_opt('compute_manager', 'nova.service')
-
-
-class ComputeManagerUnitTestCase(test.NoDBTestCase):
- def setUp(self):
- super(ComputeManagerUnitTestCase, self).setUp()
- self.compute = importutils.import_object(CONF.compute_manager)
- self.context = context.RequestContext('fake', 'fake')
-
- def test_allocate_network_succeeds_after_retries(self):
- self.flags(network_allocate_retries=8)
-
- nwapi = self.compute.network_api
- self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
- self.mox.StubOutWithMock(self.compute, '_instance_update')
- self.mox.StubOutWithMock(time, 'sleep')
-
- instance = fake_instance.fake_instance_obj(
- self.context, expected_attrs=['system_metadata'])
-
- is_vpn = 'fake-is-vpn'
- req_networks = 'fake-req-networks'
- macs = 'fake-macs'
- sec_groups = 'fake-sec-groups'
- final_result = 'meow'
- dhcp_options = None
-
- expected_sleep_times = [1, 2, 4, 8, 16, 30, 30, 30]
-
- for sleep_time in expected_sleep_times:
- nwapi.allocate_for_instance(
- self.context, instance, vpn=is_vpn,
- requested_networks=req_networks, macs=macs,
- security_groups=sec_groups,
- dhcp_options=dhcp_options).AndRaise(
- test.TestingException())
- time.sleep(sleep_time)
-
- nwapi.allocate_for_instance(
- self.context, instance, vpn=is_vpn,
- requested_networks=req_networks, macs=macs,
- security_groups=sec_groups,
- dhcp_options=dhcp_options).AndReturn(final_result)
- self.compute._instance_update(self.context, instance['uuid'],
- system_metadata={'network_allocated': 'True'})
-
- self.mox.ReplayAll()
-
- res = self.compute._allocate_network_async(self.context, instance,
- req_networks,
- macs,
- sec_groups,
- is_vpn,
- dhcp_options)
- self.assertEqual(final_result, res)
-
- def test_allocate_network_maintains_context(self):
- # override tracker with a version that doesn't need the database:
- class FakeResourceTracker(object):
- def instance_claim(self, context, instance, limits):
- return mox.MockAnything()
-
- self.mox.StubOutWithMock(self.compute, '_get_resource_tracker')
- self.mox.StubOutWithMock(self.compute, '_allocate_network')
- self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
- 'get_by_instance_uuid')
-
- instance = fake_instance.fake_instance_obj(self.context)
-
- objects.BlockDeviceMappingList.get_by_instance_uuid(
- mox.IgnoreArg(), instance.uuid).AndReturn([])
-
- node = 'fake_node'
- self.compute._get_resource_tracker(node).AndReturn(
- FakeResourceTracker())
-
- self.admin_context = False
-
- def fake_allocate(context, *args, **kwargs):
- if context.is_admin:
- self.admin_context = True
-
- # NOTE(vish): The nice mox parameter matchers here don't work well
- # because they raise an exception that gets wrapped by
- # the retry exception handling, so use a side effect
- # to keep track of whether allocate was called with admin
- # context.
- self.compute._allocate_network(mox.IgnoreArg(), instance,
- mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).WithSideEffects(fake_allocate)
-
- self.mox.ReplayAll()
-
- instance, nw_info = self.compute._build_instance(self.context, {}, {},
- None, None, None, True,
- node, instance,
- {}, False)
- self.assertFalse(self.admin_context,
- "_allocate_network called with admin context")
- self.assertEqual(vm_states.BUILDING, instance.vm_state)
- self.assertEqual(task_states.BLOCK_DEVICE_MAPPING, instance.task_state)
-
- def test_reschedule_maintains_context(self):
- # override tracker with a version that causes a reschedule
- class FakeResourceTracker(object):
- def instance_claim(self, context, instance, limits):
- raise test.TestingException()
-
- self.mox.StubOutWithMock(self.compute, '_get_resource_tracker')
- self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
- self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
- 'get_by_instance_uuid')
- instance = fake_instance.fake_instance_obj(self.context)
-
- objects.BlockDeviceMappingList.get_by_instance_uuid(
- mox.IgnoreArg(), instance.uuid).AndReturn([])
-
- node = 'fake_node'
- self.compute._get_resource_tracker(node).AndReturn(
- FakeResourceTracker())
-
- self.admin_context = False
-
- def fake_retry_or_error(context, *args, **kwargs):
- if context.is_admin:
- self.admin_context = True
-
- # NOTE(vish): we could use a mos parameter matcher here but it leads
- # to a very cryptic error message, so use the same method
- # as the allocate_network_maintains_context test.
- self.compute._reschedule_or_error(mox.IgnoreArg(), instance,
- mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).WithSideEffects(fake_retry_or_error)
-
- self.mox.ReplayAll()
-
- self.assertRaises(test.TestingException,
- self.compute._build_instance, self.context, {}, {},
- None, None, None, True, node, instance, {}, False)
- self.assertFalse(self.admin_context,
- "_reschedule_or_error called with admin context")
-
- def test_allocate_network_fails(self):
- self.flags(network_allocate_retries=0)
-
- nwapi = self.compute.network_api
- self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
-
- instance = {}
- is_vpn = 'fake-is-vpn'
- req_networks = 'fake-req-networks'
- macs = 'fake-macs'
- sec_groups = 'fake-sec-groups'
- dhcp_options = None
-
- nwapi.allocate_for_instance(
- self.context, instance, vpn=is_vpn,
- requested_networks=req_networks, macs=macs,
- security_groups=sec_groups,
- dhcp_options=dhcp_options).AndRaise(test.TestingException())
-
- self.mox.ReplayAll()
-
- self.assertRaises(test.TestingException,
- self.compute._allocate_network_async,
- self.context, instance, req_networks, macs,
- sec_groups, is_vpn, dhcp_options)
-
- def test_allocate_network_neg_conf_value_treated_as_zero(self):
- self.flags(network_allocate_retries=-1)
-
- nwapi = self.compute.network_api
- self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
-
- instance = {}
- is_vpn = 'fake-is-vpn'
- req_networks = 'fake-req-networks'
- macs = 'fake-macs'
- sec_groups = 'fake-sec-groups'
- dhcp_options = None
-
- # Only attempted once.
- nwapi.allocate_for_instance(
- self.context, instance, vpn=is_vpn,
- requested_networks=req_networks, macs=macs,
- security_groups=sec_groups,
- dhcp_options=dhcp_options).AndRaise(test.TestingException())
-
- self.mox.ReplayAll()
-
- self.assertRaises(test.TestingException,
- self.compute._allocate_network_async,
- self.context, instance, req_networks, macs,
- sec_groups, is_vpn, dhcp_options)
-
- @mock.patch.object(network_api.API, 'allocate_for_instance')
- @mock.patch.object(manager.ComputeManager, '_instance_update')
- @mock.patch.object(time, 'sleep')
- def test_allocate_network_with_conf_value_is_one(
- self, sleep, _instance_update, allocate_for_instance):
- self.flags(network_allocate_retries=1)
-
- instance = fake_instance.fake_instance_obj(
- self.context, expected_attrs=['system_metadata'])
- is_vpn = 'fake-is-vpn'
- req_networks = 'fake-req-networks'
- macs = 'fake-macs'
- sec_groups = 'fake-sec-groups'
- dhcp_options = None
- final_result = 'zhangtralon'
-
- allocate_for_instance.side_effect = [test.TestingException(),
- final_result]
- res = self.compute._allocate_network_async(self.context, instance,
- req_networks,
- macs,
- sec_groups,
- is_vpn,
- dhcp_options)
- self.assertEqual(final_result, res)
- self.assertEqual(1, sleep.call_count)
-
- def test_init_host(self):
- our_host = self.compute.host
- fake_context = 'fake-context'
- inst = fake_instance.fake_db_instance(
- vm_state=vm_states.ACTIVE,
- info_cache=dict(test_instance_info_cache.fake_info_cache,
- network_info=None),
- security_groups=None)
- startup_instances = [inst, inst, inst]
-
- def _do_mock_calls(defer_iptables_apply):
- self.compute.driver.init_host(host=our_host)
- context.get_admin_context().AndReturn(fake_context)
- db.instance_get_all_by_host(
- fake_context, our_host, columns_to_join=['info_cache'],
- use_slave=False
- ).AndReturn(startup_instances)
- if defer_iptables_apply:
- self.compute.driver.filter_defer_apply_on()
- self.compute._destroy_evacuated_instances(fake_context)
- self.compute._init_instance(fake_context,
- mox.IsA(objects.Instance))
- self.compute._init_instance(fake_context,
- mox.IsA(objects.Instance))
- self.compute._init_instance(fake_context,
- mox.IsA(objects.Instance))
- if defer_iptables_apply:
- self.compute.driver.filter_defer_apply_off()
-
- self.mox.StubOutWithMock(self.compute.driver, 'init_host')
- self.mox.StubOutWithMock(self.compute.driver,
- 'filter_defer_apply_on')
- self.mox.StubOutWithMock(self.compute.driver,
- 'filter_defer_apply_off')
- self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
- self.mox.StubOutWithMock(context, 'get_admin_context')
- self.mox.StubOutWithMock(self.compute,
- '_destroy_evacuated_instances')
- self.mox.StubOutWithMock(self.compute,
- '_init_instance')
-
- # Test with defer_iptables_apply
- self.flags(defer_iptables_apply=True)
- _do_mock_calls(True)
-
- self.mox.ReplayAll()
- self.compute.init_host()
- self.mox.VerifyAll()
-
- # Test without defer_iptables_apply
- self.mox.ResetAll()
- self.flags(defer_iptables_apply=False)
- _do_mock_calls(False)
-
- self.mox.ReplayAll()
- self.compute.init_host()
- # tearDown() uses context.get_admin_context(), so we have
- # to do the verification here and unstub it.
- self.mox.VerifyAll()
- self.mox.UnsetStubs()
-
- @mock.patch('nova.objects.InstanceList')
- def test_cleanup_host(self, mock_instance_list):
- # just testing whether the cleanup_host method
- # when fired will invoke the underlying driver's
- # equivalent method.
-
- mock_instance_list.get_by_host.return_value = []
-
- with mock.patch.object(self.compute, 'driver') as mock_driver:
- self.compute.init_host()
- mock_driver.init_host.assert_called_once_with(host='fake-mini')
-
- self.compute.cleanup_host()
- mock_driver.cleanup_host.assert_called_once_with(host='fake-mini')
-
- def test_init_host_with_deleted_migration(self):
- our_host = self.compute.host
- not_our_host = 'not-' + our_host
- fake_context = 'fake-context'
-
- deleted_instance = fake_instance.fake_instance_obj(
- self.context, host=not_our_host, uuid='fake-uuid')
-
- self.mox.StubOutWithMock(self.compute.driver, 'init_host')
- self.mox.StubOutWithMock(self.compute.driver, 'destroy')
- self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
- self.mox.StubOutWithMock(context, 'get_admin_context')
- self.mox.StubOutWithMock(self.compute, 'init_virt_events')
- self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
- self.mox.StubOutWithMock(self.compute, '_init_instance')
- self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
-
- self.compute.driver.init_host(host=our_host)
- context.get_admin_context().AndReturn(fake_context)
- db.instance_get_all_by_host(fake_context, our_host,
- columns_to_join=['info_cache'],
- use_slave=False
- ).AndReturn([])
- self.compute.init_virt_events()
-
- # simulate failed instance
- self.compute._get_instances_on_driver(
- fake_context, {'deleted': False}).AndReturn([deleted_instance])
- self.compute._get_instance_nw_info(fake_context, deleted_instance
- ).AndRaise(exception.InstanceNotFound(
- instance_id=deleted_instance['uuid']))
- # ensure driver.destroy is called so that driver may
- # clean up any dangling files
- self.compute.driver.destroy(fake_context, deleted_instance,
- mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
-
- self.mox.ReplayAll()
- self.compute.init_host()
- # tearDown() uses context.get_admin_context(), so we have
- # to do the verification here and unstub it.
- self.mox.VerifyAll()
- self.mox.UnsetStubs()
-
- def test_init_instance_failed_resume_sets_error(self):
- instance = fake_instance.fake_instance_obj(
- self.context,
- uuid='fake-uuid',
- info_cache=None,
- power_state=power_state.RUNNING,
- vm_state=vm_states.ACTIVE,
- task_state=None,
- expected_attrs=['info_cache'])
-
- self.flags(resume_guests_state_on_host_boot=True)
- self.mox.StubOutWithMock(self.compute, '_get_power_state')
- self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
- self.mox.StubOutWithMock(self.compute.driver,
- 'resume_state_on_host_boot')
- self.mox.StubOutWithMock(self.compute,
- '_get_instance_block_device_info')
- self.mox.StubOutWithMock(self.compute,
- '_set_instance_error_state')
- self.compute._get_power_state(mox.IgnoreArg(),
- instance).AndReturn(power_state.SHUTDOWN)
- self.compute._get_power_state(mox.IgnoreArg(),
- instance).AndReturn(power_state.SHUTDOWN)
- self.compute._get_power_state(mox.IgnoreArg(),
- instance).AndReturn(power_state.SHUTDOWN)
- self.compute.driver.plug_vifs(instance, mox.IgnoreArg())
- self.compute._get_instance_block_device_info(mox.IgnoreArg(),
- instance).AndReturn('fake-bdm')
- self.compute.driver.resume_state_on_host_boot(mox.IgnoreArg(),
- instance, mox.IgnoreArg(),
- 'fake-bdm').AndRaise(test.TestingException)
- self.compute._set_instance_error_state(mox.IgnoreArg(), instance)
- self.mox.ReplayAll()
- self.compute._init_instance('fake-context', instance)
-
- def test_init_instance_stuck_in_deleting(self):
- instance = fake_instance.fake_instance_obj(
- self.context,
- uuid='fake-uuid',
- power_state=power_state.RUNNING,
- vm_state=vm_states.ACTIVE,
- task_state=task_states.DELETING)
-
- self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
- 'get_by_instance_uuid')
- self.mox.StubOutWithMock(self.compute, '_delete_instance')
- self.mox.StubOutWithMock(instance, 'obj_load_attr')
-
- bdms = []
- instance.obj_load_attr('metadata')
- instance.obj_load_attr('system_metadata')
- objects.BlockDeviceMappingList.get_by_instance_uuid(
- self.context, instance.uuid).AndReturn(bdms)
- self.compute._delete_instance(self.context, instance, bdms,
- mox.IgnoreArg())
-
- self.mox.ReplayAll()
- self.compute._init_instance(self.context, instance)
-
- def _test_init_instance_reverts_crashed_migrations(self,
- old_vm_state=None):
- power_on = True if (not old_vm_state or
- old_vm_state == vm_states.ACTIVE) else False
- sys_meta = {
- 'old_vm_state': old_vm_state
- }
- instance = fake_instance.fake_instance_obj(
- self.context,
- uuid='foo',
- vm_state=vm_states.ERROR,
- task_state=task_states.RESIZE_MIGRATING,
- power_state=power_state.SHUTDOWN,
- system_metadata=sys_meta,
- expected_attrs=['system_metadata'])
-
- self.mox.StubOutWithMock(compute_utils, 'get_nw_info_for_instance')
- self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
- self.mox.StubOutWithMock(self.compute.driver,
- 'finish_revert_migration')
- self.mox.StubOutWithMock(self.compute,
- '_get_instance_block_device_info')
- self.mox.StubOutWithMock(self.compute.driver, 'get_info')
- self.mox.StubOutWithMock(instance, 'save')
- self.mox.StubOutWithMock(self.compute, '_retry_reboot')
-
- self.compute._retry_reboot(self.context, instance).AndReturn(
- (False, None))
- compute_utils.get_nw_info_for_instance(instance).AndReturn(
- network_model.NetworkInfo())
- self.compute.driver.plug_vifs(instance, [])
- self.compute._get_instance_block_device_info(
- self.context, instance).AndReturn([])
- self.compute.driver.finish_revert_migration(self.context, instance,
- [], [], power_on)
- instance.save()
- self.compute.driver.get_info(instance).AndReturn(
- {'state': power_state.SHUTDOWN})
- self.compute.driver.get_info(instance).AndReturn(
- {'state': power_state.SHUTDOWN})
-
- self.mox.ReplayAll()
-
- self.compute._init_instance(self.context, instance)
- self.assertIsNone(instance.task_state)
-
- def test_init_instance_reverts_crashed_migration_from_active(self):
- self._test_init_instance_reverts_crashed_migrations(
- old_vm_state=vm_states.ACTIVE)
-
- def test_init_instance_reverts_crashed_migration_from_stopped(self):
- self._test_init_instance_reverts_crashed_migrations(
- old_vm_state=vm_states.STOPPED)
-
- def test_init_instance_reverts_crashed_migration_no_old_state(self):
- self._test_init_instance_reverts_crashed_migrations(old_vm_state=None)
-
- def test_init_instance_resets_crashed_live_migration(self):
- instance = fake_instance.fake_instance_obj(
- self.context,
- uuid='foo',
- vm_state=vm_states.ACTIVE,
- task_state=task_states.MIGRATING)
- with contextlib.nested(
- mock.patch.object(instance, 'save'),
- mock.patch('nova.compute.utils.get_nw_info_for_instance',
- return_value=network_model.NetworkInfo())
- ) as (save, get_nw_info):
- self.compute._init_instance(self.context, instance)
- save.assert_called_once_with(expected_task_state=['migrating'])
- get_nw_info.assert_called_once_with(instance)
- self.assertIsNone(instance.task_state)
- self.assertEqual(vm_states.ACTIVE, instance.vm_state)
-
- def _test_init_instance_sets_building_error(self, vm_state,
- task_state=None):
- instance = fake_instance.fake_instance_obj(
- self.context,
- uuid='foo',
- vm_state=vm_state,
- task_state=task_state)
- with mock.patch.object(instance, 'save') as save:
- self.compute._init_instance(self.context, instance)
- save.assert_called_once_with()
- self.assertIsNone(instance.task_state)
- self.assertEqual(vm_states.ERROR, instance.vm_state)
-
- def test_init_instance_sets_building_error(self):
- self._test_init_instance_sets_building_error(vm_states.BUILDING)
-
- def test_init_instance_sets_rebuilding_errors(self):
- tasks = [task_states.REBUILDING,
- task_states.REBUILD_BLOCK_DEVICE_MAPPING,
- task_states.REBUILD_SPAWNING]
- vms = [vm_states.ACTIVE, vm_states.STOPPED]
-
- for vm_state in vms:
- for task_state in tasks:
- self._test_init_instance_sets_building_error(
- vm_state, task_state)
-
- def _test_init_instance_sets_building_tasks_error(self, instance):
- with mock.patch.object(instance, 'save') as save:
- self.compute._init_instance(self.context, instance)
- save.assert_called_once_with()
- self.assertIsNone(instance.task_state)
- self.assertEqual(vm_states.ERROR, instance.vm_state)
-
- def test_init_instance_sets_building_tasks_error_scheduling(self):
- instance = fake_instance.fake_instance_obj(
- self.context,
- uuid='foo',
- vm_state=None,
- task_state=task_states.SCHEDULING)
- self._test_init_instance_sets_building_tasks_error(instance)
-
- def test_init_instance_sets_building_tasks_error_block_device(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.vm_state = None
- instance.task_state = task_states.BLOCK_DEVICE_MAPPING
- self._test_init_instance_sets_building_tasks_error(instance)
-
- def test_init_instance_sets_building_tasks_error_networking(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.vm_state = None
- instance.task_state = task_states.NETWORKING
- self._test_init_instance_sets_building_tasks_error(instance)
-
- def test_init_instance_sets_building_tasks_error_spawning(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.vm_state = None
- instance.task_state = task_states.SPAWNING
- self._test_init_instance_sets_building_tasks_error(instance)
-
- def _test_init_instance_cleans_image_states(self, instance):
- with mock.patch.object(instance, 'save') as save:
- self.compute._get_power_state = mock.Mock()
- self.compute.driver.post_interrupted_snapshot_cleanup = mock.Mock()
- instance.info_cache = None
- instance.power_state = power_state.RUNNING
- self.compute._init_instance(self.context, instance)
- save.assert_called_once_with()
- self.compute.driver.post_interrupted_snapshot_cleanup.\
- assert_called_once_with(self.context, instance)
- self.assertIsNone(instance.task_state)
-
- def test_init_instance_cleans_image_state_pending_upload(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.vm_state = vm_states.ACTIVE
- instance.task_state = task_states.IMAGE_PENDING_UPLOAD
- self._test_init_instance_cleans_image_states(instance)
-
- def test_init_instance_cleans_image_state_uploading(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.vm_state = vm_states.ACTIVE
- instance.task_state = task_states.IMAGE_UPLOADING
- self._test_init_instance_cleans_image_states(instance)
-
- def test_init_instance_cleans_image_state_snapshot(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.vm_state = vm_states.ACTIVE
- instance.task_state = task_states.IMAGE_SNAPSHOT
- self._test_init_instance_cleans_image_states(instance)
-
- def test_init_instance_cleans_image_state_snapshot_pending(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.vm_state = vm_states.ACTIVE
- instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
- self._test_init_instance_cleans_image_states(instance)
-
- def test_init_instance_errors_when_not_migrating(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.vm_state = vm_states.ERROR
- instance.task_state = task_states.IMAGE_UPLOADING
- self.mox.StubOutWithMock(compute_utils, 'get_nw_info_for_instance')
- self.mox.ReplayAll()
- self.compute._init_instance(self.context, instance)
- self.mox.VerifyAll()
-
- def test_init_instance_deletes_error_deleting_instance(self):
- instance = fake_instance.fake_instance_obj(
- self.context,
- uuid='fake',
- vm_state=vm_states.ERROR,
- task_state=task_states.DELETING)
- self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
- 'get_by_instance_uuid')
- self.mox.StubOutWithMock(self.compute, '_delete_instance')
- self.mox.StubOutWithMock(instance, 'obj_load_attr')
-
- bdms = []
- instance.obj_load_attr('metadata')
- instance.obj_load_attr('system_metadata')
- objects.BlockDeviceMappingList.get_by_instance_uuid(
- self.context, instance.uuid).AndReturn(bdms)
- self.compute._delete_instance(self.context, instance, bdms,
- mox.IgnoreArg())
- self.mox.ReplayAll()
-
- self.compute._init_instance(self.context, instance)
- self.mox.VerifyAll()
-
- @mock.patch('nova.context.RequestContext.elevated')
- @mock.patch('nova.compute.utils.get_nw_info_for_instance')
- @mock.patch(
- 'nova.compute.manager.ComputeManager._get_instance_block_device_info')
- @mock.patch('nova.virt.driver.ComputeDriver.destroy')
- @mock.patch('nova.virt.driver.ComputeDriver.get_volume_connector')
- def test_shutdown_instance_endpoint_not_found(self, mock_connector,
- mock_destroy, mock_blk_device_info, mock_nw_info, mock_elevated):
- mock_connector.side_effect = cinder_exception.EndpointNotFound
- mock_elevated.return_value = self.context
- instance = fake_instance.fake_instance_obj(
- self.context,
- uuid='fake',
- vm_state=vm_states.ERROR,
- task_state=task_states.DELETING)
- bdms = [mock.Mock(id=1, is_volume=True)]
-
- self.compute._shutdown_instance(self.context, instance, bdms,
- notify=False, try_deallocate_networks=False)
-
- def _test_init_instance_retries_reboot(self, instance, reboot_type,
- return_power_state):
- with contextlib.nested(
- mock.patch.object(self.compute, '_get_power_state',
- return_value=return_power_state),
- mock.patch.object(self.compute.compute_rpcapi, 'reboot_instance'),
- mock.patch.object(compute_utils, 'get_nw_info_for_instance')
- ) as (
- _get_power_state,
- reboot_instance,
- get_nw_info_for_instance
- ):
- self.compute._init_instance(self.context, instance)
- call = mock.call(self.context, instance, block_device_info=None,
- reboot_type=reboot_type)
- reboot_instance.assert_has_calls([call])
-
- def test_init_instance_retries_reboot_pending(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.task_state = task_states.REBOOT_PENDING
- for state in vm_states.ALLOW_SOFT_REBOOT:
- instance.vm_state = state
- self._test_init_instance_retries_reboot(instance, 'SOFT',
- power_state.RUNNING)
-
- def test_init_instance_retries_reboot_pending_hard(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.task_state = task_states.REBOOT_PENDING_HARD
- for state in vm_states.ALLOW_HARD_REBOOT:
- # NOTE(dave-mcnally) while a reboot of a vm in error state is
- # possible we don't attempt to recover an error during init
- if state == vm_states.ERROR:
- continue
- instance.vm_state = state
- self._test_init_instance_retries_reboot(instance, 'HARD',
- power_state.RUNNING)
-
- def test_init_instance_retries_reboot_started(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.vm_state = vm_states.ACTIVE
- instance.task_state = task_states.REBOOT_STARTED
- self._test_init_instance_retries_reboot(instance, 'HARD',
- power_state.NOSTATE)
-
- def test_init_instance_retries_reboot_started_hard(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.vm_state = vm_states.ACTIVE
- instance.task_state = task_states.REBOOT_STARTED_HARD
- self._test_init_instance_retries_reboot(instance, 'HARD',
- power_state.NOSTATE)
-
- def _test_init_instance_cleans_reboot_state(self, instance):
- with contextlib.nested(
- mock.patch.object(self.compute, '_get_power_state',
- return_value=power_state.RUNNING),
- mock.patch.object(instance, 'save', autospec=True),
- mock.patch.object(compute_utils, 'get_nw_info_for_instance')
- ) as (
- _get_power_state,
- instance_save,
- get_nw_info_for_instance
- ):
- self.compute._init_instance(self.context, instance)
- instance_save.assert_called_once_with()
- self.assertIsNone(instance.task_state)
- self.assertEqual(vm_states.ACTIVE, instance.vm_state)
-
- def test_init_instance_cleans_image_state_reboot_started(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.vm_state = vm_states.ACTIVE
- instance.task_state = task_states.REBOOT_STARTED
- instance.power_state = power_state.RUNNING
- self._test_init_instance_cleans_reboot_state(instance)
-
- def test_init_instance_cleans_image_state_reboot_started_hard(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.vm_state = vm_states.ACTIVE
- instance.task_state = task_states.REBOOT_STARTED_HARD
- instance.power_state = power_state.RUNNING
- self._test_init_instance_cleans_reboot_state(instance)
-
- def test_init_instance_retries_power_off(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.id = 1
- instance.vm_state = vm_states.ACTIVE
- instance.task_state = task_states.POWERING_OFF
- with mock.patch.object(self.compute, 'stop_instance'):
- self.compute._init_instance(self.context, instance)
- call = mock.call(self.context, instance)
- self.compute.stop_instance.assert_has_calls([call])
-
- def test_init_instance_retries_power_on(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.id = 1
- instance.vm_state = vm_states.ACTIVE
- instance.task_state = task_states.POWERING_ON
- with mock.patch.object(self.compute, 'start_instance'):
- self.compute._init_instance(self.context, instance)
- call = mock.call(self.context, instance)
- self.compute.start_instance.assert_has_calls([call])
-
- def test_init_instance_retries_power_on_silent_exception(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.id = 1
- instance.vm_state = vm_states.ACTIVE
- instance.task_state = task_states.POWERING_ON
- with mock.patch.object(self.compute, 'start_instance',
- return_value=Exception):
- init_return = self.compute._init_instance(self.context, instance)
- call = mock.call(self.context, instance)
- self.compute.start_instance.assert_has_calls([call])
- self.assertIsNone(init_return)
-
- def test_init_instance_retries_power_off_silent_exception(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.id = 1
- instance.vm_state = vm_states.ACTIVE
- instance.task_state = task_states.POWERING_OFF
- with mock.patch.object(self.compute, 'stop_instance',
- return_value=Exception):
- init_return = self.compute._init_instance(self.context, instance)
- call = mock.call(self.context, instance)
- self.compute.stop_instance.assert_has_calls([call])
- self.assertIsNone(init_return)
-
- def test_get_instances_on_driver(self):
- fake_context = context.get_admin_context()
-
- driver_instances = []
- for x in xrange(10):
- driver_instances.append(fake_instance.fake_db_instance())
-
- self.mox.StubOutWithMock(self.compute.driver,
- 'list_instance_uuids')
- self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
-
- self.compute.driver.list_instance_uuids().AndReturn(
- [inst['uuid'] for inst in driver_instances])
- db.instance_get_all_by_filters(
- fake_context,
- {'uuid': [inst['uuid'] for
- inst in driver_instances]},
- 'created_at', 'desc', columns_to_join=None,
- limit=None, marker=None,
- use_slave=True).AndReturn(
- driver_instances)
-
- self.mox.ReplayAll()
-
- result = self.compute._get_instances_on_driver(fake_context)
- self.assertEqual([x['uuid'] for x in driver_instances],
- [x['uuid'] for x in result])
-
- def test_get_instances_on_driver_fallback(self):
- # Test getting instances when driver doesn't support
- # 'list_instance_uuids'
- self.compute.host = 'host'
- filters = {'host': self.compute.host}
- fake_context = context.get_admin_context()
-
- self.flags(instance_name_template='inst-%i')
-
- all_instances = []
- driver_instances = []
- for x in xrange(10):
- instance = fake_instance.fake_db_instance(name='inst-%i' % x,
- id=x)
- if x % 2:
- driver_instances.append(instance)
- all_instances.append(instance)
-
- self.mox.StubOutWithMock(self.compute.driver,
- 'list_instance_uuids')
- self.mox.StubOutWithMock(self.compute.driver,
- 'list_instances')
- self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
-
- self.compute.driver.list_instance_uuids().AndRaise(
- NotImplementedError())
- self.compute.driver.list_instances().AndReturn(
- [inst['name'] for inst in driver_instances])
- db.instance_get_all_by_filters(
- fake_context, filters,
- 'created_at', 'desc', columns_to_join=None,
- limit=None, marker=None,
- use_slave=True).AndReturn(all_instances)
-
- self.mox.ReplayAll()
-
- result = self.compute._get_instances_on_driver(fake_context, filters)
- self.assertEqual([x['uuid'] for x in driver_instances],
- [x['uuid'] for x in result])
-
- def test_instance_usage_audit(self):
- instances = [objects.Instance(uuid='foo')]
-
- @classmethod
- def fake_get(*a, **k):
- return instances
-
- self.flags(instance_usage_audit=True)
- self.stubs.Set(compute_utils, 'has_audit_been_run',
- lambda *a, **k: False)
- self.stubs.Set(objects.InstanceList,
- 'get_active_by_window_joined', fake_get)
- self.stubs.Set(compute_utils, 'start_instance_usage_audit',
- lambda *a, **k: None)
- self.stubs.Set(compute_utils, 'finish_instance_usage_audit',
- lambda *a, **k: None)
-
- self.mox.StubOutWithMock(self.compute.conductor_api,
- 'notify_usage_exists')
- self.compute.conductor_api.notify_usage_exists(
- self.context, instances[0], ignore_missing_network_data=False)
- self.mox.ReplayAll()
- self.compute._instance_usage_audit(self.context)
-
- def _get_sync_instance(self, power_state, vm_state, task_state=None,
- shutdown_terminate=False):
- instance = objects.Instance()
- instance.uuid = 'fake-uuid'
- instance.power_state = power_state
- instance.vm_state = vm_state
- instance.host = self.compute.host
- instance.task_state = task_state
- instance.shutdown_terminate = shutdown_terminate
- self.mox.StubOutWithMock(instance, 'refresh')
- self.mox.StubOutWithMock(instance, 'save')
- return instance
-
- def test_sync_instance_power_state_match(self):
- instance = self._get_sync_instance(power_state.RUNNING,
- vm_states.ACTIVE)
- instance.refresh(use_slave=False)
- self.mox.ReplayAll()
- self.compute._sync_instance_power_state(self.context, instance,
- power_state.RUNNING)
-
- def test_sync_instance_power_state_running_stopped(self):
- instance = self._get_sync_instance(power_state.RUNNING,
- vm_states.ACTIVE)
- instance.refresh(use_slave=False)
- instance.save()
- self.mox.ReplayAll()
- self.compute._sync_instance_power_state(self.context, instance,
- power_state.SHUTDOWN)
- self.assertEqual(instance.power_state, power_state.SHUTDOWN)
-
- def _test_sync_to_stop(self, power_state, vm_state, driver_power_state,
- stop=True, force=False, shutdown_terminate=False):
- instance = self._get_sync_instance(
- power_state, vm_state, shutdown_terminate=shutdown_terminate)
- instance.refresh(use_slave=False)
- instance.save()
- self.mox.StubOutWithMock(self.compute.compute_api, 'stop')
- self.mox.StubOutWithMock(self.compute.compute_api, 'delete')
- self.mox.StubOutWithMock(self.compute.compute_api, 'force_stop')
- if shutdown_terminate:
- self.compute.compute_api.delete(self.context, instance)
- elif stop:
- if force:
- self.compute.compute_api.force_stop(self.context, instance)
- else:
- self.compute.compute_api.stop(self.context, instance)
- self.mox.ReplayAll()
- self.compute._sync_instance_power_state(self.context, instance,
- driver_power_state)
- self.mox.VerifyAll()
- self.mox.UnsetStubs()
-
- def test_sync_instance_power_state_to_stop(self):
- for ps in (power_state.SHUTDOWN, power_state.CRASHED,
- power_state.SUSPENDED):
- self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, ps)
-
- for ps in (power_state.SHUTDOWN, power_state.CRASHED):
- self._test_sync_to_stop(power_state.PAUSED, vm_states.PAUSED, ps,
- force=True)
-
- self._test_sync_to_stop(power_state.SHUTDOWN, vm_states.STOPPED,
- power_state.RUNNING, force=True)
-
- def test_sync_instance_power_state_to_terminate(self):
- self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE,
- power_state.SHUTDOWN,
- force=False, shutdown_terminate=True)
-
- def test_sync_instance_power_state_to_no_stop(self):
- for ps in (power_state.PAUSED, power_state.NOSTATE):
- self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, ps,
- stop=False)
- for vs in (vm_states.SOFT_DELETED, vm_states.DELETED):
- for ps in (power_state.NOSTATE, power_state.SHUTDOWN):
- self._test_sync_to_stop(power_state.RUNNING, vs, ps,
- stop=False)
-
- @mock.patch('nova.compute.manager.ComputeManager.'
- '_sync_instance_power_state')
- def test_query_driver_power_state_and_sync_pending_task(
- self, mock_sync_power_state):
- with mock.patch.object(self.compute.driver,
- 'get_info') as mock_get_info:
- db_instance = objects.Instance(uuid='fake-uuid',
- task_state=task_states.POWERING_OFF)
- self.compute._query_driver_power_state_and_sync(self.context,
- db_instance)
- self.assertFalse(mock_get_info.called)
- self.assertFalse(mock_sync_power_state.called)
-
- @mock.patch('nova.compute.manager.ComputeManager.'
- '_sync_instance_power_state')
- def test_query_driver_power_state_and_sync_not_found_driver(
- self, mock_sync_power_state):
- error = exception.InstanceNotFound(instance_id=1)
- with mock.patch.object(self.compute.driver,
- 'get_info', side_effect=error) as mock_get_info:
- db_instance = objects.Instance(uuid='fake-uuid', task_state=None)
- self.compute._query_driver_power_state_and_sync(self.context,
- db_instance)
- mock_get_info.assert_called_once_with(db_instance)
- mock_sync_power_state.assert_called_once_with(self.context,
- db_instance,
- power_state.NOSTATE,
- use_slave=True)
-
- def test_run_pending_deletes(self):
- self.flags(instance_delete_interval=10)
-
- class FakeInstance(object):
- def __init__(self, uuid, name, smd):
- self.uuid = uuid
- self.name = name
- self.system_metadata = smd
- self.cleaned = False
-
- def __getitem__(self, name):
- return getattr(self, name)
-
- def save(self, context):
- pass
-
- class FakeInstanceList(object):
- def get_by_filters(self, *args, **kwargs):
- return []
-
- a = FakeInstance('123', 'apple', {'clean_attempts': '100'})
- b = FakeInstance('456', 'orange', {'clean_attempts': '3'})
- c = FakeInstance('789', 'banana', {})
-
- self.mox.StubOutWithMock(objects.InstanceList,
- 'get_by_filters')
- objects.InstanceList.get_by_filters(
- {'read_deleted': 'yes'},
- {'deleted': True, 'soft_deleted': False, 'host': 'fake-mini',
- 'cleaned': False},
- expected_attrs=['info_cache', 'security_groups',
- 'system_metadata'],
- use_slave=True).AndReturn([a, b, c])
-
- self.mox.StubOutWithMock(self.compute.driver, 'delete_instance_files')
- self.compute.driver.delete_instance_files(
- mox.IgnoreArg()).AndReturn(True)
- self.compute.driver.delete_instance_files(
- mox.IgnoreArg()).AndReturn(False)
-
- self.mox.ReplayAll()
-
- self.compute._run_pending_deletes({})
- self.assertFalse(a.cleaned)
- self.assertEqual('100', a.system_metadata['clean_attempts'])
- self.assertTrue(b.cleaned)
- self.assertEqual('4', b.system_metadata['clean_attempts'])
- self.assertFalse(c.cleaned)
- self.assertEqual('1', c.system_metadata['clean_attempts'])
-
- def test_attach_interface_failure(self):
- # Test that the fault methods are invoked when an attach fails
- db_instance = fake_instance.fake_db_instance()
- f_instance = objects.Instance._from_db_object(self.context,
- objects.Instance(),
- db_instance)
- e = exception.InterfaceAttachFailed(instance_uuid=f_instance.uuid)
-
- @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
- @mock.patch.object(self.compute.network_api,
- 'allocate_port_for_instance',
- side_effect=e)
- def do_test(meth, add_fault):
- self.assertRaises(exception.InterfaceAttachFailed,
- self.compute.attach_interface,
- self.context, f_instance, 'net_id', 'port_id',
- None)
- add_fault.assert_has_calls(
- mock.call(self.context, f_instance, e,
- mock.ANY))
-
- do_test()
-
- def test_detach_interface_failure(self):
- # Test that the fault methods are invoked when a detach fails
-
- # Build test data that will cause a PortNotFound exception
- f_instance = mock.MagicMock()
- f_instance.info_cache = mock.MagicMock()
- f_instance.info_cache.network_info = []
-
- @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
- @mock.patch.object(self.compute, '_set_instance_error_state')
- def do_test(meth, add_fault):
- self.assertRaises(exception.PortNotFound,
- self.compute.detach_interface,
- self.context, f_instance, 'port_id')
- add_fault.assert_has_calls(
- mock.call(self.context, f_instance, mock.ANY, mock.ANY))
-
- do_test()
-
- def test_swap_volume_volume_api_usage(self):
- # This test ensures that volume_id arguments are passed to volume_api
- # and that volume states are OK
- volumes = {}
- old_volume_id = uuidutils.generate_uuid()
- volumes[old_volume_id] = {'id': old_volume_id,
- 'display_name': 'old_volume',
- 'status': 'detaching',
- 'size': 1}
- new_volume_id = uuidutils.generate_uuid()
- volumes[new_volume_id] = {'id': new_volume_id,
- 'display_name': 'new_volume',
- 'status': 'available',
- 'size': 2}
-
- def fake_vol_api_roll_detaching(context, volume_id):
- self.assertTrue(uuidutils.is_uuid_like(volume_id))
- if volumes[volume_id]['status'] == 'detaching':
- volumes[volume_id]['status'] = 'in-use'
-
- fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
- {'device_name': '/dev/vdb', 'source_type': 'volume',
- 'destination_type': 'volume', 'instance_uuid': 'fake',
- 'connection_info': '{"foo": "bar"}'})
-
- def fake_vol_api_func(context, volume, *args):
- self.assertTrue(uuidutils.is_uuid_like(volume))
- return {}
-
- def fake_vol_get(context, volume_id):
- self.assertTrue(uuidutils.is_uuid_like(volume_id))
- return volumes[volume_id]
-
- def fake_vol_unreserve(context, volume_id):
- self.assertTrue(uuidutils.is_uuid_like(volume_id))
- if volumes[volume_id]['status'] == 'attaching':
- volumes[volume_id]['status'] = 'available'
-
- def fake_vol_migrate_volume_completion(context, old_volume_id,
- new_volume_id, error=False):
- self.assertTrue(uuidutils.is_uuid_like(old_volume_id))
- self.assertTrue(uuidutils.is_uuid_like(new_volume_id))
- volumes[old_volume_id]['status'] = 'in-use'
- return {'save_volume_id': new_volume_id}
-
- def fake_func_exc(*args, **kwargs):
- raise AttributeError # Random exception
-
- def fake_swap_volume(old_connection_info, new_connection_info,
- instance, mountpoint, resize_to):
- self.assertEqual(resize_to, 2)
-
- self.stubs.Set(self.compute.volume_api, 'roll_detaching',
- fake_vol_api_roll_detaching)
- self.stubs.Set(self.compute.volume_api, 'get', fake_vol_get)
- self.stubs.Set(self.compute.volume_api, 'initialize_connection',
- fake_vol_api_func)
- self.stubs.Set(self.compute.volume_api, 'unreserve_volume',
- fake_vol_unreserve)
- self.stubs.Set(self.compute.volume_api, 'terminate_connection',
- fake_vol_api_func)
- self.stubs.Set(db, 'block_device_mapping_get_by_volume_id',
- lambda x, y, z: fake_bdm)
- self.stubs.Set(self.compute.driver, 'get_volume_connector',
- lambda x: {})
- self.stubs.Set(self.compute.driver, 'swap_volume',
- fake_swap_volume)
- self.stubs.Set(self.compute.volume_api, 'migrate_volume_completion',
- fake_vol_migrate_volume_completion)
- self.stubs.Set(db, 'block_device_mapping_update',
- lambda *a, **k: fake_bdm)
- self.stubs.Set(db,
- 'instance_fault_create',
- lambda x, y:
- test_instance_fault.fake_faults['fake-uuid'][0])
-
- # Good path
- self.compute.swap_volume(self.context, old_volume_id, new_volume_id,
- fake_instance.fake_instance_obj(
- self.context, **{'uuid': 'fake'}))
- self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
-
- # Error paths
- volumes[old_volume_id]['status'] = 'detaching'
- volumes[new_volume_id]['status'] = 'attaching'
- self.stubs.Set(self.compute.driver, 'swap_volume', fake_func_exc)
- self.assertRaises(AttributeError, self.compute.swap_volume,
- self.context, old_volume_id, new_volume_id,
- fake_instance.fake_instance_obj(
- self.context, **{'uuid': 'fake'}))
- self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
- self.assertEqual(volumes[new_volume_id]['status'], 'available')
-
- volumes[old_volume_id]['status'] = 'detaching'
- volumes[new_volume_id]['status'] = 'attaching'
- self.stubs.Set(self.compute.volume_api, 'initialize_connection',
- fake_func_exc)
- self.assertRaises(AttributeError, self.compute.swap_volume,
- self.context, old_volume_id, new_volume_id,
- fake_instance.fake_instance_obj(
- self.context, **{'uuid': 'fake'}))
- self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
- self.assertEqual(volumes[new_volume_id]['status'], 'available')
-
- def test_check_can_live_migrate_source(self):
- is_volume_backed = 'volume_backed'
- dest_check_data = dict(foo='bar')
- db_instance = fake_instance.fake_db_instance()
- instance = objects.Instance._from_db_object(
- self.context, objects.Instance(), db_instance)
- expected_dest_check_data = dict(dest_check_data,
- is_volume_backed=is_volume_backed)
-
- self.mox.StubOutWithMock(self.compute.compute_api,
- 'is_volume_backed_instance')
- self.mox.StubOutWithMock(self.compute,
- '_get_instance_block_device_info')
- self.mox.StubOutWithMock(self.compute.driver,
- 'check_can_live_migrate_source')
-
- self.compute.compute_api.is_volume_backed_instance(
- self.context, instance).AndReturn(is_volume_backed)
- self.compute._get_instance_block_device_info(
- self.context, instance, refresh_conn_info=True
- ).AndReturn({'block_device_mapping': 'fake'})
- self.compute.driver.check_can_live_migrate_source(
- self.context, instance, expected_dest_check_data,
- {'block_device_mapping': 'fake'})
-
- self.mox.ReplayAll()
-
- self.compute.check_can_live_migrate_source(
- self.context, instance=instance,
- dest_check_data=dest_check_data)
-
- def _test_check_can_live_migrate_destination(self, do_raise=False,
- has_mig_data=False):
- db_instance = fake_instance.fake_db_instance(host='fake-host')
- instance = objects.Instance._from_db_object(
- self.context, objects.Instance(), db_instance)
- instance.host = 'fake-host'
- block_migration = 'block_migration'
- disk_over_commit = 'disk_over_commit'
- src_info = 'src_info'
- dest_info = 'dest_info'
- dest_check_data = dict(foo='bar')
- mig_data = dict(cow='moo')
- expected_result = dict(mig_data)
- if has_mig_data:
- dest_check_data['migrate_data'] = dict(cat='meow')
- expected_result.update(cat='meow')
-
- self.mox.StubOutWithMock(self.compute, '_get_compute_info')
- self.mox.StubOutWithMock(self.compute.driver,
- 'check_can_live_migrate_destination')
- self.mox.StubOutWithMock(self.compute.compute_rpcapi,
- 'check_can_live_migrate_source')
- self.mox.StubOutWithMock(self.compute.driver,
- 'check_can_live_migrate_destination_cleanup')
-
- self.compute._get_compute_info(self.context,
- 'fake-host').AndReturn(src_info)
- self.compute._get_compute_info(self.context,
- CONF.host).AndReturn(dest_info)
- self.compute.driver.check_can_live_migrate_destination(
- self.context, instance, src_info, dest_info,
- block_migration, disk_over_commit).AndReturn(dest_check_data)
-
- mock_meth = self.compute.compute_rpcapi.check_can_live_migrate_source(
- self.context, instance, dest_check_data)
- if do_raise:
- mock_meth.AndRaise(test.TestingException())
- self.mox.StubOutWithMock(db, 'instance_fault_create')
- db.instance_fault_create(
- self.context, mox.IgnoreArg()).AndReturn(
- test_instance_fault.fake_faults['fake-uuid'][0])
- else:
- mock_meth.AndReturn(mig_data)
- self.compute.driver.check_can_live_migrate_destination_cleanup(
- self.context, dest_check_data)
-
- self.mox.ReplayAll()
-
- result = self.compute.check_can_live_migrate_destination(
- self.context, instance=instance,
- block_migration=block_migration,
- disk_over_commit=disk_over_commit)
- self.assertEqual(expected_result, result)
-
- def test_check_can_live_migrate_destination_success(self):
- self._test_check_can_live_migrate_destination()
-
- def test_check_can_live_migrate_destination_success_w_mig_data(self):
- self._test_check_can_live_migrate_destination(has_mig_data=True)
-
- def test_check_can_live_migrate_destination_fail(self):
- self.assertRaises(
- test.TestingException,
- self._test_check_can_live_migrate_destination,
- do_raise=True)
-
- @mock.patch('nova.compute.manager.InstanceEvents._lock_name')
- def test_prepare_for_instance_event(self, lock_name_mock):
- inst_obj = objects.Instance(uuid='foo')
- result = self.compute.instance_events.prepare_for_instance_event(
- inst_obj, 'test-event')
- self.assertIn('foo', self.compute.instance_events._events)
- self.assertIn('test-event',
- self.compute.instance_events._events['foo'])
- self.assertEqual(
- result,
- self.compute.instance_events._events['foo']['test-event'])
- self.assertTrue(hasattr(result, 'send'))
- lock_name_mock.assert_called_once_with(inst_obj)
-
- @mock.patch('nova.compute.manager.InstanceEvents._lock_name')
- def test_pop_instance_event(self, lock_name_mock):
- event = eventlet_event.Event()
- self.compute.instance_events._events = {
- 'foo': {
- 'test-event': event,
- }
- }
- inst_obj = objects.Instance(uuid='foo')
- event_obj = objects.InstanceExternalEvent(name='test-event',
- tag=None)
- result = self.compute.instance_events.pop_instance_event(inst_obj,
- event_obj)
- self.assertEqual(result, event)
- lock_name_mock.assert_called_once_with(inst_obj)
-
- @mock.patch('nova.compute.manager.InstanceEvents._lock_name')
- def test_clear_events_for_instance(self, lock_name_mock):
- event = eventlet_event.Event()
- self.compute.instance_events._events = {
- 'foo': {
- 'test-event': event,
- }
- }
- inst_obj = objects.Instance(uuid='foo')
- result = self.compute.instance_events.clear_events_for_instance(
- inst_obj)
- self.assertEqual(result, {'test-event': event})
- lock_name_mock.assert_called_once_with(inst_obj)
-
- def test_instance_events_lock_name(self):
- inst_obj = objects.Instance(uuid='foo')
- result = self.compute.instance_events._lock_name(inst_obj)
- self.assertEqual(result, 'foo-events')
-
- def test_prepare_for_instance_event_again(self):
- inst_obj = objects.Instance(uuid='foo')
- self.compute.instance_events.prepare_for_instance_event(
- inst_obj, 'test-event')
- # A second attempt will avoid creating a new list; make sure we
- # get the current list
- result = self.compute.instance_events.prepare_for_instance_event(
- inst_obj, 'test-event')
- self.assertIn('foo', self.compute.instance_events._events)
- self.assertIn('test-event',
- self.compute.instance_events._events['foo'])
- self.assertEqual(
- result,
- self.compute.instance_events._events['foo']['test-event'])
- self.assertTrue(hasattr(result, 'send'))
-
- def test_process_instance_event(self):
- event = eventlet_event.Event()
- self.compute.instance_events._events = {
- 'foo': {
- 'test-event': event,
- }
- }
- inst_obj = objects.Instance(uuid='foo')
- event_obj = objects.InstanceExternalEvent(name='test-event', tag=None)
- self.compute._process_instance_event(inst_obj, event_obj)
- self.assertTrue(event.ready())
- self.assertEqual(event_obj, event.wait())
- self.assertEqual({}, self.compute.instance_events._events)
-
- def test_external_instance_event(self):
- instances = [
- objects.Instance(id=1, uuid='uuid1'),
- objects.Instance(id=2, uuid='uuid2')]
- events = [
- objects.InstanceExternalEvent(name='network-changed',
- tag='tag1',
- instance_uuid='uuid1'),
- objects.InstanceExternalEvent(name='foo', instance_uuid='uuid2',
- tag='tag2')]
-
- @mock.patch.object(self.compute.network_api, 'get_instance_nw_info')
- @mock.patch.object(self.compute, '_process_instance_event')
- def do_test(_process_instance_event, get_instance_nw_info):
- self.compute.external_instance_event(self.context,
- instances, events)
- get_instance_nw_info.assert_called_once_with(self.context,
- instances[0])
- _process_instance_event.assert_called_once_with(instances[1],
- events[1])
- do_test()
-
- def test_retry_reboot_pending_soft(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.task_state = task_states.REBOOT_PENDING
- instance.vm_state = vm_states.ACTIVE
- with mock.patch.object(self.compute, '_get_power_state',
- return_value=power_state.RUNNING):
- allow_reboot, reboot_type = self.compute._retry_reboot(
- context, instance)
- self.assertTrue(allow_reboot)
- self.assertEqual(reboot_type, 'SOFT')
-
- def test_retry_reboot_pending_hard(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.task_state = task_states.REBOOT_PENDING_HARD
- instance.vm_state = vm_states.ACTIVE
- with mock.patch.object(self.compute, '_get_power_state',
- return_value=power_state.RUNNING):
- allow_reboot, reboot_type = self.compute._retry_reboot(
- context, instance)
- self.assertTrue(allow_reboot)
- self.assertEqual(reboot_type, 'HARD')
-
- def test_retry_reboot_starting_soft_off(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.task_state = task_states.REBOOT_STARTED
- with mock.patch.object(self.compute, '_get_power_state',
- return_value=power_state.NOSTATE):
- allow_reboot, reboot_type = self.compute._retry_reboot(
- context, instance)
- self.assertTrue(allow_reboot)
- self.assertEqual(reboot_type, 'HARD')
-
- def test_retry_reboot_starting_hard_off(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.task_state = task_states.REBOOT_STARTED_HARD
- with mock.patch.object(self.compute, '_get_power_state',
- return_value=power_state.NOSTATE):
- allow_reboot, reboot_type = self.compute._retry_reboot(
- context, instance)
- self.assertTrue(allow_reboot)
- self.assertEqual(reboot_type, 'HARD')
-
- def test_retry_reboot_starting_hard_on(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.task_state = task_states.REBOOT_STARTED_HARD
- with mock.patch.object(self.compute, '_get_power_state',
- return_value=power_state.RUNNING):
- allow_reboot, reboot_type = self.compute._retry_reboot(
- context, instance)
- self.assertFalse(allow_reboot)
- self.assertEqual(reboot_type, 'HARD')
-
- def test_retry_reboot_no_reboot(self):
- instance = objects.Instance(self.context)
- instance.uuid = 'foo'
- instance.task_state = 'bar'
- with mock.patch.object(self.compute, '_get_power_state',
- return_value=power_state.RUNNING):
- allow_reboot, reboot_type = self.compute._retry_reboot(
- context, instance)
- self.assertFalse(allow_reboot)
- self.assertEqual(reboot_type, 'HARD')
-
- @mock.patch('nova.objects.BlockDeviceMapping.get_by_volume_id')
- @mock.patch('nova.compute.manager.ComputeManager._detach_volume')
- @mock.patch('nova.objects.Instance._from_db_object')
- def test_remove_volume_connection(self, inst_from_db, detach, bdm_get):
- bdm = mock.sentinel.bdm
- inst_obj = mock.sentinel.inst_obj
- bdm_get.return_value = bdm
- inst_from_db.return_value = inst_obj
- with mock.patch.object(self.compute, 'volume_api'):
- self.compute.remove_volume_connection(self.context, 'vol',
- inst_obj)
- detach.assert_called_once_with(self.context, inst_obj, bdm)
-
- def _test_rescue(self, clean_shutdown=True):
- instance = fake_instance.fake_instance_obj(
- self.context, vm_state=vm_states.ACTIVE)
- fake_nw_info = network_model.NetworkInfo()
- rescue_image_meta = {'id': 'fake', 'name': 'fake'}
- with contextlib.nested(
- mock.patch.object(objects.InstanceActionEvent, 'event_start'),
- mock.patch.object(objects.InstanceActionEvent,
- 'event_finish_with_failure'),
- mock.patch.object(self.context, 'elevated',
- return_value=self.context),
- mock.patch.object(self.compute, '_get_instance_nw_info',
- return_value=fake_nw_info),
- mock.patch.object(self.compute, '_get_rescue_image',
- return_value=rescue_image_meta),
- mock.patch.object(self.compute, '_notify_about_instance_usage'),
- mock.patch.object(self.compute, '_power_off_instance'),
- mock.patch.object(self.compute.driver, 'rescue'),
- mock.patch.object(self.compute.conductor_api,
- 'notify_usage_exists'),
- mock.patch.object(self.compute, '_get_power_state',
- return_value=power_state.RUNNING),
- mock.patch.object(instance, 'save')
- ) as (
- event_start, event_finish, elevated_context, get_nw_info,
- get_rescue_image, notify_instance_usage, power_off_instance,
- driver_rescue, notify_usage_exists, get_power_state, instance_save
- ):
- self.compute.rescue_instance(
- self.context, instance, rescue_password='verybadpass',
- rescue_image_ref=None, clean_shutdown=clean_shutdown)
-
- # assert the field values on the instance object
- self.assertEqual(vm_states.RESCUED, instance.vm_state)
- self.assertIsNone(instance.task_state)
- self.assertEqual(power_state.RUNNING, instance.power_state)
- self.assertIsNotNone(instance.launched_at)
-
- # assert our mock calls
- get_nw_info.assert_called_once_with(self.context, instance)
- get_rescue_image.assert_called_once_with(
- self.context, instance, None)
-
- extra_usage_info = {'rescue_image_name': 'fake'}
- notify_calls = [
- mock.call(self.context, instance, "rescue.start",
- extra_usage_info=extra_usage_info,
- network_info=fake_nw_info),
- mock.call(self.context, instance, "rescue.end",
- extra_usage_info=extra_usage_info,
- network_info=fake_nw_info)
- ]
- notify_instance_usage.assert_has_calls(notify_calls)
-
- power_off_instance.assert_called_once_with(self.context, instance,
- clean_shutdown)
-
- driver_rescue.assert_called_once_with(
- self.context, instance, fake_nw_info, rescue_image_meta,
- 'verybadpass')
-
- notify_usage_exists.assert_called_once_with(
- self.context, instance, current_period=True)
-
- instance_save.assert_called_once_with(
- expected_task_state=task_states.RESCUING)
-
- def test_rescue(self):
- self._test_rescue()
-
- def test_rescue_forced_shutdown(self):
- self._test_rescue(clean_shutdown=False)
-
- def test_unrescue(self):
- instance = fake_instance.fake_instance_obj(
- self.context, vm_state=vm_states.RESCUED)
- fake_nw_info = network_model.NetworkInfo()
- with contextlib.nested(
- mock.patch.object(objects.InstanceActionEvent, 'event_start'),
- mock.patch.object(objects.InstanceActionEvent,
- 'event_finish_with_failure'),
- mock.patch.object(self.context, 'elevated',
- return_value=self.context),
- mock.patch.object(self.compute, '_get_instance_nw_info',
- return_value=fake_nw_info),
- mock.patch.object(self.compute, '_notify_about_instance_usage'),
- mock.patch.object(self.compute.driver, 'unrescue'),
- mock.patch.object(self.compute, '_get_power_state',
- return_value=power_state.RUNNING),
- mock.patch.object(instance, 'save')
- ) as (
- event_start, event_finish, elevated_context, get_nw_info,
- notify_instance_usage, driver_unrescue, get_power_state,
- instance_save
- ):
- self.compute.unrescue_instance(self.context, instance)
-
- # assert the field values on the instance object
- self.assertEqual(vm_states.ACTIVE, instance.vm_state)
- self.assertIsNone(instance.task_state)
- self.assertEqual(power_state.RUNNING, instance.power_state)
-
- # assert our mock calls
- get_nw_info.assert_called_once_with(self.context, instance)
-
- notify_calls = [
- mock.call(self.context, instance, "unrescue.start",
- network_info=fake_nw_info),
- mock.call(self.context, instance, "unrescue.end",
- network_info=fake_nw_info)
- ]
- notify_instance_usage.assert_has_calls(notify_calls)
-
- driver_unrescue.assert_called_once_with(instance, fake_nw_info)
-
- instance_save.assert_called_once_with(
- expected_task_state=task_states.UNRESCUING)
-
- @mock.patch.object(objects.InstanceActionEvent, 'event_start')
- @mock.patch.object(objects.InstanceActionEvent,
- 'event_finish_with_failure')
- @mock.patch('nova.compute.manager.ComputeManager._get_power_state',
- return_value=power_state.RUNNING)
- @mock.patch.object(objects.Instance, 'save')
- @mock.patch('nova.utils.generate_password', return_value='fake-pass')
- def test_set_admin_password(self, gen_password_mock,
- instance_save_mock, power_state_mock,
- event_finish_mock, event_start_mock):
- # Ensure instance can have its admin password set.
- instance = fake_instance.fake_instance_obj(
- self.context,
- vm_state=vm_states.ACTIVE,
- task_state=task_states.UPDATING_PASSWORD)
-
- @mock.patch.object(self.context, 'elevated', return_value=self.context)
- @mock.patch.object(self.compute.driver, 'set_admin_password')
- def do_test(driver_mock, elevated_mock):
- # call the manager method
- self.compute.set_admin_password(self.context, instance, None)
- # make our assertions
- self.assertEqual(vm_states.ACTIVE, instance.vm_state)
- self.assertIsNone(instance.task_state)
-
- power_state_mock.assert_called_once_with(self.context, instance)
- driver_mock.assert_called_once_with(instance, 'fake-pass')
- instance_save_mock.assert_called_once_with(
- expected_task_state=task_states.UPDATING_PASSWORD)
-
- do_test()
-
- @mock.patch.object(objects.InstanceActionEvent, 'event_start')
- @mock.patch.object(objects.InstanceActionEvent,
- 'event_finish_with_failure')
- @mock.patch('nova.compute.manager.ComputeManager._get_power_state',
- return_value=power_state.NOSTATE)
- @mock.patch.object(objects.Instance, 'save')
- @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
- def test_set_admin_password_bad_state(self, add_fault_mock,
- instance_save_mock, power_state_mock,
- event_finish_mock, event_start_mock):
- # Test setting password while instance is rebuilding.
- instance = fake_instance.fake_instance_obj(self.context)
- with mock.patch.object(self.context, 'elevated',
- return_value=self.context):
- # call the manager method
- self.assertRaises(exception.InstancePasswordSetFailed,
- self.compute.set_admin_password,
- self.context, instance, None)
-
- # make our assertions
- power_state_mock.assert_called_once_with(self.context, instance)
- instance_save_mock.assert_called_once_with(
- expected_task_state=task_states.UPDATING_PASSWORD)
- add_fault_mock.assert_called_once_with(
- self.context, instance, mock.ANY, mock.ANY)
-
- @mock.patch.object(objects.InstanceActionEvent, 'event_start')
- @mock.patch.object(objects.InstanceActionEvent,
- 'event_finish_with_failure')
- @mock.patch('nova.utils.generate_password', return_value='fake-pass')
- @mock.patch('nova.compute.manager.ComputeManager._get_power_state',
- return_value=power_state.RUNNING)
- @mock.patch('nova.compute.manager.ComputeManager._instance_update')
- @mock.patch.object(objects.Instance, 'save')
- @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
- def _do_test_set_admin_password_driver_error(self, exc,
- expected_vm_state,
- expected_task_state,
- expected_exception,
- add_fault_mock,
- instance_save_mock,
- update_mock,
- power_state_mock,
- gen_password_mock,
- event_finish_mock,
- event_start_mock):
- # Ensure expected exception is raised if set_admin_password fails.
- instance = fake_instance.fake_instance_obj(
- self.context,
- vm_state=vm_states.ACTIVE,
- task_state=task_states.UPDATING_PASSWORD)
-
- @mock.patch.object(self.context, 'elevated', return_value=self.context)
- @mock.patch.object(self.compute.driver, 'set_admin_password',
- side_effect=exc)
- def do_test(driver_mock, elevated_mock):
- # error raised from the driver should not reveal internal
- # information so a new error is raised
- self.assertRaises(expected_exception,
- self.compute.set_admin_password,
- self.context,
- instance=instance,
- new_pass=None)
-
- if expected_exception == NotImplementedError:
- instance_save_mock.assert_called_once_with(
- expected_task_state=task_states.UPDATING_PASSWORD)
- else:
- # setting the instance to error state
- instance_save_mock.assert_called_once_with()
-
- self.assertEqual(expected_vm_state, instance.vm_state)
- # check revert_task_state decorator
- update_mock.assert_called_once_with(
- self.context, instance.uuid,
- task_state=expected_task_state)
- # check wrap_instance_fault decorator
- add_fault_mock.assert_called_once_with(
- self.context, instance, mock.ANY, mock.ANY)
-
- do_test()
-
- def test_set_admin_password_driver_not_authorized(self):
- # Ensure expected exception is raised if set_admin_password not
- # authorized.
- exc = exception.Forbidden('Internal error')
- expected_exception = exception.InstancePasswordSetFailed
- self._do_test_set_admin_password_driver_error(
- exc, vm_states.ERROR, None, expected_exception)
-
- def test_set_admin_password_driver_not_implemented(self):
- # Ensure expected exception is raised if set_admin_password not
- # implemented by driver.
- exc = NotImplementedError()
- expected_exception = NotImplementedError
- self._do_test_set_admin_password_driver_error(
- exc, vm_states.ACTIVE, None, expected_exception)
-
- def _test_init_host_with_partial_migration(self, task_state=None,
- vm_state=vm_states.ACTIVE):
- our_host = self.compute.host
- instance_1 = objects.Instance(self.context)
- instance_1.uuid = 'foo'
- instance_1.task_state = task_state
- instance_1.vm_state = vm_state
- instance_1.host = 'not-' + our_host
- instance_2 = objects.Instance(self.context)
- instance_2.uuid = 'bar'
- instance_2.task_state = None
- instance_2.vm_state = vm_states.ACTIVE
- instance_2.host = 'not-' + our_host
-
- with contextlib.nested(
- mock.patch.object(self.compute, '_get_instances_on_driver',
- return_value=[instance_1,
- instance_2]),
- mock.patch.object(self.compute, '_get_instance_nw_info',
- return_value=None),
- mock.patch.object(self.compute, '_get_instance_block_device_info',
- return_value={}),
- mock.patch.object(self.compute, '_is_instance_storage_shared',
- return_value=False),
- mock.patch.object(self.compute.driver, 'destroy')
- ) as (_get_instances_on_driver, _get_instance_nw_info,
- _get_instance_block_device_info, _is_instance_storage_shared,
- destroy):
- self.compute._destroy_evacuated_instances(self.context)
- destroy.assert_called_once_with(self.context, instance_2, None,
- {}, True)
-
- def test_init_host_with_partial_migration_migrating(self):
- self._test_init_host_with_partial_migration(
- task_state=task_states.MIGRATING)
-
- def test_init_host_with_partial_migration_resize_migrating(self):
- self._test_init_host_with_partial_migration(
- task_state=task_states.RESIZE_MIGRATING)
-
- def test_init_host_with_partial_migration_resize_migrated(self):
- self._test_init_host_with_partial_migration(
- task_state=task_states.RESIZE_MIGRATED)
-
- def test_init_host_with_partial_migration_finish_resize(self):
- self._test_init_host_with_partial_migration(
- task_state=task_states.RESIZE_FINISH)
-
- def test_init_host_with_partial_migration_resized(self):
- self._test_init_host_with_partial_migration(
- vm_state=vm_states.RESIZED)
-
- @mock.patch('nova.compute.manager.ComputeManager._instance_update')
- def test_error_out_instance_on_exception_not_implemented_err(self,
- inst_update_mock):
- instance = fake_instance.fake_instance_obj(self.context)
-
- def do_test():
- with self.compute._error_out_instance_on_exception(
- self.context, instance, instance_state=vm_states.STOPPED):
- raise NotImplementedError('test')
-
- self.assertRaises(NotImplementedError, do_test)
- inst_update_mock.assert_called_once_with(
- self.context, instance.uuid,
- vm_state=vm_states.STOPPED, task_state=None)
-
- @mock.patch('nova.compute.manager.ComputeManager._instance_update')
- def test_error_out_instance_on_exception_inst_fault_rollback(self,
- inst_update_mock):
- instance = fake_instance.fake_instance_obj(self.context)
-
- def do_test():
- with self.compute._error_out_instance_on_exception(self.context,
- instance):
- raise exception.InstanceFaultRollback(
- inner_exception=test.TestingException('test'))
-
- self.assertRaises(test.TestingException, do_test)
- inst_update_mock.assert_called_once_with(
- self.context, instance.uuid,
- vm_state=vm_states.ACTIVE, task_state=None)
-
- @mock.patch('nova.compute.manager.ComputeManager.'
- '_set_instance_error_state')
- def test_error_out_instance_on_exception_unknown_with_quotas(self,
- set_error):
- instance = fake_instance.fake_instance_obj(self.context)
- quotas = mock.create_autospec(objects.Quotas, spec_set=True)
-
- def do_test():
- with self.compute._error_out_instance_on_exception(
- self.context, instance, quotas):
- raise test.TestingException('test')
-
- self.assertRaises(test.TestingException, do_test)
- self.assertEqual(1, len(quotas.method_calls))
- self.assertEqual(mock.call.rollback(), quotas.method_calls[0])
- set_error.assert_called_once_with(self.context, instance)
-
- def test_cleanup_volumes(self):
- instance = fake_instance.fake_instance_obj(self.context)
- bdm_do_not_delete_dict = fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': 'fake-id1', 'source_type': 'image',
- 'delete_on_termination': False})
- bdm_delete_dict = fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': 'fake-id2', 'source_type': 'image',
- 'delete_on_termination': True})
- bdms = block_device_obj.block_device_make_list(self.context,
- [bdm_do_not_delete_dict, bdm_delete_dict])
-
- with mock.patch.object(self.compute.volume_api,
- 'delete') as volume_delete:
- self.compute._cleanup_volumes(self.context, instance.uuid, bdms)
- volume_delete.assert_called_once_with(self.context,
- bdms[1].volume_id)
-
- def test_cleanup_volumes_exception_do_not_raise(self):
- instance = fake_instance.fake_instance_obj(self.context)
- bdm_dict1 = fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': 'fake-id1', 'source_type': 'image',
- 'delete_on_termination': True})
- bdm_dict2 = fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': 'fake-id2', 'source_type': 'image',
- 'delete_on_termination': True})
- bdms = block_device_obj.block_device_make_list(self.context,
- [bdm_dict1, bdm_dict2])
-
- with mock.patch.object(self.compute.volume_api,
- 'delete',
- side_effect=[test.TestingException(), None]) as volume_delete:
- self.compute._cleanup_volumes(self.context, instance.uuid, bdms,
- raise_exc=False)
- calls = [mock.call(self.context, bdm.volume_id) for bdm in bdms]
- self.assertEqual(calls, volume_delete.call_args_list)
-
- def test_cleanup_volumes_exception_raise(self):
- instance = fake_instance.fake_instance_obj(self.context)
- bdm_dict1 = fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': 'fake-id1', 'source_type': 'image',
- 'delete_on_termination': True})
- bdm_dict2 = fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': 'fake-id2', 'source_type': 'image',
- 'delete_on_termination': True})
- bdms = block_device_obj.block_device_make_list(self.context,
- [bdm_dict1, bdm_dict2])
-
- with mock.patch.object(self.compute.volume_api,
- 'delete',
- side_effect=[test.TestingException(), None]) as volume_delete:
- self.assertRaises(test.TestingException,
- self.compute._cleanup_volumes, self.context, instance.uuid,
- bdms)
- calls = [mock.call(self.context, bdm.volume_id) for bdm in bdms]
- self.assertEqual(calls, volume_delete.call_args_list)
-
- def test_start_building(self):
- instance = fake_instance.fake_instance_obj(self.context)
- with mock.patch.object(self.compute, '_instance_update') as update:
- self.compute._start_building(self.context, instance)
- update.assert_called_once_with(
- self.context, instance.uuid, vm_state=vm_states.BUILDING,
- task_state=None, expected_task_state=(task_states.SCHEDULING,
- None))
-
- def _test_prebuild_instance_build_abort_exception(self, exc):
- instance = fake_instance.fake_instance_obj(self.context)
- with contextlib.nested(
- mock.patch.object(self.compute, '_check_instance_exists'),
- mock.patch.object(self.compute, '_start_building',
- side_effect=exc)
- ) as (
- check, start
- ):
- # run the code
- self.assertRaises(exception.BuildAbortException,
- self.compute._prebuild_instance,
- self.context, instance)
- # assert the calls
- check.assert_called_once_with(self.context, instance)
- start.assert_called_once_with(self.context, instance)
-
- def test_prebuild_instance_instance_not_found(self):
- self._test_prebuild_instance_build_abort_exception(
- exception.InstanceNotFound(instance_id='fake'))
-
- def test_prebuild_instance_unexpected_deleting_task_state_err(self):
- self._test_prebuild_instance_build_abort_exception(
- exception.UnexpectedDeletingTaskStateError(expected='foo',
- actual='bar'))
-
- def test_stop_instance_task_state_none_power_state_shutdown(self):
- # Tests that stop_instance doesn't puke when the instance power_state
- # is shutdown and the task_state is None.
- instance = fake_instance.fake_instance_obj(
- self.context, vm_state=vm_states.ACTIVE,
- task_state=None, power_state=power_state.SHUTDOWN)
-
- @mock.patch.object(objects.InstanceActionEvent, 'event_start')
- @mock.patch.object(objects.InstanceActionEvent,
- 'event_finish_with_failure')
- @mock.patch.object(self.compute, '_get_power_state',
- return_value=power_state.SHUTDOWN)
- @mock.patch.object(self.compute, '_notify_about_instance_usage')
- @mock.patch.object(self.compute, '_power_off_instance')
- @mock.patch.object(instance, 'save')
- def do_test(save_mock, power_off_mock, notify_mock, get_state_mock,
- event_finish_mock, event_start_mock):
- # run the code
- self.compute.stop_instance(self.context, instance)
- # assert the calls
- self.assertEqual(2, get_state_mock.call_count)
- notify_mock.assert_has_calls([
- mock.call(self.context, instance, 'power_off.start'),
- mock.call(self.context, instance, 'power_off.end')
- ])
- power_off_mock.assert_called_once_with(
- self.context, instance, True)
- save_mock.assert_called_once_with(
- expected_task_state=[task_states.POWERING_OFF, None])
- self.assertEqual(power_state.SHUTDOWN, instance.power_state)
- self.assertIsNone(instance.task_state)
- self.assertEqual(vm_states.STOPPED, instance.vm_state)
-
- do_test()
-
- def test_reset_network_driver_not_implemented(self):
- instance = fake_instance.fake_instance_obj(self.context)
-
- @mock.patch.object(self.compute.driver, 'reset_network',
- side_effect=NotImplementedError())
- @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
- def do_test(mock_add_fault, mock_reset):
- self.assertRaises(messaging.ExpectedException,
- self.compute.reset_network,
- self.context,
- instance)
-
- self.compute = utils.ExceptionHelper(self.compute)
-
- self.assertRaises(NotImplementedError,
- self.compute.reset_network,
- self.context,
- instance)
-
- do_test()
-
- def test_rebuild_default_impl(self):
- def _detach(context, bdms):
- pass
-
- def _attach(context, instance, bdms, do_check_attach=True):
- return {'block_device_mapping': 'shared_block_storage'}
-
- def _spawn(context, instance, image_meta, injected_files,
- admin_password, network_info=None, block_device_info=None):
- self.assertEqual(block_device_info['block_device_mapping'],
- 'shared_block_storage')
-
- with contextlib.nested(
- mock.patch.object(self.compute.driver, 'destroy',
- return_value=None),
- mock.patch.object(self.compute.driver, 'spawn',
- side_effect=_spawn),
- mock.patch.object(objects.Instance, 'save',
- return_value=None)
- ) as(
- mock_destroy,
- mock_spawn,
- mock_save
- ):
- instance = fake_instance.fake_instance_obj(self.context)
- instance.task_state = task_states.REBUILDING
- instance.save(expected_task_state=[task_states.REBUILDING])
- self.compute._rebuild_default_impl(self.context,
- instance,
- None,
- [],
- admin_password='new_pass',
- bdms=[],
- detach_block_devices=_detach,
- attach_block_devices=_attach,
- network_info=None,
- recreate=True,
- block_device_info=None,
- preserve_ephemeral=False)
-
- self.assertFalse(mock_destroy.called)
- self.assertTrue(mock_save.called)
- self.assertTrue(mock_spawn.called)
-
-
-class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
- def setUp(self):
- super(ComputeManagerBuildInstanceTestCase, self).setUp()
- self.compute = importutils.import_object(CONF.compute_manager)
- self.context = context.RequestContext('fake', 'fake')
- self.instance = fake_instance.fake_instance_obj(self.context,
- vm_state=vm_states.ACTIVE,
- expected_attrs=['metadata', 'system_metadata', 'info_cache'])
- self.admin_pass = 'pass'
- self.injected_files = []
- self.image = {}
- self.node = 'fake-node'
- self.limits = {}
- self.requested_networks = []
- self.security_groups = []
- self.block_device_mapping = []
- self.filter_properties = {'retry': {'num_attempts': 1,
- 'hosts': [[self.compute.host,
- 'fake-node']]}}
-
- def fake_network_info():
- return network_model.NetworkInfo()
-
- self.network_info = network_model.NetworkInfoAsyncWrapper(
- fake_network_info)
- self.block_device_info = self.compute._prep_block_device(context,
- self.instance, self.block_device_mapping)
-
- # override tracker with a version that doesn't need the database:
- fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
- self.compute.driver, self.node)
- self.compute._resource_tracker_dict[self.node] = fake_rt
-
- def _do_build_instance_update(self, reschedule_update=False):
- self.mox.StubOutWithMock(self.instance, 'save')
- self.instance.save(
- expected_task_state=(task_states.SCHEDULING, None)).AndReturn(
- self.instance)
- if reschedule_update:
- self.instance.save().AndReturn(self.instance)
-
- def _build_and_run_instance_update(self):
- self.mox.StubOutWithMock(self.instance, 'save')
- self._build_resources_instance_update(stub=False)
- self.instance.save(expected_task_state=
- task_states.BLOCK_DEVICE_MAPPING).AndReturn(self.instance)
-
- def _build_resources_instance_update(self, stub=True):
- if stub:
- self.mox.StubOutWithMock(self.instance, 'save')
- self.instance.save().AndReturn(self.instance)
-
- def _notify_about_instance_usage(self, event, stub=True, **kwargs):
- if stub:
- self.mox.StubOutWithMock(self.compute,
- '_notify_about_instance_usage')
- self.compute._notify_about_instance_usage(self.context, self.instance,
- event, **kwargs)
-
- def _instance_action_events(self):
- self.mox.StubOutWithMock(objects.InstanceActionEvent, 'event_start')
- self.mox.StubOutWithMock(objects.InstanceActionEvent,
- 'event_finish_with_failure')
- objects.InstanceActionEvent.event_start(
- self.context, self.instance.uuid, mox.IgnoreArg(),
- want_result=False)
- objects.InstanceActionEvent.event_finish_with_failure(
- self.context, self.instance.uuid, mox.IgnoreArg(),
- exc_val=mox.IgnoreArg(), exc_tb=mox.IgnoreArg(),
- want_result=False)
-
- @mock.patch('nova.utils.spawn_n')
- def test_build_and_run_instance_called_with_proper_args(self, mock_spawn):
- mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
- self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
- self._do_build_instance_update()
- self.compute._build_and_run_instance(self.context, self.instance,
- self.image, self.injected_files, self.admin_pass,
- self.requested_networks, self.security_groups,
- self.block_device_mapping, self.node, self.limits,
- self.filter_properties)
- self._instance_action_events()
- self.mox.ReplayAll()
-
- self.compute.build_and_run_instance(self.context, self.instance,
- self.image, request_spec={},
- filter_properties=self.filter_properties,
- injected_files=self.injected_files,
- admin_password=self.admin_pass,
- requested_networks=self.requested_networks,
- security_groups=self.security_groups,
- block_device_mapping=self.block_device_mapping, node=self.node,
- limits=self.limits)
-
- # This test when sending an icehouse compatible rpc call to juno compute
- # node, NetworkRequest object can load from three items tuple.
- @mock.patch('nova.objects.InstanceActionEvent.event_finish_with_failure')
- @mock.patch('nova.objects.InstanceActionEvent.event_start')
- @mock.patch('nova.objects.Instance.save')
- @mock.patch('nova.compute.manager.ComputeManager._build_and_run_instance')
- @mock.patch('nova.utils.spawn_n')
- def test_build_and_run_instance_with_icehouse_requested_network(
- self, mock_spawn, mock_build_and_run, mock_save, mock_event_start,
- mock_event_finish):
- mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
- mock_save.return_value = self.instance
- self.compute.build_and_run_instance(self.context, self.instance,
- self.image, request_spec={},
- filter_properties=self.filter_properties,
- injected_files=self.injected_files,
- admin_password=self.admin_pass,
- requested_networks=[('fake_network_id', '10.0.0.1',
- 'fake_port_id')],
- security_groups=self.security_groups,
- block_device_mapping=self.block_device_mapping, node=self.node,
- limits=self.limits)
- requested_network = mock_build_and_run.call_args[0][5][0]
- self.assertEqual('fake_network_id', requested_network.network_id)
- self.assertEqual('10.0.0.1', str(requested_network.address))
- self.assertEqual('fake_port_id', requested_network.port_id)
-
- @mock.patch('nova.utils.spawn_n')
- def test_build_abort_exception(self, mock_spawn):
- def fake_spawn(f, *args, **kwargs):
- # NOTE(danms): Simulate the detached nature of spawn so that
- # we confirm that the inner task has the fault logic
- try:
- return f(*args, **kwargs)
- except Exception:
- pass
-
- mock_spawn.side_effect = fake_spawn
-
- self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
- self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
- self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
- self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
- self.mox.StubOutWithMock(self.compute, '_set_instance_error_state')
- self.mox.StubOutWithMock(self.compute.compute_task_api,
- 'build_instances')
- self._do_build_instance_update()
- self.compute._build_and_run_instance(self.context, self.instance,
- self.image, self.injected_files, self.admin_pass,
- self.requested_networks, self.security_groups,
- self.block_device_mapping, self.node, self.limits,
- self.filter_properties).AndRaise(
- exception.BuildAbortException(reason='',
- instance_uuid=self.instance.uuid))
- self.compute._cleanup_allocated_networks(self.context, self.instance,
- self.requested_networks)
- self.compute._cleanup_volumes(self.context, self.instance.uuid,
- self.block_device_mapping, raise_exc=False)
- compute_utils.add_instance_fault_from_exc(self.context,
- self.instance, mox.IgnoreArg(), mox.IgnoreArg())
- self.compute._set_instance_error_state(self.context, self.instance)
- self._instance_action_events()
- self.mox.ReplayAll()
-
- self.compute.build_and_run_instance(self.context, self.instance,
- self.image, request_spec={},
- filter_properties=self.filter_properties,
- injected_files=self.injected_files,
- admin_password=self.admin_pass,
- requested_networks=self.requested_networks,
- security_groups=self.security_groups,
- block_device_mapping=self.block_device_mapping, node=self.node,
- limits=self.limits)
-
- @mock.patch('nova.utils.spawn_n')
- def test_rescheduled_exception(self, mock_spawn):
- mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
- self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
- self.mox.StubOutWithMock(self.compute, '_set_instance_error_state')
- self.mox.StubOutWithMock(self.compute.compute_task_api,
- 'build_instances')
- self._do_build_instance_update(reschedule_update=True)
- self.compute._build_and_run_instance(self.context, self.instance,
- self.image, self.injected_files, self.admin_pass,
- self.requested_networks, self.security_groups,
- self.block_device_mapping, self.node, self.limits,
- self.filter_properties).AndRaise(
- exception.RescheduledException(reason='',
- instance_uuid=self.instance.uuid))
- self.compute.compute_task_api.build_instances(self.context,
- [self.instance], self.image, self.filter_properties,
- self.admin_pass, self.injected_files, self.requested_networks,
- self.security_groups, self.block_device_mapping)
- self._instance_action_events()
- self.mox.ReplayAll()
-
- self.compute.build_and_run_instance(self.context, self.instance,
- self.image, request_spec={},
- filter_properties=self.filter_properties,
- injected_files=self.injected_files,
- admin_password=self.admin_pass,
- requested_networks=self.requested_networks,
- security_groups=self.security_groups,
- block_device_mapping=self.block_device_mapping, node=self.node,
- limits=self.limits)
-
- def test_rescheduled_exception_with_non_ascii_exception(self):
- exc = exception.NovaException(u's\xe9quence')
- self.mox.StubOutWithMock(self.compute.driver, 'spawn')
- self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
- 'instance_update')
- self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
- self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
- self.compute._build_networks_for_instance(self.context, self.instance,
- self.requested_networks, self.security_groups).AndReturn(
- self.network_info)
- self.compute._shutdown_instance(self.context, self.instance,
- self.block_device_mapping, self.requested_networks,
- try_deallocate_networks=False)
- self._notify_about_instance_usage('create.start',
- extra_usage_info={'image_name': self.image.get('name')})
- self._build_and_run_instance_update()
- self.compute.driver.spawn(self.context, self.instance, self.image,
- self.injected_files, self.admin_pass,
- network_info=self.network_info,
- block_device_info=self.block_device_info).AndRaise(exc)
- self._notify_about_instance_usage('create.error',
- fault=exc, stub=False)
- conductor_rpcapi.ConductorAPI.instance_update(
- self.context, self.instance['uuid'], mox.IgnoreArg(), 'conductor')
- self.mox.ReplayAll()
-
- self.assertRaises(exception.RescheduledException,
- self.compute._build_and_run_instance, self.context,
- self.instance, self.image, self.injected_files,
- self.admin_pass, self.requested_networks, self.security_groups,
- self.block_device_mapping, self.node,
- self.limits, self.filter_properties)
-
- @mock.patch('nova.utils.spawn_n')
- def test_rescheduled_exception_without_retry(self, mock_spawn):
- mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
- self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
- self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
- self.mox.StubOutWithMock(self.compute, '_set_instance_error_state')
- self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
- self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
- self._do_build_instance_update()
- self.compute._build_and_run_instance(self.context, self.instance,
- self.image, self.injected_files, self.admin_pass,
- self.requested_networks, self.security_groups,
- self.block_device_mapping, self.node, self.limits,
- {}).AndRaise(
- exception.RescheduledException(reason='',
- instance_uuid=self.instance.uuid))
- self.compute._cleanup_allocated_networks(self.context, self.instance,
- self.requested_networks)
- compute_utils.add_instance_fault_from_exc(self.context, self.instance,
- mox.IgnoreArg(), mox.IgnoreArg())
- self.compute._set_instance_error_state(self.context,
- self.instance)
- self._instance_action_events()
- self.mox.ReplayAll()
-
- self.compute.build_and_run_instance(self.context, self.instance,
- self.image, request_spec={},
- filter_properties={},
- injected_files=self.injected_files,
- admin_password=self.admin_pass,
- requested_networks=self.requested_networks,
- security_groups=self.security_groups,
- block_device_mapping=self.block_device_mapping, node=self.node,
- limits=self.limits)
-
- @mock.patch('nova.utils.spawn_n')
- def test_rescheduled_exception_do_not_deallocate_network(self, mock_spawn):
- mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
- self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
- self.mox.StubOutWithMock(self.compute.driver,
- 'deallocate_networks_on_reschedule')
- self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
- self.mox.StubOutWithMock(self.compute.compute_task_api,
- 'build_instances')
- self._do_build_instance_update(reschedule_update=True)
- self.compute._build_and_run_instance(self.context, self.instance,
- self.image, self.injected_files, self.admin_pass,
- self.requested_networks, self.security_groups,
- self.block_device_mapping, self.node, self.limits,
- self.filter_properties).AndRaise(
- exception.RescheduledException(reason='',
- instance_uuid=self.instance.uuid))
- self.compute.driver.deallocate_networks_on_reschedule(
- self.instance).AndReturn(False)
- self.compute.compute_task_api.build_instances(self.context,
- [self.instance], self.image, self.filter_properties,
- self.admin_pass, self.injected_files, self.requested_networks,
- self.security_groups, self.block_device_mapping)
- self._instance_action_events()
- self.mox.ReplayAll()
-
- self.compute.build_and_run_instance(self.context, self.instance,
- self.image, request_spec={},
- filter_properties=self.filter_properties,
- injected_files=self.injected_files,
- admin_password=self.admin_pass,
- requested_networks=self.requested_networks,
- security_groups=self.security_groups,
- block_device_mapping=self.block_device_mapping, node=self.node,
- limits=self.limits)
-
- @mock.patch('nova.utils.spawn_n')
- def test_rescheduled_exception_deallocate_network(self, mock_spawn):
- mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
- self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
- self.mox.StubOutWithMock(self.compute.driver,
- 'deallocate_networks_on_reschedule')
- self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
- self.mox.StubOutWithMock(self.compute.compute_task_api,
- 'build_instances')
- self._do_build_instance_update(reschedule_update=True)
- self.compute._build_and_run_instance(self.context, self.instance,
- self.image, self.injected_files, self.admin_pass,
- self.requested_networks, self.security_groups,
- self.block_device_mapping, self.node, self.limits,
- self.filter_properties).AndRaise(
- exception.RescheduledException(reason='',
- instance_uuid=self.instance.uuid))
- self.compute.driver.deallocate_networks_on_reschedule(
- self.instance).AndReturn(True)
- self.compute._cleanup_allocated_networks(self.context, self.instance,
- self.requested_networks)
- self.compute.compute_task_api.build_instances(self.context,
- [self.instance], self.image, self.filter_properties,
- self.admin_pass, self.injected_files, self.requested_networks,
- self.security_groups, self.block_device_mapping)
- self._instance_action_events()
- self.mox.ReplayAll()
-
- self.compute.build_and_run_instance(self.context, self.instance,
- self.image, request_spec={},
- filter_properties=self.filter_properties,
- injected_files=self.injected_files,
- admin_password=self.admin_pass,
- requested_networks=self.requested_networks,
- security_groups=self.security_groups,
- block_device_mapping=self.block_device_mapping, node=self.node,
- limits=self.limits)
-
- def _test_build_and_run_exceptions(self, exc, set_error=False,
- cleanup_volumes=False):
- self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
- self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
- self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
- self.mox.StubOutWithMock(self.compute.compute_task_api,
- 'build_instances')
- self._do_build_instance_update()
- self.compute._build_and_run_instance(self.context, self.instance,
- self.image, self.injected_files, self.admin_pass,
- self.requested_networks, self.security_groups,
- self.block_device_mapping, self.node, self.limits,
- self.filter_properties).AndRaise(exc)
- self.compute._cleanup_allocated_networks(self.context, self.instance,
- self.requested_networks)
- if cleanup_volumes:
- self.compute._cleanup_volumes(self.context, self.instance.uuid,
- self.block_device_mapping, raise_exc=False)
- if set_error:
- self.mox.StubOutWithMock(self.compute, '_set_instance_error_state')
- self.mox.StubOutWithMock(compute_utils,
- 'add_instance_fault_from_exc')
- compute_utils.add_instance_fault_from_exc(self.context,
- self.instance, mox.IgnoreArg(), mox.IgnoreArg())
- self.compute._set_instance_error_state(self.context, self.instance)
- self._instance_action_events()
- self.mox.ReplayAll()
-
- with mock.patch('nova.utils.spawn_n') as mock_spawn:
- mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
- self.compute.build_and_run_instance(self.context, self.instance,
- self.image, request_spec={},
- filter_properties=self.filter_properties,
- injected_files=self.injected_files,
- admin_password=self.admin_pass,
- requested_networks=self.requested_networks,
- security_groups=self.security_groups,
- block_device_mapping=self.block_device_mapping, node=self.node,
- limits=self.limits)
-
- def test_build_and_run_notfound_exception(self):
- self._test_build_and_run_exceptions(exception.InstanceNotFound(
- instance_id=''))
-
- def test_build_and_run_unexpecteddeleting_exception(self):
- self._test_build_and_run_exceptions(
- exception.UnexpectedDeletingTaskStateError(expected='',
- actual=''))
-
- def test_build_and_run_buildabort_exception(self):
- self._test_build_and_run_exceptions(exception.BuildAbortException(
- instance_uuid='', reason=''), set_error=True, cleanup_volumes=True)
-
- def test_build_and_run_unhandled_exception(self):
- self._test_build_and_run_exceptions(test.TestingException(),
- set_error=True, cleanup_volumes=True)
-
- def test_instance_not_found(self):
- exc = exception.InstanceNotFound(instance_id=1)
- self.mox.StubOutWithMock(self.compute.driver, 'spawn')
- self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
- 'instance_update')
- self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
- self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
- self.compute._build_networks_for_instance(self.context, self.instance,
- self.requested_networks, self.security_groups).AndReturn(
- self.network_info)
- self.compute._shutdown_instance(self.context, self.instance,
- self.block_device_mapping, self.requested_networks,
- try_deallocate_networks=False)
- self._notify_about_instance_usage('create.start',
- extra_usage_info={'image_name': self.image.get('name')})
- self._build_and_run_instance_update()
- self.compute.driver.spawn(self.context, self.instance, self.image,
- self.injected_files, self.admin_pass,
- network_info=self.network_info,
- block_device_info=self.block_device_info).AndRaise(exc)
- self._notify_about_instance_usage('create.end',
- fault=exc, stub=False)
- conductor_rpcapi.ConductorAPI.instance_update(
- self.context, self.instance.uuid, mox.IgnoreArg(), 'conductor')
- self.mox.ReplayAll()
-
- self.assertRaises(exception.InstanceNotFound,
- self.compute._build_and_run_instance, self.context,
- self.instance, self.image, self.injected_files,
- self.admin_pass, self.requested_networks, self.security_groups,
- self.block_device_mapping, self.node,
- self.limits, self.filter_properties)
-
- def test_reschedule_on_exception(self):
- self.mox.StubOutWithMock(self.compute.driver, 'spawn')
- self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
- 'instance_update')
- self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
- self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
- self.compute._build_networks_for_instance(self.context, self.instance,
- self.requested_networks, self.security_groups).AndReturn(
- self.network_info)
- self.compute._shutdown_instance(self.context, self.instance,
- self.block_device_mapping, self.requested_networks,
- try_deallocate_networks=False)
- self._notify_about_instance_usage('create.start',
- extra_usage_info={'image_name': self.image.get('name')})
- self._build_and_run_instance_update()
- exc = test.TestingException()
- self.compute.driver.spawn(self.context, self.instance, self.image,
- self.injected_files, self.admin_pass,
- network_info=self.network_info,
- block_device_info=self.block_device_info).AndRaise(exc)
- conductor_rpcapi.ConductorAPI.instance_update(
- self.context, self.instance.uuid, mox.IgnoreArg(), 'conductor')
- self._notify_about_instance_usage('create.error',
- fault=exc, stub=False)
- self.mox.ReplayAll()
-
- self.assertRaises(exception.RescheduledException,
- self.compute._build_and_run_instance, self.context,
- self.instance, self.image, self.injected_files,
- self.admin_pass, self.requested_networks, self.security_groups,
- self.block_device_mapping, self.node,
- self.limits, self.filter_properties)
-
- def test_spawn_network_alloc_failure(self):
- # Because network allocation is asynchronous, failures may not present
- # themselves until the virt spawn method is called.
- self._test_build_and_run_spawn_exceptions(exception.NoMoreNetworks())
-
- def test_build_and_run_flavor_disk_too_small_exception(self):
- self._test_build_and_run_spawn_exceptions(
- exception.FlavorDiskTooSmall())
-
- def test_build_and_run_flavor_memory_too_small_exception(self):
- self._test_build_and_run_spawn_exceptions(
- exception.FlavorMemoryTooSmall())
-
- def test_build_and_run_image_not_active_exception(self):
- self._test_build_and_run_spawn_exceptions(
- exception.ImageNotActive(image_id=self.image.get('id')))
-
- def test_build_and_run_image_unacceptable_exception(self):
- self._test_build_and_run_spawn_exceptions(
- exception.ImageUnacceptable(image_id=self.image.get('id'),
- reason=""))
-
- def _test_build_and_run_spawn_exceptions(self, exc):
- with contextlib.nested(
- mock.patch.object(self.compute.driver, 'spawn',
- side_effect=exc),
- mock.patch.object(conductor_rpcapi.ConductorAPI,
- 'instance_update'),
- mock.patch.object(self.instance, 'save',
- side_effect=[self.instance, self.instance]),
- mock.patch.object(self.compute,
- '_build_networks_for_instance',
- return_value=self.network_info),
- mock.patch.object(self.compute,
- '_notify_about_instance_usage'),
- mock.patch.object(self.compute,
- '_shutdown_instance'),
- mock.patch.object(self.compute,
- '_validate_instance_group_policy')
- ) as (spawn, instance_update, save,
- _build_networks_for_instance, _notify_about_instance_usage,
- _shutdown_instance, _validate_instance_group_policy):
-
- self.assertRaises(exception.BuildAbortException,
- self.compute._build_and_run_instance, self.context,
- self.instance, self.image, self.injected_files,
- self.admin_pass, self.requested_networks,
- self.security_groups, self.block_device_mapping, self.node,
- self.limits, self.filter_properties)
-
- _validate_instance_group_policy.assert_called_once_with(
- self.context, self.instance, self.filter_properties)
- _build_networks_for_instance.assert_has_calls(
- mock.call(self.context, self.instance,
- self.requested_networks, self.security_groups))
-
- _notify_about_instance_usage.assert_has_calls([
- mock.call(self.context, self.instance, 'create.start',
- extra_usage_info={'image_name': self.image.get('name')}),
- mock.call(self.context, self.instance, 'create.error',
- fault=exc)])
-
- save.assert_has_calls([
- mock.call(),
- mock.call(
- expected_task_state=task_states.BLOCK_DEVICE_MAPPING)])
-
- spawn.assert_has_calls(mock.call(self.context, self.instance,
- self.image, self.injected_files, self.admin_pass,
- network_info=self.network_info,
- block_device_info=self.block_device_info))
-
- instance_update.assert_has_calls(mock.call(self.context,
- self.instance.uuid, mock.ANY, 'conductor'))
-
- _shutdown_instance.assert_called_once_with(self.context,
- self.instance, self.block_device_mapping,
- self.requested_networks, try_deallocate_networks=False)
-
- @mock.patch('nova.compute.manager.ComputeManager._get_power_state')
- def test_spawn_waits_for_network_and_saves_info_cache(self, gps):
- inst = mock.MagicMock()
- network_info = mock.MagicMock()
- with mock.patch.object(self.compute, 'driver'):
- self.compute._spawn(self.context, inst, {}, network_info, None,
- None, None)
- network_info.wait.assert_called_once_with(do_raise=True)
- self.assertEqual(network_info, inst.info_cache.network_info)
- inst.save.assert_called_with(expected_task_state=task_states.SPAWNING)
-
- @mock.patch('nova.utils.spawn_n')
- def test_reschedule_on_resources_unavailable(self, mock_spawn):
- mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
- reason = 'resource unavailable'
- exc = exception.ComputeResourcesUnavailable(reason=reason)
-
- class FakeResourceTracker(object):
- def instance_claim(self, context, instance, limits):
- raise exc
-
- self.mox.StubOutWithMock(self.compute, '_get_resource_tracker')
- self.mox.StubOutWithMock(self.compute.compute_task_api,
- 'build_instances')
- self.compute._get_resource_tracker(self.node).AndReturn(
- FakeResourceTracker())
- self._do_build_instance_update(reschedule_update=True)
- self._notify_about_instance_usage('create.start',
- extra_usage_info={'image_name': self.image.get('name')})
- self._notify_about_instance_usage('create.error',
- fault=exc, stub=False)
- self.compute.compute_task_api.build_instances(self.context,
- [self.instance], self.image, self.filter_properties,
- self.admin_pass, self.injected_files, self.requested_networks,
- self.security_groups, self.block_device_mapping)
- self._instance_action_events()
- self.mox.ReplayAll()
-
- self.compute.build_and_run_instance(self.context, self.instance,
- self.image, request_spec={},
- filter_properties=self.filter_properties,
- injected_files=self.injected_files,
- admin_password=self.admin_pass,
- requested_networks=self.requested_networks,
- security_groups=self.security_groups,
- block_device_mapping=self.block_device_mapping, node=self.node,
- limits=self.limits)
-
- def test_build_resources_buildabort_reraise(self):
- exc = exception.BuildAbortException(
- instance_uuid=self.instance.uuid, reason='')
- self.mox.StubOutWithMock(self.compute, '_build_resources')
- self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
- 'instance_update')
- conductor_rpcapi.ConductorAPI.instance_update(
- self.context, self.instance.uuid, mox.IgnoreArg(), 'conductor')
- self._notify_about_instance_usage('create.start',
- extra_usage_info={'image_name': self.image.get('name')})
- self.compute._build_resources(self.context, self.instance,
- self.requested_networks, self.security_groups, self.image,
- self.block_device_mapping).AndRaise(exc)
- self._notify_about_instance_usage('create.error',
- fault=exc, stub=False)
- self.mox.ReplayAll()
- self.assertRaises(exception.BuildAbortException,
- self.compute._build_and_run_instance, self.context,
- self.instance, self.image, self.injected_files,
- self.admin_pass, self.requested_networks,
- self.security_groups, self.block_device_mapping, self.node,
- self.limits, self.filter_properties)
-
- def test_build_resources_reraises_on_failed_bdm_prep(self):
- self.mox.StubOutWithMock(self.compute, '_prep_block_device')
- self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
- self.compute._build_networks_for_instance(self.context, self.instance,
- self.requested_networks, self.security_groups).AndReturn(
- self.network_info)
- self._build_resources_instance_update()
- self.compute._prep_block_device(self.context, self.instance,
- self.block_device_mapping).AndRaise(test.TestingException())
- self.mox.ReplayAll()
-
- try:
- with self.compute._build_resources(self.context, self.instance,
- self.requested_networks, self.security_groups,
- self.image, self.block_device_mapping):
- pass
- except Exception as e:
- self.assertIsInstance(e, exception.BuildAbortException)
-
- def test_failed_bdm_prep_from_delete_raises_unexpected(self):
- with contextlib.nested(
- mock.patch.object(self.compute,
- '_build_networks_for_instance',
- return_value=self.network_info),
- mock.patch.object(self.instance, 'save',
- side_effect=exception.UnexpectedDeletingTaskStateError(
- actual=task_states.DELETING, expected='None')),
- ) as (_build_networks_for_instance, save):
-
- try:
- with self.compute._build_resources(self.context, self.instance,
- self.requested_networks, self.security_groups,
- self.image, self.block_device_mapping):
- pass
- except Exception as e:
- self.assertIsInstance(e,
- exception.UnexpectedDeletingTaskStateError)
-
- _build_networks_for_instance.assert_has_calls(
- mock.call(self.context, self.instance,
- self.requested_networks, self.security_groups))
-
- save.assert_has_calls(mock.call())
-
- def test_build_resources_aborts_on_failed_network_alloc(self):
- self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
- self.compute._build_networks_for_instance(self.context, self.instance,
- self.requested_networks, self.security_groups).AndRaise(
- test.TestingException())
- self.mox.ReplayAll()
-
- try:
- with self.compute._build_resources(self.context, self.instance,
- self.requested_networks, self.security_groups, self.image,
- self.block_device_mapping):
- pass
- except Exception as e:
- self.assertIsInstance(e, exception.BuildAbortException)
-
- def test_failed_network_alloc_from_delete_raises_unexpected(self):
- with mock.patch.object(self.compute,
- '_build_networks_for_instance') as _build_networks:
-
- exc = exception.UnexpectedDeletingTaskStateError
- _build_networks.side_effect = exc(actual=task_states.DELETING,
- expected='None')
-
- try:
- with self.compute._build_resources(self.context, self.instance,
- self.requested_networks, self.security_groups,
- self.image, self.block_device_mapping):
- pass
- except Exception as e:
- self.assertIsInstance(e, exc)
-
- _build_networks.assert_has_calls(
- mock.call(self.context, self.instance,
- self.requested_networks, self.security_groups))
-
- def test_build_resources_with_network_info_obj_on_spawn_failure(self):
- self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
- self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
- self.compute._build_networks_for_instance(self.context, self.instance,
- self.requested_networks, self.security_groups).AndReturn(
- network_model.NetworkInfo())
- self.compute._shutdown_instance(self.context, self.instance,
- self.block_device_mapping, self.requested_networks,
- try_deallocate_networks=False)
- self._build_resources_instance_update()
- self.mox.ReplayAll()
-
- test_exception = test.TestingException()
-
- def fake_spawn():
- raise test_exception
-
- try:
- with self.compute._build_resources(self.context, self.instance,
- self.requested_networks, self.security_groups,
- self.image, self.block_device_mapping):
- fake_spawn()
- except Exception as e:
- self.assertEqual(test_exception, e)
-
- def test_build_resources_cleans_up_and_reraises_on_spawn_failure(self):
- self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
- self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
- self.compute._build_networks_for_instance(self.context, self.instance,
- self.requested_networks, self.security_groups).AndReturn(
- self.network_info)
- self.compute._shutdown_instance(self.context, self.instance,
- self.block_device_mapping, self.requested_networks,
- try_deallocate_networks=False)
- self._build_resources_instance_update()
- self.mox.ReplayAll()
-
- test_exception = test.TestingException()
-
- def fake_spawn():
- raise test_exception
-
- try:
- with self.compute._build_resources(self.context, self.instance,
- self.requested_networks, self.security_groups,
- self.image, self.block_device_mapping):
- fake_spawn()
- except Exception as e:
- self.assertEqual(test_exception, e)
-
- @mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait')
- @mock.patch(
- 'nova.compute.manager.ComputeManager._build_networks_for_instance')
- @mock.patch('nova.objects.Instance.save')
- def test_build_resources_instance_not_found_before_yield(
- self, mock_save, mock_build_network, mock_info_wait):
- mock_build_network.return_value = self.network_info
- expected_exc = exception.InstanceNotFound(
- instance_id=self.instance.uuid)
- mock_save.side_effect = expected_exc
- try:
- with self.compute._build_resources(self.context, self.instance,
- self.requested_networks, self.security_groups,
- self.image, self.block_device_mapping):
- raise
- except Exception as e:
- self.assertEqual(expected_exc, e)
- mock_build_network.assert_called_once_with(self.context, self.instance,
- self.requested_networks, self.security_groups)
- mock_info_wait.assert_called_once_with(do_raise=False)
-
- @mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait')
- @mock.patch(
- 'nova.compute.manager.ComputeManager._build_networks_for_instance')
- @mock.patch('nova.objects.Instance.save')
- def test_build_resources_unexpected_task_error_before_yield(
- self, mock_save, mock_build_network, mock_info_wait):
- mock_build_network.return_value = self.network_info
- mock_save.side_effect = exception.UnexpectedTaskStateError(
- expected='', actual='')
- try:
- with self.compute._build_resources(self.context, self.instance,
- self.requested_networks, self.security_groups,
- self.image, self.block_device_mapping):
- raise
- except exception.BuildAbortException:
- pass
- mock_build_network.assert_called_once_with(self.context, self.instance,
- self.requested_networks, self.security_groups)
- mock_info_wait.assert_called_once_with(do_raise=False)
-
- @mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait')
- @mock.patch(
- 'nova.compute.manager.ComputeManager._build_networks_for_instance')
- @mock.patch('nova.objects.Instance.save')
- def test_build_resources_exception_before_yield(
- self, mock_save, mock_build_network, mock_info_wait):
- mock_build_network.return_value = self.network_info
- mock_save.side_effect = Exception()
- try:
- with self.compute._build_resources(self.context, self.instance,
- self.requested_networks, self.security_groups,
- self.image, self.block_device_mapping):
- raise
- except exception.BuildAbortException:
- pass
- mock_build_network.assert_called_once_with(self.context, self.instance,
- self.requested_networks, self.security_groups)
- mock_info_wait.assert_called_once_with(do_raise=False)
-
- def test_build_resources_aborts_on_cleanup_failure(self):
- self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
- self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
- self.compute._build_networks_for_instance(self.context, self.instance,
- self.requested_networks, self.security_groups).AndReturn(
- self.network_info)
- self.compute._shutdown_instance(self.context, self.instance,
- self.block_device_mapping, self.requested_networks,
- try_deallocate_networks=False).AndRaise(
- test.TestingException())
- self._build_resources_instance_update()
- self.mox.ReplayAll()
-
- def fake_spawn():
- raise test.TestingException()
-
- try:
- with self.compute._build_resources(self.context, self.instance,
- self.requested_networks, self.security_groups,
- self.image, self.block_device_mapping):
- fake_spawn()
- except Exception as e:
- self.assertIsInstance(e, exception.BuildAbortException)
-
- def test_build_networks_if_not_allocated(self):
- instance = fake_instance.fake_instance_obj(self.context,
- system_metadata={},
- expected_attrs=['system_metadata'])
-
- self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
- self.mox.StubOutWithMock(self.compute, '_allocate_network')
- self.compute._allocate_network(self.context, instance,
- self.requested_networks, None, self.security_groups, None)
- self.mox.ReplayAll()
-
- self.compute._build_networks_for_instance(self.context, instance,
- self.requested_networks, self.security_groups)
-
- def test_build_networks_if_allocated_false(self):
- instance = fake_instance.fake_instance_obj(self.context,
- system_metadata=dict(network_allocated='False'),
- expected_attrs=['system_metadata'])
-
- self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
- self.mox.StubOutWithMock(self.compute, '_allocate_network')
- self.compute._allocate_network(self.context, instance,
- self.requested_networks, None, self.security_groups, None)
- self.mox.ReplayAll()
-
- self.compute._build_networks_for_instance(self.context, instance,
- self.requested_networks, self.security_groups)
-
- def test_return_networks_if_found(self):
- instance = fake_instance.fake_instance_obj(self.context,
- system_metadata=dict(network_allocated='True'),
- expected_attrs=['system_metadata'])
-
- def fake_network_info():
- return network_model.NetworkInfo([{'address': '123.123.123.123'}])
-
- self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
- self.mox.StubOutWithMock(self.compute, '_allocate_network')
- self.compute._get_instance_nw_info(self.context, instance).AndReturn(
- network_model.NetworkInfoAsyncWrapper(fake_network_info))
- self.mox.ReplayAll()
-
- self.compute._build_networks_for_instance(self.context, instance,
- self.requested_networks, self.security_groups)
-
- def test_cleanup_allocated_networks_instance_not_found(self):
- with contextlib.nested(
- mock.patch.object(self.compute, '_deallocate_network'),
- mock.patch.object(self.instance, 'save',
- side_effect=exception.InstanceNotFound(instance_id=''))
- ) as (_deallocate_network, save):
- # Testing that this doesn't raise an exeption
- self.compute._cleanup_allocated_networks(self.context,
- self.instance, self.requested_networks)
- save.assert_called_once_with()
- self.assertEqual('False',
- self.instance.system_metadata['network_allocated'])
-
- @mock.patch.object(conductor_rpcapi.ConductorAPI, 'instance_update')
- def test_launched_at_in_create_end_notification(self,
- mock_instance_update):
-
- def fake_notify(*args, **kwargs):
- if args[2] == 'create.end':
- # Check that launched_at is set on the instance
- self.assertIsNotNone(args[1].launched_at)
-
- with contextlib.nested(
- mock.patch.object(self.compute.driver, 'spawn'),
- mock.patch.object(self.compute,
- '_build_networks_for_instance', return_value=[]),
- mock.patch.object(self.instance, 'save'),
- mock.patch.object(self.compute, '_notify_about_instance_usage',
- side_effect=fake_notify)
- ) as (mock_spawn, mock_networks, mock_save, mock_notify):
- self.compute._build_and_run_instance(self.context, self.instance,
- self.image, self.injected_files, self.admin_pass,
- self.requested_networks, self.security_groups,
- self.block_device_mapping, self.node, self.limits,
- self.filter_properties)
- expected_call = mock.call(self.context, self.instance,
- 'create.end', extra_usage_info={'message': u'Success'},
- network_info=[])
- create_end_call = mock_notify.call_args_list[
- mock_notify.call_count - 1]
- self.assertEqual(expected_call, create_end_call)
-
- @mock.patch.object(conductor_rpcapi.ConductorAPI, 'instance_update')
- def test_create_end_on_instance_delete(self, mock_instance_update):
-
- def fake_notify(*args, **kwargs):
- if args[2] == 'create.end':
- # Check that launched_at is set on the instance
- self.assertIsNotNone(args[1].launched_at)
-
- exc = exception.InstanceNotFound(instance_id='')
-
- with contextlib.nested(
- mock.patch.object(self.compute.driver, 'spawn'),
- mock.patch.object(self.compute,
- '_build_networks_for_instance', return_value=[]),
- mock.patch.object(self.instance, 'save',
- side_effect=[None, None, exc]),
- mock.patch.object(self.compute, '_notify_about_instance_usage',
- side_effect=fake_notify)
- ) as (mock_spawn, mock_networks, mock_save, mock_notify):
- self.assertRaises(exception.InstanceNotFound,
- self.compute._build_and_run_instance, self.context,
- self.instance, self.image, self.injected_files,
- self.admin_pass, self.requested_networks,
- self.security_groups, self.block_device_mapping, self.node,
- self.limits, self.filter_properties)
- expected_call = mock.call(self.context, self.instance,
- 'create.end', fault=exc)
- create_end_call = mock_notify.call_args_list[
- mock_notify.call_count - 1]
- self.assertEqual(expected_call, create_end_call)
-
-
-class ComputeManagerMigrationTestCase(test.NoDBTestCase):
- def setUp(self):
- super(ComputeManagerMigrationTestCase, self).setUp()
- self.compute = importutils.import_object(CONF.compute_manager)
- self.context = context.RequestContext('fake', 'fake')
- self.image = {}
- self.instance = fake_instance.fake_instance_obj(self.context,
- vm_state=vm_states.ACTIVE,
- expected_attrs=['metadata', 'system_metadata', 'info_cache'])
- self.migration = objects.Migration()
- self.migration.status = 'migrating'
-
- def test_finish_resize_failure(self):
- elevated_context = self.context.elevated()
- with contextlib.nested(
- mock.patch.object(self.compute, '_finish_resize',
- side_effect=exception.ResizeError(reason='')),
- mock.patch.object(objects.InstanceActionEvent, 'event_start'),
- mock.patch.object(objects.InstanceActionEvent,
- 'event_finish_with_failure'),
- mock.patch.object(db, 'instance_fault_create'),
- mock.patch.object(self.compute, '_instance_update'),
- mock.patch.object(self.migration, 'save'),
- mock.patch.object(self.context, 'elevated',
- return_value=elevated_context)
- ) as (meth, event_start, event_finish, fault_create, instance_update,
- migration_save, context_elevated):
- fault_create.return_value = (
- test_instance_fault.fake_faults['fake-uuid'][0])
- self.assertRaises(
- exception.ResizeError, self.compute.finish_resize,
- context=self.context, disk_info=[], image=self.image,
- instance=self.instance, reservations=[],
- migration=self.migration
- )
- self.assertEqual("error", self.migration.status)
- migration_save.assert_has_calls([mock.call(elevated_context)])
-
- def test_resize_instance_failure(self):
- elevated_context = self.context.elevated()
- self.migration.dest_host = None
- with contextlib.nested(
- mock.patch.object(self.compute.driver,
- 'migrate_disk_and_power_off',
- side_effect=exception.ResizeError(reason='')),
- mock.patch.object(objects.InstanceActionEvent, 'event_start'),
- mock.patch.object(objects.InstanceActionEvent,
- 'event_finish_with_failure'),
- mock.patch.object(db, 'instance_fault_create'),
- mock.patch.object(self.compute, '_instance_update'),
- mock.patch.object(self.migration, 'save'),
- mock.patch.object(self.context, 'elevated',
- return_value=elevated_context),
- mock.patch.object(self.compute, '_get_instance_nw_info',
- return_value=None),
- mock.patch.object(self.instance, 'save'),
- mock.patch.object(self.compute, '_notify_about_instance_usage'),
- mock.patch.object(self.compute,
- '_get_instance_block_device_info',
- return_value=None),
- mock.patch.object(objects.BlockDeviceMappingList,
- 'get_by_instance_uuid',
- return_value=None)
- ) as (meth, event_start, event_finish, fault_create, instance_update,
- migration_save, context_elevated, nw_info, save_inst, notify,
- vol_block_info, bdm):
- fault_create.return_value = (
- test_instance_fault.fake_faults['fake-uuid'][0])
- self.assertRaises(
- exception.ResizeError, self.compute.resize_instance,
- context=self.context, instance=self.instance, image=self.image,
- reservations=[], migration=self.migration, instance_type='type'
- )
- self.assertEqual("error", self.migration.status)
- migration_save.assert_has_calls([mock.call(elevated_context)])
diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py
deleted file mode 100644
index 28732ed3fb..0000000000
--- a/nova/tests/compute/test_compute_utils.py
+++ /dev/null
@@ -1,827 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Tests For miscellaneous util methods used with compute."""
-
-import copy
-import string
-
-import mock
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-from oslo.utils import importutils
-import six
-import testtools
-
-from nova.compute import flavors
-from nova.compute import power_state
-from nova.compute import task_states
-from nova.compute import utils as compute_utils
-from nova import context
-from nova import db
-from nova import exception
-from nova.image import glance
-from nova.network import api as network_api
-from nova import objects
-from nova.objects import block_device as block_device_obj
-from nova.objects import instance as instance_obj
-from nova import rpc
-from nova import test
-from nova.tests import fake_block_device
-from nova.tests import fake_instance
-from nova.tests import fake_network
-from nova.tests import fake_notifier
-from nova.tests import fake_server_actions
-import nova.tests.image.fake
-from nova.tests import matchers
-from nova import utils
-from nova.virt import driver
-
-CONF = cfg.CONF
-CONF.import_opt('compute_manager', 'nova.service')
-CONF.import_opt('compute_driver', 'nova.virt.driver')
-
-
-class ComputeValidateDeviceTestCase(test.TestCase):
- def setUp(self):
- super(ComputeValidateDeviceTestCase, self).setUp()
- self.context = context.RequestContext('fake', 'fake')
- # check if test name includes "xen"
- if 'xen' in self.id():
- self.flags(compute_driver='xenapi.XenAPIDriver')
- self.instance = {
- 'uuid': 'fake',
- 'root_device_name': None,
- 'instance_type_id': 'fake',
- }
- else:
- self.instance = {
- 'uuid': 'fake',
- 'root_device_name': '/dev/vda',
- 'default_ephemeral_device': '/dev/vdb',
- 'instance_type_id': 'fake',
- }
- self.data = []
-
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- lambda context, instance, use_slave=False: self.data)
-
- def _update_flavor(self, flavor_info):
- self.flavor = {
- 'id': 1,
- 'name': 'foo',
- 'memory_mb': 128,
- 'vcpus': 1,
- 'root_gb': 10,
- 'ephemeral_gb': 10,
- 'flavorid': 1,
- 'swap': 0,
- 'rxtx_factor': 1.0,
- 'vcpu_weight': 1,
- }
- self.flavor.update(flavor_info)
- self.instance['system_metadata'] = [{'key': 'instance_type_%s' % key,
- 'value': value}
- for key, value in
- self.flavor.items()]
-
- def _validate_device(self, device=None):
- bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
- self.context, self.instance['uuid'])
- return compute_utils.get_device_name_for_instance(
- self.context, self.instance, bdms, device)
-
- @staticmethod
- def _fake_bdm(device):
- return fake_block_device.FakeDbBlockDeviceDict({
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'device_name': device,
- 'no_device': None,
- 'volume_id': 'fake',
- 'snapshot_id': None,
- 'guest_format': None
- })
-
- def test_wrap(self):
- self.data = []
- for letter in string.ascii_lowercase[2:]:
- self.data.append(self._fake_bdm('/dev/vd' + letter))
- device = self._validate_device()
- self.assertEqual(device, '/dev/vdaa')
-
- def test_wrap_plus_one(self):
- self.data = []
- for letter in string.ascii_lowercase[2:]:
- self.data.append(self._fake_bdm('/dev/vd' + letter))
- self.data.append(self._fake_bdm('/dev/vdaa'))
- device = self._validate_device()
- self.assertEqual(device, '/dev/vdab')
-
- def test_later(self):
- self.data = [
- self._fake_bdm('/dev/vdc'),
- self._fake_bdm('/dev/vdd'),
- self._fake_bdm('/dev/vde'),
- ]
- device = self._validate_device()
- self.assertEqual(device, '/dev/vdf')
-
- def test_gap(self):
- self.data = [
- self._fake_bdm('/dev/vdc'),
- self._fake_bdm('/dev/vde'),
- ]
- device = self._validate_device()
- self.assertEqual(device, '/dev/vdd')
-
- def test_no_bdms(self):
- self.data = []
- device = self._validate_device()
- self.assertEqual(device, '/dev/vdc')
-
- def test_lxc_names_work(self):
- self.instance['root_device_name'] = '/dev/a'
- self.instance['ephemeral_device_name'] = '/dev/b'
- self.data = []
- device = self._validate_device()
- self.assertEqual(device, '/dev/c')
-
- def test_name_conversion(self):
- self.data = []
- device = self._validate_device('/dev/c')
- self.assertEqual(device, '/dev/vdc')
- device = self._validate_device('/dev/sdc')
- self.assertEqual(device, '/dev/vdc')
- device = self._validate_device('/dev/xvdc')
- self.assertEqual(device, '/dev/vdc')
-
- def test_invalid_device_prefix(self):
- self.assertRaises(exception.InvalidDevicePath,
- self._validate_device, '/baddata/vdc')
-
- def test_device_in_use(self):
- exc = self.assertRaises(exception.DevicePathInUse,
- self._validate_device, '/dev/vda')
- self.assertIn('/dev/vda', six.text_type(exc))
-
- def test_swap(self):
- self.instance['default_swap_device'] = "/dev/vdc"
- device = self._validate_device()
- self.assertEqual(device, '/dev/vdd')
-
- def test_swap_no_ephemeral(self):
- del self.instance['default_ephemeral_device']
- self.instance['default_swap_device'] = "/dev/vdb"
- device = self._validate_device()
- self.assertEqual(device, '/dev/vdc')
-
- def test_ephemeral_xenapi(self):
- self._update_flavor({
- 'ephemeral_gb': 10,
- 'swap': 0,
- })
- self.stubs.Set(flavors, 'get_flavor',
- lambda instance_type_id, ctxt=None: self.flavor)
- device = self._validate_device()
- self.assertEqual(device, '/dev/xvdc')
-
- def test_swap_xenapi(self):
- self._update_flavor({
- 'ephemeral_gb': 0,
- 'swap': 10,
- })
- self.stubs.Set(flavors, 'get_flavor',
- lambda instance_type_id, ctxt=None: self.flavor)
- device = self._validate_device()
- self.assertEqual(device, '/dev/xvdb')
-
- def test_swap_and_ephemeral_xenapi(self):
- self._update_flavor({
- 'ephemeral_gb': 10,
- 'swap': 10,
- })
- self.stubs.Set(flavors, 'get_flavor',
- lambda instance_type_id, ctxt=None: self.flavor)
- device = self._validate_device()
- self.assertEqual(device, '/dev/xvdd')
-
- def test_swap_and_one_attachment_xenapi(self):
- self._update_flavor({
- 'ephemeral_gb': 0,
- 'swap': 10,
- })
- self.stubs.Set(flavors, 'get_flavor',
- lambda instance_type_id, ctxt=None: self.flavor)
- device = self._validate_device()
- self.assertEqual(device, '/dev/xvdb')
- self.data.append(self._fake_bdm(device))
- device = self._validate_device()
- self.assertEqual(device, '/dev/xvdd')
-
- def test_no_dev_root_device_name_get_next_name(self):
- self.instance['root_device_name'] = 'vda'
- device = self._validate_device()
- self.assertEqual('/dev/vdc', device)
-
-
-class DefaultDeviceNamesForInstanceTestCase(test.NoDBTestCase):
-
- def setUp(self):
- super(DefaultDeviceNamesForInstanceTestCase, self).setUp()
- self.context = context.RequestContext('fake', 'fake')
- self.ephemerals = block_device_obj.block_device_make_list(
- self.context,
- [fake_block_device.FakeDbBlockDeviceDict(
- {'id': 1, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/vdb',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'delete_on_termination': True,
- 'guest_format': None,
- 'boot_index': -1})])
-
- self.swap = block_device_obj.block_device_make_list(
- self.context,
- [fake_block_device.FakeDbBlockDeviceDict(
- {'id': 2, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/vdc',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'delete_on_termination': True,
- 'guest_format': 'swap',
- 'boot_index': -1})])
-
- self.block_device_mapping = block_device_obj.block_device_make_list(
- self.context,
- [fake_block_device.FakeDbBlockDeviceDict(
- {'id': 3, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/vda',
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'volume_id': 'fake-volume-id-1',
- 'boot_index': 0}),
- fake_block_device.FakeDbBlockDeviceDict(
- {'id': 4, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/vdd',
- 'source_type': 'snapshot',
- 'destination_type': 'volume',
- 'snapshot_id': 'fake-snapshot-id-1',
- 'boot_index': -1}),
- fake_block_device.FakeDbBlockDeviceDict(
- {'id': 5, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/vde',
- 'source_type': 'blank',
- 'destination_type': 'volume',
- 'boot_index': -1})])
- self.flavor = {'swap': 4}
- self.instance = {'uuid': 'fake_instance', 'ephemeral_gb': 2}
- self.is_libvirt = False
- self.root_device_name = '/dev/vda'
- self.update_called = False
-
- def fake_extract_flavor(instance):
- return self.flavor
-
- def fake_driver_matches(driver_string):
- if driver_string == 'libvirt.LibvirtDriver':
- return self.is_libvirt
- return False
-
- self.patchers = []
- self.patchers.append(
- mock.patch.object(objects.BlockDeviceMapping, 'save'))
- self.patchers.append(
- mock.patch.object(
- flavors, 'extract_flavor',
- new=mock.Mock(side_effect=fake_extract_flavor)))
- self.patchers.append(
- mock.patch.object(driver,
- 'compute_driver_matches',
- new=mock.Mock(
- side_effect=fake_driver_matches)))
- for patcher in self.patchers:
- patcher.start()
-
- def tearDown(self):
- super(DefaultDeviceNamesForInstanceTestCase, self).tearDown()
- for patcher in self.patchers:
- patcher.stop()
-
- def _test_default_device_names(self, *block_device_lists):
- compute_utils.default_device_names_for_instance(self.instance,
- self.root_device_name,
- *block_device_lists)
-
- def test_only_block_device_mapping(self):
- # Test no-op
- original_bdm = copy.deepcopy(self.block_device_mapping)
- self._test_default_device_names([], [], self.block_device_mapping)
- for original, new in zip(original_bdm, self.block_device_mapping):
- self.assertEqual(original.device_name, new.device_name)
-
- # Assert it defaults the missing one as expected
- self.block_device_mapping[1]['device_name'] = None
- self.block_device_mapping[2]['device_name'] = None
- self._test_default_device_names([], [], self.block_device_mapping)
- self.assertEqual('/dev/vdb',
- self.block_device_mapping[1]['device_name'])
- self.assertEqual('/dev/vdc',
- self.block_device_mapping[2]['device_name'])
-
- def test_with_ephemerals(self):
- # Test ephemeral gets assigned
- self.ephemerals[0]['device_name'] = None
- self._test_default_device_names(self.ephemerals, [],
- self.block_device_mapping)
- self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')
-
- self.block_device_mapping[1]['device_name'] = None
- self.block_device_mapping[2]['device_name'] = None
- self._test_default_device_names(self.ephemerals, [],
- self.block_device_mapping)
- self.assertEqual('/dev/vdc',
- self.block_device_mapping[1]['device_name'])
- self.assertEqual('/dev/vdd',
- self.block_device_mapping[2]['device_name'])
-
- def test_with_swap(self):
- # Test swap only
- self.swap[0]['device_name'] = None
- self._test_default_device_names([], self.swap, [])
- self.assertEqual(self.swap[0]['device_name'], '/dev/vdb')
-
- # Test swap and block_device_mapping
- self.swap[0]['device_name'] = None
- self.block_device_mapping[1]['device_name'] = None
- self.block_device_mapping[2]['device_name'] = None
- self._test_default_device_names([], self.swap,
- self.block_device_mapping)
- self.assertEqual(self.swap[0]['device_name'], '/dev/vdb')
- self.assertEqual('/dev/vdc',
- self.block_device_mapping[1]['device_name'])
- self.assertEqual('/dev/vdd',
- self.block_device_mapping[2]['device_name'])
-
- def test_all_together(self):
- # Test swap missing
- self.swap[0]['device_name'] = None
- self._test_default_device_names(self.ephemerals,
- self.swap, self.block_device_mapping)
- self.assertEqual(self.swap[0]['device_name'], '/dev/vdc')
-
- # Test swap and eph missing
- self.swap[0]['device_name'] = None
- self.ephemerals[0]['device_name'] = None
- self._test_default_device_names(self.ephemerals,
- self.swap, self.block_device_mapping)
- self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')
- self.assertEqual(self.swap[0]['device_name'], '/dev/vdc')
-
- # Test all missing
- self.swap[0]['device_name'] = None
- self.ephemerals[0]['device_name'] = None
- self.block_device_mapping[1]['device_name'] = None
- self.block_device_mapping[2]['device_name'] = None
- self._test_default_device_names(self.ephemerals,
- self.swap, self.block_device_mapping)
- self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')
- self.assertEqual(self.swap[0]['device_name'], '/dev/vdc')
- self.assertEqual('/dev/vdd',
- self.block_device_mapping[1]['device_name'])
- self.assertEqual('/dev/vde',
- self.block_device_mapping[2]['device_name'])
-
-
-class UsageInfoTestCase(test.TestCase):
-
- def setUp(self):
- def fake_get_nw_info(cls, ctxt, instance):
- self.assertTrue(ctxt.is_admin)
- return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
-
- super(UsageInfoTestCase, self).setUp()
- self.stubs.Set(network_api.API, 'get_instance_nw_info',
- fake_get_nw_info)
-
- fake_notifier.stub_notifier(self.stubs)
- self.addCleanup(fake_notifier.reset)
-
- self.flags(use_local=True, group='conductor')
- self.flags(compute_driver='nova.virt.fake.FakeDriver',
- network_manager='nova.network.manager.FlatManager')
- self.compute = importutils.import_object(CONF.compute_manager)
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id, self.project_id)
-
- def fake_show(meh, context, id, **kwargs):
- return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
-
- self.stubs.Set(nova.tests.image.fake._FakeImageService,
- 'show', fake_show)
- fake_network.set_stub_network_methods(self.stubs)
- fake_server_actions.stub_out_action_events(self.stubs)
-
- def _create_instance(self, params=None):
- """Create a test instance."""
- params = params or {}
- flavor = flavors.get_flavor_by_name('m1.tiny')
- sys_meta = flavors.save_flavor_info({}, flavor)
- inst = {}
- inst['image_ref'] = 1
- inst['reservation_id'] = 'r-fakeres'
- inst['user_id'] = self.user_id
- inst['project_id'] = self.project_id
- inst['instance_type_id'] = flavor['id']
- inst['system_metadata'] = sys_meta
- inst['ami_launch_index'] = 0
- inst['root_gb'] = 0
- inst['ephemeral_gb'] = 0
- inst['info_cache'] = {'network_info': '[]'}
- inst.update(params)
- return db.instance_create(self.context, inst)['id']
-
- def test_notify_usage_exists(self):
- # Ensure 'exists' notification generates appropriate usage data.
- instance_id = self._create_instance()
- instance = objects.Instance.get_by_id(self.context, instance_id)
- # Set some system metadata
- sys_metadata = {'image_md_key1': 'val1',
- 'image_md_key2': 'val2',
- 'other_data': 'meow'}
- instance.system_metadata.update(sys_metadata)
- instance.save()
- compute_utils.notify_usage_exists(
- rpc.get_notifier('compute'), self.context, instance)
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.priority, 'INFO')
- self.assertEqual(msg.event_type, 'compute.instance.exists')
- payload = msg.payload
- self.assertEqual(payload['tenant_id'], self.project_id)
- self.assertEqual(payload['user_id'], self.user_id)
- self.assertEqual(payload['instance_id'], instance['uuid'])
- self.assertEqual(payload['instance_type'], 'm1.tiny')
- type_id = flavors.get_flavor_by_name('m1.tiny')['id']
- self.assertEqual(str(payload['instance_type_id']), str(type_id))
- flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
- self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
- for attr in ('display_name', 'created_at', 'launched_at',
- 'state', 'state_description',
- 'bandwidth', 'audit_period_beginning',
- 'audit_period_ending', 'image_meta'):
- self.assertTrue(attr in payload,
- msg="Key %s not in payload" % attr)
- self.assertEqual(payload['image_meta'],
- {'md_key1': 'val1', 'md_key2': 'val2'})
- image_ref_url = "%s/images/1" % glance.generate_glance_url()
- self.assertEqual(payload['image_ref_url'], image_ref_url)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_notify_usage_exists_deleted_instance(self):
- # Ensure 'exists' notification generates appropriate usage data.
- instance_id = self._create_instance()
- instance = objects.Instance.get_by_id(self.context, instance_id,
- expected_attrs=['metadata', 'system_metadata', 'info_cache'])
- # Set some system metadata
- sys_metadata = {'image_md_key1': 'val1',
- 'image_md_key2': 'val2',
- 'other_data': 'meow'}
- instance.system_metadata.update(sys_metadata)
- instance.save()
- self.compute.terminate_instance(self.context, instance, [], [])
- instance = objects.Instance.get_by_id(
- self.context.elevated(read_deleted='yes'), instance_id,
- expected_attrs=['system_metadata'])
- compute_utils.notify_usage_exists(
- rpc.get_notifier('compute'), self.context, instance)
- msg = fake_notifier.NOTIFICATIONS[-1]
- self.assertEqual(msg.priority, 'INFO')
- self.assertEqual(msg.event_type, 'compute.instance.exists')
- payload = msg.payload
- self.assertEqual(payload['tenant_id'], self.project_id)
- self.assertEqual(payload['user_id'], self.user_id)
- self.assertEqual(payload['instance_id'], instance['uuid'])
- self.assertEqual(payload['instance_type'], 'm1.tiny')
- type_id = flavors.get_flavor_by_name('m1.tiny')['id']
- self.assertEqual(str(payload['instance_type_id']), str(type_id))
- flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
- self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
- for attr in ('display_name', 'created_at', 'launched_at',
- 'state', 'state_description',
- 'bandwidth', 'audit_period_beginning',
- 'audit_period_ending', 'image_meta'):
- self.assertTrue(attr in payload,
- msg="Key %s not in payload" % attr)
- self.assertEqual(payload['image_meta'],
- {'md_key1': 'val1', 'md_key2': 'val2'})
- image_ref_url = "%s/images/1" % glance.generate_glance_url()
- self.assertEqual(payload['image_ref_url'], image_ref_url)
-
- def test_notify_usage_exists_instance_not_found(self):
- # Ensure 'exists' notification generates appropriate usage data.
- instance_id = self._create_instance()
- instance = objects.Instance.get_by_id(self.context, instance_id,
- expected_attrs=['metadata', 'system_metadata', 'info_cache'])
- self.compute.terminate_instance(self.context, instance, [], [])
- compute_utils.notify_usage_exists(
- rpc.get_notifier('compute'), self.context, instance)
- msg = fake_notifier.NOTIFICATIONS[-1]
- self.assertEqual(msg.priority, 'INFO')
- self.assertEqual(msg.event_type, 'compute.instance.exists')
- payload = msg.payload
- self.assertEqual(payload['tenant_id'], self.project_id)
- self.assertEqual(payload['user_id'], self.user_id)
- self.assertEqual(payload['instance_id'], instance['uuid'])
- self.assertEqual(payload['instance_type'], 'm1.tiny')
- type_id = flavors.get_flavor_by_name('m1.tiny')['id']
- self.assertEqual(str(payload['instance_type_id']), str(type_id))
- flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
- self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
- for attr in ('display_name', 'created_at', 'launched_at',
- 'state', 'state_description',
- 'bandwidth', 'audit_period_beginning',
- 'audit_period_ending', 'image_meta'):
- self.assertTrue(attr in payload,
- msg="Key %s not in payload" % attr)
- self.assertEqual(payload['image_meta'], {})
- image_ref_url = "%s/images/1" % glance.generate_glance_url()
- self.assertEqual(payload['image_ref_url'], image_ref_url)
-
- def test_notify_about_instance_usage(self):
- instance_id = self._create_instance()
- instance = objects.Instance.get_by_id(self.context, instance_id,
- expected_attrs=['metadata', 'system_metadata', 'info_cache'])
- # Set some system metadata
- sys_metadata = {'image_md_key1': 'val1',
- 'image_md_key2': 'val2',
- 'other_data': 'meow'}
- instance.system_metadata.update(sys_metadata)
- instance.save()
- extra_usage_info = {'image_name': 'fake_name'}
- compute_utils.notify_about_instance_usage(
- rpc.get_notifier('compute'),
- self.context, instance, 'create.start',
- extra_usage_info=extra_usage_info)
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.priority, 'INFO')
- self.assertEqual(msg.event_type, 'compute.instance.create.start')
- payload = msg.payload
- self.assertEqual(payload['tenant_id'], self.project_id)
- self.assertEqual(payload['user_id'], self.user_id)
- self.assertEqual(payload['instance_id'], instance['uuid'])
- self.assertEqual(payload['instance_type'], 'm1.tiny')
- type_id = flavors.get_flavor_by_name('m1.tiny')['id']
- self.assertEqual(str(payload['instance_type_id']), str(type_id))
- flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
- self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
- for attr in ('display_name', 'created_at', 'launched_at',
- 'state', 'state_description', 'image_meta'):
- self.assertTrue(attr in payload,
- msg="Key %s not in payload" % attr)
- self.assertEqual(payload['image_meta'],
- {'md_key1': 'val1', 'md_key2': 'val2'})
- self.assertEqual(payload['image_name'], 'fake_name')
- image_ref_url = "%s/images/1" % glance.generate_glance_url()
- self.assertEqual(payload['image_ref_url'], image_ref_url)
- self.compute.terminate_instance(self.context, instance, [], [])
-
- def test_notify_about_aggregate_update_with_id(self):
- # Set aggregate payload
- aggregate_payload = {'aggregate_id': 1}
- compute_utils.notify_about_aggregate_update(self.context,
- "create.end",
- aggregate_payload)
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.priority, 'INFO')
- self.assertEqual(msg.event_type, 'aggregate.create.end')
- payload = msg.payload
- self.assertEqual(payload['aggregate_id'], 1)
-
- def test_notify_about_aggregate_update_with_name(self):
- # Set aggregate payload
- aggregate_payload = {'name': 'fakegroup'}
- compute_utils.notify_about_aggregate_update(self.context,
- "create.start",
- aggregate_payload)
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.priority, 'INFO')
- self.assertEqual(msg.event_type, 'aggregate.create.start')
- payload = msg.payload
- self.assertEqual(payload['name'], 'fakegroup')
-
- def test_notify_about_aggregate_update_without_name_id(self):
- # Set empty aggregate payload
- aggregate_payload = {}
- compute_utils.notify_about_aggregate_update(self.context,
- "create.start",
- aggregate_payload)
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
-
-
-class ComputeGetImageMetadataTestCase(test.TestCase):
- def setUp(self):
- super(ComputeGetImageMetadataTestCase, self).setUp()
- self.context = context.RequestContext('fake', 'fake')
-
- self.image = {
- "min_ram": 10,
- "min_disk": 1,
- "disk_format": "raw",
- "container_format": "bare",
- "properties": {},
- }
-
- self.mock_image_api = mock.Mock()
- self.mock_image_api.get.return_value = self.image
-
- self.ctx = context.RequestContext('fake', 'fake')
-
- sys_meta = {
- 'image_min_ram': 10,
- 'image_min_disk': 1,
- 'image_disk_format': 'raw',
- 'image_container_format': 'bare',
- 'instance_type_id': 0,
- 'instance_type_name': 'm1.fake',
- 'instance_type_memory_mb': 10,
- 'instance_type_vcpus': 1,
- 'instance_type_root_gb': 1,
- 'instance_type_ephemeral_gb': 1,
- 'instance_type_flavorid': '0',
- 'instance_type_swap': 1,
- 'instance_type_rxtx_factor': 0.0,
- 'instance_type_vcpu_weight': None,
- }
-
- self.instance = fake_instance.fake_db_instance(
- memory_mb=0, root_gb=0,
- system_metadata=sys_meta)
-
- @property
- def instance_obj(self):
- return objects.Instance._from_db_object(
- self.ctx, objects.Instance(), self.instance,
- expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
-
- def test_get_image_meta(self):
- image_meta = compute_utils.get_image_metadata(
- self.ctx, self.mock_image_api, 'fake-image', self.instance_obj)
-
- self.image['properties'] = 'DONTCARE'
- self.assertThat(self.image, matchers.DictMatches(image_meta))
-
- def test_get_image_meta_with_image_id_none(self):
- self.image['properties'] = {'fake_property': 'fake_value'}
-
- with mock.patch.object(flavors,
- "extract_flavor") as mock_extract_flavor:
- with mock.patch.object(utils, "get_system_metadata_from_image"
- ) as mock_get_sys_metadata:
- image_meta = compute_utils.get_image_metadata(
- self.ctx, self.mock_image_api, None, self.instance_obj)
-
- self.assertEqual(0, self.mock_image_api.get.call_count)
- self.assertEqual(0, mock_extract_flavor.call_count)
- self.assertEqual(0, mock_get_sys_metadata.call_count)
- self.assertNotIn('fake_property', image_meta['properties'])
-
- # Checking mock_image_api_get is called with 0 image_id
- # as 0 is a valid image ID
- image_meta = compute_utils.get_image_metadata(self.ctx,
- self.mock_image_api,
- 0, self.instance_obj)
- self.assertEqual(1, self.mock_image_api.get.call_count)
- self.assertIn('fake_property', image_meta['properties'])
-
- def _test_get_image_meta_exception(self, error):
- self.mock_image_api.get.side_effect = error
-
- image_meta = compute_utils.get_image_metadata(
- self.ctx, self.mock_image_api, 'fake-image', self.instance_obj)
-
- self.image['properties'] = 'DONTCARE'
- # NOTE(danms): The trip through system_metadata will stringify things
- for key in self.image:
- self.image[key] = str(self.image[key])
- self.assertThat(self.image, matchers.DictMatches(image_meta))
-
- def test_get_image_meta_no_image(self):
- error = exception.ImageNotFound(image_id='fake-image')
- self._test_get_image_meta_exception(error)
-
- def test_get_image_meta_not_authorized(self):
- error = exception.ImageNotAuthorized(image_id='fake-image')
- self._test_get_image_meta_exception(error)
-
- def test_get_image_meta_bad_request(self):
- error = exception.Invalid()
- self._test_get_image_meta_exception(error)
-
- def test_get_image_meta_unexpected_exception(self):
- error = test.TestingException()
- with testtools.ExpectedException(test.TestingException):
- self._test_get_image_meta_exception(error)
-
- def test_get_image_meta_no_image_system_meta(self):
- for k in self.instance['system_metadata'].keys():
- if k.startswith('image_'):
- del self.instance['system_metadata'][k]
-
- image_meta = compute_utils.get_image_metadata(
- self.ctx, self.mock_image_api, 'fake-image', self.instance_obj)
-
- self.image['properties'] = 'DONTCARE'
- self.assertThat(self.image, matchers.DictMatches(image_meta))
-
- def test_get_image_meta_no_image_no_image_system_meta(self):
- e = exception.ImageNotFound(image_id='fake-image')
- self.mock_image_api.get.side_effect = e
-
- for k in self.instance['system_metadata'].keys():
- if k.startswith('image_'):
- del self.instance['system_metadata'][k]
-
- image_meta = compute_utils.get_image_metadata(
- self.ctx, self.mock_image_api, 'fake-image', self.instance_obj)
-
- expected = {'properties': 'DONTCARE'}
- self.assertThat(expected, matchers.DictMatches(image_meta))
-
-
-class ComputeUtilsGetValFromSysMetadata(test.TestCase):
-
- def test_get_value_from_system_metadata(self):
- instance = fake_instance.fake_instance_obj('fake-context')
- system_meta = {'int_val': 1,
- 'int_string': '2',
- 'not_int': 'Nope'}
- instance.system_metadata = system_meta
-
- result = compute_utils.get_value_from_system_metadata(
- instance, 'int_val', int, 0)
- self.assertEqual(1, result)
-
- result = compute_utils.get_value_from_system_metadata(
- instance, 'int_string', int, 0)
- self.assertEqual(2, result)
-
- result = compute_utils.get_value_from_system_metadata(
- instance, 'not_int', int, 0)
- self.assertEqual(0, result)
-
-
-class ComputeUtilsGetNWInfo(test.TestCase):
- def test_instance_object_none_info_cache(self):
- inst = fake_instance.fake_instance_obj('fake-context',
- expected_attrs=['info_cache'])
- self.assertIsNone(inst.info_cache)
- result = compute_utils.get_nw_info_for_instance(inst)
- self.assertEqual(jsonutils.dumps([]), result.json())
-
- def test_instance_dict_none_info_cache(self):
- inst = fake_instance.fake_db_instance(info_cache=None)
- self.assertIsNone(inst['info_cache'])
- result = compute_utils.get_nw_info_for_instance(inst)
- self.assertEqual(jsonutils.dumps([]), result.json())
-
-
-class ComputeUtilsGetRebootTypes(test.TestCase):
- def setUp(self):
- super(ComputeUtilsGetRebootTypes, self).setUp()
- self.context = context.RequestContext('fake', 'fake')
-
- def test_get_reboot_type_started_soft(self):
- reboot_type = compute_utils.get_reboot_type(task_states.REBOOT_STARTED,
- power_state.RUNNING)
- self.assertEqual(reboot_type, 'SOFT')
-
- def test_get_reboot_type_pending_soft(self):
- reboot_type = compute_utils.get_reboot_type(task_states.REBOOT_PENDING,
- power_state.RUNNING)
- self.assertEqual(reboot_type, 'SOFT')
-
- def test_get_reboot_type_hard(self):
- reboot_type = compute_utils.get_reboot_type('foo', power_state.RUNNING)
- self.assertEqual(reboot_type, 'HARD')
-
- def test_get_reboot_not_running_hard(self):
- reboot_type = compute_utils.get_reboot_type('foo', 'bar')
- self.assertEqual(reboot_type, 'HARD')
diff --git a/nova/tests/compute/test_compute_xen.py b/nova/tests/compute/test_compute_xen.py
deleted file mode 100644
index 29300e6511..0000000000
--- a/nova/tests/compute/test_compute_xen.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Tests for expectations of behaviour from the Xen driver."""
-
-from oslo.config import cfg
-from oslo.utils import importutils
-
-from nova.compute import power_state
-from nova import context
-from nova import objects
-from nova.objects import instance as instance_obj
-from nova.tests.compute import eventlet_utils
-from nova.tests import fake_instance
-from nova.tests.virt.xenapi import stubs
-from nova.virt.xenapi import vm_utils
-
-CONF = cfg.CONF
-CONF.import_opt('compute_manager', 'nova.service')
-CONF.import_opt('compute_driver', 'nova.virt.driver')
-
-
-class ComputeXenTestCase(stubs.XenAPITestBaseNoDB):
- def setUp(self):
- super(ComputeXenTestCase, self).setUp()
- self.flags(compute_driver='xenapi.XenAPIDriver')
- self.flags(connection_url='test_url',
- connection_password='test_pass',
- group='xenserver')
-
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.compute = importutils.import_object(CONF.compute_manager)
- # execute power syncing synchronously for testing:
- self.compute._sync_power_pool = eventlet_utils.SyncPool()
-
- def test_sync_power_states_instance_not_found(self):
- db_instance = fake_instance.fake_db_instance()
- ctxt = context.get_admin_context()
- instance_list = instance_obj._make_instance_list(ctxt,
- objects.InstanceList(), [db_instance], None)
- instance = instance_list[0]
-
- self.mox.StubOutWithMock(objects.InstanceList, 'get_by_host')
- self.mox.StubOutWithMock(self.compute.driver, 'get_num_instances')
- self.mox.StubOutWithMock(vm_utils, 'lookup')
- self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')
-
- objects.InstanceList.get_by_host(ctxt,
- self.compute.host, use_slave=True).AndReturn(instance_list)
- self.compute.driver.get_num_instances().AndReturn(1)
- vm_utils.lookup(self.compute.driver._session, instance['name'],
- False).AndReturn(None)
- self.compute._sync_instance_power_state(ctxt, instance,
- power_state.NOSTATE)
-
- self.mox.ReplayAll()
-
- self.compute._sync_power_states(ctxt)
diff --git a/nova/tests/compute/test_host_api.py b/nova/tests/compute/test_host_api.py
deleted file mode 100644
index eeebe0a357..0000000000
--- a/nova/tests/compute/test_host_api.py
+++ /dev/null
@@ -1,480 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation
-# All Rights Reserved.
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-
-import mock
-
-from nova.cells import utils as cells_utils
-from nova import compute
-from nova import context
-from nova import exception
-from nova import objects
-from nova import test
-from nova.tests import fake_notifier
-from nova.tests.objects import test_objects
-from nova.tests.objects import test_service
-
-
-class ComputeHostAPITestCase(test.TestCase):
- def setUp(self):
- super(ComputeHostAPITestCase, self).setUp()
- self.host_api = compute.HostAPI()
- self.ctxt = context.get_admin_context()
- fake_notifier.stub_notifier(self.stubs)
- self.addCleanup(fake_notifier.reset)
-
- def _compare_obj(self, obj, db_obj):
- test_objects.compare_obj(self, obj, db_obj,
- allow_missing=test_service.OPTIONAL)
-
- def _compare_objs(self, obj_list, db_obj_list):
- for index, obj in enumerate(obj_list):
- self._compare_obj(obj, db_obj_list[index])
-
- def _mock_rpc_call(self, method, **kwargs):
- self.mox.StubOutWithMock(self.host_api.rpcapi, method)
- getattr(self.host_api.rpcapi, method)(
- self.ctxt, **kwargs).AndReturn('fake-result')
-
- def _mock_assert_host_exists(self):
- """Sets it so that the host API always thinks that 'fake_host'
- exists.
- """
- def fake_assert_host_exists(context, host_name, must_be_up=False):
- return 'fake_host'
- self.stubs.Set(self.host_api, '_assert_host_exists',
- fake_assert_host_exists)
-
- def test_set_host_enabled(self):
- self._mock_assert_host_exists()
- self._mock_rpc_call('set_host_enabled',
- host='fake_host',
- enabled='fake_enabled')
- self.mox.ReplayAll()
- fake_notifier.NOTIFICATIONS = []
- result = self.host_api.set_host_enabled(self.ctxt, 'fake_host',
- 'fake_enabled')
- self.assertEqual('fake-result', result)
- self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual('HostAPI.set_enabled.start', msg.event_type)
- self.assertEqual('api.fake_host', msg.publisher_id)
- self.assertEqual('INFO', msg.priority)
- self.assertEqual('fake_enabled', msg.payload['enabled'])
- self.assertEqual('fake_host', msg.payload['host_name'])
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual('HostAPI.set_enabled.end', msg.event_type)
- self.assertEqual('api.fake_host', msg.publisher_id)
- self.assertEqual('INFO', msg.priority)
- self.assertEqual('fake_enabled', msg.payload['enabled'])
- self.assertEqual('fake_host', msg.payload['host_name'])
-
- def test_host_name_from_assert_hosts_exists(self):
- self._mock_assert_host_exists()
- self._mock_rpc_call('set_host_enabled',
- host='fake_host',
- enabled='fake_enabled')
- self.mox.ReplayAll()
- result = self.host_api.set_host_enabled(self.ctxt, 'fake_hosT',
- 'fake_enabled')
- self.assertEqual('fake-result', result)
-
- def test_get_host_uptime(self):
- self._mock_assert_host_exists()
- self._mock_rpc_call('get_host_uptime',
- host='fake_host')
- self.mox.ReplayAll()
- result = self.host_api.get_host_uptime(self.ctxt, 'fake_host')
- self.assertEqual('fake-result', result)
-
- def test_get_host_uptime_service_down(self):
- def fake_service_get_by_compute_host(context, host_name):
- return dict(test_service.fake_service, id=1)
- self.stubs.Set(self.host_api.db, 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
-
- def fake_service_is_up(service):
- return False
- self.stubs.Set(self.host_api.servicegroup_api,
- 'service_is_up', fake_service_is_up)
-
- self.assertRaises(exception.ComputeServiceUnavailable,
- self.host_api.get_host_uptime, self.ctxt,
- 'fake_host')
-
- def test_host_power_action(self):
- self._mock_assert_host_exists()
- self._mock_rpc_call('host_power_action',
- host='fake_host',
- action='fake_action')
- self.mox.ReplayAll()
- fake_notifier.NOTIFICATIONS = []
- result = self.host_api.host_power_action(self.ctxt, 'fake_host',
- 'fake_action')
- self.assertEqual('fake-result', result)
- self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual('HostAPI.power_action.start', msg.event_type)
- self.assertEqual('api.fake_host', msg.publisher_id)
- self.assertEqual('INFO', msg.priority)
- self.assertEqual('fake_action', msg.payload['action'])
- self.assertEqual('fake_host', msg.payload['host_name'])
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual('HostAPI.power_action.end', msg.event_type)
- self.assertEqual('api.fake_host', msg.publisher_id)
- self.assertEqual('INFO', msg.priority)
- self.assertEqual('fake_action', msg.payload['action'])
- self.assertEqual('fake_host', msg.payload['host_name'])
-
- def test_set_host_maintenance(self):
- self._mock_assert_host_exists()
- self._mock_rpc_call('host_maintenance_mode',
- host='fake_host',
- host_param='fake_host',
- mode='fake_mode')
- self.mox.ReplayAll()
- fake_notifier.NOTIFICATIONS = []
- result = self.host_api.set_host_maintenance(self.ctxt, 'fake_host',
- 'fake_mode')
- self.assertEqual('fake-result', result)
- self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual('HostAPI.set_maintenance.start', msg.event_type)
- self.assertEqual('api.fake_host', msg.publisher_id)
- self.assertEqual('INFO', msg.priority)
- self.assertEqual('fake_host', msg.payload['host_name'])
- self.assertEqual('fake_mode', msg.payload['mode'])
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual('HostAPI.set_maintenance.end', msg.event_type)
- self.assertEqual('api.fake_host', msg.publisher_id)
- self.assertEqual('INFO', msg.priority)
- self.assertEqual('fake_host', msg.payload['host_name'])
- self.assertEqual('fake_mode', msg.payload['mode'])
-
- def test_service_get_all_no_zones(self):
- services = [dict(test_service.fake_service,
- id=1, topic='compute', host='host1'),
- dict(test_service.fake_service,
- topic='compute', host='host2')]
-
- self.mox.StubOutWithMock(self.host_api.db,
- 'service_get_all')
-
- # Test no filters
- self.host_api.db.service_get_all(self.ctxt,
- disabled=None).AndReturn(services)
- self.mox.ReplayAll()
- result = self.host_api.service_get_all(self.ctxt)
- self.mox.VerifyAll()
- self._compare_objs(result, services)
-
- # Test no filters #2
- self.mox.ResetAll()
- self.host_api.db.service_get_all(self.ctxt,
- disabled=None).AndReturn(services)
- self.mox.ReplayAll()
- result = self.host_api.service_get_all(self.ctxt, filters={})
- self.mox.VerifyAll()
- self._compare_objs(result, services)
-
- # Test w/ filter
- self.mox.ResetAll()
- self.host_api.db.service_get_all(self.ctxt,
- disabled=None).AndReturn(services)
- self.mox.ReplayAll()
- result = self.host_api.service_get_all(self.ctxt,
- filters=dict(host='host2'))
- self.mox.VerifyAll()
- self._compare_objs(result, [services[1]])
-
- def test_service_get_all(self):
- services = [dict(test_service.fake_service,
- topic='compute', host='host1'),
- dict(test_service.fake_service,
- topic='compute', host='host2')]
- exp_services = []
- for service in services:
- exp_service = {}
- exp_service.update(availability_zone='nova', **service)
- exp_services.append(exp_service)
-
- self.mox.StubOutWithMock(self.host_api.db,
- 'service_get_all')
-
- # Test no filters
- self.host_api.db.service_get_all(self.ctxt,
- disabled=None).AndReturn(services)
- self.mox.ReplayAll()
- result = self.host_api.service_get_all(self.ctxt, set_zones=True)
- self.mox.VerifyAll()
- self._compare_objs(result, exp_services)
-
- # Test no filters #2
- self.mox.ResetAll()
- self.host_api.db.service_get_all(self.ctxt,
- disabled=None).AndReturn(services)
- self.mox.ReplayAll()
- result = self.host_api.service_get_all(self.ctxt, filters={},
- set_zones=True)
- self.mox.VerifyAll()
- self._compare_objs(result, exp_services)
-
- # Test w/ filter
- self.mox.ResetAll()
- self.host_api.db.service_get_all(self.ctxt,
- disabled=None).AndReturn(services)
- self.mox.ReplayAll()
- result = self.host_api.service_get_all(self.ctxt,
- filters=dict(host='host2'),
- set_zones=True)
- self.mox.VerifyAll()
- self._compare_objs(result, [exp_services[1]])
-
- # Test w/ zone filter but no set_zones arg.
- self.mox.ResetAll()
- self.host_api.db.service_get_all(self.ctxt,
- disabled=None).AndReturn(services)
- self.mox.ReplayAll()
- filters = {'availability_zone': 'nova'}
- result = self.host_api.service_get_all(self.ctxt,
- filters=filters)
- self.mox.VerifyAll()
- self._compare_objs(result, exp_services)
-
- def test_service_get_by_compute_host(self):
- self.mox.StubOutWithMock(self.host_api.db,
- 'service_get_by_compute_host')
-
- self.host_api.db.service_get_by_compute_host(self.ctxt,
- 'fake-host').AndReturn(test_service.fake_service)
- self.mox.ReplayAll()
- result = self.host_api.service_get_by_compute_host(self.ctxt,
- 'fake-host')
- self.assertEqual(test_service.fake_service['id'], result.id)
-
- def test_service_update(self):
- host_name = 'fake-host'
- binary = 'nova-compute'
- params_to_update = dict(disabled=True)
- service_id = 42
- expected_result = dict(test_service.fake_service, id=service_id)
-
- self.mox.StubOutWithMock(self.host_api.db, 'service_get_by_args')
- self.host_api.db.service_get_by_args(self.ctxt,
- host_name, binary).AndReturn(expected_result)
-
- self.mox.StubOutWithMock(self.host_api.db, 'service_update')
- self.host_api.db.service_update(
- self.ctxt, service_id, params_to_update).AndReturn(expected_result)
-
- self.mox.ReplayAll()
-
- result = self.host_api.service_update(
- self.ctxt, host_name, binary, params_to_update)
- self._compare_obj(result, expected_result)
-
- def test_instance_get_all_by_host(self):
- self.mox.StubOutWithMock(self.host_api.db,
- 'instance_get_all_by_host')
-
- self.host_api.db.instance_get_all_by_host(self.ctxt,
- 'fake-host').AndReturn(['fake-responses'])
- self.mox.ReplayAll()
- result = self.host_api.instance_get_all_by_host(self.ctxt,
- 'fake-host')
- self.assertEqual(['fake-responses'], result)
-
- def test_task_log_get_all(self):
- self.mox.StubOutWithMock(self.host_api.db, 'task_log_get_all')
-
- self.host_api.db.task_log_get_all(self.ctxt,
- 'fake-name', 'fake-begin', 'fake-end', host='fake-host',
- state='fake-state').AndReturn('fake-response')
- self.mox.ReplayAll()
- result = self.host_api.task_log_get_all(self.ctxt, 'fake-name',
- 'fake-begin', 'fake-end', host='fake-host',
- state='fake-state')
- self.assertEqual('fake-response', result)
-
- def test_service_delete(self):
- with contextlib.nested(
- mock.patch.object(objects.Service, 'get_by_id',
- return_value=objects.Service()),
- mock.patch.object(objects.Service, 'destroy')
- ) as (
- get_by_id, destroy
- ):
- self.host_api.service_delete(self.ctxt, 1)
- get_by_id.assert_called_once_with(self.ctxt, 1)
- destroy.assert_called_once_with()
-
-
-class ComputeHostAPICellsTestCase(ComputeHostAPITestCase):
- def setUp(self):
- self.flags(enable=True, group='cells')
- self.flags(cell_type='api', group='cells')
- super(ComputeHostAPICellsTestCase, self).setUp()
-
- def _mock_rpc_call(self, method, **kwargs):
- if 'host_param' in kwargs:
- kwargs.pop('host_param')
- else:
- kwargs.pop('host')
- rpc_message = {
- 'method': method,
- 'namespace': None,
- 'args': kwargs,
- 'version': self.host_api.rpcapi.client.target.version,
- }
- cells_rpcapi = self.host_api.rpcapi.client.cells_rpcapi
- self.mox.StubOutWithMock(cells_rpcapi, 'proxy_rpc_to_manager')
- cells_rpcapi.proxy_rpc_to_manager(self.ctxt,
- rpc_message,
- 'compute.fake_host',
- call=True).AndReturn('fake-result')
-
- def test_service_get_all_no_zones(self):
- services = [dict(test_service.fake_service,
- id='cell1@1', topic='compute', host='host1'),
- dict(test_service.fake_service,
- id='cell1@2', topic='compute', host='host2')]
- exp_services = [s.copy() for s in services]
-
- fake_filters = {'host': 'host1'}
- self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
- 'service_get_all')
- self.host_api.cells_rpcapi.service_get_all(self.ctxt,
- filters=fake_filters).AndReturn(services)
- self.mox.ReplayAll()
- result = self.host_api.service_get_all(self.ctxt,
- filters=fake_filters)
- self._compare_objs(result, exp_services)
-
- def _test_service_get_all(self, fake_filters, **kwargs):
- services = [dict(test_service.fake_service,
- id='cell1@1', key1='val1', key2='val2',
- topic='compute', host='host1'),
- dict(test_service.fake_service,
- id='cell1@2', key1='val2', key3='val3',
- topic='compute', host='host2')]
- exp_services = []
- for service in services:
- exp_service = {}
- exp_service.update(availability_zone='nova', **service)
- exp_services.append(exp_service)
-
- self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
- 'service_get_all')
- self.host_api.cells_rpcapi.service_get_all(self.ctxt,
- filters=fake_filters).AndReturn(services)
- self.mox.ReplayAll()
- result = self.host_api.service_get_all(self.ctxt,
- filters=fake_filters,
- **kwargs)
- self.mox.VerifyAll()
- self._compare_objs(result, exp_services)
-
- def test_service_get_all(self):
- fake_filters = {'availability_zone': 'nova'}
- self._test_service_get_all(fake_filters)
-
- def test_service_get_all_set_zones(self):
- fake_filters = {'key1': 'val1'}
- self._test_service_get_all(fake_filters, set_zones=True)
-
- def test_service_get_by_compute_host(self):
- self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
- 'service_get_by_compute_host')
-
- self.host_api.cells_rpcapi.service_get_by_compute_host(self.ctxt,
- 'fake-host').AndReturn(test_service.fake_service)
- self.mox.ReplayAll()
- result = self.host_api.service_get_by_compute_host(self.ctxt,
- 'fake-host')
- self._compare_obj(result, test_service.fake_service)
-
- def test_service_update(self):
- host_name = 'fake-host'
- binary = 'nova-compute'
- params_to_update = dict(disabled=True)
- service_id = 42
- expected_result = dict(test_service.fake_service, id=service_id)
-
- self.mox.StubOutWithMock(self.host_api.cells_rpcapi, 'service_update')
- self.host_api.cells_rpcapi.service_update(
- self.ctxt, host_name,
- binary, params_to_update).AndReturn(expected_result)
-
- self.mox.ReplayAll()
-
- result = self.host_api.service_update(
- self.ctxt, host_name, binary, params_to_update)
- self._compare_obj(result, expected_result)
-
- def test_service_delete(self):
- cell_service_id = cells_utils.cell_with_item('cell1', 1)
- with mock.patch.object(self.host_api.cells_rpcapi,
- 'service_delete') as service_delete:
- self.host_api.service_delete(self.ctxt, cell_service_id)
- service_delete.assert_called_once_with(
- self.ctxt, cell_service_id)
-
- def test_instance_get_all_by_host(self):
- instances = [dict(id=1, cell_name='cell1', host='host1'),
- dict(id=2, cell_name='cell2', host='host1'),
- dict(id=3, cell_name='cell1', host='host2')]
-
- self.mox.StubOutWithMock(self.host_api.db,
- 'instance_get_all_by_host')
-
- self.host_api.db.instance_get_all_by_host(self.ctxt,
- 'fake-host').AndReturn(instances)
- self.mox.ReplayAll()
- expected_result = [instances[0], instances[2]]
- cell_and_host = cells_utils.cell_with_item('cell1', 'fake-host')
- result = self.host_api.instance_get_all_by_host(self.ctxt,
- cell_and_host)
- self.assertEqual(expected_result, result)
-
- def test_task_log_get_all(self):
- self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
- 'task_log_get_all')
-
- self.host_api.cells_rpcapi.task_log_get_all(self.ctxt,
- 'fake-name', 'fake-begin', 'fake-end', host='fake-host',
- state='fake-state').AndReturn('fake-response')
- self.mox.ReplayAll()
- result = self.host_api.task_log_get_all(self.ctxt, 'fake-name',
- 'fake-begin', 'fake-end', host='fake-host',
- state='fake-state')
- self.assertEqual('fake-response', result)
-
- def test_get_host_uptime_service_down(self):
- # The corresponding Compute test case depends on the
- # _assert_host_exists which is a no-op in the cells api
- pass
-
- def test_get_host_uptime(self):
- self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
- 'get_host_uptime')
-
- self.host_api.cells_rpcapi.get_host_uptime(self.ctxt,
- 'fake-host'). \
- AndReturn('fake-response')
- self.mox.ReplayAll()
- result = self.host_api.get_host_uptime(self.ctxt, 'fake-host')
- self.assertEqual('fake-response', result)
diff --git a/nova/tests/compute/test_keypairs.py b/nova/tests/compute/test_keypairs.py
deleted file mode 100644
index 2644250cc2..0000000000
--- a/nova/tests/compute/test_keypairs.py
+++ /dev/null
@@ -1,221 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Tests for keypair API."""
-
-from oslo.config import cfg
-import six
-
-from nova.compute import api as compute_api
-from nova import context
-from nova import db
-from nova import exception
-from nova.i18n import _
-from nova import quota
-from nova.tests.compute import test_compute
-from nova.tests import fake_notifier
-from nova.tests.objects import test_keypair
-
-CONF = cfg.CONF
-QUOTAS = quota.QUOTAS
-
-
-class KeypairAPITestCase(test_compute.BaseTestCase):
- def setUp(self):
- super(KeypairAPITestCase, self).setUp()
- self.keypair_api = compute_api.KeypairAPI()
- self.ctxt = context.RequestContext('fake', 'fake')
- self._keypair_db_call_stubs()
- self.existing_key_name = 'fake existing key name'
- self.pub_key = ('ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLnVkqJu9WVf'
- '/5StU3JCrBR2r1s1j8K1tux+5XeSvdqaM8lMFNorzbY5iyoBbR'
- 'S56gy1jmm43QsMPJsrpfUZKcJpRENSe3OxIIwWXRoiapZe78u/'
- 'a9xKwj0avFYMcws9Rk9iAB7W4K1nEJbyCPl5lRBoyqeHBqrnnu'
- 'XWEgGxJCK0Ah6wcOzwlEiVjdf4kxzXrwPHyi7Ea1qvnNXTziF8'
- 'yYmUlH4C8UXfpTQckwSwpDyxZUc63P8q+vPbs3Q2kw+/7vvkCK'
- 'HJAXVI+oCiyMMfffoTq16M1xfV58JstgtTqAXG+ZFpicGajREU'
- 'E/E3hO5MGgcHmyzIrWHKpe1n3oEGuz')
- self.fingerprint = '4e:48:c6:a0:4a:f9:dd:b5:4c:85:54:5a:af:43:47:5a'
- self.key_destroyed = False
-
- def _keypair_db_call_stubs(self):
-
- def db_key_pair_get_all_by_user(context, user_id):
- return [dict(test_keypair.fake_keypair,
- name=self.existing_key_name,
- public_key=self.pub_key,
- fingerprint=self.fingerprint)]
-
- def db_key_pair_create(context, keypair):
- return dict(test_keypair.fake_keypair, **keypair)
-
- def db_key_pair_destroy(context, user_id, name):
- if name == self.existing_key_name:
- self.key_destroyed = True
-
- def db_key_pair_get(context, user_id, name):
- if name == self.existing_key_name and not self.key_destroyed:
- return dict(test_keypair.fake_keypair,
- name=self.existing_key_name,
- public_key=self.pub_key,
- fingerprint=self.fingerprint)
- else:
- raise exception.KeypairNotFound(user_id=user_id, name=name)
-
- self.stubs.Set(db, "key_pair_get_all_by_user",
- db_key_pair_get_all_by_user)
- self.stubs.Set(db, "key_pair_create",
- db_key_pair_create)
- self.stubs.Set(db, "key_pair_destroy",
- db_key_pair_destroy)
- self.stubs.Set(db, "key_pair_get",
- db_key_pair_get)
-
- def _check_notifications(self, action='create', key_name='foo'):
- self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
-
- n1 = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual('INFO', n1.priority)
- self.assertEqual('keypair.%s.start' % action, n1.event_type)
- self.assertEqual('api.%s' % CONF.host, n1.publisher_id)
- self.assertEqual('fake', n1.payload['user_id'])
- self.assertEqual('fake', n1.payload['tenant_id'])
- self.assertEqual(key_name, n1.payload['key_name'])
-
- n2 = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual('INFO', n2.priority)
- self.assertEqual('keypair.%s.end' % action, n2.event_type)
- self.assertEqual('api.%s' % CONF.host, n2.publisher_id)
- self.assertEqual('fake', n2.payload['user_id'])
- self.assertEqual('fake', n2.payload['tenant_id'])
- self.assertEqual(key_name, n2.payload['key_name'])
-
-
-class CreateImportSharedTestMixIn(object):
- """Tests shared between create and import_key.
-
- Mix-in pattern is used here so that these `test_*` methods aren't picked
- up by the test runner unless they are part of a 'concrete' test case.
- """
-
- def assertKeyNameRaises(self, exc_class, expected_message, name):
- func = getattr(self.keypair_api, self.func_name)
-
- args = []
- if self.func_name == 'import_key_pair':
- args.append(self.pub_key)
-
- exc = self.assertRaises(exc_class, func, self.ctxt, self.ctxt.user_id,
- name, *args)
- self.assertEqual(expected_message, six.text_type(exc))
-
- def assertInvalidKeypair(self, expected_message, name):
- msg = _('Keypair data is invalid: %s') % expected_message
- self.assertKeyNameRaises(exception.InvalidKeypair, msg, name)
-
- def test_name_too_short(self):
- msg = _('Keypair name must be string and between 1 '
- 'and 255 characters long')
- self.assertInvalidKeypair(msg, '')
-
- def test_name_too_long(self):
- msg = _('Keypair name must be string and between 1 '
- 'and 255 characters long')
- self.assertInvalidKeypair(msg, 'x' * 256)
-
- def test_invalid_chars(self):
- msg = _("Keypair name contains unsafe characters")
- self.assertInvalidKeypair(msg, '* BAD CHARACTERS! *')
-
- def test_already_exists(self):
- def db_key_pair_create_duplicate(context, keypair):
- raise exception.KeyPairExists(key_name=keypair.get('name', ''))
-
- self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate)
-
- msg = (_("Key pair '%(key_name)s' already exists.") %
- {'key_name': self.existing_key_name})
- self.assertKeyNameRaises(exception.KeyPairExists, msg,
- self.existing_key_name)
-
- def test_quota_limit(self):
- def fake_quotas_count(self, context, resource, *args, **kwargs):
- return CONF.quota_key_pairs
-
- self.stubs.Set(QUOTAS, "count", fake_quotas_count)
-
- msg = _("Maximum number of key pairs exceeded")
- self.assertKeyNameRaises(exception.KeypairLimitExceeded, msg, 'foo')
-
-
-class CreateKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
- func_name = 'create_key_pair'
-
- def test_success(self):
- keypair, private_key = self.keypair_api.create_key_pair(
- self.ctxt, self.ctxt.user_id, 'foo')
- self.assertEqual('foo', keypair['name'])
- self._check_notifications()
-
-
-class ImportKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
- func_name = 'import_key_pair'
-
- def test_success(self):
- keypair = self.keypair_api.import_key_pair(self.ctxt,
- self.ctxt.user_id,
- 'foo',
- self.pub_key)
-
- self.assertEqual('foo', keypair['name'])
- self.assertEqual(self.fingerprint, keypair['fingerprint'])
- self.assertEqual(self.pub_key, keypair['public_key'])
- self._check_notifications(action='import')
-
- def test_bad_key_data(self):
- exc = self.assertRaises(exception.InvalidKeypair,
- self.keypair_api.import_key_pair,
- self.ctxt, self.ctxt.user_id, 'foo',
- 'bad key data')
- msg = u'Keypair data is invalid: failed to generate fingerprint'
- self.assertEqual(msg, six.text_type(exc))
-
-
-class GetKeypairTestCase(KeypairAPITestCase):
- def test_success(self):
- keypair = self.keypair_api.get_key_pair(self.ctxt,
- self.ctxt.user_id,
- self.existing_key_name)
- self.assertEqual(self.existing_key_name, keypair['name'])
-
-
-class GetKeypairsTestCase(KeypairAPITestCase):
- def test_success(self):
- keypairs = self.keypair_api.get_key_pairs(self.ctxt, self.ctxt.user_id)
- self.assertEqual([self.existing_key_name],
- [k['name'] for k in keypairs])
-
-
-class DeleteKeypairTestCase(KeypairAPITestCase):
- def test_success(self):
- self.keypair_api.get_key_pair(self.ctxt, self.ctxt.user_id,
- self.existing_key_name)
- self.keypair_api.delete_key_pair(self.ctxt, self.ctxt.user_id,
- self.existing_key_name)
- self.assertRaises(exception.KeypairNotFound,
- self.keypair_api.get_key_pair, self.ctxt, self.ctxt.user_id,
- self.existing_key_name)
-
- self._check_notifications(action='delete',
- key_name=self.existing_key_name)
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
deleted file mode 100644
index af859d91ad..0000000000
--- a/nova/tests/compute/test_resource_tracker.py
+++ /dev/null
@@ -1,1539 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Tests for compute resource tracking."""
-
-import uuid
-
-import mock
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-from oslo.utils import timeutils
-
-from nova.compute import flavors
-from nova.compute import resource_tracker
-from nova.compute import resources
-from nova.compute import task_states
-from nova.compute import vm_states
-from nova import context
-from nova import db
-from nova import objects
-from nova.objects import base as obj_base
-from nova import rpc
-from nova import test
-from nova.tests.compute.monitors import test_monitors
-from nova.tests.objects import test_migration
-from nova.tests.pci import fakes as pci_fakes
-from nova.virt import driver
-from nova.virt import hardware
-
-
-FAKE_VIRT_MEMORY_MB = 5
-FAKE_VIRT_MEMORY_OVERHEAD = 1
-FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
- FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
-FAKE_VIRT_NUMA_TOPOLOGY = hardware.VirtNUMAHostTopology(
- cells=[hardware.VirtNUMATopologyCellUsage(0, set([1, 2]), 3072),
- hardware.VirtNUMATopologyCellUsage(1, set([3, 4]), 3072)])
-FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = hardware.VirtNUMALimitTopology(
- cells=[hardware.VirtNUMATopologyCellLimit(
- 0, set([1, 2]), 3072, 4, 10240),
- hardware.VirtNUMATopologyCellLimit(
- 1, set([3, 4]), 3072, 4, 10240)])
-ROOT_GB = 5
-EPHEMERAL_GB = 1
-FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
-FAKE_VIRT_VCPUS = 1
-FAKE_VIRT_STATS = {'virt_stat': 10}
-FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
-RESOURCE_NAMES = ['vcpu']
-CONF = cfg.CONF
-
-
-class UnsupportedVirtDriver(driver.ComputeDriver):
- """Pretend version of a lame virt driver."""
-
- def __init__(self):
- super(UnsupportedVirtDriver, self).__init__(None)
-
- def get_host_ip_addr(self):
- return '127.0.0.1'
-
- def get_available_resource(self, nodename):
- # no support for getting resource usage info
- return {}
-
-
-class FakeVirtDriver(driver.ComputeDriver):
-
- def __init__(self, pci_support=False, stats=None,
- numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
- super(FakeVirtDriver, self).__init__(None)
- self.memory_mb = FAKE_VIRT_MEMORY_MB
- self.local_gb = FAKE_VIRT_LOCAL_GB
- self.vcpus = FAKE_VIRT_VCPUS
- self.numa_topology = numa_topology
-
- self.memory_mb_used = 0
- self.local_gb_used = 0
- self.pci_support = pci_support
- self.pci_devices = [{
- 'label': 'forza-napoli',
- 'dev_type': 'foo',
- 'compute_node_id': 1,
- 'address': '0000:00:00.1',
- 'product_id': 'p1',
- 'vendor_id': 'v1',
- 'status': 'available',
- 'extra_k1': 'v1'}] if self.pci_support else []
- self.pci_stats = [{
- 'count': 1,
- 'vendor_id': 'v1',
- 'product_id': 'p1'}] if self.pci_support else []
- if stats is not None:
- self.stats = stats
-
- def get_host_ip_addr(self):
- return '127.0.0.1'
-
- def get_available_resource(self, nodename):
- d = {
- 'vcpus': self.vcpus,
- 'memory_mb': self.memory_mb,
- 'local_gb': self.local_gb,
- 'vcpus_used': 0,
- 'memory_mb_used': self.memory_mb_used,
- 'local_gb_used': self.local_gb_used,
- 'hypervisor_type': 'fake',
- 'hypervisor_version': 0,
- 'hypervisor_hostname': 'fakehost',
- 'cpu_info': '',
- 'numa_topology': (
- self.numa_topology.to_json() if self.numa_topology else None),
- }
- if self.pci_support:
- d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
- if hasattr(self, 'stats'):
- d['stats'] = self.stats
- return d
-
- def estimate_instance_overhead(self, instance_info):
- instance_info['memory_mb'] # make sure memory value is present
- overhead = {
- 'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
- }
- return overhead # just return a constant value for testing
-
-
-class BaseTestCase(test.TestCase):
-
- def setUp(self):
- super(BaseTestCase, self).setUp()
-
- self.flags(reserved_host_disk_mb=0,
- reserved_host_memory_mb=0)
-
- self.context = context.get_admin_context()
-
- self.flags(use_local=True, group='conductor')
- self.conductor = self.start_service('conductor',
- manager=CONF.conductor.manager)
-
- self._instances = {}
- self._numa_topologies = {}
- self._instance_types = {}
-
- self.stubs.Set(self.conductor.db,
- 'instance_get_all_by_host_and_node',
- self._fake_instance_get_all_by_host_and_node)
- self.stubs.Set(db, 'instance_extra_get_by_instance_uuid',
- self._fake_instance_extra_get_by_instance_uuid)
- self.stubs.Set(self.conductor.db,
- 'instance_update_and_get_original',
- self._fake_instance_update_and_get_original)
- self.stubs.Set(self.conductor.db,
- 'flavor_get', self._fake_flavor_get)
-
- self.host = 'fakehost'
-
- def _create_compute_node(self, values=None):
- compute = {
- "id": 1,
- "service_id": 1,
- "vcpus": 1,
- "memory_mb": 1,
- "local_gb": 1,
- "vcpus_used": 1,
- "memory_mb_used": 1,
- "local_gb_used": 1,
- "free_ram_mb": 1,
- "free_disk_gb": 1,
- "current_workload": 1,
- "running_vms": 0,
- "cpu_info": None,
- "numa_topology": None,
- "stats": {
- "num_instances": "1",
- },
- "hypervisor_hostname": "fakenode",
- }
- if values:
- compute.update(values)
- return compute
-
- def _create_service(self, host="fakehost", compute=None):
- if compute:
- compute = [compute]
-
- service = {
- "id": 1,
- "host": host,
- "binary": "nova-compute",
- "topic": "compute",
- "compute_node": compute,
- }
- return service
-
- def _fake_instance_system_metadata(self, instance_type, prefix=''):
- sys_meta = []
- for key in flavors.system_metadata_flavor_props.keys():
- sys_meta.append({'key': '%sinstance_type_%s' % (prefix, key),
- 'value': instance_type[key]})
- return sys_meta
-
- def _fake_instance(self, stash=True, flavor=None, **kwargs):
-
- # Default to an instance ready to resize to or from the same
- # instance_type
- flavor = flavor or self._fake_flavor_create()
- sys_meta = self._fake_instance_system_metadata(flavor)
-
- if stash:
- # stash instance types in system metadata.
- sys_meta = (sys_meta +
- self._fake_instance_system_metadata(flavor, 'new_') +
- self._fake_instance_system_metadata(flavor, 'old_'))
-
- instance_uuid = str(uuid.uuid1())
- instance = {
- 'uuid': instance_uuid,
- 'vm_state': vm_states.RESIZED,
- 'task_state': None,
- 'ephemeral_key_uuid': None,
- 'os_type': 'Linux',
- 'project_id': '123456',
- 'host': None,
- 'node': None,
- 'instance_type_id': flavor['id'],
- 'memory_mb': flavor['memory_mb'],
- 'vcpus': flavor['vcpus'],
- 'root_gb': flavor['root_gb'],
- 'ephemeral_gb': flavor['ephemeral_gb'],
- 'launched_on': None,
- 'system_metadata': sys_meta,
- 'availability_zone': None,
- 'vm_mode': None,
- 'reservation_id': None,
- 'display_name': None,
- 'default_swap_device': None,
- 'power_state': None,
- 'scheduled_at': None,
- 'access_ip_v6': None,
- 'access_ip_v4': None,
- 'key_name': None,
- 'updated_at': None,
- 'cell_name': None,
- 'locked': None,
- 'locked_by': None,
- 'launch_index': None,
- 'architecture': None,
- 'auto_disk_config': None,
- 'terminated_at': None,
- 'ramdisk_id': None,
- 'user_data': None,
- 'cleaned': None,
- 'deleted_at': None,
- 'id': 333,
- 'disable_terminate': None,
- 'hostname': None,
- 'display_description': None,
- 'key_data': None,
- 'deleted': None,
- 'default_ephemeral_device': None,
- 'progress': None,
- 'launched_at': None,
- 'config_drive': None,
- 'kernel_id': None,
- 'user_id': None,
- 'shutdown_terminate': None,
- 'created_at': None,
- 'image_ref': None,
- 'root_device_name': None,
- }
- numa_topology = kwargs.pop('numa_topology', None)
- if numa_topology:
- numa_topology = {
- 'id': 1, 'created_at': None, 'updated_at': None,
- 'deleted_at': None, 'deleted': None,
- 'instance_uuid': instance['uuid'],
- 'numa_topology': numa_topology.to_json()
- }
- instance.update(kwargs)
-
- self._instances[instance_uuid] = instance
- self._numa_topologies[instance_uuid] = numa_topology
- return instance
-
- def _fake_flavor_create(self, **kwargs):
- instance_type = {
- 'id': 1,
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': False,
- 'disabled': False,
- 'is_public': True,
- 'name': 'fakeitype',
- 'memory_mb': FAKE_VIRT_MEMORY_MB,
- 'vcpus': FAKE_VIRT_VCPUS,
- 'root_gb': ROOT_GB,
- 'ephemeral_gb': EPHEMERAL_GB,
- 'swap': 0,
- 'rxtx_factor': 1.0,
- 'vcpu_weight': 1,
- 'flavorid': 'fakeflavor',
- 'extra_specs': {},
- }
- instance_type.update(**kwargs)
-
- id_ = instance_type['id']
- self._instance_types[id_] = instance_type
- return instance_type
-
- def _fake_instance_get_all_by_host_and_node(self, context, host, nodename):
- return [i for i in self._instances.values() if i['host'] == host]
-
- def _fake_instance_extra_get_by_instance_uuid(self, context,
- instance_uuid, columns=None):
- return self._numa_topologies.get(instance_uuid)
-
- def _fake_flavor_get(self, ctxt, id_):
- return self._instance_types[id_]
-
- def _fake_instance_update_and_get_original(self, context, instance_uuid,
- values):
- instance = self._instances[instance_uuid]
- instance.update(values)
- # the test doesn't care what the original instance values are, it's
- # only used in the subsequent notification:
- return (instance, instance)
-
- def _driver(self):
- return FakeVirtDriver()
-
- def _tracker(self, host=None):
-
- if host is None:
- host = self.host
-
- node = "fakenode"
-
- driver = self._driver()
-
- tracker = resource_tracker.ResourceTracker(host, driver, node)
- tracker.ext_resources_handler = \
- resources.ResourceHandler(RESOURCE_NAMES, True)
- return tracker
-
-
-class UnsupportedDriverTestCase(BaseTestCase):
- """Resource tracking should be disabled when the virt driver doesn't
- support it.
- """
- def setUp(self):
- super(UnsupportedDriverTestCase, self).setUp()
- self.tracker = self._tracker()
- # seed tracker with data:
- self.tracker.update_available_resource(self.context)
-
- def _driver(self):
- return UnsupportedVirtDriver()
-
- def test_disabled(self):
- # disabled = no compute node stats
- self.assertTrue(self.tracker.disabled)
- self.assertIsNone(self.tracker.compute_node)
-
- def test_disabled_claim(self):
- # basic claim:
- instance = self._fake_instance()
- claim = self.tracker.instance_claim(self.context, instance)
- self.assertEqual(0, claim.memory_mb)
-
- def test_disabled_instance_claim(self):
- # instance variation:
- instance = self._fake_instance()
- claim = self.tracker.instance_claim(self.context, instance)
- self.assertEqual(0, claim.memory_mb)
-
- def test_disabled_instance_context_claim(self):
- # instance context manager variation:
- instance = self._fake_instance()
- claim = self.tracker.instance_claim(self.context, instance)
- with self.tracker.instance_claim(self.context, instance) as claim:
- self.assertEqual(0, claim.memory_mb)
-
- def test_disabled_updated_usage(self):
- instance = self._fake_instance(host='fakehost', memory_mb=5,
- root_gb=10)
- self.tracker.update_usage(self.context, instance)
-
- def test_disabled_resize_claim(self):
- instance = self._fake_instance()
- instance_type = self._fake_flavor_create()
- claim = self.tracker.resize_claim(self.context, instance,
- instance_type)
- self.assertEqual(0, claim.memory_mb)
- self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
- self.assertEqual(instance_type['id'],
- claim.migration['new_instance_type_id'])
-
- def test_disabled_resize_context_claim(self):
- instance = self._fake_instance()
- instance_type = self._fake_flavor_create()
- with self.tracker.resize_claim(self.context, instance, instance_type) \
- as claim:
- self.assertEqual(0, claim.memory_mb)
-
-
-class MissingServiceTestCase(BaseTestCase):
- def setUp(self):
- super(MissingServiceTestCase, self).setUp()
- self.context = context.get_admin_context()
- self.tracker = self._tracker()
-
- def test_missing_service(self):
- self.tracker.update_available_resource(self.context)
- self.assertTrue(self.tracker.disabled)
-
-
-class MissingComputeNodeTestCase(BaseTestCase):
- def setUp(self):
- super(MissingComputeNodeTestCase, self).setUp()
- self.tracker = self._tracker()
-
- self.stubs.Set(db, 'service_get_by_compute_host',
- self._fake_service_get_by_compute_host)
- self.stubs.Set(db, 'compute_node_create',
- self._fake_create_compute_node)
- self.tracker.scheduler_client.update_resource_stats = mock.Mock()
-
- def _fake_create_compute_node(self, context, values):
- self.created = True
- return self._create_compute_node()
-
- def _fake_service_get_by_compute_host(self, ctx, host):
- # return a service with no joined compute
- service = self._create_service()
- return service
-
- def test_create_compute_node(self):
- self.tracker.update_available_resource(self.context)
- self.assertTrue(self.created)
-
- def test_enabled(self):
- self.tracker.update_available_resource(self.context)
- self.assertFalse(self.tracker.disabled)
-
-
-class BaseTrackerTestCase(BaseTestCase):
-
- def setUp(self):
- # setup plumbing for a working resource tracker with required
- # database models and a compatible compute driver:
- super(BaseTrackerTestCase, self).setUp()
-
- self.updated = False
- self.deleted = False
- self.update_call_count = 0
-
- self.tracker = self._tracker()
- self._migrations = {}
-
- self.stubs.Set(db, 'service_get_by_compute_host',
- self._fake_service_get_by_compute_host)
- self.stubs.Set(db, 'compute_node_update',
- self._fake_compute_node_update)
- self.stubs.Set(db, 'compute_node_delete',
- self._fake_compute_node_delete)
- self.stubs.Set(db, 'migration_update',
- self._fake_migration_update)
- self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
- self._fake_migration_get_in_progress_by_host_and_node)
-
- # Note that this must be called before the call to _init_tracker()
- patcher = pci_fakes.fake_pci_whitelist()
- self.addCleanup(patcher.stop)
-
- self._init_tracker()
- self.limits = self._limits()
-
- def _fake_service_get_by_compute_host(self, ctx, host):
- self.compute = self._create_compute_node()
- self.service = self._create_service(host, compute=self.compute)
- return self.service
-
- def _fake_compute_node_update(self, ctx, compute_node_id, values,
- prune_stats=False):
- self.update_call_count += 1
- self.updated = True
- self.compute.update(values)
- return self.compute
-
- def _fake_compute_node_delete(self, ctx, compute_node_id):
- self.deleted = True
- self.compute.update({'deleted': 1})
- return self.compute
-
- def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
- node):
- status = ['confirmed', 'reverted', 'error']
- migrations = []
-
- for migration in self._migrations.values():
- migration = obj_base.obj_to_primitive(migration)
- if migration['status'] in status:
- continue
-
- uuid = migration['instance_uuid']
- migration['instance'] = self._instances[uuid]
- migrations.append(migration)
-
- return migrations
-
- def _fake_migration_update(self, ctxt, migration_id, values):
- # cheat and assume there's only 1 migration present
- migration = self._migrations.values()[0]
- migration.update(values)
- return migration
-
- def _init_tracker(self):
- self.tracker.update_available_resource(self.context)
-
- def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
- disk_gb=FAKE_VIRT_LOCAL_GB,
- vcpus=FAKE_VIRT_VCPUS,
- numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
- """Create limits dictionary used for oversubscribing resources."""
-
- return {
- 'memory_mb': memory_mb,
- 'disk_gb': disk_gb,
- 'vcpu': vcpus,
- 'numa_topology': numa_topology.to_json() if numa_topology else None
- }
-
- def assertEqualNUMAHostTopology(self, expected, got):
- attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
- if None in (expected, got):
- if expected != got:
- raise AssertionError("Topologies don't match. Expected: "
- "%(expected)s, but got: %(got)s" %
- {'expected': expected, 'got': got})
- else:
- return
-
- if len(expected) != len(got):
- raise AssertionError("Topologies don't match due to different "
- "number of cells. Expected: "
- "%(expected)s, but got: %(got)s" %
- {'expected': expected, 'got': got})
- for exp_cell, got_cell in zip(expected.cells, got.cells):
- for attr in attrs:
- if getattr(exp_cell, attr) != getattr(got_cell, attr):
- raise AssertionError("Topologies don't match. Expected: "
- "%(expected)s, but got: %(got)s" %
- {'expected': expected, 'got': got})
-
- def _assert(self, value, field, tracker=None):
-
- if tracker is None:
- tracker = self.tracker
-
- if field not in tracker.compute_node:
- raise test.TestingException(
- "'%(field)s' not in compute node." % {'field': field})
- x = tracker.compute_node[field]
-
- if field == 'numa_topology':
- self.assertEqualNUMAHostTopology(
- value, hardware.VirtNUMAHostTopology.from_json(x))
- else:
- self.assertEqual(value, x)
-
-
-class TrackerTestCase(BaseTrackerTestCase):
-
- def test_free_ram_resource_value(self):
- driver = FakeVirtDriver()
- mem_free = driver.memory_mb - driver.memory_mb_used
- self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])
-
- def test_free_disk_resource_value(self):
- driver = FakeVirtDriver()
- mem_free = driver.local_gb - driver.local_gb_used
- self.assertEqual(mem_free, self.tracker.compute_node['free_disk_gb'])
-
- def test_update_compute_node(self):
- self.assertFalse(self.tracker.disabled)
- self.assertTrue(self.updated)
-
- def test_init(self):
- driver = self._driver()
- self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
- self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
- self._assert(FAKE_VIRT_VCPUS, 'vcpus')
- self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
- self._assert(0, 'memory_mb_used')
- self._assert(0, 'local_gb_used')
- self._assert(0, 'vcpus_used')
- self._assert(0, 'running_vms')
- self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
- self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
- self.assertFalse(self.tracker.disabled)
- self.assertEqual(0, self.tracker.compute_node['current_workload'])
- self.assertEqual(driver.pci_stats,
- jsonutils.loads(self.tracker.compute_node['pci_stats']))
-
-
-class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
-
- def setUp(self):
- super(SchedulerClientTrackerTestCase, self).setUp()
- self.tracker.scheduler_client.update_resource_stats = mock.Mock()
-
- def test_create_resource(self):
- self.tracker._write_ext_resources = mock.Mock()
- self.tracker.conductor_api.compute_node_create = mock.Mock(
- return_value=dict(id=1))
- values = {'stats': {}, 'foo': 'bar', 'baz_count': 0}
- self.tracker._create(self.context, values)
-
- expected = {'stats': '{}', 'foo': 'bar', 'baz_count': 0,
- 'id': 1}
- self.tracker.scheduler_client.update_resource_stats.\
- assert_called_once_with(self.context,
- ("fakehost", "fakenode"),
- expected)
-
- def test_update_resource(self):
- self.tracker._write_ext_resources = mock.Mock()
- values = {'stats': {}, 'foo': 'bar', 'baz_count': 0}
- self.tracker._update(self.context, values)
-
- expected = {'stats': '{}', 'foo': 'bar', 'baz_count': 0,
- 'id': 1}
- self.tracker.scheduler_client.update_resource_stats.\
- assert_called_once_with(self.context,
- ("fakehost", "fakenode"),
- expected)
-
-
-class TrackerPciStatsTestCase(BaseTrackerTestCase):
-
- def test_update_compute_node(self):
- self.assertFalse(self.tracker.disabled)
- self.assertTrue(self.updated)
-
- def test_init(self):
- driver = self._driver()
- self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
- self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
- self._assert(FAKE_VIRT_VCPUS, 'vcpus')
- self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
- self._assert(0, 'memory_mb_used')
- self._assert(0, 'local_gb_used')
- self._assert(0, 'vcpus_used')
- self._assert(0, 'running_vms')
- self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
- self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
- self.assertFalse(self.tracker.disabled)
- self.assertEqual(0, self.tracker.compute_node['current_workload'])
- self.assertEqual(driver.pci_stats,
- jsonutils.loads(self.tracker.compute_node['pci_stats']))
-
- def _driver(self):
- return FakeVirtDriver(pci_support=True)
-
-
-class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
-
- def setUp(self):
- super(TrackerExtraResourcesTestCase, self).setUp()
- self.driver = self._driver()
-
- def _driver(self):
- return FakeVirtDriver()
-
- def test_set_empty_ext_resources(self):
- resources = self.driver.get_available_resource(self.tracker.nodename)
- self.assertNotIn('stats', resources)
- self.tracker._write_ext_resources(resources)
- self.assertIn('stats', resources)
-
- def test_set_extra_resources(self):
- def fake_write_resources(resources):
- resources['stats']['resA'] = '123'
- resources['stats']['resB'] = 12
-
- self.stubs.Set(self.tracker.ext_resources_handler,
- 'write_resources',
- fake_write_resources)
-
- resources = self.driver.get_available_resource(self.tracker.nodename)
- self.tracker._write_ext_resources(resources)
-
- expected = {"resA": "123", "resB": 12}
- self.assertEqual(sorted(expected),
- sorted(resources['stats']))
-
-
-class InstanceClaimTestCase(BaseTrackerTestCase):
- def _instance_topology(self, mem):
- mem = mem * 1024
- return hardware.VirtNUMAInstanceTopology(
- cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), mem),
- hardware.VirtNUMATopologyCellInstance(1, set([3]), mem)])
-
- def _claim_topology(self, mem, cpus=1):
- if self.tracker.driver.numa_topology is None:
- return None
- mem = mem * 1024
- return hardware.VirtNUMAHostTopology(
- cells=[hardware.VirtNUMATopologyCellUsage(
- 0, set([1, 2]), 3072, cpu_usage=cpus,
- memory_usage=mem),
- hardware.VirtNUMATopologyCellUsage(
- 1, set([3, 4]), 3072, cpu_usage=cpus,
- memory_usage=mem)])
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
- def test_update_usage_only_for_tracked(self, mock_get):
- flavor = self._fake_flavor_create()
- claim_mem = flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD
- claim_gb = flavor['root_gb'] + flavor['ephemeral_gb']
- claim_topology = self._claim_topology(claim_mem / 2)
-
- instance_topology = self._instance_topology(claim_mem / 2)
-
- instance = self._fake_instance(
- flavor=flavor, task_state=None,
- numa_topology=instance_topology)
- self.tracker.update_usage(self.context, instance)
-
- self._assert(0, 'memory_mb_used')
- self._assert(0, 'local_gb_used')
- self._assert(0, 'current_workload')
- self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
-
- claim = self.tracker.instance_claim(self.context, instance,
- self.limits)
- self.assertNotEqual(0, claim.memory_mb)
- self._assert(claim_mem, 'memory_mb_used')
- self._assert(claim_gb, 'local_gb_used')
- self._assert(claim_topology, 'numa_topology')
-
- # now update should actually take effect
- instance['task_state'] = task_states.SCHEDULING
- self.tracker.update_usage(self.context, instance)
-
- self._assert(claim_mem, 'memory_mb_used')
- self._assert(claim_gb, 'local_gb_used')
- self._assert(claim_topology, 'numa_topology')
- self._assert(1, 'current_workload')
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
- def test_claim_and_audit(self, mock_get):
- claim_mem = 3
- claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
- claim_disk = 2
- claim_topology = self._claim_topology(claim_mem_total / 2)
-
- instance_topology = self._instance_topology(claim_mem_total / 2)
- instance = self._fake_instance(memory_mb=claim_mem, root_gb=claim_disk,
- ephemeral_gb=0, numa_topology=instance_topology)
-
- self.tracker.instance_claim(self.context, instance, self.limits)
-
- self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["memory_mb"])
- self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
- self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
- self.compute["free_ram_mb"])
- self.assertEqualNUMAHostTopology(
- claim_topology, hardware.VirtNUMAHostTopology.from_json(
- self.compute['numa_topology']))
-
- self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["local_gb"])
- self.assertEqual(claim_disk, self.compute["local_gb_used"])
- self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
- self.compute["free_disk_gb"])
-
- # 1st pretend that the compute operation finished and claimed the
- # desired resources from the virt layer
- driver = self.tracker.driver
- driver.memory_mb_used = claim_mem
- driver.local_gb_used = claim_disk
-
- self.tracker.update_available_resource(self.context)
-
- # confirm tracker is adding in host_ip
- self.assertIsNotNone(self.compute.get('host_ip'))
-
- # confirm that resource usage is derived from instance usages,
- # not virt layer:
- self.assertEqual(claim_mem_total, self.compute['memory_mb_used'])
- self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
- self.compute['free_ram_mb'])
- self.assertEqualNUMAHostTopology(
- claim_topology, hardware.VirtNUMAHostTopology.from_json(
- self.compute['numa_topology']))
-
- self.assertEqual(claim_disk, self.compute['local_gb_used'])
- self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
- self.compute['free_disk_gb'])
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
- def test_claim_and_abort(self, mock_get):
- claim_mem = 3
- claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
- claim_disk = 2
- claim_topology = self._claim_topology(claim_mem_total / 2)
-
- instance_topology = self._instance_topology(claim_mem_total / 2)
- instance = self._fake_instance(memory_mb=claim_mem,
- root_gb=claim_disk, ephemeral_gb=0,
- numa_topology=instance_topology)
-
- claim = self.tracker.instance_claim(self.context, instance,
- self.limits)
- self.assertIsNotNone(claim)
-
- self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
- self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
- self.compute["free_ram_mb"])
- self.assertEqualNUMAHostTopology(
- claim_topology, hardware.VirtNUMAHostTopology.from_json(
- self.compute['numa_topology']))
-
- self.assertEqual(claim_disk, self.compute["local_gb_used"])
- self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
- self.compute["free_disk_gb"])
-
- claim.abort()
-
- self.assertEqual(0, self.compute["memory_mb_used"])
- self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["free_ram_mb"])
- self.assertEqualNUMAHostTopology(
- FAKE_VIRT_NUMA_TOPOLOGY,
- hardware.VirtNUMAHostTopology.from_json(
- self.compute['numa_topology']))
-
- self.assertEqual(0, self.compute["local_gb_used"])
- self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["free_disk_gb"])
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
- def test_instance_claim_with_oversubscription(self, mock_get):
- memory_mb = FAKE_VIRT_MEMORY_MB * 2
- root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
- vcpus = FAKE_VIRT_VCPUS * 2
- claim_topology = self._claim_topology(memory_mb)
- instance_topology = self._instance_topology(memory_mb)
-
- limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
- 'disk_gb': root_gb * 2,
- 'vcpu': vcpus,
- 'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD.to_json()}
-
- instance = self._fake_instance(memory_mb=memory_mb,
- root_gb=root_gb, ephemeral_gb=ephemeral_gb,
- numa_topology=instance_topology)
-
- self.tracker.instance_claim(self.context, instance, limits)
- self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
- self.tracker.compute_node['memory_mb_used'])
- self.assertEqualNUMAHostTopology(
- claim_topology,
- hardware.VirtNUMAHostTopology.from_json(
- self.compute['numa_topology']))
- self.assertEqual(root_gb * 2,
- self.tracker.compute_node['local_gb_used'])
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
- def test_additive_claims(self, mock_get):
- self.limits['vcpu'] = 2
- claim_topology = self._claim_topology(2, cpus=2)
-
- flavor = self._fake_flavor_create(
- memory_mb=1, root_gb=1, ephemeral_gb=0)
- instance_topology = self._instance_topology(1)
- instance = self._fake_instance(
- flavor=flavor, numa_topology=instance_topology)
- with self.tracker.instance_claim(self.context, instance, self.limits):
- pass
- instance = self._fake_instance(
- flavor=flavor, numa_topology=instance_topology)
- with self.tracker.instance_claim(self.context, instance, self.limits):
- pass
-
- self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
- self.tracker.compute_node['memory_mb_used'])
- self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
- self.tracker.compute_node['local_gb_used'])
- self.assertEqual(2 * flavor['vcpus'],
- self.tracker.compute_node['vcpus_used'])
-
- self.assertEqualNUMAHostTopology(
- claim_topology,
- hardware.VirtNUMAHostTopology.from_json(
- self.compute['numa_topology']))
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
- def test_context_claim_with_exception(self, mock_get):
- instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1)
- try:
- with self.tracker.instance_claim(self.context, instance):
- # <insert exciting things that utilize resources>
- raise test.TestingException()
- except test.TestingException:
- pass
-
- self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
- self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
- self.assertEqual(0, self.compute['memory_mb_used'])
- self.assertEqual(0, self.compute['local_gb_used'])
- self.assertEqualNUMAHostTopology(
- FAKE_VIRT_NUMA_TOPOLOGY,
- hardware.VirtNUMAHostTopology.from_json(
- self.compute['numa_topology']))
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
- def test_instance_context_claim(self, mock_get):
- flavor = self._fake_flavor_create(
- memory_mb=1, root_gb=2, ephemeral_gb=3)
- claim_topology = self._claim_topology(1)
-
- instance_topology = self._instance_topology(1)
- instance = self._fake_instance(
- flavor=flavor, numa_topology=instance_topology)
- with self.tracker.instance_claim(self.context, instance):
- # <insert exciting things that utilize resources>
- self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
- self.tracker.compute_node['memory_mb_used'])
- self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
- self.tracker.compute_node['local_gb_used'])
- self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
- self.compute['memory_mb_used'])
- self.assertEqualNUMAHostTopology(
- claim_topology,
- hardware.VirtNUMAHostTopology.from_json(
- self.compute['numa_topology']))
- self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
- self.compute['local_gb_used'])
-
- # after exiting claim context, build is marked as finished. usage
- # totals should be same:
- self.tracker.update_available_resource(self.context)
- self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
- self.tracker.compute_node['memory_mb_used'])
- self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
- self.tracker.compute_node['local_gb_used'])
- self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
- self.compute['memory_mb_used'])
- self.assertEqualNUMAHostTopology(
- claim_topology,
- hardware.VirtNUMAHostTopology.from_json(
- self.compute['numa_topology']))
- self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
- self.compute['local_gb_used'])
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
- def test_update_load_stats_for_instance(self, mock_get):
- instance = self._fake_instance(task_state=task_states.SCHEDULING)
- with self.tracker.instance_claim(self.context, instance):
- pass
-
- self.assertEqual(1, self.tracker.compute_node['current_workload'])
-
- instance['vm_state'] = vm_states.ACTIVE
- instance['task_state'] = None
- instance['host'] = 'fakehost'
-
- self.tracker.update_usage(self.context, instance)
- self.assertEqual(0, self.tracker.compute_node['current_workload'])
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
- def test_cpu_stats(self, mock_get):
- limits = {'disk_gb': 100, 'memory_mb': 100}
- self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
-
- vcpus = 1
- instance = self._fake_instance(vcpus=vcpus)
-
- # should not do anything until a claim is made:
- self.tracker.update_usage(self.context, instance)
- self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
-
- with self.tracker.instance_claim(self.context, instance, limits):
- pass
- self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
-
- # instance state can change without modifying vcpus in use:
- instance['task_state'] = task_states.SCHEDULING
- self.tracker.update_usage(self.context, instance)
- self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
-
- add_vcpus = 10
- vcpus += add_vcpus
- instance = self._fake_instance(vcpus=add_vcpus)
- with self.tracker.instance_claim(self.context, instance, limits):
- pass
- self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
-
- instance['vm_state'] = vm_states.DELETED
- self.tracker.update_usage(self.context, instance)
- vcpus -= add_vcpus
- self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
-
- def test_skip_deleted_instances(self):
- # ensure that the audit process skips instances that have vm_state
- # DELETED, but the DB record is not yet deleted.
- self._fake_instance(vm_state=vm_states.DELETED, host=self.host)
- self.tracker.update_available_resource(self.context)
-
- self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
- self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
-
-
-class ResizeClaimTestCase(BaseTrackerTestCase):
-
- def setUp(self):
- super(ResizeClaimTestCase, self).setUp()
-
- def _fake_migration_create(mig_self, ctxt):
- self._migrations[mig_self.instance_uuid] = mig_self
- mig_self.obj_reset_changes()
-
- self.stubs.Set(objects.Migration, 'create',
- _fake_migration_create)
-
- self.instance = self._fake_instance()
- self.instance_type = self._fake_flavor_create()
-
- def _fake_migration_create(self, context, values=None):
- instance_uuid = str(uuid.uuid1())
- mig_dict = test_migration.fake_db_migration()
- mig_dict.update({
- 'id': 1,
- 'source_compute': 'host1',
- 'source_node': 'fakenode',
- 'dest_compute': 'host2',
- 'dest_node': 'fakenode',
- 'dest_host': '127.0.0.1',
- 'old_instance_type_id': 1,
- 'new_instance_type_id': 2,
- 'instance_uuid': instance_uuid,
- 'status': 'pre-migrating',
- 'updated_at': timeutils.utcnow()
- })
- if values:
- mig_dict.update(values)
-
- migration = objects.Migration()
- migration.update(mig_dict)
- # This hits the stub in setUp()
- migration.create('fake')
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
- def test_claim(self, mock_get):
- self.tracker.resize_claim(self.context, self.instance,
- self.instance_type, self.limits)
- self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
- self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
- self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
- self.assertEqual(1, len(self.tracker.tracked_migrations))
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
- def test_abort(self, mock_get):
- try:
- with self.tracker.resize_claim(self.context, self.instance,
- self.instance_type, self.limits):
- raise test.TestingException("abort")
- except test.TestingException:
- pass
-
- self._assert(0, 'memory_mb_used')
- self._assert(0, 'local_gb_used')
- self._assert(0, 'vcpus_used')
- self.assertEqual(0, len(self.tracker.tracked_migrations))
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
- def test_additive_claims(self, mock_get):
-
- limits = self._limits(
- 2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
- 2 * FAKE_VIRT_LOCAL_GB,
- 2 * FAKE_VIRT_VCPUS)
- self.tracker.resize_claim(self.context, self.instance,
- self.instance_type, limits)
- instance2 = self._fake_instance()
- self.tracker.resize_claim(self.context, instance2, self.instance_type,
- limits)
-
- self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
- self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
- self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
- def test_claim_and_audit(self, mock_get):
- self.tracker.resize_claim(self.context, self.instance,
- self.instance_type, self.limits)
-
- self.tracker.update_available_resource(self.context)
-
- self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
- self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
- self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
- def test_same_host(self, mock_get):
- self.limits['vcpu'] = 3
-
- src_dict = {
- 'memory_mb': 1, 'root_gb': 1, 'ephemeral_gb': 0, 'vcpus': 1}
- dest_dict = dict((k, v + 1) for (k, v) in src_dict.iteritems())
- src_type = self._fake_flavor_create(
- id=10, name="srcflavor", **src_dict)
- dest_type = self._fake_flavor_create(
- id=11, name="destflavor", **dest_dict)
-
- # make an instance of src_type:
- instance = self._fake_instance(flavor=src_type)
- instance['system_metadata'] = self._fake_instance_system_metadata(
- dest_type)
- self.tracker.instance_claim(self.context, instance, self.limits)
-
- # resize to dest_type:
- claim = self.tracker.resize_claim(self.context, instance,
- dest_type, self.limits)
-
- self._assert(src_dict['memory_mb'] + dest_dict['memory_mb']
- + 2 * FAKE_VIRT_MEMORY_OVERHEAD, 'memory_mb_used')
- self._assert(src_dict['root_gb'] + src_dict['ephemeral_gb']
- + dest_dict['root_gb'] + dest_dict['ephemeral_gb'],
- 'local_gb_used')
- self._assert(src_dict['vcpus'] + dest_dict['vcpus'], 'vcpus_used')
-
- self.tracker.update_available_resource(self.context)
- claim.abort()
-
- # only the original instance should remain, not the migration:
- self._assert(src_dict['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
- 'memory_mb_used')
- self._assert(src_dict['root_gb'] + src_dict['ephemeral_gb'],
- 'local_gb_used')
- self._assert(src_dict['vcpus'], 'vcpus_used')
- self.assertEqual(1, len(self.tracker.tracked_instances))
- self.assertEqual(0, len(self.tracker.tracked_migrations))
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
- def test_revert(self, mock_get):
- self.tracker.resize_claim(self.context, self.instance,
- self.instance_type, {}, self.limits)
- self.tracker.drop_resize_claim(self.context, self.instance)
-
- self.assertEqual(0, len(self.tracker.tracked_instances))
- self.assertEqual(0, len(self.tracker.tracked_migrations))
- self._assert(0, 'memory_mb_used')
- self._assert(0, 'local_gb_used')
- self._assert(0, 'vcpus_used')
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
- def test_revert_reserve_source(self, mock_get):
- # if a revert has started at the API and audit runs on
- # the source compute before the instance flips back to source,
- # resources should still be held at the source based on the
- # migration:
- dest = "desthost"
- dest_tracker = self._tracker(host=dest)
- dest_tracker.update_available_resource(self.context)
-
- self.instance = self._fake_instance(memory_mb=FAKE_VIRT_MEMORY_MB,
- root_gb=FAKE_VIRT_LOCAL_GB, ephemeral_gb=0,
- vcpus=FAKE_VIRT_VCPUS, instance_type_id=1)
-
- values = {'source_compute': self.host, 'dest_compute': dest,
- 'old_instance_type_id': 1, 'new_instance_type_id': 1,
- 'status': 'post-migrating',
- 'instance_uuid': self.instance['uuid']}
- self._fake_migration_create(self.context, values)
-
- # attach an instance to the destination host tracker:
- dest_tracker.instance_claim(self.context, self.instance)
-
- self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD,
- 'memory_mb_used', tracker=dest_tracker)
- self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
- tracker=dest_tracker)
- self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
- tracker=dest_tracker)
-
- # audit and recheck to confirm migration doesn't get double counted
- # on dest:
- dest_tracker.update_available_resource(self.context)
-
- self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD,
- 'memory_mb_used', tracker=dest_tracker)
- self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
- tracker=dest_tracker)
- self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
- tracker=dest_tracker)
-
- # apply the migration to the source host tracker:
- self.tracker.update_available_resource(self.context)
-
- self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
- self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
- self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
-
- # flag the instance and migration as reverting and re-audit:
- self.instance['vm_state'] = vm_states.RESIZED
- self.instance['task_state'] = task_states.RESIZE_REVERTING
- self.tracker.update_available_resource(self.context)
-
- self._assert(FAKE_VIRT_MEMORY_MB + 1, 'memory_mb_used')
- self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
- self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
-
- def test_resize_filter(self):
- instance = self._fake_instance(vm_state=vm_states.ACTIVE,
- task_state=task_states.SUSPENDING)
- self.assertFalse(self.tracker._instance_in_resize_state(instance))
-
- instance = self._fake_instance(vm_state=vm_states.RESIZED,
- task_state=task_states.SUSPENDING)
- self.assertTrue(self.tracker._instance_in_resize_state(instance))
-
- states = [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING,
- task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH]
- for vm_state in [vm_states.ACTIVE, vm_states.STOPPED]:
- for task_state in states:
- instance = self._fake_instance(vm_state=vm_state,
- task_state=task_state)
- result = self.tracker._instance_in_resize_state(instance)
- self.assertTrue(result)
-
- def test_dupe_filter(self):
- instance = self._fake_instance(host=self.host)
-
- values = {'source_compute': self.host, 'dest_compute': self.host,
- 'instance_uuid': instance['uuid'], 'new_instance_type_id': 2}
- self._fake_flavor_create(id=2)
- self._fake_migration_create(self.context, values)
- self._fake_migration_create(self.context, values)
-
- self.tracker.update_available_resource(self.context)
- self.assertEqual(1, len(self.tracker.tracked_migrations))
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
- return_value=objects.InstancePCIRequests(requests=[]))
- def test_set_instance_host_and_node(self, mock_get):
- instance = self._fake_instance()
- self.assertIsNone(instance['host'])
- self.assertIsNone(instance['launched_on'])
- self.assertIsNone(instance['node'])
-
- claim = self.tracker.instance_claim(self.context, instance)
- self.assertNotEqual(0, claim.memory_mb)
-
- self.assertEqual('fakehost', instance['host'])
- self.assertEqual('fakehost', instance['launched_on'])
- self.assertEqual('fakenode', instance['node'])
-
-
-class NoInstanceTypesInSysMetadata(ResizeClaimTestCase):
- """Make sure we handle the case where the following are true:
-
- #) Compute node C gets upgraded to code that looks for instance types in
- system metadata. AND
- #) C already has instances in the process of migrating that do not have
- stashed instance types.
-
- bug 1164110
- """
- def setUp(self):
- super(NoInstanceTypesInSysMetadata, self).setUp()
- self.instance = self._fake_instance(stash=False)
-
- def test_get_instance_type_stash_false(self):
- with (mock.patch.object(objects.Flavor, 'get_by_id',
- return_value=self.instance_type)):
- flavor = self.tracker._get_instance_type(self.context,
- self.instance, "new_")
- self.assertEqual(self.instance_type, flavor)
-
-
-class OrphanTestCase(BaseTrackerTestCase):
- def _driver(self):
- class OrphanVirtDriver(FakeVirtDriver):
- def get_per_instance_usage(self):
- return {
- '1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
- 'uuid': '1-2-3-4-5'},
- '2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
- 'uuid': '2-3-4-5-6'},
- }
-
- return OrphanVirtDriver()
-
- def test_usage(self):
- self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
- self.tracker.compute_node['memory_mb_used'])
-
- def test_find(self):
- # create one legit instance and verify the 2 orphans remain
- self._fake_instance()
- orphans = self.tracker._find_orphaned_instances()
-
- self.assertEqual(2, len(orphans))
-
-
-class ComputeMonitorTestCase(BaseTestCase):
- def setUp(self):
- super(ComputeMonitorTestCase, self).setUp()
- fake_monitors = [
- 'nova.tests.compute.monitors.test_monitors.FakeMonitorClass1',
- 'nova.tests.compute.monitors.test_monitors.FakeMonitorClass2']
- self.flags(compute_available_monitors=fake_monitors)
- self.tracker = self._tracker()
- self.node_name = 'nodename'
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.info = {}
- self.context = context.RequestContext(self.user_id,
- self.project_id)
-
- def test_get_host_metrics_none(self):
- self.flags(compute_monitors=['FakeMontorClass1', 'FakeMonitorClass4'])
- self.tracker.monitors = []
- metrics = self.tracker._get_host_metrics(self.context,
- self.node_name)
- self.assertEqual(len(metrics), 0)
-
- def test_get_host_metrics_one_failed(self):
- self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
- class1 = test_monitors.FakeMonitorClass1(self.tracker)
- class4 = test_monitors.FakeMonitorClass4(self.tracker)
- self.tracker.monitors = [class1, class4]
- metrics = self.tracker._get_host_metrics(self.context,
- self.node_name)
- self.assertTrue(len(metrics) > 0)
-
- def test_get_host_metrics(self):
- self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass2'])
- class1 = test_monitors.FakeMonitorClass1(self.tracker)
- class2 = test_monitors.FakeMonitorClass2(self.tracker)
- self.tracker.monitors = [class1, class2]
-
- mock_notifier = mock.Mock()
-
- with mock.patch.object(rpc, 'get_notifier',
- return_value=mock_notifier) as mock_get:
- metrics = self.tracker._get_host_metrics(self.context,
- self.node_name)
- mock_get.assert_called_once_with(service='compute',
- host=self.node_name)
-
- expected_metrics = [{
- 'timestamp': 1232,
- 'name': 'key1',
- 'value': 2600,
- 'source': 'libvirt'
- }, {
- 'name': 'key2',
- 'source': 'libvirt',
- 'timestamp': 123,
- 'value': 1600
- }]
-
- payload = {
- 'metrics': expected_metrics,
- 'host': self.tracker.host,
- 'host_ip': CONF.my_ip,
- 'nodename': self.node_name
- }
-
- mock_notifier.info.assert_called_once_with(
- self.context, 'compute.metrics.update', payload)
-
- self.assertEqual(metrics, expected_metrics)
-
-
-class TrackerPeriodicTestCase(BaseTrackerTestCase):
-
- def test_periodic_status_update(self):
- # verify update called on instantiation
- self.assertEqual(1, self.update_call_count)
-
- # verify update not called if no change to resources
- self.tracker.update_available_resource(self.context)
- self.assertEqual(1, self.update_call_count)
-
- # verify update is called when resources change
- driver = self.tracker.driver
- driver.memory_mb += 1
- self.tracker.update_available_resource(self.context)
- self.assertEqual(2, self.update_call_count)
-
- def test_update_available_resource_calls_locked_inner(self):
- @mock.patch.object(self.tracker, 'driver')
- @mock.patch.object(self.tracker,
- '_update_available_resource')
- @mock.patch.object(self.tracker, '_verify_resources')
- @mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
- def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
- resources = {'there is someone in my head': 'but it\'s not me'}
- mock_driver.get_available_resource.return_value = resources
- self.tracker.update_available_resource(self.context)
- mock_uar.assert_called_once_with(self.context, resources)
-
- _test()
-
-
-class StatsDictTestCase(BaseTrackerTestCase):
- """Test stats handling for a virt driver that provides
- stats as a dictionary.
- """
- def _driver(self):
- return FakeVirtDriver(stats=FAKE_VIRT_STATS)
-
- def _get_stats(self):
- return jsonutils.loads(self.tracker.compute_node['stats'])
-
- def test_virt_stats(self):
- # start with virt driver stats
- stats = self._get_stats()
- self.assertEqual(FAKE_VIRT_STATS, stats)
-
- # adding an instance should keep virt driver stats
- self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
- self.tracker.update_available_resource(self.context)
-
- stats = self._get_stats()
- expected_stats = {}
- expected_stats.update(FAKE_VIRT_STATS)
- expected_stats.update(self.tracker.stats)
- self.assertEqual(expected_stats, stats)
-
- # removing the instances should keep only virt driver stats
- self._instances = {}
- self.tracker.update_available_resource(self.context)
-
- stats = self._get_stats()
- self.assertEqual(FAKE_VIRT_STATS, stats)
-
-
-class StatsJsonTestCase(BaseTrackerTestCase):
- """Test stats handling for a virt driver that provides
- stats as a json string.
- """
- def _driver(self):
- return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)
-
- def _get_stats(self):
- return jsonutils.loads(self.tracker.compute_node['stats'])
-
- def test_virt_stats(self):
- # start with virt driver stats
- stats = self._get_stats()
- self.assertEqual(FAKE_VIRT_STATS, stats)
-
- # adding an instance should keep virt driver stats
- # and add rt stats
- self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
- self.tracker.update_available_resource(self.context)
-
- stats = self._get_stats()
- expected_stats = {}
- expected_stats.update(FAKE_VIRT_STATS)
- expected_stats.update(self.tracker.stats)
- self.assertEqual(expected_stats, stats)
-
- # removing the instances should keep only virt driver stats
- self._instances = {}
- self.tracker.update_available_resource(self.context)
- stats = self._get_stats()
- self.assertEqual(FAKE_VIRT_STATS, stats)
-
-
-class StatsInvalidJsonTestCase(BaseTrackerTestCase):
- """Test stats handling for a virt driver that provides
- an invalid type for stats.
- """
- def _driver(self):
- return FakeVirtDriver(stats='this is not json')
-
- def _init_tracker(self):
- # do not do initial update in setup
- pass
-
- def test_virt_stats(self):
- # should throw exception for string that does not parse as json
- self.assertRaises(ValueError,
- self.tracker.update_available_resource,
- context=self.context)
-
-
-class StatsInvalidTypeTestCase(BaseTrackerTestCase):
- """Test stats handling for a virt driver that provides
- an invalid type for stats.
- """
- def _driver(self):
- return FakeVirtDriver(stats=10)
-
- def _init_tracker(self):
- # do not do initial update in setup
- pass
-
- def test_virt_stats(self):
- # should throw exception for incorrect stats value type
- self.assertRaises(ValueError,
- self.tracker.update_available_resource,
- context=self.context)
diff --git a/nova/tests/compute/test_resources.py b/nova/tests/compute/test_resources.py
deleted file mode 100644
index db2722ccb5..0000000000
--- a/nova/tests/compute/test_resources.py
+++ /dev/null
@@ -1,344 +0,0 @@
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Tests for the compute extra resources framework."""
-
-
-from oslo.config import cfg
-from stevedore import extension
-from stevedore import named
-
-from nova.compute import resources
-from nova.compute.resources import base
-from nova.compute.resources import vcpu
-from nova import context
-from nova.i18n import _
-from nova.objects import flavor as flavor_obj
-from nova import test
-from nova.tests.fake_instance import fake_instance_obj
-
-CONF = cfg.CONF
-
-
-class FakeResourceHandler(resources.ResourceHandler):
- def __init__(self, extensions):
- self._mgr = \
- named.NamedExtensionManager.make_test_instance(extensions)
-
-
-class FakeResource(base.Resource):
-
- def __init__(self):
- self.total_res = 0
- self.used_res = 0
-
- def _get_requested(self, usage):
- if 'extra_specs' not in usage:
- return
- if self.resource_name not in usage['extra_specs']:
- return
- req = usage['extra_specs'][self.resource_name]
- return int(req)
-
- def _get_limit(self, limits):
- if self.resource_name not in limits:
- return
- limit = limits[self.resource_name]
- return int(limit)
-
- def reset(self, resources, driver):
- self.total_res = 0
- self.used_res = 0
-
- def test(self, usage, limits):
- requested = self._get_requested(usage)
- if not requested:
- return
-
- limit = self._get_limit(limits)
- if not limit:
- return
-
- free = limit - self.used_res
- if requested <= free:
- return
- else:
- return (_('Free %(free)d < requested %(requested)d ') %
- {'free': free, 'requested': requested})
-
- def add_instance(self, usage):
- requested = self._get_requested(usage)
- if requested:
- self.used_res += requested
-
- def remove_instance(self, usage):
- requested = self._get_requested(usage)
- if requested:
- self.used_res -= requested
-
- def write(self, resources):
- pass
-
- def report_free(self):
- return "Free %s" % (self.total_res - self.used_res)
-
-
-class ResourceA(FakeResource):
-
- def reset(self, resources, driver):
- # ResourceA uses a configuration option
- self.total_res = int(CONF.resA)
- self.used_res = 0
- self.resource_name = 'resource:resA'
-
- def write(self, resources):
- resources['resA'] = self.total_res
- resources['used_resA'] = self.used_res
-
-
-class ResourceB(FakeResource):
-
- def reset(self, resources, driver):
- # ResourceB uses resource details passed in parameter resources
- self.total_res = resources['resB']
- self.used_res = 0
- self.resource_name = 'resource:resB'
-
- def write(self, resources):
- resources['resB'] = self.total_res
- resources['used_resB'] = self.used_res
-
-
-def fake_flavor_obj(**updates):
- flavor = flavor_obj.Flavor()
- flavor.id = 1
- flavor.name = 'fakeflavor'
- flavor.memory_mb = 8000
- flavor.vcpus = 3
- flavor.root_gb = 11
- flavor.ephemeral_gb = 4
- flavor.swap = 0
- flavor.rxtx_factor = 1.0
- flavor.vcpu_weight = 1
- if updates:
- flavor.update(updates)
- return flavor
-
-
-class BaseTestCase(test.TestCase):
-
- def _initialize_used_res_counter(self):
- # Initialize the value for the used resource
- for ext in self.r_handler._mgr.extensions:
- ext.obj.used_res = 0
-
- def setUp(self):
- super(BaseTestCase, self).setUp()
-
- # initialize flavors and stub get_by_id to
- # get flavors from here
- self._flavors = {}
- self.ctxt = context.get_admin_context()
-
- # Create a flavor without extra_specs defined
- _flavor_id = 1
- _flavor = fake_flavor_obj(id=_flavor_id)
- self._flavors[_flavor_id] = _flavor
-
- # Create a flavor with extra_specs defined
- _flavor_id = 2
- requested_resA = 5
- requested_resB = 7
- requested_resC = 7
- _extra_specs = {'resource:resA': requested_resA,
- 'resource:resB': requested_resB,
- 'resource:resC': requested_resC}
- _flavor = fake_flavor_obj(id=_flavor_id,
- extra_specs=_extra_specs)
- self._flavors[_flavor_id] = _flavor
-
- # create fake resource extensions and resource handler
- _extensions = [
- extension.Extension('resA', None, ResourceA, ResourceA()),
- extension.Extension('resB', None, ResourceB, ResourceB()),
- ]
- self.r_handler = FakeResourceHandler(_extensions)
-
- # Resources details can be passed to each plugin or can be specified as
- # configuration options
- driver_resources = {'resB': 5}
- CONF.resA = '10'
-
- # initialise the resources
- self.r_handler.reset_resources(driver_resources, None)
-
- def test_update_from_instance_with_extra_specs(self):
- # Flavor with extra_specs
- _flavor_id = 2
- sign = 1
- self.r_handler.update_from_instance(self._flavors[_flavor_id], sign)
-
- expected_resA = self._flavors[_flavor_id].extra_specs['resource:resA']
- expected_resB = self._flavors[_flavor_id].extra_specs['resource:resB']
- self.assertEqual(int(expected_resA),
- self.r_handler._mgr['resA'].obj.used_res)
- self.assertEqual(int(expected_resB),
- self.r_handler._mgr['resB'].obj.used_res)
-
- def test_update_from_instance_without_extra_specs(self):
- # Flavor id without extra spec
- _flavor_id = 1
- self._initialize_used_res_counter()
- self.r_handler.resource_list = []
- sign = 1
- self.r_handler.update_from_instance(self._flavors[_flavor_id], sign)
- self.assertEqual(0, self.r_handler._mgr['resA'].obj.used_res)
- self.assertEqual(0, self.r_handler._mgr['resB'].obj.used_res)
-
- def test_write_resources(self):
- self._initialize_used_res_counter()
- extra_resources = {}
- expected = {'resA': 10, 'used_resA': 0, 'resB': 5, 'used_resB': 0}
- self.r_handler.write_resources(extra_resources)
- self.assertEqual(expected, extra_resources)
-
- def test_test_resources_without_extra_specs(self):
- limits = {}
- # Flavor id without extra_specs
- flavor = self._flavors[1]
- result = self.r_handler.test_resources(flavor, limits)
- self.assertEqual([None, None], result)
-
- def test_test_resources_with_limits_for_different_resource(self):
- limits = {'resource:resC': 20}
- # Flavor id with extra_specs
- flavor = self._flavors[2]
- result = self.r_handler.test_resources(flavor, limits)
- self.assertEqual([None, None], result)
-
- def test_passing_test_resources(self):
- limits = {'resource:resA': 10, 'resource:resB': 20}
- # Flavor id with extra_specs
- flavor = self._flavors[2]
- self._initialize_used_res_counter()
- result = self.r_handler.test_resources(flavor, limits)
- self.assertEqual([None, None], result)
-
- def test_failing_test_resources_for_single_resource(self):
- limits = {'resource:resA': 4, 'resource:resB': 20}
- # Flavor id with extra_specs
- flavor = self._flavors[2]
- self._initialize_used_res_counter()
- result = self.r_handler.test_resources(flavor, limits)
- expected = ['Free 4 < requested 5 ', None]
- self.assertEqual(sorted(expected),
- sorted(result))
-
- def test_empty_resource_handler(self):
- """An empty resource handler has no resource extensions,
- should have no effect, and should raise no exceptions.
- """
- empty_r_handler = FakeResourceHandler([])
-
- resources = {}
- empty_r_handler.reset_resources(resources, None)
-
- flavor = self._flavors[1]
- sign = 1
- empty_r_handler.update_from_instance(flavor, sign)
-
- limits = {}
- test_result = empty_r_handler.test_resources(flavor, limits)
- self.assertEqual([], test_result)
-
- sign = -1
- empty_r_handler.update_from_instance(flavor, sign)
-
- extra_resources = {}
- expected_extra_resources = extra_resources
- empty_r_handler.write_resources(extra_resources)
- self.assertEqual(expected_extra_resources, extra_resources)
-
- empty_r_handler.report_free_resources()
-
- def test_vcpu_resource_load(self):
- # load the vcpu example
- names = ['vcpu']
- real_r_handler = resources.ResourceHandler(names)
- ext_names = real_r_handler._mgr.names()
- self.assertEqual(names, ext_names)
-
- # check the extension loaded is the one we expect
- # and an instance of the object has been created
- ext = real_r_handler._mgr['vcpu']
- self.assertIsInstance(ext.obj, vcpu.VCPU)
-
-
-class TestVCPU(test.TestCase):
-
- def setUp(self):
- super(TestVCPU, self).setUp()
- self._vcpu = vcpu.VCPU()
- self._vcpu._total = 10
- self._vcpu._used = 0
- self._flavor = fake_flavor_obj(vcpus=5)
- self._big_flavor = fake_flavor_obj(vcpus=20)
- self._instance = fake_instance_obj(None)
-
- def test_reset(self):
- # set vcpu values to something different to test reset
- self._vcpu._total = 10
- self._vcpu._used = 5
-
- driver_resources = {'vcpus': 20}
- self._vcpu.reset(driver_resources, None)
- self.assertEqual(20, self._vcpu._total)
- self.assertEqual(0, self._vcpu._used)
-
- def test_add_and_remove_instance(self):
- self._vcpu.add_instance(self._flavor)
- self.assertEqual(10, self._vcpu._total)
- self.assertEqual(5, self._vcpu._used)
-
- self._vcpu.remove_instance(self._flavor)
- self.assertEqual(10, self._vcpu._total)
- self.assertEqual(0, self._vcpu._used)
-
- def test_test_pass_limited(self):
- result = self._vcpu.test(self._flavor, {'vcpu': 10})
- self.assertIsNone(result, 'vcpu test failed when it should pass')
-
- def test_test_pass_unlimited(self):
- result = self._vcpu.test(self._big_flavor, {})
- self.assertIsNone(result, 'vcpu test failed when it should pass')
-
- def test_test_fail(self):
- result = self._vcpu.test(self._flavor, {'vcpu': 2})
- expected = _('Free CPUs 2.00 VCPUs < requested 5 VCPUs')
- self.assertEqual(expected, result)
-
- def test_write(self):
- resources = {'stats': {}}
- self._vcpu.write(resources)
- expected = {
- 'vcpus': 10,
- 'vcpus_used': 0,
- 'stats': {
- 'num_vcpus': 10,
- 'num_vcpus_used': 0
- }
- }
- self.assertEqual(sorted(expected),
- sorted(resources))
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
deleted file mode 100644
index df13dbff99..0000000000
--- a/nova/tests/compute/test_rpcapi.py
+++ /dev/null
@@ -1,486 +0,0 @@
-# Copyright 2012, Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Unit Tests for nova.compute.rpcapi
-"""
-
-import contextlib
-
-import mock
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-
-from nova.compute import rpcapi as compute_rpcapi
-from nova import context
-from nova.objects import block_device as objects_block_dev
-from nova.objects import network_request as objects_network_request
-from nova import test
-from nova.tests import fake_block_device
-from nova.tests.fake_instance import fake_instance_obj
-
-CONF = cfg.CONF
-
-
-class ComputeRpcAPITestCase(test.TestCase):
-
- def setUp(self):
- super(ComputeRpcAPITestCase, self).setUp()
- self.context = context.get_admin_context()
- instance_attr = {'host': 'fake_host',
- 'instance_type_id': 1}
- self.fake_instance_obj = fake_instance_obj(self.context,
- **instance_attr)
- self.fake_instance = jsonutils.to_primitive(self.fake_instance_obj)
- self.fake_volume_bdm = jsonutils.to_primitive(
- fake_block_device.FakeDbBlockDeviceDict(
- {'source_type': 'volume', 'destination_type': 'volume',
- 'instance_uuid': self.fake_instance['uuid'],
- 'volume_id': 'fake-volume-id'}))
-
- def test_serialized_instance_has_name(self):
- self.assertIn('name', self.fake_instance)
-
- def _test_compute_api(self, method, rpc_method, **kwargs):
- ctxt = context.RequestContext('fake_user', 'fake_project')
-
- rpcapi = kwargs.pop('rpcapi_class', compute_rpcapi.ComputeAPI)()
- self.assertIsNotNone(rpcapi.client)
- self.assertEqual(rpcapi.client.target.topic, CONF.compute_topic)
-
- orig_prepare = rpcapi.client.prepare
- expected_version = kwargs.pop('version', rpcapi.client.target.version)
-
- expected_kwargs = kwargs.copy()
- if ('requested_networks' in expected_kwargs and
- expected_version == '3.23'):
- expected_kwargs['requested_networks'] = []
- for requested_network in kwargs['requested_networks']:
- expected_kwargs['requested_networks'].append(
- (requested_network.network_id,
- str(requested_network.address),
- requested_network.port_id))
- if 'host_param' in expected_kwargs:
- expected_kwargs['host'] = expected_kwargs.pop('host_param')
- else:
- expected_kwargs.pop('host', None)
- expected_kwargs.pop('destination', None)
-
- cast_and_call = ['confirm_resize', 'stop_instance']
- if rpc_method == 'call' and method in cast_and_call:
- if method == 'confirm_resize':
- kwargs['cast'] = False
- else:
- kwargs['do_cast'] = False
- if 'host' in kwargs:
- host = kwargs['host']
- elif 'destination' in kwargs:
- host = kwargs['destination']
- elif 'instances' in kwargs:
- host = kwargs['instances'][0]['host']
- else:
- host = kwargs['instance']['host']
-
- with contextlib.nested(
- mock.patch.object(rpcapi.client, rpc_method),
- mock.patch.object(rpcapi.client, 'prepare'),
- mock.patch.object(rpcapi.client, 'can_send_version'),
- ) as (
- rpc_mock, prepare_mock, csv_mock
- ):
- prepare_mock.return_value = rpcapi.client
- if 'return_bdm_object' in kwargs:
- del kwargs['return_bdm_object']
- rpc_mock.return_value = objects_block_dev.BlockDeviceMapping()
- elif rpc_method == 'call':
- rpc_mock.return_value = 'foo'
- else:
- rpc_mock.return_value = None
- csv_mock.side_effect = (
- lambda v: orig_prepare(version=v).can_send_version())
-
- retval = getattr(rpcapi, method)(ctxt, **kwargs)
- self.assertEqual(retval, rpc_mock.return_value)
-
- prepare_mock.assert_called_once_with(version=expected_version,
- server=host)
- rpc_mock.assert_called_once_with(ctxt, method, **expected_kwargs)
-
- def test_add_aggregate_host(self):
- self._test_compute_api('add_aggregate_host', 'cast',
- aggregate={'id': 'fake_id'}, host_param='host', host='host',
- slave_info={})
-
- def test_add_fixed_ip_to_instance(self):
- self._test_compute_api('add_fixed_ip_to_instance', 'cast',
- instance=self.fake_instance_obj, network_id='id',
- version='3.12')
-
- def test_attach_interface(self):
- self._test_compute_api('attach_interface', 'call',
- instance=self.fake_instance_obj, network_id='id',
- port_id='id2', version='3.17', requested_ip='192.168.1.50')
-
- def test_attach_volume(self):
- self._test_compute_api('attach_volume', 'cast',
- instance=self.fake_instance_obj, volume_id='id',
- mountpoint='mp', bdm=self.fake_volume_bdm, version='3.16')
-
- def test_change_instance_metadata(self):
- self._test_compute_api('change_instance_metadata', 'cast',
- instance=self.fake_instance_obj, diff={}, version='3.7')
-
- def test_check_can_live_migrate_destination(self):
- self._test_compute_api('check_can_live_migrate_destination', 'call',
- instance=self.fake_instance_obj,
- destination='dest', block_migration=True,
- disk_over_commit=True, version='3.32')
-
- def test_check_can_live_migrate_source(self):
- self._test_compute_api('check_can_live_migrate_source', 'call',
- instance=self.fake_instance_obj,
- dest_check_data={"test": "data"}, version='3.32')
-
- def test_check_instance_shared_storage(self):
- self._test_compute_api('check_instance_shared_storage', 'call',
- instance=self.fake_instance_obj, data='foo',
- version='3.29')
-
- def test_confirm_resize_cast(self):
- self._test_compute_api('confirm_resize', 'cast',
- instance=self.fake_instance_obj, migration={'id': 'foo'},
- host='host', reservations=list('fake_res'))
-
- def test_confirm_resize_call(self):
- self._test_compute_api('confirm_resize', 'call',
- instance=self.fake_instance_obj, migration={'id': 'foo'},
- host='host', reservations=list('fake_res'))
-
- def test_detach_interface(self):
- self._test_compute_api('detach_interface', 'cast',
- version='3.17', instance=self.fake_instance_obj,
- port_id='fake_id')
-
- def test_detach_volume(self):
- self._test_compute_api('detach_volume', 'cast',
- instance=self.fake_instance_obj, volume_id='id',
- version='3.25')
-
- def test_finish_resize(self):
- self._test_compute_api('finish_resize', 'cast',
- instance=self.fake_instance_obj, migration={'id': 'foo'},
- image='image', disk_info='disk_info', host='host',
- reservations=list('fake_res'))
-
- def test_finish_revert_resize(self):
- self._test_compute_api('finish_revert_resize', 'cast',
- instance=self.fake_instance_obj, migration={'id': 'fake_id'},
- host='host', reservations=list('fake_res'))
-
- def test_get_console_output(self):
- self._test_compute_api('get_console_output', 'call',
- instance=self.fake_instance_obj, tail_length='tl',
- version='3.28')
-
- def test_get_console_pool_info(self):
- self._test_compute_api('get_console_pool_info', 'call',
- console_type='type', host='host')
-
- def test_get_console_topic(self):
- self._test_compute_api('get_console_topic', 'call', host='host')
-
- def test_get_diagnostics(self):
- self._test_compute_api('get_diagnostics', 'call',
- instance=self.fake_instance_obj, version='3.18')
-
- def test_get_instance_diagnostics(self):
- self._test_compute_api('get_instance_diagnostics', 'call',
- instance=self.fake_instance, version='3.31')
-
- def test_get_vnc_console(self):
- self._test_compute_api('get_vnc_console', 'call',
- instance=self.fake_instance_obj, console_type='type',
- version='3.2')
-
- def test_get_spice_console(self):
- self._test_compute_api('get_spice_console', 'call',
- instance=self.fake_instance_obj, console_type='type',
- version='3.1')
-
- def test_get_rdp_console(self):
- self._test_compute_api('get_rdp_console', 'call',
- instance=self.fake_instance_obj, console_type='type',
- version='3.10')
-
- def test_get_serial_console(self):
- self._test_compute_api('get_serial_console', 'call',
- instance=self.fake_instance, console_type='serial',
- version='3.34')
-
- def test_validate_console_port(self):
- self._test_compute_api('validate_console_port', 'call',
- instance=self.fake_instance_obj, port="5900",
- console_type="novnc", version='3.3')
-
- def test_host_maintenance_mode(self):
- self._test_compute_api('host_maintenance_mode', 'call',
- host_param='param', mode='mode', host='host')
-
- def test_host_power_action(self):
- self._test_compute_api('host_power_action', 'call', action='action',
- host='host')
-
- def test_inject_network_info(self):
- self._test_compute_api('inject_network_info', 'cast',
- instance=self.fake_instance_obj)
-
- def test_live_migration(self):
- self._test_compute_api('live_migration', 'cast',
- instance=self.fake_instance_obj, dest='dest',
- block_migration='blockity_block', host='tsoh',
- migrate_data={}, version='3.26')
-
- def test_post_live_migration_at_destination(self):
- self._test_compute_api('post_live_migration_at_destination', 'cast',
- instance=self.fake_instance_obj,
- block_migration='block_migration', host='host', version='3.14')
-
- def test_pause_instance(self):
- self._test_compute_api('pause_instance', 'cast',
- instance=self.fake_instance_obj)
-
- def test_soft_delete_instance(self):
- self._test_compute_api('soft_delete_instance', 'cast',
- instance=self.fake_instance_obj,
- reservations=['uuid1', 'uuid2'])
-
- def test_swap_volume(self):
- self._test_compute_api('swap_volume', 'cast',
- instance=self.fake_instance_obj, old_volume_id='oldid',
- new_volume_id='newid')
-
- def test_restore_instance(self):
- self._test_compute_api('restore_instance', 'cast',
- instance=self.fake_instance_obj, version='3.20')
-
- def test_pre_live_migration(self):
- self._test_compute_api('pre_live_migration', 'call',
- instance=self.fake_instance_obj,
- block_migration='block_migration', disk='disk', host='host',
- migrate_data=None, version='3.19')
-
- def test_prep_resize(self):
- self._test_compute_api('prep_resize', 'cast',
- instance=self.fake_instance_obj, instance_type='fake_type',
- image='fake_image', host='host',
- reservations=list('fake_res'),
- request_spec='fake_spec',
- filter_properties={'fakeprop': 'fakeval'},
- node='node')
-
- def test_reboot_instance(self):
- self.maxDiff = None
- self._test_compute_api('reboot_instance', 'cast',
- instance=self.fake_instance_obj,
- block_device_info={},
- reboot_type='type')
-
- def test_rebuild_instance(self):
- self._test_compute_api('rebuild_instance', 'cast', new_pass='None',
- injected_files='None', image_ref='None', orig_image_ref='None',
- bdms=[], instance=self.fake_instance_obj, host='new_host',
- orig_sys_metadata=None, recreate=True, on_shared_storage=True,
- preserve_ephemeral=True, version='3.21')
-
- def test_reserve_block_device_name(self):
- self._test_compute_api('reserve_block_device_name', 'call',
- instance=self.fake_instance_obj, device='device',
- volume_id='id', disk_bus='ide', device_type='cdrom',
- version='3.35', return_bdm_object=True)
-
- def refresh_provider_fw_rules(self):
- self._test_compute_api('refresh_provider_fw_rules', 'cast',
- host='host')
-
- def test_refresh_security_group_rules(self):
- self._test_compute_api('refresh_security_group_rules', 'cast',
- rpcapi_class=compute_rpcapi.SecurityGroupAPI,
- security_group_id='id', host='host')
-
- def test_refresh_security_group_members(self):
- self._test_compute_api('refresh_security_group_members', 'cast',
- rpcapi_class=compute_rpcapi.SecurityGroupAPI,
- security_group_id='id', host='host')
-
- def test_remove_aggregate_host(self):
- self._test_compute_api('remove_aggregate_host', 'cast',
- aggregate={'id': 'fake_id'}, host_param='host', host='host',
- slave_info={})
-
- def test_remove_fixed_ip_from_instance(self):
- self._test_compute_api('remove_fixed_ip_from_instance', 'cast',
- instance=self.fake_instance_obj, address='addr',
- version='3.13')
-
- def test_remove_volume_connection(self):
- self._test_compute_api('remove_volume_connection', 'call',
- instance=self.fake_instance, volume_id='id', host='host',
- version='3.30')
-
- def test_rescue_instance(self):
- self.flags(compute='3.9', group='upgrade_levels')
- self._test_compute_api('rescue_instance', 'cast',
- instance=self.fake_instance_obj, rescue_password='pw',
- version='3.9')
-
- def test_rescue_instance_with_rescue_image_ref_passed(self):
- self._test_compute_api('rescue_instance', 'cast',
- instance=self.fake_instance_obj, rescue_password='pw',
- rescue_image_ref='fake_image_ref', version='3.24')
-
- def test_reset_network(self):
- self._test_compute_api('reset_network', 'cast',
- instance=self.fake_instance_obj)
-
- def test_resize_instance(self):
- self._test_compute_api('resize_instance', 'cast',
- instance=self.fake_instance_obj, migration={'id': 'fake_id'},
- image='image', instance_type={'id': 1},
- reservations=list('fake_res'))
-
- def test_resume_instance(self):
- self._test_compute_api('resume_instance', 'cast',
- instance=self.fake_instance_obj)
-
- def test_revert_resize(self):
- self._test_compute_api('revert_resize', 'cast',
- instance=self.fake_instance_obj, migration={'id': 'fake_id'},
- host='host', reservations=list('fake_res'))
-
- def test_rollback_live_migration_at_destination(self):
- self._test_compute_api('rollback_live_migration_at_destination',
- 'cast', instance=self.fake_instance_obj, host='host',
- destroy_disks=True, migrate_data=None, version='3.32')
-
- def test_run_instance(self):
- self._test_compute_api('run_instance', 'cast',
- instance=self.fake_instance_obj, host='fake_host',
- request_spec='fake_spec', filter_properties={},
- requested_networks='networks', injected_files='files',
- admin_password='pw', is_first_time=True, node='node',
- legacy_bdm_in_spec=False, version='3.27')
-
- def test_set_admin_password(self):
- self._test_compute_api('set_admin_password', 'call',
- instance=self.fake_instance_obj, new_pass='pw',
- version='3.8')
-
- def test_set_host_enabled(self):
- self._test_compute_api('set_host_enabled', 'call',
- enabled='enabled', host='host')
-
- def test_get_host_uptime(self):
- self._test_compute_api('get_host_uptime', 'call', host='host')
-
- def test_backup_instance(self):
- self._test_compute_api('backup_instance', 'cast',
- instance=self.fake_instance_obj, image_id='id',
- backup_type='type', rotation='rotation')
-
- def test_snapshot_instance(self):
- self._test_compute_api('snapshot_instance', 'cast',
- instance=self.fake_instance_obj, image_id='id')
-
- def test_start_instance(self):
- self._test_compute_api('start_instance', 'cast',
- instance=self.fake_instance_obj)
-
- def test_stop_instance_cast(self):
- self._test_compute_api('stop_instance', 'cast',
- instance=self.fake_instance_obj)
-
- def test_stop_instance_call(self):
- self._test_compute_api('stop_instance', 'call',
- instance=self.fake_instance_obj)
-
- def test_suspend_instance(self):
- self._test_compute_api('suspend_instance', 'cast',
- instance=self.fake_instance_obj)
-
- def test_terminate_instance(self):
- self._test_compute_api('terminate_instance', 'cast',
- instance=self.fake_instance_obj, bdms=[],
- reservations=['uuid1', 'uuid2'], version='3.22')
-
- def test_unpause_instance(self):
- self._test_compute_api('unpause_instance', 'cast',
- instance=self.fake_instance_obj)
-
- def test_unrescue_instance(self):
- self._test_compute_api('unrescue_instance', 'cast',
- instance=self.fake_instance_obj, version='3.11')
-
- def test_shelve_instance(self):
- self._test_compute_api('shelve_instance', 'cast',
- instance=self.fake_instance_obj, image_id='image_id')
-
- def test_shelve_offload_instance(self):
- self._test_compute_api('shelve_offload_instance', 'cast',
- instance=self.fake_instance_obj)
-
- def test_unshelve_instance(self):
- self._test_compute_api('unshelve_instance', 'cast',
- instance=self.fake_instance_obj, host='host', image='image',
- filter_properties={'fakeprop': 'fakeval'}, node='node',
- version='3.15')
-
- def test_volume_snapshot_create(self):
- self._test_compute_api('volume_snapshot_create', 'cast',
- instance=self.fake_instance, volume_id='fake_id',
- create_info={}, version='3.6')
-
- def test_volume_snapshot_delete(self):
- self._test_compute_api('volume_snapshot_delete', 'cast',
- instance=self.fake_instance_obj, volume_id='fake_id',
- snapshot_id='fake_id2', delete_info={}, version='3.6')
-
- def test_external_instance_event(self):
- self._test_compute_api('external_instance_event', 'cast',
- instances=[self.fake_instance_obj],
- events=['event'],
- version='3.23')
-
- def test_build_and_run_instance(self):
- self._test_compute_api('build_and_run_instance', 'cast',
- instance=self.fake_instance_obj, host='host', image='image',
- request_spec={'request': 'spec'}, filter_properties=[],
- admin_password='passwd', injected_files=None,
- requested_networks=['network1'], security_groups=None,
- block_device_mapping=None, node='node', limits=[],
- version='3.33')
-
- @mock.patch('nova.utils.is_neutron', return_value=True)
- def test_build_and_run_instance_icehouse_compat(self, is_neutron):
- self.flags(compute='icehouse', group='upgrade_levels')
- self._test_compute_api('build_and_run_instance', 'cast',
- instance=self.fake_instance_obj, host='host', image='image',
- request_spec={'request': 'spec'}, filter_properties=[],
- admin_password='passwd', injected_files=None,
- requested_networks= objects_network_request.NetworkRequestList(
- objects=[objects_network_request.NetworkRequest(
- network_id="fake_network_id", address="10.0.0.1",
- port_id="fake_port_id")]),
- security_groups=None,
- block_device_mapping=None, node='node', limits=[],
- version='3.23')
diff --git a/nova/tests/compute/test_shelve.py b/nova/tests/compute/test_shelve.py
deleted file mode 100644
index 3d8a0c7b6f..0000000000
--- a/nova/tests/compute/test_shelve.py
+++ /dev/null
@@ -1,414 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import iso8601
-import mock
-import mox
-from oslo.config import cfg
-from oslo.utils import timeutils
-
-from nova.compute import claims
-from nova.compute import task_states
-from nova.compute import vm_states
-from nova import db
-from nova import objects
-from nova.objects import base as obj_base
-from nova.tests.compute import test_compute
-from nova.tests.image import fake as fake_image
-from nova import utils
-
-CONF = cfg.CONF
-CONF.import_opt('shelved_offload_time', 'nova.compute.manager')
-
-
-def _fake_resources():
- resources = {
- 'memory_mb': 2048,
- 'memory_mb_used': 0,
- 'free_ram_mb': 2048,
- 'local_gb': 20,
- 'local_gb_used': 0,
- 'free_disk_gb': 20,
- 'vcpus': 2,
- 'vcpus_used': 0
- }
- return resources
-
-
-class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
- def _shelve_instance(self, shelved_offload_time, clean_shutdown=True):
- CONF.set_override('shelved_offload_time', shelved_offload_time)
- instance = self._create_fake_instance_obj()
- db_instance = obj_base.obj_to_primitive(instance)
- image_id = 'fake_image_id'
- host = 'fake-mini'
- cur_time = timeutils.utcnow()
- timeutils.set_time_override(cur_time)
- instance.task_state = task_states.SHELVING
- instance.save()
- sys_meta = dict(instance.system_metadata)
- sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
- sys_meta['shelved_image_id'] = image_id
- sys_meta['shelved_host'] = host
- db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta)
-
- self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
- self.mox.StubOutWithMock(self.compute.driver, 'snapshot')
- self.mox.StubOutWithMock(self.compute.driver, 'power_off')
- self.mox.StubOutWithMock(self.compute, '_get_power_state')
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
-
- self.compute._notify_about_instance_usage(self.context, instance,
- 'shelve.start')
- if clean_shutdown:
- self.compute.driver.power_off(instance,
- CONF.shutdown_timeout,
- self.compute.SHUTDOWN_RETRY_INTERVAL)
- else:
- self.compute.driver.power_off(instance, 0, 0)
- self.compute._get_power_state(self.context,
- instance).AndReturn(123)
- self.compute.driver.snapshot(self.context, instance, 'fake_image_id',
- mox.IgnoreArg())
-
- update_values = {'power_state': 123,
- 'vm_state': vm_states.SHELVED,
- 'task_state': None,
- 'expected_task_state': [task_states.SHELVING,
- task_states.SHELVING_IMAGE_UPLOADING],
- 'system_metadata': sys_meta}
- if CONF.shelved_offload_time == 0:
- update_values['task_state'] = task_states.SHELVING_OFFLOADING
- db.instance_update_and_get_original(self.context, instance['uuid'],
- update_values, update_cells=False,
- columns_to_join=['metadata', 'system_metadata', 'info_cache',
- 'security_groups'],
- ).AndReturn((db_instance,
- db_instance))
- self.compute._notify_about_instance_usage(self.context,
- instance, 'shelve.end')
- if CONF.shelved_offload_time == 0:
- self.compute._notify_about_instance_usage(self.context, instance,
- 'shelve_offload.start')
- self.compute.driver.power_off(instance)
- self.compute._get_power_state(self.context,
- instance).AndReturn(123)
- db.instance_update_and_get_original(self.context,
- instance['uuid'],
- {'power_state': 123, 'host': None, 'node': None,
- 'vm_state': vm_states.SHELVED_OFFLOADED,
- 'task_state': None,
- 'expected_task_state': [task_states.SHELVING,
- task_states.SHELVING_OFFLOADING]},
- update_cells=False,
- columns_to_join=['metadata', 'system_metadata',
- 'info_cache',
- 'security_groups'],
- ).AndReturn((db_instance, db_instance))
- self.compute._notify_about_instance_usage(self.context, instance,
- 'shelve_offload.end')
- self.mox.ReplayAll()
-
- self.compute.shelve_instance(self.context, instance,
- image_id=image_id, clean_shutdown=clean_shutdown)
-
- def test_shelve(self):
- self._shelve_instance(-1)
-
- def test_shelve_forced_shutdown(self):
- self._shelve_instance(-1, clean_shutdown=False)
-
- def test_shelve_offload(self):
- self._shelve_instance(0)
-
- def test_shelve_volume_backed(self):
- instance = self._create_fake_instance_obj()
- instance.task_state = task_states.SHELVING
- instance.save()
- db_instance = obj_base.obj_to_primitive(instance)
- host = 'fake-mini'
- cur_time = timeutils.utcnow()
- timeutils.set_time_override(cur_time)
- sys_meta = dict(instance.system_metadata)
- sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
- sys_meta['shelved_image_id'] = None
- sys_meta['shelved_host'] = host
- db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta)
-
- self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
- self.mox.StubOutWithMock(self.compute.driver, 'power_off')
- self.mox.StubOutWithMock(self.compute, '_get_power_state')
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
-
- self.compute._notify_about_instance_usage(self.context, instance,
- 'shelve_offload.start')
- self.compute.driver.power_off(instance)
- self.compute._get_power_state(self.context,
- instance).AndReturn(123)
- db.instance_update_and_get_original(self.context, instance['uuid'],
- {'power_state': 123, 'host': None, 'node': None,
- 'vm_state': vm_states.SHELVED_OFFLOADED,
- 'task_state': None,
- 'expected_task_state': [task_states.SHELVING,
- task_states.SHELVING_OFFLOADING]},
- update_cells=False,
- columns_to_join=['metadata', 'system_metadata',
- 'info_cache', 'security_groups'],
- ).AndReturn((db_instance, db_instance))
- self.compute._notify_about_instance_usage(self.context, instance,
- 'shelve_offload.end')
- self.mox.ReplayAll()
-
- self.compute.shelve_offload_instance(self.context, instance)
-
- def test_unshelve(self):
- db_instance = self._create_fake_instance()
- instance = objects.Instance.get_by_uuid(
- self.context, db_instance['uuid'],
- expected_attrs=['metadata', 'system_metadata'])
- instance.task_state = task_states.UNSHELVING
- instance.save()
- image = {'id': 'fake_id'}
- host = 'fake-mini'
- node = test_compute.NODENAME
- limits = {}
- filter_properties = {'limits': limits}
- cur_time = timeutils.utcnow()
- cur_time_tz = cur_time.replace(tzinfo=iso8601.iso8601.Utc())
- timeutils.set_time_override(cur_time)
- sys_meta = dict(instance.system_metadata)
- sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
- sys_meta['shelved_image_id'] = image['id']
- sys_meta['shelved_host'] = host
-
- self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
- self.mox.StubOutWithMock(self.compute, '_prep_block_device')
- self.mox.StubOutWithMock(self.compute.driver, 'spawn')
- self.mox.StubOutWithMock(self.compute, '_get_power_state')
- self.mox.StubOutWithMock(self.rt, 'instance_claim')
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
- self.mox.StubOutWithMock(self.compute.network_api,
- 'migrate_instance_finish')
-
- self.deleted_image_id = None
-
- def fake_delete(self2, ctxt, image_id):
- self.deleted_image_id = image_id
-
- def fake_claim(context, instance, limits):
- instance.host = self.compute.host
- return claims.Claim(context, db_instance,
- self.rt, _fake_resources())
-
- fake_image.stub_out_image_service(self.stubs)
- self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
-
- self.compute._notify_about_instance_usage(self.context, instance,
- 'unshelve.start')
- db.instance_update_and_get_original(self.context, instance['uuid'],
- {'task_state': task_states.SPAWNING},
- update_cells=False,
- columns_to_join=['metadata', 'system_metadata'],
- ).AndReturn((db_instance, db_instance))
- self.compute._prep_block_device(self.context, instance,
- mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm')
- db_instance['key_data'] = None
- db_instance['auto_disk_config'] = None
- self.compute.network_api.migrate_instance_finish(
- self.context, instance, {'source_compute': '',
- 'dest_compute': self.compute.host})
- self.compute.driver.spawn(self.context, instance, image,
- injected_files=[], admin_password=None,
- network_info=[],
- block_device_info='fake_bdm')
- self.compute._get_power_state(self.context, instance).AndReturn(123)
- db.instance_update_and_get_original(self.context, instance['uuid'],
- {'power_state': 123,
- 'vm_state': vm_states.ACTIVE,
- 'task_state': None,
- 'image_ref': instance['image_ref'],
- 'key_data': None,
- 'host': self.compute.host, # rt.instance_claim set this
- 'auto_disk_config': False,
- 'expected_task_state': task_states.SPAWNING,
- 'launched_at': cur_time_tz},
- update_cells=False,
- columns_to_join=['metadata', 'system_metadata']
- ).AndReturn((db_instance,
- dict(db_instance,
- host=self.compute.host,
- metadata={})))
- self.compute._notify_about_instance_usage(self.context, instance,
- 'unshelve.end')
- self.mox.ReplayAll()
-
- with mock.patch.object(self.rt, 'instance_claim',
- side_effect=fake_claim):
- self.compute.unshelve_instance(self.context, instance, image=image,
- filter_properties=filter_properties, node=node)
- self.assertEqual(image['id'], self.deleted_image_id)
- self.assertEqual(instance.host, self.compute.host)
-
- def test_unshelve_volume_backed(self):
- db_instance = self._create_fake_instance()
- node = test_compute.NODENAME
- limits = {}
- filter_properties = {'limits': limits}
- cur_time = timeutils.utcnow()
- cur_time_tz = cur_time.replace(tzinfo=iso8601.iso8601.Utc())
- timeutils.set_time_override(cur_time)
- instance = objects.Instance.get_by_uuid(
- self.context, db_instance['uuid'],
- expected_attrs=['metadata', 'system_metadata'])
- instance.task_state = task_states.UNSHELVING
- instance.save()
-
- self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
- self.mox.StubOutWithMock(self.compute, '_prep_block_device')
- self.mox.StubOutWithMock(self.compute.driver, 'spawn')
- self.mox.StubOutWithMock(self.compute, '_get_power_state')
- self.mox.StubOutWithMock(self.rt, 'instance_claim')
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
- self.mox.StubOutWithMock(self.compute.network_api,
- 'migrate_instance_finish')
-
- self.compute._notify_about_instance_usage(self.context, instance,
- 'unshelve.start')
- db.instance_update_and_get_original(self.context, instance['uuid'],
- {'task_state': task_states.SPAWNING},
- update_cells=False,
- columns_to_join=['metadata', 'system_metadata']
- ).AndReturn((db_instance, db_instance))
- self.compute._prep_block_device(self.context, instance,
- mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm')
- db_instance['key_data'] = None
- db_instance['auto_disk_config'] = None
- self.compute.network_api.migrate_instance_finish(
- self.context, instance, {'source_compute': '',
- 'dest_compute': self.compute.host})
- self.rt.instance_claim(self.context, instance, limits).AndReturn(
- claims.Claim(self.context, db_instance, self.rt,
- _fake_resources()))
- self.compute.driver.spawn(self.context, instance, None,
- injected_files=[], admin_password=None,
- network_info=[],
- block_device_info='fake_bdm')
- self.compute._get_power_state(self.context, instance).AndReturn(123)
- db.instance_update_and_get_original(self.context, instance['uuid'],
- {'power_state': 123,
- 'vm_state': vm_states.ACTIVE,
- 'task_state': None,
- 'key_data': None,
- 'auto_disk_config': False,
- 'expected_task_state': task_states.SPAWNING,
- 'launched_at': cur_time_tz},
- update_cells=False,
- columns_to_join=['metadata', 'system_metadata']
- ).AndReturn((db_instance, db_instance))
- self.compute._notify_about_instance_usage(self.context, instance,
- 'unshelve.end')
- self.mox.ReplayAll()
-
- self.compute.unshelve_instance(self.context, instance, image=None,
- filter_properties=filter_properties, node=node)
-
- def test_shelved_poll_none_exist(self):
- self.mox.StubOutWithMock(self.compute.driver, 'destroy')
- self.mox.StubOutWithMock(timeutils, 'is_older_than')
- self.mox.ReplayAll()
- self.compute._poll_shelved_instances(self.context)
-
- def test_shelved_poll_not_timedout(self):
- instance = self._create_fake_instance_obj()
- sys_meta = instance.system_metadata
- shelved_time = timeutils.utcnow()
- timeutils.set_time_override(shelved_time)
- timeutils.advance_time_seconds(CONF.shelved_offload_time - 1)
- sys_meta['shelved_at'] = timeutils.strtime(at=shelved_time)
- db.instance_update_and_get_original(self.context, instance['uuid'],
- {'vm_state': vm_states.SHELVED, 'system_metadata': sys_meta})
-
- self.mox.StubOutWithMock(self.compute.driver, 'destroy')
- self.mox.ReplayAll()
- self.compute._poll_shelved_instances(self.context)
-
- def test_shelved_poll_timedout(self):
- instance = self._create_fake_instance_obj()
- sys_meta = instance.system_metadata
- shelved_time = timeutils.utcnow()
- timeutils.set_time_override(shelved_time)
- timeutils.advance_time_seconds(CONF.shelved_offload_time + 1)
- sys_meta['shelved_at'] = timeutils.strtime(at=shelved_time)
- (old, instance) = db.instance_update_and_get_original(self.context,
- instance['uuid'], {'vm_state': vm_states.SHELVED,
- 'system_metadata': sys_meta})
-
- def fake_destroy(inst, nw_info, bdm):
- # NOTE(alaski) There are too many differences between an instance
- # as returned by instance_update_and_get_original and
- # instance_get_all_by_filters so just compare the uuid.
- self.assertEqual(instance['uuid'], inst['uuid'])
-
- self.stubs.Set(self.compute.driver, 'destroy', fake_destroy)
- self.compute._poll_shelved_instances(self.context)
-
-
-class ShelveComputeAPITestCase(test_compute.BaseTestCase):
- def test_shelve(self):
- # Ensure instance can be shelved.
- fake_instance = self._create_fake_instance_obj(
- {'display_name': 'vm01'})
- instance = fake_instance
-
- self.assertIsNone(instance['task_state'])
-
- def fake_init(self2):
- # In original _FakeImageService.__init__(), some fake images are
- # created. To verify the snapshot name of this test only, here
- # sets a fake method.
- self2.images = {}
-
- def fake_create(self2, ctxt, metadata, data=None):
- self.assertEqual(metadata['name'], 'vm01-shelved')
- metadata['id'] = '8b24ed3f-ee57-43bc-bc2e-fb2e9482bc42'
- return metadata
-
- fake_image.stub_out_image_service(self.stubs)
- self.stubs.Set(fake_image._FakeImageService, '__init__', fake_init)
- self.stubs.Set(fake_image._FakeImageService, 'create', fake_create)
-
- self.compute_api.shelve(self.context, instance)
-
- instance.refresh()
- self.assertEqual(instance.task_state, task_states.SHELVING)
-
- db.instance_destroy(self.context, instance['uuid'])
-
- def test_unshelve(self):
- # Ensure instance can be unshelved.
- instance = self._create_fake_instance_obj()
-
- self.assertIsNone(instance['task_state'])
-
- self.compute_api.shelve(self.context, instance)
-
- instance.refresh()
- instance.task_state = None
- instance.vm_state = vm_states.SHELVED
- instance.save()
-
- self.compute_api.unshelve(self.context, instance)
-
- instance.refresh()
- self.assertEqual(instance.task_state, task_states.UNSHELVING)
-
- db.instance_destroy(self.context, instance['uuid'])
diff --git a/nova/tests/conductor/tasks/test_live_migrate.py b/nova/tests/conductor/tasks/test_live_migrate.py
deleted file mode 100644
index fbf0bc802b..0000000000
--- a/nova/tests/conductor/tasks/test_live_migrate.py
+++ /dev/null
@@ -1,384 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mox
-
-from nova.compute import power_state
-from nova.compute import utils as compute_utils
-from nova.conductor.tasks import live_migrate
-from nova import db
-from nova import exception
-from nova import objects
-from nova.scheduler import utils as scheduler_utils
-from nova import test
-from nova.tests import fake_instance
-
-
-class LiveMigrationTaskTestCase(test.NoDBTestCase):
- def setUp(self):
- super(LiveMigrationTaskTestCase, self).setUp()
- self.context = "context"
- self.instance_host = "host"
- self.instance_uuid = "uuid"
- self.instance_image = "image_ref"
- db_instance = fake_instance.fake_db_instance(
- host=self.instance_host,
- uuid=self.instance_uuid,
- power_state=power_state.RUNNING,
- memory_mb=512,
- image_ref=self.instance_image)
- self.instance = objects.Instance._from_db_object(
- self.context, objects.Instance(), db_instance)
- self.destination = "destination"
- self.block_migration = "bm"
- self.disk_over_commit = "doc"
- self._generate_task()
-
- def _generate_task(self):
- self.task = live_migrate.LiveMigrationTask(self.context,
- self.instance, self.destination, self.block_migration,
- self.disk_over_commit)
-
- def test_execute_with_destination(self):
- self.mox.StubOutWithMock(self.task, '_check_host_is_up')
- self.mox.StubOutWithMock(self.task, '_check_requested_destination')
- self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration')
-
- self.task._check_host_is_up(self.instance_host)
- self.task._check_requested_destination()
- self.task.compute_rpcapi.live_migration(self.context,
- host=self.instance_host,
- instance=self.instance,
- dest=self.destination,
- block_migration=self.block_migration,
- migrate_data=None).AndReturn("bob")
-
- self.mox.ReplayAll()
- self.assertEqual("bob", self.task.execute())
-
- def test_execute_without_destination(self):
- self.destination = None
- self._generate_task()
- self.assertIsNone(self.task.destination)
-
- self.mox.StubOutWithMock(self.task, '_check_host_is_up')
- self.mox.StubOutWithMock(self.task, '_find_destination')
- self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration')
-
- self.task._check_host_is_up(self.instance_host)
- self.task._find_destination().AndReturn("found_host")
- self.task.compute_rpcapi.live_migration(self.context,
- host=self.instance_host,
- instance=self.instance,
- dest="found_host",
- block_migration=self.block_migration,
- migrate_data=None).AndReturn("bob")
-
- self.mox.ReplayAll()
- self.assertEqual("bob", self.task.execute())
-
- def test_check_instance_is_running_passes(self):
- self.task._check_instance_is_running()
-
- def test_check_instance_is_running_fails_when_shutdown(self):
- self.task.instance['power_state'] = power_state.SHUTDOWN
- self.assertRaises(exception.InstanceNotRunning,
- self.task._check_instance_is_running)
-
- def test_check_instance_host_is_up(self):
- self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
- self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
-
- db.service_get_by_compute_host(self.context,
- "host").AndReturn("service")
- self.task.servicegroup_api.service_is_up("service").AndReturn(True)
-
- self.mox.ReplayAll()
- self.task._check_host_is_up("host")
-
- def test_check_instance_host_is_up_fails_if_not_up(self):
- self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
- self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
-
- db.service_get_by_compute_host(self.context,
- "host").AndReturn("service")
- self.task.servicegroup_api.service_is_up("service").AndReturn(False)
-
- self.mox.ReplayAll()
- self.assertRaises(exception.ComputeServiceUnavailable,
- self.task._check_host_is_up, "host")
-
- def test_check_instance_host_is_up_fails_if_not_found(self):
- self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
-
- db.service_get_by_compute_host(self.context,
- "host").AndRaise(exception.NotFound)
-
- self.mox.ReplayAll()
- self.assertRaises(exception.ComputeServiceUnavailable,
- self.task._check_host_is_up, "host")
-
- def test_check_requested_destination(self):
- self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
- self.mox.StubOutWithMock(self.task, '_get_compute_info')
- self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
- self.mox.StubOutWithMock(self.task.compute_rpcapi,
- 'check_can_live_migrate_destination')
-
- db.service_get_by_compute_host(self.context,
- self.destination).AndReturn("service")
- self.task.servicegroup_api.service_is_up("service").AndReturn(True)
- hypervisor_details = {
- "hypervisor_type": "a",
- "hypervisor_version": 6.1,
- "free_ram_mb": 513
- }
- self.task._get_compute_info(self.destination)\
- .AndReturn(hypervisor_details)
- self.task._get_compute_info(self.instance_host)\
- .AndReturn(hypervisor_details)
- self.task._get_compute_info(self.destination)\
- .AndReturn(hypervisor_details)
-
- self.task.compute_rpcapi.check_can_live_migrate_destination(
- self.context, self.instance, self.destination,
- self.block_migration, self.disk_over_commit).AndReturn(
- "migrate_data")
-
- self.mox.ReplayAll()
- self.task._check_requested_destination()
- self.assertEqual("migrate_data", self.task.migrate_data)
-
- def test_check_requested_destination_fails_with_same_dest(self):
- self.task.destination = "same"
- self.task.source = "same"
- self.assertRaises(exception.UnableToMigrateToSelf,
- self.task._check_requested_destination)
-
- def test_check_requested_destination_fails_when_destination_is_up(self):
- self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
-
- db.service_get_by_compute_host(self.context,
- self.destination).AndRaise(exception.NotFound)
-
- self.mox.ReplayAll()
- self.assertRaises(exception.ComputeServiceUnavailable,
- self.task._check_requested_destination)
-
- def test_check_requested_destination_fails_with_not_enough_memory(self):
- self.mox.StubOutWithMock(self.task, '_check_host_is_up')
- self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
-
- self.task._check_host_is_up(self.destination)
- db.service_get_by_compute_host(self.context,
- self.destination).AndReturn({
- "compute_node": [{"free_ram_mb": 511}]
- })
-
- self.mox.ReplayAll()
- self.assertRaises(exception.MigrationPreCheckError,
- self.task._check_requested_destination)
-
- def test_check_requested_destination_fails_with_hypervisor_diff(self):
- self.mox.StubOutWithMock(self.task, '_check_host_is_up')
- self.mox.StubOutWithMock(self.task,
- '_check_destination_has_enough_memory')
- self.mox.StubOutWithMock(self.task, '_get_compute_info')
-
- self.task._check_host_is_up(self.destination)
- self.task._check_destination_has_enough_memory()
- self.task._get_compute_info(self.instance_host).AndReturn({
- "hypervisor_type": "b"
- })
- self.task._get_compute_info(self.destination).AndReturn({
- "hypervisor_type": "a"
- })
-
- self.mox.ReplayAll()
- self.assertRaises(exception.InvalidHypervisorType,
- self.task._check_requested_destination)
-
- def test_check_requested_destination_fails_with_hypervisor_too_old(self):
- self.mox.StubOutWithMock(self.task, '_check_host_is_up')
- self.mox.StubOutWithMock(self.task,
- '_check_destination_has_enough_memory')
- self.mox.StubOutWithMock(self.task, '_get_compute_info')
-
- self.task._check_host_is_up(self.destination)
- self.task._check_destination_has_enough_memory()
- self.task._get_compute_info(self.instance_host).AndReturn({
- "hypervisor_type": "a",
- "hypervisor_version": 7
- })
- self.task._get_compute_info(self.destination).AndReturn({
- "hypervisor_type": "a",
- "hypervisor_version": 6
- })
-
- self.mox.ReplayAll()
- self.assertRaises(exception.DestinationHypervisorTooOld,
- self.task._check_requested_destination)
-
- def test_find_destination_works(self):
- self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
- self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
- self.mox.StubOutWithMock(self.task.scheduler_client,
- 'select_destinations')
- self.mox.StubOutWithMock(self.task,
- '_check_compatible_with_source_hypervisor')
- self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
-
- compute_utils.get_image_metadata(self.context,
- self.task.image_api, self.instance_image,
- self.instance).AndReturn("image")
- scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn({})
- self.task.scheduler_client.select_destinations(self.context,
- mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
- [{'host': 'host1'}])
- self.task._check_compatible_with_source_hypervisor("host1")
- self.task._call_livem_checks_on_host("host1")
-
- self.mox.ReplayAll()
- self.assertEqual("host1", self.task._find_destination())
-
- def test_find_destination_no_image_works(self):
- self.instance['image_ref'] = ''
-
- self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
- self.mox.StubOutWithMock(self.task.scheduler_client,
- 'select_destinations')
- self.mox.StubOutWithMock(self.task,
- '_check_compatible_with_source_hypervisor')
- self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
-
- scheduler_utils.build_request_spec(self.context, None,
- mox.IgnoreArg()).AndReturn({})
- self.task.scheduler_client.select_destinations(self.context,
- mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
- [{'host': 'host1'}])
- self.task._check_compatible_with_source_hypervisor("host1")
- self.task._call_livem_checks_on_host("host1")
-
- self.mox.ReplayAll()
- self.assertEqual("host1", self.task._find_destination())
-
- def _test_find_destination_retry_hypervisor_raises(self, error):
- self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
- self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
- self.mox.StubOutWithMock(self.task.scheduler_client,
- 'select_destinations')
- self.mox.StubOutWithMock(self.task,
- '_check_compatible_with_source_hypervisor')
- self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
-
- compute_utils.get_image_metadata(self.context,
- self.task.image_api, self.instance_image,
- self.instance).AndReturn("image")
- scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn({})
- self.task.scheduler_client.select_destinations(self.context,
- mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
- [{'host': 'host1'}])
- self.task._check_compatible_with_source_hypervisor("host1")\
- .AndRaise(error)
-
- self.task.scheduler_client.select_destinations(self.context,
- mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
- [{'host': 'host2'}])
- self.task._check_compatible_with_source_hypervisor("host2")
- self.task._call_livem_checks_on_host("host2")
-
- self.mox.ReplayAll()
- self.assertEqual("host2", self.task._find_destination())
-
- def test_find_destination_retry_with_old_hypervisor(self):
- self._test_find_destination_retry_hypervisor_raises(
- exception.DestinationHypervisorTooOld)
-
- def test_find_destination_retry_with_invalid_hypervisor_type(self):
- self._test_find_destination_retry_hypervisor_raises(
- exception.InvalidHypervisorType)
-
- def test_find_destination_retry_with_invalid_livem_checks(self):
- self.flags(migrate_max_retries=1)
- self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
- self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
- self.mox.StubOutWithMock(self.task.scheduler_client,
- 'select_destinations')
- self.mox.StubOutWithMock(self.task,
- '_check_compatible_with_source_hypervisor')
- self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
-
- compute_utils.get_image_metadata(self.context,
- self.task.image_api, self.instance_image,
- self.instance).AndReturn("image")
- scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn({})
- self.task.scheduler_client.select_destinations(self.context,
- mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
- [{'host': 'host1'}])
- self.task._check_compatible_with_source_hypervisor("host1")
- self.task._call_livem_checks_on_host("host1")\
- .AndRaise(exception.Invalid)
-
- self.task.scheduler_client.select_destinations(self.context,
- mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
- [{'host': 'host2'}])
- self.task._check_compatible_with_source_hypervisor("host2")
- self.task._call_livem_checks_on_host("host2")
-
- self.mox.ReplayAll()
- self.assertEqual("host2", self.task._find_destination())
-
- def test_find_destination_retry_exceeds_max(self):
- self.flags(migrate_max_retries=0)
- self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
- self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
- self.mox.StubOutWithMock(self.task.scheduler_client,
- 'select_destinations')
- self.mox.StubOutWithMock(self.task,
- '_check_compatible_with_source_hypervisor')
-
- compute_utils.get_image_metadata(self.context,
- self.task.image_api, self.instance_image,
- self.instance).AndReturn("image")
- scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn({})
- self.task.scheduler_client.select_destinations(self.context,
- mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
- [{'host': 'host1'}])
- self.task._check_compatible_with_source_hypervisor("host1")\
- .AndRaise(exception.DestinationHypervisorTooOld)
-
- self.mox.ReplayAll()
- self.assertRaises(exception.NoValidHost, self.task._find_destination)
-
- def test_find_destination_when_runs_out_of_hosts(self):
- self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
- self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
- self.mox.StubOutWithMock(self.task.scheduler_client,
- 'select_destinations')
- compute_utils.get_image_metadata(self.context,
- self.task.image_api, self.instance_image,
- self.instance).AndReturn("image")
- scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn({})
- self.task.scheduler_client.select_destinations(self.context,
- mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(
- exception.NoValidHost(reason=""))
-
- self.mox.ReplayAll()
- self.assertRaises(exception.NoValidHost, self.task._find_destination)
-
- def test_not_implemented_rollback(self):
- self.assertRaises(NotImplementedError, self.task.rollback)
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
deleted file mode 100644
index 8f2d6bbe5f..0000000000
--- a/nova/tests/conductor/test_conductor.py
+++ /dev/null
@@ -1,2151 +0,0 @@
-# Copyright 2012 IBM Corp.
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Tests for the conductor service."""
-
-import contextlib
-
-import mock
-import mox
-from oslo.config import cfg
-from oslo import messaging
-from oslo.serialization import jsonutils
-from oslo.utils import timeutils
-
-from nova.api.ec2 import ec2utils
-from nova.compute import arch
-from nova.compute import flavors
-from nova.compute import task_states
-from nova.compute import utils as compute_utils
-from nova.compute import vm_states
-from nova import conductor
-from nova.conductor import api as conductor_api
-from nova.conductor import manager as conductor_manager
-from nova.conductor import rpcapi as conductor_rpcapi
-from nova.conductor.tasks import live_migrate
-from nova import context
-from nova import db
-from nova.db.sqlalchemy import models
-from nova import exception as exc
-from nova import notifications
-from nova import objects
-from nova.objects import base as obj_base
-from nova.objects import block_device as block_device_obj
-from nova.objects import fields
-from nova.objects import quotas as quotas_obj
-from nova import quota
-from nova import rpc
-from nova.scheduler import driver as scheduler_driver
-from nova.scheduler import utils as scheduler_utils
-from nova import test
-from nova.tests import cast_as_call
-from nova.tests.compute import test_compute
-from nova.tests import fake_block_device
-from nova.tests import fake_instance
-from nova.tests import fake_notifier
-from nova.tests import fake_server_actions
-from nova.tests import fake_utils
-from nova import utils
-
-
-CONF = cfg.CONF
-CONF.import_opt('report_interval', 'nova.service')
-
-
-FAKE_IMAGE_REF = 'fake-image-ref'
-
-
-class FakeContext(context.RequestContext):
- def elevated(self):
- """Return a consistent elevated context so we can detect it."""
- if not hasattr(self, '_elevated'):
- self._elevated = super(FakeContext, self).elevated()
- return self._elevated
-
-
-class _BaseTestCase(object):
- def setUp(self):
- super(_BaseTestCase, self).setUp()
- self.db = None
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = FakeContext(self.user_id, self.project_id)
-
- fake_notifier.stub_notifier(self.stubs)
- self.addCleanup(fake_notifier.reset)
-
- def fake_deserialize_context(serializer, ctxt_dict):
- self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
- self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
- return self.context
-
- self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
- fake_deserialize_context)
-
- fake_utils.stub_out_utils_spawn_n(self.stubs)
-
- def _create_fake_instance(self, params=None, type_name='m1.tiny'):
- if not params:
- params = {}
-
- inst = {}
- inst['vm_state'] = vm_states.ACTIVE
- inst['image_ref'] = FAKE_IMAGE_REF
- inst['reservation_id'] = 'r-fakeres'
- inst['user_id'] = self.user_id
- inst['project_id'] = self.project_id
- inst['host'] = 'fake_host'
- type_id = flavors.get_flavor_by_name(type_name)['id']
- inst['instance_type_id'] = type_id
- inst['ami_launch_index'] = 0
- inst['memory_mb'] = 0
- inst['vcpus'] = 0
- inst['root_gb'] = 0
- inst['ephemeral_gb'] = 0
- inst['architecture'] = arch.X86_64
- inst['os_type'] = 'Linux'
- inst['availability_zone'] = 'fake-az'
- inst.update(params)
- return db.instance_create(self.context, inst)
-
- def _do_update(self, instance_uuid, **updates):
- return self.conductor.instance_update(self.context, instance_uuid,
- updates, None)
-
- def test_instance_update(self):
- instance = self._create_fake_instance()
- new_inst = self._do_update(instance['uuid'],
- vm_state=vm_states.STOPPED)
- instance = db.instance_get_by_uuid(self.context, instance['uuid'])
- self.assertEqual(instance['vm_state'], vm_states.STOPPED)
- self.assertEqual(new_inst['vm_state'], instance['vm_state'])
-
- def test_instance_update_invalid_key(self):
- # NOTE(danms): the real DB API call ignores invalid keys
- if self.db is None:
- self.conductor = utils.ExceptionHelper(self.conductor)
- self.assertRaises(KeyError,
- self._do_update, 'any-uuid', foobar=1)
-
- def test_migration_get_in_progress_by_host_and_node(self):
- self.mox.StubOutWithMock(db,
- 'migration_get_in_progress_by_host_and_node')
- db.migration_get_in_progress_by_host_and_node(
- self.context, 'fake-host', 'fake-node').AndReturn('fake-result')
- self.mox.ReplayAll()
- result = self.conductor.migration_get_in_progress_by_host_and_node(
- self.context, 'fake-host', 'fake-node')
- self.assertEqual(result, 'fake-result')
-
- def test_aggregate_metadata_get_by_host(self):
- self.mox.StubOutWithMock(db, 'aggregate_metadata_get_by_host')
- db.aggregate_metadata_get_by_host(self.context, 'host',
- 'key').AndReturn('result')
- self.mox.ReplayAll()
- result = self.conductor.aggregate_metadata_get_by_host(self.context,
- 'host', 'key')
- self.assertEqual(result, 'result')
-
- def test_bw_usage_update(self):
- self.mox.StubOutWithMock(db, 'bw_usage_update')
- self.mox.StubOutWithMock(db, 'bw_usage_get')
-
- update_args = (self.context, 'uuid', 'mac', 0, 10, 20, 5, 10, 20)
- get_args = (self.context, 'uuid', 0, 'mac')
-
- db.bw_usage_update(*update_args, update_cells=True)
- db.bw_usage_get(*get_args).AndReturn('foo')
-
- self.mox.ReplayAll()
- result = self.conductor.bw_usage_update(*update_args,
- update_cells=True)
- self.assertEqual(result, 'foo')
-
- def test_provider_fw_rule_get_all(self):
- fake_rules = ['a', 'b', 'c']
- self.mox.StubOutWithMock(db, 'provider_fw_rule_get_all')
- db.provider_fw_rule_get_all(self.context).AndReturn(fake_rules)
- self.mox.ReplayAll()
- result = self.conductor.provider_fw_rule_get_all(self.context)
- self.assertEqual(result, fake_rules)
-
- def test_block_device_mapping_get_all_by_instance(self):
- fake_inst = {'uuid': 'fake-uuid'}
- self.mox.StubOutWithMock(db,
- 'block_device_mapping_get_all_by_instance')
- db.block_device_mapping_get_all_by_instance(
- self.context, fake_inst['uuid']).AndReturn('fake-result')
- self.mox.ReplayAll()
- result = self.conductor.block_device_mapping_get_all_by_instance(
- self.context, fake_inst, legacy=False)
- self.assertEqual(result, 'fake-result')
-
- def test_vol_usage_update(self):
- self.mox.StubOutWithMock(db, 'vol_usage_update')
- self.mox.StubOutWithMock(compute_utils, 'usage_volume_info')
-
- fake_inst = {'uuid': 'fake-uuid',
- 'project_id': 'fake-project',
- 'user_id': 'fake-user',
- 'availability_zone': 'fake-az',
- }
-
- db.vol_usage_update(self.context, 'fake-vol', 22, 33, 44, 55,
- fake_inst['uuid'],
- fake_inst['project_id'],
- fake_inst['user_id'],
- fake_inst['availability_zone'],
- False).AndReturn('fake-usage')
- compute_utils.usage_volume_info('fake-usage').AndReturn('fake-info')
-
- self.mox.ReplayAll()
-
- self.conductor.vol_usage_update(self.context, 'fake-vol',
- 22, 33, 44, 55, fake_inst, None, False)
-
- self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual('conductor.%s' % self.conductor_manager.host,
- msg.publisher_id)
- self.assertEqual('volume.usage', msg.event_type)
- self.assertEqual('INFO', msg.priority)
- self.assertEqual('fake-info', msg.payload)
-
- def test_compute_node_create(self):
- self.mox.StubOutWithMock(db, 'compute_node_create')
- db.compute_node_create(self.context, 'fake-values').AndReturn(
- 'fake-result')
- self.mox.ReplayAll()
- result = self.conductor.compute_node_create(self.context,
- 'fake-values')
- self.assertEqual(result, 'fake-result')
-
- def test_compute_node_update(self):
- node = {'id': 'fake-id'}
- self.mox.StubOutWithMock(db, 'compute_node_update')
- db.compute_node_update(self.context, node['id'], {'fake': 'values'}).\
- AndReturn('fake-result')
- self.mox.ReplayAll()
- result = self.conductor.compute_node_update(self.context, node,
- {'fake': 'values'})
- self.assertEqual(result, 'fake-result')
-
- def test_compute_node_delete(self):
- node = {'id': 'fake-id'}
- self.mox.StubOutWithMock(db, 'compute_node_delete')
- db.compute_node_delete(self.context, node['id']).AndReturn(None)
- self.mox.ReplayAll()
- result = self.conductor.compute_node_delete(self.context, node)
- self.assertIsNone(result)
-
- def test_task_log_get(self):
- self.mox.StubOutWithMock(db, 'task_log_get')
- db.task_log_get(self.context, 'task', 'begin', 'end', 'host',
- 'state').AndReturn('result')
- self.mox.ReplayAll()
- result = self.conductor.task_log_get(self.context, 'task', 'begin',
- 'end', 'host', 'state')
- self.assertEqual(result, 'result')
-
- def test_task_log_get_with_no_state(self):
- self.mox.StubOutWithMock(db, 'task_log_get')
- db.task_log_get(self.context, 'task', 'begin', 'end',
- 'host', None).AndReturn('result')
- self.mox.ReplayAll()
- result = self.conductor.task_log_get(self.context, 'task', 'begin',
- 'end', 'host', None)
- self.assertEqual(result, 'result')
-
- def test_task_log_begin_task(self):
- self.mox.StubOutWithMock(db, 'task_log_begin_task')
- db.task_log_begin_task(self.context.elevated(), 'task', 'begin',
- 'end', 'host', 'items',
- 'message').AndReturn('result')
- self.mox.ReplayAll()
- result = self.conductor.task_log_begin_task(
- self.context, 'task', 'begin', 'end', 'host', 'items', 'message')
- self.assertEqual(result, 'result')
-
- def test_task_log_end_task(self):
- self.mox.StubOutWithMock(db, 'task_log_end_task')
- db.task_log_end_task(self.context.elevated(), 'task', 'begin', 'end',
- 'host', 'errors', 'message').AndReturn('result')
- self.mox.ReplayAll()
- result = self.conductor.task_log_end_task(
- self.context, 'task', 'begin', 'end', 'host', 'errors', 'message')
- self.assertEqual(result, 'result')
-
- def test_notify_usage_exists(self):
- info = {
- 'audit_period_beginning': 'start',
- 'audit_period_ending': 'end',
- 'bandwidth': 'bw_usage',
- 'image_meta': {},
- 'extra': 'info',
- }
- instance = {
- 'system_metadata': [],
- }
-
- self.mox.StubOutWithMock(notifications, 'audit_period_bounds')
- self.mox.StubOutWithMock(notifications, 'bandwidth_usage')
- self.mox.StubOutWithMock(compute_utils, 'notify_about_instance_usage')
-
- notifications.audit_period_bounds(False).AndReturn(('start', 'end'))
- notifications.bandwidth_usage(instance, 'start', True).AndReturn(
- 'bw_usage')
- notifier = self.conductor_manager.notifier
- compute_utils.notify_about_instance_usage(notifier,
- self.context, instance,
- 'exists',
- system_metadata={},
- extra_usage_info=info)
-
- self.mox.ReplayAll()
-
- self.conductor.notify_usage_exists(self.context, instance, False, True,
- system_metadata={},
- extra_usage_info=dict(extra='info'))
-
- def test_security_groups_trigger_members_refresh(self):
- self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
- 'trigger_members_refresh')
- self.conductor_manager.security_group_api.trigger_members_refresh(
- self.context, [1, 2, 3])
- self.mox.ReplayAll()
- self.conductor.security_groups_trigger_members_refresh(self.context,
- [1, 2, 3])
-
- def test_get_ec2_ids(self):
- expected = {
- 'instance-id': 'ec2-inst-id',
- 'ami-id': 'ec2-ami-id',
- 'kernel-id': 'ami-kernel-ec2-kernelid',
- 'ramdisk-id': 'ami-ramdisk-ec2-ramdiskid',
- }
- inst = {
- 'uuid': 'fake-uuid',
- 'kernel_id': 'ec2-kernelid',
- 'ramdisk_id': 'ec2-ramdiskid',
- 'image_ref': 'fake-image',
- }
- self.mox.StubOutWithMock(ec2utils, 'id_to_ec2_inst_id')
- self.mox.StubOutWithMock(ec2utils, 'glance_id_to_ec2_id')
- self.mox.StubOutWithMock(ec2utils, 'image_type')
-
- ec2utils.id_to_ec2_inst_id(inst['uuid']).AndReturn(
- expected['instance-id'])
- ec2utils.glance_id_to_ec2_id(self.context,
- inst['image_ref']).AndReturn(
- expected['ami-id'])
- for image_type in ['kernel', 'ramdisk']:
- image_id = inst['%s_id' % image_type]
- ec2utils.image_type(image_type).AndReturn('ami-' + image_type)
- ec2utils.glance_id_to_ec2_id(self.context, image_id,
- 'ami-' + image_type).AndReturn(
- 'ami-%s-ec2-%sid' % (image_type, image_type))
-
- self.mox.ReplayAll()
- result = self.conductor.get_ec2_ids(self.context, inst)
- self.assertEqual(result, expected)
-
-
-class ConductorTestCase(_BaseTestCase, test.TestCase):
- """Conductor Manager Tests."""
- def setUp(self):
- super(ConductorTestCase, self).setUp()
- self.conductor = conductor_manager.ConductorManager()
- self.conductor_manager = self.conductor
-
- def test_instance_get_by_uuid(self):
- orig_instance = self._create_fake_instance()
- copy_instance = self.conductor.instance_get_by_uuid(
- self.context, orig_instance['uuid'], None)
- self.assertEqual(orig_instance['name'],
- copy_instance['name'])
-
- def test_block_device_mapping_update_or_create(self):
- fake_bdm = {'id': 1, 'device_name': 'foo',
- 'source_type': 'volume', 'volume_id': 'fake-vol-id',
- 'destination_type': 'volume'}
- fake_bdm = fake_block_device.FakeDbBlockDeviceDict(fake_bdm)
- fake_bdm2 = {'id': 1, 'device_name': 'foo2',
- 'source_type': 'volume', 'volume_id': 'fake-vol-id',
- 'destination_type': 'volume'}
- fake_bdm2 = fake_block_device.FakeDbBlockDeviceDict(fake_bdm2)
- cells_rpcapi = self.conductor.cells_rpcapi
- self.mox.StubOutWithMock(db, 'block_device_mapping_create')
- self.mox.StubOutWithMock(db, 'block_device_mapping_update')
- self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
- self.mox.StubOutWithMock(cells_rpcapi,
- 'bdm_update_or_create_at_top')
- db.block_device_mapping_create(self.context,
- fake_bdm).AndReturn(fake_bdm2)
- cells_rpcapi.bdm_update_or_create_at_top(
- self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
- create=True)
- db.block_device_mapping_update(self.context, fake_bdm['id'],
- fake_bdm).AndReturn(fake_bdm2)
- cells_rpcapi.bdm_update_or_create_at_top(
- self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
- create=False)
- self.mox.ReplayAll()
- self.conductor.block_device_mapping_update_or_create(self.context,
- fake_bdm,
- create=True)
- self.conductor.block_device_mapping_update_or_create(self.context,
- fake_bdm,
- create=False)
-
- def test_instance_get_all_by_filters(self):
- filters = {'foo': 'bar'}
- self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
- db.instance_get_all_by_filters(self.context, filters,
- 'fake-key', 'fake-sort',
- columns_to_join=None, use_slave=False)
- self.mox.ReplayAll()
- self.conductor.instance_get_all_by_filters(self.context, filters,
- 'fake-key', 'fake-sort',
- None, False)
-
- def test_instance_get_all_by_filters_use_slave(self):
- filters = {'foo': 'bar'}
- self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
- db.instance_get_all_by_filters(self.context, filters,
- 'fake-key', 'fake-sort',
- columns_to_join=None, use_slave=True)
- self.mox.ReplayAll()
- self.conductor.instance_get_all_by_filters(self.context, filters,
- 'fake-key', 'fake-sort',
- columns_to_join=None,
- use_slave=True)
-
- def test_instance_get_all_by_host(self):
- self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
- self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
- db.instance_get_all_by_host(self.context.elevated(),
- 'host', None).AndReturn('result')
- db.instance_get_all_by_host_and_node(self.context.elevated(), 'host',
- 'node').AndReturn('result')
- self.mox.ReplayAll()
- result = self.conductor.instance_get_all_by_host(self.context, 'host',
- None, None)
- self.assertEqual(result, 'result')
- result = self.conductor.instance_get_all_by_host(self.context, 'host',
- 'node', None)
- self.assertEqual(result, 'result')
-
- def _test_stubbed(self, name, dbargs, condargs,
- db_result_listified=False, db_exception=None):
-
- self.mox.StubOutWithMock(db, name)
- if db_exception:
- getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
- getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
- else:
- getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
- self.mox.ReplayAll()
- if db_exception:
- self.assertRaises(messaging.ExpectedException,
- self.conductor.service_get_all_by,
- self.context, **condargs)
-
- self.conductor = utils.ExceptionHelper(self.conductor)
-
- self.assertRaises(db_exception.__class__,
- self.conductor.service_get_all_by,
- self.context, **condargs)
- else:
- result = self.conductor.service_get_all_by(self.context,
- **condargs)
- if db_result_listified:
- self.assertEqual(['fake-result'], result)
- else:
- self.assertEqual('fake-result', result)
-
- def test_service_get_all(self):
- self._test_stubbed('service_get_all', (),
- dict(host=None, topic=None, binary=None))
-
- def test_service_get_by_host_and_topic(self):
- self._test_stubbed('service_get_by_host_and_topic',
- ('host', 'topic'),
- dict(topic='topic', host='host', binary=None))
-
- def test_service_get_all_by_topic(self):
- self._test_stubbed('service_get_all_by_topic',
- ('topic',),
- dict(topic='topic', host=None, binary=None))
-
- def test_service_get_all_by_host(self):
- self._test_stubbed('service_get_all_by_host',
- ('host',),
- dict(host='host', topic=None, binary=None))
-
- def test_service_get_by_compute_host(self):
- self._test_stubbed('service_get_by_compute_host',
- ('host',),
- dict(topic='compute', host='host', binary=None),
- db_result_listified=True)
-
- def test_service_get_by_args(self):
- self._test_stubbed('service_get_by_args',
- ('host', 'binary'),
- dict(host='host', binary='binary', topic=None))
-
- def test_service_get_by_compute_host_not_found(self):
- self._test_stubbed('service_get_by_compute_host',
- ('host',),
- dict(topic='compute', host='host', binary=None),
- db_exception=exc.ComputeHostNotFound(host='host'))
-
- def test_service_get_by_args_not_found(self):
- self._test_stubbed('service_get_by_args',
- ('host', 'binary'),
- dict(host='host', binary='binary', topic=None),
- db_exception=exc.HostBinaryNotFound(binary='binary',
- host='host'))
-
- def test_security_groups_trigger_handler(self):
- self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
- 'trigger_handler')
- self.conductor_manager.security_group_api.trigger_handler('event',
- self.context,
- 'args')
- self.mox.ReplayAll()
- self.conductor.security_groups_trigger_handler(self.context,
- 'event', ['args'])
-
- def _test_object_action(self, is_classmethod, raise_exception):
- class TestObject(obj_base.NovaObject):
- def foo(self, context, raise_exception=False):
- if raise_exception:
- raise Exception('test')
- else:
- return 'test'
-
- @classmethod
- def bar(cls, context, raise_exception=False):
- if raise_exception:
- raise Exception('test')
- else:
- return 'test'
-
- obj = TestObject()
- if is_classmethod:
- result = self.conductor.object_class_action(
- self.context, TestObject.obj_name(), 'bar', '1.0',
- tuple(), {'raise_exception': raise_exception})
- else:
- updates, result = self.conductor.object_action(
- self.context, obj, 'foo', tuple(),
- {'raise_exception': raise_exception})
- self.assertEqual('test', result)
-
- def test_object_action(self):
- self._test_object_action(False, False)
-
- def test_object_action_on_raise(self):
- self.assertRaises(messaging.ExpectedException,
- self._test_object_action, False, True)
-
- def test_object_class_action(self):
- self._test_object_action(True, False)
-
- def test_object_class_action_on_raise(self):
- self.assertRaises(messaging.ExpectedException,
- self._test_object_action, True, True)
-
- def test_object_action_copies_object(self):
- class TestObject(obj_base.NovaObject):
- fields = {'dict': fields.DictOfStringsField()}
-
- def touch_dict(self, context):
- self.dict['foo'] = 'bar'
- self.obj_reset_changes()
-
- obj = TestObject()
- obj.dict = {}
- obj.obj_reset_changes()
- updates, result = self.conductor.object_action(
- self.context, obj, 'touch_dict', tuple(), {})
- # NOTE(danms): If conductor did not properly copy the object, then
- # the new and reference copies of the nested dict object will be
- # the same, and thus 'dict' will not be reported as changed
- self.assertIn('dict', updates)
- self.assertEqual({'foo': 'bar'}, updates['dict'])
-
- def _test_expected_exceptions(self, db_method, conductor_method, errors,
- *args, **kwargs):
- # Tests that expected exceptions are handled properly.
- for error in errors:
- with mock.patch.object(db, db_method, side_effect=error):
- self.assertRaises(messaging.ExpectedException,
- conductor_method,
- self.context, *args, **kwargs)
-
- def test_action_event_start_expected_exceptions(self):
- error = exc.InstanceActionNotFound(request_id='1', instance_uuid='2')
- self._test_expected_exceptions(
- 'action_event_start', self.conductor.action_event_start, [error],
- {'foo': 'bar'})
-
- def test_action_event_finish_expected_exceptions(self):
- errors = (exc.InstanceActionNotFound(request_id='1',
- instance_uuid='2'),
- exc.InstanceActionEventNotFound(event='1', action_id='2'))
- self._test_expected_exceptions(
- 'action_event_finish', self.conductor.action_event_finish,
- errors, {'foo': 'bar'})
-
- def test_instance_update_expected_exceptions(self):
- errors = (exc.InvalidUUID(uuid='foo'),
- exc.InstanceNotFound(instance_id=1),
- exc.UnexpectedTaskStateError(expected='foo',
- actual='bar'))
- self._test_expected_exceptions(
- 'instance_update', self.conductor.instance_update,
- errors, None, {'foo': 'bar'}, None)
-
- def test_instance_get_by_uuid_expected_exceptions(self):
- error = exc.InstanceNotFound(instance_id=1)
- self._test_expected_exceptions(
- 'instance_get_by_uuid', self.conductor.instance_get_by_uuid,
- [error], None, [])
-
- def test_aggregate_host_add_expected_exceptions(self):
- error = exc.AggregateHostExists(aggregate_id=1, host='foo')
- self._test_expected_exceptions(
- 'aggregate_host_add', self.conductor.aggregate_host_add,
- [error], {'id': 1}, None)
-
- def test_aggregate_host_delete_expected_exceptions(self):
- error = exc.AggregateHostNotFound(aggregate_id=1, host='foo')
- self._test_expected_exceptions(
- 'aggregate_host_delete', self.conductor.aggregate_host_delete,
- [error], {'id': 1}, None)
-
- def test_service_update_expected_exceptions(self):
- error = exc.ServiceNotFound(service_id=1)
- self._test_expected_exceptions(
- 'service_update',
- self.conductor.service_update,
- [error], {'id': 1}, None)
-
- def test_service_destroy_expected_exceptions(self):
- error = exc.ServiceNotFound(service_id=1)
- self._test_expected_exceptions(
- 'service_destroy',
- self.conductor.service_destroy,
- [error], 1)
-
- def _setup_aggregate_with_host(self):
- aggregate_ref = db.aggregate_create(self.context.elevated(),
- {'name': 'foo'}, metadata={'availability_zone': 'foo'})
-
- self.conductor.aggregate_host_add(self.context, aggregate_ref, 'bar')
-
- aggregate_ref = db.aggregate_get(self.context.elevated(),
- aggregate_ref['id'])
-
- return aggregate_ref
-
- def test_aggregate_host_add(self):
- aggregate_ref = self._setup_aggregate_with_host()
-
- self.assertIn('bar', aggregate_ref['hosts'])
-
- db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
-
- def test_aggregate_host_delete(self):
- aggregate_ref = self._setup_aggregate_with_host()
-
- self.conductor.aggregate_host_delete(self.context, aggregate_ref,
- 'bar')
-
- aggregate_ref = db.aggregate_get(self.context.elevated(),
- aggregate_ref['id'])
-
- self.assertNotIn('bar', aggregate_ref['hosts'])
-
- db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
-
- def test_network_migrate_instance_start(self):
- self.mox.StubOutWithMock(self.conductor_manager.network_api,
- 'migrate_instance_start')
- self.conductor_manager.network_api.migrate_instance_start(self.context,
- 'instance',
- 'migration')
- self.mox.ReplayAll()
- self.conductor.network_migrate_instance_start(self.context,
- 'instance',
- 'migration')
-
- def test_network_migrate_instance_finish(self):
- self.mox.StubOutWithMock(self.conductor_manager.network_api,
- 'migrate_instance_finish')
- self.conductor_manager.network_api.migrate_instance_finish(
- self.context, 'instance', 'migration')
- self.mox.ReplayAll()
- self.conductor.network_migrate_instance_finish(self.context,
- 'instance',
- 'migration')
-
- def test_instance_destroy(self):
- self.mox.StubOutWithMock(db, 'instance_destroy')
- db.instance_destroy(self.context, 'fake-uuid').AndReturn('fake-result')
- self.mox.ReplayAll()
- result = self.conductor.instance_destroy(self.context,
- {'uuid': 'fake-uuid'})
- self.assertEqual(result, 'fake-result')
-
- def test_compute_unrescue(self):
- self.mox.StubOutWithMock(self.conductor_manager.compute_api,
- 'unrescue')
- self.conductor_manager.compute_api.unrescue(self.context, 'instance')
- self.mox.ReplayAll()
- self.conductor.compute_unrescue(self.context, 'instance')
-
- def test_instance_get_active_by_window_joined(self):
- self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
- db.instance_get_active_by_window_joined(self.context, 'fake-begin',
- 'fake-end', 'fake-proj',
- 'fake-host')
- self.mox.ReplayAll()
- self.conductor.instance_get_active_by_window_joined(
- self.context, 'fake-begin', 'fake-end', 'fake-proj', 'fake-host')
-
- def test_instance_fault_create(self):
- self.mox.StubOutWithMock(db, 'instance_fault_create')
- db.instance_fault_create(self.context, 'fake-values').AndReturn(
- 'fake-result')
- self.mox.ReplayAll()
- result = self.conductor.instance_fault_create(self.context,
- 'fake-values')
- self.assertEqual(result, 'fake-result')
-
- def test_action_event_start(self):
- self.mox.StubOutWithMock(db, 'action_event_start')
- db.action_event_start(self.context, mox.IgnoreArg())
- self.mox.ReplayAll()
- self.conductor.action_event_start(self.context, {})
-
- def test_action_event_finish(self):
- self.mox.StubOutWithMock(db, 'action_event_finish')
- db.action_event_finish(self.context, mox.IgnoreArg())
- self.mox.ReplayAll()
- self.conductor.action_event_finish(self.context, {})
-
- def test_agent_build_get_by_triple(self):
- self.mox.StubOutWithMock(db, 'agent_build_get_by_triple')
- db.agent_build_get_by_triple(self.context, 'fake-hv', 'fake-os',
- 'fake-arch').AndReturn('it worked')
- self.mox.ReplayAll()
- result = self.conductor.agent_build_get_by_triple(self.context,
- 'fake-hv',
- 'fake-os',
- 'fake-arch')
- self.assertEqual(result, 'it worked')
-
-
-class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
- """Conductor RPC API Tests."""
- def setUp(self):
- super(ConductorRPCAPITestCase, self).setUp()
- self.conductor_service = self.start_service(
- 'conductor', manager='nova.conductor.manager.ConductorManager')
- self.conductor_manager = self.conductor_service.manager
- self.conductor = conductor_rpcapi.ConductorAPI()
-
- def test_block_device_mapping_update_or_create(self):
- fake_bdm = {'id': 'fake-id'}
- self.mox.StubOutWithMock(db, 'block_device_mapping_create')
- self.mox.StubOutWithMock(db, 'block_device_mapping_update')
- self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
- self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
- '_from_db_object')
- db.block_device_mapping_create(self.context, fake_bdm)
- block_device_obj.BlockDeviceMapping._from_db_object(
- self.context, mox.IgnoreArg(), mox.IgnoreArg())
- db.block_device_mapping_update(self.context, fake_bdm['id'], fake_bdm)
- block_device_obj.BlockDeviceMapping._from_db_object(
- self.context, mox.IgnoreArg(), mox.IgnoreArg())
- db.block_device_mapping_update_or_create(self.context, fake_bdm)
- block_device_obj.BlockDeviceMapping._from_db_object(
- self.context, mox.IgnoreArg(), mox.IgnoreArg())
- self.mox.ReplayAll()
- self.conductor.block_device_mapping_update_or_create(self.context,
- fake_bdm,
- create=True)
- self.conductor.block_device_mapping_update_or_create(self.context,
- fake_bdm,
- create=False)
- self.conductor.block_device_mapping_update_or_create(self.context,
- fake_bdm)
-
- def _test_stubbed(self, name, dbargs, condargs,
- db_result_listified=False, db_exception=None):
- self.mox.StubOutWithMock(db, name)
- if db_exception:
- getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
- else:
- getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
- self.mox.ReplayAll()
- if db_exception:
- self.assertRaises(db_exception.__class__,
- self.conductor.service_get_all_by,
- self.context, **condargs)
- else:
- result = self.conductor.service_get_all_by(self.context,
- **condargs)
- if db_result_listified:
- self.assertEqual(['fake-result'], result)
- else:
- self.assertEqual('fake-result', result)
-
- def test_service_get_all(self):
- self._test_stubbed('service_get_all', (),
- dict(topic=None, host=None, binary=None))
-
- def test_service_get_by_host_and_topic(self):
- self._test_stubbed('service_get_by_host_and_topic',
- ('host', 'topic'),
- dict(topic='topic', host='host', binary=None))
-
- def test_service_get_all_by_topic(self):
- self._test_stubbed('service_get_all_by_topic',
- ('topic',),
- dict(topic='topic', host=None, binary=None))
-
- def test_service_get_all_by_host(self):
- self._test_stubbed('service_get_all_by_host',
- ('host',),
- dict(host='host', topic=None, binary=None))
-
- def test_service_get_by_compute_host(self):
- self._test_stubbed('service_get_by_compute_host',
- ('host',),
- dict(topic='compute', host='host', binary=None),
- db_result_listified=True)
-
- def test_service_get_by_args(self):
- self._test_stubbed('service_get_by_args',
- ('host', 'binary'),
- dict(host='host', binary='binary', topic=None))
-
- def test_service_get_by_compute_host_not_found(self):
- self._test_stubbed('service_get_by_compute_host',
- ('host',),
- dict(topic='compute', host='host', binary=None),
- db_exception=exc.ComputeHostNotFound(host='host'))
-
- def test_service_get_by_args_not_found(self):
- self._test_stubbed('service_get_by_args',
- ('host', 'binary'),
- dict(host='host', binary='binary', topic=None),
- db_exception=exc.HostBinaryNotFound(binary='binary',
- host='host'))
-
- def test_security_groups_trigger_handler(self):
- self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
- 'trigger_handler')
- self.conductor_manager.security_group_api.trigger_handler('event',
- self.context,
- 'arg')
- self.mox.ReplayAll()
- self.conductor.security_groups_trigger_handler(self.context,
- 'event', ['arg'])
-
- @mock.patch.object(db, 'service_update')
- @mock.patch('oslo.messaging.RPCClient.prepare')
- def test_service_update_time_big(self, mock_prepare, mock_update):
- CONF.set_override('report_interval', 10)
- services = {'id': 1}
- self.conductor.service_update(self.context, services, {})
- mock_prepare.assert_called_once_with(timeout=9)
-
- @mock.patch.object(db, 'service_update')
- @mock.patch('oslo.messaging.RPCClient.prepare')
- def test_service_update_time_small(self, mock_prepare, mock_update):
- CONF.set_override('report_interval', 3)
- services = {'id': 1}
- self.conductor.service_update(self.context, services, {})
- mock_prepare.assert_called_once_with(timeout=3)
-
- @mock.patch.object(db, 'service_update')
- @mock.patch('oslo.messaging.RPCClient.prepare')
- def test_service_update_no_time(self, mock_prepare, mock_update):
- CONF.set_override('report_interval', None)
- services = {'id': 1}
- self.conductor.service_update(self.context, services, {})
- mock_prepare.assert_called_once_with()
-
-
-class ConductorAPITestCase(_BaseTestCase, test.TestCase):
- """Conductor API Tests."""
- def setUp(self):
- super(ConductorAPITestCase, self).setUp()
- self.conductor_service = self.start_service(
- 'conductor', manager='nova.conductor.manager.ConductorManager')
- self.conductor = conductor_api.API()
- self.conductor_manager = self.conductor_service.manager
- self.db = None
-
- def _do_update(self, instance_uuid, **updates):
- # NOTE(danms): the public API takes actual keyword arguments,
- # so override the base class here to make the call correctly
- return self.conductor.instance_update(self.context, instance_uuid,
- **updates)
-
- def test_bw_usage_get(self):
- self.mox.StubOutWithMock(db, 'bw_usage_update')
- self.mox.StubOutWithMock(db, 'bw_usage_get')
-
- get_args = (self.context, 'uuid', 0, 'mac')
-
- db.bw_usage_get(*get_args).AndReturn('foo')
-
- self.mox.ReplayAll()
- result = self.conductor.bw_usage_get(*get_args)
- self.assertEqual(result, 'foo')
-
- def test_block_device_mapping_update_or_create(self):
- self.mox.StubOutWithMock(db, 'block_device_mapping_create')
- self.mox.StubOutWithMock(db, 'block_device_mapping_update')
- self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
- self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
- '_from_db_object')
- db.block_device_mapping_create(self.context, 'fake-bdm')
- block_device_obj.BlockDeviceMapping._from_db_object(
- self.context, mox.IgnoreArg(), mox.IgnoreArg())
- db.block_device_mapping_update(self.context,
- 'fake-id', {'id': 'fake-id'})
- block_device_obj.BlockDeviceMapping._from_db_object(
- self.context, mox.IgnoreArg(), mox.IgnoreArg())
- db.block_device_mapping_update_or_create(self.context, 'fake-bdm')
- block_device_obj.BlockDeviceMapping._from_db_object(
- self.context, mox.IgnoreArg(), mox.IgnoreArg())
-
- self.mox.ReplayAll()
- self.conductor.block_device_mapping_create(self.context, 'fake-bdm')
- self.conductor.block_device_mapping_update(self.context, 'fake-id', {})
- self.conductor.block_device_mapping_update_or_create(self.context,
- 'fake-bdm')
-
- def _test_stubbed(self, name, *args, **kwargs):
- if args and isinstance(args[0], FakeContext):
- ctxt = args[0]
- args = args[1:]
- else:
- ctxt = self.context
- db_exception = kwargs.get('db_exception')
- self.mox.StubOutWithMock(db, name)
- if db_exception:
- getattr(db, name)(ctxt, *args).AndRaise(db_exception)
- else:
- getattr(db, name)(ctxt, *args).AndReturn('fake-result')
- if name == 'service_destroy':
- # TODO(russellb) This is a hack ... SetUp() starts the conductor()
- # service. There is a cleanup step that runs after this test which
- # also deletes the associated service record. This involves a call
- # to db.service_destroy(), which we have stubbed out.
- db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
- self.mox.ReplayAll()
- if db_exception:
- self.assertRaises(db_exception.__class__,
- getattr(self.conductor, name),
- self.context, *args)
- else:
- result = getattr(self.conductor, name)(self.context, *args)
- self.assertEqual(
- result, 'fake-result' if kwargs.get('returns', True) else None)
-
- def test_service_get_all(self):
- self._test_stubbed('service_get_all')
-
- def test_service_get_by_host_and_topic(self):
- self._test_stubbed('service_get_by_host_and_topic', 'host', 'topic')
-
- def test_service_get_all_by_topic(self):
- self._test_stubbed('service_get_all_by_topic', 'topic')
-
- def test_service_get_all_by_host(self):
- self._test_stubbed('service_get_all_by_host', 'host')
-
- def test_service_get_by_compute_host(self):
- self._test_stubbed('service_get_by_compute_host', 'host')
-
- def test_service_get_by_args(self):
- self._test_stubbed('service_get_by_args', 'host', 'binary')
-
- def test_service_get_by_compute_host_not_found(self):
- self._test_stubbed('service_get_by_compute_host', 'host',
- db_exception=exc.ComputeHostNotFound(host='host'))
-
- def test_service_get_by_args_not_found(self):
- self._test_stubbed('service_get_by_args', 'host', 'binary',
- db_exception=exc.HostBinaryNotFound(binary='binary',
- host='host'))
-
- def test_service_create(self):
- self._test_stubbed('service_create', {})
-
- def test_service_destroy(self):
- self._test_stubbed('service_destroy', '', returns=False)
-
- def test_service_update(self):
- ctxt = self.context
- self.mox.StubOutWithMock(db, 'service_update')
- db.service_update(ctxt, '', {}).AndReturn('fake-result')
- self.mox.ReplayAll()
- result = self.conductor.service_update(self.context, {'id': ''}, {})
- self.assertEqual(result, 'fake-result')
-
- def test_instance_get_all_by_host_and_node(self):
- self._test_stubbed('instance_get_all_by_host_and_node',
- self.context.elevated(), 'host', 'node')
-
- def test_instance_get_all_by_host(self):
- self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
- self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
- db.instance_get_all_by_host(self.context.elevated(), 'host',
- None).AndReturn('fake-result')
- self.mox.ReplayAll()
- result = self.conductor.instance_get_all_by_host(self.context,
- 'host', None)
- self.assertEqual(result, 'fake-result')
-
- def test_wait_until_ready(self):
- timeouts = []
- calls = dict(count=0)
-
- def fake_ping(context, message, timeout):
- timeouts.append(timeout)
- calls['count'] += 1
- if calls['count'] < 15:
- raise messaging.MessagingTimeout("fake")
-
- self.stubs.Set(self.conductor.base_rpcapi, 'ping', fake_ping)
-
- self.conductor.wait_until_ready(self.context)
-
- self.assertEqual(timeouts.count(10), 10)
- self.assertIn(None, timeouts)
-
- def test_security_groups_trigger_handler(self):
- self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
- 'trigger_handler')
- self.conductor_manager.security_group_api.trigger_handler('event',
- self.context,
- 'arg')
- self.mox.ReplayAll()
- self.conductor.security_groups_trigger_handler(self.context,
- 'event', 'arg')
-
-
-class ConductorLocalAPITestCase(ConductorAPITestCase):
- """Conductor LocalAPI Tests."""
- def setUp(self):
- super(ConductorLocalAPITestCase, self).setUp()
- self.conductor = conductor_api.LocalAPI()
- self.conductor_manager = self.conductor._manager._target
- self.db = db
-
- def test_client_exceptions(self):
- instance = self._create_fake_instance()
- # NOTE(danms): The LocalAPI should not raise exceptions wrapped
- # in ClientException. KeyError should be raised if an invalid
- # update key is passed, so use that to validate.
- self.assertRaises(KeyError,
- self._do_update, instance['uuid'], foo='bar')
-
- def test_wait_until_ready(self):
- # Override test in ConductorAPITestCase
- pass
-
-
-class ConductorImportTest(test.TestCase):
- def test_import_conductor_local(self):
- self.flags(use_local=True, group='conductor')
- self.assertIsInstance(conductor.API(), conductor_api.LocalAPI)
- self.assertIsInstance(conductor.ComputeTaskAPI(),
- conductor_api.LocalComputeTaskAPI)
-
- def test_import_conductor_rpc(self):
- self.flags(use_local=False, group='conductor')
- self.assertIsInstance(conductor.API(), conductor_api.API)
- self.assertIsInstance(conductor.ComputeTaskAPI(),
- conductor_api.ComputeTaskAPI)
-
- def test_import_conductor_override_to_local(self):
- self.flags(use_local=False, group='conductor')
- self.assertIsInstance(conductor.API(use_local=True),
- conductor_api.LocalAPI)
- self.assertIsInstance(conductor.ComputeTaskAPI(use_local=True),
- conductor_api.LocalComputeTaskAPI)
-
-
-class ConductorPolicyTest(test.TestCase):
- def test_all_allowed_keys(self):
-
- def fake_db_instance_update(self, *args, **kwargs):
- return None, None
- self.stubs.Set(db, 'instance_update_and_get_original',
- fake_db_instance_update)
-
- ctxt = context.RequestContext('fake-user', 'fake-project')
- conductor = conductor_api.LocalAPI()
- updates = {}
- for key in conductor_manager.allowed_updates:
- if key in conductor_manager.datetime_fields:
- updates[key] = timeutils.utcnow()
- else:
- updates[key] = 'foo'
- conductor.instance_update(ctxt, 'fake-instance', **updates)
-
- def test_allowed_keys_are_real(self):
- instance = models.Instance()
- keys = list(conductor_manager.allowed_updates)
-
- # NOTE(danms): expected_task_state is a parameter that gets
- # passed to the db layer, but is not actually an instance attribute
- del keys[keys.index('expected_task_state')]
-
- for key in keys:
- self.assertTrue(hasattr(instance, key))
-
-
-class _BaseTaskTestCase(object):
- def setUp(self):
- super(_BaseTaskTestCase, self).setUp()
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = FakeContext(self.user_id, self.project_id)
- fake_server_actions.stub_out_action_events(self.stubs)
-
- def fake_deserialize_context(serializer, ctxt_dict):
- self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
- self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
- return self.context
-
- self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
- fake_deserialize_context)
-
- def _prepare_rebuild_args(self, update_args=None):
- rebuild_args = {'new_pass': 'admin_password',
- 'injected_files': 'files_to_inject',
- 'image_ref': 'image_ref',
- 'orig_image_ref': 'orig_image_ref',
- 'orig_sys_metadata': 'orig_sys_meta',
- 'bdms': {},
- 'recreate': False,
- 'on_shared_storage': False,
- 'preserve_ephemeral': False,
- 'host': 'compute-host'}
- if update_args:
- rebuild_args.update(update_args)
- return rebuild_args
-
- def test_live_migrate(self):
- inst = fake_instance.fake_db_instance()
- inst_obj = objects.Instance._from_db_object(
- self.context, objects.Instance(), inst, [])
-
- self.mox.StubOutWithMock(live_migrate, 'execute')
- live_migrate.execute(self.context,
- mox.IsA(objects.Instance),
- 'destination',
- 'block_migration',
- 'disk_over_commit')
- self.mox.ReplayAll()
-
- if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
- conductor_api.LocalComputeTaskAPI)):
- # The API method is actually 'live_migrate_instance'. It gets
- # converted into 'migrate_server' when doing RPC.
- self.conductor.live_migrate_instance(self.context, inst_obj,
- 'destination', 'block_migration', 'disk_over_commit')
- else:
- self.conductor.migrate_server(self.context, inst_obj,
- {'host': 'destination'}, True, False, None,
- 'block_migration', 'disk_over_commit')
-
- def test_cold_migrate(self):
- self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
- self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
- self.mox.StubOutWithMock(
- self.conductor_manager.compute_rpcapi, 'prep_resize')
- self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
- 'select_destinations')
- inst = fake_instance.fake_db_instance(image_ref='image_ref')
- inst_obj = objects.Instance._from_db_object(
- self.context, objects.Instance(), inst, [])
- flavor = flavors.get_default_flavor()
- flavor['extra_specs'] = 'extra_specs'
- request_spec = {'instance_type': flavor,
- 'instance_properties': {}}
- compute_utils.get_image_metadata(
- self.context, self.conductor_manager.image_api,
- 'image_ref', mox.IsA(objects.Instance)).AndReturn('image')
-
- scheduler_utils.build_request_spec(
- self.context, 'image',
- [mox.IsA(objects.Instance)],
- instance_type=flavor).AndReturn(request_spec)
-
- hosts = [dict(host='host1', nodename=None, limits={})]
- self.conductor_manager.scheduler_client.select_destinations(
- self.context, request_spec,
- {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(hosts)
-
- filter_properties = {'limits': {},
- 'retry': {'num_attempts': 1,
- 'hosts': [['host1', None]]}}
-
- self.conductor_manager.compute_rpcapi.prep_resize(
- self.context, 'image', mox.IsA(objects.Instance),
- mox.IsA(dict), 'host1', [], request_spec=request_spec,
- filter_properties=filter_properties, node=None)
-
- self.mox.ReplayAll()
-
- scheduler_hint = {'filter_properties': {}}
-
- if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
- conductor_api.LocalComputeTaskAPI)):
- # The API method is actually 'resize_instance'. It gets
- # converted into 'migrate_server' when doing RPC.
- self.conductor.resize_instance(
- self.context, inst_obj, {}, scheduler_hint, flavor, [])
- else:
- self.conductor.migrate_server(
- self.context, inst_obj, scheduler_hint,
- False, False, flavor, None, None, [])
-
- def test_build_instances(self):
- system_metadata = flavors.save_flavor_info({},
- flavors.get_default_flavor())
- instances = [fake_instance.fake_instance_obj(
- self.context,
- system_metadata=system_metadata,
- expected_attrs=['system_metadata']) for i in xrange(2)]
- instance_type = flavors.extract_flavor(instances[0])
- instance_type['extra_specs'] = 'fake-specs'
- instance_properties = jsonutils.to_primitive(instances[0])
-
- self.mox.StubOutWithMock(db, 'flavor_extra_specs_get')
- self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
- self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
- 'select_destinations')
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- self.mox.StubOutWithMock(db,
- 'block_device_mapping_get_all_by_instance')
- self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
- 'build_and_run_instance')
-
- db.flavor_extra_specs_get(
- self.context,
- instance_type['flavorid']).AndReturn('fake-specs')
- scheduler_utils.setup_instance_group(self.context, None, None)
- self.conductor_manager.scheduler_client.select_destinations(
- self.context, {'image': {'fake_data': 'should_pass_silently'},
- 'instance_properties': jsonutils.to_primitive(
- instances[0]),
- 'instance_type': instance_type,
- 'instance_uuids': [inst.uuid for inst in instances],
- 'num_instances': 2},
- {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
- [{'host': 'host1', 'nodename': 'node1', 'limits': []},
- {'host': 'host2', 'nodename': 'node2', 'limits': []}])
- db.instance_get_by_uuid(self.context, instances[0].uuid,
- columns_to_join=['system_metadata'],
- use_slave=False).AndReturn(
- jsonutils.to_primitive(instances[0]))
- db.block_device_mapping_get_all_by_instance(self.context,
- instances[0].uuid, use_slave=False).AndReturn([])
- self.conductor_manager.compute_rpcapi.build_and_run_instance(
- self.context,
- instance=mox.IgnoreArg(),
- host='host1',
- image={'fake_data': 'should_pass_silently'},
- request_spec={
- 'image': {'fake_data': 'should_pass_silently'},
- 'instance_properties': instance_properties,
- 'instance_type': instance_type,
- 'instance_uuids': [inst.uuid for inst in instances],
- 'num_instances': 2},
- filter_properties={'retry': {'num_attempts': 1,
- 'hosts': [['host1', 'node1']]},
- 'limits': []},
- admin_password='admin_password',
- injected_files='injected_files',
- requested_networks=None,
- security_groups='security_groups',
- block_device_mapping=mox.IgnoreArg(),
- node='node1', limits=[])
- db.instance_get_by_uuid(self.context, instances[1].uuid,
- columns_to_join=['system_metadata'],
- use_slave=False).AndReturn(
- jsonutils.to_primitive(instances[1]))
- db.block_device_mapping_get_all_by_instance(self.context,
- instances[1].uuid, use_slave=False).AndReturn([])
- self.conductor_manager.compute_rpcapi.build_and_run_instance(
- self.context,
- instance=mox.IgnoreArg(),
- host='host2',
- image={'fake_data': 'should_pass_silently'},
- request_spec={
- 'image': {'fake_data': 'should_pass_silently'},
- 'instance_properties': instance_properties,
- 'instance_type': instance_type,
- 'instance_uuids': [inst.uuid for inst in instances],
- 'num_instances': 2},
- filter_properties={'limits': [],
- 'retry': {'num_attempts': 1,
- 'hosts': [['host2', 'node2']]}},
- admin_password='admin_password',
- injected_files='injected_files',
- requested_networks=None,
- security_groups='security_groups',
- block_device_mapping=mox.IgnoreArg(),
- node='node2', limits=[])
- self.mox.ReplayAll()
-
- # build_instances() is a cast, we need to wait for it to complete
- self.useFixture(cast_as_call.CastAsCall(self.stubs))
-
- self.conductor.build_instances(self.context,
- instances=instances,
- image={'fake_data': 'should_pass_silently'},
- filter_properties={},
- admin_password='admin_password',
- injected_files='injected_files',
- requested_networks=None,
- security_groups='security_groups',
- block_device_mapping='block_device_mapping',
- legacy_bdm=False)
-
- def test_build_instances_scheduler_failure(self):
- instances = [fake_instance.fake_instance_obj(self.context)
- for i in xrange(2)]
- image = {'fake-data': 'should_pass_silently'}
- spec = {'fake': 'specs',
- 'instance_properties': instances[0]}
- exception = exc.NoValidHost(reason='fake-reason')
- self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
- self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
- self.mox.StubOutWithMock(scheduler_driver, 'handle_schedule_error')
- self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
- 'select_destinations')
-
- scheduler_utils.build_request_spec(self.context, image,
- mox.IgnoreArg()).AndReturn(spec)
- scheduler_utils.setup_instance_group(self.context, None, None)
- self.conductor_manager.scheduler_client.select_destinations(
- self.context, spec,
- {'retry': {'num_attempts': 1,
- 'hosts': []}}).AndRaise(exception)
- for instance in instances:
- scheduler_driver.handle_schedule_error(self.context, exception,
- instance.uuid, spec)
- self.mox.ReplayAll()
-
- # build_instances() is a cast, we need to wait for it to complete
- self.useFixture(cast_as_call.CastAsCall(self.stubs))
-
- self.conductor.build_instances(self.context,
- instances=instances,
- image=image,
- filter_properties={},
- admin_password='admin_password',
- injected_files='injected_files',
- requested_networks=None,
- security_groups='security_groups',
- block_device_mapping='block_device_mapping',
- legacy_bdm=False)
-
- def test_unshelve_instance_on_host(self):
- db_instance = self._create_fake_instance()
- instance = objects.Instance.get_by_uuid(self.context,
- db_instance['uuid'], expected_attrs=['system_metadata'])
- instance.vm_state = vm_states.SHELVED
- instance.task_state = task_states.UNSHELVING
- instance.save()
- system_metadata = instance.system_metadata
-
- self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
- 'start_instance')
- self.mox.StubOutWithMock(self.conductor_manager, '_delete_image')
- self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
- 'unshelve_instance')
-
- self.conductor_manager.compute_rpcapi.start_instance(self.context,
- instance)
- self.conductor_manager._delete_image(self.context,
- 'fake_image_id')
- self.mox.ReplayAll()
-
- system_metadata['shelved_at'] = timeutils.utcnow()
- system_metadata['shelved_image_id'] = 'fake_image_id'
- system_metadata['shelved_host'] = 'fake-mini'
- self.conductor_manager.unshelve_instance(self.context, instance)
-
- def test_unshelve_offloaded_instance_glance_image_not_found(self):
- shelved_image_id = "image_not_found"
-
- db_instance = self._create_fake_instance()
- instance = objects.Instance.get_by_uuid(
- self.context,
- db_instance['uuid'],
- expected_attrs=['system_metadata'])
- instance.vm_state = vm_states.SHELVED_OFFLOADED
- instance.task_state = task_states.UNSHELVING
- instance.save()
- system_metadata = instance.system_metadata
-
- self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
-
- e = exc.ImageNotFound(image_id=shelved_image_id)
- self.conductor_manager.image_api.get(
- self.context, shelved_image_id, show_deleted=False).AndRaise(e)
- self.mox.ReplayAll()
-
- system_metadata['shelved_at'] = timeutils.utcnow()
- system_metadata['shelved_host'] = 'fake-mini'
- system_metadata['shelved_image_id'] = shelved_image_id
-
- self.assertRaises(
- exc.UnshelveException,
- self.conductor_manager.unshelve_instance,
- self.context, instance)
- self.assertEqual(instance.vm_state, vm_states.ERROR)
-
- def test_unshelve_offloaded_instance_image_id_is_none(self):
- db_instance = jsonutils.to_primitive(self._create_fake_instance())
- instance = objects.Instance.get_by_uuid(
- self.context,
- db_instance['uuid'],
- expected_attrs=['system_metadata'])
- instance.vm_state = vm_states.SHELVED_OFFLOADED
- instance.task_state = task_states.UNSHELVING
- system_metadata = instance.system_metadata
- system_metadata['shelved_image_id'] = None
- instance.save()
-
- self.assertRaises(
- exc.UnshelveException,
- self.conductor_manager.unshelve_instance,
- self.context, instance)
- self.assertEqual(instance.vm_state, vm_states.ERROR)
-
- def test_unshelve_instance_schedule_and_rebuild(self):
- db_instance = self._create_fake_instance()
- instance = objects.Instance.get_by_uuid(self.context,
- db_instance['uuid'], expected_attrs=['system_metadata'])
- instance.vm_state = vm_states.SHELVED_OFFLOADED
- instance.save()
- filter_properties = {}
- system_metadata = instance.system_metadata
-
- self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
- self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
- self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
- 'unshelve_instance')
-
- self.conductor_manager.image_api.get(self.context,
- 'fake_image_id', show_deleted=False).AndReturn('fake_image')
- self.conductor_manager._schedule_instances(self.context,
- 'fake_image', filter_properties, instance).AndReturn(
- [{'host': 'fake_host',
- 'nodename': 'fake_node',
- 'limits': {}}])
- self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
- instance, 'fake_host', image='fake_image',
- filter_properties={'limits': {}}, node='fake_node')
- self.mox.ReplayAll()
-
- system_metadata['shelved_at'] = timeutils.utcnow()
- system_metadata['shelved_image_id'] = 'fake_image_id'
- system_metadata['shelved_host'] = 'fake-mini'
- self.conductor_manager.unshelve_instance(self.context, instance)
-
- def test_unshelve_instance_schedule_and_rebuild_novalid_host(self):
- db_instance = self._create_fake_instance()
- instance = objects.Instance.get_by_uuid(self.context,
- db_instance['uuid'], expected_attrs=['system_metadata'])
- instance.vm_state = vm_states.SHELVED_OFFLOADED
- instance.save()
- system_metadata = instance.system_metadata
-
- def fake_schedule_instances(context, image, filter_properties,
- *instances):
- raise exc.NoValidHost(reason='')
-
- with contextlib.nested(
- mock.patch.object(self.conductor_manager.image_api, 'get',
- return_value='fake_image'),
- mock.patch.object(self.conductor_manager, '_schedule_instances',
- fake_schedule_instances)
- ) as (_get_image, _schedule_instances):
- system_metadata['shelved_at'] = timeutils.utcnow()
- system_metadata['shelved_image_id'] = 'fake_image_id'
- system_metadata['shelved_host'] = 'fake-mini'
- self.conductor_manager.unshelve_instance(self.context, instance)
- _get_image.assert_has_calls([mock.call(self.context,
- system_metadata['shelved_image_id'],
- show_deleted=False)])
- self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
-
- def test_unshelve_instance_schedule_and_rebuild_volume_backed(self):
- db_instance = self._create_fake_instance()
- instance = objects.Instance.get_by_uuid(self.context,
- db_instance['uuid'], expected_attrs=['system_metadata'])
- instance.vm_state = vm_states.SHELVED_OFFLOADED
- instance.save()
- filter_properties = {}
- system_metadata = instance.system_metadata
-
- self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
- self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
- self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
- 'unshelve_instance')
-
- self.conductor_manager.image_api.get(self.context,
- 'fake_image_id', show_deleted=False).AndReturn(None)
- self.conductor_manager._schedule_instances(self.context,
- None, filter_properties, instance).AndReturn(
- [{'host': 'fake_host',
- 'nodename': 'fake_node',
- 'limits': {}}])
- self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
- instance, 'fake_host', image=None,
- filter_properties={'limits': {}}, node='fake_node')
- self.mox.ReplayAll()
-
- system_metadata['shelved_at'] = timeutils.utcnow()
- system_metadata['shelved_image_id'] = 'fake_image_id'
- system_metadata['shelved_host'] = 'fake-mini'
- self.conductor_manager.unshelve_instance(self.context, instance)
-
- def test_rebuild_instance(self):
- db_instance = self._create_fake_instance()
- inst_obj = objects.Instance.get_by_uuid(self.context,
- db_instance['uuid'])
- rebuild_args = self._prepare_rebuild_args({'host': inst_obj.host})
-
- with contextlib.nested(
- mock.patch.object(self.conductor_manager.compute_rpcapi,
- 'rebuild_instance'),
- mock.patch.object(self.conductor_manager.scheduler_client,
- 'select_destinations')
- ) as (rebuild_mock, select_dest_mock):
- self.conductor_manager.rebuild_instance(context=self.context,
- instance=inst_obj,
- **rebuild_args)
- self.assertFalse(select_dest_mock.called)
- rebuild_mock.assert_called_once_with(self.context,
- instance=inst_obj,
- **rebuild_args)
-
- def test_rebuild_instance_with_scheduler(self):
- db_instance = self._create_fake_instance()
- inst_obj = objects.Instance.get_by_uuid(self.context,
- db_instance['uuid'])
- inst_obj.host = 'noselect'
- rebuild_args = self._prepare_rebuild_args({'host': None})
- expected_host = 'thebesthost'
- request_spec = {}
- filter_properties = {'ignore_hosts': [(inst_obj.host)]}
-
- with contextlib.nested(
- mock.patch.object(self.conductor_manager.compute_rpcapi,
- 'rebuild_instance'),
- mock.patch.object(self.conductor_manager.scheduler_client,
- 'select_destinations',
- return_value=[{'host': expected_host}]),
- mock.patch('nova.scheduler.utils.build_request_spec',
- return_value=request_spec)
- ) as (rebuild_mock, select_dest_mock, bs_mock):
- self.conductor_manager.rebuild_instance(context=self.context,
- instance=inst_obj,
- **rebuild_args)
- select_dest_mock.assert_called_once_with(self.context,
- request_spec,
- filter_properties)
- rebuild_args['host'] = expected_host
- rebuild_mock.assert_called_once_with(self.context,
- instance=inst_obj,
- **rebuild_args)
-
- def test_rebuild_instance_with_scheduler_no_host(self):
- db_instance = self._create_fake_instance()
- inst_obj = objects.Instance.get_by_uuid(self.context,
- db_instance['uuid'])
- inst_obj.host = 'noselect'
- rebuild_args = self._prepare_rebuild_args({'host': None})
- request_spec = {}
- filter_properties = {'ignore_hosts': [(inst_obj.host)]}
-
- with contextlib.nested(
- mock.patch.object(self.conductor_manager.compute_rpcapi,
- 'rebuild_instance'),
- mock.patch.object(self.conductor_manager.scheduler_client,
- 'select_destinations',
- side_effect=exc.NoValidHost(reason='')),
- mock.patch('nova.scheduler.utils.build_request_spec',
- return_value=request_spec)
- ) as (rebuild_mock, select_dest_mock, bs_mock):
- self.assertRaises(exc.NoValidHost,
- self.conductor_manager.rebuild_instance,
- context=self.context, instance=inst_obj,
- **rebuild_args)
- select_dest_mock.assert_called_once_with(self.context,
- request_spec,
- filter_properties)
- self.assertFalse(rebuild_mock.called)
-
-
-class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
- """ComputeTaskManager Tests."""
- def setUp(self):
- super(ConductorTaskTestCase, self).setUp()
- self.conductor = conductor_manager.ComputeTaskManager()
- self.conductor_manager = self.conductor
-
- def test_migrate_server_fails_with_rebuild(self):
- self.assertRaises(NotImplementedError, self.conductor.migrate_server,
- self.context, None, None, True, True, None, None, None)
-
- def test_migrate_server_fails_with_flavor(self):
- self.assertRaises(NotImplementedError, self.conductor.migrate_server,
- self.context, None, None, True, False, "dummy", None, None)
-
- def _build_request_spec(self, instance):
- return {
- 'instance_properties': {
- 'uuid': instance['uuid'], },
- }
-
- def _test_migrate_server_deals_with_expected_exceptions(self, ex):
- instance = fake_instance.fake_db_instance(uuid='uuid',
- vm_state=vm_states.ACTIVE)
- inst_obj = objects.Instance._from_db_object(
- self.context, objects.Instance(), instance, [])
- self.mox.StubOutWithMock(live_migrate, 'execute')
- self.mox.StubOutWithMock(scheduler_utils,
- 'set_vm_state_and_notify')
-
- live_migrate.execute(self.context, mox.IsA(objects.Instance),
- 'destination', 'block_migration',
- 'disk_over_commit').AndRaise(ex)
-
- scheduler_utils.set_vm_state_and_notify(self.context,
- 'compute_task', 'migrate_server',
- {'vm_state': vm_states.ACTIVE,
- 'task_state': None,
- 'expected_task_state': task_states.MIGRATING},
- ex, self._build_request_spec(inst_obj),
- self.conductor_manager.db)
- self.mox.ReplayAll()
-
- self.conductor = utils.ExceptionHelper(self.conductor)
-
- self.assertRaises(type(ex),
- self.conductor.migrate_server, self.context, inst_obj,
- {'host': 'destination'}, True, False, None, 'block_migration',
- 'disk_over_commit')
-
- def test_migrate_server_deals_with_invalidcpuinfo_exception(self):
- instance = fake_instance.fake_db_instance(uuid='uuid',
- vm_state=vm_states.ACTIVE)
- inst_obj = objects.Instance._from_db_object(
- self.context, objects.Instance(), instance, [])
- self.mox.StubOutWithMock(live_migrate, 'execute')
- self.mox.StubOutWithMock(scheduler_utils,
- 'set_vm_state_and_notify')
-
- ex = exc.InvalidCPUInfo(reason="invalid cpu info.")
- live_migrate.execute(self.context, mox.IsA(objects.Instance),
- 'destination', 'block_migration',
- 'disk_over_commit').AndRaise(ex)
-
- scheduler_utils.set_vm_state_and_notify(self.context,
- 'compute_task', 'migrate_server',
- {'vm_state': vm_states.ACTIVE,
- 'task_state': None,
- 'expected_task_state': task_states.MIGRATING},
- ex, self._build_request_spec(inst_obj),
- self.conductor_manager.db)
- self.mox.ReplayAll()
-
- self.conductor = utils.ExceptionHelper(self.conductor)
-
- self.assertRaises(exc.InvalidCPUInfo,
- self.conductor.migrate_server, self.context, inst_obj,
- {'host': 'destination'}, True, False, None, 'block_migration',
- 'disk_over_commit')
-
- @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify')
- @mock.patch.object(live_migrate, 'execute')
- def test_migrate_server_deals_with_instancenotrunning_exception(self,
- mock_live_migrate, mock_set_state):
- inst = fake_instance.fake_db_instance()
- inst_obj = objects.Instance._from_db_object(
- self.context, objects.Instance(), inst, [])
-
- error = exc.InstanceNotRunning(instance_id="fake")
- mock_live_migrate.side_effect = error
-
- self.conductor = utils.ExceptionHelper(self.conductor)
-
- self.assertRaises(exc.InstanceNotRunning,
- self.conductor.migrate_server, self.context, inst_obj,
- {'host': 'destination'}, True, False, None,
- 'block_migration', 'disk_over_commit')
-
- request_spec = self._build_request_spec(inst_obj)
- mock_set_state.assert_called_once_with(self.context, 'compute_task',
- 'migrate_server',
- dict(vm_state=inst_obj.vm_state,
- task_state=None,
- expected_task_state=task_states.MIGRATING),
- error, request_spec, self.conductor_manager.db)
-
- def test_migrate_server_deals_with_DestinationHypervisorTooOld(self):
- ex = exc.DestinationHypervisorTooOld()
- self._test_migrate_server_deals_with_expected_exceptions(ex)
-
- def test_migrate_server_deals_with_HypervisorUnavailable(self):
- ex = exc.HypervisorUnavailable(host='dummy')
- self._test_migrate_server_deals_with_expected_exceptions(ex)
-
- def test_migrate_server_deals_with_unexpected_exceptions(self):
- instance = fake_instance.fake_db_instance()
- inst_obj = objects.Instance._from_db_object(
- self.context, objects.Instance(), instance, [])
- self.mox.StubOutWithMock(live_migrate, 'execute')
- self.mox.StubOutWithMock(scheduler_utils,
- 'set_vm_state_and_notify')
-
- ex = IOError()
- live_migrate.execute(self.context, mox.IsA(objects.Instance),
- 'destination', 'block_migration',
- 'disk_over_commit').AndRaise(ex)
- self.mox.ReplayAll()
-
- self.conductor = utils.ExceptionHelper(self.conductor)
-
- self.assertRaises(exc.MigrationError,
- self.conductor.migrate_server, self.context, inst_obj,
- {'host': 'destination'}, True, False, None, 'block_migration',
- 'disk_over_commit')
-
- def test_set_vm_state_and_notify(self):
- self.mox.StubOutWithMock(scheduler_utils,
- 'set_vm_state_and_notify')
- scheduler_utils.set_vm_state_and_notify(
- self.context, 'compute_task', 'method', 'updates',
- 'ex', 'request_spec', self.conductor.db)
-
- self.mox.ReplayAll()
-
- self.conductor._set_vm_state_and_notify(
- self.context, 'method', 'updates', 'ex', 'request_spec')
-
- def test_cold_migrate_no_valid_host_back_in_active_state(self):
- flavor = flavors.get_flavor_by_name('m1.tiny')
- inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
- instance_type_id=flavor['id'])
- inst_obj = objects.Instance._from_db_object(
- self.context, objects.Instance(), inst,
- expected_attrs=[])
- request_spec = dict(instance_type=dict(extra_specs=dict()),
- instance_properties=dict())
- filter_props = dict(context=None)
- resvs = 'fake-resvs'
- image = 'fake-image'
-
- self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
- self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
- self.mox.StubOutWithMock(self.conductor.scheduler_client,
- 'select_destinations')
- self.mox.StubOutWithMock(self.conductor,
- '_set_vm_state_and_notify')
- self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
-
- compute_utils.get_image_metadata(
- self.context, self.conductor_manager.image_api,
- 'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
-
- scheduler_utils.build_request_spec(
- self.context, image, [inst_obj],
- instance_type=flavor).AndReturn(request_spec)
-
- exc_info = exc.NoValidHost(reason="")
-
- self.conductor.scheduler_client.select_destinations(
- self.context, request_spec,
- filter_props).AndRaise(exc_info)
-
- updates = {'vm_state': vm_states.ACTIVE,
- 'task_state': None}
-
- self.conductor._set_vm_state_and_notify(self.context,
- 'migrate_server',
- updates, exc_info,
- request_spec)
- # NOTE(mriedem): Validate that the quota rollback is using
- # the correct project_id and user_id.
- project_id, user_id = quotas_obj.ids_from_instance(self.context,
- inst_obj)
- quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
- user_id=user_id)
-
- self.mox.ReplayAll()
-
- self.assertRaises(exc.NoValidHost,
- self.conductor._cold_migrate,
- self.context, inst_obj,
- flavor, filter_props, [resvs])
-
- def test_cold_migrate_no_valid_host_back_in_stopped_state(self):
- flavor = flavors.get_flavor_by_name('m1.tiny')
- inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
- vm_state=vm_states.STOPPED,
- instance_type_id=flavor['id'])
- inst_obj = objects.Instance._from_db_object(
- self.context, objects.Instance(), inst,
- expected_attrs=[])
- request_spec = dict(instance_type=dict(extra_specs=dict()),
- instance_properties=dict())
- filter_props = dict(context=None)
- resvs = 'fake-resvs'
- image = 'fake-image'
-
- self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
- self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
- self.mox.StubOutWithMock(self.conductor.scheduler_client,
- 'select_destinations')
- self.mox.StubOutWithMock(self.conductor,
- '_set_vm_state_and_notify')
- self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
-
- compute_utils.get_image_metadata(
- self.context, self.conductor_manager.image_api,
- 'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
-
- scheduler_utils.build_request_spec(
- self.context, image, [inst_obj],
- instance_type=flavor).AndReturn(request_spec)
-
- exc_info = exc.NoValidHost(reason="")
-
- self.conductor.scheduler_client.select_destinations(
- self.context, request_spec,
- filter_props).AndRaise(exc_info)
-
- updates = {'vm_state': vm_states.STOPPED,
- 'task_state': None}
-
- self.conductor._set_vm_state_and_notify(self.context,
- 'migrate_server',
- updates, exc_info,
- request_spec)
- # NOTE(mriedem): Validate that the quota rollback is using
- # the correct project_id and user_id.
- project_id, user_id = quotas_obj.ids_from_instance(self.context,
- inst_obj)
- quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
- user_id=user_id)
-
- self.mox.ReplayAll()
-
- self.assertRaises(exc.NoValidHost,
- self.conductor._cold_migrate, self.context,
- inst_obj, flavor, filter_props, [resvs])
-
- def test_cold_migrate_no_valid_host_error_msg(self):
- flavor = flavors.get_flavor_by_name('m1.tiny')
- inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
- vm_state=vm_states.STOPPED,
- instance_type_id=flavor['id'])
- inst_obj = objects.Instance._from_db_object(
- self.context, objects.Instance(), inst,
- expected_attrs=[])
- request_spec = dict(instance_type=dict(extra_specs=dict()),
- instance_properties=dict())
- filter_props = dict(context=None)
- resvs = 'fake-resvs'
- image = 'fake-image'
-
- with contextlib.nested(
- mock.patch.object(compute_utils, 'get_image_metadata',
- return_value=image),
- mock.patch.object(scheduler_utils, 'build_request_spec',
- return_value=request_spec),
- mock.patch.object(self.conductor.scheduler_client,
- 'select_destinations',
- side_effect=exc.NoValidHost(reason=""))
- ) as (image_mock, brs_mock, select_dest_mock):
- nvh = self.assertRaises(exc.NoValidHost,
- self.conductor._cold_migrate, self.context,
- inst_obj, flavor, filter_props, [resvs])
- self.assertIn('cold migrate', nvh.message)
-
- def test_cold_migrate_exception_host_in_error_state_and_raise(self):
- inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
- vm_state=vm_states.STOPPED)
- inst_obj = objects.Instance._from_db_object(
- self.context, objects.Instance(), inst,
- expected_attrs=[])
- request_spec = dict(instance_type=dict(extra_specs=dict()),
- instance_properties=dict())
- filter_props = dict(context=None)
- resvs = 'fake-resvs'
- image = 'fake-image'
- hosts = [dict(host='host1', nodename=None, limits={})]
-
- self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
- self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
- self.mox.StubOutWithMock(self.conductor.scheduler_client,
- 'select_destinations')
- self.mox.StubOutWithMock(scheduler_utils,
- 'populate_filter_properties')
- self.mox.StubOutWithMock(self.conductor.compute_rpcapi,
- 'prep_resize')
- self.mox.StubOutWithMock(self.conductor,
- '_set_vm_state_and_notify')
- self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
-
- compute_utils.get_image_metadata(
- self.context, self.conductor_manager.image_api,
- 'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
-
- scheduler_utils.build_request_spec(
- self.context, image, [inst_obj],
- instance_type='flavor').AndReturn(request_spec)
-
- expected_filter_props = {'retry': {'num_attempts': 1,
- 'hosts': []},
- 'context': None}
- self.conductor.scheduler_client.select_destinations(
- self.context, request_spec,
- expected_filter_props).AndReturn(hosts)
-
- scheduler_utils.populate_filter_properties(filter_props,
- hosts[0])
- exc_info = test.TestingException('something happened')
-
- expected_filter_props = {'retry': {'num_attempts': 1,
- 'hosts': []}}
-
- self.conductor.compute_rpcapi.prep_resize(
- self.context, image, inst_obj,
- 'flavor', hosts[0]['host'], [resvs],
- request_spec=request_spec,
- filter_properties=expected_filter_props,
- node=hosts[0]['nodename']).AndRaise(exc_info)
-
- updates = {'vm_state': vm_states.STOPPED,
- 'task_state': None}
-
- self.conductor._set_vm_state_and_notify(self.context,
- 'migrate_server',
- updates, exc_info,
- request_spec)
- # NOTE(mriedem): Validate that the quota rollback is using
- # the correct project_id and user_id.
- project_id, user_id = quotas_obj.ids_from_instance(self.context,
- inst_obj)
- quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
- user_id=user_id)
-
- self.mox.ReplayAll()
-
- self.assertRaises(test.TestingException,
- self.conductor._cold_migrate,
- self.context, inst_obj, 'flavor',
- filter_props, [resvs])
-
- def test_resize_no_valid_host_error_msg(self):
- flavor = flavors.get_flavor_by_name('m1.tiny')
- flavor_new = flavors.get_flavor_by_name('m1.small')
- inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
- vm_state=vm_states.STOPPED,
- instance_type_id=flavor['id'])
- inst_obj = objects.Instance._from_db_object(
- self.context, objects.Instance(), inst,
- expected_attrs=[])
- request_spec = dict(instance_type=dict(extra_specs=dict()),
- instance_properties=dict())
- filter_props = dict(context=None)
- resvs = 'fake-resvs'
- image = 'fake-image'
-
- with contextlib.nested(
- mock.patch.object(compute_utils, 'get_image_metadata',
- return_value=image),
- mock.patch.object(scheduler_utils, 'build_request_spec',
- return_value=request_spec),
- mock.patch.object(self.conductor.scheduler_client,
- 'select_destinations',
- side_effect=exc.NoValidHost(reason=""))
- ) as (image_mock, brs_mock, select_dest_mock):
- nvh = self.assertRaises(exc.NoValidHost,
- self.conductor._cold_migrate, self.context,
- inst_obj, flavor_new, filter_props,
- [resvs])
- self.assertIn('resize', nvh.message)
-
- def test_build_instances_instance_not_found(self):
- instances = [fake_instance.fake_instance_obj(self.context)
- for i in xrange(2)]
- self.mox.StubOutWithMock(instances[0], 'refresh')
- self.mox.StubOutWithMock(instances[1], 'refresh')
- image = {'fake-data': 'should_pass_silently'}
- spec = {'fake': 'specs',
- 'instance_properties': instances[0]}
- self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
- self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
- self.mox.StubOutWithMock(scheduler_driver, 'handle_schedule_error')
- self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
- 'select_destinations')
- self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
- 'build_and_run_instance')
-
- scheduler_utils.build_request_spec(self.context, image,
- mox.IgnoreArg()).AndReturn(spec)
- scheduler_utils.setup_instance_group(self.context, None, None)
- self.conductor_manager.scheduler_client.select_destinations(
- self.context, spec,
- {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
- [{'host': 'host1', 'nodename': 'node1', 'limits': []},
- {'host': 'host2', 'nodename': 'node2', 'limits': []}])
- instances[0].refresh().AndRaise(
- exc.InstanceNotFound(instance_id=instances[0].uuid))
- instances[1].refresh()
- self.conductor_manager.compute_rpcapi.build_and_run_instance(
- self.context, instance=instances[1], host='host2',
- image={'fake-data': 'should_pass_silently'}, request_spec=spec,
- filter_properties={'limits': [],
- 'retry': {'num_attempts': 1,
- 'hosts': [['host2',
- 'node2']]}},
- admin_password='admin_password',
- injected_files='injected_files',
- requested_networks=None,
- security_groups='security_groups',
- block_device_mapping=mox.IsA(objects.BlockDeviceMappingList),
- node='node2', limits=[])
- self.mox.ReplayAll()
-
- # build_instances() is a cast, we need to wait for it to complete
- self.useFixture(cast_as_call.CastAsCall(self.stubs))
-
- self.conductor.build_instances(self.context,
- instances=instances,
- image=image,
- filter_properties={},
- admin_password='admin_password',
- injected_files='injected_files',
- requested_networks=None,
- security_groups='security_groups',
- block_device_mapping='block_device_mapping',
- legacy_bdm=False)
-
- @mock.patch.object(scheduler_utils, 'setup_instance_group')
- @mock.patch.object(scheduler_utils, 'build_request_spec')
- def test_build_instances_info_cache_not_found(self, build_request_spec,
- setup_instance_group):
- instances = [fake_instance.fake_instance_obj(self.context)
- for i in xrange(2)]
- image = {'fake-data': 'should_pass_silently'}
- destinations = [{'host': 'host1', 'nodename': 'node1', 'limits': []},
- {'host': 'host2', 'nodename': 'node2', 'limits': []}]
- spec = {'fake': 'specs',
- 'instance_properties': instances[0]}
- build_request_spec.return_value = spec
- with contextlib.nested(
- mock.patch.object(instances[0], 'refresh',
- side_effect=exc.InstanceInfoCacheNotFound(
- instance_uuid=instances[0].uuid)),
- mock.patch.object(instances[1], 'refresh'),
- mock.patch.object(self.conductor_manager.scheduler_client,
- 'select_destinations', return_value=destinations),
- mock.patch.object(self.conductor_manager.compute_rpcapi,
- 'build_and_run_instance')
- ) as (inst1_refresh, inst2_refresh, select_destinations,
- build_and_run_instance):
-
- # build_instances() is a cast, we need to wait for it to complete
- self.useFixture(cast_as_call.CastAsCall(self.stubs))
-
- self.conductor.build_instances(self.context,
- instances=instances,
- image=image,
- filter_properties={},
- admin_password='admin_password',
- injected_files='injected_files',
- requested_networks=None,
- security_groups='security_groups',
- block_device_mapping='block_device_mapping',
- legacy_bdm=False)
-
- setup_instance_group.assert_called_once_with(
- self.context, None, None)
- build_and_run_instance.assert_called_once_with(self.context,
- instance=instances[1], host='host2', image={'fake-data':
- 'should_pass_silently'}, request_spec=spec,
- filter_properties={'limits': [],
- 'retry': {'num_attempts': 1,
- 'hosts': [['host2',
- 'node2']]}},
- admin_password='admin_password',
- injected_files='injected_files',
- requested_networks=None,
- security_groups='security_groups',
- block_device_mapping=mock.ANY,
- node='node2', limits=[])
-
-
-class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
- test_compute.BaseTestCase):
- """Conductor compute_task RPC namespace Tests."""
- def setUp(self):
- super(ConductorTaskRPCAPITestCase, self).setUp()
- self.conductor_service = self.start_service(
- 'conductor', manager='nova.conductor.manager.ConductorManager')
- self.conductor = conductor_rpcapi.ComputeTaskAPI()
- service_manager = self.conductor_service.manager
- self.conductor_manager = service_manager.compute_task_mgr
-
-
-class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
- """Compute task API Tests."""
- def setUp(self):
- super(ConductorTaskAPITestCase, self).setUp()
- self.conductor_service = self.start_service(
- 'conductor', manager='nova.conductor.manager.ConductorManager')
- self.conductor = conductor_api.ComputeTaskAPI()
- service_manager = self.conductor_service.manager
- self.conductor_manager = service_manager.compute_task_mgr
-
-
-class ConductorLocalComputeTaskAPITestCase(ConductorTaskAPITestCase):
- """Conductor LocalComputeTaskAPI Tests."""
- def setUp(self):
- super(ConductorLocalComputeTaskAPITestCase, self).setUp()
- self.conductor = conductor_api.LocalComputeTaskAPI()
- self.conductor_manager = self.conductor._manager._target
diff --git a/nova/tests/conf_fixture.py b/nova/tests/conf_fixture.py
deleted file mode 100644
index 3dc7a1b72b..0000000000
--- a/nova/tests/conf_fixture.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.config import cfg
-
-
-from nova import config
-from nova import ipv6
-from nova.openstack.common.fixture import config as config_fixture
-from nova import paths
-from nova.tests import utils
-
-CONF = cfg.CONF
-CONF.import_opt('use_ipv6', 'nova.netconf')
-CONF.import_opt('host', 'nova.netconf')
-CONF.import_opt('scheduler_driver', 'nova.scheduler.manager')
-CONF.import_opt('fake_network', 'nova.network.linux_net')
-CONF.import_opt('network_size', 'nova.network.manager')
-CONF.import_opt('num_networks', 'nova.network.manager')
-CONF.import_opt('floating_ip_dns_manager', 'nova.network.floating_ips')
-CONF.import_opt('instance_dns_manager', 'nova.network.floating_ips')
-CONF.import_opt('policy_file', 'nova.openstack.common.policy')
-CONF.import_opt('compute_driver', 'nova.virt.driver')
-CONF.import_opt('api_paste_config', 'nova.wsgi')
-
-
-class ConfFixture(config_fixture.Config):
- """Fixture to manage global conf settings."""
- def setUp(self):
- super(ConfFixture, self).setUp()
- self.conf.set_default('api_paste_config',
- paths.state_path_def('etc/nova/api-paste.ini'))
- self.conf.set_default('host', 'fake-mini')
- self.conf.set_default('compute_driver',
- 'nova.virt.fake.SmallFakeDriver')
- self.conf.set_default('fake_network', True)
- self.conf.set_default('flat_network_bridge', 'br100')
- self.conf.set_default('floating_ip_dns_manager',
- 'nova.tests.utils.dns_manager')
- self.conf.set_default('instance_dns_manager',
- 'nova.tests.utils.dns_manager')
- self.conf.set_default('network_size', 8)
- self.conf.set_default('num_networks', 2)
- self.conf.set_default('use_ipv6', True)
- self.conf.set_default('vlan_interface', 'eth0')
- self.conf.set_default('auth_strategy', 'noauth')
- config.parse_args([], default_config_files=[])
- self.conf.set_default('connection', "sqlite://", group='database')
- self.conf.set_default('sqlite_synchronous', False, group='database')
- self.addCleanup(utils.cleanup_dns_managers)
- self.addCleanup(ipv6.api.reset_backend)
diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py
deleted file mode 100644
index 03354d834e..0000000000
--- a/nova/tests/db/test_db_api.py
+++ /dev/null
@@ -1,7517 +0,0 @@
-# encoding=UTF8
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Unit tests for the DB API."""
-
-import copy
-import datetime
-import types
-import uuid as stdlib_uuid
-
-import iso8601
-import mock
-import netaddr
-from oslo.config import cfg
-from oslo.db import exception as db_exc
-from oslo.db.sqlalchemy import test_base
-from oslo.db.sqlalchemy import utils as sqlalchemyutils
-from oslo.serialization import jsonutils
-from oslo.utils import timeutils
-import six
-from sqlalchemy import Column
-from sqlalchemy.dialects import sqlite
-from sqlalchemy import Integer
-from sqlalchemy import MetaData
-from sqlalchemy.orm import query
-from sqlalchemy import sql
-from sqlalchemy import Table
-
-from nova import block_device
-from nova.compute import arch
-from nova.compute import vm_states
-from nova import context
-from nova import db
-from nova.db.sqlalchemy import api as sqlalchemy_api
-from nova.db.sqlalchemy import models
-from nova.db.sqlalchemy import types as col_types
-from nova.db.sqlalchemy import utils as db_utils
-from nova import exception
-from nova.openstack.common import uuidutils
-from nova import quota
-from nova import test
-from nova.tests import matchers
-from nova import utils
-
-CONF = cfg.CONF
-CONF.import_opt('reserved_host_memory_mb', 'nova.compute.resource_tracker')
-CONF.import_opt('reserved_host_disk_mb', 'nova.compute.resource_tracker')
-
-get_engine = sqlalchemy_api.get_engine
-get_session = sqlalchemy_api.get_session
-
-
-def _reservation_get(context, uuid):
- result = sqlalchemy_api.model_query(context, models.Reservation,
- read_deleted="no").filter_by(uuid=uuid).first()
-
- if not result:
- raise exception.ReservationNotFound(uuid=uuid)
-
- return result
-
-
-def _quota_reserve(context, project_id, user_id):
- """Create sample Quota, QuotaUsage and Reservation objects.
-
- There is no method db.quota_usage_create(), so we have to use
- db.quota_reserve() for creating QuotaUsage objects.
-
- Returns reservations uuids.
-
- """
- def get_sync(resource, usage):
- def sync(elevated, project_id, user_id, session):
- return {resource: usage}
- return sync
- quotas = {}
- user_quotas = {}
- resources = {}
- deltas = {}
- for i in range(3):
- resource = 'resource%d' % i
- if i == 2:
- # test for project level resources
- resource = 'fixed_ips'
- quotas[resource] = db.quota_create(context,
- project_id, resource, i)
- user_quotas[resource] = quotas[resource]
- else:
- quotas[resource] = db.quota_create(context,
- project_id, resource, i)
- user_quotas[resource] = db.quota_create(context, project_id,
- resource, i,
- user_id=user_id)
- sync_name = '_sync_%s' % resource
- resources[resource] = quota.ReservableResource(
- resource, sync_name, 'quota_res_%d' % i)
- deltas[resource] = i
- setattr(sqlalchemy_api, sync_name, get_sync(resource, i))
- sqlalchemy_api.QUOTA_SYNC_FUNCTIONS[sync_name] = getattr(
- sqlalchemy_api, sync_name)
- return db.quota_reserve(context, resources, quotas, user_quotas, deltas,
- timeutils.utcnow(), CONF.until_refresh,
- datetime.timedelta(days=1), project_id, user_id)
-
-
-class DbTestCase(test.TestCase):
- def setUp(self):
- super(DbTestCase, self).setUp()
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id, self.project_id)
-
- def create_instance_with_args(self, **kwargs):
- args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1',
- 'node': 'node1', 'project_id': self.project_id,
- 'vm_state': 'fake'}
- if 'context' in kwargs:
- ctxt = kwargs.pop('context')
- args['project_id'] = ctxt.project_id
- else:
- ctxt = self.context
- args.update(kwargs)
- return db.instance_create(ctxt, args)
-
- def fake_metadata(self, content):
- meta = {}
- for i in range(0, 10):
- meta["foo%i" % i] = "this is %s item %i" % (content, i)
- return meta
-
- def create_metadata_for_instance(self, instance_uuid):
- meta = self.fake_metadata('metadata')
- db.instance_metadata_update(self.context, instance_uuid, meta, False)
- sys_meta = self.fake_metadata('system_metadata')
- db.instance_system_metadata_update(self.context, instance_uuid,
- sys_meta, False)
- return meta, sys_meta
-
-
-class DecoratorTestCase(test.TestCase):
- def _test_decorator_wraps_helper(self, decorator):
- def test_func():
- """Test docstring."""
-
- decorated_func = decorator(test_func)
-
- self.assertEqual(test_func.func_name, decorated_func.func_name)
- self.assertEqual(test_func.__doc__, decorated_func.__doc__)
- self.assertEqual(test_func.__module__, decorated_func.__module__)
-
- def test_require_context_decorator_wraps_functions_properly(self):
- self._test_decorator_wraps_helper(sqlalchemy_api.require_context)
-
- def test_require_admin_context_decorator_wraps_functions_properly(self):
- self._test_decorator_wraps_helper(sqlalchemy_api.require_admin_context)
-
- def test_require_deadlock_retry_wraps_functions_properly(self):
- self._test_decorator_wraps_helper(sqlalchemy_api._retry_on_deadlock)
-
-
-def _get_fake_aggr_values():
- return {'name': 'fake_aggregate'}
-
-
-def _get_fake_aggr_metadata():
- return {'fake_key1': 'fake_value1',
- 'fake_key2': 'fake_value2',
- 'availability_zone': 'fake_avail_zone'}
-
-
-def _get_fake_aggr_hosts():
- return ['foo.openstack.org']
-
-
-def _create_aggregate(context=context.get_admin_context(),
- values=_get_fake_aggr_values(),
- metadata=_get_fake_aggr_metadata()):
- return db.aggregate_create(context, values, metadata)
-
-
-def _create_aggregate_with_hosts(context=context.get_admin_context(),
- values=_get_fake_aggr_values(),
- metadata=_get_fake_aggr_metadata(),
- hosts=_get_fake_aggr_hosts()):
- result = _create_aggregate(context=context,
- values=values, metadata=metadata)
- for host in hosts:
- db.aggregate_host_add(context, result['id'], host)
- return result
-
-
-class NotDbApiTestCase(DbTestCase):
- def setUp(self):
- super(NotDbApiTestCase, self).setUp()
- self.flags(connection='notdb://', group='database')
-
- def test_instance_get_all_by_filters_regex_unsupported_db(self):
- # Ensure that the 'LIKE' operator is used for unsupported dbs.
- self.create_instance_with_args(display_name='test1')
- self.create_instance_with_args(display_name='test2')
- self.create_instance_with_args(display_name='diff')
- result = db.instance_get_all_by_filters(self.context,
- {'display_name': 'test'})
- self.assertEqual(2, len(result))
- result = db.instance_get_all_by_filters(self.context,
- {'display_name': 'di'})
- self.assertEqual(1, len(result))
-
- def test_instance_get_all_by_filters_paginate(self):
- test1 = self.create_instance_with_args(display_name='test1')
- test2 = self.create_instance_with_args(display_name='test2')
- test3 = self.create_instance_with_args(display_name='test3')
-
- result = db.instance_get_all_by_filters(self.context,
- {'display_name': '%test%'},
- marker=None)
- self.assertEqual(3, len(result))
- result = db.instance_get_all_by_filters(self.context,
- {'display_name': '%test%'},
- sort_dir="asc",
- marker=test1['uuid'])
- self.assertEqual(2, len(result))
- result = db.instance_get_all_by_filters(self.context,
- {'display_name': '%test%'},
- sort_dir="asc",
- marker=test2['uuid'])
- self.assertEqual(1, len(result))
- result = db.instance_get_all_by_filters(self.context,
- {'display_name': '%test%'},
- sort_dir="asc",
- marker=test3['uuid'])
- self.assertEqual(0, len(result))
-
- self.assertRaises(exception.MarkerNotFound,
- db.instance_get_all_by_filters,
- self.context, {'display_name': '%test%'},
- marker=str(stdlib_uuid.uuid4()))
-
- def test_convert_objects_related_datetimes(self):
-
- t1 = timeutils.utcnow()
- t2 = t1 + datetime.timedelta(seconds=10)
- t3 = t2 + datetime.timedelta(hours=1)
-
- t2_utc = t2.replace(tzinfo=iso8601.iso8601.Utc())
- t3_utc = t3.replace(tzinfo=iso8601.iso8601.Utc())
-
- datetime_keys = ('created_at', 'deleted_at')
-
- test1 = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
- expected_dict = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
- sqlalchemy_api.convert_objects_related_datetimes(test1, *datetime_keys)
- self.assertEqual(test1, expected_dict)
-
- test2 = {'created_at': t1, 'deleted_at': t2_utc, 'updated_at': t3}
- expected_dict = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
- sqlalchemy_api.convert_objects_related_datetimes(test2, *datetime_keys)
- self.assertEqual(test2, expected_dict)
-
- test3 = {'deleted_at': t2_utc, 'updated_at': t3_utc}
- expected_dict = {'deleted_at': t2, 'updated_at': t3_utc}
- sqlalchemy_api.convert_objects_related_datetimes(test3, *datetime_keys)
- self.assertEqual(test3, expected_dict)
-
-
-class AggregateDBApiTestCase(test.TestCase):
- def setUp(self):
- super(AggregateDBApiTestCase, self).setUp()
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id, self.project_id)
-
- def test_aggregate_create_no_metadata(self):
- result = _create_aggregate(metadata=None)
- self.assertEqual(result['name'], 'fake_aggregate')
-
- def test_aggregate_create_avoid_name_conflict(self):
- r1 = _create_aggregate(metadata=None)
- db.aggregate_delete(context.get_admin_context(), r1['id'])
- values = {'name': r1['name']}
- metadata = {'availability_zone': 'new_zone'}
- r2 = _create_aggregate(values=values, metadata=metadata)
- self.assertEqual(r2['name'], values['name'])
- self.assertEqual(r2['availability_zone'],
- metadata['availability_zone'])
-
- def test_aggregate_create_raise_exist_exc(self):
- _create_aggregate(metadata=None)
- self.assertRaises(exception.AggregateNameExists,
- _create_aggregate, metadata=None)
-
- def test_aggregate_get_raise_not_found(self):
- ctxt = context.get_admin_context()
- # this does not exist!
- aggregate_id = 1
- self.assertRaises(exception.AggregateNotFound,
- db.aggregate_get,
- ctxt, aggregate_id)
-
- def test_aggregate_metadata_get_raise_not_found(self):
- ctxt = context.get_admin_context()
- # this does not exist!
- aggregate_id = 1
- self.assertRaises(exception.AggregateNotFound,
- db.aggregate_metadata_get,
- ctxt, aggregate_id)
-
- def test_aggregate_create_with_metadata(self):
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt)
- expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
- self.assertThat(expected_metadata,
- matchers.DictMatches(_get_fake_aggr_metadata()))
-
- def test_aggregate_create_delete_create_with_metadata(self):
- # test for bug 1052479
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt)
- expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
- self.assertThat(expected_metadata,
- matchers.DictMatches(_get_fake_aggr_metadata()))
- db.aggregate_delete(ctxt, result['id'])
- result = _create_aggregate(metadata={'availability_zone':
- 'fake_avail_zone'})
- expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
- self.assertEqual(expected_metadata, {'availability_zone':
- 'fake_avail_zone'})
-
- def test_aggregate_get(self):
- ctxt = context.get_admin_context()
- result = _create_aggregate_with_hosts(context=ctxt)
- expected = db.aggregate_get(ctxt, result['id'])
- self.assertEqual(_get_fake_aggr_hosts(), expected['hosts'])
- self.assertEqual(_get_fake_aggr_metadata(), expected['metadetails'])
-
- def test_aggregate_get_by_host(self):
- ctxt = context.get_admin_context()
- values2 = {'name': 'fake_aggregate2'}
- values3 = {'name': 'fake_aggregate3'}
- values4 = {'name': 'fake_aggregate4'}
- values5 = {'name': 'fake_aggregate5'}
- a1 = _create_aggregate_with_hosts(context=ctxt)
- a2 = _create_aggregate_with_hosts(context=ctxt, values=values2)
- # a3 has no hosts and should not be in the results.
- _create_aggregate(context=ctxt, values=values3)
- # a4 has no matching hosts.
- _create_aggregate_with_hosts(context=ctxt, values=values4,
- hosts=['foo4.openstack.org'])
- # a5 has no matching hosts after deleting the only matching host.
- a5 = _create_aggregate_with_hosts(context=ctxt, values=values5,
- hosts=['foo5.openstack.org', 'foo.openstack.org'])
- db.aggregate_host_delete(ctxt, a5['id'],
- 'foo.openstack.org')
- r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org')
- self.assertEqual([a1['id'], a2['id']], [x['id'] for x in r1])
-
- def test_aggregate_get_by_host_with_key(self):
- ctxt = context.get_admin_context()
- values2 = {'name': 'fake_aggregate2'}
- values3 = {'name': 'fake_aggregate3'}
- values4 = {'name': 'fake_aggregate4'}
- a1 = _create_aggregate_with_hosts(context=ctxt,
- metadata={'goodkey': 'good'})
- _create_aggregate_with_hosts(context=ctxt, values=values2)
- _create_aggregate(context=ctxt, values=values3)
- _create_aggregate_with_hosts(context=ctxt, values=values4,
- hosts=['foo4.openstack.org'], metadata={'goodkey': 'bad'})
- # filter result by key
- r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org', key='goodkey')
- self.assertEqual([a1['id']], [x['id'] for x in r1])
-
- def test_aggregate_metadata_get_by_host(self):
- ctxt = context.get_admin_context()
- values = {'name': 'fake_aggregate2'}
- values2 = {'name': 'fake_aggregate3'}
- _create_aggregate_with_hosts(context=ctxt)
- _create_aggregate_with_hosts(context=ctxt, values=values)
- _create_aggregate_with_hosts(context=ctxt, values=values2,
- hosts=['bar.openstack.org'], metadata={'badkey': 'bad'})
- r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org')
- self.assertEqual(r1['fake_key1'], set(['fake_value1']))
- self.assertNotIn('badkey', r1)
-
- def test_aggregate_metadata_get_by_metadata_key(self):
- ctxt = context.get_admin_context()
- values = {'aggregate_id': 'fake_id',
- 'name': 'fake_aggregate'}
- aggr = _create_aggregate_with_hosts(context=ctxt, values=values,
- hosts=['bar.openstack.org'],
- metadata={'availability_zone':
- 'az1'})
- r1 = db.aggregate_metadata_get_by_metadata_key(ctxt, aggr['id'],
- 'availability_zone')
- self.assertEqual(r1['availability_zone'], set(['az1']))
- self.assertIn('availability_zone', r1)
- self.assertNotIn('name', r1)
-
- def test_aggregate_metadata_get_by_host_with_key(self):
- ctxt = context.get_admin_context()
- values2 = {'name': 'fake_aggregate12'}
- values3 = {'name': 'fake_aggregate23'}
- a2_hosts = ['foo1.openstack.org', 'foo2.openstack.org']
- a2_metadata = {'good': 'value12', 'bad': 'badvalue12'}
- a3_hosts = ['foo2.openstack.org', 'foo3.openstack.org']
- a3_metadata = {'good': 'value23', 'bad': 'badvalue23'}
- _create_aggregate_with_hosts(context=ctxt)
- _create_aggregate_with_hosts(context=ctxt, values=values2,
- hosts=a2_hosts, metadata=a2_metadata)
- a3 = _create_aggregate_with_hosts(context=ctxt, values=values3,
- hosts=a3_hosts, metadata=a3_metadata)
- r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo2.openstack.org',
- key='good')
- self.assertEqual(r1['good'], set(['value12', 'value23']))
- self.assertNotIn('fake_key1', r1)
- self.assertNotIn('bad', r1)
- # Delete metadata
- db.aggregate_metadata_delete(ctxt, a3['id'], 'good')
- r2 = db.aggregate_metadata_get_by_host(ctxt, 'foo3.openstack.org',
- key='good')
- self.assertNotIn('good', r2)
-
- def test_aggregate_host_get_by_metadata_key(self):
- ctxt = context.get_admin_context()
- values2 = {'name': 'fake_aggregate12'}
- values3 = {'name': 'fake_aggregate23'}
- a2_hosts = ['foo1.openstack.org', 'foo2.openstack.org']
- a2_metadata = {'good': 'value12', 'bad': 'badvalue12'}
- a3_hosts = ['foo2.openstack.org', 'foo3.openstack.org']
- a3_metadata = {'good': 'value23', 'bad': 'badvalue23'}
- _create_aggregate_with_hosts(context=ctxt)
- _create_aggregate_with_hosts(context=ctxt, values=values2,
- hosts=a2_hosts, metadata=a2_metadata)
- _create_aggregate_with_hosts(context=ctxt, values=values3,
- hosts=a3_hosts, metadata=a3_metadata)
- r1 = db.aggregate_host_get_by_metadata_key(ctxt, key='good')
- self.assertEqual({
- 'foo1.openstack.org': set(['value12']),
- 'foo2.openstack.org': set(['value12', 'value23']),
- 'foo3.openstack.org': set(['value23']),
- }, r1)
- self.assertNotIn('fake_key1', r1)
-
- def test_aggregate_get_by_host_not_found(self):
- ctxt = context.get_admin_context()
- _create_aggregate_with_hosts(context=ctxt)
- self.assertEqual([], db.aggregate_get_by_host(ctxt, 'unknown_host'))
-
- def test_aggregate_delete_raise_not_found(self):
- ctxt = context.get_admin_context()
- # this does not exist!
- aggregate_id = 1
- self.assertRaises(exception.AggregateNotFound,
- db.aggregate_delete,
- ctxt, aggregate_id)
-
- def test_aggregate_delete(self):
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt, metadata=None)
- db.aggregate_delete(ctxt, result['id'])
- expected = db.aggregate_get_all(ctxt)
- self.assertEqual(0, len(expected))
- aggregate = db.aggregate_get(ctxt.elevated(read_deleted='yes'),
- result['id'])
- self.assertEqual(aggregate['deleted'], result['id'])
-
- def test_aggregate_update(self):
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt, metadata={'availability_zone':
- 'fake_avail_zone'})
- self.assertEqual(result['availability_zone'], 'fake_avail_zone')
- new_values = _get_fake_aggr_values()
- new_values['availability_zone'] = 'different_avail_zone'
- updated = db.aggregate_update(ctxt, result['id'], new_values)
- self.assertNotEqual(result['availability_zone'],
- updated['availability_zone'])
-
- def test_aggregate_update_with_metadata(self):
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt, metadata=None)
- values = _get_fake_aggr_values()
- values['metadata'] = _get_fake_aggr_metadata()
- values['availability_zone'] = 'different_avail_zone'
- db.aggregate_update(ctxt, result['id'], values)
- expected = db.aggregate_metadata_get(ctxt, result['id'])
- updated = db.aggregate_get(ctxt, result['id'])
- self.assertThat(values['metadata'],
- matchers.DictMatches(expected))
- self.assertNotEqual(result['availability_zone'],
- updated['availability_zone'])
-
- def test_aggregate_update_with_existing_metadata(self):
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt)
- values = _get_fake_aggr_values()
- values['metadata'] = _get_fake_aggr_metadata()
- values['metadata']['fake_key1'] = 'foo'
- db.aggregate_update(ctxt, result['id'], values)
- expected = db.aggregate_metadata_get(ctxt, result['id'])
- self.assertThat(values['metadata'], matchers.DictMatches(expected))
-
- def test_aggregate_update_zone_with_existing_metadata(self):
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt)
- new_zone = {'availability_zone': 'fake_avail_zone_2'}
- metadata = _get_fake_aggr_metadata()
- metadata.update(new_zone)
- db.aggregate_update(ctxt, result['id'], new_zone)
- expected = db.aggregate_metadata_get(ctxt, result['id'])
- self.assertThat(metadata, matchers.DictMatches(expected))
-
- def test_aggregate_update_raise_not_found(self):
- ctxt = context.get_admin_context()
- # this does not exist!
- aggregate_id = 1
- new_values = _get_fake_aggr_values()
- self.assertRaises(exception.AggregateNotFound,
- db.aggregate_update, ctxt, aggregate_id, new_values)
-
- def test_aggregate_update_raise_name_exist(self):
- ctxt = context.get_admin_context()
- _create_aggregate(context=ctxt, values={'name': 'test1'},
- metadata={'availability_zone': 'fake_avail_zone'})
- _create_aggregate(context=ctxt, values={'name': 'test2'},
- metadata={'availability_zone': 'fake_avail_zone'})
- aggregate_id = 1
- new_values = {'name': 'test2'}
- self.assertRaises(exception.AggregateNameExists,
- db.aggregate_update, ctxt, aggregate_id, new_values)
-
- def test_aggregate_get_all(self):
- ctxt = context.get_admin_context()
- counter = 3
- for c in range(counter):
- _create_aggregate(context=ctxt,
- values={'name': 'fake_aggregate_%d' % c},
- metadata=None)
- results = db.aggregate_get_all(ctxt)
- self.assertEqual(len(results), counter)
-
- def test_aggregate_get_all_non_deleted(self):
- ctxt = context.get_admin_context()
- add_counter = 5
- remove_counter = 2
- aggregates = []
- for c in range(1, add_counter):
- values = {'name': 'fake_aggregate_%d' % c}
- aggregates.append(_create_aggregate(context=ctxt,
- values=values, metadata=None))
- for c in range(1, remove_counter):
- db.aggregate_delete(ctxt, aggregates[c - 1]['id'])
- results = db.aggregate_get_all(ctxt)
- self.assertEqual(len(results), add_counter - remove_counter)
-
- def test_aggregate_metadata_add(self):
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt, metadata=None)
- metadata = _get_fake_aggr_metadata()
- db.aggregate_metadata_add(ctxt, result['id'], metadata)
- expected = db.aggregate_metadata_get(ctxt, result['id'])
- self.assertThat(metadata, matchers.DictMatches(expected))
-
- def test_aggregate_metadata_add_and_update(self):
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt)
- metadata = _get_fake_aggr_metadata()
- key = metadata.keys()[0]
- new_metadata = {key: 'foo',
- 'fake_new_key': 'fake_new_value'}
- metadata.update(new_metadata)
- db.aggregate_metadata_add(ctxt, result['id'], new_metadata)
- expected = db.aggregate_metadata_get(ctxt, result['id'])
- self.assertThat(metadata, matchers.DictMatches(expected))
-
- def test_aggregate_metadata_add_retry(self):
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt, metadata=None)
-
- def counted():
- def get_query(context, id, session, read_deleted):
- get_query.counter += 1
- raise db_exc.DBDuplicateEntry
- get_query.counter = 0
- return get_query
-
- get_query = counted()
- self.stubs.Set(sqlalchemy_api,
- '_aggregate_metadata_get_query', get_query)
- self.assertRaises(db_exc.DBDuplicateEntry, sqlalchemy_api.
- aggregate_metadata_add, ctxt, result['id'], {},
- max_retries=5)
- self.assertEqual(get_query.counter, 5)
-
- def test_aggregate_metadata_update(self):
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt)
- metadata = _get_fake_aggr_metadata()
- key = metadata.keys()[0]
- db.aggregate_metadata_delete(ctxt, result['id'], key)
- new_metadata = {key: 'foo'}
- db.aggregate_metadata_add(ctxt, result['id'], new_metadata)
- expected = db.aggregate_metadata_get(ctxt, result['id'])
- metadata[key] = 'foo'
- self.assertThat(metadata, matchers.DictMatches(expected))
-
- def test_aggregate_metadata_delete(self):
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt, metadata=None)
- metadata = _get_fake_aggr_metadata()
- db.aggregate_metadata_add(ctxt, result['id'], metadata)
- db.aggregate_metadata_delete(ctxt, result['id'], metadata.keys()[0])
- expected = db.aggregate_metadata_get(ctxt, result['id'])
- del metadata[metadata.keys()[0]]
- self.assertThat(metadata, matchers.DictMatches(expected))
-
- def test_aggregate_remove_availability_zone(self):
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt, metadata={'availability_zone':
- 'fake_avail_zone'})
- db.aggregate_metadata_delete(ctxt, result['id'], 'availability_zone')
- expected = db.aggregate_metadata_get(ctxt, result['id'])
- aggregate = db.aggregate_get(ctxt, result['id'])
- self.assertIsNone(aggregate['availability_zone'])
- self.assertThat({}, matchers.DictMatches(expected))
-
- def test_aggregate_metadata_delete_raise_not_found(self):
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt)
- self.assertRaises(exception.AggregateMetadataNotFound,
- db.aggregate_metadata_delete,
- ctxt, result['id'], 'foo_key')
-
- def test_aggregate_host_add(self):
- ctxt = context.get_admin_context()
- result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
- expected = db.aggregate_host_get_all(ctxt, result['id'])
- self.assertEqual(_get_fake_aggr_hosts(), expected)
-
- def test_aggregate_host_re_add(self):
- ctxt = context.get_admin_context()
- result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
- host = _get_fake_aggr_hosts()[0]
- db.aggregate_host_delete(ctxt, result['id'], host)
- db.aggregate_host_add(ctxt, result['id'], host)
- expected = db.aggregate_host_get_all(ctxt, result['id'])
- self.assertEqual(len(expected), 1)
-
- def test_aggregate_host_add_duplicate_works(self):
- ctxt = context.get_admin_context()
- r1 = _create_aggregate_with_hosts(context=ctxt, metadata=None)
- r2 = _create_aggregate_with_hosts(ctxt,
- values={'name': 'fake_aggregate2'},
- metadata={'availability_zone': 'fake_avail_zone2'})
- h1 = db.aggregate_host_get_all(ctxt, r1['id'])
- h2 = db.aggregate_host_get_all(ctxt, r2['id'])
- self.assertEqual(h1, h2)
-
- def test_aggregate_host_add_duplicate_raise_exist_exc(self):
- ctxt = context.get_admin_context()
- result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
- self.assertRaises(exception.AggregateHostExists,
- db.aggregate_host_add,
- ctxt, result['id'], _get_fake_aggr_hosts()[0])
-
- def test_aggregate_host_add_raise_not_found(self):
- ctxt = context.get_admin_context()
- # this does not exist!
- aggregate_id = 1
- host = _get_fake_aggr_hosts()[0]
- self.assertRaises(exception.AggregateNotFound,
- db.aggregate_host_add,
- ctxt, aggregate_id, host)
-
- def test_aggregate_host_delete(self):
- ctxt = context.get_admin_context()
- result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
- db.aggregate_host_delete(ctxt, result['id'],
- _get_fake_aggr_hosts()[0])
- expected = db.aggregate_host_get_all(ctxt, result['id'])
- self.assertEqual(0, len(expected))
-
- def test_aggregate_host_delete_raise_not_found(self):
- ctxt = context.get_admin_context()
- result = _create_aggregate(context=ctxt)
- self.assertRaises(exception.AggregateHostNotFound,
- db.aggregate_host_delete,
- ctxt, result['id'], _get_fake_aggr_hosts()[0])
-
-
-class SqlAlchemyDbApiTestCase(DbTestCase):
- def test_instance_get_all_by_host(self):
- ctxt = context.get_admin_context()
-
- self.create_instance_with_args()
- self.create_instance_with_args()
- self.create_instance_with_args(host='host2')
- result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1')
- self.assertEqual(2, len(result))
-
- def test_instance_get_all_uuids_by_host(self):
- ctxt = context.get_admin_context()
- self.create_instance_with_args()
- self.create_instance_with_args()
- self.create_instance_with_args(host='host2')
- result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1')
- self.assertEqual(2, len(result))
- self.assertEqual(types.UnicodeType, type(result[0]))
-
- def test_instance_get_active_by_window_joined(self):
- now = datetime.datetime(2013, 10, 10, 17, 16, 37, 156701)
- start_time = now - datetime.timedelta(minutes=10)
- now1 = now + datetime.timedelta(minutes=1)
- now2 = now + datetime.timedelta(minutes=2)
- now3 = now + datetime.timedelta(minutes=3)
- ctxt = context.get_admin_context()
- # used for testing columns_to_join
- network_info = jsonutils.dumps({'ckey': 'cvalue'})
- sample_data = {
- 'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
- 'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
- 'info_cache': {'network_info': network_info},
- }
- self.create_instance_with_args(launched_at=now, **sample_data)
- self.create_instance_with_args(launched_at=now1, terminated_at=now2,
- **sample_data)
- self.create_instance_with_args(launched_at=now2, terminated_at=now3,
- **sample_data)
- self.create_instance_with_args(launched_at=now3, terminated_at=None,
- **sample_data)
-
- result = sqlalchemy_api.instance_get_active_by_window_joined(
- ctxt, begin=now)
- self.assertEqual(4, len(result))
- # verify that all default columns are joined
- meta = utils.metadata_to_dict(result[0]['metadata'])
- self.assertEqual(sample_data['metadata'], meta)
- sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
- self.assertEqual(sample_data['system_metadata'], sys_meta)
- self.assertIn('info_cache', result[0])
-
- result = sqlalchemy_api.instance_get_active_by_window_joined(
- ctxt, begin=now3, columns_to_join=['info_cache'])
- self.assertEqual(2, len(result))
- # verify that only info_cache is loaded
- meta = utils.metadata_to_dict(result[0]['metadata'])
- self.assertEqual({}, meta)
- self.assertIn('info_cache', result[0])
-
- result = sqlalchemy_api.instance_get_active_by_window_joined(
- ctxt, begin=start_time, end=now)
- self.assertEqual(0, len(result))
-
- result = sqlalchemy_api.instance_get_active_by_window_joined(
- ctxt, begin=start_time, end=now2,
- columns_to_join=['system_metadata'])
- self.assertEqual(2, len(result))
- # verify that only system_metadata is loaded
- meta = utils.metadata_to_dict(result[0]['metadata'])
- self.assertEqual({}, meta)
- sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
- self.assertEqual(sample_data['system_metadata'], sys_meta)
- self.assertNotIn('info_cache', result[0])
-
- result = sqlalchemy_api.instance_get_active_by_window_joined(
- ctxt, begin=now2, end=now3,
- columns_to_join=['metadata', 'info_cache'])
- self.assertEqual(2, len(result))
- # verify that only metadata and info_cache are loaded
- meta = utils.metadata_to_dict(result[0]['metadata'])
- self.assertEqual(sample_data['metadata'], meta)
- sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
- self.assertEqual({}, sys_meta)
- self.assertIn('info_cache', result[0])
- self.assertEqual(network_info, result[0]['info_cache']['network_info'])
-
-
-class ProcessSortParamTestCase(test.TestCase):
-
- def test_process_sort_params_defaults(self):
- '''Verifies default sort parameters.'''
- sort_keys, sort_dirs = sqlalchemy_api.process_sort_params([], [])
- self.assertEqual(['created_at', 'id'], sort_keys)
- self.assertEqual(['asc', 'asc'], sort_dirs)
-
- sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(None, None)
- self.assertEqual(['created_at', 'id'], sort_keys)
- self.assertEqual(['asc', 'asc'], sort_dirs)
-
- def test_process_sort_params_override_default_keys(self):
- '''Verifies that the default keys can be overridden.'''
- sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
- [], [], default_keys=['key1', 'key2', 'key3'])
- self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
- self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)
-
- def test_process_sort_params_override_default_dir(self):
- '''Verifies that the default direction can be overridden.'''
- sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
- [], [], default_dir='dir1')
- self.assertEqual(['created_at', 'id'], sort_keys)
- self.assertEqual(['dir1', 'dir1'], sort_dirs)
-
- def test_process_sort_params_override_default_key_and_dir(self):
- '''Verifies that the default key and dir can be overridden.'''
- sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
- [], [], default_keys=['key1', 'key2', 'key3'],
- default_dir='dir1')
- self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
- self.assertEqual(['dir1', 'dir1', 'dir1'], sort_dirs)
-
- sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
- [], [], default_keys=[], default_dir='dir1')
- self.assertEqual([], sort_keys)
- self.assertEqual([], sort_dirs)
-
- def test_process_sort_params_non_default(self):
- '''Verifies that non-default keys are added correctly.'''
- sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
- ['key1', 'key2'], ['asc', 'desc'])
- self.assertEqual(['key1', 'key2', 'created_at', 'id'], sort_keys)
- # First sort_dir in list is used when adding the default keys
- self.assertEqual(['asc', 'desc', 'asc', 'asc'], sort_dirs)
-
- def test_process_sort_params_default(self):
- '''Verifies that default keys are added correctly.'''
- sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
- ['id', 'key2'], ['asc', 'desc'])
- self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
- self.assertEqual(['asc', 'desc', 'asc'], sort_dirs)
-
- # Include default key value, rely on default direction
- sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
- ['id', 'key2'], [])
- self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
- self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)
-
- def test_process_sort_params_default_dir(self):
- '''Verifies that the default dir is applied to all keys.'''
- # Direction is set, ignore default dir
- sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
- ['id', 'key2'], ['desc'], default_dir='dir')
- self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
- self.assertEqual(['desc', 'desc', 'desc'], sort_dirs)
-
- # But should be used if no direction is set
- sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
- ['id', 'key2'], [], default_dir='dir')
- self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
- self.assertEqual(['dir', 'dir', 'dir'], sort_dirs)
-
- def test_process_sort_params_unequal_length(self):
- '''Verifies that a sort direction list is applied correctly.'''
- sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
- ['id', 'key2', 'key3'], ['desc'])
- self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
- self.assertEqual(['desc', 'desc', 'desc', 'desc'], sort_dirs)
-
- # Default direction is the first key in the list
- sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
- ['id', 'key2', 'key3'], ['desc', 'asc'])
- self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
- self.assertEqual(['desc', 'asc', 'desc', 'desc'], sort_dirs)
-
- sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
- ['id', 'key2', 'key3'], ['desc', 'asc', 'asc'])
- self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
- self.assertEqual(['desc', 'asc', 'asc', 'desc'], sort_dirs)
-
- def test_process_sort_params_extra_dirs_lengths(self):
- '''InvalidInput raised if more directions are given.'''
- self.assertRaises(exception.InvalidInput,
- sqlalchemy_api.process_sort_params,
- ['key1', 'key2'],
- ['asc', 'desc', 'desc'])
-
-
-class MigrationTestCase(test.TestCase):
-
- def setUp(self):
- super(MigrationTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
-
- self._create()
- self._create()
- self._create(status='reverted')
- self._create(status='confirmed')
- self._create(status='error')
- self._create(source_compute='host2', source_node='b',
- dest_compute='host1', dest_node='a')
- self._create(source_compute='host2', dest_compute='host3')
- self._create(source_compute='host3', dest_compute='host4')
-
- def _create(self, status='migrating', source_compute='host1',
- source_node='a', dest_compute='host2', dest_node='b',
- system_metadata=None):
-
- values = {'host': source_compute}
- instance = db.instance_create(self.ctxt, values)
- if system_metadata:
- db.instance_system_metadata_update(self.ctxt, instance['uuid'],
- system_metadata, False)
-
- values = {'status': status, 'source_compute': source_compute,
- 'source_node': source_node, 'dest_compute': dest_compute,
- 'dest_node': dest_node, 'instance_uuid': instance['uuid']}
- db.migration_create(self.ctxt, values)
-
- def _assert_in_progress(self, migrations):
- for migration in migrations:
- self.assertNotEqual('confirmed', migration['status'])
- self.assertNotEqual('reverted', migration['status'])
- self.assertNotEqual('error', migration['status'])
-
- def test_migration_get_in_progress_joins(self):
- self._create(source_compute='foo', system_metadata={'foo': 'bar'})
- migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
- 'foo', 'a')
- system_metadata = migrations[0]['instance']['system_metadata'][0]
- self.assertEqual(system_metadata['key'], 'foo')
- self.assertEqual(system_metadata['value'], 'bar')
-
- def test_in_progress_host1_nodea(self):
- migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
- 'host1', 'a')
- # 2 as source + 1 as dest
- self.assertEqual(3, len(migrations))
- self._assert_in_progress(migrations)
-
- def test_in_progress_host1_nodeb(self):
- migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
- 'host1', 'b')
- # some migrations are to/from host1, but none with a node 'b'
- self.assertEqual(0, len(migrations))
-
- def test_in_progress_host2_nodeb(self):
- migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
- 'host2', 'b')
- # 2 as dest, 1 as source
- self.assertEqual(3, len(migrations))
- self._assert_in_progress(migrations)
-
- def test_instance_join(self):
- migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
- 'host2', 'b')
- for migration in migrations:
- instance = migration['instance']
- self.assertEqual(migration['instance_uuid'], instance['uuid'])
-
- def test_get_migrations_by_filters(self):
- filters = {"status": "migrating", "host": "host3"}
- migrations = db.migration_get_all_by_filters(self.ctxt, filters)
- self.assertEqual(2, len(migrations))
- for migration in migrations:
- self.assertEqual(filters["status"], migration['status'])
- hosts = [migration['source_compute'], migration['dest_compute']]
- self.assertIn(filters["host"], hosts)
-
- def test_only_admin_can_get_all_migrations_by_filters(self):
- user_ctxt = context.RequestContext(user_id=None, project_id=None,
- is_admin=False, read_deleted="no",
- overwrite=False)
-
- self.assertRaises(exception.AdminRequired,
- db.migration_get_all_by_filters, user_ctxt, {})
-
- def test_migration_get_unconfirmed_by_dest_compute(self):
- # Ensure no migrations are returned.
- results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
- 'fake_host')
- self.assertEqual(0, len(results))
-
- # Ensure no migrations are returned.
- results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
- 'fake_host2')
- self.assertEqual(0, len(results))
-
- updated_at = datetime.datetime(2000, 1, 1, 12, 0, 0)
- values = {"status": "finished", "updated_at": updated_at,
- "dest_compute": "fake_host2"}
- migration = db.migration_create(self.ctxt, values)
-
- # Ensure different host is not returned
- results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
- 'fake_host')
- self.assertEqual(0, len(results))
-
- # Ensure one migration older than 10 seconds is returned.
- results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
- 'fake_host2')
- self.assertEqual(1, len(results))
- db.migration_update(self.ctxt, migration['id'],
- {"status": "CONFIRMED"})
-
- # Ensure the new migration is not returned.
- updated_at = timeutils.utcnow()
- values = {"status": "finished", "updated_at": updated_at,
- "dest_compute": "fake_host2"}
- migration = db.migration_create(self.ctxt, values)
- results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
- "fake_host2")
- self.assertEqual(0, len(results))
- db.migration_update(self.ctxt, migration['id'],
- {"status": "CONFIRMED"})
-
- def test_migration_update_not_found(self):
- self.assertRaises(exception.MigrationNotFound,
- db.migration_update, self.ctxt, 42, {})
-
-
-class ModelsObjectComparatorMixin(object):
- def _dict_from_object(self, obj, ignored_keys):
- if ignored_keys is None:
- ignored_keys = []
- return dict([(k, v) for k, v in obj.iteritems()
- if k not in ignored_keys])
-
- def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
- obj1 = self._dict_from_object(obj1, ignored_keys)
- obj2 = self._dict_from_object(obj2, ignored_keys)
-
- self.assertEqual(len(obj1),
- len(obj2),
- "Keys mismatch: %s" %
- str(set(obj1.keys()) ^ set(obj2.keys())))
- for key, value in obj1.iteritems():
- self.assertEqual(value, obj2[key])
-
- def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
- obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
- sort_key = lambda d: [d[k] for k in sorted(d)]
- conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key)
-
- self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2))
-
- def _assertEqualOrderedListOfObjects(self, objs1, objs2,
- ignored_keys=None):
- obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
- conv = lambda obj: map(obj_to_dict, obj)
-
- self.assertEqual(conv(objs1), conv(objs2))
-
- def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2):
- self.assertEqual(len(primitives1), len(primitives2))
- for primitive in primitives1:
- self.assertIn(primitive, primitives2)
-
- for primitive in primitives2:
- self.assertIn(primitive, primitives1)
-
-
-class InstanceSystemMetadataTestCase(test.TestCase):
-
- """Tests for db.api.instance_system_metadata_* methods."""
-
- def setUp(self):
- super(InstanceSystemMetadataTestCase, self).setUp()
- values = {'host': 'h1', 'project_id': 'p1',
- 'system_metadata': {'key': 'value'}}
- self.ctxt = context.get_admin_context()
- self.instance = db.instance_create(self.ctxt, values)
-
- def test_instance_system_metadata_get(self):
- metadata = db.instance_system_metadata_get(self.ctxt,
- self.instance['uuid'])
- self.assertEqual(metadata, {'key': 'value'})
-
- def test_instance_system_metadata_update_new_pair(self):
- db.instance_system_metadata_update(
- self.ctxt, self.instance['uuid'],
- {'new_key': 'new_value'}, False)
- metadata = db.instance_system_metadata_get(self.ctxt,
- self.instance['uuid'])
- self.assertEqual(metadata, {'key': 'value', 'new_key': 'new_value'})
-
- def test_instance_system_metadata_update_existent_pair(self):
- db.instance_system_metadata_update(
- self.ctxt, self.instance['uuid'],
- {'key': 'new_value'}, True)
- metadata = db.instance_system_metadata_get(self.ctxt,
- self.instance['uuid'])
- self.assertEqual(metadata, {'key': 'new_value'})
-
- def test_instance_system_metadata_update_delete_true(self):
- db.instance_system_metadata_update(
- self.ctxt, self.instance['uuid'],
- {'new_key': 'new_value'}, True)
- metadata = db.instance_system_metadata_get(self.ctxt,
- self.instance['uuid'])
- self.assertEqual(metadata, {'new_key': 'new_value'})
-
- @test.testtools.skip("bug 1189462")
- def test_instance_system_metadata_update_nonexistent(self):
- self.assertRaises(exception.InstanceNotFound,
- db.instance_system_metadata_update,
- self.ctxt, 'nonexistent-uuid',
- {'key': 'value'}, True)
-
-
-class ReservationTestCase(test.TestCase, ModelsObjectComparatorMixin):
-
- """Tests for db.api.reservation_* methods."""
-
- def setUp(self):
- super(ReservationTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
-
- self.reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
- usage = db.quota_usage_get(self.ctxt, 'project1', 'resource1', 'user1')
-
- self.values = {'uuid': 'sample-uuid',
- 'project_id': 'project1',
- 'user_id': 'user1',
- 'resource': 'resource1',
- 'delta': 42,
- 'expire': timeutils.utcnow() + datetime.timedelta(days=1),
- 'usage': {'id': usage.id}}
-
- def test_reservation_commit(self):
- expected = {'project_id': 'project1', 'user_id': 'user1',
- 'resource0': {'reserved': 0, 'in_use': 0},
- 'resource1': {'reserved': 1, 'in_use': 1},
- 'fixed_ips': {'reserved': 2, 'in_use': 2}}
- self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
- self.ctxt, 'project1', 'user1'))
- _reservation_get(self.ctxt, self.reservations[0])
- db.reservation_commit(self.ctxt, self.reservations, 'project1',
- 'user1')
- self.assertRaises(exception.ReservationNotFound,
- _reservation_get, self.ctxt, self.reservations[0])
- expected = {'project_id': 'project1', 'user_id': 'user1',
- 'resource0': {'reserved': 0, 'in_use': 0},
- 'resource1': {'reserved': 0, 'in_use': 2},
- 'fixed_ips': {'reserved': 0, 'in_use': 4}}
- self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
- self.ctxt, 'project1', 'user1'))
-
- def test_reservation_rollback(self):
- expected = {'project_id': 'project1', 'user_id': 'user1',
- 'resource0': {'reserved': 0, 'in_use': 0},
- 'resource1': {'reserved': 1, 'in_use': 1},
- 'fixed_ips': {'reserved': 2, 'in_use': 2}}
- self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
- self.ctxt, 'project1', 'user1'))
- _reservation_get(self.ctxt, self.reservations[0])
- db.reservation_rollback(self.ctxt, self.reservations, 'project1',
- 'user1')
- self.assertRaises(exception.ReservationNotFound,
- _reservation_get, self.ctxt, self.reservations[0])
- expected = {'project_id': 'project1', 'user_id': 'user1',
- 'resource0': {'reserved': 0, 'in_use': 0},
- 'resource1': {'reserved': 0, 'in_use': 1},
- 'fixed_ips': {'reserved': 0, 'in_use': 2}}
- self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
- self.ctxt, 'project1', 'user1'))
-
- def test_reservation_expire(self):
- db.reservation_expire(self.ctxt)
-
- expected = {'project_id': 'project1', 'user_id': 'user1',
- 'resource0': {'reserved': 0, 'in_use': 0},
- 'resource1': {'reserved': 0, 'in_use': 1},
- 'fixed_ips': {'reserved': 0, 'in_use': 2}}
- self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
- self.ctxt, 'project1', 'user1'))
-
-
-class SecurityGroupRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
- def setUp(self):
- super(SecurityGroupRuleTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
-
- def _get_base_values(self):
- return {
- 'name': 'fake_sec_group',
- 'description': 'fake_sec_group_descr',
- 'user_id': 'fake',
- 'project_id': 'fake',
- 'instances': []
- }
-
- def _get_base_rule_values(self):
- return {
- 'protocol': "tcp",
- 'from_port': 80,
- 'to_port': 8080,
- 'cidr': None,
- 'deleted': 0,
- 'deleted_at': None,
- 'grantee_group': None,
- 'updated_at': None
- }
-
- def _create_security_group(self, values):
- v = self._get_base_values()
- v.update(values)
- return db.security_group_create(self.ctxt, v)
-
- def _create_security_group_rule(self, values):
- v = self._get_base_rule_values()
- v.update(values)
- return db.security_group_rule_create(self.ctxt, v)
-
- def test_security_group_rule_create(self):
- security_group_rule = self._create_security_group_rule({})
- self.assertIsNotNone(security_group_rule['id'])
- for key, value in self._get_base_rule_values().items():
- self.assertEqual(value, security_group_rule[key])
-
- def _test_security_group_rule_get_by_security_group(self, columns=None):
- instance = db.instance_create(self.ctxt,
- {'system_metadata': {'foo': 'bar'}})
- security_group = self._create_security_group({
- 'instances': [instance]})
- security_group_rule = self._create_security_group_rule(
- {'parent_group': security_group, 'grantee_group': security_group})
- security_group_rule1 = self._create_security_group_rule(
- {'parent_group': security_group, 'grantee_group': security_group})
- found_rules = db.security_group_rule_get_by_security_group(
- self.ctxt, security_group['id'], columns_to_join=columns)
- self.assertEqual(len(found_rules), 2)
- rules_ids = [security_group_rule['id'], security_group_rule1['id']]
- for rule in found_rules:
- if columns is None:
- self.assertIn('grantee_group', dict(rule.iteritems()))
- self.assertIn('instances',
- dict(rule.grantee_group.iteritems()))
- self.assertIn(
- 'system_metadata',
- dict(rule.grantee_group.instances[0].iteritems()))
- self.assertIn(rule['id'], rules_ids)
- else:
- self.assertNotIn('grantee_group', dict(rule.iteritems()))
-
- def test_security_group_rule_get_by_security_group(self):
- self._test_security_group_rule_get_by_security_group()
-
- def test_security_group_rule_get_by_security_group_no_joins(self):
- self._test_security_group_rule_get_by_security_group(columns=[])
-
- def test_security_group_rule_get_by_security_group_grantee(self):
- security_group = self._create_security_group({})
- security_group_rule = self._create_security_group_rule(
- {'grantee_group': security_group})
- rules = db.security_group_rule_get_by_security_group_grantee(self.ctxt,
- security_group['id'])
- self.assertEqual(len(rules), 1)
- self.assertEqual(rules[0]['id'], security_group_rule['id'])
-
- def test_security_group_rule_destroy(self):
- self._create_security_group({'name': 'fake1'})
- self._create_security_group({'name': 'fake2'})
- security_group_rule1 = self._create_security_group_rule({})
- security_group_rule2 = self._create_security_group_rule({})
- db.security_group_rule_destroy(self.ctxt, security_group_rule1['id'])
- self.assertRaises(exception.SecurityGroupNotFound,
- db.security_group_rule_get,
- self.ctxt, security_group_rule1['id'])
- self._assertEqualObjects(db.security_group_rule_get(self.ctxt,
- security_group_rule2['id']),
- security_group_rule2, ['grantee_group'])
-
- def test_security_group_rule_destroy_not_found_exception(self):
- self.assertRaises(exception.SecurityGroupNotFound,
- db.security_group_rule_destroy, self.ctxt, 100500)
-
- def test_security_group_rule_get(self):
- security_group_rule1 = (
- self._create_security_group_rule({}))
- self._create_security_group_rule({})
- real_security_group_rule = db.security_group_rule_get(self.ctxt,
- security_group_rule1['id'])
- self._assertEqualObjects(security_group_rule1,
- real_security_group_rule, ['grantee_group'])
-
- def test_security_group_rule_get_not_found_exception(self):
- self.assertRaises(exception.SecurityGroupNotFound,
- db.security_group_rule_get, self.ctxt, 100500)
-
- def test_security_group_rule_count_by_group(self):
- sg1 = self._create_security_group({'name': 'fake1'})
- sg2 = self._create_security_group({'name': 'fake2'})
- rules_by_group = {sg1: [], sg2: []}
- for group in rules_by_group:
- rules = rules_by_group[group]
- for i in range(0, 10):
- rules.append(
- self._create_security_group_rule({'parent_group_id':
- group['id']}))
- db.security_group_rule_destroy(self.ctxt,
- rules_by_group[sg1][0]['id'])
- counted_groups = [db.security_group_rule_count_by_group(self.ctxt,
- group['id'])
- for group in [sg1, sg2]]
- expected = [9, 10]
- self.assertEqual(counted_groups, expected)
-
-
-class SecurityGroupTestCase(test.TestCase, ModelsObjectComparatorMixin):
- def setUp(self):
- super(SecurityGroupTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
-
- def _get_base_values(self):
- return {
- 'name': 'fake_sec_group',
- 'description': 'fake_sec_group_descr',
- 'user_id': 'fake',
- 'project_id': 'fake',
- 'instances': []
- }
-
- def _create_security_group(self, values):
- v = self._get_base_values()
- v.update(values)
- return db.security_group_create(self.ctxt, v)
-
- def test_security_group_create(self):
- security_group = self._create_security_group({})
- self.assertIsNotNone(security_group['id'])
- for key, value in self._get_base_values().iteritems():
- self.assertEqual(value, security_group[key])
-
- def test_security_group_destroy(self):
- security_group1 = self._create_security_group({})
- security_group2 = \
- self._create_security_group({'name': 'fake_sec_group2'})
-
- db.security_group_destroy(self.ctxt, security_group1['id'])
- self.assertRaises(exception.SecurityGroupNotFound,
- db.security_group_get,
- self.ctxt, security_group1['id'])
- self._assertEqualObjects(db.security_group_get(
- self.ctxt, security_group2['id'],
- columns_to_join=['instances']), security_group2)
-
- def test_security_group_get(self):
- security_group1 = self._create_security_group({})
- self._create_security_group({'name': 'fake_sec_group2'})
- real_security_group = db.security_group_get(self.ctxt,
- security_group1['id'],
- columns_to_join=['instances'])
- self._assertEqualObjects(security_group1,
- real_security_group)
-
- def test_security_group_get_with_instance_columns(self):
- instance = db.instance_create(self.ctxt,
- {'system_metadata': {'foo': 'bar'}})
- secgroup = self._create_security_group({'instances': [instance]})
- secgroup = db.security_group_get(
- self.ctxt, secgroup['id'],
- columns_to_join=['instances.system_metadata'])
- inst = secgroup.instances[0]
- self.assertIn('system_metadata', dict(inst.iteritems()).keys())
-
- def test_security_group_get_no_instances(self):
- instance = db.instance_create(self.ctxt, {})
- sid = self._create_security_group({'instances': [instance]})['id']
-
- security_group = db.security_group_get(self.ctxt, sid,
- columns_to_join=['instances'])
- self.assertIn('instances', security_group.__dict__)
-
- security_group = db.security_group_get(self.ctxt, sid)
- self.assertNotIn('instances', security_group.__dict__)
-
- def test_security_group_get_not_found_exception(self):
- self.assertRaises(exception.SecurityGroupNotFound,
- db.security_group_get, self.ctxt, 100500)
-
- def test_security_group_get_by_name(self):
- security_group1 = self._create_security_group({'name': 'fake1'})
- security_group2 = self._create_security_group({'name': 'fake2'})
-
- real_security_group1 = db.security_group_get_by_name(
- self.ctxt,
- security_group1['project_id'],
- security_group1['name'],
- columns_to_join=None)
- real_security_group2 = db.security_group_get_by_name(
- self.ctxt,
- security_group2['project_id'],
- security_group2['name'],
- columns_to_join=None)
- self._assertEqualObjects(security_group1, real_security_group1)
- self._assertEqualObjects(security_group2, real_security_group2)
-
- def test_security_group_get_by_project(self):
- security_group1 = self._create_security_group(
- {'name': 'fake1', 'project_id': 'fake_proj1'})
- security_group2 = self._create_security_group(
- {'name': 'fake2', 'project_id': 'fake_proj2'})
-
- real1 = db.security_group_get_by_project(
- self.ctxt,
- security_group1['project_id'])
- real2 = db.security_group_get_by_project(
- self.ctxt,
- security_group2['project_id'])
-
- expected1, expected2 = [security_group1], [security_group2]
- self._assertEqualListsOfObjects(expected1, real1,
- ignored_keys=['instances'])
- self._assertEqualListsOfObjects(expected2, real2,
- ignored_keys=['instances'])
-
- def test_security_group_get_by_instance(self):
- instance = db.instance_create(self.ctxt, dict(host='foo'))
- values = [
- {'name': 'fake1', 'instances': [instance]},
- {'name': 'fake2', 'instances': [instance]},
- {'name': 'fake3', 'instances': []},
- ]
- security_groups = [self._create_security_group(vals)
- for vals in values]
-
- real = db.security_group_get_by_instance(self.ctxt,
- instance['uuid'])
- expected = security_groups[:2]
- self._assertEqualListsOfObjects(expected, real,
- ignored_keys=['instances'])
-
- def test_security_group_get_all(self):
- values = [
- {'name': 'fake1', 'project_id': 'fake_proj1'},
- {'name': 'fake2', 'project_id': 'fake_proj2'},
- ]
- security_groups = [self._create_security_group(vals)
- for vals in values]
-
- real = db.security_group_get_all(self.ctxt)
-
- self._assertEqualListsOfObjects(security_groups, real,
- ignored_keys=['instances'])
-
- def test_security_group_in_use(self):
- instance = db.instance_create(self.ctxt, dict(host='foo'))
- values = [
- {'instances': [instance],
- 'name': 'fake_in_use'},
- {'instances': []},
- ]
-
- security_groups = [self._create_security_group(vals)
- for vals in values]
-
- real = []
- for security_group in security_groups:
- in_use = db.security_group_in_use(self.ctxt,
- security_group['id'])
- real.append(in_use)
- expected = [True, False]
-
- self.assertEqual(expected, real)
-
- def test_security_group_ensure_default(self):
- self.ctxt.project_id = 'fake'
- self.ctxt.user_id = 'fake'
- self.assertEqual(0, len(db.security_group_get_by_project(
- self.ctxt,
- self.ctxt.project_id)))
-
- db.security_group_ensure_default(self.ctxt)
-
- security_groups = db.security_group_get_by_project(
- self.ctxt,
- self.ctxt.project_id)
-
- self.assertEqual(1, len(security_groups))
- self.assertEqual("default", security_groups[0]["name"])
-
- usage = db.quota_usage_get(self.ctxt,
- self.ctxt.project_id,
- 'security_groups',
- self.ctxt.user_id)
- self.assertEqual(1, usage.in_use)
-
- @mock.patch.object(db.sqlalchemy.api, '_security_group_get_by_names')
- def test_security_group_ensure_default_called_concurrently(self, sg_mock):
- # make sure NotFound is always raised here to trick Nova to insert the
- # duplicate security group entry
- sg_mock.side_effect = exception.NotFound
-
- # create the first db entry
- self.ctxt.project_id = 1
- db.security_group_ensure_default(self.ctxt)
- security_groups = db.security_group_get_by_project(
- self.ctxt,
- self.ctxt.project_id)
- self.assertEqual(1, len(security_groups))
-
- # create the second one and ensure the exception is handled properly
- default_group = db.security_group_ensure_default(self.ctxt)
- self.assertEqual('default', default_group.name)
-
- def test_security_group_update(self):
- security_group = self._create_security_group({})
- new_values = {
- 'name': 'sec_group1',
- 'description': 'sec_group_descr1',
- 'user_id': 'fake_user1',
- 'project_id': 'fake_proj1',
- }
-
- updated_group = db.security_group_update(self.ctxt,
- security_group['id'],
- new_values,
- columns_to_join=['rules.grantee_group'])
- for key, value in new_values.iteritems():
- self.assertEqual(updated_group[key], value)
- self.assertEqual(updated_group['rules'], [])
-
- def test_security_group_update_to_duplicate(self):
- self._create_security_group(
- {'name': 'fake1', 'project_id': 'fake_proj1'})
- security_group2 = self._create_security_group(
- {'name': 'fake1', 'project_id': 'fake_proj2'})
-
- self.assertRaises(exception.SecurityGroupExists,
- db.security_group_update,
- self.ctxt, security_group2['id'],
- {'project_id': 'fake_proj1'})
-
-
-class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
-
- """Tests for db.api.instance_* methods."""
-
- sample_data = {
- 'project_id': 'project1',
- 'hostname': 'example.com',
- 'host': 'h1',
- 'node': 'n1',
- 'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
- 'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
- 'info_cache': {'ckey': 'cvalue'},
- }
-
- def setUp(self):
- super(InstanceTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
-
- def _assertEqualInstances(self, instance1, instance2):
- self._assertEqualObjects(instance1, instance2,
- ignored_keys=['metadata', 'system_metadata', 'info_cache'])
-
- def _assertEqualListsOfInstances(self, list1, list2):
- self._assertEqualListsOfObjects(list1, list2,
- ignored_keys=['metadata', 'system_metadata', 'info_cache'])
-
- def create_instance_with_args(self, **kwargs):
- if 'context' in kwargs:
- context = kwargs.pop('context')
- else:
- context = self.ctxt
- args = self.sample_data.copy()
- args.update(kwargs)
- return db.instance_create(context, args)
-
- def test_instance_create(self):
- instance = self.create_instance_with_args()
- self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
-
- def test_instance_create_with_object_values(self):
- values = {
- 'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
- 'access_ip_v6': netaddr.IPAddress('::1'),
- }
- dt_keys = ('created_at', 'deleted_at', 'updated_at',
- 'launched_at', 'terminated_at', 'scheduled_at')
- dt = timeutils.utcnow()
- dt_utc = dt.replace(tzinfo=iso8601.iso8601.Utc())
- for key in dt_keys:
- values[key] = dt_utc
- inst = db.instance_create(self.ctxt, values)
- self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
- self.assertEqual(inst['access_ip_v6'], '::1')
- for key in dt_keys:
- self.assertEqual(inst[key], dt)
-
- def test_instance_update_with_object_values(self):
- values = {
- 'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
- 'access_ip_v6': netaddr.IPAddress('::1'),
- }
- dt_keys = ('created_at', 'deleted_at', 'updated_at',
- 'launched_at', 'terminated_at', 'scheduled_at')
- dt = timeutils.utcnow()
- dt_utc = dt.replace(tzinfo=iso8601.iso8601.Utc())
- for key in dt_keys:
- values[key] = dt_utc
- inst = db.instance_create(self.ctxt, {})
- inst = db.instance_update(self.ctxt, inst['uuid'], values)
- self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
- self.assertEqual(inst['access_ip_v6'], '::1')
- for key in dt_keys:
- self.assertEqual(inst[key], dt)
-
- def test_instance_update_no_metadata_clobber(self):
- meta = {'foo': 'bar'}
- sys_meta = {'sfoo': 'sbar'}
- values = {
- 'metadata': meta,
- 'system_metadata': sys_meta,
- }
- inst = db.instance_create(self.ctxt, {})
- inst = db.instance_update(self.ctxt, inst['uuid'], values)
- self.assertEqual({'foo': 'bar'}, meta)
- self.assertEqual({'sfoo': 'sbar'}, sys_meta)
-
- def test_instance_get_all_with_meta(self):
- inst = self.create_instance_with_args()
- for inst in db.instance_get_all(self.ctxt):
- meta = utils.metadata_to_dict(inst['metadata'])
- self.assertEqual(meta, self.sample_data['metadata'])
- sys_meta = utils.metadata_to_dict(inst['system_metadata'])
- self.assertEqual(sys_meta, self.sample_data['system_metadata'])
-
- def test_instance_update(self):
- instance = self.create_instance_with_args()
- metadata = {'host': 'bar', 'key2': 'wuff'}
- system_metadata = {'original_image_ref': 'baz'}
- # Update the metadata
- db.instance_update(self.ctxt, instance['uuid'], {'metadata': metadata,
- 'system_metadata': system_metadata})
- # Retrieve the user-provided metadata to ensure it was successfully
- # updated
- self.assertEqual(metadata,
- db.instance_metadata_get(self.ctxt, instance['uuid']))
- self.assertEqual(system_metadata,
- db.instance_system_metadata_get(self.ctxt, instance['uuid']))
-
- def test_instance_update_bad_str_dates(self):
- instance = self.create_instance_with_args()
- values = {'created_at': '123'}
- self.assertRaises(ValueError,
- db.instance_update,
- self.ctxt, instance['uuid'], values)
-
- def test_instance_update_good_str_dates(self):
- instance = self.create_instance_with_args()
- values = {'created_at': '2011-01-31T00:00:00.0'}
- actual = db.instance_update(self.ctxt, instance['uuid'], values)
- expected = datetime.datetime(2011, 1, 31)
- self.assertEqual(expected, actual["created_at"])
-
- def test_create_instance_unique_hostname(self):
- context1 = context.RequestContext('user1', 'p1')
- context2 = context.RequestContext('user2', 'p2')
- self.create_instance_with_args(hostname='h1', project_id='p1')
-
- # With scope 'global' any duplicate should fail, be it this project:
- self.flags(osapi_compute_unique_server_name_scope='global')
- self.assertRaises(exception.InstanceExists,
- self.create_instance_with_args,
- context=context1,
- hostname='h1', project_id='p3')
- # or another:
- self.assertRaises(exception.InstanceExists,
- self.create_instance_with_args,
- context=context2,
- hostname='h1', project_id='p2')
- # With scope 'project' a duplicate in the project should fail:
- self.flags(osapi_compute_unique_server_name_scope='project')
- self.assertRaises(exception.InstanceExists,
- self.create_instance_with_args,
- context=context1,
- hostname='h1', project_id='p1')
- # With scope 'project' a duplicate in a different project should work:
- self.flags(osapi_compute_unique_server_name_scope='project')
- self.create_instance_with_args(context=context2, hostname='h2')
- self.flags(osapi_compute_unique_server_name_scope=None)
-
- def test_instance_get_all_by_filters_with_meta(self):
- inst = self.create_instance_with_args()
- for inst in db.instance_get_all_by_filters(self.ctxt, {}):
- meta = utils.metadata_to_dict(inst['metadata'])
- self.assertEqual(meta, self.sample_data['metadata'])
- sys_meta = utils.metadata_to_dict(inst['system_metadata'])
- self.assertEqual(sys_meta, self.sample_data['system_metadata'])
-
- def test_instance_get_all_by_filters_without_meta(self):
- inst = self.create_instance_with_args()
- result = db.instance_get_all_by_filters(self.ctxt, {},
- columns_to_join=[])
- for inst in result:
- meta = utils.metadata_to_dict(inst['metadata'])
- self.assertEqual(meta, {})
- sys_meta = utils.metadata_to_dict(inst['system_metadata'])
- self.assertEqual(sys_meta, {})
-
- def test_instance_get_all_by_filters(self):
- instances = [self.create_instance_with_args() for i in range(3)]
- filtered_instances = db.instance_get_all_by_filters(self.ctxt, {})
- self._assertEqualListsOfInstances(instances, filtered_instances)
-
- def test_instance_get_all_by_filters_zero_limit(self):
- self.create_instance_with_args()
- instances = db.instance_get_all_by_filters(self.ctxt, {}, limit=0)
- self.assertEqual([], instances)
-
- def test_instance_metadata_get_multi(self):
- uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
- meta = sqlalchemy_api._instance_metadata_get_multi(self.ctxt, uuids)
- for row in meta:
- self.assertIn(row['instance_uuid'], uuids)
-
- def test_instance_metadata_get_multi_no_uuids(self):
- self.mox.StubOutWithMock(query.Query, 'filter')
- self.mox.ReplayAll()
- sqlalchemy_api._instance_metadata_get_multi(self.ctxt, [])
-
- def test_instance_system_system_metadata_get_multi(self):
- uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
- sys_meta = sqlalchemy_api._instance_system_metadata_get_multi(
- self.ctxt, uuids)
- for row in sys_meta:
- self.assertIn(row['instance_uuid'], uuids)
-
- def test_instance_system_metadata_get_multi_no_uuids(self):
- self.mox.StubOutWithMock(query.Query, 'filter')
- self.mox.ReplayAll()
- sqlalchemy_api._instance_system_metadata_get_multi(self.ctxt, [])
-
- def test_instance_get_all_by_filters_regex(self):
- i1 = self.create_instance_with_args(display_name='test1')
- i2 = self.create_instance_with_args(display_name='teeeest2')
- self.create_instance_with_args(display_name='diff')
- result = db.instance_get_all_by_filters(self.ctxt,
- {'display_name': 't.*st.'})
- self._assertEqualListsOfInstances(result, [i1, i2])
-
- def test_instance_get_all_by_filters_changes_since(self):
- i1 = self.create_instance_with_args(updated_at=
- '2013-12-05T15:03:25.000000')
- i2 = self.create_instance_with_args(updated_at=
- '2013-12-05T15:03:26.000000')
- changes_since = iso8601.parse_date('2013-12-05T15:03:25.000000')
- result = db.instance_get_all_by_filters(self.ctxt,
- {'changes-since':
- changes_since})
- self._assertEqualListsOfInstances([i1, i2], result)
-
- changes_since = iso8601.parse_date('2013-12-05T15:03:26.000000')
- result = db.instance_get_all_by_filters(self.ctxt,
- {'changes-since':
- changes_since})
- self._assertEqualListsOfInstances([i2], result)
-
- def test_instance_get_all_by_filters_exact_match(self):
- instance = self.create_instance_with_args(host='host1')
- self.create_instance_with_args(host='host12')
- result = db.instance_get_all_by_filters(self.ctxt,
- {'host': 'host1'})
- self._assertEqualListsOfInstances([instance], result)
-
- def test_instance_get_all_by_filters_metadata(self):
- instance = self.create_instance_with_args(metadata={'foo': 'bar'})
- self.create_instance_with_args()
- result = db.instance_get_all_by_filters(self.ctxt,
- {'metadata': {'foo': 'bar'}})
- self._assertEqualListsOfInstances([instance], result)
-
- def test_instance_get_all_by_filters_system_metadata(self):
- instance = self.create_instance_with_args(
- system_metadata={'foo': 'bar'})
- self.create_instance_with_args()
- result = db.instance_get_all_by_filters(self.ctxt,
- {'system_metadata': {'foo': 'bar'}})
- self._assertEqualListsOfInstances([instance], result)
-
- def test_instance_get_all_by_filters_unicode_value(self):
- instance = self.create_instance_with_args(display_name=u'test♥')
- result = db.instance_get_all_by_filters(self.ctxt,
- {'display_name': u'test'})
- self._assertEqualListsOfInstances([instance], result)
-
- def test_instance_get_all_by_filters_tags(self):
- instance = self.create_instance_with_args(
- metadata={'foo': 'bar'})
- self.create_instance_with_args()
- # For format 'tag-'
- result = db.instance_get_all_by_filters(
- self.ctxt, {'filter': [
- {'name': 'tag-key', 'value': 'foo'},
- {'name': 'tag-value', 'value': 'bar'},
- ]})
- self._assertEqualListsOfInstances([instance], result)
- # For format 'tag:'
- result = db.instance_get_all_by_filters(
- self.ctxt, {'filter': [
- {'name': 'tag:foo', 'value': 'bar'},
- ]})
- self._assertEqualListsOfInstances([instance], result)
- # For non-existent tag
- result = db.instance_get_all_by_filters(
- self.ctxt, {'filter': [
- {'name': 'tag:foo', 'value': 'barred'},
- ]})
- self.assertEqual([], result)
-
- # Confirm with deleted tags
- db.instance_metadata_delete(self.ctxt, instance['uuid'], 'foo')
- # For format 'tag-'
- result = db.instance_get_all_by_filters(
- self.ctxt, {'filter': [
- {'name': 'tag-key', 'value': 'foo'},
- ]})
- self.assertEqual([], result)
- result = db.instance_get_all_by_filters(
- self.ctxt, {'filter': [
- {'name': 'tag-value', 'value': 'bar'}
- ]})
- self.assertEqual([], result)
- # For format 'tag:'
- result = db.instance_get_all_by_filters(
- self.ctxt, {'filter': [
- {'name': 'tag:foo', 'value': 'bar'},
- ]})
- self.assertEqual([], result)
-
- def test_instance_get_by_uuid(self):
- inst = self.create_instance_with_args()
- result = db.instance_get_by_uuid(self.ctxt, inst['uuid'])
- self._assertEqualInstances(inst, result)
-
- def test_instance_get_by_uuid_join_empty(self):
- inst = self.create_instance_with_args()
- result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
- columns_to_join=[])
- meta = utils.metadata_to_dict(result['metadata'])
- self.assertEqual(meta, {})
- sys_meta = utils.metadata_to_dict(result['system_metadata'])
- self.assertEqual(sys_meta, {})
-
- def test_instance_get_by_uuid_join_meta(self):
- inst = self.create_instance_with_args()
- result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
- columns_to_join=['metadata'])
- meta = utils.metadata_to_dict(result['metadata'])
- self.assertEqual(meta, self.sample_data['metadata'])
- sys_meta = utils.metadata_to_dict(result['system_metadata'])
- self.assertEqual(sys_meta, {})
-
- def test_instance_get_by_uuid_join_sys_meta(self):
- inst = self.create_instance_with_args()
- result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
- columns_to_join=['system_metadata'])
- meta = utils.metadata_to_dict(result['metadata'])
- self.assertEqual(meta, {})
- sys_meta = utils.metadata_to_dict(result['system_metadata'])
- self.assertEqual(sys_meta, self.sample_data['system_metadata'])
-
- def test_instance_get_all_by_filters_deleted(self):
- inst1 = self.create_instance_with_args()
- inst2 = self.create_instance_with_args(reservation_id='b')
- db.instance_destroy(self.ctxt, inst1['uuid'])
- result = db.instance_get_all_by_filters(self.ctxt, {})
- self._assertEqualListsOfObjects([inst1, inst2], result,
- ignored_keys=['metadata', 'system_metadata',
- 'deleted', 'deleted_at', 'info_cache',
- 'pci_devices'])
-
- def test_instance_get_all_by_filters_deleted_and_soft_deleted(self):
- inst1 = self.create_instance_with_args()
- inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
- self.create_instance_with_args()
- db.instance_destroy(self.ctxt, inst1['uuid'])
- result = db.instance_get_all_by_filters(self.ctxt,
- {'deleted': True})
- self._assertEqualListsOfObjects([inst1, inst2], result,
- ignored_keys=['metadata', 'system_metadata',
- 'deleted', 'deleted_at', 'info_cache',
- 'pci_devices'])
-
- def test_instance_get_all_by_filters_deleted_no_soft_deleted(self):
- inst1 = self.create_instance_with_args()
- self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
- self.create_instance_with_args()
- db.instance_destroy(self.ctxt, inst1['uuid'])
- result = db.instance_get_all_by_filters(self.ctxt,
- {'deleted': True,
- 'soft_deleted': False})
- self._assertEqualListsOfObjects([inst1], result,
- ignored_keys=['deleted', 'deleted_at', 'metadata',
- 'system_metadata', 'info_cache', 'pci_devices'])
-
- def test_instance_get_all_by_filters_alive_and_soft_deleted(self):
- inst1 = self.create_instance_with_args()
- inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
- inst3 = self.create_instance_with_args()
- db.instance_destroy(self.ctxt, inst1['uuid'])
- result = db.instance_get_all_by_filters(self.ctxt,
- {'deleted': False,
- 'soft_deleted': True})
- self._assertEqualListsOfInstances([inst2, inst3], result)
-
- def test_instance_get_all_by_filters_not_deleted(self):
- inst1 = self.create_instance_with_args()
- self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
- inst3 = self.create_instance_with_args()
- inst4 = self.create_instance_with_args(vm_state=vm_states.ACTIVE)
- db.instance_destroy(self.ctxt, inst1['uuid'])
- result = db.instance_get_all_by_filters(self.ctxt,
- {'deleted': False})
- self.assertIsNone(inst3.vm_state)
- self._assertEqualListsOfInstances([inst3, inst4], result)
-
- def test_instance_get_all_by_filters_cleaned(self):
- inst1 = self.create_instance_with_args()
- inst2 = self.create_instance_with_args(reservation_id='b')
- db.instance_update(self.ctxt, inst1['uuid'], {'cleaned': 1})
- result = db.instance_get_all_by_filters(self.ctxt, {})
- self.assertEqual(2, len(result))
- self.assertIn(inst1['uuid'], [result[0]['uuid'], result[1]['uuid']])
- self.assertIn(inst2['uuid'], [result[0]['uuid'], result[1]['uuid']])
- if inst1['uuid'] == result[0]['uuid']:
- self.assertTrue(result[0]['cleaned'])
- self.assertFalse(result[1]['cleaned'])
- else:
- self.assertTrue(result[1]['cleaned'])
- self.assertFalse(result[0]['cleaned'])
-
- def test_instance_get_all_by_host_and_node_no_join(self):
- instance = self.create_instance_with_args()
- result = db.instance_get_all_by_host_and_node(self.ctxt, 'h1', 'n1')
- self.assertEqual(result[0]['uuid'], instance['uuid'])
- self.assertEqual(result[0]['system_metadata'], [])
-
    def test_instance_get_all_hung_in_rebooting(self):
        """instance_get_all_hung_in_rebooting returns only instances in
        task_state 'rebooting' whose updated_at is older than the given
        timeout (10 seconds here).
        """
        # Ensure no instances are returned.
        results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
        self.assertEqual([], results)

        # Ensure one rebooting instance with updated_at older than 10 seconds
        # is returned.
        instance = self.create_instance_with_args(task_state="rebooting",
                updated_at=datetime.datetime(2000, 1, 1, 12, 0, 0))
        results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
        self._assertEqualListsOfObjects([instance], results,
            ignored_keys=['task_state', 'info_cache', 'security_groups',
                          'metadata', 'system_metadata', 'pci_devices'])
        # Clear the task state so this instance no longer counts as
        # rebooting before the next check.
        db.instance_update(self.ctxt, instance['uuid'], {"task_state": None})

        # Ensure the newly rebooted instance is not returned.
        instance = self.create_instance_with_args(task_state="rebooting",
                                                  updated_at=timeutils.utcnow())
        results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
        self.assertEqual([], results)
-
- def test_instance_update_with_expected_vm_state(self):
- instance = self.create_instance_with_args(vm_state='foo')
- db.instance_update(self.ctxt, instance['uuid'], {'host': 'h1',
- 'expected_vm_state': ('foo', 'bar')})
-
- def test_instance_update_with_unexpected_vm_state(self):
- instance = self.create_instance_with_args(vm_state='foo')
- self.assertRaises(exception.UnexpectedVMStateError,
- db.instance_update, self.ctxt, instance['uuid'],
- {'host': 'h1', 'expected_vm_state': ('spam', 'bar')})
-
- def test_instance_update_with_instance_uuid(self):
- # test instance_update() works when an instance UUID is passed.
- ctxt = context.get_admin_context()
-
- # Create an instance with some metadata
- values = {'metadata': {'host': 'foo', 'key1': 'meow'},
- 'system_metadata': {'original_image_ref': 'blah'}}
- instance = db.instance_create(ctxt, values)
-
- # Update the metadata
- values = {'metadata': {'host': 'bar', 'key2': 'wuff'},
- 'system_metadata': {'original_image_ref': 'baz'}}
- db.instance_update(ctxt, instance['uuid'], values)
-
- # Retrieve the user-provided metadata to ensure it was successfully
- # updated
- instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
- self.assertEqual('bar', instance_meta['host'])
- self.assertEqual('wuff', instance_meta['key2'])
- self.assertNotIn('key1', instance_meta)
-
- # Retrieve the system metadata to ensure it was successfully updated
- system_meta = db.instance_system_metadata_get(ctxt, instance['uuid'])
- self.assertEqual('baz', system_meta['original_image_ref'])
-
- def test_delete_instance_metadata_on_instance_destroy(self):
- ctxt = context.get_admin_context()
- # Create an instance with some metadata
- values = {'metadata': {'host': 'foo', 'key1': 'meow'},
- 'system_metadata': {'original_image_ref': 'blah'}}
- instance = db.instance_create(ctxt, values)
- instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
- self.assertEqual('foo', instance_meta['host'])
- self.assertEqual('meow', instance_meta['key1'])
- db.instance_destroy(ctxt, instance['uuid'])
- instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
- # Make sure instance metadata is deleted as well
- self.assertEqual({}, instance_meta)
-
- def test_delete_instance_faults_on_instance_destroy(self):
- ctxt = context.get_admin_context()
- uuid = str(stdlib_uuid.uuid4())
- # Create faults
- db.instance_create(ctxt, {'uuid': uuid})
-
- fault_values = {
- 'message': 'message',
- 'details': 'detail',
- 'instance_uuid': uuid,
- 'code': 404,
- 'host': 'localhost'
- }
- fault = db.instance_fault_create(ctxt, fault_values)
-
- # Retrieve the fault to ensure it was successfully added
- faults = db.instance_fault_get_by_instance_uuids(ctxt, [uuid])
- self.assertEqual(1, len(faults[uuid]))
- self._assertEqualObjects(fault, faults[uuid][0])
- db.instance_destroy(ctxt, uuid)
- faults = db.instance_fault_get_by_instance_uuids(ctxt, [uuid])
- # Make sure instance faults is deleted as well
- self.assertEqual(0, len(faults[uuid]))
-
- def test_instance_update_with_and_get_original(self):
- instance = self.create_instance_with_args(vm_state='building')
- (old_ref, new_ref) = db.instance_update_and_get_original(self.ctxt,
- instance['uuid'], {'vm_state': 'needscoffee'})
- self.assertEqual('building', old_ref['vm_state'])
- self.assertEqual('needscoffee', new_ref['vm_state'])
-
- def test_instance_update_and_get_original_metadata(self):
- instance = self.create_instance_with_args()
- columns_to_join = ['metadata']
- (old_ref, new_ref) = db.instance_update_and_get_original(
- self.ctxt, instance['uuid'], {'vm_state': 'needscoffee'},
- columns_to_join=columns_to_join)
- meta = utils.metadata_to_dict(new_ref['metadata'])
- self.assertEqual(meta, self.sample_data['metadata'])
- sys_meta = utils.metadata_to_dict(new_ref['system_metadata'])
- self.assertEqual(sys_meta, {})
-
- def test_instance_update_and_get_original_metadata_none_join(self):
- instance = self.create_instance_with_args()
- (old_ref, new_ref) = db.instance_update_and_get_original(
- self.ctxt, instance['uuid'], {'metadata': {'mk1': 'mv3'}})
- meta = utils.metadata_to_dict(new_ref['metadata'])
- self.assertEqual(meta, {'mk1': 'mv3'})
-
    def test_instance_update_unique_name(self):
        """Exercise hostname uniqueness checks in instance_update across
        the osapi_compute_unique_server_name_scope settings: unset,
        'global' and 'project'.
        """
        context1 = context.RequestContext('user1', 'p1')
        context2 = context.RequestContext('user2', 'p2')

        inst1 = self.create_instance_with_args(context=context1,
                                               project_id='p1',
                                               hostname='fake_name1')
        inst2 = self.create_instance_with_args(context=context1,
                                               project_id='p1',
                                               hostname='fake_name2')
        inst3 = self.create_instance_with_args(context=context2,
                                               project_id='p2',
                                               hostname='fake_name3')
        # osapi_compute_unique_server_name_scope is unset so this should work:
        db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_name2'})
        db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_name1'})

        # With scope 'global' any duplicate should fail.
        self.flags(osapi_compute_unique_server_name_scope='global')
        self.assertRaises(exception.InstanceExists,
                          db.instance_update,
                          context1,
                          inst2['uuid'],
                          {'hostname': 'fake_name1'})
        # Even a duplicate in another project fails under 'global'.
        self.assertRaises(exception.InstanceExists,
                          db.instance_update,
                          context2,
                          inst3['uuid'],
                          {'hostname': 'fake_name1'})
        # But we should definitely be able to update our name if we aren't
        # really changing it.
        db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_NAME'})

        # With scope 'project' a duplicate in the project should fail:
        self.flags(osapi_compute_unique_server_name_scope='project')
        self.assertRaises(exception.InstanceExists, db.instance_update,
                          context1, inst2['uuid'], {'hostname': 'fake_NAME'})

        # With scope 'project' a duplicate in a different project should work:
        self.flags(osapi_compute_unique_server_name_scope='project')
        db.instance_update(context2, inst3['uuid'], {'hostname': 'fake_NAME'})
-
- def _test_instance_update_updates_metadata(self, metadata_type):
- instance = self.create_instance_with_args()
-
- def set_and_check(meta):
- inst = db.instance_update(self.ctxt, instance['uuid'],
- {metadata_type: dict(meta)})
- _meta = utils.metadata_to_dict(inst[metadata_type])
- self.assertEqual(meta, _meta)
-
- meta = {'speed': '88', 'units': 'MPH'}
- set_and_check(meta)
- meta['gigawatts'] = '1.21'
- set_and_check(meta)
- del meta['gigawatts']
- set_and_check(meta)
-
    def test_security_group_in_use(self):
        """Creating an instance with no security group succeeds.

        NOTE(review): this test only calls instance_create and asserts
        nothing explicitly -- presumably it guards against a regression
        where instance_create failed for an instance without security
        groups; confirm against the original change history.
        """
        db.instance_create(self.ctxt, dict(host='foo'))
-
- def test_instance_update_updates_system_metadata(self):
- # Ensure that system_metadata is updated during instance_update
- self._test_instance_update_updates_metadata('system_metadata')
-
- def test_instance_update_updates_metadata(self):
- # Ensure that metadata is updated during instance_update
- self._test_instance_update_updates_metadata('metadata')
-
    def test_instance_floating_address_get_all(self):
        """instance_floating_address_get_all returns every floating address
        whose fixed IP belongs to the given instance, and rejects invalid
        UUIDs.
        """
        ctxt = context.get_admin_context()

        instance1 = db.instance_create(ctxt, {'host': 'h1', 'hostname': 'n1'})
        instance2 = db.instance_create(ctxt, {'host': 'h2', 'hostname': 'n2'})

        # The first two fixed/floating pairs belong to instance1, the last
        # one to instance2.
        fixed_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
        float_addresses = ['2.1.1.1', '2.1.1.2', '2.1.1.3']
        instance_uuids = [instance1['uuid'], instance1['uuid'],
                          instance2['uuid']]

        for fixed_addr, float_addr, instance_uuid in zip(fixed_addresses,
                                                         float_addresses,
                                                         instance_uuids):
            db.fixed_ip_create(ctxt, {'address': fixed_addr,
                                      'instance_uuid': instance_uuid})
            fixed_id = db.fixed_ip_get_by_address(ctxt, fixed_addr)['id']
            db.floating_ip_create(ctxt,
                                  {'address': float_addr,
                                   'fixed_ip_id': fixed_id})

        real_float_addresses = \
                db.instance_floating_address_get_all(ctxt, instance_uuids[0])
        self.assertEqual(set(float_addresses[:2]), set(real_float_addresses))
        real_float_addresses = \
                db.instance_floating_address_get_all(ctxt, instance_uuids[2])
        self.assertEqual(set([float_addresses[2]]), set(real_float_addresses))

        # A malformed UUID is rejected outright.
        self.assertRaises(exception.InvalidUUID,
                          db.instance_floating_address_get_all,
                          ctxt, 'invalid_uuid')
-
- def test_instance_stringified_ips(self):
- instance = self.create_instance_with_args()
- instance = db.instance_update(
- self.ctxt, instance['uuid'],
- {'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
- 'access_ip_v6': netaddr.IPAddress('::1')})
- self.assertIsInstance(instance['access_ip_v4'], six.string_types)
- self.assertIsInstance(instance['access_ip_v6'], six.string_types)
- instance = db.instance_get_by_uuid(self.ctxt, instance['uuid'])
- self.assertIsInstance(instance['access_ip_v4'], six.string_types)
- self.assertIsInstance(instance['access_ip_v6'], six.string_types)
-
- def test_instance_destroy(self):
- ctxt = context.get_admin_context()
- values = {
- 'metadata': {'key': 'value'}
- }
- inst_uuid = self.create_instance_with_args(**values)['uuid']
- db.instance_destroy(ctxt, inst_uuid)
-
- self.assertRaises(exception.InstanceNotFound,
- db.instance_get, ctxt, inst_uuid)
- self.assertIsNone(db.instance_info_cache_get(ctxt, inst_uuid))
- self.assertEqual({}, db.instance_metadata_get(ctxt, inst_uuid))
-
- def test_instance_destroy_already_destroyed(self):
- ctxt = context.get_admin_context()
- instance = self.create_instance_with_args()
- db.instance_destroy(ctxt, instance['uuid'])
- self.assertRaises(exception.InstanceNotFound,
- db.instance_destroy, ctxt, instance['uuid'])
-
-
class InstanceMetadataTestCase(test.TestCase):

    """Tests for db.api.instance_metadata_* methods."""

    def setUp(self):
        super(InstanceMetadataTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def test_instance_metadata_get(self):
        # Metadata supplied at create time is returned verbatim.
        inst = db.instance_create(self.ctxt, {'metadata': {'key': 'value'}})
        fetched = db.instance_metadata_get(self.ctxt, inst['uuid'])
        self.assertEqual({'key': 'value'}, fetched)

    def test_instance_metadata_delete(self):
        # Deleting one key leaves the remaining metadata untouched.
        inst = db.instance_create(self.ctxt,
                                  {'metadata': {'key': 'val',
                                                'key1': 'val1'}})
        db.instance_metadata_delete(self.ctxt, inst['uuid'], 'key1')
        fetched = db.instance_metadata_get(self.ctxt, inst['uuid'])
        self.assertEqual({'key': 'val'}, fetched)

    def test_instance_metadata_update(self):
        inst = db.instance_create(self.ctxt, {'host': 'h1',
                                              'project_id': 'p1',
                                              'metadata': {'key': 'value'}})

        # delete=False merges the new key into the existing metadata.
        metadata = db.instance_metadata_update(
            self.ctxt, inst['uuid'],
            {'new_key': 'new_value'}, False)
        metadata = db.instance_metadata_get(self.ctxt, inst['uuid'])
        self.assertEqual(metadata, {'key': 'value', 'new_key': 'new_value'})

        # delete=True replaces the metadata wholesale.
        metadata = db.instance_metadata_update(
            self.ctxt, inst['uuid'],
            {'new_key': 'new_value'}, True)
        metadata = db.instance_metadata_get(self.ctxt, inst['uuid'])
        self.assertEqual(metadata, {'new_key': 'new_value'})
-
-
class InstanceExtraTestCase(test.TestCase):

    """Tests for the db.instance_extra_* methods."""

    def setUp(self):
        super(InstanceExtraTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        self.instance = db.instance_create(self.ctxt, {})

    def test_instance_extra_get_by_uuid_instance_create(self):
        # instance_create must have created the extras row implicitly.
        extra = db.instance_extra_get_by_instance_uuid(
            self.ctxt, self.instance['uuid'])
        self.assertIsNotNone(extra)

    def test_instance_extra_update_by_uuid(self):
        uuid = self.instance['uuid']
        db.instance_extra_update_by_uuid(self.ctxt, uuid,
                                         {'numa_topology': 'changed'})
        extra = db.instance_extra_get_by_instance_uuid(self.ctxt, uuid)
        self.assertEqual('changed', extra.numa_topology)

    def test_instance_extra_get_with_columns(self):
        # Only the requested columns are loaded on the returned row.
        extra = db.instance_extra_get_by_instance_uuid(
            self.ctxt, self.instance['uuid'],
            columns=['numa_topology'])
        self.assertNotIn('pci_requests', extra)
        self.assertIn('numa_topology', extra)
-
-
class ServiceTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Tests for the db.service_* methods."""

    def setUp(self):
        super(ServiceTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def _get_base_values(self):
        # A minimal, valid set of values for service_create.
        return {
            'host': 'fake_host',
            'binary': 'fake_binary',
            'topic': 'fake_topic',
            'report_count': 3,
            'disabled': False
        }

    def _create_service(self, values):
        """Create a service from the base values overridden by `values`."""
        v = self._get_base_values()
        v.update(values)
        return db.service_create(self.ctxt, v)

    def test_service_create(self):
        service = self._create_service({})
        self.assertIsNotNone(service['id'])
        # NOTE: items() instead of the Python-2-only iteritems() keeps the
        # test working on Python 3 as well; behavior is identical here.
        for key, value in self._get_base_values().items():
            self.assertEqual(value, service[key])

    def test_service_destroy(self):
        service1 = self._create_service({})
        service2 = self._create_service({'host': 'fake_host2'})

        db.service_destroy(self.ctxt, service1['id'])
        self.assertRaises(exception.ServiceNotFound,
                          db.service_get, self.ctxt, service1['id'])
        # Destroying one service must not affect another one.
        self._assertEqualObjects(db.service_get(self.ctxt, service2['id']),
                                 service2, ignored_keys=['compute_node'])

    def test_service_update(self):
        service = self._create_service({})
        new_values = {
            'host': 'fake_host1',
            'binary': 'fake_binary1',
            'topic': 'fake_topic1',
            'report_count': 4,
            'disabled': True
        }
        db.service_update(self.ctxt, service['id'], new_values)
        updated_service = db.service_get(self.ctxt, service['id'])
        # NOTE: items() instead of iteritems() for Python 3 compatibility.
        for key, value in new_values.items():
            self.assertEqual(value, updated_service[key])

    def test_service_update_not_found_exception(self):
        self.assertRaises(exception.ServiceNotFound,
                          db.service_update, self.ctxt, 100500, {})

    def test_service_get(self):
        service1 = self._create_service({})
        self._create_service({'host': 'some_other_fake_host'})
        real_service1 = db.service_get(self.ctxt, service1['id'])
        self._assertEqualObjects(service1, real_service1,
                                 ignored_keys=['compute_node'])

    def test_service_get_with_compute_node(self):
        service = self._create_service({})
        compute_values = dict(vcpus=2, memory_mb=1024, local_gb=2048,
                              vcpus_used=0, memory_mb_used=0,
                              local_gb_used=0, free_ram_mb=1024,
                              free_disk_gb=2048, hypervisor_type="xen",
                              hypervisor_version=1, cpu_info="",
                              running_vms=0, current_workload=0,
                              service_id=service['id'])
        compute = db.compute_node_create(self.ctxt, compute_values)
        real_service = db.service_get(self.ctxt, service['id'],
                                      with_compute_node=True)
        real_compute = real_service['compute_node'][0]
        self.assertEqual(compute['id'], real_compute['id'])

    def test_service_get_not_found_exception(self):
        self.assertRaises(exception.ServiceNotFound,
                          db.service_get, self.ctxt, 100500)

    def test_service_get_by_host_and_topic(self):
        service1 = self._create_service({'host': 'host1', 'topic': 'topic1'})
        self._create_service({'host': 'host2', 'topic': 'topic2'})

        real_service1 = db.service_get_by_host_and_topic(self.ctxt,
                                                         host='host1',
                                                         topic='topic1')
        self._assertEqualObjects(service1, real_service1)

    def test_service_get_all(self):
        values = [
            {'host': 'host1', 'topic': 'topic1'},
            {'host': 'host2', 'topic': 'topic2'},
            {'disabled': True}
        ]
        services = [self._create_service(vals) for vals in values]
        disabled_services = [services[-1]]
        non_disabled_services = services[:-1]

        # The disabled argument selects all / only disabled / only enabled.
        compares = [
            (services, db.service_get_all(self.ctxt)),
            (disabled_services, db.service_get_all(self.ctxt, True)),
            (non_disabled_services, db.service_get_all(self.ctxt, False))
        ]
        for comp in compares:
            self._assertEqualListsOfObjects(*comp)

    def test_service_get_all_by_topic(self):
        values = [
            {'host': 'host1', 'topic': 't1'},
            {'host': 'host2', 'topic': 't1'},
            {'disabled': True, 'topic': 't1'},
            {'host': 'host3', 'topic': 't2'}
        ]
        services = [self._create_service(vals) for vals in values]
        # Only the two enabled services with the matching topic.
        expected = services[:2]
        real = db.service_get_all_by_topic(self.ctxt, 't1')
        self._assertEqualListsOfObjects(expected, real)

    def test_service_get_all_by_host(self):
        values = [
            {'host': 'host1', 'topic': 't11', 'binary': 'b11'},
            {'host': 'host1', 'topic': 't12', 'binary': 'b12'},
            {'host': 'host2', 'topic': 't1'},
            {'host': 'host3', 'topic': 't1'}
        ]
        services = [self._create_service(vals) for vals in values]

        expected = services[:2]
        real = db.service_get_all_by_host(self.ctxt, 'host1')
        self._assertEqualListsOfObjects(expected, real)

    def test_service_get_by_compute_host(self):
        values = [
            {'host': 'host1', 'topic': CONF.compute_topic},
            {'host': 'host2', 'topic': 't1'},
            {'host': 'host3', 'topic': CONF.compute_topic}
        ]
        services = [self._create_service(vals) for vals in values]

        real_service = db.service_get_by_compute_host(self.ctxt, 'host1')
        self._assertEqualObjects(services[0], real_service,
                                 ignored_keys=['compute_node'])

        self.assertRaises(exception.ComputeHostNotFound,
                          db.service_get_by_compute_host,
                          self.ctxt, 'non-exists-host')

    def test_service_get_by_compute_host_not_found(self):
        self.assertRaises(exception.ComputeHostNotFound,
                          db.service_get_by_compute_host,
                          self.ctxt, 'non-exists-host')

    def test_service_get_by_args(self):
        values = [
            {'host': 'host1', 'binary': 'a'},
            {'host': 'host2', 'binary': 'b'}
        ]
        services = [self._create_service(vals) for vals in values]

        service1 = db.service_get_by_args(self.ctxt, 'host1', 'a')
        self._assertEqualObjects(services[0], service1)

        service2 = db.service_get_by_args(self.ctxt, 'host2', 'b')
        self._assertEqualObjects(services[1], service2)

    def test_service_get_by_args_not_found_exception(self):
        self.assertRaises(exception.HostBinaryNotFound,
                          db.service_get_by_args,
                          self.ctxt, 'non-exists-host', 'a')

    def test_service_binary_exists_exception(self):
        db.service_create(self.ctxt, self._get_base_values())
        values = self._get_base_values()
        values.update({'topic': 'top1'})
        # Same host+binary with a different topic must be rejected.
        self.assertRaises(exception.ServiceBinaryExists, db.service_create,
                          self.ctxt, values)

    def test_service_topic_exists_exceptions(self):
        db.service_create(self.ctxt, self._get_base_values())
        values = self._get_base_values()
        values.update({'binary': 'bin1'})
        # Same host+topic with a different binary must be rejected.
        self.assertRaises(exception.ServiceTopicExists, db.service_create,
                          self.ctxt, values)
-
-
class BaseInstanceTypeTestCase(test.TestCase, ModelsObjectComparatorMixin):

    """Shared fixtures for the flavor (instance type) DB API tests."""

    def setUp(self):
        super(BaseInstanceTypeTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        self.user_ctxt = context.RequestContext('user', 'user')

    def _get_base_values(self):
        # A minimal, valid set of flavor attributes.
        return {
            'name': 'fake_name',
            'memory_mb': 512,
            'vcpus': 1,
            'root_gb': 10,
            'ephemeral_gb': 10,
            'flavorid': 'fake_flavor',
            'swap': 0,
            'rxtx_factor': 0.5,
            'vcpu_weight': 1,
            'disabled': False,
            'is_public': True
        }

    def _create_flavor(self, values, projects=None):
        """Create a flavor from the base values overridden by `values`."""
        flavor_values = self._get_base_values()
        flavor_values.update(values)
        return db.flavor_create(self.ctxt, flavor_values, projects)
-
-
class InstanceActionTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Tests for the db.action_* and db.action_event_* methods."""

    # DB bookkeeping columns that are never compared against input values.
    IGNORED_FIELDS = [
        'id',
        'created_at',
        'updated_at',
        'deleted_at',
        'deleted'
    ]

    def setUp(self):
        super(InstanceActionTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def _create_action_values(self, uuid, action='run_instance',
                              ctxt=None, extra=None):
        """Build a values dict for db.action_start, creating the backing
        instance row as a side effect.
        """
        if ctxt is None:
            ctxt = self.ctxt

        db.instance_create(ctxt, {'uuid': uuid})

        values = {
            'action': action,
            'instance_uuid': uuid,
            'request_id': ctxt.request_id,
            'user_id': ctxt.user_id,
            'project_id': ctxt.project_id,
            'start_time': timeutils.utcnow(),
            'message': 'action-message'
        }
        if extra is not None:
            values.update(extra)
        return values

    def _create_event_values(self, uuid, event='schedule',
                             ctxt=None, extra=None):
        """Build a values dict for db.action_event_start."""
        if ctxt is None:
            ctxt = self.ctxt
        values = {
            'event': event,
            'instance_uuid': uuid,
            'request_id': ctxt.request_id,
            'start_time': timeutils.utcnow(),
            'host': 'fake-host',
            'details': 'fake-details',
        }
        if extra is not None:
            values.update(extra)
        return values

    def _assertActionSaved(self, action, uuid):
        """Retrieve the action to ensure it was successfully added."""
        actions = db.actions_get(self.ctxt, uuid)
        self.assertEqual(1, len(actions))
        self._assertEqualObjects(action, actions[0])

    def _assertActionEventSaved(self, event, action_id):
        # Retrieve the event to ensure it was successfully added
        events = db.action_events_get(self.ctxt, action_id)
        self.assertEqual(1, len(events))
        self._assertEqualObjects(event, events[0],
                                 ['instance_uuid', 'request_id'])

    def test_instance_action_start(self):
        """Create an instance action."""
        uuid = str(stdlib_uuid.uuid4())

        action_values = self._create_action_values(uuid)
        action = db.action_start(self.ctxt, action_values)

        # finish_time is not populated until db.action_finish runs.
        ignored_keys = self.IGNORED_FIELDS + ['finish_time']
        self._assertEqualObjects(action_values, action, ignored_keys)

        self._assertActionSaved(action, uuid)

    def test_instance_action_finish(self):
        """Create an instance action."""
        uuid = str(stdlib_uuid.uuid4())

        action_values = self._create_action_values(uuid)
        db.action_start(self.ctxt, action_values)

        action_values['finish_time'] = timeutils.utcnow()
        action = db.action_finish(self.ctxt, action_values)
        self._assertEqualObjects(action_values, action, self.IGNORED_FIELDS)

        self._assertActionSaved(action, uuid)

    def test_instance_action_finish_without_started_event(self):
        """Create an instance finish action."""
        uuid = str(stdlib_uuid.uuid4())

        action_values = self._create_action_values(uuid)
        action_values['finish_time'] = timeutils.utcnow()
        # Finishing an action that was never started must fail.
        self.assertRaises(exception.InstanceActionNotFound, db.action_finish,
                          self.ctxt, action_values)

    def test_instance_actions_get_by_instance(self):
        """Ensure we can get actions by UUID."""
        uuid1 = str(stdlib_uuid.uuid4())

        expected = []

        action_values = self._create_action_values(uuid1)
        action = db.action_start(self.ctxt, action_values)
        expected.append(action)

        action_values['action'] = 'resize'
        action = db.action_start(self.ctxt, action_values)
        expected.append(action)

        # Create some extra actions
        uuid2 = str(stdlib_uuid.uuid4())
        ctxt2 = context.get_admin_context()
        action_values = self._create_action_values(uuid2, 'reboot', ctxt2)
        db.action_start(ctxt2, action_values)
        db.action_start(ctxt2, action_values)

        # Retrieve the action to ensure it was successfully added
        actions = db.actions_get(self.ctxt, uuid1)
        self._assertEqualListsOfObjects(expected, actions)

    def test_instance_actions_get_are_in_order(self):
        """Ensure retrieved actions are in order."""
        uuid1 = str(stdlib_uuid.uuid4())

        extra = {
            'created_at': timeutils.utcnow()
        }

        action_values = self._create_action_values(uuid1, extra=extra)
        action1 = db.action_start(self.ctxt, action_values)

        action_values['action'] = 'delete'
        action2 = db.action_start(self.ctxt, action_values)

        actions = db.actions_get(self.ctxt, uuid1)
        self.assertEqual(2, len(actions))

        # The later-started action is expected first.
        self._assertEqualOrderedListOfObjects([action2, action1], actions)

    def test_instance_action_get_by_instance_and_action(self):
        """Ensure we can get an action by instance UUID and action id."""
        ctxt2 = context.get_admin_context()
        uuid1 = str(stdlib_uuid.uuid4())
        uuid2 = str(stdlib_uuid.uuid4())

        action_values = self._create_action_values(uuid1)
        db.action_start(self.ctxt, action_values)
        request_id = action_values['request_id']

        # NOTE(rpodolyaka): ensure we use a different req id for the 2nd req
        action_values['action'] = 'resize'
        action_values['request_id'] = 'req-00000000-7522-4d99-7ff-111111111111'
        db.action_start(self.ctxt, action_values)

        action_values = self._create_action_values(uuid2, 'reboot', ctxt2)
        db.action_start(ctxt2, action_values)
        db.action_start(ctxt2, action_values)

        # Only the first action matches this uuid + request id pair.
        action = db.action_get_by_request_id(self.ctxt, uuid1, request_id)
        self.assertEqual('run_instance', action['action'])
        self.assertEqual(self.ctxt.request_id, action['request_id'])

    def test_instance_action_event_start(self):
        """Create an instance action event."""
        uuid = str(stdlib_uuid.uuid4())

        action_values = self._create_action_values(uuid)
        action = db.action_start(self.ctxt, action_values)

        event_values = self._create_event_values(uuid)
        event = db.action_event_start(self.ctxt, event_values)
        event_values['action_id'] = action['id']
        # finish_time/traceback/result are only set by action_event_finish.
        ignored = self.IGNORED_FIELDS + ['finish_time', 'traceback', 'result']
        self._assertEqualObjects(event_values, event, ignored)

        self._assertActionEventSaved(event, action['id'])

    def test_instance_action_event_start_without_action(self):
        """Create an instance action event."""
        uuid = str(stdlib_uuid.uuid4())

        # Starting an event without a parent action must fail.
        event_values = self._create_event_values(uuid)
        self.assertRaises(exception.InstanceActionNotFound,
                          db.action_event_start, self.ctxt, event_values)

    def test_instance_action_event_finish_without_started_event(self):
        """Finish an instance action event."""
        uuid = str(stdlib_uuid.uuid4())

        db.action_start(self.ctxt, self._create_action_values(uuid))

        event_values = {
            'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
            'result': 'Success'
        }
        event_values = self._create_event_values(uuid, extra=event_values)
        # The action exists but the event was never started.
        self.assertRaises(exception.InstanceActionEventNotFound,
                          db.action_event_finish, self.ctxt, event_values)

    def test_instance_action_event_finish_without_action(self):
        """Finish an instance action event."""
        uuid = str(stdlib_uuid.uuid4())

        event_values = {
            'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
            'result': 'Success'
        }
        event_values = self._create_event_values(uuid, extra=event_values)
        # Neither the action nor the event exists.
        self.assertRaises(exception.InstanceActionNotFound,
                          db.action_event_finish, self.ctxt, event_values)

    def test_instance_action_event_finish_success(self):
        """Finish an instance action event."""
        uuid = str(stdlib_uuid.uuid4())

        action = db.action_start(self.ctxt, self._create_action_values(uuid))

        db.action_event_start(self.ctxt, self._create_event_values(uuid))

        event_values = {
            'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
            'result': 'Success'
        }
        event_values = self._create_event_values(uuid, extra=event_values)
        event = db.action_event_finish(self.ctxt, event_values)

        self._assertActionEventSaved(event, action['id'])
        action = db.action_get_by_request_id(self.ctxt, uuid,
                                             self.ctxt.request_id)
        # A successful event must not flip the action message to 'Error'.
        self.assertNotEqual('Error', action['message'])

    def test_instance_action_event_finish_error(self):
        """Finish an instance action event with an error."""
        uuid = str(stdlib_uuid.uuid4())

        action = db.action_start(self.ctxt, self._create_action_values(uuid))

        db.action_event_start(self.ctxt, self._create_event_values(uuid))

        event_values = {
            'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
            'result': 'Error'
        }
        event_values = self._create_event_values(uuid, extra=event_values)
        event = db.action_event_finish(self.ctxt, event_values)

        self._assertActionEventSaved(event, action['id'])
        action = db.action_get_by_request_id(self.ctxt, uuid,
                                             self.ctxt.request_id)
        # An 'Error' event result must be propagated to the action message.
        self.assertEqual('Error', action['message'])

    def test_instance_action_and_event_start_string_time(self):
        """Create an instance action and event with a string start_time."""
        uuid = str(stdlib_uuid.uuid4())

        action = db.action_start(self.ctxt, self._create_action_values(uuid))

        event_values = {'start_time': timeutils.strtime(timeutils.utcnow())}
        event_values = self._create_event_values(uuid, extra=event_values)
        event = db.action_event_start(self.ctxt, event_values)

        self._assertActionEventSaved(event, action['id'])

    def test_instance_action_events_get_are_in_order(self):
        """Ensure retrieved action events are in order."""
        uuid1 = str(stdlib_uuid.uuid4())

        action = db.action_start(self.ctxt,
                                 self._create_action_values(uuid1))

        extra1 = {
            'created_at': timeutils.utcnow()
        }
        extra2 = {
            'created_at': timeutils.utcnow() + datetime.timedelta(seconds=5)
        }

        event_val1 = self._create_event_values(uuid1, 'schedule', extra=extra1)
        event_val2 = self._create_event_values(uuid1, 'run', extra=extra1)
        event_val3 = self._create_event_values(uuid1, 'stop', extra=extra2)

        event1 = db.action_event_start(self.ctxt, event_val1)
        event2 = db.action_event_start(self.ctxt, event_val2)
        event3 = db.action_event_start(self.ctxt, event_val3)

        events = db.action_events_get(self.ctxt, action['id'])
        self.assertEqual(3, len(events))

        # The most recently started events are expected first.
        self._assertEqualOrderedListOfObjects([event3, event2, event1], events,
                                              ['instance_uuid', 'request_id'])

    def test_instance_action_event_get_by_id(self):
        """Get a specific instance action event."""
        ctxt2 = context.get_admin_context()
        uuid1 = str(stdlib_uuid.uuid4())
        uuid2 = str(stdlib_uuid.uuid4())

        action = db.action_start(self.ctxt,
                                 self._create_action_values(uuid1))

        db.action_start(ctxt2,
                        self._create_action_values(uuid2, 'reboot', ctxt2))

        event = db.action_event_start(self.ctxt,
                                      self._create_event_values(uuid1))

        event_values = self._create_event_values(uuid2, 'reboot', ctxt2)
        db.action_event_start(ctxt2, event_values)

        # Retrieve the event to ensure it was successfully added
        saved_event = db.action_event_get_by_id(self.ctxt,
                                                action['id'],
                                                event['id'])
        self._assertEqualObjects(event, saved_event,
                                 ['instance_uuid', 'request_id'])
-
-
class InstanceFaultTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Tests for the db.instance_fault_* methods."""

    def setUp(self):
        super(InstanceFaultTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def _create_fault_values(self, uuid, code=404):
        # A minimal, valid set of values for instance_fault_create.
        return {
            'message': 'message',
            'details': 'detail',
            'instance_uuid': uuid,
            'code': code,
            'host': 'localhost'
        }

    def test_instance_fault_create(self):
        """Ensure we can create an instance fault."""
        uuid = str(stdlib_uuid.uuid4())

        # Ensure no faults registered for this instance
        faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
        self.assertEqual(0, len(faults[uuid]))

        # Create a fault
        fault_values = self._create_fault_values(uuid)
        db.instance_create(self.ctxt, {'uuid': uuid})
        fault = db.instance_fault_create(self.ctxt, fault_values)

        ignored_keys = ['deleted', 'created_at', 'updated_at',
                        'deleted_at', 'id']
        self._assertEqualObjects(fault_values, fault, ignored_keys)

        # Retrieve the fault to ensure it was successfully added
        faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
        self.assertEqual(1, len(faults[uuid]))
        self._assertEqualObjects(fault, faults[uuid][0])

    def test_instance_fault_get_by_instance(self):
        """Ensure we can retrieve faults for instance."""
        uuids = [str(stdlib_uuid.uuid4()), str(stdlib_uuid.uuid4())]
        fault_codes = [404, 500]
        expected = {}

        # Create two faults (one per code) for each instance.
        for uuid in uuids:
            db.instance_create(self.ctxt, {'uuid': uuid})

            expected[uuid] = []
            for code in fault_codes:
                fault_values = self._create_fault_values(uuid, code)
                fault = db.instance_fault_create(self.ctxt, fault_values)
                expected[uuid].append(fault)

        # Ensure faults are saved
        faults = db.instance_fault_get_by_instance_uuids(self.ctxt, uuids)
        self.assertEqual(len(expected), len(faults))
        for uuid in uuids:
            self._assertEqualListsOfObjects(expected[uuid], faults[uuid])

    def test_instance_faults_get_by_instance_uuids_no_faults(self):
        uuid = str(stdlib_uuid.uuid4())
        # A mapping of the uuid to an empty list (not None) is expected
        # when no faults exist.
        faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
        expected = {uuid: []}
        self.assertEqual(expected, faults)

    def test_instance_faults_get_by_instance_uuids_no_uuids(self):
        # Stub Query.filter with no recorded expectations: if the DB layer
        # issued any filtered query for an empty uuid list, mox would fail
        # the test.
        self.mox.StubOutWithMock(query.Query, 'filter')
        self.mox.ReplayAll()
        faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [])
        self.assertEqual({}, faults)
-
-
-class InstanceTypeTestCase(BaseInstanceTypeTestCase):
-
- def test_flavor_create(self):
- flavor = self._create_flavor({})
- ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
- 'created_at', 'extra_specs']
-
- self.assertIsNotNone(flavor['id'])
- self._assertEqualObjects(flavor, self._get_base_values(),
- ignored_keys)
-
- def test_flavor_create_with_projects(self):
- projects = ['fake-project1', 'fake-project2']
- flavor = self._create_flavor({}, projects + ['fake-project2'])
- access = db.flavor_access_get_by_flavor_id(self.ctxt,
- flavor['flavorid'])
- self.assertEqual(projects, [x.project_id for x in access])
-
- def test_flavor_destroy(self):
- specs1 = {'a': '1', 'b': '2'}
- flavor1 = self._create_flavor({'name': 'name1', 'flavorid': 'a1',
- 'extra_specs': specs1})
- specs2 = {'c': '4', 'd': '3'}
- flavor2 = self._create_flavor({'name': 'name2', 'flavorid': 'a2',
- 'extra_specs': specs2})
-
- db.flavor_destroy(self.ctxt, 'name1')
-
- self.assertRaises(exception.FlavorNotFound,
- db.flavor_get, self.ctxt, flavor1['id'])
- real_specs1 = db.flavor_extra_specs_get(self.ctxt, flavor1['flavorid'])
- self._assertEqualObjects(real_specs1, {})
-
- r_flavor2 = db.flavor_get(self.ctxt, flavor2['id'])
- self._assertEqualObjects(flavor2, r_flavor2, 'extra_specs')
-
- def test_flavor_destroy_not_found(self):
- self.assertRaises(exception.FlavorNotFound,
- db.flavor_destroy, self.ctxt, 'nonexists')
-
- def test_flavor_create_duplicate_name(self):
- self._create_flavor({})
- self.assertRaises(exception.FlavorExists,
- self._create_flavor,
- {'flavorid': 'some_random_flavor'})
-
- def test_flavor_create_duplicate_flavorid(self):
- self._create_flavor({})
- self.assertRaises(exception.FlavorIdExists,
- self._create_flavor,
- {'name': 'some_random_name'})
-
- def test_flavor_create_with_extra_specs(self):
- extra_specs = dict(a='abc', b='def', c='ghi')
- flavor = self._create_flavor({'extra_specs': extra_specs})
- ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
- 'created_at', 'extra_specs']
-
- self._assertEqualObjects(flavor, self._get_base_values(),
- ignored_keys)
- self._assertEqualObjects(extra_specs, flavor['extra_specs'])
-
- def test_flavor_get_all(self):
- # NOTE(boris-42): Remove base instance types
- for it in db.flavor_get_all(self.ctxt):
- db.flavor_destroy(self.ctxt, it['name'])
-
- flavors = [
- {'root_gb': 600, 'memory_mb': 100, 'disabled': True,
- 'is_public': True, 'name': 'a1', 'flavorid': 'f1'},
- {'root_gb': 500, 'memory_mb': 200, 'disabled': True,
- 'is_public': True, 'name': 'a2', 'flavorid': 'f2'},
- {'root_gb': 400, 'memory_mb': 300, 'disabled': False,
- 'is_public': True, 'name': 'a3', 'flavorid': 'f3'},
- {'root_gb': 300, 'memory_mb': 400, 'disabled': False,
- 'is_public': False, 'name': 'a4', 'flavorid': 'f4'},
- {'root_gb': 200, 'memory_mb': 500, 'disabled': True,
- 'is_public': False, 'name': 'a5', 'flavorid': 'f5'},
- {'root_gb': 100, 'memory_mb': 600, 'disabled': True,
- 'is_public': False, 'name': 'a6', 'flavorid': 'f6'}
- ]
- flavors = [self._create_flavor(it) for it in flavors]
-
- lambda_filters = {
- 'min_memory_mb': lambda it, v: it['memory_mb'] >= v,
- 'min_root_gb': lambda it, v: it['root_gb'] >= v,
- 'disabled': lambda it, v: it['disabled'] == v,
- 'is_public': lambda it, v: (v is None or it['is_public'] == v)
- }
-
- mem_filts = [{'min_memory_mb': x} for x in [100, 350, 550, 650]]
- root_filts = [{'min_root_gb': x} for x in [100, 350, 550, 650]]
- disabled_filts = [{'disabled': x} for x in [True, False]]
- is_public_filts = [{'is_public': x} for x in [True, False, None]]
-
- def assert_multi_filter_flavor_get(filters=None):
- if filters is None:
- filters = {}
-
- expected_it = flavors
- for name, value in filters.iteritems():
- filt = lambda it: lambda_filters[name](it, value)
- expected_it = filter(filt, expected_it)
-
- real_it = db.flavor_get_all(self.ctxt, filters=filters)
- self._assertEqualListsOfObjects(expected_it, real_it)
-
- # no filter
- assert_multi_filter_flavor_get()
-
- # test only with one filter
- for filt in mem_filts:
- assert_multi_filter_flavor_get(filt)
- for filt in root_filts:
- assert_multi_filter_flavor_get(filt)
- for filt in disabled_filts:
- assert_multi_filter_flavor_get(filt)
- for filt in is_public_filts:
- assert_multi_filter_flavor_get(filt)
-
- # test all filters together
- for mem in mem_filts:
- for root in root_filts:
- for disabled in disabled_filts:
- for is_public in is_public_filts:
- filts = [f.items() for f in
- [mem, root, disabled, is_public]]
- filts = dict(reduce(lambda x, y: x + y, filts, []))
- assert_multi_filter_flavor_get(filts)
-
- def test_flavor_get_all_limit_sort(self):
- def assert_sorted_by_key_dir(sort_key, asc=True):
- sort_dir = 'asc' if asc else 'desc'
- results = db.flavor_get_all(self.ctxt, sort_key='name',
- sort_dir=sort_dir)
- # Manually sort the results as we would expect them
- expected_results = sorted(results,
- key=lambda item: item['name'],
- reverse=(not asc))
- self.assertEqual(expected_results, results)
-
- def assert_sorted_by_key_both_dir(sort_key):
- assert_sorted_by_key_dir(sort_key, True)
- assert_sorted_by_key_dir(sort_key, False)
-
- for attr in ['memory_mb', 'root_gb', 'deleted_at', 'name', 'deleted',
- 'created_at', 'ephemeral_gb', 'updated_at', 'disabled',
- 'vcpus', 'swap', 'rxtx_factor', 'is_public', 'flavorid',
- 'vcpu_weight', 'id']:
- assert_sorted_by_key_both_dir(attr)
-
- def test_flavor_get_all_limit(self):
- limited_flavors = db.flavor_get_all(self.ctxt, limit=2)
- self.assertEqual(2, len(limited_flavors))
-
- def test_flavor_get_all_list_marker(self):
- all_flavors = db.flavor_get_all(self.ctxt)
-
- # Set the 3rd result as the marker
- marker_flavorid = all_flavors[2]['flavorid']
- marked_flavors = db.flavor_get_all(self.ctxt, marker=marker_flavorid)
- # We expect everything /after/ the 3rd result
- expected_results = all_flavors[3:]
- self.assertEqual(expected_results, marked_flavors)
-
- def test_flavor_get_all_marker_not_found(self):
- self.assertRaises(exception.MarkerNotFound,
- db.flavor_get_all, self.ctxt, marker='invalid')
-
- def test_flavor_get(self):
- flavors = [{'name': 'abc', 'flavorid': '123'},
- {'name': 'def', 'flavorid': '456'},
- {'name': 'ghi', 'flavorid': '789'}]
- flavors = [self._create_flavor(t) for t in flavors]
-
- for flavor in flavors:
- flavor_by_id = db.flavor_get(self.ctxt, flavor['id'])
- self._assertEqualObjects(flavor, flavor_by_id)
-
- def test_flavor_get_non_public(self):
- flavor = self._create_flavor({'name': 'abc', 'flavorid': '123',
- 'is_public': False})
-
- # Admin can see it
- flavor_by_id = db.flavor_get(self.ctxt, flavor['id'])
- self._assertEqualObjects(flavor, flavor_by_id)
-
- # Regular user can not
- self.assertRaises(exception.FlavorNotFound, db.flavor_get,
- self.user_ctxt, flavor['id'])
-
- # Regular user can see it after being granted access
- db.flavor_access_add(self.ctxt, flavor['flavorid'],
- self.user_ctxt.project_id)
- flavor_by_id = db.flavor_get(self.user_ctxt, flavor['id'])
- self._assertEqualObjects(flavor, flavor_by_id)
-
- def test_flavor_get_by_name(self):
- flavors = [{'name': 'abc', 'flavorid': '123'},
- {'name': 'def', 'flavorid': '456'},
- {'name': 'ghi', 'flavorid': '789'}]
- flavors = [self._create_flavor(t) for t in flavors]
-
- for flavor in flavors:
- flavor_by_name = db.flavor_get_by_name(self.ctxt, flavor['name'])
- self._assertEqualObjects(flavor, flavor_by_name)
-
- def test_flavor_get_by_name_not_found(self):
- self._create_flavor({})
- self.assertRaises(exception.FlavorNotFoundByName,
- db.flavor_get_by_name, self.ctxt, 'nonexists')
-
- def test_flavor_get_by_name_non_public(self):
- flavor = self._create_flavor({'name': 'abc', 'flavorid': '123',
- 'is_public': False})
-
- # Admin can see it
- flavor_by_name = db.flavor_get_by_name(self.ctxt, flavor['name'])
- self._assertEqualObjects(flavor, flavor_by_name)
-
- # Regular user can not
- self.assertRaises(exception.FlavorNotFoundByName,
- db.flavor_get_by_name, self.user_ctxt,
- flavor['name'])
-
- # Regular user can see it after being granted access
- db.flavor_access_add(self.ctxt, flavor['flavorid'],
- self.user_ctxt.project_id)
- flavor_by_name = db.flavor_get_by_name(self.user_ctxt, flavor['name'])
- self._assertEqualObjects(flavor, flavor_by_name)
-
- def test_flavor_get_by_flavor_id(self):
- flavors = [{'name': 'abc', 'flavorid': '123'},
- {'name': 'def', 'flavorid': '456'},
- {'name': 'ghi', 'flavorid': '789'}]
- flavors = [self._create_flavor(t) for t in flavors]
-
- for flavor in flavors:
- params = (self.ctxt, flavor['flavorid'])
- flavor_by_flavorid = db.flavor_get_by_flavor_id(*params)
- self._assertEqualObjects(flavor, flavor_by_flavorid)
-
- def test_flavor_get_by_flavor_not_found(self):
- self._create_flavor({})
- self.assertRaises(exception.FlavorNotFound,
- db.flavor_get_by_flavor_id,
- self.ctxt, 'nonexists')
-
- def test_flavor_get_by_flavor_id_non_public(self):
- flavor = self._create_flavor({'name': 'abc', 'flavorid': '123',
- 'is_public': False})
-
- # Admin can see it
- flavor_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
- flavor['flavorid'])
- self._assertEqualObjects(flavor, flavor_by_fid)
-
- # Regular user can not
- self.assertRaises(exception.FlavorNotFound,
- db.flavor_get_by_flavor_id, self.user_ctxt,
- flavor['flavorid'])
-
- # Regular user can see it after being granted access
- db.flavor_access_add(self.ctxt, flavor['flavorid'],
- self.user_ctxt.project_id)
- flavor_by_fid = db.flavor_get_by_flavor_id(self.user_ctxt,
- flavor['flavorid'])
- self._assertEqualObjects(flavor, flavor_by_fid)
-
- def test_flavor_get_by_flavor_id_deleted(self):
- flavor = self._create_flavor({'name': 'abc', 'flavorid': '123'})
-
- db.flavor_destroy(self.ctxt, 'abc')
-
- flavor_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
- flavor['flavorid'], read_deleted='yes')
- self.assertEqual(flavor['id'], flavor_by_fid['id'])
-
- def test_flavor_get_by_flavor_id_deleted_and_recreat(self):
- # NOTE(wingwj): Aims to test difference between mysql and postgresql
- # for bug 1288636
- param_dict = {'name': 'abc', 'flavorid': '123'}
-
- self._create_flavor(param_dict)
- db.flavor_destroy(self.ctxt, 'abc')
-
- # Recreate the flavor with the same params
- flavor = self._create_flavor(param_dict)
-
- flavor_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
- flavor['flavorid'], read_deleted='yes')
- self.assertEqual(flavor['id'], flavor_by_fid['id'])
-
-
-class InstanceTypeExtraSpecsTestCase(BaseInstanceTypeTestCase):
-
- def setUp(self):
- super(InstanceTypeExtraSpecsTestCase, self).setUp()
- values = ({'name': 'n1', 'flavorid': 'f1',
- 'extra_specs': dict(a='a', b='b', c='c')},
- {'name': 'n2', 'flavorid': 'f2',
- 'extra_specs': dict(d='d', e='e', f='f')})
-
- # NOTE(boris-42): We have already tested flavor_create method
- # with extra_specs in InstanceTypeTestCase.
- self.flavors = [self._create_flavor(v) for v in values]
-
- def test_flavor_extra_specs_get(self):
- for it in self.flavors:
- real_specs = db.flavor_extra_specs_get(self.ctxt, it['flavorid'])
- self._assertEqualObjects(it['extra_specs'], real_specs)
-
- def test_flavor_extra_specs_get_item(self):
- expected = dict(f1=dict(a='a', b='b', c='c'),
- f2=dict(d='d', e='e', f='f'))
-
- for flavor, specs in expected.iteritems():
- for key, val in specs.iteritems():
- spec = db.flavor_extra_specs_get_item(self.ctxt, flavor, key)
- self.assertEqual(spec[key], val)
-
- def test_flavor_extra_specs_delete(self):
- for it in self.flavors:
- specs = it['extra_specs']
- key = specs.keys()[0]
- del specs[key]
- db.flavor_extra_specs_delete(self.ctxt, it['flavorid'], key)
- real_specs = db.flavor_extra_specs_get(self.ctxt, it['flavorid'])
- self._assertEqualObjects(it['extra_specs'], real_specs)
-
- def test_flavor_extra_specs_delete_failed(self):
- for it in self.flavors:
- self.assertRaises(exception.FlavorExtraSpecsNotFound,
- db.flavor_extra_specs_delete,
- self.ctxt, it['flavorid'], 'dummy')
-
- def test_flavor_extra_specs_update_or_create(self):
- for it in self.flavors:
- current_specs = it['extra_specs']
- current_specs.update(dict(b='b1', c='c1', d='d1', e='e1'))
- params = (self.ctxt, it['flavorid'], current_specs)
- db.flavor_extra_specs_update_or_create(*params)
- real_specs = db.flavor_extra_specs_get(self.ctxt, it['flavorid'])
- self._assertEqualObjects(current_specs, real_specs)
-
- def test_flavor_extra_specs_update_or_create_flavor_not_found(self):
- self.assertRaises(exception.FlavorNotFound,
- db.flavor_extra_specs_update_or_create,
- self.ctxt, 'nonexists', {})
-
- def test_flavor_extra_specs_update_or_create_retry(self):
-
- def counted():
- def get_id(context, flavorid, session):
- get_id.counter += 1
- raise db_exc.DBDuplicateEntry
- get_id.counter = 0
- return get_id
-
- get_id = counted()
- self.stubs.Set(sqlalchemy_api, '_flavor_get_id_from_flavor', get_id)
- self.assertRaises(exception.FlavorExtraSpecUpdateCreateFailed,
- sqlalchemy_api.flavor_extra_specs_update_or_create,
- self.ctxt, 1, {}, 5)
- self.assertEqual(get_id.counter, 5)
-
-
-class InstanceTypeAccessTestCase(BaseInstanceTypeTestCase):
-
- def _create_flavor_access(self, flavor_id, project_id):
- return db.flavor_access_add(self.ctxt, flavor_id, project_id)
-
- def test_flavor_access_get_by_flavor_id(self):
- flavors = ({'name': 'n1', 'flavorid': 'f1'},
- {'name': 'n2', 'flavorid': 'f2'})
- it1, it2 = tuple((self._create_flavor(v) for v in flavors))
-
- access_it1 = [self._create_flavor_access(it1['flavorid'], 'pr1'),
- self._create_flavor_access(it1['flavorid'], 'pr2')]
-
- access_it2 = [self._create_flavor_access(it2['flavorid'], 'pr1')]
-
- for it, access_it in zip((it1, it2), (access_it1, access_it2)):
- params = (self.ctxt, it['flavorid'])
- real_access_it = db.flavor_access_get_by_flavor_id(*params)
- self._assertEqualListsOfObjects(access_it, real_access_it)
-
- def test_flavor_access_get_by_flavor_id_flavor_not_found(self):
- self.assertRaises(exception.FlavorNotFound,
- db.flavor_get_by_flavor_id,
- self.ctxt, 'nonexists')
-
- def test_flavor_access_add(self):
- flavor = self._create_flavor({'flavorid': 'f1'})
- project_id = 'p1'
-
- access = self._create_flavor_access(flavor['flavorid'], project_id)
- # NOTE(boris-42): Check that flavor_access_add doesn't fail and
- # returns correct value. This is enough because other
- # logic is checked by other methods.
- self.assertIsNotNone(access['id'])
- self.assertEqual(access['instance_type_id'], flavor['id'])
- self.assertEqual(access['project_id'], project_id)
-
- def test_flavor_access_add_to_non_existing_flavor(self):
- self.assertRaises(exception.FlavorNotFound,
- self._create_flavor_access,
- 'nonexists', 'does_not_matter')
-
- def test_flavor_access_add_duplicate_project_id_flavor(self):
- flavor = self._create_flavor({'flavorid': 'f1'})
- params = (flavor['flavorid'], 'p1')
-
- self._create_flavor_access(*params)
- self.assertRaises(exception.FlavorAccessExists,
- self._create_flavor_access, *params)
-
- def test_flavor_access_remove(self):
- flavors = ({'name': 'n1', 'flavorid': 'f1'},
- {'name': 'n2', 'flavorid': 'f2'})
- it1, it2 = tuple((self._create_flavor(v) for v in flavors))
-
- access_it1 = [self._create_flavor_access(it1['flavorid'], 'pr1'),
- self._create_flavor_access(it1['flavorid'], 'pr2')]
-
- access_it2 = [self._create_flavor_access(it2['flavorid'], 'pr1')]
-
- db.flavor_access_remove(self.ctxt, it1['flavorid'],
- access_it1[1]['project_id'])
-
- for it, access_it in zip((it1, it2), (access_it1[:1], access_it2)):
- params = (self.ctxt, it['flavorid'])
- real_access_it = db.flavor_access_get_by_flavor_id(*params)
- self._assertEqualListsOfObjects(access_it, real_access_it)
-
- def test_flavor_access_remove_flavor_not_found(self):
- self.assertRaises(exception.FlavorNotFound,
- db.flavor_access_remove,
- self.ctxt, 'nonexists', 'does_not_matter')
-
- def test_flavor_access_remove_access_not_found(self):
- flavor = self._create_flavor({'flavorid': 'f1'})
- params = (flavor['flavorid'], 'p1')
- self._create_flavor_access(*params)
- self.assertRaises(exception.FlavorAccessNotFound,
- db.flavor_access_remove,
- self.ctxt, flavor['flavorid'], 'p2')
-
- def test_flavor_access_removed_after_flavor_destroy(self):
- flavor1 = self._create_flavor({'flavorid': 'f1', 'name': 'n1'})
- flavor2 = self._create_flavor({'flavorid': 'f2', 'name': 'n2'})
- values = [
- (flavor1['flavorid'], 'p1'),
- (flavor1['flavorid'], 'p2'),
- (flavor2['flavorid'], 'p3')
- ]
- for v in values:
- self._create_flavor_access(*v)
-
- db.flavor_destroy(self.ctxt, flavor1['name'])
-
- p = (self.ctxt, flavor1['flavorid'])
- self.assertEqual(0, len(db.flavor_access_get_by_flavor_id(*p)))
- p = (self.ctxt, flavor2['flavorid'])
- self.assertEqual(1, len(db.flavor_access_get_by_flavor_id(*p)))
- db.flavor_destroy(self.ctxt, flavor2['name'])
- self.assertEqual(0, len(db.flavor_access_get_by_flavor_id(*p)))
-
-
-class FixedIPTestCase(BaseInstanceTypeTestCase):
- def _timeout_test(self, ctxt, timeout, multi_host):
- instance = db.instance_create(ctxt, dict(host='foo'))
- net = db.network_create_safe(ctxt, dict(multi_host=multi_host,
- host='bar'))
- old = timeout - datetime.timedelta(seconds=5)
- new = timeout + datetime.timedelta(seconds=5)
- # should deallocate
- db.fixed_ip_create(ctxt, dict(allocated=False,
- instance_uuid=instance['uuid'],
- network_id=net['id'],
- updated_at=old))
- # still allocated
- db.fixed_ip_create(ctxt, dict(allocated=True,
- instance_uuid=instance['uuid'],
- network_id=net['id'],
- updated_at=old))
- # wrong network
- db.fixed_ip_create(ctxt, dict(allocated=False,
- instance_uuid=instance['uuid'],
- network_id=None,
- updated_at=old))
- # too new
- db.fixed_ip_create(ctxt, dict(allocated=False,
- instance_uuid=instance['uuid'],
- network_id=None,
- updated_at=new))
-
- def mock_db_query_first_to_raise_data_error_exception(self):
- self.mox.StubOutWithMock(query.Query, 'first')
- query.Query.first().AndRaise(db_exc.DBError())
- self.mox.ReplayAll()
-
- def test_fixed_ip_disassociate_all_by_timeout_single_host(self):
- now = timeutils.utcnow()
- self._timeout_test(self.ctxt, now, False)
- result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now)
- self.assertEqual(result, 0)
- result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now)
- self.assertEqual(result, 1)
-
- def test_fixed_ip_disassociate_all_by_timeout_multi_host(self):
- now = timeutils.utcnow()
- self._timeout_test(self.ctxt, now, True)
- result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now)
- self.assertEqual(result, 1)
- result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now)
- self.assertEqual(result, 0)
-
- def test_fixed_ip_get_by_floating_address(self):
- fixed_ip = db.fixed_ip_create(self.ctxt, {'address': '192.168.0.2'})
- values = {'address': '8.7.6.5',
- 'fixed_ip_id': fixed_ip['id']}
- floating = db.floating_ip_create(self.ctxt, values)['address']
- fixed_ip_ref = db.fixed_ip_get_by_floating_address(self.ctxt, floating)
- self._assertEqualObjects(fixed_ip, fixed_ip_ref)
-
- def test_fixed_ip_get_by_host(self):
- host_ips = {
- 'host1': ['1.1.1.1', '1.1.1.2', '1.1.1.3'],
- 'host2': ['1.1.1.4', '1.1.1.5'],
- 'host3': ['1.1.1.6']
- }
-
- for host, ips in host_ips.iteritems():
- for ip in ips:
- instance_uuid = self._create_instance(host=host)
- db.fixed_ip_create(self.ctxt, {'address': ip})
- db.fixed_ip_associate(self.ctxt, ip, instance_uuid)
-
- for host, ips in host_ips.iteritems():
- ips_on_host = map(lambda x: x['address'],
- db.fixed_ip_get_by_host(self.ctxt, host))
- self._assertEqualListsOfPrimitivesAsSets(ips_on_host, ips)
-
- def test_fixed_ip_get_by_network_host_not_found_exception(self):
- self.assertRaises(
- exception.FixedIpNotFoundForNetworkHost,
- db.fixed_ip_get_by_network_host,
- self.ctxt, 1, 'ignore')
-
- def test_fixed_ip_get_by_network_host_fixed_ip_found(self):
- db.fixed_ip_create(self.ctxt, dict(network_id=1, host='host'))
-
- fip = db.fixed_ip_get_by_network_host(self.ctxt, 1, 'host')
-
- self.assertEqual(1, fip['network_id'])
- self.assertEqual('host', fip['host'])
-
- def _create_instance(self, **kwargs):
- instance = db.instance_create(self.ctxt, kwargs)
- return instance['uuid']
-
- def test_fixed_ip_get_by_instance_fixed_ip_found(self):
- instance_uuid = self._create_instance()
-
- FIXED_IP_ADDRESS = '192.168.1.5'
- db.fixed_ip_create(self.ctxt, dict(
- instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS))
-
- ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
- self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS],
- [ips_list[0].address])
-
- def test_fixed_ip_get_by_instance_multiple_fixed_ips_found(self):
- instance_uuid = self._create_instance()
-
- FIXED_IP_ADDRESS_1 = '192.168.1.5'
- db.fixed_ip_create(self.ctxt, dict(
- instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1))
- FIXED_IP_ADDRESS_2 = '192.168.1.6'
- db.fixed_ip_create(self.ctxt, dict(
- instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2))
-
- ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
- self._assertEqualListsOfPrimitivesAsSets(
- [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
- [ips_list[0].address, ips_list[1].address])
-
- def test_fixed_ip_get_by_instance_inappropriate_ignored(self):
- instance_uuid = self._create_instance()
-
- FIXED_IP_ADDRESS_1 = '192.168.1.5'
- db.fixed_ip_create(self.ctxt, dict(
- instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1))
- FIXED_IP_ADDRESS_2 = '192.168.1.6'
- db.fixed_ip_create(self.ctxt, dict(
- instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2))
-
- another_instance = db.instance_create(self.ctxt, {})
- db.fixed_ip_create(self.ctxt, dict(
- instance_uuid=another_instance['uuid'], address="192.168.1.7"))
-
- ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
- self._assertEqualListsOfPrimitivesAsSets(
- [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
- [ips_list[0].address, ips_list[1].address])
-
- def test_fixed_ip_get_by_instance_not_found_exception(self):
- instance_uuid = self._create_instance()
-
- self.assertRaises(exception.FixedIpNotFoundForInstance,
- db.fixed_ip_get_by_instance,
- self.ctxt, instance_uuid)
-
- def test_fixed_ips_by_virtual_interface_fixed_ip_found(self):
- instance_uuid = self._create_instance()
-
- vif = db.virtual_interface_create(
- self.ctxt, dict(instance_uuid=instance_uuid))
-
- FIXED_IP_ADDRESS = '192.168.1.5'
- db.fixed_ip_create(self.ctxt, dict(
- virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS))
-
- ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
- self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS],
- [ips_list[0].address])
-
- def test_fixed_ips_by_virtual_interface_multiple_fixed_ips_found(self):
- instance_uuid = self._create_instance()
-
- vif = db.virtual_interface_create(
- self.ctxt, dict(instance_uuid=instance_uuid))
-
- FIXED_IP_ADDRESS_1 = '192.168.1.5'
- db.fixed_ip_create(self.ctxt, dict(
- virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1))
- FIXED_IP_ADDRESS_2 = '192.168.1.6'
- db.fixed_ip_create(self.ctxt, dict(
- virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2))
-
- ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
- self._assertEqualListsOfPrimitivesAsSets(
- [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
- [ips_list[0].address, ips_list[1].address])
-
- def test_fixed_ips_by_virtual_interface_inappropriate_ignored(self):
- instance_uuid = self._create_instance()
-
- vif = db.virtual_interface_create(
- self.ctxt, dict(instance_uuid=instance_uuid))
-
- FIXED_IP_ADDRESS_1 = '192.168.1.5'
- db.fixed_ip_create(self.ctxt, dict(
- virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1))
- FIXED_IP_ADDRESS_2 = '192.168.1.6'
- db.fixed_ip_create(self.ctxt, dict(
- virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2))
-
- another_vif = db.virtual_interface_create(
- self.ctxt, dict(instance_uuid=instance_uuid))
- db.fixed_ip_create(self.ctxt, dict(
- virtual_interface_id=another_vif.id, address="192.168.1.7"))
-
- ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
- self._assertEqualListsOfPrimitivesAsSets(
- [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
- [ips_list[0].address, ips_list[1].address])
-
- def test_fixed_ips_by_virtual_interface_no_ip_found(self):
- instance_uuid = self._create_instance()
-
- vif = db.virtual_interface_create(
- self.ctxt, dict(instance_uuid=instance_uuid))
-
- ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
- self.assertEqual(0, len(ips_list))
-
- def create_fixed_ip(self, **params):
- default_params = {'address': '192.168.0.1'}
- default_params.update(params)
- return db.fixed_ip_create(self.ctxt, default_params)['address']
-
- def test_fixed_ip_associate_fails_if_ip_not_in_network(self):
- instance_uuid = self._create_instance()
- self.assertRaises(exception.FixedIpNotFoundForNetwork,
- db.fixed_ip_associate,
- self.ctxt, None, instance_uuid)
-
- def test_fixed_ip_associate_fails_if_ip_in_use(self):
- instance_uuid = self._create_instance()
-
- address = self.create_fixed_ip(instance_uuid=instance_uuid)
- self.assertRaises(exception.FixedIpAlreadyInUse,
- db.fixed_ip_associate,
- self.ctxt, address, instance_uuid)
-
- def test_fixed_ip_associate_succeeds(self):
- instance_uuid = self._create_instance()
- network = db.network_create_safe(self.ctxt, {})
-
- address = self.create_fixed_ip(network_id=network['id'])
- db.fixed_ip_associate(self.ctxt, address, instance_uuid,
- network_id=network['id'])
- fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
- self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
-
- def test_fixed_ip_associate_succeeds_and_sets_network(self):
- instance_uuid = self._create_instance()
- network = db.network_create_safe(self.ctxt, {})
-
- address = self.create_fixed_ip()
- db.fixed_ip_associate(self.ctxt, address, instance_uuid,
- network_id=network['id'])
- fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
- self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
- self.assertEqual(fixed_ip['network_id'], network['id'])
-
- def test_fixed_ip_associate_pool_invalid_uuid(self):
- instance_uuid = '123'
- self.assertRaises(exception.InvalidUUID, db.fixed_ip_associate_pool,
- self.ctxt, None, instance_uuid)
-
- def test_fixed_ip_associate_pool_no_more_fixed_ips(self):
- instance_uuid = self._create_instance()
- self.assertRaises(exception.NoMoreFixedIps, db.fixed_ip_associate_pool,
- self.ctxt, None, instance_uuid)
-
- def test_fixed_ip_associate_pool_succeeds(self):
- instance_uuid = self._create_instance()
- network = db.network_create_safe(self.ctxt, {})
-
- address = self.create_fixed_ip(network_id=network['id'])
- db.fixed_ip_associate_pool(self.ctxt, network['id'], instance_uuid)
- fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
- self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
-
- def test_fixed_ip_create_same_address(self):
- address = '192.168.1.5'
- params = {'address': address}
- db.fixed_ip_create(self.ctxt, params)
- self.assertRaises(exception.FixedIpExists, db.fixed_ip_create,
- self.ctxt, params)
-
- def test_fixed_ip_create_success(self):
- instance_uuid = self._create_instance()
- network_id = db.network_create_safe(self.ctxt, {})['id']
- param = {
- 'reserved': False,
- 'deleted': 0,
- 'leased': False,
- 'host': '127.0.0.1',
- 'address': '192.168.1.5',
- 'allocated': False,
- 'instance_uuid': instance_uuid,
- 'network_id': network_id,
- 'virtual_interface_id': None
- }
-
- ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
- fixed_ip_data = db.fixed_ip_create(self.ctxt, param)
- self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
-
- def test_fixed_ip_bulk_create_same_address(self):
- address_1 = '192.168.1.5'
- address_2 = '192.168.1.6'
- instance_uuid = self._create_instance()
- network_id_1 = db.network_create_safe(self.ctxt, {})['id']
- network_id_2 = db.network_create_safe(self.ctxt, {})['id']
- params = [
- {'reserved': False, 'deleted': 0, 'leased': False,
- 'host': '127.0.0.1', 'address': address_2, 'allocated': False,
- 'instance_uuid': instance_uuid, 'network_id': network_id_1,
- 'virtual_interface_id': None},
- {'reserved': False, 'deleted': 0, 'leased': False,
- 'host': '127.0.0.1', 'address': address_1, 'allocated': False,
- 'instance_uuid': instance_uuid, 'network_id': network_id_1,
- 'virtual_interface_id': None},
- {'reserved': False, 'deleted': 0, 'leased': False,
- 'host': 'localhost', 'address': address_2, 'allocated': True,
- 'instance_uuid': instance_uuid, 'network_id': network_id_2,
- 'virtual_interface_id': None},
- ]
-
- self.assertRaises(exception.FixedIpExists, db.fixed_ip_bulk_create,
- self.ctxt, params)
- # In this case the transaction will be rolled back and none of the ips
- # will make it to the database.
- self.assertRaises(exception.FixedIpNotFoundForAddress,
- db.fixed_ip_get_by_address, self.ctxt, address_1)
- self.assertRaises(exception.FixedIpNotFoundForAddress,
- db.fixed_ip_get_by_address, self.ctxt, address_2)
-
- def test_fixed_ip_bulk_create_success(self):
- address_1 = '192.168.1.5'
- address_2 = '192.168.1.6'
-
- instance_uuid = self._create_instance()
- network_id_1 = db.network_create_safe(self.ctxt, {})['id']
- network_id_2 = db.network_create_safe(self.ctxt, {})['id']
- params = [
- {'reserved': False, 'deleted': 0, 'leased': False,
- 'host': '127.0.0.1', 'address': address_1, 'allocated': False,
- 'instance_uuid': instance_uuid, 'network_id': network_id_1,
- 'virtual_interface_id': None},
- {'reserved': False, 'deleted': 0, 'leased': False,
- 'host': 'localhost', 'address': address_2, 'allocated': True,
- 'instance_uuid': instance_uuid, 'network_id': network_id_2,
- 'virtual_interface_id': None}
- ]
-
- db.fixed_ip_bulk_create(self.ctxt, params)
- ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at',
- 'virtual_interface', 'network', 'floating_ips']
- fixed_ip_data = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
-
- # we have no `id` in incoming data so we can not use
- # _assertEqualListsOfObjects to compare incoming data and received
- # objects
- fixed_ip_data = sorted(fixed_ip_data, key=lambda i: i['network_id'])
- params = sorted(params, key=lambda i: i['network_id'])
- for param, ip in zip(params, fixed_ip_data):
- self._assertEqualObjects(param, ip, ignored_keys)
-
- def test_fixed_ip_disassociate(self):
- address = '192.168.1.5'
- instance_uuid = self._create_instance()
- network_id = db.network_create_safe(self.ctxt, {})['id']
- values = {'address': '192.168.1.5', 'instance_uuid': instance_uuid}
- vif = db.virtual_interface_create(self.ctxt, values)
- param = {
- 'reserved': False,
- 'deleted': 0,
- 'leased': False,
- 'host': '127.0.0.1',
- 'address': address,
- 'allocated': False,
- 'instance_uuid': instance_uuid,
- 'network_id': network_id,
- 'virtual_interface_id': vif['id']
- }
- db.fixed_ip_create(self.ctxt, param)
-
- db.fixed_ip_disassociate(self.ctxt, address)
- fixed_ip_data = db.fixed_ip_get_by_address(self.ctxt, address)
- ignored_keys = ['created_at', 'id', 'deleted_at',
- 'updated_at', 'instance_uuid',
- 'virtual_interface_id']
- self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
- self.assertIsNone(fixed_ip_data['instance_uuid'])
- self.assertIsNone(fixed_ip_data['virtual_interface_id'])
-
- def test_fixed_ip_get_not_found_exception(self):
- self.assertRaises(exception.FixedIpNotFound,
- db.fixed_ip_get, self.ctxt, 0)
-
- def test_fixed_ip_get_success2(self):
- address = '192.168.1.5'
- instance_uuid = self._create_instance()
- network_id = db.network_create_safe(self.ctxt, {})['id']
- param = {
- 'reserved': False,
- 'deleted': 0,
- 'leased': False,
- 'host': '127.0.0.1',
- 'address': address,
- 'allocated': False,
- 'instance_uuid': instance_uuid,
- 'network_id': network_id,
- 'virtual_interface_id': None
- }
- fixed_ip_id = db.fixed_ip_create(self.ctxt, param)
-
- self.ctxt.is_admin = False
- self.assertRaises(exception.Forbidden, db.fixed_ip_get,
- self.ctxt, fixed_ip_id)
-
- def test_fixed_ip_get_success(self):
- address = '192.168.1.5'
- instance_uuid = self._create_instance()
- network_id = db.network_create_safe(self.ctxt, {})['id']
- param = {
- 'reserved': False,
- 'deleted': 0,
- 'leased': False,
- 'host': '127.0.0.1',
- 'address': address,
- 'allocated': False,
- 'instance_uuid': instance_uuid,
- 'network_id': network_id,
- 'virtual_interface_id': None
- }
- db.fixed_ip_create(self.ctxt, param)
-
- fixed_ip_id = db.fixed_ip_get_by_address(self.ctxt, address)['id']
- fixed_ip_data = db.fixed_ip_get(self.ctxt, fixed_ip_id)
- ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
- self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
-
- def test_fixed_ip_get_by_address(self):
- instance_uuid = self._create_instance()
- db.fixed_ip_create(self.ctxt, {'address': '1.2.3.4',
- 'instance_uuid': instance_uuid,
- })
- fixed_ip = db.fixed_ip_get_by_address(self.ctxt, '1.2.3.4',
- columns_to_join=['instance'])
- self.assertIn('instance', fixed_ip.__dict__)
- self.assertEqual(instance_uuid, fixed_ip.instance.uuid)
-
- def test_fixed_ip_get_by_address_detailed_not_found_exception(self):
- self.assertRaises(exception.FixedIpNotFoundForAddress,
- db.fixed_ip_get_by_address_detailed, self.ctxt,
- '192.168.1.5')
-
- def test_fixed_ip_get_by_address_with_data_error_exception(self):
- self.mock_db_query_first_to_raise_data_error_exception()
- self.assertRaises(exception.FixedIpInvalid,
- db.fixed_ip_get_by_address_detailed, self.ctxt,
- '192.168.1.6')
-
- def test_fixed_ip_get_by_address_detailed_sucsess(self):
- address = '192.168.1.5'
- instance_uuid = self._create_instance()
- network_id = db.network_create_safe(self.ctxt, {})['id']
- param = {
- 'reserved': False,
- 'deleted': 0,
- 'leased': False,
- 'host': '127.0.0.1',
- 'address': address,
- 'allocated': False,
- 'instance_uuid': instance_uuid,
- 'network_id': network_id,
- 'virtual_interface_id': None
- }
- db.fixed_ip_create(self.ctxt, param)
-
- fixed_ip_data = db.fixed_ip_get_by_address_detailed(self.ctxt, address)
- # fixed ip check here
- ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
- self._assertEqualObjects(param, fixed_ip_data[0], ignored_keys)
-
- # network model check here
- network_data = db.network_get(self.ctxt, network_id)
- self._assertEqualObjects(network_data, fixed_ip_data[1])
-
- # Instance check here
- instance_data = db.instance_get_by_uuid(self.ctxt, instance_uuid)
- ignored_keys = ['info_cache', 'system_metadata',
- 'security_groups', 'metadata',
- 'pci_devices'] # HOW ????
- self._assertEqualObjects(instance_data, fixed_ip_data[2], ignored_keys)
-
- def test_fixed_ip_update_not_found_for_address(self):
- self.assertRaises(exception.FixedIpNotFoundForAddress,
- db.fixed_ip_update, self.ctxt,
- '192.168.1.5', {})
-
- def test_fixed_ip_update(self):
- instance_uuid_1 = self._create_instance()
- instance_uuid_2 = self._create_instance()
- network_id_1 = db.network_create_safe(self.ctxt, {})['id']
- network_id_2 = db.network_create_safe(self.ctxt, {})['id']
- param_1 = {
- 'reserved': True, 'deleted': 0, 'leased': True,
- 'host': '192.168.133.1', 'address': '10.0.0.2',
- 'allocated': True, 'instance_uuid': instance_uuid_1,
- 'network_id': network_id_1, 'virtual_interface_id': '123',
- }
-
- param_2 = {
- 'reserved': False, 'deleted': 0, 'leased': False,
- 'host': '127.0.0.1', 'address': '10.0.0.3', 'allocated': False,
- 'instance_uuid': instance_uuid_2, 'network_id': network_id_2,
- 'virtual_interface_id': None
- }
-
- ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
- fixed_ip_addr = db.fixed_ip_create(self.ctxt, param_1)['address']
- db.fixed_ip_update(self.ctxt, fixed_ip_addr, param_2)
- fixed_ip_after_update = db.fixed_ip_get_by_address(self.ctxt,
- param_2['address'])
- self._assertEqualObjects(param_2, fixed_ip_after_update, ignored_keys)
-
-
-class FloatingIpTestCase(test.TestCase, ModelsObjectComparatorMixin):
-
- def setUp(self):
- super(FloatingIpTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
-
- def _get_base_values(self):
- return {
- 'address': '1.1.1.1',
- 'fixed_ip_id': None,
- 'project_id': 'fake_project',
- 'host': 'fake_host',
- 'auto_assigned': False,
- 'pool': 'fake_pool',
- 'interface': 'fake_interface',
- }
-
- def mock_db_query_first_to_raise_data_error_exception(self):
- self.mox.StubOutWithMock(query.Query, 'first')
- query.Query.first().AndRaise(db_exc.DBError())
- self.mox.ReplayAll()
-
- def _create_floating_ip(self, values):
- if not values:
- values = {}
- vals = self._get_base_values()
- vals.update(values)
- return db.floating_ip_create(self.ctxt, vals)
-
- def test_floating_ip_get(self):
- values = [{'address': '0.0.0.0'}, {'address': '1.1.1.1'}]
- floating_ips = [self._create_floating_ip(val) for val in values]
-
- for floating_ip in floating_ips:
- real_floating_ip = db.floating_ip_get(self.ctxt, floating_ip['id'])
- self._assertEqualObjects(floating_ip, real_floating_ip,
- ignored_keys=['fixed_ip'])
-
- def test_floating_ip_get_not_found(self):
- self.assertRaises(exception.FloatingIpNotFound,
- db.floating_ip_get, self.ctxt, 100500)
-
- def test_floating_ip_get_with_long_id_not_found(self):
- self.mock_db_query_first_to_raise_data_error_exception()
- self.assertRaises(exception.InvalidID,
- db.floating_ip_get, self.ctxt, 123456789101112)
-
- def test_floating_ip_get_pools(self):
- values = [
- {'address': '0.0.0.0', 'pool': 'abc'},
- {'address': '1.1.1.1', 'pool': 'abc'},
- {'address': '2.2.2.2', 'pool': 'def'},
- {'address': '3.3.3.3', 'pool': 'ghi'},
- ]
- for val in values:
- self._create_floating_ip(val)
- expected_pools = [{'name': x}
- for x in set(map(lambda x: x['pool'], values))]
- real_pools = db.floating_ip_get_pools(self.ctxt)
- self._assertEqualListsOfPrimitivesAsSets(real_pools, expected_pools)
-
- def test_floating_ip_allocate_address(self):
- pools = {
- 'pool1': ['0.0.0.0', '1.1.1.1'],
- 'pool2': ['2.2.2.2'],
- 'pool3': ['3.3.3.3', '4.4.4.4', '5.5.5.5']
- }
- for pool, addresses in pools.iteritems():
- for address in addresses:
- vals = {'pool': pool, 'address': address, 'project_id': None}
- self._create_floating_ip(vals)
-
- project_id = self._get_base_values()['project_id']
- for pool, addresses in pools.iteritems():
- alloc_addrs = []
- for i in addresses:
- float_addr = db.floating_ip_allocate_address(self.ctxt,
- project_id, pool)
- alloc_addrs.append(float_addr)
- self._assertEqualListsOfPrimitivesAsSets(alloc_addrs, addresses)
-
- def test_floating_ip_allocate_auto_assigned(self):
- addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
-
- float_ips = []
- for i in range(0, 2):
- float_ips.append(self._create_floating_ip(
- {"address": addresses[i]}))
- for i in range(2, 4):
- float_ips.append(self._create_floating_ip({"address": addresses[i],
- "auto_assigned": True}))
-
- for i in range(0, 2):
- float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
- self.assertFalse(float_ip.auto_assigned)
- for i in range(2, 4):
- float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
- self.assertTrue(float_ip.auto_assigned)
-
- def test_floating_ip_allocate_address_no_more_floating_ips(self):
- self.assertRaises(exception.NoMoreFloatingIps,
- db.floating_ip_allocate_address,
- self.ctxt, 'any_project_id', 'no_such_pool')
-
- def test_floating_ip_allocate_not_authorized(self):
- ctxt = context.RequestContext(user_id='a', project_id='abc',
- is_admin=False)
- self.assertRaises(exception.Forbidden,
- db.floating_ip_allocate_address,
- ctxt, 'other_project_id', 'any_pool')
-
- def _get_existing_ips(self):
- return [ip['address'] for ip in db.floating_ip_get_all(self.ctxt)]
-
- def test_floating_ip_bulk_create(self):
- expected_ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
- db.floating_ip_bulk_create(self.ctxt,
- map(lambda x: {'address': x}, expected_ips))
- self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
- expected_ips)
-
- def test_floating_ip_bulk_create_duplicate(self):
- ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
- prepare_ips = lambda x: {'address': x}
-
- result = db.floating_ip_bulk_create(self.ctxt, map(prepare_ips, ips))
- self.assertEqual('1.1.1.1', result[0].address)
- self.assertRaises(exception.FloatingIpExists,
- db.floating_ip_bulk_create,
- self.ctxt, map(prepare_ips, ['1.1.1.5', '1.1.1.4']))
- self.assertRaises(exception.FloatingIpNotFoundForAddress,
- db.floating_ip_get_by_address,
- self.ctxt, '1.1.1.5')
-
- def test_floating_ip_bulk_destroy(self):
- ips_for_delete = []
- ips_for_non_delete = []
-
- def create_ips(i, j):
- return [{'address': '1.1.%s.%s' % (i, k)} for k in range(1, j + 1)]
-
- # NOTE(boris-42): Create more than 256 ip to check that
- # _ip_range_splitter works properly.
- for i in range(1, 3):
- ips_for_delete.extend(create_ips(i, 255))
- ips_for_non_delete.extend(create_ips(3, 255))
-
- db.floating_ip_bulk_create(self.ctxt,
- ips_for_delete + ips_for_non_delete)
-
- non_bulk_ips_for_delete = create_ips(4, 3)
- non_bulk_ips_for_non_delete = create_ips(5, 3)
- non_bulk_ips = non_bulk_ips_for_delete + non_bulk_ips_for_non_delete
- project_id = 'fake_project'
- reservations = quota.QUOTAS.reserve(self.ctxt,
- floating_ips=len(non_bulk_ips),
- project_id=project_id)
- for dct in non_bulk_ips:
- self._create_floating_ip(dct)
- quota.QUOTAS.commit(self.ctxt, reservations, project_id=project_id)
- self.assertEqual(db.quota_usage_get_all_by_project(
- self.ctxt, project_id),
- {'project_id': project_id,
- 'floating_ips': {'in_use': 6, 'reserved': 0}})
- ips_for_delete.extend(non_bulk_ips_for_delete)
- ips_for_non_delete.extend(non_bulk_ips_for_non_delete)
-
- db.floating_ip_bulk_destroy(self.ctxt, ips_for_delete)
-
- expected_addresses = map(lambda x: x['address'], ips_for_non_delete)
- self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
- expected_addresses)
- self.assertEqual(db.quota_usage_get_all_by_project(
- self.ctxt, project_id),
- {'project_id': project_id,
- 'floating_ips': {'in_use': 3, 'reserved': 0}})
-
- def test_floating_ip_create(self):
- floating_ip = self._create_floating_ip({})
- ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
- 'created_at']
-
- self.assertIsNotNone(floating_ip['id'])
- self._assertEqualObjects(floating_ip, self._get_base_values(),
- ignored_keys)
-
- def test_floating_ip_create_duplicate(self):
- self._create_floating_ip({})
- self.assertRaises(exception.FloatingIpExists,
- self._create_floating_ip, {})
-
- def _create_fixed_ip(self, params):
- default_params = {'address': '192.168.0.1'}
- default_params.update(params)
- return db.fixed_ip_create(self.ctxt, default_params)['address']
-
- def test_floating_ip_fixed_ip_associate(self):
- float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
- fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']
-
- float_ips = [self._create_floating_ip({'address': address})
- for address in float_addresses]
- fixed_addrs = [self._create_fixed_ip({'address': address})
- for address in fixed_addresses]
-
- for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
- fixed_ip = db.floating_ip_fixed_ip_associate(self.ctxt,
- float_ip.address,
- fixed_addr, 'host')
- self.assertEqual(fixed_ip.address, fixed_addr)
-
- updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
- self.assertEqual(fixed_ip.id, updated_float_ip.fixed_ip_id)
- self.assertEqual('host', updated_float_ip.host)
-
- # Test that already allocated float_ip returns None
- result = db.floating_ip_fixed_ip_associate(self.ctxt,
- float_addresses[0],
- fixed_addresses[0], 'host')
- self.assertIsNone(result)
-
- def test_floating_ip_fixed_ip_associate_float_ip_not_found(self):
- self.assertRaises(exception.FloatingIpNotFoundForAddress,
- db.floating_ip_fixed_ip_associate,
- self.ctxt, '10.10.10.10', 'some', 'some')
-
- def test_floating_ip_deallocate(self):
- values = {'address': '1.1.1.1', 'project_id': 'fake', 'host': 'fake'}
- float_ip = self._create_floating_ip(values)
- rows_updated = db.floating_ip_deallocate(self.ctxt, float_ip.address)
- self.assertEqual(1, rows_updated)
-
- updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
- self.assertIsNone(updated_float_ip.project_id)
- self.assertIsNone(updated_float_ip.host)
- self.assertFalse(updated_float_ip.auto_assigned)
-
- def test_floating_ip_deallocate_address_not_found(self):
- self.assertEqual(0, db.floating_ip_deallocate(self.ctxt, '2.2.2.2'))
-
- def test_floating_ip_destroy(self):
- addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
- float_ips = [self._create_floating_ip({'address': addr})
- for addr in addresses]
-
- expected_len = len(addresses)
- for float_ip in float_ips:
- db.floating_ip_destroy(self.ctxt, float_ip.address)
- self.assertRaises(exception.FloatingIpNotFound,
- db.floating_ip_get, self.ctxt, float_ip.id)
- expected_len -= 1
- if expected_len > 0:
- self.assertEqual(expected_len,
- len(db.floating_ip_get_all(self.ctxt)))
- else:
- self.assertRaises(exception.NoFloatingIpsDefined,
- db.floating_ip_get_all, self.ctxt)
-
- def test_floating_ip_disassociate(self):
- float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
- fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']
-
- float_ips = [self._create_floating_ip({'address': address})
- for address in float_addresses]
- fixed_addrs = [self._create_fixed_ip({'address': address})
- for address in fixed_addresses]
-
- for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
- db.floating_ip_fixed_ip_associate(self.ctxt,
- float_ip.address,
- fixed_addr, 'host')
-
- for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
- fixed = db.floating_ip_disassociate(self.ctxt, float_ip.address)
- self.assertEqual(fixed.address, fixed_addr)
- updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
- self.assertIsNone(updated_float_ip.fixed_ip_id)
- self.assertIsNone(updated_float_ip.host)
-
- def test_floating_ip_disassociate_not_found(self):
- self.assertRaises(exception.FloatingIpNotFoundForAddress,
- db.floating_ip_disassociate, self.ctxt,
- '11.11.11.11')
-
- def test_floating_ip_set_auto_assigned(self):
- addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
- float_ips = [self._create_floating_ip({'address': addr,
- 'auto_assigned': False})
- for addr in addresses]
-
- for i in range(2):
- db.floating_ip_set_auto_assigned(self.ctxt, float_ips[i].address)
- for i in range(2):
- float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
- self.assertTrue(float_ip.auto_assigned)
-
- float_ip = db.floating_ip_get(self.ctxt, float_ips[2].id)
- self.assertFalse(float_ip.auto_assigned)
-
- def test_floating_ip_get_all(self):
- addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
- float_ips = [self._create_floating_ip({'address': addr})
- for addr in addresses]
- self._assertEqualListsOfObjects(float_ips,
- db.floating_ip_get_all(self.ctxt))
-
- def test_floating_ip_get_all_not_found(self):
- self.assertRaises(exception.NoFloatingIpsDefined,
- db.floating_ip_get_all, self.ctxt)
-
- def test_floating_ip_get_all_by_host(self):
- hosts = {
- 'host1': ['1.1.1.1', '1.1.1.2'],
- 'host2': ['2.1.1.1', '2.1.1.2'],
- 'host3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
- }
-
- hosts_with_float_ips = {}
- for host, addresses in hosts.iteritems():
- hosts_with_float_ips[host] = []
- for address in addresses:
- float_ip = self._create_floating_ip({'host': host,
- 'address': address})
- hosts_with_float_ips[host].append(float_ip)
-
- for host, float_ips in hosts_with_float_ips.iteritems():
- real_float_ips = db.floating_ip_get_all_by_host(self.ctxt, host)
- self._assertEqualListsOfObjects(float_ips, real_float_ips)
-
- def test_floating_ip_get_all_by_host_not_found(self):
- self.assertRaises(exception.FloatingIpNotFoundForHost,
- db.floating_ip_get_all_by_host,
- self.ctxt, 'non_exists_host')
-
- def test_floating_ip_get_all_by_project(self):
- projects = {
- 'pr1': ['1.1.1.1', '1.1.1.2'],
- 'pr2': ['2.1.1.1', '2.1.1.2'],
- 'pr3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
- }
-
- projects_with_float_ips = {}
- for project_id, addresses in projects.iteritems():
- projects_with_float_ips[project_id] = []
- for address in addresses:
- float_ip = self._create_floating_ip({'project_id': project_id,
- 'address': address})
- projects_with_float_ips[project_id].append(float_ip)
-
- for project_id, float_ips in projects_with_float_ips.iteritems():
- real_float_ips = db.floating_ip_get_all_by_project(self.ctxt,
- project_id)
- self._assertEqualListsOfObjects(float_ips, real_float_ips,
- ignored_keys='fixed_ip')
-
- def test_floating_ip_get_all_by_project_not_authorized(self):
- ctxt = context.RequestContext(user_id='a', project_id='abc',
- is_admin=False)
- self.assertRaises(exception.Forbidden,
- db.floating_ip_get_all_by_project,
- ctxt, 'other_project')
-
- def test_floating_ip_get_by_address(self):
- addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
- float_ips = [self._create_floating_ip({'address': addr})
- for addr in addresses]
-
- for float_ip in float_ips:
- real_float_ip = db.floating_ip_get_by_address(self.ctxt,
- float_ip.address)
- self._assertEqualObjects(float_ip, real_float_ip,
- ignored_keys='fixed_ip')
-
- def test_floating_ip_get_by_address_not_found(self):
- self.assertRaises(exception.FloatingIpNotFoundForAddress,
- db.floating_ip_get_by_address,
- self.ctxt, '20.20.20.20')
-
- def test_floating_ip_get_by_invalid_address(self):
- self.mock_db_query_first_to_raise_data_error_exception()
- self.assertRaises(exception.InvalidIpAddressError,
- db.floating_ip_get_by_address,
- self.ctxt, 'non_exists_host')
-
- def test_floating_ip_get_by_fixed_address(self):
- fixed_float = [
- ('1.1.1.1', '2.2.2.1'),
- ('1.1.1.2', '2.2.2.2'),
- ('1.1.1.3', '2.2.2.3')
- ]
-
- for fixed_addr, float_addr in fixed_float:
- self._create_floating_ip({'address': float_addr})
- self._create_fixed_ip({'address': fixed_addr})
- db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
- fixed_addr, 'some_host')
-
- for fixed_addr, float_addr in fixed_float:
- float_ip = db.floating_ip_get_by_fixed_address(self.ctxt,
- fixed_addr)
- self.assertEqual(float_addr, float_ip[0]['address'])
-
- def test_floating_ip_get_by_fixed_ip_id(self):
- fixed_float = [
- ('1.1.1.1', '2.2.2.1'),
- ('1.1.1.2', '2.2.2.2'),
- ('1.1.1.3', '2.2.2.3')
- ]
-
- for fixed_addr, float_addr in fixed_float:
- self._create_floating_ip({'address': float_addr})
- self._create_fixed_ip({'address': fixed_addr})
- db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
- fixed_addr, 'some_host')
-
- for fixed_addr, float_addr in fixed_float:
- fixed_ip = db.fixed_ip_get_by_address(self.ctxt, fixed_addr)
- float_ip = db.floating_ip_get_by_fixed_ip_id(self.ctxt,
- fixed_ip['id'])
- self.assertEqual(float_addr, float_ip[0]['address'])
-
- def test_floating_ip_update(self):
- float_ip = self._create_floating_ip({})
-
- values = {
- 'project_id': 'some_pr',
- 'host': 'some_host',
- 'auto_assigned': True,
- 'interface': 'some_interface',
- 'pool': 'some_pool'
- }
- floating_ref = db.floating_ip_update(self.ctxt, float_ip['address'],
- values)
- self.assertIsNotNone(floating_ref)
- updated_float_ip = db.floating_ip_get(self.ctxt, float_ip['id'])
- self._assertEqualObjects(updated_float_ip, values,
- ignored_keys=['id', 'address', 'updated_at',
- 'deleted_at', 'created_at',
- 'deleted', 'fixed_ip_id',
- 'fixed_ip'])
-
- def test_floating_ip_update_to_duplicate(self):
- float_ip1 = self._create_floating_ip({'address': '1.1.1.1'})
- float_ip2 = self._create_floating_ip({'address': '1.1.1.2'})
-
- self.assertRaises(exception.FloatingIpExists,
- db.floating_ip_update,
- self.ctxt, float_ip2['address'],
- {'address': float_ip1['address']})
-
-
-class InstanceDestroyConstraints(test.TestCase):
-
- def test_destroy_with_equal_any_constraint_met_single_value(self):
- ctx = context.get_admin_context()
- instance = db.instance_create(ctx, {'task_state': 'deleting'})
- constraint = db.constraint(task_state=db.equal_any('deleting'))
- db.instance_destroy(ctx, instance['uuid'], constraint)
- self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
- ctx, instance['uuid'])
-
- def test_destroy_with_equal_any_constraint_met(self):
- ctx = context.get_admin_context()
- instance = db.instance_create(ctx, {'task_state': 'deleting'})
- constraint = db.constraint(task_state=db.equal_any('deleting',
- 'error'))
- db.instance_destroy(ctx, instance['uuid'], constraint)
- self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
- ctx, instance['uuid'])
-
- def test_destroy_with_equal_any_constraint_not_met(self):
- ctx = context.get_admin_context()
- instance = db.instance_create(ctx, {'vm_state': 'resize'})
- constraint = db.constraint(vm_state=db.equal_any('active', 'error'))
- self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
- ctx, instance['uuid'], constraint)
- instance = db.instance_get_by_uuid(ctx, instance['uuid'])
- self.assertFalse(instance['deleted'])
-
- def test_destroy_with_not_equal_constraint_met(self):
- ctx = context.get_admin_context()
- instance = db.instance_create(ctx, {'task_state': 'deleting'})
- constraint = db.constraint(task_state=db.not_equal('error', 'resize'))
- db.instance_destroy(ctx, instance['uuid'], constraint)
- self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
- ctx, instance['uuid'])
-
- def test_destroy_with_not_equal_constraint_not_met(self):
- ctx = context.get_admin_context()
- instance = db.instance_create(ctx, {'vm_state': 'active'})
- constraint = db.constraint(vm_state=db.not_equal('active', 'error'))
- self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
- ctx, instance['uuid'], constraint)
- instance = db.instance_get_by_uuid(ctx, instance['uuid'])
- self.assertFalse(instance['deleted'])
-
-
-class VolumeUsageDBApiTestCase(test.TestCase):
-
- def setUp(self):
- super(VolumeUsageDBApiTestCase, self).setUp()
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id, self.project_id)
-
- self.useFixture(test.TimeOverride())
-
- def test_vol_usage_update_no_totals_update(self):
- ctxt = context.get_admin_context()
- now = timeutils.utcnow()
- timeutils.set_time_override(now)
- start_time = now - datetime.timedelta(seconds=10)
-
- expected_vol_usages = {
- u'1': {'volume_id': u'1',
- 'instance_uuid': 'fake-instance-uuid1',
- 'project_id': 'fake-project-uuid1',
- 'user_id': 'fake-user-uuid1',
- 'curr_reads': 1000,
- 'curr_read_bytes': 2000,
- 'curr_writes': 3000,
- 'curr_write_bytes': 4000,
- 'curr_last_refreshed': now,
- 'tot_reads': 0,
- 'tot_read_bytes': 0,
- 'tot_writes': 0,
- 'tot_write_bytes': 0,
- 'tot_last_refreshed': None},
- u'2': {'volume_id': u'2',
- 'instance_uuid': 'fake-instance-uuid2',
- 'project_id': 'fake-project-uuid2',
- 'user_id': 'fake-user-uuid2',
- 'curr_reads': 100,
- 'curr_read_bytes': 200,
- 'curr_writes': 300,
- 'curr_write_bytes': 400,
- 'tot_reads': 0,
- 'tot_read_bytes': 0,
- 'tot_writes': 0,
- 'tot_write_bytes': 0,
- 'tot_last_refreshed': None}
- }
-
- def _compare(vol_usage, expected):
- for key, value in expected.items():
- self.assertEqual(vol_usage[key], value)
-
- vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
- self.assertEqual(len(vol_usages), 0)
-
- db.vol_usage_update(ctxt, u'1', rd_req=10, rd_bytes=20,
- wr_req=30, wr_bytes=40,
- instance_id='fake-instance-uuid1',
- project_id='fake-project-uuid1',
- user_id='fake-user-uuid1',
- availability_zone='fake-az')
- db.vol_usage_update(ctxt, u'2', rd_req=100, rd_bytes=200,
- wr_req=300, wr_bytes=400,
- instance_id='fake-instance-uuid2',
- project_id='fake-project-uuid2',
- user_id='fake-user-uuid2',
- availability_zone='fake-az')
- db.vol_usage_update(ctxt, u'1', rd_req=1000, rd_bytes=2000,
- wr_req=3000, wr_bytes=4000,
- instance_id='fake-instance-uuid1',
- project_id='fake-project-uuid1',
- user_id='fake-user-uuid1',
- availability_zone='fake-az')
-
- vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
- self.assertEqual(len(vol_usages), 2)
- for usage in vol_usages:
- _compare(usage, expected_vol_usages[usage.volume_id])
-
- def test_vol_usage_update_totals_update(self):
- ctxt = context.get_admin_context()
- now = datetime.datetime(1, 1, 1, 1, 0, 0)
- start_time = now - datetime.timedelta(seconds=10)
- now1 = now + datetime.timedelta(minutes=1)
- now2 = now + datetime.timedelta(minutes=2)
- now3 = now + datetime.timedelta(minutes=3)
-
- timeutils.set_time_override(now)
- db.vol_usage_update(ctxt, u'1', rd_req=100, rd_bytes=200,
- wr_req=300, wr_bytes=400,
- instance_id='fake-instance-uuid',
- project_id='fake-project-uuid',
- user_id='fake-user-uuid',
- availability_zone='fake-az')
- current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
- self.assertEqual(current_usage['tot_reads'], 0)
- self.assertEqual(current_usage['curr_reads'], 100)
-
- timeutils.set_time_override(now1)
- db.vol_usage_update(ctxt, u'1', rd_req=200, rd_bytes=300,
- wr_req=400, wr_bytes=500,
- instance_id='fake-instance-uuid',
- project_id='fake-project-uuid',
- user_id='fake-user-uuid',
- availability_zone='fake-az',
- update_totals=True)
- current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
- self.assertEqual(current_usage['tot_reads'], 200)
- self.assertEqual(current_usage['curr_reads'], 0)
-
- timeutils.set_time_override(now2)
- db.vol_usage_update(ctxt, u'1', rd_req=300, rd_bytes=400,
- wr_req=500, wr_bytes=600,
- instance_id='fake-instance-uuid',
- project_id='fake-project-uuid',
- availability_zone='fake-az',
- user_id='fake-user-uuid')
- current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
- self.assertEqual(current_usage['tot_reads'], 200)
- self.assertEqual(current_usage['curr_reads'], 300)
-
- timeutils.set_time_override(now3)
- db.vol_usage_update(ctxt, u'1', rd_req=400, rd_bytes=500,
- wr_req=600, wr_bytes=700,
- instance_id='fake-instance-uuid',
- project_id='fake-project-uuid',
- user_id='fake-user-uuid',
- availability_zone='fake-az',
- update_totals=True)
-
- vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
-
- expected_vol_usages = {'volume_id': u'1',
- 'project_id': 'fake-project-uuid',
- 'user_id': 'fake-user-uuid',
- 'instance_uuid': 'fake-instance-uuid',
- 'availability_zone': 'fake-az',
- 'tot_reads': 600,
- 'tot_read_bytes': 800,
- 'tot_writes': 1000,
- 'tot_write_bytes': 1200,
- 'tot_last_refreshed': now3,
- 'curr_reads': 0,
- 'curr_read_bytes': 0,
- 'curr_writes': 0,
- 'curr_write_bytes': 0,
- 'curr_last_refreshed': now2}
-
- self.assertEqual(1, len(vol_usages))
- for key, value in expected_vol_usages.items():
- self.assertEqual(vol_usages[0][key], value, key)
-
- def test_vol_usage_update_when_blockdevicestats_reset(self):
- ctxt = context.get_admin_context()
- now = timeutils.utcnow()
- start_time = now - datetime.timedelta(seconds=10)
-
- vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
- self.assertEqual(len(vol_usages), 0)
-
- db.vol_usage_update(ctxt, u'1',
- rd_req=10000, rd_bytes=20000,
- wr_req=30000, wr_bytes=40000,
- instance_id='fake-instance-uuid1',
- project_id='fake-project-uuid1',
- availability_zone='fake-az',
- user_id='fake-user-uuid1')
-
- # Instance rebooted or crashed. block device stats were reset and are
- # less than the previous values
- db.vol_usage_update(ctxt, u'1',
- rd_req=100, rd_bytes=200,
- wr_req=300, wr_bytes=400,
- instance_id='fake-instance-uuid1',
- project_id='fake-project-uuid1',
- availability_zone='fake-az',
- user_id='fake-user-uuid1')
-
- db.vol_usage_update(ctxt, u'1',
- rd_req=200, rd_bytes=300,
- wr_req=400, wr_bytes=500,
- instance_id='fake-instance-uuid1',
- project_id='fake-project-uuid1',
- availability_zone='fake-az',
- user_id='fake-user-uuid1')
-
- vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
- expected_vol_usage = {'volume_id': u'1',
- 'instance_uuid': 'fake-instance-uuid1',
- 'project_id': 'fake-project-uuid1',
- 'availability_zone': 'fake-az',
- 'user_id': 'fake-user-uuid1',
- 'curr_reads': 200,
- 'curr_read_bytes': 300,
- 'curr_writes': 400,
- 'curr_write_bytes': 500,
- 'tot_reads': 10000,
- 'tot_read_bytes': 20000,
- 'tot_writes': 30000,
- 'tot_write_bytes': 40000}
- for key, value in expected_vol_usage.items():
- self.assertEqual(vol_usage[key], value, key)
-
- def test_vol_usage_update_totals_update_when_blockdevicestats_reset(self):
- # This is unlikely to happen, but could when a volume is detached
- # right after a instance has rebooted / recovered and before
- # the system polled and updated the volume usage cache table.
- ctxt = context.get_admin_context()
- now = timeutils.utcnow()
- start_time = now - datetime.timedelta(seconds=10)
-
- vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
- self.assertEqual(len(vol_usages), 0)
-
- db.vol_usage_update(ctxt, u'1',
- rd_req=10000, rd_bytes=20000,
- wr_req=30000, wr_bytes=40000,
- instance_id='fake-instance-uuid1',
- project_id='fake-project-uuid1',
- availability_zone='fake-az',
- user_id='fake-user-uuid1')
-
- # Instance rebooted or crashed. block device stats were reset and are
- # less than the previous values
- db.vol_usage_update(ctxt, u'1',
- rd_req=100, rd_bytes=200,
- wr_req=300, wr_bytes=400,
- instance_id='fake-instance-uuid1',
- project_id='fake-project-uuid1',
- availability_zone='fake-az',
- user_id='fake-user-uuid1',
- update_totals=True)
-
- vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
- expected_vol_usage = {'volume_id': u'1',
- 'instance_uuid': 'fake-instance-uuid1',
- 'project_id': 'fake-project-uuid1',
- 'availability_zone': 'fake-az',
- 'user_id': 'fake-user-uuid1',
- 'curr_reads': 0,
- 'curr_read_bytes': 0,
- 'curr_writes': 0,
- 'curr_write_bytes': 0,
- 'tot_reads': 10100,
- 'tot_read_bytes': 20200,
- 'tot_writes': 30300,
- 'tot_write_bytes': 40400}
- for key, value in expected_vol_usage.items():
- self.assertEqual(vol_usage[key], value, key)
-
-
-class TaskLogTestCase(test.TestCase):
-
- def setUp(self):
- super(TaskLogTestCase, self).setUp()
- self.context = context.get_admin_context()
- now = timeutils.utcnow()
- self.begin = now - datetime.timedelta(seconds=10)
- self.end = now - datetime.timedelta(seconds=5)
- self.task_name = 'fake-task-name'
- self.host = 'fake-host'
- self.message = 'Fake task message'
- db.task_log_begin_task(self.context, self.task_name, self.begin,
- self.end, self.host, message=self.message)
-
- def test_task_log_get(self):
- result = db.task_log_get(self.context, self.task_name, self.begin,
- self.end, self.host)
- self.assertEqual(result['task_name'], self.task_name)
- self.assertEqual(result['period_beginning'], self.begin)
- self.assertEqual(result['period_ending'], self.end)
- self.assertEqual(result['host'], self.host)
- self.assertEqual(result['message'], self.message)
-
- def test_task_log_get_all(self):
- result = db.task_log_get_all(self.context, self.task_name, self.begin,
- self.end, host=self.host)
- self.assertEqual(len(result), 1)
- result = db.task_log_get_all(self.context, self.task_name, self.begin,
- self.end, host=self.host, state='')
- self.assertEqual(len(result), 0)
-
- def test_task_log_begin_task(self):
- db.task_log_begin_task(self.context, 'fake', self.begin,
- self.end, self.host, task_items=42,
- message=self.message)
- result = db.task_log_get(self.context, 'fake', self.begin,
- self.end, self.host)
- self.assertEqual(result['task_name'], 'fake')
-
- def test_task_log_begin_task_duplicate(self):
- params = (self.context, 'fake', self.begin, self.end, self.host)
- db.task_log_begin_task(*params, message=self.message)
- self.assertRaises(exception.TaskAlreadyRunning,
- db.task_log_begin_task,
- *params, message=self.message)
-
- def test_task_log_end_task(self):
- errors = 1
- db.task_log_end_task(self.context, self.task_name, self.begin,
- self.end, self.host, errors, message=self.message)
- result = db.task_log_get(self.context, self.task_name, self.begin,
- self.end, self.host)
- self.assertEqual(result['errors'], 1)
-
- def test_task_log_end_task_task_not_running(self):
- self.assertRaises(exception.TaskNotRunning,
- db.task_log_end_task, self.context, 'nonexistent',
- self.begin, self.end, self.host, 42,
- message=self.message)
-
-
-class BlockDeviceMappingTestCase(test.TestCase):
- def setUp(self):
- super(BlockDeviceMappingTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
- self.instance = db.instance_create(self.ctxt, {})
-
- def _create_bdm(self, values):
- values.setdefault('instance_uuid', self.instance['uuid'])
- values.setdefault('device_name', 'fake_device')
- values.setdefault('source_type', 'volume')
- values.setdefault('destination_type', 'volume')
- block_dev = block_device.BlockDeviceDict(values)
- db.block_device_mapping_create(self.ctxt, block_dev, legacy=False)
- uuid = block_dev['instance_uuid']
-
- bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
-
- for bdm in bdms:
- if bdm['device_name'] == values['device_name']:
- return bdm
-
- def test_scrub_empty_str_values_no_effect(self):
- values = {'volume_size': 5}
- expected = copy.copy(values)
- sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
- self.assertEqual(values, expected)
-
- def test_scrub_empty_str_values_empty_string(self):
- values = {'volume_size': ''}
- sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
- self.assertEqual(values, {})
-
- def test_scrub_empty_str_values_empty_unicode(self):
- values = {'volume_size': u''}
- sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
- self.assertEqual(values, {})
-
- def test_block_device_mapping_create(self):
- bdm = self._create_bdm({})
- self.assertIsNotNone(bdm)
-
- def test_block_device_mapping_update(self):
- bdm = self._create_bdm({})
- result = db.block_device_mapping_update(
- self.ctxt, bdm['id'], {'destination_type': 'moon'},
- legacy=False)
- uuid = bdm['instance_uuid']
- bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
- self.assertEqual(bdm_real[0]['destination_type'], 'moon')
- # Also make sure the update call returned correct data
- self.assertEqual(dict(bdm_real[0].iteritems()),
- dict(result.iteritems()))
-
- def test_block_device_mapping_update_or_create(self):
- values = {
- 'instance_uuid': self.instance['uuid'],
- 'device_name': 'fake_name',
- 'source_type': 'volume',
- 'destination_type': 'volume'
- }
- # check create
- db.block_device_mapping_update_or_create(self.ctxt, values,
- legacy=False)
- uuid = values['instance_uuid']
- bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
- self.assertEqual(len(bdm_real), 1)
- self.assertEqual(bdm_real[0]['device_name'], 'fake_name')
-
- # check update
- values['destination_type'] = 'camelot'
- db.block_device_mapping_update_or_create(self.ctxt, values,
- legacy=False)
- bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
- self.assertEqual(len(bdm_real), 1)
- bdm_real = bdm_real[0]
- self.assertEqual(bdm_real['device_name'], 'fake_name')
- self.assertEqual(bdm_real['destination_type'], 'camelot')
-
- # check create without device_name
- bdm1 = dict(values)
- bdm1['device_name'] = None
- db.block_device_mapping_update_or_create(self.ctxt, bdm1, legacy=False)
- bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
- self.assertEqual(len(bdm_real), 2)
- bdm_real = bdm_real[1]
- self.assertIsNone(bdm_real['device_name'])
-
- # check create multiple devices without device_name
- bdm2 = dict(values)
- bdm2['device_name'] = None
- db.block_device_mapping_update_or_create(self.ctxt, bdm2, legacy=False)
- bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
- self.assertEqual(len(bdm_real), 3)
- bdm_real = bdm_real[2]
- self.assertIsNone(bdm_real['device_name'])
-
- def test_block_device_mapping_update_or_create_multiple_ephemeral(self):
- uuid = self.instance['uuid']
- values = {
- 'instance_uuid': uuid,
- 'source_type': 'blank',
- 'guest_format': 'myformat',
- }
-
- bdm1 = dict(values)
- bdm1['device_name'] = '/dev/sdb'
- db.block_device_mapping_update_or_create(self.ctxt, bdm1, legacy=False)
-
- bdm2 = dict(values)
- bdm2['device_name'] = '/dev/sdc'
- db.block_device_mapping_update_or_create(self.ctxt, bdm2, legacy=False)
-
- bdm_real = sorted(
- db.block_device_mapping_get_all_by_instance(self.ctxt, uuid),
- key=lambda bdm: bdm['device_name']
- )
-
- self.assertEqual(len(bdm_real), 2)
- for bdm, device_name in zip(bdm_real, ['/dev/sdb', '/dev/sdc']):
- self.assertEqual(bdm['device_name'], device_name)
- self.assertEqual(bdm['guest_format'], 'myformat')
-
- def test_block_device_mapping_update_or_create_check_remove_virt(self):
- uuid = self.instance['uuid']
- values = {
- 'instance_uuid': uuid,
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'guest_format': 'swap',
- }
-
- # check that old swap bdms are deleted on create
- val1 = dict(values)
- val1['device_name'] = 'device1'
- db.block_device_mapping_create(self.ctxt, val1, legacy=False)
- val2 = dict(values)
- val2['device_name'] = 'device2'
- db.block_device_mapping_update_or_create(self.ctxt, val2, legacy=False)
- bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
- self.assertEqual(len(bdm_real), 1)
- bdm_real = bdm_real[0]
- self.assertEqual(bdm_real['device_name'], 'device2')
- self.assertEqual(bdm_real['source_type'], 'blank')
- self.assertEqual(bdm_real['guest_format'], 'swap')
- db.block_device_mapping_destroy(self.ctxt, bdm_real['id'])
-
- def test_block_device_mapping_get_all_by_instance(self):
- uuid1 = self.instance['uuid']
- uuid2 = db.instance_create(self.ctxt, {})['uuid']
-
- bmds_values = [{'instance_uuid': uuid1,
- 'device_name': '/dev/vda'},
- {'instance_uuid': uuid2,
- 'device_name': '/dev/vdb'},
- {'instance_uuid': uuid2,
- 'device_name': '/dev/vdc'}]
-
- for bdm in bmds_values:
- self._create_bdm(bdm)
-
- bmd = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid1)
- self.assertEqual(len(bmd), 1)
- self.assertEqual(bmd[0]['device_name'], '/dev/vda')
-
- bmd = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid2)
- self.assertEqual(len(bmd), 2)
-
- def test_block_device_mapping_destroy(self):
- bdm = self._create_bdm({})
- db.block_device_mapping_destroy(self.ctxt, bdm['id'])
- bdm = db.block_device_mapping_get_all_by_instance(self.ctxt,
- bdm['instance_uuid'])
- self.assertEqual(len(bdm), 0)
-
- def test_block_device_mapping_destroy_by_instance_and_volume(self):
- vol_id1 = '69f5c254-1a5b-4fff-acf7-cb369904f58f'
- vol_id2 = '69f5c254-1a5b-4fff-acf7-cb369904f59f'
-
- self._create_bdm({'device_name': '/dev/vda', 'volume_id': vol_id1})
- self._create_bdm({'device_name': '/dev/vdb', 'volume_id': vol_id2})
-
- uuid = self.instance['uuid']
- db.block_device_mapping_destroy_by_instance_and_volume(self.ctxt, uuid,
- vol_id1)
- bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
- self.assertEqual(len(bdms), 1)
- self.assertEqual(bdms[0]['device_name'], '/dev/vdb')
-
- def test_block_device_mapping_destroy_by_instance_and_device(self):
- self._create_bdm({'device_name': '/dev/vda'})
- self._create_bdm({'device_name': '/dev/vdb'})
-
- uuid = self.instance['uuid']
- params = (self.ctxt, uuid, '/dev/vdb')
- db.block_device_mapping_destroy_by_instance_and_device(*params)
-
- bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
- self.assertEqual(len(bdms), 1)
- self.assertEqual(bdms[0]['device_name'], '/dev/vda')
-
- def test_block_device_mapping_get_by_volume_id(self):
- self._create_bdm({'volume_id': 'fake_id'})
- bdm = db.block_device_mapping_get_by_volume_id(self.ctxt, 'fake_id')
- self.assertEqual(bdm['volume_id'], 'fake_id')
-
- def test_block_device_mapping_get_by_volume_id_join_instance(self):
- self._create_bdm({'volume_id': 'fake_id'})
- bdm = db.block_device_mapping_get_by_volume_id(self.ctxt, 'fake_id',
- ['instance'])
- self.assertEqual(bdm['volume_id'], 'fake_id')
- self.assertEqual(bdm['instance']['uuid'], self.instance['uuid'])
-
-
-class AgentBuildTestCase(test.TestCase, ModelsObjectComparatorMixin):
-
- """Tests for db.api.agent_build_* methods."""
-
- def setUp(self):
- super(AgentBuildTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
-
- def test_agent_build_create_and_get_all(self):
- self.assertEqual(0, len(db.agent_build_get_all(self.ctxt)))
- agent_build = db.agent_build_create(self.ctxt, {'os': 'GNU/HURD'})
- all_agent_builds = db.agent_build_get_all(self.ctxt)
- self.assertEqual(1, len(all_agent_builds))
- self._assertEqualObjects(agent_build, all_agent_builds[0])
-
- def test_agent_build_get_by_triple(self):
- agent_build = db.agent_build_create(self.ctxt, {'hypervisor': 'kvm',
- 'os': 'FreeBSD', 'architecture': arch.X86_64})
- self.assertIsNone(db.agent_build_get_by_triple(self.ctxt, 'kvm',
- 'FreeBSD', 'i386'))
- self._assertEqualObjects(agent_build, db.agent_build_get_by_triple(
- self.ctxt, 'kvm', 'FreeBSD', arch.X86_64))
-
- def test_agent_build_destroy(self):
- agent_build = db.agent_build_create(self.ctxt, {})
- self.assertEqual(1, len(db.agent_build_get_all(self.ctxt)))
- db.agent_build_destroy(self.ctxt, agent_build.id)
- self.assertEqual(0, len(db.agent_build_get_all(self.ctxt)))
-
- def test_agent_build_update(self):
- agent_build = db.agent_build_create(self.ctxt, {'os': 'HaikuOS'})
- db.agent_build_update(self.ctxt, agent_build.id, {'os': 'ReactOS'})
- self.assertEqual('ReactOS', db.agent_build_get_all(self.ctxt)[0].os)
-
- def test_agent_build_destroy_destroyed(self):
- agent_build = db.agent_build_create(self.ctxt, {})
- db.agent_build_destroy(self.ctxt, agent_build.id)
- self.assertRaises(exception.AgentBuildNotFound,
- db.agent_build_destroy, self.ctxt, agent_build.id)
-
- def test_agent_build_update_destroyed(self):
- agent_build = db.agent_build_create(self.ctxt, {'os': 'HaikuOS'})
- db.agent_build_destroy(self.ctxt, agent_build.id)
- self.assertRaises(exception.AgentBuildNotFound,
- db.agent_build_update, self.ctxt, agent_build.id, {'os': 'OS/2'})
-
- def test_agent_build_exists(self):
- values = {'hypervisor': 'kvm', 'os': 'FreeBSD',
- 'architecture': arch.X86_64}
- db.agent_build_create(self.ctxt, values)
- self.assertRaises(exception.AgentBuildExists, db.agent_build_create,
- self.ctxt, values)
-
- def test_agent_build_get_all_by_hypervisor(self):
- values = {'hypervisor': 'kvm', 'os': 'FreeBSD',
- 'architecture': arch.X86_64}
- created = db.agent_build_create(self.ctxt, values)
- actual = db.agent_build_get_all(self.ctxt, hypervisor='kvm')
- self._assertEqualListsOfObjects([created], actual)
-
-
-class VirtualInterfaceTestCase(test.TestCase, ModelsObjectComparatorMixin):
- def setUp(self):
- super(VirtualInterfaceTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
- self.instance_uuid = db.instance_create(self.ctxt, {})['uuid']
- values = {'host': 'localhost', 'project_id': 'project1'}
- self.network = db.network_create_safe(self.ctxt, values)
-
- def _get_base_values(self):
- return {
- 'instance_uuid': self.instance_uuid,
- 'address': 'fake_address',
- 'network_id': self.network['id'],
- 'uuid': str(stdlib_uuid.uuid4())
- }
-
- def mock_db_query_first_to_raise_data_error_exception(self):
- self.mox.StubOutWithMock(query.Query, 'first')
- query.Query.first().AndRaise(db_exc.DBError())
- self.mox.ReplayAll()
-
- def _create_virt_interface(self, values):
- v = self._get_base_values()
- v.update(values)
- return db.virtual_interface_create(self.ctxt, v)
-
- def test_virtual_interface_create(self):
- vif = self._create_virt_interface({})
- self.assertIsNotNone(vif['id'])
- ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
- 'created_at', 'uuid']
- self._assertEqualObjects(vif, self._get_base_values(), ignored_keys)
-
- def test_virtual_interface_create_with_duplicate_address(self):
- vif = self._create_virt_interface({})
- self.assertRaises(exception.VirtualInterfaceCreateException,
- self._create_virt_interface, {"uuid": vif['uuid']})
-
- def test_virtual_interface_get(self):
- vifs = [self._create_virt_interface({'address': 'a'}),
- self._create_virt_interface({'address': 'b'})]
-
- for vif in vifs:
- real_vif = db.virtual_interface_get(self.ctxt, vif['id'])
- self._assertEqualObjects(vif, real_vif)
-
- def test_virtual_interface_get_by_address(self):
- vifs = [self._create_virt_interface({'address': 'first'}),
- self._create_virt_interface({'address': 'second'})]
- for vif in vifs:
- real_vif = db.virtual_interface_get_by_address(self.ctxt,
- vif['address'])
- self._assertEqualObjects(vif, real_vif)
-
- def test_virtual_interface_get_by_address_not_found(self):
- self.assertIsNone(db.virtual_interface_get_by_address(self.ctxt,
- "i.nv.ali.ip"))
-
- def test_virtual_interface_get_by_address_data_error_exception(self):
- self.mock_db_query_first_to_raise_data_error_exception()
- self.assertRaises(exception.InvalidIpAddressError,
- db.virtual_interface_get_by_address,
- self.ctxt,
- "i.nv.ali.ip")
-
- def test_virtual_interface_get_by_uuid(self):
- vifs = [self._create_virt_interface({"address": "address_1"}),
- self._create_virt_interface({"address": "address_2"})]
- for vif in vifs:
- real_vif = db.virtual_interface_get_by_uuid(self.ctxt, vif['uuid'])
- self._assertEqualObjects(vif, real_vif)
-
- def test_virtual_interface_get_by_instance(self):
- inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
- vifs1 = [self._create_virt_interface({'address': 'fake1'}),
- self._create_virt_interface({'address': 'fake2'})]
- # multiple nic of same instance
- vifs2 = [self._create_virt_interface({'address': 'fake3',
- 'instance_uuid': inst_uuid2}),
- self._create_virt_interface({'address': 'fake4',
- 'instance_uuid': inst_uuid2})]
- vifs1_real = db.virtual_interface_get_by_instance(self.ctxt,
- self.instance_uuid)
- vifs2_real = db.virtual_interface_get_by_instance(self.ctxt,
- inst_uuid2)
- self._assertEqualListsOfObjects(vifs1, vifs1_real)
- self._assertEqualOrderedListOfObjects(vifs2, vifs2_real)
-
- def test_virtual_interface_get_by_instance_and_network(self):
- inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
- values = {'host': 'localhost', 'project_id': 'project2'}
- network_id = db.network_create_safe(self.ctxt, values)['id']
-
- vifs = [self._create_virt_interface({'address': 'fake1'}),
- self._create_virt_interface({'address': 'fake2',
- 'network_id': network_id,
- 'instance_uuid': inst_uuid2}),
- self._create_virt_interface({'address': 'fake3',
- 'instance_uuid': inst_uuid2})]
- for vif in vifs:
- params = (self.ctxt, vif['instance_uuid'], vif['network_id'])
- r_vif = db.virtual_interface_get_by_instance_and_network(*params)
- self._assertEqualObjects(r_vif, vif)
-
- def test_virtual_interface_delete_by_instance(self):
- inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
-
- values = [dict(address='fake1'), dict(address='fake2'),
- dict(address='fake3', instance_uuid=inst_uuid2)]
- for vals in values:
- self._create_virt_interface(vals)
-
- db.virtual_interface_delete_by_instance(self.ctxt, self.instance_uuid)
-
- real_vifs1 = db.virtual_interface_get_by_instance(self.ctxt,
- self.instance_uuid)
- real_vifs2 = db.virtual_interface_get_by_instance(self.ctxt,
- inst_uuid2)
- self.assertEqual(len(real_vifs1), 0)
- self.assertEqual(len(real_vifs2), 1)
-
- def test_virtual_interface_get_all(self):
- inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
- values = [dict(address='fake1'), dict(address='fake2'),
- dict(address='fake3', instance_uuid=inst_uuid2)]
-
- vifs = [self._create_virt_interface(val) for val in values]
- real_vifs = db.virtual_interface_get_all(self.ctxt)
- self._assertEqualListsOfObjects(vifs, real_vifs)
-
-
-class NetworkTestCase(test.TestCase, ModelsObjectComparatorMixin):
-
- """Tests for db.api.network_* methods."""
-
- def setUp(self):
- super(NetworkTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
-
- def _get_associated_fixed_ip(self, host, cidr, ip):
- network = db.network_create_safe(self.ctxt,
- {'project_id': 'project1', 'cidr': cidr})
- self.assertFalse(db.network_in_use_on_host(self.ctxt, network.id,
- host))
- instance = db.instance_create(self.ctxt,
- {'project_id': 'project1', 'host': host})
- virtual_interface = db.virtual_interface_create(self.ctxt,
- {'instance_uuid': instance.uuid, 'network_id': network.id,
- 'address': ip})
- db.fixed_ip_create(self.ctxt, {'address': ip,
- 'network_id': network.id, 'allocated': True,
- 'virtual_interface_id': virtual_interface.id})
- db.fixed_ip_associate(self.ctxt, ip, instance.uuid,
- network.id)
- return network, instance
-
- def test_network_get_associated_default_route(self):
- network, instance = self._get_associated_fixed_ip('host.net',
- '192.0.2.0/30', '192.0.2.1')
- network2 = db.network_create_safe(self.ctxt,
- {'project_id': 'project1', 'cidr': '192.0.3.0/30'})
- ip = '192.0.3.1'
- virtual_interface = db.virtual_interface_create(self.ctxt,
- {'instance_uuid': instance.uuid, 'network_id': network2.id,
- 'address': ip})
- db.fixed_ip_create(self.ctxt, {'address': ip,
- 'network_id': network2.id, 'allocated': True,
- 'virtual_interface_id': virtual_interface.id})
- db.fixed_ip_associate(self.ctxt, ip, instance.uuid,
- network2.id)
- data = db.network_get_associated_fixed_ips(self.ctxt, network.id)
- self.assertEqual(1, len(data))
- self.assertTrue(data[0]['default_route'])
- data = db.network_get_associated_fixed_ips(self.ctxt, network2.id)
- self.assertEqual(1, len(data))
- self.assertFalse(data[0]['default_route'])
-
- def test_network_get_associated_fixed_ips(self):
- network, instance = self._get_associated_fixed_ip('host.net',
- '192.0.2.0/30', '192.0.2.1')
- data = db.network_get_associated_fixed_ips(self.ctxt, network.id)
- self.assertEqual(1, len(data))
- self.assertEqual('192.0.2.1', data[0]['address'])
- self.assertEqual('192.0.2.1', data[0]['vif_address'])
- self.assertEqual(instance.uuid, data[0]['instance_uuid'])
- self.assertTrue(data[0]['allocated'])
-
- def test_network_create_safe(self):
- values = {'host': 'localhost', 'project_id': 'project1'}
- network = db.network_create_safe(self.ctxt, values)
- self.assertEqual(36, len(network['uuid']))
- db_network = db.network_get(self.ctxt, network['id'])
- self._assertEqualObjects(network, db_network)
-
- def test_network_create_with_duplicate_vlan(self):
- values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
- values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 1}
- db.network_create_safe(self.ctxt, values1)
- self.assertRaises(exception.DuplicateVlan,
- db.network_create_safe, self.ctxt, values2)
-
- def test_network_delete_safe(self):
- values = {'host': 'localhost', 'project_id': 'project1'}
- network = db.network_create_safe(self.ctxt, values)
- db.network_get(self.ctxt, network['id'])
- values = {'network_id': network['id'], 'address': '192.168.1.5'}
- address1 = db.fixed_ip_create(self.ctxt, values)['address']
- values = {'network_id': network['id'],
- 'address': '192.168.1.6',
- 'allocated': True}
- address2 = db.fixed_ip_create(self.ctxt, values)['address']
- self.assertRaises(exception.NetworkInUse,
- db.network_delete_safe, self.ctxt, network['id'])
- db.fixed_ip_update(self.ctxt, address2, {'allocated': False})
- network = db.network_delete_safe(self.ctxt, network['id'])
- self.assertRaises(exception.FixedIpNotFoundForAddress,
- db.fixed_ip_get_by_address, self.ctxt, address1)
- ctxt = self.ctxt.elevated(read_deleted='yes')
- fixed_ip = db.fixed_ip_get_by_address(ctxt, address1)
- self.assertTrue(fixed_ip['deleted'])
-
- def test_network_in_use_on_host(self):
- values = {'host': 'foo', 'hostname': 'myname'}
- instance = db.instance_create(self.ctxt, values)
- values = {'address': '192.168.1.5', 'instance_uuid': instance['uuid']}
- vif = db.virtual_interface_create(self.ctxt, values)
- values = {'address': '192.168.1.6',
- 'network_id': 1,
- 'allocated': True,
- 'instance_uuid': instance['uuid'],
- 'virtual_interface_id': vif['id']}
- db.fixed_ip_create(self.ctxt, values)
- self.assertEqual(db.network_in_use_on_host(self.ctxt, 1, 'foo'), True)
- self.assertEqual(db.network_in_use_on_host(self.ctxt, 1, 'bar'), False)
-
- def test_network_update_nonexistent(self):
- self.assertRaises(exception.NetworkNotFound,
- db.network_update, self.ctxt, 123456, {})
-
- def test_network_update_with_duplicate_vlan(self):
- values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
- values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 2}
- network_ref = db.network_create_safe(self.ctxt, values1)
- db.network_create_safe(self.ctxt, values2)
- self.assertRaises(exception.DuplicateVlan,
- db.network_update, self.ctxt,
- network_ref["id"], values2)
-
- def test_network_update(self):
- network = db.network_create_safe(self.ctxt, {'project_id': 'project1',
- 'vlan': 1, 'host': 'test.com'})
- db.network_update(self.ctxt, network.id, {'vlan': 2})
- network_new = db.network_get(self.ctxt, network.id)
- self.assertEqual(2, network_new.vlan)
-
- def test_network_set_host_nonexistent_network(self):
- self.assertRaises(exception.NetworkNotFound,
- db.network_set_host, self.ctxt, 123456, 'nonexistent')
-
- def test_network_set_host_with_initially_no_host(self):
- values = {'host': 'example.com', 'project_id': 'project1'}
- network = db.network_create_safe(self.ctxt, values)
- self.assertEqual(
- db.network_set_host(self.ctxt, network.id, 'new.example.com'),
- 'example.com')
-
- def test_network_set_host(self):
- values = {'project_id': 'project1'}
- network = db.network_create_safe(self.ctxt, values)
- self.assertEqual(
- db.network_set_host(self.ctxt, network.id, 'example.com'),
- 'example.com')
- self.assertEqual('example.com',
- db.network_get(self.ctxt, network.id).host)
-
- def test_network_get_all_by_host(self):
- self.assertEqual([],
- db.network_get_all_by_host(self.ctxt, 'example.com'))
- host = 'h1.example.com'
- # network with host set
- net1 = db.network_create_safe(self.ctxt, {'host': host})
- self._assertEqualListsOfObjects([net1],
- db.network_get_all_by_host(self.ctxt, host))
- # network with fixed ip with host set
- net2 = db.network_create_safe(self.ctxt, {})
- db.fixed_ip_create(self.ctxt, {'host': host, 'network_id': net2.id})
- db.network_get_all_by_host(self.ctxt, host)
- self._assertEqualListsOfObjects([net1, net2],
- db.network_get_all_by_host(self.ctxt, host))
- # network with instance with host set
- net3 = db.network_create_safe(self.ctxt, {})
- instance = db.instance_create(self.ctxt, {'host': host})
- db.fixed_ip_create(self.ctxt, {'network_id': net3.id,
- 'instance_uuid': instance.uuid})
- self._assertEqualListsOfObjects([net1, net2, net3],
- db.network_get_all_by_host(self.ctxt, host))
-
- def test_network_get_by_cidr(self):
- cidr = '192.0.2.0/30'
- cidr_v6 = '2001:db8:1::/64'
- network = db.network_create_safe(self.ctxt,
- {'project_id': 'project1', 'cidr': cidr, 'cidr_v6': cidr_v6})
- self._assertEqualObjects(network,
- db.network_get_by_cidr(self.ctxt, cidr))
- self._assertEqualObjects(network,
- db.network_get_by_cidr(self.ctxt, cidr_v6))
-
- def test_network_get_by_cidr_nonexistent(self):
- self.assertRaises(exception.NetworkNotFoundForCidr,
- db.network_get_by_cidr, self.ctxt, '192.0.2.0/30')
-
- def test_network_get_by_uuid(self):
- network = db.network_create_safe(self.ctxt,
- {'project_id': 'project_1'})
- self._assertEqualObjects(network,
- db.network_get_by_uuid(self.ctxt, network.uuid))
-
- def test_network_get_by_uuid_nonexistent(self):
- self.assertRaises(exception.NetworkNotFoundForUUID,
- db.network_get_by_uuid, self.ctxt, 'non-existent-uuid')
-
- def test_network_get_all_by_uuids_no_networks(self):
- self.assertRaises(exception.NoNetworksFound,
- db.network_get_all_by_uuids, self.ctxt, ['non-existent-uuid'])
-
- def test_network_get_all_by_uuids(self):
- net1 = db.network_create_safe(self.ctxt, {})
- net2 = db.network_create_safe(self.ctxt, {})
- self._assertEqualListsOfObjects([net1, net2],
- db.network_get_all_by_uuids(self.ctxt, [net1.uuid, net2.uuid]))
-
- def test_network_get_all_no_networks(self):
- self.assertRaises(exception.NoNetworksFound,
- db.network_get_all, self.ctxt)
-
- def test_network_get_all(self):
- network = db.network_create_safe(self.ctxt, {})
- network_db = db.network_get_all(self.ctxt)
- self.assertEqual(1, len(network_db))
- self._assertEqualObjects(network, network_db[0])
-
- def test_network_get_all_admin_user(self):
- network1 = db.network_create_safe(self.ctxt, {})
- network2 = db.network_create_safe(self.ctxt,
- {'project_id': 'project1'})
- self._assertEqualListsOfObjects([network1, network2],
- db.network_get_all(self.ctxt,
- project_only=True))
-
- def test_network_get_all_normal_user(self):
- normal_ctxt = context.RequestContext('fake', 'fake')
- db.network_create_safe(self.ctxt, {})
- db.network_create_safe(self.ctxt, {'project_id': 'project1'})
- network1 = db.network_create_safe(self.ctxt,
- {'project_id': 'fake'})
- network_db = db.network_get_all(normal_ctxt, project_only=True)
- self.assertEqual(1, len(network_db))
- self._assertEqualObjects(network1, network_db[0])
-
- def test_network_get(self):
- network = db.network_create_safe(self.ctxt, {})
- self._assertEqualObjects(db.network_get(self.ctxt, network.id),
- network)
- db.network_delete_safe(self.ctxt, network.id)
- self.assertRaises(exception.NetworkNotFound,
- db.network_get, self.ctxt, network.id)
-
- def test_network_associate(self):
- network = db.network_create_safe(self.ctxt, {})
- self.assertIsNone(network.project_id)
- db.network_associate(self.ctxt, "project1", network.id)
- self.assertEqual("project1", db.network_get(self.ctxt,
- network.id).project_id)
-
- def test_network_diassociate(self):
- network = db.network_create_safe(self.ctxt,
- {'project_id': 'project1', 'host': 'test.net'})
- # disassociate project
- db.network_disassociate(self.ctxt, network.id, False, True)
- self.assertIsNone(db.network_get(self.ctxt, network.id).project_id)
- # disassociate host
- db.network_disassociate(self.ctxt, network.id, True, False)
- self.assertIsNone(db.network_get(self.ctxt, network.id).host)
-
- def test_network_count_reserved_ips(self):
- net = db.network_create_safe(self.ctxt, {})
- self.assertEqual(0, db.network_count_reserved_ips(self.ctxt, net.id))
- db.fixed_ip_create(self.ctxt, {'network_id': net.id,
- 'reserved': True})
- self.assertEqual(1, db.network_count_reserved_ips(self.ctxt, net.id))
-
-
-class KeyPairTestCase(test.TestCase, ModelsObjectComparatorMixin):
- def setUp(self):
- super(KeyPairTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
-
- def _create_key_pair(self, values):
- return db.key_pair_create(self.ctxt, values)
-
- def test_key_pair_create(self):
- param = {
- 'name': 'test_1',
- 'user_id': 'test_user_id_1',
- 'public_key': 'test_public_key_1',
- 'fingerprint': 'test_fingerprint_1'
- }
- key_pair = self._create_key_pair(param)
-
- self.assertIsNotNone(key_pair['id'])
- ignored_keys = ['deleted', 'created_at', 'updated_at',
- 'deleted_at', 'id']
- self._assertEqualObjects(key_pair, param, ignored_keys)
-
- def test_key_pair_create_with_duplicate_name(self):
- params = {'name': 'test_name', 'user_id': 'test_user_id'}
- self._create_key_pair(params)
- self.assertRaises(exception.KeyPairExists, self._create_key_pair,
- params)
-
- def test_key_pair_get(self):
- params = [
- {'name': 'test_1', 'user_id': 'test_user_id_1'},
- {'name': 'test_2', 'user_id': 'test_user_id_2'},
- {'name': 'test_3', 'user_id': 'test_user_id_3'}
- ]
- key_pairs = [self._create_key_pair(p) for p in params]
-
- for key in key_pairs:
- real_key = db.key_pair_get(self.ctxt, key['user_id'], key['name'])
- self._assertEqualObjects(key, real_key)
-
- def test_key_pair_get_no_results(self):
- param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
- self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
- self.ctxt, param['user_id'], param['name'])
-
- def test_key_pair_get_deleted(self):
- param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
- key_pair_created = self._create_key_pair(param)
-
- db.key_pair_destroy(self.ctxt, param['user_id'], param['name'])
- self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
- self.ctxt, param['user_id'], param['name'])
-
- ctxt = self.ctxt.elevated(read_deleted='yes')
- key_pair_deleted = db.key_pair_get(ctxt, param['user_id'],
- param['name'])
- ignored_keys = ['deleted', 'created_at', 'updated_at', 'deleted_at']
- self._assertEqualObjects(key_pair_deleted, key_pair_created,
- ignored_keys)
- self.assertEqual(key_pair_deleted['deleted'], key_pair_deleted['id'])
-
- def test_key_pair_get_all_by_user(self):
- params = [
- {'name': 'test_1', 'user_id': 'test_user_id_1'},
- {'name': 'test_2', 'user_id': 'test_user_id_1'},
- {'name': 'test_3', 'user_id': 'test_user_id_2'}
- ]
- key_pairs_user_1 = [self._create_key_pair(p) for p in params
- if p['user_id'] == 'test_user_id_1']
- key_pairs_user_2 = [self._create_key_pair(p) for p in params
- if p['user_id'] == 'test_user_id_2']
-
- real_keys_1 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_1')
- real_keys_2 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_2')
-
- self._assertEqualListsOfObjects(key_pairs_user_1, real_keys_1)
- self._assertEqualListsOfObjects(key_pairs_user_2, real_keys_2)
-
- def test_key_pair_count_by_user(self):
- params = [
- {'name': 'test_1', 'user_id': 'test_user_id_1'},
- {'name': 'test_2', 'user_id': 'test_user_id_1'},
- {'name': 'test_3', 'user_id': 'test_user_id_2'}
- ]
- for p in params:
- self._create_key_pair(p)
-
- count_1 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_1')
- self.assertEqual(count_1, 2)
-
- count_2 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_2')
- self.assertEqual(count_2, 1)
-
- def test_key_pair_destroy(self):
- param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
- self._create_key_pair(param)
-
- db.key_pair_destroy(self.ctxt, param['user_id'], param['name'])
- self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
- self.ctxt, param['user_id'], param['name'])
-
- def test_key_pair_destroy_no_such_key(self):
- param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
- self.assertRaises(exception.KeypairNotFound,
- db.key_pair_destroy, self.ctxt,
- param['user_id'], param['name'])
-
-
-class QuotaTestCase(test.TestCase, ModelsObjectComparatorMixin):
-
- """Tests for db.api.quota_* methods."""
-
- def setUp(self):
- super(QuotaTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
-
- def test_quota_create(self):
- quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
- self.assertEqual(quota.resource, 'resource')
- self.assertEqual(quota.hard_limit, 99)
- self.assertEqual(quota.project_id, 'project1')
-
- def test_quota_get(self):
- quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
- quota_db = db.quota_get(self.ctxt, 'project1', 'resource')
- self._assertEqualObjects(quota, quota_db)
-
- def test_quota_get_all_by_project(self):
- for i in range(3):
- for j in range(3):
- db.quota_create(self.ctxt, 'proj%d' % i, 'resource%d' % j, j)
- for i in range(3):
- quotas_db = db.quota_get_all_by_project(self.ctxt, 'proj%d' % i)
- self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
- 'resource0': 0,
- 'resource1': 1,
- 'resource2': 2})
-
- def test_quota_get_all_by_project_and_user(self):
- for i in range(3):
- for j in range(3):
- db.quota_create(self.ctxt, 'proj%d' % i, 'resource%d' % j,
- j - 1, user_id='user%d' % i)
- for i in range(3):
- quotas_db = db.quota_get_all_by_project_and_user(self.ctxt,
- 'proj%d' % i,
- 'user%d' % i)
- self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
- 'user_id': 'user%d' % i,
- 'resource0': -1,
- 'resource1': 0,
- 'resource2': 1})
-
- def test_quota_update(self):
- db.quota_create(self.ctxt, 'project1', 'resource1', 41)
- db.quota_update(self.ctxt, 'project1', 'resource1', 42)
- quota = db.quota_get(self.ctxt, 'project1', 'resource1')
- self.assertEqual(quota.hard_limit, 42)
- self.assertEqual(quota.resource, 'resource1')
- self.assertEqual(quota.project_id, 'project1')
-
- def test_quota_update_nonexistent(self):
- self.assertRaises(exception.ProjectQuotaNotFound,
- db.quota_update, self.ctxt, 'project1', 'resource1', 42)
-
- def test_quota_get_nonexistent(self):
- self.assertRaises(exception.ProjectQuotaNotFound,
- db.quota_get, self.ctxt, 'project1', 'resource1')
-
- def test_quota_reserve_all_resources(self):
- quotas = {}
- deltas = {}
- reservable_resources = {}
- for i, resource in enumerate(quota.resources):
- if isinstance(resource, quota.ReservableResource):
- quotas[resource.name] = db.quota_create(self.ctxt, 'project1',
- resource.name, 100)
- deltas[resource.name] = i
- reservable_resources[resource.name] = resource
-
- usages = {'instances': 3, 'cores': 6, 'ram': 9}
- instances = []
- for i in range(3):
- instances.append(db.instance_create(self.ctxt,
- {'vcpus': 2, 'memory_mb': 3,
- 'project_id': 'project1'}))
-
- usages['fixed_ips'] = 2
- network = db.network_create_safe(self.ctxt, {})
- for i in range(2):
- address = '192.168.0.%d' % i
- db.fixed_ip_create(self.ctxt, {'project_id': 'project1',
- 'address': address,
- 'network_id': network['id']})
- db.fixed_ip_associate(self.ctxt, address,
- instances[0].uuid, network['id'])
-
- usages['floating_ips'] = 5
- for i in range(5):
- db.floating_ip_create(self.ctxt, {'project_id': 'project1'})
-
- usages['security_groups'] = 3
- for i in range(3):
- db.security_group_create(self.ctxt, {'project_id': 'project1'})
-
- usages['server_groups'] = 4
- for i in range(4):
- db.instance_group_create(self.ctxt, {'uuid': str(i),
- 'project_id': 'project1'})
-
- reservations_uuids = db.quota_reserve(self.ctxt, reservable_resources,
- quotas, quotas, deltas, None,
- None, None, 'project1')
- resources_names = reservable_resources.keys()
- for reservation_uuid in reservations_uuids:
- reservation = _reservation_get(self.ctxt, reservation_uuid)
- usage = db.quota_usage_get(self.ctxt, 'project1',
- reservation.resource)
- self.assertEqual(usage.in_use, usages[reservation.resource],
- 'Resource: %s' % reservation.resource)
- self.assertEqual(usage.reserved, deltas[reservation.resource])
- self.assertIn(reservation.resource, resources_names)
- resources_names.remove(reservation.resource)
- self.assertEqual(len(resources_names), 0)
-
- def test_quota_destroy_all_by_project(self):
- reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
- db.quota_destroy_all_by_project(self.ctxt, 'project1')
- self.assertEqual(db.quota_get_all_by_project(self.ctxt, 'project1'),
- {'project_id': 'project1'})
- self.assertEqual(db.quota_get_all_by_project_and_user(self.ctxt,
- 'project1', 'user1'),
- {'project_id': 'project1', 'user_id': 'user1'})
- self.assertEqual(db.quota_usage_get_all_by_project(
- self.ctxt, 'project1'),
- {'project_id': 'project1'})
- for r in reservations:
- self.assertRaises(exception.ReservationNotFound,
- _reservation_get, self.ctxt, r)
-
- def test_quota_destroy_all_by_project_and_user(self):
- reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
- db.quota_destroy_all_by_project_and_user(self.ctxt, 'project1',
- 'user1')
- self.assertEqual(db.quota_get_all_by_project_and_user(self.ctxt,
- 'project1', 'user1'),
- {'project_id': 'project1',
- 'user_id': 'user1'})
- self.assertEqual(db.quota_usage_get_all_by_project_and_user(
- self.ctxt, 'project1', 'user1'),
- {'project_id': 'project1',
- 'user_id': 'user1',
- 'fixed_ips': {'in_use': 2, 'reserved': 2}})
- for r in reservations:
- self.assertRaises(exception.ReservationNotFound,
- _reservation_get, self.ctxt, r)
-
- def test_quota_usage_get_nonexistent(self):
- self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_get,
- self.ctxt, 'p1', 'nonexitent_resource')
-
- def test_quota_usage_get(self):
- _quota_reserve(self.ctxt, 'p1', 'u1')
- quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'resource0')
- expected = {'resource': 'resource0', 'project_id': 'p1',
- 'in_use': 0, 'reserved': 0, 'total': 0}
- for key, value in expected.iteritems():
- self.assertEqual(value, quota_usage[key])
-
- def test_quota_usage_get_all_by_project(self):
- _quota_reserve(self.ctxt, 'p1', 'u1')
- expected = {'project_id': 'p1',
- 'resource0': {'in_use': 0, 'reserved': 0},
- 'resource1': {'in_use': 1, 'reserved': 1},
- 'fixed_ips': {'in_use': 2, 'reserved': 2}}
- self.assertEqual(expected, db.quota_usage_get_all_by_project(
- self.ctxt, 'p1'))
-
- def test_quota_usage_get_all_by_project_and_user(self):
- _quota_reserve(self.ctxt, 'p1', 'u1')
- expected = {'project_id': 'p1',
- 'user_id': 'u1',
- 'resource0': {'in_use': 0, 'reserved': 0},
- 'resource1': {'in_use': 1, 'reserved': 1},
- 'fixed_ips': {'in_use': 2, 'reserved': 2}}
- self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
- self.ctxt, 'p1', 'u1'))
-
- def test_quota_usage_update_nonexistent(self):
- self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_update,
- self.ctxt, 'p1', 'u1', 'resource', in_use=42)
-
- def test_quota_usage_update(self):
- _quota_reserve(self.ctxt, 'p1', 'u1')
- db.quota_usage_update(self.ctxt, 'p1', 'u1', 'resource0', in_use=42,
- reserved=43)
- quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'resource0', 'u1')
- expected = {'resource': 'resource0', 'project_id': 'p1',
- 'user_id': 'u1', 'in_use': 42, 'reserved': 43, 'total': 85}
- for key, value in expected.iteritems():
- self.assertEqual(value, quota_usage[key])
-
- def test_quota_create_exists(self):
- db.quota_create(self.ctxt, 'project1', 'resource1', 41)
- self.assertRaises(exception.QuotaExists, db.quota_create, self.ctxt,
- 'project1', 'resource1', 42)
-
-
-class QuotaReserveNoDbTestCase(test.NoDBTestCase):
- """Tests quota reserve/refresh operations using mock."""
-
- def test_create_quota_usage_if_missing_not_created(self):
- # Tests that QuotaUsage isn't created if it's already in user_usages.
- resource = 'fake-resource'
- project_id = 'fake-project'
- user_id = 'fake_user'
- session = mock.sentinel
- quota_usage = mock.sentinel
- user_usages = {resource: quota_usage}
- with mock.patch.object(sqlalchemy_api, '_quota_usage_create') as quc:
- self.assertFalse(sqlalchemy_api._create_quota_usage_if_missing(
- user_usages, resource, None,
- project_id, user_id, session))
- self.assertFalse(quc.called)
-
- def _test_create_quota_usage_if_missing_created(self, per_project_quotas):
- # Tests that the QuotaUsage is created.
- user_usages = {}
- if per_project_quotas:
- resource = sqlalchemy_api.PER_PROJECT_QUOTAS[0]
- else:
- resource = 'fake-resource'
- project_id = 'fake-project'
- user_id = 'fake_user'
- session = mock.sentinel
- quota_usage = mock.sentinel
- with mock.patch.object(sqlalchemy_api, '_quota_usage_create',
- return_value=quota_usage) as quc:
- self.assertTrue(sqlalchemy_api._create_quota_usage_if_missing(
- user_usages, resource, None,
- project_id, user_id, session))
- self.assertEqual(quota_usage, user_usages[resource])
- # Now test if the QuotaUsage was created with a user_id or not.
- if per_project_quotas:
- quc.assert_called_once_with(
- project_id, None, resource, 0, 0, None, session=session)
- else:
- quc.assert_called_once_with(
- project_id, user_id, resource, 0, 0, None, session=session)
-
- def test_create_quota_usage_if_missing_created_per_project_quotas(self):
- self._test_create_quota_usage_if_missing_created(True)
-
- def test_create_quota_usage_if_missing_created_user_quotas(self):
- self._test_create_quota_usage_if_missing_created(False)
-
- def test_is_quota_refresh_needed_in_use(self):
- # Tests when a quota refresh is needed based on the in_use value.
- for in_use in range(-1, 1):
- # We have to set until_refresh=None otherwise mock will give it
- # a value which runs some code we don't want.
- quota_usage = mock.MagicMock(in_use=in_use, until_refresh=None)
- if in_use < 0:
- self.assertTrue(sqlalchemy_api._is_quota_refresh_needed(
- quota_usage, max_age=0))
- else:
- self.assertFalse(sqlalchemy_api._is_quota_refresh_needed(
- quota_usage, max_age=0))
-
- def test_is_quota_refresh_needed_until_refresh_none(self):
- quota_usage = mock.MagicMock(in_use=0, until_refresh=None)
- self.assertFalse(sqlalchemy_api._is_quota_refresh_needed(quota_usage,
- max_age=0))
-
- def test_is_quota_refresh_needed_until_refresh_not_none(self):
- # Tests different values for the until_refresh counter.
- for until_refresh in range(3):
- quota_usage = mock.MagicMock(in_use=0, until_refresh=until_refresh)
- refresh = sqlalchemy_api._is_quota_refresh_needed(quota_usage,
- max_age=0)
- until_refresh -= 1
- if until_refresh <= 0:
- self.assertTrue(refresh)
- else:
- self.assertFalse(refresh)
- self.assertEqual(until_refresh, quota_usage.until_refresh)
-
- def test_refresh_quota_usages(self):
- quota_usage = mock.Mock(spec=models.QuotaUsage)
- quota_usage.in_use = 5
- quota_usage.until_refresh = None
- sqlalchemy_api._refresh_quota_usages(quota_usage, until_refresh=5,
- in_use=6)
- self.assertEqual(6, quota_usage.in_use)
- self.assertEqual(5, quota_usage.until_refresh)
-
- def test_calculate_overquota_no_delta(self):
- deltas = {'foo': -1}
- user_quotas = {'foo': 10}
- overs = sqlalchemy_api._calculate_overquota({}, user_quotas, deltas,
- {}, {})
- self.assertFalse(overs)
-
- def test_calculate_overquota_unlimited_quota(self):
- deltas = {'foo': 1}
- project_quotas = {}
- user_quotas = {'foo': -1}
- project_usages = {}
- user_usages = {'foo': 10}
- overs = sqlalchemy_api._calculate_overquota(
- project_quotas, user_quotas, deltas, project_usages, user_usages)
- self.assertFalse(overs)
-
- def _test_calculate_overquota(self, resource, project_usages, user_usages):
- deltas = {resource: 1}
- project_quotas = {resource: 10}
- user_quotas = {resource: 10}
- overs = sqlalchemy_api._calculate_overquota(
- project_quotas, user_quotas, deltas, project_usages, user_usages)
- self.assertEqual(resource, overs[0])
-
- def test_calculate_overquota_per_project_quota_overquota(self):
- # In this test, user quotas are fine but project quotas are over.
- resource = 'foo'
- project_usages = {resource: {'total': 10}}
- user_usages = {resource: {'total': 5}}
- self._test_calculate_overquota(resource, project_usages, user_usages)
-
- def test_calculate_overquota_per_user_quota_overquota(self):
- # In this test, project quotas are fine but user quotas are over.
- resource = 'foo'
- project_usages = {resource: {'total': 5}}
- user_usages = {resource: {'total': 10}}
- self._test_calculate_overquota(resource, project_usages, user_usages)
-
-
-class QuotaClassTestCase(test.TestCase, ModelsObjectComparatorMixin):
-
- def setUp(self):
- super(QuotaClassTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
-
- def test_quota_class_get_default(self):
- params = {
- 'test_resource1': '10',
- 'test_resource2': '20',
- 'test_resource3': '30',
- }
- for res, limit in params.items():
- db.quota_class_create(self.ctxt, 'default', res, limit)
-
- defaults = db.quota_class_get_default(self.ctxt)
- self.assertEqual(defaults, dict(class_name='default',
- test_resource1=10,
- test_resource2=20,
- test_resource3=30))
-
- def test_quota_class_create(self):
- qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
- self.assertEqual(qc.class_name, 'class name')
- self.assertEqual(qc.resource, 'resource')
- self.assertEqual(qc.hard_limit, 42)
-
- def test_quota_class_get(self):
- qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
- qc_db = db.quota_class_get(self.ctxt, 'class name', 'resource')
- self._assertEqualObjects(qc, qc_db)
-
- def test_quota_class_get_nonexistent(self):
- self.assertRaises(exception.QuotaClassNotFound, db.quota_class_get,
- self.ctxt, 'nonexistent', 'resource')
-
- def test_quota_class_get_all_by_name(self):
- for i in range(3):
- for j in range(3):
- db.quota_class_create(self.ctxt, 'class%d' % i,
- 'resource%d' % j, j)
- for i in range(3):
- classes = db.quota_class_get_all_by_name(self.ctxt, 'class%d' % i)
- self.assertEqual(classes, {'class_name': 'class%d' % i,
- 'resource0': 0, 'resource1': 1, 'resource2': 2})
-
- def test_quota_class_update(self):
- db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
- db.quota_class_update(self.ctxt, 'class name', 'resource', 43)
- self.assertEqual(db.quota_class_get(self.ctxt, 'class name',
- 'resource').hard_limit, 43)
-
- def test_quota_class_update_nonexistent(self):
- self.assertRaises(exception.QuotaClassNotFound, db.quota_class_update,
- self.ctxt, 'class name', 'resource', 42)
-
- def test_refresh_quota_usages(self):
- quota_usages = mock.Mock()
- sqlalchemy_api._refresh_quota_usages(quota_usages, until_refresh=5,
- in_use=6)
-
-
-class S3ImageTestCase(test.TestCase):
-
- def setUp(self):
- super(S3ImageTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
- self.values = [uuidutils.generate_uuid() for i in xrange(3)]
- self.images = [db.s3_image_create(self.ctxt, uuid)
- for uuid in self.values]
-
- def test_s3_image_create(self):
- for ref in self.images:
- self.assertTrue(uuidutils.is_uuid_like(ref.uuid))
- self.assertEqual(sorted(self.values),
- sorted([ref.uuid for ref in self.images]))
-
- def test_s3_image_get_by_uuid(self):
- for uuid in self.values:
- ref = db.s3_image_get_by_uuid(self.ctxt, uuid)
- self.assertTrue(uuidutils.is_uuid_like(ref.uuid))
- self.assertEqual(uuid, ref.uuid)
-
- def test_s3_image_get(self):
- self.assertEqual(sorted(self.values),
- sorted([db.s3_image_get(self.ctxt, ref.id).uuid
- for ref in self.images]))
-
- def test_s3_image_get_not_found(self):
- self.assertRaises(exception.ImageNotFound, db.s3_image_get, self.ctxt,
- 100500)
-
- def test_s3_image_get_by_uuid_not_found(self):
- self.assertRaises(exception.ImageNotFound, db.s3_image_get_by_uuid,
- self.ctxt, uuidutils.generate_uuid())
-
-
-class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
-
- _ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
-
- def setUp(self):
- super(ComputeNodeTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
- self.service_dict = dict(host='host1', binary='nova-compute',
- topic=CONF.compute_topic, report_count=1,
- disabled=False)
- self.service = db.service_create(self.ctxt, self.service_dict)
- self.compute_node_dict = dict(vcpus=2, memory_mb=1024, local_gb=2048,
- vcpus_used=0, memory_mb_used=0,
- local_gb_used=0, free_ram_mb=1024,
- free_disk_gb=2048, hypervisor_type="xen",
- hypervisor_version=1, cpu_info="",
- running_vms=0, current_workload=0,
- service_id=self.service['id'],
- disk_available_least=100,
- hypervisor_hostname='abracadabra104',
- host_ip='127.0.0.1',
- supported_instances='',
- pci_stats='',
- metrics='',
- extra_resources='',
- stats='', numa_topology='')
- # add some random stats
- self.stats = dict(num_instances=3, num_proj_12345=2,
- num_proj_23456=2, num_vm_building=3)
- self.compute_node_dict['stats'] = jsonutils.dumps(self.stats)
- self.flags(reserved_host_memory_mb=0)
- self.flags(reserved_host_disk_mb=0)
- self.item = db.compute_node_create(self.ctxt, self.compute_node_dict)
-
- def test_compute_node_create(self):
- self._assertEqualObjects(self.compute_node_dict, self.item,
- ignored_keys=self._ignored_keys + ['stats'])
- new_stats = jsonutils.loads(self.item['stats'])
- self.assertEqual(self.stats, new_stats)
-
- def test_compute_node_get_all(self):
- date_fields = set(['created_at', 'updated_at',
- 'deleted_at', 'deleted'])
- for no_date_fields in [False, True]:
- nodes = db.compute_node_get_all(self.ctxt, no_date_fields)
- self.assertEqual(1, len(nodes))
- node = nodes[0]
- self._assertEqualObjects(self.compute_node_dict, node,
- ignored_keys=self._ignored_keys +
- ['stats', 'service'])
- node_fields = set(node.keys())
- if no_date_fields:
- self.assertFalse(date_fields & node_fields)
- else:
- self.assertTrue(date_fields <= node_fields)
- new_stats = jsonutils.loads(node['stats'])
- self.assertEqual(self.stats, new_stats)
-
- def test_compute_node_get_all_deleted_compute_node(self):
- # Create a service and compute node and ensure we can find its stats;
- # delete the service and compute node when done and loop again
- for x in range(2, 5):
- # Create a service
- service_data = self.service_dict.copy()
- service_data['host'] = 'host-%s' % x
- service = db.service_create(self.ctxt, service_data)
-
- # Create a compute node
- compute_node_data = self.compute_node_dict.copy()
- compute_node_data['service_id'] = service['id']
- compute_node_data['stats'] = jsonutils.dumps(self.stats.copy())
- compute_node_data['hypervisor_hostname'] = 'hypervisor-%s' % x
- node = db.compute_node_create(self.ctxt, compute_node_data)
-
- # Ensure the "new" compute node is found
- nodes = db.compute_node_get_all(self.ctxt, False)
- self.assertEqual(2, len(nodes))
- found = None
- for n in nodes:
- if n['id'] == node['id']:
- found = n
- break
- self.assertIsNotNone(found)
- # Now ensure the match has stats!
- self.assertNotEqual(jsonutils.loads(found['stats']), {})
-
- # Now delete the newly-created compute node to ensure the related
- # compute node stats are wiped in a cascaded fashion
- db.compute_node_delete(self.ctxt, node['id'])
-
- # Clean up the service
- db.service_destroy(self.ctxt, service['id'])
-
- def test_compute_node_get_all_mult_compute_nodes_one_service_entry(self):
- service_data = self.service_dict.copy()
- service_data['host'] = 'host2'
- service = db.service_create(self.ctxt, service_data)
-
- existing_node = dict(self.item.iteritems())
- existing_node['service'] = dict(self.service.iteritems())
- expected = [existing_node]
-
- for name in ['bm_node1', 'bm_node2']:
- compute_node_data = self.compute_node_dict.copy()
- compute_node_data['service_id'] = service['id']
- compute_node_data['stats'] = jsonutils.dumps(self.stats)
- compute_node_data['hypervisor_hostname'] = 'bm_node_1'
- node = db.compute_node_create(self.ctxt, compute_node_data)
-
- node = dict(node.iteritems())
- node['service'] = dict(service.iteritems())
-
- expected.append(node)
-
- result = sorted(db.compute_node_get_all(self.ctxt, False),
- key=lambda n: n['hypervisor_hostname'])
-
- self._assertEqualListsOfObjects(expected, result,
- ignored_keys=['stats'])
-
- def test_compute_node_get(self):
- compute_node_id = self.item['id']
- node = db.compute_node_get(self.ctxt, compute_node_id)
- self._assertEqualObjects(self.compute_node_dict, node,
- ignored_keys=self._ignored_keys + ['stats', 'service'])
- new_stats = jsonutils.loads(node['stats'])
- self.assertEqual(self.stats, new_stats)
-
- def test_compute_node_update(self):
- compute_node_id = self.item['id']
- stats = jsonutils.loads(self.item['stats'])
- # change some values:
- stats['num_instances'] = 8
- stats['num_tribbles'] = 1
- values = {
- 'vcpus': 4,
- 'stats': jsonutils.dumps(stats),
- }
- item_updated = db.compute_node_update(self.ctxt, compute_node_id,
- values)
- self.assertEqual(4, item_updated['vcpus'])
- new_stats = jsonutils.loads(item_updated['stats'])
- self.assertEqual(stats, new_stats)
-
- def test_compute_node_delete(self):
- compute_node_id = self.item['id']
- db.compute_node_delete(self.ctxt, compute_node_id)
- nodes = db.compute_node_get_all(self.ctxt)
- self.assertEqual(len(nodes), 0)
-
- def test_compute_node_search_by_hypervisor(self):
- nodes_created = []
- new_service = copy.copy(self.service_dict)
- for i in xrange(3):
- new_service['binary'] += str(i)
- new_service['topic'] += str(i)
- service = db.service_create(self.ctxt, new_service)
- self.compute_node_dict['service_id'] = service['id']
- self.compute_node_dict['hypervisor_hostname'] = 'testhost' + str(i)
- self.compute_node_dict['stats'] = jsonutils.dumps(self.stats)
- node = db.compute_node_create(self.ctxt, self.compute_node_dict)
- nodes_created.append(node)
- nodes = db.compute_node_search_by_hypervisor(self.ctxt, 'host')
- self.assertEqual(3, len(nodes))
- self._assertEqualListsOfObjects(nodes_created, nodes,
- ignored_keys=self._ignored_keys + ['stats', 'service'])
-
- def test_compute_node_statistics(self):
- stats = db.compute_node_statistics(self.ctxt)
- self.assertEqual(stats.pop('count'), 1)
- for k, v in stats.iteritems():
- self.assertEqual(v, self.item[k])
-
- def test_compute_node_statistics_disabled_service(self):
- serv = db.service_get_by_host_and_topic(
- self.ctxt, 'host1', CONF.compute_topic)
- db.service_update(self.ctxt, serv['id'], {'disabled': True})
- stats = db.compute_node_statistics(self.ctxt)
- self.assertEqual(stats.pop('count'), 0)
-
- def test_compute_node_not_found(self):
- self.assertRaises(exception.ComputeHostNotFound, db.compute_node_get,
- self.ctxt, 100500)
-
- def test_compute_node_update_always_updates_updated_at(self):
- item_updated = db.compute_node_update(self.ctxt,
- self.item['id'], {})
- self.assertNotEqual(self.item['updated_at'],
- item_updated['updated_at'])
-
- def test_compute_node_update_override_updated_at(self):
- # Update the record once so updated_at is set.
- first = db.compute_node_update(self.ctxt, self.item['id'],
- {'free_ram_mb': '12'})
- self.assertIsNotNone(first['updated_at'])
-
- # Update a second time. Make sure that the updated_at value we send
- # is overridden.
- second = db.compute_node_update(self.ctxt, self.item['id'],
- {'updated_at': first.updated_at,
- 'free_ram_mb': '13'})
- self.assertNotEqual(first['updated_at'], second['updated_at'])
-
-
-class ProviderFwRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
-
- def setUp(self):
- super(ProviderFwRuleTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
- self.values = self._get_rule_values()
- self.rules = [db.provider_fw_rule_create(self.ctxt, rule)
- for rule in self.values]
-
- def _get_rule_values(self):
- cidr_samples = ['192.168.0.0/24', '10.1.2.3/32',
- '2001:4f8:3:ba::/64',
- '2001:4f8:3:ba:2e0:81ff:fe22:d1f1/128']
- values = []
- for i in xrange(len(cidr_samples)):
- rule = {}
- rule['protocol'] = 'foo' + str(i)
- rule['from_port'] = 9999 + i
- rule['to_port'] = 9898 + i
- rule['cidr'] = cidr_samples[i]
- values.append(rule)
- return values
-
- def test_provider_fw_rule_create(self):
- ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
- 'updated_at']
- for i, rule in enumerate(self.values):
- self._assertEqualObjects(self.rules[i], rule,
- ignored_keys=ignored_keys)
-
- def test_provider_fw_rule_get_all(self):
- self._assertEqualListsOfObjects(self.rules,
- db.provider_fw_rule_get_all(self.ctxt))
-
- def test_provider_fw_rule_destroy(self):
- for rule in self.rules:
- db.provider_fw_rule_destroy(self.ctxt, rule.id)
- self.assertEqual([], db.provider_fw_rule_get_all(self.ctxt))
-
-
-class CertificateTestCase(test.TestCase, ModelsObjectComparatorMixin):
-
- def setUp(self):
- super(CertificateTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
- self.created = self._certificates_create()
-
- def _get_certs_values(self):
- base_values = {
- 'user_id': 'user',
- 'project_id': 'project',
- 'file_name': 'filename'
- }
- return [dict((k, v + str(x)) for k, v in base_values.iteritems())
- for x in xrange(1, 4)]
-
- def _certificates_create(self):
- return [db.certificate_create(self.ctxt, cert)
- for cert in self._get_certs_values()]
-
- def test_certificate_create(self):
- ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
- 'updated_at']
- for i, cert in enumerate(self._get_certs_values()):
- self._assertEqualObjects(self.created[i], cert,
- ignored_keys=ignored_keys)
-
- def test_certificate_get_all_by_project(self):
- cert = db.certificate_get_all_by_project(self.ctxt,
- self.created[1].project_id)
- self._assertEqualObjects(self.created[1], cert[0])
-
- def test_certificate_get_all_by_user(self):
- cert = db.certificate_get_all_by_user(self.ctxt,
- self.created[1].user_id)
- self._assertEqualObjects(self.created[1], cert[0])
-
- def test_certificate_get_all_by_user_and_project(self):
- cert = db.certificate_get_all_by_user_and_project(self.ctxt,
- self.created[1].user_id, self.created[1].project_id)
- self._assertEqualObjects(self.created[1], cert[0])
-
-
-class ConsoleTestCase(test.TestCase, ModelsObjectComparatorMixin):
-
- def setUp(self):
- super(ConsoleTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
- pools_data = [
- {'address': '192.168.10.10',
- 'username': 'user1',
- 'password': 'passwd1',
- 'console_type': 'type1',
- 'public_hostname': 'public_host1',
- 'host': 'host1',
- 'compute_host': 'compute_host1',
- },
- {'address': '192.168.10.11',
- 'username': 'user2',
- 'password': 'passwd2',
- 'console_type': 'type2',
- 'public_hostname': 'public_host2',
- 'host': 'host2',
- 'compute_host': 'compute_host2',
- },
- ]
- self.console_pools = [db.console_pool_create(self.ctxt, val)
- for val in pools_data]
- instance_uuid = uuidutils.generate_uuid()
- db.instance_create(self.ctxt, {'uuid': instance_uuid})
- self.console_data = [dict([('instance_name', 'name' + str(x)),
- ('instance_uuid', instance_uuid),
- ('password', 'pass' + str(x)),
- ('port', 7878 + x),
- ('pool_id', self.console_pools[x]['id'])])
- for x in xrange(len(pools_data))]
- self.consoles = [db.console_create(self.ctxt, val)
- for val in self.console_data]
-
- def test_console_create(self):
- ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
- 'updated_at']
- for console in self.consoles:
- self.assertIsNotNone(console['id'])
- self._assertEqualListsOfObjects(self.console_data, self.consoles,
- ignored_keys=ignored_keys)
-
- def test_console_get_by_id(self):
- console = self.consoles[0]
- console_get = db.console_get(self.ctxt, console['id'])
- self._assertEqualObjects(console, console_get,
- ignored_keys=['pool'])
-
- def test_console_get_by_id_uuid(self):
- console = self.consoles[0]
- console_get = db.console_get(self.ctxt, console['id'],
- console['instance_uuid'])
- self._assertEqualObjects(console, console_get,
- ignored_keys=['pool'])
-
- def test_console_get_by_pool_instance(self):
- console = self.consoles[0]
- console_get = db.console_get_by_pool_instance(self.ctxt,
- console['pool_id'], console['instance_uuid'])
- self._assertEqualObjects(console, console_get,
- ignored_keys=['pool'])
-
- def test_console_get_all_by_instance(self):
- instance_uuid = self.consoles[0]['instance_uuid']
- consoles_get = db.console_get_all_by_instance(self.ctxt, instance_uuid)
- self._assertEqualListsOfObjects(self.consoles, consoles_get)
-
- def test_console_get_all_by_instance_with_pool(self):
- instance_uuid = self.consoles[0]['instance_uuid']
- consoles_get = db.console_get_all_by_instance(self.ctxt, instance_uuid,
- columns_to_join=['pool'])
- self._assertEqualListsOfObjects(self.consoles, consoles_get,
- ignored_keys=['pool'])
- self._assertEqualListsOfObjects([pool for pool in self.console_pools],
- [c['pool'] for c in consoles_get])
-
- def test_console_get_all_by_instance_empty(self):
- consoles_get = db.console_get_all_by_instance(self.ctxt,
- uuidutils.generate_uuid())
- self.assertEqual(consoles_get, [])
-
- def test_console_delete(self):
- console_id = self.consoles[0]['id']
- db.console_delete(self.ctxt, console_id)
- self.assertRaises(exception.ConsoleNotFound, db.console_get,
- self.ctxt, console_id)
-
- def test_console_get_by_pool_instance_not_found(self):
- self.assertRaises(exception.ConsoleNotFoundInPoolForInstance,
- db.console_get_by_pool_instance, self.ctxt,
- self.consoles[0]['pool_id'],
- uuidutils.generate_uuid())
-
- def test_console_get_not_found(self):
- self.assertRaises(exception.ConsoleNotFound, db.console_get,
- self.ctxt, 100500)
-
- def test_console_get_not_found_instance(self):
- self.assertRaises(exception.ConsoleNotFoundForInstance, db.console_get,
- self.ctxt, self.consoles[0]['id'],
- uuidutils.generate_uuid())
-
-
-class CellTestCase(test.TestCase, ModelsObjectComparatorMixin):
-
- _ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
-
- def setUp(self):
- super(CellTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
-
- def _get_cell_base_values(self):
- return {
- 'name': 'myname',
- 'api_url': 'apiurl',
- 'transport_url': 'transporturl',
- 'weight_offset': 0.5,
- 'weight_scale': 1.5,
- 'is_parent': True,
- }
-
- def _cell_value_modify(self, value, step):
- if isinstance(value, str):
- return value + str(step)
- elif isinstance(value, float):
- return value + step + 0.6
- elif isinstance(value, bool):
- return bool(step % 2)
- elif isinstance(value, int):
- return value + step
-
- def _create_cells(self):
- test_values = []
- for x in xrange(1, 4):
- modified_val = dict([(k, self._cell_value_modify(v, x))
- for k, v in self._get_cell_base_values().iteritems()])
- db.cell_create(self.ctxt, modified_val)
- test_values.append(modified_val)
- return test_values
-
- def test_cell_create(self):
- cell = db.cell_create(self.ctxt, self._get_cell_base_values())
- self.assertIsNotNone(cell['id'])
- self._assertEqualObjects(cell, self._get_cell_base_values(),
- ignored_keys=self._ignored_keys)
-
- def test_cell_update(self):
- db.cell_create(self.ctxt, self._get_cell_base_values())
- new_values = {
- 'api_url': 'apiurl1',
- 'transport_url': 'transporturl1',
- 'weight_offset': 0.6,
- 'weight_scale': 1.6,
- 'is_parent': False,
- }
- test_cellname = self._get_cell_base_values()['name']
- updated_cell = db.cell_update(self.ctxt, test_cellname, new_values)
- self._assertEqualObjects(updated_cell, new_values,
- ignored_keys=self._ignored_keys + ['name'])
-
- def test_cell_delete(self):
- new_cells = self._create_cells()
- for cell in new_cells:
- test_cellname = cell['name']
- db.cell_delete(self.ctxt, test_cellname)
- self.assertRaises(exception.CellNotFound, db.cell_get, self.ctxt,
- test_cellname)
-
- def test_cell_get(self):
- new_cells = self._create_cells()
- for cell in new_cells:
- cell_get = db.cell_get(self.ctxt, cell['name'])
- self._assertEqualObjects(cell_get, cell,
- ignored_keys=self._ignored_keys)
-
- def test_cell_get_all(self):
- new_cells = self._create_cells()
- cells = db.cell_get_all(self.ctxt)
- self.assertEqual(len(new_cells), len(cells))
- cells_byname = dict([(newcell['name'],
- newcell) for newcell in new_cells])
- for cell in cells:
- self._assertEqualObjects(cell, cells_byname[cell['name']],
- self._ignored_keys)
-
- def test_cell_get_not_found(self):
- self._create_cells()
- self.assertRaises(exception.CellNotFound, db.cell_get, self.ctxt,
- 'cellnotinbase')
-
- def test_cell_update_not_found(self):
- self._create_cells()
- self.assertRaises(exception.CellNotFound, db.cell_update, self.ctxt,
- 'cellnotinbase', self._get_cell_base_values())
-
- def test_cell_create_exists(self):
- db.cell_create(self.ctxt, self._get_cell_base_values())
- self.assertRaises(exception.CellExists, db.cell_create,
- self.ctxt, self._get_cell_base_values())
-
-
-class ConsolePoolTestCase(test.TestCase, ModelsObjectComparatorMixin):
- def setUp(self):
- super(ConsolePoolTestCase, self).setUp()
-
- self.ctxt = context.get_admin_context()
- self.test_console_pool_1 = {
- 'address': '192.168.2.10',
- 'username': 'user_1',
- 'password': 'secret_123',
- 'console_type': 'type_1',
- 'public_hostname': 'public_hostname_123',
- 'host': 'localhost',
- 'compute_host': '127.0.0.1',
- }
- self.test_console_pool_2 = {
- 'address': '192.168.2.11',
- 'username': 'user_2',
- 'password': 'secret_1234',
- 'console_type': 'type_2',
- 'public_hostname': 'public_hostname_1234',
- 'host': '127.0.0.1',
- 'compute_host': 'localhost',
- }
- self.test_console_pool_3 = {
- 'address': '192.168.2.12',
- 'username': 'user_3',
- 'password': 'secret_12345',
- 'console_type': 'type_2',
- 'public_hostname': 'public_hostname_12345',
- 'host': '127.0.0.1',
- 'compute_host': '192.168.1.1',
- }
-
- def test_console_pool_create(self):
- console_pool = db.console_pool_create(
- self.ctxt, self.test_console_pool_1)
- self.assertIsNotNone(console_pool.get('id'))
- ignored_keys = ['deleted', 'created_at', 'updated_at',
- 'deleted_at', 'id']
- self._assertEqualObjects(
- console_pool, self.test_console_pool_1, ignored_keys)
-
- def test_console_pool_create_duplicate(self):
- db.console_pool_create(self.ctxt, self.test_console_pool_1)
- self.assertRaises(exception.ConsolePoolExists, db.console_pool_create,
- self.ctxt, self.test_console_pool_1)
-
- def test_console_pool_get_by_host_type(self):
- params = [
- self.test_console_pool_1,
- self.test_console_pool_2,
- ]
-
- for p in params:
- db.console_pool_create(self.ctxt, p)
-
- ignored_keys = ['deleted', 'created_at', 'updated_at',
- 'deleted_at', 'id', 'consoles']
-
- cp = self.test_console_pool_1
- db_cp = db.console_pool_get_by_host_type(
- self.ctxt, cp['compute_host'], cp['host'], cp['console_type']
- )
- self._assertEqualObjects(cp, db_cp, ignored_keys)
-
- def test_console_pool_get_by_host_type_no_resuls(self):
- self.assertRaises(
- exception.ConsolePoolNotFoundForHostType,
- db.console_pool_get_by_host_type, self.ctxt, 'compute_host',
- 'host', 'console_type')
-
- def test_console_pool_get_all_by_host_type(self):
- params = [
- self.test_console_pool_1,
- self.test_console_pool_2,
- self.test_console_pool_3,
- ]
- for p in params:
- db.console_pool_create(self.ctxt, p)
- ignored_keys = ['deleted', 'created_at', 'updated_at',
- 'deleted_at', 'id', 'consoles']
-
- cp = self.test_console_pool_2
- db_cp = db.console_pool_get_all_by_host_type(
- self.ctxt, cp['host'], cp['console_type'])
-
- self._assertEqualListsOfObjects(
- db_cp, [self.test_console_pool_2, self.test_console_pool_3],
- ignored_keys)
-
- def test_console_pool_get_all_by_host_type_no_results(self):
- res = db.console_pool_get_all_by_host_type(
- self.ctxt, 'cp_host', 'cp_console_type')
- self.assertEqual([], res)
-
-
-class DnsdomainTestCase(test.TestCase):
-
- def setUp(self):
- super(DnsdomainTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
- self.domain = 'test.domain'
- self.testzone = 'testzone'
- self.project = 'fake'
-
- def test_dnsdomain_register_for_zone(self):
- db.dnsdomain_register_for_zone(self.ctxt, self.domain, self.testzone)
- domain = db.dnsdomain_get(self.ctxt, self.domain)
- self.assertEqual(domain['domain'], self.domain)
- self.assertEqual(domain['availability_zone'], self.testzone)
- self.assertEqual(domain['scope'], 'private')
-
- def test_dnsdomain_register_for_project(self):
- db.dnsdomain_register_for_project(self.ctxt, self.domain, self.project)
- domain = db.dnsdomain_get(self.ctxt, self.domain)
- self.assertEqual(domain['domain'], self.domain)
- self.assertEqual(domain['project_id'], self.project)
- self.assertEqual(domain['scope'], 'public')
-
- def test_dnsdomain_list(self):
- d_list = ['test.domain.one', 'test.domain.two']
- db.dnsdomain_register_for_zone(self.ctxt, d_list[0], self.testzone)
- db.dnsdomain_register_for_project(self.ctxt, d_list[1], self.project)
- db_list = db.dnsdomain_list(self.ctxt)
- self.assertEqual(sorted(d_list), sorted(db_list))
-
- def test_dnsdomain_unregister(self):
- db.dnsdomain_register_for_zone(self.ctxt, self.domain, self.testzone)
- db.dnsdomain_unregister(self.ctxt, self.domain)
- domain = db.dnsdomain_get(self.ctxt, self.domain)
- self.assertIsNone(domain)
-
- def test_dnsdomain_get_all(self):
- d_list = ['test.domain.one', 'test.domain.two']
- db.dnsdomain_register_for_zone(self.ctxt, d_list[0], 'zone')
- db.dnsdomain_register_for_zone(self.ctxt, d_list[1], 'zone')
- db_list = db.dnsdomain_get_all(self.ctxt)
- db_domain_list = [d.domain for d in db_list]
- self.assertEqual(sorted(d_list), sorted(db_domain_list))
-
-
-class BwUsageTestCase(test.TestCase, ModelsObjectComparatorMixin):
-
- _ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
-
- def setUp(self):
- super(BwUsageTestCase, self).setUp()
- self.ctxt = context.get_admin_context()
- self.useFixture(test.TimeOverride())
-
- def test_bw_usage_get_by_uuids(self):
- now = timeutils.utcnow()
- start_period = now - datetime.timedelta(seconds=10)
- uuid3_refreshed = now - datetime.timedelta(seconds=5)
-
- expected_bw_usages = {
- 'fake_uuid1': {'uuid': 'fake_uuid1',
- 'mac': 'fake_mac1',
- 'start_period': start_period,
- 'bw_in': 100,
- 'bw_out': 200,
- 'last_ctr_in': 12345,
- 'last_ctr_out': 67890,
- 'last_refreshed': now},
- 'fake_uuid2': {'uuid': 'fake_uuid2',
- 'mac': 'fake_mac2',
- 'start_period': start_period,
- 'bw_in': 200,
- 'bw_out': 300,
- 'last_ctr_in': 22345,
- 'last_ctr_out': 77890,
- 'last_refreshed': now},
- 'fake_uuid3': {'uuid': 'fake_uuid3',
- 'mac': 'fake_mac3',
- 'start_period': start_period,
- 'bw_in': 400,
- 'bw_out': 500,
- 'last_ctr_in': 32345,
- 'last_ctr_out': 87890,
- 'last_refreshed': uuid3_refreshed}
- }
-
- bw_usages = db.bw_usage_get_by_uuids(self.ctxt,
- ['fake_uuid1', 'fake_uuid2'], start_period)
- # No matches
- self.assertEqual(len(bw_usages), 0)
-
- # Add 3 entries
- db.bw_usage_update(self.ctxt, 'fake_uuid1',
- 'fake_mac1', start_period,
- 100, 200, 12345, 67890)
- db.bw_usage_update(self.ctxt, 'fake_uuid2',
- 'fake_mac2', start_period,
- 100, 200, 42, 42)
- # Test explicit refreshed time
- db.bw_usage_update(self.ctxt, 'fake_uuid3',
- 'fake_mac3', start_period,
- 400, 500, 32345, 87890,
- last_refreshed=uuid3_refreshed)
- # Update 2nd entry
- db.bw_usage_update(self.ctxt, 'fake_uuid2',
- 'fake_mac2', start_period,
- 200, 300, 22345, 77890)
-
- bw_usages = db.bw_usage_get_by_uuids(self.ctxt,
- ['fake_uuid1', 'fake_uuid2', 'fake_uuid3'], start_period)
- self.assertEqual(len(bw_usages), 3)
- for usage in bw_usages:
- self._assertEqualObjects(expected_bw_usages[usage['uuid']], usage,
- ignored_keys=self._ignored_keys)
-
- def test_bw_usage_get(self):
- now = timeutils.utcnow()
- start_period = now - datetime.timedelta(seconds=10)
-
- expected_bw_usage = {'uuid': 'fake_uuid1',
- 'mac': 'fake_mac1',
- 'start_period': start_period,
- 'bw_in': 100,
- 'bw_out': 200,
- 'last_ctr_in': 12345,
- 'last_ctr_out': 67890,
- 'last_refreshed': now}
-
- bw_usage = db.bw_usage_get(self.ctxt, 'fake_uuid1', start_period,
- 'fake_mac1')
- self.assertIsNone(bw_usage)
-
- db.bw_usage_update(self.ctxt, 'fake_uuid1',
- 'fake_mac1', start_period,
- 100, 200, 12345, 67890)
-
- bw_usage = db.bw_usage_get(self.ctxt, 'fake_uuid1', start_period,
- 'fake_mac1')
- self._assertEqualObjects(bw_usage, expected_bw_usage,
- ignored_keys=self._ignored_keys)
-
-
-class Ec2TestCase(test.TestCase):
-
- def setUp(self):
- super(Ec2TestCase, self).setUp()
- self.ctxt = context.RequestContext('fake_user', 'fake_project')
-
- def test_ec2_ids_not_found_are_printable(self):
- def check_exc_format(method, value):
- try:
- method(self.ctxt, value)
- except exception.NotFound as exc:
- self.assertIn(six.text_type(value), six.text_type(exc))
-
- check_exc_format(db.get_ec2_instance_id_by_uuid, 'fake')
- check_exc_format(db.get_instance_uuid_by_ec2_id, 123456)
- check_exc_format(db.ec2_snapshot_get_by_ec2_id, 123456)
- check_exc_format(db.ec2_snapshot_get_by_uuid, 'fake')
-
- def test_ec2_volume_create(self):
- vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
- self.assertIsNotNone(vol['id'])
- self.assertEqual(vol['uuid'], 'fake-uuid')
-
- def test_ec2_volume_get_by_id(self):
- vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
- vol2 = db.ec2_volume_get_by_id(self.ctxt, vol['id'])
- self.assertEqual(vol2['uuid'], vol['uuid'])
-
- def test_ec2_volume_get_by_uuid(self):
- vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
- vol2 = db.ec2_volume_get_by_uuid(self.ctxt, vol['uuid'])
- self.assertEqual(vol2['id'], vol['id'])
-
- def test_ec2_snapshot_create(self):
- snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
- self.assertIsNotNone(snap['id'])
- self.assertEqual(snap['uuid'], 'fake-uuid')
-
- def test_ec2_snapshot_get_by_ec2_id(self):
- snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
- snap2 = db.ec2_snapshot_get_by_ec2_id(self.ctxt, snap['id'])
- self.assertEqual(snap2['uuid'], 'fake-uuid')
-
- def test_ec2_snapshot_get_by_uuid(self):
- snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
- snap2 = db.ec2_snapshot_get_by_uuid(self.ctxt, 'fake-uuid')
- self.assertEqual(snap['id'], snap2['id'])
-
- def test_ec2_snapshot_get_by_ec2_id_not_found(self):
- self.assertRaises(exception.SnapshotNotFound,
- db.ec2_snapshot_get_by_ec2_id,
- self.ctxt, 123456)
-
- def test_ec2_snapshot_get_by_uuid_not_found(self):
- self.assertRaises(exception.SnapshotNotFound,
- db.ec2_snapshot_get_by_uuid,
- self.ctxt, 'fake-uuid')
-
- def test_ec2_instance_create(self):
- inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
- self.assertIsNotNone(inst['id'])
- self.assertEqual(inst['uuid'], 'fake-uuid')
-
- def test_ec2_instance_get_by_uuid(self):
- inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
- inst2 = db.ec2_instance_get_by_uuid(self.ctxt, 'fake-uuid')
- self.assertEqual(inst['id'], inst2['id'])
-
- def test_ec2_instance_get_by_id(self):
- inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
- inst2 = db.ec2_instance_get_by_id(self.ctxt, inst['id'])
- self.assertEqual(inst['id'], inst2['id'])
-
- def test_ec2_instance_get_by_uuid_not_found(self):
- self.assertRaises(exception.InstanceNotFound,
- db.ec2_instance_get_by_uuid,
- self.ctxt, 'uuid-not-present')
-
- def test_ec2_instance_get_by_id_not_found(self):
- self.assertRaises(exception.InstanceNotFound,
- db.ec2_instance_get_by_uuid,
- self.ctxt, 12345)
-
- def test_get_ec2_instance_id_by_uuid(self):
- inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
- inst_id = db.get_ec2_instance_id_by_uuid(self.ctxt, 'fake-uuid')
- self.assertEqual(inst['id'], inst_id)
-
- def test_get_instance_uuid_by_ec2_id(self):
- inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
- inst_uuid = db.get_instance_uuid_by_ec2_id(self.ctxt, inst['id'])
- self.assertEqual(inst_uuid, 'fake-uuid')
-
- def test_get_ec2_instance_id_by_uuid_not_found(self):
- self.assertRaises(exception.InstanceNotFound,
- db.get_ec2_instance_id_by_uuid,
- self.ctxt, 'uuid-not-present')
-
- def test_get_instance_uuid_by_ec2_id_not_found(self):
- self.assertRaises(exception.InstanceNotFound,
- db.get_instance_uuid_by_ec2_id,
- self.ctxt, 100500)
-
-
-class ArchiveTestCase(test.TestCase):
-
- def setUp(self):
- super(ArchiveTestCase, self).setUp()
- self.context = context.get_admin_context()
- self.engine = get_engine()
- self.conn = self.engine.connect()
- self.instance_id_mappings = sqlalchemyutils.get_table(
- self.engine, "instance_id_mappings")
- self.shadow_instance_id_mappings = sqlalchemyutils.get_table(
- self.engine, "shadow_instance_id_mappings")
- self.dns_domains = sqlalchemyutils.get_table(
- self.engine, "dns_domains")
- self.shadow_dns_domains = sqlalchemyutils.get_table(
- self.engine, "shadow_dns_domains")
- self.consoles = sqlalchemyutils.get_table(self.engine, "consoles")
- self.console_pools = sqlalchemyutils.get_table(
- self.engine, "console_pools")
- self.shadow_consoles = sqlalchemyutils.get_table(
- self.engine, "shadow_consoles")
- self.shadow_console_pools = sqlalchemyutils.get_table(
- self.engine, "shadow_console_pools")
- self.instances = sqlalchemyutils.get_table(self.engine, "instances")
- self.shadow_instances = sqlalchemyutils.get_table(
- self.engine, "shadow_instances")
- self.uuidstrs = []
- for unused in range(6):
- self.uuidstrs.append(stdlib_uuid.uuid4().hex)
- self.ids = []
- self.id_tablenames_to_cleanup = set(["console_pools", "consoles"])
- self.uuid_tablenames_to_cleanup = set(["instance_id_mappings",
- "instances"])
- self.domain_tablenames_to_cleanup = set(["dns_domains"])
-
- def tearDown(self):
- super(ArchiveTestCase, self).tearDown()
- for tablename in self.id_tablenames_to_cleanup:
- for name in [tablename, "shadow_" + tablename]:
- table = sqlalchemyutils.get_table(self.engine, name)
- del_statement = table.delete(table.c.id.in_(self.ids))
- self.conn.execute(del_statement)
- for tablename in self.uuid_tablenames_to_cleanup:
- for name in [tablename, "shadow_" + tablename]:
- table = sqlalchemyutils.get_table(self.engine, name)
- del_statement = table.delete(table.c.uuid.in_(self.uuidstrs))
- self.conn.execute(del_statement)
- for tablename in self.domain_tablenames_to_cleanup:
- for name in [tablename, "shadow_" + tablename]:
- table = sqlalchemyutils.get_table(self.engine, name)
- del_statement = table.delete(table.c.domain.in_(self.uuidstrs))
- self.conn.execute(del_statement)
-
- def test_shadow_tables(self):
- metadata = MetaData(bind=self.engine)
- metadata.reflect()
- for table_name in metadata.tables:
- # NOTE(rpodolyaka): migration 209 introduced a few new tables,
- # which don't have shadow tables and it's
- # completely OK, so we should skip them here
- if table_name.startswith("dump_"):
- continue
-
- if table_name.startswith("shadow_"):
- self.assertIn(table_name[7:], metadata.tables)
- continue
- self.assertTrue(db_utils.check_shadow_table(self.engine,
- table_name))
-
- def test_archive_deleted_rows(self):
- # Add 6 rows to table
- for uuidstr in self.uuidstrs:
- ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
- self.conn.execute(ins_stmt)
- # Set 4 to deleted
- update_statement = self.instance_id_mappings.update().\
- where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
- .values(deleted=1)
- self.conn.execute(update_statement)
- qiim = sql.select([self.instance_id_mappings]).where(self.
- instance_id_mappings.c.uuid.in_(self.uuidstrs))
- rows = self.conn.execute(qiim).fetchall()
- # Verify we have 6 in main
- self.assertEqual(len(rows), 6)
- qsiim = sql.select([self.shadow_instance_id_mappings]).\
- where(self.shadow_instance_id_mappings.c.uuid.in_(
- self.uuidstrs))
- rows = self.conn.execute(qsiim).fetchall()
- # Verify we have 0 in shadow
- self.assertEqual(len(rows), 0)
- # Archive 2 rows
- db.archive_deleted_rows(self.context, max_rows=2)
- rows = self.conn.execute(qiim).fetchall()
- # Verify we have 4 left in main
- self.assertEqual(len(rows), 4)
- rows = self.conn.execute(qsiim).fetchall()
- # Verify we have 2 in shadow
- self.assertEqual(len(rows), 2)
- # Archive 2 more rows
- db.archive_deleted_rows(self.context, max_rows=2)
- rows = self.conn.execute(qiim).fetchall()
- # Verify we have 2 left in main
- self.assertEqual(len(rows), 2)
- rows = self.conn.execute(qsiim).fetchall()
- # Verify we have 4 in shadow
- self.assertEqual(len(rows), 4)
- # Try to archive more, but there are no deleted rows left.
- db.archive_deleted_rows(self.context, max_rows=2)
- rows = self.conn.execute(qiim).fetchall()
- # Verify we still have 2 left in main
- self.assertEqual(len(rows), 2)
- rows = self.conn.execute(qsiim).fetchall()
- # Verify we still have 4 in shadow
- self.assertEqual(len(rows), 4)
-
- def test_archive_deleted_rows_for_every_uuid_table(self):
- tablenames = []
- for model_class in models.__dict__.itervalues():
- if hasattr(model_class, "__tablename__"):
- tablenames.append(model_class.__tablename__)
- tablenames.sort()
- for tablename in tablenames:
- ret = self._test_archive_deleted_rows_for_one_uuid_table(tablename)
- if ret == 0:
- self.uuid_tablenames_to_cleanup.add(tablename)
-
- def _test_archive_deleted_rows_for_one_uuid_table(self, tablename):
- """:returns: 0 on success, 1 if no uuid column, 2 if insert failed."""
- main_table = sqlalchemyutils.get_table(self.engine, tablename)
- if not hasattr(main_table.c, "uuid"):
- # Not a uuid table, so skip it.
- return 1
- shadow_table = sqlalchemyutils.get_table(
- self.engine, "shadow_" + tablename)
- # Add 6 rows to table
- for uuidstr in self.uuidstrs:
- ins_stmt = main_table.insert().values(uuid=uuidstr)
- try:
- self.conn.execute(ins_stmt)
- except db_exc.DBError:
- # This table has constraints that require a table-specific
- # insert, so skip it.
- return 2
- # Set 4 to deleted
- update_statement = main_table.update().\
- where(main_table.c.uuid.in_(self.uuidstrs[:4]))\
- .values(deleted=1)
- self.conn.execute(update_statement)
- qmt = sql.select([main_table]).where(main_table.c.uuid.in_(
- self.uuidstrs))
- rows = self.conn.execute(qmt).fetchall()
- # Verify we have 6 in main
- self.assertEqual(len(rows), 6)
- qst = sql.select([shadow_table]).\
- where(shadow_table.c.uuid.in_(self.uuidstrs))
- rows = self.conn.execute(qst).fetchall()
- # Verify we have 0 in shadow
- self.assertEqual(len(rows), 0)
- # Archive 2 rows
- db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
- # Verify we have 4 left in main
- rows = self.conn.execute(qmt).fetchall()
- self.assertEqual(len(rows), 4)
- # Verify we have 2 in shadow
- rows = self.conn.execute(qst).fetchall()
- self.assertEqual(len(rows), 2)
- # Archive 2 more rows
- db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
- # Verify we have 2 left in main
- rows = self.conn.execute(qmt).fetchall()
- self.assertEqual(len(rows), 2)
- # Verify we have 4 in shadow
- rows = self.conn.execute(qst).fetchall()
- self.assertEqual(len(rows), 4)
- # Try to archive more, but there are no deleted rows left.
- db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
- # Verify we still have 2 left in main
- rows = self.conn.execute(qmt).fetchall()
- self.assertEqual(len(rows), 2)
- # Verify we still have 4 in shadow
- rows = self.conn.execute(qst).fetchall()
- self.assertEqual(len(rows), 4)
- return 0
-
- def test_archive_deleted_rows_no_id_column(self):
- uuidstr0 = self.uuidstrs[0]
- ins_stmt = self.dns_domains.insert().values(domain=uuidstr0)
- self.conn.execute(ins_stmt)
- update_statement = self.dns_domains.update().\
- where(self.dns_domains.c.domain == uuidstr0).\
- values(deleted=True)
- self.conn.execute(update_statement)
- qdd = sql.select([self.dns_domains], self.dns_domains.c.domain ==
- uuidstr0)
- rows = self.conn.execute(qdd).fetchall()
- self.assertEqual(len(rows), 1)
- qsdd = sql.select([self.shadow_dns_domains],
- self.shadow_dns_domains.c.domain == uuidstr0)
- rows = self.conn.execute(qsdd).fetchall()
- self.assertEqual(len(rows), 0)
- db.archive_deleted_rows(self.context, max_rows=1)
- rows = self.conn.execute(qdd).fetchall()
- self.assertEqual(len(rows), 0)
- rows = self.conn.execute(qsdd).fetchall()
- self.assertEqual(len(rows), 1)
-
- def test_archive_deleted_rows_fk_constraint(self):
- # consoles.pool_id depends on console_pools.id
- # SQLite doesn't enforce foreign key constraints without a pragma.
- dialect = self.engine.url.get_dialect()
- if dialect == sqlite.dialect:
- # We're seeing issues with foreign key support in SQLite 3.6.20
- # SQLAlchemy doesn't support it at all with < SQLite 3.6.19
- # It works fine in SQLite 3.7.
- # So return early to skip this test if running SQLite < 3.7
- import sqlite3
- tup = sqlite3.sqlite_version_info
- if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7):
- self.skipTest(
- 'sqlite version too old for reliable SQLA foreign_keys')
- self.conn.execute("PRAGMA foreign_keys = ON")
- ins_stmt = self.console_pools.insert().values(deleted=1)
- result = self.conn.execute(ins_stmt)
- id1 = result.inserted_primary_key[0]
- self.ids.append(id1)
- ins_stmt = self.consoles.insert().values(deleted=1,
- pool_id=id1)
- result = self.conn.execute(ins_stmt)
- id2 = result.inserted_primary_key[0]
- self.ids.append(id2)
- # The first try to archive console_pools should fail, due to FK.
- num = db.archive_deleted_rows_for_table(self.context, "console_pools")
- self.assertEqual(num, 0)
- # Then archiving consoles should work.
- num = db.archive_deleted_rows_for_table(self.context, "consoles")
- self.assertEqual(num, 1)
- # Then archiving console_pools should work.
- num = db.archive_deleted_rows_for_table(self.context, "console_pools")
- self.assertEqual(num, 1)
-
- def test_archive_deleted_rows_2_tables(self):
- # Add 6 rows to each table
- for uuidstr in self.uuidstrs:
- ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
- self.conn.execute(ins_stmt)
- ins_stmt2 = self.instances.insert().values(uuid=uuidstr)
- self.conn.execute(ins_stmt2)
- # Set 4 of each to deleted
- update_statement = self.instance_id_mappings.update().\
- where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
- .values(deleted=1)
- self.conn.execute(update_statement)
- update_statement2 = self.instances.update().\
- where(self.instances.c.uuid.in_(self.uuidstrs[:4]))\
- .values(deleted=1)
- self.conn.execute(update_statement2)
- # Verify we have 6 in each main table
- qiim = sql.select([self.instance_id_mappings]).where(
- self.instance_id_mappings.c.uuid.in_(self.uuidstrs))
- rows = self.conn.execute(qiim).fetchall()
- self.assertEqual(len(rows), 6)
- qi = sql.select([self.instances]).where(self.instances.c.uuid.in_(
- self.uuidstrs))
- rows = self.conn.execute(qi).fetchall()
- self.assertEqual(len(rows), 6)
- # Verify we have 0 in each shadow table
- qsiim = sql.select([self.shadow_instance_id_mappings]).\
- where(self.shadow_instance_id_mappings.c.uuid.in_(
- self.uuidstrs))
- rows = self.conn.execute(qsiim).fetchall()
- self.assertEqual(len(rows), 0)
- qsi = sql.select([self.shadow_instances]).\
- where(self.shadow_instances.c.uuid.in_(self.uuidstrs))
- rows = self.conn.execute(qsi).fetchall()
- self.assertEqual(len(rows), 0)
- # Archive 7 rows, which should be 4 in one table and 3 in the other.
- db.archive_deleted_rows(self.context, max_rows=7)
- # Verify we have 5 left in the two main tables combined
- iim_rows = self.conn.execute(qiim).fetchall()
- i_rows = self.conn.execute(qi).fetchall()
- self.assertEqual(len(iim_rows) + len(i_rows), 5)
- # Verify we have 7 in the two shadow tables combined.
- siim_rows = self.conn.execute(qsiim).fetchall()
- si_rows = self.conn.execute(qsi).fetchall()
- self.assertEqual(len(siim_rows) + len(si_rows), 7)
- # Archive the remaining deleted rows.
- db.archive_deleted_rows(self.context, max_rows=1)
- # Verify we have 4 total left in both main tables.
- iim_rows = self.conn.execute(qiim).fetchall()
- i_rows = self.conn.execute(qi).fetchall()
- self.assertEqual(len(iim_rows) + len(i_rows), 4)
- # Verify we have 8 in shadow
- siim_rows = self.conn.execute(qsiim).fetchall()
- si_rows = self.conn.execute(qsi).fetchall()
- self.assertEqual(len(siim_rows) + len(si_rows), 8)
- # Try to archive more, but there are no deleted rows left.
- db.archive_deleted_rows(self.context, max_rows=500)
- # Verify we have 4 total left in both main tables.
- iim_rows = self.conn.execute(qiim).fetchall()
- i_rows = self.conn.execute(qi).fetchall()
- self.assertEqual(len(iim_rows) + len(i_rows), 4)
- # Verify we have 8 in shadow
- siim_rows = self.conn.execute(qsiim).fetchall()
- si_rows = self.conn.execute(qsi).fetchall()
- self.assertEqual(len(siim_rows) + len(si_rows), 8)
-
-
-class InstanceGroupDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
- def setUp(self):
- super(InstanceGroupDBApiTestCase, self).setUp()
- self.user_id = 'fake_user'
- self.project_id = 'fake_project'
- self.context = context.RequestContext(self.user_id, self.project_id)
-
- def _get_default_values(self):
- return {'name': 'fake_name',
- 'user_id': self.user_id,
- 'project_id': self.project_id}
-
- def _create_instance_group(self, context, values, policies=None,
- members=None):
- return db.instance_group_create(context, values, policies=policies,
- members=members)
-
- def test_instance_group_create_no_key(self):
- values = self._get_default_values()
- result = self._create_instance_group(self.context, values)
- ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
- 'created_at']
- self._assertEqualObjects(result, values, ignored_keys)
- self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
-
- def test_instance_group_create_with_key(self):
- values = self._get_default_values()
- values['uuid'] = 'fake_id'
- result = self._create_instance_group(self.context, values)
- ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
- 'created_at']
- self._assertEqualObjects(result, values, ignored_keys)
-
- def test_instance_group_create_with_same_key(self):
- values = self._get_default_values()
- values['uuid'] = 'fake_id'
- self._create_instance_group(self.context, values)
- self.assertRaises(exception.InstanceGroupIdExists,
- self._create_instance_group, self.context, values)
-
- def test_instance_group_get(self):
- values = self._get_default_values()
- result1 = self._create_instance_group(self.context, values)
- result2 = db.instance_group_get(self.context, result1['uuid'])
- self._assertEqualObjects(result1, result2)
-
- def test_instance_group_update_simple(self):
- values = self._get_default_values()
- result1 = self._create_instance_group(self.context, values)
- values = {'name': 'new_name', 'user_id': 'new_user',
- 'project_id': 'new_project'}
- db.instance_group_update(self.context, result1['uuid'],
- values)
- result2 = db.instance_group_get(self.context, result1['uuid'])
- self.assertEqual(result1['uuid'], result2['uuid'])
- ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
- 'created_at']
- self._assertEqualObjects(result2, values, ignored_keys)
-
- def test_instance_group_delete(self):
- values = self._get_default_values()
- result = self._create_instance_group(self.context, values)
- db.instance_group_delete(self.context, result['uuid'])
- self.assertRaises(exception.InstanceGroupNotFound,
- db.instance_group_delete, self.context,
- result['uuid'])
-
- def test_instance_group_get_nonexistent(self):
- self.assertRaises(exception.InstanceGroupNotFound,
- db.instance_group_get,
- self.context,
- 'nonexistent')
-
- def test_instance_group_delete_nonexistent(self):
- self.assertRaises(exception.InstanceGroupNotFound,
- db.instance_group_delete,
- self.context,
- 'nonexistent')
-
- def test_instance_group_get_all(self):
- groups = db.instance_group_get_all(self.context)
- self.assertEqual(0, len(groups))
- value = self._get_default_values()
- result1 = self._create_instance_group(self.context, value)
- groups = db.instance_group_get_all(self.context)
- self.assertEqual(1, len(groups))
- value = self._get_default_values()
- result2 = self._create_instance_group(self.context, value)
- groups = db.instance_group_get_all(self.context)
- results = [result1, result2]
- self._assertEqualListsOfObjects(results, groups)
-
- def test_instance_group_get_all_by_project_id(self):
- groups = db.instance_group_get_all_by_project_id(self.context,
- 'invalid_project_id')
- self.assertEqual(0, len(groups))
- values = self._get_default_values()
- result1 = self._create_instance_group(self.context, values)
- groups = db.instance_group_get_all_by_project_id(self.context,
- 'fake_project')
- self.assertEqual(1, len(groups))
- values = self._get_default_values()
- values['project_id'] = 'new_project_id'
- result2 = self._create_instance_group(self.context, values)
- groups = db.instance_group_get_all(self.context)
- results = [result1, result2]
- self._assertEqualListsOfObjects(results, groups)
- projects = [{'name': 'fake_project', 'value': [result1]},
- {'name': 'new_project_id', 'value': [result2]}]
- for project in projects:
- groups = db.instance_group_get_all_by_project_id(self.context,
- project['name'])
- self._assertEqualListsOfObjects(project['value'], groups)
-
- def test_instance_group_update(self):
- values = self._get_default_values()
- result = self._create_instance_group(self.context, values)
- ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
- 'created_at']
- self._assertEqualObjects(result, values, ignored_keys)
- self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
- id = result['uuid']
- values = self._get_default_values()
- values['name'] = 'new_fake_name'
- db.instance_group_update(self.context, id, values)
- result = db.instance_group_get(self.context, id)
- self.assertEqual(result['name'], 'new_fake_name')
- # update update members
- values = self._get_default_values()
- members = ['instance_id1', 'instance_id2']
- values['members'] = members
- db.instance_group_update(self.context, id, values)
- result = db.instance_group_get(self.context, id)
- self._assertEqualListsOfPrimitivesAsSets(result['members'], members)
- # update update policies
- values = self._get_default_values()
- policies = ['policy1', 'policy2']
- values['policies'] = policies
- db.instance_group_update(self.context, id, values)
- result = db.instance_group_get(self.context, id)
- self._assertEqualListsOfPrimitivesAsSets(result['policies'], policies)
- # test invalid ID
- self.assertRaises(exception.InstanceGroupNotFound,
- db.instance_group_update, self.context,
- 'invalid_id', values)
-
- def test_instance_group_get_by_instance(self):
- values = self._get_default_values()
- group1 = self._create_instance_group(self.context, values)
-
- members = ['instance_id1', 'instance_id2']
- db.instance_group_members_add(self.context, group1.uuid, members)
-
- group2 = db.instance_group_get_by_instance(self.context,
- 'instance_id1')
-
- self.assertEqual(group2.uuid, group1.uuid)
-
-
-class InstanceGroupMembersDBApiTestCase(InstanceGroupDBApiTestCase):
- def test_instance_group_members_on_create(self):
- values = self._get_default_values()
- values['uuid'] = 'fake_id'
- members = ['instance_id1', 'instance_id2']
- result = self._create_instance_group(self.context, values,
- members=members)
- ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
- 'created_at']
- self._assertEqualObjects(result, values, ignored_keys)
- self._assertEqualListsOfPrimitivesAsSets(result['members'], members)
-
- def test_instance_group_members_add(self):
- values = self._get_default_values()
- values['uuid'] = 'fake_id'
- result = self._create_instance_group(self.context, values)
- id = result['uuid']
- members = db.instance_group_members_get(self.context, id)
- self.assertEqual(members, [])
- members2 = ['instance_id1', 'instance_id2']
- db.instance_group_members_add(self.context, id, members2)
- members = db.instance_group_members_get(self.context, id)
- self._assertEqualListsOfPrimitivesAsSets(members, members2)
-
- def test_instance_group_members_update(self):
- values = self._get_default_values()
- values['uuid'] = 'fake_id'
- result = self._create_instance_group(self.context, values)
- id = result['uuid']
- members2 = ['instance_id1', 'instance_id2']
- db.instance_group_members_add(self.context, id, members2)
- members = db.instance_group_members_get(self.context, id)
- self._assertEqualListsOfPrimitivesAsSets(members, members2)
- # check add with existing keys
- members3 = ['instance_id1', 'instance_id2', 'instance_id3']
- db.instance_group_members_add(self.context, id, members3)
- members = db.instance_group_members_get(self.context, id)
- self._assertEqualListsOfPrimitivesAsSets(members, members3)
-
- def test_instance_group_members_delete(self):
- values = self._get_default_values()
- values['uuid'] = 'fake_id'
- result = self._create_instance_group(self.context, values)
- id = result['uuid']
- members3 = ['instance_id1', 'instance_id2', 'instance_id3']
- db.instance_group_members_add(self.context, id, members3)
- members = db.instance_group_members_get(self.context, id)
- self._assertEqualListsOfPrimitivesAsSets(members, members3)
- for instance_id in members3[:]:
- db.instance_group_member_delete(self.context, id, instance_id)
- members3.remove(instance_id)
- members = db.instance_group_members_get(self.context, id)
- self._assertEqualListsOfPrimitivesAsSets(members, members3)
-
- def test_instance_group_members_invalid_ids(self):
- values = self._get_default_values()
- result = self._create_instance_group(self.context, values)
- id = result['uuid']
- self.assertRaises(exception.InstanceGroupNotFound,
- db.instance_group_members_get,
- self.context, 'invalid')
- self.assertRaises(exception.InstanceGroupNotFound,
- db.instance_group_member_delete, self.context,
- 'invalidid', 'instance_id1')
- members = ['instance_id1', 'instance_id2']
- db.instance_group_members_add(self.context, id, members)
- self.assertRaises(exception.InstanceGroupMemberNotFound,
- db.instance_group_member_delete,
- self.context, id, 'invalid_id')
-
-
-class InstanceGroupPoliciesDBApiTestCase(InstanceGroupDBApiTestCase):
- def test_instance_group_policies_on_create(self):
- values = self._get_default_values()
- values['uuid'] = 'fake_id'
- policies = ['policy1', 'policy2']
- result = self._create_instance_group(self.context, values,
- policies=policies)
- ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
- 'created_at']
- self._assertEqualObjects(result, values, ignored_keys)
- self._assertEqualListsOfPrimitivesAsSets(result['policies'], policies)
-
- def test_instance_group_policies_add(self):
- values = self._get_default_values()
- values['uuid'] = 'fake_id'
- result = self._create_instance_group(self.context, values)
- id = result['uuid']
- policies = db.instance_group_policies_get(self.context, id)
- self.assertEqual(policies, [])
- policies2 = ['policy1', 'policy2']
- db.instance_group_policies_add(self.context, id, policies2)
- policies = db.instance_group_policies_get(self.context, id)
- self._assertEqualListsOfPrimitivesAsSets(policies, policies2)
-
- def test_instance_group_policies_update(self):
- values = self._get_default_values()
- values['uuid'] = 'fake_id'
- result = self._create_instance_group(self.context, values)
- id = result['uuid']
- policies2 = ['policy1', 'policy2']
- db.instance_group_policies_add(self.context, id, policies2)
- policies = db.instance_group_policies_get(self.context, id)
- self._assertEqualListsOfPrimitivesAsSets(policies, policies2)
- policies3 = ['policy1', 'policy2', 'policy3']
- db.instance_group_policies_add(self.context, id, policies3)
- policies = db.instance_group_policies_get(self.context, id)
- self._assertEqualListsOfPrimitivesAsSets(policies, policies3)
-
- def test_instance_group_policies_delete(self):
- values = self._get_default_values()
- values['uuid'] = 'fake_id'
- result = self._create_instance_group(self.context, values)
- id = result['uuid']
- policies3 = ['policy1', 'policy2', 'policy3']
- db.instance_group_policies_add(self.context, id, policies3)
- policies = db.instance_group_policies_get(self.context, id)
- self._assertEqualListsOfPrimitivesAsSets(policies, policies3)
- for policy in policies3[:]:
- db.instance_group_policy_delete(self.context, id, policy)
- policies3.remove(policy)
- policies = db.instance_group_policies_get(self.context, id)
- self._assertEqualListsOfPrimitivesAsSets(policies, policies3)
-
- def test_instance_group_policies_invalid_ids(self):
- values = self._get_default_values()
- result = self._create_instance_group(self.context, values)
- id = result['uuid']
- self.assertRaises(exception.InstanceGroupNotFound,
- db.instance_group_policies_get,
- self.context, 'invalid')
- self.assertRaises(exception.InstanceGroupNotFound,
- db.instance_group_policy_delete, self.context,
- 'invalidid', 'policy1')
- policies = ['policy1', 'policy2']
- db.instance_group_policies_add(self.context, id, policies)
- self.assertRaises(exception.InstanceGroupPolicyNotFound,
- db.instance_group_policy_delete,
- self.context, id, 'invalid_policy')
-
-
-class PciDeviceDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
- def setUp(self):
- super(PciDeviceDBApiTestCase, self).setUp()
- self.user_id = 'fake_user'
- self.project_id = 'fake_project'
- self.context = context.RequestContext(self.user_id, self.project_id)
- self.admin_context = context.get_admin_context()
- self.ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
- 'created_at']
-
- def _get_fake_pci_devs(self):
- return {'id': 3353,
- 'compute_node_id': 1,
- 'address': '0000:0f:08.7',
- 'vendor_id': '8086',
- 'product_id': '1520',
- 'dev_type': 'type-VF',
- 'dev_id': 'pci_0000:0f:08.7',
- 'extra_info': None,
- 'label': 'label_8086_1520',
- 'status': 'available',
- 'instance_uuid': '00000000-0000-0000-0000-000000000010',
- 'request_id': None,
- }, {'id': 3356,
- 'compute_node_id': 1,
- 'address': '0000:0f:03.7',
- 'vendor_id': '8083',
- 'product_id': '1523',
- 'dev_type': 'type-VF',
- 'dev_id': 'pci_0000:0f:08.7',
- 'extra_info': None,
- 'label': 'label_8086_1520',
- 'status': 'available',
- 'instance_uuid': '00000000-0000-0000-0000-000000000010',
- 'request_id': None,
- }
-
- def _create_fake_pci_devs(self):
- v1, v2 = self._get_fake_pci_devs()
- db.pci_device_update(self.admin_context, v1['compute_node_id'],
- v1['address'], v1)
- db.pci_device_update(self.admin_context, v2['compute_node_id'],
- v2['address'], v2)
- return (v1, v2)
-
- def test_pci_device_get_by_addr(self):
- v1, v2 = self._create_fake_pci_devs()
- result = db.pci_device_get_by_addr(self.admin_context, 1,
- '0000:0f:08.7')
- self._assertEqualObjects(v1, result, self.ignored_keys)
-
- def test_pci_device_get_by_addr_not_found(self):
- self._create_fake_pci_devs()
- self.assertRaises(exception.PciDeviceNotFound,
- db.pci_device_get_by_addr, self.admin_context,
- 1, '0000:0f:08:09')
-
- def test_pci_device_get_by_addr_low_priv(self):
- self._create_fake_pci_devs()
- self.assertRaises(exception.AdminRequired,
- db.pci_device_get_by_addr,
- self.context, 1, '0000:0f:08.7')
-
- def test_pci_device_get_by_id(self):
- v1, v2 = self._create_fake_pci_devs()
- result = db.pci_device_get_by_id(self.admin_context, 3353)
- self._assertEqualObjects(v1, result, self.ignored_keys)
-
- def test_pci_device_get_by_id_not_found(self):
- self._create_fake_pci_devs()
- self.assertRaises(exception.PciDeviceNotFoundById,
- db.pci_device_get_by_id,
- self.admin_context, 3354)
-
- def test_pci_device_get_by_id_low_priv(self):
- self._create_fake_pci_devs()
- self.assertRaises(exception.AdminRequired,
- db.pci_device_get_by_id,
- self.context, 3553)
-
- def test_pci_device_get_all_by_node(self):
- v1, v2 = self._create_fake_pci_devs()
- results = db.pci_device_get_all_by_node(self.admin_context, 1)
- self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
-
- def test_pci_device_get_all_by_node_empty(self):
- v1, v2 = self._get_fake_pci_devs()
- results = db.pci_device_get_all_by_node(self.admin_context, 9)
- self.assertEqual(len(results), 0)
-
- def test_pci_device_get_all_by_node_low_priv(self):
- self._create_fake_pci_devs()
- self.assertRaises(exception.AdminRequired,
- db.pci_device_get_all_by_node,
- self.context, 1)
-
- def test_pci_device_get_by_instance_uuid(self):
- v1, v2 = self._get_fake_pci_devs()
- v1['status'] = 'allocated'
- v2['status'] = 'allocated'
- db.pci_device_update(self.admin_context, v1['compute_node_id'],
- v1['address'], v1)
- db.pci_device_update(self.admin_context, v2['compute_node_id'],
- v2['address'], v2)
- results = db.pci_device_get_all_by_instance_uuid(
- self.context,
- '00000000-0000-0000-0000-000000000010')
- self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
-
- def test_pci_device_get_by_instance_uuid_check_status(self):
- v1, v2 = self._get_fake_pci_devs()
- v1['status'] = 'allocated'
- v2['status'] = 'claimed'
- db.pci_device_update(self.admin_context, v1['compute_node_id'],
- v1['address'], v1)
- db.pci_device_update(self.admin_context, v2['compute_node_id'],
- v2['address'], v2)
- results = db.pci_device_get_all_by_instance_uuid(
- self.context,
- '00000000-0000-0000-0000-000000000010')
- self._assertEqualListsOfObjects(results, [v1], self.ignored_keys)
-
- def test_pci_device_update(self):
- v1, v2 = self._get_fake_pci_devs()
- v1['status'] = 'allocated'
- db.pci_device_update(self.admin_context, v1['compute_node_id'],
- v1['address'], v1)
- result = db.pci_device_get_by_addr(
- self.admin_context, 1, '0000:0f:08.7')
- self._assertEqualObjects(v1, result, self.ignored_keys)
-
- v1['status'] = 'claimed'
- db.pci_device_update(self.admin_context, v1['compute_node_id'],
- v1['address'], v1)
- result = db.pci_device_get_by_addr(
- self.admin_context, 1, '0000:0f:08.7')
- self._assertEqualObjects(v1, result, self.ignored_keys)
-
- def test_pci_device_update_low_priv(self):
- v1, v2 = self._get_fake_pci_devs()
- self.assertRaises(exception.AdminRequired,
- db.pci_device_update, self.context,
- v1['compute_node_id'], v1['address'], v1)
-
- def test_pci_device_destroy(self):
- v1, v2 = self._create_fake_pci_devs()
- results = db.pci_device_get_all_by_node(self.admin_context, 1)
- self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
- db.pci_device_destroy(self.admin_context, v1['compute_node_id'],
- v1['address'])
- results = db.pci_device_get_all_by_node(self.admin_context, 1)
- self._assertEqualListsOfObjects(results, [v2], self.ignored_keys)
-
- def test_pci_device_destroy_exception(self):
- v1, v2 = self._get_fake_pci_devs()
- db.pci_device_update(self.admin_context, v1['compute_node_id'],
- v1['address'], v1)
- results = db.pci_device_get_all_by_node(self.admin_context, 1)
- self._assertEqualListsOfObjects(results, [v1], self.ignored_keys)
- self.assertRaises(exception.PciDeviceNotFound,
- db.pci_device_destroy,
- self.admin_context,
- v2['compute_node_id'],
- v2['address'])
-
-
-class RetryOnDeadlockTestCase(test.TestCase):
- def test_without_deadlock(self):
- @sqlalchemy_api._retry_on_deadlock
- def call_api(*args, **kwargs):
- return True
- self.assertTrue(call_api())
-
- def test_raise_deadlock(self):
- self.attempts = 2
-
- @sqlalchemy_api._retry_on_deadlock
- def call_api(*args, **kwargs):
- while self.attempts:
- self.attempts = self.attempts - 1
- raise db_exc.DBDeadlock("fake exception")
- return True
- self.assertTrue(call_api())
-
-
-class TestSqlalchemyTypesRepr(test_base.DbTestCase):
- def setUp(self):
- super(TestSqlalchemyTypesRepr, self).setUp()
- meta = MetaData(bind=self.engine)
- self.table = Table(
- 'cidr_tbl',
- meta,
- Column('id', Integer, primary_key=True),
- Column('addr', col_types.CIDR())
- )
- self.table.create()
- self.addCleanup(meta.drop_all)
-
- def test_cidr_repr(self):
- addrs = [('192.168.3.0/24', '192.168.3.0/24'),
- ('2001:db8::/64', '2001:db8::/64'),
- ('192.168.3.0', '192.168.3.0/32'),
- ('2001:db8::', '2001:db8::/128'),
- (None, None)]
- with self.engine.begin() as conn:
- for i in addrs:
- conn.execute(self.table.insert(), {'addr': i[0]})
-
- query = self.table.select().order_by(self.table.c.id)
- result = conn.execute(query)
- for idx, row in enumerate(result):
- self.assertEqual(addrs[idx][1], row.addr)
-
-
-class TestMySQLSqlalchemyTypesRepr(TestSqlalchemyTypesRepr,
- test_base.MySQLOpportunisticTestCase):
- pass
-
-
-class TestPostgreSQLSqlalchemyTypesRepr(TestSqlalchemyTypesRepr,
- test_base.PostgreSQLOpportunisticTestCase):
- pass
diff --git a/nova/tests/db/test_migration_utils.py b/nova/tests/db/test_migration_utils.py
deleted file mode 100644
index b3035ca5bb..0000000000
--- a/nova/tests/db/test_migration_utils.py
+++ /dev/null
@@ -1,256 +0,0 @@
-# Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me).
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import uuid
-
-from oslo.db.sqlalchemy import utils as oslodbutils
-import sqlalchemy
-from sqlalchemy import Integer, String
-from sqlalchemy import MetaData, Table, Column
-from sqlalchemy.exc import NoSuchTableError
-from sqlalchemy import sql
-from sqlalchemy.types import UserDefinedType
-
-from nova.db.sqlalchemy import api as db
-from nova.db.sqlalchemy import utils
-from nova import exception
-from nova.tests.db import test_migrations
-
-
-SA_VERSION = tuple(map(int, sqlalchemy.__version__.split('.')))
-
-
-class CustomType(UserDefinedType):
- """Dummy column type for testing unsupported types."""
- def get_col_spec(self):
- return "CustomType"
-
-
-class TestMigrationUtils(test_migrations.BaseMigrationTestCase):
- """Class for testing utils that are used in db migrations."""
-
- def test_delete_from_select(self):
- table_name = "__test_deletefromselect_table__"
- uuidstrs = []
- for unused in range(10):
- uuidstrs.append(uuid.uuid4().hex)
- for key, engine in self.engines.items():
- meta = MetaData()
- meta.bind = engine
- conn = engine.connect()
- test_table = Table(table_name, meta,
- Column('id', Integer, primary_key=True,
- nullable=False, autoincrement=True),
- Column('uuid', String(36), nullable=False))
- test_table.create()
- # Add 10 rows to table
- for uuidstr in uuidstrs:
- ins_stmt = test_table.insert().values(uuid=uuidstr)
- conn.execute(ins_stmt)
-
- # Delete 4 rows in one chunk
- column = test_table.c.id
- query_delete = sql.select([column],
- test_table.c.id < 5).order_by(column)
- delete_statement = utils.DeleteFromSelect(test_table,
- query_delete, column)
- result_delete = conn.execute(delete_statement)
- # Verify we delete 4 rows
- self.assertEqual(result_delete.rowcount, 4)
-
- query_all = sql.select([test_table]).\
- where(test_table.c.uuid.in_(uuidstrs))
- rows = conn.execute(query_all).fetchall()
- # Verify we still have 6 rows in table
- self.assertEqual(len(rows), 6)
-
- test_table.drop()
-
- def test_check_shadow_table(self):
- table_name = 'test_check_shadow_table'
- for key, engine in self.engines.items():
- meta = MetaData()
- meta.bind = engine
-
- table = Table(table_name, meta,
- Column('id', Integer, primary_key=True),
- Column('a', Integer),
- Column('c', String(256)))
- table.create()
-
- # check missing shadow table
- self.assertRaises(NoSuchTableError,
- utils.check_shadow_table, engine, table_name)
-
- shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
- Column('id', Integer),
- Column('a', Integer))
- shadow_table.create()
-
- # check missing column
- self.assertRaises(exception.NovaException,
- utils.check_shadow_table, engine, table_name)
-
- # check when all is ok
- c = Column('c', String(256))
- shadow_table.create_column(c)
- self.assertTrue(utils.check_shadow_table(engine, table_name))
-
- # check extra column
- d = Column('d', Integer)
- shadow_table.create_column(d)
- self.assertRaises(exception.NovaException,
- utils.check_shadow_table, engine, table_name)
-
- table.drop()
- shadow_table.drop()
-
- def test_check_shadow_table_different_types(self):
- table_name = 'test_check_shadow_table_different_types'
- for key, engine in self.engines.items():
- meta = MetaData()
- meta.bind = engine
-
- table = Table(table_name, meta,
- Column('id', Integer, primary_key=True),
- Column('a', Integer))
- table.create()
-
- shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
- Column('id', Integer, primary_key=True),
- Column('a', String(256)))
- shadow_table.create()
- self.assertRaises(exception.NovaException,
- utils.check_shadow_table, engine, table_name)
-
- table.drop()
- shadow_table.drop()
-
- def test_check_shadow_table_with_unsupported_sqlite_type(self):
- if 'sqlite' not in self.engines:
- self.skipTest('sqlite is not configured')
- table_name = 'test_check_shadow_table_with_unsupported_sqlite_type'
- engine = self.engines['sqlite']
- meta = MetaData(bind=engine)
-
- table = Table(table_name, meta,
- Column('id', Integer, primary_key=True),
- Column('a', Integer),
- Column('c', CustomType))
- table.create()
-
- shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
- Column('id', Integer, primary_key=True),
- Column('a', Integer),
- Column('c', CustomType))
- shadow_table.create()
- self.assertTrue(utils.check_shadow_table(engine, table_name))
- shadow_table.drop()
-
- def test_create_shadow_table_by_table_instance(self):
- table_name = 'test_create_shadow_table_by_table_instance'
- for key, engine in self.engines.items():
- meta = MetaData()
- meta.bind = engine
- table = Table(table_name, meta,
- Column('id', Integer, primary_key=True),
- Column('a', Integer),
- Column('b', String(256)))
- table.create()
- shadow_table = utils.create_shadow_table(engine, table=table)
- self.assertTrue(utils.check_shadow_table(engine, table_name))
- table.drop()
- shadow_table.drop()
-
- def test_create_shadow_table_by_name(self):
- table_name = 'test_create_shadow_table_by_name'
- for key, engine in self.engines.items():
- meta = MetaData()
- meta.bind = engine
-
- table = Table(table_name, meta,
- Column('id', Integer, primary_key=True),
- Column('a', Integer),
- Column('b', String(256)))
- table.create()
- shadow_table = utils.create_shadow_table(engine,
- table_name=table_name)
- self.assertTrue(utils.check_shadow_table(engine, table_name))
- table.drop()
- shadow_table.drop()
-
- def test_create_shadow_table_not_supported_type(self):
- if 'sqlite' in self.engines:
- table_name = 'test_create_shadow_table_not_supported_type'
- engine = self.engines['sqlite']
- meta = MetaData()
- meta.bind = engine
- table = Table(table_name, meta,
- Column('id', Integer, primary_key=True),
- Column('a', CustomType))
- table.create()
-
- # reflection of custom types has been fixed upstream
- if SA_VERSION < (0, 9, 0):
- self.assertRaises(oslodbutils.ColumnError,
- utils.create_shadow_table,
- engine, table_name=table_name)
-
- shadow_table = utils.create_shadow_table(engine,
- table_name=table_name,
- a=Column('a', CustomType())
- )
- self.assertTrue(utils.check_shadow_table(engine, table_name))
- table.drop()
- shadow_table.drop()
-
- def test_create_shadow_both_table_and_table_name_are_none(self):
- for key, engine in self.engines.items():
- meta = MetaData()
- meta.bind = engine
- self.assertRaises(exception.NovaException,
- utils.create_shadow_table, engine)
-
- def test_create_shadow_both_table_and_table_name_are_specified(self):
- table_name = ('test_create_shadow_both_table_and_table_name_are_'
- 'specified')
- for key, engine in self.engines.items():
- meta = MetaData()
- meta.bind = engine
- table = Table(table_name, meta,
- Column('id', Integer, primary_key=True),
- Column('a', Integer))
- table.create()
- self.assertRaises(exception.NovaException,
- utils.create_shadow_table,
- engine, table=table, table_name=table_name)
- table.drop()
-
- def test_create_duplicate_shadow_table(self):
- table_name = 'test_create_duplicate_shadow_table'
- for key, engine in self.engines.items():
- meta = MetaData()
- meta.bind = engine
- table = Table(table_name, meta,
- Column('id', Integer, primary_key=True),
- Column('a', Integer))
- table.create()
- shadow_table = utils.create_shadow_table(engine,
- table_name=table_name)
- self.assertRaises(exception.ShadowTableExists,
- utils.create_shadow_table,
- engine, table_name=table_name)
- table.drop()
- shadow_table.drop()
diff --git a/nova/tests/fake_hosts.py b/nova/tests/fake_hosts.py
deleted file mode 100644
index bcf11bae68..0000000000
--- a/nova/tests/fake_hosts.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Provides some fake hosts to test host and service related functions
-"""
-
-from nova.tests.objects import test_service
-
-
-HOST_LIST = [
- {"host_name": "host_c1", "service": "compute", "zone": "nova"},
- {"host_name": "host_c2", "service": "compute", "zone": "nova"}]
-
-OS_API_HOST_LIST = {"hosts": HOST_LIST}
-
-HOST_LIST_NOVA_ZONE = [
- {"host_name": "host_c1", "service": "compute", "zone": "nova"},
- {"host_name": "host_c2", "service": "compute", "zone": "nova"}]
-
-service_base = test_service.fake_service
-
-SERVICES_LIST = [
- dict(service_base, host='host_c1', topic='compute'),
- dict(service_base, host='host_c2', topic='compute')]
diff --git a/nova/tests/fake_loadables/fake_loadable1.py b/nova/tests/fake_loadables/fake_loadable1.py
deleted file mode 100644
index cc02fe717b..0000000000
--- a/nova/tests/fake_loadables/fake_loadable1.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2012 OpenStack Foundation # All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Fake Loadable subclasses module #1
-"""
-
-from nova.tests import fake_loadables
-
-
-class FakeLoadableSubClass1(fake_loadables.FakeLoadable):
- pass
-
-
-class FakeLoadableSubClass2(fake_loadables.FakeLoadable):
- pass
-
-
-class _FakeLoadableSubClass3(fake_loadables.FakeLoadable):
- """Classes beginning with '_' will be ignored."""
- pass
-
-
-class FakeLoadableSubClass4(object):
- """Not a correct subclass."""
-
-
-def return_valid_classes():
- return [FakeLoadableSubClass1, FakeLoadableSubClass2]
-
-
-def return_invalid_classes():
- return [FakeLoadableSubClass1, _FakeLoadableSubClass3,
- FakeLoadableSubClass4]
diff --git a/nova/tests/fake_loadables/fake_loadable2.py b/nova/tests/fake_loadables/fake_loadable2.py
deleted file mode 100644
index e72b582000..0000000000
--- a/nova/tests/fake_loadables/fake_loadable2.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2012 OpenStack Foundation # All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Fake Loadable subclasses module #2
-"""
-
-from nova.tests import fake_loadables
-
-
-class FakeLoadableSubClass5(fake_loadables.FakeLoadable):
- pass
-
-
-class FakeLoadableSubClass6(fake_loadables.FakeLoadable):
- pass
-
-
-class _FakeLoadableSubClass7(fake_loadables.FakeLoadable):
- """Classes beginning with '_' will be ignored."""
- pass
-
-
-class FakeLoadableSubClass8(BaseException):
- """Not a correct subclass."""
-
-
-def return_valid_class():
- return [FakeLoadableSubClass6]
diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py
deleted file mode 100644
index bfb678c6db..0000000000
--- a/nova/tests/fake_network.py
+++ /dev/null
@@ -1,457 +0,0 @@
-# Copyright 2011 Rackspace
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-
-from nova.compute import api as compute_api
-from nova.compute import manager as compute_manager
-import nova.context
-from nova import db
-from nova import exception
-from nova.network import api as network_api
-from nova.network import manager as network_manager
-from nova.network import model as network_model
-from nova.network import rpcapi as network_rpcapi
-from nova import objects
-from nova.objects import base as obj_base
-from nova.objects import virtual_interface as vif_obj
-from nova.pci import device as pci_device
-from nova.tests.objects import test_fixed_ip
-from nova.tests.objects import test_instance_info_cache
-from nova.tests.objects import test_pci_device
-
-
-HOST = "testhost"
-CONF = cfg.CONF
-CONF.import_opt('use_ipv6', 'nova.netconf')
-
-
-class FakeModel(dict):
- """Represent a model from the db."""
- def __init__(self, *args, **kwargs):
- self.update(kwargs)
-
-
-class FakeNetworkManager(network_manager.NetworkManager):
- """This NetworkManager doesn't call the base class so we can bypass all
- inherited service cruft and just perform unit tests.
- """
-
- class FakeDB:
- vifs = [{'id': 0,
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': 0,
- 'instance_uuid': '00000000-0000-0000-0000-000000000010',
- 'network_id': 1,
- 'uuid': 'fake-uuid',
- 'address': 'DC:AD:BE:FF:EF:01'},
- {'id': 1,
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': 0,
- 'instance_uuid': '00000000-0000-0000-0000-000000000020',
- 'network_id': 21,
- 'uuid': 'fake-uuid2',
- 'address': 'DC:AD:BE:FF:EF:02'},
- {'id': 2,
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': 0,
- 'instance_uuid': '00000000-0000-0000-0000-000000000030',
- 'network_id': 31,
- 'uuid': 'fake-uuid3',
- 'address': 'DC:AD:BE:FF:EF:03'}]
-
- floating_ips = [dict(address='172.16.1.1',
- fixed_ip_id=100),
- dict(address='172.16.1.2',
- fixed_ip_id=200),
- dict(address='173.16.1.2',
- fixed_ip_id=210)]
-
- fixed_ips = [dict(test_fixed_ip.fake_fixed_ip,
- id=100,
- address='172.16.0.1',
- virtual_interface_id=0),
- dict(test_fixed_ip.fake_fixed_ip,
- id=200,
- address='172.16.0.2',
- virtual_interface_id=1),
- dict(test_fixed_ip.fake_fixed_ip,
- id=210,
- address='173.16.0.2',
- virtual_interface_id=2)]
-
- def fixed_ip_get_by_instance(self, context, instance_uuid):
- return [dict(address='10.0.0.0'), dict(address='10.0.0.1'),
- dict(address='10.0.0.2')]
-
- def network_get_by_cidr(self, context, cidr):
- raise exception.NetworkNotFoundForCidr(cidr=cidr)
-
- def network_create_safe(self, context, net):
- fakenet = dict(net)
- fakenet['id'] = 999
- return fakenet
-
- def network_get(self, context, network_id, project_only="allow_none"):
- return {'cidr_v6': '2001:db8:69:%x::/64' % network_id}
-
- def network_get_by_uuid(self, context, network_uuid):
- raise exception.NetworkNotFoundForUUID(uuid=network_uuid)
-
- def network_get_all(self, context):
- raise exception.NoNetworksFound()
-
- def network_get_all_by_uuids(self, context, project_only="allow_none"):
- raise exception.NoNetworksFound()
-
- def network_disassociate(self, context, network_id):
- return True
-
- def virtual_interface_get_all(self, context):
- return self.vifs
-
- def fixed_ips_by_virtual_interface(self, context, vif_id):
- return [ip for ip in self.fixed_ips
- if ip['virtual_interface_id'] == vif_id]
-
- def fixed_ip_disassociate(self, context, address):
- return True
-
- def __init__(self, stubs=None):
- self.db = self.FakeDB()
- if stubs:
- stubs.Set(vif_obj, 'db', self.db)
- self.deallocate_called = None
- self.deallocate_fixed_ip_calls = []
- self.network_rpcapi = network_rpcapi.NetworkAPI()
-
- # TODO(matelakat) method signature should align with the faked one's
- def deallocate_fixed_ip(self, context, address=None, host=None,
- instance=None):
- self.deallocate_fixed_ip_calls.append((context, address, host))
- # TODO(matelakat) use the deallocate_fixed_ip_calls instead
- self.deallocate_called = address
-
- def _create_fixed_ips(self, context, network_id, fixed_cidr=None,
- extra_reserved=None, bottom_reserved=0,
- top_reserved=0):
- pass
-
- def get_instance_nw_info(context, instance_id, rxtx_factor,
- host, instance_uuid=None, **kwargs):
- pass
-
-
-def fake_network(network_id, ipv6=None):
- if ipv6 is None:
- ipv6 = CONF.use_ipv6
- fake_network = {'id': network_id,
- 'uuid': '00000000-0000-0000-0000-00000000000000%02d' % network_id,
- 'label': 'test%d' % network_id,
- 'injected': False,
- 'multi_host': False,
- 'cidr': '192.168.%d.0/24' % network_id,
- 'cidr_v6': None,
- 'netmask': '255.255.255.0',
- 'netmask_v6': None,
- 'bridge': 'fake_br%d' % network_id,
- 'bridge_interface': 'fake_eth%d' % network_id,
- 'gateway': '192.168.%d.1' % network_id,
- 'gateway_v6': None,
- 'broadcast': '192.168.%d.255' % network_id,
- 'dns1': '192.168.%d.3' % network_id,
- 'dns2': '192.168.%d.4' % network_id,
- 'dns3': '192.168.%d.3' % network_id,
- 'vlan': None,
- 'host': None,
- 'project_id': 'fake_project',
- 'vpn_public_address': '192.168.%d.2' % network_id,
- 'vpn_public_port': None,
- 'vpn_private_address': None,
- 'dhcp_start': None,
- 'rxtx_base': network_id * 10,
- 'priority': None,
- 'deleted': False,
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'mtu': None,
- 'dhcp_server': '192.168.%d.1' % network_id,
- 'enable_dhcp': True,
- 'share_address': False}
- if ipv6:
- fake_network['cidr_v6'] = '2001:db8:0:%x::/64' % network_id
- fake_network['gateway_v6'] = '2001:db8:0:%x::1' % network_id
- fake_network['netmask_v6'] = '64'
- if CONF.flat_injected:
- fake_network['injected'] = True
-
- return fake_network
-
-
-def fake_vif(x):
- return{'id': x,
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': 0,
- 'address': 'DE:AD:BE:EF:00:%02x' % x,
- 'uuid': '00000000-0000-0000-0000-00000000000000%02d' % x,
- 'network_id': x,
- 'instance_uuid': 'fake-uuid'}
-
-
-def floating_ip_ids():
- for i in xrange(1, 100):
- yield i
-
-
-def fixed_ip_ids():
- for i in xrange(1, 100):
- yield i
-
-
-floating_ip_id = floating_ip_ids()
-fixed_ip_id = fixed_ip_ids()
-
-
-def next_fixed_ip(network_id, num_floating_ips=0):
- next_id = fixed_ip_id.next()
- f_ips = [FakeModel(**next_floating_ip(next_id))
- for i in xrange(num_floating_ips)]
- return {'id': next_id,
- 'network_id': network_id,
- 'address': '192.168.%d.%03d' % (network_id, (next_id + 99)),
- 'instance_uuid': 1,
- 'allocated': False,
- 'reserved': False,
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'leased': True,
- 'host': HOST,
- 'deleted': 0,
- 'network': fake_network(network_id),
- 'virtual_interface': fake_vif(network_id),
- # and since network_id and vif_id happen to be equivalent
- 'virtual_interface_id': network_id,
- 'floating_ips': f_ips}
-
-
-def next_floating_ip(fixed_ip_id):
- next_id = floating_ip_id.next()
- return {'id': next_id,
- 'address': '10.10.10.%03d' % (next_id + 99),
- 'fixed_ip_id': fixed_ip_id,
- 'project_id': None,
- 'auto_assigned': False}
-
-
-def ipv4_like(ip, match_string):
- ip = ip.split('.')
- match_octets = match_string.split('.')
-
- for i, octet in enumerate(match_octets):
- if octet == '*':
- continue
- if octet != ip[i]:
- return False
- return True
-
-
-def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2,
- floating_ips_per_fixed_ip=0):
- # stubs is the self.stubs from the test
- # ips_per_vif is the number of ips each vif will have
- # num_floating_ips is number of float ips for each fixed ip
- network = network_manager.FlatManager(host=HOST)
- network.db = db
-
- # reset the fixed and floating ip generators
- global floating_ip_id, fixed_ip_id, fixed_ips
- floating_ip_id = floating_ip_ids()
- fixed_ip_id = fixed_ip_ids()
- fixed_ips = []
-
- def fixed_ips_fake(*args, **kwargs):
- global fixed_ips
- ips = [next_fixed_ip(i, floating_ips_per_fixed_ip)
- for i in xrange(1, num_networks + 1)
- for j in xrange(ips_per_vif)]
- fixed_ips = ips
- return ips
-
- def update_cache_fake(*args, **kwargs):
- pass
-
- stubs.Set(db, 'fixed_ip_get_by_instance', fixed_ips_fake)
- stubs.Set(db, 'instance_info_cache_update', update_cache_fake)
-
- class FakeContext(nova.context.RequestContext):
- def is_admin(self):
- return True
-
- nw_model = network.get_instance_nw_info(
- FakeContext('fakeuser', 'fake_project'),
- 0, 3, None)
- return nw_model
-
-
-def stub_out_nw_api_get_instance_nw_info(stubs, func=None,
- num_networks=1,
- ips_per_vif=1,
- floating_ips_per_fixed_ip=0):
-
- def get_instance_nw_info(self, context, instance, conductor_api=None):
- return fake_get_instance_nw_info(stubs, num_networks=num_networks,
- ips_per_vif=ips_per_vif,
- floating_ips_per_fixed_ip=floating_ips_per_fixed_ip)
-
- if func is None:
- func = get_instance_nw_info
- stubs.Set(network_api.API, 'get_instance_nw_info', func)
-
-
-def stub_out_network_cleanup(stubs):
- stubs.Set(network_api.API, 'deallocate_for_instance',
- lambda *args, **kwargs: None)
-
-
-_real_functions = {}
-
-
-def set_stub_network_methods(stubs):
- global _real_functions
- cm = compute_manager.ComputeManager
- if not _real_functions:
- _real_functions = {
- '_get_instance_nw_info': cm._get_instance_nw_info,
- '_allocate_network': cm._allocate_network,
- '_deallocate_network': cm._deallocate_network}
-
- def fake_networkinfo(*args, **kwargs):
- return network_model.NetworkInfo()
-
- def fake_async_networkinfo(*args, **kwargs):
- return network_model.NetworkInfoAsyncWrapper(fake_networkinfo)
-
- stubs.Set(cm, '_get_instance_nw_info', fake_networkinfo)
- stubs.Set(cm, '_allocate_network', fake_async_networkinfo)
- stubs.Set(cm, '_deallocate_network', lambda *args, **kwargs: None)
-
-
-def unset_stub_network_methods(stubs):
- global _real_functions
- if _real_functions:
- cm = compute_manager.ComputeManager
- for name in _real_functions:
- stubs.Set(cm, name, _real_functions[name])
-
-
-def stub_compute_with_ips(stubs):
- orig_get = compute_api.API.get
- orig_get_all = compute_api.API.get_all
- orig_create = compute_api.API.create
-
- def fake_get(*args, **kwargs):
- return _get_instances_with_cached_ips(orig_get, *args, **kwargs)
-
- def fake_get_all(*args, **kwargs):
- return _get_instances_with_cached_ips(orig_get_all, *args, **kwargs)
-
- def fake_create(*args, **kwargs):
- return _create_instances_with_cached_ips(orig_create, *args, **kwargs)
-
- def fake_pci_device_get_by_addr(context, node_id, dev_addr):
- return test_pci_device.fake_db_dev
-
- stubs.Set(db, 'pci_device_get_by_addr', fake_pci_device_get_by_addr)
- stubs.Set(compute_api.API, 'get', fake_get)
- stubs.Set(compute_api.API, 'get_all', fake_get_all)
- stubs.Set(compute_api.API, 'create', fake_create)
-
-
-def _get_fake_cache():
- def _ip(ip, fixed=True, floats=None):
- ip_dict = {'address': ip, 'type': 'fixed'}
- if not fixed:
- ip_dict['type'] = 'floating'
- if fixed and floats:
- ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
- return ip_dict
-
- info = [{'address': 'aa:bb:cc:dd:ee:ff',
- 'id': 1,
- 'network': {'bridge': 'br0',
- 'id': 1,
- 'label': 'private',
- 'subnets': [{'cidr': '192.168.0.0/24',
- 'ips': [_ip('192.168.0.3')]}]}}]
- if CONF.use_ipv6:
- ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
- info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
- 'ips': [_ip(ipv6_addr)]})
- return jsonutils.dumps(info)
-
-
-def _get_instances_with_cached_ips(orig_func, *args, **kwargs):
- """Kludge the cache into instance(s) without having to create DB
- entries
- """
- instances = orig_func(*args, **kwargs)
- context = args[0]
- fake_device = objects.PciDevice.get_by_dev_addr(context, 1, 'a')
-
- def _info_cache_for(instance):
- info_cache = dict(test_instance_info_cache.fake_info_cache,
- network_info=_get_fake_cache(),
- instance_uuid=instance['uuid'])
- if isinstance(instance, obj_base.NovaObject):
- _info_cache = objects.InstanceInfoCache(context)
- objects.InstanceInfoCache._from_db_object(context, _info_cache,
- info_cache)
- info_cache = _info_cache
- instance['info_cache'] = info_cache
-
- if isinstance(instances, (list, obj_base.ObjectListBase)):
- for instance in instances:
- _info_cache_for(instance)
- pci_device.claim(fake_device, instance)
- pci_device.allocate(fake_device, instance)
- else:
- _info_cache_for(instances)
- pci_device.claim(fake_device, instances)
- pci_device.allocate(fake_device, instances)
- return instances
-
-
-def _create_instances_with_cached_ips(orig_func, *args, **kwargs):
- """Kludge the above kludge so that the database doesn't get out
- of sync with the actual instance.
- """
- instances, reservation_id = orig_func(*args, **kwargs)
- fake_cache = _get_fake_cache()
- for instance in instances:
- instance['info_cache']['network_info'] = fake_cache
- db.instance_info_cache_update(args[1], instance['uuid'],
- {'network_info': fake_cache})
- return (instances, reservation_id)
diff --git a/nova/tests/image/test_fake.py b/nova/tests/image/test_fake.py
deleted file mode 100644
index 9088d58026..0000000000
--- a/nova/tests/image/test_fake.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-import StringIO
-
-from nova import context
-from nova import exception
-from nova import test
-import nova.tests.image.fake
-
-
-class FakeImageServiceTestCase(test.NoDBTestCase):
- def setUp(self):
- super(FakeImageServiceTestCase, self).setUp()
- self.image_service = nova.tests.image.fake.FakeImageService()
- self.context = context.get_admin_context()
-
- def tearDown(self):
- super(FakeImageServiceTestCase, self).tearDown()
- nova.tests.image.fake.FakeImageService_reset()
-
- def test_detail(self):
- res = self.image_service.detail(self.context)
- for image in res:
- keys = set(image.keys())
- self.assertEqual(keys, set(['id', 'name', 'created_at',
- 'updated_at', 'deleted_at', 'deleted',
- 'status', 'is_public', 'properties',
- 'disk_format', 'container_format',
- 'size']))
- self.assertIsInstance(image['created_at'], datetime.datetime)
- self.assertIsInstance(image['updated_at'], datetime.datetime)
-
- if not (isinstance(image['deleted_at'], datetime.datetime) or
- image['deleted_at'] is None):
- self.fail('image\'s "deleted_at" attribute was neither a '
- 'datetime object nor None')
-
- def check_is_bool(image, key):
- val = image.get('deleted')
- if not isinstance(val, bool):
- self.fail('image\'s "%s" attribute wasn\'t '
- 'a bool: %r' % (key, val))
-
- check_is_bool(image, 'deleted')
- check_is_bool(image, 'is_public')
-
- def test_show_raises_imagenotfound_for_invalid_id(self):
- self.assertRaises(exception.ImageNotFound,
- self.image_service.show,
- self.context,
- 'this image does not exist')
-
- def test_create_adds_id(self):
- index = self.image_service.detail(self.context)
- image_count = len(index)
-
- self.image_service.create(self.context, {})
-
- index = self.image_service.detail(self.context)
- self.assertEqual(len(index), image_count + 1)
-
- self.assertTrue(index[0]['id'])
-
- def test_create_keeps_id(self):
- self.image_service.create(self.context, {'id': '34'})
- self.image_service.show(self.context, '34')
-
- def test_create_rejects_duplicate_ids(self):
- self.image_service.create(self.context, {'id': '34'})
- self.assertRaises(exception.CouldNotUploadImage,
- self.image_service.create,
- self.context,
- {'id': '34'})
-
- # Make sure there's still one left
- self.image_service.show(self.context, '34')
-
- def test_update(self):
- self.image_service.create(self.context,
- {'id': '34', 'foo': 'bar'})
-
- self.image_service.update(self.context, '34',
- {'id': '34', 'foo': 'baz'})
-
- img = self.image_service.show(self.context, '34')
- self.assertEqual(img['foo'], 'baz')
-
- def test_delete(self):
- self.image_service.create(self.context, {'id': '34', 'foo': 'bar'})
- self.image_service.delete(self.context, '34')
- self.assertRaises(exception.NotFound,
- self.image_service.show,
- self.context,
- '34')
-
- def test_create_then_get(self):
- blob = 'some data'
- s1 = StringIO.StringIO(blob)
- self.image_service.create(self.context,
- {'id': '32', 'foo': 'bar'},
- data=s1)
- s2 = StringIO.StringIO()
- self.image_service.download(self.context, '32', data=s2)
- self.assertEqual(s2.getvalue(), blob, 'Did not get blob back intact')
diff --git a/nova/tests/image/test_s3.py b/nova/tests/image/test_s3.py
deleted file mode 100644
index 7472e0fa79..0000000000
--- a/nova/tests/image/test_s3.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# Copyright 2011 Isaku Yamahata
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import binascii
-import os
-import tempfile
-
-import eventlet
-import fixtures
-import mox
-
-from nova.api.ec2 import ec2utils
-from nova import context
-from nova import db
-from nova import exception
-from nova.image import s3
-from nova import test
-from nova.tests.image import fake
-
-
-ami_manifest_xml = """<?xml version="1.0" ?>
-<manifest>
- <version>2011-06-17</version>
- <bundler>
- <name>test-s3</name>
- <version>0</version>
- <release>0</release>
- </bundler>
- <machine_configuration>
- <architecture>x86_64</architecture>
- <block_device_mapping>
- <mapping>
- <virtual>ami</virtual>
- <device>sda1</device>
- </mapping>
- <mapping>
- <virtual>root</virtual>
- <device>/dev/sda1</device>
- </mapping>
- <mapping>
- <virtual>ephemeral0</virtual>
- <device>sda2</device>
- </mapping>
- <mapping>
- <virtual>swap</virtual>
- <device>sda3</device>
- </mapping>
- </block_device_mapping>
- <kernel_id>aki-00000001</kernel_id>
- <ramdisk_id>ari-00000001</ramdisk_id>
- </machine_configuration>
-</manifest>
-"""
-
-file_manifest_xml = """<?xml version="1.0" ?>
-<manifest>
- <image>
- <ec2_encrypted_key>foo</ec2_encrypted_key>
- <user_encrypted_key>foo</user_encrypted_key>
- <ec2_encrypted_iv>foo</ec2_encrypted_iv>
- <parts count="1">
- <part index="0">
- <filename>foo</filename>
- </part>
- </parts>
- </image>
-</manifest>
-"""
-
-
-class TestS3ImageService(test.TestCase):
- def setUp(self):
- super(TestS3ImageService, self).setUp()
- self.context = context.RequestContext(None, None)
- self.useFixture(fixtures.FakeLogger('boto'))
-
- # set up 3 fixtures to test shows, should have id '1', '2', and '3'
- db.s3_image_create(self.context,
- '155d900f-4e14-4e4c-a73d-069cbf4541e6')
- db.s3_image_create(self.context,
- 'a2459075-d96c-40d5-893e-577ff92e721c')
- db.s3_image_create(self.context,
- '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
-
- fake.stub_out_image_service(self.stubs)
- self.image_service = s3.S3ImageService()
- ec2utils.reset_cache()
-
- def tearDown(self):
- super(TestS3ImageService, self).tearDown()
- fake.FakeImageService_reset()
-
- def _assertEqualList(self, list0, list1, keys):
- self.assertEqual(len(list0), len(list1))
- key = keys[0]
- for x in list0:
- self.assertEqual(len(x), len(keys))
- self.assertIn(key, x)
- for y in list1:
- self.assertIn(key, y)
- if x[key] == y[key]:
- for k in keys:
- self.assertEqual(x[k], y[k])
-
- def test_show_cannot_use_uuid(self):
- self.assertRaises(exception.ImageNotFound,
- self.image_service.show, self.context,
- '155d900f-4e14-4e4c-a73d-069cbf4541e6')
-
- def test_show_translates_correctly(self):
- self.image_service.show(self.context, '1')
-
- def test_show_translates_image_state_correctly(self):
- def my_fake_show(self, context, image_id, **kwargs):
- fake_state_map = {
- '155d900f-4e14-4e4c-a73d-069cbf4541e6': 'downloading',
- 'a2459075-d96c-40d5-893e-577ff92e721c': 'failed_decrypt',
- '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6': 'available'}
- return {'id': image_id,
- 'name': 'fakeimage123456',
- 'deleted_at': None,
- 'deleted': False,
- 'status': 'active',
- 'is_public': False,
- 'container_format': 'raw',
- 'disk_format': 'raw',
- 'size': '25165824',
- 'properties': {'image_state': fake_state_map[image_id]}}
-
- # Override part of the fake image service as well just for
- # this test so we can set the image_state to various values
- # and test that S3ImageService does the correct mapping for
- # us. We can't put fake bad or pending states in the real fake
- # image service as it causes other tests to fail
- self.stubs.Set(fake._FakeImageService, 'show', my_fake_show)
- ret_image = self.image_service.show(self.context, '1')
- self.assertEqual(ret_image['properties']['image_state'], 'pending')
- ret_image = self.image_service.show(self.context, '2')
- self.assertEqual(ret_image['properties']['image_state'], 'failed')
- ret_image = self.image_service.show(self.context, '3')
- self.assertEqual(ret_image['properties']['image_state'], 'available')
-
- def test_detail(self):
- self.image_service.detail(self.context)
-
- def test_s3_create(self):
- metadata = {'properties': {
- 'root_device_name': '/dev/sda1',
- 'block_device_mapping': [
- {'device_name': '/dev/sda1',
- 'snapshot_id': 'snap-12345678',
- 'delete_on_termination': True},
- {'device_name': '/dev/sda2',
- 'virtual_name': 'ephemeral0'},
- {'device_name': '/dev/sdb0',
- 'no_device': True}]}}
- _manifest, image, image_uuid = self.image_service._s3_parse_manifest(
- self.context, metadata, ami_manifest_xml)
-
- ret_image = self.image_service.show(self.context, image['id'])
- self.assertIn('properties', ret_image)
- properties = ret_image['properties']
-
- self.assertIn('mappings', properties)
- mappings = properties['mappings']
- expected_mappings = [
- {"device": "sda1", "virtual": "ami"},
- {"device": "/dev/sda1", "virtual": "root"},
- {"device": "sda2", "virtual": "ephemeral0"},
- {"device": "sda3", "virtual": "swap"}]
- self._assertEqualList(mappings, expected_mappings,
- ['device', 'virtual'])
-
- self.assertIn('block_device_mapping', properties)
- block_device_mapping = properties['block_device_mapping']
- expected_bdm = [
- {'device_name': '/dev/sda1',
- 'snapshot_id': 'snap-12345678',
- 'delete_on_termination': True},
- {'device_name': '/dev/sda2',
- 'virtual_name': 'ephemeral0'},
- {'device_name': '/dev/sdb0',
- 'no_device': True}]
- self.assertEqual(block_device_mapping, expected_bdm)
-
- def _initialize_mocks(self):
- handle, tempf = tempfile.mkstemp(dir='/tmp')
- ignore = mox.IgnoreArg()
- mockobj = self.mox.CreateMockAnything()
- self.stubs.Set(self.image_service, '_conn', mockobj)
- mockobj(ignore).AndReturn(mockobj)
- self.stubs.Set(mockobj, 'get_bucket', mockobj)
- mockobj(ignore).AndReturn(mockobj)
- self.stubs.Set(mockobj, 'get_key', mockobj)
- mockobj(ignore).AndReturn(mockobj)
- self.stubs.Set(mockobj, 'get_contents_as_string', mockobj)
- mockobj().AndReturn(file_manifest_xml)
- self.stubs.Set(self.image_service, '_download_file', mockobj)
- mockobj(ignore, ignore, ignore).AndReturn(tempf)
- self.stubs.Set(binascii, 'a2b_hex', mockobj)
- mockobj(ignore).AndReturn('foo')
- mockobj(ignore).AndReturn('foo')
- self.stubs.Set(self.image_service, '_decrypt_image', mockobj)
- mockobj(ignore, ignore, ignore, ignore, ignore).AndReturn(mockobj)
- self.stubs.Set(self.image_service, '_untarzip_image', mockobj)
- mockobj(ignore, ignore).AndReturn(tempf)
- self.mox.ReplayAll()
-
- def test_s3_create_image_locations(self):
- image_location_1 = 'testbucket_1/test.img.manifest.xml'
- # Use another location that starts with a '/'
- image_location_2 = '/testbucket_2/test.img.manifest.xml'
-
- metadata = [{'properties': {'image_location': image_location_1}},
- {'properties': {'image_location': image_location_2}}]
-
- for mdata in metadata:
- self._initialize_mocks()
- image = self.image_service._s3_create(self.context, mdata)
- eventlet.sleep()
- translated = self.image_service._translate_id_to_uuid(self.context,
- image)
- uuid = translated['id']
- image_service = fake.FakeImageService()
- updated_image = image_service.update(self.context, uuid,
- {'properties': {'image_state': 'available'}},
- purge_props=False)
- self.assertEqual(updated_image['properties']['image_state'],
- 'available')
-
- def test_s3_create_is_public(self):
- self._initialize_mocks()
- metadata = {'properties': {
- 'image_location': 'mybucket/my.img.manifest.xml'},
- 'name': 'mybucket/my.img'}
- img = self.image_service._s3_create(self.context, metadata)
- eventlet.sleep()
- translated = self.image_service._translate_id_to_uuid(self.context,
- img)
- uuid = translated['id']
- image_service = fake.FakeImageService()
- updated_image = image_service.update(self.context, uuid,
- {'is_public': True}, purge_props=False)
- self.assertTrue(updated_image['is_public'])
- self.assertEqual(updated_image['status'], 'active')
- self.assertEqual(updated_image['properties']['image_state'],
- 'available')
-
- def test_s3_malicious_tarballs(self):
- self.assertRaises(exception.NovaException,
- self.image_service._test_for_malicious_tarball,
- "/unused", os.path.join(os.path.dirname(__file__), 'abs.tar.gz'))
- self.assertRaises(exception.NovaException,
- self.image_service._test_for_malicious_tarball,
- "/unused", os.path.join(os.path.dirname(__file__), 'rel.tar.gz'))
diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py
deleted file mode 100644
index 44e2ba2dc4..0000000000
--- a/nova/tests/integrated/api/client.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# Copyright (c) 2011 Justin Santa Barbara
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import urllib
-
-from oslo.serialization import jsonutils
-import requests
-
-from nova.i18n import _
-from nova.openstack.common import log as logging
-from nova.tests.image import fake
-
-
-LOG = logging.getLogger(__name__)
-
-
-class OpenStackApiException(Exception):
- def __init__(self, message=None, response=None):
- self.response = response
- if not message:
- message = 'Unspecified error'
-
- if response:
- _status = response.status_code
- _body = response.content
-
- message = (_('%(message)s\nStatus Code: %(_status)s\n'
- 'Body: %(_body)s') %
- {'message': message, '_status': _status,
- '_body': _body})
-
- super(OpenStackApiException, self).__init__(message)
-
-
-class OpenStackApiAuthenticationException(OpenStackApiException):
- def __init__(self, response=None, message=None):
- if not message:
- message = _("Authentication error")
- super(OpenStackApiAuthenticationException, self).__init__(message,
- response)
-
-
-class OpenStackApiAuthorizationException(OpenStackApiException):
- def __init__(self, response=None, message=None):
- if not message:
- message = _("Authorization error")
- super(OpenStackApiAuthorizationException, self).__init__(message,
- response)
-
-
-class OpenStackApiNotFoundException(OpenStackApiException):
- def __init__(self, response=None, message=None):
- if not message:
- message = _("Item not found")
- super(OpenStackApiNotFoundException, self).__init__(message, response)
-
-
-class TestOpenStackClient(object):
- """Simple OpenStack API Client.
-
- This is a really basic OpenStack API client that is under our control,
- so we can make changes / insert hooks for testing
-
- """
-
- def __init__(self, auth_user, auth_key, auth_uri):
- super(TestOpenStackClient, self).__init__()
- self.auth_result = None
- self.auth_user = auth_user
- self.auth_key = auth_key
- self.auth_uri = auth_uri
- # default project_id
- self.project_id = 'openstack'
-
- def request(self, url, method='GET', body=None, headers=None):
- _headers = {'Content-Type': 'application/json'}
- _headers.update(headers or {})
-
- response = requests.request(method, url, data=body, headers=_headers)
- return response
-
- def _authenticate(self):
- if self.auth_result:
- return self.auth_result
-
- auth_uri = self.auth_uri
- headers = {'X-Auth-User': self.auth_user,
- 'X-Auth-Key': self.auth_key,
- 'X-Auth-Project-Id': self.project_id}
- response = self.request(auth_uri,
- headers=headers)
-
- http_status = response.status_code
- LOG.debug("%(auth_uri)s => code %(http_status)s",
- {'auth_uri': auth_uri, 'http_status': http_status})
-
- if http_status == 401:
- raise OpenStackApiAuthenticationException(response=response)
-
- self.auth_result = response.headers
- return self.auth_result
-
- def api_request(self, relative_uri, check_response_status=None,
- strip_version=False, **kwargs):
- auth_result = self._authenticate()
-
- # NOTE(justinsb): httplib 'helpfully' converts headers to lower case
- base_uri = auth_result['x-server-management-url']
- if strip_version:
- # NOTE(vish): cut out version number and tenant_id
- base_uri = '/'.join(base_uri.split('/', 3)[:-1])
-
- full_uri = '%s/%s' % (base_uri, relative_uri)
-
- headers = kwargs.setdefault('headers', {})
- headers['X-Auth-Token'] = auth_result['x-auth-token']
-
- response = self.request(full_uri, **kwargs)
-
- http_status = response.status_code
- LOG.debug("%(relative_uri)s => code %(http_status)s",
- {'relative_uri': relative_uri, 'http_status': http_status})
-
- if check_response_status:
- if http_status not in check_response_status:
- if http_status == 404:
- raise OpenStackApiNotFoundException(response=response)
- elif http_status == 401:
- raise OpenStackApiAuthorizationException(response=response)
- else:
- raise OpenStackApiException(
- message=_("Unexpected status code"),
- response=response)
-
- return response
-
- def _decode_json(self, response):
- body = response.content
- LOG.debug("Decoding JSON: %s", body)
- if body:
- return jsonutils.loads(body)
- else:
- return ""
-
- def api_get(self, relative_uri, **kwargs):
- kwargs.setdefault('check_response_status', [200])
- response = self.api_request(relative_uri, **kwargs)
- return self._decode_json(response)
-
- def api_post(self, relative_uri, body, **kwargs):
- kwargs['method'] = 'POST'
- if body:
- headers = kwargs.setdefault('headers', {})
- headers['Content-Type'] = 'application/json'
- kwargs['body'] = jsonutils.dumps(body)
-
- kwargs.setdefault('check_response_status', [200, 202])
- response = self.api_request(relative_uri, **kwargs)
- return self._decode_json(response)
-
- def api_put(self, relative_uri, body, **kwargs):
- kwargs['method'] = 'PUT'
- if body:
- headers = kwargs.setdefault('headers', {})
- headers['Content-Type'] = 'application/json'
- kwargs['body'] = jsonutils.dumps(body)
-
- kwargs.setdefault('check_response_status', [200, 202, 204])
- response = self.api_request(relative_uri, **kwargs)
- return self._decode_json(response)
-
- def api_delete(self, relative_uri, **kwargs):
- kwargs['method'] = 'DELETE'
- kwargs.setdefault('check_response_status', [200, 202, 204])
- return self.api_request(relative_uri, **kwargs)
-
- def get_server(self, server_id):
- return self.api_get('/servers/%s' % server_id)['server']
-
- def get_servers(self, detail=True, search_opts=None):
- rel_url = '/servers/detail' if detail else '/servers'
-
- if search_opts is not None:
- qparams = {}
- for opt, val in search_opts.iteritems():
- qparams[opt] = val
- if qparams:
- query_string = "?%s" % urllib.urlencode(qparams)
- rel_url += query_string
- return self.api_get(rel_url)['servers']
-
- def post_server(self, server):
- response = self.api_post('/servers', server)
- if 'reservation_id' in response:
- return response
- else:
- return response['server']
-
- def put_server(self, server_id, server):
- return self.api_put('/servers/%s' % server_id, server)
-
- def post_server_action(self, server_id, data):
- return self.api_post('/servers/%s/action' % server_id, data)
-
- def delete_server(self, server_id):
- return self.api_delete('/servers/%s' % server_id)
-
- def get_image(self, image_id):
- return self.api_get('/images/%s' % image_id)['image']
-
- def get_images(self, detail=True):
- rel_url = '/images/detail' if detail else '/images'
- return self.api_get(rel_url)['images']
-
- def post_image(self, image):
- return self.api_post('/images', image)['image']
-
- def delete_image(self, image_id):
- return self.api_delete('/images/%s' % image_id)
-
- def get_flavor(self, flavor_id):
- return self.api_get('/flavors/%s' % flavor_id)['flavor']
-
- def get_flavors(self, detail=True):
- rel_url = '/flavors/detail' if detail else '/flavors'
- return self.api_get(rel_url)['flavors']
-
- def post_flavor(self, flavor):
- return self.api_post('/flavors', flavor)['flavor']
-
- def delete_flavor(self, flavor_id):
- return self.api_delete('/flavors/%s' % flavor_id)
-
- def get_volume(self, volume_id):
- return self.api_get('/volumes/%s' % volume_id)['volume']
-
- def get_volumes(self, detail=True):
- rel_url = '/volumes/detail' if detail else '/volumes'
- return self.api_get(rel_url)['volumes']
-
- def post_volume(self, volume):
- return self.api_post('/volumes', volume)['volume']
-
- def delete_volume(self, volume_id):
- return self.api_delete('/volumes/%s' % volume_id)
-
- def get_server_volume(self, server_id, attachment_id):
- return self.api_get('/servers/%s/os-volume_attachments/%s' %
- (server_id, attachment_id))['volumeAttachment']
-
- def get_server_volumes(self, server_id):
- return self.api_get('/servers/%s/os-volume_attachments' %
- (server_id))['volumeAttachments']
-
- def post_server_volume(self, server_id, volume_attachment):
- return self.api_post('/servers/%s/os-volume_attachments' %
- (server_id), volume_attachment
- )['volumeAttachment']
-
- def delete_server_volume(self, server_id, attachment_id):
- return self.api_delete('/servers/%s/os-volume_attachments/%s' %
- (server_id, attachment_id))
-
-
-class TestOpenStackClientV3(TestOpenStackClient):
- """Simple OpenStack v3 API Client.
-
- This is a really basic OpenStack API client that is under our control,
- so we can make changes / insert hooks for testing.
-
- Note that the V3 API does not have an image API and so it is
- not possible to query the api for the image information.
- So instead we just access the fake image service used by the unittests
- directly.
-
- """
-
- def get_image(self, image_id):
- return fake._fakeImageService.show(None, image_id)
-
- def get_images(self, detail=True):
- return fake._fakeImageService.detail(None)
-
- def post_image(self, image):
- raise NotImplementedError
-
- def delete_image(self, image_id):
- return fake._fakeImageService.delete(None, image_id)
-
-
-class TestOpenStackClientV3Mixin(object):
- def _get_test_client(self):
- return TestOpenStackClientV3('fake', 'fake', self.auth_url)
diff --git a/nova/tests/integrated/api_samples/README.rst b/nova/tests/integrated/api_samples/README.rst
deleted file mode 100644
index 6395f48876..0000000000
--- a/nova/tests/integrated/api_samples/README.rst
+++ /dev/null
@@ -1,29 +0,0 @@
-Api Samples
-===========
-
-This part of the tree contains templates for API samples. The
-documentation in doc/api_samples is completely autogenerated from the
-tests in this directory.
-
-To add a new api sample, add tests for the common passing and failing
-cases in this directory for your extension, and modify test_samples.py
-for your tests. There should be both JSON and XML tests included.
-
-Then run the following command:
-
- GENERATE_SAMPLES=True tox -epy27 nova.tests.integrated
-
-Which will create the files on doc/api_samples.
-
-If new tests are added or the .tpl files are changed due to bug fixes, the
-samples must be regenerated so they are in sync with the templates, as
-there is an additional test which reloads the documentation and
-ensures that it's in sync.
-
-Debugging sample generation
----------------------------
-
-If a .tpl is changed, its matching .xml and .json must be removed
-else the samples won't be generated. If an entirely new extension is
-added, a directory for it must be created before its samples will
-be generated.
diff --git a/nova/tests/integrated/api_samples_test_base.py b/nova/tests/integrated/api_samples_test_base.py
deleted file mode 100644
index ea1884f91d..0000000000
--- a/nova/tests/integrated/api_samples_test_base.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-import re
-
-from lxml import etree
-from oslo.serialization import jsonutils
-from oslo.utils import importutils
-import six
-
-from nova.i18n import _
-from nova import test
-from nova.tests.integrated import integrated_helpers
-
-
-class NoMatch(test.TestingException):
- pass
-
-
-class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
- ctype = 'json'
- all_extensions = False
- extension_name = None
-
- def _pretty_data(self, data):
- if self.ctype == 'json':
- data = jsonutils.dumps(jsonutils.loads(data), sort_keys=True,
- indent=4)
-
- else:
- if data is None:
- # Likely from missing XML file.
- return ""
- xml = etree.XML(data)
- data = etree.tostring(xml, encoding="UTF-8",
- xml_declaration=True, pretty_print=True)
- return '\n'.join(line.rstrip() for line in data.split('\n')).strip()
-
- def _objectify(self, data):
- if not data:
- return {}
- if self.ctype == 'json':
- # NOTE(vish): allow non-quoted replacements to survive json
- data = re.sub(r'([^"])%\((.+)\)s([^"])', r'\1"%(int:\2)s"\3', data)
- return jsonutils.loads(data)
- else:
- def to_dict(node):
- ret = {}
- if node.items():
- ret.update(dict(node.items()))
- if node.text:
- ret['__content__'] = node.text
- if node.tag:
- ret['__tag__'] = node.tag
- if node.nsmap:
- ret['__nsmap__'] = node.nsmap
- for element in node:
- ret.setdefault(node.tag, [])
- ret[node.tag].append(to_dict(element))
- return ret
- return to_dict(etree.fromstring(data))
-
- @classmethod
- def _get_sample_path(cls, name, dirname, suffix=''):
- parts = [dirname]
- parts.append('api_samples')
- if cls.all_extensions:
- parts.append('all_extensions')
- if cls.extension_name:
- alias = importutils.import_class(cls.extension_name).alias
- parts.append(alias)
- parts.append(name + "." + cls.ctype + suffix)
- return os.path.join(*parts)
-
- @classmethod
- def _get_sample(cls, name):
- dirname = os.path.dirname(os.path.abspath(__file__))
- dirname = os.path.normpath(os.path.join(dirname, "../../../doc"))
- return cls._get_sample_path(name, dirname)
-
- @classmethod
- def _get_template(cls, name):
- dirname = os.path.dirname(os.path.abspath(__file__))
- return cls._get_sample_path(name, dirname, suffix='.tpl')
-
- def _read_template(self, name):
- template = self._get_template(name)
- with open(template) as inf:
- return inf.read().strip()
-
- def _write_template(self, name, data):
- with open(self._get_template(name), 'w') as outf:
- outf.write(data)
-
- def _write_sample(self, name, data):
- with open(self._get_sample(name), 'w') as outf:
- outf.write(data)
-
- def _compare_result(self, subs, expected, result, result_str):
- matched_value = None
- if isinstance(expected, dict):
- if not isinstance(result, dict):
- raise NoMatch(_('%(result_str)s: %(result)s is not a dict.')
- % {'result_str': result_str, 'result': result})
- ex_keys = sorted(expected.keys())
- res_keys = sorted(result.keys())
- if ex_keys != res_keys:
- ex_delta = []
- res_delta = []
- for key in ex_keys:
- if key not in res_keys:
- ex_delta.append(key)
- for key in res_keys:
- if key not in ex_keys:
- res_delta.append(key)
- raise NoMatch(
- _('Dictionary key mismatch:\n'
- 'Extra key(s) in template:\n%(ex_delta)s\n'
- 'Extra key(s) in %(result_str)s:\n%(res_delta)s\n') %
- {'ex_delta': ex_delta, 'result_str': result_str,
- 'res_delta': res_delta})
- for key in ex_keys:
- res = self._compare_result(subs, expected[key], result[key],
- result_str)
- matched_value = res or matched_value
- elif isinstance(expected, list):
- if not isinstance(result, list):
- raise NoMatch(
- _('%(result_str)s: %(result)s is not a list.') %
- {'result_str': result_str, 'result': result})
-
- expected = expected[:]
- extra = []
- for res_obj in result:
- for i, ex_obj in enumerate(expected):
- try:
- matched_value = self._compare_result(subs, ex_obj,
- res_obj,
- result_str)
- del expected[i]
- break
- except NoMatch:
- pass
- else:
- extra.append(res_obj)
-
- error = []
- if expected:
- error.append(_('Extra list items in template:'))
- error.extend([repr(o) for o in expected])
-
- if extra:
- error.append(_('Extra list items in %(result_str)s:') %
- {'result_str': result_str})
- error.extend([repr(o) for o in extra])
-
- if error:
- raise NoMatch('\n'.join(error))
- elif isinstance(expected, six.string_types) and '%' in expected:
- # NOTE(vish): escape stuff for regex
- for char in '[]<>?':
- expected = expected.replace(char, '\\%s' % char)
- # NOTE(vish): special handling of subs that are not quoted. We are
- # expecting an int but we had to pass in a string
- # so the json would parse properly.
- if expected.startswith("%(int:"):
- result = str(result)
- expected = expected.replace('int:', '')
- expected = expected % subs
- expected = '^%s$' % expected
- match = re.match(expected, result)
- if not match:
- raise NoMatch(
- _('Values do not match:\n'
- 'Template: %(expected)s\n%(result_str)s: %(result)s') %
- {'expected': expected, 'result_str': result_str,
- 'result': result})
- try:
- matched_value = match.group('id')
- except IndexError:
- if match.groups():
- matched_value = match.groups()[0]
- else:
- if isinstance(expected, six.string_types):
- # NOTE(danms): Ignore whitespace in this comparison
- expected = expected.strip()
- if isinstance(result, six.string_types):
- result = result.strip()
- if expected != result:
- raise NoMatch(
- _('Values do not match:\n'
- 'Template: %(expected)s\n%(result_str)s: '
- '%(result)s') % {'expected': expected,
- 'result_str': result_str,
- 'result': result})
- return matched_value
-
- def generalize_subs(self, subs, vanilla_regexes):
- """Give the test a chance to modify subs after the server response
- was verified, and before the on-disk doc/api_samples file is checked.
- This may be needed by some tests to convert exact matches expected
- from the server into pattern matches to verify what is in the
- sample file.
-
- If there are no changes to be made, subs is returned unharmed.
- """
- return subs
-
- def _verify_response(self, name, subs, response, exp_code):
- self.assertEqual(response.status_code, exp_code)
- response_data = response.content
- response_data = self._pretty_data(response_data)
- if not os.path.exists(self._get_template(name)):
- self._write_template(name, response_data)
- template_data = response_data
- else:
- template_data = self._read_template(name)
-
- if (self.generate_samples and
- not os.path.exists(self._get_sample(name))):
- self._write_sample(name, response_data)
- sample_data = response_data
- else:
- with file(self._get_sample(name)) as sample:
- sample_data = sample.read()
-
- try:
- template_data = self._objectify(template_data)
- response_data = self._objectify(response_data)
- response_result = self._compare_result(subs, template_data,
- response_data, "Response")
- # NOTE(danms): replace some of the subs with patterns for the
- # doc/api_samples check, which won't have things like the
- # correct compute host name. Also let the test do some of its
- # own generalization, if necessary
- vanilla_regexes = self._get_regexes()
- subs['compute_host'] = vanilla_regexes['host_name']
- subs['id'] = vanilla_regexes['id']
- subs = self.generalize_subs(subs, vanilla_regexes)
- sample_data = self._objectify(sample_data)
- self._compare_result(subs, template_data, sample_data, "Sample")
- return response_result
- except NoMatch:
- raise
-
- def _get_host(self):
- return 'http://openstack.example.com'
-
- def _get_glance_host(self):
- return 'http://glance.openstack.example.com'
-
- def _get_regexes(self):
- if self.ctype == 'json':
- text = r'(\\"|[^"])*'
- else:
- text = r'[^<]*'
- isotime_re = '\d{4}-[0,1]\d-[0-3]\dT\d{2}:\d{2}:\d{2}Z'
- strtime_re = '\d{4}-[0,1]\d-[0-3]\dT\d{2}:\d{2}:\d{2}\.\d{6}'
- xmltime_re = ('\d{4}-[0,1]\d-[0-3]\d '
- '\d{2}:\d{2}:\d{2}'
- '(\.\d{6})?(\+00:00)?')
- return {
- 'isotime': isotime_re,
- 'strtime': strtime_re,
- 'strtime_or_none': r'None|%s' % strtime_re,
- 'xmltime': xmltime_re,
- 'password': '[0-9a-zA-Z]{1,12}',
- 'ip': '[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}',
- 'ip6': '([0-9a-zA-Z]{1,4}:){1,7}:?[0-9a-zA-Z]{1,4}',
- 'id': '(?P<id>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
- '-[0-9a-f]{4}-[0-9a-f]{12})',
- 'uuid': '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
- '-[0-9a-f]{4}-[0-9a-f]{12}',
- 'reservation_id': 'r-[0-9a-zA-Z]{8}',
- 'private_key': '-----BEGIN RSA PRIVATE KEY-----'
- '[a-zA-Z0-9\n/+=]*'
- '-----END RSA PRIVATE KEY-----',
- 'public_key': 'ssh-rsa[ a-zA-Z0-9/+=]*'
- 'Generated-by-Nova',
- 'fingerprint': '([0-9a-f]{2}:){15}[0-9a-f]{2}',
- 'host': self._get_host(),
- 'host_name': '[0-9a-z]{32}',
- 'glance_host': self._get_glance_host(),
- 'compute_host': self.compute.host,
- 'text': text,
- 'int': '[0-9]+',
- }
-
- def _get_response(self, url, method, body=None, strip_version=False):
- headers = {}
- headers['Content-Type'] = 'application/' + self.ctype
- headers['Accept'] = 'application/' + self.ctype
- return self.api.api_request(url, body=body, method=method,
- headers=headers, strip_version=strip_version)
-
- def _do_get(self, url, strip_version=False):
- return self._get_response(url, 'GET', strip_version=strip_version)
-
- def _do_post(self, url, name, subs, method='POST'):
- body = self._read_template(name) % subs
- sample = self._get_sample(name)
- if self.generate_samples and not os.path.exists(sample):
- self._write_sample(name, body)
- return self._get_response(url, method, body)
-
- def _do_put(self, url, name, subs):
- return self._do_post(url, name, subs, method='PUT')
-
- def _do_delete(self, url):
- return self._get_response(url, 'DELETE')
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
deleted file mode 100644
index 8460664583..0000000000
--- a/nova/tests/integrated/integrated_helpers.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# Copyright 2011 Justin Santa Barbara
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Provides common functionality for integrated unit tests
-"""
-
-import random
-import string
-import uuid
-
-from oslo.config import cfg
-
-import nova.image.glance
-from nova.openstack.common import log as logging
-from nova import service
-from nova import test
-from nova.tests import cast_as_call
-from nova.tests import fake_crypto
-import nova.tests.image.fake
-from nova.tests.integrated.api import client
-
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-CONF.import_opt('manager', 'nova.cells.opts', group='cells')
-
-
-def generate_random_alphanumeric(length):
- """Creates a random alphanumeric string of specified length."""
- return ''.join(random.choice(string.ascii_uppercase + string.digits)
- for _x in range(length))
-
-
-def generate_random_numeric(length):
- """Creates a random numeric string of specified length."""
- return ''.join(random.choice(string.digits)
- for _x in range(length))
-
-
-def generate_new_element(items, prefix, numeric=False):
- """Creates a random string with prefix, that is not in 'items' list."""
- while True:
- if numeric:
- candidate = prefix + generate_random_numeric(8)
- else:
- candidate = prefix + generate_random_alphanumeric(8)
- if candidate not in items:
- return candidate
- LOG.debug("Random collision on %s" % candidate)
-
-
-class _IntegratedTestBase(test.TestCase):
- REQUIRES_LOCKING = True
-
- def setUp(self):
- super(_IntegratedTestBase, self).setUp()
-
- f = self._get_flags()
- self.flags(**f)
- self.flags(verbose=True)
-
- self.useFixture(test.ReplaceModule('crypto', fake_crypto))
- nova.tests.image.fake.stub_out_image_service(self.stubs)
- self.flags(scheduler_driver='nova.scheduler.'
- 'chance.ChanceScheduler')
- self._setup_services()
- self._start_api_service()
-
- self.api = self._get_test_client()
-
- self.useFixture(cast_as_call.CastAsCall(self.stubs))
-
- def _setup_services(self):
- self.conductor = self.start_service('conductor',
- manager=CONF.conductor.manager)
- self.compute = self.start_service('compute')
- self.cert = self.start_service('cert')
- self.consoleauth = self.start_service('consoleauth')
- self.network = self.start_service('network')
- self.scheduler = self.start_service('scheduler')
- self.cells = self.start_service('cells', manager=CONF.cells.manager)
-
- def tearDown(self):
- self.osapi.stop()
- nova.tests.image.fake.FakeImageService_reset()
- super(_IntegratedTestBase, self).tearDown()
-
- def _get_test_client(self):
- return client.TestOpenStackClient('fake', 'fake', self.auth_url)
-
- def _start_api_service(self):
- self.osapi = service.WSGIService("osapi_compute")
- self.osapi.start()
- self.auth_url = 'http://%(host)s:%(port)s/%(api_version)s' % ({
- 'host': self.osapi.host, 'port': self.osapi.port,
- 'api_version': self._api_version})
-
- def _get_flags(self):
- """An opportunity to setup flags, before the services are started."""
- f = {}
-
- # Ensure tests only listen on localhost
- f['ec2_listen'] = '127.0.0.1'
- f['osapi_compute_listen'] = '127.0.0.1'
- f['metadata_listen'] = '127.0.0.1'
-
- # Auto-assign ports to allow concurrent tests
- f['ec2_listen_port'] = 0
- f['osapi_compute_listen_port'] = 0
- f['metadata_listen_port'] = 0
-
- f['fake_network'] = True
- return f
-
- def get_unused_server_name(self):
- servers = self.api.get_servers()
- server_names = [server['name'] for server in servers]
- return generate_new_element(server_names, 'server')
-
- def get_invalid_image(self):
- return str(uuid.uuid4())
-
- def _build_minimal_create_server_request(self):
- server = {}
-
- image = self.api.get_images()[0]
- LOG.debug("Image: %s" % image)
-
- if self._image_ref_parameter in image:
- image_href = image[self._image_ref_parameter]
- else:
- image_href = image['id']
- image_href = 'http://fake.server/%s' % image_href
-
- # We now have a valid imageId
- server[self._image_ref_parameter] = image_href
-
- # Set a valid flavorId
- flavor = self.api.get_flavors()[0]
- LOG.debug("Using flavor: %s" % flavor)
- server[self._flavor_ref_parameter] = ('http://fake.server/%s'
- % flavor['id'])
-
- # Set a valid server name
- server_name = self.get_unused_server_name()
- server['name'] = server_name
- return server
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
deleted file mode 100644
index f195287f91..0000000000
--- a/nova/tests/integrated/test_api_samples.py
+++ /dev/null
@@ -1,4433 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import base64
-import copy
-import datetime
-import inspect
-import os
-import re
-import urllib
-import uuid as uuid_lib
-
-from lxml import etree
-import mock
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-from oslo.utils import importutils
-from oslo.utils import timeutils
-
-from nova.api.metadata import password
-from nova.api.openstack.compute.contrib import fping
-from nova.api.openstack.compute import extensions
-# Import extensions to pull in osapi_compute_extension CONF option used below.
-from nova.cells import rpcapi as cells_rpcapi
-from nova.cells import state
-from nova.cloudpipe import pipelib
-from nova.compute import api as compute_api
-from nova.compute import cells_api as cells_api
-from nova.compute import manager as compute_manager
-from nova.compute import rpcapi as compute_rpcapi
-from nova.conductor import manager as conductor_manager
-from nova import context
-from nova import db
-from nova.db.sqlalchemy import models
-from nova import exception
-from nova.network import api as network_api
-from nova import objects
-from nova.openstack.common import log as logging
-import nova.quota
-from nova.servicegroup import api as service_group_api
-from nova import test
-from nova.tests.api.openstack.compute.contrib import test_fping
-from nova.tests.api.openstack.compute.contrib import test_networks
-from nova.tests.api.openstack.compute.contrib import test_services
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_block_device
-from nova.tests import fake_instance
-from nova.tests import fake_network
-from nova.tests import fake_network_cache_model
-from nova.tests import fake_server_actions
-from nova.tests import fake_utils
-from nova.tests.image import fake
-from nova.tests.integrated import api_samples_test_base
-from nova.tests.integrated import integrated_helpers
-from nova.tests.objects import test_network
-from nova.tests import utils as test_utils
-from nova import utils
-from nova.volume import cinder
-
-CONF = cfg.CONF
-CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
-CONF.import_opt('shelved_offload_time', 'nova.compute.manager')
-CONF.import_opt('enable_network_quota',
- 'nova.api.openstack.compute.contrib.os_tenant_networks')
-CONF.import_opt('osapi_compute_extension',
- 'nova.api.openstack.compute.extensions')
-CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
-CONF.import_opt('osapi_compute_link_prefix', 'nova.api.openstack.common')
-CONF.import_opt('osapi_glance_link_prefix', 'nova.api.openstack.common')
-CONF.import_opt('enable', 'nova.cells.opts', group='cells')
-CONF.import_opt('cell_type', 'nova.cells.opts', group='cells')
-CONF.import_opt('db_check_interval', 'nova.cells.state', group='cells')
-LOG = logging.getLogger(__name__)
-
-
-class ApiSampleTestBaseV2(api_samples_test_base.ApiSampleTestBase):
- _api_version = 'v2'
-
- def setUp(self):
- extends = []
- self.flags(use_ipv6=False,
- osapi_compute_link_prefix=self._get_host(),
- osapi_glance_link_prefix=self._get_glance_host())
- if not self.all_extensions:
- if hasattr(self, 'extends_name'):
- extends = [self.extends_name]
- ext = [self.extension_name] if self.extension_name else []
- self.flags(osapi_compute_extension=ext + extends)
- super(ApiSampleTestBaseV2, self).setUp()
- self.useFixture(test.SampleNetworks(host=self.network.host))
- fake_network.stub_compute_with_ips(self.stubs)
- fake_utils.stub_out_utils_spawn_n(self.stubs)
- self.generate_samples = os.getenv('GENERATE_SAMPLES') is not None
-
-
-class ApiSamplesTrap(ApiSampleTestBaseV2):
- """Make sure extensions don't get added without tests."""
-
- all_extensions = True
-
- def _get_extensions_tested(self):
- tests = []
- for attr in globals().values():
- if not inspect.isclass(attr):
- continue # Skip non-class objects
- if not issubclass(attr, integrated_helpers._IntegratedTestBase):
- continue # Skip non-test classes
- if attr.extension_name is None:
- continue # Skip base tests
- cls = importutils.import_class(attr.extension_name)
- tests.append(cls.alias)
- return tests
-
- def _get_extensions(self):
- extensions = []
- response = self._do_get('extensions')
- for extension in jsonutils.loads(response.content)['extensions']:
- extensions.append(str(extension['alias']))
- return extensions
-
- def test_all_extensions_have_samples(self):
- # NOTE(danms): This is a list of extensions which are currently
- # in the tree but that don't (yet) have tests. This list should
- # NOT be allowed to grow, and should shrink to zero (and be
- # removed) soon.
- do_not_approve_additions = []
- do_not_approve_additions.append('os-create-server-ext')
- do_not_approve_additions.append('os-baremetal-ext-status')
- do_not_approve_additions.append('os-baremetal-nodes')
-
- tests = self._get_extensions_tested()
- extensions = self._get_extensions()
- missing_tests = []
- for extension in extensions:
- # NOTE(danms): if you add tests, remove it from the
- # exclusions list
- self.assertFalse(extension in do_not_approve_additions and
- extension in tests)
-
- # NOTE(danms): if you add an extension, it must come with
- # api_samples tests!
- if (extension not in tests and
- extension not in do_not_approve_additions):
- missing_tests.append(extension)
-
- if missing_tests:
- LOG.error("Extensions are missing tests: %s" % missing_tests)
- self.assertEqual(missing_tests, [])
-
-
-class VersionsSampleJsonTest(ApiSampleTestBaseV2):
- def test_versions_get(self):
- response = self._do_get('', strip_version=True)
- subs = self._get_regexes()
- self._verify_response('versions-get-resp', subs, response, 200)
-
-
-class VersionsSampleXmlTest(VersionsSampleJsonTest):
- ctype = 'xml'
-
-
-class ServersSampleBase(ApiSampleTestBaseV2):
- def _post_server(self):
- subs = {
- 'image_id': fake.get_valid_image_id(),
- 'host': self._get_host(),
- }
- response = self._do_post('servers', 'server-post-req', subs)
- subs = self._get_regexes()
- return self._verify_response('server-post-resp', subs, response, 202)
-
-
-class ServersSampleJsonTest(ServersSampleBase):
- def test_servers_post(self):
- return self._post_server()
-
- def test_servers_get(self):
- uuid = self.test_servers_post()
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- subs['hypervisor_hostname'] = r'[\w\.\-]+'
- subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
- self._verify_response('server-get-resp', subs, response, 200)
-
- def test_servers_list(self):
- uuid = self._post_server()
- response = self._do_get('servers')
- subs = self._get_regexes()
- subs['id'] = uuid
- self._verify_response('servers-list-resp', subs, response, 200)
-
- def test_servers_details(self):
- uuid = self._post_server()
- response = self._do_get('servers/detail')
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- subs['hypervisor_hostname'] = r'[\w\.\-]+'
- subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
- self._verify_response('servers-details-resp', subs, response, 200)
-
-
-class ServersSampleXmlTest(ServersSampleJsonTest):
- ctype = 'xml'
-
-
-class ServersSampleAllExtensionJsonTest(ServersSampleJsonTest):
- all_extensions = True
-
-
-class ServersSampleAllExtensionXmlTest(ServersSampleXmlTest):
- all_extensions = True
-
-
-class ServersSampleHideAddressesJsonTest(ServersSampleJsonTest):
- extension_name = '.'.join(('nova.api.openstack.compute.contrib',
- 'hide_server_addresses',
- 'Hide_server_addresses'))
-
-
-class ServersSampleHideAddressesXMLTest(ServersSampleHideAddressesJsonTest):
- ctype = 'xml'
-
-
-class ServersSampleMultiStatusJsonTest(ServersSampleBase):
- extension_name = '.'.join(('nova.api.openstack.compute.contrib',
- 'server_list_multi_status',
- 'Server_list_multi_status'))
-
- def test_servers_list(self):
- uuid = self._post_server()
- response = self._do_get('servers?status=active&status=error')
- subs = self._get_regexes()
- subs['id'] = uuid
- self._verify_response('servers-list-resp', subs, response, 200)
-
-
-class ServersSampleMultiStatusXMLTest(ServersSampleMultiStatusJsonTest):
- ctype = 'xml'
-
-
-class ServersMetadataJsonTest(ServersSampleBase):
- def _create_and_set(self, subs):
- uuid = self._post_server()
- response = self._do_put('servers/%s/metadata' % uuid,
- 'server-metadata-all-req',
- subs)
- self._verify_response('server-metadata-all-resp', subs, response, 200)
- return uuid
-
- def generalize_subs(self, subs, vanilla_regexes):
- subs['value'] = '(Foo|Bar) Value'
- return subs
-
- def test_metadata_put_all(self):
- # Test setting all metadata for a server.
- subs = {'value': 'Foo Value'}
- self._create_and_set(subs)
-
- def test_metadata_post_all(self):
- # Test updating all metadata for a server.
- subs = {'value': 'Foo Value'}
- uuid = self._create_and_set(subs)
- subs['value'] = 'Bar Value'
- response = self._do_post('servers/%s/metadata' % uuid,
- 'server-metadata-all-req',
- subs)
- self._verify_response('server-metadata-all-resp', subs, response, 200)
-
- def test_metadata_get_all(self):
- # Test getting all metadata for a server.
- subs = {'value': 'Foo Value'}
- uuid = self._create_and_set(subs)
- response = self._do_get('servers/%s/metadata' % uuid)
- self._verify_response('server-metadata-all-resp', subs, response, 200)
-
- def test_metadata_put(self):
- # Test putting an individual metadata item for a server.
- subs = {'value': 'Foo Value'}
- uuid = self._create_and_set(subs)
- subs['value'] = 'Bar Value'
- response = self._do_put('servers/%s/metadata/foo' % uuid,
- 'server-metadata-req',
- subs)
- self._verify_response('server-metadata-resp', subs, response, 200)
-
- def test_metadata_get(self):
- # Test getting an individual metadata item for a server.
- subs = {'value': 'Foo Value'}
- uuid = self._create_and_set(subs)
- response = self._do_get('servers/%s/metadata/foo' % uuid)
- self._verify_response('server-metadata-resp', subs, response, 200)
-
- def test_metadata_delete(self):
- # Test deleting an individual metadata item for a server.
- subs = {'value': 'Foo Value'}
- uuid = self._create_and_set(subs)
- response = self._do_delete('servers/%s/metadata/foo' % uuid)
- self.assertEqual(response.status_code, 204)
- self.assertEqual(response.content, '')
-
-
-class ServersMetadataXmlTest(ServersMetadataJsonTest):
- ctype = 'xml'
-
-
-class ServersIpsJsonTest(ServersSampleBase):
- def test_get(self):
- # Test getting a server's IP information.
- uuid = self._post_server()
- response = self._do_get('servers/%s/ips' % uuid)
- subs = self._get_regexes()
- self._verify_response('server-ips-resp', subs, response, 200)
-
- def test_get_by_network(self):
- # Test getting a server's IP information by network id.
- uuid = self._post_server()
- response = self._do_get('servers/%s/ips/private' % uuid)
- subs = self._get_regexes()
- self._verify_response('server-ips-network-resp', subs, response, 200)
-
-
-class ServersIpsXmlTest(ServersIpsJsonTest):
- ctype = 'xml'
-
-
-class ExtensionsSampleJsonTest(ApiSampleTestBaseV2):
- all_extensions = True
-
- def test_extensions_get(self):
- response = self._do_get('extensions')
- subs = self._get_regexes()
- self._verify_response('extensions-get-resp', subs, response, 200)
-
-
-class ExtensionsSampleXmlTest(ExtensionsSampleJsonTest):
- ctype = 'xml'
-
-
-class FlavorsSampleJsonTest(ApiSampleTestBaseV2):
-
- def test_flavors_get(self):
- response = self._do_get('flavors/1')
- subs = self._get_regexes()
- self._verify_response('flavor-get-resp', subs, response, 200)
-
- def test_flavors_list(self):
- response = self._do_get('flavors')
- subs = self._get_regexes()
- self._verify_response('flavors-list-resp', subs, response, 200)
-
-
-class FlavorsSampleXmlTest(FlavorsSampleJsonTest):
- ctype = 'xml'
-
-
-class HostsSampleJsonTest(ApiSampleTestBaseV2):
- extension_name = "nova.api.openstack.compute.contrib.hosts.Hosts"
-
- def test_host_startup(self):
- response = self._do_get('os-hosts/%s/startup' % self.compute.host)
- subs = self._get_regexes()
- self._verify_response('host-get-startup', subs, response, 200)
-
- def test_host_reboot(self):
- response = self._do_get('os-hosts/%s/reboot' % self.compute.host)
- subs = self._get_regexes()
- self._verify_response('host-get-reboot', subs, response, 200)
-
- def test_host_shutdown(self):
- response = self._do_get('os-hosts/%s/shutdown' % self.compute.host)
- subs = self._get_regexes()
- self._verify_response('host-get-shutdown', subs, response, 200)
-
- def test_host_maintenance(self):
- response = self._do_put('os-hosts/%s' % self.compute.host,
- 'host-put-maintenance-req', {})
- subs = self._get_regexes()
- self._verify_response('host-put-maintenance-resp', subs, response, 200)
-
- def test_host_get(self):
- response = self._do_get('os-hosts/%s' % self.compute.host)
- subs = self._get_regexes()
- self._verify_response('host-get-resp', subs, response, 200)
-
- def test_hosts_list(self):
- response = self._do_get('os-hosts')
- subs = self._get_regexes()
- self._verify_response('hosts-list-resp', subs, response, 200)
-
-
-class HostsSampleXmlTest(HostsSampleJsonTest):
- ctype = 'xml'
-
-
-class FlavorsSampleAllExtensionJsonTest(FlavorsSampleJsonTest):
- all_extensions = True
-
-
-class FlavorsSampleAllExtensionXmlTest(FlavorsSampleXmlTest):
- all_extensions = True
-
-
-class ImagesSampleJsonTest(ApiSampleTestBaseV2):
- def test_images_list(self):
- # Get api sample of images get list request.
- response = self._do_get('images')
- subs = self._get_regexes()
- self._verify_response('images-list-get-resp', subs, response, 200)
-
- def test_image_get(self):
- # Get api sample of one single image details request.
- image_id = fake.get_valid_image_id()
- response = self._do_get('images/%s' % image_id)
- subs = self._get_regexes()
- subs['image_id'] = image_id
- self._verify_response('image-get-resp', subs, response, 200)
-
- def test_images_details(self):
- # Get api sample of all images details request.
- response = self._do_get('images/detail')
- subs = self._get_regexes()
- self._verify_response('images-details-get-resp', subs, response, 200)
-
- def test_image_metadata_get(self):
- # Get api sample of an image metadata request.
- image_id = fake.get_valid_image_id()
- response = self._do_get('images/%s/metadata' % image_id)
- subs = self._get_regexes()
- subs['image_id'] = image_id
- self._verify_response('image-metadata-get-resp', subs, response, 200)
-
- def test_image_metadata_post(self):
- # Get api sample to update metadata of an image metadata request.
- image_id = fake.get_valid_image_id()
- response = self._do_post(
- 'images/%s/metadata' % image_id,
- 'image-metadata-post-req', {})
- subs = self._get_regexes()
- self._verify_response('image-metadata-post-resp', subs, response, 200)
-
- def test_image_metadata_put(self):
- # Get api sample of image metadata put request.
- image_id = fake.get_valid_image_id()
- response = self._do_put('images/%s/metadata' % image_id,
- 'image-metadata-put-req', {})
- subs = self._get_regexes()
- self._verify_response('image-metadata-put-resp', subs, response, 200)
-
- def test_image_meta_key_get(self):
- # Get api sample of an image metadata key request.
- image_id = fake.get_valid_image_id()
- key = "kernel_id"
- response = self._do_get('images/%s/metadata/%s' % (image_id, key))
- subs = self._get_regexes()
- self._verify_response('image-meta-key-get', subs, response, 200)
-
- def test_image_meta_key_put(self):
- # Get api sample of image metadata key put request.
- image_id = fake.get_valid_image_id()
- key = "auto_disk_config"
- response = self._do_put('images/%s/metadata/%s' % (image_id, key),
- 'image-meta-key-put-req', {})
- subs = self._get_regexes()
- self._verify_response('image-meta-key-put-resp', subs, response, 200)
-
-
-class ImagesSampleXmlTest(ImagesSampleJsonTest):
- ctype = 'xml'
-
-
-class LimitsSampleJsonTest(ApiSampleTestBaseV2):
- def test_limits_get(self):
- response = self._do_get('limits')
- subs = self._get_regexes()
- self._verify_response('limit-get-resp', subs, response, 200)
-
-
-class LimitsSampleXmlTest(LimitsSampleJsonTest):
- ctype = 'xml'
-
-
-class ServersActionsJsonTest(ServersSampleBase):
- def _test_server_action(self, uuid, action,
- subs=None, resp_tpl=None, code=202):
- subs = subs or {}
- subs.update({'action': action})
- response = self._do_post('servers/%s/action' % uuid,
- 'server-action-%s' % action.lower(),
- subs)
- if resp_tpl:
- subs.update(self._get_regexes())
- self._verify_response(resp_tpl, subs, response, code)
- else:
- self.assertEqual(response.status_code, code)
- self.assertEqual(response.content, "")
-
- def test_server_password(self):
- uuid = self._post_server()
- self._test_server_action(uuid, "changePassword",
- {"password": "foo"})
-
- def test_server_reboot_hard(self):
- uuid = self._post_server()
- self._test_server_action(uuid, "reboot",
- {"type": "HARD"})
-
- def test_server_reboot_soft(self):
- uuid = self._post_server()
- self._test_server_action(uuid, "reboot",
- {"type": "SOFT"})
-
- def test_server_rebuild(self):
- uuid = self._post_server()
- image = self.api.get_images()[0]['id']
- subs = {'host': self._get_host(),
- 'uuid': image,
- 'name': 'foobar',
- 'pass': 'seekr3t',
- 'ip': '1.2.3.4',
- 'ip6': 'fe80::100',
- 'hostid': '[a-f0-9]+',
- }
- self._test_server_action(uuid, 'rebuild', subs,
- 'server-action-rebuild-resp')
-
- def test_server_resize(self):
- self.flags(allow_resize_to_same_host=True)
- uuid = self._post_server()
- self._test_server_action(uuid, "resize",
- {"id": 2,
- "host": self._get_host()})
- return uuid
-
- def test_server_revert_resize(self):
- uuid = self.test_server_resize()
- self._test_server_action(uuid, "revertResize")
-
- def test_server_confirm_resize(self):
- uuid = self.test_server_resize()
- self._test_server_action(uuid, "confirmResize", code=204)
-
- def test_server_create_image(self):
- uuid = self._post_server()
- self._test_server_action(uuid, 'createImage',
- {'name': 'foo-image',
- 'meta_var': 'myvar',
- 'meta_val': 'foobar'})
-
-
-class ServersActionsXmlTest(ServersActionsJsonTest):
- ctype = 'xml'
-
-
-class ServersActionsAllJsonTest(ServersActionsJsonTest):
- all_extensions = True
-
-
-class ServersActionsAllXmlTest(ServersActionsXmlTest):
- all_extensions = True
-
-
-class ServerStartStopJsonTest(ServersSampleBase):
- extension_name = "nova.api.openstack.compute.contrib" + \
- ".server_start_stop.Server_start_stop"
-
- def _test_server_action(self, uuid, action):
- response = self._do_post('servers/%s/action' % uuid,
- 'server_start_stop',
- {'action': action})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
- def test_server_start(self):
- uuid = self._post_server()
- self._test_server_action(uuid, 'os-stop')
- self._test_server_action(uuid, 'os-start')
-
- def test_server_stop(self):
- uuid = self._post_server()
- self._test_server_action(uuid, 'os-stop')
-
-
-class ServerStartStopXmlTest(ServerStartStopJsonTest):
- ctype = 'xml'
-
-
-class UserDataJsonTest(ApiSampleTestBaseV2):
- extension_name = "nova.api.openstack.compute.contrib.user_data.User_data"
-
- def test_user_data_post(self):
- user_data_contents = '#!/bin/bash\n/bin/su\necho "I am in you!"\n'
- user_data = base64.b64encode(user_data_contents)
- subs = {
- 'image_id': fake.get_valid_image_id(),
- 'host': self._get_host(),
- 'user_data': user_data
- }
- response = self._do_post('servers', 'userdata-post-req', subs)
-
- subs.update(self._get_regexes())
- self._verify_response('userdata-post-resp', subs, response, 202)
-
-
-class UserDataXmlTest(UserDataJsonTest):
- ctype = 'xml'
-
-
-class FlavorsExtraDataJsonTest(ApiSampleTestBaseV2):
- extension_name = ('nova.api.openstack.compute.contrib.flavorextradata.'
- 'Flavorextradata')
-
- def _get_flags(self):
- f = super(FlavorsExtraDataJsonTest, self)._get_flags()
- f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
- # Flavorextradata extension also needs Flavormanage to be loaded.
- f['osapi_compute_extension'].append(
- 'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
- return f
-
- def test_flavors_extra_data_get(self):
- flavor_id = 1
- response = self._do_get('flavors/%s' % flavor_id)
- subs = {
- 'flavor_id': flavor_id,
- 'flavor_name': 'm1.tiny'
- }
- subs.update(self._get_regexes())
- self._verify_response('flavors-extra-data-get-resp',
- subs, response, 200)
-
- def test_flavors_extra_data_list(self):
- response = self._do_get('flavors/detail')
- subs = self._get_regexes()
- self._verify_response('flavors-extra-data-list-resp',
- subs, response, 200)
-
- def test_flavors_extra_data_create(self):
- subs = {
- 'flavor_id': 666,
- 'flavor_name': 'flavortest'
- }
- response = self._do_post('flavors',
- 'flavors-extra-data-post-req',
- subs)
- subs.update(self._get_regexes())
- self._verify_response('flavors-extra-data-post-resp',
- subs, response, 200)
-
-
-class FlavorsExtraDataXmlTest(FlavorsExtraDataJsonTest):
- ctype = 'xml'
-
-
-class FlavorRxtxJsonTest(ApiSampleTestBaseV2):
- extension_name = ('nova.api.openstack.compute.contrib.flavor_rxtx.'
- 'Flavor_rxtx')
-
- def _get_flags(self):
- f = super(FlavorRxtxJsonTest, self)._get_flags()
- f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
- # FlavorRxtx extension also needs Flavormanage to be loaded.
- f['osapi_compute_extension'].append(
- 'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
- return f
-
- def test_flavor_rxtx_get(self):
- flavor_id = 1
- response = self._do_get('flavors/%s' % flavor_id)
- subs = {
- 'flavor_id': flavor_id,
- 'flavor_name': 'm1.tiny'
- }
- subs.update(self._get_regexes())
- self._verify_response('flavor-rxtx-get-resp', subs, response, 200)
-
- def test_flavors_rxtx_list(self):
- response = self._do_get('flavors/detail')
- subs = self._get_regexes()
- self._verify_response('flavor-rxtx-list-resp', subs, response, 200)
-
- def test_flavors_rxtx_create(self):
- subs = {
- 'flavor_id': 100,
- 'flavor_name': 'flavortest'
- }
- response = self._do_post('flavors',
- 'flavor-rxtx-post-req',
- subs)
- subs.update(self._get_regexes())
- self._verify_response('flavor-rxtx-post-resp', subs, response, 200)
-
-
-class FlavorRxtxXmlTest(FlavorRxtxJsonTest):
- ctype = 'xml'
-
-
-class FlavorSwapJsonTest(ApiSampleTestBaseV2):
- extension_name = ('nova.api.openstack.compute.contrib.flavor_swap.'
- 'Flavor_swap')
-
- def _get_flags(self):
- f = super(FlavorSwapJsonTest, self)._get_flags()
- f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
- # FlavorSwap extension also needs Flavormanage to be loaded.
- f['osapi_compute_extension'].append(
- 'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
- return f
-
- def test_flavor_swap_get(self):
- flavor_id = 1
- response = self._do_get('flavors/%s' % flavor_id)
- subs = {
- 'flavor_id': flavor_id,
- 'flavor_name': 'm1.tiny'
- }
- subs.update(self._get_regexes())
- self._verify_response('flavor-swap-get-resp', subs, response, 200)
-
- def test_flavor_swap_list(self):
- response = self._do_get('flavors/detail')
- subs = self._get_regexes()
- self._verify_response('flavor-swap-list-resp', subs, response, 200)
-
- def test_flavor_swap_create(self):
- subs = {
- 'flavor_id': 100,
- 'flavor_name': 'flavortest'
- }
- response = self._do_post('flavors',
- 'flavor-swap-post-req',
- subs)
- subs.update(self._get_regexes())
- self._verify_response('flavor-swap-post-resp', subs, response, 200)
-
-
-class FlavorSwapXmlTest(FlavorSwapJsonTest):
- ctype = 'xml'
-
-
-class SecurityGroupsSampleJsonTest(ServersSampleBase):
- extension_name = "nova.api.openstack.compute.contrib" + \
- ".security_groups.Security_groups"
-
- def _get_create_subs(self):
- return {
- 'group_name': 'test',
- "description": "description",
- }
-
- def _create_security_group(self):
- subs = self._get_create_subs()
- return self._do_post('os-security-groups',
- 'security-group-post-req', subs)
-
- def _add_group(self, uuid):
- subs = {
- 'group_name': 'test'
- }
- return self._do_post('servers/%s/action' % uuid,
- 'security-group-add-post-req', subs)
-
- def test_security_group_create(self):
- response = self._create_security_group()
- subs = self._get_create_subs()
- self._verify_response('security-groups-create-resp', subs,
- response, 200)
-
- def test_security_groups_list(self):
- # Get api sample of security groups get list request.
- response = self._do_get('os-security-groups')
- subs = self._get_regexes()
- self._verify_response('security-groups-list-get-resp',
- subs, response, 200)
-
- def test_security_groups_get(self):
- # Get api sample of security groups get request.
- security_group_id = '1'
- response = self._do_get('os-security-groups/%s' % security_group_id)
- subs = self._get_regexes()
- self._verify_response('security-groups-get-resp', subs, response, 200)
-
- def test_security_groups_list_server(self):
- # Get api sample of security groups for a specific server.
- uuid = self._post_server()
- response = self._do_get('servers/%s/os-security-groups' % uuid)
- subs = self._get_regexes()
- self._verify_response('server-security-groups-list-resp',
- subs, response, 200)
-
- def test_security_groups_add(self):
- self._create_security_group()
- uuid = self._post_server()
- response = self._add_group(uuid)
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
-
- def test_security_groups_remove(self):
- self._create_security_group()
- uuid = self._post_server()
- self._add_group(uuid)
- subs = {
- 'group_name': 'test'
- }
- response = self._do_post('servers/%s/action' % uuid,
- 'security-group-remove-post-req', subs)
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
-
-
-class SecurityGroupsSampleXmlTest(SecurityGroupsSampleJsonTest):
- ctype = 'xml'
-
-
-class SecurityGroupDefaultRulesSampleJsonTest(ServersSampleBase):
- extension_name = ('nova.api.openstack.compute.contrib'
- '.security_group_default_rules'
- '.Security_group_default_rules')
-
- def test_security_group_default_rules_create(self):
- response = self._do_post('os-security-group-default-rules',
- 'security-group-default-rules-create-req',
- {})
- self._verify_response('security-group-default-rules-create-resp',
- {}, response, 200)
-
- def test_security_group_default_rules_list(self):
- self.test_security_group_default_rules_create()
- response = self._do_get('os-security-group-default-rules')
- self._verify_response('security-group-default-rules-list-resp',
- {}, response, 200)
-
- def test_security_group_default_rules_show(self):
- self.test_security_group_default_rules_create()
- rule_id = '1'
- response = self._do_get('os-security-group-default-rules/%s' % rule_id)
- self._verify_response('security-group-default-rules-show-resp',
- {}, response, 200)
-
-
-class SecurityGroupDefaultRulesSampleXmlTest(
- SecurityGroupDefaultRulesSampleJsonTest):
- ctype = 'xml'
-
-
-class SchedulerHintsJsonTest(ApiSampleTestBaseV2):
- extension_name = ("nova.api.openstack.compute.contrib.scheduler_hints."
- "Scheduler_hints")
-
- def test_scheduler_hints_post(self):
- # Get api sample of scheduler hint post request.
- hints = {'image_id': fake.get_valid_image_id(),
- 'image_near': str(uuid_lib.uuid4())
- }
- response = self._do_post('servers', 'scheduler-hints-post-req',
- hints)
- subs = self._get_regexes()
- self._verify_response('scheduler-hints-post-resp', subs, response, 202)
-
-
-class SchedulerHintsXmlTest(SchedulerHintsJsonTest):
- ctype = 'xml'
-
-
-class ConsoleOutputSampleJsonTest(ServersSampleBase):
- extension_name = "nova.api.openstack.compute.contrib" + \
- ".console_output.Console_output"
-
- def test_get_console_output(self):
- uuid = self._post_server()
- response = self._do_post('servers/%s/action' % uuid,
- 'console-output-post-req',
- {'action': 'os-getConsoleOutput'})
- subs = self._get_regexes()
- self._verify_response('console-output-post-resp', subs, response, 200)
-
-
-class ConsoleOutputSampleXmlTest(ConsoleOutputSampleJsonTest):
- ctype = 'xml'
-
-
-class ExtendedServerAttributesJsonTest(ServersSampleBase):
- extension_name = "nova.api.openstack.compute.contrib" + \
- ".extended_server_attributes" + \
- ".Extended_server_attributes"
-
- def test_show(self):
- uuid = self._post_server()
-
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- subs['instance_name'] = 'instance-\d{8}'
- subs['hypervisor_hostname'] = r'[\w\.\-]+'
- self._verify_response('server-get-resp', subs, response, 200)
-
- def test_detail(self):
- uuid = self._post_server()
-
- response = self._do_get('servers/detail')
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- subs['instance_name'] = 'instance-\d{8}'
- subs['hypervisor_hostname'] = r'[\w\.\-]+'
- self._verify_response('servers-detail-resp', subs, response, 200)
-
-
-class ExtendedServerAttributesXmlTest(ExtendedServerAttributesJsonTest):
- ctype = 'xml'
-
-
-class FloatingIpsJsonTest(ApiSampleTestBaseV2):
- extension_name = "nova.api.openstack.compute.contrib." \
- "floating_ips.Floating_ips"
-
- def setUp(self):
- super(FloatingIpsJsonTest, self).setUp()
- pool = CONF.default_floating_pool
- interface = CONF.public_interface
-
- self.ip_pool = [
- {
- 'address': "10.10.10.1",
- 'pool': pool,
- 'interface': interface
- },
- {
- 'address': "10.10.10.2",
- 'pool': pool,
- 'interface': interface
- },
- {
- 'address': "10.10.10.3",
- 'pool': pool,
- 'interface': interface
- },
- ]
- self.compute.db.floating_ip_bulk_create(
- context.get_admin_context(), self.ip_pool)
-
- def tearDown(self):
- self.compute.db.floating_ip_bulk_destroy(
- context.get_admin_context(), self.ip_pool)
- super(FloatingIpsJsonTest, self).tearDown()
-
- def test_floating_ips_list_empty(self):
- response = self._do_get('os-floating-ips')
-
- subs = self._get_regexes()
- self._verify_response('floating-ips-list-empty-resp',
- subs, response, 200)
-
- def test_floating_ips_list(self):
- self._do_post('os-floating-ips',
- 'floating-ips-create-nopool-req',
- {})
- self._do_post('os-floating-ips',
- 'floating-ips-create-nopool-req',
- {})
-
- response = self._do_get('os-floating-ips')
- subs = self._get_regexes()
- self._verify_response('floating-ips-list-resp',
- subs, response, 200)
-
- def test_floating_ips_create_nopool(self):
- response = self._do_post('os-floating-ips',
- 'floating-ips-create-nopool-req',
- {})
- subs = self._get_regexes()
- self._verify_response('floating-ips-create-resp',
- subs, response, 200)
-
- def test_floating_ips_create(self):
- response = self._do_post('os-floating-ips',
- 'floating-ips-create-req',
- {"pool": CONF.default_floating_pool})
- subs = self._get_regexes()
- self._verify_response('floating-ips-create-resp', subs, response, 200)
-
- def test_floating_ips_get(self):
- self.test_floating_ips_create()
- # NOTE(sdague): the first floating ip will always have 1 as an id,
- # but it would be better if we could get this from the create
- response = self._do_get('os-floating-ips/%d' % 1)
- subs = self._get_regexes()
- self._verify_response('floating-ips-create-resp', subs, response, 200)
-
- def test_floating_ips_delete(self):
- self.test_floating_ips_create()
- response = self._do_delete('os-floating-ips/%d' % 1)
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
-
-class ExtendedFloatingIpsJsonTest(FloatingIpsJsonTest):
- extends_name = ("nova.api.openstack.compute.contrib."
- "floating_ips.Floating_ips")
- extension_name = ("nova.api.openstack.compute.contrib."
- "extended_floating_ips.Extended_floating_ips")
-
-
-class FloatingIpsXmlTest(FloatingIpsJsonTest):
- ctype = 'xml'
-
-
-class ExtendedFloatingIpsXmlTest(ExtendedFloatingIpsJsonTest):
- ctype = 'xml'
-
-
-class FloatingIpsBulkJsonTest(ApiSampleTestBaseV2):
- extension_name = "nova.api.openstack.compute.contrib." \
- "floating_ips_bulk.Floating_ips_bulk"
-
- def setUp(self):
- super(FloatingIpsBulkJsonTest, self).setUp()
- pool = CONF.default_floating_pool
- interface = CONF.public_interface
-
- self.ip_pool = [
- {
- 'address': "10.10.10.1",
- 'pool': pool,
- 'interface': interface
- },
- {
- 'address': "10.10.10.2",
- 'pool': pool,
- 'interface': interface
- },
- {
- 'address': "10.10.10.3",
- 'pool': pool,
- 'interface': interface,
- 'host': "testHost"
- },
- ]
- self.compute.db.floating_ip_bulk_create(
- context.get_admin_context(), self.ip_pool)
-
- def tearDown(self):
- self.compute.db.floating_ip_bulk_destroy(
- context.get_admin_context(), self.ip_pool)
- super(FloatingIpsBulkJsonTest, self).tearDown()
-
- def test_floating_ips_bulk_list(self):
- response = self._do_get('os-floating-ips-bulk')
- subs = self._get_regexes()
- self._verify_response('floating-ips-bulk-list-resp',
- subs, response, 200)
-
- def test_floating_ips_bulk_list_by_host(self):
- response = self._do_get('os-floating-ips-bulk/testHost')
- subs = self._get_regexes()
- self._verify_response('floating-ips-bulk-list-by-host-resp',
- subs, response, 200)
-
- def test_floating_ips_bulk_create(self):
- response = self._do_post('os-floating-ips-bulk',
- 'floating-ips-bulk-create-req',
- {"ip_range": "192.168.1.0/24",
- "pool": CONF.default_floating_pool,
- "interface": CONF.public_interface})
- subs = self._get_regexes()
- self._verify_response('floating-ips-bulk-create-resp', subs,
- response, 200)
-
- def test_floating_ips_bulk_delete(self):
- response = self._do_put('os-floating-ips-bulk/delete',
- 'floating-ips-bulk-delete-req',
- {"ip_range": "192.168.1.0/24"})
- subs = self._get_regexes()
- self._verify_response('floating-ips-bulk-delete-resp', subs,
- response, 200)
-
-
-class FloatingIpsBulkXmlTest(FloatingIpsBulkJsonTest):
- ctype = 'xml'
-
-
-class KeyPairsSampleJsonTest(ApiSampleTestBaseV2):
- extension_name = "nova.api.openstack.compute.contrib.keypairs.Keypairs"
-
- def generalize_subs(self, subs, vanilla_regexes):
- subs['keypair_name'] = 'keypair-[0-9a-f-]+'
- return subs
-
- def test_keypairs_post(self, public_key=None):
- """Get api sample of key pairs post request."""
- key_name = 'keypair-' + str(uuid_lib.uuid4())
- response = self._do_post('os-keypairs', 'keypairs-post-req',
- {'keypair_name': key_name})
- subs = self._get_regexes()
- subs['keypair_name'] = '(%s)' % key_name
- self._verify_response('keypairs-post-resp', subs, response, 200)
- # NOTE(maurosr): return the key_name is necessary cause the
- # verification returns the label of the last compared information in
- # the response, not necessarily the key name.
- return key_name
-
- def test_keypairs_import_key_post(self):
- # Get api sample of key pairs post to import user's key.
- key_name = 'keypair-' + str(uuid_lib.uuid4())
- subs = {
- 'keypair_name': key_name,
- 'public_key': "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGg"
- "B4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0l"
- "RE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv"
- "9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYc"
- "pSxsIbECHw== Generated-by-Nova"
- }
- response = self._do_post('os-keypairs', 'keypairs-import-post-req',
- subs)
- subs = self._get_regexes()
- subs['keypair_name'] = '(%s)' % key_name
- self._verify_response('keypairs-import-post-resp', subs, response, 200)
-
- def test_keypairs_list(self):
- # Get api sample of key pairs list request.
- key_name = self.test_keypairs_post()
- response = self._do_get('os-keypairs')
- subs = self._get_regexes()
- subs['keypair_name'] = '(%s)' % key_name
- self._verify_response('keypairs-list-resp', subs, response, 200)
-
- def test_keypairs_get(self):
- # Get api sample of key pairs get request.
- key_name = self.test_keypairs_post()
- response = self._do_get('os-keypairs/%s' % key_name)
- subs = self._get_regexes()
- subs['keypair_name'] = '(%s)' % key_name
- self._verify_response('keypairs-get-resp', subs, response, 200)
-
-
-class KeyPairsSampleXmlTest(KeyPairsSampleJsonTest):
- ctype = 'xml'
-
-
-class RescueJsonTest(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib"
- ".rescue.Rescue")
-
- def _rescue(self, uuid):
- req_subs = {
- 'password': 'MySecretPass'
- }
- response = self._do_post('servers/%s/action' % uuid,
- 'server-rescue-req', req_subs)
- self._verify_response('server-rescue', req_subs, response, 200)
-
- def _unrescue(self, uuid):
- response = self._do_post('servers/%s/action' % uuid,
- 'server-unrescue-req', {})
- self.assertEqual(response.status_code, 202)
-
- def test_server_rescue(self):
- uuid = self._post_server()
-
- self._rescue(uuid)
-
- # Do a server get to make sure that the 'RESCUE' state is set
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- subs['status'] = 'RESCUE'
-
- self._verify_response('server-get-resp-rescue', subs, response, 200)
-
- def test_server_unrescue(self):
- uuid = self._post_server()
-
- self._rescue(uuid)
- self._unrescue(uuid)
-
- # Do a server get to make sure that the 'ACTIVE' state is back
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- subs['status'] = 'ACTIVE'
-
- self._verify_response('server-get-resp-unrescue', subs, response, 200)
-
-
-class RescueXmlTest(RescueJsonTest):
- ctype = 'xml'
-
-
-class ExtendedRescueWithImageJsonTest(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib"
- ".extended_rescue_with_image.Extended_rescue_with_image")
-
- def _get_flags(self):
- f = super(ExtendedRescueWithImageJsonTest, self)._get_flags()
- f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
- # ExtendedRescueWithImage extension also needs Rescue to be loaded.
- f['osapi_compute_extension'].append(
- 'nova.api.openstack.compute.contrib.rescue.Rescue')
- return f
-
- def _rescue(self, uuid):
- req_subs = {
- 'password': 'MySecretPass',
- 'rescue_image_ref': fake.get_valid_image_id()
- }
- response = self._do_post('servers/%s/action' % uuid,
- 'server-rescue-req', req_subs)
- self._verify_response('server-rescue', req_subs, response, 200)
-
- def test_server_rescue(self):
- uuid = self._post_server()
-
- self._rescue(uuid)
-
- # Do a server get to make sure that the 'RESCUE' state is set
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- subs['status'] = 'RESCUE'
-
- self._verify_response('server-get-resp-rescue', subs, response, 200)
-
-
-class ExtendedRescueWithImageXmlTest(ExtendedRescueWithImageJsonTest):
- ctype = 'xml'
-
-
-class ShelveJsonTest(ServersSampleBase):
- extension_name = "nova.api.openstack.compute.contrib.shelve.Shelve"
-
- def setUp(self):
- super(ShelveJsonTest, self).setUp()
- # Don't offload instance, so we can test the offload call.
- CONF.set_override('shelved_offload_time', -1)
-
- def _test_server_action(self, uuid, template, action):
- response = self._do_post('servers/%s/action' % uuid,
- template, {'action': action})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
- def test_shelve(self):
- uuid = self._post_server()
- self._test_server_action(uuid, 'os-shelve', 'shelve')
-
- def test_shelve_offload(self):
- uuid = self._post_server()
- self._test_server_action(uuid, 'os-shelve', 'shelve')
- self._test_server_action(uuid, 'os-shelve-offload', 'shelveOffload')
-
- def test_unshelve(self):
- uuid = self._post_server()
- self._test_server_action(uuid, 'os-shelve', 'shelve')
- self._test_server_action(uuid, 'os-unshelve', 'unshelve')
-
-
-class ShelveXmlTest(ShelveJsonTest):
- ctype = 'xml'
-
-
-class VirtualInterfacesJsonTest(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib"
- ".virtual_interfaces.Virtual_interfaces")
-
- def test_vifs_list(self):
- uuid = self._post_server()
-
- response = self._do_get('servers/%s/os-virtual-interfaces' % uuid)
-
- subs = self._get_regexes()
- subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
-
- self._verify_response('vifs-list-resp', subs, response, 200)
-
-
-class VirtualInterfacesXmlTest(VirtualInterfacesJsonTest):
- ctype = 'xml'
-
-
-class CloudPipeSampleJsonTest(ApiSampleTestBaseV2):
- extension_name = "nova.api.openstack.compute.contrib.cloudpipe.Cloudpipe"
-
- def setUp(self):
- super(CloudPipeSampleJsonTest, self).setUp()
-
- def get_user_data(self, project_id):
- """Stub method to generate user data for cloudpipe tests."""
- return "VVNFUiBEQVRB\n"
-
- def network_api_get(self, context, network_uuid):
- """Stub to get a valid network and its information."""
- return {'vpn_public_address': '127.0.0.1',
- 'vpn_public_port': 22}
-
- self.stubs.Set(pipelib.CloudPipe, 'get_encoded_zip', get_user_data)
- self.stubs.Set(network_api.API, "get",
- network_api_get)
-
- def generalize_subs(self, subs, vanilla_regexes):
- subs['project_id'] = 'cloudpipe-[0-9a-f-]+'
- return subs
-
- def test_cloud_pipe_create(self):
- # Get api samples of cloud pipe extension creation.
- self.flags(vpn_image_id=fake.get_valid_image_id())
- project = {'project_id': 'cloudpipe-' + str(uuid_lib.uuid4())}
- response = self._do_post('os-cloudpipe', 'cloud-pipe-create-req',
- project)
- subs = self._get_regexes()
- subs.update(project)
- subs['image_id'] = CONF.vpn_image_id
- self._verify_response('cloud-pipe-create-resp', subs, response, 200)
- return project
-
- def test_cloud_pipe_list(self):
- # Get api samples of cloud pipe extension get request.
- project = self.test_cloud_pipe_create()
- response = self._do_get('os-cloudpipe')
- subs = self._get_regexes()
- subs.update(project)
- subs['image_id'] = CONF.vpn_image_id
- self._verify_response('cloud-pipe-get-resp', subs, response, 200)
-
-
-class CloudPipeSampleXmlTest(CloudPipeSampleJsonTest):
- ctype = "xml"
-
-
-class CloudPipeUpdateJsonTest(ApiSampleTestBaseV2):
- extension_name = ("nova.api.openstack.compute.contrib"
- ".cloudpipe_update.Cloudpipe_update")
-
- def _get_flags(self):
- f = super(CloudPipeUpdateJsonTest, self)._get_flags()
- f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
- # Cloudpipe_update also needs cloudpipe to be loaded
- f['osapi_compute_extension'].append(
- 'nova.api.openstack.compute.contrib.cloudpipe.Cloudpipe')
- return f
-
- def test_cloud_pipe_update(self):
- subs = {'vpn_ip': '192.168.1.1',
- 'vpn_port': 2000}
- response = self._do_put('os-cloudpipe/configure-project',
- 'cloud-pipe-update-req',
- subs)
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
-
-class CloudPipeUpdateXmlTest(CloudPipeUpdateJsonTest):
- ctype = "xml"
-
-
-class AgentsJsonTest(ApiSampleTestBaseV2):
- extension_name = "nova.api.openstack.compute.contrib.agents.Agents"
-
- def _get_flags(self):
- f = super(AgentsJsonTest, self)._get_flags()
- f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
- return f
-
- def setUp(self):
- super(AgentsJsonTest, self).setUp()
-
- fake_agents_list = [{'url': 'http://example.com/path/to/resource',
- 'hypervisor': 'hypervisor',
- 'architecture': 'x86',
- 'os': 'os',
- 'version': '8.0',
- 'md5hash': 'add6bb58e139be103324d04d82d8f545',
- 'id': 1}]
-
- def fake_agent_build_create(context, values):
- values['id'] = 1
- agent_build_ref = models.AgentBuild()
- agent_build_ref.update(values)
- return agent_build_ref
-
- def fake_agent_build_get_all(context, hypervisor):
- agent_build_all = []
- for agent in fake_agents_list:
- if hypervisor and hypervisor != agent['hypervisor']:
- continue
- agent_build_ref = models.AgentBuild()
- agent_build_ref.update(agent)
- agent_build_all.append(agent_build_ref)
- return agent_build_all
-
- def fake_agent_build_update(context, agent_build_id, values):
- pass
-
- def fake_agent_build_destroy(context, agent_update_id):
- pass
-
- self.stubs.Set(db, "agent_build_create",
- fake_agent_build_create)
- self.stubs.Set(db, "agent_build_get_all",
- fake_agent_build_get_all)
- self.stubs.Set(db, "agent_build_update",
- fake_agent_build_update)
- self.stubs.Set(db, "agent_build_destroy",
- fake_agent_build_destroy)
-
- def test_agent_create(self):
- # Creates a new agent build.
- project = {'url': 'http://example.com/path/to/resource',
- 'hypervisor': 'hypervisor',
- 'architecture': 'x86',
- 'os': 'os',
- 'version': '8.0',
- 'md5hash': 'add6bb58e139be103324d04d82d8f545'
- }
- response = self._do_post('os-agents', 'agent-post-req',
- project)
- project['agent_id'] = 1
- self._verify_response('agent-post-resp', project, response, 200)
- return project
-
- def test_agent_list(self):
- # Return a list of all agent builds.
- response = self._do_get('os-agents')
- project = {'url': 'http://example.com/path/to/resource',
- 'hypervisor': 'hypervisor',
- 'architecture': 'x86',
- 'os': 'os',
- 'version': '8.0',
- 'md5hash': 'add6bb58e139be103324d04d82d8f545',
- 'agent_id': 1
- }
- self._verify_response('agents-get-resp', project, response, 200)
-
- def test_agent_update(self):
- # Update an existing agent build.
- agent_id = 1
- subs = {'version': '7.0',
- 'url': 'http://example.com/path/to/resource',
- 'md5hash': 'add6bb58e139be103324d04d82d8f545'}
- response = self._do_put('os-agents/%s' % agent_id,
- 'agent-update-put-req', subs)
- subs['agent_id'] = 1
- self._verify_response('agent-update-put-resp', subs, response, 200)
-
- def test_agent_delete(self):
- # Deletes an existing agent build.
- agent_id = 1
- response = self._do_delete('os-agents/%s' % agent_id)
- self.assertEqual(response.status_code, 200)
-
-
-class AgentsXmlTest(AgentsJsonTest):
- ctype = "xml"
-
-
-class FixedIpJsonTest(ApiSampleTestBaseV2):
- extension_name = "nova.api.openstack.compute.contrib.fixed_ips.Fixed_ips"
-
- def _get_flags(self):
- f = super(FixedIpJsonTest, self)._get_flags()
- f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
- return f
-
- def setUp(self):
- super(FixedIpJsonTest, self).setUp()
-
- instance = dict(test_utils.get_test_instance(),
- hostname='openstack', host='host')
- fake_fixed_ips = [{'id': 1,
- 'address': '192.168.1.1',
- 'network_id': 1,
- 'virtual_interface_id': 1,
- 'instance_uuid': '1',
- 'allocated': False,
- 'leased': False,
- 'reserved': False,
- 'created_at': None,
- 'deleted_at': None,
- 'updated_at': None,
- 'deleted': None,
- 'instance': instance,
- 'network': test_network.fake_network,
- 'host': None},
- {'id': 2,
- 'address': '192.168.1.2',
- 'network_id': 1,
- 'virtual_interface_id': 2,
- 'instance_uuid': '2',
- 'allocated': False,
- 'leased': False,
- 'reserved': False,
- 'created_at': None,
- 'deleted_at': None,
- 'updated_at': None,
- 'deleted': None,
- 'instance': instance,
- 'network': test_network.fake_network,
- 'host': None},
- ]
-
- def fake_fixed_ip_get_by_address(context, address,
- columns_to_join=None):
- for fixed_ip in fake_fixed_ips:
- if fixed_ip['address'] == address:
- return fixed_ip
- raise exception.FixedIpNotFoundForAddress(address=address)
-
- def fake_fixed_ip_get_by_address_detailed(context, address):
- network = {'id': 1,
- 'cidr': "192.168.1.0/24"}
- host = {'host': "host",
- 'hostname': 'openstack'}
- for fixed_ip in fake_fixed_ips:
- if fixed_ip['address'] == address:
- return (fixed_ip, network, host)
- raise exception.FixedIpNotFoundForAddress(address=address)
-
- def fake_fixed_ip_update(context, address, values):
- fixed_ip = fake_fixed_ip_get_by_address(context, address)
- if fixed_ip is None:
- raise exception.FixedIpNotFoundForAddress(address=address)
- else:
- for key in values:
- fixed_ip[key] = values[key]
-
- self.stubs.Set(db, "fixed_ip_get_by_address",
- fake_fixed_ip_get_by_address)
- self.stubs.Set(db, "fixed_ip_get_by_address_detailed",
- fake_fixed_ip_get_by_address_detailed)
- self.stubs.Set(db, "fixed_ip_update", fake_fixed_ip_update)
-
- def test_fixed_ip_reserve(self):
- # Reserve a Fixed IP.
- project = {'reserve': None}
- response = self._do_post('os-fixed-ips/192.168.1.1/action',
- 'fixedip-post-req',
- project)
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
- def test_get_fixed_ip(self):
- # Return data about the given fixed ip.
- response = self._do_get('os-fixed-ips/192.168.1.1')
- project = {'cidr': '192.168.1.0/24',
- 'hostname': 'openstack',
- 'host': 'host',
- 'address': '192.168.1.1'}
- self._verify_response('fixedips-get-resp', project, response, 200)
-
-
-class FixedIpXmlTest(FixedIpJsonTest):
- ctype = "xml"
-
-
-class AggregatesSampleJsonTest(ServersSampleBase):
- extension_name = "nova.api.openstack.compute.contrib" + \
- ".aggregates.Aggregates"
- create_subs = {
- "aggregate_id": '(?P<id>\d+)'
- }
-
- def _create_aggregate(self):
- return self._do_post('os-aggregates', 'aggregate-post-req',
- self.create_subs)
-
- def test_aggregate_create(self):
- response = self._create_aggregate()
- subs = self.create_subs
- subs.update(self._get_regexes())
- return self._verify_response('aggregate-post-resp',
- subs, response, 200)
-
- def test_list_aggregates(self):
- self._create_aggregate()
- response = self._do_get('os-aggregates')
- subs = self._get_regexes()
- self._verify_response('aggregates-list-get-resp', subs, response, 200)
-
- def test_aggregate_get(self):
- self._create_aggregate()
- response = self._do_get('os-aggregates/%s' % 1)
- subs = self._get_regexes()
- self._verify_response('aggregates-get-resp', subs, response, 200)
-
- def test_add_metadata(self):
- self._create_aggregate()
- response = self._do_post('os-aggregates/%s/action' % 1,
- 'aggregate-metadata-post-req',
- {'action': 'set_metadata'})
- subs = self._get_regexes()
- self._verify_response('aggregates-metadata-post-resp', subs,
- response, 200)
-
- def test_add_host(self):
- self._create_aggregate()
- subs = {
- "host_name": self.compute.host,
- }
- response = self._do_post('os-aggregates/%s/action' % 1,
- 'aggregate-add-host-post-req', subs)
- subs.update(self._get_regexes())
- self._verify_response('aggregates-add-host-post-resp', subs,
- response, 200)
-
- def test_remove_host(self):
- self.test_add_host()
- subs = {
- "host_name": self.compute.host,
- }
- response = self._do_post('os-aggregates/1/action',
- 'aggregate-remove-host-post-req', subs)
- subs.update(self._get_regexes())
- self._verify_response('aggregates-remove-host-post-resp',
- subs, response, 200)
-
- def test_update_aggregate(self):
- self._create_aggregate()
- response = self._do_put('os-aggregates/%s' % 1,
- 'aggregate-update-post-req', {})
- subs = self._get_regexes()
- self._verify_response('aggregate-update-post-resp',
- subs, response, 200)
-
-
-class AggregatesSampleXmlTest(AggregatesSampleJsonTest):
- ctype = 'xml'
-
-
-class CertificatesSamplesJsonTest(ApiSampleTestBaseV2):
- extension_name = ("nova.api.openstack.compute.contrib.certificates."
- "Certificates")
-
- def test_create_certificates(self):
- response = self._do_post('os-certificates',
- 'certificate-create-req', {})
- subs = self._get_regexes()
- self._verify_response('certificate-create-resp', subs, response, 200)
-
- def test_get_root_certificate(self):
- response = self._do_get('os-certificates/root')
- subs = self._get_regexes()
- self._verify_response('certificate-get-root-resp', subs, response, 200)
-
-
-class CertificatesSamplesXmlTest(CertificatesSamplesJsonTest):
- ctype = 'xml'
-
-
-class UsedLimitsSamplesJsonTest(ApiSampleTestBaseV2):
- extension_name = ("nova.api.openstack.compute.contrib.used_limits."
- "Used_limits")
-
- def test_get_used_limits(self):
- # Get api sample to used limits.
- response = self._do_get('limits')
- subs = self._get_regexes()
- self._verify_response('usedlimits-get-resp', subs, response, 200)
-
-
-class UsedLimitsSamplesXmlTest(UsedLimitsSamplesJsonTest):
- ctype = "xml"
-
-
-class UsedLimitsForAdminSamplesJsonTest(ApiSampleTestBaseV2):
- extends_name = ("nova.api.openstack.compute.contrib.used_limits."
- "Used_limits")
- extension_name = (
- "nova.api.openstack.compute.contrib.used_limits_for_admin."
- "Used_limits_for_admin")
-
- def test_get_used_limits_for_admin(self):
- tenant_id = 'openstack'
- response = self._do_get('limits?tenant_id=%s' % tenant_id)
- subs = self._get_regexes()
- return self._verify_response('usedlimitsforadmin-get-resp', subs,
- response, 200)
-
-
-class UsedLimitsForAdminSamplesXmlTest(UsedLimitsForAdminSamplesJsonTest):
- ctype = "xml"
-
-
-class MultipleCreateJsonTest(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib.multiple_create."
- "Multiple_create")
-
- def test_multiple_create(self):
- subs = {
- 'image_id': fake.get_valid_image_id(),
- 'host': self._get_host(),
- 'min_count': "2",
- 'max_count': "3"
- }
- response = self._do_post('servers', 'multiple-create-post-req', subs)
- subs.update(self._get_regexes())
- self._verify_response('multiple-create-post-resp', subs, response, 202)
-
- def test_multiple_create_without_reservation_id(self):
- subs = {
- 'image_id': fake.get_valid_image_id(),
- 'host': self._get_host(),
- 'min_count': "2",
- 'max_count': "3"
- }
- response = self._do_post('servers', 'multiple-create-no-resv-post-req',
- subs)
- subs.update(self._get_regexes())
- self._verify_response('multiple-create-no-resv-post-resp', subs,
- response, 202)
-
-
-class MultipleCreateXmlTest(MultipleCreateJsonTest):
- ctype = 'xml'
-
-
-class ServicesJsonTest(ApiSampleTestBaseV2):
- extension_name = "nova.api.openstack.compute.contrib.services.Services"
-
- def setUp(self):
- super(ServicesJsonTest, self).setUp()
- self.stubs.Set(db, "service_get_all",
- test_services.fake_db_api_service_get_all)
- self.stubs.Set(timeutils, "utcnow", test_services.fake_utcnow)
- self.stubs.Set(timeutils, "utcnow_ts", test_services.fake_utcnow_ts)
- self.stubs.Set(db, "service_get_by_args",
- test_services.fake_service_get_by_host_binary)
- self.stubs.Set(db, "service_update",
- test_services.fake_service_update)
-
- def tearDown(self):
- super(ServicesJsonTest, self).tearDown()
- timeutils.clear_time_override()
-
- def fake_load(self, service_name):
- return service_name == 'os-extended-services'
-
- def test_services_list(self):
- """Return a list of all agent builds."""
- response = self._do_get('os-services')
- subs = {'binary': 'nova-compute',
- 'host': 'host1',
- 'zone': 'nova',
- 'status': 'disabled',
- 'state': 'up'}
- subs.update(self._get_regexes())
- self._verify_response('services-list-get-resp', subs, response, 200)
-
- def test_service_enable(self):
- """Enable an existing agent build."""
- subs = {"host": "host1",
- 'binary': 'nova-compute'}
- response = self._do_put('os-services/enable',
- 'service-enable-put-req', subs)
- subs = {"host": "host1",
- "binary": "nova-compute"}
- self._verify_response('service-enable-put-resp', subs, response, 200)
-
- def test_service_disable(self):
- """Disable an existing agent build."""
- subs = {"host": "host1",
- 'binary': 'nova-compute'}
- response = self._do_put('os-services/disable',
- 'service-disable-put-req', subs)
- subs = {"host": "host1",
- "binary": "nova-compute"}
- self._verify_response('service-disable-put-resp', subs, response, 200)
-
- def test_service_detail(self):
- """Return a list of all running services with the disable reason
- information if that exists.
- """
- self.stubs.Set(extensions.ExtensionManager, "is_loaded",
- self.fake_load)
- response = self._do_get('os-services')
- self.assertEqual(response.status_code, 200)
- subs = {'binary': 'nova-compute',
- 'host': 'host1',
- 'zone': 'nova',
- 'status': 'disabled',
- 'state': 'up'}
- subs.update(self._get_regexes())
- self._verify_response('services-get-resp',
- subs, response, 200)
-
- def test_service_disable_log_reason(self):
- """Disable an existing service and log the reason."""
- self.stubs.Set(extensions.ExtensionManager, "is_loaded",
- self.fake_load)
- subs = {"host": "host1",
- 'binary': 'nova-compute',
- 'disabled_reason': 'test2'}
- response = self._do_put('os-services/disable-log-reason',
- 'service-disable-log-put-req', subs)
- return self._verify_response('service-disable-log-put-resp',
- subs, response, 200)
-
-
-class ServicesXmlTest(ServicesJsonTest):
- ctype = 'xml'
-
-
-class ExtendedServicesJsonTest(ApiSampleTestBaseV2):
- """This extension is extending the functionalities of the
- Services extension so the funcionalities introduced by this extension
- are tested in the ServicesJsonTest and ServicesXmlTest classes.
- """
-
- extension_name = ("nova.api.openstack.compute.contrib."
- "extended_services.Extended_services")
-
-
-class ExtendedServicesXmlTest(ExtendedServicesJsonTest):
- """This extension is tested in the ServicesXmlTest class."""
- ctype = 'xml'
-
-
-@mock.patch.object(db, 'service_get_all',
- side_effect=test_services.fake_db_api_service_get_all)
-@mock.patch.object(db, 'service_get_by_args',
- side_effect=test_services.fake_service_get_by_host_binary)
-class ExtendedServicesDeleteJsonTest(ApiSampleTestBaseV2):
- extends_name = ("nova.api.openstack.compute.contrib.services.Services")
- extension_name = ("nova.api.openstack.compute.contrib."
- "extended_services_delete.Extended_services_delete")
-
- def setUp(self):
- super(ExtendedServicesDeleteJsonTest, self).setUp()
- timeutils.set_time_override(test_services.fake_utcnow())
-
- def tearDown(self):
- super(ExtendedServicesDeleteJsonTest, self).tearDown()
- timeutils.clear_time_override()
-
- def test_service_detail(self, *mocks):
- """Return a list of all running services with the disable reason
- information if that exists.
- """
- response = self._do_get('os-services')
- self.assertEqual(response.status_code, 200)
- subs = {'id': 1,
- 'binary': 'nova-compute',
- 'host': 'host1',
- 'zone': 'nova',
- 'status': 'disabled',
- 'state': 'up'}
- subs.update(self._get_regexes())
- return self._verify_response('services-get-resp',
- subs, response, 200)
-
- def test_service_delete(self, *mocks):
- response = self._do_delete('os-services/1')
- self.assertEqual(response.status_code, 204)
- self.assertEqual(response.content, "")
-
-
-class ExtendedServicesDeleteXmlTest(ExtendedServicesDeleteJsonTest):
- """This extension is tested in the ExtendedServicesDeleteJsonTest class."""
- ctype = 'xml'
-
-
-class SimpleTenantUsageSampleJsonTest(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib.simple_tenant_usage."
- "Simple_tenant_usage")
-
- def setUp(self):
- """setUp method for simple tenant usage."""
- super(SimpleTenantUsageSampleJsonTest, self).setUp()
-
- started = timeutils.utcnow()
- now = started + datetime.timedelta(hours=1)
-
- timeutils.set_time_override(started)
- self._post_server()
- timeutils.set_time_override(now)
-
- self.query = {
- 'start': str(started),
- 'end': str(now)
- }
-
- def tearDown(self):
- """tearDown method for simple tenant usage."""
- super(SimpleTenantUsageSampleJsonTest, self).tearDown()
- timeutils.clear_time_override()
-
- def test_get_tenants_usage(self):
- # Get api sample to get all tenants usage request.
- response = self._do_get('os-simple-tenant-usage?%s' % (
- urllib.urlencode(self.query)))
- subs = self._get_regexes()
- self._verify_response('simple-tenant-usage-get', subs, response, 200)
-
- def test_get_tenant_usage_details(self):
- # Get api sample to get specific tenant usage request.
- tenant_id = 'openstack'
- response = self._do_get('os-simple-tenant-usage/%s?%s' % (tenant_id,
- urllib.urlencode(self.query)))
- subs = self._get_regexes()
- self._verify_response('simple-tenant-usage-get-specific', subs,
- response, 200)
-
-
-class SimpleTenantUsageSampleXmlTest(SimpleTenantUsageSampleJsonTest):
- ctype = "xml"
-
-
-class ServerDiagnosticsSamplesJsonTest(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib.server_diagnostics."
- "Server_diagnostics")
-
- def test_server_diagnostics_get(self):
- uuid = self._post_server()
- response = self._do_get('servers/%s/diagnostics' % uuid)
- subs = self._get_regexes()
- self._verify_response('server-diagnostics-get-resp', subs,
- response, 200)
-
-
-class ServerDiagnosticsSamplesXmlTest(ServerDiagnosticsSamplesJsonTest):
- ctype = "xml"
-
-
-class AvailabilityZoneJsonTest(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib.availability_zone."
- "Availability_zone")
-
- def test_create_availability_zone(self):
- subs = {
- 'image_id': fake.get_valid_image_id(),
- 'host': self._get_host(),
- "availability_zone": "nova"
- }
- response = self._do_post('servers', 'availability-zone-post-req', subs)
- subs.update(self._get_regexes())
- self._verify_response('availability-zone-post-resp', subs,
- response, 202)
-
-
-class AvailabilityZoneXmlTest(AvailabilityZoneJsonTest):
- ctype = "xml"
-
-
-class AdminActionsSamplesJsonTest(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib.admin_actions."
- "Admin_actions")
-
- def setUp(self):
- """setUp Method for AdminActions api samples extension
-
- This method creates the server that will be used in each tests
- """
- super(AdminActionsSamplesJsonTest, self).setUp()
- self.uuid = self._post_server()
-
- def test_post_pause(self):
- # Get api samples to pause server request.
- response = self._do_post('servers/%s/action' % self.uuid,
- 'admin-actions-pause', {})
- self.assertEqual(response.status_code, 202)
-
- def test_post_unpause(self):
- # Get api samples to unpause server request.
- self.test_post_pause()
- response = self._do_post('servers/%s/action' % self.uuid,
- 'admin-actions-unpause', {})
- self.assertEqual(response.status_code, 202)
-
- def test_post_suspend(self):
- # Get api samples to suspend server request.
- response = self._do_post('servers/%s/action' % self.uuid,
- 'admin-actions-suspend', {})
- self.assertEqual(response.status_code, 202)
-
- def test_post_resume(self):
- # Get api samples to server resume request.
- self.test_post_suspend()
- response = self._do_post('servers/%s/action' % self.uuid,
- 'admin-actions-resume', {})
- self.assertEqual(response.status_code, 202)
-
- @mock.patch('nova.conductor.manager.ComputeTaskManager._cold_migrate')
- def test_post_migrate(self, mock_cold_migrate):
- # Get api samples to migrate server request.
- response = self._do_post('servers/%s/action' % self.uuid,
- 'admin-actions-migrate', {})
- self.assertEqual(response.status_code, 202)
-
- def test_post_reset_network(self):
- # Get api samples to reset server network request.
- response = self._do_post('servers/%s/action' % self.uuid,
- 'admin-actions-reset-network', {})
- self.assertEqual(response.status_code, 202)
-
- def test_post_inject_network_info(self):
- # Get api samples to inject network info request.
- response = self._do_post('servers/%s/action' % self.uuid,
- 'admin-actions-inject-network-info', {})
- self.assertEqual(response.status_code, 202)
-
- def test_post_lock_server(self):
- # Get api samples to lock server request.
- response = self._do_post('servers/%s/action' % self.uuid,
- 'admin-actions-lock-server', {})
- self.assertEqual(response.status_code, 202)
-
- def test_post_unlock_server(self):
- # Get api samples to unlock server request.
- self.test_post_lock_server()
- response = self._do_post('servers/%s/action' % self.uuid,
- 'admin-actions-unlock-server', {})
- self.assertEqual(response.status_code, 202)
-
- def test_post_backup_server(self):
- # Get api samples to backup server request.
- def image_details(self, context, **kwargs):
- """This stub is specifically used on the backup action."""
- # NOTE(maurosr): I've added this simple stub cause backup action
- # was trapped in infinite loop during fetch image phase since the
- # fake Image Service always returns the same set of images
- return []
-
- self.stubs.Set(fake._FakeImageService, 'detail', image_details)
-
- response = self._do_post('servers/%s/action' % self.uuid,
- 'admin-actions-backup-server', {})
- self.assertEqual(response.status_code, 202)
-
- def test_post_live_migrate_server(self):
- # Get api samples to server live migrate request.
- def fake_live_migrate(_self, context, instance, scheduler_hint,
- block_migration, disk_over_commit):
- self.assertEqual(self.uuid, instance["uuid"])
- host = scheduler_hint["host"]
- self.assertEqual(self.compute.host, host)
-
- self.stubs.Set(conductor_manager.ComputeTaskManager,
- '_live_migrate',
- fake_live_migrate)
-
- def fake_get_compute(context, host):
- service = dict(host=host,
- binary='nova-compute',
- topic='compute',
- report_count=1,
- updated_at='foo',
- hypervisor_type='bar',
- hypervisor_version=
- utils.convert_version_to_int('1.0'),
- disabled=False)
- return {'compute_node': [service]}
- self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute)
-
- response = self._do_post('servers/%s/action' % self.uuid,
- 'admin-actions-live-migrate',
- {'hostname': self.compute.host})
- self.assertEqual(response.status_code, 202)
-
- def test_post_reset_state(self):
- # get api samples to server reset state request.
- response = self._do_post('servers/%s/action' % self.uuid,
- 'admin-actions-reset-server-state', {})
- self.assertEqual(response.status_code, 202)
-
-
-class AdminActionsSamplesXmlTest(AdminActionsSamplesJsonTest):
- ctype = 'xml'
-
-
-class ConsolesSampleJsonTests(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib"
- ".consoles.Consoles")
-
- def setUp(self):
- super(ConsolesSampleJsonTests, self).setUp()
- self.flags(vnc_enabled=True)
- self.flags(enabled=True, group='spice')
- self.flags(enabled=True, group='rdp')
- self.flags(enabled=True, group='serial_console')
-
- def test_get_vnc_console(self):
- uuid = self._post_server()
- response = self._do_post('servers/%s/action' % uuid,
- 'get-vnc-console-post-req',
- {'action': 'os-getVNCConsole'})
- subs = self._get_regexes()
- subs["url"] = \
- "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
- self._verify_response('get-vnc-console-post-resp', subs, response, 200)
-
- def test_get_spice_console(self):
- uuid = self._post_server()
- response = self._do_post('servers/%s/action' % uuid,
- 'get-spice-console-post-req',
- {'action': 'os-getSPICEConsole'})
- subs = self._get_regexes()
- subs["url"] = \
- "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
- self._verify_response('get-spice-console-post-resp', subs,
- response, 200)
-
- def test_get_rdp_console(self):
- uuid = self._post_server()
- response = self._do_post('servers/%s/action' % uuid,
- 'get-rdp-console-post-req',
- {'action': 'os-getRDPConsole'})
- subs = self._get_regexes()
- subs["url"] = \
- "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
- self._verify_response('get-rdp-console-post-resp', subs,
- response, 200)
-
- def test_get_serial_console(self):
- uuid = self._post_server()
- response = self._do_post('servers/%s/action' % uuid,
- 'get-serial-console-post-req',
- {'action': 'os-getSerialConsole'})
- subs = self._get_regexes()
- subs["url"] = \
- "((ws?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
- self._verify_response('get-serial-console-post-resp', subs,
- response, 200)
-
-
-class ConsolesSampleXmlTests(ConsolesSampleJsonTests):
- ctype = 'xml'
-
-
-class ConsoleAuthTokensSampleJsonTests(ServersSampleBase):
- extends_name = ("nova.api.openstack.compute.contrib.consoles.Consoles")
- extension_name = ("nova.api.openstack.compute.contrib.console_auth_tokens."
- "Console_auth_tokens")
-
- def _get_console_url(self, data):
- return jsonutils.loads(data)["console"]["url"]
-
- def _get_console_token(self, uuid):
- response = self._do_post('servers/%s/action' % uuid,
- 'get-rdp-console-post-req',
- {'action': 'os-getRDPConsole'})
-
- url = self._get_console_url(response.content)
- return re.match('.+?token=([^&]+)', url).groups()[0]
-
- def test_get_console_connect_info(self):
- self.flags(enabled=True, group='rdp')
-
- uuid = self._post_server()
- token = self._get_console_token(uuid)
-
- response = self._do_get('os-console-auth-tokens/%s' % token)
-
- subs = self._get_regexes()
- subs["uuid"] = uuid
- subs["host"] = r"[\w\.\-]+"
- subs["port"] = "[0-9]+"
- subs["internal_access_path"] = ".*"
- self._verify_response('get-console-connect-info-get-resp', subs,
- response, 200)
-
-
-class ConsoleAuthTokensSampleXmlTests(ConsoleAuthTokensSampleJsonTests):
- ctype = 'xml'
-
- def _get_console_url(self, data):
- return etree.fromstring(data).find('url').text
-
-
-class DeferredDeleteSampleJsonTests(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib"
- ".deferred_delete.Deferred_delete")
-
- def setUp(self):
- super(DeferredDeleteSampleJsonTests, self).setUp()
- self.flags(reclaim_instance_interval=1)
-
- def test_restore(self):
- uuid = self._post_server()
- response = self._do_delete('servers/%s' % uuid)
-
- response = self._do_post('servers/%s/action' % uuid,
- 'restore-post-req', {})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
-
- def test_force_delete(self):
- uuid = self._post_server()
- response = self._do_delete('servers/%s' % uuid)
-
- response = self._do_post('servers/%s/action' % uuid,
- 'force-delete-post-req', {})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
-
-
-class DeferredDeleteSampleXmlTests(DeferredDeleteSampleJsonTests):
- ctype = 'xml'
-
-
-class QuotasSampleJsonTests(ApiSampleTestBaseV2):
- extension_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
-
- def test_show_quotas(self):
- # Get api sample to show quotas.
- response = self._do_get('os-quota-sets/fake_tenant')
- self._verify_response('quotas-show-get-resp', {}, response, 200)
-
- def test_show_quotas_defaults(self):
- # Get api sample to show quotas defaults.
- response = self._do_get('os-quota-sets/fake_tenant/defaults')
- self._verify_response('quotas-show-defaults-get-resp',
- {}, response, 200)
-
- def test_update_quotas(self):
- # Get api sample to update quotas.
- response = self._do_put('os-quota-sets/fake_tenant',
- 'quotas-update-post-req',
- {})
- self._verify_response('quotas-update-post-resp', {}, response, 200)
-
-
-class QuotasSampleXmlTests(QuotasSampleJsonTests):
- ctype = "xml"
-
-
-class ExtendedQuotasSampleJsonTests(ApiSampleTestBaseV2):
- extends_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
- extension_name = ("nova.api.openstack.compute.contrib"
- ".extended_quotas.Extended_quotas")
-
- def test_delete_quotas(self):
- # Get api sample to delete quota.
- response = self._do_delete('os-quota-sets/fake_tenant')
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
-
- def test_update_quotas(self):
- # Get api sample to update quotas.
- response = self._do_put('os-quota-sets/fake_tenant',
- 'quotas-update-post-req',
- {})
- return self._verify_response('quotas-update-post-resp', {},
- response, 200)
-
-
-class ExtendedQuotasSampleXmlTests(ExtendedQuotasSampleJsonTests):
- ctype = "xml"
-
-
-class UserQuotasSampleJsonTests(ApiSampleTestBaseV2):
- extends_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
- extension_name = ("nova.api.openstack.compute.contrib"
- ".user_quotas.User_quotas")
-
- def fake_load(self, *args):
- return True
-
- def test_show_quotas_for_user(self):
- # Get api sample to show quotas for user.
- response = self._do_get('os-quota-sets/fake_tenant?user_id=1')
- self._verify_response('user-quotas-show-get-resp', {}, response, 200)
-
- def test_delete_quotas_for_user(self):
- # Get api sample to delete quota for user.
- self.stubs.Set(extensions.ExtensionManager, "is_loaded",
- self.fake_load)
- response = self._do_delete('os-quota-sets/fake_tenant?user_id=1')
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
-
- def test_update_quotas_for_user(self):
- # Get api sample to update quotas for user.
- response = self._do_put('os-quota-sets/fake_tenant?user_id=1',
- 'user-quotas-update-post-req',
- {})
- return self._verify_response('user-quotas-update-post-resp', {},
- response, 200)
-
-
-class UserQuotasSampleXmlTests(UserQuotasSampleJsonTests):
- ctype = "xml"
-
-
-class ExtendedIpsSampleJsonTests(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib"
- ".extended_ips.Extended_ips")
-
- def test_show(self):
- uuid = self._post_server()
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- subs['hypervisor_hostname'] = r'[\w\.\-]+'
- self._verify_response('server-get-resp', subs, response, 200)
-
- def test_detail(self):
- uuid = self._post_server()
- response = self._do_get('servers/detail')
- subs = self._get_regexes()
- subs['id'] = uuid
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('servers-detail-resp', subs, response, 200)
-
-
-class ExtendedIpsSampleXmlTests(ExtendedIpsSampleJsonTests):
- ctype = 'xml'
-
-
-class ExtendedIpsMacSampleJsonTests(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib"
- ".extended_ips_mac.Extended_ips_mac")
-
- def test_show(self):
- uuid = self._post_server()
- response = self._do_get('servers/%s' % uuid)
- self.assertEqual(response.status_code, 200)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- subs['hypervisor_hostname'] = r'[\w\.\-]+'
- subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
- self._verify_response('server-get-resp', subs, response, 200)
-
- def test_detail(self):
- uuid = self._post_server()
- response = self._do_get('servers/detail')
- self.assertEqual(response.status_code, 200)
- subs = self._get_regexes()
- subs['id'] = uuid
- subs['hostid'] = '[a-f0-9]+'
- subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
- self._verify_response('servers-detail-resp', subs, response, 200)
-
-
-class ExtendedIpsMacSampleXmlTests(ExtendedIpsMacSampleJsonTests):
- ctype = 'xml'
-
-
-class ExtendedStatusSampleJsonTests(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib"
- ".extended_status.Extended_status")
-
- def test_show(self):
- uuid = self._post_server()
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('server-get-resp', subs, response, 200)
-
- def test_detail(self):
- uuid = self._post_server()
- response = self._do_get('servers/detail')
- subs = self._get_regexes()
- subs['id'] = uuid
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('servers-detail-resp', subs, response, 200)
-
-
-class ExtendedStatusSampleXmlTests(ExtendedStatusSampleJsonTests):
- ctype = 'xml'
-
-
-class ExtendedVolumesSampleJsonTests(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib"
- ".extended_volumes.Extended_volumes")
-
- def test_show(self):
- uuid = self._post_server()
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fakes.stub_bdm_get_all_by_instance)
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('server-get-resp', subs, response, 200)
-
- def test_detail(self):
- uuid = self._post_server()
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fakes.stub_bdm_get_all_by_instance)
- response = self._do_get('servers/detail')
- subs = self._get_regexes()
- subs['id'] = uuid
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('servers-detail-resp', subs, response, 200)
-
-
-class ExtendedVolumesSampleXmlTests(ExtendedVolumesSampleJsonTests):
- ctype = 'xml'
-
-
-class ServerUsageSampleJsonTests(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib"
- ".server_usage.Server_usage")
-
- def test_show(self):
- uuid = self._post_server()
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- return self._verify_response('server-get-resp', subs, response, 200)
-
- def test_detail(self):
- self._post_server()
- response = self._do_get('servers/detail')
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- return self._verify_response('servers-detail-resp', subs,
- response, 200)
-
-
-class ServerUsageSampleXmlTests(ServerUsageSampleJsonTests):
- ctype = 'xml'
-
-
-class ExtendedVIFNetSampleJsonTests(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib"
- ".extended_virtual_interfaces_net.Extended_virtual_interfaces_net")
-
- def _get_flags(self):
- f = super(ExtendedVIFNetSampleJsonTests, self)._get_flags()
- f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
- # extended_virtual_interfaces_net_update also
- # needs virtual_interfaces to be loaded
- f['osapi_compute_extension'].append(
- ('nova.api.openstack.compute.contrib'
- '.virtual_interfaces.Virtual_interfaces'))
- return f
-
- def test_vifs_list(self):
- uuid = self._post_server()
-
- response = self._do_get('servers/%s/os-virtual-interfaces' % uuid)
- self.assertEqual(response.status_code, 200)
-
- subs = self._get_regexes()
- subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
-
- self._verify_response('vifs-list-resp', subs, response, 200)
-
-
-class ExtendedVIFNetSampleXmlTests(ExtendedIpsSampleJsonTests):
- ctype = 'xml'
-
-
-class FlavorManageSampleJsonTests(ApiSampleTestBaseV2):
- extension_name = ("nova.api.openstack.compute.contrib.flavormanage."
- "Flavormanage")
-
- def _create_flavor(self):
- """Create a flavor."""
- subs = {
- 'flavor_id': 10,
- 'flavor_name': "test_flavor"
- }
- response = self._do_post("flavors",
- "flavor-create-post-req",
- subs)
- subs.update(self._get_regexes())
- self._verify_response("flavor-create-post-resp", subs, response, 200)
-
- def test_create_flavor(self):
- # Get api sample to create a flavor.
- self._create_flavor()
-
- def test_delete_flavor(self):
- # Get api sample to delete a flavor.
- self._create_flavor()
- response = self._do_delete("flavors/10")
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
-
-
-class FlavorManageSampleXmlTests(FlavorManageSampleJsonTests):
- ctype = "xml"
-
-
-class ServerPasswordSampleJsonTests(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib.server_password."
- "Server_password")
-
- def test_get_password(self):
-
- # Mock password since there is no api to set it
- def fake_ext_password(*args, **kwargs):
- return ("xlozO3wLCBRWAa2yDjCCVx8vwNPypxnypmRYDa/zErlQ+EzPe1S/"
- "Gz6nfmC52mOlOSCRuUOmG7kqqgejPof6M7bOezS387zjq4LSvvwp"
- "28zUknzy4YzfFGhnHAdai3TxUJ26pfQCYrq8UTzmKF2Bq8ioSEtV"
- "VzM0A96pDh8W2i7BOz6MdoiVyiev/I1K2LsuipfxSJR7Wdke4zNX"
- "JjHHP2RfYsVbZ/k9ANu+Nz4iIH8/7Cacud/pphH7EjrY6a4RZNrj"
- "QskrhKYed0YERpotyjYk1eDtRe72GrSiXteqCM4biaQ5w3ruS+Ac"
- "X//PXk3uJ5kC7d67fPXaVz4WaQRYMg==")
- self.stubs.Set(password, "extract_password", fake_ext_password)
- uuid = self._post_server()
- response = self._do_get('servers/%s/os-server-password' % uuid)
- subs = self._get_regexes()
- subs['encrypted_password'] = fake_ext_password().replace('+', '\\+')
- self._verify_response('get-password-resp', subs, response, 200)
-
- def test_reset_password(self):
- uuid = self._post_server()
- response = self._do_delete('servers/%s/os-server-password' % uuid)
- self.assertEqual(response.status_code, 204)
-
-
-class ServerPasswordSampleXmlTests(ServerPasswordSampleJsonTests):
- ctype = "xml"
-
-
-class DiskConfigJsonTest(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib.disk_config."
- "Disk_config")
-
- def test_list_servers_detail(self):
- uuid = self._post_server()
- response = self._do_get('servers/detail')
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- self._verify_response('list-servers-detail-get', subs, response, 200)
-
- def test_get_server(self):
- uuid = self._post_server()
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('server-get-resp', subs, response, 200)
-
- def test_update_server(self):
- uuid = self._post_server()
- response = self._do_put('servers/%s' % uuid,
- 'server-update-put-req', {})
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('server-update-put-resp', subs, response, 200)
-
- def test_resize_server(self):
- self.flags(allow_resize_to_same_host=True)
- uuid = self._post_server()
- response = self._do_post('servers/%s/action' % uuid,
- 'server-resize-post-req', {})
- self.assertEqual(response.status_code, 202)
- # NOTE(tmello): Resize does not return response body
- # Bug #1085213.
- self.assertEqual(response.content, "")
-
- def test_rebuild_server(self):
- uuid = self._post_server()
- subs = {
- 'image_id': fake.get_valid_image_id(),
- 'host': self._get_host(),
- }
- response = self._do_post('servers/%s/action' % uuid,
- 'server-action-rebuild-req', subs)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('server-action-rebuild-resp',
- subs, response, 202)
-
- def test_get_image(self):
- image_id = fake.get_valid_image_id()
- response = self._do_get('images/%s' % image_id)
- subs = self._get_regexes()
- subs['image_id'] = image_id
- self._verify_response('image-get-resp', subs, response, 200)
-
- def test_list_images(self):
- response = self._do_get('images/detail')
- subs = self._get_regexes()
- self._verify_response('image-list-resp', subs, response, 200)
-
-
-class DiskConfigXmlTest(DiskConfigJsonTest):
- ctype = 'xml'
-
-
-class OsNetworksJsonTests(ApiSampleTestBaseV2):
- extension_name = ("nova.api.openstack.compute.contrib.os_tenant_networks"
- ".Os_tenant_networks")
-
- def setUp(self):
- super(OsNetworksJsonTests, self).setUp()
- CONF.set_override("enable_network_quota", True)
-
- def fake(*args, **kwargs):
- pass
-
- self.stubs.Set(nova.quota.QUOTAS, "reserve", fake)
- self.stubs.Set(nova.quota.QUOTAS, "commit", fake)
- self.stubs.Set(nova.quota.QUOTAS, "rollback", fake)
- self.stubs.Set(nova.quota.QuotaEngine, "reserve", fake)
- self.stubs.Set(nova.quota.QuotaEngine, "commit", fake)
- self.stubs.Set(nova.quota.QuotaEngine, "rollback", fake)
-
- def test_list_networks(self):
- response = self._do_get('os-tenant-networks')
- subs = self._get_regexes()
- self._verify_response('networks-list-res', subs, response, 200)
-
- def test_create_network(self):
- response = self._do_post('os-tenant-networks', "networks-post-req", {})
- subs = self._get_regexes()
- self._verify_response('networks-post-res', subs, response, 200)
-
- def test_delete_network(self):
- response = self._do_post('os-tenant-networks', "networks-post-req", {})
- net = jsonutils.loads(response.content)
- response = self._do_delete('os-tenant-networks/%s' %
- net["network"]["id"])
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
-
-class OsNetworksXmlTests(OsNetworksJsonTests):
- ctype = 'xml'
-
- def test_delete_network(self):
- response = self._do_post('os-tenant-networks', "networks-post-req", {})
- net = etree.fromstring(response.content)
- network_id = net.find('id').text
- response = self._do_delete('os-tenant-networks/%s' % network_id)
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
-
-class NetworksJsonTests(ApiSampleTestBaseV2):
- extension_name = ("nova.api.openstack.compute.contrib"
- ".os_networks.Os_networks")
-
- def setUp(self):
- super(NetworksJsonTests, self).setUp()
- fake_network_api = test_networks.FakeNetworkAPI()
- self.stubs.Set(network_api.API, "get_all",
- fake_network_api.get_all)
- self.stubs.Set(network_api.API, "get",
- fake_network_api.get)
- self.stubs.Set(network_api.API, "associate",
- fake_network_api.associate)
- self.stubs.Set(network_api.API, "delete",
- fake_network_api.delete)
- self.stubs.Set(network_api.API, "create",
- fake_network_api.create)
- self.stubs.Set(network_api.API, "add_network_to_project",
- fake_network_api.add_network_to_project)
-
- def test_network_list(self):
- response = self._do_get('os-networks')
- subs = self._get_regexes()
- self._verify_response('networks-list-resp', subs, response, 200)
-
- def test_network_disassociate(self):
- uuid = test_networks.FAKE_NETWORKS[0]['uuid']
- response = self._do_post('os-networks/%s/action' % uuid,
- 'networks-disassociate-req', {})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
- def test_network_show(self):
- uuid = test_networks.FAKE_NETWORKS[0]['uuid']
- response = self._do_get('os-networks/%s' % uuid)
- subs = self._get_regexes()
- self._verify_response('network-show-resp', subs, response, 200)
-
- def test_network_create(self):
- response = self._do_post("os-networks",
- 'network-create-req', {})
- subs = self._get_regexes()
- self._verify_response('network-create-resp', subs, response, 200)
-
- def test_network_add(self):
- response = self._do_post("os-networks/add",
- 'network-add-req', {})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
- def test_network_delete(self):
- response = self._do_delete('os-networks/always_delete')
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
-
-class NetworksXmlTests(NetworksJsonTests):
- ctype = 'xml'
-
-
-class ExtendedNetworksJsonTests(ApiSampleTestBaseV2):
- extends_name = ("nova.api.openstack.compute.contrib."
- "os_networks.Os_networks")
- extension_name = ("nova.api.openstack.compute.contrib."
- "extended_networks.Extended_networks")
-
- def setUp(self):
- super(ExtendedNetworksJsonTests, self).setUp()
- fake_network_api = test_networks.FakeNetworkAPI()
- self.stubs.Set(network_api.API, "get_all",
- fake_network_api.get_all)
- self.stubs.Set(network_api.API, "get",
- fake_network_api.get)
- self.stubs.Set(network_api.API, "associate",
- fake_network_api.associate)
- self.stubs.Set(network_api.API, "delete",
- fake_network_api.delete)
- self.stubs.Set(network_api.API, "create",
- fake_network_api.create)
- self.stubs.Set(network_api.API, "add_network_to_project",
- fake_network_api.add_network_to_project)
-
- def test_network_list(self):
- response = self._do_get('os-networks')
- subs = self._get_regexes()
- self._verify_response('networks-list-resp', subs, response, 200)
-
- def test_network_show(self):
- uuid = test_networks.FAKE_NETWORKS[0]['uuid']
- response = self._do_get('os-networks/%s' % uuid)
- subs = self._get_regexes()
- self._verify_response('network-show-resp', subs, response, 200)
-
- def test_network_create(self):
- response = self._do_post("os-networks",
- 'network-create-req', {})
- subs = self._get_regexes()
- self._verify_response('network-create-resp', subs, response, 200)
-
-
-class ExtendedNetworksXmlTests(ExtendedNetworksJsonTests):
- ctype = 'xml'
-
-
-class NetworksAssociateJsonTests(ApiSampleTestBaseV2):
- extension_name = ("nova.api.openstack.compute.contrib"
- ".networks_associate.Networks_associate")
-
- _sentinel = object()
-
- def _get_flags(self):
- f = super(NetworksAssociateJsonTests, self)._get_flags()
- f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
- # Networks_associate requires Networks to be update
- f['osapi_compute_extension'].append(
- 'nova.api.openstack.compute.contrib.os_networks.Os_networks')
- return f
-
- def setUp(self):
- super(NetworksAssociateJsonTests, self).setUp()
-
- def fake_associate(self, context, network_id,
- host=NetworksAssociateJsonTests._sentinel,
- project=NetworksAssociateJsonTests._sentinel):
- return True
-
- self.stubs.Set(network_api.API, "associate", fake_associate)
-
- def test_disassociate(self):
- response = self._do_post('os-networks/1/action',
- 'network-disassociate-req',
- {})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
- def test_disassociate_host(self):
- response = self._do_post('os-networks/1/action',
- 'network-disassociate-host-req',
- {})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
- def test_disassociate_project(self):
- response = self._do_post('os-networks/1/action',
- 'network-disassociate-project-req',
- {})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
- def test_associate_host(self):
- response = self._do_post('os-networks/1/action',
- 'network-associate-host-req',
- {"host": "testHost"})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
-
-class NetworksAssociateXmlTests(NetworksAssociateJsonTests):
- ctype = 'xml'
-
-
-class FlavorDisabledSampleJsonTests(ApiSampleTestBaseV2):
- extension_name = ("nova.api.openstack.compute.contrib.flavor_disabled."
- "Flavor_disabled")
-
- def test_show_flavor(self):
- # Get api sample to show flavor_disabled attr. of a flavor.
- flavor_id = 1
- response = self._do_get('flavors/%s' % flavor_id)
- subs = self._get_regexes()
- subs['flavor_id'] = flavor_id
- self._verify_response('flavor-show-get-resp', subs, response, 200)
-
- def test_detail_flavor(self):
- # Get api sample to show details of a flavor.
- response = self._do_get('flavors/detail')
- subs = self._get_regexes()
- self._verify_response('flavor-detail-get-resp', subs, response, 200)
-
-
-class FlavorDisabledSampleXmlTests(FlavorDisabledSampleJsonTests):
- ctype = "xml"
-
-
-class QuotaClassesSampleJsonTests(ApiSampleTestBaseV2):
- extension_name = ("nova.api.openstack.compute.contrib.quota_classes."
- "Quota_classes")
- set_id = 'test_class'
-
- def test_show_quota_classes(self):
- # Get api sample to show quota classes.
- response = self._do_get('os-quota-class-sets/%s' % self.set_id)
- subs = {'set_id': self.set_id}
- self._verify_response('quota-classes-show-get-resp', subs,
- response, 200)
-
- def test_update_quota_classes(self):
- # Get api sample to update quota classes.
- response = self._do_put('os-quota-class-sets/%s' % self.set_id,
- 'quota-classes-update-post-req',
- {})
- self._verify_response('quota-classes-update-post-resp',
- {}, response, 200)
-
-
-class QuotaClassesSampleXmlTests(QuotaClassesSampleJsonTests):
- ctype = "xml"
-
-
-class CellsSampleJsonTest(ApiSampleTestBaseV2):
- extension_name = "nova.api.openstack.compute.contrib.cells.Cells"
-
- def setUp(self):
- # db_check_interval < 0 makes cells manager always hit the DB
- self.flags(enable=True, db_check_interval=-1, group='cells')
- super(CellsSampleJsonTest, self).setUp()
- self._stub_cells()
-
- def _stub_cells(self, num_cells=5):
- self.cells = []
- self.cells_next_id = 1
-
- def _fake_cell_get_all(context):
- return self.cells
-
- def _fake_cell_get(inst, context, cell_name):
- for cell in self.cells:
- if cell['name'] == cell_name:
- return cell
- raise exception.CellNotFound(cell_name=cell_name)
-
- for x in xrange(num_cells):
- cell = models.Cell()
- our_id = self.cells_next_id
- self.cells_next_id += 1
- cell.update({'id': our_id,
- 'name': 'cell%s' % our_id,
- 'transport_url': 'rabbit://username%s@/' % our_id,
- 'is_parent': our_id % 2 == 0})
- self.cells.append(cell)
-
- self.stubs.Set(db, 'cell_get_all', _fake_cell_get_all)
- self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_get', _fake_cell_get)
-
- def test_cells_empty_list(self):
- # Override this
- self._stub_cells(num_cells=0)
- response = self._do_get('os-cells')
- subs = self._get_regexes()
- self._verify_response('cells-list-empty-resp', subs, response, 200)
-
- def test_cells_list(self):
- response = self._do_get('os-cells')
- subs = self._get_regexes()
- self._verify_response('cells-list-resp', subs, response, 200)
-
- def test_cells_get(self):
- response = self._do_get('os-cells/cell3')
- subs = self._get_regexes()
- self._verify_response('cells-get-resp', subs, response, 200)
-
-
-class CellsSampleXmlTest(CellsSampleJsonTest):
- ctype = 'xml'
-
-
-class CellsCapacitySampleJsonTest(ApiSampleTestBaseV2):
- extends_name = ("nova.api.openstack.compute.contrib.cells.Cells")
- extension_name = ("nova.api.openstack.compute.contrib."
- "cell_capacities.Cell_capacities")
-
- def setUp(self):
- self.flags(enable=True, db_check_interval=-1, group='cells')
- super(CellsCapacitySampleJsonTest, self).setUp()
- # (navneetk/kaushikc) : Mock cell capacity to avoid the capacity
- # being calculated from the compute nodes in the environment
- self._mock_cell_capacity()
-
- def test_get_cell_capacity(self):
- state_manager = state.CellStateManager()
- my_state = state_manager.get_my_state()
- response = self._do_get('os-cells/%s/capacities' %
- my_state.name)
- subs = self._get_regexes()
- return self._verify_response('cells-capacities-resp',
- subs, response, 200)
-
- def test_get_all_cells_capacity(self):
- response = self._do_get('os-cells/capacities')
- subs = self._get_regexes()
- return self._verify_response('cells-capacities-resp',
- subs, response, 200)
-
- def _mock_cell_capacity(self):
- self.mox.StubOutWithMock(self.cells.manager.state_manager,
- 'get_our_capacities')
- response = {"ram_free":
- {"units_by_mb": {"8192": 0, "512": 13,
- "4096": 1, "2048": 3, "16384": 0},
- "total_mb": 7680},
- "disk_free":
- {"units_by_mb": {"81920": 11, "20480": 46,
- "40960": 23, "163840": 5, "0": 0},
- "total_mb": 1052672}
- }
- self.cells.manager.state_manager.get_our_capacities(). \
- AndReturn(response)
- self.mox.ReplayAll()
-
-
-class CellsCapacitySampleXmlTest(CellsCapacitySampleJsonTest):
- ctype = 'xml'
-
-
-class BlockDeviceMappingV2BootJsonTest(ServersSampleBase):
- extension_name = ('nova.api.openstack.compute.contrib.'
- 'block_device_mapping_v2_boot.'
- 'Block_device_mapping_v2_boot')
-
- def _get_flags(self):
- f = super(BlockDeviceMappingV2BootJsonTest, self)._get_flags()
- f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
- # We need the volumes extension as well
- f['osapi_compute_extension'].append(
- 'nova.api.openstack.compute.contrib.volumes.Volumes')
- return f
-
- def test_servers_post_with_bdm_v2(self):
- self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
- self.stubs.Set(cinder.API, 'check_attach',
- fakes.stub_volume_check_attach)
- return self._post_server()
-
-
-class BlockDeviceMappingV2BootXmlTest(BlockDeviceMappingV2BootJsonTest):
- ctype = 'xml'
-
-
-class FloatingIPPoolsSampleJsonTests(ApiSampleTestBaseV2):
- extension_name = ("nova.api.openstack.compute.contrib.floating_ip_pools."
- "Floating_ip_pools")
-
- def test_list_floatingippools(self):
- pool_list = ["pool1", "pool2"]
-
- def fake_get_floating_ip_pools(self, context):
- return pool_list
-
- self.stubs.Set(network_api.API, "get_floating_ip_pools",
- fake_get_floating_ip_pools)
- response = self._do_get('os-floating-ip-pools')
- subs = {
- 'pool1': pool_list[0],
- 'pool2': pool_list[1]
- }
- self._verify_response('floatingippools-list-resp', subs, response, 200)
-
-
-class FloatingIPPoolsSampleXmlTests(FloatingIPPoolsSampleJsonTests):
- ctype = 'xml'
-
-
-class MultinicSampleJsonTest(ServersSampleBase):
- extension_name = "nova.api.openstack.compute.contrib.multinic.Multinic"
-
- def _disable_instance_dns_manager(self):
- # NOTE(markmc): it looks like multinic and instance_dns_manager are
- # incompatible. See:
- # https://bugs.launchpad.net/nova/+bug/1213251
- self.flags(
- instance_dns_manager='nova.network.noop_dns_driver.NoopDNSDriver')
-
- def setUp(self):
- self._disable_instance_dns_manager()
- super(MultinicSampleJsonTest, self).setUp()
- self.uuid = self._post_server()
-
- def _add_fixed_ip(self):
- subs = {"networkId": 1}
- response = self._do_post('servers/%s/action' % (self.uuid),
- 'multinic-add-fixed-ip-req', subs)
- self.assertEqual(response.status_code, 202)
-
- def test_add_fixed_ip(self):
- self._add_fixed_ip()
-
- def test_remove_fixed_ip(self):
- self._add_fixed_ip()
-
- subs = {"ip": "10.0.0.4"}
- response = self._do_post('servers/%s/action' % (self.uuid),
- 'multinic-remove-fixed-ip-req', subs)
- self.assertEqual(response.status_code, 202)
-
-
-class MultinicSampleXmlTest(MultinicSampleJsonTest):
- ctype = "xml"
-
-
-class InstanceUsageAuditLogJsonTest(ApiSampleTestBaseV2):
- extension_name = ("nova.api.openstack.compute.contrib."
- "instance_usage_audit_log.Instance_usage_audit_log")
-
- def test_show_instance_usage_audit_log(self):
- response = self._do_get('os-instance_usage_audit_log/%s' %
- urllib.quote('2012-07-05 10:00:00'))
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('inst-usage-audit-log-show-get-resp',
- subs, response, 200)
-
- def test_index_instance_usage_audit_log(self):
- response = self._do_get('os-instance_usage_audit_log')
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('inst-usage-audit-log-index-get-resp',
- subs, response, 200)
-
-
-class InstanceUsageAuditLogXmlTest(InstanceUsageAuditLogJsonTest):
- ctype = "xml"
-
-
-class FlavorExtraSpecsSampleJsonTests(ApiSampleTestBaseV2):
- extension_name = ("nova.api.openstack.compute.contrib.flavorextraspecs."
- "Flavorextraspecs")
-
- def _flavor_extra_specs_create(self):
- subs = {'value1': 'value1',
- 'value2': 'value2'
- }
- response = self._do_post('flavors/1/os-extra_specs',
- 'flavor-extra-specs-create-req', subs)
- self._verify_response('flavor-extra-specs-create-resp',
- subs, response, 200)
-
- def test_flavor_extra_specs_get(self):
- subs = {'value1': 'value1'}
- self._flavor_extra_specs_create()
- response = self._do_get('flavors/1/os-extra_specs/key1')
- self._verify_response('flavor-extra-specs-get-resp',
- subs, response, 200)
-
- def test_flavor_extra_specs_list(self):
- subs = {'value1': 'value1',
- 'value2': 'value2'
- }
- self._flavor_extra_specs_create()
- response = self._do_get('flavors/1/os-extra_specs')
- self._verify_response('flavor-extra-specs-list-resp',
- subs, response, 200)
-
- def test_flavor_extra_specs_create(self):
- self._flavor_extra_specs_create()
-
- def test_flavor_extra_specs_update(self):
- subs = {'value1': 'new_value1'}
- self._flavor_extra_specs_create()
- response = self._do_put('flavors/1/os-extra_specs/key1',
- 'flavor-extra-specs-update-req', subs)
- self._verify_response('flavor-extra-specs-update-resp',
- subs, response, 200)
-
- def test_flavor_extra_specs_delete(self):
- self._flavor_extra_specs_create()
- response = self._do_delete('flavors/1/os-extra_specs/key1')
- self.assertEqual(response.status_code, 200)
- self.assertEqual(response.content, '')
-
-
-class FlavorExtraSpecsSampleXmlTests(FlavorExtraSpecsSampleJsonTests):
- ctype = 'xml'
-
-
-class FpingSampleJsonTests(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib.fping.Fping")
-
- def setUp(self):
- super(FpingSampleJsonTests, self).setUp()
-
- def fake_check_fping(self):
- pass
- self.stubs.Set(utils, "execute", test_fping.execute)
- self.stubs.Set(fping.FpingController, "check_fping",
- fake_check_fping)
-
- def test_get_fping(self):
- self._post_server()
- response = self._do_get('os-fping')
- subs = self._get_regexes()
- self._verify_response('fping-get-resp', subs, response, 200)
-
- def test_get_fping_details(self):
- uuid = self._post_server()
- response = self._do_get('os-fping/%s' % (uuid))
- subs = self._get_regexes()
- self._verify_response('fping-get-details-resp', subs, response, 200)
-
-
-class FpingSampleXmlTests(FpingSampleJsonTests):
- ctype = 'xml'
-
-
-class ExtendedAvailabilityZoneJsonTests(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib"
- ".extended_availability_zone"
- ".Extended_availability_zone")
-
- def test_show(self):
- uuid = self._post_server()
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('server-get-resp', subs, response, 200)
-
- def test_detail(self):
- self._post_server()
- response = self._do_get('servers/detail')
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('servers-detail-resp', subs, response, 200)
-
-
-class ExtendedAvailabilityZoneXmlTests(ExtendedAvailabilityZoneJsonTests):
- ctype = 'xml'
-
-
-class EvacuateJsonTest(ServersSampleBase):
-
- extension_name = ("nova.api.openstack.compute.contrib"
- ".evacuate.Evacuate")
-
- def test_server_evacuate(self):
- uuid = self._post_server()
-
- req_subs = {
- 'host': 'testHost',
- "adminPass": "MySecretPass",
- "onSharedStorage": 'False'
- }
-
- def fake_service_is_up(self, service):
- """Simulate validation of instance host is down."""
- return False
-
- def fake_service_get_by_compute_host(self, context, host):
- """Simulate that given host is a valid host."""
- return {
- 'host_name': host,
- 'service': 'compute',
- 'zone': 'nova'
- }
-
- def fake_rebuild_instance(self, ctxt, instance, new_pass,
- injected_files, image_ref, orig_image_ref,
- orig_sys_metadata, bdms, recreate=False,
- on_shared_storage=False, host=None,
- preserve_ephemeral=False, kwargs=None):
- return {
- 'adminPass': new_pass
- }
-
- self.stubs.Set(service_group_api.API, 'service_is_up',
- fake_service_is_up)
- self.stubs.Set(compute_api.HostAPI, 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
- self.stubs.Set(compute_rpcapi.ComputeAPI, 'rebuild_instance',
- fake_rebuild_instance)
-
- response = self._do_post('servers/%s/action' % uuid,
- 'server-evacuate-req', req_subs)
- subs = self._get_regexes()
- self._verify_response('server-evacuate-resp', subs, response, 200)
-
-
-class EvacuateXmlTest(EvacuateJsonTest):
- ctype = 'xml'
-
-
-class EvacuateFindHostSampleJsonTest(ServersSampleBase):
- extends_name = ("nova.api.openstack.compute.contrib"
- ".evacuate.Evacuate")
-
- extension_name = ("nova.api.openstack.compute.contrib"
- ".extended_evacuate_find_host.Extended_evacuate_find_host")
-
- @mock.patch('nova.compute.manager.ComputeManager._check_instance_exists')
- @mock.patch('nova.compute.api.HostAPI.service_get_by_compute_host')
- @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
- def test_server_evacuate(self, rebuild_mock, service_get_mock,
- check_instance_mock):
- self.uuid = self._post_server()
-
- req_subs = {
- "adminPass": "MySecretPass",
- "onSharedStorage": 'False'
- }
-
- check_instance_mock.return_value = False
-
- def fake_service_get_by_compute_host(self, context, host):
- return {
- 'host_name': host,
- 'service': 'compute',
- 'zone': 'nova'
- }
- service_get_mock.side_effect = fake_service_get_by_compute_host
- with mock.patch.object(service_group_api.API, 'service_is_up',
- return_value=False):
- response = self._do_post('servers/%s/action' % self.uuid,
- 'server-evacuate-find-host-req', req_subs)
- subs = self._get_regexes()
- self._verify_response('server-evacuate-find-host-resp', subs,
- response, 200)
- rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
- orig_image_ref=mock.ANY, image_ref=mock.ANY,
- injected_files=mock.ANY, new_pass="MySecretPass",
- orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
- on_shared_storage=False, preserve_ephemeral=mock.ANY,
- host=None)
-
-
-class EvacuateFindHostSampleXmlTests(EvacuateFindHostSampleJsonTest):
- ctype = "xml"
-
-
-class FloatingIpDNSJsonTest(ApiSampleTestBaseV2):
- extension_name = ("nova.api.openstack.compute.contrib.floating_ip_dns."
- "Floating_ip_dns")
-
- domain = 'domain1.example.org'
- name = 'instance1'
- scope = 'public'
- project = 'project1'
- dns_type = 'A'
- ip = '192.168.1.1'
-
- def _create_or_update(self):
- subs = {'domain': self.domain,
- 'project': self.project,
- 'scope': self.scope}
- response = self._do_put('os-floating-ip-dns/%s' % self.domain,
- 'floating-ip-dns-create-or-update-req', subs)
- self._verify_response('floating-ip-dns-create-or-update-resp', subs,
- response, 200)
-
- def _create_or_update_entry(self):
- subs = {'ip': self.ip, 'dns_type': self.dns_type}
- response = self._do_put('os-floating-ip-dns/%s/entries/%s'
- % (self.domain, self.name),
- 'floating-ip-dns-create-or-update-entry-req',
- subs)
- subs.update({'name': self.name, 'domain': self.domain})
- self._verify_response('floating-ip-dns-create-or-update-entry-resp',
- subs, response, 200)
-
- def test_floating_ip_dns_list(self):
- self._create_or_update()
- response = self._do_get('os-floating-ip-dns')
- subs = {'domain': self.domain,
- 'project': self.project,
- 'scope': self.scope}
- self._verify_response('floating-ip-dns-list-resp', subs,
- response, 200)
-
- def test_floating_ip_dns_create_or_update(self):
- self._create_or_update()
-
- def test_floating_ip_dns_delete(self):
- self._create_or_update()
- response = self._do_delete('os-floating-ip-dns/%s' % self.domain)
- self.assertEqual(response.status_code, 202)
-
- def test_floating_ip_dns_create_or_update_entry(self):
- self._create_or_update_entry()
-
- def test_floating_ip_dns_entry_get(self):
- self._create_or_update_entry()
- response = self._do_get('os-floating-ip-dns/%s/entries/%s'
- % (self.domain, self.name))
- subs = {'domain': self.domain,
- 'ip': self.ip,
- 'name': self.name}
- self._verify_response('floating-ip-dns-entry-get-resp', subs,
- response, 200)
-
- def test_floating_ip_dns_entry_delete(self):
- self._create_or_update_entry()
- response = self._do_delete('os-floating-ip-dns/%s/entries/%s'
- % (self.domain, self.name))
- self.assertEqual(response.status_code, 202)
-
- def test_floating_ip_dns_entry_list(self):
- self._create_or_update_entry()
- response = self._do_get('os-floating-ip-dns/%s/entries/%s'
- % (self.domain, self.ip))
- subs = {'domain': self.domain,
- 'ip': self.ip,
- 'name': self.name}
- self._verify_response('floating-ip-dns-entry-list-resp', subs,
- response, 200)
-
-
-class FloatingIpDNSXmlTest(FloatingIpDNSJsonTest):
- ctype = 'xml'
-
-
-class InstanceActionsSampleJsonTest(ApiSampleTestBaseV2):
- extension_name = ('nova.api.openstack.compute.contrib.instance_actions.'
- 'Instance_actions')
-
- def setUp(self):
- super(InstanceActionsSampleJsonTest, self).setUp()
- self.actions = fake_server_actions.FAKE_ACTIONS
- self.events = fake_server_actions.FAKE_EVENTS
- self.instance = test_utils.get_test_instance()
-
- def fake_server_action_get_by_request_id(context, uuid, request_id):
- return copy.deepcopy(self.actions[uuid][request_id])
-
- def fake_server_actions_get(context, uuid):
- return [copy.deepcopy(value) for value in
- self.actions[uuid].itervalues()]
-
- def fake_server_action_events_get(context, action_id):
- return copy.deepcopy(self.events[action_id])
-
- def fake_instance_get_by_uuid(context, instance_id):
- return self.instance
-
- def fake_get(self, context, instance_uuid, expected_attrs=None,
- want_objects=True):
- return {'uuid': instance_uuid}
-
- self.stubs.Set(db, 'action_get_by_request_id',
- fake_server_action_get_by_request_id)
- self.stubs.Set(db, 'actions_get', fake_server_actions_get)
- self.stubs.Set(db, 'action_events_get',
- fake_server_action_events_get)
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
- self.stubs.Set(compute_api.API, 'get', fake_get)
-
- def test_instance_action_get(self):
- fake_uuid = fake_server_actions.FAKE_UUID
- fake_request_id = fake_server_actions.FAKE_REQUEST_ID1
- fake_action = self.actions[fake_uuid][fake_request_id]
-
- response = self._do_get('servers/%s/os-instance-actions/%s' %
- (fake_uuid, fake_request_id))
- subs = self._get_regexes()
- subs['action'] = '(reboot)|(resize)'
- subs['instance_uuid'] = fake_uuid
- subs['integer_id'] = '[0-9]+'
- subs['request_id'] = fake_action['request_id']
- subs['start_time'] = fake_action['start_time']
- subs['result'] = '(Success)|(Error)'
- subs['event'] = '(schedule)|(compute_create)'
- self._verify_response('instance-action-get-resp', subs, response, 200)
-
- def test_instance_actions_list(self):
- fake_uuid = fake_server_actions.FAKE_UUID
- response = self._do_get('servers/%s/os-instance-actions' % (fake_uuid))
- subs = self._get_regexes()
- subs['action'] = '(reboot)|(resize)'
- subs['integer_id'] = '[0-9]+'
- subs['request_id'] = ('req-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
- '-[0-9a-f]{4}-[0-9a-f]{12}')
- self._verify_response('instance-actions-list-resp', subs,
- response, 200)
-
-
-class InstanceActionsSampleXmlTest(InstanceActionsSampleJsonTest):
- ctype = 'xml'
-
-
-class ImageSizeSampleJsonTests(ApiSampleTestBaseV2):
- extension_name = ("nova.api.openstack.compute.contrib"
- ".image_size.Image_size")
-
- def test_show(self):
- # Get api sample of one single image details request.
- image_id = fake.get_valid_image_id()
- response = self._do_get('images/%s' % image_id)
- subs = self._get_regexes()
- subs['image_id'] = image_id
- self._verify_response('image-get-resp', subs, response, 200)
-
- def test_detail(self):
- # Get api sample of all images details request.
- response = self._do_get('images/detail')
- subs = self._get_regexes()
- self._verify_response('images-details-get-resp', subs, response, 200)
-
-
-class ImageSizeSampleXmlTests(ImageSizeSampleJsonTests):
- ctype = 'xml'
-
-
-class ConfigDriveSampleJsonTest(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib.config_drive."
- "Config_drive")
-
- def setUp(self):
- super(ConfigDriveSampleJsonTest, self).setUp()
- fakes.stub_out_networking(self.stubs)
- fakes.stub_out_rate_limiting(self.stubs)
- fake.stub_out_image_service(self.stubs)
-
- def test_config_drive_show(self):
- uuid = self._post_server()
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- # config drive can be a string for True or empty value for False
- subs['cdrive'] = '.*'
- self._verify_response('server-config-drive-get-resp', subs,
- response, 200)
-
- def test_config_drive_detail(self):
- self._post_server()
- response = self._do_get('servers/detail')
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- # config drive can be a string for True or empty value for False
- subs['cdrive'] = '.*'
- self._verify_response('servers-config-drive-details-resp',
- subs, response, 200)
-
-
-class ConfigDriveSampleXmlTest(ConfigDriveSampleJsonTest):
- ctype = 'xml'
-
-
-class FlavorAccessSampleJsonTests(ApiSampleTestBaseV2):
- extension_name = ("nova.api.openstack.compute.contrib.flavor_access."
- "Flavor_access")
-
- def _get_flags(self):
- f = super(FlavorAccessSampleJsonTests, self)._get_flags()
- f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
- # FlavorAccess extension also needs Flavormanage to be loaded.
- f['osapi_compute_extension'].append(
- 'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
- return f
-
- def _add_tenant(self):
- subs = {
- 'tenant_id': 'fake_tenant',
- 'flavor_id': 10
- }
- response = self._do_post('flavors/10/action',
- 'flavor-access-add-tenant-req',
- subs)
- self._verify_response('flavor-access-add-tenant-resp',
- subs, response, 200)
-
- def _create_flavor(self):
- subs = {
- 'flavor_id': 10,
- 'flavor_name': 'test_flavor'
- }
- response = self._do_post("flavors",
- "flavor-access-create-req",
- subs)
- subs.update(self._get_regexes())
- self._verify_response("flavor-access-create-resp", subs, response, 200)
-
- def test_flavor_access_create(self):
- self._create_flavor()
-
- def test_flavor_access_detail(self):
- response = self._do_get('flavors/detail')
- subs = self._get_regexes()
- self._verify_response('flavor-access-detail-resp', subs, response, 200)
-
- def test_flavor_access_list(self):
- self._create_flavor()
- self._add_tenant()
- flavor_id = 10
- response = self._do_get('flavors/%s/os-flavor-access' % flavor_id)
- subs = {
- 'flavor_id': flavor_id,
- 'tenant_id': 'fake_tenant',
- }
- self._verify_response('flavor-access-list-resp', subs, response, 200)
-
- def test_flavor_access_show(self):
- flavor_id = 1
- response = self._do_get('flavors/%s' % flavor_id)
- subs = {
- 'flavor_id': flavor_id
- }
- subs.update(self._get_regexes())
- self._verify_response('flavor-access-show-resp', subs, response, 200)
-
- def test_flavor_access_add_tenant(self):
- self._create_flavor()
- self._add_tenant()
-
- def test_flavor_access_remove_tenant(self):
- self._create_flavor()
- self._add_tenant()
- subs = {
- 'tenant_id': 'fake_tenant',
- }
- response = self._do_post('flavors/10/action',
- "flavor-access-remove-tenant-req",
- subs)
- exp_subs = {
- "tenant_id": self.api.project_id,
- "flavor_id": "10"
- }
- self._verify_response('flavor-access-remove-tenant-resp',
- exp_subs, response, 200)
-
-
-class FlavorAccessSampleXmlTests(FlavorAccessSampleJsonTests):
- ctype = 'xml'
-
-
-@mock.patch.object(service_group_api.API, "service_is_up", lambda _: True)
-class HypervisorsSampleJsonTests(ApiSampleTestBaseV2):
- extension_name = ("nova.api.openstack.compute.contrib.hypervisors."
- "Hypervisors")
-
- def test_hypervisors_list(self):
- response = self._do_get('os-hypervisors')
- self._verify_response('hypervisors-list-resp', {}, response, 200)
-
- def test_hypervisors_search(self):
- response = self._do_get('os-hypervisors/fake/search')
- self._verify_response('hypervisors-search-resp', {}, response, 200)
-
- def test_hypervisors_servers(self):
- response = self._do_get('os-hypervisors/fake/servers')
- self._verify_response('hypervisors-servers-resp', {}, response, 200)
-
- def test_hypervisors_show(self):
- hypervisor_id = 1
- subs = {
- 'hypervisor_id': hypervisor_id
- }
- response = self._do_get('os-hypervisors/%s' % hypervisor_id)
- subs.update(self._get_regexes())
- self._verify_response('hypervisors-show-resp', subs, response, 200)
-
- def test_hypervisors_statistics(self):
- response = self._do_get('os-hypervisors/statistics')
- self._verify_response('hypervisors-statistics-resp', {}, response, 200)
-
- def test_hypervisors_uptime(self):
- def fake_get_host_uptime(self, context, hyp):
- return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
- " 0.20, 0.12, 0.14")
-
- self.stubs.Set(compute_api.HostAPI,
- 'get_host_uptime', fake_get_host_uptime)
- hypervisor_id = 1
- response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
- subs = {
- 'hypervisor_id': hypervisor_id,
- }
- self._verify_response('hypervisors-uptime-resp', subs, response, 200)
-
-
-class HypervisorsSampleXmlTests(HypervisorsSampleJsonTests):
- ctype = "xml"
-
-
-class ExtendedHypervisorsJsonTest(ApiSampleTestBaseV2):
- extends_name = ("nova.api.openstack.compute.contrib."
- "hypervisors.Hypervisors")
- extension_name = ("nova.api.openstack.compute.contrib."
- "extended_hypervisors.Extended_hypervisors")
-
- def test_hypervisors_show_with_ip(self):
- hypervisor_id = 1
- subs = {
- 'hypervisor_id': hypervisor_id
- }
- response = self._do_get('os-hypervisors/%s' % hypervisor_id)
- subs.update(self._get_regexes())
- self._verify_response('hypervisors-show-with-ip-resp',
- subs, response, 200)
-
-
-class ExtendedHypervisorsXmlTest(ExtendedHypervisorsJsonTest):
- ctype = "xml"
-
-
-class HypervisorStatusJsonTest(ApiSampleTestBaseV2):
- extends_name = ("nova.api.openstack.compute.contrib."
- "hypervisors.Hypervisors")
- extension_name = ("nova.api.openstack.compute.contrib."
- "hypervisor_status.Hypervisor_status")
-
- def test_hypervisors_show_with_status(self):
- hypervisor_id = 1
- subs = {
- 'hypervisor_id': hypervisor_id
- }
- response = self._do_get('os-hypervisors/%s' % hypervisor_id)
- subs.update(self._get_regexes())
- self._verify_response('hypervisors-show-with-status-resp',
- subs, response, 200)
-
-
-class HypervisorStatusXmlTest(HypervisorStatusJsonTest):
- ctype = 'xml'
-
-
-@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
-class HypervisorsCellsSampleJsonTests(ApiSampleTestBaseV2):
- extension_name = ("nova.api.openstack.compute.contrib.hypervisors."
- "Hypervisors")
-
- def setUp(self):
- self.flags(enable=True, cell_type='api', group='cells')
- super(HypervisorsCellsSampleJsonTests, self).setUp()
-
- def test_hypervisor_uptime(self, mocks):
- fake_hypervisor = {'service': {'host': 'fake-mini',
- 'disabled': False,
- 'disabled_reason': None},
- 'id': 1, 'hypervisor_hostname': 'fake-mini'}
-
- def fake_get_host_uptime(self, context, hyp):
- return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
- " 0.20, 0.12, 0.14")
-
- def fake_compute_node_get(self, context, hyp):
- return fake_hypervisor
-
- self.stubs.Set(cells_api.HostAPI, 'compute_node_get',
- fake_compute_node_get)
-
- self.stubs.Set(cells_api.HostAPI,
- 'get_host_uptime', fake_get_host_uptime)
- hypervisor_id = fake_hypervisor['id']
- response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
- subs = {'hypervisor_id': hypervisor_id}
- self._verify_response('hypervisors-uptime-resp', subs, response, 200)
-
-
-class HypervisorsCellsSampleXmlTests(HypervisorsCellsSampleJsonTests):
- ctype = "xml"
-
-
-class AttachInterfacesSampleJsonTest(ServersSampleBase):
- extension_name = ('nova.api.openstack.compute.contrib.attach_interfaces.'
- 'Attach_interfaces')
-
- def setUp(self):
- super(AttachInterfacesSampleJsonTest, self).setUp()
-
- def fake_list_ports(self, *args, **kwargs):
- uuid = kwargs.get('device_id', None)
- if not uuid:
- raise exception.InstanceNotFound(instance_id=None)
- port_data = {
- "id": "ce531f90-199f-48c0-816c-13e38010b442",
- "network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
- "admin_state_up": True,
- "status": "ACTIVE",
- "mac_address": "fa:16:3e:4c:2c:30",
- "fixed_ips": [
- {
- "ip_address": "192.168.1.3",
- "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
- }
- ],
- "device_id": uuid,
- }
- ports = {'ports': [port_data]}
- return ports
-
- def fake_show_port(self, context, port_id=None):
- if not port_id:
- raise exception.PortNotFound(port_id=None)
- port_data = {
- "id": port_id,
- "network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
- "admin_state_up": True,
- "status": "ACTIVE",
- "mac_address": "fa:16:3e:4c:2c:30",
- "fixed_ips": [
- {
- "ip_address": "192.168.1.3",
- "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
- }
- ],
- "device_id": 'bece68a3-2f8b-4e66-9092-244493d6aba7',
- }
- port = {'port': port_data}
- return port
-
- def fake_attach_interface(self, context, instance,
- network_id, port_id,
- requested_ip='192.168.1.3'):
- if not network_id:
- network_id = "fake_net_uuid"
- if not port_id:
- port_id = "fake_port_uuid"
- vif = fake_network_cache_model.new_vif()
- vif['id'] = port_id
- vif['network']['id'] = network_id
- vif['network']['subnets'][0]['ips'][0] = requested_ip
- return vif
-
- def fake_detach_interface(self, context, instance, port_id):
- pass
-
- self.stubs.Set(network_api.API, 'list_ports', fake_list_ports)
- self.stubs.Set(network_api.API, 'show_port', fake_show_port)
- self.stubs.Set(compute_api.API, 'attach_interface',
- fake_attach_interface)
- self.stubs.Set(compute_api.API, 'detach_interface',
- fake_detach_interface)
- self.flags(auth_strategy=None, group='neutron')
- self.flags(url='http://anyhost/', group='neutron')
- self.flags(url_timeout=30, group='neutron')
-
- def generalize_subs(self, subs, vanilla_regexes):
- subs['subnet_id'] = vanilla_regexes['uuid']
- subs['net_id'] = vanilla_regexes['uuid']
- subs['port_id'] = vanilla_regexes['uuid']
- subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
- subs['ip_address'] = vanilla_regexes['ip']
- return subs
-
- def test_list_interfaces(self):
- instance_uuid = self._post_server()
- response = self._do_get('servers/%s/os-interface' % instance_uuid)
- subs = {
- 'ip_address': '192.168.1.3',
- 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
- 'mac_addr': 'fa:16:3e:4c:2c:30',
- 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
- 'port_id': 'ce531f90-199f-48c0-816c-13e38010b442',
- 'port_state': 'ACTIVE'
- }
- self._verify_response('attach-interfaces-list-resp', subs,
- response, 200)
-
- def _stub_show_for_instance(self, instance_uuid, port_id):
- show_port = network_api.API().show_port(None, port_id)
- show_port['port']['device_id'] = instance_uuid
- self.stubs.Set(network_api.API, 'show_port', lambda *a, **k: show_port)
-
- def test_show_interfaces(self):
- instance_uuid = self._post_server()
- port_id = 'ce531f90-199f-48c0-816c-13e38010b442'
- self._stub_show_for_instance(instance_uuid, port_id)
- response = self._do_get('servers/%s/os-interface/%s' %
- (instance_uuid, port_id))
- subs = {
- 'ip_address': '192.168.1.3',
- 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
- 'mac_addr': 'fa:16:3e:4c:2c:30',
- 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
- 'port_id': port_id,
- 'port_state': 'ACTIVE'
- }
- self._verify_response('attach-interfaces-show-resp', subs,
- response, 200)
-
- def test_create_interfaces(self, instance_uuid=None):
- if instance_uuid is None:
- instance_uuid = self._post_server()
- subs = {
- 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
- 'port_id': 'ce531f90-199f-48c0-816c-13e38010b442',
- 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
- 'ip_address': '192.168.1.3',
- 'port_state': 'ACTIVE',
- 'mac_addr': 'fa:16:3e:4c:2c:30',
- }
- self._stub_show_for_instance(instance_uuid, subs['port_id'])
- response = self._do_post('servers/%s/os-interface' % instance_uuid,
- 'attach-interfaces-create-req', subs)
- subs.update(self._get_regexes())
- self._verify_response('attach-interfaces-create-resp', subs,
- response, 200)
-
- def test_delete_interfaces(self):
- instance_uuid = self._post_server()
- port_id = 'ce531f90-199f-48c0-816c-13e38010b442'
- response = self._do_delete('servers/%s/os-interface/%s' %
- (instance_uuid, port_id))
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
-
-
-class AttachInterfacesSampleXmlTest(AttachInterfacesSampleJsonTest):
- ctype = 'xml'
-
-
-class SnapshotsSampleJsonTests(ApiSampleTestBaseV2):
- extension_name = "nova.api.openstack.compute.contrib.volumes.Volumes"
-
- create_subs = {
- 'snapshot_name': 'snap-001',
- 'description': 'Daily backup',
- 'volume_id': '521752a6-acf6-4b2d-bc7a-119f9148cd8c'
- }
-
- def setUp(self):
- super(SnapshotsSampleJsonTests, self).setUp()
- self.stubs.Set(cinder.API, "get_all_snapshots",
- fakes.stub_snapshot_get_all)
- self.stubs.Set(cinder.API, "get_snapshot", fakes.stub_snapshot_get)
-
- def _create_snapshot(self):
- self.stubs.Set(cinder.API, "create_snapshot",
- fakes.stub_snapshot_create)
-
- response = self._do_post("os-snapshots",
- "snapshot-create-req",
- self.create_subs)
- return response
-
- def test_snapshots_create(self):
- response = self._create_snapshot()
- self.create_subs.update(self._get_regexes())
- self._verify_response("snapshot-create-resp",
- self.create_subs, response, 200)
-
- def test_snapshots_delete(self):
- self.stubs.Set(cinder.API, "delete_snapshot",
- fakes.stub_snapshot_delete)
- self._create_snapshot()
- response = self._do_delete('os-snapshots/100')
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
-
- def test_snapshots_detail(self):
- response = self._do_get('os-snapshots/detail')
- subs = self._get_regexes()
- self._verify_response('snapshots-detail-resp', subs, response, 200)
-
- def test_snapshots_list(self):
- response = self._do_get('os-snapshots')
- subs = self._get_regexes()
- self._verify_response('snapshots-list-resp', subs, response, 200)
-
- def test_snapshots_show(self):
- response = self._do_get('os-snapshots/100')
- subs = {
- 'snapshot_name': 'Default name',
- 'description': 'Default description'
- }
- subs.update(self._get_regexes())
- self._verify_response('snapshots-show-resp', subs, response, 200)
-
-
-class SnapshotsSampleXmlTests(SnapshotsSampleJsonTests):
- ctype = "xml"
-
-
-class AssistedVolumeSnapshotsJsonTest(ApiSampleTestBaseV2):
- """Assisted volume snapshots."""
- extension_name = ("nova.api.openstack.compute.contrib."
- "assisted_volume_snapshots.Assisted_volume_snapshots")
-
- def _create_assisted_snapshot(self, subs):
- self.stubs.Set(compute_api.API, 'volume_snapshot_create',
- fakes.stub_compute_volume_snapshot_create)
-
- response = self._do_post("os-assisted-volume-snapshots",
- "snapshot-create-assisted-req",
- subs)
- return response
-
- def test_snapshots_create_assisted(self):
- subs = {
- 'snapshot_name': 'snap-001',
- 'description': 'Daily backup',
- 'volume_id': '521752a6-acf6-4b2d-bc7a-119f9148cd8c',
- 'snapshot_id': '421752a6-acf6-4b2d-bc7a-119f9148cd8c',
- 'type': 'qcow2',
- 'new_file': 'new_file_name'
- }
- subs.update(self._get_regexes())
- response = self._create_assisted_snapshot(subs)
- self._verify_response("snapshot-create-assisted-resp",
- subs, response, 200)
-
- def test_snapshots_delete_assisted(self):
- self.stubs.Set(compute_api.API, 'volume_snapshot_delete',
- fakes.stub_compute_volume_snapshot_delete)
- snapshot_id = '100'
- response = self._do_delete(
- 'os-assisted-volume-snapshots/%s?delete_info='
- '{"volume_id":"521752a6-acf6-4b2d-bc7a-119f9148cd8c"}'
- % snapshot_id)
- self.assertEqual(response.status_code, 204)
- self.assertEqual(response.content, '')
-
-
-class AssistedVolumeSnapshotsXmlTest(AssistedVolumeSnapshotsJsonTest):
- ctype = "xml"
-
-
-class VolumeAttachmentsSampleBase(ServersSampleBase):
- def _stub_db_bdms_get_all_by_instance(self, server_id):
-
- def fake_bdms_get_all_by_instance(context, instance_uuid,
- use_slave=False):
- bdms = [
- fake_block_device.FakeDbBlockDeviceDict(
- {'id': 1, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f803',
- 'instance_uuid': server_id, 'source_type': 'volume',
- 'destination_type': 'volume', 'device_name': '/dev/sdd'}),
- fake_block_device.FakeDbBlockDeviceDict(
- {'id': 2, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f804',
- 'instance_uuid': server_id, 'source_type': 'volume',
- 'destination_type': 'volume', 'device_name': '/dev/sdc'})
- ]
- return bdms
-
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fake_bdms_get_all_by_instance)
-
- def _stub_compute_api_get(self):
-
- def fake_compute_api_get(self, context, instance_id,
- want_objects=False, expected_attrs=None):
- if want_objects:
- return fake_instance.fake_instance_obj(
- context, **{'uuid': instance_id})
- else:
- return {'uuid': instance_id}
-
- self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
-
-
-class VolumeAttachmentsSampleJsonTest(VolumeAttachmentsSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib.volumes.Volumes")
-
- def test_attach_volume_to_server(self):
- self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
- self.stubs.Set(cinder.API, 'check_attach', lambda *a, **k: None)
- self.stubs.Set(cinder.API, 'reserve_volume', lambda *a, **k: None)
- device_name = '/dev/vdd'
- bdm = objects.BlockDeviceMapping()
- bdm['device_name'] = device_name
- self.stubs.Set(compute_manager.ComputeManager,
- "reserve_block_device_name",
- lambda *a, **k: bdm)
- self.stubs.Set(compute_manager.ComputeManager,
- 'attach_volume',
- lambda *a, **k: None)
- self.stubs.Set(objects.BlockDeviceMapping, 'get_by_volume_id',
- classmethod(lambda *a, **k: None))
-
- volume = fakes.stub_volume_get(None, context.get_admin_context(),
- 'a26887c6-c47b-4654-abb5-dfadf7d3f803')
- subs = {
- 'volume_id': volume['id'],
- 'device': device_name
- }
- server_id = self._post_server()
- response = self._do_post('servers/%s/os-volume_attachments'
- % server_id,
- 'attach-volume-to-server-req', subs)
-
- subs.update(self._get_regexes())
- self._verify_response('attach-volume-to-server-resp', subs,
- response, 200)
-
- def test_list_volume_attachments(self):
- server_id = self._post_server()
-
- self._stub_db_bdms_get_all_by_instance(server_id)
-
- response = self._do_get('servers/%s/os-volume_attachments'
- % server_id)
- subs = self._get_regexes()
- self._verify_response('list-volume-attachments-resp', subs,
- response, 200)
-
- def test_volume_attachment_detail(self):
- server_id = self._post_server()
- attach_id = "a26887c6-c47b-4654-abb5-dfadf7d3f803"
- self._stub_db_bdms_get_all_by_instance(server_id)
- self._stub_compute_api_get()
- response = self._do_get('servers/%s/os-volume_attachments/%s'
- % (server_id, attach_id))
- subs = self._get_regexes()
- self._verify_response('volume-attachment-detail-resp', subs,
- response, 200)
-
- def test_volume_attachment_delete(self):
- server_id = self._post_server()
- attach_id = "a26887c6-c47b-4654-abb5-dfadf7d3f803"
- self._stub_db_bdms_get_all_by_instance(server_id)
- self._stub_compute_api_get()
- self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
- self.stubs.Set(compute_api.API, 'detach_volume', lambda *a, **k: None)
- response = self._do_delete('servers/%s/os-volume_attachments/%s'
- % (server_id, attach_id))
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
-
-
-class VolumeAttachmentsSampleXmlTest(VolumeAttachmentsSampleJsonTest):
- ctype = 'xml'
-
-
-class VolumeAttachUpdateSampleJsonTest(VolumeAttachmentsSampleBase):
- extends_name = ("nova.api.openstack.compute.contrib.volumes.Volumes")
- extension_name = ("nova.api.openstack.compute.contrib."
- "volume_attachment_update.Volume_attachment_update")
-
- def test_volume_attachment_update(self):
- self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
- subs = {
- 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f805',
- 'device': '/dev/sdd'
- }
- server_id = self._post_server()
- attach_id = 'a26887c6-c47b-4654-abb5-dfadf7d3f803'
- self._stub_db_bdms_get_all_by_instance(server_id)
- self._stub_compute_api_get()
- self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
- self.stubs.Set(compute_api.API, 'swap_volume', lambda *a, **k: None)
- response = self._do_put('servers/%s/os-volume_attachments/%s'
- % (server_id, attach_id),
- 'update-volume-req',
- subs)
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
-
-
-class VolumeAttachUpdateSampleXmlTest(VolumeAttachUpdateSampleJsonTest):
- ctype = 'xml'
-
-
-class VolumesSampleJsonTest(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib.volumes.Volumes")
-
- def _get_volume_id(self):
- return 'a26887c6-c47b-4654-abb5-dfadf7d3f803'
-
- def _stub_volume(self, id, displayname="Volume Name",
- displaydesc="Volume Description", size=100):
- volume = {
- 'id': id,
- 'size': size,
- 'availability_zone': 'zone1:host1',
- 'instance_uuid': '3912f2b4-c5ba-4aec-9165-872876fe202e',
- 'mountpoint': '/',
- 'status': 'in-use',
- 'attach_status': 'attached',
- 'name': 'vol name',
- 'display_name': displayname,
- 'display_description': displaydesc,
- 'created_at': datetime.datetime(2008, 12, 1, 11, 1, 55),
- 'snapshot_id': None,
- 'volume_type_id': 'fakevoltype',
- 'volume_metadata': [],
- 'volume_type': {'name': 'Backup'}
- }
- return volume
-
- def _stub_volume_get(self, context, volume_id):
- return self._stub_volume(volume_id)
-
- def _stub_volume_delete(self, context, *args, **param):
- pass
-
- def _stub_volume_get_all(self, context, search_opts=None):
- id = self._get_volume_id()
- return [self._stub_volume(id)]
-
- def _stub_volume_create(self, context, size, name, description, snapshot,
- **param):
- id = self._get_volume_id()
- return self._stub_volume(id)
-
- def setUp(self):
- super(VolumesSampleJsonTest, self).setUp()
- fakes.stub_out_networking(self.stubs)
- fakes.stub_out_rate_limiting(self.stubs)
-
- self.stubs.Set(cinder.API, "delete", self._stub_volume_delete)
- self.stubs.Set(cinder.API, "get", self._stub_volume_get)
- self.stubs.Set(cinder.API, "get_all", self._stub_volume_get_all)
-
- def _post_volume(self):
- subs_req = {
- 'volume_name': "Volume Name",
- 'volume_desc': "Volume Description",
- }
-
- self.stubs.Set(cinder.API, "create", self._stub_volume_create)
- response = self._do_post('os-volumes', 'os-volumes-post-req',
- subs_req)
- subs = self._get_regexes()
- subs.update(subs_req)
- self._verify_response('os-volumes-post-resp', subs, response, 200)
-
- def test_volumes_show(self):
- subs = {
- 'volume_name': "Volume Name",
- 'volume_desc': "Volume Description",
- }
- vol_id = self._get_volume_id()
- response = self._do_get('os-volumes/%s' % vol_id)
- subs.update(self._get_regexes())
- self._verify_response('os-volumes-get-resp', subs, response, 200)
-
- def test_volumes_index(self):
- subs = {
- 'volume_name': "Volume Name",
- 'volume_desc': "Volume Description",
- }
- response = self._do_get('os-volumes')
- subs.update(self._get_regexes())
- self._verify_response('os-volumes-index-resp', subs, response, 200)
-
- def test_volumes_detail(self):
- # For now, index and detail are the same.
- # See the volumes api
- subs = {
- 'volume_name': "Volume Name",
- 'volume_desc': "Volume Description",
- }
- response = self._do_get('os-volumes/detail')
- subs.update(self._get_regexes())
- self._verify_response('os-volumes-detail-resp', subs, response, 200)
-
- def test_volumes_create(self):
- self._post_volume()
-
- def test_volumes_delete(self):
- self._post_volume()
- vol_id = self._get_volume_id()
- response = self._do_delete('os-volumes/%s' % vol_id)
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
-
-
-class VolumesSampleXmlTest(VolumesSampleJsonTest):
- ctype = 'xml'
-
-
-class MigrationsSamplesJsonTest(ApiSampleTestBaseV2):
- extension_name = ("nova.api.openstack.compute.contrib.migrations."
- "Migrations")
-
- def _stub_migrations(self, context, filters):
- fake_migrations = [
- {
- 'id': 1234,
- 'source_node': 'node1',
- 'dest_node': 'node2',
- 'source_compute': 'compute1',
- 'dest_compute': 'compute2',
- 'dest_host': '1.2.3.4',
- 'status': 'Done',
- 'instance_uuid': 'instance_id_123',
- 'old_instance_type_id': 1,
- 'new_instance_type_id': 2,
- 'created_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
- 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
- 'deleted_at': None,
- 'deleted': False
- },
- {
- 'id': 5678,
- 'source_node': 'node10',
- 'dest_node': 'node20',
- 'source_compute': 'compute10',
- 'dest_compute': 'compute20',
- 'dest_host': '5.6.7.8',
- 'status': 'Done',
- 'instance_uuid': 'instance_id_456',
- 'old_instance_type_id': 5,
- 'new_instance_type_id': 6,
- 'created_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
- 'updated_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
- 'deleted_at': None,
- 'deleted': False
- }
- ]
- return fake_migrations
-
- def setUp(self):
- super(MigrationsSamplesJsonTest, self).setUp()
- self.stubs.Set(compute_api.API, 'get_migrations',
- self._stub_migrations)
-
- def test_get_migrations(self):
- response = self._do_get('os-migrations')
- subs = self._get_regexes()
-
- self.assertEqual(response.status_code, 200)
- self._verify_response('migrations-get', subs, response, 200)
-
-
-class MigrationsSamplesXmlTest(MigrationsSamplesJsonTest):
- ctype = 'xml'
-
-
-class PreserveEphemeralOnRebuildJsonTest(ServersSampleBase):
- extension_name = ('nova.api.openstack.compute.contrib.'
- 'preserve_ephemeral_rebuild.'
- 'Preserve_ephemeral_rebuild')
-
- def _test_server_action(self, uuid, action,
- subs=None, resp_tpl=None, code=202):
- subs = subs or {}
- subs.update({'action': action})
- response = self._do_post('servers/%s/action' % uuid,
- 'server-action-%s' % action.lower(),
- subs)
- if resp_tpl:
- subs.update(self._get_regexes())
- self._verify_response(resp_tpl, subs, response, code)
- else:
- self.assertEqual(response.status_code, code)
- self.assertEqual(response.content, "")
-
- def test_rebuild_server_preserve_ephemeral_false(self):
- uuid = self._post_server()
- image = self.api.get_images()[0]['id']
- subs = {'host': self._get_host(),
- 'uuid': image,
- 'name': 'foobar',
- 'pass': 'seekr3t',
- 'ip': '1.2.3.4',
- 'ip6': 'fe80::100',
- 'hostid': '[a-f0-9]+',
- 'preserve_ephemeral': 'false'}
- self._test_server_action(uuid, 'rebuild', subs,
- 'server-action-rebuild-resp')
-
- def test_rebuild_server_preserve_ephemeral_true(self):
- image = self.api.get_images()[0]['id']
- subs = {'host': self._get_host(),
- 'uuid': image,
- 'name': 'new-server-test',
- 'pass': 'seekr3t',
- 'ip': '1.2.3.4',
- 'ip6': 'fe80::100',
- 'hostid': '[a-f0-9]+',
- 'preserve_ephemeral': 'true'}
-
- def fake_rebuild(self_, context, instance, image_href, admin_password,
- **kwargs):
- self.assertTrue(kwargs['preserve_ephemeral'])
- self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
-
- instance_uuid = self._post_server()
- response = self._do_post('servers/%s/action' % instance_uuid,
- 'server-action-rebuild', subs)
- self.assertEqual(response.status_code, 202)
-
-
-class PreserveEphemeralOnRebuildXmlTest(PreserveEphemeralOnRebuildJsonTest):
- ctype = 'xml'
-
-
-class ServerExternalEventsJsonTest(ServersSampleBase):
- extension_name = ('nova.api.openstack.compute.contrib.'
- 'server_external_events.Server_external_events')
-
- def test_create_event(self):
- instance_uuid = self._post_server()
- subs = {
- 'uuid': instance_uuid,
- 'name': 'network-changed',
- 'status': 'completed',
- 'tag': 'foo',
- }
- response = self._do_post('os-server-external-events',
- 'event-create-req',
- subs)
- subs.update(self._get_regexes())
- self._verify_response('event-create-resp', subs, response, 200)
-
-
-class ServerExternalEventsXmlTest(ServerExternalEventsJsonTest):
- ctype = 'xml'
-
-
-class ServerGroupsSampleJsonTest(ServersSampleBase):
- extension_name = ("nova.api.openstack.compute.contrib"
- ".server_groups.Server_groups")
-
- def _get_create_subs(self):
- return {'name': 'test'}
-
- def _post_server_group(self):
- """Verify the response status code and returns the UUID of the
- newly created server group.
- """
- subs = self._get_create_subs()
- response = self._do_post('os-server-groups',
- 'server-groups-post-req', subs)
- subs = self._get_regexes()
- subs['name'] = 'test'
- return self._verify_response('server-groups-post-resp',
- subs, response, 200)
-
- def _create_server_group(self):
- subs = self._get_create_subs()
- return self._do_post('os-server-groups',
- 'server-groups-post-req', subs)
-
- def test_server_groups_post(self):
- return self._post_server_group()
-
- def test_server_groups_list(self):
- subs = self._get_create_subs()
- uuid = self._post_server_group()
- response = self._do_get('os-server-groups')
- subs.update(self._get_regexes())
- subs['id'] = uuid
- self._verify_response('server-groups-list-resp',
- subs, response, 200)
-
- def test_server_groups_get(self):
- # Get api sample of server groups get request.
- subs = {'name': 'test'}
- uuid = self._post_server_group()
- subs['id'] = uuid
- response = self._do_get('os-server-groups/%s' % uuid)
-
- self._verify_response('server-groups-get-resp', subs, response, 200)
-
- def test_server_groups_delete(self):
- uuid = self._post_server_group()
- response = self._do_delete('os-server-groups/%s' % uuid)
- self.assertEqual(response.status_code, 204)
-
-
-class ServerGroupsSampleXmlTest(ServerGroupsSampleJsonTest):
- ctype = 'xml'
-
-
-class ServerGroupQuotas_LimitsSampleJsonTest(LimitsSampleJsonTest):
- extension_name = ("nova.api.openstack.compute.contrib."
- "server_group_quotas.Server_group_quotas")
-
-
-class ServerGroupQuotas_LimitsSampleXmlTest(LimitsSampleXmlTest):
- extension_name = ("nova.api.openstack.compute.contrib."
- "server_group_quotas.Server_group_quotas")
-
-
-class ServerGroupQuotas_UsedLimitsSamplesJsonTest(UsedLimitsSamplesJsonTest):
- extension_name = ("nova.api.openstack.compute.contrib."
- "server_group_quotas.Server_group_quotas")
- extends_name = ("nova.api.openstack.compute.contrib.used_limits."
- "Used_limits")
-
-
-class ServerGroupQuotas_UsedLimitsSamplesXmlTest(UsedLimitsSamplesXmlTest):
- extension_name = ("nova.api.openstack.compute.contrib."
- "server_group_quotas.Server_group_quotas")
- extends_name = ("nova.api.openstack.compute.contrib.used_limits."
- "Used_limits")
-
-
-class ServerGroupQuotas_QuotasSampleJsonTests(QuotasSampleJsonTests):
- extension_name = ("nova.api.openstack.compute.contrib."
- "server_group_quotas.Server_group_quotas")
- extends_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
-
-
-class ServerGroupQuotas_QuotasSampleXmlTests(QuotasSampleXmlTests):
- extension_name = ("nova.api.openstack.compute.contrib."
- "server_group_quotas.Server_group_quotas")
- extends_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
-
-
-class ServerGroupQuotasQuota_ClassesSampleJsonTests(
- QuotaClassesSampleJsonTests):
- extension_name = ("nova.api.openstack.compute.contrib."
- "server_group_quotas.Server_group_quotas")
- extends_name = ("nova.api.openstack.compute.contrib.quota_classes."
- "Quota_classes")
-
-
-class ServerGroupQuotas_QuotaClassesSampleXmlTests(
- QuotaClassesSampleXmlTests):
- extension_name = ("nova.api.openstack.compute.contrib."
- "server_group_quotas.Server_group_quotas")
- extends_name = ("nova.api.openstack.compute.contrib.quota_classes."
- "Quota_classes")
diff --git a/nova/tests/integrated/test_extensions.py b/nova/tests/integrated/test_extensions.py
deleted file mode 100644
index 5f3420f37a..0000000000
--- a/nova/tests/integrated/test_extensions.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2011 Justin Santa Barbara
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.config import cfg
-
-# Import extensions to pull in osapi_compute_extension CONF option used below.
-from nova.openstack.common import log as logging
-from nova.tests.integrated import integrated_helpers
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-
-
-class ExtensionsTest(integrated_helpers._IntegratedTestBase):
- _api_version = 'v2'
-
- def _get_flags(self):
- f = super(ExtensionsTest, self)._get_flags()
- f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
- f['osapi_compute_extension'].append(
- 'nova.tests.api.openstack.compute.extensions.'
- 'foxinsocks.Foxinsocks')
- return f
-
- def test_get_foxnsocks(self):
- # Simple check that fox-n-socks works.
- response = self.api.api_request('/foxnsocks')
- foxnsocks = response.content
- LOG.debug("foxnsocks: %s" % foxnsocks)
- self.assertEqual('Try to say this Mr. Knox, sir...', foxnsocks)
diff --git a/nova/tests/integrated/test_login.py b/nova/tests/integrated/test_login.py
deleted file mode 100644
index 4fd22ea060..0000000000
--- a/nova/tests/integrated/test_login.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2011 Justin Santa Barbara
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from nova.openstack.common import log as logging
-from nova.tests.integrated.api import client
-from nova.tests.integrated import integrated_helpers
-
-
-LOG = logging.getLogger(__name__)
-
-
-class LoginTest(integrated_helpers._IntegratedTestBase):
- _api_version = 'v2'
-
- def test_login(self):
- # Simple check - we list flavors - so we know we're logged in.
- flavors = self.api.get_flavors()
- for flavor in flavors:
- LOG.debug("flavor: %s", flavor)
-
-
-class LoginTestV3(client.TestOpenStackClientV3Mixin, LoginTest):
- _api_version = 'v3'
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
deleted file mode 100644
index c821cd4611..0000000000
--- a/nova/tests/integrated/test_servers.py
+++ /dev/null
@@ -1,522 +0,0 @@
-# Copyright 2011 Justin Santa Barbara
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-import time
-import zlib
-
-from oslo.utils import timeutils
-
-from nova import context
-from nova import exception
-from nova.openstack.common import log as logging
-from nova.tests import fake_network
-from nova.tests.integrated.api import client
-from nova.tests.integrated import integrated_helpers
-import nova.virt.fake
-
-
-LOG = logging.getLogger(__name__)
-
-
-class ServersTest(integrated_helpers._IntegratedTestBase):
- _api_version = 'v2'
- _force_delete_parameter = 'forceDelete'
- _image_ref_parameter = 'imageRef'
- _flavor_ref_parameter = 'flavorRef'
- _access_ipv4_parameter = 'accessIPv4'
- _access_ipv6_parameter = 'accessIPv6'
- _return_resv_id_parameter = 'return_reservation_id'
- _min_count_parameter = 'min_count'
-
- def setUp(self):
- super(ServersTest, self).setUp()
- self.conductor = self.start_service(
- 'conductor', manager='nova.conductor.manager.ConductorManager')
-
- def _wait_for_state_change(self, server, from_status):
- for i in xrange(0, 50):
- server = self.api.get_server(server['id'])
- if server['status'] != from_status:
- break
- time.sleep(.1)
-
- return server
-
- def _restart_compute_service(self, *args, **kwargs):
- """restart compute service. NOTE: fake driver forgets all instances."""
- self.compute.kill()
- self.compute = self.start_service('compute', *args, **kwargs)
-
- def test_get_servers(self):
- # Simple check that listing servers works.
- servers = self.api.get_servers()
- for server in servers:
- LOG.debug("server: %s" % server)
-
- def test_create_server_with_error(self):
- # Create a server which will enter error state.
- fake_network.set_stub_network_methods(self.stubs)
-
- def throw_error(*args, **kwargs):
- raise exception.BuildAbortException(reason='',
- instance_uuid='fake')
-
- self.stubs.Set(nova.virt.fake.FakeDriver, 'spawn', throw_error)
-
- server = self._build_minimal_create_server_request()
- created_server = self.api.post_server({"server": server})
- created_server_id = created_server['id']
-
- found_server = self.api.get_server(created_server_id)
- self.assertEqual(created_server_id, found_server['id'])
-
- found_server = self._wait_for_state_change(found_server, 'BUILD')
-
- self.assertEqual('ERROR', found_server['status'])
- self._delete_server(created_server_id)
-
- def test_create_and_delete_server(self):
- # Creates and deletes a server.
- fake_network.set_stub_network_methods(self.stubs)
-
- # Create server
- # Build the server data gradually, checking errors along the way
- server = {}
- good_server = self._build_minimal_create_server_request()
-
- post = {'server': server}
-
- # Without an imageRef, this throws 500.
- # TODO(justinsb): Check whatever the spec says should be thrown here
- self.assertRaises(client.OpenStackApiException,
- self.api.post_server, post)
-
- # With an invalid imageRef, this throws 500.
- server[self._image_ref_parameter] = self.get_invalid_image()
- # TODO(justinsb): Check whatever the spec says should be thrown here
- self.assertRaises(client.OpenStackApiException,
- self.api.post_server, post)
-
- # Add a valid imageRef
- server[self._image_ref_parameter] = good_server.get(
- self._image_ref_parameter)
-
- # Without flavorRef, this throws 500
- # TODO(justinsb): Check whatever the spec says should be thrown here
- self.assertRaises(client.OpenStackApiException,
- self.api.post_server, post)
-
- server[self._flavor_ref_parameter] = good_server.get(
- self._flavor_ref_parameter)
-
- # Without a name, this throws 500
- # TODO(justinsb): Check whatever the spec says should be thrown here
- self.assertRaises(client.OpenStackApiException,
- self.api.post_server, post)
-
- # Set a valid server name
- server['name'] = good_server['name']
-
- created_server = self.api.post_server(post)
- LOG.debug("created_server: %s" % created_server)
- self.assertTrue(created_server['id'])
- created_server_id = created_server['id']
-
- # Check it's there
- found_server = self.api.get_server(created_server_id)
- self.assertEqual(created_server_id, found_server['id'])
-
- # It should also be in the all-servers list
- servers = self.api.get_servers()
- server_ids = [s['id'] for s in servers]
- self.assertIn(created_server_id, server_ids)
-
- found_server = self._wait_for_state_change(found_server, 'BUILD')
- # It should be available...
- # TODO(justinsb): Mock doesn't yet do this...
- self.assertEqual('ACTIVE', found_server['status'])
- servers = self.api.get_servers(detail=True)
- for server in servers:
- self.assertIn("image", server)
- self.assertIn("flavor", server)
-
- self._delete_server(created_server_id)
-
- def _force_reclaim(self):
- # Make sure that compute manager thinks the instance is
- # old enough to be expired
- the_past = timeutils.utcnow() + datetime.timedelta(hours=1)
- timeutils.set_time_override(override_time=the_past)
- ctxt = context.get_admin_context()
- self.compute._reclaim_queued_deletes(ctxt)
-
- def test_deferred_delete(self):
- # Creates, deletes and waits for server to be reclaimed.
- self.flags(reclaim_instance_interval=1)
- fake_network.set_stub_network_methods(self.stubs)
-
- # Create server
- server = self._build_minimal_create_server_request()
-
- created_server = self.api.post_server({'server': server})
- LOG.debug("created_server: %s" % created_server)
- self.assertTrue(created_server['id'])
- created_server_id = created_server['id']
-
- # Wait for it to finish being created
- found_server = self._wait_for_state_change(created_server, 'BUILD')
-
- # It should be available...
- self.assertEqual('ACTIVE', found_server['status'])
-
- # Cannot restore unless instance is deleted
- self.assertRaises(client.OpenStackApiException,
- self.api.post_server_action, created_server_id,
- {'restore': {}})
-
- # Delete the server
- self.api.delete_server(created_server_id)
-
- # Wait for queued deletion
- found_server = self._wait_for_state_change(found_server, 'ACTIVE')
- self.assertEqual('SOFT_DELETED', found_server['status'])
-
- self._force_reclaim()
-
- # Wait for real deletion
- self._wait_for_deletion(created_server_id)
-
- def test_deferred_delete_restore(self):
- # Creates, deletes and restores a server.
- self.flags(reclaim_instance_interval=3600)
- fake_network.set_stub_network_methods(self.stubs)
-
- # Create server
- server = self._build_minimal_create_server_request()
-
- created_server = self.api.post_server({'server': server})
- LOG.debug("created_server: %s" % created_server)
- self.assertTrue(created_server['id'])
- created_server_id = created_server['id']
-
- # Wait for it to finish being created
- found_server = self._wait_for_state_change(created_server, 'BUILD')
-
- # It should be available...
- self.assertEqual('ACTIVE', found_server['status'])
-
- # Delete the server
- self.api.delete_server(created_server_id)
-
- # Wait for queued deletion
- found_server = self._wait_for_state_change(found_server, 'ACTIVE')
- self.assertEqual('SOFT_DELETED', found_server['status'])
-
- # Restore server
- self.api.post_server_action(created_server_id, {'restore': {}})
-
- # Wait for server to become active again
- found_server = self._wait_for_state_change(found_server, 'DELETED')
- self.assertEqual('ACTIVE', found_server['status'])
-
- def test_deferred_delete_force(self):
- # Creates, deletes and force deletes a server.
- self.flags(reclaim_instance_interval=3600)
- fake_network.set_stub_network_methods(self.stubs)
-
- # Create server
- server = self._build_minimal_create_server_request()
-
- created_server = self.api.post_server({'server': server})
- LOG.debug("created_server: %s" % created_server)
- self.assertTrue(created_server['id'])
- created_server_id = created_server['id']
-
- # Wait for it to finish being created
- found_server = self._wait_for_state_change(created_server, 'BUILD')
-
- # It should be available...
- self.assertEqual('ACTIVE', found_server['status'])
-
- # Delete the server
- self.api.delete_server(created_server_id)
-
- # Wait for queued deletion
- found_server = self._wait_for_state_change(found_server, 'ACTIVE')
- self.assertEqual('SOFT_DELETED', found_server['status'])
-
- # Force delete server
- self.api.post_server_action(created_server_id,
- {self._force_delete_parameter: {}})
-
- # Wait for real deletion
- self._wait_for_deletion(created_server_id)
-
- def _wait_for_deletion(self, server_id):
- # Wait (briefly) for deletion
- for _retries in range(50):
- try:
- found_server = self.api.get_server(server_id)
- except client.OpenStackApiNotFoundException:
- found_server = None
- LOG.debug("Got 404, proceeding")
- break
-
- LOG.debug("Found_server=%s" % found_server)
-
- # TODO(justinsb): Mock doesn't yet do accurate state changes
- # if found_server['status'] != 'deleting':
- # break
- time.sleep(.1)
-
- # Should be gone
- self.assertFalse(found_server)
-
- def _delete_server(self, server_id):
- # Delete the server
- self.api.delete_server(server_id)
- self._wait_for_deletion(server_id)
-
- def test_create_server_with_metadata(self):
- # Creates a server with metadata.
- fake_network.set_stub_network_methods(self.stubs)
-
- # Build the server data gradually, checking errors along the way
- server = self._build_minimal_create_server_request()
-
- metadata = {}
- for i in range(30):
- metadata['key_%s' % i] = 'value_%s' % i
-
- server['metadata'] = metadata
-
- post = {'server': server}
- created_server = self.api.post_server(post)
- LOG.debug("created_server: %s" % created_server)
- self.assertTrue(created_server['id'])
- created_server_id = created_server['id']
-
- found_server = self.api.get_server(created_server_id)
- self.assertEqual(created_server_id, found_server['id'])
- self.assertEqual(metadata, found_server.get('metadata'))
-
- # The server should also be in the all-servers details list
- servers = self.api.get_servers(detail=True)
- server_map = dict((server['id'], server) for server in servers)
- found_server = server_map.get(created_server_id)
- self.assertTrue(found_server)
- # Details do include metadata
- self.assertEqual(metadata, found_server.get('metadata'))
-
- # The server should also be in the all-servers summary list
- servers = self.api.get_servers(detail=False)
- server_map = dict((server['id'], server) for server in servers)
- found_server = server_map.get(created_server_id)
- self.assertTrue(found_server)
- # Summary should not include metadata
- self.assertFalse(found_server.get('metadata'))
-
- # Cleanup
- self._delete_server(created_server_id)
-
- def test_create_and_rebuild_server(self):
- # Rebuild a server with metadata.
- fake_network.set_stub_network_methods(self.stubs)
-
- # create a server with initially has no metadata
- server = self._build_minimal_create_server_request()
- server_post = {'server': server}
-
- metadata = {}
- for i in range(30):
- metadata['key_%s' % i] = 'value_%s' % i
-
- server_post['server']['metadata'] = metadata
-
- created_server = self.api.post_server(server_post)
- LOG.debug("created_server: %s" % created_server)
- self.assertTrue(created_server['id'])
- created_server_id = created_server['id']
-
- created_server = self._wait_for_state_change(created_server, 'BUILD')
-
- # rebuild the server with metadata and other server attributes
- post = {}
- post['rebuild'] = {
- self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
- "name": "blah",
- self._access_ipv4_parameter: "172.19.0.2",
- self._access_ipv6_parameter: "fe80::2",
- "metadata": {'some': 'thing'},
- }
- post['rebuild'].update(self._get_access_ips_params())
-
- self.api.post_server_action(created_server_id, post)
- LOG.debug("rebuilt server: %s" % created_server)
- self.assertTrue(created_server['id'])
-
- found_server = self.api.get_server(created_server_id)
- self.assertEqual(created_server_id, found_server['id'])
- self.assertEqual({'some': 'thing'}, found_server.get('metadata'))
- self.assertEqual('blah', found_server.get('name'))
- self.assertEqual(post['rebuild'][self._image_ref_parameter],
- found_server.get('image')['id'])
- self._verify_access_ips(found_server)
-
- # rebuild the server with empty metadata and nothing else
- post = {}
- post['rebuild'] = {
- self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
- "metadata": {},
- }
-
- self.api.post_server_action(created_server_id, post)
- LOG.debug("rebuilt server: %s" % created_server)
- self.assertTrue(created_server['id'])
-
- found_server = self.api.get_server(created_server_id)
- self.assertEqual(created_server_id, found_server['id'])
- self.assertEqual({}, found_server.get('metadata'))
- self.assertEqual('blah', found_server.get('name'))
- self.assertEqual(post['rebuild'][self._image_ref_parameter],
- found_server.get('image')['id'])
- self._verify_access_ips(found_server)
-
- # Cleanup
- self._delete_server(created_server_id)
-
- def _get_access_ips_params(self):
- return {self._access_ipv4_parameter: "172.19.0.2",
- self._access_ipv6_parameter: "fe80::2"}
-
- def _verify_access_ips(self, server):
- self.assertEqual('172.19.0.2',
- server[self._access_ipv4_parameter])
- self.assertEqual('fe80::2', server[self._access_ipv6_parameter])
-
- def test_rename_server(self):
- # Test building and renaming a server.
- fake_network.set_stub_network_methods(self.stubs)
-
- # Create a server
- server = self._build_minimal_create_server_request()
- created_server = self.api.post_server({'server': server})
- LOG.debug("created_server: %s" % created_server)
- server_id = created_server['id']
- self.assertTrue(server_id)
-
- # Rename the server to 'new-name'
- self.api.put_server(server_id, {'server': {'name': 'new-name'}})
-
- # Check the name of the server
- created_server = self.api.get_server(server_id)
- self.assertEqual(created_server['name'], 'new-name')
-
- # Cleanup
- self._delete_server(server_id)
-
- def test_create_multiple_servers(self):
- # Creates multiple servers and checks for reservation_id.
-
- # Create 2 servers, setting 'return_reservation_id, which should
- # return a reservation_id
- server = self._build_minimal_create_server_request()
- server[self._min_count_parameter] = 2
- server[self._return_resv_id_parameter] = True
- post = {'server': server}
- response = self.api.post_server(post)
- self.assertIn('reservation_id', response)
- reservation_id = response['reservation_id']
- self.assertNotIn(reservation_id, ['', None])
-
- # Create 1 more server, which should not return a reservation_id
- server = self._build_minimal_create_server_request()
- post = {'server': server}
- created_server = self.api.post_server(post)
- self.assertTrue(created_server['id'])
- created_server_id = created_server['id']
-
- # lookup servers created by the first request.
- servers = self.api.get_servers(detail=True,
- search_opts={'reservation_id': reservation_id})
- server_map = dict((server['id'], server) for server in servers)
- found_server = server_map.get(created_server_id)
- # The server from the 2nd request should not be there.
- self.assertIsNone(found_server)
- # Should have found 2 servers.
- self.assertEqual(len(server_map), 2)
-
- # Cleanup
- self._delete_server(created_server_id)
- for server_id in server_map.iterkeys():
- self._delete_server(server_id)
-
- def test_create_server_with_injected_files(self):
- # Creates a server with injected_files.
- fake_network.set_stub_network_methods(self.stubs)
- personality = []
-
- # Inject a text file
- data = 'Hello, World!'
- personality.append({
- 'path': '/helloworld.txt',
- 'contents': data.encode('base64'),
- })
-
- # Inject a binary file
- data = zlib.compress('Hello, World!')
- personality.append({
- 'path': '/helloworld.zip',
- 'contents': data.encode('base64'),
- })
-
- # Create server
- server = self._build_minimal_create_server_request()
- server['personality'] = personality
-
- post = {'server': server}
-
- created_server = self.api.post_server(post)
- LOG.debug("created_server: %s" % created_server)
- self.assertTrue(created_server['id'])
- created_server_id = created_server['id']
-
- # Check it's there
- found_server = self.api.get_server(created_server_id)
- self.assertEqual(created_server_id, found_server['id'])
-
- found_server = self._wait_for_state_change(found_server, 'BUILD')
- self.assertEqual('ACTIVE', found_server['status'])
-
- # Cleanup
- self._delete_server(created_server_id)
-
-
-class ServersTestV3(client.TestOpenStackClientV3Mixin, ServersTest):
- _force_delete_parameter = 'forceDelete'
- _api_version = 'v3'
- _image_ref_parameter = 'imageRef'
- _flavor_ref_parameter = 'flavorRef'
- _access_ipv4_parameter = None
- _access_ipv6_parameter = None
-
- def _get_access_ips_params(self):
- return {}
-
- def _verify_access_ips(self, server):
- # NOTE(alexxu): access_ips was demoted as extensions in v3 api.
- # So skips verifying access_ips
- pass
diff --git a/nova/tests/integrated/test_xml.py b/nova/tests/integrated/test_xml.py
deleted file mode 100644
index a61c86f750..0000000000
--- a/nova/tests/integrated/test_xml.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2011 Justin Santa Barbara
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-
-from nova.api.openstack import common
-from nova.api.openstack import xmlutil
-from nova.openstack.common import log as logging
-from nova.tests.integrated import integrated_helpers
-
-
-LOG = logging.getLogger(__name__)
-
-
-class XmlTests(integrated_helpers._IntegratedTestBase):
- """"Some basic XML sanity checks."""
-
- _api_version = 'v2'
-
- def test_namespace_limits(self):
- headers = {}
- headers['Accept'] = 'application/xml'
-
- response = self.api.api_request('/limits', headers=headers)
- data = response.content
- LOG.debug("data: %s" % data)
- root = etree.XML(data)
- self.assertEqual(root.nsmap.get(None), xmlutil.XMLNS_COMMON_V10)
-
- def test_namespace_servers(self):
- # /servers should have v1.1 namespace (has changed in 1.1).
- headers = {}
- headers['Accept'] = 'application/xml'
-
- response = self.api.api_request('/servers', headers=headers)
- data = response.content
- LOG.debug("data: %s" % data)
- root = etree.XML(data)
- self.assertEqual(root.nsmap.get(None), common.XML_NS_V11)
diff --git a/nova/tests/integrated/v3/api_sample_base.py b/nova/tests/integrated/v3/api_sample_base.py
deleted file mode 100644
index 56c0e8bee7..0000000000
--- a/nova/tests/integrated/v3/api_sample_base.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-
-from oslo.config import cfg
-
-from nova.api.openstack import API_V3_CORE_EXTENSIONS # noqa
-from nova import test
-from nova.tests import fake_network
-from nova.tests import fake_utils
-from nova.tests.integrated import api_samples_test_base
-
-CONF = cfg.CONF
-
-
-class ApiSampleTestBaseV3(api_samples_test_base.ApiSampleTestBase):
- _api_version = 'v3'
- sample_dir = None
- extra_extensions_to_load = None
-
- def setUp(self):
- self.flags(use_ipv6=False,
- osapi_compute_link_prefix=self._get_host(),
- osapi_glance_link_prefix=self._get_glance_host())
- if not self.all_extensions:
- # Set the whitelist to ensure only the extensions we are
- # interested in are loaded so the api samples don't include
- # data from extensions we are not interested in
- whitelist = API_V3_CORE_EXTENSIONS.copy()
- if self.extension_name:
- whitelist.add(self.extension_name)
- if self.extra_extensions_to_load:
- whitelist.update(set(self.extra_extensions_to_load))
-
- CONF.set_override('extensions_whitelist', whitelist,
- 'osapi_v3')
-
- super(ApiSampleTestBaseV3, self).setUp()
- self.useFixture(test.SampleNetworks(host=self.network.host))
- fake_network.stub_compute_with_ips(self.stubs)
- fake_utils.stub_out_utils_spawn_n(self.stubs)
- self.generate_samples = os.getenv('GENERATE_SAMPLES') is not None
-
- @classmethod
- def _get_sample_path(cls, name, dirname, suffix=''):
- parts = [dirname]
- parts.append('api_samples')
- if cls.all_extensions:
- parts.append('all_extensions')
- elif cls.sample_dir:
- parts.append(cls.sample_dir)
- elif cls.extension_name:
- parts.append(cls.extension_name)
- parts.append(name + "." + cls.ctype + suffix)
- return os.path.join(*parts)
-
- @classmethod
- def _get_sample(cls, name):
- dirname = os.path.dirname(os.path.abspath(__file__))
- dirname = os.path.normpath(os.path.join(dirname, "../../../../doc/v3"))
- return cls._get_sample_path(name, dirname)
-
- @classmethod
- def _get_template(cls, name):
- dirname = os.path.dirname(os.path.abspath(__file__))
- return cls._get_sample_path(name, dirname, suffix='.tpl')
diff --git a/nova/tests/integrated/v3/test_access_ips.py b/nova/tests/integrated/v3/test_access_ips.py
deleted file mode 100644
index 36af1e71e5..0000000000
--- a/nova/tests/integrated/v3/test_access_ips.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.image import fake
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class AccessIPsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
- extension_name = 'os-access-ips'
-
- def _servers_post(self, subs):
- response = self._do_post('servers', 'server-post-req', subs)
- subs.update(self._get_regexes())
- return self._verify_response('server-post-resp', subs, response, 202)
-
- def test_servers_post(self):
- subs = {
- 'image_id': fake.get_valid_image_id(),
- 'host': self._get_host(),
- 'access_ip_v4': '1.2.3.4',
- 'access_ip_v6': 'fe80::'
- }
- self._servers_post(subs)
-
- def test_servers_get(self):
- subs = {
- 'image_id': fake.get_valid_image_id(),
- 'host': self._get_host(),
- 'access_ip_v4': '1.2.3.4',
- 'access_ip_v6': 'fe80::'
- }
- uuid = self._servers_post(subs)
- response = self._do_get('servers/%s' % uuid)
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- self._verify_response('server-get-resp', subs, response, 200)
-
- def test_servers_details(self):
- subs = {
- 'image_id': fake.get_valid_image_id(),
- 'host': self._get_host(),
- 'access_ip_v4': '1.2.3.4',
- 'access_ip_v6': 'fe80::'
- }
- uuid = self._servers_post(subs)
- response = self._do_get('servers/detail')
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- self._verify_response('servers-details-resp', subs, response, 200)
-
- def test_servers_rebuild(self):
- subs = {
- 'image_id': fake.get_valid_image_id(),
- 'host': self._get_host(),
- 'access_ip_v4': '1.2.3.4',
- 'access_ip_v6': 'fe80::'
- }
- uuid = self._servers_post(subs)
- subs['access_ip_v4'] = "4.3.2.1"
- subs['access_ip_v6'] = '80fe::'
- response = self._do_post('servers/%s/action' % uuid,
- 'server-action-rebuild', subs)
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- self._verify_response('server-action-rebuild-resp',
- subs, response, 202)
-
- def test_servers_update(self):
- subs = {
- 'image_id': fake.get_valid_image_id(),
- 'host': self._get_host(),
- 'access_ip_v4': '1.2.3.4',
- 'access_ip_v6': 'fe80::'
- }
- uuid = self._servers_post(subs)
- subs['access_ip_v4'] = "4.3.2.1"
- subs['access_ip_v6'] = '80fe::'
- response = self._do_put('servers/%s' % uuid, 'server-put-req', subs)
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- self._verify_response('server-put-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_admin_actions.py b/nova/tests/integrated/v3/test_admin_actions.py
deleted file mode 100644
index edfce562f7..0000000000
--- a/nova/tests/integrated/v3/test_admin_actions.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class AdminActionsSamplesJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-admin-actions"
-
- def setUp(self):
- """setUp Method for AdminActions api samples extension
-
- This method creates the server that will be used in each tests
- """
- super(AdminActionsSamplesJsonTest, self).setUp()
- self.uuid = self._post_server()
-
- def test_post_reset_network(self):
- # Get api samples to reset server network request.
- response = self._do_post('servers/%s/action' % self.uuid,
- 'admin-actions-reset-network', {})
- self.assertEqual(response.status_code, 202)
-
- def test_post_inject_network_info(self):
- # Get api samples to inject network info request.
- response = self._do_post('servers/%s/action' % self.uuid,
- 'admin-actions-inject-network-info', {})
- self.assertEqual(response.status_code, 202)
-
- def test_post_reset_state(self):
- # get api samples to server reset state request.
- response = self._do_post('servers/%s/action' % self.uuid,
- 'admin-actions-reset-server-state', {})
- self.assertEqual(response.status_code, 202)
diff --git a/nova/tests/integrated/v3/test_admin_password.py b/nova/tests/integrated/v3/test_admin_password.py
deleted file mode 100644
index 4e5304e1bc..0000000000
--- a/nova/tests/integrated/v3/test_admin_password.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class AdminPasswordJsonTest(test_servers.ServersSampleBase):
- extension_name = 'os-admin-password'
-
- def test_server_password(self):
- uuid = self._post_server()
- subs = {"password": "foo"}
- response = self._do_post('servers/%s/action' % uuid,
- 'admin-password-change-password',
- subs)
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
diff --git a/nova/tests/integrated/v3/test_agents.py b/nova/tests/integrated/v3/test_agents.py
deleted file mode 100644
index 991de50e1e..0000000000
--- a/nova/tests/integrated/v3/test_agents.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova import db
-from nova.db.sqlalchemy import models
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class AgentsJsonTest(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-agents"
-
- def setUp(self):
- super(AgentsJsonTest, self).setUp()
-
- fake_agents_list = [{'url': 'http://example.com/path/to/resource',
- 'hypervisor': 'hypervisor',
- 'architecture': 'x86',
- 'os': 'os',
- 'version': '8.0',
- 'md5hash': 'add6bb58e139be103324d04d82d8f545',
- 'id': 1}]
-
- def fake_agent_build_create(context, values):
- values['id'] = 1
- agent_build_ref = models.AgentBuild()
- agent_build_ref.update(values)
- return agent_build_ref
-
- def fake_agent_build_get_all(context, hypervisor):
- agent_build_all = []
- for agent in fake_agents_list:
- if hypervisor and hypervisor != agent['hypervisor']:
- continue
- agent_build_ref = models.AgentBuild()
- agent_build_ref.update(agent)
- agent_build_all.append(agent_build_ref)
- return agent_build_all
-
- def fake_agent_build_update(context, agent_build_id, values):
- pass
-
- def fake_agent_build_destroy(context, agent_update_id):
- pass
-
- self.stubs.Set(db, "agent_build_create",
- fake_agent_build_create)
- self.stubs.Set(db, "agent_build_get_all",
- fake_agent_build_get_all)
- self.stubs.Set(db, "agent_build_update",
- fake_agent_build_update)
- self.stubs.Set(db, "agent_build_destroy",
- fake_agent_build_destroy)
-
- def test_agent_create(self):
- # Creates a new agent build.
- project = {'url': 'http://example.com/path/to/resource',
- 'hypervisor': 'hypervisor',
- 'architecture': 'x86',
- 'os': 'os',
- 'version': '8.0',
- 'md5hash': 'add6bb58e139be103324d04d82d8f545'
- }
- response = self._do_post('os-agents', 'agent-post-req',
- project)
- self._verify_response('agent-post-resp', project, response, 200)
-
- def test_agent_list(self):
- # Return a list of all agent builds.
- response = self._do_get('os-agents')
- self._verify_response('agents-get-resp', {}, response, 200)
-
- def test_agent_update(self):
- # Update an existing agent build.
- agent_id = 1
- subs = {'version': '7.0',
- 'url': 'http://example.com/path/to/resource',
- 'md5hash': 'add6bb58e139be103324d04d82d8f545'}
- response = self._do_put('os-agents/%s' % agent_id,
- 'agent-update-put-req', subs)
- self._verify_response('agent-update-put-resp', subs, response, 200)
-
- def test_agent_delete(self):
- # Deletes an existing agent build.
- agent_id = 1
- response = self._do_delete('os-agents/%s' % agent_id)
- self.assertEqual(response.status_code, 200)
diff --git a/nova/tests/integrated/v3/test_aggregates.py b/nova/tests/integrated/v3/test_aggregates.py
deleted file mode 100644
index 819cf3447a..0000000000
--- a/nova/tests/integrated/v3/test_aggregates.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class AggregatesSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-aggregates"
-
- def test_aggregate_create(self):
- subs = {
- "aggregate_id": '(?P<id>\d+)'
- }
- response = self._do_post('os-aggregates', 'aggregate-post-req', subs)
- subs.update(self._get_regexes())
- return self._verify_response('aggregate-post-resp',
- subs, response, 200)
-
- def test_list_aggregates(self):
- self.test_aggregate_create()
- response = self._do_get('os-aggregates')
- subs = self._get_regexes()
- self._verify_response('aggregates-list-get-resp', subs, response, 200)
-
- def test_aggregate_get(self):
- agg_id = self.test_aggregate_create()
- response = self._do_get('os-aggregates/%s' % agg_id)
- subs = self._get_regexes()
- self._verify_response('aggregates-get-resp', subs, response, 200)
-
- def test_add_metadata(self):
- agg_id = self.test_aggregate_create()
- response = self._do_post('os-aggregates/%s/action' % agg_id,
- 'aggregate-metadata-post-req',
- {'action': 'set_metadata'})
- subs = self._get_regexes()
- self._verify_response('aggregates-metadata-post-resp', subs,
- response, 200)
-
- def test_add_host(self):
- aggregate_id = self.test_aggregate_create()
- subs = {
- "host_name": self.compute.host,
- }
- response = self._do_post('os-aggregates/%s/action' % aggregate_id,
- 'aggregate-add-host-post-req', subs)
- subs.update(self._get_regexes())
- self._verify_response('aggregates-add-host-post-resp', subs,
- response, 200)
-
- def test_remove_host(self):
- self.test_add_host()
- subs = {
- "host_name": self.compute.host,
- }
- response = self._do_post('os-aggregates/1/action',
- 'aggregate-remove-host-post-req', subs)
- subs.update(self._get_regexes())
- self._verify_response('aggregates-remove-host-post-resp',
- subs, response, 200)
-
- def test_update_aggregate(self):
- aggregate_id = self.test_aggregate_create()
- response = self._do_put('os-aggregates/%s' % aggregate_id,
- 'aggregate-update-post-req', {})
- subs = self._get_regexes()
- self._verify_response('aggregate-update-post-resp',
- subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_attach_interfaces.py b/nova/tests/integrated/v3/test_attach_interfaces.py
deleted file mode 100644
index 50ea7910fb..0000000000
--- a/nova/tests/integrated/v3/test_attach_interfaces.py
+++ /dev/null
@@ -1,166 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.compute import api as compute_api
-from nova import exception
-from nova.network import api as network_api
-from nova.tests import fake_network_cache_model
-from nova.tests.integrated.v3 import test_servers
-
-
-class AttachInterfacesSampleJsonTest(test_servers.ServersSampleBase):
- extension_name = 'os-attach-interfaces'
-
- def setUp(self):
- super(AttachInterfacesSampleJsonTest, self).setUp()
-
- def fake_list_ports(self, *args, **kwargs):
- uuid = kwargs.get('device_id', None)
- if not uuid:
- raise exception.InstanceNotFound(instance_id=None)
- port_data = {
- "id": "ce531f90-199f-48c0-816c-13e38010b442",
- "network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
- "admin_state_up": True,
- "status": "ACTIVE",
- "mac_address": "fa:16:3e:4c:2c:30",
- "fixed_ips": [
- {
- "ip_address": "192.168.1.3",
- "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
- }
- ],
- "device_id": uuid,
- }
- ports = {'ports': [port_data]}
- return ports
-
- def fake_show_port(self, context, port_id=None):
- if not port_id:
- raise exception.PortNotFound(port_id=None)
- port_data = {
- "id": port_id,
- "network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
- "admin_state_up": True,
- "status": "ACTIVE",
- "mac_address": "fa:16:3e:4c:2c:30",
- "fixed_ips": [
- {
- "ip_address": "192.168.1.3",
- "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
- }
- ],
- "device_id": 'bece68a3-2f8b-4e66-9092-244493d6aba7',
- }
- port = {'port': port_data}
- return port
-
- def fake_attach_interface(self, context, instance,
- network_id, port_id,
- requested_ip='192.168.1.3'):
- if not network_id:
- network_id = "fake_net_uuid"
- if not port_id:
- port_id = "fake_port_uuid"
- vif = fake_network_cache_model.new_vif()
- vif['id'] = port_id
- vif['network']['id'] = network_id
- vif['network']['subnets'][0]['ips'][0] = requested_ip
- return vif
-
- def fake_detach_interface(self, context, instance, port_id):
- pass
-
- self.stubs.Set(network_api.API, 'list_ports', fake_list_ports)
- self.stubs.Set(network_api.API, 'show_port', fake_show_port)
- self.stubs.Set(compute_api.API, 'attach_interface',
- fake_attach_interface)
- self.stubs.Set(compute_api.API, 'detach_interface',
- fake_detach_interface)
- self.flags(auth_strategy=None, group='neutron')
- self.flags(url='http://anyhost/', group='neutron')
- self.flags(url_timeout=30, group='neutron')
-
- def generalize_subs(self, subs, vanilla_regexes):
- subs['subnet_id'] = vanilla_regexes['uuid']
- subs['net_id'] = vanilla_regexes['uuid']
- subs['port_id'] = vanilla_regexes['uuid']
- subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
- subs['ip_address'] = vanilla_regexes['ip']
- return subs
-
- def test_list_interfaces(self):
- instance_uuid = self._post_server()
- response = self._do_get('servers/%s/os-interface'
- % instance_uuid)
- subs = {
- 'ip_address': '192.168.1.3',
- 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
- 'mac_addr': 'fa:16:3e:4c:2c:30',
- 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
- 'port_id': 'ce531f90-199f-48c0-816c-13e38010b442',
- 'port_state': 'ACTIVE'
- }
- self._verify_response('attach-interfaces-list-resp', subs,
- response, 200)
-
- def _stub_show_for_instance(self, instance_uuid, port_id):
- show_port = network_api.API().show_port(None, port_id)
- show_port['port']['device_id'] = instance_uuid
- self.stubs.Set(network_api.API, 'show_port', lambda *a, **k: show_port)
-
- def test_show_interfaces(self):
- instance_uuid = self._post_server()
- port_id = 'ce531f90-199f-48c0-816c-13e38010b442'
- self._stub_show_for_instance(instance_uuid, port_id)
- response = self._do_get('servers/%s/os-interface/%s' %
- (instance_uuid, port_id))
- subs = {
- 'ip_address': '192.168.1.3',
- 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
- 'mac_addr': 'fa:16:3e:4c:2c:30',
- 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
- 'port_id': port_id,
- 'port_state': 'ACTIVE'
- }
- self._verify_response('attach-interfaces-show-resp', subs,
- response, 200)
-
- def test_create_interfaces(self, instance_uuid=None):
- if instance_uuid is None:
- instance_uuid = self._post_server()
- subs = {
- 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
- 'port_id': 'ce531f90-199f-48c0-816c-13e38010b442',
- 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
- 'ip_address': '192.168.1.3',
- 'port_state': 'ACTIVE',
- 'mac_addr': 'fa:16:3e:4c:2c:30',
- }
- self._stub_show_for_instance(instance_uuid, subs['port_id'])
- response = self._do_post('servers/%s/os-interface'
- % instance_uuid,
- 'attach-interfaces-create-req', subs)
- subs.update(self._get_regexes())
- self._verify_response('attach-interfaces-create-resp', subs,
- response, 200)
-
- def test_delete_interfaces(self):
- instance_uuid = self._post_server()
- port_id = 'ce531f90-199f-48c0-816c-13e38010b442'
- response = self._do_delete('servers/%s/os-interface/%s' %
- (instance_uuid, port_id))
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
diff --git a/nova/tests/integrated/v3/test_availability_zone.py b/nova/tests/integrated/v3/test_availability_zone.py
deleted file mode 100644
index 31a5ae5925..0000000000
--- a/nova/tests/integrated/v3/test_availability_zone.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.config import cfg
-
-from nova.tests.integrated.v3 import test_servers
-
-CONF = cfg.CONF
-CONF.import_opt('manager', 'nova.cells.opts', group='cells')
-
-
-class AvailabilityZoneJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-availability-zone"
-
- def _setup_services(self):
- self.conductor = self.start_service('conductor',
- host='conductor', manager=CONF.conductor.manager)
- self.compute = self.start_service('compute', host='compute')
- self.cert = self.start_service('cert', host='cert')
- self.consoleauth = self.start_service('consoleauth',
- host='consoleauth')
- self.network = self.start_service('network', host='network')
- self.scheduler = self.start_service('scheduler', host='scheduler')
- self.cells = self.start_service('cells', host='cells',
- manager=CONF.cells.manager)
-
- def test_availability_zone_list(self):
- response = self._do_get('os-availability-zone')
- self._verify_response('availability-zone-list-resp', {}, response, 200)
-
- def test_availability_zone_detail(self):
- response = self._do_get('os-availability-zone/detail')
- subs = self._get_regexes()
- self._verify_response('availability-zone-detail-resp', subs, response,
- 200)
-
- def test_availability_zone_post(self):
- self._post_server()
diff --git a/nova/tests/integrated/v3/test_cells.py b/nova/tests/integrated/v3/test_cells.py
deleted file mode 100644
index ec09aa4cf6..0000000000
--- a/nova/tests/integrated/v3/test_cells.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.cells import rpcapi as cells_rpcapi
-from nova.cells import state
-from nova import db
-from nova.db.sqlalchemy import models
-from nova import exception
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class CellsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-cells"
-
- def setUp(self):
- # db_check_interval < 0 makes cells manager always hit the DB
- self.flags(enable=True, db_check_interval=-1, group='cells')
- super(CellsSampleJsonTest, self).setUp()
- self._stub_cells()
-
- def _stub_cells(self, num_cells=5):
- self.cell_list = []
- self.cells_next_id = 1
-
- def _fake_cell_get_all(context):
- return self.cell_list
-
- def _fake_cell_get(inst, context, cell_name):
- for cell in self.cell_list:
- if cell['name'] == cell_name:
- return cell
- raise exception.CellNotFound(cell_name=cell_name)
-
- for x in xrange(num_cells):
- cell = models.Cell()
- our_id = self.cells_next_id
- self.cells_next_id += 1
- cell.update({'id': our_id,
- 'name': 'cell%s' % our_id,
- 'transport_url': 'rabbit://username%s@/' % our_id,
- 'is_parent': our_id % 2 == 0})
- self.cell_list.append(cell)
-
- self.stubs.Set(db, 'cell_get_all', _fake_cell_get_all)
- self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_get', _fake_cell_get)
-
- def test_cells_empty_list(self):
- # Override this
- self._stub_cells(num_cells=0)
- response = self._do_get('os-cells')
- subs = self._get_regexes()
- self._verify_response('cells-list-empty-resp', subs, response, 200)
-
- def test_cells_list(self):
- response = self._do_get('os-cells')
- subs = self._get_regexes()
- self._verify_response('cells-list-resp', subs, response, 200)
-
- def test_cells_get(self):
- response = self._do_get('os-cells/cell3')
- subs = self._get_regexes()
- self._verify_response('cells-get-resp', subs, response, 200)
-
- def test_get_cell_capacity(self):
- self._mock_cell_capacity()
- state_manager = state.CellStateManager()
- my_state = state_manager.get_my_state()
- response = self._do_get('os-cells/%s/capacities' %
- my_state.name)
- subs = self._get_regexes()
- return self._verify_response('cells-capacities-resp',
- subs, response, 200)
-
- def test_get_all_cells_capacity(self):
- self._mock_cell_capacity()
- response = self._do_get('os-cells/capacities')
- subs = self._get_regexes()
- return self._verify_response('cells-capacities-resp',
- subs, response, 200)
-
- def _mock_cell_capacity(self):
- self.mox.StubOutWithMock(self.cells.manager.state_manager,
- 'get_our_capacities')
- response = {"ram_free":
- {"units_by_mb": {"8192": 0, "512": 13,
- "4096": 1, "2048": 3, "16384": 0},
- "total_mb": 7680},
- "disk_free":
- {"units_by_mb": {"81920": 11, "20480": 46,
- "40960": 23, "163840": 5, "0": 0},
- "total_mb": 1052672}
- }
- self.cells.manager.state_manager.get_our_capacities(). \
- AndReturn(response)
- self.mox.ReplayAll()
diff --git a/nova/tests/integrated/v3/test_certificates.py b/nova/tests/integrated/v3/test_certificates.py
deleted file mode 100644
index c04afc5fae..0000000000
--- a/nova/tests/integrated/v3/test_certificates.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class CertificatesSamplesJsonTest(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-certificates"
-
- def test_create_certificates(self):
- response = self._do_post('os-certificates',
- 'certificate-create-req', {})
- subs = self._get_regexes()
- self._verify_response('certificate-create-resp', subs, response, 200)
-
- def test_get_root_certificate(self):
- response = self._do_get('os-certificates/root')
- subs = self._get_regexes()
- self._verify_response('certificate-get-root-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_cloudpipe.py b/nova/tests/integrated/v3/test_cloudpipe.py
deleted file mode 100644
index 54c510feab..0000000000
--- a/nova/tests/integrated/v3/test_cloudpipe.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import uuid as uuid_lib
-
-from oslo.config import cfg
-
-from nova.cloudpipe import pipelib
-from nova.network import api as network_api
-from nova.tests.image import fake
-from nova.tests.integrated.v3 import api_sample_base
-
-
-CONF = cfg.CONF
-CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
-
-
-class CloudPipeSampleTest(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-cloudpipe"
-
- def setUp(self):
- super(CloudPipeSampleTest, self).setUp()
-
- def get_user_data(self, project_id):
- """Stub method to generate user data for cloudpipe tests."""
- return "VVNFUiBEQVRB\n"
-
- def network_api_get(self, context, network_uuid):
- """Stub to get a valid network and its information."""
- return {'vpn_public_address': '127.0.0.1',
- 'vpn_public_port': 22}
-
- self.stubs.Set(pipelib.CloudPipe, 'get_encoded_zip', get_user_data)
- self.stubs.Set(network_api.API, "get",
- network_api_get)
-
- def generalize_subs(self, subs, vanilla_regexes):
- subs['project_id'] = '[0-9a-f-]+'
- return subs
-
- def test_cloud_pipe_create(self):
- # Get api samples of cloud pipe extension creation.
- self.flags(vpn_image_id=fake.get_valid_image_id())
- project = {'project_id': str(uuid_lib.uuid4().hex)}
- response = self._do_post('os-cloudpipe', 'cloud-pipe-create-req',
- project)
- subs = self._get_regexes()
- subs.update(project)
- subs['image_id'] = CONF.vpn_image_id
- self._verify_response('cloud-pipe-create-resp', subs, response, 200)
- return project
-
- def test_cloud_pipe_list(self):
- # Get api samples of cloud pipe extension get request.
- project = self.test_cloud_pipe_create()
- response = self._do_get('os-cloudpipe')
- subs = self._get_regexes()
- subs.update(project)
- subs['image_id'] = CONF.vpn_image_id
- self._verify_response('cloud-pipe-get-resp', subs, response, 200)
-
- def test_cloud_pipe_update(self):
- subs = {'vpn_ip': '192.168.1.1',
- 'vpn_port': 2000}
- response = self._do_put('os-cloudpipe/configure-project',
- 'cloud-pipe-update-req',
- subs)
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
diff --git a/nova/tests/integrated/v3/test_config_drive.py b/nova/tests/integrated/v3/test_config_drive.py
deleted file mode 100644
index 38189f2335..0000000000
--- a/nova/tests/integrated/v3/test_config_drive.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.api.openstack import fakes
-from nova.tests.image import fake
-from nova.tests.integrated.v3 import test_servers
-
-
-class ConfigDriveSampleJsonTest(test_servers.ServersSampleBase):
- extension_name = 'os-config-drive'
-
- def setUp(self):
- super(ConfigDriveSampleJsonTest, self).setUp()
- fakes.stub_out_networking(self.stubs)
- fakes.stub_out_rate_limiting(self.stubs)
- fake.stub_out_image_service(self.stubs)
-
- def test_config_drive_show(self):
- uuid = self._post_server()
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- # config drive can be a string for True or empty value for False
- subs['cdrive'] = '.*'
- self._verify_response('server-config-drive-get-resp', subs,
- response, 200)
-
- def test_config_drive_detail(self):
- self._post_server()
- response = self._do_get('servers/detail')
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- # config drive can be a string for True or empty value for False
- subs['cdrive'] = '.*'
- self._verify_response('servers-config-drive-details-resp',
- subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_console_auth_tokens.py b/nova/tests/integrated/v3/test_console_auth_tokens.py
deleted file mode 100644
index 80ebbc67f2..0000000000
--- a/nova/tests/integrated/v3/test_console_auth_tokens.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright 2013 Cloudbase Solutions Srl
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import re
-
-from oslo.serialization import jsonutils
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class ConsoleAuthTokensSampleJsonTests(test_servers.ServersSampleBase):
- extension_name = "os-console-auth-tokens"
- extra_extensions_to_load = ["os-remote-consoles"]
-
- def _get_console_url(self, data):
- return jsonutils.loads(data)["console"]["url"]
-
- def _get_console_token(self, uuid):
- response = self._do_post('servers/%s/action' % uuid,
- 'get-rdp-console-post-req',
- {'action': 'os-getRDPConsole'})
-
- url = self._get_console_url(response.content)
- return re.match('.+?token=([^&]+)', url).groups()[0]
-
- def test_get_console_connect_info(self):
- self.flags(enabled=True, group='rdp')
-
- uuid = self._post_server()
- token = self._get_console_token(uuid)
-
- response = self._do_get('os-console-auth-tokens/%s' % token)
-
- subs = self._get_regexes()
- subs["uuid"] = uuid
- subs["host"] = r"[\w\.\-]+"
- subs["port"] = "[0-9]+"
- subs["internal_access_path"] = ".*"
- self._verify_response('get-console-connect-info-get-resp', subs,
- response, 200)
diff --git a/nova/tests/integrated/v3/test_console_output.py b/nova/tests/integrated/v3/test_console_output.py
deleted file mode 100644
index 47d39d6464..0000000000
--- a/nova/tests/integrated/v3/test_console_output.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class ConsoleOutputSampleJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-console-output"
-
- def test_get_console_output(self):
- uuid = self._post_server()
- response = self._do_post('servers/%s/action' % uuid,
- 'console-output-post-req', {})
- subs = self._get_regexes()
- self._verify_response('console-output-post-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_consoles.py b/nova/tests/integrated/v3/test_consoles.py
deleted file mode 100644
index c7dcced092..0000000000
--- a/nova/tests/integrated/v3/test_consoles.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class ConsolesSamplesJsonTest(test_servers.ServersSampleBase):
- sample_dir = "consoles"
-
- def setUp(self):
- super(ConsolesSamplesJsonTest, self).setUp()
- self.flags(console_public_hostname='fake')
- self.flags(console_host='fake')
- self.flags(console_driver='nova.console.fake.FakeConsoleProxy')
- self.console = self.start_service('console', host='fake')
-
- def _create_consoles(self, server_uuid):
- response = self._do_post('servers/%s/consoles' % server_uuid,
- 'consoles-create-req', {})
- self.assertEqual(response.status_code, 201)
-
- def test_create_consoles(self):
- uuid = self._post_server()
- self._create_consoles(uuid)
-
- def test_list_consoles(self):
- uuid = self._post_server()
- self._create_consoles(uuid)
- response = self._do_get('servers/%s/consoles' % uuid)
- self._verify_response('consoles-list-get-resp', {}, response, 200)
-
- def test_console_get(self):
- uuid = self._post_server()
- self._create_consoles(uuid)
- response = self._do_get('servers/%s/consoles/1' % uuid)
- subs = self._get_regexes()
- self._verify_response('consoles-get-resp', subs, response, 200)
-
- def test_console_delete(self):
- uuid = self._post_server()
- self._create_consoles(uuid)
- response = self._do_delete('servers/%s/consoles/1' % uuid)
- self.assertEqual(response.status_code, 202)
diff --git a/nova/tests/integrated/v3/test_create_backup.py b/nova/tests/integrated/v3/test_create_backup.py
deleted file mode 100644
index e90c29b1a0..0000000000
--- a/nova/tests/integrated/v3/test_create_backup.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova.tests.image import fake
-from nova.tests.integrated.v3 import test_servers
-
-
-class CreateBackupSamplesJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-create-backup"
-
- def setUp(self):
- """setUp Method for PauseServer api samples extension
-
- This method creates the server that will be used in each tests
- """
- super(CreateBackupSamplesJsonTest, self).setUp()
- self.uuid = self._post_server()
-
- @mock.patch.object(fake._FakeImageService, 'detail', return_value=[])
- def test_post_backup_server(self, mock_method):
- # Get api samples to backup server request.
- response = self._do_post('servers/%s/action' % self.uuid,
- 'create-backup-req', {})
- self.assertEqual(202, response.status_code)
diff --git a/nova/tests/integrated/v3/test_deferred_delete.py b/nova/tests/integrated/v3/test_deferred_delete.py
deleted file mode 100644
index 7a0e264539..0000000000
--- a/nova/tests/integrated/v3/test_deferred_delete.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class DeferredDeleteSampleJsonTests(test_servers.ServersSampleBase):
- extension_name = "os-deferred-delete"
-
- def setUp(self):
- super(DeferredDeleteSampleJsonTests, self).setUp()
- self.flags(reclaim_instance_interval=1)
-
- def test_restore(self):
- uuid = self._post_server()
- response = self._do_delete('servers/%s' % uuid)
-
- response = self._do_post('servers/%s/action' % uuid,
- 'restore-post-req', {})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
-
- def test_force_delete(self):
- uuid = self._post_server()
- response = self._do_delete('servers/%s' % uuid)
-
- response = self._do_post('servers/%s/action' % uuid,
- 'force-delete-post-req', {})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
diff --git a/nova/tests/integrated/v3/test_disk_config.py b/nova/tests/integrated/v3/test_disk_config.py
deleted file mode 100644
index 6f40c43d0e..0000000000
--- a/nova/tests/integrated/v3/test_disk_config.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.image import fake
-from nova.tests.integrated.v3 import test_servers
-
-
-class DiskConfigJsonTest(test_servers.ServersSampleBase):
- extension_name = 'os-disk-config'
- extra_extensions_to_load = ["images"]
-
- def test_list_servers_detail(self):
- uuid = self._post_server()
- response = self._do_get('servers/detail')
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- self._verify_response('list-servers-detail-get', subs, response, 200)
-
- def test_get_server(self):
- uuid = self._post_server()
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('server-get-resp', subs, response, 200)
-
- def test_update_server(self):
- uuid = self._post_server()
- response = self._do_put('servers/%s' % uuid,
- 'server-update-put-req', {})
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('server-update-put-resp', subs, response, 200)
-
- def test_resize_server(self):
- self.flags(allow_resize_to_same_host=True)
- uuid = self._post_server()
- response = self._do_post('servers/%s/action' % uuid,
- 'server-resize-post-req', {})
- self.assertEqual(response.status_code, 202)
- # NOTE(tmello): Resize does not return response body
- # Bug #1085213.
- self.assertEqual(response.content, "")
-
- def test_rebuild_server(self):
- uuid = self._post_server()
- subs = {
- 'image_id': fake.get_valid_image_id(),
- 'host': self._get_host(),
- }
- response = self._do_post('servers/%s/action' % uuid,
- 'server-action-rebuild-req', subs)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('server-action-rebuild-resp',
- subs, response, 202)
-
- def test_get_image(self):
- image_id = fake.get_valid_image_id()
- response = self._do_get('images/%s' % image_id)
- subs = self._get_regexes()
- subs['image_id'] = image_id
- self._verify_response('image-get-resp', subs, response, 200)
-
- def test_list_images(self):
- response = self._do_get('images/detail')
- subs = self._get_regexes()
- self._verify_response('image-list-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_evacuate.py b/nova/tests/integrated/v3/test_evacuate.py
deleted file mode 100644
index 0f17ad1220..0000000000
--- a/nova/tests/integrated/v3/test_evacuate.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova.compute import api as compute_api
-from nova.compute import manager as compute_manager
-from nova.servicegroup import api as service_group_api
-from nova.tests.integrated.v3 import test_servers
-
-
-class EvacuateJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-evacuate"
-
- def _test_evacuate(self, req_subs, server_req, server_resp,
- expected_resp_code):
- self.uuid = self._post_server()
-
- def fake_service_is_up(self, service):
- """Simulate validation of instance host is down."""
- return False
-
- def fake_service_get_by_compute_host(self, context, host):
- """Simulate that given host is a valid host."""
- return {
- 'host_name': host,
- 'service': 'compute',
- 'zone': 'nova'
- }
-
- def fake_check_instance_exists(self, context, instance):
- """Simulate validation of instance does not exist."""
- return False
-
- self.stubs.Set(service_group_api.API, 'service_is_up',
- fake_service_is_up)
- self.stubs.Set(compute_api.HostAPI, 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
- self.stubs.Set(compute_manager.ComputeManager,
- '_check_instance_exists',
- fake_check_instance_exists)
-
- response = self._do_post('servers/%s/action' % self.uuid,
- server_req, req_subs)
- subs = self._get_regexes()
- self._verify_response(server_resp, subs, response, expected_resp_code)
-
- @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
- def test_server_evacuate(self, rebuild_mock):
- # Note (wingwj): The host can't be the same one
- req_subs = {
- 'host': 'testHost',
- "adminPass": "MySecretPass",
- "onSharedStorage": 'False'
- }
- self._test_evacuate(req_subs, 'server-evacuate-req',
- 'server-evacuate-resp', 200)
- rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
- orig_image_ref=mock.ANY, image_ref=mock.ANY,
- injected_files=mock.ANY, new_pass="MySecretPass",
- orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
- on_shared_storage=False, preserve_ephemeral=mock.ANY,
- host='testHost')
-
- @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
- def test_server_evacuate_find_host(self, rebuild_mock):
- req_subs = {
- "adminPass": "MySecretPass",
- "onSharedStorage": 'False'
- }
- self._test_evacuate(req_subs, 'server-evacuate-find-host-req',
- 'server-evacuate-find-host-resp', 200)
-
- rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
- orig_image_ref=mock.ANY, image_ref=mock.ANY,
- injected_files=mock.ANY, new_pass="MySecretPass",
- orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
- on_shared_storage=False, preserve_ephemeral=mock.ANY,
- host=None)
diff --git a/nova/tests/integrated/v3/test_extended_availability_zone.py b/nova/tests/integrated/v3/test_extended_availability_zone.py
deleted file mode 100644
index 631d46141d..0000000000
--- a/nova/tests/integrated/v3/test_extended_availability_zone.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class ExtendedAvailabilityZoneJsonTests(test_servers.ServersSampleBase):
- extension_name = "os-extended-availability-zone"
-
- def test_show(self):
- uuid = self._post_server()
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('server-get-resp', subs, response, 200)
-
- def test_detail(self):
- self._post_server()
- response = self._do_get('servers/detail')
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('servers-detail-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_extended_server_attributes.py b/nova/tests/integrated/v3/test_extended_server_attributes.py
deleted file mode 100644
index 36c70fddbd..0000000000
--- a/nova/tests/integrated/v3/test_extended_server_attributes.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class ExtendedServerAttributesJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-extended-server-attributes"
-
- def test_show(self):
- uuid = self._post_server()
-
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- subs['instance_name'] = 'instance-\d{8}'
- subs['hypervisor_hostname'] = r'[\w\.\-]+'
- self._verify_response('server-get-resp', subs, response, 200)
-
- def test_detail(self):
- uuid = self._post_server()
-
- response = self._do_get('servers/detail')
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- subs['instance_name'] = 'instance-\d{8}'
- subs['hypervisor_hostname'] = r'[\w\.\-]+'
- self._verify_response('servers-detail-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_extended_status.py b/nova/tests/integrated/v3/test_extended_status.py
deleted file mode 100644
index cbdb4fb483..0000000000
--- a/nova/tests/integrated/v3/test_extended_status.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class ExtendedStatusSampleJsonTests(test_servers.ServersSampleBase):
- extension_name = "os-extended-status"
-
- def test_show(self):
- uuid = self._post_server()
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('server-get-resp', subs, response, 200)
-
- def test_detail(self):
- uuid = self._post_server()
- response = self._do_get('servers/detail')
- subs = self._get_regexes()
- subs['id'] = uuid
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('servers-detail-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_extended_volumes.py b/nova/tests/integrated/v3/test_extended_volumes.py
deleted file mode 100644
index bb41b793cc..0000000000
--- a/nova/tests/integrated/v3/test_extended_volumes.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.compute import api as compute_api
-from nova.compute import manager as compute_manager
-from nova import context
-from nova import db
-from nova import objects
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_block_device
-from nova.tests import fake_instance
-from nova.tests.integrated.v3 import test_servers
-from nova.volume import cinder
-
-
-class ExtendedVolumesSampleJsonTests(test_servers.ServersSampleBase):
- extension_name = "os-extended-volumes"
-
- def _stub_compute_api_get_instance_bdms(self, server_id):
-
- def fake_bdms_get_all_by_instance(context, instance_uuid,
- use_slave=False):
- bdms = [
- fake_block_device.FakeDbBlockDeviceDict(
- {'id': 1, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f803',
- 'instance_uuid': server_id, 'source_type': 'volume',
- 'destination_type': 'volume', 'device_name': '/dev/sdd'}),
- fake_block_device.FakeDbBlockDeviceDict(
- {'id': 2, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f804',
- 'instance_uuid': server_id, 'source_type': 'volume',
- 'destination_type': 'volume', 'device_name': '/dev/sdc'})
- ]
- return bdms
-
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fake_bdms_get_all_by_instance)
-
- def _stub_compute_api_get(self):
- def fake_compute_api_get(self, context, instance_id, **kwargs):
- want_objects = kwargs.get('want_objects')
- if want_objects:
- return fake_instance.fake_instance_obj(
- context, **{'uuid': instance_id})
- else:
- return {'uuid': instance_id}
-
- self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
-
- def test_show(self):
- uuid = self._post_server()
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fakes.stub_bdm_get_all_by_instance)
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('server-get-resp', subs, response, 200)
-
- def test_detail(self):
- uuid = self._post_server()
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fakes.stub_bdm_get_all_by_instance)
- response = self._do_get('servers/detail')
- subs = self._get_regexes()
- subs['id'] = uuid
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('servers-detail-resp', subs, response, 200)
-
- def test_attach_volume(self):
- bdm = objects.BlockDeviceMapping()
- device_name = '/dev/vdd'
- bdm['device_name'] = device_name
- self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
- self.stubs.Set(cinder.API, 'check_attach', lambda *a, **k: None)
- self.stubs.Set(cinder.API, 'reserve_volume', lambda *a, **k: None)
- self.stubs.Set(compute_manager.ComputeManager,
- "reserve_block_device_name",
- lambda *a, **k: bdm)
- self.stubs.Set(compute_manager.ComputeManager,
- 'attach_volume',
- lambda *a, **k: None)
-
- volume = fakes.stub_volume_get(None, context.get_admin_context(),
- 'a26887c6-c47b-4654-abb5-dfadf7d3f803')
- subs = {
- 'volume_id': volume['id'],
- 'device': device_name,
- 'disk_bus': 'ide',
- 'device_type': 'cdrom'
- }
- server_id = self._post_server()
- response = self._do_post('servers/%s/action'
- % server_id,
- 'attach-volume-req', subs)
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
-
- def test_detach_volume(self):
- server_id = self._post_server()
- attach_id = "a26887c6-c47b-4654-abb5-dfadf7d3f803"
- self._stub_compute_api_get_instance_bdms(server_id)
- self._stub_compute_api_get()
- self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
- self.stubs.Set(compute_api.API, 'detach_volume', lambda *a, **k: None)
- subs = {
- 'volume_id': attach_id,
- }
- response = self._do_post('servers/%s/action'
- % server_id, 'detach-volume-req', subs)
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
-
- def test_swap_volume(self):
- server_id = self._post_server()
- old_volume_id = "a26887c6-c47b-4654-abb5-dfadf7d3f803"
- old_new_volume = 'a26887c6-c47b-4654-abb5-dfadf7d3f805'
- self._stub_compute_api_get_instance_bdms(server_id)
-
- def stub_volume_get(self, context, volume_id):
- if volume_id == old_volume_id:
- return fakes.stub_volume(volume_id, instance_uuid=server_id)
- else:
- return fakes.stub_volume(volume_id, instance_uuid=None,
- attach_status='detached')
-
- self.stubs.Set(cinder.API, 'get', stub_volume_get)
- self.stubs.Set(cinder.API, 'begin_detaching', lambda *a, **k: None)
- self.stubs.Set(cinder.API, 'check_attach', lambda *a, **k: None)
- self.stubs.Set(cinder.API, 'check_detach', lambda *a, **k: None)
- self.stubs.Set(cinder.API, 'reserve_volume', lambda *a, **k: None)
- self.stubs.Set(compute_manager.ComputeManager, 'swap_volume',
- lambda *a, **k: None)
- subs = {
- 'old_volume_id': old_volume_id,
- 'new_volume_id': old_new_volume
- }
- response = self._do_post('servers/%s/action' % server_id,
- 'swap-volume-req', subs)
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
diff --git a/nova/tests/integrated/v3/test_extension_info.py b/nova/tests/integrated/v3/test_extension_info.py
deleted file mode 100644
index 26dd186193..0000000000
--- a/nova/tests/integrated/v3/test_extension_info.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import re
-
-import mock
-from oslo.serialization import jsonutils
-
-from nova.api.openstack import extensions as api_extensions
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class ExtensionInfoSamplesJsonTest(api_sample_base.ApiSampleTestBaseV3):
- sample_dir = "extension-info"
-
- def test_list_extensions(self):
- response = self._do_get('extensions')
- subs = self._get_regexes()
- self._verify_response('extensions-list-resp', subs, response, 200)
-
- def test_get_extensions(self):
- response = self._do_get('extensions/flavors')
- subs = self._get_regexes()
- self._verify_response('extensions-get-resp', subs, response, 200)
-
-
-class ExtensionInfoFormatTest(api_sample_base.ApiSampleTestBaseV3):
- # NOTE: To check all extension formats, here makes authorize() return True
- # always instead of fake_policy.py because most extensions are not set as
- # "discoverable" in fake_policy.py.
- all_extensions = True
-
- def _test_list_extensions(self, key, pattern):
- with mock.patch.object(api_extensions,
- 'soft_extension_authorizer') as api_mock:
- def fake_soft_extension_authorizer(api_name, extension_name):
- def authorize(context, action=None):
- return True
- return authorize
-
- api_mock.side_effect = fake_soft_extension_authorizer
- response = self._do_get('extensions')
- response = jsonutils.loads(response.content)
- extensions = response['extensions']
- pattern_comp = re.compile(pattern)
- for ext in extensions:
- self.assertIsNotNone(pattern_comp.match(ext[key]),
- '%s does not match with %s' % (ext[key],
- pattern))
-
- def test_list_extensions_name_format(self):
- # name should be CamelCase.
- pattern = '^[A-Z]{1}[a-z]{1}[a-zA-Z]*$'
- self._test_list_extensions('name', pattern)
-
- def test_list_extensions_alias_format(self):
- # alias should contain lowercase chars and '-' only.
- pattern = '^[a-z-]+$'
- self._test_list_extensions('alias', pattern)
diff --git a/nova/tests/integrated/v3/test_fixed_ips.py b/nova/tests/integrated/v3/test_fixed_ips.py
deleted file mode 100644
index 347aacfbc3..0000000000
--- a/nova/tests/integrated/v3/test_fixed_ips.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova import db
-from nova import exception
-from nova.tests.integrated.v3 import test_servers
-from nova.tests.objects import test_network
-from nova.tests import utils as test_utils
-
-
-class FixedIpTest(test_servers.ServersSampleBase):
- extension_name = "os-fixed-ips"
-
- def setUp(self):
- super(FixedIpTest, self).setUp()
-
- instance = dict(test_utils.get_test_instance(),
- hostname='openstack', host='host')
- fake_fixed_ips = [{'id': 1,
- 'address': '192.168.1.1',
- 'network_id': 1,
- 'virtual_interface_id': 1,
- 'instance_uuid': '1',
- 'allocated': False,
- 'leased': False,
- 'reserved': False,
- 'created_at': None,
- 'deleted_at': None,
- 'updated_at': None,
- 'deleted': None,
- 'instance': instance,
- 'network': test_network.fake_network,
- 'host': None},
- {'id': 2,
- 'address': '192.168.1.2',
- 'network_id': 1,
- 'virtual_interface_id': 2,
- 'instance_uuid': '2',
- 'allocated': False,
- 'leased': False,
- 'reserved': False,
- 'created_at': None,
- 'deleted_at': None,
- 'updated_at': None,
- 'deleted': None,
- 'instance': instance,
- 'network': test_network.fake_network,
- 'host': None},
- ]
-
- def fake_fixed_ip_get_by_address(context, address,
- columns_to_join=None):
- for fixed_ip in fake_fixed_ips:
- if fixed_ip['address'] == address:
- return fixed_ip
- raise exception.FixedIpNotFoundForAddress(address=address)
-
- def fake_fixed_ip_get_by_address_detailed(context, address):
- network = {'id': 1,
- 'cidr': "192.168.1.0/24"}
- host = {'host': "host",
- 'hostname': 'openstack'}
- for fixed_ip in fake_fixed_ips:
- if fixed_ip['address'] == address:
- return (fixed_ip, network, host)
- raise exception.FixedIpNotFoundForAddress(address=address)
-
- def fake_fixed_ip_update(context, address, values):
- fixed_ip = fake_fixed_ip_get_by_address(context, address)
- if fixed_ip is None:
- raise exception.FixedIpNotFoundForAddress(address=address)
- else:
- for key in values:
- fixed_ip[key] = values[key]
-
- self.stubs.Set(db, "fixed_ip_get_by_address",
- fake_fixed_ip_get_by_address)
- self.stubs.Set(db, "fixed_ip_get_by_address_detailed",
- fake_fixed_ip_get_by_address_detailed)
- self.stubs.Set(db, "fixed_ip_update", fake_fixed_ip_update)
-
- def test_fixed_ip_reserve(self):
- # Reserve a Fixed IP.
- project = {'reserve': None}
- response = self._do_post('os-fixed-ips/192.168.1.1/action',
- 'fixedip-post-req',
- project)
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
- def test_get_fixed_ip(self):
- # Return data about the given fixed ip.
- response = self._do_get('os-fixed-ips/192.168.1.1')
- project = {'cidr': '192.168.1.0/24',
- 'hostname': 'openstack',
- 'host': 'host',
- 'address': '192.168.1.1'}
- self._verify_response('fixedips-get-resp', project, response, 200)
diff --git a/nova/tests/integrated/v3/test_flavor_access.py b/nova/tests/integrated/v3/test_flavor_access.py
deleted file mode 100644
index 7b9006bbf1..0000000000
--- a/nova/tests/integrated/v3/test_flavor_access.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class FlavorAccessSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
- extension_name = 'flavor-access'
-
- def _add_tenant(self):
- subs = {
- 'tenant_id': 'fake_tenant',
- 'flavor_id': 10,
- }
- response = self._do_post('flavors/10/action',
- 'flavor-access-add-tenant-req',
- subs)
- self._verify_response('flavor-access-add-tenant-resp',
- subs, response, 200)
-
- def _create_flavor(self):
- subs = {
- 'flavor_id': 10,
- 'flavor_name': 'test_flavor'
- }
- response = self._do_post("flavors",
- "flavor-access-create-req",
- subs)
- subs.update(self._get_regexes())
- self._verify_response("flavor-access-create-resp", subs, response, 200)
-
- def test_flavor_access_create(self):
- self._create_flavor()
-
- def test_flavor_access_detail(self):
- response = self._do_get('flavors/detail')
- subs = self._get_regexes()
- self._verify_response('flavor-access-detail-resp', subs, response, 200)
-
- def test_flavor_access_list(self):
- self._create_flavor()
- self._add_tenant()
- flavor_id = 10
- response = self._do_get('flavors/%s/os-flavor-access' % flavor_id)
- subs = {
- 'flavor_id': flavor_id,
- 'tenant_id': 'fake_tenant',
- }
- self._verify_response('flavor-access-list-resp', subs, response, 200)
-
- def test_flavor_access_show(self):
- flavor_id = 1
- response = self._do_get('flavors/%s' % flavor_id)
- subs = {
- 'flavor_id': flavor_id
- }
- subs.update(self._get_regexes())
- self._verify_response('flavor-access-show-resp', subs, response, 200)
-
- def test_flavor_access_add_tenant(self):
- self._create_flavor()
- self._add_tenant()
-
- def test_flavor_access_remove_tenant(self):
- self._create_flavor()
- self._add_tenant()
- subs = {
- 'tenant_id': 'fake_tenant',
- }
- response = self._do_post('flavors/10/action',
- "flavor-access-remove-tenant-req",
- subs)
- exp_subs = {
- "tenant_id": self.api.project_id,
- "flavor_id": "10"
- }
- self._verify_response('flavor-access-remove-tenant-resp',
- exp_subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_flavor_extraspecs.py b/nova/tests/integrated/v3/test_flavor_extraspecs.py
deleted file mode 100644
index 4a08d454a1..0000000000
--- a/nova/tests/integrated/v3/test_flavor_extraspecs.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class FlavorExtraSpecsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
- extension_name = 'flavor-extra-specs'
-
- def _flavor_extra_specs_create(self):
- subs = {'value1': 'value1',
- 'value2': 'value2'
- }
- response = self._do_post('flavors/1/os-extra_specs',
- 'flavor-extra-specs-create-req', subs)
- self._verify_response('flavor-extra-specs-create-resp',
- subs, response, 200)
-
- def test_flavor_extra_specs_get(self):
- subs = {'value1': 'value1'}
- self._flavor_extra_specs_create()
- response = self._do_get('flavors/1/os-extra_specs/key1')
- self._verify_response('flavor-extra-specs-get-resp',
- subs, response, 200)
-
- def test_flavor_extra_specs_list(self):
- subs = {'value1': 'value1',
- 'value2': 'value2'
- }
- self._flavor_extra_specs_create()
- response = self._do_get('flavors/1/os-extra_specs')
- self._verify_response('flavor-extra-specs-list-resp',
- subs, response, 200)
-
- def test_flavor_extra_specs_create(self):
- self._flavor_extra_specs_create()
-
- def test_flavor_extra_specs_update(self):
- subs = {'value1': 'new_value1'}
- self._flavor_extra_specs_create()
- response = self._do_put('flavors/1/os-extra_specs/key1',
- 'flavor-extra-specs-update-req', subs)
- self._verify_response('flavor-extra-specs-update-resp',
- subs, response, 200)
-
- def test_flavor_extra_specs_delete(self):
- self._flavor_extra_specs_create()
- response = self._do_delete('flavors/1/os-extra_specs/key1')
- self.assertEqual(response.status_code, 200)
- self.assertEqual(response.content, '')
diff --git a/nova/tests/integrated/v3/test_flavor_manage.py b/nova/tests/integrated/v3/test_flavor_manage.py
deleted file mode 100644
index 9b6d14301e..0000000000
--- a/nova/tests/integrated/v3/test_flavor_manage.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class FlavorManageSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
- extension_name = 'flavor-manage'
-
- def _create_flavor(self):
- """Create a flavor."""
- subs = {
- 'flavor_id': 10,
- 'flavor_name': "test_flavor"
- }
- response = self._do_post("flavors",
- "flavor-create-post-req",
- subs)
- subs.update(self._get_regexes())
- self._verify_response("flavor-create-post-resp", subs, response, 200)
-
- def test_create_flavor(self):
- # Get api sample to create a flavor.
- self._create_flavor()
-
- def test_delete_flavor(self):
- # Get api sample to delete a flavor.
- self._create_flavor()
- response = self._do_delete("flavors/10")
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
diff --git a/nova/tests/integrated/v3/test_flavor_rxtx.py b/nova/tests/integrated/v3/test_flavor_rxtx.py
deleted file mode 100644
index 0de02d5c4d..0000000000
--- a/nova/tests/integrated/v3/test_flavor_rxtx.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class FlavorRxtxJsonTest(api_sample_base.ApiSampleTestBaseV3):
- extension_name = 'os-flavor-rxtx'
-
- def test_flavor_rxtx_get(self):
- flavor_id = 1
- response = self._do_get('flavors/%s' % flavor_id)
- subs = {
- 'flavor_id': flavor_id,
- 'flavor_name': 'm1.tiny'
- }
- subs.update(self._get_regexes())
- self._verify_response('flavor-rxtx-get-resp', subs, response, 200)
-
- def test_flavors_rxtx_detail(self):
- response = self._do_get('flavors/detail')
- subs = self._get_regexes()
- self._verify_response('flavor-rxtx-list-resp', subs, response, 200)
-
- def test_flavors_rxtx_create(self):
- subs = {
- 'flavor_id': 100,
- 'flavor_name': 'flavortest'
- }
- response = self._do_post('flavors',
- 'flavor-rxtx-post-req',
- subs)
- subs.update(self._get_regexes())
- self._verify_response('flavor-rxtx-post-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_flavors.py b/nova/tests/integrated/v3/test_flavors.py
deleted file mode 100644
index c821bd4764..0000000000
--- a/nova/tests/integrated/v3/test_flavors.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class FlavorsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
- sample_dir = 'flavors'
-
- def test_flavors_get(self):
- response = self._do_get('flavors/1')
- subs = self._get_regexes()
- self._verify_response('flavor-get-resp', subs, response, 200)
-
- def test_flavors_list(self):
- response = self._do_get('flavors')
- subs = self._get_regexes()
- self._verify_response('flavors-list-resp', subs, response, 200)
-
- def test_flavors_detail(self):
- response = self._do_get('flavors/detail')
- subs = self._get_regexes()
- self._verify_response('flavors-detail-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_floating_ip_dns.py b/nova/tests/integrated/v3/test_floating_ip_dns.py
deleted file mode 100644
index 51cb543a80..0000000000
--- a/nova/tests/integrated/v3/test_floating_ip_dns.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class FloatingIpDNSTest(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-floating-ip-dns"
-
- domain = 'domain1.example.org'
- name = 'instance1'
- scope = 'public'
- project = 'project1'
- dns_type = 'A'
- ip = '192.168.1.1'
-
- def _create_or_update(self):
- subs = {'project': self.project,
- 'scope': self.scope}
- response = self._do_put('os-floating-ip-dns/%s' % self.domain,
- 'floating-ip-dns-create-or-update-req', subs)
- subs.update({'domain': self.domain})
- self._verify_response('floating-ip-dns-create-or-update-resp', subs,
- response, 200)
-
- def _create_or_update_entry(self):
- subs = {'ip': self.ip, 'dns_type': self.dns_type}
- response = self._do_put('os-floating-ip-dns/%s/entries/%s'
- % (self.domain, self.name),
- 'floating-ip-dns-create-or-update-entry-req',
- subs)
- subs.update({'name': self.name, 'domain': self.domain})
- self._verify_response('floating-ip-dns-create-or-update-entry-resp',
- subs, response, 200)
-
- def test_floating_ip_dns_list(self):
- self._create_or_update()
- response = self._do_get('os-floating-ip-dns')
- subs = {'domain': self.domain,
- 'project': self.project,
- 'scope': self.scope}
- self._verify_response('floating-ip-dns-list-resp', subs,
- response, 200)
-
- def test_floating_ip_dns_create_or_update(self):
- self._create_or_update()
-
- def test_floating_ip_dns_delete(self):
- self._create_or_update()
- response = self._do_delete('os-floating-ip-dns/%s' % self.domain)
- self.assertEqual(response.status_code, 202)
-
- def test_floating_ip_dns_create_or_update_entry(self):
- self._create_or_update_entry()
-
- def test_floating_ip_dns_entry_get(self):
- self._create_or_update_entry()
- response = self._do_get('os-floating-ip-dns/%s/entries/%s'
- % (self.domain, self.name))
- subs = {'domain': self.domain,
- 'ip': self.ip,
- 'name': self.name}
- self._verify_response('floating-ip-dns-entry-get-resp', subs,
- response, 200)
-
- def test_floating_ip_dns_entry_delete(self):
- self._create_or_update_entry()
- response = self._do_delete('os-floating-ip-dns/%s/entries/%s'
- % (self.domain, self.name))
- self.assertEqual(response.status_code, 202)
-
- def test_floating_ip_dns_entry_list(self):
- self._create_or_update_entry()
- response = self._do_get('os-floating-ip-dns/%s/entries/%s'
- % (self.domain, self.ip))
- subs = {'domain': self.domain,
- 'ip': self.ip,
- 'name': self.name}
- self._verify_response('floating-ip-dns-entry-list-resp', subs,
- response, 200)
diff --git a/nova/tests/integrated/v3/test_floating_ip_pools.py b/nova/tests/integrated/v3/test_floating_ip_pools.py
deleted file mode 100644
index bea1123cb9..0000000000
--- a/nova/tests/integrated/v3/test_floating_ip_pools.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.network import api as network_api
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class FloatingIPPoolsSampleTests(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-floating-ip-pools"
-
- def test_list_floatingippools(self):
- pool_list = ["pool1", "pool2"]
-
- def fake_get_floating_ip_pools(self, context):
- return pool_list
-
- self.stubs.Set(network_api.API, "get_floating_ip_pools",
- fake_get_floating_ip_pools)
- response = self._do_get('os-floating-ip-pools')
- subs = {
- 'pool1': pool_list[0],
- 'pool2': pool_list[1]
- }
- self._verify_response('floatingippools-list-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_floating_ips_bulk.py b/nova/tests/integrated/v3/test_floating_ips_bulk.py
deleted file mode 100644
index 630902572d..0000000000
--- a/nova/tests/integrated/v3/test_floating_ips_bulk.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.config import cfg
-
-from nova import context
-from nova.tests.integrated.v3 import api_sample_base
-
-CONF = cfg.CONF
-CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
-CONF.import_opt('public_interface', 'nova.network.linux_net')
-
-
-class FloatingIpsBulkTest(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-floating-ips-bulk"
-
- def setUp(self):
- super(FloatingIpsBulkTest, self).setUp()
- pool = CONF.default_floating_pool
- interface = CONF.public_interface
-
- self.ip_pool = [
- {
- 'address': "10.10.10.1",
- 'pool': pool,
- 'interface': interface
- },
- {
- 'address': "10.10.10.2",
- 'pool': pool,
- 'interface': interface
- },
- {
- 'address': "10.10.10.3",
- 'pool': pool,
- 'interface': interface,
- 'host': "testHost"
- },
- ]
- self.compute.db.floating_ip_bulk_create(
- context.get_admin_context(), self.ip_pool)
-
- self.addCleanup(self.compute.db.floating_ip_bulk_destroy,
- context.get_admin_context(), self.ip_pool)
-
- def test_floating_ips_bulk_list(self):
- response = self._do_get('os-floating-ips-bulk')
- subs = self._get_regexes()
- self._verify_response('floating-ips-bulk-list-resp',
- subs, response, 200)
-
- def test_floating_ips_bulk_list_by_host(self):
- response = self._do_get('os-floating-ips-bulk/testHost')
- subs = self._get_regexes()
- self._verify_response('floating-ips-bulk-list-by-host-resp',
- subs, response, 200)
-
- def test_floating_ips_bulk_create(self):
- response = self._do_post('os-floating-ips-bulk',
- 'floating-ips-bulk-create-req',
- {"ip_range": "192.168.1.0/24",
- "pool": CONF.default_floating_pool,
- "interface": CONF.public_interface})
- subs = self._get_regexes()
- self._verify_response('floating-ips-bulk-create-resp', subs,
- response, 200)
-
- def test_floating_ips_bulk_delete(self):
- response = self._do_put('os-floating-ips-bulk/delete',
- 'floating-ips-bulk-delete-req',
- {"ip_range": "192.168.1.0/24"})
- subs = self._get_regexes()
- self._verify_response('floating-ips-bulk-delete-resp', subs,
- response, 200)
diff --git a/nova/tests/integrated/v3/test_fping.py b/nova/tests/integrated/v3/test_fping.py
deleted file mode 100644
index 2fa67427f2..0000000000
--- a/nova/tests/integrated/v3/test_fping.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from nova.api.openstack.compute.plugins.v3 import fping
-from nova.tests.api.openstack.compute.contrib import test_fping
-from nova.tests.integrated.v3 import test_servers
-from nova import utils
-
-
-class FpingSampleJsonTests(test_servers.ServersSampleBase):
- extension_name = "os-fping"
-
- def setUp(self):
- super(FpingSampleJsonTests, self).setUp()
-
- def fake_check_fping(self):
- pass
- self.stubs.Set(utils, "execute", test_fping.execute)
- self.stubs.Set(fping.FpingController, "check_fping",
- fake_check_fping)
-
- def test_get_fping(self):
- self._post_server()
- response = self._do_get('os-fping')
- subs = self._get_regexes()
- self._verify_response('fping-get-resp', subs, response, 200)
-
- def test_get_fping_details(self):
- uuid = self._post_server()
- response = self._do_get('os-fping/%s' % (uuid))
- subs = self._get_regexes()
- self._verify_response('fping-get-details-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_hide_server_addresses.py b/nova/tests/integrated/v3/test_hide_server_addresses.py
deleted file mode 100644
index a530b70c89..0000000000
--- a/nova/tests/integrated/v3/test_hide_server_addresses.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.config import cfg
-
-from nova.compute import vm_states
-from nova.tests.integrated.v3 import test_servers
-
-CONF = cfg.CONF
-CONF.import_opt('osapi_hide_server_address_states',
- 'nova.api.openstack.compute.plugins.v3.hide_server_addresses')
-
-
-class ServersSampleHideAddressesJsonTest(test_servers.ServersSampleJsonTest):
- extension_name = 'os-hide-server-addresses'
- # Override the sample dirname because
- # test_servers.ServersSampleJsonTest does and so it won't default
- # to the extension name
- sample_dir = extension_name
-
- def setUp(self):
- # We override osapi_hide_server_address_states in order
- # to have an example of in the json samples of the
- # addresses being hidden
- CONF.set_override("osapi_hide_server_address_states",
- [vm_states.ACTIVE])
- super(ServersSampleHideAddressesJsonTest, self).setUp()
diff --git a/nova/tests/integrated/v3/test_hosts.py b/nova/tests/integrated/v3/test_hosts.py
deleted file mode 100644
index f86ad63153..0000000000
--- a/nova/tests/integrated/v3/test_hosts.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class HostsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-hosts"
-
- def test_host_startup(self):
- response = self._do_get('os-hosts/%s/startup' % self.compute.host)
- subs = self._get_regexes()
- self._verify_response('host-get-startup', subs, response, 200)
-
- def test_host_reboot(self):
- response = self._do_get('os-hosts/%s/reboot' % self.compute.host)
- subs = self._get_regexes()
- self._verify_response('host-get-reboot', subs, response, 200)
-
- def test_host_shutdown(self):
- response = self._do_get('os-hosts/%s/shutdown' % self.compute.host)
- subs = self._get_regexes()
- self._verify_response('host-get-shutdown', subs, response, 200)
-
- def test_host_maintenance(self):
- response = self._do_put('os-hosts/%s' % self.compute.host,
- 'host-put-maintenance-req', {})
- subs = self._get_regexes()
- self._verify_response('host-put-maintenance-resp', subs, response, 200)
-
- def test_host_get(self):
- response = self._do_get('os-hosts/%s' % self.compute.host)
- subs = self._get_regexes()
- self._verify_response('host-get-resp', subs, response, 200)
-
- def test_hosts_list(self):
- response = self._do_get('os-hosts')
- subs = self._get_regexes()
- self._verify_response('hosts-list-resp', subs, response, 200)
-
- def test_hosts_list_compute_service(self):
- response = self._do_get('os-hosts?service=compute')
- subs = self._get_regexes()
- self._verify_response('hosts-list-compute-service-resp',
- subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_hypervisors.py b/nova/tests/integrated/v3/test_hypervisors.py
deleted file mode 100644
index 9c2a2ba2ec..0000000000
--- a/nova/tests/integrated/v3/test_hypervisors.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.compute import api as compute_api
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class HypervisorsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-hypervisors"
-
- def test_hypervisors_list(self):
- response = self._do_get('os-hypervisors')
- self._verify_response('hypervisors-list-resp', {}, response, 200)
-
- def test_hypervisors_search(self):
- response = self._do_get('os-hypervisors/fake/search')
- self._verify_response('hypervisors-search-resp', {}, response, 200)
-
- def test_hypervisors_servers(self):
- response = self._do_get('os-hypervisors/fake/servers')
- self._verify_response('hypervisors-servers-resp', {}, response, 200)
-
- def test_hypervisors_detail(self):
- hypervisor_id = 1
- subs = {
- 'hypervisor_id': hypervisor_id
- }
- response = self._do_get('os-hypervisors/detail')
- subs.update(self._get_regexes())
- self._verify_response('hypervisors-detail-resp', subs, response, 200)
-
- def test_hypervisors_show(self):
- hypervisor_id = 1
- subs = {
- 'hypervisor_id': hypervisor_id
- }
- response = self._do_get('os-hypervisors/%s' % hypervisor_id)
- subs.update(self._get_regexes())
- self._verify_response('hypervisors-show-resp', subs, response, 200)
-
- def test_hypervisors_statistics(self):
- response = self._do_get('os-hypervisors/statistics')
- self._verify_response('hypervisors-statistics-resp', {}, response, 200)
-
- def test_hypervisors_uptime(self):
- def fake_get_host_uptime(self, context, hyp):
- return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
- " 0.20, 0.12, 0.14")
-
- self.stubs.Set(compute_api.HostAPI,
- 'get_host_uptime', fake_get_host_uptime)
- hypervisor_id = 1
- response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
- subs = {
- 'hypervisor_id': hypervisor_id,
- }
- self._verify_response('hypervisors-uptime-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_image_size.py b/nova/tests/integrated/v3/test_image_size.py
deleted file mode 100644
index e2037eb984..0000000000
--- a/nova/tests/integrated/v3/test_image_size.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from nova.tests.image import fake
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class ImageSizeSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "image-size"
- extra_extensions_to_load = ["images", "image-metadata"]
-
- def test_show(self):
- # Get api sample of one single image details request.
- image_id = fake.get_valid_image_id()
- response = self._do_get('images/%s' % image_id)
- subs = self._get_regexes()
- subs['image_id'] = image_id
- self._verify_response('image-get-resp', subs, response, 200)
-
- def test_detail(self):
- # Get api sample of all images details request.
- response = self._do_get('images/detail')
- subs = self._get_regexes()
- self._verify_response('images-details-get-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_images.py b/nova/tests/integrated/v3/test_images.py
deleted file mode 100644
index e6982d2963..0000000000
--- a/nova/tests/integrated/v3/test_images.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from nova.tests.image import fake
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class ImagesSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
- extension_name = 'images'
- extra_extensions_to_load = ["image-metadata"]
-
- def test_images_list(self):
- # Get api sample of images get list request.
- response = self._do_get('images')
- subs = self._get_regexes()
- self._verify_response('images-list-get-resp', subs, response, 200)
-
- def test_image_get(self):
- # Get api sample of one single image details request.
- image_id = fake.get_valid_image_id()
- response = self._do_get('images/%s' % image_id)
- subs = self._get_regexes()
- subs['image_id'] = image_id
- self._verify_response('image-get-resp', subs, response, 200)
-
- def test_images_details(self):
- # Get api sample of all images details request.
- response = self._do_get('images/detail')
- subs = self._get_regexes()
- self._verify_response('images-details-get-resp', subs, response, 200)
-
- def test_image_metadata_get(self):
- # Get api sample of an image metadata request.
- image_id = fake.get_valid_image_id()
- response = self._do_get('images/%s/metadata' % image_id)
- subs = self._get_regexes()
- subs['image_id'] = image_id
- self._verify_response('image-metadata-get-resp', subs, response, 200)
-
- def test_image_metadata_post(self):
- # Get api sample to update metadata of an image metadata request.
- image_id = fake.get_valid_image_id()
- response = self._do_post(
- 'images/%s/metadata' % image_id,
- 'image-metadata-post-req', {})
- subs = self._get_regexes()
- self._verify_response('image-metadata-post-resp', subs, response, 200)
-
- def test_image_metadata_put(self):
- # Get api sample of image metadata put request.
- image_id = fake.get_valid_image_id()
- response = self._do_put('images/%s/metadata' % image_id,
- 'image-metadata-put-req', {})
- subs = self._get_regexes()
- self._verify_response('image-metadata-put-resp', subs, response, 200)
-
- def test_image_meta_key_get(self):
- # Get api sample of an image metadata key request.
- image_id = fake.get_valid_image_id()
- key = "kernel_id"
- response = self._do_get('images/%s/metadata/%s' % (image_id, key))
- subs = self._get_regexes()
- self._verify_response('image-meta-key-get', subs, response, 200)
-
- def test_image_meta_key_put(self):
- # Get api sample of image metadata key put request.
- image_id = fake.get_valid_image_id()
- key = "auto_disk_config"
- response = self._do_put('images/%s/metadata/%s' % (image_id, key),
- 'image-meta-key-put-req', {})
- subs = self._get_regexes()
- self._verify_response('image-meta-key-put-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_instance_actions.py b/nova/tests/integrated/v3/test_instance_actions.py
deleted file mode 100644
index ca69b401e8..0000000000
--- a/nova/tests/integrated/v3/test_instance_actions.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-from nova.compute import api as compute_api
-from nova import db
-from nova.tests import fake_server_actions
-from nova.tests.integrated.v3 import api_sample_base
-from nova.tests import utils as test_utils
-
-
-class ServerActionsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
- extension_name = 'os-instance-actions'
-
- def setUp(self):
- super(ServerActionsSampleJsonTest, self).setUp()
- self.actions = fake_server_actions.FAKE_ACTIONS
- self.events = fake_server_actions.FAKE_EVENTS
- self.instance = test_utils.get_test_instance()
-
- def fake_instance_action_get_by_request_id(context, uuid, request_id):
- return copy.deepcopy(self.actions[uuid][request_id])
-
- def fake_server_actions_get(context, uuid):
- return [copy.deepcopy(value) for value in
- self.actions[uuid].itervalues()]
-
- def fake_instance_action_events_get(context, action_id):
- return copy.deepcopy(self.events[action_id])
-
- def fake_instance_get_by_uuid(context, instance_id):
- return self.instance
-
- def fake_get(self, context, instance_uuid, **kwargs):
- return {'uuid': instance_uuid}
-
- self.stubs.Set(db, 'action_get_by_request_id',
- fake_instance_action_get_by_request_id)
- self.stubs.Set(db, 'actions_get', fake_server_actions_get)
- self.stubs.Set(db, 'action_events_get',
- fake_instance_action_events_get)
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
- self.stubs.Set(compute_api.API, 'get', fake_get)
-
- def test_instance_action_get(self):
- fake_uuid = fake_server_actions.FAKE_UUID
- fake_request_id = fake_server_actions.FAKE_REQUEST_ID1
- fake_action = self.actions[fake_uuid][fake_request_id]
-
- response = self._do_get('servers/%s/os-instance-actions/%s' %
- (fake_uuid, fake_request_id))
- subs = self._get_regexes()
- subs['action'] = '(reboot)|(resize)'
- subs['instance_uuid'] = fake_uuid
- subs['integer_id'] = '[0-9]+'
- subs['request_id'] = fake_action['request_id']
- subs['start_time'] = fake_action['start_time']
- subs['result'] = '(Success)|(Error)'
- subs['event'] = '(schedule)|(compute_create)'
- self._verify_response('instance-action-get-resp', subs, response, 200)
-
- def test_instance_actions_list(self):
- fake_uuid = fake_server_actions.FAKE_UUID
- response = self._do_get('servers/%s/os-instance-actions' % (fake_uuid))
- subs = self._get_regexes()
- subs['action'] = '(reboot)|(resize)'
- subs['integer_id'] = '[0-9]+'
- subs['request_id'] = ('req-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
- '-[0-9a-f]{4}-[0-9a-f]{12}')
- self._verify_response('instance-actions-list-resp', subs,
- response, 200)
diff --git a/nova/tests/integrated/v3/test_keypairs.py b/nova/tests/integrated/v3/test_keypairs.py
deleted file mode 100644
index 6c888e090a..0000000000
--- a/nova/tests/integrated/v3/test_keypairs.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import uuid
-
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class KeyPairsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
- sample_dir = "keypairs"
-
- def generalize_subs(self, subs, vanilla_regexes):
- subs['keypair_name'] = 'keypair-[0-9a-f-]+'
- return subs
-
- def test_keypairs_post(self, public_key=None):
- """Get api sample of key pairs post request."""
- key_name = 'keypair-' + str(uuid.uuid4())
- response = self._do_post('os-keypairs', 'keypairs-post-req',
- {'keypair_name': key_name})
- subs = self._get_regexes()
- subs['keypair_name'] = '(%s)' % key_name
- self._verify_response('keypairs-post-resp', subs, response, 200)
- # NOTE(maurosr): return the key_name is necessary cause the
- # verification returns the label of the last compared information in
- # the response, not necessarily the key name.
- return key_name
-
- def test_keypairs_import_key_post(self):
- # Get api sample of key pairs post to import user's key.
- key_name = 'keypair-' + str(uuid.uuid4())
- subs = {
- 'keypair_name': key_name,
- 'public_key': "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGg"
- "B4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0l"
- "RE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv"
- "9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYc"
- "pSxsIbECHw== Generated-by-Nova"
- }
- response = self._do_post('os-keypairs', 'keypairs-import-post-req',
- subs)
- subs = self._get_regexes()
- subs['keypair_name'] = '(%s)' % key_name
- self._verify_response('keypairs-import-post-resp', subs, response, 200)
-
- def test_keypairs_list(self):
- # Get api sample of key pairs list request.
- key_name = self.test_keypairs_post()
- response = self._do_get('os-keypairs')
- subs = self._get_regexes()
- subs['keypair_name'] = '(%s)' % key_name
- self._verify_response('keypairs-list-resp', subs, response, 200)
-
- def test_keypairs_get(self):
- # Get api sample of key pairs get request.
- key_name = self.test_keypairs_post()
- response = self._do_get('os-keypairs/%s' % key_name)
- subs = self._get_regexes()
- subs['keypair_name'] = '(%s)' % key_name
- self._verify_response('keypairs-get-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_lock_server.py b/nova/tests/integrated/v3/test_lock_server.py
deleted file mode 100644
index 7991a3b8e3..0000000000
--- a/nova/tests/integrated/v3/test_lock_server.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class LockServerSamplesJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-lock-server"
-
- def setUp(self):
- """setUp Method for LockServer api samples extension
-
- This method creates the server that will be used in each tests
- """
- super(LockServerSamplesJsonTest, self).setUp()
- self.uuid = self._post_server()
-
- def test_post_lock_server(self):
- # Get api samples to lock server request.
- response = self._do_post('servers/%s/action' % self.uuid,
- 'lock-server', {})
- self.assertEqual(202, response.status_code)
-
- def test_post_unlock_server(self):
- # Get api samples to unlock server request.
- self.test_post_lock_server()
- response = self._do_post('servers/%s/action' % self.uuid,
- 'unlock-server', {})
- self.assertEqual(202, response.status_code)
diff --git a/nova/tests/integrated/v3/test_migrate_server.py b/nova/tests/integrated/v3/test_migrate_server.py
deleted file mode 100644
index 5b0d838152..0000000000
--- a/nova/tests/integrated/v3/test_migrate_server.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova.conductor import manager as conductor_manager
-from nova import db
-from nova.tests.integrated.v3 import test_servers
-from nova import utils
-
-
-class MigrateServerSamplesJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-migrate-server"
- ctype = 'json'
-
- def setUp(self):
- """setUp Method for MigrateServer api samples extension
-
- This method creates the server that will be used in each tests
- """
- super(MigrateServerSamplesJsonTest, self).setUp()
- self.uuid = self._post_server()
-
- @mock.patch('nova.conductor.manager.ComputeTaskManager._cold_migrate')
- def test_post_migrate(self, mock_cold_migrate):
- # Get api samples to migrate server request.
- response = self._do_post('servers/%s/action' % self.uuid,
- 'migrate-server', {})
- self.assertEqual(202, response.status_code)
-
- def test_post_live_migrate_server(self):
- # Get api samples to server live migrate request.
- def fake_live_migrate(_self, context, instance, scheduler_hint,
- block_migration, disk_over_commit):
- self.assertEqual(self.uuid, instance["uuid"])
- host = scheduler_hint["host"]
- self.assertEqual(self.compute.host, host)
-
- self.stubs.Set(conductor_manager.ComputeTaskManager,
- '_live_migrate',
- fake_live_migrate)
-
- def fake_get_compute(context, host):
- service = dict(host=host,
- binary='nova-compute',
- topic='compute',
- report_count=1,
- updated_at='foo',
- hypervisor_type='bar',
- hypervisor_version=utils.convert_version_to_int(
- '1.0'),
- disabled=False)
- return {'compute_node': [service]}
- self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute)
-
- response = self._do_post('servers/%s/action' % self.uuid,
- 'live-migrate-server',
- {'hostname': self.compute.host})
- self.assertEqual(202, response.status_code)
diff --git a/nova/tests/integrated/v3/test_migrations.py b/nova/tests/integrated/v3/test_migrations.py
deleted file mode 100644
index 5ce42e07b7..0000000000
--- a/nova/tests/integrated/v3/test_migrations.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-from nova.compute import api as compute_api
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class MigrationsSamplesJsonTest(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-migrations"
-
- def _stub_migrations(self, context, filters):
- fake_migrations = [
- {
- 'id': 1234,
- 'source_node': 'node1',
- 'dest_node': 'node2',
- 'source_compute': 'compute1',
- 'dest_compute': 'compute2',
- 'dest_host': '1.2.3.4',
- 'status': 'Done',
- 'instance_uuid': 'instance_id_123',
- 'old_instance_type_id': 1,
- 'new_instance_type_id': 2,
- 'created_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
- 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
- 'deleted_at': None,
- 'deleted': False
- },
- {
- 'id': 5678,
- 'source_node': 'node10',
- 'dest_node': 'node20',
- 'source_compute': 'compute10',
- 'dest_compute': 'compute20',
- 'dest_host': '5.6.7.8',
- 'status': 'Done',
- 'instance_uuid': 'instance_id_456',
- 'old_instance_type_id': 5,
- 'new_instance_type_id': 6,
- 'created_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
- 'updated_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
- 'deleted_at': None,
- 'deleted': False
- }
- ]
- return fake_migrations
-
- def setUp(self):
- super(MigrationsSamplesJsonTest, self).setUp()
- self.stubs.Set(compute_api.API, 'get_migrations',
- self._stub_migrations)
-
- def test_get_migrations(self):
- response = self._do_get('os-migrations')
- subs = self._get_regexes()
-
- self.assertEqual(response.status_code, 200)
- self._verify_response('migrations-get', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_multinic.py b/nova/tests/integrated/v3/test_multinic.py
deleted file mode 100644
index 68eb4573e2..0000000000
--- a/nova/tests/integrated/v3/test_multinic.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class MultinicSampleJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-multinic"
-
- def _disable_instance_dns_manager(self):
- # NOTE(markmc): it looks like multinic and instance_dns_manager are
- # incompatible. See:
- # https://bugs.launchpad.net/nova/+bug/1213251
- self.flags(
- instance_dns_manager='nova.network.noop_dns_driver.NoopDNSDriver')
-
- def setUp(self):
- self._disable_instance_dns_manager()
- super(MultinicSampleJsonTest, self).setUp()
- self.uuid = self._post_server()
-
- def _add_fixed_ip(self):
- subs = {"networkId": 1}
- response = self._do_post('servers/%s/action' % (self.uuid),
- 'multinic-add-fixed-ip-req', subs)
- self.assertEqual(response.status_code, 202)
-
- def test_add_fixed_ip(self):
- self._add_fixed_ip()
-
- def test_remove_fixed_ip(self):
- self._add_fixed_ip()
-
- subs = {"ip": "10.0.0.4"}
- response = self._do_post('servers/%s/action' % (self.uuid),
- 'multinic-remove-fixed-ip-req', subs)
- self.assertEqual(response.status_code, 202)
diff --git a/nova/tests/integrated/v3/test_multiple_create.py b/nova/tests/integrated/v3/test_multiple_create.py
deleted file mode 100644
index b887878fbb..0000000000
--- a/nova/tests/integrated/v3/test_multiple_create.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.image import fake
-from nova.tests.integrated.v3 import test_servers
-
-
-class MultipleCreateJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-multiple-create"
-
- def test_multiple_create(self):
- subs = {
- 'image_id': fake.get_valid_image_id(),
- 'host': self._get_host(),
- 'min_count': "2",
- 'max_count': "3"
- }
- response = self._do_post('servers', 'multiple-create-post-req', subs)
- subs.update(self._get_regexes())
- self._verify_response('multiple-create-post-resp', subs, response, 202)
-
- def test_multiple_create_without_reservation_id(self):
- subs = {
- 'image_id': fake.get_valid_image_id(),
- 'host': self._get_host(),
- 'min_count': "2",
- 'max_count': "3"
- }
- response = self._do_post('servers', 'multiple-create-no-resv-post-req',
- subs)
- subs.update(self._get_regexes())
- self._verify_response('multiple-create-no-resv-post-resp', subs,
- response, 202)
diff --git a/nova/tests/integrated/v3/test_networks.py b/nova/tests/integrated/v3/test_networks.py
deleted file mode 100644
index df9a7184e7..0000000000
--- a/nova/tests/integrated/v3/test_networks.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.network import api as network_api
-from nova.tests.api.openstack.compute.contrib import test_networks
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class NetworksJsonTests(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-networks"
-
- def setUp(self):
- super(NetworksJsonTests, self).setUp()
- fake_network_api = test_networks.FakeNetworkAPI()
- self.stubs.Set(network_api.API, "get_all",
- fake_network_api.get_all)
- self.stubs.Set(network_api.API, "get",
- fake_network_api.get)
- self.stubs.Set(network_api.API, "associate",
- fake_network_api.associate)
- self.stubs.Set(network_api.API, "delete",
- fake_network_api.delete)
- self.stubs.Set(network_api.API, "create",
- fake_network_api.create)
- self.stubs.Set(network_api.API, "add_network_to_project",
- fake_network_api.add_network_to_project)
-
- def test_network_list(self):
- response = self._do_get('os-networks')
- subs = self._get_regexes()
- self._verify_response('networks-list-resp', subs, response, 200)
-
- def test_network_disassociate(self):
- uuid = test_networks.FAKE_NETWORKS[0]['uuid']
- response = self._do_post('os-networks/%s/action' % uuid,
- 'networks-disassociate-req', {})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
- def test_network_show(self):
- uuid = test_networks.FAKE_NETWORKS[0]['uuid']
- response = self._do_get('os-networks/%s' % uuid)
- subs = self._get_regexes()
- self._verify_response('network-show-resp', subs, response, 200)
-
- def test_network_create(self):
- response = self._do_post("os-networks",
- 'network-create-req', {})
- subs = self._get_regexes()
- self._verify_response('network-create-resp', subs, response, 200)
-
- def test_network_add(self):
- response = self._do_post("os-networks/add",
- 'network-add-req', {})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
- def test_network_delete(self):
- response = self._do_delete('os-networks/always_delete')
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
diff --git a/nova/tests/integrated/v3/test_networks_associate.py b/nova/tests/integrated/v3/test_networks_associate.py
deleted file mode 100644
index 3bd437a1cd..0000000000
--- a/nova/tests/integrated/v3/test_networks_associate.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.config import cfg
-
-from nova.network import api as network_api
-from nova.tests.integrated.v3 import api_sample_base
-
-CONF = cfg.CONF
-CONF.import_opt('osapi_compute_extension',
- 'nova.api.openstack.compute.extensions')
-
-
-class NetworksAssociateJsonTests(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-networks-associate"
- extra_extensions_to_load = ["os-networks"]
-
- _sentinel = object()
-
- def _get_flags(self):
- f = super(NetworksAssociateJsonTests, self)._get_flags()
- f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
- # Networks_associate requires Networks to be update
- f['osapi_compute_extension'].append(
- 'nova.api.openstack.compute.contrib.os_networks.Os_networks')
- return f
-
- def setUp(self):
- super(NetworksAssociateJsonTests, self).setUp()
-
- def fake_associate(self, context, network_id,
- host=NetworksAssociateJsonTests._sentinel,
- project=NetworksAssociateJsonTests._sentinel):
- return True
-
- self.stubs.Set(network_api.API, "associate", fake_associate)
-
- def test_disassociate(self):
- response = self._do_post('os-networks/1/action',
- 'network-disassociate-req',
- {})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
- def test_disassociate_host(self):
- response = self._do_post('os-networks/1/action',
- 'network-disassociate-host-req',
- {})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
- def test_disassociate_project(self):
- response = self._do_post('os-networks/1/action',
- 'network-disassociate-project-req',
- {})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
- def test_associate_host(self):
- response = self._do_post('os-networks/1/action',
- 'network-associate-host-req',
- {"host": "testHost"})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
diff --git a/nova/tests/integrated/v3/test_pause_server.py b/nova/tests/integrated/v3/test_pause_server.py
deleted file mode 100644
index 109906dc26..0000000000
--- a/nova/tests/integrated/v3/test_pause_server.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class PauseServerSamplesJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-pause-server"
-
- def setUp(self):
- """setUp Method for PauseServer api samples extension
-
- This method creates the server that will be used in each test
- """
- super(PauseServerSamplesJsonTest, self).setUp()
- self.uuid = self._post_server()
-
- def test_post_pause(self):
- # Get api samples to pause server request.
- response = self._do_post('servers/%s/action' % self.uuid,
- 'pause-server', {})
- self.assertEqual(202, response.status_code)
-
- def test_post_unpause(self):
- # Get api samples to unpause server request.
- self.test_post_pause()
- response = self._do_post('servers/%s/action' % self.uuid,
- 'unpause-server', {})
- self.assertEqual(202, response.status_code)
diff --git a/nova/tests/integrated/v3/test_pci.py b/nova/tests/integrated/v3/test_pci.py
deleted file mode 100644
index e20aeb7f5e..0000000000
--- a/nova/tests/integrated/v3/test_pci.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# Copyright 2013 Intel.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo.serialization import jsonutils
-
-from nova import db
-from nova.tests.integrated.v3 import api_sample_base
-from nova.tests.integrated.v3 import test_servers
-
-
-fake_db_dev_1 = {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': None,
- 'id': 1,
- 'compute_node_id': 1,
- 'address': '0000:04:10.0',
- 'vendor_id': '8086',
- 'product_id': '1520',
- 'dev_type': 'type-VF',
- 'status': 'available',
- 'dev_id': 'pci_0000_04_10_0',
- 'label': 'label_8086_1520',
- 'instance_uuid': '69ba1044-0766-4ec0-b60d-09595de034a1',
- 'request_id': None,
- 'extra_info': '{"key1": "value1", "key2": "value2"}'
- }
-
-fake_db_dev_2 = {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': None,
- 'id': 2,
- 'compute_node_id': 1,
- 'address': '0000:04:10.1',
- 'vendor_id': '8086',
- 'product_id': '1520',
- 'dev_type': 'type-VF',
- 'status': 'available',
- 'dev_id': 'pci_0000_04_10_1',
- 'label': 'label_8086_1520',
- 'instance_uuid': 'd5b446a6-a1b4-4d01-b4f0-eac37b3a62fc',
- 'request_id': None,
- 'extra_info': '{"key3": "value3", "key4": "value4"}'
- }
-
-
-class ExtendedServerPciSampleJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-pci"
-
- def test_show(self):
- uuid = self._post_server()
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('server-get-resp', subs, response, 200)
-
- def test_detail(self):
- self._post_server()
- response = self._do_get('servers/detail')
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('servers-detail-resp', subs, response, 200)
-
-
-class ExtendedHyervisorPciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
- extra_extensions_to_load = ['os-hypervisors']
- extension_name = 'os-pci'
-
- def setUp(self):
- super(ExtendedHyervisorPciSampleJsonTest, self).setUp()
- self.fake_compute_node = {"cpu_info": "?",
- "current_workload": 0,
- "disk_available_least": 0,
- "host_ip": "1.1.1.1",
- "state": "up",
- "status": "enabled",
- "free_disk_gb": 1028,
- "free_ram_mb": 7680,
- "hypervisor_hostname": "fake-mini",
- "hypervisor_type": "fake",
- "hypervisor_version": 1000,
- "id": 1,
- "local_gb": 1028,
- "local_gb_used": 0,
- "memory_mb": 8192,
- "memory_mb_used": 512,
- "running_vms": 0,
- "service": {"host": '043b3cacf6f34c90a'
- '7245151fc8ebcda',
- "disabled": False,
- "disabled_reason": None},
- "vcpus": 1,
- "vcpus_used": 0,
- "service_id": 2,
- "pci_stats": [
- {"count": 5,
- "vendor_id": "8086",
- "product_id": "1520",
- "keya": "valuea",
- "extra_info": {
- "phys_function": '[["0x0000", '
- '"0x04", "0x00",'
- ' "0x1"]]',
- "key1": "value1"}}]}
-
- @mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
- @mock.patch("nova.db.compute_node_get")
- def test_pci_show(self, mock_db, mock_service):
- self.fake_compute_node['pci_stats'] = jsonutils.dumps(
- self.fake_compute_node['pci_stats'])
- mock_db.return_value = self.fake_compute_node
- hypervisor_id = 1
- response = self._do_get('os-hypervisors/%s' % hypervisor_id)
- subs = {
- 'hypervisor_id': hypervisor_id,
- }
- subs.update(self._get_regexes())
- self._verify_response('hypervisors-pci-show-resp',
- subs, response, 200)
-
- @mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
- @mock.patch("nova.db.compute_node_get_all")
- def test_pci_detail(self, mock_db, mock_service):
- self.fake_compute_node['pci_stats'] = jsonutils.dumps(
- self.fake_compute_node['pci_stats'])
-
- mock_db.return_value = [self.fake_compute_node]
- hypervisor_id = 1
- subs = {
- 'hypervisor_id': hypervisor_id
- }
- response = self._do_get('os-hypervisors/detail')
-
- subs.update(self._get_regexes())
- self._verify_response('hypervisors-pci-detail-resp',
- subs, response, 200)
-
-
-class PciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-pci"
-
- def _fake_pci_device_get_by_id(self, context, id):
- return fake_db_dev_1
-
- def _fake_pci_device_get_all_by_node(self, context, id):
- return [fake_db_dev_1, fake_db_dev_2]
-
- def test_pci_show(self):
- self.stubs.Set(db, 'pci_device_get_by_id',
- self._fake_pci_device_get_by_id)
- response = self._do_get('os-pci/1')
- subs = self._get_regexes()
- self._verify_response('pci-show-resp', subs, response, 200)
-
- def test_pci_index(self):
- self.stubs.Set(db, 'pci_device_get_all_by_node',
- self._fake_pci_device_get_all_by_node)
- response = self._do_get('os-pci')
- subs = self._get_regexes()
- self._verify_response('pci-index-resp', subs, response, 200)
-
- def test_pci_detail(self):
- self.stubs.Set(db, 'pci_device_get_all_by_node',
- self._fake_pci_device_get_all_by_node)
- response = self._do_get('os-pci/detail')
- subs = self._get_regexes()
- self._verify_response('pci-detail-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_quota_sets.py b/nova/tests/integrated/v3/test_quota_sets.py
deleted file mode 100644
index 4153c3c3f6..0000000000
--- a/nova/tests/integrated/v3/test_quota_sets.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class QuotaSetsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-quota-sets"
-
- def test_show_quotas(self):
- # Get api sample to show quotas.
- response = self._do_get('os-quota-sets/fake_tenant')
- self._verify_response('quotas-show-get-resp', {}, response, 200)
-
- def test_show_quotas_defaults(self):
- # Get api sample to show quotas defaults.
- response = self._do_get('os-quota-sets/fake_tenant/defaults')
- self._verify_response('quotas-show-defaults-get-resp',
- {}, response, 200)
-
- def test_update_quotas(self):
- # Get api sample to update quotas.
- response = self._do_put('os-quota-sets/fake_tenant',
- 'quotas-update-post-req',
- {})
- self._verify_response('quotas-update-post-resp', {}, response, 200)
-
- def test_delete_quotas(self):
- # Get api sample to delete quota.
- response = self._do_delete('os-quota-sets/fake_tenant')
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
-
- def test_update_quotas_force(self):
- # Get api sample to update quotas.
- response = self._do_put('os-quota-sets/fake_tenant',
- 'quotas-update-force-post-req',
- {})
- return self._verify_response('quotas-update-force-post-resp', {},
- response, 200)
-
- def test_show_quotas_for_user(self):
- # Get api sample to show quotas for user.
- response = self._do_get('os-quota-sets/fake_tenant?user_id=1')
- self._verify_response('user-quotas-show-get-resp', {}, response, 200)
-
- def test_delete_quotas_for_user(self):
- response = self._do_delete('os-quota-sets/fake_tenant?user_id=1')
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
-
- def test_update_quotas_for_user(self):
- # Get api sample to update quotas for user.
- response = self._do_put('os-quota-sets/fake_tenant?user_id=1',
- 'user-quotas-update-post-req',
- {})
- return self._verify_response('user-quotas-update-post-resp', {},
- response, 200)
diff --git a/nova/tests/integrated/v3/test_remote_consoles.py b/nova/tests/integrated/v3/test_remote_consoles.py
deleted file mode 100644
index 914c3f5767..0000000000
--- a/nova/tests/integrated/v3/test_remote_consoles.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class ConsolesSampleJsonTests(test_servers.ServersSampleBase):
- extension_name = "os-remote-consoles"
-
- def setUp(self):
- super(ConsolesSampleJsonTests, self).setUp()
- self.flags(vnc_enabled=True)
- self.flags(enabled=True, group='spice')
- self.flags(enabled=True, group='rdp')
- self.flags(enabled=True, group='serial_console')
-
- def test_get_vnc_console(self):
- uuid = self._post_server()
- response = self._do_post('servers/%s/action' % uuid,
- 'get-vnc-console-post-req',
- {'action': 'os-getVNCConsole'})
- subs = self._get_regexes()
- subs["url"] = \
- "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
- self._verify_response('get-vnc-console-post-resp', subs, response, 200)
-
- def test_get_spice_console(self):
- uuid = self._post_server()
- response = self._do_post('servers/%s/action' % uuid,
- 'get-spice-console-post-req',
- {'action': 'os-getSPICEConsole'})
- subs = self._get_regexes()
- subs["url"] = \
- "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
- self._verify_response('get-spice-console-post-resp', subs,
- response, 200)
-
- def test_get_rdp_console(self):
- uuid = self._post_server()
- response = self._do_post('servers/%s/action' % uuid,
- 'get-rdp-console-post-req',
- {'action': 'os-getRDPConsole'})
- subs = self._get_regexes()
- subs["url"] = \
- "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
- self._verify_response('get-rdp-console-post-resp', subs,
- response, 200)
-
- def test_get_serial_console(self):
- uuid = self._post_server()
- response = self._do_post('servers/%s/action' % uuid,
- 'get-serial-console-post-req',
- {'action': 'os-getSerialConsole'})
- subs = self._get_regexes()
- subs["url"] = \
- "((ws?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
- self._verify_response('get-serial-console-post-resp', subs,
- response, 200)
diff --git a/nova/tests/integrated/v3/test_rescue.py b/nova/tests/integrated/v3/test_rescue.py
deleted file mode 100644
index aee385dc99..0000000000
--- a/nova/tests/integrated/v3/test_rescue.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class RescueJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-rescue"
-
- def _rescue(self, uuid):
- req_subs = {
- 'password': 'MySecretPass'
- }
- response = self._do_post('servers/%s/action' % uuid,
- 'server-rescue-req', req_subs)
- self._verify_response('server-rescue', req_subs, response, 200)
-
- def _unrescue(self, uuid):
- response = self._do_post('servers/%s/action' % uuid,
- 'server-unrescue-req', {})
- self.assertEqual(response.status_code, 202)
-
- def test_server_rescue(self):
- uuid = self._post_server()
-
- self._rescue(uuid)
-
- # Do a server get to make sure that the 'RESCUE' state is set
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- subs['status'] = 'RESCUE'
-
- self._verify_response('server-get-resp-rescue', subs, response, 200)
-
- def test_server_rescue_with_image_ref_specified(self):
- uuid = self._post_server()
-
- req_subs = {
- 'password': 'MySecretPass',
- 'image_ref': '2341-Abc'
- }
- response = self._do_post('servers/%s/action' % uuid,
- 'server-rescue-req-with-image-ref', req_subs)
- self._verify_response('server-rescue', req_subs, response, 200)
-
- # Do a server get to make sure that the 'RESCUE' state is set
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- subs['status'] = 'RESCUE'
-
- self._verify_response('server-get-resp-rescue', subs, response, 200)
-
- def test_server_unrescue(self):
- uuid = self._post_server()
-
- self._rescue(uuid)
- self._unrescue(uuid)
-
- # Do a server get to make sure that the 'ACTIVE' state is back
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- subs['status'] = 'ACTIVE'
-
- self._verify_response('server-get-resp-unrescue', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_scheduler_hints.py b/nova/tests/integrated/v3/test_scheduler_hints.py
deleted file mode 100644
index a0b9b6186e..0000000000
--- a/nova/tests/integrated/v3/test_scheduler_hints.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import uuid
-
-from nova.tests.image import fake
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class SchedulerHintsJsonTest(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-scheduler-hints"
-
- def test_scheduler_hints_post(self):
- # Get api sample of scheduler hint post request.
- subs = self._get_regexes()
- subs.update({'image_id': fake.get_valid_image_id(),
- 'image_near': str(uuid.uuid4())})
- response = self._do_post('servers', 'scheduler-hints-post-req',
- subs)
- self._verify_response('scheduler-hints-post-resp', subs, response, 202)
diff --git a/nova/tests/integrated/v3/test_security_group_default_rules.py b/nova/tests/integrated/v3/test_security_group_default_rules.py
deleted file mode 100644
index 99882ce865..0000000000
--- a/nova/tests/integrated/v3/test_security_group_default_rules.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class SecurityGroupDefaultRulesSampleJsonTest(
- api_sample_base.ApiSampleTestBaseV3):
- extension_name = 'os-security-group-default-rules'
-
- def test_security_group_default_rules_create(self):
- response = self._do_post('os-security-group-default-rules',
- 'security-group-default-rules-create-req',
- {})
- self._verify_response('security-group-default-rules-create-resp',
- {}, response, 200)
-
- def test_security_group_default_rules_list(self):
- self.test_security_group_default_rules_create()
- response = self._do_get('os-security-group-default-rules')
- self._verify_response('security-group-default-rules-list-resp',
- {}, response, 200)
-
- def test_security_group_default_rules_show(self):
- self.test_security_group_default_rules_create()
- rule_id = '1'
- response = self._do_get('os-security-group-default-rules/%s' % rule_id)
- self._verify_response('security-group-default-rules-show-resp',
- {}, response, 200)
diff --git a/nova/tests/integrated/v3/test_security_groups.py b/nova/tests/integrated/v3/test_security_groups.py
deleted file mode 100644
index 2859bf1f12..0000000000
--- a/nova/tests/integrated/v3/test_security_groups.py
+++ /dev/null
@@ -1,166 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.network.security_group import neutron_driver
-from nova.tests.integrated.v3 import test_servers
-
-
-def fake_get(*args, **kwargs):
- nova_group = {}
- nova_group['id'] = 1
- nova_group['description'] = 'default'
- nova_group['name'] = 'default'
- nova_group['project_id'] = 'openstack'
- nova_group['rules'] = []
- return nova_group
-
-
-def fake_get_instances_security_groups_bindings(self, context, servers,
- detailed=False):
- result = {}
- for s in servers:
- result[s.get('id')] = [{'name': 'test'}]
- return result
-
-
-def fake_add_to_instance(self, context, instance, security_group_name):
- pass
-
-
-def fake_remove_from_instance(self, context, instance, security_group_name):
- pass
-
-
-def fake_list(self, context, names=None, ids=None, project=None,
- search_opts=None):
- return [fake_get()]
-
-
-def fake_get_instance_security_groups(self, context, instance_uuid,
- detailed=False):
- return [fake_get()]
-
-
-def fake_create_security_group(self, context, name, description):
- return fake_get()
-
-
-class SecurityGroupsJsonTest(test_servers.ServersSampleBase):
- extension_name = 'os-security-groups'
-
- def setUp(self):
- self.flags(security_group_api=('neutron'))
- super(SecurityGroupsJsonTest, self).setUp()
- self.stubs.Set(neutron_driver.SecurityGroupAPI, 'get', fake_get)
- self.stubs.Set(neutron_driver.SecurityGroupAPI,
- 'get_instances_security_groups_bindings',
- fake_get_instances_security_groups_bindings)
- self.stubs.Set(neutron_driver.SecurityGroupAPI,
- 'add_to_instance',
- fake_add_to_instance)
- self.stubs.Set(neutron_driver.SecurityGroupAPI,
- 'remove_from_instance',
- fake_remove_from_instance)
- self.stubs.Set(neutron_driver.SecurityGroupAPI,
- 'list',
- fake_list)
- self.stubs.Set(neutron_driver.SecurityGroupAPI,
- 'get_instance_security_groups',
- fake_get_instance_security_groups)
- self.stubs.Set(neutron_driver.SecurityGroupAPI,
- 'create_security_group',
- fake_create_security_group)
-
- def test_server_create(self):
- self._post_server()
-
- def test_server_get(self):
- uuid = self._post_server()
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('server-get-resp', subs, response, 200)
-
- def test_server_detail(self):
- self._post_server()
- response = self._do_get('servers/detail')
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('servers-detail-resp', subs, response, 200)
-
- def _get_create_subs(self):
- return {
- 'group_name': 'default',
- "description": "default",
- }
-
- def _create_security_group(self):
- subs = self._get_create_subs()
- return self._do_post('os-security-groups',
- 'security-group-post-req', subs)
-
- def _add_group(self, uuid):
- subs = {
- 'group_name': 'test'
- }
- return self._do_post('servers/%s/action' % uuid,
- 'security-group-add-post-req', subs)
-
- def test_security_group_create(self):
- response = self._create_security_group()
- subs = self._get_create_subs()
- self._verify_response('security-groups-create-resp', subs,
- response, 200)
-
- def test_security_groups_list(self):
- # Get api sample of security groups get list request.
- response = self._do_get('os-security-groups')
- subs = self._get_regexes()
- self._verify_response('security-groups-list-get-resp',
- subs, response, 200)
-
- def test_security_groups_get(self):
- # Get api sample of security groups get request.
- security_group_id = '11111111-1111-1111-1111-111111111111'
- response = self._do_get('os-security-groups/%s' % security_group_id)
- subs = self._get_regexes()
- self._verify_response('security-groups-get-resp', subs, response, 200)
-
- def test_security_groups_list_server(self):
- # Get api sample of security groups for a specific server.
- uuid = self._post_server()
- response = self._do_get('servers/%s/os-security-groups' % uuid)
- subs = self._get_regexes()
- self._verify_response('server-security-groups-list-resp',
- subs, response, 200)
-
- def test_security_groups_add(self):
- self._create_security_group()
- uuid = self._post_server()
- response = self._add_group(uuid)
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
-
- def test_security_groups_remove(self):
- self._create_security_group()
- uuid = self._post_server()
- self._add_group(uuid)
- subs = {
- 'group_name': 'test'
- }
- response = self._do_post('servers/%s/action' % uuid,
- 'security-group-remove-post-req', subs)
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
diff --git a/nova/tests/integrated/v3/test_server_diagnostics.py b/nova/tests/integrated/v3/test_server_diagnostics.py
deleted file mode 100644
index 9218066ad0..0000000000
--- a/nova/tests/integrated/v3/test_server_diagnostics.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class ServerDiagnosticsSamplesJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-server-diagnostics"
-
- def test_server_diagnostics_get(self):
- uuid = self._post_server()
- response = self._do_get('servers/%s/diagnostics' % uuid)
- subs = self._get_regexes()
- self._verify_response('server-diagnostics-get-resp', subs,
- response, 200)
diff --git a/nova/tests/integrated/v3/test_server_external_events.py b/nova/tests/integrated/v3/test_server_external_events.py
deleted file mode 100644
index 79b135b5e0..0000000000
--- a/nova/tests/integrated/v3/test_server_external_events.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright 2014 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class ServerExternalEventsSamplesJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-server-external-events"
-
- def setUp(self):
- """setUp Method for AdminActions api samples extension
-
- This method creates the server that will be used in each tests
- """
- super(ServerExternalEventsSamplesJsonTest, self).setUp()
- self.uuid = self._post_server()
-
- def test_create_event(self):
- subs = {
- 'uuid': self.uuid,
- 'name': 'network-changed',
- 'status': 'completed',
- 'tag': 'foo',
- }
- response = self._do_post('os-server-external-events',
- 'event-create-req',
- subs)
- subs.update(self._get_regexes())
- self._verify_response('event-create-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_server_groups.py b/nova/tests/integrated/v3/test_server_groups.py
deleted file mode 100644
index dcbe2afc35..0000000000
--- a/nova/tests/integrated/v3/test_server_groups.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class ServerGroupsSampleJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-server-groups"
-
- def _get_create_subs(self):
- return {'name': 'test'}
-
- def _post_server_group(self):
- """Verify the response status and returns the UUID of the
- newly created server group.
- """
- subs = self._get_create_subs()
- response = self._do_post('os-server-groups',
- 'server-groups-post-req', subs)
- subs = self._get_regexes()
- subs['name'] = 'test'
- return self._verify_response('server-groups-post-resp',
- subs, response, 200)
-
- def _create_server_group(self):
- subs = self._get_create_subs()
- return self._do_post('os-server-groups',
- 'server-groups-post-req', subs)
-
- def test_server_groups_post(self):
- return self._post_server_group()
-
- def test_server_groups_list(self):
- subs = self._get_create_subs()
- uuid = self._post_server_group()
- response = self._do_get('os-server-groups')
- subs.update(self._get_regexes())
- subs['id'] = uuid
- self._verify_response('server-groups-list-resp',
- subs, response, 200)
-
- def test_server_groups_get(self):
- # Get api sample of server groups get request.
- subs = {'name': 'test'}
- uuid = self._post_server_group()
- subs['id'] = uuid
- response = self._do_get('os-server-groups/%s' % uuid)
-
- self._verify_response('server-groups-get-resp', subs, response, 200)
-
- def test_server_groups_delete(self):
- uuid = self._post_server_group()
- response = self._do_delete('os-server-groups/%s' % uuid)
- self.assertEqual(response.status_code, 204)
diff --git a/nova/tests/integrated/v3/test_server_metadata.py b/nova/tests/integrated/v3/test_server_metadata.py
deleted file mode 100644
index f85c8118bb..0000000000
--- a/nova/tests/integrated/v3/test_server_metadata.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class ServersMetadataJsonTest(test_servers.ServersSampleBase):
- extends_name = 'core_only'
- sample_dir = 'server-metadata'
-
- def _create_and_set(self, subs):
- uuid = self._post_server()
- response = self._do_put('servers/%s/metadata' % uuid,
- 'server-metadata-all-req',
- subs)
- self._verify_response('server-metadata-all-resp', subs, response, 200)
- return uuid
-
- def generalize_subs(self, subs, vanilla_regexes):
- subs['value'] = '(Foo|Bar) Value'
- return subs
-
- def test_metadata_put_all(self):
- # Test setting all metadata for a server.
- subs = {'value': 'Foo Value'}
- self._create_and_set(subs)
-
- def test_metadata_post_all(self):
- # Test updating all metadata for a server.
- subs = {'value': 'Foo Value'}
- uuid = self._create_and_set(subs)
- subs['value'] = 'Bar Value'
- response = self._do_post('servers/%s/metadata' % uuid,
- 'server-metadata-all-req',
- subs)
- self._verify_response('server-metadata-all-resp', subs, response, 200)
-
- def test_metadata_get_all(self):
- # Test getting all metadata for a server.
- subs = {'value': 'Foo Value'}
- uuid = self._create_and_set(subs)
- response = self._do_get('servers/%s/metadata' % uuid)
- self._verify_response('server-metadata-all-resp', subs, response, 200)
-
- def test_metadata_put(self):
- # Test putting an individual metadata item for a server.
- subs = {'value': 'Foo Value'}
- uuid = self._create_and_set(subs)
- subs['value'] = 'Bar Value'
- response = self._do_put('servers/%s/metadata/foo' % uuid,
- 'server-metadata-req',
- subs)
- self._verify_response('server-metadata-resp', subs, response, 200)
-
- def test_metadata_get(self):
- # Test getting an individual metadata item for a server.
- subs = {'value': 'Foo Value'}
- uuid = self._create_and_set(subs)
- response = self._do_get('servers/%s/metadata/foo' % uuid)
- self._verify_response('server-metadata-resp', subs, response, 200)
-
- def test_metadata_delete(self):
- # Test deleting an individual metadata item for a server.
- subs = {'value': 'Foo Value'}
- uuid = self._create_and_set(subs)
- response = self._do_delete('servers/%s/metadata/foo' % uuid)
- self.assertEqual(response.status_code, 204)
- self.assertEqual(response.content, '')
diff --git a/nova/tests/integrated/v3/test_server_usage.py b/nova/tests/integrated/v3/test_server_usage.py
deleted file mode 100644
index ae0c393e78..0000000000
--- a/nova/tests/integrated/v3/test_server_usage.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class ServerUsageSampleJsonTest(test_servers.ServersSampleBase):
- extension_name = 'os-server-usage'
-
- def setUp(self):
- """setUp method for server usage."""
- super(ServerUsageSampleJsonTest, self).setUp()
- self.uuid = self._post_server()
-
- def test_show(self):
- response = self._do_get('servers/%s' % self.uuid)
- subs = self._get_regexes()
- subs['id'] = self.uuid
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('server-get-resp', subs, response, 200)
-
- def test_details(self):
- response = self._do_get('servers/detail')
- subs = self._get_regexes()
- subs['id'] = self.uuid
- subs['hostid'] = '[a-f0-9]+'
- self._verify_response('servers-detail-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_servers.py b/nova/tests/integrated/v3/test_servers.py
deleted file mode 100644
index 156870203b..0000000000
--- a/nova/tests/integrated/v3/test_servers.py
+++ /dev/null
@@ -1,188 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.compute import api as compute_api
-from nova.tests.image import fake
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class ServersSampleBase(api_sample_base.ApiSampleTestBaseV3):
- def _post_server(self):
- subs = {
- 'image_id': fake.get_valid_image_id(),
- 'host': self._get_host(),
- 'glance_host': self._get_glance_host()
- }
- response = self._do_post('servers', 'server-post-req', subs)
- subs = self._get_regexes()
- return self._verify_response('server-post-resp', subs, response, 202)
-
-
-class ServersSampleJsonTest(ServersSampleBase):
- sample_dir = 'servers'
-
- def test_servers_post(self):
- return self._post_server()
-
- def test_servers_get(self):
- uuid = self.test_servers_post()
- response = self._do_get('servers/%s' % uuid)
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- subs['hypervisor_hostname'] = r'[\w\.\-]+'
- subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
- self._verify_response('server-get-resp', subs, response, 200)
-
- def test_servers_list(self):
- uuid = self._post_server()
- response = self._do_get('servers')
- subs = self._get_regexes()
- subs['id'] = uuid
- self._verify_response('servers-list-resp', subs, response, 200)
-
- def test_servers_details(self):
- uuid = self._post_server()
- response = self._do_get('servers/detail')
- subs = self._get_regexes()
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- subs['hypervisor_hostname'] = r'[\w\.\-]+'
- subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
- self._verify_response('servers-details-resp', subs, response, 200)
-
-
-class ServersSampleAllExtensionJsonTest(ServersSampleJsonTest):
- all_extensions = True
-
-
-class ServersActionsJsonTest(ServersSampleBase):
- sample_dir = 'servers'
-
- def _test_server_action(self, uuid, action, req_tpl,
- subs=None, resp_tpl=None, code=202):
- subs = subs or {}
- subs.update({'action': action,
- 'glance_host': self._get_glance_host()})
- response = self._do_post('servers/%s/action' % uuid,
- req_tpl,
- subs)
- if resp_tpl:
- subs.update(self._get_regexes())
- self._verify_response(resp_tpl, subs, response, code)
- else:
- self.assertEqual(response.status_code, code)
- self.assertEqual(response.content, "")
-
- def test_server_reboot_hard(self):
- uuid = self._post_server()
- self._test_server_action(uuid, "reboot",
- 'server-action-reboot',
- {"type": "HARD"})
-
- def test_server_reboot_soft(self):
- uuid = self._post_server()
- self._test_server_action(uuid, "reboot",
- 'server-action-reboot',
- {"type": "SOFT"})
-
- def test_server_rebuild(self):
- uuid = self._post_server()
- image = fake.get_valid_image_id()
- subs = {'host': self._get_host(),
- 'uuid': image,
- 'name': 'foobar',
- 'pass': 'seekr3t',
- 'hostid': '[a-f0-9]+',
- }
- self._test_server_action(uuid, 'rebuild',
- 'server-action-rebuild',
- subs,
- 'server-action-rebuild-resp')
-
- def _test_server_rebuild_preserve_ephemeral(self, value):
- uuid = self._post_server()
- image = fake.get_valid_image_id()
- subs = {'host': self._get_host(),
- 'uuid': image,
- 'name': 'foobar',
- 'pass': 'seekr3t',
- 'hostid': '[a-f0-9]+',
- 'preserve_ephemeral': str(value).lower(),
- 'action': 'rebuild',
- 'glance_host': self._get_glance_host(),
- }
-
- def fake_rebuild(self_, context, instance, image_href, admin_password,
- files_to_inject=None, **kwargs):
- self.assertEqual(kwargs['preserve_ephemeral'], value)
- self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
-
- response = self._do_post('servers/%s/action' % uuid,
- 'server-action-rebuild-preserve-ephemeral',
- subs)
- self.assertEqual(response.status_code, 202)
-
- def test_server_rebuild_preserve_ephemeral_true(self):
- self._test_server_rebuild_preserve_ephemeral(True)
-
- def test_server_rebuild_preserve_ephemeral_false(self):
- self._test_server_rebuild_preserve_ephemeral(False)
-
- def test_server_resize(self):
- self.flags(allow_resize_to_same_host=True)
- uuid = self._post_server()
- self._test_server_action(uuid, "resize",
- 'server-action-resize',
- {"id": 2,
- "host": self._get_host()})
- return uuid
-
- def test_server_revert_resize(self):
- uuid = self.test_server_resize()
- self._test_server_action(uuid, "revertResize",
- 'server-action-revert-resize')
-
- def test_server_confirm_resize(self):
- uuid = self.test_server_resize()
- self._test_server_action(uuid, "confirmResize",
- 'server-action-confirm-resize',
- code=204)
-
- def test_server_create_image(self):
- uuid = self._post_server()
- self._test_server_action(uuid, 'createImage',
- 'server-action-create-image',
- {'name': 'foo-image'})
-
-
-class ServerStartStopJsonTest(ServersSampleBase):
- sample_dir = 'servers'
-
- def _test_server_action(self, uuid, action, req_tpl):
- response = self._do_post('servers/%s/action' % uuid,
- req_tpl,
- {'action': action})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
- def test_server_start(self):
- uuid = self._post_server()
- self._test_server_action(uuid, 'os-stop', 'server-action-stop')
- self._test_server_action(uuid, 'os-start', 'server-action-start')
-
- def test_server_stop(self):
- uuid = self._post_server()
- self._test_server_action(uuid, 'os-stop', 'server-action-stop')
diff --git a/nova/tests/integrated/v3/test_servers_ips.py b/nova/tests/integrated/v3/test_servers_ips.py
deleted file mode 100644
index 08cabe6a82..0000000000
--- a/nova/tests/integrated/v3/test_servers_ips.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class ServersIpsJsonTest(test_servers.ServersSampleBase):
- extends_name = 'core_only'
- sample_dir = 'server-ips'
-
- def test_get(self):
- # Test getting a server's IP information.
- uuid = self._post_server()
- response = self._do_get('servers/%s/ips' % uuid)
- subs = self._get_regexes()
- self._verify_response('server-ips-resp', subs, response, 200)
-
- def test_get_by_network(self):
- # Test getting a server's IP information by network id.
- uuid = self._post_server()
- response = self._do_get('servers/%s/ips/private' % uuid)
- subs = self._get_regexes()
- self._verify_response('server-ips-network-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_services.py b/nova/tests/integrated/v3/test_services.py
deleted file mode 100644
index b15701358c..0000000000
--- a/nova/tests/integrated/v3/test_services.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.utils import timeutils
-
-from nova import db
-from nova.tests.api.openstack.compute.plugins.v3 import test_services
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class ServicesJsonTest(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-services"
-
- def setUp(self):
- super(ServicesJsonTest, self).setUp()
- self.stubs.Set(db, "service_get_all",
- test_services.fake_db_api_service_get_all)
- self.stubs.Set(timeutils, "utcnow", test_services.fake_utcnow)
- self.stubs.Set(timeutils, "utcnow_ts",
- test_services.fake_utcnow_ts)
- self.stubs.Set(db, "service_get_by_args",
- test_services.fake_service_get_by_host_binary)
- self.stubs.Set(db, "service_update",
- test_services.fake_service_update)
-
- def tearDown(self):
- super(ServicesJsonTest, self).tearDown()
- timeutils.clear_time_override()
-
- def test_services_list(self):
- """Return a list of all agent builds."""
- response = self._do_get('os-services')
- subs = {'binary': 'nova-compute',
- 'host': 'host1',
- 'zone': 'nova',
- 'status': 'disabled',
- 'state': 'up'}
- subs.update(self._get_regexes())
- self._verify_response('services-list-get-resp', subs, response, 200)
-
- def test_service_enable(self):
- """Enable an existing agent build."""
- subs = {"host": "host1",
- 'binary': 'nova-compute'}
- response = self._do_put('os-services/enable',
- 'service-enable-put-req', subs)
- subs = {"host": "host1",
- "binary": "nova-compute"}
- self._verify_response('service-enable-put-resp', subs, response, 200)
-
- def test_service_disable(self):
- """Disable an existing agent build."""
- subs = {"host": "host1",
- 'binary': 'nova-compute'}
- response = self._do_put('os-services/disable',
- 'service-disable-put-req', subs)
- subs = {"host": "host1",
- "binary": "nova-compute"}
- self._verify_response('service-disable-put-resp', subs, response, 200)
-
- def test_service_disable_log_reason(self):
- """Disable an existing service and log the reason."""
- subs = {"host": "host1",
- 'binary': 'nova-compute',
- 'disabled_reason': 'test2'}
- response = self._do_put('os-services/disable-log-reason',
- 'service-disable-log-put-req', subs)
- return self._verify_response('service-disable-log-put-resp',
- subs, response, 200)
-
- def test_service_delete(self):
- """Delete an existing service."""
- response = self._do_delete('os-services/1')
- self.assertEqual(response.status_code, 204)
- self.assertEqual(response.content, "")
diff --git a/nova/tests/integrated/v3/test_shelve.py b/nova/tests/integrated/v3/test_shelve.py
deleted file mode 100644
index 6913ab42cc..0000000000
--- a/nova/tests/integrated/v3/test_shelve.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.config import cfg
-
-from nova.tests.integrated.v3 import test_servers
-
-CONF = cfg.CONF
-CONF.import_opt('shelved_offload_time', 'nova.compute.manager')
-
-
-class ShelveJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-shelve"
-
- def setUp(self):
- super(ShelveJsonTest, self).setUp()
- # Don't offload instance, so we can test the offload call.
- CONF.set_override('shelved_offload_time', -1)
-
- def _test_server_action(self, uuid, template, action):
- response = self._do_post('servers/%s/action' % uuid,
- template, {'action': action})
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
-
- def test_shelve(self):
- uuid = self._post_server()
- self._test_server_action(uuid, 'os-shelve', 'shelve')
-
- def test_shelve_offload(self):
- uuid = self._post_server()
- self._test_server_action(uuid, 'os-shelve', 'shelve')
- self._test_server_action(uuid, 'os-shelve-offload', 'shelveOffload')
-
- def test_unshelve(self):
- uuid = self._post_server()
- self._test_server_action(uuid, 'os-shelve', 'shelve')
- self._test_server_action(uuid, 'os-unshelve', 'unshelve')
diff --git a/nova/tests/integrated/v3/test_simple_tenant_usage.py b/nova/tests/integrated/v3/test_simple_tenant_usage.py
deleted file mode 100644
index 18d2e7ceb7..0000000000
--- a/nova/tests/integrated/v3/test_simple_tenant_usage.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-import urllib
-
-from oslo.utils import timeutils
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class SimpleTenantUsageSampleJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-simple-tenant-usage"
-
- def setUp(self):
- """setUp method for simple tenant usage."""
- super(SimpleTenantUsageSampleJsonTest, self).setUp()
-
- started = timeutils.utcnow()
- now = started + datetime.timedelta(hours=1)
-
- timeutils.set_time_override(started)
- self._post_server()
- timeutils.set_time_override(now)
-
- self.query = {
- 'start': str(started),
- 'end': str(now)
- }
-
- def tearDown(self):
- """tearDown method for simple tenant usage."""
- super(SimpleTenantUsageSampleJsonTest, self).tearDown()
- timeutils.clear_time_override()
-
- def test_get_tenants_usage(self):
- # Get api sample to get all tenants usage request.
- response = self._do_get('os-simple-tenant-usage?%s' % (
- urllib.urlencode(self.query)))
- subs = self._get_regexes()
- self._verify_response('simple-tenant-usage-get', subs, response, 200)
-
- def test_get_tenant_usage_details(self):
- # Get api sample to get specific tenant usage request.
- tenant_id = 'openstack'
- response = self._do_get('os-simple-tenant-usage/%s?%s' % (tenant_id,
- urllib.urlencode(self.query)))
- subs = self._get_regexes()
- self._verify_response('simple-tenant-usage-get-specific', subs,
- response, 200)
diff --git a/nova/tests/integrated/v3/test_suspend_server.py b/nova/tests/integrated/v3/test_suspend_server.py
deleted file mode 100644
index 2d9b1048ed..0000000000
--- a/nova/tests/integrated/v3/test_suspend_server.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.integrated.v3 import test_servers
-
-
-class SuspendServerSamplesJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-suspend-server"
- ctype = 'json'
-
- def setUp(self):
- """setUp Method for SuspendServer api samples extension
-
- This method creates the server that will be used in each tests
- """
- super(SuspendServerSamplesJsonTest, self).setUp()
- self.uuid = self._post_server()
-
- def test_post_suspend(self):
- # Get api samples to suspend server request.
- response = self._do_post('servers/%s/action' % self.uuid,
- 'server-suspend', {})
- self.assertEqual(response.status_code, 202)
-
- def test_post_resume(self):
- # Get api samples to server resume request.
- self.test_post_suspend()
- response = self._do_post('servers/%s/action' % self.uuid,
- 'server-resume', {})
- self.assertEqual(response.status_code, 202)
diff --git a/nova/tests/integrated/v3/test_tenant_networks.py b/nova/tests/integrated/v3/test_tenant_networks.py
deleted file mode 100644
index 6ea3c7d7c6..0000000000
--- a/nova/tests/integrated/v3/test_tenant_networks.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-
-import nova.quota
-from nova.tests.integrated.v3 import api_sample_base
-
-CONF = cfg.CONF
-CONF.import_opt('enable_network_quota',
- 'nova.api.openstack.compute.contrib.os_tenant_networks')
-
-
-class TenantNetworksJsonTests(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-tenant-networks"
-
- def setUp(self):
- super(TenantNetworksJsonTests, self).setUp()
- CONF.set_override("enable_network_quota", True)
-
- def fake(*args, **kwargs):
- pass
-
- self.stubs.Set(nova.quota.QUOTAS, "reserve", fake)
- self.stubs.Set(nova.quota.QUOTAS, "commit", fake)
- self.stubs.Set(nova.quota.QUOTAS, "rollback", fake)
- self.stubs.Set(nova.quota.QuotaEngine, "reserve", fake)
- self.stubs.Set(nova.quota.QuotaEngine, "commit", fake)
- self.stubs.Set(nova.quota.QuotaEngine, "rollback", fake)
-
- def test_list_networks(self):
- response = self._do_get('os-tenant-networks')
- subs = self._get_regexes()
- self._verify_response('networks-list-res', subs, response, 200)
-
- def test_create_network(self):
- response = self._do_post('os-tenant-networks', "networks-post-req", {})
- subs = self._get_regexes()
- self._verify_response('networks-post-res', subs, response, 200)
-
- def test_delete_network(self):
- response = self._do_post('os-tenant-networks', "networks-post-req", {})
- net = jsonutils.loads(response.content)
- response = self._do_delete('os-tenant-networks/%s' %
- net["network"]["id"])
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, "")
diff --git a/nova/tests/integrated/v3/test_used_limits.py b/nova/tests/integrated/v3/test_used_limits.py
deleted file mode 100644
index d9e4248001..0000000000
--- a/nova/tests/integrated/v3/test_used_limits.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class UsedLimitsSamplesJsonTest(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-used-limits"
- extra_extensions_to_load = ["limits"]
-
- def test_get_used_limits(self):
- # Get api sample to used limits.
- response = self._do_get('limits')
- subs = self._get_regexes()
- self._verify_response('usedlimits-get-resp', subs, response, 200)
-
- def test_get_used_limits_for_admin(self):
- tenant_id = 'openstack'
- response = self._do_get('limits?tenant_id=%s' % tenant_id)
- subs = self._get_regexes()
- self._verify_response('usedlimits-get-resp', subs, response, 200)
diff --git a/nova/tests/integrated/v3/test_user_data.py b/nova/tests/integrated/v3/test_user_data.py
deleted file mode 100644
index bdea92a8d8..0000000000
--- a/nova/tests/integrated/v3/test_user_data.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import base64
-
-from nova.tests.image import fake
-from nova.tests.integrated.v3 import api_sample_base
-
-
-class UserDataJsonTest(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-user-data"
-
- def test_user_data_post(self):
- user_data_contents = '#!/bin/bash\n/bin/su\necho "I am in you!"\n'
- user_data = base64.b64encode(user_data_contents)
- subs = {
- 'image_id': fake.get_valid_image_id(),
- 'host': self._get_host(),
- 'user_data': user_data
- }
- response = self._do_post('servers', 'userdata-post-req', subs)
-
- subs.update(self._get_regexes())
- self._verify_response('userdata-post-resp', subs, response, 202)
diff --git a/nova/tests/integrated/v3/test_volumes.py b/nova/tests/integrated/v3/test_volumes.py
deleted file mode 100644
index 8c306ae347..0000000000
--- a/nova/tests/integrated/v3/test_volumes.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# Copyright 2012 Nebula, Inc.
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-from nova.tests.api.openstack import fakes
-from nova.tests.integrated.v3 import api_sample_base
-from nova.tests.integrated.v3 import test_servers
-from nova.volume import cinder
-
-
-class SnapshotsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
- extension_name = "os-volumes"
-
- create_subs = {
- 'snapshot_name': 'snap-001',
- 'description': 'Daily backup',
- 'volume_id': '521752a6-acf6-4b2d-bc7a-119f9148cd8c'
- }
-
- def setUp(self):
- super(SnapshotsSampleJsonTests, self).setUp()
- self.stubs.Set(cinder.API, "get_all_snapshots",
- fakes.stub_snapshot_get_all)
- self.stubs.Set(cinder.API, "get_snapshot", fakes.stub_snapshot_get)
-
- def _create_snapshot(self):
- self.stubs.Set(cinder.API, "create_snapshot",
- fakes.stub_snapshot_create)
-
- response = self._do_post("os-snapshots",
- "snapshot-create-req",
- self.create_subs)
- return response
-
- def test_snapshots_create(self):
- response = self._create_snapshot()
- self.create_subs.update(self._get_regexes())
- self._verify_response("snapshot-create-resp",
- self.create_subs, response, 200)
-
- def test_snapshots_delete(self):
- self.stubs.Set(cinder.API, "delete_snapshot",
- fakes.stub_snapshot_delete)
- self._create_snapshot()
- response = self._do_delete('os-snapshots/100')
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
-
- def test_snapshots_detail(self):
- response = self._do_get('os-snapshots/detail')
- subs = self._get_regexes()
- self._verify_response('snapshots-detail-resp', subs, response, 200)
-
- def test_snapshots_list(self):
- response = self._do_get('os-snapshots')
- subs = self._get_regexes()
- self._verify_response('snapshots-list-resp', subs, response, 200)
-
- def test_snapshots_show(self):
- response = self._do_get('os-snapshots/100')
- subs = {
- 'snapshot_name': 'Default name',
- 'description': 'Default description'
- }
- subs.update(self._get_regexes())
- self._verify_response('snapshots-show-resp', subs, response, 200)
-
-
-class VolumesSampleJsonTest(test_servers.ServersSampleBase):
- extension_name = "os-volumes"
-
- def _get_volume_id(self):
- return 'a26887c6-c47b-4654-abb5-dfadf7d3f803'
-
- def _stub_volume(self, id, displayname="Volume Name",
- displaydesc="Volume Description", size=100):
- volume = {
- 'id': id,
- 'size': size,
- 'availability_zone': 'zone1:host1',
- 'instance_uuid': '3912f2b4-c5ba-4aec-9165-872876fe202e',
- 'mountpoint': '/',
- 'status': 'in-use',
- 'attach_status': 'attached',
- 'name': 'vol name',
- 'display_name': displayname,
- 'display_description': displaydesc,
- 'created_at': datetime.datetime(2008, 12, 1, 11, 1, 55),
- 'snapshot_id': None,
- 'volume_type_id': 'fakevoltype',
- 'volume_metadata': [],
- 'volume_type': {'name': 'Backup'}
- }
- return volume
-
- def _stub_volume_get(self, context, volume_id):
- return self._stub_volume(volume_id)
-
- def _stub_volume_delete(self, context, *args, **param):
- pass
-
- def _stub_volume_get_all(self, context, search_opts=None):
- id = self._get_volume_id()
- return [self._stub_volume(id)]
-
- def _stub_volume_create(self, context, size, name, description, snapshot,
- **param):
- id = self._get_volume_id()
- return self._stub_volume(id)
-
- def setUp(self):
- super(VolumesSampleJsonTest, self).setUp()
- fakes.stub_out_networking(self.stubs)
- fakes.stub_out_rate_limiting(self.stubs)
-
- self.stubs.Set(cinder.API, "delete", self._stub_volume_delete)
- self.stubs.Set(cinder.API, "get", self._stub_volume_get)
- self.stubs.Set(cinder.API, "get_all", self._stub_volume_get_all)
-
- def _post_volume(self):
- subs_req = {
- 'volume_name': "Volume Name",
- 'volume_desc': "Volume Description",
- }
-
- self.stubs.Set(cinder.API, "create", self._stub_volume_create)
- response = self._do_post('os-volumes', 'os-volumes-post-req',
- subs_req)
- subs = self._get_regexes()
- subs.update(subs_req)
- self._verify_response('os-volumes-post-resp', subs, response, 200)
-
- def test_volumes_show(self):
- subs = {
- 'volume_name': "Volume Name",
- 'volume_desc': "Volume Description",
- }
- vol_id = self._get_volume_id()
- response = self._do_get('os-volumes/%s' % vol_id)
- subs.update(self._get_regexes())
- self._verify_response('os-volumes-get-resp', subs, response, 200)
-
- def test_volumes_index(self):
- subs = {
- 'volume_name': "Volume Name",
- 'volume_desc': "Volume Description",
- }
- response = self._do_get('os-volumes')
- subs.update(self._get_regexes())
- self._verify_response('os-volumes-index-resp', subs, response, 200)
-
- def test_volumes_detail(self):
- # For now, index and detail are the same.
- # See the volumes api
- subs = {
- 'volume_name': "Volume Name",
- 'volume_desc': "Volume Description",
- }
- response = self._do_get('os-volumes/detail')
- subs.update(self._get_regexes())
- self._verify_response('os-volumes-detail-resp', subs, response, 200)
-
- def test_volumes_create(self):
- self._post_volume()
-
- def test_volumes_delete(self):
- self._post_volume()
- vol_id = self._get_volume_id()
- response = self._do_delete('os-volumes/%s' % vol_id)
- self.assertEqual(response.status_code, 202)
- self.assertEqual(response.content, '')
diff --git a/nova/tests/keymgr/test_conf_key_mgr.py b/nova/tests/keymgr/test_conf_key_mgr.py
deleted file mode 100644
index 8e2b1abcb9..0000000000
--- a/nova/tests/keymgr/test_conf_key_mgr.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Test cases for the conf key manager.
-"""
-
-import array
-
-from oslo.config import cfg
-
-from nova.keymgr import conf_key_mgr
-from nova.keymgr import key
-from nova.tests.keymgr import test_single_key_mgr
-
-
-CONF = cfg.CONF
-CONF.import_opt('fixed_key', 'nova.keymgr.conf_key_mgr', group='keymgr')
-
-
-class ConfKeyManagerTestCase(test_single_key_mgr.SingleKeyManagerTestCase):
- def __init__(self, *args, **kwargs):
- super(ConfKeyManagerTestCase, self).__init__(*args, **kwargs)
-
- self._hex_key = '0' * 64
-
- def _create_key_manager(self):
- CONF.set_default('fixed_key', default=self._hex_key, group='keymgr')
- return conf_key_mgr.ConfKeyManager()
-
- def setUp(self):
- super(ConfKeyManagerTestCase, self).setUp()
-
- encoded_key = array.array('B', self._hex_key.decode('hex')).tolist()
- self.key = key.SymmetricKey('AES', encoded_key)
-
- def test_init(self):
- key_manager = self._create_key_manager()
- self.assertEqual(self._hex_key, key_manager._hex_key)
-
- def test_init_value_error(self):
- CONF.set_default('fixed_key', default=None, group='keymgr')
- self.assertRaises(ValueError, conf_key_mgr.ConfKeyManager)
-
- def test_generate_hex_key(self):
- key_manager = self._create_key_manager()
- self.assertEqual(self._hex_key, key_manager._generate_hex_key())
diff --git a/nova/tests/keymgr/test_mock_key_mgr.py b/nova/tests/keymgr/test_mock_key_mgr.py
deleted file mode 100644
index 3d56da08a4..0000000000
--- a/nova/tests/keymgr/test_mock_key_mgr.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Test cases for the mock key manager.
-"""
-
-import array
-
-from nova import context
-from nova import exception
-from nova.keymgr import key as keymgr_key
-from nova.keymgr import mock_key_mgr
-from nova.tests.keymgr import test_key_mgr
-
-
-class MockKeyManagerTestCase(test_key_mgr.KeyManagerTestCase):
-
- def _create_key_manager(self):
- return mock_key_mgr.MockKeyManager()
-
- def setUp(self):
- super(MockKeyManagerTestCase, self).setUp()
-
- self.ctxt = context.RequestContext('fake', 'fake')
-
- def test_create_key(self):
- key_id_1 = self.key_mgr.create_key(self.ctxt)
- key_id_2 = self.key_mgr.create_key(self.ctxt)
- # ensure that the UUIDs are unique
- self.assertNotEqual(key_id_1, key_id_2)
-
- def test_create_key_with_length(self):
- for length in [64, 128, 256]:
- key_id = self.key_mgr.create_key(self.ctxt, key_length=length)
- key = self.key_mgr.get_key(self.ctxt, key_id)
- self.assertEqual(length / 8, len(key.get_encoded()))
-
- def test_create_null_context(self):
- self.assertRaises(exception.Forbidden,
- self.key_mgr.create_key, None)
-
- def test_store_key(self):
- secret_key = array.array('B', ('0' * 64).decode('hex')).tolist()
- _key = keymgr_key.SymmetricKey('AES', secret_key)
- key_id = self.key_mgr.store_key(self.ctxt, _key)
-
- actual_key = self.key_mgr.get_key(self.ctxt, key_id)
- self.assertEqual(_key, actual_key)
-
- def test_store_null_context(self):
- self.assertRaises(exception.Forbidden,
- self.key_mgr.store_key, None, None)
-
- def test_copy_key(self):
- key_id = self.key_mgr.create_key(self.ctxt)
- key = self.key_mgr.get_key(self.ctxt, key_id)
-
- copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id)
- copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id)
-
- self.assertNotEqual(key_id, copied_key_id)
- self.assertEqual(key, copied_key)
-
- def test_copy_null_context(self):
- self.assertRaises(exception.Forbidden,
- self.key_mgr.copy_key, None, None)
-
- def test_get_key(self):
- pass
-
- def test_get_null_context(self):
- self.assertRaises(exception.Forbidden,
- self.key_mgr.get_key, None, None)
-
- def test_get_unknown_key(self):
- self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, None)
-
- def test_delete_key(self):
- key_id = self.key_mgr.create_key(self.ctxt)
- self.key_mgr.delete_key(self.ctxt, key_id)
-
- self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, key_id)
-
- def test_delete_null_context(self):
- self.assertRaises(exception.Forbidden,
- self.key_mgr.delete_key, None, None)
-
- def test_delete_unknown_key(self):
- self.assertRaises(KeyError, self.key_mgr.delete_key, self.ctxt, None)
diff --git a/nova/tests/keymgr/test_not_implemented_key_mgr.py b/nova/tests/keymgr/test_not_implemented_key_mgr.py
deleted file mode 100644
index 0419dfc5f0..0000000000
--- a/nova/tests/keymgr/test_not_implemented_key_mgr.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Test cases for the not implemented key manager.
-"""
-
-from nova.keymgr import not_implemented_key_mgr
-from nova.tests.keymgr import test_key_mgr
-
-
-class NotImplementedKeyManagerTestCase(test_key_mgr.KeyManagerTestCase):
-
- def _create_key_manager(self):
- return not_implemented_key_mgr.NotImplementedKeyManager()
-
- def test_create_key(self):
- self.assertRaises(NotImplementedError,
- self.key_mgr.create_key, None)
-
- def test_store_key(self):
- self.assertRaises(NotImplementedError,
- self.key_mgr.store_key, None, None)
-
- def test_copy_key(self):
- self.assertRaises(NotImplementedError,
- self.key_mgr.copy_key, None, None)
-
- def test_get_key(self):
- self.assertRaises(NotImplementedError,
- self.key_mgr.get_key, None, None)
-
- def test_delete_key(self):
- self.assertRaises(NotImplementedError,
- self.key_mgr.delete_key, None, None)
diff --git a/nova/tests/keymgr/test_single_key_mgr.py b/nova/tests/keymgr/test_single_key_mgr.py
deleted file mode 100644
index d6e71a6441..0000000000
--- a/nova/tests/keymgr/test_single_key_mgr.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Test cases for the single key manager.
-"""
-
-import array
-
-from nova import exception
-from nova.keymgr import key
-from nova.keymgr import single_key_mgr
-from nova.tests.keymgr import test_mock_key_mgr
-
-
-class SingleKeyManagerTestCase(test_mock_key_mgr.MockKeyManagerTestCase):
-
- def _create_key_manager(self):
- return single_key_mgr.SingleKeyManager()
-
- def setUp(self):
- super(SingleKeyManagerTestCase, self).setUp()
-
- self.key_id = '00000000-0000-0000-0000-000000000000'
- encoded = array.array('B', ('0' * 64).decode('hex')).tolist()
- self.key = key.SymmetricKey('AES', encoded)
-
- def test___init__(self):
- self.assertEqual(self.key,
- self.key_mgr.get_key(self.ctxt, self.key_id))
-
- def test_create_key(self):
- key_id_1 = self.key_mgr.create_key(self.ctxt)
- key_id_2 = self.key_mgr.create_key(self.ctxt)
- # ensure that the UUIDs are the same
- self.assertEqual(key_id_1, key_id_2)
-
- def test_create_key_with_length(self):
- pass
-
- def test_store_null_context(self):
- self.assertRaises(exception.Forbidden,
- self.key_mgr.store_key, None, self.key)
-
- def test_copy_key(self):
- key_id = self.key_mgr.create_key(self.ctxt)
- key = self.key_mgr.get_key(self.ctxt, key_id)
-
- copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id)
- copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id)
-
- self.assertEqual(key_id, copied_key_id)
- self.assertEqual(key, copied_key)
-
- def test_delete_key(self):
- pass
-
- def test_delete_unknown_key(self):
- self.assertRaises(exception.KeyManagerError,
- self.key_mgr.delete_key, self.ctxt, None)
diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py
deleted file mode 100644
index 2df0b294a2..0000000000
--- a/nova/tests/network/test_api.py
+++ /dev/null
@@ -1,589 +0,0 @@
-# Copyright 2012 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Tests for network API."""
-
-import contextlib
-import itertools
-
-import mock
-import mox
-
-from nova.compute import flavors
-from nova import context
-from nova import exception
-from nova import network
-from nova.network import api
-from nova.network import base_api
-from nova.network import floating_ips
-from nova.network import model as network_model
-from nova.network import rpcapi as network_rpcapi
-from nova import objects
-from nova.objects import fields
-from nova import policy
-from nova import test
-from nova.tests import fake_instance
-from nova.tests.objects import test_fixed_ip
-from nova.tests.objects import test_flavor
-from nova.tests.objects import test_virtual_interface
-from nova import utils
-
-FAKE_UUID = 'a47ae74e-ab08-547f-9eee-ffd23fc46c16'
-
-
-class NetworkPolicyTestCase(test.TestCase):
- def setUp(self):
- super(NetworkPolicyTestCase, self).setUp()
-
- policy.reset()
- policy.init()
-
- self.context = context.get_admin_context()
-
- def tearDown(self):
- super(NetworkPolicyTestCase, self).tearDown()
- policy.reset()
-
- def test_check_policy(self):
- self.mox.StubOutWithMock(policy, 'enforce')
- target = {
- 'project_id': self.context.project_id,
- 'user_id': self.context.user_id,
- }
- policy.enforce(self.context, 'network:get_all', target)
- self.mox.ReplayAll()
- api.check_policy(self.context, 'get_all')
-
-
-class ApiTestCase(test.TestCase):
- def setUp(self):
- super(ApiTestCase, self).setUp()
- self.network_api = network.API()
- self.context = context.RequestContext('fake-user',
- 'fake-project')
-
- @mock.patch('nova.objects.NetworkList.get_all')
- def test_get_all(self, mock_get_all):
- mock_get_all.return_value = mock.sentinel.get_all
- self.assertEqual(mock.sentinel.get_all,
- self.network_api.get_all(self.context))
- mock_get_all.assert_called_once_with(self.context,
- project_only=True)
-
- @mock.patch('nova.objects.NetworkList.get_all')
- def test_get_all_liberal(self, mock_get_all):
- self.flags(network_manager='nova.network.manager.FlatDHCPManaager')
- mock_get_all.return_value = mock.sentinel.get_all
- self.assertEqual(mock.sentinel.get_all,
- self.network_api.get_all(self.context))
- mock_get_all.assert_called_once_with(self.context,
- project_only="allow_none")
-
- @mock.patch('nova.objects.NetworkList.get_all')
- def test_get_all_no_networks(self, mock_get_all):
- mock_get_all.side_effect = exception.NoNetworksFound
- self.assertEqual([], self.network_api.get_all(self.context))
- mock_get_all.assert_called_once_with(self.context,
- project_only=True)
-
- @mock.patch('nova.objects.Network.get_by_uuid')
- def test_get(self, mock_get):
- mock_get.return_value = mock.sentinel.get_by_uuid
- with mock.patch.object(self.context, 'elevated') as elevated:
- elevated.return_value = mock.sentinel.elevated_context
- self.assertEqual(mock.sentinel.get_by_uuid,
- self.network_api.get(self.context, 'fake-uuid'))
- mock_get.assert_called_once_with(mock.sentinel.elevated_context,
- 'fake-uuid')
-
- @mock.patch('nova.objects.Network.get_by_id')
- @mock.patch('nova.db.virtual_interface_get_by_instance')
- def test_get_vifs_by_instance(self, mock_get_by_instance,
- mock_get_by_id):
- mock_get_by_instance.return_value = [
- dict(test_virtual_interface.fake_vif,
- network_id=123)]
- mock_get_by_id.return_value = objects.Network()
- mock_get_by_id.return_value.uuid = mock.sentinel.network_uuid
- instance = objects.Instance(uuid=mock.sentinel.inst_uuid)
- vifs = self.network_api.get_vifs_by_instance(self.context,
- instance)
- self.assertEqual(1, len(vifs))
- self.assertEqual(123, vifs[0].network_id)
- self.assertEqual(str(mock.sentinel.network_uuid), vifs[0].net_uuid)
- mock_get_by_instance.assert_called_once_with(
- self.context, str(mock.sentinel.inst_uuid), use_slave=False)
- mock_get_by_id.assert_called_once_with(self.context, 123,
- project_only='allow_none')
-
- @mock.patch('nova.objects.Network.get_by_id')
- @mock.patch('nova.db.virtual_interface_get_by_address')
- def test_get_vif_by_mac_address(self, mock_get_by_address,
- mock_get_by_id):
- mock_get_by_address.return_value = dict(
- test_virtual_interface.fake_vif, network_id=123)
- mock_get_by_id.return_value = objects.Network(
- uuid=mock.sentinel.network_uuid)
- vif = self.network_api.get_vif_by_mac_address(self.context,
- mock.sentinel.mac)
- self.assertEqual(123, vif.network_id)
- self.assertEqual(str(mock.sentinel.network_uuid), vif.net_uuid)
- mock_get_by_address.assert_called_once_with(self.context,
- mock.sentinel.mac)
- mock_get_by_id.assert_called_once_with(self.context, 123,
- project_only='allow_none')
-
- def test_allocate_for_instance_handles_macs_passed(self):
- # If a macs argument is supplied to the 'nova-network' API, it is just
- # ignored. This test checks that the call down to the rpcapi layer
- # doesn't pass macs down: nova-network doesn't support hypervisor
- # mac address limits (today anyhow).
- macs = set(['ab:cd:ef:01:23:34'])
- self.mox.StubOutWithMock(
- self.network_api.network_rpcapi, "allocate_for_instance")
- kwargs = dict(zip(['host', 'instance_id', 'project_id',
- 'requested_networks', 'rxtx_factor', 'vpn', 'macs',
- 'dhcp_options'],
- itertools.repeat(mox.IgnoreArg())))
- self.network_api.network_rpcapi.allocate_for_instance(
- mox.IgnoreArg(), **kwargs).AndReturn([])
- self.mox.ReplayAll()
- flavor = flavors.get_default_flavor()
- flavor['rxtx_factor'] = 0
- sys_meta = flavors.save_flavor_info({}, flavor)
- instance = dict(id=1, uuid='uuid', project_id='project_id',
- host='host', system_metadata=utils.dict_to_metadata(sys_meta))
- instance = fake_instance.fake_instance_obj(
- self.context, expected_attrs=['system_metadata'], **instance)
- self.network_api.allocate_for_instance(
- self.context, instance, 'vpn', 'requested_networks', macs=macs)
-
- def _do_test_associate_floating_ip(self, orig_instance_uuid):
- """Test post-association logic."""
-
- new_instance = {'uuid': 'new-uuid'}
-
- def fake_associate(*args, **kwargs):
- return orig_instance_uuid
-
- self.stubs.Set(floating_ips.FloatingIP, 'associate_floating_ip',
- fake_associate)
-
- def fake_instance_get_by_uuid(context, instance_uuid,
- columns_to_join=None,
- use_slave=None):
- return fake_instance.fake_db_instance(uuid=instance_uuid)
-
- self.stubs.Set(self.network_api.db, 'instance_get_by_uuid',
- fake_instance_get_by_uuid)
-
- def fake_get_nw_info(ctxt, instance):
- class FakeNWInfo(object):
- def json(self):
- pass
- return FakeNWInfo()
-
- self.stubs.Set(self.network_api, '_get_instance_nw_info',
- fake_get_nw_info)
-
- if orig_instance_uuid:
- expected_updated_instances = [new_instance['uuid'],
- orig_instance_uuid]
- else:
- expected_updated_instances = [new_instance['uuid']]
-
- def fake_instance_info_cache_update(context, instance_uuid, cache):
- self.assertEqual(instance_uuid,
- expected_updated_instances.pop())
-
- self.stubs.Set(self.network_api.db, 'instance_info_cache_update',
- fake_instance_info_cache_update)
-
- def fake_update_instance_cache_with_nw_info(api, context, instance,
- nw_info=None,
- update_cells=True):
- return
-
- self.stubs.Set(base_api, "update_instance_cache_with_nw_info",
- fake_update_instance_cache_with_nw_info)
-
- self.network_api.associate_floating_ip(self.context,
- new_instance,
- '172.24.4.225',
- '10.0.0.2')
-
- def test_associate_preassociated_floating_ip(self):
- self._do_test_associate_floating_ip('orig-uuid')
-
- def test_associate_unassociated_floating_ip(self):
- self._do_test_associate_floating_ip(None)
-
- def test_get_floating_ip_invalid_id(self):
- self.assertRaises(exception.InvalidID,
- self.network_api.get_floating_ip,
- self.context, '123zzz')
-
- @mock.patch('nova.objects.FloatingIP.get_by_id')
- def test_get_floating_ip(self, mock_get):
- floating = mock.sentinel.floating
- mock_get.return_value = floating
- self.assertEqual(floating,
- self.network_api.get_floating_ip(self.context, 123))
- mock_get.assert_called_once_with(self.context, 123)
-
- @mock.patch('nova.objects.FloatingIP.get_pool_names')
- def test_get_floating_ip_pools(self, mock_get):
- pools = ['foo', 'bar']
- mock_get.return_value = pools
- self.assertEqual(pools,
- self.network_api.get_floating_ip_pools(
- self.context))
-
- @mock.patch('nova.objects.FloatingIP.get_by_address')
- def test_get_floating_ip_by_address(self, mock_get):
- floating = mock.sentinel.floating
- mock_get.return_value = floating
- self.assertEqual(floating,
- self.network_api.get_floating_ip_by_address(
- self.context, mock.sentinel.address))
- mock_get.assert_called_once_with(self.context,
- mock.sentinel.address)
-
- @mock.patch('nova.objects.FloatingIPList.get_by_project')
- def test_get_floating_ips_by_project(self, mock_get):
- floatings = mock.sentinel.floating_ips
- mock_get.return_value = floatings
- self.assertEqual(floatings,
- self.network_api.get_floating_ips_by_project(
- self.context))
- mock_get.assert_called_once_with(self.context,
- self.context.project_id)
-
- @mock.patch('nova.objects.FloatingIPList.get_by_fixed_address')
- def test_get_floating_ips_by_fixed_address(self, mock_get):
- floatings = [objects.FloatingIP(id=1, address='1.2.3.4'),
- objects.FloatingIP(id=2, address='5.6.7.8')]
- mock_get.return_value = floatings
- self.assertEqual(['1.2.3.4', '5.6.7.8'],
- self.network_api.get_floating_ips_by_fixed_address(
- self.context, mock.sentinel.fixed_address))
- mock_get.assert_called_once_with(self.context,
- mock.sentinel.fixed_address)
-
- def _stub_migrate_instance_calls(self, method, multi_host, info):
- fake_flavor = flavors.get_default_flavor()
- fake_flavor['rxtx_factor'] = 1.21
- sys_meta = utils.dict_to_metadata(
- flavors.save_flavor_info({}, fake_flavor))
- fake_instance = {'uuid': 'fake_uuid',
- 'instance_type_id': fake_flavor['id'],
- 'project_id': 'fake_project_id',
- 'system_metadata': sys_meta}
- fake_migration = {'source_compute': 'fake_compute_source',
- 'dest_compute': 'fake_compute_dest'}
-
- def fake_mig_inst_method(*args, **kwargs):
- info['kwargs'] = kwargs
-
- def fake_get_multi_addresses(*args, **kwargs):
- return multi_host, ['fake_float1', 'fake_float2']
-
- self.stubs.Set(network_rpcapi.NetworkAPI, method,
- fake_mig_inst_method)
- self.stubs.Set(self.network_api, '_get_multi_addresses',
- fake_get_multi_addresses)
-
- expected = {'instance_uuid': 'fake_uuid',
- 'source_compute': 'fake_compute_source',
- 'dest_compute': 'fake_compute_dest',
- 'rxtx_factor': 1.21,
- 'project_id': 'fake_project_id',
- 'floating_addresses': None}
- if multi_host:
- expected['floating_addresses'] = ['fake_float1', 'fake_float2']
- return fake_instance, fake_migration, expected
-
- def test_migrate_instance_start_with_multhost(self):
- info = {'kwargs': {}}
- arg1, arg2, expected = self._stub_migrate_instance_calls(
- 'migrate_instance_start', True, info)
- expected['host'] = 'fake_compute_source'
- self.network_api.migrate_instance_start(self.context, arg1, arg2)
- self.assertEqual(info['kwargs'], expected)
-
- def test_migrate_instance_start_without_multhost(self):
- info = {'kwargs': {}}
- arg1, arg2, expected = self._stub_migrate_instance_calls(
- 'migrate_instance_start', False, info)
- self.network_api.migrate_instance_start(self.context, arg1, arg2)
- self.assertEqual(info['kwargs'], expected)
-
- def test_migrate_instance_finish_with_multhost(self):
- info = {'kwargs': {}}
- arg1, arg2, expected = self._stub_migrate_instance_calls(
- 'migrate_instance_finish', True, info)
- expected['host'] = 'fake_compute_dest'
- self.network_api.migrate_instance_finish(self.context, arg1, arg2)
- self.assertEqual(info['kwargs'], expected)
-
- def test_migrate_instance_finish_without_multhost(self):
- info = {'kwargs': {}}
- arg1, arg2, expected = self._stub_migrate_instance_calls(
- 'migrate_instance_finish', False, info)
- self.network_api.migrate_instance_finish(self.context, arg1, arg2)
- self.assertEqual(info['kwargs'], expected)
-
- def test_is_multi_host_instance_has_no_fixed_ip(self):
- def fake_fixed_ip_get_by_instance(ctxt, uuid):
- raise exception.FixedIpNotFoundForInstance(instance_uuid=uuid)
- self.stubs.Set(self.network_api.db, 'fixed_ip_get_by_instance',
- fake_fixed_ip_get_by_instance)
- instance = {'uuid': FAKE_UUID}
- result, floats = self.network_api._get_multi_addresses(self.context,
- instance)
- self.assertFalse(result)
-
- @mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
- def _test_is_multi_host_network_has_no_project_id(self, is_multi_host,
- fip_get):
- network = objects.Network(
- id=123, project_id=None,
- multi_host=is_multi_host)
- fip_get.return_value = [
- objects.FixedIP(instance_uuid=FAKE_UUID, network=network,
- floating_ips=objects.FloatingIPList())]
- instance = {'uuid': FAKE_UUID}
- result, floats = self.network_api._get_multi_addresses(self.context,
- instance)
- self.assertEqual(is_multi_host, result)
-
- def test_is_multi_host_network_has_no_project_id_multi(self):
- self._test_is_multi_host_network_has_no_project_id(True)
-
- def test_is_multi_host_network_has_no_project_id_non_multi(self):
- self._test_is_multi_host_network_has_no_project_id(False)
-
- @mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
- def _test_is_multi_host_network_has_project_id(self, is_multi_host,
- fip_get):
- network = objects.Network(
- id=123, project_id=self.context.project_id,
- multi_host=is_multi_host)
- fip_get.return_value = [
- objects.FixedIP(instance_uuid=FAKE_UUID, network=network,
- floating_ips=objects.FloatingIPList())]
- instance = {'uuid': FAKE_UUID}
- result, floats = self.network_api._get_multi_addresses(self.context,
- instance)
- self.assertEqual(is_multi_host, result)
-
- def test_is_multi_host_network_has_project_id_multi(self):
- self._test_is_multi_host_network_has_project_id(True)
-
- def test_is_multi_host_network_has_project_id_non_multi(self):
- self._test_is_multi_host_network_has_project_id(False)
-
- @mock.patch('nova.objects.Network.get_by_uuid')
- @mock.patch('nova.objects.Network.disassociate')
- def test_network_disassociate_project(self, mock_disassociate, mock_get):
- net_obj = objects.Network(context=self.context, id=1)
- mock_get.return_value = net_obj
- self.network_api.associate(self.context, FAKE_UUID, project=None)
- mock_disassociate.assert_called_once_with(self.context, net_obj.id,
- host=False, project=True)
-
- @mock.patch('nova.objects.Network.get_by_uuid')
- @mock.patch('nova.objects.Network.disassociate')
- def test_network_disassociate_host(self, mock_disassociate, mock_get):
- net_obj = objects.Network(context=self.context, id=1)
- mock_get.return_value = net_obj
- self.network_api.associate(self.context, FAKE_UUID, host=None)
- mock_disassociate.assert_called_once_with(self.context, net_obj.id,
- host=True, project=False)
-
- @mock.patch('nova.objects.Network.get_by_uuid')
- @mock.patch('nova.objects.Network.associate')
- def test_network_associate_project(self, mock_associate, mock_get):
- net_obj = objects.Network(context=self.context, id=1)
- mock_get.return_value = net_obj
- project = mock.sentinel.project
- self.network_api.associate(self.context, FAKE_UUID, project=project)
- mock_associate.assert_called_once_with(self.context, project,
- network_id=net_obj.id,
- force=True)
-
- @mock.patch('nova.objects.Network.get_by_uuid')
- @mock.patch('nova.objects.Network.save')
- def test_network_associate_host(self, mock_save, mock_get):
- net_obj = objects.Network(context=self.context, id=1)
- mock_get.return_value = net_obj
- host = str(mock.sentinel.host)
- self.network_api.associate(self.context, FAKE_UUID, host=host)
- mock_save.assert_called_once_with()
- self.assertEqual(host, net_obj.host)
-
- @mock.patch('nova.objects.Network.get_by_uuid')
- @mock.patch('nova.objects.Network.disassociate')
- def test_network_disassociate(self, mock_disassociate, mock_get):
- mock_get.return_value = objects.Network(context=self.context, id=123)
- self.network_api.disassociate(self.context, FAKE_UUID)
- mock_disassociate.assert_called_once_with(self.context, 123,
- project=True, host=True)
-
- def _test_refresh_cache(self, method, *args, **kwargs):
- # This test verifies that no call to get_instance_nw_info() is made
- # from the @refresh_cache decorator for the tested method.
- with contextlib.nested(
- mock.patch.object(self.network_api.network_rpcapi, method),
- mock.patch.object(self.network_api.network_rpcapi,
- 'get_instance_nw_info'),
- mock.patch.object(network_model.NetworkInfo, 'hydrate'),
- ) as (
- method_mock, nwinfo_mock, hydrate_mock
- ):
- nw_info = network_model.NetworkInfo([])
- method_mock.return_value = nw_info
- hydrate_mock.return_value = nw_info
- getattr(self.network_api, method)(*args, **kwargs)
- hydrate_mock.assert_called_once_with(nw_info)
- self.assertFalse(nwinfo_mock.called)
-
- def test_allocate_for_instance_refresh_cache(self):
- sys_meta = flavors.save_flavor_info({}, test_flavor.fake_flavor)
- instance = fake_instance.fake_instance_obj(
- self.context, expected_attrs=['system_metadata'],
- system_metadata=sys_meta)
- vpn = 'fake-vpn'
- requested_networks = 'fake-networks'
- self._test_refresh_cache('allocate_for_instance', self.context,
- instance, vpn, requested_networks)
-
- def test_add_fixed_ip_to_instance_refresh_cache(self):
- sys_meta = flavors.save_flavor_info({}, test_flavor.fake_flavor)
- instance = fake_instance.fake_instance_obj(
- self.context, expected_attrs=['system_metadata'],
- system_metadata=sys_meta)
- network_id = 'fake-network-id'
- self._test_refresh_cache('add_fixed_ip_to_instance', self.context,
- instance, network_id)
-
- def test_remove_fixed_ip_from_instance_refresh_cache(self):
- sys_meta = flavors.save_flavor_info({}, test_flavor.fake_flavor)
- instance = fake_instance.fake_instance_obj(
- self.context, expected_attrs=['system_metadata'],
- system_metadata=sys_meta)
- address = 'fake-address'
- self._test_refresh_cache('remove_fixed_ip_from_instance', self.context,
- instance, address)
-
- @mock.patch('nova.db.fixed_ip_get_by_address')
- def test_get_fixed_ip_by_address(self, fip_get):
- fip_get.return_value = test_fixed_ip.fake_fixed_ip
- fip = self.network_api.get_fixed_ip_by_address(self.context,
- 'fake-addr')
- self.assertIsInstance(fip, objects.FixedIP)
-
- @mock.patch('nova.objects.FixedIP.get_by_id')
- def test_get_fixed_ip(self, mock_get_by_id):
- mock_get_by_id.return_value = mock.sentinel.fixed_ip
- self.assertEqual(mock.sentinel.fixed_ip,
- self.network_api.get_fixed_ip(self.context,
- mock.sentinel.id))
- mock_get_by_id.assert_called_once_with(self.context, mock.sentinel.id)
-
- @mock.patch('nova.objects.FixedIP.get_by_floating_address')
- def test_get_instance_by_floating_address(self, mock_get_by_floating):
- mock_get_by_floating.return_value = objects.FixedIP(
- instance_uuid = mock.sentinel.instance_uuid)
- self.assertEqual(str(mock.sentinel.instance_uuid),
- self.network_api.get_instance_id_by_floating_address(
- self.context, mock.sentinel.floating))
- mock_get_by_floating.assert_called_once_with(self.context,
- mock.sentinel.floating)
-
- @mock.patch('nova.objects.FixedIP.get_by_floating_address')
- def test_get_instance_by_floating_address_none(self, mock_get_by_floating):
- mock_get_by_floating.return_value = None
- self.assertIsNone(
- self.network_api.get_instance_id_by_floating_address(
- self.context, mock.sentinel.floating))
- mock_get_by_floating.assert_called_once_with(self.context,
- mock.sentinel.floating)
-
-
-@mock.patch('nova.network.api.API')
-@mock.patch('nova.db.instance_info_cache_update')
-class TestUpdateInstanceCache(test.TestCase):
- def setUp(self):
- super(TestUpdateInstanceCache, self).setUp()
- self.context = context.get_admin_context()
- self.instance = {'uuid': FAKE_UUID}
- vifs = [network_model.VIF(id='super_vif')]
- self.nw_info = network_model.NetworkInfo(vifs)
- self.nw_json = fields.NetworkModel.to_primitive(self, 'network_info',
- self.nw_info)
-
- def test_update_nw_info_none(self, db_mock, api_mock):
- api_mock._get_instance_nw_info.return_value = self.nw_info
-
- base_api.update_instance_cache_with_nw_info(api_mock, self.context,
- self.instance, None)
- api_mock._get_instance_nw_info.assert_called_once_with(self.context,
- self.instance)
- db_mock.assert_called_once_with(self.context, self.instance['uuid'],
- {'network_info': self.nw_json})
-
- def test_update_nw_info_one_network(self, db_mock, api_mock):
- api_mock._get_instance_nw_info.return_value = self.nw_info
- base_api.update_instance_cache_with_nw_info(api_mock, self.context,
- self.instance, self.nw_info)
- self.assertFalse(api_mock._get_instance_nw_info.called)
- db_mock.assert_called_once_with(self.context, self.instance['uuid'],
- {'network_info': self.nw_json})
-
- def test_update_nw_info_empty_list(self, db_mock, api_mock):
- api_mock._get_instance_nw_info.return_value = self.nw_info
- base_api.update_instance_cache_with_nw_info(api_mock, self.context,
- self.instance,
- network_model.NetworkInfo([]))
- self.assertFalse(api_mock._get_instance_nw_info.called)
- db_mock.assert_called_once_with(self.context, self.instance['uuid'],
- {'network_info': '[]'})
-
- def test_decorator_return_object(self, db_mock, api_mock):
- @base_api.refresh_cache
- def func(self, context, instance):
- return network_model.NetworkInfo([])
- func(api_mock, self.context, self.instance)
- self.assertFalse(api_mock._get_instance_nw_info.called)
- db_mock.assert_called_once_with(self.context, self.instance['uuid'],
- {'network_info': '[]'})
-
- def test_decorator_return_none(self, db_mock, api_mock):
- @base_api.refresh_cache
- def func(self, context, instance):
- pass
- api_mock._get_instance_nw_info.return_value = self.nw_info
- func(api_mock, self.context, self.instance)
- api_mock._get_instance_nw_info.assert_called_once_with(self.context,
- self.instance)
- db_mock.assert_called_once_with(self.context, self.instance['uuid'],
- {'network_info': self.nw_json})
-
-
-class NetworkHooksTestCase(test.BaseHookTestCase):
- def test_instance_network_info_hook(self):
- info_func = base_api.update_instance_cache_with_nw_info
- self.assert_has_hook('instance_network_info', info_func)
diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py
deleted file mode 100644
index 06ac18c8d7..0000000000
--- a/nova/tests/network/test_manager.py
+++ /dev/null
@@ -1,3358 +0,0 @@
-# Copyright 2011 Rackspace
-# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
-# Copyright 2013 IBM Corp.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-
-import fixtures
-import mock
-import mox
-import netaddr
-from oslo.concurrency import processutils
-from oslo.config import cfg
-from oslo.db import exception as db_exc
-from oslo import messaging
-from oslo.utils import importutils
-import six
-
-from nova import context
-from nova import db
-from nova.db.sqlalchemy import models
-from nova import exception
-from nova import ipv6
-from nova.network import floating_ips
-from nova.network import linux_net
-from nova.network import manager as network_manager
-from nova.network import model as net_model
-from nova import objects
-from nova.objects import quotas as quotas_obj
-from nova.objects import virtual_interface as vif_obj
-from nova.openstack.common import log as logging
-from nova import quota
-from nova import test
-from nova.tests import fake_instance
-from nova.tests import fake_ldap
-from nova.tests import fake_network
-from nova.tests import matchers
-from nova.tests.objects import test_fixed_ip
-from nova.tests.objects import test_floating_ip
-from nova.tests.objects import test_network
-from nova.tests.objects import test_service
-from nova import utils
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-
-
-HOST = "testhost"
-FAKEUUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
-
-
-fake_inst = fake_instance.fake_db_instance
-
-
-networks = [{'id': 0,
- 'uuid': FAKEUUID,
- 'label': 'test0',
- 'injected': False,
- 'multi_host': False,
- 'cidr': '192.168.0.0/24',
- 'cidr_v6': '2001:db8::/64',
- 'gateway_v6': '2001:db8::1',
- 'netmask_v6': '64',
- 'netmask': '255.255.255.0',
- 'bridge': 'fa0',
- 'bridge_interface': 'fake_fa0',
- 'gateway': '192.168.0.1',
- 'dhcp_server': '192.168.0.1',
- 'broadcast': '192.168.0.255',
- 'dns1': '192.168.0.1',
- 'dns2': '192.168.0.2',
- 'vlan': None,
- 'host': HOST,
- 'project_id': 'fake_project',
- 'vpn_public_address': '192.168.0.2',
- 'vpn_public_port': '22',
- 'vpn_private_address': '10.0.0.2'},
- {'id': 1,
- 'uuid': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
- 'label': 'test1',
- 'injected': False,
- 'multi_host': False,
- 'cidr': '192.168.1.0/24',
- 'cidr_v6': '2001:db9::/64',
- 'gateway_v6': '2001:db9::1',
- 'netmask_v6': '64',
- 'netmask': '255.255.255.0',
- 'bridge': 'fa1',
- 'bridge_interface': 'fake_fa1',
- 'gateway': '192.168.1.1',
- 'dhcp_server': '192.168.1.1',
- 'broadcast': '192.168.1.255',
- 'dns1': '192.168.0.1',
- 'dns2': '192.168.0.2',
- 'vlan': None,
- 'host': HOST,
- 'project_id': 'fake_project',
- 'vpn_public_address': '192.168.1.2',
- 'vpn_public_port': '22',
- 'vpn_private_address': '10.0.0.2'}]
-
-fixed_ips = [{'id': 0,
- 'network_id': 0,
- 'address': '192.168.0.100',
- 'instance_uuid': 0,
- 'allocated': False,
- 'virtual_interface_id': 0,
- 'floating_ips': []},
- {'id': 0,
- 'network_id': 1,
- 'address': '192.168.1.100',
- 'instance_uuid': 0,
- 'allocated': False,
- 'virtual_interface_id': 0,
- 'floating_ips': []},
- {'id': 0,
- 'network_id': 1,
- 'address': '2001:db9:0:1::10',
- 'instance_uuid': 0,
- 'allocated': False,
- 'virtual_interface_id': 0,
- 'floating_ips': []}]
-
-
-flavor = {'id': 0,
- 'rxtx_cap': 3}
-
-
-floating_ip_fields = {'id': 0,
- 'address': '192.168.10.100',
- 'pool': 'nova',
- 'interface': 'eth0',
- 'fixed_ip_id': 0,
- 'project_id': None,
- 'auto_assigned': False}
-
-vifs = [{'id': 0,
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': 0,
- 'address': 'DE:AD:BE:EF:00:00',
- 'uuid': '00000000-0000-0000-0000-0000000000000000',
- 'network_id': 0,
- 'instance_uuid': 0},
- {'id': 1,
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': 0,
- 'address': 'DE:AD:BE:EF:00:01',
- 'uuid': '00000000-0000-0000-0000-0000000000000001',
- 'network_id': 1,
- 'instance_uuid': 0},
- {'id': 2,
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': 0,
- 'address': 'DE:AD:BE:EF:00:02',
- 'uuid': '00000000-0000-0000-0000-0000000000000002',
- 'network_id': 2,
- 'instance_uuid': 0}]
-
-
-class FlatNetworkTestCase(test.TestCase):
-
- REQUIRES_LOCKING = True
-
- def setUp(self):
- super(FlatNetworkTestCase, self).setUp()
- self.tempdir = self.useFixture(fixtures.TempDir()).path
- self.flags(log_dir=self.tempdir)
- self.flags(use_local=True, group='conductor')
- self.network = network_manager.FlatManager(host=HOST)
- self.network.instance_dns_domain = ''
- self.network.db = db
- self.context = context.RequestContext('testuser', 'testproject',
- is_admin=False)
-
- def test_get_instance_nw_info(self):
- fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info
-
- nw_info = fake_get_instance_nw_info(self.stubs, 0, 2)
- self.assertFalse(nw_info)
-
- nw_info = fake_get_instance_nw_info(self.stubs, 1, 2)
-
- for i, vif in enumerate(nw_info):
- nid = i + 1
- check = {'bridge': 'fake_br%d' % nid,
- 'cidr': '192.168.%s.0/24' % nid,
- 'cidr_v6': '2001:db8:0:%x::/64' % nid,
- 'id': '00000000-0000-0000-0000-00000000000000%02d' % nid,
- 'multi_host': False,
- 'injected': False,
- 'bridge_interface': None,
- 'vlan': None,
- 'broadcast': '192.168.%d.255' % nid,
- 'dhcp_server': '192.168.1.1',
- 'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid],
- 'gateway': '192.168.%d.1' % nid,
- 'gateway_v6': '2001:db8:0:1::1',
- 'label': 'test%d' % nid,
- 'mac': 'DE:AD:BE:EF:00:%02x' % nid,
- 'rxtx_cap': 30,
- 'vif_type': net_model.VIF_TYPE_BRIDGE,
- 'vif_devname': None,
- 'vif_uuid':
- '00000000-0000-0000-0000-00000000000000%02d' % nid,
- 'ovs_interfaceid': None,
- 'qbh_params': None,
- 'qbg_params': None,
- 'should_create_vlan': False,
- 'should_create_bridge': False,
- 'ip': '192.168.%d.%03d' % (nid, nid + 99),
- 'ip_v6': '2001:db8:0:1:dcad:beff:feef:%x' % nid,
- 'netmask': '255.255.255.0',
- 'netmask_v6': 64,
- 'physical_network': None,
- }
-
- network = vif['network']
- net_v4 = vif['network']['subnets'][0]
- net_v6 = vif['network']['subnets'][1]
-
- vif_dict = dict(bridge=network['bridge'],
- cidr=net_v4['cidr'],
- cidr_v6=net_v6['cidr'],
- id=vif['id'],
- multi_host=network.get_meta('multi_host', False),
- injected=network.get_meta('injected', False),
- bridge_interface=
- network.get_meta('bridge_interface'),
- vlan=network.get_meta('vlan'),
- broadcast=str(net_v4.as_netaddr().broadcast),
- dhcp_server=network.get_meta('dhcp_server',
- net_v4['gateway']['address']),
- dns=[ip['address'] for ip in net_v4['dns']],
- gateway=net_v4['gateway']['address'],
- gateway_v6=net_v6['gateway']['address'],
- label=network['label'],
- mac=vif['address'],
- rxtx_cap=vif.get_meta('rxtx_cap'),
- vif_type=vif['type'],
- vif_devname=vif.get('devname'),
- vif_uuid=vif['id'],
- ovs_interfaceid=vif.get('ovs_interfaceid'),
- qbh_params=vif.get('qbh_params'),
- qbg_params=vif.get('qbg_params'),
- should_create_vlan=
- network.get_meta('should_create_vlan', False),
- should_create_bridge=
- network.get_meta('should_create_bridge',
- False),
- ip=net_v4['ips'][i]['address'],
- ip_v6=net_v6['ips'][i]['address'],
- netmask=str(net_v4.as_netaddr().netmask),
- netmask_v6=net_v6.as_netaddr()._prefixlen,
- physical_network=
- network.get_meta('physical_network', None))
-
- self.assertThat(vif_dict, matchers.DictMatches(check))
-
- def test_validate_networks(self):
- self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
- self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
-
- requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
- '192.168.1.100'),
- ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '192.168.0.100')]
- db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(
- [dict(test_network.fake_network, **net)
- for net in networks])
-
- ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[1])
- ip['network'] = dict(test_network.fake_network,
- **networks[1])
- ip['instance_uuid'] = None
- db.fixed_ip_get_by_address(mox.IgnoreArg(),
- mox.IgnoreArg(),
- columns_to_join=mox.IgnoreArg()
- ).AndReturn(ip)
- ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[0])
- ip['network'] = dict(test_network.fake_network,
- **networks[0])
- ip['instance_uuid'] = None
- db.fixed_ip_get_by_address(mox.IgnoreArg(),
- mox.IgnoreArg(),
- columns_to_join=mox.IgnoreArg()
- ).AndReturn(ip)
-
- self.mox.ReplayAll()
- self.network.validate_networks(self.context, requested_networks)
-
- def test_validate_networks_valid_fixed_ipv6(self):
- self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
- self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
-
- requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
- '2001:db9:0:1::10')]
- db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(
- [dict(test_network.fake_network, **networks[1])])
-
- ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[2])
- ip['network'] = dict(test_network.fake_network,
- **networks[1])
- ip['instance_uuid'] = None
- db.fixed_ip_get_by_address(mox.IgnoreArg(),
- mox.IgnoreArg(),
- columns_to_join=mox.IgnoreArg()
- ).AndReturn(ip)
-
- self.mox.ReplayAll()
- self.network.validate_networks(self.context, requested_networks)
-
- def test_validate_reserved(self):
- context_admin = context.RequestContext('testuser', 'testproject',
- is_admin=True)
- nets = self.network.create_networks(context_admin, 'fake',
- '192.168.0.0/24', False, 1,
- 256, None, None, None, None, None)
- self.assertEqual(1, len(nets))
- network = nets[0]
- self.assertEqual(4, db.network_count_reserved_ips(context_admin,
- network['id']))
-
- def test_validate_reserved_start_end(self):
- context_admin = context.RequestContext('testuser', 'testproject',
- is_admin=True)
- nets = self.network.create_networks(context_admin, 'fake',
- '192.168.0.0/24', False, 1,
- 256, dhcp_server='192.168.0.11',
- allowed_start='192.168.0.10',
- allowed_end='192.168.0.245')
- self.assertEqual(1, len(nets))
- network = nets[0]
- # gateway defaults to beginning of allowed_start
- self.assertEqual('192.168.0.10', network['gateway'])
- # vpn_server doesn't conflict with dhcp_start
- self.assertEqual('192.168.0.12', network['vpn_private_address'])
- # dhcp_start doesn't conflict with dhcp_server
- self.assertEqual('192.168.0.13', network['dhcp_start'])
- # NOTE(vish): 10 from the beginning, 10 from the end, and
- # 1 for the gateway, 1 for the dhcp server,
- # 1 for the vpn server
- self.assertEqual(23, db.network_count_reserved_ips(context_admin,
- network['id']))
-
- def test_validate_reserved_start_out_of_range(self):
- context_admin = context.RequestContext('testuser', 'testproject',
- is_admin=True)
- self.assertRaises(exception.AddressOutOfRange,
- self.network.create_networks,
- context_admin, 'fake', '192.168.0.0/24', False,
- 1, 256, allowed_start='192.168.1.10')
-
- def test_validate_reserved_end_invalid(self):
- context_admin = context.RequestContext('testuser', 'testproject',
- is_admin=True)
- self.assertRaises(exception.InvalidAddress,
- self.network.create_networks,
- context_admin, 'fake', '192.168.0.0/24', False,
- 1, 256, allowed_end='invalid')
-
- def test_validate_cidr_invalid(self):
- context_admin = context.RequestContext('testuser', 'testproject',
- is_admin=True)
- self.assertRaises(exception.InvalidCidr,
- self.network.create_networks,
- context_admin, 'fake', 'invalid', False,
- 1, 256)
-
- def test_validate_non_int_size(self):
- context_admin = context.RequestContext('testuser', 'testproject',
- is_admin=True)
- self.assertRaises(exception.InvalidIntValue,
- self.network.create_networks,
- context_admin, 'fake', '192.168.0.0/24', False,
- 1, 'invalid')
-
- def test_validate_networks_none_requested_networks(self):
- self.network.validate_networks(self.context, None)
-
- def test_validate_networks_empty_requested_networks(self):
- requested_networks = []
- self.mox.ReplayAll()
-
- self.network.validate_networks(self.context, requested_networks)
-
- def test_validate_networks_invalid_fixed_ip(self):
- self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
- requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
- '192.168.1.100.1'),
- ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '192.168.0.100.1')]
- db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(
- [dict(test_network.fake_network, **net)
- for net in networks])
- self.mox.ReplayAll()
-
- self.assertRaises(exception.FixedIpInvalid,
- self.network.validate_networks, self.context,
- requested_networks)
-
- def test_validate_networks_empty_fixed_ip(self):
- self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
-
- requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
- ''),
- ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '')]
- db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(
- [dict(test_network.fake_network, **net)
- for net in networks])
- self.mox.ReplayAll()
-
- self.assertRaises(exception.FixedIpInvalid,
- self.network.validate_networks,
- self.context, requested_networks)
-
- def test_validate_networks_none_fixed_ip(self):
- self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
-
- requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
- None),
- ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- None)]
- db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(
- [dict(test_network.fake_network, **net)
- for net in networks])
- self.mox.ReplayAll()
-
- self.network.validate_networks(self.context, requested_networks)
-
- @mock.patch('nova.objects.quotas.Quotas.reserve')
- def test_add_fixed_ip_instance_using_id_without_vpn(self, reserve):
- self.stubs.Set(self.network,
- '_do_trigger_security_group_members_refresh_for_instance',
- lambda *a, **kw: None)
- self.mox.StubOutWithMock(db, 'network_get')
- self.mox.StubOutWithMock(db, 'network_update')
- self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
- self.mox.StubOutWithMock(db,
- 'virtual_interface_get_by_instance_and_network')
- self.mox.StubOutWithMock(db, 'fixed_ip_update')
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
-
- fixed = dict(test_fixed_ip.fake_fixed_ip,
- address='192.168.0.101')
- db.fixed_ip_associate_pool(mox.IgnoreArg(),
- mox.IgnoreArg(),
- instance_uuid=mox.IgnoreArg(),
- host=None).AndReturn(fixed)
-
- db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
- mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
-
- db.fixed_ip_update(mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg())
-
- inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
- db.instance_get_by_uuid(self.context,
- mox.IgnoreArg(), use_slave=False,
- columns_to_join=['info_cache',
- 'security_groups']
- ).AndReturn(inst)
-
- db.network_get(mox.IgnoreArg(),
- mox.IgnoreArg(),
- project_only=mox.IgnoreArg()
- ).AndReturn(dict(test_network.fake_network,
- **networks[0]))
- db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
-
- self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg(), mox.IgnoreArg())
- self.mox.ReplayAll()
- self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
- networks[0]['id'])
- exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
- inst)
- reserve.assert_called_once_with(self.context, fixed_ips=1,
- project_id=exp_project,
- user_id=exp_user)
-
- @mock.patch('nova.objects.quotas.Quotas.reserve')
- def test_add_fixed_ip_instance_using_uuid_without_vpn(self, reserve):
- self.stubs.Set(self.network,
- '_do_trigger_security_group_members_refresh_for_instance',
- lambda *a, **kw: None)
- self.mox.StubOutWithMock(db, 'network_get_by_uuid')
- self.mox.StubOutWithMock(db, 'network_update')
- self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
- self.mox.StubOutWithMock(db,
- 'virtual_interface_get_by_instance_and_network')
- self.mox.StubOutWithMock(db, 'fixed_ip_update')
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
-
- fixed = dict(test_fixed_ip.fake_fixed_ip,
- address='192.168.0.101')
- db.fixed_ip_associate_pool(mox.IgnoreArg(),
- mox.IgnoreArg(),
- instance_uuid=mox.IgnoreArg(),
- host=None).AndReturn(fixed)
-
- db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
- mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
-
- db.fixed_ip_update(mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg())
-
- inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
- db.instance_get_by_uuid(self.context,
- mox.IgnoreArg(), use_slave=False,
- columns_to_join=['info_cache',
- 'security_groups']
- ).AndReturn(inst)
-
- db.network_get_by_uuid(mox.IgnoreArg(),
- mox.IgnoreArg()
- ).AndReturn(dict(test_network.fake_network,
- **networks[0]))
- db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
-
- self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg(), mox.IgnoreArg())
- self.mox.ReplayAll()
- self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
- networks[0]['uuid'])
- exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
- inst)
- reserve.assert_called_once_with(self.context, fixed_ips=1,
- project_id=exp_project,
- user_id=exp_user)
-
- def test_mini_dns_driver(self):
- zone1 = "example.org"
- zone2 = "example.com"
- driver = self.network.instance_dns_manager
- driver.create_entry("hostone", "10.0.0.1", "A", zone1)
- driver.create_entry("hosttwo", "10.0.0.2", "A", zone1)
- driver.create_entry("hostthree", "10.0.0.3", "A", zone1)
- driver.create_entry("hostfour", "10.0.0.4", "A", zone1)
- driver.create_entry("hostfive", "10.0.0.5", "A", zone2)
-
- driver.delete_entry("hostone", zone1)
- driver.modify_address("hostfour", "10.0.0.1", zone1)
- driver.modify_address("hostthree", "10.0.0.1", zone1)
- names = driver.get_entries_by_address("10.0.0.1", zone1)
- self.assertEqual(len(names), 2)
- self.assertIn('hostthree', names)
- self.assertIn('hostfour', names)
-
- names = driver.get_entries_by_address("10.0.0.5", zone2)
- self.assertEqual(len(names), 1)
- self.assertIn('hostfive', names)
-
- addresses = driver.get_entries_by_name("hosttwo", zone1)
- self.assertEqual(len(addresses), 1)
- self.assertIn('10.0.0.2', addresses)
-
- self.assertRaises(exception.InvalidInput,
- driver.create_entry,
- "hostname",
- "10.10.10.10",
- "invalidtype",
- zone1)
-
- def test_mini_dns_driver_with_mixed_case(self):
- zone1 = "example.org"
- driver = self.network.instance_dns_manager
- driver.create_entry("HostTen", "10.0.0.10", "A", zone1)
- addresses = driver.get_entries_by_address("10.0.0.10", zone1)
- self.assertEqual(len(addresses), 1)
- for n in addresses:
- driver.delete_entry(n, zone1)
- addresses = driver.get_entries_by_address("10.0.0.10", zone1)
- self.assertEqual(len(addresses), 0)
-
- @mock.patch('nova.objects.quotas.Quotas.reserve')
- def test_instance_dns(self, reserve):
- self.stubs.Set(self.network,
- '_do_trigger_security_group_members_refresh_for_instance',
- lambda *a, **kw: None)
- fixedip = dict(test_fixed_ip.fake_fixed_ip,
- address='192.168.0.101')
- self.mox.StubOutWithMock(db, 'network_get_by_uuid')
- self.mox.StubOutWithMock(db, 'network_update')
- self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
- self.mox.StubOutWithMock(db,
- 'virtual_interface_get_by_instance_and_network')
- self.mox.StubOutWithMock(db, 'fixed_ip_update')
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
-
- db.fixed_ip_associate_pool(mox.IgnoreArg(),
- mox.IgnoreArg(),
- instance_uuid=mox.IgnoreArg(),
- host=None
- ).AndReturn(fixedip)
-
- db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
- mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
-
- db.fixed_ip_update(mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg())
-
- inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
- db.instance_get_by_uuid(self.context,
- mox.IgnoreArg(), use_slave=False,
- columns_to_join=['info_cache',
- 'security_groups']
- ).AndReturn(inst)
-
- db.network_get_by_uuid(mox.IgnoreArg(),
- mox.IgnoreArg()
- ).AndReturn(dict(test_network.fake_network,
- **networks[0]))
- db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
-
- self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg(), mox.IgnoreArg())
- self.mox.ReplayAll()
- self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
- networks[0]['uuid'])
-
- instance_manager = self.network.instance_dns_manager
- addresses = instance_manager.get_entries_by_name(HOST,
- self.network.instance_dns_domain)
- self.assertEqual(len(addresses), 1)
- self.assertEqual(addresses[0], fixedip['address'])
- addresses = instance_manager.get_entries_by_name(FAKEUUID,
- self.network.instance_dns_domain)
- self.assertEqual(len(addresses), 1)
- self.assertEqual(addresses[0], fixedip['address'])
- exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
- inst)
- reserve.assert_called_once_with(self.context, fixed_ips=1,
- project_id=exp_project,
- user_id=exp_user)
-
- def test_allocate_floating_ip(self):
- self.assertIsNone(self.network.allocate_floating_ip(self.context,
- 1, None))
-
- def test_deallocate_floating_ip(self):
- self.assertIsNone(self.network.deallocate_floating_ip(self.context,
- 1, None))
-
- def test_associate_floating_ip(self):
- self.assertIsNone(self.network.associate_floating_ip(self.context,
- None, None))
-
- def test_disassociate_floating_ip(self):
- self.assertIsNone(self.network.disassociate_floating_ip(self.context,
- None, None))
-
- def test_get_networks_by_uuids_ordering(self):
- self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
-
- requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
- db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(
- [dict(test_network.fake_network, **net)
- for net in networks])
-
- self.mox.ReplayAll()
- res = self.network._get_networks_by_uuids(self.context,
- requested_networks)
-
- self.assertEqual(res[0]['id'], 1)
- self.assertEqual(res[1]['id'], 0)
-
- @mock.patch('nova.objects.instance.Instance.get_by_uuid')
- @mock.patch('nova.objects.quotas.Quotas.reserve')
- @mock.patch('nova.objects.quotas.ids_from_instance')
- def test_allocate_calculates_quota_auth(self, util_method, reserve,
- get_by_uuid):
- inst = objects.Instance()
- inst['uuid'] = 'nosuch'
- get_by_uuid.return_value = inst
- reserve.side_effect = exception.OverQuota(overs='testing',
- quotas={'fixed_ips': 10},
- headroom={'fixed_ips': 0})
- util_method.return_value = ('foo', 'bar')
- self.assertRaises(exception.FixedIpLimitExceeded,
- self.network.allocate_fixed_ip,
- self.context, 123, {'uuid': 'nosuch'})
- util_method.assert_called_once_with(self.context, inst)
-
- @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address')
- @mock.patch('nova.objects.quotas.Quotas.reserve')
- @mock.patch('nova.objects.quotas.ids_from_instance')
- def test_deallocate_calculates_quota_auth(self, util_method, reserve,
- get_by_address):
- inst = objects.Instance(uuid='fake-uuid')
- fip = objects.FixedIP(instance_uuid='fake-uuid',
- virtual_interface_id=1)
- get_by_address.return_value = fip
- util_method.return_value = ('foo', 'bar')
- # This will fail right after the reserve call when it tries
- # to look up the fake instance we created above
- self.assertRaises(exception.InstanceNotFound,
- self.network.deallocate_fixed_ip,
- self.context, '1.2.3.4', instance=inst)
- util_method.assert_called_once_with(self.context, inst)
-
- @mock.patch('nova.objects.instance.Instance.get_by_uuid')
- @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
- def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
- mock_get):
- mock_associate.side_effect = test.TestingException
- instance = objects.Instance(context=self.context)
- instance.create()
- mock_get.return_value = instance
- self.assertRaises(test.TestingException,
- self.network.allocate_fixed_ip,
- self.context, instance.uuid,
- {'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
- address=netaddr.IPAddress('1.2.3.4'))
- mock_associate.assert_called_once_with(self.context,
- '1.2.3.4',
- instance.uuid,
- 1)
-
- @mock.patch('nova.objects.instance.Instance.get_by_uuid')
- @mock.patch('nova.objects.virtual_interface.VirtualInterface'
- '.get_by_instance_and_network')
- @mock.patch('nova.objects.fixed_ip.FixedIP.disassociate')
- @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
- @mock.patch('nova.objects.fixed_ip.FixedIP.save')
- def test_allocate_fixed_ip_cleanup(self,
- mock_fixedip_save,
- mock_fixedip_associate,
- mock_fixedip_disassociate,
- mock_vif_get,
- mock_instance_get):
- address = netaddr.IPAddress('1.2.3.4')
-
- fip = objects.FixedIP(instance_uuid='fake-uuid',
- address=address,
- virtual_interface_id=1)
- mock_fixedip_associate.return_value = fip
-
- instance = objects.Instance(context=self.context)
- instance.create()
- mock_instance_get.return_value = instance
-
- mock_vif_get.return_value = vif_obj.VirtualInterface(
- instance_uuid='fake-uuid', id=1)
-
- with contextlib.nested(
- mock.patch.object(self.network, '_setup_network_on_host'),
- mock.patch.object(self.network, 'instance_dns_manager'),
- mock.patch.object(self.network,
- '_do_trigger_security_group_members_refresh_for_instance')
- ) as (mock_setup_network, mock_dns_manager, mock_ignored):
- mock_setup_network.side_effect = test.TestingException
- self.assertRaises(test.TestingException,
- self.network.allocate_fixed_ip,
- self.context, instance.uuid,
- {'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
- address=address)
-
- mock_dns_manager.delete_entry.assert_has_calls([
- mock.call(instance.display_name, ''),
- mock.call(instance.uuid, '')
- ])
-
- mock_fixedip_disassociate.assert_called_once_with(self.context)
-
-
-class FlatDHCPNetworkTestCase(test.TestCase):
-
- REQUIRES_LOCKING = True
-
- def setUp(self):
- super(FlatDHCPNetworkTestCase, self).setUp()
- self.useFixture(test.SampleNetworks())
- self.flags(use_local=True, group='conductor')
- self.network = network_manager.FlatDHCPManager(host=HOST)
- self.network.db = db
- self.context = context.RequestContext('testuser', 'testproject',
- is_admin=False)
- self.context_admin = context.RequestContext('testuser', 'testproject',
- is_admin=True)
-
- @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
- @mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
- @mock.patch('nova.network.linux_net.iptables_manager._apply')
- def test_init_host_iptables_defer_apply(self, iptable_apply,
- floating_get_by_host,
- fixed_get_by_id):
- def get_by_id(context, fixed_ip_id, **kwargs):
- net = objects.Network(bridge='testbridge',
- cidr='192.168.1.0/24')
- if fixed_ip_id == 1:
- return objects.FixedIP(address='192.168.1.4',
- network=net)
- elif fixed_ip_id == 2:
- return objects.FixedIP(address='192.168.1.5',
- network=net)
-
- def fake_apply():
- fake_apply.count += 1
-
- fake_apply.count = 0
- ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
- float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
- float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
- float1._context = ctxt
- float2._context = ctxt
-
- iptable_apply.side_effect = fake_apply
- floating_get_by_host.return_value = [float1, float2]
- fixed_get_by_id.side_effect = get_by_id
-
- self.network.init_host()
- self.assertEqual(1, fake_apply.count)
-
-
-class VlanNetworkTestCase(test.TestCase):
-
- REQUIRES_LOCKING = True
-
- def setUp(self):
- super(VlanNetworkTestCase, self).setUp()
- self.useFixture(test.SampleNetworks())
- self.flags(use_local=True, group='conductor')
- self.network = network_manager.VlanManager(host=HOST)
- self.network.db = db
- self.context = context.RequestContext('testuser', 'testproject',
- is_admin=False)
- self.context_admin = context.RequestContext('testuser', 'testproject',
- is_admin=True)
-
- def test_quota_driver_type(self):
- self.assertEqual(objects.QuotasNoOp,
- self.network.quotas_cls)
-
- def test_vpn_allocate_fixed_ip(self):
- self.mox.StubOutWithMock(db, 'fixed_ip_associate')
- self.mox.StubOutWithMock(db, 'fixed_ip_update')
- self.mox.StubOutWithMock(db,
- 'virtual_interface_get_by_instance_and_network')
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
-
- fixed = dict(test_fixed_ip.fake_fixed_ip,
- address='192.168.0.1')
- db.fixed_ip_associate(mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg(),
- network_id=mox.IgnoreArg(),
- reserved=True).AndReturn(fixed)
- db.fixed_ip_update(mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg())
- db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
- mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
- db.instance_get_by_uuid(mox.IgnoreArg(),
- mox.IgnoreArg(), use_slave=False,
- columns_to_join=['info_cache',
- 'security_groups']
- ).AndReturn(fake_inst(display_name=HOST,
- uuid=FAKEUUID))
- self.mox.ReplayAll()
-
- network = objects.Network._from_db_object(
- self.context, objects.Network(),
- dict(test_network.fake_network, **networks[0]))
- network.vpn_private_address = '192.168.0.2'
- self.network.allocate_fixed_ip(self.context, FAKEUUID, network,
- vpn=True)
-
- def test_vpn_allocate_fixed_ip_no_network_id(self):
- network = dict(networks[0])
- network['vpn_private_address'] = '192.168.0.2'
- network['id'] = None
- instance = db.instance_create(self.context, {})
- self.assertRaises(exception.FixedIpNotFoundForNetwork,
- self.network.allocate_fixed_ip,
- self.context_admin,
- instance['uuid'],
- network,
- vpn=True)
-
- def test_allocate_fixed_ip(self):
- self.stubs.Set(self.network,
- '_do_trigger_security_group_members_refresh_for_instance',
- lambda *a, **kw: None)
- self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
- self.mox.StubOutWithMock(db, 'fixed_ip_update')
- self.mox.StubOutWithMock(db,
- 'virtual_interface_get_by_instance_and_network')
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
-
- fixed = dict(test_fixed_ip.fake_fixed_ip,
- address='192.168.0.1')
- db.fixed_ip_associate_pool(mox.IgnoreArg(),
- mox.IgnoreArg(),
- instance_uuid=mox.IgnoreArg(),
- host=None).AndReturn(fixed)
- db.fixed_ip_update(mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg())
- db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
- mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
- db.instance_get_by_uuid(mox.IgnoreArg(),
- mox.IgnoreArg(), use_slave=False,
- columns_to_join=['info_cache',
- 'security_groups']
- ).AndReturn(fake_inst(display_name=HOST,
- uuid=FAKEUUID))
- self.mox.ReplayAll()
-
- network = objects.Network._from_db_object(
- self.context, objects.Network(),
- dict(test_network.fake_network, **networks[0]))
- network.vpn_private_address = '192.168.0.2'
- self.network.allocate_fixed_ip(self.context, FAKEUUID, network)
-
- @mock.patch('nova.objects.instance.Instance.get_by_uuid')
- @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
- def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
- mock_get):
- mock_associate.side_effect = test.TestingException
- instance = objects.Instance(context=self.context)
- instance.create()
- mock_get.return_value = instance
- self.assertRaises(test.TestingException,
- self.network.allocate_fixed_ip,
- self.context, instance.uuid,
- {'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
- address=netaddr.IPAddress('1.2.3.4'))
- mock_associate.assert_called_once_with(self.context,
- '1.2.3.4',
- instance.uuid,
- 1)
-
- @mock.patch('nova.objects.instance.Instance.get_by_uuid')
- @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
- def test_allocate_fixed_ip_passes_string_address_vpn(self, mock_associate,
- mock_get):
- mock_associate.side_effect = test.TestingException
- instance = objects.Instance(context=self.context)
- instance.create()
- mock_get.return_value = instance
- self.assertRaises(test.TestingException,
- self.network.allocate_fixed_ip,
- self.context, instance.uuid,
- {'cidr': '24', 'id': 1, 'uuid': 'nosuch',
- 'vpn_private_address': netaddr.IPAddress('1.2.3.4')
- }, vpn=1)
- mock_associate.assert_called_once_with(self.context,
- '1.2.3.4',
- instance.uuid,
- 1, reserved=True)
-
- def test_create_networks_too_big(self):
- self.assertRaises(ValueError, self.network.create_networks, None,
- num_networks=4094, vlan_start=1)
-
- def test_create_networks_too_many(self):
- self.assertRaises(ValueError, self.network.create_networks, None,
- num_networks=100, vlan_start=1,
- cidr='192.168.0.1/24', network_size=100)
-
- def test_duplicate_vlan_raises(self):
- # VLAN 100 is already used and we force the network to be created
- # in that vlan (vlan=100).
- self.assertRaises(exception.DuplicateVlan,
- self.network.create_networks,
- self.context_admin, label="fake", num_networks=1,
- vlan=100, cidr='192.168.0.1/24', network_size=100)
-
- def test_vlan_start(self):
- # VLAN 100 and 101 are used, so this network shoud be created in 102
- networks = self.network.create_networks(
- self.context_admin, label="fake", num_networks=1,
- vlan_start=100, cidr='192.168.3.1/24',
- network_size=100)
-
- self.assertEqual(networks[0]["vlan"], 102)
-
- def test_vlan_start_multiple(self):
- # VLAN 100 and 101 are used, so these networks shoud be created in 102
- # and 103
- networks = self.network.create_networks(
- self.context_admin, label="fake", num_networks=2,
- vlan_start=100, cidr='192.168.3.1/24',
- network_size=100)
-
- self.assertEqual(networks[0]["vlan"], 102)
- self.assertEqual(networks[1]["vlan"], 103)
-
- def test_vlan_start_used(self):
- # VLAN 100 and 101 are used, but vlan_start=99.
- networks = self.network.create_networks(
- self.context_admin, label="fake", num_networks=1,
- vlan_start=99, cidr='192.168.3.1/24',
- network_size=100)
-
- self.assertEqual(networks[0]["vlan"], 102)
-
- def test_vlan_parameter(self):
- # vlan parameter could not be greater than 4094
- exc = self.assertRaises(ValueError,
- self.network.create_networks,
- self.context_admin, label="fake",
- num_networks=1,
- vlan=4095, cidr='192.168.0.1/24')
- error_msg = 'The vlan number cannot be greater than 4094'
- self.assertIn(error_msg, six.text_type(exc))
-
- # vlan parameter could not be less than 1
- exc = self.assertRaises(ValueError,
- self.network.create_networks,
- self.context_admin, label="fake",
- num_networks=1,
- vlan=0, cidr='192.168.0.1/24')
- error_msg = 'The vlan number cannot be less than 1'
- self.assertIn(error_msg, six.text_type(exc))
-
- def test_vlan_be_integer(self):
- # vlan must be an integer
- exc = self.assertRaises(ValueError,
- self.network.create_networks,
- self.context_admin, label="fake",
- num_networks=1,
- vlan='fake', cidr='192.168.0.1/24')
- error_msg = 'vlan must be an integer'
- self.assertIn(error_msg, six.text_type(exc))
-
- @mock.patch('nova.db.network_get')
- def test_validate_networks(self, net_get):
- def network_get(_context, network_id, project_only='allow_none'):
- return dict(test_network.fake_network, **networks[network_id])
-
- net_get.side_effect = network_get
- self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
- self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")
-
- requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
- '192.168.1.100'),
- ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '192.168.0.100')]
- db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(
- [dict(test_network.fake_network, **net)
- for net in networks])
-
- db_fixed1 = dict(test_fixed_ip.fake_fixed_ip,
- network_id=networks[1]['id'],
- network=dict(test_network.fake_network,
- **networks[1]),
- instance_uuid=None)
- db.fixed_ip_get_by_address(mox.IgnoreArg(),
- mox.IgnoreArg(),
- columns_to_join=mox.IgnoreArg()
- ).AndReturn(db_fixed1)
- db_fixed2 = dict(test_fixed_ip.fake_fixed_ip,
- network_id=networks[0]['id'],
- network=dict(test_network.fake_network,
- **networks[0]),
- instance_uuid=None)
- db.fixed_ip_get_by_address(mox.IgnoreArg(),
- mox.IgnoreArg(),
- columns_to_join=mox.IgnoreArg()
- ).AndReturn(db_fixed2)
-
- self.mox.ReplayAll()
- self.network.validate_networks(self.context, requested_networks)
-
- def test_validate_networks_none_requested_networks(self):
- self.network.validate_networks(self.context, None)
-
- def test_validate_networks_empty_requested_networks(self):
- requested_networks = []
- self.mox.ReplayAll()
-
- self.network.validate_networks(self.context, requested_networks)
-
- def test_validate_networks_invalid_fixed_ip(self):
- self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
- requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
- '192.168.1.100.1'),
- ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- '192.168.0.100.1')]
- db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(
- [dict(test_network.fake_network, **net)
- for net in networks])
- self.mox.ReplayAll()
-
- self.assertRaises(exception.FixedIpInvalid,
- self.network.validate_networks, self.context,
- requested_networks)
-
- def test_validate_networks_empty_fixed_ip(self):
- self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
-
- requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', ''),
- ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '')]
- db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(
- [dict(test_network.fake_network, **net)
- for net in networks])
- self.mox.ReplayAll()
-
- self.assertRaises(exception.FixedIpInvalid,
- self.network.validate_networks,
- self.context, requested_networks)
-
- def test_validate_networks_none_fixed_ip(self):
- self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
-
- requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', None),
- ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)]
- db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(
- [dict(test_network.fake_network, **net)
- for net in networks])
- self.mox.ReplayAll()
- self.network.validate_networks(self.context, requested_networks)
-
- def test_floating_ip_owned_by_project(self):
- ctxt = context.RequestContext('testuser', 'testproject',
- is_admin=False)
-
- # raises because floating_ip project_id is None
- floating_ip = objects.FloatingIP(address='10.0.0.1',
- project_id=None)
- self.assertRaises(exception.Forbidden,
- self.network._floating_ip_owned_by_project,
- ctxt,
- floating_ip)
-
- # raises because floating_ip project_id is not equal to ctxt project_id
- floating_ip = objects.FloatingIP(address='10.0.0.1',
- project_id=ctxt.project_id + '1')
- self.assertRaises(exception.Forbidden,
- self.network._floating_ip_owned_by_project,
- ctxt,
- floating_ip)
-
- # does not raise (floating ip is owned by ctxt project)
- floating_ip = objects.FloatingIP(address='10.0.0.1',
- project_id=ctxt.project_id)
- self.network._floating_ip_owned_by_project(ctxt, floating_ip)
-
- ctxt = context.RequestContext(None, None,
- is_admin=True)
-
- # does not raise (ctxt is admin)
- floating_ip = objects.FloatingIP(address='10.0.0.1',
- project_id=None)
- self.network._floating_ip_owned_by_project(ctxt, floating_ip)
-
- # does not raise (ctxt is admin)
- floating_ip = objects.FloatingIP(address='10.0.0.1',
- project_id='testproject')
- self.network._floating_ip_owned_by_project(ctxt, floating_ip)
-
- def test_allocate_floating_ip(self):
- ctxt = context.RequestContext('testuser', 'testproject',
- is_admin=False)
-
- def fake_allocate_address(*args, **kwargs):
- return {'address': '10.0.0.1', 'project_id': ctxt.project_id}
-
- self.stubs.Set(self.network.db, 'floating_ip_allocate_address',
- fake_allocate_address)
-
- self.network.allocate_floating_ip(ctxt, ctxt.project_id)
-
- @mock.patch('nova.quota.QUOTAS.reserve')
- @mock.patch('nova.quota.QUOTAS.commit')
- def test_deallocate_floating_ip(self, mock_commit, mock_reserve):
- ctxt = context.RequestContext('testuser', 'testproject',
- is_admin=False)
-
- def fake1(*args, **kwargs):
- return dict(test_floating_ip.fake_floating_ip)
-
- def fake2(*args, **kwargs):
- return dict(test_floating_ip.fake_floating_ip,
- address='10.0.0.1', fixed_ip_id=1)
-
- def fake3(*args, **kwargs):
- return dict(test_floating_ip.fake_floating_ip,
- address='10.0.0.1', fixed_ip_id=None,
- project_id=ctxt.project_id)
-
- self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1)
- self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
-
- # this time should raise because floating ip is associated to fixed_ip
- self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
- self.assertRaises(exception.FloatingIpAssociated,
- self.network.deallocate_floating_ip,
- ctxt,
- mox.IgnoreArg())
-
- mock_reserve.return_value = 'reserve'
- # this time should not raise
- self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
- self.network.deallocate_floating_ip(ctxt, ctxt.project_id)
-
- mock_commit.assert_called_once_with(ctxt, 'reserve',
- project_id='testproject')
-
- @mock.patch('nova.db.fixed_ip_get')
- def test_associate_floating_ip(self, fixed_get):
- ctxt = context.RequestContext('testuser', 'testproject',
- is_admin=False)
-
- def fake1(*args, **kwargs):
- return dict(test_fixed_ip.fake_fixed_ip,
- address='10.0.0.1',
- network=test_network.fake_network)
-
- # floating ip that's already associated
- def fake2(*args, **kwargs):
- return dict(test_floating_ip.fake_floating_ip,
- address='10.0.0.1',
- pool='nova',
- interface='eth0',
- fixed_ip_id=1)
-
- # floating ip that isn't associated
- def fake3(*args, **kwargs):
- return dict(test_floating_ip.fake_floating_ip,
- address='10.0.0.1',
- pool='nova',
- interface='eth0',
- fixed_ip_id=None)
-
- # fixed ip with remote host
- def fake4(*args, **kwargs):
- return dict(test_fixed_ip.fake_fixed_ip,
- address='10.0.0.1',
- pool='nova',
- instance_uuid=FAKEUUID,
- interface='eth0',
- network_id=123)
-
- def fake4_network(*args, **kwargs):
- return dict(test_network.fake_network,
- multi_host=False, host='jibberjabber')
-
- # fixed ip with local host
- def fake5(*args, **kwargs):
- return dict(test_fixed_ip.fake_fixed_ip,
- address='10.0.0.1',
- pool='nova',
- instance_uuid=FAKEUUID,
- interface='eth0',
- network_id=1234)
-
- def fake5_network(*args, **kwargs):
- return dict(test_network.fake_network,
- multi_host=False, host='testhost')
-
- def fake6(ctxt, method, **kwargs):
- self.local = False
-
- def fake7(*args, **kwargs):
- self.local = True
-
- def fake8(*args, **kwargs):
- raise processutils.ProcessExecutionError('',
- 'Cannot find device "em0"\n')
-
- def fake9(*args, **kwargs):
- raise test.TestingException()
-
- # raises because interface doesn't exist
- self.stubs.Set(self.network.db,
- 'floating_ip_fixed_ip_associate',
- fake1)
- self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1)
- self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake8)
- self.assertRaises(exception.NoFloatingIpInterface,
- self.network._associate_floating_ip,
- ctxt,
- '1.2.3.4',
- '1.2.3.5',
- mox.IgnoreArg(),
- mox.IgnoreArg())
-
- self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
-
- # raises because floating_ip is already associated to a fixed_ip
- self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
- self.stubs.Set(self.network, 'disassociate_floating_ip', fake9)
-
- fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
- address='1.2.3.4',
- instance_uuid='fake_uuid',
- network=test_network.fake_network)
-
- # doesn't raise because we exit early if the address is the same
- self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), '1.2.3.4')
-
- # raises because we call disassociate which is mocked
- self.assertRaises(test.TestingException,
- self.network.associate_floating_ip,
- ctxt,
- mox.IgnoreArg(),
- 'new')
-
- self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
-
- # does not raise and makes call remotely
- self.local = True
- self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4)
- self.stubs.Set(self.network.db, 'network_get', fake4_network)
- self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
- lambda **kw: self.network.network_rpcapi.client)
- self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
- self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
- mox.IgnoreArg())
- self.assertFalse(self.local)
-
- # does not raise and makes call locally
- self.local = False
- self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5)
- self.stubs.Set(self.network.db, 'network_get', fake5_network)
- self.stubs.Set(self.network, '_associate_floating_ip', fake7)
- self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
- mox.IgnoreArg())
- self.assertTrue(self.local)
-
- def test_add_floating_ip_nat_before_bind(self):
- # Tried to verify order with documented mox record/verify
- # functionality, but it doesn't seem to work since I can't make it
- # fail. I'm using stubs and a flag for now, but if this mox feature
- # can be made to work, it would be a better way to test this.
- #
- # self.mox.StubOutWithMock(self.network.driver,
- # 'ensure_floating_forward')
- # self.mox.StubOutWithMock(self.network.driver, 'bind_floating_ip')
- #
- # self.network.driver.ensure_floating_forward(mox.IgnoreArg(),
- # mox.IgnoreArg(),
- # mox.IgnoreArg(),
- # mox.IgnoreArg())
- # self.network.driver.bind_floating_ip(mox.IgnoreArg(),
- # mox.IgnoreArg())
- # self.mox.ReplayAll()
-
- nat_called = [False]
-
- def fake_nat(*args, **kwargs):
- nat_called[0] = True
-
- def fake_bind(*args, **kwargs):
- self.assertTrue(nat_called[0])
-
- self.stubs.Set(self.network.driver,
- 'ensure_floating_forward',
- fake_nat)
- self.stubs.Set(self.network.driver, 'bind_floating_ip', fake_bind)
-
- self.network.l3driver.add_floating_ip('fakefloat',
- 'fakefixed',
- 'fakeiface',
- 'fakenet')
-
- @mock.patch('nova.db.floating_ip_get_all_by_host')
- @mock.patch('nova.db.fixed_ip_get')
- def _test_floating_ip_init_host(self, fixed_get, floating_get,
- public_interface, expected_arg):
-
- floating_get.return_value = [
- dict(test_floating_ip.fake_floating_ip,
- interface='foo',
- address='1.2.3.4'),
- dict(test_floating_ip.fake_floating_ip,
- interface='fakeiface',
- address='1.2.3.5',
- fixed_ip_id=1),
- dict(test_floating_ip.fake_floating_ip,
- interface='bar',
- address='1.2.3.6',
- fixed_ip_id=2),
- ]
-
- def fixed_ip_get(_context, fixed_ip_id, get_network):
- if fixed_ip_id == 1:
- return dict(test_fixed_ip.fake_fixed_ip,
- address='1.2.3.4',
- network=test_network.fake_network)
- raise exception.FixedIpNotFound(id=fixed_ip_id)
- fixed_get.side_effect = fixed_ip_get
-
- self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip')
- self.flags(public_interface=public_interface)
- self.network.l3driver.add_floating_ip(netaddr.IPAddress('1.2.3.5'),
- netaddr.IPAddress('1.2.3.4'),
- expected_arg,
- mox.IsA(objects.Network))
- self.mox.ReplayAll()
- self.network.init_host_floating_ips()
- self.mox.UnsetStubs()
- self.mox.VerifyAll()
-
- def test_floating_ip_init_host_without_public_interface(self):
- self._test_floating_ip_init_host(public_interface=False,
- expected_arg='fakeiface')
-
- def test_floating_ip_init_host_with_public_interface(self):
- self._test_floating_ip_init_host(public_interface='fooiface',
- expected_arg='fooiface')
-
- def test_disassociate_floating_ip(self):
- ctxt = context.RequestContext('testuser', 'testproject',
- is_admin=False)
-
- def fake1(*args, **kwargs):
- pass
-
- # floating ip that isn't associated
- def fake2(*args, **kwargs):
- return dict(test_floating_ip.fake_floating_ip,
- address='10.0.0.1',
- pool='nova',
- interface='eth0',
- fixed_ip_id=None)
-
- # floating ip that is associated
- def fake3(*args, **kwargs):
- return dict(test_floating_ip.fake_floating_ip,
- address='10.0.0.1',
- pool='nova',
- interface='eth0',
- fixed_ip_id=1,
- project_id=ctxt.project_id)
-
- # fixed ip with remote host
- def fake4(*args, **kwargs):
- return dict(test_fixed_ip.fake_fixed_ip,
- address='10.0.0.1',
- pool='nova',
- instance_uuid=FAKEUUID,
- interface='eth0',
- network_id=123)
-
- def fake4_network(*args, **kwargs):
- return dict(test_network.fake_network,
- multi_host=False,
- host='jibberjabber')
-
- # fixed ip with local host
- def fake5(*args, **kwargs):
- return dict(test_fixed_ip.fake_fixed_ip,
- address='10.0.0.1',
- pool='nova',
- instance_uuid=FAKEUUID,
- interface='eth0',
- network_id=1234)
-
- def fake5_network(*args, **kwargs):
- return dict(test_network.fake_network,
- multi_host=False, host='testhost')
-
- def fake6(ctxt, method, **kwargs):
- self.local = False
-
- def fake7(*args, **kwargs):
- self.local = True
-
- def fake8(*args, **kwargs):
- return dict(test_floating_ip.fake_floating_ip,
- address='10.0.0.1',
- pool='nova',
- interface='eth0',
- fixed_ip_id=1,
- auto_assigned=True,
- project_id=ctxt.project_id)
-
- self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
-
- # raises because floating_ip is not associated to a fixed_ip
- self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
- self.assertRaises(exception.FloatingIpNotAssociated,
- self.network.disassociate_floating_ip,
- ctxt,
- mox.IgnoreArg())
-
- self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
-
- # does not raise and makes call remotely
- self.local = True
- self.stubs.Set(self.network.db, 'fixed_ip_get', fake4)
- self.stubs.Set(self.network.db, 'network_get', fake4_network)
- self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
- lambda **kw: self.network.network_rpcapi.client)
- self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
- self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
- self.assertFalse(self.local)
-
- # does not raise and makes call locally
- self.local = False
- self.stubs.Set(self.network.db, 'fixed_ip_get', fake5)
- self.stubs.Set(self.network.db, 'network_get', fake5_network)
- self.stubs.Set(self.network, '_disassociate_floating_ip', fake7)
- self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
- self.assertTrue(self.local)
-
- # raises because auto_assigned floating IP cannot be disassociated
- self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake8)
- self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
- self.network.disassociate_floating_ip,
- ctxt,
- mox.IgnoreArg())
-
- def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
- self.stubs.Set(self.network,
- '_do_trigger_security_group_members_refresh_for_instance',
- lambda *a, **kw: None)
- self.mox.StubOutWithMock(db, 'network_get')
- self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
- self.mox.StubOutWithMock(db,
- 'virtual_interface_get_by_instance_and_network')
- self.mox.StubOutWithMock(db, 'fixed_ip_update')
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
-
- db.fixed_ip_update(mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg())
- db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
- mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
-
- fixed = dict(test_fixed_ip.fake_fixed_ip,
- address='192.168.0.101')
- db.fixed_ip_associate_pool(mox.IgnoreArg(),
- mox.IgnoreArg(),
- instance_uuid=mox.IgnoreArg(),
- host=None).AndReturn(fixed)
- db.network_get(mox.IgnoreArg(),
- mox.IgnoreArg(),
- project_only=mox.IgnoreArg()
- ).AndReturn(dict(test_network.fake_network,
- **networks[0]))
- db.instance_get_by_uuid(mox.IgnoreArg(),
- mox.IgnoreArg(), use_slave=False,
- columns_to_join=['info_cache',
- 'security_groups']
- ).AndReturn(fake_inst(display_name=HOST,
- uuid=FAKEUUID))
- self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg(), mox.IgnoreArg())
- self.mox.ReplayAll()
- self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
- networks[0]['id'])
-
- @mock.patch('nova.db.fixed_ip_get_by_address')
- @mock.patch('nova.db.network_get')
- def test_ip_association_and_allocation_of_other_project(self, net_get,
- fixed_get):
- """Makes sure that we cannot deallocaate or disassociate
- a public ip of other project.
- """
- net_get.return_value = dict(test_network.fake_network,
- **networks[1])
-
- context1 = context.RequestContext('user', 'project1')
- context2 = context.RequestContext('user', 'project2')
-
- float_ip = db.floating_ip_create(context1.elevated(),
- {'address': '1.2.3.4',
- 'project_id': context1.project_id})
-
- float_addr = float_ip['address']
-
- instance = db.instance_create(context1,
- {'project_id': 'project1'})
-
- fix_addr = db.fixed_ip_associate_pool(context1.elevated(),
- 1, instance['uuid']).address
- fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
- address=fix_addr,
- instance_uuid=instance.uuid,
- network=dict(test_network.fake_network,
- **networks[1]))
-
- # Associate the IP with non-admin user context
- self.assertRaises(exception.Forbidden,
- self.network.associate_floating_ip,
- context2,
- float_addr,
- fix_addr)
-
- # Deallocate address from other project
- self.assertRaises(exception.Forbidden,
- self.network.deallocate_floating_ip,
- context2,
- float_addr)
-
- # Now Associates the address to the actual project
- self.network.associate_floating_ip(context1, float_addr, fix_addr)
-
- # Now try dis-associating from other project
- self.assertRaises(exception.Forbidden,
- self.network.disassociate_floating_ip,
- context2,
- float_addr)
-
- # Clean up the ip addresses
- self.network.disassociate_floating_ip(context1, float_addr)
- self.network.deallocate_floating_ip(context1, float_addr)
- self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
- db.floating_ip_destroy(context1.elevated(), float_addr)
- db.fixed_ip_disassociate(context1.elevated(), fix_addr)
-
- @mock.patch('nova.db.fixed_ip_get_by_address')
- @mock.patch('nova.db.network_get')
- @mock.patch('nova.db.fixed_ip_update')
- def test_deallocate_fixed(self, fixed_update, net_get, fixed_get):
- """Verify that release is called properly.
-
- Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't return
- """
- net_get.return_value = dict(test_network.fake_network,
- **networks[1])
-
- def vif_get(_context, _vif_id):
- return vifs[0]
-
- self.stubs.Set(db, 'virtual_interface_get', vif_get)
- context1 = context.RequestContext('user', 'project1')
-
- instance = db.instance_create(context1,
- {'project_id': 'project1'})
-
- elevated = context1.elevated()
- fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
- fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
- address=fix_addr.address,
- instance_uuid=instance.uuid,
- allocated=True,
- virtual_interface_id=3,
- network=dict(test_network.fake_network,
- **networks[1]))
-
- self.flags(force_dhcp_release=True)
- self.mox.StubOutWithMock(linux_net, 'release_dhcp')
- linux_net.release_dhcp(networks[1]['bridge'], fix_addr.address,
- 'DE:AD:BE:EF:00:00')
- self.mox.ReplayAll()
- self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
- fixed_update.assert_called_once_with(context1, fix_addr.address,
- {'allocated': False})
-
- @mock.patch('nova.db.fixed_ip_get_by_address')
- @mock.patch('nova.db.network_get')
- @mock.patch('nova.db.fixed_ip_update')
- def test_deallocate_fixed_with_dhcp_exception(self, fixed_update, net_get,
- fixed_get):
- net_get.return_value = dict(test_network.fake_network,
- **networks[1])
-
- def vif_get(_context, _vif_id):
- return vifs[0]
-
- with contextlib.nested(
- mock.patch.object(db, 'virtual_interface_get', vif_get),
- mock.patch.object(
- utils, 'execute',
- side_effect=processutils.ProcessExecutionError()),
- ) as (_vif_get, _execute):
- context1 = context.RequestContext('user', 'project1')
-
- instance = db.instance_create(context1,
- {'project_id': 'project1'})
-
- elevated = context1.elevated()
- fix_addr = db.fixed_ip_associate_pool(elevated, 1,
- instance['uuid'])
- fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
- address=fix_addr.address,
- instance_uuid=instance.uuid,
- allocated=True,
- virtual_interface_id=3,
- network=dict(
- test_network.fake_network,
- **networks[1]))
- self.flags(force_dhcp_release=True)
- self.network.deallocate_fixed_ip(context1, fix_addr.address,
- 'fake')
- fixed_update.assert_called_once_with(context1, fix_addr.address,
- {'allocated': False})
- _execute.assert_called_once_with('dhcp_release',
- networks[1]['bridge'],
- fix_addr.address,
- 'DE:AD:BE:EF:00:00',
- run_as_root=True)
-
- def test_deallocate_fixed_deleted(self):
- # Verify doesn't deallocate deleted fixed_ip from deleted network.
-
- def teardown_network_on_host(_context, network):
- if network['id'] == 0:
- raise test.TestingException()
-
- self.stubs.Set(self.network, '_teardown_network_on_host',
- teardown_network_on_host)
-
- context1 = context.RequestContext('user', 'project1')
- elevated = context1.elevated()
-
- instance = db.instance_create(context1,
- {'project_id': 'project1'})
- network = db.network_create_safe(elevated, networks[0])
-
- _fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
- fix_addr = _fix_addr.address
- db.fixed_ip_update(elevated, fix_addr, {'deleted': 1})
- elevated.read_deleted = 'yes'
- delfixed = db.fixed_ip_get_by_address(elevated, fix_addr)
- values = {'address': fix_addr,
- 'network_id': network.id,
- 'instance_uuid': delfixed['instance_uuid']}
- db.fixed_ip_create(elevated, values)
- elevated.read_deleted = 'no'
- elevated.read_deleted = 'yes'
-
- deallocate = self.network.deallocate_fixed_ip
- self.assertRaises(test.TestingException, deallocate, context1,
- fix_addr, 'fake')
-
- @mock.patch('nova.db.fixed_ip_get_by_address')
- @mock.patch('nova.db.network_get')
- @mock.patch('nova.db.fixed_ip_update')
- def test_deallocate_fixed_no_vif(self, fixed_update, net_get, fixed_get):
- """Verify that deallocate doesn't raise when no vif is returned.
-
- Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't return
- """
- net_get.return_value = dict(test_network.fake_network,
- **networks[1])
-
- def vif_get(_context, _vif_id):
- return None
-
- self.stubs.Set(db, 'virtual_interface_get', vif_get)
- context1 = context.RequestContext('user', 'project1')
-
- instance = db.instance_create(context1,
- {'project_id': 'project1'})
-
- elevated = context1.elevated()
- fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
- fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
- address=fix_addr.address,
- allocated=True,
- virtual_interface_id=3,
- instance_uuid=instance.uuid,
- network=dict(test_network.fake_network,
- **networks[1]))
- self.flags(force_dhcp_release=True)
- fixed_update.return_value = fixed_get.return_value
- self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
- fixed_update.assert_called_once_with(context1, fix_addr.address,
- {'allocated': False})
-
- @mock.patch('nova.db.fixed_ip_get_by_address')
- @mock.patch('nova.db.network_get')
- @mock.patch('nova.db.fixed_ip_update')
- def test_fixed_ip_cleanup_fail(self, fixed_update, net_get, fixed_get):
- # Verify IP is not deallocated if the security group refresh fails.
- net_get.return_value = dict(test_network.fake_network,
- **networks[1])
- context1 = context.RequestContext('user', 'project1')
-
- instance = db.instance_create(context1,
- {'project_id': 'project1'})
-
- elevated = context1.elevated()
- fix_addr = objects.FixedIP.associate_pool(elevated, 1,
- instance['uuid'])
-
- def fake_refresh(instance_uuid):
- raise test.TestingException()
- self.stubs.Set(self.network,
- '_do_trigger_security_group_members_refresh_for_instance',
- fake_refresh)
- fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
- address=fix_addr.address,
- allocated=True,
- virtual_interface_id=3,
- instance_uuid=instance.uuid,
- network=dict(test_network.fake_network,
- **networks[1]))
- self.assertRaises(test.TestingException,
- self.network.deallocate_fixed_ip,
- context1, str(fix_addr.address), 'fake')
- self.assertFalse(fixed_update.called)
-
- def test_get_networks_by_uuids_ordering(self):
- self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
-
- requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
- db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(
- [dict(test_network.fake_network, **net)
- for net in networks])
-
- self.mox.ReplayAll()
- res = self.network._get_networks_by_uuids(self.context,
- requested_networks)
-
- self.assertEqual(res[0]['id'], 1)
- self.assertEqual(res[1]['id'], 0)
-
- @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
- @mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
- @mock.patch('nova.network.linux_net.iptables_manager._apply')
- def test_init_host_iptables_defer_apply(self, iptable_apply,
- floating_get_by_host,
- fixed_get_by_id):
- def get_by_id(context, fixed_ip_id, **kwargs):
- net = objects.Network(bridge='testbridge',
- cidr='192.168.1.0/24')
- if fixed_ip_id == 1:
- return objects.FixedIP(address='192.168.1.4',
- network=net)
- elif fixed_ip_id == 2:
- return objects.FixedIP(address='192.168.1.5',
- network=net)
-
- def fake_apply():
- fake_apply.count += 1
-
- fake_apply.count = 0
- ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
- float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
- float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
- float1._context = ctxt
- float2._context = ctxt
-
- iptable_apply.side_effect = fake_apply
- floating_get_by_host.return_value = [float1, float2]
- fixed_get_by_id.side_effect = get_by_id
-
- self.network.init_host()
- self.assertEqual(1, fake_apply.count)
-
-
-class _TestDomainObject(object):
- def __init__(self, **kwargs):
- for k, v in kwargs.iteritems():
- self.__setattr__(k, v)
-
-
-class FakeNetwork(object):
- def __init__(self, **kwargs):
- self.vlan = None
- for k, v in kwargs.iteritems():
- self.__setattr__(k, v)
-
- def __getitem__(self, item):
- return getattr(self, item)
-
-
-class CommonNetworkTestCase(test.TestCase):
-
- REQUIRES_LOCKING = True
-
- def setUp(self):
- super(CommonNetworkTestCase, self).setUp()
- self.context = context.RequestContext('fake', 'fake')
- self.flags(ipv6_backend='rfc2462')
- self.flags(use_local=True, group='conductor')
- ipv6.reset_backend()
-
- def test_validate_instance_zone_for_dns_domain(self):
- domain = 'example.com'
- az = 'test_az'
- domains = {
- domain: _TestDomainObject(
- domain=domain,
- availability_zone=az)}
-
- def dnsdomain_get(context, instance_domain):
- return domains.get(instance_domain)
-
- self.stubs.Set(db, 'dnsdomain_get', dnsdomain_get)
- fake_instance = {'uuid': FAKEUUID,
- 'availability_zone': az}
-
- manager = network_manager.NetworkManager()
- res = manager._validate_instance_zone_for_dns_domain(self.context,
- fake_instance)
- self.assertTrue(res)
-
- def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None,
- extra_reserved=None, bottom_reserved=0,
- top_reserved=0):
- return None
-
- def test_get_instance_nw_info_client_exceptions(self):
- manager = network_manager.NetworkManager()
- self.mox.StubOutWithMock(manager.db,
- 'fixed_ip_get_by_instance')
- manager.db.fixed_ip_get_by_instance(
- self.context, FAKEUUID).AndRaise(exception.InstanceNotFound(
- instance_id=FAKEUUID))
- self.mox.ReplayAll()
- self.assertRaises(messaging.ExpectedException,
- manager.get_instance_nw_info,
- self.context, FAKEUUID, 'fake_rxtx_factor', HOST)
-
- @mock.patch('nova.db.instance_get')
- @mock.patch('nova.db.fixed_ip_get_by_instance')
- def test_deallocate_for_instance_passes_host_info(self, fixed_get,
- instance_get):
- manager = fake_network.FakeNetworkManager()
- db = manager.db
- instance_get.return_value = fake_inst(uuid='ignoreduuid')
- db.virtual_interface_delete_by_instance = lambda _x, _y: None
- ctx = context.RequestContext('igonre', 'igonre')
-
- fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip,
- address='1.2.3.4',
- network_id=123)]
-
- manager.deallocate_for_instance(
- ctx, instance=objects.Instance._from_db_object(self.context,
- objects.Instance(), instance_get.return_value))
-
- self.assertEqual([
- (ctx, '1.2.3.4', 'fake-host')
- ], manager.deallocate_fixed_ip_calls)
-
- def test_deallocate_for_instance_with_requested_networks(self):
- manager = fake_network.FakeNetworkManager()
- db = manager.db
- db.virtual_interface_delete_by_instance = mock.Mock()
- ctx = context.RequestContext('igonre', 'igonre')
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest.from_tuple(t)
- for t in [('123', '1.2.3.4'), ('123', '4.3.2.1')]])
- manager.deallocate_for_instance(
- ctx,
- instance=fake_instance.fake_instance_obj(ctx),
- requested_networks=requested_networks)
-
- self.assertEqual([
- (ctx, '1.2.3.4', 'fake-host'), (ctx, '4.3.2.1', 'fake-host')
- ], manager.deallocate_fixed_ip_calls)
-
- @mock.patch('nova.db.fixed_ip_get_by_instance')
- @mock.patch('nova.db.fixed_ip_disassociate')
- def test_remove_fixed_ip_from_instance(self, disassociate, get):
- manager = fake_network.FakeNetworkManager()
- get.return_value = [
- dict(test_fixed_ip.fake_fixed_ip, **x)
- for x in manager.db.fixed_ip_get_by_instance(None,
- FAKEUUID)]
- manager.remove_fixed_ip_from_instance(self.context, FAKEUUID,
- HOST,
- '10.0.0.1')
-
- self.assertEqual(manager.deallocate_called, '10.0.0.1')
- disassociate.assert_called_once_with(self.context, '10.0.0.1')
-
- @mock.patch('nova.db.fixed_ip_get_by_instance')
- def test_remove_fixed_ip_from_instance_bad_input(self, get):
- manager = fake_network.FakeNetworkManager()
- get.return_value = []
- self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
- manager.remove_fixed_ip_from_instance,
- self.context, 99, HOST, 'bad input')
-
- def test_validate_cidrs(self):
- manager = fake_network.FakeNetworkManager()
- nets = manager.create_networks(self.context.elevated(), 'fake',
- '192.168.0.0/24',
- False, 1, 256, None, None, None,
- None, None)
- self.assertEqual(1, len(nets))
- cidrs = [str(net['cidr']) for net in nets]
- self.assertIn('192.168.0.0/24', cidrs)
-
- def test_validate_cidrs_split_exact_in_half(self):
- manager = fake_network.FakeNetworkManager()
- nets = manager.create_networks(self.context.elevated(), 'fake',
- '192.168.0.0/24',
- False, 2, 128, None, None, None,
- None, None)
- self.assertEqual(2, len(nets))
- cidrs = [str(net['cidr']) for net in nets]
- self.assertIn('192.168.0.0/25', cidrs)
- self.assertIn('192.168.0.128/25', cidrs)
-
- @mock.patch('nova.db.network_get_all')
- def test_validate_cidrs_split_cidr_in_use_middle_of_range(self, get_all):
- manager = fake_network.FakeNetworkManager()
- get_all.return_value = [dict(test_network.fake_network,
- id=1, cidr='192.168.2.0/24')]
- nets = manager.create_networks(self.context.elevated(), 'fake',
- '192.168.0.0/16',
- False, 4, 256, None, None, None,
- None, None)
- self.assertEqual(4, len(nets))
- cidrs = [str(net['cidr']) for net in nets]
- exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
- '192.168.4.0/24']
- for exp_cidr in exp_cidrs:
- self.assertIn(exp_cidr, cidrs)
- self.assertNotIn('192.168.2.0/24', cidrs)
-
- @mock.patch('nova.db.network_get_all')
- def test_validate_cidrs_smaller_subnet_in_use(self, get_all):
- manager = fake_network.FakeNetworkManager()
- get_all.return_value = [dict(test_network.fake_network,
- id=1, cidr='192.168.2.9/25')]
- # CidrConflict: requested cidr (192.168.2.0/24) conflicts with
- # existing smaller cidr
- args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
- 1, 256, None, None, None, None, None)
- self.assertRaises(exception.CidrConflict,
- manager.create_networks, *args)
-
- @mock.patch('nova.db.network_get_all')
- def test_validate_cidrs_split_smaller_cidr_in_use(self, get_all):
- manager = fake_network.FakeNetworkManager()
- get_all.return_value = [dict(test_network.fake_network,
- id=1, cidr='192.168.2.0/25')]
- nets = manager.create_networks(self.context.elevated(), 'fake',
- '192.168.0.0/16',
- False, 4, 256, None, None, None, None,
- None)
- self.assertEqual(4, len(nets))
- cidrs = [str(net['cidr']) for net in nets]
- exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
- '192.168.4.0/24']
- for exp_cidr in exp_cidrs:
- self.assertIn(exp_cidr, cidrs)
- self.assertNotIn('192.168.2.0/24', cidrs)
-
- @mock.patch('nova.db.network_get_all')
- def test_validate_cidrs_split_smaller_cidr_in_use2(self, get_all):
- manager = fake_network.FakeNetworkManager()
- self.mox.StubOutWithMock(manager.db, 'network_get_all')
- get_all.return_value = [dict(test_network.fake_network, id=1,
- cidr='192.168.2.9/29')]
- nets = manager.create_networks(self.context.elevated(), 'fake',
- '192.168.2.0/24',
- False, 3, 32, None, None, None, None,
- None)
- self.assertEqual(3, len(nets))
- cidrs = [str(net['cidr']) for net in nets]
- exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
- for exp_cidr in exp_cidrs:
- self.assertIn(exp_cidr, cidrs)
- self.assertNotIn('192.168.2.0/27', cidrs)
-
- @mock.patch('nova.db.network_get_all')
- def test_validate_cidrs_split_all_in_use(self, get_all):
- manager = fake_network.FakeNetworkManager()
- in_use = [dict(test_network.fake_network, **values) for values in
- [{'id': 1, 'cidr': '192.168.2.9/29'},
- {'id': 2, 'cidr': '192.168.2.64/26'},
- {'id': 3, 'cidr': '192.168.2.128/26'}]]
- get_all.return_value = in_use
- args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
- 3, 64, None, None, None, None, None)
- # CidrConflict: Not enough subnets avail to satisfy requested num_
- # networks - some subnets in requested range already
- # in use
- self.assertRaises(exception.CidrConflict,
- manager.create_networks, *args)
-
- def test_validate_cidrs_one_in_use(self):
- manager = fake_network.FakeNetworkManager()
- args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
- None, None, None)
- # ValueError: network_size * num_networks exceeds cidr size
- self.assertRaises(ValueError, manager.create_networks, *args)
-
- @mock.patch('nova.db.network_get_all')
- def test_validate_cidrs_already_used(self, get_all):
- manager = fake_network.FakeNetworkManager()
- get_all.return_value = [dict(test_network.fake_network,
- cidr='192.168.0.0/24')]
- # CidrConflict: cidr already in use
- args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
- 1, 256, None, None, None, None, None)
- self.assertRaises(exception.CidrConflict,
- manager.create_networks, *args)
-
- def test_validate_cidrs_too_many(self):
- manager = fake_network.FakeNetworkManager()
- args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
- None, None, None)
- # ValueError: Not enough subnets avail to satisfy requested
- # num_networks
- self.assertRaises(ValueError, manager.create_networks, *args)
-
- def test_validate_cidrs_split_partial(self):
- manager = fake_network.FakeNetworkManager()
- nets = manager.create_networks(self.context.elevated(), 'fake',
- '192.168.0.0/16',
- False, 2, 256, None, None, None, None,
- None)
- returned_cidrs = [str(net['cidr']) for net in nets]
- self.assertIn('192.168.0.0/24', returned_cidrs)
- self.assertIn('192.168.1.0/24', returned_cidrs)
-
- @mock.patch('nova.db.network_get_all')
- def test_validate_cidrs_conflict_existing_supernet(self, get_all):
- manager = fake_network.FakeNetworkManager()
- get_all.return_value = [dict(test_network.fake_network,
- id=1, cidr='192.168.0.0/8')]
- args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
- 1, 256, None, None, None, None, None)
- # CidrConflict: requested cidr (192.168.0.0/24) conflicts
- # with existing supernet
- self.assertRaises(exception.CidrConflict,
- manager.create_networks, *args)
-
- def test_create_networks(self):
- cidr = '192.168.0.0/24'
- manager = fake_network.FakeNetworkManager()
- self.stubs.Set(manager, '_create_fixed_ips',
- self.fake_create_fixed_ips)
- args = [self.context.elevated(), 'foo', cidr, None, 1, 256,
- 'fd00::/48', None, None, None, None, None]
- self.assertTrue(manager.create_networks(*args))
-
- @mock.patch('nova.db.network_get_all')
- def test_create_networks_cidr_already_used(self, get_all):
- manager = fake_network.FakeNetworkManager()
- get_all.return_value = [dict(test_network.fake_network,
- id=1, cidr='192.168.0.0/24')]
- args = [self.context.elevated(), 'foo', '192.168.0.0/24', None, 1, 256,
- 'fd00::/48', None, None, None, None, None]
- self.assertRaises(exception.CidrConflict,
- manager.create_networks, *args)
-
- def test_create_networks_many(self):
- cidr = '192.168.0.0/16'
- manager = fake_network.FakeNetworkManager()
- self.stubs.Set(manager, '_create_fixed_ips',
- self.fake_create_fixed_ips)
- args = [self.context.elevated(), 'foo', cidr, None, 10, 256,
- 'fd00::/48', None, None, None, None, None]
- self.assertTrue(manager.create_networks(*args))
-
- @mock.patch('nova.db.network_get')
- @mock.patch('nova.db.fixed_ips_by_virtual_interface')
- def test_get_instance_uuids_by_ip_regex(self, fixed_get, network_get):
- manager = fake_network.FakeNetworkManager(self.stubs)
- fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
- _vifs = manager.db.virtual_interface_get_all(None)
- fake_context = context.RequestContext('user', 'project')
- network_get.return_value = dict(test_network.fake_network,
- **manager.db.network_get(None, 1))
-
- # Greedy get eveything
- res = manager.get_instance_uuids_by_ip_filter(fake_context,
- {'ip': '.*'})
- self.assertEqual(len(res), len(_vifs))
-
- # Doesn't exist
- res = manager.get_instance_uuids_by_ip_filter(fake_context,
- {'ip': '10.0.0.1'})
- self.assertFalse(res)
-
- # Get instance 1
- res = manager.get_instance_uuids_by_ip_filter(fake_context,
- {'ip': '172.16.0.2'})
- self.assertTrue(res)
- self.assertEqual(len(res), 1)
- self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
-
- # Get instance 2
- res = manager.get_instance_uuids_by_ip_filter(fake_context,
- {'ip': '173.16.0.2'})
- self.assertTrue(res)
- self.assertEqual(len(res), 1)
- self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
-
- # Get instance 0 and 1
- res = manager.get_instance_uuids_by_ip_filter(fake_context,
- {'ip': '172.16.0.*'})
- self.assertTrue(res)
- self.assertEqual(len(res), 2)
- self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
- self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
-
- # Get instance 1 and 2
- res = manager.get_instance_uuids_by_ip_filter(fake_context,
- {'ip': '17..16.0.2'})
- self.assertTrue(res)
- self.assertEqual(len(res), 2)
- self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
- self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
-
- @mock.patch('nova.db.network_get')
- def test_get_instance_uuids_by_ipv6_regex(self, network_get):
- manager = fake_network.FakeNetworkManager(self.stubs)
- _vifs = manager.db.virtual_interface_get_all(None)
- fake_context = context.RequestContext('user', 'project')
-
- def _network_get(context, network_id, **args):
- return dict(test_network.fake_network,
- **manager.db.network_get(context, network_id))
- network_get.side_effect = _network_get
-
- # Greedy get eveything
- res = manager.get_instance_uuids_by_ip_filter(fake_context,
- {'ip6': '.*'})
- self.assertEqual(len(res), len(_vifs))
-
- # Doesn't exist
- res = manager.get_instance_uuids_by_ip_filter(fake_context,
- {'ip6': '.*1034.*'})
- self.assertFalse(res)
-
- # Get instance 1
- res = manager.get_instance_uuids_by_ip_filter(fake_context,
- {'ip6': '2001:.*2'})
- self.assertTrue(res)
- self.assertEqual(len(res), 1)
- self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
-
- # Get instance 2
- ip6 = '2001:db8:69:1f:dead:beff:feff:ef03'
- res = manager.get_instance_uuids_by_ip_filter(fake_context,
- {'ip6': ip6})
- self.assertTrue(res)
- self.assertEqual(len(res), 1)
- self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
-
- # Get instance 0 and 1
- res = manager.get_instance_uuids_by_ip_filter(fake_context,
- {'ip6': '.*ef0[1,2]'})
- self.assertTrue(res)
- self.assertEqual(len(res), 2)
- self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
- self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
-
- # Get instance 1 and 2
- ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.'
- res = manager.get_instance_uuids_by_ip_filter(fake_context,
- {'ip6': ip6})
- self.assertTrue(res)
- self.assertEqual(len(res), 2)
- self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
- self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
-
- @mock.patch('nova.db.network_get')
- @mock.patch('nova.db.fixed_ips_by_virtual_interface')
- def test_get_instance_uuids_by_ip(self, fixed_get, network_get):
- manager = fake_network.FakeNetworkManager(self.stubs)
- fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
- _vifs = manager.db.virtual_interface_get_all(None)
- fake_context = context.RequestContext('user', 'project')
- network_get.return_value = dict(test_network.fake_network,
- **manager.db.network_get(None, 1))
-
- # No regex for you!
- res = manager.get_instance_uuids_by_ip_filter(fake_context,
- {'fixed_ip': '.*'})
- self.assertFalse(res)
-
- # Doesn't exist
- ip = '10.0.0.1'
- res = manager.get_instance_uuids_by_ip_filter(fake_context,
- {'fixed_ip': ip})
- self.assertFalse(res)
-
- # Get instance 1
- ip = '172.16.0.2'
- res = manager.get_instance_uuids_by_ip_filter(fake_context,
- {'fixed_ip': ip})
- self.assertTrue(res)
- self.assertEqual(len(res), 1)
- self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
-
- # Get instance 2
- ip = '173.16.0.2'
- res = manager.get_instance_uuids_by_ip_filter(fake_context,
- {'fixed_ip': ip})
- self.assertTrue(res)
- self.assertEqual(len(res), 1)
- self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
-
- @mock.patch('nova.db.network_get_by_uuid')
- def test_get_network(self, get):
- manager = fake_network.FakeNetworkManager()
- fake_context = context.RequestContext('user', 'project')
- get.return_value = dict(test_network.fake_network, **networks[0])
- uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- network = manager.get_network(fake_context, uuid)
- self.assertEqual(network['uuid'], uuid)
-
- @mock.patch('nova.db.network_get_by_uuid')
- def test_get_network_not_found(self, get):
- manager = fake_network.FakeNetworkManager()
- fake_context = context.RequestContext('user', 'project')
- get.side_effect = exception.NetworkNotFoundForUUID(uuid='foo')
- uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- self.assertRaises(exception.NetworkNotFound,
- manager.get_network, fake_context, uuid)
-
- @mock.patch('nova.db.network_get_all')
- def test_get_all_networks(self, get_all):
- manager = fake_network.FakeNetworkManager()
- fake_context = context.RequestContext('user', 'project')
- get_all.return_value = [dict(test_network.fake_network, **net)
- for net in networks]
- output = manager.get_all_networks(fake_context)
- self.assertEqual(len(networks), 2)
- self.assertEqual(output[0]['uuid'],
- 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
- self.assertEqual(output[1]['uuid'],
- 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')
-
- @mock.patch('nova.db.network_get_by_uuid')
- @mock.patch('nova.db.network_disassociate')
- def test_disassociate_network(self, disassociate, get):
- manager = fake_network.FakeNetworkManager()
- disassociate.return_value = True
- fake_context = context.RequestContext('user', 'project')
- get.return_value = dict(test_network.fake_network,
- **networks[0])
- uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
- manager.disassociate_network(fake_context, uuid)
-
- @mock.patch('nova.db.network_get_by_uuid')
- def test_disassociate_network_not_found(self, get):
- manager = fake_network.FakeNetworkManager()
- fake_context = context.RequestContext('user', 'project')
- get.side_effect = exception.NetworkNotFoundForUUID(uuid='fake')
- uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
- self.assertRaises(exception.NetworkNotFound,
- manager.disassociate_network, fake_context, uuid)
-
- def _test_init_host_dynamic_fixed_range(self, net_manager):
- self.flags(fake_network=True,
- routing_source_ip='172.16.0.1',
- metadata_host='172.16.0.1',
- public_interface='eth1',
- dmz_cidr=['10.0.3.0/24'])
- binary_name = linux_net.get_binary_name()
-
- # Stub out calls we don't want to really run, mock the db
- self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None)
- self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips',
- lambda *args: None)
- self.stubs.Set(net_manager.l3driver, 'initialize_gateway',
- lambda *args: None)
- self.mox.StubOutWithMock(db, 'network_get_all_by_host')
- fake_networks = [dict(test_network.fake_network, **n)
- for n in networks]
- db.network_get_all_by_host(mox.IgnoreArg(),
- mox.IgnoreArg()
- ).MultipleTimes().AndReturn(fake_networks)
- self.mox.ReplayAll()
-
- net_manager.init_host()
-
- # Get the iptables rules that got created
- current_lines = []
- new_lines = linux_net.iptables_manager._modify_rules(current_lines,
- linux_net.iptables_manager.ipv4['nat'],
- table_name='nat')
-
- expected_lines = ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
- '-j SNAT --to-source %s -o %s'
- % (binary_name, networks[0]['cidr'],
- CONF.routing_source_ip,
- CONF.public_interface),
- '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
- % (binary_name, networks[0]['cidr'],
- CONF.metadata_host),
- '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
- % (binary_name, networks[0]['cidr'],
- CONF.dmz_cidr[0]),
- '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
- '--ctstate DNAT -j ACCEPT' % (binary_name,
- networks[0]['cidr'],
- networks[0]['cidr']),
- '[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
- '-j SNAT --to-source %s -o %s'
- % (binary_name, networks[1]['cidr'],
- CONF.routing_source_ip,
- CONF.public_interface),
- '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
- % (binary_name, networks[1]['cidr'],
- CONF.metadata_host),
- '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
- % (binary_name, networks[1]['cidr'],
- CONF.dmz_cidr[0]),
- '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
- '--ctstate DNAT -j ACCEPT' % (binary_name,
- networks[1]['cidr'],
- networks[1]['cidr'])]
-
- # Compare the expected rules against the actual ones
- for line in expected_lines:
- self.assertIn(line, new_lines)
-
- # Add an additional network and ensure the rules get configured
- new_network = {'id': 2,
- 'uuid': 'cccccccc-cccc-cccc-cccc-cccccccc',
- 'label': 'test2',
- 'injected': False,
- 'multi_host': False,
- 'cidr': '192.168.2.0/24',
- 'cidr_v6': '2001:dba::/64',
- 'gateway_v6': '2001:dba::1',
- 'netmask_v6': '64',
- 'netmask': '255.255.255.0',
- 'bridge': 'fa1',
- 'bridge_interface': 'fake_fa1',
- 'gateway': '192.168.2.1',
- 'dhcp_server': '192.168.2.1',
- 'broadcast': '192.168.2.255',
- 'dns1': '192.168.2.1',
- 'dns2': '192.168.2.2',
- 'vlan': None,
- 'host': HOST,
- 'project_id': 'fake_project',
- 'vpn_public_address': '192.168.2.2',
- 'vpn_public_port': '22',
- 'vpn_private_address': '10.0.0.2'}
- new_network_obj = objects.Network._from_db_object(
- self.context, objects.Network(),
- dict(test_network.fake_network, **new_network))
-
- ctxt = context.get_admin_context()
- net_manager._setup_network_on_host(ctxt, new_network_obj)
-
- # Get the new iptables rules that got created from adding a new network
- current_lines = []
- new_lines = linux_net.iptables_manager._modify_rules(current_lines,
- linux_net.iptables_manager.ipv4['nat'],
- table_name='nat')
-
- # Add the new expected rules to the old ones
- expected_lines += ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
- '-j SNAT --to-source %s -o %s'
- % (binary_name, new_network['cidr'],
- CONF.routing_source_ip,
- CONF.public_interface),
- '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
- % (binary_name, new_network['cidr'],
- CONF.metadata_host),
- '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
- % (binary_name, new_network['cidr'],
- CONF.dmz_cidr[0]),
- '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack '
- '! --ctstate DNAT -j ACCEPT' % (binary_name,
- new_network['cidr'],
- new_network['cidr'])]
-
- # Compare the expected rules (with new network) against the actual ones
- for line in expected_lines:
- self.assertIn(line, new_lines)
-
- def test_flatdhcpmanager_dynamic_fixed_range(self):
- """Test FlatDHCPManager NAT rules for fixed_range."""
- # Set the network manager
- self.network = network_manager.FlatDHCPManager(host=HOST)
- self.network.db = db
-
- # Test new behavior:
- # CONF.fixed_range is not set, defaults to None
- # Determine networks to NAT based on lookup
- self._test_init_host_dynamic_fixed_range(self.network)
-
- def test_vlanmanager_dynamic_fixed_range(self):
- """Test VlanManager NAT rules for fixed_range."""
- # Set the network manager
- self.network = network_manager.VlanManager(host=HOST)
- self.network.db = db
-
- # Test new behavior:
- # CONF.fixed_range is not set, defaults to None
- # Determine networks to NAT based on lookup
- self._test_init_host_dynamic_fixed_range(self.network)
-
- @mock.patch('nova.objects.quotas.Quotas.rollback')
- @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address')
- @mock.patch('nova.network.manager.NetworkManager.'
- '_do_trigger_security_group_members_refresh_for_instance')
- def test_fixed_ip_cleanup_rollback(self, fake_trig,
- fixed_get, rollback):
- manager = network_manager.NetworkManager()
-
- fake_trig.side_effect = test.TestingException
-
- self.assertRaises(test.TestingException,
- manager.deallocate_fixed_ip,
- self.context, 'fake', 'fake',
- instance=fake_inst(uuid='ignoreduuid'))
- rollback.assert_called_once_with(self.context)
-
- def test_fixed_cidr_out_of_range(self):
- manager = network_manager.NetworkManager()
- ctxt = context.get_admin_context()
- self.assertRaises(exception.AddressOutOfRange,
- manager.create_networks, ctxt, label="fake",
- cidr='10.1.0.0/24', fixed_cidr='10.1.1.0/25')
-
-
-class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
- network_manager.NetworkManager):
- """Dummy manager that implements RPCAllocateFixedIP."""
-
-
-class RPCAllocateTestCase(test.TestCase):
- """Tests nova.network.manager.RPCAllocateFixedIP."""
- def setUp(self):
- super(RPCAllocateTestCase, self).setUp()
- self.flags(use_local=True, group='conductor')
- self.rpc_fixed = TestRPCFixedManager()
- self.context = context.RequestContext('fake', 'fake')
-
- def test_rpc_allocate(self):
- """Test to verify bug 855030 doesn't resurface.
-
-        Makes sure _rpc_allocate_fixed_ip returns a value so the call
- returns properly and the greenpool completes.
- """
- address = '10.10.10.10'
-
- def fake_allocate(*args, **kwargs):
- return address
-
- def fake_network_get(*args, **kwargs):
- return test_network.fake_network
-
- self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate)
- self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get)
- rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context,
- 'fake_instance',
- 'fake_network')
- self.assertEqual(rval, address)
-
-
-class TestFloatingIPManager(floating_ips.FloatingIP,
- network_manager.NetworkManager):
- """Dummy manager that implements FloatingIP."""
-
-
-class AllocateTestCase(test.TestCase):
-
- REQUIRES_LOCKING = True
-
- def setUp(self):
- super(AllocateTestCase, self).setUp()
- dns = 'nova.network.noop_dns_driver.NoopDNSDriver'
- self.flags(instance_dns_manager=dns)
- self.useFixture(test.SampleNetworks())
- self.conductor = self.start_service(
- 'conductor', manager=CONF.conductor.manager)
- self.compute = self.start_service('compute')
- self.network = self.start_service('network')
-
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id,
- self.project_id,
- is_admin=True)
- self.user_context = context.RequestContext('testuser',
- 'testproject')
-
- def test_allocate_for_instance(self):
- address = "10.10.10.10"
- self.flags(auto_assign_floating_ip=True)
-
- db.floating_ip_create(self.context,
- {'address': address,
- 'pool': 'nova'})
- inst = objects.Instance()
- inst.host = self.compute.host
- inst.display_name = HOST
- inst.instance_type_id = 1
- inst.uuid = FAKEUUID
- inst.create(self.context)
- networks = db.network_get_all(self.context)
- for network in networks:
- db.network_update(self.context, network['id'],
- {'host': self.network.host})
- project_id = self.user_context.project_id
- nw_info = self.network.allocate_for_instance(self.user_context,
- instance_id=inst['id'], instance_uuid=inst['uuid'],
- host=inst['host'], vpn=None, rxtx_factor=3,
- project_id=project_id, macs=None)
- self.assertEqual(1, len(nw_info))
- fixed_ip = nw_info.fixed_ips()[0]['address']
- self.assertTrue(utils.is_valid_ipv4(fixed_ip))
- self.network.deallocate_for_instance(self.context,
- instance=inst)
-
- def test_allocate_for_instance_illegal_network(self):
- networks = db.network_get_all(self.context)
- requested_networks = []
- for network in networks:
- # set all networks to other projects
- db.network_update(self.context, network['id'],
- {'host': self.network.host,
- 'project_id': 'otherid'})
- requested_networks.append((network['uuid'], None))
- # set the first network to our project
- db.network_update(self.context, networks[0]['id'],
- {'project_id': self.user_context.project_id})
-
- inst = objects.Instance()
- inst.host = self.compute.host
- inst.display_name = HOST
- inst.instance_type_id = 1
- inst.uuid = FAKEUUID
- inst.create(self.context)
- self.assertRaises(exception.NetworkNotFoundForProject,
- self.network.allocate_for_instance, self.user_context,
- instance_id=inst['id'], instance_uuid=inst['uuid'],
- host=inst['host'], vpn=None, rxtx_factor=3,
- project_id=self.context.project_id, macs=None,
- requested_networks=requested_networks)
-
- def test_allocate_for_instance_with_mac(self):
- available_macs = set(['ca:fe:de:ad:be:ef'])
- inst = db.instance_create(self.context, {'host': self.compute.host,
- 'display_name': HOST,
- 'instance_type_id': 1})
- networks = db.network_get_all(self.context)
- for network in networks:
- db.network_update(self.context, network['id'],
- {'host': self.network.host})
- project_id = self.context.project_id
- nw_info = self.network.allocate_for_instance(self.user_context,
- instance_id=inst['id'], instance_uuid=inst['uuid'],
- host=inst['host'], vpn=None, rxtx_factor=3,
- project_id=project_id, macs=available_macs)
- assigned_macs = [vif['address'] for vif in nw_info]
- self.assertEqual(1, len(assigned_macs))
- self.assertEqual(available_macs.pop(), assigned_macs[0])
- self.network.deallocate_for_instance(self.context,
- instance_id=inst['id'],
- host=self.network.host,
- project_id=project_id)
-
- def test_allocate_for_instance_not_enough_macs(self):
- available_macs = set()
- inst = db.instance_create(self.context, {'host': self.compute.host,
- 'display_name': HOST,
- 'instance_type_id': 1})
- networks = db.network_get_all(self.context)
- for network in networks:
- db.network_update(self.context, network['id'],
- {'host': self.network.host})
- project_id = self.context.project_id
- self.assertRaises(exception.VirtualInterfaceCreateException,
- self.network.allocate_for_instance,
- self.user_context,
- instance_id=inst['id'], instance_uuid=inst['uuid'],
- host=inst['host'], vpn=None, rxtx_factor=3,
- project_id=project_id, macs=available_macs)
-
-
-class FloatingIPTestCase(test.TestCase):
- """Tests nova.network.manager.FloatingIP."""
-
- REQUIRES_LOCKING = True
-
- def setUp(self):
- super(FloatingIPTestCase, self).setUp()
- self.tempdir = self.useFixture(fixtures.TempDir()).path
- self.flags(log_dir=self.tempdir)
- self.flags(use_local=True, group='conductor')
- self.network = TestFloatingIPManager()
- self.network.db = db
- self.project_id = 'testproject'
- self.context = context.RequestContext('testuser', self.project_id,
- is_admin=False)
-
- @mock.patch('nova.db.fixed_ip_get')
- @mock.patch('nova.db.network_get')
- @mock.patch('nova.db.instance_get_by_uuid')
- @mock.patch('nova.db.service_get_by_host_and_topic')
- @mock.patch('nova.db.floating_ip_get_by_address')
- def test_disassociate_floating_ip_multi_host_calls(self, floating_get,
- service_get,
- inst_get, net_get,
- fixed_get):
- floating_ip = dict(test_floating_ip.fake_floating_ip,
- fixed_ip_id=12)
-
- fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
- network_id=None,
- instance_uuid='instance-uuid')
-
- network = dict(test_network.fake_network,
- multi_host=True)
-
- instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
-
- ctxt = context.RequestContext('testuser', 'testproject',
- is_admin=False)
-
- self.stubs.Set(self.network,
- '_floating_ip_owned_by_project',
- lambda _x, _y: True)
-
- floating_get.return_value = floating_ip
- fixed_get.return_value = fixed_ip
- net_get.return_value = network
- inst_get.return_value = instance
- service_get.return_value = test_service.fake_service
-
- self.stubs.Set(self.network.servicegroup_api,
- 'service_is_up',
- lambda _x: True)
-
- self.mox.StubOutWithMock(
- self.network.network_rpcapi, '_disassociate_floating_ip')
-
- self.network.network_rpcapi._disassociate_floating_ip(
- ctxt, 'fl_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid')
- self.mox.ReplayAll()
-
- self.network.disassociate_floating_ip(ctxt, 'fl_ip', True)
-
- @mock.patch('nova.db.fixed_ip_get_by_address')
- @mock.patch('nova.db.network_get')
- @mock.patch('nova.db.instance_get_by_uuid')
- @mock.patch('nova.db.floating_ip_get_by_address')
- def test_associate_floating_ip_multi_host_calls(self, floating_get,
- inst_get, net_get,
- fixed_get):
- floating_ip = dict(test_floating_ip.fake_floating_ip,
- fixed_ip_id=None)
-
- fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
- network_id=None,
- instance_uuid='instance-uuid')
-
- network = dict(test_network.fake_network,
- multi_host=True)
-
- instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
-
- ctxt = context.RequestContext('testuser', 'testproject',
- is_admin=False)
-
- self.stubs.Set(self.network,
- '_floating_ip_owned_by_project',
- lambda _x, _y: True)
-
- floating_get.return_value = floating_ip
- fixed_get.return_value = fixed_ip
- net_get.return_value = network
- inst_get.return_value = instance
-
- self.mox.StubOutWithMock(
- self.network.network_rpcapi, '_associate_floating_ip')
-
- self.network.network_rpcapi._associate_floating_ip(
- ctxt, 'fl_ip', 'fix_ip', mox.IgnoreArg(), 'some-other-host',
- 'instance-uuid')
- self.mox.ReplayAll()
-
- self.network.associate_floating_ip(ctxt, 'fl_ip', 'fix_ip', True)
-
- def test_double_deallocation(self):
- instance_ref = db.instance_create(self.context,
- {"project_id": self.project_id})
- # Run it twice to make it fault if it does not handle
- # instances without fixed networks
- # If this fails in either, it does not handle having no addresses
- self.network.deallocate_for_instance(self.context,
- instance_id=instance_ref['id'])
- self.network.deallocate_for_instance(self.context,
- instance_id=instance_ref['id'])
-
- def test_deallocate_floating_ip_quota_rollback(self):
- ctxt = context.RequestContext('testuser', 'testproject',
- is_admin=False)
-
- def fake(*args, **kwargs):
- return dict(test_floating_ip.fake_floating_ip,
- address='10.0.0.1', fixed_ip_id=None,
- project_id=ctxt.project_id)
-
- self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake)
- self.mox.StubOutWithMock(db, 'floating_ip_deallocate')
- self.mox.StubOutWithMock(self.network,
- '_floating_ip_owned_by_project')
- self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
- self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
- quota.QUOTAS.reserve(self.context,
- floating_ips=-1,
- project_id='testproject').AndReturn('fake-rsv')
- self.network._floating_ip_owned_by_project(self.context,
- mox.IgnoreArg())
- db.floating_ip_deallocate(mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(None)
- quota.QUOTAS.rollback(self.context, 'fake-rsv',
- project_id='testproject')
-
- self.mox.ReplayAll()
- self.network.deallocate_floating_ip(self.context, '10.0.0.1')
-
- def test_deallocation_deleted_instance(self):
- self.stubs.Set(self.network, '_teardown_network_on_host',
- lambda *args, **kwargs: None)
- instance = objects.Instance()
- instance.project_id = self.project_id
- instance.deleted = True
- instance.create(self.context)
- network = db.network_create_safe(self.context.elevated(), {
- 'project_id': self.project_id,
- 'host': CONF.host,
- 'label': 'foo'})
- fixed = db.fixed_ip_create(self.context, {'allocated': True,
- 'instance_uuid': instance.uuid, 'address': '10.1.1.1',
- 'network_id': network['id']})
- db.floating_ip_create(self.context, {
- 'address': '10.10.10.10', 'instance_uuid': instance.uuid,
- 'fixed_ip_id': fixed['id'],
- 'project_id': self.project_id})
- self.network.deallocate_for_instance(self.context, instance=instance)
-
- def test_deallocation_duplicate_floating_ip(self):
- self.stubs.Set(self.network, '_teardown_network_on_host',
- lambda *args, **kwargs: None)
- instance = objects.Instance()
- instance.project_id = self.project_id
- instance.create(self.context)
- network = db.network_create_safe(self.context.elevated(), {
- 'project_id': self.project_id,
- 'host': CONF.host,
- 'label': 'foo'})
- fixed = db.fixed_ip_create(self.context, {'allocated': True,
- 'instance_uuid': instance.uuid, 'address': '10.1.1.1',
- 'network_id': network['id']})
- db.floating_ip_create(self.context, {
- 'address': '10.10.10.10',
- 'deleted': True})
- db.floating_ip_create(self.context, {
- 'address': '10.10.10.10', 'instance_uuid': instance.uuid,
- 'fixed_ip_id': fixed['id'],
- 'project_id': self.project_id})
- self.network.deallocate_for_instance(self.context, instance=instance)
-
- @mock.patch('nova.db.fixed_ip_get')
- @mock.patch('nova.db.floating_ip_get_by_address')
- @mock.patch('nova.db.floating_ip_update')
- def test_migrate_instance_start(self, floating_update, floating_get,
- fixed_get):
- called = {'count': 0}
-
- def fake_floating_ip_get_by_address(context, address):
- return dict(test_floating_ip.fake_floating_ip,
- address=address,
- fixed_ip_id=0)
-
- def fake_is_stale_floating_ip_address(context, floating_ip):
- return str(floating_ip.address) == '172.24.4.23'
-
- floating_get.side_effect = fake_floating_ip_get_by_address
- fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
- instance_uuid='fake_uuid',
- address='10.0.0.2',
- network=test_network.fake_network)
- floating_update.return_value = fake_floating_ip_get_by_address(
- None, '1.2.3.4')
-
- def fake_remove_floating_ip(floating_addr, fixed_addr, interface,
- network):
- called['count'] += 1
-
- def fake_clean_conntrack(fixed_ip):
- if not str(fixed_ip) == "10.0.0.2":
- raise exception.FixedIpInvalid(address=fixed_ip)
-
- self.stubs.Set(self.network, '_is_stale_floating_ip_address',
- fake_is_stale_floating_ip_address)
- self.stubs.Set(self.network.l3driver, 'remove_floating_ip',
- fake_remove_floating_ip)
- self.stubs.Set(self.network.driver, 'clean_conntrack',
- fake_clean_conntrack)
- self.mox.ReplayAll()
- addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
- self.network.migrate_instance_start(self.context,
- instance_uuid=FAKEUUID,
- floating_addresses=addresses,
- rxtx_factor=3,
- project_id=self.project_id,
- source='fake_source',
- dest='fake_dest')
-
- self.assertEqual(called['count'], 2)
-
- @mock.patch('nova.db.fixed_ip_get')
- @mock.patch('nova.db.floating_ip_update')
- def test_migrate_instance_finish(self, floating_update, fixed_get):
- called = {'count': 0}
-
- def fake_floating_ip_get_by_address(context, address):
- return dict(test_floating_ip.fake_floating_ip,
- address=address,
- fixed_ip_id=0)
-
- def fake_is_stale_floating_ip_address(context, floating_ip):
- return str(floating_ip.address) == '172.24.4.23'
-
- fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
- instance_uuid='fake_uuid',
- address='10.0.0.2',
- network=test_network.fake_network)
- floating_update.return_value = fake_floating_ip_get_by_address(
- None, '1.2.3.4')
-
- def fake_add_floating_ip(floating_addr, fixed_addr, interface,
- network):
- called['count'] += 1
-
- self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
- fake_floating_ip_get_by_address)
- self.stubs.Set(self.network, '_is_stale_floating_ip_address',
- fake_is_stale_floating_ip_address)
- self.stubs.Set(self.network.l3driver, 'add_floating_ip',
- fake_add_floating_ip)
- self.mox.ReplayAll()
- addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
- self.network.migrate_instance_finish(self.context,
- instance_uuid=FAKEUUID,
- floating_addresses=addresses,
- host='fake_dest',
- rxtx_factor=3,
- project_id=self.project_id,
- source='fake_source')
-
- self.assertEqual(called['count'], 2)
-
- def test_floating_dns_create_conflict(self):
- zone = "example.org"
- address1 = "10.10.10.11"
- name1 = "foo"
-
- self.network.add_dns_entry(self.context, address1, name1, "A", zone)
-
- self.assertRaises(exception.FloatingIpDNSExists,
- self.network.add_dns_entry, self.context,
- address1, name1, "A", zone)
-
- def test_floating_create_and_get(self):
- zone = "example.org"
- address1 = "10.10.10.11"
- name1 = "foo"
- name2 = "bar"
- entries = self.network.get_dns_entries_by_address(self.context,
- address1, zone)
- self.assertFalse(entries)
-
- self.network.add_dns_entry(self.context, address1, name1, "A", zone)
- self.network.add_dns_entry(self.context, address1, name2, "A", zone)
- entries = self.network.get_dns_entries_by_address(self.context,
- address1, zone)
- self.assertEqual(len(entries), 2)
- self.assertEqual(entries[0], name1)
- self.assertEqual(entries[1], name2)
-
- entries = self.network.get_dns_entries_by_name(self.context,
- name1, zone)
- self.assertEqual(len(entries), 1)
- self.assertEqual(entries[0], address1)
-
- def test_floating_dns_delete(self):
- zone = "example.org"
- address1 = "10.10.10.11"
- name1 = "foo"
- name2 = "bar"
-
- self.network.add_dns_entry(self.context, address1, name1, "A", zone)
- self.network.add_dns_entry(self.context, address1, name2, "A", zone)
- self.network.delete_dns_entry(self.context, name1, zone)
-
- entries = self.network.get_dns_entries_by_address(self.context,
- address1, zone)
- self.assertEqual(len(entries), 1)
- self.assertEqual(entries[0], name2)
-
- self.assertRaises(exception.NotFound,
- self.network.delete_dns_entry, self.context,
- name1, zone)
-
- def test_floating_dns_domains_public(self):
- zone1 = "testzone"
- domain1 = "example.org"
- domain2 = "example.com"
- address1 = '10.10.10.10'
- entryname = 'testentry'
-
- context_admin = context.RequestContext('testuser', 'testproject',
- is_admin=True)
-
- self.assertRaises(exception.AdminRequired,
- self.network.create_public_dns_domain, self.context,
- domain1, zone1)
- self.network.create_public_dns_domain(context_admin, domain1,
- 'testproject')
- self.network.create_public_dns_domain(context_admin, domain2,
- 'fakeproject')
-
- domains = self.network.get_dns_domains(self.context)
- self.assertEqual(len(domains), 2)
- self.assertEqual(domains[0]['domain'], domain1)
- self.assertEqual(domains[1]['domain'], domain2)
- self.assertEqual(domains[0]['project'], 'testproject')
- self.assertEqual(domains[1]['project'], 'fakeproject')
-
- self.network.add_dns_entry(self.context, address1, entryname,
- 'A', domain1)
- entries = self.network.get_dns_entries_by_name(self.context,
- entryname, domain1)
- self.assertEqual(len(entries), 1)
- self.assertEqual(entries[0], address1)
-
- self.assertRaises(exception.AdminRequired,
- self.network.delete_dns_domain, self.context,
- domain1)
- self.network.delete_dns_domain(context_admin, domain1)
- self.network.delete_dns_domain(context_admin, domain2)
-
- # Verify that deleting the domain deleted the associated entry
- entries = self.network.get_dns_entries_by_name(self.context,
- entryname, domain1)
- self.assertFalse(entries)
-
- def test_delete_all_by_ip(self):
- domain1 = "example.org"
- domain2 = "example.com"
- address = "10.10.10.10"
- name1 = "foo"
- name2 = "bar"
-
- def fake_domains(context):
- return [{'domain': 'example.org', 'scope': 'public'},
- {'domain': 'example.com', 'scope': 'public'},
- {'domain': 'test.example.org', 'scope': 'public'}]
-
- self.stubs.Set(self.network, 'get_dns_domains', fake_domains)
-
- context_admin = context.RequestContext('testuser', 'testproject',
- is_admin=True)
-
- self.network.create_public_dns_domain(context_admin, domain1,
- 'testproject')
- self.network.create_public_dns_domain(context_admin, domain2,
- 'fakeproject')
-
- domains = self.network.get_dns_domains(self.context)
- for domain in domains:
- self.network.add_dns_entry(self.context, address,
- name1, "A", domain['domain'])
- self.network.add_dns_entry(self.context, address,
- name2, "A", domain['domain'])
- entries = self.network.get_dns_entries_by_address(self.context,
- address,
- domain['domain'])
- self.assertEqual(len(entries), 2)
-
- self.network._delete_all_entries_for_ip(self.context, address)
-
- for domain in domains:
- entries = self.network.get_dns_entries_by_address(self.context,
- address,
- domain['domain'])
- self.assertFalse(entries)
-
- self.network.delete_dns_domain(context_admin, domain1)
- self.network.delete_dns_domain(context_admin, domain2)
-
- def test_mac_conflicts(self):
- # Make sure MAC collisions are retried.
- self.flags(create_unique_mac_address_attempts=3)
- ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
- macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']
-
- # Create a VIF with aa:aa:aa:aa:aa:aa
- crash_test_dummy_vif = {
- 'address': macs[1],
- 'instance_uuid': 'fake_uuid',
- 'network_id': 123,
- 'uuid': 'fake_uuid',
- }
- self.network.db.virtual_interface_create(ctxt, crash_test_dummy_vif)
-
- # Hand out a collision first, then a legit MAC
- def fake_gen_mac():
- return macs.pop()
- self.stubs.Set(utils, 'generate_mac_address', fake_gen_mac)
-
- # SQLite doesn't seem to honor the uniqueness constraint on the
- # address column, so fake the collision-avoidance here
- def fake_vif_save(vif):
- if vif.address == crash_test_dummy_vif['address']:
- raise db_exc.DBError("If you're smart, you'll retry!")
- # NOTE(russellb) The VirtualInterface object requires an ID to be
- # set, and we expect it to get set automatically when we do the
- # save.
- vif.id = 1
- self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save)
-
- # Attempt to add another and make sure that both MACs are consumed
- # by the retry loop
- self.network._add_virtual_interface(ctxt, 'fake_uuid', 123)
- self.assertEqual(macs, [])
-
- def test_deallocate_client_exceptions(self):
- # Ensure that FloatingIpNotFoundForAddress is wrapped.
- self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
- self.network.db.floating_ip_get_by_address(
- self.context, '1.2.3.4').AndRaise(
- exception.FloatingIpNotFoundForAddress(address='fake'))
- self.mox.ReplayAll()
- self.assertRaises(messaging.ExpectedException,
- self.network.deallocate_floating_ip,
- self.context, '1.2.3.4')
-
- def test_associate_client_exceptions(self):
- # Ensure that FloatingIpNotFoundForAddress is wrapped.
- self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
- self.network.db.floating_ip_get_by_address(
- self.context, '1.2.3.4').AndRaise(
- exception.FloatingIpNotFoundForAddress(address='fake'))
- self.mox.ReplayAll()
- self.assertRaises(messaging.ExpectedException,
- self.network.associate_floating_ip,
- self.context, '1.2.3.4', '10.0.0.1')
-
- def test_disassociate_client_exceptions(self):
- # Ensure that FloatingIpNotFoundForAddress is wrapped.
- self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
- self.network.db.floating_ip_get_by_address(
- self.context, '1.2.3.4').AndRaise(
- exception.FloatingIpNotFoundForAddress(address='fake'))
- self.mox.ReplayAll()
- self.assertRaises(messaging.ExpectedException,
- self.network.disassociate_floating_ip,
- self.context, '1.2.3.4')
-
- def test_get_floating_ip_client_exceptions(self):
- # Ensure that FloatingIpNotFoundForAddress is wrapped.
- self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
- self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
- exception.FloatingIpNotFound(id='fake'))
- self.mox.ReplayAll()
- self.assertRaises(messaging.ExpectedException,
- self.network.get_floating_ip,
- self.context, 'fake-id')
-
- def _test_associate_floating_ip_failure(self, stdout, expected_exception):
- def _fake_catchall(*args, **kwargs):
- return dict(test_fixed_ip.fake_fixed_ip,
- network=test_network.fake_network)
-
- def _fake_add_floating_ip(*args, **kwargs):
- raise processutils.ProcessExecutionError(stdout)
-
- self.stubs.Set(self.network.db, 'floating_ip_fixed_ip_associate',
- _fake_catchall)
- self.stubs.Set(self.network.db, 'floating_ip_disassociate',
- _fake_catchall)
- self.stubs.Set(self.network.l3driver, 'add_floating_ip',
- _fake_add_floating_ip)
-
- self.assertRaises(expected_exception,
- self.network._associate_floating_ip, self.context,
- '1.2.3.4', '1.2.3.5', '', '')
-
- def test_associate_floating_ip_failure(self):
- self._test_associate_floating_ip_failure(None,
- processutils.ProcessExecutionError)
-
- def test_associate_floating_ip_failure_interface_not_found(self):
- self._test_associate_floating_ip_failure('Cannot find device',
- exception.NoFloatingIpInterface)
-
- @mock.patch('nova.objects.FloatingIP.get_by_address')
- def test_get_floating_ip_by_address(self, mock_get):
- mock_get.return_value = mock.sentinel.floating
- self.assertEqual(mock.sentinel.floating,
- self.network.get_floating_ip_by_address(
- self.context,
- mock.sentinel.address))
- mock_get.assert_called_once_with(self.context, mock.sentinel.address)
-
- @mock.patch('nova.objects.FloatingIPList.get_by_project')
- def test_get_floating_ips_by_project(self, mock_get):
- mock_get.return_value = mock.sentinel.floatings
- self.assertEqual(mock.sentinel.floatings,
- self.network.get_floating_ips_by_project(
- self.context))
- mock_get.assert_called_once_with(self.context, self.context.project_id)
-
- @mock.patch('nova.objects.FloatingIPList.get_by_fixed_address')
- def test_get_floating_ips_by_fixed_address(self, mock_get):
- mock_get.return_value = [objects.FloatingIP(address='1.2.3.4'),
- objects.FloatingIP(address='5.6.7.8')]
- self.assertEqual(['1.2.3.4', '5.6.7.8'],
- self.network.get_floating_ips_by_fixed_address(
- self.context, mock.sentinel.address))
- mock_get.assert_called_once_with(self.context, mock.sentinel.address)
-
-
-class InstanceDNSTestCase(test.TestCase):
- """Tests nova.network.manager instance DNS."""
- def setUp(self):
- super(InstanceDNSTestCase, self).setUp()
- self.tempdir = self.useFixture(fixtures.TempDir()).path
- self.flags(log_dir=self.tempdir)
- self.flags(use_local=True, group='conductor')
- self.network = TestFloatingIPManager()
- self.network.db = db
- self.project_id = 'testproject'
- self.context = context.RequestContext('testuser', self.project_id,
- is_admin=False)
-
- def test_dns_domains_private(self):
- zone1 = 'testzone'
- domain1 = 'example.org'
-
- context_admin = context.RequestContext('testuser', 'testproject',
- is_admin=True)
-
- self.assertRaises(exception.AdminRequired,
- self.network.create_private_dns_domain, self.context,
- domain1, zone1)
-
- self.network.create_private_dns_domain(context_admin, domain1, zone1)
- domains = self.network.get_dns_domains(self.context)
- self.assertEqual(len(domains), 1)
- self.assertEqual(domains[0]['domain'], domain1)
- self.assertEqual(domains[0]['availability_zone'], zone1)
-
- self.assertRaises(exception.AdminRequired,
- self.network.delete_dns_domain, self.context,
- domain1)
- self.network.delete_dns_domain(context_admin, domain1)
-
-
-domain1 = "example.org"
-domain2 = "example.com"
-
-
-class LdapDNSTestCase(test.TestCase):
- """Tests nova.network.ldapdns.LdapDNS."""
- def setUp(self):
- super(LdapDNSTestCase, self).setUp()
-
- self.useFixture(test.ReplaceModule('ldap', fake_ldap))
- dns_class = 'nova.network.ldapdns.LdapDNS'
- self.driver = importutils.import_object(dns_class)
-
- attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
- 'domain', 'dcobject', 'top'],
- 'associateddomain': ['root'],
- 'dc': ['root']}
- self.driver.lobj.add_s("ou=hosts,dc=example,dc=org", attrs.items())
- self.driver.create_domain(domain1)
- self.driver.create_domain(domain2)
-
- def tearDown(self):
- self.driver.delete_domain(domain1)
- self.driver.delete_domain(domain2)
- super(LdapDNSTestCase, self).tearDown()
-
- def test_ldap_dns_domains(self):
- domains = self.driver.get_domains()
- self.assertEqual(len(domains), 2)
- self.assertIn(domain1, domains)
- self.assertIn(domain2, domains)
-
- def test_ldap_dns_create_conflict(self):
- address1 = "10.10.10.11"
- name1 = "foo"
-
- self.driver.create_entry(name1, address1, "A", domain1)
-
- self.assertRaises(exception.FloatingIpDNSExists,
- self.driver.create_entry,
- name1, address1, "A", domain1)
-
- def test_ldap_dns_create_and_get(self):
- address1 = "10.10.10.11"
- name1 = "foo"
- name2 = "bar"
- entries = self.driver.get_entries_by_address(address1, domain1)
- self.assertFalse(entries)
-
- self.driver.create_entry(name1, address1, "A", domain1)
- self.driver.create_entry(name2, address1, "A", domain1)
- entries = self.driver.get_entries_by_address(address1, domain1)
- self.assertEqual(len(entries), 2)
- self.assertEqual(entries[0], name1)
- self.assertEqual(entries[1], name2)
-
- entries = self.driver.get_entries_by_name(name1, domain1)
- self.assertEqual(len(entries), 1)
- self.assertEqual(entries[0], address1)
-
- def test_ldap_dns_delete(self):
- address1 = "10.10.10.11"
- name1 = "foo"
- name2 = "bar"
-
- self.driver.create_entry(name1, address1, "A", domain1)
- self.driver.create_entry(name2, address1, "A", domain1)
- entries = self.driver.get_entries_by_address(address1, domain1)
- self.assertEqual(len(entries), 2)
-
- self.driver.delete_entry(name1, domain1)
- entries = self.driver.get_entries_by_address(address1, domain1)
- LOG.debug("entries: %s" % entries)
- self.assertEqual(len(entries), 1)
- self.assertEqual(entries[0], name2)
-
- self.assertRaises(exception.NotFound,
- self.driver.delete_entry,
- name1, domain1)
diff --git a/nova/tests/network/test_network_info.py b/nova/tests/network/test_network_info.py
deleted file mode 100644
index aa5413efcb..0000000000
--- a/nova/tests/network/test_network_info.py
+++ /dev/null
@@ -1,800 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova import exception
-from nova.network import model
-from nova import test
-from nova.tests import fake_network_cache_model
-from nova.virt import netutils
-
-
-class RouteTests(test.NoDBTestCase):
- def test_create_route_with_attrs(self):
- route = fake_network_cache_model.new_route()
- fake_network_cache_model.new_ip(dict(address='192.168.1.1'))
- self.assertEqual(route['cidr'], '0.0.0.0/24')
- self.assertEqual(route['gateway']['address'], '192.168.1.1')
- self.assertEqual(route['interface'], 'eth0')
-
- def test_routes_equal(self):
- route1 = model.Route()
- route2 = model.Route()
- self.assertEqual(route1, route2)
-
- def test_routes_not_equal(self):
- route1 = model.Route(cidr='1.1.1.0/24')
- route2 = model.Route(cidr='2.2.2.0/24')
- self.assertNotEqual(route1, route2)
-
- route1 = model.Route(cidr='1.1.1.1/24', gateway='1.1.1.1')
- route2 = model.Route(cidr='1.1.1.1/24', gateway='1.1.1.2')
- self.assertNotEqual(route1, route2)
-
- route1 = model.Route(cidr='1.1.1.1/24', interface='tap0')
- route2 = model.Route(cidr='1.1.1.1/24', interface='tap1')
- self.assertNotEqual(route1, route2)
-
- def test_hydrate(self):
- route = model.Route.hydrate(
- {'gateway': fake_network_cache_model.new_ip(
- dict(address='192.168.1.1'))})
- self.assertIsNone(route['cidr'])
- self.assertEqual(route['gateway']['address'], '192.168.1.1')
- self.assertIsNone(route['interface'])
-
-
-class IPTests(test.NoDBTestCase):
- def test_ip_equal(self):
- ip1 = model.IP(address='127.0.0.1')
- ip2 = model.IP(address='127.0.0.1')
- self.assertEqual(ip1, ip2)
-
- def test_ip_not_equal(self):
- ip1 = model.IP(address='127.0.0.1')
- ip2 = model.IP(address='172.0.0.3')
- self.assertNotEqual(ip1, ip2)
-
- ip1 = model.IP(address='127.0.0.1', type=1)
- ip2 = model.IP(address='172.0.0.1', type=2)
- self.assertNotEqual(ip1, ip2)
-
- ip1 = model.IP(address='127.0.0.1', version=4)
- ip2 = model.IP(address='172.0.0.1', version=6)
- self.assertNotEqual(ip1, ip2)
-
-
-class FixedIPTests(test.NoDBTestCase):
- def test_createnew_fixed_ip_with_attrs(self):
- fixed_ip = model.FixedIP(address='192.168.1.100')
- self.assertEqual(fixed_ip['address'], '192.168.1.100')
- self.assertEqual(fixed_ip['floating_ips'], [])
- self.assertEqual(fixed_ip['type'], 'fixed')
- self.assertEqual(fixed_ip['version'], 4)
-
- def test_create_fixed_ipv6(self):
- fixed_ip = model.FixedIP(address='::1')
- self.assertEqual(fixed_ip['address'], '::1')
- self.assertEqual(fixed_ip['floating_ips'], [])
- self.assertEqual(fixed_ip['type'], 'fixed')
- self.assertEqual(fixed_ip['version'], 6)
-
- def test_create_fixed_bad_ip_fails(self):
- self.assertRaises(exception.InvalidIpAddressError,
- model.FixedIP,
- address='picklespicklespickles')
-
- def test_equate_two_fixed_ips(self):
- fixed_ip = model.FixedIP(address='::1')
- fixed_ip2 = model.FixedIP(address='::1')
- self.assertEqual(fixed_ip, fixed_ip2)
-
- def test_equate_two_dissimilar_fixed_ips_fails(self):
- fixed_ip = model.FixedIP(address='::1')
- fixed_ip2 = model.FixedIP(address='::2')
- self.assertNotEqual(fixed_ip, fixed_ip2)
-
- fixed_ip = model.FixedIP(address='::1', type='1')
- fixed_ip2 = model.FixedIP(address='::1', type='2')
- self.assertNotEqual(fixed_ip, fixed_ip2)
-
- fixed_ip = model.FixedIP(address='::1', version='6')
- fixed_ip2 = model.FixedIP(address='::1', version='4')
- self.assertNotEqual(fixed_ip, fixed_ip2)
-
- fixed_ip = model.FixedIP(address='::1', floating_ips='1.1.1.1')
- fixed_ip2 = model.FixedIP(address='::1', floating_ips='8.8.8.8')
- self.assertNotEqual(fixed_ip, fixed_ip2)
-
- def test_hydrate(self):
- fixed_ip = model.FixedIP.hydrate({})
- self.assertEqual(fixed_ip['floating_ips'], [])
- self.assertIsNone(fixed_ip['address'])
- self.assertEqual(fixed_ip['type'], 'fixed')
- self.assertIsNone(fixed_ip['version'])
-
- def test_add_floating_ip(self):
- fixed_ip = model.FixedIP(address='192.168.1.100')
- fixed_ip.add_floating_ip('192.168.1.101')
- self.assertEqual(fixed_ip['floating_ips'], ['192.168.1.101'])
-
- def test_add_floating_ip_repeatedly_only_one_instance(self):
- fixed_ip = model.FixedIP(address='192.168.1.100')
- for i in xrange(10):
- fixed_ip.add_floating_ip('192.168.1.101')
- self.assertEqual(fixed_ip['floating_ips'], ['192.168.1.101'])
-
-
-class SubnetTests(test.NoDBTestCase):
- def test_create_subnet_with_attrs(self):
- subnet = fake_network_cache_model.new_subnet()
-
- route1 = fake_network_cache_model.new_route()
-
- self.assertEqual(subnet['cidr'], '10.10.0.0/24')
- self.assertEqual(subnet['dns'],
- [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
- fake_network_cache_model.new_ip(dict(address='2.3.4.5'))])
- self.assertEqual(subnet['gateway']['address'], '10.10.0.1')
- self.assertEqual(subnet['ips'],
- [fake_network_cache_model.new_fixed_ip(
- dict(address='10.10.0.2')),
- fake_network_cache_model.new_fixed_ip(
- dict(address='10.10.0.3'))])
- self.assertEqual(subnet['routes'], [route1])
- self.assertEqual(subnet['version'], 4)
-
- def test_subnet_equal(self):
- subnet1 = fake_network_cache_model.new_subnet()
- subnet2 = fake_network_cache_model.new_subnet()
- self.assertEqual(subnet1, subnet2)
-
- def test_subnet_not_equal(self):
- subnet1 = model.Subnet(cidr='1.1.1.0/24')
- subnet2 = model.Subnet(cidr='2.2.2.0/24')
- self.assertNotEqual(subnet1, subnet2)
-
- subnet1 = model.Subnet(dns='1.1.1.0/24')
- subnet2 = model.Subnet(dns='2.2.2.0/24')
- self.assertNotEqual(subnet1, subnet2)
-
- subnet1 = model.Subnet(gateway='1.1.1.1/24')
- subnet2 = model.Subnet(gateway='2.2.2.1/24')
- self.assertNotEqual(subnet1, subnet2)
-
- subnet1 = model.Subnet(ips='1.1.1.0/24')
- subnet2 = model.Subnet(ips='2.2.2.0/24')
- self.assertNotEqual(subnet1, subnet2)
-
- subnet1 = model.Subnet(routes='1.1.1.0/24')
- subnet2 = model.Subnet(routes='2.2.2.0/24')
- self.assertNotEqual(subnet1, subnet2)
-
- subnet1 = model.Subnet(version='4')
- subnet2 = model.Subnet(version='6')
- self.assertNotEqual(subnet1, subnet2)
-
- def test_add_route(self):
- subnet = fake_network_cache_model.new_subnet()
- route1 = fake_network_cache_model.new_route()
- route2 = fake_network_cache_model.new_route({'cidr': '1.1.1.1/24'})
- subnet.add_route(route2)
- self.assertEqual(subnet['routes'], [route1, route2])
-
- def test_add_route_a_lot(self):
- subnet = fake_network_cache_model.new_subnet()
- route1 = fake_network_cache_model.new_route()
- route2 = fake_network_cache_model.new_route({'cidr': '1.1.1.1/24'})
- for i in xrange(10):
- subnet.add_route(route2)
- self.assertEqual(subnet['routes'], [route1, route2])
-
- def test_add_dns(self):
- subnet = fake_network_cache_model.new_subnet()
- dns = fake_network_cache_model.new_ip(dict(address='9.9.9.9'))
- subnet.add_dns(dns)
- self.assertEqual(subnet['dns'],
- [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
- fake_network_cache_model.new_ip(dict(address='2.3.4.5')),
- fake_network_cache_model.new_ip(dict(address='9.9.9.9'))])
-
- def test_add_dns_a_lot(self):
- subnet = fake_network_cache_model.new_subnet()
- for i in xrange(10):
- subnet.add_dns(fake_network_cache_model.new_ip(
- dict(address='9.9.9.9')))
- self.assertEqual(subnet['dns'],
- [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
- fake_network_cache_model.new_ip(dict(address='2.3.4.5')),
- fake_network_cache_model.new_ip(dict(address='9.9.9.9'))])
-
- def test_add_ip(self):
- subnet = fake_network_cache_model.new_subnet()
- subnet.add_ip(fake_network_cache_model.new_ip(
- dict(address='192.168.1.102')))
- self.assertEqual(subnet['ips'],
- [fake_network_cache_model.new_fixed_ip(
- dict(address='10.10.0.2')),
- fake_network_cache_model.new_fixed_ip(
- dict(address='10.10.0.3')),
- fake_network_cache_model.new_ip(
- dict(address='192.168.1.102'))])
-
- def test_add_ip_a_lot(self):
- subnet = fake_network_cache_model.new_subnet()
- for i in xrange(10):
- subnet.add_ip(fake_network_cache_model.new_fixed_ip(
- dict(address='192.168.1.102')))
- self.assertEqual(subnet['ips'],
- [fake_network_cache_model.new_fixed_ip(
- dict(address='10.10.0.2')),
- fake_network_cache_model.new_fixed_ip(
- dict(address='10.10.0.3')),
- fake_network_cache_model.new_fixed_ip(
- dict(address='192.168.1.102'))])
-
- def test_hydrate(self):
- subnet_dict = {
- 'cidr': '255.255.255.0',
- 'dns': [fake_network_cache_model.new_ip(dict(address='1.1.1.1'))],
- 'ips': [fake_network_cache_model.new_fixed_ip(
- dict(address='2.2.2.2'))],
- 'routes': [fake_network_cache_model.new_route()],
- 'version': 4,
- 'gateway': fake_network_cache_model.new_ip(
- dict(address='3.3.3.3'))}
- subnet = model.Subnet.hydrate(subnet_dict)
-
- self.assertEqual(subnet['cidr'], '255.255.255.0')
- self.assertEqual(subnet['dns'], [fake_network_cache_model.new_ip(
- dict(address='1.1.1.1'))])
- self.assertEqual(subnet['gateway']['address'], '3.3.3.3')
- self.assertEqual(subnet['ips'], [fake_network_cache_model.new_fixed_ip(
- dict(address='2.2.2.2'))])
- self.assertEqual(subnet['routes'], [
- fake_network_cache_model.new_route()])
- self.assertEqual(subnet['version'], 4)
-
-
-class NetworkTests(test.NoDBTestCase):
- def test_create_network(self):
- network = fake_network_cache_model.new_network()
- self.assertEqual(network['id'], 1)
- self.assertEqual(network['bridge'], 'br0')
- self.assertEqual(network['label'], 'public')
- self.assertEqual(network['subnets'],
- [fake_network_cache_model.new_subnet(),
- fake_network_cache_model.new_subnet(
- dict(cidr='255.255.255.255'))])
-
- def test_add_subnet(self):
- network = fake_network_cache_model.new_network()
- network.add_subnet(fake_network_cache_model.new_subnet(
- dict(cidr='0.0.0.0')))
- self.assertEqual(network['subnets'],
- [fake_network_cache_model.new_subnet(),
- fake_network_cache_model.new_subnet(
- dict(cidr='255.255.255.255')),
- fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))])
-
- def test_add_subnet_a_lot(self):
- network = fake_network_cache_model.new_network()
- for i in xrange(10):
- network.add_subnet(fake_network_cache_model.new_subnet(
- dict(cidr='0.0.0.0')))
- self.assertEqual(network['subnets'],
- [fake_network_cache_model.new_subnet(),
- fake_network_cache_model.new_subnet(
- dict(cidr='255.255.255.255')),
- fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))])
-
- def test_network_equal(self):
- network1 = model.Network()
- network2 = model.Network()
- self.assertEqual(network1, network2)
-
- def test_network_not_equal(self):
- network1 = model.Network(id='1')
- network2 = model.Network(id='2')
- self.assertNotEqual(network1, network2)
-
- network1 = model.Network(bridge='br-int')
- network2 = model.Network(bridge='br0')
- self.assertNotEqual(network1, network2)
-
- network1 = model.Network(label='net1')
- network2 = model.Network(label='net2')
- self.assertNotEqual(network1, network2)
-
- network1 = model.Network(subnets='1.1.1.0/24')
- network2 = model.Network(subnets='2.2.2.0/24')
- self.assertNotEqual(network1, network2)
-
- def test_hydrate(self):
- fake_network_cache_model.new_subnet()
- fake_network_cache_model.new_subnet(dict(cidr='255.255.255.255'))
- network = model.Network.hydrate(fake_network_cache_model.new_network())
-
- self.assertEqual(network['id'], 1)
- self.assertEqual(network['bridge'], 'br0')
- self.assertEqual(network['label'], 'public')
- self.assertEqual(network['subnets'],
- [fake_network_cache_model.new_subnet(),
- fake_network_cache_model.new_subnet(
- dict(cidr='255.255.255.255'))])
-
-
-class VIFTests(test.NoDBTestCase):
- def test_create_vif(self):
- vif = fake_network_cache_model.new_vif()
- self.assertEqual(vif['id'], 1)
- self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
- self.assertEqual(vif['network'],
- fake_network_cache_model.new_network())
-
- def test_vif_equal(self):
- vif1 = model.VIF()
- vif2 = model.VIF()
- self.assertEqual(vif1, vif2)
-
- def test_vif_not_equal(self):
- vif1 = model.VIF(id=1)
- vif2 = model.VIF(id=2)
- self.assertNotEqual(vif1, vif2)
-
- vif1 = model.VIF(address='00:00:00:00:00:11')
- vif2 = model.VIF(address='00:00:00:00:00:22')
- self.assertNotEqual(vif1, vif2)
-
- vif1 = model.VIF(network='net1')
- vif2 = model.VIF(network='net2')
- self.assertNotEqual(vif1, vif2)
-
- vif1 = model.VIF(type='ovs')
- vif2 = model.VIF(type='linuxbridge')
- self.assertNotEqual(vif1, vif2)
-
- vif1 = model.VIF(devname='ovs1234')
- vif2 = model.VIF(devname='linuxbridge1234')
- self.assertNotEqual(vif1, vif2)
-
- vif1 = model.VIF(qbh_params=1)
- vif2 = model.VIF(qbh_params=None)
- self.assertNotEqual(vif1, vif2)
-
- vif1 = model.VIF(qbg_params=1)
- vif2 = model.VIF(qbg_params=None)
- self.assertNotEqual(vif1, vif2)
-
- vif1 = model.VIF(active=True)
- vif2 = model.VIF(active=False)
- self.assertNotEqual(vif1, vif2)
-
- vif1 = model.VIF(vnic_type=model.VNIC_TYPE_NORMAL)
- vif2 = model.VIF(vnic_type=model.VNIC_TYPE_DIRECT)
- self.assertNotEqual(vif1, vif2)
-
- vif1 = model.VIF(profile={'pci_slot': '0000:0a:00.1'})
- vif2 = model.VIF(profile={'pci_slot': '0000:0a:00.2'})
- self.assertNotEqual(vif1, vif2)
-
- def test_create_vif_with_type(self):
- vif_dict = dict(
- id=1,
- address='aa:aa:aa:aa:aa:aa',
- network=fake_network_cache_model.new_network(),
- type='bridge')
- vif = fake_network_cache_model.new_vif(vif_dict)
- self.assertEqual(vif['id'], 1)
- self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
- self.assertEqual(vif['type'], 'bridge')
- self.assertEqual(vif['network'],
- fake_network_cache_model.new_network())
-
- def test_vif_get_fixed_ips(self):
- vif = fake_network_cache_model.new_vif()
- fixed_ips = vif.fixed_ips()
- ips = [
- fake_network_cache_model.new_fixed_ip(dict(address='10.10.0.2')),
- fake_network_cache_model.new_fixed_ip(dict(address='10.10.0.3'))
- ] * 2
- self.assertEqual(fixed_ips, ips)
-
- def test_vif_get_floating_ips(self):
- vif = fake_network_cache_model.new_vif()
- vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1')
- floating_ips = vif.floating_ips()
- self.assertEqual(floating_ips, ['192.168.1.1'])
-
- def test_vif_get_labeled_ips(self):
- vif = fake_network_cache_model.new_vif()
- labeled_ips = vif.labeled_ips()
- ip_dict = {
- 'network_id': 1,
- 'ips': [fake_network_cache_model.new_ip(
- {'address': '10.10.0.2', 'type': 'fixed'}),
- fake_network_cache_model.new_ip(
- {'address': '10.10.0.3', 'type': 'fixed'})] * 2,
- 'network_label': 'public'}
- self.assertEqual(labeled_ips, ip_dict)
-
- def test_hydrate(self):
- fake_network_cache_model.new_network()
- vif = model.VIF.hydrate(fake_network_cache_model.new_vif())
- self.assertEqual(vif['id'], 1)
- self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
- self.assertEqual(vif['network'],
- fake_network_cache_model.new_network())
-
- def test_hydrate_vif_with_type(self):
- vif_dict = dict(
- id=1,
- address='aa:aa:aa:aa:aa:aa',
- network=fake_network_cache_model.new_network(),
- type='bridge')
- vif = model.VIF.hydrate(fake_network_cache_model.new_vif(vif_dict))
- self.assertEqual(vif['id'], 1)
- self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
- self.assertEqual(vif['type'], 'bridge')
- self.assertEqual(vif['network'],
- fake_network_cache_model.new_network())
-
-
-class NetworkInfoTests(test.NoDBTestCase):
- def test_create_model(self):
- ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(),
- fake_network_cache_model.new_vif(
- {'address': 'bb:bb:bb:bb:bb:bb'})])
- self.assertEqual(ninfo.fixed_ips(),
- [fake_network_cache_model.new_fixed_ip(
- {'address': '10.10.0.2'}),
- fake_network_cache_model.new_fixed_ip(
- {'address': '10.10.0.3'})] * 4)
-
- def test_create_async_model(self):
- def async_wrapper():
- return model.NetworkInfo(
- [fake_network_cache_model.new_vif(),
- fake_network_cache_model.new_vif(
- {'address': 'bb:bb:bb:bb:bb:bb'})])
-
- ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
- self.assertEqual(ninfo.fixed_ips(),
- [fake_network_cache_model.new_fixed_ip(
- {'address': '10.10.0.2'}),
- fake_network_cache_model.new_fixed_ip(
- {'address': '10.10.0.3'})] * 4)
-
- def test_create_async_model_exceptions(self):
- def async_wrapper():
- raise test.TestingException()
-
- ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
- self.assertRaises(test.TestingException, ninfo.wait)
- # 2nd one doesn't raise
- self.assertIsNone(ninfo.wait())
- # Test that do_raise=False works on .wait()
- ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
- self.assertIsNone(ninfo.wait(do_raise=False))
- # Test we also raise calling a method
- ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
- self.assertRaises(test.TestingException, ninfo.fixed_ips)
-
- def test_get_floating_ips(self):
- vif = fake_network_cache_model.new_vif()
- vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1')
- ninfo = model.NetworkInfo([vif,
- fake_network_cache_model.new_vif(
- {'address': 'bb:bb:bb:bb:bb:bb'})])
- self.assertEqual(ninfo.floating_ips(), ['192.168.1.1'])
-
- def test_hydrate(self):
- ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(),
- fake_network_cache_model.new_vif(
- {'address': 'bb:bb:bb:bb:bb:bb'})])
- model.NetworkInfo.hydrate(ninfo)
- self.assertEqual(ninfo.fixed_ips(),
- [fake_network_cache_model.new_fixed_ip(
- {'address': '10.10.0.2'}),
- fake_network_cache_model.new_fixed_ip(
- {'address': '10.10.0.3'})] * 4)
-
- def _setup_injected_network_scenario(self, should_inject=True,
- use_ipv4=True, use_ipv6=False,
- gateway=True, dns=True,
- two_interfaces=False,
- libvirt_virt_type=None):
- """Check that netutils properly decides whether to inject based on
- whether the supplied subnet is static or dynamic.
- """
- network = fake_network_cache_model.new_network({'subnets': []})
-
- subnet_dict = {}
- if not gateway:
- subnet_dict['gateway'] = None
-
- if not dns:
- subnet_dict['dns'] = None
-
- if not should_inject:
- subnet_dict['dhcp_server'] = '10.10.0.1'
-
- if use_ipv4:
- network.add_subnet(
- fake_network_cache_model.new_subnet(subnet_dict))
-
- if should_inject and use_ipv6:
- gateway_ip = fake_network_cache_model.new_ip(dict(
- address='1234:567::1'))
- ip = fake_network_cache_model.new_ip(dict(
- address='1234:567::2'))
- ipv6_subnet_dict = dict(
- cidr='1234:567::/48',
- gateway=gateway_ip,
- dns=[fake_network_cache_model.new_ip(
- dict(address='2001:4860:4860::8888')),
- fake_network_cache_model.new_ip(
- dict(address='2001:4860:4860::8844'))],
- ips=[ip])
- if not gateway:
- ipv6_subnet_dict['gateway'] = None
- network.add_subnet(fake_network_cache_model.new_subnet(
- ipv6_subnet_dict))
-
- # Behave as though CONF.flat_injected is True
- network['meta']['injected'] = True
- vif = fake_network_cache_model.new_vif({'network': network})
- vifs = [vif]
- if two_interfaces:
- vifs.append(vif)
-
- nwinfo = model.NetworkInfo(vifs)
- return netutils.get_injected_network_template(
- nwinfo, use_ipv6=use_ipv6, libvirt_virt_type=libvirt_virt_type)
-
- def test_injection_dynamic(self):
- expected = None
- template = self._setup_injected_network_scenario(should_inject=False)
- self.assertEqual(expected, template)
-
- def test_injection_static(self):
- expected = """\
-# Injected by Nova on instance boot
-#
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-auto eth0
-iface eth0 inet static
- address 10.10.0.2
- netmask 255.255.255.0
- broadcast 10.10.0.255
- gateway 10.10.0.1
- dns-nameservers 1.2.3.4 2.3.4.5
-"""
- template = self._setup_injected_network_scenario()
- self.assertEqual(expected, template)
-
- def test_injection_static_no_gateway(self):
- expected = """\
-# Injected by Nova on instance boot
-#
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-auto eth0
-iface eth0 inet static
- address 10.10.0.2
- netmask 255.255.255.0
- broadcast 10.10.0.255
- dns-nameservers 1.2.3.4 2.3.4.5
-"""
- template = self._setup_injected_network_scenario(gateway=False)
- self.assertEqual(expected, template)
-
- def test_injection_static_no_dns(self):
- expected = """\
-# Injected by Nova on instance boot
-#
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-auto eth0
-iface eth0 inet static
- address 10.10.0.2
- netmask 255.255.255.0
- broadcast 10.10.0.255
- gateway 10.10.0.1
-"""
- template = self._setup_injected_network_scenario(dns=False)
- self.assertEqual(expected, template)
-
- def test_injection_static_ipv6(self):
- expected = """\
-# Injected by Nova on instance boot
-#
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-auto eth0
-iface eth0 inet static
- address 10.10.0.2
- netmask 255.255.255.0
- broadcast 10.10.0.255
- gateway 10.10.0.1
- dns-nameservers 1.2.3.4 2.3.4.5
-iface eth0 inet6 static
- address 1234:567::2
- netmask 48
- gateway 1234:567::1
- dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
-"""
- template = self._setup_injected_network_scenario(use_ipv6=True)
- self.assertEqual(expected, template)
-
- def test_injection_static_ipv6_no_gateway(self):
- expected = """\
-# Injected by Nova on instance boot
-#
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-auto eth0
-iface eth0 inet static
- address 10.10.0.2
- netmask 255.255.255.0
- broadcast 10.10.0.255
- dns-nameservers 1.2.3.4 2.3.4.5
-iface eth0 inet6 static
- address 1234:567::2
- netmask 48
- dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
-"""
- template = self._setup_injected_network_scenario(use_ipv6=True,
- gateway=False)
- self.assertEqual(expected, template)
-
- def test_injection_static_with_ipv4_off(self):
- expected = None
- template = self._setup_injected_network_scenario(use_ipv4=False)
- self.assertEqual(expected, template)
-
- def test_injection_ipv6_two_interfaces(self):
- expected = """\
-# Injected by Nova on instance boot
-#
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-auto eth0
-iface eth0 inet static
- address 10.10.0.2
- netmask 255.255.255.0
- broadcast 10.10.0.255
- gateway 10.10.0.1
- dns-nameservers 1.2.3.4 2.3.4.5
-iface eth0 inet6 static
- address 1234:567::2
- netmask 48
- gateway 1234:567::1
- dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
-
-auto eth1
-iface eth1 inet static
- address 10.10.0.2
- netmask 255.255.255.0
- broadcast 10.10.0.255
- gateway 10.10.0.1
- dns-nameservers 1.2.3.4 2.3.4.5
-iface eth1 inet6 static
- address 1234:567::2
- netmask 48
- gateway 1234:567::1
- dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
-"""
- template = self._setup_injected_network_scenario(use_ipv6=True,
- two_interfaces=True)
- self.assertEqual(expected, template)
-
- def test_injection_ipv6_with_lxc(self):
- expected = """\
-# Injected by Nova on instance boot
-#
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-auto eth0
-iface eth0 inet static
- address 10.10.0.2
- netmask 255.255.255.0
- broadcast 10.10.0.255
- gateway 10.10.0.1
- dns-nameservers 1.2.3.4 2.3.4.5
- post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
- post-up ip -6 route add default via 1234:567::1 dev ${IFACE}
-
-auto eth1
-iface eth1 inet static
- address 10.10.0.2
- netmask 255.255.255.0
- broadcast 10.10.0.255
- gateway 10.10.0.1
- dns-nameservers 1.2.3.4 2.3.4.5
- post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
- post-up ip -6 route add default via 1234:567::1 dev ${IFACE}
-"""
- template = self._setup_injected_network_scenario(
- use_ipv6=True, two_interfaces=True, libvirt_virt_type='lxc')
- self.assertEqual(expected, template)
-
- def test_injection_ipv6_with_lxc_no_gateway(self):
- expected = """\
-# Injected by Nova on instance boot
-#
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-auto eth0
-iface eth0 inet static
- address 10.10.0.2
- netmask 255.255.255.0
- broadcast 10.10.0.255
- dns-nameservers 1.2.3.4 2.3.4.5
- post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
-
-auto eth1
-iface eth1 inet static
- address 10.10.0.2
- netmask 255.255.255.0
- broadcast 10.10.0.255
- dns-nameservers 1.2.3.4 2.3.4.5
- post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
-"""
- template = self._setup_injected_network_scenario(
- use_ipv6=True, gateway=False, two_interfaces=True,
- libvirt_virt_type='lxc')
- self.assertEqual(expected, template)
diff --git a/nova/tests/network/test_neutronv2.py b/nova/tests/network/test_neutronv2.py
deleted file mode 100644
index f462fa8fa8..0000000000
--- a/nova/tests/network/test_neutronv2.py
+++ /dev/null
@@ -1,3194 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import collections
-import contextlib
-import copy
-import uuid
-
-import mock
-import mox
-from neutronclient.common import exceptions
-from neutronclient.v2_0 import client
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-import six
-
-from nova.compute import flavors
-from nova import context
-from nova import exception
-from nova.network import model
-from nova.network import neutronv2
-from nova.network.neutronv2 import api as neutronapi
-from nova.network.neutronv2 import constants
-from nova import objects
-from nova.openstack.common import policy as common_policy
-from nova.pci import manager as pci_manager
-from nova.pci import whitelist as pci_whitelist
-from nova import policy
-from nova import test
-from nova.tests import fake_instance
-from nova import utils
-
-CONF = cfg.CONF
-
-# NOTE: Neutron client raises Exception which is discouraged by HACKING.
-# We set this variable here and use it for assertions below to avoid
-# the hacking checks until we can make neutron client throw a custom
-# exception class instead.
-NEUTRON_CLIENT_EXCEPTION = Exception
-
-
-class MyComparator(mox.Comparator):
- def __init__(self, lhs):
- self.lhs = lhs
-
- def _com_dict(self, lhs, rhs):
- if len(lhs) != len(rhs):
- return False
- for key, value in lhs.iteritems():
- if key not in rhs:
- return False
- rhs_value = rhs[key]
- if not self._com(value, rhs_value):
- return False
- return True
-
- def _com_list(self, lhs, rhs):
- if len(lhs) != len(rhs):
- return False
- for lhs_value in lhs:
- if lhs_value not in rhs:
- return False
- return True
-
- def _com(self, lhs, rhs):
- if lhs is None:
- return rhs is None
- if isinstance(lhs, dict):
- if not isinstance(rhs, dict):
- return False
- return self._com_dict(lhs, rhs)
- if isinstance(lhs, list):
- if not isinstance(rhs, list):
- return False
- return self._com_list(lhs, rhs)
- if isinstance(lhs, tuple):
- if not isinstance(rhs, tuple):
- return False
- return self._com_list(lhs, rhs)
- return lhs == rhs
-
- def equals(self, rhs):
- return self._com(self.lhs, rhs)
-
- def __repr__(self):
- return str(self.lhs)
-
-
-class TestNeutronClient(test.TestCase):
- def test_withtoken(self):
- self.flags(url='http://anyhost/', group='neutron')
- self.flags(url_timeout=30, group='neutron')
- my_context = context.RequestContext('userid',
- 'my_tenantid',
- auth_token='token')
- self.mox.StubOutWithMock(client.Client, "__init__")
- client.Client.__init__(
- auth_strategy=CONF.neutron.auth_strategy,
- endpoint_url=CONF.neutron.url,
- token=my_context.auth_token,
- timeout=CONF.neutron.url_timeout,
- insecure=False,
- ca_cert=None).AndReturn(None)
- self.mox.ReplayAll()
- neutronv2.get_client(my_context)
-
- def test_withouttoken(self):
- my_context = context.RequestContext('userid', 'my_tenantid')
- self.assertRaises(exceptions.Unauthorized,
- neutronv2.get_client,
- my_context)
-
- def test_withtoken_context_is_admin(self):
- self.flags(url='http://anyhost/', group='neutron')
- self.flags(url_timeout=30, group='neutron')
- my_context = context.RequestContext('userid',
- 'my_tenantid',
- auth_token='token',
- is_admin=True)
- self.mox.StubOutWithMock(client.Client, "__init__")
- client.Client.__init__(
- auth_strategy=CONF.neutron.auth_strategy,
- endpoint_url=CONF.neutron.url,
- token=my_context.auth_token,
- timeout=CONF.neutron.url_timeout,
- insecure=False,
- ca_cert=None).AndReturn(None)
- self.mox.ReplayAll()
- # Note that although we have admin set in the context we
- # are not asking for an admin client, and so we auth with
- # our own token
- neutronv2.get_client(my_context)
-
- def test_withouttoken_keystone_connection_error(self):
- self.flags(auth_strategy='keystone', group='neutron')
- self.flags(url='http://anyhost/', group='neutron')
- my_context = context.RequestContext('userid', 'my_tenantid')
- self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
- neutronv2.get_client,
- my_context)
-
- def test_reuse_admin_token(self):
- self.flags(url='http://anyhost/', group='neutron')
- self.flags(url_timeout=30, group='neutron')
- token_store = neutronv2.AdminTokenStore.get()
- token_store.admin_auth_token = 'new_token'
- my_context = context.RequestContext('userid', 'my_tenantid',
- auth_token='token')
- with contextlib.nested(
- mock.patch.object(client.Client, "list_networks",
- side_effect=mock.Mock),
- mock.patch.object(client.Client, 'get_auth_info',
- return_value={'auth_token': 'new_token1'}),
- ):
- client1 = neutronv2.get_client(my_context, True)
- client1.list_networks(retrieve_all=False)
- self.assertEqual('new_token1', token_store.admin_auth_token)
- client1 = neutronv2.get_client(my_context, True)
- client1.list_networks(retrieve_all=False)
- self.assertEqual('new_token1', token_store.admin_auth_token)
-
- def test_admin_token_updated(self):
- self.flags(url='http://anyhost/', group='neutron')
- self.flags(url_timeout=30, group='neutron')
- token_store = neutronv2.AdminTokenStore.get()
- token_store.admin_auth_token = 'new_token'
- tokens = [{'auth_token': 'new_token1'}, {'auth_token': 'new_token'}]
- my_context = context.RequestContext('userid', 'my_tenantid',
- auth_token='token')
- with contextlib.nested(
- mock.patch.object(client.Client, "list_networks",
- side_effect=mock.Mock),
- mock.patch.object(client.Client, 'get_auth_info',
- side_effect=tokens.pop),
- ):
- client1 = neutronv2.get_client(my_context, True)
- client1.list_networks(retrieve_all=False)
- self.assertEqual('new_token', token_store.admin_auth_token)
- client1 = neutronv2.get_client(my_context, True)
- client1.list_networks(retrieve_all=False)
- self.assertEqual('new_token1', token_store.admin_auth_token)
-
-
-class TestNeutronv2Base(test.TestCase):
-
- def setUp(self):
- super(TestNeutronv2Base, self).setUp()
- self.context = context.RequestContext('userid', 'my_tenantid')
- setattr(self.context,
- 'auth_token',
- 'bff4a5a6b9eb4ea2a6efec6eefb77936')
- self.instance = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
- 'uuid': str(uuid.uuid4()),
- 'display_name': 'test_instance',
- 'availability_zone': 'nova',
- 'host': 'some_host',
- 'security_groups': []}
- self.instance2 = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
- 'uuid': str(uuid.uuid4()),
- 'display_name': 'test_instance2',
- 'availability_zone': 'nova',
- 'security_groups': []}
- self.nets1 = [{'id': 'my_netid1',
- 'name': 'my_netname1',
- 'subnets': ['mysubnid1'],
- 'tenant_id': 'my_tenantid'}]
- self.nets2 = []
- self.nets2.append(self.nets1[0])
- self.nets2.append({'id': 'my_netid2',
- 'name': 'my_netname2',
- 'subnets': ['mysubnid2'],
- 'tenant_id': 'my_tenantid'})
- self.nets3 = self.nets2 + [{'id': 'my_netid3',
- 'name': 'my_netname3',
- 'tenant_id': 'my_tenantid'}]
- self.nets4 = [{'id': 'his_netid4',
- 'name': 'his_netname4',
- 'tenant_id': 'his_tenantid'}]
- # A network request with external networks
- self.nets5 = self.nets1 + [{'id': 'the-external-one',
- 'name': 'out-of-this-world',
- 'router:external': True,
- 'tenant_id': 'should-be-an-admin'}]
- # A network request with a duplicate
- self.nets6 = []
- self.nets6.append(self.nets1[0])
- self.nets6.append(self.nets1[0])
- # A network request with a combo
- self.nets7 = []
- self.nets7.append(self.nets2[1])
- self.nets7.append(self.nets1[0])
- self.nets7.append(self.nets2[1])
- self.nets7.append(self.nets1[0])
- # A network request with only external network
- self.nets8 = [self.nets5[1]]
-
- self.nets = [self.nets1, self.nets2, self.nets3, self.nets4,
- self.nets5, self.nets6, self.nets7, self.nets8]
-
- self.port_address = '10.0.1.2'
- self.port_data1 = [{'network_id': 'my_netid1',
- 'device_id': self.instance2['uuid'],
- 'device_owner': 'compute:nova',
- 'id': 'my_portid1',
- 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
- 'status': 'DOWN',
- 'admin_state_up': True,
- 'fixed_ips': [{'ip_address': self.port_address,
- 'subnet_id': 'my_subid1'}],
- 'mac_address': 'my_mac1', }]
- self.float_data1 = [{'port_id': 'my_portid1',
- 'fixed_ip_address': self.port_address,
- 'floating_ip_address': '172.0.1.2'}]
- self.dhcp_port_data1 = [{'fixed_ips': [{'ip_address': '10.0.1.9',
- 'subnet_id': 'my_subid1'}],
- 'status': 'ACTIVE',
- 'admin_state_up': True}]
- self.port_address2 = '10.0.2.2'
- self.port_data2 = []
- self.port_data2.append(self.port_data1[0])
- self.port_data2.append({'network_id': 'my_netid2',
- 'device_id': self.instance['uuid'],
- 'admin_state_up': True,
- 'status': 'ACTIVE',
- 'device_owner': 'compute:nova',
- 'id': 'my_portid2',
- 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
- 'fixed_ips':
- [{'ip_address': self.port_address2,
- 'subnet_id': 'my_subid2'}],
- 'mac_address': 'my_mac2', })
- self.float_data2 = []
- self.float_data2.append(self.float_data1[0])
- self.float_data2.append({'port_id': 'my_portid2',
- 'fixed_ip_address': '10.0.2.2',
- 'floating_ip_address': '172.0.2.2'})
- self.port_data3 = [{'network_id': 'my_netid1',
- 'device_id': 'device_id3',
- 'status': 'DOWN',
- 'admin_state_up': True,
- 'device_owner': 'compute:nova',
- 'id': 'my_portid3',
- 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
- 'fixed_ips': [], # no fixed ip
- 'mac_address': 'my_mac3', }]
- self.subnet_data1 = [{'id': 'my_subid1',
- 'cidr': '10.0.1.0/24',
- 'network_id': 'my_netid1',
- 'gateway_ip': '10.0.1.1',
- 'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
- self.subnet_data2 = []
- self.subnet_data_n = [{'id': 'my_subid1',
- 'cidr': '10.0.1.0/24',
- 'network_id': 'my_netid1',
- 'gateway_ip': '10.0.1.1',
- 'dns_nameservers': ['8.8.1.1', '8.8.1.2']},
- {'id': 'my_subid2',
- 'cidr': '20.0.1.0/24',
- 'network_id': 'my_netid2',
- 'gateway_ip': '20.0.1.1',
- 'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
- self.subnet_data2.append({'id': 'my_subid2',
- 'cidr': '10.0.2.0/24',
- 'network_id': 'my_netid2',
- 'gateway_ip': '10.0.2.1',
- 'dns_nameservers': ['8.8.2.1', '8.8.2.2']})
-
- self.fip_pool = {'id': '4fdbfd74-eaf8-4884-90d9-00bd6f10c2d3',
- 'name': 'ext_net',
- 'router:external': True,
- 'tenant_id': 'admin_tenantid'}
- self.fip_pool_nova = {'id': '435e20c3-d9f1-4f1b-bee5-4611a1dd07db',
- 'name': 'nova',
- 'router:external': True,
- 'tenant_id': 'admin_tenantid'}
- self.fip_unassociated = {'tenant_id': 'my_tenantid',
- 'id': 'fip_id1',
- 'floating_ip_address': '172.24.4.227',
- 'floating_network_id': self.fip_pool['id'],
- 'port_id': None,
- 'fixed_ip_address': None,
- 'router_id': None}
- fixed_ip_address = self.port_data2[1]['fixed_ips'][0]['ip_address']
- self.fip_associated = {'tenant_id': 'my_tenantid',
- 'id': 'fip_id2',
- 'floating_ip_address': '172.24.4.228',
- 'floating_network_id': self.fip_pool['id'],
- 'port_id': self.port_data2[1]['id'],
- 'fixed_ip_address': fixed_ip_address,
- 'router_id': 'router_id1'}
- self._returned_nw_info = []
- self.mox.StubOutWithMock(neutronv2, 'get_client')
- self.moxed_client = self.mox.CreateMock(client.Client)
- self.addCleanup(CONF.reset)
- self.addCleanup(self.mox.VerifyAll)
- self.addCleanup(self.mox.UnsetStubs)
- self.addCleanup(self.stubs.UnsetAll)
-
- def _stub_allocate_for_instance(self, net_idx=1, **kwargs):
- # TODO(mriedem): Remove this conversion when all neutronv2 APIs are
- # converted to handling instance objects.
- self.instance = fake_instance.fake_instance_obj(self.context,
- **self.instance)
- self.instance2 = fake_instance.fake_instance_obj(self.context,
- **self.instance2)
-
- api = neutronapi.API()
- self.mox.StubOutWithMock(api, 'get_instance_nw_info')
- has_portbinding = False
- has_extra_dhcp_opts = False
- dhcp_options = kwargs.get('dhcp_options')
- if dhcp_options is not None:
- has_extra_dhcp_opts = True
-
- if kwargs.get('portbinding'):
- has_portbinding = True
- api.extensions[constants.PORTBINDING_EXT] = 1
- self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache')
- neutronv2.get_client(mox.IgnoreArg()).AndReturn(
- self.moxed_client)
- neutronv2.get_client(
- mox.IgnoreArg(), admin=True).AndReturn(
- self.moxed_client)
- api._refresh_neutron_extensions_cache(mox.IgnoreArg(),
- neutron=self.moxed_client)
- self.mox.StubOutWithMock(api, '_has_port_binding_extension')
- api._has_port_binding_extension(mox.IgnoreArg(),
- neutron=self.moxed_client,
- refresh_cache=True).AndReturn(has_portbinding)
- else:
- self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache')
- api._refresh_neutron_extensions_cache(mox.IgnoreArg(),
- neutron=self.moxed_client)
- self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
- # Net idx is 1-based for compatibility with existing unit tests
- nets = self.nets[net_idx - 1]
- ports = {}
- fixed_ips = {}
- macs = kwargs.get('macs')
- if macs:
- macs = set(macs)
- req_net_ids = []
- ordered_networks = []
- port = {}
- if 'requested_networks' in kwargs:
- for request in kwargs['requested_networks']:
- if request.port_id:
- if request.port_id == 'my_portid3':
- self.moxed_client.show_port(request.port_id
- ).AndReturn(
- {'port': {'id': 'my_portid3',
- 'network_id': 'my_netid1',
- 'mac_address': 'my_mac1',
- 'device_id': kwargs.get('_device') and
- self.instance2.uuid or
- ''}})
- ports['my_netid1'] = [self.port_data1[0],
- self.port_data3[0]]
- ports[request.port_id] = self.port_data3[0]
- request.network_id = 'my_netid1'
- if macs is not None:
- macs.discard('my_mac1')
- else:
- self.moxed_client.show_port(request.port_id).AndReturn(
- {'port': {'id': 'my_portid1',
- 'network_id': 'my_netid1',
- 'mac_address': 'my_mac1',
- 'device_id': kwargs.get('_device') and
- self.instance2.uuid or
- ''}})
- ports[request.port_id] = self.port_data1[0]
- request.network_id = 'my_netid1'
- if macs is not None:
- macs.discard('my_mac1')
- else:
- fixed_ips[request.network_id] = request.address
- req_net_ids.append(request.network_id)
- ordered_networks.append(request)
- else:
- for n in nets:
- ordered_networks.append(
- objects.NetworkRequest(network_id=n['id']))
- if kwargs.get('_break') == 'pre_list_networks':
- self.mox.ReplayAll()
- return api
- # search all req_net_ids as in api.py
- search_ids = req_net_ids
- if search_ids:
- mox_list_params = {'id': mox.SameElementsAs(search_ids)}
- self.moxed_client.list_networks(
- **mox_list_params).AndReturn({'networks': nets})
- else:
- mox_list_params = {'tenant_id': self.instance.project_id,
- 'shared': False}
- self.moxed_client.list_networks(
- **mox_list_params).AndReturn({'networks': nets})
- mox_list_params = {'shared': True}
- self.moxed_client.list_networks(
- **mox_list_params).AndReturn({'networks': []})
-
- if (('requested_networks' not in kwargs or
- kwargs['requested_networks'].as_tuples() == [(None, None, None)])
- and len(nets) > 1):
- self.mox.ReplayAll()
- return api
-
- ports_in_requested_net_order = []
- nets_in_requested_net_order = []
- for request in ordered_networks:
- port_req_body = {
- 'port': {
- 'device_id': self.instance.uuid,
- 'device_owner': 'compute:nova',
- },
- }
- # Network lookup for available network_id
- network = None
- for net in nets:
- if net['id'] == request.network_id:
- network = net
- break
- # if net_id did not pass validate_networks() and not available
- # here then skip it safely not continuing with a None Network
- else:
- continue
- if has_portbinding:
- port_req_body['port']['binding:host_id'] = (
- self.instance.get('host'))
- if not has_portbinding:
- api._populate_neutron_extension_values(mox.IgnoreArg(),
- self.instance, mox.IgnoreArg(),
- mox.IgnoreArg(), neutron=self.moxed_client).AndReturn(None)
- else:
- # since _populate_neutron_extension_values() will call
- # _has_port_binding_extension()
- api._has_port_binding_extension(mox.IgnoreArg(),
- neutron=self.moxed_client).\
- AndReturn(has_portbinding)
- if request.port_id:
- port = ports[request.port_id]
- self.moxed_client.update_port(request.port_id,
- MyComparator(port_req_body)
- ).AndReturn(
- {'port': port})
- ports_in_requested_net_order.append(request.port_id)
- else:
- request.address = fixed_ips.get(request.network_id)
- if request.address:
- port_req_body['port']['fixed_ips'] = [{'ip_address':
- request.address}]
- port_req_body['port']['network_id'] = request.network_id
- port_req_body['port']['admin_state_up'] = True
- port_req_body['port']['tenant_id'] = \
- self.instance.project_id
- if macs:
- port_req_body['port']['mac_address'] = macs.pop()
- if has_portbinding:
- port_req_body['port']['binding:host_id'] = (
- self.instance.get('host'))
- res_port = {'port': {'id': 'fake'}}
- if has_extra_dhcp_opts:
- port_req_body['port']['extra_dhcp_opts'] = dhcp_options
- if kwargs.get('_break') == 'mac' + request.network_id:
- self.mox.ReplayAll()
- return api
- self.moxed_client.create_port(
- MyComparator(port_req_body)).AndReturn(res_port)
- ports_in_requested_net_order.append(res_port['port']['id'])
-
- nets_in_requested_net_order.append(network)
-
- api.get_instance_nw_info(mox.IgnoreArg(),
- self.instance,
- networks=nets_in_requested_net_order,
- port_ids=ports_in_requested_net_order,
- admin_client=None
- ).AndReturn(self._returned_nw_info)
- self.mox.ReplayAll()
- return api
-
- def _verify_nw_info(self, nw_inf, index=0):
- id_suffix = index + 1
- self.assertEqual('10.0.%s.2' % id_suffix,
- nw_inf.fixed_ips()[index]['address'])
- self.assertEqual('172.0.%s.2' % id_suffix,
- nw_inf.fixed_ips()[index].floating_ip_addresses()[0])
- self.assertEqual('my_netname%s' % id_suffix,
- nw_inf[index]['network']['label'])
- self.assertEqual('my_portid%s' % id_suffix, nw_inf[index]['id'])
- self.assertEqual('my_mac%s' % id_suffix, nw_inf[index]['address'])
- self.assertEqual('10.0.%s.0/24' % id_suffix,
- nw_inf[index]['network']['subnets'][0]['cidr'])
-
- ip_addr = model.IP(address='8.8.%s.1' % id_suffix,
- version=4, type='dns')
- self.assertIn(ip_addr, nw_inf[index]['network']['subnets'][0]['dns'])
-
- def _get_instance_nw_info(self, number):
- api = neutronapi.API()
- self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
- api.db.instance_info_cache_update(mox.IgnoreArg(),
- self.instance['uuid'],
- mox.IgnoreArg())
- port_data = number == 1 and self.port_data1 or self.port_data2
- nets = number == 1 and self.nets1 or self.nets2
- net_info_cache = []
- for port in port_data:
- net_info_cache.append({"network": {"id": port['network_id']},
- "id": port['id']})
-
- instance = copy.copy(self.instance)
- # This line here does not wrap net_info_cache in jsonutils.dumps()
- # intentionally to test the other code path when it's not unicode.
- instance['info_cache'] = {'network_info': net_info_cache}
-
- self.moxed_client.list_ports(
- tenant_id=self.instance['project_id'],
- device_id=self.instance['uuid']).AndReturn(
- {'ports': port_data})
- net_ids = [port['network_id'] for port in port_data]
- nets = number == 1 and self.nets1 or self.nets2
- self.moxed_client.list_networks(
- id=net_ids).AndReturn({'networks': nets})
- for i in xrange(1, number + 1):
- float_data = number == 1 and self.float_data1 or self.float_data2
- for ip in port_data[i - 1]['fixed_ips']:
- float_data = [x for x in float_data
- if x['fixed_ip_address'] == ip['ip_address']]
- self.moxed_client.list_floatingips(
- fixed_ip_address=ip['ip_address'],
- port_id=port_data[i - 1]['id']).AndReturn(
- {'floatingips': float_data})
- subnet_data = i == 1 and self.subnet_data1 or self.subnet_data2
- self.moxed_client.list_subnets(
- id=mox.SameElementsAs(['my_subid%s' % i])).AndReturn(
- {'subnets': subnet_data})
- self.moxed_client.list_ports(
- network_id=subnet_data[0]['network_id'],
- device_owner='network:dhcp').AndReturn(
- {'ports': []})
- self.mox.ReplayAll()
- nw_inf = api.get_instance_nw_info(self.context, instance)
- for i in xrange(0, number):
- self._verify_nw_info(nw_inf, i)
-
- def _allocate_for_instance(self, net_idx=1, **kwargs):
- api = self._stub_allocate_for_instance(net_idx, **kwargs)
- return api.allocate_for_instance(self.context, self.instance, **kwargs)
-
-
-class TestNeutronv2(TestNeutronv2Base):
-
- def setUp(self):
- super(TestNeutronv2, self).setUp()
- neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
- self.moxed_client)
-
- def test_get_instance_nw_info_1(self):
- # Test to get one port in one network and subnet.
- neutronv2.get_client(mox.IgnoreArg(),
- admin=True).MultipleTimes().AndReturn(
- self.moxed_client)
- self._get_instance_nw_info(1)
-
- def test_get_instance_nw_info_2(self):
- # Test to get one port in each of two networks and subnets.
- neutronv2.get_client(mox.IgnoreArg(),
- admin=True).MultipleTimes().AndReturn(
- self.moxed_client)
- self._get_instance_nw_info(2)
-
- def test_get_instance_nw_info_with_nets_add_interface(self):
- # This tests that adding an interface to an instance does not
- # remove the first instance from the instance.
- network_model = model.Network(id='network_id',
- bridge='br-int',
- injected='injected',
- label='fake_network',
- tenant_id='fake_tenant')
- network_cache = {'info_cache': {
- 'network_info': [{'id': self.port_data2[0]['id'],
- 'address': 'mac_address',
- 'network': network_model,
- 'type': 'ovs',
- 'ovs_interfaceid': 'ovs_interfaceid',
- 'devname': 'devname'}]}}
-
- self._fake_get_instance_nw_info_helper(network_cache,
- self.port_data2,
- self.nets2,
- [self.port_data2[1]['id']])
-
- def test_get_instance_nw_info_remove_ports_from_neutron(self):
- # This tests that when a port is removed in neutron it
- # is also removed from the nova.
- network_model = model.Network(id=self.port_data2[0]['network_id'],
- bridge='br-int',
- injected='injected',
- label='fake_network',
- tenant_id='fake_tenant')
- network_cache = {'info_cache': {
- 'network_info': [{'id': 'network_id',
- 'address': 'mac_address',
- 'network': network_model,
- 'type': 'ovs',
- 'ovs_interfaceid': 'ovs_interfaceid',
- 'devname': 'devname'}]}}
-
- self._fake_get_instance_nw_info_helper(network_cache,
- self.port_data2,
- None,
- None)
-
- def test_get_instance_nw_info_ignores_neturon_ports(self):
- # Tests that only ports in the network_cache are updated
- # and ports returned from neutron that match the same
- # instance_id/device_id are ignored.
- port_data2 = copy.copy(self.port_data2)
-
- # set device_id on the ports to be the same.
- port_data2[1]['device_id'] = port_data2[0]['device_id']
- network_model = model.Network(id='network_id',
- bridge='br-int',
- injected='injected',
- label='fake_network',
- tenant_id='fake_tenant')
- network_cache = {'info_cache': {
- 'network_info': [{'id': 'network_id',
- 'address': 'mac_address',
- 'network': network_model,
- 'type': 'ovs',
- 'ovs_interfaceid': 'ovs_interfaceid',
- 'devname': 'devname'}]}}
-
- self._fake_get_instance_nw_info_helper(network_cache,
- port_data2,
- None,
- None)
-
- def _fake_get_instance_nw_info_helper(self, network_cache,
- current_neutron_ports,
- networks=None, port_ids=None):
- """Helper function to test get_instance_nw_info.
-
- :param network_cache - data already in the nova network cache.
- :param current_neutron_ports - updated list of ports from neutron.
- :param networks - networks of ports being added to instance.
- :param port_ids - new ports being added to instance.
- """
-
- # keep a copy of the original ports/networks to pass to
- # get_instance_nw_info() as the code below changes them.
- original_port_ids = copy.copy(port_ids)
- original_networks = copy.copy(networks)
-
- api = neutronapi.API()
- self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
- api.db.instance_info_cache_update(
- mox.IgnoreArg(),
- self.instance['uuid'], mox.IgnoreArg())
- neutronv2.get_client(mox.IgnoreArg(),
- admin=True).MultipleTimes().AndReturn(
- self.moxed_client)
- self.moxed_client.list_ports(
- tenant_id=self.instance['project_id'],
- device_id=self.instance['uuid']).AndReturn(
- {'ports': current_neutron_ports})
-
- ifaces = network_cache['info_cache']['network_info']
-
- if port_ids is None:
- port_ids = [iface['id'] for iface in ifaces]
- net_ids = [iface['network']['id'] for iface in ifaces]
- nets = [{'id': iface['network']['id'],
- 'name': iface['network']['label'],
- 'tenant_id': iface['network']['meta']['tenant_id']}
- for iface in ifaces]
- if networks is None:
- self.moxed_client.list_networks(
- id=net_ids).AndReturn({'networks': nets})
- else:
- networks = networks + [
- dict(id=iface['network']['id'],
- name=iface['network']['label'],
- tenant_id=iface['network']['meta']['tenant_id'])
- for iface in ifaces]
- port_ids = [iface['id'] for iface in ifaces] + port_ids
-
- index = 0
-
- current_neutron_port_map = {}
- for current_neutron_port in current_neutron_ports:
- current_neutron_port_map[current_neutron_port['id']] = (
- current_neutron_port)
- for port_id in port_ids:
- current_neutron_port = current_neutron_port_map.get(port_id)
- if current_neutron_port:
- for ip in current_neutron_port['fixed_ips']:
- self.moxed_client.list_floatingips(
- fixed_ip_address=ip['ip_address'],
- port_id=current_neutron_port['id']).AndReturn(
- {'floatingips': [self.float_data2[index]]})
- self.moxed_client.list_subnets(
- id=mox.SameElementsAs([ip['subnet_id']])
- ).AndReturn(
- {'subnets': [self.subnet_data_n[index]]})
- self.moxed_client.list_ports(
- network_id=current_neutron_port['network_id'],
- device_owner='network:dhcp').AndReturn(
- {'ports': self.dhcp_port_data1})
- index += 1
- self.mox.ReplayAll()
-
- self.instance['info_cache'] = network_cache
- instance = copy.copy(self.instance)
- instance['info_cache'] = network_cache['info_cache']
- nw_infs = api.get_instance_nw_info(self.context,
- instance,
- networks=original_networks,
- port_ids=original_port_ids)
-
- self.assertEqual(index, len(nw_infs))
- # ensure that nic ordering is preserved
- for iface_index in range(index):
- self.assertEqual(nw_infs[iface_index]['id'],
- port_ids[iface_index])
-
- def test_get_instance_nw_info_without_subnet(self):
- # Test get instance_nw_info for a port without subnet.
- api = neutronapi.API()
- self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
- api.db.instance_info_cache_update(
- mox.IgnoreArg(),
- self.instance['uuid'], mox.IgnoreArg())
- self.moxed_client.list_ports(
- tenant_id=self.instance['project_id'],
- device_id=self.instance['uuid']).AndReturn(
- {'ports': self.port_data3})
- self.moxed_client.list_networks(
- id=[self.port_data1[0]['network_id']]).AndReturn(
- {'networks': self.nets1})
- neutronv2.get_client(mox.IgnoreArg(),
- admin=True).MultipleTimes().AndReturn(
- self.moxed_client)
-
- net_info_cache = []
- for port in self.port_data3:
- net_info_cache.append({"network": {"id": port['network_id']},
- "id": port['id']})
- instance = copy.copy(self.instance)
- instance['info_cache'] = {'network_info':
- six.text_type(
- jsonutils.dumps(net_info_cache))}
-
- self.mox.ReplayAll()
-
- nw_inf = api.get_instance_nw_info(self.context,
- instance)
-
- id_suffix = 3
- self.assertEqual(0, len(nw_inf.fixed_ips()))
- self.assertEqual('my_netname1', nw_inf[0]['network']['label'])
- self.assertEqual('my_portid%s' % id_suffix, nw_inf[0]['id'])
- self.assertEqual('my_mac%s' % id_suffix, nw_inf[0]['address'])
- self.assertEqual(0, len(nw_inf[0]['network']['subnets']))
-
- def test_refresh_neutron_extensions_cache(self):
- api = neutronapi.API()
-
- # Note: Don't want the default get_client from setUp()
- self.mox.ResetAll()
- neutronv2.get_client(mox.IgnoreArg()).AndReturn(
- self.moxed_client)
- self.moxed_client.list_extensions().AndReturn(
- {'extensions': [{'name': constants.QOS_QUEUE}]})
- self.mox.ReplayAll()
- api._refresh_neutron_extensions_cache(mox.IgnoreArg())
- self.assertEqual(
- {constants.QOS_QUEUE: {'name': constants.QOS_QUEUE}},
- api.extensions)
-
- def test_populate_neutron_extension_values_rxtx_factor(self):
- api = neutronapi.API()
-
- # Note: Don't want the default get_client from setUp()
- self.mox.ResetAll()
- neutronv2.get_client(mox.IgnoreArg()).AndReturn(
- self.moxed_client)
- self.moxed_client.list_extensions().AndReturn(
- {'extensions': [{'name': constants.QOS_QUEUE}]})
- self.mox.ReplayAll()
- flavor = flavors.get_default_flavor()
- flavor['rxtx_factor'] = 1
- sys_meta = utils.dict_to_metadata(
- flavors.save_flavor_info({}, flavor))
- instance = {'system_metadata': sys_meta}
- port_req_body = {'port': {}}
- api._populate_neutron_extension_values(self.context, instance,
- None, port_req_body)
- self.assertEqual(port_req_body['port']['rxtx_factor'], 1)
-
- def test_allocate_for_instance_1(self):
- # Allocate one port in one network env.
- self._allocate_for_instance(1)
-
- def test_allocate_for_instance_2(self):
- # Allocate one port in two networks env.
- api = self._stub_allocate_for_instance(net_idx=2)
- self.assertRaises(exception.NetworkAmbiguous,
- api.allocate_for_instance,
- self.context, self.instance)
-
- def test_allocate_for_instance_accepts_macs_kwargs_None(self):
- # The macs kwarg should be accepted as None.
- self._allocate_for_instance(1, macs=None)
-
- def test_allocate_for_instance_accepts_macs_kwargs_set(self):
- # The macs kwarg should be accepted, as a set, the
- # _allocate_for_instance helper checks that the mac is used to create a
- # port.
- self._allocate_for_instance(1, macs=set(['ab:cd:ef:01:23:45']))
-
- def test_allocate_for_instance_accepts_only_portid(self):
- # Make sure allocate_for_instance works when only a portid is provided
- self._returned_nw_info = self.port_data1
- result = self._allocate_for_instance(
- requested_networks=objects.NetworkRequestList(
- objects=[objects.NetworkRequest(port_id='my_portid1')]))
- self.assertEqual(self.port_data1, result)
-
- def test_allocate_for_instance_not_enough_macs_via_ports(self):
- # using a hypervisor MAC via a pre-created port will stop it being
- # used to dynamically create a port on a network. We put the network
- # first in requested_networks so that if the code were to not pre-check
- # requested ports, it would incorrectly assign the mac and not fail.
- requested_networks = objects.NetworkRequestList(
- objects = [
- objects.NetworkRequest(network_id=self.nets2[1]['id']),
- objects.NetworkRequest(port_id='my_portid1')])
- api = self._stub_allocate_for_instance(
- net_idx=2, requested_networks=requested_networks,
- macs=set(['my_mac1']),
- _break='mac' + self.nets2[1]['id'])
- self.assertRaises(exception.PortNotFree,
- api.allocate_for_instance, self.context,
- self.instance, requested_networks=requested_networks,
- macs=set(['my_mac1']))
-
- def test_allocate_for_instance_not_enough_macs(self):
- # If not enough MAC addresses are available to allocate to networks, an
- # error should be raised.
- # We could pass in macs=set(), but that wouldn't tell us that
- # allocate_for_instance tracks used macs properly, so we pass in one
- # mac, and ask for two networks.
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id=self.nets2[1]['id']),
- objects.NetworkRequest(network_id=self.nets2[0]['id'])])
- api = self._stub_allocate_for_instance(
- net_idx=2, requested_networks=requested_networks,
- macs=set(['my_mac2']),
- _break='mac' + self.nets2[0]['id'])
- with mock.patch.object(api, '_delete_ports'):
- self.assertRaises(exception.PortNotFree,
- api.allocate_for_instance, self.context,
- self.instance,
- requested_networks=requested_networks,
- macs=set(['my_mac2']))
-
- def test_allocate_for_instance_two_macs_two_networks(self):
- # If two MACs are available and two networks requested, two new ports
- # get made and no exceptions raised.
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id=self.nets2[1]['id']),
- objects.NetworkRequest(network_id=self.nets2[0]['id'])])
- self._allocate_for_instance(
- net_idx=2, requested_networks=requested_networks,
- macs=set(['my_mac2', 'my_mac1']))
-
- def test_allocate_for_instance_mac_conflicting_requested_port(self):
- # specify only first and last network
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(port_id='my_portid1')])
- api = self._stub_allocate_for_instance(
- net_idx=1, requested_networks=requested_networks,
- macs=set(['unknown:mac']),
- _break='pre_list_networks')
- self.assertRaises(exception.PortNotUsable,
- api.allocate_for_instance, self.context,
- self.instance, requested_networks=requested_networks,
- macs=set(['unknown:mac']))
-
- def test_allocate_for_instance_without_requested_networks(self):
- api = self._stub_allocate_for_instance(net_idx=3)
- self.assertRaises(exception.NetworkAmbiguous,
- api.allocate_for_instance,
- self.context, self.instance)
-
- def test_allocate_for_instance_with_requested_non_available_network(self):
- """verify that a non available network is ignored.
- self.nets2 (net_idx=2) is composed of self.nets3[0] and self.nets3[1]
- Do not create a port on a non available network self.nets3[2].
- """
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id=net['id'])
- for net in (self.nets3[0], self.nets3[2], self.nets3[1])])
- self._allocate_for_instance(net_idx=2,
- requested_networks=requested_networks)
-
- def test_allocate_for_instance_with_requested_networks(self):
- # specify only first and last network
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id=net['id'])
- for net in (self.nets3[1], self.nets3[0], self.nets3[2])])
- self._allocate_for_instance(net_idx=3,
- requested_networks=requested_networks)
-
- def test_allocate_for_instance_with_requested_networks_with_fixedip(self):
- # specify only first and last network
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id=self.nets1[0]['id'],
- address='10.0.1.0')])
- self._allocate_for_instance(net_idx=1,
- requested_networks=requested_networks)
-
- def test_allocate_for_instance_with_requested_networks_with_port(self):
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(port_id='my_portid1')])
- self._allocate_for_instance(net_idx=1,
- requested_networks=requested_networks)
-
- def test_allocate_for_instance_no_networks(self):
- """verify the exception thrown when there are no networks defined."""
- self.instance = fake_instance.fake_instance_obj(self.context,
- **self.instance)
- api = neutronapi.API()
- self.moxed_client.list_extensions().AndReturn({'extensions': []})
- self.moxed_client.list_networks(
- tenant_id=self.instance.project_id,
- shared=False).AndReturn(
- {'networks': model.NetworkInfo([])})
- self.moxed_client.list_networks(shared=True).AndReturn(
- {'networks': model.NetworkInfo([])})
- self.mox.ReplayAll()
- nwinfo = api.allocate_for_instance(self.context, self.instance)
- self.assertEqual(len(nwinfo), 0)
-
- def test_allocate_for_instance_ex1(self):
- """verify we will delete created ports
- if we fail to allocate all net resources.
-
- Mox to raise exception when creating a second port.
- In this case, the code should delete the first created port.
- """
- self.instance = fake_instance.fake_instance_obj(self.context,
- **self.instance)
- api = neutronapi.API()
- self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
- self.mox.StubOutWithMock(api, '_has_port_binding_extension')
- api._has_port_binding_extension(mox.IgnoreArg(),
- neutron=self.moxed_client,
- refresh_cache=True).AndReturn(False)
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id=net['id'])
- for net in (self.nets2[0], self.nets2[1])])
- self.moxed_client.list_networks(
- id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2})
- index = 0
- for network in self.nets2:
- binding_port_req_body = {
- 'port': {
- 'device_id': self.instance.uuid,
- 'device_owner': 'compute:nova',
- },
- }
- port_req_body = {
- 'port': {
- 'network_id': network['id'],
- 'admin_state_up': True,
- 'tenant_id': self.instance.project_id,
- },
- }
- port_req_body['port'].update(binding_port_req_body['port'])
- port = {'id': 'portid_' + network['id']}
-
- api._populate_neutron_extension_values(self.context,
- self.instance, None, binding_port_req_body,
- neutron=self.moxed_client).AndReturn(None)
- if index == 0:
- self.moxed_client.create_port(
- MyComparator(port_req_body)).AndReturn({'port': port})
- else:
- NeutronOverQuota = exceptions.OverQuotaClient()
- self.moxed_client.create_port(
- MyComparator(port_req_body)).AndRaise(NeutronOverQuota)
- index += 1
- self.moxed_client.delete_port('portid_' + self.nets2[0]['id'])
- self.mox.ReplayAll()
- self.assertRaises(exception.PortLimitExceeded,
- api.allocate_for_instance,
- self.context, self.instance,
- requested_networks=requested_networks)
-
- def test_allocate_for_instance_ex2(self):
- """verify we have no port to delete
- if we fail to allocate the first net resource.
-
- Mox to raise exception when creating the first port.
- In this case, the code should not delete any ports.
- """
- self.instance = fake_instance.fake_instance_obj(self.context,
- **self.instance)
- api = neutronapi.API()
- self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
- self.mox.StubOutWithMock(api, '_has_port_binding_extension')
- api._has_port_binding_extension(mox.IgnoreArg(),
- neutron=self.moxed_client,
- refresh_cache=True).AndReturn(False)
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id=net['id'])
- for net in (self.nets2[0], self.nets2[1])])
- self.moxed_client.list_networks(
- id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2})
- binding_port_req_body = {
- 'port': {
- 'device_id': self.instance.uuid,
- 'device_owner': 'compute:nova',
- },
- }
- port_req_body = {
- 'port': {
- 'network_id': self.nets2[0]['id'],
- 'admin_state_up': True,
- 'device_id': self.instance.uuid,
- 'tenant_id': self.instance.project_id,
- },
- }
- api._populate_neutron_extension_values(self.context,
- self.instance, None, binding_port_req_body,
- neutron=self.moxed_client).AndReturn(None)
- self.moxed_client.create_port(
- MyComparator(port_req_body)).AndRaise(
- Exception("fail to create port"))
- self.mox.ReplayAll()
- self.assertRaises(NEUTRON_CLIENT_EXCEPTION, api.allocate_for_instance,
- self.context, self.instance,
- requested_networks=requested_networks)
-
- def test_allocate_for_instance_no_port_or_network(self):
- class BailOutEarly(Exception):
- pass
- self.instance = fake_instance.fake_instance_obj(self.context,
- **self.instance)
- api = neutronapi.API()
- self.moxed_client.list_extensions().AndReturn({'extensions': []})
- self.mox.StubOutWithMock(api, '_get_available_networks')
- # Make sure we get an empty list and then bail out of the rest
- # of the function
- api._get_available_networks(self.context, self.instance.project_id,
- [],
- neutron=self.moxed_client).\
- AndRaise(BailOutEarly)
- self.mox.ReplayAll()
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest()])
- self.assertRaises(BailOutEarly,
- api.allocate_for_instance,
- self.context, self.instance,
- requested_networks=requested_networks)
-
- def test_allocate_for_instance_second_time(self):
- # Make sure that allocate_for_instance only returns ports that it
- # allocated during _that_ run.
- new_port = {'id': 'fake'}
- self._returned_nw_info = self.port_data1 + [new_port]
- nw_info = self._allocate_for_instance()
- self.assertEqual(nw_info, [new_port])
-
- def test_allocate_for_instance_port_in_use(self):
- # If a port is already in use, an exception should be raised.
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(port_id='my_portid1')])
- api = self._stub_allocate_for_instance(
- requested_networks=requested_networks,
- _break='pre_list_networks',
- _device=True)
- self.assertRaises(exception.PortInUse,
- api.allocate_for_instance, self.context,
- self.instance, requested_networks=requested_networks)
-
- def test_allocate_for_instance_with_externalnet_forbidden(self):
- """Only one network is available, it's external, and the client
- is unauthorized to use it.
- """
- self.instance = fake_instance.fake_instance_obj(self.context,
- **self.instance)
- self.moxed_client.list_extensions().AndReturn({'extensions': []})
- # no networks in the tenant
- self.moxed_client.list_networks(
- tenant_id=self.instance.project_id,
- shared=False).AndReturn(
- {'networks': model.NetworkInfo([])})
- # external network is shared
- self.moxed_client.list_networks(shared=True).AndReturn(
- {'networks': self.nets8})
- self.mox.ReplayAll()
- api = neutronapi.API()
- self.assertRaises(exception.ExternalNetworkAttachForbidden,
- api.allocate_for_instance,
- self.context, self.instance)
-
- def test_allocate_for_instance_with_externalnet_multiple(self):
- """Multiple networks are available, one the client is authorized
- to use, and an external one the client is unauthorized to use.
- """
- self.instance = fake_instance.fake_instance_obj(self.context,
- **self.instance)
- self.moxed_client.list_extensions().AndReturn({'extensions': []})
- # network found in the tenant
- self.moxed_client.list_networks(
- tenant_id=self.instance.project_id,
- shared=False).AndReturn(
- {'networks': self.nets1})
- # external network is shared
- self.moxed_client.list_networks(shared=True).AndReturn(
- {'networks': self.nets8})
- self.mox.ReplayAll()
- api = neutronapi.API()
- self.assertRaises(
- exception.NetworkAmbiguous,
- api.allocate_for_instance,
- self.context, self.instance)
-
- def test_allocate_for_instance_with_externalnet_admin_ctx(self):
- """Only one network is available, it's external, and the client
- is authorized.
- """
- admin_ctx = context.RequestContext('userid', 'my_tenantid',
- is_admin=True)
- api = self._stub_allocate_for_instance(net_idx=8)
- api.allocate_for_instance(admin_ctx, self.instance)
-
- def _deallocate_for_instance(self, number, requested_networks=None):
- # TODO(mriedem): Remove this conversion when all neutronv2 APIs are
- # converted to handling instance objects.
- self.instance = fake_instance.fake_instance_obj(self.context,
- **self.instance)
- api = neutronapi.API()
- port_data = number == 1 and self.port_data1 or self.port_data2
- ret_data = copy.deepcopy(port_data)
- if requested_networks:
- if isinstance(requested_networks, objects.NetworkRequestList):
- # NOTE(danms): Temporary and transitional
- with mock.patch('nova.utils.is_neutron', return_value=True):
- requested_networks = requested_networks.as_tuples()
- for net, fip, port, request_id in requested_networks:
- ret_data.append({'network_id': net,
- 'device_id': self.instance.uuid,
- 'device_owner': 'compute:nova',
- 'id': port,
- 'status': 'DOWN',
- 'admin_state_up': True,
- 'fixed_ips': [],
- 'mac_address': 'fake_mac', })
- self.moxed_client.list_ports(
- device_id=self.instance.uuid).AndReturn(
- {'ports': ret_data})
- if requested_networks:
- for net, fip, port, request_id in requested_networks:
- self.moxed_client.update_port(port)
- for port in reversed(port_data):
- self.moxed_client.delete_port(port['id'])
-
- self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
- api.db.instance_info_cache_update(self.context,
- self.instance.uuid,
- {'network_info': '[]'})
- self.mox.ReplayAll()
-
- api = neutronapi.API()
- api.deallocate_for_instance(self.context, self.instance,
- requested_networks=requested_networks)
-
- def test_deallocate_for_instance_1_with_requested(self):
- requested = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id='fake-net',
- address='1.2.3.4',
- port_id='fake-port')])
- # Test to deallocate in one port env.
- self._deallocate_for_instance(1, requested_networks=requested)
-
- def test_deallocate_for_instance_2_with_requested(self):
- requested = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id='fake-net',
- address='1.2.3.4',
- port_id='fake-port')])
- # Test to deallocate in one port env.
- self._deallocate_for_instance(2, requested_networks=requested)
-
- def test_deallocate_for_instance_1(self):
- # Test to deallocate in one port env.
- self._deallocate_for_instance(1)
-
- def test_deallocate_for_instance_2(self):
- # Test to deallocate in two ports env.
- self._deallocate_for_instance(2)
-
- def test_deallocate_for_instance_port_not_found(self):
- # TODO(mriedem): Remove this conversion when all neutronv2 APIs are
- # converted to handling instance objects.
- self.instance = fake_instance.fake_instance_obj(self.context,
- **self.instance)
- port_data = self.port_data1
- self.moxed_client.list_ports(
- device_id=self.instance.uuid).AndReturn(
- {'ports': port_data})
-
- NeutronNotFound = exceptions.NeutronClientException(status_code=404)
- for port in reversed(port_data):
- self.moxed_client.delete_port(port['id']).AndRaise(
- NeutronNotFound)
- self.mox.ReplayAll()
-
- api = neutronapi.API()
- api.deallocate_for_instance(self.context, self.instance)
-
- def _test_deallocate_port_for_instance(self, number):
- port_data = number == 1 and self.port_data1 or self.port_data2
- nets = number == 1 and self.nets1 or self.nets2
- self.moxed_client.delete_port(port_data[0]['id'])
-
- net_info_cache = []
- for port in port_data:
- net_info_cache.append({"network": {"id": port['network_id']},
- "id": port['id']})
- instance = copy.copy(self.instance)
- instance['info_cache'] = {'network_info':
- six.text_type(
- jsonutils.dumps(net_info_cache))}
- api = neutronapi.API()
- neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
- self.moxed_client)
- self.moxed_client.list_ports(
- tenant_id=self.instance['project_id'],
- device_id=self.instance['uuid']).AndReturn(
- {'ports': port_data[1:]})
- neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
- self.moxed_client)
- net_ids = [port['network_id'] for port in port_data]
- self.moxed_client.list_networks(id=net_ids).AndReturn(
- {'networks': nets})
- float_data = number == 1 and self.float_data1 or self.float_data2
- for data in port_data[1:]:
- for ip in data['fixed_ips']:
- self.moxed_client.list_floatingips(
- fixed_ip_address=ip['ip_address'],
- port_id=data['id']).AndReturn(
- {'floatingips': float_data[1:]})
- for port in port_data[1:]:
- self.moxed_client.list_subnets(id=['my_subid2']).AndReturn({})
-
- self.mox.ReplayAll()
-
- nwinfo = api.deallocate_port_for_instance(self.context, instance,
- port_data[0]['id'])
- self.assertEqual(len(nwinfo), len(port_data[1:]))
- if len(port_data) > 1:
- self.assertEqual(nwinfo[0]['network']['id'], 'my_netid2')
-
- def test_deallocate_port_for_instance_1(self):
- # Test to deallocate the first and only port
- self._test_deallocate_port_for_instance(1)
-
- def test_deallocate_port_for_instance_2(self):
- # Test to deallocate the first port of two
- self._test_deallocate_port_for_instance(2)
-
- def test_list_ports(self):
- search_opts = {'parm': 'value'}
- self.moxed_client.list_ports(**search_opts)
- self.mox.ReplayAll()
- neutronapi.API().list_ports(self.context, **search_opts)
-
- def test_show_port(self):
- self.moxed_client.show_port('foo')
- self.mox.ReplayAll()
- neutronapi.API().show_port(self.context, 'foo')
-
- def test_validate_networks(self):
- requested_networks = [('my_netid1', None, None, None),
- ('my_netid2', None, None, None)]
- ids = ['my_netid1', 'my_netid2']
- self.moxed_client.list_networks(
- id=mox.SameElementsAs(ids)).AndReturn(
- {'networks': self.nets2})
- self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
- {'ports': []})
- self.moxed_client.show_quota(
- tenant_id='my_tenantid').AndReturn(
- {'quota': {'port': 50}})
- self.mox.ReplayAll()
- api = neutronapi.API()
- api.validate_networks(self.context, requested_networks, 1)
-
- def test_validate_networks_without_port_quota_on_network_side(self):
- requested_networks = [('my_netid1', None, None, None),
- ('my_netid2', None, None, None)]
- ids = ['my_netid1', 'my_netid2']
- self.moxed_client.list_networks(
- id=mox.SameElementsAs(ids)).AndReturn(
- {'networks': self.nets2})
- self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
- {'ports': []})
- self.moxed_client.show_quota(
- tenant_id='my_tenantid').AndReturn(
- {'quota': {}})
- self.mox.ReplayAll()
- api = neutronapi.API()
- api.validate_networks(self.context, requested_networks, 1)
-
- def test_validate_networks_ex_1(self):
- requested_networks = [('my_netid1', None, None, None)]
- self.moxed_client.list_networks(
- id=mox.SameElementsAs(['my_netid1'])).AndReturn(
- {'networks': self.nets1})
- self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
- {'ports': []})
- self.moxed_client.show_quota(
- tenant_id='my_tenantid').AndReturn(
- {'quota': {'port': 50}})
- self.mox.ReplayAll()
- api = neutronapi.API()
- try:
- api.validate_networks(self.context, requested_networks, 1)
- except exception.NetworkNotFound as ex:
- self.assertIn("my_netid2", six.text_type(ex))
-
- def test_validate_networks_ex_2(self):
- requested_networks = [('my_netid1', None, None, None),
- ('my_netid2', None, None, None),
- ('my_netid3', None, None, None)]
- ids = ['my_netid1', 'my_netid2', 'my_netid3']
- self.moxed_client.list_networks(
- id=mox.SameElementsAs(ids)).AndReturn(
- {'networks': self.nets1})
- self.mox.ReplayAll()
- api = neutronapi.API()
- try:
- api.validate_networks(self.context, requested_networks, 1)
- except exception.NetworkNotFound as ex:
- self.assertIn("my_netid2, my_netid3", six.text_type(ex))
-
- def test_validate_networks_duplicate_disable(self):
- """Verify that the correct exception is thrown when duplicate
- network ids are passed to validate_networks, when nova config flag
- allow_duplicate_networks is set to its default value: False
- """
- requested_networks = [('my_netid1', None, None, None),
- ('my_netid1', None, None, None)]
- self.mox.ReplayAll()
- # Expected call from setUp.
- neutronv2.get_client(None)
- api = neutronapi.API()
- self.assertRaises(exception.NetworkDuplicated,
- api.validate_networks,
- self.context, requested_networks, 1)
-
- def test_validate_networks_duplicate_enable(self):
- """Verify that no duplicateNetworks exception is thrown when duplicate
- network ids are passed to validate_networks, when nova config flag
- allow_duplicate_networks is set to its non default value: True
- """
- self.flags(allow_duplicate_networks=True, group='neutron')
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id='my_netid1'),
- objects.NetworkRequest(network_id='my_netid1')])
- ids = ['my_netid1', 'my_netid1']
-
- self.moxed_client.list_networks(
- id=mox.SameElementsAs(ids)).AndReturn(
- {'networks': self.nets1})
- self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
- {'ports': []})
- self.moxed_client.show_quota(
- tenant_id='my_tenantid').AndReturn(
- {'quota': {'port': 50}})
- self.mox.ReplayAll()
- api = neutronapi.API()
- api.validate_networks(self.context, requested_networks, 1)
-
- def test_allocate_for_instance_with_requested_networks_duplicates(self):
- # specify a duplicate network to allocate to instance
- self.flags(allow_duplicate_networks=True, group='neutron')
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id=net['id'])
- for net in (self.nets6[0], self.nets6[1])])
- self._allocate_for_instance(net_idx=6,
- requested_networks=requested_networks)
-
- def test_allocate_for_instance_requested_networks_duplicates_port(self):
- # specify first port and last port that are in same network
- self.flags(allow_duplicate_networks=True, group='neutron')
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(port_id=port['id'])
- for port in (self.port_data1[0], self.port_data3[0])])
- self._allocate_for_instance(net_idx=6,
- requested_networks=requested_networks)
-
- def test_allocate_for_instance_requested_networks_duplicates_combo(self):
- # specify a combo net_idx=7 : net2, port in net1, net2, port in net1
- self.flags(allow_duplicate_networks=True, group='neutron')
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id='my_netid2'),
- objects.NetworkRequest(port_id=self.port_data1[0]['id']),
- objects.NetworkRequest(network_id='my_netid2'),
- objects.NetworkRequest(port_id=self.port_data3[0]['id'])])
- self._allocate_for_instance(net_idx=7,
- requested_networks=requested_networks)
-
- def test_validate_networks_not_specified(self):
- requested_networks = objects.NetworkRequestList(objects=[])
- self.moxed_client.list_networks(
- tenant_id=self.context.project_id,
- shared=False).AndReturn(
- {'networks': self.nets1})
- self.moxed_client.list_networks(
- shared=True).AndReturn(
- {'networks': self.nets2})
- self.mox.ReplayAll()
- api = neutronapi.API()
- self.assertRaises(exception.NetworkAmbiguous,
- api.validate_networks,
- self.context, requested_networks, 1)
-
- def test_validate_networks_port_not_found(self):
- # Verify that the correct exception is thrown when a non existent
- # port is passed to validate_networks.
-
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(
- network_id='my_netid1',
- port_id='3123-ad34-bc43-32332ca33e')])
-
- NeutronNotFound = exceptions.NeutronClientException(status_code=404)
- self.moxed_client.show_port(requested_networks[0].port_id).AndRaise(
- NeutronNotFound)
- self.mox.ReplayAll()
- # Expected call from setUp.
- neutronv2.get_client(None)
- api = neutronapi.API()
- self.assertRaises(exception.PortNotFound,
- api.validate_networks,
- self.context, requested_networks, 1)
-
- def test_validate_networks_port_show_rasies_non404(self):
- # Verify that the correct exception is thrown when a non existent
- # port is passed to validate_networks.
-
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(
- network_id='my_netid1',
- port_id='3123-ad34-bc43-32332ca33e')])
-
- NeutronNotFound = exceptions.NeutronClientException(status_code=0)
- self.moxed_client.show_port(requested_networks[0].port_id).AndRaise(
- NeutronNotFound)
- self.mox.ReplayAll()
- # Expected call from setUp.
- neutronv2.get_client(None)
- api = neutronapi.API()
- self.assertRaises(exceptions.NeutronClientException,
- api.validate_networks,
- self.context, requested_networks, 1)
-
- def test_validate_networks_port_in_use(self):
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(port_id=self.port_data3[0]['id'])])
- self.moxed_client.show_port(self.port_data3[0]['id']).\
- AndReturn({'port': self.port_data3[0]})
-
- self.mox.ReplayAll()
-
- api = neutronapi.API()
- self.assertRaises(exception.PortInUse,
- api.validate_networks,
- self.context, requested_networks, 1)
-
- def test_validate_networks_port_no_subnet_id(self):
- port_a = self.port_data3[0]
- port_a['device_id'] = None
- port_a['device_owner'] = None
-
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(port_id=port_a['id'])])
- self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
-
- self.mox.ReplayAll()
-
- api = neutronapi.API()
- self.assertRaises(exception.PortRequiresFixedIP,
- api.validate_networks,
- self.context, requested_networks, 1)
-
- def test_validate_networks_no_subnet_id(self):
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id='his_netid4')])
- ids = ['his_netid4']
- self.moxed_client.list_networks(
- id=mox.SameElementsAs(ids)).AndReturn(
- {'networks': self.nets4})
- self.mox.ReplayAll()
- api = neutronapi.API()
- self.assertRaises(exception.NetworkRequiresSubnet,
- api.validate_networks,
- self.context, requested_networks, 1)
-
- def test_validate_networks_ports_in_same_network_disable(self):
- """Verify that duplicateNetworks exception is thrown when ports on same
- duplicate network are passed to validate_networks, when nova config
- flag allow_duplicate_networks is set to its default False
- """
- self.flags(allow_duplicate_networks=False, group='neutron')
- port_a = self.port_data3[0]
- port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
- 'subnet_id': 'subnet_id'}
- port_b = self.port_data1[0]
- self.assertEqual(port_a['network_id'], port_b['network_id'])
- for port in [port_a, port_b]:
- port['device_id'] = None
- port['device_owner'] = None
-
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(port_id=port_a['id']),
- objects.NetworkRequest(port_id=port_b['id'])])
- self.moxed_client.show_port(port_a['id']).AndReturn(
- {'port': port_a})
- self.moxed_client.show_port(port_b['id']).AndReturn(
- {'port': port_b})
-
- self.mox.ReplayAll()
-
- api = neutronapi.API()
- self.assertRaises(exception.NetworkDuplicated,
- api.validate_networks,
- self.context, requested_networks, 1)
-
- def test_validate_networks_ports_in_same_network_enable(self):
- """Verify that duplicateNetworks exception is not thrown when ports
- on same duplicate network are passed to validate_networks, when nova
- config flag allow_duplicate_networks is set to its True
- """
- self.flags(allow_duplicate_networks=True, group='neutron')
- port_a = self.port_data3[0]
- port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
- 'subnet_id': 'subnet_id'}
- port_b = self.port_data1[0]
- self.assertEqual(port_a['network_id'], port_b['network_id'])
- for port in [port_a, port_b]:
- port['device_id'] = None
- port['device_owner'] = None
-
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(port_id=port_a['id']),
- objects.NetworkRequest(port_id=port_b['id'])])
- self.moxed_client.show_port(port_a['id']).AndReturn(
- {'port': port_a})
- self.moxed_client.show_port(port_b['id']).AndReturn(
- {'port': port_b})
-
- self.mox.ReplayAll()
-
- api = neutronapi.API()
- api.validate_networks(self.context, requested_networks, 1)
-
- def test_validate_networks_ports_not_in_same_network(self):
- port_a = self.port_data3[0]
- port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
- 'subnet_id': 'subnet_id'}
- port_b = self.port_data2[1]
- self.assertNotEqual(port_a['network_id'], port_b['network_id'])
- for port in [port_a, port_b]:
- port['device_id'] = None
- port['device_owner'] = None
-
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(port_id=port_a['id']),
- objects.NetworkRequest(port_id=port_b['id'])])
- self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
- self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
- self.mox.ReplayAll()
-
- api = neutronapi.API()
- api.validate_networks(self.context, requested_networks, 1)
-
- def test_validate_networks_no_quota(self):
- # Test validation for a request for one instance needing
- # two ports, where the quota is 2 and 2 ports are in use
- # => instances which can be created = 0
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id='my_netid1'),
- objects.NetworkRequest(network_id='my_netid2')])
- ids = ['my_netid1', 'my_netid2']
- self.moxed_client.list_networks(
- id=mox.SameElementsAs(ids)).AndReturn(
- {'networks': self.nets2})
- self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
- {'ports': self.port_data2})
- self.moxed_client.show_quota(
- tenant_id='my_tenantid').AndReturn(
- {'quota': {'port': 2}})
- self.mox.ReplayAll()
- api = neutronapi.API()
- max_count = api.validate_networks(self.context,
- requested_networks, 1)
- self.assertEqual(max_count, 0)
-
- def test_validate_networks_with_ports_and_networks(self):
- # Test validation for a request for one instance needing
- # one port allocated via nova with another port being passed in.
- port_b = self.port_data2[1]
- port_b['device_id'] = None
- port_b['device_owner'] = None
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id='my_netid1'),
- objects.NetworkRequest(port_id=port_b['id'])])
- self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
- ids = ['my_netid1']
- self.moxed_client.list_networks(
- id=mox.SameElementsAs(ids)).AndReturn(
- {'networks': self.nets1})
- self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
- {'ports': self.port_data2})
- self.moxed_client.show_quota(
- tenant_id='my_tenantid').AndReturn(
- {'quota': {'port': 5}})
- self.mox.ReplayAll()
- api = neutronapi.API()
- max_count = api.validate_networks(self.context,
- requested_networks, 1)
- self.assertEqual(max_count, 1)
-
- def test_validate_networks_one_port_and_no_networks(self):
- # Test that show quota is not called if no networks are
- # passed in and only ports.
- port_b = self.port_data2[1]
- port_b['device_id'] = None
- port_b['device_owner'] = None
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(port_id=port_b['id'])])
- self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
- self.mox.ReplayAll()
- api = neutronapi.API()
- max_count = api.validate_networks(self.context,
- requested_networks, 1)
- self.assertEqual(max_count, 1)
-
- def test_validate_networks_some_quota(self):
- # Test validation for a request for two instance needing
- # two ports each, where the quota is 5 and 2 ports are in use
- # => instances which can be created = 1
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id='my_netid1'),
- objects.NetworkRequest(network_id='my_netid2')])
- ids = ['my_netid1', 'my_netid2']
- self.moxed_client.list_networks(
- id=mox.SameElementsAs(ids)).AndReturn(
- {'networks': self.nets2})
- self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
- {'ports': self.port_data2})
- self.moxed_client.show_quota(
- tenant_id='my_tenantid').AndReturn(
- {'quota': {'port': 5}})
- self.mox.ReplayAll()
- api = neutronapi.API()
- max_count = api.validate_networks(self.context,
- requested_networks, 2)
- self.assertEqual(max_count, 1)
-
- def test_validate_networks_unlimited_quota(self):
- # Test validation for a request for two instance needing
- # two ports each, where the quota is -1 (unlimited)
- # => instances which can be created = 1
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id='my_netid1'),
- objects.NetworkRequest(network_id='my_netid2')])
- ids = ['my_netid1', 'my_netid2']
- self.moxed_client.list_networks(
- id=mox.SameElementsAs(ids)).AndReturn(
- {'networks': self.nets2})
- self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
- {'ports': self.port_data2})
- self.moxed_client.show_quota(
- tenant_id='my_tenantid').AndReturn(
- {'quota': {'port': -1}})
- self.mox.ReplayAll()
- api = neutronapi.API()
- max_count = api.validate_networks(self.context,
- requested_networks, 2)
- self.assertEqual(max_count, 2)
-
- def test_validate_networks_no_quota_but_ports_supplied(self):
- port_a = self.port_data3[0]
- port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
- 'subnet_id': 'subnet_id'}
- port_b = self.port_data2[1]
- self.assertNotEqual(port_a['network_id'], port_b['network_id'])
- for port in [port_a, port_b]:
- port['device_id'] = None
- port['device_owner'] = None
-
- requested_networks = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(port_id=port_a['id']),
- objects.NetworkRequest(port_id=port_b['id'])])
- self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
- self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
-
- self.mox.ReplayAll()
-
- api = neutronapi.API()
- max_count = api.validate_networks(self.context,
- requested_networks, 1)
- self.assertEqual(max_count, 1)
-
- def _mock_list_ports(self, port_data=None):
- if port_data is None:
- port_data = self.port_data2
- address = self.port_address
- self.moxed_client.list_ports(
- fixed_ips=MyComparator('ip_address=%s' % address)).AndReturn(
- {'ports': port_data})
- self.mox.ReplayAll()
- return address
-
- def test_get_instance_uuids_by_ip_filter(self):
- self._mock_list_ports()
- filters = {'ip': '^10\\.0\\.1\\.2$'}
- api = neutronapi.API()
- result = api.get_instance_uuids_by_ip_filter(self.context, filters)
- self.assertEqual(self.instance2['uuid'], result[0]['instance_uuid'])
- self.assertEqual(self.instance['uuid'], result[1]['instance_uuid'])
-
- def test_get_fixed_ip_by_address_fails_for_no_ports(self):
- address = self._mock_list_ports(port_data=[])
- api = neutronapi.API()
- self.assertRaises(exception.FixedIpNotFoundForAddress,
- api.get_fixed_ip_by_address,
- self.context, address)
-
- def test_get_fixed_ip_by_address_succeeds_for_1_port(self):
- address = self._mock_list_ports(port_data=self.port_data1)
- api = neutronapi.API()
- result = api.get_fixed_ip_by_address(self.context, address)
- self.assertEqual(self.instance2['uuid'], result['instance_uuid'])
-
- def test_get_fixed_ip_by_address_fails_for_more_than_1_port(self):
- address = self._mock_list_ports()
- api = neutronapi.API()
- self.assertRaises(exception.FixedIpAssociatedWithMultipleInstances,
- api.get_fixed_ip_by_address,
- self.context, address)
-
- def _get_available_networks(self, prv_nets, pub_nets,
- req_ids=None, context=None):
- api = neutronapi.API()
- nets = prv_nets + pub_nets
- if req_ids:
- mox_list_params = {'id': req_ids}
- self.moxed_client.list_networks(
- **mox_list_params).AndReturn({'networks': nets})
- else:
- mox_list_params = {'tenant_id': self.instance['project_id'],
- 'shared': False}
- self.moxed_client.list_networks(
- **mox_list_params).AndReturn({'networks': prv_nets})
- mox_list_params = {'shared': True}
- self.moxed_client.list_networks(
- **mox_list_params).AndReturn({'networks': pub_nets})
-
- self.mox.ReplayAll()
- rets = api._get_available_networks(
- context if context else self.context,
- self.instance['project_id'],
- req_ids)
- self.assertEqual(rets, nets)
-
- def test_get_available_networks_all_private(self):
- self._get_available_networks(prv_nets=self.nets2, pub_nets=[])
-
- def test_get_available_networks_all_public(self):
- self._get_available_networks(prv_nets=[], pub_nets=self.nets2)
-
- def test_get_available_networks_private_and_public(self):
- self._get_available_networks(prv_nets=self.nets1, pub_nets=self.nets4)
-
- def test_get_available_networks_with_network_ids(self):
- prv_nets = [self.nets3[0]]
- pub_nets = [self.nets3[-1]]
- # specify only first and last network
- req_ids = [net['id'] for net in (self.nets3[0], self.nets3[-1])]
- self._get_available_networks(prv_nets, pub_nets, req_ids)
-
- def test_get_available_networks_with_custom_policy(self):
- rules = {'network:attach_external_network':
- common_policy.parse_rule('')}
- policy.set_rules(rules)
- req_ids = [net['id'] for net in self.nets5]
- self._get_available_networks(self.nets5, pub_nets=[], req_ids=req_ids)
-
- def test_get_floating_ip_pools(self):
- api = neutronapi.API()
- search_opts = {'router:external': True}
- self.moxed_client.list_networks(**search_opts).\
- AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
- self.mox.ReplayAll()
- pools = api.get_floating_ip_pools(self.context)
- expected = [self.fip_pool['name'], self.fip_pool_nova['name']]
- self.assertEqual(expected, pools)
-
- def _get_expected_fip_model(self, fip_data, idx=0):
- expected = {'id': fip_data['id'],
- 'address': fip_data['floating_ip_address'],
- 'pool': self.fip_pool['name'],
- 'project_id': fip_data['tenant_id'],
- 'fixed_ip_id': fip_data['port_id'],
- 'fixed_ip':
- {'address': fip_data['fixed_ip_address']},
- 'instance': ({'uuid': self.port_data2[idx]['device_id']}
- if fip_data['port_id']
- else None)}
- return expected
-
- def _test_get_floating_ip(self, fip_data, idx=0, by_address=False):
- api = neutronapi.API()
- fip_id = fip_data['id']
- net_id = fip_data['floating_network_id']
- address = fip_data['floating_ip_address']
- if by_address:
- self.moxed_client.list_floatingips(floating_ip_address=address).\
- AndReturn({'floatingips': [fip_data]})
- else:
- self.moxed_client.show_floatingip(fip_id).\
- AndReturn({'floatingip': fip_data})
- self.moxed_client.show_network(net_id).\
- AndReturn({'network': self.fip_pool})
- if fip_data['port_id']:
- self.moxed_client.show_port(fip_data['port_id']).\
- AndReturn({'port': self.port_data2[idx]})
- self.mox.ReplayAll()
-
- expected = self._get_expected_fip_model(fip_data, idx)
-
- if by_address:
- fip = api.get_floating_ip_by_address(self.context, address)
- else:
- fip = api.get_floating_ip(self.context, fip_id)
- self.assertEqual(expected, fip)
-
- def test_get_floating_ip_unassociated(self):
- self._test_get_floating_ip(self.fip_unassociated, idx=0)
-
- def test_get_floating_ip_associated(self):
- self._test_get_floating_ip(self.fip_associated, idx=1)
-
- def test_get_floating_ip_by_address(self):
- self._test_get_floating_ip(self.fip_unassociated, idx=0,
- by_address=True)
-
- def test_get_floating_ip_by_address_associated(self):
- self._test_get_floating_ip(self.fip_associated, idx=1,
- by_address=True)
-
- def test_get_floating_ip_by_address_not_found(self):
- api = neutronapi.API()
- address = self.fip_unassociated['floating_ip_address']
- self.moxed_client.list_floatingips(floating_ip_address=address).\
- AndReturn({'floatingips': []})
- self.mox.ReplayAll()
- self.assertRaises(exception.FloatingIpNotFoundForAddress,
- api.get_floating_ip_by_address,
- self.context, address)
-
- def test_get_floating_ip_by_id_not_found(self):
- api = neutronapi.API()
- NeutronNotFound = exceptions.NeutronClientException(status_code=404)
- floating_ip_id = self.fip_unassociated['id']
- self.moxed_client.show_floatingip(floating_ip_id).\
- AndRaise(NeutronNotFound)
- self.mox.ReplayAll()
- self.assertRaises(exception.FloatingIpNotFound,
- api.get_floating_ip,
- self.context, floating_ip_id)
-
- def test_get_floating_ip_raises_non404(self):
- api = neutronapi.API()
- NeutronNotFound = exceptions.NeutronClientException(status_code=0)
- floating_ip_id = self.fip_unassociated['id']
- self.moxed_client.show_floatingip(floating_ip_id).\
- AndRaise(NeutronNotFound)
- self.mox.ReplayAll()
- self.assertRaises(exceptions.NeutronClientException,
- api.get_floating_ip,
- self.context, floating_ip_id)
-
- def test_get_floating_ip_by_address_multiple_found(self):
- api = neutronapi.API()
- address = self.fip_unassociated['floating_ip_address']
- self.moxed_client.list_floatingips(floating_ip_address=address).\
- AndReturn({'floatingips': [self.fip_unassociated] * 2})
- self.mox.ReplayAll()
- self.assertRaises(exception.FloatingIpMultipleFoundForAddress,
- api.get_floating_ip_by_address,
- self.context, address)
-
- def test_get_floating_ips_by_project(self):
- api = neutronapi.API()
- project_id = self.context.project_id
- self.moxed_client.list_floatingips(tenant_id=project_id).\
- AndReturn({'floatingips': [self.fip_unassociated,
- self.fip_associated]})
- search_opts = {'router:external': True}
- self.moxed_client.list_networks(**search_opts).\
- AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
- self.moxed_client.list_ports(tenant_id=project_id).\
- AndReturn({'ports': self.port_data2})
- self.mox.ReplayAll()
-
- expected = [self._get_expected_fip_model(self.fip_unassociated),
- self._get_expected_fip_model(self.fip_associated, idx=1)]
- fips = api.get_floating_ips_by_project(self.context)
- self.assertEqual(expected, fips)
-
- def _test_get_instance_id_by_floating_address(self, fip_data,
- associated=False):
- api = neutronapi.API()
- address = fip_data['floating_ip_address']
- self.moxed_client.list_floatingips(floating_ip_address=address).\
- AndReturn({'floatingips': [fip_data]})
- if associated:
- self.moxed_client.show_port(fip_data['port_id']).\
- AndReturn({'port': self.port_data2[1]})
- self.mox.ReplayAll()
-
- if associated:
- expected = self.port_data2[1]['device_id']
- else:
- expected = None
- fip = api.get_instance_id_by_floating_address(self.context, address)
- self.assertEqual(expected, fip)
-
- def test_get_instance_id_by_floating_address(self):
- self._test_get_instance_id_by_floating_address(self.fip_unassociated)
-
- def test_get_instance_id_by_floating_address_associated(self):
- self._test_get_instance_id_by_floating_address(self.fip_associated,
- associated=True)
-
- def test_allocate_floating_ip(self):
- api = neutronapi.API()
- pool_name = self.fip_pool['name']
- pool_id = self.fip_pool['id']
- search_opts = {'router:external': True,
- 'fields': 'id',
- 'name': pool_name}
- self.moxed_client.list_networks(**search_opts).\
- AndReturn({'networks': [self.fip_pool]})
- self.moxed_client.create_floatingip(
- {'floatingip': {'floating_network_id': pool_id}}).\
- AndReturn({'floatingip': self.fip_unassociated})
- self.mox.ReplayAll()
- fip = api.allocate_floating_ip(self.context, 'ext_net')
- self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
-
- def test_allocate_floating_ip_addr_gen_fail(self):
- api = neutronapi.API()
- pool_name = self.fip_pool['name']
- pool_id = self.fip_pool['id']
- search_opts = {'router:external': True,
- 'fields': 'id',
- 'name': pool_name}
- self.moxed_client.list_networks(**search_opts).\
- AndReturn({'networks': [self.fip_pool]})
- self.moxed_client.create_floatingip(
- {'floatingip': {'floating_network_id': pool_id}}).\
- AndRaise(exceptions.IpAddressGenerationFailureClient)
- self.mox.ReplayAll()
- self.assertRaises(exception.NoMoreFloatingIps,
- api.allocate_floating_ip, self.context, 'ext_net')
-
- def test_allocate_floating_ip_exhausted_fail(self):
- api = neutronapi.API()
- pool_name = self.fip_pool['name']
- pool_id = self.fip_pool['id']
- search_opts = {'router:external': True,
- 'fields': 'id',
- 'name': pool_name}
- self.moxed_client.list_networks(**search_opts).\
- AndReturn({'networks': [self.fip_pool]})
- self.moxed_client.create_floatingip(
- {'floatingip': {'floating_network_id': pool_id}}).\
- AndRaise(exceptions.ExternalIpAddressExhaustedClient)
- self.mox.ReplayAll()
- self.assertRaises(exception.NoMoreFloatingIps,
- api.allocate_floating_ip, self.context, 'ext_net')
-
- def test_allocate_floating_ip_with_pool_id(self):
- api = neutronapi.API()
- pool_id = self.fip_pool['id']
- search_opts = {'router:external': True,
- 'fields': 'id',
- 'id': pool_id}
- self.moxed_client.list_networks(**search_opts).\
- AndReturn({'networks': [self.fip_pool]})
- self.moxed_client.create_floatingip(
- {'floatingip': {'floating_network_id': pool_id}}).\
- AndReturn({'floatingip': self.fip_unassociated})
- self.mox.ReplayAll()
- fip = api.allocate_floating_ip(self.context, pool_id)
- self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
-
- def test_allocate_floating_ip_with_default_pool(self):
- api = neutronapi.API()
- pool_name = self.fip_pool_nova['name']
- pool_id = self.fip_pool_nova['id']
- search_opts = {'router:external': True,
- 'fields': 'id',
- 'name': pool_name}
- self.moxed_client.list_networks(**search_opts).\
- AndReturn({'networks': [self.fip_pool_nova]})
- self.moxed_client.create_floatingip(
- {'floatingip': {'floating_network_id': pool_id}}).\
- AndReturn({'floatingip': self.fip_unassociated})
- self.mox.ReplayAll()
- fip = api.allocate_floating_ip(self.context)
- self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
-
- def test_release_floating_ip(self):
- api = neutronapi.API()
- address = self.fip_unassociated['floating_ip_address']
- fip_id = self.fip_unassociated['id']
-
- self.moxed_client.list_floatingips(floating_ip_address=address).\
- AndReturn({'floatingips': [self.fip_unassociated]})
- self.moxed_client.delete_floatingip(fip_id)
- self.mox.ReplayAll()
- api.release_floating_ip(self.context, address)
-
- def test_disassociate_and_release_floating_ip(self):
- api = neutronapi.API()
- address = self.fip_unassociated['floating_ip_address']
- fip_id = self.fip_unassociated['id']
- floating_ip = {'address': address}
-
- self.moxed_client.list_floatingips(floating_ip_address=address).\
- AndReturn({'floatingips': [self.fip_unassociated]})
- self.moxed_client.delete_floatingip(fip_id)
- self.mox.ReplayAll()
- api.disassociate_and_release_floating_ip(self.context, None,
- floating_ip)
-
- def test_release_floating_ip_associated(self):
- api = neutronapi.API()
- address = self.fip_associated['floating_ip_address']
-
- self.moxed_client.list_floatingips(floating_ip_address=address).\
- AndReturn({'floatingips': [self.fip_associated]})
- self.mox.ReplayAll()
- self.assertRaises(exception.FloatingIpAssociated,
- api.release_floating_ip, self.context, address)
-
- def _setup_mock_for_refresh_cache(self, api, instances):
- nw_info = self.mox.CreateMock(model.NetworkInfo)
- self.mox.StubOutWithMock(api, '_get_instance_nw_info')
- self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
- for instance in instances:
- nw_info.json()
- api._get_instance_nw_info(mox.IgnoreArg(), instance).\
- AndReturn(nw_info)
- api.db.instance_info_cache_update(mox.IgnoreArg(),
- instance['uuid'],
- mox.IgnoreArg())
-
- def test_associate_floating_ip(self):
- api = neutronapi.API()
- address = self.fip_unassociated['floating_ip_address']
- fixed_address = self.port_address2
- fip_id = self.fip_unassociated['id']
-
- search_opts = {'device_owner': 'compute:nova',
- 'device_id': self.instance['uuid']}
- self.moxed_client.list_ports(**search_opts).\
- AndReturn({'ports': [self.port_data2[1]]})
- self.moxed_client.list_floatingips(floating_ip_address=address).\
- AndReturn({'floatingips': [self.fip_unassociated]})
- self.moxed_client.update_floatingip(
- fip_id, {'floatingip': {'port_id': self.fip_associated['port_id'],
- 'fixed_ip_address': fixed_address}})
- self._setup_mock_for_refresh_cache(api, [self.instance])
-
- self.mox.ReplayAll()
- api.associate_floating_ip(self.context, self.instance,
- address, fixed_address)
-
- @mock.patch('nova.objects.Instance.get_by_uuid')
- def test_reassociate_floating_ip(self, mock_get):
- api = neutronapi.API()
- address = self.fip_associated['floating_ip_address']
- new_fixed_address = self.port_address
- fip_id = self.fip_associated['id']
-
- search_opts = {'device_owner': 'compute:nova',
- 'device_id': self.instance2['uuid']}
- self.moxed_client.list_ports(**search_opts).\
- AndReturn({'ports': [self.port_data2[0]]})
- self.moxed_client.list_floatingips(floating_ip_address=address).\
- AndReturn({'floatingips': [self.fip_associated]})
- self.moxed_client.update_floatingip(
- fip_id, {'floatingip': {'port_id': 'my_portid1',
- 'fixed_ip_address': new_fixed_address}})
- self.moxed_client.show_port(self.fip_associated['port_id']).\
- AndReturn({'port': self.port_data2[1]})
-
- mock_get.return_value = fake_instance.fake_instance_obj(
- self.context, **self.instance)
- self._setup_mock_for_refresh_cache(api, [mock_get.return_value,
- self.instance2])
-
- self.mox.ReplayAll()
- api.associate_floating_ip(self.context, self.instance2,
- address, new_fixed_address)
-
- def test_associate_floating_ip_not_found_fixed_ip(self):
- api = neutronapi.API()
- address = self.fip_associated['floating_ip_address']
- fixed_address = self.fip_associated['fixed_ip_address']
-
- search_opts = {'device_owner': 'compute:nova',
- 'device_id': self.instance['uuid']}
- self.moxed_client.list_ports(**search_opts).\
- AndReturn({'ports': [self.port_data2[0]]})
-
- self.mox.ReplayAll()
- self.assertRaises(exception.FixedIpNotFoundForAddress,
- api.associate_floating_ip, self.context,
- self.instance, address, fixed_address)
-
- def test_disassociate_floating_ip(self):
- api = neutronapi.API()
- address = self.fip_associated['floating_ip_address']
- fip_id = self.fip_associated['id']
-
- self.moxed_client.list_floatingips(floating_ip_address=address).\
- AndReturn({'floatingips': [self.fip_associated]})
- self.moxed_client.update_floatingip(
- fip_id, {'floatingip': {'port_id': None}})
- self._setup_mock_for_refresh_cache(api, [self.instance])
-
- self.mox.ReplayAll()
- api.disassociate_floating_ip(self.context, self.instance, address)
-
- def test_add_fixed_ip_to_instance(self):
- api = neutronapi.API()
- self._setup_mock_for_refresh_cache(api, [self.instance])
- network_id = 'my_netid1'
- search_opts = {'network_id': network_id}
- self.moxed_client.list_subnets(
- **search_opts).AndReturn({'subnets': self.subnet_data_n})
-
- search_opts = {'device_id': self.instance['uuid'],
- 'device_owner': 'compute:nova',
- 'network_id': network_id}
- self.moxed_client.list_ports(
- **search_opts).AndReturn({'ports': self.port_data1})
- port_req_body = {
- 'port': {
- 'fixed_ips': [{'subnet_id': 'my_subid1'},
- {'subnet_id': 'my_subid1'}],
- },
- }
- port = self.port_data1[0]
- port['fixed_ips'] = [{'subnet_id': 'my_subid1'}]
- self.moxed_client.update_port('my_portid1',
- MyComparator(port_req_body)).AndReturn({'port': port})
-
- self.mox.ReplayAll()
- api.add_fixed_ip_to_instance(self.context, self.instance, network_id)
-
- def test_remove_fixed_ip_from_instance(self):
- api = neutronapi.API()
- self._setup_mock_for_refresh_cache(api, [self.instance])
- address = '10.0.0.3'
- zone = 'compute:%s' % self.instance['availability_zone']
- search_opts = {'device_id': self.instance['uuid'],
- 'device_owner': zone,
- 'fixed_ips': 'ip_address=%s' % address}
- self.moxed_client.list_ports(
- **search_opts).AndReturn({'ports': self.port_data1})
- port_req_body = {
- 'port': {
- 'fixed_ips': [],
- },
- }
- port = self.port_data1[0]
- port['fixed_ips'] = []
- self.moxed_client.update_port('my_portid1',
- MyComparator(port_req_body)).AndReturn({'port': port})
-
- self.mox.ReplayAll()
- api.remove_fixed_ip_from_instance(self.context, self.instance, address)
-
- def test_list_floating_ips_without_l3_support(self):
- api = neutronapi.API()
- NeutronNotFound = exceptions.NeutronClientException(
- status_code=404)
- self.moxed_client.list_floatingips(
- fixed_ip_address='1.1.1.1', port_id=1).AndRaise(NeutronNotFound)
- self.mox.ReplayAll()
- neutronv2.get_client('fake')
- floatingips = api._get_floating_ips_by_fixed_and_port(
- self.moxed_client, '1.1.1.1', 1)
- self.assertEqual(floatingips, [])
-
- def test_nw_info_get_ips(self):
- fake_port = {
- 'fixed_ips': [
- {'ip_address': '1.1.1.1'}],
- 'id': 'port-id',
- }
- api = neutronapi.API()
- self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port')
- api._get_floating_ips_by_fixed_and_port(
- self.moxed_client, '1.1.1.1', 'port-id').AndReturn(
- [{'floating_ip_address': '10.0.0.1'}])
- self.mox.ReplayAll()
- neutronv2.get_client('fake')
- result = api._nw_info_get_ips(self.moxed_client, fake_port)
- self.assertEqual(len(result), 1)
- self.assertEqual(result[0]['address'], '1.1.1.1')
- self.assertEqual(result[0]['floating_ips'][0]['address'], '10.0.0.1')
-
- def test_nw_info_get_subnets(self):
- fake_port = {
- 'fixed_ips': [
- {'ip_address': '1.1.1.1'},
- {'ip_address': '2.2.2.2'}],
- 'id': 'port-id',
- }
- fake_subnet = model.Subnet(cidr='1.0.0.0/8')
- fake_ips = [model.IP(x['ip_address']) for x in fake_port['fixed_ips']]
- api = neutronapi.API()
- self.mox.StubOutWithMock(api, '_get_subnets_from_port')
- api._get_subnets_from_port(self.context, fake_port).AndReturn(
- [fake_subnet])
- self.mox.ReplayAll()
- neutronv2.get_client('fake')
- subnets = api._nw_info_get_subnets(self.context, fake_port, fake_ips)
- self.assertEqual(len(subnets), 1)
- self.assertEqual(len(subnets[0]['ips']), 1)
- self.assertEqual(subnets[0]['ips'][0]['address'], '1.1.1.1')
-
- def _test_nw_info_build_network(self, vif_type):
- fake_port = {
- 'fixed_ips': [{'ip_address': '1.1.1.1'}],
- 'id': 'port-id',
- 'network_id': 'net-id',
- 'binding:vif_type': vif_type,
- }
- fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
- fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}]
- api = neutronapi.API()
- self.mox.ReplayAll()
- neutronv2.get_client('fake')
- net, iid = api._nw_info_build_network(fake_port, fake_nets,
- fake_subnets)
- self.assertEqual(net['subnets'], fake_subnets)
- self.assertEqual(net['id'], 'net-id')
- self.assertEqual(net['label'], 'foo')
- self.assertEqual(net.get_meta('tenant_id'), 'tenant')
- self.assertEqual(net.get_meta('injected'), CONF.flat_injected)
- return net, iid
-
- def test_nw_info_build_network_ovs(self):
- net, iid = self._test_nw_info_build_network(model.VIF_TYPE_OVS)
- self.assertEqual(net['bridge'], CONF.neutron.ovs_bridge)
- self.assertNotIn('should_create_bridge', net)
- self.assertEqual(iid, 'port-id')
-
- def test_nw_info_build_network_dvs(self):
- net, iid = self._test_nw_info_build_network(model.VIF_TYPE_DVS)
- self.assertEqual('foo-net-id', net['bridge'])
- self.assertNotIn('should_create_bridge', net)
- self.assertNotIn('ovs_interfaceid', net)
- self.assertIsNone(iid)
-
- def test_nw_info_build_network_bridge(self):
- net, iid = self._test_nw_info_build_network(model.VIF_TYPE_BRIDGE)
- self.assertEqual(net['bridge'], 'brqnet-id')
- self.assertTrue(net['should_create_bridge'])
- self.assertIsNone(iid)
-
- def test_nw_info_build_network_other(self):
- net, iid = self._test_nw_info_build_network(None)
- self.assertIsNone(net['bridge'])
- self.assertNotIn('should_create_bridge', net)
- self.assertIsNone(iid)
-
- def test_nw_info_build_no_match(self):
- fake_port = {
- 'fixed_ips': [{'ip_address': '1.1.1.1'}],
- 'id': 'port-id',
- 'network_id': 'net-id1',
- 'tenant_id': 'tenant',
- 'binding:vif_type': model.VIF_TYPE_OVS,
- }
- fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
- fake_nets = [{'id': 'net-id2', 'name': 'foo', 'tenant_id': 'tenant'}]
- api = neutronapi.API()
- self.mox.ReplayAll()
- neutronv2.get_client('fake')
- net, iid = api._nw_info_build_network(fake_port, fake_nets,
- fake_subnets)
- self.assertEqual(fake_subnets, net['subnets'])
- self.assertEqual('net-id1', net['id'])
- self.assertEqual('net-id1', net['id'])
- self.assertEqual('tenant', net['meta']['tenant_id'])
-
- def test_build_network_info_model(self):
- api = neutronapi.API()
- fake_inst = {'project_id': 'fake', 'uuid': 'uuid',
- 'info_cache': {'network_info': []}}
- fake_ports = [
- # admin_state_up=True and status='ACTIVE' thus vif.active=True
- {'id': 'port1',
- 'network_id': 'net-id',
- 'admin_state_up': True,
- 'status': 'ACTIVE',
- 'fixed_ips': [{'ip_address': '1.1.1.1'}],
- 'mac_address': 'de:ad:be:ef:00:01',
- 'binding:vif_type': model.VIF_TYPE_BRIDGE,
- 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
- 'binding:vif_details': {},
- },
- # admin_state_up=False and status='DOWN' thus vif.active=True
- {'id': 'port2',
- 'network_id': 'net-id',
- 'admin_state_up': False,
- 'status': 'DOWN',
- 'fixed_ips': [{'ip_address': '1.1.1.1'}],
- 'mac_address': 'de:ad:be:ef:00:02',
- 'binding:vif_type': model.VIF_TYPE_BRIDGE,
- 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
- 'binding:vif_details': {},
- },
- # admin_state_up=True and status='DOWN' thus vif.active=False
- {'id': 'port0',
- 'network_id': 'net-id',
- 'admin_state_up': True,
- 'status': 'DOWN',
- 'fixed_ips': [{'ip_address': '1.1.1.1'}],
- 'mac_address': 'de:ad:be:ef:00:03',
- 'binding:vif_type': model.VIF_TYPE_BRIDGE,
- 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
- 'binding:vif_details': {},
- },
- # admin_state_up=True and status='ACTIVE' thus vif.active=True
- {'id': 'port3',
- 'network_id': 'net-id',
- 'admin_state_up': True,
- 'status': 'ACTIVE',
- 'fixed_ips': [{'ip_address': '1.1.1.1'}],
- 'mac_address': 'de:ad:be:ef:00:04',
- 'binding:vif_type': model.VIF_TYPE_HW_VEB,
- 'binding:vnic_type': model.VNIC_TYPE_DIRECT,
- 'binding:profile': {'pci_vendor_info': '1137:0047',
- 'pci_slot': '0000:0a:00.1',
- 'physical_network': 'phynet1'},
- 'binding:vif_details': {model.VIF_DETAILS_PROFILEID: 'pfid'},
- },
- # admin_state_up=True and status='ACTIVE' thus vif.active=True
- {'id': 'port4',
- 'network_id': 'net-id',
- 'admin_state_up': True,
- 'status': 'ACTIVE',
- 'fixed_ips': [{'ip_address': '1.1.1.1'}],
- 'mac_address': 'de:ad:be:ef:00:05',
- 'binding:vif_type': model.VIF_TYPE_802_QBH,
- 'binding:vnic_type': model.VNIC_TYPE_MACVTAP,
- 'binding:profile': {'pci_vendor_info': '1137:0047',
- 'pci_slot': '0000:0a:00.2',
- 'physical_network': 'phynet1'},
- 'binding:vif_details': {model.VIF_DETAILS_PROFILEID: 'pfid'},
- },
- # admin_state_up=True and status='ACTIVE' thus vif.active=True
- # This port has no binding:vnic_type to verify default is assumed
- {'id': 'port5',
- 'network_id': 'net-id',
- 'admin_state_up': True,
- 'status': 'ACTIVE',
- 'fixed_ips': [{'ip_address': '1.1.1.1'}],
- 'mac_address': 'de:ad:be:ef:00:06',
- 'binding:vif_type': model.VIF_TYPE_BRIDGE,
- # No binding:vnic_type
- 'binding:vif_details': {},
- },
- # This does not match the networks we provide below,
- # so it should be ignored (and is here to verify that)
- {'id': 'port6',
- 'network_id': 'other-net-id',
- 'admin_state_up': True,
- 'status': 'DOWN',
- 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
- },
- ]
- fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
- fake_nets = [
- {'id': 'net-id',
- 'name': 'foo',
- 'tenant_id': 'fake',
- }
- ]
- neutronv2.get_client(mox.IgnoreArg(), admin=True).MultipleTimes(
- ).AndReturn(self.moxed_client)
- self.moxed_client.list_ports(
- tenant_id='fake', device_id='uuid').AndReturn(
- {'ports': fake_ports})
-
- self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port')
- self.mox.StubOutWithMock(api, '_get_subnets_from_port')
- requested_ports = [fake_ports[2], fake_ports[0], fake_ports[1],
- fake_ports[3], fake_ports[4], fake_ports[5]]
- for requested_port in requested_ports:
- api._get_floating_ips_by_fixed_and_port(
- self.moxed_client, '1.1.1.1', requested_port['id']).AndReturn(
- [{'floating_ip_address': '10.0.0.1'}])
- for requested_port in requested_ports:
- api._get_subnets_from_port(self.context, requested_port
- ).AndReturn(fake_subnets)
-
- self.mox.ReplayAll()
- neutronv2.get_client('fake')
- nw_infos = api._build_network_info_model(self.context, fake_inst,
- fake_nets,
- [fake_ports[2]['id'],
- fake_ports[0]['id'],
- fake_ports[1]['id'],
- fake_ports[3]['id'],
- fake_ports[4]['id'],
- fake_ports[5]['id']])
- self.assertEqual(len(nw_infos), 6)
- index = 0
- for nw_info in nw_infos:
- self.assertEqual(nw_info['address'],
- requested_ports[index]['mac_address'])
- self.assertEqual(nw_info['devname'], 'tapport' + str(index))
- self.assertIsNone(nw_info['ovs_interfaceid'])
- self.assertEqual(nw_info['type'],
- requested_ports[index]['binding:vif_type'])
- if nw_info['type'] == model.VIF_TYPE_BRIDGE:
- self.assertEqual(nw_info['network']['bridge'], 'brqnet-id')
- self.assertEqual(nw_info['vnic_type'],
- requested_ports[index].get('binding:vnic_type',
- model.VNIC_TYPE_NORMAL))
- self.assertEqual(nw_info.get('details'),
- requested_ports[index].get('binding:vif_details'))
- self.assertEqual(nw_info.get('profile'),
- requested_ports[index].get('binding:profile'))
- index += 1
-
- self.assertEqual(nw_infos[0]['active'], False)
- self.assertEqual(nw_infos[1]['active'], True)
- self.assertEqual(nw_infos[2]['active'], True)
- self.assertEqual(nw_infos[3]['active'], True)
- self.assertEqual(nw_infos[4]['active'], True)
- self.assertEqual(nw_infos[5]['active'], True)
-
- self.assertEqual(nw_infos[0]['id'], 'port0')
- self.assertEqual(nw_infos[1]['id'], 'port1')
- self.assertEqual(nw_infos[2]['id'], 'port2')
- self.assertEqual(nw_infos[3]['id'], 'port3')
- self.assertEqual(nw_infos[4]['id'], 'port4')
- self.assertEqual(nw_infos[5]['id'], 'port5')
-
- def test_get_subnets_from_port(self):
- api = neutronapi.API()
-
- port_data = copy.copy(self.port_data1[0])
- subnet_data1 = copy.copy(self.subnet_data1)
- subnet_data1[0]['host_routes'] = [
- {'destination': '192.168.0.0/24', 'nexthop': '1.0.0.10'}
- ]
-
- self.moxed_client.list_subnets(
- id=[port_data['fixed_ips'][0]['subnet_id']]
- ).AndReturn({'subnets': subnet_data1})
- self.moxed_client.list_ports(
- network_id=subnet_data1[0]['network_id'],
- device_owner='network:dhcp').AndReturn({'ports': []})
- self.mox.ReplayAll()
-
- subnets = api._get_subnets_from_port(self.context, port_data)
-
- self.assertEqual(len(subnets), 1)
- self.assertEqual(len(subnets[0]['routes']), 1)
- self.assertEqual(subnets[0]['routes'][0]['cidr'],
- subnet_data1[0]['host_routes'][0]['destination'])
- self.assertEqual(subnets[0]['routes'][0]['gateway']['address'],
- subnet_data1[0]['host_routes'][0]['nexthop'])
-
- def test_get_all_empty_list_networks(self):
- api = neutronapi.API()
- self.moxed_client.list_networks().AndReturn({'networks': []})
- self.mox.ReplayAll()
- networks = api.get_all(self.context)
- self.assertEqual(networks, [])
-
- def test_get_floating_ips_by_fixed_address(self):
- # NOTE(lbragstad): We need to reset the mocks in order to assert
- # a NotImplementedError is raised when calling the method under test.
- self.mox.ResetAll()
- fake_fixed = '192.168.1.4'
- api = neutronapi.API()
- self.assertRaises(NotImplementedError,
- api.get_floating_ips_by_fixed_address,
- self.context, fake_fixed)
-
- @mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
- def test_get_port_vnic_info_1(self, mock_get_client):
- api = neutronapi.API()
- self.mox.ResetAll()
- test_port = {
- 'port': {'id': 'my_port_id1',
- 'network_id': 'net-id',
- 'binding:vnic_type': model.VNIC_TYPE_DIRECT,
- },
- }
- test_net = {'network': {'provider:physical_network': 'phynet1'}}
-
- mock_client = mock_get_client()
- mock_client.show_port.return_value = test_port
- mock_client.show_network.return_value = test_net
- vnic_type, phynet_name = api._get_port_vnic_info(
- self.context, mock_client, test_port['port']['id'])
-
- mock_client.show_port.assert_called_once_with(test_port['port']['id'],
- fields=['binding:vnic_type', 'network_id'])
- mock_client.show_network.assert_called_once_with(
- test_port['port']['network_id'],
- fields='provider:physical_network')
- self.assertEqual(model.VNIC_TYPE_DIRECT, vnic_type)
- self.assertEqual(phynet_name, 'phynet1')
-
- def _test_get_port_vnic_info(self, mock_get_client,
- binding_vnic_type=None):
- api = neutronapi.API()
- self.mox.ResetAll()
- test_port = {
- 'port': {'id': 'my_port_id2',
- 'network_id': 'net-id',
- },
- }
-
- if binding_vnic_type:
- test_port['port']['binding:vnic_type'] = binding_vnic_type
-
- mock_client = mock_get_client()
- mock_client.show_port.return_value = test_port
- vnic_type, phynet_name = api._get_port_vnic_info(
- self.context, mock_client, test_port['port']['id'])
-
- mock_client.show_port.assert_called_once_with(test_port['port']['id'],
- fields=['binding:vnic_type', 'network_id'])
- self.assertEqual(model.VNIC_TYPE_NORMAL, vnic_type)
- self.assertFalse(phynet_name)
-
- @mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
- def test_get_port_vnic_info_2(self, mock_get_client):
- self._test_get_port_vnic_info(mock_get_client,
- binding_vnic_type=model.VNIC_TYPE_NORMAL)
-
- @mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
- def test_get_port_vnic_info_3(self, mock_get_client):
- self._test_get_port_vnic_info(mock_get_client)
-
- @mock.patch.object(neutronapi.API, "_get_port_vnic_info")
- @mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
- def test_create_pci_requests_for_sriov_ports(self, mock_get_client,
- mock_get_port_vnic_info):
- api = neutronapi.API()
- self.mox.ResetAll()
- requested_networks = objects.NetworkRequestList(
- objects = [
- objects.NetworkRequest(port_id='my_portid1'),
- objects.NetworkRequest(network_id='net1'),
- objects.NetworkRequest(port_id='my_portid2'),
- objects.NetworkRequest(port_id='my_portid3'),
- objects.NetworkRequest(port_id='my_portid4')])
- pci_requests = objects.InstancePCIRequests(requests=[])
- mock_get_port_vnic_info.side_effect = [
- (model.VNIC_TYPE_DIRECT, 'phynet1'),
- (model.VNIC_TYPE_NORMAL, ''),
- (model.VNIC_TYPE_MACVTAP, 'phynet1'),
- (model.VNIC_TYPE_MACVTAP, 'phynet2')
- ]
- api.create_pci_requests_for_sriov_ports(
- None, pci_requests, requested_networks)
- self.assertEqual(3, len(pci_requests.requests))
- has_pci_request_id = [net.pci_request_id is not None for net in
- requested_networks.objects]
- expected_results = [True, False, False, True, True]
- self.assertEqual(expected_results, has_pci_request_id)
-
-
-class TestNeutronv2WithMock(test.TestCase):
- """Used to test Neutron V2 API with mock."""
-
- def setUp(self):
- super(TestNeutronv2WithMock, self).setUp()
- self.api = neutronapi.API()
- self.context = context.RequestContext(
- 'fake-user', 'fake-project',
- auth_token='bff4a5a6b9eb4ea2a6efec6eefb77936')
-
- @mock.patch('oslo.concurrency.lockutils.lock')
- def test_get_instance_nw_info_locks_per_instance(self, mock_lock):
- instance = objects.Instance(uuid=uuid.uuid4())
- api = neutronapi.API()
- mock_lock.side_effect = test.TestingException
- self.assertRaises(test.TestingException,
- api.get_instance_nw_info, 'context', instance)
- mock_lock.assert_called_once_with('refresh_cache-%s' % instance.uuid)
-
- def _test_validate_networks_fixed_ip_no_dup(self, nets, requested_networks,
- ids, list_port_values):
-
- def _fake_list_ports(**search_opts):
- for args, return_value in list_port_values:
- if args == search_opts:
- return return_value
- self.fail('Unexpected call to list_ports %s' % search_opts)
-
- with contextlib.nested(
- mock.patch.object(client.Client, 'list_ports',
- side_effect=_fake_list_ports),
- mock.patch.object(client.Client, 'list_networks',
- return_value={'networks': nets}),
- mock.patch.object(client.Client, 'show_quota',
- return_value={'quota': {'port': 50}})) as (
- list_ports_mock, list_networks_mock, show_quota_mock):
-
- self.api.validate_networks(self.context, requested_networks, 1)
-
- self.assertEqual(len(list_port_values),
- len(list_ports_mock.call_args_list))
- list_networks_mock.assert_called_once_with(id=ids)
- show_quota_mock.assert_called_once_with(tenant_id='fake-project')
-
- def test_validate_networks_fixed_ip_no_dup1(self):
- # Test validation for a request for a network with a
- # fixed ip that is not already in use because no fixed ips in use
-
- nets1 = [{'id': 'my_netid1',
- 'name': 'my_netname1',
- 'subnets': ['mysubnid1'],
- 'tenant_id': 'fake-project'}]
-
- requested_networks = [('my_netid1', '10.0.1.2', None, None)]
- ids = ['my_netid1']
- list_port_values = [({'network_id': 'my_netid1',
- 'fixed_ips': 'ip_address=10.0.1.2',
- 'fields': 'device_id'},
- {'ports': []}),
- ({'tenant_id': 'fake-project'},
- {'ports': []})]
- self._test_validate_networks_fixed_ip_no_dup(nets1, requested_networks,
- ids, list_port_values)
-
- def test_validate_networks_fixed_ip_no_dup2(self):
- # Test validation for a request for a network with a
- # fixed ip that is not already in use because not used on this net id
-
- nets2 = [{'id': 'my_netid1',
- 'name': 'my_netname1',
- 'subnets': ['mysubnid1'],
- 'tenant_id': 'fake-project'},
- {'id': 'my_netid2',
- 'name': 'my_netname2',
- 'subnets': ['mysubnid2'],
- 'tenant_id': 'fake-project'}]
-
- requested_networks = [('my_netid1', '10.0.1.2', None, None),
- ('my_netid2', '10.0.1.3', None, None)]
- ids = ['my_netid1', 'my_netid2']
- list_port_values = [({'network_id': 'my_netid1',
- 'fixed_ips': 'ip_address=10.0.1.2',
- 'fields': 'device_id'},
- {'ports': []}),
- ({'network_id': 'my_netid2',
- 'fixed_ips': 'ip_address=10.0.1.3',
- 'fields': 'device_id'},
- {'ports': []}),
-
- ({'tenant_id': 'fake-project'},
- {'ports': []})]
-
- self._test_validate_networks_fixed_ip_no_dup(nets2, requested_networks,
- ids, list_port_values)
-
- def test_validate_networks_fixed_ip_dup(self):
- # Test validation for a request for a network with a
- # fixed ip that is already in use
-
- requested_networks = [('my_netid1', '10.0.1.2', None, None)]
- list_port_mock_params = {'network_id': 'my_netid1',
- 'fixed_ips': 'ip_address=10.0.1.2',
- 'fields': 'device_id'}
- list_port_mock_return = {'ports': [({'device_id': 'my_deviceid'})]}
-
- with mock.patch.object(client.Client, 'list_ports',
- return_value=list_port_mock_return) as (
- list_ports_mock):
-
- self.assertRaises(exception.FixedIpAlreadyInUse,
- self.api.validate_networks,
- self.context, requested_networks, 1)
-
- list_ports_mock.assert_called_once_with(**list_port_mock_params)
-
- def test_allocate_floating_ip_exceed_limit(self):
- # Verify that the correct exception is thrown when quota exceed
- pool_name = 'dummy'
- api = neutronapi.API()
- with contextlib.nested(
- mock.patch.object(client.Client, 'create_floatingip'),
- mock.patch.object(api,
- '_get_floating_ip_pool_id_by_name_or_id')) as (
- create_mock, get_mock):
- create_mock.side_effect = exceptions.OverQuotaClient()
-
- self.assertRaises(exception.FloatingIpLimitExceeded,
- api.allocate_floating_ip,
- self.context, pool_name)
-
- def test_create_port_for_instance_no_more_ip(self):
- instance = fake_instance.fake_instance_obj(self.context)
- net = {'id': 'my_netid1',
- 'name': 'my_netname1',
- 'subnets': ['mysubnid1'],
- 'tenant_id': instance['project_id']}
-
- with mock.patch.object(client.Client, 'create_port',
- side_effect=exceptions.IpAddressGenerationFailureClient()) as (
- create_port_mock):
- zone = 'compute:%s' % instance['availability_zone']
- port_req_body = {'port': {'device_id': instance['uuid'],
- 'device_owner': zone}}
- self.assertRaises(exception.NoMoreFixedIps,
- self.api._create_port,
- neutronv2.get_client(self.context),
- instance, net['id'], port_req_body)
- create_port_mock.assert_called_once_with(port_req_body)
-
- @mock.patch.object(client.Client, 'create_port',
- side_effect=exceptions.MacAddressInUseClient())
- def test_create_port_for_instance_mac_address_in_use(self,
- create_port_mock):
- # Create fake data.
- instance = fake_instance.fake_instance_obj(self.context)
- net = {'id': 'my_netid1',
- 'name': 'my_netname1',
- 'subnets': ['mysubnid1'],
- 'tenant_id': instance['project_id']}
- zone = 'compute:%s' % instance['availability_zone']
- port_req_body = {'port': {'device_id': instance['uuid'],
- 'device_owner': zone,
- 'mac_address': 'XX:XX:XX:XX:XX:XX'}}
- available_macs = set(['XX:XX:XX:XX:XX:XX'])
- # Run the code.
- self.assertRaises(exception.PortInUse,
- self.api._create_port,
- neutronv2.get_client(self.context),
- instance, net['id'], port_req_body,
- available_macs=available_macs)
- # Assert the calls.
- create_port_mock.assert_called_once_with(port_req_body)
-
- @mock.patch.object(client.Client, 'create_port',
- side_effect=exceptions.IpAddressInUseClient())
- def test_create_port_for_fixed_ip_in_use(self, create_port_mock):
- # Create fake data.
- instance = fake_instance.fake_instance_obj(self.context)
- net = {'id': 'my_netid1',
- 'name': 'my_netname1',
- 'subnets': ['mysubnid1'],
- 'tenant_id': instance['project_id']}
- zone = 'compute:%s' % instance['availability_zone']
- port_req_body = {'port': {'device_id': instance['uuid'],
- 'device_owner': zone,
- 'mac_address': 'XX:XX:XX:XX:XX:XX'}}
- fake_ip = '1.1.1.1'
- # Run the code.
- self.assertRaises(exception.FixedIpAlreadyInUse,
- self.api._create_port,
- neutronv2.get_client(self.context),
- instance, net['id'], port_req_body,
- fixed_ip=fake_ip)
- # Assert the calls.
- create_port_mock.assert_called_once_with(port_req_body)
-
- def test_get_network_detail_not_found(self):
- api = neutronapi.API()
- expected_exc = exceptions.NetworkNotFoundClient()
- network_uuid = '02cacbca-7d48-4a2c-8011-43eecf8a9786'
- with mock.patch.object(client.Client, 'show_network',
- side_effect=expected_exc) as (
- fake_show_network):
- self.assertRaises(exception.NetworkNotFound,
- api.get,
- self.context,
- network_uuid)
- fake_show_network.assert_called_once_with(network_uuid)
-
- def test_deallocate_for_instance_uses_delete_helper(self):
- # setup fake data
- instance = fake_instance.fake_instance_obj(self.context)
- port_data = {'ports': [{'id': str(uuid.uuid4())}]}
- ports = set([port['id'] for port in port_data.get('ports')])
- api = neutronapi.API()
- # setup mocks
- mock_client = mock.Mock()
- mock_client.list_ports.return_value = port_data
- with contextlib.nested(
- mock.patch.object(neutronv2, 'get_client',
- return_value=mock_client),
- mock.patch.object(api, '_delete_ports')
- ) as (
- mock_get_client, mock_delete
- ):
- # run the code
- api.deallocate_for_instance(self.context, instance)
- # assert the calls
- mock_client.list_ports.assert_called_once_with(
- device_id=instance.uuid)
- mock_delete.assert_called_once_with(
- mock_client, instance, ports, raise_if_fail=True)
-
- def _test_delete_ports(self, expect_raise):
- results = [exceptions.NeutronClientException, None]
- mock_client = mock.Mock()
- with mock.patch.object(mock_client, 'delete_port',
- side_effect=results):
- api = neutronapi.API()
- api._delete_ports(mock_client, {'uuid': 'foo'}, ['port1', 'port2'],
- raise_if_fail=expect_raise)
-
- def test_delete_ports_raise(self):
- self.assertRaises(exceptions.NeutronClientException,
- self._test_delete_ports, True)
-
- def test_delete_ports_no_raise(self):
- self._test_delete_ports(False)
-
- def test_delete_ports_never_raise_404(self):
- mock_client = mock.Mock()
- mock_client.delete_port.side_effect = exceptions.PortNotFoundClient
- api = neutronapi.API()
- api._delete_ports(mock_client, {'uuid': 'foo'}, ['port1'],
- raise_if_fail=True)
- mock_client.delete_port.assert_called_once_with('port1')
-
- def test_deallocate_port_for_instance_fails(self):
- mock_client = mock.Mock()
- api = neutronapi.API()
- with contextlib.nested(
- mock.patch.object(neutronv2, 'get_client',
- return_value=mock_client),
- mock.patch.object(api, '_delete_ports',
- side_effect=exceptions.Unauthorized),
- mock.patch.object(api, 'get_instance_nw_info')
- ) as (
- get_client, delete_ports, get_nw_info
- ):
- self.assertRaises(exceptions.Unauthorized,
- api.deallocate_port_for_instance,
- self.context, instance={'uuid': 'fake'},
- port_id='fake')
- # make sure that we didn't try to reload nw info
- self.assertFalse(get_nw_info.called)
-
- @mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
- def _test_show_port_exceptions(self, client_exc, expected_nova_exc,
- get_client_mock):
- show_port_mock = mock.Mock(side_effect=client_exc)
- get_client_mock.return_value.show_port = show_port_mock
- self.assertRaises(expected_nova_exc, self.api.show_port,
- self.context, 'fake_port_id')
-
- def test_show_port_not_found(self):
- self._test_show_port_exceptions(exceptions.PortNotFoundClient,
- exception.PortNotFound)
-
- def test_show_port_forbidden(self):
- self._test_show_port_exceptions(exceptions.Unauthorized,
- exception.Forbidden)
-
-
-class TestNeutronv2ModuleMethods(test.TestCase):
-
- def test_gather_port_ids_and_networks_wrong_params(self):
- api = neutronapi.API()
-
- # Test with networks not None and port_ids is None
- self.assertRaises(exception.NovaException,
- api._gather_port_ids_and_networks,
- 'fake_context', 'fake_instance',
- [{'network': {'name': 'foo'}}], None)
-
- # Test with networks is None and port_ids not None
- self.assertRaises(exception.NovaException,
- api._gather_port_ids_and_networks,
- 'fake_context', 'fake_instance',
- None, ['list', 'of', 'port_ids'])
-
- def test_ensure_requested_network_ordering_no_preference_ids(self):
- l = [1, 2, 3]
-
- neutronapi._ensure_requested_network_ordering(
- lambda x: x,
- l,
- None)
-
- def test_ensure_requested_network_ordering_no_preference_hashes(self):
- l = [{'id': 3}, {'id': 1}, {'id': 2}]
-
- neutronapi._ensure_requested_network_ordering(
- lambda x: x['id'],
- l,
- None)
-
- self.assertEqual(l, [{'id': 3}, {'id': 1}, {'id': 2}])
-
- def test_ensure_requested_network_ordering_with_preference(self):
- l = [{'id': 3}, {'id': 1}, {'id': 2}]
-
- neutronapi._ensure_requested_network_ordering(
- lambda x: x['id'],
- l,
- [1, 2, 3])
-
- self.assertEqual(l, [{'id': 1}, {'id': 2}, {'id': 3}])
-
-
-class TestNeutronv2Portbinding(TestNeutronv2Base):
-
- def test_allocate_for_instance_portbinding(self):
- self._allocate_for_instance(1, portbinding=True)
-
- def test_populate_neutron_extension_values_binding(self):
- api = neutronapi.API()
- neutronv2.get_client(mox.IgnoreArg()).AndReturn(
- self.moxed_client)
- self.moxed_client.list_extensions().AndReturn(
- {'extensions': [{'name': constants.PORTBINDING_EXT}]})
- self.mox.ReplayAll()
- host_id = 'my_host_id'
- instance = {'host': host_id}
- port_req_body = {'port': {}}
- api._populate_neutron_extension_values(self.context, instance,
- None, port_req_body)
- self.assertEqual(port_req_body['port']['binding:host_id'], host_id)
- self.assertFalse(port_req_body['port'].get('binding:profile'))
-
- @mock.patch.object(pci_whitelist, 'get_pci_device_devspec')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
- def test_populate_neutron_extension_values_binding_sriov(self,
- mock_get_instance_pci_devs,
- mock_get_pci_device_devspec):
- api = neutronapi.API()
- host_id = 'my_host_id'
- instance = {'host': host_id}
- port_req_body = {'port': {}}
- pci_req_id = 'my_req_id'
- pci_dev = {'vendor_id': '1377',
- 'product_id': '0047',
- 'address': '0000:0a:00.1',
- }
- PciDevice = collections.namedtuple('PciDevice',
- ['vendor_id', 'product_id', 'address'])
- mydev = PciDevice(**pci_dev)
- profile = {'pci_vendor_info': '1377:0047',
- 'pci_slot': '0000:0a:00.1',
- 'physical_network': 'phynet1',
- }
-
- mock_get_instance_pci_devs.return_value = [mydev]
- devspec = mock.Mock()
- devspec.get_tags.return_value = {'physical_network': 'phynet1'}
- mock_get_pci_device_devspec.return_value = devspec
- api._populate_neutron_binding_profile(instance,
- pci_req_id, port_req_body)
-
- self.assertEqual(port_req_body['port']['binding:profile'], profile)
-
- def test_migrate_instance_finish_binding_false(self):
- api = neutronapi.API()
- self.mox.StubOutWithMock(api, '_has_port_binding_extension')
- api._has_port_binding_extension(mox.IgnoreArg(),
- refresh_cache=True).AndReturn(False)
- self.mox.ReplayAll()
- api.migrate_instance_finish(self.context, None, None)
-
- def test_migrate_instance_finish_binding_true(self):
- api = neutronapi.API()
- self.mox.StubOutWithMock(api, '_has_port_binding_extension')
- api._has_port_binding_extension(mox.IgnoreArg(),
- refresh_cache=True).AndReturn(True)
- neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
- self.moxed_client)
- search_opts = {'device_id': self.instance['uuid'],
- 'tenant_id': self.instance['project_id']}
- ports = {'ports': [{'id': 'test1'}]}
- self.moxed_client.list_ports(**search_opts).AndReturn(ports)
- migration = {'source_compute': self.instance.get('host'),
- 'dest_compute': 'dest_host', }
- port_req_body = {'port':
- {'binding:host_id': migration['dest_compute']}}
- self.moxed_client.update_port('test1',
- port_req_body).AndReturn(None)
- self.mox.ReplayAll()
- api.migrate_instance_finish(self.context, self.instance, migration)
-
- def test_migrate_instance_finish_binding_true_exception(self):
- api = neutronapi.API()
- self.mox.StubOutWithMock(api, '_has_port_binding_extension')
- api._has_port_binding_extension(mox.IgnoreArg(),
- refresh_cache=True).AndReturn(True)
- neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
- self.moxed_client)
- search_opts = {'device_id': self.instance['uuid'],
- 'tenant_id': self.instance['project_id']}
- ports = {'ports': [{'id': 'test1'}]}
- self.moxed_client.list_ports(**search_opts).AndReturn(ports)
- migration = {'source_compute': self.instance.get('host'),
- 'dest_compute': 'dest_host', }
- port_req_body = {'port':
- {'binding:host_id': migration['dest_compute']}}
- self.moxed_client.update_port('test1',
- port_req_body).AndRaise(
- Exception("fail to update port"))
- self.mox.ReplayAll()
- self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
- api.migrate_instance_finish,
- self.context, self.instance, migration)
-
- def test_associate_not_implemented(self):
- api = neutronapi.API()
- self.assertRaises(NotImplementedError,
- api.associate,
- self.context, 'id')
-
-
-class TestNeutronv2ExtraDhcpOpts(TestNeutronv2Base):
- def setUp(self):
- super(TestNeutronv2ExtraDhcpOpts, self).setUp()
- neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
- self.moxed_client)
-
- def test_allocate_for_instance_1_with_extra_dhcp_opts_turned_off(self):
- self._allocate_for_instance(1, extra_dhcp_opts=False)
-
- def test_allocate_for_instance_extradhcpopts(self):
- dhcp_opts = [{'opt_name': 'bootfile-name',
- 'opt_value': 'pxelinux.0'},
- {'opt_name': 'tftp-server',
- 'opt_value': '123.123.123.123'},
- {'opt_name': 'server-ip-address',
- 'opt_value': '123.123.123.456'}]
-
- self._allocate_for_instance(1, dhcp_options=dhcp_opts)
-
-
-class TestNeutronClientForAdminScenarios(test.TestCase):
-
- def _test_get_client_for_admin(self, use_id=False, admin_context=False):
-
- def client_mock(*args, **kwargs):
- client.Client.httpclient = mock.MagicMock()
-
- self.flags(auth_strategy=None, group='neutron')
- self.flags(url='http://anyhost/', group='neutron')
- self.flags(url_timeout=30, group='neutron')
- if use_id:
- self.flags(admin_tenant_id='admin_tenant_id', group='neutron')
- self.flags(admin_user_id='admin_user_id', group='neutron')
-
- if admin_context:
- my_context = context.get_admin_context()
- else:
- my_context = context.RequestContext('userid', 'my_tenantid',
- auth_token='token')
- self.mox.StubOutWithMock(client.Client, "__init__")
- kwargs = {
- 'auth_url': CONF.neutron.admin_auth_url,
- 'password': CONF.neutron.admin_password,
- 'endpoint_url': CONF.neutron.url,
- 'auth_strategy': None,
- 'timeout': CONF.neutron.url_timeout,
- 'insecure': False,
- 'ca_cert': None,
- 'token': None}
- if use_id:
- kwargs['tenant_id'] = CONF.neutron.admin_tenant_id
- kwargs['user_id'] = CONF.neutron.admin_user_id
- else:
- kwargs['tenant_name'] = CONF.neutron.admin_tenant_name
- kwargs['username'] = CONF.neutron.admin_username
- client.Client.__init__(**kwargs).WithSideEffects(client_mock)
- self.mox.ReplayAll()
-
- # clean global
- token_store = neutronv2.AdminTokenStore.get()
- token_store.admin_auth_token = None
- if admin_context:
- # Note that the context does not contain a token but is
- # an admin context which will force an elevation to admin
- # credentials.
- neutronv2.get_client(my_context)
- else:
- # Note that the context is not elevated, but the True is passed in
- # which will force an elevation to admin credentials even though
- # the context has an auth_token.
- neutronv2.get_client(my_context, True)
-
- def test_get_client_for_admin(self):
- self._test_get_client_for_admin()
-
- def test_get_client_for_admin_with_id(self):
- self._test_get_client_for_admin(use_id=True)
-
- def test_get_client_for_admin_context(self):
- self._test_get_client_for_admin(admin_context=True)
-
- def test_get_client_for_admin_context_with_id(self):
- self._test_get_client_for_admin(use_id=True, admin_context=True)
diff --git a/nova/tests/network/test_rpcapi.py b/nova/tests/network/test_rpcapi.py
deleted file mode 100644
index f33128cae3..0000000000
--- a/nova/tests/network/test_rpcapi.py
+++ /dev/null
@@ -1,353 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Unit Tests for nova.network.rpcapi
-"""
-
-import collections
-
-import mox
-from oslo.config import cfg
-
-from nova import context
-from nova.network import rpcapi as network_rpcapi
-from nova import test
-from nova.tests import fake_instance
-
-CONF = cfg.CONF
-
-
-class NetworkRpcAPITestCase(test.NoDBTestCase):
- def setUp(self):
- super(NetworkRpcAPITestCase, self).setUp()
- self.flags(multi_host=True)
-
- # Used to specify the default value expected if no real value is passed
- DefaultArg = collections.namedtuple('DefaultArg', ['value'])
-
- def _test_network_api(self, method, rpc_method, **kwargs):
- ctxt = context.RequestContext('fake_user', 'fake_project')
-
- rpcapi = network_rpcapi.NetworkAPI()
- self.assertIsNotNone(rpcapi.client)
- self.assertEqual(rpcapi.client.target.topic, CONF.network_topic)
-
- expected_retval = 'foo' if rpc_method == 'call' else None
- expected_version = kwargs.pop('version', None)
- expected_fanout = kwargs.pop('fanout', None)
- expected_kwargs = kwargs.copy()
-
- for k, v in expected_kwargs.items():
- if isinstance(v, self.DefaultArg):
- expected_kwargs[k] = v.value
- kwargs.pop(k)
-
- prepare_kwargs = {}
- if expected_version:
- prepare_kwargs['version'] = expected_version
- if expected_fanout:
- prepare_kwargs['fanout'] = True
-
- if 'source_compute' in expected_kwargs:
- # Fix up for migrate_instance_* calls.
- expected_kwargs['source'] = expected_kwargs.pop('source_compute')
- expected_kwargs['dest'] = expected_kwargs.pop('dest_compute')
-
- targeted_methods = [
- 'lease_fixed_ip', 'release_fixed_ip', 'rpc_setup_network_on_host',
- '_rpc_allocate_fixed_ip', 'deallocate_fixed_ip', 'update_dns',
- '_associate_floating_ip', '_disassociate_floating_ip',
- 'lease_fixed_ip', 'release_fixed_ip', 'migrate_instance_start',
- 'migrate_instance_finish',
- 'allocate_for_instance', 'deallocate_for_instance',
- ]
- targeted_by_instance = ['deallocate_for_instance']
- if method in targeted_methods and ('host' in expected_kwargs or
- 'instance' in expected_kwargs):
- if method in targeted_by_instance:
- host = expected_kwargs['instance']['host']
- else:
- host = expected_kwargs['host']
- if method not in ['allocate_for_instance',
- 'deallocate_fixed_ip']:
- expected_kwargs.pop('host')
- if CONF.multi_host:
- prepare_kwargs['server'] = host
-
- self.mox.StubOutWithMock(rpcapi, 'client')
-
- version_check = [
- 'deallocate_for_instance', 'deallocate_fixed_ip',
- 'allocate_for_instance',
- ]
- if method in version_check:
- rpcapi.client.can_send_version(mox.IgnoreArg()).AndReturn(True)
-
- if prepare_kwargs:
- rpcapi.client.prepare(**prepare_kwargs).AndReturn(rpcapi.client)
-
- rpc_method = getattr(rpcapi.client, rpc_method)
- rpc_method(ctxt, method, **expected_kwargs).AndReturn('foo')
-
- self.mox.ReplayAll()
-
- retval = getattr(rpcapi, method)(ctxt, **kwargs)
- self.assertEqual(retval, expected_retval)
-
- def test_create_networks(self):
- self._test_network_api('create_networks', rpc_method='call',
- arg1='arg', arg2='arg')
-
- def test_delete_network(self):
- self._test_network_api('delete_network', rpc_method='call',
- uuid='fake_uuid', fixed_range='range')
-
- def test_disassociate_network(self):
- self._test_network_api('disassociate_network', rpc_method='call',
- network_uuid='fake_uuid')
-
- def test_associate_host_and_project(self):
- self._test_network_api('associate', rpc_method='call',
- network_uuid='fake_uuid',
- associations={'host': "testHost",
- 'project': 'testProject'},
- version="1.5")
-
- def test_get_fixed_ip(self):
- self._test_network_api('get_fixed_ip', rpc_method='call', id='id')
-
- def test_get_fixed_ip_by_address(self):
- self._test_network_api('get_fixed_ip_by_address', rpc_method='call',
- address='a.b.c.d')
-
- def test_get_floating_ip(self):
- self._test_network_api('get_floating_ip', rpc_method='call', id='id')
-
- def test_get_floating_ip_pools(self):
- self._test_network_api('get_floating_ip_pools', rpc_method='call',
- version="1.7")
-
- def test_get_floating_ip_by_address(self):
- self._test_network_api('get_floating_ip_by_address', rpc_method='call',
- address='a.b.c.d')
-
- def test_get_floating_ips_by_project(self):
- self._test_network_api('get_floating_ips_by_project',
- rpc_method='call')
-
- def test_get_floating_ips_by_fixed_address(self):
- self._test_network_api('get_floating_ips_by_fixed_address',
- rpc_method='call', fixed_address='w.x.y.z')
-
- def test_get_instance_id_by_floating_address(self):
- self._test_network_api('get_instance_id_by_floating_address',
- rpc_method='call', address='w.x.y.z')
-
- def test_allocate_floating_ip(self):
- self._test_network_api('allocate_floating_ip', rpc_method='call',
- project_id='fake_id', pool='fake_pool', auto_assigned=False)
-
- def test_deallocate_floating_ip(self):
- self._test_network_api('deallocate_floating_ip', rpc_method='call',
- address='addr', affect_auto_assigned=True)
-
- def test_allocate_floating_ip_no_multi(self):
- self.flags(multi_host=False)
- self._test_network_api('allocate_floating_ip', rpc_method='call',
- project_id='fake_id', pool='fake_pool', auto_assigned=False)
-
- def test_deallocate_floating_ip_no_multi(self):
- self.flags(multi_host=False)
- self._test_network_api('deallocate_floating_ip', rpc_method='call',
- address='addr', affect_auto_assigned=True)
-
- def test_associate_floating_ip(self):
- self._test_network_api('associate_floating_ip', rpc_method='call',
- floating_address='blah', fixed_address='foo',
- affect_auto_assigned=True)
-
- def test_disassociate_floating_ip(self):
- self._test_network_api('disassociate_floating_ip', rpc_method='call',
- address='addr', affect_auto_assigned=True)
-
- def test_allocate_for_instance(self):
- self._test_network_api('allocate_for_instance', rpc_method='call',
- instance_id='fake_id', project_id='fake_id', host='fake_host',
- rxtx_factor='fake_factor', vpn=False, requested_networks={},
- macs=[], version='1.13')
-
- def test_deallocate_for_instance(self):
- instance = fake_instance.fake_instance_obj(context.get_admin_context())
- self._test_network_api('deallocate_for_instance', rpc_method='call',
- requested_networks=self.DefaultArg(None), instance=instance,
- version='1.11')
-
- def test_deallocate_for_instance_with_expected_networks(self):
- instance = fake_instance.fake_instance_obj(context.get_admin_context())
- self._test_network_api('deallocate_for_instance', rpc_method='call',
- instance=instance, requested_networks={}, version='1.11')
-
- def test_add_fixed_ip_to_instance(self):
- self._test_network_api('add_fixed_ip_to_instance', rpc_method='call',
- instance_id='fake_id', rxtx_factor='fake_factor',
- host='fake_host', network_id='fake_id', version='1.9')
-
- def test_remove_fixed_ip_from_instance(self):
- self._test_network_api('remove_fixed_ip_from_instance',
- rpc_method='call', instance_id='fake_id',
- rxtx_factor='fake_factor', host='fake_host',
- address='fake_address', version='1.9')
-
- def test_add_network_to_project(self):
- self._test_network_api('add_network_to_project', rpc_method='call',
- project_id='fake_id', network_uuid='fake_uuid')
-
- def test_get_instance_nw_info(self):
- self._test_network_api('get_instance_nw_info', rpc_method='call',
- instance_id='fake_id', rxtx_factor='fake_factor',
- host='fake_host', project_id='fake_id', version='1.9')
-
- def test_validate_networks(self):
- self._test_network_api('validate_networks', rpc_method='call',
- networks={})
-
- def test_get_instance_uuids_by_ip_filter(self):
- self._test_network_api('get_instance_uuids_by_ip_filter',
- rpc_method='call', filters={})
-
- def test_get_dns_domains(self):
- self._test_network_api('get_dns_domains', rpc_method='call')
-
- def test_add_dns_entry(self):
- self._test_network_api('add_dns_entry', rpc_method='call',
- address='addr', name='name', dns_type='foo', domain='domain')
-
- def test_modify_dns_entry(self):
- self._test_network_api('modify_dns_entry', rpc_method='call',
- address='addr', name='name', domain='domain')
-
- def test_delete_dns_entry(self):
- self._test_network_api('delete_dns_entry', rpc_method='call',
- name='name', domain='domain')
-
- def test_delete_dns_domain(self):
- self._test_network_api('delete_dns_domain', rpc_method='call',
- domain='fake_domain')
-
- def test_get_dns_entries_by_address(self):
- self._test_network_api('get_dns_entries_by_address', rpc_method='call',
- address='fake_address', domain='fake_domain')
-
- def test_get_dns_entries_by_name(self):
- self._test_network_api('get_dns_entries_by_name', rpc_method='call',
- name='fake_name', domain='fake_domain')
-
- def test_create_private_dns_domain(self):
- self._test_network_api('create_private_dns_domain', rpc_method='call',
- domain='fake_domain', av_zone='fake_zone')
-
- def test_create_public_dns_domain(self):
- self._test_network_api('create_public_dns_domain', rpc_method='call',
- domain='fake_domain', project='fake_project')
-
- def test_setup_networks_on_host(self):
- self._test_network_api('setup_networks_on_host', rpc_method='call',
- instance_id='fake_id', host='fake_host', teardown=False)
-
- def test_lease_fixed_ip(self):
- self._test_network_api('lease_fixed_ip', rpc_method='cast',
- host='fake_host', address='fake_addr')
-
- def test_release_fixed_ip(self):
- self._test_network_api('release_fixed_ip', rpc_method='cast',
- host='fake_host', address='fake_addr')
-
- def test_set_network_host(self):
- self._test_network_api('set_network_host', rpc_method='call',
- network_ref={})
-
- def test_rpc_setup_network_on_host(self):
- self._test_network_api('rpc_setup_network_on_host', rpc_method='call',
- network_id='fake_id', teardown=False, host='fake_host')
-
- def test_rpc_allocate_fixed_ip(self):
- self._test_network_api('_rpc_allocate_fixed_ip', rpc_method='call',
- instance_id='fake_id', network_id='fake_id', address='addr',
- vpn=True, host='fake_host')
-
- def test_deallocate_fixed_ip(self):
- instance = fake_instance.fake_db_instance()
- self._test_network_api('deallocate_fixed_ip', rpc_method='call',
- address='fake_addr', host='fake_host', instance=instance,
- version='1.12')
-
- def test_update_dns(self):
- self._test_network_api('update_dns', rpc_method='cast', fanout=True,
- network_ids='fake_id', version='1.3')
-
- def test__associate_floating_ip(self):
- self._test_network_api('_associate_floating_ip', rpc_method='call',
- floating_address='fake_addr', fixed_address='fixed_address',
- interface='fake_interface', host='fake_host',
- instance_uuid='fake_uuid', version='1.6')
-
- def test__disassociate_floating_ip(self):
- self._test_network_api('_disassociate_floating_ip', rpc_method='call',
- address='fake_addr', interface='fake_interface',
- host='fake_host', instance_uuid='fake_uuid', version='1.6')
-
- def test_migrate_instance_start(self):
- self._test_network_api('migrate_instance_start', rpc_method='call',
- instance_uuid='fake_instance_uuid',
- rxtx_factor='fake_factor',
- project_id='fake_project',
- source_compute='fake_src_compute',
- dest_compute='fake_dest_compute',
- floating_addresses='fake_floating_addresses',
- host=self.DefaultArg(None),
- version='1.2')
-
- def test_migrate_instance_start_multi_host(self):
- self._test_network_api('migrate_instance_start', rpc_method='call',
- instance_uuid='fake_instance_uuid',
- rxtx_factor='fake_factor',
- project_id='fake_project',
- source_compute='fake_src_compute',
- dest_compute='fake_dest_compute',
- floating_addresses='fake_floating_addresses',
- host='fake_host',
- version='1.2')
-
- def test_migrate_instance_finish(self):
- self._test_network_api('migrate_instance_finish', rpc_method='call',
- instance_uuid='fake_instance_uuid',
- rxtx_factor='fake_factor',
- project_id='fake_project',
- source_compute='fake_src_compute',
- dest_compute='fake_dest_compute',
- floating_addresses='fake_floating_addresses',
- host=self.DefaultArg(None),
- version='1.2')
-
- def test_migrate_instance_finish_multi_host(self):
- self._test_network_api('migrate_instance_finish', rpc_method='call',
- instance_uuid='fake_instance_uuid',
- rxtx_factor='fake_factor',
- project_id='fake_project',
- source_compute='fake_src_compute',
- dest_compute='fake_dest_compute',
- floating_addresses='fake_floating_addresses',
- host='fake_host',
- version='1.2')
diff --git a/nova/tests/objects/test_agent.py b/nova/tests/objects/test_agent.py
deleted file mode 100644
index d8d8a41356..0000000000
--- a/nova/tests/objects/test_agent.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2014 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import exception
-from nova.objects import agent as agent_obj
-from nova.tests.objects import test_objects
-
-
-fake_agent = {
- 'id': 1,
- 'hypervisor': 'novavm',
- 'os': 'linux',
- 'architecture': 'DISC',
- 'version': '1.0',
- 'url': 'http://openstack.org/novavm/agents/novavm_agent_v1.0.rpm',
- 'md5hash': '8cb151f3adc23a92db8ddbe084796823',
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': False,
-}
-
-
-class _TestAgent(object):
- @staticmethod
- def _compare(test, db, obj):
- for field, value in db.items():
- test.assertEqual(db[field], obj[field])
-
- @mock.patch('nova.db.agent_build_get_by_triple')
- def test_get_by_triple(self, mock_get):
- mock_get.return_value = fake_agent
- agent = agent_obj.Agent.get_by_triple(self.context,
- 'novavm', 'linux', 'DISC')
- self._compare(self, fake_agent, agent)
-
- @mock.patch('nova.db.agent_build_get_by_triple')
- def test_get_by_triple_none(self, mock_get):
- mock_get.return_value = None
- agent = agent_obj.Agent.get_by_triple(self.context,
- 'novavm', 'linux', 'DISC')
- self.assertIsNone(agent)
-
- @mock.patch('nova.db.agent_build_create')
- def test_create(self, mock_create):
- mock_create.return_value = fake_agent
- agent = agent_obj.Agent(context=self.context)
- agent.hypervisor = 'novavm'
- agent.create()
- mock_create.assert_called_once_with(self.context,
- {'hypervisor': 'novavm'})
- self._compare(self, fake_agent, agent)
-
- @mock.patch('nova.db.agent_build_create')
- def test_create_with_id(self, mock_create):
- agent = agent_obj.Agent(context=self.context, id=123)
- self.assertRaises(exception.ObjectActionError, agent.create)
- self.assertFalse(mock_create.called)
-
- @mock.patch('nova.db.agent_build_destroy')
- def test_destroy(self, mock_destroy):
- agent = agent_obj.Agent(context=self.context, id=123)
- agent.destroy()
- mock_destroy.assert_called_once_with(self.context, 123)
-
- @mock.patch('nova.db.agent_build_update')
- def test_save(self, mock_update):
- mock_update.return_value = fake_agent
- agent = agent_obj.Agent(context=self.context, id=123)
- agent.obj_reset_changes()
- agent.hypervisor = 'novavm'
- agent.save()
- mock_update.assert_called_once_with(self.context, 123,
- {'hypervisor': 'novavm'})
-
- @mock.patch('nova.db.agent_build_get_all')
- def test_get_all(self, mock_get_all):
- mock_get_all.return_value = [fake_agent]
- agents = agent_obj.AgentList.get_all(self.context, hypervisor='novavm')
- self.assertEqual(1, len(agents))
- self._compare(self, fake_agent, agents[0])
- mock_get_all.assert_called_once_with(self.context, hypervisor='novavm')
-
-
-class TestAgent(test_objects._LocalTest, _TestAgent):
- pass
-
-
-class TestAgentRemote(test_objects._RemoteTest, _TestAgent):
- pass
diff --git a/nova/tests/objects/test_aggregate.py b/nova/tests/objects/test_aggregate.py
deleted file mode 100644
index 9492b26cf8..0000000000
--- a/nova/tests/objects/test_aggregate.py
+++ /dev/null
@@ -1,199 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo.utils import timeutils
-
-from nova import db
-from nova import exception
-from nova.objects import aggregate
-from nova.tests import fake_notifier
-from nova.tests.objects import test_objects
-
-
-NOW = timeutils.utcnow().replace(microsecond=0)
-fake_aggregate = {
- 'created_at': NOW,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': False,
- 'id': 123,
- 'name': 'fake-aggregate',
- 'hosts': ['foo', 'bar'],
- 'metadetails': {'this': 'that'},
- }
-
-SUBS = {'metadata': 'metadetails'}
-
-
-class _TestAggregateObject(object):
- def test_get_by_id(self):
- self.mox.StubOutWithMock(db, 'aggregate_get')
- db.aggregate_get(self.context, 123).AndReturn(fake_aggregate)
- self.mox.ReplayAll()
- agg = aggregate.Aggregate.get_by_id(self.context, 123)
- self.compare_obj(agg, fake_aggregate, subs=SUBS)
-
- def test_create(self):
- self.mox.StubOutWithMock(db, 'aggregate_create')
- db.aggregate_create(self.context, {'name': 'foo'},
- metadata={'one': 'two'}).AndReturn(fake_aggregate)
- self.mox.ReplayAll()
- agg = aggregate.Aggregate()
- agg.name = 'foo'
- agg.metadata = {'one': 'two'}
- agg.create(self.context)
- self.compare_obj(agg, fake_aggregate, subs=SUBS)
-
- def test_recreate_fails(self):
- self.mox.StubOutWithMock(db, 'aggregate_create')
- db.aggregate_create(self.context, {'name': 'foo'},
- metadata={'one': 'two'}).AndReturn(fake_aggregate)
- self.mox.ReplayAll()
- agg = aggregate.Aggregate()
- agg.name = 'foo'
- agg.metadata = {'one': 'two'}
- agg.create(self.context)
- self.assertRaises(exception.ObjectActionError, agg.create,
- self.context)
-
- def test_save(self):
- self.mox.StubOutWithMock(db, 'aggregate_update')
- db.aggregate_update(self.context, 123, {'name': 'baz'}).AndReturn(
- fake_aggregate)
- self.mox.ReplayAll()
- agg = aggregate.Aggregate()
- agg.id = 123
- agg.name = 'baz'
- agg.save(self.context)
- self.compare_obj(agg, fake_aggregate, subs=SUBS)
-
- def test_save_and_create_no_hosts(self):
- agg = aggregate.Aggregate()
- agg.id = 123
- agg.hosts = ['foo', 'bar']
- self.assertRaises(exception.ObjectActionError,
- agg.create, self.context)
- self.assertRaises(exception.ObjectActionError,
- agg.save, self.context)
-
- def test_update_metadata(self):
- self.mox.StubOutWithMock(db, 'aggregate_metadata_delete')
- self.mox.StubOutWithMock(db, 'aggregate_metadata_add')
- db.aggregate_metadata_delete(self.context, 123, 'todelete')
- db.aggregate_metadata_add(self.context, 123, {'toadd': 'myval'})
- self.mox.ReplayAll()
- fake_notifier.NOTIFICATIONS = []
- agg = aggregate.Aggregate()
- agg._context = self.context
- agg.id = 123
- agg.metadata = {'foo': 'bar'}
- agg.obj_reset_changes()
- agg.update_metadata({'todelete': None, 'toadd': 'myval'})
- self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual('aggregate.updatemetadata.start', msg.event_type)
- self.assertEqual({'todelete': None, 'toadd': 'myval'},
- msg.payload['meta_data'])
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual('aggregate.updatemetadata.end', msg.event_type)
- self.assertEqual({'todelete': None, 'toadd': 'myval'},
- msg.payload['meta_data'])
- self.assertEqual({'foo': 'bar', 'toadd': 'myval'}, agg.metadata)
-
- def test_destroy(self):
- self.mox.StubOutWithMock(db, 'aggregate_delete')
- db.aggregate_delete(self.context, 123)
- self.mox.ReplayAll()
- agg = aggregate.Aggregate()
- agg.id = 123
- agg.destroy(self.context)
-
- def test_add_host(self):
- self.mox.StubOutWithMock(db, 'aggregate_host_add')
- db.aggregate_host_add(self.context, 123, 'bar'
- ).AndReturn({'host': 'bar'})
- self.mox.ReplayAll()
- agg = aggregate.Aggregate()
- agg.id = 123
- agg.hosts = ['foo']
- agg._context = self.context
- agg.add_host('bar')
- self.assertEqual(agg.hosts, ['foo', 'bar'])
-
- def test_delete_host(self):
- self.mox.StubOutWithMock(db, 'aggregate_host_delete')
- db.aggregate_host_delete(self.context, 123, 'foo')
- self.mox.ReplayAll()
- agg = aggregate.Aggregate()
- agg.id = 123
- agg.hosts = ['foo', 'bar']
- agg._context = self.context
- agg.delete_host('foo')
- self.assertEqual(agg.hosts, ['bar'])
-
- def test_availability_zone(self):
- agg = aggregate.Aggregate()
- agg.metadata = {'availability_zone': 'foo'}
- self.assertEqual('foo', agg.availability_zone)
-
- def test_get_all(self):
- self.mox.StubOutWithMock(db, 'aggregate_get_all')
- db.aggregate_get_all(self.context).AndReturn([fake_aggregate])
- self.mox.ReplayAll()
- aggs = aggregate.AggregateList.get_all(self.context)
- self.assertEqual(1, len(aggs))
- self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
-
- def test_by_host(self):
- self.mox.StubOutWithMock(db, 'aggregate_get_by_host')
- db.aggregate_get_by_host(self.context, 'fake-host', key=None,
- ).AndReturn([fake_aggregate])
- self.mox.ReplayAll()
- aggs = aggregate.AggregateList.get_by_host(self.context, 'fake-host')
- self.assertEqual(1, len(aggs))
- self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
-
- @mock.patch('nova.db.aggregate_get_by_metadata_key')
- def test_get_by_metadata_key(self, get_by_metadata_key):
- get_by_metadata_key.return_value = [fake_aggregate]
- aggs = aggregate.AggregateList.get_by_metadata_key(
- self.context, 'this')
- self.assertEqual(1, len(aggs))
- self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
-
- @mock.patch('nova.db.aggregate_get_by_metadata_key')
- def test_get_by_metadata_key_and_hosts_no_match(self, get_by_metadata_key):
- get_by_metadata_key.return_value = [fake_aggregate]
- aggs = aggregate.AggregateList.get_by_metadata_key(
- self.context, 'this', hosts=['baz'])
- self.assertEqual(0, len(aggs))
-
- @mock.patch('nova.db.aggregate_get_by_metadata_key')
- def test_get_by_metadata_key_and_hosts_match(self, get_by_metadata_key):
- get_by_metadata_key.return_value = [fake_aggregate]
- aggs = aggregate.AggregateList.get_by_metadata_key(
- self.context, 'this', hosts=['foo', 'bar'])
- self.assertEqual(1, len(aggs))
- self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
-
-
-class TestAggregateObject(test_objects._LocalTest,
- _TestAggregateObject):
- pass
-
-
-class TestRemoteAggregateObject(test_objects._RemoteTest,
- _TestAggregateObject):
- pass
diff --git a/nova/tests/objects/test_bandwidth_usage.py b/nova/tests/objects/test_bandwidth_usage.py
deleted file mode 100644
index d7662210b6..0000000000
--- a/nova/tests/objects/test_bandwidth_usage.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-import iso8601
-import mock
-from oslo.utils import timeutils
-
-from nova import context
-from nova import db
-from nova.objects import bandwidth_usage
-from nova import test
-from nova.tests.objects import test_objects
-
-
-class _TestBandwidthUsage(test.TestCase):
-
- def setUp(self):
- super(_TestBandwidthUsage, self).setUp()
- self.user_id = 'fake_user'
- self.project_id = 'fake_project'
- self.context = context.RequestContext(self.user_id, self.project_id)
- now, start_period = self._time_now_and_start_period()
- self.expected_bw_usage = self._fake_bw_usage(
- time=now, start_period=start_period)
-
- @staticmethod
- def _compare(test, db, obj):
- for field, value in db.items():
- test.assertEqual(db[field], obj[field])
-
- @staticmethod
- def _fake_bw_usage(time=None, start_period=None, bw_in=100,
- bw_out=200, last_ctr_in=12345, last_ctr_out=67890):
- fake_bw_usage = {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': 0,
- 'instance_uuid': 'fake_uuid1',
- 'mac': 'fake_mac1',
- 'start_period': start_period,
- 'bw_in': bw_in,
- 'bw_out': bw_out,
- 'last_ctr_in': last_ctr_in,
- 'last_ctr_out': last_ctr_out,
- 'last_refreshed': time
- }
- return fake_bw_usage
-
- @staticmethod
- def _time_now_and_start_period():
- now = timeutils.utcnow().replace(tzinfo=iso8601.iso8601.Utc(),
- microsecond=0)
- start_period = now - datetime.timedelta(seconds=10)
- return now, start_period
-
- @mock.patch.object(db, 'bw_usage_get')
- def test_get_by_instance_uuid_and_mac(self, mock_get):
- mock_get.return_value = self.expected_bw_usage
- bw_usage = bandwidth_usage.BandwidthUsage.get_by_instance_uuid_and_mac(
- self.context, 'fake_uuid', 'fake_mac',
- start_period=self.expected_bw_usage['start_period'])
- self._compare(self, self.expected_bw_usage, bw_usage)
-
- @mock.patch.object(db, 'bw_usage_get_by_uuids')
- def test_get_by_uuids(self, mock_get_by_uuids):
- mock_get_by_uuids.return_value = [self.expected_bw_usage]
-
- bw_usages = bandwidth_usage.BandwidthUsageList.get_by_uuids(
- self.context, ['fake_uuid'],
- start_period=self.expected_bw_usage['start_period'])
- self.assertEqual(len(bw_usages), 1)
- self._compare(self, self.expected_bw_usage, bw_usages[0])
-
- @mock.patch.object(db, 'bw_usage_update')
- def test_create(self, mock_create):
- mock_create.return_value = self.expected_bw_usage
-
- bw_usage = bandwidth_usage.BandwidthUsage()
- bw_usage.create(self.context, 'fake_uuid', 'fake_mac',
- 100, 200, 12345, 67890,
- start_period=self.expected_bw_usage['start_period'])
-
- self._compare(self, self.expected_bw_usage, bw_usage)
-
- @mock.patch.object(db, 'bw_usage_update')
- def test_update(self, mock_update):
- expected_bw_usage1 = self._fake_bw_usage(
- time=self.expected_bw_usage['last_refreshed'],
- start_period=self.expected_bw_usage['start_period'],
- last_ctr_in=42, last_ctr_out=42)
-
- mock_update.side_effect = [expected_bw_usage1, self.expected_bw_usage]
-
- bw_usage = bandwidth_usage.BandwidthUsage()
- bw_usage.create(self.context, 'fake_uuid1', 'fake_mac1',
- 100, 200, 42, 42,
- start_period=self.expected_bw_usage['start_period'])
- self._compare(self, expected_bw_usage1, bw_usage)
- bw_usage.create(self.context, 'fake_uuid1', 'fake_mac1',
- 100, 200, 12345, 67890,
- start_period=self.expected_bw_usage['start_period'])
- self._compare(self, self.expected_bw_usage, bw_usage)
-
-
-class TestBandwidthUsageObject(test_objects._LocalTest,
- _TestBandwidthUsage):
- pass
-
-
-class TestRemoteBandwidthUsageObject(test_objects._RemoteTest,
- _TestBandwidthUsage):
- pass
diff --git a/nova/tests/objects/test_block_device.py b/nova/tests/objects/test_block_device.py
deleted file mode 100644
index 03b7fc2756..0000000000
--- a/nova/tests/objects/test_block_device.py
+++ /dev/null
@@ -1,333 +0,0 @@
-# Copyright 2013 Red Hat Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-
-import mock
-
-from nova.cells import rpcapi as cells_rpcapi
-from nova import db
-from nova import exception
-from nova import objects
-from nova.objects import block_device as block_device_obj
-from nova.tests import fake_block_device
-from nova.tests import fake_instance
-from nova.tests.objects import test_objects
-
-
-class _TestBlockDeviceMappingObject(object):
- def fake_bdm(self, instance=None):
- instance = instance or {}
- fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
- 'id': 123,
- 'instance_uuid': instance.get('uuid') or 'fake-instance',
- 'device_name': '/dev/sda2',
- 'source_type': 'snapshot',
- 'destination_type': 'volume',
- 'connection_info': "{'fake': 'connection_info'}",
- 'snapshot_id': 'fake-snapshot-id-1',
- 'boot_index': -1
- })
- if instance:
- fake_bdm['instance'] = instance
- return fake_bdm
-
- def _test_save(self, cell_type=None):
- if cell_type:
- self.flags(enable=True, cell_type=cell_type, group='cells')
- else:
- self.flags(enable=False, group='cells')
-
- fake_bdm = self.fake_bdm()
- with contextlib.nested(
- mock.patch.object(
- db, 'block_device_mapping_update', return_value=fake_bdm),
- mock.patch.object(
- cells_rpcapi.CellsAPI, 'bdm_update_or_create_at_top')
- ) as (bdm_update_mock, cells_update_mock):
- bdm_object = objects.BlockDeviceMapping()
- bdm_object.id = 123
- bdm_object.volume_id = 'fake_volume_id'
- bdm_object.save(self.context)
-
- bdm_update_mock.assert_called_once_with(
- self.context, 123, {'volume_id': 'fake_volume_id'},
- legacy=False)
- if cell_type != 'compute':
- self.assertFalse(cells_update_mock.called)
- else:
- self.assertEqual(1, cells_update_mock.call_count)
- self.assertTrue(len(cells_update_mock.call_args[0]) > 1)
- self.assertIsInstance(cells_update_mock.call_args[0][1],
- block_device_obj.BlockDeviceMapping)
- self.assertEqual(cells_update_mock.call_args[1], {})
-
- def test_save_nocells(self):
- self._test_save()
-
- def test_save_apicell(self):
- self._test_save(cell_type='api')
-
- def test_save_computecell(self):
- self._test_save(cell_type='compute')
-
- def test_save_instance_changed(self):
- bdm_object = objects.BlockDeviceMapping()
- bdm_object.instance = objects.Instance()
- self.assertRaises(exception.ObjectActionError,
- bdm_object.save, self.context)
-
- @mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
- def test_get_by_volume_id(self, get_by_vol_id):
- get_by_vol_id.return_value = self.fake_bdm()
-
- vol_bdm = objects.BlockDeviceMapping.get_by_volume_id(
- self.context, 'fake-volume-id')
- for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS:
- self.assertFalse(vol_bdm.obj_attr_is_set(attr))
- self.assertRemotes()
-
- @mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
- def test_get_by_volume_id_not_found(self, get_by_vol_id):
- get_by_vol_id.return_value = None
-
- self.assertRaises(exception.VolumeBDMNotFound,
- objects.BlockDeviceMapping.get_by_volume_id,
- self.context, 'fake-volume-id')
-
- @mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
- def test_get_by_volume_instance_uuid_missmatch(self, get_by_vol_id):
- fake_bdm_vol = self.fake_bdm(instance={'uuid': 'other-fake-instance'})
- get_by_vol_id.return_value = fake_bdm_vol
-
- self.assertRaises(exception.InvalidVolume,
- objects.BlockDeviceMapping.get_by_volume_id,
- self.context, 'fake-volume-id',
- instance_uuid='fake-instance')
-
- @mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
- def test_get_by_volume_id_with_expected(self, get_by_vol_id):
- get_by_vol_id.return_value = self.fake_bdm(
- fake_instance.fake_db_instance())
-
- vol_bdm = objects.BlockDeviceMapping.get_by_volume_id(
- self.context, 'fake-volume-id', expected_attrs=['instance'])
- for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS:
- self.assertTrue(vol_bdm.obj_attr_is_set(attr))
- get_by_vol_id.assert_called_once_with(self.context, 'fake-volume-id',
- ['instance'])
- self.assertRemotes()
-
- def _test_create_mocked(self, cell_type=None):
- if cell_type:
- self.flags(enable=True, cell_type=cell_type, group='cells')
- else:
- self.flags(enable=False, group='cells')
- values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
- 'destination_type': 'volume',
- 'instance_uuid': 'fake-instance'}
- fake_bdm = fake_block_device.FakeDbBlockDeviceDict(values)
-
- with contextlib.nested(
- mock.patch.object(
- db, 'block_device_mapping_create', return_value=fake_bdm),
- mock.patch.object(cells_rpcapi.CellsAPI,
- 'bdm_update_or_create_at_top')
- ) as (bdm_create_mock, cells_update_mock):
- bdm = objects.BlockDeviceMapping(**values)
-
- if cell_type == 'api':
- self.assertRaises(exception.ObjectActionError,
- bdm.create, self.context)
- elif cell_type == 'compute':
- bdm.create(self.context)
- bdm_create_mock.assert_called_once_with(
- self.context, values, legacy=False)
- self.assertEqual(1, cells_update_mock.call_count)
- self.assertTrue(len(cells_update_mock.call_args[0]) > 1)
- self.assertIsInstance(cells_update_mock.call_args[0][1],
- block_device_obj.BlockDeviceMapping)
- self.assertEqual(cells_update_mock.call_args[1],
- {'create': True})
- else:
- bdm.create(self.context)
- self.assertFalse(cells_update_mock.called)
- bdm_create_mock.assert_called_once_with(
- self.context, values, legacy=False)
-
- def test_create_nocells(self):
- self._test_create_mocked()
-
- def test_create_apicell(self):
- self._test_create_mocked(cell_type='api')
-
- def test_create_computecell(self):
- self._test_create_mocked(cell_type='compute')
-
- def test_create(self):
- values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
- 'destination_type': 'volume',
- 'instance_uuid': 'fake-instance'}
- bdm = objects.BlockDeviceMapping(**values)
- with mock.patch.object(cells_rpcapi.CellsAPI,
- 'bdm_update_or_create_at_top'):
- bdm.create(self.context)
-
- for k, v in values.iteritems():
- self.assertEqual(v, getattr(bdm, k))
-
- def test_create_fails(self):
- values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
- 'destination_type': 'volume',
- 'instance_uuid': 'fake-instance'}
- bdm = objects.BlockDeviceMapping(**values)
- bdm.create(self.context)
-
- self.assertRaises(exception.ObjectActionError,
- bdm.create, self.context)
-
- def test_create_fails_instance(self):
- values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
- 'destination_type': 'volume',
- 'instance_uuid': 'fake-instance',
- 'instance': objects.Instance()}
- bdm = objects.BlockDeviceMapping(**values)
- self.assertRaises(exception.ObjectActionError,
- bdm.create, self.context)
-
- def _test_destroy_mocked(self, cell_type=None):
- values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
- 'destination_type': 'volume', 'id': 1,
- 'instance_uuid': 'fake-instance', 'device_name': 'fake'}
- if cell_type:
- self.flags(enable=True, cell_type=cell_type, group='cells')
- else:
- self.flags(enable=False, group='cells')
- with contextlib.nested(
- mock.patch.object(db, 'block_device_mapping_destroy'),
- mock.patch.object(cells_rpcapi.CellsAPI, 'bdm_destroy_at_top')
- ) as (bdm_del, cells_destroy):
- bdm = objects.BlockDeviceMapping(**values)
- bdm.destroy(self.context)
- bdm_del.assert_called_once_with(self.context, values['id'])
- if cell_type != 'compute':
- self.assertFalse(cells_destroy.called)
- else:
- cells_destroy.assert_called_once_with(
- self.context, values['instance_uuid'],
- device_name=values['device_name'],
- volume_id=values['volume_id'])
-
- def test_destroy_nocells(self):
- self._test_destroy_mocked()
-
- def test_destroy_apicell(self):
- self._test_destroy_mocked(cell_type='api')
-
- def test_destroy_computecell(self):
- self._test_destroy_mocked(cell_type='compute')
-
-
-class TestBlockDeviceMappingObject(test_objects._LocalTest,
- _TestBlockDeviceMappingObject):
- pass
-
-
-class TestRemoteBlockDeviceMappingObject(test_objects._RemoteTest,
- _TestBlockDeviceMappingObject):
- pass
-
-
-class _TestBlockDeviceMappingListObject(object):
- def fake_bdm(self, bdm_id):
- fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
- 'id': bdm_id, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sda2',
- 'source_type': 'snapshot',
- 'destination_type': 'volume',
- 'connection_info': "{'fake': 'connection_info'}",
- 'snapshot_id': 'fake-snapshot-id-1',
- 'boot_index': -1,
- })
- return fake_bdm
-
- @mock.patch.object(db, 'block_device_mapping_get_all_by_instance')
- def test_get_by_instance_uuid(self, get_all_by_inst):
- fakes = [self.fake_bdm(123), self.fake_bdm(456)]
- get_all_by_inst.return_value = fakes
- bdm_list = (
- objects.BlockDeviceMappingList.get_by_instance_uuid(
- self.context, 'fake_instance_uuid'))
- for faked, got in zip(fakes, bdm_list):
- self.assertIsInstance(got, objects.BlockDeviceMapping)
- self.assertEqual(faked['id'], got.id)
-
- @mock.patch.object(db, 'block_device_mapping_get_all_by_instance')
- def test_get_by_instance_uuid_no_result(self, get_all_by_inst):
- get_all_by_inst.return_value = None
- bdm_list = (
- objects.BlockDeviceMappingList.get_by_instance_uuid(
- self.context, 'fake_instance_uuid'))
- self.assertEqual(0, len(bdm_list))
-
- def test_root_volume_metadata(self):
- fake_volume = {
- 'volume_image_metadata': {'vol_test_key': 'vol_test_value'}}
-
- class FakeVolumeApi(object):
- def get(*args, **kwargs):
- return fake_volume
-
- block_device_mapping = block_device_obj.block_device_make_list(None, [
- fake_block_device.FakeDbBlockDeviceDict(
- {'id': 1,
- 'boot_index': 0,
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'volume_id': 'fake_volume_id',
- 'delete_on_termination': False})])
-
- volume_meta = block_device_mapping.root_metadata(
- self.context, None, FakeVolumeApi())
- self.assertEqual(fake_volume['volume_image_metadata'], volume_meta)
-
- def test_root_image_metadata(self):
- fake_image = {'properties': {'img_test_key': 'img_test_value'}}
-
- class FakeImageApi(object):
- def show(*args, **kwargs):
- return fake_image
-
- block_device_mapping = block_device_obj.block_device_make_list(None, [
- fake_block_device.FakeDbBlockDeviceDict(
- {'id': 1,
- 'boot_index': 0,
- 'source_type': 'image',
- 'destination_type': 'local',
- 'image_id': "fake-image",
- 'delete_on_termination': True})])
-
- image_meta = block_device_mapping.root_metadata(
- self.context, FakeImageApi(), None)
- self.assertEqual(fake_image['properties'], image_meta)
-
-
-class TestBlockDeviceMappingListObject(test_objects._LocalTest,
- _TestBlockDeviceMappingListObject):
- pass
-
-
-class TestRemoteBlockDeviceMappingListObject(
- test_objects._RemoteTest, _TestBlockDeviceMappingListObject):
- pass
diff --git a/nova/tests/objects/test_compute_node.py b/nova/tests/objects/test_compute_node.py
deleted file mode 100644
index 49bb968496..0000000000
--- a/nova/tests/objects/test_compute_node.py
+++ /dev/null
@@ -1,240 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo.serialization import jsonutils
-from oslo.utils import timeutils
-
-from nova import db
-from nova import exception
-from nova.objects import compute_node
-from nova.objects import hv_spec
-from nova.objects import service
-from nova.tests.objects import test_objects
-from nova.virt import hardware
-
-NOW = timeutils.utcnow().replace(microsecond=0)
-fake_stats = {'num_foo': '10'}
-fake_stats_db_format = jsonutils.dumps(fake_stats)
-# host_ip is coerced from a string to an IPAddress
-# but needs to be converted to a string for the database format
-fake_host_ip = '127.0.0.1'
-fake_numa_topology = hardware.VirtNUMAHostTopology(
- cells=[hardware.VirtNUMATopologyCellUsage(0, set([1, 2]), 512),
- hardware.VirtNUMATopologyCellUsage(1, set([3, 4]), 512)])
-fake_numa_topology_db_format = fake_numa_topology.to_json()
-fake_hv_spec = hv_spec.HVSpec(arch='foo', hv_type='bar', vm_mode='foobar')
-fake_supported_hv_specs = [fake_hv_spec]
-# for backward compatibility, each supported instance object
-# is stored as a list in the database
-fake_supported_hv_specs_db_format = jsonutils.dumps([fake_hv_spec.to_list()])
-fake_compute_node = {
- 'created_at': NOW,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': False,
- 'id': 123,
- 'service_id': 456,
- 'vcpus': 4,
- 'memory_mb': 4096,
- 'local_gb': 1024,
- 'vcpus_used': 2,
- 'memory_mb_used': 2048,
- 'local_gb_used': 512,
- 'hypervisor_type': 'Hyper-Dan-VM-ware',
- 'hypervisor_version': 1001,
- 'hypervisor_hostname': 'vm.danplanet.com',
- 'free_ram_mb': 1024,
- 'free_disk_gb': 256,
- 'current_workload': 100,
- 'running_vms': 2013,
- 'cpu_info': 'Schmintel i786',
- 'disk_available_least': 256,
- 'metrics': '',
- 'stats': fake_stats_db_format,
- 'host_ip': fake_host_ip,
- 'numa_topology': fake_numa_topology_db_format,
- 'supported_instances': fake_supported_hv_specs_db_format,
- }
-
-
-class _TestComputeNodeObject(object):
- def supported_hv_specs_comparator(self, expected, obj_val):
- obj_val = [inst.to_list() for inst in obj_val]
- self.json_comparator(expected, obj_val)
-
- def comparators(self):
- return {'stats': self.json_comparator,
- 'host_ip': self.str_comparator,
- 'supported_hv_specs': self.supported_hv_specs_comparator}
-
- def subs(self):
- return {'supported_hv_specs': 'supported_instances'}
-
- def test_get_by_id(self):
- self.mox.StubOutWithMock(db, 'compute_node_get')
- db.compute_node_get(self.context, 123).AndReturn(fake_compute_node)
- self.mox.ReplayAll()
- compute = compute_node.ComputeNode.get_by_id(self.context, 123)
- self.compare_obj(compute, fake_compute_node,
- subs=self.subs(),
- comparators=self.comparators())
-
- def test_get_by_service_id(self):
- self.mox.StubOutWithMock(db, 'compute_node_get_by_service_id')
- db.compute_node_get_by_service_id(self.context, 456).AndReturn(
- fake_compute_node)
- self.mox.ReplayAll()
- compute = compute_node.ComputeNode.get_by_service_id(self.context, 456)
- self.compare_obj(compute, fake_compute_node,
- subs=self.subs(),
- comparators=self.comparators())
-
- def test_create(self):
- self.mox.StubOutWithMock(db, 'compute_node_create')
- db.compute_node_create(
- self.context,
- {
- 'service_id': 456,
- 'stats': fake_stats_db_format,
- 'host_ip': fake_host_ip,
- 'supported_instances': fake_supported_hv_specs_db_format,
- }).AndReturn(fake_compute_node)
- self.mox.ReplayAll()
- compute = compute_node.ComputeNode()
- compute.service_id = 456
- compute.stats = fake_stats
- # NOTE (pmurray): host_ip is coerced to an IPAddress
- compute.host_ip = fake_host_ip
- compute.supported_hv_specs = fake_supported_hv_specs
- compute.create(self.context)
- self.compare_obj(compute, fake_compute_node,
- subs=self.subs(),
- comparators=self.comparators())
-
- def test_recreate_fails(self):
- self.mox.StubOutWithMock(db, 'compute_node_create')
- db.compute_node_create(self.context, {'service_id': 456}).AndReturn(
- fake_compute_node)
- self.mox.ReplayAll()
- compute = compute_node.ComputeNode()
- compute.service_id = 456
- compute.create(self.context)
- self.assertRaises(exception.ObjectActionError, compute.create,
- self.context)
-
- def test_save(self):
- self.mox.StubOutWithMock(db, 'compute_node_update')
- db.compute_node_update(
- self.context, 123,
- {
- 'vcpus_used': 3,
- 'stats': fake_stats_db_format,
- 'host_ip': fake_host_ip,
- 'supported_instances': fake_supported_hv_specs_db_format,
- }).AndReturn(fake_compute_node)
- self.mox.ReplayAll()
- compute = compute_node.ComputeNode()
- compute.id = 123
- compute.vcpus_used = 3
- compute.stats = fake_stats
- # NOTE (pmurray): host_ip is coerced to an IPAddress
- compute.host_ip = fake_host_ip
- compute.supported_hv_specs = fake_supported_hv_specs
- compute.save(self.context)
- self.compare_obj(compute, fake_compute_node,
- subs=self.subs(),
- comparators=self.comparators())
-
- @mock.patch.object(db, 'compute_node_create',
- return_value=fake_compute_node)
- def test_set_id_failure(self, db_mock):
- compute = compute_node.ComputeNode()
- compute.create(self.context)
- self.assertRaises(exception.ReadOnlyFieldError, setattr,
- compute, 'id', 124)
-
- def test_destroy(self):
- self.mox.StubOutWithMock(db, 'compute_node_delete')
- db.compute_node_delete(self.context, 123)
- self.mox.ReplayAll()
- compute = compute_node.ComputeNode()
- compute.id = 123
- compute.destroy(self.context)
-
- def test_service(self):
- self.mox.StubOutWithMock(service.Service, 'get_by_id')
- service.Service.get_by_id(self.context, 456).AndReturn('my-service')
- self.mox.ReplayAll()
- compute = compute_node.ComputeNode()
- compute._context = self.context
- compute.id = 123
- compute.service_id = 456
- self.assertEqual('my-service', compute.service)
- # Make sure it doesn't call Service.get_by_id() again
- self.assertEqual('my-service', compute.service)
-
- def test_get_all(self):
- self.mox.StubOutWithMock(db, 'compute_node_get_all')
- db.compute_node_get_all(self.context).AndReturn([fake_compute_node])
- self.mox.ReplayAll()
- computes = compute_node.ComputeNodeList.get_all(self.context)
- self.assertEqual(1, len(computes))
- self.compare_obj(computes[0], fake_compute_node,
- subs=self.subs(),
- comparators=self.comparators())
-
- def test_get_by_hypervisor(self):
- self.mox.StubOutWithMock(db, 'compute_node_search_by_hypervisor')
- db.compute_node_search_by_hypervisor(self.context, 'hyper').AndReturn(
- [fake_compute_node])
- self.mox.ReplayAll()
- computes = compute_node.ComputeNodeList.get_by_hypervisor(self.context,
- 'hyper')
- self.assertEqual(1, len(computes))
- self.compare_obj(computes[0], fake_compute_node,
- subs=self.subs(),
- comparators=self.comparators())
-
- @mock.patch('nova.db.service_get')
- def test_get_by_service(self, service_get):
- service_get.return_value = {'compute_node': [fake_compute_node]}
- fake_service = service.Service(id=123)
- computes = compute_node.ComputeNodeList.get_by_service(self.context,
- fake_service)
- self.assertEqual(1, len(computes))
- self.compare_obj(computes[0], fake_compute_node,
- subs=self.subs(),
- comparators=self.comparators())
-
- def test_compat_numa_topology(self):
- compute = compute_node.ComputeNode()
- primitive = compute.obj_to_primitive(target_version='1.4')
- self.assertNotIn('numa_topology', primitive)
-
- def test_compat_supported_hv_specs(self):
- compute = compute_node.ComputeNode()
- compute.supported_hv_specs = fake_supported_hv_specs
- primitive = compute.obj_to_primitive(target_version='1.5')
- self.assertNotIn('supported_hv_specs', primitive)
-
-
-class TestComputeNodeObject(test_objects._LocalTest,
- _TestComputeNodeObject):
- pass
-
-
-class TestRemoteComputeNodeObject(test_objects._RemoteTest,
- _TestComputeNodeObject):
- pass
diff --git a/nova/tests/objects/test_dns_domain.py b/nova/tests/objects/test_dns_domain.py
deleted file mode 100644
index aff1a0eb92..0000000000
--- a/nova/tests/objects/test_dns_domain.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright (C) 2014, Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import db
-from nova.objects import dns_domain
-from nova.tests.objects import test_objects
-
-
-fake_dnsd = {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': 0,
- 'domain': 'blah.example.com',
- 'scope': 'private',
- 'availability_zone': 'overthere',
- 'project_id': '867530niner',
-}
-
-
-class _TestDNSDomain(object):
- @staticmethod
- def _compare(test, db, obj):
- for field, value in db.items():
- test.assertEqual(db[field], obj[field])
-
- def test_get_by_domain(self):
- with mock.patch.object(db, 'dnsdomain_get') as get:
- get.return_value = fake_dnsd
- dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
- self._compare(self, fake_dnsd, dnsd)
-
- def test_register_for_zone(self):
- dns_domain.DNSDomain.register_for_zone(self.context.elevated(),
- 'domain', 'zone')
- dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
- self.assertEqual('domain', dnsd.domain)
- self.assertEqual('zone', dnsd.availability_zone)
-
- def test_register_for_project(self):
- dns_domain.DNSDomain.register_for_project(self.context.elevated(),
- 'domain', 'project')
- dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
- self.assertEqual('domain', dnsd.domain)
- self.assertEqual('project', dnsd.project_id)
-
- def test_delete_by_domain(self):
- dns_domain.DNSDomain.register_for_zone(self.context.elevated(),
- 'domain', 'zone')
- dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
- self.assertEqual('domain', dnsd.domain)
- self.assertEqual('zone', dnsd.availability_zone)
-
- dns_domain.DNSDomain.delete_by_domain(self.context.elevated(),
- 'domain')
- dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
- self.assertIsNone(dnsd)
-
- def test_get_all(self):
- with mock.patch.object(db, 'dnsdomain_get_all') as get:
- get.return_value = [fake_dnsd]
- dns_domain.DNSDomainList.get_all(self.context)
-
-
-class TestDNSDomainObject(test_objects._LocalTest,
- _TestDNSDomain):
- pass
-
-
-class TestRemoteDNSDomainObject(test_objects._RemoteTest,
- _TestDNSDomain):
- pass
diff --git a/nova/tests/objects/test_ec2.py b/nova/tests/objects/test_ec2.py
deleted file mode 100644
index 9b3dc38b18..0000000000
--- a/nova/tests/objects/test_ec2.py
+++ /dev/null
@@ -1,192 +0,0 @@
-# Copyright (C) 2014, Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import db
-from nova.objects import ec2 as ec2_obj
-from nova.tests.objects import test_objects
-
-
-fake_map = {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': 0,
- 'id': 1,
- 'uuid': 'fake-uuid-2',
-}
-
-
-class _TestEC2InstanceMapping(object):
- @staticmethod
- def _compare(test, db, obj):
- for field, value in db.items():
- test.assertEqual(db[field], obj[field])
-
- def test_create(self):
- imap = ec2_obj.EC2InstanceMapping()
- imap.uuid = 'fake-uuid-2'
-
- with mock.patch.object(db, 'ec2_instance_create') as create:
- create.return_value = fake_map
- imap.create(self.context)
-
- self.assertEqual(self.context, imap._context)
- imap._context = None
- self._compare(self, fake_map, imap)
-
- def test_get_by_uuid(self):
- with mock.patch.object(db, 'ec2_instance_get_by_uuid') as get:
- get.return_value = fake_map
- imap = ec2_obj.EC2InstanceMapping.get_by_uuid(self.context,
- 'fake-uuid-2')
- self._compare(self, fake_map, imap)
-
- def test_get_by_ec2_id(self):
- with mock.patch.object(db, 'ec2_instance_get_by_id') as get:
- get.return_value = fake_map
- imap = ec2_obj.EC2InstanceMapping.get_by_id(self.context, 1)
- self._compare(self, fake_map, imap)
-
-
-class TestEC2InstanceMapping(test_objects._LocalTest, _TestEC2InstanceMapping):
- pass
-
-
-class TestRemoteEC2InstanceMapping(test_objects._RemoteTest,
- _TestEC2InstanceMapping):
- pass
-
-
-class _TestEC2VolumeMapping(object):
- @staticmethod
- def _compare(test, db, obj):
- for field, value in db.items():
- test.assertEqual(db[field], obj[field])
-
- def test_create(self):
- vmap = ec2_obj.EC2VolumeMapping()
- vmap.uuid = 'fake-uuid-2'
-
- with mock.patch.object(db, 'ec2_volume_create') as create:
- create.return_value = fake_map
- vmap.create(self.context)
-
- self.assertEqual(self.context, vmap._context)
- vmap._context = None
- self._compare(self, fake_map, vmap)
-
- def test_get_by_uuid(self):
- with mock.patch.object(db, 'ec2_volume_get_by_uuid') as get:
- get.return_value = fake_map
- vmap = ec2_obj.EC2VolumeMapping.get_by_uuid(self.context,
- 'fake-uuid-2')
- self._compare(self, fake_map, vmap)
-
- def test_get_by_ec2_id(self):
- with mock.patch.object(db, 'ec2_volume_get_by_id') as get:
- get.return_value = fake_map
- vmap = ec2_obj.EC2VolumeMapping.get_by_id(self.context, 1)
- self._compare(self, fake_map, vmap)
-
-
-class TestEC2VolumeMapping(test_objects._LocalTest, _TestEC2VolumeMapping):
- pass
-
-
-class TestRemoteEC2VolumeMapping(test_objects._RemoteTest,
- _TestEC2VolumeMapping):
- pass
-
-
-class _TestEC2SnapshotMapping(object):
- @staticmethod
- def _compare(test, db, obj):
- for field, value in db.items():
- test.assertEqual(db[field], obj[field])
-
- def test_create(self):
- smap = ec2_obj.EC2SnapshotMapping()
- smap.uuid = 'fake-uuid-2'
-
- with mock.patch.object(db, 'ec2_snapshot_create') as create:
- create.return_value = fake_map
- smap.create(self.context)
-
- self.assertEqual(self.context, smap._context)
- smap._context = None
- self._compare(self, fake_map, smap)
-
- def test_get_by_uuid(self):
- with mock.patch.object(db, 'ec2_snapshot_get_by_uuid') as get:
- get.return_value = fake_map
- smap = ec2_obj.EC2SnapshotMapping.get_by_uuid(self.context,
- 'fake-uuid-2')
- self._compare(self, fake_map, smap)
-
- def test_get_by_ec2_id(self):
- with mock.patch.object(db, 'ec2_snapshot_get_by_ec2_id') as get:
- get.return_value = fake_map
- smap = ec2_obj.EC2SnapshotMapping.get_by_id(self.context, 1)
- self._compare(self, fake_map, smap)
-
-
-class TestEC2SnapshotMapping(test_objects._LocalTest, _TestEC2SnapshotMapping):
- pass
-
-
-class TestRemoteEC2SnapshotMapping(test_objects._RemoteTest,
- _TestEC2SnapshotMapping):
- pass
-
-
-class _TestS3ImageMapping(object):
- @staticmethod
- def _compare(test, db, obj):
- for field, value in db.items():
- test.assertEqual(db[field], obj[field])
-
- def test_create(self):
- s3imap = ec2_obj.S3ImageMapping()
- s3imap.uuid = 'fake-uuid-2'
-
- with mock.patch.object(db, 's3_image_create') as create:
- create.return_value = fake_map
- s3imap.create(self.context)
-
- self.assertEqual(self.context, s3imap._context)
- s3imap._context = None
- self._compare(self, fake_map, s3imap)
-
- def test_get_by_uuid(self):
- with mock.patch.object(db, 's3_image_get_by_uuid') as get:
- get.return_value = fake_map
- s3imap = ec2_obj.S3ImageMapping.get_by_uuid(self.context,
- 'fake-uuid-2')
- self._compare(self, fake_map, s3imap)
-
- def test_get_by_s3_id(self):
- with mock.patch.object(db, 's3_image_get') as get:
- get.return_value = fake_map
- s3imap = ec2_obj.S3ImageMapping.get_by_id(self.context, 1)
- self._compare(self, fake_map, s3imap)
-
-
-class TestS3ImageMapping(test_objects._LocalTest, _TestS3ImageMapping):
- pass
-
-
-class TestRemoteS3ImageMapping(test_objects._RemoteTest, _TestS3ImageMapping):
- pass
diff --git a/nova/tests/objects/test_external_event.py b/nova/tests/objects/test_external_event.py
deleted file mode 100644
index 4674e3d315..0000000000
--- a/nova/tests/objects/test_external_event.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright 2014 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova.objects import external_event as external_event_obj
-from nova.tests.objects import test_objects
-
-
-class _TestInstanceExternalEventObject(object):
- def test_make_key(self):
- key = external_event_obj.InstanceExternalEvent.make_key('foo', 'bar')
- self.assertEqual('foo-bar', key)
-
- def test_make_key_no_tag(self):
- key = external_event_obj.InstanceExternalEvent.make_key('foo')
- self.assertEqual('foo', key)
-
- def test_key(self):
- event = external_event_obj.InstanceExternalEvent(name='foo',
- tag='bar')
- with mock.patch.object(event, 'make_key') as make_key:
- make_key.return_value = 'key'
- self.assertEqual('key', event.key)
- make_key.assert_called_once_with('foo', 'bar')
-
-
-class TestInstanceExternalEventObject(test_objects._LocalTest,
- _TestInstanceExternalEventObject):
- pass
-
-
-class TestRemoteInstanceExternalEventObject(test_objects._RemoteTest,
- _TestInstanceExternalEventObject):
- pass
diff --git a/nova/tests/objects/test_fixed_ip.py b/nova/tests/objects/test_fixed_ip.py
deleted file mode 100644
index a0e1b3aa16..0000000000
--- a/nova/tests/objects/test_fixed_ip.py
+++ /dev/null
@@ -1,339 +0,0 @@
-# Copyright 2014 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-import iso8601
-import mock
-import netaddr
-from oslo.utils import timeutils
-
-from nova import exception
-from nova.objects import fixed_ip
-from nova.tests import fake_instance
-from nova.tests.objects import test_network
-from nova.tests.objects import test_objects
-
-
-fake_fixed_ip = {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': False,
- 'id': 123,
- 'address': '192.168.1.100',
- 'network_id': None,
- 'virtual_interface_id': None,
- 'instance_uuid': None,
- 'allocated': False,
- 'leased': False,
- 'reserved': False,
- 'host': None,
- 'network': None,
- 'virtual_interface': None,
- 'floating_ips': [],
- }
-
-
-class _TestFixedIPObject(object):
- def _compare(self, obj, db_obj):
- for field in obj.fields:
- if field in ('default_route', 'floating_ips'):
- continue
- if field in fixed_ip.FIXED_IP_OPTIONAL_ATTRS:
- if obj.obj_attr_is_set(field) and db_obj[field] is not None:
- obj_val = obj[field].uuid
- db_val = db_obj[field]['uuid']
- else:
- continue
- else:
- obj_val = obj[field]
- db_val = db_obj[field]
- if isinstance(obj_val, netaddr.IPAddress):
- obj_val = str(obj_val)
- self.assertEqual(db_val, obj_val)
-
- @mock.patch('nova.db.fixed_ip_get')
- def test_get_by_id(self, get):
- get.return_value = fake_fixed_ip
- fixedip = fixed_ip.FixedIP.get_by_id(self.context, 123)
- get.assert_called_once_with(self.context, 123, get_network=False)
- self._compare(fixedip, fake_fixed_ip)
-
- @mock.patch('nova.db.fixed_ip_get')
- @mock.patch('nova.db.network_get')
- def test_get_by_id_with_extras(self, network_get, fixed_get):
- db_fixed = dict(fake_fixed_ip,
- network=test_network.fake_network)
- fixed_get.return_value = db_fixed
- fixedip = fixed_ip.FixedIP.get_by_id(self.context, 123,
- expected_attrs=['network'])
- fixed_get.assert_called_once_with(self.context, 123, get_network=True)
- self._compare(fixedip, db_fixed)
- self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid)
- self.assertFalse(network_get.called)
-
- @mock.patch('nova.db.fixed_ip_get_by_address')
- def test_get_by_address(self, get):
- get.return_value = fake_fixed_ip
- fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4')
- get.assert_called_once_with(self.context, '1.2.3.4',
- columns_to_join=[])
- self._compare(fixedip, fake_fixed_ip)
-
- @mock.patch('nova.db.fixed_ip_get_by_address')
- @mock.patch('nova.db.network_get')
- @mock.patch('nova.db.instance_get')
- def test_get_by_address_with_extras(self, instance_get, network_get,
- fixed_get):
- db_fixed = dict(fake_fixed_ip, network=test_network.fake_network,
- instance=fake_instance.fake_db_instance())
- fixed_get.return_value = db_fixed
- fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4',
- expected_attrs=['network',
- 'instance'])
- fixed_get.assert_called_once_with(self.context, '1.2.3.4',
- columns_to_join=['network',
- 'instance'])
- self._compare(fixedip, db_fixed)
- self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid)
- self.assertEqual(db_fixed['instance']['uuid'], fixedip.instance.uuid)
- self.assertFalse(network_get.called)
- self.assertFalse(instance_get.called)
-
- @mock.patch('nova.db.fixed_ip_get_by_address')
- @mock.patch('nova.db.network_get')
- @mock.patch('nova.db.instance_get')
- def test_get_by_address_with_extras_deleted_instance(self, instance_get,
- network_get,
- fixed_get):
- db_fixed = dict(fake_fixed_ip, network=test_network.fake_network,
- instance=None)
- fixed_get.return_value = db_fixed
- fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4',
- expected_attrs=['network',
- 'instance'])
- fixed_get.assert_called_once_with(self.context, '1.2.3.4',
- columns_to_join=['network',
- 'instance'])
- self._compare(fixedip, db_fixed)
- self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid)
- self.assertIsNone(fixedip.instance)
- self.assertFalse(network_get.called)
- self.assertFalse(instance_get.called)
-
- @mock.patch('nova.db.fixed_ip_get_by_floating_address')
- def test_get_by_floating_address(self, get):
- get.return_value = fake_fixed_ip
- fixedip = fixed_ip.FixedIP.get_by_floating_address(self.context,
- '1.2.3.4')
- get.assert_called_once_with(self.context, '1.2.3.4')
- self._compare(fixedip, fake_fixed_ip)
-
- @mock.patch('nova.db.fixed_ip_get_by_floating_address')
- def test_get_by_floating_address_none(self, get):
- get.return_value = None
- fixedip = fixed_ip.FixedIP.get_by_floating_address(self.context,
- '1.2.3.4')
- get.assert_called_once_with(self.context, '1.2.3.4')
- self.assertIsNone(fixedip)
-
- @mock.patch('nova.db.fixed_ip_get_by_network_host')
- def test_get_by_network_and_host(self, get):
- get.return_value = fake_fixed_ip
- fixedip = fixed_ip.FixedIP.get_by_network_and_host(self.context,
- 123, 'host')
- get.assert_called_once_with(self.context, 123, 'host')
- self._compare(fixedip, fake_fixed_ip)
-
- @mock.patch('nova.db.fixed_ip_associate')
- def test_associate(self, associate):
- associate.return_value = fake_fixed_ip
- fixedip = fixed_ip.FixedIP.associate(self.context, '1.2.3.4',
- 'fake-uuid')
- associate.assert_called_with(self.context, '1.2.3.4', 'fake-uuid',
- network_id=None, reserved=False)
- self._compare(fixedip, fake_fixed_ip)
-
- @mock.patch('nova.db.fixed_ip_associate_pool')
- def test_associate_pool(self, associate):
- associate.return_value = fake_fixed_ip
- fixedip = fixed_ip.FixedIP.associate_pool(self.context, 123,
- 'fake-uuid', 'host')
- associate.assert_called_with(self.context, 123,
- instance_uuid='fake-uuid',
- host='host')
- self._compare(fixedip, fake_fixed_ip)
-
- @mock.patch('nova.db.fixed_ip_disassociate')
- def test_disassociate_by_address(self, disassociate):
- fixed_ip.FixedIP.disassociate_by_address(self.context, '1.2.3.4')
- disassociate.assert_called_with(self.context, '1.2.3.4')
-
- @mock.patch('nova.db.fixed_ip_disassociate_all_by_timeout')
- def test_disassociate_all_by_timeout(self, disassociate):
- now = timeutils.utcnow()
- now_tz = timeutils.parse_isotime(
- timeutils.isotime(now)).replace(
- tzinfo=iso8601.iso8601.Utc())
- disassociate.return_value = 123
- result = fixed_ip.FixedIP.disassociate_all_by_timeout(self.context,
- 'host', now)
- self.assertEqual(123, result)
- # NOTE(danms): be pedantic about timezone stuff
- args, kwargs = disassociate.call_args_list[0]
- self.assertEqual(now_tz, args[2])
- self.assertEqual((self.context, 'host'), args[:2])
- self.assertEqual({}, kwargs)
-
- @mock.patch('nova.db.fixed_ip_create')
- def test_create(self, create):
- create.return_value = fake_fixed_ip
- fixedip = fixed_ip.FixedIP(address='1.2.3.4')
- fixedip.create(self.context)
- create.assert_called_once_with(
- self.context, {'address': '1.2.3.4'})
- self._compare(fixedip, fake_fixed_ip)
-
- @mock.patch('nova.db.fixed_ip_update')
- def test_save(self, update):
- update.return_value = fake_fixed_ip
- fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4',
- instance_uuid='fake-uuid')
- self.assertRaises(exception.ObjectActionError, fixedip.save)
- fixedip.obj_reset_changes(['address'])
- fixedip.save()
- update.assert_called_once_with(self.context, '1.2.3.4',
- {'instance_uuid': 'fake-uuid'})
-
- @mock.patch('nova.db.fixed_ip_disassociate')
- def test_disassociate(self, disassociate):
- fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4',
- instance_uuid='fake-uuid')
- fixedip.obj_reset_changes()
- fixedip.disassociate()
- disassociate.assert_called_once_with(self.context, '1.2.3.4')
- self.assertIsNone(fixedip.instance_uuid)
-
- @mock.patch('nova.db.fixed_ip_get_all')
- def test_get_all(self, get_all):
- get_all.return_value = [fake_fixed_ip]
- fixedips = fixed_ip.FixedIPList.get_all(self.context)
- self.assertEqual(1, len(fixedips))
- get_all.assert_called_once_with(self.context)
- self._compare(fixedips[0], fake_fixed_ip)
-
- @mock.patch('nova.db.fixed_ip_get_by_instance')
- def test_get_by_instance(self, get):
- get.return_value = [fake_fixed_ip]
- fixedips = fixed_ip.FixedIPList.get_by_instance_uuid(self.context,
- 'fake-uuid')
- self.assertEqual(1, len(fixedips))
- get.assert_called_once_with(self.context, 'fake-uuid')
- self._compare(fixedips[0], fake_fixed_ip)
-
- @mock.patch('nova.db.fixed_ip_get_by_host')
- def test_get_by_host(self, get):
- get.return_value = [fake_fixed_ip]
- fixedips = fixed_ip.FixedIPList.get_by_host(self.context, 'host')
- self.assertEqual(1, len(fixedips))
- get.assert_called_once_with(self.context, 'host')
- self._compare(fixedips[0], fake_fixed_ip)
-
- @mock.patch('nova.db.fixed_ips_by_virtual_interface')
- def test_get_by_virtual_interface_id(self, get):
- get.return_value = [fake_fixed_ip]
- fixedips = fixed_ip.FixedIPList.get_by_virtual_interface_id(
- self.context, 123)
- self.assertEqual(1, len(fixedips))
- get.assert_called_once_with(self.context, 123)
- self._compare(fixedips[0], fake_fixed_ip)
-
- def test_floating_ips_do_not_lazy_load(self):
- fixedip = fixed_ip.FixedIP()
- self.assertRaises(NotImplementedError, lambda: fixedip.floating_ips)
-
- @mock.patch('nova.db.fixed_ip_bulk_create')
- def test_bulk_create(self, bulk):
- fixed_ips = [fixed_ip.FixedIP(address='192.168.1.1'),
- fixed_ip.FixedIP(address='192.168.1.2')]
- fixed_ip.FixedIPList.bulk_create(self.context, fixed_ips)
- bulk.assert_called_once_with(self.context,
- [{'address': '192.168.1.1'},
- {'address': '192.168.1.2'}])
-
- @mock.patch('nova.db.network_get_associated_fixed_ips')
- def test_get_by_network(self, get):
- info = {'address': '1.2.3.4',
- 'instance_uuid': 'fake-uuid',
- 'network_id': 0,
- 'vif_id': 1,
- 'vif_address': 'de:ad:be:ee:f0:00',
- 'instance_hostname': 'fake-host',
- 'instance_updated': datetime.datetime(1955, 11, 5),
- 'instance_created': datetime.datetime(1955, 11, 5),
- 'allocated': True,
- 'leased': True,
- 'default_route': True,
- }
- get.return_value = [info]
- fixed_ips = fixed_ip.FixedIPList.get_by_network(
- self.context, {'id': 0}, host='fake-host')
- get.assert_called_once_with(self.context, 0, host='fake-host')
- self.assertEqual(1, len(fixed_ips))
- fip = fixed_ips[0]
- self.assertEqual('1.2.3.4', str(fip.address))
- self.assertEqual('fake-uuid', fip.instance_uuid)
- self.assertEqual(0, fip.network_id)
- self.assertEqual(1, fip.virtual_interface_id)
- self.assertTrue(fip.allocated)
- self.assertTrue(fip.leased)
- self.assertEqual('fake-uuid', fip.instance.uuid)
- self.assertEqual('fake-host', fip.instance.hostname)
- self.assertIsInstance(fip.instance.created_at, datetime.datetime)
- self.assertIsInstance(fip.instance.updated_at, datetime.datetime)
- self.assertEqual(1, fip.virtual_interface.id)
- self.assertEqual(info['vif_address'], fip.virtual_interface.address)
-
- @mock.patch('nova.db.network_get_associated_fixed_ips')
- def test_backport_default_route(self, mock_get):
- info = {'address': '1.2.3.4',
- 'instance_uuid': 'fake-uuid',
- 'network_id': 0,
- 'vif_id': 1,
- 'vif_address': 'de:ad:be:ee:f0:00',
- 'instance_hostname': 'fake-host',
- 'instance_updated': datetime.datetime(1955, 11, 5),
- 'instance_created': datetime.datetime(1955, 11, 5),
- 'allocated': True,
- 'leased': True,
- 'default_route': True,
- }
- mock_get.return_value = [info]
- fixed_ips = fixed_ip.FixedIPList.get_by_network(
- self.context, {'id': 0}, host='fake-host')
- primitive = fixed_ips[0].obj_to_primitive()
- self.assertIn('default_route', primitive['nova_object.data'])
- fixed_ips[0].obj_make_compatible(primitive['nova_object.data'], '1.1')
- self.assertNotIn('default_route', primitive['nova_object.data'])
-
-
-class TestFixedIPObject(test_objects._LocalTest,
- _TestFixedIPObject):
- pass
-
-
-class TestRemoteFixedIPObject(test_objects._RemoteTest,
- _TestFixedIPObject):
- pass
diff --git a/nova/tests/objects/test_flavor.py b/nova/tests/objects/test_flavor.py
deleted file mode 100644
index 134d767f51..0000000000
--- a/nova/tests/objects/test_flavor.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import db
-from nova import exception
-from nova.objects import flavor as flavor_obj
-from nova.tests.objects import test_objects
-
-
-fake_flavor = {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': 0,
- 'id': 1,
- 'name': 'm1.foo',
- 'memory_mb': 1024,
- 'vcpus': 4,
- 'root_gb': 20,
- 'ephemeral_gb': 0,
- 'flavorid': 'm1.foo',
- 'swap': 0,
- 'rxtx_factor': 1.0,
- 'vcpu_weight': 1,
- 'disabled': False,
- 'is_public': True,
- 'extra_specs': {'foo': 'bar'},
- }
-
-
-class _TestFlavor(object):
- @staticmethod
- def _compare(test, db, obj):
- for field, value in db.items():
- test.assertEqual(db[field], obj[field])
-
- def test_get_by_id(self):
- with mock.patch.object(db, 'flavor_get') as get:
- get.return_value = fake_flavor
- flavor = flavor_obj.Flavor.get_by_id(self.context, 1)
- self._compare(self, fake_flavor, flavor)
-
- def test_get_by_name(self):
- with mock.patch.object(db, 'flavor_get_by_name') as get_by_name:
- get_by_name.return_value = fake_flavor
- flavor = flavor_obj.Flavor.get_by_name(self.context, 'm1.foo')
- self._compare(self, fake_flavor, flavor)
-
- def test_get_by_flavor_id(self):
- with mock.patch.object(db, 'flavor_get_by_flavor_id') as get_by_id:
- get_by_id.return_value = fake_flavor
- flavor = flavor_obj.Flavor.get_by_flavor_id(self.context,
- 'm1.foo')
- self._compare(self, fake_flavor, flavor)
-
- def test_add_access(self):
- elevated = self.context.elevated()
- flavor = flavor_obj.Flavor(context=elevated, flavorid='123')
- with mock.patch.object(db, 'flavor_access_add') as add:
- flavor.add_access('456')
- add.assert_called_once_with(elevated, '123', '456')
-
- def test_add_access_with_dirty_projects(self):
- flavor = flavor_obj.Flavor(context=self.context, projects=['1'])
- self.assertRaises(exception.ObjectActionError,
- flavor.add_access, '2')
-
- def test_remove_access(self):
- elevated = self.context.elevated()
- flavor = flavor_obj.Flavor(context=elevated, flavorid='123')
- with mock.patch.object(db, 'flavor_access_remove') as remove:
- flavor.remove_access('456')
- remove.assert_called_once_with(elevated, '123', '456')
-
- def test_create(self):
- flavor = flavor_obj.Flavor()
- flavor.name = 'm1.foo'
- flavor.extra_specs = fake_flavor['extra_specs']
-
- with mock.patch.object(db, 'flavor_create') as create:
- create.return_value = fake_flavor
- flavor.create(self.context)
-
- self.assertEqual(self.context, flavor._context)
- # NOTE(danms): Orphan this to avoid lazy-loads
- flavor._context = None
- self._compare(self, fake_flavor, flavor)
-
- def test_create_with_projects(self):
- context = self.context.elevated()
- flavor = flavor_obj.Flavor()
- flavor.name = 'm1.foo'
- flavor.extra_specs = fake_flavor['extra_specs']
- flavor.projects = ['project-1', 'project-2']
-
- db_flavor = dict(fake_flavor, projects=list(flavor.projects))
-
- with mock.patch.multiple(db, flavor_create=mock.DEFAULT,
- flavor_access_get_by_flavor_id=mock.DEFAULT
- ) as methods:
- methods['flavor_create'].return_value = db_flavor
- methods['flavor_access_get_by_flavor_id'].return_value = [
- {'project_id': 'project-1'},
- {'project_id': 'project-2'}]
- flavor.create(context)
- methods['flavor_create'].assert_called_once_with(
- context,
- {'name': 'm1.foo',
- 'extra_specs': fake_flavor['extra_specs']},
- projects=['project-1', 'project-2'])
-
- self.assertEqual(context, flavor._context)
- # NOTE(danms): Orphan this to avoid lazy-loads
- flavor._context = None
- self._compare(self, fake_flavor, flavor)
- self.assertEqual(['project-1', 'project-2'], flavor.projects)
-
- def test_create_with_id(self):
- flavor = flavor_obj.Flavor(id=123)
- self.assertRaises(exception.ObjectActionError, flavor.create,
- self.context)
-
- @mock.patch('nova.db.flavor_access_add')
- @mock.patch('nova.db.flavor_access_remove')
- @mock.patch('nova.db.flavor_extra_specs_delete')
- @mock.patch('nova.db.flavor_extra_specs_update_or_create')
- def test_save(self, mock_update, mock_delete, mock_remove, mock_add):
- ctxt = self.context.elevated()
- extra_specs = {'key1': 'value1', 'key2': 'value2'}
- projects = ['project-1', 'project-2']
- flavor = flavor_obj.Flavor(context=ctxt, flavorid='foo',
- extra_specs=extra_specs, projects=projects)
- flavor.obj_reset_changes()
-
- # Test deleting an extra_specs key and project
- del flavor.extra_specs['key1']
- del flavor.projects[-1]
- self.assertEqual(set(['extra_specs', 'projects']),
- flavor.obj_what_changed())
- flavor.save()
- self.assertEqual({'key2': 'value2'}, flavor.extra_specs)
- mock_delete.assert_called_once_with(ctxt, 'foo', 'key1')
- self.assertEqual(['project-1'], flavor.projects)
- mock_remove.assert_called_once_with(ctxt, 'foo', 'project-2')
-
- # Test updating an extra_specs key value
- flavor.extra_specs['key2'] = 'foobar'
- self.assertEqual(set(['extra_specs']), flavor.obj_what_changed())
- flavor.save()
- self.assertEqual({'key2': 'foobar'}, flavor.extra_specs)
- mock_update.assert_called_with(ctxt, 'foo', {'key2': 'foobar'})
-
- # Test adding an extra_specs and project
- flavor.extra_specs['key3'] = 'value3'
- flavor.projects.append('project-3')
- self.assertEqual(set(['extra_specs', 'projects']),
- flavor.obj_what_changed())
- flavor.save()
- self.assertEqual({'key2': 'foobar', 'key3': 'value3'},
- flavor.extra_specs)
- mock_update.assert_called_with(ctxt, 'foo', {'key2': 'foobar',
- 'key3': 'value3'})
- self.assertEqual(['project-1', 'project-3'], flavor.projects)
- mock_add.assert_called_once_with(ctxt, 'foo', 'project-3')
-
- @mock.patch('nova.db.flavor_create')
- @mock.patch('nova.db.flavor_extra_specs_delete')
- @mock.patch('nova.db.flavor_extra_specs_update_or_create')
- def test_save_deleted_extra_specs(self, mock_update, mock_delete,
- mock_create):
- mock_create.return_value = dict(fake_flavor,
- extra_specs={'key1': 'value1'})
- ctxt = self.context.elevated()
- flavor = flavor_obj.Flavor(context=ctxt)
- flavor.flavorid = 'test'
- flavor.extra_specs = {'key1': 'value1'}
- flavor.create()
- flavor.extra_specs = {}
- flavor.save()
- mock_delete.assert_called_once_with(ctxt, flavor.flavorid,
- 'key1')
- self.assertFalse(mock_update.called)
-
- def test_save_invalid_fields(self):
- flavor = flavor_obj.Flavor(id=123)
- self.assertRaises(exception.ObjectActionError, flavor.save)
-
- def test_destroy(self):
- flavor = flavor_obj.Flavor(id=123, name='foo')
- with mock.patch.object(db, 'flavor_destroy') as destroy:
- flavor.destroy(self.context)
- destroy.assert_called_once_with(self.context, flavor.name)
-
- def test_load_projects(self):
- flavor = flavor_obj.Flavor(context=self.context, flavorid='foo')
- with mock.patch.object(db, 'flavor_access_get_by_flavor_id') as get:
- get.return_value = [{'project_id': 'project-1'}]
- projects = flavor.projects
-
- self.assertEqual(['project-1'], projects)
- self.assertNotIn('projects', flavor.obj_what_changed())
-
- def test_load_anything_else(self):
- flavor = flavor_obj.Flavor()
- self.assertRaises(exception.ObjectActionError,
- getattr, flavor, 'name')
-
-
-class TestFlavor(test_objects._LocalTest, _TestFlavor):
- pass
-
-
-class TestFlavorRemote(test_objects._RemoteTest, _TestFlavor):
- pass
-
-
-class _TestFlavorList(object):
- def test_get_all(self):
- with mock.patch.object(db, 'flavor_get_all') as get_all:
- get_all.return_value = [fake_flavor]
- filters = {'min_memory_mb': 4096}
- flavors = flavor_obj.FlavorList.get_all(self.context,
- inactive=False,
- filters=filters,
- sort_key='id',
- sort_dir='asc')
- self.assertEqual(1, len(flavors))
- _TestFlavor._compare(self, fake_flavor, flavors[0])
- get_all.assert_called_once_with(self.context, inactive=False,
- filters=filters, sort_key='id',
- sort_dir='asc', limit=None,
- marker=None)
-
-
-class TestFlavorList(test_objects._LocalTest, _TestFlavorList):
- pass
-
-
-class TestFlavorListRemote(test_objects._RemoteTest, _TestFlavorList):
- pass
diff --git a/nova/tests/objects/test_floating_ip.py b/nova/tests/objects/test_floating_ip.py
deleted file mode 100644
index 39ac95a81b..0000000000
--- a/nova/tests/objects/test_floating_ip.py
+++ /dev/null
@@ -1,259 +0,0 @@
-# Copyright 2014 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-import netaddr
-
-from nova import exception
-from nova import objects
-from nova.objects import floating_ip
-from nova.tests.objects import test_fixed_ip
-from nova.tests.objects import test_network
-from nova.tests.objects import test_objects
-
-fake_floating_ip = {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': False,
- 'id': 123,
- 'address': '172.17.0.1',
- 'fixed_ip_id': None,
- 'project_id': None,
- 'host': None,
- 'auto_assigned': False,
- 'pool': None,
- 'interface': None,
- 'fixed_ip': None,
-}
-
-
-class _TestFloatingIPObject(object):
- def _compare(self, obj, db_obj):
- for field in obj.fields:
- if field in floating_ip.FLOATING_IP_OPTIONAL_ATTRS:
- if obj.obj_attr_is_set(field):
- obj_val = obj[field].id
- db_val = db_obj[field]['id']
- else:
- continue
- else:
- obj_val = obj[field]
- db_val = db_obj[field]
- if isinstance(obj_val, netaddr.IPAddress):
- obj_val = str(obj_val)
- self.assertEqual(db_val, obj_val)
-
- @mock.patch('nova.db.floating_ip_get')
- def test_get_by_id(self, get):
- db_floatingip = dict(fake_floating_ip,
- fixed_ip=test_fixed_ip.fake_fixed_ip)
- get.return_value = db_floatingip
- floatingip = floating_ip.FloatingIP.get_by_id(self.context, 123)
- get.assert_called_once_with(self.context, 123)
- self._compare(floatingip, db_floatingip)
-
- @mock.patch('nova.db.floating_ip_get_by_address')
- def test_get_by_address(self, get):
- get.return_value = fake_floating_ip
- floatingip = floating_ip.FloatingIP.get_by_address(self.context,
- '1.2.3.4')
- get.assert_called_once_with(self.context, '1.2.3.4')
- self._compare(floatingip, fake_floating_ip)
-
- @mock.patch('nova.db.floating_ip_get_pools')
- def test_get_pool_names(self, get):
- get.return_value = [{'name': 'a'}, {'name': 'b'}]
- self.assertEqual(['a', 'b'],
- floating_ip.FloatingIP.get_pool_names(self.context))
-
- @mock.patch('nova.db.floating_ip_allocate_address')
- def test_allocate_address(self, allocate):
- allocate.return_value = '1.2.3.4'
- self.assertEqual('1.2.3.4',
- floating_ip.FloatingIP.allocate_address(self.context,
- 'project',
- 'pool'))
- allocate.assert_called_with(self.context, 'project', 'pool',
- auto_assigned=False)
-
- @mock.patch('nova.db.floating_ip_fixed_ip_associate')
- def test_associate(self, associate):
- db_fixed = dict(test_fixed_ip.fake_fixed_ip,
- network=test_network.fake_network)
- associate.return_value = db_fixed
- floatingip = floating_ip.FloatingIP.associate(self.context,
- '172.17.0.1',
- '192.168.1.1',
- 'host')
- associate.assert_called_with(self.context, '172.17.0.1',
- '192.168.1.1', 'host')
- self.assertEqual(db_fixed['id'], floatingip.fixed_ip.id)
- self.assertEqual('172.17.0.1', str(floatingip.address))
- self.assertEqual('host', floatingip.host)
-
- @mock.patch('nova.db.floating_ip_deallocate')
- def test_deallocate(self, deallocate):
- floating_ip.FloatingIP.deallocate(self.context, '1.2.3.4')
- deallocate.assert_called_with(self.context, '1.2.3.4')
-
- @mock.patch('nova.db.floating_ip_destroy')
- def test_destroy(self, destroy):
- floating_ip.FloatingIP.destroy(self.context, '1.2.3.4')
- destroy.assert_called_with(self.context, '1.2.3.4')
-
- @mock.patch('nova.db.floating_ip_disassociate')
- def test_disassociate(self, disassociate):
- db_fixed = dict(test_fixed_ip.fake_fixed_ip,
- network=test_network.fake_network)
- disassociate.return_value = db_fixed
- floatingip = floating_ip.FloatingIP.disassociate(self.context,
- '1.2.3.4')
- disassociate.assert_called_with(self.context, '1.2.3.4')
- self.assertEqual(db_fixed['id'], floatingip.fixed_ip.id)
- self.assertEqual('1.2.3.4', str(floatingip.address))
-
- @mock.patch('nova.db.floating_ip_update')
- def test_save(self, update):
- update.return_value = fake_floating_ip
- floatingip = floating_ip.FloatingIP(context=self.context,
- id=123, address='1.2.3.4',
- host='foo')
- floatingip.obj_reset_changes(['address', 'id'])
- floatingip.save()
- self.assertEqual(set(), floatingip.obj_what_changed())
- update.assert_called_with(self.context, '1.2.3.4',
- {'host': 'foo'})
-
- def test_save_errors(self):
- floatingip = floating_ip.FloatingIP(context=self.context,
- id=123, host='foo')
- floatingip.obj_reset_changes()
- floating_ip.address = '1.2.3.4'
- self.assertRaises(exception.ObjectActionError, floatingip.save)
-
- floatingip.obj_reset_changes()
- floatingip.fixed_ip_id = 1
- self.assertRaises(exception.ObjectActionError, floatingip.save)
-
- @mock.patch('nova.db.floating_ip_update')
- def test_save_no_fixedip(self, update):
- update.return_value = fake_floating_ip
- floatingip = floating_ip.FloatingIP(context=self.context,
- id=123)
- floatingip.fixed_ip = objects.FixedIP(context=self.context,
- id=456)
- self.assertNotIn('fixed_ip', update.calls[1])
-
- @mock.patch('nova.db.floating_ip_get_all')
- def test_get_all(self, get):
- get.return_value = [fake_floating_ip]
- floatingips = floating_ip.FloatingIPList.get_all(self.context)
- self.assertEqual(1, len(floatingips))
- self._compare(floatingips[0], fake_floating_ip)
- get.assert_called_with(self.context)
-
- @mock.patch('nova.db.floating_ip_get_all_by_host')
- def test_get_by_host(self, get):
- get.return_value = [fake_floating_ip]
- floatingips = floating_ip.FloatingIPList.get_by_host(self.context,
- 'host')
- self.assertEqual(1, len(floatingips))
- self._compare(floatingips[0], fake_floating_ip)
- get.assert_called_with(self.context, 'host')
-
- @mock.patch('nova.db.floating_ip_get_all_by_project')
- def test_get_by_project(self, get):
- get.return_value = [fake_floating_ip]
- floatingips = floating_ip.FloatingIPList.get_by_project(self.context,
- 'project')
- self.assertEqual(1, len(floatingips))
- self._compare(floatingips[0], fake_floating_ip)
- get.assert_called_with(self.context, 'project')
-
- @mock.patch('nova.db.floating_ip_get_by_fixed_address')
- def test_get_by_fixed_address(self, get):
- get.return_value = [fake_floating_ip]
- floatingips = floating_ip.FloatingIPList.get_by_fixed_address(
- self.context, '1.2.3.4')
- self.assertEqual(1, len(floatingips))
- self._compare(floatingips[0], fake_floating_ip)
- get.assert_called_with(self.context, '1.2.3.4')
-
- @mock.patch('nova.db.floating_ip_get_by_fixed_ip_id')
- def test_get_by_fixed_ip_id(self, get):
- get.return_value = [fake_floating_ip]
- floatingips = floating_ip.FloatingIPList.get_by_fixed_ip_id(
- self.context, 123)
- self.assertEqual(1, len(floatingips))
- self._compare(floatingips[0], fake_floating_ip)
- get.assert_called_with(self.context, 123)
-
- @mock.patch('nova.db.instance_floating_address_get_all')
- def test_get_addresses_by_instance(self, get_all):
- expected = ['1.2.3.4', '4.5.6.7']
- get_all.return_value = list(expected)
- ips = floating_ip.FloatingIP.get_addresses_by_instance(
- self.context, {'uuid': '1234'})
- self.assertEqual(expected, ips)
- get_all.assert_called_once_with(self.context, '1234')
-
- def test_make_ip_info(self):
- result = objects.FloatingIPList.make_ip_info('1.2.3.4', 'pool', 'eth0')
- self.assertEqual({'address': '1.2.3.4', 'pool': 'pool',
- 'interface': 'eth0'},
- result)
-
- @mock.patch('nova.db.floating_ip_bulk_create')
- def test_bulk_create(self, create_mock):
- def fake_create(ctxt, ip_info):
- return [{'id': 1, 'address': ip['address'], 'fixed_ip_id': 1,
- 'project_id': 'foo', 'host': 'host',
- 'auto_assigned': False, 'pool': ip['pool'],
- 'interface': ip['interface'], 'fixed_ip': None,
- 'created_at': None, 'updated_at': None,
- 'deleted_at': None, 'deleted': False}
- for ip in ip_info]
-
- create_mock.side_effect = fake_create
- ips = [objects.FloatingIPList.make_ip_info('1.1.1.1', 'pool', 'eth0'),
- objects.FloatingIPList.make_ip_info('1.1.1.2', 'loop', 'eth1')]
- result = objects.FloatingIPList.create(None, ips)
- self.assertIs(result, None)
- result = objects.FloatingIPList.create(None, ips, want_result=True)
- self.assertEqual('1.1.1.2', str(result[1].address))
-
- @mock.patch('nova.db.floating_ip_bulk_destroy')
- def test_bulk_destroy(self, destroy_mock):
- ips = [{'address': '1.2.3.4'}, {'address': '4.5.6.7'}]
- objects.FloatingIPList.destroy(None, ips)
- destroy_mock.assert_called_once_with(None, ips)
-
- def test_backport_fixedip_1_1(self):
- floating = objects.FloatingIP()
- fixed = objects.FixedIP()
- floating.fixed_ip = fixed
- primitive = floating.obj_to_primitive(target_version='1.1')
- self.assertEqual('1.1',
- primitive['nova_object.data']['fixed_ip']['nova_object.version'])
-
-
-class TestFloatingIPObject(test_objects._LocalTest,
- _TestFloatingIPObject):
- pass
-
-
-class TestRemoteFloatingIPObject(test_objects._RemoteTest,
- _TestFloatingIPObject):
- pass
diff --git a/nova/tests/objects/test_hv_spec.py b/nova/tests/objects/test_hv_spec.py
deleted file mode 100644
index 589fb418c8..0000000000
--- a/nova/tests/objects/test_hv_spec.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.compute import arch
-from nova.compute import hvtype
-from nova.compute import vm_mode
-from nova import objects
-from nova.tests.objects import test_objects
-
-
-spec_dict = {
- 'arch': arch.I686,
- 'hv_type': hvtype.KVM,
- 'vm_mode': vm_mode.HVM
-}
-
-spec_list = [
- arch.I686,
- hvtype.KVM,
- vm_mode.HVM
-]
-
-
-class _TestHVSpecObject(object):
-
- def test_hv_spec_from_list(self):
- spec_obj = objects.HVSpec.from_list(spec_list)
- self.compare_obj(spec_obj, spec_dict)
-
- def test_hv_spec_to_list(self):
- spec_obj = objects.HVSpec()
- spec_obj.arch = arch.I686
- spec_obj.hv_type = hvtype.KVM
- spec_obj.vm_mode = vm_mode.HVM
- spec = spec_obj.to_list()
- self.assertEqual(spec_list, spec)
-
-
-class TestHVSpecObject(test_objects._LocalTest,
- _TestHVSpecObject):
- pass
-
-
-class TestRemoteHVSpecObject(test_objects._RemoteTest,
- _TestHVSpecObject):
- pass
diff --git a/nova/tests/objects/test_instance.py b/nova/tests/objects/test_instance.py
deleted file mode 100644
index c17061dd61..0000000000
--- a/nova/tests/objects/test_instance.py
+++ /dev/null
@@ -1,1196 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-import iso8601
-import mock
-import mox
-import netaddr
-from oslo.utils import timeutils
-
-from nova.cells import rpcapi as cells_rpcapi
-from nova.compute import flavors
-from nova import db
-from nova import exception
-from nova.network import model as network_model
-from nova import notifications
-from nova import objects
-from nova.objects import instance
-from nova.objects import instance_info_cache
-from nova.objects import instance_numa_topology
-from nova.objects import pci_device
-from nova.objects import security_group
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-from nova.tests.objects import test_instance_fault
-from nova.tests.objects import test_instance_info_cache
-from nova.tests.objects import test_instance_numa_topology
-from nova.tests.objects import test_instance_pci_requests
-from nova.tests.objects import test_objects
-from nova.tests.objects import test_security_group
-from nova import utils
-
-
-class _TestInstanceObject(object):
- @property
- def fake_instance(self):
- fake_instance = fakes.stub_instance(id=2,
- access_ipv4='1.2.3.4',
- access_ipv6='::1')
- fake_instance['cell_name'] = 'api!child'
- fake_instance['scheduled_at'] = None
- fake_instance['terminated_at'] = None
- fake_instance['deleted_at'] = None
- fake_instance['created_at'] = None
- fake_instance['updated_at'] = None
- fake_instance['launched_at'] = (
- fake_instance['launched_at'].replace(
- tzinfo=iso8601.iso8601.Utc(), microsecond=0))
- fake_instance['deleted'] = False
- fake_instance['info_cache']['instance_uuid'] = fake_instance['uuid']
- fake_instance['security_groups'] = []
- fake_instance['pci_devices'] = []
- fake_instance['user_id'] = self.context.user_id
- fake_instance['project_id'] = self.context.project_id
- return fake_instance
-
- def test_datetime_deserialization(self):
- red_letter_date = timeutils.parse_isotime(
- timeutils.isotime(datetime.datetime(1955, 11, 5)))
- inst = instance.Instance(uuid='fake-uuid', launched_at=red_letter_date)
- primitive = inst.obj_to_primitive()
- expected = {'nova_object.name': 'Instance',
- 'nova_object.namespace': 'nova',
- 'nova_object.version': '1.16',
- 'nova_object.data':
- {'uuid': 'fake-uuid',
- 'launched_at': '1955-11-05T00:00:00Z'},
- 'nova_object.changes': ['launched_at', 'uuid']}
- self.assertEqual(primitive, expected)
- inst2 = instance.Instance.obj_from_primitive(primitive)
- self.assertIsInstance(inst2.launched_at, datetime.datetime)
- self.assertEqual(inst2.launched_at, red_letter_date)
-
- def test_ip_deserialization(self):
- inst = instance.Instance(uuid='fake-uuid', access_ip_v4='1.2.3.4',
- access_ip_v6='::1')
- primitive = inst.obj_to_primitive()
- expected = {'nova_object.name': 'Instance',
- 'nova_object.namespace': 'nova',
- 'nova_object.version': '1.16',
- 'nova_object.data':
- {'uuid': 'fake-uuid',
- 'access_ip_v4': '1.2.3.4',
- 'access_ip_v6': '::1'},
- 'nova_object.changes': ['uuid', 'access_ip_v6',
- 'access_ip_v4']}
- self.assertEqual(primitive, expected)
- inst2 = instance.Instance.obj_from_primitive(primitive)
- self.assertIsInstance(inst2.access_ip_v4, netaddr.IPAddress)
- self.assertIsInstance(inst2.access_ip_v6, netaddr.IPAddress)
- self.assertEqual(inst2.access_ip_v4, netaddr.IPAddress('1.2.3.4'))
- self.assertEqual(inst2.access_ip_v6, netaddr.IPAddress('::1'))
-
- def test_get_without_expected(self):
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- db.instance_get_by_uuid(self.context, 'uuid',
- columns_to_join=[],
- use_slave=False
- ).AndReturn(self.fake_instance)
- self.mox.ReplayAll()
- inst = instance.Instance.get_by_uuid(self.context, 'uuid',
- expected_attrs=[])
- for attr in instance.INSTANCE_OPTIONAL_ATTRS:
- self.assertFalse(inst.obj_attr_is_set(attr))
- self.assertRemotes()
-
- def test_get_with_expected(self):
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
- self.mox.StubOutWithMock(
- db, 'instance_extra_get_by_instance_uuid')
-
- exp_cols = instance.INSTANCE_OPTIONAL_ATTRS[:]
- exp_cols.remove('fault')
- exp_cols.remove('numa_topology')
- exp_cols.remove('pci_requests')
-
- db.instance_get_by_uuid(
- self.context, 'uuid',
- columns_to_join=exp_cols,
- use_slave=False
- ).AndReturn(self.fake_instance)
- fake_faults = test_instance_fault.fake_faults
- db.instance_fault_get_by_instance_uuids(
- self.context, [self.fake_instance['uuid']]
- ).AndReturn(fake_faults)
- fake_topology = test_instance_numa_topology.fake_db_topology
- db.instance_extra_get_by_instance_uuid(
- self.context, self.fake_instance['uuid'],
- columns=['numa_topology']
- ).AndReturn(fake_topology)
- fake_requests = test_instance_pci_requests.fake_pci_requests
- db.instance_extra_get_by_instance_uuid(
- self.context, self.fake_instance['uuid'],
- columns=['pci_requests']
- ).AndReturn(fake_requests)
-
- self.mox.ReplayAll()
- inst = instance.Instance.get_by_uuid(
- self.context, 'uuid',
- expected_attrs=instance.INSTANCE_OPTIONAL_ATTRS)
- for attr in instance.INSTANCE_OPTIONAL_ATTRS:
- self.assertTrue(inst.obj_attr_is_set(attr))
- self.assertRemotes()
-
- def test_get_by_id(self):
- self.mox.StubOutWithMock(db, 'instance_get')
- db.instance_get(self.context, 'instid',
- columns_to_join=['info_cache',
- 'security_groups']
- ).AndReturn(self.fake_instance)
- self.mox.ReplayAll()
- inst = instance.Instance.get_by_id(self.context, 'instid')
- self.assertEqual(inst.uuid, self.fake_instance['uuid'])
- self.assertRemotes()
-
- def test_load(self):
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- fake_uuid = self.fake_instance['uuid']
- db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
- ).AndReturn(self.fake_instance)
- fake_inst2 = dict(self.fake_instance,
- system_metadata=[{'key': 'foo', 'value': 'bar'}])
- db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=['system_metadata'],
- use_slave=False
- ).AndReturn(fake_inst2)
- self.mox.ReplayAll()
- inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
- self.assertFalse(hasattr(inst, '_system_metadata'))
- sys_meta = inst.system_metadata
- self.assertEqual(sys_meta, {'foo': 'bar'})
- self.assertTrue(hasattr(inst, '_system_metadata'))
- # Make sure we don't run load again
- sys_meta2 = inst.system_metadata
- self.assertEqual(sys_meta2, {'foo': 'bar'})
- self.assertRemotes()
-
- def test_load_invalid(self):
- inst = instance.Instance(context=self.context, uuid='fake-uuid')
- self.assertRaises(exception.ObjectActionError,
- inst.obj_load_attr, 'foo')
-
- def test_get_remote(self):
- # isotime doesn't have microseconds and is always UTC
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- fake_instance = self.fake_instance
- db.instance_get_by_uuid(self.context, 'fake-uuid',
- columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
- ).AndReturn(fake_instance)
- self.mox.ReplayAll()
- inst = instance.Instance.get_by_uuid(self.context, 'fake-uuid')
- self.assertEqual(inst.id, fake_instance['id'])
- self.assertEqual(inst.launched_at, fake_instance['launched_at'])
- self.assertEqual(str(inst.access_ip_v4),
- fake_instance['access_ip_v4'])
- self.assertEqual(str(inst.access_ip_v6),
- fake_instance['access_ip_v6'])
- self.assertRemotes()
-
- def test_refresh(self):
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- fake_uuid = self.fake_instance['uuid']
- db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
- ).AndReturn(dict(self.fake_instance,
- host='orig-host'))
- db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
- ).AndReturn(dict(self.fake_instance,
- host='new-host'))
- self.mox.StubOutWithMock(instance_info_cache.InstanceInfoCache,
- 'refresh')
- instance_info_cache.InstanceInfoCache.refresh()
- self.mox.ReplayAll()
- inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
- self.assertEqual(inst.host, 'orig-host')
- inst.refresh()
- self.assertEqual(inst.host, 'new-host')
- self.assertRemotes()
- self.assertEqual(set([]), inst.obj_what_changed())
-
- def test_refresh_does_not_recurse(self):
- inst = instance.Instance(context=self.context, uuid='fake-uuid',
- metadata={})
- inst_copy = instance.Instance()
- inst_copy.uuid = inst.uuid
- self.mox.StubOutWithMock(instance.Instance, 'get_by_uuid')
- instance.Instance.get_by_uuid(self.context, uuid=inst.uuid,
- expected_attrs=['metadata'],
- use_slave=False
- ).AndReturn(inst_copy)
- self.mox.ReplayAll()
- self.assertRaises(exception.OrphanedObjectError, inst.refresh)
-
- def _save_test_helper(self, cell_type, save_kwargs):
- """Common code for testing save() for cells/non-cells."""
- if cell_type:
- self.flags(enable=True, cell_type=cell_type, group='cells')
- else:
- self.flags(enable=False, group='cells')
-
- old_ref = dict(self.fake_instance, host='oldhost', user_data='old',
- vm_state='old', task_state='old')
- fake_uuid = old_ref['uuid']
-
- expected_updates = dict(vm_state='meow', task_state='wuff',
- user_data='new')
-
- new_ref = dict(old_ref, host='newhost', **expected_updates)
- exp_vm_state = save_kwargs.get('expected_vm_state')
- exp_task_state = save_kwargs.get('expected_task_state')
- admin_reset = save_kwargs.get('admin_state_reset', False)
- if exp_vm_state:
- expected_updates['expected_vm_state'] = exp_vm_state
- if exp_task_state:
- if (exp_task_state == 'image_snapshot' and
- 'instance_version' in save_kwargs and
- save_kwargs['instance_version'] == '1.9'):
- expected_updates['expected_task_state'] = [
- 'image_snapshot', 'image_snapshot_pending']
- else:
- expected_updates['expected_task_state'] = exp_task_state
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
- self.mox.StubOutWithMock(db, 'instance_info_cache_update')
- cells_api_mock = self.mox.CreateMock(cells_rpcapi.CellsAPI)
- self.mox.StubOutWithMock(cells_api_mock,
- 'instance_update_at_top')
- self.mox.StubOutWithMock(cells_api_mock,
- 'instance_update_from_api')
- self.mox.StubOutWithMock(cells_rpcapi, 'CellsAPI',
- use_mock_anything=True)
- self.mox.StubOutWithMock(notifications, 'send_update')
- db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
- ).AndReturn(old_ref)
- db.instance_update_and_get_original(
- self.context, fake_uuid, expected_updates,
- update_cells=False,
- columns_to_join=['info_cache', 'security_groups',
- 'system_metadata']
- ).AndReturn((old_ref, new_ref))
- if cell_type == 'api':
- cells_rpcapi.CellsAPI().AndReturn(cells_api_mock)
- cells_api_mock.instance_update_from_api(
- self.context, mox.IsA(instance.Instance),
- exp_vm_state, exp_task_state, admin_reset)
- elif cell_type == 'compute':
- cells_rpcapi.CellsAPI().AndReturn(cells_api_mock)
- cells_api_mock.instance_update_at_top(self.context, new_ref)
- notifications.send_update(self.context, mox.IgnoreArg(),
- mox.IgnoreArg())
-
- self.mox.ReplayAll()
-
- inst = instance.Instance.get_by_uuid(self.context, old_ref['uuid'])
- if 'instance_version' in save_kwargs:
- inst.VERSION = save_kwargs.pop('instance_version')
- self.assertEqual('old', inst.task_state)
- self.assertEqual('old', inst.vm_state)
- self.assertEqual('old', inst.user_data)
- inst.vm_state = 'meow'
- inst.task_state = 'wuff'
- inst.user_data = 'new'
- inst.save(**save_kwargs)
- self.assertEqual('newhost', inst.host)
- self.assertEqual('meow', inst.vm_state)
- self.assertEqual('wuff', inst.task_state)
- self.assertEqual('new', inst.user_data)
- self.assertEqual(set([]), inst.obj_what_changed())
-
- def test_save(self):
- self._save_test_helper(None, {})
-
- def test_save_in_api_cell(self):
- self._save_test_helper('api', {})
-
- def test_save_in_compute_cell(self):
- self._save_test_helper('compute', {})
-
- def test_save_exp_vm_state(self):
- self._save_test_helper(None, {'expected_vm_state': ['meow']})
-
- def test_save_exp_task_state(self):
- self._save_test_helper(None, {'expected_task_state': ['meow']})
-
- def test_save_exp_task_state_havana(self):
- self._save_test_helper(None, {
- 'expected_task_state': 'image_snapshot',
- 'instance_version': '1.9'})
-
- def test_save_exp_vm_state_api_cell(self):
- self._save_test_helper('api', {'expected_vm_state': ['meow']})
-
- def test_save_exp_task_state_api_cell(self):
- self._save_test_helper('api', {'expected_task_state': ['meow']})
-
- def test_save_exp_task_state_api_cell_admin_reset(self):
- self._save_test_helper('api', {'admin_state_reset': True})
-
- def test_save_rename_sends_notification(self):
- # Tests that simply changing the 'display_name' on the instance
- # will send a notification.
- self.flags(enable=False, group='cells')
- old_ref = dict(self.fake_instance, display_name='hello')
- fake_uuid = old_ref['uuid']
- expected_updates = dict(display_name='goodbye')
- new_ref = dict(old_ref, **expected_updates)
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
- self.mox.StubOutWithMock(notifications, 'send_update')
- db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
- ).AndReturn(old_ref)
- db.instance_update_and_get_original(
- self.context, fake_uuid, expected_updates, update_cells=False,
- columns_to_join=['info_cache', 'security_groups',
- 'system_metadata']
- ).AndReturn((old_ref, new_ref))
- notifications.send_update(self.context, mox.IgnoreArg(),
- mox.IgnoreArg())
-
- self.mox.ReplayAll()
-
- inst = instance.Instance.get_by_uuid(self.context, old_ref['uuid'],
- use_slave=False)
- self.assertEqual('hello', inst.display_name)
- inst.display_name = 'goodbye'
- inst.save()
- self.assertEqual('goodbye', inst.display_name)
- self.assertEqual(set([]), inst.obj_what_changed())
-
- @mock.patch('nova.db.instance_update_and_get_original')
- @mock.patch('nova.objects.Instance._from_db_object')
- def test_save_does_not_refresh_pci_devices(self, mock_fdo, mock_update):
- # NOTE(danms): This tests that we don't update the pci_devices
- # field from the contents of the database. This is not because we
- # don't necessarily want to, but because the way pci_devices is
- # currently implemented it causes versioning issues. When that is
- # resolved, this test should go away.
- mock_update.return_value = None, None
- inst = instance.Instance(context=self.context, id=123)
- inst.uuid = 'foo'
- inst.pci_devices = pci_device.PciDeviceList()
- inst.save()
- self.assertNotIn('pci_devices',
- mock_fdo.call_args_list[0][1]['expected_attrs'])
-
- def test_get_deleted(self):
- fake_inst = dict(self.fake_instance, id=123, deleted=123)
- fake_uuid = fake_inst['uuid']
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
- ).AndReturn(fake_inst)
- self.mox.ReplayAll()
- inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
- # NOTE(danms): Make sure it's actually a bool
- self.assertEqual(inst.deleted, True)
-
- def test_get_not_cleaned(self):
- fake_inst = dict(self.fake_instance, id=123, cleaned=None)
- fake_uuid = fake_inst['uuid']
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
- ).AndReturn(fake_inst)
- self.mox.ReplayAll()
- inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
- # NOTE(mikal): Make sure it's actually a bool
- self.assertEqual(inst.cleaned, False)
-
- def test_get_cleaned(self):
- fake_inst = dict(self.fake_instance, id=123, cleaned=1)
- fake_uuid = fake_inst['uuid']
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
- ).AndReturn(fake_inst)
- self.mox.ReplayAll()
- inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
- # NOTE(mikal): Make sure it's actually a bool
- self.assertEqual(inst.cleaned, True)
-
- def test_with_info_cache(self):
- fake_inst = dict(self.fake_instance)
- fake_uuid = fake_inst['uuid']
- nwinfo1 = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
- nwinfo2 = network_model.NetworkInfo.hydrate([{'address': 'bar'}])
- nwinfo1_json = nwinfo1.json()
- nwinfo2_json = nwinfo2.json()
- fake_inst['info_cache'] = dict(
- test_instance_info_cache.fake_info_cache,
- network_info=nwinfo1_json,
- instance_uuid=fake_uuid)
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
- self.mox.StubOutWithMock(db, 'instance_info_cache_update')
- db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
- ).AndReturn(fake_inst)
- db.instance_info_cache_update(self.context, fake_uuid,
- {'network_info': nwinfo2_json})
- self.mox.ReplayAll()
- inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
- self.assertEqual(inst.info_cache.network_info, nwinfo1)
- self.assertEqual(inst.info_cache.instance_uuid, fake_uuid)
- inst.info_cache.network_info = nwinfo2
- inst.save()
-
- def test_with_info_cache_none(self):
- fake_inst = dict(self.fake_instance, info_cache=None)
- fake_uuid = fake_inst['uuid']
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=['info_cache'],
- use_slave=False
- ).AndReturn(fake_inst)
- self.mox.ReplayAll()
- inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
- ['info_cache'])
- self.assertIsNone(inst.info_cache)
-
- def test_with_security_groups(self):
- fake_inst = dict(self.fake_instance)
- fake_uuid = fake_inst['uuid']
- fake_inst['security_groups'] = [
- {'id': 1, 'name': 'secgroup1', 'description': 'fake-desc',
- 'user_id': 'fake-user', 'project_id': 'fake_project',
- 'created_at': None, 'updated_at': None, 'deleted_at': None,
- 'deleted': False},
- {'id': 2, 'name': 'secgroup2', 'description': 'fake-desc',
- 'user_id': 'fake-user', 'project_id': 'fake_project',
- 'created_at': None, 'updated_at': None, 'deleted_at': None,
- 'deleted': False},
- ]
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
- self.mox.StubOutWithMock(db, 'security_group_update')
- db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
- ).AndReturn(fake_inst)
- db.security_group_update(self.context, 1, {'description': 'changed'}
- ).AndReturn(fake_inst['security_groups'][0])
- self.mox.ReplayAll()
- inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
- self.assertEqual(len(inst.security_groups), 2)
- for index, group in enumerate(fake_inst['security_groups']):
- for key in group:
- self.assertEqual(group[key],
- inst.security_groups[index][key])
- self.assertIsInstance(inst.security_groups[index],
- security_group.SecurityGroup)
- self.assertEqual(inst.security_groups.obj_what_changed(), set())
- inst.security_groups[0].description = 'changed'
- inst.save()
- self.assertEqual(inst.security_groups.obj_what_changed(), set())
-
- def test_with_empty_security_groups(self):
- fake_inst = dict(self.fake_instance, security_groups=[])
- fake_uuid = fake_inst['uuid']
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
- ).AndReturn(fake_inst)
- self.mox.ReplayAll()
- inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
- self.assertEqual(0, len(inst.security_groups))
-
- def test_with_empty_pci_devices(self):
- fake_inst = dict(self.fake_instance, pci_devices=[])
- fake_uuid = fake_inst['uuid']
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=['pci_devices'],
- use_slave=False
- ).AndReturn(fake_inst)
- self.mox.ReplayAll()
- inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
- ['pci_devices'])
- self.assertEqual(len(inst.pci_devices), 0)
-
- def test_with_pci_devices(self):
- fake_inst = dict(self.fake_instance)
- fake_uuid = fake_inst['uuid']
- fake_inst['pci_devices'] = [
- {'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': None,
- 'id': 2,
- 'compute_node_id': 1,
- 'address': 'a1',
- 'vendor_id': 'v1',
- 'product_id': 'p1',
- 'dev_type': 't',
- 'status': 'allocated',
- 'dev_id': 'i',
- 'label': 'l',
- 'instance_uuid': fake_uuid,
- 'request_id': None,
- 'extra_info': '{}'},
- {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': None,
- 'id': 1,
- 'compute_node_id': 1,
- 'address': 'a',
- 'vendor_id': 'v',
- 'product_id': 'p',
- 'dev_type': 't',
- 'status': 'allocated',
- 'dev_id': 'i',
- 'label': 'l',
- 'instance_uuid': fake_uuid,
- 'request_id': None,
- 'extra_info': '{}'},
- ]
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=['pci_devices'],
- use_slave=False
- ).AndReturn(fake_inst)
- self.mox.ReplayAll()
- inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
- ['pci_devices'])
- self.assertEqual(len(inst.pci_devices), 2)
- self.assertEqual(inst.pci_devices[0].instance_uuid, fake_uuid)
- self.assertEqual(inst.pci_devices[1].instance_uuid, fake_uuid)
-
- def test_with_fault(self):
- fake_inst = dict(self.fake_instance)
- fake_uuid = fake_inst['uuid']
- fake_faults = [dict(x, instance_uuid=fake_uuid)
- for x in test_instance_fault.fake_faults['fake-uuid']]
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
- db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=[],
- use_slave=False
- ).AndReturn(self.fake_instance)
- db.instance_fault_get_by_instance_uuids(
- self.context, [fake_uuid]).AndReturn({fake_uuid: fake_faults})
- self.mox.ReplayAll()
- inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
- expected_attrs=['fault'])
- self.assertEqual(fake_faults[0], dict(inst.fault.items()))
- self.assertRemotes()
-
- def test_iteritems_with_extra_attrs(self):
- self.stubs.Set(instance.Instance, 'name', 'foo')
- inst = instance.Instance(uuid='fake-uuid')
- self.assertEqual(inst.items(),
- {'uuid': 'fake-uuid',
- 'name': 'foo',
- }.items())
-
- def _test_metadata_change_tracking(self, which):
- inst = instance.Instance(uuid='fake-uuid')
- setattr(inst, which, {})
- inst.obj_reset_changes()
- getattr(inst, which)['foo'] = 'bar'
- self.assertEqual(set([which]), inst.obj_what_changed())
- inst.obj_reset_changes()
- self.assertEqual(set(), inst.obj_what_changed())
-
- def test_metadata_change_tracking(self):
- self._test_metadata_change_tracking('metadata')
-
- def test_system_metadata_change_tracking(self):
- self._test_metadata_change_tracking('system_metadata')
-
- def test_create_stubbed(self):
- self.mox.StubOutWithMock(db, 'instance_create')
- vals = {'host': 'foo-host',
- 'memory_mb': 128,
- 'system_metadata': {'foo': 'bar'}}
- fake_inst = fake_instance.fake_db_instance(**vals)
- db.instance_create(self.context, vals).AndReturn(fake_inst)
- self.mox.ReplayAll()
- inst = instance.Instance(host='foo-host', memory_mb=128,
- system_metadata={'foo': 'bar'})
- inst.create(self.context)
-
- def test_create(self):
- self.mox.StubOutWithMock(db, 'instance_create')
- db.instance_create(self.context, {}).AndReturn(self.fake_instance)
- self.mox.ReplayAll()
- inst = instance.Instance()
- inst.create(self.context)
- self.assertEqual(self.fake_instance['id'], inst.id)
-
- def test_create_with_values(self):
- inst1 = instance.Instance(user_id=self.context.user_id,
- project_id=self.context.project_id,
- host='foo-host')
- inst1.create(self.context)
- self.assertEqual(inst1.host, 'foo-host')
- inst2 = instance.Instance.get_by_uuid(self.context, inst1.uuid)
- self.assertEqual(inst2.host, 'foo-host')
-
- def test_create_with_numa_topology(self):
- inst = instance.Instance(uuid=self.fake_instance['uuid'],
- numa_topology=instance_numa_topology.InstanceNUMATopology
- .obj_from_topology(
- test_instance_numa_topology.fake_numa_topology))
-
- inst.create(self.context)
- self.assertIsNotNone(inst.numa_topology)
- got_numa_topo = (
- instance_numa_topology.InstanceNUMATopology
- .get_by_instance_uuid(self.context, inst.uuid))
- self.assertEqual(inst.numa_topology.id, got_numa_topo.id)
-
- def test_recreate_fails(self):
- inst = instance.Instance(user_id=self.context.user_id,
- project_id=self.context.project_id,
- host='foo-host')
- inst.create(self.context)
- self.assertRaises(exception.ObjectActionError, inst.create,
- self.context)
-
- def test_create_with_special_things(self):
- self.mox.StubOutWithMock(db, 'instance_create')
- fake_inst = fake_instance.fake_db_instance()
- db.instance_create(self.context,
- {'host': 'foo-host',
- 'security_groups': ['foo', 'bar'],
- 'info_cache': {'network_info': '[]'},
- }
- ).AndReturn(fake_inst)
- self.mox.ReplayAll()
- secgroups = security_group.SecurityGroupList()
- secgroups.objects = []
- for name in ('foo', 'bar'):
- secgroup = security_group.SecurityGroup()
- secgroup.name = name
- secgroups.objects.append(secgroup)
- info_cache = instance_info_cache.InstanceInfoCache()
- info_cache.network_info = network_model.NetworkInfo()
- inst = instance.Instance(host='foo-host', security_groups=secgroups,
- info_cache=info_cache)
- inst.create(self.context)
-
- def test_destroy_stubbed(self):
- self.mox.StubOutWithMock(db, 'instance_destroy')
- deleted_at = datetime.datetime(1955, 11, 6)
- fake_inst = fake_instance.fake_db_instance(deleted_at=deleted_at,
- deleted=True)
- db.instance_destroy(self.context, 'fake-uuid',
- constraint=None).AndReturn(fake_inst)
- self.mox.ReplayAll()
- inst = instance.Instance(id=1, uuid='fake-uuid', host='foo')
- inst.destroy(self.context)
- self.assertEqual(timeutils.normalize_time(inst.deleted_at),
- timeutils.normalize_time(deleted_at))
- self.assertTrue(inst.deleted)
-
- def test_destroy(self):
- values = {'user_id': self.context.user_id,
- 'project_id': self.context.project_id}
- db_inst = db.instance_create(self.context, values)
- inst = instance.Instance(id=db_inst['id'], uuid=db_inst['uuid'])
- inst.destroy(self.context)
- self.assertRaises(exception.InstanceNotFound,
- db.instance_get_by_uuid, self.context,
- db_inst['uuid'])
-
- def test_destroy_host_constraint(self):
- values = {'user_id': self.context.user_id,
- 'project_id': self.context.project_id,
- 'host': 'foo'}
- db_inst = db.instance_create(self.context, values)
- inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
- inst.host = None
- self.assertRaises(exception.ObjectActionError,
- inst.destroy)
-
- def test_name_does_not_trigger_lazy_loads(self):
- values = {'user_id': self.context.user_id,
- 'project_id': self.context.project_id,
- 'host': 'foo'}
- db_inst = db.instance_create(self.context, values)
- inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
- self.assertFalse(inst.obj_attr_is_set('fault'))
- self.flags(instance_name_template='foo-%(uuid)s')
- self.assertEqual('foo-%s' % db_inst['uuid'], inst.name)
- self.assertFalse(inst.obj_attr_is_set('fault'))
-
- def test_from_db_object_not_overwrite_info_cache(self):
- info_cache = instance_info_cache.InstanceInfoCache()
- inst = instance.Instance(context=self.context,
- info_cache=info_cache)
- db_inst = fake_instance.fake_db_instance()
- db_inst['info_cache'] = dict(
- test_instance_info_cache.fake_info_cache)
- inst._from_db_object(self.context, inst, db_inst,
- expected_attrs=['info_cache'])
- self.assertIs(info_cache, inst.info_cache)
-
- def test_compat_strings(self):
- unicode_attributes = ['user_id', 'project_id', 'image_ref',
- 'kernel_id', 'ramdisk_id', 'hostname',
- 'key_name', 'key_data', 'host', 'node',
- 'user_data', 'availability_zone',
- 'display_name', 'display_description',
- 'launched_on', 'locked_by', 'os_type',
- 'architecture', 'vm_mode', 'root_device_name',
- 'default_ephemeral_device',
- 'default_swap_device', 'config_drive',
- 'cell_name']
- inst = instance.Instance()
- expected = {}
- for key in unicode_attributes:
- inst[key] = u'\u2603'
- expected[key] = '?'
- primitive = inst.obj_to_primitive(target_version='1.6')
- self.assertEqual(expected, primitive['nova_object.data'])
- self.assertEqual('1.6', primitive['nova_object.version'])
-
- def test_compat_pci_devices(self):
- inst = instance.Instance()
- inst.pci_devices = pci_device.PciDeviceList()
- primitive = inst.obj_to_primitive(target_version='1.5')
- self.assertNotIn('pci_devices', primitive)
-
- def test_compat_info_cache(self):
- inst = instance.Instance()
- inst.info_cache = instance_info_cache.InstanceInfoCache()
- primitive = inst.obj_to_primitive(target_version='1.9')
- self.assertEqual(
- '1.4',
- primitive['nova_object.data']['info_cache']['nova_object.version'])
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
- def test_get_with_pci_requests(self, mock_get):
- mock_get.return_value = objects.InstancePCIRequests()
- db_instance = db.instance_create(self.context, {
- 'user_id': self.context.user_id,
- 'project_id': self.context.project_id})
- instance = objects.Instance.get_by_uuid(
- self.context, db_instance['uuid'],
- expected_attrs=['pci_requests'])
- self.assertTrue(instance.obj_attr_is_set('pci_requests'))
- self.assertIsNotNone(instance.pci_requests)
-
- def _test_get_flavor(self, namespace):
- prefix = '%s_' % namespace if namespace is not None else ''
- db_inst = db.instance_create(self.context, {
- 'user_id': self.context.user_id,
- 'project_id': self.context.project_id,
- 'system_metadata': flavors.save_flavor_info(
- {}, flavors.get_default_flavor(), prefix)})
- db_flavor = flavors.extract_flavor(db_inst, prefix)
- inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
- flavor = inst.get_flavor(namespace)
- self.assertEqual(db_flavor['flavorid'], flavor.flavorid)
-
- def test_get_flavor(self):
- self._test_get_flavor(None)
- self._test_get_flavor('foo')
-
- def _test_set_flavor(self, namespace):
- prefix = '%s_' % namespace if namespace is not None else ''
- db_inst = db.instance_create(self.context, {
- 'user_id': self.context.user_id,
- 'project_id': self.context.project_id,
- })
- inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
- db_flavor = flavors.get_default_flavor()
- inst.set_flavor(db_flavor, namespace)
- db_inst = db.instance_get(self.context, db_inst['id'])
- self.assertEqual(
- db_flavor['flavorid'], flavors.extract_flavor(
- db_inst, prefix)['flavorid'])
-
- def test_set_flavor(self):
- self._test_set_flavor(None)
- self._test_set_flavor('foo')
-
- def test_delete_flavor(self):
- namespace = 'foo'
- prefix = '%s_' % namespace
- db_inst = db.instance_create(self.context, {
- 'user_id': self.context.user_id,
- 'project_id': self.context.project_id,
- 'system_metadata': flavors.save_flavor_info(
- {}, flavors.get_default_flavor(), prefix)})
- inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
- inst.delete_flavor(namespace)
- db_inst = db.instance_get(self.context, db_inst['id'])
- self.assertEqual({}, utils.instance_sys_meta(db_inst))
-
- def test_delete_flavor_no_namespace_fails(self):
- inst = instance.Instance(system_metadata={})
- self.assertRaises(KeyError, inst.delete_flavor, None)
- self.assertRaises(KeyError, inst.delete_flavor, '')
-
- @mock.patch.object(db, 'instance_metadata_delete')
- def test_delete_metadata_key(self, db_delete):
- inst = instance.Instance(context=self.context,
- id=1, uuid='fake-uuid')
- inst.metadata = {'foo': '1', 'bar': '2'}
- inst.obj_reset_changes()
- inst.delete_metadata_key('foo')
- self.assertEqual({'bar': '2'}, inst.metadata)
- self.assertEqual({}, inst.obj_get_changes())
- db_delete.assert_called_once_with(self.context, inst.uuid, 'foo')
-
- def test_reset_changes(self):
- inst = instance.Instance()
- inst.metadata = {'1985': 'present'}
- inst.system_metadata = {'1955': 'past'}
- self.assertEqual({}, inst._orig_metadata)
- inst.obj_reset_changes(['metadata'])
- self.assertEqual({'1985': 'present'}, inst._orig_metadata)
- self.assertEqual({}, inst._orig_system_metadata)
-
- def test_load_generic_calls_handler(self):
- inst = instance.Instance(context=self.context,
- uuid='fake-uuid')
- with mock.patch.object(inst, '_load_generic') as mock_load:
- def fake_load(name):
- inst.system_metadata = {}
-
- mock_load.side_effect = fake_load
- inst.system_metadata
- mock_load.assert_called_once_with('system_metadata')
-
- def test_load_fault_calls_handler(self):
- inst = instance.Instance(context=self.context,
- uuid='fake-uuid')
- with mock.patch.object(inst, '_load_fault') as mock_load:
- def fake_load():
- inst.fault = None
-
- mock_load.side_effect = fake_load
- inst.fault
- mock_load.assert_called_once_with()
-
- @mock.patch('nova.objects.Instance.get_by_uuid')
- def test_load_generic(self, mock_get):
- inst2 = instance.Instance(metadata={'foo': 'bar'})
- mock_get.return_value = inst2
- inst = instance.Instance(context=self.context,
- uuid='fake-uuid')
- inst.metadata
- self.assertEqual({'foo': 'bar'}, inst.metadata)
- mock_get.assert_called_once_with(self.context,
- uuid='fake-uuid',
- expected_attrs=['metadata'])
- self.assertNotIn('metadata', inst.obj_what_changed())
-
- @mock.patch('nova.db.instance_fault_get_by_instance_uuids')
- def test_load_fault(self, mock_get):
- fake_fault = test_instance_fault.fake_faults['fake-uuid'][0]
- mock_get.return_value = {'fake': [fake_fault]}
- inst = instance.Instance(context=self.context, uuid='fake')
- fault = inst.fault
- mock_get.assert_called_once_with(self.context, ['fake'])
- self.assertEqual(fake_fault['id'], fault.id)
- self.assertNotIn('metadata', inst.obj_what_changed())
-
-
-class TestInstanceObject(test_objects._LocalTest,
- _TestInstanceObject):
- pass
-
-
-class TestRemoteInstanceObject(test_objects._RemoteTest,
- _TestInstanceObject):
- pass
-
-
-class _TestInstanceListObject(object):
- def fake_instance(self, id, updates=None):
- fake_instance = fakes.stub_instance(id=2,
- access_ipv4='1.2.3.4',
- access_ipv6='::1')
- fake_instance['scheduled_at'] = None
- fake_instance['terminated_at'] = None
- fake_instance['deleted_at'] = None
- fake_instance['created_at'] = None
- fake_instance['updated_at'] = None
- fake_instance['launched_at'] = (
- fake_instance['launched_at'].replace(
- tzinfo=iso8601.iso8601.Utc(), microsecond=0))
- fake_instance['info_cache'] = {'network_info': '[]',
- 'instance_uuid': fake_instance['uuid']}
- fake_instance['security_groups'] = []
- fake_instance['deleted'] = 0
- if updates:
- fake_instance.update(updates)
- return fake_instance
-
- def test_get_all_by_filters(self):
- fakes = [self.fake_instance(1), self.fake_instance(2)]
- self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
- db.instance_get_all_by_filters(self.context, {'foo': 'bar'}, 'uuid',
- 'asc', limit=None, marker=None,
- columns_to_join=['metadata'],
- use_slave=False).AndReturn(fakes)
- self.mox.ReplayAll()
- inst_list = instance.InstanceList.get_by_filters(
- self.context, {'foo': 'bar'}, 'uuid', 'asc',
- expected_attrs=['metadata'], use_slave=False)
-
- for i in range(0, len(fakes)):
- self.assertIsInstance(inst_list.objects[i], instance.Instance)
- self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
- self.assertRemotes()
-
- def test_get_all_by_filters_works_for_cleaned(self):
- fakes = [self.fake_instance(1),
- self.fake_instance(2, updates={'deleted': 2,
- 'cleaned': None})]
- self.context.read_deleted = 'yes'
- self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
- db.instance_get_all_by_filters(self.context,
- {'deleted': True, 'cleaned': False},
- 'uuid', 'asc', limit=None, marker=None,
- columns_to_join=['metadata'],
- use_slave=False).AndReturn(
- [fakes[1]])
- self.mox.ReplayAll()
- inst_list = instance.InstanceList.get_by_filters(
- self.context, {'deleted': True, 'cleaned': False}, 'uuid', 'asc',
- expected_attrs=['metadata'], use_slave=False)
-
- self.assertEqual(1, len(inst_list))
- self.assertIsInstance(inst_list.objects[0], instance.Instance)
- self.assertEqual(inst_list.objects[0].uuid, fakes[1]['uuid'])
- self.assertRemotes()
-
- def test_get_by_host(self):
- fakes = [self.fake_instance(1),
- self.fake_instance(2)]
- self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
- db.instance_get_all_by_host(self.context, 'foo',
- columns_to_join=None,
- use_slave=False).AndReturn(fakes)
- self.mox.ReplayAll()
- inst_list = instance.InstanceList.get_by_host(self.context, 'foo')
- for i in range(0, len(fakes)):
- self.assertIsInstance(inst_list.objects[i], instance.Instance)
- self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
- self.assertEqual(inst_list.objects[i]._context, self.context)
- self.assertEqual(inst_list.obj_what_changed(), set())
- self.assertRemotes()
-
- def test_get_by_host_and_node(self):
- fakes = [self.fake_instance(1),
- self.fake_instance(2)]
- self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
- db.instance_get_all_by_host_and_node(self.context, 'foo', 'bar'
- ).AndReturn(fakes)
- self.mox.ReplayAll()
- inst_list = instance.InstanceList.get_by_host_and_node(self.context,
- 'foo', 'bar')
- for i in range(0, len(fakes)):
- self.assertIsInstance(inst_list.objects[i], instance.Instance)
- self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
- self.assertRemotes()
-
- def test_get_by_host_and_not_type(self):
- fakes = [self.fake_instance(1),
- self.fake_instance(2)]
- self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_not_type')
- db.instance_get_all_by_host_and_not_type(self.context, 'foo',
- type_id='bar').AndReturn(
- fakes)
- self.mox.ReplayAll()
- inst_list = instance.InstanceList.get_by_host_and_not_type(
- self.context, 'foo', 'bar')
- for i in range(0, len(fakes)):
- self.assertIsInstance(inst_list.objects[i], instance.Instance)
- self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
- self.assertRemotes()
-
- def test_get_hung_in_rebooting(self):
- fakes = [self.fake_instance(1),
- self.fake_instance(2)]
- dt = timeutils.isotime()
- self.mox.StubOutWithMock(db, 'instance_get_all_hung_in_rebooting')
- db.instance_get_all_hung_in_rebooting(self.context, dt).AndReturn(
- fakes)
- self.mox.ReplayAll()
- inst_list = instance.InstanceList.get_hung_in_rebooting(self.context,
- dt)
- for i in range(0, len(fakes)):
- self.assertIsInstance(inst_list.objects[i], instance.Instance)
- self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
- self.assertRemotes()
-
- def test_get_active_by_window_joined(self):
- fakes = [self.fake_instance(1), self.fake_instance(2)]
- # NOTE(mriedem): Send in a timezone-naive datetime since the
- # InstanceList.get_active_by_window_joined method should convert it
- # to tz-aware for the DB API call, which we'll assert with our stub.
- dt = timeutils.utcnow()
-
- def fake_instance_get_active_by_window_joined(context, begin, end,
- project_id, host):
- # make sure begin is tz-aware
- self.assertIsNotNone(begin.utcoffset())
- self.assertIsNone(end)
- return fakes
-
- with mock.patch.object(db, 'instance_get_active_by_window_joined',
- fake_instance_get_active_by_window_joined):
- inst_list = instance.InstanceList.get_active_by_window_joined(
- self.context, dt)
-
- for fake, obj in zip(fakes, inst_list.objects):
- self.assertIsInstance(obj, instance.Instance)
- self.assertEqual(obj.uuid, fake['uuid'])
- self.assertRemotes()
-
- def test_with_fault(self):
- fake_insts = [
- fake_instance.fake_db_instance(uuid='fake-uuid', host='host'),
- fake_instance.fake_db_instance(uuid='fake-inst2', host='host'),
- ]
- fake_faults = test_instance_fault.fake_faults
- self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
- self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
- db.instance_get_all_by_host(self.context, 'host',
- columns_to_join=[],
- use_slave=False
- ).AndReturn(fake_insts)
- db.instance_fault_get_by_instance_uuids(
- self.context, [x['uuid'] for x in fake_insts]
- ).AndReturn(fake_faults)
- self.mox.ReplayAll()
- instances = instance.InstanceList.get_by_host(self.context, 'host',
- expected_attrs=['fault'],
- use_slave=False)
- self.assertEqual(2, len(instances))
- self.assertEqual(fake_faults['fake-uuid'][0],
- dict(instances[0].fault.iteritems()))
- self.assertIsNone(instances[1].fault)
-
- def test_fill_faults(self):
- self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
-
- inst1 = instance.Instance(uuid='uuid1')
- inst2 = instance.Instance(uuid='uuid2')
- insts = [inst1, inst2]
- for inst in insts:
- inst.obj_reset_changes()
- db_faults = {
- 'uuid1': [{'id': 123,
- 'instance_uuid': 'uuid1',
- 'code': 456,
- 'message': 'Fake message',
- 'details': 'No details',
- 'host': 'foo',
- 'deleted': False,
- 'deleted_at': None,
- 'updated_at': None,
- 'created_at': None,
- }
- ]}
-
- db.instance_fault_get_by_instance_uuids(self.context,
- [x.uuid for x in insts],
- ).AndReturn(db_faults)
- self.mox.ReplayAll()
- inst_list = instance.InstanceList()
- inst_list._context = self.context
- inst_list.objects = insts
- faulty = inst_list.fill_faults()
- self.assertEqual(faulty, ['uuid1'])
- self.assertEqual(inst_list[0].fault.message,
- db_faults['uuid1'][0]['message'])
- self.assertIsNone(inst_list[1].fault)
- for inst in inst_list:
- self.assertEqual(inst.obj_what_changed(), set())
-
- def test_get_by_security_group(self):
- fake_secgroup = dict(test_security_group.fake_secgroup)
- fake_secgroup['instances'] = [
- fake_instance.fake_db_instance(id=1,
- system_metadata={'foo': 'bar'}),
- fake_instance.fake_db_instance(id=2),
- ]
-
- with mock.patch.object(db, 'security_group_get') as sgg:
- sgg.return_value = fake_secgroup
- secgroup = security_group.SecurityGroup()
- secgroup.id = fake_secgroup['id']
- instances = instance.InstanceList.get_by_security_group(
- self.context, secgroup)
-
- self.assertEqual(2, len(instances))
- self.assertEqual([1, 2], [x.id for x in instances])
- self.assertTrue(instances[0].obj_attr_is_set('system_metadata'))
- self.assertEqual({'foo': 'bar'}, instances[0].system_metadata)
-
-
-class TestInstanceListObject(test_objects._LocalTest,
- _TestInstanceListObject):
- pass
-
-
-class TestRemoteInstanceListObject(test_objects._RemoteTest,
- _TestInstanceListObject):
- pass
-
-
-class TestInstanceObjectMisc(test.NoDBTestCase):
- def test_expected_cols(self):
- self.stubs.Set(instance, '_INSTANCE_OPTIONAL_JOINED_FIELDS', ['bar'])
- self.assertEqual(['bar'], instance._expected_cols(['foo', 'bar']))
- self.assertIsNone(instance._expected_cols(None))
diff --git a/nova/tests/objects/test_instance_action.py b/nova/tests/objects/test_instance_action.py
deleted file mode 100644
index 37804035db..0000000000
--- a/nova/tests/objects/test_instance_action.py
+++ /dev/null
@@ -1,365 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import traceback
-
-import mock
-from oslo.utils import timeutils
-
-from nova import db
-from nova.objects import instance_action
-from nova import test
-from nova.tests.objects import test_objects
-
-
-NOW = timeutils.utcnow().replace(microsecond=0)
-fake_action = {
- 'created_at': NOW,
- 'deleted_at': None,
- 'updated_at': None,
- 'deleted': False,
- 'id': 123,
- 'action': 'fake-action',
- 'instance_uuid': 'fake-uuid',
- 'request_id': 'fake-request',
- 'user_id': 'fake-user',
- 'project_id': 'fake-project',
- 'start_time': NOW,
- 'finish_time': None,
- 'message': 'foo',
-}
-fake_event = {
- 'created_at': NOW,
- 'deleted_at': None,
- 'updated_at': None,
- 'deleted': False,
- 'id': 123,
- 'event': 'fake-event',
- 'action_id': 123,
- 'start_time': NOW,
- 'finish_time': None,
- 'result': 'fake-result',
- 'traceback': 'fake-tb',
-}
-
-
-class _TestInstanceActionObject(object):
- @mock.patch.object(db, 'action_get_by_request_id')
- def test_get_by_request_id(self, mock_get):
- context = self.context
- mock_get.return_value = fake_action
- action = instance_action.InstanceAction.get_by_request_id(
- context, 'fake-uuid', 'fake-request')
- self.compare_obj(action, fake_action)
- mock_get.assert_called_once_with(context,
- 'fake-uuid', 'fake-request')
-
- def test_pack_action_start(self):
- values = instance_action.InstanceAction.pack_action_start(
- self.context, 'fake-uuid', 'fake-action')
- self.assertEqual(values['request_id'], self.context.request_id)
- self.assertEqual(values['user_id'], self.context.user_id)
- self.assertEqual(values['project_id'], self.context.project_id)
- self.assertEqual(values['instance_uuid'], 'fake-uuid')
- self.assertEqual(values['action'], 'fake-action')
- self.assertEqual(values['start_time'].replace(tzinfo=None),
- self.context.timestamp)
-
- def test_pack_action_finish(self):
- timeutils.set_time_override(override_time=NOW)
- values = instance_action.InstanceAction.pack_action_finish(
- self.context, 'fake-uuid')
- self.assertEqual(values['request_id'], self.context.request_id)
- self.assertEqual(values['instance_uuid'], 'fake-uuid')
- self.assertEqual(values['finish_time'].replace(tzinfo=None), NOW)
-
- @mock.patch.object(db, 'action_start')
- def test_action_start(self, mock_start):
- test_class = instance_action.InstanceAction
- expected_packed_values = test_class.pack_action_start(
- self.context, 'fake-uuid', 'fake-action')
- mock_start.return_value = fake_action
- action = instance_action.InstanceAction.action_start(
- self.context, 'fake-uuid', 'fake-action', want_result=True)
- mock_start.assert_called_once_with(self.context,
- expected_packed_values)
- self.compare_obj(action, fake_action)
-
- @mock.patch.object(db, 'action_start')
- def test_action_start_no_result(self, mock_start):
- test_class = instance_action.InstanceAction
- expected_packed_values = test_class.pack_action_start(
- self.context, 'fake-uuid', 'fake-action')
- mock_start.return_value = fake_action
- action = instance_action.InstanceAction.action_start(
- self.context, 'fake-uuid', 'fake-action', want_result=False)
- mock_start.assert_called_once_with(self.context,
- expected_packed_values)
- self.assertIsNone(action)
-
- @mock.patch.object(db, 'action_finish')
- def test_action_finish(self, mock_finish):
- timeutils.set_time_override(override_time=NOW)
- test_class = instance_action.InstanceAction
- expected_packed_values = test_class.pack_action_finish(
- self.context, 'fake-uuid')
- mock_finish.return_value = fake_action
- action = instance_action.InstanceAction.action_finish(
- self.context, 'fake-uuid', want_result=True)
- mock_finish.assert_called_once_with(self.context,
- expected_packed_values)
- self.compare_obj(action, fake_action)
-
- @mock.patch.object(db, 'action_finish')
- def test_action_finish_no_result(self, mock_finish):
- timeutils.set_time_override(override_time=NOW)
- test_class = instance_action.InstanceAction
- expected_packed_values = test_class.pack_action_finish(
- self.context, 'fake-uuid')
- mock_finish.return_value = fake_action
- action = instance_action.InstanceAction.action_finish(
- self.context, 'fake-uuid', want_result=False)
- mock_finish.assert_called_once_with(self.context,
- expected_packed_values)
- self.assertIsNone(action)
-
- @mock.patch.object(db, 'action_finish')
- @mock.patch.object(db, 'action_start')
- def test_finish(self, mock_start, mock_finish):
- timeutils.set_time_override(override_time=NOW)
- expected_packed_action_start = {
- 'request_id': self.context.request_id,
- 'user_id': self.context.user_id,
- 'project_id': self.context.project_id,
- 'instance_uuid': 'fake-uuid',
- 'action': 'fake-action',
- 'start_time': self.context.timestamp,
- }
- expected_packed_action_finish = {
- 'request_id': self.context.request_id,
- 'instance_uuid': 'fake-uuid',
- 'finish_time': NOW,
- }
- mock_start.return_value = fake_action
- mock_finish.return_value = fake_action
- action = instance_action.InstanceAction.action_start(
- self.context, 'fake-uuid', 'fake-action')
- action.finish(self.context)
- mock_start.assert_called_once_with(self.context,
- expected_packed_action_start)
- mock_finish.assert_called_once_with(self.context,
- expected_packed_action_finish)
- self.compare_obj(action, fake_action)
-
- @mock.patch.object(db, 'actions_get')
- def test_get_list(self, mock_get):
- fake_actions = [dict(fake_action, id=1234),
- dict(fake_action, id=5678)]
- mock_get.return_value = fake_actions
- obj_list = instance_action.InstanceActionList.get_by_instance_uuid(
- self.context, 'fake-uuid')
- for index, action in enumerate(obj_list):
- self.compare_obj(action, fake_actions[index])
- mock_get.assert_called_once_with(self.context, 'fake-uuid')
-
-
-class TestInstanceActionObject(test_objects._LocalTest,
- _TestInstanceActionObject):
- pass
-
-
-class TestRemoteInstanceActionObject(test_objects._RemoteTest,
- _TestInstanceActionObject):
- pass
-
-
-class _TestInstanceActionEventObject(object):
- @mock.patch.object(db, 'action_event_get_by_id')
- def test_get_by_id(self, mock_get):
- mock_get.return_value = fake_event
- event = instance_action.InstanceActionEvent.get_by_id(
- self.context, 'fake-action-id', 'fake-event-id')
- self.compare_obj(event, fake_event)
- mock_get.assert_called_once_with(self.context,
- 'fake-action-id', 'fake-event-id')
-
- @mock.patch.object(db, 'action_event_start')
- def test_event_start(self, mock_start):
- timeutils.set_time_override(override_time=NOW)
- test_class = instance_action.InstanceActionEvent
- expected_packed_values = test_class.pack_action_event_start(
- self.context, 'fake-uuid', 'fake-event')
- mock_start.return_value = fake_event
- event = instance_action.InstanceActionEvent.event_start(
- self.context, 'fake-uuid', 'fake-event', want_result=True)
- mock_start.assert_called_once_with(self.context,
- expected_packed_values)
- self.compare_obj(event, fake_event)
-
- @mock.patch.object(db, 'action_event_start')
- def test_event_start_no_result(self, mock_start):
- timeutils.set_time_override(override_time=NOW)
- test_class = instance_action.InstanceActionEvent
- expected_packed_values = test_class.pack_action_event_start(
- self.context, 'fake-uuid', 'fake-event')
- mock_start.return_value = fake_event
- event = instance_action.InstanceActionEvent.event_start(
- self.context, 'fake-uuid', 'fake-event', want_result=False)
- mock_start.assert_called_once_with(self.context,
- expected_packed_values)
- self.assertIsNone(event)
-
- @mock.patch.object(db, 'action_event_finish')
- def test_event_finish(self, mock_finish):
- timeutils.set_time_override(override_time=NOW)
- test_class = instance_action.InstanceActionEvent
- expected_packed_values = test_class.pack_action_event_finish(
- self.context, 'fake-uuid', 'fake-event')
- expected_packed_values['finish_time'] = timeutils.utcnow()
- mock_finish.return_value = fake_event
- event = instance_action.InstanceActionEvent.event_finish(
- self.context, 'fake-uuid', 'fake-event', want_result=True)
- mock_finish.assert_called_once_with(self.context,
- expected_packed_values)
- self.compare_obj(event, fake_event)
-
- @mock.patch.object(db, 'action_event_finish')
- def test_event_finish_no_result(self, mock_finish):
- timeutils.set_time_override(override_time=NOW)
- test_class = instance_action.InstanceActionEvent
- expected_packed_values = test_class.pack_action_event_finish(
- self.context, 'fake-uuid', 'fake-event')
- expected_packed_values['finish_time'] = timeutils.utcnow()
- mock_finish.return_value = fake_event
- event = instance_action.InstanceActionEvent.event_finish(
- self.context, 'fake-uuid', 'fake-event', want_result=False)
- mock_finish.assert_called_once_with(self.context,
- expected_packed_values)
- self.assertIsNone(event)
-
- @mock.patch.object(traceback, 'format_tb')
- @mock.patch.object(db, 'action_event_finish')
- def test_event_finish_with_failure(self, mock_finish, mock_tb):
- timeutils.set_time_override(override_time=NOW)
- test_class = instance_action.InstanceActionEvent
- expected_packed_values = test_class.pack_action_event_finish(
- self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb')
- expected_packed_values['finish_time'] = timeutils.utcnow()
-
- mock_finish.return_value = fake_event
- event = test_class.event_finish_with_failure(
- self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb',
- want_result=True)
- mock_finish.assert_called_once_with(self.context,
- expected_packed_values)
- self.compare_obj(event, fake_event)
-
- @mock.patch.object(traceback, 'format_tb')
- @mock.patch.object(db, 'action_event_finish')
- def test_event_finish_with_failure_legacy(self, mock_finish, mock_tb):
- # Tests that exc_tb is serialized when it's not a string type.
- mock_tb.return_value = 'fake-tb'
- timeutils.set_time_override(override_time=NOW)
- test_class = instance_action.InstanceActionEvent
- expected_packed_values = test_class.pack_action_event_finish(
- self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb')
- expected_packed_values['finish_time'] = timeutils.utcnow()
-
- mock_finish.return_value = fake_event
- fake_tb = mock.sentinel.fake_tb
- event = test_class.event_finish_with_failure(
- self.context, 'fake-uuid', 'fake-event', exc_val='val',
- exc_tb=fake_tb, want_result=True)
- mock_finish.assert_called_once_with(self.context,
- expected_packed_values)
- self.compare_obj(event, fake_event)
- mock_tb.assert_called_once_with(fake_tb)
-
- @mock.patch.object(db, 'action_event_finish')
- def test_event_finish_with_failure_legacy_unicode(self, mock_finish):
- # Tests that traceback.format_tb is not called when exc_tb is unicode.
- timeutils.set_time_override(override_time=NOW)
- test_class = instance_action.InstanceActionEvent
- expected_packed_values = test_class.pack_action_event_finish(
- self.context, 'fake-uuid', 'fake-event', 'val', unicode('fake-tb'))
- expected_packed_values['finish_time'] = timeutils.utcnow()
-
- mock_finish.return_value = fake_event
- event = test_class.event_finish_with_failure(
- self.context, 'fake-uuid', 'fake-event', exc_val='val',
- exc_tb=unicode('fake-tb'), want_result=True)
- mock_finish.assert_called_once_with(self.context,
- expected_packed_values)
- self.compare_obj(event, fake_event)
-
- @mock.patch.object(traceback, 'format_tb')
- @mock.patch.object(db, 'action_event_finish')
- def test_event_finish_with_failure_no_result(self, mock_finish, mock_tb):
- # Tests that traceback.format_tb is not called when exc_tb is a str
- # and want_result is False, so no event should come back.
- mock_tb.return_value = 'fake-tb'
- timeutils.set_time_override(override_time=NOW)
- test_class = instance_action.InstanceActionEvent
- expected_packed_values = test_class.pack_action_event_finish(
- self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb')
- expected_packed_values['finish_time'] = timeutils.utcnow()
-
- mock_finish.return_value = fake_event
- event = test_class.event_finish_with_failure(
- self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb',
- want_result=False)
- mock_finish.assert_called_once_with(self.context,
- expected_packed_values)
- self.assertIsNone(event)
- self.assertFalse(mock_tb.called)
-
- @mock.patch.object(db, 'action_events_get')
- def test_get_by_action(self, mock_get):
- fake_events = [dict(fake_event, id=1234),
- dict(fake_event, id=5678)]
- mock_get.return_value = fake_events
- obj_list = instance_action.InstanceActionEventList.get_by_action(
- self.context, 'fake-action-id')
- for index, event in enumerate(obj_list):
- self.compare_obj(event, fake_events[index])
- mock_get.assert_called_once_with(self.context, 'fake-action-id')
-
- @mock.patch('nova.objects.instance_action.InstanceActionEvent.'
- 'pack_action_event_finish')
- @mock.patch('traceback.format_tb')
- def test_event_finish_with_failure_serialized(self, mock_format,
- mock_pack):
- mock_format.return_value = 'traceback'
- mock_pack.side_effect = test.TestingException
- self.assertRaises(
- test.TestingException,
- instance_action.InstanceActionEvent.event_finish_with_failure,
- self.context, 'fake-uuid', 'fake-event',
- exc_val=mock.sentinel.exc_val,
- exc_tb=mock.sentinel.exc_tb)
- mock_pack.assert_called_once_with(self.context, 'fake-uuid',
- 'fake-event',
- exc_val=str(mock.sentinel.exc_val),
- exc_tb='traceback')
- mock_format.assert_called_once_with(mock.sentinel.exc_tb)
-
-
-class TestInstanceActionEventObject(test_objects._LocalTest,
- _TestInstanceActionEventObject):
- pass
-
-
-class TestRemoteInstanceActionEventObject(test_objects._RemoteTest,
- _TestInstanceActionEventObject):
- pass
diff --git a/nova/tests/objects/test_instance_fault.py b/nova/tests/objects/test_instance_fault.py
deleted file mode 100644
index f1bf217d80..0000000000
--- a/nova/tests/objects/test_instance_fault.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import db
-from nova import exception
-from nova.objects import instance_fault
-from nova.tests.objects import test_objects
-
-
-fake_faults = {
- 'fake-uuid': [
- {'id': 1, 'instance_uuid': 'fake-uuid', 'code': 123, 'message': 'msg1',
- 'details': 'details', 'host': 'host', 'deleted': False,
- 'created_at': None, 'updated_at': None, 'deleted_at': None},
- {'id': 2, 'instance_uuid': 'fake-uuid', 'code': 456, 'message': 'msg2',
- 'details': 'details', 'host': 'host', 'deleted': False,
- 'created_at': None, 'updated_at': None, 'deleted_at': None},
- ]
- }
-
-
-class _TestInstanceFault(object):
- def test_get_latest_for_instance(self):
- self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
- db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid']
- ).AndReturn(fake_faults)
- self.mox.ReplayAll()
- fault = instance_fault.InstanceFault.get_latest_for_instance(
- self.context, 'fake-uuid')
- for key in fake_faults['fake-uuid'][0]:
- self.assertEqual(fake_faults['fake-uuid'][0][key], fault[key])
-
- def test_get_latest_for_instance_with_none(self):
- self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
- db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid']
- ).AndReturn({})
- self.mox.ReplayAll()
- fault = instance_fault.InstanceFault.get_latest_for_instance(
- self.context, 'fake-uuid')
- self.assertIsNone(fault)
-
- def test_get_by_instance(self):
- self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
- db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid']
- ).AndReturn(fake_faults)
- self.mox.ReplayAll()
- faults = instance_fault.InstanceFaultList.get_by_instance_uuids(
- self.context, ['fake-uuid'])
- for index, db_fault in enumerate(fake_faults['fake-uuid']):
- for key in db_fault:
- self.assertEqual(fake_faults['fake-uuid'][index][key],
- faults[index][key])
-
- def test_get_by_instance_with_none(self):
- self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
- db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid']
- ).AndReturn({})
- self.mox.ReplayAll()
- faults = instance_fault.InstanceFaultList.get_by_instance_uuids(
- self.context, ['fake-uuid'])
- self.assertEqual(0, len(faults))
-
- @mock.patch('nova.cells.rpcapi.CellsAPI.instance_fault_create_at_top')
- @mock.patch('nova.db.instance_fault_create')
- def _test_create(self, update_cells, mock_create, cells_fault_create):
- mock_create.return_value = fake_faults['fake-uuid'][1]
- fault = instance_fault.InstanceFault()
- fault.instance_uuid = 'fake-uuid'
- fault.code = 456
- fault.message = 'foo'
- fault.details = 'you screwed up'
- fault.host = 'myhost'
- fault.create(self.context)
- self.assertEqual(2, fault.id)
- mock_create.assert_called_once_with(self.context,
- {'instance_uuid': 'fake-uuid',
- 'code': 456,
- 'message': 'foo',
- 'details': 'you screwed up',
- 'host': 'myhost'})
- if update_cells:
- cells_fault_create.assert_called_once_with(
- self.context, fake_faults['fake-uuid'][1])
- else:
- self.assertFalse(cells_fault_create.called)
-
- def test_create_no_cells(self):
- self.flags(enable=False, group='cells')
- self._test_create(False)
-
- def test_create_api_cell(self):
- self.flags(cell_type='api', enable=True, group='cells')
- self._test_create(False)
-
- def test_create_compute_cell(self):
- self.flags(cell_type='compute', enable=True, group='cells')
- self._test_create(True)
-
- def test_create_already_created(self):
- fault = instance_fault.InstanceFault()
- fault.id = 1
- self.assertRaises(exception.ObjectActionError,
- fault.create, self.context)
-
-
-class TestInstanceFault(test_objects._LocalTest,
- _TestInstanceFault):
- pass
-
-
-class TestInstanceFaultRemote(test_objects._RemoteTest,
- _TestInstanceFault):
- pass
diff --git a/nova/tests/objects/test_instance_group.py b/nova/tests/objects/test_instance_group.py
deleted file mode 100644
index 77efcd014b..0000000000
--- a/nova/tests/objects/test_instance_group.py
+++ /dev/null
@@ -1,350 +0,0 @@
-# Copyright (c) 2013 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import uuid
-
-from nova.compute import flavors
-from nova import context
-from nova import db
-from nova import exception
-from nova.objects import instance_group
-from nova.tests import fake_notifier
-from nova.tests.objects import test_objects
-from nova.tests import utils as tests_utils
-
-
-class _TestInstanceGroupObjects(object):
-
- def setUp(self):
- super(_TestInstanceGroupObjects, self).setUp()
- self.user_id = 'fake_user'
- self.project_id = 'fake_project'
- self.context = context.RequestContext(self.user_id, self.project_id)
-
- def _get_default_values(self):
- return {'name': 'fake_name',
- 'user_id': self.user_id,
- 'project_id': self.project_id}
-
- def _create_instance_group(self, context, values, policies=None,
- members=None):
- return db.instance_group_create(context, values, policies=policies,
- members=members)
-
- def test_get_by_uuid(self):
- values = self._get_default_values()
- policies = ['policy1', 'policy2']
- members = ['instance_id1', 'instance_id2']
- db_result = self._create_instance_group(self.context, values,
- policies=policies,
- members=members)
- obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
- db_result.uuid)
- self.assertEqual(obj_result.members, members)
- self.assertEqual(obj_result.policies, policies)
-
- def test_get_by_instance_uuid(self):
- values = self._get_default_values()
- policies = ['policy1', 'policy2']
- members = ['instance_id1', 'instance_id2']
- db_result = self._create_instance_group(self.context, values,
- policies=policies,
- members=members)
- obj_result = instance_group.InstanceGroup.get_by_instance_uuid(
- self.context, 'instance_id1')
- self.assertEqual(obj_result.uuid, db_result.uuid)
-
- def test_refresh(self):
- values = self._get_default_values()
- db_result = self._create_instance_group(self.context, values)
- obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
- db_result.uuid)
- self.assertEqual(obj_result.name, 'fake_name')
- values = {'name': 'new_name', 'user_id': 'new_user',
- 'project_id': 'new_project'}
- db.instance_group_update(self.context, db_result['uuid'],
- values)
- obj_result.refresh()
- self.assertEqual(obj_result.name, 'new_name')
- self.assertEqual(set([]), obj_result.obj_what_changed())
-
- def test_save_simple(self):
- values = self._get_default_values()
- db_result = self._create_instance_group(self.context, values)
- obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
- db_result.uuid)
- self.assertEqual(obj_result.name, 'fake_name')
- obj_result.name = 'new_name'
- obj_result.save()
- result = db.instance_group_get(self.context, db_result['uuid'])
- self.assertEqual(result['name'], 'new_name')
-
- def test_save_policies(self):
- values = self._get_default_values()
- db_result = self._create_instance_group(self.context, values)
- obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
- db_result.uuid)
- policies = ['policy1', 'policy2']
- obj_result.policies = policies
- obj_result.save()
- result = db.instance_group_get(self.context, db_result['uuid'])
- self.assertEqual(result['policies'], policies)
-
- def test_save_members(self):
- values = self._get_default_values()
- db_result = self._create_instance_group(self.context, values)
- obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
- db_result.uuid)
- members = ['instance1', 'instance2']
- obj_result.members = members
- fake_notifier.NOTIFICATIONS = []
- obj_result.save()
- self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual('servergroup.update', msg.event_type)
- self.assertEqual(members, msg.payload['members'])
- result = db.instance_group_get(self.context, db_result['uuid'])
- self.assertEqual(result['members'], members)
-
- def test_create(self):
- group1 = instance_group.InstanceGroup()
- group1.uuid = 'fake-uuid'
- group1.name = 'fake-name'
- fake_notifier.NOTIFICATIONS = []
- group1.create(self.context)
- self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(group1.name, msg.payload['name'])
- self.assertEqual(group1.uuid, msg.payload['server_group_id'])
- self.assertEqual('servergroup.create', msg.event_type)
- group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
- group1.uuid)
- self.assertEqual(group1.id, group2.id)
- self.assertEqual(group1.uuid, group2.uuid)
- self.assertEqual(group1.name, group2.name)
- result = db.instance_group_get(self.context, group1.uuid)
- self.assertEqual(group1.id, result.id)
- self.assertEqual(group1.uuid, result.uuid)
- self.assertEqual(group1.name, result.name)
-
- def test_create_with_policies(self):
- group1 = instance_group.InstanceGroup()
- group1.policies = ['policy1', 'policy2']
- group1.create(self.context)
- group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
- group1.uuid)
- self.assertEqual(group1.id, group2.id)
- self.assertEqual(group1.policies, group2.policies)
-
- def test_create_with_members(self):
- group1 = instance_group.InstanceGroup()
- group1.members = ['instance1', 'instance2']
- group1.create(self.context)
- group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
- group1.uuid)
- self.assertEqual(group1.id, group2.id)
- self.assertEqual(group1.members, group2.members)
-
- def test_recreate_fails(self):
- group = instance_group.InstanceGroup()
- group.create(self.context)
- self.assertRaises(exception.ObjectActionError, group.create,
- self.context)
-
- def test_destroy(self):
- values = self._get_default_values()
- result = self._create_instance_group(self.context, values)
- group = instance_group.InstanceGroup()
- group.id = result.id
- group.uuid = result.uuid
- fake_notifier.NOTIFICATIONS = []
- group.destroy(self.context)
- self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual('servergroup.delete', msg.event_type)
- self.assertEqual(group.uuid, msg.payload['server_group_id'])
- self.assertRaises(exception.InstanceGroupNotFound,
- db.instance_group_get, self.context, result['uuid'])
-
- def _populate_instances(self):
- instances = [(str(uuid.uuid4()), 'f1', 'p1'),
- (str(uuid.uuid4()), 'f2', 'p1'),
- (str(uuid.uuid4()), 'f3', 'p2'),
- (str(uuid.uuid4()), 'f4', 'p2')]
- for instance in instances:
- values = self._get_default_values()
- values['uuid'] = instance[0]
- values['name'] = instance[1]
- values['project_id'] = instance[2]
- self._create_instance_group(self.context, values)
- return instances
-
- def test_list_all(self):
- self._populate_instances()
- inst_list = instance_group.InstanceGroupList.get_all(self.context)
- groups = db.instance_group_get_all(self.context)
- self.assertEqual(len(groups), len(inst_list.objects))
- self.assertEqual(len(groups), 4)
- for i in range(0, len(groups)):
- self.assertIsInstance(inst_list.objects[i],
- instance_group.InstanceGroup)
- self.assertEqual(inst_list.objects[i].uuid, groups[i]['uuid'])
-
- def test_list_by_project_id(self):
- self._populate_instances()
- project_ids = ['p1', 'p2']
- for id in project_ids:
- il = instance_group.InstanceGroupList.get_by_project_id(
- self.context, id)
- groups = db.instance_group_get_all_by_project_id(self.context, id)
- self.assertEqual(len(groups), len(il.objects))
- self.assertEqual(len(groups), 2)
- for i in range(0, len(groups)):
- self.assertIsInstance(il.objects[i],
- instance_group.InstanceGroup)
- self.assertEqual(il.objects[i].uuid, groups[i]['uuid'])
- self.assertEqual(il.objects[i].name, groups[i]['name'])
- self.assertEqual(il.objects[i].project_id, id)
-
- def test_get_by_name(self):
- self._populate_instances()
- ctxt = context.RequestContext('fake_user', 'p1')
- ig = instance_group.InstanceGroup.get_by_name(ctxt, 'f1')
- self.assertEqual('f1', ig.name)
-
- def test_get_by_hint(self):
- instances = self._populate_instances()
- for instance in instances:
- ctxt = context.RequestContext('fake_user', instance[2])
- ig = instance_group.InstanceGroup.get_by_hint(ctxt, instance[1])
- self.assertEqual(instance[1], ig.name)
- ig = instance_group.InstanceGroup.get_by_hint(ctxt, instance[0])
- self.assertEqual(instance[0], ig.uuid)
-
- def test_add_members(self):
- instance_ids = ['fakeid1', 'fakeid2']
- values = self._get_default_values()
- group = self._create_instance_group(self.context, values)
- fake_notifier.NOTIFICATIONS = []
- members = instance_group.InstanceGroup.add_members(self.context,
- group.uuid, instance_ids)
- self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual('servergroup.addmember', msg.event_type)
- self.assertEqual(group.uuid, msg.payload['server_group_id'])
- self.assertEqual(instance_ids, msg.payload['instance_uuids'])
- group = instance_group.InstanceGroup.get_by_uuid(self.context,
- group.uuid)
- for instance in instance_ids:
- self.assertIn(instance, members)
- self.assertIn(instance, group.members)
-
- def test_get_hosts(self):
- instance1 = tests_utils.get_test_instance(self.context,
- flavor=flavors.get_default_flavor(), obj=True)
- instance1.host = 'hostA'
- instance1.save()
- instance2 = tests_utils.get_test_instance(self.context,
- flavor=flavors.get_default_flavor(), obj=True)
- instance2.host = 'hostB'
- instance2.save()
- instance3 = tests_utils.get_test_instance(self.context,
- flavor=flavors.get_default_flavor(), obj=True)
- instance3.host = 'hostB'
- instance3.save()
-
- instance_ids = [instance1.uuid, instance2.uuid, instance3.uuid]
- values = self._get_default_values()
- group = self._create_instance_group(self.context, values)
- instance_group.InstanceGroup.add_members(self.context, group.uuid,
- instance_ids)
-
- group = instance_group.InstanceGroup.get_by_uuid(self.context,
- group.uuid)
- hosts = group.get_hosts(self.context)
- self.assertEqual(2, len(hosts))
- self.assertIn('hostA', hosts)
- self.assertIn('hostB', hosts)
- hosts = group.get_hosts(self.context, exclude=[instance1.uuid])
- self.assertEqual(1, len(hosts))
- self.assertIn('hostB', hosts)
-
- def test_get_hosts_with_some_none(self):
- instance1 = tests_utils.get_test_instance(self.context,
- flavor=flavors.get_default_flavor(), obj=True)
- instance1.host = None
- instance1.save()
- instance2 = tests_utils.get_test_instance(self.context,
- flavor=flavors.get_default_flavor(), obj=True)
- instance2.host = 'hostB'
- instance2.save()
-
- instance_ids = [instance1.uuid, instance2.uuid]
- values = self._get_default_values()
- group = self._create_instance_group(self.context, values)
- instance_group.InstanceGroup.add_members(self.context, group.uuid,
- instance_ids)
-
- group = instance_group.InstanceGroup.get_by_uuid(self.context,
- group.uuid)
- hosts = group.get_hosts(self.context)
- self.assertEqual(1, len(hosts))
- self.assertIn('hostB', hosts)
-
- def test_obj_make_compatible(self):
- group = instance_group.InstanceGroup(uuid='fake-uuid',
- name='fake-name')
- group.create(self.context)
- group_primitive = group.obj_to_primitive()
- group.obj_make_compatible(group_primitive, '1.6')
- self.assertEqual({}, group_primitive['metadetails'])
-
- def test_count_members_by_user(self):
- instance1 = tests_utils.get_test_instance(self.context,
- flavor=flavors.get_default_flavor(), obj=True)
- instance1.user_id = 'user1'
- instance1.save()
- instance2 = tests_utils.get_test_instance(self.context,
- flavor=flavors.get_default_flavor(), obj=True)
- instance2.user_id = 'user2'
- instance2.save()
- instance3 = tests_utils.get_test_instance(self.context,
- flavor=flavors.get_default_flavor(), obj=True)
- instance3.user_id = 'user2'
- instance3.save()
-
- instance_ids = [instance1.uuid, instance2.uuid, instance3.uuid]
- values = self._get_default_values()
- group = self._create_instance_group(self.context, values)
- instance_group.InstanceGroup.add_members(self.context, group.uuid,
- instance_ids)
-
- group = instance_group.InstanceGroup.get_by_uuid(self.context,
- group.uuid)
- count_user1 = group.count_members_by_user(self.context, 'user1')
- count_user2 = group.count_members_by_user(self.context, 'user2')
- count_user3 = group.count_members_by_user(self.context, 'user3')
- self.assertEqual(1, count_user1)
- self.assertEqual(2, count_user2)
- self.assertEqual(0, count_user3)
-
-
-class TestInstanceGroupObject(test_objects._LocalTest,
- _TestInstanceGroupObjects):
- pass
-
-
-class TestRemoteInstanceGroupObject(test_objects._RemoteTest,
- _TestInstanceGroupObjects):
- pass
diff --git a/nova/tests/objects/test_instance_info_cache.py b/nova/tests/objects/test_instance_info_cache.py
deleted file mode 100644
index f1e75ad87c..0000000000
--- a/nova/tests/objects/test_instance_info_cache.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.cells import opts as cells_opts
-from nova.cells import rpcapi as cells_rpcapi
-from nova import db
-from nova import exception
-from nova.network import model as network_model
-from nova.objects import instance_info_cache
-from nova.tests.objects import test_objects
-
-
-fake_info_cache = {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': False,
- 'instance_uuid': 'fake-uuid',
- 'network_info': '[]',
- }
-
-
-class _TestInstanceInfoCacheObject(object):
- def test_get_by_instance_uuid(self):
- nwinfo = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
- self.mox.StubOutWithMock(db, 'instance_info_cache_get')
- db.instance_info_cache_get(self.context, 'fake-uuid').AndReturn(
- dict(fake_info_cache, network_info=nwinfo.json()))
- self.mox.ReplayAll()
- obj = instance_info_cache.InstanceInfoCache.get_by_instance_uuid(
- self.context, 'fake-uuid')
- self.assertEqual(obj.instance_uuid, 'fake-uuid')
- self.assertEqual(obj.network_info, nwinfo)
- self.assertRemotes()
-
- def test_get_by_instance_uuid_no_entries(self):
- self.mox.StubOutWithMock(db, 'instance_info_cache_get')
- db.instance_info_cache_get(self.context, 'fake-uuid').AndReturn(None)
- self.mox.ReplayAll()
- self.assertRaises(
- exception.InstanceInfoCacheNotFound,
- instance_info_cache.InstanceInfoCache.get_by_instance_uuid,
- self.context, 'fake-uuid')
-
- def test_new(self):
- obj = instance_info_cache.InstanceInfoCache.new(self.context,
- 'fake-uuid')
- self.assertEqual(set(['instance_uuid', 'network_info']),
- obj.obj_what_changed())
- self.assertEqual('fake-uuid', obj.instance_uuid)
- self.assertIsNone(obj.network_info)
-
- def _save_helper(self, cell_type, update_cells):
- obj = instance_info_cache.InstanceInfoCache()
- cells_api = cells_rpcapi.CellsAPI()
-
- self.mox.StubOutWithMock(db, 'instance_info_cache_update')
- self.mox.StubOutWithMock(cells_opts, 'get_cell_type')
- self.mox.StubOutWithMock(cells_rpcapi, 'CellsAPI',
- use_mock_anything=True)
- self.mox.StubOutWithMock(cells_api,
- 'instance_info_cache_update_at_top')
- nwinfo = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
- db.instance_info_cache_update(
- self.context, 'fake-uuid',
- {'network_info': nwinfo.json()}).AndReturn('foo')
- if update_cells:
- cells_opts.get_cell_type().AndReturn(cell_type)
- if cell_type == 'compute':
- cells_rpcapi.CellsAPI().AndReturn(cells_api)
- cells_api.instance_info_cache_update_at_top(
- self.context, 'foo')
- self.mox.ReplayAll()
- obj._context = self.context
- obj.instance_uuid = 'fake-uuid'
- obj.network_info = nwinfo
- obj.save(update_cells=update_cells)
-
- def test_save_with_update_cells_and_compute_cell(self):
- self._save_helper('compute', True)
-
- def test_save_with_update_cells_and_non_compute_cell(self):
- self._save_helper(None, True)
-
- def test_save_without_update_cells(self):
- self._save_helper(None, False)
-
- def test_refresh(self):
- obj = instance_info_cache.InstanceInfoCache.new(self.context,
- 'fake-uuid1')
- self.mox.StubOutWithMock(db, 'instance_info_cache_get')
- db.instance_info_cache_get(self.context, 'fake-uuid1').AndReturn(
- fake_info_cache)
- self.mox.ReplayAll()
- obj.refresh()
- self.assertEqual(fake_info_cache['instance_uuid'], obj.instance_uuid)
-
-
-class TestInstanceInfoCacheObject(test_objects._LocalTest,
- _TestInstanceInfoCacheObject):
- pass
-
-
-class TestInstanceInfoCacheObjectRemote(test_objects._RemoteTest,
- _TestInstanceInfoCacheObject):
- pass
diff --git a/nova/tests/objects/test_instance_numa_topology.py b/nova/tests/objects/test_instance_numa_topology.py
deleted file mode 100644
index 1f9f3b40fe..0000000000
--- a/nova/tests/objects/test_instance_numa_topology.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import uuid
-
-import mock
-
-from nova import exception
-from nova import objects
-from nova.tests.objects import test_objects
-from nova.virt import hardware
-
-fake_numa_topology = hardware.VirtNUMAInstanceTopology(
- cells=[hardware.VirtNUMATopologyCellInstance(
- 0, set([1, 2]), 512, hardware.VirtPageSize(2048)),
- hardware.VirtNUMATopologyCellInstance(
- 1, set([3, 4]), 512, hardware.VirtPageSize(2048))])
-
-fake_db_topology = {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': 0,
- 'id': 1,
- 'instance_uuid': str(uuid.uuid4()),
- 'numa_topology': fake_numa_topology.to_json()
- }
-
-
-class _TestInstanceNUMATopology(object):
- @mock.patch('nova.db.instance_extra_update_by_uuid')
- def test_create(self, mock_update):
- topo_obj = objects.InstanceNUMATopology.obj_from_topology(
- fake_numa_topology)
- topo_obj.instance_uuid = fake_db_topology['instance_uuid']
- topo_obj.create(self.context)
- self.assertEqual(1, len(mock_update.call_args_list))
-
- @mock.patch('nova.db.instance_extra_get_by_instance_uuid')
- def test_get_by_instance_uuid(self, mock_get):
- mock_get.return_value = fake_db_topology
- numa_topology = objects.InstanceNUMATopology.get_by_instance_uuid(
- self.context, 'fake_uuid')
- self.assertEqual(fake_db_topology['instance_uuid'],
- numa_topology.instance_uuid)
- for obj_cell, topo_cell in zip(
- numa_topology.cells, fake_numa_topology.cells):
- self.assertIsInstance(obj_cell, objects.InstanceNUMACell)
- self.assertEqual(topo_cell.cpuset, obj_cell.cpuset)
- self.assertEqual(topo_cell.memory, obj_cell.memory)
- self.assertEqual(topo_cell.pagesize.size_kb, obj_cell.pagesize)
-
- @mock.patch('nova.db.instance_extra_get_by_instance_uuid')
- def test_get_by_instance_uuid_missing(self, mock_get):
- mock_get.return_value = None
- self.assertRaises(
- exception.NumaTopologyNotFound,
- objects.InstanceNUMATopology.get_by_instance_uuid,
- self.context, 'fake_uuid')
-
-
-class TestInstanceNUMATopology(test_objects._LocalTest,
- _TestInstanceNUMATopology):
- pass
-
-
-class TestInstanceNUMATopologyRemote(test_objects._RemoteTest,
- _TestInstanceNUMATopology):
- pass
diff --git a/nova/tests/objects/test_instance_pci_requests.py b/nova/tests/objects/test_instance_pci_requests.py
deleted file mode 100644
index e688a31e63..0000000000
--- a/nova/tests/objects/test_instance_pci_requests.py
+++ /dev/null
@@ -1,191 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo.serialization import jsonutils
-
-from nova import objects
-from nova.tests.objects import test_objects
-
-
-FAKE_UUID = '79a53d6b-0893-4838-a971-15f4f382e7c2'
-FAKE_REQUEST_UUID = '69b53d6b-0793-4839-c981-f5c4f382e7d2'
-
-# NOTE(danms): Yes, these are the same right now, but going forward,
-# we have changes to make which will be reflected in the format
-# in instance_extra, but not in system_metadata.
-fake_pci_requests = [
- {'count': 2,
- 'spec': [{'vendor_id': '8086',
- 'device_id': '1502'}],
- 'alias_name': 'alias_1',
- 'is_new': False,
- 'request_id': FAKE_REQUEST_UUID},
- {'count': 2,
- 'spec': [{'vendor_id': '6502',
- 'device_id': '07B5'}],
- 'alias_name': 'alias_2',
- 'is_new': True,
- 'request_id': FAKE_REQUEST_UUID},
- ]
-
-fake_legacy_pci_requests = [
- {'count': 2,
- 'spec': [{'vendor_id': '8086',
- 'device_id': '1502'}],
- 'alias_name': 'alias_1'},
- {'count': 1,
- 'spec': [{'vendor_id': '6502',
- 'device_id': '07B5'}],
- 'alias_name': 'alias_2'},
- ]
-
-
-class _TestInstancePCIRequests(object):
- @mock.patch('nova.db.instance_extra_get_by_instance_uuid')
- def test_get_by_instance_uuid(self, mock_get):
- mock_get.return_value = {
- 'instance_uuid': FAKE_UUID,
- 'pci_requests': jsonutils.dumps(fake_pci_requests),
- }
- requests = objects.InstancePCIRequests.get_by_instance_uuid(
- self.context, FAKE_UUID)
- self.assertEqual(2, len(requests.requests))
- for index, request in enumerate(requests.requests):
- self.assertEqual(fake_pci_requests[index]['alias_name'],
- request.alias_name)
- self.assertEqual(fake_pci_requests[index]['count'],
- request.count)
- self.assertEqual(fake_pci_requests[index]['spec'],
- [dict(x.items()) for x in request.spec])
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
- def test_get_by_instance_uuid_and_newness(self, mock_get):
- pcir = objects.InstancePCIRequests
- mock_get.return_value = objects.InstancePCIRequests(
- instance_uuid='fake-uuid',
- requests=[objects.InstancePCIRequest(count=1, is_new=False),
- objects.InstancePCIRequest(count=2, is_new=True)])
- old_req = pcir.get_by_instance_uuid_and_newness(self.context,
- 'fake-uuid',
- False)
- mock_get.return_value = objects.InstancePCIRequests(
- instance_uuid='fake-uuid',
- requests=[objects.InstancePCIRequest(count=1, is_new=False),
- objects.InstancePCIRequest(count=2, is_new=True)])
- new_req = pcir.get_by_instance_uuid_and_newness(self.context,
- 'fake-uuid',
- True)
- self.assertEqual(1, old_req.requests[0].count)
- self.assertEqual(2, new_req.requests[0].count)
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
- def test_get_by_instance_current(self, mock_get):
- instance = objects.Instance(uuid='fake-uuid',
- system_metadata={})
- objects.InstancePCIRequests.get_by_instance(self.context,
- instance)
- mock_get.assert_called_once_with(self.context, 'fake-uuid')
-
- def test_get_by_instance_legacy(self):
- fakesysmeta = {
- 'pci_requests': jsonutils.dumps([fake_legacy_pci_requests[0]]),
- 'new_pci_requests': jsonutils.dumps([fake_legacy_pci_requests[1]]),
- }
- instance = objects.Instance(uuid='fake-uuid',
- system_metadata=fakesysmeta)
- requests = objects.InstancePCIRequests.get_by_instance(self.context,
- instance)
- self.assertEqual(2, len(requests.requests))
- self.assertEqual('alias_1', requests.requests[0].alias_name)
- self.assertFalse(requests.requests[0].is_new)
- self.assertEqual('alias_2', requests.requests[1].alias_name)
- self.assertTrue(requests.requests[1].is_new)
-
- @mock.patch('nova.db.instance_extra_update_by_uuid')
- def test_save(self, mock_update):
- requests = objects.InstancePCIRequests(
- context=self.context,
- instance_uuid=FAKE_UUID,
- requests=[objects.InstancePCIRequest(
- count=1,
- spec=[{'foo': 'bar'}, {'baz': 'bat'}],
- alias_name='alias_1',
- is_new=False,
- request_id=FAKE_REQUEST_UUID)])
- requests.save()
- self.assertEqual(FAKE_UUID, mock_update.call_args_list[0][0][1])
- self.assertEqual(
- [{'count': 1, 'is_new': False,
- 'alias_name': 'alias_1',
- 'spec': [{'foo': 'bar'}, {'baz': 'bat'}],
- 'request_id': FAKE_REQUEST_UUID}],
- jsonutils.loads(
- mock_update.call_args_list[0][0][2]['pci_requests']))
-
- @mock.patch('nova.db.instance_extra_update_by_uuid')
- @mock.patch('nova.db.instance_extra_get_by_instance_uuid')
- def test_save_and_reload(self, mock_get, mock_update):
- database = {}
-
- def _save(context, uuid, values):
- database.setdefault(uuid, {'instance_uuid': uuid})
- database[uuid].update(values)
-
- def _get(context, uuid, columns):
- return database.get(uuid, {})
-
- mock_update.side_effect = _save
- mock_get.side_effect = _get
-
- requests = objects.InstancePCIRequests(
- context=self.context,
- instance_uuid=FAKE_UUID,
- requests=[objects.InstancePCIRequest(
- count=1, is_new=False, alias_name='alias_1',
- spec=[{'foo': 'bar'}])])
- requests.save()
- _requests = objects.InstancePCIRequests.get_by_instance_uuid(
- self.context, FAKE_UUID)
-
- self.assertEqual(requests.instance_uuid, _requests.instance_uuid)
- self.assertEqual(len(requests.requests), len(_requests.requests))
- self.assertEqual(requests.requests[0].alias_name,
- _requests.requests[0].alias_name)
-
- def test_new_compatibility(self):
- request = objects.InstancePCIRequest(is_new=False)
- self.assertFalse(request.new)
-
- def test_backport_1_0(self):
- requests = objects.InstancePCIRequests(
- requests=[objects.InstancePCIRequest(count=1,
- request_id=FAKE_UUID),
- objects.InstancePCIRequest(count=2,
- request_id=FAKE_UUID)])
- primitive = requests.obj_to_primitive(target_version='1.0')
- backported = objects.InstancePCIRequests.obj_from_primitive(
- primitive)
- self.assertEqual('1.0', backported.VERSION)
- self.assertEqual(2, len(backported.requests))
- self.assertFalse(backported.requests[0].obj_attr_is_set('request_id'))
- self.assertFalse(backported.requests[1].obj_attr_is_set('request_id'))
-
-
-class TestInstancePCIRequests(test_objects._LocalTest,
- _TestInstancePCIRequests):
- pass
-
-
-class TestRemoteInstancePCIRequests(test_objects._RemoteTest,
- _TestInstancePCIRequests):
- pass
diff --git a/nova/tests/objects/test_keypair.py b/nova/tests/objects/test_keypair.py
deleted file mode 100644
index 6ac66b04ae..0000000000
--- a/nova/tests/objects/test_keypair.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.utils import timeutils
-
-from nova import db
-from nova import exception
-from nova.objects import keypair
-from nova.tests.objects import test_objects
-
-NOW = timeutils.utcnow().replace(microsecond=0)
-fake_keypair = {
- 'created_at': NOW,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': False,
- 'id': 123,
- 'name': 'foo-keypair',
- 'user_id': 'fake-user',
- 'fingerprint': 'fake-fingerprint',
- 'public_key': 'fake\npublic\nkey',
- }
-
-
-class _TestKeyPairObject(object):
- def test_get_by_name(self):
- self.mox.StubOutWithMock(db, 'key_pair_get')
- db.key_pair_get(self.context, 'fake-user', 'foo-keypair').AndReturn(
- fake_keypair)
- self.mox.ReplayAll()
- keypair_obj = keypair.KeyPair.get_by_name(self.context, 'fake-user',
- 'foo-keypair')
- self.compare_obj(keypair_obj, fake_keypair)
-
- def test_create(self):
- self.mox.StubOutWithMock(db, 'key_pair_create')
- db.key_pair_create(self.context,
- {'name': 'foo-keypair',
- 'public_key': 'keydata'}).AndReturn(fake_keypair)
- self.mox.ReplayAll()
- keypair_obj = keypair.KeyPair()
- keypair_obj.name = 'foo-keypair'
- keypair_obj.public_key = 'keydata'
- keypair_obj.create(self.context)
- self.compare_obj(keypair_obj, fake_keypair)
-
- def test_recreate_fails(self):
- self.mox.StubOutWithMock(db, 'key_pair_create')
- db.key_pair_create(self.context,
- {'name': 'foo-keypair',
- 'public_key': 'keydata'}).AndReturn(fake_keypair)
- self.mox.ReplayAll()
- keypair_obj = keypair.KeyPair()
- keypair_obj.name = 'foo-keypair'
- keypair_obj.public_key = 'keydata'
- keypair_obj.create(self.context)
- self.assertRaises(exception.ObjectActionError, keypair_obj.create,
- self.context)
-
- def test_destroy(self):
- self.mox.StubOutWithMock(db, 'key_pair_destroy')
- db.key_pair_destroy(self.context, 'fake-user', 'foo-keypair')
- self.mox.ReplayAll()
- keypair_obj = keypair.KeyPair()
- keypair_obj.id = 123
- keypair_obj.user_id = 'fake-user'
- keypair_obj.name = 'foo-keypair'
- keypair_obj.destroy(self.context)
-
- def test_destroy_by_name(self):
- self.mox.StubOutWithMock(db, 'key_pair_destroy')
- db.key_pair_destroy(self.context, 'fake-user', 'foo-keypair')
- self.mox.ReplayAll()
- keypair.KeyPair.destroy_by_name(self.context, 'fake-user',
- 'foo-keypair')
-
- def test_get_by_user(self):
- self.mox.StubOutWithMock(db, 'key_pair_get_all_by_user')
- self.mox.StubOutWithMock(db, 'key_pair_count_by_user')
- db.key_pair_get_all_by_user(self.context, 'fake-user').AndReturn(
- [fake_keypair])
- db.key_pair_count_by_user(self.context, 'fake-user').AndReturn(1)
- self.mox.ReplayAll()
- keypairs = keypair.KeyPairList.get_by_user(self.context, 'fake-user')
- self.assertEqual(1, len(keypairs))
- self.compare_obj(keypairs[0], fake_keypair)
- self.assertEqual(1, keypair.KeyPairList.get_count_by_user(self.context,
- 'fake-user'))
-
-
-class TestMigrationObject(test_objects._LocalTest,
- _TestKeyPairObject):
- pass
-
-
-class TestRemoteMigrationObject(test_objects._RemoteTest,
- _TestKeyPairObject):
- pass
diff --git a/nova/tests/objects/test_migration.py b/nova/tests/objects/test_migration.py
deleted file mode 100644
index 8d6d9c8113..0000000000
--- a/nova/tests/objects/test_migration.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.utils import timeutils
-
-from nova import context
-from nova import db
-from nova import exception
-from nova.objects import migration
-from nova.tests import fake_instance
-from nova.tests.objects import test_objects
-
-
-NOW = timeutils.utcnow().replace(microsecond=0)
-
-
-def fake_db_migration(**updates):
- db_instance = {
- 'created_at': NOW,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': False,
- 'id': 123,
- 'source_compute': 'compute-source',
- 'dest_compute': 'compute-dest',
- 'source_node': 'node-source',
- 'dest_node': 'node-dest',
- 'dest_host': 'host-dest',
- 'old_instance_type_id': 42,
- 'new_instance_type_id': 84,
- 'instance_uuid': 'fake-uuid',
- 'status': 'migrating',
- }
-
- if updates:
- db_instance.update(updates)
- return db_instance
-
-
-class _TestMigrationObject(object):
- def test_get_by_id(self):
- ctxt = context.get_admin_context()
- fake_migration = fake_db_migration()
- self.mox.StubOutWithMock(db, 'migration_get')
- db.migration_get(ctxt, fake_migration['id']).AndReturn(fake_migration)
- self.mox.ReplayAll()
- mig = migration.Migration.get_by_id(ctxt, fake_migration['id'])
- self.compare_obj(mig, fake_migration)
-
- def test_get_by_instance_and_status(self):
- ctxt = context.get_admin_context()
- fake_migration = fake_db_migration()
- self.mox.StubOutWithMock(db, 'migration_get_by_instance_and_status')
- db.migration_get_by_instance_and_status(ctxt,
- fake_migration['id'],
- 'migrating'
- ).AndReturn(fake_migration)
- self.mox.ReplayAll()
- mig = migration.Migration.get_by_instance_and_status(
- ctxt, fake_migration['id'], 'migrating')
- self.compare_obj(mig, fake_migration)
-
- def test_create(self):
- ctxt = context.get_admin_context()
- fake_migration = fake_db_migration()
- self.mox.StubOutWithMock(db, 'migration_create')
- db.migration_create(ctxt, {'source_compute': 'foo'}).AndReturn(
- fake_migration)
- self.mox.ReplayAll()
- mig = migration.Migration()
- mig.source_compute = 'foo'
- mig.create(ctxt)
- self.assertEqual(fake_migration['dest_compute'], mig.dest_compute)
-
- def test_recreate_fails(self):
- ctxt = context.get_admin_context()
- fake_migration = fake_db_migration()
- self.mox.StubOutWithMock(db, 'migration_create')
- db.migration_create(ctxt, {'source_compute': 'foo'}).AndReturn(
- fake_migration)
- self.mox.ReplayAll()
- mig = migration.Migration()
- mig.source_compute = 'foo'
- mig.create(ctxt)
- self.assertRaises(exception.ObjectActionError, mig.create,
- self.context)
-
- def test_save(self):
- ctxt = context.get_admin_context()
- fake_migration = fake_db_migration()
- self.mox.StubOutWithMock(db, 'migration_update')
- db.migration_update(ctxt, 123, {'source_compute': 'foo'}
- ).AndReturn(fake_migration)
- self.mox.ReplayAll()
- mig = migration.Migration()
- mig.id = 123
- mig.source_compute = 'foo'
- mig.save(ctxt)
- self.assertEqual(fake_migration['dest_compute'], mig.dest_compute)
-
- def test_instance(self):
- ctxt = context.get_admin_context()
- fake_migration = fake_db_migration()
- fake_inst = fake_instance.fake_db_instance()
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- db.instance_get_by_uuid(ctxt, fake_migration['instance_uuid'],
- columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
- ).AndReturn(fake_inst)
- mig = migration.Migration._from_db_object(ctxt,
- migration.Migration(),
- fake_migration)
- mig._context = ctxt
- self.mox.ReplayAll()
- self.assertEqual(mig.instance.host, fake_inst['host'])
-
- def test_get_unconfirmed_by_dest_compute(self):
- ctxt = context.get_admin_context()
- fake_migration = fake_db_migration()
- db_migrations = [fake_migration, dict(fake_migration, id=456)]
- self.mox.StubOutWithMock(
- db, 'migration_get_unconfirmed_by_dest_compute')
- db.migration_get_unconfirmed_by_dest_compute(
- ctxt, 'window', 'foo',
- use_slave=False).AndReturn(db_migrations)
- self.mox.ReplayAll()
- migrations = (
- migration.MigrationList.get_unconfirmed_by_dest_compute(
- ctxt, 'window', 'foo', use_slave=False))
- self.assertEqual(2, len(migrations))
- for index, db_migration in enumerate(db_migrations):
- self.compare_obj(migrations[index], db_migration)
-
- def test_get_in_progress_by_host_and_node(self):
- ctxt = context.get_admin_context()
- fake_migration = fake_db_migration()
- db_migrations = [fake_migration, dict(fake_migration, id=456)]
- self.mox.StubOutWithMock(
- db, 'migration_get_in_progress_by_host_and_node')
- db.migration_get_in_progress_by_host_and_node(
- ctxt, 'host', 'node').AndReturn(db_migrations)
- self.mox.ReplayAll()
- migrations = (
- migration.MigrationList.get_in_progress_by_host_and_node(
- ctxt, 'host', 'node'))
- self.assertEqual(2, len(migrations))
- for index, db_migration in enumerate(db_migrations):
- self.compare_obj(migrations[index], db_migration)
-
- def test_get_by_filters(self):
- ctxt = context.get_admin_context()
- fake_migration = fake_db_migration()
- db_migrations = [fake_migration, dict(fake_migration, id=456)]
- self.mox.StubOutWithMock(
- db, 'migration_get_all_by_filters')
- filters = {'foo': 'bar'}
- db.migration_get_all_by_filters(ctxt, filters).AndReturn(db_migrations)
- self.mox.ReplayAll()
- migrations = migration.MigrationList.get_by_filters(ctxt, filters)
- self.assertEqual(2, len(migrations))
- for index, db_migration in enumerate(db_migrations):
- self.compare_obj(migrations[index], db_migration)
-
-
-class TestMigrationObject(test_objects._LocalTest,
- _TestMigrationObject):
- pass
-
-
-class TestRemoteMigrationObject(test_objects._RemoteTest,
- _TestMigrationObject):
- pass
diff --git a/nova/tests/objects/test_network.py b/nova/tests/objects/test_network.py
deleted file mode 100644
index 9c654f33a0..0000000000
--- a/nova/tests/objects/test_network.py
+++ /dev/null
@@ -1,232 +0,0 @@
-# Copyright 2014 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-import netaddr
-
-from nova.objects import network as network_obj
-from nova.tests.objects import test_objects
-
-
-fake_network = {
- 'deleted': False,
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'id': 1,
- 'label': 'Fake Network',
- 'injected': False,
- 'cidr': '192.168.1.0/24',
- 'cidr_v6': '1234::/64',
- 'multi_host': False,
- 'netmask': '255.255.255.0',
- 'gateway': '192.168.1.1',
- 'broadcast': '192.168.1.255',
- 'netmask_v6': 64,
- 'gateway_v6': '1234::1',
- 'bridge': 'br100',
- 'bridge_interface': 'eth0',
- 'dns1': '8.8.8.8',
- 'dns2': '8.8.4.4',
- 'vlan': None,
- 'vpn_public_address': None,
- 'vpn_public_port': None,
- 'vpn_private_address': None,
- 'dhcp_start': '192.168.1.10',
- 'rxtx_base': None,
- 'project_id': None,
- 'priority': None,
- 'host': None,
- 'uuid': 'fake-uuid',
- 'mtu': None,
- 'dhcp_server': '192.168.1.1',
- 'enable_dhcp': True,
- 'share_address': False,
-}
-
-
-class _TestNetworkObject(object):
- def _compare(self, obj, db_obj):
- for field in obj.fields:
- db_val = db_obj[field]
- obj_val = obj[field]
- if isinstance(obj_val, netaddr.IPAddress):
- obj_val = str(obj_val)
- if isinstance(obj_val, netaddr.IPNetwork):
- obj_val = str(obj_val)
- if field == 'netmask_v6':
- db_val = str(netaddr.IPNetwork('1::/%i' % db_val).netmask)
- self.assertEqual(db_val, obj_val)
-
- @mock.patch('nova.db.network_get')
- def test_get_by_id(self, get):
- get.return_value = fake_network
- network = network_obj.Network.get_by_id(self.context, 'foo')
- self._compare(network, fake_network)
- get.assert_called_once_with(self.context, 'foo',
- project_only='allow_none')
-
- @mock.patch('nova.db.network_get_by_uuid')
- def test_get_by_uuid(self, get):
- get.return_value = fake_network
- network = network_obj.Network.get_by_uuid(self.context, 'foo')
- self._compare(network, fake_network)
- get.assert_called_once_with(self.context, 'foo')
-
- @mock.patch('nova.db.network_get_by_cidr')
- def test_get_by_cidr(self, get):
- get.return_value = fake_network
- network = network_obj.Network.get_by_cidr(self.context,
- '192.168.1.0/24')
- self._compare(network, fake_network)
- get.assert_called_once_with(self.context, '192.168.1.0/24')
-
- @mock.patch('nova.db.network_update')
- @mock.patch('nova.db.network_set_host')
- def test_save(self, set_host, update):
- result = dict(fake_network, injected=True)
- network = network_obj.Network._from_db_object(self.context,
- network_obj.Network(),
- fake_network)
- network.obj_reset_changes()
- network.save()
- network.label = 'bar'
- update.return_value = result
- network.save()
- update.assert_called_once_with(self.context, network.id,
- {'label': 'bar'})
- self.assertFalse(set_host.called)
- self._compare(network, result)
-
- @mock.patch('nova.db.network_update')
- @mock.patch('nova.db.network_set_host')
- @mock.patch('nova.db.network_get')
- def test_save_with_host(self, get, set_host, update):
- result = dict(fake_network, injected=True)
- network = network_obj.Network._from_db_object(self.context,
- network_obj.Network(),
- fake_network)
- network.obj_reset_changes()
- network.host = 'foo'
- get.return_value = result
- network.save()
- set_host.assert_called_once_with(self.context, network.id, 'foo')
- self.assertFalse(update.called)
- self._compare(network, result)
-
- @mock.patch('nova.db.network_update')
- @mock.patch('nova.db.network_set_host')
- def test_save_with_host_and_other(self, set_host, update):
- result = dict(fake_network, injected=True)
- network = network_obj.Network._from_db_object(self.context,
- network_obj.Network(),
- fake_network)
- network.obj_reset_changes()
- network.host = 'foo'
- network.label = 'bar'
- update.return_value = result
- network.save()
- set_host.assert_called_once_with(self.context, network.id, 'foo')
- update.assert_called_once_with(self.context, network.id,
- {'label': 'bar'})
- self._compare(network, result)
-
- @mock.patch('nova.db.network_associate')
- def test_associate(self, associate):
- network_obj.Network.associate(self.context, 'project',
- network_id=123)
- associate.assert_called_once_with(self.context, 'project',
- network_id=123, force=False)
-
- @mock.patch('nova.db.network_disassociate')
- def test_disassociate(self, disassociate):
- network_obj.Network.disassociate(self.context, 123,
- host=True, project=True)
- disassociate.assert_called_once_with(self.context, 123, True, True)
-
- @mock.patch('nova.db.network_create_safe')
- def test_create(self, create):
- create.return_value = fake_network
- network = network_obj.Network(context=self.context, label='foo')
- network.create()
- create.assert_called_once_with(self.context, {'label': 'foo'})
- self._compare(network, fake_network)
-
- @mock.patch('nova.db.network_delete_safe')
- def test_destroy(self, delete):
- network = network_obj.Network(context=self.context, id=123)
- network.destroy()
- delete.assert_called_once_with(self.context, 123)
- self.assertTrue(network.deleted)
- self.assertNotIn('deleted', network.obj_what_changed())
-
- @mock.patch('nova.db.network_get_all')
- def test_get_all(self, get_all):
- get_all.return_value = [fake_network]
- networks = network_obj.NetworkList.get_all(self.context)
- self.assertEqual(1, len(networks))
- get_all.assert_called_once_with(self.context, 'allow_none')
- self._compare(networks[0], fake_network)
-
- @mock.patch('nova.db.network_get_all_by_uuids')
- def test_get_all_by_uuids(self, get_all):
- get_all.return_value = [fake_network]
- networks = network_obj.NetworkList.get_by_uuids(self.context,
- ['foo'])
- self.assertEqual(1, len(networks))
- get_all.assert_called_once_with(self.context, ['foo'], 'allow_none')
- self._compare(networks[0], fake_network)
-
- @mock.patch('nova.db.network_get_all_by_host')
- def test_get_all_by_host(self, get_all):
- get_all.return_value = [fake_network]
- networks = network_obj.NetworkList.get_by_host(self.context, 'host')
- self.assertEqual(1, len(networks))
- get_all.assert_called_once_with(self.context, 'host')
- self._compare(networks[0], fake_network)
-
- @mock.patch('nova.db.network_in_use_on_host')
- def test_in_use_on_host(self, in_use):
- in_use.return_value = True
- self.assertTrue(network_obj.Network.in_use_on_host(self.context,
- 123, 'foo'))
- in_use.assert_called_once_with(self.context, 123, 'foo')
-
- @mock.patch('nova.db.project_get_networks')
- def test_get_all_by_project(self, get_nets):
- get_nets.return_value = [fake_network]
- networks = network_obj.NetworkList.get_by_project(self.context, 123)
- self.assertEqual(1, len(networks))
- get_nets.assert_called_once_with(self.context, 123, associate=True)
- self._compare(networks[0], fake_network)
-
- def test_compat_version_1_1(self):
- network = network_obj.Network._from_db_object(self.context,
- network_obj.Network(),
- fake_network)
- primitive = network.obj_to_primitive(target_version='1.1')
- self.assertNotIn('mtu', primitive)
- self.assertNotIn('enable_dhcp', primitive)
- self.assertNotIn('dhcp_server', primitive)
- self.assertNotIn('share_address', primitive)
-
-
-class TestNetworkObject(test_objects._LocalTest,
- _TestNetworkObject):
- pass
-
-
-class TestRemoteNetworkObject(test_objects._RemoteTest,
- _TestNetworkObject):
- pass
diff --git a/nova/tests/objects/test_network_request.py b/nova/tests/objects/test_network_request.py
deleted file mode 100644
index 4d74994e6f..0000000000
--- a/nova/tests/objects/test_network_request.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Copyright 2014 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import objects
-from nova.tests.objects import test_objects
-
-
-FAKE_UUID = '0C5C9AD2-F967-4E92-A7F3-24410F697440'
-
-
-class _TestNetworkRequestObject(object):
- def test_basic(self):
- request = objects.NetworkRequest()
- request.network_id = '456'
- request.address = '1.2.3.4'
- request.port_id = FAKE_UUID
-
- def test_load(self):
- request = objects.NetworkRequest()
- self.assertIsNone(request.port_id)
-
- def test_to_tuple_neutron(self):
- request = objects.NetworkRequest(network_id='123',
- address='1.2.3.4',
- port_id=FAKE_UUID,
- )
- with mock.patch('nova.utils.is_neutron', return_value=True):
- self.assertEqual(('123', '1.2.3.4', FAKE_UUID, None),
- request.to_tuple())
-
- def test_to_tuple_nova(self):
- request = objects.NetworkRequest(network_id='123',
- address='1.2.3.4',
- port_id=FAKE_UUID)
- with mock.patch('nova.utils.is_neutron', return_value=False):
- self.assertEqual(('123', '1.2.3.4'),
- request.to_tuple())
-
- def test_from_tuple_neutron(self):
- request = objects.NetworkRequest.from_tuple(
- ('123', '1.2.3.4', FAKE_UUID, None))
- self.assertEqual('123', request.network_id)
- self.assertEqual('1.2.3.4', str(request.address))
- self.assertEqual(FAKE_UUID, request.port_id)
-
- def test_from_tuple_neutron_without_pci_request_id(self):
- request = objects.NetworkRequest.from_tuple(
- ('123', '1.2.3.4', FAKE_UUID))
- self.assertEqual('123', request.network_id)
- self.assertEqual('1.2.3.4', str(request.address))
- self.assertEqual(FAKE_UUID, request.port_id)
-
- def test_from_tuple_nova(self):
- request = objects.NetworkRequest.from_tuple(
- ('123', '1.2.3.4'))
- self.assertEqual('123', request.network_id)
- self.assertEqual('1.2.3.4', str(request.address))
- self.assertIsNone(request.port_id)
-
- @mock.patch('nova.utils.is_neutron', return_value=True)
- def test_list_as_tuples(self, is_neutron):
- requests = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id='123'),
- objects.NetworkRequest(network_id='456')])
- self.assertEqual(
- [('123', None, None, None), ('456', None, None, None)],
- requests.as_tuples())
-
- def test_is_single_unspecified(self):
- requests = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(network_id='123')])
- self.assertFalse(requests.is_single_unspecified)
- requests = objects.NetworkRequestList(
- objects=[objects.NetworkRequest(),
- objects.NetworkRequest()])
- self.assertFalse(requests.is_single_unspecified)
- requests = objects.NetworkRequestList(
- objects=[objects.NetworkRequest()])
- self.assertTrue(requests.is_single_unspecified)
-
-
-class TestNetworkRequestObject(test_objects._LocalTest,
- _TestNetworkRequestObject):
- pass
-
-
-class TestNetworkRequestRemoteObject(test_objects._RemoteTest,
- _TestNetworkRequestObject):
- pass
diff --git a/nova/tests/objects/test_objects.py b/nova/tests/objects/test_objects.py
deleted file mode 100644
index c26996f52e..0000000000
--- a/nova/tests/objects/test_objects.py
+++ /dev/null
@@ -1,1126 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-import datetime
-import hashlib
-import inspect
-import os
-import pprint
-
-import mock
-from oslo.serialization import jsonutils
-from oslo.utils import timeutils
-import six
-from testtools import matchers
-
-from nova.conductor import rpcapi as conductor_rpcapi
-from nova import context
-from nova import exception
-from nova import objects
-from nova.objects import base
-from nova.objects import fields
-from nova.openstack.common import log
-from nova import rpc
-from nova import test
-from nova.tests import fake_notifier
-from nova import utils
-
-
-LOG = log.getLogger(__name__)
-
-
-class MyOwnedObject(base.NovaPersistentObject, base.NovaObject):
- VERSION = '1.0'
- fields = {'baz': fields.Field(fields.Integer())}
-
-
-class MyObj(base.NovaPersistentObject, base.NovaObject):
- VERSION = '1.6'
- fields = {'foo': fields.Field(fields.Integer()),
- 'bar': fields.Field(fields.String()),
- 'missing': fields.Field(fields.String()),
- 'readonly': fields.Field(fields.Integer(), read_only=True),
- 'rel_object': fields.ObjectField('MyOwnedObject', nullable=True)
- }
-
- @staticmethod
- def _from_db_object(context, obj, db_obj):
- self = MyObj()
- self.foo = db_obj['foo']
- self.bar = db_obj['bar']
- self.missing = db_obj['missing']
- self.readonly = 1
- return self
-
- def obj_load_attr(self, attrname):
- setattr(self, attrname, 'loaded!')
-
- @base.remotable_classmethod
- def query(cls, context):
- obj = cls(foo=1, bar='bar')
- obj.obj_reset_changes()
- return obj
-
- @base.remotable
- def marco(self, context):
- return 'polo'
-
- @base.remotable
- def _update_test(self, context):
- if context.project_id == 'alternate':
- self.bar = 'alternate-context'
- else:
- self.bar = 'updated'
-
- @base.remotable
- def save(self, context):
- self.obj_reset_changes()
-
- @base.remotable
- def refresh(self, context):
- self.foo = 321
- self.bar = 'refreshed'
- self.obj_reset_changes()
-
- @base.remotable
- def modify_save_modify(self, context):
- self.bar = 'meow'
- self.save()
- self.foo = 42
- self.rel_object = MyOwnedObject(baz=42)
-
- def obj_make_compatible(self, primitive, target_version):
- # NOTE(danms): Simulate an older version that had a different
- # format for the 'bar' attribute
- if target_version == '1.1' and 'bar' in primitive:
- primitive['bar'] = 'old%s' % primitive['bar']
-
-
-class MyObjDiffVers(MyObj):
- VERSION = '1.5'
-
- @classmethod
- def obj_name(cls):
- return 'MyObj'
-
-
-class MyObj2(object):
- @classmethod
- def obj_name(cls):
- return 'MyObj'
-
- @base.remotable_classmethod
- def query(cls, *args, **kwargs):
- pass
-
-
-class RandomMixInWithNoFields(object):
- """Used to test object inheritance using a mixin that has no fields."""
- pass
-
-
-class TestSubclassedObject(RandomMixInWithNoFields, MyObj):
- fields = {'new_field': fields.Field(fields.String())}
-
-
-class TestMetaclass(test.TestCase):
- def test_obj_tracking(self):
-
- @six.add_metaclass(base.NovaObjectMetaclass)
- class NewBaseClass(object):
- VERSION = '1.0'
- fields = {}
-
- @classmethod
- def obj_name(cls):
- return cls.__name__
-
- class Fake1TestObj1(NewBaseClass):
- @classmethod
- def obj_name(cls):
- return 'fake1'
-
- class Fake1TestObj2(Fake1TestObj1):
- pass
-
- class Fake1TestObj3(Fake1TestObj1):
- VERSION = '1.1'
-
- class Fake2TestObj1(NewBaseClass):
- @classmethod
- def obj_name(cls):
- return 'fake2'
-
- class Fake1TestObj4(Fake1TestObj3):
- VERSION = '1.2'
-
- class Fake2TestObj2(Fake2TestObj1):
- VERSION = '1.1'
-
- class Fake1TestObj5(Fake1TestObj1):
- VERSION = '1.1'
-
- # Newest versions first in the list. Duplicate versions take the
- # newest object.
- expected = {'fake1': [Fake1TestObj4, Fake1TestObj5, Fake1TestObj2],
- 'fake2': [Fake2TestObj2, Fake2TestObj1]}
- self.assertEqual(expected, NewBaseClass._obj_classes)
- # The following should work, also.
- self.assertEqual(expected, Fake1TestObj1._obj_classes)
- self.assertEqual(expected, Fake1TestObj2._obj_classes)
- self.assertEqual(expected, Fake1TestObj3._obj_classes)
- self.assertEqual(expected, Fake1TestObj4._obj_classes)
- self.assertEqual(expected, Fake1TestObj5._obj_classes)
- self.assertEqual(expected, Fake2TestObj1._obj_classes)
- self.assertEqual(expected, Fake2TestObj2._obj_classes)
-
- def test_field_checking(self):
- def create_class(field):
- class TestField(base.NovaObject):
- VERSION = '1.5'
- fields = {'foo': field()}
- return TestField
-
- create_class(fields.IPV4AndV6AddressField)
- self.assertRaises(exception.ObjectFieldInvalid,
- create_class, fields.IPV4AndV6Address)
- self.assertRaises(exception.ObjectFieldInvalid,
- create_class, int)
-
-
-class TestObjToPrimitive(test.TestCase):
-
- def test_obj_to_primitive_list(self):
- class MyObjElement(base.NovaObject):
- fields = {'foo': fields.IntegerField()}
-
- def __init__(self, foo):
- super(MyObjElement, self).__init__()
- self.foo = foo
-
- class MyList(base.ObjectListBase, base.NovaObject):
- fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
-
- mylist = MyList()
- mylist.objects = [MyObjElement(1), MyObjElement(2), MyObjElement(3)]
- self.assertEqual([1, 2, 3],
- [x['foo'] for x in base.obj_to_primitive(mylist)])
-
- def test_obj_to_primitive_dict(self):
- myobj = MyObj(foo=1, bar='foo')
- self.assertEqual({'foo': 1, 'bar': 'foo'},
- base.obj_to_primitive(myobj))
-
- def test_obj_to_primitive_recursive(self):
- class MyList(base.ObjectListBase, base.NovaObject):
- fields = {'objects': fields.ListOfObjectsField('MyObj')}
-
- mylist = MyList(objects=[MyObj(), MyObj()])
- for i, value in enumerate(mylist):
- value.foo = i
- self.assertEqual([{'foo': 0}, {'foo': 1}],
- base.obj_to_primitive(mylist))
-
- def test_obj_to_primitive_with_ip_addr(self):
- class TestObject(base.NovaObject):
- fields = {'addr': fields.IPAddressField(),
- 'cidr': fields.IPNetworkField()}
-
- obj = TestObject(addr='1.2.3.4', cidr='1.1.1.1/16')
- self.assertEqual({'addr': '1.2.3.4', 'cidr': '1.1.1.1/16'},
- base.obj_to_primitive(obj))
-
-
-class TestObjMakeList(test.TestCase):
-
- def test_obj_make_list(self):
- class MyList(base.ObjectListBase, base.NovaObject):
- pass
-
- db_objs = [{'foo': 1, 'bar': 'baz', 'missing': 'banana'},
- {'foo': 2, 'bar': 'bat', 'missing': 'apple'},
- ]
- mylist = base.obj_make_list('ctxt', MyList(), MyObj, db_objs)
- self.assertEqual(2, len(mylist))
- self.assertEqual('ctxt', mylist._context)
- for index, item in enumerate(mylist):
- self.assertEqual(db_objs[index]['foo'], item.foo)
- self.assertEqual(db_objs[index]['bar'], item.bar)
- self.assertEqual(db_objs[index]['missing'], item.missing)
-
-
-def compare_obj(test, obj, db_obj, subs=None, allow_missing=None,
- comparators=None):
- """Compare a NovaObject and a dict-like database object.
-
- This automatically converts TZ-aware datetimes and iterates over
- the fields of the object.
-
- :param:test: The TestCase doing the comparison
- :param:obj: The NovaObject to examine
- :param:db_obj: The dict-like database object to use as reference
- :param:subs: A dict of objkey=dbkey field substitutions
- :param:allow_missing: A list of fields that may not be in db_obj
- :param:comparators: Map of comparator functions to use for certain fields
- """
-
- if subs is None:
- subs = {}
- if allow_missing is None:
- allow_missing = []
- if comparators is None:
- comparators = {}
-
- for key in obj.fields:
- if key in allow_missing and not obj.obj_attr_is_set(key):
- continue
- obj_val = obj[key]
- db_key = subs.get(key, key)
- db_val = db_obj[db_key]
- if isinstance(obj_val, datetime.datetime):
- obj_val = obj_val.replace(tzinfo=None)
-
- if key in comparators:
- comparator = comparators[key]
- comparator(db_val, obj_val)
- else:
- test.assertEqual(db_val, obj_val)
-
-
-class _BaseTestCase(test.TestCase):
- def setUp(self):
- super(_BaseTestCase, self).setUp()
- self.remote_object_calls = list()
- self.user_id = 'fake-user'
- self.project_id = 'fake-project'
- self.context = context.RequestContext(self.user_id, self.project_id)
- fake_notifier.stub_notifier(self.stubs)
- self.addCleanup(fake_notifier.reset)
-
- def compare_obj(self, obj, db_obj, subs=None, allow_missing=None,
- comparators=None):
- compare_obj(self, obj, db_obj, subs=subs, allow_missing=allow_missing,
- comparators=comparators)
-
- def json_comparator(self, expected, obj_val):
- # json-ify an object field for comparison with its db str
- # equivalent
- self.assertEqual(expected, jsonutils.dumps(obj_val))
-
- def str_comparator(self, expected, obj_val):
- """Compare an object field to a string in the db by performing
- a simple coercion on the object field value.
- """
- self.assertEqual(expected, str(obj_val))
-
- def assertNotIsInstance(self, obj, cls, msg=None):
- """Python < v2.7 compatibility. Assert 'not isinstance(obj, cls)."""
- try:
- f = super(_BaseTestCase, self).assertNotIsInstance
- except AttributeError:
- self.assertThat(obj,
- matchers.Not(matchers.IsInstance(cls)),
- message=msg or '')
- else:
- f(obj, cls, msg=msg)
-
-
-class _LocalTest(_BaseTestCase):
- def setUp(self):
- super(_LocalTest, self).setUp()
- # Just in case
- base.NovaObject.indirection_api = None
-
- def assertRemotes(self):
- self.assertEqual(self.remote_object_calls, [])
-
-
-@contextlib.contextmanager
-def things_temporarily_local():
- # Temporarily go non-remote so the conductor handles
- # this request directly
- _api = base.NovaObject.indirection_api
- base.NovaObject.indirection_api = None
- yield
- base.NovaObject.indirection_api = _api
-
-
-class _RemoteTest(_BaseTestCase):
- def _testable_conductor(self):
- self.conductor_service = self.start_service(
- 'conductor', manager='nova.conductor.manager.ConductorManager')
- self.remote_object_calls = list()
-
- orig_object_class_action = \
- self.conductor_service.manager.object_class_action
- orig_object_action = \
- self.conductor_service.manager.object_action
-
- def fake_object_class_action(*args, **kwargs):
- self.remote_object_calls.append((kwargs.get('objname'),
- kwargs.get('objmethod')))
- with things_temporarily_local():
- result = orig_object_class_action(*args, **kwargs)
- return (base.NovaObject.obj_from_primitive(result, context=args[0])
- if isinstance(result, base.NovaObject) else result)
- self.stubs.Set(self.conductor_service.manager, 'object_class_action',
- fake_object_class_action)
-
- def fake_object_action(*args, **kwargs):
- self.remote_object_calls.append((kwargs.get('objinst'),
- kwargs.get('objmethod')))
- with things_temporarily_local():
- result = orig_object_action(*args, **kwargs)
- return result
- self.stubs.Set(self.conductor_service.manager, 'object_action',
- fake_object_action)
-
- # Things are remoted by default in this session
- base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI()
-
- # To make sure local and remote contexts match
- self.stubs.Set(rpc.RequestContextSerializer,
- 'serialize_context',
- lambda s, c: c)
- self.stubs.Set(rpc.RequestContextSerializer,
- 'deserialize_context',
- lambda s, c: c)
-
- def setUp(self):
- super(_RemoteTest, self).setUp()
- self._testable_conductor()
-
- def assertRemotes(self):
- self.assertNotEqual(self.remote_object_calls, [])
-
-
-class _TestObject(object):
- def test_object_attrs_in_init(self):
- # Spot check a few
- objects.Instance
- objects.InstanceInfoCache
- objects.SecurityGroup
- # Now check the test one in this file. Should be newest version
- self.assertEqual('1.6', objects.MyObj.VERSION)
-
- def test_hydration_type_error(self):
- primitive = {'nova_object.name': 'MyObj',
- 'nova_object.namespace': 'nova',
- 'nova_object.version': '1.5',
- 'nova_object.data': {'foo': 'a'}}
- self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)
-
- def test_hydration(self):
- primitive = {'nova_object.name': 'MyObj',
- 'nova_object.namespace': 'nova',
- 'nova_object.version': '1.5',
- 'nova_object.data': {'foo': 1}}
- real_method = MyObj._obj_from_primitive
-
- def _obj_from_primitive(*args):
- return real_method(*args)
-
- with mock.patch.object(MyObj, '_obj_from_primitive') as ofp:
- ofp.side_effect = _obj_from_primitive
- obj = MyObj.obj_from_primitive(primitive)
- ofp.assert_called_once_with(None, '1.5', primitive)
- self.assertEqual(obj.foo, 1)
-
- def test_hydration_version_different(self):
- primitive = {'nova_object.name': 'MyObj',
- 'nova_object.namespace': 'nova',
- 'nova_object.version': '1.2',
- 'nova_object.data': {'foo': 1}}
- obj = MyObj.obj_from_primitive(primitive)
- self.assertEqual(obj.foo, 1)
- self.assertEqual('1.2', obj.VERSION)
-
- def test_hydration_bad_ns(self):
- primitive = {'nova_object.name': 'MyObj',
- 'nova_object.namespace': 'foo',
- 'nova_object.version': '1.5',
- 'nova_object.data': {'foo': 1}}
- self.assertRaises(exception.UnsupportedObjectError,
- MyObj.obj_from_primitive, primitive)
-
- def test_dehydration(self):
- expected = {'nova_object.name': 'MyObj',
- 'nova_object.namespace': 'nova',
- 'nova_object.version': '1.6',
- 'nova_object.data': {'foo': 1}}
- obj = MyObj(foo=1)
- obj.obj_reset_changes()
- self.assertEqual(obj.obj_to_primitive(), expected)
-
- def test_object_property(self):
- obj = MyObj(foo=1)
- self.assertEqual(obj.foo, 1)
-
- def test_object_property_type_error(self):
- obj = MyObj()
-
- def fail():
- obj.foo = 'a'
- self.assertRaises(ValueError, fail)
-
- def test_object_dict_syntax(self):
- obj = MyObj(foo=123, bar='bar')
- self.assertEqual(obj['foo'], 123)
- self.assertEqual(sorted(obj.items(), key=lambda x: x[0]),
- [('bar', 'bar'), ('foo', 123)])
- self.assertEqual(sorted(list(obj.iteritems()), key=lambda x: x[0]),
- [('bar', 'bar'), ('foo', 123)])
-
- def test_load(self):
- obj = MyObj()
- self.assertEqual(obj.bar, 'loaded!')
-
- def test_load_in_base(self):
- class Foo(base.NovaObject):
- fields = {'foobar': fields.Field(fields.Integer())}
- obj = Foo()
- with self.assertRaisesRegexp(NotImplementedError, ".*foobar.*"):
- obj.foobar
-
- def test_loaded_in_primitive(self):
- obj = MyObj(foo=1)
- obj.obj_reset_changes()
- self.assertEqual(obj.bar, 'loaded!')
- expected = {'nova_object.name': 'MyObj',
- 'nova_object.namespace': 'nova',
- 'nova_object.version': '1.6',
- 'nova_object.changes': ['bar'],
- 'nova_object.data': {'foo': 1,
- 'bar': 'loaded!'}}
- self.assertEqual(obj.obj_to_primitive(), expected)
-
- def test_changes_in_primitive(self):
- obj = MyObj(foo=123)
- self.assertEqual(obj.obj_what_changed(), set(['foo']))
- primitive = obj.obj_to_primitive()
- self.assertIn('nova_object.changes', primitive)
- obj2 = MyObj.obj_from_primitive(primitive)
- self.assertEqual(obj2.obj_what_changed(), set(['foo']))
- obj2.obj_reset_changes()
- self.assertEqual(obj2.obj_what_changed(), set())
-
- def test_obj_class_from_name(self):
- obj = base.NovaObject.obj_class_from_name('MyObj', '1.5')
- self.assertEqual('1.5', obj.VERSION)
-
- def test_obj_class_from_name_latest_compatible(self):
- obj = base.NovaObject.obj_class_from_name('MyObj', '1.1')
- self.assertEqual('1.6', obj.VERSION)
-
- def test_unknown_objtype(self):
- self.assertRaises(exception.UnsupportedObjectError,
- base.NovaObject.obj_class_from_name, 'foo', '1.0')
-
- def test_obj_class_from_name_supported_version(self):
- error = None
- try:
- base.NovaObject.obj_class_from_name('MyObj', '1.25')
- except exception.IncompatibleObjectVersion as error:
- pass
-
- self.assertIsNotNone(error)
- self.assertEqual('1.6', error.kwargs['supported'])
-
- def test_with_alternate_context(self):
- ctxt1 = context.RequestContext('foo', 'foo')
- ctxt2 = context.RequestContext('bar', 'alternate')
- obj = MyObj.query(ctxt1)
- obj._update_test(ctxt2)
- self.assertEqual(obj.bar, 'alternate-context')
- self.assertRemotes()
-
- def test_orphaned_object(self):
- obj = MyObj.query(self.context)
- obj._context = None
- self.assertRaises(exception.OrphanedObjectError,
- obj._update_test)
- self.assertRemotes()
-
- def test_changed_1(self):
- obj = MyObj.query(self.context)
- obj.foo = 123
- self.assertEqual(obj.obj_what_changed(), set(['foo']))
- obj._update_test(self.context)
- self.assertEqual(obj.obj_what_changed(), set(['foo', 'bar']))
- self.assertEqual(obj.foo, 123)
- self.assertRemotes()
-
- def test_changed_2(self):
- obj = MyObj.query(self.context)
- obj.foo = 123
- self.assertEqual(obj.obj_what_changed(), set(['foo']))
- obj.save(self.context)
- self.assertEqual(obj.obj_what_changed(), set([]))
- self.assertEqual(obj.foo, 123)
- self.assertRemotes()
-
- def test_changed_3(self):
- obj = MyObj.query(self.context)
- obj.foo = 123
- self.assertEqual(obj.obj_what_changed(), set(['foo']))
- obj.refresh(self.context)
- self.assertEqual(obj.obj_what_changed(), set([]))
- self.assertEqual(obj.foo, 321)
- self.assertEqual(obj.bar, 'refreshed')
- self.assertRemotes()
-
- def test_changed_4(self):
- obj = MyObj.query(self.context)
- obj.bar = 'something'
- self.assertEqual(obj.obj_what_changed(), set(['bar']))
- obj.modify_save_modify(self.context)
- self.assertEqual(obj.obj_what_changed(), set(['foo', 'rel_object']))
- self.assertEqual(obj.foo, 42)
- self.assertEqual(obj.bar, 'meow')
- self.assertIsInstance(obj.rel_object, MyOwnedObject)
- self.assertRemotes()
-
- def test_changed_with_sub_object(self):
- class ParentObject(base.NovaObject):
- fields = {'foo': fields.IntegerField(),
- 'bar': fields.ObjectField('MyObj'),
- }
- obj = ParentObject()
- self.assertEqual(set(), obj.obj_what_changed())
- obj.foo = 1
- self.assertEqual(set(['foo']), obj.obj_what_changed())
- bar = MyObj()
- obj.bar = bar
- self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
- obj.obj_reset_changes()
- self.assertEqual(set(), obj.obj_what_changed())
- bar.foo = 1
- self.assertEqual(set(['bar']), obj.obj_what_changed())
-
- def test_static_result(self):
- obj = MyObj.query(self.context)
- self.assertEqual(obj.bar, 'bar')
- result = obj.marco()
- self.assertEqual(result, 'polo')
- self.assertRemotes()
-
- def test_updates(self):
- obj = MyObj.query(self.context)
- self.assertEqual(obj.foo, 1)
- obj._update_test()
- self.assertEqual(obj.bar, 'updated')
- self.assertRemotes()
-
- def test_base_attributes(self):
- dt = datetime.datetime(1955, 11, 5)
- obj = MyObj(created_at=dt, updated_at=dt, deleted_at=None,
- deleted=False)
- expected = {'nova_object.name': 'MyObj',
- 'nova_object.namespace': 'nova',
- 'nova_object.version': '1.6',
- 'nova_object.changes':
- ['deleted', 'created_at', 'deleted_at', 'updated_at'],
- 'nova_object.data':
- {'created_at': timeutils.isotime(dt),
- 'updated_at': timeutils.isotime(dt),
- 'deleted_at': None,
- 'deleted': False,
- }
- }
- self.assertEqual(obj.obj_to_primitive(), expected)
-
- def test_contains(self):
- obj = MyObj()
- self.assertNotIn('foo', obj)
- obj.foo = 1
- self.assertIn('foo', obj)
- self.assertNotIn('does_not_exist', obj)
-
- def test_obj_attr_is_set(self):
- obj = MyObj(foo=1)
- self.assertTrue(obj.obj_attr_is_set('foo'))
- self.assertFalse(obj.obj_attr_is_set('bar'))
- self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang')
-
- def test_get(self):
- obj = MyObj(foo=1)
- # Foo has value, should not get the default
- self.assertEqual(obj.get('foo', 2), 1)
- # Foo has value, should return the value without error
- self.assertEqual(obj.get('foo'), 1)
- # Bar is not loaded, so we should get the default
- self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded')
- # Bar without a default should lazy-load
- self.assertEqual(obj.get('bar'), 'loaded!')
- # Bar now has a default, but loaded value should be returned
- self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!')
- # Invalid attribute should raise AttributeError
- self.assertRaises(AttributeError, obj.get, 'nothing')
- # ...even with a default
- self.assertRaises(AttributeError, obj.get, 'nothing', 3)
-
- def test_object_inheritance(self):
- base_fields = base.NovaPersistentObject.fields.keys()
- myobj_fields = ['foo', 'bar', 'missing',
- 'readonly', 'rel_object'] + base_fields
- myobj3_fields = ['new_field']
- self.assertTrue(issubclass(TestSubclassedObject, MyObj))
- self.assertEqual(len(myobj_fields), len(MyObj.fields))
- self.assertEqual(set(myobj_fields), set(MyObj.fields.keys()))
- self.assertEqual(len(myobj_fields) + len(myobj3_fields),
- len(TestSubclassedObject.fields))
- self.assertEqual(set(myobj_fields) | set(myobj3_fields),
- set(TestSubclassedObject.fields.keys()))
-
- def test_get_changes(self):
- obj = MyObj()
- self.assertEqual({}, obj.obj_get_changes())
- obj.foo = 123
- self.assertEqual({'foo': 123}, obj.obj_get_changes())
- obj.bar = 'test'
- self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
- obj.obj_reset_changes()
- self.assertEqual({}, obj.obj_get_changes())
-
- def test_obj_fields(self):
- class TestObj(base.NovaObject):
- fields = {'foo': fields.Field(fields.Integer())}
- obj_extra_fields = ['bar']
-
- @property
- def bar(self):
- return 'this is bar'
-
- obj = TestObj()
- self.assertEqual(['foo', 'bar'], obj.obj_fields)
-
- def test_obj_constructor(self):
- obj = MyObj(context=self.context, foo=123, bar='abc')
- self.assertEqual(123, obj.foo)
- self.assertEqual('abc', obj.bar)
- self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
-
- def test_obj_read_only(self):
- obj = MyObj(context=self.context, foo=123, bar='abc')
- obj.readonly = 1
- self.assertRaises(exception.ReadOnlyFieldError, setattr,
- obj, 'readonly', 2)
-
- def test_obj_repr(self):
- obj = MyObj(foo=123)
- self.assertEqual('MyObj(bar=<?>,created_at=<?>,deleted=<?>,'
- 'deleted_at=<?>,foo=123,missing=<?>,readonly=<?>,'
- 'rel_object=<?>,updated_at=<?>)', repr(obj))
-
-
-class TestObject(_LocalTest, _TestObject):
- pass
-
-
-class TestRemoteObject(_RemoteTest, _TestObject):
- def test_major_version_mismatch(self):
- MyObj2.VERSION = '2.0'
- self.assertRaises(exception.IncompatibleObjectVersion,
- MyObj2.query, self.context)
-
- def test_minor_version_greater(self):
- MyObj2.VERSION = '1.7'
- self.assertRaises(exception.IncompatibleObjectVersion,
- MyObj2.query, self.context)
-
- def test_minor_version_less(self):
- MyObj2.VERSION = '1.2'
- obj = MyObj2.query(self.context)
- self.assertEqual(obj.bar, 'bar')
- self.assertRemotes()
-
- def test_compat(self):
- MyObj2.VERSION = '1.1'
- obj = MyObj2.query(self.context)
- self.assertEqual('oldbar', obj.bar)
-
-
-class TestObjectListBase(test.TestCase):
- def test_list_like_operations(self):
- class MyElement(base.NovaObject):
- fields = {'foo': fields.IntegerField()}
-
- def __init__(self, foo):
- super(MyElement, self).__init__()
- self.foo = foo
-
- class Foo(base.ObjectListBase, base.NovaObject):
- fields = {'objects': fields.ListOfObjectsField('MyElement')}
-
- objlist = Foo(context='foo',
- objects=[MyElement(1), MyElement(2), MyElement(3)])
- self.assertEqual(list(objlist), objlist.objects)
- self.assertEqual(len(objlist), 3)
- self.assertIn(objlist.objects[0], objlist)
- self.assertEqual(list(objlist[:1]), [objlist.objects[0]])
- self.assertEqual(objlist[:1]._context, 'foo')
- self.assertEqual(objlist[2], objlist.objects[2])
- self.assertEqual(objlist.count(objlist.objects[0]), 1)
- self.assertEqual(objlist.index(objlist.objects[1]), 1)
- objlist.sort(key=lambda x: x.foo, reverse=True)
- self.assertEqual([3, 2, 1],
- [x.foo for x in objlist])
-
- def test_serialization(self):
- class Foo(base.ObjectListBase, base.NovaObject):
- fields = {'objects': fields.ListOfObjectsField('Bar')}
-
- class Bar(base.NovaObject):
- fields = {'foo': fields.Field(fields.String())}
-
- obj = Foo(objects=[])
- for i in 'abc':
- bar = Bar(foo=i)
- obj.objects.append(bar)
-
- obj2 = base.NovaObject.obj_from_primitive(obj.obj_to_primitive())
- self.assertFalse(obj is obj2)
- self.assertEqual([x.foo for x in obj],
- [y.foo for y in obj2])
-
- def _test_object_list_version_mappings(self, list_obj_class):
- # Figure out what sort of object this list is for
- list_field = list_obj_class.fields['objects']
- item_obj_field = list_field._type._element_type
- item_obj_name = item_obj_field._type._obj_name
-
- # Look through all object classes of this type and make sure that
- # the versions we find are covered by the parent list class
- for item_class in base.NovaObject._obj_classes[item_obj_name]:
- self.assertIn(
- item_class.VERSION,
- list_obj_class.child_versions.values(),
- 'Version mapping is incomplete for %s' % (
- list_obj_class.__name__))
-
- def test_object_version_mappings(self):
- # Find all object list classes and make sure that they at least handle
- # all the current object versions
- for obj_classes in base.NovaObject._obj_classes.values():
- for obj_class in obj_classes:
- if issubclass(obj_class, base.ObjectListBase):
- self._test_object_list_version_mappings(obj_class)
-
- def test_list_changes(self):
- class Foo(base.ObjectListBase, base.NovaObject):
- fields = {'objects': fields.ListOfObjectsField('Bar')}
-
- class Bar(base.NovaObject):
- fields = {'foo': fields.StringField()}
-
- obj = Foo(objects=[])
- self.assertEqual(set(['objects']), obj.obj_what_changed())
- obj.objects.append(Bar(foo='test'))
- self.assertEqual(set(['objects']), obj.obj_what_changed())
- obj.obj_reset_changes()
- # This should still look dirty because the child is dirty
- self.assertEqual(set(['objects']), obj.obj_what_changed())
- obj.objects[0].obj_reset_changes()
- # This should now look clean because the child is clean
- self.assertEqual(set(), obj.obj_what_changed())
-
- def test_initialize_objects(self):
- class Foo(base.ObjectListBase, base.NovaObject):
- fields = {'objects': fields.ListOfObjectsField('Bar')}
-
- class Bar(base.NovaObject):
- fields = {'foo': fields.StringField()}
-
- obj = Foo()
- self.assertEqual([], obj.objects)
- self.assertEqual(set(), obj.obj_what_changed())
-
- def test_obj_repr(self):
- class Foo(base.ObjectListBase, base.NovaObject):
- fields = {'objects': fields.ListOfObjectsField('Bar')}
-
- class Bar(base.NovaObject):
- fields = {'uuid': fields.StringField()}
-
- obj = Foo(objects=[Bar(uuid='fake-uuid')])
- self.assertEqual('Foo(objects=[Bar(fake-uuid)])', repr(obj))
-
-
-class TestObjectSerializer(_BaseTestCase):
- def test_serialize_entity_primitive(self):
- ser = base.NovaObjectSerializer()
- for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
- self.assertEqual(thing, ser.serialize_entity(None, thing))
-
- def test_deserialize_entity_primitive(self):
- ser = base.NovaObjectSerializer()
- for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
- self.assertEqual(thing, ser.deserialize_entity(None, thing))
-
- def test_deserialize_entity_newer_version(self):
- ser = base.NovaObjectSerializer()
- ser._conductor = mock.Mock()
- ser._conductor.object_backport.return_value = 'backported'
- obj = MyObj()
- obj.VERSION = '1.25'
- primitive = obj.obj_to_primitive()
- result = ser.deserialize_entity(self.context, primitive)
- self.assertEqual('backported', result)
- ser._conductor.object_backport.assert_called_with(self.context,
- primitive,
- '1.6')
-
- def test_object_serialization(self):
- ser = base.NovaObjectSerializer()
- obj = MyObj()
- primitive = ser.serialize_entity(self.context, obj)
- self.assertIn('nova_object.name', primitive)
- obj2 = ser.deserialize_entity(self.context, primitive)
- self.assertIsInstance(obj2, MyObj)
- self.assertEqual(self.context, obj2._context)
-
- def test_object_serialization_iterables(self):
- ser = base.NovaObjectSerializer()
- obj = MyObj()
- for iterable in (list, tuple, set):
- thing = iterable([obj])
- primitive = ser.serialize_entity(self.context, thing)
- self.assertEqual(1, len(primitive))
- for item in primitive:
- self.assertNotIsInstance(item, base.NovaObject)
- thing2 = ser.deserialize_entity(self.context, primitive)
- self.assertEqual(1, len(thing2))
- for item in thing2:
- self.assertIsInstance(item, MyObj)
- # dict case
- thing = {'key': obj}
- primitive = ser.serialize_entity(self.context, thing)
- self.assertEqual(1, len(primitive))
- for item in primitive.itervalues():
- self.assertNotIsInstance(item, base.NovaObject)
- thing2 = ser.deserialize_entity(self.context, primitive)
- self.assertEqual(1, len(thing2))
- for item in thing2.itervalues():
- self.assertIsInstance(item, MyObj)
-
- # object-action updates dict case
- thing = {'foo': obj.obj_to_primitive()}
- primitive = ser.serialize_entity(self.context, thing)
- self.assertEqual(thing, primitive)
- thing2 = ser.deserialize_entity(self.context, thing)
- self.assertIsInstance(thing2['foo'], base.NovaObject)
-
-
-# NOTE(danms): The hashes in this list should only be changed if
-# they come with a corresponding version bump in the affected
-# objects
-object_data = {
- 'Agent': '1.0-c4ff8a833aee8ae44ab8aed1a171273d',
- 'AgentList': '1.0-31f07426a729311a42ff7f6246e76e25',
- 'Aggregate': '1.1-f5d477be06150529a9b2d27cc49030b5',
- 'AggregateList': '1.2-4b02a285b8612bfb86a96ff80052fb0a',
- 'BandwidthUsage': '1.1-bdab751673947f0ac7de108540a1a8ce',
- 'BandwidthUsageList': '1.1-76898106a9db393cd5f42c557389c507',
- 'BlockDeviceMapping': '1.4-9968ffe513e7672484b0f528b034cd0f',
- 'BlockDeviceMappingList': '1.5-83767968de6e91e9705bddaae02bc649',
- 'ComputeNode': '1.6-d2ea9b8f4a6e95ff6a683266eebddbff',
- 'ComputeNodeList': '1.6-205aa2ea08d49f6ce87df1fcd2407b4e',
- 'DNSDomain': '1.0-5bdc288d7c3b723ce86ede998fd5c9ba',
- 'DNSDomainList': '1.0-cfb3e7e82be661501c31099523154db4',
- 'EC2InstanceMapping': '1.0-627baaf4b12c9067200979bdc4558a99',
- 'EC2SnapshotMapping': '1.0-26cf315be1f8abab4289d4147671c836',
- 'EC2VolumeMapping': '1.0-2f8c3bf077c65a425294ec2b361c9143',
- 'FixedIP': '1.6-2472964d39e50da67202109eb85cd173',
- 'FixedIPList': '1.6-f2f740de66bc2d90627004bd311690ad',
- 'Flavor': '1.1-096cfd023c35d07542cf732fb29b45e4',
- 'FlavorList': '1.1-a3d5551267cb8f62ff38ded125900721',
- 'FloatingIP': '1.6-27eb68b7c9c620dd5f0561b5a3be0e82',
- 'FloatingIPList': '1.7-f376f63ed99243f9d90841b7f6732bbf',
- 'HVSpec': '1.0-c4d8377cc4fe519930e60c1d8265a142',
- 'Instance': '1.16-b00c09fb92ae80b393943f56e84abd9c',
- 'InstanceAction': '1.1-6b1d0a6dbd522b5a83c20757ec659663',
- 'InstanceActionEvent': '1.1-42dbdba74bd06e0619ca75cd3397cd1b',
- 'InstanceActionEventList': '1.0-1d5cc958171d6ce07383c2ad6208318e',
- 'InstanceActionList': '1.0-368410fdb8d69ae20c495308535d6266',
- 'InstanceExternalEvent': '1.0-f1134523654407a875fd59b80f759ee7',
- 'InstanceFault': '1.2-313438e37e9d358f3566c85f6ddb2d3e',
- 'InstanceFaultList': '1.1-aeb598ffd0cd6aa61fca7adf0f5e900d',
- 'InstanceGroup': '1.9-95ece99f092e8f4f88327cdbb44162c9',
- 'InstanceGroupList': '1.6-c6b78f3c9d9080d33c08667e80589817',
- 'InstanceInfoCache': '1.5-ef64b604498bfa505a8c93747a9d8b2f',
- 'InstanceList': '1.10-03dd7839cd11cff75c3661c9e4227900',
- 'InstanceNUMACell': '1.1-8d2a13c8360cc9ea1b68c9c6c4476857',
- 'InstanceNUMATopology': '1.1-86b95d263c4c68411d44c6741b8d2bb0',
- 'InstancePCIRequest': '1.1-e082d174f4643e5756ba098c47c1510f',
- 'InstancePCIRequests': '1.1-bc7c6684d8579ee49d6a3b8aef756918',
- 'KeyPair': '1.1-3410f51950d052d861c11946a6ae621a',
- 'KeyPairList': '1.0-71132a568cc5d078ba1748a9c02c87b8',
- 'Migration': '1.1-67c47726c2c71422058cd9d149d6d3ed',
- 'MigrationList': '1.1-8c5f678edc72a592d591a13b35e54353',
- 'MyObj': '1.6-55bfc22259fd3df239e4a49fa3552c93',
- 'MyOwnedObject': '1.0-0f3d6c028543d7f3715d121db5b8e298',
- 'Network': '1.2-2ea21ede5e45bb80e7b7ac7106915c4e',
- 'NetworkList': '1.2-aa4ad23f035b97a41732ea8b3445fc5e',
- 'NetworkRequest': '1.1-f31192f5a725017707f989585e12d7dc',
- 'NetworkRequestList': '1.1-beeab521ac9450f1f5ef4eaa945a783c',
- 'PciDevice': '1.2-29e35c3199f3b98ce66e5d1212612818',
- 'PciDeviceList': '1.1-2896df4f5b06579e5f35adba5fcae9db',
- 'Quotas': '1.1-7897deef00e6cd3095c8916f68d24418',
- 'QuotasNoOp': '1.1-4b06fd721c586b907ddd6543a00d6c2f',
- 'S3ImageMapping': '1.0-9225943a44a91ad0349b9fd8bd3f3ce2',
- 'SecurityGroup': '1.1-bba0e72865e0953793e796571692453b',
- 'SecurityGroupList': '1.0-528e6448adfeeb78921ebeda499ab72f',
- 'SecurityGroupRule': '1.1-a9175baf7664439af1a16c2010b55576',
- 'SecurityGroupRuleList': '1.1-667fca3a9928f23d2d10e61962c55f3c',
- 'Service': '1.5-82bbfd46a744a9c89bc44b47a1b81683',
- 'ServiceList': '1.3-4a1a5822dea268d0d7f892f5106bb2e1',
- 'TestSubclassedObject': '1.6-c63feb2f2533b7d075490c04a2cc10dd',
- 'VirtualInterface': '1.0-10fdac4c704102b6d57d6936d6d790d2',
- 'VirtualInterfaceList': '1.0-accbf02628a8063c1d885077a2bf49b6',
-}
-
-
-object_relationships = {
- 'BlockDeviceMapping': {'Instance': '1.16'},
- 'FixedIP': {'Instance': '1.16', 'Network': '1.2',
- 'VirtualInterface': '1.0',
- 'FloatingIPList': '1.7'},
- 'FloatingIP': {'FixedIP': '1.6'},
- 'Instance': {'InstanceFault': '1.2',
- 'InstanceInfoCache': '1.5',
- 'InstanceNUMATopology': '1.1',
- 'PciDeviceList': '1.1',
- 'SecurityGroupList': '1.0',
- 'InstancePCIRequests': '1.1'},
- 'MyObj': {'MyOwnedObject': '1.0'},
- 'SecurityGroupRule': {'SecurityGroup': '1.1'},
- 'Service': {'ComputeNode': '1.6'},
- 'TestSubclassedObject': {'MyOwnedObject': '1.0'}
-}
-
-
-class TestObjectVersions(test.TestCase):
- def _find_remotable_method(self, cls, thing, parent_was_remotable=False):
- """Follow a chain of remotable things down to the original function."""
- if isinstance(thing, classmethod):
- return self._find_remotable_method(cls, thing.__get__(None, cls))
- elif inspect.ismethod(thing) and hasattr(thing, 'remotable'):
- return self._find_remotable_method(cls, thing.original_fn,
- parent_was_remotable=True)
- elif parent_was_remotable:
- # We must be the first non-remotable thing underneath a stack of
- # remotable things (i.e. the actual implementation method)
- return thing
- else:
- # This means the top-level thing never hit a remotable layer
- return None
-
- def _get_fingerprint(self, obj_name):
- obj_class = base.NovaObject._obj_classes[obj_name][0]
- fields = obj_class.fields.items()
- fields.sort()
- methods = []
- for name in dir(obj_class):
- thing = getattr(obj_class, name)
- if inspect.ismethod(thing) or isinstance(thing, classmethod):
- method = self._find_remotable_method(obj_class, thing)
- if method:
- methods.append((name, inspect.getargspec(method)))
- methods.sort()
- # NOTE(danms): Things that need a version bump are any fields
- # and their types, or the signatures of any remotable methods.
- # Of course, these are just the mechanical changes we can detect,
- # but many other things may require a version bump (method behavior
- # and return value changes, for example).
- if hasattr(obj_class, 'child_versions'):
- relevant_data = (fields, methods, obj_class.child_versions)
- else:
- relevant_data = (fields, methods)
- fingerprint = '%s-%s' % (obj_class.VERSION,
- hashlib.md5(str(relevant_data)).hexdigest())
- return fingerprint
-
- def test_versions(self):
- fingerprints = {}
- for obj_name in base.NovaObject._obj_classes:
- fingerprints[obj_name] = self._get_fingerprint(obj_name)
-
- if os.getenv('GENERATE_HASHES'):
- file('object_hashes.txt', 'w').write(
- pprint.pformat(fingerprints))
- raise test.TestingException(
- 'Generated hashes in object_hashes.txt')
-
- stored = set(object_data.items())
- computed = set(fingerprints.items())
- changed = stored.symmetric_difference(computed)
- expected = {}
- actual = {}
- for name, hash in changed:
- expected[name] = object_data.get(name)
- actual[name] = fingerprints.get(name)
-
- self.assertEqual(expected, actual,
- 'Some objects have changed; please make sure the '
- 'versions have been bumped, and then update their '
- 'hashes here.')
-
- def _build_tree(self, tree, obj_class):
- obj_name = obj_class.obj_name()
- if obj_name in tree:
- return
-
- for name, field in obj_class.fields.items():
- if isinstance(field._type, fields.Object):
- sub_obj_name = field._type._obj_name
- sub_obj_class = base.NovaObject._obj_classes[sub_obj_name][0]
- self._build_tree(tree, sub_obj_class)
- tree.setdefault(obj_name, {})
- tree[obj_name][sub_obj_name] = sub_obj_class.VERSION
-
- def test_relationships(self):
- tree = {}
- for obj_name in base.NovaObject._obj_classes.keys():
- self._build_tree(tree, base.NovaObject._obj_classes[obj_name][0])
-
- stored = set([(x, str(y)) for x, y in object_relationships.items()])
- computed = set([(x, str(y)) for x, y in tree.items()])
- changed = stored.symmetric_difference(computed)
- expected = {}
- actual = {}
- for name, deps in changed:
- expected[name] = object_relationships.get(name)
- actual[name] = tree.get(name)
- self.assertEqual(expected, actual,
- 'Some objects have changed dependencies. '
- 'Please make sure to bump the versions of '
- 'parent objects and provide a rule in their '
- 'obj_make_compatible() routines to backlevel '
- 'the child object.')
-
- def test_obj_make_compatible(self):
- # Iterate all object classes and verify that we can run
- # obj_make_compatible with every older version than current.
- # This doesn't actually test the data conversions, but it at least
- # makes sure the method doesn't blow up on something basic like
- # expecting the wrong version format.
- for obj_name in base.NovaObject._obj_classes:
- obj_class = base.NovaObject._obj_classes[obj_name][0]
- version = utils.convert_version_to_tuple(obj_class.VERSION)
- for n in range(version[1]):
- test_version = '%d.%d' % (version[0], n)
- LOG.info('testing obj: %s version: %s' %
- (obj_name, test_version))
- obj_class().obj_to_primitive(target_version=test_version)
diff --git a/nova/tests/objects/test_pci_device.py b/nova/tests/objects/test_pci_device.py
deleted file mode 100644
index 521753a1b0..0000000000
--- a/nova/tests/objects/test_pci_device.py
+++ /dev/null
@@ -1,254 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-from nova import context
-from nova import db
-from nova.objects import instance
-from nova.objects import pci_device
-from nova.tests.objects import test_objects
-
-dev_dict = {
- 'compute_node_id': 1,
- 'address': 'a',
- 'product_id': 'p',
- 'vendor_id': 'v',
- 'status': 'available'}
-
-
-fake_db_dev = {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': None,
- 'id': 1,
- 'compute_node_id': 1,
- 'address': 'a',
- 'vendor_id': 'v',
- 'product_id': 'p',
- 'dev_type': 't',
- 'status': 'available',
- 'dev_id': 'i',
- 'label': 'l',
- 'instance_uuid': None,
- 'extra_info': '{}',
- 'request_id': None,
- }
-
-
-fake_db_dev_1 = {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': None,
- 'id': 2,
- 'compute_node_id': 1,
- 'address': 'a1',
- 'vendor_id': 'v1',
- 'product_id': 'p1',
- 'dev_type': 't',
- 'status': 'available',
- 'dev_id': 'i',
- 'label': 'l',
- 'instance_uuid': None,
- 'extra_info': '{}',
- 'request_id': None,
- }
-
-
-class _TestPciDeviceObject(object):
- def _create_fake_instance(self):
- self.inst = instance.Instance()
- self.inst.uuid = 'fake-inst-uuid'
- self.inst.pci_devices = pci_device.PciDeviceList()
-
- def _create_fake_pci_device(self):
- ctxt = context.get_admin_context()
- self.mox.StubOutWithMock(db, 'pci_device_get_by_addr')
- db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev)
- self.mox.ReplayAll()
- self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
-
- def test_create_pci_device(self):
- self.pci_device = pci_device.PciDevice.create(dev_dict)
- self.assertEqual(self.pci_device.product_id, 'p')
- self.assertEqual(self.pci_device.obj_what_changed(),
- set(['compute_node_id', 'product_id', 'vendor_id',
- 'status', 'address', 'extra_info']))
-
- def test_pci_device_extra_info(self):
- self.dev_dict = copy.copy(dev_dict)
- self.dev_dict['k1'] = 'v1'
- self.dev_dict['k2'] = 'v2'
- self.pci_device = pci_device.PciDevice.create(self.dev_dict)
- extra_value = self.pci_device.extra_info
- self.assertEqual(extra_value.get('k1'), 'v1')
- self.assertEqual(set(extra_value.keys()), set(('k1', 'k2')))
- self.assertEqual(self.pci_device.obj_what_changed(),
- set(['compute_node_id', 'address', 'product_id',
- 'vendor_id', 'status', 'extra_info']))
-
- def test_update_device(self):
- self.pci_device = pci_device.PciDevice.create(dev_dict)
- self.pci_device.obj_reset_changes()
- changes = {'product_id': 'p2', 'vendor_id': 'v2'}
- self.pci_device.update_device(changes)
- self.assertEqual(self.pci_device.vendor_id, 'v2')
- self.assertEqual(self.pci_device.obj_what_changed(),
- set(['vendor_id', 'product_id']))
-
- def test_update_device_same_value(self):
- self.pci_device = pci_device.PciDevice.create(dev_dict)
- self.pci_device.obj_reset_changes()
- changes = {'product_id': 'p', 'vendor_id': 'v2'}
- self.pci_device.update_device(changes)
- self.assertEqual(self.pci_device.product_id, 'p')
- self.assertEqual(self.pci_device.vendor_id, 'v2')
- self.assertEqual(self.pci_device.obj_what_changed(),
- set(['vendor_id', 'product_id']))
-
- def test_get_by_dev_addr(self):
- ctxt = context.get_admin_context()
- self.mox.StubOutWithMock(db, 'pci_device_get_by_addr')
- db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev)
- self.mox.ReplayAll()
- self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
- self.assertEqual(self.pci_device.product_id, 'p')
- self.assertEqual(self.pci_device.obj_what_changed(), set())
- self.assertRemotes()
-
- def test_get_by_dev_id(self):
- ctxt = context.get_admin_context()
- self.mox.StubOutWithMock(db, 'pci_device_get_by_id')
- db.pci_device_get_by_id(ctxt, 1).AndReturn(fake_db_dev)
- self.mox.ReplayAll()
- self.pci_device = pci_device.PciDevice.get_by_dev_id(ctxt, 1)
- self.assertEqual(self.pci_device.product_id, 'p')
- self.assertEqual(self.pci_device.obj_what_changed(), set())
- self.assertRemotes()
-
- def test_save(self):
- ctxt = context.get_admin_context()
- self._create_fake_pci_device()
- return_dev = dict(fake_db_dev, status='available',
- instance_uuid='fake-uuid-3')
- self.pci_device.status = 'allocated'
- self.pci_device.instance_uuid = 'fake-uuid-2'
- expected_updates = dict(status='allocated',
- instance_uuid='fake-uuid-2')
- self.mox.StubOutWithMock(db, 'pci_device_update')
- db.pci_device_update(ctxt, 1, 'a',
- expected_updates).AndReturn(return_dev)
- self.mox.ReplayAll()
- self.pci_device.save(ctxt)
- self.assertEqual(self.pci_device.status, 'available')
- self.assertEqual(self.pci_device.instance_uuid,
- 'fake-uuid-3')
- self.assertRemotes()
-
- def test_save_no_extra_info(self):
- return_dev = dict(fake_db_dev, status='available',
- instance_uuid='fake-uuid-3')
-
- def _fake_update(ctxt, node_id, addr, updates):
- self.extra_info = updates.get('extra_info')
- return return_dev
-
- ctxt = context.get_admin_context()
- self.stubs.Set(db, 'pci_device_update', _fake_update)
- self.pci_device = pci_device.PciDevice.create(dev_dict)
- self.pci_device.save(ctxt)
- self.assertEqual(self.extra_info, '{}')
-
- def test_save_removed(self):
- ctxt = context.get_admin_context()
- self._create_fake_pci_device()
- self.pci_device.status = 'removed'
- self.mox.StubOutWithMock(db, 'pci_device_destroy')
- db.pci_device_destroy(ctxt, 1, 'a')
- self.mox.ReplayAll()
- self.pci_device.save(ctxt)
- self.assertEqual(self.pci_device.status, 'deleted')
- self.assertRemotes()
-
- def test_save_deleted(self):
- def _fake_destroy(ctxt, node_id, addr):
- self.called = True
-
- def _fake_update(ctxt, node_id, addr, updates):
- self.called = True
- ctxt = context.get_admin_context()
- self.stubs.Set(db, 'pci_device_destroy', _fake_destroy)
- self.stubs.Set(db, 'pci_device_update', _fake_update)
- self._create_fake_pci_device()
- self.pci_device.status = 'deleted'
- self.called = False
- self.pci_device.save(ctxt)
- self.assertEqual(self.called, False)
-
-
-class TestPciDeviceObject(test_objects._LocalTest,
- _TestPciDeviceObject):
- pass
-
-
-class TestPciDeviceObjectRemote(test_objects._RemoteTest,
- _TestPciDeviceObject):
- pass
-
-
-fake_pci_devs = [fake_db_dev, fake_db_dev_1]
-
-
-class _TestPciDeviceListObject(object):
- def test_get_by_compute_node(self):
- ctxt = context.get_admin_context()
- self.mox.StubOutWithMock(db, 'pci_device_get_all_by_node')
- db.pci_device_get_all_by_node(ctxt, 1).AndReturn(fake_pci_devs)
- self.mox.ReplayAll()
- devs = pci_device.PciDeviceList.get_by_compute_node(ctxt, 1)
- for i in range(len(fake_pci_devs)):
- self.assertIsInstance(devs[i], pci_device.PciDevice)
- self.assertEqual(fake_pci_devs[i]['vendor_id'], devs[i].vendor_id)
- self.assertRemotes()
-
- def test_get_by_instance_uuid(self):
- ctxt = context.get_admin_context()
- fake_db_1 = dict(fake_db_dev, address='a1',
- status='allocated', instance_uuid='1')
- fake_db_2 = dict(fake_db_dev, address='a2',
- status='allocated', instance_uuid='1')
- self.mox.StubOutWithMock(db, 'pci_device_get_all_by_instance_uuid')
- db.pci_device_get_all_by_instance_uuid(ctxt, '1').AndReturn(
- [fake_db_1, fake_db_2])
- self.mox.ReplayAll()
- devs = pci_device.PciDeviceList.get_by_instance_uuid(ctxt, '1')
- self.assertEqual(len(devs), 2)
- for i in range(len(fake_pci_devs)):
- self.assertIsInstance(devs[i], pci_device.PciDevice)
- self.assertEqual(devs[0].vendor_id, 'v')
- self.assertEqual(devs[1].vendor_id, 'v')
- self.assertRemotes()
-
-
-class TestPciDeviceListObject(test_objects._LocalTest,
- _TestPciDeviceListObject):
- pass
-
-
-class TestPciDeviceListObjectRemote(test_objects._RemoteTest,
- _TestPciDeviceListObject):
- pass
diff --git a/nova/tests/objects/test_quotas.py b/nova/tests/objects/test_quotas.py
deleted file mode 100644
index c2a9892a7d..0000000000
--- a/nova/tests/objects/test_quotas.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright 2013 Rackspace Hosting.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import context
-from nova.objects import quotas as quotas_obj
-from nova import quota
-from nova import test
-from nova.tests import fake_instance
-from nova.tests.objects import test_objects
-
-
-QUOTAS = quota.QUOTAS
-
-
-class TestQuotasModule(test.NoDBTestCase):
- def setUp(self):
- super(TestQuotasModule, self).setUp()
- self.context = context.RequestContext('fake_user1', 'fake_proj1')
- self.instance = fake_instance.fake_db_instance(
- project_id='fake_proj2', user_id='fake_user2')
-
- def test_ids_from_instance_non_admin(self):
- project_id, user_id = quotas_obj.ids_from_instance(
- self.context, self.instance)
- self.assertEqual('fake_user2', user_id)
- self.assertEqual('fake_proj1', project_id)
-
- def test_ids_from_instance_admin(self):
- project_id, user_id = quotas_obj.ids_from_instance(
- self.context.elevated(), self.instance)
- self.assertEqual('fake_user2', user_id)
- self.assertEqual('fake_proj2', project_id)
-
-
-class _TestQuotasObject(object):
- def setUp(self):
- super(_TestQuotasObject, self).setUp()
- self.context = context.RequestContext('fake_user1', 'fake_proj1')
- self.instance = fake_instance.fake_db_instance(
- project_id='fake_proj2', user_id='fake_user2')
-
- def test_from_reservations(self):
- fake_reservations = ['1', '2']
- quotas = quotas_obj.Quotas.from_reservations(
- self.context, fake_reservations)
- self.assertEqual(self.context, quotas._context)
- self.assertEqual(fake_reservations, quotas.reservations)
- self.assertIsNone(quotas.project_id)
- self.assertIsNone(quotas.user_id)
-
- def test_from_reservations_bogus(self):
- fake_reservations = [_TestQuotasObject, _TestQuotasObject]
- self.assertRaises(ValueError,
- quotas_obj.Quotas.from_reservations,
- self.context, fake_reservations)
-
- def test_from_reservations_instance(self):
- fake_reservations = ['1', '2']
- quotas = quotas_obj.Quotas.from_reservations(
- self.context, fake_reservations,
- instance=self.instance)
- self.assertEqual(self.context, quotas._context)
- self.assertEqual(fake_reservations, quotas.reservations)
- self.assertEqual('fake_proj1', quotas.project_id)
- self.assertEqual('fake_user2', quotas.user_id)
-
- def test_from_reservations_instance_admin(self):
- fake_reservations = ['1', '2']
- elevated = self.context.elevated()
- quotas = quotas_obj.Quotas.from_reservations(
- elevated, fake_reservations,
- instance=self.instance)
- self.assertEqual(elevated, quotas._context)
- self.assertEqual(fake_reservations, quotas.reservations)
- self.assertEqual('fake_proj2', quotas.project_id)
- self.assertEqual('fake_user2', quotas.user_id)
-
- def test_reserve(self):
- fake_reservations = ['1', '2']
- quotas = quotas_obj.Quotas()
-
- self.mox.StubOutWithMock(QUOTAS, 'reserve')
- QUOTAS.reserve(self.context, expire='expire',
- project_id='project_id', user_id='user_id',
- moo='cow').AndReturn(fake_reservations)
-
- self.mox.ReplayAll()
- quotas.reserve(self.context, expire='expire',
- project_id='project_id', user_id='user_id',
- moo='cow')
- self.assertEqual(self.context, quotas._context)
- self.assertEqual(fake_reservations, quotas.reservations)
- self.assertEqual('project_id', quotas.project_id)
- self.assertEqual('user_id', quotas.user_id)
-
- def test_commit(self):
- fake_reservations = ['1', '2']
- quotas = quotas_obj.Quotas.from_reservations(
- self.context, fake_reservations)
-
- self.mox.StubOutWithMock(QUOTAS, 'commit')
- QUOTAS.commit(self.context, fake_reservations,
- project_id=None, user_id=None)
-
- self.mox.ReplayAll()
- quotas.commit()
- self.assertIsNone(quotas.reservations)
-
- def test_commit_none_reservations(self):
- quotas = quotas_obj.Quotas.from_reservations(self.context, None)
- self.mox.StubOutWithMock(QUOTAS, 'commit')
- self.mox.ReplayAll()
- quotas.commit()
-
- def test_rollback(self):
- fake_reservations = ['1', '2']
- quotas = quotas_obj.Quotas.from_reservations(
- self.context, fake_reservations)
-
- self.mox.StubOutWithMock(QUOTAS, 'rollback')
- QUOTAS.rollback(self.context, fake_reservations,
- project_id=None, user_id=None)
-
- self.mox.ReplayAll()
- quotas.rollback()
- self.assertIsNone(quotas.reservations)
-
- def test_rollback_none_reservations(self):
- quotas = quotas_obj.Quotas.from_reservations(self.context, None)
- self.mox.StubOutWithMock(QUOTAS, 'rollback')
- self.mox.ReplayAll()
- quotas.rollback()
-
- @mock.patch('nova.db.quota_create')
- def test_create_limit(self, mock_create):
- quotas_obj.Quotas.create_limit(self.context, 'fake-project',
- 'foo', 10, user_id='user')
- mock_create.assert_called_once_with(self.context, 'fake-project',
- 'foo', 10, user_id='user')
-
- @mock.patch('nova.db.quota_update')
- def test_update_limit(self, mock_update):
- quotas_obj.Quotas.update_limit(self.context, 'fake-project',
- 'foo', 10, user_id='user')
- mock_update.assert_called_once_with(self.context, 'fake-project',
- 'foo', 10, user_id='user')
-
-
-class TestQuotasObject(_TestQuotasObject, test_objects._LocalTest):
- pass
-
-
-class TestRemoteQuotasObject(_TestQuotasObject, test_objects._RemoteTest):
- pass
diff --git a/nova/tests/objects/test_security_group.py b/nova/tests/objects/test_security_group.py
deleted file mode 100644
index e957017db3..0000000000
--- a/nova/tests/objects/test_security_group.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova import db
-from nova.objects import instance
-from nova.objects import security_group
-from nova.tests.objects import test_objects
-
-
-fake_secgroup = {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': None,
- 'id': 1,
- 'name': 'fake-name',
- 'description': 'fake-desc',
- 'user_id': 'fake-user',
- 'project_id': 'fake-project',
- }
-
-
-class _TestSecurityGroupObject(object):
- def _fix_deleted(self, db_secgroup):
- # NOTE(danms): Account for the difference in 'deleted'
- return dict(db_secgroup.items(), deleted=False)
-
- def test_get(self):
- self.mox.StubOutWithMock(db, 'security_group_get')
- db.security_group_get(self.context, 1).AndReturn(fake_secgroup)
- self.mox.ReplayAll()
- secgroup = security_group.SecurityGroup.get(self.context, 1)
- self.assertEqual(self._fix_deleted(fake_secgroup),
- dict(secgroup.items()))
- self.assertEqual(secgroup.obj_what_changed(), set())
- self.assertRemotes()
-
- def test_get_by_name(self):
- self.mox.StubOutWithMock(db, 'security_group_get_by_name')
- db.security_group_get_by_name(self.context, 'fake-project',
- 'fake-name').AndReturn(fake_secgroup)
- self.mox.ReplayAll()
- secgroup = security_group.SecurityGroup.get_by_name(self.context,
- 'fake-project',
- 'fake-name')
- self.assertEqual(self._fix_deleted(fake_secgroup),
- dict(secgroup.items()))
- self.assertEqual(secgroup.obj_what_changed(), set())
- self.assertRemotes()
-
- def test_in_use(self):
- self.mox.StubOutWithMock(db, 'security_group_in_use')
- db.security_group_in_use(self.context, 123).AndReturn(True)
- self.mox.ReplayAll()
- secgroup = security_group.SecurityGroup()
- secgroup.id = 123
- self.assertTrue(secgroup.in_use(self.context))
- self.assertRemotes()
-
- def test_save(self):
- self.mox.StubOutWithMock(db, 'security_group_update')
- updated_secgroup = dict(fake_secgroup, project_id='changed')
- db.security_group_update(self.context, 1,
- {'description': 'foobar'}).AndReturn(
- updated_secgroup)
- self.mox.ReplayAll()
- secgroup = security_group.SecurityGroup._from_db_object(
- self.context, security_group.SecurityGroup(), fake_secgroup)
- secgroup.description = 'foobar'
- secgroup.save(self.context)
- self.assertEqual(self._fix_deleted(updated_secgroup),
- dict(secgroup.items()))
- self.assertEqual(secgroup.obj_what_changed(), set())
- self.assertRemotes()
-
- def test_save_no_changes(self):
- self.mox.StubOutWithMock(db, 'security_group_update')
- self.mox.ReplayAll()
- secgroup = security_group.SecurityGroup._from_db_object(
- self.context, security_group.SecurityGroup(), fake_secgroup)
- secgroup.save(self.context)
-
- def test_refresh(self):
- updated_secgroup = dict(fake_secgroup, description='changed')
- self.mox.StubOutWithMock(db, 'security_group_get')
- db.security_group_get(self.context, 1).AndReturn(updated_secgroup)
- self.mox.ReplayAll()
- secgroup = security_group.SecurityGroup._from_db_object(
- self.context, security_group.SecurityGroup(), fake_secgroup)
- secgroup.refresh(self.context)
- self.assertEqual(self._fix_deleted(updated_secgroup),
- dict(secgroup.items()))
- self.assertEqual(secgroup.obj_what_changed(), set())
- self.assertRemotes()
-
-
-class TestSecurityGroupObject(test_objects._LocalTest,
- _TestSecurityGroupObject):
- pass
-
-
-class TestSecurityGroupObjectRemote(test_objects._RemoteTest,
- _TestSecurityGroupObject):
- pass
-
-
-fake_secgroups = [
- dict(fake_secgroup, id=1, name='secgroup1'),
- dict(fake_secgroup, id=2, name='secgroup2'),
- ]
-
-
-class _TestSecurityGroupListObject(object):
- def test_get_all(self):
- self.mox.StubOutWithMock(db, 'security_group_get_all')
- db.security_group_get_all(self.context).AndReturn(fake_secgroups)
- self.mox.ReplayAll()
- secgroup_list = security_group.SecurityGroupList.get_all(self.context)
- for i in range(len(fake_secgroups)):
- self.assertIsInstance(secgroup_list[i],
- security_group.SecurityGroup)
- self.assertEqual(fake_secgroups[i]['id'],
- secgroup_list[i]['id'])
- self.assertEqual(secgroup_list[i]._context, self.context)
-
- def test_get_by_project(self):
- self.mox.StubOutWithMock(db, 'security_group_get_by_project')
- db.security_group_get_by_project(self.context,
- 'fake-project').AndReturn(
- fake_secgroups)
- self.mox.ReplayAll()
- secgroup_list = security_group.SecurityGroupList.get_by_project(
- self.context, 'fake-project')
- for i in range(len(fake_secgroups)):
- self.assertIsInstance(secgroup_list[i],
- security_group.SecurityGroup)
- self.assertEqual(fake_secgroups[i]['id'],
- secgroup_list[i]['id'])
-
- def test_get_by_instance(self):
- inst = instance.Instance()
- inst.uuid = 'fake-inst-uuid'
- self.mox.StubOutWithMock(db, 'security_group_get_by_instance')
- db.security_group_get_by_instance(self.context,
- 'fake-inst-uuid').AndReturn(
- fake_secgroups)
- self.mox.ReplayAll()
- secgroup_list = security_group.SecurityGroupList.get_by_instance(
- self.context, inst)
- for i in range(len(fake_secgroups)):
- self.assertIsInstance(secgroup_list[i],
- security_group.SecurityGroup)
- self.assertEqual(fake_secgroups[i]['id'],
- secgroup_list[i]['id'])
-
-
-class TestSecurityGroupListObject(test_objects._LocalTest,
- _TestSecurityGroupListObject):
- pass
-
-
-class TestSecurityGroupListObjectRemote(test_objects._RemoteTest,
- _TestSecurityGroupListObject):
- pass
diff --git a/nova/tests/objects/test_security_group_rule.py b/nova/tests/objects/test_security_group_rule.py
deleted file mode 100644
index e2c5294403..0000000000
--- a/nova/tests/objects/test_security_group_rule.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import db
-from nova import exception
-from nova import objects
-from nova.tests.objects import test_objects
-from nova.tests.objects import test_security_group
-
-fake_rule = {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': False,
- 'id': 1,
- 'protocol': 'tcp',
- 'from_port': 22,
- 'to_port': 22,
- 'cidr': '0.0.0.0/0',
- }
-
-
-class _TestSecurityGroupRuleObject(object):
- def test_get_by_id(self):
- with mock.patch.object(db, 'security_group_rule_get') as sgrg:
- sgrg.return_value = fake_rule
- rule = objects.SecurityGroupRule.get_by_id(
- self.context, 1)
- for field in fake_rule:
- if field == 'cidr':
- self.assertEqual(fake_rule[field], str(rule[field]))
- else:
- self.assertEqual(fake_rule[field], rule[field])
- sgrg.assert_called_with(self.context, 1)
-
- def test_get_by_security_group(self):
- secgroup = objects.SecurityGroup()
- secgroup.id = 123
- rule = dict(fake_rule)
- rule['grantee_group'] = dict(test_security_group.fake_secgroup, id=123)
- stupid_method = 'security_group_rule_get_by_security_group'
- with mock.patch.object(db, stupid_method) as sgrgbsg:
- sgrgbsg.return_value = [rule]
- rules = (objects.SecurityGroupRuleList.
- get_by_security_group(self.context, secgroup))
- self.assertEqual(1, len(rules))
- self.assertEqual(123, rules[0].grantee_group.id)
-
- @mock.patch.object(db, 'security_group_rule_create',
- return_value=fake_rule)
- def test_create(self, db_mock):
- rule = objects.SecurityGroupRule()
- rule.protocol = 'tcp'
- secgroup = objects.SecurityGroup()
- secgroup.id = 123
- parentgroup = objects.SecurityGroup()
- parentgroup.id = 223
- rule.grantee_group = secgroup
- rule.parent_group = parentgroup
- rule.create(self.context)
- updates = db_mock.call_args[0][1]
- self.assertEqual(fake_rule['id'], rule.id)
- self.assertEqual(updates['group_id'], rule.grantee_group.id)
- self.assertEqual(updates['parent_group_id'], rule.parent_group.id)
-
- @mock.patch.object(db, 'security_group_rule_create',
- return_value=fake_rule)
- def test_set_id_failure(self, db_mock):
- rule = objects.SecurityGroupRule()
- rule.create(self.context)
- self.assertRaises(exception.ReadOnlyFieldError, setattr,
- rule, 'id', 124)
-
-
-class TestSecurityGroupRuleObject(test_objects._LocalTest,
- _TestSecurityGroupRuleObject):
- pass
-
-
-class TestSecurityGroupRuleObjectRemote(test_objects._RemoteTest,
- _TestSecurityGroupRuleObject):
- pass
diff --git a/nova/tests/objects/test_service.py b/nova/tests/objects/test_service.py
deleted file mode 100644
index 7494dc83bf..0000000000
--- a/nova/tests/objects/test_service.py
+++ /dev/null
@@ -1,226 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo.utils import timeutils
-
-from nova import db
-from nova import exception
-from nova.objects import aggregate
-from nova.objects import service
-from nova.tests.objects import test_compute_node
-from nova.tests.objects import test_objects
-
-NOW = timeutils.utcnow().replace(microsecond=0)
-fake_service = {
- 'created_at': NOW,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': False,
- 'id': 123,
- 'host': 'fake-host',
- 'binary': 'fake-service',
- 'topic': 'fake-service-topic',
- 'report_count': 1,
- 'disabled': False,
- 'disabled_reason': None,
- }
-
-OPTIONAL = ['availability_zone', 'compute_node']
-
-
-class _TestServiceObject(object):
- def supported_hv_specs_comparator(self, expected, obj_val):
- obj_val = [inst.to_list() for inst in obj_val]
- self.json_comparator(expected, obj_val)
-
- def comparators(self):
- return {'stats': self.json_comparator,
- 'host_ip': self.str_comparator,
- 'supported_hv_specs': self.supported_hv_specs_comparator}
-
- def subs(self):
- return {'supported_hv_specs': 'supported_instances'}
-
- def _test_query(self, db_method, obj_method, *args, **kwargs):
- self.mox.StubOutWithMock(db, db_method)
- getattr(db, db_method)(self.context, *args, **kwargs).AndReturn(
- fake_service)
- self.mox.ReplayAll()
- obj = getattr(service.Service, obj_method)(self.context, *args,
- **kwargs)
- self.compare_obj(obj, fake_service, allow_missing=OPTIONAL)
-
- def test_get_by_id(self):
- self._test_query('service_get', 'get_by_id', 123)
-
- def test_get_by_host_and_topic(self):
- self._test_query('service_get_by_host_and_topic',
- 'get_by_host_and_topic', 'fake-host', 'fake-topic')
-
- def test_get_by_compute_host(self):
- self._test_query('service_get_by_compute_host', 'get_by_compute_host',
- 'fake-host')
-
- def test_get_by_args(self):
- self._test_query('service_get_by_args', 'get_by_args', 'fake-host',
- 'fake-service')
-
- def test_with_compute_node(self):
- self.mox.StubOutWithMock(db, 'service_get')
- self.mox.StubOutWithMock(db, 'compute_node_get_by_service_id')
- _fake_service = dict(
- fake_service, compute_node=[test_compute_node.fake_compute_node])
- db.service_get(self.context, 123).AndReturn(_fake_service)
- self.mox.ReplayAll()
- service_obj = service.Service.get_by_id(self.context, 123)
- self.assertTrue(service_obj.obj_attr_is_set('compute_node'))
- self.compare_obj(service_obj.compute_node,
- test_compute_node.fake_compute_node,
- subs=self.subs(),
- allow_missing=OPTIONAL,
- comparators=self.comparators())
-
- def test_create(self):
- self.mox.StubOutWithMock(db, 'service_create')
- db.service_create(self.context, {'host': 'fake-host'}).AndReturn(
- fake_service)
- self.mox.ReplayAll()
- service_obj = service.Service()
- service_obj.host = 'fake-host'
- service_obj.create(self.context)
- self.assertEqual(fake_service['id'], service_obj.id)
-
- def test_recreate_fails(self):
- self.mox.StubOutWithMock(db, 'service_create')
- db.service_create(self.context, {'host': 'fake-host'}).AndReturn(
- fake_service)
- self.mox.ReplayAll()
- service_obj = service.Service()
- service_obj.host = 'fake-host'
- service_obj.create(self.context)
- self.assertRaises(exception.ObjectActionError, service_obj.create,
- self.context)
-
- def test_save(self):
- self.mox.StubOutWithMock(db, 'service_update')
- db.service_update(self.context, 123, {'host': 'fake-host'}).AndReturn(
- fake_service)
- self.mox.ReplayAll()
- service_obj = service.Service()
- service_obj.id = 123
- service_obj.host = 'fake-host'
- service_obj.save(self.context)
-
- @mock.patch.object(db, 'service_create',
- return_value=fake_service)
- def test_set_id_failure(self, db_mock):
- service_obj = service.Service()
- service_obj.create(self.context)
- self.assertRaises(exception.ReadOnlyFieldError, setattr,
- service_obj, 'id', 124)
-
- def _test_destroy(self):
- self.mox.StubOutWithMock(db, 'service_destroy')
- db.service_destroy(self.context, 123)
- self.mox.ReplayAll()
- service_obj = service.Service()
- service_obj.id = 123
- service_obj.destroy(self.context)
-
- def test_destroy(self):
- # The test harness needs db.service_destroy to work,
- # so avoid leaving it broken here after we're done
- orig_service_destroy = db.service_destroy
- try:
- self._test_destroy()
- finally:
- db.service_destroy = orig_service_destroy
-
- def test_get_by_topic(self):
- self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
- db.service_get_all_by_topic(self.context, 'fake-topic').AndReturn(
- [fake_service])
- self.mox.ReplayAll()
- services = service.ServiceList.get_by_topic(self.context, 'fake-topic')
- self.assertEqual(1, len(services))
- self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
-
- def test_get_by_host(self):
- self.mox.StubOutWithMock(db, 'service_get_all_by_host')
- db.service_get_all_by_host(self.context, 'fake-host').AndReturn(
- [fake_service])
- self.mox.ReplayAll()
- services = service.ServiceList.get_by_host(self.context, 'fake-host')
- self.assertEqual(1, len(services))
- self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
-
- def test_get_all(self):
- self.mox.StubOutWithMock(db, 'service_get_all')
- db.service_get_all(self.context, disabled=False).AndReturn(
- [fake_service])
- self.mox.ReplayAll()
- services = service.ServiceList.get_all(self.context, disabled=False)
- self.assertEqual(1, len(services))
- self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
-
- def test_get_all_with_az(self):
- self.mox.StubOutWithMock(db, 'service_get_all')
- self.mox.StubOutWithMock(aggregate.AggregateList,
- 'get_by_metadata_key')
- db.service_get_all(self.context, disabled=None).AndReturn(
- [dict(fake_service, topic='compute')])
- agg = aggregate.Aggregate()
- agg.name = 'foo'
- agg.metadata = {'availability_zone': 'test-az'}
- agg.create(self.context)
- agg.hosts = [fake_service['host']]
- aggregate.AggregateList.get_by_metadata_key(self.context,
- 'availability_zone', hosts=set(agg.hosts)).AndReturn([agg])
- self.mox.ReplayAll()
- services = service.ServiceList.get_all(self.context, set_zones=True)
- self.assertEqual(1, len(services))
- self.assertEqual('test-az', services[0].availability_zone)
-
- def test_compute_node(self):
- self.mox.StubOutWithMock(db, 'compute_node_get_by_service_id')
- db.compute_node_get_by_service_id(self.context, 123).AndReturn(
- test_compute_node.fake_compute_node)
- self.mox.ReplayAll()
- service_obj = service.Service()
- service_obj._context = self.context
- service_obj.id = 123
- self.compare_obj(service_obj.compute_node,
- test_compute_node.fake_compute_node,
- subs=self.subs(),
- allow_missing=OPTIONAL,
- comparators=self.comparators())
- # Make sure it doesn't re-fetch this
- service_obj.compute_node
-
- def test_load_when_orphaned(self):
- service_obj = service.Service()
- service_obj.id = 123
- self.assertRaises(exception.OrphanedObjectError,
- getattr, service_obj, 'compute_node')
-
-
-class TestServiceObject(test_objects._LocalTest,
- _TestServiceObject):
- pass
-
-
-class TestRemoteServiceObject(test_objects._RemoteTest,
- _TestServiceObject):
- pass
diff --git a/nova/tests/objects/test_virtual_interface.py b/nova/tests/objects/test_virtual_interface.py
deleted file mode 100644
index 07e60c76f8..0000000000
--- a/nova/tests/objects/test_virtual_interface.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright (C) 2014, Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import db
-from nova.objects import virtual_interface as vif_obj
-from nova.tests.objects import test_objects
-
-
-fake_vif = {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': 0,
- 'id': 1,
- 'address': '00:00:00:00:00:00',
- 'network_id': 123,
- 'instance_uuid': 'fake-uuid',
- 'uuid': 'fake-uuid-2',
-}
-
-
-class _TestVirtualInterface(object):
- @staticmethod
- def _compare(test, db, obj):
- for field, value in db.items():
- test.assertEqual(db[field], obj[field])
-
- def test_get_by_id(self):
- with mock.patch.object(db, 'virtual_interface_get') as get:
- get.return_value = fake_vif
- vif = vif_obj.VirtualInterface.get_by_id(self.context, 1)
- self._compare(self, fake_vif, vif)
-
- def test_get_by_uuid(self):
- with mock.patch.object(db, 'virtual_interface_get_by_uuid') as get:
- get.return_value = fake_vif
- vif = vif_obj.VirtualInterface.get_by_uuid(self.context,
- 'fake-uuid-2')
- self._compare(self, fake_vif, vif)
-
- def test_get_by_address(self):
- with mock.patch.object(db, 'virtual_interface_get_by_address') as get:
- get.return_value = fake_vif
- vif = vif_obj.VirtualInterface.get_by_address(self.context,
- '00:00:00:00:00:00')
- self._compare(self, fake_vif, vif)
-
- def test_get_by_instance_and_network(self):
- with mock.patch.object(db,
- 'virtual_interface_get_by_instance_and_network') as get:
- get.return_value = fake_vif
- vif = vif_obj.VirtualInterface.get_by_instance_and_network(
- self.context, 'fake-uuid', 123)
- self._compare(self, fake_vif, vif)
-
- def test_create(self):
- vif = vif_obj.VirtualInterface()
- vif.address = '00:00:00:00:00:00'
- vif.network_id = 123
- vif.instance_uuid = 'fake-uuid'
- vif.uuid = 'fake-uuid-2'
-
- with mock.patch.object(db, 'virtual_interface_create') as create:
- create.return_value = fake_vif
- vif.create(self.context)
-
- self.assertEqual(self.context, vif._context)
- vif._context = None
- self._compare(self, fake_vif, vif)
-
- def test_delete_by_instance_uuid(self):
- with mock.patch.object(db,
- 'virtual_interface_delete_by_instance') as delete:
- vif_obj.VirtualInterface.delete_by_instance_uuid(self.context,
- 'fake-uuid')
- delete.assert_called_with(self.context, 'fake-uuid')
-
-
-class TestVirtualInterfaceObject(test_objects._LocalTest,
- _TestVirtualInterface):
- pass
-
-
-class TestRemoteVirtualInterfaceObject(test_objects._RemoteTest,
- _TestVirtualInterface):
- pass
-
-
-class _TestVirtualInterfaceList(object):
- def test_get_all(self):
- with mock.patch.object(db, 'virtual_interface_get_all') as get:
- get.return_value = [fake_vif]
- vifs = vif_obj.VirtualInterfaceList.get_all(self.context)
- self.assertEqual(1, len(vifs))
- _TestVirtualInterface._compare(self, fake_vif, vifs[0])
-
- def test_get_by_instance_uuid(self):
- with mock.patch.object(db, 'virtual_interface_get_by_instance') as get:
- get.return_value = [fake_vif]
- vifs = vif_obj.VirtualInterfaceList.get_by_instance_uuid(
- self.context, 'fake-uuid')
- self.assertEqual(1, len(vifs))
- _TestVirtualInterface._compare(self, fake_vif, vifs[0])
-
-
-class TestVirtualInterfaceList(test_objects._LocalTest,
- _TestVirtualInterfaceList):
- pass
-
-
-class TestRemoteVirtualInterfaceList(test_objects._RemoteTest,
- _TestVirtualInterfaceList):
- pass
diff --git a/nova/tests/pci/test_manager.py b/nova/tests/pci/test_manager.py
deleted file mode 100644
index e37fd5b067..0000000000
--- a/nova/tests/pci/test_manager.py
+++ /dev/null
@@ -1,364 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-import mock
-
-from nova.compute import task_states
-from nova.compute import vm_states
-from nova import context
-from nova import db
-from nova import exception
-from nova import objects
-from nova.pci import device
-from nova.pci import manager
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests.pci import fakes as pci_fakes
-
-
-fake_pci = {
- 'compute_node_id': 1,
- 'address': '0000:00:00.1',
- 'product_id': 'p',
- 'vendor_id': 'v',
- 'request_id': None,
- 'status': 'available'}
-fake_pci_1 = dict(fake_pci, address='0000:00:00.2',
- product_id='p1', vendor_id='v1')
-fake_pci_2 = dict(fake_pci, address='0000:00:00.3')
-
-
-fake_db_dev = {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': None,
- 'id': 1,
- 'compute_node_id': 1,
- 'address': '0000:00:00.1',
- 'vendor_id': 'v',
- 'product_id': 'p',
- 'dev_type': 't',
- 'status': 'available',
- 'dev_id': 'i',
- 'label': 'l',
- 'instance_uuid': None,
- 'extra_info': '{}',
- 'request_id': None,
- }
-fake_db_dev_1 = dict(fake_db_dev, vendor_id='v1',
- product_id='p1', id=2,
- address='0000:00:00.2')
-fake_db_dev_2 = dict(fake_db_dev, id=3, address='0000:00:00.3')
-fake_db_devs = [fake_db_dev, fake_db_dev_1, fake_db_dev_2]
-
-
-fake_pci_requests = [
- {'count': 1,
- 'spec': [{'vendor_id': 'v'}]},
- {'count': 1,
- 'spec': [{'vendor_id': 'v1'}]}]
-
-
-class PciDevTrackerTestCase(test.TestCase):
- def _create_fake_instance(self):
- self.inst = objects.Instance()
- self.inst.uuid = 'fake-inst-uuid'
- self.inst.pci_devices = objects.PciDeviceList()
- self.inst.vm_state = vm_states.ACTIVE
- self.inst.task_state = None
-
- def _fake_get_pci_devices(self, ctxt, node_id):
- return fake_db_devs[:]
-
- def _fake_pci_device_update(self, ctxt, node_id, address, value):
- self.update_called += 1
- self.called_values = value
- fake_return = copy.deepcopy(fake_db_dev)
- return fake_return
-
- def _fake_pci_device_destroy(self, ctxt, node_id, address):
- self.destroy_called += 1
-
- def _create_pci_requests_object(self, mock_get, requests):
- pci_reqs = []
- for request in requests:
- pci_req_obj = objects.InstancePCIRequest(count=request['count'],
- spec=request['spec'])
- pci_reqs.append(pci_req_obj)
- mock_get.return_value = objects.InstancePCIRequests(requests=pci_reqs)
-
- def setUp(self):
- super(PciDevTrackerTestCase, self).setUp()
- self.stubs.Set(db, 'pci_device_get_all_by_node',
- self._fake_get_pci_devices)
- # The fake_pci_whitelist must be called before creating the fake
- # devices
- patcher = pci_fakes.fake_pci_whitelist()
- self.addCleanup(patcher.stop)
- self._create_fake_instance()
- self.tracker = manager.PciDevTracker(1)
-
- def test_pcidev_tracker_create(self):
- self.assertEqual(len(self.tracker.pci_devs), 3)
- free_devs = self.tracker.pci_stats.get_free_devs()
- self.assertEqual(len(free_devs), 3)
- self.assertEqual(self.tracker.stale.keys(), [])
- self.assertEqual(len(self.tracker.stats.pools), 2)
- self.assertEqual(self.tracker.node_id, 1)
-
- def test_pcidev_tracker_create_no_nodeid(self):
- self.tracker = manager.PciDevTracker()
- self.assertEqual(len(self.tracker.pci_devs), 0)
-
- def test_set_hvdev_new_dev(self):
- fake_pci_3 = dict(fake_pci, address='0000:00:00.4', vendor_id='v2')
- fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1),
- copy.deepcopy(fake_pci_2), copy.deepcopy(fake_pci_3)]
- self.tracker.set_hvdevs(fake_pci_devs)
- self.assertEqual(len(self.tracker.pci_devs), 4)
- self.assertEqual(set([dev['address'] for
- dev in self.tracker.pci_devs]),
- set(['0000:00:00.1', '0000:00:00.2',
- '0000:00:00.3', '0000:00:00.4']))
- self.assertEqual(set([dev['vendor_id'] for
- dev in self.tracker.pci_devs]),
- set(['v', 'v1', 'v2']))
-
- def test_set_hvdev_changed(self):
- fake_pci_v2 = dict(fake_pci, address='0000:00:00.2', vendor_id='v1')
- fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2),
- copy.deepcopy(fake_pci_v2)]
- self.tracker.set_hvdevs(fake_pci_devs)
- self.assertEqual(set([dev['vendor_id'] for
- dev in self.tracker.pci_devs]),
- set(['v', 'v1']))
-
- def test_set_hvdev_remove(self):
- self.tracker.set_hvdevs([fake_pci])
- self.assertEqual(len([dev for dev in self.tracker.pci_devs
- if dev['status'] == 'removed']),
- 2)
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
- def test_set_hvdev_changed_stal(self, mock_get):
- self._create_pci_requests_object(mock_get,
- [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}])
- self.tracker._claim_instance(mock.sentinel.context, self.inst)
- fake_pci_3 = dict(fake_pci, address='0000:00:00.2', vendor_id='v2')
- fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2),
- copy.deepcopy(fake_pci_3)]
- self.tracker.set_hvdevs(fake_pci_devs)
- self.assertEqual(len(self.tracker.stale), 1)
- self.assertEqual(self.tracker.stale['0000:00:00.2']['vendor_id'], 'v2')
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
- def test_update_pci_for_instance_active(self, mock_get):
- self._create_pci_requests_object(mock_get, fake_pci_requests)
- self.tracker.update_pci_for_instance(None, self.inst)
- free_devs = self.tracker.pci_stats.get_free_devs()
- self.assertEqual(len(free_devs), 1)
- self.assertEqual(free_devs[0]['vendor_id'], 'v')
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
- def test_update_pci_for_instance_fail(self, mock_get):
- pci_requests = copy.deepcopy(fake_pci_requests)
- pci_requests[0]['count'] = 4
- self._create_pci_requests_object(mock_get, pci_requests)
- self.assertRaises(exception.PciDeviceRequestFailed,
- self.tracker.update_pci_for_instance,
- None,
- self.inst)
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
- def test_update_pci_for_instance_deleted(self, mock_get):
- self._create_pci_requests_object(mock_get, fake_pci_requests)
- self.tracker.update_pci_for_instance(None, self.inst)
- free_devs = self.tracker.pci_stats.get_free_devs()
- self.assertEqual(len(free_devs), 1)
- self.inst.vm_state = vm_states.DELETED
- self.tracker.update_pci_for_instance(None, self.inst)
- free_devs = self.tracker.pci_stats.get_free_devs()
- self.assertEqual(len(free_devs), 3)
- self.assertEqual(set([dev['vendor_id'] for
- dev in self.tracker.pci_devs]),
- set(['v', 'v1']))
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
- def test_update_pci_for_instance_resize_source(self, mock_get):
- self._create_pci_requests_object(mock_get, fake_pci_requests)
- self.tracker.update_pci_for_instance(None, self.inst)
- free_devs = self.tracker.pci_stats.get_free_devs()
- self.assertEqual(len(free_devs), 1)
- self.inst.task_state = task_states.RESIZE_MIGRATED
- self.tracker.update_pci_for_instance(None, self.inst)
- free_devs = self.tracker.pci_stats.get_free_devs()
- self.assertEqual(len(free_devs), 3)
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
- def test_update_pci_for_instance_resize_dest(self, mock_get):
- self._create_pci_requests_object(mock_get, fake_pci_requests)
- self.tracker.update_pci_for_migration(None, self.inst)
- free_devs = self.tracker.pci_stats.get_free_devs()
- self.assertEqual(len(free_devs), 1)
- self.assertEqual(len(self.tracker.claims['fake-inst-uuid']), 2)
- self.assertNotIn('fake-inst-uuid', self.tracker.allocations)
- self.inst.task_state = task_states.RESIZE_FINISH
- self.tracker.update_pci_for_instance(None, self.inst)
- self.assertEqual(len(self.tracker.allocations['fake-inst-uuid']), 2)
- self.assertNotIn('fake-inst-uuid', self.tracker.claims)
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
- def test_update_pci_for_migration_in(self, mock_get):
- self._create_pci_requests_object(mock_get, fake_pci_requests)
- self.tracker.update_pci_for_migration(None, self.inst)
- free_devs = self.tracker.pci_stats.get_free_devs()
- self.assertEqual(len(free_devs), 1)
- self.assertEqual(free_devs[0]['vendor_id'], 'v')
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
- def test_update_pci_for_migration_out(self, mock_get):
- self._create_pci_requests_object(mock_get, fake_pci_requests)
- self.tracker.update_pci_for_migration(None, self.inst)
- self.tracker.update_pci_for_migration(None, self.inst, sign=-1)
- free_devs = self.tracker.pci_stats.get_free_devs()
- self.assertEqual(len(free_devs), 3)
- self.assertEqual(set([dev['vendor_id'] for
- dev in self.tracker.pci_devs]),
- set(['v', 'v1']))
-
- def test_save(self):
- self.stubs.Set(db, "pci_device_update", self._fake_pci_device_update)
- ctxt = context.get_admin_context()
- fake_pci_v3 = dict(fake_pci, address='0000:00:00.2', vendor_id='v3')
- fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2),
- copy.deepcopy(fake_pci_v3)]
- self.tracker.set_hvdevs(fake_pci_devs)
- self.update_called = 0
- self.tracker.save(ctxt)
- self.assertEqual(self.update_called, 3)
-
- def test_save_removed(self):
- self.stubs.Set(db, "pci_device_update", self._fake_pci_device_update)
- self.stubs.Set(db, "pci_device_destroy", self._fake_pci_device_destroy)
- self.destroy_called = 0
- ctxt = context.get_admin_context()
- self.assertEqual(len(self.tracker.pci_devs), 3)
- dev = self.tracker.pci_devs[0]
- self.update_called = 0
- device.remove(dev)
- self.tracker.save(ctxt)
- self.assertEqual(len(self.tracker.pci_devs), 2)
- self.assertEqual(self.destroy_called, 1)
-
- def test_set_compute_node_id(self):
- self.tracker = manager.PciDevTracker()
- fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1),
- copy.deepcopy(fake_pci_2)]
- self.tracker.set_hvdevs(fake_pci_devs)
- self.tracker.set_compute_node_id(1)
- self.assertEqual(self.tracker.node_id, 1)
- self.assertEqual(self.tracker.pci_devs[0].compute_node_id, 1)
- fake_pci_3 = dict(fake_pci, address='0000:00:00.4', vendor_id='v2')
- fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1),
- copy.deepcopy(fake_pci_3), copy.deepcopy(fake_pci_3)]
- self.tracker.set_hvdevs(fake_pci_devs)
- for dev in self.tracker.pci_devs:
- self.assertEqual(dev.compute_node_id, 1)
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
- def test_clean_usage(self, mock_get):
- inst_2 = copy.copy(self.inst)
- inst_2.uuid = 'uuid5'
- migr = {'instance_uuid': 'uuid2', 'vm_state': vm_states.BUILDING}
- orph = {'uuid': 'uuid3', 'vm_state': vm_states.BUILDING}
-
- self._create_pci_requests_object(mock_get,
- [{'count': 1, 'spec': [{'vendor_id': 'v'}]}])
- self.tracker.update_pci_for_instance(None, self.inst)
- self._create_pci_requests_object(mock_get,
- [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}])
- self.tracker.update_pci_for_instance(None, inst_2)
- free_devs = self.tracker.pci_stats.get_free_devs()
- self.assertEqual(len(free_devs), 1)
- self.assertEqual(free_devs[0]['vendor_id'], 'v')
-
- self.tracker.clean_usage([self.inst], [migr], [orph])
- free_devs = self.tracker.pci_stats.get_free_devs()
- self.assertEqual(len(free_devs), 2)
- self.assertEqual(
- set([dev['vendor_id'] for dev in free_devs]),
- set(['v', 'v1']))
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
- def test_clean_usage_claims(self, mock_get):
- inst_2 = copy.copy(self.inst)
- inst_2.uuid = 'uuid5'
- migr = {'instance_uuid': 'uuid2', 'vm_state': vm_states.BUILDING}
- orph = {'uuid': 'uuid3', 'vm_state': vm_states.BUILDING}
-
- self._create_pci_requests_object(mock_get,
- [{'count': 1, 'spec': [{'vendor_id': 'v'}]}])
- self.tracker.update_pci_for_instance(None, self.inst)
- self._create_pci_requests_object(mock_get,
- [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}])
- self.tracker.update_pci_for_migration(None, inst_2)
- free_devs = self.tracker.pci_stats.get_free_devs()
- self.assertEqual(len(free_devs), 1)
- self.tracker.clean_usage([self.inst], [migr], [orph])
- free_devs = self.tracker.pci_stats.get_free_devs()
- self.assertEqual(len(free_devs), 2)
- self.assertEqual(
- set([dev['vendor_id'] for dev in free_devs]),
- set(['v', 'v1']))
-
- @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
- def test_clean_usage_no_request_match_no_claims(self, mock_get):
- # Tests the case that there is no match for the request so the
- # claims mapping is set to None for the instance when the tracker
- # calls clean_usage.
- self._create_pci_requests_object(mock_get, [])
- self.tracker.update_pci_for_migration(None, instance=self.inst, sign=1)
- free_devs = self.tracker.pci_stats.get_free_devs()
- self.assertEqual(3, len(free_devs))
- self.tracker.clean_usage([], [], [])
- free_devs = self.tracker.pci_stats.get_free_devs()
- self.assertEqual(3, len(free_devs))
- self.assertEqual(
- set([dev['address'] for dev in free_devs]),
- set(['0000:00:00.1', '0000:00:00.2', '0000:00:00.3']))
-
-
-class PciGetInstanceDevs(test.TestCase):
- def test_get_devs_object(self):
- def _fake_obj_load_attr(foo, attrname):
- if attrname == 'pci_devices':
- self.load_attr_called = True
- foo.pci_devices = objects.PciDeviceList()
-
- inst = fakes.stub_instance(id='1')
- ctxt = context.get_admin_context()
- self.mox.StubOutWithMock(db, 'instance_get')
- db.instance_get(ctxt, '1', columns_to_join=[]
- ).AndReturn(inst)
- self.mox.ReplayAll()
- inst = objects.Instance.get_by_id(ctxt, '1', expected_attrs=[])
- self.stubs.Set(objects.Instance, 'obj_load_attr', _fake_obj_load_attr)
-
- self.load_attr_called = False
- manager.get_instance_pci_devs(inst)
- self.assertEqual(self.load_attr_called, True)
diff --git a/nova/tests/pci/test_stats.py b/nova/tests/pci/test_stats.py
deleted file mode 100644
index 9a4d1afd1d..0000000000
--- a/nova/tests/pci/test_stats.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo.serialization import jsonutils
-
-from nova import exception
-from nova import objects
-from nova.pci import stats
-from nova.pci import whitelist
-from nova import test
-from nova.tests.pci import fakes
-
-fake_pci_1 = {
- 'compute_node_id': 1,
- 'address': '0000:00:00.1',
- 'product_id': 'p1',
- 'vendor_id': 'v1',
- 'status': 'available',
- 'extra_k1': 'v1',
- 'request_id': None,
- }
-
-
-fake_pci_2 = dict(fake_pci_1, vendor_id='v2',
- product_id='p2',
- address='0000:00:00.2')
-
-
-fake_pci_3 = dict(fake_pci_1, address='0000:00:00.3')
-
-
-pci_requests = [objects.InstancePCIRequest(count=1,
- spec=[{'vendor_id': 'v1'}]),
- objects.InstancePCIRequest(count=1,
- spec=[{'vendor_id': 'v2'}])]
-
-
-pci_requests_multiple = [objects.InstancePCIRequest(count=1,
- spec=[{'vendor_id': 'v1'}]),
- objects.InstancePCIRequest(count=3,
- spec=[{'vendor_id': 'v2'}])]
-
-
-class PciDeviceStatsTestCase(test.NoDBTestCase):
- def _create_fake_devs(self):
- self.fake_dev_1 = objects.PciDevice.create(fake_pci_1)
- self.fake_dev_2 = objects.PciDevice.create(fake_pci_2)
- self.fake_dev_3 = objects.PciDevice.create(fake_pci_3)
-
- map(self.pci_stats.add_device,
- [self.fake_dev_1, self.fake_dev_2, self.fake_dev_3])
-
- def setUp(self):
- super(PciDeviceStatsTestCase, self).setUp()
- self.pci_stats = stats.PciDeviceStats()
- # The following two calls need to be made before adding the devices.
- patcher = fakes.fake_pci_whitelist()
- self.addCleanup(patcher.stop)
- self._create_fake_devs()
-
- def test_add_device(self):
- self.assertEqual(len(self.pci_stats.pools), 2)
- self.assertEqual(set([d['vendor_id'] for d in self.pci_stats]),
- set(['v1', 'v2']))
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set([1, 2]))
-
- def test_remove_device(self):
- self.pci_stats.remove_device(self.fake_dev_2)
- self.assertEqual(len(self.pci_stats.pools), 1)
- self.assertEqual(self.pci_stats.pools[0]['count'], 2)
- self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
-
- def test_remove_device_exception(self):
- self.pci_stats.remove_device(self.fake_dev_2)
- self.assertRaises(exception.PciDevicePoolEmpty,
- self.pci_stats.remove_device,
- self.fake_dev_2)
-
- def test_json_creat(self):
- m = jsonutils.dumps(self.pci_stats)
- new_stats = stats.PciDeviceStats(m)
-
- self.assertEqual(len(new_stats.pools), 2)
- self.assertEqual(set([d['count'] for d in new_stats]),
- set([1, 2]))
- self.assertEqual(set([d['vendor_id'] for d in new_stats]),
- set(['v1', 'v2']))
-
- def test_support_requests(self):
- self.assertEqual(self.pci_stats.support_requests(pci_requests),
- True)
- self.assertEqual(len(self.pci_stats.pools), 2)
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set((1, 2)))
-
- def test_support_requests_failed(self):
- self.assertEqual(
- self.pci_stats.support_requests(pci_requests_multiple), False)
- self.assertEqual(len(self.pci_stats.pools), 2)
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set([1, 2]))
-
- def test_apply_requests(self):
- self.pci_stats.apply_requests(pci_requests)
- self.assertEqual(len(self.pci_stats.pools), 1)
- self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
- self.assertEqual(self.pci_stats.pools[0]['count'], 1)
-
- def test_apply_requests_failed(self):
- self.assertRaises(exception.PciDeviceRequestFailed,
- self.pci_stats.apply_requests,
- pci_requests_multiple)
-
- def test_consume_requests(self):
- devs = self.pci_stats.consume_requests(pci_requests)
- self.assertEqual(2, len(devs))
- self.assertEqual(set(['v1', 'v2']),
- set([dev['vendor_id'] for dev in devs]))
-
- def test_consume_requests_empty(self):
- devs = self.pci_stats.consume_requests([])
- self.assertEqual(0, len(devs))
-
- def test_consume_requests_failed(self):
- self.assertRaises(exception.PciDeviceRequestFailed,
- self.pci_stats.consume_requests,
- pci_requests_multiple)
-
-
-@mock.patch.object(whitelist, 'get_pci_devices_filter')
-class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
-
- def setUp(self):
- super(PciDeviceStatsWithTagsTestCase, self).setUp()
- self.pci_stats = stats.PciDeviceStats()
- self._create_whitelist()
-
- def _create_whitelist(self):
- white_list = ['{"vendor_id":"1137","product_id":"0071",'
- '"address":"*:0a:00.*","physical_network":"physnet1"}',
- '{"vendor_id":"1137","product_id":"0072"}']
- self.pci_wlist = whitelist.PciHostDevicesWhiteList(white_list)
-
- def _create_pci_devices(self):
- self.pci_tagged_devices = []
- for dev in range(4):
- pci_dev = {'compute_node_id': 1,
- 'address': '0000:0a:00.%d' % dev,
- 'vendor_id': '1137',
- 'product_id': '0071',
- 'status': 'available',
- 'request_id': None}
- self.pci_tagged_devices.append(objects.PciDevice.create(pci_dev))
-
- self.pci_untagged_devices = []
- for dev in range(3):
- pci_dev = {'compute_node_id': 1,
- 'address': '0000:0b:00.%d' % dev,
- 'vendor_id': '1137',
- 'product_id': '0072',
- 'status': 'available',
- 'request_id': None}
- self.pci_untagged_devices.append(objects.PciDevice.create(pci_dev))
-
- map(self.pci_stats.add_device, self.pci_tagged_devices)
- map(self.pci_stats.add_device, self.pci_untagged_devices)
-
- def _assertPoolContent(self, pool, vendor_id, product_id, count, **tags):
- self.assertEqual(vendor_id, pool['vendor_id'])
- self.assertEqual(product_id, pool['product_id'])
- self.assertEqual(count, pool['count'])
- if tags:
- for k, v in tags.iteritems():
- self.assertEqual(v, pool[k])
-
- def _assertPools(self):
- # Pools are ordered based on the number of keys. 'product_id',
- # 'vendor_id' are always part of the keys. When tags are present,
- # they are also part of the keys. In this test class, we have
- # two pools with the second one having the tag 'physical_network'
- # and the value 'physnet1'
- self.assertEqual(2, len(self.pci_stats.pools))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072',
- len(self.pci_untagged_devices))
- self.assertEqual(self.pci_untagged_devices,
- self.pci_stats.pools[0]['devices'])
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071',
- len(self.pci_tagged_devices),
- physical_network='physnet1')
- self.assertEqual(self.pci_tagged_devices,
- self.pci_stats.pools[1]['devices'])
-
- def test_add_devices(self, mock_get_dev_filter):
- mock_get_dev_filter.return_value = self.pci_wlist
- self._create_pci_devices()
- self._assertPools()
-
- def test_consume_reqeusts(self, mock_get_dev_filter):
- mock_get_dev_filter.return_value = self.pci_wlist
- self._create_pci_devices()
- pci_requests = [objects.InstancePCIRequest(count=1,
- spec=[{'physical_network': 'physnet1'}]),
- objects.InstancePCIRequest(count=1,
- spec=[{'vendor_id': '1137',
- 'product_id': '0072'}])]
- devs = self.pci_stats.consume_requests(pci_requests)
- self.assertEqual(2, len(devs))
- self.assertEqual(set(['0071', '0072']),
- set([dev['product_id'] for dev in devs]))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072', 2)
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071', 3,
- physical_network='physnet1')
-
- def test_add_device_no_devspec(self, mock_get_dev_filter):
- mock_get_dev_filter.return_value = self.pci_wlist
- self._create_pci_devices()
- pci_dev = {'compute_node_id': 1,
- 'address': '0000:0c:00.1',
- 'vendor_id': '2345',
- 'product_id': '0172',
- 'status': 'available',
- 'request_id': None}
- pci_dev_obj = objects.PciDevice.create(pci_dev)
- self.pci_stats.add_device(pci_dev_obj)
- # There should be no change
- self.assertIsNone(
- self.pci_stats._create_pool_keys_from_dev(pci_dev_obj))
- self._assertPools()
-
- def test_remove_device_no_devspec(self, mock_get_dev_filter):
- mock_get_dev_filter.return_value = self.pci_wlist
- self._create_pci_devices()
- pci_dev = {'compute_node_id': 1,
- 'address': '0000:0c:00.1',
- 'vendor_id': '2345',
- 'product_id': '0172',
- 'status': 'available',
- 'request_id': None}
- pci_dev_obj = objects.PciDevice.create(pci_dev)
- self.pci_stats.remove_device(pci_dev_obj)
- # There should be no change
- self.assertIsNone(
- self.pci_stats._create_pool_keys_from_dev(pci_dev_obj))
- self._assertPools()
-
- def test_remove_device(self, mock_get_dev_filter):
- mock_get_dev_filter.return_value = self.pci_wlist
- self._create_pci_devices()
- dev1 = self.pci_untagged_devices.pop()
- self.pci_stats.remove_device(dev1)
- dev2 = self.pci_tagged_devices.pop()
- self.pci_stats.remove_device(dev2)
- self._assertPools()
diff --git a/nova/tests/policy_fixture.py b/nova/tests/policy_fixture.py
deleted file mode 100644
index 18010d6756..0000000000
--- a/nova/tests/policy_fixture.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 2012 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-
-import fixtures
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-
-from nova.openstack.common import policy as common_policy
-import nova.policy
-from nova.tests import fake_policy
-
-CONF = cfg.CONF
-
-
-class PolicyFixture(fixtures.Fixture):
-
- def setUp(self):
- super(PolicyFixture, self).setUp()
- self.policy_dir = self.useFixture(fixtures.TempDir())
- self.policy_file_name = os.path.join(self.policy_dir.path,
- 'policy.json')
- with open(self.policy_file_name, 'w') as policy_file:
- policy_file.write(fake_policy.policy_data)
- CONF.set_override('policy_file', self.policy_file_name)
- nova.policy.reset()
- nova.policy.init()
- self.addCleanup(nova.policy.reset)
-
- def set_rules(self, rules):
- policy = nova.policy._ENFORCER
- policy.set_rules(dict((k, common_policy.parse_rule(v))
- for k, v in rules.items()))
-
-
-class RoleBasedPolicyFixture(fixtures.Fixture):
-
- def __init__(self, role="admin", *args, **kwargs):
- super(RoleBasedPolicyFixture, self).__init__(*args, **kwargs)
- self.role = role
-
- def setUp(self):
- """Copy live policy.json file and convert all actions to
- allow users of the specified role only
- """
- super(RoleBasedPolicyFixture, self).setUp()
- policy = jsonutils.load(open(CONF.policy_file))
-
- # Convert all actions to require specified role
- for action, rule in policy.iteritems():
- policy[action] = 'role:%s' % self.role
-
- self.policy_dir = self.useFixture(fixtures.TempDir())
- self.policy_file_name = os.path.join(self.policy_dir.path,
- 'policy.json')
- with open(self.policy_file_name, 'w') as policy_file:
- jsonutils.dump(policy, policy_file)
- CONF.set_override('policy_file', self.policy_file_name)
- nova.policy.reset()
- nova.policy.init()
- self.addCleanup(nova.policy.reset)
diff --git a/nova/tests/scheduler/filters/test_affinity_filters.py b/nova/tests/scheduler/filters/test_affinity_filters.py
deleted file mode 100644
index ba6f58bc37..0000000000
--- a/nova/tests/scheduler/filters/test_affinity_filters.py
+++ /dev/null
@@ -1,258 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo.config import cfg
-
-from nova.scheduler.filters import affinity_filter
-from nova import test
-from nova.tests.scheduler import fakes
-
-CONF = cfg.CONF
-
-CONF.import_opt('my_ip', 'nova.netconf')
-
-
-@mock.patch('nova.compute.api.API.get_all')
-class TestDifferentHostFilter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestDifferentHostFilter, self).setUp()
- self.filt_cls = affinity_filter.DifferentHostFilter()
-
- def test_affinity_different_filter_passes(self, get_all_mock):
- host = fakes.FakeHostState('host1', 'node1', {})
- get_all_mock.return_value = []
-
- filter_properties = {'context': mock.sentinel.ctx,
- 'scheduler_hints': {
- 'different_host': ['fake'], }}
-
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- get_all_mock.assert_called_once_with(mock.sentinel.ctx,
- {'host': 'host1',
- 'uuid': ['fake'],
- 'deleted': False})
-
- def test_affinity_different_filter_no_list_passes(self, get_all_mock):
- host = fakes.FakeHostState('host1', 'node1', {})
- get_all_mock.return_value = []
-
- filter_properties = {'context': mock.sentinel.ctx,
- 'scheduler_hints': {
- 'different_host': 'fake'}}
-
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- get_all_mock.assert_called_once_with(mock.sentinel.ctx,
- {'host': 'host1',
- 'uuid': ['fake'],
- 'deleted': False})
-
- def test_affinity_different_filter_fails(self, get_all_mock):
- host = fakes.FakeHostState('host1', 'node1', {})
- get_all_mock.return_value = [mock.sentinel.instances]
-
- filter_properties = {'context': mock.sentinel.ctx,
- 'scheduler_hints': {
- 'different_host': ['fake'], }}
-
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
- get_all_mock.assert_called_once_with(mock.sentinel.ctx,
- {'host': 'host1',
- 'uuid': ['fake'],
- 'deleted': False})
-
- def test_affinity_different_filter_handles_none(self, get_all_mock):
- host = fakes.FakeHostState('host1', 'node1', {})
-
- filter_properties = {'context': mock.sentinel.ctx,
- 'scheduler_hints': None}
-
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- self.assertFalse(get_all_mock.called)
-
-
-@mock.patch('nova.compute.api.API.get_all')
-class TestSameHostFilter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestSameHostFilter, self).setUp()
- self.filt_cls = affinity_filter.SameHostFilter()
-
- def test_affinity_same_filter_passes(self, get_all_mock):
- host = fakes.FakeHostState('host1', 'node1', {})
- get_all_mock.return_value = [mock.sentinel.images]
-
- filter_properties = {'context': mock.sentinel.ctx,
- 'scheduler_hints': {
- 'same_host': ['fake'], }}
-
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- get_all_mock.assert_called_once_with(mock.sentinel.ctx,
- {'host': 'host1',
- 'uuid': ['fake'],
- 'deleted': False})
-
- def test_affinity_same_filter_no_list_passes(self, get_all_mock):
- host = fakes.FakeHostState('host1', 'node1', {})
- get_all_mock.return_value = [mock.sentinel.images]
-
- filter_properties = {'context': mock.sentinel.ctx,
- 'scheduler_hints': {
- 'same_host': 'fake'}}
-
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- get_all_mock.assert_called_once_with(mock.sentinel.ctx,
- {'host': 'host1',
- 'uuid': ['fake'],
- 'deleted': False})
-
- def test_affinity_same_filter_fails(self, get_all_mock):
- host = fakes.FakeHostState('host1', 'node1', {})
- get_all_mock.return_value = []
-
- filter_properties = {'context': mock.sentinel.ctx,
- 'scheduler_hints': {
- 'same_host': ['fake'], }}
-
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
- get_all_mock.assert_called_once_with(mock.sentinel.ctx,
- {'host': 'host1',
- 'uuid': ['fake'],
- 'deleted': False})
-
- def test_affinity_same_filter_handles_none(self, get_all_mock):
- host = fakes.FakeHostState('host1', 'node1', {})
-
- filter_properties = {'context': mock.sentinel.ctx,
- 'scheduler_hints': None}
-
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- self.assertFalse(get_all_mock.called)
-
-
-class TestSimpleCIDRAffinityFilter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestSimpleCIDRAffinityFilter, self).setUp()
- self.filt_cls = affinity_filter.SimpleCIDRAffinityFilter()
-
- def test_affinity_simple_cidr_filter_passes(self):
- host = fakes.FakeHostState('host1', 'node1', {})
- host.host_ip = '10.8.1.1'
-
- affinity_ip = "10.8.1.100"
-
- filter_properties = {'context': mock.sentinel.ctx,
- 'scheduler_hints': {
- 'cidr': '/24',
- 'build_near_host_ip': affinity_ip}}
-
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_affinity_simple_cidr_filter_fails(self):
- host = fakes.FakeHostState('host1', 'node1', {})
- host.host_ip = '10.8.1.1'
-
- affinity_ip = "10.8.1.100"
-
- filter_properties = {'context': mock.sentinel.ctx,
- 'scheduler_hints': {
- 'cidr': '/32',
- 'build_near_host_ip': affinity_ip}}
-
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_affinity_simple_cidr_filter_handles_none(self):
- host = fakes.FakeHostState('host1', 'node1', {})
-
- affinity_ip = CONF.my_ip.split('.')[0:3]
- affinity_ip.append('100')
- affinity_ip = str.join('.', affinity_ip)
-
- filter_properties = {'context': mock.sentinel.ctx,
- 'scheduler_hints': None}
-
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
-
-class TestGroupAffinityFilter(test.NoDBTestCase):
-
- def _test_group_anti_affinity_filter_passes(self, filt_cls, policy):
- host = fakes.FakeHostState('host1', 'node1', {})
- filter_properties = {}
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
- filter_properties = {'group_policies': ['affinity']}
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
- filter_properties = {'group_policies': [policy]}
- filter_properties['group_hosts'] = []
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
- filter_properties['group_hosts'] = ['host2']
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
-
- def test_group_anti_affinity_filter_passes(self):
- self._test_group_anti_affinity_filter_passes(
- affinity_filter.ServerGroupAntiAffinityFilter(),
- 'anti-affinity')
-
- def test_group_anti_affinity_filter_passes_legacy(self):
- self._test_group_anti_affinity_filter_passes(
- affinity_filter.GroupAntiAffinityFilter(),
- 'legacy')
-
- def _test_group_anti_affinity_filter_fails(self, filt_cls, policy):
- host = fakes.FakeHostState('host1', 'node1', {})
- filter_properties = {'group_policies': [policy],
- 'group_hosts': ['host1']}
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
-
- def test_group_anti_affinity_filter_fails(self):
- self._test_group_anti_affinity_filter_fails(
- affinity_filter.ServerGroupAntiAffinityFilter(),
- 'anti-affinity')
-
- def test_group_anti_affinity_filter_fails_legacy(self):
- self._test_group_anti_affinity_filter_fails(
- affinity_filter.GroupAntiAffinityFilter(),
- 'legacy')
-
- def _test_group_affinity_filter_passes(self, filt_cls, policy):
- host = fakes.FakeHostState('host1', 'node1', {})
- filter_properties = {}
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
- filter_properties = {'group_policies': ['anti-affinity']}
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
- filter_properties = {'group_policies': ['affinity'],
- 'group_hosts': ['host1']}
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
-
- def test_group_affinity_filter_passes(self):
- self._test_group_affinity_filter_passes(
- affinity_filter.ServerGroupAffinityFilter(), 'affinity')
-
- def test_group_affinity_filter_passes_legacy(self):
- self._test_group_affinity_filter_passes(
- affinity_filter.GroupAffinityFilter(), 'legacy')
-
- def _test_group_affinity_filter_fails(self, filt_cls, policy):
- host = fakes.FakeHostState('host1', 'node1', {})
- filter_properties = {'group_policies': [policy],
- 'group_hosts': ['host2']}
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
-
- def test_group_affinity_filter_fails(self):
- self._test_group_affinity_filter_fails(
- affinity_filter.ServerGroupAffinityFilter(), 'affinity')
-
- def test_group_affinity_filter_fails_legacy(self):
- self._test_group_affinity_filter_fails(
- affinity_filter.GroupAffinityFilter(), 'legacy')
diff --git a/nova/tests/scheduler/filters/test_aggregate_image_properties_isolation_filters.py b/nova/tests/scheduler/filters/test_aggregate_image_properties_isolation_filters.py
deleted file mode 100644
index b574350f0e..0000000000
--- a/nova/tests/scheduler/filters/test_aggregate_image_properties_isolation_filters.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova.scheduler.filters import aggregate_image_properties_isolation as aipi
-from nova import test
-from nova.tests.scheduler import fakes
-
-
-@mock.patch('nova.db.aggregate_metadata_get_by_host')
-class TestAggImagePropsIsolationFilter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestAggImagePropsIsolationFilter, self).setUp()
- self.filt_cls = aipi.AggregateImagePropertiesIsolation()
-
- def test_aggregate_image_properties_isolation_passes(self, agg_mock):
- agg_mock.return_value = {'foo': 'bar'}
- filter_properties = {'context': mock.sentinel.ctx,
- 'request_spec': {
- 'image': {
- 'properties': {'foo': 'bar'}}}}
- host = fakes.FakeHostState('host1', 'compute', {})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_aggregate_image_properties_isolation_multi_props_passes(self,
- agg_mock):
- agg_mock.return_value = {'foo': 'bar', 'foo2': 'bar2'}
- filter_properties = {'context': mock.sentinel.ctx,
- 'request_spec': {
- 'image': {
- 'properties': {'foo': 'bar',
- 'foo2': 'bar2'}}}}
- host = fakes.FakeHostState('host1', 'compute', {})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_aggregate_image_properties_isolation_props_with_meta_passes(self,
- agg_mock):
- agg_mock.return_value = {'foo': 'bar'}
- filter_properties = {'context': mock.sentinel.ctx,
- 'request_spec': {
- 'image': {
- 'properties': {}}}}
- host = fakes.FakeHostState('host1', 'compute', {})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_aggregate_image_properties_isolation_props_imgprops_passes(self,
- agg_mock):
- agg_mock.return_value = {}
- filter_properties = {'context': mock.sentinel.ctx,
- 'request_spec': {
- 'image': {
- 'properties': {'foo': 'bar'}}}}
- host = fakes.FakeHostState('host1', 'compute', {})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_aggregate_image_properties_isolation_props_not_match_fails(self,
- agg_mock):
- agg_mock.return_value = {'foo': 'bar'}
- filter_properties = {'context': mock.sentinel.ctx,
- 'request_spec': {
- 'image': {
- 'properties': {'foo': 'no-bar'}}}}
- host = fakes.FakeHostState('host1', 'compute', {})
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_aggregate_image_properties_isolation_props_not_match2_fails(self,
- agg_mock):
- agg_mock.return_value = {'foo': 'bar', 'foo2': 'bar2'}
- filter_properties = {'context': mock.sentinel.ctx,
- 'request_spec': {
- 'image': {
- 'properties': {'foo': 'bar',
- 'foo2': 'bar3'}}}}
- host = fakes.FakeHostState('host1', 'compute', {})
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_aggregate_image_properties_isolation_props_namespace(self,
- agg_mock):
- self.flags(aggregate_image_properties_isolation_namespace="np")
- agg_mock.return_value = {'np.foo': 'bar', 'foo2': 'bar2'}
- filter_properties = {'context': mock.sentinel.ctx,
- 'request_spec': {
- 'image': {
- 'properties': {'np.foo': 'bar',
- 'foo2': 'bar3'}}}}
- host = fakes.FakeHostState('host1', 'compute', {})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/scheduler/filters/test_aggregate_instance_extra_specs_filters.py b/nova/tests/scheduler/filters/test_aggregate_instance_extra_specs_filters.py
deleted file mode 100644
index c534b6c3ff..0000000000
--- a/nova/tests/scheduler/filters/test_aggregate_instance_extra_specs_filters.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova.scheduler.filters import aggregate_instance_extra_specs as agg_specs
-from nova import test
-from nova.tests.scheduler import fakes
-
-
-@mock.patch('nova.db.aggregate_metadata_get_by_host')
-class TestAggregateInstanceExtraSpecsFilter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestAggregateInstanceExtraSpecsFilter, self).setUp()
- self.filt_cls = agg_specs.AggregateInstanceExtraSpecsFilter()
-
- def test_aggregate_filter_passes_no_extra_specs(self, agg_mock):
- capabilities = {'opt1': 1, 'opt2': 2}
-
- filter_properties = {'context': mock.sentinel.ctx, 'instance_type':
- {'memory_mb': 1024}}
- host = fakes.FakeHostState('host1', 'node1', capabilities)
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- self.assertFalse(agg_mock.called)
-
- def _do_test_aggregate_filter_extra_specs(self, especs, passes):
- filter_properties = {'context': mock.sentinel.ctx,
- 'instance_type': {'memory_mb': 1024, 'extra_specs': especs}}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 1024})
- assertion = self.assertTrue if passes else self.assertFalse
- assertion(self.filt_cls.host_passes(host, filter_properties))
-
- def test_aggregate_filter_passes_extra_specs_simple(self, agg_mock):
- agg_mock.return_value = {'opt1': '1', 'opt2': '2'}
- especs = {
- # Un-scoped extra spec
- 'opt1': '1',
- # Scoped extra spec that applies to this filter
- 'aggregate_instance_extra_specs:opt2': '2',
- # Scoped extra spec that does not apply to this filter
- 'trust:trusted_host': 'true',
- }
- self._do_test_aggregate_filter_extra_specs(especs, passes=True)
-
- def test_aggregate_filter_passes_with_key_same_as_scope(self, agg_mock):
- agg_mock.return_value = {'aggregate_instance_extra_specs': '1'}
- especs = {
- # Un-scoped extra spec, make sure we don't blow up if it
- # happens to match our scope.
- 'aggregate_instance_extra_specs': '1',
- }
- self._do_test_aggregate_filter_extra_specs(especs, passes=True)
-
- def test_aggregate_filter_fails_extra_specs_simple(self, agg_mock):
- agg_mock.return_value = {'opt1': '1', 'opt2': '2'}
- especs = {
- 'opt1': '1',
- 'opt2': '222',
- 'trust:trusted_host': 'true'
- }
- self._do_test_aggregate_filter_extra_specs(especs, passes=False)
diff --git a/nova/tests/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py b/nova/tests/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py
deleted file mode 100644
index 5691f40a2a..0000000000
--- a/nova/tests/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova.scheduler.filters import aggregate_multitenancy_isolation as ami
-from nova import test
-from nova.tests.scheduler import fakes
-
-
-@mock.patch('nova.db.aggregate_metadata_get_by_host')
-class TestAggregateMultitenancyIsolationFilter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestAggregateMultitenancyIsolationFilter, self).setUp()
- self.filt_cls = ami.AggregateMultiTenancyIsolation()
-
- def test_aggregate_multi_tenancy_isolation_with_meta_passes(self,
- agg_mock):
- agg_mock.return_value = {'filter_tenant_id': 'my_tenantid'}
- filter_properties = {'context': mock.sentinel.ctx,
- 'request_spec': {
- 'instance_properties': {
- 'project_id': 'my_tenantid'}}}
- host = fakes.FakeHostState('host1', 'compute', {})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_aggregate_multi_tenancy_isolation_fails(self, agg_mock):
- agg_mock.return_value = {'filter_tenant_id': 'other_tenantid'}
- filter_properties = {'context': mock.sentinel.ctx,
- 'request_spec': {
- 'instance_properties': {
- 'project_id': 'my_tenantid'}}}
- host = fakes.FakeHostState('host1', 'compute', {})
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_aggregate_multi_tenancy_isolation_no_meta_passes(self, agg_mock):
- agg_mock.return_value = {}
- filter_properties = {'context': mock.sentinel.ctx,
- 'request_spec': {
- 'instance_properties': {
- 'project_id': 'my_tenantid'}}}
- host = fakes.FakeHostState('host1', 'compute', {})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/scheduler/filters/test_availability_zone_filters.py b/nova/tests/scheduler/filters/test_availability_zone_filters.py
deleted file mode 100644
index ca4a4cb589..0000000000
--- a/nova/tests/scheduler/filters/test_availability_zone_filters.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova.scheduler.filters import availability_zone_filter
-from nova import test
-from nova.tests.scheduler import fakes
-
-
-@mock.patch('nova.db.aggregate_metadata_get_by_host')
-class TestAvailabilityZoneFilter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestAvailabilityZoneFilter, self).setUp()
- self.filt_cls = availability_zone_filter.AvailabilityZoneFilter()
-
- @staticmethod
- def _make_zone_request(zone):
- return {
- 'context': mock.sentinel.ctx,
- 'request_spec': {
- 'instance_properties': {
- 'availability_zone': zone
- }
- }
- }
-
- def test_availability_zone_filter_same(self, agg_mock):
- agg_mock.return_value = {'availability_zone': 'nova'}
- request = self._make_zone_request('nova')
- host = fakes.FakeHostState('host1', 'node1', {})
- self.assertTrue(self.filt_cls.host_passes(host, request))
-
- def test_availability_zone_filter_different(self, agg_mock):
- agg_mock.return_value = {'availability_zone': 'nova'}
- request = self._make_zone_request('bad')
- host = fakes.FakeHostState('host1', 'node1', {})
- self.assertFalse(self.filt_cls.host_passes(host, request))
diff --git a/nova/tests/scheduler/filters/test_compute_capabilities_filters.py b/nova/tests/scheduler/filters/test_compute_capabilities_filters.py
deleted file mode 100644
index 7f54d818ad..0000000000
--- a/nova/tests/scheduler/filters/test_compute_capabilities_filters.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import six
-
-from nova.scheduler.filters import compute_capabilities_filter
-from nova import test
-from nova.tests.scheduler import fakes
-
-
-class TestComputeCapabilitiesFilter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestComputeCapabilitiesFilter, self).setUp()
- self.filt_cls = compute_capabilities_filter.ComputeCapabilitiesFilter()
-
- def _do_test_compute_filter_extra_specs(self, ecaps, especs, passes):
- # In real OpenStack runtime environment,compute capabilities
- # value may be number, so we should use number to do unit test.
- capabilities = {}
- capabilities.update(ecaps)
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'extra_specs': especs}}
- host_state = {'free_ram_mb': 1024}
- host_state.update(capabilities)
- host = fakes.FakeHostState('host1', 'node1', host_state)
- assertion = self.assertTrue if passes else self.assertFalse
- assertion(self.filt_cls.host_passes(host, filter_properties))
-
- def test_compute_filter_pass_cpu_info_as_text_type(self):
- cpu_info = """ { "vendor": "Intel", "model": "core2duo",
- "arch": "i686","features": ["lahf_lm", "rdtscp"], "topology":
- {"cores": 1, "threads":1, "sockets": 1}} """
-
- cpu_info = six.text_type(cpu_info)
-
- self._do_test_compute_filter_extra_specs(
- ecaps={'cpu_info': cpu_info},
- especs={'capabilities:cpu_info:vendor': 'Intel'},
- passes=True)
-
- def test_compute_filter_fail_cpu_info_as_text_type_not_valid(self):
- cpu_info = "cpu_info"
-
- cpu_info = six.text_type(cpu_info)
-
- self._do_test_compute_filter_extra_specs(
- ecaps={'cpu_info': cpu_info},
- especs={'capabilities:cpu_info:vendor': 'Intel'},
- passes=False)
-
- def test_compute_filter_passes_extra_specs_simple(self):
- self._do_test_compute_filter_extra_specs(
- ecaps={'stats': {'opt1': 1, 'opt2': 2}},
- especs={'opt1': '1', 'opt2': '2', 'trust:trusted_host': 'true'},
- passes=True)
-
- def test_compute_filter_fails_extra_specs_simple(self):
- self._do_test_compute_filter_extra_specs(
- ecaps={'stats': {'opt1': 1, 'opt2': 2}},
- especs={'opt1': '1', 'opt2': '222', 'trust:trusted_host': 'true'},
- passes=False)
-
- def test_compute_filter_pass_extra_specs_simple_with_scope(self):
- self._do_test_compute_filter_extra_specs(
- ecaps={'stats': {'opt1': 1, 'opt2': 2}},
- especs={'capabilities:opt1': '1',
- 'trust:trusted_host': 'true'},
- passes=True)
-
- def test_compute_filter_pass_extra_specs_same_as_scope(self):
- # Make sure this still works even if the key is the same as the scope
- self._do_test_compute_filter_extra_specs(
- ecaps={'capabilities': 1},
- especs={'capabilities': '1'},
- passes=True)
-
- def test_compute_filter_extra_specs_simple_with_wrong_scope(self):
- self._do_test_compute_filter_extra_specs(
- ecaps={'opt1': 1, 'opt2': 2},
- especs={'wrong_scope:opt1': '1',
- 'trust:trusted_host': 'true'},
- passes=True)
-
- def test_compute_filter_extra_specs_pass_multi_level_with_scope(self):
- self._do_test_compute_filter_extra_specs(
- ecaps={'stats': {'opt1': {'a': 1, 'b': {'aa': 2}}, 'opt2': 2}},
- especs={'opt1:a': '1', 'capabilities:opt1:b:aa': '2',
- 'trust:trusted_host': 'true'},
- passes=True)
diff --git a/nova/tests/scheduler/filters/test_compute_filters.py b/nova/tests/scheduler/filters/test_compute_filters.py
deleted file mode 100644
index beab03b952..0000000000
--- a/nova/tests/scheduler/filters/test_compute_filters.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova.scheduler.filters import compute_filter
-from nova import test
-from nova.tests.scheduler import fakes
-
-
-@mock.patch('nova.servicegroup.API.service_is_up')
-class TestComputeFilter(test.NoDBTestCase):
-
- def test_compute_filter_manual_disable(self, service_up_mock):
- filt_cls = compute_filter.ComputeFilter()
- filter_properties = {'instance_type': {'memory_mb': 1024}}
- service = {'disabled': True}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 1024, 'service': service})
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
- self.assertFalse(service_up_mock.called)
-
- def test_compute_filter_sgapi_passes(self, service_up_mock):
- filt_cls = compute_filter.ComputeFilter()
- filter_properties = {'instance_type': {'memory_mb': 1024}}
- service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 1024, 'service': service})
- service_up_mock.return_value = True
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
- service_up_mock.assert_called_once_with(service)
-
- def test_compute_filter_sgapi_fails(self, service_up_mock):
- filt_cls = compute_filter.ComputeFilter()
- filter_properties = {'instance_type': {'memory_mb': 1024}}
- service = {'disabled': False, 'updated_at': 'now'}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 1024, 'service': service})
- service_up_mock.return_value = False
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
- service_up_mock.assert_called_once_with(service)
diff --git a/nova/tests/scheduler/filters/test_core_filters.py b/nova/tests/scheduler/filters/test_core_filters.py
deleted file mode 100644
index 44e74cb811..0000000000
--- a/nova/tests/scheduler/filters/test_core_filters.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova.scheduler.filters import core_filter
-from nova import test
-from nova.tests.scheduler import fakes
-
-
-class TestCoreFilter(test.NoDBTestCase):
-
- def test_core_filter_passes(self):
- self.filt_cls = core_filter.CoreFilter()
- filter_properties = {'instance_type': {'vcpus': 1}}
- self.flags(cpu_allocation_ratio=2)
- host = fakes.FakeHostState('host1', 'node1',
- {'vcpus_total': 4, 'vcpus_used': 7})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_core_filter_fails_safe(self):
- self.filt_cls = core_filter.CoreFilter()
- filter_properties = {'instance_type': {'vcpus': 1}}
- host = fakes.FakeHostState('host1', 'node1', {})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_core_filter_fails(self):
- self.filt_cls = core_filter.CoreFilter()
- filter_properties = {'instance_type': {'vcpus': 1}}
- self.flags(cpu_allocation_ratio=2)
- host = fakes.FakeHostState('host1', 'node1',
- {'vcpus_total': 4, 'vcpus_used': 8})
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
- def test_aggregate_core_filter_value_error(self, agg_mock):
- self.filt_cls = core_filter.AggregateCoreFilter()
- filter_properties = {'context': mock.sentinel.ctx,
- 'instance_type': {'vcpus': 1}}
- self.flags(cpu_allocation_ratio=2)
- host = fakes.FakeHostState('host1', 'node1',
- {'vcpus_total': 4, 'vcpus_used': 7})
- agg_mock.return_value = set(['XXX'])
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
- 'cpu_allocation_ratio')
- self.assertEqual(4 * 2, host.limits['vcpu'])
-
- @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
- def test_aggregate_core_filter_default_value(self, agg_mock):
- self.filt_cls = core_filter.AggregateCoreFilter()
- filter_properties = {'context': mock.sentinel.ctx,
- 'instance_type': {'vcpus': 1}}
- self.flags(cpu_allocation_ratio=2)
- host = fakes.FakeHostState('host1', 'node1',
- {'vcpus_total': 4, 'vcpus_used': 8})
- agg_mock.return_value = set([])
- # False: fallback to default flag w/o aggregates
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
- agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
- 'cpu_allocation_ratio')
- # True: use ratio from aggregates
- agg_mock.return_value = set(['3'])
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- self.assertEqual(4 * 3, host.limits['vcpu'])
-
- @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
- def test_aggregate_core_filter_conflict_values(self, agg_mock):
- self.filt_cls = core_filter.AggregateCoreFilter()
- filter_properties = {'context': mock.sentinel.ctx,
- 'instance_type': {'vcpus': 1}}
- self.flags(cpu_allocation_ratio=1)
- host = fakes.FakeHostState('host1', 'node1',
- {'vcpus_total': 4, 'vcpus_used': 8})
- agg_mock.return_value = set(['2', '3'])
- # use the minimum ratio from aggregates
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
- self.assertEqual(4 * 2, host.limits['vcpu'])
diff --git a/nova/tests/scheduler/filters/test_disk_filters.py b/nova/tests/scheduler/filters/test_disk_filters.py
deleted file mode 100644
index 8ae7b86fa6..0000000000
--- a/nova/tests/scheduler/filters/test_disk_filters.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova.scheduler.filters import disk_filter
-from nova import test
-from nova.tests.scheduler import fakes
-
-
-class TestDiskFilter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestDiskFilter, self).setUp()
-
- def test_disk_filter_passes(self):
- self.flags(disk_allocation_ratio=1.0)
- filt_cls = disk_filter.DiskFilter()
- filter_properties = {'instance_type': {'root_gb': 1,
- 'ephemeral_gb': 1, 'swap': 512}}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
-
- def test_disk_filter_fails(self):
- self.flags(disk_allocation_ratio=1.0)
- filt_cls = disk_filter.DiskFilter()
- filter_properties = {'instance_type': {'root_gb': 10,
- 'ephemeral_gb': 1, 'swap': 1024}}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
-
- def test_disk_filter_oversubscribe(self):
- self.flags(disk_allocation_ratio=10.0)
- filt_cls = disk_filter.DiskFilter()
- filter_properties = {'instance_type': {'root_gb': 100,
- 'ephemeral_gb': 18, 'swap': 1024}}
- # 1GB used... so 119GB allowed...
- host = fakes.FakeHostState('host1', 'node1',
- {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
- self.assertEqual(12 * 10.0, host.limits['disk_gb'])
-
- def test_disk_filter_oversubscribe_fail(self):
- self.flags(disk_allocation_ratio=10.0)
- filt_cls = disk_filter.DiskFilter()
- filter_properties = {'instance_type': {'root_gb': 100,
- 'ephemeral_gb': 19, 'swap': 1024}}
- # 1GB used... so 119GB allowed...
- host = fakes.FakeHostState('host1', 'node1',
- {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
-
- @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
- def test_aggregate_disk_filter_value_error(self, agg_mock):
- filt_cls = disk_filter.AggregateDiskFilter()
- self.flags(disk_allocation_ratio=1.0)
- filter_properties = {
- 'context': mock.sentinel.ctx,
- 'instance_type': {'root_gb': 1,
- 'ephemeral_gb': 1,
- 'swap': 1024}}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_disk_mb': 3 * 1024,
- 'total_usable_disk_gb': 1})
- agg_mock.return_value = set(['XXX'])
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
- agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
- 'disk_allocation_ratio')
-
- @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
- def test_aggregate_disk_filter_default_value(self, agg_mock):
- filt_cls = disk_filter.AggregateDiskFilter()
- self.flags(disk_allocation_ratio=1.0)
- filter_properties = {
- 'context': mock.sentinel.ctx,
- 'instance_type': {'root_gb': 2,
- 'ephemeral_gb': 1,
- 'swap': 1024}}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_disk_mb': 3 * 1024,
- 'total_usable_disk_gb': 1})
- # Uses global conf.
- agg_mock.return_value = set([])
- self.assertFalse(filt_cls.host_passes(host, filter_properties))
- agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
- 'disk_allocation_ratio')
-
- agg_mock.return_value = set(['2'])
- self.assertTrue(filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/scheduler/filters/test_image_props_filters.py b/nova/tests/scheduler/filters/test_image_props_filters.py
deleted file mode 100644
index 72102b82ef..0000000000
--- a/nova/tests/scheduler/filters/test_image_props_filters.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.compute import arch
-from nova.compute import hvtype
-from nova.compute import vm_mode
-from nova.scheduler.filters import image_props_filter
-from nova import test
-from nova.tests.scheduler import fakes
-from nova import utils
-
-
-class TestImagePropsFilter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestImagePropsFilter, self).setUp()
- self.filt_cls = image_props_filter.ImagePropertiesFilter()
-
- def test_image_properties_filter_passes_same_inst_props_and_version(self):
- img_props = {'properties': {'_architecture': arch.X86_64,
- 'hypervisor_type': hvtype.KVM,
- 'vm_mode': vm_mode.HVM,
- 'hypervisor_version_requires': '>=6.0,<6.2'
- }}
- filter_properties = {'request_spec': {'image': img_props}}
- hypervisor_version = utils.convert_version_to_int('6.0.0')
- capabilities = {'supported_instances':
- [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
- 'hypervisor_version': hypervisor_version}
- host = fakes.FakeHostState('host1', 'node1', capabilities)
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_image_properties_filter_fails_different_inst_props(self):
- img_props = {'properties': {'architecture': arch.ARMV7,
- 'hypervisor_type': hvtype.QEMU,
- 'vm_mode': vm_mode.HVM}}
- filter_properties = {'request_spec': {'image': img_props}}
- hypervisor_version = utils.convert_version_to_int('6.0.0')
- capabilities = {'supported_instances':
- [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
- 'hypervisor_version': hypervisor_version}
- host = fakes.FakeHostState('host1', 'node1', capabilities)
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_image_properties_filter_fails_different_hyper_version(self):
- img_props = {'properties': {'architecture': arch.X86_64,
- 'hypervisor_type': hvtype.KVM,
- 'vm_mode': vm_mode.HVM,
- 'hypervisor_version_requires': '>=6.2'}}
- filter_properties = {'request_spec': {'image': img_props}}
- hypervisor_version = utils.convert_version_to_int('6.0.0')
- capabilities = {'enabled': True,
- 'supported_instances':
- [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
- 'hypervisor_version': hypervisor_version}
- host = fakes.FakeHostState('host1', 'node1', capabilities)
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_image_properties_filter_passes_partial_inst_props(self):
- img_props = {'properties': {'architecture': arch.X86_64,
- 'vm_mode': vm_mode.HVM}}
- filter_properties = {'request_spec': {'image': img_props}}
- hypervisor_version = utils.convert_version_to_int('6.0.0')
- capabilities = {'supported_instances':
- [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
- 'hypervisor_version': hypervisor_version}
- host = fakes.FakeHostState('host1', 'node1', capabilities)
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_image_properties_filter_fails_partial_inst_props(self):
- img_props = {'properties': {'architecture': arch.X86_64,
- 'vm_mode': vm_mode.HVM}}
- filter_properties = {'request_spec': {'image': img_props}}
- hypervisor_version = utils.convert_version_to_int('6.0.0')
- capabilities = {'supported_instances':
- [(arch.X86_64, hvtype.XEN, vm_mode.XEN)],
- 'hypervisor_version': hypervisor_version}
- host = fakes.FakeHostState('host1', 'node1', capabilities)
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_image_properties_filter_passes_without_inst_props(self):
- filter_properties = {'request_spec': {}}
- hypervisor_version = utils.convert_version_to_int('6.0.0')
- capabilities = {'supported_instances':
- [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
- 'hypervisor_version': hypervisor_version}
- host = fakes.FakeHostState('host1', 'node1', capabilities)
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_image_properties_filter_fails_without_host_props(self):
- img_props = {'properties': {'architecture': arch.X86_64,
- 'hypervisor_type': hvtype.KVM,
- 'vm_mode': vm_mode.HVM}}
- filter_properties = {'request_spec': {'image': img_props}}
- hypervisor_version = utils.convert_version_to_int('6.0.0')
- capabilities = {'enabled': True,
- 'hypervisor_version': hypervisor_version}
- host = fakes.FakeHostState('host1', 'node1', capabilities)
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_image_properties_filter_passes_without_hyper_version(self):
- img_props = {'properties': {'architecture': arch.X86_64,
- 'hypervisor_type': hvtype.KVM,
- 'vm_mode': vm_mode.HVM,
- 'hypervisor_version_requires': '>=6.0'}}
- filter_properties = {'request_spec': {'image': img_props}}
- capabilities = {'enabled': True,
- 'supported_instances':
- [(arch.X86_64, hvtype.KVM, vm_mode.HVM)]}
- host = fakes.FakeHostState('host1', 'node1', capabilities)
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_image_properties_filter_fails_with_unsupported_hyper_ver(self):
- img_props = {'properties': {'architecture': arch.X86_64,
- 'hypervisor_type': hvtype.KVM,
- 'vm_mode': vm_mode.HVM,
- 'hypervisor_version_requires': '>=6.0'}}
- filter_properties = {'request_spec': {'image': img_props}}
- capabilities = {'enabled': True,
- 'supported_instances':
- [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
- 'hypervisor_version': 5000}
- host = fakes.FakeHostState('host1', 'node1', capabilities)
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_image_properties_filter_pv_mode_compat(self):
- # if an old image has 'pv' for a vm_mode it should be treated as xen
- img_props = {'properties': {'vm_mode': 'pv'}}
- filter_properties = {'request_spec': {'image': img_props}}
- hypervisor_version = utils.convert_version_to_int('6.0.0')
- capabilities = {'supported_instances':
- [(arch.X86_64, hvtype.XEN, vm_mode.XEN)],
- 'hypervisor_version': hypervisor_version}
- host = fakes.FakeHostState('host1', 'node1', capabilities)
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_image_properties_filter_hvm_mode_compat(self):
- # if an old image has 'hv' for a vm_mode it should be treated as xen
- img_props = {'properties': {'vm_mode': 'hv'}}
- filter_properties = {'request_spec': {'image': img_props}}
- hypervisor_version = utils.convert_version_to_int('6.0.0')
- capabilities = {'supported_instances':
- [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
- 'hypervisor_version': hypervisor_version}
- host = fakes.FakeHostState('host1', 'node1', capabilities)
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_image_properties_filter_xen_arch_compat(self):
- # if an old image has 'x86_32' for arch it should be treated as i686
- img_props = {'properties': {'architecture': 'x86_32'}}
- filter_properties = {'request_spec': {'image': img_props}}
- hypervisor_version = utils.convert_version_to_int('6.0.0')
- capabilities = {'supported_instances':
- [(arch.I686, hvtype.KVM, vm_mode.HVM)],
- 'hypervisor_version': hypervisor_version}
- host = fakes.FakeHostState('host1', 'node1', capabilities)
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_image_properties_filter_xen_hvtype_compat(self):
- # if an old image has 'xapi' for hvtype it should be treated as xen
- img_props = {'properties': {'hypervisor_type': 'xapi'}}
- filter_properties = {'request_spec': {'image': img_props}}
- hypervisor_version = utils.convert_version_to_int('6.0.0')
- capabilities = {'supported_instances':
- [(arch.I686, hvtype.XEN, vm_mode.HVM)],
- 'hypervisor_version': hypervisor_version}
- host = fakes.FakeHostState('host1', 'node1', capabilities)
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_image_properties_filter_baremetal_vmmode_compat(self):
- # if an old image has 'baremetal' for vmmode it should be
- # treated as hvm
- img_props = {'properties': {'vm_mode': 'baremetal'}}
- filter_properties = {'request_spec': {'image': img_props}}
- hypervisor_version = utils.convert_version_to_int('6.0.0')
- capabilities = {'supported_instances':
- [(arch.I686, hvtype.BAREMETAL, vm_mode.HVM)],
- 'hypervisor_version': hypervisor_version}
- host = fakes.FakeHostState('host1', 'node1', capabilities)
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/scheduler/filters/test_io_ops_filters.py b/nova/tests/scheduler/filters/test_io_ops_filters.py
deleted file mode 100644
index b84c25b9b2..0000000000
--- a/nova/tests/scheduler/filters/test_io_ops_filters.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import mock
-
-from nova.scheduler.filters import io_ops_filter
-from nova import test
-from nova.tests.scheduler import fakes
-
-
-class TestNumInstancesFilter(test.NoDBTestCase):
-
- def test_filter_num_iops_passes(self):
- self.flags(max_io_ops_per_host=8)
- self.filt_cls = io_ops_filter.IoOpsFilter()
- host = fakes.FakeHostState('host1', 'node1',
- {'num_io_ops': 7})
- filter_properties = {}
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_filter_num_iops_fails(self):
- self.flags(max_io_ops_per_host=8)
- self.filt_cls = io_ops_filter.IoOpsFilter()
- host = fakes.FakeHostState('host1', 'node1',
- {'num_io_ops': 8})
- filter_properties = {}
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
- def test_aggregate_filter_num_iops_value(self, agg_mock):
- self.flags(max_io_ops_per_host=7)
- self.filt_cls = io_ops_filter.AggregateIoOpsFilter()
- host = fakes.FakeHostState('host1', 'node1',
- {'num_io_ops': 7})
- filter_properties = {'context': mock.sentinel.ctx}
- agg_mock.return_value = set([])
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
- agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
- 'max_io_ops_per_host')
- agg_mock.return_value = set(['8'])
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
- def test_aggregate_filter_num_iops_value_error(self, agg_mock):
- self.flags(max_io_ops_per_host=8)
- self.filt_cls = io_ops_filter.AggregateIoOpsFilter()
- host = fakes.FakeHostState('host1', 'node1',
- {'num_io_ops': 7})
- agg_mock.return_value = set(['XXX'])
- filter_properties = {'context': mock.sentinel.ctx}
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
- 'max_io_ops_per_host')
diff --git a/nova/tests/scheduler/filters/test_isolated_hosts_filter.py b/nova/tests/scheduler/filters/test_isolated_hosts_filter.py
deleted file mode 100644
index 05e26f84ea..0000000000
--- a/nova/tests/scheduler/filters/test_isolated_hosts_filter.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.scheduler.filters import isolated_hosts_filter
-from nova import test
-from nova.tests.scheduler import fakes
-
-
-class TestIsolatedHostsFilter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestIsolatedHostsFilter, self).setUp()
- self.filt_cls = isolated_hosts_filter.IsolatedHostsFilter()
-
- def _do_test_isolated_hosts(self, host_in_list, image_in_list,
- set_flags=True,
- restrict_isolated_hosts_to_isolated_images=True):
- if set_flags:
- self.flags(isolated_images=['isolated_image'],
- isolated_hosts=['isolated_host'],
- restrict_isolated_hosts_to_isolated_images=
- restrict_isolated_hosts_to_isolated_images)
- host_name = 'isolated_host' if host_in_list else 'free_host'
- image_ref = 'isolated_image' if image_in_list else 'free_image'
- filter_properties = {
- 'request_spec': {
- 'instance_properties': {'image_ref': image_ref}
- }
- }
- host = fakes.FakeHostState(host_name, 'node', {})
- return self.filt_cls.host_passes(host, filter_properties)
-
- def test_isolated_hosts_fails_isolated_on_non_isolated(self):
- self.assertFalse(self._do_test_isolated_hosts(False, True))
-
- def test_isolated_hosts_fails_non_isolated_on_isolated(self):
- self.assertFalse(self._do_test_isolated_hosts(True, False))
-
- def test_isolated_hosts_passes_isolated_on_isolated(self):
- self.assertTrue(self._do_test_isolated_hosts(True, True))
-
- def test_isolated_hosts_passes_non_isolated_on_non_isolated(self):
- self.assertTrue(self._do_test_isolated_hosts(False, False))
-
- def test_isolated_hosts_no_config(self):
- # If there are no hosts nor isolated images in the config, it should
- # not filter at all. This is the default config.
- self.assertTrue(self._do_test_isolated_hosts(False, True, False))
- self.assertTrue(self._do_test_isolated_hosts(True, False, False))
- self.assertTrue(self._do_test_isolated_hosts(True, True, False))
- self.assertTrue(self._do_test_isolated_hosts(False, False, False))
-
- def test_isolated_hosts_no_hosts_config(self):
- self.flags(isolated_images=['isolated_image'])
- # If there are no hosts in the config, it should only filter out
- # images that are listed
- self.assertFalse(self._do_test_isolated_hosts(False, True, False))
- self.assertTrue(self._do_test_isolated_hosts(True, False, False))
- self.assertFalse(self._do_test_isolated_hosts(True, True, False))
- self.assertTrue(self._do_test_isolated_hosts(False, False, False))
-
- def test_isolated_hosts_no_images_config(self):
- self.flags(isolated_hosts=['isolated_host'])
- # If there are no images in the config, it should only filter out
- # isolated_hosts
- self.assertTrue(self._do_test_isolated_hosts(False, True, False))
- self.assertFalse(self._do_test_isolated_hosts(True, False, False))
- self.assertFalse(self._do_test_isolated_hosts(True, True, False))
- self.assertTrue(self._do_test_isolated_hosts(False, False, False))
-
- def test_isolated_hosts_less_restrictive(self):
- # If there are isolated hosts and non isolated images
- self.assertTrue(self._do_test_isolated_hosts(True, False, True, False))
- # If there are isolated hosts and isolated images
- self.assertTrue(self._do_test_isolated_hosts(True, True, True, False))
- # If there are non isolated hosts and non isolated images
- self.assertTrue(self._do_test_isolated_hosts(False, False, True,
- False))
- # If there are non isolated hosts and isolated images
- self.assertFalse(self._do_test_isolated_hosts(False, True, True,
- False))
diff --git a/nova/tests/scheduler/filters/test_json_filters.py b/nova/tests/scheduler/filters/test_json_filters.py
deleted file mode 100644
index 3c7d924cf6..0000000000
--- a/nova/tests/scheduler/filters/test_json_filters.py
+++ /dev/null
@@ -1,289 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.serialization import jsonutils
-
-from nova.scheduler.filters import json_filter
-from nova import test
-from nova.tests.scheduler import fakes
-
-
-class TestJsonFilter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestJsonFilter, self).setUp()
- self.filt_cls = json_filter.JsonFilter()
- self.json_query = jsonutils.dumps(
- ['and', ['>=', '$free_ram_mb', 1024],
- ['>=', '$free_disk_mb', 200 * 1024]])
-
- def test_json_filter_passes(self):
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'root_gb': 200,
- 'ephemeral_gb': 0},
- 'scheduler_hints': {'query': self.json_query}}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 1024,
- 'free_disk_mb': 200 * 1024})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_json_filter_passes_with_no_query(self):
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'root_gb': 200,
- 'ephemeral_gb': 0}}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 0,
- 'free_disk_mb': 0})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_json_filter_fails_on_memory(self):
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'root_gb': 200,
- 'ephemeral_gb': 0},
- 'scheduler_hints': {'query': self.json_query}}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 1023,
- 'free_disk_mb': 200 * 1024})
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_json_filter_fails_on_disk(self):
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'root_gb': 200,
- 'ephemeral_gb': 0},
- 'scheduler_hints': {'query': self.json_query}}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 1024,
- 'free_disk_mb': (200 * 1024) - 1})
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_json_filter_fails_on_service_disabled(self):
- json_query = jsonutils.dumps(
- ['and', ['>=', '$free_ram_mb', 1024],
- ['>=', '$free_disk_mb', 200 * 1024],
- ['not', '$service.disabled']])
- filter_properties = {'instance_type': {'memory_mb': 1024,
- 'local_gb': 200},
- 'scheduler_hints': {'query': json_query}}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 1024,
- 'free_disk_mb': 200 * 1024})
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_json_filter_happy_day(self):
- # Test json filter more thoroughly.
- raw = ['and',
- '$capabilities.enabled',
- ['=', '$capabilities.opt1', 'match'],
- ['or',
- ['and',
- ['<', '$free_ram_mb', 30],
- ['<', '$free_disk_mb', 300]],
- ['and',
- ['>', '$free_ram_mb', 30],
- ['>', '$free_disk_mb', 300]]]]
- filter_properties = {
- 'scheduler_hints': {
- 'query': jsonutils.dumps(raw),
- },
- }
-
- # Passes
- capabilities = {'opt1': 'match'}
- service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 10,
- 'free_disk_mb': 200,
- 'capabilities': capabilities,
- 'service': service})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- # Passes
- capabilities = {'opt1': 'match'}
- service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 40,
- 'free_disk_mb': 400,
- 'capabilities': capabilities,
- 'service': service})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- # Fails due to capabilities being disabled
- capabilities = {'enabled': False, 'opt1': 'match'}
- service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 40,
- 'free_disk_mb': 400,
- 'capabilities': capabilities,
- 'service': service})
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- # Fails due to being exact memory/disk we don't want
- capabilities = {'enabled': True, 'opt1': 'match'}
- service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 30,
- 'free_disk_mb': 300,
- 'capabilities': capabilities,
- 'service': service})
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- # Fails due to memory lower but disk higher
- capabilities = {'enabled': True, 'opt1': 'match'}
- service = {'disabled': False}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 20,
- 'free_disk_mb': 400,
- 'capabilities': capabilities,
- 'service': service})
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- # Fails due to capabilities 'opt1' not equal
- capabilities = {'enabled': True, 'opt1': 'no-match'}
- service = {'enabled': True}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 20,
- 'free_disk_mb': 400,
- 'capabilities': capabilities,
- 'service': service})
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_json_filter_basic_operators(self):
- host = fakes.FakeHostState('host1', 'node1', {})
- # (operator, arguments, expected_result)
- ops_to_test = [
- ['=', [1, 1], True],
- ['=', [1, 2], False],
- ['<', [1, 2], True],
- ['<', [1, 1], False],
- ['<', [2, 1], False],
- ['>', [2, 1], True],
- ['>', [2, 2], False],
- ['>', [2, 3], False],
- ['<=', [1, 2], True],
- ['<=', [1, 1], True],
- ['<=', [2, 1], False],
- ['>=', [2, 1], True],
- ['>=', [2, 2], True],
- ['>=', [2, 3], False],
- ['in', [1, 1], True],
- ['in', [1, 1, 2, 3], True],
- ['in', [4, 1, 2, 3], False],
- ['not', [True], False],
- ['not', [False], True],
- ['or', [True, False], True],
- ['or', [False, False], False],
- ['and', [True, True], True],
- ['and', [False, False], False],
- ['and', [True, False], False],
- # Nested ((True or False) and (2 > 1)) == Passes
- ['and', [['or', True, False], ['>', 2, 1]], True]]
-
- for (op, args, expected) in ops_to_test:
- raw = [op] + args
- filter_properties = {
- 'scheduler_hints': {
- 'query': jsonutils.dumps(raw),
- },
- }
- self.assertEqual(expected,
- self.filt_cls.host_passes(host, filter_properties))
-
- # This results in [False, True, False, True] and if any are True
- # then it passes...
- raw = ['not', True, False, True, False]
- filter_properties = {
- 'scheduler_hints': {
- 'query': jsonutils.dumps(raw),
- },
- }
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- # This results in [False, False, False] and if any are True
- # then it passes...which this doesn't
- raw = ['not', True, True, True]
- filter_properties = {
- 'scheduler_hints': {
- 'query': jsonutils.dumps(raw),
- },
- }
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_json_filter_unknown_operator_raises(self):
- raw = ['!=', 1, 2]
- filter_properties = {
- 'scheduler_hints': {
- 'query': jsonutils.dumps(raw),
- },
- }
- host = fakes.FakeHostState('host1', 'node1',
- {})
- self.assertRaises(KeyError,
- self.filt_cls.host_passes, host, filter_properties)
-
- def test_json_filter_empty_filters_pass(self):
- host = fakes.FakeHostState('host1', 'node1',
- {})
-
- raw = []
- filter_properties = {
- 'scheduler_hints': {
- 'query': jsonutils.dumps(raw),
- },
- }
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- raw = {}
- filter_properties = {
- 'scheduler_hints': {
- 'query': jsonutils.dumps(raw),
- },
- }
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_json_filter_invalid_num_arguments_fails(self):
- host = fakes.FakeHostState('host1', 'node1',
- {})
-
- raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
- filter_properties = {
- 'scheduler_hints': {
- 'query': jsonutils.dumps(raw),
- },
- }
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- raw = ['>', 1]
- filter_properties = {
- 'scheduler_hints': {
- 'query': jsonutils.dumps(raw),
- },
- }
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_json_filter_unknown_variable_ignored(self):
- host = fakes.FakeHostState('host1', 'node1',
- {})
-
- raw = ['=', '$........', 1, 1]
- filter_properties = {
- 'scheduler_hints': {
- 'query': jsonutils.dumps(raw),
- },
- }
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- raw = ['=', '$foo', 2, 2]
- filter_properties = {
- 'scheduler_hints': {
- 'query': jsonutils.dumps(raw),
- },
- }
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/scheduler/filters/test_metrics_filters.py b/nova/tests/scheduler/filters/test_metrics_filters.py
deleted file mode 100644
index 1693681a16..0000000000
--- a/nova/tests/scheduler/filters/test_metrics_filters.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.scheduler.filters import metrics_filter
-from nova import test
-from nova.tests.scheduler import fakes
-
-
-class TestMetricsFilter(test.NoDBTestCase):
-
- def test_metrics_filter_pass(self):
- self.flags(weight_setting=['foo=1', 'bar=2'], group='metrics')
- filt_cls = metrics_filter.MetricsFilter()
- metrics = dict(foo=1, bar=2)
- host = fakes.FakeHostState('host1', 'node1',
- attribute_dict={'metrics': metrics})
- self.assertTrue(filt_cls.host_passes(host, None))
-
- def test_metrics_filter_missing_metrics(self):
- self.flags(weight_setting=['foo=1', 'bar=2'], group='metrics')
- filt_cls = metrics_filter.MetricsFilter()
- metrics = dict(foo=1)
- host = fakes.FakeHostState('host1', 'node1',
- attribute_dict={'metrics': metrics})
- self.assertFalse(filt_cls.host_passes(host, None))
diff --git a/nova/tests/scheduler/filters/test_num_instances_filters.py b/nova/tests/scheduler/filters/test_num_instances_filters.py
deleted file mode 100644
index 6fe0153df6..0000000000
--- a/nova/tests/scheduler/filters/test_num_instances_filters.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova.scheduler.filters import num_instances_filter
-from nova import test
-from nova.tests.scheduler import fakes
-
-
-class TestNumInstancesFilter(test.NoDBTestCase):
-
- def test_filter_num_instances_passes(self):
- self.flags(max_instances_per_host=5)
- self.filt_cls = num_instances_filter.NumInstancesFilter()
- host = fakes.FakeHostState('host1', 'node1',
- {'num_instances': 4})
- filter_properties = {}
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_filter_num_instances_fails(self):
- self.flags(max_instances_per_host=5)
- self.filt_cls = num_instances_filter.NumInstancesFilter()
- host = fakes.FakeHostState('host1', 'node1',
- {'num_instances': 5})
- filter_properties = {}
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
- def test_filter_aggregate_num_instances_value(self, agg_mock):
- self.flags(max_instances_per_host=4)
- self.filt_cls = num_instances_filter.AggregateNumInstancesFilter()
- host = fakes.FakeHostState('host1', 'node1',
- {'num_instances': 5})
- filter_properties = {'context': mock.sentinel.ctx}
- agg_mock.return_value = set([])
- # No aggregate defined for that host.
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
- agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
- 'max_instances_per_host')
- agg_mock.return_value = set(['6'])
- # Aggregate defined for that host.
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
- def test_filter_aggregate_num_instances_value_error(self, agg_mock):
- self.flags(max_instances_per_host=6)
- self.filt_cls = num_instances_filter.AggregateNumInstancesFilter()
- host = fakes.FakeHostState('host1', 'node1', {})
- filter_properties = {'context': mock.sentinel.ctx}
- agg_mock.return_value = set(['XXX'])
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
- 'max_instances_per_host')
diff --git a/nova/tests/scheduler/filters/test_numa_topology_filters.py b/nova/tests/scheduler/filters/test_numa_topology_filters.py
deleted file mode 100644
index 9b52373283..0000000000
--- a/nova/tests/scheduler/filters/test_numa_topology_filters.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo.serialization import jsonutils
-
-from nova import objects
-from nova.objects import base as obj_base
-from nova.scheduler.filters import numa_topology_filter
-from nova import test
-from nova.tests import fake_instance
-from nova.tests.scheduler import fakes
-from nova.virt import hardware
-
-
-class TestNUMATopologyFilter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestNUMATopologyFilter, self).setUp()
- self.filt_cls = numa_topology_filter.NUMATopologyFilter()
-
- def test_numa_topology_filter_pass(self):
- instance_topology = hardware.VirtNUMAInstanceTopology(
- cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
- hardware.VirtNUMATopologyCellInstance(1, set([3]), 512)])
- instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
- instance.numa_topology = (
- objects.InstanceNUMATopology.obj_from_topology(
- instance_topology))
- filter_properties = {
- 'request_spec': {
- 'instance_properties': jsonutils.to_primitive(
- obj_base.obj_to_primitive(instance))}}
- host = fakes.FakeHostState('host1', 'node1',
- {'numa_topology': fakes.NUMA_TOPOLOGY})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
- instance_topology = hardware.VirtNUMAInstanceTopology(
- cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
- hardware.VirtNUMATopologyCellInstance(1, set([3]), 512)])
- instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
- instance.numa_topology = (
- objects.InstanceNUMATopology.obj_from_topology(
- instance_topology))
-
- filter_properties = {
- 'request_spec': {
- 'instance_properties': jsonutils.to_primitive(
- obj_base.obj_to_primitive(instance))}}
- host = fakes.FakeHostState('host1', 'node1', {})
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_numa_topology_filter_numa_host_no_numa_instance_pass(self):
- instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
- instance.numa_topology = None
- filter_properties = {
- 'request_spec': {
- 'instance_properties': jsonutils.to_primitive(
- obj_base.obj_to_primitive(instance))}}
- host = fakes.FakeHostState('host1', 'node1',
- {'numa_topology': fakes.NUMA_TOPOLOGY})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_numa_topology_filter_fail_fit(self):
- instance_topology = hardware.VirtNUMAInstanceTopology(
- cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
- hardware.VirtNUMATopologyCellInstance(1, set([2]), 512),
- hardware.VirtNUMATopologyCellInstance(2, set([3]), 512)])
- instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
- instance.numa_topology = (
- objects.InstanceNUMATopology.obj_from_topology(
- instance_topology))
- filter_properties = {
- 'request_spec': {
- 'instance_properties': jsonutils.to_primitive(
- obj_base.obj_to_primitive(instance))}}
- host = fakes.FakeHostState('host1', 'node1',
- {'numa_topology': fakes.NUMA_TOPOLOGY})
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_numa_topology_filter_fail_memory(self):
- self.flags(ram_allocation_ratio=1)
-
- instance_topology = hardware.VirtNUMAInstanceTopology(
- cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), 1024),
- hardware.VirtNUMATopologyCellInstance(1, set([3]), 512)])
- instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
- instance.numa_topology = (
- objects.InstanceNUMATopology.obj_from_topology(
- instance_topology))
- filter_properties = {
- 'request_spec': {
- 'instance_properties': jsonutils.to_primitive(
- obj_base.obj_to_primitive(instance))}}
- host = fakes.FakeHostState('host1', 'node1',
- {'numa_topology': fakes.NUMA_TOPOLOGY})
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_numa_topology_filter_fail_cpu(self):
- self.flags(cpu_allocation_ratio=1)
-
- instance_topology = hardware.VirtNUMAInstanceTopology(
- cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
- hardware.VirtNUMATopologyCellInstance(
- 1, set([3, 4, 5]), 512)])
- instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
- instance.numa_topology = (
- objects.InstanceNUMATopology.obj_from_topology(
- instance_topology))
- filter_properties = {
- 'request_spec': {
- 'instance_properties': jsonutils.to_primitive(
- obj_base.obj_to_primitive(instance))}}
- host = fakes.FakeHostState('host1', 'node1',
- {'numa_topology': fakes.NUMA_TOPOLOGY})
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_numa_topology_filter_pass_set_limit(self):
- self.flags(cpu_allocation_ratio=21)
- self.flags(ram_allocation_ratio=1.3)
-
- instance_topology = hardware.VirtNUMAInstanceTopology(
- cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
- hardware.VirtNUMATopologyCellInstance(1, set([3]), 512)])
- instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
- instance.numa_topology = (
- objects.InstanceNUMATopology.obj_from_topology(
- instance_topology))
- filter_properties = {
- 'request_spec': {
- 'instance_properties': jsonutils.to_primitive(
- obj_base.obj_to_primitive(instance))}}
- host = fakes.FakeHostState('host1', 'node1',
- {'numa_topology': fakes.NUMA_TOPOLOGY})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- limits_topology = hardware.VirtNUMALimitTopology.from_json(
- host.limits['numa_topology'])
- self.assertEqual(limits_topology.cells[0].cpu_limit, 42)
- self.assertEqual(limits_topology.cells[1].cpu_limit, 42)
- self.assertEqual(limits_topology.cells[0].memory_limit, 665)
- self.assertEqual(limits_topology.cells[1].memory_limit, 665)
diff --git a/nova/tests/scheduler/filters/test_pci_passthrough_filters.py b/nova/tests/scheduler/filters/test_pci_passthrough_filters.py
deleted file mode 100644
index 0c7aa29638..0000000000
--- a/nova/tests/scheduler/filters/test_pci_passthrough_filters.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import objects
-from nova.scheduler.filters import pci_passthrough_filter
-from nova import test
-from nova.tests.scheduler import fakes
-
-
-class TestPCIPassthroughFilter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestPCIPassthroughFilter, self).setUp()
- self.filt_cls = pci_passthrough_filter.PciPassthroughFilter()
-
- def test_pci_passthrough_pass(self):
- pci_stats_mock = mock.MagicMock()
- pci_stats_mock.support_requests.return_value = True
- request = objects.InstancePCIRequest(count=1,
- spec=[{'vendor_id': '8086'}])
- requests = objects.InstancePCIRequests(requests=[request])
- filter_properties = {'pci_requests': requests}
- host = fakes.FakeHostState(
- 'host1', 'node1',
- attribute_dict={'pci_stats': pci_stats_mock})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- pci_stats_mock.support_requests.assert_called_once_with(
- requests.requests)
-
- def test_pci_passthrough_fail(self):
- pci_stats_mock = mock.MagicMock()
- pci_stats_mock.support_requests.return_value = False
- request = objects.InstancePCIRequest(count=1,
- spec=[{'vendor_id': '8086'}])
- requests = objects.InstancePCIRequests(requests=[request])
- filter_properties = {'pci_requests': requests}
- host = fakes.FakeHostState(
- 'host1', 'node1',
- attribute_dict={'pci_stats': pci_stats_mock})
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
- pci_stats_mock.support_requests.assert_called_once_with(
- requests.requests)
-
- def test_pci_passthrough_no_pci_request(self):
- filter_properties = {}
- host = fakes.FakeHostState('h1', 'n1', {})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_pci_passthrough_compute_stats(self):
- requests = [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]
- filter_properties = {'pci_requests': requests}
- host = fakes.FakeHostState(
- 'host1', 'node1',
- attribute_dict={})
- self.assertRaises(AttributeError, self.filt_cls.host_passes,
- host, filter_properties)
diff --git a/nova/tests/scheduler/filters/test_ram_filters.py b/nova/tests/scheduler/filters/test_ram_filters.py
deleted file mode 100644
index 880cdac296..0000000000
--- a/nova/tests/scheduler/filters/test_ram_filters.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova.scheduler.filters import ram_filter
-from nova import test
-from nova.tests.scheduler import fakes
-
-
-class TestRamFilter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestRamFilter, self).setUp()
- self.filt_cls = ram_filter.RamFilter()
-
- def test_ram_filter_fails_on_memory(self):
- ram_filter.RamFilter.ram_allocation_ratio = 1.0
- filter_properties = {'instance_type': {'memory_mb': 1024}}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024})
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_ram_filter_passes(self):
- ram_filter.RamFilter.ram_allocation_ratio = 1.0
- filter_properties = {'instance_type': {'memory_mb': 1024}}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_ram_filter_oversubscribe(self):
- ram_filter.RamFilter.ram_allocation_ratio = 2.0
- filter_properties = {'instance_type': {'memory_mb': 1024}}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': -1024, 'total_usable_ram_mb': 2048})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- self.assertEqual(2048 * 2.0, host.limits['memory_mb'])
-
-
-@mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
-class TestAggregateRamFilter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestAggregateRamFilter, self).setUp()
- self.filt_cls = ram_filter.AggregateRamFilter()
-
- def test_aggregate_ram_filter_value_error(self, agg_mock):
- self.flags(ram_allocation_ratio=1.0)
- filter_properties = {'context': mock.sentinel.ctx,
- 'instance_type': {'memory_mb': 1024}}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 1024, 'total_usable_ram_mb': 1024})
- agg_mock.return_value = set(['XXX'])
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- self.assertEqual(1024 * 1.0, host.limits['memory_mb'])
-
- def test_aggregate_ram_filter_default_value(self, agg_mock):
- self.flags(ram_allocation_ratio=1.0)
- filter_properties = {'context': mock.sentinel.ctx,
- 'instance_type': {'memory_mb': 1024}}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024})
- # False: fallback to default flag w/o aggregates
- agg_mock.return_value = set()
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
- agg_mock.return_value = set(['2.0'])
- # True: use ratio from aggregates
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- self.assertEqual(1024 * 2.0, host.limits['memory_mb'])
-
- def test_aggregate_ram_filter_conflict_values(self, agg_mock):
- self.flags(ram_allocation_ratio=1.0)
- filter_properties = {'context': mock.sentinel.ctx,
- 'instance_type': {'memory_mb': 1024}}
- host = fakes.FakeHostState('host1', 'node1',
- {'free_ram_mb': 1023, 'total_usable_ram_mb': 1024})
- agg_mock.return_value = set(['1.5', '2.0'])
- # use the minimum ratio from aggregates
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- self.assertEqual(1024 * 1.5, host.limits['memory_mb'])
diff --git a/nova/tests/scheduler/filters/test_retry_filters.py b/nova/tests/scheduler/filters/test_retry_filters.py
deleted file mode 100644
index a80f0f5879..0000000000
--- a/nova/tests/scheduler/filters/test_retry_filters.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.scheduler.filters import retry_filter
-from nova import test
-from nova.tests.scheduler import fakes
-
-
-class TestRetryFilter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestRetryFilter, self).setUp()
- self.filt_cls = retry_filter.RetryFilter()
-
- def test_retry_filter_disabled(self):
- # Test case where retry/re-scheduling is disabled.
- host = fakes.FakeHostState('host1', 'node1', {})
- filter_properties = {}
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_retry_filter_pass(self):
- # Node not previously tried.
- host = fakes.FakeHostState('host1', 'nodeX', {})
- retry = dict(num_attempts=2,
- hosts=[['host1', 'node1'], # same host, different node
- ['host2', 'node2'], # different host and node
- ])
- filter_properties = dict(retry=retry)
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_retry_filter_fail(self):
- # Node was already tried.
- host = fakes.FakeHostState('host1', 'node1', {})
- retry = dict(num_attempts=1,
- hosts=[['host1', 'node1']])
- filter_properties = dict(retry=retry)
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/scheduler/filters/test_trusted_filters.py b/nova/tests/scheduler/filters/test_trusted_filters.py
deleted file mode 100644
index 4b0b84a413..0000000000
--- a/nova/tests/scheduler/filters/test_trusted_filters.py
+++ /dev/null
@@ -1,203 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo.config import cfg
-from oslo.utils import timeutils
-import requests
-
-from nova.scheduler.filters import trusted_filter
-from nova import test
-from nova.tests.scheduler import fakes
-
-CONF = cfg.CONF
-
-
-@mock.patch.object(trusted_filter.AttestationService, '_request')
-class TestTrustedFilter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestTrustedFilter, self).setUp()
- # TrustedFilter's constructor creates the attestation cache, which
- # calls to get a list of all the compute nodes.
- fake_compute_nodes = [
- {'hypervisor_hostname': 'node1',
- 'service': {'host': 'host1'},
- }
- ]
- with mock.patch('nova.db.compute_node_get_all') as mocked:
- mocked.return_value = fake_compute_nodes
- self.filt_cls = trusted_filter.TrustedFilter()
-
- def test_trusted_filter_default_passes(self, req_mock):
- filter_properties = {'context': mock.sentinel.ctx,
- 'instance_type': {'memory_mb': 1024}}
- host = fakes.FakeHostState('host1', 'node1', {})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- self.assertFalse(req_mock.called)
-
- def test_trusted_filter_trusted_and_trusted_passes(self, req_mock):
- oat_data = {"hosts": [{"host_name": "node1",
- "trust_lvl": "trusted",
- "vtime": timeutils.isotime()}]}
- req_mock.return_value = requests.codes.OK, oat_data
-
- extra_specs = {'trust:trusted_host': 'trusted'}
- filter_properties = {'context': mock.sentinel.ctx,
- 'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'node1', {})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- req_mock.assert_called_once_with("POST", "PollHosts", ["node1"])
-
- def test_trusted_filter_trusted_and_untrusted_fails(self, req_mock):
- oat_data = {"hosts": [{"host_name": "node1",
- "trust_lvl": "untrusted",
- "vtime": timeutils.isotime()}]}
- req_mock.return_value = requests.codes.OK, oat_data
- extra_specs = {'trust:trusted_host': 'trusted'}
- filter_properties = {'context': mock.sentinel.ctx,
- 'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'node1', {})
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_trusted_filter_untrusted_and_trusted_fails(self, req_mock):
- oat_data = {"hosts": [{"host_name": "node",
- "trust_lvl": "trusted",
- "vtime": timeutils.isotime()}]}
- req_mock.return_value = requests.codes.OK, oat_data
- extra_specs = {'trust:trusted_host': 'untrusted'}
- filter_properties = {'context': mock.sentinel.ctx,
- 'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'node1', {})
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- def test_trusted_filter_untrusted_and_untrusted_passes(self, req_mock):
- oat_data = {"hosts": [{"host_name": "node1",
- "trust_lvl": "untrusted",
- "vtime": timeutils.isotime()}]}
- req_mock.return_value = requests.codes.OK, oat_data
- extra_specs = {'trust:trusted_host': 'untrusted'}
- filter_properties = {'context': mock.sentinel.ctx,
- 'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'node1', {})
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
-
- def test_trusted_filter_update_cache(self, req_mock):
- oat_data = {"hosts": [{"host_name": "node1",
- "trust_lvl": "untrusted",
- "vtime": timeutils.isotime()}]}
-
- req_mock.return_value = requests.codes.OK, oat_data
- extra_specs = {'trust:trusted_host': 'untrusted'}
- filter_properties = {'context': mock.sentinel.ctx,
- 'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'node1', {})
-
- self.filt_cls.host_passes(host, filter_properties) # Fill the caches
-
- req_mock.reset_mock()
- self.filt_cls.host_passes(host, filter_properties)
- self.assertFalse(req_mock.called)
-
- req_mock.reset_mock()
-
- timeutils.set_time_override(timeutils.utcnow())
- timeutils.advance_time_seconds(
- CONF.trusted_computing.attestation_auth_timeout + 80)
- self.filt_cls.host_passes(host, filter_properties)
- self.assertTrue(req_mock.called)
-
- timeutils.clear_time_override()
-
- def test_trusted_filter_update_cache_timezone(self, req_mock):
- oat_data = {"hosts": [{"host_name": "node1",
- "trust_lvl": "untrusted",
- "vtime": "2012-09-09T05:10:40-04:00"}]}
- req_mock.return_value = requests.codes.OK, oat_data
- extra_specs = {'trust:trusted_host': 'untrusted'}
- filter_properties = {'context': mock.sentinel.ctx,
- 'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'node1', {})
-
- timeutils.set_time_override(
- timeutils.normalize_time(
- timeutils.parse_isotime("2012-09-09T09:10:40Z")))
-
- self.filt_cls.host_passes(host, filter_properties) # Fill the caches
-
- req_mock.reset_mock()
- self.filt_cls.host_passes(host, filter_properties)
- self.assertFalse(req_mock.called)
-
- req_mock.reset_mock()
- timeutils.advance_time_seconds(
- CONF.trusted_computing.attestation_auth_timeout - 10)
- self.filt_cls.host_passes(host, filter_properties)
- self.assertFalse(req_mock.called)
-
- timeutils.clear_time_override()
-
- def test_trusted_filter_combine_hosts(self, req_mock):
- fake_compute_nodes = [
- {'hypervisor_hostname': 'node1',
- 'service': {'host': 'host1'},
- },
- {'hypervisor_hostname': 'node2',
- 'service': {'host': 'host2'},
- },
- ]
- with mock.patch('nova.db.compute_node_get_all') as mocked:
- mocked.return_value = fake_compute_nodes
- self.filt_cls = trusted_filter.TrustedFilter()
- oat_data = {"hosts": [{"host_name": "node1",
- "trust_lvl": "untrusted",
- "vtime": "2012-09-09T05:10:40-04:00"}]}
- req_mock.return_value = requests.codes.OK, oat_data
- extra_specs = {'trust:trusted_host': 'trusted'}
- filter_properties = {'context': mock.sentinel.ctx,
- 'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'node1', {})
-
- self.filt_cls.host_passes(host, filter_properties) # Fill the caches
- req_mock.assert_called_once_with("POST", "PollHosts",
- ["node1", "node2"])
-
- def test_trusted_filter_trusted_and_locale_formated_vtime_passes(self,
- req_mock):
- oat_data = {"hosts": [{"host_name": "host1",
- "trust_lvl": "trusted",
- "vtime": timeutils.strtime(fmt="%c")},
- {"host_name": "host2",
- "trust_lvl": "trusted",
- "vtime": timeutils.strtime(fmt="%D")},
- # This is just a broken date to ensure that
- # we're not just arbitrarily accepting any
- # date format.
- ]}
- req_mock.return_value = requests.codes.OK, oat_data
- extra_specs = {'trust:trusted_host': 'trusted'}
- filter_properties = {'context': mock.sentinel.ctx,
- 'instance_type': {'memory_mb': 1024,
- 'extra_specs': extra_specs}}
- host = fakes.FakeHostState('host1', 'host1', {})
- bad_host = fakes.FakeHostState('host2', 'host2', {})
-
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- self.assertFalse(self.filt_cls.host_passes(bad_host,
- filter_properties))
diff --git a/nova/tests/scheduler/filters/test_type_filters.py b/nova/tests/scheduler/filters/test_type_filters.py
deleted file mode 100644
index 981a798fe2..0000000000
--- a/nova/tests/scheduler/filters/test_type_filters.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova.scheduler.filters import type_filter
-from nova import test
-from nova.tests.scheduler import fakes
-
-
-class TestTypeFilter(test.NoDBTestCase):
-
- @mock.patch('nova.db.instance_get_all_by_host_and_not_type')
- def test_type_filter(self, get_mock):
- self.filt_cls = type_filter.TypeAffinityFilter()
-
- host = fakes.FakeHostState('fake_host', 'fake_node', {})
- filter_properties = {'context': mock.MagicMock(),
- 'instance_type': {'id': 'fake1'}}
- get_mock.return_value = []
- # True since empty
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- get_mock.assert_called_once_with(
- mock.ANY, # context...
- 'fake_host',
- 'fake1'
- )
- get_mock.return_value = [mock.sentinel.instances]
- # False since not empty
- self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
-
- @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
- def test_aggregate_type_filter(self, agg_mock):
- self.filt_cls = type_filter.AggregateTypeAffinityFilter()
-
- filter_properties = {'context': mock.sentinel.ctx,
- 'instance_type': {'name': 'fake1'}}
- filter2_properties = {'context': mock.sentinel.ctx,
- 'instance_type': {'name': 'fake2'}}
- host = fakes.FakeHostState('fake_host', 'fake_node', {})
- agg_mock.return_value = set(['fake1'])
- # True since no aggregates
- self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
- agg_mock.assert_called_once_with(mock.sentinel.ctx, 'fake_host',
- 'instance_type')
- # False since type matches aggregate, metadata
- self.assertFalse(self.filt_cls.host_passes(host, filter2_properties))
diff --git a/nova/tests/scheduler/test_caching_scheduler.py b/nova/tests/scheduler/test_caching_scheduler.py
deleted file mode 100644
index def432328f..0000000000
--- a/nova/tests/scheduler/test_caching_scheduler.py
+++ /dev/null
@@ -1,199 +0,0 @@
-# Copyright (c) 2014 Rackspace Hosting
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo.utils import timeutils
-
-from nova import exception
-from nova.scheduler import caching_scheduler
-from nova.scheduler import host_manager
-from nova.tests.scheduler import test_scheduler
-
-ENABLE_PROFILER = False
-
-
-class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
- """Test case for Caching Scheduler."""
-
- driver_cls = caching_scheduler.CachingScheduler
-
- @mock.patch.object(caching_scheduler.CachingScheduler,
- "_get_up_hosts")
- def test_run_periodic_tasks_loads_hosts(self, mock_up_hosts):
- mock_up_hosts.return_value = []
- context = mock.Mock()
-
- self.driver.run_periodic_tasks(context)
-
- self.assertTrue(mock_up_hosts.called)
- self.assertEqual([], self.driver.all_host_states)
- context.elevated.assert_called_with()
-
- @mock.patch.object(caching_scheduler.CachingScheduler,
- "_get_up_hosts")
- def test_get_all_host_states_returns_cached_value(self, mock_up_hosts):
- self.driver.all_host_states = []
-
- self.driver._get_all_host_states(self.context)
-
- self.assertFalse(mock_up_hosts.called)
- self.assertEqual([], self.driver.all_host_states)
-
- @mock.patch.object(caching_scheduler.CachingScheduler,
- "_get_up_hosts")
- def test_get_all_host_states_loads_hosts(self, mock_up_hosts):
- mock_up_hosts.return_value = ["asdf"]
-
- result = self.driver._get_all_host_states(self.context)
-
- self.assertTrue(mock_up_hosts.called)
- self.assertEqual(["asdf"], self.driver.all_host_states)
- self.assertEqual(["asdf"], result)
-
- def test_get_up_hosts(self):
- with mock.patch.object(self.driver.host_manager,
- "get_all_host_states") as mock_get_hosts:
- mock_get_hosts.return_value = ["asdf"]
-
- result = self.driver._get_up_hosts(self.context)
-
- self.assertTrue(mock_get_hosts.called)
- self.assertEqual(mock_get_hosts.return_value, result)
-
- def test_select_destination_raises_with_no_hosts(self):
- fake_request_spec = self._get_fake_request_spec()
- self.driver.all_host_states = []
-
- self.assertRaises(exception.NoValidHost,
- self.driver.select_destinations,
- self.context, fake_request_spec, {})
-
- @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
- return_value={'numa_topology': None,
- 'pci_requests': None})
- def test_select_destination_works(self, mock_get_extra):
- fake_request_spec = self._get_fake_request_spec()
- fake_host = self._get_fake_host_state()
- self.driver.all_host_states = [fake_host]
-
- result = self._test_select_destinations(fake_request_spec)
-
- self.assertEqual(1, len(result))
- self.assertEqual(result[0]["host"], fake_host.host)
-
- def _test_select_destinations(self, request_spec):
- return self.driver.select_destinations(
- self.context, request_spec, {})
-
- def _get_fake_request_spec(self):
- flavor = {
- "flavorid": "small",
- "memory_mb": 512,
- "root_gb": 1,
- "ephemeral_gb": 1,
- "vcpus": 1,
- }
- instance_properties = {
- "os_type": "linux",
- "project_id": "1234",
- "memory_mb": 512,
- "root_gb": 1,
- "ephemeral_gb": 1,
- "vcpus": 1,
- "uuid": 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
- }
- request_spec = {
- "instance_type": flavor,
- "instance_properties": instance_properties,
- "num_instances": 1,
- }
- return request_spec
-
- def _get_fake_host_state(self, index=0):
- host_state = host_manager.HostState(
- 'host_%s' % index,
- 'node_%s' % index)
- host_state.free_ram_mb = 50000
- host_state.service = {
- "disabled": False,
- "updated_at": timeutils.utcnow(),
- "created_at": timeutils.utcnow(),
- }
- return host_state
-
- @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
- return_value={'numa_topology': None,
- 'pci_requests': None})
- def test_performance_check_select_destination(self, mock_get_extra):
- hosts = 2
- requests = 1
-
- self.flags(service_down_time=240)
-
- request_spec = self._get_fake_request_spec()
- host_states = []
- for x in xrange(hosts):
- host_state = self._get_fake_host_state(x)
- host_states.append(host_state)
- self.driver.all_host_states = host_states
-
- def run_test():
- a = timeutils.utcnow()
-
- for x in xrange(requests):
- self.driver.select_destinations(
- self.context, request_spec, {})
-
- b = timeutils.utcnow()
- c = b - a
-
- seconds = (c.days * 24 * 60 * 60 + c.seconds)
- microseconds = seconds * 1000 + c.microseconds / 1000.0
- per_request_ms = microseconds / requests
- return per_request_ms
-
- per_request_ms = None
- if ENABLE_PROFILER:
- import pycallgraph
- from pycallgraph import output
- config = pycallgraph.Config(max_depth=10)
- config.trace_filter = pycallgraph.GlobbingFilter(exclude=[
- 'pycallgraph.*',
- 'unittest.*',
- 'nova.tests.*',
- ])
- graphviz = output.GraphvizOutput(output_file='scheduler.png')
-
- with pycallgraph.PyCallGraph(output=graphviz):
- per_request_ms = run_test()
-
- else:
- per_request_ms = run_test()
-
- # This has proved to be around 1 ms on a random dev box
- # But this is here so you can do simply performance testing easily.
- self.assertTrue(per_request_ms < 1000)
-
-
-if __name__ == '__main__':
- # A handy tool to help profile the schedulers performance
- ENABLE_PROFILER = True
- import unittest
- suite = unittest.TestSuite()
- test = "test_performance_check_select_destination"
- test_case = CachingSchedulerTestCase(test)
- suite.addTest(test_case)
- runner = unittest.TextTestRunner()
- runner.run(suite)
diff --git a/nova/tests/scheduler/test_chance_scheduler.py b/nova/tests/scheduler/test_chance_scheduler.py
deleted file mode 100644
index 3f542e6b88..0000000000
--- a/nova/tests/scheduler/test_chance_scheduler.py
+++ /dev/null
@@ -1,182 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For Chance Scheduler.
-"""
-
-import random
-
-import mox
-
-from nova.compute import rpcapi as compute_rpcapi
-from nova.compute import utils as compute_utils
-from nova.compute import vm_states
-from nova import context
-from nova import db
-from nova import exception
-from nova.scheduler import chance
-from nova.scheduler import driver
-from nova.tests.scheduler import test_scheduler
-
-
-class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
- """Test case for Chance Scheduler."""
-
- driver_cls = chance.ChanceScheduler
-
- def test_filter_hosts_avoid(self):
- """Test to make sure _filter_hosts() filters original hosts if
- avoid_original_host is True.
- """
-
- hosts = ['host1', 'host2', 'host3']
- request_spec = dict(instance_properties=dict(host='host2'))
- filter_properties = {'ignore_hosts': ['host2']}
-
- filtered = self.driver._filter_hosts(request_spec, hosts,
- filter_properties=filter_properties)
- self.assertEqual(filtered, ['host1', 'host3'])
-
- def test_filter_hosts_no_avoid(self):
- """Test to make sure _filter_hosts() does not filter original
- hosts if avoid_original_host is False.
- """
-
- hosts = ['host1', 'host2', 'host3']
- request_spec = dict(instance_properties=dict(host='host2'))
- filter_properties = {'ignore_hosts': []}
-
- filtered = self.driver._filter_hosts(request_spec, hosts,
- filter_properties=filter_properties)
- self.assertEqual(filtered, hosts)
-
- def test_basic_schedule_run_instance(self):
- ctxt = context.RequestContext('fake', 'fake', False)
- ctxt_elevated = 'fake-context-elevated'
- instance_opts = {'fake_opt1': 'meow', 'launch_index': -1}
- instance1 = {'uuid': 'fake-uuid1'}
- instance2 = {'uuid': 'fake-uuid2'}
- request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
- 'instance_properties': instance_opts}
-
- def inc_launch_index(*args):
- request_spec['instance_properties']['launch_index'] = (
- request_spec['instance_properties']['launch_index'] + 1)
-
- self.mox.StubOutWithMock(ctxt, 'elevated')
- self.mox.StubOutWithMock(self.driver, 'hosts_up')
- self.mox.StubOutWithMock(random, 'choice')
- self.mox.StubOutWithMock(driver, 'instance_update_db')
- self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')
-
- ctxt.elevated().AndReturn(ctxt_elevated)
- # instance 1
- hosts_full = ['host1', 'host2', 'host3', 'host4']
- self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
- random.choice(hosts_full).AndReturn('host3')
- driver.instance_update_db(ctxt, instance1['uuid']).WithSideEffects(
- inc_launch_index).AndReturn(instance1)
- compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host3',
- instance=instance1, requested_networks=None,
- injected_files=None, admin_password=None, is_first_time=None,
- request_spec=request_spec, filter_properties={},
- legacy_bdm_in_spec=False)
-
- # instance 2
- ctxt.elevated().AndReturn(ctxt_elevated)
- self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
- random.choice(hosts_full).AndReturn('host1')
- driver.instance_update_db(ctxt, instance2['uuid']).WithSideEffects(
- inc_launch_index).AndReturn(instance2)
- compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host1',
- instance=instance2, requested_networks=None,
- injected_files=None, admin_password=None, is_first_time=None,
- request_spec=request_spec, filter_properties={},
- legacy_bdm_in_spec=False)
-
- self.mox.ReplayAll()
- self.driver.schedule_run_instance(ctxt, request_spec,
- None, None, None, None, {}, False)
-
- def test_basic_schedule_run_instance_no_hosts(self):
- ctxt = context.RequestContext('fake', 'fake', False)
- ctxt_elevated = 'fake-context-elevated'
- uuid = 'fake-uuid1'
- instance_opts = {'fake_opt1': 'meow', 'launch_index': -1}
- request_spec = {'instance_uuids': [uuid],
- 'instance_properties': instance_opts}
-
- self.mox.StubOutWithMock(ctxt, 'elevated')
- self.mox.StubOutWithMock(self.driver, 'hosts_up')
- self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
-
- # instance 1
- ctxt.elevated().AndReturn(ctxt_elevated)
- self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn([])
- old_ref, new_ref = db.instance_update_and_get_original(ctxt, uuid,
- {'vm_state': vm_states.ERROR,
- 'task_state': None}).AndReturn(({}, {}))
- compute_utils.add_instance_fault_from_exc(ctxt, new_ref,
- mox.IsA(exception.NoValidHost), mox.IgnoreArg())
-
- self.mox.ReplayAll()
- self.driver.schedule_run_instance(
- ctxt, request_spec, None, None, None, None, {}, False)
-
- def test_select_destinations(self):
- ctxt = context.RequestContext('fake', 'fake', False)
- ctxt_elevated = 'fake-context-elevated'
- request_spec = {'num_instances': 2}
-
- self.mox.StubOutWithMock(ctxt, 'elevated')
- self.mox.StubOutWithMock(self.driver, 'hosts_up')
- self.mox.StubOutWithMock(random, 'choice')
-
- hosts_full = ['host1', 'host2', 'host3', 'host4']
-
- ctxt.elevated().AndReturn(ctxt_elevated)
- self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
- random.choice(hosts_full).AndReturn('host3')
-
- ctxt.elevated().AndReturn(ctxt_elevated)
- self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
- random.choice(hosts_full).AndReturn('host2')
-
- self.mox.ReplayAll()
- dests = self.driver.select_destinations(ctxt, request_spec, {})
- self.assertEqual(2, len(dests))
- (host, node) = (dests[0]['host'], dests[0]['nodename'])
- self.assertEqual('host3', host)
- self.assertIsNone(node)
- (host, node) = (dests[1]['host'], dests[1]['nodename'])
- self.assertEqual('host2', host)
- self.assertIsNone(node)
-
- def test_select_destinations_no_valid_host(self):
-
- def _return_no_host(*args, **kwargs):
- return []
-
- self.mox.StubOutWithMock(self.driver, 'hosts_up')
- self.driver.hosts_up(mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn([1, 2])
- self.stubs.Set(self.driver, '_filter_hosts', _return_no_host)
- self.mox.ReplayAll()
-
- request_spec = {'num_instances': 1}
- self.assertRaises(exception.NoValidHost,
- self.driver.select_destinations, self.context,
- request_spec, {})
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
deleted file mode 100644
index f5e6924c10..0000000000
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ /dev/null
@@ -1,596 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For Filter Scheduler.
-"""
-
-import mock
-import mox
-
-from nova.compute import utils as compute_utils
-from nova.compute import vm_states
-from nova import context
-from nova import db
-from nova import exception
-from nova.scheduler import driver
-from nova.scheduler import filter_scheduler
-from nova.scheduler import host_manager
-from nova.scheduler import utils as scheduler_utils
-from nova.scheduler import weights
-from nova.tests.scheduler import fakes
-from nova.tests.scheduler import test_scheduler
-
-
-def fake_get_filtered_hosts(hosts, filter_properties, index):
- return list(hosts)
-
-
-class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
- """Test case for Filter Scheduler."""
-
- driver_cls = filter_scheduler.FilterScheduler
-
- def test_run_instance_no_hosts(self):
- sched = fakes.FakeFilterScheduler()
- uuid = 'fake-uuid1'
- fake_context = context.RequestContext('user', 'project')
- instance_properties = {'project_id': 1, 'os_type': 'Linux'}
- request_spec = {'instance_type': {'memory_mb': 1, 'root_gb': 1,
- 'ephemeral_gb': 0},
- 'instance_properties': instance_properties,
- 'instance_uuids': [uuid]}
-
- self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
- old_ref, new_ref = db.instance_update_and_get_original(fake_context,
- uuid, {'vm_state': vm_states.ERROR, 'task_state':
- None}).AndReturn(({}, {}))
- compute_utils.add_instance_fault_from_exc(fake_context, new_ref,
- mox.IsA(exception.NoValidHost), mox.IgnoreArg())
-
- self.mox.StubOutWithMock(db, 'compute_node_get_all')
- db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
-
- self.mox.ReplayAll()
- sched.schedule_run_instance(
- fake_context, request_spec, None, None,
- None, None, {}, False)
-
- def test_run_instance_non_admin(self):
- self.was_admin = False
-
- def fake_get(context, *args, **kwargs):
- # make sure this is called with admin context, even though
- # we're using user context below
- self.was_admin = context.is_admin
- return {}
-
- sched = fakes.FakeFilterScheduler()
- self.stubs.Set(sched.host_manager, 'get_all_host_states', fake_get)
-
- fake_context = context.RequestContext('user', 'project')
-
- uuid = 'fake-uuid1'
- instance_properties = {'project_id': 1, 'os_type': 'Linux'}
- request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
- 'instance_properties': instance_properties,
- 'instance_uuids': [uuid]}
- self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
- old_ref, new_ref = db.instance_update_and_get_original(fake_context,
- uuid, {'vm_state': vm_states.ERROR, 'task_state':
- None}).AndReturn(({}, {}))
- compute_utils.add_instance_fault_from_exc(fake_context, new_ref,
- mox.IsA(exception.NoValidHost), mox.IgnoreArg())
- self.mox.ReplayAll()
- sched.schedule_run_instance(
- fake_context, request_spec, None, None, None, None, {}, False)
- self.assertTrue(self.was_admin)
-
- def test_scheduler_includes_launch_index(self):
- fake_context = context.RequestContext('user', 'project')
- instance_opts = {'fake_opt1': 'meow'}
- request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
- 'instance_properties': instance_opts}
- instance1 = {'uuid': 'fake-uuid1'}
- instance2 = {'uuid': 'fake-uuid2'}
-
- def _has_launch_index(expected_index):
- """Return a function that verifies the expected index."""
- def _check_launch_index(value):
- if 'instance_properties' in value:
- if 'launch_index' in value['instance_properties']:
- index = value['instance_properties']['launch_index']
- if index == expected_index:
- return True
- return False
- return _check_launch_index
-
- self.mox.StubOutWithMock(self.driver, '_schedule')
- self.mox.StubOutWithMock(self.driver, '_provision_resource')
-
- expected_filter_properties = {'retry': {'num_attempts': 1,
- 'hosts': []}}
- self.driver._schedule(fake_context, request_spec,
- expected_filter_properties).AndReturn(['host1', 'host2'])
- # instance 1
- self.driver._provision_resource(
- fake_context, 'host1',
- mox.Func(_has_launch_index(0)), expected_filter_properties,
- None, None, None, None,
- instance_uuid='fake-uuid1',
- legacy_bdm_in_spec=False).AndReturn(instance1)
- # instance 2
- self.driver._provision_resource(
- fake_context, 'host2',
- mox.Func(_has_launch_index(1)), expected_filter_properties,
- None, None, None, None,
- instance_uuid='fake-uuid2',
- legacy_bdm_in_spec=False).AndReturn(instance2)
- self.mox.ReplayAll()
-
- self.driver.schedule_run_instance(fake_context, request_spec,
- None, None, None, None, {}, False)
-
- @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
- return_value={'numa_topology': None,
- 'pci_requests': None})
- def test_schedule_happy_day(self, mock_get_extra):
- """Make sure there's nothing glaringly wrong with _schedule()
- by doing a happy day pass through.
- """
-
- self.next_weight = 1.0
-
- def _fake_weigh_objects(_self, functions, hosts, options):
- self.next_weight += 2.0
- host_state = hosts[0]
- return [weights.WeighedHost(host_state, self.next_weight)]
-
- sched = fakes.FakeFilterScheduler()
- fake_context = context.RequestContext('user', 'project',
- is_admin=True)
-
- self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
- fake_get_filtered_hosts)
- self.stubs.Set(weights.HostWeightHandler,
- 'get_weighed_objects', _fake_weigh_objects)
- fakes.mox_host_manager_db_calls(self.mox, fake_context)
-
- request_spec = {'num_instances': 10,
- 'instance_type': {'memory_mb': 512, 'root_gb': 512,
- 'ephemeral_gb': 0,
- 'vcpus': 1},
- 'instance_properties': {'project_id': 1,
- 'root_gb': 512,
- 'memory_mb': 512,
- 'ephemeral_gb': 0,
- 'vcpus': 1,
- 'os_type': 'Linux',
- 'uuid': 'fake-uuid'}}
- self.mox.ReplayAll()
- weighed_hosts = sched._schedule(fake_context, request_spec, {})
- self.assertEqual(len(weighed_hosts), 10)
- for weighed_host in weighed_hosts:
- self.assertIsNotNone(weighed_host.obj)
-
- def test_max_attempts(self):
- self.flags(scheduler_max_attempts=4)
- self.assertEqual(4, scheduler_utils._max_attempts())
-
- def test_invalid_max_attempts(self):
- self.flags(scheduler_max_attempts=0)
- self.assertRaises(exception.NovaException,
- scheduler_utils._max_attempts)
-
- def test_retry_disabled(self):
- # Retry info should not get populated when re-scheduling is off.
- self.flags(scheduler_max_attempts=1)
- sched = fakes.FakeFilterScheduler()
- request_spec = dict(instance_properties={},
- instance_uuids=['fake-uuid1'])
- filter_properties = {}
-
- self.mox.StubOutWithMock(sched, '_schedule')
- self.mox.StubOutWithMock(sched, '_provision_resource')
-
- sched._schedule(self.context, request_spec,
- filter_properties).AndReturn(['host1'])
- sched._provision_resource(
- self.context, 'host1',
- request_spec, filter_properties,
- None, None, None, None,
- instance_uuid='fake-uuid1',
- legacy_bdm_in_spec=False)
-
- self.mox.ReplayAll()
-
- sched.schedule_run_instance(self.context, request_spec, None, None,
- None, None, filter_properties, False)
-
- def test_retry_force_hosts(self):
- # Retry info should not get populated when re-scheduling is off.
- self.flags(scheduler_max_attempts=2)
- sched = fakes.FakeFilterScheduler()
- request_spec = dict(instance_properties={},
- instance_uuids=['fake-uuid1'])
- filter_properties = {'force_hosts': ['force_host']}
-
- self.mox.StubOutWithMock(sched, '_schedule')
- self.mox.StubOutWithMock(sched, '_provision_resource')
-
- sched._schedule(self.context, request_spec,
- filter_properties).AndReturn(['host1'])
- sched._provision_resource(
- self.context, 'host1',
- request_spec, filter_properties,
- None, None, None, None,
- instance_uuid='fake-uuid1',
- legacy_bdm_in_spec=False)
-
- self.mox.ReplayAll()
-
- sched.schedule_run_instance(self.context, request_spec, None, None,
- None, None, filter_properties, False)
-
- def test_retry_force_nodes(self):
- # Retry info should not get populated when re-scheduling is off.
- self.flags(scheduler_max_attempts=2)
- sched = fakes.FakeFilterScheduler()
- request_spec = dict(instance_properties={},
- instance_uuids=['fake-uuid1'])
- filter_properties = {'force_nodes': ['force_node']}
-
- self.mox.StubOutWithMock(sched, '_schedule')
- self.mox.StubOutWithMock(sched, '_provision_resource')
-
- sched._schedule(self.context, request_spec,
- filter_properties).AndReturn(['host1'])
- sched._provision_resource(
- self.context, 'host1',
- request_spec, filter_properties,
- None, None, None, None,
- instance_uuid='fake-uuid1',
- legacy_bdm_in_spec=False)
-
- self.mox.ReplayAll()
-
- sched.schedule_run_instance(self.context, request_spec, None, None,
- None, None, filter_properties, False)
-
- def test_retry_attempt_one(self):
- # Test retry logic on initial scheduling attempt.
- self.flags(scheduler_max_attempts=2)
- sched = fakes.FakeFilterScheduler()
- request_spec = dict(instance_properties={},
- instance_uuids=['fake-uuid1'])
- filter_properties = {}
- expected_filter_properties = {'retry': {'num_attempts': 1,
- 'hosts': []}}
- self.mox.StubOutWithMock(sched, '_schedule')
- self.mox.StubOutWithMock(sched, '_provision_resource')
-
- sched._schedule(self.context, request_spec,
- expected_filter_properties).AndReturn(['host1'])
- sched._provision_resource(
- self.context, 'host1',
- request_spec, expected_filter_properties,
- None, None, None, None,
- instance_uuid='fake-uuid1',
- legacy_bdm_in_spec=False)
-
- self.mox.ReplayAll()
-
- sched.schedule_run_instance(self.context, request_spec, None, None,
- None, None, filter_properties, False)
-
- def test_retry_attempt_two(self):
- # Test retry logic when re-scheduling.
- self.flags(scheduler_max_attempts=2)
- sched = fakes.FakeFilterScheduler()
- request_spec = dict(instance_properties={},
- instance_uuids=['fake-uuid1'])
- filter_properties = {'retry': {'num_attempts': 1}}
- expected_filter_properties = {'retry': {'num_attempts': 2}}
- self.mox.StubOutWithMock(sched, '_schedule')
- self.mox.StubOutWithMock(sched, '_provision_resource')
-
- sched._schedule(self.context, request_spec,
- expected_filter_properties).AndReturn(['host1'])
- sched._provision_resource(
- self.context, 'host1',
- request_spec, expected_filter_properties,
- None, None, None, None,
- instance_uuid='fake-uuid1',
- legacy_bdm_in_spec=False)
-
- self.mox.ReplayAll()
-
- sched.schedule_run_instance(self.context, request_spec, None, None,
- None, None, filter_properties, False)
-
- def test_retry_exceeded_max_attempts(self):
- # Test for necessary explosion when max retries is exceeded and that
- # the information needed in request_spec is still present for error
- # handling
- self.flags(scheduler_max_attempts=2)
- sched = fakes.FakeFilterScheduler()
- request_spec = dict(instance_properties={},
- instance_uuids=['fake-uuid1'])
- filter_properties = {'retry': {'num_attempts': 2}}
-
- self.mox.ReplayAll()
-
- self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
- self.context, request_spec, None, None,
- None, None, filter_properties, False)
-
- def test_add_retry_host(self):
- retry = dict(num_attempts=1, hosts=[])
- filter_properties = dict(retry=retry)
- host = "fakehost"
- node = "fakenode"
-
- scheduler_utils._add_retry_host(filter_properties, host, node)
-
- hosts = filter_properties['retry']['hosts']
- self.assertEqual(1, len(hosts))
- self.assertEqual([host, node], hosts[0])
-
- def test_post_select_populate(self):
- # Test addition of certain filter props after a node is selected.
- retry = {'hosts': [], 'num_attempts': 1}
- filter_properties = {'retry': retry}
-
- host_state = host_manager.HostState('host', 'node')
- host_state.limits['vcpus'] = 5
- scheduler_utils.populate_filter_properties(filter_properties,
- host_state)
-
- self.assertEqual(['host', 'node'],
- filter_properties['retry']['hosts'][0])
-
- self.assertEqual({'vcpus': 5}, host_state.limits)
-
- @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
- return_value={'numa_topology': None,
- 'pci_requests': None})
- def test_schedule_host_pool(self, mock_get_extra):
- """Make sure the scheduler_host_subset_size property works properly."""
-
- self.flags(scheduler_host_subset_size=2)
- sched = fakes.FakeFilterScheduler()
-
- fake_context = context.RequestContext('user', 'project',
- is_admin=True)
- self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
- fake_get_filtered_hosts)
- fakes.mox_host_manager_db_calls(self.mox, fake_context)
-
- instance_properties = {'project_id': 1,
- 'root_gb': 512,
- 'memory_mb': 512,
- 'ephemeral_gb': 0,
- 'vcpus': 1,
- 'os_type': 'Linux',
- 'uuid': 'fake-uuid'}
-
- request_spec = dict(instance_properties=instance_properties,
- instance_type={})
- filter_properties = {}
- self.mox.ReplayAll()
- hosts = sched._schedule(self.context, request_spec,
- filter_properties=filter_properties)
-
- # one host should be chosen
- self.assertEqual(len(hosts), 1)
-
- @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
- return_value={'numa_topology': None,
- 'pci_requests': None})
- def test_schedule_large_host_pool(self, mock_get_extra):
- """Hosts should still be chosen if pool size
- is larger than number of filtered hosts.
- """
-
- sched = fakes.FakeFilterScheduler()
-
- fake_context = context.RequestContext('user', 'project',
- is_admin=True)
- self.flags(scheduler_host_subset_size=20)
- self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
- fake_get_filtered_hosts)
- fakes.mox_host_manager_db_calls(self.mox, fake_context)
-
- instance_properties = {'project_id': 1,
- 'root_gb': 512,
- 'memory_mb': 512,
- 'ephemeral_gb': 0,
- 'vcpus': 1,
- 'os_type': 'Linux',
- 'uuid': 'fake-uuid'}
- request_spec = dict(instance_properties=instance_properties,
- instance_type={})
- filter_properties = {}
- self.mox.ReplayAll()
- hosts = sched._schedule(self.context, request_spec,
- filter_properties=filter_properties)
-
- # one host should be chose
- self.assertEqual(len(hosts), 1)
-
- @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
- return_value={'numa_topology': None,
- 'pci_requests': None})
- def test_schedule_chooses_best_host(self, mock_get_extra):
- """If scheduler_host_subset_size is 1, the largest host with greatest
- weight should be returned.
- """
-
- self.flags(scheduler_host_subset_size=1)
-
- sched = fakes.FakeFilterScheduler()
-
- fake_context = context.RequestContext('user', 'project',
- is_admin=True)
- self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
- fake_get_filtered_hosts)
- fakes.mox_host_manager_db_calls(self.mox, fake_context)
-
- self.next_weight = 50
-
- def _fake_weigh_objects(_self, functions, hosts, options):
- this_weight = self.next_weight
- self.next_weight = 0
- host_state = hosts[0]
- return [weights.WeighedHost(host_state, this_weight)]
-
- instance_properties = {'project_id': 1,
- 'root_gb': 512,
- 'memory_mb': 512,
- 'ephemeral_gb': 0,
- 'vcpus': 1,
- 'os_type': 'Linux',
- 'uuid': 'fake-uuid'}
-
- request_spec = dict(instance_properties=instance_properties,
- instance_type={})
-
- self.stubs.Set(weights.HostWeightHandler,
- 'get_weighed_objects', _fake_weigh_objects)
-
- filter_properties = {}
- self.mox.ReplayAll()
- hosts = sched._schedule(self.context, request_spec,
- filter_properties=filter_properties)
-
- # one host should be chosen
- self.assertEqual(1, len(hosts))
-
- self.assertEqual(50, hosts[0].weight)
-
- @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
- return_value={'numa_topology': None,
- 'pci_requests': None})
- def test_select_destinations(self, mock_get_extra):
- """select_destinations is basically a wrapper around _schedule().
-
- Similar to the _schedule tests, this just does a happy path test to
- ensure there is nothing glaringly wrong.
- """
-
- self.next_weight = 1.0
-
- selected_hosts = []
- selected_nodes = []
-
- def _fake_weigh_objects(_self, functions, hosts, options):
- self.next_weight += 2.0
- host_state = hosts[0]
- selected_hosts.append(host_state.host)
- selected_nodes.append(host_state.nodename)
- return [weights.WeighedHost(host_state, self.next_weight)]
-
- sched = fakes.FakeFilterScheduler()
- fake_context = context.RequestContext('user', 'project',
- is_admin=True)
-
- self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
- fake_get_filtered_hosts)
- self.stubs.Set(weights.HostWeightHandler,
- 'get_weighed_objects', _fake_weigh_objects)
- fakes.mox_host_manager_db_calls(self.mox, fake_context)
-
- request_spec = {'instance_type': {'memory_mb': 512, 'root_gb': 512,
- 'ephemeral_gb': 0,
- 'vcpus': 1},
- 'instance_properties': {'project_id': 1,
- 'root_gb': 512,
- 'memory_mb': 512,
- 'ephemeral_gb': 0,
- 'vcpus': 1,
- 'os_type': 'Linux',
- 'uuid': 'fake-uuid'},
- 'num_instances': 1}
- self.mox.ReplayAll()
- dests = sched.select_destinations(fake_context, request_spec, {})
- (host, node) = (dests[0]['host'], dests[0]['nodename'])
- self.assertEqual(host, selected_hosts[0])
- self.assertEqual(node, selected_nodes[0])
-
- @mock.patch.object(filter_scheduler.FilterScheduler, '_schedule')
- def test_select_destinations_notifications(self, mock_schedule):
- mock_schedule.return_value = [mock.Mock()]
-
- with mock.patch.object(self.driver.notifier, 'info') as mock_info:
- request_spec = {'num_instances': 1}
-
- self.driver.select_destinations(self.context, request_spec, {})
-
- expected = [
- mock.call(self.context, 'scheduler.select_destinations.start',
- dict(request_spec=request_spec)),
- mock.call(self.context, 'scheduler.select_destinations.end',
- dict(request_spec=request_spec))]
- self.assertEqual(expected, mock_info.call_args_list)
-
- def test_select_destinations_no_valid_host(self):
-
- def _return_no_host(*args, **kwargs):
- return []
-
- self.stubs.Set(self.driver, '_schedule', _return_no_host)
- self.assertRaises(exception.NoValidHost,
- self.driver.select_destinations, self.context,
- {'num_instances': 1}, {})
-
- def test_select_destinations_no_valid_host_not_enough(self):
- # Tests that we have fewer hosts available than number of instances
- # requested to build.
- with mock.patch.object(self.driver, '_schedule',
- return_value=[mock.sentinel.host1]):
- try:
- self.driver.select_destinations(
- self.context, {'num_instances': 2}, {})
- self.fail('Expected NoValidHost to be raised.')
- except exception.NoValidHost as e:
- # Make sure that we provided a reason why NoValidHost.
- self.assertIn('reason', e.kwargs)
- self.assertTrue(len(e.kwargs['reason']) > 0)
-
- def test_handles_deleted_instance(self):
- """Test instance deletion while being scheduled."""
-
- def _raise_instance_not_found(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id='123')
-
- self.stubs.Set(driver, 'instance_update_db',
- _raise_instance_not_found)
-
- sched = fakes.FakeFilterScheduler()
-
- fake_context = context.RequestContext('user', 'project')
- host_state = host_manager.HostState('host2', 'node2')
- weighted_host = weights.WeighedHost(host_state, 1.42)
- filter_properties = {}
-
- uuid = 'fake-uuid1'
- instance_properties = {'project_id': 1, 'os_type': 'Linux'}
- request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
- 'instance_properties': instance_properties,
- 'instance_uuids': [uuid]}
- sched._provision_resource(fake_context, weighted_host,
- request_spec, filter_properties,
- None, None, None, None)
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
deleted file mode 100644
index b3483b169a..0000000000
--- a/nova/tests/scheduler/test_host_filters.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2011 OpenStack Foundation # All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For Scheduler Host Filters.
-"""
-
-from nova.scheduler import filters
-from nova.scheduler.filters import all_hosts_filter
-from nova.scheduler.filters import compute_filter
-from nova import test
-from nova.tests.scheduler import fakes
-
-
-class HostFiltersTestCase(test.NoDBTestCase):
-
- def test_filter_handler(self):
- # Double check at least a couple of known filters exist
- filter_handler = filters.HostFilterHandler()
- classes = filter_handler.get_matching_classes(
- ['nova.scheduler.filters.all_filters'])
- self.assertIn(all_hosts_filter.AllHostsFilter, classes)
- self.assertIn(compute_filter.ComputeFilter, classes)
-
- def test_all_host_filter(self):
- filt_cls = all_hosts_filter.AllHostsFilter()
- host = fakes.FakeHostState('host1', 'node1', {})
- self.assertTrue(filt_cls.host_passes(host, {}))
diff --git a/nova/tests/scheduler/test_host_manager.py b/nova/tests/scheduler/test_host_manager.py
deleted file mode 100644
index ec28e2d959..0000000000
--- a/nova/tests/scheduler/test_host_manager.py
+++ /dev/null
@@ -1,545 +0,0 @@
-# Copyright (c) 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For HostManager
-"""
-
-import mock
-from oslo.serialization import jsonutils
-from oslo.utils import timeutils
-import six
-
-from nova.compute import task_states
-from nova.compute import vm_states
-from nova import db
-from nova import exception
-from nova.i18n import _LW
-from nova.scheduler import filters
-from nova.scheduler import host_manager
-from nova import test
-from nova.tests import matchers
-from nova.tests.scheduler import fakes
-from nova import utils
-from nova.virt import hardware
-
-
-class FakeFilterClass1(filters.BaseHostFilter):
- def host_passes(self, host_state, filter_properties):
- pass
-
-
-class FakeFilterClass2(filters.BaseHostFilter):
- def host_passes(self, host_state, filter_properties):
- pass
-
-
-class HostManagerTestCase(test.NoDBTestCase):
- """Test case for HostManager class."""
-
- def setUp(self):
- super(HostManagerTestCase, self).setUp()
- self.host_manager = host_manager.HostManager()
- self.fake_hosts = [host_manager.HostState('fake_host%s' % x,
- 'fake-node') for x in xrange(1, 5)]
- self.fake_hosts += [host_manager.HostState('fake_multihost',
- 'fake-node%s' % x) for x in xrange(1, 5)]
- self.addCleanup(timeutils.clear_time_override)
-
- def test_choose_host_filters_not_found(self):
- self.flags(scheduler_default_filters='FakeFilterClass3')
- self.host_manager.filter_classes = [FakeFilterClass1,
- FakeFilterClass2]
- self.assertRaises(exception.SchedulerHostFilterNotFound,
- self.host_manager._choose_host_filters, None)
-
- def test_choose_host_filters(self):
- self.flags(scheduler_default_filters=['FakeFilterClass2'])
- self.host_manager.filter_classes = [FakeFilterClass1,
- FakeFilterClass2]
-
- # Test we returns 1 correct function
- filter_classes = self.host_manager._choose_host_filters(None)
- self.assertEqual(len(filter_classes), 1)
- self.assertEqual(filter_classes[0].__name__, 'FakeFilterClass2')
-
- def _mock_get_filtered_hosts(self, info, specified_filters=None):
- self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
-
- info['got_objs'] = []
- info['got_fprops'] = []
-
- def fake_filter_one(_self, obj, filter_props):
- info['got_objs'].append(obj)
- info['got_fprops'].append(filter_props)
- return True
-
- self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
- self.host_manager._choose_host_filters(specified_filters).AndReturn(
- [FakeFilterClass1])
-
- def _verify_result(self, info, result, filters=True):
- for x in info['got_fprops']:
- self.assertEqual(x, info['expected_fprops'])
- if filters:
- self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
- self.assertEqual(set(info['expected_objs']), set(result))
-
- def test_get_filtered_hosts(self):
- fake_properties = {'moo': 1, 'cow': 2}
-
- info = {'expected_objs': self.fake_hosts,
- 'expected_fprops': fake_properties}
-
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result)
-
- def test_get_filtered_hosts_with_specified_filters(self):
- fake_properties = {'moo': 1, 'cow': 2}
-
- specified_filters = ['FakeFilterClass1', 'FakeFilterClass2']
- info = {'expected_objs': self.fake_hosts,
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info, specified_filters)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties, filter_class_names=specified_filters)
- self._verify_result(info, result)
-
- def test_get_filtered_hosts_with_ignore(self):
- fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
- 'fake_host5', 'fake_multihost']}
-
- # [1] and [3] are host2 and host4
- info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result)
-
- def test_get_filtered_hosts_with_force_hosts(self):
- fake_properties = {'force_hosts': ['fake_host1', 'fake_host3',
- 'fake_host5']}
-
- # [0] and [2] are host1 and host3
- info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result, False)
-
- def test_get_filtered_hosts_with_no_matching_force_hosts(self):
- fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
-
- info = {'expected_objs': [],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result, False)
-
- def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
- # Ensure ignore_hosts processed before force_hosts in host filters.
- fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
- 'ignore_hosts': ['fake_host1']}
-
- # only fake_host3 should be left.
- info = {'expected_objs': [self.fake_hosts[2]],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result, False)
-
- def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
- # Ensure all nodes returned for a host with many nodes
- fake_properties = {'force_hosts': ['fake_multihost']}
-
- info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
- self.fake_hosts[6], self.fake_hosts[7]],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result, False)
-
- def test_get_filtered_hosts_with_force_nodes(self):
- fake_properties = {'force_nodes': ['fake-node2', 'fake-node4',
- 'fake-node9']}
-
- # [5] is fake-node2, [7] is fake-node4
- info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result, False)
-
- def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
- # Ensure only overlapping results if both force host and node
- fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'],
- 'force_nodes': ['fake-node2', 'fake-node9']}
-
- # [5] is fake-node2
- info = {'expected_objs': [self.fake_hosts[5]],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result, False)
-
- def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
- # Ensure non-overlapping force_node and force_host yield no result
- fake_properties = {'force_hosts': ['fake_multihost'],
- 'force_nodes': ['fake-node']}
-
- info = {'expected_objs': [],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result, False)
-
- def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
- # Ensure ignore_hosts can coexist with force_nodes
- fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
- 'ignore_hosts': ['fake_host1', 'fake_host2']}
-
- info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result, False)
-
- def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
- # Ensure ignore_hosts is processed before force_nodes
- fake_properties = {'force_nodes': ['fake_node4', 'fake_node2'],
- 'ignore_hosts': ['fake_multihost']}
-
- info = {'expected_objs': [],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result, False)
-
- def test_get_all_host_states(self):
-
- context = 'fake_context'
-
- self.mox.StubOutWithMock(db, 'compute_node_get_all')
- self.mox.StubOutWithMock(host_manager.LOG, 'warn')
-
- db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
- # node 3 host physical disk space is greater than database
- host_manager.LOG.warn(_LW("Host %(hostname)s has more disk space than "
- "database expected (%(physical)sgb > "
- "%(database)sgb)"),
- {'physical': 3333, 'database': 3072,
- 'hostname': 'node3'})
- # Invalid service
- host_manager.LOG.warn(_LW("No service for compute ID %s"), 5)
-
- self.mox.ReplayAll()
- self.host_manager.get_all_host_states(context)
- host_states_map = self.host_manager.host_state_map
-
- self.assertEqual(len(host_states_map), 4)
- # Check that .service is set properly
- for i in xrange(4):
- compute_node = fakes.COMPUTE_NODES[i]
- host = compute_node['service']['host']
- node = compute_node['hypervisor_hostname']
- state_key = (host, node)
- self.assertEqual(host_states_map[state_key].service,
- compute_node['service'])
- self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb,
- 512)
- # 511GB
- self.assertEqual(host_states_map[('host1', 'node1')].free_disk_mb,
- 524288)
- self.assertEqual(host_states_map[('host2', 'node2')].free_ram_mb,
- 1024)
- # 1023GB
- self.assertEqual(host_states_map[('host2', 'node2')].free_disk_mb,
- 1048576)
- self.assertEqual(host_states_map[('host3', 'node3')].free_ram_mb,
- 3072)
- # 3071GB
- self.assertEqual(host_states_map[('host3', 'node3')].free_disk_mb,
- 3145728)
- self.assertThat(
- hardware.VirtNUMAHostTopology.from_json(
- host_states_map[('host3', 'node3')].numa_topology
- )._to_dict(),
- matchers.DictMatches(fakes.NUMA_TOPOLOGY._to_dict()))
- self.assertEqual(host_states_map[('host4', 'node4')].free_ram_mb,
- 8192)
- # 8191GB
- self.assertEqual(host_states_map[('host4', 'node4')].free_disk_mb,
- 8388608)
-
-
-class HostManagerChangedNodesTestCase(test.NoDBTestCase):
- """Test case for HostManager class."""
-
- def setUp(self):
- super(HostManagerChangedNodesTestCase, self).setUp()
- self.host_manager = host_manager.HostManager()
- self.fake_hosts = [
- host_manager.HostState('host1', 'node1'),
- host_manager.HostState('host2', 'node2'),
- host_manager.HostState('host3', 'node3'),
- host_manager.HostState('host4', 'node4')
- ]
- self.addCleanup(timeutils.clear_time_override)
-
- def test_get_all_host_states(self):
- context = 'fake_context'
-
- self.mox.StubOutWithMock(db, 'compute_node_get_all')
- db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
- self.mox.ReplayAll()
-
- self.host_manager.get_all_host_states(context)
- host_states_map = self.host_manager.host_state_map
- self.assertEqual(len(host_states_map), 4)
-
- def test_get_all_host_states_after_delete_one(self):
- context = 'fake_context'
-
- self.mox.StubOutWithMock(db, 'compute_node_get_all')
- # all nodes active for first call
- db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
- # remove node4 for second call
- running_nodes = [n for n in fakes.COMPUTE_NODES
- if n.get('hypervisor_hostname') != 'node4']
- db.compute_node_get_all(context).AndReturn(running_nodes)
- self.mox.ReplayAll()
-
- self.host_manager.get_all_host_states(context)
- self.host_manager.get_all_host_states(context)
- host_states_map = self.host_manager.host_state_map
- self.assertEqual(len(host_states_map), 3)
-
- def test_get_all_host_states_after_delete_all(self):
- context = 'fake_context'
-
- self.mox.StubOutWithMock(db, 'compute_node_get_all')
- # all nodes active for first call
- db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
- # remove all nodes for second call
- db.compute_node_get_all(context).AndReturn([])
- self.mox.ReplayAll()
-
- self.host_manager.get_all_host_states(context)
- self.host_manager.get_all_host_states(context)
- host_states_map = self.host_manager.host_state_map
- self.assertEqual(len(host_states_map), 0)
-
-
-class HostStateTestCase(test.NoDBTestCase):
- """Test case for HostState class."""
-
- # update_from_compute_node() and consume_from_instance() are tested
- # in HostManagerTestCase.test_get_all_host_states()
-
- def test_stat_consumption_from_compute_node(self):
- stats = {
- 'num_instances': '5',
- 'num_proj_12345': '3',
- 'num_proj_23456': '1',
- 'num_vm_%s' % vm_states.BUILDING: '2',
- 'num_vm_%s' % vm_states.SUSPENDED: '1',
- 'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
- 'num_task_%s' % task_states.MIGRATING: '2',
- 'num_os_type_linux': '4',
- 'num_os_type_windoze': '1',
- 'io_workload': '42',
- }
- stats = jsonutils.dumps(stats)
-
- hyper_ver_int = utils.convert_version_to_int('6.0.0')
- compute = dict(stats=stats, memory_mb=1, free_disk_gb=0, local_gb=0,
- local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
- updated_at=None, host_ip='127.0.0.1',
- hypervisor_type='htype',
- hypervisor_hostname='hostname', cpu_info='cpu_info',
- supported_instances='{}',
- hypervisor_version=hyper_ver_int, numa_topology=None)
-
- host = host_manager.HostState("fakehost", "fakenode")
- host.update_from_compute_node(compute)
-
- self.assertEqual(5, host.num_instances)
- self.assertEqual(42, host.num_io_ops)
- self.assertEqual(10, len(host.stats))
-
- self.assertEqual('127.0.0.1', host.host_ip)
- self.assertEqual('htype', host.hypervisor_type)
- self.assertEqual('hostname', host.hypervisor_hostname)
- self.assertEqual('cpu_info', host.cpu_info)
- self.assertEqual({}, host.supported_instances)
- self.assertEqual(hyper_ver_int, host.hypervisor_version)
-
- def test_stat_consumption_from_compute_node_non_pci(self):
- stats = {
- 'num_instances': '5',
- 'num_proj_12345': '3',
- 'num_proj_23456': '1',
- 'num_vm_%s' % vm_states.BUILDING: '2',
- 'num_vm_%s' % vm_states.SUSPENDED: '1',
- 'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
- 'num_task_%s' % task_states.MIGRATING: '2',
- 'num_os_type_linux': '4',
- 'num_os_type_windoze': '1',
- 'io_workload': '42',
- }
- stats = jsonutils.dumps(stats)
-
- hyper_ver_int = utils.convert_version_to_int('6.0.0')
- compute = dict(stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
- local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
- updated_at=None, host_ip='127.0.0.1',
- hypervisor_version=hyper_ver_int, numa_topology=None)
-
- host = host_manager.HostState("fakehost", "fakenode")
- host.update_from_compute_node(compute)
- self.assertIsNone(host.pci_stats)
- self.assertEqual(hyper_ver_int, host.hypervisor_version)
-
- def test_stat_consumption_from_compute_node_rescue_unshelving(self):
- stats = {
- 'num_instances': '5',
- 'num_proj_12345': '3',
- 'num_proj_23456': '1',
- 'num_vm_%s' % vm_states.BUILDING: '2',
- 'num_vm_%s' % vm_states.SUSPENDED: '1',
- 'num_task_%s' % task_states.UNSHELVING: '1',
- 'num_task_%s' % task_states.RESCUING: '2',
- 'num_os_type_linux': '4',
- 'num_os_type_windoze': '1',
- 'io_workload': '42',
- }
- stats = jsonutils.dumps(stats)
-
- hyper_ver_int = utils.convert_version_to_int('6.0.0')
- compute = dict(stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
- local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
- updated_at=None, host_ip='127.0.0.1',
- hypervisor_version=hyper_ver_int, numa_topology=None)
-
- host = host_manager.HostState("fakehost", "fakenode")
- host.update_from_compute_node(compute)
-
- self.assertEqual(5, host.num_instances)
- self.assertEqual(42, host.num_io_ops)
- self.assertEqual(10, len(host.stats))
-
- self.assertIsNone(host.pci_stats)
- self.assertEqual(hyper_ver_int, host.hypervisor_version)
-
- @mock.patch('nova.virt.hardware.get_host_numa_usage_from_instance')
- def test_stat_consumption_from_instance(self, numa_usage_mock):
- numa_usage_mock.return_value = 'fake-consumed-once'
- host = host_manager.HostState("fakehost", "fakenode")
-
- instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
- project_id='12345', vm_state=vm_states.BUILDING,
- task_state=task_states.SCHEDULING, os_type='Linux',
- uuid='fake-uuid')
- host.consume_from_instance(instance)
- numa_usage_mock.assert_called_once_with(host, instance)
- self.assertEqual('fake-consumed-once', host.numa_topology)
-
- numa_usage_mock.return_value = 'fake-consumed-twice'
- instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
- project_id='12345', vm_state=vm_states.PAUSED,
- task_state=None, os_type='Linux',
- uuid='fake-uuid')
- host.consume_from_instance(instance)
-
- self.assertEqual(2, host.num_instances)
- self.assertEqual(1, host.num_io_ops)
- self.assertEqual(2, numa_usage_mock.call_count)
- self.assertEqual(((host, instance),), numa_usage_mock.call_args)
- self.assertEqual('fake-consumed-twice', host.numa_topology)
-
- def test_resources_consumption_from_compute_node(self):
- metrics = [
- dict(name='res1',
- value=1.0,
- source='source1',
- timestamp=None),
- dict(name='res2',
- value="string2",
- source='source2',
- timestamp=None),
- ]
- hyper_ver_int = utils.convert_version_to_int('6.0.0')
- compute = dict(metrics=jsonutils.dumps(metrics),
- memory_mb=0, free_disk_gb=0, local_gb=0,
- local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
- updated_at=None, host_ip='127.0.0.1',
- hypervisor_version=hyper_ver_int,
- numa_topology=fakes.NUMA_TOPOLOGY.to_json())
- host = host_manager.HostState("fakehost", "fakenode")
- host.update_from_compute_node(compute)
-
- self.assertEqual(len(host.metrics), 2)
- self.assertEqual(set(['res1', 'res2']), set(host.metrics.keys()))
- self.assertEqual(1.0, host.metrics['res1'].value)
- self.assertEqual('source1', host.metrics['res1'].source)
- self.assertEqual('string2', host.metrics['res2'].value)
- self.assertEqual('source2', host.metrics['res2'].source)
- self.assertIsInstance(host.numa_topology, six.string_types)
diff --git a/nova/tests/scheduler/test_ironic_host_manager.py b/nova/tests/scheduler/test_ironic_host_manager.py
deleted file mode 100644
index 1a779e76c3..0000000000
--- a/nova/tests/scheduler/test_ironic_host_manager.py
+++ /dev/null
@@ -1,430 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation
-# Copyright (c) 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For IronicHostManager
-"""
-
-import mock
-from oslo.serialization import jsonutils
-
-from nova import db
-from nova import exception
-from nova.scheduler import filters
-from nova.scheduler import host_manager
-from nova.scheduler import ironic_host_manager
-from nova import test
-from nova.tests.scheduler import ironic_fakes
-
-
-class FakeFilterClass1(filters.BaseHostFilter):
- def host_passes(self, host_state, filter_properties):
- pass
-
-
-class FakeFilterClass2(filters.BaseHostFilter):
- def host_passes(self, host_state, filter_properties):
- pass
-
-
-class IronicHostManagerTestCase(test.NoDBTestCase):
- """Test case for IronicHostManager class."""
-
- def setUp(self):
- super(IronicHostManagerTestCase, self).setUp()
- self.host_manager = ironic_host_manager.IronicHostManager()
-
- def test_manager_public_api_signatures(self):
- self.assertPublicAPISignatures(host_manager.HostManager(),
- self.host_manager)
-
- def test_state_public_api_signatures(self):
- self.assertPublicAPISignatures(
- host_manager.HostState("dummy",
- "dummy"),
- ironic_host_manager.IronicNodeState("dummy",
- "dummy")
- )
-
- def test_get_all_host_states(self):
- # Ensure .service is set and we have the values we expect to.
- context = 'fake_context'
-
- self.mox.StubOutWithMock(db, 'compute_node_get_all')
- db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
- self.mox.ReplayAll()
-
- self.host_manager.get_all_host_states(context)
- host_states_map = self.host_manager.host_state_map
-
- self.assertEqual(len(host_states_map), 4)
- for i in range(4):
- compute_node = ironic_fakes.COMPUTE_NODES[i]
- host = compute_node['service']['host']
- node = compute_node['hypervisor_hostname']
- state_key = (host, node)
- self.assertEqual(compute_node['service'],
- host_states_map[state_key].service)
- self.assertEqual(jsonutils.loads(compute_node['stats']),
- host_states_map[state_key].stats)
- self.assertEqual(compute_node['free_ram_mb'],
- host_states_map[state_key].free_ram_mb)
- self.assertEqual(compute_node['free_disk_gb'] * 1024,
- host_states_map[state_key].free_disk_mb)
-
-
-class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
- """Test case for IronicHostManager class."""
-
- def setUp(self):
- super(IronicHostManagerChangedNodesTestCase, self).setUp()
- self.host_manager = ironic_host_manager.IronicHostManager()
- ironic_driver = "nova.virt.ironic.driver.IronicDriver"
- supported_instances = '[["i386", "baremetal", "baremetal"]]'
- self.compute_node = dict(id=1, local_gb=10, memory_mb=1024, vcpus=1,
- vcpus_used=0, local_gb_used=0, memory_mb_used=0,
- updated_at=None, cpu_info='baremetal cpu',
- stats=jsonutils.dumps(dict(
- ironic_driver=ironic_driver,
- cpu_arch='i386')),
- supported_instances=supported_instances,
- free_disk_gb=10, free_ram_mb=1024,
- hypervisor_type='ironic',
- hypervisor_version = 1,
- hypervisor_hostname = 'fake_host')
-
- @mock.patch.object(ironic_host_manager.IronicNodeState, '__init__')
- def test_create_ironic_node_state(self, init_mock):
- init_mock.return_value = None
- compute = {'cpu_info': 'baremetal cpu'}
- host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
- compute=compute)
- self.assertIs(ironic_host_manager.IronicNodeState, type(host_state))
-
- @mock.patch.object(host_manager.HostState, '__init__')
- def test_create_non_ironic_host_state(self, init_mock):
- init_mock.return_value = None
- compute = {'cpu_info': 'other cpu'}
- host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
- compute=compute)
- self.assertIs(host_manager.HostState, type(host_state))
-
- def test_get_all_host_states_after_delete_one(self):
- context = 'fake_context'
-
- self.mox.StubOutWithMock(db, 'compute_node_get_all')
- # all nodes active for first call
- db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
- # remove node4 for second call
- running_nodes = [n for n in ironic_fakes.COMPUTE_NODES
- if n.get('hypervisor_hostname') != 'node4uuid']
- db.compute_node_get_all(context).AndReturn(running_nodes)
- self.mox.ReplayAll()
-
- self.host_manager.get_all_host_states(context)
- self.host_manager.get_all_host_states(context)
- host_states_map = self.host_manager.host_state_map
- self.assertEqual(3, len(host_states_map))
-
- def test_get_all_host_states_after_delete_all(self):
- context = 'fake_context'
-
- self.mox.StubOutWithMock(db, 'compute_node_get_all')
- # all nodes active for first call
- db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
- # remove all nodes for second call
- db.compute_node_get_all(context).AndReturn([])
- self.mox.ReplayAll()
-
- self.host_manager.get_all_host_states(context)
- self.host_manager.get_all_host_states(context)
- host_states_map = self.host_manager.host_state_map
- self.assertEqual(0, len(host_states_map))
-
- def test_update_from_compute_node(self):
- host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
- host.update_from_compute_node(self.compute_node)
-
- self.assertEqual(1024, host.free_ram_mb)
- self.assertEqual(1024, host.total_usable_ram_mb)
- self.assertEqual(10240, host.free_disk_mb)
- self.assertEqual(1, host.vcpus_total)
- self.assertEqual(0, host.vcpus_used)
- self.assertEqual(jsonutils.loads(self.compute_node['stats']),
- host.stats)
- self.assertEqual('ironic', host.hypervisor_type)
- self.assertEqual(1, host.hypervisor_version)
- self.assertEqual('fake_host', host.hypervisor_hostname)
-
- def test_consume_identical_instance_from_compute(self):
- host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
- host.update_from_compute_node(self.compute_node)
-
- instance = dict(root_gb=10, ephemeral_gb=0, memory_mb=1024, vcpus=1)
- host.consume_from_instance(instance)
-
- self.assertEqual(1, host.vcpus_used)
- self.assertEqual(0, host.free_ram_mb)
- self.assertEqual(0, host.free_disk_mb)
-
- def test_consume_larger_instance_from_compute(self):
- host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
- host.update_from_compute_node(self.compute_node)
-
- instance = dict(root_gb=20, ephemeral_gb=0, memory_mb=2048, vcpus=2)
- host.consume_from_instance(instance)
-
- self.assertEqual(1, host.vcpus_used)
- self.assertEqual(0, host.free_ram_mb)
- self.assertEqual(0, host.free_disk_mb)
-
- def test_consume_smaller_instance_from_compute(self):
- host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
- host.update_from_compute_node(self.compute_node)
-
- instance = dict(root_gb=5, ephemeral_gb=0, memory_mb=512, vcpus=1)
- host.consume_from_instance(instance)
-
- self.assertEqual(1, host.vcpus_used)
- self.assertEqual(0, host.free_ram_mb)
- self.assertEqual(0, host.free_disk_mb)
-
-
-class IronicHostManagerTestFilters(test.NoDBTestCase):
- """Test filters work for IronicHostManager."""
-
- def setUp(self):
- super(IronicHostManagerTestFilters, self).setUp()
- self.host_manager = ironic_host_manager.IronicHostManager()
- self.fake_hosts = [ironic_host_manager.IronicNodeState(
- 'fake_host%s' % x, 'fake-node') for x in range(1, 5)]
- self.fake_hosts += [ironic_host_manager.IronicNodeState(
- 'fake_multihost', 'fake-node%s' % x) for x in range(1, 5)]
-
- def test_choose_host_filters_not_found(self):
- self.flags(scheduler_default_filters='FakeFilterClass3')
- self.host_manager.filter_classes = [FakeFilterClass1,
- FakeFilterClass2]
- self.assertRaises(exception.SchedulerHostFilterNotFound,
- self.host_manager._choose_host_filters, None)
-
- def test_choose_host_filters(self):
- self.flags(scheduler_default_filters=['FakeFilterClass2'])
- self.host_manager.filter_classes = [FakeFilterClass1,
- FakeFilterClass2]
-
- # Test we returns 1 correct function
- filter_classes = self.host_manager._choose_host_filters(None)
- self.assertEqual(1, len(filter_classes))
- self.assertEqual('FakeFilterClass2', filter_classes[0].__name__)
-
- def _mock_get_filtered_hosts(self, info, specified_filters=None):
- self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
-
- info['got_objs'] = []
- info['got_fprops'] = []
-
- def fake_filter_one(_self, obj, filter_props):
- info['got_objs'].append(obj)
- info['got_fprops'].append(filter_props)
- return True
-
- self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
- self.host_manager._choose_host_filters(specified_filters).AndReturn(
- [FakeFilterClass1])
-
- def _verify_result(self, info, result, filters=True):
- for x in info['got_fprops']:
- self.assertEqual(x, info['expected_fprops'])
- if filters:
- self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
- self.assertEqual(set(info['expected_objs']), set(result))
-
- def test_get_filtered_hosts(self):
- fake_properties = {'moo': 1, 'cow': 2}
-
- info = {'expected_objs': self.fake_hosts,
- 'expected_fprops': fake_properties}
-
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result)
-
- def test_get_filtered_hosts_with_specified_filters(self):
- fake_properties = {'moo': 1, 'cow': 2}
-
- specified_filters = ['FakeFilterClass1', 'FakeFilterClass2']
- info = {'expected_objs': self.fake_hosts,
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info, specified_filters)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties, filter_class_names=specified_filters)
- self._verify_result(info, result)
-
- def test_get_filtered_hosts_with_ignore(self):
- fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
- 'fake_host5', 'fake_multihost']}
-
- # [1] and [3] are host2 and host4
- info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result)
-
- def test_get_filtered_hosts_with_force_hosts(self):
- fake_properties = {'force_hosts': ['fake_host1', 'fake_host3',
- 'fake_host5']}
-
- # [0] and [2] are host1 and host3
- info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result, False)
-
- def test_get_filtered_hosts_with_no_matching_force_hosts(self):
- fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
-
- info = {'expected_objs': [],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result, False)
-
- def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
- # Ensure ignore_hosts processed before force_hosts in host filters.
- fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
- 'ignore_hosts': ['fake_host1']}
-
- # only fake_host3 should be left.
- info = {'expected_objs': [self.fake_hosts[2]],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result, False)
-
- def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
- # Ensure all nodes returned for a host with many nodes
- fake_properties = {'force_hosts': ['fake_multihost']}
-
- info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
- self.fake_hosts[6], self.fake_hosts[7]],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result, False)
-
- def test_get_filtered_hosts_with_force_nodes(self):
- fake_properties = {'force_nodes': ['fake-node2', 'fake-node4',
- 'fake-node9']}
-
- # [5] is fake-node2, [7] is fake-node4
- info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result, False)
-
- def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
- # Ensure only overlapping results if both force host and node
- fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'],
- 'force_nodes': ['fake-node2', 'fake-node9']}
-
- # [5] is fake-node2
- info = {'expected_objs': [self.fake_hosts[5]],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result, False)
-
- def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
- # Ensure non-overlapping force_node and force_host yield no result
- fake_properties = {'force_hosts': ['fake_multihost'],
- 'force_nodes': ['fake-node']}
-
- info = {'expected_objs': [],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result, False)
-
- def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
- # Ensure ignore_hosts can coexist with force_nodes
- fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
- 'ignore_hosts': ['fake_host1', 'fake_host2']}
-
- info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result, False)
-
- def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
- # Ensure ignore_hosts is processed before force_nodes
- fake_properties = {'force_nodes': ['fake_node4', 'fake_node2'],
- 'ignore_hosts': ['fake_multihost']}
-
- info = {'expected_objs': [],
- 'expected_fprops': fake_properties}
- self._mock_get_filtered_hosts(info)
-
- self.mox.ReplayAll()
-
- result = self.host_manager.get_filtered_hosts(self.fake_hosts,
- fake_properties)
- self._verify_result(info, result, False)
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
deleted file mode 100644
index eb1f3d4888..0000000000
--- a/nova/tests/scheduler/test_scheduler.py
+++ /dev/null
@@ -1,378 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For Scheduler
-"""
-
-import mox
-from oslo.config import cfg
-
-from nova.compute import api as compute_api
-from nova.compute import utils as compute_utils
-from nova.compute import vm_states
-from nova import context
-from nova import db
-from nova import exception
-from nova.image import glance
-from nova import objects
-from nova import rpc
-from nova.scheduler import driver
-from nova.scheduler import manager
-from nova import servicegroup
-from nova import test
-from nova.tests import fake_instance
-from nova.tests import fake_server_actions
-from nova.tests.image import fake as fake_image
-from nova.tests.objects import test_instance_fault
-from nova.tests.scheduler import fakes
-
-CONF = cfg.CONF
-
-
-class SchedulerManagerTestCase(test.NoDBTestCase):
- """Test case for scheduler manager."""
-
- manager_cls = manager.SchedulerManager
- driver_cls = driver.Scheduler
- driver_cls_name = 'nova.scheduler.driver.Scheduler'
-
- def setUp(self):
- super(SchedulerManagerTestCase, self).setUp()
- self.flags(scheduler_driver=self.driver_cls_name)
- self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
- self.manager = self.manager_cls()
- self.context = context.RequestContext('fake_user', 'fake_project')
- self.topic = 'fake_topic'
- self.fake_args = (1, 2, 3)
- self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
- fake_server_actions.stub_out_action_events(self.stubs)
-
- def test_1_correct_init(self):
- # Correct scheduler driver
- manager = self.manager
- self.assertIsInstance(manager.driver, self.driver_cls)
-
- def _mox_schedule_method_helper(self, method_name):
- # Make sure the method exists that we're going to test call
- def stub_method(*args, **kwargs):
- pass
-
- setattr(self.manager.driver, method_name, stub_method)
-
- self.mox.StubOutWithMock(self.manager.driver,
- method_name)
-
- def test_run_instance_exception_puts_instance_in_error_state(self):
- fake_instance_uuid = 'fake-instance-id'
- inst = {"vm_state": "", "task_state": ""}
-
- self._mox_schedule_method_helper('schedule_run_instance')
- self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
-
- request_spec = {'instance_properties': inst,
- 'instance_uuids': [fake_instance_uuid]}
-
- self.manager.driver.schedule_run_instance(self.context,
- request_spec, None, None, None, None, {}, False).AndRaise(
- exception.NoValidHost(reason=""))
- old, new_ref = db.instance_update_and_get_original(self.context,
- fake_instance_uuid,
- {"vm_state": vm_states.ERROR,
- "task_state": None}).AndReturn((inst, inst))
- compute_utils.add_instance_fault_from_exc(self.context,
- new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg())
-
- self.mox.ReplayAll()
- self.manager.run_instance(self.context, request_spec,
- None, None, None, None, {}, False)
-
- def test_prep_resize_no_valid_host_back_in_active_state(self):
- fake_instance_uuid = 'fake-instance-id'
- fake_instance = {'uuid': fake_instance_uuid}
- inst = {"vm_state": "", "task_state": ""}
-
- self._mox_schedule_method_helper('select_destinations')
-
- self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
-
- request_spec = {'instance_type': 'fake_type',
- 'instance_uuids': [fake_instance_uuid],
- 'instance_properties': {'uuid': fake_instance_uuid}}
- kwargs = {
- 'context': self.context,
- 'image': 'fake_image',
- 'request_spec': request_spec,
- 'filter_properties': 'fake_props',
- 'instance': fake_instance,
- 'instance_type': 'fake_type',
- 'reservations': list('fake_res'),
- }
- self.manager.driver.select_destinations(
- self.context, request_spec, 'fake_props').AndRaise(
- exception.NoValidHost(reason=""))
- old_ref, new_ref = db.instance_update_and_get_original(self.context,
- fake_instance_uuid,
- {"vm_state": vm_states.ACTIVE, "task_state": None}).AndReturn(
- (inst, inst))
- compute_utils.add_instance_fault_from_exc(self.context, new_ref,
- mox.IsA(exception.NoValidHost), mox.IgnoreArg())
-
- self.mox.ReplayAll()
- self.manager.prep_resize(**kwargs)
-
- def test_prep_resize_no_valid_host_back_in_shutoff_state(self):
- fake_instance_uuid = 'fake-instance-id'
- fake_instance = {'uuid': fake_instance_uuid, "vm_state": "stopped"}
- inst = {"vm_state": "stopped", "task_state": ""}
-
- self._mox_schedule_method_helper('select_destinations')
-
- self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
-
- request_spec = {'instance_type': 'fake_type',
- 'instance_uuids': [fake_instance_uuid],
- 'instance_properties': {'uuid': fake_instance_uuid}}
- kwargs = {
- 'context': self.context,
- 'image': 'fake_image',
- 'request_spec': request_spec,
- 'filter_properties': 'fake_props',
- 'instance': fake_instance,
- 'instance_type': 'fake_type',
- 'reservations': list('fake_res'),
- }
- self.manager.driver.select_destinations(
- self.context, request_spec, 'fake_props').AndRaise(
- exception.NoValidHost(reason=""))
- old_ref, new_ref = db.instance_update_and_get_original(self.context,
- fake_instance_uuid,
- {"vm_state": vm_states.STOPPED, "task_state": None}).AndReturn(
- (inst, inst))
- compute_utils.add_instance_fault_from_exc(self.context, new_ref,
- mox.IsA(exception.NoValidHost), mox.IgnoreArg())
-
- self.mox.ReplayAll()
- self.manager.prep_resize(**kwargs)
-
- def test_prep_resize_exception_host_in_error_state_and_raise(self):
- fake_instance_uuid = 'fake-instance-id'
- fake_instance = {'uuid': fake_instance_uuid}
-
- self._mox_schedule_method_helper('select_destinations')
-
- self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
-
- request_spec = {
- 'instance_properties': {'uuid': fake_instance_uuid},
- 'instance_uuids': [fake_instance_uuid]
- }
- kwargs = {
- 'context': self.context,
- 'image': 'fake_image',
- 'request_spec': request_spec,
- 'filter_properties': 'fake_props',
- 'instance': fake_instance,
- 'instance_type': 'fake_type',
- 'reservations': list('fake_res'),
- }
-
- self.manager.driver.select_destinations(
- self.context, request_spec, 'fake_props').AndRaise(
- test.TestingException('something happened'))
-
- inst = {
- "vm_state": "",
- "task_state": "",
- }
- old_ref, new_ref = db.instance_update_and_get_original(self.context,
- fake_instance_uuid,
- {"vm_state": vm_states.ERROR,
- "task_state": None}).AndReturn((inst, inst))
- compute_utils.add_instance_fault_from_exc(self.context, new_ref,
- mox.IsA(test.TestingException), mox.IgnoreArg())
-
- self.mox.ReplayAll()
-
- self.assertRaises(test.TestingException, self.manager.prep_resize,
- **kwargs)
-
- def test_set_vm_state_and_notify_adds_instance_fault(self):
- request = {'instance_properties': {'uuid': 'fake-uuid'}}
- updates = {'vm_state': 'foo'}
- fake_inst = {'uuid': 'fake-uuid'}
-
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
- self.mox.StubOutWithMock(db, 'instance_fault_create')
- self.mox.StubOutWithMock(rpc, 'get_notifier')
- notifier = self.mox.CreateMockAnything()
- rpc.get_notifier('scheduler').AndReturn(notifier)
- db.instance_update_and_get_original(self.context, 'fake-uuid',
- updates).AndReturn((None,
- fake_inst))
- db.instance_fault_create(self.context, mox.IgnoreArg()).AndReturn(
- test_instance_fault.fake_faults['fake-uuid'][0])
- notifier.error(self.context, 'scheduler.foo', mox.IgnoreArg())
- self.mox.ReplayAll()
-
- self.manager._set_vm_state_and_notify('foo', {'vm_state': 'foo'},
- self.context, None, request)
-
- def test_prep_resize_post_populates_retry(self):
- self.manager.driver = fakes.FakeFilterScheduler()
-
- image = 'image'
- instance_uuid = 'fake-instance-id'
- instance = fake_instance.fake_db_instance(uuid=instance_uuid)
-
- instance_properties = {'project_id': 'fake', 'os_type': 'Linux'}
- instance_type = "m1.tiny"
- request_spec = {'instance_properties': instance_properties,
- 'instance_type': instance_type,
- 'instance_uuids': [instance_uuid]}
- retry = {'hosts': [], 'num_attempts': 1}
- filter_properties = {'retry': retry}
- reservations = None
-
- hosts = [dict(host='host', nodename='node', limits={})]
-
- self._mox_schedule_method_helper('select_destinations')
- self.manager.driver.select_destinations(
- self.context, request_spec, filter_properties).AndReturn(hosts)
-
- self.mox.StubOutWithMock(self.manager.compute_rpcapi, 'prep_resize')
- self.manager.compute_rpcapi.prep_resize(self.context, image,
- mox.IsA(objects.Instance),
- instance_type, 'host', reservations, request_spec=request_spec,
- filter_properties=filter_properties, node='node')
-
- self.mox.ReplayAll()
- self.manager.prep_resize(self.context, image, request_spec,
- filter_properties, instance, instance_type, reservations)
-
- self.assertEqual([['host', 'node']],
- filter_properties['retry']['hosts'])
-
-
-class SchedulerTestCase(test.NoDBTestCase):
- """Test case for base scheduler driver class."""
-
- # So we can subclass this test and re-use tests if we need.
- driver_cls = driver.Scheduler
-
- def setUp(self):
- super(SchedulerTestCase, self).setUp()
- self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
-
- def fake_show(meh, context, id, **kwargs):
- if id:
- return {'id': id, 'min_disk': None, 'min_ram': None,
- 'name': 'fake_name',
- 'status': 'active',
- 'properties': {'kernel_id': 'fake_kernel_id',
- 'ramdisk_id': 'fake_ramdisk_id',
- 'something_else': 'meow'}}
- else:
- raise exception.ImageNotFound(image_id=id)
-
- fake_image.stub_out_image_service(self.stubs)
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
- self.image_service = glance.get_default_image_service()
-
- self.driver = self.driver_cls()
- self.context = context.RequestContext('fake_user', 'fake_project')
- self.topic = 'fake_topic'
- self.servicegroup_api = servicegroup.API()
-
- def test_hosts_up(self):
- service1 = {'host': 'host1'}
- service2 = {'host': 'host2'}
- services = [service1, service2]
-
- self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
- self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
-
- db.service_get_all_by_topic(self.context,
- self.topic).AndReturn(services)
- self.servicegroup_api.service_is_up(service1).AndReturn(False)
- self.servicegroup_api.service_is_up(service2).AndReturn(True)
-
- self.mox.ReplayAll()
- result = self.driver.hosts_up(self.context, self.topic)
- self.assertEqual(result, ['host2'])
-
- def test_handle_schedule_error_adds_instance_fault(self):
- instance = {'uuid': 'fake-uuid'}
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
- self.mox.StubOutWithMock(db, 'instance_fault_create')
- db.instance_update_and_get_original(self.context, instance['uuid'],
- mox.IgnoreArg()).AndReturn(
- (None, instance))
- db.instance_fault_create(self.context, mox.IgnoreArg()).AndReturn(
- test_instance_fault.fake_faults['fake-uuid'][0])
- self.mox.StubOutWithMock(rpc, 'get_notifier')
- notifier = self.mox.CreateMockAnything()
- rpc.get_notifier('scheduler').AndReturn(notifier)
- notifier.error(self.context, 'scheduler.run_instance', mox.IgnoreArg())
- self.mox.ReplayAll()
-
- driver.handle_schedule_error(self.context,
- exception.NoValidHost('test'),
- instance['uuid'], {})
-
-
-class SchedulerDriverBaseTestCase(SchedulerTestCase):
- """Test cases for base scheduler driver class methods
- that will fail if the driver is changed.
- """
-
- def test_unimplemented_schedule_run_instance(self):
- fake_request_spec = {'instance_properties':
- {'uuid': 'uuid'}}
-
- self.assertRaises(NotImplementedError,
- self.driver.schedule_run_instance,
- self.context, fake_request_spec, None, None, None,
- None, None, False)
-
- def test_unimplemented_select_destinations(self):
- self.assertRaises(NotImplementedError,
- self.driver.select_destinations, self.context, {}, {})
-
-
-class SchedulerInstanceGroupData(test.TestCase):
-
- driver_cls = driver.Scheduler
-
- def setUp(self):
- super(SchedulerInstanceGroupData, self).setUp()
- self.user_id = 'fake_user'
- self.project_id = 'fake_project'
- self.context = context.RequestContext(self.user_id, self.project_id)
- self.driver = self.driver_cls()
-
- def _get_default_values(self):
- return {'name': 'fake_name',
- 'user_id': self.user_id,
- 'project_id': self.project_id}
-
- def _create_instance_group(self, context, values, policies=None,
- metadata=None, members=None):
- return db.instance_group_create(context, values, policies=policies,
- metadata=metadata, members=members)
diff --git a/nova/tests/scheduler/test_scheduler_utils.py b/nova/tests/scheduler/test_scheduler_utils.py
deleted file mode 100644
index 6af2c7dd55..0000000000
--- a/nova/tests/scheduler/test_scheduler_utils.py
+++ /dev/null
@@ -1,314 +0,0 @@
-# Copyright (c) 2013 Rackspace Hosting
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For Scheduler Utils
-"""
-import contextlib
-import uuid
-
-import mock
-import mox
-from oslo.config import cfg
-
-from nova.compute import flavors
-from nova.compute import utils as compute_utils
-from nova import db
-from nova import exception
-from nova import notifications
-from nova import objects
-from nova import rpc
-from nova.scheduler import utils as scheduler_utils
-from nova import test
-from nova.tests import fake_instance
-
-CONF = cfg.CONF
-
-
-class SchedulerUtilsTestCase(test.NoDBTestCase):
- """Test case for scheduler utils methods."""
- def setUp(self):
- super(SchedulerUtilsTestCase, self).setUp()
- self.context = 'fake-context'
-
- def test_build_request_spec_without_image(self):
- image = None
- instance = {'uuid': 'fake-uuid'}
- instance_type = {'flavorid': 'fake-id'}
-
- self.mox.StubOutWithMock(flavors, 'extract_flavor')
- self.mox.StubOutWithMock(db, 'flavor_extra_specs_get')
- flavors.extract_flavor(mox.IgnoreArg()).AndReturn(instance_type)
- db.flavor_extra_specs_get(self.context, mox.IgnoreArg()).AndReturn([])
- self.mox.ReplayAll()
-
- request_spec = scheduler_utils.build_request_spec(self.context, image,
- [instance])
- self.assertEqual({}, request_spec['image'])
-
- @mock.patch.object(flavors, 'extract_flavor')
- @mock.patch.object(db, 'flavor_extra_specs_get')
- def test_build_request_spec_with_object(self, flavor_extra_specs_get,
- extract_flavor):
- instance_type = {'flavorid': 'fake-id'}
- instance = fake_instance.fake_instance_obj(self.context)
-
- extract_flavor.return_value = instance_type
- flavor_extra_specs_get.return_value = []
-
- request_spec = scheduler_utils.build_request_spec(self.context, None,
- [instance])
- self.assertIsInstance(request_spec['instance_properties'], dict)
-
- def _test_set_vm_state_and_notify(self, request_spec,
- expected_uuids):
- updates = dict(vm_state='fake-vm-state')
- service = 'fake-service'
- method = 'fake-method'
- exc_info = 'exc_info'
-
- self.mox.StubOutWithMock(compute_utils,
- 'add_instance_fault_from_exc')
- self.mox.StubOutWithMock(notifications, 'send_update')
- self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
-
- self.mox.StubOutWithMock(rpc, 'get_notifier')
- notifier = self.mox.CreateMockAnything()
- rpc.get_notifier(service).AndReturn(notifier)
-
- old_ref = 'old_ref'
- new_ref = 'new_ref'
-
- for _uuid in expected_uuids:
- db.instance_update_and_get_original(
- self.context, _uuid, updates).AndReturn((old_ref, new_ref))
- notifications.send_update(self.context, old_ref, new_ref,
- service=service)
- compute_utils.add_instance_fault_from_exc(
- self.context,
- new_ref, exc_info, mox.IsA(tuple))
-
- payload = dict(request_spec=request_spec,
- instance_properties=request_spec.get(
- 'instance_properties', {}),
- instance_id=_uuid,
- state='fake-vm-state',
- method=method,
- reason=exc_info)
- event_type = '%s.%s' % (service, method)
- notifier.error(self.context, event_type, payload)
-
- self.mox.ReplayAll()
-
- scheduler_utils.set_vm_state_and_notify(self.context,
- service,
- method,
- updates,
- exc_info,
- request_spec,
- db)
-
- def test_set_vm_state_and_notify_rs_uuids(self):
- expected_uuids = ['1', '2', '3']
- request_spec = dict(instance_uuids=expected_uuids)
- self._test_set_vm_state_and_notify(request_spec, expected_uuids)
-
- def test_set_vm_state_and_notify_uuid_from_instance_props(self):
- expected_uuids = ['fake-uuid']
- request_spec = dict(instance_properties=dict(uuid='fake-uuid'))
- self._test_set_vm_state_and_notify(request_spec, expected_uuids)
-
- def _test_populate_filter_props(self, host_state_obj=True,
- with_retry=True,
- force_hosts=None,
- force_nodes=None):
- if force_hosts is None:
- force_hosts = []
- if force_nodes is None:
- force_nodes = []
- if with_retry:
- if not force_hosts and not force_nodes:
- filter_properties = dict(retry=dict(hosts=[]))
- else:
- filter_properties = dict(force_hosts=force_hosts,
- force_nodes=force_nodes)
- else:
- filter_properties = dict()
-
- if host_state_obj:
- class host_state(object):
- host = 'fake-host'
- nodename = 'fake-node'
- limits = 'fake-limits'
- else:
- host_state = dict(host='fake-host',
- nodename='fake-node',
- limits='fake-limits')
-
- scheduler_utils.populate_filter_properties(filter_properties,
- host_state)
- if with_retry and not force_hosts and not force_nodes:
- # So we can check for 2 hosts
- scheduler_utils.populate_filter_properties(filter_properties,
- host_state)
-
- if force_hosts:
- expected_limits = None
- else:
- expected_limits = 'fake-limits'
- self.assertEqual(expected_limits,
- filter_properties.get('limits'))
-
- if with_retry and not force_hosts and not force_nodes:
- self.assertEqual([['fake-host', 'fake-node'],
- ['fake-host', 'fake-node']],
- filter_properties['retry']['hosts'])
- else:
- self.assertNotIn('retry', filter_properties)
-
- def test_populate_filter_props(self):
- self._test_populate_filter_props()
-
- def test_populate_filter_props_host_dict(self):
- self._test_populate_filter_props(host_state_obj=False)
-
- def test_populate_filter_props_no_retry(self):
- self._test_populate_filter_props(with_retry=False)
-
- def test_populate_filter_props_force_hosts_no_retry(self):
- self._test_populate_filter_props(force_hosts=['force-host'])
-
- def test_populate_filter_props_force_nodes_no_retry(self):
- self._test_populate_filter_props(force_nodes=['force-node'])
-
- @mock.patch.object(scheduler_utils, '_max_attempts')
- def test_populate_retry_exception_at_max_attempts(self, _max_attempts):
- _max_attempts.return_value = 2
- msg = 'The exception text was preserved!'
- filter_properties = dict(retry=dict(num_attempts=2, hosts=[],
- exc=[msg]))
- nvh = self.assertRaises(exception.NoValidHost,
- scheduler_utils.populate_retry,
- filter_properties, 'fake-uuid')
- # make sure 'msg' is a substring of the complete exception text
- self.assertIn(msg, nvh.message)
-
- def _check_parse_options(self, opts, sep, converter, expected):
- good = scheduler_utils.parse_options(opts,
- sep=sep,
- converter=converter)
- for item in expected:
- self.assertIn(item, good)
-
- def test_parse_options(self):
- # check normal
- self._check_parse_options(['foo=1', 'bar=-2.1'],
- '=',
- float,
- [('foo', 1.0), ('bar', -2.1)])
- # check convert error
- self._check_parse_options(['foo=a1', 'bar=-2.1'],
- '=',
- float,
- [('bar', -2.1)])
- # check separator missing
- self._check_parse_options(['foo', 'bar=-2.1'],
- '=',
- float,
- [('bar', -2.1)])
- # check key missing
- self._check_parse_options(['=5', 'bar=-2.1'],
- '=',
- float,
- [('bar', -2.1)])
-
- def test_validate_filters_configured(self):
- self.flags(scheduler_default_filters='FakeFilter1,FakeFilter2')
- self.assertTrue(scheduler_utils.validate_filter('FakeFilter1'))
- self.assertTrue(scheduler_utils.validate_filter('FakeFilter2'))
- self.assertFalse(scheduler_utils.validate_filter('FakeFilter3'))
-
- def _create_server_group(self, policy='anti-affinity'):
- instance = fake_instance.fake_instance_obj(self.context,
- params={'host': 'hostA'})
-
- group = objects.InstanceGroup()
- group.name = 'pele'
- group.uuid = str(uuid.uuid4())
- group.members = [instance.uuid]
- group.policies = [policy]
- return group
-
- def _group_details_in_filter_properties(self, group, func='get_by_uuid',
- hint=None, policy=None):
- group_hint = hint
- group_hosts = ['hostB']
-
- with contextlib.nested(
- mock.patch.object(objects.InstanceGroup, func, return_value=group),
- mock.patch.object(objects.InstanceGroup, 'get_hosts',
- return_value=['hostA']),
- ) as (get_group, get_hosts):
- scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
- scheduler_utils._SUPPORTS_AFFINITY = None
- group_info = scheduler_utils.setup_instance_group(
- self.context, group_hint, group_hosts)
- self.assertEqual(
- (set(['hostA', 'hostB']), [policy]),
- group_info)
-
- def test_group_details_in_filter_properties(self):
- for policy in ['affinity', 'anti-affinity']:
- group = self._create_server_group(policy)
- self._group_details_in_filter_properties(group, func='get_by_uuid',
- hint=group.uuid,
- policy=policy)
-
- def _group_filter_with_filter_not_configured(self, policy):
- self.flags(scheduler_default_filters=['f1', 'f2'])
-
- instance = fake_instance.fake_instance_obj(self.context,
- params={'host': 'hostA'})
-
- group = objects.InstanceGroup()
- group.uuid = str(uuid.uuid4())
- group.members = [instance.uuid]
- group.policies = [policy]
-
- with contextlib.nested(
- mock.patch.object(objects.InstanceGroup, 'get_by_uuid',
- return_value=group),
- mock.patch.object(objects.InstanceGroup, 'get_hosts',
- return_value=['hostA']),
- ) as (get_group, get_hosts):
- scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
- scheduler_utils._SUPPORTS_AFFINITY = None
- self.assertRaises(exception.NoValidHost,
- scheduler_utils.setup_instance_group,
- self.context, group.uuid)
-
- def test_group_filter_with_filter_not_configured(self):
- policies = ['anti-affinity', 'affinity']
- for policy in policies:
- self._group_filter_with_filter_not_configured(policy)
-
- def test_group_uuid_details_in_filter_properties(self):
- group = self._create_server_group()
- self._group_details_in_filter_properties(group, 'get_by_uuid',
- group.uuid, 'anti-affinity')
-
- def test_group_name_details_in_filter_properties(self):
- group = self._create_server_group()
- self._group_details_in_filter_properties(group, 'get_by_name',
- group.name, 'anti-affinity')
diff --git a/nova/tests/scheduler/test_weights.py b/nova/tests/scheduler/test_weights.py
deleted file mode 100644
index 6a2be9ecee..0000000000
--- a/nova/tests/scheduler/test_weights.py
+++ /dev/null
@@ -1,338 +0,0 @@
-# Copyright 2011-2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For Scheduler weights.
-"""
-
-from oslo.serialization import jsonutils
-
-from nova import context
-from nova import exception
-from nova.openstack.common.fixture import mockpatch
-from nova.scheduler import weights
-from nova import test
-from nova.tests import matchers
-from nova.tests.scheduler import fakes
-
-
-class TestWeighedHost(test.NoDBTestCase):
- def test_dict_conversion(self):
- host_state = fakes.FakeHostState('somehost', None, {})
- host = weights.WeighedHost(host_state, 'someweight')
- expected = {'weight': 'someweight',
- 'host': 'somehost'}
- self.assertThat(host.to_dict(), matchers.DictMatches(expected))
-
- def test_all_weighers(self):
- classes = weights.all_weighers()
- class_names = [cls.__name__ for cls in classes]
- self.assertIn('RAMWeigher', class_names)
- self.assertIn('MetricsWeigher', class_names)
- self.assertIn('IoOpsWeigher', class_names)
-
-
-class RamWeigherTestCase(test.NoDBTestCase):
- def setUp(self):
- super(RamWeigherTestCase, self).setUp()
- self.useFixture(mockpatch.Patch(
- 'nova.db.compute_node_get_all',
- return_value=fakes.COMPUTE_NODES))
- self.host_manager = fakes.FakeHostManager()
- self.weight_handler = weights.HostWeightHandler()
- self.weight_classes = self.weight_handler.get_matching_classes(
- ['nova.scheduler.weights.ram.RAMWeigher'])
-
- def _get_weighed_host(self, hosts, weight_properties=None):
- if weight_properties is None:
- weight_properties = {}
- return self.weight_handler.get_weighed_objects(self.weight_classes,
- hosts, weight_properties)[0]
-
- def _get_all_hosts(self):
- ctxt = context.get_admin_context()
- return self.host_manager.get_all_host_states(ctxt)
-
- def test_default_of_spreading_first(self):
- hostinfo_list = self._get_all_hosts()
-
- # host1: free_ram_mb=512
- # host2: free_ram_mb=1024
- # host3: free_ram_mb=3072
- # host4: free_ram_mb=8192
-
- # so, host4 should win:
- weighed_host = self._get_weighed_host(hostinfo_list)
- self.assertEqual(1.0, weighed_host.weight)
- self.assertEqual('host4', weighed_host.obj.host)
-
- def test_ram_filter_multiplier1(self):
- self.flags(ram_weight_multiplier=0.0)
- hostinfo_list = self._get_all_hosts()
-
- # host1: free_ram_mb=512
- # host2: free_ram_mb=1024
- # host3: free_ram_mb=3072
- # host4: free_ram_mb=8192
-
- # We do not know the host, all have same weight.
- weighed_host = self._get_weighed_host(hostinfo_list)
- self.assertEqual(0.0, weighed_host.weight)
-
- def test_ram_filter_multiplier2(self):
- self.flags(ram_weight_multiplier=2.0)
- hostinfo_list = self._get_all_hosts()
-
- # host1: free_ram_mb=512
- # host2: free_ram_mb=1024
- # host3: free_ram_mb=3072
- # host4: free_ram_mb=8192
-
- # so, host4 should win:
- weighed_host = self._get_weighed_host(hostinfo_list)
- self.assertEqual(1.0 * 2, weighed_host.weight)
- self.assertEqual('host4', weighed_host.obj.host)
-
- def test_ram_filter_negative(self):
- self.flags(ram_weight_multiplier=1.0)
- hostinfo_list = self._get_all_hosts()
- host_attr = {'id': 100, 'memory_mb': 8192, 'free_ram_mb': -512}
- host_state = fakes.FakeHostState('negative', 'negative', host_attr)
- hostinfo_list = list(hostinfo_list) + [host_state]
-
- # host1: free_ram_mb=512
- # host2: free_ram_mb=1024
- # host3: free_ram_mb=3072
- # host4: free_ram_mb=8192
- # negativehost: free_ram_mb=-512
-
- # so, host4 should win
- weights = self.weight_handler.get_weighed_objects(self.weight_classes,
- hostinfo_list, {})
-
- weighed_host = weights[0]
- self.assertEqual(1, weighed_host.weight)
- self.assertEqual('host4', weighed_host.obj.host)
-
- # and negativehost should lose
- weighed_host = weights[-1]
- self.assertEqual(0, weighed_host.weight)
- self.assertEqual('negative', weighed_host.obj.host)
-
-
-class MetricsWeigherTestCase(test.NoDBTestCase):
- def setUp(self):
- super(MetricsWeigherTestCase, self).setUp()
- self.useFixture(mockpatch.Patch(
- 'nova.db.compute_node_get_all',
- return_value=fakes.COMPUTE_NODES_METRICS))
- self.host_manager = fakes.FakeHostManager()
- self.weight_handler = weights.HostWeightHandler()
- self.weight_classes = self.weight_handler.get_matching_classes(
- ['nova.scheduler.weights.metrics.MetricsWeigher'])
-
- def _get_weighed_host(self, hosts, setting, weight_properties=None):
- if not weight_properties:
- weight_properties = {}
- self.flags(weight_setting=setting, group='metrics')
- return self.weight_handler.get_weighed_objects(self.weight_classes,
- hosts, weight_properties)[0]
-
- def _get_all_hosts(self):
- ctxt = context.get_admin_context()
- return self.host_manager.get_all_host_states(ctxt)
-
- def _do_test(self, settings, expected_weight, expected_host):
- hostinfo_list = self._get_all_hosts()
- weighed_host = self._get_weighed_host(hostinfo_list, settings)
- self.assertEqual(expected_weight, weighed_host.weight)
- self.assertEqual(expected_host, weighed_host.obj.host)
-
- def test_single_resource(self):
- # host1: foo=512
- # host2: foo=1024
- # host3: foo=3072
- # host4: foo=8192
- # so, host4 should win:
- setting = ['foo=1']
- self._do_test(setting, 1.0, 'host4')
-
- def test_multiple_resource(self):
- # host1: foo=512, bar=1
- # host2: foo=1024, bar=2
- # host3: foo=3072, bar=1
- # host4: foo=8192, bar=0
- # so, host2 should win:
- setting = ['foo=0.0001', 'bar=1']
- self._do_test(setting, 1.0, 'host2')
-
- def test_single_resourcenegtive_ratio(self):
- # host1: foo=512
- # host2: foo=1024
- # host3: foo=3072
- # host4: foo=8192
- # so, host1 should win:
- setting = ['foo=-1']
- self._do_test(setting, 1.0, 'host1')
-
- def test_multiple_resource_missing_ratio(self):
- # host1: foo=512, bar=1
- # host2: foo=1024, bar=2
- # host3: foo=3072, bar=1
- # host4: foo=8192, bar=0
- # so, host4 should win:
- setting = ['foo=0.0001', 'bar']
- self._do_test(setting, 1.0, 'host4')
-
- def test_multiple_resource_wrong_ratio(self):
- # host1: foo=512, bar=1
- # host2: foo=1024, bar=2
- # host3: foo=3072, bar=1
- # host4: foo=8192, bar=0
- # so, host4 should win:
- setting = ['foo=0.0001', 'bar = 2.0t']
- self._do_test(setting, 1.0, 'host4')
-
- def _check_parsing_result(self, weigher, setting, results):
- self.flags(weight_setting=setting, group='metrics')
- weigher._parse_setting()
- self.assertEqual(len(weigher.setting), len(results))
- for item in results:
- self.assertIn(item, weigher.setting)
-
- def test_parse_setting(self):
- weigher = self.weight_classes[0]()
- self._check_parsing_result(weigher,
- ['foo=1'],
- [('foo', 1.0)])
- self._check_parsing_result(weigher,
- ['foo=1', 'bar=-2.1'],
- [('foo', 1.0), ('bar', -2.1)])
- self._check_parsing_result(weigher,
- ['foo=a1', 'bar=-2.1'],
- [('bar', -2.1)])
- self._check_parsing_result(weigher,
- ['foo', 'bar=-2.1'],
- [('bar', -2.1)])
- self._check_parsing_result(weigher,
- ['=5', 'bar=-2.1'],
- [('bar', -2.1)])
-
- def test_metric_not_found_required(self):
- setting = ['foo=1', 'zot=2']
- self.assertRaises(exception.ComputeHostMetricNotFound,
- self._do_test,
- setting,
- 8192,
- 'host4')
-
- def test_metric_not_found_non_required(self):
- # host1: foo=512, bar=1
- # host2: foo=1024, bar=2
- # host3: foo=3072, bar=1
- # host4: foo=8192, bar=0
- # host5: foo=768, bar=0, zot=1
- # host6: foo=2048, bar=0, zot=2
- # so, host5 should win:
- self.flags(required=False, group='metrics')
- setting = ['foo=0.0001', 'zot=-1']
- self._do_test(setting, 1.0, 'host5')
-
-
-COMPUTE_NODES_IO_OPS = [
- # host1: num_io_ops=1
- dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
- disk_available_least=None, free_ram_mb=512, vcpus_used=1,
- free_disk_gb=512, local_gb_used=0, updated_at=None,
- service=dict(host='host1', disabled=False),
- hypervisor_hostname='node1', host_ip='127.0.0.1',
- hypervisor_version=0, numa_topology=None,
- stats=jsonutils.dumps({'io_workload': '1'})),
- # host2: num_io_ops=2
- dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
- disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
- free_disk_gb=1024, local_gb_used=0, updated_at=None,
- service=dict(host='host2', disabled=True),
- hypervisor_hostname='node2', host_ip='127.0.0.1',
- hypervisor_version=0, numa_topology=None,
- stats=jsonutils.dumps({'io_workload': '2'})),
- # host3: num_io_ops=0, so host3 should win in the case of default
- # io_ops_weight_multiplier configure.
- dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
- disk_available_least=3333, free_ram_mb=3072, vcpus_used=1,
- free_disk_gb=3072, local_gb_used=0, updated_at=None,
- service=dict(host='host3', disabled=False),
- hypervisor_hostname='node3', host_ip='127.0.0.1',
- hypervisor_version=0, numa_topology=None,
- stats=jsonutils.dumps({'io_workload': '0'})),
- # host4: num_io_ops=4, so host4 should win in the case of positive
- # io_ops_weight_multiplier configure.
- dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
- disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
- free_disk_gb=8888, local_gb_used=0, updated_at=None,
- service=dict(host='host4', disabled=False),
- hypervisor_hostname='node4', host_ip='127.0.0.1',
- hypervisor_version=0, numa_topology=None,
- stats=jsonutils.dumps({'io_workload': '4'})),
- # Broken entry
- dict(id=5, local_gb=1024, memory_mb=1024, vcpus=1, service=None),
-]
-
-
-class IoOpsWeigherTestCase(test.NoDBTestCase):
-
- def setUp(self):
- super(IoOpsWeigherTestCase, self).setUp()
- self.useFixture(mockpatch.Patch(
- 'nova.db.compute_node_get_all',
- return_value=COMPUTE_NODES_IO_OPS))
- self.host_manager = fakes.FakeHostManager()
- self.weight_handler = weights.HostWeightHandler()
- self.weight_classes = self.weight_handler.get_matching_classes(
- ['nova.scheduler.weights.io_ops.IoOpsWeigher'])
-
- def _get_weighed_host(self, hosts, io_ops_weight_multiplier):
- if io_ops_weight_multiplier is not None:
- self.flags(io_ops_weight_multiplier=io_ops_weight_multiplier)
- return self.weight_handler.get_weighed_objects(self.weight_classes,
- hosts, {})[0]
-
- def _get_all_hosts(self):
- ctxt = context.get_admin_context()
- return self.host_manager.get_all_host_states(ctxt)
-
- def _do_test(self, io_ops_weight_multiplier, expected_weight,
- expected_host):
- hostinfo_list = self._get_all_hosts()
- weighed_host = self._get_weighed_host(hostinfo_list,
- io_ops_weight_multiplier)
- self.assertEqual(weighed_host.weight, expected_weight)
- if expected_host:
- self.assertEqual(weighed_host.obj.host, expected_host)
-
- def test_io_ops_weight_multiplier_by_default(self):
- self._do_test(io_ops_weight_multiplier=None,
- expected_weight=0.0,
- expected_host='host3')
-
- def test_io_ops_weight_multiplier_zero_value(self):
- # We do not know the host, all have same weight.
- self._do_test(io_ops_weight_multiplier=0.0,
- expected_weight=0.0,
- expected_host=None)
-
- def test_io_ops_weight_multiplier_positive_value(self):
- self._do_test(io_ops_weight_multiplier=2.0,
- expected_weight=2.0,
- expected_host='host4')
diff --git a/nova/tests/servicegroup/test_db_servicegroup.py b/nova/tests/servicegroup/test_db_servicegroup.py
deleted file mode 100644
index 6f08e6fb50..0000000000
--- a/nova/tests/servicegroup/test_db_servicegroup.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# Copyright 2012 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-import fixtures
-from oslo.utils import timeutils
-
-from nova import context
-from nova import db
-from nova import service
-from nova import servicegroup
-from nova import test
-
-
-class ServiceFixture(fixtures.Fixture):
-
- def __init__(self, host, binary, topic):
- super(ServiceFixture, self).__init__()
- self.host = host
- self.binary = binary
- self.topic = topic
- self.serv = None
-
- def setUp(self):
- super(ServiceFixture, self).setUp()
- self.serv = service.Service(self.host,
- self.binary,
- self.topic,
- 'nova.tests.test_service.FakeManager',
- 1, 1)
- self.addCleanup(self.serv.kill)
-
-
-class DBServiceGroupTestCase(test.TestCase):
-
- def setUp(self):
- super(DBServiceGroupTestCase, self).setUp()
- servicegroup.API._driver = None
- self.flags(servicegroup_driver='db')
- self.down_time = 15
- self.flags(enable_new_services=True)
- self.flags(service_down_time=self.down_time)
- self.servicegroup_api = servicegroup.API()
- self._host = 'foo'
- self._binary = 'nova-fake'
- self._topic = 'unittest'
- self._ctx = context.get_admin_context()
-
- def test_DB_driver(self):
- serv = self.useFixture(
- ServiceFixture(self._host, self._binary, self._topic)).serv
- serv.start()
- service_ref = db.service_get_by_args(self._ctx,
- self._host,
- self._binary)
-
- self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
- self.useFixture(test.TimeOverride())
- timeutils.advance_time_seconds(self.down_time + 1)
- self.servicegroup_api._driver._report_state(serv)
- service_ref = db.service_get_by_args(self._ctx,
- self._host,
- self._binary)
-
- self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
- serv.stop()
- timeutils.advance_time_seconds(self.down_time + 1)
- service_ref = db.service_get_by_args(self._ctx,
- self._host,
- self._binary)
- self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
-
- def test_get_all(self):
- host1 = self._host + '_1'
- host2 = self._host + '_2'
-
- serv1 = self.useFixture(
- ServiceFixture(host1, self._binary, self._topic)).serv
- serv1.start()
-
- serv2 = self.useFixture(
- ServiceFixture(host2, self._binary, self._topic)).serv
- serv2.start()
-
- service_ref1 = db.service_get_by_args(self._ctx,
- host1,
- self._binary)
- service_ref2 = db.service_get_by_args(self._ctx,
- host2,
- self._binary)
-
- services = self.servicegroup_api.get_all(self._topic)
-
- self.assertIn(service_ref1['host'], services)
- self.assertIn(service_ref2['host'], services)
-
- service_id = self.servicegroup_api.get_one(self._topic)
- self.assertIn(service_id, services)
-
- def test_service_is_up(self):
- fts_func = datetime.datetime.fromtimestamp
- fake_now = 1000
- down_time = 15
- self.flags(service_down_time=down_time)
- self.mox.StubOutWithMock(timeutils, 'utcnow')
- self.servicegroup_api = servicegroup.API()
-
- # Up (equal)
- timeutils.utcnow().AndReturn(fts_func(fake_now))
- service = {'updated_at': fts_func(fake_now - self.down_time),
- 'created_at': fts_func(fake_now - self.down_time)}
- self.mox.ReplayAll()
- result = self.servicegroup_api.service_is_up(service)
- self.assertTrue(result)
-
- self.mox.ResetAll()
- # Up
- timeutils.utcnow().AndReturn(fts_func(fake_now))
- service = {'updated_at': fts_func(fake_now - self.down_time + 1),
- 'created_at': fts_func(fake_now - self.down_time + 1)}
- self.mox.ReplayAll()
- result = self.servicegroup_api.service_is_up(service)
- self.assertTrue(result)
-
- self.mox.ResetAll()
- # Down
- timeutils.utcnow().AndReturn(fts_func(fake_now))
- service = {'updated_at': fts_func(fake_now - self.down_time - 3),
- 'created_at': fts_func(fake_now - self.down_time - 3)}
- self.mox.ReplayAll()
- result = self.servicegroup_api.service_is_up(service)
- self.assertFalse(result)
diff --git a/nova/tests/servicegroup/test_mc_servicegroup.py b/nova/tests/servicegroup/test_mc_servicegroup.py
deleted file mode 100644
index f49e6f752f..0000000000
--- a/nova/tests/servicegroup/test_mc_servicegroup.py
+++ /dev/null
@@ -1,213 +0,0 @@
-# Copyright (c) 2013 Akira Yoshiyama <akirayoshiyama at gmail dot com>
-#
-# This is derived from test_db_servicegroup.py.
-# Copyright 2012 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-from oslo.utils import timeutils
-
-from nova import context
-from nova import db
-from nova import service
-from nova import servicegroup
-from nova import test
-
-
-class ServiceFixture(fixtures.Fixture):
-
- def __init__(self, host, binary, topic):
- super(ServiceFixture, self).__init__()
- self.host = host
- self.binary = binary
- self.topic = topic
- self.serv = None
-
- def setUp(self):
- super(ServiceFixture, self).setUp()
- self.serv = service.Service(self.host,
- self.binary,
- self.topic,
- 'nova.tests.test_service.FakeManager',
- 1, 1)
- self.addCleanup(self.serv.kill)
-
-
-class MemcachedServiceGroupTestCase(test.TestCase):
-
- def setUp(self):
- super(MemcachedServiceGroupTestCase, self).setUp()
- servicegroup.API._driver = None
- self.flags(servicegroup_driver='mc')
- self.down_time = 15
- self.flags(enable_new_services=True)
- self.flags(service_down_time=self.down_time)
- self.servicegroup_api = servicegroup.API(test=True)
- self._host = 'foo'
- self._binary = 'nova-fake'
- self._topic = 'unittest'
- self._ctx = context.get_admin_context()
-
- def test_memcached_driver(self):
- serv = self.useFixture(
- ServiceFixture(self._host, self._binary, self._topic)).serv
- serv.start()
- service_ref = db.service_get_by_args(self._ctx,
- self._host,
- self._binary)
- hostkey = str("%s:%s" % (self._topic, self._host))
- self.servicegroup_api._driver.mc.set(hostkey,
- timeutils.utcnow(),
- time=self.down_time)
-
- self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
- self.useFixture(test.TimeOverride())
- timeutils.advance_time_seconds(self.down_time + 1)
- self.servicegroup_api._driver._report_state(serv)
- service_ref = db.service_get_by_args(self._ctx,
- self._host,
- self._binary)
-
- self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
- serv.stop()
- timeutils.advance_time_seconds(self.down_time + 1)
- service_ref = db.service_get_by_args(self._ctx,
- self._host,
- self._binary)
- self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
-
- def test_get_all(self):
- host1 = self._host + '_1'
- host2 = self._host + '_2'
- host3 = self._host + '_3'
-
- serv1 = self.useFixture(
- ServiceFixture(host1, self._binary, self._topic)).serv
- serv1.start()
-
- serv2 = self.useFixture(
- ServiceFixture(host2, self._binary, self._topic)).serv
- serv2.start()
-
- serv3 = self.useFixture(
- ServiceFixture(host3, self._binary, self._topic)).serv
- serv3.start()
-
- db.service_get_by_args(self._ctx, host1, self._binary)
- db.service_get_by_args(self._ctx, host2, self._binary)
- db.service_get_by_args(self._ctx, host3, self._binary)
-
- host1key = str("%s:%s" % (self._topic, host1))
- host2key = str("%s:%s" % (self._topic, host2))
- host3key = str("%s:%s" % (self._topic, host3))
- self.servicegroup_api._driver.mc.set(host1key,
- timeutils.utcnow(),
- time=self.down_time)
- self.servicegroup_api._driver.mc.set(host2key,
- timeutils.utcnow(),
- time=self.down_time)
- self.servicegroup_api._driver.mc.set(host3key,
- timeutils.utcnow(),
- time=-1)
-
- services = self.servicegroup_api.get_all(self._topic)
-
- self.assertIn(host1, services)
- self.assertIn(host2, services)
- self.assertNotIn(host3, services)
-
- service_id = self.servicegroup_api.get_one(self._topic)
- self.assertIn(service_id, services)
-
- def test_service_is_up(self):
- serv = self.useFixture(
- ServiceFixture(self._host, self._binary, self._topic)).serv
- serv.start()
- service_ref = db.service_get_by_args(self._ctx,
- self._host,
- self._binary)
- fake_now = 1000
- down_time = 15
- self.flags(service_down_time=down_time)
- self.mox.StubOutWithMock(timeutils, 'utcnow_ts')
- self.servicegroup_api = servicegroup.API()
- hostkey = str("%s:%s" % (self._topic, self._host))
-
- # Up (equal)
- timeutils.utcnow_ts().AndReturn(fake_now)
- timeutils.utcnow_ts().AndReturn(fake_now + down_time - 1)
- self.mox.ReplayAll()
- self.servicegroup_api._driver.mc.set(hostkey,
- timeutils.utcnow(),
- time=down_time)
- result = self.servicegroup_api.service_is_up(service_ref)
- self.assertTrue(result)
-
- self.mox.ResetAll()
- # Up
- timeutils.utcnow_ts().AndReturn(fake_now)
- timeutils.utcnow_ts().AndReturn(fake_now + down_time - 2)
- self.mox.ReplayAll()
- self.servicegroup_api._driver.mc.set(hostkey,
- timeutils.utcnow(),
- time=down_time)
- result = self.servicegroup_api.service_is_up(service_ref)
- self.assertTrue(result)
-
- self.mox.ResetAll()
- # Down
- timeutils.utcnow_ts().AndReturn(fake_now)
- timeutils.utcnow_ts().AndReturn(fake_now + down_time)
- self.mox.ReplayAll()
- self.servicegroup_api._driver.mc.set(hostkey,
- timeutils.utcnow(),
- time=down_time)
- result = self.servicegroup_api.service_is_up(service_ref)
- self.assertFalse(result)
-
- self.mox.ResetAll()
- # Down
- timeutils.utcnow_ts().AndReturn(fake_now)
- timeutils.utcnow_ts().AndReturn(fake_now + down_time + 1)
- self.mox.ReplayAll()
- self.servicegroup_api._driver.mc.set(hostkey,
- timeutils.utcnow(),
- time=down_time)
- result = self.servicegroup_api.service_is_up(service_ref)
- self.assertFalse(result)
-
- self.mox.ResetAll()
-
- def test_report_state(self):
- serv = self.useFixture(
- ServiceFixture(self._host, self._binary, self._topic)).serv
- serv.start()
- db.service_get_by_args(self._ctx, self._host, self._binary)
- self.servicegroup_api = servicegroup.API()
-
- # updating model_disconnected
- serv.model_disconnected = True
- self.servicegroup_api._driver._report_state(serv)
- self.assertFalse(serv.model_disconnected)
-
- # handling exception
- serv.model_disconnected = True
- self.servicegroup_api._driver.mc = None
- self.servicegroup_api._driver._report_state(serv)
- self.assertTrue(serv.model_disconnected)
-
- delattr(serv, 'model_disconnected')
- self.servicegroup_api._driver.mc = None
- self.servicegroup_api._driver._report_state(serv)
- self.assertTrue(serv.model_disconnected)
diff --git a/nova/tests/servicegroup/test_zk_driver.py b/nova/tests/servicegroup/test_zk_driver.py
deleted file mode 100644
index 43b56590cf..0000000000
--- a/nova/tests/servicegroup/test_zk_driver.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright (c) AT&T 2012-2013 Yun Mao <yunmao@gmail.com>
-# Copyright 2012 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Test the ZooKeeper driver for servicegroup.
-
-You need to install ZooKeeper locally and related dependencies
-to run the test. It's unclear how to install python-zookeeper lib
-in venv so you might have to run the test without it.
-
-To set up in Ubuntu 12.04:
-$ sudo apt-get install zookeeper zookeeperd python-zookeeper
-$ sudo pip install evzookeeper
-$ nosetests nova.tests.servicegroup.test_zk_driver
-"""
-
-import eventlet
-
-from nova import servicegroup
-from nova import test
-
-
-class ZKServiceGroupTestCase(test.NoDBTestCase):
-
- def setUp(self):
- super(ZKServiceGroupTestCase, self).setUp()
- servicegroup.API._driver = None
- from nova.servicegroup.drivers import zk
- self.flags(servicegroup_driver='zk')
- self.flags(address='localhost:2181', group="zookeeper")
- try:
- zk.ZooKeeperDriver()
- except ImportError:
- self.skipTest("Unable to test due to lack of ZooKeeper")
-
- def test_join_leave(self):
- self.servicegroup_api = servicegroup.API()
- service_id = {'topic': 'unittest', 'host': 'serviceA'}
- self.servicegroup_api.join(service_id['host'], service_id['topic'])
- self.assertTrue(self.servicegroup_api.service_is_up(service_id))
- self.servicegroup_api.leave(service_id['host'], service_id['topic'])
- # make sure zookeeper is updated and watcher is triggered
- eventlet.sleep(1)
- self.assertFalse(self.servicegroup_api.service_is_up(service_id))
-
- def test_stop(self):
- self.servicegroup_api = servicegroup.API()
- service_id = {'topic': 'unittest', 'host': 'serviceA'}
- pulse = self.servicegroup_api.join(service_id['host'],
- service_id['topic'], None)
- self.assertTrue(self.servicegroup_api.service_is_up(service_id))
- pulse.stop()
- eventlet.sleep(1)
- self.assertFalse(self.servicegroup_api.service_is_up(service_id))
diff --git a/nova/tests/test_availability_zones.py b/nova/tests/test_availability_zones.py
deleted file mode 100644
index b7c78048f2..0000000000
--- a/nova/tests/test_availability_zones.py
+++ /dev/null
@@ -1,255 +0,0 @@
-# Copyright 2013 Netease Corporation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Tests for availability zones
-"""
-
-from oslo.config import cfg
-
-from nova import availability_zones as az
-from nova import context
-from nova import db
-from nova import test
-from nova.tests.api.openstack import fakes
-
-CONF = cfg.CONF
-CONF.import_opt('internal_service_availability_zone',
- 'nova.availability_zones')
-CONF.import_opt('default_availability_zone',
- 'nova.availability_zones')
-
-
-class AvailabilityZoneTestCases(test.TestCase):
- """Test case for aggregate based availability zone."""
-
- def setUp(self):
- super(AvailabilityZoneTestCases, self).setUp()
- self.host = 'me'
- self.availability_zone = 'nova-test'
- self.default_az = CONF.default_availability_zone
- self.default_in_az = CONF.internal_service_availability_zone
- self.context = context.get_admin_context()
- self.agg = self._create_az('az_agg', self.availability_zone)
-
- def tearDown(self):
- db.aggregate_delete(self.context, self.agg['id'])
- super(AvailabilityZoneTestCases, self).tearDown()
-
- def _create_az(self, agg_name, az_name):
- agg_meta = {'name': agg_name}
- agg = db.aggregate_create(self.context, agg_meta)
-
- metadata = {'availability_zone': az_name}
- db.aggregate_metadata_add(self.context, agg['id'], metadata)
-
- return agg
-
- def _update_az(self, aggregate, az_name):
- metadata = {'availability_zone': az_name}
- db.aggregate_update(self.context, aggregate['id'], metadata)
-
- def _create_service_with_topic(self, topic, host, disabled=False):
- values = {
- 'binary': 'bin',
- 'host': host,
- 'topic': topic,
- 'disabled': disabled,
- }
- return db.service_create(self.context, values)
-
- def _destroy_service(self, service):
- return db.service_destroy(self.context, service['id'])
-
- def _add_to_aggregate(self, service, aggregate):
- return db.aggregate_host_add(self.context,
- aggregate['id'], service['host'])
-
- def _delete_from_aggregate(self, service, aggregate):
- return db.aggregate_host_delete(self.context,
- aggregate['id'], service['host'])
-
- def test_rest_availability_zone_reset_cache(self):
- az._get_cache().add('cache', 'fake_value')
- az.reset_cache()
- self.assertIsNone(az._get_cache().get('cache'))
-
- def test_update_host_availability_zone_cache(self):
- """Test availability zone cache could be update."""
- service = self._create_service_with_topic('compute', self.host)
-
- # Create a new aggregate with an AZ and add the host to the AZ
- az_name = 'az1'
- cache_key = az._make_cache_key(self.host)
- agg_az1 = self._create_az('agg-az1', az_name)
- self._add_to_aggregate(service, agg_az1)
- az.update_host_availability_zone_cache(self.context, self.host)
- self.assertEqual(az._get_cache().get(cache_key), 'az1')
- az.update_host_availability_zone_cache(self.context, self.host, 'az2')
- self.assertEqual(az._get_cache().get(cache_key), 'az2')
-
- def test_set_availability_zone_compute_service(self):
- """Test for compute service get right availability zone."""
- service = self._create_service_with_topic('compute', self.host)
- services = db.service_get_all(self.context)
-
- # The service is not add into aggregate, so confirm it is default
- # availability zone.
- new_service = az.set_availability_zones(self.context, services)[0]
- self.assertEqual(new_service['availability_zone'],
- self.default_az)
-
- # The service is added into aggregate, confirm return the aggregate
- # availability zone.
- self._add_to_aggregate(service, self.agg)
- new_service = az.set_availability_zones(self.context, services)[0]
- self.assertEqual(new_service['availability_zone'],
- self.availability_zone)
-
- self._destroy_service(service)
-
- def test_set_availability_zone_unicode_key(self):
- """Test set availability zone cache key is unicode."""
- service = self._create_service_with_topic('network', self.host)
- services = db.service_get_all(self.context)
- az.set_availability_zones(self.context, services)
- self.assertIsInstance(services[0]['host'], unicode)
- cached_key = az._make_cache_key(services[0]['host'])
- self.assertIsInstance(cached_key, str)
- self._destroy_service(service)
-
- def test_set_availability_zone_not_compute_service(self):
- """Test not compute service get right availability zone."""
- service = self._create_service_with_topic('network', self.host)
- services = db.service_get_all(self.context)
- new_service = az.set_availability_zones(self.context, services)[0]
- self.assertEqual(new_service['availability_zone'],
- self.default_in_az)
- self._destroy_service(service)
-
- def test_get_host_availability_zone(self):
- """Test get right availability zone by given host."""
- self.assertEqual(self.default_az,
- az.get_host_availability_zone(self.context, self.host))
-
- service = self._create_service_with_topic('compute', self.host)
- self._add_to_aggregate(service, self.agg)
-
- self.assertEqual(self.availability_zone,
- az.get_host_availability_zone(self.context, self.host))
-
- def test_update_host_availability_zone(self):
- """Test availability zone could be update by given host."""
- service = self._create_service_with_topic('compute', self.host)
-
- # Create a new aggregate with an AZ and add the host to the AZ
- az_name = 'az1'
- agg_az1 = self._create_az('agg-az1', az_name)
- self._add_to_aggregate(service, agg_az1)
- self.assertEqual(az_name,
- az.get_host_availability_zone(self.context, self.host))
- # Update AZ
- new_az_name = 'az2'
- self._update_az(agg_az1, new_az_name)
- self.assertEqual(new_az_name,
- az.get_host_availability_zone(self.context, self.host))
-
- def test_delete_host_availability_zone(self):
- """Test availability zone could be deleted successfully."""
- service = self._create_service_with_topic('compute', self.host)
-
- # Create a new aggregate with an AZ and add the host to the AZ
- az_name = 'az1'
- agg_az1 = self._create_az('agg-az1', az_name)
- self._add_to_aggregate(service, agg_az1)
- self.assertEqual(az_name,
- az.get_host_availability_zone(self.context, self.host))
- # Delete the AZ via deleting the aggregate
- self._delete_from_aggregate(service, agg_az1)
- self.assertEqual(self.default_az,
- az.get_host_availability_zone(self.context, self.host))
-
- def test_get_availability_zones(self):
- """Test get_availability_zones."""
-
- # When the param get_only_available of get_availability_zones is set
- # to default False, it returns two lists, zones with at least one
- # enabled services, and zones with no enabled services,
- # when get_only_available is set to True, only return a list of zones
- # with at least one enabled servies.
- # Use the following test data:
- #
- # zone host enabled
- # nova-test host1 Yes
- # nova-test host2 No
- # nova-test2 host3 Yes
- # nova-test3 host4 No
- # <default> host5 No
-
- agg2 = self._create_az('agg-az2', 'nova-test2')
- agg3 = self._create_az('agg-az3', 'nova-test3')
-
- service1 = self._create_service_with_topic('compute', 'host1',
- disabled=False)
- service2 = self._create_service_with_topic('compute', 'host2',
- disabled=True)
- service3 = self._create_service_with_topic('compute', 'host3',
- disabled=False)
- service4 = self._create_service_with_topic('compute', 'host4',
- disabled=True)
- self._create_service_with_topic('compute', 'host5',
- disabled=True)
-
- self._add_to_aggregate(service1, self.agg)
- self._add_to_aggregate(service2, self.agg)
- self._add_to_aggregate(service3, agg2)
- self._add_to_aggregate(service4, agg3)
-
- zones, not_zones = az.get_availability_zones(self.context)
-
- self.assertEqual(zones, ['nova-test', 'nova-test2'])
- self.assertEqual(not_zones, ['nova-test3', 'nova'])
-
- zones = az.get_availability_zones(self.context, True)
-
- self.assertEqual(zones, ['nova-test', 'nova-test2'])
-
- zones, not_zones = az.get_availability_zones(self.context,
- with_hosts=True)
-
- self.assertEqual(zones, [(u'nova-test2', set([u'host3'])),
- (u'nova-test', set([u'host1']))])
- self.assertEqual(not_zones, [(u'nova-test3', set([u'host4'])),
- (u'nova', set([u'host5']))])
-
- def test_get_instance_availability_zone_default_value(self):
- """Test get right availability zone by given an instance."""
- fake_inst_id = 162
- fake_inst = fakes.stub_instance(fake_inst_id, host=self.host)
-
- self.assertEqual(self.default_az,
- az.get_instance_availability_zone(self.context, fake_inst))
-
- def test_get_instance_availability_zone_from_aggregate(self):
- """Test get availability zone from aggregate by given an instance."""
- host = 'host170'
- service = self._create_service_with_topic('compute', host)
- self._add_to_aggregate(service, self.agg)
-
- fake_inst_id = 174
- fake_inst = fakes.stub_instance(fake_inst_id, host=host)
-
- self.assertEqual(self.availability_zone,
- az.get_instance_availability_zone(self.context, fake_inst))
diff --git a/nova/tests/test_bdm.py b/nova/tests/test_bdm.py
deleted file mode 100644
index 31ec68b87a..0000000000
--- a/nova/tests/test_bdm.py
+++ /dev/null
@@ -1,248 +0,0 @@
-# Copyright 2011 Isaku Yamahata
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Tests for Block Device Mapping Code.
-"""
-
-from nova.api.ec2 import cloud
-from nova.api.ec2 import ec2utils
-from nova import test
-from nova.tests import matchers
-
-
-class BlockDeviceMappingEc2CloudTestCase(test.NoDBTestCase):
- """Test Case for Block Device Mapping."""
-
- def fake_ec2_vol_id_to_uuid(obj, ec2_id):
- if ec2_id == 'vol-87654321':
- return '22222222-3333-4444-5555-666666666666'
- elif ec2_id == 'vol-98765432':
- return '77777777-8888-9999-0000-aaaaaaaaaaaa'
- else:
- return 'OhNoooo'
-
- def fake_ec2_snap_id_to_uuid(obj, ec2_id):
- if ec2_id == 'snap-12345678':
- return '00000000-1111-2222-3333-444444444444'
- elif ec2_id == 'snap-23456789':
- return '11111111-2222-3333-4444-555555555555'
- else:
- return 'OhNoooo'
-
- def _assertApply(self, action, bdm_list):
- for bdm, expected_result in bdm_list:
- self.assertThat(action(bdm), matchers.DictMatches(expected_result))
-
- def test_parse_block_device_mapping(self):
- self.stubs.Set(ec2utils,
- 'ec2_vol_id_to_uuid',
- self.fake_ec2_vol_id_to_uuid)
- self.stubs.Set(ec2utils,
- 'ec2_snap_id_to_uuid',
- self.fake_ec2_snap_id_to_uuid)
- bdm_list = [
- ({'device_name': '/dev/fake0',
- 'ebs': {'snapshot_id': 'snap-12345678',
- 'volume_size': 1}},
- {'device_name': '/dev/fake0',
- 'snapshot_id': '00000000-1111-2222-3333-444444444444',
- 'volume_size': 1,
- 'delete_on_termination': True}),
-
- ({'device_name': '/dev/fake1',
- 'ebs': {'snapshot_id': 'snap-23456789',
- 'delete_on_termination': False}},
- {'device_name': '/dev/fake1',
- 'snapshot_id': '11111111-2222-3333-4444-555555555555',
- 'delete_on_termination': False}),
-
- ({'device_name': '/dev/fake2',
- 'ebs': {'snapshot_id': 'vol-87654321',
- 'volume_size': 2}},
- {'device_name': '/dev/fake2',
- 'volume_id': '22222222-3333-4444-5555-666666666666',
- 'volume_size': 2,
- 'delete_on_termination': True}),
-
- ({'device_name': '/dev/fake3',
- 'ebs': {'snapshot_id': 'vol-98765432',
- 'delete_on_termination': False}},
- {'device_name': '/dev/fake3',
- 'volume_id': '77777777-8888-9999-0000-aaaaaaaaaaaa',
- 'delete_on_termination': False}),
-
- ({'device_name': '/dev/fake4',
- 'ebs': {'no_device': True}},
- {'device_name': '/dev/fake4',
- 'no_device': True}),
-
- ({'device_name': '/dev/fake5',
- 'virtual_name': 'ephemeral0'},
- {'device_name': '/dev/fake5',
- 'virtual_name': 'ephemeral0'}),
-
- ({'device_name': '/dev/fake6',
- 'virtual_name': 'swap'},
- {'device_name': '/dev/fake6',
- 'virtual_name': 'swap'}),
- ]
- self._assertApply(cloud._parse_block_device_mapping, bdm_list)
-
- def test_format_block_device_mapping(self):
- bdm_list = [
- ({'device_name': '/dev/fake0',
- 'snapshot_id': 0x12345678,
- 'volume_size': 1,
- 'delete_on_termination': True},
- {'deviceName': '/dev/fake0',
- 'ebs': {'snapshotId': 'snap-12345678',
- 'volumeSize': 1,
- 'deleteOnTermination': True}}),
-
- ({'device_name': '/dev/fake1',
- 'snapshot_id': 0x23456789},
- {'deviceName': '/dev/fake1',
- 'ebs': {'snapshotId': 'snap-23456789'}}),
-
- ({'device_name': '/dev/fake2',
- 'snapshot_id': 0x23456789,
- 'delete_on_termination': False},
- {'deviceName': '/dev/fake2',
- 'ebs': {'snapshotId': 'snap-23456789',
- 'deleteOnTermination': False}}),
-
- ({'device_name': '/dev/fake3',
- 'volume_id': 0x12345678,
- 'volume_size': 1,
- 'delete_on_termination': True},
- {'deviceName': '/dev/fake3',
- 'ebs': {'snapshotId': 'vol-12345678',
- 'volumeSize': 1,
- 'deleteOnTermination': True}}),
-
- ({'device_name': '/dev/fake4',
- 'volume_id': 0x23456789},
- {'deviceName': '/dev/fake4',
- 'ebs': {'snapshotId': 'vol-23456789'}}),
-
- ({'device_name': '/dev/fake5',
- 'volume_id': 0x23456789,
- 'delete_on_termination': False},
- {'deviceName': '/dev/fake5',
- 'ebs': {'snapshotId': 'vol-23456789',
- 'deleteOnTermination': False}}),
- ]
- self._assertApply(cloud._format_block_device_mapping, bdm_list)
-
- def test_format_mapping(self):
- properties = {
- 'mappings': [
- {'virtual': 'ami',
- 'device': 'sda1'},
- {'virtual': 'root',
- 'device': '/dev/sda1'},
-
- {'virtual': 'swap',
- 'device': 'sdb1'},
- {'virtual': 'swap',
- 'device': 'sdb2'},
- {'virtual': 'swap',
- 'device': 'sdb3'},
- {'virtual': 'swap',
- 'device': 'sdb4'},
-
- {'virtual': 'ephemeral0',
- 'device': 'sdc1'},
- {'virtual': 'ephemeral1',
- 'device': 'sdc2'},
- {'virtual': 'ephemeral2',
- 'device': 'sdc3'},
- ],
-
- 'block_device_mapping': [
- # root
- {'device_name': '/dev/sda1',
- 'snapshot_id': 0x12345678,
- 'delete_on_termination': False},
-
-
- # overwrite swap
- {'device_name': '/dev/sdb2',
- 'snapshot_id': 0x23456789,
- 'delete_on_termination': False},
- {'device_name': '/dev/sdb3',
- 'snapshot_id': 0x3456789A},
- {'device_name': '/dev/sdb4',
- 'no_device': True},
-
- # overwrite ephemeral
- {'device_name': '/dev/sdc2',
- 'snapshot_id': 0x3456789A,
- 'delete_on_termination': False},
- {'device_name': '/dev/sdc3',
- 'snapshot_id': 0x456789AB},
- {'device_name': '/dev/sdc4',
- 'no_device': True},
-
- # volume
- {'device_name': '/dev/sdd1',
- 'snapshot_id': 0x87654321,
- 'delete_on_termination': False},
- {'device_name': '/dev/sdd2',
- 'snapshot_id': 0x98765432},
- {'device_name': '/dev/sdd3',
- 'snapshot_id': 0xA9875463},
- {'device_name': '/dev/sdd4',
- 'no_device': True}]}
-
- expected_result = {
- 'blockDeviceMapping': [
- # root
- {'deviceName': '/dev/sda1',
- 'ebs': {'snapshotId': 'snap-12345678',
- 'deleteOnTermination': False}},
-
- # swap
- {'deviceName': '/dev/sdb1',
- 'virtualName': 'swap'},
- {'deviceName': '/dev/sdb2',
- 'ebs': {'snapshotId': 'snap-23456789',
- 'deleteOnTermination': False}},
- {'deviceName': '/dev/sdb3',
- 'ebs': {'snapshotId': 'snap-3456789a'}},
-
- # ephemeral
- {'deviceName': '/dev/sdc1',
- 'virtualName': 'ephemeral0'},
- {'deviceName': '/dev/sdc2',
- 'ebs': {'snapshotId': 'snap-3456789a',
- 'deleteOnTermination': False}},
- {'deviceName': '/dev/sdc3',
- 'ebs': {'snapshotId': 'snap-456789ab'}},
-
- # volume
- {'deviceName': '/dev/sdd1',
- 'ebs': {'snapshotId': 'snap-87654321',
- 'deleteOnTermination': False}},
- {'deviceName': '/dev/sdd2',
- 'ebs': {'snapshotId': 'snap-98765432'}},
- {'deviceName': '/dev/sdd3',
- 'ebs': {'snapshotId': 'snap-a9875463'}}]}
-
- result = {}
- cloud._format_mappings(properties, result)
- self.assertEqual(result['blockDeviceMapping'].sort(),
- expected_result['blockDeviceMapping'].sort())
diff --git a/nova/tests/test_block_device.py b/nova/tests/test_block_device.py
deleted file mode 100644
index 3b817ac338..0000000000
--- a/nova/tests/test_block_device.py
+++ /dev/null
@@ -1,604 +0,0 @@
-# Copyright 2011 Isaku Yamahata
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Tests for Block Device utility functions.
-"""
-
-from nova import block_device
-from nova import exception
-from nova import objects
-from nova import test
-from nova.tests import fake_block_device
-from nova.tests import matchers
-
-
-class BlockDeviceTestCase(test.NoDBTestCase):
- def setUp(self):
- super(BlockDeviceTestCase, self).setUp()
- BDM = block_device.BlockDeviceDict
-
- self.new_mapping = [
- BDM({'id': 1, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sdb1',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'delete_on_termination': True,
- 'volume_size': 1,
- 'guest_format': 'swap',
- 'boot_index': -1}),
- BDM({'id': 2, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sdc1',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'volume_size': 10,
- 'delete_on_termination': True,
- 'boot_index': -1}),
- BDM({'id': 3, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sda1',
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'volume_id': 'fake-volume-id-1',
- 'connection_info': "{'fake': 'connection_info'}",
- 'boot_index': 0}),
- BDM({'id': 4, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sda2',
- 'source_type': 'snapshot',
- 'destination_type': 'volume',
- 'connection_info': "{'fake': 'connection_info'}",
- 'snapshot_id': 'fake-snapshot-id-1',
- 'volume_id': 'fake-volume-id-2',
- 'boot_index': -1}),
- BDM({'id': 5, 'instance_uuid': 'fake-instance',
- 'no_device': True,
- 'device_name': '/dev/vdc'}),
- ]
-
- def test_properties(self):
- root_device0 = '/dev/sda'
- root_device1 = '/dev/sdb'
- mappings = [{'virtual': 'root',
- 'device': root_device0}]
-
- properties0 = {'mappings': mappings}
- properties1 = {'mappings': mappings,
- 'root_device_name': root_device1}
-
- self.assertIsNone(block_device.properties_root_device_name({}))
- self.assertEqual(
- block_device.properties_root_device_name(properties0),
- root_device0)
- self.assertEqual(
- block_device.properties_root_device_name(properties1),
- root_device1)
-
- def test_ephemeral(self):
- self.assertFalse(block_device.is_ephemeral('ephemeral'))
- self.assertTrue(block_device.is_ephemeral('ephemeral0'))
- self.assertTrue(block_device.is_ephemeral('ephemeral1'))
- self.assertTrue(block_device.is_ephemeral('ephemeral11'))
- self.assertFalse(block_device.is_ephemeral('root'))
- self.assertFalse(block_device.is_ephemeral('swap'))
- self.assertFalse(block_device.is_ephemeral('/dev/sda1'))
-
- self.assertEqual(block_device.ephemeral_num('ephemeral0'), 0)
- self.assertEqual(block_device.ephemeral_num('ephemeral1'), 1)
- self.assertEqual(block_device.ephemeral_num('ephemeral11'), 11)
-
- self.assertFalse(block_device.is_swap_or_ephemeral('ephemeral'))
- self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral0'))
- self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral1'))
- self.assertTrue(block_device.is_swap_or_ephemeral('swap'))
- self.assertFalse(block_device.is_swap_or_ephemeral('root'))
- self.assertFalse(block_device.is_swap_or_ephemeral('/dev/sda1'))
-
- def test_mappings_prepend_dev(self):
- mapping = [
- {'virtual': 'ami', 'device': '/dev/sda'},
- {'virtual': 'root', 'device': 'sda'},
- {'virtual': 'ephemeral0', 'device': 'sdb'},
- {'virtual': 'swap', 'device': 'sdc'},
- {'virtual': 'ephemeral1', 'device': 'sdd'},
- {'virtual': 'ephemeral2', 'device': 'sde'}]
-
- expected = [
- {'virtual': 'ami', 'device': '/dev/sda'},
- {'virtual': 'root', 'device': 'sda'},
- {'virtual': 'ephemeral0', 'device': '/dev/sdb'},
- {'virtual': 'swap', 'device': '/dev/sdc'},
- {'virtual': 'ephemeral1', 'device': '/dev/sdd'},
- {'virtual': 'ephemeral2', 'device': '/dev/sde'}]
-
- prepended = block_device.mappings_prepend_dev(mapping)
- self.assertEqual(prepended.sort(), expected.sort())
-
- def test_strip_dev(self):
- self.assertEqual(block_device.strip_dev('/dev/sda'), 'sda')
- self.assertEqual(block_device.strip_dev('sda'), 'sda')
-
- def test_strip_prefix(self):
- self.assertEqual(block_device.strip_prefix('/dev/sda'), 'a')
- self.assertEqual(block_device.strip_prefix('a'), 'a')
- self.assertEqual(block_device.strip_prefix('xvda'), 'a')
- self.assertEqual(block_device.strip_prefix('vda'), 'a')
-
- def test_get_device_letter(self):
- self.assertEqual(block_device.get_device_letter(''), '')
- self.assertEqual(block_device.get_device_letter('/dev/sda1'), 'a')
- self.assertEqual(block_device.get_device_letter('/dev/xvdb'), 'b')
- self.assertEqual(block_device.get_device_letter('/dev/d'), 'd')
- self.assertEqual(block_device.get_device_letter('a'), 'a')
- self.assertEqual(block_device.get_device_letter('sdb2'), 'b')
- self.assertEqual(block_device.get_device_letter('vdc'), 'c')
-
- def test_volume_in_mapping(self):
- swap = {'device_name': '/dev/sdb',
- 'swap_size': 1}
- ephemerals = [{'num': 0,
- 'virtual_name': 'ephemeral0',
- 'device_name': '/dev/sdc1',
- 'size': 1},
- {'num': 2,
- 'virtual_name': 'ephemeral2',
- 'device_name': '/dev/sdd',
- 'size': 1}]
- block_device_mapping = [{'mount_device': '/dev/sde',
- 'device_path': 'fake_device'},
- {'mount_device': '/dev/sdf',
- 'device_path': 'fake_device'}]
- block_device_info = {
- 'root_device_name': '/dev/sda',
- 'swap': swap,
- 'ephemerals': ephemerals,
- 'block_device_mapping': block_device_mapping}
-
- def _assert_volume_in_mapping(device_name, true_or_false):
- in_mapping = block_device.volume_in_mapping(
- device_name, block_device_info)
- self.assertEqual(in_mapping, true_or_false)
-
- _assert_volume_in_mapping('sda', False)
- _assert_volume_in_mapping('sdb', True)
- _assert_volume_in_mapping('sdc1', True)
- _assert_volume_in_mapping('sdd', True)
- _assert_volume_in_mapping('sde', True)
- _assert_volume_in_mapping('sdf', True)
- _assert_volume_in_mapping('sdg', False)
- _assert_volume_in_mapping('sdh1', False)
-
- def test_get_root_bdm(self):
- root_bdm = {'device_name': 'vda', 'boot_index': 0}
- bdms = [root_bdm,
- {'device_name': 'vdb', 'boot_index': 1},
- {'device_name': 'vdc', 'boot_index': -1},
- {'device_name': 'vdd'}]
- self.assertEqual(root_bdm, block_device.get_root_bdm(bdms))
- self.assertEqual(root_bdm, block_device.get_root_bdm([bdms[0]]))
- self.assertIsNone(block_device.get_root_bdm(bdms[1:]))
- self.assertIsNone(block_device.get_root_bdm(bdms[2:]))
- self.assertIsNone(block_device.get_root_bdm(bdms[3:]))
- self.assertIsNone(block_device.get_root_bdm([]))
-
- def test_get_bdm_ephemeral_disk_size(self):
- size = block_device.get_bdm_ephemeral_disk_size(self.new_mapping)
- self.assertEqual(10, size)
-
- def test_get_bdm_swap_list(self):
- swap_list = block_device.get_bdm_swap_list(self.new_mapping)
- self.assertEqual(1, len(swap_list))
- self.assertEqual(1, swap_list[0].get('id'))
-
- def test_get_bdm_local_disk_num(self):
- size = block_device.get_bdm_local_disk_num(self.new_mapping)
- self.assertEqual(2, size)
-
- def test_new_format_is_swap(self):
- expected_results = [True, False, False, False, False]
- for expected, bdm in zip(expected_results, self.new_mapping):
- res = block_device.new_format_is_swap(bdm)
- self.assertEqual(expected, res)
-
- def test_new_format_is_ephemeral(self):
- expected_results = [False, True, False, False, False]
- for expected, bdm in zip(expected_results, self.new_mapping):
- res = block_device.new_format_is_ephemeral(bdm)
- self.assertEqual(expected, res)
-
- def test_validate_device_name(self):
- for value in [' ', 10, None, 'a' * 260]:
- self.assertRaises(exception.InvalidBDMFormat,
- block_device.validate_device_name,
- value)
-
- def test_validate_and_default_volume_size(self):
- bdm = {}
- for value in [-1, 'a', 2.5]:
- bdm['volume_size'] = value
- self.assertRaises(exception.InvalidBDMFormat,
- block_device.validate_and_default_volume_size,
- bdm)
-
- def test_get_bdms_to_connect(self):
- root_bdm = {'device_name': 'vda', 'boot_index': 0}
- bdms = [root_bdm,
- {'device_name': 'vdb', 'boot_index': 1},
- {'device_name': 'vdc', 'boot_index': -1},
- {'device_name': 'vde', 'boot_index': None},
- {'device_name': 'vdd'}]
- self.assertNotIn(root_bdm, block_device.get_bdms_to_connect(bdms,
- exclude_root_mapping=True))
- self.assertIn(root_bdm, block_device.get_bdms_to_connect(bdms))
-
-
-class TestBlockDeviceDict(test.NoDBTestCase):
- def setUp(self):
- super(TestBlockDeviceDict, self).setUp()
-
- BDM = block_device.BlockDeviceDict
-
- self.api_mapping = [
- {'id': 1, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sdb1',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'delete_on_termination': True,
- 'guest_format': 'swap',
- 'boot_index': -1},
- {'id': 2, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sdc1',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'delete_on_termination': True,
- 'boot_index': -1},
- {'id': 3, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sda1',
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'uuid': 'fake-volume-id-1',
- 'boot_index': 0},
- {'id': 4, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sda2',
- 'source_type': 'snapshot',
- 'destination_type': 'volume',
- 'uuid': 'fake-snapshot-id-1',
- 'boot_index': -1},
- {'id': 5, 'instance_uuid': 'fake-instance',
- 'no_device': True,
- 'device_name': '/dev/vdc'},
- ]
-
- self.new_mapping = [
- BDM({'id': 1, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sdb1',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'delete_on_termination': True,
- 'guest_format': 'swap',
- 'boot_index': -1}),
- BDM({'id': 2, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sdc1',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'delete_on_termination': True,
- 'boot_index': -1}),
- BDM({'id': 3, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sda1',
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'volume_id': 'fake-volume-id-1',
- 'connection_info': "{'fake': 'connection_info'}",
- 'boot_index': 0}),
- BDM({'id': 4, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sda2',
- 'source_type': 'snapshot',
- 'destination_type': 'volume',
- 'connection_info': "{'fake': 'connection_info'}",
- 'snapshot_id': 'fake-snapshot-id-1',
- 'volume_id': 'fake-volume-id-2',
- 'boot_index': -1}),
- BDM({'id': 5, 'instance_uuid': 'fake-instance',
- 'no_device': True,
- 'device_name': '/dev/vdc'}),
- ]
-
- self.legacy_mapping = [
- {'id': 1, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sdb1',
- 'delete_on_termination': True,
- 'virtual_name': 'swap'},
- {'id': 2, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sdc1',
- 'delete_on_termination': True,
- 'virtual_name': 'ephemeral0'},
- {'id': 3, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sda1',
- 'volume_id': 'fake-volume-id-1',
- 'connection_info': "{'fake': 'connection_info'}"},
- {'id': 4, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sda2',
- 'connection_info': "{'fake': 'connection_info'}",
- 'snapshot_id': 'fake-snapshot-id-1',
- 'volume_id': 'fake-volume-id-2'},
- {'id': 5, 'instance_uuid': 'fake-instance',
- 'no_device': True,
- 'device_name': '/dev/vdc'},
- ]
-
- self.new_mapping_source_image = [
- BDM({'id': 6, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sda3',
- 'source_type': 'image',
- 'destination_type': 'volume',
- 'connection_info': "{'fake': 'connection_info'}",
- 'volume_id': 'fake-volume-id-3',
- 'boot_index': -1}),
- BDM({'id': 7, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sda4',
- 'source_type': 'image',
- 'destination_type': 'local',
- 'connection_info': "{'fake': 'connection_info'}",
- 'image_id': 'fake-image-id-2',
- 'boot_index': -1}),
- ]
-
- self.legacy_mapping_source_image = [
- {'id': 6, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sda3',
- 'connection_info': "{'fake': 'connection_info'}",
- 'volume_id': 'fake-volume-id-3'},
- ]
-
- def test_init(self):
- def fake_validate(obj, dct):
- pass
-
- self.stubs.Set(block_device.BlockDeviceDict, '_fields',
- set(['field1', 'field2']))
- self.stubs.Set(block_device.BlockDeviceDict, '_db_only_fields',
- set(['db_field1', 'db_field2']))
- self.stubs.Set(block_device.BlockDeviceDict, '_validate',
- fake_validate)
-
- # Make sure db fields are not picked up if they are not
- # in the original dict
- dev_dict = block_device.BlockDeviceDict({'field1': 'foo',
- 'field2': 'bar',
- 'db_field1': 'baz'})
- self.assertIn('field1', dev_dict)
- self.assertIn('field2', dev_dict)
- self.assertIn('db_field1', dev_dict)
- self.assertNotIn('db_field2', dev_dict)
-
- # Make sure all expected fields are defaulted
- dev_dict = block_device.BlockDeviceDict({'field1': 'foo'})
- self.assertIn('field1', dev_dict)
- self.assertIn('field2', dev_dict)
- self.assertIsNone(dev_dict['field2'])
- self.assertNotIn('db_field1', dev_dict)
- self.assertNotIn('db_field2', dev_dict)
-
- # Unless they are not meant to be
- dev_dict = block_device.BlockDeviceDict({'field1': 'foo'},
- do_not_default=set(['field2']))
- self.assertIn('field1', dev_dict)
- self.assertNotIn('field2', dev_dict)
- self.assertNotIn('db_field1', dev_dict)
- self.assertNotIn('db_field2', dev_dict)
-
- # Passing kwargs to constructor works
- dev_dict = block_device.BlockDeviceDict(field1='foo')
- self.assertIn('field1', dev_dict)
- self.assertIn('field2', dev_dict)
- self.assertIsNone(dev_dict['field2'])
- dev_dict = block_device.BlockDeviceDict(
- {'field1': 'foo'}, field2='bar')
- self.assertEqual('foo', dev_dict['field1'])
- self.assertEqual('bar', dev_dict['field2'])
-
- def test_init_prepend_dev_to_device_name(self):
- bdm = {'id': 3, 'instance_uuid': 'fake-instance',
- 'device_name': 'vda',
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'volume_id': 'fake-volume-id-1',
- 'boot_index': 0}
- bdm_dict = block_device.BlockDeviceDict(bdm)
- self.assertEqual('/dev/vda', bdm_dict['device_name'])
-
- bdm['device_name'] = '/dev/vdb'
- bdm_dict = block_device.BlockDeviceDict(bdm)
- self.assertEqual('/dev/vdb', bdm_dict['device_name'])
-
- bdm['device_name'] = None
- bdm_dict = block_device.BlockDeviceDict(bdm)
- self.assertIsNone(bdm_dict['device_name'])
-
- def test_validate(self):
- self.assertRaises(exception.InvalidBDMFormat,
- block_device.BlockDeviceDict,
- {'bogus_field': 'lame_val'})
-
- lame_bdm = dict(self.new_mapping[2])
- del lame_bdm['source_type']
- self.assertRaises(exception.InvalidBDMFormat,
- block_device.BlockDeviceDict,
- lame_bdm)
-
- lame_bdm['no_device'] = True
- block_device.BlockDeviceDict(lame_bdm)
-
- lame_dev_bdm = dict(self.new_mapping[2])
- lame_dev_bdm['device_name'] = "not a valid name"
- self.assertRaises(exception.InvalidBDMFormat,
- block_device.BlockDeviceDict,
- lame_dev_bdm)
-
- lame_dev_bdm['device_name'] = ""
- self.assertRaises(exception.InvalidBDMFormat,
- block_device.BlockDeviceDict,
- lame_dev_bdm)
-
- cool_volume_size_bdm = dict(self.new_mapping[2])
- cool_volume_size_bdm['volume_size'] = '42'
- cool_volume_size_bdm = block_device.BlockDeviceDict(
- cool_volume_size_bdm)
- self.assertEqual(cool_volume_size_bdm['volume_size'], 42)
-
- lame_volume_size_bdm = dict(self.new_mapping[2])
- lame_volume_size_bdm['volume_size'] = 'some_non_int_string'
- self.assertRaises(exception.InvalidBDMFormat,
- block_device.BlockDeviceDict,
- lame_volume_size_bdm)
-
- truthy_bdm = dict(self.new_mapping[2])
- truthy_bdm['delete_on_termination'] = '1'
- truthy_bdm = block_device.BlockDeviceDict(truthy_bdm)
- self.assertEqual(truthy_bdm['delete_on_termination'], True)
-
- verbose_bdm = dict(self.new_mapping[2])
- verbose_bdm['boot_index'] = 'first'
- self.assertRaises(exception.InvalidBDMFormat,
- block_device.BlockDeviceDict,
- verbose_bdm)
-
- def test_from_legacy(self):
- for legacy, new in zip(self.legacy_mapping, self.new_mapping):
- self.assertThat(
- block_device.BlockDeviceDict.from_legacy(legacy),
- matchers.IsSubDictOf(new))
-
- def test_from_legacy_mapping(self):
- def _get_image_bdms(bdms):
- return [bdm for bdm in bdms if bdm['source_type'] == 'image']
-
- def _get_bootable_bdms(bdms):
- return [bdm for bdm in bdms if bdm['boot_index'] >= 0]
-
- new_no_img = block_device.from_legacy_mapping(self.legacy_mapping)
- self.assertEqual(len(_get_image_bdms(new_no_img)), 0)
-
- for new, expected in zip(new_no_img, self.new_mapping):
- self.assertThat(new, matchers.IsSubDictOf(expected))
-
- new_with_img = block_device.from_legacy_mapping(
- self.legacy_mapping, 'fake_image_ref')
- image_bdms = _get_image_bdms(new_with_img)
- boot_bdms = _get_bootable_bdms(new_with_img)
- self.assertEqual(len(image_bdms), 1)
- self.assertEqual(len(boot_bdms), 1)
- self.assertEqual(image_bdms[0]['boot_index'], 0)
- self.assertEqual(boot_bdms[0]['source_type'], 'image')
-
- new_with_img_and_root = block_device.from_legacy_mapping(
- self.legacy_mapping, 'fake_image_ref', 'sda1')
- image_bdms = _get_image_bdms(new_with_img_and_root)
- boot_bdms = _get_bootable_bdms(new_with_img_and_root)
- self.assertEqual(len(image_bdms), 0)
- self.assertEqual(len(boot_bdms), 1)
- self.assertEqual(boot_bdms[0]['boot_index'], 0)
- self.assertEqual(boot_bdms[0]['source_type'], 'volume')
-
- new_no_root = block_device.from_legacy_mapping(
- self.legacy_mapping, 'fake_image_ref', 'sda1', no_root=True)
- self.assertEqual(len(_get_image_bdms(new_no_root)), 0)
- self.assertEqual(len(_get_bootable_bdms(new_no_root)), 0)
-
- def test_from_api(self):
- for api, new in zip(self.api_mapping, self.new_mapping):
- new['connection_info'] = None
- if new['snapshot_id']:
- new['volume_id'] = None
- self.assertThat(
- block_device.BlockDeviceDict.from_api(api),
- matchers.IsSubDictOf(new))
-
- def test_from_api_invalid_blank_id(self):
- api_dict = {'id': 1,
- 'source_type': 'blank',
- 'destination_type': 'volume',
- 'uuid': 'fake-volume-id-1',
- 'delete_on_termination': True,
- 'boot_index': -1}
- self.assertRaises(exception.InvalidBDMFormat,
- block_device.BlockDeviceDict.from_api, api_dict)
-
- def test_legacy(self):
- for legacy, new in zip(self.legacy_mapping, self.new_mapping):
- self.assertThat(
- legacy,
- matchers.IsSubDictOf(new.legacy()))
-
- def test_legacy_mapping(self):
- got_legacy = block_device.legacy_mapping(self.new_mapping)
-
- for legacy, expected in zip(got_legacy, self.legacy_mapping):
- self.assertThat(expected, matchers.IsSubDictOf(legacy))
-
- def test_legacy_source_image(self):
- for legacy, new in zip(self.legacy_mapping_source_image,
- self.new_mapping_source_image):
- if new['destination_type'] == 'volume':
- self.assertThat(legacy, matchers.IsSubDictOf(new.legacy()))
- else:
- self.assertRaises(exception.InvalidBDMForLegacy, new.legacy)
-
- def test_legacy_mapping_source_image(self):
- got_legacy = block_device.legacy_mapping(self.new_mapping)
-
- for legacy, expected in zip(got_legacy, self.legacy_mapping):
- self.assertThat(expected, matchers.IsSubDictOf(legacy))
-
- def test_legacy_mapping_from_object_list(self):
- bdm1 = objects.BlockDeviceMapping()
- bdm1 = objects.BlockDeviceMapping._from_db_object(
- None, bdm1, fake_block_device.FakeDbBlockDeviceDict(
- self.new_mapping[0]))
- bdm2 = objects.BlockDeviceMapping()
- bdm2 = objects.BlockDeviceMapping._from_db_object(
- None, bdm2, fake_block_device.FakeDbBlockDeviceDict(
- self.new_mapping[1]))
- bdmlist = objects.BlockDeviceMappingList()
- bdmlist.objects = [bdm1, bdm2]
- block_device.legacy_mapping(bdmlist)
-
- def test_image_mapping(self):
- removed_fields = ['id', 'instance_uuid', 'connection_info',
- 'device_name', 'created_at', 'updated_at',
- 'deleted_at', 'deleted']
- for bdm in self.new_mapping:
- mapping_bdm = fake_block_device.FakeDbBlockDeviceDict(
- bdm).get_image_mapping()
- for fld in removed_fields:
- self.assertNotIn(fld, mapping_bdm)
-
- def _test_snapshot_from_bdm(self, template):
- snapshot = block_device.snapshot_from_bdm('new-snapshot-id', template)
- self.assertEqual(snapshot['snapshot_id'], 'new-snapshot-id')
- self.assertEqual(snapshot['source_type'], 'snapshot')
- self.assertEqual(snapshot['destination_type'], 'volume')
- for key in ['disk_bus', 'device_type', 'boot_index']:
- self.assertEqual(snapshot[key], template[key])
-
- def test_snapshot_from_bdm(self):
- for bdm in self.new_mapping:
- self._test_snapshot_from_bdm(bdm)
-
- def test_snapshot_from_object(self):
- for bdm in self.new_mapping[:-1]:
- obj = objects.BlockDeviceMapping()
- obj = objects.BlockDeviceMapping._from_db_object(
- None, obj, fake_block_device.FakeDbBlockDeviceDict(
- bdm))
- self._test_snapshot_from_bdm(obj)
diff --git a/nova/tests/test_configdrive2.py b/nova/tests/test_configdrive2.py
deleted file mode 100644
index 1f4397b572..0000000000
--- a/nova/tests/test_configdrive2.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import os
-import tempfile
-
-import mox
-from oslo.config import cfg
-
-from nova import context
-from nova.openstack.common import fileutils
-from nova import test
-from nova.tests import fake_instance
-from nova import utils
-from nova.virt import configdrive
-
-CONF = cfg.CONF
-
-
-class FakeInstanceMD(object):
- def metadata_for_config_drive(self):
- yield ('this/is/a/path/hello', 'This is some content')
-
-
-class ConfigDriveTestCase(test.NoDBTestCase):
-
- def test_create_configdrive_iso(self):
- CONF.set_override('config_drive_format', 'iso9660')
- imagefile = None
-
- try:
- self.mox.StubOutWithMock(utils, 'execute')
-
- utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots',
- '-allow-lowercase', '-allow-multidot', '-l',
- '-publisher', mox.IgnoreArg(), '-quiet', '-J', '-r',
- '-V', 'config-2', mox.IgnoreArg(), attempts=1,
- run_as_root=False).AndReturn(None)
-
- self.mox.ReplayAll()
-
- with configdrive.ConfigDriveBuilder(FakeInstanceMD()) as c:
- (fd, imagefile) = tempfile.mkstemp(prefix='cd_iso_')
- os.close(fd)
- c.make_drive(imagefile)
-
- finally:
- if imagefile:
- fileutils.delete_if_exists(imagefile)
-
- def test_create_configdrive_vfat(self):
- CONF.set_override('config_drive_format', 'vfat')
- imagefile = None
- try:
- self.mox.StubOutWithMock(utils, 'mkfs')
- self.mox.StubOutWithMock(utils, 'execute')
- self.mox.StubOutWithMock(utils, 'trycmd')
-
- utils.mkfs('vfat', mox.IgnoreArg(),
- label='config-2').AndReturn(None)
- utils.trycmd('mount', '-o', mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg(),
- run_as_root=True).AndReturn((None, None))
- utils.execute('umount', mox.IgnoreArg(),
- run_as_root=True).AndReturn(None)
-
- self.mox.ReplayAll()
-
- with configdrive.ConfigDriveBuilder(FakeInstanceMD()) as c:
- (fd, imagefile) = tempfile.mkstemp(prefix='cd_vfat_')
- os.close(fd)
- c.make_drive(imagefile)
-
- # NOTE(mikal): we can't check for a VFAT output here because the
- # filesystem creation stuff has been mocked out because it
- # requires root permissions
-
- finally:
- if imagefile:
- fileutils.delete_if_exists(imagefile)
-
- def test_config_drive_required_by_image_property(self):
- inst = fake_instance.fake_instance_obj(context.get_admin_context())
- inst.config_drive = ''
- inst.system_metadata = {
- utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive': 'mandatory'}
- self.assertTrue(configdrive.required_by(inst))
-
- inst.system_metadata = {
- utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive': 'optional'}
- self.assertFalse(configdrive.required_by(inst))
diff --git a/nova/tests/test_loadables.py b/nova/tests/test_loadables.py
deleted file mode 100644
index df023d1e5a..0000000000
--- a/nova/tests/test_loadables.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright 2012 OpenStack Foundation # All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For Loadable class handling.
-"""
-
-from nova import exception
-from nova import test
-from nova.tests import fake_loadables
-
-
-class LoadablesTestCase(test.NoDBTestCase):
- def setUp(self):
- super(LoadablesTestCase, self).setUp()
- self.fake_loader = fake_loadables.FakeLoader()
- # The name that we imported above for testing
- self.test_package = 'nova.tests.fake_loadables'
-
- def test_loader_init(self):
- self.assertEqual(self.fake_loader.package, self.test_package)
- # Test the path of the module
- ending_path = '/' + self.test_package.replace('.', '/')
- self.assertTrue(self.fake_loader.path.endswith(ending_path))
- self.assertEqual(self.fake_loader.loadable_cls_type,
- fake_loadables.FakeLoadable)
-
- def _compare_classes(self, classes, expected):
- class_names = [cls.__name__ for cls in classes]
- self.assertEqual(set(class_names), set(expected))
-
- def test_get_all_classes(self):
- classes = self.fake_loader.get_all_classes()
- expected_class_names = ['FakeLoadableSubClass1',
- 'FakeLoadableSubClass2',
- 'FakeLoadableSubClass5',
- 'FakeLoadableSubClass6']
- self._compare_classes(classes, expected_class_names)
-
- def test_get_matching_classes(self):
- prefix = self.test_package
- test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass1',
- prefix + '.fake_loadable2.FakeLoadableSubClass5']
- classes = self.fake_loader.get_matching_classes(test_classes)
- expected_class_names = ['FakeLoadableSubClass1',
- 'FakeLoadableSubClass5']
- self._compare_classes(classes, expected_class_names)
-
- def test_get_matching_classes_with_underscore(self):
- prefix = self.test_package
- test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass1',
- prefix + '.fake_loadable2._FakeLoadableSubClass7']
- self.assertRaises(exception.ClassNotFound,
- self.fake_loader.get_matching_classes,
- test_classes)
-
- def test_get_matching_classes_with_wrong_type1(self):
- prefix = self.test_package
- test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass4',
- prefix + '.fake_loadable2.FakeLoadableSubClass5']
- self.assertRaises(exception.ClassNotFound,
- self.fake_loader.get_matching_classes,
- test_classes)
-
- def test_get_matching_classes_with_wrong_type2(self):
- prefix = self.test_package
- test_classes = [prefix + '.fake_loadable1.FakeLoadableSubClass1',
- prefix + '.fake_loadable2.FakeLoadableSubClass8']
- self.assertRaises(exception.ClassNotFound,
- self.fake_loader.get_matching_classes,
- test_classes)
-
- def test_get_matching_classes_with_one_function(self):
- prefix = self.test_package
- test_classes = [prefix + '.fake_loadable1.return_valid_classes',
- prefix + '.fake_loadable2.FakeLoadableSubClass5']
- classes = self.fake_loader.get_matching_classes(test_classes)
- expected_class_names = ['FakeLoadableSubClass1',
- 'FakeLoadableSubClass2',
- 'FakeLoadableSubClass5']
- self._compare_classes(classes, expected_class_names)
-
- def test_get_matching_classes_with_two_functions(self):
- prefix = self.test_package
- test_classes = [prefix + '.fake_loadable1.return_valid_classes',
- prefix + '.fake_loadable2.return_valid_class']
- classes = self.fake_loader.get_matching_classes(test_classes)
- expected_class_names = ['FakeLoadableSubClass1',
- 'FakeLoadableSubClass2',
- 'FakeLoadableSubClass6']
- self._compare_classes(classes, expected_class_names)
-
- def test_get_matching_classes_with_function_including_invalids(self):
- # When using a method, no checking is done on valid classes.
- prefix = self.test_package
- test_classes = [prefix + '.fake_loadable1.return_invalid_classes',
- prefix + '.fake_loadable2.return_valid_class']
- classes = self.fake_loader.get_matching_classes(test_classes)
- expected_class_names = ['FakeLoadableSubClass1',
- '_FakeLoadableSubClass3',
- 'FakeLoadableSubClass4',
- 'FakeLoadableSubClass6']
- self._compare_classes(classes, expected_class_names)
diff --git a/nova/tests/test_matchers.py b/nova/tests/test_matchers.py
deleted file mode 100644
index 0e45962441..0000000000
--- a/nova/tests/test_matchers.py
+++ /dev/null
@@ -1,349 +0,0 @@
-# Copyright 2012 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import testtools
-from testtools.tests.matchers import helpers
-
-from nova.tests import matchers
-
-
-class TestDictMatches(testtools.TestCase, helpers.TestMatchersInterface):
-
- matches_matcher = matchers.DictMatches(
- {'foo': 'bar', 'baz': 'DONTCARE',
- 'cat': {'tabby': True, 'fluffy': False}}
- )
-
- matches_matches = [
- {'foo': 'bar', 'baz': 'noox', 'cat': {'tabby': True, 'fluffy': False}},
- {'foo': 'bar', 'baz': 'quux', 'cat': {'tabby': True, 'fluffy': False}},
- ]
-
- matches_mismatches = [
- {},
- {'foo': 'bar', 'baz': 'qux'},
- {'foo': 'bop', 'baz': 'qux',
- 'cat': {'tabby': True, 'fluffy': False}},
- {'foo': 'bar', 'baz': 'quux',
- 'cat': {'tabby': True, 'fluffy': True}},
- {'foo': 'bar', 'cat': {'tabby': True, 'fluffy': False}},
- ]
-
- str_examples = [
- ("DictMatches({'baz': 'DONTCARE', 'cat':"
- " {'fluffy': False, 'tabby': True}, 'foo': 'bar'})",
- matches_matcher),
- ]
-
- describe_examples = [
- ("Keys in d1 and not d2: set(['foo', 'baz', 'cat'])."
- " Keys in d2 and not d1: set([])", {}, matches_matcher),
- ("Dictionaries do not match at fluffy. d1: False d2: True",
- {'foo': 'bar', 'baz': 'quux',
- 'cat': {'tabby': True, 'fluffy': True}}, matches_matcher),
- ("Dictionaries do not match at foo. d1: bar d2: bop",
- {'foo': 'bop', 'baz': 'quux',
- 'cat': {'tabby': True, 'fluffy': False}}, matches_matcher),
- ]
-
-
-class TestDictListMatches(testtools.TestCase, helpers.TestMatchersInterface):
-
- matches_matcher = matchers.DictListMatches(
- [{'foo': 'bar', 'baz': 'DONTCARE',
- 'cat': {'tabby': True, 'fluffy': False}},
- {'dog': 'yorkie'},
- ])
-
- matches_matches = [
- [{'foo': 'bar', 'baz': 'qoox',
- 'cat': {'tabby': True, 'fluffy': False}},
- {'dog': 'yorkie'}],
- [{'foo': 'bar', 'baz': False,
- 'cat': {'tabby': True, 'fluffy': False}},
- {'dog': 'yorkie'}],
- ]
-
- matches_mismatches = [
- [],
- {},
- [{'foo': 'bar', 'baz': 'qoox',
- 'cat': {'tabby': True, 'fluffy': True}},
- {'dog': 'yorkie'}],
- [{'foo': 'bar', 'baz': False,
- 'cat': {'tabby': True, 'fluffy': False}},
- {'cat': 'yorkie'}],
- [{'foo': 'bop', 'baz': False,
- 'cat': {'tabby': True, 'fluffy': False}},
- {'dog': 'yorkie'}],
- ]
-
- str_examples = [
- ("DictListMatches([{'baz': 'DONTCARE', 'cat':"
- " {'fluffy': False, 'tabby': True}, 'foo': 'bar'},\n"
- " {'dog': 'yorkie'}])",
- matches_matcher),
- ]
-
- describe_examples = [
- ("Length mismatch: len(L1)=2 != len(L2)=0", {}, matches_matcher),
- ("Dictionaries do not match at fluffy. d1: True d2: False",
- [{'foo': 'bar', 'baz': 'qoox',
- 'cat': {'tabby': True, 'fluffy': True}},
- {'dog': 'yorkie'}],
- matches_matcher),
- ]
-
-
-class TestIsSubDictOf(testtools.TestCase, helpers.TestMatchersInterface):
-
- matches_matcher = matchers.IsSubDictOf(
- {'foo': 'bar', 'baz': 'DONTCARE',
- 'cat': {'tabby': True, 'fluffy': False}}
- )
-
- matches_matches = [
- {'foo': 'bar', 'baz': 'noox', 'cat': {'tabby': True, 'fluffy': False}},
- {'foo': 'bar', 'baz': 'quux'}
- ]
-
- matches_mismatches = [
- {'foo': 'bop', 'baz': 'qux',
- 'cat': {'tabby': True, 'fluffy': False}},
- {'foo': 'bar', 'baz': 'quux',
- 'cat': {'tabby': True, 'fluffy': True}},
- {'foo': 'bar', 'cat': {'tabby': True, 'fluffy': False}, 'dog': None},
- ]
-
- str_examples = [
- ("IsSubDictOf({'foo': 'bar', 'baz': 'DONTCARE',"
- " 'cat': {'fluffy': False, 'tabby': True}})",
- matches_matcher),
- ]
-
- describe_examples = [
- ("Dictionaries do not match at fluffy. d1: False d2: True",
- {'foo': 'bar', 'baz': 'quux',
- 'cat': {'tabby': True, 'fluffy': True}}, matches_matcher),
- ("Dictionaries do not match at foo. d1: bar d2: bop",
- {'foo': 'bop', 'baz': 'quux',
- 'cat': {'tabby': True, 'fluffy': False}}, matches_matcher),
- ]
-
-
-class TestXMLMatches(testtools.TestCase, helpers.TestMatchersInterface):
-
- matches_matcher = matchers.XMLMatches("""<?xml version="1.0"?>
-<root>
- <text>some text here</text>
- <text>some other text here</text>
- <attrs key1="spam" key2="DONTCARE"/>
- <children>
- <!--This is a comment-->
- <child1>child 1</child1>
- <child2>child 2</child2>
- <child3>DONTCARE</child3>
- <?spam processing instruction?>
- </children>
-</root>""")
-
- matches_matches = ["""<?xml version="1.0"?>
-<root>
- <text>some text here</text>
- <text>some other text here</text>
- <attrs key2="spam" key1="spam"/>
- <children>
- <child1>child 1</child1>
- <child2>child 2</child2>
- <child3>child 3</child3>
- </children>
-</root>""",
- """<?xml version="1.0"?>
-<root>
- <text>some text here</text>
- <text>some other text here</text>
- <attrs key1="spam" key2="quux"/>
- <children><child1>child 1</child1>
-<child2>child 2</child2>
-<child3>blah</child3>
- </children>
-</root>""",
- ]
-
- matches_mismatches = ["""<?xml version="1.0"?>
-<root>
- <text>some text here</text>
- <text>mismatch text</text>
- <attrs key1="spam" key2="quux"/>
- <children>
- <child1>child 1</child1>
- <child2>child 2</child2>
- <child3>child 3</child3>
- </children>
-</root>""",
- """<?xml version="1.0"?>
-<root>
- <text>some text here</text>
- <text>some other text here</text>
- <attrs key1="spam" key3="quux"/>
- <children>
- <child1>child 1</child1>
- <child2>child 2</child2>
- <child3>child 3</child3>
- </children>
-</root>""",
- """<?xml version="1.0"?>
-<root>
- <text>some text here</text>
- <text>some other text here</text>
- <attrs key1="quux" key2="quux"/>
- <children>
- <child1>child 1</child1>
- <child2>child 2</child2>
- <child3>child 3</child3>
- </children>
-</root>""",
- """<?xml version="1.0"?>
-<root>
- <text>some text here</text>
- <text>some other text here</text>
- <attrs key1="spam" key2="quux"/>
- <children>
- <child1>child 1</child1>
- <child4>child 4</child4>
- <child2>child 2</child2>
- <child3>child 3</child3>
- </children>
-</root>""",
- """<?xml version="1.0"?>
-<root>
- <text>some text here</text>
- <text>some other text here</text>
- <attrs key1="spam" key2="quux"/>
- <children>
- <child1>child 1</child1>
- <child2>child 2</child2>
- </children>
-</root>""",
- """<?xml version="1.0"?>
-<root>
- <text>some text here</text>
- <text>some other text here</text>
- <attrs key1="spam" key2="quux"/>
- <children>
- <child1>child 1</child1>
- <child2>child 2</child2>
- <child3>child 3</child3>
- <child4>child 4</child4>
- </children>
-</root>""",
- ]
-
- str_examples = [
- ("XMLMatches('<?xml version=\"1.0\"?>\\n"
- "<root>\\n"
- " <text>some text here</text>\\n"
- " <text>some other text here</text>\\n"
- " <attrs key1=\"spam\" key2=\"DONTCARE\"/>\\n"
- " <children>\\n"
- " <!--This is a comment-->\\n"
- " <child1>child 1</child1>\\n"
- " <child2>child 2</child2>\\n"
- " <child3>DONTCARE</child3>\\n"
- " <?spam processing instruction?>\\n"
- " </children>\\n"
- "</root>')", matches_matcher),
- ]
-
- describe_examples = [
- ("/root/text[1]: XML text value mismatch: expected text value: "
- "'some other text here'; actual value: 'mismatch text'",
- """<?xml version="1.0"?>
-<root>
- <text>some text here</text>
- <text>mismatch text</text>
- <attrs key1="spam" key2="quux"/>
- <children>
- <child1>child 1</child1>
- <child2>child 2</child2>
- <child3>child 3</child3>
- </children>
-</root>""", matches_matcher),
- ("/root/attrs[2]: XML attributes mismatch: keys only in expected: "
- "key2; keys only in actual: key3",
- """<?xml version="1.0"?>
-<root>
- <text>some text here</text>
- <text>some other text here</text>
- <attrs key1="spam" key3="quux"/>
- <children>
- <child1>child 1</child1>
- <child2>child 2</child2>
- <child3>child 3</child3>
- </children>
-</root>""", matches_matcher),
- ("/root/attrs[2]: XML attribute value mismatch: expected value of "
- "attribute key1: 'spam'; actual value: 'quux'",
- """<?xml version="1.0"?>
-<root>
- <text>some text here</text>
- <text>some other text here</text>
- <attrs key1="quux" key2="quux"/>
- <children>
- <child1>child 1</child1>
- <child2>child 2</child2>
- <child3>child 3</child3>
- </children>
-</root>""", matches_matcher),
- ("/root/children[3]: XML tag mismatch at index 1: expected tag "
- "<child2>; actual tag <child4>",
- """<?xml version="1.0"?>
-<root>
- <text>some text here</text>
- <text>some other text here</text>
- <attrs key1="spam" key2="quux"/>
- <children>
- <child1>child 1</child1>
- <child4>child 4</child4>
- <child2>child 2</child2>
- <child3>child 3</child3>
- </children>
-</root>""", matches_matcher),
- ("/root/children[3]: XML expected child element <child3> not "
- "present at index 2",
- """<?xml version="1.0"?>
-<root>
- <text>some text here</text>
- <text>some other text here</text>
- <attrs key1="spam" key2="quux"/>
- <children>
- <child1>child 1</child1>
- <child2>child 2</child2>
- </children>
-</root>""", matches_matcher),
- ("/root/children[3]: XML unexpected child element <child4> "
- "present at index 3",
- """<?xml version="1.0"?>
-<root>
- <text>some text here</text>
- <text>some other text here</text>
- <attrs key1="spam" key2="quux"/>
- <children>
- <child1>child 1</child1>
- <child2>child 2</child2>
- <child3>child 3</child3>
- <child4>child 4</child4>
- </children>
-</root>""", matches_matcher),
- ]
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
deleted file mode 100644
index e38f833298..0000000000
--- a/nova/tests/test_metadata.py
+++ /dev/null
@@ -1,865 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Tests for metadata service."""
-
-import base64
-import hashlib
-import hmac
-import re
-
-try:
- import cPickle as pickle
-except ImportError:
- import pickle
-
-import mock
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-import webob
-
-from nova.api.metadata import base
-from nova.api.metadata import handler
-from nova.api.metadata import password
-from nova import block_device
-from nova.compute import flavors
-from nova.conductor import api as conductor_api
-from nova import context
-from nova import db
-from nova.db.sqlalchemy import api
-from nova import exception
-from nova.network import api as network_api
-from nova import objects
-from nova import test
-from nova.tests import fake_block_device
-from nova.tests import fake_instance
-from nova.tests import fake_network
-from nova.tests.objects import test_instance_info_cache
-from nova.tests.objects import test_security_group
-from nova.virt import netutils
-
-CONF = cfg.CONF
-
-USER_DATA_STRING = ("This is an encoded string")
-ENCODE_USER_DATA_STRING = base64.b64encode(USER_DATA_STRING)
-
-INSTANCE = fake_instance.fake_db_instance(**
- {'id': 1,
- 'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2',
- 'name': 'fake',
- 'project_id': 'test',
- 'key_name': "mykey",
- 'key_data': "ssh-rsa AAAAB3Nzai....N3NtHw== someuser@somehost",
- 'host': 'test',
- 'launch_index': 1,
- 'instance_type': {'name': 'm1.tiny'},
- 'reservation_id': 'r-xxxxxxxx',
- 'user_data': ENCODE_USER_DATA_STRING,
- 'image_ref': 7,
- 'vcpus': 1,
- 'fixed_ips': [],
- 'root_device_name': '/dev/sda1',
- 'info_cache': test_instance_info_cache.fake_info_cache,
- 'hostname': 'test.novadomain',
- 'display_name': 'my_displayname',
- 'metadata': {},
- 'system_metadata': {},
- })
-
-
-def fake_inst_obj(context):
- return objects.Instance._from_db_object(
- context, objects.Instance(), INSTANCE,
- expected_attrs=['metadata', 'system_metadata',
- 'info_cache'])
-
-
-def get_default_sys_meta():
- return flavors.save_flavor_info(
- {}, flavors.get_default_flavor())
-
-
-def return_non_existing_address(*args, **kwarg):
- raise exception.NotFound()
-
-
-def fake_InstanceMetadata(stubs, inst_data, address=None,
- sgroups=None, content=None, extra_md=None,
- vd_driver=None, network_info=None):
- content = content or []
- extra_md = extra_md or {}
- if sgroups is None:
- sgroups = [dict(test_security_group.fake_secgroup,
- name='default')]
-
- def sg_get(*args, **kwargs):
- return sgroups
-
- stubs.Set(api, 'security_group_get_by_instance', sg_get)
- return base.InstanceMetadata(inst_data, address=address,
- content=content, extra_md=extra_md,
- vd_driver=vd_driver, network_info=network_info)
-
-
-def fake_request(stubs, mdinst, relpath, address="127.0.0.1",
- fake_get_metadata=None, headers=None,
- fake_get_metadata_by_instance_id=None):
-
- def get_metadata_by_remote_address(address):
- return mdinst
-
- app = handler.MetadataRequestHandler()
-
- if fake_get_metadata is None:
- fake_get_metadata = get_metadata_by_remote_address
-
- if stubs:
- stubs.Set(app, 'get_metadata_by_remote_address', fake_get_metadata)
-
- if fake_get_metadata_by_instance_id:
- stubs.Set(app, 'get_metadata_by_instance_id',
- fake_get_metadata_by_instance_id)
-
- request = webob.Request.blank(relpath)
- request.remote_addr = address
-
- if headers is not None:
- request.headers.update(headers)
-
- response = request.get_response(app)
- return response
-
-
-class MetadataTestCase(test.TestCase):
- def setUp(self):
- super(MetadataTestCase, self).setUp()
- self.context = context.RequestContext('fake', 'fake')
- self.instance = fake_inst_obj(self.context)
- self.instance.system_metadata = get_default_sys_meta()
- self.flags(use_local=True, group='conductor')
- fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
-
- def test_can_pickle_metadata(self):
- # Make sure that InstanceMetadata is possible to pickle. This is
- # required for memcache backend to work correctly.
- md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone())
- pickle.dumps(md, protocol=0)
-
- def test_user_data(self):
- inst = self.instance.obj_clone()
- inst['user_data'] = base64.b64encode("happy")
- md = fake_InstanceMetadata(self.stubs, inst)
- self.assertEqual(
- md.get_ec2_metadata(version='2009-04-04')['user-data'], "happy")
-
- def test_no_user_data(self):
- inst = self.instance.obj_clone()
- inst.user_data = None
- md = fake_InstanceMetadata(self.stubs, inst)
- obj = object()
- self.assertEqual(
- md.get_ec2_metadata(version='2009-04-04').get('user-data', obj),
- obj)
-
- def test_security_groups(self):
- inst = self.instance.obj_clone()
- sgroups = [dict(test_security_group.fake_secgroup, name='default'),
- dict(test_security_group.fake_secgroup, name='other')]
- expected = ['default', 'other']
-
- md = fake_InstanceMetadata(self.stubs, inst, sgroups=sgroups)
- data = md.get_ec2_metadata(version='2009-04-04')
- self.assertEqual(data['meta-data']['security-groups'], expected)
-
- def test_local_hostname_fqdn(self):
- md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone())
- data = md.get_ec2_metadata(version='2009-04-04')
- self.assertEqual(data['meta-data']['local-hostname'],
- "%s.%s" % (self.instance['hostname'], CONF.dhcp_domain))
-
- def test_format_instance_mapping(self):
- # Make sure that _format_instance_mappings works.
- ctxt = None
- instance_ref0 = objects.Instance(**{'id': 0,
- 'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
- 'root_device_name': None,
- 'default_ephemeral_device': None,
- 'default_swap_device': None})
- instance_ref1 = objects.Instance(**{'id': 0,
- 'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2',
- 'root_device_name': '/dev/sda1',
- 'default_ephemeral_device': None,
- 'default_swap_device': None})
-
- def fake_bdm_get(ctxt, uuid, use_slave=False):
- return [fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': 87654321,
- 'snapshot_id': None,
- 'no_device': None,
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'delete_on_termination': True,
- 'device_name': '/dev/sdh'}),
- fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': None,
- 'snapshot_id': None,
- 'no_device': None,
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'guest_format': 'swap',
- 'delete_on_termination': None,
- 'device_name': '/dev/sdc'}),
- fake_block_device.FakeDbBlockDeviceDict(
- {'volume_id': None,
- 'snapshot_id': None,
- 'no_device': None,
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'guest_format': None,
- 'delete_on_termination': None,
- 'device_name': '/dev/sdb'})]
-
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fake_bdm_get)
-
- expected = {'ami': 'sda1',
- 'root': '/dev/sda1',
- 'ephemeral0': '/dev/sdb',
- 'swap': '/dev/sdc',
- 'ebs0': '/dev/sdh'}
-
- conductor_api.LocalAPI()
-
- self.assertEqual(base._format_instance_mapping(ctxt,
- instance_ref0), block_device._DEFAULT_MAPPINGS)
- self.assertEqual(base._format_instance_mapping(ctxt,
- instance_ref1), expected)
-
- def test_pubkey(self):
- md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone())
- pubkey_ent = md.lookup("/2009-04-04/meta-data/public-keys")
-
- self.assertEqual(base.ec2_md_print(pubkey_ent),
- "0=%s" % self.instance['key_name'])
- self.assertEqual(base.ec2_md_print(pubkey_ent['0']['openssh-key']),
- self.instance['key_data'])
-
- def test_image_type_ramdisk(self):
- inst = self.instance.obj_clone()
- inst['ramdisk_id'] = 'ari-853667c0'
- md = fake_InstanceMetadata(self.stubs, inst)
- data = md.lookup("/latest/meta-data/ramdisk-id")
-
- self.assertIsNotNone(data)
- self.assertTrue(re.match('ari-[0-9a-f]{8}', data))
-
- def test_image_type_kernel(self):
- inst = self.instance.obj_clone()
- inst['kernel_id'] = 'aki-c2e26ff2'
- md = fake_InstanceMetadata(self.stubs, inst)
- data = md.lookup("/2009-04-04/meta-data/kernel-id")
-
- self.assertTrue(re.match('aki-[0-9a-f]{8}', data))
-
- self.assertEqual(
- md.lookup("/ec2/2009-04-04/meta-data/kernel-id"), data)
-
- inst.kernel_id = None
- md = fake_InstanceMetadata(self.stubs, inst)
- self.assertRaises(base.InvalidMetadataPath,
- md.lookup, "/2009-04-04/meta-data/kernel-id")
-
- def test_check_version(self):
- inst = self.instance.obj_clone()
- md = fake_InstanceMetadata(self.stubs, inst)
-
- self.assertTrue(md._check_version('1.0', '2009-04-04'))
- self.assertFalse(md._check_version('2009-04-04', '1.0'))
-
- self.assertFalse(md._check_version('2009-04-04', '2008-09-01'))
- self.assertTrue(md._check_version('2008-09-01', '2009-04-04'))
-
- self.assertTrue(md._check_version('2009-04-04', '2009-04-04'))
-
- def test_InstanceMetadata_uses_passed_network_info(self):
- network_info = []
-
- self.mox.StubOutWithMock(netutils, "get_injected_network_template")
- netutils.get_injected_network_template(network_info).AndReturn(False)
- self.mox.ReplayAll()
-
- base.InstanceMetadata(fake_inst_obj(self.context),
- network_info=network_info)
-
- def test_InstanceMetadata_invoke_metadata_for_config_drive(self):
- inst = self.instance.obj_clone()
- inst_md = base.InstanceMetadata(inst)
- for (path, value) in inst_md.metadata_for_config_drive():
- self.assertIsNotNone(path)
-
- def test_InstanceMetadata_queries_network_API_when_needed(self):
- network_info_from_api = []
-
- self.mox.StubOutWithMock(netutils, "get_injected_network_template")
-
- netutils.get_injected_network_template(
- network_info_from_api).AndReturn(False)
-
- self.mox.ReplayAll()
-
- base.InstanceMetadata(fake_inst_obj(self.context))
-
- def test_local_ipv4_from_nw_info(self):
- nw_info = fake_network.fake_get_instance_nw_info(self.stubs,
- num_networks=2)
- expected_local = "192.168.1.100"
- md = fake_InstanceMetadata(self.stubs, self.instance,
- network_info=nw_info)
- data = md.get_ec2_metadata(version='2009-04-04')
- self.assertEqual(data['meta-data']['local-ipv4'], expected_local)
-
- def test_local_ipv4_from_address(self):
- nw_info = fake_network.fake_get_instance_nw_info(self.stubs,
- num_networks=2)
- expected_local = "fake"
- md = fake_InstanceMetadata(self.stubs, self.instance,
- network_info=nw_info, address="fake")
- data = md.get_ec2_metadata(version='2009-04-04')
- self.assertEqual(data['meta-data']['local-ipv4'], expected_local)
-
- def test_local_ipv4_from_nw_none(self):
- md = fake_InstanceMetadata(self.stubs, self.instance,
- network_info=[])
- data = md.get_ec2_metadata(version='2009-04-04')
- self.assertEqual(data['meta-data']['local-ipv4'], '')
-
-
-class OpenStackMetadataTestCase(test.TestCase):
- def setUp(self):
- super(OpenStackMetadataTestCase, self).setUp()
- self.context = context.RequestContext('fake', 'fake')
- self.instance = fake_inst_obj(self.context)
- self.instance['system_metadata'] = get_default_sys_meta()
- self.flags(use_local=True, group='conductor')
- fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
-
- def test_with_primitive_instance(self):
- mdinst = fake_InstanceMetadata(self.stubs, INSTANCE)
- result = mdinst.lookup('/openstack')
- self.assertIn('latest', result)
-
- def test_top_level_listing(self):
- # request for /openstack/<version>/ should show metadata.json
- inst = self.instance.obj_clone()
- mdinst = fake_InstanceMetadata(self.stubs, inst)
-
- result = mdinst.lookup("/openstack")
-
- # trailing / should not affect anything
- self.assertEqual(result, mdinst.lookup("/openstack/"))
-
- # the 'content' should not show up in directory listing
- self.assertNotIn(base.CONTENT_DIR, result)
- self.assertIn('2012-08-10', result)
- self.assertIn('latest', result)
-
- def test_version_content_listing(self):
- # request for /openstack/<version>/ should show metadata.json
- inst = self.instance.obj_clone()
- mdinst = fake_InstanceMetadata(self.stubs, inst)
-
- listing = mdinst.lookup("/openstack/2012-08-10")
- self.assertIn("meta_data.json", listing)
-
- def test_returns_apis_supported_in_havana_version(self):
- mdinst = fake_InstanceMetadata(self.stubs, self.instance)
- havana_supported_apis = mdinst.lookup("/openstack/2013-10-17")
-
- self.assertEqual([base.MD_JSON_NAME, base.UD_NAME, base.PASS_NAME,
- base.VD_JSON_NAME], havana_supported_apis)
-
- def test_returns_apis_supported_in_folsom_version(self):
- mdinst = fake_InstanceMetadata(self.stubs, self.instance)
- folsom_supported_apis = mdinst.lookup("/openstack/2012-08-10")
-
- self.assertEqual([base.MD_JSON_NAME, base.UD_NAME],
- folsom_supported_apis)
-
- def test_returns_apis_supported_in_grizzly_version(self):
- mdinst = fake_InstanceMetadata(self.stubs, self.instance)
- grizzly_supported_apis = mdinst.lookup("/openstack/2013-04-04")
-
- self.assertEqual([base.MD_JSON_NAME, base.UD_NAME, base.PASS_NAME],
- grizzly_supported_apis)
-
- def test_metadata_json(self):
- inst = self.instance.obj_clone()
- content = [
- ('/etc/my.conf', "content of my.conf"),
- ('/root/hello', "content of /root/hello"),
- ]
-
- mdinst = fake_InstanceMetadata(self.stubs, inst,
- content=content)
- mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
- mdjson = mdinst.lookup("/openstack/latest/meta_data.json")
-
- mddict = jsonutils.loads(mdjson)
-
- self.assertEqual(mddict['uuid'], self.instance['uuid'])
- self.assertIn('files', mddict)
-
- self.assertIn('public_keys', mddict)
- self.assertEqual(mddict['public_keys'][self.instance['key_name']],
- self.instance['key_data'])
-
- self.assertIn('launch_index', mddict)
- self.assertEqual(mddict['launch_index'], self.instance['launch_index'])
-
- # verify that each of the things we put in content
- # resulted in an entry in 'files', that their content
- # there is as expected, and that /content lists them.
- for (path, content) in content:
- fent = [f for f in mddict['files'] if f['path'] == path]
- self.assertEqual(1, len(fent))
- fent = fent[0]
- found = mdinst.lookup("/openstack%s" % fent['content_path'])
- self.assertEqual(found, content)
-
- def test_extra_md(self):
- # make sure extra_md makes it through to metadata
- inst = self.instance.obj_clone()
- extra = {'foo': 'bar', 'mylist': [1, 2, 3],
- 'mydict': {"one": 1, "two": 2}}
- mdinst = fake_InstanceMetadata(self.stubs, inst, extra_md=extra)
-
- mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
- mddict = jsonutils.loads(mdjson)
-
- for key, val in extra.iteritems():
- self.assertEqual(mddict[key], val)
-
- def test_password(self):
- # make sure extra_md makes it through to metadata
- inst = self.instance.obj_clone()
- mdinst = fake_InstanceMetadata(self.stubs, inst)
-
- result = mdinst.lookup("/openstack/latest/password")
- self.assertEqual(result, password.handle_password)
-
- def test_userdata(self):
- inst = self.instance.obj_clone()
- mdinst = fake_InstanceMetadata(self.stubs, inst)
-
- userdata_found = mdinst.lookup("/openstack/2012-08-10/user_data")
- self.assertEqual(USER_DATA_STRING, userdata_found)
-
- # since we had user-data in this instance, it should be in listing
- self.assertIn('user_data', mdinst.lookup("/openstack/2012-08-10"))
-
- inst.user_data = None
- mdinst = fake_InstanceMetadata(self.stubs, inst)
-
- # since this instance had no user-data it should not be there.
- self.assertNotIn('user_data', mdinst.lookup("/openstack/2012-08-10"))
-
- self.assertRaises(base.InvalidMetadataPath,
- mdinst.lookup, "/openstack/2012-08-10/user_data")
-
- def test_random_seed(self):
- inst = self.instance.obj_clone()
- mdinst = fake_InstanceMetadata(self.stubs, inst)
-
- # verify that 2013-04-04 has the 'random' field
- mdjson = mdinst.lookup("/openstack/2013-04-04/meta_data.json")
- mddict = jsonutils.loads(mdjson)
-
- self.assertIn("random_seed", mddict)
- self.assertEqual(len(base64.b64decode(mddict["random_seed"])), 512)
-
- # verify that older version do not have it
- mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
- self.assertNotIn("random_seed", jsonutils.loads(mdjson))
-
- def test_no_dashes_in_metadata(self):
- # top level entries in meta_data should not contain '-' in their name
- inst = self.instance.obj_clone()
- mdinst = fake_InstanceMetadata(self.stubs, inst)
- mdjson = jsonutils.loads(
- mdinst.lookup("/openstack/latest/meta_data.json"))
-
- self.assertEqual([], [k for k in mdjson.keys() if k.find("-") != -1])
-
- def test_vendor_data_presence(self):
- inst = self.instance.obj_clone()
- mdinst = fake_InstanceMetadata(self.stubs, inst)
-
- # verify that 2013-10-17 has the vendor_data.json file
- result = mdinst.lookup("/openstack/2013-10-17")
- self.assertIn('vendor_data.json', result)
-
- # verify that older version do not have it
- result = mdinst.lookup("/openstack/2013-04-04")
- self.assertNotIn('vendor_data.json', result)
-
- def test_vendor_data_response(self):
- inst = self.instance.obj_clone()
-
- mydata = {'mykey1': 'value1', 'mykey2': 'value2'}
-
- class myVdriver(base.VendorDataDriver):
- def __init__(self, *args, **kwargs):
- super(myVdriver, self).__init__(*args, **kwargs)
- data = mydata.copy()
- uuid = kwargs['instance']['uuid']
- data.update({'inst_uuid': uuid})
- self.data = data
-
- def get(self):
- return self.data
-
- mdinst = fake_InstanceMetadata(self.stubs, inst, vd_driver=myVdriver)
-
- # verify that 2013-10-17 has the vendor_data.json file
- vdpath = "/openstack/2013-10-17/vendor_data.json"
- vd = jsonutils.loads(mdinst.lookup(vdpath))
-
- # the instance should be passed through, and our class copies the
- # uuid through to 'inst_uuid'.
- self.assertEqual(vd['inst_uuid'], inst['uuid'])
-
- # check the other expected values
- for k, v in mydata.items():
- self.assertEqual(vd[k], v)
-
-
-class MetadataHandlerTestCase(test.TestCase):
- """Test that metadata is returning proper values."""
-
- def setUp(self):
- super(MetadataHandlerTestCase, self).setUp()
-
- fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
- self.context = context.RequestContext('fake', 'fake')
- self.instance = fake_inst_obj(self.context)
- self.instance.system_metadata = get_default_sys_meta()
- self.flags(use_local=True, group='conductor')
- self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
- address=None, sgroups=None)
-
- def test_callable(self):
-
- def verify(req, meta_data):
- self.assertIsInstance(meta_data, CallableMD)
- return "foo"
-
- class CallableMD(object):
- def lookup(self, path_info):
- return verify
-
- response = fake_request(self.stubs, CallableMD(), "/bar")
- self.assertEqual(response.status_int, 200)
- self.assertEqual(response.body, "foo")
-
- def test_root(self):
- expected = "\n".join(base.VERSIONS) + "\nlatest"
- response = fake_request(self.stubs, self.mdinst, "/")
- self.assertEqual(response.body, expected)
-
- response = fake_request(self.stubs, self.mdinst, "/foo/../")
- self.assertEqual(response.body, expected)
-
- def test_root_metadata_proxy_enabled(self):
- self.flags(service_metadata_proxy=True,
- group='neutron')
-
- expected = "\n".join(base.VERSIONS) + "\nlatest"
- response = fake_request(self.stubs, self.mdinst, "/")
- self.assertEqual(response.body, expected)
-
- response = fake_request(self.stubs, self.mdinst, "/foo/../")
- self.assertEqual(response.body, expected)
-
- def test_version_root(self):
- response = fake_request(self.stubs, self.mdinst, "/2009-04-04")
- response_ctype = response.headers['Content-Type']
- self.assertTrue(response_ctype.startswith("text/plain"))
- self.assertEqual(response.body, 'meta-data/\nuser-data')
-
- response = fake_request(self.stubs, self.mdinst, "/9999-99-99")
- self.assertEqual(response.status_int, 404)
-
- def test_json_data(self):
- response = fake_request(self.stubs, self.mdinst,
- "/openstack/latest/meta_data.json")
- response_ctype = response.headers['Content-Type']
- self.assertTrue(response_ctype.startswith("application/json"))
-
- response = fake_request(self.stubs, self.mdinst,
- "/openstack/latest/vendor_data.json")
- response_ctype = response.headers['Content-Type']
- self.assertTrue(response_ctype.startswith("application/json"))
-
- def test_user_data_non_existing_fixed_address(self):
- self.stubs.Set(network_api.API, 'get_fixed_ip_by_address',
- return_non_existing_address)
- response = fake_request(None, self.mdinst, "/2009-04-04/user-data",
- "127.1.1.1")
- self.assertEqual(response.status_int, 404)
-
- def test_fixed_address_none(self):
- response = fake_request(None, self.mdinst,
- relpath="/2009-04-04/user-data", address=None)
- self.assertEqual(response.status_int, 500)
-
- def test_invalid_path_is_404(self):
- response = fake_request(self.stubs, self.mdinst,
- relpath="/2009-04-04/user-data-invalid")
- self.assertEqual(response.status_int, 404)
-
- def test_user_data_with_use_forwarded_header(self):
- expected_addr = "192.192.192.2"
-
- def fake_get_metadata(address):
- if address == expected_addr:
- return self.mdinst
- else:
- raise Exception("Expected addr of %s, got %s" %
- (expected_addr, address))
-
- self.flags(use_forwarded_for=True)
- response = fake_request(self.stubs, self.mdinst,
- relpath="/2009-04-04/user-data",
- address="168.168.168.1",
- fake_get_metadata=fake_get_metadata,
- headers={'X-Forwarded-For': expected_addr})
-
- self.assertEqual(response.status_int, 200)
- response_ctype = response.headers['Content-Type']
- self.assertTrue(response_ctype.startswith("text/plain"))
- self.assertEqual(response.body,
- base64.b64decode(self.instance['user_data']))
-
- response = fake_request(self.stubs, self.mdinst,
- relpath="/2009-04-04/user-data",
- address="168.168.168.1",
- fake_get_metadata=fake_get_metadata,
- headers=None)
- self.assertEqual(response.status_int, 500)
-
- @mock.patch('nova.utils.constant_time_compare')
- def test_by_instance_id_uses_constant_time_compare(self, mock_compare):
- mock_compare.side_effect = test.TestingException
-
- req = webob.Request.blank('/')
- hnd = handler.MetadataRequestHandler()
-
- req.headers['X-Instance-ID'] = 'fake-inst'
- req.headers['X-Tenant-ID'] = 'fake-proj'
-
- self.assertRaises(test.TestingException,
- hnd._handle_instance_id_request, req)
-
- self.assertEqual(1, mock_compare.call_count)
-
- def test_user_data_with_neutron_instance_id(self):
- expected_instance_id = 'a-b-c-d'
-
- def fake_get_metadata(instance_id, remote_address):
- if remote_address is None:
- raise Exception('Expected X-Forwared-For header')
- elif instance_id == expected_instance_id:
- return self.mdinst
- else:
- # raise the exception to aid with 500 response code test
- raise Exception("Expected instance_id of %s, got %s" %
- (expected_instance_id, instance_id))
-
- signed = hmac.new(
- CONF.neutron.metadata_proxy_shared_secret,
- expected_instance_id,
- hashlib.sha256).hexdigest()
-
- # try a request with service disabled
- response = fake_request(
- self.stubs, self.mdinst,
- relpath="/2009-04-04/user-data",
- address="192.192.192.2",
- headers={'X-Instance-ID': 'a-b-c-d',
- 'X-Tenant-ID': 'test',
- 'X-Instance-ID-Signature': signed})
- self.assertEqual(response.status_int, 200)
-
- # now enable the service
- self.flags(service_metadata_proxy=True,
- group='neutron')
- response = fake_request(
- self.stubs, self.mdinst,
- relpath="/2009-04-04/user-data",
- address="192.192.192.2",
- fake_get_metadata_by_instance_id=fake_get_metadata,
- headers={'X-Forwarded-For': '192.192.192.2',
- 'X-Instance-ID': 'a-b-c-d',
- 'X-Tenant-ID': 'test',
- 'X-Instance-ID-Signature': signed})
-
- self.assertEqual(response.status_int, 200)
- response_ctype = response.headers['Content-Type']
- self.assertTrue(response_ctype.startswith("text/plain"))
- self.assertEqual(response.body,
- base64.b64decode(self.instance['user_data']))
-
- # mismatched signature
- response = fake_request(
- self.stubs, self.mdinst,
- relpath="/2009-04-04/user-data",
- address="192.192.192.2",
- fake_get_metadata_by_instance_id=fake_get_metadata,
- headers={'X-Forwarded-For': '192.192.192.2',
- 'X-Instance-ID': 'a-b-c-d',
- 'X-Tenant-ID': 'test',
- 'X-Instance-ID-Signature': ''})
-
- self.assertEqual(response.status_int, 403)
-
- # missing X-Tenant-ID from request
- response = fake_request(
- self.stubs, self.mdinst,
- relpath="/2009-04-04/user-data",
- address="192.192.192.2",
- fake_get_metadata_by_instance_id=fake_get_metadata,
- headers={'X-Forwarded-For': '192.192.192.2',
- 'X-Instance-ID': 'a-b-c-d',
- 'X-Instance-ID-Signature': signed})
-
- self.assertEqual(response.status_int, 400)
-
- # mismatched X-Tenant-ID
- response = fake_request(
- self.stubs, self.mdinst,
- relpath="/2009-04-04/user-data",
- address="192.192.192.2",
- fake_get_metadata_by_instance_id=fake_get_metadata,
- headers={'X-Forwarded-For': '192.192.192.2',
- 'X-Instance-ID': 'a-b-c-d',
- 'X-Tenant-ID': 'FAKE',
- 'X-Instance-ID-Signature': signed})
-
- self.assertEqual(response.status_int, 404)
-
- # without X-Forwarded-For
- response = fake_request(
- self.stubs, self.mdinst,
- relpath="/2009-04-04/user-data",
- address="192.192.192.2",
- fake_get_metadata_by_instance_id=fake_get_metadata,
- headers={'X-Instance-ID': 'a-b-c-d',
- 'X-Tenant-ID': 'test',
- 'X-Instance-ID-Signature': signed})
-
- self.assertEqual(response.status_int, 500)
-
- # unexpected Instance-ID
- signed = hmac.new(
- CONF.neutron.metadata_proxy_shared_secret,
- 'z-z-z-z',
- hashlib.sha256).hexdigest()
-
- response = fake_request(
- self.stubs, self.mdinst,
- relpath="/2009-04-04/user-data",
- address="192.192.192.2",
- fake_get_metadata_by_instance_id=fake_get_metadata,
- headers={'X-Forwarded-For': '192.192.192.2',
- 'X-Instance-ID': 'z-z-z-z',
- 'X-Tenant-ID': 'test',
- 'X-Instance-ID-Signature': signed})
- self.assertEqual(response.status_int, 500)
-
- def test_get_metadata(self):
- def _test_metadata_path(relpath):
- # recursively confirm a http 200 from all meta-data elements
- # available at relpath.
- response = fake_request(self.stubs, self.mdinst,
- relpath=relpath)
- for item in response.body.split('\n'):
- if 'public-keys' in relpath:
- # meta-data/public-keys/0=keyname refers to
- # meta-data/public-keys/0
- item = item.split('=')[0]
- if item.endswith('/'):
- path = relpath + '/' + item
- _test_metadata_path(path)
- continue
-
- path = relpath + '/' + item
- response = fake_request(self.stubs, self.mdinst, relpath=path)
- self.assertEqual(response.status_int, 200, message=path)
-
- _test_metadata_path('/2009-04-04/meta-data')
-
-
-class MetadataPasswordTestCase(test.TestCase):
- def setUp(self):
- super(MetadataPasswordTestCase, self).setUp()
- fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
- self.context = context.RequestContext('fake', 'fake')
- self.instance = fake_inst_obj(self.context)
- self.instance.system_metadata = get_default_sys_meta()
- self.flags(use_local=True, group='conductor')
- self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
- address=None, sgroups=None)
- self.flags(use_local=True, group='conductor')
-
- def test_get_password(self):
- request = webob.Request.blank('')
- self.mdinst.password = 'foo'
- result = password.handle_password(request, self.mdinst)
- self.assertEqual(result, 'foo')
-
- def test_bad_method(self):
- request = webob.Request.blank('')
- request.method = 'PUT'
- self.assertRaises(webob.exc.HTTPBadRequest,
- password.handle_password, request, self.mdinst)
-
- @mock.patch('nova.objects.Instance.get_by_uuid')
- def _try_set_password(self, get_by_uuid, val='bar'):
- request = webob.Request.blank('')
- request.method = 'POST'
- request.body = val
- get_by_uuid.return_value = self.instance
-
- with mock.patch.object(self.instance, 'save') as save:
- password.handle_password(request, self.mdinst)
- save.assert_called_once_with()
-
- self.assertIn('password_0', self.instance.system_metadata)
-
- def test_set_password(self):
- self.mdinst.password = ''
- self._try_set_password()
-
- def test_conflict(self):
- self.mdinst.password = 'foo'
- self.assertRaises(webob.exc.HTTPConflict,
- self._try_set_password)
-
- def test_too_large(self):
- self.mdinst.password = ''
- self.assertRaises(webob.exc.HTTPBadRequest,
- self._try_set_password,
- val=('a' * (password.MAX_SIZE + 1)))
diff --git a/nova/tests/test_notifications.py b/nova/tests/test_notifications.py
deleted file mode 100644
index 21508ec2f7..0000000000
--- a/nova/tests/test_notifications.py
+++ /dev/null
@@ -1,394 +0,0 @@
-# Copyright (c) 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Tests for common notifications."""
-
-import copy
-
-import mock
-from oslo.config import cfg
-
-from nova.compute import flavors
-from nova.compute import task_states
-from nova.compute import vm_states
-from nova import context
-from nova import db
-from nova.network import api as network_api
-from nova import notifications
-from nova import test
-from nova.tests import fake_network
-from nova.tests import fake_notifier
-
-CONF = cfg.CONF
-CONF.import_opt('compute_driver', 'nova.virt.driver')
-
-
-class NotificationsTestCase(test.TestCase):
-
- def setUp(self):
- super(NotificationsTestCase, self).setUp()
-
- self.net_info = fake_network.fake_get_instance_nw_info(self.stubs, 1,
- 1)
-
- def fake_get_nw_info(cls, ctxt, instance):
- self.assertTrue(ctxt.is_admin)
- return self.net_info
-
- self.stubs.Set(network_api.API, 'get_instance_nw_info',
- fake_get_nw_info)
- fake_network.set_stub_network_methods(self.stubs)
-
- fake_notifier.stub_notifier(self.stubs)
- self.addCleanup(fake_notifier.reset)
-
- self.flags(compute_driver='nova.virt.fake.FakeDriver',
- network_manager='nova.network.manager.FlatManager',
- notify_on_state_change="vm_and_task_state",
- host='testhost')
-
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id, self.project_id)
-
- self.instance = self._wrapped_create()
-
- def _wrapped_create(self, params=None):
- instance_type = flavors.get_flavor_by_name('m1.tiny')
- sys_meta = flavors.save_flavor_info({}, instance_type)
- inst = {}
- inst['image_ref'] = 1
- inst['user_id'] = self.user_id
- inst['project_id'] = self.project_id
- inst['instance_type_id'] = instance_type['id']
- inst['root_gb'] = 0
- inst['ephemeral_gb'] = 0
- inst['access_ip_v4'] = '1.2.3.4'
- inst['access_ip_v6'] = 'feed:5eed'
- inst['display_name'] = 'test_instance'
- inst['hostname'] = 'test_instance_hostname'
- inst['node'] = 'test_instance_node'
- inst['system_metadata'] = sys_meta
- if params:
- inst.update(params)
- return db.instance_create(self.context, inst)
-
- def test_send_api_fault_disabled(self):
- self.flags(notify_api_faults=False)
- notifications.send_api_fault("http://example.com/foo", 500, None)
- self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
-
- def test_send_api_fault(self):
- self.flags(notify_api_faults=True)
- exception = None
- try:
- # Get a real exception with a call stack.
- raise test.TestingException("junk")
- except test.TestingException as e:
- exception = e
-
- notifications.send_api_fault("http://example.com/foo", 500, exception)
-
- self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
- n = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(n.priority, 'ERROR')
- self.assertEqual(n.event_type, 'api.fault')
- self.assertEqual(n.payload['url'], 'http://example.com/foo')
- self.assertEqual(n.payload['status'], 500)
- self.assertIsNotNone(n.payload['exception'])
-
- def test_notif_disabled(self):
-
- # test config disable of the notifications
- self.flags(notify_on_state_change=None)
-
- old = copy.copy(self.instance)
- self.instance["vm_state"] = vm_states.ACTIVE
-
- old_vm_state = old['vm_state']
- new_vm_state = self.instance["vm_state"]
- old_task_state = old['task_state']
- new_task_state = self.instance["task_state"]
-
- notifications.send_update_with_states(self.context, self.instance,
- old_vm_state, new_vm_state, old_task_state, new_task_state,
- verify_states=True)
-
- notifications.send_update(self.context, old, self.instance)
- self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
-
- def test_task_notif(self):
-
- # test config disable of just the task state notifications
- self.flags(notify_on_state_change="vm_state")
-
- # we should not get a notification on task stgate chagne now
- old = copy.copy(self.instance)
- self.instance["task_state"] = task_states.SPAWNING
-
- old_vm_state = old['vm_state']
- new_vm_state = self.instance["vm_state"]
- old_task_state = old['task_state']
- new_task_state = self.instance["task_state"]
-
- notifications.send_update_with_states(self.context, self.instance,
- old_vm_state, new_vm_state, old_task_state, new_task_state,
- verify_states=True)
-
- self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
-
- # ok now enable task state notifications and re-try
- self.flags(notify_on_state_change="vm_and_task_state")
-
- notifications.send_update(self.context, old, self.instance)
- self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
-
- def test_send_no_notif(self):
-
- # test notification on send no initial vm state:
- old_vm_state = self.instance['vm_state']
- new_vm_state = self.instance['vm_state']
- old_task_state = self.instance['task_state']
- new_task_state = self.instance['task_state']
-
- notifications.send_update_with_states(self.context, self.instance,
- old_vm_state, new_vm_state, old_task_state, new_task_state,
- service="compute", host=None, verify_states=True)
-
- self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
-
- def test_send_on_vm_change(self):
-
- # pretend we just transitioned to ACTIVE:
- params = {"vm_state": vm_states.ACTIVE}
- (old_ref, new_ref) = db.instance_update_and_get_original(self.context,
- self.instance['uuid'], params)
- notifications.send_update(self.context, old_ref, new_ref)
-
- self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
-
- def test_send_on_task_change(self):
-
- # pretend we just transitioned to task SPAWNING:
- params = {"task_state": task_states.SPAWNING}
- (old_ref, new_ref) = db.instance_update_and_get_original(self.context,
- self.instance['uuid'], params)
- notifications.send_update(self.context, old_ref, new_ref)
-
- self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
-
- def test_no_update_with_states(self):
-
- notifications.send_update_with_states(self.context, self.instance,
- vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
- task_states.SPAWNING, verify_states=True)
- self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
-
- def test_vm_update_with_states(self):
-
- notifications.send_update_with_states(self.context, self.instance,
- vm_states.BUILDING, vm_states.ACTIVE, task_states.SPAWNING,
- task_states.SPAWNING, verify_states=True)
- self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
- notif = fake_notifier.NOTIFICATIONS[0]
- payload = notif.payload
- access_ip_v4 = self.instance["access_ip_v4"]
- access_ip_v6 = self.instance["access_ip_v6"]
- display_name = self.instance["display_name"]
- hostname = self.instance["hostname"]
- node = self.instance["node"]
-
- self.assertEqual(vm_states.BUILDING, payload["old_state"])
- self.assertEqual(vm_states.ACTIVE, payload["state"])
- self.assertEqual(task_states.SPAWNING, payload["old_task_state"])
- self.assertEqual(task_states.SPAWNING, payload["new_task_state"])
- self.assertEqual(payload["access_ip_v4"], access_ip_v4)
- self.assertEqual(payload["access_ip_v6"], access_ip_v6)
- self.assertEqual(payload["display_name"], display_name)
- self.assertEqual(payload["hostname"], hostname)
- self.assertEqual(payload["node"], node)
-
- def test_task_update_with_states(self):
- self.flags(notify_on_state_change="vm_and_task_state")
-
- notifications.send_update_with_states(self.context, self.instance,
- vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
- None, verify_states=True)
- self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
- notif = fake_notifier.NOTIFICATIONS[0]
- payload = notif.payload
- access_ip_v4 = self.instance["access_ip_v4"]
- access_ip_v6 = self.instance["access_ip_v6"]
- display_name = self.instance["display_name"]
- hostname = self.instance["hostname"]
-
- self.assertEqual(vm_states.BUILDING, payload["old_state"])
- self.assertEqual(vm_states.BUILDING, payload["state"])
- self.assertEqual(task_states.SPAWNING, payload["old_task_state"])
- self.assertIsNone(payload["new_task_state"])
- self.assertEqual(payload["access_ip_v4"], access_ip_v4)
- self.assertEqual(payload["access_ip_v6"], access_ip_v6)
- self.assertEqual(payload["display_name"], display_name)
- self.assertEqual(payload["hostname"], hostname)
-
- def test_update_no_service_name(self):
- notifications.send_update_with_states(self.context, self.instance,
- vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
- None)
- self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
-
- # service name should default to 'compute'
- notif = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual('compute.testhost', notif.publisher_id)
-
- def test_update_with_service_name(self):
- notifications.send_update_with_states(self.context, self.instance,
- vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
- None, service="testservice")
- self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
-
- # service name should default to 'compute'
- notif = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual('testservice.testhost', notif.publisher_id)
-
- def test_update_with_host_name(self):
- notifications.send_update_with_states(self.context, self.instance,
- vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
- None, host="someotherhost")
- self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
-
- # service name should default to 'compute'
- notif = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual('compute.someotherhost', notif.publisher_id)
-
- def test_payload_has_fixed_ip_labels(self):
- info = notifications.info_from_instance(self.context, self.instance,
- self.net_info, None)
- self.assertIn("fixed_ips", info)
- self.assertEqual(info["fixed_ips"][0]["label"], "test1")
-
- def test_payload_has_vif_mac_address(self):
- info = notifications.info_from_instance(self.context, self.instance,
- self.net_info, None)
- self.assertIn("fixed_ips", info)
- self.assertEqual(self.net_info[0]['address'],
- info["fixed_ips"][0]["vif_mac"])
-
- def test_payload_has_cell_name_empty(self):
- info = notifications.info_from_instance(self.context, self.instance,
- self.net_info, None)
- self.assertIn("cell_name", info)
- self.assertIsNone(self.instance['cell_name'])
- self.assertEqual("", info["cell_name"])
-
- def test_payload_has_cell_name(self):
- self.instance['cell_name'] = "cell1"
- info = notifications.info_from_instance(self.context, self.instance,
- self.net_info, None)
- self.assertIn("cell_name", info)
- self.assertEqual("cell1", info["cell_name"])
-
- def test_payload_has_progress_empty(self):
- info = notifications.info_from_instance(self.context, self.instance,
- self.net_info, None)
- self.assertIn("progress", info)
- self.assertIsNone(self.instance['progress'])
- self.assertEqual("", info["progress"])
-
- def test_payload_has_progress(self):
- self.instance['progress'] = 50
- info = notifications.info_from_instance(self.context, self.instance,
- self.net_info, None)
- self.assertIn("progress", info)
- self.assertEqual(50, info["progress"])
-
- def test_send_access_ip_update(self):
- notifications.send_update(self.context, self.instance, self.instance)
- self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
- notif = fake_notifier.NOTIFICATIONS[0]
- payload = notif.payload
- access_ip_v4 = self.instance["access_ip_v4"]
- access_ip_v6 = self.instance["access_ip_v6"]
-
- self.assertEqual(payload["access_ip_v4"], access_ip_v4)
- self.assertEqual(payload["access_ip_v6"], access_ip_v6)
-
- def test_send_name_update(self):
- param = {"display_name": "new_display_name"}
- new_name_inst = self._wrapped_create(params=param)
- notifications.send_update(self.context, self.instance, new_name_inst)
- self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
- notif = fake_notifier.NOTIFICATIONS[0]
- payload = notif.payload
- old_display_name = self.instance["display_name"]
- new_display_name = new_name_inst["display_name"]
-
- self.assertEqual(payload["old_display_name"], old_display_name)
- self.assertEqual(payload["display_name"], new_display_name)
-
- def test_send_no_state_change(self):
- called = [False]
-
- def sending_no_state_change(context, instance, **kwargs):
- called[0] = True
- self.stubs.Set(notifications, '_send_instance_update_notification',
- sending_no_state_change)
- notifications.send_update(self.context, self.instance, self.instance)
- self.assertTrue(called[0])
-
- def test_fail_sending_update(self):
- def fail_sending(context, instance, **kwargs):
- raise Exception('failed to notify')
- self.stubs.Set(notifications, '_send_instance_update_notification',
- fail_sending)
-
- notifications.send_update(self.context, self.instance, self.instance)
- self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
-
-
-class NotificationsFormatTestCase(test.NoDBTestCase):
-
- def test_state_computation(self):
- instance = {'vm_state': mock.sentinel.vm_state,
- 'task_state': mock.sentinel.task_state}
- states = notifications._compute_states_payload(instance)
- self.assertEqual(mock.sentinel.vm_state, states['state'])
- self.assertEqual(mock.sentinel.vm_state, states['old_state'])
- self.assertEqual(mock.sentinel.task_state, states['old_task_state'])
- self.assertEqual(mock.sentinel.task_state, states['new_task_state'])
-
- states = notifications._compute_states_payload(
- instance,
- old_vm_state=mock.sentinel.old_vm_state,
- )
- self.assertEqual(mock.sentinel.vm_state, states['state'])
- self.assertEqual(mock.sentinel.old_vm_state, states['old_state'])
- self.assertEqual(mock.sentinel.task_state, states['old_task_state'])
- self.assertEqual(mock.sentinel.task_state, states['new_task_state'])
-
- states = notifications._compute_states_payload(
- instance,
- old_vm_state=mock.sentinel.old_vm_state,
- old_task_state=mock.sentinel.old_task_state,
- new_vm_state=mock.sentinel.new_vm_state,
- new_task_state=mock.sentinel.new_task_state,
- )
-
- self.assertEqual(mock.sentinel.new_vm_state, states['state'])
- self.assertEqual(mock.sentinel.old_vm_state, states['old_state'])
- self.assertEqual(mock.sentinel.old_task_state,
- states['old_task_state'])
- self.assertEqual(mock.sentinel.new_task_state,
- states['new_task_state'])
diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py
deleted file mode 100644
index 98f5636cbc..0000000000
--- a/nova/tests/test_nova_manage.py
+++ /dev/null
@@ -1,467 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# Copyright 2011 Ilya Alekseyev
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import StringIO
-import sys
-
-import fixtures
-import mock
-
-from nova.cmd import manage
-from nova import context
-from nova import db
-from nova import exception
-from nova.i18n import _
-from nova import test
-from nova.tests.db import fakes as db_fakes
-from nova.tests.objects import test_network
-
-
-class FixedIpCommandsTestCase(test.TestCase):
- def setUp(self):
- super(FixedIpCommandsTestCase, self).setUp()
- db_fakes.stub_out_db_network_api(self.stubs)
- self.commands = manage.FixedIpCommands()
-
- def test_reserve(self):
- self.commands.reserve('192.168.0.100')
- address = db.fixed_ip_get_by_address(context.get_admin_context(),
- '192.168.0.100')
- self.assertEqual(address['reserved'], True)
-
- def test_reserve_nonexistent_address(self):
- self.assertEqual(2, self.commands.reserve('55.55.55.55'))
-
- def test_unreserve(self):
- self.commands.unreserve('192.168.0.100')
- address = db.fixed_ip_get_by_address(context.get_admin_context(),
- '192.168.0.100')
- self.assertEqual(address['reserved'], False)
-
- def test_unreserve_nonexistent_address(self):
- self.assertEqual(2, self.commands.unreserve('55.55.55.55'))
-
- def test_list(self):
- self.useFixture(fixtures.MonkeyPatch('sys.stdout',
- StringIO.StringIO()))
- self.commands.list()
- self.assertNotEqual(1, sys.stdout.getvalue().find('192.168.0.100'))
-
- def test_list_just_one_host(self):
- def fake_fixed_ip_get_by_host(*args, **kwargs):
- return [db_fakes.fixed_ip_fields]
-
- self.useFixture(fixtures.MonkeyPatch(
- 'nova.db.fixed_ip_get_by_host',
- fake_fixed_ip_get_by_host))
- self.useFixture(fixtures.MonkeyPatch('sys.stdout',
- StringIO.StringIO()))
- self.commands.list('banana')
- self.assertNotEqual(1, sys.stdout.getvalue().find('192.168.0.100'))
-
-
-class FloatingIpCommandsTestCase(test.TestCase):
- def setUp(self):
- super(FloatingIpCommandsTestCase, self).setUp()
- db_fakes.stub_out_db_network_api(self.stubs)
- self.commands = manage.FloatingIpCommands()
-
- def test_address_to_hosts(self):
- def assert_loop(result, expected):
- for ip in result:
- self.assertIn(str(ip), expected)
-
- address_to_hosts = self.commands.address_to_hosts
- # /32 and /31
- self.assertRaises(exception.InvalidInput, address_to_hosts,
- '192.168.100.1/32')
- self.assertRaises(exception.InvalidInput, address_to_hosts,
- '192.168.100.1/31')
- # /30
- expected = ["192.168.100.%s" % i for i in range(1, 3)]
- result = address_to_hosts('192.168.100.0/30')
- self.assertEqual(2, len(list(result)))
- assert_loop(result, expected)
- # /29
- expected = ["192.168.100.%s" % i for i in range(1, 7)]
- result = address_to_hosts('192.168.100.0/29')
- self.assertEqual(6, len(list(result)))
- assert_loop(result, expected)
- # /28
- expected = ["192.168.100.%s" % i for i in range(1, 15)]
- result = address_to_hosts('192.168.100.0/28')
- self.assertEqual(14, len(list(result)))
- assert_loop(result, expected)
- # /16
- result = address_to_hosts('192.168.100.0/16')
- self.assertEqual(65534, len(list(result)))
- # NOTE(dripton): I don't test /13 because it makes the test take 3s.
- # /12 gives over a million IPs, which is ridiculous.
- self.assertRaises(exception.InvalidInput, address_to_hosts,
- '192.168.100.1/12')
-
-
-class NetworkCommandsTestCase(test.TestCase):
- def setUp(self):
- super(NetworkCommandsTestCase, self).setUp()
- self.commands = manage.NetworkCommands()
- self.net = {'id': 0,
- 'label': 'fake',
- 'injected': False,
- 'cidr': '192.168.0.0/24',
- 'cidr_v6': 'dead:beef::/64',
- 'multi_host': False,
- 'gateway_v6': 'dead:beef::1',
- 'netmask_v6': '64',
- 'netmask': '255.255.255.0',
- 'bridge': 'fa0',
- 'bridge_interface': 'fake_fa0',
- 'gateway': '192.168.0.1',
- 'broadcast': '192.168.0.255',
- 'dns1': '8.8.8.8',
- 'dns2': '8.8.4.4',
- 'vlan': 200,
- 'vlan_start': 201,
- 'vpn_public_address': '10.0.0.2',
- 'vpn_public_port': '2222',
- 'vpn_private_address': '192.168.0.2',
- 'dhcp_start': '192.168.0.3',
- 'project_id': 'fake_project',
- 'host': 'fake_host',
- 'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}
-
- def fake_network_get_by_cidr(context, cidr):
- self.assertTrue(context.to_dict()['is_admin'])
- self.assertEqual(cidr, self.fake_net['cidr'])
- return db_fakes.FakeModel(dict(test_network.fake_network,
- **self.fake_net))
-
- def fake_network_get_by_uuid(context, uuid):
- self.assertTrue(context.to_dict()['is_admin'])
- self.assertEqual(uuid, self.fake_net['uuid'])
- return db_fakes.FakeModel(dict(test_network.fake_network,
- **self.fake_net))
-
- def fake_network_update(context, network_id, values):
- self.assertTrue(context.to_dict()['is_admin'])
- self.assertEqual(network_id, self.fake_net['id'])
- self.assertEqual(values, self.fake_update_value)
- self.fake_network_get_by_cidr = fake_network_get_by_cidr
- self.fake_network_get_by_uuid = fake_network_get_by_uuid
- self.fake_network_update = fake_network_update
-
- def test_create(self):
-
- def fake_create_networks(obj, context, **kwargs):
- self.assertTrue(context.to_dict()['is_admin'])
- self.assertEqual(kwargs['label'], 'Test')
- self.assertEqual(kwargs['cidr'], '10.2.0.0/24')
- self.assertEqual(kwargs['multi_host'], False)
- self.assertEqual(kwargs['num_networks'], 1)
- self.assertEqual(kwargs['network_size'], 256)
- self.assertEqual(kwargs['vlan'], 200)
- self.assertEqual(kwargs['vlan_start'], 201)
- self.assertEqual(kwargs['vpn_start'], 2000)
- self.assertEqual(kwargs['cidr_v6'], 'fd00:2::/120')
- self.assertEqual(kwargs['gateway'], '10.2.0.1')
- self.assertEqual(kwargs['gateway_v6'], 'fd00:2::22')
- self.assertEqual(kwargs['bridge'], 'br200')
- self.assertEqual(kwargs['bridge_interface'], 'eth0')
- self.assertEqual(kwargs['dns1'], '8.8.8.8')
- self.assertEqual(kwargs['dns2'], '8.8.4.4')
- self.flags(network_manager='nova.network.manager.VlanManager')
- from nova.network import manager as net_manager
- self.stubs.Set(net_manager.VlanManager, 'create_networks',
- fake_create_networks)
- self.commands.create(
- label='Test',
- cidr='10.2.0.0/24',
- num_networks=1,
- network_size=256,
- multi_host='F',
- vlan=200,
- vlan_start=201,
- vpn_start=2000,
- cidr_v6='fd00:2::/120',
- gateway='10.2.0.1',
- gateway_v6='fd00:2::22',
- bridge='br200',
- bridge_interface='eth0',
- dns1='8.8.8.8',
- dns2='8.8.4.4',
- uuid='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
-
- def test_list(self):
-
- def fake_network_get_all(context):
- return [db_fakes.FakeModel(self.net)]
- self.stubs.Set(db, 'network_get_all', fake_network_get_all)
- output = StringIO.StringIO()
- sys.stdout = output
- self.commands.list()
- sys.stdout = sys.__stdout__
- result = output.getvalue()
- _fmt = "\t".join(["%(id)-5s", "%(cidr)-18s", "%(cidr_v6)-15s",
- "%(dhcp_start)-15s", "%(dns1)-15s", "%(dns2)-15s",
- "%(vlan)-15s", "%(project_id)-15s", "%(uuid)-15s"])
- head = _fmt % {'id': _('id'),
- 'cidr': _('IPv4'),
- 'cidr_v6': _('IPv6'),
- 'dhcp_start': _('start address'),
- 'dns1': _('DNS1'),
- 'dns2': _('DNS2'),
- 'vlan': _('VlanID'),
- 'project_id': _('project'),
- 'uuid': _("uuid")}
- body = _fmt % {'id': self.net['id'],
- 'cidr': self.net['cidr'],
- 'cidr_v6': self.net['cidr_v6'],
- 'dhcp_start': self.net['dhcp_start'],
- 'dns1': self.net['dns1'],
- 'dns2': self.net['dns2'],
- 'vlan': self.net['vlan'],
- 'project_id': self.net['project_id'],
- 'uuid': self.net['uuid']}
- answer = '%s\n%s\n' % (head, body)
- self.assertEqual(result, answer)
-
- def test_delete(self):
- self.fake_net = self.net
- self.fake_net['project_id'] = None
- self.fake_net['host'] = None
- self.stubs.Set(db, 'network_get_by_uuid',
- self.fake_network_get_by_uuid)
-
- def fake_network_delete_safe(context, network_id):
- self.assertTrue(context.to_dict()['is_admin'])
- self.assertEqual(network_id, self.fake_net['id'])
- self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
- self.commands.delete(uuid=self.fake_net['uuid'])
-
- def test_delete_by_cidr(self):
- self.fake_net = self.net
- self.fake_net['project_id'] = None
- self.fake_net['host'] = None
- self.stubs.Set(db, 'network_get_by_cidr',
- self.fake_network_get_by_cidr)
-
- def fake_network_delete_safe(context, network_id):
- self.assertTrue(context.to_dict()['is_admin'])
- self.assertEqual(network_id, self.fake_net['id'])
- self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
- self.commands.delete(fixed_range=self.fake_net['cidr'])
-
- def _test_modify_base(self, update_value, project, host, dis_project=None,
- dis_host=None):
- self.fake_net = self.net
- self.fake_update_value = update_value
- self.stubs.Set(db, 'network_get_by_cidr',
- self.fake_network_get_by_cidr)
- self.stubs.Set(db, 'network_update', self.fake_network_update)
- self.commands.modify(self.fake_net['cidr'], project=project, host=host,
- dis_project=dis_project, dis_host=dis_host)
-
- def test_modify_associate(self):
- self._test_modify_base(update_value={'project_id': 'test_project',
- 'host': 'test_host'},
- project='test_project', host='test_host')
-
- def test_modify_unchanged(self):
- self._test_modify_base(update_value={}, project=None, host=None)
-
- def test_modify_disassociate(self):
- self._test_modify_base(update_value={'project_id': None, 'host': None},
- project=None, host=None, dis_project=True,
- dis_host=True)
-
-
-class NeutronV2NetworkCommandsTestCase(test.TestCase):
- def setUp(self):
- super(NeutronV2NetworkCommandsTestCase, self).setUp()
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- self.commands = manage.NetworkCommands()
-
- def test_create(self):
- self.assertEqual(2, self.commands.create())
-
- def test_list(self):
- self.assertEqual(2, self.commands.list())
-
- def test_delete(self):
- self.assertEqual(2, self.commands.delete())
-
- def test_modify(self):
- self.assertEqual(2, self.commands.modify('192.168.0.1'))
-
-
-class ProjectCommandsTestCase(test.TestCase):
- def setUp(self):
- super(ProjectCommandsTestCase, self).setUp()
- self.commands = manage.ProjectCommands()
-
- def test_quota(self):
- output = StringIO.StringIO()
- sys.stdout = output
- self.commands.quota(project_id='admin',
- key='instances',
- value='unlimited',
- )
-
- sys.stdout = sys.__stdout__
- result = output.getvalue()
- print_format = "%-36s %-10s" % ('instances', 'unlimited')
- self.assertEqual((print_format in result), True)
-
- def test_quota_update_invalid_key(self):
- self.assertEqual(2, self.commands.quota('admin', 'volumes1', '10'))
-
-
-class DBCommandsTestCase(test.TestCase):
- def setUp(self):
- super(DBCommandsTestCase, self).setUp()
- self.commands = manage.DbCommands()
-
- def test_archive_deleted_rows_negative(self):
- self.assertEqual(1, self.commands.archive_deleted_rows(-1))
-
-
-class ServiceCommandsTestCase(test.TestCase):
- def setUp(self):
- super(ServiceCommandsTestCase, self).setUp()
- self.commands = manage.ServiceCommands()
-
- def test_service_enable_invalid_params(self):
- self.assertEqual(2, self.commands.enable('nohost', 'noservice'))
-
- def test_service_disable_invalid_params(self):
- self.assertEqual(2, self.commands.disable('nohost', 'noservice'))
-
-
-class CellCommandsTestCase(test.TestCase):
- def setUp(self):
- super(CellCommandsTestCase, self).setUp()
- self.commands = manage.CellCommands()
-
- def test_create_transport_hosts_multiple(self):
- """Test the _create_transport_hosts method
- when broker_hosts is set.
- """
- brokers = "127.0.0.1:5672,127.0.0.2:5671"
- thosts = self.commands._create_transport_hosts(
- 'guest', 'devstack',
- broker_hosts=brokers)
- self.assertEqual(2, len(thosts))
- self.assertEqual('127.0.0.1', thosts[0].hostname)
- self.assertEqual(5672, thosts[0].port)
- self.assertEqual('127.0.0.2', thosts[1].hostname)
- self.assertEqual(5671, thosts[1].port)
-
- def test_create_transport_hosts_single(self):
- """Test the _create_transport_hosts method when hostname is passed."""
- thosts = self.commands._create_transport_hosts('guest', 'devstack',
- hostname='127.0.0.1',
- port=80)
- self.assertEqual(1, len(thosts))
- self.assertEqual('127.0.0.1', thosts[0].hostname)
- self.assertEqual(80, thosts[0].port)
-
- def test_create_transport_hosts_single_broker(self):
- """Test the _create_transport_hosts method for single broker_hosts."""
- thosts = self.commands._create_transport_hosts(
- 'guest', 'devstack',
- broker_hosts='127.0.0.1:5672')
- self.assertEqual(1, len(thosts))
- self.assertEqual('127.0.0.1', thosts[0].hostname)
- self.assertEqual(5672, thosts[0].port)
-
- def test_create_transport_hosts_both(self):
- """Test the _create_transport_hosts method when both broker_hosts
- and hostname/port are passed.
- """
- thosts = self.commands._create_transport_hosts(
- 'guest', 'devstack',
- broker_hosts='127.0.0.1:5672',
- hostname='127.0.0.2', port=80)
- self.assertEqual(1, len(thosts))
- self.assertEqual('127.0.0.1', thosts[0].hostname)
- self.assertEqual(5672, thosts[0].port)
-
- def test_create_transport_hosts_wrong_val(self):
- """Test the _create_transport_hosts method when broker_hosts
- is wrongly specified
- """
- self.assertRaises(ValueError,
- self.commands._create_transport_hosts,
- 'guest', 'devstack',
- broker_hosts='127.0.0.1:5672,127.0.0.1')
-
- def test_create_transport_hosts_wrong_port_val(self):
- """Test the _create_transport_hosts method when port in
- broker_hosts is wrongly specified
- """
- self.assertRaises(ValueError,
- self.commands._create_transport_hosts,
- 'guest', 'devstack',
- broker_hosts='127.0.0.1:')
-
- def test_create_transport_hosts_wrong_port_arg(self):
- """Test the _create_transport_hosts method when port
- argument is wrongly specified
- """
- self.assertRaises(ValueError,
- self.commands._create_transport_hosts,
- 'guest', 'devstack',
- hostname='127.0.0.1', port='ab')
-
- @mock.patch.object(context, 'get_admin_context')
- @mock.patch.object(db, 'cell_create')
- def test_create_broker_hosts(self, mock_db_cell_create, mock_ctxt):
- """Test the create function when broker_hosts is
- passed
- """
- cell_tp_url = "fake://guest:devstack@127.0.0.1:5432"
- cell_tp_url += ",guest:devstack@127.0.0.2:9999/"
- ctxt = mock.sentinel
- mock_ctxt.return_value = mock.sentinel
- self.commands.create("test",
- broker_hosts='127.0.0.1:5432,127.0.0.2:9999',
- woffset=0, wscale=0,
- username="guest", password="devstack")
- exp_values = {'name': "test",
- 'is_parent': False,
- 'transport_url': cell_tp_url,
- 'weight_offset': 0.0,
- 'weight_scale': 0.0}
- mock_db_cell_create.assert_called_once_with(ctxt, exp_values)
-
- @mock.patch.object(context, 'get_admin_context')
- @mock.patch.object(db, 'cell_create')
- def test_create_hostname(self, mock_db_cell_create, mock_ctxt):
- """Test the create function when hostname and port is
- passed
- """
- cell_tp_url = "fake://guest:devstack@127.0.0.1:9999/"
- ctxt = mock.sentinel
- mock_ctxt.return_value = mock.sentinel
- self.commands.create("test",
- hostname='127.0.0.1', port="9999",
- woffset=0, wscale=0,
- username="guest", password="devstack")
- exp_values = {'name': "test",
- 'is_parent': False,
- 'transport_url': cell_tp_url,
- 'weight_offset': 0.0,
- 'weight_scale': 0.0}
- mock_db_cell_create.assert_called_once_with(ctxt, exp_values)
diff --git a/nova/tests/test_policy.py b/nova/tests/test_policy.py
deleted file mode 100644
index a7174b1384..0000000000
--- a/nova/tests/test_policy.py
+++ /dev/null
@@ -1,231 +0,0 @@
-# Copyright 2011 Piston Cloud Computing, Inc.
-# All Rights Reserved.
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Test of Policy Engine For Nova."""
-
-import os.path
-import StringIO
-
-import mock
-import six.moves.urllib.request as urlrequest
-
-from nova import context
-from nova import exception
-from nova.openstack.common import policy as common_policy
-from nova import policy
-from nova import test
-from nova.tests import policy_fixture
-from nova import utils
-
-
-class PolicyFileTestCase(test.NoDBTestCase):
- def setUp(self):
- super(PolicyFileTestCase, self).setUp()
- self.context = context.RequestContext('fake', 'fake')
- self.target = {}
-
- def test_modified_policy_reloads(self):
- with utils.tempdir() as tmpdir:
- tmpfilename = os.path.join(tmpdir, 'policy')
-
- self.flags(policy_file=tmpfilename)
-
- # NOTE(uni): context construction invokes policy check to determine
- # is_admin or not. As a side-effect, policy reset is needed here
- # to flush existing policy cache.
- policy.reset()
-
- action = "example:test"
- with open(tmpfilename, "w") as policyfile:
- policyfile.write('{"example:test": ""}')
- policy.enforce(self.context, action, self.target)
- with open(tmpfilename, "w") as policyfile:
- policyfile.write('{"example:test": "!"}')
- policy._ENFORCER.load_rules(True)
- self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
- self.context, action, self.target)
-
-
-class PolicyTestCase(test.NoDBTestCase):
- def setUp(self):
- super(PolicyTestCase, self).setUp()
- rules = {
- "true": '@',
- "example:allowed": '@',
- "example:denied": "!",
- "example:get_http": "http://www.example.com",
- "example:my_file": "role:compute_admin or "
- "project_id:%(project_id)s",
- "example:early_and_fail": "! and @",
- "example:early_or_success": "@ or !",
- "example:lowercase_admin": "role:admin or role:sysadmin",
- "example:uppercase_admin": "role:ADMIN or role:sysadmin",
- }
- policy.reset()
- policy.init()
- policy.set_rules(dict((k, common_policy.parse_rule(v))
- for k, v in rules.items()))
- self.context = context.RequestContext('fake', 'fake', roles=['member'])
- self.target = {}
-
- def test_enforce_nonexistent_action_throws(self):
- action = "example:noexist"
- self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
- self.context, action, self.target)
-
- def test_enforce_bad_action_throws(self):
- action = "example:denied"
- self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
- self.context, action, self.target)
-
- def test_enforce_bad_action_noraise(self):
- action = "example:denied"
- result = policy.enforce(self.context, action, self.target, False)
- self.assertEqual(result, False)
-
- def test_enforce_good_action(self):
- action = "example:allowed"
- result = policy.enforce(self.context, action, self.target)
- self.assertEqual(result, True)
-
- @mock.patch.object(urlrequest, 'urlopen',
- return_value=StringIO.StringIO("True"))
- def test_enforce_http_true(self, mock_urlrequest):
- action = "example:get_http"
- target = {}
- result = policy.enforce(self.context, action, target)
- self.assertEqual(result, True)
-
- @mock.patch.object(urlrequest, 'urlopen',
- return_value=StringIO.StringIO("False"))
- def test_enforce_http_false(self, mock_urlrequest):
- action = "example:get_http"
- target = {}
- self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
- self.context, action, target)
-
- def test_templatized_enforcement(self):
- target_mine = {'project_id': 'fake'}
- target_not_mine = {'project_id': 'another'}
- action = "example:my_file"
- policy.enforce(self.context, action, target_mine)
- self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
- self.context, action, target_not_mine)
-
- def test_early_AND_enforcement(self):
- action = "example:early_and_fail"
- self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
- self.context, action, self.target)
-
- def test_early_OR_enforcement(self):
- action = "example:early_or_success"
- policy.enforce(self.context, action, self.target)
-
- def test_ignore_case_role_check(self):
- lowercase_action = "example:lowercase_admin"
- uppercase_action = "example:uppercase_admin"
- # NOTE(dprince) we mix case in the Admin role here to ensure
- # case is ignored
- admin_context = context.RequestContext('admin',
- 'fake',
- roles=['AdMiN'])
- policy.enforce(admin_context, lowercase_action, self.target)
- policy.enforce(admin_context, uppercase_action, self.target)
-
-
-class DefaultPolicyTestCase(test.NoDBTestCase):
-
- def setUp(self):
- super(DefaultPolicyTestCase, self).setUp()
-
- self.rules = {
- "default": '',
- "example:exist": "!",
- }
-
- self._set_rules('default')
-
- self.context = context.RequestContext('fake', 'fake')
-
- def _set_rules(self, default_rule):
- policy.reset()
- rules = dict((k, common_policy.parse_rule(v))
- for k, v in self.rules.items())
- policy.init(rules=rules, default_rule=default_rule, use_conf=False)
-
- def test_policy_called(self):
- self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
- self.context, "example:exist", {})
-
- def test_not_found_policy_calls_default(self):
- policy.enforce(self.context, "example:noexist", {})
-
- def test_default_not_found(self):
- self._set_rules("default_noexist")
- self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
- self.context, "example:noexist", {})
-
-
-class IsAdminCheckTestCase(test.NoDBTestCase):
- def setUp(self):
- super(IsAdminCheckTestCase, self).setUp()
- policy.init()
-
- def test_init_true(self):
- check = policy.IsAdminCheck('is_admin', 'True')
-
- self.assertEqual(check.kind, 'is_admin')
- self.assertEqual(check.match, 'True')
- self.assertEqual(check.expected, True)
-
- def test_init_false(self):
- check = policy.IsAdminCheck('is_admin', 'nottrue')
-
- self.assertEqual(check.kind, 'is_admin')
- self.assertEqual(check.match, 'False')
- self.assertEqual(check.expected, False)
-
- def test_call_true(self):
- check = policy.IsAdminCheck('is_admin', 'True')
-
- self.assertEqual(check('target', dict(is_admin=True),
- policy._ENFORCER), True)
- self.assertEqual(check('target', dict(is_admin=False),
- policy._ENFORCER), False)
-
- def test_call_false(self):
- check = policy.IsAdminCheck('is_admin', 'False')
-
- self.assertEqual(check('target', dict(is_admin=True),
- policy._ENFORCER), False)
- self.assertEqual(check('target', dict(is_admin=False),
- policy._ENFORCER), True)
-
-
-class AdminRolePolicyTestCase(test.NoDBTestCase):
- def setUp(self):
- super(AdminRolePolicyTestCase, self).setUp()
- self.policy = self.useFixture(policy_fixture.RoleBasedPolicyFixture())
- self.context = context.RequestContext('fake', 'fake', roles=['member'])
- self.actions = policy.get_rules().keys()
- self.target = {}
-
- def test_enforce_admin_actions_with_nonadmin_context_throws(self):
- """Check if non-admin context passed to admin actions throws
- Policy not authorized exception
- """
- for action in self.actions:
- self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
- self.context, action, self.target)
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
deleted file mode 100644
index d1589d3481..0000000000
--- a/nova/tests/test_quota.py
+++ /dev/null
@@ -1,2765 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-from oslo.config import cfg
-from oslo.utils import timeutils
-
-from nova import compute
-from nova.compute import flavors
-from nova import context
-from nova import db
-from nova.db.sqlalchemy import api as sqa_api
-from nova.db.sqlalchemy import models as sqa_models
-from nova import exception
-from nova import quota
-from nova import test
-import nova.tests.image.fake
-
-CONF = cfg.CONF
-CONF.import_opt('compute_driver', 'nova.virt.driver')
-
-
-class QuotaIntegrationTestCase(test.TestCase):
-
- REQUIRES_LOCKING = True
-
- def setUp(self):
- super(QuotaIntegrationTestCase, self).setUp()
- self.flags(compute_driver='nova.virt.fake.FakeDriver',
- quota_instances=2,
- quota_cores=4,
- quota_floating_ips=1,
- network_manager='nova.network.manager.FlatDHCPManager')
-
- # Apparently needed by the RPC tests...
- self.network = self.start_service('network')
-
- self.user_id = 'admin'
- self.project_id = 'admin'
- self.context = context.RequestContext(self.user_id,
- self.project_id,
- is_admin=True)
-
- nova.tests.image.fake.stub_out_image_service(self.stubs)
-
- self.compute_api = compute.API()
-
- def tearDown(self):
- super(QuotaIntegrationTestCase, self).tearDown()
- nova.tests.image.fake.FakeImageService_reset()
-
- def _create_instance(self, cores=2):
- """Create a test instance."""
- inst = {}
- inst['image_id'] = 'cedef40a-ed67-4d10-800e-17455edce175'
- inst['reservation_id'] = 'r-fakeres'
- inst['user_id'] = self.user_id
- inst['project_id'] = self.project_id
- inst['instance_type_id'] = '3' # m1.large
- inst['vcpus'] = cores
- return db.instance_create(self.context, inst)
-
- def test_too_many_instances(self):
- instance_uuids = []
- for i in range(CONF.quota_instances):
- instance = self._create_instance()
- instance_uuids.append(instance['uuid'])
- inst_type = flavors.get_flavor_by_name('m1.small')
- image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- try:
- self.compute_api.create(self.context, min_count=1, max_count=1,
- instance_type=inst_type,
- image_href=image_uuid)
- except exception.QuotaError as e:
- expected_kwargs = {'code': 413, 'resource': 'cores', 'req': 1,
- 'used': 4, 'allowed': 4, 'overs': 'cores,instances'}
- self.assertEqual(e.kwargs, expected_kwargs)
- else:
- self.fail('Expected QuotaError exception')
- for instance_uuid in instance_uuids:
- db.instance_destroy(self.context, instance_uuid)
-
- def test_too_many_cores(self):
- instance = self._create_instance(cores=4)
- inst_type = flavors.get_flavor_by_name('m1.small')
- image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- try:
- self.compute_api.create(self.context, min_count=1, max_count=1,
- instance_type=inst_type,
- image_href=image_uuid)
- except exception.QuotaError as e:
- expected_kwargs = {'code': 413, 'resource': 'cores', 'req': 1,
- 'used': 4, 'allowed': 4, 'overs': 'cores'}
- self.assertEqual(e.kwargs, expected_kwargs)
- else:
- self.fail('Expected QuotaError exception')
- db.instance_destroy(self.context, instance['uuid'])
-
- def test_many_cores_with_unlimited_quota(self):
- # Setting cores quota to unlimited:
- self.flags(quota_cores=-1)
- instance = self._create_instance(cores=4)
- db.instance_destroy(self.context, instance['uuid'])
-
- def test_too_many_addresses(self):
- address = '192.168.0.100'
- db.floating_ip_create(context.get_admin_context(),
- {'address': address,
- 'project_id': self.project_id})
- self.assertRaises(exception.QuotaError,
- self.network.allocate_floating_ip,
- self.context,
- self.project_id)
- db.floating_ip_destroy(context.get_admin_context(), address)
-
- def test_auto_assigned(self):
- address = '192.168.0.100'
- db.floating_ip_create(context.get_admin_context(),
- {'address': address,
- 'project_id': self.project_id})
- # auto allocated addresses should not be counted
- self.assertRaises(exception.NoMoreFloatingIps,
- self.network.allocate_floating_ip,
- self.context,
- self.project_id,
- True)
- db.floating_ip_destroy(context.get_admin_context(), address)
-
- def test_too_many_metadata_items(self):
- metadata = {}
- for i in range(CONF.quota_metadata_items + 1):
- metadata['key%s' % i] = 'value%s' % i
- inst_type = flavors.get_flavor_by_name('m1.small')
- image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- self.assertRaises(exception.QuotaError, self.compute_api.create,
- self.context,
- min_count=1,
- max_count=1,
- instance_type=inst_type,
- image_href=image_uuid,
- metadata=metadata)
-
- def _create_with_injected_files(self, files):
- api = self.compute_api
- inst_type = flavors.get_flavor_by_name('m1.small')
- image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- api.create(self.context, min_count=1, max_count=1,
- instance_type=inst_type, image_href=image_uuid,
- injected_files=files)
-
- def test_no_injected_files(self):
- api = self.compute_api
- inst_type = flavors.get_flavor_by_name('m1.small')
- image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
- api.create(self.context,
- instance_type=inst_type,
- image_href=image_uuid)
-
- def test_max_injected_files(self):
- files = []
- for i in xrange(CONF.quota_injected_files):
- files.append(('/my/path%d' % i, 'config = test\n'))
- self._create_with_injected_files(files) # no QuotaError
-
- def test_too_many_injected_files(self):
- files = []
- for i in xrange(CONF.quota_injected_files + 1):
- files.append(('/my/path%d' % i, 'my\ncontent%d\n' % i))
- self.assertRaises(exception.QuotaError,
- self._create_with_injected_files, files)
-
- def test_max_injected_file_content_bytes(self):
- max = CONF.quota_injected_file_content_bytes
- content = ''.join(['a' for i in xrange(max)])
- files = [('/test/path', content)]
- self._create_with_injected_files(files) # no QuotaError
-
- def test_too_many_injected_file_content_bytes(self):
- max = CONF.quota_injected_file_content_bytes
- content = ''.join(['a' for i in xrange(max + 1)])
- files = [('/test/path', content)]
- self.assertRaises(exception.QuotaError,
- self._create_with_injected_files, files)
-
- def test_max_injected_file_path_bytes(self):
- max = CONF.quota_injected_file_path_length
- path = ''.join(['a' for i in xrange(max)])
- files = [(path, 'config = quotatest')]
- self._create_with_injected_files(files) # no QuotaError
-
- def test_too_many_injected_file_path_bytes(self):
- max = CONF.quota_injected_file_path_length
- path = ''.join(['a' for i in xrange(max + 1)])
- files = [(path, 'config = quotatest')]
- self.assertRaises(exception.QuotaError,
- self._create_with_injected_files, files)
-
- def test_reservation_expire(self):
- self.useFixture(test.TimeOverride())
-
- def assertInstancesReserved(reserved):
- result = quota.QUOTAS.get_project_quotas(self.context,
- self.context.project_id)
- self.assertEqual(result['instances']['reserved'], reserved)
-
- quota.QUOTAS.reserve(self.context,
- expire=60,
- instances=2)
-
- assertInstancesReserved(2)
-
- timeutils.advance_time_seconds(80)
-
- quota.QUOTAS.expire(self.context)
-
- assertInstancesReserved(0)
-
-
-class FakeContext(object):
- def __init__(self, project_id, quota_class):
- self.is_admin = False
- self.user_id = 'fake_user'
- self.project_id = project_id
- self.quota_class = quota_class
- self.read_deleted = 'no'
-
- def elevated(self):
- elevated = self.__class__(self.project_id, self.quota_class)
- elevated.is_admin = True
- return elevated
-
-
-class FakeDriver(object):
- def __init__(self, by_project=None, by_user=None, by_class=None,
- reservations=None):
- self.called = []
- self.by_project = by_project or {}
- self.by_user = by_user or {}
- self.by_class = by_class or {}
- self.reservations = reservations or []
-
- def get_by_project_and_user(self, context, project_id, user_id, resource):
- self.called.append(('get_by_project_and_user',
- context, project_id, user_id, resource))
- try:
- return self.by_user[user_id][resource]
- except KeyError:
- raise exception.ProjectUserQuotaNotFound(project_id=project_id,
- user_id=user_id)
-
- def get_by_project(self, context, project_id, resource):
- self.called.append(('get_by_project', context, project_id, resource))
- try:
- return self.by_project[project_id][resource]
- except KeyError:
- raise exception.ProjectQuotaNotFound(project_id=project_id)
-
- def get_by_class(self, context, quota_class, resource):
- self.called.append(('get_by_class', context, quota_class, resource))
- try:
- return self.by_class[quota_class][resource]
- except KeyError:
- raise exception.QuotaClassNotFound(class_name=quota_class)
-
- def get_defaults(self, context, resources):
- self.called.append(('get_defaults', context, resources))
- return resources
-
- def get_class_quotas(self, context, resources, quota_class,
- defaults=True):
- self.called.append(('get_class_quotas', context, resources,
- quota_class, defaults))
- return resources
-
- def get_user_quotas(self, context, resources, project_id, user_id,
- quota_class=None, defaults=True, usages=True):
- self.called.append(('get_user_quotas', context, resources,
- project_id, user_id, quota_class, defaults,
- usages))
- return resources
-
- def get_project_quotas(self, context, resources, project_id,
- quota_class=None, defaults=True, usages=True,
- remains=False):
- self.called.append(('get_project_quotas', context, resources,
- project_id, quota_class, defaults, usages,
- remains))
- return resources
-
- def limit_check(self, context, resources, values, project_id=None,
- user_id=None):
- self.called.append(('limit_check', context, resources,
- values, project_id, user_id))
-
- def reserve(self, context, resources, deltas, expire=None,
- project_id=None, user_id=None):
- self.called.append(('reserve', context, resources, deltas,
- expire, project_id, user_id))
- return self.reservations
-
- def commit(self, context, reservations, project_id=None, user_id=None):
- self.called.append(('commit', context, reservations, project_id,
- user_id))
-
- def rollback(self, context, reservations, project_id=None, user_id=None):
- self.called.append(('rollback', context, reservations, project_id,
- user_id))
-
- def usage_reset(self, context, resources):
- self.called.append(('usage_reset', context, resources))
-
- def destroy_all_by_project_and_user(self, context, project_id, user_id):
- self.called.append(('destroy_all_by_project_and_user', context,
- project_id, user_id))
-
- def destroy_all_by_project(self, context, project_id):
- self.called.append(('destroy_all_by_project', context, project_id))
-
- def expire(self, context):
- self.called.append(('expire', context))
-
-
-class BaseResourceTestCase(test.TestCase):
- def test_no_flag(self):
- resource = quota.BaseResource('test_resource')
-
- self.assertEqual(resource.name, 'test_resource')
- self.assertIsNone(resource.flag)
- self.assertEqual(resource.default, -1)
-
- def test_with_flag(self):
- # We know this flag exists, so use it...
- self.flags(quota_instances=10)
- resource = quota.BaseResource('test_resource', 'quota_instances')
-
- self.assertEqual(resource.name, 'test_resource')
- self.assertEqual(resource.flag, 'quota_instances')
- self.assertEqual(resource.default, 10)
-
- def test_with_flag_no_quota(self):
- self.flags(quota_instances=-1)
- resource = quota.BaseResource('test_resource', 'quota_instances')
-
- self.assertEqual(resource.name, 'test_resource')
- self.assertEqual(resource.flag, 'quota_instances')
- self.assertEqual(resource.default, -1)
-
- def test_quota_no_project_no_class(self):
- self.flags(quota_instances=10)
- resource = quota.BaseResource('test_resource', 'quota_instances')
- driver = FakeDriver()
- context = FakeContext(None, None)
- quota_value = resource.quota(driver, context)
-
- self.assertEqual(quota_value, 10)
-
- def test_quota_with_project_no_class(self):
- self.flags(quota_instances=10)
- resource = quota.BaseResource('test_resource', 'quota_instances')
- driver = FakeDriver(by_project=dict(
- test_project=dict(test_resource=15),
- ))
- context = FakeContext('test_project', None)
- quota_value = resource.quota(driver, context)
-
- self.assertEqual(quota_value, 15)
-
- def test_quota_no_project_with_class(self):
- self.flags(quota_instances=10)
- resource = quota.BaseResource('test_resource', 'quota_instances')
- driver = FakeDriver(by_class=dict(
- test_class=dict(test_resource=20),
- ))
- context = FakeContext(None, 'test_class')
- quota_value = resource.quota(driver, context)
-
- self.assertEqual(quota_value, 20)
-
- def test_quota_with_project_with_class(self):
- self.flags(quota_instances=10)
- resource = quota.BaseResource('test_resource', 'quota_instances')
- driver = FakeDriver(by_project=dict(
- test_project=dict(test_resource=15),
- ),
- by_class=dict(
- test_class=dict(test_resource=20),
- ))
- context = FakeContext('test_project', 'test_class')
- quota_value = resource.quota(driver, context)
-
- self.assertEqual(quota_value, 15)
-
- def test_quota_override_project_with_class(self):
- self.flags(quota_instances=10)
- resource = quota.BaseResource('test_resource', 'quota_instances')
- driver = FakeDriver(by_project=dict(
- test_project=dict(test_resource=15),
- override_project=dict(test_resource=20),
- ))
- context = FakeContext('test_project', 'test_class')
- quota_value = resource.quota(driver, context,
- project_id='override_project')
-
- self.assertEqual(quota_value, 20)
-
- def test_quota_with_project_override_class(self):
- self.flags(quota_instances=10)
- resource = quota.BaseResource('test_resource', 'quota_instances')
- driver = FakeDriver(by_class=dict(
- test_class=dict(test_resource=15),
- override_class=dict(test_resource=20),
- ))
- context = FakeContext('test_project', 'test_class')
- quota_value = resource.quota(driver, context,
- quota_class='override_class')
-
- self.assertEqual(quota_value, 20)
-
- def test_valid_method_call_check_invalid_input(self):
- resources = {'dummy': 1}
-
- self.assertRaises(exception.InvalidQuotaMethodUsage,
- quota._valid_method_call_check_resources,
- resources, 'limit')
-
- def test_valid_method_call_check_invalid_method(self):
- resources = {'key_pairs': 1}
-
- self.assertRaises(exception.InvalidQuotaMethodUsage,
- quota._valid_method_call_check_resources,
- resources, 'dummy')
-
- def test_valid_method_call_check_multiple(self):
- resources = {'key_pairs': 1, 'dummy': 2}
-
- self.assertRaises(exception.InvalidQuotaMethodUsage,
- quota._valid_method_call_check_resources,
- resources, 'check')
-
- resources = {'key_pairs': 1, 'instances': 2, 'dummy': 3}
-
- self.assertRaises(exception.InvalidQuotaMethodUsage,
- quota._valid_method_call_check_resources,
- resources, 'check')
-
- def test_valid_method_call_check_wrong_method_reserve(self):
- resources = {'key_pairs': 1}
-
- self.assertRaises(exception.InvalidQuotaMethodUsage,
- quota._valid_method_call_check_resources,
- resources, 'reserve')
-
- def test_valid_method_call_check_wrong_method_check(self):
- resources = {'fixed_ips': 1}
-
- self.assertRaises(exception.InvalidQuotaMethodUsage,
- quota._valid_method_call_check_resources,
- resources, 'check')
-
-
-class QuotaEngineTestCase(test.TestCase):
- def test_init(self):
- quota_obj = quota.QuotaEngine()
-
- self.assertEqual(quota_obj._resources, {})
- self.assertIsInstance(quota_obj._driver, quota.DbQuotaDriver)
-
- def test_init_override_string(self):
- quota_obj = quota.QuotaEngine(
- quota_driver_class='nova.tests.test_quota.FakeDriver')
-
- self.assertEqual(quota_obj._resources, {})
- self.assertIsInstance(quota_obj._driver, FakeDriver)
-
- def test_init_override_obj(self):
- quota_obj = quota.QuotaEngine(quota_driver_class=FakeDriver)
-
- self.assertEqual(quota_obj._resources, {})
- self.assertEqual(quota_obj._driver, FakeDriver)
-
- def test_register_resource(self):
- quota_obj = quota.QuotaEngine()
- resource = quota.AbsoluteResource('test_resource')
- quota_obj.register_resource(resource)
-
- self.assertEqual(quota_obj._resources, dict(test_resource=resource))
-
- def test_register_resources(self):
- quota_obj = quota.QuotaEngine()
- resources = [
- quota.AbsoluteResource('test_resource1'),
- quota.AbsoluteResource('test_resource2'),
- quota.AbsoluteResource('test_resource3'),
- ]
- quota_obj.register_resources(resources)
-
- self.assertEqual(quota_obj._resources, dict(
- test_resource1=resources[0],
- test_resource2=resources[1],
- test_resource3=resources[2],
- ))
-
- def test_get_by_project_and_user(self):
- context = FakeContext('test_project', 'test_class')
- driver = FakeDriver(by_user=dict(
- fake_user=dict(test_resource=42)))
- quota_obj = quota.QuotaEngine(quota_driver_class=driver)
- result = quota_obj.get_by_project_and_user(context, 'test_project',
- 'fake_user', 'test_resource')
-
- self.assertEqual(driver.called, [
- ('get_by_project_and_user', context, 'test_project',
- 'fake_user', 'test_resource'),
- ])
- self.assertEqual(result, 42)
-
- def test_get_by_project(self):
- context = FakeContext('test_project', 'test_class')
- driver = FakeDriver(by_project=dict(
- test_project=dict(test_resource=42)))
- quota_obj = quota.QuotaEngine(quota_driver_class=driver)
- result = quota_obj.get_by_project(context, 'test_project',
- 'test_resource')
-
- self.assertEqual(driver.called, [
- ('get_by_project', context, 'test_project', 'test_resource'),
- ])
- self.assertEqual(result, 42)
-
- def test_get_by_class(self):
- context = FakeContext('test_project', 'test_class')
- driver = FakeDriver(by_class=dict(
- test_class=dict(test_resource=42)))
- quota_obj = quota.QuotaEngine(quota_driver_class=driver)
- result = quota_obj.get_by_class(context, 'test_class', 'test_resource')
-
- self.assertEqual(driver.called, [
- ('get_by_class', context, 'test_class', 'test_resource'),
- ])
- self.assertEqual(result, 42)
-
- def _make_quota_obj(self, driver):
- quota_obj = quota.QuotaEngine(quota_driver_class=driver)
- resources = [
- quota.AbsoluteResource('test_resource4'),
- quota.AbsoluteResource('test_resource3'),
- quota.AbsoluteResource('test_resource2'),
- quota.AbsoluteResource('test_resource1'),
- ]
- quota_obj.register_resources(resources)
-
- return quota_obj
-
- def test_get_defaults(self):
- context = FakeContext(None, None)
- driver = FakeDriver()
- quota_obj = self._make_quota_obj(driver)
- result = quota_obj.get_defaults(context)
-
- self.assertEqual(driver.called, [
- ('get_defaults', context, quota_obj._resources),
- ])
- self.assertEqual(result, quota_obj._resources)
-
- def test_get_class_quotas(self):
- context = FakeContext(None, None)
- driver = FakeDriver()
- quota_obj = self._make_quota_obj(driver)
- result1 = quota_obj.get_class_quotas(context, 'test_class')
- result2 = quota_obj.get_class_quotas(context, 'test_class', False)
-
- self.assertEqual(driver.called, [
- ('get_class_quotas', context, quota_obj._resources,
- 'test_class', True),
- ('get_class_quotas', context, quota_obj._resources,
- 'test_class', False),
- ])
- self.assertEqual(result1, quota_obj._resources)
- self.assertEqual(result2, quota_obj._resources)
-
- def test_get_user_quotas(self):
- context = FakeContext(None, None)
- driver = FakeDriver()
- quota_obj = self._make_quota_obj(driver)
- result1 = quota_obj.get_user_quotas(context, 'test_project',
- 'fake_user')
- result2 = quota_obj.get_user_quotas(context, 'test_project',
- 'fake_user',
- quota_class='test_class',
- defaults=False,
- usages=False)
-
- self.assertEqual(driver.called, [
- ('get_user_quotas', context, quota_obj._resources,
- 'test_project', 'fake_user', None, True, True),
- ('get_user_quotas', context, quota_obj._resources,
- 'test_project', 'fake_user', 'test_class', False, False),
- ])
- self.assertEqual(result1, quota_obj._resources)
- self.assertEqual(result2, quota_obj._resources)
-
- def test_get_project_quotas(self):
- context = FakeContext(None, None)
- driver = FakeDriver()
- quota_obj = self._make_quota_obj(driver)
- result1 = quota_obj.get_project_quotas(context, 'test_project')
- result2 = quota_obj.get_project_quotas(context, 'test_project',
- quota_class='test_class',
- defaults=False,
- usages=False)
-
- self.assertEqual(driver.called, [
- ('get_project_quotas', context, quota_obj._resources,
- 'test_project', None, True, True, False),
- ('get_project_quotas', context, quota_obj._resources,
- 'test_project', 'test_class', False, False, False),
- ])
- self.assertEqual(result1, quota_obj._resources)
- self.assertEqual(result2, quota_obj._resources)
-
- def test_count_no_resource(self):
- context = FakeContext(None, None)
- driver = FakeDriver()
- quota_obj = self._make_quota_obj(driver)
- self.assertRaises(exception.QuotaResourceUnknown,
- quota_obj.count, context, 'test_resource5',
- True, foo='bar')
-
- def test_count_wrong_resource(self):
- context = FakeContext(None, None)
- driver = FakeDriver()
- quota_obj = self._make_quota_obj(driver)
- self.assertRaises(exception.QuotaResourceUnknown,
- quota_obj.count, context, 'test_resource1',
- True, foo='bar')
-
- def test_count(self):
- def fake_count(context, *args, **kwargs):
- self.assertEqual(args, (True,))
- self.assertEqual(kwargs, dict(foo='bar'))
- return 5
-
- context = FakeContext(None, None)
- driver = FakeDriver()
- quota_obj = self._make_quota_obj(driver)
- quota_obj.register_resource(quota.CountableResource('test_resource5',
- fake_count))
- result = quota_obj.count(context, 'test_resource5', True, foo='bar')
-
- self.assertEqual(result, 5)
-
- def test_limit_check(self):
- context = FakeContext(None, None)
- driver = FakeDriver()
- quota_obj = self._make_quota_obj(driver)
- quota_obj.limit_check(context, test_resource1=4, test_resource2=3,
- test_resource3=2, test_resource4=1)
-
- self.assertEqual(driver.called, [
- ('limit_check', context, quota_obj._resources, dict(
- test_resource1=4,
- test_resource2=3,
- test_resource3=2,
- test_resource4=1,
- ), None, None),
- ])
-
- def test_reserve(self):
- context = FakeContext(None, None)
- driver = FakeDriver(reservations=[
- 'resv-01', 'resv-02', 'resv-03', 'resv-04',
- ])
- quota_obj = self._make_quota_obj(driver)
- result1 = quota_obj.reserve(context, test_resource1=4,
- test_resource2=3, test_resource3=2,
- test_resource4=1)
- result2 = quota_obj.reserve(context, expire=3600,
- test_resource1=1, test_resource2=2,
- test_resource3=3, test_resource4=4)
- result3 = quota_obj.reserve(context, project_id='fake_project',
- test_resource1=1, test_resource2=2,
- test_resource3=3, test_resource4=4)
-
- self.assertEqual(driver.called, [
- ('reserve', context, quota_obj._resources, dict(
- test_resource1=4,
- test_resource2=3,
- test_resource3=2,
- test_resource4=1,
- ), None, None, None),
- ('reserve', context, quota_obj._resources, dict(
- test_resource1=1,
- test_resource2=2,
- test_resource3=3,
- test_resource4=4,
- ), 3600, None, None),
- ('reserve', context, quota_obj._resources, dict(
- test_resource1=1,
- test_resource2=2,
- test_resource3=3,
- test_resource4=4,
- ), None, 'fake_project', None),
- ])
- self.assertEqual(result1, [
- 'resv-01', 'resv-02', 'resv-03', 'resv-04',
- ])
- self.assertEqual(result2, [
- 'resv-01', 'resv-02', 'resv-03', 'resv-04',
- ])
- self.assertEqual(result3, [
- 'resv-01', 'resv-02', 'resv-03', 'resv-04',
- ])
-
- def test_commit(self):
- context = FakeContext(None, None)
- driver = FakeDriver()
- quota_obj = self._make_quota_obj(driver)
- quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03'])
-
- self.assertEqual(driver.called, [
- ('commit', context, ['resv-01', 'resv-02', 'resv-03'], None,
- None),
- ])
-
- def test_rollback(self):
- context = FakeContext(None, None)
- driver = FakeDriver()
- quota_obj = self._make_quota_obj(driver)
- quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03'])
-
- self.assertEqual(driver.called, [
- ('rollback', context, ['resv-01', 'resv-02', 'resv-03'], None,
- None),
- ])
-
- def test_usage_reset(self):
- context = FakeContext(None, None)
- driver = FakeDriver()
- quota_obj = self._make_quota_obj(driver)
- quota_obj.usage_reset(context, ['res1', 'res2', 'res3'])
-
- self.assertEqual(driver.called, [
- ('usage_reset', context, ['res1', 'res2', 'res3']),
- ])
-
- def test_destroy_all_by_project_and_user(self):
- context = FakeContext(None, None)
- driver = FakeDriver()
- quota_obj = self._make_quota_obj(driver)
- quota_obj.destroy_all_by_project_and_user(context,
- 'test_project', 'fake_user')
-
- self.assertEqual(driver.called, [
- ('destroy_all_by_project_and_user', context, 'test_project',
- 'fake_user'),
- ])
-
- def test_destroy_all_by_project(self):
- context = FakeContext(None, None)
- driver = FakeDriver()
- quota_obj = self._make_quota_obj(driver)
- quota_obj.destroy_all_by_project(context, 'test_project')
-
- self.assertEqual(driver.called, [
- ('destroy_all_by_project', context, 'test_project'),
- ])
-
- def test_expire(self):
- context = FakeContext(None, None)
- driver = FakeDriver()
- quota_obj = self._make_quota_obj(driver)
- quota_obj.expire(context)
-
- self.assertEqual(driver.called, [
- ('expire', context),
- ])
-
- def test_resources(self):
- quota_obj = self._make_quota_obj(None)
-
- self.assertEqual(quota_obj.resources,
- ['test_resource1', 'test_resource2',
- 'test_resource3', 'test_resource4'])
-
-
-class DbQuotaDriverTestCase(test.TestCase):
- def setUp(self):
- super(DbQuotaDriverTestCase, self).setUp()
-
- self.flags(quota_instances=10,
- quota_cores=20,
- quota_ram=50 * 1024,
- quota_floating_ips=10,
- quota_fixed_ips=10,
- quota_metadata_items=128,
- quota_injected_files=5,
- quota_injected_file_content_bytes=10 * 1024,
- quota_injected_file_path_length=255,
- quota_security_groups=10,
- quota_security_group_rules=20,
- quota_server_groups=10,
- quota_server_group_members=10,
- reservation_expire=86400,
- until_refresh=0,
- max_age=0,
- )
-
- self.driver = quota.DbQuotaDriver()
-
- self.calls = []
-
- self.useFixture(test.TimeOverride())
-
- def test_get_defaults(self):
- # Use our pre-defined resources
- self._stub_quota_class_get_default()
- result = self.driver.get_defaults(None, quota.QUOTAS._resources)
-
- self.assertEqual(result, dict(
- instances=5,
- cores=20,
- ram=25 * 1024,
- floating_ips=10,
- fixed_ips=10,
- metadata_items=64,
- injected_files=5,
- injected_file_content_bytes=5 * 1024,
- injected_file_path_bytes=255,
- security_groups=10,
- security_group_rules=20,
- key_pairs=100,
- server_groups=10,
- server_group_members=10,
- ))
-
- def _stub_quota_class_get_default(self):
- # Stub out quota_class_get_default
- def fake_qcgd(context):
- self.calls.append('quota_class_get_default')
- return dict(
- instances=5,
- ram=25 * 1024,
- metadata_items=64,
- injected_file_content_bytes=5 * 1024,
- )
- self.stubs.Set(db, 'quota_class_get_default', fake_qcgd)
-
- def _stub_quota_class_get_all_by_name(self):
- # Stub out quota_class_get_all_by_name
- def fake_qcgabn(context, quota_class):
- self.calls.append('quota_class_get_all_by_name')
- self.assertEqual(quota_class, 'test_class')
- return dict(
- instances=5,
- ram=25 * 1024,
- metadata_items=64,
- injected_file_content_bytes=5 * 1024,
- )
- self.stubs.Set(db, 'quota_class_get_all_by_name', fake_qcgabn)
-
- def test_get_class_quotas(self):
- self._stub_quota_class_get_all_by_name()
- result = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
- 'test_class')
-
- self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
- self.assertEqual(result, dict(
- instances=5,
- cores=20,
- ram=25 * 1024,
- floating_ips=10,
- fixed_ips=10,
- metadata_items=64,
- injected_files=5,
- injected_file_content_bytes=5 * 1024,
- injected_file_path_bytes=255,
- security_groups=10,
- security_group_rules=20,
- key_pairs=100,
- server_groups=10,
- server_group_members=10,
- ))
-
- def test_get_class_quotas_no_defaults(self):
- self._stub_quota_class_get_all_by_name()
- result = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
- 'test_class', False)
-
- self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
- self.assertEqual(result, dict(
- instances=5,
- ram=25 * 1024,
- metadata_items=64,
- injected_file_content_bytes=5 * 1024,
- ))
-
- def _stub_get_by_project_and_user(self):
- def fake_qgabpau(context, project_id, user_id):
- self.calls.append('quota_get_all_by_project_and_user')
- self.assertEqual(project_id, 'test_project')
- self.assertEqual(user_id, 'fake_user')
- return dict(
- cores=10,
- injected_files=2,
- injected_file_path_bytes=127,
- )
-
- def fake_qgabp(context, project_id):
- self.calls.append('quota_get_all_by_project')
- self.assertEqual(project_id, 'test_project')
- return {
- 'cores': 10,
- 'injected_files': 2,
- 'injected_file_path_bytes': 127,
- }
-
- def fake_qugabpau(context, project_id, user_id):
- self.calls.append('quota_usage_get_all_by_project_and_user')
- self.assertEqual(project_id, 'test_project')
- self.assertEqual(user_id, 'fake_user')
- return dict(
- instances=dict(in_use=2, reserved=2),
- cores=dict(in_use=4, reserved=4),
- ram=dict(in_use=10 * 1024, reserved=0),
- floating_ips=dict(in_use=2, reserved=0),
- metadata_items=dict(in_use=0, reserved=0),
- injected_files=dict(in_use=0, reserved=0),
- injected_file_content_bytes=dict(in_use=0, reserved=0),
- injected_file_path_bytes=dict(in_use=0, reserved=0),
- )
-
- self.stubs.Set(db, 'quota_get_all_by_project_and_user', fake_qgabpau)
- self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
- self.stubs.Set(db, 'quota_usage_get_all_by_project_and_user',
- fake_qugabpau)
-
- self._stub_quota_class_get_all_by_name()
-
- def test_get_user_quotas(self):
- self.maxDiff = None
- self._stub_get_by_project_and_user()
- result = self.driver.get_user_quotas(
- FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources, 'test_project', 'fake_user')
-
- self.assertEqual(self.calls, [
- 'quota_get_all_by_project_and_user',
- 'quota_get_all_by_project',
- 'quota_usage_get_all_by_project_and_user',
- 'quota_class_get_all_by_name',
- ])
- self.assertEqual(result, dict(
- instances=dict(
- limit=5,
- in_use=2,
- reserved=2,
- ),
- cores=dict(
- limit=10,
- in_use=4,
- reserved=4,
- ),
- ram=dict(
- limit=25 * 1024,
- in_use=10 * 1024,
- reserved=0,
- ),
- floating_ips=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- fixed_ips=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- metadata_items=dict(
- limit=64,
- in_use=0,
- reserved=0,
- ),
- injected_files=dict(
- limit=2,
- in_use=0,
- reserved=0,
- ),
- injected_file_content_bytes=dict(
- limit=5 * 1024,
- in_use=0,
- reserved=0,
- ),
- injected_file_path_bytes=dict(
- limit=127,
- in_use=0,
- reserved=0,
- ),
- security_groups=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- security_group_rules=dict(
- limit=20,
- in_use=0,
- reserved=0,
- ),
- key_pairs=dict(
- limit=100,
- in_use=0,
- reserved=0,
- ),
- server_groups=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- server_group_members=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- ))
-
- def _stub_get_by_project_and_user_specific(self):
- def fake_quota_get(context, project_id, resource, user_id=None):
- self.calls.append('quota_get')
- self.assertEqual(project_id, 'test_project')
- self.assertEqual(user_id, 'fake_user')
- self.assertEqual(resource, 'test_resource')
- return dict(
- test_resource=dict(in_use=20, reserved=10),
- )
- self.stubs.Set(db, 'quota_get', fake_quota_get)
-
- def test_get_by_project_and_user(self):
- self._stub_get_by_project_and_user_specific()
- result = self.driver.get_by_project_and_user(
- FakeContext('test_project', 'test_class'),
- 'test_project', 'fake_user', 'test_resource')
-
- self.assertEqual(self.calls, ['quota_get'])
- self.assertEqual(result, dict(
- test_resource=dict(in_use=20, reserved=10),
- ))
-
- def _stub_get_by_project(self):
- def fake_qgabp(context, project_id):
- self.calls.append('quota_get_all_by_project')
- self.assertEqual(project_id, 'test_project')
- return dict(
- cores=10,
- injected_files=2,
- injected_file_path_bytes=127,
- )
-
- def fake_qugabp(context, project_id):
- self.calls.append('quota_usage_get_all_by_project')
- self.assertEqual(project_id, 'test_project')
- return dict(
- instances=dict(in_use=2, reserved=2),
- cores=dict(in_use=4, reserved=4),
- ram=dict(in_use=10 * 1024, reserved=0),
- floating_ips=dict(in_use=2, reserved=0),
- metadata_items=dict(in_use=0, reserved=0),
- injected_files=dict(in_use=0, reserved=0),
- injected_file_content_bytes=dict(in_use=0, reserved=0),
- injected_file_path_bytes=dict(in_use=0, reserved=0),
- )
-
- self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
- self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp)
-
- self._stub_quota_class_get_all_by_name()
- self._stub_quota_class_get_default()
-
- def test_get_project_quotas(self):
- self.maxDiff = None
- self._stub_get_by_project()
- result = self.driver.get_project_quotas(
- FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources, 'test_project')
-
- self.assertEqual(self.calls, [
- 'quota_get_all_by_project',
- 'quota_usage_get_all_by_project',
- 'quota_class_get_all_by_name',
- 'quota_class_get_default',
- ])
- self.assertEqual(result, dict(
- instances=dict(
- limit=5,
- in_use=2,
- reserved=2,
- ),
- cores=dict(
- limit=10,
- in_use=4,
- reserved=4,
- ),
- ram=dict(
- limit=25 * 1024,
- in_use=10 * 1024,
- reserved=0,
- ),
- floating_ips=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- fixed_ips=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- metadata_items=dict(
- limit=64,
- in_use=0,
- reserved=0,
- ),
- injected_files=dict(
- limit=2,
- in_use=0,
- reserved=0,
- ),
- injected_file_content_bytes=dict(
- limit=5 * 1024,
- in_use=0,
- reserved=0,
- ),
- injected_file_path_bytes=dict(
- limit=127,
- in_use=0,
- reserved=0,
- ),
- security_groups=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- security_group_rules=dict(
- limit=20,
- in_use=0,
- reserved=0,
- ),
- key_pairs=dict(
- limit=100,
- in_use=0,
- reserved=0,
- ),
- server_groups=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- server_group_members=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- ))
-
- def test_get_user_quotas_alt_context_no_class(self):
- self.maxDiff = None
- self._stub_get_by_project_and_user()
- result = self.driver.get_user_quotas(
- FakeContext('test_project', None),
- quota.QUOTAS._resources, 'test_project', 'fake_user')
-
- self.assertEqual(self.calls, [
- 'quota_get_all_by_project_and_user',
- 'quota_get_all_by_project',
- 'quota_usage_get_all_by_project_and_user',
- ])
- self.assertEqual(result, dict(
- instances=dict(
- limit=10,
- in_use=2,
- reserved=2,
- ),
- cores=dict(
- limit=10,
- in_use=4,
- reserved=4,
- ),
- ram=dict(
- limit=50 * 1024,
- in_use=10 * 1024,
- reserved=0,
- ),
- floating_ips=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- fixed_ips=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- metadata_items=dict(
- limit=128,
- in_use=0,
- reserved=0,
- ),
- injected_files=dict(
- limit=2,
- in_use=0,
- reserved=0,
- ),
- injected_file_content_bytes=dict(
- limit=10 * 1024,
- in_use=0,
- reserved=0,
- ),
- injected_file_path_bytes=dict(
- limit=127,
- in_use=0,
- reserved=0,
- ),
- security_groups=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- security_group_rules=dict(
- limit=20,
- in_use=0,
- reserved=0,
- ),
- key_pairs=dict(
- limit=100,
- in_use=0,
- reserved=0,
- ),
- server_groups=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- server_group_members=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- ))
-
- def test_get_project_quotas_alt_context_no_class(self):
- self.maxDiff = None
- self._stub_get_by_project()
- result = self.driver.get_project_quotas(
- FakeContext('other_project', 'other_class'),
- quota.QUOTAS._resources, 'test_project')
-
- self.assertEqual(self.calls, [
- 'quota_get_all_by_project',
- 'quota_usage_get_all_by_project',
- 'quota_class_get_default',
- ])
- self.assertEqual(result, dict(
- instances=dict(
- limit=5,
- in_use=2,
- reserved=2,
- ),
- cores=dict(
- limit=10,
- in_use=4,
- reserved=4,
- ),
- ram=dict(
- limit=25 * 1024,
- in_use=10 * 1024,
- reserved=0,
- ),
- floating_ips=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- fixed_ips=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- metadata_items=dict(
- limit=64,
- in_use=0,
- reserved=0,
- ),
- injected_files=dict(
- limit=2,
- in_use=0,
- reserved=0,
- ),
- injected_file_content_bytes=dict(
- limit=5 * 1024,
- in_use=0,
- reserved=0,
- ),
- injected_file_path_bytes=dict(
- limit=127,
- in_use=0,
- reserved=0,
- ),
- security_groups=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- security_group_rules=dict(
- limit=20,
- in_use=0,
- reserved=0,
- ),
- key_pairs=dict(
- limit=100,
- in_use=0,
- reserved=0,
- ),
- server_groups=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- server_group_members=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- ))
-
- def test_get_user_quotas_alt_context_with_class(self):
- self.maxDiff = None
- self._stub_get_by_project_and_user()
- result = self.driver.get_user_quotas(
- FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources, 'test_project', 'fake_user',
- quota_class='test_class')
-
- self.assertEqual(self.calls, [
- 'quota_get_all_by_project_and_user',
- 'quota_get_all_by_project',
- 'quota_usage_get_all_by_project_and_user',
- 'quota_class_get_all_by_name',
- ])
- self.assertEqual(result, dict(
- instances=dict(
- limit=5,
- in_use=2,
- reserved=2,
- ),
- cores=dict(
- limit=10,
- in_use=4,
- reserved=4,
- ),
- ram=dict(
- limit=25 * 1024,
- in_use=10 * 1024,
- reserved=0,
- ),
- floating_ips=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- fixed_ips=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- metadata_items=dict(
- limit=64,
- in_use=0,
- reserved=0,
- ),
- injected_files=dict(
- limit=2,
- in_use=0,
- reserved=0,
- ),
- injected_file_content_bytes=dict(
- limit=5 * 1024,
- in_use=0,
- reserved=0,
- ),
- injected_file_path_bytes=dict(
- limit=127,
- in_use=0,
- reserved=0,
- ),
- security_groups=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- security_group_rules=dict(
- limit=20,
- in_use=0,
- reserved=0,
- ),
- key_pairs=dict(
- limit=100,
- in_use=0,
- reserved=0,
- ),
- server_groups=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- server_group_members=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- ))
-
- def test_get_project_quotas_alt_context_with_class(self):
- self.maxDiff = None
- self._stub_get_by_project()
- result = self.driver.get_project_quotas(
- FakeContext('other_project', 'other_class'),
- quota.QUOTAS._resources, 'test_project', quota_class='test_class')
-
- self.assertEqual(self.calls, [
- 'quota_get_all_by_project',
- 'quota_usage_get_all_by_project',
- 'quota_class_get_all_by_name',
- 'quota_class_get_default',
- ])
- self.assertEqual(result, dict(
- instances=dict(
- limit=5,
- in_use=2,
- reserved=2,
- ),
- cores=dict(
- limit=10,
- in_use=4,
- reserved=4,
- ),
- ram=dict(
- limit=25 * 1024,
- in_use=10 * 1024,
- reserved=0,
- ),
- floating_ips=dict(
- limit=10,
- in_use=2,
- reserved=0,
- ),
- fixed_ips=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- metadata_items=dict(
- limit=64,
- in_use=0,
- reserved=0,
- ),
- injected_files=dict(
- limit=2,
- in_use=0,
- reserved=0,
- ),
- injected_file_content_bytes=dict(
- limit=5 * 1024,
- in_use=0,
- reserved=0,
- ),
- injected_file_path_bytes=dict(
- limit=127,
- in_use=0,
- reserved=0,
- ),
- security_groups=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- security_group_rules=dict(
- limit=20,
- in_use=0,
- reserved=0,
- ),
- key_pairs=dict(
- limit=100,
- in_use=0,
- reserved=0,
- ),
- server_groups=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- server_group_members=dict(
- limit=10,
- in_use=0,
- reserved=0,
- ),
- ))
-
- def test_get_user_quotas_no_defaults(self):
- self._stub_get_by_project_and_user()
- result = self.driver.get_user_quotas(
- FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources, 'test_project', 'fake_user',
- defaults=False)
-
- self.assertEqual(self.calls, [
- 'quota_get_all_by_project_and_user',
- 'quota_get_all_by_project',
- 'quota_usage_get_all_by_project_and_user',
- 'quota_class_get_all_by_name',
- ])
- self.assertEqual(result, dict(
- cores=dict(
- limit=10,
- in_use=4,
- reserved=4,
- ),
- injected_files=dict(
- limit=2,
- in_use=0,
- reserved=0,
- ),
- injected_file_path_bytes=dict(
- limit=127,
- in_use=0,
- reserved=0,
- ),
- ))
-
- def test_get_project_quotas_no_defaults(self):
- self._stub_get_by_project()
- result = self.driver.get_project_quotas(
- FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources, 'test_project', defaults=False)
-
- self.assertEqual(self.calls, [
- 'quota_get_all_by_project',
- 'quota_usage_get_all_by_project',
- 'quota_class_get_all_by_name',
- 'quota_class_get_default',
- ])
- self.assertEqual(result, dict(
- cores=dict(
- limit=10,
- in_use=4,
- reserved=4,
- ),
- injected_files=dict(
- limit=2,
- in_use=0,
- reserved=0,
- ),
- injected_file_path_bytes=dict(
- limit=127,
- in_use=0,
- reserved=0,
- ),
- ))
-
- def test_get_user_quotas_no_usages(self):
- self._stub_get_by_project_and_user()
- result = self.driver.get_user_quotas(
- FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources, 'test_project', 'fake_user', usages=False)
-
- self.assertEqual(self.calls, [
- 'quota_get_all_by_project_and_user',
- 'quota_get_all_by_project',
- 'quota_class_get_all_by_name',
- ])
- self.assertEqual(result, dict(
- instances=dict(
- limit=5,
- ),
- cores=dict(
- limit=10,
- ),
- ram=dict(
- limit=25 * 1024,
- ),
- floating_ips=dict(
- limit=10,
- ),
- fixed_ips=dict(
- limit=10,
- ),
- metadata_items=dict(
- limit=64,
- ),
- injected_files=dict(
- limit=2,
- ),
- injected_file_content_bytes=dict(
- limit=5 * 1024,
- ),
- injected_file_path_bytes=dict(
- limit=127,
- ),
- security_groups=dict(
- limit=10,
- ),
- security_group_rules=dict(
- limit=20,
- ),
- key_pairs=dict(
- limit=100,
- ),
- server_groups=dict(
- limit=10,
- ),
- server_group_members=dict(
- limit=10,
- ),
- ))
-
- def test_get_project_quotas_no_usages(self):
- self._stub_get_by_project()
- result = self.driver.get_project_quotas(
- FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources, 'test_project', usages=False)
-
- self.assertEqual(self.calls, [
- 'quota_get_all_by_project',
- 'quota_class_get_all_by_name',
- 'quota_class_get_default',
- ])
- self.assertEqual(result, dict(
- instances=dict(
- limit=5,
- ),
- cores=dict(
- limit=10,
- ),
- ram=dict(
- limit=25 * 1024,
- ),
- floating_ips=dict(
- limit=10,
- ),
- fixed_ips=dict(
- limit=10,
- ),
- metadata_items=dict(
- limit=64,
- ),
- injected_files=dict(
- limit=2,
- ),
- injected_file_content_bytes=dict(
- limit=5 * 1024,
- ),
- injected_file_path_bytes=dict(
- limit=127,
- ),
- security_groups=dict(
- limit=10,
- ),
- security_group_rules=dict(
- limit=20,
- ),
- key_pairs=dict(
- limit=100,
- ),
- server_groups=dict(
- limit=10,
- ),
- server_group_members=dict(
- limit=10,
- ),
- ))
-
- def _stub_get_settable_quotas(self):
- def fake_get_project_quotas(context, resources, project_id,
- quota_class=None, defaults=True,
- usages=True, remains=False,
- project_quotas=None):
- self.calls.append('get_project_quotas')
- result = {}
- for k, v in resources.items():
- limit = v.default
- reserved = 0
- if k == 'instances':
- remains = v.default - 5
- in_use = 1
- elif k == 'cores':
- remains = -1
- in_use = 5
- limit = -1
- else:
- remains = v.default
- in_use = 0
- result[k] = {'limit': limit, 'in_use': in_use,
- 'reserved': reserved, 'remains': remains}
- return result
-
- def fake_get_user_quotas(context, resources, project_id, user_id,
- quota_class=None, defaults=True,
- usages=True, project_quotas=None,
- user_quotas=None):
- self.calls.append('get_user_quotas')
- result = {}
- for k, v in resources.items():
- reserved = 0
- if k == 'instances':
- in_use = 1
- elif k == 'cores':
- in_use = 5
- reserved = 10
- else:
- in_use = 0
- result[k] = {'limit': v.default,
- 'in_use': in_use, 'reserved': reserved}
- return result
-
- def fake_qgabpau(context, project_id, user_id):
- self.calls.append('quota_get_all_by_project_and_user')
- return {'instances': 2, 'cores': -1}
-
- self.stubs.Set(self.driver, 'get_project_quotas',
- fake_get_project_quotas)
- self.stubs.Set(self.driver, 'get_user_quotas',
- fake_get_user_quotas)
- self.stubs.Set(db, 'quota_get_all_by_project_and_user',
- fake_qgabpau)
-
- def test_get_settable_quotas_with_user(self):
- self._stub_get_settable_quotas()
- result = self.driver.get_settable_quotas(
- FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources, 'test_project', user_id='test_user')
-
- self.assertEqual(self.calls, [
- 'get_project_quotas',
- 'quota_get_all_by_project_and_user',
- 'get_user_quotas',
- ])
- self.assertEqual(result, {
- 'instances': {
- 'minimum': 1,
- 'maximum': 7,
- },
- 'cores': {
- 'minimum': 15,
- 'maximum': -1,
- },
- 'ram': {
- 'minimum': 0,
- 'maximum': 50 * 1024,
- },
- 'floating_ips': {
- 'minimum': 0,
- 'maximum': 10,
- },
- 'fixed_ips': {
- 'minimum': 0,
- 'maximum': 10,
- },
- 'metadata_items': {
- 'minimum': 0,
- 'maximum': 128,
- },
- 'injected_files': {
- 'minimum': 0,
- 'maximum': 5,
- },
- 'injected_file_content_bytes': {
- 'minimum': 0,
- 'maximum': 10 * 1024,
- },
- 'injected_file_path_bytes': {
- 'minimum': 0,
- 'maximum': 255,
- },
- 'security_groups': {
- 'minimum': 0,
- 'maximum': 10,
- },
- 'security_group_rules': {
- 'minimum': 0,
- 'maximum': 20,
- },
- 'key_pairs': {
- 'minimum': 0,
- 'maximum': 100,
- },
- 'server_groups': {
- 'minimum': 0,
- 'maximum': 10,
- },
- 'server_group_members': {
- 'minimum': 0,
- 'maximum': 10,
- },
- })
-
- def test_get_settable_quotas_without_user(self):
- self._stub_get_settable_quotas()
- result = self.driver.get_settable_quotas(
- FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources, 'test_project')
-
- self.assertEqual(self.calls, [
- 'get_project_quotas',
- ])
- self.assertEqual(result, {
- 'instances': {
- 'minimum': 5,
- 'maximum': -1,
- },
- 'cores': {
- 'minimum': 5,
- 'maximum': -1,
- },
- 'ram': {
- 'minimum': 0,
- 'maximum': -1,
- },
- 'floating_ips': {
- 'minimum': 0,
- 'maximum': -1,
- },
- 'fixed_ips': {
- 'minimum': 0,
- 'maximum': -1,
- },
- 'metadata_items': {
- 'minimum': 0,
- 'maximum': -1,
- },
- 'injected_files': {
- 'minimum': 0,
- 'maximum': -1,
- },
- 'injected_file_content_bytes': {
- 'minimum': 0,
- 'maximum': -1,
- },
- 'injected_file_path_bytes': {
- 'minimum': 0,
- 'maximum': -1,
- },
- 'security_groups': {
- 'minimum': 0,
- 'maximum': -1,
- },
- 'security_group_rules': {
- 'minimum': 0,
- 'maximum': -1,
- },
- 'key_pairs': {
- 'minimum': 0,
- 'maximum': -1,
- },
- 'server_groups': {
- 'minimum': 0,
- 'maximum': -1,
- },
- 'server_group_members': {
- 'minimum': 0,
- 'maximum': -1,
- },
- })
-
- def test_get_settable_quotas_by_user_with_unlimited_value(self):
- self._stub_get_settable_quotas()
- result = self.driver.get_settable_quotas(
- FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources, 'test_project', user_id='test_user')
-
- self.assertEqual(self.calls, [
- 'get_project_quotas',
- 'quota_get_all_by_project_and_user',
- 'get_user_quotas',
- ])
- self.assertEqual(result, {
- 'instances': {
- 'minimum': 1,
- 'maximum': 7,
- },
- 'cores': {
- 'minimum': 15,
- 'maximum': -1,
- },
- 'ram': {
- 'minimum': 0,
- 'maximum': 50 * 1024,
- },
- 'floating_ips': {
- 'minimum': 0,
- 'maximum': 10,
- },
- 'fixed_ips': {
- 'minimum': 0,
- 'maximum': 10,
- },
- 'metadata_items': {
- 'minimum': 0,
- 'maximum': 128,
- },
- 'injected_files': {
- 'minimum': 0,
- 'maximum': 5,
- },
- 'injected_file_content_bytes': {
- 'minimum': 0,
- 'maximum': 10 * 1024,
- },
- 'injected_file_path_bytes': {
- 'minimum': 0,
- 'maximum': 255,
- },
- 'security_groups': {
- 'minimum': 0,
- 'maximum': 10,
- },
- 'security_group_rules': {
- 'minimum': 0,
- 'maximum': 20,
- },
- 'key_pairs': {
- 'minimum': 0,
- 'maximum': 100,
- },
- 'server_groups': {
- 'minimum': 0,
- 'maximum': 10,
- },
- 'server_group_members': {
- 'minimum': 0,
- 'maximum': 10,
- },
- })
-
- def _stub_get_project_quotas(self):
- def fake_get_project_quotas(context, resources, project_id,
- quota_class=None, defaults=True,
- usages=True, remains=False,
- project_quotas=None):
- self.calls.append('get_project_quotas')
- return dict((k, dict(limit=v.default))
- for k, v in resources.items())
-
- self.stubs.Set(self.driver, 'get_project_quotas',
- fake_get_project_quotas)
-
- def test_get_quotas_has_sync_unknown(self):
- self._stub_get_project_quotas()
- self.assertRaises(exception.QuotaResourceUnknown,
- self.driver._get_quotas,
- None, quota.QUOTAS._resources,
- ['unknown'], True)
- self.assertEqual(self.calls, [])
-
- def test_get_quotas_no_sync_unknown(self):
- self._stub_get_project_quotas()
- self.assertRaises(exception.QuotaResourceUnknown,
- self.driver._get_quotas,
- None, quota.QUOTAS._resources,
- ['unknown'], False)
- self.assertEqual(self.calls, [])
-
- def test_get_quotas_has_sync_no_sync_resource(self):
- self._stub_get_project_quotas()
- self.assertRaises(exception.QuotaResourceUnknown,
- self.driver._get_quotas,
- None, quota.QUOTAS._resources,
- ['metadata_items'], True)
- self.assertEqual(self.calls, [])
-
- def test_get_quotas_no_sync_has_sync_resource(self):
- self._stub_get_project_quotas()
- self.assertRaises(exception.QuotaResourceUnknown,
- self.driver._get_quotas,
- None, quota.QUOTAS._resources,
- ['instances'], False)
- self.assertEqual(self.calls, [])
-
- def test_get_quotas_has_sync(self):
- self._stub_get_project_quotas()
- result = self.driver._get_quotas(FakeContext('test_project',
- 'test_class'),
- quota.QUOTAS._resources,
- ['instances', 'cores', 'ram',
- 'floating_ips', 'security_groups',
- 'server_groups'],
- True,
- project_id='test_project')
-
- self.assertEqual(self.calls, ['get_project_quotas'])
- self.assertEqual(result, dict(
- instances=10,
- cores=20,
- ram=50 * 1024,
- floating_ips=10,
- security_groups=10,
- server_groups=10,
- ))
-
- def test_get_quotas_no_sync(self):
- self._stub_get_project_quotas()
- result = self.driver._get_quotas(FakeContext('test_project',
- 'test_class'),
- quota.QUOTAS._resources,
- ['metadata_items', 'injected_files',
- 'injected_file_content_bytes',
- 'injected_file_path_bytes',
- 'security_group_rules',
- 'server_group_members'], False,
- project_id='test_project')
-
- self.assertEqual(self.calls, ['get_project_quotas'])
- self.assertEqual(result, dict(
- metadata_items=128,
- injected_files=5,
- injected_file_content_bytes=10 * 1024,
- injected_file_path_bytes=255,
- security_group_rules=20,
- server_group_members=10,
- ))
-
- def test_limit_check_under(self):
- self._stub_get_project_quotas()
- self.assertRaises(exception.InvalidQuotaValue,
- self.driver.limit_check,
- FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources,
- dict(metadata_items=-1))
-
- def test_limit_check_over(self):
- self._stub_get_project_quotas()
- self.assertRaises(exception.OverQuota,
- self.driver.limit_check,
- FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources,
- dict(metadata_items=129))
-
- def test_limit_check_project_overs(self):
- self._stub_get_project_quotas()
- self.assertRaises(exception.OverQuota,
- self.driver.limit_check,
- FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources,
- dict(injected_file_content_bytes=10241,
- injected_file_path_bytes=256))
-
- def test_limit_check_unlimited(self):
- self.flags(quota_metadata_items=-1)
- self._stub_get_project_quotas()
- self.driver.limit_check(FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources,
- dict(metadata_items=32767))
-
- def test_limit_check(self):
- self._stub_get_project_quotas()
- self.driver.limit_check(FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources,
- dict(metadata_items=128))
-
- def _stub_quota_reserve(self):
- def fake_quota_reserve(context, resources, quotas, user_quotas, deltas,
- expire, until_refresh, max_age, project_id=None,
- user_id=None):
- self.calls.append(('quota_reserve', expire, until_refresh,
- max_age))
- return ['resv-1', 'resv-2', 'resv-3']
- self.stubs.Set(db, 'quota_reserve', fake_quota_reserve)
-
- def test_reserve_bad_expire(self):
- self._stub_get_project_quotas()
- self._stub_quota_reserve()
- self.assertRaises(exception.InvalidReservationExpiration,
- self.driver.reserve,
- FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources,
- dict(instances=2), expire='invalid')
- self.assertEqual(self.calls, [])
-
- def test_reserve_default_expire(self):
- self._stub_get_project_quotas()
- self._stub_quota_reserve()
- result = self.driver.reserve(FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources,
- dict(instances=2))
-
- expire = timeutils.utcnow() + datetime.timedelta(seconds=86400)
- self.assertEqual(self.calls, [
- 'get_project_quotas',
- ('quota_reserve', expire, 0, 0),
- ])
- self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
-
- def test_reserve_int_expire(self):
- self._stub_get_project_quotas()
- self._stub_quota_reserve()
- result = self.driver.reserve(FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources,
- dict(instances=2), expire=3600)
-
- expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
- self.assertEqual(self.calls, [
- 'get_project_quotas',
- ('quota_reserve', expire, 0, 0),
- ])
- self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
-
- def test_reserve_timedelta_expire(self):
- self._stub_get_project_quotas()
- self._stub_quota_reserve()
- expire_delta = datetime.timedelta(seconds=60)
- result = self.driver.reserve(FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources,
- dict(instances=2), expire=expire_delta)
-
- expire = timeutils.utcnow() + expire_delta
- self.assertEqual(self.calls, [
- 'get_project_quotas',
- ('quota_reserve', expire, 0, 0),
- ])
- self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
-
- def test_reserve_datetime_expire(self):
- self._stub_get_project_quotas()
- self._stub_quota_reserve()
- expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
- result = self.driver.reserve(FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources,
- dict(instances=2), expire=expire)
-
- self.assertEqual(self.calls, [
- 'get_project_quotas',
- ('quota_reserve', expire, 0, 0),
- ])
- self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
-
- def test_reserve_until_refresh(self):
- self._stub_get_project_quotas()
- self._stub_quota_reserve()
- self.flags(until_refresh=500)
- expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
- result = self.driver.reserve(FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources,
- dict(instances=2), expire=expire)
-
- self.assertEqual(self.calls, [
- 'get_project_quotas',
- ('quota_reserve', expire, 500, 0),
- ])
- self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
-
- def test_reserve_max_age(self):
- self._stub_get_project_quotas()
- self._stub_quota_reserve()
- self.flags(max_age=86400)
- expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
- result = self.driver.reserve(FakeContext('test_project', 'test_class'),
- quota.QUOTAS._resources,
- dict(instances=2), expire=expire)
-
- self.assertEqual(self.calls, [
- 'get_project_quotas',
- ('quota_reserve', expire, 0, 86400),
- ])
- self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
-
- def test_usage_reset(self):
- calls = []
-
- def fake_quota_usage_update(context, project_id, user_id, resource,
- **kwargs):
- calls.append(('quota_usage_update', context, project_id, user_id,
- resource, kwargs))
- if resource == 'nonexist':
- raise exception.QuotaUsageNotFound(project_id=project_id)
- self.stubs.Set(db, 'quota_usage_update', fake_quota_usage_update)
-
- ctx = FakeContext('test_project', 'test_class')
- resources = ['res1', 'res2', 'nonexist', 'res4']
- self.driver.usage_reset(ctx, resources)
-
- # Make sure we had some calls
- self.assertEqual(len(calls), len(resources))
-
- # Extract the elevated context that was used and do some
- # sanity checks
- elevated = calls[0][1]
- self.assertEqual(elevated.project_id, ctx.project_id)
- self.assertEqual(elevated.quota_class, ctx.quota_class)
- self.assertEqual(elevated.is_admin, True)
-
- # Now check that all the expected calls were made
- exemplar = [('quota_usage_update', elevated, 'test_project',
- 'fake_user', res, dict(in_use=-1)) for res in resources]
- self.assertEqual(calls, exemplar)
-
-
-class FakeSession(object):
- def begin(self):
- return self
-
- def add(self, instance):
- pass
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_value, exc_traceback):
- return False
-
-
-class FakeUsage(sqa_models.QuotaUsage):
- def save(self, *args, **kwargs):
- pass
-
-
-class QuotaReserveSqlAlchemyTestCase(test.TestCase):
- # nova.db.sqlalchemy.api.quota_reserve is so complex it needs its
- # own test case, and since it's a quota manipulator, this is the
- # best place to put it...
-
- def setUp(self):
- super(QuotaReserveSqlAlchemyTestCase, self).setUp()
- self.sync_called = set()
- self.quotas = dict(
- instances=5,
- cores=10,
- ram=10 * 1024,
- fixed_ips=5,
- )
- self.deltas = dict(
- instances=2,
- cores=4,
- ram=2 * 1024,
- fixed_ips=2,
- )
-
- def make_sync(res_name):
- def sync(context, project_id, user_id, session):
- self.sync_called.add(res_name)
- if res_name in self.usages:
- if self.usages[res_name].in_use < 0:
- return {res_name: 2}
- else:
- return {res_name: self.usages[res_name].in_use - 1}
- return {res_name: 0}
- return sync
- self.resources = {}
-
- _existing_quota_sync_func_dict = dict(sqa_api.QUOTA_SYNC_FUNCTIONS)
-
- def restore_sync_functions():
- sqa_api.QUOTA_SYNC_FUNCTIONS.clear()
- sqa_api.QUOTA_SYNC_FUNCTIONS.update(_existing_quota_sync_func_dict)
-
- self.addCleanup(restore_sync_functions)
-
- for res_name in ('instances', 'cores', 'ram', 'fixed_ips'):
- method_name = '_sync_%s' % res_name
- sqa_api.QUOTA_SYNC_FUNCTIONS[method_name] = make_sync(res_name)
- res = quota.ReservableResource(res_name, '_sync_%s' % res_name)
- self.resources[res_name] = res
-
- self.expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
- self.usages = {}
- self.usages_created = {}
- self.reservations_created = {}
- self.usages_list = [
- dict(resource='instances',
- project_id='test_project',
- user_id='fake_user',
- in_use=2,
- reserved=2,
- until_refresh=None),
- dict(resource='cores',
- project_id='test_project',
- user_id='fake_user',
- in_use=2,
- reserved=4,
- until_refresh=None),
- dict(resource='ram',
- project_id='test_project',
- user_id='fake_user',
- in_use=2,
- reserved=2 * 1024,
- until_refresh=None),
- dict(resource='fixed_ips',
- project_id='test_project',
- user_id=None,
- in_use=2,
- reserved=2,
- until_refresh=None),
- ]
-
- def fake_get_session():
- return FakeSession()
-
- def fake_get_project_user_quota_usages(context, session, project_id,
- user_id):
- return self.usages.copy(), self.usages.copy()
-
- def fake_quota_usage_create(project_id, user_id, resource,
- in_use, reserved, until_refresh,
- session=None, save=True):
- quota_usage_ref = self._make_quota_usage(
- project_id, user_id, resource, in_use, reserved, until_refresh,
- timeutils.utcnow(), timeutils.utcnow())
-
- self.usages_created[resource] = quota_usage_ref
-
- return quota_usage_ref
-
- def fake_reservation_create(uuid, usage_id, project_id,
- user_id, resource, delta, expire,
- session=None):
- reservation_ref = self._make_reservation(
- uuid, usage_id, project_id, user_id, resource, delta, expire,
- timeutils.utcnow(), timeutils.utcnow())
-
- self.reservations_created[resource] = reservation_ref
-
- return reservation_ref
-
- self.stubs.Set(sqa_api, 'get_session', fake_get_session)
- self.stubs.Set(sqa_api, '_get_project_user_quota_usages',
- fake_get_project_user_quota_usages)
- self.stubs.Set(sqa_api, '_quota_usage_create', fake_quota_usage_create)
- self.stubs.Set(sqa_api, '_reservation_create', fake_reservation_create)
-
- self.useFixture(test.TimeOverride())
-
- def _make_quota_usage(self, project_id, user_id, resource, in_use,
- reserved, until_refresh, created_at, updated_at):
- quota_usage_ref = FakeUsage()
- quota_usage_ref.id = len(self.usages) + len(self.usages_created)
- quota_usage_ref.project_id = project_id
- quota_usage_ref.user_id = user_id
- quota_usage_ref.resource = resource
- quota_usage_ref.in_use = in_use
- quota_usage_ref.reserved = reserved
- quota_usage_ref.until_refresh = until_refresh
- quota_usage_ref.created_at = created_at
- quota_usage_ref.updated_at = updated_at
- quota_usage_ref.deleted_at = None
- quota_usage_ref.deleted = False
-
- return quota_usage_ref
-
- def init_usage(self, project_id, user_id, resource, in_use, reserved=0,
- until_refresh=None, created_at=None, updated_at=None):
- if created_at is None:
- created_at = timeutils.utcnow()
- if updated_at is None:
- updated_at = timeutils.utcnow()
- if resource == 'fixed_ips':
- user_id = None
-
- quota_usage_ref = self._make_quota_usage(project_id, user_id, resource,
- in_use, reserved,
- until_refresh,
- created_at, updated_at)
-
- self.usages[resource] = quota_usage_ref
-
- def compare_usage(self, usage_dict, expected):
- for usage in expected:
- resource = usage['resource']
- for key, value in usage.items():
- actual = getattr(usage_dict[resource], key)
- self.assertEqual(actual, value,
- "%s != %s on usage for resource %s" %
- (actual, value, resource))
-
- def _make_reservation(self, uuid, usage_id, project_id, user_id, resource,
- delta, expire, created_at, updated_at):
- reservation_ref = sqa_models.Reservation()
- reservation_ref.id = len(self.reservations_created)
- reservation_ref.uuid = uuid
- reservation_ref.usage_id = usage_id
- reservation_ref.project_id = project_id
- reservation_ref.user_id = user_id
- reservation_ref.resource = resource
- reservation_ref.delta = delta
- reservation_ref.expire = expire
- reservation_ref.created_at = created_at
- reservation_ref.updated_at = updated_at
- reservation_ref.deleted_at = None
- reservation_ref.deleted = False
-
- return reservation_ref
-
- def compare_reservation(self, reservations, expected):
- reservations = set(reservations)
- for resv in expected:
- resource = resv['resource']
- resv_obj = self.reservations_created[resource]
-
- self.assertIn(resv_obj.uuid, reservations)
- reservations.discard(resv_obj.uuid)
-
- for key, value in resv.items():
- actual = getattr(resv_obj, key)
- self.assertEqual(actual, value,
- "%s != %s on reservation for resource %s" %
- (actual, value, resource))
-
- self.assertEqual(len(reservations), 0)
-
- def _update_reservations_list(self, usage_id_change=False,
- delta_change=False):
- reservations_list = [
- dict(resource='instances',
- project_id='test_project',
- delta=2),
- dict(resource='cores',
- project_id='test_project',
- delta=4),
- dict(resource='ram',
- delta=2 * 1024),
- dict(resource='fixed_ips',
- project_id='test_project',
- delta=2),
- ]
- if usage_id_change:
- reservations_list[0]["usage_id"] = self.usages_created['instances']
- reservations_list[1]["usage_id"] = self.usages_created['cores']
- reservations_list[2]["usage_id"] = self.usages_created['ram']
- reservations_list[3]["usage_id"] = self.usages_created['fixed_ips']
- else:
- reservations_list[0]["usage_id"] = self.usages['instances']
- reservations_list[1]["usage_id"] = self.usages['cores']
- reservations_list[2]["usage_id"] = self.usages['ram']
- reservations_list[3]["usage_id"] = self.usages['fixed_ips']
- if delta_change:
- reservations_list[0]["delta"] = -2
- reservations_list[1]["delta"] = -4
- reservations_list[2]["delta"] = -2 * 1024
- reservations_list[3]["delta"] = -2
- return reservations_list
-
- def _init_usages(self, *in_use, **kwargs):
- for i, option in enumerate(('instances', 'cores', 'ram', 'fixed_ips')):
- self.init_usage('test_project', 'fake_user',
- option, in_use[i], **kwargs)
- return FakeContext('test_project', 'test_class')
-
- def test_quota_reserve_create_usages(self):
- context = FakeContext('test_project', 'test_class')
- result = sqa_api.quota_reserve(context, self.resources, self.quotas,
- self.quotas, self.deltas, self.expire,
- 0, 0)
-
- self.assertEqual(self.sync_called, set(['instances', 'cores',
- 'ram', 'fixed_ips']))
- self.usages_list[0]["in_use"] = 0
- self.usages_list[1]["in_use"] = 0
- self.usages_list[2]["in_use"] = 0
- self.usages_list[3]["in_use"] = 0
- self.compare_usage(self.usages_created, self.usages_list)
- reservations_list = self._update_reservations_list(True)
- self.compare_reservation(result, reservations_list)
-
- def test_quota_reserve_negative_in_use(self):
- context = self._init_usages(-1, -1, -1, -1, until_refresh=1)
- result = sqa_api.quota_reserve(context, self.resources, self.quotas,
- self.quotas, self.deltas, self.expire,
- 5, 0)
-
- self.assertEqual(self.sync_called, set(['instances', 'cores',
- 'ram', 'fixed_ips']))
- self.usages_list[0]["until_refresh"] = 5
- self.usages_list[1]["until_refresh"] = 5
- self.usages_list[2]["until_refresh"] = 5
- self.usages_list[3]["until_refresh"] = 5
- self.compare_usage(self.usages, self.usages_list)
- self.assertEqual(self.usages_created, {})
- self.compare_reservation(result, self._update_reservations_list())
-
- def test_quota_reserve_until_refresh(self):
- context = self._init_usages(3, 3, 3, 3, until_refresh=1)
- result = sqa_api.quota_reserve(context, self.resources, self.quotas,
- self.quotas, self.deltas, self.expire,
- 5, 0)
-
- self.assertEqual(self.sync_called, set(['instances', 'cores',
- 'ram', 'fixed_ips']))
- self.usages_list[0]["until_refresh"] = 5
- self.usages_list[1]["until_refresh"] = 5
- self.usages_list[2]["until_refresh"] = 5
- self.usages_list[3]["until_refresh"] = 5
- self.compare_usage(self.usages, self.usages_list)
- self.assertEqual(self.usages_created, {})
- self.compare_reservation(result, self._update_reservations_list())
-
- def test_quota_reserve_max_age(self):
- max_age = 3600
- record_created = (timeutils.utcnow() -
- datetime.timedelta(seconds=max_age))
- context = self._init_usages(3, 3, 3, 3, created_at=record_created,
- updated_at=record_created)
- result = sqa_api.quota_reserve(context, self.resources, self.quotas,
- self.quotas, self.deltas, self.expire,
- 0, max_age)
-
- self.assertEqual(self.sync_called, set(['instances', 'cores',
- 'ram', 'fixed_ips']))
- self.compare_usage(self.usages, self.usages_list)
- self.assertEqual(self.usages_created, {})
- self.compare_reservation(result, self._update_reservations_list())
-
- def test_quota_reserve_no_refresh(self):
- context = self._init_usages(3, 3, 3, 3)
- result = sqa_api.quota_reserve(context, self.resources, self.quotas,
- self.quotas, self.deltas, self.expire,
- 0, 0)
-
- self.assertEqual(self.sync_called, set([]))
- self.usages_list[0]["in_use"] = 3
- self.usages_list[1]["in_use"] = 3
- self.usages_list[2]["in_use"] = 3
- self.usages_list[3]["in_use"] = 3
- self.compare_usage(self.usages, self.usages_list)
- self.assertEqual(self.usages_created, {})
- self.compare_reservation(result, self._update_reservations_list())
-
- def test_quota_reserve_unders(self):
- context = self._init_usages(1, 3, 1 * 1024, 1)
- self.deltas["instances"] = -2
- self.deltas["cores"] = -4
- self.deltas["ram"] = -2 * 1024
- self.deltas["fixed_ips"] = -2
- result = sqa_api.quota_reserve(context, self.resources, self.quotas,
- self.quotas, self.deltas, self.expire,
- 0, 0)
-
- self.assertEqual(self.sync_called, set([]))
- self.usages_list[0]["in_use"] = 1
- self.usages_list[0]["reserved"] = 0
- self.usages_list[1]["in_use"] = 3
- self.usages_list[1]["reserved"] = 0
- self.usages_list[2]["in_use"] = 1 * 1024
- self.usages_list[2]["reserved"] = 0
- self.usages_list[3]["in_use"] = 1
- self.usages_list[3]["reserved"] = 0
- self.compare_usage(self.usages, self.usages_list)
- self.assertEqual(self.usages_created, {})
- reservations_list = self._update_reservations_list(False, True)
- self.compare_reservation(result, reservations_list)
-
- def test_quota_reserve_overs(self):
- context = self._init_usages(4, 8, 10 * 1024, 4)
- try:
- sqa_api.quota_reserve(context, self.resources, self.quotas,
- self.quotas, self.deltas, self.expire, 0, 0)
- except exception.OverQuota as e:
- expected_kwargs = {'code': 500,
- 'usages': {'instances': {'reserved': 0, 'in_use': 4},
- 'ram': {'reserved': 0, 'in_use': 10240},
- 'fixed_ips': {'reserved': 0, 'in_use': 4},
- 'cores': {'reserved': 0, 'in_use': 8}},
- 'headroom': {'cores': 2, 'ram': 0, 'fixed_ips': 1,
- 'instances': 1},
- 'overs': ['cores', 'fixed_ips', 'instances', 'ram'],
- 'quotas': {'cores': 10, 'ram': 10240,
- 'fixed_ips': 5, 'instances': 5}}
- self.assertEqual(e.kwargs, expected_kwargs)
- else:
- self.fail('Expected OverQuota failure')
- self.assertEqual(self.sync_called, set([]))
- self.usages_list[0]["in_use"] = 4
- self.usages_list[0]["reserved"] = 0
- self.usages_list[1]["in_use"] = 8
- self.usages_list[1]["reserved"] = 0
- self.usages_list[2]["in_use"] = 10 * 1024
- self.usages_list[2]["reserved"] = 0
- self.usages_list[3]["in_use"] = 4
- self.usages_list[3]["reserved"] = 0
- self.compare_usage(self.usages, self.usages_list)
- self.assertEqual(self.usages_created, {})
- self.assertEqual(self.reservations_created, {})
-
- def test_quota_reserve_cores_unlimited(self):
- # Requesting 8 cores, quota_cores set to unlimited:
- self.flags(quota_cores=-1)
- self._init_usages(1, 8, 1 * 1024, 1)
- self.assertEqual(self.sync_called, set([]))
- self.usages_list[0]["in_use"] = 1
- self.usages_list[0]["reserved"] = 0
- self.usages_list[1]["in_use"] = 8
- self.usages_list[1]["reserved"] = 0
- self.usages_list[2]["in_use"] = 1 * 1024
- self.usages_list[2]["reserved"] = 0
- self.usages_list[3]["in_use"] = 1
- self.usages_list[3]["reserved"] = 0
- self.compare_usage(self.usages, self.usages_list)
- self.assertEqual(self.usages_created, {})
- self.assertEqual(self.reservations_created, {})
-
- def test_quota_reserve_ram_unlimited(self):
- # Requesting 10*1024 ram, quota_ram set to unlimited:
- self.flags(quota_ram=-1)
- self._init_usages(1, 1, 10 * 1024, 1)
- self.assertEqual(self.sync_called, set([]))
- self.usages_list[0]["in_use"] = 1
- self.usages_list[0]["reserved"] = 0
- self.usages_list[1]["in_use"] = 1
- self.usages_list[1]["reserved"] = 0
- self.usages_list[2]["in_use"] = 10 * 1024
- self.usages_list[2]["reserved"] = 0
- self.usages_list[3]["in_use"] = 1
- self.usages_list[3]["reserved"] = 0
- self.compare_usage(self.usages, self.usages_list)
- self.assertEqual(self.usages_created, {})
- self.assertEqual(self.reservations_created, {})
-
- def test_quota_reserve_reduction(self):
- context = self._init_usages(10, 20, 20 * 1024, 10)
- self.deltas["instances"] = -2
- self.deltas["cores"] = -4
- self.deltas["ram"] = -2 * 1024
- self.deltas["fixed_ips"] = -2
- result = sqa_api.quota_reserve(context, self.resources, self.quotas,
- self.quotas, self.deltas, self.expire,
- 0, 0)
-
- self.assertEqual(self.sync_called, set([]))
- self.usages_list[0]["in_use"] = 10
- self.usages_list[0]["reserved"] = 0
- self.usages_list[1]["in_use"] = 20
- self.usages_list[1]["reserved"] = 0
- self.usages_list[2]["in_use"] = 20 * 1024
- self.usages_list[2]["reserved"] = 0
- self.usages_list[3]["in_use"] = 10
- self.usages_list[3]["reserved"] = 0
- self.compare_usage(self.usages, self.usages_list)
- self.assertEqual(self.usages_created, {})
- reservations_list = self._update_reservations_list(False, True)
- self.compare_reservation(result, reservations_list)
-
-
-class NoopQuotaDriverTestCase(test.TestCase):
- def setUp(self):
- super(NoopQuotaDriverTestCase, self).setUp()
-
- self.flags(quota_instances=10,
- quota_cores=20,
- quota_ram=50 * 1024,
- quota_floating_ips=10,
- quota_metadata_items=128,
- quota_injected_files=5,
- quota_injected_file_content_bytes=10 * 1024,
- quota_injected_file_path_length=255,
- quota_security_groups=10,
- quota_security_group_rules=20,
- reservation_expire=86400,
- until_refresh=0,
- max_age=0,
- )
-
- self.expected_with_usages = {}
- self.expected_without_usages = {}
- self.expected_without_dict = {}
- self.expected_settable_quotas = {}
- for r in quota.QUOTAS._resources:
- self.expected_with_usages[r] = dict(limit=-1,
- in_use=-1,
- reserved=-1)
- self.expected_without_usages[r] = dict(limit=-1)
- self.expected_without_dict[r] = -1
- self.expected_settable_quotas[r] = dict(minimum=0, maximum=-1)
-
- self.driver = quota.NoopQuotaDriver()
-
- def test_get_defaults(self):
- # Use our pre-defined resources
- result = self.driver.get_defaults(None, quota.QUOTAS._resources)
- self.assertEqual(self.expected_without_dict, result)
-
- def test_get_class_quotas(self):
- result = self.driver.get_class_quotas(None,
- quota.QUOTAS._resources,
- 'test_class')
- self.assertEqual(self.expected_without_dict, result)
-
- def test_get_class_quotas_no_defaults(self):
- result = self.driver.get_class_quotas(None,
- quota.QUOTAS._resources,
- 'test_class',
- False)
- self.assertEqual(self.expected_without_dict, result)
-
- def test_get_project_quotas(self):
- result = self.driver.get_project_quotas(None,
- quota.QUOTAS._resources,
- 'test_project')
- self.assertEqual(self.expected_with_usages, result)
-
- def test_get_user_quotas(self):
- result = self.driver.get_user_quotas(None,
- quota.QUOTAS._resources,
- 'test_project',
- 'fake_user')
- self.assertEqual(self.expected_with_usages, result)
-
- def test_get_project_quotas_no_defaults(self):
- result = self.driver.get_project_quotas(None,
- quota.QUOTAS._resources,
- 'test_project',
- defaults=False)
- self.assertEqual(self.expected_with_usages, result)
-
- def test_get_user_quotas_no_defaults(self):
- result = self.driver.get_user_quotas(None,
- quota.QUOTAS._resources,
- 'test_project',
- 'fake_user',
- defaults=False)
- self.assertEqual(self.expected_with_usages, result)
-
- def test_get_project_quotas_no_usages(self):
- result = self.driver.get_project_quotas(None,
- quota.QUOTAS._resources,
- 'test_project',
- usages=False)
- self.assertEqual(self.expected_without_usages, result)
-
- def test_get_user_quotas_no_usages(self):
- result = self.driver.get_user_quotas(None,
- quota.QUOTAS._resources,
- 'test_project',
- 'fake_user',
- usages=False)
- self.assertEqual(self.expected_without_usages, result)
-
- def test_get_settable_quotas_with_user(self):
- result = self.driver.get_settable_quotas(None,
- quota.QUOTAS._resources,
- 'test_project',
- 'fake_user')
- self.assertEqual(self.expected_settable_quotas, result)
-
- def test_get_settable_quotas_without_user(self):
- result = self.driver.get_settable_quotas(None,
- quota.QUOTAS._resources,
- 'test_project')
- self.assertEqual(self.expected_settable_quotas, result)
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
deleted file mode 100644
index 4bc7104945..0000000000
--- a/nova/tests/test_service.py
+++ /dev/null
@@ -1,370 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Unit Tests for remote procedure calls using queue
-"""
-
-import sys
-
-import mock
-import mox
-from oslo.concurrency import processutils
-from oslo.config import cfg
-import testtools
-
-from nova import context
-from nova import db
-from nova import exception
-from nova import manager
-from nova.openstack.common import service as _service
-from nova import rpc
-from nova import service
-from nova import test
-from nova.tests import utils
-from nova import wsgi
-
-test_service_opts = [
- cfg.StrOpt("fake_manager",
- default="nova.tests.test_service.FakeManager",
- help="Manager for testing"),
- cfg.StrOpt("test_service_listen",
- default='127.0.0.1',
- help="Host to bind test service to"),
- cfg.IntOpt("test_service_listen_port",
- default=0,
- help="Port number to bind test service to"),
- ]
-
-CONF = cfg.CONF
-CONF.register_opts(test_service_opts)
-
-
-class FakeManager(manager.Manager):
- """Fake manager for tests."""
- def test_method(self):
- return 'manager'
-
-
-class ExtendedService(service.Service):
- def test_method(self):
- return 'service'
-
-
-class ServiceManagerTestCase(test.TestCase):
- """Test cases for Services."""
-
- def test_message_gets_to_manager(self):
- serv = service.Service('test',
- 'test',
- 'test',
- 'nova.tests.test_service.FakeManager')
- serv.start()
- self.assertEqual(serv.test_method(), 'manager')
-
- def test_override_manager_method(self):
- serv = ExtendedService('test',
- 'test',
- 'test',
- 'nova.tests.test_service.FakeManager')
- serv.start()
- self.assertEqual(serv.test_method(), 'service')
-
- def test_service_with_min_down_time(self):
- CONF.set_override('service_down_time', 10)
- CONF.set_override('report_interval', 10)
- serv = service.Service('test',
- 'test',
- 'test',
- 'nova.tests.test_service.FakeManager')
- serv.start()
- self.assertEqual(CONF.service_down_time, 25)
-
-
-class ServiceFlagsTestCase(test.TestCase):
- def test_service_enabled_on_create_based_on_flag(self):
- self.flags(enable_new_services=True)
- host = 'foo'
- binary = 'nova-fake'
- app = service.Service.create(host=host, binary=binary)
- app.start()
- app.stop()
- ref = db.service_get(context.get_admin_context(), app.service_id)
- db.service_destroy(context.get_admin_context(), app.service_id)
- self.assertFalse(ref['disabled'])
-
- def test_service_disabled_on_create_based_on_flag(self):
- self.flags(enable_new_services=False)
- host = 'foo'
- binary = 'nova-fake'
- app = service.Service.create(host=host, binary=binary)
- app.start()
- app.stop()
- ref = db.service_get(context.get_admin_context(), app.service_id)
- db.service_destroy(context.get_admin_context(), app.service_id)
- self.assertTrue(ref['disabled'])
-
-
-class ServiceTestCase(test.TestCase):
- """Test cases for Services."""
-
- def setUp(self):
- super(ServiceTestCase, self).setUp()
- self.host = 'foo'
- self.binary = 'nova-fake'
- self.topic = 'fake'
- self.mox.StubOutWithMock(db, 'service_create')
- self.mox.StubOutWithMock(db, 'service_get_by_args')
- self.flags(use_local=True, group='conductor')
-
- def test_create(self):
-
- # NOTE(vish): Create was moved out of mox replay to make sure that
- # the looping calls are created in StartService.
- app = service.Service.create(host=self.host, binary=self.binary,
- topic=self.topic)
-
- self.assertTrue(app)
-
- def _service_start_mocks(self):
- service_create = {'host': self.host,
- 'binary': self.binary,
- 'topic': self.topic,
- 'report_count': 0}
- service_ref = {'host': self.host,
- 'binary': self.binary,
- 'topic': self.topic,
- 'report_count': 0,
- 'id': 1}
-
- db.service_get_by_args(mox.IgnoreArg(),
- self.host, self.binary).AndRaise(exception.NotFound())
- db.service_create(mox.IgnoreArg(),
- service_create).AndReturn(service_ref)
- return service_ref
-
- def test_init_and_start_hooks(self):
- self.manager_mock = self.mox.CreateMock(FakeManager)
- self.mox.StubOutWithMock(sys.modules[__name__],
- 'FakeManager', use_mock_anything=True)
- self.mox.StubOutWithMock(self.manager_mock, 'init_host')
- self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook')
- self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook')
-
- FakeManager(host=self.host).AndReturn(self.manager_mock)
-
- self.manager_mock.service_name = self.topic
- self.manager_mock.additional_endpoints = []
-
- # init_host is called before any service record is created
- self.manager_mock.init_host()
- self._service_start_mocks()
- # pre_start_hook is called after service record is created,
- # but before RPC consumer is created
- self.manager_mock.pre_start_hook()
- # post_start_hook is called after RPC consumer is created.
- self.manager_mock.post_start_hook()
-
- self.mox.ReplayAll()
-
- serv = service.Service(self.host,
- self.binary,
- self.topic,
- 'nova.tests.test_service.FakeManager')
- serv.start()
-
- def _test_service_check_create_race(self, ex):
- self.manager_mock = self.mox.CreateMock(FakeManager)
- self.mox.StubOutWithMock(sys.modules[__name__], 'FakeManager',
- use_mock_anything=True)
- self.mox.StubOutWithMock(self.manager_mock, 'init_host')
- self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook')
- self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook')
-
- FakeManager(host=self.host).AndReturn(self.manager_mock)
-
- # init_host is called before any service record is created
- self.manager_mock.init_host()
-
- db.service_get_by_args(mox.IgnoreArg(), self.host, self.binary
- ).AndRaise(exception.NotFound)
- db.service_create(mox.IgnoreArg(), mox.IgnoreArg()
- ).AndRaise(ex)
-
- class TestException(Exception):
- pass
-
- db.service_get_by_args(mox.IgnoreArg(), self.host, self.binary
- ).AndRaise(TestException)
-
- self.mox.ReplayAll()
-
- serv = service.Service(self.host,
- self.binary,
- self.topic,
- 'nova.tests.test_service.FakeManager')
- self.assertRaises(TestException, serv.start)
-
- def test_service_check_create_race_topic_exists(self):
- ex = exception.ServiceTopicExists(host='foo', topic='bar')
- self._test_service_check_create_race(ex)
-
- def test_service_check_create_race_binary_exists(self):
- ex = exception.ServiceBinaryExists(host='foo', binary='bar')
- self._test_service_check_create_race(ex)
-
- def test_parent_graceful_shutdown(self):
- self.manager_mock = self.mox.CreateMock(FakeManager)
- self.mox.StubOutWithMock(sys.modules[__name__],
- 'FakeManager', use_mock_anything=True)
- self.mox.StubOutWithMock(self.manager_mock, 'init_host')
- self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook')
- self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook')
-
- self.mox.StubOutWithMock(_service.Service, 'stop')
-
- FakeManager(host=self.host).AndReturn(self.manager_mock)
-
- self.manager_mock.service_name = self.topic
- self.manager_mock.additional_endpoints = []
-
- # init_host is called before any service record is created
- self.manager_mock.init_host()
- self._service_start_mocks()
- # pre_start_hook is called after service record is created,
- # but before RPC consumer is created
- self.manager_mock.pre_start_hook()
- # post_start_hook is called after RPC consumer is created.
- self.manager_mock.post_start_hook()
-
- _service.Service.stop()
-
- self.mox.ReplayAll()
-
- serv = service.Service(self.host,
- self.binary,
- self.topic,
- 'nova.tests.test_service.FakeManager')
- serv.start()
-
- serv.stop()
-
- @mock.patch('nova.servicegroup.API')
- @mock.patch('nova.conductor.api.LocalAPI.service_get_by_args')
- def test_parent_graceful_shutdown_with_cleanup_host(self,
- mock_svc_get_by_args,
- mock_API):
- mock_svc_get_by_args.return_value = {'id': 'some_value'}
- mock_manager = mock.Mock()
-
- serv = service.Service(self.host,
- self.binary,
- self.topic,
- 'nova.tests.test_service.FakeManager')
-
- serv.manager = mock_manager
- serv.manager.additional_endpoints = []
-
- serv.start()
- serv.manager.init_host.assert_called_with()
-
- serv.stop()
- serv.manager.cleanup_host.assert_called_with()
-
- @mock.patch('nova.servicegroup.API')
- @mock.patch('nova.conductor.api.LocalAPI.service_get_by_args')
- @mock.patch.object(rpc, 'get_server')
- def test_service_stop_waits_for_rpcserver(
- self, mock_rpc, mock_svc_get_by_args, mock_API):
- mock_svc_get_by_args.return_value = {'id': 'some_value'}
- serv = service.Service(self.host,
- self.binary,
- self.topic,
- 'nova.tests.test_service.FakeManager')
- serv.start()
- serv.stop()
- serv.rpcserver.start.assert_called_once_with()
- serv.rpcserver.stop.assert_called_once_with()
- serv.rpcserver.wait.assert_called_once_with()
-
-
-class TestWSGIService(test.TestCase):
-
- def setUp(self):
- super(TestWSGIService, self).setUp()
- self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
-
- def test_service_random_port(self):
- test_service = service.WSGIService("test_service")
- test_service.start()
- self.assertNotEqual(0, test_service.port)
- test_service.stop()
-
- def test_workers_set_default(self):
- test_service = service.WSGIService("osapi_compute")
- self.assertEqual(test_service.workers, processutils.get_worker_count())
-
- def test_workers_set_good_user_setting(self):
- CONF.set_override('osapi_compute_workers', 8)
- test_service = service.WSGIService("osapi_compute")
- self.assertEqual(test_service.workers, 8)
-
- def test_workers_set_zero_user_setting(self):
- CONF.set_override('osapi_compute_workers', 0)
- test_service = service.WSGIService("osapi_compute")
- # If a value less than 1 is used, defaults to number of procs available
- self.assertEqual(test_service.workers, processutils.get_worker_count())
-
- def test_service_start_with_illegal_workers(self):
- CONF.set_override("osapi_compute_workers", -1)
- self.assertRaises(exception.InvalidInput,
- service.WSGIService, "osapi_compute")
-
- @testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
- def test_service_random_port_with_ipv6(self):
- CONF.set_default("test_service_listen", "::1")
- test_service = service.WSGIService("test_service")
- test_service.start()
- self.assertEqual("::1", test_service.host)
- self.assertNotEqual(0, test_service.port)
- test_service.stop()
-
- def test_reset_pool_size_to_default(self):
- test_service = service.WSGIService("test_service")
- test_service.start()
-
- # Stopping the service, which in turn sets pool size to 0
- test_service.stop()
- self.assertEqual(test_service.server._pool.size, 0)
-
- # Resetting pool size to default
- test_service.reset()
- test_service.start()
- self.assertEqual(test_service.server._pool.size,
- CONF.wsgi_default_pool_size)
-
-
-class TestLauncher(test.TestCase):
-
- def setUp(self):
- super(TestLauncher, self).setUp()
- self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
- self.service = service.WSGIService("test_service")
-
- def test_launch_app(self):
- service.serve(self.service)
- self.assertNotEqual(0, self.service.port)
- service._launcher.stop()
diff --git a/nova/tests/test_test_utils.py b/nova/tests/test_test_utils.py
deleted file mode 100644
index a7b6f430fe..0000000000
--- a/nova/tests/test_test_utils.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright 2010 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import errno
-import socket
-import tempfile
-
-import fixtures
-
-from nova import db
-from nova import test
-from nova.tests import utils as test_utils
-
-
-class TestUtilsTestCase(test.TestCase):
- def test_get_test_admin_context(self):
- # get_test_admin_context's return value behaves like admin context.
- ctxt = test_utils.get_test_admin_context()
-
- # TODO(soren): This should verify the full interface context
- # objects expose.
- self.assertTrue(ctxt.is_admin)
-
- def test_get_test_instance(self):
- # get_test_instance's return value looks like an instance_ref.
- instance_ref = test_utils.get_test_instance()
- ctxt = test_utils.get_test_admin_context()
- db.instance_get(ctxt, instance_ref['id'])
-
- def _test_get_test_network_info(self):
- """Does the return value match a real network_info structure."""
- # The challenge here is to define what exactly such a structure
- # must look like.
- pass
-
- def test_ipv6_supported(self):
- self.assertIn(test_utils.is_ipv6_supported(), (False, True))
-
- def fake_open(path):
- raise IOError
-
- def fake_socket_fail(x, y):
- e = socket.error()
- e.errno = errno.EAFNOSUPPORT
- raise e
-
- def fake_socket_ok(x, y):
- return tempfile.TemporaryFile()
-
- with fixtures.MonkeyPatch('socket.socket', fake_socket_fail):
- self.assertFalse(test_utils.is_ipv6_supported())
-
- with fixtures.MonkeyPatch('socket.socket', fake_socket_ok):
- with fixtures.MonkeyPatch('sys.platform', 'windows'):
- self.assertTrue(test_utils.is_ipv6_supported())
-
- with fixtures.MonkeyPatch('sys.platform', 'linux2'):
- with fixtures.MonkeyPatch('__builtin__.open', fake_open):
- self.assertFalse(test_utils.is_ipv6_supported())
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
deleted file mode 100644
index 548e973b6a..0000000000
--- a/nova/tests/test_utils.py
+++ /dev/null
@@ -1,981 +0,0 @@
-# Copyright 2011 Justin Santa Barbara
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import __builtin__
-import datetime
-import functools
-import hashlib
-import importlib
-import os
-import os.path
-import StringIO
-import tempfile
-
-import mox
-import netaddr
-from oslo.concurrency import processutils
-from oslo.config import cfg
-from oslo.utils import timeutils
-
-import nova
-from nova import exception
-from nova import test
-from nova import utils
-
-CONF = cfg.CONF
-
-
-class GetMyIP4AddressTestCase(test.NoDBTestCase):
- def test_get_my_ipv4_address_with_no_ipv4(self):
- response = """172.16.0.0/16 via 172.16.251.13 dev tun1
-172.16.251.1 via 172.16.251.13 dev tun1
-172.16.251.13 dev tun1 proto kernel scope link src 172.16.251.14
-172.24.0.0/16 via 172.16.251.13 dev tun1
-192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1"""
-
- def fake_execute(*args, **kwargs):
- return response, None
-
- self.stubs.Set(utils, 'execute', fake_execute)
- address = utils.get_my_ipv4_address()
- self.assertEqual(address, '127.0.0.1')
-
- def test_get_my_ipv4_address_bad_process(self):
- def fake_execute(*args, **kwargs):
- raise processutils.ProcessExecutionError()
-
- self.stubs.Set(utils, 'execute', fake_execute)
- address = utils.get_my_ipv4_address()
- self.assertEqual(address, '127.0.0.1')
-
- def test_get_my_ipv4_address_with_single_interface(self):
- response_route = """default via 192.168.1.1 dev wlan0 proto static
-192.168.1.0/24 dev wlan0 proto kernel scope link src 192.168.1.137 metric 9
-"""
- response_addr = """
-1: lo inet 127.0.0.1/8 scope host lo
-3: wlan0 inet 192.168.1.137/24 brd 192.168.1.255 scope global wlan0
-"""
-
- def fake_execute(*args, **kwargs):
- if 'route' in args:
- return response_route, None
- return response_addr, None
-
- self.stubs.Set(utils, 'execute', fake_execute)
- address = utils.get_my_ipv4_address()
- self.assertEqual(address, '192.168.1.137')
-
- def test_get_my_ipv4_address_with_multi_ipv4_on_single_interface(self):
- response_route = """
-172.18.56.0/24 dev customer proto kernel scope link src 172.18.56.22
-169.254.0.0/16 dev customer scope link metric 1031
-default via 172.18.56.1 dev customer
-"""
- response_addr = (""
-"31: customer inet 172.18.56.22/24 brd 172.18.56.255 scope global"
-" customer\n"
-"31: customer inet 172.18.56.32/24 brd 172.18.56.255 scope global "
-"secondary customer")
-
- def fake_execute(*args, **kwargs):
- if 'route' in args:
- return response_route, None
- return response_addr, None
-
- self.stubs.Set(utils, 'execute', fake_execute)
- address = utils.get_my_ipv4_address()
- self.assertEqual(address, '172.18.56.22')
-
- def test_get_my_ipv4_address_with_multiple_interfaces(self):
- response_route = """
-169.1.9.0/24 dev eth1 proto kernel scope link src 169.1.9.10
-172.17.248.0/21 dev eth0 proto kernel scope link src 172.17.255.9
-169.254.0.0/16 dev eth0 scope link metric 1002
-169.254.0.0/16 dev eth1 scope link metric 1003
-default via 172.17.248.1 dev eth0 proto static
-"""
- response_addr = """
-1: lo inet 127.0.0.1/8 scope host lo
-2: eth0 inet 172.17.255.9/21 brd 172.17.255.255 scope global eth0
-3: eth1 inet 169.1.9.10/24 scope global eth1
-"""
-
- def fake_execute(*args, **kwargs):
- if 'route' in args:
- return response_route, None
- return response_addr, None
-
- self.stubs.Set(utils, 'execute', fake_execute)
- address = utils.get_my_ipv4_address()
- self.assertEqual(address, '172.17.255.9')
-
-
-class GenericUtilsTestCase(test.NoDBTestCase):
- def test_parse_server_string(self):
- result = utils.parse_server_string('::1')
- self.assertEqual(('::1', ''), result)
- result = utils.parse_server_string('[::1]:8773')
- self.assertEqual(('::1', '8773'), result)
- result = utils.parse_server_string('2001:db8::192.168.1.1')
- self.assertEqual(('2001:db8::192.168.1.1', ''), result)
- result = utils.parse_server_string('[2001:db8::192.168.1.1]:8773')
- self.assertEqual(('2001:db8::192.168.1.1', '8773'), result)
- result = utils.parse_server_string('192.168.1.1')
- self.assertEqual(('192.168.1.1', ''), result)
- result = utils.parse_server_string('192.168.1.2:8773')
- self.assertEqual(('192.168.1.2', '8773'), result)
- result = utils.parse_server_string('192.168.1.3')
- self.assertEqual(('192.168.1.3', ''), result)
- result = utils.parse_server_string('www.example.com:8443')
- self.assertEqual(('www.example.com', '8443'), result)
- result = utils.parse_server_string('www.example.com')
- self.assertEqual(('www.example.com', ''), result)
- # error case
- result = utils.parse_server_string('www.exa:mple.com:8443')
- self.assertEqual(('', ''), result)
-
- def test_hostname_unicode_sanitization(self):
- hostname = u"\u7684.test.example.com"
- self.assertEqual("test.example.com",
- utils.sanitize_hostname(hostname))
-
- def test_hostname_sanitize_periods(self):
- hostname = "....test.example.com..."
- self.assertEqual("test.example.com",
- utils.sanitize_hostname(hostname))
-
- def test_hostname_sanitize_dashes(self):
- hostname = "----test.example.com---"
- self.assertEqual("test.example.com",
- utils.sanitize_hostname(hostname))
-
- def test_hostname_sanitize_characters(self):
- hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+"
- self.assertEqual("91----test-host.example.com-0",
- utils.sanitize_hostname(hostname))
-
- def test_hostname_translate(self):
- hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>"
- self.assertEqual("hello", utils.sanitize_hostname(hostname))
-
- def test_read_cached_file(self):
- self.mox.StubOutWithMock(os.path, "getmtime")
- os.path.getmtime(mox.IgnoreArg()).AndReturn(1)
- self.mox.ReplayAll()
-
- cache_data = {"data": 1123, "mtime": 1}
- data = utils.read_cached_file("/this/is/a/fake", cache_data)
- self.assertEqual(cache_data["data"], data)
-
- def test_read_modified_cached_file(self):
- self.mox.StubOutWithMock(os.path, "getmtime")
- self.mox.StubOutWithMock(__builtin__, 'open')
- os.path.getmtime(mox.IgnoreArg()).AndReturn(2)
-
- fake_contents = "lorem ipsum"
- fake_file = self.mox.CreateMockAnything()
- fake_file.read().AndReturn(fake_contents)
- fake_context_manager = self.mox.CreateMockAnything()
- fake_context_manager.__enter__().AndReturn(fake_file)
- fake_context_manager.__exit__(mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg())
-
- __builtin__.open(mox.IgnoreArg()).AndReturn(fake_context_manager)
-
- self.mox.ReplayAll()
- cache_data = {"data": 1123, "mtime": 1}
- self.reload_called = False
-
- def test_reload(reloaded_data):
- self.assertEqual(reloaded_data, fake_contents)
- self.reload_called = True
-
- data = utils.read_cached_file("/this/is/a/fake", cache_data,
- reload_func=test_reload)
- self.assertEqual(data, fake_contents)
- self.assertTrue(self.reload_called)
-
- def test_generate_password(self):
- password = utils.generate_password()
- self.assertTrue([c for c in password if c in '0123456789'])
- self.assertTrue([c for c in password
- if c in 'abcdefghijklmnopqrstuvwxyz'])
- self.assertTrue([c for c in password
- if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'])
-
- def test_read_file_as_root(self):
- def fake_execute(*args, **kwargs):
- if args[1] == 'bad':
- raise processutils.ProcessExecutionError()
- return 'fakecontents', None
-
- self.stubs.Set(utils, 'execute', fake_execute)
- contents = utils.read_file_as_root('good')
- self.assertEqual(contents, 'fakecontents')
- self.assertRaises(exception.FileNotFound,
- utils.read_file_as_root, 'bad')
-
- def test_temporary_chown(self):
- def fake_execute(*args, **kwargs):
- if args[0] == 'chown':
- fake_execute.uid = args[1]
- self.stubs.Set(utils, 'execute', fake_execute)
-
- with tempfile.NamedTemporaryFile() as f:
- with utils.temporary_chown(f.name, owner_uid=2):
- self.assertEqual(fake_execute.uid, 2)
- self.assertEqual(fake_execute.uid, os.getuid())
-
- def test_xhtml_escape(self):
- self.assertEqual('&quot;foo&quot;', utils.xhtml_escape('"foo"'))
- self.assertEqual('&apos;foo&apos;', utils.xhtml_escape("'foo'"))
- self.assertEqual('&amp;', utils.xhtml_escape('&'))
- self.assertEqual('&gt;', utils.xhtml_escape('>'))
- self.assertEqual('&lt;', utils.xhtml_escape('<'))
- self.assertEqual('&lt;foo&gt;', utils.xhtml_escape('<foo>'))
-
- def test_is_valid_ipv4(self):
- self.assertTrue(utils.is_valid_ipv4('127.0.0.1'))
- self.assertFalse(utils.is_valid_ipv4('::1'))
- self.assertFalse(utils.is_valid_ipv4('bacon'))
- self.assertFalse(utils.is_valid_ipv4(""))
- self.assertFalse(utils.is_valid_ipv4(10))
-
- def test_is_valid_ipv6(self):
- self.assertTrue(utils.is_valid_ipv6("::1"))
- self.assertTrue(utils.is_valid_ipv6(
- "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254"))
- self.assertTrue(utils.is_valid_ipv6(
- "0000:0000:0000:0000:0000:0000:0000:0001"))
- self.assertFalse(utils.is_valid_ipv6("foo"))
- self.assertFalse(utils.is_valid_ipv6("127.0.0.1"))
- self.assertFalse(utils.is_valid_ipv6(""))
- self.assertFalse(utils.is_valid_ipv6(10))
-
- def test_is_valid_ipv6_cidr(self):
- self.assertTrue(utils.is_valid_ipv6_cidr("2600::/64"))
- self.assertTrue(utils.is_valid_ipv6_cidr(
- "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254/48"))
- self.assertTrue(utils.is_valid_ipv6_cidr(
- "0000:0000:0000:0000:0000:0000:0000:0001/32"))
- self.assertTrue(utils.is_valid_ipv6_cidr(
- "0000:0000:0000:0000:0000:0000:0000:0001"))
- self.assertFalse(utils.is_valid_ipv6_cidr("foo"))
- self.assertFalse(utils.is_valid_ipv6_cidr("127.0.0.1"))
-
- def test_get_shortened_ipv6(self):
- self.assertEqual("abcd:ef01:2345:6789:abcd:ef01:c0a8:fefe",
- utils.get_shortened_ipv6(
- "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254"))
- self.assertEqual("::1", utils.get_shortened_ipv6(
- "0000:0000:0000:0000:0000:0000:0000:0001"))
- self.assertEqual("caca::caca:0:babe:201:102",
- utils.get_shortened_ipv6(
- "caca:0000:0000:caca:0000:babe:0201:0102"))
- self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6,
- "127.0.0.1")
- self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6,
- "failure")
-
- def test_get_shortened_ipv6_cidr(self):
- self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr(
- "2600:0000:0000:0000:0000:0000:0000:0000/64"))
- self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr(
- "2600::1/64"))
- self.assertRaises(netaddr.AddrFormatError,
- utils.get_shortened_ipv6_cidr,
- "127.0.0.1")
- self.assertRaises(netaddr.AddrFormatError,
- utils.get_shortened_ipv6_cidr,
- "failure")
-
- def test_get_hash_str(self):
- base_str = "foo"
- value = hashlib.md5(base_str).hexdigest()
- self.assertEqual(
- value, utils.get_hash_str(base_str))
-
-
-class MonkeyPatchTestCase(test.NoDBTestCase):
- """Unit test for utils.monkey_patch()."""
- def setUp(self):
- super(MonkeyPatchTestCase, self).setUp()
- self.example_package = 'nova.tests.monkey_patch_example.'
- self.flags(
- monkey_patch=True,
- monkey_patch_modules=[self.example_package + 'example_a' + ':'
- + self.example_package + 'example_decorator'])
-
- def test_monkey_patch(self):
- utils.monkey_patch()
- nova.tests.monkey_patch_example.CALLED_FUNCTION = []
- from nova.tests.monkey_patch_example import example_a
- from nova.tests.monkey_patch_example import example_b
-
- self.assertEqual('Example function', example_a.example_function_a())
- exampleA = example_a.ExampleClassA()
- exampleA.example_method()
- ret_a = exampleA.example_method_add(3, 5)
- self.assertEqual(ret_a, 8)
-
- self.assertEqual('Example function', example_b.example_function_b())
- exampleB = example_b.ExampleClassB()
- exampleB.example_method()
- ret_b = exampleB.example_method_add(3, 5)
-
- self.assertEqual(ret_b, 8)
- package_a = self.example_package + 'example_a.'
- self.assertIn(package_a + 'example_function_a',
- nova.tests.monkey_patch_example.CALLED_FUNCTION)
-
- self.assertIn(package_a + 'ExampleClassA.example_method',
- nova.tests.monkey_patch_example.CALLED_FUNCTION)
- self.assertIn(package_a + 'ExampleClassA.example_method_add',
- nova.tests.monkey_patch_example.CALLED_FUNCTION)
- package_b = self.example_package + 'example_b.'
- self.assertNotIn(package_b + 'example_function_b',
- nova.tests.monkey_patch_example.CALLED_FUNCTION)
- self.assertNotIn(package_b + 'ExampleClassB.example_method',
- nova.tests.monkey_patch_example.CALLED_FUNCTION)
- self.assertNotIn(package_b + 'ExampleClassB.example_method_add',
- nova.tests.monkey_patch_example.CALLED_FUNCTION)
-
-
-class MonkeyPatchDefaultTestCase(test.NoDBTestCase):
- """Unit test for default monkey_patch_modules value."""
-
- def setUp(self):
- super(MonkeyPatchDefaultTestCase, self).setUp()
- self.flags(
- monkey_patch=True)
-
- def test_monkey_patch_default_mod(self):
- # monkey_patch_modules is defined to be
- # <module_to_patch>:<decorator_to_patch_with>
- # Here we check that both parts of the default values are
- # valid
- for module in CONF.monkey_patch_modules:
- m = module.split(':', 1)
- # Check we can import the module to be patched
- importlib.import_module(m[0])
- # check the decorator is valid
- decorator_name = m[1].rsplit('.', 1)
- decorator_module = importlib.import_module(decorator_name[0])
- getattr(decorator_module, decorator_name[1])
-
-
-class AuditPeriodTest(test.NoDBTestCase):
-
- def setUp(self):
- super(AuditPeriodTest, self).setUp()
- # a fairly random time to test with
- self.test_time = datetime.datetime(second=23,
- minute=12,
- hour=8,
- day=5,
- month=3,
- year=2012)
- timeutils.set_time_override(override_time=self.test_time)
-
- def tearDown(self):
- timeutils.clear_time_override()
- super(AuditPeriodTest, self).tearDown()
-
- def test_hour(self):
- begin, end = utils.last_completed_audit_period(unit='hour')
- self.assertEqual(begin, datetime.datetime(
- hour=7,
- day=5,
- month=3,
- year=2012))
- self.assertEqual(end, datetime.datetime(
- hour=8,
- day=5,
- month=3,
- year=2012))
-
- def test_hour_with_offset_before_current(self):
- begin, end = utils.last_completed_audit_period(unit='hour@10')
- self.assertEqual(begin, datetime.datetime(
- minute=10,
- hour=7,
- day=5,
- month=3,
- year=2012))
- self.assertEqual(end, datetime.datetime(
- minute=10,
- hour=8,
- day=5,
- month=3,
- year=2012))
-
- def test_hour_with_offset_after_current(self):
- begin, end = utils.last_completed_audit_period(unit='hour@30')
- self.assertEqual(begin, datetime.datetime(
- minute=30,
- hour=6,
- day=5,
- month=3,
- year=2012))
- self.assertEqual(end, datetime.datetime(
- minute=30,
- hour=7,
- day=5,
- month=3,
- year=2012))
-
- def test_day(self):
- begin, end = utils.last_completed_audit_period(unit='day')
- self.assertEqual(begin, datetime.datetime(
- day=4,
- month=3,
- year=2012))
- self.assertEqual(end, datetime.datetime(
- day=5,
- month=3,
- year=2012))
-
- def test_day_with_offset_before_current(self):
- begin, end = utils.last_completed_audit_period(unit='day@6')
- self.assertEqual(begin, datetime.datetime(
- hour=6,
- day=4,
- month=3,
- year=2012))
- self.assertEqual(end, datetime.datetime(
- hour=6,
- day=5,
- month=3,
- year=2012))
-
- def test_day_with_offset_after_current(self):
- begin, end = utils.last_completed_audit_period(unit='day@10')
- self.assertEqual(begin, datetime.datetime(
- hour=10,
- day=3,
- month=3,
- year=2012))
- self.assertEqual(end, datetime.datetime(
- hour=10,
- day=4,
- month=3,
- year=2012))
-
- def test_month(self):
- begin, end = utils.last_completed_audit_period(unit='month')
- self.assertEqual(begin, datetime.datetime(
- day=1,
- month=2,
- year=2012))
- self.assertEqual(end, datetime.datetime(
- day=1,
- month=3,
- year=2012))
-
- def test_month_with_offset_before_current(self):
- begin, end = utils.last_completed_audit_period(unit='month@2')
- self.assertEqual(begin, datetime.datetime(
- day=2,
- month=2,
- year=2012))
- self.assertEqual(end, datetime.datetime(
- day=2,
- month=3,
- year=2012))
-
- def test_month_with_offset_after_current(self):
- begin, end = utils.last_completed_audit_period(unit='month@15')
- self.assertEqual(begin, datetime.datetime(
- day=15,
- month=1,
- year=2012))
- self.assertEqual(end, datetime.datetime(
- day=15,
- month=2,
- year=2012))
-
- def test_year(self):
- begin, end = utils.last_completed_audit_period(unit='year')
- self.assertEqual(begin, datetime.datetime(
- day=1,
- month=1,
- year=2011))
- self.assertEqual(end, datetime.datetime(
- day=1,
- month=1,
- year=2012))
-
- def test_year_with_offset_before_current(self):
- begin, end = utils.last_completed_audit_period(unit='year@2')
- self.assertEqual(begin, datetime.datetime(
- day=1,
- month=2,
- year=2011))
- self.assertEqual(end, datetime.datetime(
- day=1,
- month=2,
- year=2012))
-
- def test_year_with_offset_after_current(self):
- begin, end = utils.last_completed_audit_period(unit='year@6')
- self.assertEqual(begin, datetime.datetime(
- day=1,
- month=6,
- year=2010))
- self.assertEqual(end, datetime.datetime(
- day=1,
- month=6,
- year=2011))
-
-
-class MkfsTestCase(test.NoDBTestCase):
-
- def test_mkfs(self):
- self.mox.StubOutWithMock(utils, 'execute')
- utils.execute('mkfs', '-t', 'ext4', '-F', '/my/block/dev',
- run_as_root=False)
- utils.execute('mkfs', '-t', 'msdos', '/my/msdos/block/dev',
- run_as_root=False)
- utils.execute('mkswap', '/my/swap/block/dev',
- run_as_root=False)
- self.mox.ReplayAll()
-
- utils.mkfs('ext4', '/my/block/dev')
- utils.mkfs('msdos', '/my/msdos/block/dev')
- utils.mkfs('swap', '/my/swap/block/dev')
-
- def test_mkfs_with_label(self):
- self.mox.StubOutWithMock(utils, 'execute')
- utils.execute('mkfs', '-t', 'ext4', '-F',
- '-L', 'ext4-vol', '/my/block/dev', run_as_root=False)
- utils.execute('mkfs', '-t', 'msdos',
- '-n', 'msdos-vol', '/my/msdos/block/dev',
- run_as_root=False)
- utils.execute('mkswap', '-L', 'swap-vol', '/my/swap/block/dev',
- run_as_root=False)
- self.mox.ReplayAll()
-
- utils.mkfs('ext4', '/my/block/dev', 'ext4-vol')
- utils.mkfs('msdos', '/my/msdos/block/dev', 'msdos-vol')
- utils.mkfs('swap', '/my/swap/block/dev', 'swap-vol')
-
-
-class LastBytesTestCase(test.NoDBTestCase):
- """Test the last_bytes() utility method."""
-
- def setUp(self):
- super(LastBytesTestCase, self).setUp()
- self.f = StringIO.StringIO('1234567890')
-
- def test_truncated(self):
- self.f.seek(0, os.SEEK_SET)
- out, remaining = utils.last_bytes(self.f, 5)
- self.assertEqual(out, '67890')
- self.assertTrue(remaining > 0)
-
- def test_read_all(self):
- self.f.seek(0, os.SEEK_SET)
- out, remaining = utils.last_bytes(self.f, 1000)
- self.assertEqual(out, '1234567890')
- self.assertFalse(remaining > 0)
-
- def test_seek_too_far_real_file(self):
- # StringIO doesn't raise IOError if you see past the start of the file.
- flo = tempfile.TemporaryFile()
- content = '1234567890'
- flo.write(content)
- self.assertEqual((content, 0), utils.last_bytes(flo, 1000))
-
-
-class IntLikeTestCase(test.NoDBTestCase):
-
- def test_is_int_like(self):
- self.assertTrue(utils.is_int_like(1))
- self.assertTrue(utils.is_int_like("1"))
- self.assertTrue(utils.is_int_like("514"))
- self.assertTrue(utils.is_int_like("0"))
-
- self.assertFalse(utils.is_int_like(1.1))
- self.assertFalse(utils.is_int_like("1.1"))
- self.assertFalse(utils.is_int_like("1.1.1"))
- self.assertFalse(utils.is_int_like(None))
- self.assertFalse(utils.is_int_like("0."))
- self.assertFalse(utils.is_int_like("aaaaaa"))
- self.assertFalse(utils.is_int_like("...."))
- self.assertFalse(utils.is_int_like("1g"))
- self.assertFalse(
- utils.is_int_like("0cc3346e-9fef-4445-abe6-5d2b2690ec64"))
- self.assertFalse(utils.is_int_like("a1"))
-
-
-class MetadataToDictTestCase(test.NoDBTestCase):
- def test_metadata_to_dict(self):
- self.assertEqual(utils.metadata_to_dict(
- [{'key': 'foo1', 'value': 'bar'},
- {'key': 'foo2', 'value': 'baz'}]),
- {'foo1': 'bar', 'foo2': 'baz'})
-
- def test_metadata_to_dict_empty(self):
- self.assertEqual(utils.metadata_to_dict([]), {})
-
- def test_dict_to_metadata(self):
- expected = [{'key': 'foo1', 'value': 'bar1'},
- {'key': 'foo2', 'value': 'bar2'}]
- self.assertEqual(utils.dict_to_metadata(dict(foo1='bar1',
- foo2='bar2')),
- expected)
-
- def test_dict_to_metadata_empty(self):
- self.assertEqual(utils.dict_to_metadata({}), [])
-
-
-class WrappedCodeTestCase(test.NoDBTestCase):
- """Test the get_wrapped_function utility method."""
-
- def _wrapper(self, function):
- @functools.wraps(function)
- def decorated_function(self, *args, **kwargs):
- function(self, *args, **kwargs)
- return decorated_function
-
- def test_single_wrapped(self):
- @self._wrapper
- def wrapped(self, instance, red=None, blue=None):
- pass
-
- func = utils.get_wrapped_function(wrapped)
- func_code = func.func_code
- self.assertEqual(4, len(func_code.co_varnames))
- self.assertIn('self', func_code.co_varnames)
- self.assertIn('instance', func_code.co_varnames)
- self.assertIn('red', func_code.co_varnames)
- self.assertIn('blue', func_code.co_varnames)
-
- def test_double_wrapped(self):
- @self._wrapper
- @self._wrapper
- def wrapped(self, instance, red=None, blue=None):
- pass
-
- func = utils.get_wrapped_function(wrapped)
- func_code = func.func_code
- self.assertEqual(4, len(func_code.co_varnames))
- self.assertIn('self', func_code.co_varnames)
- self.assertIn('instance', func_code.co_varnames)
- self.assertIn('red', func_code.co_varnames)
- self.assertIn('blue', func_code.co_varnames)
-
- def test_triple_wrapped(self):
- @self._wrapper
- @self._wrapper
- @self._wrapper
- def wrapped(self, instance, red=None, blue=None):
- pass
-
- func = utils.get_wrapped_function(wrapped)
- func_code = func.func_code
- self.assertEqual(4, len(func_code.co_varnames))
- self.assertIn('self', func_code.co_varnames)
- self.assertIn('instance', func_code.co_varnames)
- self.assertIn('red', func_code.co_varnames)
- self.assertIn('blue', func_code.co_varnames)
-
-
-class ExpectedArgsTestCase(test.NoDBTestCase):
- def test_passes(self):
- @utils.expects_func_args('foo', 'baz')
- def dec(f):
- return f
-
- @dec
- def func(foo, bar, baz="lol"):
- pass
-
- def test_raises(self):
- @utils.expects_func_args('foo', 'baz')
- def dec(f):
- return f
-
- def func(bar, baz):
- pass
-
- self.assertRaises(TypeError, dec, func)
-
- def test_var_no_of_args(self):
- @utils.expects_func_args('foo')
- def dec(f):
- return f
-
- @dec
- def func(bar, *args, **kwargs):
- pass
-
- def test_more_layers(self):
- @utils.expects_func_args('foo', 'baz')
- def dec(f):
- return f
-
- def dec_2(f):
- def inner_f(*a, **k):
- return f()
- return inner_f
-
- @dec_2
- def func(bar, baz):
- pass
-
- self.assertRaises(TypeError, dec, func)
-
-
-class StringLengthTestCase(test.NoDBTestCase):
- def test_check_string_length(self):
- self.assertIsNone(utils.check_string_length(
- 'test', 'name', max_length=255))
- self.assertRaises(exception.InvalidInput,
- utils.check_string_length,
- 11, 'name', max_length=255)
- self.assertRaises(exception.InvalidInput,
- utils.check_string_length,
- '', 'name', min_length=1)
- self.assertRaises(exception.InvalidInput,
- utils.check_string_length,
- 'a' * 256, 'name', max_length=255)
-
- def test_check_string_length_noname(self):
- self.assertIsNone(utils.check_string_length(
- 'test', max_length=255))
- self.assertRaises(exception.InvalidInput,
- utils.check_string_length,
- 11, max_length=255)
- self.assertRaises(exception.InvalidInput,
- utils.check_string_length,
- '', min_length=1)
- self.assertRaises(exception.InvalidInput,
- utils.check_string_length,
- 'a' * 256, max_length=255)
-
-
-class ValidateIntegerTestCase(test.NoDBTestCase):
- def test_valid_inputs(self):
- self.assertEqual(
- utils.validate_integer(42, "answer"), 42)
- self.assertEqual(
- utils.validate_integer("42", "answer"), 42)
- self.assertEqual(
- utils.validate_integer(
- "7", "lucky", min_value=7, max_value=8), 7)
- self.assertEqual(
- utils.validate_integer(
- 7, "lucky", min_value=6, max_value=7), 7)
- self.assertEqual(
- utils.validate_integer(
- 300, "Spartaaa!!!", min_value=300), 300)
- self.assertEqual(
- utils.validate_integer(
- "300", "Spartaaa!!!", max_value=300), 300)
-
- def test_invalid_inputs(self):
- self.assertRaises(exception.InvalidInput,
- utils.validate_integer,
- "im-not-an-int", "not-an-int")
- self.assertRaises(exception.InvalidInput,
- utils.validate_integer,
- 3.14, "Pie")
- self.assertRaises(exception.InvalidInput,
- utils.validate_integer,
- "299", "Sparta no-show",
- min_value=300, max_value=300)
- self.assertRaises(exception.InvalidInput,
- utils.validate_integer,
- 55, "doing 55 in a 54",
- max_value=54)
- self.assertRaises(exception.InvalidInput,
- utils.validate_integer,
- unichr(129), "UnicodeError",
- max_value=1000)
-
-
-class ValidateNeutronConfiguration(test.NoDBTestCase):
- def test_nova_network(self):
- self.assertFalse(utils.is_neutron())
-
- def test_neutron(self):
- self.flags(network_api_class='nova.network.neutronv2.api.API')
- self.assertTrue(utils.is_neutron())
-
- def test_quantum(self):
- self.flags(network_api_class='nova.network.quantumv2.api.API')
- self.assertTrue(utils.is_neutron())
-
-
-class AutoDiskConfigUtilTestCase(test.NoDBTestCase):
- def test_is_auto_disk_config_disabled(self):
- self.assertTrue(utils.is_auto_disk_config_disabled("Disabled "))
-
- def test_is_auto_disk_config_disabled_none(self):
- self.assertFalse(utils.is_auto_disk_config_disabled(None))
-
- def test_is_auto_disk_config_disabled_false(self):
- self.assertFalse(utils.is_auto_disk_config_disabled("false"))
-
-
-class GetSystemMetadataFromImageTestCase(test.NoDBTestCase):
- def get_image(self):
- image_meta = {
- "id": "fake-image",
- "name": "fake-name",
- "min_ram": 1,
- "min_disk": 1,
- "disk_format": "raw",
- "container_format": "bare",
- }
-
- return image_meta
-
- def get_flavor(self):
- flavor = {
- "id": "fake.flavor",
- "root_gb": 10,
- }
-
- return flavor
-
- def test_base_image_properties(self):
- image = self.get_image()
-
- # Verify that we inherit all the needed keys
- sys_meta = utils.get_system_metadata_from_image(image)
- for key in utils.SM_INHERITABLE_KEYS:
- sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
- self.assertEqual(image[key], sys_meta.get(sys_key))
-
- # Verify that everything else is ignored
- self.assertEqual(len(sys_meta), len(utils.SM_INHERITABLE_KEYS))
-
- def test_inherit_image_properties(self):
- image = self.get_image()
- image["properties"] = {"foo1": "bar", "foo2": "baz"}
-
- sys_meta = utils.get_system_metadata_from_image(image)
-
- # Verify that we inherit all the image properties
- for key, expected in image["properties"].iteritems():
- sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
- self.assertEqual(sys_meta[sys_key], expected)
-
- def test_vhd_min_disk_image(self):
- image = self.get_image()
- flavor = self.get_flavor()
-
- image["disk_format"] = "vhd"
-
- sys_meta = utils.get_system_metadata_from_image(image, flavor)
-
- # Verify that the min_disk property is taken from
- # flavor's root_gb when using vhd disk format
- sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, "min_disk")
- self.assertEqual(sys_meta[sys_key], flavor["root_gb"])
-
- def test_dont_inherit_empty_values(self):
- image = self.get_image()
-
- for key in utils.SM_INHERITABLE_KEYS:
- image[key] = None
-
- sys_meta = utils.get_system_metadata_from_image(image)
-
- # Verify that the empty properties have not been inherited
- for key in utils.SM_INHERITABLE_KEYS:
- sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
- self.assertNotIn(sys_key, sys_meta)
-
-
-class GetImageFromSystemMetadataTestCase(test.NoDBTestCase):
- def get_system_metadata(self):
- sys_meta = {
- "image_min_ram": 1,
- "image_min_disk": 1,
- "image_disk_format": "raw",
- "image_container_format": "bare",
- }
-
- return sys_meta
-
- def test_image_from_system_metadata(self):
- sys_meta = self.get_system_metadata()
- sys_meta["%soo1" % utils.SM_IMAGE_PROP_PREFIX] = "bar"
- sys_meta["%soo2" % utils.SM_IMAGE_PROP_PREFIX] = "baz"
-
- image = utils.get_image_from_system_metadata(sys_meta)
-
- # Verify that we inherit all the needed keys
- for key in utils.SM_INHERITABLE_KEYS:
- sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
- self.assertEqual(image[key], sys_meta.get(sys_key))
-
- # Verify that we inherit the rest of metadata as properties
- self.assertIn("properties", image)
-
- for key, value in image["properties"].iteritems():
- sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
- self.assertEqual(image["properties"][key], sys_meta[sys_key])
-
- def test_dont_inherit_empty_values(self):
- sys_meta = self.get_system_metadata()
-
- for key in utils.SM_INHERITABLE_KEYS:
- sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
- sys_meta[sys_key] = None
-
- image = utils.get_image_from_system_metadata(sys_meta)
-
- # Verify that the empty properties have not been inherited
- for key in utils.SM_INHERITABLE_KEYS:
- self.assertNotIn(key, image)
-
- def test_non_inheritable_image_properties(self):
- sys_meta = self.get_system_metadata()
- sys_meta["%soo1" % utils.SM_IMAGE_PROP_PREFIX] = "bar"
-
- self.flags(non_inheritable_image_properties=["foo1"])
-
- image = utils.get_image_from_system_metadata(sys_meta)
-
- # Verify that the foo1 key has not been inherited
- self.assertNotIn("foo1", image)
-
-
-class VersionTestCase(test.NoDBTestCase):
- def test_convert_version_to_int(self):
- self.assertEqual(utils.convert_version_to_int('6.2.0'), 6002000)
- self.assertEqual(utils.convert_version_to_int((6, 4, 3)), 6004003)
- self.assertEqual(utils.convert_version_to_int((5, )), 5)
- self.assertRaises(exception.NovaException,
- utils.convert_version_to_int, '5a.6b')
-
- def test_convert_version_to_string(self):
- self.assertEqual(utils.convert_version_to_str(6007000), '6.7.0')
- self.assertEqual(utils.convert_version_to_str(4), '4')
-
- def test_convert_version_to_tuple(self):
- self.assertEqual(utils.convert_version_to_tuple('6.7.0'), (6, 7, 0))
-
-
-class ConstantTimeCompareTestCase(test.NoDBTestCase):
- def test_constant_time_compare(self):
- self.assertTrue(utils.constant_time_compare("abcd1234", "abcd1234"))
- self.assertFalse(utils.constant_time_compare("abcd1234", "a"))
- self.assertFalse(utils.constant_time_compare("abcd1234", "ABCD234"))
diff --git a/nova/tests/test_wsgi.py b/nova/tests/test_wsgi.py
deleted file mode 100644
index 8efee518f7..0000000000
--- a/nova/tests/test_wsgi.py
+++ /dev/null
@@ -1,263 +0,0 @@
-# Copyright 2011 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Unit tests for `nova.wsgi`."""
-
-import os.path
-import tempfile
-import urllib2
-
-import eventlet
-import eventlet.wsgi
-import mock
-from oslo.config import cfg
-import requests
-import testtools
-import webob
-
-import nova.exception
-from nova import test
-from nova.tests import utils
-import nova.wsgi
-
-SSL_CERT_DIR = os.path.normpath(os.path.join(
- os.path.dirname(os.path.abspath(__file__)),
- 'ssl_cert'))
-CONF = cfg.CONF
-
-
-class TestLoaderNothingExists(test.NoDBTestCase):
- """Loader tests where os.path.exists always returns False."""
-
- def setUp(self):
- super(TestLoaderNothingExists, self).setUp()
- self.stubs.Set(os.path, 'exists', lambda _: False)
-
- def test_relpath_config_not_found(self):
- self.flags(api_paste_config='api-paste.ini')
- self.assertRaises(
- nova.exception.ConfigNotFound,
- nova.wsgi.Loader,
- )
-
- def test_asbpath_config_not_found(self):
- self.flags(api_paste_config='/etc/nova/api-paste.ini')
- self.assertRaises(
- nova.exception.ConfigNotFound,
- nova.wsgi.Loader,
- )
-
-
-class TestLoaderNormalFilesystem(test.NoDBTestCase):
- """Loader tests with normal filesystem (unmodified os.path module)."""
-
- _paste_config = """
-[app:test_app]
-use = egg:Paste#static
-document_root = /tmp
- """
-
- def setUp(self):
- super(TestLoaderNormalFilesystem, self).setUp()
- self.config = tempfile.NamedTemporaryFile(mode="w+t")
- self.config.write(self._paste_config.lstrip())
- self.config.seek(0)
- self.config.flush()
- self.loader = nova.wsgi.Loader(self.config.name)
-
- def test_config_found(self):
- self.assertEqual(self.config.name, self.loader.config_path)
-
- def test_app_not_found(self):
- self.assertRaises(
- nova.exception.PasteAppNotFound,
- self.loader.load_app,
- "nonexistent app",
- )
-
- def test_app_found(self):
- url_parser = self.loader.load_app("test_app")
- self.assertEqual("/tmp", url_parser.directory)
-
- def tearDown(self):
- self.config.close()
- super(TestLoaderNormalFilesystem, self).tearDown()
-
-
-class TestWSGIServer(test.NoDBTestCase):
- """WSGI server tests."""
-
- def test_no_app(self):
- server = nova.wsgi.Server("test_app", None)
- self.assertEqual("test_app", server.name)
-
- def test_custom_max_header_line(self):
- self.flags(max_header_line=4096) # Default value is 16384.
- nova.wsgi.Server("test_custom_max_header_line", None)
- self.assertEqual(CONF.max_header_line, eventlet.wsgi.MAX_HEADER_LINE)
-
- def test_start_random_port(self):
- server = nova.wsgi.Server("test_random_port", None,
- host="127.0.0.1", port=0)
- server.start()
- self.assertNotEqual(0, server.port)
- server.stop()
- server.wait()
-
- @testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
- def test_start_random_port_with_ipv6(self):
- server = nova.wsgi.Server("test_random_port", None,
- host="::1", port=0)
- server.start()
- self.assertEqual("::1", server.host)
- self.assertNotEqual(0, server.port)
- server.stop()
- server.wait()
-
- def test_server_pool_waitall(self):
- # test pools waitall method gets called while stopping server
- server = nova.wsgi.Server("test_server", None,
- host="127.0.0.1", port=4444)
- server.start()
- with mock.patch.object(server._pool,
- 'waitall') as mock_waitall:
- server.stop()
- server.wait()
- mock_waitall.assert_called_once_with()
-
- def test_uri_length_limit(self):
- server = nova.wsgi.Server("test_uri_length_limit", None,
- host="127.0.0.1", max_url_len=16384)
- server.start()
-
- uri = "http://127.0.0.1:%d/%s" % (server.port, 10000 * 'x')
- resp = requests.get(uri, proxies={"http": ""})
- eventlet.sleep(0)
- self.assertNotEqual(resp.status_code,
- requests.codes.REQUEST_URI_TOO_LARGE)
-
- uri = "http://127.0.0.1:%d/%s" % (server.port, 20000 * 'x')
- resp = requests.get(uri, proxies={"http": ""})
- eventlet.sleep(0)
- self.assertEqual(resp.status_code,
- requests.codes.REQUEST_URI_TOO_LARGE)
- server.stop()
- server.wait()
-
- def test_reset_pool_size_to_default(self):
- server = nova.wsgi.Server("test_resize", None,
- host="127.0.0.1", max_url_len=16384)
- server.start()
-
- # Stopping the server, which in turn sets pool size to 0
- server.stop()
- self.assertEqual(server._pool.size, 0)
-
- # Resetting pool size to default
- server.reset()
- server.start()
- self.assertEqual(server._pool.size, CONF.wsgi_default_pool_size)
-
-
-class TestWSGIServerWithSSL(test.NoDBTestCase):
- """WSGI server with SSL tests."""
-
- def setUp(self):
- super(TestWSGIServerWithSSL, self).setUp()
- self.flags(enabled_ssl_apis=['fake_ssl'],
- ssl_cert_file=os.path.join(SSL_CERT_DIR, 'certificate.crt'),
- ssl_key_file=os.path.join(SSL_CERT_DIR, 'privatekey.key'))
-
- def test_ssl_server(self):
-
- def test_app(env, start_response):
- start_response('200 OK', {})
- return ['PONG']
-
- fake_ssl_server = nova.wsgi.Server("fake_ssl", test_app,
- host="127.0.0.1", port=0,
- use_ssl=True)
- fake_ssl_server.start()
- self.assertNotEqual(0, fake_ssl_server.port)
-
- cli = eventlet.connect(("localhost", fake_ssl_server.port))
- cli = eventlet.wrap_ssl(cli,
- ca_certs=os.path.join(SSL_CERT_DIR, 'ca.crt'))
-
- cli.write('POST / HTTP/1.1\r\nHost: localhost\r\n'
- 'Connection: close\r\nContent-length:4\r\n\r\nPING')
- response = cli.read(8192)
- self.assertEqual(response[-4:], "PONG")
-
- fake_ssl_server.stop()
- fake_ssl_server.wait()
-
- def test_two_servers(self):
-
- def test_app(env, start_response):
- start_response('200 OK', {})
- return ['PONG']
-
- fake_ssl_server = nova.wsgi.Server("fake_ssl", test_app,
- host="127.0.0.1", port=0, use_ssl=True)
- fake_ssl_server.start()
- self.assertNotEqual(0, fake_ssl_server.port)
-
- fake_server = nova.wsgi.Server("fake", test_app,
- host="127.0.0.1", port=0)
- fake_server.start()
- self.assertNotEqual(0, fake_server.port)
-
- cli = eventlet.connect(("localhost", fake_ssl_server.port))
- cli = eventlet.wrap_ssl(cli,
- ca_certs=os.path.join(SSL_CERT_DIR, 'ca.crt'))
-
- cli.write('POST / HTTP/1.1\r\nHost: localhost\r\n'
- 'Connection: close\r\nContent-length:4\r\n\r\nPING')
- response = cli.read(8192)
- self.assertEqual(response[-4:], "PONG")
-
- cli = eventlet.connect(("localhost", fake_server.port))
-
- cli.sendall('POST / HTTP/1.1\r\nHost: localhost\r\n'
- 'Connection: close\r\nContent-length:4\r\n\r\nPING')
- response = cli.recv(8192)
- self.assertEqual(response[-4:], "PONG")
-
- fake_ssl_server.stop()
- fake_ssl_server.wait()
-
- @testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
- def test_app_using_ipv6_and_ssl(self):
- greetings = 'Hello, World!!!'
-
- @webob.dec.wsgify
- def hello_world(req):
- return greetings
-
- server = nova.wsgi.Server("fake_ssl",
- hello_world,
- host="::1",
- port=0,
- use_ssl=True)
-
- server.start()
-
- response = urllib2.urlopen('https://[::1]:%d/' % server.port)
- self.assertEqual(greetings, response.read())
-
- server.stop()
- server.wait()
diff --git a/nova/tests/CA/cacert.pem b/nova/tests/unit/CA/cacert.pem
index 9ffb5bb807..9ffb5bb807 100644
--- a/nova/tests/CA/cacert.pem
+++ b/nova/tests/unit/CA/cacert.pem
diff --git a/nova/tests/CA/private/cakey.pem b/nova/tests/unit/CA/private/cakey.pem
index eee54cc387..eee54cc387 100644
--- a/nova/tests/CA/private/cakey.pem
+++ b/nova/tests/unit/CA/private/cakey.pem
diff --git a/nova/tests/README.rst b/nova/tests/unit/README.rst
index 8ac999c740..8ac999c740 100644
--- a/nova/tests/README.rst
+++ b/nova/tests/unit/README.rst
diff --git a/nova/tests/unit/__init__.py b/nova/tests/unit/__init__.py
new file mode 100644
index 0000000000..31822c1516
--- /dev/null
+++ b/nova/tests/unit/__init__.py
@@ -0,0 +1,49 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`nova.tests.unit` -- Nova Unittests
+=====================================================
+
+.. automodule:: nova.tests.unit
+ :platform: Unix
+"""
+
+# TODO(mikal): move eventlet imports to nova.__init__ once we move to PBR
+import os
+import sys
+import traceback
+
+
+# NOTE(mikal): All of this is because if dnspython is present in your
+# environment then eventlet monkeypatches socket.getaddrinfo() with an
+# implementation which doesn't work for IPv6. What we're checking here is
+# that the magic environment variable was set when the import happened.
+# NOTE(dims): Prevent this code from kicking in under docs generation
+# as it leads to spurious errors/warning.
+stack = traceback.extract_stack()
+if ('eventlet' in sys.modules and
+ os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes' and
+ (len(stack) < 2 or 'sphinx' not in stack[-2][0])):
+ raise ImportError('eventlet imported before nova/cmd/__init__ '
+ '(env var set to %s)'
+ % os.environ.get('EVENTLET_NO_GREENDNS'))
+
+os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
+
+import eventlet
+
+eventlet.monkey_patch(os=False)
diff --git a/nova/tests/api/__init__.py b/nova/tests/unit/api/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/api/__init__.py
+++ b/nova/tests/unit/api/__init__.py
diff --git a/nova/tests/api/ec2/__init__.py b/nova/tests/unit/api/ec2/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/api/ec2/__init__.py
+++ b/nova/tests/unit/api/ec2/__init__.py
diff --git a/nova/tests/api/ec2/public_key/dummy.fingerprint b/nova/tests/unit/api/ec2/public_key/dummy.fingerprint
index 715bca27a2..715bca27a2 100644
--- a/nova/tests/api/ec2/public_key/dummy.fingerprint
+++ b/nova/tests/unit/api/ec2/public_key/dummy.fingerprint
diff --git a/nova/tests/api/ec2/public_key/dummy.pub b/nova/tests/unit/api/ec2/public_key/dummy.pub
index d4cf2bc0d8..d4cf2bc0d8 100644
--- a/nova/tests/api/ec2/public_key/dummy.pub
+++ b/nova/tests/unit/api/ec2/public_key/dummy.pub
diff --git a/nova/tests/unit/api/ec2/test_api.py b/nova/tests/unit/api/ec2/test_api.py
new file mode 100644
index 0000000000..cc4a2adb75
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_api.py
@@ -0,0 +1,635 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for the API endpoint."""
+
+import random
+import re
+import StringIO
+
+import boto
+import boto.connection
+from boto.ec2 import regioninfo
+from boto import exception as boto_exc
+# newer versions of boto use their own wrapper on top of httplib.HTTPResponse
+if hasattr(boto.connection, 'HTTPResponse'):
+ httplib = boto.connection
+else:
+ import httplib
+import fixtures
+import webob
+
+from nova.api import auth
+from nova.api import ec2
+from nova.api.ec2 import ec2utils
+from nova import block_device
+from nova import context
+from nova import exception
+from nova.openstack.common import versionutils
+from nova import test
+from nova.tests.unit import matchers
+
+
+class FakeHttplibSocket(object):
+ """a fake socket implementation for httplib.HTTPResponse, trivial."""
+ def __init__(self, response_string):
+ self.response_string = response_string
+ self._buffer = StringIO.StringIO(response_string)
+
+ def makefile(self, _mode, _other):
+ """Returns the socket's internal buffer."""
+ return self._buffer
+
+
+class FakeHttplibConnection(object):
+ """A fake httplib.HTTPConnection for boto to use
+
+ requests made via this connection actually get translated and routed into
+ our WSGI app, we then wait for the response and turn it back into
+ the HTTPResponse that boto expects.
+ """
+ def __init__(self, app, host, is_secure=False):
+ self.app = app
+ self.host = host
+
+ def request(self, method, path, data, headers):
+ req = webob.Request.blank(path)
+ req.method = method
+ req.body = data
+ req.headers = headers
+ req.headers['Accept'] = 'text/html'
+ req.host = self.host
+ # Call the WSGI app, get the HTTP response
+ resp = str(req.get_response(self.app))
+ # For some reason, the response doesn't have "HTTP/1.0 " prepended; I
+ # guess that's a function the web server usually provides.
+ resp = "HTTP/1.0 %s" % resp
+ self.sock = FakeHttplibSocket(resp)
+ self.http_response = httplib.HTTPResponse(self.sock)
+ # NOTE(vish): boto is accessing private variables for some reason
+ self._HTTPConnection__response = self.http_response
+ self.http_response.begin()
+
+ def getresponse(self):
+ return self.http_response
+
+ def getresponsebody(self):
+ return self.sock.response_string
+
+ def close(self):
+ """Required for compatibility with boto/tornado."""
+ pass
+
+
+class XmlConversionTestCase(test.NoDBTestCase):
+ """Unit test api xml conversion."""
+ def test_number_conversion(self):
+ conv = ec2utils._try_convert
+ self.assertIsNone(conv('None'))
+ self.assertEqual(conv('True'), True)
+ self.assertEqual(conv('TRUE'), True)
+ self.assertEqual(conv('true'), True)
+ self.assertEqual(conv('False'), False)
+ self.assertEqual(conv('FALSE'), False)
+ self.assertEqual(conv('false'), False)
+ self.assertEqual(conv('0'), 0)
+ self.assertEqual(conv('42'), 42)
+ self.assertEqual(conv('3.14'), 3.14)
+ self.assertEqual(conv('-57.12'), -57.12)
+ self.assertEqual(conv('0x57'), 0x57)
+ self.assertEqual(conv('-0x57'), -0x57)
+ self.assertEqual(conv('-'), '-')
+ self.assertEqual(conv('-0'), 0)
+ self.assertEqual(conv('0.0'), 0.0)
+ self.assertEqual(conv('1e-8'), 0.0)
+ self.assertEqual(conv('-1e-8'), 0.0)
+ self.assertEqual(conv('0xDD8G'), '0xDD8G')
+ self.assertEqual(conv('0XDD8G'), '0XDD8G')
+ self.assertEqual(conv('-stringy'), '-stringy')
+ self.assertEqual(conv('stringy'), 'stringy')
+ self.assertEqual(conv('add'), 'add')
+ self.assertEqual(conv('remove'), 'remove')
+ self.assertEqual(conv(''), '')
+
+
+class Ec2utilsTestCase(test.NoDBTestCase):
+ def test_ec2_id_to_id(self):
+ self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30)
+ self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29)
+ self.assertEqual(ec2utils.ec2_id_to_id('snap-0000001c'), 28)
+ self.assertEqual(ec2utils.ec2_id_to_id('vol-0000001b'), 27)
+
+ def test_bad_ec2_id(self):
+ self.assertRaises(exception.InvalidEc2Id,
+ ec2utils.ec2_id_to_id,
+ 'badone')
+
+ def test_id_to_ec2_id(self):
+ self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e')
+ self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d')
+ self.assertEqual(ec2utils.id_to_ec2_snap_id(28), 'snap-0000001c')
+ self.assertEqual(ec2utils.id_to_ec2_vol_id(27), 'vol-0000001b')
+
+ def test_dict_from_dotted_str(self):
+ in_str = [('BlockDeviceMapping.1.DeviceName', '/dev/sda1'),
+ ('BlockDeviceMapping.1.Ebs.SnapshotId', 'snap-0000001c'),
+ ('BlockDeviceMapping.1.Ebs.VolumeSize', '80'),
+ ('BlockDeviceMapping.1.Ebs.DeleteOnTermination', 'false'),
+ ('BlockDeviceMapping.2.DeviceName', '/dev/sdc'),
+ ('BlockDeviceMapping.2.VirtualName', 'ephemeral0')]
+ expected_dict = {
+ 'block_device_mapping': {
+ '1': {'device_name': '/dev/sda1',
+ 'ebs': {'snapshot_id': 'snap-0000001c',
+ 'volume_size': 80,
+ 'delete_on_termination': False}},
+ '2': {'device_name': '/dev/sdc',
+ 'virtual_name': 'ephemeral0'}}}
+ out_dict = ec2utils.dict_from_dotted_str(in_str)
+
+ self.assertThat(out_dict, matchers.DictMatches(expected_dict))
+
+ def test_properties_root_defice_name(self):
+ mappings = [{"device": "/dev/sda1", "virtual": "root"}]
+ properties0 = {'mappings': mappings}
+ properties1 = {'root_device_name': '/dev/sdb', 'mappings': mappings}
+
+ root_device_name = block_device.properties_root_device_name(
+ properties0)
+ self.assertEqual(root_device_name, '/dev/sda1')
+
+ root_device_name = block_device.properties_root_device_name(
+ properties1)
+ self.assertEqual(root_device_name, '/dev/sdb')
+
+ def test_regex_from_ec2_regex(self):
+ def _test_re(ec2_regex, expected, literal, match=True):
+ regex = ec2utils.regex_from_ec2_regex(ec2_regex)
+ self.assertEqual(regex, expected)
+ if match:
+ self.assertIsNotNone(re.match(regex, literal))
+ else:
+ self.assertIsNone(re.match(regex, literal))
+
+ # wildcards
+ _test_re('foo', '\Afoo\Z(?s)', 'foo')
+ _test_re('foo', '\Afoo\Z(?s)', 'baz', match=False)
+ _test_re('foo?bar', '\Afoo.bar\Z(?s)', 'foo bar')
+ _test_re('foo?bar', '\Afoo.bar\Z(?s)', 'foo bar', match=False)
+ _test_re('foo*bar', '\Afoo.*bar\Z(?s)', 'foo QUUX bar')
+
+ # backslashes and escaped wildcards
+ _test_re('foo\\', '\Afoo\\\\\Z(?s)', 'foo\\')
+ _test_re('foo*bar', '\Afoo.*bar\Z(?s)', 'zork QUUX bar', match=False)
+ _test_re('foo\\?bar', '\Afoo[?]bar\Z(?s)', 'foo?bar')
+ _test_re('foo\\?bar', '\Afoo[?]bar\Z(?s)', 'foo bar', match=False)
+ _test_re('foo\\*bar', '\Afoo[*]bar\Z(?s)', 'foo*bar')
+ _test_re('foo\\*bar', '\Afoo[*]bar\Z(?s)', 'foo bar', match=False)
+
+ # analog to the example given in the EC2 API docs
+ ec2_regex = '\*nova\?\\end'
+ expected = r'\A[*]nova[?]\\end\Z(?s)'
+ literal = r'*nova?\end'
+ _test_re(ec2_regex, expected, literal)
+
+ def test_mapping_prepend_dev(self):
+ mappings = [
+ {'virtual': 'ami',
+ 'device': 'sda1'},
+ {'virtual': 'root',
+ 'device': '/dev/sda1'},
+
+ {'virtual': 'swap',
+ 'device': 'sdb1'},
+ {'virtual': 'swap',
+ 'device': '/dev/sdb2'},
+
+ {'virtual': 'ephemeral0',
+ 'device': 'sdc1'},
+ {'virtual': 'ephemeral1',
+ 'device': '/dev/sdc1'}]
+ expected_result = [
+ {'virtual': 'ami',
+ 'device': 'sda1'},
+ {'virtual': 'root',
+ 'device': '/dev/sda1'},
+
+ {'virtual': 'swap',
+ 'device': '/dev/sdb1'},
+ {'virtual': 'swap',
+ 'device': '/dev/sdb2'},
+
+ {'virtual': 'ephemeral0',
+ 'device': '/dev/sdc1'},
+ {'virtual': 'ephemeral1',
+ 'device': '/dev/sdc1'}]
+ self.assertThat(block_device.mappings_prepend_dev(mappings),
+ matchers.DictListMatches(expected_result))
+
+
+class ApiEc2TestCase(test.TestCase):
+ """Unit test for the cloud controller on an EC2 API."""
+ def setUp(self):
+ super(ApiEc2TestCase, self).setUp()
+ self.host = '127.0.0.1'
+ # NOTE(vish): skipping the Authorizer
+ roles = ['sysadmin', 'netadmin']
+ ctxt = context.RequestContext('fake', 'fake', roles=roles)
+ self.app = auth.InjectContext(ctxt, ec2.FaultWrapper(
+ ec2.RequestLogging(ec2.Requestify(ec2.Authorizer(ec2.Executor()
+ ), 'nova.api.ec2.cloud.CloudController'))))
+ self.useFixture(fixtures.FakeLogger('boto'))
+
+ def expect_http(self, host=None, is_secure=False, api_version=None):
+ """Returns a new EC2 connection."""
+ self.ec2 = boto.connect_ec2(
+ aws_access_key_id='fake',
+ aws_secret_access_key='fake',
+ is_secure=False,
+ region=regioninfo.RegionInfo(None, 'test', self.host),
+ port=8773,
+ path='/services/Cloud')
+ if api_version:
+ self.ec2.APIVersion = api_version
+
+ self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
+ self.http = FakeHttplibConnection(
+ self.app, '%s:8773' % (self.host), False)
+ # pylint: disable=E1103
+ if versionutils.is_compatible('2.14', boto.Version, same_major=False):
+ self.ec2.new_http_connection(host or self.host, 8773,
+ is_secure).AndReturn(self.http)
+ elif versionutils.is_compatible('2', boto.Version, same_major=False):
+ self.ec2.new_http_connection(host or '%s:8773' % (self.host),
+ is_secure).AndReturn(self.http)
+ else:
+ self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
+ return self.http
+
+ def test_xmlns_version_matches_request_version(self):
+ self.expect_http(api_version='2010-10-30')
+ self.mox.ReplayAll()
+
+ # Any request should be fine
+ self.ec2.get_all_instances()
+ self.assertTrue(self.ec2.APIVersion in self.http.getresponsebody(),
+ 'The version in the xmlns of the response does '
+ 'not match the API version given in the request.')
+
+ def test_describe_instances(self):
+ """Test that, after creating a user and a project, the describe
+ instances call to the API works properly.
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.assertEqual(self.ec2.get_all_instances(), [])
+
+ def test_terminate_invalid_instance(self):
+ # Attempt to terminate an invalid instance.
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.assertRaises(boto_exc.EC2ResponseError,
+ self.ec2.terminate_instances, "i-00000005")
+
+ def test_get_all_key_pairs(self):
+ """Test that, after creating a user and project and generating
+ a key pair, that the API call to list key pairs works properly.
+ """
+ keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
+ for x in range(random.randint(4, 8)))
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.ec2.create_key_pair(keyname)
+ rv = self.ec2.get_all_key_pairs()
+ results = [k for k in rv if k.name == keyname]
+ self.assertEqual(len(results), 1)
+
+ def test_create_duplicate_key_pair(self):
+ """Test that, after successfully generating a keypair,
+ requesting a second keypair with the same name fails sanely.
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.ec2.create_key_pair('test')
+
+ try:
+ self.ec2.create_key_pair('test')
+ except boto_exc.EC2ResponseError as e:
+ if e.code == 'InvalidKeyPair.Duplicate':
+ pass
+ else:
+ self.assertEqual('InvalidKeyPair.Duplicate', e.code)
+ else:
+ self.fail('Exception not raised.')
+
+ def test_get_all_security_groups(self):
+ # Test that we can retrieve security groups.
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ self.assertEqual(len(rv), 1)
+ self.assertEqual(rv[0].name, 'default')
+
+ def test_create_delete_security_group(self):
+ # Test that we can create a security group.
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
+ for x in range(random.randint(4, 8)))
+
+ self.ec2.create_security_group(security_group_name, 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+ self.assertEqual(len(rv), 2)
+ self.assertIn(security_group_name, [group.name for group in rv])
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+
+ def test_group_name_valid_chars_security_group(self):
+ """Test that we sanely handle invalid security group names.
+
+ EC2 API Spec states we should only accept alphanumeric characters,
+ spaces, dashes, and underscores. Amazon implementation
+ accepts more characters - so, [:print:] is ok.
+ """
+ bad_strict_ec2 = "aa \t\x01\x02\x7f"
+ bad_amazon_ec2 = "aa #^% -=99"
+ test_raise = [
+ (True, bad_amazon_ec2, "test desc"),
+ (True, "test name", bad_amazon_ec2),
+ (False, bad_strict_ec2, "test desc"),
+ ]
+ for t in test_raise:
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.flags(ec2_strict_validation=t[0])
+ self.assertRaises(boto_exc.EC2ResponseError,
+ self.ec2.create_security_group,
+ t[1],
+ t[2])
+ test_accept = [
+ (False, bad_amazon_ec2, "test desc"),
+ (False, "test name", bad_amazon_ec2),
+ ]
+ for t in test_accept:
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.flags(ec2_strict_validation=t[0])
+ self.ec2.create_security_group(t[1], t[2])
+ self.expect_http()
+ self.mox.ReplayAll()
+ self.ec2.delete_security_group(t[1])
+
+ def test_group_name_valid_length_security_group(self):
+ """Test that we sanely handle invalid security group names.
+
+ API Spec states that the length should not exceed 255 char.
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ # Test block group_name > 255 chars
+ security_group_name = "".join(random.choice("poiuytrewqasdfghjklmnbvc")
+ for x in range(random.randint(256, 266)))
+
+ self.assertRaises(boto_exc.EC2ResponseError,
+ self.ec2.create_security_group,
+ security_group_name,
+ 'test group')
+
+ def test_authorize_revoke_security_group_cidr(self):
+ """Test that we can add and remove CIDR based rules
+ to a security group
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
+ for x in range(random.randint(4, 8)))
+
+ group = self.ec2.create_security_group(security_group_name,
+ 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.authorize('tcp', 80, 81, '0.0.0.0/0')
+ group.authorize('icmp', -1, -1, '0.0.0.0/0')
+ group.authorize('udp', 80, 81, '0.0.0.0/0')
+ group.authorize('tcp', 1, 65535, '0.0.0.0/0')
+ group.authorize('udp', 1, 65535, '0.0.0.0/0')
+ group.authorize('icmp', 1, 0, '0.0.0.0/0')
+ group.authorize('icmp', 0, 1, '0.0.0.0/0')
+ group.authorize('icmp', 0, 0, '0.0.0.0/0')
+
+ def _assert(message, *args):
+ try:
+ group.authorize(*args)
+ except boto_exc.EC2ResponseError as e:
+ self.assertEqual(e.status, 400, 'Expected status to be 400')
+ self.assertIn(message, e.error_message)
+ else:
+ raise self.failureException, 'EC2ResponseError not raised'
+
+ # Invalid CIDR address
+ _assert('Invalid CIDR', 'tcp', 80, 81, '0.0.0.0/0444')
+ # Missing ports
+ _assert('Not enough parameters', 'tcp', '0.0.0.0/0')
+ # from port cannot be greater than to port
+ _assert('Invalid port range', 'tcp', 100, 1, '0.0.0.0/0')
+ # For tcp, negative values are not allowed
+ _assert('Invalid port range', 'tcp', -1, 1, '0.0.0.0/0')
+ # For tcp, valid port range 1-65535
+ _assert('Invalid port range', 'tcp', 1, 65599, '0.0.0.0/0')
+ # Invalid Cidr for ICMP type
+ _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.444.0/4')
+ # Invalid protocol
+ _assert('Invalid IP protocol', 'xyz', 1, 14, '0.0.0.0/0')
+ # Invalid port
+ _assert('Invalid input received: To and From ports must be integers',
+ 'tcp', " ", "81", '0.0.0.0/0')
+ # Invalid icmp port
+ _assert('Invalid input received: '
+ 'Type and Code must be integers for ICMP protocol type',
+ 'icmp', " ", "81", '0.0.0.0/0')
+ # Invalid CIDR Address
+ _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0')
+ # Invalid CIDR Address
+ _assert('Invalid CIDR', 'icmp', -1, -1, '0.0.0.0/')
+ # Invalid Cidr ports
+ _assert('Invalid port range', 'icmp', 1, 256, '0.0.0.0/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ group = [grp for grp in rv if grp.name == security_group_name][0]
+
+ self.assertEqual(len(group.rules), 8)
+ self.assertEqual(int(group.rules[0].from_port), 80)
+ self.assertEqual(int(group.rules[0].to_port), 81)
+ self.assertEqual(len(group.rules[0].grants), 1)
+ self.assertEqual(str(group.rules[0].grants[0]), '0.0.0.0/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.revoke('tcp', 80, 81, '0.0.0.0/0')
+ group.revoke('icmp', -1, -1, '0.0.0.0/0')
+ group.revoke('udp', 80, 81, '0.0.0.0/0')
+ group.revoke('tcp', 1, 65535, '0.0.0.0/0')
+ group.revoke('udp', 1, 65535, '0.0.0.0/0')
+ group.revoke('icmp', 1, 0, '0.0.0.0/0')
+ group.revoke('icmp', 0, 1, '0.0.0.0/0')
+ group.revoke('icmp', 0, 0, '0.0.0.0/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ rv = self.ec2.get_all_security_groups()
+
+ self.assertEqual(len(rv), 1)
+ self.assertEqual(rv[0].name, 'default')
+
+ def test_authorize_revoke_security_group_cidr_v6(self):
+ """Test that we can add and remove CIDR based rules
+ to a security group for IPv6
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd")
+ for x in range(random.randint(4, 8)))
+
+ group = self.ec2.create_security_group(security_group_name,
+ 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.authorize('tcp', 80, 81, '::/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ group = [grp for grp in rv if grp.name == security_group_name][0]
+ self.assertEqual(len(group.rules), 1)
+ self.assertEqual(int(group.rules[0].from_port), 80)
+ self.assertEqual(int(group.rules[0].to_port), 81)
+ self.assertEqual(len(group.rules[0].grants), 1)
+ self.assertEqual(str(group.rules[0].grants[0]), '::/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.revoke('tcp', 80, 81, '::/0')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ rv = self.ec2.get_all_security_groups()
+
+ self.assertEqual(len(rv), 1)
+ self.assertEqual(rv[0].name, 'default')
+
+ def test_authorize_revoke_security_group_foreign_group(self):
+ """Test that we can grant and revoke another security group access
+ to a security group
+ """
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rand_string = 'sdiuisudfsdcnpaqwertasd'
+ security_group_name = "".join(random.choice(rand_string)
+ for x in range(random.randint(4, 8)))
+ other_security_group_name = "".join(random.choice(rand_string)
+ for x in range(random.randint(4, 8)))
+
+ group = self.ec2.create_security_group(security_group_name,
+ 'test group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ other_group = self.ec2.create_security_group(other_security_group_name,
+ 'some other group')
+
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+
+ group.authorize(src_group=other_group)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ # I don't bother checkng that we actually find it here,
+ # because the create/delete unit test further up should
+ # be good enough for that.
+ for group in rv:
+ if group.name == security_group_name:
+ self.assertEqual(len(group.rules), 3)
+ self.assertEqual(len(group.rules[0].grants), 1)
+ self.assertEqual(str(group.rules[0].grants[0]),
+ '%s-%s' % (other_security_group_name, 'fake'))
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ rv = self.ec2.get_all_security_groups()
+
+ for group in rv:
+ if group.name == security_group_name:
+ self.expect_http()
+ self.mox.ReplayAll()
+ group.connection = self.ec2
+ group.revoke(src_group=other_group)
+
+ self.expect_http()
+ self.mox.ReplayAll()
+
+ self.ec2.delete_security_group(security_group_name)
+ self.ec2.delete_security_group(other_security_group_name)
diff --git a/nova/tests/api/ec2/test_apirequest.py b/nova/tests/unit/api/ec2/test_apirequest.py
index 4b2dee96f8..4b2dee96f8 100644
--- a/nova/tests/api/ec2/test_apirequest.py
+++ b/nova/tests/unit/api/ec2/test_apirequest.py
diff --git a/nova/tests/unit/api/ec2/test_cinder_cloud.py b/nova/tests/unit/api/ec2/test_cinder_cloud.py
new file mode 100644
index 0000000000..78db126aee
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_cinder_cloud.py
@@ -0,0 +1,1096 @@
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+import fixtures
+from oslo.config import cfg
+
+from nova.api.ec2 import cloud
+from nova.api.ec2 import ec2utils
+from nova.compute import api as compute_api
+from nova.compute import flavors
+from nova.compute import utils as compute_utils
+from nova import context
+from nova import db
+from nova import exception
+from nova import objects
+from nova import test
+from nova.tests.unit import cast_as_call
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_notifier
+from nova.tests.unit import fake_utils
+from nova.tests.unit.image import fake
+from nova.tests.unit import matchers
+from nova import volume
+
+CONF = cfg.CONF
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+CONF.import_opt('default_flavor', 'nova.compute.flavors')
+CONF.import_opt('use_ipv6', 'nova.netconf')
+
+
+def get_fake_cache():
+ def _ip(ip, fixed=True, floats=None):
+ ip_dict = {'address': ip, 'type': 'fixed'}
+ if not fixed:
+ ip_dict['type'] = 'floating'
+ if fixed and floats:
+ ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
+ return ip_dict
+
+ info = [{'address': 'aa:bb:cc:dd:ee:ff',
+ 'id': 1,
+ 'network': {'bridge': 'br0',
+ 'id': 1,
+ 'label': 'private',
+ 'subnets': [{'cidr': '192.168.0.0/24',
+ 'ips': [_ip('192.168.0.3',
+ floats=['1.2.3.4',
+ '5.6.7.8']),
+ _ip('192.168.0.4')]}]}}]
+ if CONF.use_ipv6:
+ ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
+ info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
+ 'ips': [_ip(ipv6_addr)]})
+ return info
+
+
+def get_instances_with_cached_ips(orig_func, *args, **kwargs):
+ """Kludge the cache into instance(s) without having to create DB
+ entries
+ """
+ instances = orig_func(*args, **kwargs)
+ if isinstance(instances, list):
+ for instance in instances:
+ instance['info_cache'] = {'network_info': get_fake_cache()}
+ else:
+ instances['info_cache'] = {'network_info': get_fake_cache()}
+ return instances
+
+
+class CinderCloudTestCase(test.TestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(CinderCloudTestCase, self).setUp()
+ ec2utils.reset_cache()
+ self.useFixture(fixtures.TempDir()).path
+ fake_utils.stub_out_utils_spawn_n(self.stubs)
+ self.flags(compute_driver='nova.virt.fake.FakeDriver',
+ volume_api_class='nova.tests.unit.fake_volume.API')
+
+ def fake_show(meh, context, id, **kwargs):
+ return {'id': id,
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine',
+ 'image_state': 'available'}}
+
+ def fake_detail(_self, context, **kwargs):
+ image = fake_show(None, context, None)
+ image['name'] = kwargs.get('filters', {}).get('name')
+ return [image]
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+ fake.stub_out_image_service(self.stubs)
+
+ def dumb(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # set up our cloud
+ self.cloud = cloud.CloudController()
+ self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
+
+ # Short-circuit the conductor service
+ self.flags(use_local=True, group='conductor')
+
+ # Stub out the notification service so we use the no-op serializer
+ # and avoid lazy-load traces with the wrap_exception decorator in
+ # the compute service.
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
+ # set up services
+ self.conductor = self.start_service('conductor',
+ manager=CONF.conductor.manager)
+ self.compute = self.start_service('compute')
+ self.scheduler = self.start_service('scheduler')
+ self.network = self.start_service('network')
+ self.consoleauth = self.start_service('consoleauth')
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id,
+ is_admin=True)
+ self.volume_api = volume.API()
+ self.volume_api.reset_fake_api(self.context)
+
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ # make sure we can map ami-00000001/2 to a uuid in FakeImageService
+ db.s3_image_create(self.context,
+ 'cedef40a-ed67-4d10-800e-17455edce175')
+ db.s3_image_create(self.context,
+ '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
+
+ def tearDown(self):
+ self.volume_api.reset_fake_api(self.context)
+ super(CinderCloudTestCase, self).tearDown()
+ fake.FakeImageService_reset()
+
+ def _stub_instance_get_with_fixed_ips(self, func_name):
+ orig_func = getattr(self.cloud.compute_api, func_name)
+
+ def fake_get(*args, **kwargs):
+ return get_instances_with_cached_ips(orig_func, *args, **kwargs)
+ self.stubs.Set(self.cloud.compute_api, func_name, fake_get)
+
+ def _create_key(self, name):
+ # NOTE(vish): create depends on pool, so just call helper directly
+ keypair_api = compute_api.KeypairAPI()
+ return keypair_api.create_key_pair(self.context, self.context.user_id,
+ name)
+
+ def test_describe_volumes(self):
+ # Makes sure describe_volumes works and filters results.
+
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ name='test-1',
+ description='test volume 1')
+ self.assertEqual(vol1['status'], 'available')
+ vol2 = self.cloud.create_volume(self.context,
+ size=1,
+ name='test-2',
+ description='test volume 2')
+ result = self.cloud.describe_volumes(self.context)
+ self.assertEqual(len(result['volumeSet']), 2)
+ result = self.cloud.describe_volumes(self.context,
+ [vol1['volumeId']])
+ self.assertEqual(len(result['volumeSet']), 1)
+ self.assertEqual(vol1['volumeId'], result['volumeSet'][0]['volumeId'])
+
+ self.cloud.delete_volume(self.context, vol1['volumeId'])
+ self.cloud.delete_volume(self.context, vol2['volumeId'])
+
+ def test_format_volume_maps_status(self):
+ fake_volume = {'id': 1,
+ 'status': 'creating',
+ 'availability_zone': 'nova',
+ 'volumeId': 'vol-0000000a',
+ 'attachmentSet': [{}],
+ 'snapshotId': None,
+ 'created_at': '2013-04-18T06:03:35.025626',
+ 'size': 1,
+ 'mountpoint': None,
+ 'attach_status': None}
+
+ self.assertEqual(self.cloud._format_volume(self.context,
+ fake_volume)['status'],
+ 'creating')
+
+ fake_volume['status'] = 'attaching'
+ self.assertEqual(self.cloud._format_volume(self.context,
+ fake_volume)['status'],
+ 'in-use')
+ fake_volume['status'] = 'detaching'
+ self.assertEqual(self.cloud._format_volume(self.context,
+ fake_volume)['status'],
+ 'in-use')
+ fake_volume['status'] = 'banana'
+ self.assertEqual(self.cloud._format_volume(self.context,
+ fake_volume)['status'],
+ 'banana')
+
+ def test_create_volume_in_availability_zone(self):
+ """Makes sure create_volume works when we specify an availability
+ zone
+ """
+ availability_zone = 'zone1:host1'
+
+ result = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ volume_id = result['volumeId']
+ availabilityZone = result['availabilityZone']
+ self.assertEqual(availabilityZone, availability_zone)
+ result = self.cloud.describe_volumes(self.context)
+ self.assertEqual(len(result['volumeSet']), 1)
+ self.assertEqual(result['volumeSet'][0]['volumeId'], volume_id)
+ self.assertEqual(result['volumeSet'][0]['availabilityZone'],
+ availabilityZone)
+
+ self.cloud.delete_volume(self.context, volume_id)
+
+ def test_create_volume_from_snapshot(self):
+ # Makes sure create_volume works when we specify a snapshot.
+ availability_zone = 'zone1:host1'
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ snap = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-1',
+ description='test snap of vol %s'
+ % vol1['volumeId'])
+
+ vol2 = self.cloud.create_volume(self.context,
+ snapshot_id=snap['snapshotId'])
+ volume1_id = vol1['volumeId']
+ volume2_id = vol2['volumeId']
+
+ result = self.cloud.describe_volumes(self.context)
+ self.assertEqual(len(result['volumeSet']), 2)
+ self.assertEqual(result['volumeSet'][1]['volumeId'], volume2_id)
+
+ self.cloud.delete_volume(self.context, volume2_id)
+ self.cloud.delete_snapshot(self.context, snap['snapshotId'])
+ self.cloud.delete_volume(self.context, volume1_id)
+
+ def test_volume_status_of_attaching_volume(self):
+ """Test the volume's status in response when attaching a volume."""
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ name='test-ls',
+ description='test volume ls')
+ self.assertEqual('available', vol1['status'])
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ ec2_instance_id = self._run_instance(**kwargs)
+ resp = self.cloud.attach_volume(self.context,
+ vol1['volumeId'],
+ ec2_instance_id,
+ '/dev/sde')
+        # Here, the status should be 'attaching', but it can be 'attached' in
+        # a unit test scenario if the attach action is very fast.
+ self.assertIn(resp['status'], ('attaching', 'attached'))
+
+ def test_volume_status_of_detaching_volume(self):
+ """Test the volume's status in response when detaching a volume."""
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ name='test-ls',
+ description='test volume ls')
+ self.assertEqual('available', vol1['status'])
+ vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1,
+ 'block_device_mapping': [{'device_name': '/dev/sdb',
+ 'volume_id': vol1_uuid,
+ 'delete_on_termination': True}]}
+ self._run_instance(**kwargs)
+ resp = self.cloud.detach_volume(self.context,
+ vol1['volumeId'])
+
+        # Here, the status should be 'detaching', but it can be 'detached' in
+        # a unit test scenario if the detach action is very fast.
+ self.assertIn(resp['status'], ('detaching', 'detached'))
+
+ def test_describe_snapshots(self):
+ # Makes sure describe_snapshots works and filters results.
+ availability_zone = 'zone1:host1'
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ snap1 = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-1',
+ description='test snap1 of vol %s' %
+ vol1['volumeId'])
+ snap2 = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-1',
+ description='test snap2 of vol %s' %
+ vol1['volumeId'])
+
+ result = self.cloud.describe_snapshots(self.context)
+ self.assertEqual(len(result['snapshotSet']), 2)
+ result = self.cloud.describe_snapshots(
+ self.context,
+ snapshot_id=[snap2['snapshotId']])
+ self.assertEqual(len(result['snapshotSet']), 1)
+
+ self.cloud.delete_snapshot(self.context, snap1['snapshotId'])
+ self.cloud.delete_snapshot(self.context, snap2['snapshotId'])
+ self.cloud.delete_volume(self.context, vol1['volumeId'])
+
+ def test_format_snapshot_maps_status(self):
+ fake_snapshot = {'status': 'new',
+ 'id': 1,
+ 'volume_id': 1,
+ 'created_at': 1353560191.08117,
+ 'progress': 90,
+ 'project_id': str(uuid.uuid4()),
+ 'volume_size': 10000,
+ 'display_description': 'desc'}
+
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'pending')
+
+ fake_snapshot['status'] = 'creating'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'pending')
+
+ fake_snapshot['status'] = 'available'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'completed')
+
+ fake_snapshot['status'] = 'active'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'completed')
+
+ fake_snapshot['status'] = 'deleting'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'pending')
+
+ fake_snapshot['status'] = 'deleted'
+ self.assertIsNone(self.cloud._format_snapshot(self.context,
+ fake_snapshot))
+
+ fake_snapshot['status'] = 'error'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'error')
+
+ fake_snapshot['status'] = 'banana'
+ self.assertEqual(self.cloud._format_snapshot(self.context,
+ fake_snapshot)['status'],
+ 'banana')
+
+ def test_create_snapshot(self):
+ # Makes sure create_snapshot works.
+ availability_zone = 'zone1:host1'
+ result = self.cloud.describe_snapshots(self.context)
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ snap1 = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-1',
+ description='test snap1 of vol %s' %
+ vol1['volumeId'])
+
+ snapshot_id = snap1['snapshotId']
+ result = self.cloud.describe_snapshots(self.context)
+ self.assertEqual(len(result['snapshotSet']), 1)
+ self.assertEqual(result['snapshotSet'][0]['snapshotId'], snapshot_id)
+
+ self.cloud.delete_snapshot(self.context, snap1['snapshotId'])
+ self.cloud.delete_volume(self.context, vol1['volumeId'])
+
+ def test_delete_snapshot(self):
+ # Makes sure delete_snapshot works.
+ availability_zone = 'zone1:host1'
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ snap1 = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-1',
+ description='test snap1 of vol %s' %
+ vol1['volumeId'])
+
+ snapshot_id = snap1['snapshotId']
+ result = self.cloud.delete_snapshot(self.context,
+ snapshot_id=snapshot_id)
+ self.assertTrue(result)
+ self.cloud.delete_volume(self.context, vol1['volumeId'])
+
+ def _block_device_mapping_create(self, instance_uuid, mappings):
+ volumes = []
+ for bdm in mappings:
+ db.block_device_mapping_create(self.context, bdm)
+ if 'volume_id' in bdm:
+ values = {'id': bdm['volume_id']}
+ for bdm_key, vol_key in [('snapshot_id', 'snapshot_id'),
+ ('snapshot_size', 'volume_size'),
+ ('delete_on_termination',
+ 'delete_on_termination')]:
+ if bdm_key in bdm:
+ values[vol_key] = bdm[bdm_key]
+ kwargs = {'name': 'bdmtest-volume',
+ 'description': 'bdm test volume description',
+ 'status': 'available',
+ 'host': 'fake',
+ 'size': 1,
+ 'attach_status': 'detached',
+ 'volume_id': values['id']}
+ vol = self.volume_api.create_with_kwargs(self.context,
+ **kwargs)
+ if 'snapshot_id' in values:
+ self.volume_api.create_snapshot(self.context,
+ vol['id'],
+ 'snapshot-bdm',
+ 'fake snap for bdm tests',
+ values['snapshot_id'])
+
+ self.volume_api.attach(self.context, vol['id'],
+ instance_uuid, bdm['device_name'])
+ volumes.append(vol)
+ return volumes
+
+ def _setUpBlockDeviceMapping(self):
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ sys_meta = flavors.save_flavor_info(
+ {}, flavors.get_flavor(1))
+ inst0 = db.instance_create(self.context,
+ {'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'root_device_name': '/dev/sdb1',
+ 'system_metadata': sys_meta})
+ inst1 = db.instance_create(self.context,
+ {'image_ref': image_uuid,
+ 'instance_type_id': 1,
+ 'root_device_name': '/dev/sdc1',
+ 'system_metadata': sys_meta})
+ inst2 = db.instance_create(self.context,
+ {'image_ref': '',
+ 'instance_type_id': 1,
+ 'root_device_name': '/dev/vda',
+ 'system_metadata': sys_meta})
+
+ instance0_uuid = inst0['uuid']
+ mappings0 = [
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb1',
+ 'snapshot_id': '1',
+ 'volume_id': '2'},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb2',
+ 'volume_id': '3',
+ 'volume_size': 1},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb3',
+ 'delete_on_termination': True,
+ 'snapshot_id': '4',
+ 'volume_id': '5'},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb4',
+ 'delete_on_termination': False,
+ 'snapshot_id': '6',
+ 'volume_id': '7'},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb5',
+ 'snapshot_id': '8',
+ 'volume_id': '9',
+ 'volume_size': 0},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb6',
+ 'snapshot_id': '10',
+ 'volume_id': '11',
+ 'volume_size': 1},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb7',
+ 'no_device': True},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb8',
+ 'virtual_name': 'swap'},
+ {'instance_uuid': instance0_uuid,
+ 'device_name': '/dev/sdb9',
+ 'virtual_name': 'ephemeral3'}]
+ instance2_uuid = inst2['uuid']
+ mappings2 = [
+ {'instance_uuid': instance2_uuid,
+ 'device_name': 'vda',
+ 'snapshot_id': '1',
+ 'volume_id': '21'}]
+
+ volumes0 = self._block_device_mapping_create(instance0_uuid, mappings0)
+ volumes2 = self._block_device_mapping_create(instance2_uuid, mappings2)
+ return ((inst0, inst1, inst2), (volumes0, [], volumes2))
+
+ def _tearDownBlockDeviceMapping(self, instances, volumes):
+ for vols in volumes:
+ for vol in vols:
+ self.volume_api.delete(self.context, vol['id'])
+ for instance in instances:
+ for bdm in db.block_device_mapping_get_all_by_instance(
+ self.context, instance['uuid']):
+ db.block_device_mapping_destroy(self.context, bdm['id'])
+ db.instance_destroy(self.context, instance['uuid'])
+
+ _expected_instance_bdm0 = {
+ 'instanceId': 'i-00000001',
+ 'rootDeviceName': '/dev/sdb1',
+ 'rootDeviceType': 'ebs'}
+
+ _expected_block_device_mapping0 = [
+ {'deviceName': '/dev/sdb1',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': False,
+ 'volumeId': 'vol-00000002',
+ }},
+ {'deviceName': '/dev/sdb2',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': False,
+ 'volumeId': 'vol-00000003',
+ }},
+ {'deviceName': '/dev/sdb3',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': True,
+ 'volumeId': 'vol-00000005',
+ }},
+ {'deviceName': '/dev/sdb4',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': False,
+ 'volumeId': 'vol-00000007',
+ }},
+ {'deviceName': '/dev/sdb5',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': False,
+ 'volumeId': 'vol-00000009',
+ }},
+ {'deviceName': '/dev/sdb6',
+ 'ebs': {'status': 'attached',
+ 'deleteOnTermination': False,
+ 'volumeId': 'vol-0000000b', }}]
+ # NOTE(yamahata): swap/ephemeral device case isn't supported yet.
+
+ _expected_instance_bdm1 = {
+ 'instanceId': 'i-00000002',
+ 'rootDeviceName': '/dev/sdc1',
+ 'rootDeviceType': 'instance-store'}
+
+ _expected_instance_bdm2 = {
+ 'instanceId': 'i-00000003',
+ 'rootDeviceName': '/dev/vda',
+ 'rootDeviceType': 'ebs'}
+
+ def test_format_instance_bdm(self):
+ (instances, volumes) = self._setUpBlockDeviceMapping()
+
+ result = {}
+ self.cloud._format_instance_bdm(self.context, instances[0]['uuid'],
+ '/dev/sdb1', result)
+ self.assertThat(
+ {'rootDeviceType': self._expected_instance_bdm0['rootDeviceType']},
+ matchers.IsSubDictOf(result))
+ self._assertEqualBlockDeviceMapping(
+ self._expected_block_device_mapping0, result['blockDeviceMapping'])
+
+ result = {}
+ self.cloud._format_instance_bdm(self.context, instances[1]['uuid'],
+ '/dev/sdc1', result)
+ self.assertThat(
+ {'rootDeviceType': self._expected_instance_bdm1['rootDeviceType']},
+ matchers.IsSubDictOf(result))
+
+ self._tearDownBlockDeviceMapping(instances, volumes)
+
+ def _assertInstance(self, instance_id):
+ ec2_instance_id = ec2utils.id_to_ec2_id(instance_id)
+ result = self.cloud.describe_instances(self.context,
+ instance_id=[ec2_instance_id])
+ result = result['reservationSet'][0]
+ self.assertEqual(len(result['instancesSet']), 1)
+ result = result['instancesSet'][0]
+ self.assertEqual(result['instanceId'], ec2_instance_id)
+ return result
+
+ def _assertEqualBlockDeviceMapping(self, expected, result):
+ self.assertEqual(len(expected), len(result))
+ for x in expected:
+ found = False
+ for y in result:
+ if x['deviceName'] == y['deviceName']:
+ self.assertThat(x, matchers.IsSubDictOf(y))
+ found = True
+ break
+ self.assertTrue(found)
+
+ def test_describe_instances_bdm(self):
+ """Make sure describe_instances works with root_device_name and
+ block device mappings
+ """
+ (instances, volumes) = self._setUpBlockDeviceMapping()
+
+ result = self._assertInstance(instances[0]['id'])
+ self.assertThat(
+ self._expected_instance_bdm0,
+ matchers.IsSubDictOf(result))
+ self._assertEqualBlockDeviceMapping(
+ self._expected_block_device_mapping0, result['blockDeviceMapping'])
+
+ result = self._assertInstance(instances[1]['id'])
+ self.assertThat(
+ self._expected_instance_bdm1,
+ matchers.IsSubDictOf(result))
+
+ result = self._assertInstance(instances[2]['id'])
+ self.assertThat(
+ self._expected_instance_bdm2,
+ matchers.IsSubDictOf(result))
+
+ self._tearDownBlockDeviceMapping(instances, volumes)
+
+ def _setUpImageSet(self, create_volumes_and_snapshots=False):
+ self.flags(max_local_block_devices=-1)
+ mappings1 = [
+ {'device': '/dev/sda1', 'virtual': 'root'},
+
+ {'device': 'sdb0', 'virtual': 'ephemeral0'},
+ {'device': 'sdb1', 'virtual': 'ephemeral1'},
+ {'device': 'sdb2', 'virtual': 'ephemeral2'},
+ {'device': 'sdb3', 'virtual': 'ephemeral3'},
+ {'device': 'sdb4', 'virtual': 'ephemeral4'},
+
+ {'device': 'sdc0', 'virtual': 'swap'},
+ {'device': 'sdc1', 'virtual': 'swap'},
+ {'device': 'sdc2', 'virtual': 'swap'},
+ {'device': 'sdc3', 'virtual': 'swap'},
+ {'device': 'sdc4', 'virtual': 'swap'}]
+ block_device_mapping1 = [
+ {'device_name': '/dev/sdb1', 'snapshot_id': 1234567},
+ {'device_name': '/dev/sdb2', 'volume_id': 1234567},
+ {'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
+ {'device_name': '/dev/sdb4', 'no_device': True},
+
+ {'device_name': '/dev/sdc1', 'snapshot_id': 12345678},
+ {'device_name': '/dev/sdc2', 'volume_id': 12345678},
+ {'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'},
+ {'device_name': '/dev/sdc4', 'no_device': True}]
+ image1 = {
+ 'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine',
+ 'image_state': 'available',
+ 'mappings': mappings1,
+ 'block_device_mapping': block_device_mapping1,
+ }
+ }
+
+ mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
+ block_device_mapping2 = [{'device_name': '/dev/sdb1',
+ 'snapshot_id': 1234567}]
+ image2 = {
+ 'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ 'type': 'machine',
+ 'root_device_name': '/dev/sdb1',
+ 'mappings': mappings2,
+ 'block_device_mapping': block_device_mapping2}}
+
+ def fake_show(meh, context, image_id, **kwargs):
+ _images = [copy.deepcopy(image1), copy.deepcopy(image2)]
+ for i in _images:
+ if str(i['id']) == str(image_id):
+ return i
+ raise exception.ImageNotFound(image_id=image_id)
+
+ def fake_detail(meh, context):
+ return [copy.deepcopy(image1), copy.deepcopy(image2)]
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+
+ volumes = []
+ snapshots = []
+ if create_volumes_and_snapshots:
+ for bdm in block_device_mapping1:
+ if 'volume_id' in bdm:
+ vol = self._volume_create(bdm['volume_id'])
+ volumes.append(vol['id'])
+ if 'snapshot_id' in bdm:
+ kwargs = {'volume_id': 76543210,
+ 'volume_size': 1,
+ 'name': 'test-snap',
+ 'description': 'test snap desc',
+ 'snap_id': bdm['snapshot_id'],
+ 'status': 'available'}
+ snap = self.volume_api.create_snapshot_with_kwargs(
+ self.context, **kwargs)
+ snapshots.append(snap['id'])
+ return (volumes, snapshots)
+
+ def _assertImageSet(self, result, root_device_type, root_device_name):
+ self.assertEqual(1, len(result['imagesSet']))
+ result = result['imagesSet'][0]
+ self.assertIn('rootDeviceType', result)
+ self.assertEqual(result['rootDeviceType'], root_device_type)
+ self.assertIn('rootDeviceName', result)
+ self.assertEqual(result['rootDeviceName'], root_device_name)
+ self.assertIn('blockDeviceMapping', result)
+
+ return result
+
+ _expected_root_device_name1 = '/dev/sda1'
+ # NOTE(yamahata): noDevice doesn't make sense when returning mapping
+ # It makes sense only when user overriding existing
+ # mapping.
+ _expected_bdms1 = [
+ {'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'},
+ {'deviceName': '/dev/sdb1', 'ebs': {'snapshotId':
+ 'snap-00053977'}},
+ {'deviceName': '/dev/sdb2', 'ebs': {'snapshotId':
+ 'vol-00053977'}},
+ {'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'},
+
+ {'deviceName': '/dev/sdc0', 'virtualName': 'swap'},
+ {'deviceName': '/dev/sdc1', 'ebs': {'snapshotId':
+ 'snap-00bc614e'}},
+ {'deviceName': '/dev/sdc2', 'ebs': {'snapshotId':
+ 'vol-00bc614e'}},
+ {'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'},
+ ]
+
+ _expected_root_device_name2 = '/dev/sdb1'
+ _expected_bdms2 = [{'deviceName': '/dev/sdb1',
+ 'ebs': {'snapshotId': 'snap-00053977'}}]
+
+ def _run_instance(self, **kwargs):
+ rv = self.cloud.run_instances(self.context, **kwargs)
+ instance_id = rv['instancesSet'][0]['instanceId']
+ return instance_id
+
+ def _restart_compute_service(self, periodic_interval_max=None):
+ """restart compute service. NOTE: fake driver forgets all instances."""
+ self.compute.kill()
+ if periodic_interval_max:
+ self.compute = self.start_service(
+ 'compute', periodic_interval_max=periodic_interval_max)
+ else:
+ self.compute = self.start_service('compute')
+
+ def _volume_create(self, volume_id=None):
+ kwargs = {'name': 'test-volume',
+ 'description': 'test volume description',
+ 'status': 'available',
+ 'host': 'fake',
+ 'size': 1,
+ 'attach_status': 'detached'}
+ if volume_id:
+ kwargs['volume_id'] = volume_id
+ return self.volume_api.create_with_kwargs(self.context, **kwargs)
+
+ def _assert_volume_attached(self, vol, instance_uuid, mountpoint):
+ self.assertEqual(vol['instance_uuid'], instance_uuid)
+ self.assertEqual(vol['mountpoint'], mountpoint)
+ self.assertEqual(vol['status'], "in-use")
+ self.assertEqual(vol['attach_status'], "attached")
+
+ def _assert_volume_detached(self, vol):
+ self.assertIsNone(vol['instance_uuid'])
+ self.assertIsNone(vol['mountpoint'])
+ self.assertEqual(vol['status'], "available")
+ self.assertEqual(vol['attach_status'], "detached")
+
+ def test_stop_start_with_volume(self):
+ # Make sure run instance with block device mapping works.
+ availability_zone = 'zone1:host1'
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ vol2 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
+ vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
+ # enforce periodic tasks run in short time to avoid wait for 60s.
+ self._restart_compute_service(periodic_interval_max=0.3)
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1,
+ 'block_device_mapping': [{'device_name': '/dev/sdb',
+ 'volume_id': vol1_uuid,
+ 'delete_on_termination': False},
+ {'device_name': '/dev/sdc',
+ 'volume_id': vol2_uuid,
+ 'delete_on_termination': True},
+ ]}
+ ec2_instance_id = self._run_instance(**kwargs)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
+ ec2_instance_id)
+ vols = self.volume_api.get_all(self.context)
+ vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
+
+ self.assertEqual(len(vols), 2)
+ for vol in vols:
+ self.assertIn(str(vol['id']), [str(vol1_uuid), str(vol2_uuid)])
+ if str(vol['id']) == str(vol1_uuid):
+ self.volume_api.attach(self.context, vol['id'],
+ instance_uuid, '/dev/sdb')
+ elif str(vol['id']) == str(vol2_uuid):
+ self.volume_api.attach(self.context, vol['id'],
+ instance_uuid, '/dev/sdc')
+
+ vol = self.volume_api.get(self.context, vol1_uuid)
+ self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
+
+ vol = self.volume_api.get(self.context, vol2_uuid)
+ self._assert_volume_attached(vol, instance_uuid, '/dev/sdc')
+
+ result = self.cloud.stop_instances(self.context, [ec2_instance_id])
+ self.assertTrue(result)
+
+ vol = self.volume_api.get(self.context, vol1_uuid)
+ self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
+
+ vol = self.volume_api.get(self.context, vol1_uuid)
+ self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
+
+ vol = self.volume_api.get(self.context, vol2_uuid)
+ self._assert_volume_attached(vol, instance_uuid, '/dev/sdc')
+
+ self.cloud.start_instances(self.context, [ec2_instance_id])
+ vols = self.volume_api.get_all(self.context)
+ vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
+ self.assertEqual(len(vols), 2)
+ for vol in vols:
+ self.assertIn(str(vol['id']), [str(vol1_uuid), str(vol2_uuid)])
+ self.assertIn(vol['mountpoint'], ['/dev/sdb', '/dev/sdc'])
+ self.assertEqual(vol['instance_uuid'], instance_uuid)
+ self.assertEqual(vol['status'], "in-use")
+ self.assertEqual(vol['attach_status'], "attached")
+
+ # Here we puke...
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+
+ admin_ctxt = context.get_admin_context(read_deleted="no")
+ vol = self.volume_api.get(admin_ctxt, vol2_uuid)
+ self.assertFalse(vol['deleted'])
+ self.cloud.delete_volume(self.context, vol1['volumeId'])
+ self._restart_compute_service()
+
+ def test_stop_with_attached_volume(self):
+ # Make sure attach info is reflected to block device mapping.
+
+ availability_zone = 'zone1:host1'
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ vol2 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+ vol1_uuid = ec2utils.ec2_vol_id_to_uuid(vol1['volumeId'])
+ vol2_uuid = ec2utils.ec2_vol_id_to_uuid(vol2['volumeId'])
+
+ # enforce periodic tasks run in short time to avoid wait for 60s.
+ self._restart_compute_service(periodic_interval_max=0.3)
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1,
+ 'block_device_mapping': [{'device_name': '/dev/sdb',
+ 'volume_id': vol1_uuid,
+ 'delete_on_termination': True}]}
+ ec2_instance_id = self._run_instance(**kwargs)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
+ ec2_instance_id)
+
+ vols = self.volume_api.get_all(self.context)
+ vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
+ self.assertEqual(len(vols), 1)
+ for vol in vols:
+ self.assertEqual(vol['id'], vol1_uuid)
+ self._assert_volume_attached(vol, instance_uuid, '/dev/sdb')
+ vol = self.volume_api.get(self.context, vol2_uuid)
+ self._assert_volume_detached(vol)
+
+ inst_obj = objects.Instance.get_by_uuid(self.context, instance_uuid)
+ self.cloud.compute_api.attach_volume(self.context,
+ inst_obj,
+ volume_id=vol2_uuid,
+ device='/dev/sdc')
+
+ vol1 = self.volume_api.get(self.context, vol1_uuid)
+ self._assert_volume_attached(vol1, instance_uuid, '/dev/sdb')
+
+ vol2 = self.volume_api.get(self.context, vol2_uuid)
+ self._assert_volume_attached(vol2, instance_uuid, '/dev/sdc')
+
+ self.cloud.compute_api.detach_volume(self.context,
+ inst_obj, vol1)
+
+ vol1 = self.volume_api.get(self.context, vol1_uuid)
+ self._assert_volume_detached(vol1)
+
+ result = self.cloud.stop_instances(self.context, [ec2_instance_id])
+ self.assertTrue(result)
+
+ vol2 = self.volume_api.get(self.context, vol2_uuid)
+ self._assert_volume_attached(vol2, instance_uuid, '/dev/sdc')
+
+ self.cloud.start_instances(self.context, [ec2_instance_id])
+ vols = self.volume_api.get_all(self.context)
+ vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
+ self.assertEqual(len(vols), 1)
+
+ self._assert_volume_detached(vol1)
+
+ vol1 = self.volume_api.get(self.context, vol1_uuid)
+ self._assert_volume_detached(vol1)
+
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+
+ def _create_snapshot(self, ec2_volume_id):
+ result = self.cloud.create_snapshot(self.context,
+ volume_id=ec2_volume_id)
+ return result['snapshotId']
+
+ def test_run_with_snapshot(self):
+ # Makes sure run/stop/start instance with snapshot works.
+ availability_zone = 'zone1:host1'
+ vol1 = self.cloud.create_volume(self.context,
+ size=1,
+ availability_zone=availability_zone)
+
+ snap1 = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-1',
+ description='test snap of vol %s' %
+ vol1['volumeId'])
+ snap1_uuid = ec2utils.ec2_snap_id_to_uuid(snap1['snapshotId'])
+
+ snap2 = self.cloud.create_snapshot(self.context,
+ vol1['volumeId'],
+ name='snap-2',
+ description='test snap of vol %s' %
+ vol1['volumeId'])
+ snap2_uuid = ec2utils.ec2_snap_id_to_uuid(snap2['snapshotId'])
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1,
+ 'block_device_mapping': [{'device_name': '/dev/vdb',
+ 'snapshot_id': snap1_uuid,
+ 'delete_on_termination': False, },
+ {'device_name': '/dev/vdc',
+ 'snapshot_id': snap2_uuid,
+ 'delete_on_termination': True}]}
+ ec2_instance_id = self._run_instance(**kwargs)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(self.context,
+ ec2_instance_id)
+
+ vols = self.volume_api.get_all(self.context)
+ vols = [v for v in vols if v['instance_uuid'] == instance_uuid]
+
+ self.assertEqual(len(vols), 2)
+
+ vol1_id = None
+ vol2_id = None
+ for vol in vols:
+ snapshot_uuid = vol['snapshot_id']
+ if snapshot_uuid == snap1_uuid:
+ vol1_id = vol['id']
+ mountpoint = '/dev/vdb'
+ elif snapshot_uuid == snap2_uuid:
+ vol2_id = vol['id']
+ mountpoint = '/dev/vdc'
+ else:
+ self.fail()
+
+ self._assert_volume_attached(vol, instance_uuid, mountpoint)
+
+ # Just make sure we found them
+ self.assertTrue(vol1_id)
+ self.assertTrue(vol2_id)
+
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+
+ admin_ctxt = context.get_admin_context(read_deleted="no")
+ vol = self.volume_api.get(admin_ctxt, vol1_id)
+ self._assert_volume_detached(vol)
+ self.assertFalse(vol['deleted'])
+
+ def test_create_image(self):
+ # Make sure that CreateImage works.
+        # Force periodic tasks to run frequently so the test avoids a 60s wait.
+ self._restart_compute_service(periodic_interval_max=0.3)
+
+ (volumes, snapshots) = self._setUpImageSet(
+ create_volumes_and_snapshots=True)
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ ec2_instance_id = self._run_instance(**kwargs)
+
+ self.cloud.terminate_instances(self.context, [ec2_instance_id])
+ self._restart_compute_service()
+
+ @staticmethod
+ def _fake_bdm_get(ctxt, id):
+ return [{'volume_id': 87654321,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'virtual_name': None,
+ 'delete_on_termination': True,
+ 'device_name': '/dev/sdh'},
+ {'volume_id': None,
+ 'snapshot_id': 98765432,
+ 'no_device': None,
+ 'virtual_name': None,
+ 'delete_on_termination': True,
+ 'device_name': '/dev/sdi'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': True,
+ 'virtual_name': None,
+ 'delete_on_termination': None,
+ 'device_name': None},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'virtual_name': 'ephemeral0',
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sdb'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'virtual_name': 'swap',
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sdc'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'virtual_name': 'ephemeral1',
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sdd'},
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'virtual_name': 'ephemeral2',
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sd3'},
+ ]
diff --git a/nova/tests/unit/api/ec2/test_cloud.py b/nova/tests/unit/api/ec2/test_cloud.py
new file mode 100644
index 0000000000..113af8c96c
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_cloud.py
@@ -0,0 +1,3255 @@
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import copy
+import datetime
+import functools
+import os
+import string
+import tempfile
+
+import fixtures
+import iso8601
+import mock
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+from nova.api.ec2 import cloud
+from nova.api.ec2 import ec2utils
+from nova.api.ec2 import inst_state
+from nova.api.metadata import password
+from nova.compute import api as compute_api
+from nova.compute import flavors
+from nova.compute import power_state
+from nova.compute import rpcapi as compute_rpcapi
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.image import s3
+from nova.network import api as network_api
+from nova.network import base_api as base_network_api
+from nova.network import model
+from nova.network import neutronv2
+from nova import objects
+from nova.objects import base as obj_base
+from nova.openstack.common import log as logging
+from nova.openstack.common import policy as common_policy
+from nova.openstack.common import uuidutils
+from nova import policy
+from nova import test
+from nova.tests.unit.api.openstack.compute.contrib import (
+ test_neutron_security_groups as test_neutron)
+from nova.tests.unit import cast_as_call
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_notifier
+from nova.tests.unit import fake_utils
+from nova.tests.unit.image import fake
+from nova.tests.unit import matchers
+from nova import utils
+from nova.virt import fake as fake_virt
+from nova import volume
+
+CONF = cfg.CONF
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+CONF.import_opt('default_flavor', 'nova.compute.flavors')
+CONF.import_opt('use_ipv6', 'nova.netconf')
+LOG = logging.getLogger(__name__)
+
+HOST = "testhost"
+
+
+def get_fake_cache(get_floating):
+ def _ip(ip, fixed=True, floats=None):
+ ip_dict = {'address': ip, 'type': 'fixed'}
+ if not fixed:
+ ip_dict['type'] = 'floating'
+ if fixed and floats:
+ ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
+ return ip_dict
+
+ if get_floating:
+ ip_info = [_ip('192.168.0.3',
+ floats=['1.2.3.4', '5.6.7.8']),
+ _ip('192.168.0.4')]
+ else:
+ ip_info = [_ip('192.168.0.3'),
+ _ip('192.168.0.4')]
+
+ info = [{'address': 'aa:bb:cc:dd:ee:ff',
+ 'id': 1,
+ 'network': {'bridge': 'br0',
+ 'id': 1,
+ 'label': 'private',
+ 'subnets': [{'cidr': '192.168.0.0/24',
+ 'ips': ip_info}]}}]
+
+ if CONF.use_ipv6:
+ ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
+ info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
+ 'ips': [_ip(ipv6_addr)]})
+
+ return model.NetworkInfo.hydrate(info)
+
+
+def get_instances_with_cached_ips(orig_func, get_floating,
+ *args, **kwargs):
+ """Kludge the cache into instance(s) without having to create DB
+ entries
+ """
+ instances = orig_func(*args, **kwargs)
+
+ if kwargs.get('want_objects', False):
+ info_cache = objects.InstanceInfoCache()
+ info_cache.network_info = get_fake_cache(get_floating)
+ info_cache.obj_reset_changes()
+ else:
+ info_cache = {'network_info': get_fake_cache(get_floating)}
+
+ if isinstance(instances, (list, obj_base.ObjectListBase)):
+ for instance in instances:
+ instance['info_cache'] = info_cache
+ else:
+ instances['info_cache'] = info_cache
+ return instances
+
+
+class CloudTestCase(test.TestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(CloudTestCase, self).setUp()
+ self.useFixture(test.SampleNetworks())
+ ec2utils.reset_cache()
+ self.flags(compute_driver='nova.virt.fake.FakeDriver',
+ volume_api_class='nova.tests.unit.fake_volume.API')
+ self.useFixture(fixtures.FakeLogger('boto'))
+ fake_utils.stub_out_utils_spawn_n(self.stubs)
+
+ def fake_show(meh, context, id, **kwargs):
+ return {'id': id,
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine',
+ 'image_state': 'available'}}
+
+ def fake_detail(_self, context, **kwargs):
+ image = fake_show(None, context, None)
+ image['name'] = kwargs.get('filters', {}).get('name')
+ return [image]
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+ fake.stub_out_image_service(self.stubs)
+
+ def dumb(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # set up our cloud
+ self.cloud = cloud.CloudController()
+ self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')
+
+ # Short-circuit the conductor service
+ self.flags(use_local=True, group='conductor')
+
+ # Stub out the notification service so we use the no-op serializer
+ # and avoid lazy-load traces with the wrap_exception decorator in
+ # the compute service.
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
+ # set up services
+ self.conductor = self.start_service('conductor',
+ manager=CONF.conductor.manager)
+ self.compute = self.start_service('compute')
+ self.scheduler = self.start_service('scheduler')
+ self.network = self.start_service('network')
+ self.consoleauth = self.start_service('consoleauth')
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id,
+ is_admin=True)
+ self.volume_api = volume.API()
+
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ # make sure we can map ami-00000001/2 to a uuid in FakeImageService
+ db.s3_image_create(self.context,
+ 'cedef40a-ed67-4d10-800e-17455edce175')
+ db.s3_image_create(self.context,
+ '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
+
+ def tearDown(self):
+ self.volume_api.reset_fake_api(self.context)
+ super(CloudTestCase, self).tearDown()
+ fake.FakeImageService_reset()
+
+ def fake_get_target(obj, iqn):
+ return 1
+
+ def fake_remove_iscsi_target(obj, tid, lun, vol_id, **kwargs):
+ pass
+
+ def _stub_instance_get_with_fixed_ips(self,
+ func_name, get_floating=True):
+ orig_func = getattr(self.cloud.compute_api, func_name)
+
+ def fake_get(*args, **kwargs):
+ return get_instances_with_cached_ips(orig_func, get_floating,
+ *args, **kwargs)
+ self.stubs.Set(self.cloud.compute_api, func_name, fake_get)
+
+ def _create_key(self, name):
+ # NOTE(vish): create depends on pool, so just call helper directly
+ keypair_api = compute_api.KeypairAPI()
+ return keypair_api.create_key_pair(self.context, self.context.user_id,
+ name)
+
+ def test_describe_regions(self):
+ # Makes sure describe regions runs without raising an exception.
+ result = self.cloud.describe_regions(self.context)
+ self.assertEqual(len(result['regionInfo']), 1)
+ self.flags(region_list=["one=test_host1", "two=test_host2"])
+ result = self.cloud.describe_regions(self.context)
+ self.assertEqual(len(result['regionInfo']), 2)
+
+ def test_describe_addresses(self):
+ # Makes sure describe addresses runs without raising an exception.
+ address = "10.10.10.10"
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ self.flags(network_api_class='nova.network.api.API')
+ self.cloud.allocate_address(self.context)
+ self.cloud.describe_addresses(self.context)
+ self.cloud.release_address(self.context,
+ public_ip=address)
+ db.floating_ip_destroy(self.context, address)
+
+ def test_describe_addresses_in_neutron(self):
+ # Makes sure describe addresses runs without raising an exception.
+ address = "10.10.10.10"
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ self.cloud.allocate_address(self.context)
+ self.cloud.describe_addresses(self.context)
+ self.cloud.release_address(self.context,
+ public_ip=address)
+ db.floating_ip_destroy(self.context, address)
+
+ def test_describe_specific_address(self):
+ # Makes sure describe specific address works.
+ addresses = ["10.10.10.10", "10.10.10.11"]
+ for address in addresses:
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ self.cloud.allocate_address(self.context)
+ result = self.cloud.describe_addresses(self.context)
+ self.assertEqual(len(result['addressesSet']), 2)
+ result = self.cloud.describe_addresses(self.context,
+ public_ip=['10.10.10.10'])
+ self.assertEqual(len(result['addressesSet']), 1)
+ for address in addresses:
+ self.cloud.release_address(self.context,
+ public_ip=address)
+ db.floating_ip_destroy(self.context, address)
+
+ def test_allocate_address(self):
+ address = "10.10.10.10"
+ allocate = self.cloud.allocate_address
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ self.assertEqual(allocate(self.context)['publicIp'], address)
+ db.floating_ip_destroy(self.context, address)
+ self.assertRaises(exception.NoMoreFloatingIps,
+ allocate,
+ self.context)
+
+ def test_release_address(self):
+ address = "10.10.10.10"
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova',
+ 'project_id': self.project_id})
+ result = self.cloud.release_address(self.context, address)
+ self.assertEqual(result.get('return', None), 'true')
+
+ def test_associate_disassociate_address(self):
+ # Verifies associate runs cleanly without raising an exception.
+ address = "10.10.10.10"
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ self.cloud.allocate_address(self.context)
+ # TODO(jkoelker) Probably need to query for instance_type_id and
+ # make sure we get a valid one
+ inst = db.instance_create(self.context, {'host': self.compute.host,
+ 'display_name': HOST,
+ 'instance_type_id': 1})
+ networks = db.network_get_all(self.context)
+ for network in networks:
+ db.network_update(self.context, network['id'],
+ {'host': self.network.host})
+ project_id = self.context.project_id
+ nw_info = self.network.allocate_for_instance(self.context,
+ instance_id=inst['id'],
+ instance_uuid=inst['uuid'],
+ host=inst['host'],
+ vpn=None,
+ rxtx_factor=3,
+ project_id=project_id,
+ macs=None)
+
+ fixed_ips = nw_info.fixed_ips()
+ ec2_id = ec2utils.id_to_ec2_inst_id(inst['uuid'])
+
+ self.stubs.Set(ec2utils, 'get_ip_info_for_instance',
+ lambda *args: {'fixed_ips': ['10.0.0.1'],
+ 'fixed_ip6s': [],
+ 'floating_ips': []})
+ self.stubs.Set(network_api.API, 'get_instance_id_by_floating_address',
+ lambda *args: 1)
+
+ def fake_update_instance_cache_with_nw_info(api, context, instance,
+ nw_info=None,
+ update_cells=True):
+
+ return
+
+ self.stubs.Set(base_network_api, "update_instance_cache_with_nw_info",
+ fake_update_instance_cache_with_nw_info)
+
+ self.cloud.associate_address(self.context,
+ instance_id=ec2_id,
+ public_ip=address)
+ self.cloud.disassociate_address(self.context,
+ public_ip=address)
+ self.cloud.release_address(self.context,
+ public_ip=address)
+ self.network.deallocate_fixed_ip(self.context, fixed_ips[0]['address'],
+ inst['host'])
+ db.instance_destroy(self.context, inst['uuid'])
+ db.floating_ip_destroy(self.context, address)
+
+ def test_disassociate_auto_assigned_address(self):
+        """Verify that disassociating an auto-assigned floating IP
+        raises an exception.
+        """
+ address = "10.10.10.10"
+
+ def fake_get(*args, **kwargs):
+ pass
+
+ def fake_disassociate_floating_ip(*args, **kwargs):
+ raise exception.CannotDisassociateAutoAssignedFloatingIP()
+
+ self.stubs.Set(network_api.API, 'get_instance_id_by_floating_address',
+ lambda *args: 1)
+ self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
+ self.stubs.Set(network_api.API, 'disassociate_floating_ip',
+ fake_disassociate_floating_ip)
+
+ self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
+ self.cloud.disassociate_address,
+ self.context, public_ip=address)
+
+ def test_disassociate_unassociated_address(self):
+ address = "10.10.10.10"
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ self.cloud.allocate_address(self.context)
+ self.cloud.describe_addresses(self.context)
+ self.assertRaises(exception.InvalidAssociation,
+ self.cloud.disassociate_address,
+ self.context, public_ip=address)
+ db.floating_ip_destroy(self.context, address)
+
+ def test_describe_security_groups(self):
+ # Makes sure describe_security_groups works and filters results.
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ result = self.cloud.describe_security_groups(self.context)
+ # NOTE(vish): should have the default group as well
+ self.assertEqual(len(result['securityGroupInfo']), 2)
+ result = self.cloud.describe_security_groups(self.context,
+ group_name=[sec['name']])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(
+ result['securityGroupInfo'][0]['groupName'],
+ sec['name'])
+ db.security_group_destroy(self.context, sec['id'])
+
+ def test_describe_security_groups_all_tenants(self):
+ # Makes sure describe_security_groups works and filters results.
+ sec = db.security_group_create(self.context,
+ {'project_id': 'foobar',
+ 'name': 'test'})
+
+ def _check_name(result, i, expected):
+ self.assertEqual(result['securityGroupInfo'][i]['groupName'],
+ expected)
+
+ # include all tenants
+ filter = [{'name': 'all-tenants', 'value': {'1': 1}}]
+ result = self.cloud.describe_security_groups(self.context,
+ filter=filter)
+ self.assertEqual(len(result['securityGroupInfo']), 2)
+ _check_name(result, 0, 'default')
+ _check_name(result, 1, sec['name'])
+
+ # exclude all tenants
+ filter = [{'name': 'all-tenants', 'value': {'1': 0}}]
+ result = self.cloud.describe_security_groups(self.context,
+ filter=filter)
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ _check_name(result, 0, 'default')
+
+ # default all tenants
+ result = self.cloud.describe_security_groups(self.context)
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ _check_name(result, 0, 'default')
+
+ db.security_group_destroy(self.context, sec['id'])
+
+ def test_describe_security_groups_by_id(self):
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ result = self.cloud.describe_security_groups(self.context,
+ group_id=[sec['id']])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(
+ result['securityGroupInfo'][0]['groupName'],
+ sec['name'])
+ default = db.security_group_get_by_name(self.context,
+ self.context.project_id,
+ 'default')
+ result = self.cloud.describe_security_groups(self.context,
+ group_id=[default['id']])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(
+ result['securityGroupInfo'][0]['groupName'],
+ 'default')
+ db.security_group_destroy(self.context, sec['id'])
+
+ def test_create_delete_security_group(self):
+ descript = 'test description'
+ create = self.cloud.create_security_group
+ result = create(self.context, 'testgrp', descript)
+ group_descript = result['securityGroupSet'][0]['groupDescription']
+ self.assertEqual(descript, group_descript)
+ delete = self.cloud.delete_security_group
+ self.assertTrue(delete(self.context, 'testgrp'))
+
+ def test_security_group_quota_limit(self):
+ self.flags(quota_security_groups=10)
+ for i in range(1, CONF.quota_security_groups):
+ name = 'test name %i' % i
+ descript = 'test description %i' % i
+ create = self.cloud.create_security_group
+ create(self.context, name, descript)
+
+        # The 11th group (counting the pre-existing default) should fail
+ self.assertRaises(exception.SecurityGroupLimitExceeded,
+ create, self.context, 'foo', 'bar')
+
+ def test_delete_security_group_by_id(self):
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ delete = self.cloud.delete_security_group
+ self.assertTrue(delete(self.context, group_id=sec['id']))
+
+ def test_delete_security_group_with_bad_name(self):
+ delete = self.cloud.delete_security_group
+ notfound = exception.SecurityGroupNotFound
+ self.assertRaises(notfound, delete, self.context, 'badname')
+
+ def test_delete_security_group_with_bad_group_id(self):
+ delete = self.cloud.delete_security_group
+ notfound = exception.SecurityGroupNotFound
+ self.assertRaises(notfound, delete, self.context, group_id=999)
+
+ def test_delete_security_group_no_params(self):
+ delete = self.cloud.delete_security_group
+ self.assertRaises(exception.MissingParameter, delete, self.context)
+
+ def test_delete_security_group_policy_not_allowed(self):
+ rules = {'compute_extension:security_groups':
+ common_policy.parse_rule('project_id:%(project_id)s')}
+ policy.set_rules(rules)
+
+ with mock.patch.object(self.cloud.security_group_api,
+ 'get') as get:
+ get.return_value = {'project_id': 'invalid'}
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.cloud.delete_security_group, self.context,
+ 'fake-name', 'fake-id')
+
+ def test_authorize_security_group_ingress_policy_not_allowed(self):
+ rules = {'compute_extension:security_groups':
+ common_policy.parse_rule('project_id:%(project_id)s')}
+ policy.set_rules(rules)
+
+ with mock.patch.object(self.cloud.security_group_api,
+ 'get') as get:
+ get.return_value = {'project_id': 'invalid'}
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.cloud.authorize_security_group_ingress, self.context,
+ 'fake-name', 'fake-id')
+
+ def test_authorize_security_group_ingress(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
+
+ def test_authorize_security_group_ingress_ip_permissions_ip_ranges(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
+ 'ip_ranges':
+ {'1': {'cidr_ip': u'0.0.0.0/0'},
+ '2': {'cidr_ip': u'10.10.10.10/32'}},
+ 'ip_protocol': u'tcp'}]}
+ self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
+
+ def test_authorize_security_group_fail_missing_source_group(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
+ 'ip_ranges': {'1': {'cidr_ip': u'0.0.0.0/0'},
+ '2': {'cidr_ip': u'10.10.10.10/32'}},
+ 'groups': {'1': {'user_id': u'someuser',
+ 'group_name': u'somegroup1'}},
+ 'ip_protocol': u'tcp'}]}
+ self.assertRaises(exception.SecurityGroupNotFound, authz,
+ self.context, group_name=sec['name'], **kwargs)
+
+ def test_authorize_security_group_ingress_ip_permissions_groups(self):
+ kwargs = {
+ 'project_id': self.context.project_id,
+ 'user_id': self.context.user_id,
+ 'name': 'test'
+ }
+ sec = db.security_group_create(self.context,
+ {'project_id': 'someuser',
+ 'user_id': 'someuser',
+ 'description': '',
+ 'name': 'somegroup1'})
+ sec = db.security_group_create(self.context,
+ {'project_id': 'someuser',
+ 'user_id': 'someuser',
+ 'description': '',
+ 'name': 'othergroup2'})
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'ip_permissions': [{'to_port': 81, 'from_port': 81,
+ 'groups': {'1': {'user_id': u'someuser',
+ 'group_name': u'somegroup1'},
+ '2': {'user_id': u'someuser',
+ 'group_name': u'othergroup2'}},
+ 'ip_protocol': u'tcp'}]}
+ self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs))
+
+ def test_describe_security_group_ingress_groups(self):
+ kwargs = {
+ 'project_id': self.context.project_id,
+ 'user_id': self.context.user_id,
+ 'name': 'test'
+ }
+ sec1 = db.security_group_create(self.context, kwargs)
+ sec2 = db.security_group_create(self.context,
+ {'project_id': 'someuser',
+ 'user_id': 'someuser',
+ 'description': '',
+ 'name': 'somegroup1'})
+ sec3 = db.security_group_create(self.context,
+ {'project_id': 'someuser',
+ 'user_id': 'someuser',
+ 'description': '',
+ 'name': 'othergroup2'})
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'ip_permissions': [
+ {'groups': {'1': {'user_id': u'someuser',
+ 'group_name': u'somegroup1'}}},
+ {'ip_protocol': 'tcp',
+ 'from_port': 80,
+ 'to_port': 80,
+ 'groups': {'1': {'user_id': u'someuser',
+ 'group_name': u'othergroup2'}}}]}
+ self.assertTrue(authz(self.context, group_name=sec1['name'], **kwargs))
+ describe = self.cloud.describe_security_groups
+ groups = describe(self.context, group_name=['test'])
+ self.assertEqual(len(groups['securityGroupInfo']), 1)
+ actual_rules = groups['securityGroupInfo'][0]['ipPermissions']
+ self.assertEqual(len(actual_rules), 4)
+ expected_rules = [{'fromPort': -1,
+ 'groups': [{'groupName': 'somegroup1',
+ 'userId': 'someuser'}],
+ 'ipProtocol': 'icmp',
+ 'ipRanges': [],
+ 'toPort': -1},
+ {'fromPort': 1,
+ 'groups': [{'groupName': u'somegroup1',
+ 'userId': u'someuser'}],
+ 'ipProtocol': 'tcp',
+ 'ipRanges': [],
+ 'toPort': 65535},
+ {'fromPort': 1,
+ 'groups': [{'groupName': u'somegroup1',
+ 'userId': u'someuser'}],
+ 'ipProtocol': 'udp',
+ 'ipRanges': [],
+ 'toPort': 65535},
+ {'fromPort': 80,
+ 'groups': [{'groupName': u'othergroup2',
+ 'userId': u'someuser'}],
+ 'ipProtocol': u'tcp',
+ 'ipRanges': [],
+ 'toPort': 80}]
+ for rule in expected_rules:
+ self.assertIn(rule, actual_rules)
+
+ db.security_group_destroy(self.context, sec3['id'])
+ db.security_group_destroy(self.context, sec2['id'])
+ db.security_group_destroy(self.context, sec1['id'])
+
+ def test_revoke_security_group_ingress_policy_not_allowed(self):
+ rules = {'compute_extension:security_groups':
+ common_policy.parse_rule('project_id:%(project_id)s')}
+ policy.set_rules(rules)
+
+ with mock.patch.object(self.cloud.security_group_api,
+ 'get') as get:
+ get.return_value = {'project_id': 'invalid'}
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.cloud.revoke_security_group_ingress, self.context,
+ 'fake-name', 'fake-id')
+
+ def test_revoke_security_group_ingress(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ authz(self.context, group_id=sec['id'], **kwargs)
+ revoke = self.cloud.revoke_security_group_ingress
+ self.assertTrue(revoke(self.context, group_name=sec['name'], **kwargs))
+
+ def test_authorize_revoke_security_group_ingress_by_id(self):
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ authz(self.context, group_id=sec['id'], **kwargs)
+ revoke = self.cloud.revoke_security_group_ingress
+ self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs))
+
+ def test_authorize_security_group_ingress_missing_protocol_params(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ self.assertRaises(exception.MissingParameter, authz, self.context,
+ 'test')
+
+ def test_authorize_security_group_ingress_missing_group_name_or_id(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ authz = self.cloud.authorize_security_group_ingress
+ self.assertRaises(exception.MissingParameter, authz, self.context,
+ **kwargs)
+
+ def test_authorize_security_group_ingress_already_exists(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ authz(self.context, group_name=sec['name'], **kwargs)
+ self.assertRaises(exception.SecurityGroupRuleExists, authz,
+ self.context, group_name=sec['name'], **kwargs)
+
+ def test_security_group_ingress_quota_limit(self):
+ self.flags(quota_security_group_rules=20)
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec_group = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ for i in range(100, 120):
+ kwargs = {'to_port': i, 'from_port': i, 'ip_protocol': 'tcp'}
+ authz(self.context, group_id=sec_group['id'], **kwargs)
+
+ kwargs = {'to_port': 121, 'from_port': 121, 'ip_protocol': 'tcp'}
+ self.assertRaises(exception.SecurityGroupLimitExceeded, authz,
+ self.context, group_id=sec_group['id'], **kwargs)
+
+ def _test_authorize_security_group_no_ports_with_source_group(self, proto):
+ kwargs = {
+ 'project_id': self.context.project_id,
+ 'user_id': self.context.user_id,
+ 'description': '',
+ 'name': 'test'
+ }
+ sec = db.security_group_create(self.context, kwargs)
+
+ authz = self.cloud.authorize_security_group_ingress
+ auth_kwargs = {'ip_protocol': proto,
+ 'groups': {'1': {'user_id': self.context.user_id,
+ 'group_name': u'test'}}}
+ self.assertTrue(authz(self.context, group_name=sec['name'],
+ **auth_kwargs))
+
+ describe = self.cloud.describe_security_groups
+ groups = describe(self.context, group_name=['test'])
+ self.assertEqual(len(groups['securityGroupInfo']), 1)
+ actual_rules = groups['securityGroupInfo'][0]['ipPermissions']
+ expected_rules = [{'groups': [{'groupName': 'test',
+ 'userId': self.context.user_id}],
+ 'ipProtocol': proto,
+ 'ipRanges': []}]
+ if proto == 'icmp':
+ expected_rules[0]['fromPort'] = -1
+ expected_rules[0]['toPort'] = -1
+ else:
+ expected_rules[0]['fromPort'] = 1
+ expected_rules[0]['toPort'] = 65535
+ self.assertTrue(expected_rules == actual_rules)
+ describe = self.cloud.describe_security_groups
+ groups = describe(self.context, group_name=['test'])
+
+ db.security_group_destroy(self.context, sec['id'])
+
+ def _test_authorize_security_group_no_ports_no_source_group(self, proto):
+ kwargs = {
+ 'project_id': self.context.project_id,
+ 'user_id': self.context.user_id,
+ 'description': '',
+ 'name': 'test'
+ }
+ sec = db.security_group_create(self.context, kwargs)
+
+ authz = self.cloud.authorize_security_group_ingress
+ auth_kwargs = {'ip_protocol': proto}
+ self.assertRaises(exception.MissingParameter, authz, self.context,
+ group_name=sec['name'], **auth_kwargs)
+
+ db.security_group_destroy(self.context, sec['id'])
+
+ def test_authorize_security_group_no_ports_icmp(self):
+ self._test_authorize_security_group_no_ports_with_source_group('icmp')
+ self._test_authorize_security_group_no_ports_no_source_group('icmp')
+
+ def test_authorize_security_group_no_ports_tcp(self):
+ self._test_authorize_security_group_no_ports_with_source_group('tcp')
+ self._test_authorize_security_group_no_ports_no_source_group('tcp')
+
+ def test_authorize_security_group_no_ports_udp(self):
+ self._test_authorize_security_group_no_ports_with_source_group('udp')
+ self._test_authorize_security_group_no_ports_no_source_group('udp')
+
+ def test_revoke_security_group_ingress_missing_group_name_or_id(self):
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ revoke = self.cloud.revoke_security_group_ingress
+ self.assertRaises(exception.MissingParameter, revoke,
+ self.context, **kwargs)
+
    def test_delete_security_group_in_use_by_group(self):
        """Deleting a group that is the *source* of another group's rule
        must also remove that rule from the referencing group.
        """
        self.cloud.create_security_group(self.context, 'testgrp1',
                                         "test group 1")
        self.cloud.create_security_group(self.context, 'testgrp2',
                                         "test group 2")
        # Rule on testgrp1 that allows ingress from testgrp2.
        kwargs = {'groups': {'1': {'user_id': u'%s' % self.context.user_id,
                                   'group_name': u'testgrp2'}},
                  }
        self.cloud.authorize_security_group_ingress(self.context,
                                                    group_name='testgrp1',
                                                    **kwargs)

        group1 = db.security_group_get_by_name(self.context,
                                               self.project_id, 'testgrp1')
        get_rules = db.security_group_rule_get_by_security_group

        self.assertTrue(get_rules(self.context, group1['id']))
        self.cloud.delete_security_group(self.context, 'testgrp2')
        # The rule referencing the deleted group must be gone with it.
        self.assertFalse(get_rules(self.context, group1['id']))
+
    def test_delete_security_group_in_use_by_instance(self):
        # Ensure that a group can not be deleted if in use by an instance.
        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
        args = {'reservation_id': 'a',
                'image_ref': image_uuid,
                'instance_type_id': 1,
                'host': 'host1',
                'vm_state': 'active'}
        inst = db.instance_create(self.context, args)

        args = {'user_id': self.context.user_id,
                'project_id': self.context.project_id,
                'name': 'testgrp',
                'description': 'Test group'}
        group = db.security_group_create(self.context, args)

        # Associate the group with the live instance: delete must now fail.
        db.instance_add_security_group(self.context, inst['uuid'], group['id'])

        self.assertRaises(exception.InvalidGroup,
                          self.cloud.delete_security_group,
                          self.context, 'testgrp')

        # Once the instance is gone the same delete succeeds.
        db.instance_destroy(self.context, inst['uuid'])

        self.cloud.delete_security_group(self.context, 'testgrp')
+
    def test_describe_availability_zones(self):
        # Makes sure describe_availability_zones works and filters results.
        service1 = db.service_create(self.context, {'host': 'host1_zones',
                                                    'binary': "nova-compute",
                                                    'topic': 'compute',
                                                    'report_count': 0})
        service2 = db.service_create(self.context, {'host': 'host2_zones',
                                                    'binary': "nova-compute",
                                                    'topic': 'compute',
                                                    'report_count': 0})
        # Aggregate based zones
        agg = db.aggregate_create(self.context,
                                  {'name': 'agg1'},
                                  {'availability_zone': 'zone1'})
        db.aggregate_host_add(self.context, agg['id'], 'host1_zones')
        agg = db.aggregate_create(self.context,
                                  {'name': 'agg2'},
                                  {'availability_zone': 'zone2'})
        db.aggregate_host_add(self.context, agg['id'], 'host2_zones')
        result = self.cloud.describe_availability_zones(self.context)
        # NOTE(review): the expected counts (3 and 18) presumably include
        # zones/services set up by the base fixture — confirm against setUp.
        self.assertEqual(len(result['availabilityZoneInfo']), 3)
        admin_ctxt = context.get_admin_context(read_deleted="no")
        # zone_name='verbose' switches the API into the detailed listing.
        result = self.cloud.describe_availability_zones(admin_ctxt,
                                                        zone_name='verbose')
        self.assertEqual(len(result['availabilityZoneInfo']), 18)
        db.service_destroy(self.context, service1['id'])
        db.service_destroy(self.context, service2['id'])
+
    def test_describe_availability_zones_verbose(self):
        # Makes sure the verbose form of describe_availability_zones works.
        service1 = db.service_create(self.context, {'host': 'host1_zones',
                                                    'binary': "nova-compute",
                                                    'topic': 'compute',
                                                    'report_count': 0})
        service2 = db.service_create(self.context, {'host': 'host2_zones',
                                                    'binary': "nova-compute",
                                                    'topic': 'compute',
                                                    'report_count': 0})
        agg = db.aggregate_create(self.context,
                                  {'name': 'agg1'},
                                  {'availability_zone': 'second_zone'})
        db.aggregate_host_add(self.context, agg['id'], 'host2_zones')

        admin_ctxt = context.get_admin_context(read_deleted="no")
        result = self.cloud.describe_availability_zones(admin_ctxt,
                                                        zone_name='verbose')

        # NOTE(review): 17 presumably counts fixture zones/hosts plus the
        # two services above — confirm against the base fixture.
        self.assertEqual(len(result['availabilityZoneInfo']), 17)
        db.service_destroy(self.context, service1['id'])
        db.service_destroy(self.context, service2['id'])
+
+ def assertEqualSorted(self, x, y):
+ self.assertEqual(sorted(x), sorted(y))
+
    def test_describe_instances(self):
        # Makes sure describe_instances works and filters results.
        self.flags(use_ipv6=True)

        self._stub_instance_get_with_fixed_ips('get_all')
        self._stub_instance_get_with_fixed_ips('get')

        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
        sys_meta = flavors.save_flavor_info(
            {}, flavors.get_flavor(1))

        # Two instances sharing one reservation, on hosts in different zones.
        sys_meta['EC2_client_token'] = "client-token-1"
        inst1 = db.instance_create(self.context,
                                   {'reservation_id': 'a',
                                    'image_ref': image_uuid,
                                    'instance_type_id': 1,
                                    'host': 'host1',
                                    'hostname': 'server-1234',
                                    'vm_state': 'active',
                                    'system_metadata': sys_meta})

        sys_meta['EC2_client_token'] = "client-token-2"
        inst2 = db.instance_create(self.context,
                                   {'reservation_id': 'a',
                                    'image_ref': image_uuid,
                                    'instance_type_id': 1,
                                    'host': 'host2',
                                    'hostname': 'server-4321',
                                    'vm_state': 'active',
                                    'system_metadata': sys_meta})
        comp1 = db.service_create(self.context, {'host': 'host1',
                                                 'topic': "compute"})
        agg = db.aggregate_create(self.context,
                                  {'name': 'agg1'},
                                  {'availability_zone': 'zone1'})
        db.aggregate_host_add(self.context, agg['id'], 'host1')

        comp2 = db.service_create(self.context, {'host': 'host2',
                                                 'topic': "compute"})
        agg2 = db.aggregate_create(self.context,
                                   {'name': 'agg2'},
                                   {'availability_zone': 'zone2'})
        db.aggregate_host_add(self.context, agg2['id'], 'host2')

        # Unfiltered: both instances appear under the single reservation.
        result = self.cloud.describe_instances(self.context)
        result = result['reservationSet'][0]
        self.assertEqual(len(result['instancesSet']), 2)

        # Now try filtering.
        instance_id = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
        result = self.cloud.describe_instances(self.context,
                                               instance_id=[instance_id])
        result = result['reservationSet'][0]
        self.assertEqual(len(result['instancesSet']), 1)
        instance = result['instancesSet'][0]
        self.assertEqual(instance['instanceId'], instance_id)
        self.assertEqual(instance['placement']['availabilityZone'], 'zone2')
        self.assertEqual(instance['ipAddress'], '1.2.3.4')
        self.assertEqual(instance['dnsName'], '1.2.3.4')
        self.assertEqual(instance['tagSet'], [])
        self.assertEqual(instance['privateDnsName'], 'server-4321')
        self.assertEqual(instance['privateIpAddress'], '192.168.0.3')
        self.assertEqual(instance['dnsNameV6'],
                         'fe80:b33f::a8bb:ccff:fedd:eeff')
        self.assertEqual(instance['clientToken'], 'client-token-2')

        # A filter with even one invalid id should cause an exception to be
        # raised
        self.assertRaises(exception.InstanceNotFound,
                          self.cloud.describe_instances, self.context,
                          instance_id=[instance_id, '435679'])

        db.instance_destroy(self.context, inst1['uuid'])
        db.instance_destroy(self.context, inst2['uuid'])
        db.service_destroy(self.context, comp1['id'])
        db.service_destroy(self.context, comp2['id'])
+
    def test_describe_instances_all_invalid(self):
        # A filter consisting solely of an unknown instance id must raise
        # InstanceNotFound rather than return an empty set.
        self.flags(use_ipv6=True)

        self._stub_instance_get_with_fixed_ips('get_all')
        self._stub_instance_get_with_fixed_ips('get')

        instance_id = ec2utils.id_to_ec2_inst_id('435679')
        self.assertRaises(exception.InstanceNotFound,
                          self.cloud.describe_instances, self.context,
                          instance_id=[instance_id])
+
+ def test_describe_instances_with_filters(self):
+ # Makes sure describe_instances works and filters results.
+ filters = {'filter': [{'name': 'test',
+ 'value': ['a', 'b']},
+ {'name': 'another_test',
+ 'value': 'a string'}]}
+
+ self._stub_instance_get_with_fixed_ips('get_all')
+ self._stub_instance_get_with_fixed_ips('get')
+
+ result = self.cloud.describe_instances(self.context, **filters)
+ self.assertEqual(result, {'reservationSet': []})
+
    def test_describe_instances_with_filters_tags(self):
        # Makes sure describe_instances works and filters tag results.

        # We need to stub network calls
        self._stub_instance_get_with_fixed_ips('get_all')
        self._stub_instance_get_with_fixed_ips('get')

        # We need to stub out the MQ call - it won't succeed. We do want
        # to check that the method is called, though
        meta_changes = [None]

        def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
                                          instance_uuid=None):
            meta_changes[0] = diff

        self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
                       fake_change_instance_metadata)

        utc = iso8601.iso8601.Utc()

        # Create some test images
        sys_meta = flavors.save_flavor_info(
            {}, flavors.get_flavor(1))
        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
        inst1_kwargs = {
            'reservation_id': 'a',
            'image_ref': image_uuid,
            'instance_type_id': 1,
            'host': 'host1',
            'vm_state': 'active',
            'launched_at': timeutils.utcnow(),
            'hostname': 'server-1111',
            'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1,
                                            tzinfo=utc),
            'system_metadata': sys_meta
        }

        inst2_kwargs = {
            'reservation_id': 'b',
            'image_ref': image_uuid,
            'instance_type_id': 1,
            'host': 'host2',
            'vm_state': 'active',
            'launched_at': timeutils.utcnow(),
            'hostname': 'server-1112',
            'created_at': datetime.datetime(2012, 5, 1, 1, 1, 2,
                                            tzinfo=utc),
            'system_metadata': sys_meta
        }

        inst1 = db.instance_create(self.context, inst1_kwargs)
        ec2_id1 = ec2utils.id_to_ec2_inst_id(inst1['uuid'])

        inst2 = db.instance_create(self.context, inst2_kwargs)
        ec2_id2 = ec2utils.id_to_ec2_inst_id(inst2['uuid'])

        # Create some tags
        # We get one overlapping pair, one overlapping key, and a
        # disparate pair
        # inst1 : {'foo': 'bar', 'baz': 'wibble', 'bax': 'wobble'}
        # inst2 : {'foo': 'bar', 'baz': 'quux', 'zog': 'bobble'}

        md = {'key': 'foo', 'value': 'bar'}
        self.cloud.create_tags(self.context, resource_id=[ec2_id1, ec2_id2],
                               tag=[md])

        md2 = {'key': 'baz', 'value': 'wibble'}
        md3 = {'key': 'bax', 'value': 'wobble'}
        self.cloud.create_tags(self.context, resource_id=[ec2_id1],
                               tag=[md2, md3])

        md4 = {'key': 'baz', 'value': 'quux'}
        md5 = {'key': 'zog', 'value': 'bobble'}
        self.cloud.create_tags(self.context, resource_id=[ec2_id2],
                               tag=[md4, md5])
        # We should be able to search by:

        # Full expected reservation payloads, used for exact comparisons
        # against every filter combination below.
        inst1_ret = {
            'groupSet': None,
            'instancesSet': [{'amiLaunchIndex': None,
                              'dnsName': '1.2.3.4',
                              'dnsNameV6': 'fe80:b33f::a8bb:ccff:fedd:eeff',
                              'imageId': 'ami-00000001',
                              'instanceId': 'i-00000001',
                              'instanceState': {'code': 16,
                                                'name': 'running'},
                              'instanceType': u'm1.medium',
                              'ipAddress': '1.2.3.4',
                              'keyName': 'None (None, host1)',
                              'launchTime':
                                  datetime.datetime(2012, 5, 1, 1, 1, 1,
                                                    tzinfo=utc),
                              'placement': {
                                  'availabilityZone': 'nova'},
                              'privateDnsName': u'server-1111',
                              'privateIpAddress': '192.168.0.3',
                              'productCodesSet': None,
                              'rootDeviceName': '/dev/sda1',
                              'rootDeviceType': 'instance-store',
                              'tagSet': [{'key': u'foo',
                                          'value': u'bar'},
                                         {'key': u'baz',
                                          'value': u'wibble'},
                                         {'key': u'bax',
                                          'value': u'wobble'}]}],
            'ownerId': None,
            'reservationId': u'a'}

        inst2_ret = {
            'groupSet': None,
            'instancesSet': [{'amiLaunchIndex': None,
                              'dnsName': '1.2.3.4',
                              'dnsNameV6': 'fe80:b33f::a8bb:ccff:fedd:eeff',
                              'imageId': 'ami-00000001',
                              'instanceId': 'i-00000002',
                              'instanceState': {'code': 16,
                                                'name': 'running'},
                              'instanceType': u'm1.medium',
                              'ipAddress': '1.2.3.4',
                              'keyName': u'None (None, host2)',
                              'launchTime':
                                  datetime.datetime(2012, 5, 1, 1, 1, 2,
                                                    tzinfo=utc),
                              'placement': {
                                  'availabilityZone': 'nova'},
                              'privateDnsName': u'server-1112',
                              'privateIpAddress': '192.168.0.3',
                              'productCodesSet': None,
                              'rootDeviceName': '/dev/sda1',
                              'rootDeviceType': 'instance-store',
                              'tagSet': [{'key': u'foo',
                                          'value': u'bar'},
                                         {'key': u'baz',
                                          'value': u'quux'},
                                         {'key': u'zog',
                                          'value': u'bobble'}]}],
            'ownerId': None,
            'reservationId': u'b'}

        # No filter
        result = self.cloud.describe_instances(self.context)
        self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})

        # Key search
        # Both should have tags with key 'foo' and value 'bar'
        filters = {'filter': [{'name': 'tag:foo',
                               'value': ['bar']}]}
        result = self.cloud.describe_instances(self.context, **filters)
        self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})

        # Both should have tags with key 'foo'
        filters = {'filter': [{'name': 'tag-key',
                               'value': ['foo']}]}
        result = self.cloud.describe_instances(self.context, **filters)
        self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})

        # Value search
        # Only inst2 should have tags with key 'baz' and value 'quux'
        filters = {'filter': [{'name': 'tag:baz',
                               'value': ['quux']}]}
        result = self.cloud.describe_instances(self.context, **filters)
        self.assertEqual(result, {'reservationSet': [inst2_ret]})

        # Only inst2 should have tags with value 'quux'
        filters = {'filter': [{'name': 'tag-value',
                               'value': ['quux']}]}
        result = self.cloud.describe_instances(self.context, **filters)
        self.assertEqual(result, {'reservationSet': [inst2_ret]})

        # Multiple values
        # Both should have tags with key 'baz' and values in the set
        # ['quux', 'wibble']
        filters = {'filter': [{'name': 'tag:baz',
                               'value': ['quux', 'wibble']}]}
        result = self.cloud.describe_instances(self.context, **filters)
        self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})

        # Both should have tags with key 'baz' or tags with value 'bar'
        filters = {'filter': [{'name': 'tag-key',
                               'value': ['baz']},
                              {'name': 'tag-value',
                               'value': ['bar']}]}
        result = self.cloud.describe_instances(self.context, **filters)
        self.assertEqual(result, {'reservationSet': [inst1_ret, inst2_ret]})

        # Confirm deletion of tags
        # Check for format 'tag:'
        self.cloud.delete_tags(self.context, resource_id=[ec2_id1], tag=[md])
        filters = {'filter': [{'name': 'tag:foo',
                               'value': ['bar']}]}
        result = self.cloud.describe_instances(self.context, **filters)
        self.assertEqual(result, {'reservationSet': [inst2_ret]})

        # Check for format 'tag-'
        filters = {'filter': [{'name': 'tag-key',
                               'value': ['foo']}]}
        result = self.cloud.describe_instances(self.context, **filters)
        self.assertEqual(result, {'reservationSet': [inst2_ret]})
        filters = {'filter': [{'name': 'tag-value',
                               'value': ['bar']}]}
        result = self.cloud.describe_instances(self.context, **filters)
        self.assertEqual(result, {'reservationSet': [inst2_ret]})

        # destroy the test instances
        db.instance_destroy(self.context, inst1['uuid'])
        db.instance_destroy(self.context, inst2['uuid'])
+
    def test_describe_instances_sorting(self):
        # Makes sure describe_instances works and is sorted as expected.
        self.flags(use_ipv6=True)

        self._stub_instance_get_with_fixed_ips('get_all')
        self._stub_instance_get_with_fixed_ips('get')

        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
        sys_meta = flavors.save_flavor_info(
            {}, flavors.get_flavor(1))
        inst_base = {
            'reservation_id': 'a',
            'image_ref': image_uuid,
            'instance_type_id': 1,
            'vm_state': 'active',
            'system_metadata': sys_meta,
        }

        utc = iso8601.iso8601.Utc()

        # Three instances created deliberately out of chronological order:
        # inst1 (May) is newest, inst2 (Feb 1) oldest, inst3 (Feb 5) middle.
        inst1_kwargs = {}
        inst1_kwargs.update(inst_base)
        inst1_kwargs['host'] = 'host1'
        inst1_kwargs['hostname'] = 'server-1111'
        inst1_kwargs['created_at'] = datetime.datetime(2012, 5, 1, 1, 1, 1,
                                                       tzinfo=utc)
        inst1 = db.instance_create(self.context, inst1_kwargs)

        inst2_kwargs = {}
        inst2_kwargs.update(inst_base)
        inst2_kwargs['host'] = 'host2'
        inst2_kwargs['hostname'] = 'server-2222'
        inst2_kwargs['created_at'] = datetime.datetime(2012, 2, 1, 1, 1, 1,
                                                       tzinfo=utc)
        inst2 = db.instance_create(self.context, inst2_kwargs)

        inst3_kwargs = {}
        inst3_kwargs.update(inst_base)
        inst3_kwargs['host'] = 'host3'
        inst3_kwargs['hostname'] = 'server-3333'
        inst3_kwargs['created_at'] = datetime.datetime(2012, 2, 5, 1, 1, 1,
                                                       tzinfo=utc)
        inst3 = db.instance_create(self.context, inst3_kwargs)

        comp1 = db.service_create(self.context, {'host': 'host1',
                                                 'topic': "compute"})

        comp2 = db.service_create(self.context, {'host': 'host2',
                                                 'topic': "compute"})

        # Results must come back ordered by creation time, oldest first.
        result = self.cloud.describe_instances(self.context)
        result = result['reservationSet'][0]['instancesSet']
        self.assertEqual(result[0]['launchTime'], inst2_kwargs['created_at'])
        self.assertEqual(result[1]['launchTime'], inst3_kwargs['created_at'])
        self.assertEqual(result[2]['launchTime'], inst1_kwargs['created_at'])

        db.instance_destroy(self.context, inst1['uuid'])
        db.instance_destroy(self.context, inst2['uuid'])
        db.instance_destroy(self.context, inst3['uuid'])
        db.service_destroy(self.context, comp1['id'])
        db.service_destroy(self.context, comp2['id'])
+
    def test_describe_instance_state(self):
        # Makes sure describe_instances for instanceState works.

        def test_instance_state(expected_code, expected_name,
                                power_state_, vm_state_, values=None):
            # Create an instance in the given power/vm state and check the
            # EC2 instanceState (code, name) reported for it.
            image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
            sys_meta = flavors.save_flavor_info(
                {}, flavors.get_flavor(1))
            values = values or {}
            values.update({'image_ref': image_uuid, 'instance_type_id': 1,
                           'power_state': power_state_, 'vm_state': vm_state_,
                           'system_metadata': sys_meta})
            inst = db.instance_create(self.context, values)

            instance_id = ec2utils.id_to_ec2_inst_id(inst['uuid'])
            result = self.cloud.describe_instances(self.context,
                                                   instance_id=[instance_id])
            result = result['reservationSet'][0]
            result = result['instancesSet'][0]['instanceState']

            name = result['name']
            code = result['code']
            self.assertEqual(code, expected_code)
            self.assertEqual(name, expected_name)

            db.instance_destroy(self.context, inst['uuid'])

        test_instance_state(inst_state.RUNNING_CODE, inst_state.RUNNING,
                            power_state.RUNNING, vm_states.ACTIVE)
        test_instance_state(inst_state.STOPPED_CODE, inst_state.STOPPED,
                            power_state.NOSTATE, vm_states.STOPPED,
                            {'shutdown_terminate': False})
+
    def test_describe_instances_no_ipv6(self):
        # Makes sure describe_instances w/ no ipv6 works.
        self.flags(use_ipv6=False)

        self._stub_instance_get_with_fixed_ips('get_all')
        self._stub_instance_get_with_fixed_ips('get')

        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
        sys_meta = flavors.save_flavor_info(
            {}, flavors.get_flavor(1))
        inst1 = db.instance_create(self.context,
                                   {'reservation_id': 'a',
                                    'image_ref': image_uuid,
                                    'instance_type_id': 1,
                                    'hostname': 'server-1234',
                                    'vm_state': 'active',
                                    'system_metadata': sys_meta})
        comp1 = db.service_create(self.context, {'host': 'host1',
                                                 'topic': "compute"})
        result = self.cloud.describe_instances(self.context)
        result = result['reservationSet'][0]
        self.assertEqual(len(result['instancesSet']), 1)
        instance = result['instancesSet'][0]
        instance_id = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
        self.assertEqual(instance['instanceId'], instance_id)
        self.assertEqual(instance['ipAddress'], '1.2.3.4')
        self.assertEqual(instance['dnsName'], '1.2.3.4')
        self.assertEqual(instance['privateDnsName'], 'server-1234')
        self.assertEqual(instance['privateIpAddress'], '192.168.0.3')
        # With use_ipv6=False the v6 field must be absent entirely.
        self.assertNotIn('dnsNameV6', instance)
        db.instance_destroy(self.context, inst1['uuid'])
        db.service_destroy(self.context, comp1['id'])
+
    def test_describe_instances_deleted(self):
        # A destroyed instance must not appear in the describe results;
        # only the surviving reservation comes back.
        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
        sys_meta = flavors.save_flavor_info(
            {}, flavors.get_flavor(1))
        args1 = {'reservation_id': 'a',
                 'image_ref': image_uuid,
                 'instance_type_id': 1,
                 'host': 'host1',
                 'vm_state': 'active',
                 'system_metadata': sys_meta}
        inst1 = db.instance_create(self.context, args1)
        args2 = {'reservation_id': 'b',
                 'image_ref': image_uuid,
                 'instance_type_id': 1,
                 'host': 'host1',
                 'vm_state': 'active',
                 'system_metadata': sys_meta}
        inst2 = db.instance_create(self.context, args2)
        db.instance_destroy(self.context, inst1['uuid'])
        result = self.cloud.describe_instances(self.context)
        self.assertEqual(len(result['reservationSet']), 1)
        result1 = result['reservationSet'][0]['instancesSet']
        self.assertEqual(result1[0]['instanceId'],
                         ec2utils.id_to_ec2_inst_id(inst2['uuid']))
+
    def test_describe_instances_with_image_deleted(self):
        # Instances whose backing image no longer exists are still listed.
        # This image uuid is not registered with the fake image service.
        image_uuid = 'aebef54a-ed67-4d10-912f-14455edce176'
        sys_meta = flavors.save_flavor_info(
            {}, flavors.get_flavor(1))
        args1 = {'reservation_id': 'a',
                 'image_ref': image_uuid,
                 'instance_type_id': 1,
                 'host': 'host1',
                 'vm_state': 'active',
                 'system_metadata': sys_meta}
        db.instance_create(self.context, args1)
        args2 = {'reservation_id': 'b',
                 'image_ref': image_uuid,
                 'instance_type_id': 1,
                 'host': 'host1',
                 'vm_state': 'active',
                 'system_metadata': sys_meta}
        db.instance_create(self.context, args2)
        result = self.cloud.describe_instances(self.context)
        self.assertEqual(len(result['reservationSet']), 2)
+
    def test_describe_instances_dnsName_set(self):
        # dnsName must be None when the instance has no floating IP
        # (the stubs are installed with get_floating=False).
        # NOTE(review): the original comment said "if floating IP is set",
        # which contradicts the stub arguments below — confirm intent.
        self._stub_instance_get_with_fixed_ips('get_all', get_floating=False)
        self._stub_instance_get_with_fixed_ips('get', get_floating=False)

        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
        sys_meta = flavors.save_flavor_info(
            {}, flavors.get_flavor(1))
        db.instance_create(self.context, {'reservation_id': 'a',
                                          'image_ref': image_uuid,
                                          'instance_type_id': 1,
                                          'host': 'host1',
                                          'hostname': 'server-1234',
                                          'vm_state': 'active',
                                          'system_metadata': sys_meta})
        result = self.cloud.describe_instances(self.context)
        result = result['reservationSet'][0]
        instance = result['instancesSet'][0]
        self.assertIsNone(instance['dnsName'])
+
    def test_describe_instances_booting_from_a_volume(self):
        # A volume-booted instance (empty image_ref) reports imageId None.
        sys_meta = flavors.save_flavor_info(
            {}, flavors.get_flavor(1))
        inst = objects.Instance(self.context)
        inst.reservation_id = 'a'
        inst.image_ref = ''
        inst.root_device_name = '/dev/sdh'
        inst.instance_type_id = 1
        inst.vm_state = vm_states.ACTIVE
        inst.host = 'host1'
        inst.system_metadata = sys_meta
        inst.create()
        result = self.cloud.describe_instances(self.context)
        result = result['reservationSet'][0]
        instance = result['instancesSet'][0]
        self.assertIsNone(instance['imageId'])
+
    def test_describe_images(self):
        """describe_images: list-all, lookup by id(s), and not-found."""
        describe_images = self.cloud.describe_images

        def fake_detail(meh, context, **kwargs):
            # One registered AMI with kernel and ramdisk pointers.
            return [{'id': 'cedef40a-ed67-4d10-800e-17455edce175',
                     'name': 'fake_name',
                     'container_format': 'ami',
                     'status': 'active',
                     'properties': {
                         'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                         'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                         'type': 'machine'}}]

        def fake_show_none(meh, context, id):
            raise exception.ImageNotFound(image_id='bad_image_id')

        def fake_detail_none(self, context, **kwargs):
            return []

        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
        # list all
        result1 = describe_images(self.context)
        result1 = result1['imagesSet'][0]
        self.assertEqual(result1['imageId'], 'ami-00000001')
        # provided a valid image_id
        result2 = describe_images(self.context, ['ami-00000001'])
        self.assertEqual(1, len(result2['imagesSet']))
        # provide more than 1 valid image_id
        result3 = describe_images(self.context, ['ami-00000001',
                                                 'ami-00000002'])
        self.assertEqual(2, len(result3['imagesSet']))
        # provide a non-existing image_id
        self.stubs.UnsetAll()
        self.stubs.Set(fake._FakeImageService, 'show', fake_show_none)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_none)
        self.assertRaises(exception.ImageNotFound, describe_images,
                          self.context, ['ami-fake'])
+
+ def assertDictListUnorderedMatch(self, L1, L2, key):
+ self.assertEqual(len(L1), len(L2))
+ for d1 in L1:
+ self.assertIn(key, d1)
+ for d2 in L2:
+ self.assertIn(key, d2)
+ if d1[key] == d2[key]:
+ self.assertThat(d1, matchers.DictMatches(d2))
+
    def _setUpImageSet(self, create_volumes_and_snapshots=False):
        """Install two fake images with block-device mappings.

        image1 ('ami-00000001') is instance-store rooted with ephemeral,
        swap, snapshot and volume mappings; image2 ('ami-00000002') is
        EBS-rooted at /dev/sdb1.  When *create_volumes_and_snapshots* is
        True, backing volumes/snapshots for image1's mappings are created
        and their ids returned as ``(volumes, snapshots)``.
        """
        self.flags(max_local_block_devices=-1)
        mappings1 = [
            {'device': '/dev/sda1', 'virtual': 'root'},

            {'device': 'sdb0', 'virtual': 'ephemeral0'},
            {'device': 'sdb1', 'virtual': 'ephemeral1'},
            {'device': 'sdb2', 'virtual': 'ephemeral2'},
            {'device': 'sdb3', 'virtual': 'ephemeral3'},
            {'device': 'sdb4', 'virtual': 'ephemeral4'},

            {'device': 'sdc0', 'virtual': 'swap'},
            {'device': 'sdc1', 'virtual': 'swap'},
            {'device': 'sdc2', 'virtual': 'swap'},
            {'device': 'sdc3', 'virtual': 'swap'},
            {'device': 'sdc4', 'virtual': 'swap'}]
        block_device_mapping1 = [
            {'device_name': '/dev/sdb1',
             'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e3'},
            {'device_name': '/dev/sdb2',
             'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e4'},
            {'device_name': '/dev/sdb3', 'virtual_name': 'ephemeral5'},
            {'device_name': '/dev/sdb4', 'no_device': True},

            {'device_name': '/dev/sdc1',
             'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e5'},
            {'device_name': '/dev/sdc2',
             'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e6'},
            {'device_name': '/dev/sdc3', 'virtual_name': 'ephemeral6'},
            {'device_name': '/dev/sdc4', 'no_device': True}]
        image1 = {
            'id': 'cedef40a-ed67-4d10-800e-17455edce175',
            'name': 'fake_name',
            'status': 'active',
            'properties': {
                'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                'type': 'machine',
                'image_state': 'available',
                'mappings': mappings1,
                'block_device_mapping': block_device_mapping1,
            }
        }

        mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
        block_device_mapping2 = [{'device_name': '/dev/sdb1',
                                  'snapshot_id':
                                      'ccec42a2-c220-4806-b762-6b12fbb592e7',
                                  'volume_id': None}]
        image2 = {
            'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
            'name': 'fake_name',
            'status': 'active',
            'properties': {
                'kernel_id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                'type': 'machine',
                'root_device_name': '/dev/sdb1',
                'mappings': mappings2,
                'block_device_mapping': block_device_mapping2}}

        def fake_show(meh, context, image_id, **kwargs):
            # Deep copies so tests cannot mutate the shared fixtures.
            _images = [copy.deepcopy(image1), copy.deepcopy(image2)]
            for i in _images:
                if str(i['id']) == str(image_id):
                    return i
            raise exception.ImageNotFound(image_id=image_id)

        def fake_detail(meh, context, **kwargs):
            return [copy.deepcopy(image1), copy.deepcopy(image2)]

        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)

        volumes = []
        snapshots = []
        if create_volumes_and_snapshots:
            for bdm in block_device_mapping1:
                if 'volume_id' in bdm:
                    vol = self._volume_create(bdm['volume_id'])
                    volumes.append(vol['id'])
                if 'snapshot_id' in bdm:
                    snap = self._snapshot_create(bdm['snapshot_id'])
                    snapshots.append(snap['id'])
        return (volumes, snapshots)
+
+ def _assertImageSet(self, result, root_device_type, root_device_name):
+ self.assertEqual(1, len(result['imagesSet']))
+ result = result['imagesSet'][0]
+ self.assertIn('rootDeviceType', result)
+ self.assertEqual(result['rootDeviceType'], root_device_type)
+ self.assertIn('rootDeviceName', result)
+ self.assertEqual(result['rootDeviceName'], root_device_name)
+ self.assertIn('blockDeviceMapping', result)
+
+ return result
+
    # Expected root device / EC2-formatted BDM lists for the two images
    # installed by _setUpImageSet(); shared by the describe/attribute tests.
    _expected_root_device_name1 = '/dev/sda1'
    # NOTE(yamahata): noDevice doesn't make sense when returning mapping
    #                 It makes sense only when user overriding existing
    #                 mapping.
    # NOTE(review): volume-backed entries below carry their vol-XXXX id
    # under the 'snapshotId' key — presumably mirroring the formatter's
    # output; confirm against the cloud BDM formatting code.
    _expected_bdms1 = [
        {'deviceName': '/dev/sdb0', 'virtualName': 'ephemeral0'},
        {'deviceName': '/dev/sdb1', 'ebs': {'snapshotId':
                                            'snap-00000001'}},
        {'deviceName': '/dev/sdb2', 'ebs': {'snapshotId':
                                            'vol-00000001'}},
        {'deviceName': '/dev/sdb3', 'virtualName': 'ephemeral5'},
        # {'deviceName': '/dev/sdb4', 'noDevice': True},

        {'deviceName': '/dev/sdc0', 'virtualName': 'swap'},
        {'deviceName': '/dev/sdc1', 'ebs': {'snapshotId':
                                            'snap-00000002'}},
        {'deviceName': '/dev/sdc2', 'ebs': {'snapshotId':
                                            'vol-00000002'}},
        {'deviceName': '/dev/sdc3', 'virtualName': 'ephemeral6'},
        # {'deviceName': '/dev/sdc4', 'noDevice': True}
        ]

    _expected_root_device_name2 = '/dev/sdb1'
    _expected_bdms2 = [{'deviceName': '/dev/sdb1',
                        'ebs': {'snapshotId': 'snap-00000003'}}]

    # NOTE(yamahata):
    # InstanceBlockDeviceMappingItemType
    # rootDeviceType
    # rootDeviceName
    # blockDeviceMapping
    #  deviceName
    #  virtualName
    #  ebs
    #    snapshotId
    #    volumeSize
    #    deleteOnTermination
    #  noDevice
    def test_describe_image_mapping(self):
        # test for rootDeviceName and blockDeviceMapping.
        describe_images = self.cloud.describe_images
        self._setUpImageSet()

        # image1: instance-store root with the full mapping set.
        result = describe_images(self.context, ['ami-00000001'])
        result = self._assertImageSet(result, 'instance-store',
                                      self._expected_root_device_name1)

        self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
                                          self._expected_bdms1, 'deviceName')

        # image2: EBS root with a single snapshot mapping.
        result = describe_images(self.context, ['ami-00000002'])
        result = self._assertImageSet(result, 'ebs',
                                      self._expected_root_device_name2)

        self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
                                          self._expected_bdms2, 'deviceName')
+
    def test_describe_image_attribute(self):
        """launchPermission, kernel and ramdisk attributes of an image."""
        describe_image_attribute = self.cloud.describe_image_attribute

        def fake_show(meh, context, id, **kwargs):
            # Public AMI with kernel/ramdisk pointers.
            return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'name': 'fake_name',
                    'status': 'active',
                    'properties': {
                        'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'type': 'machine'},
                    'container_format': 'ami',
                    'is_public': True}

        def fake_detail(self, context, **kwargs):
            image = fake_show(None, context, None)
            image['name'] = kwargs.get('filters', {}).get('name')
            return [image]

        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
        result = describe_image_attribute(self.context, 'ami-00000001',
                                          'launchPermission')
        self.assertEqual([{'group': 'all'}], result['launchPermission'])
        result = describe_image_attribute(self.context, 'ami-00000001',
                                          'kernel')
        self.assertEqual('aki-00000001', result['kernel']['value'])
        result = describe_image_attribute(self.context, 'ami-00000001',
                                          'ramdisk')
        self.assertEqual('ari-00000001', result['ramdisk']['value'])
+
    def test_describe_image_attribute_root_device_name(self):
        # rootDeviceName attribute for both fixture images.
        describe_image_attribute = self.cloud.describe_image_attribute
        self._setUpImageSet()

        result = describe_image_attribute(self.context, 'ami-00000001',
                                          'rootDeviceName')
        self.assertEqual(result['rootDeviceName'],
                         self._expected_root_device_name1)
        result = describe_image_attribute(self.context, 'ami-00000002',
                                          'rootDeviceName')
        self.assertEqual(result['rootDeviceName'],
                         self._expected_root_device_name2)
+
    def test_describe_image_attribute_block_device_mapping(self):
        # blockDeviceMapping attribute for both fixture images.
        describe_image_attribute = self.cloud.describe_image_attribute
        self._setUpImageSet()

        result = describe_image_attribute(self.context, 'ami-00000001',
                                          'blockDeviceMapping')
        self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
                                          self._expected_bdms1, 'deviceName')
        result = describe_image_attribute(self.context, 'ami-00000002',
                                          'blockDeviceMapping')
        self.assertDictListUnorderedMatch(result['blockDeviceMapping'],
                                          self._expected_bdms2, 'deviceName')
+
    def test_modify_image_attribute(self):
        """Granting launchPermission to 'all' flips the image public."""
        modify_image_attribute = self.cloud.modify_image_attribute

        fake_metadata = {
            'id': 'cedef40a-ed67-4d10-800e-17455edce175',
            'name': 'fake_name',
            'container_format': 'ami',
            'status': 'active',
            'properties': {
                'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                'type': 'machine'},
            'is_public': False}

        def fake_show(meh, context, id, **kwargs):
            return copy.deepcopy(fake_metadata)

        def fake_detail(self, context, **kwargs):
            image = fake_show(None, context, None)
            image['name'] = kwargs.get('filters', {}).get('name')
            return [image]

        def fake_update(meh, context, image_id, metadata, data=None):
            # The update must preserve kernel/ramdisk and set is_public.
            self.assertEqual(metadata['properties']['kernel_id'],
                             fake_metadata['properties']['kernel_id'])
            self.assertEqual(metadata['properties']['ramdisk_id'],
                             fake_metadata['properties']['ramdisk_id'])
            self.assertTrue(metadata['is_public'])
            image = copy.deepcopy(fake_metadata)
            image.update(metadata)
            return image

        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
        self.stubs.Set(fake._FakeImageService, 'update', fake_update)
        result = modify_image_attribute(self.context, 'ami-00000001',
                                        'launchPermission', 'add',
                                        user_group=['all'])
        self.assertTrue(result['is_public'])
+
+ def test_register_image(self):
+ register_image = self.cloud.register_image
+
+ def fake_create(*args, **kwargs):
+ # NOTE(vish): We are mocking s3 so make sure we have converted
+ # to ids instead of uuids.
+ return {'id': 1,
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'properties': {'kernel_id': 1,
+ 'ramdisk_id': 1,
+ 'type': 'machine'
+ },
+ 'is_public': False
+ }
+
+ self.stubs.Set(s3.S3ImageService, 'create', fake_create)
+ image_location = 'fake_bucket/fake.img.manifest.xml'
+ result = register_image(self.context, image_location)
+ self.assertEqual(result['imageId'], 'ami-00000001')
+
+ def test_register_image_empty(self):
+ register_image = self.cloud.register_image
+ self.assertRaises(exception.MissingParameter, register_image,
+ self.context, image_location=None)
+
+ def test_register_image_name(self):
+ register_image = self.cloud.register_image
+
+ def fake_create(_self, context, metadata, data=None):
+ self.assertEqual(metadata['name'], self.expected_name)
+ metadata['id'] = 1
+ metadata['container_format'] = 'ami'
+ metadata['is_public'] = False
+ return metadata
+
+ self.stubs.Set(s3.S3ImageService, 'create', fake_create)
+ self.expected_name = 'fake_bucket/fake.img.manifest.xml'
+ register_image(self.context,
+ image_location=self.expected_name,
+ name=None)
+ self.expected_name = 'an image name'
+ register_image(self.context,
+ image_location='some_location',
+ name=self.expected_name)
+
+ def test_format_image(self):
+ image = {
+ 'id': 1,
+ 'container_format': 'ami',
+ 'name': 'name',
+ 'owner': 'someone',
+ 'properties': {
+ 'image_location': 'location',
+ 'kernel_id': 1,
+ 'ramdisk_id': 1,
+ 'type': 'machine'},
+ 'is_public': False}
+ expected = {'name': 'name',
+ 'imageOwnerId': 'someone',
+ 'isPublic': False,
+ 'imageId': 'ami-00000001',
+ 'imageState': None,
+ 'rootDeviceType': 'instance-store',
+ 'architecture': None,
+ 'imageLocation': 'location',
+ 'kernelId': 'aki-00000001',
+ 'ramdiskId': 'ari-00000001',
+ 'rootDeviceName': '/dev/sda1',
+ 'imageType': 'machine',
+ 'description': None}
+ result = self.cloud._format_image(image)
+ self.assertThat(result, matchers.DictMatches(expected))
+ image['properties']['image_location'] = None
+ expected['imageLocation'] = 'None (name)'
+ result = self.cloud._format_image(image)
+ self.assertThat(result, matchers.DictMatches(expected))
+ image['name'] = None
+ image['properties']['image_location'] = 'location'
+ expected['imageLocation'] = 'location'
+ expected['name'] = 'location'
+ result = self.cloud._format_image(image)
+ self.assertThat(result, matchers.DictMatches(expected))
+
+ def test_deregister_image(self):
+ deregister_image = self.cloud.deregister_image
+
+ def fake_delete(self, context, id):
+ return None
+
+ self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
+ # valid image
+ result = deregister_image(self.context, 'ami-00000001')
+ self.assertTrue(result)
+ # invalid image
+ self.stubs.UnsetAll()
+
+ def fake_detail_empty(self, context, **kwargs):
+ return []
+
+ self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_empty)
+ self.assertRaises(exception.ImageNotFound, deregister_image,
+ self.context, 'ami-bad001')
+
+ def test_deregister_image_wrong_container_type(self):
+ deregister_image = self.cloud.deregister_image
+
+ def fake_delete(self, context, id):
+ return None
+
+ self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
+ self.assertRaises(exception.NotFound, deregister_image, self.context,
+ 'aki-00000001')
+
+ def _run_instance(self, **kwargs):
+ rv = self.cloud.run_instances(self.context, **kwargs)
+ instance_id = rv['instancesSet'][0]['instanceId']
+ return instance_id
+
+ def test_get_password_data(self):
+ instance_id = self._run_instance(
+ image_id='ami-1',
+ instance_type=CONF.default_flavor,
+ max_count=1)
+ self.stubs.Set(password, 'extract_password', lambda i: 'fakepass')
+ output = self.cloud.get_password_data(context=self.context,
+ instance_id=[instance_id])
+ self.assertEqual(output['passwordData'], 'fakepass')
+ self.cloud.terminate_instances(self.context, [instance_id])
+
+ def test_console_output(self):
+ instance_id = self._run_instance(
+ image_id='ami-1',
+ instance_type=CONF.default_flavor,
+ max_count=1)
+ output = self.cloud.get_console_output(context=self.context,
+ instance_id=[instance_id])
+ self.assertEqual(base64.b64decode(output['output']),
+ 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE')
+ # TODO(soren): We need this until we can stop polling in the rpc code
+ # for unit tests.
+ self.cloud.terminate_instances(self.context, [instance_id])
+
+ def test_key_generation(self):
+ result, private_key = self._create_key('test')
+
+ expected = db.key_pair_get(self.context,
+ self.context.user_id,
+ 'test')['public_key']
+
+ (fd, fname) = tempfile.mkstemp()
+ os.write(fd, private_key)
+
+ public_key, err = utils.execute('ssh-keygen', '-e', '-f', fname)
+
+ os.unlink(fname)
+
+ # assert key fields are equal
+ self.assertEqual(''.join(public_key.split("\n")[2:-2]),
+ expected.split(" ")[1].strip())
+
+ def test_describe_key_pairs(self):
+ self._create_key('test1')
+ self._create_key('test2')
+ result = self.cloud.describe_key_pairs(self.context)
+ keys = result["keySet"]
+ self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys))
+ self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))
+
+ def test_describe_bad_key_pairs(self):
+ self.assertRaises(exception.KeypairNotFound,
+ self.cloud.describe_key_pairs, self.context,
+ key_name=['DoesNotExist'])
+
+ def test_import_key_pair(self):
+ pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
+ with open(pubkey_path + '/dummy.pub') as f:
+ dummypub = f.readline().rstrip()
+ with open(pubkey_path + '/dummy.fingerprint') as f:
+ dummyfprint = f.readline().rstrip()
+ key_name = 'testimportkey'
+ public_key_material = base64.b64encode(dummypub)
+ result = self.cloud.import_key_pair(self.context,
+ key_name,
+ public_key_material)
+ self.assertEqual(result['keyName'], key_name)
+ self.assertEqual(result['keyFingerprint'], dummyfprint)
+ keydata = db.key_pair_get(self.context,
+ self.context.user_id,
+ key_name)
+ self.assertEqual(dummypub, keydata['public_key'])
+ self.assertEqual(dummyfprint, keydata['fingerprint'])
+
+ def test_import_key_pair_quota_limit(self):
+ self.flags(quota_key_pairs=0)
+ pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key')
+ f = open(pubkey_path + '/dummy.pub', 'r')
+ dummypub = f.readline().rstrip()
+ f.close
+ f = open(pubkey_path + '/dummy.fingerprint', 'r')
+ f.readline().rstrip()
+ f.close
+ key_name = 'testimportkey'
+ public_key_material = base64.b64encode(dummypub)
+ self.assertRaises(exception.KeypairLimitExceeded,
+ self.cloud.import_key_pair, self.context, key_name,
+ public_key_material)
+
+ def test_create_key_pair(self):
+ good_names = ('a', 'a' * 255, string.ascii_letters + ' -_')
+ bad_names = ('', 'a' * 256, '*', '/')
+
+ for key_name in good_names:
+ result = self.cloud.create_key_pair(self.context,
+ key_name)
+ self.assertEqual(result['keyName'], key_name)
+
+ for key_name in bad_names:
+ self.assertRaises(exception.InvalidKeypair,
+ self.cloud.create_key_pair,
+ self.context,
+ key_name)
+
+ def test_create_key_pair_quota_limit(self):
+ self.flags(quota_key_pairs=10)
+ for i in range(0, 10):
+ key_name = 'key_%i' % i
+ result = self.cloud.create_key_pair(self.context,
+ key_name)
+ self.assertEqual(result['keyName'], key_name)
+
+ # 11'th group should fail
+ self.assertRaises(exception.KeypairLimitExceeded,
+ self.cloud.create_key_pair,
+ self.context,
+ 'foo')
+
+ def test_delete_key_pair(self):
+ self._create_key('test')
+ self.cloud.delete_key_pair(self.context, 'test')
+
+ def test_run_instances(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+
+ def fake_show(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'container_format': 'ami',
+ 'status': 'active'}
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+
+ def dumb(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
+
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ result = run_instances(self.context, **kwargs)
+ instance = result['instancesSet'][0]
+ self.assertEqual(instance['imageId'], 'ami-00000001')
+ self.assertEqual(instance['instanceId'], 'i-00000001')
+ self.assertEqual(instance['instanceState']['name'], 'running')
+ self.assertEqual(instance['instanceType'], 'm1.small')
+
+ def test_run_instances_invalid_maxcount(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 0}
+ run_instances = self.cloud.run_instances
+
+ def fake_show(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'status': 'active'}
+ self.stubs.UnsetAll()
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.assertRaises(exception.InvalidInput, run_instances,
+ self.context, **kwargs)
+
+ def test_run_instances_invalid_mincount(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'min_count': 0}
+ run_instances = self.cloud.run_instances
+
+ def fake_show(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'status': 'active'}
+ self.stubs.UnsetAll()
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.assertRaises(exception.InvalidInput, run_instances,
+ self.context, **kwargs)
+
+ def test_run_instances_invalid_count(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1,
+ 'min_count': 2}
+ run_instances = self.cloud.run_instances
+
+ def fake_show(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'status': 'active'}
+ self.stubs.UnsetAll()
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.assertRaises(exception.InvalidInput, run_instances,
+ self.context, **kwargs)
+
+ def test_run_instances_availability_zone(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1,
+ 'placement': {'availability_zone': 'fake'},
+ }
+ run_instances = self.cloud.run_instances
+
+ def fake_show(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'container_format': 'ami',
+ 'status': 'active'}
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ def fake_format(*args, **kwargs):
+ pass
+
+ self.stubs.Set(self.cloud, '_format_run_instances', fake_format)
+
+ def fake_create(*args, **kwargs):
+ self.assertEqual(kwargs['availability_zone'], 'fake')
+ return ({'id': 'fake-instance'}, 'fake-res-id')
+
+ self.stubs.Set(self.cloud.compute_api, 'create', fake_create)
+
+ # NOTE(vish) the assert for this call is in the fake_create method.
+ run_instances(self.context, **kwargs)
+
+ def test_empty_reservation_id_from_token(self):
+ client_token = 'client-token-1'
+
+ def fake_get_all_system_metadata(context, search_filts):
+ reference = [{'key': ['EC2_client_token']},
+ {'value': ['client-token-1']}]
+ self.assertEqual(search_filts, reference)
+ return []
+
+ self.stubs.Set(self.cloud.compute_api, 'get_all_system_metadata',
+ fake_get_all_system_metadata)
+ resv_id = self.cloud._resv_id_from_token(self.context, client_token)
+ self.assertIsNone(resv_id)
+
    def test_run_instances_idempotent(self):
        # Ensure subsequent run_instances calls with same client token
        # are idempotent and that ones with different client_token are not

        kwargs = {'image_id': 'ami-00000001',
                  'instance_type': CONF.default_flavor,
                  'max_count': 1}

        run_instances = self.cloud.run_instances

        def fake_show(self, context, id, **kwargs):
            # Minimal active machine image so run_instances can boot it.
            return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'name': 'fake_name',
                    'properties': {
                        'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'type': 'machine'},
                    'container_format': 'ami',
                    'status': 'active'}

        self.stubs.Set(fake._FakeImageService, 'show', fake_show)

        def dumb(*args, **kwargs):
            # Swallow usage notifications; not relevant to this test.
            pass

        self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)

        self.useFixture(cast_as_call.CastAsCall(self.stubs))

        # First use of a token boots a new instance.
        kwargs['client_token'] = 'client-token-1'
        result = run_instances(self.context, **kwargs)
        instance = result['instancesSet'][0]
        self.assertEqual(instance['instanceId'], 'i-00000001')

        # A different token boots a second instance.
        kwargs['client_token'] = 'client-token-2'
        result = run_instances(self.context, **kwargs)
        instance = result['instancesSet'][0]
        self.assertEqual(instance['instanceId'], 'i-00000002')

        # Re-using token 2 returns the existing instance, not a new one.
        kwargs['client_token'] = 'client-token-2'
        result = run_instances(self.context, **kwargs)
        instance = result['instancesSet'][0]
        self.assertEqual(instance['instanceId'], 'i-00000002')

        # Re-using token 1 likewise returns the first instance.
        kwargs['client_token'] = 'client-token-1'
        result = run_instances(self.context, **kwargs)
        instance = result['instancesSet'][0]
        self.assertEqual(instance['instanceId'], 'i-00000001')

        # A brand-new token boots a third instance.
        kwargs['client_token'] = 'client-token-3'
        result = run_instances(self.context, **kwargs)
        instance = result['instancesSet'][0]
        self.assertEqual(instance['instanceId'], 'i-00000003')

        # make sure terminated instances lose their client tokens
        self.cloud.stop_instances(self.context,
                                  instance_id=[instance['instanceId']])
        self.cloud.terminate_instances(self.context,
                                       instance_id=[instance['instanceId']])

        # Token 3 now boots a fresh (fourth) instance.
        kwargs['client_token'] = 'client-token-3'
        result = run_instances(self.context, **kwargs)
        instance = result['instancesSet'][0]
        self.assertEqual(instance['instanceId'], 'i-00000004')
+
+ def test_run_instances_image_state_none(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+
+ def fake_show_no_state(self, context, id):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'}, 'container_format': 'ami'}
+
+ self.stubs.UnsetAll()
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state)
+ self.assertRaises(exception.ImageNotActive, run_instances,
+ self.context, **kwargs)
+
+ def test_run_instances_image_state_invalid(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+
+ def fake_show_decrypt(self, context, id):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'status': 'active',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine', 'image_state': 'decrypting'}}
+
+ self.stubs.UnsetAll()
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show_decrypt)
+ self.assertRaises(exception.ImageNotActive, run_instances,
+ self.context, **kwargs)
+
+ def test_run_instances_image_status_active(self):
+ kwargs = {'image_id': 'ami-00000001',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1}
+ run_instances = self.cloud.run_instances
+
+ def fake_show_stat_active(self, context, id, **kwargs):
+ return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'name': 'fake_name',
+ 'container_format': 'ami',
+ 'properties': {
+ 'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'type': 'machine'},
+ 'status': 'active'}
+
+ def fake_id_to_glance_id(context, id):
+ return 'cedef40a-ed67-4d10-800e-17455edce175'
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active)
+ self.stubs.Set(ec2utils, 'id_to_glance_id', fake_id_to_glance_id)
+
+ result = run_instances(self.context, **kwargs)
+ self.assertEqual(len(result['instancesSet']), 1)
+
+ def _restart_compute_service(self, periodic_interval_max=None):
+ """restart compute service. NOTE: fake driver forgets all instances."""
+ self.compute.kill()
+ if periodic_interval_max:
+ self.compute = self.start_service(
+ 'compute', periodic_interval_max=periodic_interval_max)
+ else:
+ self.compute = self.start_service('compute')
+
+ def test_stop_start_instance(self):
+ # Makes sure stop/start instance works.
+ # enforce periodic tasks run in short time to avoid wait for 60s.
+ self._restart_compute_service(periodic_interval_max=0.3)
+
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ # a running instance can't be started.
+ self.assertRaises(exception.InstanceInvalidState,
+ self.cloud.start_instances,
+ self.context, [instance_id])
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 64,
+ 'name': 'stopping'}}]}
+ result = self.cloud.stop_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 80,
+ 'name': 'stopped'},
+ 'currentState': {'code': 0,
+ 'name': 'pending'}}]}
+ result = self.cloud.start_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 64,
+ 'name': 'stopping'}}]}
+ result = self.cloud.stop_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 80,
+ 'name': 'stopped'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ def test_start_instances(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ result = self.cloud.stop_instances(self.context, [instance_id])
+ self.assertTrue(result)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 80,
+ 'name': 'stopped'},
+ 'currentState': {'code': 0,
+ 'name': 'pending'}}]}
+ result = self.cloud.start_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+ self._restart_compute_service()
+
+ def test_start_instances_policy_failed(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+ rules = {
+ "compute:start":
+ common_policy.parse_rule("project_id:non_fake"),
+ }
+ policy.set_rules(rules)
+ exc = self.assertRaises(exception.PolicyNotAuthorized,
+ self.cloud.start_instances,
+ self.context, [instance_id])
+ self.assertIn("compute:start", exc.format_message())
+ self._restart_compute_service()
+
+ def test_stop_instances(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 64,
+ 'name': 'stopping'}}]}
+ result = self.cloud.stop_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 80,
+ 'name': 'stopped'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+ self._restart_compute_service()
+
+ def test_stop_instances_policy_failed(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+ rules = {
+ "compute:stop":
+ common_policy.parse_rule("project_id:non_fake")
+ }
+ policy.set_rules(rules)
+ exc = self.assertRaises(exception.PolicyNotAuthorized,
+ self.cloud.stop_instances,
+ self.context, [instance_id])
+ self.assertIn("compute:stop", exc.format_message())
+ self._restart_compute_service()
+
+ def test_terminate_instances(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ # a running instance can't be started.
+ self.assertRaises(exception.InstanceInvalidState,
+ self.cloud.start_instances,
+ self.context, [instance_id])
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+ self._restart_compute_service()
+
+ def test_terminate_instances_invalid_instance_id(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ self._run_instance(**kwargs)
+
+ self.assertRaises(exception.InstanceNotFound,
+ self.cloud.terminate_instances,
+ self.context, ['i-2'])
+ self._restart_compute_service()
+
+ def test_terminate_instances_disable_terminate(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ internal_uuid = db.get_instance_uuid_by_ec2_id(self.context,
+ ec2utils.ec2_id_to_id(instance_id))
+ db.instance_update(self.context, internal_uuid,
+ {'disable_terminate': True})
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 16,
+ 'name': 'running'}}]}
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+
+ db.instance_update(self.context, internal_uuid,
+ {'disable_terminate': False})
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context, [instance_id])
+ self.assertEqual(result, expected)
+ self._restart_compute_service()
+
+ def test_terminate_instances_two_instances(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ inst1 = self._run_instance(**kwargs)
+ inst2 = self._run_instance(**kwargs)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 64,
+ 'name': 'stopping'}}]}
+ result = self.cloud.stop_instances(self.context, [inst1])
+ self.assertEqual(result, expected)
+
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 80,
+ 'name': 'stopped'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}},
+ {'instanceId': 'i-00000002',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 32,
+ 'name': 'shutting-down'}}]}
+ result = self.cloud.terminate_instances(self.context, [inst1, inst2])
+ self.assertEqual(result, expected)
+ self._restart_compute_service()
+
+ def test_reboot_instances(self):
+ kwargs = {'image_id': 'ami-1',
+ 'instance_type': CONF.default_flavor,
+ 'max_count': 1, }
+ instance_id = self._run_instance(**kwargs)
+
+ # a running instance can't be started.
+ self.assertRaises(exception.InstanceInvalidState,
+ self.cloud.start_instances,
+ self.context, [instance_id])
+
+ result = self.cloud.reboot_instances(self.context, [instance_id])
+ self.assertTrue(result)
+
+ def _volume_create(self, volume_id=None):
+ kwargs = {'name': 'test-volume',
+ 'description': 'test volume description',
+ 'status': 'available',
+ 'host': 'fake',
+ 'size': 1,
+ 'attach_status': 'detached'}
+ if volume_id:
+ kwargs['volume_id'] = volume_id
+ return self.volume_api.create_with_kwargs(self.context, **kwargs)
+
+ def _snapshot_create(self, snapshot_id=None):
+ kwargs = {'volume_id': 'ccec42a2-c220-4806-b762-6b12fbb592e4',
+ 'status': "available",
+ 'volume_size': 1}
+ if snapshot_id:
+ kwargs['snap_id'] = snapshot_id
+ return self.volume_api.create_snapshot_with_kwargs(self.context,
+ **kwargs)
+
+ def _create_snapshot(self, ec2_volume_id):
+ result = self.cloud.create_snapshot(self.context,
+ volume_id=ec2_volume_id)
+ return result['snapshotId']
+
    def _do_test_create_image(self, no_reboot):
        """Make sure that CreateImage works.

        :param no_reboot: forwarded to CreateImage; the test asserts that the
            virt driver is power-cycled exactly when no_reboot is False.
        """
        # enforce periodic tasks run in short time to avoid wait for 60s.
        self._restart_compute_service(periodic_interval_max=0.3)

        (volumes, snapshots) = self._setUpImageSet(
            create_volumes_and_snapshots=True)

        kwargs = {'image_id': 'ami-1',
                  'instance_type': CONF.default_flavor,
                  'max_count': 1}
        ec2_instance_id = self._run_instance(**kwargs)

        def fake_show(meh, context, id, **kwargs):
            # EBS-backed image metadata: root device maps to a snapshot.
            bdm = [dict(snapshot_id=snapshots[0],
                        volume_size=1,
                        device_name='sda1',
                        delete_on_termination=False)]
            props = dict(kernel_id='cedef40a-ed67-4d10-800e-17455edce175',
                         ramdisk_id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
                         root_device_name='/dev/sda1',
                         block_device_mapping=bdm)
            return dict(id=id,
                        properties=props,
                        container_format='ami',
                        status='active',
                        is_public=True)

        self.stubs.Set(fake._FakeImageService, 'show', fake_show)

        def fake_block_device_mapping_get_all_by_instance(context, inst_id,
                                                          use_slave=False):
            # One bootable (boot_index=0) snapshot-backed volume, so the
            # instance is treated as EBS-backed by CreateImage.
            return [fake_block_device.FakeDbBlockDeviceDict(
                {'volume_id': volumes[0],
                 'snapshot_id': snapshots[0],
                 'source_type': 'snapshot',
                 'destination_type': 'volume',
                 'volume_size': 1,
                 'device_name': 'sda1',
                 'boot_index': 0,
                 'delete_on_termination': False,
                 'connection_info': '{"foo":"bar"}',
                 'no_device': None})]

        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                       fake_block_device_mapping_get_all_by_instance)

        # Records whether the virt driver power-cycled the instance.
        virt_driver = {}

        def fake_power_on(self, context, instance, network_info,
                          block_device_info):
            virt_driver['powered_on'] = True

        self.stubs.Set(fake_virt.FakeDriver, 'power_on', fake_power_on)

        def fake_power_off(self, instance,
                           shutdown_timeout, shutdown_attempts):
            virt_driver['powered_off'] = True

        self.stubs.Set(fake_virt.FakeDriver, 'power_off', fake_power_off)

        result = self.cloud.create_image(self.context, ec2_instance_id,
                                         no_reboot=no_reboot)
        ec2_ids = [result['imageId']]
        created_image = self.cloud.describe_images(self.context,
                                                   ec2_ids)['imagesSet'][0]

        self.assertIn('blockDeviceMapping', created_image)
        bdm = created_image['blockDeviceMapping'][0]
        self.assertEqual(bdm.get('deviceName'), 'sda1')
        self.assertIn('ebs', bdm)
        self.assertEqual(bdm['ebs'].get('snapshotId'),
                         ec2utils.id_to_ec2_snap_id(snapshots[0]))
        self.assertEqual(created_image.get('kernelId'), 'aki-00000001')
        self.assertEqual(created_image.get('ramdiskId'), 'ari-00000002')
        self.assertEqual(created_image.get('rootDeviceType'), 'ebs')
        # With no_reboot the driver must not be touched; otherwise both
        # power_off and power_on should have been invoked.
        self.assertNotEqual(virt_driver.get('powered_on'), no_reboot)
        self.assertNotEqual(virt_driver.get('powered_off'), no_reboot)

        self.cloud.terminate_instances(self.context, [ec2_instance_id])

        self._restart_compute_service()
+
+ def test_create_image_no_reboot(self):
+ # Make sure that CreateImage works.
+ self._do_test_create_image(True)
+
+ def test_create_image_with_reboot(self):
+ # Make sure that CreateImage works.
+ self._do_test_create_image(False)
+
    def test_create_image_instance_store(self):
        """Ensure CreateImage fails as expected for an instance-store-backed
        instance
        """
        # enforce periodic tasks run in short time to avoid wait for 60s.
        self._restart_compute_service(periodic_interval_max=0.3)

        (volumes, snapshots) = self._setUpImageSet(
            create_volumes_and_snapshots=True)

        kwargs = {'image_id': 'ami-1',
                  'instance_type': CONF.default_flavor,
                  'max_count': 1}
        ec2_instance_id = self._run_instance(**kwargs)

        def fake_block_device_mapping_get_all_by_instance(context, inst_id,
                                                          use_slave=False):
            # NOTE(review): unlike _do_test_create_image this BDM has no
            # boot_index, so the instance presumably looks instance-store
            # backed to CreateImage — confirm against the cloud layer.
            return [fake_block_device.FakeDbBlockDeviceDict(
                {'volume_id': volumes[0],
                 'snapshot_id': snapshots[0],
                 'source_type': 'snapshot',
                 'destination_type': 'volume',
                 'volume_size': 1,
                 'device_name': 'vda',
                 'delete_on_termination': False,
                 'no_device': None})]

        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                       fake_block_device_mapping_get_all_by_instance)

        self.assertRaises(exception.InvalidParameterValue,
                          self.cloud.create_image,
                          self.context,
                          ec2_instance_id,
                          no_reboot=True)
+
    @staticmethod
    def _fake_bdm_get(ctxt, id, use_slave=False):
        """Return a canned block-device-mapping list covering every case:
        an attached volume, a snapshot-backed volume, a suppressed device
        (no_device), ephemeral blank disks and a swap disk.
        """
        blockdms = [{'volume_id': 87654321,
                     'source_type': 'volume',
                     'destination_type': 'volume',
                     'snapshot_id': None,
                     'no_device': None,
                     'delete_on_termination': True,
                     'device_name': '/dev/sdh'},
                    {'volume_id': None,
                     'snapshot_id': 98765432,
                     'source_type': 'snapshot',
                     'destination_type': 'volume',
                     'no_device': None,
                     'delete_on_termination': True,
                     'device_name': '/dev/sdi'},
                    # Suppressed device: no_device=True, no destination.
                    {'volume_id': None,
                     'snapshot_id': None,
                     'no_device': True,
                     'source_type': 'blank',
                     'destination_type': None,
                     'delete_on_termination': None,
                     'device_name': None},
                    {'volume_id': None,
                     'snapshot_id': None,
                     'no_device': None,
                     'source_type': 'blank',
                     'destination_type': 'local',
                     'guest_format': None,
                     'delete_on_termination': None,
                     'device_name': '/dev/sdb'},
                    # Swap disk: guest_format='swap'.
                    {'volume_id': None,
                     'snapshot_id': None,
                     'no_device': None,
                     'source_type': 'blank',
                     'destination_type': 'local',
                     'guest_format': 'swap',
                     'delete_on_termination': None,
                     'device_name': '/dev/sdc'},
                    {'volume_id': None,
                     'snapshot_id': None,
                     'no_device': None,
                     'source_type': 'blank',
                     'destination_type': 'local',
                     'guest_format': None,
                     'delete_on_termination': None,
                     'device_name': '/dev/sdd'},
                    {'volume_id': None,
                     'snapshot_id': None,
                     'no_device': None,
                     'source_type': 'blank',
                     'destination_type': 'local',
                     'guest_format': None,
                     'delete_on_termination': None,
                     'device_name': '/dev/sd3'},
                    ]

        # Columns common to every row of the block_device_mapping table.
        extra = {
            'created_at': None,
            'updated_at': None,
            'deleted_at': None,
            'deleted': 0,
            'id': 0,
            'device_type': None,
            'disk_bus': None,
            'instance_uuid': '',
            'image_id': None,
            'volume_size': None,
            'connection_info': None,
            'boot_index': None,
            'guest_format': None,
        }

        for bdm in blockdms:
            bdm.update(extra)

        return blockdms
+
+    def test_describe_instance_attribute(self):
+        # Make sure that describe_instance_attribute works.
+        # Covers every supported attribute: blockDeviceMapping, groupSet,
+        # instanceInitiatedShutdownBehavior, disableApiTermination,
+        # instanceType, kernel, ramdisk, rootDeviceName and userData.
+        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+                       self._fake_bdm_get)
+
+        def fake_get(ctxt, instance_id, want_objects=False):
+            # Stub for compute_api.get returning a canned Instance object
+            # whose fields drive each attribute assertion below.
+            self.assertTrue(want_objects)
+            inst_type = flavors.get_default_flavor()
+            inst_type['name'] = 'fake_type'
+            sys_meta = flavors.save_flavor_info({}, inst_type)
+            secgroups = objects.SecurityGroupList()
+            secgroups.objects.append(
+                objects.SecurityGroup(name='fake0'))
+            secgroups.objects.append(
+                objects.SecurityGroup(name='fake1'))
+            instance = objects.Instance(ctxt)
+            instance.id = 0
+            instance.uuid = 'e5fe5518-0288-4fa3-b0c4-c79764101b85'
+            instance.root_device_name = '/dev/sdh'
+            instance.security_groups = secgroups
+            instance.vm_state = vm_states.STOPPED
+            instance.kernel_id = 'cedef40a-ed67-4d10-800e-17455edce175'
+            instance.ramdisk_id = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+            instance.user_data = 'fake-user data'
+            instance.shutdown_terminate = False
+            instance.disable_terminate = False
+            instance.system_metadata = sys_meta
+            return instance
+        self.stubs.Set(self.cloud.compute_api, 'get', fake_get)
+
+        def fake_ec2_instance_get_by_id(ctxt, int_id):
+            # Map only the EC2 int id 0x12345678 to our fake instance uuid;
+            # anything else is treated as not found.
+            if int_id == 305419896:
+                fake_map = {
+                    'created_at': None,
+                    'updated_at': None,
+                    'deleted_at': None,
+                    'deleted': 0,
+                    'id': 305419896,
+                    'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
+                }
+                return fake_map
+            raise exception.InstanceNotFound(instance_id=int_id)
+        self.stubs.Set(db, 'ec2_instance_get_by_id',
+                       fake_ec2_instance_get_by_id)
+
+        # Bind context and instance id once so each assertion only names
+        # the attribute under test.
+        get_attribute = functools.partial(
+            self.cloud.describe_instance_attribute,
+            self.context, 'i-12345678')
+
+        bdm = get_attribute('blockDeviceMapping')
+        bdm['blockDeviceMapping'].sort()
+
+        # Only the attached root volume should appear; sort both sides so
+        # the comparison is order-independent.
+        expected_bdm = {'instance_id': 'i-12345678',
+                        'rootDeviceType': 'ebs',
+                        'blockDeviceMapping': [
+                            {'deviceName': '/dev/sdh',
+                             'ebs': {'status': 'attached',
+                                     'deleteOnTermination': True,
+                                     'volumeId': 'vol-05397fb1',
+                                     'attachTime': '13:56:24'}}]}
+        expected_bdm['blockDeviceMapping'].sort()
+        self.assertEqual(bdm, expected_bdm)
+        groupSet = get_attribute('groupSet')
+        groupSet['groupSet'].sort()
+        expected_groupSet = {'instance_id': 'i-12345678',
+                             'groupSet': [{'groupId': 'fake0'},
+                                          {'groupId': 'fake1'}]}
+        expected_groupSet['groupSet'].sort()
+        self.assertEqual(groupSet, expected_groupSet)
+        self.assertEqual(get_attribute('instanceInitiatedShutdownBehavior'),
+                         {'instance_id': 'i-12345678',
+                          'instanceInitiatedShutdownBehavior': 'stop'})
+        self.assertEqual(get_attribute('disableApiTermination'),
+                         {'instance_id': 'i-12345678',
+                          'disableApiTermination': False})
+        self.assertEqual(get_attribute('instanceType'),
+                         {'instance_id': 'i-12345678',
+                          'instanceType': 'fake_type'})
+        self.assertEqual(get_attribute('kernel'),
+                         {'instance_id': 'i-12345678',
+                          'kernel': 'aki-00000001'})
+        self.assertEqual(get_attribute('ramdisk'),
+                         {'instance_id': 'i-12345678',
+                          'ramdisk': 'ari-00000002'})
+        self.assertEqual(get_attribute('rootDeviceName'),
+                         {'instance_id': 'i-12345678',
+                          'rootDeviceName': '/dev/sdh'})
+        # NOTE(yamahata): this isn't supported
+        # get_attribute('sourceDestCheck')
+        # NOTE(review): the blob below is presumably 'fake-user data'
+        # base64-decoded by the API before being returned — confirm.
+        self.assertEqual(get_attribute('userData'),
+                         {'instance_id': 'i-12345678',
+                          'userData': '}\xa9\x1e\xba\xc7\xabu\xabZ'})
+
+    def test_instance_initiated_shutdown_behavior(self):
+        # Verify the instanceInitiatedShutdownBehavior attribute for
+        # instances booted from images with various block-device-mapping
+        # and ephemeral-mapping properties.
+        def test_dia_iisb(expected_result, **kwargs):
+            """test describe_instance_attribute
+            attribute instance_initiated_shutdown_behavior
+
+            Runs an instance with the given kwargs, checks the attribute,
+            then terminates it and restarts the compute service so each
+            call starts from a clean state.
+            """
+            kwargs.update({'instance_type': CONF.default_flavor,
+                           'max_count': 1})
+            instance_id = self._run_instance(**kwargs)
+
+            result = self.cloud.describe_instance_attribute(self.context,
+                            instance_id, 'instanceInitiatedShutdownBehavior')
+            self.assertEqual(result['instanceInitiatedShutdownBehavior'],
+                             expected_result)
+
+            expected = {'instancesSet': [
+                            {'instanceId': instance_id,
+                             'previousState': {'code': 16,
+                                               'name': 'running'},
+                             'currentState': {'code': 32,
+                                              'name': 'shutting-down'}}]}
+            result = self.cloud.terminate_instances(self.context,
+                                                    [instance_id])
+            self.assertEqual(result, expected)
+            self._restart_compute_service()
+
+        test_dia_iisb('stop', image_id='ami-1')
+
+        block_device_mapping = [{'device_name': '/dev/vdb',
+                                 'virtual_name': 'ephemeral0'}]
+        test_dia_iisb('stop', image_id='ami-2',
+                      block_device_mapping=block_device_mapping)
+
+        def fake_show(self, context, id_, **kwargs):
+            # Image-service stub: ami-3 has no mappings, ami-4 only
+            # 'mappings', ami-5 only 'block_device_mapping', ami-6 both.
+            LOG.debug("id_ %s", id_)
+
+            prop = {}
+            if id_ == 'ami-3':
+                pass
+            elif id_ == 'ami-4':
+                prop = {'mappings': [{'device': 'sdb0',
+                                          'virtual': 'ephemeral0'}]}
+            elif id_ == 'ami-5':
+                prop = {'block_device_mapping':
+                        [{'device_name': '/dev/sdb0',
+                          'virtual_name': 'ephemeral0'}]}
+            elif id_ == 'ami-6':
+                prop = {'mappings': [{'device': 'sdb0',
+                                          'virtual': 'ephemeral0'}],
+                        'block_device_mapping':
+                        [{'device_name': '/dev/sdb0',
+                          'virtual_name': 'ephemeral0'}]}
+
+            prop_base = {'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+                         'type': 'machine'}
+            prop_base.update(prop)
+
+            return {
+                'id': id_,
+                'name': 'fake_name',
+                'properties': prop_base,
+                'container_format': 'ami',
+                'status': 'active'}
+
+        # NOTE(yamahata): create ami-3 ... ami-7
+        #                 ami-1 and ami-2 is already created by setUp()
+        for i in range(3, 8):
+            db.s3_image_create(self.context, 'ami-%d' % i)
+
+        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+
+        test_dia_iisb('stop', image_id='ami-3')
+        test_dia_iisb('stop', image_id='ami-4')
+        test_dia_iisb('stop', image_id='ami-5')
+        test_dia_iisb('stop', image_id='ami-6')
+        # Explicit request overrides the default 'stop' behavior.
+        test_dia_iisb('terminate', image_id='ami-7',
+                      instance_initiated_shutdown_behavior='terminate')
+
+    def test_create_delete_tags(self):
+        # Verify create_tags writes instance metadata (and notifies the
+        # compute manager) and delete_tags removes it again.
+
+        # We need to stub network calls
+        self._stub_instance_get_with_fixed_ips('get_all')
+        self._stub_instance_get_with_fixed_ips('get')
+
+        # We need to stub out the MQ call - it won't succeed.  We do want
+        # to check that the method is called, though
+        meta_changes = [None]
+
+        def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
+                                          instance_uuid=None):
+            # Record the metadata diff that would have been cast over RPC.
+            meta_changes[0] = diff
+
+        self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
+                       fake_change_instance_metadata)
+
+        # Create a test image
+        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+        inst1_kwargs = {
+                'reservation_id': 'a',
+                'image_ref': image_uuid,
+                'instance_type_id': 1,
+                'vm_state': 'active',
+                'launched_at': timeutils.utcnow(),
+                'hostname': 'server-1111',
+                'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1)
+        }
+
+        inst1 = db.instance_create(self.context, inst1_kwargs)
+        ec2_id = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
+
+        # Create some tags
+        md = {'key': 'foo', 'value': 'bar'}
+        md_result = {'foo': 'bar'}
+        self.cloud.create_tags(self.context, resource_id=[ec2_id],
+                               tag=[md])
+
+        metadata = self.cloud.compute_api.get_instance_metadata(self.context,
+                                                                inst1)
+        self.assertEqual(metadata, md_result)
+        # '+' marks an added key in the metadata diff.
+        self.assertEqual(meta_changes, [{'foo': ['+', 'bar']}])
+
+        # Delete them
+        self.cloud.delete_tags(self.context, resource_id=[ec2_id],
+                tag=[{'key': 'foo', 'value': 'bar'}])
+
+        metadata = self.cloud.compute_api.get_instance_metadata(self.context,
+                                                                inst1)
+        self.assertEqual(metadata, {})
+        # '-' marks a removed key in the metadata diff.
+        self.assertEqual(meta_changes, [{'foo': ['-']}])
+
+    def test_describe_tags(self):
+        # Verify describe_tags filtering: by resource-id, resource-type,
+        # key and value (bare and with '?'/'*' wildcards), multiple values
+        # (OR within a filter) and multiple filters (AND across filters).
+
+        # We need to stub network calls
+        self._stub_instance_get_with_fixed_ips('get_all')
+        self._stub_instance_get_with_fixed_ips('get')
+
+        # We need to stub out the MQ call - it won't succeed.  We do want
+        # to check that the method is called, though
+        meta_changes = [None]
+
+        def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
+                                          instance_uuid=None):
+            # Record the most recent metadata diff cast over RPC.
+            meta_changes[0] = diff
+
+        self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
+                       fake_change_instance_metadata)
+
+        # Create some test images
+        image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+        inst1_kwargs = {
+                'reservation_id': 'a',
+                'image_ref': image_uuid,
+                'instance_type_id': 1,
+                'vm_state': 'active',
+                'launched_at': timeutils.utcnow(),
+                'hostname': 'server-1111',
+                'created_at': datetime.datetime(2012, 5, 1, 1, 1, 1)
+        }
+
+        inst2_kwargs = {
+                'reservation_id': 'b',
+                'image_ref': image_uuid,
+                'instance_type_id': 1,
+                'vm_state': 'active',
+                'launched_at': timeutils.utcnow(),
+                'hostname': 'server-1112',
+                'created_at': datetime.datetime(2012, 5, 1, 1, 1, 2)
+        }
+
+        inst1 = db.instance_create(self.context, inst1_kwargs)
+        ec2_id1 = ec2utils.id_to_ec2_inst_id(inst1['uuid'])
+
+        inst2 = db.instance_create(self.context, inst2_kwargs)
+        ec2_id2 = ec2utils.id_to_ec2_inst_id(inst2['uuid'])
+
+        # Create some tags
+        # We get one overlapping pair, and each has a different key value pair
+        # inst1 : {'foo': 'bar', 'bax': 'wibble'}
+        # inst2 : {'foo': 'bar', 'baz': 'quux'}
+
+        md = {'key': 'foo', 'value': 'bar'}
+        md_result = {'foo': 'bar'}
+        self.cloud.create_tags(self.context, resource_id=[ec2_id1, ec2_id2],
+                tag=[md])
+
+        self.assertEqual(meta_changes, [{'foo': ['+', 'bar']}])
+
+        metadata = self.cloud.compute_api.get_instance_metadata(self.context,
+                                                                inst1)
+        self.assertEqual(metadata, md_result)
+
+        metadata = self.cloud.compute_api.get_instance_metadata(self.context,
+                                                                inst2)
+        self.assertEqual(metadata, md_result)
+
+        md2 = {'key': 'baz', 'value': 'quux'}
+        md2_result = {'baz': 'quux'}
+        md2_result.update(md_result)
+        self.cloud.create_tags(self.context, resource_id=[ec2_id2],
+                tag=[md2])
+
+        self.assertEqual(meta_changes, [{'baz': ['+', 'quux']}])
+
+        metadata = self.cloud.compute_api.get_instance_metadata(self.context,
+                                                                inst2)
+        self.assertEqual(metadata, md2_result)
+
+        md3 = {'key': 'bax', 'value': 'wibble'}
+        md3_result = {'bax': 'wibble'}
+        md3_result.update(md_result)
+        self.cloud.create_tags(self.context, resource_id=[ec2_id1],
+                tag=[md3])
+
+        self.assertEqual(meta_changes, [{'bax': ['+', 'wibble']}])
+
+        metadata = self.cloud.compute_api.get_instance_metadata(self.context,
+                                                                inst1)
+        self.assertEqual(metadata, md3_result)
+
+        # Expected tagSet entries used throughout the filter assertions.
+        inst1_key_foo = {'key': u'foo', 'resource_id': 'i-00000001',
+                         'resource_type': 'instance', 'value': u'bar'}
+        inst1_key_bax = {'key': u'bax', 'resource_id': 'i-00000001',
+                         'resource_type': 'instance', 'value': u'wibble'}
+        inst2_key_foo = {'key': u'foo', 'resource_id': 'i-00000002',
+                         'resource_type': 'instance', 'value': u'bar'}
+        inst2_key_baz = {'key': u'baz', 'resource_id': 'i-00000002',
+                         'resource_type': 'instance', 'value': u'quux'}
+
+        # We should be able to search by:
+        # No filter
+        tags = self.cloud.describe_tags(self.context)['tagSet']
+        self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo,
+                                inst2_key_baz, inst1_key_bax])
+
+        # Resource ID
+        tags = self.cloud.describe_tags(self.context,
+                filter=[{'name': 'resource-id',
+                         'value': [ec2_id1]}])['tagSet']
+        self.assertEqualSorted(tags, [inst1_key_foo, inst1_key_bax])
+
+        # Resource Type
+        tags = self.cloud.describe_tags(self.context,
+                filter=[{'name': 'resource-type',
+                         'value': ['instance']}])['tagSet']
+        self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo,
+                                inst2_key_baz, inst1_key_bax])
+
+        # Key, either bare or with wildcards
+        tags = self.cloud.describe_tags(self.context,
+                filter=[{'name': 'key',
+                         'value': ['foo']}])['tagSet']
+        self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo])
+
+        tags = self.cloud.describe_tags(self.context,
+                filter=[{'name': 'key',
+                         'value': ['baz']}])['tagSet']
+        self.assertEqualSorted(tags, [inst2_key_baz])
+
+        # '?' matches a single character, '*' any suffix.
+        tags = self.cloud.describe_tags(self.context,
+                filter=[{'name': 'key',
+                         'value': ['ba?']}])['tagSet']
+        self.assertEqualSorted(tags, [inst1_key_bax, inst2_key_baz])
+
+        tags = self.cloud.describe_tags(self.context,
+                filter=[{'name': 'key',
+                         'value': ['b*']}])['tagSet']
+        self.assertEqualSorted(tags, [inst1_key_bax, inst2_key_baz])
+
+        # Value, either bare or with wildcards
+        tags = self.cloud.describe_tags(self.context,
+                filter=[{'name': 'value',
+                         'value': ['bar']}])['tagSet']
+        self.assertEqualSorted(tags, [inst1_key_foo, inst2_key_foo])
+
+        tags = self.cloud.describe_tags(self.context,
+                filter=[{'name': 'value',
+                         'value': ['wi*']}])['tagSet']
+        self.assertEqual(tags, [inst1_key_bax])
+
+        tags = self.cloud.describe_tags(self.context,
+                filter=[{'name': 'value',
+                         'value': ['quu?']}])['tagSet']
+        self.assertEqual(tags, [inst2_key_baz])
+
+        # Multiple values
+        tags = self.cloud.describe_tags(self.context,
+                filter=[{'name': 'key',
+                         'value': ['baz', 'bax']}])['tagSet']
+        self.assertEqualSorted(tags, [inst2_key_baz, inst1_key_bax])
+
+        # Multiple filters (AND): no match
+        tags = self.cloud.describe_tags(self.context,
+                filter=[{'name': 'key',
+                         'value': ['baz']},
+                        {'name': 'value',
+                         'value': ['wibble']}])['tagSet']
+        self.assertEqual(tags, [])
+
+        # Multiple filters (AND): match
+        tags = self.cloud.describe_tags(self.context,
+                filter=[{'name': 'key',
+                         'value': ['baz']},
+                        {'name': 'value',
+                         'value': ['quux']}])['tagSet']
+        self.assertEqualSorted(tags, [inst2_key_baz])
+
+        # And we should fail on unsupported resource types
+        self.assertRaises(exception.InvalidParameterValue,
+                          self.cloud.describe_tags,
+                          self.context,
+                          filter=[{'name': 'resource-type',
+                                   'value': ['instance', 'volume']}])
+
+    def test_resource_type_from_id(self):
+        # Verify EC2 id prefixes map to resource types: i-/r-/vol-/snap-
+        # to their own types, ami-/ari-/aki- all to 'image', and an
+        # unknown prefix to None.
+        self.assertEqual(
+                ec2utils.resource_type_from_id(self.context, 'i-12345'),
+                'instance')
+        self.assertEqual(
+                ec2utils.resource_type_from_id(self.context, 'r-12345'),
+                'reservation')
+        self.assertEqual(
+                ec2utils.resource_type_from_id(self.context, 'vol-12345'),
+                'volume')
+        self.assertEqual(
+                ec2utils.resource_type_from_id(self.context, 'snap-12345'),
+                'snapshot')
+        self.assertEqual(
+                ec2utils.resource_type_from_id(self.context, 'ami-12345'),
+                'image')
+        self.assertEqual(
+                ec2utils.resource_type_from_id(self.context, 'ari-12345'),
+                'image')
+        self.assertEqual(
+                ec2utils.resource_type_from_id(self.context, 'aki-12345'),
+                'image')
+        self.assertIsNone(
+                ec2utils.resource_type_from_id(self.context, 'x-12345'))
+
+    @mock.patch.object(ec2utils, 'ec2_vol_id_to_uuid',
+                       side_effect=lambda
+                       ec2_volume_id: uuidutils.generate_uuid())
+    def test_detach_volume_unattched_error(self, mock_ec2_vol_id_to_uuid):
+        # Validates that VolumeUnattached is raised if the volume doesn't
+        # have an instance_uuid value.
+        ec2_volume_id = 'vol-987654321'
+
+        # volume_api.get returns a volume dict with no 'instance_uuid',
+        # which is what should trigger VolumeUnattached.
+        with mock.patch.object(self.cloud.volume_api, 'get',
+                               side_effect=lambda context, volume_id:
+                               {'id': volume_id}) as mock_get:
+            self.assertRaises(exception.VolumeUnattached,
+                              self.cloud.detach_volume,
+                              self.context,
+                              ec2_volume_id)
+            # mock.ANY because the uuid comes from the stubbed generator.
+            mock_get.assert_called_once_with(self.context, mock.ANY)
+            mock_ec2_vol_id_to_uuid.assert_called_once_with(ec2_volume_id)
+
+
+class CloudTestCaseNeutronProxy(test.NoDBTestCase):
+    """EC2 security-group tests run against the neutron security group
+    API, with the neutron client replaced by the in-memory test client.
+    """
+
+    def setUp(self):
+        super(CloudTestCaseNeutronProxy, self).setUp()
+        cfg.CONF.set_override('security_group_api', 'neutron')
+        self.cloud = cloud.CloudController()
+        # Swap in the fake neutron client; restored in tearDown.
+        self.original_client = neutronv2.get_client
+        neutronv2.get_client = test_neutron.get_client
+        self.user_id = 'fake'
+        self.project_id = 'fake'
+        self.context = context.RequestContext(self.user_id,
+                                              self.project_id,
+                                              is_admin=True)
+
+    def tearDown(self):
+        neutronv2.get_client = self.original_client
+        # The fake client keeps module-level state; reset it between tests.
+        test_neutron.get_client()._reset()
+        super(CloudTestCaseNeutronProxy, self).tearDown()
+
+    def test_describe_security_groups(self):
+        # Makes sure describe_security_groups works and filters results.
+        group_name = 'test'
+        description = 'test'
+        self.cloud.create_security_group(self.context, group_name,
+                                         description)
+        result = self.cloud.describe_security_groups(self.context)
+        # NOTE(vish): should have the default group as well
+        self.assertEqual(len(result['securityGroupInfo']), 2)
+        result = self.cloud.describe_security_groups(self.context,
+                      group_name=[group_name])
+        self.assertEqual(len(result['securityGroupInfo']), 1)
+        self.assertEqual(result['securityGroupInfo'][0]['groupName'],
+                         group_name)
+        self.cloud.delete_security_group(self.context, group_name)
+
+    def test_describe_security_groups_by_id(self):
+        # Filtering by neutron-assigned group id returns only that group.
+        group_name = 'test'
+        description = 'test'
+        self.cloud.create_security_group(self.context, group_name,
+                                         description)
+        neutron = test_neutron.get_client()
+        # Get id from neutron since cloud.create_security_group
+        # does not expose it.
+        search_opts = {'name': group_name}
+        groups = neutron.list_security_groups(
+            **search_opts)['security_groups']
+        result = self.cloud.describe_security_groups(self.context,
+                      group_id=[groups[0]['id']])
+        self.assertEqual(len(result['securityGroupInfo']), 1)
+        self.assertEqual(
+                result['securityGroupInfo'][0]['groupName'],
+                group_name)
+        self.cloud.delete_security_group(self.context, group_name)
+
+    def test_create_delete_security_group(self):
+        # Round-trip: create echoes the description, delete returns truthy.
+        descript = 'test description'
+        create = self.cloud.create_security_group
+        result = create(self.context, 'testgrp', descript)
+        group_descript = result['securityGroupSet'][0]['groupDescription']
+        self.assertEqual(descript, group_descript)
+        delete = self.cloud.delete_security_group
+        self.assertTrue(delete(self.context, 'testgrp'))
+
+
+class FormatMappingTestCase(test.TestCase):
+    """Tests for cloud._format_mappings, which converts glance image
+    'block_device_mapping' properties into the EC2 blockDeviceMapping
+    entries of a DescribeImages-style result dict.
+    """
+
+    def test_format_mapping(self):
+        # Two snapshot-backed BDMs; the boot device (boot_index 0) must be
+        # dropped from the formatted output, leaving only the second one.
+        properties = {'block_device_mapping':
+                      [{'guest_format': None, 'boot_index': 0,
+                        'no_device': None, 'volume_id': None,
+                        'volume_size': None, 'disk_bus': 'virtio',
+                        'image_id': None, 'source_type': 'snapshot',
+                        'device_type': 'disk',
+                        'snapshot_id': '993b31ac-452e-4fed-b745-7718385f1811',
+                        'destination_type': 'volume',
+                        'delete_on_termination': None},
+                       {'guest_format': None, 'boot_index': None,
+                        'no_device': None, 'volume_id': None,
+                        'volume_size': None, 'disk_bus': None,
+                        'image_id': None, 'source_type': 'snapshot',
+                        'device_type': None,
+                        'snapshot_id': 'b409a2de-1c79-46bf-aa7e-ebdb4bf427ef',
+                        'destination_type': 'volume',
+                        'delete_on_termination': None}],
+                      'checksum': '50bdc35edb03a38d91b1b071afb20a3c',
+                      'min_ram': '0', 'disk_format': 'qcow2',
+                      'image_name': 'cirros-0.3.0-x86_64-disk', 'bdm_v2': 'True',
+                      'image_id': '4fce9db9-d89e-4eea-8d20-e2bae15292c1',
+                      'root_device_name': '/dev/vda', 'container_format': 'bare',
+                      'min_disk': '0', 'size': '9761280'}
+        # _format_mappings mutates 'result' in place, adding the
+        # blockDeviceMapping key alongside the existing image fields.
+        result = {'description': None,
+                  'imageOwnerId': '9fd1513f52f14fe49fa1c83e40c63541',
+                  'isPublic': False, 'imageId': 'ami-00000002',
+                  'imageState': 'available', 'architecture': None,
+                  'imageLocation': 'None (xb)',
+                  'rootDeviceType': 'instance-store',
+                  'rootDeviceName': '/dev/vda',
+                  'imageType': 'machine', 'name': 'xb'}
+        cloud._format_mappings(properties, result)
+        expected = {'architecture': None,
+                    'blockDeviceMapping':
+                    [{'ebs': {'snapshotId': 'snap-00000002'}}],
+                    'description': None,
+                    'imageId': 'ami-00000002',
+                    'imageLocation': 'None (xb)',
+                    'imageOwnerId': '9fd1513f52f14fe49fa1c83e40c63541',
+                    'imageState': 'available',
+                    'imageType': 'machine',
+                    'isPublic': False,
+                    'name': 'xb',
+                    'rootDeviceName': '/dev/vda',
+                    'rootDeviceType': 'instance-store'}
+        self.assertEqual(expected, result)
diff --git a/nova/tests/unit/api/ec2/test_ec2_validate.py b/nova/tests/unit/api/ec2/test_ec2_validate.py
new file mode 100644
index 0000000000..53ae8c110e
--- /dev/null
+++ b/nova/tests/unit/api/ec2/test_ec2_validate.py
@@ -0,0 +1,277 @@
+# Copyright 2012 Cloudscaling, Inc.
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+from nova.api.ec2 import cloud
+from nova.api.ec2 import ec2utils
+from nova.compute import utils as compute_utils
+from nova import context
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit import cast_as_call
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_notifier
+from nova.tests.unit.image import fake
+
+CONF = cfg.CONF
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+
+
+class EC2ValidateTestCase(test.TestCase):
+    """Checks that EC2 API calls reject malformed instance/volume ids with
+    Invalid*IDMalformed and well-formed but unknown ids with *NotFound.
+    """
+
+    def setUp(self):
+        super(EC2ValidateTestCase, self).setUp()
+        self.flags(compute_driver='nova.virt.fake.FakeDriver')
+
+        def dumb(*args, **kwargs):
+            # No-op stand-in for usage notifications.
+            pass
+
+        self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
+        fake_network.set_stub_network_methods(self.stubs)
+
+        # set up our cloud
+        self.cloud = cloud.CloudController()
+
+        # Short-circuit the conductor service
+        self.flags(use_local=True, group='conductor')
+
+        # Stub out the notification service so we use the no-op serializer
+        # and avoid lazy-load traces with the wrap_exception decorator in
+        # the compute service.
+        fake_notifier.stub_notifier(self.stubs)
+        self.addCleanup(fake_notifier.reset)
+
+        # set up services
+        self.conductor = self.start_service('conductor',
+                manager=CONF.conductor.manager)
+        self.compute = self.start_service('compute')
+        # NOTE(review): attribute name is misspelled ('scheduter'); kept
+        # as-is since nothing references it — confirm before renaming.
+        self.scheduter = self.start_service('scheduler')
+        self.network = self.start_service('network')
+        self.image_service = fake.FakeImageService()
+
+        self.user_id = 'fake'
+        self.project_id = 'fake'
+        self.context = context.RequestContext(self.user_id,
+                                              self.project_id,
+                                              is_admin=True)
+
+        # Malformed ids must raise Invalid*IDMalformed; syntactically
+        # valid ids that don't exist must raise *NotFound.
+        self.EC2_MALFORMED_IDS = ['foobar', '', 123]
+        self.EC2_VALID__IDS = ['i-284f3a41', 'i-001', 'i-deadbeef']
+
+        self.ec2_id_exception_map = [(x,
+                exception.InvalidInstanceIDMalformed)
+                for x in self.EC2_MALFORMED_IDS]
+        self.ec2_id_exception_map.extend([(x, exception.InstanceNotFound)
+                for x in self.EC2_VALID__IDS])
+        self.volume_id_exception_map = [(x,
+                exception.InvalidVolumeIDMalformed)
+                for x in self.EC2_MALFORMED_IDS]
+        self.volume_id_exception_map.extend([(x, exception.VolumeNotFound)
+                for x in self.EC2_VALID__IDS])
+
+        def fake_show(meh, context, id, **kwargs):
+            # Minimal image record sufficient for the EC2 layer.
+            return {'id': id,
+                    'container_format': 'ami',
+                    'properties': {
+                        'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+                        'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
+                        'type': 'machine',
+                        'image_state': 'available'}}
+
+        def fake_detail(self, context, **kwargs):
+            image = fake_show(self, context, None)
+            image['name'] = kwargs.get('name')
+            return [image]
+
+        fake.stub_out_image_service(self.stubs)
+        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
+
+        # Turn RPC casts into calls so failures surface synchronously.
+        self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
+        db.s3_image_create(self.context,
+                           'cedef40a-ed67-4d10-800e-17455edce175')
+        db.s3_image_create(self.context,
+                           '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
+
+    def tearDown(self):
+        super(EC2ValidateTestCase, self).tearDown()
+        fake.FakeImageService_reset()
+
+    # EC2_API tests (InvalidInstanceID.Malformed)
+    def test_console_output(self):
+        for ec2_id, e in self.ec2_id_exception_map:
+            self.assertRaises(e,
+                              self.cloud.get_console_output,
+                              context=self.context,
+                              instance_id=[ec2_id])
+
+    def test_describe_instance_attribute(self):
+        for ec2_id, e in self.ec2_id_exception_map:
+            self.assertRaises(e,
+                              self.cloud.describe_instance_attribute,
+                              context=self.context,
+                              instance_id=ec2_id,
+                              attribute='kernel')
+
+    def test_instance_lifecycle(self):
+        # All lifecycle operations share the same id-validation behavior.
+        lifecycle = [self.cloud.terminate_instances,
+                     self.cloud.reboot_instances,
+                     self.cloud.stop_instances,
+                     self.cloud.start_instances,
+                     ]
+        for cmd in lifecycle:
+            for ec2_id, e in self.ec2_id_exception_map:
+                self.assertRaises(e,
+                                  cmd,
+                                  context=self.context,
+                                  instance_id=[ec2_id])
+
+    def test_create_image(self):
+        for ec2_id, e in self.ec2_id_exception_map:
+            self.assertRaises(e,
+                              self.cloud.create_image,
+                              context=self.context,
+                              instance_id=ec2_id)
+
+    def test_create_snapshot(self):
+        for ec2_id, e in self.volume_id_exception_map:
+            self.assertRaises(e,
+                              self.cloud.create_snapshot,
+                              context=self.context,
+                              volume_id=ec2_id)
+
+    def test_describe_volumes(self):
+        for ec2_id, e in self.volume_id_exception_map:
+            self.assertRaises(e,
+                              self.cloud.describe_volumes,
+                              context=self.context,
+                              volume_id=[ec2_id])
+
+    def test_delete_volume(self):
+        for ec2_id, e in self.volume_id_exception_map:
+            self.assertRaises(e,
+                              self.cloud.delete_volume,
+                              context=self.context,
+                              volume_id=ec2_id)
+
+    def test_detach_volume(self):
+        for ec2_id, e in self.volume_id_exception_map:
+            self.assertRaises(e,
+                              self.cloud.detach_volume,
+                              context=self.context,
+                              volume_id=ec2_id)
+
+
+class EC2TimestampValidationTestCase(test.NoDBTestCase):
+    """Test case for EC2 request timestamp validation.
+
+    Exercises ec2utils.is_ec2_timestamp_expired for the 'Timestamp' and
+    'Expires' request parameters, including the millisecond format used
+    by the AWS SDKs and malformed inputs (which count as expired).
+    """
+
+    def test_validate_ec2_timestamp_valid(self):
+        params = {'Timestamp': '2011-04-22T11:29:49Z'}
+        expired = ec2utils.is_ec2_timestamp_expired(params)
+        self.assertFalse(expired)
+
+    def test_validate_ec2_timestamp_old_format(self):
+        # Missing trailing 'Z' is an invalid format and treated as expired.
+        params = {'Timestamp': '2011-04-22T11:29:49'}
+        expired = ec2utils.is_ec2_timestamp_expired(params)
+        self.assertTrue(expired)
+
+    def test_validate_ec2_timestamp_not_set(self):
+        # No timestamp at all means no expiry check applies.
+        params = {}
+        expired = ec2utils.is_ec2_timestamp_expired(params)
+        self.assertFalse(expired)
+
+    def test_validate_ec2_timestamp_ms_time_regex(self):
+        # The fractional-seconds regex accepts 1-6 digits and requires 'Z'.
+        result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123Z')
+        self.assertIsNotNone(result)
+        result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123456Z')
+        self.assertIsNotNone(result)
+        result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.1234567Z')
+        self.assertIsNone(result)
+        result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49.123')
+        self.assertIsNone(result)
+        result = ec2utils._ms_time_regex.match('2011-04-22T11:29:49Z')
+        self.assertIsNone(result)
+
+    def test_validate_ec2_timestamp_aws_sdk_format(self):
+        # Millisecond format parses; a past timestamp is only expired once
+        # an expiry window is requested.
+        params = {'Timestamp': '2011-04-22T11:29:49.123Z'}
+        expired = ec2utils.is_ec2_timestamp_expired(params)
+        self.assertFalse(expired)
+        expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
+        self.assertTrue(expired)
+
+    def test_validate_ec2_timestamp_invalid_format(self):
+        params = {'Timestamp': '2011-04-22T11:29:49.000P'}
+        expired = ec2utils.is_ec2_timestamp_expired(params)
+        self.assertTrue(expired)
+
+    def test_validate_ec2_timestamp_advanced_time(self):
+
+        # EC2 request with Timestamp in advanced time
+        timestamp = timeutils.utcnow() + datetime.timedelta(seconds=250)
+        params = {'Timestamp': timeutils.strtime(timestamp,
+                                                 "%Y-%m-%dT%H:%M:%SZ")}
+        expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
+        self.assertFalse(expired)
+
+    def test_validate_ec2_timestamp_advanced_time_expired(self):
+        # Beyond the 300s window even though it is in the future.
+        timestamp = timeutils.utcnow() + datetime.timedelta(seconds=350)
+        params = {'Timestamp': timeutils.strtime(timestamp,
+                                                 "%Y-%m-%dT%H:%M:%SZ")}
+        expired = ec2utils.is_ec2_timestamp_expired(params, expires=300)
+        self.assertTrue(expired)
+
+    def test_validate_ec2_req_timestamp_not_expired(self):
+        params = {'Timestamp': timeutils.isotime()}
+        expired = ec2utils.is_ec2_timestamp_expired(params, expires=15)
+        self.assertFalse(expired)
+
+    def test_validate_ec2_req_timestamp_expired(self):
+        params = {'Timestamp': '2011-04-22T12:00:00Z'}
+        compare = ec2utils.is_ec2_timestamp_expired(params, expires=300)
+        self.assertTrue(compare)
+
+    def test_validate_ec2_req_expired(self):
+        # 'Expires' equal to now counts as already expired.
+        params = {'Expires': timeutils.isotime()}
+        expired = ec2utils.is_ec2_timestamp_expired(params)
+        self.assertTrue(expired)
+
+    def test_validate_ec2_req_not_expired(self):
+        expire = timeutils.utcnow() + datetime.timedelta(seconds=350)
+        params = {'Expires': timeutils.strtime(expire, "%Y-%m-%dT%H:%M:%SZ")}
+        expired = ec2utils.is_ec2_timestamp_expired(params)
+        self.assertFalse(expired)
+
+    def test_validate_Expires_timestamp_invalid_format(self):
+
+        # EC2 request with invalid Expires
+        params = {'Expires': '2011-04-22T11:29:49'}
+        expired = ec2utils.is_ec2_timestamp_expired(params)
+        self.assertTrue(expired)
+
+    def test_validate_ec2_req_timestamp_Expires(self):
+
+        # EC2 request with both Timestamp and Expires
+        params = {'Timestamp': '2011-04-22T11:29:49Z',
+                  'Expires': timeutils.isotime()}
+        self.assertRaises(exception.InvalidRequest,
+                          ec2utils.is_ec2_timestamp_expired,
+                          params)
diff --git a/nova/tests/api/ec2/test_ec2utils.py b/nova/tests/unit/api/ec2/test_ec2utils.py
index 9dceb7de12..9dceb7de12 100644
--- a/nova/tests/api/ec2/test_ec2utils.py
+++ b/nova/tests/unit/api/ec2/test_ec2utils.py
diff --git a/nova/tests/api/ec2/test_error_response.py b/nova/tests/unit/api/ec2/test_error_response.py
index 925d6723ed..925d6723ed 100644
--- a/nova/tests/api/ec2/test_error_response.py
+++ b/nova/tests/unit/api/ec2/test_error_response.py
diff --git a/nova/tests/api/ec2/test_faults.py b/nova/tests/unit/api/ec2/test_faults.py
index ae71be9bbf..ae71be9bbf 100644
--- a/nova/tests/api/ec2/test_faults.py
+++ b/nova/tests/unit/api/ec2/test_faults.py
diff --git a/nova/tests/api/ec2/test_middleware.py b/nova/tests/unit/api/ec2/test_middleware.py
index 3eb9c703da..3eb9c703da 100644
--- a/nova/tests/api/ec2/test_middleware.py
+++ b/nova/tests/unit/api/ec2/test_middleware.py
diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/unit/api/openstack/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/api/openstack/__init__.py
+++ b/nova/tests/unit/api/openstack/__init__.py
diff --git a/nova/tests/api/openstack/common.py b/nova/tests/unit/api/openstack/common.py
index 972958a329..972958a329 100644
--- a/nova/tests/api/openstack/common.py
+++ b/nova/tests/unit/api/openstack/common.py
diff --git a/nova/tests/api/openstack/compute/__init__.py b/nova/tests/unit/api/openstack/compute/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/api/openstack/compute/__init__.py
+++ b/nova/tests/unit/api/openstack/compute/__init__.py
diff --git a/nova/tests/api/openstack/compute/contrib/__init__.py b/nova/tests/unit/api/openstack/compute/contrib/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/api/openstack/compute/contrib/__init__.py
+++ b/nova/tests/unit/api/openstack/compute/contrib/__init__.py
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_admin_actions.py b/nova/tests/unit/api/openstack/compute/contrib/test_admin_actions.py
new file mode 100644
index 0000000000..44bf495b29
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_admin_actions.py
@@ -0,0 +1,734 @@
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import webob
+
+from nova.api.openstack import common
+from nova.api.openstack.compute.contrib import admin_actions as \
+ admin_actions_v2
+from nova.api.openstack.compute.plugins.v3 import admin_actions as \
+ admin_actions_v21
+from nova.compute import vm_states
+import nova.context
+from nova import exception
+from nova import objects
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+
+class CommonMixin(object):
+ admin_actions = None
+ fake_url = None
+
+ def _make_request(self, url, body):
+ req = webob.Request.blank(self.fake_url + url)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.content_type = 'application/json'
+ return req.get_response(self.app)
+
+ def _stub_instance_get(self, uuid=None):
+ if uuid is None:
+ uuid = uuidutils.generate_uuid()
+ instance = fake_instance.fake_db_instance(
+ id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
+ task_state=None, launched_at=timeutils.utcnow())
+ instance = objects.Instance._from_db_object(
+ self.context, objects.Instance(), instance)
+ self.compute_api.get(self.context, uuid, expected_attrs=None,
+ want_objects=True).AndReturn(instance)
+ return instance
+
+ def _stub_instance_get_failure(self, exc_info, uuid=None):
+ if uuid is None:
+ uuid = uuidutils.generate_uuid()
+ self.compute_api.get(self.context, uuid, expected_attrs=None,
+ want_objects=True).AndRaise(exc_info)
+ return uuid
+
+ def _test_non_existing_instance(self, action, body_map=None):
+ uuid = uuidutils.generate_uuid()
+ self._stub_instance_get_failure(
+ exception.InstanceNotFound(instance_id=uuid), uuid=uuid)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % uuid,
+ {action: body_map.get(action)})
+ self.assertEqual(404, res.status_int)
+ # Do these here instead of tearDown because this method is called
+ # more than once for the same test case
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def _test_action(self, action, body=None, method=None):
+ if method is None:
+ method = action
+
+ instance = self._stub_instance_get()
+ getattr(self.compute_api, method)(self.context, instance)
+
+ self.mox.ReplayAll()
+ res = self._make_request('/servers/%s/action' % instance['uuid'],
+ {action: None})
+ self.assertEqual(202, res.status_int)
+ # Do these here instead of tearDown because this method is called
+ # more than once for the same test case
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def _test_invalid_state(self, action, method=None, body_map=None,
+ compute_api_args_map=None):
+ if method is None:
+ method = action
+ if body_map is None:
+ body_map = {}
+ if compute_api_args_map is None:
+ compute_api_args_map = {}
+
+ instance = self._stub_instance_get()
+
+ args, kwargs = compute_api_args_map.get(action, ((), {}))
+
+ getattr(self.compute_api, method)(self.context, instance,
+ *args, **kwargs).AndRaise(
+ exception.InstanceInvalidState(
+ attr='vm_state', instance_uuid=instance['uuid'],
+ state='foo', method=method))
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance['uuid'],
+ {action: body_map.get(action)})
+ self.assertEqual(409, res.status_int)
+ self.assertIn("Cannot \'%(action)s\' instance %(id)s"
+ % {'id': instance['uuid'], 'action': action}, res.body)
+ # Do these here instead of tearDown because this method is called
+ # more than once for the same test case
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def _test_locked_instance(self, action, method=None, body_map=None,
+ compute_api_args_map=None):
+ if method is None:
+ method = action
+
+ instance = self._stub_instance_get()
+
+ args, kwargs = (), {}
+ act = None
+
+ if compute_api_args_map:
+ args, kwargs = compute_api_args_map.get(action, ((), {}))
+ act = body_map.get(action)
+
+ getattr(self.compute_api, method)(self.context, instance,
+ *args, **kwargs).AndRaise(
+ exception.InstanceIsLocked(instance_uuid=instance['uuid']))
+ self.mox.ReplayAll()
+ res = self._make_request('/servers/%s/action' % instance['uuid'],
+ {action: act})
+ self.assertEqual(409, res.status_int)
+ self.assertIn('Instance %s is locked' % instance['uuid'], res.body)
+ # Do these here instead of tearDown because this method is called
+ # more than once for the same test case
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+
+class AdminActionsTestV21(CommonMixin, test.NoDBTestCase):
+ admin_actions = admin_actions_v21
+ fake_url = '/v2/fake'
+
+ def setUp(self):
+ super(AdminActionsTestV21, self).setUp()
+ self.controller = self.admin_actions.AdminActionsController()
+ self.compute_api = self.controller.compute_api
+ self.context = nova.context.RequestContext('fake', 'fake')
+
+ def _fake_controller(*args, **kwargs):
+ return self.controller
+
+ self.stubs.Set(self.admin_actions, 'AdminActionsController',
+ _fake_controller)
+
+ self.app = self._get_app()
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def _get_app(self):
+ return fakes.wsgi_app_v21(init_only=('servers',
+ 'os-admin-actions'),
+ fake_auth_context=self.context)
+
+ def test_actions(self):
+ actions = ['resetNetwork', 'injectNetworkInfo']
+ method_translations = {'resetNetwork': 'reset_network',
+ 'injectNetworkInfo': 'inject_network_info'}
+
+ for action in actions:
+ method = method_translations.get(action)
+ self.mox.StubOutWithMock(self.compute_api, method or action)
+ self._test_action(action, method=method)
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_actions_with_non_existed_instance(self):
+ actions = ['resetNetwork', 'injectNetworkInfo', 'os-resetState']
+ body_map = {'os-resetState': {'state': 'active'}}
+
+ for action in actions:
+ self._test_non_existing_instance(action,
+ body_map=body_map)
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_actions_with_locked_instance(self):
+ actions = ['resetNetwork', 'injectNetworkInfo']
+ method_translations = {'resetNetwork': 'reset_network',
+ 'injectNetworkInfo': 'inject_network_info'}
+
+ for action in actions:
+ method = method_translations.get(action)
+ self.mox.StubOutWithMock(self.compute_api, method or action)
+ self._test_locked_instance(action, method=method)
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+
+class AdminActionsTestV2(AdminActionsTestV21):
+ admin_actions = admin_actions_v2
+
+ def setUp(self):
+ super(AdminActionsTestV2, self).setUp()
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Admin_actions'])
+
+ def _get_app(self):
+ return fakes.wsgi_app(init_only=('servers',),
+ fake_auth_context=self.context)
+
+ def test_actions(self):
+ actions = ['pause', 'unpause', 'suspend', 'resume', 'migrate',
+ 'resetNetwork', 'injectNetworkInfo', 'lock',
+ 'unlock']
+ method_translations = {'migrate': 'resize',
+ 'resetNetwork': 'reset_network',
+ 'injectNetworkInfo': 'inject_network_info'}
+
+ for action in actions:
+ method = method_translations.get(action)
+ self.mox.StubOutWithMock(self.compute_api, method or action)
+ self._test_action(action, method=method)
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_actions_raise_conflict_on_invalid_state(self):
+ actions = ['pause', 'unpause', 'suspend', 'resume', 'migrate',
+ 'os-migrateLive']
+ method_translations = {'migrate': 'resize',
+ 'os-migrateLive': 'live_migrate'}
+ body_map = {'os-migrateLive':
+ {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}}
+ args_map = {'os-migrateLive': ((False, False, 'hostname'), {})}
+
+ for action in actions:
+ method = method_translations.get(action)
+ self.mox.StubOutWithMock(self.compute_api, method or action)
+ self._test_invalid_state(action, method=method, body_map=body_map,
+ compute_api_args_map=args_map)
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_actions_with_non_existed_instance(self):
+ actions = ['pause', 'unpause', 'suspend', 'resume',
+ 'resetNetwork', 'injectNetworkInfo', 'lock',
+ 'unlock', 'os-resetState', 'migrate', 'os-migrateLive']
+ body_map = {'os-resetState': {'state': 'active'},
+ 'os-migrateLive':
+ {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}}
+ for action in actions:
+ self._test_non_existing_instance(action,
+ body_map=body_map)
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_actions_with_locked_instance(self):
+ actions = ['pause', 'unpause', 'suspend', 'resume', 'migrate',
+ 'resetNetwork', 'injectNetworkInfo', 'os-migrateLive']
+ method_translations = {'migrate': 'resize',
+ 'resetNetwork': 'reset_network',
+ 'injectNetworkInfo': 'inject_network_info',
+ 'os-migrateLive': 'live_migrate'}
+ args_map = {'os-migrateLive': ((False, False, 'hostname'), {})}
+ body_map = {'os-migrateLive': {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}}
+
+ for action in actions:
+ method = method_translations.get(action)
+ self.mox.StubOutWithMock(self.compute_api, method or action)
+ self._test_locked_instance(action, method=method,
+ body_map=body_map,
+ compute_api_args_map=args_map)
+
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def _test_migrate_exception(self, exc_info, expected_result):
+ self.mox.StubOutWithMock(self.compute_api, 'resize')
+ instance = self._stub_instance_get()
+ self.compute_api.resize(self.context, instance).AndRaise(exc_info)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance['uuid'],
+ {'migrate': None})
+ self.assertEqual(expected_result, res.status_int)
+
+ def _test_migrate_live_succeeded(self, param):
+ self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
+ instance = self._stub_instance_get()
+ self.compute_api.live_migrate(self.context, instance, False,
+ False, 'hostname')
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance['uuid'],
+ {'os-migrateLive': param})
+ self.assertEqual(202, res.status_int)
+
+ def test_migrate_live_enabled(self):
+ param = {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}
+ self._test_migrate_live_succeeded(param)
+
+ def test_migrate_live_enabled_with_string_param(self):
+ param = {'host': 'hostname',
+ 'block_migration': "False",
+ 'disk_over_commit': "False"}
+ self._test_migrate_live_succeeded(param)
+
+ def test_migrate_live_missing_dict_param(self):
+ body = {'os-migrateLive': {'dummy': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}}
+ res = self._make_request('/servers/FAKE/action', body)
+ self.assertEqual(400, res.status_int)
+
+ def test_migrate_live_with_invalid_block_migration(self):
+ body = {'os-migrateLive': {'host': 'hostname',
+ 'block_migration': "foo",
+ 'disk_over_commit': False}}
+ res = self._make_request('/servers/FAKE/action', body)
+ self.assertEqual(400, res.status_int)
+
+ def test_migrate_live_with_invalid_disk_over_commit(self):
+ body = {'os-migrateLive': {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': "foo"}}
+ res = self._make_request('/servers/FAKE/action', body)
+ self.assertEqual(400, res.status_int)
+
+ def _test_migrate_live_failed_with_exception(self, fake_exc,
+ uuid=None):
+ self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
+
+ instance = self._stub_instance_get(uuid=uuid)
+ self.compute_api.live_migrate(self.context, instance, False,
+ False, 'hostname').AndRaise(fake_exc)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance.uuid,
+ {'os-migrateLive':
+ {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}})
+ self.assertEqual(400, res.status_int)
+ self.assertIn(unicode(fake_exc), res.body)
+
+ def test_migrate_live_compute_service_unavailable(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.ComputeServiceUnavailable(host='host'))
+
+ def test_migrate_live_invalid_hypervisor_type(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InvalidHypervisorType())
+
+ def test_migrate_live_invalid_cpu_info(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InvalidCPUInfo(reason=""))
+
+ def test_migrate_live_unable_to_migrate_to_self(self):
+ uuid = uuidutils.generate_uuid()
+ self._test_migrate_live_failed_with_exception(
+ exception.UnableToMigrateToSelf(instance_id=uuid,
+ host='host'),
+ uuid=uuid)
+
+ def test_migrate_live_destination_hypervisor_too_old(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.DestinationHypervisorTooOld())
+
+ def test_migrate_live_no_valid_host(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.NoValidHost(reason=''))
+
+ def test_migrate_live_invalid_local_storage(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InvalidLocalStorage(path='', reason=''))
+
+ def test_migrate_live_invalid_shared_storage(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InvalidSharedStorage(path='', reason=''))
+
+ def test_migrate_live_hypervisor_unavailable(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.HypervisorUnavailable(host=""))
+
+ def test_migrate_live_instance_not_running(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InstanceNotRunning(instance_id=""))
+
+ def test_migrate_live_migration_pre_check_error(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.MigrationPreCheckError(reason=''))
+
+ def test_unlock_not_authorized(self):
+ self.mox.StubOutWithMock(self.compute_api, 'unlock')
+
+ instance = self._stub_instance_get()
+
+ self.compute_api.unlock(self.context, instance).AndRaise(
+ exception.PolicyNotAuthorized(action='unlock'))
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance['uuid'],
+ {'unlock': None})
+ self.assertEqual(403, res.status_int)
+
+
+class CreateBackupTestsV2(CommonMixin, test.NoDBTestCase):
+ fake_url = '/v2/fake'
+
+ def setUp(self):
+ super(CreateBackupTestsV2, self).setUp()
+ self.controller = admin_actions_v2.AdminActionsController()
+ self.compute_api = self.controller.compute_api
+ self.context = nova.context.RequestContext('fake', 'fake')
+
+ def _fake_controller(*args, **kwargs):
+ return self.controller
+
+ self.stubs.Set(admin_actions_v2, 'AdminActionsController',
+ _fake_controller)
+
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Admin_actions'])
+
+ self.app = fakes.wsgi_app(init_only=('servers',),
+ fake_auth_context=self.context)
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+ self.mox.StubOutWithMock(common,
+ 'check_img_metadata_properties_quota')
+ self.mox.StubOutWithMock(self.compute_api,
+ 'backup')
+
+ def _make_url(self, uuid):
+ return '/servers/%s/action' % uuid
+
+ def test_create_backup_with_metadata(self):
+ metadata = {'123': 'asdf'}
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ 'metadata': metadata,
+ },
+ }
+
+ image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
+ properties=metadata)
+
+ common.check_img_metadata_properties_quota(self.context, metadata)
+ instance = self._stub_instance_get()
+ self.compute_api.backup(self.context, instance, 'Backup 1',
+ 'daily', 1,
+ extra_properties=metadata).AndReturn(image)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request(self._make_url(instance['uuid']), body=body)
+ self.assertEqual(202, res.status_int)
+ self.assertIn('fake-image-id', res.headers['Location'])
+
+ def test_create_backup_no_name(self):
+ # Name is required for backups.
+ body = {
+ 'createBackup': {
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+ res = self._make_request(self._make_url('fake'), body=body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_no_rotation(self):
+ # Rotation is required for backup requests.
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ },
+ }
+ res = self._make_request(self._make_url('fake'), body=body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_negative_rotation(self):
+ """Rotation must be greater than or equal to zero
+ for backup requests
+ """
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': -1,
+ },
+ }
+ res = self._make_request(self._make_url('fake'), body=body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_no_backup_type(self):
+ # Backup Type (daily or weekly) is required for backup requests.
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'rotation': 1,
+ },
+ }
+ res = self._make_request(self._make_url('fake'), body=body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_bad_entity(self):
+ body = {'createBackup': 'go'}
+ res = self._make_request(self._make_url('fake'), body=body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_rotation_is_zero(self):
+ # The happy path for creating backups if rotation is zero.
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 0,
+ },
+ }
+
+ image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
+ properties={})
+ common.check_img_metadata_properties_quota(self.context, {})
+ instance = self._stub_instance_get()
+ self.compute_api.backup(self.context, instance, 'Backup 1',
+ 'daily', 0,
+ extra_properties={}).AndReturn(image)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request(self._make_url(instance['uuid']), body=body)
+ self.assertEqual(202, res.status_int)
+ self.assertNotIn('Location', res.headers)
+
+ def test_create_backup_rotation_is_positive(self):
+ # The happy path for creating backups if rotation is positive.
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+
+ image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
+ properties={})
+ common.check_img_metadata_properties_quota(self.context, {})
+ instance = self._stub_instance_get()
+ self.compute_api.backup(self.context, instance, 'Backup 1',
+ 'daily', 1,
+ extra_properties={}).AndReturn(image)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request(self._make_url(instance['uuid']), body=body)
+ self.assertEqual(202, res.status_int)
+ self.assertIn('fake-image-id', res.headers['Location'])
+
+ def test_create_backup_raises_conflict_on_invalid_state(self):
+ body_map = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+ args_map = {
+ 'createBackup': (
+ ('Backup 1', 'daily', 1), {'extra_properties': {}}
+ ),
+ }
+ common.check_img_metadata_properties_quota(self.context, {})
+ self._test_invalid_state('createBackup', method='backup',
+ body_map=body_map,
+ compute_api_args_map=args_map)
+
+ def test_create_backup_with_non_existed_instance(self):
+ body_map = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+ common.check_img_metadata_properties_quota(self.context, {})
+ self._test_non_existing_instance('createBackup',
+ body_map=body_map)
+
+ def test_create_backup_with_invalid_createBackup(self):
+ body = {
+ 'createBackupup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+ res = self._make_request(self._make_url('fake'), body=body)
+ self.assertEqual(400, res.status_int)
+
+
+class ResetStateTestsV21(test.NoDBTestCase):
+ admin_act = admin_actions_v21
+ bad_request = exception.ValidationError
+ fake_url = '/servers'
+
+ def setUp(self):
+ super(ResetStateTestsV21, self).setUp()
+ self.uuid = uuidutils.generate_uuid()
+ self.admin_api = self.admin_act.AdminActionsController()
+ self.compute_api = self.admin_api.compute_api
+
+ url = '%s/%s/action' % (self.fake_url, self.uuid)
+ self.request = self._get_request(url)
+ self.context = self.request.environ['nova.context']
+
+ def _get_request(self, url):
+ return fakes.HTTPRequest.blank(url)
+
+ def test_no_state(self):
+ self.assertRaises(self.bad_request,
+ self.admin_api._reset_state,
+ self.request, self.uuid,
+ body={"os-resetState": None})
+
+ def test_bad_state(self):
+ self.assertRaises(self.bad_request,
+ self.admin_api._reset_state,
+ self.request, self.uuid,
+ body={"os-resetState": {"state": "spam"}})
+
+ def test_no_instance(self):
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+ exc = exception.InstanceNotFound(instance_id='inst_ud')
+ self.compute_api.get(self.context, self.uuid, expected_attrs=None,
+ want_objects=True).AndRaise(exc)
+ self.mox.ReplayAll()
+
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.admin_api._reset_state,
+ self.request, self.uuid,
+ body={"os-resetState": {"state": "active"}})
+
+ def _setup_mock(self, expected):
+ instance = objects.Instance()
+ instance.uuid = self.uuid
+ instance.vm_state = 'fake'
+ instance.task_state = 'fake'
+ instance.obj_reset_changes()
+
+ self.mox.StubOutWithMock(instance, 'save')
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def check_state(admin_state_reset=True):
+ self.assertEqual(set(expected.keys()),
+ instance.obj_what_changed())
+ for k, v in expected.items():
+ self.assertEqual(v, getattr(instance, k),
+ "Instance.%s doesn't match" % k)
+ instance.obj_reset_changes()
+
+ self.compute_api.get(self.context, instance.uuid, expected_attrs=None,
+ want_objects=True).AndReturn(instance)
+ instance.save(admin_state_reset=True).WithSideEffects(check_state)
+
+ def test_reset_active(self):
+ self._setup_mock(dict(vm_state=vm_states.ACTIVE,
+ task_state=None))
+ self.mox.ReplayAll()
+
+ body = {"os-resetState": {"state": "active"}}
+ result = self.admin_api._reset_state(self.request, self.uuid,
+ body=body)
+ # NOTE: on v2.1, http status code is set as wsgi_code of API
+ # method instead of status_int in a response object.
+ if isinstance(self.admin_api,
+ admin_actions_v21.AdminActionsController):
+ status_int = self.admin_api._reset_state.wsgi_code
+ else:
+ status_int = result.status_int
+ self.assertEqual(202, status_int)
+
+ def test_reset_error(self):
+ self._setup_mock(dict(vm_state=vm_states.ERROR,
+ task_state=None))
+ self.mox.ReplayAll()
+ body = {"os-resetState": {"state": "error"}}
+ result = self.admin_api._reset_state(self.request, self.uuid,
+ body=body)
+ # NOTE: on v2.1, http status code is set as wsgi_code of API
+ # method instead of status_int in a response object.
+ if isinstance(self.admin_api,
+ admin_actions_v21.AdminActionsController):
+ status_int = self.admin_api._reset_state.wsgi_code
+ else:
+ status_int = result.status_int
+ self.assertEqual(202, status_int)
+
+
+class ResetStateTestsV2(ResetStateTestsV21):
+ admin_act = admin_actions_v2
+ bad_request = webob.exc.HTTPBadRequest
+ fake_url = '/fake/servers'
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_admin_password.py b/nova/tests/unit/api/openstack/compute/contrib/test_admin_password.py
new file mode 100644
index 0000000000..4ddfc08dcc
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_admin_password.py
@@ -0,0 +1,111 @@
+# Copyright 2011 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.plugins.v3 import admin_password \
+ as admin_password_v21
+from nova.compute import api as compute_api
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+def fake_get(self, context, id, expected_attrs=None, want_objects=False):
+ return {'uuid': id}
+
+
+def fake_get_non_existent(self, context, id, expected_attrs=None,
+ want_objects=False):
+ raise exception.InstanceNotFound(instance_id=id)
+
+
+def fake_set_admin_password(self, context, instance, password=None):
+ pass
+
+
+def fake_set_admin_password_failed(self, context, instance, password=None):
+ raise exception.InstancePasswordSetFailed(instance=instance, reason='')
+
+
+def fake_set_admin_password_not_implemented(self, context, instance,
+ password=None):
+ raise NotImplementedError()
+
+
+class AdminPasswordTestV21(test.NoDBTestCase):
+ plugin = admin_password_v21
+
+ def setUp(self):
+ super(AdminPasswordTestV21, self).setUp()
+ self.stubs.Set(compute_api.API, 'set_admin_password',
+ fake_set_admin_password)
+ self.stubs.Set(compute_api.API, 'get', fake_get)
+ self.app = fakes.wsgi_app_v21(init_only=('servers',
+ self.plugin.ALIAS))
+
+ def _make_request(self, body):
+ req = webob.Request.blank('/v2/fake/servers/1/action')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.content_type = 'application/json'
+ res = req.get_response(self.app)
+ return res
+
+ def test_change_password(self):
+ body = {'changePassword': {'adminPass': 'test'}}
+ res = self._make_request(body)
+ self.assertEqual(res.status_int, 202)
+
+ def test_change_password_empty_string(self):
+ body = {'changePassword': {'adminPass': ''}}
+ res = self._make_request(body)
+ self.assertEqual(res.status_int, 202)
+
+ def test_change_password_with_non_implement(self):
+ body = {'changePassword': {'adminPass': 'test'}}
+ self.stubs.Set(compute_api.API, 'set_admin_password',
+ fake_set_admin_password_not_implemented)
+ res = self._make_request(body)
+ self.assertEqual(res.status_int, 501)
+
+ def test_change_password_with_non_existed_instance(self):
+ body = {'changePassword': {'adminPass': 'test'}}
+ self.stubs.Set(compute_api.API, 'get', fake_get_non_existent)
+ res = self._make_request(body)
+ self.assertEqual(res.status_int, 404)
+
+ def test_change_password_with_non_string_password(self):
+ body = {'changePassword': {'adminPass': 1234}}
+ res = self._make_request(body)
+ self.assertEqual(res.status_int, 400)
+
+ def test_change_password_failed(self):
+ body = {'changePassword': {'adminPass': 'test'}}
+ self.stubs.Set(compute_api.API, 'set_admin_password',
+ fake_set_admin_password_failed)
+ res = self._make_request(body)
+ self.assertEqual(res.status_int, 409)
+
+ def test_change_password_without_admin_password(self):
+ body = {'changPassword': {}}
+ res = self._make_request(body)
+ self.assertEqual(res.status_int, 400)
+
+ def test_change_password_none(self):
+ body = {'changePassword': None}
+ res = self._make_request(body)
+ self.assertEqual(res.status_int, 400)
diff --git a/nova/tests/api/openstack/compute/contrib/test_agents.py b/nova/tests/unit/api/openstack/compute/contrib/test_agents.py
index b8c6f857b6..b8c6f857b6 100644
--- a/nova/tests/api/openstack/compute/contrib/test_agents.py
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_agents.py
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_aggregates.py b/nova/tests/unit/api/openstack/compute/contrib/test_aggregates.py
new file mode 100644
index 0000000000..9b52146fa1
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_aggregates.py
@@ -0,0 +1,670 @@
+# Copyright (c) 2012 Citrix Systems, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the aggregates admin api."""
+
+import mock
+from webob import exc
+
+from nova.api.openstack.compute.contrib import aggregates as aggregates_v2
+from nova.api.openstack.compute.plugins.v3 import aggregates as aggregates_v21
+from nova import context
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import matchers
+
# Canned aggregate fixtures returned by the stubbed compute-API calls below.
AGGREGATE_LIST = [
    {"name": "aggregate1", "id": "1", "availability_zone": "nova1"},
    {"name": "aggregate2", "id": "2", "availability_zone": "nova1"},
    {"name": "aggregate3", "id": "3", "availability_zone": "nova2"},
    {"name": "aggregate1", "id": "4", "availability_zone": "nova1"}]
# Full aggregate record including metadata and hosts.
# NOTE(review): "host1, host2" is a single string element — presumably two
# separate hosts were intended; harmless here since tests only echo the
# fixture back.  TODO confirm.
AGGREGATE = {"name": "aggregate1",
             "id": "1",
             "availability_zone": "nova1",
             "metadata": {"foo": "bar"},
             "hosts": ["host1, host2"]}

# The same aggregate as formatted for a create response (metadata and
# hosts stripped).
FORMATTED_AGGREGATE = {"name": "aggregate1",
                       "id": "1",
                       "availability_zone": "nova1"}
+
+
class FakeRequest(object):
    """Minimal stand-in for a WSGI request carrying an admin context."""
    environ = {"nova.context": context.get_admin_context()}
+
+
+class AggregateTestCaseV21(test.NoDBTestCase):
+ """Test Case for aggregates admin api."""
+
+ add_host = 'self.controller._add_host'
+ remove_host = 'self.controller._remove_host'
+ set_metadata = 'self.controller._set_metadata'
+ bad_request = exception.ValidationError
+
    def _set_up(self):
        # Fixture construction shared with subclasses; AggregateTestCaseV2
        # overrides this to swap in the legacy v2 controller and request.
        self.controller = aggregates_v21.AggregateController()
        self.req = fakes.HTTPRequest.blank('/v3/os-aggregates',
                                           use_admin_context=True)
        self.user_req = fakes.HTTPRequest.blank('/v3/os-aggregates')
        self.context = self.req.environ['nova.context']

    def setUp(self):
        super(AggregateTestCaseV21, self).setUp()
        self._set_up()
+
+ def test_index(self):
+ def stub_list_aggregates(context):
+ if context is None:
+ raise Exception()
+ return AGGREGATE_LIST
+ self.stubs.Set(self.controller.api, 'get_aggregate_list',
+ stub_list_aggregates)
+
+ result = self.controller.index(self.req)
+
+ self.assertEqual(AGGREGATE_LIST, result["aggregates"])
+
+ def test_index_no_admin(self):
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.index,
+ self.user_req)
+
+ def test_create(self):
+ def stub_create_aggregate(context, name, availability_zone):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("test", name, "name")
+ self.assertEqual("nova1", availability_zone, "availability_zone")
+ return AGGREGATE
+ self.stubs.Set(self.controller.api, "create_aggregate",
+ stub_create_aggregate)
+
+ result = self.controller.create(self.req, body={"aggregate":
+ {"name": "test",
+ "availability_zone": "nova1"}})
+ self.assertEqual(FORMATTED_AGGREGATE, result["aggregate"])
+
+ def test_create_no_admin(self):
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.create, self.user_req,
+ body={"aggregate":
+ {"name": "test",
+ "availability_zone": "nova1"}})
+
+ def test_create_with_duplicate_aggregate_name(self):
+ def stub_create_aggregate(context, name, availability_zone):
+ raise exception.AggregateNameExists(aggregate_name=name)
+ self.stubs.Set(self.controller.api, "create_aggregate",
+ stub_create_aggregate)
+
+ self.assertRaises(exc.HTTPConflict, self.controller.create,
+ self.req, body={"aggregate":
+ {"name": "test",
+ "availability_zone": "nova1"}})
+
+ def test_create_with_incorrect_availability_zone(self):
+ def stub_create_aggregate(context, name, availability_zone):
+ raise exception.InvalidAggregateAction(action='create_aggregate',
+ aggregate_id="'N/A'",
+ reason='invalid zone')
+
+ self.stubs.Set(self.controller.api, "create_aggregate",
+ stub_create_aggregate)
+
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create,
+ self.req, body={"aggregate":
+ {"name": "test",
+ "availability_zone": "nova_bad"}})
+
+ def test_create_with_no_aggregate(self):
+ self.assertRaises(self.bad_request, self.controller.create,
+ self.req, body={"foo":
+ {"name": "test",
+ "availability_zone": "nova1"}})
+
+ def test_create_with_no_name(self):
+ self.assertRaises(self.bad_request, self.controller.create,
+ self.req, body={"aggregate":
+ {"foo": "test",
+ "availability_zone": "nova1"}})
+
+ def test_create_with_no_availability_zone(self):
+ def stub_create_aggregate(context, name, availability_zone):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("test", name, "name")
+ self.assertIsNone(availability_zone, "availability_zone")
+ return AGGREGATE
+ self.stubs.Set(self.controller.api, "create_aggregate",
+ stub_create_aggregate)
+
+ result = self.controller.create(self.req,
+ body={"aggregate": {"name": "test"}})
+ self.assertEqual(FORMATTED_AGGREGATE, result["aggregate"])
+
+ def test_create_with_null_name(self):
+ self.assertRaises(self.bad_request, self.controller.create,
+ self.req, body={"aggregate":
+ {"name": "",
+ "availability_zone": "nova1"}})
+
+ def test_create_with_name_too_long(self):
+ self.assertRaises(self.bad_request, self.controller.create,
+ self.req, body={"aggregate":
+ {"name": "x" * 256,
+ "availability_zone": "nova1"}})
+
+ def test_create_with_availability_zone_too_long(self):
+ self.assertRaises(self.bad_request, self.controller.create,
+ self.req, body={"aggregate":
+ {"name": "test",
+ "availability_zone": "x" * 256}})
+
+ def test_create_with_null_availability_zone(self):
+ aggregate = {"name": "aggregate1",
+ "id": "1",
+ "availability_zone": None,
+ "metadata": {},
+ "hosts": []}
+
+ formatted_aggregate = {"name": "aggregate1",
+ "id": "1",
+ "availability_zone": None}
+
+ def stub_create_aggregate(context, name, az_name):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("aggregate1", name, "name")
+ self.assertIsNone(az_name, "availability_zone")
+ return aggregate
+ self.stubs.Set(self.controller.api, 'create_aggregate',
+ stub_create_aggregate)
+
+ result = self.controller.create(self.req,
+ body={"aggregate":
+ {"name": "aggregate1",
+ "availability_zone": None}})
+ self.assertEqual(formatted_aggregate, result["aggregate"])
+
+ def test_create_with_empty_availability_zone(self):
+ self.assertRaises(self.bad_request, self.controller.create,
+ self.req, body={"aggregate":
+ {"name": "test",
+ "availability_zone": ""}})
+
+ def test_create_with_extra_invalid_arg(self):
+ self.assertRaises(self.bad_request, self.controller.create,
+ self.req, body={"name": "test",
+ "availability_zone": "nova1",
+ "foo": 'bar'})
+
+ def test_show(self):
+ def stub_get_aggregate(context, id):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("1", id, "id")
+ return AGGREGATE
+ self.stubs.Set(self.controller.api, 'get_aggregate',
+ stub_get_aggregate)
+
+ aggregate = self.controller.show(self.req, "1")
+
+ self.assertEqual(AGGREGATE, aggregate["aggregate"])
+
+ def test_show_no_admin(self):
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.show,
+ self.user_req, "1")
+
+ def test_show_with_invalid_id(self):
+ def stub_get_aggregate(context, id):
+ raise exception.AggregateNotFound(aggregate_id=2)
+
+ self.stubs.Set(self.controller.api, 'get_aggregate',
+ stub_get_aggregate)
+
+ self.assertRaises(exc.HTTPNotFound,
+ self.controller.show, self.req, "2")
+
+ def test_update(self):
+ body = {"aggregate": {"name": "new_name",
+ "availability_zone": "nova1"}}
+
+ def stub_update_aggregate(context, aggregate, values):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("1", aggregate, "aggregate")
+ self.assertEqual(body["aggregate"], values, "values")
+ return AGGREGATE
+ self.stubs.Set(self.controller.api, "update_aggregate",
+ stub_update_aggregate)
+
+ result = self.controller.update(self.req, "1", body=body)
+
+ self.assertEqual(AGGREGATE, result["aggregate"])
+
+ def test_update_no_admin(self):
+ body = {"aggregate": {"availability_zone": "nova"}}
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.update,
+ self.user_req, "1", body=body)
+
+ def test_update_with_only_name(self):
+ body = {"aggregate": {"name": "new_name"}}
+
+ def stub_update_aggregate(context, aggregate, values):
+ return AGGREGATE
+ self.stubs.Set(self.controller.api, "update_aggregate",
+ stub_update_aggregate)
+
+ result = self.controller.update(self.req, "1", body=body)
+
+ self.assertEqual(AGGREGATE, result["aggregate"])
+
+ def test_update_with_only_availability_zone(self):
+ body = {"aggregate": {"availability_zone": "nova1"}}
+
+ def stub_update_aggregate(context, aggregate, values):
+ return AGGREGATE
+ self.stubs.Set(self.controller.api, "update_aggregate",
+ stub_update_aggregate)
+ result = self.controller.update(self.req, "1", body=body)
+ self.assertEqual(AGGREGATE, result["aggregate"])
+
+ def test_update_with_no_updates(self):
+ test_metadata = {"aggregate": {}}
+ self.assertRaises(self.bad_request, self.controller.update,
+ self.req, "2", body=test_metadata)
+
+ def test_update_with_no_update_key(self):
+ test_metadata = {"asdf": {}}
+ self.assertRaises(self.bad_request, self.controller.update,
+ self.req, "2", body=test_metadata)
+
+ def test_update_with_wrong_updates(self):
+ test_metadata = {"aggregate": {"status": "disable",
+ "foo": "bar"}}
+ self.assertRaises(self.bad_request, self.controller.update,
+ self.req, "2", body=test_metadata)
+
+ def test_update_with_null_name(self):
+ test_metadata = {"aggregate": {"name": ""}}
+ self.assertRaises(self.bad_request, self.controller.update,
+ self.req, "2", body=test_metadata)
+
+ def test_update_with_name_too_long(self):
+ test_metadata = {"aggregate": {"name": "x" * 256}}
+ self.assertRaises(self.bad_request, self.controller.update,
+ self.req, "2", body=test_metadata)
+
+ def test_update_with_availability_zone_too_long(self):
+ test_metadata = {"aggregate": {"availability_zone": "x" * 256}}
+ self.assertRaises(self.bad_request, self.controller.update,
+ self.req, "2", body=test_metadata)
+
+ def test_update_with_empty_availability_zone(self):
+ test_metadata = {"aggregate": {"availability_zone": ""}}
+ self.assertRaises(self.bad_request, self.controller.update,
+ self.req, "2", body=test_metadata)
+
+ def test_update_with_null_availability_zone(self):
+ body = {"aggregate": {"availability_zone": None}}
+ aggre = {"name": "aggregate1",
+ "id": "1",
+ "availability_zone": None}
+
+ def stub_update_aggregate(context, aggregate, values):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("1", aggregate, "aggregate")
+ self.assertIsNone(values["availability_zone"], "availability_zone")
+ return aggre
+ self.stubs.Set(self.controller.api, "update_aggregate",
+ stub_update_aggregate)
+
+ result = self.controller.update(self.req, "1", body=body)
+
+ self.assertEqual(aggre, result["aggregate"])
+
+ def test_update_with_bad_aggregate(self):
+ test_metadata = {"aggregate": {"name": "test_name"}}
+
+ def stub_update_aggregate(context, aggregate, metadata):
+ raise exception.AggregateNotFound(aggregate_id=2)
+ self.stubs.Set(self.controller.api, "update_aggregate",
+ stub_update_aggregate)
+
+ self.assertRaises(exc.HTTPNotFound, self.controller.update,
+ self.req, "2", body=test_metadata)
+
+ def test_update_with_duplicated_name(self):
+ test_metadata = {"aggregate": {"name": "test_name"}}
+
+ def stub_update_aggregate(context, aggregate, metadata):
+ raise exception.AggregateNameExists(aggregate_name="test_name")
+
+ self.stubs.Set(self.controller.api, "update_aggregate",
+ stub_update_aggregate)
+ self.assertRaises(exc.HTTPConflict, self.controller.update,
+ self.req, "2", body=test_metadata)
+
+ def test_invalid_action(self):
+ body = {"append_host": {"host": "host1"}}
+ self.assertRaises(self.bad_request,
+ eval(self.add_host), self.req, "1", body=body)
+
+ def test_update_with_invalid_action(self):
+ with mock.patch.object(self.controller.api, "update_aggregate",
+ side_effect=exception.InvalidAggregateAction(
+ action='invalid', aggregate_id='agg1', reason= "not empty")):
+ body = {"aggregate": {"availability_zone": "nova"}}
+ self.assertRaises(exc.HTTPBadRequest, self.controller.update,
+ self.req, "1", body=body)
+
+ def test_add_host(self):
+ def stub_add_host_to_aggregate(context, aggregate, host):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("1", aggregate, "aggregate")
+ self.assertEqual("host1", host, "host")
+ return AGGREGATE
+ self.stubs.Set(self.controller.api, "add_host_to_aggregate",
+ stub_add_host_to_aggregate)
+
+ aggregate = eval(self.add_host)(self.req, "1",
+ body={"add_host": {"host":
+ "host1"}})
+
+ self.assertEqual(aggregate["aggregate"], AGGREGATE)
+
+ def test_add_host_no_admin(self):
+ self.assertRaises(exception.PolicyNotAuthorized,
+ eval(self.add_host),
+ self.user_req, "1",
+ body={"add_host": {"host": "host1"}})
+
+ def test_add_host_with_already_added_host(self):
+ def stub_add_host_to_aggregate(context, aggregate, host):
+ raise exception.AggregateHostExists(aggregate_id=aggregate,
+ host=host)
+ self.stubs.Set(self.controller.api, "add_host_to_aggregate",
+ stub_add_host_to_aggregate)
+
+ self.assertRaises(exc.HTTPConflict, eval(self.add_host),
+ self.req, "1",
+ body={"add_host": {"host": "host1"}})
+
+ def test_add_host_with_bad_aggregate(self):
+ def stub_add_host_to_aggregate(context, aggregate, host):
+ raise exception.AggregateNotFound(aggregate_id=aggregate)
+ self.stubs.Set(self.controller.api, "add_host_to_aggregate",
+ stub_add_host_to_aggregate)
+
+ self.assertRaises(exc.HTTPNotFound, eval(self.add_host),
+ self.req, "bogus_aggregate",
+ body={"add_host": {"host": "host1"}})
+
+ def test_add_host_with_bad_host(self):
+ def stub_add_host_to_aggregate(context, aggregate, host):
+ raise exception.ComputeHostNotFound(host=host)
+ self.stubs.Set(self.controller.api, "add_host_to_aggregate",
+ stub_add_host_to_aggregate)
+
+ self.assertRaises(exc.HTTPNotFound, eval(self.add_host),
+ self.req, "1",
+ body={"add_host": {"host": "bogus_host"}})
+
+ def test_add_host_with_missing_host(self):
+ self.assertRaises(self.bad_request, eval(self.add_host),
+ self.req, "1", body={"add_host": {"asdf": "asdf"}})
+
+ def test_add_host_with_invalid_format_host(self):
+ self.assertRaises(self.bad_request, eval(self.add_host),
+ self.req, "1", body={"add_host": {"host": "a" * 300}})
+
+ def test_add_host_with_multiple_hosts(self):
+ self.assertRaises(self.bad_request, eval(self.add_host),
+ self.req, "1", body={"add_host": {"host": ["host1", "host2"]}})
+
+ def test_add_host_raises_key_error(self):
+ def stub_add_host_to_aggregate(context, aggregate, host):
+ raise KeyError
+ self.stubs.Set(self.controller.api, "add_host_to_aggregate",
+ stub_add_host_to_aggregate)
+ self.assertRaises(exc.HTTPInternalServerError,
+ eval(self.add_host), self.req, "1",
+ body={"add_host": {"host": "host1"}})
+
+ def test_add_host_with_invalid_request(self):
+ self.assertRaises(self.bad_request, eval(self.add_host),
+ self.req, "1", body={"add_host": "1"})
+
+ def test_add_host_with_non_string(self):
+ self.assertRaises(self.bad_request, eval(self.add_host),
+ self.req, "1", body={"add_host": {"host": 1}})
+
+ def test_remove_host(self):
+ def stub_remove_host_from_aggregate(context, aggregate, host):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("1", aggregate, "aggregate")
+ self.assertEqual("host1", host, "host")
+ stub_remove_host_from_aggregate.called = True
+ return {}
+ self.stubs.Set(self.controller.api,
+ "remove_host_from_aggregate",
+ stub_remove_host_from_aggregate)
+ eval(self.remove_host)(self.req, "1",
+ body={"remove_host": {"host": "host1"}})
+
+ self.assertTrue(stub_remove_host_from_aggregate.called)
+
+ def test_remove_host_no_admin(self):
+ self.assertRaises(exception.PolicyNotAuthorized,
+ eval(self.remove_host),
+ self.user_req, "1",
+ body={"remove_host": {"host": "host1"}})
+
+ def test_remove_host_with_bad_aggregate(self):
+ def stub_remove_host_from_aggregate(context, aggregate, host):
+ raise exception.AggregateNotFound(aggregate_id=aggregate)
+ self.stubs.Set(self.controller.api,
+ "remove_host_from_aggregate",
+ stub_remove_host_from_aggregate)
+
+ self.assertRaises(exc.HTTPNotFound, eval(self.remove_host),
+ self.req, "bogus_aggregate",
+ body={"remove_host": {"host": "host1"}})
+
+ def test_remove_host_with_host_not_in_aggregate(self):
+ def stub_remove_host_from_aggregate(context, aggregate, host):
+ raise exception.AggregateHostNotFound(aggregate_id=aggregate,
+ host=host)
+ self.stubs.Set(self.controller.api,
+ "remove_host_from_aggregate",
+ stub_remove_host_from_aggregate)
+
+ self.assertRaises(exc.HTTPNotFound, eval(self.remove_host),
+ self.req, "1",
+ body={"remove_host": {"host": "host1"}})
+
+ def test_remove_host_with_bad_host(self):
+ def stub_remove_host_from_aggregate(context, aggregate, host):
+ raise exception.ComputeHostNotFound(host=host)
+ self.stubs.Set(self.controller.api,
+ "remove_host_from_aggregate",
+ stub_remove_host_from_aggregate)
+
+ self.assertRaises(exc.HTTPNotFound, eval(self.remove_host),
+ self.req, "1", body={"remove_host": {"host": "bogushost"}})
+
+ def test_remove_host_with_missing_host(self):
+ self.assertRaises(self.bad_request, eval(self.remove_host),
+ self.req, "1", body={"asdf": "asdf"})
+
+ def test_remove_host_with_multiple_hosts(self):
+ self.assertRaises(self.bad_request, eval(self.remove_host),
+ self.req, "1", body={"remove_host": {"host":
+ ["host1", "host2"]}})
+
+ def test_remove_host_with_extra_param(self):
+ self.assertRaises(self.bad_request, eval(self.remove_host),
+ self.req, "1", body={"remove_host": {"asdf": "asdf",
+ "host": "asdf"}})
+
+ def test_remove_host_with_invalid_request(self):
+ self.assertRaises(self.bad_request,
+ eval(self.remove_host),
+ self.req, "1", body={"remove_host": "1"})
+
+ def test_remove_host_with_missing_host_empty(self):
+ self.assertRaises(self.bad_request,
+ eval(self.remove_host),
+ self.req, "1", body={"remove_host": {}})
+
+ def test_set_metadata(self):
+ body = {"set_metadata": {"metadata": {"foo": "bar"}}}
+
+ def stub_update_aggregate(context, aggregate, values):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("1", aggregate, "aggregate")
+ self.assertThat(body["set_metadata"]['metadata'],
+ matchers.DictMatches(values))
+ return AGGREGATE
+ self.stubs.Set(self.controller.api,
+ "update_aggregate_metadata",
+ stub_update_aggregate)
+
+ result = eval(self.set_metadata)(self.req, "1", body=body)
+
+ self.assertEqual(AGGREGATE, result["aggregate"])
+
+ def test_set_metadata_delete(self):
+ body = {"set_metadata": {"metadata": {"foo": None}}}
+
+ with mock.patch.object(self.controller.api,
+ 'update_aggregate_metadata') as mocked:
+ mocked.return_value = AGGREGATE
+ result = eval(self.set_metadata)(self.req, "1", body=body)
+
+ self.assertEqual(AGGREGATE, result["aggregate"])
+ mocked.assert_called_once_with(self.context, "1",
+ body["set_metadata"]["metadata"])
+
+ def test_set_metadata_no_admin(self):
+ self.assertRaises(exception.PolicyNotAuthorized,
+ eval(self.set_metadata),
+ self.user_req, "1",
+ body={"set_metadata": {"metadata":
+ {"foo": "bar"}}})
+
+ def test_set_metadata_with_bad_aggregate(self):
+ body = {"set_metadata": {"metadata": {"foo": "bar"}}}
+
+ def stub_update_aggregate(context, aggregate, metadata):
+ raise exception.AggregateNotFound(aggregate_id=aggregate)
+ self.stubs.Set(self.controller.api,
+ "update_aggregate_metadata",
+ stub_update_aggregate)
+ self.assertRaises(exc.HTTPNotFound, eval(self.set_metadata),
+ self.req, "bad_aggregate", body=body)
+
+ def test_set_metadata_with_missing_metadata(self):
+ body = {"asdf": {"foo": "bar"}}
+ self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
+ self.req, "1", body=body)
+
+ def test_set_metadata_with_extra_params(self):
+ body = {"metadata": {"foo": "bar"}, "asdf": {"foo": "bar"}}
+ self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
+ self.req, "1", body=body)
+
+ def test_set_metadata_without_dict(self):
+ body = {"set_metadata": {'metadata': 1}}
+ self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
+ self.req, "1", body=body)
+
+ def test_set_metadata_with_empty_key(self):
+ body = {"set_metadata": {"metadata": {"": "value"}}}
+ self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
+ self.req, "1", body=body)
+
+ def test_set_metadata_with_key_too_long(self):
+ body = {"set_metadata": {"metadata": {"x" * 256: "value"}}}
+ self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
+ self.req, "1", body=body)
+
+ def test_set_metadata_with_value_too_long(self):
+ body = {"set_metadata": {"metadata": {"key": "x" * 256}}}
+ self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
+ self.req, "1", body=body)
+
+ def test_set_metadata_with_string(self):
+ body = {"set_metadata": {"metadata": "test"}}
+ self.assertRaises(exc.HTTPBadRequest, eval(self.set_metadata),
+ self.req, "1", body=body)
+
+ def test_delete_aggregate(self):
+ def stub_delete_aggregate(context, aggregate):
+ self.assertEqual(context, self.context, "context")
+ self.assertEqual("1", aggregate, "aggregate")
+ stub_delete_aggregate.called = True
+ self.stubs.Set(self.controller.api, "delete_aggregate",
+ stub_delete_aggregate)
+
+ self.controller.delete(self.req, "1")
+ self.assertTrue(stub_delete_aggregate.called)
+
+ def test_delete_aggregate_no_admin(self):
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.delete,
+ self.user_req, "1")
+
+ def test_delete_aggregate_with_bad_aggregate(self):
+ def stub_delete_aggregate(context, aggregate):
+ raise exception.AggregateNotFound(aggregate_id=aggregate)
+ self.stubs.Set(self.controller.api, "delete_aggregate",
+ stub_delete_aggregate)
+
+ self.assertRaises(exc.HTTPNotFound, self.controller.delete,
+ self.req, "bogus_aggregate")
+
+ def test_delete_aggregate_with_host(self):
+ with mock.patch.object(self.controller.api, "delete_aggregate",
+ side_effect=exception.InvalidAggregateAction(
+ action="delete", aggregate_id="agg1",
+ reason="not empty")):
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.delete,
+ self.req, "agg1")
+
+
class AggregateTestCaseV2(AggregateTestCaseV21):
    """Run the v2.1 test cases against the legacy v2 extension.

    The v2 controller routes host/metadata operations through a single
    'action' method (dispatched in the parent class via the eval()'d
    attribute strings below) and reports invalid input as webob
    HTTPBadRequest rather than the v2.1 ValidationError.
    """
    add_host = 'self.controller.action'
    remove_host = 'self.controller.action'
    set_metadata = 'self.controller.action'
    bad_request = exc.HTTPBadRequest

    def _set_up(self):
        # Swap in the v2 controller; FakeRequest supplies an admin context
        # directly via environ.
        self.controller = aggregates_v2.AggregateController()
        self.req = FakeRequest()
        self.user_req = fakes.HTTPRequest.blank('/v2/os-aggregates')
        self.context = self.req.environ['nova.context']

    def test_add_host_raises_key_error(self):
        # Overrides the v2.1 version: on the v2 code path the KeyError must
        # propagate rather than be mapped to an HTTP error.
        def stub_add_host_to_aggregate(context, aggregate, host):
            raise KeyError
        self.stubs.Set(self.controller.api, "add_host_to_aggregate",
                       stub_add_host_to_aggregate)
        # NOTE(mtreinish) The check for a KeyError here is to ensure that
        # if add_host_to_aggregate() raises a KeyError it propagates. At
        # one point the api code would mask the error as a HTTPBadRequest.
        # This test is to ensure that this doesn't occur again.
        self.assertRaises(KeyError, eval(self.add_host), self.req, "1",
                          body={"add_host": {"host": "host1"}})
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_attach_interfaces.py b/nova/tests/unit/api/openstack/compute/contrib/test_attach_interfaces.py
new file mode 100644
index 0000000000..3b7e0b058a
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_attach_interfaces.py
@@ -0,0 +1,455 @@
+# Copyright 2012 SINA Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+
+from nova.api.openstack.compute.contrib import attach_interfaces \
+ as attach_interfaces_v2
+from nova.api.openstack.compute.plugins.v3 import attach_interfaces \
+ as attach_interfaces_v3
+from nova.compute import api as compute_api
+from nova import context
+from nova import exception
+from nova.network import api as network_api
+from nova import objects
+from nova import test
+from nova.tests.unit import fake_network_cache_model
+
+import webob
+from webob import exc
+
+
+CONF = cfg.CONF
+
+FAKE_UUID1 = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+FAKE_UUID2 = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
+
+FAKE_PORT_ID1 = '11111111-1111-1111-1111-111111111111'
+FAKE_PORT_ID2 = '22222222-2222-2222-2222-222222222222'
+FAKE_PORT_ID3 = '33333333-3333-3333-3333-333333333333'
+
+FAKE_NET_ID1 = '44444444-4444-4444-4444-444444444444'
+FAKE_NET_ID2 = '55555555-5555-5555-5555-555555555555'
+FAKE_NET_ID3 = '66666666-6666-6666-6666-666666666666'
+FAKE_BAD_NET_ID = '00000000-0000-0000-0000-000000000000'
+
+port_data1 = {
+ "id": FAKE_PORT_ID1,
+ "network_id": FAKE_NET_ID1,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "aa:aa:aa:aa:aa:aa",
+ "fixed_ips": ["10.0.1.2"],
+ "device_id": FAKE_UUID1,
+}
+
+port_data2 = {
+ "id": FAKE_PORT_ID2,
+ "network_id": FAKE_NET_ID2,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "bb:bb:bb:bb:bb:bb",
+ "fixed_ips": ["10.0.2.2"],
+ "device_id": FAKE_UUID1,
+}
+
+port_data3 = {
+ "id": FAKE_PORT_ID3,
+ "network_id": FAKE_NET_ID3,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "bb:bb:bb:bb:bb:bb",
+ "fixed_ips": ["10.0.2.2"],
+ "device_id": '',
+}
+
+fake_networks = [FAKE_NET_ID1, FAKE_NET_ID2]
+ports = [port_data1, port_data2, port_data3]
+
+
def fake_list_ports(self, *args, **kwargs):
    """Stub network API: return the fake ports bound to kwargs['device_id']."""
    device_id = kwargs['device_id']
    return {'ports': [p for p in ports if p['device_id'] == device_id]}
+
+
def fake_show_port(self, context, port_id, **kwargs):
    """Stub network API: return {'port': ...} for the fake port *port_id*.

    Raises PortNotFound when no fake port matches.  The ``else`` belongs
    to the ``for`` loop (for/else): it runs only when the loop finishes
    without returning, i.e. no port matched.
    """
    for port in ports:
        if port['id'] == port_id:
            return {'port': port}
    else:
        raise exception.PortNotFound(port_id=port_id)
+
+
def fake_attach_interface(self, context, instance, network_id, port_id,
                          requested_ip='192.168.1.3'):
    """Stub compute API attach_interface.

    Mimics the real API's defaulting: with no network_id the first fake
    "default" network is used; with no port_id the port paired with the
    chosen network is picked.  Returns a fake VIF dict wired up with the
    resolved ids and requested IP.  Raises NetworkNotFound for the
    designated bad network id.
    """
    if not network_id:
        # if no network_id is given when add a port to an instance, use the
        # first default network.
        network_id = fake_networks[0]
    if network_id == FAKE_BAD_NET_ID:
        raise exception.NetworkNotFound(network_id=network_id)
    if not port_id:
        # fake_networks and ports are index-aligned for this lookup.
        port_id = ports[fake_networks.index(network_id)]['id']
    vif = fake_network_cache_model.new_vif()
    vif['id'] = port_id
    vif['network']['id'] = network_id
    vif['network']['subnets'][0]['ips'][0]['address'] = requested_ip
    return vif
+
+
def fake_detach_interface(self, context, instance, port_id):
    """Stub compute API detach_interface.

    Succeeds silently for a known fake port, otherwise raises
    PortNotFound.
    """
    if any(port['id'] == port_id for port in ports):
        return
    raise exception.PortNotFound(port_id=port_id)
+
+
def fake_get_instance(self, *args, **kwargs):
    # Stub compute API get: always "finds" the first fake instance.
    return objects.Instance(uuid=FAKE_UUID1)
+
+
+class InterfaceAttachTestsV21(test.NoDBTestCase):
+ url = '/v3/os-interfaces'
+ controller_cls = attach_interfaces_v3.InterfaceAttachmentController
+ validate_exc = exception.ValidationError
+
+ def setUp(self):
+ super(InterfaceAttachTestsV21, self).setUp()
+ self.flags(auth_strategy=None, group='neutron')
+ self.flags(url='http://anyhost/', group='neutron')
+ self.flags(url_timeout=30, group='neutron')
+ self.stubs.Set(network_api.API, 'show_port', fake_show_port)
+ self.stubs.Set(network_api.API, 'list_ports', fake_list_ports)
+ self.stubs.Set(compute_api.API, 'get', fake_get_instance)
+ self.context = context.get_admin_context()
+ self.expected_show = {'interfaceAttachment':
+ {'net_id': FAKE_NET_ID1,
+ 'port_id': FAKE_PORT_ID1,
+ 'mac_addr': port_data1['mac_address'],
+ 'port_state': port_data1['status'],
+ 'fixed_ips': port_data1['fixed_ips'],
+ }}
+ self.attachments = self.controller_cls()
+
+ @mock.patch.object(compute_api.API, 'get',
+ side_effect=exception.InstanceNotFound(instance_id=''))
+ def _test_instance_not_found(self, url, func, args, mock_get, kwargs=None,
+ method='GET'):
+ req = webob.Request.blank(url)
+ req.method = method
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ if not kwargs:
+ kwargs = {}
+ self.assertRaises(exc.HTTPNotFound, func, req, *args, **kwargs)
+
+ def test_show_instance_not_found(self):
+ self._test_instance_not_found(self.url + 'fake',
+ self.attachments.show, ('fake', 'fake'))
+
+ def test_index_instance_not_found(self):
+ self._test_instance_not_found(self.url,
+ self.attachments.index, ('fake', ))
+
+ def test_detach_interface_instance_not_found(self):
+ self._test_instance_not_found(self.url + '/fake',
+ self.attachments.delete,
+ ('fake', 'fake'), method='DELETE')
+
+ def test_attach_interface_instance_not_found(self):
+ self._test_instance_not_found(
+ '/v2/fake/os-interfaces', self.attachments.create, ('fake', ),
+ kwargs={'body': {'interfaceAttachment': {}}}, method='POST')
+
+ def test_show(self):
+ req = webob.Request.blank(self.url + '/show')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ result = self.attachments.show(req, FAKE_UUID1, FAKE_PORT_ID1)
+ self.assertEqual(self.expected_show, result)
+
+ def test_show_invalid(self):
+ req = webob.Request.blank(self.url + '/show')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ self.assertRaises(exc.HTTPNotFound,
+ self.attachments.show, req, FAKE_UUID2,
+ FAKE_PORT_ID1)
+
+ @mock.patch.object(network_api.API, 'show_port',
+ side_effect=exception.Forbidden)
+ def test_show_forbidden(self, show_port_mock):
+ req = webob.Request.blank(self.url + '/show')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ self.assertRaises(exc.HTTPForbidden,
+ self.attachments.show, req, FAKE_UUID1,
+ FAKE_PORT_ID1)
+
+ def test_delete(self):
+ self.stubs.Set(compute_api.API, 'detach_interface',
+ fake_detach_interface)
+ req = webob.Request.blank(self.url + '/delete')
+ req.method = 'DELETE'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ result = self.attachments.delete(req, FAKE_UUID1, FAKE_PORT_ID1)
+ # NOTE: on v2.1, http status code is set as wsgi_code of API
+ # method instead of status_int in a response object.
+ if isinstance(self.attachments,
+ attach_interfaces_v3.InterfaceAttachmentController):
+ status_int = self.attachments.delete.wsgi_code
+ else:
+ status_int = result.status_int
+ self.assertEqual(202, status_int)
+
+ def test_detach_interface_instance_locked(self):
+ def fake_detach_interface_from_locked_server(self, context,
+ instance, port_id):
+ raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1)
+
+ self.stubs.Set(compute_api.API,
+ 'detach_interface',
+ fake_detach_interface_from_locked_server)
+ req = webob.Request.blank(self.url + '/delete')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ self.assertRaises(exc.HTTPConflict,
+ self.attachments.delete,
+ req,
+ FAKE_UUID1,
+ FAKE_PORT_ID1)
+
+ def test_delete_interface_not_found(self):
+ self.stubs.Set(compute_api.API, 'detach_interface',
+ fake_detach_interface)
+ req = webob.Request.blank(self.url + '/delete')
+ req.method = 'DELETE'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+
+ self.assertRaises(exc.HTTPNotFound,
+ self.attachments.delete,
+ req,
+ FAKE_UUID1,
+                          'invalid-port-id')
+
+ def test_attach_interface_instance_locked(self):
+ def fake_attach_interface_to_locked_server(self, context,
+ instance, network_id, port_id, requested_ip):
+ raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1)
+
+ self.stubs.Set(compute_api.API,
+ 'attach_interface',
+ fake_attach_interface_to_locked_server)
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exc.HTTPConflict,
+ self.attachments.create, req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
+
+ def test_attach_interface_without_network_id(self):
+ self.stubs.Set(compute_api.API, 'attach_interface',
+ fake_attach_interface)
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ result = self.attachments.create(req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
+ self.assertEqual(result['interfaceAttachment']['net_id'],
+ FAKE_NET_ID1)
+
+ def test_attach_interface_with_network_id(self):
+ self.stubs.Set(compute_api.API, 'attach_interface',
+ fake_attach_interface)
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({'interfaceAttachment':
+ {'net_id': FAKE_NET_ID2}})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ result = self.attachments.create(req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
+ self.assertEqual(result['interfaceAttachment']['net_id'],
+ FAKE_NET_ID2)
+
+ def _attach_interface_bad_request_case(self, body):
+ self.stubs.Set(compute_api.API, 'attach_interface',
+ fake_attach_interface)
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exc.HTTPBadRequest,
+ self.attachments.create, req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
+
+ def test_attach_interface_with_port_and_network_id(self):
+ body = {
+ 'interfaceAttachment': {
+ 'port_id': FAKE_PORT_ID1,
+ 'net_id': FAKE_NET_ID2
+ }
+ }
+ self._attach_interface_bad_request_case(body)
+
+ def test_attach_interface_with_invalid_data(self):
+ body = {
+ 'interfaceAttachment': {
+ 'net_id': FAKE_BAD_NET_ID
+ }
+ }
+ self._attach_interface_bad_request_case(body)
+
+ def test_attach_interface_with_invalid_state(self):
+ def fake_attach_interface_invalid_state(*args, **kwargs):
+ raise exception.InstanceInvalidState(
+ instance_uuid='', attr='', state='',
+ method='attach_interface')
+
+ self.stubs.Set(compute_api.API, 'attach_interface',
+ fake_attach_interface_invalid_state)
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({'interfaceAttachment':
+ {'net_id': FAKE_NET_ID1}})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exc.HTTPConflict,
+ self.attachments.create, req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
+
+ def test_detach_interface_with_invalid_state(self):
+ def fake_detach_interface_invalid_state(*args, **kwargs):
+ raise exception.InstanceInvalidState(
+ instance_uuid='', attr='', state='',
+ method='detach_interface')
+
+ self.stubs.Set(compute_api.API, 'detach_interface',
+ fake_detach_interface_invalid_state)
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'DELETE'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exc.HTTPConflict,
+ self.attachments.delete,
+ req,
+ FAKE_UUID1,
+ FAKE_NET_ID1)
+
+ def test_attach_interface_invalid_fixed_ip(self):
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'POST'
+ body = {
+ 'interfaceAttachment': {
+ 'net_id': FAKE_NET_ID1,
+ 'fixed_ips': [{'ip_address': 'invalid_ip'}]
+ }
+ }
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.assertRaises(self.validate_exc,
+ self.attachments.create, req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
+
+ @mock.patch.object(compute_api.API, 'get')
+ @mock.patch.object(compute_api.API, 'attach_interface')
+ def test_attach_interface_fixed_ip_already_in_use(self,
+ attach_mock,
+ get_mock):
+ fake_instance = objects.Instance(uuid=FAKE_UUID1)
+ get_mock.return_value = fake_instance
+ attach_mock.side_effect = exception.FixedIpAlreadyInUse(
+ address='10.0.2.2', instance_uuid=FAKE_UUID1)
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exc.HTTPBadRequest,
+ self.attachments.create, req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
+ attach_mock.assert_called_once_with(self.context, fake_instance, None,
+ None, None)
+ get_mock.assert_called_once_with(self.context, FAKE_UUID1,
+ want_objects=True,
+ expected_attrs=None)
+
+ def _test_attach_interface_with_invalid_parameter(self, param):
+ self.stubs.Set(compute_api.API, 'attach_interface',
+ fake_attach_interface)
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({'interface_attachment': param})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exception.ValidationError,
+ self.attachments.create, req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
+
+ def test_attach_interface_instance_with_non_uuid_net_id(self):
+ param = {'net_id': 'non_uuid'}
+ self._test_attach_interface_with_invalid_parameter(param)
+
+ def test_attach_interface_instance_with_non_uuid_port_id(self):
+ param = {'port_id': 'non_uuid'}
+ self._test_attach_interface_with_invalid_parameter(param)
+
+ def test_attach_interface_instance_with_non_array_fixed_ips(self):
+ param = {'fixed_ips': 'non_array'}
+ self._test_attach_interface_with_invalid_parameter(param)
+
+
+class InterfaceAttachTestsV2(InterfaceAttachTestsV21):
+ url = '/v2/fake/os-interfaces'
+ controller_cls = attach_interfaces_v2.InterfaceAttachmentController
+ validate_exc = exc.HTTPBadRequest
+
+ def test_attach_interface_instance_with_non_uuid_net_id(self):
+ pass
+
+ def test_attach_interface_instance_with_non_uuid_port_id(self):
+ pass
+
+ def test_attach_interface_instance_with_non_array_fixed_ips(self):
+ pass
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_availability_zone.py b/nova/tests/unit/api/openstack/compute/contrib/test_availability_zone.py
new file mode 100644
index 0000000000..31b20d6861
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_availability_zone.py
@@ -0,0 +1,512 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from lxml import etree
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import availability_zone as az_v2
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import availability_zone as az_v21
+from nova.api.openstack.compute.plugins.v3 import servers as servers_v21
+from nova.api.openstack.compute import servers as servers_v2
+from nova.api.openstack import extensions
+from nova import availability_zones
+from nova.compute import api as compute_api
+from nova.compute import flavors
+from nova import context
+from nova import db
+from nova import exception
+from nova import servicegroup
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.image import fake
+from nova.tests.unit import matchers
+from nova.tests.unit.objects import test_service
+
+FAKE_UUID = fakes.FAKE_UUID
+
+
+def fake_service_get_all(context, disabled=None):
+ def __fake_service(binary, availability_zone,
+ created_at, updated_at, host, disabled):
+ return dict(test_service.fake_service,
+ binary=binary,
+ availability_zone=availability_zone,
+ available_zones=availability_zone,
+ created_at=created_at,
+ updated_at=updated_at,
+ host=host,
+ disabled=disabled)
+
+ if disabled:
+ return [__fake_service("nova-compute", "zone-2",
+ datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
+ datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
+ "fake_host-1", True),
+ __fake_service("nova-scheduler", "internal",
+ datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
+ datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
+ "fake_host-1", True),
+ __fake_service("nova-network", "internal",
+ datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
+ datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
+ "fake_host-2", True)]
+ else:
+ return [__fake_service("nova-compute", "zone-1",
+ datetime.datetime(2012, 11, 14, 9, 53, 25, 0),
+ datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
+ "fake_host-1", False),
+ __fake_service("nova-sched", "internal",
+ datetime.datetime(2012, 11, 14, 9, 57, 3, 0),
+ datetime.datetime(2012, 12, 26, 14, 45, 25, 0),
+ "fake_host-1", False),
+ __fake_service("nova-network", "internal",
+ datetime.datetime(2012, 11, 16, 7, 25, 46, 0),
+ datetime.datetime(2012, 12, 26, 14, 45, 24, 0),
+ "fake_host-2", False)]
+
+
+def fake_service_is_up(self, service):
+ return service['binary'] != u"nova-network"
+
+
+def fake_set_availability_zones(context, services):
+ return services
+
+
+def fake_get_availability_zones(context):
+ return ['nova'], []
+
+
+CONF = cfg.CONF
+
+
+class AvailabilityZoneApiTestV21(test.NoDBTestCase):
+ availability_zone = az_v21
+ url = '/v2/fake/os-availability-zone'
+
+ def setUp(self):
+ super(AvailabilityZoneApiTestV21, self).setUp()
+ availability_zones.reset_cache()
+ self.stubs.Set(db, 'service_get_all', fake_service_get_all)
+ self.stubs.Set(availability_zones, 'set_availability_zones',
+ fake_set_availability_zones)
+ self.stubs.Set(servicegroup.API, 'service_is_up', fake_service_is_up)
+
+ def _get_wsgi_instance(self):
+ return fakes.wsgi_app_v21(init_only=('os-availability-zone',
+ 'servers'))
+
+ def test_filtered_availability_zones(self):
+ az = self.availability_zone.AvailabilityZoneController()
+ zones = ['zone1', 'internal']
+ expected = [{'zoneName': 'zone1',
+ 'zoneState': {'available': True},
+ "hosts": None}]
+ result = az._get_filtered_availability_zones(zones, True)
+ self.assertEqual(result, expected)
+
+ expected = [{'zoneName': 'zone1',
+ 'zoneState': {'available': False},
+ "hosts": None}]
+ result = az._get_filtered_availability_zones(zones, False)
+ self.assertEqual(result, expected)
+
+ def test_availability_zone_index(self):
+ req = webob.Request.blank(self.url)
+ resp = req.get_response(self._get_wsgi_instance())
+ self.assertEqual(resp.status_int, 200)
+ resp_dict = jsonutils.loads(resp.body)
+
+ self.assertIn('availabilityZoneInfo', resp_dict)
+ zones = resp_dict['availabilityZoneInfo']
+ self.assertEqual(len(zones), 2)
+ self.assertEqual(zones[0]['zoneName'], u'zone-1')
+ self.assertTrue(zones[0]['zoneState']['available'])
+ self.assertIsNone(zones[0]['hosts'])
+ self.assertEqual(zones[1]['zoneName'], u'zone-2')
+ self.assertFalse(zones[1]['zoneState']['available'])
+ self.assertIsNone(zones[1]['hosts'])
+
+ def test_availability_zone_detail(self):
+ def _formatZone(zone_dict):
+ result = []
+
+ # Zone tree view item
+ result.append({'zoneName': zone_dict['zoneName'],
+ 'zoneState': u'available'
+ if zone_dict['zoneState']['available'] else
+ u'not available'})
+
+ if zone_dict['hosts'] is not None:
+ for (host, services) in zone_dict['hosts'].items():
+ # Host tree view item
+ result.append({'zoneName': u'|- %s' % host,
+ 'zoneState': u''})
+ for (svc, state) in services.items():
+ # Service tree view item
+ result.append({'zoneName': u'| |- %s' % svc,
+ 'zoneState': u'%s %s %s' % (
+ 'enabled' if state['active'] else
+ 'disabled',
+ ':-)' if state['available'] else
+ 'XXX',
+ jsonutils.to_primitive(
+ state['updated_at']))})
+ return result
+
+ def _assertZone(zone, name, status):
+ self.assertEqual(zone['zoneName'], name)
+ self.assertEqual(zone['zoneState'], status)
+
+ availabilityZone = self.availability_zone.AvailabilityZoneController()
+
+ req_url = self.url + '/detail'
+ req = webob.Request.blank(req_url)
+ req.method = 'GET'
+ req.environ['nova.context'] = context.get_admin_context()
+ resp_dict = availabilityZone.detail(req)
+
+ self.assertIn('availabilityZoneInfo', resp_dict)
+ zones = resp_dict['availabilityZoneInfo']
+ self.assertEqual(len(zones), 3)
+
+ ''' availabilityZoneInfo field content in response body:
+ [{'zoneName': 'zone-1',
+ 'zoneState': {'available': True},
+ 'hosts': {'fake_host-1': {
+ 'nova-compute': {'active': True, 'available': True,
+ 'updated_at': datetime(2012, 12, 26, 14, 45, 25)}}}},
+ {'zoneName': 'internal',
+ 'zoneState': {'available': True},
+ 'hosts': {'fake_host-1': {
+ 'nova-sched': {'active': True, 'available': True,
+ 'updated_at': datetime(2012, 12, 26, 14, 45, 25)}},
+ 'fake_host-2': {
+ 'nova-network': {'active': True, 'available': False,
+ 'updated_at': datetime(2012, 12, 26, 14, 45, 24)}}}},
+ {'zoneName': 'zone-2',
+ 'zoneState': {'available': False},
+ 'hosts': None}]
+ '''
+
+ l0 = [u'zone-1', u'available']
+ l1 = [u'|- fake_host-1', u'']
+ l2 = [u'| |- nova-compute', u'enabled :-) 2012-12-26T14:45:25.000000']
+ l3 = [u'internal', u'available']
+ l4 = [u'|- fake_host-1', u'']
+ l5 = [u'| |- nova-sched', u'enabled :-) 2012-12-26T14:45:25.000000']
+ l6 = [u'|- fake_host-2', u'']
+ l7 = [u'| |- nova-network', u'enabled XXX 2012-12-26T14:45:24.000000']
+ l8 = [u'zone-2', u'not available']
+
+ z0 = _formatZone(zones[0])
+ z1 = _formatZone(zones[1])
+ z2 = _formatZone(zones[2])
+
+ self.assertEqual(len(z0), 3)
+ self.assertEqual(len(z1), 5)
+ self.assertEqual(len(z2), 1)
+
+ _assertZone(z0[0], l0[0], l0[1])
+ _assertZone(z0[1], l1[0], l1[1])
+ _assertZone(z0[2], l2[0], l2[1])
+ _assertZone(z1[0], l3[0], l3[1])
+ _assertZone(z1[1], l4[0], l4[1])
+ _assertZone(z1[2], l5[0], l5[1])
+ _assertZone(z1[3], l6[0], l6[1])
+ _assertZone(z1[4], l7[0], l7[1])
+ _assertZone(z2[0], l8[0], l8[1])
+
+ def test_availability_zone_detail_no_services(self):
+ expected_response = {'availabilityZoneInfo':
+ [{'zoneState': {'available': True},
+ 'hosts': {},
+ 'zoneName': 'nova'}]}
+ self.stubs.Set(availability_zones, 'get_availability_zones',
+ fake_get_availability_zones)
+ availabilityZone = self.availability_zone.AvailabilityZoneController()
+
+ req_url = self.url + '/detail'
+ req = webob.Request.blank(req_url)
+ req.method = 'GET'
+ req.environ['nova.context'] = context.get_admin_context()
+ resp_dict = availabilityZone.detail(req)
+
+ self.assertThat(resp_dict,
+ matchers.DictMatches(expected_response))
+
+
+class AvailabilityZoneApiTestV2(AvailabilityZoneApiTestV21):
+ availability_zone = az_v2
+
+ def _get_wsgi_instance(self):
+ return fakes.wsgi_app()
+
+
+class ServersControllerCreateTestV21(test.TestCase):
+ base_url = '/v2/fake/'
+
+ def setUp(self):
+ """Shared implementation for tests below that create instance."""
+ super(ServersControllerCreateTestV21, self).setUp()
+
+ self.instance_cache_num = 0
+
+ self._set_up_controller()
+
+ def instance_create(context, inst):
+ inst_type = flavors.get_flavor_by_flavor_id(3)
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ def_image_ref = 'http://localhost/images/%s' % image_uuid
+ self.instance_cache_num += 1
+ instance = fake_instance.fake_db_instance(**{
+ 'id': self.instance_cache_num,
+ 'display_name': inst['display_name'] or 'test',
+ 'uuid': FAKE_UUID,
+ 'instance_type': inst_type,
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fead::1234',
+ 'image_ref': inst.get('image_ref', def_image_ref),
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'availability_zone': 'nova',
+ 'reservation_id': inst['reservation_id'],
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "progress": 0,
+ "fixed_ips": [],
+ "task_state": "",
+ "vm_state": "",
+ "root_device_name": inst.get('root_device_name', 'vda'),
+ })
+
+ return instance
+
+ fake.stub_out_image_service(self.stubs)
+ self.stubs.Set(db, 'instance_create', instance_create)
+
+ def _set_up_controller(self):
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers_v21.ServersController(
+ extension_info=ext_info)
+ CONF.set_override('extensions_blacklist',
+ 'os-availability-zone',
+ 'osapi_v3')
+ self.no_availability_zone_controller = servers_v21.ServersController(
+ extension_info=ext_info)
+
+ def _verify_no_availability_zone(self, **kwargs):
+ self.assertNotIn('availability_zone', kwargs)
+
+ def _test_create_extra(self, params, controller):
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
+ server.update(params)
+ body = dict(server=server)
+ req = fakes.HTTPRequest.blank(self.base_url + 'servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ server = controller.create(req, body=body).obj['server']
+
+ def test_create_instance_with_availability_zone_disabled(self):
+ params = {'availability_zone': 'foo'}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self._verify_no_availability_zone(**kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params, self.no_availability_zone_controller)
+
+ def _create_instance_with_availability_zone(self, zone_name):
+ def create(*args, **kwargs):
+ self.assertIn('availability_zone', kwargs)
+ self.assertEqual('nova', kwargs['availability_zone'])
+ return old_create(*args, **kwargs)
+
+ old_create = compute_api.API.create
+ self.stubs.Set(compute_api.API, 'create', create)
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ 'availability_zone': zone_name,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.base_url + 'servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ admin_context = context.get_admin_context()
+ db.service_create(admin_context, {'host': 'host1_zones',
+ 'binary': "nova-compute",
+ 'topic': 'compute',
+ 'report_count': 0})
+ agg = db.aggregate_create(admin_context,
+ {'name': 'agg1'}, {'availability_zone': 'nova'})
+ db.aggregate_host_add(admin_context, agg['id'], 'host1_zones')
+ return req, body
+
+ def test_create_instance_with_availability_zone(self):
+ zone_name = 'nova'
+ req, body = self._create_instance_with_availability_zone(zone_name)
+ res = self.controller.create(req, body=body).obj
+ server = res['server']
+ self.assertEqual(fakes.FAKE_UUID, server['id'])
+
+ def test_create_instance_with_invalid_availability_zone_too_long(self):
+ zone_name = 'a' * 256
+ req, body = self._create_instance_with_availability_zone(zone_name)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
+
+ def test_create_instance_with_invalid_availability_zone_too_short(self):
+ zone_name = ''
+ req, body = self._create_instance_with_availability_zone(zone_name)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
+
+ def test_create_instance_with_invalid_availability_zone_not_str(self):
+ zone_name = 111
+ req, body = self._create_instance_with_availability_zone(zone_name)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
+
+ def test_create_instance_without_availability_zone(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.base_url + 'servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = self.controller.create(req, body=body).obj
+ server = res['server']
+ self.assertEqual(fakes.FAKE_UUID, server['id'])
+
+
+class ServersControllerCreateTestV2(ServersControllerCreateTestV21):
+
+ def _set_up_controller(self):
+ ext_mgr = extensions.ExtensionManager()
+ ext_mgr.extensions = {'os-availability-zone': 'fake'}
+ self.controller = servers_v2.Controller(ext_mgr)
+ ext_mgr_no_az = extensions.ExtensionManager()
+ ext_mgr_no_az.extensions = {}
+ self.no_availability_zone_controller = servers_v2.Controller(
+ ext_mgr_no_az)
+
+ def _verify_no_availability_zone(self, **kwargs):
+ self.assertIsNone(kwargs['availability_zone'])
+
+ def test_create_instance_with_invalid_availability_zone_too_long(self):
+ # NOTE: v2.0 API does not check this bad request case.
+ # So we skip this test for v2.0 API.
+ pass
+
+ def test_create_instance_with_invalid_availability_zone_too_short(self):
+ # NOTE: v2.0 API does not check this bad request case.
+ # So we skip this test for v2.0 API.
+ pass
+
+ def test_create_instance_with_invalid_availability_zone_not_str(self):
+ # NOTE: v2.0 API does not check this bad request case.
+ # So we skip this test for v2.0 API.
+ pass
+
+
+class AvailabilityZoneSerializerTest(test.NoDBTestCase):
+ def test_availability_zone_index_detail_serializer(self):
+ def _verify_zone(zone_dict, tree):
+ self.assertEqual(tree.tag, 'availabilityZone')
+ self.assertEqual(zone_dict['zoneName'], tree.get('name'))
+ self.assertEqual(str(zone_dict['zoneState']['available']),
+ tree[0].get('available'))
+
+ for _idx, host_child in enumerate(tree[1]):
+ self.assertIn(host_child.get('name'), zone_dict['hosts'])
+ svcs = zone_dict['hosts'][host_child.get('name')]
+ for _idx, svc_child in enumerate(host_child[0]):
+ self.assertIn(svc_child.get('name'), svcs)
+ svc = svcs[svc_child.get('name')]
+ self.assertEqual(len(svc_child), 1)
+
+ self.assertEqual(str(svc['available']),
+ svc_child[0].get('available'))
+ self.assertEqual(str(svc['active']),
+ svc_child[0].get('active'))
+ self.assertEqual(str(svc['updated_at']),
+ svc_child[0].get('updated_at'))
+
+ serializer = az_v2.AvailabilityZonesTemplate()
+ raw_availability_zones = \
+ [{'zoneName': 'zone-1',
+ 'zoneState': {'available': True},
+ 'hosts': {'fake_host-1': {
+ 'nova-compute': {'active': True, 'available': True,
+ 'updated_at':
+ datetime.datetime(
+ 2012, 12, 26, 14, 45, 25)}}}},
+ {'zoneName': 'internal',
+ 'zoneState': {'available': True},
+ 'hosts': {'fake_host-1': {
+ 'nova-sched': {'active': True, 'available': True,
+ 'updated_at':
+ datetime.datetime(
+ 2012, 12, 26, 14, 45, 25)}},
+ 'fake_host-2': {
+ 'nova-network': {'active': True,
+ 'available': False,
+ 'updated_at':
+ datetime.datetime(
+ 2012, 12, 26, 14, 45, 24)}}}},
+ {'zoneName': 'zone-2',
+ 'zoneState': {'available': False},
+ 'hosts': None}]
+
+ text = serializer.serialize(
+ dict(availabilityZoneInfo=raw_availability_zones))
+ tree = etree.fromstring(text)
+
+ self.assertEqual('availabilityZones', tree.tag)
+ self.assertEqual(len(raw_availability_zones), len(tree))
+ for idx, child in enumerate(tree):
+ _verify_zone(raw_availability_zones[idx], child)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_baremetal_nodes.py b/nova/tests/unit/api/openstack/compute/contrib/test_baremetal_nodes.py
new file mode 100644
index 0000000000..451c92a40b
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_baremetal_nodes.py
@@ -0,0 +1,159 @@
+# Copyright (c) 2013 NTT DOCOMO, INC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from webob import exc
+
+from nova.api.openstack.compute.contrib import baremetal_nodes as b_nodes_v2
+from nova.api.openstack.compute.plugins.v3 import baremetal_nodes \
+ as b_nodes_v21
+from nova.api.openstack import extensions
+from nova import context
+from nova import test
+from nova.tests.unit.virt.ironic import utils as ironic_utils
+
+
+class FakeRequest(object):
+
+ def __init__(self, context):
+ self.environ = {"nova.context": context}
+
+
+def fake_node(**updates):
+ node = {
+ 'id': 1,
+ 'service_host': "host",
+ 'cpus': 8,
+ 'memory_mb': 8192,
+ 'local_gb': 128,
+ 'pm_address': "10.1.2.3",
+ 'pm_user': "pm_user",
+ 'pm_password': "pm_pass",
+ 'terminal_port': 8000,
+ 'interfaces': [],
+ 'instance_uuid': 'fake-instance-uuid',
+ }
+ if updates:
+ node.update(updates)
+ return node
+
+
+def fake_node_ext_status(**updates):
+ node = fake_node(uuid='fake-uuid',
+ task_state='fake-task-state',
+ updated_at='fake-updated-at',
+ pxe_config_path='fake-pxe-config-path')
+ if updates:
+ node.update(updates)
+ return node
+
+
+FAKE_IRONIC_CLIENT = ironic_utils.FakeClient()
+
+
+@mock.patch.object(b_nodes_v21, '_get_ironic_client',
+ lambda *_: FAKE_IRONIC_CLIENT)
+class BareMetalNodesTestV21(test.NoDBTestCase):
+ def setUp(self):
+ super(BareMetalNodesTestV21, self).setUp()
+
+ self._setup()
+ self.context = context.get_admin_context()
+ self.request = FakeRequest(self.context)
+
+ def _setup(self):
+ self.controller = b_nodes_v21.BareMetalNodeController()
+
+ @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list')
+ def test_index_ironic(self, mock_list):
+ properties = {'cpus': 2, 'memory_mb': 1024, 'local_gb': 20}
+ node = ironic_utils.get_test_node(properties=properties)
+ mock_list.return_value = [node]
+
+ res_dict = self.controller.index(self.request)
+ expected_output = {'nodes':
+ [{'memory_mb': properties['memory_mb'],
+ 'host': 'IRONIC MANAGED',
+ 'disk_gb': properties['local_gb'],
+ 'interfaces': [],
+ 'task_state': None,
+ 'id': node.uuid,
+ 'cpus': properties['cpus']}]}
+ self.assertEqual(expected_output, res_dict)
+ mock_list.assert_called_once_with(detail=True)
+
+ @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
+ @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
+ def test_show_ironic(self, mock_get, mock_list_ports):
+ properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
+ node = ironic_utils.get_test_node(properties=properties)
+ port = ironic_utils.get_test_port()
+ mock_get.return_value = node
+ mock_list_ports.return_value = [port]
+
+ res_dict = self.controller.show(self.request, node.uuid)
+ expected_output = {'node':
+ {'memory_mb': properties['memory_mb'],
+ 'instance_uuid': None,
+ 'host': 'IRONIC MANAGED',
+ 'disk_gb': properties['local_gb'],
+ 'interfaces': [{'address': port.address}],
+ 'task_state': None,
+ 'id': node.uuid,
+ 'cpus': properties['cpus']}}
+ self.assertEqual(expected_output, res_dict)
+ mock_get.assert_called_once_with(node.uuid)
+ mock_list_ports.assert_called_once_with(node.uuid)
+
+ @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
+ @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
+ def test_show_ironic_no_interfaces(self, mock_get, mock_list_ports):
+ properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
+ node = ironic_utils.get_test_node(properties=properties)
+ mock_get.return_value = node
+ mock_list_ports.return_value = []
+
+ res_dict = self.controller.show(self.request, node.uuid)
+ self.assertEqual([], res_dict['node']['interfaces'])
+ mock_get.assert_called_once_with(node.uuid)
+ mock_list_ports.assert_called_once_with(node.uuid)
+
+ def test_create_ironic_not_supported(self):
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create,
+ self.request, {'node': object()})
+
+ def test_delete_ironic_not_supported(self):
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.delete,
+ self.request, 'fake-id')
+
+ def test_add_interface_ironic_not_supported(self):
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller._add_interface,
+ self.request, 'fake-id', 'fake-body')
+
+ def test_remove_interface_ironic_not_supported(self):
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller._remove_interface,
+ self.request, 'fake-id', 'fake-body')
+
+
+@mock.patch.object(b_nodes_v2, '_get_ironic_client',
+ lambda *_: FAKE_IRONIC_CLIENT)
+class BareMetalNodesTestV2(BareMetalNodesTestV21):
+ def _setup(self):
+ self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
+ self.controller = b_nodes_v2.BareMetalNodeController(self.ext_mgr)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_block_device_mapping.py b/nova/tests/unit/api/openstack/compute/contrib/test_block_device_mapping.py
new file mode 100644
index 0000000000..ab20ad85c3
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_block_device_mapping.py
@@ -0,0 +1,359 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import mox
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from webob import exc
+
+from nova.api.openstack.compute import extensions
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import block_device_mapping
+from nova.api.openstack.compute.plugins.v3 import servers as servers_v3
+from nova.api.openstack.compute import servers as servers_v2
+from nova import block_device
+from nova.compute import api as compute_api
+from nova import exception
+from nova import objects
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.image import fake
+from nova.tests.unit import matchers
+
+CONF = cfg.CONF
+
+
+class BlockDeviceMappingTestV21(test.TestCase):
+
+ def _setup_controller(self):
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers_v3.ServersController(extension_info=ext_info)
+ CONF.set_override('extensions_blacklist', 'os-block-device-mapping',
+ 'osapi_v3')
+ self.no_bdm_v2_controller = servers_v3.ServersController(
+ extension_info=ext_info)
+ CONF.set_override('extensions_blacklist', '', 'osapi_v3')
+
+ def setUp(self):
+ super(BlockDeviceMappingTestV21, self).setUp()
+ self._setup_controller()
+ fake.stub_out_image_service(self.stubs)
+
+ self.bdm = [{
+ 'no_device': None,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'uuid': 'fake',
+ 'device_name': 'vda',
+ 'delete_on_termination': False,
+ }]
+
+ def _get_servers_body(self, no_image=False):
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ 'flavorRef': 'http://localhost/123/flavors/3',
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ },
+ }
+ if no_image:
+ del body['server']['imageRef']
+ return body
+
+ def _test_create(self, params, no_image=False, override_controller=None):
+ body = self._get_servers_body(no_image)
+ body['server'].update(params)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers')
+ req.method = 'POST'
+ req.headers['content-type'] = 'application/json'
+
+ req.body = jsonutils.dumps(body)
+
+ if override_controller:
+ override_controller.create(req, body=body).obj['server']
+ else:
+ self.controller.create(req, body=body).obj['server']
+
+ def test_create_instance_with_block_device_mapping_disabled(self):
+ bdm = [{'device_name': 'foo'}]
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertNotIn('block_device_mapping', kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: bdm}
+ self._test_create(params,
+ override_controller=self.no_bdm_v2_controller)
+
+ def test_create_instance_with_volumes_enabled_no_image(self):
+ """Test that the create will fail if there is no image
+ and no bdms supplied in the request
+ """
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertNotIn('imageRef', kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, {}, no_image=True)
+
+ def test_create_instance_with_bdms_and_no_image(self):
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertThat(
+ block_device.BlockDeviceDict(self.bdm[0]),
+ matchers.DictMatches(kwargs['block_device_mapping'][0])
+ )
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ self.mox.StubOutWithMock(compute_api.API, '_validate_bdm')
+ self.mox.StubOutWithMock(compute_api.API, '_get_bdm_image_metadata')
+
+ compute_api.API._validate_bdm(
+ mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(True)
+ compute_api.API._get_bdm_image_metadata(
+ mox.IgnoreArg(), mox.IgnoreArg(), False).AndReturn({})
+ self.mox.ReplayAll()
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
+ self._test_create(params, no_image=True)
+
+ def test_create_instance_with_device_name_not_string(self):
+ self.bdm[0]['device_name'] = 123
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
+
+ @mock.patch.object(compute_api.API, 'create')
+ def test_create_instance_with_bdm_param_not_list(self, mock_create):
+ self.params = {'block_device_mapping': '/dev/vdb'}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, self.params)
+
+ def test_create_instance_with_device_name_empty(self):
+ self.bdm[0]['device_name'] = ''
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
+
+ def test_create_instance_with_device_name_too_long(self):
+ self.bdm[0]['device_name'] = 'a' * 256
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
+
+ def test_create_instance_with_space_in_device_name(self):
+ self.bdm[0]['device_name'] = 'v da'
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertTrue(kwargs['legacy_bdm'])
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
+
+ def test_create_instance_with_invalid_size(self):
+ self.bdm[0]['volume_size'] = 'hello world'
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
+
+ def test_create_instance_bdm(self):
+ bdm = [{
+ 'source_type': 'volume',
+ 'device_name': 'fake_dev',
+ 'uuid': 'fake_vol'
+ }]
+ bdm_expected = [{
+ 'source_type': 'volume',
+ 'device_name': 'fake_dev',
+ 'volume_id': 'fake_vol'
+ }]
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertFalse(kwargs['legacy_bdm'])
+ for expected, received in zip(bdm_expected,
+ kwargs['block_device_mapping']):
+ self.assertThat(block_device.BlockDeviceDict(expected),
+ matchers.DictMatches(received))
+ return old_create(*args, **kwargs)
+
+ def _validate_bdm(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: bdm}
+ self._test_create(params, no_image=True)
+
+ def test_create_instance_bdm_missing_device_name(self):
+ del self.bdm[0]['device_name']
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertFalse(kwargs['legacy_bdm'])
+            self.assertIsNotNone(
+                kwargs['block_device_mapping'][0]['device_name'])
+ return old_create(*args, **kwargs)
+
+ def _validate_bdm(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
+ self._test_create(params, no_image=True)
+
+ def test_create_instance_bdm_validation_error(self):
+ def _validate(*args, **kwargs):
+ raise exception.InvalidBDMFormat(details='Wrong BDM')
+
+ self.stubs.Set(block_device.BlockDeviceDict,
+ '_validate', _validate)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
+
+ @mock.patch('nova.compute.api.API._get_bdm_image_metadata')
+ def test_create_instance_non_bootable_volume_fails(self, fake_bdm_meta):
+ params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
+ fake_bdm_meta.side_effect = exception.InvalidBDMVolumeNotBootable(id=1)
+ self.assertRaises(exc.HTTPBadRequest, self._test_create, params,
+ no_image=True)
+
+ def test_create_instance_bdm_api_validation_fails(self):
+ self.validation_fail_test_validate_called = False
+ self.validation_fail_instance_destroy_called = False
+
+ bdm_exceptions = ((exception.InvalidBDMSnapshot, {'id': 'fake'}),
+ (exception.InvalidBDMVolume, {'id': 'fake'}),
+ (exception.InvalidBDMImage, {'id': 'fake'}),
+ (exception.InvalidBDMBootSequence, {}),
+ (exception.InvalidBDMLocalsLimit, {}))
+
+ ex_iter = iter(bdm_exceptions)
+
+ def _validate_bdm(*args, **kwargs):
+ self.validation_fail_test_validate_called = True
+ ex, kargs = ex_iter.next()
+ raise ex(**kargs)
+
+ def _instance_destroy(*args, **kwargs):
+ self.validation_fail_instance_destroy_called = True
+
+ self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
+ self.stubs.Set(objects.Instance, 'destroy', _instance_destroy)
+
+ for _unused in xrange(len(bdm_exceptions)):
+ params = {block_device_mapping.ATTRIBUTE_NAME:
+ [self.bdm[0].copy()]}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params)
+ self.assertTrue(self.validation_fail_test_validate_called)
+ self.assertTrue(self.validation_fail_instance_destroy_called)
+ self.validation_fail_test_validate_called = False
+ self.validation_fail_instance_destroy_called = False
+
+
+class BlockDeviceMappingTestV2(BlockDeviceMappingTestV21):
+
+ def _setup_controller(self):
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {'os-volumes': 'fake',
+ 'os-block-device-mapping-v2-boot': 'fake'}
+ self.controller = servers_v2.Controller(self.ext_mgr)
+ self.ext_mgr_bdm_v2 = extensions.ExtensionManager()
+ self.ext_mgr_bdm_v2.extensions = {'os-volumes': 'fake'}
+ self.no_bdm_v2_controller = servers_v2.Controller(
+ self.ext_mgr_bdm_v2)
+
+ def test_create_instance_with_block_device_mapping_disabled(self):
+ bdm = [{'device_name': 'foo'}]
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+            self.assertIsNone(kwargs['block_device_mapping'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ params = {block_device_mapping.ATTRIBUTE_NAME: bdm}
+ self._test_create(params,
+ override_controller=self.no_bdm_v2_controller)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_block_device_mapping_v1.py b/nova/tests/unit/api/openstack/compute/contrib/test_block_device_mapping_v1.py
new file mode 100644
index 0000000000..2f73f00952
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_block_device_mapping_v1.py
@@ -0,0 +1,421 @@
+# Copyright (c) 2014 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import mox
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from webob import exc
+
+from nova.api.openstack.compute import extensions
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import block_device_mapping_v1 as \
+ block_device_mapping
+from nova.api.openstack.compute.plugins.v3 import servers as servers_v3
+from nova.api.openstack.compute import servers as servers_v2
+from nova.compute import api as compute_api
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.image import fake
+
+CONF = cfg.CONF
+
+
+class BlockDeviceMappingTestV21(test.TestCase):
+
+ def _setup_controller(self):
+ ext_info = plugins.LoadedExtensionInfo()
+ CONF.set_override('extensions_blacklist', 'os-block-device-mapping',
+ 'osapi_v3')
+ self.controller = servers_v3.ServersController(extension_info=ext_info)
+ CONF.set_override('extensions_blacklist',
+ ['os-block-device-mapping-v1',
+ 'os-block-device-mapping'],
+ 'osapi_v3')
+ self.no_volumes_controller = servers_v3.ServersController(
+ extension_info=ext_info)
+ CONF.set_override('extensions_blacklist', '', 'osapi_v3')
+
+ def setUp(self):
+ super(BlockDeviceMappingTestV21, self).setUp()
+ self._setup_controller()
+ fake.stub_out_image_service(self.stubs)
+ self.volume_id = fakes.FAKE_UUID
+ self.bdm = [{
+ 'id': 1,
+ 'no_device': None,
+ 'virtual_name': None,
+ 'snapshot_id': None,
+ 'volume_id': self.volume_id,
+ 'status': 'active',
+ 'device_name': 'vda',
+ 'delete_on_termination': False,
+ 'volume_image_metadata':
+ {'test_key': 'test_value'}
+ }]
+
+ def _get_servers_body(self, no_image=False):
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ 'flavorRef': 'http://localhost/123/flavors/3',
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ },
+ }
+ if no_image:
+ del body['server']['imageRef']
+ return body
+
+ def _test_create(self, params, no_image=False, override_controller=None):
+ body = self._get_servers_body(no_image)
+ body['server'].update(params)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers')
+ req.method = 'POST'
+ req.headers['content-type'] = 'application/json'
+
+ req.body = jsonutils.dumps(body)
+
+ if override_controller:
+ override_controller.create(req, body=body).obj['server']
+ else:
+ self.controller.create(req, body=body).obj['server']
+
+ def test_create_instance_with_volumes_enabled(self):
+ params = {'block_device_mapping': self.bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ def _validate_bdm(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
+ self._test_create(params)
+
+ def test_create_instance_with_volumes_enabled_and_bdms_no_image(self):
+ """Test that the create works if there is no image supplied but
+ os-volumes extension is enabled and bdms are supplied
+ """
+ self.mox.StubOutWithMock(compute_api.API, '_validate_bdm')
+ self.mox.StubOutWithMock(compute_api.API, '_get_bdm_image_metadata')
+ volume = self.bdm[0]
+ compute_api.API._validate_bdm(mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(True)
+ compute_api.API._get_bdm_image_metadata(mox.IgnoreArg(),
+ self.bdm,
+ True).AndReturn(volume)
+ params = {'block_device_mapping': self.bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ self.assertNotIn('imageRef', kwargs)
+ return old_create(*args, **kwargs)
+
+ def _validate_bdm(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.mox.ReplayAll()
+ self._test_create(params, no_image=True)
+
+ def test_create_instance_with_volumes_disabled(self):
+ bdm = [{'device_name': 'foo'}]
+ params = {'block_device_mapping': bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+            self.assertNotIn('block_device_mapping', kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create(params,
+ override_controller=self.no_volumes_controller)
+
+ @mock.patch('nova.compute.api.API._get_bdm_image_metadata')
+ def test_create_instance_non_bootable_volume_fails(self, fake_bdm_meta):
+ bdm = [{
+ 'id': 1,
+ 'bootable': False,
+ 'volume_id': self.volume_id,
+ 'status': 'active',
+ 'device_name': 'vda',
+ }]
+ params = {'block_device_mapping': bdm}
+ fake_bdm_meta.side_effect = exception.InvalidBDMVolumeNotBootable(id=1)
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
+
+ def test_create_instance_with_device_name_not_string(self):
+ old_create = compute_api.API.create
+ self.params = {'block_device_mapping': self.bdm}
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, self.params)
+
+ @mock.patch.object(compute_api.API, 'create')
+ def test_create_instance_with_bdm_param_not_list(self, mock_create):
+ self.params = {'block_device_mapping': '/dev/vdb'}
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, self.params)
+
+ def test_create_instance_with_device_name_empty(self):
+ self.bdm[0]['device_name'] = ''
+ params = {'block_device_mapping': self.bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params)
+
+ def test_create_instance_with_device_name_too_long(self):
+        self.bdm[0]['device_name'] = 'a' * 256
+ params = {'block_device_mapping': self.bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params)
+
+ def test_create_instance_with_space_in_device_name(self):
+        self.bdm[0]['device_name'] = 'vd a'
+ params = {'block_device_mapping': self.bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertTrue(kwargs['legacy_bdm'])
+ self.assertEqual(kwargs['block_device_mapping'], self.bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params)
+
+ def test_create_instance_with_invalid_size(self):
+ bdm = [{'delete_on_termination': 1,
+ 'device_name': 'vda',
+ 'volume_size': "hello world",
+ 'volume_id': '11111111-1111-1111-1111-111111111111'}]
+ params = {'block_device_mapping': bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['block_device_mapping'], bdm)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params)
+
+ def test_create_instance_with_bdm_delete_on_termination(self):
+ bdm = [{'device_name': 'foo1', 'volume_id': 'fake_vol',
+ 'delete_on_termination': 1},
+ {'device_name': 'foo2', 'volume_id': 'fake_vol',
+ 'delete_on_termination': True},
+ {'device_name': 'foo3', 'volume_id': 'fake_vol',
+ 'delete_on_termination': 'invalid'},
+ {'device_name': 'foo4', 'volume_id': 'fake_vol',
+ 'delete_on_termination': 0},
+ {'device_name': 'foo5', 'volume_id': 'fake_vol',
+ 'delete_on_termination': False}]
+ expected_bdm = [
+ {'device_name': 'foo1', 'volume_id': 'fake_vol',
+ 'delete_on_termination': True},
+ {'device_name': 'foo2', 'volume_id': 'fake_vol',
+ 'delete_on_termination': True},
+ {'device_name': 'foo3', 'volume_id': 'fake_vol',
+ 'delete_on_termination': False},
+ {'device_name': 'foo4', 'volume_id': 'fake_vol',
+ 'delete_on_termination': False},
+ {'device_name': 'foo5', 'volume_id': 'fake_vol',
+ 'delete_on_termination': False}]
+ params = {'block_device_mapping': bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(expected_bdm, kwargs['block_device_mapping'])
+ return old_create(*args, **kwargs)
+
+ def _validate_bdm(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
+ self._test_create(params)
+
+ def test_create_instance_decide_format_legacy(self):
+ ext_info = plugins.LoadedExtensionInfo()
+ CONF.set_override('extensions_blacklist',
+ ['os-block-device-mapping',
+ 'os-block-device-mapping-v1'],
+ 'osapi_v3')
+ controller = servers_v3.ServersController(extension_info=ext_info)
+ bdm = [{'device_name': 'foo1',
+ 'volume_id': 'fake_vol',
+ 'delete_on_termination': 1}]
+
+ expected_legacy_flag = True
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ legacy_bdm = kwargs.get('legacy_bdm', True)
+ self.assertEqual(legacy_bdm, expected_legacy_flag)
+ return old_create(*args, **kwargs)
+
+ def _validate_bdm(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.stubs.Set(compute_api.API, '_validate_bdm',
+ _validate_bdm)
+
+ self._test_create({}, override_controller=controller)
+
+ params = {'block_device_mapping': bdm}
+ self._test_create(params, override_controller=controller)
+
+ def test_create_instance_both_bdm_formats(self):
+ bdm = [{'device_name': 'foo'}]
+ bdm_v2 = [{'source_type': 'volume',
+ 'uuid': 'fake_vol'}]
+ params = {'block_device_mapping': bdm,
+ 'block_device_mapping_v2': bdm_v2}
+ self.assertRaises(exc.HTTPBadRequest, self._test_create, params)
+
+
+class BlockDeviceMappingTestV2(BlockDeviceMappingTestV21):
+
+ def _setup_controller(self):
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {'os-volumes': 'fake'}
+ self.controller = servers_v2.Controller(self.ext_mgr)
+ self.ext_mgr_no_vols = extensions.ExtensionManager()
+ self.ext_mgr_no_vols.extensions = {}
+ self.no_volumes_controller = servers_v2.Controller(
+ self.ext_mgr_no_vols)
+
+ def test_create_instance_with_volumes_disabled(self):
+ bdm = [{'device_name': 'foo'}]
+ params = {'block_device_mapping': bdm}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIsNone(kwargs['block_device_mapping'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create(params,
+ override_controller=self.no_volumes_controller)
+
+ def test_create_instance_decide_format_legacy(self):
+ ext_mgr = extensions.ExtensionManager()
+ ext_mgr.extensions = {'os-volumes': 'fake',
+ 'os-block-device-mapping-v2-boot': 'fake'}
+        controller = servers_v2.Controller(ext_mgr)
+ bdm = [{'device_name': 'foo1',
+ 'volume_id': 'fake_vol',
+ 'delete_on_termination': 1}]
+
+ expected_legacy_flag = True
+
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ legacy_bdm = kwargs.get('legacy_bdm', True)
+ self.assertEqual(legacy_bdm, expected_legacy_flag)
+ return old_create(*args, **kwargs)
+
+ def _validate_bdm(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.stubs.Set(compute_api.API, '_validate_bdm',
+ _validate_bdm)
+
+ self._test_create({}, override_controller=controller)
+
+ params = {'block_device_mapping': bdm}
+ self._test_create(params, override_controller=controller)
+
+
+class TestServerCreateRequestXMLDeserializer(test.TestCase):
+
+ def setUp(self):
+ super(TestServerCreateRequestXMLDeserializer, self).setUp()
+ self.deserializer = servers_v2.CreateDeserializer()
+
+ def test_request_with_block_device_mapping(self):
+ serial_request = """
+ <server xmlns="http://docs.openstack.org/compute/api/v2"
+ name="new-server-test" imageRef="1" flavorRef="1">
+ <block_device_mapping>
+ <mapping volume_id="7329b667-50c7-46a6-b913-cb2a09dfeee0"
+ device_name="/dev/vda" virtual_name="root"
+ delete_on_termination="False" />
+ <mapping snapshot_id="f31efb24-34d2-43e1-8b44-316052956a39"
+ device_name="/dev/vdb" virtual_name="ephemeral0"
+ delete_on_termination="False" />
+ <mapping device_name="/dev/vdc" no_device="True" />
+ </block_device_mapping>
+ </server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageRef": "1",
+ "flavorRef": "1",
+ "block_device_mapping": [
+ {
+ "volume_id": "7329b667-50c7-46a6-b913-cb2a09dfeee0",
+ "device_name": "/dev/vda",
+ "virtual_name": "root",
+ "delete_on_termination": False,
+ },
+ {
+ "snapshot_id": "f31efb24-34d2-43e1-8b44-316052956a39",
+ "device_name": "/dev/vdb",
+ "virtual_name": "ephemeral0",
+ "delete_on_termination": False,
+ },
+ {
+ "device_name": "/dev/vdc",
+ "no_device": True,
+ },
+ ]
+ }}
+ self.assertEqual(request['body'], expected)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_cells.py b/nova/tests/unit/api/openstack/compute/contrib/test_cells.py
new file mode 100644
index 0000000000..1460d33e3a
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_cells.py
@@ -0,0 +1,698 @@
+# Copyright 2011-2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from lxml import etree
+from oslo.utils import timeutils
+from webob import exc
+
+from nova.api.openstack.compute.contrib import cells as cells_ext_v2
+from nova.api.openstack.compute.plugins.v3 import cells as cells_ext_v21
+from nova.api.openstack import extensions
+from nova.api.openstack import xmlutil
+from nova.cells import rpcapi as cells_rpcapi
+from nova import context
+from nova import exception
+from nova import rpc
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import utils
+
+
+class BaseCellsTest(test.NoDBTestCase):
+ def setUp(self):
+ super(BaseCellsTest, self).setUp()
+
+ self.fake_cells = [
+ dict(id=1, name='cell1', is_parent=True,
+ weight_scale=1.0, weight_offset=0.0,
+ transport_url='rabbit://bob:xxxx@r1.example.org/'),
+ dict(id=2, name='cell2', is_parent=False,
+ weight_scale=1.0, weight_offset=0.0,
+ transport_url='rabbit://alice:qwerty@r2.example.org/')]
+
+ self.fake_capabilities = [
+ {'cap1': '0,1', 'cap2': '2,3'},
+ {'cap3': '4,5', 'cap4': '5,6'}]
+
+ def fake_cell_get(_self, context, cell_name):
+ for cell in self.fake_cells:
+ if cell_name == cell['name']:
+ return cell
+ else:
+ raise exception.CellNotFound(cell_name=cell_name)
+
+ def fake_cell_create(_self, context, values):
+ cell = dict(id=1)
+ cell.update(values)
+ return cell
+
+ def fake_cell_update(_self, context, cell_id, values):
+ cell = fake_cell_get(_self, context, cell_id)
+ cell.update(values)
+ return cell
+
+ def fake_cells_api_get_all_cell_info(*args):
+ return self._get_all_cell_info(*args)
+
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_get', fake_cell_get)
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_update', fake_cell_update)
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_create', fake_cell_create)
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'get_cell_info_for_neighbors',
+ fake_cells_api_get_all_cell_info)
+
+ def _get_all_cell_info(self, *args):
+ def insecure_transport_url(url):
+ transport_url = rpc.get_transport_url(url)
+ transport_url.hosts[0].password = None
+ return str(transport_url)
+
+ cells = copy.deepcopy(self.fake_cells)
+ cells[0]['transport_url'] = insecure_transport_url(
+ cells[0]['transport_url'])
+ cells[1]['transport_url'] = insecure_transport_url(
+ cells[1]['transport_url'])
+ for i, cell in enumerate(cells):
+ cell['capabilities'] = self.fake_capabilities[i]
+ return cells
+
+
+class CellsTestV21(BaseCellsTest):
+ cell_extension = 'compute_extension:v3:os-cells'
+ bad_request = exception.ValidationError
+
+ def _get_cell_controller(self, ext_mgr):
+ return cells_ext_v21.CellsController()
+
+ def _get_request(self, resource):
+ return fakes.HTTPRequest.blank('/v2/fake/' + resource)
+
+ def setUp(self):
+ super(CellsTestV21, self).setUp()
+ self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
+ self.controller = self._get_cell_controller(self.ext_mgr)
+ self.context = context.get_admin_context()
+ self.flags(enable=True, group='cells')
+
+ def test_index(self):
+ req = self._get_request("cells")
+ res_dict = self.controller.index(req)
+
+ self.assertEqual(len(res_dict['cells']), 2)
+ for i, cell in enumerate(res_dict['cells']):
+ self.assertEqual(cell['name'], self.fake_cells[i]['name'])
+            self.assertNotIn('capabilities', cell)
+ self.assertNotIn('password', cell)
+
+ def test_detail(self):
+ req = self._get_request("cells/detail")
+ res_dict = self.controller.detail(req)
+
+ self.assertEqual(len(res_dict['cells']), 2)
+ for i, cell in enumerate(res_dict['cells']):
+ self.assertEqual(cell['name'], self.fake_cells[i]['name'])
+ self.assertEqual(cell['capabilities'], self.fake_capabilities[i])
+ self.assertNotIn('password', cell)
+
+ def test_show_bogus_cell_raises(self):
+ req = self._get_request("cells/bogus")
+ self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'bogus')
+
+ def test_get_cell_by_name(self):
+ req = self._get_request("cells/cell1")
+ res_dict = self.controller.show(req, 'cell1')
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'cell1')
+ self.assertEqual(cell['rpc_host'], 'r1.example.org')
+ self.assertNotIn('password', cell)
+
+ def _cell_delete(self):
+ call_info = {'delete_called': 0}
+
+ def fake_cell_delete(inst, context, cell_name):
+ self.assertEqual(cell_name, 'cell999')
+ call_info['delete_called'] += 1
+
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_delete', fake_cell_delete)
+
+ req = self._get_request("cells/cell999")
+ req.environ['nova.context'] = self.context
+ self.controller.delete(req, 'cell999')
+ self.assertEqual(call_info['delete_called'], 1)
+
+ def test_cell_delete(self):
+ # Test cell delete with just cell policy
+ rules = {"default": "is_admin:true",
+ self.cell_extension: "is_admin:true"}
+ self.policy.set_rules(rules)
+ self._cell_delete()
+
+ def test_cell_delete_with_delete_policy(self):
+ self._cell_delete()
+
+ def test_delete_bogus_cell_raises(self):
+ def fake_cell_delete(inst, context, cell_name):
+ return 0
+
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_delete', fake_cell_delete)
+
+ req = self._get_request("cells/cell999")
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exc.HTTPNotFound, self.controller.delete, req,
+ 'cell999')
+
+ def test_cell_delete_fails_for_invalid_policy(self):
+ def fake_cell_delete(inst, context, cell_name):
+ pass
+
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_delete', fake_cell_delete)
+
+ req = self._get_request("cells/cell999")
+ req.environ['nova.context'] = self.context
+ req.environ["nova.context"].is_admin = False
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.delete, req, 'cell999')
+
+ def _cell_create_parent(self):
+ body = {'cell': {'name': 'meow',
+ 'username': 'fred',
+ 'password': 'fubar',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ res_dict = self.controller.create(req, body=body)
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'meow')
+ self.assertEqual(cell['username'], 'fred')
+ self.assertEqual(cell['rpc_host'], 'r3.example.org')
+ self.assertEqual(cell['type'], 'parent')
+ self.assertNotIn('password', cell)
+ self.assertNotIn('is_parent', cell)
+
+ def test_cell_create_parent(self):
+ # Test create with just cells policy
+ rules = {"default": "is_admin:true",
+ self.cell_extension: "is_admin:true"}
+ self.policy.set_rules(rules)
+ self._cell_create_parent()
+
+ def test_cell_create_parent_with_create_policy(self):
+ self._cell_create_parent()
+
+ def _cell_create_child(self):
+ body = {'cell': {'name': 'meow',
+ 'username': 'fred',
+ 'password': 'fubar',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'child'}}
+
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ res_dict = self.controller.create(req, body=body)
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'meow')
+ self.assertEqual(cell['username'], 'fred')
+ self.assertEqual(cell['rpc_host'], 'r3.example.org')
+ self.assertEqual(cell['type'], 'child')
+ self.assertNotIn('password', cell)
+ self.assertNotIn('is_parent', cell)
+
+ def test_cell_create_child(self):
+ # Test create with just cells policy
+ rules = {"default": "is_admin:true",
+ self.cell_extension: "is_admin:true"}
+ self.policy.set_rules(rules)
+ self._cell_create_child()
+
+ def test_cell_create_child_with_create_policy(self):
+ self._cell_create_child()
+
+ def test_cell_create_no_name_raises(self):
+ body = {'cell': {'username': 'moocow',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ self.assertRaises(self.bad_request,
+ self.controller.create, req, body=body)
+
+ def test_cell_create_name_empty_string_raises(self):
+ body = {'cell': {'name': '',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ self.assertRaises(self.bad_request,
+ self.controller.create, req, body=body)
+
+ def test_cell_create_name_with_bang_raises(self):
+ body = {'cell': {'name': 'moo!cow',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ self.assertRaises(self.bad_request,
+ self.controller.create, req, body=body)
+
+ def test_cell_create_name_with_dot_raises(self):
+ body = {'cell': {'name': 'moo.cow',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ res_dict = self.controller.create(req, body=body)
+ cell = res_dict['cell']
+ self.assertEqual(cell['name'], 'moo.cow')
+
+ def test_cell_create_name_with_invalid_type_raises(self):
+ body = {'cell': {'name': 'moocow',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'invalid'}}
+
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ self.assertRaises(self.bad_request,
+ self.controller.create, req, body=body)
+
+ def test_cell_create_fails_for_invalid_policy(self):
+ body = {'cell': {'name': 'fake'}}
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ req.environ['nova.context'].is_admin = False
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.create, req, body=body)
+
+ def _cell_update(self):
+ body = {'cell': {'username': 'zeb',
+ 'password': 'sneaky'}}
+
+ req = self._get_request("cells/cell1")
+ req.environ['nova.context'] = self.context
+ res_dict = self.controller.update(req, 'cell1', body=body)
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'cell1')
+ self.assertEqual(cell['rpc_host'], 'r1.example.org')
+ self.assertEqual(cell['username'], 'zeb')
+ self.assertNotIn('password', cell)
+
+ def test_cell_update(self):
+ # Test cell update with just cell policy
+ rules = {"default": "is_admin:true",
+ self.cell_extension: "is_admin:true"}
+ self.policy.set_rules(rules)
+ self._cell_update()
+
+ def test_cell_update_with_update_policy(self):
+ self._cell_update()
+
+ def test_cell_update_fails_for_invalid_policy(self):
+ body = {'cell': {'name': 'got_changed'}}
+ req = self._get_request("cells/cell1")
+ req.environ['nova.context'] = self.context
+ req.environ['nova.context'].is_admin = False
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.create, req, body=body)
+
+ def test_cell_update_empty_name_raises(self):
+ body = {'cell': {'name': '',
+ 'username': 'zeb',
+ 'password': 'sneaky'}}
+
+ req = self._get_request("cells/cell1")
+ req.environ['nova.context'] = self.context
+ self.assertRaises(self.bad_request,
+ self.controller.update, req, 'cell1', body=body)
+
+ def test_cell_update_invalid_type_raises(self):
+ body = {'cell': {'username': 'zeb',
+ 'type': 'invalid',
+ 'password': 'sneaky'}}
+
+ req = self._get_request("cells/cell1")
+ req.environ['nova.context'] = self.context
+ self.assertRaises(self.bad_request,
+ self.controller.update, req, 'cell1', body=body)
+
+ def test_cell_update_without_type_specified(self):
+ body = {'cell': {'username': 'wingwj'}}
+
+ req = self._get_request("cells/cell1")
+ req.environ['nova.context'] = self.context
+ res_dict = self.controller.update(req, 'cell1', body=body)
+ cell = res_dict['cell']
+
+ self.assertEqual(cell['name'], 'cell1')
+ self.assertEqual(cell['rpc_host'], 'r1.example.org')
+ self.assertEqual(cell['username'], 'wingwj')
+ self.assertEqual(cell['type'], 'parent')
+
+ def test_cell_update_with_type_specified(self):
+ body1 = {'cell': {'username': 'wingwj', 'type': 'child'}}
+ body2 = {'cell': {'username': 'wingwj', 'type': 'parent'}}
+
+ req1 = self._get_request("cells/cell1")
+ req1.environ['nova.context'] = self.context
+ res_dict1 = self.controller.update(req1, 'cell1', body=body1)
+ cell1 = res_dict1['cell']
+
+ req2 = self._get_request("cells/cell2")
+ req2.environ['nova.context'] = self.context
+ res_dict2 = self.controller.update(req2, 'cell2', body=body2)
+ cell2 = res_dict2['cell']
+
+ self.assertEqual(cell1['name'], 'cell1')
+ self.assertEqual(cell1['rpc_host'], 'r1.example.org')
+ self.assertEqual(cell1['username'], 'wingwj')
+ self.assertEqual(cell1['type'], 'child')
+
+ self.assertEqual(cell2['name'], 'cell2')
+ self.assertEqual(cell2['rpc_host'], 'r2.example.org')
+ self.assertEqual(cell2['username'], 'wingwj')
+ self.assertEqual(cell2['type'], 'parent')
+
+ def test_cell_info(self):
+ caps = ['cap1=a;b', 'cap2=c;d']
+ self.flags(name='darksecret', capabilities=caps, group='cells')
+
+ req = self._get_request("cells/info")
+ res_dict = self.controller.info(req)
+ cell = res_dict['cell']
+ cell_caps = cell['capabilities']
+
+ self.assertEqual(cell['name'], 'darksecret')
+ self.assertEqual(cell_caps['cap1'], 'a;b')
+ self.assertEqual(cell_caps['cap2'], 'c;d')
+
+ def test_show_capacities(self):
+ if (self.cell_extension == 'compute_extension:cells'):
+ self.ext_mgr.is_loaded('os-cell-capacities').AndReturn(True)
+ self.mox.StubOutWithMock(self.controller.cells_rpcapi,
+ 'get_capacities')
+ response = {"ram_free":
+ {"units_by_mb": {"8192": 0, "512": 13,
+ "4096": 1, "2048": 3, "16384": 0},
+ "total_mb": 7680},
+ "disk_free":
+ {"units_by_mb": {"81920": 11, "20480": 46,
+ "40960": 23, "163840": 5, "0": 0},
+ "total_mb": 1052672}
+ }
+ self.controller.cells_rpcapi.\
+ get_capacities(self.context, cell_name=None).AndReturn(response)
+ self.mox.ReplayAll()
+ req = self._get_request("cells/capacities")
+ req.environ["nova.context"] = self.context
+ res_dict = self.controller.capacities(req)
+ self.assertEqual(response, res_dict['cell']['capacities'])
+
+ def test_show_capacity_fails_with_non_admin_context(self):
+ if (self.cell_extension == 'compute_extension:cells'):
+ self.ext_mgr.is_loaded('os-cell-capacities').AndReturn(True)
+ rules = {self.cell_extension: "is_admin:true"}
+ self.policy.set_rules(rules)
+
+ self.mox.ReplayAll()
+ req = self._get_request("cells/capacities")
+ req.environ["nova.context"] = self.context
+ req.environ["nova.context"].is_admin = False
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.capacities, req)
+
+ def test_show_capacities_for_invalid_cell(self):
+ if (self.cell_extension == 'compute_extension:cells'):
+ self.ext_mgr.is_loaded('os-cell-capacities').AndReturn(True)
+ self.mox.StubOutWithMock(self.controller.cells_rpcapi,
+ 'get_capacities')
+ self.controller.cells_rpcapi. \
+ get_capacities(self.context, cell_name="invalid_cell").AndRaise(
+ exception.CellNotFound(cell_name="invalid_cell"))
+ self.mox.ReplayAll()
+ req = self._get_request("cells/invalid_cell/capacities")
+ req.environ["nova.context"] = self.context
+ self.assertRaises(exc.HTTPNotFound,
+ self.controller.capacities, req, "invalid_cell")
+
+ def test_show_capacities_for_cell(self):
+ if (self.cell_extension == 'compute_extension:cells'):
+ self.ext_mgr.is_loaded('os-cell-capacities').AndReturn(True)
+ self.mox.StubOutWithMock(self.controller.cells_rpcapi,
+ 'get_capacities')
+ response = {"ram_free":
+ {"units_by_mb": {"8192": 0, "512": 13,
+ "4096": 1, "2048": 3, "16384": 0},
+ "total_mb": 7680},
+ "disk_free":
+ {"units_by_mb": {"81920": 11, "20480": 46,
+ "40960": 23, "163840": 5, "0": 0},
+ "total_mb": 1052672}
+ }
+ self.controller.cells_rpcapi.\
+ get_capacities(self.context, cell_name='cell_name').\
+ AndReturn(response)
+ self.mox.ReplayAll()
+ req = self._get_request("cells/capacities")
+ req.environ["nova.context"] = self.context
+ res_dict = self.controller.capacities(req, 'cell_name')
+ self.assertEqual(response, res_dict['cell']['capacities'])
+
+ def test_sync_instances(self):
+ call_info = {}
+
+ def sync_instances(self, context, **kwargs):
+ call_info['project_id'] = kwargs.get('project_id')
+ call_info['updated_since'] = kwargs.get('updated_since')
+ call_info['deleted'] = kwargs.get('deleted')
+
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances)
+
+ req = self._get_request("cells/sync_instances")
+ req.environ['nova.context'] = self.context
+ body = {}
+ self.controller.sync_instances(req, body=body)
+ self.assertIsNone(call_info['project_id'])
+ self.assertIsNone(call_info['updated_since'])
+
+ body = {'project_id': 'test-project'}
+ self.controller.sync_instances(req, body=body)
+ self.assertEqual(call_info['project_id'], 'test-project')
+ self.assertIsNone(call_info['updated_since'])
+
+ expected = timeutils.utcnow().isoformat()
+ if not expected.endswith("+00:00"):
+ expected += "+00:00"
+
+ body = {'updated_since': expected}
+ self.controller.sync_instances(req, body=body)
+ self.assertIsNone(call_info['project_id'])
+ self.assertEqual(call_info['updated_since'], expected)
+
+ body = {'updated_since': 'skjdfkjsdkf'}
+ self.assertRaises(self.bad_request,
+ self.controller.sync_instances, req, body=body)
+
+ body = {'deleted': False}
+ self.controller.sync_instances(req, body=body)
+ self.assertIsNone(call_info['project_id'])
+ self.assertIsNone(call_info['updated_since'])
+ self.assertEqual(call_info['deleted'], False)
+
+ body = {'deleted': 'False'}
+ self.controller.sync_instances(req, body=body)
+ self.assertIsNone(call_info['project_id'])
+ self.assertIsNone(call_info['updated_since'])
+ self.assertEqual(call_info['deleted'], False)
+
+ body = {'deleted': 'True'}
+ self.controller.sync_instances(req, body=body)
+ self.assertIsNone(call_info['project_id'])
+ self.assertIsNone(call_info['updated_since'])
+ self.assertEqual(call_info['deleted'], True)
+
+ body = {'deleted': 'foo'}
+ self.assertRaises(self.bad_request,
+ self.controller.sync_instances, req, body=body)
+
+ body = {'foo': 'meow'}
+ self.assertRaises(self.bad_request,
+ self.controller.sync_instances, req, body=body)
+
+ def test_sync_instances_fails_for_invalid_policy(self):
+ def sync_instances(self, context, **kwargs):
+ pass
+
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances)
+
+ req = self._get_request("cells/sync_instances")
+ req.environ['nova.context'] = self.context
+ req.environ['nova.context'].is_admin = False
+
+ body = {}
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.sync_instances, req, body=body)
+
+ def test_cells_disabled(self):
+ self.flags(enable=False, group='cells')
+
+ req = self._get_request("cells")
+ self.assertRaises(exc.HTTPNotImplemented,
+ self.controller.index, req)
+
+ req = self._get_request("cells/detail")
+ self.assertRaises(exc.HTTPNotImplemented,
+ self.controller.detail, req)
+
+ req = self._get_request("cells/cell1")
+ self.assertRaises(exc.HTTPNotImplemented,
+ self.controller.show, req)
+
+ self.assertRaises(exc.HTTPNotImplemented,
+ self.controller.delete, req, 'cell999')
+
+ req = self._get_request("cells/cells")
+ self.assertRaises(exc.HTTPNotImplemented,
+ self.controller.create, req, {})
+
+ req = self._get_request("cells/capacities")
+ self.assertRaises(exc.HTTPNotImplemented,
+ self.controller.capacities, req)
+
+ req = self._get_request("cells/sync_instances")
+ self.assertRaises(exc.HTTPNotImplemented,
+ self.controller.sync_instances, req, {})
+
+
+class CellsTestV2(CellsTestV21):
+ cell_extension = 'compute_extension:cells'
+ bad_request = exc.HTTPBadRequest
+
+ def _get_cell_controller(self, ext_mgr):
+ return cells_ext_v2.Controller(ext_mgr)
+
+ def test_cell_create_name_with_dot_raises(self):
+ body = {'cell': {'name': 'moo.cow',
+ 'username': 'fred',
+ 'password': 'secret',
+ 'rpc_host': 'r3.example.org',
+ 'type': 'parent'}}
+
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, req, body=body)
+
+
+class TestCellsXMLSerializer(BaseCellsTest):
+ def test_multiple_cells(self):
+ fixture = {'cells': self._get_all_cell_info()}
+
+ serializer = cells_ext_v2.CellsTemplate()
+ output = serializer.serialize(fixture)
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, '{%s}cells' % xmlutil.XMLNS_V10)
+ self.assertEqual(len(res_tree), 2)
+ self.assertEqual(res_tree[0].tag, '{%s}cell' % xmlutil.XMLNS_V10)
+ self.assertEqual(res_tree[1].tag, '{%s}cell' % xmlutil.XMLNS_V10)
+
+ def test_single_cell_with_caps(self):
+ cell = {'id': 1,
+ 'name': 'darksecret',
+ 'username': 'meow',
+ 'capabilities': {'cap1': 'a;b',
+ 'cap2': 'c;d'}}
+ fixture = {'cell': cell}
+
+ serializer = cells_ext_v2.CellTemplate()
+ output = serializer.serialize(fixture)
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, '{%s}cell' % xmlutil.XMLNS_V10)
+ self.assertEqual(res_tree.get('name'), 'darksecret')
+ self.assertEqual(res_tree.get('username'), 'meow')
+ self.assertIsNone(res_tree.get('password'))
+ self.assertEqual(len(res_tree), 1)
+
+ child = res_tree[0]
+ self.assertEqual(child.tag,
+ '{%s}capabilities' % xmlutil.XMLNS_V10)
+ for elem in child:
+ self.assertIn(elem.tag, ('{%s}cap1' % xmlutil.XMLNS_V10,
+ '{%s}cap2' % xmlutil.XMLNS_V10))
+ if elem.tag == '{%s}cap1' % xmlutil.XMLNS_V10:
+ self.assertEqual(elem.text, 'a;b')
+ elif elem.tag == '{%s}cap2' % xmlutil.XMLNS_V10:
+ self.assertEqual(elem.text, 'c;d')
+
+ def test_single_cell_without_caps(self):
+ cell = {'id': 1,
+ 'username': 'woof',
+ 'name': 'darksecret'}
+ fixture = {'cell': cell}
+
+ serializer = cells_ext_v2.CellTemplate()
+ output = serializer.serialize(fixture)
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, '{%s}cell' % xmlutil.XMLNS_V10)
+ self.assertEqual(res_tree.get('name'), 'darksecret')
+ self.assertEqual(res_tree.get('username'), 'woof')
+ self.assertIsNone(res_tree.get('password'))
+ self.assertEqual(len(res_tree), 0)
+
+
+class TestCellsXMLDeserializer(test.NoDBTestCase):
+ def test_cell_deserializer(self):
+ caps_dict = {'cap1': 'a;b',
+ 'cap2': 'c;d'}
+ caps_xml = ("<capabilities><cap1>a;b</cap1>"
+ "<cap2>c;d</cap2></capabilities>")
+ expected = {'cell': {'name': 'testcell1',
+ 'type': 'child',
+ 'rpc_host': 'localhost',
+ 'capabilities': caps_dict}}
+ intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+ "<cell><name>testcell1</name><type>child</type>"
+ "<rpc_host>localhost</rpc_host>"
+ "%s</cell>") % caps_xml
+ deserializer = cells_ext_v2.CellDeserializer()
+ result = deserializer.deserialize(intext)
+ self.assertEqual(dict(body=expected), result)
+
+ def test_with_corrupt_xml(self):
+ deserializer = cells_ext_v2.CellDeserializer()
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ deserializer.deserialize,
+ utils.killer_xml_body())
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_certificates.py b/nova/tests/unit/api/openstack/compute/contrib/test_certificates.py
new file mode 100644
index 0000000000..c7066516d8
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_certificates.py
@@ -0,0 +1,140 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import mock
+import mox
+from webob import exc
+
+from nova.api.openstack.compute.contrib import certificates as certificates_v2
+from nova.api.openstack.compute.plugins.v3 import certificates \
+ as certificates_v21
+from nova.cert import rpcapi
+from nova import context
+from nova import exception
+from nova.openstack.common import policy as common_policy
+from nova import policy
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+class CertificatesTestV21(test.NoDBTestCase):
+ certificates = certificates_v21
+ url = '/v3/os-certificates'
+ certificate_show_extension = 'compute_extension:v3:os-certificates:show'
+ certificate_create_extension = \
+ 'compute_extension:v3:os-certificates:create'
+
+ def setUp(self):
+ super(CertificatesTestV21, self).setUp()
+ self.context = context.RequestContext('fake', 'fake')
+ self.controller = self.certificates.CertificatesController()
+
+ def test_translate_certificate_view(self):
+ pk, cert = 'fakepk', 'fakecert'
+ view = self.certificates._translate_certificate_view(cert, pk)
+ self.assertEqual(view['data'], cert)
+ self.assertEqual(view['private_key'], pk)
+
+ def test_certificates_show_root(self):
+ self.mox.StubOutWithMock(self.controller.cert_rpcapi, 'fetch_ca')
+
+ self.controller.cert_rpcapi.fetch_ca(
+ mox.IgnoreArg(), project_id='fake').AndReturn('fakeroot')
+
+ self.mox.ReplayAll()
+
+ req = fakes.HTTPRequest.blank(self.url + '/root')
+ res_dict = self.controller.show(req, 'root')
+
+ response = {'certificate': {'data': 'fakeroot', 'private_key': None}}
+ self.assertEqual(res_dict, response)
+
+ def test_certificates_show_policy_failed(self):
+ rules = {
+ self.certificate_show_extension:
+ common_policy.parse_rule("!")
+ }
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.url + '/root')
+ exc = self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.show, req, 'root')
+ self.assertIn(self.certificate_show_extension,
+ exc.format_message())
+
+ def test_certificates_create_certificate(self):
+ self.mox.StubOutWithMock(self.controller.cert_rpcapi,
+ 'generate_x509_cert')
+
+ self.controller.cert_rpcapi.generate_x509_cert(
+ mox.IgnoreArg(),
+ user_id='fake_user',
+ project_id='fake').AndReturn(('fakepk', 'fakecert'))
+
+ self.mox.ReplayAll()
+
+ req = fakes.HTTPRequest.blank(self.url)
+ res_dict = self.controller.create(req)
+
+ response = {
+ 'certificate': {'data': 'fakecert',
+ 'private_key': 'fakepk'}
+ }
+ self.assertEqual(res_dict, response)
+
+ def test_certificates_create_policy_failed(self):
+ rules = {
+ self.certificate_create_extension:
+ common_policy.parse_rule("!")
+ }
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.url)
+ exc = self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.create, req)
+ self.assertIn(self.certificate_create_extension,
+ exc.format_message())
+
+ @mock.patch.object(rpcapi.CertAPI, 'fetch_ca',
+ side_effect=exception.CryptoCAFileNotFound(project='fake'))
+ def test_non_exist_certificates_show(self, mock_fetch_ca):
+ req = fakes.HTTPRequest.blank(self.url + '/root')
+ self.assertRaises(
+ exc.HTTPNotFound,
+ self.controller.show,
+ req, 'root')
+
+
+class CertificatesTestV2(CertificatesTestV21):
+ certificates = certificates_v2
+ url = '/v2/fake/os-certificates'
+ certificate_show_extension = 'compute_extension:certificates'
+ certificate_create_extension = 'compute_extension:certificates'
+
+
+class CertificatesSerializerTest(test.NoDBTestCase):
+ def test_index_serializer(self):
+ serializer = certificates_v2.CertificateTemplate()
+ text = serializer.serialize(dict(
+ certificate=dict(
+ data='fakecert',
+ private_key='fakepk'),
+ ))
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('certificate', tree.tag)
+ self.assertEqual('fakepk', tree.get('private_key'))
+ self.assertEqual('fakecert', tree.get('data'))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_cloudpipe.py b/nova/tests/unit/api/openstack/compute/contrib/test_cloudpipe.py
new file mode 100644
index 0000000000..ab3b1a58cc
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_cloudpipe.py
@@ -0,0 +1,210 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid as uuid_lib
+
+from lxml import etree
+from oslo.config import cfg
+from oslo.utils import timeutils
+from webob import exc
+
+from nova.api.openstack.compute.contrib import cloudpipe as cloudpipe_v2
+from nova.api.openstack.compute.plugins.v3 import cloudpipe as cloudpipe_v21
+from nova.api.openstack import wsgi
+from nova.compute import utils as compute_utils
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_network
+from nova.tests.unit import matchers
+from nova import utils
+
+CONF = cfg.CONF
+CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
+
+
+project_id = str(uuid_lib.uuid4().hex)
+uuid = str(uuid_lib.uuid4())
+
+
+def fake_vpn_instance():
+ return {
+ 'id': 7, 'image_ref': CONF.vpn_image_id, 'vm_state': 'active',
+ 'created_at': timeutils.parse_strtime('1981-10-20T00:00:00.000000'),
+ 'uuid': uuid, 'project_id': project_id,
+ }
+
+
+def compute_api_get_all_empty(context, search_opts=None):
+ return []
+
+
+def compute_api_get_all(context, search_opts=None):
+ return [fake_vpn_instance()]
+
+
+def utils_vpn_ping(addr, port, timeout=0.05, session_id=None):
+ return True
+
+
+class CloudpipeTestV21(test.NoDBTestCase):
+ cloudpipe = cloudpipe_v21
+ url = '/v2/fake/os-cloudpipe'
+
+ def setUp(self):
+ super(CloudpipeTestV21, self).setUp()
+ self.controller = self.cloudpipe.CloudpipeController()
+ self.stubs.Set(self.controller.compute_api, "get_all",
+ compute_api_get_all_empty)
+ self.stubs.Set(utils, 'vpn_ping', utils_vpn_ping)
+
+ def test_cloudpipe_list_no_network(self):
+
+ def fake_get_nw_info_for_instance(instance):
+ return {}
+
+ self.stubs.Set(compute_utils, "get_nw_info_for_instance",
+ fake_get_nw_info_for_instance)
+ self.stubs.Set(self.controller.compute_api, "get_all",
+ compute_api_get_all)
+ req = fakes.HTTPRequest.blank(self.url)
+ res_dict = self.controller.index(req)
+ response = {'cloudpipes': [{'project_id': project_id,
+ 'instance_id': uuid,
+ 'created_at': '1981-10-20T00:00:00Z'}]}
+ self.assertEqual(res_dict, response)
+
+ def test_cloudpipe_list(self):
+
+ def network_api_get(context, network_id):
+ self.assertEqual(context.project_id, project_id)
+ return {'vpn_public_address': '127.0.0.1',
+ 'vpn_public_port': 22}
+
+ def fake_get_nw_info_for_instance(instance):
+ return fake_network.fake_get_instance_nw_info(self.stubs)
+
+ self.stubs.Set(compute_utils, "get_nw_info_for_instance",
+ fake_get_nw_info_for_instance)
+ self.stubs.Set(self.controller.network_api, "get",
+ network_api_get)
+ self.stubs.Set(self.controller.compute_api, "get_all",
+ compute_api_get_all)
+ req = fakes.HTTPRequest.blank(self.url)
+ res_dict = self.controller.index(req)
+ response = {'cloudpipes': [{'project_id': project_id,
+ 'internal_ip': '192.168.1.100',
+ 'public_ip': '127.0.0.1',
+ 'public_port': 22,
+ 'state': 'running',
+ 'instance_id': uuid,
+ 'created_at': '1981-10-20T00:00:00Z'}]}
+ self.assertThat(res_dict, matchers.DictMatches(response))
+
+ def test_cloudpipe_create(self):
+ def launch_vpn_instance(context):
+ return ([fake_vpn_instance()], 'fake-reservation')
+
+ self.stubs.Set(self.controller.cloudpipe, 'launch_vpn_instance',
+ launch_vpn_instance)
+ body = {'cloudpipe': {'project_id': project_id}}
+ req = fakes.HTTPRequest.blank(self.url)
+ res_dict = self.controller.create(req, body=body)
+
+ response = {'instance_id': uuid}
+ self.assertEqual(res_dict, response)
+
+ def test_cloudpipe_create_no_networks(self):
+ def launch_vpn_instance(context):
+ raise exception.NoMoreNetworks
+
+ self.stubs.Set(self.controller.cloudpipe, 'launch_vpn_instance',
+ launch_vpn_instance)
+ body = {'cloudpipe': {'project_id': project_id}}
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(exc.HTTPBadRequest,
+ self.controller.create, req, body=body)
+
+ def test_cloudpipe_create_already_running(self):
+ def launch_vpn_instance(*args, **kwargs):
+ self.fail("Method should not have been called")
+
+ self.stubs.Set(self.controller.cloudpipe, 'launch_vpn_instance',
+ launch_vpn_instance)
+ self.stubs.Set(self.controller.compute_api, "get_all",
+ compute_api_get_all)
+ body = {'cloudpipe': {'project_id': project_id}}
+ req = fakes.HTTPRequest.blank(self.url)
+ res_dict = self.controller.create(req, body=body)
+ response = {'instance_id': uuid}
+ self.assertEqual(res_dict, response)
+
+ def test_cloudpipe_create_with_bad_project_id_failed(self):
+ body = {'cloudpipe': {'project_id': 'bad.project.id'}}
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
+
+
+class CloudpipeTestV2(CloudpipeTestV21):
+ cloudpipe = cloudpipe_v2
+
+ def test_cloudpipe_create_with_bad_project_id_failed(self):
+ pass
+
+
+class CloudpipesXMLSerializerTestV2(test.NoDBTestCase):
+ def test_default_serializer(self):
+ serializer = cloudpipe_v2.CloudpipeTemplate()
+ exemplar = dict(cloudpipe=dict(instance_id='1234-1234-1234-1234'))
+ text = serializer.serialize(exemplar)
+ tree = etree.fromstring(text)
+ self.assertEqual('cloudpipe', tree.tag)
+ for child in tree:
+ self.assertIn(child.tag, exemplar['cloudpipe'])
+ self.assertEqual(child.text, exemplar['cloudpipe'][child.tag])
+
+ def test_index_serializer(self):
+ serializer = cloudpipe_v2.CloudpipesTemplate()
+ exemplar = dict(cloudpipes=[
+ dict(
+ project_id='1234',
+ public_ip='1.2.3.4',
+ public_port='321',
+ instance_id='1234-1234-1234-1234',
+ created_at=timeutils.isotime(),
+ state='running'),
+ dict(
+ project_id='4321',
+ public_ip='4.3.2.1',
+ public_port='123',
+ state='pending')])
+ text = serializer.serialize(exemplar)
+ tree = etree.fromstring(text)
+ self.assertEqual('cloudpipes', tree.tag)
+ self.assertEqual(len(exemplar['cloudpipes']), len(tree))
+ for idx, cl_pipe in enumerate(tree):
+ kp_data = exemplar['cloudpipes'][idx]
+ for child in cl_pipe:
+ self.assertIn(child.tag, kp_data)
+ self.assertEqual(child.text, kp_data[child.tag])
+
+ def test_deserializer(self):
+ deserializer = wsgi.XMLDeserializer()
+ exemplar = dict(cloudpipe=dict(project_id='4321'))
+ intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+ '<cloudpipe><project_id>4321</project_id></cloudpipe>')
+ result = deserializer.deserialize(intext)['body']
+ self.assertEqual(result, exemplar)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_cloudpipe_update.py b/nova/tests/unit/api/openstack/compute/contrib/test_cloudpipe_update.py
new file mode 100644
index 0000000000..23faf6275a
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_cloudpipe_update.py
@@ -0,0 +1,99 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+
+from nova.api.openstack.compute.contrib import cloudpipe_update as clup_v2
+from nova.api.openstack.compute.plugins.v3 import cloudpipe as clup_v21
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_network
+
+
+fake_networks = [fake_network.fake_network(1),
+ fake_network.fake_network(2)]
+
+
+def fake_project_get_networks(context, project_id, associate=True):
+ return fake_networks
+
+
+def fake_network_update(context, network_id, values):
+ for network in fake_networks:
+ if network['id'] == network_id:
+ for key in values:
+ network[key] = values[key]
+
+
+class CloudpipeUpdateTestV21(test.NoDBTestCase):
+ bad_request = exception.ValidationError
+
+ def setUp(self):
+ super(CloudpipeUpdateTestV21, self).setUp()
+ self.stubs.Set(db, "project_get_networks", fake_project_get_networks)
+ self.stubs.Set(db, "network_update", fake_network_update)
+ self._setup()
+
+ def _setup(self):
+ self.controller = clup_v21.CloudpipeController()
+
+    def _check_status(self, expected_status, res, controller_method):
+        self.assertEqual(expected_status, controller_method.wsgi_code)
+
+ def test_cloudpipe_configure_project(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-cloudpipe/configure-project')
+ body = {"configure_project": {"vpn_ip": "1.2.3.4", "vpn_port": 222}}
+ result = self.controller.update(req, 'configure-project',
+ body=body)
+ self._check_status(202, result, self.controller.update)
+ self.assertEqual(fake_networks[0]['vpn_public_address'], "1.2.3.4")
+ self.assertEqual(fake_networks[0]['vpn_public_port'], 222)
+
+ def test_cloudpipe_configure_project_bad_url(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-cloudpipe/configure-projectx')
+ body = {"configure_project": {"vpn_ip": "1.2.3.4", "vpn_port": 222}}
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req,
+ 'configure-projectx', body=body)
+
+ def test_cloudpipe_configure_project_bad_data(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-cloudpipe/configure-project')
+ body = {"configure_project": {"vpn_ipxx": "1.2.3.4", "vpn_port": 222}}
+ self.assertRaises(self.bad_request,
+ self.controller.update, req,
+ 'configure-project', body=body)
+
+ def test_cloudpipe_configure_project_bad_vpn_port(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-cloudpipe/configure-project')
+ body = {"configure_project": {"vpn_ipxx": "1.2.3.4",
+ "vpn_port": "foo"}}
+ self.assertRaises(self.bad_request,
+ self.controller.update, req,
+ 'configure-project', body=body)
+
+
+class CloudpipeUpdateTestV2(CloudpipeUpdateTestV21):
+ bad_request = webob.exc.HTTPBadRequest
+
+ def _setup(self):
+ self.controller = clup_v2.CloudpipeUpdateController()
+
+    def _check_status(self, expected_status, res, controller_method):
+ self.assertEqual(expected_status, res.status_int)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_config_drive.py b/nova/tests/unit/api/openstack/compute/contrib/test_config_drive.py
new file mode 100644
index 0000000000..ef94db0d23
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_config_drive.py
@@ -0,0 +1,260 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import config_drive as config_drive_v2
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import config_drive \
+ as config_drive_v21
+from nova.api.openstack.compute.plugins.v3 import servers as servers_v21
+from nova.api.openstack.compute import servers as servers_v2
+from nova.api.openstack import extensions
+from nova.compute import api as compute_api
+from nova.compute import flavors
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.image import fake
+
+
+CONF = cfg.CONF
+
+
+class ConfigDriveTestV21(test.TestCase):
+ base_url = '/v2/fake/servers/'
+
+ def _setup_wsgi(self):
+ self.app = fakes.wsgi_app_v21(init_only=('servers', 'os-config-drive'))
+
+ def _get_config_drive_controller(self):
+ return config_drive_v21.ConfigDriveController()
+
+ def setUp(self):
+ super(ConfigDriveTestV21, self).setUp()
+ self.Controller = self._get_config_drive_controller()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+ self._setup_wsgi()
+
+ def test_show(self):
+ self.stubs.Set(db, 'instance_get',
+ fakes.fake_instance_get())
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get())
+ req = webob.Request.blank(self.base_url + '1')
+ req.headers['Content-Type'] = 'application/json'
+ response = req.get_response(self.app)
+ self.assertEqual(response.status_int, 200)
+ res_dict = jsonutils.loads(response.body)
+ self.assertIn('config_drive', res_dict['server'])
+
+ def test_detail_servers(self):
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fakes.fake_instance_get_all_by_filters())
+ req = fakes.HTTPRequest.blank(self.base_url + 'detail')
+ res = req.get_response(self.app)
+ server_dicts = jsonutils.loads(res.body)['servers']
+ self.assertNotEqual(len(server_dicts), 0)
+ for server_dict in server_dicts:
+ self.assertIn('config_drive', server_dict)
+
+
+class ConfigDriveTestV2(ConfigDriveTestV21):
+
+ def _get_config_drive_controller(self):
+ return config_drive_v2.Controller()
+
+ def _setup_wsgi(self):
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Config_drive'])
+ self.app = fakes.wsgi_app(init_only=('servers',))
+
+
+class ServersControllerCreateTestV21(test.TestCase):
+ base_url = '/v2/fake/'
+ bad_request = exception.ValidationError
+
+ def _set_up_controller(self):
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers_v21.ServersController(
+ extension_info=ext_info)
+ CONF.set_override('extensions_blacklist',
+ 'os-config-drive',
+ 'osapi_v3')
+ self.no_config_drive_controller = servers_v21.ServersController(
+ extension_info=ext_info)
+
+ def _verfiy_config_drive(self, **kwargs):
+ self.assertNotIn('config_drive', kwargs)
+
+ def _initialize_extension(self):
+ pass
+
+ def setUp(self):
+ """Shared implementation for tests below that create an instance."""
+ super(ServersControllerCreateTestV21, self).setUp()
+
+ self.instance_cache_num = 0
+ self._set_up_controller()
+
+ def instance_create(context, inst):
+ inst_type = flavors.get_flavor_by_flavor_id(3)
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ def_image_ref = 'http://localhost/images/%s' % image_uuid
+ self.instance_cache_num += 1
+ instance = fake_instance.fake_db_instance(**{
+ 'id': self.instance_cache_num,
+ 'display_name': inst['display_name'] or 'test',
+ 'uuid': fakes.FAKE_UUID,
+ 'instance_type': inst_type,
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fead::1234',
+ 'image_ref': inst.get('image_ref', def_image_ref),
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'reservation_id': inst['reservation_id'],
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "progress": 0,
+ "fixed_ips": [],
+ "task_state": "",
+ "vm_state": "",
+ "root_device_name": inst.get('root_device_name', 'vda'),
+ })
+
+ return instance
+
+ fake.stub_out_image_service(self.stubs)
+ self.stubs.Set(db, 'instance_create', instance_create)
+
+ def _test_create_extra(self, params, override_controller):
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
+ server.update(params)
+ body = dict(server=server)
+ req = fakes.HTTPRequest.blank(self.base_url + 'servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ if override_controller is not None:
+ server = override_controller.create(req, body=body).obj['server']
+ else:
+ server = self.controller.create(req, body=body).obj['server']
+
+ def test_create_instance_with_config_drive_disabled(self):
+ params = {'config_drive': "False"}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self._verfiy_config_drive(**kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params,
+ override_controller=self.no_config_drive_controller)
+
+ def _create_instance_body_of_config_drive(self, param):
+ self._initialize_extension()
+
+ def create(*args, **kwargs):
+ self.assertIn('config_drive', kwargs)
+ return old_create(*args, **kwargs)
+
+ old_create = compute_api.API.create
+ self.stubs.Set(compute_api.API, 'create', create)
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
+ body = {
+ 'server': {
+ 'name': 'config_drive_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'config_drive': param,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.base_url + 'servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ return req, body
+
+ def test_create_instance_with_config_drive(self):
+ param = True
+ req, body = self._create_instance_body_of_config_drive(param)
+ res = self.controller.create(req, body=body).obj
+ server = res['server']
+ self.assertEqual(fakes.FAKE_UUID, server['id'])
+
+ def test_create_instance_with_config_drive_as_boolean_string(self):
+ param = 'false'
+ req, body = self._create_instance_body_of_config_drive(param)
+ res = self.controller.create(req, body=body).obj
+ server = res['server']
+ self.assertEqual(fakes.FAKE_UUID, server['id'])
+
+ def test_create_instance_with_bad_config_drive(self):
+ param = 12345
+ req, body = self._create_instance_body_of_config_drive(param)
+ self.assertRaises(self.bad_request,
+ self.controller.create, req, body=body)
+
+ def test_create_instance_without_config_drive(self):
+ param = True
+ req, body = self._create_instance_body_of_config_drive(param)
+ del body['server']['config_drive']
+ res = self.controller.create(req, body=body).obj
+ server = res['server']
+ self.assertEqual(fakes.FAKE_UUID, server['id'])
+
+ def test_create_instance_with_empty_config_drive(self):
+ param = ''
+ req, body = self._create_instance_body_of_config_drive(param)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
+
+
+class ServersControllerCreateTestV2(ServersControllerCreateTestV21):
+ bad_request = webob.exc.HTTPBadRequest
+
+ def _set_up_controller(self):
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = servers_v2.Controller(self.ext_mgr)
+ self.no_config_drive_controller = None
+
+ def _verfiy_config_drive(self, **kwargs):
+ self.assertIsNone(kwargs['config_drive'])
+
+ def _initialize_extension(self):
+ self.ext_mgr.extensions = {'os-config-drive': 'fake'}
+
+ def test_create_instance_with_empty_config_drive(self):
+ param = ''
+ req, body = self._create_instance_body_of_config_drive(param)
+ res = self.controller.create(req, body=body).obj
+ server = res['server']
+ self.assertEqual(fakes.FAKE_UUID, server['id'])
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_console_auth_tokens.py b/nova/tests/unit/api/openstack/compute/contrib/test_console_auth_tokens.py
new file mode 100644
index 0000000000..eef4cd62ea
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_console_auth_tokens.py
@@ -0,0 +1,103 @@
+# Copyright 2013 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.consoleauth import rpcapi as consoleauth_rpcapi
+from nova import context
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+CONF = cfg.CONF
+CONF.import_opt('osapi_compute_ext_list', 'nova.api.openstack.compute.contrib')
+
+_FAKE_CONNECT_INFO = {'instance_uuid': 'fake_instance_uuid',
+ 'host': 'fake_host',
+ 'port': 'fake_port',
+ 'internal_access_path': 'fake_access_path',
+ 'console_type': 'rdp-html5'}
+
+
+def _fake_check_token(self, context, token):
+ return _FAKE_CONNECT_INFO
+
+
+def _fake_check_token_not_found(self, context, token):
+ return None
+
+
+def _fake_check_token_unauthorized(self, context, token):
+ connect_info = _FAKE_CONNECT_INFO
+ connect_info['console_type'] = 'unauthorized_console_type'
+ return connect_info
+
+
+class ConsoleAuthTokensExtensionTest(test.TestCase):
+
+ _FAKE_URL = '/v2/fake/os-console-auth-tokens/1'
+
+ _EXPECTED_OUTPUT = {'console': {'instance_uuid': 'fake_instance_uuid',
+ 'host': 'fake_host',
+ 'port': 'fake_port',
+ 'internal_access_path':
+ 'fake_access_path'}}
+
+ def setUp(self):
+ super(ConsoleAuthTokensExtensionTest, self).setUp()
+ self.stubs.Set(consoleauth_rpcapi.ConsoleAuthAPI, 'check_token',
+ _fake_check_token)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Console_auth_tokens'])
+
+ ctxt = self._get_admin_context()
+ self.app = fakes.wsgi_app(init_only=('os-console-auth-tokens',),
+ fake_auth_context=ctxt)
+
+ def _get_admin_context(self):
+ ctxt = context.get_admin_context()
+ ctxt.user_id = 'fake'
+ ctxt.project_id = 'fake'
+ return ctxt
+
+ def _create_request(self):
+ req = webob.Request.blank(self._FAKE_URL)
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+ return req
+
+ def test_get_console_connect_info(self):
+ req = self._create_request()
+ res = req.get_response(self.app)
+ self.assertEqual(200, res.status_int)
+ output = jsonutils.loads(res.body)
+ self.assertEqual(self._EXPECTED_OUTPUT, output)
+
+ def test_get_console_connect_info_token_not_found(self):
+ self.stubs.Set(consoleauth_rpcapi.ConsoleAuthAPI, 'check_token',
+ _fake_check_token_not_found)
+ req = self._create_request()
+ res = req.get_response(self.app)
+ self.assertEqual(404, res.status_int)
+
+ def test_get_console_connect_info_unauthorized_console_type(self):
+ self.stubs.Set(consoleauth_rpcapi.ConsoleAuthAPI, 'check_token',
+ _fake_check_token_unauthorized)
+ req = self._create_request()
+ res = req.get_response(self.app)
+ self.assertEqual(401, res.status_int)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_console_output.py b/nova/tests/unit/api/openstack/compute/contrib/test_console_output.py
new file mode 100644
index 0000000000..441899a19b
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_console_output.py
@@ -0,0 +1,171 @@
+# Copyright 2011 Eldar Nugaev
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import string
+
+from oslo.serialization import jsonutils
+
+from nova.compute import api as compute_api
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+
+def fake_get_console_output(self, _context, _instance, tail_length):
+ fixture = [str(i) for i in range(5)]
+
+ if tail_length is None:
+ pass
+ elif tail_length == 0:
+ fixture = []
+ else:
+ fixture = fixture[-int(tail_length):]
+
+ return '\n'.join(fixture)
+
+
+def fake_get_console_output_not_ready(self, _context, _instance, tail_length):
+ raise exception.InstanceNotReady(instance_id=_instance["uuid"])
+
+
+def fake_get_console_output_all_characters(self, _ctx, _instance, _tail_len):
+ return string.printable
+
+
+def fake_get(self, context, instance_uuid, want_objects=False,
+ expected_attrs=None):
+ return fake_instance.fake_instance_obj(context, **{'uuid': instance_uuid})
+
+
+def fake_get_not_found(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+
+class ConsoleOutputExtensionTestV21(test.NoDBTestCase):
+ application_type = "application/json"
+ action_url = '/v2/fake/servers/1/action'
+
+ def setUp(self):
+ super(ConsoleOutputExtensionTestV21, self).setUp()
+ self.stubs.Set(compute_api.API, 'get_console_output',
+ fake_get_console_output)
+ self.stubs.Set(compute_api.API, 'get', fake_get)
+ self.app = self._get_app()
+
+ def _get_app(self):
+ return fakes.wsgi_app_v21(init_only=('servers',
+ 'os-console-output'))
+
+ def _get_response(self, length_dict=None):
+ length_dict = length_dict or {}
+ body = {'os-getConsoleOutput': length_dict}
+ req = fakes.HTTPRequest.blank(self.action_url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = self.application_type
+ res = req.get_response(self.app)
+ return res
+
+ def test_get_text_console_instance_action(self):
+ res = self._get_response()
+ output = jsonutils.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual({'output': '0\n1\n2\n3\n4'}, output)
+
+ def test_get_console_output_with_tail(self):
+ res = self._get_response(length_dict={'length': 3})
+ output = jsonutils.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual({'output': '2\n3\n4'}, output)
+
+ def test_get_console_output_with_none_length(self):
+ res = self._get_response(length_dict={'length': None})
+ output = jsonutils.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual({'output': '0\n1\n2\n3\n4'}, output)
+
+ def test_get_console_output_with_length_as_str(self):
+ res = self._get_response(length_dict={'length': '3'})
+ output = jsonutils.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual({'output': '2\n3\n4'}, output)
+
+ def test_get_console_output_filtered_characters(self):
+ self.stubs.Set(compute_api.API, 'get_console_output',
+ fake_get_console_output_all_characters)
+ res = self._get_response()
+ output = jsonutils.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ expect = string.digits + string.letters + string.punctuation + ' \t\n'
+ self.assertEqual({'output': expect}, output)
+
+ def test_get_text_console_no_instance(self):
+ self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
+ res = self._get_response()
+ self.assertEqual(404, res.status_int)
+
+ def test_get_text_console_no_instance_on_get_output(self):
+ self.stubs.Set(compute_api.API,
+ 'get_console_output',
+ fake_get_not_found)
+ res = self._get_response()
+ self.assertEqual(404, res.status_int)
+
+ def _get_console_output_bad_request_case(self, body):
+ req = fakes.HTTPRequest.blank(self.action_url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.app)
+ self.assertEqual(400, res.status_int)
+
+ def test_get_console_output_with_non_integer_length(self):
+ body = {'os-getConsoleOutput': {'length': 'NaN'}}
+ self._get_console_output_bad_request_case(body)
+
+ def test_get_text_console_bad_body(self):
+ body = {}
+ self._get_console_output_bad_request_case(body)
+
+ def test_get_console_output_with_length_as_float(self):
+ body = {'os-getConsoleOutput': {'length': 2.5}}
+ self._get_console_output_bad_request_case(body)
+
+ def test_get_console_output_not_ready(self):
+ self.stubs.Set(compute_api.API, 'get_console_output',
+ fake_get_console_output_not_ready)
+ res = self._get_response(length_dict={'length': 3})
+ self.assertEqual(409, res.status_int)
+
+ def test_not_implemented(self):
+ self.stubs.Set(compute_api.API, 'get_console_output',
+ fakes.fake_not_implemented)
+ res = self._get_response()
+ self.assertEqual(501, res.status_int)
+
+ def test_get_console_output_with_boolean_length(self):
+ res = self._get_response(length_dict={'length': True})
+ self.assertEqual(400, res.status_int)
+
+
+class ConsoleOutputExtensionTestV2(ConsoleOutputExtensionTestV21):
+ need_osapi_compute_extension = True
+
+ def _get_app(self):
+ self.flags(osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Console_output'])
+ return fakes.wsgi_app(init_only=('servers',))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_consoles.py b/nova/tests/unit/api/openstack/compute/contrib/test_consoles.py
new file mode 100644
index 0000000000..debd1e7f5f
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_consoles.py
@@ -0,0 +1,587 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.serialization import jsonutils
+import webob
+
+from nova.compute import api as compute_api
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+def fake_get_vnc_console(self, _context, _instance, _console_type):
+ return {'url': 'http://fake'}
+
+
+def fake_get_spice_console(self, _context, _instance, _console_type):
+ return {'url': 'http://fake'}
+
+
+def fake_get_rdp_console(self, _context, _instance, _console_type):
+ return {'url': 'http://fake'}
+
+
+def fake_get_serial_console(self, _context, _instance, _console_type):
+ return {'url': 'http://fake'}
+
+
+def fake_get_vnc_console_invalid_type(self, _context,
+ _instance, _console_type):
+ raise exception.ConsoleTypeInvalid(console_type=_console_type)
+
+
+def fake_get_spice_console_invalid_type(self, _context,
+ _instance, _console_type):
+ raise exception.ConsoleTypeInvalid(console_type=_console_type)
+
+
+def fake_get_rdp_console_invalid_type(self, _context,
+ _instance, _console_type):
+ raise exception.ConsoleTypeInvalid(console_type=_console_type)
+
+
+def fake_get_vnc_console_type_unavailable(self, _context,
+ _instance, _console_type):
+ raise exception.ConsoleTypeUnavailable(console_type=_console_type)
+
+
+def fake_get_spice_console_type_unavailable(self, _context,
+ _instance, _console_type):
+ raise exception.ConsoleTypeUnavailable(console_type=_console_type)
+
+
+def fake_get_rdp_console_type_unavailable(self, _context,
+ _instance, _console_type):
+ raise exception.ConsoleTypeUnavailable(console_type=_console_type)
+
+
+def fake_get_vnc_console_not_ready(self, _context, instance, _console_type):
+ raise exception.InstanceNotReady(instance_id=instance["uuid"])
+
+
+def fake_get_spice_console_not_ready(self, _context, instance, _console_type):
+ raise exception.InstanceNotReady(instance_id=instance["uuid"])
+
+
+def fake_get_rdp_console_not_ready(self, _context, instance, _console_type):
+ raise exception.InstanceNotReady(instance_id=instance["uuid"])
+
+
+def fake_get_vnc_console_not_found(self, _context, instance, _console_type):
+ raise exception.InstanceNotFound(instance_id=instance["uuid"])
+
+
+def fake_get_spice_console_not_found(self, _context, instance, _console_type):
+ raise exception.InstanceNotFound(instance_id=instance["uuid"])
+
+
+def fake_get_rdp_console_not_found(self, _context, instance, _console_type):
+ raise exception.InstanceNotFound(instance_id=instance["uuid"])
+
+
+def fake_get(self, context, instance_uuid, want_objects=False,
+ expected_attrs=None):
+ return {'uuid': instance_uuid}
+
+
+def fake_get_not_found(self, context, instance_uuid, want_objects=False,
+ expected_attrs=None):
+ raise exception.InstanceNotFound(instance_id=instance_uuid)
+
+
+class ConsolesExtensionTestV21(test.NoDBTestCase):
+ url = '/v2/fake/servers/1/action'
+
+ def _setup_wsgi(self):
+ self.app = fakes.wsgi_app_v21(init_only=('servers',
+ 'os-remote-consoles'))
+
+ def setUp(self):
+ super(ConsolesExtensionTestV21, self).setUp()
+ self.stubs.Set(compute_api.API, 'get_vnc_console',
+ fake_get_vnc_console)
+ self.stubs.Set(compute_api.API, 'get_spice_console',
+ fake_get_spice_console)
+ self.stubs.Set(compute_api.API, 'get_rdp_console',
+ fake_get_rdp_console)
+ self.stubs.Set(compute_api.API, 'get_serial_console',
+ fake_get_serial_console)
+ self.stubs.Set(compute_api.API, 'get', fake_get)
+ self._setup_wsgi()
+
+ def test_get_vnc_console(self):
+ body = {'os-getVNCConsole': {'type': 'novnc'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ output = jsonutils.loads(res.body)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(output,
+ {u'console': {u'url': u'http://fake', u'type': u'novnc'}})
+
+ def test_get_vnc_console_not_ready(self):
+ self.stubs.Set(compute_api.API, 'get_vnc_console',
+ fake_get_vnc_console_not_ready)
+ body = {'os-getVNCConsole': {'type': 'novnc'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ jsonutils.loads(res.body)
+ self.assertEqual(res.status_int, 409)
+
+ def test_get_vnc_console_no_type(self):
+ self.stubs.Set(compute_api.API, 'get_vnc_console',
+ fake_get_vnc_console_invalid_type)
+ body = {'os-getVNCConsole': {}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_get_vnc_console_no_instance(self):
+ self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
+ body = {'os-getVNCConsole': {'type': 'novnc'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 404)
+
+ def test_get_vnc_console_no_instance_on_console_get(self):
+ self.stubs.Set(compute_api.API, 'get_vnc_console',
+ fake_get_vnc_console_not_found)
+ body = {'os-getVNCConsole': {'type': 'novnc'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 404)
+
+ def test_get_vnc_console_invalid_type(self):
+ body = {'os-getVNCConsole': {'type': 'invalid'}}
+ self.stubs.Set(compute_api.API, 'get_vnc_console',
+ fake_get_vnc_console_invalid_type)
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_get_vnc_console_type_unavailable(self):
+ body = {'os-getVNCConsole': {'type': 'unavailable'}}
+ self.stubs.Set(compute_api.API, 'get_vnc_console',
+ fake_get_vnc_console_type_unavailable)
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(400, res.status_int)
+
+ def test_get_vnc_console_not_implemented(self):
+ self.stubs.Set(compute_api.API, 'get_vnc_console',
+ fakes.fake_not_implemented)
+
+ body = {'os-getVNCConsole': {'type': 'novnc'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 501)
+
+ def test_get_spice_console(self):
+ body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ output = jsonutils.loads(res.body)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(output,
+ {u'console': {u'url': u'http://fake', u'type': u'spice-html5'}})
+
+ def test_get_spice_console_not_ready(self):
+ self.stubs.Set(compute_api.API, 'get_spice_console',
+ fake_get_spice_console_not_ready)
+ body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ jsonutils.loads(res.body)
+ self.assertEqual(res.status_int, 409)
+
+ def test_get_spice_console_no_type(self):
+ self.stubs.Set(compute_api.API, 'get_spice_console',
+ fake_get_spice_console_invalid_type)
+ body = {'os-getSPICEConsole': {}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_get_spice_console_no_instance(self):
+ self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
+ body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 404)
+
+ def test_get_spice_console_no_instance_on_console_get(self):
+ self.stubs.Set(compute_api.API, 'get_spice_console',
+ fake_get_spice_console_not_found)
+ body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 404)
+
+ def test_get_spice_console_invalid_type(self):
+ body = {'os-getSPICEConsole': {'type': 'invalid'}}
+ self.stubs.Set(compute_api.API, 'get_spice_console',
+ fake_get_spice_console_invalid_type)
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_get_spice_console_not_implemented(self):
+ body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
+ self.stubs.Set(compute_api.API, 'get_spice_console',
+ fakes.fake_not_implemented)
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 501)
+
+ def test_get_spice_console_type_unavailable(self):
+ body = {'os-getSPICEConsole': {'type': 'unavailable'}}
+ self.stubs.Set(compute_api.API, 'get_spice_console',
+ fake_get_spice_console_type_unavailable)
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(400, res.status_int)
+
+ def test_get_rdp_console(self):
+ body = {'os-getRDPConsole': {'type': 'rdp-html5'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ output = jsonutils.loads(res.body)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(output,
+ {u'console': {u'url': u'http://fake', u'type': u'rdp-html5'}})
+
+ def test_get_rdp_console_not_ready(self):
+ self.stubs.Set(compute_api.API, 'get_rdp_console',
+ fake_get_rdp_console_not_ready)
+ body = {'os-getRDPConsole': {'type': 'rdp-html5'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ jsonutils.loads(res.body)
+ self.assertEqual(res.status_int, 409)
+
+ def test_get_rdp_console_no_type(self):
+ self.stubs.Set(compute_api.API, 'get_rdp_console',
+ fake_get_rdp_console_invalid_type)
+ body = {'os-getRDPConsole': {}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_get_rdp_console_no_instance(self):
+ self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
+ body = {'os-getRDPConsole': {'type': 'rdp-html5'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 404)
+
+ def test_get_rdp_console_no_instance_on_console_get(self):
+ self.stubs.Set(compute_api.API, 'get_rdp_console',
+ fake_get_rdp_console_not_found)
+ body = {'os-getRDPConsole': {'type': 'rdp-html5'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 404)
+
+ def test_get_rdp_console_invalid_type(self):
+ body = {'os-getRDPConsole': {'type': 'invalid'}}
+ self.stubs.Set(compute_api.API, 'get_rdp_console',
+ fake_get_rdp_console_invalid_type)
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_get_rdp_console_type_unavailable(self):
+ body = {'os-getRDPConsole': {'type': 'unavailable'}}
+ self.stubs.Set(compute_api.API, 'get_rdp_console',
+ fake_get_rdp_console_type_unavailable)
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self.app)
+ self.assertEqual(400, res.status_int)
+
+ def test_get_vnc_console_with_undefined_param(self):
+ body = {'os-getVNCConsole': {'type': 'novnc', 'undefined': 'foo'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.app)
+ self.assertEqual(400, res.status_int)
+
+ def test_get_spice_console_with_undefined_param(self):
+ body = {'os-getSPICEConsole': {'type': 'spice-html5',
+ 'undefined': 'foo'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.app)
+ self.assertEqual(400, res.status_int)
+
+ def test_get_rdp_console_with_undefined_param(self):
+ body = {'os-getRDPConsole': {'type': 'rdp-html5', 'undefined': 'foo'}}
+ req = webob.Request.blank(self.url)
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.app)
+ self.assertEqual(400, res.status_int)
+
+
class ConsolesExtensionTestV2(ConsolesExtensionTestV21):
    """Console-action tests run against the legacy v2 WSGI stack.

    Inherits every case from the v2.1 suite, disables the strict
    undefined-parameter cases (the v2 stack built here does not reject
    them), and adds the os-getSerialConsole action tests.
    """

    def _setup_wsgi(self):
        # Build a v2 application with only the Consoles extension selected.
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Consoles'])
        self.app = fakes.wsgi_app(init_only=('servers',))

    # The inherited undefined-parameter tests assert a 400; that strict
    # validation is not performed by this v2 app, so disable them here.
    def test_get_vnc_console_with_undefined_param(self):
        pass

    def test_get_spice_console_with_undefined_param(self):
        pass

    def test_get_rdp_console_with_undefined_param(self):
        pass

    def _post_serial_console(self, body):
        """POST *body* as a server action and return the raw response.

        :param body: JSON-serializable action body for the request
        :returns: the webob response from the servers action endpoint
        """
        req = webob.Request.blank(self.url)
        req.method = "POST"
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        return req.get_response(self.app)

    def _check_serial_console_failure(self, get_serial_console, side_effect,
                                      expected_status, action_body):
        """Drive os-getSerialConsole with a failing compute API.

        :param get_serial_console: patched compute API mock
        :param side_effect: exception the compute API should raise
        :param expected_status: HTTP status the action must translate it to
        :param action_body: contents of the 'os-getSerialConsole' dict
        """
        get_serial_console.side_effect = side_effect
        res = self._post_serial_console({'os-getSerialConsole': action_body})
        self.assertEqual(expected_status, res.status_int)
        self.assertTrue(get_serial_console.called)

    def test_get_serial_console(self):
        """Happy path: the action returns 200 with the console url/type."""
        res = self._post_serial_console(
            {'os-getSerialConsole': {'type': 'serial'}})
        output = jsonutils.loads(res.body)
        self.assertEqual(200, res.status_int)
        self.assertEqual({u'console': {u'url': u'http://fake',
                                       u'type': u'serial'}},
                         output)

    @mock.patch.object(compute_api.API, 'get_serial_console')
    def test_get_serial_console_not_enable(self, get_serial_console):
        # Serial consoles unavailable on the host -> 400.
        self._check_serial_console_failure(
            get_serial_console,
            exception.ConsoleTypeUnavailable(console_type="serial"),
            400, {'type': 'serial'})

    @mock.patch.object(compute_api.API, 'get_serial_console')
    def test_get_serial_console_invalid_type(self, get_serial_console):
        # Unrecognized console type -> 400.
        self._check_serial_console_failure(
            get_serial_console,
            exception.ConsoleTypeInvalid(console_type='invalid'),
            400, {'type': 'invalid'})

    @mock.patch.object(compute_api.API, 'get_serial_console')
    def test_get_serial_console_no_type(self, get_serial_console):
        # Missing 'type' key in the action body -> 400.
        self._check_serial_console_failure(
            get_serial_console,
            exception.ConsoleTypeInvalid(console_type=''),
            400, {})

    @mock.patch.object(compute_api.API, 'get_serial_console')
    def test_get_serial_console_no_instance(self, get_serial_console):
        # Instance gone by the time the console is requested -> 404.
        self._check_serial_console_failure(
            get_serial_console,
            exception.InstanceNotFound(instance_id='xxx'),
            404, {'type': 'serial'})

    @mock.patch.object(compute_api.API, 'get_serial_console')
    def test_get_serial_console_instance_not_ready(self, get_serial_console):
        # Instance not yet ready for a console -> 409 conflict.
        self._check_serial_console_failure(
            get_serial_console,
            exception.InstanceNotReady(instance_id='xxx'),
            409, {'type': 'serial'})

    @mock.patch.object(compute_api.API, 'get_serial_console')
    def test_get_serial_console_socket_exhausted(self, get_serial_console):
        # No free serial ports left on the host -> 500.
        self._check_serial_console_failure(
            get_serial_console,
            exception.SocketPortRangeExhaustedException(host='127.0.0.1'),
            500, {'type': 'serial'})

    @mock.patch.object(compute_api.API, 'get_serial_console')
    def test_get_serial_console_image_nport_invalid(self, get_serial_console):
        # Non-numeric hw_serial_port_count image property -> 400.
        self._check_serial_console_failure(
            get_serial_console,
            exception.ImageSerialPortNumberInvalid(
                num_ports='x', property="hw_serial_port_count"),
            400, {'type': 'serial'})

    @mock.patch.object(compute_api.API, 'get_serial_console')
    def test_get_serial_console_image_nport_exceed(self, get_serial_console):
        # Image requests more serial ports than the flavor allows -> 400.
        self._check_serial_console_failure(
            get_serial_console,
            exception.ImageSerialPortNumberExceedFlavorValue(),
            400, {'type': 'serial'})
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_createserverext.py b/nova/tests/unit/api/openstack/compute/contrib/test_createserverext.py
new file mode 100644
index 0000000000..eca3aa3953
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_createserverext.py
@@ -0,0 +1,387 @@
+# Copyright 2010-2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+from xml.dom import minidom
+
+from oslo.serialization import jsonutils
+import webob
+
+from nova.compute import api as compute_api
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
FAKE_UUID = fakes.FAKE_UUID

# Well-formed (network uuid, fixed ip) pairs for requested-networks bodies.
FAKE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'),
                 ('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', '10.0.2.12')]

# The same network listed twice; the API is expected to reject this.
DUPLICATE_NETWORKS = [('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12'),
                      ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '10.0.1.12')]

# Malformed uuid and address used to exercise input validation.
INVALID_NETWORKS = [('invalid', 'invalid-ip-address')]
+
+
def return_security_group_non_existing(context, project_id, group_name):
    """db stub that behaves as if the named security group does not exist."""
    raise exception.SecurityGroupNotFoundForProject(project_id=project_id,
        security_group_id=group_name)
+
+
def return_security_group_get_by_name(context, project_id, group_name):
    """db stub returning a canned security group for any name lookup."""
    group = {'id': 1}
    group['name'] = group_name
    return group
+
+
def return_security_group_get(context, security_group_id, session):
    """db stub echoing back the requested security group id."""
    return dict(id=security_group_id)
+
+
def return_instance_add_security_group(context, instance_id,
                                       security_group_id):
    """db stub: accept the association without touching any database."""
    pass
+
+
class CreateserverextTest(test.TestCase):
    """Tests for the os-create-server-ext extension (JSON and XML).

    compute_api.API.create is stubbed with a closure that records the
    security_group / injected_files / requested_networks / user_data
    kwargs it received on the test instance, so each test can assert
    both the HTTP status and what actually reached the compute API.
    """

    def setUp(self):
        super(CreateserverextTest, self).setUp()

        # Captured kwargs from the most recent fake create() call.
        self.security_group = None
        self.injected_files = None
        self.networks = None
        self.user_data = None

        def create(*args, **kwargs):
            # Record the interesting kwargs, then return one fake server
            # in the (instances, reservation_id) shape the API expects.
            if 'security_group' in kwargs:
                self.security_group = kwargs['security_group']
            else:
                self.security_group = None
            if 'injected_files' in kwargs:
                self.injected_files = kwargs['injected_files']
            else:
                self.injected_files = None

            if 'requested_networks' in kwargs:
                self.networks = kwargs['requested_networks']
            else:
                self.networks = None

            if 'user_data' in kwargs:
                self.user_data = kwargs['user_data']

            resv_id = None

            return ([{'id': '1234', 'display_name': 'fakeinstance',
                      'uuid': FAKE_UUID,
                      'user_id': 'fake',
                      'project_id': 'fake',
                      'created_at': "",
                      'updated_at': "",
                      'fixed_ips': [],
                      'progress': 0}], resv_id)

        self.stubs.Set(compute_api.API, 'create', create)
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Createserverext', 'User_data',
                                    'Security_groups', 'Os_networks'])

    def _make_stub_method(self, canned_return):
        """Return a stub that ignores its arguments and returns a canned value."""
        def stub_method(*args, **kwargs):
            return canned_return
        return stub_method

    def _create_security_group_request_dict(self, security_groups):
        """Build a minimal server-create body listing *security_groups* names."""
        server = {}
        server['name'] = 'new-server-test'
        server['imageRef'] = 'cedef40a-ed67-4d10-800e-17455edce175'
        server['flavorRef'] = 1
        if security_groups is not None:
            sg_list = []
            for name in security_groups:
                sg_list.append({'name': name})
            server['security_groups'] = sg_list
        return {'server': server}

    def _create_networks_request_dict(self, networks):
        """Build a server-create body from (uuid, fixed_ip) *networks* pairs."""
        server = {}
        server['name'] = 'new-server-test'
        server['imageRef'] = 'cedef40a-ed67-4d10-800e-17455edce175'
        server['flavorRef'] = 1
        if networks is not None:
            network_list = []
            for uuid, fixed_ip in networks:
                network_list.append({'uuid': uuid, 'fixed_ip': fixed_ip})
            server['networks'] = network_list
        return {'server': server}

    def _create_user_data_request_dict(self, user_data):
        """Build a server-create body carrying *user_data* verbatim."""
        server = {}
        server['name'] = 'new-server-test'
        server['imageRef'] = 'cedef40a-ed67-4d10-800e-17455edce175'
        server['flavorRef'] = 1
        server['user_data'] = user_data
        return {'server': server}

    def _get_create_request_json(self, body_dict):
        """Wrap *body_dict* in a JSON POST to the extension endpoint."""
        req = webob.Request.blank('/v2/fake/os-create-server-ext')
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        req.body = jsonutils.dumps(body_dict)
        return req

    def _format_xml_request_body(self, body_dict):
        """Serialize a server-create dict into the legacy XML request form."""
        server = body_dict['server']
        body_parts = []
        body_parts.extend([
            '<?xml version="1.0" encoding="UTF-8"?>',
            '<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.1"',
            ' name="%s" imageRef="%s" flavorRef="%s">' % (
                server['name'], server['imageRef'], server['flavorRef'])])
        if 'metadata' in server:
            metadata = server['metadata']
            body_parts.append('<metadata>')
            # NOTE(review): iteritems() is Python-2 only; needs items() on py3.
            for item in metadata.iteritems():
                body_parts.append('<meta key="%s">%s</meta>' % item)
            body_parts.append('</metadata>')
        if 'personality' in server:
            personalities = server['personality']
            body_parts.append('<personality>')
            # NOTE(review): 'file' shadows the py2 builtin; rename if touched.
            for file in personalities:
                item = (file['path'], file['contents'])
                body_parts.append('<file path="%s">%s</file>' % item)
            body_parts.append('</personality>')
        if 'networks' in server:
            networks = server['networks']
            body_parts.append('<networks>')
            for network in networks:
                item = (network['uuid'], network['fixed_ip'])
                body_parts.append('<network uuid="%s" fixed_ip="%s"></network>'
                                  % item)
            body_parts.append('</networks>')
        body_parts.append('</server>')
        return ''.join(body_parts)

    def _get_create_request_xml(self, body_dict):
        """Wrap *body_dict* in an XML POST to the extension endpoint."""
        req = webob.Request.blank('/v2/fake/os-create-server-ext')
        req.content_type = 'application/xml'
        req.accept = 'application/xml'
        req.method = 'POST'
        req.body = self._format_xml_request_body(body_dict)
        return req

    def _create_instance_with_networks_json(self, networks):
        """POST a JSON create with *networks*; return req, resp, captured nets."""
        body_dict = self._create_networks_request_dict(networks)
        request = self._get_create_request_json(body_dict)
        response = request.get_response(fakes.wsgi_app(
            init_only=('servers', 'os-create-server-ext')))
        return request, response, self.networks

    def _create_instance_with_user_data_json(self, networks):
        """POST a JSON create with user data; return req, resp, captured data."""
        body_dict = self._create_user_data_request_dict(networks)
        request = self._get_create_request_json(body_dict)
        response = request.get_response(fakes.wsgi_app(
            init_only=('servers', 'os-create-server-ext')))
        return request, response, self.user_data

    def _create_instance_with_networks_xml(self, networks):
        """POST an XML create with *networks*; return req, resp, captured nets."""
        body_dict = self._create_networks_request_dict(networks)
        request = self._get_create_request_xml(body_dict)
        response = request.get_response(fakes.wsgi_app(
            init_only=('servers', 'os-create-server-ext')))
        return request, response, self.networks

    def test_create_instance_with_no_networks(self):
        _create_inst = self._create_instance_with_networks_json
        request, response, networks = _create_inst(networks=None)
        self.assertEqual(response.status_int, 202)
        self.assertIsNone(networks)

    def test_create_instance_with_no_networks_xml(self):
        _create_inst = self._create_instance_with_networks_xml
        request, response, networks = _create_inst(networks=None)
        self.assertEqual(response.status_int, 202)
        self.assertIsNone(networks)

    def test_create_instance_with_one_network(self):
        _create_inst = self._create_instance_with_networks_json
        request, response, networks = _create_inst([FAKE_NETWORKS[0]])
        self.assertEqual(response.status_int, 202)
        self.assertEqual([FAKE_NETWORKS[0]], networks.as_tuples())

    def test_create_instance_with_one_network_xml(self):
        _create_inst = self._create_instance_with_networks_xml
        request, response, networks = _create_inst([FAKE_NETWORKS[0]])
        self.assertEqual(response.status_int, 202)
        self.assertEqual([FAKE_NETWORKS[0]], networks.as_tuples())

    def test_create_instance_with_two_networks(self):
        _create_inst = self._create_instance_with_networks_json
        request, response, networks = _create_inst(FAKE_NETWORKS)
        self.assertEqual(response.status_int, 202)
        self.assertEqual(FAKE_NETWORKS, networks.as_tuples())

    def test_create_instance_with_two_networks_xml(self):
        _create_inst = self._create_instance_with_networks_xml
        request, response, networks = _create_inst(FAKE_NETWORKS)
        self.assertEqual(response.status_int, 202)
        self.assertEqual(FAKE_NETWORKS, networks.as_tuples())

    def test_create_instance_with_duplicate_networks(self):
        _create_inst = self._create_instance_with_networks_json
        request, response, networks = _create_inst(DUPLICATE_NETWORKS)
        self.assertEqual(response.status_int, 400)
        self.assertIsNone(networks)

    def test_create_instance_with_duplicate_networks_xml(self):
        _create_inst = self._create_instance_with_networks_xml
        request, response, networks = _create_inst(DUPLICATE_NETWORKS)
        self.assertEqual(response.status_int, 400)
        self.assertIsNone(networks)

    def test_create_instance_with_network_no_id(self):
        # Strip the uuid key from the network entry: request must fail.
        body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]])
        del body_dict['server']['networks'][0]['uuid']
        request = self._get_create_request_json(body_dict)
        response = request.get_response(fakes.wsgi_app(
            init_only=('servers', 'os-create-server-ext')))
        self.assertEqual(response.status_int, 400)
        self.assertIsNone(self.networks)

    def test_create_instance_with_network_no_id_xml(self):
        body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]])
        request = self._get_create_request_xml(body_dict)
        # Remove the uuid attribute from the serialized XML body.
        uuid = ' uuid="aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"'
        request.body = request.body.replace(uuid, '')
        response = request.get_response(fakes.wsgi_app(
            init_only=('servers', 'os-create-server-ext')))
        self.assertEqual(response.status_int, 400)
        self.assertIsNone(self.networks)

    def test_create_instance_with_network_invalid_id(self):
        _create_inst = self._create_instance_with_networks_json
        request, response, networks = _create_inst(INVALID_NETWORKS)
        self.assertEqual(response.status_int, 400)
        self.assertIsNone(networks)

    def test_create_instance_with_network_invalid_id_xml(self):
        _create_inst = self._create_instance_with_networks_xml
        request, response, networks = _create_inst(INVALID_NETWORKS)
        self.assertEqual(response.status_int, 400)
        self.assertIsNone(networks)

    def test_create_instance_with_network_empty_fixed_ip(self):
        networks = [('1', '')]
        _create_inst = self._create_instance_with_networks_json
        request, response, networks = _create_inst(networks)
        self.assertEqual(response.status_int, 400)
        self.assertIsNone(networks)

    def test_create_instance_with_network_non_string_fixed_ip(self):
        networks = [('1', 12345)]
        _create_inst = self._create_instance_with_networks_json
        request, response, networks = _create_inst(networks)
        self.assertEqual(response.status_int, 400)
        self.assertIsNone(networks)

    def test_create_instance_with_network_empty_fixed_ip_xml(self):
        networks = [('1', '')]
        _create_inst = self._create_instance_with_networks_xml
        request, response, networks = _create_inst(networks)
        self.assertEqual(response.status_int, 400)
        self.assertIsNone(networks)

    def test_create_instance_with_network_no_fixed_ip(self):
        # Omitting fixed_ip is valid: the network passes through with None.
        body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]])
        del body_dict['server']['networks'][0]['fixed_ip']
        request = self._get_create_request_json(body_dict)
        response = request.get_response(fakes.wsgi_app(
            init_only=('servers', 'os-create-server-ext')))
        self.assertEqual(response.status_int, 202)
        self.assertEqual([('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)],
                         self.networks.as_tuples())

    def test_create_instance_with_network_no_fixed_ip_xml(self):
        body_dict = self._create_networks_request_dict([FAKE_NETWORKS[0]])
        request = self._get_create_request_xml(body_dict)
        request.body = request.body.replace(' fixed_ip="10.0.1.12"', '')
        response = request.get_response(fakes.wsgi_app(
            init_only=('servers', 'os-create-server-ext')))
        self.assertEqual(response.status_int, 202)
        self.assertEqual([('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)],
                         self.networks.as_tuples())

    def test_create_instance_with_userdata(self):
        user_data_contents = '#!/bin/bash\necho "Oh no!"\n'
        user_data_contents = base64.b64encode(user_data_contents)
        _create_inst = self._create_instance_with_user_data_json
        request, response, user_data = _create_inst(user_data_contents)
        self.assertEqual(response.status_int, 202)
        self.assertEqual(user_data, user_data_contents)

    def test_create_instance_with_userdata_none(self):
        user_data_contents = None
        _create_inst = self._create_instance_with_user_data_json
        request, response, user_data = _create_inst(user_data_contents)
        self.assertEqual(response.status_int, 202)
        self.assertEqual(user_data, user_data_contents)

    def test_create_instance_with_userdata_with_non_b64_content(self):
        # Raw (non-base64) user data must be rejected before create().
        user_data_contents = '#!/bin/bash\necho "Oh no!"\n'
        _create_inst = self._create_instance_with_user_data_json
        request, response, user_data = _create_inst(user_data_contents)
        self.assertEqual(response.status_int, 400)
        self.assertIsNone(user_data)

    def test_create_instance_with_security_group_json(self):
        security_groups = ['test', 'test1']
        self.stubs.Set(db, 'security_group_get_by_name',
                       return_security_group_get_by_name)
        self.stubs.Set(db, 'instance_add_security_group',
                       return_instance_add_security_group)
        body_dict = self._create_security_group_request_dict(security_groups)
        request = self._get_create_request_json(body_dict)
        response = request.get_response(fakes.wsgi_app(
            init_only=('servers', 'os-create-server-ext')))
        self.assertEqual(response.status_int, 202)
        self.assertEqual(self.security_group, security_groups)

    def test_get_server_by_id_verify_security_groups_json(self):
        self.stubs.Set(db, 'instance_get', fakes.fake_instance_get())
        self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get())
        req = webob.Request.blank('/v2/fake/os-create-server-ext/1')
        req.headers['Content-Type'] = 'application/json'
        response = req.get_response(fakes.wsgi_app(
            init_only=('os-create-server-ext', 'servers')))
        self.assertEqual(response.status_int, 200)
        res_dict = jsonutils.loads(response.body)
        expected_security_group = [{"name": "test"}]
        self.assertEqual(res_dict['server'].get('security_groups'),
                         expected_security_group)

    def test_get_server_by_id_verify_security_groups_xml(self):
        self.stubs.Set(db, 'instance_get', fakes.fake_instance_get())
        self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get())
        req = webob.Request.blank('/v2/fake/os-create-server-ext/1')
        req.headers['Accept'] = 'application/xml'
        response = req.get_response(fakes.wsgi_app(
            init_only=('os-create-server-ext', 'servers')))
        self.assertEqual(response.status_int, 200)
        dom = minidom.parseString(response.body)
        server = dom.childNodes[0]
        sec_groups = server.getElementsByTagName('security_groups')[0]
        sec_group = sec_groups.getElementsByTagName('security_group')[0]
        self.assertEqual('test', sec_group.getAttribute("name"))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_deferred_delete.py b/nova/tests/unit/api/openstack/compute/contrib/test_deferred_delete.py
new file mode 100644
index 0000000000..0dfd0e5339
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_deferred_delete.py
@@ -0,0 +1,147 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import webob
+
+from nova.api.openstack.compute.contrib import deferred_delete
+from nova.api.openstack.compute.plugins.v3 import deferred_delete as dd_v21
+from nova.compute import api as compute_api
+from nova import context
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
class FakeRequest(object):
    """Bare-bones request double exposing only the nova request context."""

    def __init__(self, context):
        self.environ = {}
        self.environ['nova.context'] = context
+
+
class DeferredDeleteExtensionTestV21(test.NoDBTestCase):
    """Tests for the deferred-delete (force delete / restore) actions.

    Runs against the v2.1 controller; the V2 subclass below swaps in the
    legacy controller via ``ext_ver``. Tests drive the controller methods
    directly with a FakeRequest rather than going through the WSGI stack.
    """

    # Controller class under test; overridden by the V2 subclass.
    ext_ver = dd_v21.DeferredDeleteController

    def setUp(self):
        super(DeferredDeleteExtensionTestV21, self).setUp()
        self.fake_input_dict = {}
        self.fake_uuid = 'fake_uuid'
        self.fake_context = context.RequestContext('fake', 'fake')
        self.fake_req = FakeRequest(self.fake_context)
        self.extension = self.ext_ver()

    def test_force_delete(self):
        """Successful force delete returns 202."""
        self.mox.StubOutWithMock(compute_api.API, 'get')
        self.mox.StubOutWithMock(compute_api.API, 'force_delete')

        fake_instance = 'fake_instance'

        compute_api.API.get(self.fake_context, self.fake_uuid,
                            expected_attrs=None,
                            want_objects=True).AndReturn(fake_instance)
        compute_api.API.force_delete(self.fake_context, fake_instance)

        self.mox.ReplayAll()
        res = self.extension._force_delete(self.fake_req, self.fake_uuid,
                                           self.fake_input_dict)
        # NOTE: on v2.1, http status code is set as wsgi_code of API
        # method instead of status_int in a response object.
        if isinstance(self.extension, dd_v21.DeferredDeleteController):
            status_int = self.extension._force_delete.wsgi_code
        else:
            status_int = res.status_int
        self.assertEqual(202, status_int)

    def test_force_delete_instance_not_found(self):
        """An unknown instance uuid maps to HTTPNotFound."""
        self.mox.StubOutWithMock(compute_api.API, 'get')

        compute_api.API.get(self.fake_context, self.fake_uuid,
                            expected_attrs=None,
                            want_objects=True).AndRaise(
            exception.InstanceNotFound(instance_id='instance-0000'))

        self.mox.ReplayAll()
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.extension._force_delete,
                          self.fake_req,
                          self.fake_uuid,
                          self.fake_input_dict)

    @mock.patch.object(compute_api.API, 'get')
    @mock.patch.object(compute_api.API, 'force_delete',
                       side_effect=exception.InstanceIsLocked(
                           instance_uuid='fake_uuid'))
    def test_force_delete_instance_locked(self, mock_force_delete, mock_get):
        """A locked instance maps to HTTPConflict with an explanation."""
        req = fakes.HTTPRequest.blank('/v2/fake/servers/fake_uuid/action')
        ex = self.assertRaises(webob.exc.HTTPConflict,
                               self.extension._force_delete,
                               req, 'fake_uuid', '')
        self.assertIn('Instance fake_uuid is locked', ex.explanation)

    def test_restore(self):
        """Successful restore returns 202."""
        self.mox.StubOutWithMock(compute_api.API, 'get')
        self.mox.StubOutWithMock(compute_api.API, 'restore')

        fake_instance = 'fake_instance'

        compute_api.API.get(self.fake_context, self.fake_uuid,
                            expected_attrs=None,
                            want_objects=True).AndReturn(fake_instance)
        compute_api.API.restore(self.fake_context, fake_instance)

        self.mox.ReplayAll()
        res = self.extension._restore(self.fake_req, self.fake_uuid,
                                      self.fake_input_dict)
        # NOTE: on v2.1, http status code is set as wsgi_code of API
        # method instead of status_int in a response object.
        if isinstance(self.extension, dd_v21.DeferredDeleteController):
            status_int = self.extension._restore.wsgi_code
        else:
            status_int = res.status_int
        self.assertEqual(202, status_int)

    def test_restore_instance_not_found(self):
        """Restoring an unknown instance maps to HTTPNotFound."""
        self.mox.StubOutWithMock(compute_api.API, 'get')

        compute_api.API.get(self.fake_context, self.fake_uuid,
                            expected_attrs=None, want_objects=True).AndRaise(
            exception.InstanceNotFound(instance_id='instance-0000'))

        self.mox.ReplayAll()
        self.assertRaises(webob.exc.HTTPNotFound, self.extension._restore,
                          self.fake_req, self.fake_uuid,
                          self.fake_input_dict)

    def test_restore_raises_conflict_on_invalid_state(self):
        """InstanceInvalidState from the compute API maps to HTTPConflict."""
        self.mox.StubOutWithMock(compute_api.API, 'get')
        self.mox.StubOutWithMock(compute_api.API, 'restore')

        fake_instance = 'fake_instance'
        exc = exception.InstanceInvalidState(attr='fake_attr',
            state='fake_state', method='fake_method',
            instance_uuid='fake')

        compute_api.API.get(self.fake_context, self.fake_uuid,
                            expected_attrs=None,
                            want_objects=True).AndReturn(fake_instance)
        compute_api.API.restore(self.fake_context, fake_instance).AndRaise(
            exc)

        self.mox.ReplayAll()
        self.assertRaises(webob.exc.HTTPConflict, self.extension._restore,
                          self.fake_req, self.fake_uuid, self.fake_input_dict)
+
+
class DeferredDeleteExtensionTestV2(DeferredDeleteExtensionTestV21):
    """Run the v2.1 deferred-delete suite against the legacy v2 controller."""

    ext_ver = deferred_delete.DeferredDeleteController
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_disk_config.py b/nova/tests/unit/api/openstack/compute/contrib/test_disk_config.py
new file mode 100644
index 0000000000..b9a514a451
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_disk_config.py
@@ -0,0 +1,449 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo.serialization import jsonutils
+
+from nova.api.openstack import compute
+from nova.compute import api as compute_api
+from nova import db
+from nova import objects
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+import nova.tests.unit.image.fake
+
+
+MANUAL_INSTANCE_UUID = fakes.FAKE_UUID
+AUTO_INSTANCE_UUID = fakes.FAKE_UUID.replace('a', 'b')
+
+stub_instance = fakes.stub_instance
+
+API_DISK_CONFIG = 'OS-DCF:diskConfig'
+
+
+def instance_addresses(context, instance_id):
+    # Network-address stub: every instance reports no addresses.
+    return None
+
+
+class DiskConfigTestCaseV21(test.TestCase):
+
+ def setUp(self):
+ super(DiskConfigTestCaseV21, self).setUp()
+ self._set_up_app()
+ self._setup_fake_image_service()
+
+ fakes.stub_out_nw_api(self.stubs)
+
+ FAKE_INSTANCES = [
+ fakes.stub_instance(1,
+ uuid=MANUAL_INSTANCE_UUID,
+ auto_disk_config=False),
+ fakes.stub_instance(2,
+ uuid=AUTO_INSTANCE_UUID,
+ auto_disk_config=True)
+ ]
+
+ def fake_instance_get(context, id_):
+ for instance in FAKE_INSTANCES:
+ if id_ == instance['id']:
+ return instance
+
+ self.stubs.Set(db, 'instance_get', fake_instance_get)
+
+ def fake_instance_get_by_uuid(context, uuid,
+ columns_to_join=None, use_slave=False):
+ for instance in FAKE_INSTANCES:
+ if uuid == instance['uuid']:
+ return instance
+
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fake_instance_get_by_uuid)
+
+ def fake_instance_get_all(context, *args, **kwargs):
+ return FAKE_INSTANCES
+
+ self.stubs.Set(db, 'instance_get_all', fake_instance_get_all)
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_instance_get_all)
+
+ self.stubs.Set(objects.Instance, 'save',
+ lambda *args, **kwargs: None)
+
+ def fake_rebuild(*args, **kwargs):
+ pass
+
+ self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
+
+ def fake_instance_create(context, inst_, session=None):
+ inst = fake_instance.fake_db_instance(**{
+ 'id': 1,
+ 'uuid': AUTO_INSTANCE_UUID,
+ 'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
+ 'updated_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
+ 'progress': 0,
+ 'name': 'instance-1', # this is a property
+ 'task_state': '',
+ 'vm_state': '',
+ 'auto_disk_config': inst_['auto_disk_config'],
+ 'security_groups': inst_['security_groups'],
+ })
+
+ def fake_instance_get_for_create(context, id_, *args, **kwargs):
+ return (inst, inst)
+
+ self.stubs.Set(db, 'instance_update_and_get_original',
+ fake_instance_get_for_create)
+
+ def fake_instance_get_all_for_create(context, *args, **kwargs):
+ return [inst]
+ self.stubs.Set(db, 'instance_get_all',
+ fake_instance_get_all_for_create)
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_instance_get_all_for_create)
+
+ def fake_instance_add_security_group(context, instance_id,
+ security_group_id):
+ pass
+
+ self.stubs.Set(db,
+ 'instance_add_security_group',
+ fake_instance_add_security_group)
+
+ return inst
+
+ self.stubs.Set(db, 'instance_create', fake_instance_create)
+
+ def _set_up_app(self):
+ self.app = compute.APIRouterV21(init_only=('servers', 'images',
+ 'os-disk-config'))
+
+ def _get_expected_msg_for_invalid_disk_config(self):
+ return ('{{"badRequest": {{"message": "Invalid input for'
+ ' field/attribute {0}. Value: {1}. u\'{1}\' is'
+ ' not one of [\'AUTO\', \'MANUAL\']", "code": 400}}}}')
+
+ def _setup_fake_image_service(self):
+ self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
+ self.stubs)
+ timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
+ image = {'id': '88580842-f50a-11e2-8d3a-f23c91aec05e',
+ 'name': 'fakeimage7',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'ova',
+ 'disk_format': 'vhd',
+ 'size': '74185822',
+ 'properties': {'auto_disk_config': 'Disabled'}}
+ self.image_service.create(None, image)
+
+ def tearDown(self):
+ super(DiskConfigTestCaseV21, self).tearDown()
+ nova.tests.unit.image.fake.FakeImageService_reset()
+
+ def assertDiskConfig(self, dict_, value):
+ self.assertIn(API_DISK_CONFIG, dict_)
+ self.assertEqual(dict_[API_DISK_CONFIG], value)
+
+ def test_show_server(self):
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers/%s' % MANUAL_INSTANCE_UUID)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'MANUAL')
+
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers/%s' % AUTO_INSTANCE_UUID)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'AUTO')
+
+ def test_detail_servers(self):
+ req = fakes.HTTPRequest.blank('/fake/servers/detail')
+ res = req.get_response(self.app)
+ server_dicts = jsonutils.loads(res.body)['servers']
+
+ expectations = ['MANUAL', 'AUTO']
+ for server_dict, expected in zip(server_dicts, expectations):
+ self.assertDiskConfig(server_dict, expected)
+
+ def test_show_image(self):
+ req = fakes.HTTPRequest.blank(
+ '/fake/images/a440c04b-79fa-479c-bed1-0b816eaec379')
+ res = req.get_response(self.app)
+ image_dict = jsonutils.loads(res.body)['image']
+ self.assertDiskConfig(image_dict, 'MANUAL')
+
+ req = fakes.HTTPRequest.blank(
+ '/fake/images/70a599e0-31e7-49b7-b260-868f441e862b')
+ res = req.get_response(self.app)
+ image_dict = jsonutils.loads(res.body)['image']
+ self.assertDiskConfig(image_dict, 'AUTO')
+
+ def test_detail_image(self):
+ req = fakes.HTTPRequest.blank('/fake/images/detail')
+ res = req.get_response(self.app)
+ image_dicts = jsonutils.loads(res.body)['images']
+
+ expectations = ['MANUAL', 'AUTO']
+ for image_dict, expected in zip(image_dicts, expectations):
+ # NOTE(sirp): image fixtures 6 and 7 are setup for
+ # auto_disk_config testing
+ if image_dict['id'] in (6, 7):
+ self.assertDiskConfig(image_dict, expected)
+
+ def test_create_server_override_auto(self):
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'server': {
+ 'name': 'server_test',
+ 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'flavorRef': '1',
+ API_DISK_CONFIG: 'AUTO'
+ }}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'AUTO')
+
+ def test_create_server_override_manual(self):
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'server': {
+ 'name': 'server_test',
+ 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'flavorRef': '1',
+ API_DISK_CONFIG: 'MANUAL'
+ }}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'MANUAL')
+
+ def test_create_server_detect_from_image(self):
+ """If user doesn't pass in diskConfig for server, use image metadata
+ to specify AUTO or MANUAL.
+ """
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'server': {
+ 'name': 'server_test',
+ 'imageRef': 'a440c04b-79fa-479c-bed1-0b816eaec379',
+ 'flavorRef': '1',
+ }}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'MANUAL')
+
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'server': {
+ 'name': 'server_test',
+ 'imageRef': '70a599e0-31e7-49b7-b260-868f441e862b',
+ 'flavorRef': '1',
+ }}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'AUTO')
+
+ def test_create_server_detect_from_image_disabled_goes_to_manual(self):
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'server': {
+ 'name': 'server_test',
+ 'imageRef': '88580842-f50a-11e2-8d3a-f23c91aec05e',
+ 'flavorRef': '1',
+ }}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'MANUAL')
+
+ def test_create_server_errors_when_disabled_and_auto(self):
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'server': {
+ 'name': 'server_test',
+ 'imageRef': '88580842-f50a-11e2-8d3a-f23c91aec05e',
+ 'flavorRef': '1',
+ API_DISK_CONFIG: 'AUTO'
+ }}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_create_server_when_disabled_and_manual(self):
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'server': {
+ 'name': 'server_test',
+ 'imageRef': '88580842-f50a-11e2-8d3a-f23c91aec05e',
+ 'flavorRef': '1',
+ API_DISK_CONFIG: 'MANUAL'
+ }}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'MANUAL')
+
+ def _test_update_server_disk_config(self, uuid, disk_config):
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers/%s' % uuid)
+ req.method = 'PUT'
+ req.content_type = 'application/json'
+ body = {'server': {API_DISK_CONFIG: disk_config}}
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, disk_config)
+
+ def test_update_server_override_auto(self):
+ self._test_update_server_disk_config(AUTO_INSTANCE_UUID, 'AUTO')
+
+ def test_update_server_override_manual(self):
+ self._test_update_server_disk_config(MANUAL_INSTANCE_UUID, 'MANUAL')
+
+ def test_update_server_invalid_disk_config(self):
+ # Return BadRequest if user passes an invalid diskConfig value.
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers/%s' % MANUAL_INSTANCE_UUID)
+ req.method = 'PUT'
+ req.content_type = 'application/json'
+ body = {'server': {API_DISK_CONFIG: 'server_test'}}
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+ expected_msg = self._get_expected_msg_for_invalid_disk_config()
+ self.assertEqual(expected_msg.format(API_DISK_CONFIG, 'server_test'),
+ res.body)
+
+ def _test_rebuild_server_disk_config(self, uuid, disk_config):
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers/%s/action' % uuid)
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {"rebuild": {
+ 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
+ API_DISK_CONFIG: disk_config
+ }}
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, disk_config)
+
+ def test_rebuild_server_override_auto(self):
+ self._test_rebuild_server_disk_config(AUTO_INSTANCE_UUID, 'AUTO')
+
+ def test_rebuild_server_override_manual(self):
+ self._test_rebuild_server_disk_config(MANUAL_INSTANCE_UUID, 'MANUAL')
+
+ def test_create_server_with_auto_disk_config(self):
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'server': {
+ 'name': 'server_test',
+ 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
+ 'flavorRef': '1',
+ API_DISK_CONFIG: 'AUTO'
+ }}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIn('auto_disk_config', kwargs)
+ self.assertEqual(True, kwargs['auto_disk_config'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'AUTO')
+
+ def test_rebuild_server_with_auto_disk_config(self):
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers/%s/action' % AUTO_INSTANCE_UUID)
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {"rebuild": {
+ 'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
+ API_DISK_CONFIG: 'AUTO'
+ }}
+
+ def rebuild(*args, **kwargs):
+ self.assertIn('auto_disk_config', kwargs)
+ self.assertEqual(True, kwargs['auto_disk_config'])
+
+ self.stubs.Set(compute_api.API, 'rebuild', rebuild)
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ server_dict = jsonutils.loads(res.body)['server']
+ self.assertDiskConfig(server_dict, 'AUTO')
+
+ def test_resize_server_with_auto_disk_config(self):
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers/%s/action' % AUTO_INSTANCE_UUID)
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {"resize": {
+ "flavorRef": "3",
+ API_DISK_CONFIG: 'AUTO'
+ }}
+
+ def resize(*args, **kwargs):
+ self.assertIn('auto_disk_config', kwargs)
+ self.assertEqual(True, kwargs['auto_disk_config'])
+
+ self.stubs.Set(compute_api.API, 'resize', resize)
+
+ req.body = jsonutils.dumps(body)
+ req.get_response(self.app)
+
+
+class DiskConfigTestCaseV2(DiskConfigTestCaseV21):
+    # v2 API variant: loads the Disk_config extension explicitly and routes
+    # through the legacy APIRouter instead of APIRouterV21.
+    def _set_up_app(self):
+        self.flags(verbose=True,
+                   osapi_compute_extension=[
+                       'nova.api.openstack.compute.contrib.select_extensions'],
+                   osapi_compute_ext_list=['Disk_config'])
+
+        self.app = compute.APIRouter(init_only=('servers', 'images'))
+
+    def _get_expected_msg_for_invalid_disk_config(self):
+        # v2 produces its own error wording; v2.1 (parent class) uses the
+        # jsonschema-style message instead.
+        return ('{{"badRequest": {{"message": "{0} must be either'
+                ' \'MANUAL\' or \'AUTO\'.", "code": 400}}}}')
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_evacuate.py b/nova/tests/unit/api/openstack/compute/contrib/test_evacuate.py
new file mode 100644
index 0000000000..3f5b662db5
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_evacuate.py
@@ -0,0 +1,268 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.compute import api as compute_api
+from nova.compute import vm_states
+from nova import context
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
+
+
+def fake_compute_api(*args, **kwargs):
+    # Generic stub used for compute API actions (resize/evacuate): succeed.
+    return True
+
+
+def fake_compute_api_get(self, context, instance_id, want_objects=False,
+                         **kwargs):
+    # Stub for compute_api.API.get: returns an ACTIVE instance on 'host1',
+    # or raises for the sentinel id below.
+    # BAD_UUID is something that does not exist
+    if instance_id == 'BAD_UUID':
+        raise exception.InstanceNotFound(instance_id=instance_id)
+    else:
+        return fake_instance.fake_instance_obj(context, id=1, uuid=instance_id,
+                                               task_state=None, host='host1',
+                                               vm_state=vm_states.ACTIVE)
+
+
+def fake_service_get_by_compute_host(self, context, host):
+    # Stub for HostAPI.service_get_by_compute_host: 'bad-host' is treated
+    # as unknown so the API layer's 404 path can be exercised.
+    if host == 'bad-host':
+        raise exception.ComputeHostNotFound(host=host)
+    else:
+        return {
+            'host_name': host,
+            'service': 'compute',
+            'zone': 'nova'
+            }
+
+
+class EvacuateTestV21(test.NoDBTestCase):
+
+ _methods = ('resize', 'evacuate')
+ fake_url = '/v2/fake'
+
+ def setUp(self):
+ super(EvacuateTestV21, self).setUp()
+ self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
+ self.stubs.Set(compute_api.HostAPI, 'service_get_by_compute_host',
+ fake_service_get_by_compute_host)
+ self.UUID = uuid.uuid4()
+ for _method in self._methods:
+ self.stubs.Set(compute_api.API, _method, fake_compute_api)
+
+ def _fake_wsgi_app(self, ctxt):
+ return fakes.wsgi_app_v21(fake_auth_context=ctxt)
+
+ def _gen_resource_with_app(self, json_load, is_admin=True, uuid=None):
+ ctxt = context.get_admin_context()
+ ctxt.user_id = 'fake'
+ ctxt.project_id = 'fake'
+ ctxt.is_admin = is_admin
+ app = self._fake_wsgi_app(ctxt)
+ req = webob.Request.blank('%s/servers/%s/action' % (self.fake_url,
+ uuid or self.UUID))
+ req.method = 'POST'
+ base_json_load = {'evacuate': json_load}
+ req.body = jsonutils.dumps(base_json_load)
+ req.content_type = 'application/json'
+
+ return req.get_response(app)
+
+ def _fake_update(self, inst, context, instance, task_state,
+ expected_task_state):
+ return None
+
+ def test_evacuate_with_valid_instance(self):
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+
+ self.assertEqual(res.status_int, 200)
+
+ def test_evacuate_with_invalid_instance(self):
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'},
+ uuid='BAD_UUID')
+
+ self.assertEqual(res.status_int, 404)
+
+ def test_evacuate_with_active_service(self):
+ def fake_evacuate(*args, **kwargs):
+ raise exception.ComputeServiceInUse("Service still in use")
+
+ self.stubs.Set(compute_api.API, 'evacuate', fake_evacuate)
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(res.status_int, 400)
+
+ def test_evacuate_instance_with_no_target(self):
+ res = self._gen_resource_with_app({'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(200, res.status_int)
+
+ def test_evacuate_instance_without_on_shared_storage(self):
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(res.status_int, 400)
+
+ def test_evacuate_instance_with_invalid_characters_host(self):
+ host = 'abc!#'
+ res = self._gen_resource_with_app({'host': host,
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(400, res.status_int)
+
+ def test_evacuate_instance_with_too_long_host(self):
+ host = 'a' * 256
+ res = self._gen_resource_with_app({'host': host,
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(400, res.status_int)
+
+ def test_evacuate_instance_with_invalid_on_shared_storage(self):
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'onSharedStorage': 'foo',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(400, res.status_int)
+
+ def test_evacuate_instance_with_bad_target(self):
+ res = self._gen_resource_with_app({'host': 'bad-host',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(res.status_int, 404)
+
+ def test_evacuate_instance_with_target(self):
+ self.stubs.Set(compute_api.API, 'update', self._fake_update)
+
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(res.status_int, 200)
+ resp_json = jsonutils.loads(res.body)
+ self.assertEqual("MyNewPass", resp_json['adminPass'])
+
+ def test_evacuate_shared_and_pass(self):
+ self.stubs.Set(compute_api.API, 'update', self._fake_update)
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'onSharedStorage': 'True',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(res.status_int, 400)
+
+ def test_evacuate_not_shared_pass_generated(self):
+ self.stubs.Set(compute_api.API, 'update', self._fake_update)
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'onSharedStorage': 'False'})
+ self.assertEqual(res.status_int, 200)
+ resp_json = jsonutils.loads(res.body)
+ self.assertEqual(CONF.password_length, len(resp_json['adminPass']))
+
+ def test_evacuate_shared(self):
+ self.stubs.Set(compute_api.API, 'update', self._fake_update)
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'onSharedStorage': 'True'})
+ self.assertEqual(res.status_int, 200)
+
+ def test_not_admin(self):
+ res = self._gen_resource_with_app({'host': 'my-host',
+ 'onSharedStorage': 'True'},
+ is_admin=False)
+ self.assertEqual(res.status_int, 403)
+
+ def test_evacuate_to_same_host(self):
+ res = self._gen_resource_with_app({'host': 'host1',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(res.status_int, 400)
+
+ def test_evacuate_instance_with_empty_host(self):
+ res = self._gen_resource_with_app({'host': '',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(400, res.status_int)
+
+ def test_evacuate_instance_with_underscore_in_hostname(self):
+ # NOTE: The hostname grammar in RFC952 does not allow for
+ # underscores in hostnames. However, we should test that it
+ # is supported because it sometimes occurs in real systems.
+ self.stubs.Set(compute_api.API, 'update', self._fake_update)
+ res = self._gen_resource_with_app({'host': 'underscore_hostname',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
+ self.assertEqual(200, res.status_int)
+ resp_json = jsonutils.loads(res.body)
+ self.assertEqual("MyNewPass", resp_json['adminPass'])
+
+ def test_evacuate_disable_password_return(self):
+ self._test_evacuate_enable_instance_password_conf(False)
+
+ def test_evacuate_enable_password_return(self):
+ self._test_evacuate_enable_instance_password_conf(True)
+
+ def _test_evacuate_enable_instance_password_conf(self, enable_pass):
+ self.flags(enable_instance_password=enable_pass)
+ self.stubs.Set(compute_api.API, 'update', self._fake_update)
+
+ res = self._gen_resource_with_app({'host': 'my_host',
+ 'onSharedStorage': 'False'})
+ self.assertEqual(res.status_int, 200)
+ resp_json = jsonutils.loads(res.body)
+ if enable_pass:
+ self.assertIn('adminPass', resp_json)
+ else:
+ self.assertIsNone(resp_json.get('adminPass'))
+
+
+class EvacuateTestV2(EvacuateTestV21):
+    # Legacy v2 API variant: loads the Evacuate extension and routes through
+    # the v2 wsgi app. Several parent tests are overridden because v2 has no
+    # input-schema validation.
+
+    def setUp(self):
+        super(EvacuateTestV2, self).setUp()
+        self.flags(
+            osapi_compute_extension=[
+                'nova.api.openstack.compute.contrib.select_extensions'],
+            osapi_compute_ext_list=['Evacuate'])
+
+    def _fake_wsgi_app(self, ctxt):
+        return fakes.wsgi_app(fake_auth_context=ctxt)
+
+    def test_evacuate_instance_with_no_target(self):
+        # Unlike v2.1, v2 requires a target host and rejects its absence.
+        res = self._gen_resource_with_app({'onSharedStorage': 'False',
+                                           'adminPass': 'MyNewPass'})
+        self.assertEqual(400, res.status_int)
+
+    def test_evacuate_instance_with_too_long_host(self):
+        # Not applicable on v2: no host-length validation exists there.
+        pass
+
+    def test_evacuate_instance_with_invalid_characters_host(self):
+        # Not applicable on v2: no hostname-format validation exists there.
+        pass
+
+    def test_evacuate_instance_with_invalid_on_shared_storage(self):
+        # Not applicable on v2: no boolean validation for onSharedStorage.
+        pass
+
+    def test_evacuate_disable_password_return(self):
+        # NOTE(review): presumably the enable_instance_password flag only
+        # affects the v2.1 code path — confirm before relying on this skip.
+        pass
+
+    def test_evacuate_enable_password_return(self):
+        # See note on test_evacuate_disable_password_return above.
+        pass
new file mode 100644
index 0000000000..a3e6dd4a78
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_availability_zone.py
@@ -0,0 +1,184 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import extended_availability_zone
+from nova import availability_zones
+from nova import compute
+from nova.compute import vm_states
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+
+
+def fake_compute_get_az(*args, **kwargs):
+    # Instance on a host, with an explicit availability_zone set.
+    inst = fakes.stub_instance(1, uuid=UUID3, host="get-host",
+                               vm_state=vm_states.ACTIVE,
+                               availability_zone='fakeaz')
+    return fake_instance.fake_instance_obj(args[1], **inst)
+
+
+def fake_compute_get_empty(*args, **kwargs):
+    # Instance with an empty host but an explicit availability_zone.
+    inst = fakes.stub_instance(1, uuid=UUID3, host="",
+                               vm_state=vm_states.ACTIVE,
+                               availability_zone='fakeaz')
+    return fake_instance.fake_instance_obj(args[1], **inst)
+
+
+def fake_compute_get(*args, **kwargs):
+    # Instance on a host, no explicit availability_zone (derived from host).
+    inst = fakes.stub_instance(1, uuid=UUID3, host="get-host",
+                               vm_state=vm_states.ACTIVE)
+    return fake_instance.fake_instance_obj(args[1], **inst)
+
+
+def fake_compute_get_all(*args, **kwargs):
+    # Two ACTIVE instances on the same host, wrapped into an InstanceList.
+    inst1 = fakes.stub_instance(1, uuid=UUID1, host="all-host",
+                                vm_state=vm_states.ACTIVE)
+    inst2 = fakes.stub_instance(2, uuid=UUID2, host="all-host",
+                                vm_state=vm_states.ACTIVE)
+    db_list = [inst1, inst2]
+    fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+    return instance_obj._make_instance_list(args[1],
+                                            objects.InstanceList(),
+                                            db_list, fields)
+
+
+def fake_get_host_availability_zone(context, host):
+    # Identity stub: the AZ of a host is the host name itself.
+    return host
+
+
+def fake_get_no_host_availability_zone(context, host):
+    # Stub for hosts with no availability zone information.
+    return None
+
+
+class ExtendedAvailabilityZoneTestV21(test.TestCase):
+ content_type = 'application/json'
+ prefix = 'OS-EXT-AZ:'
+ base_url = '/v2/fake/servers/'
+
+ def setUp(self):
+ super(ExtendedAvailabilityZoneTestV21, self).setUp()
+ availability_zones.reset_cache()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+ self.stubs.Set(availability_zones, 'get_host_availability_zone',
+ fake_get_host_availability_zone)
+ return_server = fakes.fake_instance_get()
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app_v21(init_only=None))
+ return res
+
+ def _get_server(self, body):
+ return jsonutils.loads(body).get('server')
+
+ def _get_servers(self, body):
+ return jsonutils.loads(body).get('servers')
+
+ def assertAvailabilityZone(self, server, az):
+ self.assertEqual(server.get('%savailability_zone' % self.prefix),
+ az)
+
+ def test_show_no_host_az(self):
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get_az)
+ self.stubs.Set(availability_zones, 'get_host_availability_zone',
+ fake_get_no_host_availability_zone)
+
+ url = self.base_url + UUID3
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertAvailabilityZone(self._get_server(res.body), 'fakeaz')
+
+ def test_show_empty_host_az(self):
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get_empty)
+ self.stubs.Set(availability_zones, 'get_host_availability_zone',
+ fake_get_no_host_availability_zone)
+
+ url = self.base_url + UUID3
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertAvailabilityZone(self._get_server(res.body), 'fakeaz')
+
+ def test_show(self):
+ url = self.base_url + UUID3
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertAvailabilityZone(self._get_server(res.body), 'get-host')
+
+ def test_detail(self):
+ url = self.base_url + 'detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ for i, server in enumerate(self._get_servers(res.body)):
+ self.assertAvailabilityZone(server, 'all-host')
+
+ def test_no_instance_passthrough_404(self):
+
+ def fake_compute_get(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ url = self.base_url + '70f6db34-de8d-4fbd-aafb-4065bdfa6115'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 404)
+
+
+class ExtendedAvailabilityZoneTestV2(ExtendedAvailabilityZoneTestV21):
+    # Legacy v2 API variant: loads the Extended_availability_zone extension
+    # and routes requests through the v2 wsgi app.
+
+    def setUp(self):
+        super(ExtendedAvailabilityZoneTestV2, self).setUp()
+
+        self.flags(
+            osapi_compute_extension=[
+                'nova.api.openstack.compute.contrib.select_extensions'],
+            osapi_compute_ext_list=['Extended_availability_zone'])
+
+    def _make_request(self, url):
+        req = webob.Request.blank(url)
+        req.headers['Accept'] = self.content_type
+        res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+        return res
+
+
+class ExtendedAvailabilityZoneXmlTestV2(ExtendedAvailabilityZoneTestV2):
+    # Same v2 suite over the XML content type; responses are parsed with
+    # lxml and attributes are namespaced by the extension.
+    content_type = 'application/xml'
+    prefix = '{%s}' % extended_availability_zone.\
+                        Extended_availability_zone.namespace
+
+    def _get_server(self, body):
+        return etree.XML(body)
+
+    def _get_servers(self, body):
+        return etree.XML(body).getchildren()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_extended_evacuate_find_host.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_evacuate_find_host.py
new file mode 100644
index 0000000000..1aaee6837a
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_evacuate_find_host.py
@@ -0,0 +1,114 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import mock
+from oslo.serialization import jsonutils
+import webob
+
+from nova.compute import vm_states
+from nova import context
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+
+class ExtendedEvacuateFindHostTest(test.NoDBTestCase):
+
+    def setUp(self):
+        super(ExtendedEvacuateFindHostTest, self).setUp()
+        # Load both the base Evacuate extension and the find-host extension
+        # that makes the target host optional.
+        self.flags(
+            osapi_compute_extension=[
+                'nova.api.openstack.compute.contrib.select_extensions'],
+            osapi_compute_ext_list=['Extended_evacuate_find_host',
+                                    'Evacuate'])
+        self.UUID = uuid.uuid4()
+
+    def _get_admin_context(self, user_id='fake', project_id='fake'):
+        # Admin context with deterministic user/project ids for the fake app.
+        ctxt = context.get_admin_context()
+        ctxt.user_id = user_id
+        ctxt.project_id = project_id
+        return ctxt
+
+    def _fake_compute_api(*args, **kwargs):
+        # Catch-all stub for compute API actions: always report success.
+        return True
+
+ def _fake_compute_api_get(self, context, instance_id, **kwargs):
+ instance = fake_instance.fake_db_instance(id=1, uuid=uuid,
+ task_state=None,
+ host='host1',
+ vm_state=vm_states.ACTIVE)
+ instance = instance_obj.Instance._from_db_object(context,
+ instance_obj.Instance(),
+ instance)
+ return instance
+
+    def _fake_service_get_by_compute_host(self, context, host):
+        # Stub: every host resolves to a running compute service in 'nova'.
+        return {'host_name': host,
+                'service': 'compute',
+                'zone': 'nova'
+                }
+
+ @mock.patch('nova.compute.api.HostAPI.service_get_by_compute_host')
+ @mock.patch('nova.compute.api.API.get')
+ @mock.patch('nova.compute.api.API.evacuate')
+ def test_evacuate_instance_with_no_target(self, evacuate_mock,
+ api_get_mock,
+ service_get_mock):
+ service_get_mock.side_effects = self._fake_service_get_by_compute_host
+ api_get_mock.side_effects = self._fake_compute_api_get
+ evacuate_mock.side_effects = self._fake_compute_api
+
+ ctxt = self._get_admin_context()
+ app = fakes.wsgi_app(fake_auth_context=ctxt)
+ req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps({
+ 'evacuate': {
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'
+ }
+ })
+ req.content_type = 'application/json'
+ res = req.get_response(app)
+ self.assertEqual(200, res.status_int)
+ evacuate_mock.assert_called_once_with(mock.ANY, mock.ANY, None,
+ mock.ANY, mock.ANY)
+
+ @mock.patch('nova.compute.api.HostAPI.service_get_by_compute_host')
+ @mock.patch('nova.compute.api.API.get')
+ def test_no_target_fails_if_extension_not_loaded(self, api_get_mock,
+ service_get_mock):
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Evacuate'])
+ service_get_mock.side_effects = self._fake_service_get_by_compute_host
+ api_get_mock.side_effects = self._fake_compute_api_get
+
+ ctxt = self._get_admin_context()
+ app = fakes.wsgi_app(fake_auth_context=ctxt)
+ req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps({
+ 'evacuate': {
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'
+ }
+ })
+ req.content_type = 'application/json'
+ res = req.get_response(app)
+ self.assertEqual(400, res.status_int)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_extended_hypervisors.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_hypervisors.py
new file mode 100644
index 0000000000..df5e0d787a
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_hypervisors.py
@@ -0,0 +1,101 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+import mock
+
+from nova.api.openstack.compute.contrib import hypervisors as hypervisors_v2
+from nova.api.openstack.compute.plugins.v3 import hypervisors \
+ as hypervisors_v21
+from nova.api.openstack import extensions
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack.compute.contrib import test_hypervisors
+from nova.tests.unit.api.openstack import fakes
+
+
+def fake_compute_node_get(context, compute_id):
+    """Return the TEST_HYPERS fixture whose id matches compute_id.
+
+    Raises ComputeHostNotFound when no fixture matches, mirroring the
+    real db API behaviour.
+    """
+    for hyper in test_hypervisors.TEST_HYPERS:
+        if hyper['id'] == compute_id:
+            return hyper
+    raise exception.ComputeHostNotFound(host=compute_id)
+
+
+def fake_compute_node_get_all(context):
+    """Stand-in for db.compute_node_get_all returning all fixtures."""
+    return test_hypervisors.TEST_HYPERS
+
+
+class ExtendedHypervisorsTestV21(test.NoDBTestCase):
+    """Checks that v2.1 hypervisor views embed the extended service dict."""
+
+    # Expected detail views: TEST_HYPERS with service_id replaced by an
+    # embedded 'service' dict plus state/status fields.
+    DETAIL_HYPERS_DICTS = copy.deepcopy(test_hypervisors.TEST_HYPERS)
+    del DETAIL_HYPERS_DICTS[0]['service_id']
+    del DETAIL_HYPERS_DICTS[1]['service_id']
+    DETAIL_HYPERS_DICTS[0].update({'state': 'up',
+                                   'status': 'enabled',
+                                   'service': dict(id=1, host='compute1',
+                                                   disabled_reason=None)})
+    DETAIL_HYPERS_DICTS[1].update({'state': 'up',
+                                   'status': 'enabled',
+                                   'service': dict(id=2, host='compute2',
+                                                   disabled_reason=None)})
+
+    def _set_up_controller(self):
+        # v2.1 controller; servicegroup check is mocked so every service
+        # reports as up.
+        self.controller = hypervisors_v21.HypervisorsController()
+        self.controller.servicegroup_api.service_is_up = mock.MagicMock(
+            return_value=True)
+
+    def _get_request(self):
+        return fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/detail',
+                                       use_admin_context=True)
+
+    def setUp(self):
+        super(ExtendedHypervisorsTestV21, self).setUp()
+        self._set_up_controller()
+
+        self.stubs.Set(db, 'compute_node_get_all', fake_compute_node_get_all)
+        self.stubs.Set(db, 'compute_node_get',
+                       fake_compute_node_get)
+
+    def test_view_hypervisor_detail_noservers(self):
+        result = self.controller._view_hypervisor(
+            test_hypervisors.TEST_HYPERS[0], True)
+
+        self.assertEqual(result, self.DETAIL_HYPERS_DICTS[0])
+
+    def test_detail(self):
+        req = self._get_request()
+        result = self.controller.detail(req)
+
+        self.assertEqual(result, dict(hypervisors=self.DETAIL_HYPERS_DICTS))
+
+    def test_show_withid(self):
+        req = self._get_request()
+        result = self.controller.show(req, '1')
+
+        self.assertEqual(result, dict(hypervisor=self.DETAIL_HYPERS_DICTS[0]))
+
+
+class ExtendedHypervisorsTestV2(ExtendedHypervisorsTestV21):
+    """Same checks against the v2 contrib controller.
+
+    The v2 views do not carry state/status or disabled_reason, so the
+    expected dicts only embed a minimal 'service' dict.
+    """
+
+    DETAIL_HYPERS_DICTS = copy.deepcopy(test_hypervisors.TEST_HYPERS)
+    del DETAIL_HYPERS_DICTS[0]['service_id']
+    del DETAIL_HYPERS_DICTS[1]['service_id']
+    DETAIL_HYPERS_DICTS[0].update({'service': dict(id=1, host='compute1')})
+    DETAIL_HYPERS_DICTS[1].update({'service': dict(id=2, host='compute2')})
+
+    def _set_up_controller(self):
+        # The v2 controller takes an extension manager with the
+        # os-extended-hypervisors extension enabled.
+        self.ext_mgr = extensions.ExtensionManager()
+        self.ext_mgr.extensions = {}
+        self.ext_mgr.extensions['os-extended-hypervisors'] = True
+        self.controller = hypervisors_v2.HypervisorsController(self.ext_mgr)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_extended_ips.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_ips.py
new file mode 100644
index 0000000000..770814116c
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_ips.py
@@ -0,0 +1,189 @@
+# Copyright 2013 Nebula, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import extended_ips
+from nova.api.openstack import xmlutil
+from nova import compute
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+NW_CACHE = [
+ {
+ 'address': 'aa:aa:aa:aa:aa:aa',
+ 'id': 1,
+ 'network': {
+ 'bridge': 'br0',
+ 'id': 1,
+ 'label': 'private',
+ 'subnets': [
+ {
+ 'cidr': '192.168.1.0/24',
+ 'ips': [
+ {
+ 'address': '192.168.1.100',
+ 'type': 'fixed',
+ 'floating_ips': [
+ {'address': '5.0.0.1', 'type': 'floating'},
+ ],
+ },
+ ],
+ },
+ ]
+ }
+ },
+ {
+ 'address': 'bb:bb:bb:bb:bb:bb',
+ 'id': 2,
+ 'network': {
+ 'bridge': 'br1',
+ 'id': 2,
+ 'label': 'public',
+ 'subnets': [
+ {
+ 'cidr': '10.0.0.0/24',
+ 'ips': [
+ {
+ 'address': '10.0.0.100',
+ 'type': 'fixed',
+ 'floating_ips': [
+ {'address': '5.0.0.2', 'type': 'floating'},
+ ],
+ }
+ ],
+ },
+ ]
+ }
+ }
+]
+ALL_IPS = []
+# Flatten NW_CACHE into a flat list of ip dicts: each fixed ip (with its
+# 'floating_ips' key stripped) followed by its floating ips.
+for cache in NW_CACHE:
+    for subnet in cache['network']['subnets']:
+        for fixed in subnet['ips']:
+            sanitized = dict(fixed)
+            sanitized.pop('floating_ips')
+            ALL_IPS.append(sanitized)
+            for floating in fixed['floating_ips']:
+                ALL_IPS.append(floating)
+# NOTE(review): sorting a list of dicts relies on Python 2 comparison
+# semantics (this module also uses itervalues()); invalid on Python 3.
+ALL_IPS.sort()
+
+
+def fake_compute_get(*args, **kwargs):
+    """Return one instance object (UUID3) with the fixture network cache."""
+    inst = fakes.stub_instance(1, uuid=UUID3, nw_cache=NW_CACHE)
+    return fake_instance.fake_instance_obj(args[1],
+        expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, **inst)
+
+
+def fake_compute_get_all(*args, **kwargs):
+    """Return an InstanceList of two instances sharing NW_CACHE."""
+    db_list = [
+        fakes.stub_instance(1, uuid=UUID1, nw_cache=NW_CACHE),
+        fakes.stub_instance(2, uuid=UUID2, nw_cache=NW_CACHE),
+    ]
+    fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+    return instance_obj._make_instance_list(args[1],
+                                            objects.InstanceList(),
+                                            db_list, fields)
+
+
+class ExtendedIpsTestV21(test.TestCase):
+    """Verify OS-EXT-IPS attributes on server show and detail (v2.1)."""
+
+    content_type = 'application/json'
+    prefix = 'OS-EXT-IPS:'
+
+    def setUp(self):
+        super(ExtendedIpsTestV21, self).setUp()
+        fakes.stub_out_nw_api(self.stubs)
+        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+        self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+
+    def _make_request(self, url):
+        req = webob.Request.blank(url)
+        req.headers['Accept'] = self.content_type
+        res = req.get_response(fakes.wsgi_app_v21(init_only=('servers',)))
+        return res
+
+    def _get_server(self, body):
+        return jsonutils.loads(body).get('server')
+
+    def _get_servers(self, body):
+        return jsonutils.loads(body).get('servers')
+
+    def _get_ips(self, server):
+        # Python 2 idiom; yields every ip dict across all networks.
+        for network in server['addresses'].itervalues():
+            for ip in network:
+                yield ip
+
+    def assertServerStates(self, server):
+        """Assert every address exposes the prefixed 'type' field."""
+        results = []
+        for ip in self._get_ips(server):
+            results.append({'address': ip.get('addr'),
+                            'type': ip.get('%stype' % self.prefix)})
+
+        self.assertEqual(ALL_IPS, sorted(results))
+
+    def test_show(self):
+        url = '/v2/fake/servers/%s' % UUID3
+        res = self._make_request(url)
+
+        self.assertEqual(res.status_int, 200)
+        self.assertServerStates(self._get_server(res.body))
+
+    def test_detail(self):
+        url = '/v2/fake/servers/detail'
+        res = self._make_request(url)
+
+        self.assertEqual(res.status_int, 200)
+        for i, server in enumerate(self._get_servers(res.body)):
+            self.assertServerStates(server)
+
+
+class ExtendedIpsTestV2(ExtendedIpsTestV21):
+    """Same checks against the v2 API with the Extended_ips extension."""
+
+    def setUp(self):
+        super(ExtendedIpsTestV2, self).setUp()
+        self.flags(
+            osapi_compute_extension=[
+                'nova.api.openstack.compute.contrib.select_extensions'],
+            osapi_compute_ext_list=['Extended_ips'])
+
+    def _make_request(self, url):
+        # v2 app instead of the v2.1 app used by the parent class.
+        req = webob.Request.blank(url)
+        req.headers['Accept'] = self.content_type
+        res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+        return res
+
+
+class ExtendedIpsXmlTest(ExtendedIpsTestV2):
+    """XML variant: parse responses with lxml and namespaced attributes."""
+
+    content_type = 'application/xml'
+    prefix = '{%s}' % extended_ips.Extended_ips.namespace
+
+    def _get_server(self, body):
+        return etree.XML(body)
+
+    def _get_servers(self, body):
+        return etree.XML(body).getchildren()
+
+    def _get_ips(self, server):
+        for network in server.find('{%s}addresses' % xmlutil.XMLNS_V11):
+            for ip in network:
+                yield ip
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_extended_ips_mac.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_ips_mac.py
new file mode 100644
index 0000000000..c3e94600aa
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_ips_mac.py
@@ -0,0 +1,196 @@
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import extended_ips_mac
+from nova.api.openstack import xmlutil
+from nova import compute
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+NW_CACHE = [
+ {
+ 'address': 'aa:aa:aa:aa:aa:aa',
+ 'id': 1,
+ 'network': {
+ 'bridge': 'br0',
+ 'id': 1,
+ 'label': 'private',
+ 'subnets': [
+ {
+ 'cidr': '192.168.1.0/24',
+ 'ips': [
+ {
+ 'address': '192.168.1.100',
+ 'type': 'fixed',
+ 'floating_ips': [
+ {'address': '5.0.0.1', 'type': 'floating'},
+ ],
+ },
+ ],
+ },
+ ]
+ }
+ },
+ {
+ 'address': 'bb:bb:bb:bb:bb:bb',
+ 'id': 2,
+ 'network': {
+ 'bridge': 'br1',
+ 'id': 2,
+ 'label': 'public',
+ 'subnets': [
+ {
+ 'cidr': '10.0.0.0/24',
+ 'ips': [
+ {
+ 'address': '10.0.0.100',
+ 'type': 'fixed',
+ 'floating_ips': [
+ {'address': '5.0.0.2', 'type': 'floating'},
+ ],
+ }
+ ],
+ },
+ ]
+ }
+ }
+]
+ALL_IPS = []
+# Flatten NW_CACHE into ip dicts annotated with the owning vif's mac
+# address; the 'type' key is dropped because this extension only exposes
+# mac_addr alongside the address.
+for cache in NW_CACHE:
+    for subnet in cache['network']['subnets']:
+        for fixed in subnet['ips']:
+            sanitized = dict(fixed)
+            sanitized['mac_address'] = cache['address']
+            sanitized.pop('floating_ips')
+            sanitized.pop('type')
+            ALL_IPS.append(sanitized)
+            for floating in fixed['floating_ips']:
+                sanitized = dict(floating)
+                sanitized['mac_address'] = cache['address']
+                sanitized.pop('type')
+                ALL_IPS.append(sanitized)
+# NOTE(review): dict-list sort relies on Python 2 comparison semantics.
+ALL_IPS.sort()
+
+
+def fake_compute_get(*args, **kwargs):
+    """Return one instance object (UUID3) with the fixture network cache."""
+    inst = fakes.stub_instance(1, uuid=UUID3, nw_cache=NW_CACHE)
+    return fake_instance.fake_instance_obj(args[1],
+        expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, **inst)
+
+
+def fake_compute_get_all(*args, **kwargs):
+    """Return an InstanceList of two instances sharing NW_CACHE."""
+    db_list = [
+        fakes.stub_instance(1, uuid=UUID1, nw_cache=NW_CACHE),
+        fakes.stub_instance(2, uuid=UUID2, nw_cache=NW_CACHE),
+    ]
+    fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+    return instance_obj._make_instance_list(args[1],
+                                            objects.InstanceList(),
+                                            db_list, fields)
+
+
+class ExtendedIpsMacTestV21(test.TestCase):
+    """Verify OS-EXT-IPS-MAC mac_addr attributes on show/detail (v2.1)."""
+
+    content_type = 'application/json'
+    prefix = '%s:' % extended_ips_mac.Extended_ips_mac.alias
+
+    def setUp(self):
+        super(ExtendedIpsMacTestV21, self).setUp()
+        fakes.stub_out_nw_api(self.stubs)
+        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+        self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+
+    def _make_request(self, url):
+        req = webob.Request.blank(url)
+        req.headers['Accept'] = self.content_type
+        res = req.get_response(fakes.wsgi_app_v21(init_only=('servers',)))
+        return res
+
+    def _get_server(self, body):
+        return jsonutils.loads(body).get('server')
+
+    def _get_servers(self, body):
+        return jsonutils.loads(body).get('servers')
+
+    def _get_ips(self, server):
+        # Python 2 idiom; yields every ip dict across all networks.
+        for network in server['addresses'].itervalues():
+            for ip in network:
+                yield ip
+
+    def assertServerStates(self, server):
+        """Assert every address exposes the prefixed mac_addr field."""
+        results = []
+        for ip in self._get_ips(server):
+            results.append({'address': ip.get('addr'),
+                            'mac_address': ip.get('%smac_addr' % self.prefix)})
+
+        self.assertEqual(ALL_IPS, sorted(results))
+
+    def test_show(self):
+        url = '/v2/fake/servers/%s' % UUID3
+        res = self._make_request(url)
+
+        self.assertEqual(res.status_int, 200)
+        self.assertServerStates(self._get_server(res.body))
+
+    def test_detail(self):
+        url = '/v2/fake/servers/detail'
+        res = self._make_request(url)
+
+        self.assertEqual(res.status_int, 200)
+        for _i, server in enumerate(self._get_servers(res.body)):
+            self.assertServerStates(server)
+
+
+class ExtendedIpsMacTestV2(ExtendedIpsMacTestV21):
+    """Same checks against the v2 API with Extended_ips_mac enabled.
+
+    NOTE(review): content_type and prefix restate the inherited values
+    verbatim; they are redundant but harmless.
+    """
+
+    content_type = 'application/json'
+    prefix = '%s:' % extended_ips_mac.Extended_ips_mac.alias
+
+    def setUp(self):
+        super(ExtendedIpsMacTestV2, self).setUp()
+        self.flags(
+            osapi_compute_extension=[
+                'nova.api.openstack.compute.contrib.select_extensions'],
+            osapi_compute_ext_list=['Extended_ips_mac'])
+
+    def _make_request(self, url):
+        # v2 app instead of the v2.1 app used by the parent class.
+        req = webob.Request.blank(url)
+        req.headers['Accept'] = self.content_type
+        res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+        return res
+
+
+class ExtendedIpsMacXmlTest(ExtendedIpsMacTestV2):
+    """XML variant: parse responses with lxml and namespaced attributes."""
+
+    content_type = 'application/xml'
+    prefix = '{%s}' % extended_ips_mac.Extended_ips_mac.namespace
+
+    def _get_server(self, body):
+        return etree.XML(body)
+
+    def _get_servers(self, body):
+        return etree.XML(body).getchildren()
+
+    def _get_ips(self, server):
+        for network in server.find('{%s}addresses' % xmlutil.XMLNS_V11):
+            for ip in network:
+                yield ip
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_rescue_with_image.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_rescue_with_image.py
index 42a8382595..42a8382595 100644
--- a/nova/tests/api/openstack/compute/contrib/test_extended_rescue_with_image.py
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_rescue_with_image.py
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_extended_server_attributes.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_server_attributes.py
new file mode 100644
index 0000000000..f944289efe
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_server_attributes.py
@@ -0,0 +1,148 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import extended_server_attributes
+from nova import compute
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+from oslo.config import cfg
+
+
+NAME_FMT = cfg.CONF.instance_name_template
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+
+
+def fake_compute_get(*args, **kwargs):
+    """Return one Instance (UUID3) placed on host-fake/node-fake."""
+    fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+    return objects.Instance._from_db_object(
+        args[1], objects.Instance(),
+        fakes.stub_instance(1, uuid=UUID3, host="host-fake",
+                            node="node-fake"), fields)
+
+
+def fake_compute_get_all(*args, **kwargs):
+    """Return an InstanceList of two instances on distinct hosts/nodes."""
+    db_list = [
+        fakes.stub_instance(1, uuid=UUID1, host="host-1", node="node-1"),
+        fakes.stub_instance(2, uuid=UUID2, host="host-2", node="node-2")
+    ]
+    fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+    return instance_obj._make_instance_list(args[1],
+                                            objects.InstanceList(),
+                                            db_list, fields)
+
+
+class ExtendedServerAttributesTestV21(test.TestCase):
+    """Verify OS-EXT-SRV-ATTR host/instance_name/hypervisor_hostname."""
+
+    content_type = 'application/json'
+    prefix = 'OS-EXT-SRV-ATTR:'
+    fake_url = '/v2/fake'
+
+    def setUp(self):
+        super(ExtendedServerAttributesTestV21, self).setUp()
+        fakes.stub_out_nw_api(self.stubs)
+        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+        self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+        self.stubs.Set(db, 'instance_get_by_uuid', fake_compute_get)
+
+    def _make_request(self, url):
+        req = webob.Request.blank(url)
+        req.headers['Accept'] = self.content_type
+        res = req.get_response(
+            fakes.wsgi_app_v21(init_only=('servers',
+                                          'os-extended-server-attributes')))
+        return res
+
+    def _get_server(self, body):
+        return jsonutils.loads(body).get('server')
+
+    def _get_servers(self, body):
+        return jsonutils.loads(body).get('servers')
+
+    def assertServerAttributes(self, server, host, node, instance_name):
+        """Assert the three prefixed extended attributes on a server dict."""
+        self.assertEqual(server.get('%shost' % self.prefix), host)
+        self.assertEqual(server.get('%sinstance_name' % self.prefix),
+                         instance_name)
+        self.assertEqual(server.get('%shypervisor_hostname' % self.prefix),
+                         node)
+
+    def test_show(self):
+        url = self.fake_url + '/servers/%s' % UUID3
+        res = self._make_request(url)
+
+        self.assertEqual(res.status_int, 200)
+        self.assertServerAttributes(self._get_server(res.body),
+                                    host='host-fake',
+                                    node='node-fake',
+                                    instance_name=NAME_FMT % 1)
+
+    def test_detail(self):
+        url = self.fake_url + '/servers/detail'
+        res = self._make_request(url)
+
+        self.assertEqual(res.status_int, 200)
+        for i, server in enumerate(self._get_servers(res.body)):
+            self.assertServerAttributes(server,
+                                        host='host-%s' % (i + 1),
+                                        node='node-%s' % (i + 1),
+                                        instance_name=NAME_FMT % (i + 1))
+
+    def test_no_instance_passthrough_404(self):
+        # InstanceNotFound from the compute API must surface as HTTP 404.
+
+        def fake_compute_get(*args, **kwargs):
+            raise exception.InstanceNotFound(instance_id='fake')
+
+        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+        url = self.fake_url + '/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
+        res = self._make_request(url)
+
+        self.assertEqual(res.status_int, 404)
+
+
+class ExtendedServerAttributesTestV2(ExtendedServerAttributesTestV21):
+    """Same checks against the v2 API with the extension enabled."""
+
+    def setUp(self):
+        super(ExtendedServerAttributesTestV2, self).setUp()
+        self.flags(
+            osapi_compute_extension=[
+                'nova.api.openstack.compute.contrib.select_extensions'],
+            osapi_compute_ext_list=['Extended_server_attributes'])
+
+    def _make_request(self, url):
+        # v2 app instead of the v2.1 app used by the parent class.
+        req = webob.Request.blank(url)
+        req.headers['Accept'] = self.content_type
+        res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+        return res
+
+
+class ExtendedServerAttributesXmlTest(ExtendedServerAttributesTestV2):
+    """XML variant: parse responses with lxml and a namespaced prefix."""
+
+    content_type = 'application/xml'
+    ext = extended_server_attributes
+    prefix = '{%s}' % ext.Extended_server_attributes.namespace
+
+    def _get_server(self, body):
+        return etree.XML(body)
+
+    def _get_servers(self, body):
+        return etree.XML(body).getchildren()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_extended_status.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_status.py
new file mode 100644
index 0000000000..b47562f7a7
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_status.py
@@ -0,0 +1,148 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import extended_status
+from nova import compute
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+
+
+def fake_compute_get(*args, **kwargs):
+    """Return one instance (UUID3) with distinctive task/vm/power states."""
+    inst = fakes.stub_instance(1, uuid=UUID3, task_state="kayaking",
+                               vm_state="slightly crunchy", power_state=1)
+    return fake_instance.fake_instance_obj(args[1], **inst)
+
+
+def fake_compute_get_all(*args, **kwargs):
+    """Return an InstanceList of two instances with index-numbered states."""
+    db_list = [
+        fakes.stub_instance(1, uuid=UUID1, task_state="task-1",
+                            vm_state="vm-1", power_state=1),
+        fakes.stub_instance(2, uuid=UUID2, task_state="task-2",
+                            vm_state="vm-2", power_state=2),
+    ]
+
+    fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+    return instance_obj._make_instance_list(args[1],
+                                            objects.InstanceList(),
+                                            db_list, fields)
+
+
+class ExtendedStatusTestV21(test.TestCase):
+    """Verify OS-EXT-STS vm_state/power_state/task_state on show/detail."""
+
+    content_type = 'application/json'
+    prefix = 'OS-EXT-STS:'
+    fake_url = '/v2/fake'
+
+    def _set_flags(self):
+        # Hook for subclasses; the v2.1 API needs no extension flags.
+        pass
+
+    def _make_request(self, url):
+        req = webob.Request.blank(url)
+        req.headers['Accept'] = self.content_type
+        res = req.get_response(fakes.wsgi_app_v21(
+            init_only=('servers',
+                       'os-extended-status')))
+        return res
+
+    def setUp(self):
+        super(ExtendedStatusTestV21, self).setUp()
+        fakes.stub_out_nw_api(self.stubs)
+        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+        self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+        self._set_flags()
+        return_server = fakes.fake_instance_get()
+        self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+    def _get_server(self, body):
+        return jsonutils.loads(body).get('server')
+
+    def _get_servers(self, body):
+        return jsonutils.loads(body).get('servers')
+
+    def assertServerStates(self, server, vm_state, power_state, task_state):
+        """Assert the three prefixed extended-status fields on a server."""
+        self.assertEqual(server.get('%svm_state' % self.prefix), vm_state)
+        self.assertEqual(int(server.get('%spower_state' % self.prefix)),
+                         power_state)
+        self.assertEqual(server.get('%stask_state' % self.prefix), task_state)
+
+    def test_show(self):
+        url = self.fake_url + '/servers/%s' % UUID3
+        res = self._make_request(url)
+
+        self.assertEqual(res.status_int, 200)
+        self.assertServerStates(self._get_server(res.body),
+                                vm_state='slightly crunchy',
+                                power_state=1,
+                                task_state='kayaking')
+
+    def test_detail(self):
+        url = self.fake_url + '/servers/detail'
+        res = self._make_request(url)
+
+        self.assertEqual(res.status_int, 200)
+        for i, server in enumerate(self._get_servers(res.body)):
+            self.assertServerStates(server,
+                                    vm_state='vm-%s' % (i + 1),
+                                    power_state=(i + 1),
+                                    task_state='task-%s' % (i + 1))
+
+    def test_no_instance_passthrough_404(self):
+        # InstanceNotFound from the compute API must surface as HTTP 404.
+
+        def fake_compute_get(*args, **kwargs):
+            raise exception.InstanceNotFound(instance_id='fake')
+
+        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+        url = self.fake_url + '/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
+        res = self._make_request(url)
+
+        self.assertEqual(res.status_int, 404)
+
+
+class ExtendedStatusTestV2(ExtendedStatusTestV21):
+    """Same checks against the v2 API with the Extended_status extension."""
+
+    def _set_flags(self):
+        self.flags(
+            osapi_compute_extension=[
+                'nova.api.openstack.compute.contrib.select_extensions'],
+            osapi_compute_ext_list=['Extended_status'])
+
+    def _make_request(self, url):
+        # v2 app instead of the v2.1 app used by the parent class.
+        req = webob.Request.blank(url)
+        req.headers['Accept'] = self.content_type
+        res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+        return res
+
+
+class ExtendedStatusXmlTest(ExtendedStatusTestV2):
+    """XML variant: parse responses with lxml and a namespaced prefix."""
+
+    content_type = 'application/xml'
+    prefix = '{%s}' % extended_status.Extended_status.namespace
+
+    def _get_server(self, body):
+        return etree.XML(body)
+
+    def _get_servers(self, body):
+        return etree.XML(body).getchildren()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_extended_virtual_interfaces_net.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_virtual_interfaces_net.py
new file mode 100644
index 0000000000..851848d7a5
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_virtual_interfaces_net.py
@@ -0,0 +1,123 @@
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import extended_virtual_interfaces_net
+from nova.api.openstack import wsgi
+from nova import compute
+from nova import network
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+
+
+FAKE_VIFS = [{'uuid': '00000000-0000-0000-0000-00000000000000000',
+ 'address': '00-00-00-00-00-00',
+ 'net_uuid': '00000000-0000-0000-0000-00000000000000001'},
+ {'uuid': '11111111-1111-1111-1111-11111111111111111',
+ 'address': '11-11-11-11-11-11',
+ 'net_uuid': '11111111-1111-1111-1111-11111111111111112'}]
+
+EXPECTED_NET_UUIDS = ['00000000-0000-0000-0000-00000000000000001',
+ '11111111-1111-1111-1111-11111111111111112']
+
+
+def compute_api_get(self, context, instance_id, expected_attrs=None,
+                    want_objects=False):
+    """Minimal compute API get() stub returning a bare instance dict."""
+    return dict(uuid=FAKE_UUID, id=instance_id, instance_type_id=1, host='bob')
+
+
+def get_vifs_by_instance(self, context, instance_id):
+    """Network API stub: every instance has the two FAKE_VIFS."""
+    return FAKE_VIFS
+
+
+def get_vif_by_mac_address(self, context, mac_address):
+    """Network API stub mapping each known mac to its network uuid."""
+    if mac_address == "00-00-00-00-00-00":
+        return {'net_uuid': '00000000-0000-0000-0000-00000000000000001'}
+    else:
+        return {'net_uuid': '11111111-1111-1111-1111-11111111111111112'}
+
+
+class ExtendedServerVIFNetTest(test.NoDBTestCase):
+ content_type = 'application/json'
+ prefix = "%s:" % extended_virtual_interfaces_net. \
+ Extended_virtual_interfaces_net.alias
+
+ def setUp(self):
+ super(ExtendedServerVIFNetTest, self).setUp()
+ self.stubs.Set(compute.api.API, "get",
+ compute_api_get)
+ self.stubs.Set(network.api.API, "get_vifs_by_instance",
+ get_vifs_by_instance)
+ self.stubs.Set(network.api.API, "get_vif_by_mac_address",
+ get_vif_by_mac_address)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Virtual_interfaces',
+ 'Extended_virtual_interfaces_net'])
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app(init_only=(
+ 'os-virtual-interfaces', 'OS-EXT-VIF-NET')))
+ return res
+
+ def _get_vifs(self, body):
+ return jsonutils.loads(body).get('virtual_interfaces')
+
+ def _get_net_id(self, vifs):
+ for vif in vifs:
+ yield vif['%snet_id' % self.prefix]
+
+ def assertVIFs(self, vifs):
+ result = []
+ for net_id in self._get_net_id(vifs):
+ result.append(net_id)
+ sorted(result)
+
+ for i, net_uuid in enumerate(result):
+ self.assertEqual(net_uuid, EXPECTED_NET_UUIDS[i])
+
+ def test_get_extend_virtual_interfaces_list(self):
+ res = self._make_request('/v2/fake/servers/abcd/os-virtual-interfaces')
+
+ self.assertEqual(res.status_int, 200)
+ self.assertVIFs(self._get_vifs(res.body))
+
+
+class ExtendedServerVIFNetSerializerTest(ExtendedServerVIFNetTest):
+    """XML variant exercising the extension's serializer template."""
+
+    content_type = 'application/xml'
+    prefix = "{%s}" % extended_virtual_interfaces_net. \
+        Extended_virtual_interfaces_net.namespace
+
+    def setUp(self):
+        super(ExtendedServerVIFNetSerializerTest, self).setUp()
+        self.namespace = wsgi.XMLNS_V11
+        self.serializer = extended_virtual_interfaces_net. \
+            ExtendedVirtualInterfaceNetTemplate()
+
+    def _get_vifs(self, body):
+        return etree.XML(body).getchildren()
+
+    def _get_net_id(self, vifs):
+        # XML responses carry net_id as a namespaced element attribute.
+        for vif in vifs:
+            yield vif.attrib['%snet_id' % self.prefix]
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_extended_volumes.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_volumes.py
new file mode 100644
index 0000000000..d441013e8d
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_volumes.py
@@ -0,0 +1,124 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import extended_volumes
+from nova import compute
+from nova import db
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+
+
+def fake_compute_get(*args, **kwargs):
+    """Return a single instance object with uuid UUID1."""
+    inst = fakes.stub_instance(1, uuid=UUID1)
+    return fake_instance.fake_instance_obj(args[1], **inst)
+
+
+def fake_compute_get_all(*args, **kwargs):
+    """Return an InstanceList with two stub instances."""
+    db_list = [fakes.stub_instance(1), fakes.stub_instance(2)]
+    fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+    return instance_obj._make_instance_list(args[1],
+                                            objects.InstanceList(),
+                                            db_list, fields)
+
+
+def fake_bdms_get_all_by_instance(*args, **kwargs):
+    """Return two volume-backed block device mappings (UUID1, UUID2)."""
+    return [fake_block_device.FakeDbBlockDeviceDict(
+            {'volume_id': UUID1, 'source_type': 'volume',
+             'destination_type': 'volume', 'id': 1}),
+            fake_block_device.FakeDbBlockDeviceDict(
+            {'volume_id': UUID2, 'source_type': 'volume',
+             'destination_type': 'volume', 'id': 2})]
+
+
+class ExtendedVolumesTest(test.TestCase):
+    """Verify os-extended-volumes:volumes_attached on show/detail (v2)."""
+
+    content_type = 'application/json'
+    prefix = 'os-extended-volumes:'
+
+    def setUp(self):
+        super(ExtendedVolumesTest, self).setUp()
+        fakes.stub_out_nw_api(self.stubs)
+        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+        self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+                       fake_bdms_get_all_by_instance)
+        self.flags(
+            osapi_compute_extension=[
+                'nova.api.openstack.compute.contrib.select_extensions'],
+            osapi_compute_ext_list=['Extended_volumes'])
+        return_server = fakes.fake_instance_get()
+        self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+    def _make_request(self, url):
+        req = webob.Request.blank(url)
+        req.headers['Accept'] = self.content_type
+        res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+        return res
+
+    def _get_server(self, body):
+        return jsonutils.loads(body).get('server')
+
+    def _get_servers(self, body):
+        return jsonutils.loads(body).get('servers')
+
+    def test_show(self):
+        url = '/v2/fake/servers/%s' % UUID1
+        res = self._make_request(url)
+
+        self.assertEqual(res.status_int, 200)
+        server = self._get_server(res.body)
+        exp_volumes = [{'id': UUID1}, {'id': UUID2}]
+        # The JSON and XML subclasses share this test; extract the
+        # attached-volume list according to the active content type.
+        if self.content_type == 'application/json':
+            actual = server.get('%svolumes_attached' % self.prefix)
+        elif self.content_type == 'application/xml':
+            actual = [dict(elem.items()) for elem in
+                      server.findall('%svolume_attached' % self.prefix)]
+        self.assertEqual(exp_volumes, actual)
+
+    def test_detail(self):
+        url = '/v2/fake/servers/detail'
+        res = self._make_request(url)
+
+        self.assertEqual(res.status_int, 200)
+        exp_volumes = [{'id': UUID1}, {'id': UUID2}]
+        for i, server in enumerate(self._get_servers(res.body)):
+            if self.content_type == 'application/json':
+                actual = server.get('%svolumes_attached' % self.prefix)
+            elif self.content_type == 'application/xml':
+                actual = [dict(elem.items()) for elem in
+                          server.findall('%svolume_attached' % self.prefix)]
+            self.assertEqual(exp_volumes, actual)
+
+
+class ExtendedVolumesXmlTest(ExtendedVolumesTest):
+    """XML variant: parse responses with lxml and a namespaced prefix."""
+
+    content_type = 'application/xml'
+    prefix = '{%s}' % extended_volumes.Extended_volumes.namespace
+
+    def _get_server(self, body):
+        return etree.XML(body)
+
+    def _get_servers(self, body):
+        return etree.XML(body).getchildren()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_fixed_ips.py b/nova/tests/unit/api/openstack/compute/contrib/test_fixed_ips.py
new file mode 100644
index 0000000000..f331da80fe
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_fixed_ips.py
@@ -0,0 +1,256 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+
+from nova.api.openstack.compute.contrib import fixed_ips as fixed_ips_v2
+from nova.api.openstack.compute.plugins.v3 import fixed_ips as fixed_ips_v21
+from nova import context
+from nova import db
+from nova import exception
+from nova.i18n import _
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.objects import test_network
+
+
# Three canned fixed-IP rows used by every fake below: two live
# addresses on network 1 plus one soft-deleted address (10.0.0.2) used
# to verify that deleted IPs are treated as not found.
fake_fixed_ips = [{'id': 1,
                   'address': '192.168.1.1',
                   'network_id': 1,
                   'virtual_interface_id': 1,
                   'instance_uuid': '1',
                   'allocated': False,
                   'leased': False,
                   'reserved': False,
                   'host': None,
                   'instance': None,
                   'network': test_network.fake_network,
                   'created_at': None,
                   'updated_at': None,
                   'deleted_at': None,
                   'deleted': False},
                  {'id': 2,
                   'address': '192.168.1.2',
                   'network_id': 1,
                   'virtual_interface_id': 2,
                   'instance_uuid': '2',
                   'allocated': False,
                   'leased': False,
                   'reserved': False,
                   'host': None,
                   'instance': None,
                   'network': test_network.fake_network,
                   'created_at': None,
                   'updated_at': None,
                   'deleted_at': None,
                   'deleted': False},
                  # Deleted row: must never be returned by the lookups.
                  {'id': 3,
                   'address': '10.0.0.2',
                   'network_id': 1,
                   'virtual_interface_id': 3,
                   'instance_uuid': '3',
                   'allocated': False,
                   'leased': False,
                   'reserved': False,
                   'host': None,
                   'instance': None,
                   'network': test_network.fake_network,
                   'created_at': None,
                   'updated_at': None,
                   'deleted_at': None,
                   'deleted': True},
                  ]
+
+
def fake_fixed_ip_get_by_address(context, address, columns_to_join=None):
    """Return the canned fixed-IP row matching *address*.

    Raises FixedIpInvalid for the magic malformed address and
    FixedIpNotFoundForAddress when no live (non-deleted) row matches.
    """
    if address == 'inv.ali.d.ip':
        msg = _("Invalid fixed IP Address %s in request") % address
        raise exception.FixedIpInvalid(msg)
    live_rows = (row for row in fake_fixed_ips if not row['deleted'])
    for row in live_rows:
        if row['address'] == address:
            return row
    raise exception.FixedIpNotFoundForAddress(address=address)
+
+
def fake_fixed_ip_get_by_address_detailed(context, address):
    """Return (fixed_ip, network_model, None) for a live address.

    Raises FixedIpNotFoundForAddress when the address is unknown or the
    row is marked deleted.
    """
    network = {'id': 1,
               'cidr': "192.168.1.0/24"}
    for row in fake_fixed_ips:
        if row['deleted']:
            continue
        if row['address'] == address:
            return (row, FakeModel(network), None)
    raise exception.FixedIpNotFoundForAddress(address=address)
+
+
def fake_fixed_ip_update(context, address, values):
    """Apply *values* in place to the canned fixed-IP row for *address*.

    ``fake_fixed_ip_get_by_address`` raises FixedIpNotFoundForAddress
    itself and never returns None, so the previous ``if fixed_ip is
    None`` branch was unreachable and has been removed; the loop that
    copied keys one by one is replaced with ``dict.update``.
    """
    fixed_ip = fake_fixed_ip_get_by_address(context, address)
    fixed_ip.update(values)
+
+
class FakeModel(object):
    """Minimal stand-in for a SQLAlchemy model row.

    Wraps a plain dict and exposes its entries both as attributes and
    via subscripting, mirroring how real model objects are consumed.
    """

    def __init__(self, values):
        self.values = values

    def __getattr__(self, name):
        # Unknown attributes fall through to the wrapped dict.
        return self.values[name]

    def __getitem__(self, key):
        try:
            return self.values[key]
        except KeyError:
            # Match the original contract: missing keys are reported
            # as NotImplementedError, not KeyError.
            raise NotImplementedError()

    def __repr__(self):
        return '<FakeModel: %s>' % self.values
+
+
def fake_network_get_all(context):
    """Return a single canned network wrapped as a model object."""
    net_values = {'id': 1, 'cidr': "192.168.1.0/24"}
    return [FakeModel(net_values)]
+
+
class FixedIpTestV21(test.NoDBTestCase):
    # Exercises the os-fixed-ips extension against the v2.1 plugin.
    # FixedIpTestV2 below reruns everything against the v2 extension,
    # overriding only the dispatch/assertion hooks.

    fixed_ips = fixed_ips_v21
    url = '/v2/fake/os-fixed-ips'

    def setUp(self):
        super(FixedIpTestV21, self).setUp()

        # Replace the DB layer with the canned fixed-IP fakes above.
        self.stubs.Set(db, "fixed_ip_get_by_address",
                       fake_fixed_ip_get_by_address)
        self.stubs.Set(db, "fixed_ip_get_by_address_detailed",
                       fake_fixed_ip_get_by_address_detailed)
        self.stubs.Set(db, "fixed_ip_update", fake_fixed_ip_update)

        self.context = context.get_admin_context()
        self.controller = self.fixed_ips.FixedIPController()

    def _assert_equal(self, ret, exp):
        # v2.1 reports the HTTP status via the action's wsgi_code.
        self.assertEqual(ret.wsgi_code, exp)

    def _get_reserve_action(self):
        return self.controller.reserve

    def _get_unreserve_action(self):
        return self.controller.unreserve

    def test_fixed_ips_get(self):
        req = fakes.HTTPRequest.blank('%s/192.168.1.1' % self.url)
        res_dict = self.controller.show(req, '192.168.1.1')
        response = {'fixed_ip': {'cidr': '192.168.1.0/24',
                                 'hostname': None,
                                 'host': None,
                                 'address': '192.168.1.1'}}
        self.assertEqual(response, res_dict)

    def test_fixed_ips_get_bad_ip_fail(self):
        # 10.0.0.1 is not present in the canned data at all.
        req = fakes.HTTPRequest.blank('%s/10.0.0.1' % self.url)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req,
                          '10.0.0.1')

    def test_fixed_ips_get_invalid_ip_address(self):
        # 'inv.ali.d.ip' triggers FixedIpInvalid in the fake DB layer.
        req = fakes.HTTPRequest.blank('%s/inv.ali.d.ip' % self.url)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.show, req,
                          'inv.ali.d.ip')

    def test_fixed_ips_get_deleted_ip_fail(self):
        # 10.0.0.2 exists but is flagged deleted, so it must 404.
        req = fakes.HTTPRequest.blank('%s/10.0.0.2' % self.url)
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req,
                          '10.0.0.2')

    def test_fixed_ip_reserve(self):
        # Reset the module-global fake row first: fake_fixed_ip_update
        # mutates it in place and tests may run in any order.
        fake_fixed_ips[0]['reserved'] = False
        body = {'reserve': None}
        req = fakes.HTTPRequest.blank('%s/192.168.1.1/action' % self.url)
        action = self._get_reserve_action()
        result = action(req, "192.168.1.1", body)

        # v2 returns a response object; v2.1 returns None (the status
        # lives on the action), hence the 'result or action' dance.
        self._assert_equal(result or action, 202)
        self.assertEqual(fake_fixed_ips[0]['reserved'], True)

    def test_fixed_ip_reserve_bad_ip(self):
        body = {'reserve': None}
        req = fakes.HTTPRequest.blank('%s/10.0.0.1/action' % self.url)
        action = self._get_reserve_action()

        self.assertRaises(webob.exc.HTTPNotFound, action, req,
                          '10.0.0.1', body)

    def test_fixed_ip_reserve_invalid_ip_address(self):
        body = {'reserve': None}
        req = fakes.HTTPRequest.blank('%s/inv.ali.d.ip/action' % self.url)
        action = self._get_reserve_action()

        self.assertRaises(webob.exc.HTTPBadRequest,
                          action, req, 'inv.ali.d.ip', body)

    def test_fixed_ip_reserve_deleted_ip(self):
        body = {'reserve': None}
        action = self._get_reserve_action()

        req = fakes.HTTPRequest.blank('%s/10.0.0.2/action' % self.url)
        self.assertRaises(webob.exc.HTTPNotFound, action, req,
                          '10.0.0.2', body)

    def test_fixed_ip_unreserve(self):
        # Mirror of test_fixed_ip_reserve with the flag inverted.
        fake_fixed_ips[0]['reserved'] = True
        body = {'unreserve': None}
        req = fakes.HTTPRequest.blank('%s/192.168.1.1/action' % self.url)
        action = self._get_unreserve_action()
        result = action(req, "192.168.1.1", body)

        self._assert_equal(result or action, 202)
        self.assertEqual(fake_fixed_ips[0]['reserved'], False)

    def test_fixed_ip_unreserve_bad_ip(self):
        body = {'unreserve': None}
        req = fakes.HTTPRequest.blank('%s/10.0.0.1/action' % self.url)
        action = self._get_unreserve_action()

        self.assertRaises(webob.exc.HTTPNotFound, action, req,
                          '10.0.0.1', body)

    def test_fixed_ip_unreserve_invalid_ip_address(self):
        body = {'unreserve': None}
        req = fakes.HTTPRequest.blank('%s/inv.ali.d.ip/action' % self.url)
        action = self._get_unreserve_action()
        self.assertRaises(webob.exc.HTTPBadRequest,
                          action, req, 'inv.ali.d.ip', body)

    def test_fixed_ip_unreserve_deleted_ip(self):
        body = {'unreserve': None}
        req = fakes.HTTPRequest.blank('%s/10.0.0.2/action' % self.url)
        action = self._get_unreserve_action()
        self.assertRaises(webob.exc.HTTPNotFound, action, req,
                          '10.0.0.2', body)
+
+
class FixedIpTestV2(FixedIpTestV21):
    """Re-runs the fixed-IP tests against the legacy v2 extension."""

    fixed_ips = fixed_ips_v2

    def _assert_equal(self, ret, exp):
        # v2 actions return a full webob response; check its status line.
        self.assertEqual('202 Accepted', ret.status)

    def _get_reserve_action(self):
        # Both verbs route through the single v2 'action' dispatcher.
        return self.controller.action

    def _get_unreserve_action(self):
        return self.controller.action
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_flavor_access.py b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_access.py
new file mode 100644
index 0000000000..5718a826e4
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_access.py
@@ -0,0 +1,402 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from lxml import etree
+from webob import exc
+
+from nova.api.openstack.compute.contrib import flavor_access \
+ as flavor_access_v2
+from nova.api.openstack.compute import flavors as flavors_api
+from nova.api.openstack.compute.plugins.v3 import flavor_access \
+ as flavor_access_v3
+from nova import context
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
def generate_flavor(flavorid, ispublic):
    """Build a canned flavor dict for *flavorid*.

    *ispublic* is coerced to bool and stored under 'is_public'; all
    other fields carry fixed default values.
    """
    return dict(
        id=flavorid,
        flavorid=str(flavorid),
        root_gb=1,
        ephemeral_gb=1,
        name=u'test',
        deleted=False,
        created_at=datetime.datetime(2012, 1, 1, 1, 1, 1, 1),
        updated_at=None,
        memory_mb=512,
        vcpus=1,
        swap=512,
        rxtx_factor=1.0,
        disabled=False,
        extra_specs={},
        deleted_at=None,
        vcpu_weight=None,
        is_public=bool(ispublic),
    )
+
+
# Four canned flavors keyed by flavorid: '0'/'1' public, '2'/'3' private.
INSTANCE_TYPES = {
    '0': generate_flavor(0, True),
    '1': generate_flavor(1, True),
    '2': generate_flavor(2, False),
    '3': generate_flavor(3, False)}
+
+
# Project-level grants for the private flavors above: proj2/proj3 can
# use flavor '2', only proj3 can use flavor '3'.
ACCESS_LIST = [{'flavor_id': '2', 'project_id': 'proj2'},
               {'flavor_id': '2', 'project_id': 'proj3'},
               {'flavor_id': '3', 'project_id': 'proj3'}]
+
+
def fake_get_flavor_access_by_flavor_id(context, flavorid):
    """Return every ACCESS_LIST grant recorded for *flavorid*."""
    return [grant for grant in ACCESS_LIST
            if grant['flavor_id'] == flavorid]
+
+
def fake_get_flavor_by_flavor_id(context, flavorid, read_deleted=None):
    """Look up a canned flavor by flavorid (read_deleted is ignored)."""
    flavor = INSTANCE_TYPES[flavorid]
    return flavor
+
+
def _has_flavor_access(flavorid, projectid):
    """Return True if ACCESS_LIST grants *projectid* access to *flavorid*.

    The manual loop with its backslash-continued condition is replaced
    by the idiomatic ``any()`` over a generator; the truth table is
    unchanged.
    """
    return any(access['flavor_id'] == flavorid and
               access['project_id'] == projectid
               for access in ACCESS_LIST)
+
+
def fake_get_all_flavors_sorted_list(context, inactive=False,
                                     filters=None, sort_key='flavorid',
                                     sort_dir='asc', limit=None, marker=None):
    """Fake db.flavor_get_all filtering canned flavors by is_public.

    With no is_public filter every flavor is returned.  Otherwise a
    flavor is included when its 'is_public' flag matches the filter,
    or — for an is_public=True query — when the request context's
    project has an explicit access grant to the (private) flavor.
    Results are sorted by *sort_key*; the remaining parameters exist
    only for signature compatibility and are ignored.
    """
    if filters is None or filters['is_public'] is None:
        return sorted(INSTANCE_TYPES.values(), key=lambda item: item[sort_key])

    res = {}
    # NOTE: .items() replaces the Python-2-only .iteritems() so this
    # fake also runs under Python 3; iteration behavior is identical.
    for k, v in INSTANCE_TYPES.items():
        if filters['is_public'] and _has_flavor_access(k, context.project_id):
            res.update({k: v})
            continue
        if v['is_public'] == filters['is_public']:
            res.update({k: v})

    return sorted(res.values(), key=lambda item: item[sort_key])
+
+
class FakeRequest(object):
    # Minimal request stand-in: carries an admin context in environ and
    # resolves flavors straight from the canned INSTANCE_TYPES table.
    environ = {"nova.context": context.get_admin_context()}

    def get_db_flavor(self, flavor_id):
        return INSTANCE_TYPES[flavor_id]
+
+
class FakeResponse(object):
    """Canned controller response holding one flavor and a flavor list.

    The action controllers under test mutate ``obj`` in place, so the
    tests inspect it after each call.
    """

    obj = {
        'flavor': {'id': '0'},
        'flavors': [{'id': '0'}, {'id': '2'}],
    }

    def attach(self, **kwargs):
        """Ignore serializer attachment; nothing to do for a fake."""
+
+
class FlavorAccessTestV21(test.NoDBTestCase):
    # Tests the flavor-access extension against the v2.1 controllers.
    # FlavorAccessTestV20 below re-runs the whole suite against v2.0 by
    # overriding only these class attributes.
    api_version = "2.1"
    FlavorAccessController = flavor_access_v3.FlavorAccessController
    FlavorActionController = flavor_access_v3.FlavorActionController
    _prefix = "/v3"
    validation_ex = exception.ValidationError

    def setUp(self):
        super(FlavorAccessTestV21, self).setUp()
        self.flavor_controller = flavors_api.Controller()
        self.req = FakeRequest()
        self.context = self.req.environ['nova.context']
        # Route all flavor DB access through the canned fakes above.
        self.stubs.Set(db, 'flavor_get_by_flavor_id',
                       fake_get_flavor_by_flavor_id)
        self.stubs.Set(db, 'flavor_get_all',
                       fake_get_all_flavors_sorted_list)
        self.stubs.Set(db, 'flavor_access_get_by_flavor_id',
                       fake_get_flavor_access_by_flavor_id)

        self.flavor_access_controller = self.FlavorAccessController()
        self.flavor_action_controller = self.FlavorActionController()

    def _verify_flavor_list(self, result, expected):
        # result already sorted by flavor_id
        self.assertEqual(len(result), len(expected))

        for d1, d2 in zip(result, expected):
            self.assertEqual(d1['id'], d2['id'])

    def test_list_flavor_access_public(self):
        # query os-flavor-access on public flavor should return 404
        self.assertRaises(exc.HTTPNotFound,
                          self.flavor_access_controller.index,
                          self.req, '1')

    def test_list_flavor_access_private(self):
        # Flavor '2' has grants for proj2 and proj3 in ACCESS_LIST.
        expected = {'flavor_access': [
            {'flavor_id': '2', 'tenant_id': 'proj2'},
            {'flavor_id': '2', 'tenant_id': 'proj3'}]}
        result = self.flavor_access_controller.index(self.req, '2')
        self.assertEqual(result, expected)

    def test_list_with_no_context(self):
        req = fakes.HTTPRequest.blank(self._prefix + '/flavors/fake/flavors')

        def fake_authorize(context, target=None, action=None):
            raise exception.PolicyNotAuthorized(action='index')

        # Patch the authorize hook of whichever API version is under test.
        if self.api_version == "2.1":
            self.stubs.Set(flavor_access_v3,
                           'authorize',
                           fake_authorize)
        else:
            self.stubs.Set(flavor_access_v2,
                           'authorize',
                           fake_authorize)

        self.assertRaises(exception.PolicyNotAuthorized,
                          self.flavor_access_controller.index,
                          req, 'fake')

    def test_list_flavor_with_admin_default_proj1(self):
        # proj1 has no private grants, so only public flavors appear.
        expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
        req = fakes.HTTPRequest.blank(self._prefix + '/fake/flavors',
                                      use_admin_context=True)
        req.environ['nova.context'].project_id = 'proj1'
        result = self.flavor_controller.index(req)
        self._verify_flavor_list(result['flavors'], expected['flavors'])

    def test_list_flavor_with_admin_default_proj2(self):
        # proj2 additionally sees private flavor '2' via its grant.
        expected = {'flavors': [{'id': '0'}, {'id': '1'}, {'id': '2'}]}
        req = fakes.HTTPRequest.blank(self._prefix + '/flavors',
                                      use_admin_context=True)
        req.environ['nova.context'].project_id = 'proj2'
        result = self.flavor_controller.index(req)
        self._verify_flavor_list(result['flavors'], expected['flavors'])

    def test_list_flavor_with_admin_ispublic_true(self):
        expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
        url = self._prefix + '/flavors?is_public=true'
        req = fakes.HTTPRequest.blank(url,
                                      use_admin_context=True)
        result = self.flavor_controller.index(req)
        self._verify_flavor_list(result['flavors'], expected['flavors'])

    def test_list_flavor_with_admin_ispublic_false(self):
        expected = {'flavors': [{'id': '2'}, {'id': '3'}]}
        url = self._prefix + '/flavors?is_public=false'
        req = fakes.HTTPRequest.blank(url,
                                      use_admin_context=True)
        result = self.flavor_controller.index(req)
        self._verify_flavor_list(result['flavors'], expected['flavors'])

    def test_list_flavor_with_admin_ispublic_false_proj2(self):
        expected = {'flavors': [{'id': '2'}, {'id': '3'}]}
        url = self._prefix + '/flavors?is_public=false'
        req = fakes.HTTPRequest.blank(url,
                                      use_admin_context=True)
        req.environ['nova.context'].project_id = 'proj2'
        result = self.flavor_controller.index(req)
        self._verify_flavor_list(result['flavors'], expected['flavors'])

    def test_list_flavor_with_admin_ispublic_none(self):
        # is_public=none means "no filter": all four flavors come back.
        expected = {'flavors': [{'id': '0'}, {'id': '1'}, {'id': '2'},
                                {'id': '3'}]}
        url = self._prefix + '/flavors?is_public=none'
        req = fakes.HTTPRequest.blank(url,
                                      use_admin_context=True)
        result = self.flavor_controller.index(req)
        self._verify_flavor_list(result['flavors'], expected['flavors'])

    def test_list_flavor_with_no_admin_default(self):
        # Non-admin requests only ever see public flavors, regardless
        # of any is_public query parameter (next three tests).
        expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
        req = fakes.HTTPRequest.blank(self._prefix + '/flavors',
                                      use_admin_context=False)
        result = self.flavor_controller.index(req)
        self._verify_flavor_list(result['flavors'], expected['flavors'])

    def test_list_flavor_with_no_admin_ispublic_true(self):
        expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
        url = self._prefix + '/flavors?is_public=true'
        req = fakes.HTTPRequest.blank(url,
                                      use_admin_context=False)
        result = self.flavor_controller.index(req)
        self._verify_flavor_list(result['flavors'], expected['flavors'])

    def test_list_flavor_with_no_admin_ispublic_false(self):
        expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
        url = self._prefix + '/flavors?is_public=false'
        req = fakes.HTTPRequest.blank(url,
                                      use_admin_context=False)
        result = self.flavor_controller.index(req)
        self._verify_flavor_list(result['flavors'], expected['flavors'])

    def test_list_flavor_with_no_admin_ispublic_none(self):
        expected = {'flavors': [{'id': '0'}, {'id': '1'}]}
        url = self._prefix + '/flavors?is_public=none'
        req = fakes.HTTPRequest.blank(url,
                                      use_admin_context=False)
        result = self.flavor_controller.index(req)
        self._verify_flavor_list(result['flavors'], expected['flavors'])

    def test_show(self):
        # The show hook annotates the response object in place with the
        # os-flavor-access:is_public attribute.
        resp = FakeResponse()
        self.flavor_action_controller.show(self.req, resp, '0')
        self.assertEqual({'id': '0', 'os-flavor-access:is_public': True},
                         resp.obj['flavor'])
        self.flavor_action_controller.show(self.req, resp, '2')
        self.assertEqual({'id': '0', 'os-flavor-access:is_public': False},
                         resp.obj['flavor'])

    def test_detail(self):
        resp = FakeResponse()
        self.flavor_action_controller.detail(self.req, resp)
        self.assertEqual([{'id': '0', 'os-flavor-access:is_public': True},
                          {'id': '2', 'os-flavor-access:is_public': False}],
                         resp.obj['flavors'])

    def test_create(self):
        resp = FakeResponse()
        self.flavor_action_controller.create(self.req, {}, resp)
        self.assertEqual({'id': '0', 'os-flavor-access:is_public': True},
                         resp.obj['flavor'])

    def _get_add_access(self):
        # v2.1 and v2.0 name the action methods differently.
        if self.api_version == "2.1":
            return self.flavor_action_controller._add_tenant_access
        else:
            return self.flavor_action_controller._addTenantAccess

    def _get_remove_access(self):
        if self.api_version == "2.1":
            return self.flavor_action_controller._remove_tenant_access
        else:
            return self.flavor_action_controller._removeTenantAccess

    def test_add_tenant_access(self):
        # The stub verifies the DB call arguments; the returned access
        # list still comes from the canned ACCESS_LIST (proj3 only).
        def stub_add_flavor_access(context, flavorid, projectid):
            self.assertEqual('3', flavorid, "flavorid")
            self.assertEqual("proj2", projectid, "projectid")
        self.stubs.Set(db, 'flavor_access_add',
                       stub_add_flavor_access)
        expected = {'flavor_access':
                    [{'flavor_id': '3', 'tenant_id': 'proj3'}]}
        body = {'addTenantAccess': {'tenant': 'proj2'}}
        req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
                                      use_admin_context=True)

        add_access = self._get_add_access()
        result = add_access(req, '3', body=body)
        self.assertEqual(result, expected)

    def test_add_tenant_access_with_no_admin_user(self):
        req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
                                      use_admin_context=False)
        body = {'addTenantAccess': {'tenant': 'proj2'}}
        add_access = self._get_add_access()
        self.assertRaises(exception.PolicyNotAuthorized,
                          add_access, req, '2', body=body)

    def test_add_tenant_access_with_no_tenant(self):
        # Both a missing 'tenant' key and an empty value must fail
        # request validation.
        req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
                                      use_admin_context=True)
        body = {'addTenantAccess': {'foo': 'proj2'}}
        add_access = self._get_add_access()
        self.assertRaises(self.validation_ex,
                          add_access, req, '2', body=body)
        body = {'addTenantAccess': {'tenant': ''}}
        self.assertRaises(self.validation_ex,
                          add_access, req, '2', body=body)

    def test_add_tenant_access_with_already_added_access(self):
        def stub_add_flavor_access(context, flavorid, projectid):
            raise exception.FlavorAccessExists(flavor_id=flavorid,
                                               project_id=projectid)
        self.stubs.Set(db, 'flavor_access_add',
                       stub_add_flavor_access)
        body = {'addTenantAccess': {'tenant': 'proj2'}}
        add_access = self._get_add_access()
        self.assertRaises(exc.HTTPConflict,
                          add_access, self.req, '3', body=body)

    def test_remove_tenant_access_with_bad_access(self):
        def stub_remove_flavor_access(context, flavorid, projectid):
            raise exception.FlavorAccessNotFound(flavor_id=flavorid,
                                                 project_id=projectid)
        self.stubs.Set(db, 'flavor_access_remove',
                       stub_remove_flavor_access)
        body = {'removeTenantAccess': {'tenant': 'proj2'}}
        remove_access = self._get_remove_access()
        self.assertRaises(exc.HTTPNotFound,
                          remove_access, self.req, '3', body=body)

    def test_delete_tenant_access_with_no_tenant(self):
        req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
                                      use_admin_context=True)
        remove_access = self._get_remove_access()
        body = {'removeTenantAccess': {'foo': 'proj2'}}
        self.assertRaises(self.validation_ex,
                          remove_access, req, '2', body=body)
        body = {'removeTenantAccess': {'tenant': ''}}
        self.assertRaises(self.validation_ex,
                          remove_access, req, '2', body=body)

    def test_remove_tenant_access_with_no_admin_user(self):
        req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
                                      use_admin_context=False)
        body = {'removeTenantAccess': {'tenant': 'proj2'}}
        remove_access = self._get_remove_access()
        self.assertRaises(exception.PolicyNotAuthorized,
                          remove_access, req, '2', body=body)
+
+
class FlavorAccessTestV20(FlavorAccessTestV21):
    """Runs the whole flavor-access suite against the v2.0 API."""

    api_version = "2.0"
    _prefix = "/v2/fake"
    FlavorAccessController = flavor_access_v2.FlavorAccessController
    FlavorActionController = flavor_access_v2.FlavorActionController
    # v2.0 surfaces validation failures as plain HTTP 400s.
    validation_ex = exc.HTTPBadRequest
+
+
class FlavorAccessSerializerTest(test.NoDBTestCase):
    """Checks the XML serializer used by the v2 flavor-access extension."""

    def test_serializer_empty(self):
        # An empty access list serializes to a childless root element.
        serializer = flavor_access_v2.FlavorAccessTemplate()
        text = serializer.serialize(dict(flavor_access=[]))
        root = etree.fromstring(text)
        self.assertEqual(0, len(root))

    def test_serializer(self):
        access_list = [{'flavor_id': '2', 'tenant_id': 'proj2'},
                       {'flavor_id': '2', 'tenant_id': 'proj3'}]
        expected = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                    '<flavor_access>'
                    '<access tenant_id="proj2" flavor_id="2"/>'
                    '<access tenant_id="proj3" flavor_id="2"/>'
                    '</flavor_access>')

        serializer = flavor_access_v2.FlavorAccessTemplate()
        text = serializer.serialize(dict(flavor_access=access_list))
        self.assertEqual(text, expected)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_flavor_disabled.py b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_disabled.py
new file mode 100644
index 0000000000..a646f43fd1
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_disabled.py
@@ -0,0 +1,127 @@
+# Copyright 2012 Nebula, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import flavor_disabled
+from nova.compute import flavors
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
# Two canned flavors keyed by "flavor <id>": one enabled, one disabled,
# so both values of the OS-FLV-DISABLED attribute get exercised.
FAKE_FLAVORS = {
    'flavor 1': {
        "flavorid": '1',
        "name": 'flavor 1',
        "memory_mb": '256',
        "root_gb": '10',
        "swap": 512,
        "vcpus": 1,
        "ephemeral_gb": 1,
        "disabled": False,
    },
    'flavor 2': {
        "flavorid": '2',
        "name": 'flavor 2',
        "memory_mb": '512',
        "root_gb": '20',
        "swap": None,
        "vcpus": 1,
        "ephemeral_gb": 1,
        "disabled": True,
    },
}
+
+
def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
    """Look up one of the canned flavors by its flavorid."""
    key = 'flavor %s' % flavorid
    return FAKE_FLAVORS[key]
+
+
def fake_get_all_flavors_sorted_list(context=None, inactive=False,
                                     filters=None, sort_key='flavorid',
                                     sort_dir='asc', limit=None, marker=None):
    """Return both canned flavors; all filter/sort arguments are ignored."""
    return [fake_flavor_get_by_flavor_id(fid) for fid in (1, 2)]
+
+
class FlavorDisabledTestV21(test.NoDBTestCase):
    # Exercises the OS-FLV-DISABLED extension on the v2.1 API; the v2
    # and XML subclasses below override only the transport details.
    base_url = '/v2/fake/flavors'
    content_type = 'application/json'
    prefix = "OS-FLV-DISABLED:"

    def setUp(self):
        super(FlavorDisabledTestV21, self).setUp()
        ext = ('nova.api.openstack.compute.contrib'
               '.flavor_disabled.Flavor_disabled')
        self.flags(osapi_compute_extension=[ext])
        fakes.stub_out_nw_api(self.stubs)
        # Serve the canned flavor data instead of hitting the DB.
        self.stubs.Set(flavors, "get_all_flavors_sorted_list",
                       fake_get_all_flavors_sorted_list)
        self.stubs.Set(flavors,
                       "get_flavor_by_flavor_id",
                       fake_flavor_get_by_flavor_id)

    def _make_request(self, url):
        req = webob.Request.blank(url)
        req.headers['Accept'] = self.content_type
        # NOTE(review): init_only=('flavors') is a bare string, not a
        # one-element tuple -- presumably it still works because the
        # membership check 'flavors' in 'flavors' is True; confirm
        # before "fixing" it to ('flavors',).
        res = req.get_response(fakes.wsgi_app_v21(init_only=('flavors')))
        return res

    def _get_flavor(self, body):
        return jsonutils.loads(body).get('flavor')

    def _get_flavors(self, body):
        return jsonutils.loads(body).get('flavors')

    def assertFlavorDisabled(self, flavor, disabled):
        # Compared as strings so the XML subclass (attribute values are
        # text) can reuse the same assertion.
        self.assertEqual(str(flavor.get('%sdisabled' % self.prefix)), disabled)

    def test_show(self):
        url = self.base_url + '/1'
        res = self._make_request(url)

        self.assertEqual(res.status_int, 200)
        self.assertFlavorDisabled(self._get_flavor(res.body), 'False')

    def test_detail(self):
        url = self.base_url + '/detail'
        res = self._make_request(url)

        self.assertEqual(res.status_int, 200)
        flavors = self._get_flavors(res.body)
        self.assertFlavorDisabled(flavors[0], 'False')
        self.assertFlavorDisabled(flavors[1], 'True')
+
+
class FlavorDisabledTestV2(FlavorDisabledTestV21):
    """Runs the same checks through the legacy v2 WSGI app."""

    def _make_request(self, url):
        request = webob.Request.blank(url)
        request.headers['Accept'] = self.content_type
        return request.get_response(fakes.wsgi_app())
+
+
class FlavorDisabledXmlTest(FlavorDisabledTestV2):
    """Repeats the v2 checks with XML response bodies."""

    content_type = 'application/xml'
    prefix = '{%s}' % flavor_disabled.Flavor_disabled.namespace

    def _get_flavor(self, body):
        document = etree.XML(body)
        return document

    def _get_flavors(self, body):
        document = etree.XML(body)
        return document.getchildren()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_flavor_manage.py b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_manage.py
new file mode 100644
index 0000000000..3d44e4970b
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_manage.py
@@ -0,0 +1,465 @@
+# Copyright 2011 Andrew Bogott for the Wikimedia Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+import mock
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import flavor_access
+from nova.api.openstack.compute.contrib import flavormanage as flavormanage_v2
+from nova.api.openstack.compute.plugins.v3 import flavor_manage as \
+ flavormanage_v21
+from nova.compute import flavors
+from nova import context
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
def fake_db_flavor(**updates):
    """Build a plain-dict flavor row, optionally overriding fields.

    Keyword arguments are merged over the defaults so tests can tweak
    individual columns without restating the whole record.
    """
    flavor = {
        'root_gb': 1,
        'ephemeral_gb': 1,
        'name': u'frob',
        'deleted': False,
        'created_at': datetime.datetime(2012, 1, 19, 18, 49, 30, 877329),
        'updated_at': None,
        'memory_mb': 256,
        'vcpus': 1,
        'flavorid': 1,
        'swap': 0,
        'rxtx_factor': 1.0,
        'extra_specs': {},
        'deleted_at': None,
        'vcpu_weight': None,
        'id': 7,
        'is_public': True,
        'disabled': False,
    }
    flavor.update(updates)
    return flavor
+
+
def fake_get_flavor_by_flavor_id(flavorid, ctxt=None, read_deleted='yes'):
    # Stand-in for flavors.get_flavor_by_flavor_id with two guards:
    # 'failtest' simulates a missing flavor, and any other flavorid
    # except 1234 flags a mis-wired test outright.
    if flavorid == 'failtest':
        raise exception.FlavorNotFound(flavor_id=flavorid)
    elif not str(flavorid) == '1234':
        raise Exception("This test expects flavorid 1234, not %s" % flavorid)
    # The default read_deleted='yes' deliberately trips this guard:
    # code under test is expected to pass read_deleted='no' explicitly.
    if read_deleted != 'no':
        raise test.TestingException("Should not be reading deleted")
    return fake_db_flavor(flavorid=flavorid)
+
+
def fake_destroy(flavorname):
    """No-op stand-in for flavors.destroy used by these tests."""
+
+
def fake_create(context, kwargs, projects=None):
    """Fake db.flavor_create: build a flavor row from request kwargs.

    Numeric fields are coerced exactly as the original did; a missing
    flavorid falls back to 1234.
    """
    flavorid = kwargs.get('flavorid')
    if flavorid is None:
        flavorid = 1234

    return fake_db_flavor(
        flavorid=flavorid,
        name=kwargs.get('name'),
        memory_mb=int(kwargs.get('memory_mb')),
        vcpus=int(kwargs.get('vcpus')),
        root_gb=int(kwargs.get('root_gb')),
        ephemeral_gb=int(kwargs.get('ephemeral_gb')),
        swap=kwargs.get('swap'),
        rxtx_factor=float(kwargs.get('rxtx_factor')),
        is_public=bool(kwargs.get('is_public')),
        disabled=bool(kwargs.get('disabled')),
    )
+
+
+class FlavorManageTestV21(test.NoDBTestCase):
+ controller = flavormanage_v21.FlavorManageController()
+ validation_error = exception.ValidationError
+ base_url = '/v2/fake/flavors'
+
    def setUp(self):
        super(FlavorManageTestV21, self).setUp()
        # Route flavor lookup/creation/destruction through the fakes.
        self.stubs.Set(flavors,
                       "get_flavor_by_flavor_id",
                       fake_get_flavor_by_flavor_id)
        self.stubs.Set(flavors, "destroy", fake_destroy)
        self.stubs.Set(db, "flavor_create", fake_create)
        self.ctxt = context.RequestContext('fake', 'fake',
                                           is_admin=True, auth_token=True)
        self.app = self._setup_app()

        # Canonical create request; individual tests mutate the fields
        # they care about before posting it.
        self.request_body = {
            "flavor": {
                "name": "test",
                "ram": 512,
                "vcpus": 2,
                "disk": 1,
                "OS-FLV-EXT-DATA:ephemeral": 1,
                "id": unicode('1234'),
                "swap": 512,
                "rxtx_factor": 1,
                "os-flavor-access:is_public": True,
            }
        }
        # Same object on purpose: a successful create should echo the
        # request back unchanged.
        self.expected_flavor = self.request_body
+
+ def _setup_app(self):
+ return fakes.wsgi_app_v21(init_only=('flavor-manage', 'os-flavor-rxtx',
+ 'os-flavor-access', 'flavors',
+ 'os-flavor-extra-data'))
+
    def test_delete(self):
        # Deleting a known flavor succeeds with 202; deleting an
        # unknown one ('failtest') surfaces as a 404.
        req = fakes.HTTPRequest.blank(self.base_url + '/1234')
        res = self.controller._delete(req, 1234)

        # NOTE: on v2.1, http status code is set as wsgi_code of API
        # method instead of status_int in a response object.
        if isinstance(self.controller,
                      flavormanage_v21.FlavorManageController):
            status_int = self.controller._delete.wsgi_code
        else:
            status_int = res.status_int
        self.assertEqual(202, status_int)

        # subsequent delete should fail
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller._delete, req, "failtest")
+
+ def _test_create_missing_parameter(self, parameter):
+ body = {
+ "flavor": {
+ "name": "azAZ09. -_",
+ "ram": 512,
+ "vcpus": 2,
+ "disk": 1,
+ "OS-FLV-EXT-DATA:ephemeral": 1,
+ "id": unicode('1234'),
+ "swap": 512,
+ "rxtx_factor": 1,
+ "os-flavor-access:is_public": True,
+ }
+ }
+
+ del body['flavor'][parameter]
+
+ req = fakes.HTTPRequest.blank(self.base_url)
+ self.assertRaises(self.validation_error, self.controller._create,
+ req, body=body)
+
    def test_create_missing_name(self):
        # 'name' is a mandatory field of the create request body.
        self._test_create_missing_parameter('name')
+
    def test_create_missing_ram(self):
        # 'ram' is a mandatory field of the create request body.
        self._test_create_missing_parameter('ram')
+
    def test_create_missing_vcpus(self):
        # 'vcpus' is a mandatory field of the create request body.
        self._test_create_missing_parameter('vcpus')
+
    def test_create_missing_disk(self):
        # 'disk' is a mandatory field of the create request body.
        self._test_create_missing_parameter('disk')
+
+ def _create_flavor_success_case(self, body):
+ req = webob.Request.blank(self.base_url)
+ req.headers['Content-Type'] = 'application/json'
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(200, res.status_code)
+ return jsonutils.loads(res.body)
+
+ def test_create(self):
+ body = self._create_flavor_success_case(self.request_body)
+ for key in self.expected_flavor["flavor"]:
+ self.assertEqual(body["flavor"][key],
+ self.expected_flavor["flavor"][key])
+
+ def test_create_public_default(self):
+ del self.request_body['flavor']['os-flavor-access:is_public']
+ body = self._create_flavor_success_case(self.request_body)
+ for key in self.expected_flavor["flavor"]:
+ self.assertEqual(body["flavor"][key],
+ self.expected_flavor["flavor"][key])
+
+ def test_create_without_flavorid(self):
+ del self.request_body['flavor']['id']
+ body = self._create_flavor_success_case(self.request_body)
+ for key in self.expected_flavor["flavor"]:
+ self.assertEqual(body["flavor"][key],
+ self.expected_flavor["flavor"][key])
+
+ def _create_flavor_bad_request_case(self, body):
+ self.stubs.UnsetAll()
+
+ req = webob.Request.blank(self.base_url)
+ req.headers['Content-Type'] = 'application/json'
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_code, 400)
+
+ def test_create_invalid_name(self):
+ self.request_body['flavor']['name'] = 'bad !@#!$% name'
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_flavor_name_is_whitespace(self):
+ self.request_body['flavor']['name'] = ' '
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_name_too_long(self):
+ self.request_body['flavor']['name'] = 'a' * 256
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_without_flavorname(self):
+ del self.request_body['flavor']['name']
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_empty_body(self):
+ body = {
+ "flavor": {}
+ }
+ self._create_flavor_bad_request_case(body)
+
+ def test_create_no_body(self):
+ body = {}
+ self._create_flavor_bad_request_case(body)
+
+ def test_create_invalid_format_body(self):
+ body = {
+ "flavor": []
+ }
+ self._create_flavor_bad_request_case(body)
+
+ def test_create_invalid_flavorid(self):
+ self.request_body['flavor']['id'] = "!@#!$#!$^#&^$&"
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_check_flavor_id_length(self):
+ MAX_LENGTH = 255
+ self.request_body['flavor']['id'] = "a" * (MAX_LENGTH + 1)
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_leading_trailing_whitespaces_in_flavor_id(self):
+ self.request_body['flavor']['id'] = " bad_id "
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_without_ram(self):
+ del self.request_body['flavor']['ram']
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_0_ram(self):
+ self.request_body['flavor']['ram'] = 0
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_without_vcpus(self):
+ del self.request_body['flavor']['vcpus']
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_0_vcpus(self):
+ self.request_body['flavor']['vcpus'] = 0
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_without_disk(self):
+ del self.request_body['flavor']['disk']
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_minus_disk(self):
+ self.request_body['flavor']['disk'] = -1
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_minus_ephemeral(self):
+ self.request_body['flavor']['OS-FLV-EXT-DATA:ephemeral'] = -1
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_minus_swap(self):
+ self.request_body['flavor']['swap'] = -1
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_minus_rxtx_factor(self):
+ self.request_body['flavor']['rxtx_factor'] = -1
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_non_boolean_is_public(self):
+ self.request_body['flavor']['os-flavor-access:is_public'] = 123
+ self._create_flavor_bad_request_case(self.request_body)
+
+    def test_flavor_exists_exception_returns_409(self):
+        """A duplicate-flavor error from flavors.create maps to HTTP 409."""
+        expected = {
+            "flavor": {
+                "name": "test",
+                "ram": 512,
+                "vcpus": 2,
+                "disk": 1,
+                "OS-FLV-EXT-DATA:ephemeral": 1,
+                "id": 1235,
+                "swap": 512,
+                "rxtx_factor": 1,
+                "os-flavor-access:is_public": True,
+            }
+        }
+
+        # Force the backend create to report that the flavor already exists.
+        def fake_create(name, memory_mb, vcpus, root_gb, ephemeral_gb,
+                        flavorid, swap, rxtx_factor, is_public):
+            raise exception.FlavorExists(name=name)
+
+        self.stubs.Set(flavors, "create", fake_create)
+        req = webob.Request.blank(self.base_url)
+        req.headers['Content-Type'] = 'application/json'
+        req.method = 'POST'
+        req.body = jsonutils.dumps(expected)
+        res = req.get_response(self.app)
+        self.assertEqual(res.status_int, 409)
+
+    @mock.patch('nova.compute.flavors.create',
+                side_effect=exception.FlavorCreateFailed)
+    def test_flavor_create_db_failed(self, mock_create):
+        """A DB-level create failure surfaces as a 500 with a message."""
+        request_dict = {
+            "flavor": {
+                "name": "test",
+                'id': "12345",
+                "ram": 512,
+                "vcpus": 2,
+                "disk": 1,
+                "OS-FLV-EXT-DATA:ephemeral": 1,
+                "swap": 512,
+                "rxtx_factor": 1,
+                "os-flavor-access:is_public": True,
+            }
+        }
+        req = webob.Request.blank(self.base_url)
+        req.headers['Content-Type'] = 'application/json'
+        req.method = 'POST'
+        req.body = jsonutils.dumps(request_dict)
+        res = req.get_response(self.app)
+        self.assertEqual(res.status_int, 500)
+        # NOTE(review): assertIn against res.body assumes a Python 2 str
+        # body; on Python 3 this would need res.body.decode() -- confirm.
+        self.assertIn('Unable to create flavor', res.body)
+
+    def test_invalid_memory_mb(self):
+        """Check negative and decimal number can't be accepted."""
+
+        self.stubs.UnsetAll()
+        # Negative, decimal and None memory_mb values must all raise.
+        self.assertRaises(exception.InvalidInput, flavors.create, "abc",
+                          -512, 2, 1, 1, 1234, 512, 1, True)
+        self.assertRaises(exception.InvalidInput, flavors.create, "abcd",
+                          512.2, 2, 1, 1, 1234, 512, 1, True)
+        self.assertRaises(exception.InvalidInput, flavors.create, "abcde",
+                          None, 2, 1, 1, 1234, 512, 1, True)
+        # NOTE(review): the last two cases pass None as the *third*
+        # positional value (not memory_mb) -- presumably intentional extra
+        # coverage; confirm against flavors.create's signature.
+        self.assertRaises(exception.InvalidInput, flavors.create, "abcdef",
+                          512, 2, None, 1, 1234, 512, 1, True)
+        self.assertRaises(exception.InvalidInput, flavors.create, "abcdef",
+                          "test_memory_mb", 2, None, 1, 1234, 512, 1, True)
+
+
+class FakeRequest(object):
+    # Minimal request stand-in exposing only the admin context that the
+    # flavor-access controller reads from the WSGI environ.
+    environ = {"nova.context": context.get_admin_context()}
+
+
+class PrivateFlavorManageTestV21(test.TestCase):
+    """Creating a flavor via the v2.1 API must not implicitly grant
+    per-tenant flavor access, whether the flavor is private or public.
+    """
+    controller = flavormanage_v21.FlavorManageController()
+    base_url = '/v2/fake/flavors'
+
+    def setUp(self):
+        super(PrivateFlavorManageTestV21, self).setUp()
+        self.flavor_access_controller = flavor_access.FlavorAccessController()
+        self.ctxt = context.RequestContext('fake', 'fake',
+                                           is_admin=True, auth_token=True)
+        self.app = self._setup_app()
+        # Flavor body POSTed by the tests; is_public is set per test.
+        self.expected = {
+            "flavor": {
+                "name": "test",
+                "ram": 512,
+                "vcpus": 2,
+                "disk": 1,
+                "OS-FLV-EXT-DATA:ephemeral": 1,
+                "swap": 512,
+                "rxtx_factor": 1
+            }
+        }
+
+    def _setup_app(self):
+        # Overridden by the v2 subclass to build the legacy app instead.
+        return fakes.wsgi_app_v21(init_only=('flavor-manage',
+                                             'os-flavor-access',
+                                             'os-flavor-rxtx', 'flavors',
+                                             'os-flavor-extra-data'),
+                                  fake_auth_context=self.ctxt)
+
+    def _get_response(self):
+        # POST self.expected and return the decoded JSON response body.
+        req = webob.Request.blank(self.base_url)
+        req.headers['Content-Type'] = 'application/json'
+        req.method = 'POST'
+        req.body = jsonutils.dumps(self.expected)
+        res = req.get_response(self.app)
+        return jsonutils.loads(res.body)
+
+    def test_create_private_flavor_should_not_grant_flavor_access(self):
+        self.expected["flavor"]["os-flavor-access:is_public"] = False
+        body = self._get_response()
+        for key in self.expected["flavor"]:
+            self.assertEqual(body["flavor"][key], self.expected["flavor"][key])
+        flavor_access_body = self.flavor_access_controller.index(
+            FakeRequest(), body["flavor"]["id"])
+        expected_flavor_access_body = {
+            "tenant_id": "%s" % self.ctxt.project_id,
+            "flavor_id": "%s" % body["flavor"]["id"]
+        }
+        # The creating tenant must not get an implicit access entry.
+        self.assertNotIn(expected_flavor_access_body,
+                         flavor_access_body["flavor_access"])
+
+    def test_create_public_flavor_should_not_create_flavor_access(self):
+        self.expected["flavor"]["os-flavor-access:is_public"] = True
+        # Stub with no recorded expectations: any call to
+        # add_flavor_access would fail mox verification.
+        self.mox.StubOutWithMock(flavors, "add_flavor_access")
+        self.mox.ReplayAll()
+        body = self._get_response()
+        for key in self.expected["flavor"]:
+            self.assertEqual(body["flavor"][key], self.expected["flavor"][key])
+
+
+class FlavorManageTestV2(FlavorManageTestV21):
+    """Re-run the flavor-manage tests against the legacy v2 API."""
+    controller = flavormanage_v2.FlavorManageController()
+    # v2 has no JSON-schema validation; bad input is a plain 400.
+    validation_error = webob.exc.HTTPBadRequest
+
+    def setUp(self):
+        super(FlavorManageTestV2, self).setUp()
+        # Load only the extensions these tests exercise.
+        self.flags(
+            osapi_compute_extension=[
+                'nova.api.openstack.compute.contrib.select_extensions'],
+            osapi_compute_ext_list=['Flavormanage', 'Flavorextradata',
+                'Flavor_access', 'Flavor_rxtx', 'Flavor_swap'])
+
+    def _setup_app(self):
+        return fakes.wsgi_app(init_only=('flavors',),
+                              fake_auth_context=self.ctxt)
+
+
+class PrivateFlavorManageTestV2(PrivateFlavorManageTestV21):
+ controller = flavormanage_v2.FlavorManageController()
+
+ def setUp(self):
+ super(PrivateFlavorManageTestV2, self).setUp()
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Flavormanage', 'Flavorextradata',
+ 'Flavor_access', 'Flavor_rxtx', 'Flavor_swap'])
+
+ def _setup_app(self):
+ return fakes.wsgi_app(init_only=('flavors',),
+ fake_auth_context=self.ctxt)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_flavor_rxtx.py b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_rxtx.py
new file mode 100644
index 0000000000..a8f31653c1
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_rxtx.py
@@ -0,0 +1,127 @@
+# Copyright 2012 Nebula, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.compute import flavors
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+FAKE_FLAVORS = {
+ 'flavor 1': {
+ "flavorid": '1',
+ "name": 'flavor 1',
+ "memory_mb": '256',
+ "root_gb": '10',
+ "swap": '5',
+ "disabled": False,
+ "ephemeral_gb": '20',
+ "rxtx_factor": '1.0',
+ "vcpus": 1,
+ },
+ 'flavor 2': {
+ "flavorid": '2',
+ "name": 'flavor 2',
+ "memory_mb": '512',
+ "root_gb": '10',
+ "swap": '10',
+ "ephemeral_gb": '25',
+ "rxtx_factor": None,
+ "disabled": False,
+ "vcpus": 1,
+ },
+}
+
+
+def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
+    # Look up one of the canned FAKE_FLAVORS by its numeric id.
+    return FAKE_FLAVORS['flavor %s' % flavorid]
+
+
+def fake_get_all_flavors_sorted_list(context=None, inactive=False,
+                                     filters=None, sort_key='flavorid',
+                                     sort_dir='asc', limit=None, marker=None):
+    # Ignore all query parameters and return both canned flavors in order.
+    return [
+        fake_flavor_get_by_flavor_id(1),
+        fake_flavor_get_by_flavor_id(2)
+    ]
+
+
+class FlavorRxtxTestV21(test.NoDBTestCase):
+    """Verify the os-flavor-rxtx extension exposes rxtx_factor in the
+    v2.1 flavor show/detail views.
+    """
+    content_type = 'application/json'
+    _prefix = "/v2/fake"
+
+    def setUp(self):
+        super(FlavorRxtxTestV21, self).setUp()
+        ext = ('nova.api.openstack.compute.contrib'
+               '.flavor_rxtx.Flavor_rxtx')
+        self.flags(osapi_compute_extension=[ext])
+        fakes.stub_out_nw_api(self.stubs)
+        # Serve the canned FAKE_FLAVORS instead of hitting the DB.
+        self.stubs.Set(flavors, "get_all_flavors_sorted_list",
+                       fake_get_all_flavors_sorted_list)
+        self.stubs.Set(flavors,
+                       "get_flavor_by_flavor_id",
+                       fake_flavor_get_by_flavor_id)
+
+    def _make_request(self, url):
+        # GET *url* with the test's Accept type; return the raw response.
+        req = webob.Request.blank(url)
+        req.headers['Accept'] = self.content_type
+        res = req.get_response(self._get_app())
+        return res
+
+    def _get_app(self):
+        # Overridden by subclasses to exercise other API versions.
+        return fakes.wsgi_app_v21(init_only=('servers',
+                                  'flavors', 'os-flavor-rxtx'))
+
+    def _get_flavor(self, body):
+        return jsonutils.loads(body).get('flavor')
+
+    def _get_flavors(self, body):
+        return jsonutils.loads(body).get('flavors')
+
+    def assertFlavorRxtx(self, flavor, rxtx):
+        self.assertEqual(str(flavor.get('rxtx_factor')), rxtx)
+
+    def test_show(self):
+        url = self._prefix + '/flavors/1'
+        res = self._make_request(url)
+
+        self.assertEqual(res.status_int, 200)
+        self.assertFlavorRxtx(self._get_flavor(res.body), '1.0')
+
+    def test_detail(self):
+        url = self._prefix + '/flavors/detail'
+        res = self._make_request(url)
+
+        self.assertEqual(res.status_int, 200)
+        flavors = self._get_flavors(res.body)
+        self.assertFlavorRxtx(flavors[0], '1.0')
+        # Flavor 2 has rxtx_factor None, rendered as an empty value.
+        self.assertFlavorRxtx(flavors[1], '')
+
+
+class FlavorRxtxTestV20(FlavorRxtxTestV21):
+    """Same rxtx checks against the legacy v2 app."""
+
+    def _get_app(self):
+        return fakes.wsgi_app()
+
+
+class FlavorRxtxXmlTest(FlavorRxtxTestV20):
+    """Same rxtx checks via the XML content type; rxtx_factor comes back
+    as an element attribute, read with etree's .get().
+    """
+    content_type = 'application/xml'
+
+    def _get_flavor(self, body):
+        return etree.XML(body)
+
+    def _get_flavors(self, body):
+        return etree.XML(body).getchildren()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_flavor_swap.py b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_swap.py
new file mode 100644
index 0000000000..f168db060a
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_flavor_swap.py
@@ -0,0 +1,126 @@
+# Copyright 2012 Nebula, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.compute import flavors
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+FAKE_FLAVORS = {
+ 'flavor 1': {
+ "flavorid": '1',
+ "name": 'flavor 1',
+ "memory_mb": '256',
+ "root_gb": '10',
+ "swap": 512,
+ "vcpus": 1,
+ "ephemeral_gb": 1,
+ "disabled": False,
+ },
+ 'flavor 2': {
+ "flavorid": '2',
+ "name": 'flavor 2',
+ "memory_mb": '512',
+ "root_gb": '10',
+ "swap": None,
+ "vcpus": 1,
+ "ephemeral_gb": 1,
+ "disabled": False,
+ },
+}
+
+
+# TODO(jogo) dedup these across nova.api.openstack.contrib.test_flavor*
+def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
+ return FAKE_FLAVORS['flavor %s' % flavorid]
+
+
+def fake_get_all_flavors_sorted_list(context=None, inactive=False,
+ filters=None, sort_key='flavorid',
+ sort_dir='asc', limit=None, marker=None):
+ return [
+ fake_flavor_get_by_flavor_id(1),
+ fake_flavor_get_by_flavor_id(2)
+ ]
+
+
+class FlavorSwapTestV21(test.NoDBTestCase):
+ base_url = '/v2/fake/flavors'
+ content_type = 'application/json'
+ prefix = ''
+
+ def setUp(self):
+ super(FlavorSwapTestV21, self).setUp()
+ ext = ('nova.api.openstack.compute.contrib'
+ '.flavor_swap.Flavor_swap')
+ self.flags(osapi_compute_extension=[ext])
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(flavors, "get_all_flavors_sorted_list",
+ fake_get_all_flavors_sorted_list)
+ self.stubs.Set(flavors,
+ "get_flavor_by_flavor_id",
+ fake_flavor_get_by_flavor_id)
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app_v21(init_only=('flavors')))
+ return res
+
+ def _get_flavor(self, body):
+ return jsonutils.loads(body).get('flavor')
+
+ def _get_flavors(self, body):
+ return jsonutils.loads(body).get('flavors')
+
+ def assertFlavorSwap(self, flavor, swap):
+ self.assertEqual(str(flavor.get('%sswap' % self.prefix)), swap)
+
+ def test_show(self):
+ url = self.base_url + '/1'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertFlavorSwap(self._get_flavor(res.body), '512')
+
+ def test_detail(self):
+ url = self.base_url + '/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ flavors = self._get_flavors(res.body)
+ self.assertFlavorSwap(flavors[0], '512')
+ self.assertFlavorSwap(flavors[1], '')
+
+
+class FlavorSwapTestV2(FlavorSwapTestV21):
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app())
+ return res
+
+
+class FlavorSwapXmlTest(FlavorSwapTestV2):
+ content_type = 'application/xml'
+
+ def _get_flavor(self, body):
+ return etree.XML(body)
+
+ def _get_flavors(self, body):
+ return etree.XML(body).getchildren()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_flavorextradata.py b/nova/tests/unit/api/openstack/compute/contrib/test_flavorextradata.py
new file mode 100644
index 0000000000..1299b6c88d
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_flavorextradata.py
@@ -0,0 +1,127 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo.serialization import jsonutils
+import webob
+
+from nova.compute import flavors
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+def fake_get_flavor_by_flavor_id(flavorid, ctxt=None):
+ return {
+ 'id': flavorid,
+ 'flavorid': str(flavorid),
+ 'root_gb': 1,
+ 'ephemeral_gb': 1,
+ 'name': u'test',
+ 'deleted': False,
+ 'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1),
+ 'updated_at': None,
+ 'memory_mb': 512,
+ 'vcpus': 1,
+ 'extra_specs': {},
+ 'deleted_at': None,
+ 'vcpu_weight': None,
+ 'swap': 0,
+ 'disabled': False,
+ }
+
+
+def fake_get_all_flavors_sorted_list(context=None, inactive=False,
+ filters=None, sort_key='flavorid',
+ sort_dir='asc', limit=None, marker=None):
+ return [
+ fake_get_flavor_by_flavor_id(1),
+ fake_get_flavor_by_flavor_id(2)
+ ]
+
+
+class FlavorExtraDataTestV21(test.NoDBTestCase):
+ base_url = '/v2/fake/flavors'
+
+ def setUp(self):
+ super(FlavorExtraDataTestV21, self).setUp()
+ ext = ('nova.api.openstack.compute.contrib'
+ '.flavorextradata.Flavorextradata')
+ self.flags(osapi_compute_extension=[ext])
+ self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
+ fake_get_flavor_by_flavor_id)
+ self.stubs.Set(flavors, 'get_all_flavors_sorted_list',
+ fake_get_all_flavors_sorted_list)
+ self._setup_app()
+
+ def _setup_app(self):
+ self.app = fakes.wsgi_app_v21(init_only=('flavors'))
+
+ def _verify_flavor_response(self, flavor, expected):
+ for key in expected:
+ self.assertEqual(flavor[key], expected[key])
+
+ def test_show(self):
+ expected = {
+ 'flavor': {
+ 'id': '1',
+ 'name': 'test',
+ 'ram': 512,
+ 'vcpus': 1,
+ 'disk': 1,
+ 'OS-FLV-EXT-DATA:ephemeral': 1,
+ }
+ }
+
+ url = self.base_url + '/1'
+ req = webob.Request.blank(url)
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ body = jsonutils.loads(res.body)
+ self._verify_flavor_response(body['flavor'], expected['flavor'])
+
+ def test_detail(self):
+ expected = [
+ {
+ 'id': '1',
+ 'name': 'test',
+ 'ram': 512,
+ 'vcpus': 1,
+ 'disk': 1,
+ 'OS-FLV-EXT-DATA:ephemeral': 1,
+ },
+ {
+ 'id': '2',
+ 'name': 'test',
+ 'ram': 512,
+ 'vcpus': 1,
+ 'disk': 1,
+ 'OS-FLV-EXT-DATA:ephemeral': 1,
+ },
+ ]
+
+ url = self.base_url + '/detail'
+ req = webob.Request.blank(url)
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ body = jsonutils.loads(res.body)
+ for i, flavor in enumerate(body['flavors']):
+ self._verify_flavor_response(flavor, expected[i])
+
+
+class FlavorExtraDataTestV2(FlavorExtraDataTestV21):
+
+ def _setup_app(self):
+ self.app = fakes.wsgi_app(init_only=('flavors',))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_flavors_extra_specs.py b/nova/tests/unit/api/openstack/compute/contrib/test_flavors_extra_specs.py
new file mode 100644
index 0000000000..8a6f4814a8
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_flavors_extra_specs.py
@@ -0,0 +1,403 @@
+# Copyright 2011 University of Southern California
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import webob
+
+from nova.api.openstack.compute.contrib import flavorextraspecs \
+ as flavorextraspecs_v2
+from nova.api.openstack.compute.plugins.v3 import flavors_extraspecs \
+ as flavorextraspecs_v21
+import nova.db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.objects import test_flavor
+
+
+def return_create_flavor_extra_specs(context, flavor_id, extra_specs):
+ return stub_flavor_extra_specs()
+
+
+def return_flavor_extra_specs(context, flavor_id):
+ return stub_flavor_extra_specs()
+
+
+def return_flavor_extra_specs_item(context, flavor_id, key):
+ return {key: stub_flavor_extra_specs()[key]}
+
+
+def return_empty_flavor_extra_specs(context, flavor_id):
+ return {}
+
+
+def delete_flavor_extra_specs(context, flavor_id, key):
+ pass
+
+
+def stub_flavor_extra_specs():
+ specs = {
+ "key1": "value1",
+ "key2": "value2",
+ "key3": "value3",
+ "key4": "value4",
+ "key5": "value5"}
+ return specs
+
+
+class FlavorsExtraSpecsTestV21(test.TestCase):
+ bad_request = exception.ValidationError
+ flavorextraspecs = flavorextraspecs_v21
+
+ def _get_request(self, url, use_admin_context=False):
+ req_url = '/v2/fake/flavors/' + url
+ return fakes.HTTPRequest.blank(req_url,
+ use_admin_context=use_admin_context)
+
+ def setUp(self):
+ super(FlavorsExtraSpecsTestV21, self).setUp()
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ self.controller = self.flavorextraspecs.FlavorExtraSpecsController()
+
+ def test_index(self):
+ flavor = dict(test_flavor.fake_flavor,
+ extra_specs={'key1': 'value1'})
+
+ req = self._get_request('1/os-extra_specs')
+ with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
+ mock_get.return_value = flavor
+ res_dict = self.controller.index(req, 1)
+
+ self.assertEqual('value1', res_dict['extra_specs']['key1'])
+
+ def test_index_no_data(self):
+ self.stubs.Set(nova.db, 'flavor_extra_specs_get',
+ return_empty_flavor_extra_specs)
+
+ req = self._get_request('1/os-extra_specs')
+ res_dict = self.controller.index(req, 1)
+
+ self.assertEqual(0, len(res_dict['extra_specs']))
+
+ def test_show(self):
+ flavor = dict(test_flavor.fake_flavor,
+ extra_specs={'key5': 'value5'})
+ req = self._get_request('1/os-extra_specs/key5')
+ with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
+ mock_get.return_value = flavor
+ res_dict = self.controller.show(req, 1, 'key5')
+
+ self.assertEqual('value5', res_dict['key5'])
+
+ def test_show_spec_not_found(self):
+ self.stubs.Set(nova.db, 'flavor_extra_specs_get',
+ return_empty_flavor_extra_specs)
+
+ req = self._get_request('1/os-extra_specs/key6')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, 1, 'key6')
+
+ def test_not_found_because_flavor(self):
+ req = self._get_request('1/os-extra_specs/key5',
+ use_admin_context=True)
+ with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
+ mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, 1, 'key5')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
+ req, 1, 'key5', body={'key5': 'value5'})
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, 1, 'key5')
+
+ req = self._get_request('1/os-extra_specs', use_admin_context=True)
+ with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
+ mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
+ req, 1, body={'extra_specs': {'key5': 'value5'}})
+
+ def test_delete(self):
+ flavor = dict(test_flavor.fake_flavor,
+ extra_specs={'key5': 'value5'})
+ self.stubs.Set(nova.db, 'flavor_extra_specs_delete',
+ delete_flavor_extra_specs)
+
+ req = self._get_request('1/os-extra_specs/key5',
+ use_admin_context=True)
+ with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
+ mock_get.return_value = flavor
+ self.controller.delete(req, 1, 'key5')
+
+ def test_delete_no_admin(self):
+ self.stubs.Set(nova.db, 'flavor_extra_specs_delete',
+ delete_flavor_extra_specs)
+
+ req = self._get_request('1/os-extra_specs/key5')
+ self.assertRaises(exception.Forbidden, self.controller.delete,
+ req, 1, 'key 5')
+
+ def test_delete_spec_not_found(self):
+ req = self._get_request('1/os-extra_specs/key6',
+ use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, 1, 'key6')
+
+ def test_create(self):
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ body = {"extra_specs": {"key1": "value1", "key2": 0.5, "key3": 5}}
+
+ req = self._get_request('1/os-extra_specs', use_admin_context=True)
+ res_dict = self.controller.create(req, 1, body=body)
+
+ self.assertEqual('value1', res_dict['extra_specs']['key1'])
+ self.assertEqual(0.5, res_dict['extra_specs']['key2'])
+ self.assertEqual(5, res_dict['extra_specs']['key3'])
+
+ def test_create_no_admin(self):
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ body = {"extra_specs": {"key1": "value1"}}
+
+ req = self._get_request('1/os-extra_specs')
+ self.assertRaises(exception.Forbidden, self.controller.create,
+ req, 1, body=body)
+
+ def test_create_flavor_not_found(self):
+ def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
+ raise exception.FlavorNotFound(flavor_id='')
+
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ fake_instance_type_extra_specs_update_or_create)
+ body = {"extra_specs": {"key1": "value1"}}
+ req = self._get_request('1/os-extra_specs', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
+ req, 1, body=body)
+
+ def test_create_flavor_db_duplicate(self):
+ def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
+ raise exception.FlavorExtraSpecUpdateCreateFailed(id=1, retries=5)
+
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ fake_instance_type_extra_specs_update_or_create)
+ body = {"extra_specs": {"key1": "value1"}}
+ req = self._get_request('1/os-extra_specs', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPConflict, self.controller.create,
+ req, 1, body=body)
+
+ def _test_create_bad_request(self, body):
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+
+ req = self._get_request('1/os-extra_specs', use_admin_context=True)
+ self.assertRaises(self.bad_request, self.controller.create,
+ req, 1, body=body)
+
+ def test_create_empty_body(self):
+ self._test_create_bad_request('')
+
+ def test_create_non_dict_extra_specs(self):
+ self._test_create_bad_request({"extra_specs": "non_dict"})
+
+ def test_create_non_string_key(self):
+ self._test_create_bad_request({"extra_specs": {None: "value1"}})
+
+ def test_create_non_string_value(self):
+ self._test_create_bad_request({"extra_specs": {"key1": None}})
+
+ def test_create_zero_length_key(self):
+ self._test_create_bad_request({"extra_specs": {"": "value1"}})
+
+ def test_create_long_key(self):
+ key = "a" * 256
+ self._test_create_bad_request({"extra_specs": {key: "value1"}})
+
+ def test_create_long_value(self):
+ value = "a" * 256
+ self._test_create_bad_request({"extra_specs": {"key1": value}})
+
+ @mock.patch('nova.db.flavor_extra_specs_update_or_create')
+ def test_create_really_long_integer_value(self, mock_flavor_extra_specs):
+ value = 10 ** 1000
+ mock_flavor_extra_specs.side_effects = return_create_flavor_extra_specs
+
+ req = self._get_request('1/os-extra_specs', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, 1, body={"extra_specs": {"key1": value}})
+
+ @mock.patch('nova.db.flavor_extra_specs_update_or_create')
+ def test_create_invalid_specs_key(self, mock_flavor_extra_specs):
+ invalid_keys = ("key1/", "<key>", "$$akey$", "!akey", "")
+ mock_flavor_extra_specs.side_effects = return_create_flavor_extra_specs
+
+ for key in invalid_keys:
+ body = {"extra_specs": {key: "value1"}}
+ req = self._get_request('1/os-extra_specs', use_admin_context=True)
+ self.assertRaises(self.bad_request, self.controller.create,
+ req, 1, body=body)
+
+ @mock.patch('nova.db.flavor_extra_specs_update_or_create')
+ def test_create_valid_specs_key(self, mock_flavor_extra_specs):
+ valid_keys = ("key1", "month.price", "I_am-a Key", "finance:g2")
+ mock_flavor_extra_specs.side_effects = return_create_flavor_extra_specs
+
+ for key in valid_keys:
+ body = {"extra_specs": {key: "value1"}}
+ req = self._get_request('1/os-extra_specs', use_admin_context=True)
+ res_dict = self.controller.create(req, 1, body=body)
+ self.assertEqual('value1', res_dict['extra_specs'][key])
+
+ def test_update_item(self):
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ body = {"key1": "value1"}
+
+ req = self._get_request('1/os-extra_specs/key1',
+ use_admin_context=True)
+ res_dict = self.controller.update(req, 1, 'key1', body=body)
+
+ self.assertEqual('value1', res_dict['key1'])
+
+ def test_update_item_no_admin(self):
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ body = {"key1": "value1"}
+
+ req = self._get_request('1/os-extra_specs/key1')
+ self.assertRaises(exception.Forbidden, self.controller.update,
+ req, 1, 'key1', body=body)
+
+ def _test_update_item_bad_request(self, body):
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+
+ req = self._get_request('1/os-extra_specs/key1',
+ use_admin_context=True)
+ self.assertRaises(self.bad_request, self.controller.update,
+ req, 1, 'key1', body=body)
+
+ def test_update_item_empty_body(self):
+ self._test_update_item_bad_request('')
+
+ def test_update_item_too_many_keys(self):
+ body = {"key1": "value1", "key2": "value2"}
+ self._test_update_item_bad_request(body)
+
+ def test_update_item_non_dict_extra_specs(self):
+ self._test_update_item_bad_request("non_dict")
+
+ def test_update_item_non_string_key(self):
+ self._test_update_item_bad_request({None: "value1"})
+
+ def test_update_item_non_string_value(self):
+ self._test_update_item_bad_request({"key1": None})
+
+ def test_update_item_zero_length_key(self):
+ self._test_update_item_bad_request({"": "value1"})
+
+ def test_update_item_long_key(self):
+ key = "a" * 256
+ self._test_update_item_bad_request({key: "value1"})
+
+ def test_update_item_long_value(self):
+ value = "a" * 256
+ self._test_update_item_bad_request({"key1": value})
+
+ def test_update_item_body_uri_mismatch(self):
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ body = {"key1": "value1"}
+
+ req = self._get_request('1/os-extra_specs/bad', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, 1, 'bad', body=body)
+
+ def test_update_flavor_not_found(self):
+ def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
+ raise exception.FlavorNotFound(flavor_id='')
+
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ fake_instance_type_extra_specs_update_or_create)
+ body = {"key1": "value1"}
+
+ req = self._get_request('1/os-extra_specs/key1',
+ use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
+ req, 1, 'key1', body=body)
+
+ def test_update_flavor_db_duplicate(self):
+ def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
+ raise exception.FlavorExtraSpecUpdateCreateFailed(id=1, retries=5)
+
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ fake_instance_type_extra_specs_update_or_create)
+ body = {"key1": "value1"}
+
+ req = self._get_request('1/os-extra_specs/key1',
+ use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPConflict, self.controller.update,
+ req, 1, 'key1', body=body)
+
+ def test_update_really_long_integer_value(self):
+ value = 10 ** 1000
+ self.stubs.Set(nova.db,
+ 'flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+
+ req = self._get_request('1/os-extra_specs/key1',
+ use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, 1, 'key1', body={"key1": value})
+
+
+class FlavorsExtraSpecsTestV2(FlavorsExtraSpecsTestV21):
+ bad_request = webob.exc.HTTPBadRequest
+ flavorextraspecs = flavorextraspecs_v2
+
+
+class FlavorsExtraSpecsXMLSerializerTest(test.TestCase):
+    """Tests for the legacy v2 XML templates for flavor extra specs."""
+
+    def test_serializer(self):
+        # Index view: specs serialize as child elements named by key.
+        serializer = flavorextraspecs_v2.ExtraSpecsTemplate()
+        expected = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+                    '<extra_specs><key1>value1</key1></extra_specs>')
+        text = serializer.serialize(dict(extra_specs={"key1": "value1"}))
+        self.assertEqual(text, expected)
+
+    def test_show_update_serializer(self):
+        # Show/update view: a single spec serializes with a key attribute.
+        serializer = flavorextraspecs_v2.ExtraSpecTemplate()
+        expected = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+                    '<extra_spec key="key1">value1</extra_spec>')
+        text = serializer.serialize(dict({"key1": "value1"}))
+        self.assertEqual(text, expected)
+
+    def test_serializer_with_colon_tagname(self):
+        # Our test object to serialize
+        obj = {'extra_specs': {'foo:bar': '999'}}
+        serializer = flavorextraspecs_v2.ExtraSpecsTemplate()
+        # A ':' in the spec key must come out as a namespaced element.
+        expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
+                         '<extra_specs><foo:bar xmlns:foo="foo">999</foo:bar>'
+                         '</extra_specs>'))
+        result = serializer.serialize(obj)
+        self.assertEqual(expected_xml, result)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_floating_ip_dns.py b/nova/tests/unit/api/openstack/compute/contrib/test_floating_ip_dns.py
new file mode 100644
index 0000000000..9a68e0de60
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_floating_ip_dns.py
@@ -0,0 +1,412 @@
+# Copyright 2011 Andrew Bogott for the Wikimedia Foundation
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import urllib
+
+from lxml import etree
+import webob
+
+from nova.api.openstack.compute.contrib import floating_ip_dns as fipdns_v2
+from nova.api.openstack.compute.plugins.v3 import floating_ip_dns as \
+ fipdns_v21
+from nova import context
+from nova import db
+from nova import exception
+from nova import network
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
# Canned DNS names, addresses and domains shared by all test cases in
# this module; floating_ip_id is the id the fake network API reports.
name = "arbitraryname"
name2 = "anotherarbitraryname"

test_ipv4_address = '10.0.0.66'
test_ipv4_address2 = '10.0.0.67'

test_ipv6_address = 'fe80:0:0:0:0:0:a00:42'

domain = "example.org"
domain2 = "example.net"
floating_ip_id = '1'
+
+
def _quote_domain(domain):
    """Quote a domain name for safe use in a URL path.

    Domain names tend to have .'s in them.  Urllib doesn't quote dots,
    but Routes tends to choke on them, so an extra level of by-hand
    quoting is applied here.  This function needs to duplicate the one
    in python-novaclient/novaclient/v1_1/floating_ip_dns.py
    """
    dotless = domain.replace('.', '%2E')
    return urllib.quote(dotless)
+
+
def network_api_get_floating_ip(self, context, id):
    """Fake network.api.API.get_floating_ip: an unassociated address."""
    return {'fixed_ip': None,
            'id': floating_ip_id,
            'address': test_ipv4_address}
+
+
def network_get_dns_domains(self, context):
    """Fake returning one plain public, one project-scoped public and
    one private (per-availability-zone) DNS domain.
    """
    plain_public = {'domain': 'example.org', 'scope': 'public'}
    project_public = {'domain': 'example.com', 'scope': 'public',
                      'project': 'project1'}
    private = {'domain': 'private.example.com', 'scope': 'private',
               'availability_zone': 'avzone'}
    return [plain_public, project_public, private]
+
+
def network_get_dns_entries_by_address(self, context, address, domain):
    # Fake: every address maps to both canned DNS names.
    return [name, name2]
+
+
def network_get_dns_entries_by_name(self, context, address, domain):
    # Fake: every name resolves to the canned IPv4 address.
    return [test_ipv4_address]
+
+
def network_add_dns_entry(self, context, address, name, dns_type, domain):
    """Fake that echoes the requested entry back as if it were created."""
    entry = {'ip': test_ipv4_address,
             'name': name,
             'type': dns_type,
             'domain': domain}
    return {'dns_entry': entry}
+
+
def network_modify_dns_entry(self, context, address, name, domain):
    """Fake that echoes the modified entry straight back."""
    return {'dns_entry': {'domain': domain,
                          'ip': address,
                          'name': name}}
+
+
def network_create_private_dns_domain(self, context, domain, avail_zone):
    # Fake: domain creation is a no-op for these tests.
    pass
+
+
def network_create_public_dns_domain(self, context, domain, project):
    # Fake: domain creation is a no-op for these tests.
    pass
+
+
class FloatingIpDNSTestV21(test.TestCase):
    """Tests for the v2.1 os-floating-ip-dns API.

    All network API entry points are replaced with the module-level
    fakes above; FloatingIpDNSTestV2 reruns everything against the
    legacy v2 extension.
    """

    floating_ip_dns = fipdns_v21

    def _create_floating_ip(self):
        """Create a floating ip object."""
        host = "fake_host"
        db.floating_ip_create(self.context,
                              {'address': test_ipv4_address,
                               'host': host})
        db.floating_ip_create(self.context,
                              {'address': test_ipv6_address,
                               'host': host})

    def _delete_floating_ip(self):
        db.floating_ip_destroy(self.context, test_ipv4_address)
        db.floating_ip_destroy(self.context, test_ipv6_address)

    def _check_status(self, expected_status, res, controller_method):
        # v2.1 controllers carry the response code on the method itself;
        # the v2 subclass overrides this to look at the response object.
        # (Parameter was previously misspelled "controller_methord".)
        self.assertEqual(expected_status, controller_method.wsgi_code)

    def _bad_request(self):
        return webob.exc.HTTPBadRequest

    def setUp(self):
        super(FloatingIpDNSTestV21, self).setUp()
        self.stubs.Set(network.api.API, "get_dns_domains",
                       network_get_dns_domains)
        self.stubs.Set(network.api.API, "get_dns_entries_by_address",
                       network_get_dns_entries_by_address)
        self.stubs.Set(network.api.API, "get_dns_entries_by_name",
                       network_get_dns_entries_by_name)
        self.stubs.Set(network.api.API, "get_floating_ip",
                       network_api_get_floating_ip)
        self.stubs.Set(network.api.API, "add_dns_entry",
                       network_add_dns_entry)
        self.stubs.Set(network.api.API, "modify_dns_entry",
                       network_modify_dns_entry)
        self.stubs.Set(network.api.API, "create_public_dns_domain",
                       network_create_public_dns_domain)
        self.stubs.Set(network.api.API, "create_private_dns_domain",
                       network_create_private_dns_domain)

        self.context = context.get_admin_context()

        self._create_floating_ip()
        self.domain_controller = (
            self.floating_ip_dns.FloatingIPDNSDomainController())
        self.entry_controller = (
            self.floating_ip_dns.FloatingIPDNSEntryController())

    def tearDown(self):
        self._delete_floating_ip()
        super(FloatingIpDNSTestV21, self).tearDown()

    def test_dns_domains_list(self):
        req = fakes.HTTPRequest.blank('/v2/123/os-floating-ip-dns')
        res_dict = self.domain_controller.index(req)
        entries = res_dict['domain_entries']
        self.assertTrue(entries)
        self.assertEqual(entries[0]['domain'], "example.org")
        self.assertFalse(entries[0]['project'])
        self.assertFalse(entries[0]['availability_zone'])
        self.assertEqual(entries[1]['domain'], "example.com")
        self.assertEqual(entries[1]['project'], "project1")
        self.assertFalse(entries[1]['availability_zone'])
        self.assertEqual(entries[2]['domain'], "private.example.com")
        self.assertFalse(entries[2]['project'])
        self.assertEqual(entries[2]['availability_zone'], "avzone")

    def _test_get_dns_entries_by_address(self, address):

        qparams = {'ip': address}
        params = "?%s" % urllib.urlencode(qparams) if qparams else ""

        req = fakes.HTTPRequest.blank(
            '/v2/123/os-floating-ip-dns/%s/entries/%s'
            % (_quote_domain(domain), params))
        entries = self.entry_controller.show(req, _quote_domain(domain),
                                             address)
        entries = entries.obj
        self.assertEqual(len(entries['dns_entries']), 2)
        self.assertEqual(entries['dns_entries'][0]['name'],
                         name)
        self.assertEqual(entries['dns_entries'][1]['name'],
                         name2)
        self.assertEqual(entries['dns_entries'][0]['domain'],
                         domain)

    def test_get_dns_entries_by_ipv4_address(self):
        self._test_get_dns_entries_by_address(test_ipv4_address)

    def test_get_dns_entries_by_ipv6_address(self):
        self._test_get_dns_entries_by_address(test_ipv6_address)

    def test_get_dns_entries_by_name(self):
        req = fakes.HTTPRequest.blank(
            '/v2/123/os-floating-ip-dns/%s/entries/%s' %
            (_quote_domain(domain), name))
        entry = self.entry_controller.show(req, _quote_domain(domain), name)

        self.assertEqual(entry['dns_entry']['ip'],
                         test_ipv4_address)
        self.assertEqual(entry['dns_entry']['domain'],
                         domain)

    def test_dns_entries_not_found(self):
        def fake_get_dns_entries_by_name(self, context, address, domain):
            raise webob.exc.HTTPNotFound()

        self.stubs.Set(network.api.API, "get_dns_entries_by_name",
                       fake_get_dns_entries_by_name)

        req = fakes.HTTPRequest.blank(
            '/v2/123/os-floating-ip-dns/%s/entries/%s' %
            (_quote_domain(domain), 'nonexistent'))
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.entry_controller.show,
                          req, _quote_domain(domain), 'nonexistent')

    def test_create_entry(self):
        body = {'dns_entry':
                {'ip': test_ipv4_address,
                 'dns_type': 'A'}}
        req = fakes.HTTPRequest.blank(
            '/v2/123/os-floating-ip-dns/%s/entries/%s' %
            (_quote_domain(domain), name))
        entry = self.entry_controller.update(req, _quote_domain(domain),
                                             name, body=body)
        self.assertEqual(entry['dns_entry']['ip'], test_ipv4_address)

    def test_create_domain(self):
        # A scope/field mismatch (private+project or public+az) is a
        # bad request; the matching combinations succeed.
        req = fakes.HTTPRequest.blank('/v2/123/os-floating-ip-dns/%s' %
                                      _quote_domain(domain))
        body = {'domain_entry':
                {'scope': 'private',
                 'project': 'testproject'}}
        self.assertRaises(self._bad_request(),
                          self.domain_controller.update,
                          req, _quote_domain(domain), body=body)

        body = {'domain_entry':
                {'scope': 'public',
                 'availability_zone': 'zone1'}}
        self.assertRaises(self._bad_request(),
                          self.domain_controller.update,
                          req, _quote_domain(domain), body=body)

        body = {'domain_entry':
                {'scope': 'public',
                 'project': 'testproject'}}
        entry = self.domain_controller.update(req, _quote_domain(domain),
                                              body=body)
        self.assertEqual(entry['domain_entry']['domain'], domain)
        self.assertEqual(entry['domain_entry']['scope'], 'public')
        self.assertEqual(entry['domain_entry']['project'], 'testproject')

        body = {'domain_entry':
                {'scope': 'private',
                 'availability_zone': 'zone1'}}
        entry = self.domain_controller.update(req, _quote_domain(domain),
                                              body=body)
        self.assertEqual(entry['domain_entry']['domain'], domain)
        self.assertEqual(entry['domain_entry']['scope'], 'private')
        self.assertEqual(entry['domain_entry']['availability_zone'], 'zone1')

    def test_delete_entry(self):
        calls = []

        def network_delete_dns_entry(fakeself, context, name, domain):
            calls.append((name, domain))

        self.stubs.Set(network.api.API, "delete_dns_entry",
                       network_delete_dns_entry)

        req = fakes.HTTPRequest.blank(
            '/v2/123/os-floating-ip-dns/%s/entries/%s' %
            (_quote_domain(domain), name))
        res = self.entry_controller.delete(req, _quote_domain(domain), name)

        self._check_status(202, res, self.entry_controller.delete)
        self.assertEqual([(name, domain)], calls)

    def test_delete_entry_notfound(self):
        def delete_dns_entry_notfound(fakeself, context, name, domain):
            raise exception.NotFound

        self.stubs.Set(network.api.API, "delete_dns_entry",
                       delete_dns_entry_notfound)

        req = fakes.HTTPRequest.blank(
            '/v2/123/os-floating-ip-dns/%s/entries/%s' %
            (_quote_domain(domain), name))
        self.assertRaises(webob.exc.HTTPNotFound,
            self.entry_controller.delete, req, _quote_domain(domain), name)

    def test_delete_domain(self):
        calls = []

        def network_delete_dns_domain(fakeself, context, fqdomain):
            calls.append(fqdomain)

        self.stubs.Set(network.api.API, "delete_dns_domain",
                       network_delete_dns_domain)

        req = fakes.HTTPRequest.blank('/v2/123/os-floating-ip-dns/%s' %
                                      _quote_domain(domain))
        res = self.domain_controller.delete(req, _quote_domain(domain))

        self._check_status(202, res, self.domain_controller.delete)
        self.assertEqual([domain], calls)

    def test_delete_domain_notfound(self):
        def delete_dns_domain_notfound(fakeself, context, fqdomain):
            raise exception.NotFound

        self.stubs.Set(network.api.API, "delete_dns_domain",
                       delete_dns_domain_notfound)

        req = fakes.HTTPRequest.blank('/v2/123/os-floating-ip-dns/%s' %
                                      _quote_domain(domain))
        self.assertRaises(webob.exc.HTTPNotFound,
            self.domain_controller.delete, req, _quote_domain(domain))

    def test_modify(self):
        body = {'dns_entry':
                {'ip': test_ipv4_address2,
                 'dns_type': 'A'}}
        req = fakes.HTTPRequest.blank(
            '/v2/123/os-floating-ip-dns/%s/entries/%s' % (domain, name))
        entry = self.entry_controller.update(req, domain, name, body=body)

        self.assertEqual(entry['dns_entry']['ip'], test_ipv4_address2)
+
+
class FloatingIpDNSTestV2(FloatingIpDNSTestV21):
    """Re-run the DNS tests against the legacy v2 extension."""

    floating_ip_dns = fipdns_v2

    def _check_status(self, expected_status, res, controller_method):
        # v2 controllers return a full webob response object.
        # (Parameter was previously misspelled "controller_methord".)
        self.assertEqual(expected_status, res.status_int)

    def _bad_request(self):
        # v2 rejects malformed bodies with 422 rather than 400.
        return webob.exc.HTTPUnprocessableEntity
+
+
class FloatingIpDNSSerializerTestV2(test.TestCase):
    """XML serializer tests for the legacy v2 DNS extension."""

    floating_ip_dns = fipdns_v2

    def test_domains(self):
        tmpl = self.floating_ip_dns.DomainsTemplate()
        xml = tmpl.serialize({
            'domain_entries': [
                {'domain': domain, 'scope': 'public',
                 'project': 'testproject'},
                {'domain': domain2, 'scope': 'private',
                 'availability_zone': 'avzone'}]})

        root = etree.fromstring(xml)
        self.assertEqual('domain_entries', root.tag)
        self.assertEqual(2, len(root))
        self.assertEqual(domain, root[0].get('domain'))
        self.assertEqual(domain2, root[1].get('domain'))
        self.assertEqual('avzone', root[1].get('availability_zone'))

    def test_domain_serializer(self):
        tmpl = self.floating_ip_dns.DomainTemplate()
        xml = tmpl.serialize({
            'domain_entry': {'domain': domain,
                             'scope': 'public',
                             'project': 'testproject'}})

        root = etree.fromstring(xml)
        self.assertEqual('domain_entry', root.tag)
        self.assertEqual(domain, root.get('domain'))
        self.assertEqual('testproject', root.get('project'))

    def test_entries_serializer(self):
        tmpl = self.floating_ip_dns.FloatingIPDNSsTemplate()
        xml = tmpl.serialize({
            'dns_entries': [
                {'ip': test_ipv4_address, 'type': 'A',
                 'domain': domain, 'name': name},
                {'ip': test_ipv4_address2, 'type': 'C',
                 'domain': domain, 'name': name2}]})

        root = etree.fromstring(xml)
        self.assertEqual('dns_entries', root.tag)
        self.assertEqual(2, len(root))
        self.assertEqual('dns_entry', root[0].tag)
        self.assertEqual('dns_entry', root[1].tag)
        self.assertEqual(test_ipv4_address, root[0].get('ip'))
        self.assertEqual('A', root[0].get('type'))
        self.assertEqual(domain, root[0].get('domain'))
        self.assertEqual(name, root[0].get('name'))
        self.assertEqual(test_ipv4_address2, root[1].get('ip'))
        self.assertEqual('C', root[1].get('type'))
        self.assertEqual(domain, root[1].get('domain'))
        self.assertEqual(name2, root[1].get('name'))

    def test_entry_serializer(self):
        tmpl = self.floating_ip_dns.FloatingIPDNSTemplate()
        xml = tmpl.serialize({
            'dns_entry': {'ip': test_ipv4_address,
                          'type': 'A',
                          'domain': domain,
                          'name': name}})

        root = etree.fromstring(xml)

        self.assertEqual('dns_entry', root.tag)
        self.assertEqual(test_ipv4_address, root.get('ip'))
        self.assertEqual(domain, root.get('domain'))
        self.assertEqual(name, root.get('name'))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_floating_ip_pools.py b/nova/tests/unit/api/openstack/compute/contrib/test_floating_ip_pools.py
new file mode 100644
index 0000000000..926e88c6ae
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_floating_ip_pools.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+
+from nova.api.openstack.compute.contrib import floating_ip_pools as fipp_v2
+from nova.api.openstack.compute.plugins.v3 import floating_ip_pools as\
+ fipp_v21
+from nova import context
+from nova import network
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
def fake_get_floating_ip_pools(self, context):
    """Fake network.api.API.get_floating_ip_pools: two fixed pools."""
    pools = ['nova', 'other']
    return pools
+
+
class FloatingIpPoolTestV21(test.NoDBTestCase):
    """Tests for the v2.1 os-floating-ip-pools API."""

    floating_ip_pools = fipp_v21
    url = '/v2/fake/os-floating-ip-pools'

    def setUp(self):
        super(FloatingIpPoolTestV21, self).setUp()
        self.stubs.Set(network.api.API, "get_floating_ip_pools",
                       fake_get_floating_ip_pools)

        self.context = context.RequestContext('fake', 'fake')
        self.controller = self.floating_ip_pools.FloatingIPPoolsController()

    def test_translate_floating_ip_pools_view(self):
        pool_names = fake_get_floating_ip_pools(None, self.context)
        view = self.floating_ip_pools._translate_floating_ip_pools_view(
            pool_names)
        self.assertIn('floating_ip_pools', view)
        for idx, pool_name in enumerate(pool_names):
            self.assertEqual(view['floating_ip_pools'][idx]['name'],
                             pool_name)

    def test_floating_ips_pools_list(self):
        request = fakes.HTTPRequest.blank(self.url)
        actual = self.controller.index(request)

        pool_names = fake_get_floating_ip_pools(None, self.context)
        expected = {'floating_ip_pools':
                    [{'name': pool} for pool in pool_names]}
        self.assertEqual(actual, expected)
+
+
class FloatingIpPoolTestV2(FloatingIpPoolTestV21):
    """Re-run the v2.1 pool tests against the legacy v2 extension."""
    floating_ip_pools = fipp_v2
+
+
class FloatingIpPoolSerializerTestV2(test.NoDBTestCase):
    """XML serializer test for the legacy v2 pools extension."""

    floating_ip_pools = fipp_v2

    def test_index_serializer(self):
        tmpl = self.floating_ip_pools.FloatingIPPoolsTemplate()
        xml = tmpl.serialize({'floating_ip_pools': [{'name': 'nova'},
                                                    {'name': 'other'}]})

        root = etree.fromstring(xml)

        self.assertEqual('floating_ip_pools', root.tag)
        self.assertEqual(2, len(root))
        self.assertEqual('floating_ip_pool', root[0].tag)
        self.assertEqual('floating_ip_pool', root[1].tag)
        self.assertEqual('nova', root[0].get('name'))
        self.assertEqual('other', root[1].get('name'))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_floating_ips.py b/nova/tests/unit/api/openstack/compute/contrib/test_floating_ips.py
new file mode 100644
index 0000000000..b383d1dbc1
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_floating_ips.py
@@ -0,0 +1,853 @@
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2011 Eldar Nugaev
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import uuid
+
+from lxml import etree
+import mock
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import floating_ips
+from nova.api.openstack import extensions
+from nova import compute
+from nova.compute import utils as compute_utils
+from nova import context
+from nova import db
+from nova import exception
+from nova import network
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_network
+
+
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+
+
def network_api_get_floating_ip(self, context, id):
    """Fake network.api.API.get_floating_ip: an unassociated ip."""
    floating = {'id': 1, 'pool': 'nova'}
    floating['address'] = '10.10.10.10'
    floating['fixed_ip_id'] = None
    return floating
+
+
def network_api_get_floating_ip_by_address(self, context, address):
    """Fake network.api.API.get_floating_ip_by_address: associated ip."""
    floating = {'id': 1, 'pool': 'nova'}
    floating['address'] = '10.10.10.10'
    floating['fixed_ip_id'] = 10
    return floating
+
+
def network_api_get_floating_ips_by_project(self, context):
    """Fake: one associated and one free floating ip for the project."""
    associated = {'id': 1,
                  'address': '10.10.10.10',
                  'pool': 'nova',
                  'fixed_ip': {'address': '10.0.0.1',
                               'instance_uuid': FAKE_UUID,
                               'instance': {'uuid': FAKE_UUID}}}
    free = {'id': 2,
            'pool': 'nova', 'interface': 'eth0',
            'address': '10.10.10.11',
            'fixed_ip': None}
    return [associated, free]
+
+
def compute_api_get(self, context, instance_id, expected_attrs=None,
                    want_objects=False):
    """Fake compute.api.API.get returning a minimal instance dict."""
    return {'uuid': FAKE_UUID, 'id': instance_id, 'instance_type_id': 1,
            'host': 'bob'}
+
+
def network_api_allocate(self, context):
    # Fake allocation always hands out the same address.
    return '10.10.10.10'
+
+
def network_api_release(self, context, address):
    # Fake: releasing is a no-op.
    pass
+
+
def compute_api_associate(self, context, instance_id, address):
    # Fake: association is a no-op.
    pass
+
+
def network_api_associate(self, context, floating_address, fixed_address):
    # Fake: association is a no-op.
    pass
+
+
def network_api_disassociate(self, context, instance, floating_address):
    # Fake: disassociation is a no-op.
    pass
+
+
def fake_instance_get(context, instance_id):
    """Fake db.instance_get returning a minimal instance record.

    Note the ``id`` is always 1 regardless of *instance_id*; only the
    uuid varies between calls.
    """
    record = {"id": 1, "name": 'fake'}
    record["uuid"] = uuid.uuid4()
    record["user_id"] = 'fakeuser'
    record["project_id"] = '123'
    return record
+
+
def stub_nw_info(stubs):
    """Return a callable mimicking compute_utils.get_nw_info_for_instance."""
    def _fake_nw_info(instance):
        return fake_network.fake_get_instance_nw_info(stubs)
    return _fake_nw_info
+
+
def get_instance_by_floating_ip_addr(self, context, address):
    # Fake: no instance is ever associated with the address.
    return None
+
+
class FloatingIpTestNeutron(test.NoDBTestCase):
    """Delete semantics when neutron is the network API."""

    def setUp(self):
        super(FloatingIpTestNeutron, self).setUp()
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        self.controller = floating_ips.FloatingIPController()

    def _get_fake_request(self):
        return fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/1')

    def test_floatingip_delete(self):
        req = self._get_fake_request()
        fake_fip = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'}
        network_api = self.controller.network_api
        with contextlib.nested(
            mock.patch.object(network_api,
                              'disassociate_floating_ip'),
            mock.patch.object(network_api,
                              'disassociate_and_release_floating_ip'),
            mock.patch.object(network_api,
                              'release_floating_ip'),
            mock.patch.object(network_api,
                              'get_instance_id_by_floating_address',
                              return_value=None),
            mock.patch.object(network_api,
                              'get_floating_ip',
                              return_value=fake_fip)) as (
                mock_disassociate, mock_dis_and_del, mock_release, _, _):
            self.controller.delete(req, 1)
            self.assertFalse(mock_disassociate.called)
            self.assertFalse(mock_release.called)
            # Only disassociate_and_release_floating_ip is
            # called if using neutron
            self.assertTrue(mock_dis_and_del.called)
+
+
+class FloatingIpTest(test.TestCase):
    # Addresses used throughout; the second is only created by the
    # bulk-creation tests.
    floating_ip = "10.10.10.10"
    floating_ip_2 = "10.10.10.11"
+
+ def _create_floating_ips(self, floating_ips=None):
+ """Create a floating ip object."""
+ if floating_ips is None:
+ floating_ips = [self.floating_ip]
+ elif not isinstance(floating_ips, (list, tuple)):
+ floating_ips = [floating_ips]
+
+ def make_ip_dict(ip):
+ """Shortcut for creating floating ip dict."""
+ return
+
+ dict_ = {'pool': 'nova', 'host': 'fake_host'}
+ return db.floating_ip_bulk_create(
+ self.context, [dict(address=ip, **dict_) for ip in floating_ips],
+ )
+
    def _delete_floating_ip(self):
        # Remove only the primary test address created by setUp().
        db.floating_ip_destroy(self.context, self.floating_ip)
+
    def _get_fake_fip_request(self, act=''):
        # Build a request against the os-floating-ips resource.
        return fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/%s' % act)
+
    def _get_fake_server_request(self):
        # Build a request against a server action URL.
        return fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+
    def _get_fake_response(self, req, init_only):
        # Run the request through the full WSGI app, loading only the
        # named resource.
        return req.get_response(fakes.wsgi_app(init_only=(init_only,)))
+
    def setUp(self):
        # Replace every compute/network API entry point the extension
        # touches with the module-level fakes, seed the DB with one
        # floating ip, and load only the Floating_ips extension.
        super(FloatingIpTest, self).setUp()
        self.stubs.Set(compute.api.API, "get",
                       compute_api_get)
        self.stubs.Set(network.api.API, "get_floating_ip",
                       network_api_get_floating_ip)
        self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                       network_api_get_floating_ip_by_address)
        self.stubs.Set(network.api.API, "get_floating_ips_by_project",
                       network_api_get_floating_ips_by_project)
        self.stubs.Set(network.api.API, "release_floating_ip",
                       network_api_release)
        self.stubs.Set(network.api.API, "disassociate_floating_ip",
                       network_api_disassociate)
        self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
                       get_instance_by_floating_ip_addr)
        self.stubs.Set(compute_utils, "get_nw_info_for_instance",
                       stub_nw_info(self.stubs))

        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
        self.stubs.Set(db, 'instance_get',
                       fake_instance_get)

        self.context = context.get_admin_context()
        self._create_floating_ips()

        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = floating_ips.FloatingIPController()
        self.manager = floating_ips.FloatingIPActionController(self.ext_mgr)

        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Floating_ips'])
+
    def tearDown(self):
        # Drop the floating ip created in setUp() before base teardown.
        self._delete_floating_ip()
        super(FloatingIpTest, self).tearDown()
+
    def test_floatingip_delete(self):
        # With nova-network, delete must disassociate and then release.
        req = self._get_fake_fip_request('1')
        fip_val = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'}
        with contextlib.nested(
            mock.patch.object(self.controller.network_api,
                              'disassociate_floating_ip'),
            mock.patch.object(self.controller.network_api,
                              'release_floating_ip'),
            mock.patch.object(self.controller.network_api,
                              'get_instance_id_by_floating_address',
                              return_value=None),
            mock.patch.object(self.controller.network_api,
                              'get_floating_ip',
                              return_value=fip_val)) as (
                disoc_fip, rel_fip, _, _):
            self.controller.delete(req, 1)
            self.assertTrue(disoc_fip.called)
            self.assertTrue(rel_fip.called)
+
    def test_translate_floating_ip_view(self):
        # Exercise the translation helper on a real DB object.
        floating_ip_address = self.floating_ip
        floating_ip = db.floating_ip_get_by_address(self.context,
                                                    floating_ip_address)
        # NOTE(vish): network_get uses the id not the address
        floating_ip = db.floating_ip_get(self.context, floating_ip['id'])
        view = floating_ips._translate_floating_ip_view(floating_ip)
        self.assertIn('floating_ip', view)
        self.assertTrue(view['floating_ip']['id'])
        self.assertEqual(view['floating_ip']['ip'], self.floating_ip)
        self.assertIsNone(view['floating_ip']['fixed_ip'])
        self.assertIsNone(view['floating_ip']['instance_id'])
+
+ def test_translate_floating_ip_view_dict(self):
+ floating_ip = {'id': 0, 'address': '10.0.0.10', 'pool': 'nova',
+ 'fixed_ip': None}
+ view = floating_ips._translate_floating_ip_view(floating_ip)
+ self.assertIn('floating_ip', view)
+
+ def test_floating_ips_list(self):
+ req = self._get_fake_fip_request()
+ res_dict = self.controller.index(req)
+
+ response = {'floating_ips': [{'instance_id': FAKE_UUID,
+ 'ip': '10.10.10.10',
+ 'pool': 'nova',
+ 'fixed_ip': '10.0.0.1',
+ 'id': 1},
+ {'instance_id': None,
+ 'ip': '10.10.10.11',
+ 'pool': 'nova',
+ 'fixed_ip': None,
+ 'id': 2}]}
+ self.assertEqual(res_dict, response)
+
    def test_floating_ip_release_nonexisting(self):
        # Deleting an unknown id must produce a 404 with a precise body.
        def fake_get_floating_ip(*args, **kwargs):
            # note: ``id`` here is the builtin; the 9876 in the message
            # comes from the request URL.
            raise exception.FloatingIpNotFound(id=id)

        self.stubs.Set(network.api.API, "get_floating_ip",
                       fake_get_floating_ip)

        req = self._get_fake_fip_request('9876')
        req.method = 'DELETE'
        res = self._get_fake_response(req, 'os-floating-ips')
        self.assertEqual(res.status_int, 404)
        expected_msg = ('{"itemNotFound": {"message": "Floating ip not found '
                        'for id 9876", "code": 404}}')
        self.assertEqual(res.body, expected_msg)
+
    def test_floating_ip_release_race_cond(self):
        # If the ip is disassociated by someone else between lookup and
        # disassociate, the delete must still succeed with a 202.
        def fake_get_floating_ip(*args, **kwargs):
            return {'fixed_ip_id': 1, 'address': self.floating_ip}

        def fake_get_instance_by_floating_ip_addr(*args, **kwargs):
            return 'test-inst'

        def fake_disassociate_floating_ip(*args, **kwargs):
            raise exception.FloatingIpNotAssociated(args[3])

        self.stubs.Set(network.api.API, "get_floating_ip",
                       fake_get_floating_ip)
        self.stubs.Set(floating_ips, "get_instance_by_floating_ip_addr",
                       fake_get_instance_by_floating_ip_addr)
        self.stubs.Set(floating_ips, "disassociate_floating_ip",
                       fake_disassociate_floating_ip)

        req = self._get_fake_fip_request('1')
        req.method = 'DELETE'
        res = self._get_fake_response(req, 'os-floating-ips')
        self.assertEqual(res.status_int, 202)
+
    def test_floating_ip_show(self):
        # Show on an unassociated ip reports no instance.
        req = self._get_fake_fip_request('1')
        res_dict = self.controller.show(req, 1)

        self.assertEqual(res_dict['floating_ip']['id'], 1)
        self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
        self.assertIsNone(res_dict['floating_ip']['instance_id'])
+
    def test_floating_ip_show_not_found(self):
        # Unknown id yields a 404 with a precise error body.
        def fake_get_floating_ip(*args, **kwargs):
            raise exception.FloatingIpNotFound(id='fake')

        self.stubs.Set(network.api.API, "get_floating_ip",
                       fake_get_floating_ip)

        req = self._get_fake_fip_request('9876')
        res = self._get_fake_response(req, 'os-floating-ips')
        self.assertEqual(res.status_int, 404)
        expected_msg = ('{"itemNotFound": {"message": "Floating ip not found '
                        'for id 9876", "code": 404}}')
        self.assertEqual(res.body, expected_msg)
+
    def test_show_associated_floating_ip(self):
        # Show on an associated ip surfaces the fixed ip and instance.
        def get_floating_ip(self, context, id):
            return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
                    'fixed_ip': {'address': '10.0.0.1',
                                 'instance_uuid': FAKE_UUID,
                                 'instance': {'uuid': FAKE_UUID}}}

        self.stubs.Set(network.api.API, "get_floating_ip", get_floating_ip)

        req = self._get_fake_fip_request('1')
        res_dict = self.controller.show(req, 1)

        self.assertEqual(res_dict['floating_ip']['id'], 1)
        self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
        self.assertEqual(res_dict['floating_ip']['fixed_ip'], '10.0.0.1')
        self.assertEqual(res_dict['floating_ip']['instance_id'], FAKE_UUID)
+
    def test_recreation_of_floating_ip(self):
        # Deleting and recreating the same address must not error.
        self._delete_floating_ip()
        self._create_floating_ips()
+
    def test_floating_ip_in_bulk_creation(self):
        # Bulk create must insert every address supplied.
        self._delete_floating_ip()

        self._create_floating_ips([self.floating_ip, self.floating_ip_2])
        all_ips = db.floating_ip_get_all(self.context)
        ip_list = [ip['address'] for ip in all_ips]
        self.assertIn(self.floating_ip, ip_list)
        self.assertIn(self.floating_ip_2, ip_list)
+
    def test_fail_floating_ip_in_bulk_creation(self):
        # The first address already exists (from setUp), so bulk create
        # must fail and must not insert the second one.
        self.assertRaises(exception.FloatingIpExists,
                          self._create_floating_ips,
                          [self.floating_ip, self.floating_ip_2])
        all_ips = db.floating_ip_get_all(self.context)
        ip_list = [ip['address'] for ip in all_ips]
        self.assertIn(self.floating_ip, ip_list)
        self.assertNotIn(self.floating_ip_2, ip_list)
+
    def test_floating_ip_allocate_no_free_ips(self):
        # Pool exhaustion surfaces as a 404 with a clear message.
        def fake_allocate(*args, **kwargs):
            raise exception.NoMoreFloatingIps()

        self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate)

        req = self._get_fake_fip_request()
        ex = self.assertRaises(webob.exc.HTTPNotFound,
                               self.controller.create, req)

        self.assertIn('No more floating ips', ex.explanation)
+
    def test_floating_ip_allocate_no_free_ips_pool(self):
        # When a pool was requested, the pool name appears in the error.
        def fake_allocate(*args, **kwargs):
            raise exception.NoMoreFloatingIps()

        self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate)

        req = self._get_fake_fip_request()
        ex = self.assertRaises(webob.exc.HTTPNotFound,
            self.controller.create, req, {'pool': 'non_existent_pool'})

        self.assertIn('No more floating ips in pool non_existent_pool',
                      ex.explanation)
+
    @mock.patch('nova.network.api.API.allocate_floating_ip',
                side_effect=exception.FloatingIpLimitExceeded())
    def test_floating_ip_allocate_over_quota(self, allocate_mock):
        # Quota exhaustion is a 403, not a 404.
        req = self._get_fake_fip_request()
        ex = self.assertRaises(webob.exc.HTTPForbidden,
                               self.controller.create, req)

        self.assertIn('IP allocation over quota', ex.explanation)
+
    @mock.patch('nova.network.api.API.allocate_floating_ip',
                side_effect=exception.FloatingIpLimitExceeded())
    def test_floating_ip_allocate_quota_exceed_in_pool(self, allocate_mock):
        # Quota exhaustion in a named pool includes the pool name.
        req = self._get_fake_fip_request()
        ex = self.assertRaises(webob.exc.HTTPForbidden,
            self.controller.create, req, {'pool': 'non_existent_pool'})

        self.assertIn('IP allocation over quota in pool non_existent_pool.',
                      ex.explanation)
+
    @mock.patch('nova.network.api.API.allocate_floating_ip',
                side_effect=exception.FloatingIpPoolNotFound())
    def test_floating_ip_create_with_unknown_pool(self, allocate_mock):
        # An unknown pool name maps to a 404.
        req = self._get_fake_fip_request()
        ex = self.assertRaises(webob.exc.HTTPNotFound,
            self.controller.create, req, {'pool': 'non_existent_pool'})

        self.assertIn('Floating ip pool not found.', ex.explanation)
+
    def test_floating_ip_allocate(self):
        # A successful allocation returns the translated view of the ip.
        def fake1(*args, **kwargs):
            pass

        def fake2(*args, **kwargs):
            return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova'}

        self.stubs.Set(network.api.API, "allocate_floating_ip",
                       fake1)
        self.stubs.Set(network.api.API, "get_floating_ip_by_address",
                       fake2)

        req = self._get_fake_fip_request()
        res_dict = self.controller.create(req)

        ip = res_dict['floating_ip']

        expected = {
            "id": 1,
            "instance_id": None,
            "ip": "10.10.10.10",
            "fixed_ip": None,
            "pool": 'nova'}
        self.assertEqual(ip, expected)
+
    def test_floating_ip_release(self):
        # Plain delete with the default stubs must not raise.
        req = self._get_fake_fip_request('1')
        self.controller.delete(req, 1)
+
    def test_floating_ip_associate(self):
        # addFloatingIp must pass the instance's fixed ip through.
        fixed_address = '192.168.1.100'

        def fake_associate_floating_ip(*args, **kwargs):
            self.assertEqual(fixed_address, kwargs['fixed_address'])

        self.stubs.Set(network.api.API, "associate_floating_ip",
                       fake_associate_floating_ip)
        body = dict(addFloatingIp=dict(address=self.floating_ip))

        req = self._get_fake_server_request()
        rsp = self.manager._add_floating_ip(req, 'test_inst', body)
        self.assertEqual(202, rsp.status_int)
+
+ def test_floating_ip_associate_invalid_instance(self):
+
+ def fake_get(self, context, id, expected_attrs=None,
+ want_objects=False):
+ raise exception.InstanceNotFound(instance_id=id)
+
+ self.stubs.Set(compute.api.API, "get", fake_get)
+
+ body = dict(addFloatingIp=dict(address=self.floating_ip))
+
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._add_floating_ip, req, 'test_inst',
+ body)
+
+ def test_not_extended_floating_ip_associate_fixed(self):
+ # Check that fixed_address is ignored if os-extended-floating-ips
+ # is not loaded
+ fixed_address_requested = '192.168.1.101'
+ fixed_address_allocated = '192.168.1.100'
+
+ def fake_associate_floating_ip(*args, **kwargs):
+ self.assertEqual(fixed_address_allocated,
+ kwargs['fixed_address'])
+
+ self.stubs.Set(network.api.API, "associate_floating_ip",
+ fake_associate_floating_ip)
+ body = dict(addFloatingIp=dict(address=self.floating_ip,
+ fixed_address=fixed_address_requested))
+
+ req = self._get_fake_server_request()
+ rsp = self.manager._add_floating_ip(req, 'test_inst', body)
+ self.assertEqual(202, rsp.status_int)
+
+ def test_associate_not_allocated_floating_ip_to_instance(self):
+ def fake_associate_floating_ip(self, context, instance,
+ floating_address, fixed_address,
+ affect_auto_assigned=False):
+ raise exception.FloatingIpNotFoundForAddress(
+ address=floating_address)
+ self.stubs.Set(network.api.API, "associate_floating_ip",
+ fake_associate_floating_ip)
+ floating_ip = '10.10.10.11'
+ body = dict(addFloatingIp=dict(address=floating_ip))
+ req = self._get_fake_server_request()
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ resp = self._get_fake_response(req, 'servers')
+ res_dict = jsonutils.loads(resp.body)
+ self.assertEqual(resp.status_int, 404)
+ self.assertEqual(res_dict['itemNotFound']['message'],
+ "floating ip not found")
+
+ @mock.patch.object(network.api.API, 'associate_floating_ip',
+ side_effect=exception.Forbidden)
+ def test_associate_floating_ip_forbidden(self, associate_mock):
+ body = dict(addFloatingIp=dict(address='10.10.10.11'))
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.manager._add_floating_ip, req, 'test_inst',
+ body)
+
+ def test_associate_floating_ip_bad_address_key(self):
+ body = dict(addFloatingIp=dict(bad_address='10.10.10.11'))
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._add_floating_ip, req, 'test_inst',
+ body)
+
+ def test_associate_floating_ip_bad_addfloatingip_key(self):
+ body = dict(bad_addFloatingIp=dict(address='10.10.10.11'))
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._add_floating_ip, req, 'test_inst',
+ body)
+
+ def test_floating_ip_disassociate(self):
+ def get_instance_by_floating_ip_addr(self, context, address):
+ if address == '10.10.10.10':
+ return 'test_inst'
+
+ self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
+ get_instance_by_floating_ip_addr)
+
+ body = dict(removeFloatingIp=dict(address='10.10.10.10'))
+
+ req = self._get_fake_server_request()
+ rsp = self.manager._remove_floating_ip(req, 'test_inst', body)
+ self.assertEqual(202, rsp.status_int)
+
+ def test_floating_ip_disassociate_missing(self):
+ body = dict(removeFloatingIp=dict(address='10.10.10.10'))
+
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.manager._remove_floating_ip,
+ req, 'test_inst', body)
+
+ def test_floating_ip_associate_non_existent_ip(self):
+ def fake_network_api_associate(self, context, instance,
+ floating_address=None,
+ fixed_address=None):
+ floating_ips = ["10.10.10.10", "10.10.10.11"]
+ if floating_address not in floating_ips:
+ raise exception.FloatingIpNotFoundForAddress(
+ address=floating_address)
+
+ self.stubs.Set(network.api.API, "associate_floating_ip",
+ fake_network_api_associate)
+
+ body = dict(addFloatingIp=dict(address='1.1.1.1'))
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._add_floating_ip,
+ req, 'test_inst', body)
+
+ def test_floating_ip_disassociate_non_existent_ip(self):
+ def network_api_get_floating_ip_by_address(self, context,
+ floating_address):
+ floating_ips = ["10.10.10.10", "10.10.10.11"]
+ if floating_address not in floating_ips:
+ raise exception.FloatingIpNotFoundForAddress(
+ address=floating_address)
+
+ self.stubs.Set(network.api.API, "get_floating_ip_by_address",
+ network_api_get_floating_ip_by_address)
+
+ body = dict(removeFloatingIp=dict(address='1.1.1.1'))
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._remove_floating_ip,
+ req, 'test_inst', body)
+
+ def test_floating_ip_disassociate_wrong_instance_uuid(self):
+ def get_instance_by_floating_ip_addr(self, context, address):
+ if address == '10.10.10.10':
+ return 'test_inst'
+
+ self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
+ get_instance_by_floating_ip_addr)
+
+ wrong_uuid = 'aaaaaaaa-ffff-ffff-ffff-aaaaaaaaaaaa'
+ body = dict(removeFloatingIp=dict(address='10.10.10.10'))
+
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.manager._remove_floating_ip,
+ req, wrong_uuid, body)
+
+ def test_floating_ip_disassociate_wrong_instance_id(self):
+ def get_instance_by_floating_ip_addr(self, context, address):
+ if address == '10.10.10.10':
+ return 'wrong_inst'
+
+ self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
+ get_instance_by_floating_ip_addr)
+
+ body = dict(removeFloatingIp=dict(address='10.10.10.10'))
+
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.manager._remove_floating_ip,
+ req, 'test_inst', body)
+
+ def test_floating_ip_disassociate_auto_assigned(self):
+ def fake_get_floating_ip_addr_auto_assigned(self, context, address):
+ return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
+ 'fixed_ip_id': 10, 'auto_assigned': 1}
+
+ def get_instance_by_floating_ip_addr(self, context, address):
+ if address == '10.10.10.10':
+ return 'test_inst'
+
+ def network_api_disassociate(self, context, instance,
+ floating_address):
+ raise exception.CannotDisassociateAutoAssignedFloatingIP()
+
+ self.stubs.Set(network.api.API, "get_floating_ip_by_address",
+ fake_get_floating_ip_addr_auto_assigned)
+ self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
+ get_instance_by_floating_ip_addr)
+ self.stubs.Set(network.api.API, "disassociate_floating_ip",
+ network_api_disassociate)
+ body = dict(removeFloatingIp=dict(address='10.10.10.10'))
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.manager._remove_floating_ip,
+ req, 'test_inst', body)
+
+ def test_floating_ip_disassociate_map_authorization_exc(self):
+ def fake_get_floating_ip_addr_auto_assigned(self, context, address):
+ return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova',
+ 'fixed_ip_id': 10, 'auto_assigned': 1}
+
+ def get_instance_by_floating_ip_addr(self, context, address):
+ if address == '10.10.10.10':
+ return 'test_inst'
+
+ def network_api_disassociate(self, context, instance, address):
+ raise exception.Forbidden()
+
+ self.stubs.Set(network.api.API, "get_floating_ip_by_address",
+ fake_get_floating_ip_addr_auto_assigned)
+ self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
+ get_instance_by_floating_ip_addr)
+ self.stubs.Set(network.api.API, "disassociate_floating_ip",
+ network_api_disassociate)
+ body = dict(removeFloatingIp=dict(address='10.10.10.10'))
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.manager._remove_floating_ip,
+ req, 'test_inst', body)
+
+# these are a few bad param tests
+
+ def test_bad_address_param_in_remove_floating_ip(self):
+ body = dict(removeFloatingIp=dict(badparam='11.0.0.1'))
+
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._remove_floating_ip, req, 'test_inst',
+ body)
+
+ def test_missing_dict_param_in_remove_floating_ip(self):
+ body = dict(removeFloatingIp='11.0.0.1')
+
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._remove_floating_ip, req, 'test_inst',
+ body)
+
+ def test_missing_dict_param_in_add_floating_ip(self):
+ body = dict(addFloatingIp='11.0.0.1')
+
+ req = self._get_fake_server_request()
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._add_floating_ip, req, 'test_inst',
+ body)
+
+
+class ExtendedFloatingIpTest(test.TestCase):
+ floating_ip = "10.10.10.10"
+ floating_ip_2 = "10.10.10.11"
+
+ def _create_floating_ips(self, floating_ips=None):
+ """Create a floating ip object."""
+ if floating_ips is None:
+ floating_ips = [self.floating_ip]
+ elif not isinstance(floating_ips, (list, tuple)):
+ floating_ips = [floating_ips]
+
+ def make_ip_dict(ip):
+ """Shortcut for creating floating ip dict."""
+            return dict(address=ip, **dict_)
+
+ dict_ = {'pool': 'nova', 'host': 'fake_host'}
+ return db.floating_ip_bulk_create(
+ self.context, [dict(address=ip, **dict_) for ip in floating_ips],
+ )
+
+ def _delete_floating_ip(self):
+ db.floating_ip_destroy(self.context, self.floating_ip)
+
+ def _get_fake_request(self):
+ return fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+
+ def _get_fake_response(self, req, init_only):
+ return req.get_response(fakes.wsgi_app(init_only=(init_only,)))
+
+ def setUp(self):
+ super(ExtendedFloatingIpTest, self).setUp()
+ self.stubs.Set(compute.api.API, "get",
+ compute_api_get)
+ self.stubs.Set(network.api.API, "get_floating_ip",
+ network_api_get_floating_ip)
+ self.stubs.Set(network.api.API, "get_floating_ip_by_address",
+ network_api_get_floating_ip_by_address)
+ self.stubs.Set(network.api.API, "get_floating_ips_by_project",
+ network_api_get_floating_ips_by_project)
+ self.stubs.Set(network.api.API, "release_floating_ip",
+ network_api_release)
+ self.stubs.Set(network.api.API, "disassociate_floating_ip",
+ network_api_disassociate)
+ self.stubs.Set(network.api.API, "get_instance_id_by_floating_address",
+ get_instance_by_floating_ip_addr)
+ self.stubs.Set(compute_utils, "get_nw_info_for_instance",
+ stub_nw_info(self.stubs))
+
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+ self.stubs.Set(db, 'instance_get',
+ fake_instance_get)
+
+ self.context = context.get_admin_context()
+ self._create_floating_ips()
+
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.ext_mgr.extensions['os-floating-ips'] = True
+ self.ext_mgr.extensions['os-extended-floating-ips'] = True
+ self.controller = floating_ips.FloatingIPController()
+ self.manager = floating_ips.FloatingIPActionController(self.ext_mgr)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Floating_ips', 'Extended_floating_ips'])
+
+ def tearDown(self):
+ self._delete_floating_ip()
+ super(ExtendedFloatingIpTest, self).tearDown()
+
+ def test_extended_floating_ip_associate_fixed(self):
+ fixed_address = '192.168.1.101'
+
+ def fake_associate_floating_ip(*args, **kwargs):
+ self.assertEqual(fixed_address, kwargs['fixed_address'])
+
+ self.stubs.Set(network.api.API, "associate_floating_ip",
+ fake_associate_floating_ip)
+ body = dict(addFloatingIp=dict(address=self.floating_ip,
+ fixed_address=fixed_address))
+
+ req = self._get_fake_request()
+ rsp = self.manager._add_floating_ip(req, 'test_inst', body)
+ self.assertEqual(202, rsp.status_int)
+
+ def test_extended_floating_ip_associate_fixed_not_allocated(self):
+ def fake_associate_floating_ip(*args, **kwargs):
+ pass
+
+ self.stubs.Set(network.api.API, "associate_floating_ip",
+ fake_associate_floating_ip)
+ body = dict(addFloatingIp=dict(address=self.floating_ip,
+ fixed_address='11.11.11.11'))
+
+ req = self._get_fake_request()
+ req.method = "POST"
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ resp = self._get_fake_response(req, 'servers')
+ res_dict = jsonutils.loads(resp.body)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual(res_dict['badRequest']['message'],
+ "Specified fixed address not assigned to instance")
+
+
+class FloatingIpSerializerTest(test.TestCase):
+ def test_default_serializer(self):
+ serializer = floating_ips.FloatingIPTemplate()
+ text = serializer.serialize(dict(
+ floating_ip=dict(
+ instance_id=1,
+ ip='10.10.10.10',
+ fixed_ip='10.0.0.1',
+ id=1)))
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('floating_ip', tree.tag)
+ self.assertEqual('1', tree.get('instance_id'))
+ self.assertEqual('10.10.10.10', tree.get('ip'))
+ self.assertEqual('10.0.0.1', tree.get('fixed_ip'))
+ self.assertEqual('1', tree.get('id'))
+
+ def test_index_serializer(self):
+ serializer = floating_ips.FloatingIPsTemplate()
+ text = serializer.serialize(dict(
+ floating_ips=[
+ dict(instance_id=1,
+ ip='10.10.10.10',
+ fixed_ip='10.0.0.1',
+ id=1),
+ dict(instance_id=None,
+ ip='10.10.10.11',
+ fixed_ip=None,
+ id=2)]))
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('floating_ips', tree.tag)
+ self.assertEqual(2, len(tree))
+ self.assertEqual('floating_ip', tree[0].tag)
+ self.assertEqual('floating_ip', tree[1].tag)
+ self.assertEqual('1', tree[0].get('instance_id'))
+ self.assertEqual('None', tree[1].get('instance_id'))
+ self.assertEqual('10.10.10.10', tree[0].get('ip'))
+ self.assertEqual('10.10.10.11', tree[1].get('ip'))
+ self.assertEqual('10.0.0.1', tree[0].get('fixed_ip'))
+ self.assertEqual('None', tree[1].get('fixed_ip'))
+ self.assertEqual('1', tree[0].get('id'))
+ self.assertEqual('2', tree[1].get('id'))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_floating_ips_bulk.py b/nova/tests/unit/api/openstack/compute/contrib/test_floating_ips_bulk.py
new file mode 100644
index 0000000000..8c81d99ab0
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_floating_ips_bulk.py
@@ -0,0 +1,139 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import netaddr
+from oslo.config import cfg
+import webob
+
+from nova.api.openstack.compute.contrib import floating_ips_bulk as fipbulk_v2
+from nova.api.openstack.compute.plugins.v3 import floating_ips_bulk as\
+ fipbulk_v21
+from nova import context
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+CONF = cfg.CONF
+
+
+class FloatingIPBulkV21(test.TestCase):
+
+ floating_ips_bulk = fipbulk_v21
+ url = '/v2/fake/os-floating-ips-bulk'
+ delete_url = '/v2/fake/os-fixed-ips/delete'
+ bad_request = exception.ValidationError
+
+ def setUp(self):
+ super(FloatingIPBulkV21, self).setUp()
+
+ self.context = context.get_admin_context()
+ self.controller = self.floating_ips_bulk.FloatingIPBulkController()
+
+ def _setup_floating_ips(self, ip_range):
+ body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
+ req = fakes.HTTPRequest.blank(self.url)
+ res_dict = self.controller.create(req, body=body)
+ response = {"floating_ips_bulk_create": {
+ 'ip_range': ip_range,
+ 'pool': CONF.default_floating_pool,
+ 'interface': CONF.public_interface}}
+ self.assertEqual(res_dict, response)
+
+ def test_create_ips(self):
+ ip_range = '192.168.1.0/24'
+ self._setup_floating_ips(ip_range)
+
+ def test_create_ips_pool(self):
+ ip_range = '10.0.1.0/20'
+ pool = 'a new pool'
+ body = {'floating_ips_bulk_create':
+ {'ip_range': ip_range,
+ 'pool': pool}}
+ req = fakes.HTTPRequest.blank(self.url)
+ res_dict = self.controller.create(req, body=body)
+ response = {"floating_ips_bulk_create": {
+ 'ip_range': ip_range,
+ 'pool': pool,
+ 'interface': CONF.public_interface}}
+ self.assertEqual(res_dict, response)
+
+ def test_list_ips(self):
+ ip_range = '192.168.1.1/28'
+ self._setup_floating_ips(ip_range)
+ req = fakes.HTTPRequest.blank(self.url, use_admin_context=True)
+ res_dict = self.controller.index(req)
+
+ ip_info = [{'address': str(ip_addr),
+ 'pool': CONF.default_floating_pool,
+ 'interface': CONF.public_interface,
+ 'project_id': None,
+ 'instance_uuid': None}
+ for ip_addr in netaddr.IPNetwork(ip_range).iter_hosts()]
+ response = {'floating_ip_info': ip_info}
+
+ self.assertEqual(res_dict, response)
+
+ def test_list_ip_by_host(self):
+ ip_range = '192.168.1.1/28'
+ self._setup_floating_ips(ip_range)
+ req = fakes.HTTPRequest.blank(self.url, use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.show, req, 'host')
+
+ def test_delete_ips(self):
+ ip_range = '192.168.1.0/20'
+ self._setup_floating_ips(ip_range)
+
+ body = {'ip_range': ip_range}
+ req = fakes.HTTPRequest.blank(self.delete_url)
+ res_dict = self.controller.update(req, "delete", body=body)
+
+ response = {"floating_ips_bulk_delete": ip_range}
+ self.assertEqual(res_dict, response)
+
+ # Check that the IPs are actually deleted
+ req = fakes.HTTPRequest.blank(self.url, use_admin_context=True)
+ res_dict = self.controller.index(req)
+ response = {'floating_ip_info': []}
+ self.assertEqual(res_dict, response)
+
+ def test_create_duplicate_fail(self):
+ ip_range = '192.168.1.0/20'
+ self._setup_floating_ips(ip_range)
+
+ ip_range = '192.168.1.0/28'
+ body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, body=body)
+
+ def test_create_bad_cidr_fail(self):
+ # netaddr can't handle /32 or 31 cidrs
+ ip_range = '192.168.1.1/32'
+ body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, body=body)
+
+ def test_create_invalid_cidr_fail(self):
+ ip_range = 'not a cidr'
+ body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(self.bad_request, self.controller.create,
+ req, body=body)
+
+
+class FloatingIPBulkV2(FloatingIPBulkV21):
+ floating_ips_bulk = fipbulk_v2
+ bad_request = webob.exc.HTTPBadRequest
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_fping.py b/nova/tests/unit/api/openstack/compute/contrib/test_fping.py
new file mode 100644
index 0000000000..a6364d6ee7
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_fping.py
@@ -0,0 +1,106 @@
+# Copyright 2011 Grid Dynamics
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack.compute.contrib import fping
+from nova.api.openstack.compute.plugins.v3 import fping as fping_v21
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+import nova.utils
+
+
+FAKE_UUID = fakes.FAKE_UUID
+
+
+def execute(*cmd, **args):
+ return "".join(["%s is alive" % ip for ip in cmd[1:]])
+
+
+class FpingTestV21(test.TestCase):
+ controller_cls = fping_v21.FpingController
+
+ def setUp(self):
+ super(FpingTestV21, self).setUp()
+ self.flags(verbose=True, use_ipv6=False)
+ return_server = fakes.fake_instance_get()
+ return_servers = fakes.fake_instance_get_all_by_filters()
+ self.stubs.Set(nova.db, "instance_get_all_by_filters",
+ return_servers)
+ self.stubs.Set(nova.db, "instance_get_by_uuid",
+ return_server)
+ self.stubs.Set(nova.utils, "execute",
+ execute)
+ self.stubs.Set(self.controller_cls, "check_fping",
+ lambda self: None)
+ self.controller = self.controller_cls()
+
+ def _get_url(self):
+ return "/v3"
+
+ def test_fping_index(self):
+ req = fakes.HTTPRequest.blank(self._get_url() + "/os-fping")
+ res_dict = self.controller.index(req)
+ self.assertIn("servers", res_dict)
+ for srv in res_dict["servers"]:
+ for key in "project_id", "id", "alive":
+ self.assertIn(key, srv)
+
+ def test_fping_index_policy(self):
+ req = fakes.HTTPRequest.blank(self._get_url() +
+ "os-fping?all_tenants=1")
+ self.assertRaises(exception.Forbidden, self.controller.index, req)
+ req = fakes.HTTPRequest.blank(self._get_url() +
+ "/os-fping?all_tenants=1")
+ req.environ["nova.context"].is_admin = True
+ res_dict = self.controller.index(req)
+ self.assertIn("servers", res_dict)
+
+ def test_fping_index_include(self):
+ req = fakes.HTTPRequest.blank(self._get_url() + "/os-fping")
+ res_dict = self.controller.index(req)
+ ids = [srv["id"] for srv in res_dict["servers"]]
+ req = fakes.HTTPRequest.blank(self._get_url() +
+ "/os-fping?include=%s" % ids[0])
+ res_dict = self.controller.index(req)
+ self.assertEqual(len(res_dict["servers"]), 1)
+ self.assertEqual(res_dict["servers"][0]["id"], ids[0])
+
+ def test_fping_index_exclude(self):
+ req = fakes.HTTPRequest.blank(self._get_url() + "/os-fping")
+ res_dict = self.controller.index(req)
+ ids = [srv["id"] for srv in res_dict["servers"]]
+ req = fakes.HTTPRequest.blank(self._get_url() +
+ "/os-fping?exclude=%s" %
+ ",".join(ids[1:]))
+ res_dict = self.controller.index(req)
+ self.assertEqual(len(res_dict["servers"]), 1)
+ self.assertEqual(res_dict["servers"][0]["id"], ids[0])
+
+ def test_fping_show(self):
+ req = fakes.HTTPRequest.blank(self._get_url() +
+ "os-fping/%s" % FAKE_UUID)
+ res_dict = self.controller.show(req, FAKE_UUID)
+ self.assertIn("server", res_dict)
+ srv = res_dict["server"]
+ for key in "project_id", "id", "alive":
+ self.assertIn(key, srv)
+
+
+class FpingTestV2(FpingTestV21):
+ controller_cls = fping.FpingController
+
+ def _get_url(self):
+ return "/v2/1234"
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_hide_server_addresses.py b/nova/tests/unit/api/openstack/compute/contrib/test_hide_server_addresses.py
new file mode 100644
index 0000000000..217fd480f9
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_hide_server_addresses.py
@@ -0,0 +1,172 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import itertools
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack import wsgi
+from nova import compute
+from nova.compute import vm_states
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+
+SENTINEL = object()
+
+
+def fake_compute_get(*args, **kwargs):
+ def _return_server(*_args, **_kwargs):
+ inst = fakes.stub_instance(*args, **kwargs)
+ return fake_instance.fake_instance_obj(_args[1], **inst)
+ return _return_server
+
+
+class HideServerAddressesTestV21(test.TestCase):
+ content_type = 'application/json'
+ base_url = '/v2/fake/servers'
+
+ def _setup_wsgi(self):
+ self.wsgi_app = fakes.wsgi_app_v21(
+ init_only=('servers', 'os-hide-server-addresses'))
+
+ def setUp(self):
+ super(HideServerAddressesTestV21, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ return_server = fakes.fake_instance_get()
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self._setup_wsgi()
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(self.wsgi_app)
+ return res
+
+ @staticmethod
+ def _get_server(body):
+ return jsonutils.loads(body).get('server')
+
+ @staticmethod
+ def _get_servers(body):
+ return jsonutils.loads(body).get('servers')
+
+ @staticmethod
+ def _get_addresses(server):
+ return server.get('addresses', SENTINEL)
+
+ def _check_addresses(self, addresses, exists):
+ self.assertTrue(addresses is not SENTINEL)
+ if exists:
+ self.assertTrue(addresses)
+ else:
+ self.assertFalse(addresses)
+
+ def test_show_hides_in_building(self):
+ instance_id = 1
+ uuid = fakes.get_fake_uuid(instance_id)
+ self.stubs.Set(compute.api.API, 'get',
+ fake_compute_get(instance_id, uuid=uuid,
+ vm_state=vm_states.BUILDING))
+ res = self._make_request(self.base_url + '/%s' % uuid)
+ self.assertEqual(res.status_int, 200)
+
+ server = self._get_server(res.body)
+ addresses = self._get_addresses(server)
+ self._check_addresses(addresses, exists=False)
+
+ def test_show(self):
+ instance_id = 1
+ uuid = fakes.get_fake_uuid(instance_id)
+ self.stubs.Set(compute.api.API, 'get',
+ fake_compute_get(instance_id, uuid=uuid,
+ vm_state=vm_states.ACTIVE))
+ res = self._make_request(self.base_url + '/%s' % uuid)
+ self.assertEqual(res.status_int, 200)
+
+ server = self._get_server(res.body)
+ addresses = self._get_addresses(server)
+ self._check_addresses(addresses, exists=True)
+
+ def test_detail_hides_building_server_addresses(self):
+ instance_0 = fakes.stub_instance(0, uuid=fakes.get_fake_uuid(0),
+ vm_state=vm_states.ACTIVE)
+ instance_1 = fakes.stub_instance(1, uuid=fakes.get_fake_uuid(1),
+ vm_state=vm_states.BUILDING)
+ instances = [instance_0, instance_1]
+
+ def get_all(*args, **kwargs):
+ fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+ return instance_obj._make_instance_list(
+ args[1], objects.InstanceList(), instances, fields)
+
+ self.stubs.Set(compute.api.API, 'get_all', get_all)
+ res = self._make_request(self.base_url + '/detail')
+
+ self.assertEqual(res.status_int, 200)
+ servers = self._get_servers(res.body)
+
+ self.assertEqual(len(servers), len(instances))
+
+ for instance, server in itertools.izip(instances, servers):
+ addresses = self._get_addresses(server)
+ exists = (instance['vm_state'] == vm_states.ACTIVE)
+ self._check_addresses(addresses, exists=exists)
+
+ def test_no_instance_passthrough_404(self):
+
+ def fake_compute_get(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ res = self._make_request(self.base_url + '/' + fakes.get_fake_uuid())
+
+ self.assertEqual(res.status_int, 404)
+
+
+class HideServerAddressesTestV2(HideServerAddressesTestV21):
+
+ def _setup_wsgi(self):
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Hide_server_addresses'])
+ self.wsgi_app = fakes.wsgi_app(init_only=('servers',))
+
+
+class HideAddressesXmlTest(HideServerAddressesTestV2):
+ content_type = 'application/xml'
+
+ @staticmethod
+ def _get_server(body):
+ return etree.XML(body)
+
+ @staticmethod
+ def _get_servers(body):
+ return etree.XML(body).getchildren()
+
+ @staticmethod
+ def _get_addresses(server):
+ addresses = server.find('{%s}addresses' % wsgi.XMLNS_V11)
+ if addresses is None:
+ return SENTINEL
+ return addresses
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_hosts.py b/nova/tests/unit/api/openstack/compute/contrib/test_hosts.py
new file mode 100644
index 0000000000..5478a7dd33
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_hosts.py
@@ -0,0 +1,471 @@
+# Copyright (c) 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import testtools
+import webob.exc
+
+from nova.api.openstack.compute.contrib import hosts as os_hosts_v2
+from nova.api.openstack.compute.plugins.v3 import hosts as os_hosts_v3
+from nova.compute import power_state
+from nova.compute import vm_states
+from nova import context as context_maker
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit import fake_hosts
+from nova.tests.unit import utils
+
+
def stub_service_get_all(context, disabled=None):
    """Stand-in for db.service_get_all.

    Always returns the canned fake service list; the ``disabled``
    filter is accepted for signature compatibility but ignored.
    """
    services = fake_hosts.SERVICES_LIST
    return services
+
+
def stub_service_get_by_host_and_topic(context, host_name, topic):
    """Look up a fake service by (host, topic).

    Returns the first matching service dict, or None when no fake
    service matches — mirroring the implicit-None of a plain loop.
    """
    candidates = (svc for svc in stub_service_get_all(context)
                  if svc['host'] == host_name and svc['topic'] == topic)
    return next(candidates, None)
+
+
def stub_set_host_enabled(context, host_name, enabled):
    """Simulate the driver-level host enable/disable call.

    'enabled' means new instances can go to this host, 'disabled' means
    they can't.  Three behaviours are modelled:

    * 'notimplemented' — the driver lacks the feature;
    * 'dummydest'      — the host does not exist;
    * 'host_c2'        — the call fails, reporting the opposite state;
    * anything else    — the call succeeds.
    """
    if host_name == "notimplemented":
        # The vm driver for this host doesn't support this feature.
        raise NotImplementedError()
    if host_name == "dummydest":
        # The host does not exist.
        raise exception.ComputeHostNotFound(host=host_name)
    # host_c2 simulates failure by reporting the inverse of the request.
    effective = (not enabled) if host_name == "host_c2" else enabled
    return "enabled" if effective else "disabled"
+
+
def stub_set_host_maintenance(context, host_name, mode):
    """Simulate putting a host into/out of maintenance mode.

    'host_c1' (and any ordinary host) succeeds; 'host_c2' always fails
    by reporting the opposite mode; 'notimplemented' and 'dummydest'
    raise the corresponding errors.
    """
    if host_name == "notimplemented":
        # The vm driver for this host doesn't support this feature.
        raise NotImplementedError()
    if host_name == "dummydest":
        # The host does not exist.
        raise exception.ComputeHostNotFound(host=host_name)
    # host_c2 simulates failure by reporting the inverse of the request.
    effective = (not mode) if host_name == "host_c2" else mode
    return "on_maintenance" if effective else "off_maintenance"
+
+
def stub_host_power_action(context, host_name, action):
    """Echo the requested power action back as its result.

    The special names 'notimplemented' and 'dummydest' raise, matching
    the other host stubs in this module.
    """
    if host_name == "notimplemented":
        raise NotImplementedError()
    if host_name == "dummydest":
        # The host does not exist.
        raise exception.ComputeHostNotFound(host=host_name)
    return action
+
+
def _create_instance(**kwargs):
    """Create a test instance.

    Persists the instance via the DB API under an admin context and
    returns the created DB record.
    """
    ctxt = context_maker.get_admin_context()
    return db.instance_create(ctxt, _create_instance_dict(**kwargs))
+
+
def _create_instance_dict(**kwargs):
    """Build the values dictionary for a test instance.

    Every field has a sensible default; callers override via kwargs.
    'host' is only included when explicitly supplied.
    """
    inst = {
        'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
        'reservation_id': 'r-fakeres',
        'user_id': kwargs.get('user_id', 'admin'),
        'project_id': kwargs.get('project_id', 'fake'),
        'instance_type_id': '1',
        'vcpus': kwargs.get('vcpus', 1),
        'memory_mb': kwargs.get('memory_mb', 20),
        'root_gb': kwargs.get('root_gb', 30),
        'ephemeral_gb': kwargs.get('ephemeral_gb', 30),
        'vm_state': kwargs.get('vm_state', vm_states.ACTIVE),
        'power_state': kwargs.get('power_state', power_state.RUNNING),
        'task_state': kwargs.get('task_state', None),
        'availability_zone': kwargs.get('availability_zone', None),
        'ami_launch_index': 0,
        'launched_on': kwargs.get('launched_on', 'dummy'),
    }
    if 'host' in kwargs:
        inst['host'] = kwargs.get('host')
    return inst
+
+
class FakeRequest(object):
    # Minimal request double: admin context, no query parameters.
    environ = {"nova.context": context_maker.get_admin_context()}
    GET = {}
+
+
class FakeRequestWithNovaZone(object):
    # Request double filtering the host list by zone=nova.
    environ = {"nova.context": context_maker.get_admin_context()}
    GET = {"zone": "nova"}
+
+
class FakeRequestWithNovaService(object):
    # Request double filtering the host list by service=compute.
    environ = {"nova.context": context_maker.get_admin_context()}
    GET = {"service": "compute"}
+
+
class FakeRequestWithInvalidNovaService(object):
    # Request double with a service filter matching no fake service.
    environ = {"nova.context": context_maker.get_admin_context()}
    GET = {"service": "invalid"}
+
+
class HostTestCaseV21(test.TestCase):
    """Test Case for hosts."""
    # These three hooks are overridden by HostTestCaseV20 to point at
    # the legacy v2 controller and its error types.
    validation_ex = exception.ValidationError
    Controller = os_hosts_v3.HostController
    policy_ex = exception.PolicyNotAuthorized

    def _setup_stubs(self):
        # Pretend we have fake_hosts.HOST_LIST in the DB
        self.stubs.Set(db, 'service_get_all',
                       stub_service_get_all)
        # Only hosts in our fake DB exist
        self.stubs.Set(db, 'service_get_by_host_and_topic',
                       stub_service_get_by_host_and_topic)
        # 'host_c1' always succeeds, and 'host_c2'
        self.stubs.Set(self.hosts_api, 'set_host_enabled',
                       stub_set_host_enabled)
        # 'host_c1' always succeeds, and 'host_c2'
        self.stubs.Set(self.hosts_api, 'set_host_maintenance',
                       stub_set_host_maintenance)
        self.stubs.Set(self.hosts_api, 'host_power_action',
                       stub_host_power_action)

    def setUp(self):
        super(HostTestCaseV21, self).setUp()
        self.controller = self.Controller()
        self.hosts_api = self.controller.api
        self.req = FakeRequest()

        self._setup_stubs()

    def _test_host_update(self, host, key, val, expected_value):
        # Issue an update with a single key and verify its echoed value.
        body = {key: val}
        result = self.controller.update(self.req, host, body=body)
        self.assertEqual(result[key], expected_value)

    def test_list_hosts(self):
        """Verify that the compute hosts are returned."""
        result = self.controller.index(self.req)
        self.assertIn('hosts', result)
        hosts = result['hosts']
        self.assertEqual(fake_hosts.HOST_LIST, hosts)

    def test_disable_host(self):
        # host_c2's stub simulates failure by reporting the opposite state.
        self._test_host_update('host_c1', 'status', 'disable', 'disabled')
        self._test_host_update('host_c2', 'status', 'disable', 'enabled')

    def test_enable_host(self):
        self._test_host_update('host_c1', 'status', 'enable', 'enabled')
        self._test_host_update('host_c2', 'status', 'enable', 'disabled')

    def test_enable_maintenance(self):
        self._test_host_update('host_c1', 'maintenance_mode',
                               'enable', 'on_maintenance')

    def test_disable_maintenance(self):
        self._test_host_update('host_c1', 'maintenance_mode',
                               'disable', 'off_maintenance')

    def _test_host_update_notimpl(self, key, val):
        # The 'notimplemented' host's stubs raise NotImplementedError,
        # which the controller must translate to HTTP 501.
        def stub_service_get_all_notimpl(self, req):
            return [{'host': 'notimplemented', 'topic': None,
                     'availability_zone': None}]
        self.stubs.Set(db, 'service_get_all',
                       stub_service_get_all_notimpl)
        body = {key: val}
        self.assertRaises(webob.exc.HTTPNotImplemented,
                          self.controller.update,
                          self.req, 'notimplemented', body=body)

    def test_disable_host_notimpl(self):
        self._test_host_update_notimpl('status', 'disable')

    def test_enable_maintenance_notimpl(self):
        self._test_host_update_notimpl('maintenance_mode', 'enable')

    def test_host_startup(self):
        result = self.controller.startup(self.req, "host_c1")
        self.assertEqual(result["power_action"], "startup")

    def test_host_shutdown(self):
        result = self.controller.shutdown(self.req, "host_c1")
        self.assertEqual(result["power_action"], "shutdown")

    def test_host_reboot(self):
        result = self.controller.reboot(self.req, "host_c1")
        self.assertEqual(result["power_action"], "reboot")

    def _test_host_power_action_notimpl(self, method):
        # Unsupported power actions must surface as HTTP 501.
        self.assertRaises(webob.exc.HTTPNotImplemented,
                          method, self.req, "notimplemented")

    def test_host_startup_notimpl(self):
        self._test_host_power_action_notimpl(self.controller.startup)

    def test_host_shutdown_notimpl(self):
        self._test_host_power_action_notimpl(self.controller.shutdown)

    def test_host_reboot_notimpl(self):
        self._test_host_power_action_notimpl(self.controller.reboot)

    def test_host_status_bad_host(self):
        # A host given as an argument does not exist.
        self.req.environ["nova.context"].is_admin = True
        dest = 'dummydest'
        with testtools.ExpectedException(webob.exc.HTTPNotFound,
                                         ".*%s.*" % dest):
            self.controller.update(self.req, dest, body={'status': 'enable'})

    def test_host_maintenance_bad_host(self):
        # A host given as an argument does not exist.
        self.req.environ["nova.context"].is_admin = True
        dest = 'dummydest'
        with testtools.ExpectedException(webob.exc.HTTPNotFound,
                                         ".*%s.*" % dest):
            self.controller.update(self.req, dest,
                                   body={'maintenance_mode': 'enable'})

    def test_host_power_action_bad_host(self):
        # A host given as an argument does not exist.
        self.req.environ["nova.context"].is_admin = True
        dest = 'dummydest'
        with testtools.ExpectedException(webob.exc.HTTPNotFound,
                                         ".*%s.*" % dest):
            self.controller.reboot(self.req, dest)

    def test_bad_status_value(self):
        # Only 'enable'/'disable' are valid status values.
        bad_body = {"status": "bad"}
        self.assertRaises(self.validation_ex, self.controller.update,
                          self.req, "host_c1", body=bad_body)
        bad_body2 = {"status": "disablabc"}
        self.assertRaises(self.validation_ex, self.controller.update,
                          self.req, "host_c1", body=bad_body2)

    def test_bad_update_key(self):
        bad_body = {"crazy": "bad"}
        self.assertRaises(self.validation_ex, self.controller.update,
                          self.req, "host_c1", body=bad_body)

    def test_bad_update_key_and_correct_update_key(self):
        # One invalid key poisons the whole request body.
        bad_body = {"status": "disable", "crazy": "bad"}
        self.assertRaises(self.validation_ex, self.controller.update,
                          self.req, "host_c1", body=bad_body)

    def test_good_update_keys(self):
        body = {"status": "disable", "maintenance_mode": "enable"}
        result = self.controller.update(self.req, 'host_c1', body=body)
        self.assertEqual(result["host"], "host_c1")
        self.assertEqual(result["status"], "disabled")
        self.assertEqual(result["maintenance_mode"], "on_maintenance")

    def test_show_forbidden(self):
        # Non-admin contexts must be rejected by policy.
        self.req.environ["nova.context"].is_admin = False
        dest = 'dummydest'
        self.assertRaises(self.policy_ex,
                          self.controller.show,
                          self.req, dest)
        self.req.environ["nova.context"].is_admin = True

    def test_show_host_not_exist(self):
        # A host given as an argument does not exist.
        self.req.environ["nova.context"].is_admin = True
        dest = 'dummydest'
        with testtools.ExpectedException(webob.exc.HTTPNotFound,
                                         ".*%s.*" % dest):
            self.controller.show(self.req, dest)

    def _create_compute_service(self):
        """Create compute-manager(ComputeNode and Service record)."""
        ctxt = self.req.environ["nova.context"]
        dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
               'report_count': 0}
        s_ref = db.service_create(ctxt, dic)

        dic = {'service_id': s_ref['id'],
               'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
               'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10,
               'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
               'cpu_info': '', 'stats': ''}
        db.compute_node_create(ctxt, dic)

        return db.service_get(ctxt, s_ref['id'])

    def test_show_no_project(self):
        """No instances are running on the given host."""
        ctxt = context_maker.get_admin_context()
        s_ref = self._create_compute_service()

        result = self.controller.show(self.req, s_ref['host'])

        # Only the three aggregate rows appear when no project has
        # instances on the host.
        proj = ['(total)', '(used_now)', '(used_max)']
        column = ['host', 'project', 'cpu', 'memory_mb', 'disk_gb']
        self.assertEqual(len(result['host']), 3)
        for resource in result['host']:
            self.assertIn(resource['resource']['project'], proj)
            self.assertEqual(len(resource['resource']), 5)
            self.assertEqual(set(column), set(resource['resource'].keys()))
        db.service_destroy(ctxt, s_ref['id'])

    def test_show_works_correctly(self):
        """show() works correctly as expected."""
        ctxt = context_maker.get_admin_context()
        s_ref = self._create_compute_service()
        i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
        i_ref2 = _create_instance(project_id='p-02', vcpus=3,
                                  host=s_ref['host'])

        result = self.controller.show(self.req, s_ref['host'])

        # Three aggregate rows plus one per project with instances.
        proj = ['(total)', '(used_now)', '(used_max)', 'p-01', 'p-02']
        column = ['host', 'project', 'cpu', 'memory_mb', 'disk_gb']
        self.assertEqual(len(result['host']), 5)
        for resource in result['host']:
            self.assertIn(resource['resource']['project'], proj)
            self.assertEqual(len(resource['resource']), 5)
            self.assertEqual(set(column), set(resource['resource'].keys()))
        db.service_destroy(ctxt, s_ref['id'])
        db.instance_destroy(ctxt, i_ref1['uuid'])
        db.instance_destroy(ctxt, i_ref2['uuid'])

    def test_list_hosts_with_zone(self):
        result = self.controller.index(FakeRequestWithNovaZone())
        self.assertIn('hosts', result)
        hosts = result['hosts']
        self.assertEqual(fake_hosts.HOST_LIST_NOVA_ZONE, hosts)

    def test_list_hosts_with_service(self):
        result = self.controller.index(FakeRequestWithNovaService())
        self.assertEqual(fake_hosts.HOST_LIST_NOVA_ZONE, result['hosts'])

    def test_list_hosts_with_invalid_service(self):
        result = self.controller.index(FakeRequestWithInvalidNovaService())
        self.assertEqual([], result['hosts'])
+
+
class HostTestCaseV20(HostTestCaseV21):
    """Re-run the host tests against the legacy v2 controller."""
    validation_ex = webob.exc.HTTPBadRequest
    policy_ex = webob.exc.HTTPForbidden
    Controller = os_hosts_v2.HostController

    # Note: V2 api don't support list with services
    def test_list_hosts_with_service(self):
        pass

    def test_list_hosts_with_invalid_service(self):
        pass
+
+
class HostSerializerTest(test.TestCase):
    """Tests for the v2 XML (de)serializers of the hosts extension."""

    def setUp(self):
        super(HostSerializerTest, self).setUp()
        self.deserializer = os_hosts_v2.HostUpdateDeserializer()

    def test_index_serializer(self):
        # Each host dict becomes a <host> element with its fields as
        # attributes under a <hosts> root.
        serializer = os_hosts_v2.HostIndexTemplate()
        text = serializer.serialize(fake_hosts.OS_API_HOST_LIST)

        tree = etree.fromstring(text)

        self.assertEqual('hosts', tree.tag)
        self.assertEqual(len(fake_hosts.HOST_LIST), len(tree))
        for i in range(len(fake_hosts.HOST_LIST)):
            self.assertEqual('host', tree[i].tag)
            self.assertEqual(fake_hosts.HOST_LIST[i]['host_name'],
                             tree[i].get('host_name'))
            self.assertEqual(fake_hosts.HOST_LIST[i]['service'],
                             tree[i].get('service'))
            self.assertEqual(fake_hosts.HOST_LIST[i]['zone'],
                             tree[i].get('zone'))

    def test_update_serializer_with_status(self):
        exemplar = dict(host='host_c1', status='enabled')
        serializer = os_hosts_v2.HostUpdateTemplate()
        text = serializer.serialize(exemplar)

        tree = etree.fromstring(text)

        self.assertEqual('host', tree.tag)
        for key, value in exemplar.items():
            self.assertEqual(value, tree.get(key))

    def test_update_serializer_with_maintenance_mode(self):
        exemplar = dict(host='host_c1', maintenance_mode='enabled')
        serializer = os_hosts_v2.HostUpdateTemplate()
        text = serializer.serialize(exemplar)

        tree = etree.fromstring(text)

        self.assertEqual('host', tree.tag)
        for key, value in exemplar.items():
            self.assertEqual(value, tree.get(key))

    def test_update_serializer_with_maintenance_mode_and_status(self):
        exemplar = dict(host='host_c1',
                        maintenance_mode='enabled',
                        status='enabled')
        serializer = os_hosts_v2.HostUpdateTemplate()
        text = serializer.serialize(exemplar)

        tree = etree.fromstring(text)

        self.assertEqual('host', tree.tag)
        for key, value in exemplar.items():
            self.assertEqual(value, tree.get(key))

    def test_action_serializer(self):
        exemplar = dict(host='host_c1', power_action='reboot')
        serializer = os_hosts_v2.HostActionTemplate()
        text = serializer.serialize(exemplar)

        tree = etree.fromstring(text)

        self.assertEqual('host', tree.tag)
        for key, value in exemplar.items():
            self.assertEqual(value, tree.get(key))

    def test_update_deserializer(self):
        # Child elements of <updates> become keys of the body dict.
        exemplar = dict(status='enabled', maintenance_mode='disable')
        intext = """<?xml version='1.0' encoding='UTF-8'?>
                    <updates>
                        <status>enabled</status>
                        <maintenance_mode>disable</maintenance_mode>
                    </updates>"""
        result = self.deserializer.deserialize(intext)

        self.assertEqual(dict(body=exemplar), result)

    def test_corrupt_xml(self):
        # Malformed/hostile XML must be rejected with a clean error,
        # not crash or hang the deserializer.
        self.assertRaises(
            exception.MalformedRequestBody,
            self.deserializer.deserialize,
            utils.killer_xml_body())
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_hypervisor_status.py b/nova/tests/unit/api/openstack/compute/contrib/test_hypervisor_status.py
new file mode 100644
index 0000000000..2d9187a7d1
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_hypervisor_status.py
@@ -0,0 +1,92 @@
+# Copyright 2014 Intel Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+import mock
+
+from nova.api.openstack.compute.contrib import hypervisors as hypervisors_v2
+from nova.api.openstack.compute.plugins.v3 import hypervisors \
+ as hypervisors_v21
+from nova.api.openstack import extensions
+from nova import test
+from nova.tests.unit.api.openstack.compute.contrib import test_hypervisors
+
# A single fake hypervisor: the first entry of test_hypervisors.TEST_HYPERS
# augmented with a fully-populated service dict, which the status views
# read the 'disabled'/'disabled_reason' fields from.
TEST_HYPER = dict(test_hypervisors.TEST_HYPERS[0],
                  service=dict(id=1,
                               host="compute1",
                               binary="nova-compute",
                               topic="compute_topic",
                               report_count=5,
                               disabled=False,
                               disabled_reason=None,
                               availability_zone="nova"),
                  )
+
+
class HypervisorStatusTestV21(test.NoDBTestCase):
    """Checks state/status reporting of _view_hypervisor (v2.1)."""

    def _prepare_extension(self):
        # Mock the servicegroup check so 'up'/'down' is controllable.
        self.controller = hypervisors_v21.HypervisorsController()
        self.controller.servicegroup_api.service_is_up = mock.MagicMock(
            return_value=True)

    def test_view_hypervisor_service_status(self):
        # Summary view: status tracks service.disabled, state tracks
        # the servicegroup liveness check.
        self._prepare_extension()
        result = self.controller._view_hypervisor(
            TEST_HYPER, False)
        self.assertEqual('enabled', result['status'])
        self.assertEqual('up', result['state'])
        self.assertEqual('enabled', result['status'])

        self.controller.servicegroup_api.service_is_up.return_value = False
        result = self.controller._view_hypervisor(
            TEST_HYPER, False)
        self.assertEqual('down', result['state'])

        hyper = copy.deepcopy(TEST_HYPER)
        hyper['service']['disabled'] = True
        result = self.controller._view_hypervisor(hyper, False)
        self.assertEqual('disabled', result['status'])

    def test_view_hypervisor_detail_status(self):
        # Detail view additionally exposes service.disabled_reason.
        self._prepare_extension()

        result = self.controller._view_hypervisor(
            TEST_HYPER, True)

        self.assertEqual('enabled', result['status'])
        self.assertEqual('up', result['state'])
        self.assertIsNone(result['service']['disabled_reason'])

        self.controller.servicegroup_api.service_is_up.return_value = False
        result = self.controller._view_hypervisor(
            TEST_HYPER, True)
        self.assertEqual('down', result['state'])

        hyper = copy.deepcopy(TEST_HYPER)
        hyper['service']['disabled'] = True
        hyper['service']['disabled_reason'] = "fake"
        result = self.controller._view_hypervisor(hyper, True)
        self.assertEqual('disabled', result['status'],)
        self.assertEqual('fake', result['service']['disabled_reason'])
+
+
class HypervisorStatusTestV2(HypervisorStatusTestV21):
    """Same status checks against the legacy v2 controller."""

    def _prepare_extension(self):
        # The v2 controller needs an extension manager with the
        # os-hypervisor-status extension enabled.
        ext_mgr = extensions.ExtensionManager()
        ext_mgr.extensions = {}
        ext_mgr.extensions['os-hypervisor-status'] = True
        self.controller = hypervisors_v2.HypervisorsController(ext_mgr)
        self.controller.servicegroup_api.service_is_up = mock.MagicMock(
            return_value=True)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_hypervisors.py b/nova/tests/unit/api/openstack/compute/contrib/test_hypervisors.py
new file mode 100644
index 0000000000..9ae3c307c5
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_hypervisors.py
@@ -0,0 +1,596 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from lxml import etree
+import mock
+from webob import exc
+
+from nova.api.openstack.compute.contrib import hypervisors as hypervisors_v2
+from nova.api.openstack.compute.plugins.v3 import hypervisors \
+ as hypervisors_v21
+from nova.api.openstack import extensions
+from nova import context
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
# Two fully-populated fake compute-node records, one per service, used by
# all db stubs below and by the expected-view dicts in the test classes.
TEST_HYPERS = [
    dict(id=1,
         service_id=1,
         service=dict(id=1,
                      host="compute1",
                      binary="nova-compute",
                      topic="compute_topic",
                      report_count=5,
                      disabled=False,
                      disabled_reason=None,
                      availability_zone="nova"),
         vcpus=4,
         memory_mb=10 * 1024,
         local_gb=250,
         vcpus_used=2,
         memory_mb_used=5 * 1024,
         local_gb_used=125,
         hypervisor_type="xen",
         hypervisor_version=3,
         hypervisor_hostname="hyper1",
         free_ram_mb=5 * 1024,
         free_disk_gb=125,
         current_workload=2,
         running_vms=2,
         cpu_info='cpu_info',
         disk_available_least=100,
         host_ip='1.1.1.1'),
    dict(id=2,
         service_id=2,
         service=dict(id=2,
                      host="compute2",
                      binary="nova-compute",
                      topic="compute_topic",
                      report_count=5,
                      disabled=False,
                      disabled_reason=None,
                      availability_zone="nova"),
         vcpus=4,
         memory_mb=10 * 1024,
         local_gb=250,
         vcpus_used=2,
         memory_mb_used=5 * 1024,
         local_gb_used=125,
         hypervisor_type="xen",
         hypervisor_version=3,
         hypervisor_hostname="hyper2",
         free_ram_mb=5 * 1024,
         free_disk_gb=125,
         current_workload=2,
         running_vms=2,
         cpu_info='cpu_info',
         disk_available_least=100,
         host_ip='2.2.2.2')]
# Fake instances: two per compute host, used by the servers() tests.
TEST_SERVERS = [dict(name="inst1", uuid="uuid1", host="compute1"),
                dict(name="inst2", uuid="uuid2", host="compute2"),
                dict(name="inst3", uuid="uuid3", host="compute1"),
                dict(name="inst4", uuid="uuid4", host="compute2")]
+
+
def fake_compute_node_get_all(context):
    """Stub for db.compute_node_get_all: every fake hypervisor."""
    return TEST_HYPERS
+
+
def fake_compute_node_search_by_hypervisor(context, hypervisor_re):
    """Stub search: every fake hypervisor matches any pattern."""
    return TEST_HYPERS
+
+
def fake_compute_node_get(context, compute_id):
    """Return the fake compute node with the given id.

    Raises ComputeHostNotFound when no fake hypervisor has that id,
    mirroring the real db API.
    """
    node = next((h for h in TEST_HYPERS if h['id'] == compute_id), None)
    if node is None:
        raise exception.ComputeHostNotFound(host=compute_id)
    return node
+
+
def fake_compute_node_statistics(context):
    """Aggregate the fake hypervisors into a statistics dict.

    Every numeric field is summed across TEST_HYPERS; 'count' is the
    number of hypervisors.
    """
    summed_keys = (
        'vcpus',
        'memory_mb',
        'local_gb',
        'vcpus_used',
        'memory_mb_used',
        'local_gb_used',
        'free_ram_mb',
        'free_disk_gb',
        'current_workload',
        'running_vms',
        'disk_available_least',
    )
    result = {key: sum(hyper[key] for hyper in TEST_HYPERS)
              for key in summed_keys}
    result['count'] = len(TEST_HYPERS)
    return result
+
+
def fake_instance_get_all_by_host(context, host):
    """Return the fake servers scheduled on the given host."""
    return [inst for inst in TEST_SERVERS if inst['host'] == host]
+
+
class HypervisorsTestV21(test.NoDBTestCase):
    """Tests for the v2.1 hypervisors API controller."""
    # Expected API representations derived from TEST_HYPERS, computed at
    # class-definition time so the V2 subclass can prune fields.
    DETAIL_HYPERS_DICTS = copy.deepcopy(TEST_HYPERS)
    del DETAIL_HYPERS_DICTS[0]['service_id']
    del DETAIL_HYPERS_DICTS[1]['service_id']
    DETAIL_HYPERS_DICTS[0].update({'state': 'up',
                                   'status': 'enabled',
                                   'service': dict(id=1, host='compute1',
                                                   disabled_reason=None)})
    DETAIL_HYPERS_DICTS[1].update({'state': 'up',
                                   'status': 'enabled',
                                   'service': dict(id=2, host='compute2',
                                                   disabled_reason=None)})

    INDEX_HYPER_DICTS = [
        dict(id=1, hypervisor_hostname="hyper1",
             state='up', status='enabled'),
        dict(id=2, hypervisor_hostname="hyper2",
             state='up', status='enabled')]

    NO_SERVER_HYPER_DICTS = copy.deepcopy(INDEX_HYPER_DICTS)
    NO_SERVER_HYPER_DICTS[0].update({'servers': []})
    NO_SERVER_HYPER_DICTS[1].update({'servers': []})

    def _get_request(self, use_admin_context):
        # The policy checks key off the context's admin flag.
        return fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/statistics',
                                       use_admin_context=use_admin_context)

    def _set_up_controller(self):
        self.controller = hypervisors_v21.HypervisorsController()
        self.controller.servicegroup_api.service_is_up = mock.MagicMock(
            return_value=True)

    def setUp(self):
        super(HypervisorsTestV21, self).setUp()
        self._set_up_controller()

        # Route every db call the controller makes to the fakes above.
        self.stubs.Set(db, 'compute_node_get_all', fake_compute_node_get_all)
        self.stubs.Set(db, 'compute_node_search_by_hypervisor',
                       fake_compute_node_search_by_hypervisor)
        self.stubs.Set(db, 'compute_node_get',
                       fake_compute_node_get)
        self.stubs.Set(db, 'compute_node_statistics',
                       fake_compute_node_statistics)
        self.stubs.Set(db, 'instance_get_all_by_host',
                       fake_instance_get_all_by_host)

    def test_view_hypervisor_nodetail_noservers(self):
        result = self.controller._view_hypervisor(TEST_HYPERS[0], False)

        self.assertEqual(result, self.INDEX_HYPER_DICTS[0])

    def test_view_hypervisor_detail_noservers(self):
        result = self.controller._view_hypervisor(TEST_HYPERS[0], True)

        self.assertEqual(result, self.DETAIL_HYPERS_DICTS[0])

    def test_view_hypervisor_servers(self):
        # Passing instances appends a 'servers' list of name/uuid pairs.
        result = self.controller._view_hypervisor(TEST_HYPERS[0], False,
                                                  TEST_SERVERS)
        expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
        expected_dict.update({'servers': [
                                  dict(name="inst1", uuid="uuid1"),
                                  dict(name="inst2", uuid="uuid2"),
                                  dict(name="inst3", uuid="uuid3"),
                                  dict(name="inst4", uuid="uuid4")]})

        self.assertEqual(result, expected_dict)

    def test_index(self):
        req = self._get_request(True)
        result = self.controller.index(req)

        self.assertEqual(result, dict(hypervisors=self.INDEX_HYPER_DICTS))

    def test_index_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.index, req)

    def test_detail(self):
        req = self._get_request(True)
        result = self.controller.detail(req)

        self.assertEqual(result, dict(hypervisors=self.DETAIL_HYPERS_DICTS))

    def test_detail_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.detail, req)

    def test_show_noid(self):
        # id 3 doesn't exist in TEST_HYPERS.
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '3')

    def test_show_non_integer_id(self):
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound, self.controller.show, req, 'abc')

    def test_show_withid(self):
        req = self._get_request(True)
        result = self.controller.show(req, '1')

        self.assertEqual(result, dict(hypervisor=self.DETAIL_HYPERS_DICTS[0]))

    def test_show_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.show, req, '1')

    def test_uptime_noid(self):
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, '3')

    def test_uptime_notimplemented(self):
        # Drivers without uptime support must surface HTTP 501.
        def fake_get_host_uptime(context, hyp):
            raise exc.HTTPNotImplemented()

        self.stubs.Set(self.controller.host_api, 'get_host_uptime',
                       fake_get_host_uptime)

        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotImplemented,
                          self.controller.uptime, req, '1')

    def test_uptime_implemented(self):
        def fake_get_host_uptime(context, hyp):
            return "fake uptime"

        self.stubs.Set(self.controller.host_api, 'get_host_uptime',
                       fake_get_host_uptime)

        req = self._get_request(True)
        result = self.controller.uptime(req, '1')

        expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS[0])
        expected_dict.update({'uptime': "fake uptime"})
        self.assertEqual(result, dict(hypervisor=expected_dict))

    def test_uptime_non_integer_id(self):
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound, self.controller.uptime, req, 'abc')

    def test_uptime_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.uptime, req, '1')

    def test_search(self):
        req = self._get_request(True)
        result = self.controller.search(req, 'hyper')

        self.assertEqual(result, dict(hypervisors=self.INDEX_HYPER_DICTS))

    def test_search_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.search, req, '1')

    def test_search_non_exist(self):
        def fake_compute_node_search_by_hypervisor_return_empty(context,
                hypervisor_re):
            return []
        self.stubs.Set(db, 'compute_node_search_by_hypervisor',
                       fake_compute_node_search_by_hypervisor_return_empty)
        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound, self.controller.search, req, 'a')

    def test_servers(self):
        # Each matching hypervisor lists the instances on its host.
        req = self._get_request(True)
        result = self.controller.servers(req, 'hyper')

        expected_dict = copy.deepcopy(self.INDEX_HYPER_DICTS)
        expected_dict[0].update({'servers': [
                                     dict(name="inst1", uuid="uuid1"),
                                     dict(name="inst3", uuid="uuid3")]})
        expected_dict[1].update({'servers': [
                                     dict(name="inst2", uuid="uuid2"),
                                     dict(name="inst4", uuid="uuid4")]})

        self.assertEqual(result, dict(hypervisors=expected_dict))

    def test_servers_non_id(self):
        def fake_compute_node_search_by_hypervisor_return_empty(context,
                hypervisor_re):
            return []
        self.stubs.Set(db, 'compute_node_search_by_hypervisor',
                       fake_compute_node_search_by_hypervisor_return_empty)

        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound,
                          self.controller.servers,
                          req, '115')

    def test_servers_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.servers, req, '1')

    def test_servers_with_non_integer_hypervisor_id(self):
        def fake_compute_node_search_by_hypervisor_return_empty(context,
                hypervisor_re):
            return []
        self.stubs.Set(db, 'compute_node_search_by_hypervisor',
                       fake_compute_node_search_by_hypervisor_return_empty)

        req = self._get_request(True)
        self.assertRaises(exc.HTTPNotFound,
                          self.controller.servers, req, 'abc')

    def test_servers_with_no_server(self):
        # Hosts with no instances still appear, with an empty servers list.
        def fake_instance_get_all_by_host_return_empty(context, hypervisor_re):
            return []
        self.stubs.Set(db, 'instance_get_all_by_host',
                       fake_instance_get_all_by_host_return_empty)
        req = self._get_request(True)
        result = self.controller.servers(req, '1')
        self.assertEqual(result, dict(hypervisors=self.NO_SERVER_HYPER_DICTS))

    def test_statistics(self):
        # Expected values are the sums over both TEST_HYPERS entries.
        req = self._get_request(True)
        result = self.controller.statistics(req)

        self.assertEqual(result, dict(hypervisor_statistics=dict(
                    count=2,
                    vcpus=8,
                    memory_mb=20 * 1024,
                    local_gb=500,
                    vcpus_used=4,
                    memory_mb_used=10 * 1024,
                    local_gb_used=250,
                    free_ram_mb=10 * 1024,
                    free_disk_gb=250,
                    current_workload=4,
                    running_vms=4,
                    disk_available_least=200)))

    def test_statistics_non_admin(self):
        req = self._get_request(False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.statistics, req)
+
+
class HypervisorsTestV2(HypervisorsTestV21):
    """Re-run the hypervisors tests against the legacy v2 controller.

    The v2 API omits state/status, host_ip, service.disabled_reason and
    the empty 'servers' key, so those fields are stripped from the
    inherited expected dicts at class-definition time.
    """
    DETAIL_HYPERS_DICTS = copy.deepcopy(
        HypervisorsTestV21.DETAIL_HYPERS_DICTS)
    del DETAIL_HYPERS_DICTS[0]['state']
    del DETAIL_HYPERS_DICTS[1]['state']
    del DETAIL_HYPERS_DICTS[0]['status']
    del DETAIL_HYPERS_DICTS[1]['status']
    del DETAIL_HYPERS_DICTS[0]['service']['disabled_reason']
    del DETAIL_HYPERS_DICTS[1]['service']['disabled_reason']
    del DETAIL_HYPERS_DICTS[0]['host_ip']
    del DETAIL_HYPERS_DICTS[1]['host_ip']

    INDEX_HYPER_DICTS = copy.deepcopy(HypervisorsTestV21.INDEX_HYPER_DICTS)
    del INDEX_HYPER_DICTS[0]['state']
    del INDEX_HYPER_DICTS[1]['state']
    del INDEX_HYPER_DICTS[0]['status']
    del INDEX_HYPER_DICTS[1]['status']

    NO_SERVER_HYPER_DICTS = copy.deepcopy(
        HypervisorsTestV21.NO_SERVER_HYPER_DICTS)
    del NO_SERVER_HYPER_DICTS[0]['state']
    del NO_SERVER_HYPER_DICTS[1]['state']
    del NO_SERVER_HYPER_DICTS[0]['status']
    del NO_SERVER_HYPER_DICTS[1]['status']
    del NO_SERVER_HYPER_DICTS[0]['servers']
    del NO_SERVER_HYPER_DICTS[1]['servers']

    def _set_up_controller(self):
        # The v2 controller takes an extension manager; no extra
        # extensions are enabled here.
        self.context = context.get_admin_context()
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = hypervisors_v2.HypervisorsController(self.ext_mgr)
+
+
class HypervisorsSerializersTest(test.NoDBTestCase):
    """Tests for the v2 hypervisor XML serializer templates."""

    def compare_to_exemplar(self, exemplar, hyper):
        """Assert XML element *hyper* matches the *exemplar* dict.

        Scalar values become XML attributes (compared as strings);
        'service' and 'servers' become child elements and are checked
        separately.
        """
        # Check attributes
        for key, value in exemplar.items():
            if key in ('service', 'servers'):
                # These turn into child elements and get tested
                # separately below...
                continue

            self.assertEqual(str(value), hyper.get(key))

        # Check child elements
        required_children = set([child for child in ('service', 'servers')
                                 if child in exemplar])
        for child in hyper:
            self.assertIn(child.tag, required_children)
            required_children.remove(child.tag)

            # Check the node...
            if child.tag == 'service':
                for key, value in exemplar['service'].items():
                    self.assertEqual(str(value), child.get(key))
            elif child.tag == 'servers':
                for idx, grandchild in enumerate(child):
                    self.assertEqual('server', grandchild.tag)
                    for key, value in exemplar['servers'][idx].items():
                        self.assertEqual(str(value), grandchild.get(key))

        # Are they all accounted for?
        self.assertEqual(len(required_children), 0)

    def test_index_serializer(self):
        serializer = hypervisors_v2.HypervisorIndexTemplate()
        exemplar = dict(hypervisors=[
            dict(hypervisor_hostname="hyper1",
                 id=1),
            dict(hypervisor_hostname="hyper2",
                 id=2)])
        text = serializer.serialize(exemplar)
        tree = etree.fromstring(text)

        self.assertEqual('hypervisors', tree.tag)
        self.assertEqual(len(exemplar['hypervisors']), len(tree))
        for idx, hyper in enumerate(tree):
            self.assertEqual('hypervisor', hyper.tag)
            self.compare_to_exemplar(exemplar['hypervisors'][idx], hyper)

    def test_detail_serializer(self):
        serializer = hypervisors_v2.HypervisorDetailTemplate()
        exemplar = dict(hypervisors=[
            dict(hypervisor_hostname="hyper1",
                 id=1,
                 vcpus=4,
                 memory_mb=10 * 1024,
                 local_gb=500,
                 vcpus_used=2,
                 memory_mb_used=5 * 1024,
                 local_gb_used=250,
                 hypervisor_type='xen',
                 hypervisor_version=3,
                 free_ram_mb=5 * 1024,
                 free_disk_gb=250,
                 current_workload=2,
                 running_vms=2,
                 cpu_info="json data",
                 disk_available_least=100,
                 host_ip='1.1.1.1',
                 service=dict(id=1, host="compute1")),
            dict(hypervisor_hostname="hyper2",
                 id=2,
                 vcpus=4,
                 memory_mb=10 * 1024,
                 local_gb=500,
                 vcpus_used=2,
                 memory_mb_used=5 * 1024,
                 local_gb_used=250,
                 hypervisor_type='xen',
                 hypervisor_version=3,
                 free_ram_mb=5 * 1024,
                 free_disk_gb=250,
                 current_workload=2,
                 running_vms=2,
                 cpu_info="json data",
                 disk_available_least=100,
                 host_ip='2.2.2.2',
                 service=dict(id=2, host="compute2"))])
        text = serializer.serialize(exemplar)
        tree = etree.fromstring(text)

        self.assertEqual('hypervisors', tree.tag)
        self.assertEqual(len(exemplar['hypervisors']), len(tree))
        for idx, hyper in enumerate(tree):
            self.assertEqual('hypervisor', hyper.tag)
            self.compare_to_exemplar(exemplar['hypervisors'][idx], hyper)

    def test_show_serializer(self):
        serializer = hypervisors_v2.HypervisorTemplate()
        exemplar = dict(hypervisor=dict(
            hypervisor_hostname="hyper1",
            id=1,
            vcpus=4,
            memory_mb=10 * 1024,
            local_gb=500,
            vcpus_used=2,
            memory_mb_used=5 * 1024,
            local_gb_used=250,
            hypervisor_type='xen',
            hypervisor_version=3,
            free_ram_mb=5 * 1024,
            free_disk_gb=250,
            current_workload=2,
            running_vms=2,
            cpu_info="json data",
            disk_available_least=100,
            host_ip='1.1.1.1',
            service=dict(id=1, host="compute1")))
        text = serializer.serialize(exemplar)
        tree = etree.fromstring(text)

        self.assertEqual('hypervisor', tree.tag)
        self.compare_to_exemplar(exemplar['hypervisor'], tree)

    def test_uptime_serializer(self):
        serializer = hypervisors_v2.HypervisorUptimeTemplate()
        exemplar = dict(hypervisor=dict(
            hypervisor_hostname="hyper1",
            id=1,
            uptime='fake uptime'))
        text = serializer.serialize(exemplar)
        tree = etree.fromstring(text)

        self.assertEqual('hypervisor', tree.tag)
        self.compare_to_exemplar(exemplar['hypervisor'], tree)

    def test_servers_serializer(self):
        serializer = hypervisors_v2.HypervisorServersTemplate()
        exemplar = dict(hypervisors=[
            dict(hypervisor_hostname="hyper1",
                 id=1,
                 servers=[
                     dict(name="inst1",
                          uuid="uuid1"),
                     dict(name="inst2",
                          uuid="uuid2")]),
            dict(hypervisor_hostname="hyper2",
                 id=2,
                 servers=[
                     dict(name="inst3",
                          uuid="uuid3"),
                     dict(name="inst4",
                          uuid="uuid4")])])
        text = serializer.serialize(exemplar)
        tree = etree.fromstring(text)

        self.assertEqual('hypervisors', tree.tag)
        self.assertEqual(len(exemplar['hypervisors']), len(tree))
        for idx, hyper in enumerate(tree):
            self.assertEqual('hypervisor', hyper.tag)
            self.compare_to_exemplar(exemplar['hypervisors'][idx], hyper)

    def test_statistics_serializer(self):
        serializer = hypervisors_v2.HypervisorStatisticsTemplate()
        exemplar = dict(hypervisor_statistics=dict(
            count=2,
            vcpus=8,
            memory_mb=20 * 1024,
            local_gb=500,
            vcpus_used=4,
            memory_mb_used=10 * 1024,
            local_gb_used=250,
            free_ram_mb=10 * 1024,
            free_disk_gb=250,
            current_workload=4,
            running_vms=4,
            disk_available_least=200))
        text = serializer.serialize(exemplar)
        tree = etree.fromstring(text)

        self.assertEqual('hypervisor_statistics', tree.tag)
        self.compare_to_exemplar(exemplar['hypervisor_statistics'], tree)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_image_size.py b/nova/tests/unit/api/openstack/compute/contrib/test_image_size.py
new file mode 100644
index 0000000000..2a8d95cb86
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_image_size.py
@@ -0,0 +1,138 @@
+# Copyright 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import image_size
+from nova.image import glance
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
NOW_API_FORMAT = "2010-10-11T10:30:22Z"

# Two canned glance images: an ACTIVE public image and an in-progress
# snapshot; both carry a 'size' field that the extension exposes.
IMAGES = [{
    'id': '123',
    'name': 'public image',
    'metadata': {'key1': 'value1'},
    'updated': NOW_API_FORMAT,
    'created': NOW_API_FORMAT,
    'status': 'ACTIVE',
    'progress': 100,
    'minDisk': 10,
    'minRam': 128,
    'size': 12345678,
    "links": [{
        "rel": "self",
        "href": "http://localhost/v2/fake/images/123",
    },
    {
        "rel": "bookmark",
        "href": "http://localhost/fake/images/123",
    }],
    },
    {
    'id': '124',
    'name': 'queued snapshot',
    'updated': NOW_API_FORMAT,
    'created': NOW_API_FORMAT,
    'status': 'SAVING',
    'progress': 25,
    'minDisk': 0,
    'minRam': 0,
    'size': 87654321,
    "links": [{
        "rel": "self",
        "href": "http://localhost/v2/fake/images/124",
    },
    {
        "rel": "bookmark",
        "href": "http://localhost/fake/images/124",
    }],
    }]


def fake_show(*args, **kwargs):
    # Fake GlanceImageService.show: always returns the first canned image.
    return IMAGES[0]


def fake_detail(*args, **kwargs):
    # Fake GlanceImageService.detail: returns all canned images.
    return IMAGES
+
+
class ImageSizeTestV21(test.NoDBTestCase):
    """Tests for the OS-EXT-IMG-SIZE image extension (v2.1 API)."""

    # Subclasses override these to test the v2 API and XML responses.
    content_type = 'application/json'
    prefix = 'OS-EXT-IMG-SIZE'

    def setUp(self):
        super(ImageSizeTestV21, self).setUp()
        self.stubs.Set(glance.GlanceImageService, 'show', fake_show)
        self.stubs.Set(glance.GlanceImageService, 'detail', fake_detail)
        self.flags(osapi_compute_extension=['nova.api.openstack.compute'
                                            '.contrib.image_size.Image_size'])

    def _make_request(self, url):
        # Issue a GET against the wsgi app and return the raw response.
        req = webob.Request.blank(url)
        req.headers['Accept'] = self.content_type
        res = req.get_response(self._get_app())
        return res

    def _get_app(self):
        return fakes.wsgi_app_v21()

    def _get_image(self, body):
        return jsonutils.loads(body).get('image')

    def _get_images(self, body):
        return jsonutils.loads(body).get('images')

    def assertImageSize(self, image, size):
        # The extension publishes size as '<prefix>:size'.
        self.assertEqual(image.get('%s:size' % self.prefix), size)

    def test_show(self):
        url = '/v2/fake/images/1'
        res = self._make_request(url)

        self.assertEqual(res.status_int, 200)
        image = self._get_image(res.body)
        self.assertImageSize(image, 12345678)

    def test_detail(self):
        url = '/v2/fake/images/detail'
        res = self._make_request(url)

        self.assertEqual(res.status_int, 200)
        images = self._get_images(res.body)
        self.assertImageSize(images[0], 12345678)
        self.assertImageSize(images[1], 87654321)
+ self.assertImageSize(images[1], 87654321)
+
+
class ImageSizeTestV2(ImageSizeTestV21):
    """Same tests as v2.1, run against the legacy v2 wsgi app."""

    def _get_app(self):
        return fakes.wsgi_app()
+
+
class ImageSizeXmlTest(ImageSizeTestV2):
    """Same tests as v2, parsed from XML instead of JSON responses."""

    content_type = 'application/xml'
    # XML attributes are namespaced rather than colon-prefixed.
    prefix = '{%s}' % image_size.Image_size.namespace

    def _get_image(self, body):
        return etree.XML(body)

    def _get_images(self, body):
        return etree.XML(body).getchildren()

    def assertImageSize(self, image, size):
        # XML attribute values are strings; convert before comparing.
        self.assertEqual(int(image.get('%ssize' % self.prefix)), size)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_instance_actions.py b/nova/tests/unit/api/openstack/compute/contrib/test_instance_actions.py
new file mode 100644
index 0000000000..a5ea3784e3
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_instance_actions.py
@@ -0,0 +1,327 @@
+# Copyright 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid
+
+from lxml import etree
+from webob import exc
+
+from nova.api.openstack.compute.contrib import instance_actions \
+ as instance_actions_v2
+from nova.api.openstack.compute.plugins.v3 import instance_actions \
+ as instance_actions_v21
+from nova.compute import api as compute_api
+from nova import db
+from nova.db.sqlalchemy import models
+from nova import exception
+from nova.openstack.common import policy as common_policy
+from nova import policy
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_server_actions
+
+FAKE_UUID = fake_server_actions.FAKE_UUID
+FAKE_REQUEST_ID = fake_server_actions.FAKE_REQUEST_ID1
+
+
def format_action(action):
    """Strip non-serialized keys from an action dict, in place.

    Database bookkeeping fields are removed, 'start_time' is replaced
    with its naive string form, and nested events are normalized via
    format_event().  Returns the (mutated) dict for convenience.
    """
    for unserialized_key in ('id', 'finish_time', 'created_at',
                             'updated_at', 'deleted_at', 'deleted'):
        action.pop(unserialized_key, None)
    if 'start_time' in action:
        # NOTE(danms): Without WSGI above us, these will be just stringified
        action['start_time'] = str(action['start_time'].replace(tzinfo=None))
    for event in action.get('events', []):
        format_event(event)
    return action
+
+
def format_event(event):
    """Strip non-serialized keys from an event dict, in place.

    Database bookkeeping fields are removed, and 'start_time' /
    'finish_time' are replaced with their naive string forms so the
    dict can be compared against API responses.  Returns the dict.
    """
    for unserialized_key in ('id', 'created_at', 'updated_at',
                             'deleted_at', 'deleted', 'action_id'):
        event.pop(unserialized_key, None)
    # NOTE(danms): Without WSGI above us, these will be just stringified
    for timestamp_key in ('start_time', 'finish_time'):
        if timestamp_key in event:
            event[timestamp_key] = str(
                event[timestamp_key].replace(tzinfo=None))
    return event
+
+
class InstanceActionsPolicyTestV21(test.NoDBTestCase):
    """Policy checks for the v2.1 os-instance-actions extension."""

    instance_actions = instance_actions_v21

    def setUp(self):
        super(InstanceActionsPolicyTestV21, self).setUp()
        self.controller = self.instance_actions.InstanceActionsController()

    def _get_http_req(self, action):
        fake_url = '/123/servers/12/%s' % action
        return fakes.HTTPRequest.blank(fake_url)

    def _set_policy_rules(self):
        # Restrict the extension to the instance's own project.
        rules = {'compute:get': common_policy.parse_rule(''),
                 'compute_extension:v3:os-instance-actions':
                     common_policy.parse_rule('project_id:%(project_id)s')}
        policy.set_rules(rules)

    def test_list_actions_restricted_by_project(self):
        self._set_policy_rules()

        def fake_instance_get_by_uuid(context, instance_id,
                                      columns_to_join=None,
                                      use_slave=False):
            # Deliberately return an instance owned by another project.
            return fake_instance.fake_db_instance(
                **{'name': 'fake', 'project_id': '%s_unequal' %
                   context.project_id})

        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
        req = self._get_http_req('os-instance-actions')
        self.assertRaises(exception.Forbidden, self.controller.index, req,
                          str(uuid.uuid4()))

    def test_get_action_restricted_by_project(self):
        self._set_policy_rules()

        def fake_instance_get_by_uuid(context, instance_id,
                                      columns_to_join=None,
                                      use_slave=False):
            # Deliberately return an instance owned by another project.
            return fake_instance.fake_db_instance(
                **{'name': 'fake', 'project_id': '%s_unequal' %
                   context.project_id})

        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
        req = self._get_http_req('os-instance-actions/1')
        self.assertRaises(exception.Forbidden, self.controller.show, req,
                          str(uuid.uuid4()), '1')
+
+
class InstanceActionsPolicyTestV2(InstanceActionsPolicyTestV21):
    """Same policy checks, against the legacy v2 extension rules."""

    instance_actions = instance_actions_v2

    def _set_policy_rules(self):
        rules = {'compute:get': common_policy.parse_rule(''),
                 'compute_extension:instance_actions':
                     common_policy.parse_rule('project_id:%(project_id)s')}
        policy.set_rules(rules)
+
+
class InstanceActionsTestV21(test.NoDBTestCase):
    """Behavior tests for the v2.1 os-instance-actions controller."""

    instance_actions = instance_actions_v21

    def setUp(self):
        super(InstanceActionsTestV21, self).setUp()
        self.controller = self.instance_actions.InstanceActionsController()
        # Deep copies so individual tests may mutate the fixtures freely.
        self.fake_actions = copy.deepcopy(fake_server_actions.FAKE_ACTIONS)
        self.fake_events = copy.deepcopy(fake_server_actions.FAKE_EVENTS)

        def fake_get(self, context, instance_uuid, expected_attrs=None,
                     want_objects=False):
            return {'uuid': instance_uuid}

        def fake_instance_get_by_uuid(context, instance_id, use_slave=False):
            return {'name': 'fake', 'project_id': context.project_id}

        self.stubs.Set(compute_api.API, 'get', fake_get)
        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)

    def _get_http_req(self, action, use_admin_context=False):
        fake_url = '/123/servers/12/%s' % action
        return fakes.HTTPRequest.blank(fake_url,
                                       use_admin_context=use_admin_context)

    def _set_policy_rules(self):
        # Allow listing actions, but reserve event details for admins.
        rules = {'compute:get': common_policy.parse_rule(''),
                 'compute_extension:v3:os-instance-actions':
                     common_policy.parse_rule(''),
                 'compute_extension:v3:os-instance-actions:events':
                     common_policy.parse_rule('is_admin:True')}
        policy.set_rules(rules)

    def test_list_actions(self):
        def fake_get_actions(context, uuid):
            actions = []
            for act in self.fake_actions[uuid].itervalues():
                action = models.InstanceAction()
                action.update(act)
                actions.append(action)
            return actions

        self.stubs.Set(db, 'actions_get', fake_get_actions)
        req = self._get_http_req('os-instance-actions')
        res_dict = self.controller.index(req, FAKE_UUID)
        for res in res_dict['instanceActions']:
            fake_action = self.fake_actions[FAKE_UUID][res['request_id']]
            self.assertEqual(format_action(fake_action), format_action(res))

    def test_get_action_with_events_allowed(self):
        def fake_get_action(context, uuid, request_id):
            action = models.InstanceAction()
            action.update(self.fake_actions[uuid][request_id])
            return action

        def fake_get_events(context, action_id):
            events = []
            for evt in self.fake_events[action_id]:
                event = models.InstanceActionEvent()
                event.update(evt)
                events.append(event)
            return events

        self.stubs.Set(db, 'action_get_by_request_id', fake_get_action)
        self.stubs.Set(db, 'action_events_get', fake_get_events)
        # Admin context: event details should be included in the result.
        req = self._get_http_req('os-instance-actions/1',
                                 use_admin_context=True)
        res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
        fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
        fake_events = self.fake_events[fake_action['id']]
        fake_action['events'] = fake_events
        self.assertEqual(format_action(fake_action),
                         format_action(res_dict['instanceAction']))

    def test_get_action_with_events_not_allowed(self):
        def fake_get_action(context, uuid, request_id):
            return self.fake_actions[uuid][request_id]

        def fake_get_events(context, action_id):
            return self.fake_events[action_id]

        self.stubs.Set(db, 'action_get_by_request_id', fake_get_action)
        self.stubs.Set(db, 'action_events_get', fake_get_events)

        # Non-admin context: the expected action carries no 'events' key.
        self._set_policy_rules()
        req = self._get_http_req('os-instance-actions/1')
        res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
        fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
        self.assertEqual(format_action(fake_action),
                         format_action(res_dict['instanceAction']))

    def test_action_not_found(self):
        def fake_no_action(context, uuid, action_id):
            return None

        self.stubs.Set(db, 'action_get_by_request_id', fake_no_action)
        req = self._get_http_req('os-instance-actions/1')
        self.assertRaises(exc.HTTPNotFound, self.controller.show, req,
                          FAKE_UUID, FAKE_REQUEST_ID)

    def test_index_instance_not_found(self):
        def fake_get(self, context, instance_uuid, expected_attrs=None,
                     want_objects=False):
            raise exception.InstanceNotFound(instance_id=instance_uuid)
        self.stubs.Set(compute_api.API, 'get', fake_get)
        req = self._get_http_req('os-instance-actions')
        self.assertRaises(exc.HTTPNotFound, self.controller.index, req,
                          FAKE_UUID)

    def test_show_instance_not_found(self):
        def fake_get(self, context, instance_uuid, expected_attrs=None,
                     want_objects=False):
            raise exception.InstanceNotFound(instance_id=instance_uuid)
        self.stubs.Set(compute_api.API, 'get', fake_get)
        req = self._get_http_req('os-instance-actions/fake')
        self.assertRaises(exc.HTTPNotFound, self.controller.show, req,
                          FAKE_UUID, 'fake')
+
+
class InstanceActionsTestV2(InstanceActionsTestV21):
    """Same behavior tests, against the legacy v2 extension rules."""

    instance_actions = instance_actions_v2

    def _set_policy_rules(self):
        rules = {'compute:get': common_policy.parse_rule(''),
                 'compute_extension:instance_actions':
                     common_policy.parse_rule(''),
                 'compute_extension:instance_actions:events':
                     common_policy.parse_rule('is_admin:True')}
        policy.set_rules(rules)
+
+
class InstanceActionsSerializerTestV2(test.NoDBTestCase):
    """Tests for the v2 instance-action XML serializer templates."""

    def setUp(self):
        super(InstanceActionsSerializerTestV2, self).setUp()
        self.fake_actions = copy.deepcopy(fake_server_actions.FAKE_ACTIONS)
        self.fake_events = copy.deepcopy(fake_server_actions.FAKE_EVENTS)

    def _verify_instance_action_attachment(self, attach, tree):
        # All scalar fields (everything except 'events') must round-trip
        # as XML attributes.
        for key in attach.keys():
            if key != 'events':
                self.assertEqual(attach[key], tree.get(key),
                                 '%s did not match' % key)

    def _verify_instance_action_event_attachment(self, attach, tree):
        for key in attach.keys():
            self.assertEqual(attach[key], tree.get(key),
                             '%s did not match' % key)

    def test_instance_action_serializer(self):
        serializer = instance_actions_v2.InstanceActionTemplate()
        action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
        text = serializer.serialize({'instanceAction': action})
        tree = etree.fromstring(text)

        action = format_action(action)
        self.assertEqual('instanceAction', tree.tag)
        self._verify_instance_action_attachment(action, tree)
        # No events were attached, so no <events> child may appear.
        found_events = False
        for child in tree:
            if child.tag == 'events':
                found_events = True
        self.assertFalse(found_events)

    def test_instance_action_events_serializer(self):
        serializer = instance_actions_v2.InstanceActionTemplate()
        action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
        event = self.fake_events[action['id']][0]
        action['events'] = [dict(event), dict(event)]
        text = serializer.serialize({'instanceAction': action})
        tree = etree.fromstring(text)

        action = format_action(action)
        self.assertEqual('instanceAction', tree.tag)
        self._verify_instance_action_attachment(action, tree)

        event = format_event(event)
        found_events = False
        for child in tree:
            if child.tag == 'events':
                found_events = True
                for key in event:
                    self.assertEqual(event[key], child.get(key))
        self.assertTrue(found_events)

    def test_instance_actions_serializer(self):
        serializer = instance_actions_v2.InstanceActionsTemplate()
        action_list = self.fake_actions[FAKE_UUID].values()
        text = serializer.serialize({'instanceActions': action_list})
        tree = etree.fromstring(text)

        action_list = [format_action(action) for action in action_list]
        self.assertEqual('instanceActions', tree.tag)
        self.assertEqual(len(action_list), len(tree))
        for idx, child in enumerate(tree):
            self.assertEqual('instanceAction', child.tag)
            request_id = child.get('request_id')
            self._verify_instance_action_attachment(
                self.fake_actions[FAKE_UUID][request_id],
                child)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_instance_usage_audit_log.py b/nova/tests/unit/api/openstack/compute/contrib/test_instance_usage_audit_log.py
new file mode 100644
index 0000000000..1ae85c8625
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_instance_usage_audit_log.py
@@ -0,0 +1,210 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo.utils import timeutils
+
+from nova.api.openstack.compute.contrib import instance_usage_audit_log as ial
+from nova import context
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.objects import test_service
+from nova import utils
+
+
service_base = test_service.fake_service
# Four nominal compute services plus one non-compute ('bogus') service
# that the audit-log extension is expected to ignore.
TEST_COMPUTE_SERVICES = [dict(service_base, host='foo', topic='compute'),
                         dict(service_base, host='bar', topic='compute'),
                         dict(service_base, host='baz', topic='compute'),
                         dict(service_base, host='plonk', topic='compute'),
                         dict(service_base, host='wibble', topic='bogus'),
                         ]


# Three consecutive one-day audit periods; each period's end is the
# next period's beginning.
begin1 = datetime.datetime(2012, 7, 4, 6, 0, 0)
begin2 = end1 = datetime.datetime(2012, 7, 5, 6, 0, 0)
begin3 = end2 = datetime.datetime(2012, 7, 6, 6, 0, 0)
end3 = datetime.datetime(2012, 7, 7, 6, 0, 0)
+
+
# test data


TEST_LOGS1 = [
    # all services done, no errors.
    dict(host="plonk", period_beginning=begin1, period_ending=end1,
         state="DONE", errors=0, task_items=23, message="test1"),
    dict(host="baz", period_beginning=begin1, period_ending=end1,
         state="DONE", errors=0, task_items=17, message="test2"),
    dict(host="bar", period_beginning=begin1, period_ending=end1,
         state="DONE", errors=0, task_items=10, message="test3"),
    dict(host="foo", period_beginning=begin1, period_ending=end1,
         state="DONE", errors=0, task_items=7, message="test4"),
    ]


TEST_LOGS2 = [
    # some still running...
    dict(host="plonk", period_beginning=begin2, period_ending=end2,
         state="DONE", errors=0, task_items=23, message="test5"),
    dict(host="baz", period_beginning=begin2, period_ending=end2,
         state="DONE", errors=0, task_items=17, message="test6"),
    dict(host="bar", period_beginning=begin2, period_ending=end2,
         state="RUNNING", errors=0, task_items=10, message="test7"),
    dict(host="foo", period_beginning=begin2, period_ending=end2,
         state="DONE", errors=0, task_items=7, message="test8"),
    ]


TEST_LOGS3 = [
    # some errors..
    dict(host="plonk", period_beginning=begin3, period_ending=end3,
         state="DONE", errors=0, task_items=23, message="test9"),
    dict(host="baz", period_beginning=begin3, period_ending=end3,
         state="DONE", errors=2, task_items=17, message="test10"),
    dict(host="bar", period_beginning=begin3, period_ending=end3,
         state="DONE", errors=0, task_items=10, message="test11"),
    dict(host="foo", period_beginning=begin3, period_ending=end3,
         state="DONE", errors=1, task_items=7, message="test12"),
    ]
+
+
def fake_task_log_get_all(context, task_name, begin, end,
                          host=None, state=None):
    """Fake of db.task_log_get_all keyed on the three audit periods."""
    assert task_name == "instance_usage_audit"

    if begin == begin1 and end == end1:
        return TEST_LOGS1
    if begin == begin2 and end == end2:
        return TEST_LOGS2
    if begin == begin3 and end == end3:
        return TEST_LOGS3
    # Any other period means the test asked for something unexpected.
    raise AssertionError("Invalid date %s to %s" % (begin, end))
+
+
def fake_last_completed_audit_period(unit=None, before=None):
    """Fake of utils.last_completed_audit_period for the canned periods.

    With *before*, returns the most recent period ending strictly before
    that time; otherwise defaults to the first period.
    """
    audit_periods = [(begin3, end3),
                     (begin2, end2),
                     (begin1, end1)]
    if before is not None:
        for begin, end in audit_periods:
            if before > end:
                return begin, end
        raise AssertionError("Invalid before date %s" % (before))
    return begin1, end1
+
+
class InstanceUsageAuditLogTest(test.NoDBTestCase):
    """Tests for the os-instance_usage_audit_log extension."""

    def setUp(self):
        super(InstanceUsageAuditLogTest, self).setUp()
        self.context = context.get_admin_context()
        # Freeze "now" between end1 and end2 so the default audit
        # period resolves to (begin1, end1).
        timeutils.set_time_override(datetime.datetime(2012, 7, 5, 10, 0, 0))
        self.controller = ial.InstanceUsageAuditLogController()
        self.host_api = self.controller.host_api

        def fake_service_get_all(context, disabled):
            self.assertIsNone(disabled)
            return TEST_COMPUTE_SERVICES

        self.stubs.Set(utils, 'last_completed_audit_period',
                       fake_last_completed_audit_period)
        self.stubs.Set(db, 'service_get_all',
                       fake_service_get_all)
        self.stubs.Set(db, 'task_log_get_all',
                       fake_task_log_get_all)

    def tearDown(self):
        super(InstanceUsageAuditLogTest, self).tearDown()
        # Undo the frozen clock from setUp.
        timeutils.clear_time_override()

    def test_index(self):
        req = fakes.HTTPRequest.blank('/v2/fake/os-instance_usage_audit_log',
                                      use_admin_context=True)
        result = self.controller.index(req)
        self.assertIn('instance_usage_audit_logs', result)
        logs = result['instance_usage_audit_logs']
        # TEST_LOGS1: 23 + 17 + 10 + 7 instances, all DONE, no errors.
        self.assertEqual(57, logs['total_instances'])
        self.assertEqual(0, logs['total_errors'])
        self.assertEqual(4, len(logs['log']))
        self.assertEqual(4, logs['num_hosts'])
        self.assertEqual(4, logs['num_hosts_done'])
        self.assertEqual(0, logs['num_hosts_running'])
        self.assertEqual(0, logs['num_hosts_not_run'])
        self.assertEqual("ALL hosts done. 0 errors.", logs['overall_status'])

    def test_index_non_admin(self):
        req = fakes.HTTPRequest.blank('/v2/fake/os-instance_usage_audit_log',
                                      use_admin_context=False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.index, req)

    def test_show(self):
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-instance_usage_audit_log/show',
            use_admin_context=True)
        result = self.controller.show(req, '2012-07-05 10:00:00')
        self.assertIn('instance_usage_audit_log', result)
        logs = result['instance_usage_audit_log']
        self.assertEqual(57, logs['total_instances'])
        self.assertEqual(0, logs['total_errors'])
        self.assertEqual(4, len(logs['log']))
        self.assertEqual(4, logs['num_hosts'])
        self.assertEqual(4, logs['num_hosts_done'])
        self.assertEqual(0, logs['num_hosts_running'])
        self.assertEqual(0, logs['num_hosts_not_run'])
        self.assertEqual("ALL hosts done. 0 errors.", logs['overall_status'])

    def test_show_non_admin(self):
        req = fakes.HTTPRequest.blank('/v2/fake/os-instance_usage_audit_log',
                                      use_admin_context=False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.show, req, '2012-07-05 10:00:00')

    def test_show_with_running(self):
        # The 2012-07-06 period maps to TEST_LOGS2, where 'bar' is RUNNING.
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-instance_usage_audit_log/show',
            use_admin_context=True)
        result = self.controller.show(req, '2012-07-06 10:00:00')
        self.assertIn('instance_usage_audit_log', result)
        logs = result['instance_usage_audit_log']
        self.assertEqual(57, logs['total_instances'])
        self.assertEqual(0, logs['total_errors'])
        self.assertEqual(4, len(logs['log']))
        self.assertEqual(4, logs['num_hosts'])
        self.assertEqual(3, logs['num_hosts_done'])
        self.assertEqual(1, logs['num_hosts_running'])
        self.assertEqual(0, logs['num_hosts_not_run'])
        self.assertEqual("3 of 4 hosts done. 0 errors.",
                         logs['overall_status'])

    def test_show_with_errors(self):
        # The 2012-07-07 period maps to TEST_LOGS3 with 2 + 1 errors.
        req = fakes.HTTPRequest.blank(
            '/v2/fake/os-instance_usage_audit_log/show',
            use_admin_context=True)
        result = self.controller.show(req, '2012-07-07 10:00:00')
        self.assertIn('instance_usage_audit_log', result)
        logs = result['instance_usage_audit_log']
        self.assertEqual(57, logs['total_instances'])
        self.assertEqual(3, logs['total_errors'])
        self.assertEqual(4, len(logs['log']))
        self.assertEqual(4, logs['num_hosts'])
        self.assertEqual(4, logs['num_hosts_done'])
        self.assertEqual(0, logs['num_hosts_running'])
        self.assertEqual(0, logs['num_hosts_not_run'])
        self.assertEqual("ALL hosts done. 3 errors.",
                         logs['overall_status'])
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_keypairs.py b/nova/tests/unit/api/openstack/compute/contrib/test_keypairs.py
new file mode 100644
index 0000000000..6a6c6f0736
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_keypairs.py
@@ -0,0 +1,497 @@
+# Copyright 2011 Eldar Nugaev
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import keypairs as keypairs_v2
+from nova.api.openstack.compute.plugins.v3 import keypairs as keypairs_v21
+from nova.api.openstack import wsgi
+from nova import db
+from nova import exception
+from nova.openstack.common import policy as common_policy
+from nova import policy
+from nova import quota
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.objects import test_keypair
+
+
+QUOTAS = quota.QUOTAS
+
+
+keypair_data = {
+ 'public_key': 'FAKE_KEY',
+ 'fingerprint': 'FAKE_FINGERPRINT',
+}
+
+
+def fake_keypair(name):
+ return dict(test_keypair.fake_keypair,
+ name=name, **keypair_data)
+
+
+def db_key_pair_get_all_by_user(self, user_id):
+ return [fake_keypair('FAKE')]
+
+
+def db_key_pair_create(self, keypair):
+ return fake_keypair(name=keypair['name'])
+
+
+def db_key_pair_destroy(context, user_id, name):
+ if not (user_id and name):
+ raise Exception()
+
+
+def db_key_pair_create_duplicate(context, keypair):
+ raise exception.KeyPairExists(key_name=keypair.get('name', ''))
+
+
+class KeypairsTestV21(test.TestCase):
+ base_url = '/v2/fake'
+
+ def _setup_app(self):
+ self.app = fakes.wsgi_app_v21(init_only=('os-keypairs', 'servers'))
+ self.app_server = self.app
+
+ def setUp(self):
+ super(KeypairsTestV21, self).setUp()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+
+ self.stubs.Set(db, "key_pair_get_all_by_user",
+ db_key_pair_get_all_by_user)
+ self.stubs.Set(db, "key_pair_create",
+ db_key_pair_create)
+ self.stubs.Set(db, "key_pair_destroy",
+ db_key_pair_destroy)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Keypairs'])
+ self._setup_app()
+
+ def test_keypair_list(self):
+ req = webob.Request.blank(self.base_url + '/os-keypairs')
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 200)
+ res_dict = jsonutils.loads(res.body)
+ response = {'keypairs': [{'keypair': dict(keypair_data, name='FAKE')}]}
+ self.assertEqual(res_dict, response)
+
+ def test_keypair_create(self):
+ body = {'keypair': {'name': 'create_test'}}
+ req = webob.Request.blank(self.base_url + '/os-keypairs')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 200)
+ res_dict = jsonutils.loads(res.body)
+ self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
+ self.assertTrue(len(res_dict['keypair']['private_key']) > 0)
+
+ def _test_keypair_create_bad_request_case(self, body):
+ req = webob.Request.blank(self.base_url + '/os-keypairs')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 400)
+
+ def test_keypair_create_with_empty_name(self):
+ body = {'keypair': {'name': ''}}
+ self._test_keypair_create_bad_request_case(body)
+
+ def test_keypair_create_with_name_too_long(self):
+ body = {
+ 'keypair': {
+ 'name': 'a' * 256
+ }
+ }
+ self._test_keypair_create_bad_request_case(body)
+
+ def test_keypair_create_with_non_alphanumeric_name(self):
+ body = {
+ 'keypair': {
+ 'name': 'test/keypair'
+ }
+ }
+ self._test_keypair_create_bad_request_case(body)
+
+ def test_keypair_import_bad_key(self):
+ body = {
+ 'keypair': {
+ 'name': 'create_test',
+ 'public_key': 'ssh-what negative',
+ },
+ }
+ self._test_keypair_create_bad_request_case(body)
+
+ def test_keypair_create_with_invalid_keypair_body(self):
+ body = {'alpha': {'name': 'create_test'}}
+ self._test_keypair_create_bad_request_case(body)
+
+ def test_keypair_import(self):
+ body = {
+ 'keypair': {
+ 'name': 'create_test',
+ 'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
+ 'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
+ 'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
+ 'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
+ 'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
+ 'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
+ 'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
+ 'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
+ 'bHkXa6OciiJDvkRzJXzf',
+ },
+ }
+
+ req = webob.Request.blank(self.base_url + '/os-keypairs')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 200)
+        # FIXME(ja): should we check that public_key was sent to create?
+ res_dict = jsonutils.loads(res.body)
+ self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
+ self.assertNotIn('private_key', res_dict['keypair'])
+
+ def test_keypair_import_quota_limit(self):
+
+ def fake_quotas_count(self, context, resource, *args, **kwargs):
+ return 100
+
+ self.stubs.Set(QUOTAS, "count", fake_quotas_count)
+
+ body = {
+ 'keypair': {
+ 'name': 'create_test',
+ 'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
+ 'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
+ 'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
+ 'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
+ 'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
+ 'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
+ 'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
+ 'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
+ 'bHkXa6OciiJDvkRzJXzf',
+ },
+ }
+
+ req = webob.Request.blank(self.base_url + '/os-keypairs')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 403)
+ res_dict = jsonutils.loads(res.body)
+ self.assertEqual(
+ "Quota exceeded, too many key pairs.",
+ res_dict['forbidden']['message'])
+
+ def test_keypair_create_quota_limit(self):
+
+ def fake_quotas_count(self, context, resource, *args, **kwargs):
+ return 100
+
+ self.stubs.Set(QUOTAS, "count", fake_quotas_count)
+
+ body = {
+ 'keypair': {
+ 'name': 'create_test',
+ },
+ }
+
+ req = webob.Request.blank(self.base_url + '/os-keypairs')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 403)
+ res_dict = jsonutils.loads(res.body)
+ self.assertEqual(
+ "Quota exceeded, too many key pairs.",
+ res_dict['forbidden']['message'])
+
+ def test_keypair_create_duplicate(self):
+ self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate)
+ body = {'keypair': {'name': 'create_duplicate'}}
+ req = webob.Request.blank(self.base_url + '/os-keypairs')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 409)
+ res_dict = jsonutils.loads(res.body)
+ self.assertEqual(
+ "Key pair 'create_duplicate' already exists.",
+ res_dict['conflictingRequest']['message'])
+
+ def test_keypair_delete(self):
+ req = webob.Request.blank(self.base_url + '/os-keypairs/FAKE')
+ req.method = 'DELETE'
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 202)
+
+ def test_keypair_get_keypair_not_found(self):
+ req = webob.Request.blank(self.base_url + '/os-keypairs/DOESNOTEXIST')
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 404)
+
+ def test_keypair_delete_not_found(self):
+
+ def db_key_pair_get_not_found(context, user_id, name):
+ raise exception.KeypairNotFound(user_id=user_id, name=name)
+
+ self.stubs.Set(db, "key_pair_get",
+ db_key_pair_get_not_found)
+ req = webob.Request.blank(self.base_url + '/os-keypairs/WHAT')
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 404)
+
+ def test_keypair_show(self):
+
+ def _db_key_pair_get(context, user_id, name):
+ return dict(test_keypair.fake_keypair,
+ name='foo', public_key='XXX', fingerprint='YYY')
+
+ self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
+
+ req = webob.Request.blank(self.base_url + '/os-keypairs/FAKE')
+ req.method = 'GET'
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ res_dict = jsonutils.loads(res.body)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual('foo', res_dict['keypair']['name'])
+ self.assertEqual('XXX', res_dict['keypair']['public_key'])
+ self.assertEqual('YYY', res_dict['keypair']['fingerprint'])
+
+ def test_keypair_show_not_found(self):
+
+ def _db_key_pair_get(context, user_id, name):
+ raise exception.KeypairNotFound(user_id=user_id, name=name)
+
+ self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
+
+ req = webob.Request.blank(self.base_url + '/os-keypairs/FAKE')
+ req.method = 'GET'
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(self.app)
+ self.assertEqual(res.status_int, 404)
+
+ def test_show_server(self):
+ self.stubs.Set(db, 'instance_get',
+ fakes.fake_instance_get())
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get())
+ req = webob.Request.blank(self.base_url + '/servers/1')
+ req.headers['Content-Type'] = 'application/json'
+ response = req.get_response(self.app_server)
+ self.assertEqual(response.status_int, 200)
+ res_dict = jsonutils.loads(response.body)
+ self.assertIn('key_name', res_dict['server'])
+ self.assertEqual(res_dict['server']['key_name'], '')
+
+ def test_detail_servers(self):
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fakes.fake_instance_get_all_by_filters())
+ req = fakes.HTTPRequest.blank(self.base_url + '/servers/detail')
+ res = req.get_response(self.app_server)
+ server_dicts = jsonutils.loads(res.body)['servers']
+ self.assertEqual(len(server_dicts), 5)
+
+ for server_dict in server_dicts:
+ self.assertIn('key_name', server_dict)
+ self.assertEqual(server_dict['key_name'], '')
+
+
+class KeypairPolicyTestV21(test.TestCase):
+ KeyPairController = keypairs_v21.KeypairController()
+ policy_path = 'compute_extension:v3:os-keypairs'
+ base_url = '/v2/fake'
+
+ def setUp(self):
+ super(KeypairPolicyTestV21, self).setUp()
+
+ def _db_key_pair_get(context, user_id, name):
+ return dict(test_keypair.fake_keypair,
+ name='foo', public_key='XXX', fingerprint='YYY')
+
+ self.stubs.Set(db, "key_pair_get",
+ _db_key_pair_get)
+ self.stubs.Set(db, "key_pair_get_all_by_user",
+ db_key_pair_get_all_by_user)
+ self.stubs.Set(db, "key_pair_create",
+ db_key_pair_create)
+ self.stubs.Set(db, "key_pair_destroy",
+ db_key_pair_destroy)
+
+ def test_keypair_list_fail_policy(self):
+ rules = {self.policy_path + ':index':
+ common_policy.parse_rule('role:admin')}
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs')
+ self.assertRaises(exception.Forbidden,
+ self.KeyPairController.index,
+ req)
+
+ def test_keypair_list_pass_policy(self):
+ rules = {self.policy_path + ':index':
+ common_policy.parse_rule('')}
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs')
+ res = self.KeyPairController.index(req)
+ self.assertIn('keypairs', res)
+
+ def test_keypair_show_fail_policy(self):
+ rules = {self.policy_path + ':show':
+ common_policy.parse_rule('role:admin')}
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs/FAKE')
+ self.assertRaises(exception.Forbidden,
+ self.KeyPairController.show,
+ req, 'FAKE')
+
+ def test_keypair_show_pass_policy(self):
+ rules = {self.policy_path + ':show':
+ common_policy.parse_rule('')}
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs/FAKE')
+ res = self.KeyPairController.show(req, 'FAKE')
+ self.assertIn('keypair', res)
+
+ def test_keypair_create_fail_policy(self):
+ body = {'keypair': {'name': 'create_test'}}
+ rules = {self.policy_path + ':create':
+ common_policy.parse_rule('role:admin')}
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs')
+ req.method = 'POST'
+ self.assertRaises(exception.Forbidden,
+ self.KeyPairController.create,
+ req, body=body)
+
+ def test_keypair_create_pass_policy(self):
+ body = {'keypair': {'name': 'create_test'}}
+ rules = {self.policy_path + ':create':
+ common_policy.parse_rule('')}
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs')
+ req.method = 'POST'
+ res = self.KeyPairController.create(req, body=body)
+ self.assertIn('keypair', res)
+
+ def test_keypair_delete_fail_policy(self):
+ rules = {self.policy_path + ':delete':
+ common_policy.parse_rule('role:admin')}
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs/FAKE')
+ req.method = 'DELETE'
+ self.assertRaises(exception.Forbidden,
+ self.KeyPairController.delete,
+ req, 'FAKE')
+
+ def test_keypair_delete_pass_policy(self):
+ rules = {self.policy_path + ':delete':
+ common_policy.parse_rule('')}
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank(self.base_url + '/os-keypairs/FAKE')
+ req.method = 'DELETE'
+ res = self.KeyPairController.delete(req, 'FAKE')
+
+ # NOTE: on v2.1, http status code is set as wsgi_code of API
+ # method instead of status_int in a response object.
+ if isinstance(self.KeyPairController, keypairs_v21.KeypairController):
+ status_int = self.KeyPairController.delete.wsgi_code
+ else:
+ status_int = res.status_int
+ self.assertEqual(202, status_int)
+
+
+class KeypairsXMLSerializerTest(test.TestCase):
+ def setUp(self):
+ super(KeypairsXMLSerializerTest, self).setUp()
+ self.deserializer = wsgi.XMLDeserializer()
+
+ def test_default_serializer(self):
+ exemplar = dict(keypair=dict(
+ public_key='fake_public_key',
+ private_key='fake_private_key',
+ fingerprint='fake_fingerprint',
+ user_id='fake_user_id',
+ name='fake_key_name'))
+ serializer = keypairs_v2.KeypairTemplate()
+ text = serializer.serialize(exemplar)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('keypair', tree.tag)
+ for child in tree:
+ self.assertIn(child.tag, exemplar['keypair'])
+ self.assertEqual(child.text, exemplar['keypair'][child.tag])
+
+ def test_index_serializer(self):
+ exemplar = dict(keypairs=[
+ dict(keypair=dict(
+ name='key1_name',
+ public_key='key1_key',
+ fingerprint='key1_fingerprint')),
+ dict(keypair=dict(
+ name='key2_name',
+ public_key='key2_key',
+ fingerprint='key2_fingerprint'))])
+ serializer = keypairs_v2.KeypairsTemplate()
+ text = serializer.serialize(exemplar)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('keypairs', tree.tag)
+ self.assertEqual(len(exemplar['keypairs']), len(tree))
+ for idx, keypair in enumerate(tree):
+ self.assertEqual('keypair', keypair.tag)
+ kp_data = exemplar['keypairs'][idx]['keypair']
+ for child in keypair:
+ self.assertIn(child.tag, kp_data)
+ self.assertEqual(child.text, kp_data[child.tag])
+
+ def test_deserializer(self):
+ exemplar = dict(keypair=dict(
+ name='key_name',
+ public_key='public_key'))
+ intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+ '<keypair><name>key_name</name>'
+ '<public_key>public_key</public_key></keypair>')
+
+ result = self.deserializer.deserialize(intext)['body']
+ self.assertEqual(result, exemplar)
+
+
+class KeypairsTestV2(KeypairsTestV21):
+
+ def _setup_app(self):
+ self.app = fakes.wsgi_app(init_only=('os-keypairs',))
+ self.app_server = fakes.wsgi_app(init_only=('servers',))
+
+
+class KeypairPolicyTestV2(KeypairPolicyTestV21):
+ KeyPairController = keypairs_v2.KeypairController()
+ policy_path = 'compute_extension:keypairs'
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_migrate_server.py b/nova/tests/unit/api/openstack/compute/contrib/test_migrate_server.py
new file mode 100644
index 0000000000..069b688837
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_migrate_server.py
@@ -0,0 +1,231 @@
+# Copyright 2011 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack.compute.plugins.v3 import migrate_server
+from nova import exception
+from nova.openstack.common import uuidutils
+from nova.tests.unit.api.openstack.compute.plugins.v3 import \
+ admin_only_action_common
+from nova.tests.unit.api.openstack import fakes
+
+
+class MigrateServerTests(admin_only_action_common.CommonTests):
+ def setUp(self):
+ super(MigrateServerTests, self).setUp()
+ self.controller = migrate_server.MigrateServerController()
+ self.compute_api = self.controller.compute_api
+
+ def _fake_controller(*args, **kwargs):
+ return self.controller
+
+ self.stubs.Set(migrate_server, 'MigrateServerController',
+ _fake_controller)
+ self.app = fakes.wsgi_app_v21(init_only=('servers',
+ 'os-migrate-server'),
+ fake_auth_context=self.context)
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_migrate(self):
+ method_translations = {'migrate': 'resize',
+ 'os-migrateLive': 'live_migrate'}
+ body_map = {'os-migrateLive': {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}}
+ args_map = {'os-migrateLive': ((False, False, 'hostname'), {})}
+ self._test_actions(['migrate', 'os-migrateLive'], body_map=body_map,
+ method_translations=method_translations,
+ args_map=args_map)
+
+ def test_migrate_none_hostname(self):
+ method_translations = {'migrate': 'resize',
+ 'os-migrateLive': 'live_migrate'}
+ body_map = {'os-migrateLive': {'host': None,
+ 'block_migration': False,
+ 'disk_over_commit': False}}
+ args_map = {'os-migrateLive': ((False, False, None), {})}
+ self._test_actions(['migrate', 'os-migrateLive'], body_map=body_map,
+ method_translations=method_translations,
+ args_map=args_map)
+
+ def test_migrate_with_non_existed_instance(self):
+ body_map = {'os-migrateLive': {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}}
+ self._test_actions_with_non_existed_instance(
+ ['migrate', 'os-migrateLive'], body_map=body_map)
+
+ def test_migrate_raise_conflict_on_invalid_state(self):
+ method_translations = {'migrate': 'resize',
+ 'os-migrateLive': 'live_migrate'}
+ body_map = {'os-migrateLive': {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}}
+ args_map = {'os-migrateLive': ((False, False, 'hostname'), {})}
+ self._test_actions_raise_conflict_on_invalid_state(
+ ['migrate', 'os-migrateLive'], body_map=body_map,
+ args_map=args_map, method_translations=method_translations)
+
+ def test_actions_with_locked_instance(self):
+ method_translations = {'migrate': 'resize',
+ 'os-migrateLive': 'live_migrate'}
+ body_map = {'os-migrateLive': {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}}
+ args_map = {'os-migrateLive': ((False, False, 'hostname'), {})}
+ self._test_actions_with_locked_instance(
+ ['migrate', 'os-migrateLive'], body_map=body_map,
+ args_map=args_map, method_translations=method_translations)
+
+ def _test_migrate_exception(self, exc_info, expected_result):
+ self.mox.StubOutWithMock(self.compute_api, 'resize')
+ instance = self._stub_instance_get()
+ self.compute_api.resize(self.context, instance).AndRaise(exc_info)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance['uuid'],
+ {'migrate': None})
+ self.assertEqual(expected_result, res.status_int)
+
+ def test_migrate_too_many_instances(self):
+ exc_info = exception.TooManyInstances(overs='', req='', used=0,
+ allowed=0, resource='')
+ self._test_migrate_exception(exc_info, 403)
+
+ def _test_migrate_live_succeeded(self, param):
+ self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
+ instance = self._stub_instance_get()
+ self.compute_api.live_migrate(self.context, instance, False,
+ False, 'hostname')
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance.uuid,
+ {'os-migrateLive': param})
+ self.assertEqual(202, res.status_int)
+
+ def test_migrate_live_enabled(self):
+ param = {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}
+ self._test_migrate_live_succeeded(param)
+
+ def test_migrate_live_enabled_with_string_param(self):
+ param = {'host': 'hostname',
+ 'block_migration': "False",
+ 'disk_over_commit': "False"}
+ self._test_migrate_live_succeeded(param)
+
+ def test_migrate_live_without_host(self):
+ res = self._make_request('/servers/FAKE/action',
+ {'os-migrateLive':
+ {'block_migration': False,
+ 'disk_over_commit': False}})
+ self.assertEqual(400, res.status_int)
+
+ def test_migrate_live_without_block_migration(self):
+ res = self._make_request('/servers/FAKE/action',
+ {'os-migrateLive':
+ {'host': 'hostname',
+ 'disk_over_commit': False}})
+ self.assertEqual(400, res.status_int)
+
+ def test_migrate_live_without_disk_over_commit(self):
+ res = self._make_request('/servers/FAKE/action',
+ {'os-migrateLive':
+ {'host': 'hostname',
+ 'block_migration': False}})
+ self.assertEqual(400, res.status_int)
+
+ def test_migrate_live_with_invalid_block_migration(self):
+ res = self._make_request('/servers/FAKE/action',
+ {'os-migrateLive':
+ {'host': 'hostname',
+ 'block_migration': "foo",
+ 'disk_over_commit': False}})
+ self.assertEqual(400, res.status_int)
+
+ def test_migrate_live_with_invalid_disk_over_commit(self):
+ res = self._make_request('/servers/FAKE/action',
+ {'os-migrateLive':
+ {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': "foo"}})
+ self.assertEqual(400, res.status_int)
+
+ def _test_migrate_live_failed_with_exception(self, fake_exc,
+ uuid=None):
+ self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
+
+ instance = self._stub_instance_get(uuid=uuid)
+ self.compute_api.live_migrate(self.context, instance, False,
+ False, 'hostname').AndRaise(fake_exc)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance.uuid,
+ {'os-migrateLive':
+ {'host': 'hostname',
+ 'block_migration': False,
+ 'disk_over_commit': False}})
+ self.assertEqual(400, res.status_int)
+ self.assertIn(unicode(fake_exc), res.body)
+
+ def test_migrate_live_compute_service_unavailable(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.ComputeServiceUnavailable(host='host'))
+
+ def test_migrate_live_invalid_hypervisor_type(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InvalidHypervisorType())
+
+ def test_migrate_live_invalid_cpu_info(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InvalidCPUInfo(reason=""))
+
+ def test_migrate_live_unable_to_migrate_to_self(self):
+ uuid = uuidutils.generate_uuid()
+ self._test_migrate_live_failed_with_exception(
+ exception.UnableToMigrateToSelf(instance_id=uuid,
+ host='host'),
+ uuid=uuid)
+
+ def test_migrate_live_destination_hypervisor_too_old(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.DestinationHypervisorTooOld())
+
+ def test_migrate_live_no_valid_host(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.NoValidHost(reason=''))
+
+ def test_migrate_live_invalid_local_storage(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InvalidLocalStorage(path='', reason=''))
+
+ def test_migrate_live_invalid_shared_storage(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InvalidSharedStorage(path='', reason=''))
+
+ def test_migrate_live_hypervisor_unavailable(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.HypervisorUnavailable(host=""))
+
+ def test_migrate_live_instance_not_running(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.InstanceNotRunning(instance_id=""))
+
+ def test_migrate_live_pre_check_error(self):
+ self._test_migrate_live_failed_with_exception(
+ exception.MigrationPreCheckError(reason=''))
diff --git a/nova/tests/api/openstack/compute/contrib/test_migrations.py b/nova/tests/unit/api/openstack/compute/contrib/test_migrations.py
index ac18576389..ac18576389 100644
--- a/nova/tests/api/openstack/compute/contrib/test_migrations.py
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_migrations.py
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_multinic.py b/nova/tests/unit/api/openstack/compute/contrib/test_multinic.py
new file mode 100644
index 0000000000..dcf1dd299f
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_multinic.py
@@ -0,0 +1,204 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.serialization import jsonutils
+import webob
+
+from nova import compute
+from nova import exception
+from nova import objects
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+UUID = '70f6db34-de8d-4fbd-aafb-4065bdfa6114'
+last_add_fixed_ip = (None, None)
+last_remove_fixed_ip = (None, None)
+
+
+def compute_api_add_fixed_ip(self, context, instance, network_id):
+ global last_add_fixed_ip
+
+ last_add_fixed_ip = (instance['uuid'], network_id)
+
+
+def compute_api_remove_fixed_ip(self, context, instance, address):
+ global last_remove_fixed_ip
+
+ last_remove_fixed_ip = (instance['uuid'], address)
+
+
+def compute_api_get(self, context, instance_id, want_objects=False,
+ expected_attrs=None):
+ instance = objects.Instance()
+ instance.uuid = instance_id
+ instance.id = 1
+ instance.vm_state = 'fake'
+ instance.task_state = 'fake'
+ instance.obj_reset_changes()
+ return instance
+
+
+class FixedIpTestV21(test.NoDBTestCase):
+ def setUp(self):
+ super(FixedIpTestV21, self).setUp()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ self.stubs.Set(compute.api.API, "add_fixed_ip",
+ compute_api_add_fixed_ip)
+ self.stubs.Set(compute.api.API, "remove_fixed_ip",
+ compute_api_remove_fixed_ip)
+ self.stubs.Set(compute.api.API, 'get', compute_api_get)
+ self.app = self._get_app()
+
+ def _get_app(self):
+ return fakes.wsgi_app_v21(init_only=('servers', 'os-multinic'))
+
+ def _get_url(self):
+ return '/v2/fake'
+
+ def test_add_fixed_ip(self):
+ global last_add_fixed_ip
+ last_add_fixed_ip = (None, None)
+
+ body = dict(addFixedIp=dict(networkId='test_net'))
+ req = webob.Request.blank(
+ self._get_url() + '/servers/%s/action' % UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+ self.assertEqual(last_add_fixed_ip, (UUID, 'test_net'))
+
+ def _test_add_fixed_ip_bad_request(self, body):
+ req = webob.Request.blank(
+ self._get_url() + '/servers/%s/action' % UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+ resp = req.get_response(self.app)
+ self.assertEqual(400, resp.status_int)
+
+ def test_add_fixed_ip_empty_network_id(self):
+ body = {'addFixedIp': {'network_id': ''}}
+ self._test_add_fixed_ip_bad_request(body)
+
+ def test_add_fixed_ip_network_id_bigger_than_36(self):
+ body = {'addFixedIp': {'network_id': 'a' * 37}}
+ self._test_add_fixed_ip_bad_request(body)
+
+ def test_add_fixed_ip_no_network(self):
+ global last_add_fixed_ip
+ last_add_fixed_ip = (None, None)
+
+ body = dict(addFixedIp=dict())
+ req = webob.Request.blank(
+ self._get_url() + '/servers/%s/action' % UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual(last_add_fixed_ip, (None, None))
+
+ @mock.patch.object(compute.api.API, 'add_fixed_ip')
+ def test_add_fixed_ip_no_more_ips_available(self, mock_add_fixed_ip):
+ mock_add_fixed_ip.side_effect = exception.NoMoreFixedIps(net='netid')
+
+ body = dict(addFixedIp=dict(networkId='test_net'))
+ req = webob.Request.blank(
+ self._get_url() + '/servers/%s/action' % UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+
+ def test_remove_fixed_ip(self):
+ global last_remove_fixed_ip
+ last_remove_fixed_ip = (None, None)
+
+ body = dict(removeFixedIp=dict(address='10.10.10.1'))
+ req = webob.Request.blank(
+ self._get_url() + '/servers/%s/action' % UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 202)
+ self.assertEqual(last_remove_fixed_ip, (UUID, '10.10.10.1'))
+
+ def test_remove_fixed_ip_no_address(self):
+ global last_remove_fixed_ip
+ last_remove_fixed_ip = (None, None)
+
+ body = dict(removeFixedIp=dict())
+ req = webob.Request.blank(
+ self._get_url() + '/servers/%s/action' % UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+ self.assertEqual(last_remove_fixed_ip, (None, None))
+
+ def test_remove_fixed_ip_invalid_address(self):
+ body = {'remove_fixed_ip': {'address': ''}}
+ req = webob.Request.blank(
+ self._get_url() + '/servers/%s/action' % UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+ resp = req.get_response(self.app)
+ self.assertEqual(400, resp.status_int)
+
+ @mock.patch.object(compute.api.API, 'remove_fixed_ip',
+ side_effect=exception.FixedIpNotFoundForSpecificInstance(
+ instance_uuid=UUID, ip='10.10.10.1'))
+ def test_remove_fixed_ip_not_found(self, _remove_fixed_ip):
+
+ body = {'remove_fixed_ip': {'address': '10.10.10.1'}}
+ req = webob.Request.blank(
+ self._get_url() + '/servers/%s/action' % UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(self.app)
+ self.assertEqual(400, resp.status_int)
+
+
+class FixedIpTestV2(FixedIpTestV21):
+ def setUp(self):
+ super(FixedIpTestV2, self).setUp()
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Multinic'])
+
+ def _get_app(self):
+ return fakes.wsgi_app(init_only=('servers',))
+
+ def test_remove_fixed_ip_invalid_address(self):
+        # NOTE(cyeoh): This test is disabled for the V2 API because it
+        # has poorer input validation.
+ pass
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_networks.py b/nova/tests/unit/api/openstack/compute/contrib/test_networks.py
new file mode 100644
index 0000000000..5636a06d0d
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_networks.py
@@ -0,0 +1,610 @@
+# Copyright 2011 Grid Dynamics
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import datetime
+import math
+import uuid
+
+import iso8601
+import mock
+import netaddr
+from oslo.config import cfg
+import webob
+
+from nova.api.openstack.compute.contrib import networks_associate
+from nova.api.openstack.compute.contrib import os_networks as networks
+from nova.api.openstack.compute.plugins.v3 import networks as networks_v21
+from nova.api.openstack.compute.plugins.v3 import networks_associate as \
+ networks_associate_v21
+from nova.api.openstack import extensions
+import nova.context
+from nova import exception
+from nova.network import manager
+from nova import objects
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+import nova.utils
+
+CONF = cfg.CONF
+
+UTC = iso8601.iso8601.Utc()
+FAKE_NETWORKS = [
+ {
+ 'bridge': 'br100', 'vpn_public_port': 1000,
+ 'dhcp_start': '10.0.0.3', 'bridge_interface': 'eth0',
+ 'updated_at': datetime.datetime(2011, 8, 16, 9, 26, 13, 48257,
+ tzinfo=UTC),
+ 'id': 1, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf047',
+ 'cidr_v6': None, 'deleted_at': None,
+ 'gateway': '10.0.0.1', 'label': 'mynet_0',
+ 'project_id': '1234', 'rxtx_base': None,
+ 'vpn_private_address': '10.0.0.2', 'deleted': False,
+ 'vlan': 100, 'broadcast': '10.0.0.7',
+ 'netmask': '255.255.255.248', 'injected': False,
+ 'cidr': '10.0.0.0/29',
+ 'vpn_public_address': '127.0.0.1', 'multi_host': False,
+ 'dns1': None, 'dns2': None, 'host': 'nsokolov-desktop',
+ 'gateway_v6': None, 'netmask_v6': None, 'priority': None,
+ 'created_at': datetime.datetime(2011, 8, 15, 6, 19, 19, 387525,
+ tzinfo=UTC),
+ 'mtu': None, 'dhcp_server': '10.0.0.1', 'enable_dhcp': True,
+ 'share_address': False,
+ },
+ {
+ 'bridge': 'br101', 'vpn_public_port': 1001,
+ 'dhcp_start': '10.0.0.11', 'bridge_interface': 'eth0',
+ 'updated_at': None, 'id': 2, 'cidr_v6': None,
+ 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf000',
+ 'deleted_at': None, 'gateway': '10.0.0.9',
+ 'label': 'mynet_1', 'project_id': None,
+ 'vpn_private_address': '10.0.0.10', 'deleted': False,
+ 'vlan': 101, 'broadcast': '10.0.0.15', 'rxtx_base': None,
+ 'netmask': '255.255.255.248', 'injected': False,
+ 'cidr': '10.0.0.10/29', 'vpn_public_address': None,
+ 'multi_host': False, 'dns1': None, 'dns2': None, 'host': None,
+ 'gateway_v6': None, 'netmask_v6': None, 'priority': None,
+ 'created_at': datetime.datetime(2011, 8, 15, 6, 19, 19, 885495,
+ tzinfo=UTC),
+ 'mtu': None, 'dhcp_server': '10.0.0.9', 'enable_dhcp': True,
+ 'share_address': False,
+ },
+]
+
+
+FAKE_USER_NETWORKS = [
+ {
+ 'id': 1, 'cidr': '10.0.0.0/29', 'netmask': '255.255.255.248',
+ 'gateway': '10.0.0.1', 'broadcast': '10.0.0.7', 'dns1': None,
+ 'dns2': None, 'cidr_v6': None, 'gateway_v6': None, 'label': 'mynet_0',
+ 'netmask_v6': None, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf047',
+ },
+ {
+ 'id': 2, 'cidr': '10.0.0.10/29', 'netmask': '255.255.255.248',
+ 'gateway': '10.0.0.9', 'broadcast': '10.0.0.15', 'dns1': None,
+ 'dns2': None, 'cidr_v6': None, 'gateway_v6': None, 'label': 'mynet_1',
+ 'netmask_v6': None, 'uuid': '20c8acc0-f747-4d71-a389-46d078ebf000',
+ },
+]
+
+NEW_NETWORK = {
+ "network": {
+ "bridge_interface": "eth0",
+ "cidr": "10.20.105.0/24",
+ "label": "new net 111",
+ "vlan_start": 111,
+ "injected": False,
+ "multi_host": False,
+ 'mtu': None,
+ 'dhcp_server': '10.0.0.1',
+ 'enable_dhcp': True,
+ 'share_address': False,
+ }
+}
+
+
+class FakeNetworkAPI(object):
+
+ _sentinel = object()
+ _vlan_is_disabled = False
+
+ def __init__(self):
+ self.networks = copy.deepcopy(FAKE_NETWORKS)
+
+ def disable_vlan(self):
+ self._vlan_is_disabled = True
+
+ def delete(self, context, network_id):
+ if network_id == 'always_delete':
+ return True
+ if network_id == -1:
+ raise exception.NetworkInUse(network_id=network_id)
+ for i, network in enumerate(self.networks):
+ if network['id'] == network_id:
+ del self.networks[0]
+ return True
+ raise exception.NetworkNotFoundForUUID(uuid=network_id)
+
+ def disassociate(self, context, network_uuid):
+ for network in self.networks:
+ if network.get('uuid') == network_uuid:
+ network['project_id'] = None
+ return True
+ raise exception.NetworkNotFound(network_id=network_uuid)
+
+ def associate(self, context, network_uuid, host=_sentinel,
+ project=_sentinel):
+ for network in self.networks:
+ if network.get('uuid') == network_uuid:
+ if host is not FakeNetworkAPI._sentinel:
+ network['host'] = host
+ if project is not FakeNetworkAPI._sentinel:
+ network['project_id'] = project
+ return True
+ raise exception.NetworkNotFound(network_id=network_uuid)
+
+ def add_network_to_project(self, context,
+ project_id, network_uuid=None):
+ if self._vlan_is_disabled:
+ raise NotImplementedError()
+ if network_uuid:
+ for network in self.networks:
+ if network.get('project_id', None) is None:
+ network['project_id'] = project_id
+ return
+ return
+ for network in self.networks:
+ if network.get('uuid') == network_uuid:
+ network['project_id'] = project_id
+ return
+
+ def get_all(self, context):
+ return self._fake_db_network_get_all(context, project_only=True)
+
+ def _fake_db_network_get_all(self, context, project_only="allow_none"):
+ project_id = context.project_id
+ nets = self.networks
+ if nova.context.is_user_context(context) and project_only:
+ if project_only == 'allow_none':
+ nets = [n for n in self.networks
+ if (n['project_id'] == project_id or
+ n['project_id'] is None)]
+ else:
+ nets = [n for n in self.networks
+ if n['project_id'] == project_id]
+ objs = [objects.Network._from_db_object(context,
+ objects.Network(),
+ net)
+ for net in nets]
+ return objects.NetworkList(objects=objs)
+
+ def get(self, context, network_id):
+ for network in self.networks:
+ if network.get('uuid') == network_id:
+ return objects.Network._from_db_object(context,
+ objects.Network(),
+ network)
+ raise exception.NetworkNotFound(network_id=network_id)
+
+ def create(self, context, **kwargs):
+ subnet_bits = int(math.ceil(math.log(kwargs.get(
+ 'network_size', CONF.network_size), 2)))
+ fixed_net_v4 = netaddr.IPNetwork(kwargs['cidr'])
+ prefixlen_v4 = 32 - subnet_bits
+ subnets_v4 = list(fixed_net_v4.subnet(
+ prefixlen_v4,
+ count=kwargs.get('num_networks', CONF.num_networks)))
+ new_networks = []
+ new_id = max((net['id'] for net in self.networks))
+ for index, subnet_v4 in enumerate(subnets_v4):
+ new_id += 1
+ net = {'id': new_id, 'uuid': str(uuid.uuid4())}
+
+ net['cidr'] = str(subnet_v4)
+ net['netmask'] = str(subnet_v4.netmask)
+ net['gateway'] = kwargs.get('gateway') or str(subnet_v4[1])
+ net['broadcast'] = str(subnet_v4.broadcast)
+ net['dhcp_start'] = str(subnet_v4[2])
+
+ for key in FAKE_NETWORKS[0].iterkeys():
+ net.setdefault(key, kwargs.get(key))
+ new_networks.append(net)
+ self.networks += new_networks
+ return new_networks
+
+
+# NOTE(vish): tests that network create Exceptions actually return
+# the proper error responses
+class NetworkCreateExceptionsTestV21(test.TestCase):
+ url_prefix = '/v2/1234'
+
+ class PassthroughAPI(object):
+ def __init__(self):
+ self.network_manager = manager.FlatDHCPManager()
+
+ def create(self, *args, **kwargs):
+ if kwargs['label'] == 'fail_NetworkNotCreated':
+ raise exception.NetworkNotCreated(req='fake_fail')
+ return self.network_manager.create_networks(*args, **kwargs)
+
+ def setUp(self):
+ super(NetworkCreateExceptionsTestV21, self).setUp()
+ self._setup()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+
+ def _setup(self):
+ self.controller = networks_v21.NetworkController(self.PassthroughAPI())
+
+ def test_network_create_bad_vlan(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['vlan_start'] = 'foo'
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, net)
+
+ def test_network_create_no_cidr(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['cidr'] = ''
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, net)
+
+ def test_network_create_invalid_fixed_cidr(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['fixed_cidr'] = 'foo'
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, net)
+
+ def test_network_create_invalid_start(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['allowed_start'] = 'foo'
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, net)
+
+ def test_network_create_handle_network_not_created(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['label'] = 'fail_NetworkNotCreated'
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, net)
+
+ def test_network_create_cidr_conflict(self):
+
+ @staticmethod
+ def get_all(context):
+ ret = objects.NetworkList(context=context, objects=[])
+ net = objects.Network(cidr='10.0.0.0/23')
+ ret.objects.append(net)
+ return ret
+
+ self.stubs.Set(objects.NetworkList, 'get_all', get_all)
+
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['cidr'] = '10.0.0.0/24'
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller.create, req, net)
+
+
+class NetworkCreateExceptionsTestV2(NetworkCreateExceptionsTestV21):
+
+ def _setup(self):
+ ext_mgr = extensions.ExtensionManager()
+ ext_mgr.extensions = {'os-extended-networks': 'fake'}
+
+ self.controller = networks.NetworkController(
+ self.PassthroughAPI(), ext_mgr)
+
+
+class NetworksTestV21(test.NoDBTestCase):
+ url_prefix = '/v2/1234'
+
+ def setUp(self):
+ super(NetworksTestV21, self).setUp()
+ self.fake_network_api = FakeNetworkAPI()
+ self._setup()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+
+ def _setup(self):
+ self.controller = networks_v21.NetworkController(
+ self.fake_network_api)
+
+ def _check_status(self, res, method, code):
+ self.assertEqual(method.wsgi_code, 202)
+
+ @staticmethod
+ def network_uuid_to_id(network):
+ network['id'] = network['uuid']
+ del network['uuid']
+
+ def test_network_list_all_as_user(self):
+ self.maxDiff = None
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ res_dict = self.controller.index(req)
+ self.assertEqual(res_dict, {'networks': []})
+
+ project_id = req.environ["nova.context"].project_id
+ cxt = req.environ["nova.context"]
+ uuid = FAKE_NETWORKS[0]['uuid']
+ self.fake_network_api.associate(context=cxt,
+ network_uuid=uuid,
+ project=project_id)
+ res_dict = self.controller.index(req)
+ expected = [copy.deepcopy(FAKE_USER_NETWORKS[0])]
+ for network in expected:
+ self.network_uuid_to_id(network)
+ self.assertEqual({'networks': expected}, res_dict)
+
+ def test_network_list_all_as_admin(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ req.environ["nova.context"].is_admin = True
+ res_dict = self.controller.index(req)
+ expected = copy.deepcopy(FAKE_NETWORKS)
+ for network in expected:
+ self.network_uuid_to_id(network)
+ self.assertEqual({'networks': expected}, res_dict)
+
+ def test_network_disassociate(self):
+ uuid = FAKE_NETWORKS[0]['uuid']
+ req = fakes.HTTPRequest.blank(self.url_prefix +
+ '/os-networks/%s/action' % uuid)
+ res = self.controller._disassociate_host_and_project(
+ req, uuid, {'disassociate': None})
+ self._check_status(res, self.controller._disassociate_host_and_project,
+ 202)
+ self.assertIsNone(self.fake_network_api.networks[0]['project_id'])
+ self.assertIsNone(self.fake_network_api.networks[0]['host'])
+
+ def test_network_disassociate_not_found(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix +
+ '/os-networks/100/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._disassociate_host_and_project,
+ req, 100, {'disassociate': None})
+
+ def test_network_get_as_user(self):
+ uuid = FAKE_USER_NETWORKS[0]['uuid']
+ req = fakes.HTTPRequest.blank(self.url_prefix +
+ '/os-networks/%s' % uuid)
+ res_dict = self.controller.show(req, uuid)
+ expected = {'network': copy.deepcopy(FAKE_USER_NETWORKS[0])}
+ self.network_uuid_to_id(expected['network'])
+ self.assertEqual(expected, res_dict)
+
+ def test_network_get_as_admin(self):
+ uuid = FAKE_NETWORKS[0]['uuid']
+ req = fakes.HTTPRequest.blank(self.url_prefix +
+ '/os-networks/%s' % uuid)
+ req.environ["nova.context"].is_admin = True
+ res_dict = self.controller.show(req, uuid)
+ expected = {'network': copy.deepcopy(FAKE_NETWORKS[0])}
+ self.network_uuid_to_id(expected['network'])
+ self.assertEqual(expected, res_dict)
+
+ def test_network_get_not_found(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks/100')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.show, req, 100)
+
+ def test_network_delete(self):
+ uuid = FAKE_NETWORKS[0]['uuid']
+ req = fakes.HTTPRequest.blank(self.url_prefix +
+ '/os-networks/%s' % uuid)
+ res = self.controller.delete(req, 1)
+ self._check_status(res, self.controller._disassociate_host_and_project,
+ 202)
+
+ def test_network_delete_not_found(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks/100')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.delete, req, 100)
+
+ def test_network_delete_in_use(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks/-1')
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller.delete, req, -1)
+
+ def test_network_add(self):
+ uuid = FAKE_NETWORKS[1]['uuid']
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks/add')
+ res = self.controller.add(req, {'id': uuid})
+ self._check_status(res, self.controller._disassociate_host_and_project,
+ 202)
+ req = fakes.HTTPRequest.blank(self.url_prefix +
+ '/os-networks/%s' % uuid)
+ req.environ["nova.context"].is_admin = True
+ res_dict = self.controller.show(req, uuid)
+ self.assertEqual(res_dict['network']['project_id'], 'fake')
+
+ @mock.patch('nova.tests.unit.api.openstack.compute.contrib.test_networks.'
+ 'FakeNetworkAPI.add_network_to_project',
+ side_effect=exception.NoMoreNetworks)
+ def test_network_add_no_more_networks_fail(self, mock_add):
+ uuid = FAKE_NETWORKS[1]['uuid']
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks/add')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.add, req,
+ {'id': uuid})
+
+ @mock.patch('nova.tests.unit.api.openstack.compute.contrib.test_networks.'
+ 'FakeNetworkAPI.add_network_to_project',
+ side_effect=exception.NetworkNotFoundForUUID(uuid='fake_uuid'))
+ def test_network_add_network_not_found_networks_fail(self, mock_add):
+ uuid = FAKE_NETWORKS[1]['uuid']
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks/add')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.add, req,
+ {'id': uuid})
+
+ def test_network_create(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ res_dict = self.controller.create(req, NEW_NETWORK)
+ self.assertIn('network', res_dict)
+ uuid = res_dict['network']['id']
+ req = fakes.HTTPRequest.blank(self.url_prefix +
+ '/os-networks/%s' % uuid)
+ res_dict = self.controller.show(req, uuid)
+ self.assertTrue(res_dict['network']['label'].
+ startswith(NEW_NETWORK['network']['label']))
+
+ def test_network_create_large(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ large_network = copy.deepcopy(NEW_NETWORK)
+ large_network['network']['cidr'] = '128.0.0.0/4'
+ res_dict = self.controller.create(req, large_network)
+ self.assertEqual(res_dict['network']['cidr'],
+ large_network['network']['cidr'])
+
+ def test_network_create_bad_cidr(self):
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['cidr'] = '128.0.0.0/900'
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, net)
+
+ def test_network_neutron_disassociate_not_implemented(self):
+ uuid = FAKE_NETWORKS[1]['uuid']
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ controller = networks.NetworkController()
+ req = fakes.HTTPRequest.blank(self.url_prefix +
+ '/os-networks/%s/action' % uuid)
+ self.assertRaises(webob.exc.HTTPNotImplemented,
+ controller._disassociate_host_and_project,
+ req, uuid, {'disassociate': None})
+
+
+class NetworksTestV2(NetworksTestV21):
+
+ def _setup(self):
+ ext_mgr = extensions.ExtensionManager()
+ ext_mgr.extensions = {'os-extended-networks': 'fake'}
+ self.controller = networks.NetworkController(self.fake_network_api,
+ ext_mgr)
+
+ def _check_status(self, res, method, code):
+ self.assertEqual(res.status_int, 202)
+
+ def test_network_create_not_extended(self):
+ self.stubs.Set(self.controller, 'extended', False)
+ # NOTE(vish): Verify that new params are not passed through if
+ # extension is not enabled.
+
+ def no_mtu(*args, **kwargs):
+ if 'mtu' in kwargs:
+ raise test.TestingException("mtu should not pass through")
+ return [{}]
+
+ self.stubs.Set(self.controller.network_api, 'create', no_mtu)
+ req = fakes.HTTPRequest.blank(self.url_prefix + '/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['mtu'] = 9000
+ self.controller.create(req, net)
+
+
+class NetworksAssociateTestV21(test.NoDBTestCase):
+
+ def setUp(self):
+ super(NetworksAssociateTestV21, self).setUp()
+ self.fake_network_api = FakeNetworkAPI()
+ self._setup()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+
+ def _setup(self):
+ self.controller = networks.NetworkController(self.fake_network_api)
+ self.associate_controller = networks_associate_v21\
+ .NetworkAssociateActionController(self.fake_network_api)
+
+ def _check_status(self, res, method, code):
+ self.assertEqual(method.wsgi_code, code)
+
+ def test_network_disassociate_host_only(self):
+ uuid = FAKE_NETWORKS[0]['uuid']
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
+ res = self.associate_controller._disassociate_host_only(
+ req, uuid, {'disassociate_host': None})
+ self._check_status(res,
+ self.associate_controller._disassociate_host_only,
+ 202)
+ self.assertIsNotNone(self.fake_network_api.networks[0]['project_id'])
+ self.assertIsNone(self.fake_network_api.networks[0]['host'])
+
+ def test_network_disassociate_project_only(self):
+ uuid = FAKE_NETWORKS[0]['uuid']
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
+ res = self.associate_controller._disassociate_project_only(
+ req, uuid, {'disassociate_project': None})
+ self._check_status(
+ res, self.associate_controller._disassociate_project_only, 202)
+ self.assertIsNone(self.fake_network_api.networks[0]['project_id'])
+ self.assertIsNotNone(self.fake_network_api.networks[0]['host'])
+
+ def test_network_associate_with_host(self):
+ uuid = FAKE_NETWORKS[1]['uuid']
+ req = fakes.HTTPRequest.blank('/v2/1234//os-networks/%s/action' % uuid)
+ res = self.associate_controller._associate_host(
+ req, uuid, {'associate_host': "TestHost"})
+ self._check_status(res, self.associate_controller._associate_host, 202)
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s' % uuid)
+ req.environ["nova.context"].is_admin = True
+ res_dict = self.controller.show(req, uuid)
+ self.assertEqual(res_dict['network']['host'], 'TestHost')
+
+ def test_network_neutron_associate_not_implemented(self):
+ uuid = FAKE_NETWORKS[1]['uuid']
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ assoc_ctrl = networks_associate.NetworkAssociateActionController()
+
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
+ self.assertRaises(webob.exc.HTTPNotImplemented,
+ assoc_ctrl._associate_host,
+ req, uuid, {'associate_host': "TestHost"})
+
+ def test_network_neutron_disassociate_project_not_implemented(self):
+ uuid = FAKE_NETWORKS[1]['uuid']
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ assoc_ctrl = networks_associate.NetworkAssociateActionController()
+
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
+ self.assertRaises(webob.exc.HTTPNotImplemented,
+ assoc_ctrl._disassociate_project_only,
+ req, uuid, {'disassociate_project': None})
+
+ def test_network_neutron_disassociate_host_not_implemented(self):
+ uuid = FAKE_NETWORKS[1]['uuid']
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ assoc_ctrl = networks_associate.NetworkAssociateActionController()
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks/%s/action' % uuid)
+ self.assertRaises(webob.exc.HTTPNotImplemented,
+ assoc_ctrl._disassociate_host_only,
+ req, uuid, {'disassociate_host': None})
+
+
+class NetworksAssociateTestV2(NetworksAssociateTestV21):
+
+ def _setup(self):
+ ext_mgr = extensions.ExtensionManager()
+ ext_mgr.extensions = {'os-extended-networks': 'fake'}
+ self.controller = networks.NetworkController(
+ self.fake_network_api,
+ ext_mgr)
+ self.associate_controller = networks_associate\
+ .NetworkAssociateActionController(self.fake_network_api)
+
+ def _check_status(self, res, method, code):
+ self.assertEqual(res.status_int, 202)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_neutron_security_groups.py b/nova/tests/unit/api/openstack/compute/contrib/test_neutron_security_groups.py
new file mode 100644
index 0000000000..704de21005
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_neutron_security_groups.py
@@ -0,0 +1,918 @@
+# Copyright 2013 Nicira, Inc.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import uuid
+
+from lxml import etree
+import mock
+from neutronclient.common import exceptions as n_exc
+from neutronclient.neutron import v2_0 as neutronv20
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import security_groups
+from nova.api.openstack import xmlutil
+from nova import compute
+from nova import context
+import nova.db
+from nova import exception
+from nova.network import model
+from nova.network import neutronv2
+from nova.network.neutronv2 import api as neutron_api
+from nova.network.security_group import neutron_driver
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack.compute.contrib import test_security_groups
+from nova.tests.unit.api.openstack import fakes
+
+
+class TestNeutronSecurityGroupsTestCase(test.TestCase):
+ def setUp(self):
+ super(TestNeutronSecurityGroupsTestCase, self).setUp()
+ cfg.CONF.set_override('security_group_api', 'neutron')
+ self.original_client = neutronv2.get_client
+ neutronv2.get_client = get_client
+
+ def tearDown(self):
+ neutronv2.get_client = self.original_client
+ get_client()._reset()
+ super(TestNeutronSecurityGroupsTestCase, self).tearDown()
+
+
+class TestNeutronSecurityGroupsV21(
+ test_security_groups.TestSecurityGroupsV21,
+ TestNeutronSecurityGroupsTestCase):
+
+ def _create_sg_template(self, **kwargs):
+ sg = test_security_groups.security_group_template(**kwargs)
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ return self.controller.create(req, {'security_group': sg})
+
+ def _create_network(self):
+ body = {'network': {'name': 'net1'}}
+ neutron = get_client()
+ net = neutron.create_network(body)
+ body = {'subnet': {'network_id': net['network']['id'],
+ 'cidr': '10.0.0.0/24'}}
+ neutron.create_subnet(body)
+ return net
+
+ def _create_port(self, **kwargs):
+ body = {'port': {'binding:vnic_type': model.VNIC_TYPE_NORMAL}}
+ fields = ['security_groups', 'device_id', 'network_id',
+ 'port_security_enabled']
+ for field in fields:
+ if field in kwargs:
+ body['port'][field] = kwargs[field]
+ neutron = get_client()
+ return neutron.create_port(body)
+
+ def _create_security_group(self, **kwargs):
+ body = {'security_group': {}}
+ fields = ['name', 'description']
+ for field in fields:
+ if field in kwargs:
+ body['security_group'][field] = kwargs[field]
+ neutron = get_client()
+ return neutron.create_security_group(body)
+
+ def test_create_security_group_with_no_description(self):
+ # Neutron's security group description field is optional.
+ pass
+
+ def test_create_security_group_with_empty_description(self):
+ # Neutron's security group description field is optional.
+ pass
+
+ def test_create_security_group_with_blank_name(self):
+ # Neutron's security group name field is optional.
+ pass
+
+ def test_create_security_group_with_whitespace_name(self):
+ # Neutron allows security group name to be whitespace.
+ pass
+
+ def test_create_security_group_with_blank_description(self):
+ # Neutron's security group description field is optional.
+ pass
+
+ def test_create_security_group_with_whitespace_description(self):
+ # Neutron allows description to be whitespace.
+ pass
+
+ def test_create_security_group_with_duplicate_name(self):
+ # Neutron allows duplicate names for security groups.
+ pass
+
+ def test_create_security_group_non_string_name(self):
+ # Neutron allows security group name to be non string.
+ pass
+
+ def test_create_security_group_non_string_description(self):
+ # Neutron allows non string description.
+ pass
+
+ def test_create_security_group_quota_limit(self):
+ # Enforced by Neutron server.
+ pass
+
+ def test_update_security_group(self):
+ # Enforced by Neutron server.
+ pass
+
+ def test_get_security_group_list(self):
+ self._create_sg_template().get('security_group')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ list_dict = self.controller.index(req)
+ self.assertEqual(len(list_dict['security_groups']), 2)
+
+ def test_get_security_group_list_all_tenants(self):
+ pass
+
+ def test_get_security_group_by_instance(self):
+ sg = self._create_sg_template().get('security_group')
+ net = self._create_network()
+ self._create_port(
+ network_id=net['network']['id'], security_groups=[sg['id']],
+ device_id=test_security_groups.FAKE_UUID1)
+ expected = [{'rules': [], 'tenant_id': 'fake', 'id': sg['id'],
+ 'name': 'test', 'description': 'test-description'}]
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ test_security_groups.return_server_by_uuid)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/os-security-groups'
+ % test_security_groups.FAKE_UUID1)
+ res_dict = self.server_controller.index(
+ req, test_security_groups.FAKE_UUID1)['security_groups']
+ self.assertEqual(expected, res_dict)
+
+ def test_get_security_group_by_id(self):
+ sg = self._create_sg_template().get('security_group')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
+ % sg['id'])
+ res_dict = self.controller.show(req, sg['id'])
+ expected = {'security_group': sg}
+ self.assertEqual(res_dict, expected)
+
+ def test_delete_security_group_by_id(self):
+ sg = self._create_sg_template().get('security_group')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
+ sg['id'])
+ self.controller.delete(req, sg['id'])
+
+ def test_delete_security_group_by_admin(self):
+ sg = self._create_sg_template().get('security_group')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
+ sg['id'], use_admin_context=True)
+ self.controller.delete(req, sg['id'])
+
+ def test_delete_security_group_in_use(self):
+ sg = self._create_sg_template().get('security_group')
+ self._create_network()
+ db_inst = fakes.stub_instance(id=1, nw_cache=[], security_groups=[])
+ _context = context.get_admin_context()
+ instance = instance_obj.Instance._from_db_object(
+ _context, instance_obj.Instance(), db_inst,
+ expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
+ neutron = neutron_api.API()
+ with mock.patch.object(nova.db, 'instance_get_by_uuid',
+ return_value=db_inst):
+ neutron.allocate_for_instance(_context, instance,
+ security_groups=[sg['id']])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
+ % sg['id'])
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
+ req, sg['id'])
+
+ def test_associate_non_running_instance(self):
+        # Neutron does not care if the instance is running or not. When the
+        # instance is detected by Neutron, it will push down the security
+        # group policy to it.
+ pass
+
+ def test_associate_already_associated_security_group_to_instance(self):
+        # Neutron security groups do not raise an error if you update a
+        # port adding a security group to it that was already associated
+        # to the port. This is because PUT semantics are used.
+ pass
+
+ def test_associate(self):
+ sg = self._create_sg_template().get('security_group')
+ net = self._create_network()
+ self._create_port(
+ network_id=net['network']['id'], security_groups=[sg['id']],
+ device_id=test_security_groups.FAKE_UUID1)
+
+ self.stubs.Set(nova.db, 'instance_get',
+ test_security_groups.return_server)
+ body = dict(addSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.manager._addSecurityGroup(req, '1', body)
+
+ def test_associate_duplicate_names(self):
+ sg1 = self._create_security_group(name='sg1',
+ description='sg1')['security_group']
+ self._create_security_group(name='sg1',
+ description='sg1')['security_group']
+ net = self._create_network()
+ self._create_port(
+ network_id=net['network']['id'], security_groups=[sg1['id']],
+ device_id=test_security_groups.FAKE_UUID1)
+
+ self.stubs.Set(nova.db, 'instance_get',
+ test_security_groups.return_server)
+ body = dict(addSecurityGroup=dict(name="sg1"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.manager._addSecurityGroup, req, '1', body)
+
+ def test_associate_port_security_enabled_true(self):
+ sg = self._create_sg_template().get('security_group')
+ net = self._create_network()
+ self._create_port(
+ network_id=net['network']['id'], security_groups=[sg['id']],
+ port_security_enabled=True,
+ device_id=test_security_groups.FAKE_UUID1)
+
+ self.stubs.Set(nova.db, 'instance_get',
+ test_security_groups.return_server)
+ body = dict(addSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.manager._addSecurityGroup(req, '1', body)
+
+ def test_associate_port_security_enabled_false(self):
+ self._create_sg_template().get('security_group')
+ net = self._create_network()
+ self._create_port(
+ network_id=net['network']['id'], port_security_enabled=False,
+ device_id=test_security_groups.FAKE_UUID1)
+
+ self.stubs.Set(nova.db, 'instance_get',
+ test_security_groups.return_server)
+ body = dict(addSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._addSecurityGroup,
+ req, '1', body)
+
+    def test_disassociate_by_non_existing_security_group_name(self):
+        """Removing an unknown security group name must return 404."""
+        self.stubs.Set(nova.db, 'instance_get',
+                       test_security_groups.return_server)
+        body = dict(removeSecurityGroup=dict(name='non-existing'))
+
+        req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+        self.assertRaises(webob.exc.HTTPNotFound,
+                          self.manager._removeSecurityGroup, req, '1', body)
+
+    def test_disassociate_non_running_instance(self):
+        # Neutron does not care if the instance is running or not. When
+        # the instance is detected by neutron it will push down the
+        # security group policy to it.
+        pass
+
+    def test_disassociate_already_associated_security_group_to_instance(self):
+        # Neutron security groups do not raise an error here: the port
+        # update uses PUT semantics, so the supplied security group list
+        # simply replaces the current one and the call is idempotent.
+        pass
+
+    def test_disassociate(self):
+        """A group associated with the instance's port can be removed."""
+        sg = self._create_sg_template().get('security_group')
+        net = self._create_network()
+        self._create_port(
+            network_id=net['network']['id'], security_groups=[sg['id']],
+            device_id=test_security_groups.FAKE_UUID1)
+
+        self.stubs.Set(nova.db, 'instance_get',
+                       test_security_groups.return_server)
+        body = dict(removeSecurityGroup=dict(name="test"))
+
+        req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+        self.manager._removeSecurityGroup(req, '1', body)
+
+    def test_get_raises_no_unique_match_error(self):
+        """NeutronClientNoUniqueMatch is surfaced as nova NoUniqueMatch."""
+
+        def fake_find_resourceid_by_name_or_id(client, param, name,
+                                               project_id=None):
+            raise n_exc.NeutronClientNoUniqueMatch()
+
+        self.stubs.Set(neutronv20, 'find_resourceid_by_name_or_id',
+                       fake_find_resourceid_by_name_or_id)
+        security_group_api = self.controller.security_group_api
+        self.assertRaises(exception.NoUniqueMatch, security_group_api.get,
+                          context.get_admin_context(), 'foobar')
+
+    def test_get_instances_security_groups_bindings(self):
+        """Bindings map instance uuid -> list of its security group names."""
+        servers = [{'id': test_security_groups.FAKE_UUID1},
+                   {'id': test_security_groups.FAKE_UUID2}]
+        sg1 = self._create_sg_template(name='test1').get('security_group')
+        sg2 = self._create_sg_template(name='test2').get('security_group')
+        # test name='' is replaced with id
+        sg3 = self._create_sg_template(name='').get('security_group')
+        net = self._create_network()
+        self._create_port(
+            network_id=net['network']['id'], security_groups=[sg1['id'],
+                                                              sg2['id']],
+            device_id=test_security_groups.FAKE_UUID1)
+        self._create_port(
+            network_id=net['network']['id'], security_groups=[sg2['id'],
+                                                              sg3['id']],
+            device_id=test_security_groups.FAKE_UUID2)
+        expected = {test_security_groups.FAKE_UUID1: [{'name': sg1['name']},
+                                                      {'name': sg2['name']}],
+                    test_security_groups.FAKE_UUID2: [{'name': sg2['name']},
+                                                      {'name': sg3['id']}]}
+        security_group_api = self.controller.security_group_api
+        bindings = (
+            security_group_api.get_instances_security_groups_bindings(
+                context.get_admin_context(), servers))
+        self.assertEqual(bindings, expected)
+
+    def test_get_instance_security_groups(self):
+        """Groups for one instance; unnamed groups are reported by id."""
+        sg1 = self._create_sg_template(name='test1').get('security_group')
+        sg2 = self._create_sg_template(name='test2').get('security_group')
+        # test name='' is replaced with id
+        sg3 = self._create_sg_template(name='').get('security_group')
+        net = self._create_network()
+        self._create_port(
+            network_id=net['network']['id'], security_groups=[sg1['id'],
+                                                              sg2['id'],
+                                                              sg3['id']],
+            device_id=test_security_groups.FAKE_UUID1)
+
+        expected = [{'name': sg1['name']}, {'name': sg2['name']},
+                    {'name': sg3['id']}]
+        security_group_api = self.controller.security_group_api
+        sgs = security_group_api.get_instance_security_groups(
+            context.get_admin_context(), test_security_groups.FAKE_UUID1)
+        self.assertEqual(sgs, expected)
+
+    @mock.patch('nova.network.security_group.neutron_driver.SecurityGroupAPI.'
+                'get_instances_security_groups_bindings')
+    def test_get_security_group_empty_for_instance(self, neutron_sg_bind_mock):
+        """An instance absent from the bindings yields an empty list."""
+        servers = [{'id': test_security_groups.FAKE_UUID1}]
+        neutron_sg_bind_mock.return_value = {}
+
+        security_group_api = self.controller.security_group_api
+        ctx = context.get_admin_context()
+        sgs = security_group_api.get_instance_security_groups(
+            ctx, test_security_groups.FAKE_UUID1)
+
+        neutron_sg_bind_mock.assert_called_once_with(ctx, servers, False)
+        self.assertEqual([], sgs)
+
+    def test_create_port_with_sg_and_port_security_enabled_true(self):
+        """A port with port security on keeps its requested groups."""
+        sg1 = self._create_sg_template(name='test1').get('security_group')
+        net = self._create_network()
+        self._create_port(
+            network_id=net['network']['id'], security_groups=[sg1['id']],
+            port_security_enabled=True,
+            device_id=test_security_groups.FAKE_UUID1)
+        security_group_api = self.controller.security_group_api
+        sgs = security_group_api.get_instance_security_groups(
+            context.get_admin_context(), test_security_groups.FAKE_UUID1)
+        self.assertEqual(sgs, [{'name': 'test1'}])
+
+    def test_create_port_with_sg_and_port_security_enabled_false(self):
+        """Groups on a port with port security off must be rejected."""
+        sg1 = self._create_sg_template(name='test1').get('security_group')
+        net = self._create_network()
+        self.assertRaises(exception.SecurityGroupCannotBeApplied,
+                          self._create_port,
+                          network_id=net['network']['id'],
+                          security_groups=[sg1['id']],
+                          port_security_enabled=False,
+                          device_id=test_security_groups.FAKE_UUID1)
+
+
+class TestNeutronSecurityGroupsV2(TestNeutronSecurityGroupsV21):
+    """Re-run the v2.1 neutron security group tests against the v2
+    controllers.
+    """
+
+    controller_cls = security_groups.SecurityGroupController
+    server_secgrp_ctl_cls = security_groups.ServerSecurityGroupController
+    secgrp_act_ctl_cls = security_groups.SecurityGroupActionController
+
+
+class TestNeutronSecurityGroupRulesTestCase(TestNeutronSecurityGroupsTestCase):
+ def setUp(self):
+ super(TestNeutronSecurityGroupRulesTestCase, self).setUp()
+ id1 = '11111111-1111-1111-1111-111111111111'
+ sg_template1 = test_security_groups.security_group_template(
+ security_group_rules=[], id=id1)
+ id2 = '22222222-2222-2222-2222-222222222222'
+ sg_template2 = test_security_groups.security_group_template(
+ security_group_rules=[], id=id2)
+ self.controller_sg = security_groups.SecurityGroupController()
+ neutron = get_client()
+ neutron._fake_security_groups[id1] = sg_template1
+ neutron._fake_security_groups[id2] = sg_template2
+
+ def tearDown(self):
+ neutronv2.get_client = self.original_client
+ get_client()._reset()
+ super(TestNeutronSecurityGroupsTestCase, self).tearDown()
+
+
+class _TestNeutronSecurityGroupRulesBase(object):
+    """Rule tests shared by the v2.0 and v2.1 API TestCases.
+
+    NOTE(review): relies on self.sg1, self.sg2, self.controller and
+    self.controller_sg being provided by the concrete TestCase this is
+    mixed into -- presumably set up in test_security_groups; verify there.
+    """
+
+    def test_create_add_existing_rules_by_cidr(self):
+        """Creating a duplicate CIDR rule must return 400."""
+        sg = test_security_groups.security_group_template()
+        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+        self.controller_sg.create(req, {'security_group': sg})
+        rule = test_security_groups.security_group_rule_template(
+            cidr='15.0.0.0/8', parent_group_id=self.sg2['id'])
+        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+        self.controller.create(req, {'security_group_rule': rule})
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+                          req, {'security_group_rule': rule})
+
+    def test_create_add_existing_rules_by_group_id(self):
+        """Creating a duplicate source-group rule must return 400."""
+        sg = test_security_groups.security_group_template()
+        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+        self.controller_sg.create(req, {'security_group': sg})
+        rule = test_security_groups.security_group_rule_template(
+            group=self.sg1['id'], parent_group_id=self.sg2['id'])
+        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+        self.controller.create(req, {'security_group_rule': rule})
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+                          req, {'security_group_rule': rule})
+
+    def test_delete(self):
+        """A created rule can be deleted by its id."""
+        rule = test_security_groups.security_group_rule_template(
+            parent_group_id=self.sg2['id'])
+
+        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+        res_dict = self.controller.create(req, {'security_group_rule': rule})
+        security_group_rule = res_dict['security_group_rule']
+        req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
+                                      % security_group_rule['id'])
+        self.controller.delete(req, security_group_rule['id'])
+
+    def test_create_rule_quota_limit(self):
+        # Enforced by neutron
+        pass
+
+
+class TestNeutronSecurityGroupRulesV2(
+        _TestNeutronSecurityGroupRulesBase,
+        test_security_groups.TestSecurityGroupRulesV2,
+        TestNeutronSecurityGroupRulesTestCase):
+    """Run the shared rule tests against the v2.0 API."""
+    pass
+
+
+class TestNeutronSecurityGroupRulesV21(
+        _TestNeutronSecurityGroupRulesBase,
+        test_security_groups.TestSecurityGroupRulesV21,
+        TestNeutronSecurityGroupRulesTestCase):
+    """Run the shared rule tests against the v2.1 API."""
+    pass
+
+
+class TestNeutronSecurityGroupsXMLDeserializer(
+        test_security_groups.TestSecurityGroupXMLDeserializer,
+        TestNeutronSecurityGroupsTestCase):
+    """Run the XML deserializer tests in the neutron test environment."""
+    pass
+
+
+class TestNeutronSecurityGroupsXMLSerializer(
+        test_security_groups.TestSecurityGroupXMLSerializer,
+        TestNeutronSecurityGroupsTestCase):
+    """Run the XML serializer tests in the neutron test environment."""
+    pass
+
+
+class TestNeutronSecurityGroupsOutputTest(TestNeutronSecurityGroupsTestCase):
+    """End-to-end checks that server API responses include security
+    groups when the neutron driver provides them.
+    """
+
+    content_type = 'application/json'
+
+    def setUp(self):
+        super(TestNeutronSecurityGroupsOutputTest, self).setUp()
+        fakes.stub_out_nw_api(self.stubs)
+        self.controller = security_groups.SecurityGroupController()
+        self.stubs.Set(compute.api.API, 'get',
+                       test_security_groups.fake_compute_get)
+        self.stubs.Set(compute.api.API, 'get_all',
+                       test_security_groups.fake_compute_get_all)
+        self.stubs.Set(compute.api.API, 'create',
+                       test_security_groups.fake_compute_create)
+        self.stubs.Set(neutron_driver.SecurityGroupAPI,
+                       'get_instances_security_groups_bindings',
+                       (test_security_groups.
+                        fake_get_instances_security_groups_bindings))
+        self.flags(
+            osapi_compute_extension=[
+                'nova.api.openstack.compute.contrib.select_extensions'],
+            osapi_compute_ext_list=['Security_groups'])
+
+    def _make_request(self, url, body=None):
+        # POST when a body is supplied, otherwise GET.
+        req = webob.Request.blank(url)
+        if body:
+            req.method = 'POST'
+            req.body = self._encode_body(body)
+        req.content_type = self.content_type
+        req.headers['Accept'] = self.content_type
+        res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+        return res
+
+    def _encode_body(self, body):
+        return jsonutils.dumps(body)
+
+    def _get_server(self, body):
+        return jsonutils.loads(body).get('server')
+
+    def _get_servers(self, body):
+        return jsonutils.loads(body).get('servers')
+
+    def _get_groups(self, server):
+        return server.get('security_groups')
+
+    def test_create(self):
+        """Created servers echo back their requested security groups."""
+        url = '/v2/fake/servers'
+        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+        security_groups = [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
+        for security_group in security_groups:
+            sg = test_security_groups.security_group_template(
+                name=security_group['name'])
+            self.controller.create(req, {'security_group': sg})
+
+        server = dict(name='server_test', imageRef=image_uuid, flavorRef=2,
+                      security_groups=security_groups)
+        res = self._make_request(url, {'server': server})
+        self.assertEqual(res.status_int, 202)
+        server = self._get_server(res.body)
+        for i, group in enumerate(self._get_groups(server)):
+            name = 'fake-2-%s' % i
+            self.assertEqual(group.get('name'), name)
+
+    def test_create_server_get_default_security_group(self):
+        """A server created with no groups lands in 'default'."""
+        url = '/v2/fake/servers'
+        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+        server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
+        res = self._make_request(url, {'server': server})
+        self.assertEqual(res.status_int, 202)
+        server = self._get_server(res.body)
+        group = self._get_groups(server)[0]
+        self.assertEqual(group.get('name'), 'default')
+
+    def test_show(self):
+        """GET of a server returns the same groups reported at create."""
+        def fake_get_instance_security_groups(inst, context, id):
+            return [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
+
+        self.stubs.Set(neutron_driver.SecurityGroupAPI,
+                       'get_instance_security_groups',
+                       fake_get_instance_security_groups)
+
+        url = '/v2/fake/servers'
+        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+        security_groups = [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]
+        for security_group in security_groups:
+            sg = test_security_groups.security_group_template(
+                name=security_group['name'])
+            self.controller.create(req, {'security_group': sg})
+        server = dict(name='server_test', imageRef=image_uuid, flavorRef=2,
+                      security_groups=security_groups)
+
+        res = self._make_request(url, {'server': server})
+        self.assertEqual(res.status_int, 202)
+        server = self._get_server(res.body)
+        for i, group in enumerate(self._get_groups(server)):
+            name = 'fake-2-%s' % i
+            self.assertEqual(group.get('name'), name)
+
+        # Test that show (GET) returns the same information as create (POST)
+        url = '/v2/fake/servers/' + test_security_groups.UUID3
+        res = self._make_request(url)
+        self.assertEqual(res.status_int, 200)
+        server = self._get_server(res.body)
+
+        for i, group in enumerate(self._get_groups(server)):
+            name = 'fake-2-%s' % i
+            self.assertEqual(group.get('name'), name)
+
+    def test_detail(self):
+        """Detail listing includes each server's groups."""
+        url = '/v2/fake/servers/detail'
+        res = self._make_request(url)
+
+        self.assertEqual(res.status_int, 200)
+        for i, server in enumerate(self._get_servers(res.body)):
+            for j, group in enumerate(self._get_groups(server)):
+                name = 'fake-%s-%s' % (i, j)
+                self.assertEqual(group.get('name'), name)
+
+    def test_no_instance_passthrough_404(self):
+        """InstanceNotFound from compute surfaces as a 404 response."""
+
+        def fake_compute_get(*args, **kwargs):
+            raise exception.InstanceNotFound(instance_id='fake')
+
+        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+        url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
+        res = self._make_request(url)
+
+        self.assertEqual(res.status_int, 404)
+
+
+class TestNeutronSecurityGroupsOutputXMLTest(
+        TestNeutronSecurityGroupsOutputTest):
+    """Re-run the output tests with XML request/response bodies."""
+
+    content_type = 'application/xml'
+
+    class MinimalCreateServerTemplate(xmlutil.TemplateBuilder):
+        # Just enough of the server-create template to serialize the
+        # fields these tests send, including nested security_groups.
+        def construct(self):
+            root = xmlutil.TemplateElement('server', selector='server')
+            root.set('name')
+            root.set('id')
+            root.set('imageRef')
+            root.set('flavorRef')
+            elem = xmlutil.SubTemplateElement(root, 'security_groups')
+            sg = xmlutil.SubTemplateElement(elem, 'security_group',
+                                            selector='security_groups')
+            sg.set('name')
+            return xmlutil.MasterTemplate(root, 1,
+                                          nsmap={None: xmlutil.XMLNS_V11})
+
+    def _encode_body(self, body):
+        serializer = self.MinimalCreateServerTemplate()
+        return serializer.serialize(body)
+
+    def _get_server(self, body):
+        return etree.XML(body)
+
+    def _get_servers(self, body):
+        return etree.XML(body).getchildren()
+
+    def _get_groups(self, server):
+        # NOTE(vish): we are adding security groups without an extension
+        #             namespace so we don't break people using the existing
+        #             functionality, but that means we need to use find with
+        #             the existing server namespace.
+        namespace = server.nsmap[None]
+        return server.find('{%s}security_groups' % namespace).getchildren()
+
+
+def get_client(context=None, admin=False):
+    """Stand-in for neutronv2.get_client; context/admin are ignored."""
+    return MockClient()
+
+
+class MockClient(object):
+
+ # Needs to be global to survive multiple calls to get_client.
+ _fake_security_groups = {}
+ _fake_ports = {}
+ _fake_networks = {}
+ _fake_subnets = {}
+ _fake_security_group_rules = {}
+
+ def __init__(self):
+ # add default security group
+ if not len(self._fake_security_groups):
+ ret = {'name': 'default', 'description': 'default',
+ 'tenant_id': 'fake_tenant', 'security_group_rules': [],
+ 'id': str(uuid.uuid4())}
+ self._fake_security_groups[ret['id']] = ret
+
+ def _reset(self):
+ self._fake_security_groups.clear()
+ self._fake_ports.clear()
+ self._fake_networks.clear()
+ self._fake_subnets.clear()
+ self._fake_security_group_rules.clear()
+
+ def create_security_group(self, body=None):
+ s = body.get('security_group')
+ if len(s.get('name')) > 255 or len(s.get('description')) > 255:
+ msg = 'Security Group name great than 255'
+ raise n_exc.NeutronClientException(message=msg, status_code=401)
+ ret = {'name': s.get('name'), 'description': s.get('description'),
+ 'tenant_id': 'fake', 'security_group_rules': [],
+ 'id': str(uuid.uuid4())}
+
+ self._fake_security_groups[ret['id']] = ret
+ return {'security_group': ret}
+
+ def create_network(self, body):
+ n = body.get('network')
+ ret = {'status': 'ACTIVE', 'subnets': [], 'name': n.get('name'),
+ 'admin_state_up': n.get('admin_state_up', True),
+ 'tenant_id': 'fake_tenant',
+ 'id': str(uuid.uuid4())}
+ if 'port_security_enabled' in n:
+ ret['port_security_enabled'] = n['port_security_enabled']
+ self._fake_networks[ret['id']] = ret
+ return {'network': ret}
+
+ def create_subnet(self, body):
+ s = body.get('subnet')
+ try:
+ net = self._fake_networks[s.get('network_id')]
+ except KeyError:
+ msg = 'Network %s not found' % s.get('network_id')
+ raise n_exc.NeutronClientException(message=msg, status_code=404)
+ ret = {'name': s.get('name'), 'network_id': s.get('network_id'),
+ 'tenant_id': 'fake_tenant', 'cidr': s.get('cidr'),
+ 'id': str(uuid.uuid4()), 'gateway_ip': '10.0.0.1'}
+ net['subnets'].append(ret['id'])
+ self._fake_networks[net['id']] = net
+ self._fake_subnets[ret['id']] = ret
+ return {'subnet': ret}
+
+ def create_port(self, body):
+ p = body.get('port')
+ ret = {'status': 'ACTIVE', 'id': str(uuid.uuid4()),
+ 'mac_address': p.get('mac_address', 'fa:16:3e:b8:f5:fb'),
+ 'device_id': p.get('device_id', str(uuid.uuid4())),
+ 'admin_state_up': p.get('admin_state_up', True),
+ 'security_groups': p.get('security_groups', []),
+ 'network_id': p.get('network_id'),
+ 'binding:vnic_type':
+ p.get('binding:vnic_type') or model.VNIC_TYPE_NORMAL}
+
+ network = self._fake_networks[p['network_id']]
+ if 'port_security_enabled' in p:
+ ret['port_security_enabled'] = p['port_security_enabled']
+ elif 'port_security_enabled' in network:
+ ret['port_security_enabled'] = network['port_security_enabled']
+
+ port_security = ret.get('port_security_enabled', True)
+ # port_security must be True if security groups are present
+ if not port_security and ret['security_groups']:
+ raise exception.SecurityGroupCannotBeApplied()
+
+ if network['subnets']:
+ ret['fixed_ips'] = [{'subnet_id': network['subnets'][0],
+ 'ip_address': '10.0.0.1'}]
+ if not ret['security_groups'] and (port_security is None or
+ port_security is True):
+ for security_group in self._fake_security_groups.values():
+ if security_group['name'] == 'default':
+ ret['security_groups'] = [security_group['id']]
+ break
+ self._fake_ports[ret['id']] = ret
+ return {'port': ret}
+
+ def create_security_group_rule(self, body):
+ # does not handle bulk case so just picks rule[0]
+ r = body.get('security_group_rules')[0]
+ fields = ['direction', 'protocol', 'port_range_min', 'port_range_max',
+ 'ethertype', 'remote_ip_prefix', 'tenant_id',
+ 'security_group_id', 'remote_group_id']
+ ret = {}
+ for field in fields:
+ ret[field] = r.get(field)
+ ret['id'] = str(uuid.uuid4())
+ self._fake_security_group_rules[ret['id']] = ret
+ return {'security_group_rules': [ret]}
+
+ def show_security_group(self, security_group, **_params):
+ try:
+ sg = self._fake_security_groups[security_group]
+ except KeyError:
+ msg = 'Security Group %s not found' % security_group
+ raise n_exc.NeutronClientException(message=msg, status_code=404)
+ for security_group_rule in self._fake_security_group_rules.values():
+ if security_group_rule['security_group_id'] == sg['id']:
+ sg['security_group_rules'].append(security_group_rule)
+
+ return {'security_group': sg}
+
+ def show_security_group_rule(self, security_group_rule, **_params):
+ try:
+ return {'security_group_rule':
+ self._fake_security_group_rules[security_group_rule]}
+ except KeyError:
+ msg = 'Security Group rule %s not found' % security_group_rule
+ raise n_exc.NeutronClientException(message=msg, status_code=404)
+
+ def show_network(self, network, **_params):
+ try:
+ return {'network':
+ self._fake_networks[network]}
+ except KeyError:
+ msg = 'Network %s not found' % network
+ raise n_exc.NeutronClientException(message=msg, status_code=404)
+
+ def show_port(self, port, **_params):
+ try:
+ return {'port':
+ self._fake_ports[port]}
+ except KeyError:
+ msg = 'Port %s not found' % port
+ raise n_exc.NeutronClientException(message=msg, status_code=404)
+
+ def show_subnet(self, subnet, **_params):
+ try:
+ return {'subnet':
+ self._fake_subnets[subnet]}
+ except KeyError:
+ msg = 'Port %s not found' % subnet
+ raise n_exc.NeutronClientException(message=msg, status_code=404)
+
+ def list_security_groups(self, **_params):
+ ret = []
+ for security_group in self._fake_security_groups.values():
+ names = _params.get('name')
+ if names:
+ if not isinstance(names, list):
+ names = [names]
+ for name in names:
+ if security_group.get('name') == name:
+ ret.append(security_group)
+ ids = _params.get('id')
+ if ids:
+ if not isinstance(ids, list):
+ ids = [ids]
+ for id in ids:
+ if security_group.get('id') == id:
+ ret.append(security_group)
+ elif not (names or ids):
+ ret.append(security_group)
+ return {'security_groups': ret}
+
+ def list_networks(self, **_params):
+ # neutronv2/api.py _get_available_networks calls this assuming
+ # search_opts filter "shared" is implemented and not ignored
+ shared = _params.get("shared", None)
+ if shared:
+ return {'networks': []}
+ else:
+ return {'networks':
+ [network for network in self._fake_networks.values()]}
+
+ def list_ports(self, **_params):
+ ret = []
+ device_id = _params.get('device_id')
+ for port in self._fake_ports.values():
+ if device_id:
+ if port['device_id'] in device_id:
+ ret.append(port)
+ else:
+ ret.append(port)
+ return {'ports': ret}
+
+ def list_subnets(self, **_params):
+ return {'subnets':
+ [subnet for subnet in self._fake_subnets.values()]}
+
+ def list_floatingips(self, **_params):
+ return {'floatingips': []}
+
+ def delete_security_group(self, security_group):
+ self.show_security_group(security_group)
+ ports = self.list_ports()
+ for port in ports.get('ports'):
+ for sg_port in port['security_groups']:
+ if sg_port == security_group:
+ msg = ('Unable to delete Security group %s in use'
+ % security_group)
+ raise n_exc.NeutronClientException(message=msg,
+ status_code=409)
+ del self._fake_security_groups[security_group]
+
+ def delete_security_group_rule(self, security_group_rule):
+ self.show_security_group_rule(security_group_rule)
+ del self._fake_security_group_rules[security_group_rule]
+
+ def delete_network(self, network):
+ self.show_network(network)
+ self._check_ports_on_network(network)
+ for subnet in self._fake_subnets.values():
+ if subnet['network_id'] == network:
+ del self._fake_subnets[subnet['id']]
+ del self._fake_networks[network]
+
+ def delete_subnet(self, subnet):
+ subnet = self.show_subnet(subnet).get('subnet')
+ self._check_ports_on_network(subnet['network_id'])
+ del self._fake_subnet[subnet]
+
+ def delete_port(self, port):
+ self.show_port(port)
+ del self._fake_ports[port]
+
+ def update_port(self, port, body=None):
+ self.show_port(port)
+ self._fake_ports[port].update(body['port'])
+ return {'port': self._fake_ports[port]}
+
+ def list_extensions(self, **_parms):
+ return {'extensions': []}
+
+ def _check_ports_on_network(self, network):
+ ports = self.list_ports()
+ for port in ports:
+ if port['network_id'] == network:
+ msg = ('Unable to complete operation on network %s. There is '
+ 'one or more ports still in use on the network'
+ % network)
+ raise n_exc.NeutronClientException(message=msg, status_code=409)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_quota_classes.py b/nova/tests/unit/api/openstack/compute/contrib/test_quota_classes.py
new file mode 100644
index 0000000000..228b44f369
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_quota_classes.py
@@ -0,0 +1,222 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import webob
+
+from nova.api.openstack.compute.contrib import quota_classes
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+def quota_set(class_name):
+    """Return the expected quota_class_set response body for *class_name*."""
+    return {'quota_class_set': {'id': class_name, 'metadata_items': 128,
+                                'ram': 51200, 'floating_ips': 10,
+                                'fixed_ips': -1, 'instances': 10,
+                                'injected_files': 5, 'cores': 20,
+                                'injected_file_content_bytes': 10240,
+                                'security_groups': 10,
+                                'security_group_rules': 20, 'key_pairs': 100,
+                                'injected_file_path_bytes': 255}}
+
+
+class QuotaClassSetsTest(test.TestCase):
+    """Tests for the os-quota-class-sets v2 API controller."""
+
+    def setUp(self):
+        super(QuotaClassSetsTest, self).setUp()
+        self.ext_mgr = extensions.ExtensionManager()
+        self.ext_mgr.extensions = {}
+        self.controller = quota_classes.QuotaClassSetsController(self.ext_mgr)
+
+    def test_format_quota_set(self):
+        """_format_quota_set wraps raw quotas with the class id."""
+        raw_quota_set = {
+            'instances': 10,
+            'cores': 20,
+            'ram': 51200,
+            'floating_ips': 10,
+            'fixed_ips': -1,
+            'metadata_items': 128,
+            'injected_files': 5,
+            'injected_file_path_bytes': 255,
+            'injected_file_content_bytes': 10240,
+            'security_groups': 10,
+            'security_group_rules': 20,
+            'key_pairs': 100,
+            }
+
+        quota_set = self.controller._format_quota_set('test_class',
+                                                      raw_quota_set)
+        qs = quota_set['quota_class_set']
+
+        self.assertEqual(qs['id'], 'test_class')
+        self.assertEqual(qs['instances'], 10)
+        self.assertEqual(qs['cores'], 20)
+        self.assertEqual(qs['ram'], 51200)
+        self.assertEqual(qs['floating_ips'], 10)
+        self.assertEqual(qs['fixed_ips'], -1)
+        self.assertEqual(qs['metadata_items'], 128)
+        self.assertEqual(qs['injected_files'], 5)
+        self.assertEqual(qs['injected_file_path_bytes'], 255)
+        self.assertEqual(qs['injected_file_content_bytes'], 10240)
+        self.assertEqual(qs['security_groups'], 10)
+        self.assertEqual(qs['security_group_rules'], 20)
+        self.assertEqual(qs['key_pairs'], 100)
+
+    def test_quotas_show_as_admin(self):
+        """Admins may read a quota class's quotas."""
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake4/os-quota-class-sets/test_class',
+            use_admin_context=True)
+        res_dict = self.controller.show(req, 'test_class')
+
+        self.assertEqual(res_dict, quota_set('test_class'))
+
+    def test_quotas_show_as_unauthorized_user(self):
+        """Non-admin show is rejected with 403."""
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake4/os-quota-class-sets/test_class')
+        self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
+                          req, 'test_class')
+
+    def test_quotas_update_as_admin(self):
+        """Admin update echoes the updated quota set back."""
+        body = {'quota_class_set': {'instances': 50, 'cores': 50,
+                                    'ram': 51200, 'floating_ips': 10,
+                                    'fixed_ips': -1, 'metadata_items': 128,
+                                    'injected_files': 5,
+                                    'injected_file_content_bytes': 10240,
+                                    'injected_file_path_bytes': 255,
+                                    'security_groups': 10,
+                                    'security_group_rules': 20,
+                                    'key_pairs': 100}}
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake4/os-quota-class-sets/test_class',
+            use_admin_context=True)
+        res_dict = self.controller.update(req, 'test_class', body)
+
+        self.assertEqual(res_dict, body)
+
+    def test_quotas_update_as_user(self):
+        """Non-admin update is rejected with 403."""
+        body = {'quota_class_set': {'instances': 50, 'cores': 50,
+                                    'ram': 51200, 'floating_ips': 10,
+                                    'fixed_ips': -1, 'metadata_items': 128,
+                                    'injected_files': 5,
+                                    'injected_file_content_bytes': 10240,
+                                    'security_groups': 10,
+                                    'security_group_rules': 20,
+                                    'key_pairs': 100,
+                                    }}
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake4/os-quota-class-sets/test_class')
+        self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
+                          req, 'test_class', body)
+
+    def test_quotas_update_with_empty_body(self):
+        """An empty request body is a 400."""
+        body = {}
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake4/os-quota-class-sets/test_class',
+            use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+                          req, 'test_class', body)
+
+    def test_quotas_update_with_non_integer(self):
+        """Non-integer quota values (str, float, unicode) are 400s."""
+        body = {'quota_class_set': {'instances': "abc"}}
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake4/os-quota-class-sets/test_class',
+            use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+                          req, 'test_class', body)
+
+        body = {'quota_class_set': {'instances': 50.5}}
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake4/os-quota-class-sets/test_class',
+            use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+                          req, 'test_class', body)
+
+        body = {'quota_class_set': {
+                'instances': u'\u30aa\u30fc\u30d7\u30f3'}}
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake4/os-quota-class-sets/test_class',
+            use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+                          req, 'test_class', body)
+
+
+class QuotaTemplateXMLSerializerTest(test.TestCase):
+    """Round-trip tests for the quota class XML (de)serializers."""
+
+    def setUp(self):
+        super(QuotaTemplateXMLSerializerTest, self).setUp()
+        self.serializer = quota_classes.QuotaClassTemplate()
+        self.deserializer = wsgi.XMLDeserializer()
+
+    def test_serializer(self):
+        """Every quota except 'id' becomes a child element."""
+        exemplar = dict(quota_class_set=dict(
+            id='test_class',
+            metadata_items=10,
+            injected_file_path_bytes=255,
+            injected_file_content_bytes=20,
+            ram=50,
+            floating_ips=60,
+            fixed_ips=-1,
+            instances=70,
+            injected_files=80,
+            security_groups=10,
+            security_group_rules=20,
+            key_pairs=100,
+            cores=90))
+        text = self.serializer.serialize(exemplar)
+
+        tree = etree.fromstring(text)
+
+        self.assertEqual('quota_class_set', tree.tag)
+        self.assertEqual('test_class', tree.get('id'))
+        # 'id' is serialized as an attribute, hence len - 1 child nodes.
+        self.assertEqual(len(exemplar['quota_class_set']) - 1, len(tree))
+        for child in tree:
+            self.assertIn(child.tag, exemplar['quota_class_set'])
+            self.assertEqual(int(child.text),
+                             exemplar['quota_class_set'][child.tag])
+
+    def test_deserializer(self):
+        """XML quota values deserialize to strings in the body dict."""
+        exemplar = dict(quota_class_set=dict(
+            metadata_items='10',
+            injected_file_content_bytes='20',
+            ram='50',
+            floating_ips='60',
+            fixed_ips='-1',
+            instances='70',
+            injected_files='80',
+            security_groups='10',
+            security_group_rules='20',
+            key_pairs='100',
+            cores='90'))
+        intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+                  '<quota_class_set>'
+                  '<metadata_items>10</metadata_items>'
+                  '<injected_file_content_bytes>20'
+                  '</injected_file_content_bytes>'
+                  '<ram>50</ram>'
+                  '<floating_ips>60</floating_ips>'
+                  '<fixed_ips>-1</fixed_ips>'
+                  '<instances>70</instances>'
+                  '<injected_files>80</injected_files>'
+                  '<cores>90</cores>'
+                  '<security_groups>10</security_groups>'
+                  '<security_group_rules>20</security_group_rules>'
+                  '<key_pairs>100</key_pairs>'
+                  '</quota_class_set>')
+
+        result = self.deserializer.deserialize(intext)['body']
+        self.assertEqual(result, exemplar)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_quotas.py b/nova/tests/unit/api/openstack/compute/contrib/test_quotas.py
new file mode 100644
index 0000000000..33511b0cc3
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_quotas.py
@@ -0,0 +1,648 @@
+# Copyright 2011 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from lxml import etree
+import mock
+import webob
+
+from nova.api.openstack.compute.contrib import quotas as quotas_v2
+from nova.api.openstack.compute.plugins.v3 import quota_sets as quotas_v21
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova import context as context_maker
+from nova import exception
+from nova import quota
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
def quota_set(id, include_server_group_quotas=True):
    """Return the reference quota_set response body for project *id*.

    The values mirror Nova's default quota limits; the two server-group
    quotas are appended only when *include_server_group_quotas* is true,
    matching the v2.0 os-server-group-quotas extension toggle.
    """
    values = {
        'id': id,
        'metadata_items': 128,
        'ram': 51200,
        'floating_ips': 10,
        'fixed_ips': -1,
        'instances': 10,
        'injected_files': 5,
        'cores': 20,
        'injected_file_content_bytes': 10240,
        'security_groups': 10,
        'security_group_rules': 20,
        'key_pairs': 100,
        'injected_file_path_bytes': 255,
    }
    if include_server_group_quotas:
        values['server_groups'] = 10
        values['server_group_members'] = 10
    return {'quota_set': values}
+
+
class BaseQuotaSetsTest(test.TestCase):
    """Shared helpers for the v2.0 and v2.1 quota-sets API tests.

    Subclasses set ``plugin`` to either the v2.0 contrib module or the
    v2.1 plugin module; these helpers branch on that to absorb the small
    behavioural differences between the two APIs.
    """

    def _is_v20_api_test(self):
        # NOTE(oomichi): If a test is for v2.0 API, this method returns
        # True. Otherwise(v2.1 API test), returns False.
        return (self.plugin == quotas_v2)

    def get_update_expected_response(self, base_body):
        # NOTE(oomichi): "id" parameter is added to a response of
        # "update quota" API since v2.1 API, because it makes the
        # API consistent and it is not backwards incompatible change.
        # This method adds "id" for an expected body of a response.
        if self._is_v20_api_test():
            expected_body = base_body
        else:
            expected_body = copy.deepcopy(base_body)
            expected_body['quota_set'].update({'id': 'update_me'})
        return expected_body

    def setup_mock_for_show(self):
        # The v2.0 controller checks the os-user-quotas extension on show;
        # record that expectation so mox replay succeeds.  v2.1 does not.
        if self._is_v20_api_test():
            self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()

    def setup_mock_for_update(self):
        # The v2.0 controller checks both quota extensions on update.
        if self._is_v20_api_test():
            self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
            self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        self.mox.ReplayAll()

    def get_delete_status_int(self, res):
        if self._is_v20_api_test():
            return res.status_int
        else:
            # NOTE: on v2.1, http status code is set as wsgi_code of API
            # method instead of status_int in a response object.
            return self.controller.delete.wsgi_code
+
+
class QuotaSetsTestV21(BaseQuotaSetsTest):
    """Tests for the v2.1 os-quota-sets controller.

    The v2.0 subclasses below reuse these tests against the contrib
    controller by overriding ``plugin`` and ``_setup_controller``.
    """

    plugin = quotas_v21
    validation_error = exception.ValidationError
    # v2.1 always exposes server-group quotas; v2.0 subclasses toggle this
    # to model the optional os-server-group-quotas extension.
    include_server_group_quotas = True

    def setUp(self):
        super(QuotaSetsTestV21, self).setUp()
        self._setup_controller()
        # Mirrors Nova's default quota limits.
        self.default_quotas = {
            'instances': 10,
            'cores': 20,
            'ram': 51200,
            'floating_ips': 10,
            'fixed_ips': -1,
            'metadata_items': 128,
            'injected_files': 5,
            'injected_file_path_bytes': 255,
            'injected_file_content_bytes': 10240,
            'security_groups': 10,
            'security_group_rules': 20,
            'key_pairs': 100,
        }
        if self.include_server_group_quotas:
            self.default_quotas['server_groups'] = 10
            self.default_quotas['server_group_members'] = 10

    def _setup_controller(self):
        # v2.1 never consults the extension manager, so a bare mock with
        # no recorded expectations suffices.
        self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
        self.controller = self.plugin.QuotaSetsController(self.ext_mgr)

    def test_format_quota_set(self):
        quota_set = self.controller._format_quota_set('1234',
                                                      self.default_quotas)
        qs = quota_set['quota_set']

        self.assertEqual(qs['id'], '1234')
        self.assertEqual(qs['instances'], 10)
        self.assertEqual(qs['cores'], 20)
        self.assertEqual(qs['ram'], 51200)
        self.assertEqual(qs['floating_ips'], 10)
        self.assertEqual(qs['fixed_ips'], -1)
        self.assertEqual(qs['metadata_items'], 128)
        self.assertEqual(qs['injected_files'], 5)
        self.assertEqual(qs['injected_file_path_bytes'], 255)
        self.assertEqual(qs['injected_file_content_bytes'], 10240)
        self.assertEqual(qs['security_groups'], 10)
        self.assertEqual(qs['security_group_rules'], 20)
        self.assertEqual(qs['key_pairs'], 100)
        if self.include_server_group_quotas:
            self.assertEqual(qs['server_groups'], 10)
            self.assertEqual(qs['server_group_members'], 10)

    def test_quotas_defaults(self):
        uri = '/v2/fake_tenant/os-quota-sets/fake_tenant/defaults'

        req = fakes.HTTPRequest.blank(uri)
        res_dict = self.controller.defaults(req, 'fake_tenant')
        self.default_quotas.update({'id': 'fake_tenant'})
        expected = {'quota_set': self.default_quotas}

        self.assertEqual(res_dict, expected)

    def test_quotas_show_as_admin(self):
        self.setup_mock_for_show()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234',
                                      use_admin_context=True)
        res_dict = self.controller.show(req, 1234)

        ref_quota_set = quota_set('1234', self.include_server_group_quotas)
        self.assertEqual(res_dict, ref_quota_set)

    def test_quotas_show_as_unauthorized_user(self):
        # Non-admin context may not inspect another project's quotas.
        self.setup_mock_for_show()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
                          req, 1234)

    def test_quotas_update_as_admin(self):
        self.setup_mock_for_update()
        self.default_quotas.update({
            'instances': 50,
            'cores': 50
        })
        body = {'quota_set': self.default_quotas}
        expected_body = self.get_update_expected_response(body)

        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        res_dict = self.controller.update(req, 'update_me', body=body)
        self.assertEqual(expected_body, res_dict)

    def test_quotas_update_zero_value_as_admin(self):
        # Zero is a valid limit for most quotas; -1 means unlimited.
        self.setup_mock_for_update()
        body = {'quota_set': {'instances': 0, 'cores': 0,
                              'ram': 0, 'floating_ips': 0,
                              'metadata_items': 0,
                              'injected_files': 0,
                              'injected_file_content_bytes': 0,
                              'injected_file_path_bytes': 0,
                              'security_groups': 0,
                              'security_group_rules': 0,
                              'key_pairs': 100, 'fixed_ips': -1}}
        if self.include_server_group_quotas:
            body['quota_set']['server_groups'] = 10
            body['quota_set']['server_group_members'] = 10
        expected_body = self.get_update_expected_response(body)

        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        res_dict = self.controller.update(req, 'update_me', body=body)
        self.assertEqual(expected_body, res_dict)

    def test_quotas_update_as_user(self):
        self.setup_mock_for_update()
        self.default_quotas.update({
            'instances': 50,
            'cores': 50
        })
        body = {'quota_set': self.default_quotas}

        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
                          req, 'update_me', body=body)

    def _quotas_update_bad_request_case(self, body):
        # Helper: an admin update with *body* must raise the per-API
        # validation error class (HTTPBadRequest on v2.0,
        # ValidationError on v2.1).
        self.setup_mock_for_update()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        self.assertRaises(self.validation_error, self.controller.update,
                          req, 'update_me', body=body)

    def test_quotas_update_invalid_key(self):
        # 'instances2' is not a known quota key.
        body = {'quota_set': {'instances2': -2, 'cores': -2,
                              'ram': -2, 'floating_ips': -2,
                              'metadata_items': -2, 'injected_files': -2,
                              'injected_file_content_bytes': -2}}
        self._quotas_update_bad_request_case(body)

    def test_quotas_update_invalid_limit(self):
        # Limits below -1 are rejected.
        body = {'quota_set': {'instances': -2, 'cores': -2,
                              'ram': -2, 'floating_ips': -2, 'fixed_ips': -2,
                              'metadata_items': -2, 'injected_files': -2,
                              'injected_file_content_bytes': -2}}
        self._quotas_update_bad_request_case(body)

    def test_quotas_update_empty_body(self):
        body = {}
        self._quotas_update_bad_request_case(body)

    def test_quotas_update_invalid_value_non_int(self):
        # when PUT non integer value
        self.default_quotas.update({
            'instances': 'test'
        })
        body = {'quota_set': self.default_quotas}
        self._quotas_update_bad_request_case(body)

    def test_quotas_update_invalid_value_with_float(self):
        # when PUT non integer value
        self.default_quotas.update({
            'instances': 50.5
        })
        body = {'quota_set': self.default_quotas}
        self._quotas_update_bad_request_case(body)

    def test_quotas_update_invalid_value_with_unicode(self):
        # when PUT non integer value
        self.default_quotas.update({
            'instances': u'\u30aa\u30fc\u30d7\u30f3'
        })
        body = {'quota_set': self.default_quotas}
        self._quotas_update_bad_request_case(body)

    def test_quotas_delete_as_unauthorized_user(self):
        if self._is_v20_api_test():
            self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.mox.ReplayAll()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
                          req, 1234)

    def test_quotas_delete_as_admin(self):
        if self._is_v20_api_test():
            self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        context = context_maker.get_admin_context()
        self.req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
        self.req.environ['nova.context'] = context
        # Deleting a project's quota set must reset it via
        # destroy_all_by_project and return 202.
        self.mox.StubOutWithMock(quota.QUOTAS,
                                 "destroy_all_by_project")
        quota.QUOTAS.destroy_all_by_project(context, 1234)
        self.mox.ReplayAll()
        res = self.controller.delete(self.req, 1234)
        self.mox.VerifyAll()
        self.assertEqual(202, self.get_delete_status_int(res))
+
+
class QuotaXMLSerializerTest(test.TestCase):
    """Round-trip checks for the v2.0 quota-set XML (de)serializer."""

    def setUp(self):
        super(QuotaXMLSerializerTest, self).setUp()
        self.serializer = quotas_v2.QuotaTemplate()
        self.deserializer = wsgi.XMLDeserializer()

    def test_serializer(self):
        # Every quota should be rendered as a child element, with 'id'
        # becoming an attribute on the root element.
        quotas = {'id': 'project_id',
                  'metadata_items': 10,
                  'injected_file_path_bytes': 255,
                  'injected_file_content_bytes': 20,
                  'ram': 50,
                  'floating_ips': 60,
                  'fixed_ips': -1,
                  'instances': 70,
                  'injected_files': 80,
                  'security_groups': 10,
                  'security_group_rules': 20,
                  'key_pairs': 100,
                  'cores': 90}
        exemplar = {'quota_set': quotas}

        tree = etree.fromstring(self.serializer.serialize(exemplar))

        self.assertEqual('quota_set', tree.tag)
        self.assertEqual('project_id', tree.get('id'))
        # One fewer child than dict entries: 'id' is an attribute.
        self.assertEqual(len(quotas) - 1, len(tree))
        for element in tree:
            self.assertIn(element.tag, quotas)
            self.assertEqual(int(element.text), quotas[element.tag])

    def test_deserializer(self):
        # Values stay strings: the generic deserializer does no coercion.
        expected = {'quota_set': {'metadata_items': '10',
                                  'injected_file_content_bytes': '20',
                                  'ram': '50',
                                  'floating_ips': '60',
                                  'fixed_ips': '-1',
                                  'instances': '70',
                                  'injected_files': '80',
                                  'security_groups': '10',
                                  'security_group_rules': '20',
                                  'key_pairs': '100',
                                  'cores': '90'}}
        intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                  '<quota_set>'
                  '<metadata_items>10</metadata_items>'
                  '<injected_file_content_bytes>20'
                  '</injected_file_content_bytes>'
                  '<ram>50</ram>'
                  '<floating_ips>60</floating_ips>'
                  '<fixed_ips>-1</fixed_ips>'
                  '<instances>70</instances>'
                  '<injected_files>80</injected_files>'
                  '<security_groups>10</security_groups>'
                  '<security_group_rules>20</security_group_rules>'
                  '<key_pairs>100</key_pairs>'
                  '<cores>90</cores>'
                  '</quota_set>')

        result = self.deserializer.deserialize(intext)['body']
        self.assertEqual(result, expected)
+
+
class ExtendedQuotasTestV21(BaseQuotaSetsTest):
    """Tests for quota updates that interact with current usage.

    The fake usage data says 'cores' already has 10 in use + 5 reserved,
    so lowering the limit below 15 must be rejected unless the request
    carries 'force'.
    """

    plugin = quotas_v21

    def setUp(self):
        super(ExtendedQuotasTestV21, self).setUp()
        self._setup_controller()
        self.setup_mock_for_update()

    fake_quotas = {'ram': {'limit': 51200,
                           'in_use': 12800,
                           'reserved': 12800},
                   'cores': {'limit': 20,
                             'in_use': 10,
                             'reserved': 5},
                   'instances': {'limit': 100,
                                 'in_use': 0,
                                 'reserved': 0}}

    def _setup_controller(self):
        self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
        self.controller = self.plugin.QuotaSetsController(self.ext_mgr)

    def fake_get_quotas(self, context, id, user_id=None, usages=False):
        """Stub for QuotaSetsController._get_quotas backed by fake_quotas."""
        if usages:
            return self.fake_quotas
        else:
            return dict((k, v['limit']) for k, v in self.fake_quotas.items())

    def fake_get_settable_quotas(self, context, project_id, user_id=None):
        """Stub for quota.QUOTAS.get_settable_quotas: minimum is usage."""
        return {
            'ram': {'minimum': self.fake_quotas['ram']['in_use'] +
                               self.fake_quotas['ram']['reserved'],
                    'maximum': -1},
            'cores': {'minimum': self.fake_quotas['cores']['in_use'] +
                                 self.fake_quotas['cores']['reserved'],
                      'maximum': -1},
            'instances': {'minimum':
                          self.fake_quotas['instances']['in_use'] +
                          self.fake_quotas['instances']['reserved'],
                          'maximum': -1},
        }

    def test_quotas_update_exceed_in_used(self):
        # Lowering 'cores' below in_use + reserved (15) must 400.
        # NOTE: mock.patch is used as a context manager here; the previous
        # patcher.start() + trailing mock.patch.stopall() never reached
        # stopall() when the assertion failed, leaking the patch into
        # subsequent tests.
        body = {'quota_set': {'cores': 10}}

        with mock.patch.object(quota.QUOTAS,
                               'get_settable_quotas') as get_settable_quotas:
            get_settable_quotas.side_effect = self.fake_get_settable_quotas
            req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                          use_admin_context=True)
            self.assertRaises(webob.exc.HTTPBadRequest,
                              self.controller.update,
                              req, 'update_me', body=body)

    def test_quotas_force_update_exceed_in_used(self):
        # With 'force': 'True' the same below-usage update must succeed.
        body = {'quota_set': {'cores': 10, 'force': 'True'}}

        with mock.patch.object(quota.QUOTAS,
                               'get_settable_quotas') as get_settable_quotas,\
                mock.patch.object(self.plugin.QuotaSetsController,
                                  '_get_quotas') as _get_quotas:
            get_settable_quotas.side_effect = self.fake_get_settable_quotas
            _get_quotas.side_effect = self.fake_get_quotas
            req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                          use_admin_context=True)
            self.controller.update(req, 'update_me', body=body)
+
+
class UserQuotasTestV21(BaseQuotaSetsTest):
    """Tests for per-user quota show/update/delete (``?user_id=`` URLs)."""

    plugin = quotas_v21
    include_server_group_quotas = True

    def setUp(self):
        super(UserQuotasTestV21, self).setUp()
        self._setup_controller()

    def _setup_controller(self):
        # v2.1 never consults the extension manager; bare mock is enough.
        self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
        self.controller = self.plugin.QuotaSetsController(self.ext_mgr)

    def test_user_quotas_show_as_admin(self):
        self.setup_mock_for_show()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1',
                                      use_admin_context=True)
        res_dict = self.controller.show(req, 1234)
        ref_quota_set = quota_set('1234', self.include_server_group_quotas)
        self.assertEqual(res_dict, ref_quota_set)

    def test_user_quotas_show_as_unauthorized_user(self):
        self.setup_mock_for_show()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
                          req, 1234)

    def test_user_quotas_update_as_admin(self):
        self.setup_mock_for_update()
        body = {'quota_set': {'instances': 10, 'cores': 20,
                              'ram': 51200, 'floating_ips': 10,
                              'fixed_ips': -1, 'metadata_items': 128,
                              'injected_files': 5,
                              'injected_file_content_bytes': 10240,
                              'injected_file_path_bytes': 255,
                              'security_groups': 10,
                              'security_group_rules': 20,
                              'key_pairs': 100}}
        if self.include_server_group_quotas:
            body['quota_set']['server_groups'] = 10
            body['quota_set']['server_group_members'] = 10

        expected_body = self.get_update_expected_response(body)

        url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
        req = fakes.HTTPRequest.blank(url, use_admin_context=True)
        res_dict = self.controller.update(req, 'update_me', body=body)

        self.assertEqual(expected_body, res_dict)

    def test_user_quotas_update_as_user(self):
        # Non-admin may not update another user's quotas.
        self.setup_mock_for_update()
        body = {'quota_set': {'instances': 10, 'cores': 20,
                              'ram': 51200, 'floating_ips': 10,
                              'fixed_ips': -1, 'metadata_items': 128,
                              'injected_files': 5,
                              'injected_file_content_bytes': 10240,
                              'security_groups': 10,
                              'security_group_rules': 20,
                              'key_pairs': 100,
                              'server_groups': 10,
                              'server_group_members': 10}}

        url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
        req = fakes.HTTPRequest.blank(url)
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
                          req, 'update_me', body=body)

    def test_user_quotas_update_exceed_project(self):
        # A per-user limit above the project limit (10 instances) is a 400.
        self.setup_mock_for_update()
        body = {'quota_set': {'instances': 20}}

        url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
        req = fakes.HTTPRequest.blank(url, use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body=body)

    def test_user_quotas_delete_as_unauthorized_user(self):
        self.setup_mock_for_update()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1')
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
                          req, 1234)

    def test_user_quotas_delete_as_admin(self):
        if self._is_v20_api_test():
            self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
            self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
        context = context_maker.get_admin_context()
        url = '/v2/fake4/os-quota-sets/1234?user_id=1'
        self.req = fakes.HTTPRequest.blank(url)
        self.req.environ['nova.context'] = context
        # Per-user delete must call destroy_all_by_project_and_user and
        # return 202.
        self.mox.StubOutWithMock(quota.QUOTAS,
                                 "destroy_all_by_project_and_user")
        quota.QUOTAS.destroy_all_by_project_and_user(context, 1234, '1')
        self.mox.ReplayAll()
        res = self.controller.delete(self.req, 1234)
        self.mox.VerifyAll()
        self.assertEqual(202, self.get_delete_status_int(res))
+
+
class QuotaSetsTestV2(QuotaSetsTestV21):
    """Run the quota-sets tests against the legacy v2.0 contrib API.

    Fixes the misspelled "fromat" in the two method names below so grep
    for "format" finds them; test discovery (``test_`` prefix) is
    unaffected.
    """

    plugin = quotas_v2
    validation_error = webob.exc.HTTPBadRequest

    def _setup_controller(self):
        # The v2.0 controller consults the extension manager for
        # os-server-group-quotas at construction time; record that
        # expectation before instantiating it.
        self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
        self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes().\
            AndReturn(self.include_server_group_quotas)
        self.mox.ReplayAll()
        self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
        self.mox.ResetAll()

    # NOTE: The following tests are tricky and v2.1 API does not allow
    # this kind of input by strong input validation. Just for test coverage,
    # we keep them now.
    def test_quotas_update_invalid_value_json_format_empty_string(self):
        self.setup_mock_for_update()
        self.default_quotas.update({
            'instances': 50,
            'cores': 50
        })
        expected_resp = {'quota_set': self.default_quotas}

        # when PUT JSON format with empty string for quota
        body = copy.deepcopy(expected_resp)
        body['quota_set']['ram'] = ''
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        res_dict = self.controller.update(req, 'update_me', body)
        self.assertEqual(res_dict, expected_resp)

    def test_quotas_update_invalid_value_xml_format_empty_string(self):
        self.default_quotas.update({
            'instances': 50,
            'cores': 50
        })
        expected_resp = {'quota_set': self.default_quotas}

        # when PUT XML format with empty string for quota
        body = copy.deepcopy(expected_resp)
        body['quota_set']['ram'] = {}
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        self.setup_mock_for_update()
        res_dict = self.controller.update(req, 'update_me', body)
        self.assertEqual(res_dict, expected_resp)

    # NOTE: os-extended-quotas and os-user-quotas are only for v2.0.
    # On v2.1, these features are always enable. So we need the following
    # tests only for v2.0.
    def test_delete_quotas_when_extension_not_loaded(self):
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(False)
        self.mox.ReplayAll()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          req, 1234)

    def test_delete_user_quotas_when_extension_not_loaded(self):
        self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
        self.ext_mgr.is_loaded('os-user-quotas').AndReturn(False)
        self.mox.ReplayAll()
        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
                          req, 1234)
+
+
class QuotaSetsTestV2WithoutServerGroupQuotas(QuotaSetsTestV2):
    """v2.0 quota-sets tests with os-server-group-quotas disabled."""

    include_server_group_quotas = False

    # NOTE: os-server-group-quotas is only for v2.0. On v2.1 this feature
    # is always enabled, so this test is only needed for v2.0
    def test_quotas_update_without_server_group_quotas_extension(self):
        self.setup_mock_for_update()
        # Both server-group keys must be rejected while the extension is
        # not loaded.  (The original misspelled 'server_group_members' as
        # 'sever_group_members', so the intended key was never exercised;
        # the method name typo 'extenstion' is fixed as well.)
        self.default_quotas.update({
            'server_groups': 50,
            'server_group_members': 50
        })
        body = {'quota_set': self.default_quotas}

        req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
                                      use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body=body)
+
+
class ExtendedQuotasTestV2(ExtendedQuotasTestV21):
    """Run the extended-quota tests against the legacy v2.0 contrib API."""

    plugin = quotas_v2

    def _setup_controller(self):
        # The v2.0 controller checks os-server-group-quotas on
        # construction; report it as absent for these tests.
        self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
        self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes().\
            AndReturn(False)
        self.mox.ReplayAll()
        self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
        self.mox.ResetAll()
+
+
class UserQuotasTestV2(UserQuotasTestV21):
    """Run the per-user quota tests against the legacy v2.0 contrib API."""

    plugin = quotas_v2

    def _setup_controller(self):
        # Record the os-server-group-quotas extension lookup performed by
        # the v2.0 controller's constructor.
        self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
        self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes().\
            AndReturn(self.include_server_group_quotas)
        self.mox.ReplayAll()
        self.controller = self.plugin.QuotaSetsController(self.ext_mgr)
        self.mox.ResetAll()
+
+
class UserQuotasTestV2WithoutServerGroupQuotas(UserQuotasTestV2):
    """v2.0 per-user quota tests with os-server-group-quotas disabled."""

    include_server_group_quotas = False

    # NOTE: os-server-group-quotas is only for v2.0. On v2.1 this feature
    # is always enabled, so this test is only needed for v2.0
    def test_user_quotas_update_as_admin_without_sg_quota_extension(self):
        self.setup_mock_for_update()
        # With the extension unloaded, the server-group keys are unknown
        # and the whole update must be rejected.
        body = {'quota_set': {'instances': 10, 'cores': 20,
                              'ram': 51200, 'floating_ips': 10,
                              'fixed_ips': -1, 'metadata_items': 128,
                              'injected_files': 5,
                              'injected_file_content_bytes': 10240,
                              'injected_file_path_bytes': 255,
                              'security_groups': 10,
                              'security_group_rules': 20,
                              'key_pairs': 100,
                              'server_groups': 100,
                              'server_group_members': 200}}

        url = '/v2/fake4/os-quota-sets/update_me?user_id=1'
        req = fakes.HTTPRequest.blank(url, use_admin_context=True)
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
                          req, 'update_me', body=body)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_rescue.py b/nova/tests/unit/api/openstack/compute/contrib/test_rescue.py
new file mode 100644
index 0000000000..f8de7de291
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_rescue.py
@@ -0,0 +1,270 @@
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova import compute
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
+
+
def rescue(self, context, instance, rescue_password=None,
           rescue_image_ref=None):
    """No-op stand-in for compute.api.API.rescue; always returns None."""
    return None
+
+
def unrescue(self, context, instance):
    """No-op stand-in for compute.api.API.unrescue; always returns None."""
    return None
+
+
def fake_compute_get(*args, **kwargs):
    """Stub for compute.api.API.get: a minimal instance dict, fixed uuid."""
    return {'id': 1,
            'uuid': '70f6db34-de8d-4fbd-aafb-4065bdfa6114'}
+
+
class RescueTestV21(test.NoDBTestCase):
    """Tests for the os-rescue server actions against the v2.1 API.

    The identical 4-line request-building boilerplate was repeated in
    every test; it is factored into ``_action_request`` (behaviour
    unchanged).
    """

    _prefix = '/v2/fake'

    def setUp(self):
        super(RescueTestV21, self).setUp()

        # Stub the compute API so no instance lookup or DB access happens.
        self.stubs.Set(compute.api.API, "get", fake_compute_get)
        self.stubs.Set(compute.api.API, "rescue", rescue)
        self.stubs.Set(compute.api.API, "unrescue", unrescue)
        self.app = self._get_app()

    def _get_app(self):
        return fakes.wsgi_app_v21(init_only=('servers', 'os-rescue'))

    def _action_request(self, body):
        """Build a JSON POST to the test server's action URL with *body*."""
        req = webob.Request.blank(self._prefix + '/servers/test_inst/action')
        req.method = "POST"
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        return req

    def test_rescue_from_locked_server(self):
        def fake_rescue_from_locked_server(self, context,
            instance, rescue_password=None, rescue_image_ref=None):
            raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])

        self.stubs.Set(compute.api.API,
                       'rescue',
                       fake_rescue_from_locked_server)

        resp = self._action_request(
            {"rescue": {"adminPass": "AABBCC112233"}}).get_response(self.app)
        # A locked instance maps to 409 Conflict.
        self.assertEqual(409, resp.status_int)

    def test_rescue_with_preset_password(self):
        body = {"rescue": {"adminPass": "AABBCC112233"}}
        resp = self._action_request(body).get_response(self.app)
        self.assertEqual(200, resp.status_int)
        resp_json = jsonutils.loads(resp.body)
        # The caller-supplied password is echoed back unchanged.
        self.assertEqual("AABBCC112233", resp_json['adminPass'])

    def test_rescue_generates_password(self):
        resp = self._action_request(dict(rescue=None)).get_response(self.app)
        self.assertEqual(200, resp.status_int)
        resp_json = jsonutils.loads(resp.body)
        # Without adminPass in the request, one is generated at the
        # configured length.
        self.assertEqual(CONF.password_length, len(resp_json['adminPass']))

    def test_rescue_of_rescued_instance(self):
        def fake_rescue(*args, **kwargs):
            raise exception.InstanceInvalidState('fake message')

        self.stubs.Set(compute.api.API, "rescue", fake_rescue)
        resp = self._action_request(dict(rescue=None)).get_response(self.app)
        self.assertEqual(409, resp.status_int)

    def test_unrescue(self):
        resp = self._action_request(
            dict(unrescue=None)).get_response(self.app)
        self.assertEqual(202, resp.status_int)

    def test_unrescue_from_locked_server(self):
        def fake_unrescue_from_locked_server(self, context,
                                             instance):
            raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])

        self.stubs.Set(compute.api.API,
                       'unrescue',
                       fake_unrescue_from_locked_server)

        resp = self._action_request(
            dict(unrescue=None)).get_response(self.app)
        self.assertEqual(409, resp.status_int)

    def test_unrescue_of_active_instance(self):
        def fake_unrescue(*args, **kwargs):
            raise exception.InstanceInvalidState('fake message')

        self.stubs.Set(compute.api.API, "unrescue", fake_unrescue)
        resp = self._action_request(
            dict(unrescue=None)).get_response(self.app)
        self.assertEqual(409, resp.status_int)

    def test_rescue_raises_unrescuable(self):
        def fake_rescue(*args, **kwargs):
            raise exception.InstanceNotRescuable('fake message')

        self.stubs.Set(compute.api.API, "rescue", fake_rescue)
        resp = self._action_request(dict(rescue=None)).get_response(self.app)
        self.assertEqual(400, resp.status_int)

    @mock.patch('nova.compute.api.API.rescue')
    def test_rescue_with_image_specified(self, mock_compute_api_rescue):
        instance = fake_compute_get()
        body = {"rescue": {"adminPass": "ABC123",
                           "rescue_image_ref": "img-id"}}
        resp = self._action_request(body).get_response(self.app)
        self.assertEqual(200, resp.status_int)
        resp_json = jsonutils.loads(resp.body)
        self.assertEqual("ABC123", resp_json['adminPass'])

        # The explicit image ref must be forwarded to the compute API.
        mock_compute_api_rescue.assert_called_with(mock.ANY, instance,
                                                   rescue_password=u'ABC123',
                                                   rescue_image_ref=u'img-id')

    @mock.patch('nova.compute.api.API.rescue')
    def test_rescue_without_image_specified(self, mock_compute_api_rescue):
        instance = fake_compute_get()
        body = {"rescue": {"adminPass": "ABC123"}}

        resp = self._action_request(body).get_response(self.app)
        self.assertEqual(200, resp.status_int)
        resp_json = jsonutils.loads(resp.body)
        self.assertEqual("ABC123", resp_json['adminPass'])

        mock_compute_api_rescue.assert_called_with(mock.ANY, instance,
                                                   rescue_password=u'ABC123',
                                                   rescue_image_ref=None)

    def test_rescue_with_none(self):
        resp = self._action_request(dict(rescue=None)).get_response(self.app)
        self.assertEqual(200, resp.status_int)

    def test_rescue_with_empty_dict(self):
        resp = self._action_request(
            dict(rescue=dict())).get_response(self.app)
        self.assertEqual(200, resp.status_int)

    def test_rescue_disable_password(self):
        self.flags(enable_instance_password=False)
        resp = self._action_request(dict(rescue=None)).get_response(self.app)
        self.assertEqual(200, resp.status_int)
        resp_json = jsonutils.loads(resp.body)
        # With instance passwords disabled, no adminPass is returned.
        self.assertNotIn('adminPass', resp_json)

    def test_rescue_with_invalid_property(self):
        resp = self._action_request(
            {"rescue": {"test": "test"}}).get_response(self.app)
        self.assertEqual(400, resp.status_int)
+
+
class RescueTestV20(RescueTestV21):
    """Run the rescue tests against the legacy v2.0 API."""

    def _get_app(self):
        # v2.0 loads the rescue extension via the select_extensions
        # mechanism rather than init_only.
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=None)
        return fakes.wsgi_app(init_only=('servers',))

    def test_rescue_with_invalid_property(self):
        # NOTE(cyeoh): input validation in original v2 code does not
        # check for invalid properties.
        pass

    def test_rescue_disable_password(self):
        # NOTE(cyeoh): Original v2.0 code does not support disabling
        # the admin password being returned through a conf setting
        pass
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_scheduler_hints.py b/nova/tests/unit/api/openstack/compute/contrib/test_scheduler_hints.py
new file mode 100644
index 0000000000..fba3a02eec
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_scheduler_hints.py
@@ -0,0 +1,220 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+
+from nova.api.openstack import compute
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import servers as servers_v21
+from nova.api.openstack.compute import servers as servers_v2
+from nova.api.openstack import extensions
+import nova.compute.api
+from nova.compute import flavors
+from nova import db
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.image import fake
+
+
+UUID = fakes.FAKE_UUID
+
+
+CONF = cfg.CONF
+
+
+class SchedulerHintsTestCaseV21(test.TestCase):
+    # Exercises the os-scheduler-hints extension through the full v2.1
+    # WSGI stack: hints supplied in a boot request must be forwarded to
+    # compute API create() via the scheduler_hints kwarg, and malformed
+    # hints must be rejected before reaching the compute layer.
+
+    def setUp(self):
+        super(SchedulerHintsTestCaseV21, self).setUp()
+        self.fake_instance = fakes.stub_instance(1, uuid=UUID)
+        self._set_up_router()
+
+    def _set_up_router(self):
+        # Overridden by the V2 subclass to build the legacy router.
+        self.app = compute.APIRouterV3(init_only=('servers',
+                                                  'os-scheduler-hints'))
+
+    def _get_request(self):
+        # Overridden by the V2 subclass to use the legacy URL prefix.
+        return fakes.HTTPRequestV3.blank('/servers')
+
+    def test_create_server_without_hints(self):
+
+        # No hints in the request body => create() must receive an
+        # empty scheduler_hints dict.
+        def fake_create(*args, **kwargs):
+            self.assertEqual(kwargs['scheduler_hints'], {})
+            return ([self.fake_instance], '')
+
+        self.stubs.Set(nova.compute.api.API, 'create', fake_create)
+
+        req = self._get_request()
+        req.method = 'POST'
+        req.content_type = 'application/json'
+        body = {'server': {
+                  'name': 'server_test',
+                  'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
+                  'flavorRef': '1',
+               }}
+
+        req.body = jsonutils.dumps(body)
+        res = req.get_response(self.app)
+        self.assertEqual(202, res.status_int)
+
+    def test_create_server_with_hints(self):
+
+        # Hints under the 'os:scheduler_hints' key must be passed
+        # through verbatim.
+        def fake_create(*args, **kwargs):
+            self.assertEqual(kwargs['scheduler_hints'], {'a': 'b'})
+            return ([self.fake_instance], '')
+
+        self.stubs.Set(nova.compute.api.API, 'create', fake_create)
+
+        req = self._get_request()
+        req.method = 'POST'
+        req.content_type = 'application/json'
+        body = {
+            'server': {
+                  'name': 'server_test',
+                  'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
+                  'flavorRef': '1',
+            },
+            'os:scheduler_hints': {'a': 'b'},
+        }
+
+        req.body = jsonutils.dumps(body)
+        res = req.get_response(self.app)
+        self.assertEqual(202, res.status_int)
+
+    def test_create_server_bad_hints(self):
+        # A non-dict hints value ('here') is malformed and must be
+        # rejected with HTTP 400 by request validation.
+        req = self._get_request()
+        req.method = 'POST'
+        req.content_type = 'application/json'
+        body = {
+            'server': {
+                  'name': 'server_test',
+                  'imageRef': 'cedef40a-ed67-4d10-800e-17455edce175',
+                  'flavorRef': '1',
+            },
+            'os:scheduler_hints': 'here',
+        }
+
+        req.body = jsonutils.dumps(body)
+        res = req.get_response(self.app)
+        self.assertEqual(400, res.status_int)
+
+
+class SchedulerHintsTestCaseV2(SchedulerHintsTestCaseV21):
+    # Same test matrix as V21, but routed through the legacy v2.0 app
+    # with the Scheduler_hints extension explicitly enabled.
+
+    def _set_up_router(self):
+        self.flags(
+            osapi_compute_extension=[
+                'nova.api.openstack.compute.contrib.select_extensions'],
+            osapi_compute_ext_list=['Scheduler_hints'])
+        self.app = compute.APIRouter(init_only=('servers',))
+
+    def _get_request(self):
+        return fakes.HTTPRequest.blank('/fake/servers')
+
+
+class ServersControllerCreateTestV21(test.TestCase):
+    # Verifies that when the os-scheduler-hints extension is disabled
+    # (blacklisted), hints present in the request body are NOT passed
+    # through to compute API create().
+
+    def setUp(self):
+        """Shared implementation for tests below that create instance."""
+        super(ServersControllerCreateTestV21, self).setUp()
+
+        self.instance_cache_num = 0
+        self._set_up_controller()
+
+        # Stubbed db.instance_create: fabricates a fake DB instance
+        # record so the controller's create path can complete without a
+        # real database.
+        def instance_create(context, inst):
+            inst_type = flavors.get_flavor_by_flavor_id(3)
+            image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+            def_image_ref = 'http://localhost/images/%s' % image_uuid
+            self.instance_cache_num += 1
+            instance = fake_instance.fake_db_instance(**{
+                'id': self.instance_cache_num,
+                'display_name': inst['display_name'] or 'test',
+                'uuid': fakes.FAKE_UUID,
+                'instance_type': inst_type,
+                'access_ip_v4': '1.2.3.4',
+                'access_ip_v6': 'fead::1234',
+                'image_ref': inst.get('image_ref', def_image_ref),
+                'user_id': 'fake',
+                'project_id': 'fake',
+                'reservation_id': inst['reservation_id'],
+                "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+                "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+                "progress": 0,
+                "fixed_ips": [],
+                "task_state": "",
+                "vm_state": "",
+                "root_device_name": inst.get('root_device_name', 'vda'),
+            })
+
+            return instance
+
+        fake.stub_out_image_service(self.stubs)
+        self.stubs.Set(db, 'instance_create', instance_create)
+
+    def _set_up_controller(self):
+        # Blacklist os-scheduler-hints so the controller under test has
+        # no hints support loaded.
+        ext_info = plugins.LoadedExtensionInfo()
+        CONF.set_override('extensions_blacklist', 'os-scheduler-hints',
+                          'osapi_v3')
+        self.no_scheduler_hints_controller = servers_v21.ServersController(
+            extension_info=ext_info)
+
+    def _verify_availability_zone(self, **kwargs):
+        # NOTE(review): despite the name (which looks copy-pasted from
+        # the availability-zone tests), this asserts scheduler hints are
+        # absent from the create() kwargs — confirm intent.
+        self.assertNotIn('scheduler_hints', kwargs)
+
+    def _get_request(self):
+        return fakes.HTTPRequestV3.blank('/servers')
+
+    def _test_create_extra(self, params):
+        # Boot a server with extra body params merged in; the create()
+        # stub installed by the caller performs the actual assertions,
+        # so the response object itself is not inspected.
+        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+        server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
+        body = dict(server=server)
+        body.update(params)
+        req = self._get_request()
+        req.method = 'POST'
+        req.body = jsonutils.dumps(body)
+        req.headers["content-type"] = "application/json"
+        server = self.no_scheduler_hints_controller.create(
+            req, body=body).obj['server']
+
+    def test_create_instance_with_scheduler_hints_disabled(self):
+        hints = {'same_host': '48e6a9f6-30af-47e0-bc04-acaed113bb4e'}
+        params = {'OS-SCH-HNT:scheduler_hints': hints}
+        old_create = nova.compute.api.API.create
+
+        def create(*args, **kwargs):
+            self._verify_availability_zone(**kwargs)
+            return old_create(*args, **kwargs)
+
+        self.stubs.Set(nova.compute.api.API, 'create', create)
+        self._test_create_extra(params)
+
+
+class ServersControllerCreateTestV2(ServersControllerCreateTestV21):
+    # v2.0 variant: without the extension loaded, the legacy controller
+    # still passes scheduler_hints, but as an empty dict rather than
+    # omitting the kwarg entirely.
+
+    def _set_up_controller(self):
+        self.ext_mgr = extensions.ExtensionManager()
+        self.ext_mgr.extensions = {}
+        self.no_scheduler_hints_controller = servers_v2.Controller(
+            self.ext_mgr)
+
+    def _verify_availability_zone(self, **kwargs):
+        self.assertEqual(kwargs['scheduler_hints'], {})
+
+    def _get_request(self):
+        return fakes.HTTPRequest.blank('/fake/servers')
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_security_group_default_rules.py b/nova/tests/unit/api/openstack/compute/contrib/test_security_group_default_rules.py
new file mode 100644
index 0000000000..a735f4722e
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_security_group_default_rules.py
@@ -0,0 +1,515 @@
+# Copyright 2013 Metacloud, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.config import cfg
+import webob
+
+from nova.api.openstack.compute.contrib import \
+ security_group_default_rules as security_group_default_rules_v2
+from nova.api.openstack.compute.plugins.v3 import \
+ security_group_default_rules as security_group_default_rules_v21
+from nova.api.openstack import wsgi
+from nova import context
+import nova.db
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+CONF = cfg.CONF
+
+
+class AttrDict(dict):
+    # Minimal dict that also exposes keys as attributes, mimicking DB
+    # row objects.  NOTE(review): missing keys raise KeyError rather
+    # than AttributeError, so hasattr() misbehaves — fine for these
+    # tests, but do not reuse generally.
+    def __getattr__(self, k):
+        return self[k]
+
+
+def security_group_default_rule_template(**kwargs):
+    # Build a default-rule request dict (TCP/22 from 10.10.10.0/24);
+    # any field may be overridden or removed by the caller.
+    rule = kwargs.copy()
+    rule.setdefault('ip_protocol', 'TCP')
+    rule.setdefault('from_port', 22)
+    rule.setdefault('to_port', 22)
+    rule.setdefault('cidr', '10.10.10.0/24')
+    return rule
+
+
+def security_group_default_rule_db(security_group_default_rule, id=None):
+    # Wrap a rule dict as an attribute-accessible fake DB row,
+    # optionally assigning an id.
+    attrs = security_group_default_rule.copy()
+    if id is not None:
+        attrs['id'] = id
+    return AttrDict(attrs)
+
+
+class TestSecurityGroupDefaultRulesNeutronV21(test.TestCase):
+    # With security_group_api=neutron, every default-rule operation
+    # must raise HTTPNotImplemented: Neutron has no equivalent concept.
+    # NOTE(review): "neturon" in the test names below is a typo for
+    # "neutron" (left as-is; renaming tests is out of scope here).
+    controller_cls = (security_group_default_rules_v21.
+                      SecurityGroupDefaultRulesController)
+
+    def setUp(self):
+        self.flags(security_group_api='neutron')
+        super(TestSecurityGroupDefaultRulesNeutronV21, self).setUp()
+        self.controller = self.controller_cls()
+
+    def test_create_security_group_default_rule_not_implemented_neutron(self):
+        sgr = security_group_default_rule_template()
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.create,
+                          req, {'security_group_default_rule': sgr})
+
+    def test_security_group_default_rules_list_not_implemented_neturon(self):
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.index,
+                          req)
+
+    def test_security_group_default_rules_show_not_implemented_neturon(self):
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.show,
+                          req, '602ed77c-a076-4f9b-a617-f93b847b62c5')
+
+    def test_security_group_default_rules_delete_not_implemented_neturon(self):
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.delete,
+                          req, '602ed77c-a076-4f9b-a617-f93b847b62c5')
+
+
+class TestSecurityGroupDefaultRulesNeutronV2(test.TestCase):
+    # NOTE(review): this subclasses test.TestCase, not the NeutronV21
+    # test class above, so it defines no tests and only swaps in the
+    # v2 controller class — presumably it was meant to inherit
+    # TestSecurityGroupDefaultRulesNeutronV21; confirm intent.
+    controller_cls = (security_group_default_rules_v2.
+                      SecurityGroupDefaultRulesController)
+
+
+class TestSecurityGroupDefaultRulesV21(test.TestCase):
+    # End-to-end tests for the nova-network default-rule controller:
+    # create/list/show/delete plus input validation.  Creation tests
+    # write through to the (fake) DB layer, so list/show/delete tests
+    # reuse test_create_security_group_default_rule as a fixture.
+    controller_cls = (security_group_default_rules_v21.
+                      SecurityGroupDefaultRulesController)
+
+    def setUp(self):
+        super(TestSecurityGroupDefaultRulesV21, self).setUp()
+        self.controller = self.controller_cls()
+
+    def test_create_security_group_default_rule(self):
+        # Happy path: all fields echoed back, cidr nested under
+        # ip_range in the response.
+        sgr = security_group_default_rule_template()
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        sgr_dict = dict(security_group_default_rule=sgr)
+        res_dict = self.controller.create(req, sgr_dict)
+        security_group_default_rule = res_dict['security_group_default_rule']
+        self.assertEqual(security_group_default_rule['ip_protocol'],
+                         sgr['ip_protocol'])
+        self.assertEqual(security_group_default_rule['from_port'],
+                         sgr['from_port'])
+        self.assertEqual(security_group_default_rule['to_port'],
+                         sgr['to_port'])
+        self.assertEqual(security_group_default_rule['ip_range']['cidr'],
+                         sgr['cidr'])
+
+    # Missing-field validation: to_port, from_port and ip_protocol are
+    # required (400); a missing cidr defaults to 0.0.0.0/0.
+    def test_create_security_group_default_rule_with_no_to_port(self):
+        sgr = security_group_default_rule_template()
+        del sgr['to_port']
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+                          req, {'security_group_default_rule': sgr})
+
+    def test_create_security_group_default_rule_with_no_from_port(self):
+        sgr = security_group_default_rule_template()
+        del sgr['from_port']
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+                          req, {'security_group_default_rule': sgr})
+
+    def test_create_security_group_default_rule_with_no_ip_protocol(self):
+        sgr = security_group_default_rule_template()
+        del sgr['ip_protocol']
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+                          req, {'security_group_default_rule': sgr})
+
+    def test_create_security_group_default_rule_with_no_cidr(self):
+        sgr = security_group_default_rule_template()
+        del sgr['cidr']
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        res_dict = self.controller.create(req,
+                                          {'security_group_default_rule': sgr})
+        security_group_default_rule = res_dict['security_group_default_rule']
+        self.assertNotEqual(security_group_default_rule['id'], 0)
+        self.assertEqual(security_group_default_rule['ip_range']['cidr'],
+                         '0.0.0.0/0')
+
+    # Blank-field validation: empty ports/protocol are rejected (400);
+    # an empty cidr again falls back to 0.0.0.0/0.
+    def test_create_security_group_default_rule_with_blank_to_port(self):
+        sgr = security_group_default_rule_template(to_port='')
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+                          req, {'security_group_default_rule': sgr})
+
+    def test_create_security_group_default_rule_with_blank_from_port(self):
+        sgr = security_group_default_rule_template(from_port='')
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+                          req, {'security_group_default_rule': sgr})
+
+    def test_create_security_group_default_rule_with_blank_ip_protocol(self):
+        sgr = security_group_default_rule_template(ip_protocol='')
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+                          req, {'security_group_default_rule': sgr})
+
+    def test_create_security_group_default_rule_with_blank_cidr(self):
+        sgr = security_group_default_rule_template(cidr='')
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        res_dict = self.controller.create(req,
+                                          {'security_group_default_rule': sgr})
+        security_group_default_rule = res_dict['security_group_default_rule']
+        self.assertNotEqual(security_group_default_rule['id'], 0)
+        self.assertEqual(security_group_default_rule['ip_range']['cidr'],
+                         '0.0.0.0/0')
+
+    # Malformed-value validation: non-numeric or out-of-range ports,
+    # bad protocol names and bad CIDRs are all 400s.
+    def test_create_security_group_default_rule_non_numerical_to_port(self):
+        sgr = security_group_default_rule_template(to_port='invalid')
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+                          req, {'security_group_default_rule': sgr})
+
+    def test_create_security_group_default_rule_non_numerical_from_port(self):
+        sgr = security_group_default_rule_template(from_port='invalid')
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+                          req, {'security_group_default_rule': sgr})
+
+    def test_create_security_group_default_rule_invalid_ip_protocol(self):
+        sgr = security_group_default_rule_template(ip_protocol='invalid')
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+                          req, {'security_group_default_rule': sgr})
+
+    def test_create_security_group_default_rule_invalid_cidr(self):
+        sgr = security_group_default_rule_template(cidr='10.10.2222.0/24')
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+                          req, {'security_group_default_rule': sgr})
+
+    def test_create_security_group_default_rule_invalid_to_port(self):
+        sgr = security_group_default_rule_template(to_port='666666')
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+                          req, {'security_group_default_rule': sgr})
+
+    def test_create_security_group_default_rule_invalid_from_port(self):
+        sgr = security_group_default_rule_template(from_port='666666')
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+                          req, {'security_group_default_rule': sgr})
+
+    def test_create_security_group_default_rule_with_no_body(self):
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller.create, req, None)
+
+    def test_create_duplicate_security_group_default_rule(self):
+        sgr = security_group_default_rule_template()
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.controller.create(req, {'security_group_default_rule': sgr})
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+                          req, {'security_group_default_rule': sgr})
+
+    def test_security_group_default_rules_list(self):
+        # Seed one rule via the create test, then verify index() returns
+        # exactly that rule with id 1.
+        self.test_create_security_group_default_rule()
+        rules = [dict(id=1,
+                      ip_protocol='TCP',
+                      from_port=22,
+                      to_port=22,
+                      ip_range=dict(cidr='10.10.10.0/24'))]
+        expected = {'security_group_default_rules': rules}
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        res_dict = self.controller.index(req)
+        self.assertEqual(res_dict, expected)
+
+    def test_default_security_group_default_rule_show(self):
+        sgr = security_group_default_rule_template(id=1)
+
+        self.test_create_security_group_default_rule()
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        res_dict = self.controller.show(req, '1')
+
+        security_group_default_rule = res_dict['security_group_default_rule']
+
+        self.assertEqual(security_group_default_rule['ip_protocol'],
+                         sgr['ip_protocol'])
+        self.assertEqual(security_group_default_rule['to_port'],
+                         sgr['to_port'])
+        self.assertEqual(security_group_default_rule['from_port'],
+                         sgr['from_port'])
+        self.assertEqual(security_group_default_rule['ip_range']['cidr'],
+                         sgr['cidr'])
+
+    def test_delete_security_group_default_rule(self):
+        # Stub the DB get/destroy calls so delete() resolves the rule
+        # and we can observe that destroy was actually invoked.
+        sgr = security_group_default_rule_template(id=1)
+
+        self.test_create_security_group_default_rule()
+
+        self.called = False
+
+        def security_group_default_rule_destroy(context, id):
+            self.called = True
+
+        def return_security_group_default_rule(context, id):
+            self.assertEqual(sgr['id'], id)
+            return security_group_default_rule_db(sgr)
+
+        self.stubs.Set(nova.db, 'security_group_default_rule_destroy',
+                       security_group_default_rule_destroy)
+        self.stubs.Set(nova.db, 'security_group_default_rule_get',
+                       return_security_group_default_rule)
+
+        req = fakes.HTTPRequest.blank(
+            '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+        self.controller.delete(req, '1')
+
+        self.assertTrue(self.called)
+
+    def test_security_group_ensure_default(self):
+        # Bypasses the API layer: for a fresh project, the DB-level
+        # ensure-default call must seed the new project's default group
+        # with the configured default rule created above.
+        sgr = security_group_default_rule_template(id=1)
+        self.test_create_security_group_default_rule()
+
+        ctxt = context.get_admin_context()
+
+        setattr(ctxt, 'project_id', 'new_project_id')
+
+        sg = nova.db.security_group_ensure_default(ctxt)
+        rules = nova.db.security_group_rule_get_by_security_group(ctxt, sg.id)
+        security_group_rule = rules[0]
+        self.assertEqual(sgr['id'], security_group_rule.id)
+        self.assertEqual(sgr['ip_protocol'], security_group_rule.protocol)
+        self.assertEqual(sgr['from_port'], security_group_rule.from_port)
+        self.assertEqual(sgr['to_port'], security_group_rule.to_port)
+        self.assertEqual(sgr['cidr'], security_group_rule.cidr)
+
+
+class TestSecurityGroupDefaultRulesV2(test.TestCase):
+    # NOTE(review): like the NeutronV2 class above, this inherits
+    # test.TestCase rather than TestSecurityGroupDefaultRulesV21, so it
+    # runs no tests against the v2 controller — confirm intent.
+    controller_cls = (security_group_default_rules_v2.
+                      SecurityGroupDefaultRulesController)
+
+
+class TestSecurityGroupDefaultRulesXMLDeserializer(test.TestCase):
+    # XML request deserialization (v2-only feature): each test feeds an
+    # XML document and checks the resulting request body dict.  Element
+    # text is kept as strings; absent elements simply produce absent
+    # keys (validation happens later in the controller).
+    def setUp(self):
+        super(TestSecurityGroupDefaultRulesXMLDeserializer, self).setUp()
+        deserializer = security_group_default_rules_v2.\
+            SecurityGroupDefaultRulesXMLDeserializer()
+        self.deserializer = deserializer
+
+    def test_create_request(self):
+        serial_request = """
+<security_group_default_rule>
+  <from_port>22</from_port>
+  <to_port>22</to_port>
+  <ip_protocol>TCP</ip_protocol>
+  <cidr>10.10.10.0/24</cidr>
+</security_group_default_rule>"""
+        request = self.deserializer.deserialize(serial_request)
+        expected = {
+            "security_group_default_rule": {
+                "from_port": "22",
+                "to_port": "22",
+                "ip_protocol": "TCP",
+                "cidr": "10.10.10.0/24"
+            },
+        }
+        self.assertEqual(request['body'], expected)
+
+    def test_create_no_to_port_request(self):
+        serial_request = """
+<security_group_default_rule>
+  <from_port>22</from_port>
+  <ip_protocol>TCP</ip_protocol>
+  <cidr>10.10.10.0/24</cidr>
+</security_group_default_rule>"""
+        request = self.deserializer.deserialize(serial_request)
+        expected = {
+            "security_group_default_rule": {
+                "from_port": "22",
+                "ip_protocol": "TCP",
+                "cidr": "10.10.10.0/24"
+            },
+        }
+        self.assertEqual(request['body'], expected)
+
+    def test_create_no_from_port_request(self):
+        serial_request = """
+<security_group_default_rule>
+  <to_port>22</to_port>
+  <ip_protocol>TCP</ip_protocol>
+  <cidr>10.10.10.0/24</cidr>
+</security_group_default_rule>"""
+        request = self.deserializer.deserialize(serial_request)
+        expected = {
+            "security_group_default_rule": {
+                "to_port": "22",
+                "ip_protocol": "TCP",
+                "cidr": "10.10.10.0/24"
+            },
+        }
+        self.assertEqual(request['body'], expected)
+
+    def test_create_no_ip_protocol_request(self):
+        serial_request = """
+<security_group_default_rule>
+  <from_port>22</from_port>
+  <to_port>22</to_port>
+  <cidr>10.10.10.0/24</cidr>
+</security_group_default_rule>"""
+        request = self.deserializer.deserialize(serial_request)
+        expected = {
+            "security_group_default_rule": {
+                "from_port": "22",
+                "to_port": "22",
+                "cidr": "10.10.10.0/24"
+            },
+        }
+        self.assertEqual(request['body'], expected)
+
+    def test_create_no_cidr_request(self):
+        serial_request = """
+<security_group_default_rule>
+  <from_port>22</from_port>
+  <to_port>22</to_port>
+  <ip_protocol>TCP</ip_protocol>
+</security_group_default_rule>"""
+        request = self.deserializer.deserialize(serial_request)
+        expected = {
+            "security_group_default_rule": {
+                "from_port": "22",
+                "to_port": "22",
+                "ip_protocol": "TCP",
+            },
+        }
+        self.assertEqual(request['body'], expected)
+
+
+class TestSecurityGroupDefaultRuleXMLSerializer(test.TestCase):
+    # XML response serialization (v2-only feature): serializes rule
+    # dicts and re-parses the output with lxml to verify tag names,
+    # the XML namespace, and element text round-trip the input data.
+    def setUp(self):
+        super(TestSecurityGroupDefaultRuleXMLSerializer, self).setUp()
+        self.namespace = wsgi.XMLNS_V11
+        self.rule_serializer =\
+            security_group_default_rules_v2.SecurityGroupDefaultRuleTemplate()
+        self.index_serializer =\
+            security_group_default_rules_v2.SecurityGroupDefaultRulesTemplate()
+
+    def _tag(self, elem):
+        # Strip the '{namespace}' prefix from a fully-qualified tag,
+        # asserting the namespace matches the v1.1 API namespace.
+        tagname = elem.tag
+        self.assertEqual(tagname[0], '{')
+        tmp = tagname.partition('}')
+        namespace = tmp[0][1:]
+        self.assertEqual(namespace, self.namespace)
+        return tmp[2]
+
+    def _verify_security_group_default_rule(self, raw_rule, tree):
+        # Check that exactly the expected child elements are present
+        # and that each element's text equals the source dict value.
+        self.assertEqual(raw_rule['id'], tree.get('id'))
+
+        seen = set()
+        expected = set(['ip_protocol', 'from_port', 'to_port', 'ip_range',
+                        'ip_range/cidr'])
+
+        for child in tree:
+            child_tag = self._tag(child)
+            seen.add(child_tag)
+            if child_tag == 'ip_range':
+                for gr_child in child:
+                    gr_child_tag = self._tag(gr_child)
+                    self.assertIn(gr_child_tag, raw_rule[child_tag])
+                    seen.add('%s/%s' % (child_tag, gr_child_tag))
+                    self.assertEqual(gr_child.text,
+                                     raw_rule[child_tag][gr_child_tag])
+            else:
+                self.assertEqual(child.text, raw_rule[child_tag])
+        self.assertEqual(seen, expected)
+
+    def test_rule_serializer(self):
+        raw_rule = dict(id='123',
+                        ip_protocol='TCP',
+                        from_port='22',
+                        to_port='22',
+                        ip_range=dict(cidr='10.10.10.0/24'))
+        rule = dict(security_group_default_rule=raw_rule)
+        text = self.rule_serializer.serialize(rule)
+
+        tree = etree.fromstring(text)
+
+        self.assertEqual('security_group_default_rule', self._tag(tree))
+        self._verify_security_group_default_rule(raw_rule, tree)
+
+    def test_index_serializer(self):
+        # Port/CIDR values here are arbitrary strings (including an
+        # out-of-range '234567'); the serializer does no validation.
+        rules = [dict(id='123',
+                      ip_protocol='TCP',
+                      from_port='22',
+                      to_port='22',
+                      ip_range=dict(cidr='10.10.10.0/24')),
+                 dict(id='234',
+                      ip_protocol='UDP',
+                      from_port='23456',
+                      to_port='234567',
+                      ip_range=dict(cidr='10.12.0.0/18')),
+                 dict(id='345',
+                      ip_protocol='tcp',
+                      from_port='3456',
+                      to_port='4567',
+                      ip_range=dict(cidr='192.168.1.0/32'))]
+
+        rules_dict = dict(security_group_default_rules=rules)
+
+        text = self.index_serializer.serialize(rules_dict)
+
+        tree = etree.fromstring(text)
+        self.assertEqual('security_group_default_rules', self._tag(tree))
+        self.assertEqual(len(rules), len(tree))
+        for idx, child in enumerate(tree):
+            self._verify_security_group_default_rule(rules[idx], child)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_security_groups.py b/nova/tests/unit/api/openstack/compute/contrib/test_security_groups.py
new file mode 100644
index 0000000000..d1620b6a28
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_security_groups.py
@@ -0,0 +1,1767 @@
+# Copyright 2011 OpenStack Foundation
+# Copyright 2012 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import mock
+import mox
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import security_groups as secgroups_v2
+from nova.api.openstack.compute.plugins.v3 import security_groups as \
+ secgroups_v21
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+from nova import compute
+from nova.compute import power_state
+from nova import context as context_maker
+import nova.db
+from nova import exception
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import quota
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit import utils
+
+CONF = cfg.CONF
+FAKE_UUID1 = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
+FAKE_UUID2 = 'c6e6430a-6563-4efa-9542-5e93c9e97d18'
+
+
+class AttrDict(dict):
+    # Dict with attribute access, standing in for DB row objects.
+    # NOTE(review): missing keys raise KeyError, not AttributeError.
+    def __getattr__(self, k):
+        return self[k]
+
+
+def security_group_template(**kwargs):
+    # Build a security-group request dict with sane defaults; callers
+    # override or delete fields to exercise validation paths.
+    sg = kwargs.copy()
+    sg.setdefault('tenant_id', '123')
+    sg.setdefault('name', 'test')
+    sg.setdefault('description', 'test-description')
+    return sg
+
+
+def security_group_db(security_group, id=None):
+    # Convert an API-style group dict into a fake DB row: tenant_id is
+    # renamed to project_id, and rules/instances default to empty.
+    attrs = security_group.copy()
+    if 'tenant_id' in attrs:
+        attrs['project_id'] = attrs.pop('tenant_id')
+    if id is not None:
+        attrs['id'] = id
+    attrs.setdefault('rules', [])
+    attrs.setdefault('instances', [])
+    return AttrDict(attrs)
+
+
+def security_group_rule_template(**kwargs):
+    # Build a rule request dict (tcp/22, parent group 2 by default).
+    rule = kwargs.copy()
+    rule.setdefault('ip_protocol', 'tcp')
+    rule.setdefault('from_port', 22)
+    rule.setdefault('to_port', 22)
+    rule.setdefault('parent_group_id', 2)
+    return rule
+
+
+def security_group_rule_db(rule, id=None):
+    # Fake DB row for a rule: ip_protocol renamed to protocol.
+    # NOTE(review): unlike security_group_db, the 'id' parameter is
+    # accepted but never applied — confirm whether that is intended.
+    attrs = rule.copy()
+    if 'ip_protocol' in attrs:
+        attrs['protocol'] = attrs.pop('ip_protocol')
+    return AttrDict(attrs)
+
+
+def return_server(context, server_id,
+                  columns_to_join=None, use_slave=False):
+    # db.instance_get stub: a RUNNING (power_state 0x01) fake instance
+    # with the requested id and the fixed FAKE_UUID1.
+    return fake_instance.fake_db_instance(
+        **{'id': int(server_id),
+           'power_state': 0x01,
+           'host': "localhost",
+           'uuid': FAKE_UUID1,
+           'name': 'asdf'})
+
+
+def return_server_by_uuid(context, server_uuid,
+                          columns_to_join=None,
+                          use_slave=False):
+    # db.instance_get_by_uuid stub: a RUNNING fake instance with id 1
+    # and the caller-supplied uuid.
+    return fake_instance.fake_db_instance(
+        **{'id': 1,
+           'power_state': 0x01,
+           'host': "localhost",
+           'uuid': server_uuid,
+           'name': 'asdf'})
+
+
+def return_non_running_server(context, server_id, columns_to_join=None):
+    # Stub returning an instance in SHUTDOWN state, for tests that
+    # require a non-running server.
+    return fake_instance.fake_db_instance(
+        **{'id': server_id, 'power_state': power_state.SHUTDOWN,
+           'uuid': FAKE_UUID1, 'host': "localhost", 'name': 'asdf'})
+
+
+def return_security_group_by_name(context, project_id, group_name):
+    # Stub: group id 1 with one associated instance (FAKE_UUID1).
+    return {'id': 1, 'name': group_name,
+            "instances": [{'id': 1, 'uuid': FAKE_UUID1}]}
+
+
+def return_security_group_without_instances(context, project_id, group_name):
+    # Stub: group id 1 with no 'instances' key at all.
+    return {'id': 1, 'name': group_name}
+
+
+def return_server_nonexistent(context, server_id, columns_to_join=None):
+    # Stub that always reports the instance as missing.
+    raise exception.InstanceNotFound(instance_id=server_id)
+
+
+class TestSecurityGroupsV21(test.TestCase):
+    # os-security-groups API tests (class continues beyond this hunk).
+    # Controller classes are class attributes so the V2 subclass can
+    # swap in the legacy implementations.
+    secgrp_ctl_cls = secgroups_v21.SecurityGroupController
+    server_secgrp_ctl_cls = secgroups_v21.ServerSecurityGroupController
+    secgrp_act_ctl_cls = secgroups_v21.SecurityGroupActionController
+
+    def setUp(self):
+        super(TestSecurityGroupsV21, self).setUp()
+
+        self.controller = self.secgrp_ctl_cls()
+        self.server_controller = self.server_secgrp_ctl_cls()
+        self.manager = self.secgrp_act_ctl_cls()
+
+        # This needs to be done here to set fake_id because the derived
+        # class needs to be called first if it wants to set
+        # 'security_group_api' and this setUp method needs to be called.
+        if self.controller.security_group_api.id_is_uuid:
+            # Neutron-backed APIs use UUID group ids; nova-network uses
+            # plain integers.
+            self.fake_id = '11111111-1111-1111-1111-111111111111'
+        else:
+            self.fake_id = '11111111'
+
+    def _assert_no_security_groups_reserved(self, context):
+        """Check that no reservations are leaked during tests."""
+        # A failed create must roll back its quota reservation.
+        result = quota.QUOTAS.get_project_quotas(context, context.project_id)
+        self.assertEqual(result['security_groups']['reserved'], 0)
+
+    def _assert_security_groups_in_use(self, project_id, user_id, in_use):
+        # Verify the per-user security-group quota usage counter.
+        context = context_maker.get_admin_context()
+        result = quota.QUOTAS.get_user_quotas(context, project_id, user_id)
+        self.assertEqual(result['security_groups']['in_use'], in_use)
+
+    def test_create_security_group(self):
+        # Happy path: name and description are echoed in the response.
+        sg = security_group_template()
+
+        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+        res_dict = self.controller.create(req, {'security_group': sg})
+        self.assertEqual(res_dict['security_group']['name'], 'test')
+        self.assertEqual(res_dict['security_group']['description'],
+                         'test-description')
+
+    def test_create_security_group_with_no_name(self):
+        # Missing 'name' => 400, and the quota reservation must be
+        # rolled back.  NOTE(review): unlike the sibling tests, the
+        # body here is the bare sg dict, not {'security_group': sg} —
+        # the request is malformed either way, but confirm intent.
+        sg = security_group_template()
+        del sg['name']
+
+        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller.create, req, sg)
+
+        self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+    def test_create_security_group_with_no_description(self):
+        # Missing 'description' => 400 with no leaked reservation.
+        sg = security_group_template()
+        del sg['description']
+
+        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+                          req, {'security_group': sg})
+
+        self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+    def test_create_security_group_with_empty_description(self):
+        # Empty description must surface as HTTPBadRequest with a
+        # specific explanation, not as a raw InvalidInput.
+        sg = security_group_template()
+        sg['description'] = ""
+
+        req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+        try:
+            self.controller.create(req, {'security_group': sg})
+            self.fail('Should have raised BadRequest exception')
+        except webob.exc.HTTPBadRequest as exc:
+            self.assertEqual('description has a minimum character requirement'
+                             ' of 1.', exc.explanation)
+        except exception.InvalidInput as exc:
+            # NOTE(review): this failure message is truncated ("instead
+            # of" what?) and 'exc' is unused — tidy up in a follow-up.
+            self.fail('Should have raised BadRequest exception instead of')
+        self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_with_blank_name(self):
+ sg = security_group_template(name='')
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_with_whitespace_name(self):
+ sg = security_group_template(name=' ')
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_with_blank_description(self):
+ sg = security_group_template(description='')
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_with_whitespace_description(self):
+ sg = security_group_template(description=' ')
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_with_duplicate_name(self):
+ sg = security_group_template()
+
+ # FIXME: Stub out _get instead of creating twice
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.controller.create(req, {'security_group': sg})
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_with_no_body(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, None)
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_with_no_security_group(self):
+ body = {'no-securityGroup': None}
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_above_255_characters_name(self):
+ sg = security_group_template(name='1234567890' * 26)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_above_255_characters_description(self):
+ sg = security_group_template(description='1234567890' * 26)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_non_string_name(self):
+ sg = security_group_template(name=12)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_non_string_description(self):
+ sg = security_group_template(description=12)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group': sg})
+
+ self._assert_no_security_groups_reserved(req.environ['nova.context'])
+
+ def test_create_security_group_quota_limit(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ for num in range(1, CONF.quota_security_groups):
+ name = 'test%s' % num
+ sg = security_group_template(name=name)
+ res_dict = self.controller.create(req, {'security_group': sg})
+ self.assertEqual(res_dict['security_group']['name'], name)
+
+ sg = security_group_template()
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
+ req, {'security_group': sg})
+
+ def test_get_security_group_list(self):
+ groups = []
+ for i, name in enumerate(['default', 'test']):
+ sg = security_group_template(id=i + 1,
+ name=name,
+ description=name + '-desc',
+ rules=[])
+ groups.append(sg)
+ expected = {'security_groups': groups}
+
+ def return_security_groups(context, project_id):
+ return [security_group_db(sg) for sg in groups]
+
+ self.stubs.Set(nova.db, 'security_group_get_by_project',
+ return_security_groups)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ res_dict = self.controller.index(req)
+
+ self.assertEqual(res_dict, expected)
+
+ def test_get_security_group_list_missing_group_id_rule(self):
+ groups = []
+ rule1 = security_group_rule_template(cidr='10.2.3.124/24',
+ parent_group_id=1,
+ group_id={}, id=88,
+ protocol='TCP')
+ rule2 = security_group_rule_template(cidr='10.2.3.125/24',
+ parent_group_id=1,
+ id=99, protocol=88,
+ group_id='HAS_BEEN_DELETED')
+ sg = security_group_template(id=1,
+ name='test',
+ description='test-desc',
+ rules=[rule1, rule2])
+
+ groups.append(sg)
+ # An expected rule here needs to be created as the api returns
+ # different attributes on the rule for a response than what was
+ # passed in. For example:
+ # "cidr": "0.0.0.0/0" ->"ip_range": {"cidr": "0.0.0.0/0"}
+ expected_rule = security_group_rule_template(
+ ip_range={'cidr': '10.2.3.124/24'}, parent_group_id=1,
+ group={}, id=88, ip_protocol='TCP')
+ expected = security_group_template(id=1,
+ name='test',
+ description='test-desc',
+ rules=[expected_rule])
+
+ expected = {'security_groups': [expected]}
+
+ def return_security_groups(context, project, search_opts):
+ return [security_group_db(sg) for sg in groups]
+
+ self.stubs.Set(self.controller.security_group_api, 'list',
+ return_security_groups)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ res_dict = self.controller.index(req)
+
+ self.assertEqual(res_dict, expected)
+
+ def test_get_security_group_list_all_tenants(self):
+ all_groups = []
+ tenant_groups = []
+
+ for i, name in enumerate(['default', 'test']):
+ sg = security_group_template(id=i + 1,
+ name=name,
+ description=name + '-desc',
+ rules=[])
+ all_groups.append(sg)
+ if name == 'default':
+ tenant_groups.append(sg)
+
+ all = {'security_groups': all_groups}
+ tenant_specific = {'security_groups': tenant_groups}
+
+ def return_all_security_groups(context):
+ return [security_group_db(sg) for sg in all_groups]
+
+ self.stubs.Set(nova.db, 'security_group_get_all',
+ return_all_security_groups)
+
+ def return_tenant_security_groups(context, project_id):
+ return [security_group_db(sg) for sg in tenant_groups]
+
+ self.stubs.Set(nova.db, 'security_group_get_by_project',
+ return_tenant_security_groups)
+
+ path = '/v2/fake/os-security-groups'
+
+ req = fakes.HTTPRequest.blank(path, use_admin_context=True)
+ res_dict = self.controller.index(req)
+ self.assertEqual(res_dict, tenant_specific)
+
+ req = fakes.HTTPRequest.blank('%s?all_tenants=1' % path,
+ use_admin_context=True)
+ res_dict = self.controller.index(req)
+ self.assertEqual(res_dict, all)
+
+ def test_get_security_group_by_instance(self):
+ groups = []
+ for i, name in enumerate(['default', 'test']):
+ sg = security_group_template(id=i + 1,
+ name=name,
+ description=name + '-desc',
+ rules=[])
+ groups.append(sg)
+ expected = {'security_groups': groups}
+
+ def return_instance(context, server_id,
+ columns_to_join=None, use_slave=False):
+ self.assertEqual(server_id, FAKE_UUID1)
+ return return_server_by_uuid(context, server_id)
+
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_instance)
+
+ def return_security_groups(context, instance_uuid):
+ self.assertEqual(instance_uuid, FAKE_UUID1)
+ return [security_group_db(sg) for sg in groups]
+
+ self.stubs.Set(nova.db, 'security_group_get_by_instance',
+ return_security_groups)
+
+ req = fakes.HTTPRequest.blank('/v2/%s/servers/%s/os-security-groups' %
+ ('fake', FAKE_UUID1))
+ res_dict = self.server_controller.index(req, FAKE_UUID1)
+
+ self.assertEqual(res_dict, expected)
+
+ @mock.patch('nova.db.instance_get_by_uuid')
+ @mock.patch('nova.db.security_group_get_by_instance', return_value=[])
+ def test_get_security_group_empty_for_instance(self, mock_sec_group,
+ mock_db_get_ins):
+ expected = {'security_groups': []}
+
+ def return_instance(context, server_id,
+ columns_to_join=None, use_slave=False):
+ self.assertEqual(server_id, FAKE_UUID1)
+ return return_server_by_uuid(context, server_id)
+ mock_db_get_ins.side_effect = return_instance
+ req = fakes.HTTPRequest.blank('/v2/%s/servers/%s/os-security-groups' %
+ ('fake', FAKE_UUID1))
+ res_dict = self.server_controller.index(req, FAKE_UUID1)
+ self.assertEqual(expected, res_dict)
+ mock_sec_group.assert_called_once_with(req.environ['nova.context'],
+ FAKE_UUID1)
+
+ def test_get_security_group_by_instance_non_existing(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_nonexistent)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/os-security-groups')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.server_controller.index, req, '1')
+
+ def test_get_security_group_by_instance_invalid_id(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/servers/invalid/os-security-groups')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.server_controller.index, req, 'invalid')
+
+ def test_get_security_group_by_id(self):
+ sg = security_group_template(id=2, rules=[])
+
+ def return_security_group(context, group_id):
+ self.assertEqual(sg['id'], group_id)
+ return security_group_db(sg)
+
+ self.stubs.Set(nova.db, 'security_group_get',
+ return_security_group)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
+ res_dict = self.controller.show(req, '2')
+
+ expected = {'security_group': sg}
+ self.assertEqual(res_dict, expected)
+
+ def test_get_security_group_by_invalid_id(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/invalid')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
+ req, 'invalid')
+
+ def test_get_security_group_by_non_existing_id(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s' %
+ self.fake_id)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, self.fake_id)
+
+ def test_update_security_group(self):
+ sg = security_group_template(id=2, rules=[])
+ sg_update = security_group_template(id=2, rules=[],
+ name='update_name', description='update_desc')
+
+ def return_security_group(context, group_id):
+ self.assertEqual(sg['id'], group_id)
+ return security_group_db(sg)
+
+ def return_update_security_group(context, group_id, values,
+ columns_to_join=None):
+ self.assertEqual(sg_update['id'], group_id)
+ self.assertEqual(sg_update['name'], values['name'])
+ self.assertEqual(sg_update['description'], values['description'])
+ return security_group_db(sg_update)
+
+ self.stubs.Set(nova.db, 'security_group_update',
+ return_update_security_group)
+ self.stubs.Set(nova.db, 'security_group_get',
+ return_security_group)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
+ res_dict = self.controller.update(req, '2',
+ {'security_group': sg_update})
+
+ expected = {'security_group': sg_update}
+ self.assertEqual(res_dict, expected)
+
+ def test_update_security_group_name_to_default(self):
+ sg = security_group_template(id=2, rules=[], name='default')
+
+ def return_security_group(context, group_id):
+ self.assertEqual(sg['id'], group_id)
+ return security_group_db(sg)
+
+ self.stubs.Set(nova.db, 'security_group_get',
+ return_security_group)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, '2', {'security_group': sg})
+
+ def test_update_default_security_group_fail(self):
+ sg = security_group_template()
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, '1', {'security_group': sg})
+
+ def test_delete_security_group_by_id(self):
+ sg = security_group_template(id=1, project_id='fake_project',
+ user_id='fake_user', rules=[])
+
+ self.called = False
+
+ def security_group_destroy(context, id):
+ self.called = True
+
+ def return_security_group(context, group_id):
+ self.assertEqual(sg['id'], group_id)
+ return security_group_db(sg)
+
+ self.stubs.Set(nova.db, 'security_group_destroy',
+ security_group_destroy)
+ self.stubs.Set(nova.db, 'security_group_get',
+ return_security_group)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
+ self.controller.delete(req, '1')
+
+ self.assertTrue(self.called)
+
+ def test_delete_security_group_by_admin(self):
+ sg = security_group_template(id=2, rules=[])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups')
+ self.controller.create(req, {'security_group': sg})
+ context = req.environ['nova.context']
+
+ # Ensure quota usage for security group is correct.
+ self._assert_security_groups_in_use(context.project_id,
+ context.user_id, 2)
+
+ # Delete the security group by admin.
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/2',
+ use_admin_context=True)
+ self.controller.delete(req, '2')
+
+ # Ensure quota for security group in use is released.
+ self._assert_security_groups_in_use(context.project_id,
+ context.user_id, 1)
+
+ def test_delete_security_group_by_invalid_id(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/invalid')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
+ req, 'invalid')
+
+ def test_delete_security_group_by_non_existing_id(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
+ % self.fake_id)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, self.fake_id)
+
+ def test_delete_security_group_in_use(self):
+ sg = security_group_template(id=1, rules=[])
+
+ def security_group_in_use(context, id):
+ return True
+
+ def return_security_group(context, group_id):
+ self.assertEqual(sg['id'], group_id)
+ return security_group_db(sg)
+
+ self.stubs.Set(nova.db, 'security_group_in_use',
+ security_group_in_use)
+ self.stubs.Set(nova.db, 'security_group_get',
+ return_security_group)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/1')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
+ req, '1')
+
+ def test_associate_by_non_existing_security_group_name(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.assertEqual(return_server(None, '1'),
+ nova.db.instance_get(None, '1'))
+ body = dict(addSecurityGroup=dict(name='non-existing'))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._addSecurityGroup, req, '1', body)
+
+ def test_associate_by_invalid_server_id(self):
+ body = dict(addSecurityGroup=dict(name='test'))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/invalid/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._addSecurityGroup, req, 'invalid', body)
+
+ def test_associate_without_body(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ body = dict(addSecurityGroup=None)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._addSecurityGroup, req, '1', body)
+
+ def test_associate_no_security_group_name(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ body = dict(addSecurityGroup=dict())
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._addSecurityGroup, req, '1', body)
+
+ def test_associate_security_group_name_with_whitespaces(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ body = dict(addSecurityGroup=dict(name=" "))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._addSecurityGroup, req, '1', body)
+
+ def test_associate_non_existing_instance(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_nonexistent)
+ body = dict(addSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._addSecurityGroup, req, '1', body)
+
+ def test_associate_non_running_instance(self):
+ self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_non_running_server)
+ self.stubs.Set(nova.db, 'security_group_get_by_name',
+ return_security_group_without_instances)
+ body = dict(addSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.manager._addSecurityGroup(req, '1', body)
+
+ def test_associate_already_associated_security_group_to_instance(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_by_uuid)
+ self.stubs.Set(nova.db, 'security_group_get_by_name',
+ return_security_group_by_name)
+ body = dict(addSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._addSecurityGroup, req, '1', body)
+
+ def test_associate(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_by_uuid)
+ self.mox.StubOutWithMock(nova.db, 'instance_add_security_group')
+ nova.db.instance_add_security_group(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.stubs.Set(nova.db, 'security_group_get_by_name',
+ return_security_group_without_instances)
+ self.mox.ReplayAll()
+
+ body = dict(addSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.manager._addSecurityGroup(req, '1', body)
+
+ def test_disassociate_by_non_existing_security_group_name(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.assertEqual(return_server(None, '1'),
+ nova.db.instance_get(None, '1'))
+ body = dict(removeSecurityGroup=dict(name='non-existing'))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._removeSecurityGroup, req, '1', body)
+
+ def test_disassociate_by_invalid_server_id(self):
+ self.stubs.Set(nova.db, 'security_group_get_by_name',
+ return_security_group_by_name)
+ body = dict(removeSecurityGroup=dict(name='test'))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/invalid/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._removeSecurityGroup, req, 'invalid',
+ body)
+
+ def test_disassociate_without_body(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ body = dict(removeSecurityGroup=None)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._removeSecurityGroup, req, '1', body)
+
+ def test_disassociate_no_security_group_name(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ body = dict(removeSecurityGroup=dict())
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._removeSecurityGroup, req, '1', body)
+
+ def test_disassociate_security_group_name_with_whitespaces(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ body = dict(removeSecurityGroup=dict(name=" "))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._removeSecurityGroup, req, '1', body)
+
+ def test_disassociate_non_existing_instance(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
+ self.stubs.Set(nova.db, 'security_group_get_by_name',
+ return_security_group_by_name)
+ body = dict(removeSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.manager._removeSecurityGroup, req, '1', body)
+
+ def test_disassociate_non_running_instance(self):
+ self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_non_running_server)
+ self.stubs.Set(nova.db, 'security_group_get_by_name',
+ return_security_group_by_name)
+ body = dict(removeSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.manager._removeSecurityGroup(req, '1', body)
+
+ def test_disassociate_already_associated_security_group_to_instance(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_by_uuid)
+ self.stubs.Set(nova.db, 'security_group_get_by_name',
+ return_security_group_without_instances)
+ body = dict(removeSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.manager._removeSecurityGroup, req, '1', body)
+
+ def test_disassociate(self):
+ self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_by_uuid)
+ self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group')
+ nova.db.instance_remove_security_group(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.stubs.Set(nova.db, 'security_group_get_by_name',
+ return_security_group_by_name)
+ self.mox.ReplayAll()
+
+ body = dict(removeSecurityGroup=dict(name="test"))
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ self.manager._removeSecurityGroup(req, '1', body)
+
+
+class TestSecurityGroupsV2(TestSecurityGroupsV21):
+    # Re-run the whole v2.1 security-group test suite against the
+    # legacy v2 controllers by overriding the controller classes that
+    # the parent's setUp instantiates.
+    secgrp_ctl_cls = secgroups_v2.SecurityGroupController
+    server_secgrp_ctl_cls = secgroups_v2.ServerSecurityGroupController
+    secgrp_act_ctl_cls = secgroups_v2.SecurityGroupActionController
+
+
+class TestSecurityGroupRulesV21(test.TestCase):
+ secgrp_ctl_cls = secgroups_v21.SecurityGroupRulesController
+
+    def setUp(self):
+        # Build two template groups whose ids are UUID strings or small
+        # integers depending on whether the controller's security group
+        # API uses uuid ids, then stub db.security_group_get so lookups
+        # resolve only those two ids; anything else raises
+        # SecurityGroupNotFound.
+        super(TestSecurityGroupRulesV21, self).setUp()
+
+        self.controller = self.secgrp_ctl_cls()
+        if self.controller.security_group_api.id_is_uuid:
+            id1 = '11111111-1111-1111-1111-111111111111'
+            id2 = '22222222-2222-2222-2222-222222222222'
+            # An id that is syntactically valid but never provisioned.
+            self.invalid_id = '33333333-3333-3333-3333-333333333333'
+        else:
+            id1 = 1
+            id2 = 2
+            self.invalid_id = '33333333'
+
+        self.sg1 = security_group_template(id=id1)
+        self.sg2 = security_group_template(
+            id=id2, name='authorize_revoke',
+            description='authorize-revoke testing')
+
+        db1 = security_group_db(self.sg1)
+        db2 = security_group_db(self.sg2)
+
+        def return_security_group(context, group_id, columns_to_join=None):
+            if group_id == db1['id']:
+                return db1
+            if group_id == db2['id']:
+                return db2
+            raise exception.SecurityGroupNotFound(security_group_id=group_id)
+
+        self.stubs.Set(nova.db, 'security_group_get',
+                       return_security_group)
+
+        # sg2 acts as the parent group for the rule-creation tests.
+        self.parent_security_group = db2
+
+ def test_create_by_cidr(self):
+ rule = security_group_rule_template(cidr='10.2.3.124/24',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+ security_group_rule = res_dict['security_group_rule']
+ self.assertNotEqual(security_group_rule['id'], 0)
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.sg2['id'])
+ self.assertEqual(security_group_rule['ip_range']['cidr'],
+ "10.2.3.124/24")
+
+ def test_create_by_group_id(self):
+ rule = security_group_rule_template(group_id=self.sg1['id'],
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+
+ security_group_rule = res_dict['security_group_rule']
+ self.assertNotEqual(security_group_rule['id'], 0)
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.sg2['id'])
+
+ def test_create_by_same_group_id(self):
+ rule1 = security_group_rule_template(group_id=self.sg1['id'],
+ from_port=80, to_port=80,
+ parent_group_id=self.sg2['id'])
+ self.parent_security_group['rules'] = [security_group_rule_db(rule1)]
+
+ rule2 = security_group_rule_template(group_id=self.sg1['id'],
+ from_port=81, to_port=81,
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule2})
+
+ security_group_rule = res_dict['security_group_rule']
+ self.assertNotEqual(security_group_rule['id'], 0)
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.sg2['id'])
+ self.assertEqual(security_group_rule['from_port'], 81)
+ self.assertEqual(security_group_rule['to_port'], 81)
+
+ def test_create_none_value_from_to_port(self):
+ rule = {'parent_group_id': self.sg1['id'],
+ 'group_id': self.sg1['id']}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+ security_group_rule = res_dict['security_group_rule']
+ self.assertIsNone(security_group_rule['from_port'])
+ self.assertIsNone(security_group_rule['to_port'])
+ self.assertEqual(security_group_rule['group']['name'], 'test')
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.sg1['id'])
+
+ def test_create_none_value_from_to_port_icmp(self):
+ rule = {'parent_group_id': self.sg1['id'],
+ 'group_id': self.sg1['id'],
+ 'ip_protocol': 'ICMP'}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+ security_group_rule = res_dict['security_group_rule']
+ self.assertEqual(security_group_rule['ip_protocol'], 'ICMP')
+ self.assertEqual(security_group_rule['from_port'], -1)
+ self.assertEqual(security_group_rule['to_port'], -1)
+ self.assertEqual(security_group_rule['group']['name'], 'test')
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.sg1['id'])
+
+ def test_create_none_value_from_to_port_tcp(self):
+ rule = {'parent_group_id': self.sg1['id'],
+ 'group_id': self.sg1['id'],
+ 'ip_protocol': 'TCP'}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+ security_group_rule = res_dict['security_group_rule']
+ self.assertEqual(security_group_rule['ip_protocol'], 'TCP')
+ self.assertEqual(security_group_rule['from_port'], 1)
+ self.assertEqual(security_group_rule['to_port'], 65535)
+ self.assertEqual(security_group_rule['group']['name'], 'test')
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.sg1['id'])
+
+ def test_create_by_invalid_cidr_json(self):
+ rule = security_group_rule_template(
+ ip_protocol="tcp",
+ from_port=22,
+ to_port=22,
+ parent_group_id=self.sg2['id'],
+ cidr="10.2.3.124/2433")
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_by_invalid_tcp_port_json(self):
+ rule = security_group_rule_template(
+ ip_protocol="tcp",
+ from_port=75534,
+ to_port=22,
+ parent_group_id=self.sg2['id'],
+ cidr="10.2.3.124/24")
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_by_invalid_icmp_port_json(self):
+ rule = security_group_rule_template(
+ ip_protocol="icmp",
+ from_port=1,
+ to_port=256,
+ parent_group_id=self.sg2['id'],
+ cidr="10.2.3.124/24")
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_add_existing_rules_by_cidr(self):
+ rule = security_group_rule_template(cidr='10.0.0.0/24',
+ parent_group_id=self.sg2['id'])
+
+ self.parent_security_group['rules'] = [security_group_rule_db(rule)]
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_add_existing_rules_by_group_id(self):
+ rule = security_group_rule_template(group_id=1)
+
+ self.parent_security_group['rules'] = [security_group_rule_db(rule)]
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_no_body(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, None)
+
+ def test_create_with_no_security_group_rule_in_body(self):
+ rules = {'test': 'test'}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, rules)
+
+ def test_create_with_invalid_parent_group_id(self):
+ rule = security_group_rule_template(parent_group_id='invalid')
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_non_existing_parent_group_id(self):
+ rule = security_group_rule_template(group_id=None,
+ parent_group_id=self.invalid_id)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_non_existing_group_id(self):
+ rule = security_group_rule_template(group_id='invalid',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_invalid_protocol(self):
+ rule = security_group_rule_template(ip_protocol='invalid-protocol',
+ cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_no_protocol(self):
+ rule = security_group_rule_template(cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
+ del rule['ip_protocol']
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_invalid_from_port(self):
+ rule = security_group_rule_template(from_port='666666',
+ cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_invalid_to_port(self):
+ rule = security_group_rule_template(to_port='666666',
+ cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_non_numerical_from_port(self):
+ rule = security_group_rule_template(from_port='invalid',
+ cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_non_numerical_to_port(self):
+ rule = security_group_rule_template(to_port='invalid',
+ cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_no_from_port(self):
+ rule = security_group_rule_template(cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
+ del rule['from_port']
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_no_to_port(self):
+ rule = security_group_rule_template(cidr='10.2.2.0/24',
+ parent_group_id=self.sg2['id'])
+ del rule['to_port']
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_invalid_cidr(self):
+ rule = security_group_rule_template(cidr='10.2.2222.0/24',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_no_cidr_group(self):
+ rule = security_group_rule_template(parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+
+ security_group_rule = res_dict['security_group_rule']
+ self.assertNotEqual(security_group_rule['id'], 0)
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.parent_security_group['id'])
+ self.assertEqual(security_group_rule['ip_range']['cidr'],
+ "0.0.0.0/0")
+
+ def test_create_with_invalid_group_id(self):
+ rule = security_group_rule_template(group_id='invalid',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_empty_group_id(self):
+ rule = security_group_rule_template(group_id='',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_nonexist_group_id(self):
+ rule = security_group_rule_template(group_id=self.invalid_id,
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_with_same_group_parent_id_and_group_id(self):
+ rule = security_group_rule_template(group_id=self.sg1['id'],
+ parent_group_id=self.sg1['id'])
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+ security_group_rule = res_dict['security_group_rule']
+ self.assertNotEqual(security_group_rule['id'], 0)
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.sg1['id'])
+ self.assertEqual(security_group_rule['group']['name'],
+ self.sg1['name'])
+
+ def _test_create_with_no_ports_and_no_group(self, proto):
+ rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id']}
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def _test_create_with_no_ports(self, proto):
+ rule = {'ip_protocol': proto, 'parent_group_id': self.sg2['id'],
+ 'group_id': self.sg1['id']}
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+ security_group_rule = res_dict['security_group_rule']
+ expected_rule = {
+ 'from_port': 1, 'group': {'tenant_id': '123', 'name': 'test'},
+ 'ip_protocol': proto, 'to_port': 65535, 'parent_group_id':
+ self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
+ }
+ if proto == 'icmp':
+ expected_rule['to_port'] = -1
+ expected_rule['from_port'] = -1
+ self.assertEqual(expected_rule, security_group_rule)
+
+ def test_create_with_no_ports_icmp(self):
+ self._test_create_with_no_ports_and_no_group('icmp')
+ self._test_create_with_no_ports('icmp')
+
+ def test_create_with_no_ports_tcp(self):
+ self._test_create_with_no_ports_and_no_group('tcp')
+ self._test_create_with_no_ports('tcp')
+
+ def test_create_with_no_ports_udp(self):
+ self._test_create_with_no_ports_and_no_group('udp')
+ self._test_create_with_no_ports('udp')
+
+ def _test_create_with_ports(self, proto, from_port, to_port):
+ rule = {
+ 'ip_protocol': proto, 'from_port': from_port, 'to_port': to_port,
+ 'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
+ }
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+
+ security_group_rule = res_dict['security_group_rule']
+ expected_rule = {
+ 'from_port': from_port,
+ 'group': {'tenant_id': '123', 'name': 'test'},
+ 'ip_protocol': proto, 'to_port': to_port, 'parent_group_id':
+ self.sg2['id'], 'ip_range': {}, 'id': security_group_rule['id']
+ }
+ self.assertEqual(proto, security_group_rule['ip_protocol'])
+ self.assertEqual(from_port, security_group_rule['from_port'])
+ self.assertEqual(to_port, security_group_rule['to_port'])
+ self.assertEqual(expected_rule, security_group_rule)
+
+ def test_create_with_ports_icmp(self):
+ self._test_create_with_ports('icmp', 0, 1)
+ self._test_create_with_ports('icmp', 0, 0)
+ self._test_create_with_ports('icmp', 1, 0)
+
+ def test_create_with_ports_tcp(self):
+ self._test_create_with_ports('tcp', 1, 1)
+ self._test_create_with_ports('tcp', 1, 65535)
+ self._test_create_with_ports('tcp', 65535, 65535)
+
+ def test_create_with_ports_udp(self):
+ self._test_create_with_ports('udp', 1, 1)
+ self._test_create_with_ports('udp', 1, 65535)
+ self._test_create_with_ports('udp', 65535, 65535)
+
+ def test_delete(self):
+ rule = security_group_rule_template(id=self.sg2['id'],
+ parent_group_id=self.sg2['id'])
+
+ def security_group_rule_get(context, id):
+ return security_group_rule_db(rule)
+
+ def security_group_rule_destroy(context, id):
+ pass
+
+ self.stubs.Set(nova.db, 'security_group_rule_get',
+ security_group_rule_get)
+ self.stubs.Set(nova.db, 'security_group_rule_destroy',
+ security_group_rule_destroy)
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
+ % self.sg2['id'])
+ self.controller.delete(req, self.sg2['id'])
+
+ def test_delete_invalid_rule_id(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules' +
+ '/invalid')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
+ req, 'invalid')
+
+ def test_delete_non_existing_rule_id(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules/%s'
+ % self.invalid_id)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, self.invalid_id)
+
+ def test_create_rule_quota_limit(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ for num in range(100, 100 + CONF.quota_security_group_rules):
+ rule = {
+ 'ip_protocol': 'tcp', 'from_port': num,
+ 'to_port': num, 'parent_group_id': self.sg2['id'],
+ 'group_id': self.sg1['id']
+ }
+ self.controller.create(req, {'security_group_rule': rule})
+
+ rule = {
+ 'ip_protocol': 'tcp', 'from_port': '121', 'to_port': '121',
+ 'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
+ }
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
+ req, {'security_group_rule': rule})
+
+ def test_create_rule_cidr_allow_all(self):
+ rule = security_group_rule_template(cidr='0.0.0.0/0',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+
+ security_group_rule = res_dict['security_group_rule']
+ self.assertNotEqual(security_group_rule['id'], 0)
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.parent_security_group['id'])
+ self.assertEqual(security_group_rule['ip_range']['cidr'],
+ "0.0.0.0/0")
+
+ def test_create_rule_cidr_ipv6_allow_all(self):
+ rule = security_group_rule_template(cidr='::/0',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+
+ security_group_rule = res_dict['security_group_rule']
+ self.assertNotEqual(security_group_rule['id'], 0)
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.parent_security_group['id'])
+ self.assertEqual(security_group_rule['ip_range']['cidr'],
+ "::/0")
+
+ def test_create_rule_cidr_allow_some(self):
+ rule = security_group_rule_template(cidr='15.0.0.0/8',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ res_dict = self.controller.create(req, {'security_group_rule': rule})
+
+ security_group_rule = res_dict['security_group_rule']
+ self.assertNotEqual(security_group_rule['id'], 0)
+ self.assertEqual(security_group_rule['parent_group_id'],
+ self.parent_security_group['id'])
+ self.assertEqual(security_group_rule['ip_range']['cidr'],
+ "15.0.0.0/8")
+
+ def test_create_rule_cidr_bad_netmask(self):
+ rule = security_group_rule_template(cidr='15.0.0.0/0')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
+
class TestSecurityGroupRulesV2(TestSecurityGroupRulesV21):
    # Re-run the whole V2.1 rule test matrix against the legacy v2
    # extension controller; only the controller class differs.
    secgrp_ctl_cls = secgroups_v2.SecurityGroupRulesController
+
+
class TestSecurityGroupRulesXMLDeserializer(test.TestCase):
    """XML deserialization of security group rule create requests."""

    def setUp(self):
        super(TestSecurityGroupRulesXMLDeserializer, self).setUp()
        self.deserializer = secgroups_v2.SecurityGroupRulesXMLDeserializer()

    def _check(self, serial_request, expected):
        # Shared deserialize-then-compare step for all create tests.
        request = self.deserializer.deserialize(serial_request)
        self.assertEqual(request['body'], expected)

    def test_create_request(self):
        serial_request = """
<security_group_rule>
  <parent_group_id>12</parent_group_id>
  <from_port>22</from_port>
  <to_port>22</to_port>
  <group_id></group_id>
  <ip_protocol>tcp</ip_protocol>
  <cidr>10.0.0.0/24</cidr>
</security_group_rule>"""
        self._check(serial_request, {
            "security_group_rule": {
                "parent_group_id": "12",
                "from_port": "22",
                "to_port": "22",
                "ip_protocol": "tcp",
                "group_id": "",
                "cidr": "10.0.0.0/24",
            },
        })

    def test_create_no_protocol_request(self):
        serial_request = """
<security_group_rule>
  <parent_group_id>12</parent_group_id>
  <from_port>22</from_port>
  <to_port>22</to_port>
  <group_id></group_id>
  <cidr>10.0.0.0/24</cidr>
</security_group_rule>"""
        self._check(serial_request, {
            "security_group_rule": {
                "parent_group_id": "12",
                "from_port": "22",
                "to_port": "22",
                "group_id": "",
                "cidr": "10.0.0.0/24",
            },
        })

    def test_corrupt_xml(self):
        """Should throw a 400 error on corrupt xml."""
        self.assertRaises(
            exception.MalformedRequestBody,
            self.deserializer.deserialize,
            utils.killer_xml_body())
+
+
class TestSecurityGroupXMLDeserializer(test.TestCase):
    """XML deserialization of security group create requests."""

    def setUp(self):
        super(TestSecurityGroupXMLDeserializer, self).setUp()
        self.deserializer = secgroups_v2.SecurityGroupXMLDeserializer()

    def _check(self, serial_request, expected):
        # Shared deserialize-then-compare step for all create tests.
        request = self.deserializer.deserialize(serial_request)
        self.assertEqual(request['body'], expected)

    def test_create_request(self):
        serial_request = """
<security_group name="test">
   <description>test</description>
</security_group>"""
        self._check(serial_request, {
            "security_group": {
                "name": "test",
                "description": "test",
            },
        })

    def test_create_no_description_request(self):
        serial_request = """
<security_group name="test">
</security_group>"""
        self._check(serial_request, {
            "security_group": {
                "name": "test",
            },
        })

    def test_create_no_name_request(self):
        serial_request = """
<security_group>
<description>test</description>
</security_group>"""
        self._check(serial_request, {
            "security_group": {
                "description": "test",
            },
        })

    def test_corrupt_xml(self):
        """Should throw a 400 error on corrupt xml."""
        self.assertRaises(
            exception.MalformedRequestBody,
            self.deserializer.deserialize,
            utils.killer_xml_body())
+
+
class TestSecurityGroupXMLSerializer(test.TestCase):
    """XML serialization of security groups, rules and group lists."""

    def setUp(self):
        super(TestSecurityGroupXMLSerializer, self).setUp()
        self.namespace = wsgi.XMLNS_V11
        self.rule_serializer = secgroups_v2.SecurityGroupRuleTemplate()
        self.index_serializer = secgroups_v2.SecurityGroupsTemplate()
        self.default_serializer = secgroups_v2.SecurityGroupTemplate()

    def _tag(self, elem):
        """Assert *elem* is in the expected namespace; return its local tag."""
        tagname = elem.tag
        self.assertEqual(tagname[0], '{')
        tmp = tagname.partition('}')
        namespace = tmp[0][1:]
        self.assertEqual(namespace, self.namespace)
        return tmp[2]

    def _verify_security_group_rule(self, raw_rule, tree):
        """Check every element of a serialized rule against *raw_rule*."""
        self.assertEqual(raw_rule['id'], tree.get('id'))
        self.assertEqual(raw_rule['parent_group_id'],
                         tree.get('parent_group_id'))

        # Track every tag seen so omissions are caught, not just mismatches.
        seen = set()
        expected = set(['ip_protocol', 'from_port', 'to_port',
                        'group', 'group/name', 'group/tenant_id',
                        'ip_range', 'ip_range/cidr'])

        for child in tree:
            child_tag = self._tag(child)
            self.assertIn(child_tag, raw_rule)
            seen.add(child_tag)
            if child_tag in ('group', 'ip_range'):
                # These two are nested one level deep.
                for gr_child in child:
                    gr_child_tag = self._tag(gr_child)
                    self.assertIn(gr_child_tag, raw_rule[child_tag])
                    seen.add('%s/%s' % (child_tag, gr_child_tag))
                    self.assertEqual(gr_child.text,
                                     raw_rule[child_tag][gr_child_tag])
            else:
                self.assertEqual(child.text, raw_rule[child_tag])
        self.assertEqual(seen, expected)

    def _verify_security_group(self, raw_group, tree):
        """Check a serialized group element, including its nested rules."""
        rules = raw_group['rules']
        self.assertEqual('security_group', self._tag(tree))
        self.assertEqual(raw_group['id'], tree.get('id'))
        self.assertEqual(raw_group['tenant_id'], tree.get('tenant_id'))
        self.assertEqual(raw_group['name'], tree.get('name'))
        # Exactly two children: <rules> and <description>.
        self.assertEqual(2, len(tree))
        for child in tree:
            child_tag = self._tag(child)
            if child_tag == 'rules':
                self.assertEqual(2, len(child))
                for idx, gr_child in enumerate(child):
                    self.assertEqual(self._tag(gr_child), 'rule')
                    self._verify_security_group_rule(rules[idx], gr_child)
            else:
                self.assertEqual('description', child_tag)
                self.assertEqual(raw_group['description'], child.text)

    def test_rule_serializer(self):
        """A single rule serializes to a <security_group_rule> element."""
        raw_rule = dict(
            id='123',
            parent_group_id='456',
            ip_protocol='tcp',
            from_port='789',
            to_port='987',
            group=dict(name='group', tenant_id='tenant'),
            ip_range=dict(cidr='10.0.0.0/8'))
        rule = dict(security_group_rule=raw_rule)
        text = self.rule_serializer.serialize(rule)

        tree = etree.fromstring(text)

        self.assertEqual('security_group_rule', self._tag(tree))
        self._verify_security_group_rule(raw_rule, tree)

    def test_group_serializer(self):
        """A group with two rules serializes to <security_group>."""
        rules = [dict(
                id='123',
                parent_group_id='456',
                ip_protocol='tcp',
                from_port='789',
                to_port='987',
                group=dict(name='group1', tenant_id='tenant1'),
                ip_range=dict(cidr='10.55.44.0/24')),
            dict(
                id='654',
                parent_group_id='321',
                ip_protocol='udp',
                from_port='234',
                to_port='567',
                group=dict(name='group2', tenant_id='tenant2'),
                ip_range=dict(cidr='10.44.55.0/24'))]
        raw_group = dict(
            id='890',
            description='description',
            name='name',
            tenant_id='tenant',
            rules=rules)
        sg_group = dict(security_group=raw_group)
        text = self.default_serializer.serialize(sg_group)

        tree = etree.fromstring(text)

        self._verify_security_group(raw_group, tree)

    def test_groups_serializer(self):
        """A list of groups serializes to <security_groups>."""
        rules = [dict(
                id='123',
                parent_group_id='1234',
                ip_protocol='tcp',
                from_port='12345',
                to_port='123456',
                group=dict(name='group1', tenant_id='tenant1'),
                ip_range=dict(cidr='10.123.0.0/24')),
            dict(
                id='234',
                parent_group_id='2345',
                ip_protocol='udp',
                from_port='23456',
                to_port='234567',
                group=dict(name='group2', tenant_id='tenant2'),
                ip_range=dict(cidr='10.234.0.0/24')),
            dict(
                id='345',
                parent_group_id='3456',
                ip_protocol='tcp',
                from_port='34567',
                to_port='345678',
                group=dict(name='group3', tenant_id='tenant3'),
                ip_range=dict(cidr='10.345.0.0/24')),
            dict(
                id='456',
                parent_group_id='4567',
                ip_protocol='udp',
                from_port='45678',
                to_port='456789',
                group=dict(name='group4', tenant_id='tenant4'),
                ip_range=dict(cidr='10.456.0.0/24'))]
        groups = [dict(
                id='567',
                description='description1',
                name='name1',
                tenant_id='tenant1',
                rules=rules[0:2]),
            dict(
                id='678',
                description='description2',
                name='name2',
                tenant_id='tenant2',
                rules=rules[2:4])]
        sg_groups = dict(security_groups=groups)
        text = self.index_serializer.serialize(sg_groups)

        tree = etree.fromstring(text)

        self.assertEqual('security_groups', self._tag(tree))
        self.assertEqual(len(groups), len(tree))
        for idx, child in enumerate(tree):
            self._verify_security_group(groups[idx], child)
+
+
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+
+
def fake_compute_get_all(*args, **kwargs):
    """Fake compute API get_all: two instances, two security groups each."""
    common = {'id': 1, 'description': 'foo', 'user_id': 'bar',
              'project_id': 'baz', 'deleted': False, 'deleted_at': None,
              'updated_at': None, 'created_at': None}
    db_list = []
    for idx, uuid in ((0, UUID1), (1, UUID2)):
        secgroups = [dict(common, name='fake-%d-%d' % (idx, n))
                     for n in (0, 1)]
        db_list.append(fakes.stub_instance(idx + 1, uuid=uuid,
                                           security_groups=secgroups))

    return instance_obj._make_instance_list(
        args[1], objects.InstanceList(), db_list,
        ['metadata', 'system_metadata', 'security_groups', 'info_cache'])
+
+
def fake_compute_get(*args, **kwargs):
    """Fake compute API get: one instance (UUID3) with two groups."""
    stub = fakes.stub_instance(1, uuid=UUID3,
                               security_groups=[{'name': 'fake-2-0'},
                                                {'name': 'fake-2-1'}])
    return fake_instance.fake_instance_obj(
        args[1], expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, **stub)
+
+
def fake_compute_create(*args, **kwargs):
    """Fake compute API create: one instance plus an empty reservation id."""
    instance = fake_compute_get(*args, **kwargs)
    return ([instance], '')
+
+
def fake_get_instances_security_groups_bindings(inst, context, servers):
    """Map each requested server id to its canned security group list."""
    groups = {UUID1: [{'name': 'fake-0-0'}, {'name': 'fake-0-1'}],
              UUID2: [{'name': 'fake-1-0'}, {'name': 'fake-1-1'}],
              UUID3: [{'name': 'fake-2-0'}, {'name': 'fake-2-1'}]}
    return {server['id']: groups.get(server['id']) for server in servers}
+
+
class SecurityGroupsOutputTestV21(test.TestCase):
    """Security group info embedded in server create/show/detail output."""
    base_url = '/v2/fake/servers'
    content_type = 'application/json'

    def setUp(self):
        super(SecurityGroupsOutputTestV21, self).setUp()
        # Stub out networking and the compute API entry points the
        # servers controller calls, then build the WSGI app under test.
        fakes.stub_out_nw_api(self.stubs)
        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
        self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
        self.stubs.Set(compute.api.API, 'create', fake_compute_create)
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Security_groups'])
        self.app = self._setup_app()

    def _setup_app(self):
        # Overridden by the V2 subclass to exercise the legacy app.
        return fakes.wsgi_app_v21(init_only=('os-security-groups', 'servers'))

    def _make_request(self, url, body=None):
        """Issue a GET, or a POST if *body* is given, and return the response."""
        req = webob.Request.blank(url)
        if body:
            req.method = 'POST'
            req.body = self._encode_body(body)
        req.content_type = self.content_type
        req.headers['Accept'] = self.content_type
        res = req.get_response(self.app)
        return res

    def _encode_body(self, body):
        return jsonutils.dumps(body)

    def _get_server(self, body):
        return jsonutils.loads(body).get('server')

    def _get_servers(self, body):
        return jsonutils.loads(body).get('servers')

    def _get_groups(self, server):
        return server.get('security_groups')

    def test_create(self):
        # A created server reports the fake-2-* groups from the stub.
        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
        res = self._make_request(self.base_url, {'server': server})
        self.assertEqual(res.status_int, 202)
        server = self._get_server(res.body)
        for i, group in enumerate(self._get_groups(server)):
            name = 'fake-2-%s' % i
            self.assertEqual(group.get('name'), name)

    def test_show(self):
        url = self.base_url + '/' + UUID3
        res = self._make_request(url)

        self.assertEqual(res.status_int, 200)
        server = self._get_server(res.body)
        for i, group in enumerate(self._get_groups(server)):
            name = 'fake-2-%s' % i
            self.assertEqual(group.get('name'), name)

    def test_detail(self):
        # Each server in the detail listing carries its own group names.
        url = self.base_url + '/detail'
        res = self._make_request(url)

        self.assertEqual(res.status_int, 200)
        for i, server in enumerate(self._get_servers(res.body)):
            for j, group in enumerate(self._get_groups(server)):
                name = 'fake-%s-%s' % (i, j)
                self.assertEqual(group.get('name'), name)

    def test_no_instance_passthrough_404(self):

        def fake_compute_get(*args, **kwargs):
            raise exception.InstanceNotFound(instance_id='fake')

        self.stubs.Set(compute.api.API, 'get', fake_compute_get)
        url = self.base_url + '/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
        res = self._make_request(url)

        self.assertEqual(res.status_int, 404)
+
+
class SecurityGroupsOutputTestV2(SecurityGroupsOutputTestV21):
    # Same assertions as V2.1, routed through the legacy v2 WSGI app.

    def _setup_app(self):
        return fakes.wsgi_app(init_only=('servers',))
+
+
class SecurityGroupsOutputXmlTest(SecurityGroupsOutputTestV2):
    """Same output tests over the XML content type."""
    content_type = 'application/xml'

    class MinimalCreateServerTemplate(xmlutil.TemplateBuilder):
        # Just enough of a server template to serialize create requests.
        def construct(self):
            root = xmlutil.TemplateElement('server', selector='server')
            root.set('name')
            root.set('id')
            root.set('imageRef')
            root.set('flavorRef')
            return xmlutil.MasterTemplate(root, 1,
                                          nsmap={None: xmlutil.XMLNS_V11})

    def _encode_body(self, body):
        serializer = self.MinimalCreateServerTemplate()
        return serializer.serialize(body)

    def _get_server(self, body):
        return etree.XML(body)

    def _get_servers(self, body):
        return etree.XML(body).getchildren()

    def _get_groups(self, server):
        # NOTE(vish): we are adding security groups without an extension
        #             namespace so we don't break people using the existing
        #             functionality, but that means we need to use find with
        #             the existing server namespace.
        namespace = server.nsmap[None]
        return server.find('{%s}security_groups' % namespace).getchildren()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_server_diagnostics.py b/nova/tests/unit/api/openstack/compute/contrib/test_server_diagnostics.py
new file mode 100644
index 0000000000..535a1afa15
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_server_diagnostics.py
@@ -0,0 +1,132 @@
+# Copyright 2011 Eldar Nugaev
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from lxml import etree
+import mock
+from oslo.serialization import jsonutils
+
+from nova.api.openstack import compute
+from nova.api.openstack.compute.contrib import server_diagnostics
+from nova.api.openstack import wsgi
+from nova.compute import api as compute_api
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+UUID = 'abc'
+
+
def fake_get_diagnostics(self, _context, instance_uuid):
    """Fake compute API get_diagnostics returning a fixed payload."""
    return dict(data='Some diagnostic info')
+
+
def fake_instance_get(self, _context, instance_uuid, want_objects=False,
                      expected_attrs=None):
    """Fake compute API get: only the module-level UUID is known."""
    if instance_uuid == UUID:
        return {'uuid': instance_uuid}
    raise Exception("Invalid UUID")
+
+
class ServerDiagnosticsTestV21(test.NoDBTestCase):
    """Status-code behavior of the server diagnostics API (v2.1)."""

    def _setup_router(self):
        # Overridden by the V2 subclass to build the legacy router.
        self.router = compute.APIRouterV3(init_only=('servers',
                                                     'os-server-diagnostics'))

    def _get_request(self):
        return fakes.HTTPRequestV3.blank(
            '/servers/%s/diagnostics' % UUID)

    def setUp(self):
        super(ServerDiagnosticsTestV21, self).setUp()
        self._setup_router()

    @mock.patch.object(compute_api.API, 'get_diagnostics',
                       fake_get_diagnostics)
    @mock.patch.object(compute_api.API, 'get',
                       fake_instance_get)
    def test_get_diagnostics(self):
        # Happy path: the fake payload is returned as JSON.
        req = self._get_request()
        res = req.get_response(self.router)
        output = jsonutils.loads(res.body)
        self.assertEqual(output, {'data': 'Some diagnostic info'})

    @mock.patch.object(compute_api.API, 'get_diagnostics',
                       fake_get_diagnostics)
    @mock.patch.object(compute_api.API, 'get',
                side_effect=exception.InstanceNotFound(instance_id=UUID))
    def test_get_diagnostics_with_non_existed_instance(self, mock_get):
        # Unknown instance maps to 404.
        req = self._get_request()
        res = req.get_response(self.router)
        self.assertEqual(res.status_int, 404)

    @mock.patch.object(compute_api.API, 'get_diagnostics',
                side_effect=exception.InstanceInvalidState('fake message'))
    @mock.patch.object(compute_api.API, 'get', fake_instance_get)
    def test_get_diagnostics_raise_conflict_on_invalid_state(self,
                                                  mock_get_diagnostics):
        # Invalid instance state maps to 409.
        req = self._get_request()
        res = req.get_response(self.router)
        self.assertEqual(409, res.status_int)

    @mock.patch.object(compute_api.API, 'get_diagnostics',
                side_effect=NotImplementedError)
    @mock.patch.object(compute_api.API, 'get', fake_instance_get)
    def test_get_diagnostics_raise_no_notimplementederror(self,
                                                  mock_get_diagnostics):
        # Driver without diagnostics support maps to 501.
        req = self._get_request()
        res = req.get_response(self.router)
        self.assertEqual(501, res.status_int)
+
+
class ServerDiagnosticsTestV2(ServerDiagnosticsTestV21):
    # Same tests as V2.1 but via the legacy v2 router and extension list.

    def _setup_router(self):
        self.flags(verbose=True,
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Server_diagnostics'])

        self.router = compute.APIRouter(init_only=('servers', 'diagnostics'))

    def _get_request(self):
        return fakes.HTTPRequest.blank(
            '/fake/servers/%s/diagnostics' % UUID)
+
+
class TestServerDiagnosticsXMLSerializer(test.NoDBTestCase):
    """XML serialization of diagnostics dicts."""
    namespace = wsgi.XMLNS_V11

    def _tag(self, elem):
        """Assert *elem* is namespaced correctly; return its local tag."""
        full_tag = elem.tag
        self.assertEqual('{', full_tag[0])
        ns, _sep, local = full_tag[1:].partition('}')
        self.assertEqual(self.namespace, ns)
        return local

    def test_index_serializer(self):
        serializer = server_diagnostics.ServerDiagnosticsTemplate()
        exemplar = dict(diag1='foo', diag2='bar')
        tree = etree.fromstring(serializer.serialize(exemplar))

        self.assertEqual('diagnostics', self._tag(tree))
        self.assertEqual(len(exemplar), len(tree))
        for child in tree:
            tag = self._tag(child)
            self.assertIn(tag, exemplar)
            self.assertEqual(exemplar[tag], child.text)
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_external_events.py b/nova/tests/unit/api/openstack/compute/contrib/test_server_external_events.py
index 61801ba648..61801ba648 100644
--- a/nova/tests/api/openstack/compute/contrib/test_server_external_events.py
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_server_external_events.py
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_server_group_quotas.py b/nova/tests/unit/api/openstack/compute/contrib/test_server_group_quotas.py
new file mode 100644
index 0000000000..9e756cf157
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_server_group_quotas.py
@@ -0,0 +1,188 @@
+# Copyright 2014 Hewlett-Packard Development Company, L.P
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+import webob
+
+from nova.api.openstack.compute.contrib import server_groups
+from nova.api.openstack.compute.plugins.v3 import server_groups as sg_v3
+from nova.api.openstack import extensions
+from nova import context
+import nova.db
+from nova.openstack.common import uuidutils
+from nova import quota
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+CONF = cfg.CONF
+
+
class AttrDict(dict):
    """Dict whose keys are also readable as attributes (fake DB row).

    ``__getattr__`` must raise AttributeError (not the KeyError a plain
    ``self[k]`` would raise) so ``hasattr`` and three-argument ``getattr``
    behave correctly on missing keys.
    """
    def __getattr__(self, k):
        try:
            return self[k]
        except KeyError:
            raise AttributeError(k)
+
+
def server_group_template(**kwargs):
    """Build a server group request body; name defaults to 'test'."""
    tpl = dict(kwargs)
    if 'name' not in tpl:
        tpl['name'] = 'test'
    return tpl
+
+
def server_group_db(sg):
    """Convert a server-group template into a fake DB record.

    Renames 'id' -> 'uuid' and 'metadata' -> 'metadetails', defaults the
    list/ownership columns a real row would have, and returns an AttrDict
    so the result supports attribute access like a DB object.
    """
    attrs = sg.copy()
    if 'id' in attrs:
        attrs['uuid'] = attrs.pop('id')
    # setdefault replaces the original pop-and-reassign dance, which was a
    # no-op when the key was present.
    attrs.setdefault('policies', [])
    attrs.setdefault('members', [])
    if 'metadata' in attrs:
        attrs['metadetails'] = attrs.pop('metadata')
    else:
        attrs['metadetails'] = {}
    # Soft-delete bookkeeping columns.
    attrs['deleted'] = 0
    attrs['deleted_at'] = None
    attrs['created_at'] = None
    attrs['updated_at'] = None
    attrs.setdefault('user_id', 'user_id')
    attrs.setdefault('project_id', 'project_id')
    # All fake rows share the same integer primary key.
    attrs['id'] = 7

    return AttrDict(attrs)
+
+
class ServerGroupQuotasTestV21(test.TestCase):
    """Quota enforcement and accounting for server groups (v2.1)."""

    def setUp(self):
        super(ServerGroupQuotasTestV21, self).setUp()
        self._setup_controller()
        self.app = self._get_app()

    def _setup_controller(self):
        # Overridden by the V2 subclass to use the legacy controller.
        self.controller = sg_v3.ServerGroupController()

    def _get_app(self):
        return fakes.wsgi_app_v21(init_only=('os-server-groups',))

    def _get_url(self):
        return '/v2/fake'

    def _setup_quotas(self):
        # No-op here; the V2 subclass arranges mox expectations instead.
        pass

    def _assert_server_groups_in_use(self, project_id, user_id, in_use):
        # Check the quota engine's in_use counter for server groups.
        ctxt = context.get_admin_context()
        result = quota.QUOTAS.get_user_quotas(ctxt, project_id, user_id)
        self.assertEqual(result['server_groups']['in_use'], in_use)

    def test_create_server_group_normal(self):
        self._setup_quotas()
        req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups')
        sgroup = server_group_template()
        policies = ['anti-affinity']
        sgroup['policies'] = policies
        res_dict = self.controller.create(req, {'server_group': sgroup})
        self.assertEqual(res_dict['server_group']['name'], 'test')
        self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
        self.assertEqual(res_dict['server_group']['policies'], policies)

    def test_create_server_group_quota_limit(self):
        self._setup_quotas()
        req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups')
        sgroup = server_group_template()
        policies = ['anti-affinity']
        sgroup['policies'] = policies
        # Start by creating as many server groups as we're allowed to.
        for i in range(CONF.quota_server_groups):
            self.controller.create(req, {'server_group': sgroup})

        # Then, creating a server group should fail.
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.create,
                          req, {'server_group': sgroup})

    def test_delete_server_group_by_admin(self):
        self._setup_quotas()
        sgroup = server_group_template()
        policies = ['anti-affinity']
        sgroup['policies'] = policies
        req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups')
        res = self.controller.create(req, {'server_group': sgroup})
        sg_id = res['server_group']['id']
        context = req.environ['nova.context']

        self._assert_server_groups_in_use(context.project_id,
                                          context.user_id, 1)

        # Delete the server group we've just created.
        req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups/%s' % sg_id,
                                      use_admin_context=True)
        self.controller.delete(req, sg_id)

        # Make sure the quota in use has been released.
        self._assert_server_groups_in_use(context.project_id,
                                          context.user_id, 0)

    def test_delete_server_group_by_id(self):
        self._setup_quotas()
        sg = server_group_template(id='123')
        self.called = False

        def server_group_delete(context, id):
            self.called = True

        def return_server_group(context, group_id):
            self.assertEqual(sg['id'], group_id)
            return server_group_db(sg)

        self.stubs.Set(nova.db, 'instance_group_delete',
                       server_group_delete)
        self.stubs.Set(nova.db, 'instance_group_get',
                       return_server_group)

        req = fakes.HTTPRequest.blank('/v2/fake/os-server-groups/123')
        resp = self.controller.delete(req, '123')
        self.assertTrue(self.called)

        # NOTE: on v2.1, http status code is set as wsgi_code of API
        # method instead of status_int in a response object.
        if isinstance(self.controller, sg_v3.ServerGroupController):
            status_int = self.controller.delete.wsgi_code
        else:
            status_int = resp.status_int
        self.assertEqual(204, status_int)
+
+
class ServerGroupQuotasTestV2(ServerGroupQuotasTestV21):
    # Legacy v2 variant: controller takes an extension manager, and the
    # quota extension lookup is mocked out with mox.

    def _setup_controller(self):
        self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
        self.controller = server_groups.ServerGroupController(self.ext_mgr)

    def _setup_quotas(self):
        # Pretend the os-server-group-quotas extension is loaded.
        self.ext_mgr.is_loaded('os-server-group-quotas').MultipleTimes()\
            .AndReturn(True)
        self.mox.ReplayAll()

    def _get_app(self):
        return fakes.wsgi_app(init_only=('os-server-groups',))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_server_groups.py b/nova/tests/unit/api/openstack/compute/contrib/test_server_groups.py
new file mode 100644
index 0000000000..7dd2675c9e
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_server_groups.py
@@ -0,0 +1,521 @@
+# Copyright (c) 2014 Cisco Systems, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import webob
+
+from nova.api.openstack.compute.contrib import server_groups
+from nova.api.openstack.compute.plugins.v3 import server_groups as sg_v3
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova import context
+import nova.db
+from nova import exception
+from nova import objects
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import utils
+
+FAKE_UUID1 = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
+FAKE_UUID2 = 'c6e6430a-6563-4efa-9542-5e93c9e97d18'
+FAKE_UUID3 = 'b8713410-9ba3-e913-901b-13410ca90121'
+
+
+class AttrDict(dict):
+ def __getattr__(self, k):
+ return self[k]
+
+
+def server_group_template(**kwargs):
+ sgroup = kwargs.copy()
+ sgroup.setdefault('name', 'test')
+ return sgroup
+
+
+def server_group_resp_template(**kwargs):
+ sgroup = kwargs.copy()
+ sgroup.setdefault('name', 'test')
+ sgroup.setdefault('policies', [])
+ sgroup.setdefault('members', [])
+ return sgroup
+
+
+def server_group_db(sg):
+ attrs = sg.copy()
+ if 'id' in attrs:
+ attrs['uuid'] = attrs.pop('id')
+ if 'policies' in attrs:
+ policies = attrs.pop('policies')
+ attrs['policies'] = policies
+ else:
+ attrs['policies'] = []
+ if 'members' in attrs:
+ members = attrs.pop('members')
+ attrs['members'] = members
+ else:
+ attrs['members'] = []
+ attrs['deleted'] = 0
+ attrs['deleted_at'] = None
+ attrs['created_at'] = None
+ attrs['updated_at'] = None
+ if 'user_id' not in attrs:
+ attrs['user_id'] = 'user_id'
+ if 'project_id' not in attrs:
+ attrs['project_id'] = 'project_id'
+ attrs['id'] = 7
+
+ return AttrDict(attrs)
+
+
+class ServerGroupTestV21(test.TestCase):
+
+ def setUp(self):
+ super(ServerGroupTestV21, self).setUp()
+ self._setup_controller()
+ self.app = self._get_app()
+
+ def _setup_controller(self):
+ self.controller = sg_v3.ServerGroupController()
+
+ def _get_app(self):
+ return fakes.wsgi_app_v21(init_only=('os-server-groups',))
+
+ def _get_url(self):
+ return '/v2/fake'
+
+ def test_create_server_group_with_no_policies(self):
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ sgroup = server_group_template()
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ def test_create_server_group_normal(self):
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ sgroup = server_group_template()
+ policies = ['anti-affinity']
+ sgroup['policies'] = policies
+ res_dict = self.controller.create(req, {'server_group': sgroup})
+ self.assertEqual(res_dict['server_group']['name'], 'test')
+ self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
+ self.assertEqual(res_dict['server_group']['policies'], policies)
+
+ def _create_instance(self, context):
+ instance = objects.Instance(image_ref=1, node='node1',
+ reservation_id='a', host='host1', project_id='fake',
+ vm_state='fake', system_metadata={'key': 'value'})
+ instance.create(context)
+ return instance
+
+ def _create_instance_group(self, context, members):
+ ig = objects.InstanceGroup(name='fake_name',
+ user_id='fake_user', project_id='fake',
+ members=members)
+ ig.create(context)
+ return ig.uuid
+
+ def _create_groups_and_instances(self, ctx):
+ instances = [self._create_instance(ctx), self._create_instance(ctx)]
+ members = [instance.uuid for instance in instances]
+ ig_uuid = self._create_instance_group(ctx, members)
+ return (ig_uuid, instances, members)
+
+ def test_display_members(self):
+ ctx = context.RequestContext('fake_user', 'fake')
+ (ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ res_dict = self.controller.show(req, ig_uuid)
+ result_members = res_dict['server_group']['members']
+ self.assertEqual(2, len(result_members))
+ for member in members:
+ self.assertIn(member, result_members)
+
+ def test_display_active_members_only(self):
+ ctx = context.RequestContext('fake_user', 'fake')
+ (ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+
+ # delete an instance
+ instances[1].destroy(ctx)
+ # check that the instance does not exist
+ self.assertRaises(exception.InstanceNotFound,
+ objects.Instance.get_by_uuid,
+ ctx, instances[1].uuid)
+ res_dict = self.controller.show(req, ig_uuid)
+ result_members = res_dict['server_group']['members']
+ # check that only the active instance is displayed
+ self.assertEqual(1, len(result_members))
+ self.assertIn(instances[0].uuid, result_members)
+
+ def test_create_server_group_with_illegal_name(self):
+ # blank name
+ sgroup = server_group_template(name='', policies=['test_policy'])
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ # name with length 256
+ sgroup = server_group_template(name='1234567890' * 26,
+ policies=['test_policy'])
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ # non-string name
+ sgroup = server_group_template(name=12, policies=['test_policy'])
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ # name with leading spaces
+ sgroup = server_group_template(name=' leading spaces',
+ policies=['test_policy'])
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ # name with trailing spaces
+ sgroup = server_group_template(name='trailing space ',
+ policies=['test_policy'])
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ # name with all spaces
+ sgroup = server_group_template(name=' ',
+ policies=['test_policy'])
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ def test_create_server_group_with_illegal_policies(self):
+ # blank policy
+ sgroup = server_group_template(name='fake-name', policies='')
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ # policy as integer
+ sgroup = server_group_template(name='fake-name', policies=7)
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ # policy as string
+ sgroup = server_group_template(name='fake-name', policies='invalid')
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ # policy as None
+ sgroup = server_group_template(name='fake-name', policies=None)
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ def test_create_server_group_conflicting_policies(self):
+ sgroup = server_group_template()
+ policies = ['anti-affinity', 'affinity']
+ sgroup['policies'] = policies
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ def test_create_server_group_with_duplicate_policies(self):
+ sgroup = server_group_template()
+ policies = ['affinity', 'affinity']
+ sgroup['policies'] = policies
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ def test_create_server_group_not_supported(self):
+ sgroup = server_group_template()
+ policies = ['storage-affinity', 'anti-affinity', 'rack-affinity']
+ sgroup['policies'] = policies
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'server_group': sgroup})
+
+ def test_create_server_group_with_no_body(self):
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, None)
+
+ def test_create_server_group_with_no_server_group(self):
+ body = {'no-instanceGroup': None}
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_list_server_group_by_tenant(self):
+ groups = []
+ policies = ['anti-affinity']
+ members = []
+ metadata = {} # always empty
+ names = ['default-x', 'test']
+ sg1 = server_group_resp_template(id=str(1345),
+ name=names[0],
+ policies=policies,
+ members=members,
+ metadata=metadata)
+ sg2 = server_group_resp_template(id=str(891),
+ name=names[1],
+ policies=policies,
+ members=members,
+ metadata=metadata)
+ groups = [sg1, sg2]
+ expected = {'server_groups': groups}
+
+ def return_server_groups(context, project_id):
+ return [server_group_db(sg) for sg in groups]
+
+ self.stubs.Set(nova.db, 'instance_group_get_all_by_project_id',
+ return_server_groups)
+
+ req = fakes.HTTPRequest.blank(self._get_url() + '/os-server-groups')
+ res_dict = self.controller.index(req)
+ self.assertEqual(res_dict, expected)
+
+ def test_list_server_group_all(self):
+ all_groups = []
+ tenant_groups = []
+ policies = ['anti-affinity']
+ members = []
+ metadata = {} # always empty
+ names = ['default-x', 'test']
+ sg1 = server_group_resp_template(id=str(1345),
+ name=names[0],
+ policies=[],
+ members=members,
+ metadata=metadata)
+ sg2 = server_group_resp_template(id=str(891),
+ name=names[1],
+ policies=policies,
+ members=members,
+ metadata={})
+ tenant_groups = [sg2]
+ all_groups = [sg1, sg2]
+
+ all = {'server_groups': all_groups}
+ tenant_specific = {'server_groups': tenant_groups}
+
+ def return_all_server_groups(context):
+ return [server_group_db(sg) for sg in all_groups]
+
+ self.stubs.Set(nova.db, 'instance_group_get_all',
+ return_all_server_groups)
+
+ def return_tenant_server_groups(context, project_id):
+ return [server_group_db(sg) for sg in tenant_groups]
+
+ self.stubs.Set(nova.db, 'instance_group_get_all_by_project_id',
+ return_tenant_server_groups)
+
+ path = self._get_url() + '/os-server-groups?all_projects=True'
+
+ req = fakes.HTTPRequest.blank(path, use_admin_context=True)
+ res_dict = self.controller.index(req)
+ self.assertEqual(res_dict, all)
+ req = fakes.HTTPRequest.blank(path)
+ res_dict = self.controller.index(req)
+ self.assertEqual(res_dict, tenant_specific)
+
+ def test_delete_server_group_by_id(self):
+ sg = server_group_template(id='123')
+
+ self.called = False
+
+ def server_group_delete(context, id):
+ self.called = True
+
+ def return_server_group(context, group_id):
+ self.assertEqual(sg['id'], group_id)
+ return server_group_db(sg)
+
+ self.stubs.Set(nova.db, 'instance_group_delete',
+ server_group_delete)
+ self.stubs.Set(nova.db, 'instance_group_get',
+ return_server_group)
+
+ req = fakes.HTTPRequest.blank(self._get_url() +
+ '/os-server-groups/123')
+ resp = self.controller.delete(req, '123')
+ self.assertTrue(self.called)
+
+ # NOTE: on v2.1, http status code is set as wsgi_code of API
+ # method instead of status_int in a response object.
+ if isinstance(self.controller, sg_v3.ServerGroupController):
+ status_int = self.controller.delete.wsgi_code
+ else:
+ status_int = resp.status_int
+ self.assertEqual(204, status_int)
+
+ def test_delete_non_existing_server_group(self):
+ req = fakes.HTTPRequest.blank(self._get_url() +
+ '/os-server-groups/invalid')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, 'invalid')
+
+
+class ServerGroupTestV2(ServerGroupTestV21):
+
+ def _setup_controller(self):
+ ext_mgr = extensions.ExtensionManager()
+ ext_mgr.extensions = {}
+ self.controller = server_groups.ServerGroupController(ext_mgr)
+
+ def _get_app(self):
+ return fakes.wsgi_app(init_only=('os-server-groups',))
+
+
+class TestServerGroupXMLDeserializer(test.TestCase):
+
+ def setUp(self):
+ super(TestServerGroupXMLDeserializer, self).setUp()
+ self.deserializer = server_groups.ServerGroupXMLDeserializer()
+
+ def test_create_request(self):
+ serial_request = """
+<server_group name="test">
+</server_group>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server_group": {
+ "name": "test",
+ "policies": []
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_update_request(self):
+ serial_request = """
+<server_group name="test">
+<policies>
+<policy>policy-1</policy>
+<policy>policy-2</policy>
+</policies>
+</server_group>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server_group": {
+ "name": 'test',
+ "policies": ['policy-1', 'policy-2']
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_create_request_no_name(self):
+ serial_request = """
+<server_group>
+</server_group>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {
+ "server_group": {
+ "policies": []
+ },
+ }
+ self.assertEqual(request['body'], expected)
+
+ def test_corrupt_xml(self):
+ """Should throw a 400 error on corrupt xml."""
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ self.deserializer.deserialize,
+ utils.killer_xml_body())
+
+
+class TestServerGroupXMLSerializer(test.TestCase):
+ def setUp(self):
+ super(TestServerGroupXMLSerializer, self).setUp()
+ self.namespace = wsgi.XMLNS_V11
+ self.index_serializer = server_groups.ServerGroupsTemplate()
+ self.default_serializer = server_groups.ServerGroupTemplate()
+
+ def _tag(self, elem):
+ tagname = elem.tag
+ self.assertEqual(tagname[0], '{')
+ tmp = tagname.partition('}')
+ namespace = tmp[0][1:]
+ self.assertEqual(namespace, self.namespace)
+ return tmp[2]
+
+ def _verify_server_group(self, raw_group, tree):
+ policies = raw_group['policies']
+ members = raw_group['members']
+ self.assertEqual('server_group', self._tag(tree))
+ self.assertEqual(raw_group['id'], tree.get('id'))
+ self.assertEqual(raw_group['name'], tree.get('name'))
+ self.assertEqual(3, len(tree))
+ for child in tree:
+ child_tag = self._tag(child)
+ if child_tag == 'policies':
+ self.assertEqual(len(policies), len(child))
+ for idx, gr_child in enumerate(child):
+ self.assertEqual(self._tag(gr_child), 'policy')
+ self.assertEqual(policies[idx],
+ gr_child.text)
+ elif child_tag == 'members':
+ self.assertEqual(len(members), len(child))
+ for idx, gr_child in enumerate(child):
+ self.assertEqual(self._tag(gr_child), 'member')
+ self.assertEqual(members[idx],
+ gr_child.text)
+ elif child_tag == 'metadata':
+ self.assertEqual(0, len(child))
+
+ def _verify_server_group_brief(self, raw_group, tree):
+ self.assertEqual('server_group', self._tag(tree))
+ self.assertEqual(raw_group['id'], tree.get('id'))
+ self.assertEqual(raw_group['name'], tree.get('name'))
+
+ def test_group_serializer(self):
+ policies = ["policy-1", "policy-2"]
+ members = ["1", "2"]
+ raw_group = dict(
+ id='890',
+ name='name',
+ policies=policies,
+ members=members)
+ sg_group = dict(server_group=raw_group)
+ text = self.default_serializer.serialize(sg_group)
+
+ tree = etree.fromstring(text)
+
+ self._verify_server_group(raw_group, tree)
+
+ def test_groups_serializer(self):
+ policies = ["policy-1", "policy-2",
+ "policy-3"]
+ members = ["1", "2", "3"]
+ groups = [dict(
+ id='890',
+ name='test',
+ policies=policies[0:2],
+ members=members[0:2]),
+ dict(
+ id='123',
+ name='default',
+ policies=policies[2:],
+ members=members[2:])]
+ sg_groups = dict(server_groups=groups)
+ text = self.index_serializer.serialize(sg_groups)
+
+ tree = etree.fromstring(text)
+
+ self.assertEqual('server_groups', self._tag(tree))
+ self.assertEqual(len(groups), len(tree))
+ for idx, child in enumerate(tree):
+ self._verify_server_group_brief(groups[idx], child)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_server_password.py b/nova/tests/unit/api/openstack/compute/contrib/test_server_password.py
new file mode 100644
index 0000000000..d29b0480f3
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_server_password.py
@@ -0,0 +1,94 @@
+# Copyright 2012 Nebula, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.metadata import password
+from nova import compute
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+
+CONF = cfg.CONF
+CONF.import_opt('osapi_compute_ext_list', 'nova.api.openstack.compute.contrib')
+
+
+class ServerPasswordTest(test.TestCase):
+ content_type = 'application/json'
+
+ def setUp(self):
+ super(ServerPasswordTest, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(
+ compute.api.API, 'get',
+ lambda self, ctxt, *a, **kw:
+ fake_instance.fake_instance_obj(
+ ctxt,
+ system_metadata={},
+ expected_attrs=['system_metadata']))
+ self.password = 'fakepass'
+
+ def fake_extract_password(instance):
+ return self.password
+
+ def fake_convert_password(context, password):
+ self.password = password
+ return {}
+
+ self.stubs.Set(password, 'extract_password', fake_extract_password)
+ self.stubs.Set(password, 'convert_password', fake_convert_password)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Server_password'])
+
+ def _make_request(self, url, method='GET'):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ req.method = method
+ res = req.get_response(
+ fakes.wsgi_app(init_only=('servers', 'os-server-password')))
+ return res
+
+ def _get_pass(self, body):
+ return jsonutils.loads(body).get('password')
+
+ def test_get_password(self):
+ url = '/v2/fake/servers/fake/os-server-password'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(self._get_pass(res.body), 'fakepass')
+
+ def test_reset_password(self):
+ url = '/v2/fake/servers/fake/os-server-password'
+ res = self._make_request(url, 'DELETE')
+ self.assertEqual(res.status_int, 204)
+
+ res = self._make_request(url)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(self._get_pass(res.body), '')
+
+
+class ServerPasswordXmlTest(ServerPasswordTest):
+ content_type = 'application/xml'
+
+ def _get_pass(self, body):
+ # NOTE(vish): first element is password
+ return etree.XML(body).text or ''
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_server_start_stop.py b/nova/tests/unit/api/openstack/compute/contrib/test_server_start_stop.py
new file mode 100644
index 0000000000..6be2a52b86
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_server_start_stop.py
@@ -0,0 +1,183 @@
+# Copyright (c) 2012 Midokura Japan K.K.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mox
+import webob
+
+from nova.api.openstack.compute.contrib import server_start_stop \
+ as server_v2
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import servers \
+ as server_v21
+from nova.compute import api as compute_api
+from nova import db
+from nova import exception
+from nova.openstack.common import policy as common_policy
+from nova import policy
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+def fake_instance_get(context, instance_id,
+ columns_to_join=None, use_slave=False):
+ result = fakes.stub_instance(id=1, uuid=instance_id)
+ result['created_at'] = None
+ result['deleted_at'] = None
+ result['updated_at'] = None
+ result['deleted'] = 0
+ result['info_cache'] = {'network_info': '[]',
+ 'instance_uuid': result['uuid']}
+ return result
+
+
+def fake_start_stop_not_ready(self, context, instance):
+ raise exception.InstanceNotReady(instance_id=instance["uuid"])
+
+
+def fake_start_stop_locked_server(self, context, instance):
+ raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
+
+
+def fake_start_stop_invalid_state(self, context, instance):
+ raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
+
+
+class ServerStartStopTestV21(test.TestCase):
+ start_policy = "compute:v3:servers:start"
+ stop_policy = "compute:v3:servers:stop"
+
+ def setUp(self):
+ super(ServerStartStopTestV21, self).setUp()
+ self._setup_controller()
+
+ def _setup_controller(self):
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = server_v21.ServersController(
+ extension_info=ext_info)
+
+ def test_start(self):
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.mox.StubOutWithMock(compute_api.API, 'start')
+ compute_api.API.start(mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(start="")
+ self.controller._start_server(req, 'test_inst', body)
+
+ def test_start_policy_failed(self):
+ rules = {
+ self.start_policy:
+ common_policy.parse_rule("project_id:non_fake")
+ }
+ policy.set_rules(rules)
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(start="")
+ exc = self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller._start_server,
+ req, 'test_inst', body)
+ self.assertIn(self.start_policy, exc.format_message())
+
+ def test_start_not_ready(self):
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stubs.Set(compute_api.API, 'start', fake_start_stop_not_ready)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(start="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._start_server, req, 'test_inst', body)
+
+ def test_start_locked_server(self):
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stubs.Set(compute_api.API, 'start', fake_start_stop_locked_server)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(start="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._start_server, req, 'test_inst', body)
+
+ def test_start_invalid_state(self):
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stubs.Set(compute_api.API, 'start', fake_start_stop_invalid_state)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(start="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._start_server, req, 'test_inst', body)
+
+ def test_stop(self):
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.mox.StubOutWithMock(compute_api.API, 'stop')
+ compute_api.API.stop(mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(stop="")
+ self.controller._stop_server(req, 'test_inst', body)
+
+ def test_stop_policy_failed(self):
+ rules = {
+ self.stop_policy:
+ common_policy.parse_rule("project_id:non_fake")
+ }
+ policy.set_rules(rules)
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(stop="")
+ exc = self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller._stop_server,
+ req, 'test_inst', body)
+ self.assertIn(self.stop_policy, exc.format_message())
+
+ def test_stop_not_ready(self):
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stubs.Set(compute_api.API, 'stop', fake_start_stop_not_ready)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(stop="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._stop_server, req, 'test_inst', body)
+
+ def test_stop_locked_server(self):
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stubs.Set(compute_api.API, 'stop', fake_start_stop_locked_server)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(stop="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._stop_server, req, 'test_inst', body)
+
+ def test_stop_invalid_state(self):
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stubs.Set(compute_api.API, 'stop', fake_start_stop_invalid_state)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(start="")
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._stop_server, req, 'test_inst', body)
+
+ def test_start_with_bogus_id(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(start="")
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._start_server, req, 'test_inst', body)
+
+ def test_stop_with_bogus_id(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action')
+ body = dict(stop="")
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._stop_server, req, 'test_inst', body)
+
+
+class ServerStartStopTestV2(ServerStartStopTestV21):
+ start_policy = "compute:start"
+ stop_policy = "compute:stop"
+
+ def _setup_controller(self):
+ self.controller = server_v2.ServerStartStopActionController()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_server_usage.py b/nova/tests/unit/api/openstack/compute/contrib/test_server_usage.py
new file mode 100644
index 0000000000..ee0d9a0ef4
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_server_usage.py
@@ -0,0 +1,159 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from lxml import etree
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+
+from nova.api.openstack.compute.contrib import server_usage
+from nova import compute
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+
+DATE1 = datetime.datetime(year=2013, month=4, day=5, hour=12)
+DATE2 = datetime.datetime(year=2013, month=4, day=5, hour=13)
+DATE3 = datetime.datetime(year=2013, month=4, day=5, hour=14)
+
+
+def fake_compute_get(*args, **kwargs):
+ inst = fakes.stub_instance(1, uuid=UUID3, launched_at=DATE1,
+ terminated_at=DATE2)
+ return fake_instance.fake_instance_obj(args[1], **inst)
+
+
+def fake_compute_get_all(*args, **kwargs):
+ db_list = [
+ fakes.stub_instance(2, uuid=UUID1, launched_at=DATE2,
+ terminated_at=DATE3),
+ fakes.stub_instance(3, uuid=UUID2, launched_at=DATE1,
+ terminated_at=DATE3),
+ ]
+ fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+ return instance_obj._make_instance_list(args[1],
+ objects.InstanceList(),
+ db_list, fields)
+
+
+class ServerUsageTestV21(test.TestCase):
+ content_type = 'application/json'
+ prefix = 'OS-SRV-USG:'
+ _prefix = "/v2/fake"
+
+ def setUp(self):
+ super(ServerUsageTestV21, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Server_usage'])
+ return_server = fakes.fake_instance_get()
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ def _make_request(self, url):
+ req = fakes.HTTPRequest.blank(url)
+ req.accept = self.content_type
+ res = req.get_response(self._get_app())
+ return res
+
+ def _get_app(self):
+ return fakes.wsgi_app_v21(init_only=('servers', 'os-server-usage'))
+
+ def _get_server(self, body):
+ return jsonutils.loads(body).get('server')
+
+ def _get_servers(self, body):
+ return jsonutils.loads(body).get('servers')
+
+ def assertServerUsage(self, server, launched_at, terminated_at):
+ resp_launched_at = timeutils.parse_isotime(
+ server.get('%slaunched_at' % self.prefix))
+ self.assertEqual(timeutils.normalize_time(resp_launched_at),
+ launched_at)
+ resp_terminated_at = timeutils.parse_isotime(
+ server.get('%sterminated_at' % self.prefix))
+ self.assertEqual(timeutils.normalize_time(resp_terminated_at),
+ terminated_at)
+
+ def test_show(self):
+ url = self._prefix + ('/servers/%s' % UUID3)
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ now = timeutils.utcnow()
+ timeutils.set_time_override(now)
+ self.assertServerUsage(self._get_server(res.body),
+ launched_at=DATE1,
+ terminated_at=DATE2)
+
+ def test_detail(self):
+ url = self._prefix + '/servers/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ servers = self._get_servers(res.body)
+ self.assertServerUsage(servers[0],
+ launched_at=DATE2,
+ terminated_at=DATE3)
+ self.assertServerUsage(servers[1],
+ launched_at=DATE1,
+ terminated_at=DATE3)
+
+ def test_no_instance_passthrough_404(self):
+
+ def fake_compute_get(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ url = self._prefix + '/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 404)
+
+
+class ServerUsageTestV20(ServerUsageTestV21):
+
+ def setUp(self):
+ super(ServerUsageTestV20, self).setUp()
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Server_usage'])
+
+ def _get_app(self):
+ return fakes.wsgi_app(init_only=('servers',))
+
+
+class ServerUsageXmlTest(ServerUsageTestV20):
+ content_type = 'application/xml'
+ prefix = '{%s}' % server_usage.Server_usage.namespace
+
+ def _get_server(self, body):
+ return etree.XML(body)
+
+ def _get_servers(self, body):
+ return etree.XML(body).getchildren()
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_services.py b/nova/tests/unit/api/openstack/compute/contrib/test_services.py
new file mode 100644
index 0000000000..87297c567b
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_services.py
@@ -0,0 +1,576 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import calendar
+import datetime
+
+import iso8601
+import mock
+from oslo.utils import timeutils
+import webob.exc
+
+from nova.api.openstack.compute.contrib import services
+from nova.api.openstack import extensions
+from nova import availability_zones
+from nova.compute import cells_api
+from nova import context
+from nova import db
+from nova import exception
+from nova.servicegroup.drivers import db as db_driver
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.objects import test_service
+
+
# Canned DB-style service records: two binaries on each of two hosts.
# NOTE(review): host1 services report 'up' and host2 mostly 'down' in the
# tests below -- presumably derived from updated_at relative to
# fake_utcnow(); confirm against the servicegroup driver.
fake_services_list = [
    dict(test_service.fake_service,
         binary='nova-scheduler',
         host='host1',
         id=1,
         disabled=True,
         topic='scheduler',
         updated_at=datetime.datetime(2012, 10, 29, 13, 42, 2),
         created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
         disabled_reason='test1'),
    dict(test_service.fake_service,
         binary='nova-compute',
         host='host1',
         id=2,
         disabled=True,
         topic='compute',
         updated_at=datetime.datetime(2012, 10, 29, 13, 42, 5),
         created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
         disabled_reason='test2'),
    dict(test_service.fake_service,
         binary='nova-scheduler',
         host='host2',
         id=3,
         disabled=False,
         topic='scheduler',
         updated_at=datetime.datetime(2012, 9, 19, 6, 55, 34),
         created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
         disabled_reason=None),
    dict(test_service.fake_service,
         binary='nova-compute',
         host='host2',
         id=4,
         disabled=True,
         topic='compute',
         updated_at=datetime.datetime(2012, 9, 18, 8, 3, 38),
         created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
         disabled_reason='test4'),
    ]
+
+
class FakeRequest(object):
    """Minimal request double: admin context, no query filters."""
    environ = {"nova.context": context.get_admin_context()}
    GET = {}
+
+
class FakeRequestWithService(object):
    """Request double filtering on the nova-compute binary."""
    environ = {"nova.context": context.get_admin_context()}
    GET = {"binary": "nova-compute"}
+
+
class FakeRequestWithHost(object):
    """Request double filtering on host1."""
    environ = {"nova.context": context.get_admin_context()}
    GET = {"host": "host1"}
+
+
class FakeRequestWithHostService(object):
    """Request double filtering on both host1 and nova-compute."""
    environ = {"nova.context": context.get_admin_context()}
    GET = {"host": "host1", "binary": "nova-compute"}
+
+
def fake_service_get_all(services):
    """Build a HostAPI.service_get_all() stub returning *services*.

    When zones are requested (``set_zones=True`` or an
    ``availability_zone`` filter is present) the records are passed
    through availability_zones.set_availability_zones() first.
    """
    def service_get_all(context, filters=None, set_zones=False):
        # Guard against filters=None: the bare membership test would
        # raise TypeError when set_zones is False and no filters given.
        if set_zones or (filters and 'availability_zone' in filters):
            return availability_zones.set_availability_zones(context,
                                                             services)
        return services
    return service_get_all
+
+
def fake_db_api_service_get_all(context, disabled=None):
    """db.service_get_all() stand-in; the *disabled* filter is ignored."""
    return fake_services_list
+
+
def fake_db_service_get_by_host_binary(services):
    """Build a db.service_get_by_args() stand-in over *services*."""
    def service_get_by_host_binary(context, host, binary):
        for record in services:
            if (record['host'], record['binary']) == (host, binary):
                return record
        raise exception.HostBinaryNotFound(host=host, binary=binary)
    return service_get_by_host_binary
+
+
def fake_service_get_by_host_binary(context, host, binary):
    """Module-level convenience wrapper bound to fake_services_list."""
    lookup = fake_db_service_get_by_host_binary(fake_services_list)
    return lookup(context, host, binary)
+
+
+def _service_get_by_id(services, value):
+ for service in services:
+ if service['id'] == value:
+ return service
+ return None
+
+
def fake_db_service_update(services):
    """Build a db.service_update() stand-in over *services*.

    The stub only validates that the service exists; *values* is
    accepted but never applied to the stored record.
    """
    def service_update(context, service_id, values):
        match = _service_get_by_id(services, service_id)
        if match is None:
            raise exception.ServiceNotFound(service_id=service_id)
        return match
    return service_update
+
+
def fake_service_update(context, service_id, values):
    """Module-level convenience wrapper bound to fake_services_list."""
    updater = fake_db_service_update(fake_services_list)
    return updater(context, service_id, values)
+
+
def fake_utcnow():
    """Frozen 'now' that makes service up/down evaluation deterministic."""
    return datetime.datetime(2012, 10, 29, 13, 42, 11)


# timeutils.utcnow carries an override_time attribute; mirror it so code
# inspecting the attribute keeps working against this stub.
fake_utcnow.override_time = None
+
+
def fake_utcnow_ts():
    """POSIX timestamp equivalent of fake_utcnow()."""
    return calendar.timegm(fake_utcnow().utctimetuple())
+
+
+class ServicesTest(test.TestCase):
+
    def setUp(self):
        """Wire the controller to the canned service list via stubs."""
        super(ServicesTest, self).setUp()

        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = services.ServiceController(self.ext_mgr)

        # Freeze time so up/down state derived from updated_at is stable.
        self.stubs.Set(timeutils, "utcnow", fake_utcnow)
        self.stubs.Set(timeutils, "utcnow_ts", fake_utcnow_ts)

        self.stubs.Set(self.controller.host_api, "service_get_all",
                       fake_service_get_all(fake_services_list))

        self.stubs.Set(db, "service_get_by_args",
                       fake_db_service_get_by_host_binary(fake_services_list))
        self.stubs.Set(db, "service_update",
                       fake_db_service_update(fake_services_list))
+
+ def test_services_list(self):
+ req = FakeRequest()
+ res_dict = self.controller.index(req)
+
+ response = {'services': [
+ {'binary': 'nova-scheduler',
+ 'host': 'host1',
+ 'zone': 'internal',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
+ {'binary': 'nova-scheduler',
+ 'host': 'host2',
+ 'zone': 'internal',
+ 'status': 'enabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34)},
+ {'binary': 'nova-compute',
+ 'host': 'host2',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_list_with_host(self):
+ req = FakeRequestWithHost()
+ res_dict = self.controller.index(req)
+
+ response = {'services': [
+ {'binary': 'nova-scheduler',
+ 'host': 'host1',
+ 'zone': 'internal',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_list_with_service(self):
+ req = FakeRequestWithService()
+ res_dict = self.controller.index(req)
+
+ response = {'services': [
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
+ {'binary': 'nova-compute',
+ 'host': 'host2',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_list_with_host_service(self):
+ req = FakeRequestWithHostService()
+ res_dict = self.controller.index(req)
+
+ response = {'services': [
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_detail(self):
+ self.ext_mgr.extensions['os-extended-services'] = True
+ req = FakeRequest()
+ res_dict = self.controller.index(req)
+ response = {'services': [
+ {'binary': 'nova-scheduler',
+ 'host': 'host1',
+ 'zone': 'internal',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'disabled_reason': 'test1'},
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
+ 'disabled_reason': 'test2'},
+ {'binary': 'nova-scheduler',
+ 'host': 'host2',
+ 'zone': 'internal',
+ 'status': 'enabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
+ 'disabled_reason': None},
+ {'binary': 'nova-compute',
+ 'host': 'host2',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
+ 'disabled_reason': 'test4'}]}
+ self.assertEqual(res_dict, response)
+
+ def test_service_detail_with_host(self):
+ self.ext_mgr.extensions['os-extended-services'] = True
+ req = FakeRequestWithHost()
+ res_dict = self.controller.index(req)
+ response = {'services': [
+ {'binary': 'nova-scheduler',
+ 'host': 'host1',
+ 'zone': 'internal',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'disabled_reason': 'test1'},
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
+ 'disabled_reason': 'test2'}]}
+ self.assertEqual(res_dict, response)
+
+ def test_service_detail_with_service(self):
+ self.ext_mgr.extensions['os-extended-services'] = True
+ req = FakeRequestWithService()
+ res_dict = self.controller.index(req)
+ response = {'services': [
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
+ 'disabled_reason': 'test2'},
+ {'binary': 'nova-compute',
+ 'host': 'host2',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
+ 'disabled_reason': 'test4'}]}
+ self.assertEqual(res_dict, response)
+
+ def test_service_detail_with_host_service(self):
+ self.ext_mgr.extensions['os-extended-services'] = True
+ req = FakeRequestWithHostService()
+ res_dict = self.controller.index(req)
+ response = {'services': [
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
+ 'disabled_reason': 'test2'}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_detail_with_delete_extension(self):
+ self.ext_mgr.extensions['os-extended-services-delete'] = True
+ req = FakeRequest()
+ res_dict = self.controller.index(req)
+ response = {'services': [
+ {'binary': 'nova-scheduler',
+ 'host': 'host1',
+ 'id': 1,
+ 'zone': 'internal',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2)},
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'id': 2,
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
+ {'binary': 'nova-scheduler',
+ 'host': 'host2',
+ 'id': 3,
+ 'zone': 'internal',
+ 'status': 'enabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34)},
+ {'binary': 'nova-compute',
+ 'host': 'host2',
+ 'id': 4,
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38)}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_enable(self):
+ def _service_update(context, service_id, values):
+ self.assertIsNone(values['disabled_reason'])
+ return dict(test_service.fake_service, id=service_id, **values)
+
+ self.stubs.Set(db, "service_update", _service_update)
+
+ body = {'host': 'host1', 'binary': 'nova-compute'}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-services/enable')
+
+ res_dict = self.controller.update(req, "enable", body)
+ self.assertEqual(res_dict['service']['status'], 'enabled')
+ self.assertNotIn('disabled_reason', res_dict['service'])
+
+ def test_services_enable_with_invalid_host(self):
+ body = {'host': 'invalid', 'binary': 'nova-compute'}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-services/enable')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update,
+ req,
+ "enable",
+ body)
+
+ def test_services_enable_with_invalid_binary(self):
+ body = {'host': 'host1', 'binary': 'invalid'}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-services/enable')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update,
+ req,
+ "enable",
+ body)
+
+ # This test is just to verify that the servicegroup API gets used when
+ # calling this API.
+ def test_services_with_exception(self):
+ def dummy_is_up(self, dummy):
+ raise KeyError()
+
+ self.stubs.Set(db_driver.DbDriver, 'is_up', dummy_is_up)
+ req = FakeRequestWithHostService()
+ self.assertRaises(KeyError, self.controller.index, req)
+
+ def test_services_disable(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-services/disable')
+ body = {'host': 'host1', 'binary': 'nova-compute'}
+ res_dict = self.controller.update(req, "disable", body)
+
+ self.assertEqual(res_dict['service']['status'], 'disabled')
+ self.assertNotIn('disabled_reason', res_dict['service'])
+
+ def test_services_disable_with_invalid_host(self):
+ body = {'host': 'invalid', 'binary': 'nova-compute'}
+ req = fakes.HTTPRequest.blank('/v2/fake/os-services/disable')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update,
+ req,
+ "disable",
+ body)
+
+ def test_services_disable_with_invalid_binary(self):
+ body = {'host': 'host1', 'binary': 'invalid'}
+ req = fakes.HTTPRequestV3.blank('/v2/fake/os-services/disable')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update,
+ req,
+ "disable",
+ body)
+
+ def test_services_disable_log_reason(self):
+ self.ext_mgr.extensions['os-extended-services'] = True
+ req = \
+ fakes.HTTPRequest.blank('v2/fakes/os-services/disable-log-reason')
+ body = {'host': 'host1',
+ 'binary': 'nova-compute',
+ 'disabled_reason': 'test-reason',
+ }
+ res_dict = self.controller.update(req, "disable-log-reason", body)
+
+ self.assertEqual(res_dict['service']['status'], 'disabled')
+ self.assertEqual(res_dict['service']['disabled_reason'], 'test-reason')
+
+ def test_mandatory_reason_field(self):
+ self.ext_mgr.extensions['os-extended-services'] = True
+ req = \
+ fakes.HTTPRequest.blank('v2/fakes/os-services/disable-log-reason')
+ body = {'host': 'host1',
+ 'binary': 'nova-compute',
+ }
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req, "disable-log-reason", body)
+
+ def test_invalid_reason_field(self):
+ reason = ' '
+ self.assertFalse(self.controller._is_valid_as_reason(reason))
+ reason = 'a' * 256
+ self.assertFalse(self.controller._is_valid_as_reason(reason))
+ reason = 'it\'s a valid reason.'
+ self.assertTrue(self.controller._is_valid_as_reason(reason))
+
    def test_services_delete(self):
        """DELETE /os-services/<id> returns 204 and calls the host API."""
        self.ext_mgr.extensions['os-extended-services-delete'] = True

        request = fakes.HTTPRequest.blank('/v2/fakes/os-services/1',
                                          use_admin_context=True)
        request.method = 'DELETE'

        with mock.patch.object(self.controller.host_api,
                               'service_delete') as service_delete:
            self.controller.delete(request, '1')
            # The id is passed through as the raw string from the URL.
            service_delete.assert_called_once_with(
                request.environ['nova.context'], '1')
            self.assertEqual(self.controller.delete.wsgi_code, 204)
+
    def test_services_delete_not_found(self):
        """A non-numeric service id is rejected with 404."""
        self.ext_mgr.extensions['os-extended-services-delete'] = True

        request = fakes.HTTPRequest.blank('/v2/fakes/os-services/abc',
                                          use_admin_context=True)
        request.method = 'DELETE'
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.delete, request, 'abc')
+
    def test_services_delete_not_enabled(self):
        """Without the delete extension enabled, DELETE is a 405."""
        request = fakes.HTTPRequest.blank('/v2/fakes/os-services/300',
                                          use_admin_context=True)
        request.method = 'DELETE'
        self.assertRaises(webob.exc.HTTPMethodNotAllowed,
                          self.controller.delete, request, '300')
+
+
class ServicesCellsTest(test.TestCase):
    """Exercise the service index when the cells HostAPI is in use."""

    def setUp(self):
        super(ServicesCellsTest, self).setUp()

        host_api = cells_api.HostAPI()

        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = services.ServiceController(self.ext_mgr)
        self.controller.host_api = host_api

        # Freeze time so up/down state is deterministic.
        self.stubs.Set(timeutils, "utcnow", fake_utcnow)
        self.stubs.Set(timeutils, "utcnow_ts", fake_utcnow_ts)

        # Cells prefix service ids with their cell path.
        cell_services = [dict(svc, id='cell1@%d' % svc['id'])
                         for svc in fake_services_list]

        self.stubs.Set(host_api.cells_rpcapi, "service_get_all",
                       fake_service_get_all(cell_services))

    def test_services_detail(self):
        """Ids keep the cell prefix and timestamps come back tz-aware."""
        self.ext_mgr.extensions['os-extended-services-delete'] = True
        res_dict = self.controller.index(FakeRequest())
        utc = iso8601.iso8601.Utc()
        expected = {'services': [
            {'id': 'cell1@1', 'binary': 'nova-scheduler', 'host': 'host1',
             'zone': 'internal', 'status': 'disabled', 'state': 'up',
             'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2,
                                             tzinfo=utc)},
            {'id': 'cell1@2', 'binary': 'nova-compute', 'host': 'host1',
             'zone': 'nova', 'status': 'disabled', 'state': 'up',
             'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5,
                                             tzinfo=utc)},
            {'id': 'cell1@3', 'binary': 'nova-scheduler', 'host': 'host2',
             'zone': 'internal', 'status': 'enabled', 'state': 'down',
             'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34,
                                             tzinfo=utc)},
            {'id': 'cell1@4', 'binary': 'nova-compute', 'host': 'host2',
             'zone': 'nova', 'status': 'disabled', 'state': 'down',
             'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38,
                                             tzinfo=utc)}]}
        self.assertEqual(expected, res_dict)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_shelve.py b/nova/tests/unit/api/openstack/compute/contrib/test_shelve.py
new file mode 100644
index 0000000000..df1c6fc449
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_shelve.py
@@ -0,0 +1,148 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import webob
+
+from nova.api.openstack.compute.contrib import shelve as shelve_v2
+from nova.api.openstack.compute.plugins.v3 import shelve as shelve_v21
+from nova.compute import api as compute_api
+from nova import db
+from nova import exception
+from nova.openstack.common import policy as common_policy
+from nova import policy
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+
def fake_instance_get_by_uuid(context, instance_id,
                              columns_to_join=None, use_slave=False):
    """Return a db instance owned by a different project than *context*."""
    return fake_instance.fake_db_instance(
        name='fake', project_id='%s_unequal' % context.project_id)
+
+
def fake_auth_context(context):
    """Authorization stub that always allows the request."""
    return True
+
+
class ShelvePolicyTestV21(test.NoDBTestCase):
    """Policy and locked-server checks for the shelve family of actions."""

    plugin = shelve_v21
    prefix = 'v3:os-shelve:'
    offload = 'shelve_offload'

    def setUp(self):
        super(ShelvePolicyTestV21, self).setUp()
        self.controller = self.plugin.ShelveController()

    def _fake_request(self):
        return fakes.HTTPRequestV3.blank('/servers/12/os-shelve')

    def _require_admin(self, action):
        # Restrict the given shelve action to admins via policy.
        policy.set_rules(
            {'compute_extension:%s%s' % (self.prefix, action):
                common_policy.parse_rule('role:admin')})

    def _allow(self, action):
        # Open the action and compute:get to everyone.
        policy.set_rules(
            {'compute:get': common_policy.parse_rule(''),
             'compute_extension:%s%s' % (self.prefix, action):
                common_policy.parse_rule('')})

    def _stub_locked_instance(self, auth_attr, api_method):
        # Authorize unconditionally and make the compute API raise the
        # locked-server error.
        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
        self.stubs.Set(self.plugin, auth_attr, fake_auth_context)
        self.stubs.Set(compute_api.API, api_method,
                       fakes.fake_actions_to_locked_server)

    def test_shelve_restricted_by_role(self):
        self._require_admin('shelve')
        self.assertRaises(exception.Forbidden, self.controller._shelve,
                          self._fake_request(), str(uuid.uuid4()), {})

    def test_shelve_allowed(self):
        # The instance belongs to another project, so the compute 'get'
        # check still refuses even though the shelve policy is open.
        self._allow('shelve')
        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
        self.assertRaises(exception.Forbidden, self.controller._shelve,
                          self._fake_request(), str(uuid.uuid4()), {})

    def test_shelve_locked_server(self):
        self._stub_locked_instance('auth_shelve', 'shelve')
        self.assertRaises(webob.exc.HTTPConflict, self.controller._shelve,
                          self._fake_request(), str(uuid.uuid4()), {})

    def test_unshelve_restricted_by_role(self):
        self._require_admin('unshelve')
        self.assertRaises(exception.Forbidden, self.controller._unshelve,
                          self._fake_request(), str(uuid.uuid4()), {})

    def test_unshelve_allowed(self):
        self._allow('unshelve')
        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
        self.assertRaises(exception.Forbidden, self.controller._unshelve,
                          self._fake_request(), str(uuid.uuid4()), {})

    def test_unshelve_locked_server(self):
        self._stub_locked_instance('auth_unshelve', 'unshelve')
        self.assertRaises(webob.exc.HTTPConflict, self.controller._unshelve,
                          self._fake_request(), str(uuid.uuid4()), {})

    def test_shelve_offload_restricted_by_role(self):
        self._require_admin(self.offload)
        self.assertRaises(exception.Forbidden,
                          self.controller._shelve_offload,
                          self._fake_request(), str(uuid.uuid4()), {})

    def test_shelve_offload_allowed(self):
        self._allow(self.offload)
        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
        self.assertRaises(exception.Forbidden,
                          self.controller._shelve_offload,
                          self._fake_request(), str(uuid.uuid4()), {})

    def test_shelve_offload_locked_server(self):
        self._stub_locked_instance('auth_shelve_offload', 'shelve_offload')
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller._shelve_offload,
                          self._fake_request(), str(uuid.uuid4()), {})
+
+
class ShelvePolicyTestV2(ShelvePolicyTestV21):
    """Run the shelve policy tests against the legacy v2 extension."""
    plugin = shelve_v2
    # v2 policy keys carry no 'v3:os-shelve:' prefix.
    prefix = ''
    offload = 'shelveOffload'

    def _fake_request(self):
        return fakes.HTTPRequest.blank('/v2/123/servers/12/os-shelve')
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_simple_tenant_usage.py b/nova/tests/unit/api/openstack/compute/contrib/test_simple_tenant_usage.py
new file mode 100644
index 0000000000..9639b886ae
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_simple_tenant_usage.py
@@ -0,0 +1,539 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from lxml import etree
+import mock
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import webob
+
+from nova.api.openstack.compute.contrib import simple_tenant_usage as \
+ simple_tenant_usage_v2
+from nova.api.openstack.compute.plugins.v3 import simple_tenant_usage as \
+ simple_tenant_usage_v21
+from nova.compute import flavors
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova import objects
+from nova.openstack.common import policy as common_policy
+from nova import policy
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova import utils
+
# Fixture dimensions: TENANTS tenants x SERVERS servers each, running for
# HOURS hours with the flavor sizes below.  NOW is captured once at import
# time, so the window START..STOP is fixed for the whole module.
SERVERS = 5
TENANTS = 2
HOURS = 24
ROOT_GB = 10
EPHEMERAL_GB = 20
MEMORY_MB = 1024
VCPUS = 2
NOW = timeutils.utcnow()
START = NOW - datetime.timedelta(hours=HOURS)
STOP = NOW
+
+
# Flavor record in the shape flavors.save_flavor_info() expects.
FAKE_INST_TYPE = {'id': 1,
                  'vcpus': VCPUS,
                  'root_gb': ROOT_GB,
                  'ephemeral_gb': EPHEMERAL_GB,
                  'memory_mb': MEMORY_MB,
                  'name': 'fakeflavor',
                  'flavorid': 'foo',
                  'rxtx_factor': 1.0,
                  'vcpu_weight': 1,
                  'swap': 0,
                  'created_at': None,
                  'updated_at': None,
                  'deleted_at': None,
                  'deleted': 0,
                  'disabled': False,
                  'is_public': True,
                  'extra_specs': {'foo': 'bar'}}
+
+
def get_fake_db_instance(start, end, instance_id, tenant_id,
                         vm_state=vm_states.ACTIVE):
    """Build a stub DB instance that ran from *start* to *end*.

    The instance uuid is derived from *instance_id* so the tests can
    predict it, and its system_metadata embeds FAKE_INST_TYPE.
    """
    sys_meta = utils.dict_to_metadata(
        flavors.save_flavor_info({}, FAKE_INST_TYPE))
    # NOTE(mriedem): We use fakes.stub_instance since it sets the fields
    # needed on the db instance for converting it to an object, but we still
    # need to override system_metadata to use our fake flavor.
    inst = fakes.stub_instance(
            id=instance_id,
            uuid='00000000-0000-0000-0000-00000000000000%02d' % instance_id,
            image_ref='1',
            project_id=tenant_id,
            user_id='fakeuser',
            display_name='name',
            flavor_id=FAKE_INST_TYPE['id'],
            launched_at=start,
            terminated_at=end,
            vm_state=vm_state,
            memory_mb=MEMORY_MB,
            vcpus=VCPUS,
            root_gb=ROOT_GB,
            ephemeral_gb=EPHEMERAL_GB,)
    inst['system_metadata'] = sys_meta
    return inst
+
+
def fake_instance_get_active_by_window_joined(context, begin, end,
                                              project_id, host):
    """Stub returning SERVERS instances for each of the TENANTS tenants."""
    instances = []
    for idx in xrange(TENANTS * SERVERS):
        # Integer division buckets consecutive ids into the same tenant.
        instances.append(get_fake_db_instance(
            START, STOP, idx, "faketenant_%s" % (idx / SERVERS)))
    return instances
+
+
+@mock.patch.object(db, 'instance_get_active_by_window_joined',
+ fake_instance_get_active_by_window_joined)
+class SimpleTenantUsageTestV21(test.TestCase):
+ url = '/v2/faketenant_0/os-simple-tenant-usage'
+ alt_url = '/v2/faketenant_1/os-simple-tenant-usage'
+ policy_rule_prefix = "compute_extension:v3:os-simple-tenant-usage"
+
    def setUp(self):
        """Create admin, owner, and other-tenant request contexts."""
        super(SimpleTenantUsageTestV21, self).setUp()
        self.admin_context = context.RequestContext('fakeadmin_0',
                                                    'faketenant_0',
                                                    is_admin=True)
        self.user_context = context.RequestContext('fakeadmin_0',
                                                   'faketenant_0',
                                                   is_admin=False)
        self.alt_user_context = context.RequestContext('fakeadmin_0',
                                                       'faketenant_1',
                                                       is_admin=False)
+
    def _get_wsgi_app(self, context):
        """Build a v2.1 API app with only the pieces these tests need."""
        return fakes.wsgi_app_v21(fake_auth_context=context,
                                  init_only=('servers',
                                             'os-simple-tenant-usage'))
+
+ def _test_verify_index(self, start, stop):
+ req = webob.Request.blank(
+ self.url + '?start=%s&end=%s' %
+ (start.isoformat(), stop.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self._get_wsgi_app(self.admin_context))
+
+ self.assertEqual(res.status_int, 200)
+ res_dict = jsonutils.loads(res.body)
+ usages = res_dict['tenant_usages']
+ for i in xrange(TENANTS):
+ self.assertEqual(int(usages[i]['total_hours']),
+ SERVERS * HOURS)
+ self.assertEqual(int(usages[i]['total_local_gb_usage']),
+ SERVERS * (ROOT_GB + EPHEMERAL_GB) * HOURS)
+ self.assertEqual(int(usages[i]['total_memory_mb_usage']),
+ SERVERS * MEMORY_MB * HOURS)
+ self.assertEqual(int(usages[i]['total_vcpus_usage']),
+ SERVERS * VCPUS * HOURS)
+ self.assertFalse(usages[i].get('server_usages'))
+
    def test_verify_index(self):
        """Index over the exact fixture window."""
        self._test_verify_index(START, STOP)
+
    def test_verify_index_future_end_time(self):
        """An end time in the future still yields the full totals."""
        future = NOW + datetime.timedelta(hours=HOURS)
        self._test_verify_index(START, future)
+
    def test_verify_show(self):
        """Show over the exact fixture window."""
        self._test_verify_show(START, STOP)
+
    def test_verify_show_future_end_time(self):
        """An end time in the future still yields the full usage."""
        future = NOW + datetime.timedelta(hours=HOURS)
        self._test_verify_show(START, future)
+
+ def _get_tenant_usages(self, detailed=''):
+ req = webob.Request.blank(
+ self.url + '?detailed=%s&start=%s&end=%s' %
+ (detailed, START.isoformat(), STOP.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self._get_wsgi_app(self.admin_context))
+ self.assertEqual(res.status_int, 200)
+ res_dict = jsonutils.loads(res.body)
+ return res_dict['tenant_usages']
+
+ def test_verify_detailed_index(self):
+ usages = self._get_tenant_usages('1')
+ for i in xrange(TENANTS):
+ servers = usages[i]['server_usages']
+ for j in xrange(SERVERS):
+ self.assertEqual(int(servers[j]['hours']), HOURS)
+
    def test_verify_simple_index(self):
        """detailed=0 omits per-server usage entries."""
        usages = self._get_tenant_usages(detailed='0')
        for i in xrange(TENANTS):
            self.assertIsNone(usages[i].get('server_usages'))
+
    def test_verify_simple_index_empty_param(self):
        """An empty detailed= query parameter behaves like detailed=0."""
        # NOTE(lzyeval): 'detailed=&start=..&end=..'
        usages = self._get_tenant_usages()
        for i in xrange(TENANTS):
            self.assertIsNone(usages[i].get('server_usages'))
+
    def _test_verify_show(self, start, stop):
        """GET one tenant's usage as that tenant and validate server rows."""
        tenant_id = 0
        req = webob.Request.blank(
            self.url + '/faketenant_%s?start=%s&end=%s' %
            (tenant_id, start.isoformat(), stop.isoformat()))
        req.method = "GET"
        req.headers["content-type"] = "application/json"

        res = req.get_response(self._get_wsgi_app(self.user_context))
        self.assertEqual(res.status_int, 200)
        res_dict = jsonutils.loads(res.body)

        usage = res_dict['tenant_usage']
        servers = usage['server_usages']
        self.assertEqual(len(usage['server_usages']), SERVERS)
        # Instance uuids are deterministic: see get_fake_db_instance().
        uuids = ['00000000-0000-0000-0000-00000000000000%02d' %
                 (x + (tenant_id * SERVERS)) for x in xrange(SERVERS)]
        for j in xrange(SERVERS):
            # Uptime is always the fixture window START..STOP regardless
            # of the requested range.
            delta = STOP - START
            uptime = delta.days * 24 * 3600 + delta.seconds
            self.assertEqual(int(servers[j]['uptime']), uptime)
            self.assertEqual(int(servers[j]['hours']), HOURS)
            self.assertIn(servers[j]['instance_id'], uuids)
+
    def test_verify_show_cannot_view_other_tenant(self):
        """Per-project policy blocks reading another tenant's usage (403)."""
        req = webob.Request.blank(
            self.alt_url + '/faketenant_0?start=%s&end=%s' %
            (START.isoformat(), STOP.isoformat()))
        req.method = "GET"
        req.headers["content-type"] = "application/json"

        # Allow show only for admins or the owning project.
        rules = {
            self.policy_rule_prefix + ":show":
                common_policy.parse_rule([
                    ["role:admin"], ["project_id:%(project_id)s"]
                ])
        }
        policy.set_rules(rules)

        try:
            res = req.get_response(self._get_wsgi_app(self.alt_user_context))
            self.assertEqual(res.status_int, 403)
        finally:
            # Other tests rely on the default policy rules.
            policy.reset()
+
+ def test_get_tenants_usage_with_bad_start_date(self):
+ future = NOW + datetime.timedelta(hours=HOURS)
+ tenant_id = 0
+ req = webob.Request.blank(
+ self.url + '/'
+ 'faketenant_%s?start=%s&end=%s' %
+ (tenant_id, future.isoformat(), NOW.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self._get_wsgi_app(self.user_context))
+ self.assertEqual(res.status_int, 400)
+
+ def test_get_tenants_usage_with_invalid_start_date(self):
+ tenant_id = 0
+ req = webob.Request.blank(
+ self.url + '/'
+ 'faketenant_%s?start=%s&end=%s' %
+ (tenant_id, "xxxx", NOW.isoformat()))
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(self._get_wsgi_app(self.user_context))
+ self.assertEqual(res.status_int, 400)
+
    def _test_get_tenants_usage_with_one_date(self, date_url_param):
        """Requests with only one of start/end must still succeed (200)."""
        req = webob.Request.blank(
            self.url + '/'
            'faketenant_0?%s' % date_url_param)
        req.method = "GET"
        req.headers["content-type"] = "application/json"
        res = req.get_response(self._get_wsgi_app(self.user_context))
        self.assertEqual(200, res.status_int)
+
    def test_get_tenants_usage_with_no_start_date(self):
        """Only an end date is supplied."""
        self._test_get_tenants_usage_with_one_date(
            'end=%s' % (NOW + datetime.timedelta(5)).isoformat())
+
    def test_get_tenants_usage_with_no_end_date(self):
        """Only a start date is supplied."""
        self._test_get_tenants_usage_with_one_date(
            'start=%s' % (NOW - datetime.timedelta(5)).isoformat())
+
+
class SimpleTenantUsageTestV2(SimpleTenantUsageTestV21):
    """Re-run the tenant-usage API tests against the legacy v2 stack."""

    # v2 uses the extension-based policy rule name, not the v2.1 one.
    policy_rule_prefix = "compute_extension:simple_tenant_usage"

    def _get_wsgi_app(self, context):
        """Build a v2 WSGI app with only the Simple_tenant_usage extension."""
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Simple_tenant_usage'])
        return fakes.wsgi_app(fake_auth_context=context,
                              init_only=('os-simple-tenant-usage', ))
+
+
class SimpleTenantUsageSerializerTest(test.TestCase):
    """Tests for the XML serializer templates of the tenant-usage extension."""

    def _verify_server_usage(self, raw_usage, tree):
        """Check one <server_usage> element mirrors raw_usage exactly."""
        self.assertEqual('server_usage', tree.tag)

        # Figure out what fields we expect
        not_seen = set(raw_usage.keys())

        for child in tree:
            self.assertIn(child.tag, not_seen)
            not_seen.remove(child.tag)
            self.assertEqual(str(raw_usage[child.tag]), child.text)

        # Every expected field must have been serialized exactly once.
        self.assertEqual(len(not_seen), 0)

    def _verify_tenant_usage(self, raw_usage, tree):
        """Check one <tenant_usage> element, recursing into server usages."""
        self.assertEqual('tenant_usage', tree.tag)

        # Figure out what fields we expect
        not_seen = set(raw_usage.keys())

        for child in tree:
            self.assertIn(child.tag, not_seen)
            not_seen.remove(child.tag)
            if child.tag == 'server_usages':
                for idx, gr_child in enumerate(child):
                    self._verify_server_usage(raw_usage['server_usages'][idx],
                                              gr_child)
            else:
                self.assertEqual(str(raw_usage[child.tag]), child.text)

        self.assertEqual(len(not_seen), 0)

    def test_serializer_show(self):
        """A single tenant usage round-trips through the show template."""
        serializer = simple_tenant_usage_v2.SimpleTenantUsageTemplate()
        today = timeutils.utcnow()
        yesterday = today - datetime.timedelta(days=1)
        raw_usage = dict(
            tenant_id='tenant',
            total_local_gb_usage=789,
            total_vcpus_usage=456,
            total_memory_mb_usage=123,
            total_hours=24,
            start=yesterday,
            stop=today,
            server_usages=[dict(
                instance_id='00000000-0000-0000-0000-0000000000000000',
                name='test',
                hours=24,
                memory_mb=1024,
                local_gb=50,
                vcpus=1,
                tenant_id='tenant',
                flavor='m1.small',
                started_at=yesterday,
                ended_at=today,
                state='terminated',
                uptime=86400),
                dict(
                    instance_id='00000000-0000-0000-0000-0000000000000002',
                    name='test2',
                    hours=12,
                    memory_mb=512,
                    local_gb=25,
                    vcpus=2,
                    tenant_id='tenant',
                    flavor='m1.tiny',
                    started_at=yesterday,
                    ended_at=today,
                    state='terminated',
                    uptime=43200),
                ],
            )
        tenant_usage = dict(tenant_usage=raw_usage)
        text = serializer.serialize(tenant_usage)

        tree = etree.fromstring(text)

        self._verify_tenant_usage(raw_usage, tree)

    def test_serializer_index(self):
        """A list of tenant usages round-trips through the index template."""
        serializer = simple_tenant_usage_v2.SimpleTenantUsagesTemplate()
        today = timeutils.utcnow()
        yesterday = today - datetime.timedelta(days=1)
        raw_usages = [dict(
            tenant_id='tenant1',
            total_local_gb_usage=1024,
            total_vcpus_usage=23,
            total_memory_mb_usage=512,
            total_hours=24,
            start=yesterday,
            stop=today,
            server_usages=[dict(
                instance_id='00000000-0000-0000-0000-0000000000000001',
                name='test1',
                hours=24,
                memory_mb=1024,
                local_gb=50,
                vcpus=2,
                tenant_id='tenant1',
                flavor='m1.small',
                started_at=yesterday,
                ended_at=today,
                state='terminated',
                uptime=86400),
                dict(
                    instance_id='00000000-0000-0000-0000-0000000000000002',
                    name='test2',
                    hours=42,
                    memory_mb=4201,
                    local_gb=25,
                    vcpus=1,
                    tenant_id='tenant1',
                    flavor='m1.tiny',
                    started_at=today,
                    ended_at=yesterday,
                    state='terminated',
                    uptime=43200),
                ],
            ),
            dict(
                tenant_id='tenant2',
                total_local_gb_usage=512,
                total_vcpus_usage=32,
                total_memory_mb_usage=1024,
                total_hours=42,
                start=today,
                stop=yesterday,
                server_usages=[dict(
                    instance_id='00000000-0000-0000-0000-0000000000000003',
                    name='test3',
                    hours=24,
                    memory_mb=1024,
                    local_gb=50,
                    vcpus=2,
                    tenant_id='tenant2',
                    flavor='m1.small',
                    started_at=yesterday,
                    ended_at=today,
                    state='terminated',
                    uptime=86400),
                    dict(
                        instance_id='00000000-0000-0000-0000-0000000000000002',
                        name='test2',
                        hours=42,
                        memory_mb=4201,
                        local_gb=25,
                        vcpus=1,
                        tenant_id='tenant4',
                        flavor='m1.tiny',
                        started_at=today,
                        ended_at=yesterday,
                        state='terminated',
                        uptime=43200),
                    ],
                ),
            ]
        tenant_usages = dict(tenant_usages=raw_usages)
        text = serializer.serialize(tenant_usages)

        tree = etree.fromstring(text)

        self.assertEqual('tenant_usages', tree.tag)
        self.assertEqual(len(raw_usages), len(tree))
        for idx, child in enumerate(tree):
            self._verify_tenant_usage(raw_usages[idx], child)
+
+
class SimpleTenantUsageControllerTestV21(test.TestCase):
    """Tests for SimpleTenantUsageController flavor (instance type) lookup."""

    controller = simple_tenant_usage_v21.SimpleTenantUsageController()

    def setUp(self):
        super(SimpleTenantUsageControllerTestV21, self).setUp()

        self.context = context.RequestContext('fakeuser', 'fake-project')

        # Fake DB record for a deleted instance belonging to this project.
        self.baseinst = get_fake_db_instance(START, STOP, instance_id=1,
                                             tenant_id=self.context.project_id,
                                             vm_state=vm_states.DELETED)
        # convert the fake instance dict to an object
        self.inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), self.baseinst)

    def test_get_flavor_from_sys_meta(self):
        # Non-deleted instances get their type information from their
        # system_metadata
        with mock.patch.object(db, 'instance_get_by_uuid',
                               return_value=self.baseinst):
            flavor = self.controller._get_flavor(self.context,
                                                 self.inst_obj, {})
        self.assertEqual(objects.Flavor, type(flavor))
        self.assertEqual(FAKE_INST_TYPE['id'], flavor.id)

    def test_get_flavor_from_non_deleted_with_id_fails(self):
        # If an instance is not deleted and missing type information from
        # system_metadata, then that's a bug
        self.inst_obj.system_metadata = {}
        self.assertRaises(KeyError,
                          self.controller._get_flavor, self.context,
                          self.inst_obj, {})

    def test_get_flavor_from_deleted_with_id(self):
        # Deleted instances may not have type info in system_metadata,
        # so verify that they get their type from a lookup of their
        # instance_type_id
        self.inst_obj.system_metadata = {}
        self.inst_obj.deleted = 1
        flavor = self.controller._get_flavor(self.context, self.inst_obj, {})
        self.assertEqual(objects.Flavor, type(flavor))
        self.assertEqual(FAKE_INST_TYPE['id'], flavor.id)

    def test_get_flavor_from_deleted_with_id_of_deleted(self):
        # Verify the legacy behavior of instance_type_id pointing to a
        # missing type being non-fatal
        self.inst_obj.system_metadata = {}
        self.inst_obj.deleted = 1
        self.inst_obj.instance_type_id = 99
        flavor = self.controller._get_flavor(self.context, self.inst_obj, {})
        self.assertIsNone(flavor)
+
+
class SimpleTenantUsageControllerTestV2(SimpleTenantUsageControllerTestV21):
    """Re-run the flavor-lookup tests against the legacy v2 controller."""

    controller = simple_tenant_usage_v2.SimpleTenantUsageController()
+
+
class SimpleTenantUsageUtilsV21(test.NoDBTestCase):
    """Tests for the module-level parse_strtime helper (v2.1)."""

    simple_tenant_usage = simple_tenant_usage_v21

    def test_valid_string(self):
        """An ISO-like timestamp parses to the expected datetime."""
        dt = self.simple_tenant_usage.parse_strtime(
            "2014-02-21T13:47:20.824060", "%Y-%m-%dT%H:%M:%S.%f")
        self.assertEqual(datetime.datetime(
            microsecond=824060, second=20, minute=47, hour=13,
            day=21, month=2, year=2014), dt)

    def test_invalid_string(self):
        # A space instead of the 'T' separator must raise InvalidStrTime.
        self.assertRaises(exception.InvalidStrTime,
                          self.simple_tenant_usage.parse_strtime,
                          "2014-02-21 13:47:20.824060",
                          "%Y-%m-%dT%H:%M:%S.%f")
+
+
class SimpleTenantUsageUtilsV2(SimpleTenantUsageUtilsV21):
    """Re-run the parse_strtime tests against the legacy v2 module."""

    simple_tenant_usage = simple_tenant_usage_v2
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_snapshots.py b/nova/tests/unit/api/openstack/compute/contrib/test_snapshots.py
new file mode 100644
index 0000000000..74bb1948e6
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_snapshots.py
@@ -0,0 +1,209 @@
+# Copyright 2011 Denali Systems, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import webob
+
+from nova.api.openstack.compute.contrib import volumes
+from nova import context
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.volume import cinder
+
+
class SnapshotApiTest(test.NoDBTestCase):
    """API tests for os-snapshots with the cinder API fully stubbed out."""

    def setUp(self):
        super(SnapshotApiTest, self).setUp()
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        # Replace every cinder snapshot/volume call with canned fakes.
        self.stubs.Set(cinder.API, "create_snapshot",
                       fakes.stub_snapshot_create)
        self.stubs.Set(cinder.API, "create_snapshot_force",
                       fakes.stub_snapshot_create)
        self.stubs.Set(cinder.API, "delete_snapshot",
                       fakes.stub_snapshot_delete)
        self.stubs.Set(cinder.API, "get_snapshot", fakes.stub_snapshot_get)
        self.stubs.Set(cinder.API, "get_all_snapshots",
                       fakes.stub_snapshot_get_all)
        self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Volumes'])

        self.context = context.get_admin_context()
        self.app = fakes.wsgi_app(init_only=('os-snapshots',))

    def test_snapshot_create(self):
        """POST without force succeeds and echoes the request fields."""
        snapshot = {"volume_id": 12,
                    "force": False,
                    "display_name": "Snapshot Test Name",
                    "display_description": "Snapshot Test Desc"}
        body = dict(snapshot=snapshot)
        req = webob.Request.blank('/v2/fake/os-snapshots')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'

        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)
        resp_dict = jsonutils.loads(resp.body)
        self.assertIn('snapshot', resp_dict)
        self.assertEqual(resp_dict['snapshot']['displayName'],
                         snapshot['display_name'])
        self.assertEqual(resp_dict['snapshot']['displayDescription'],
                         snapshot['display_description'])
        self.assertEqual(resp_dict['snapshot']['volumeId'],
                         snapshot['volume_id'])

    def test_snapshot_create_force(self):
        """POST with force=True succeeds; a garbage force value is a 400."""
        snapshot = {"volume_id": 12,
                    "force": True,
                    "display_name": "Snapshot Test Name",
                    "display_description": "Snapshot Test Desc"}
        body = dict(snapshot=snapshot)
        req = webob.Request.blank('/v2/fake/os-snapshots')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'

        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)

        resp_dict = jsonutils.loads(resp.body)
        self.assertIn('snapshot', resp_dict)
        self.assertEqual(resp_dict['snapshot']['displayName'],
                         snapshot['display_name'])
        self.assertEqual(resp_dict['snapshot']['displayDescription'],
                         snapshot['display_description'])
        self.assertEqual(resp_dict['snapshot']['volumeId'],
                         snapshot['volume_id'])

        # Test invalid force parameter
        snapshot = {"volume_id": 12,
                    "force": '**&&^^%%$$##@@'}
        body = dict(snapshot=snapshot)
        req = webob.Request.blank('/v2/fake/os-snapshots')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'

        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 400)

    def test_snapshot_delete(self):
        """DELETE of an existing snapshot returns 202 Accepted."""
        snapshot_id = 123
        req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
        req.method = 'DELETE'

        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 202)

    def test_snapshot_delete_invalid_id(self):
        """DELETE of an unknown snapshot id returns 404."""
        snapshot_id = -1
        req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
        req.method = 'DELETE'

        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 404)

    def test_snapshot_show(self):
        """GET of an existing snapshot returns it as JSON."""
        snapshot_id = 123
        req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
        req.method = 'GET'
        resp = req.get_response(self.app)

        self.assertEqual(resp.status_int, 200)
        resp_dict = jsonutils.loads(resp.body)
        self.assertIn('snapshot', resp_dict)
        self.assertEqual(resp_dict['snapshot']['id'], str(snapshot_id))

    def test_snapshot_show_invalid_id(self):
        """GET of an unknown snapshot id returns 404."""
        snapshot_id = -1
        req = webob.Request.blank('/v2/fake/os-snapshots/%d' % snapshot_id)
        req.method = 'GET'
        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 404)

    def test_snapshot_detail(self):
        """GET /detail lists all three stubbed snapshots."""
        req = webob.Request.blank('/v2/fake/os-snapshots/detail')
        req.method = 'GET'
        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)

        resp_dict = jsonutils.loads(resp.body)
        self.assertIn('snapshots', resp_dict)
        resp_snapshots = resp_dict['snapshots']
        self.assertEqual(len(resp_snapshots), 3)

        resp_snapshot = resp_snapshots.pop()
        self.assertEqual(resp_snapshot['id'], 102)
+
+
class SnapshotSerializerTest(test.NoDBTestCase):
    """Tests for the snapshot XML serializer templates."""

    def _verify_snapshot(self, snap, tree):
        """Check one <snapshot> element carries every expected attribute."""
        self.assertEqual(tree.tag, 'snapshot')

        for attr in ('id', 'status', 'size', 'createdAt',
                     'displayName', 'displayDescription', 'volumeId'):
            self.assertEqual(str(snap[attr]), tree.get(attr))

    def test_snapshot_show_create_serializer(self):
        """A single snapshot round-trips through the show/create template."""
        serializer = volumes.SnapshotTemplate()
        raw_snapshot = dict(
            id='snap_id',
            status='snap_status',
            size=1024,
            createdAt=timeutils.utcnow(),
            displayName='snap_name',
            displayDescription='snap_desc',
            volumeId='vol_id',
            )
        text = serializer.serialize(dict(snapshot=raw_snapshot))

        tree = etree.fromstring(text)

        self._verify_snapshot(raw_snapshot, tree)

    def test_snapshot_index_detail_serializer(self):
        """A list of snapshots round-trips through the index/detail template."""
        serializer = volumes.SnapshotsTemplate()
        raw_snapshots = [dict(
            id='snap1_id',
            status='snap1_status',
            size=1024,
            createdAt=timeutils.utcnow(),
            displayName='snap1_name',
            displayDescription='snap1_desc',
            volumeId='vol1_id',
            ),
            dict(
                id='snap2_id',
                status='snap2_status',
                size=1024,
                createdAt=timeutils.utcnow(),
                displayName='snap2_name',
                displayDescription='snap2_desc',
                volumeId='vol2_id',
                )]
        text = serializer.serialize(dict(snapshots=raw_snapshots))

        tree = etree.fromstring(text)

        self.assertEqual('snapshots', tree.tag)
        self.assertEqual(len(raw_snapshots), len(tree))
        for idx, child in enumerate(tree):
            self._verify_snapshot(raw_snapshots[idx], child)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_tenant_networks.py b/nova/tests/unit/api/openstack/compute/contrib/test_tenant_networks.py
new file mode 100644
index 0000000000..30d4da6ba1
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_tenant_networks.py
@@ -0,0 +1,76 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import webob
+
+from nova.api.openstack.compute.contrib import os_tenant_networks as networks
+from nova.api.openstack.compute.plugins.v3 import tenant_networks \
+ as networks_v21
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
class TenantNetworksTestV21(test.NoDBTestCase):
    """Tests for tenant-network delete error translation (v2.1 controller)."""

    ctrlr = networks_v21.TenantNetworkController

    def setUp(self):
        super(TenantNetworksTestV21, self).setUp()
        self.controller = self.ctrlr()
        # Enable quota handling so delete reserves/rolls back quota.
        self.flags(enable_network_quota=True)

    @mock.patch('nova.network.api.API.delete',
                side_effect=exception.NetworkInUse(network_id=1))
    def test_network_delete_in_use(self, mock_delete):
        """Deleting an in-use network maps NetworkInUse to HTTP 409."""
        req = fakes.HTTPRequest.blank('/v2/1234/os-tenant-networks/1')

        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller.delete, req, 1)

    @mock.patch('nova.quota.QUOTAS.reserve')
    @mock.patch('nova.quota.QUOTAS.rollback')
    @mock.patch('nova.network.api.API.delete')
    def _test_network_delete_exception(self, ex, expex, delete_mock,
                                       rollback_mock, reserve_mock):
        """Helper: delete raises `ex`; expect `expex` and a quota rollback."""
        req = fakes.HTTPRequest.blank('/v2/1234/os-tenant-networks')
        ctxt = req.environ['nova.context']

        reserve_mock.return_value = 'rv'
        delete_mock.side_effect = ex

        self.assertRaises(expex, self.controller.delete, req, 1)

        # The quota reservation must be rolled back on failure.
        delete_mock.assert_called_once_with(ctxt, 1)
        rollback_mock.assert_called_once_with(ctxt, 'rv')
        reserve_mock.assert_called_once_with(ctxt, networks=-1)

    def test_network_delete_exception_network_not_found(self):
        ex = exception.NetworkNotFound(network_id=1)
        expex = webob.exc.HTTPNotFound
        self._test_network_delete_exception(ex, expex)

    def test_network_delete_exception_policy_failed(self):
        ex = exception.PolicyNotAuthorized(action='dummy')
        expex = webob.exc.HTTPForbidden
        self._test_network_delete_exception(ex, expex)

    def test_network_delete_exception_network_in_use(self):
        ex = exception.NetworkInUse(network_id=1)
        expex = webob.exc.HTTPConflict
        self._test_network_delete_exception(ex, expex)
+
+
class TenantNetworksTestV2(TenantNetworksTestV21):
    """Re-run the tenant-network tests against the legacy v2 controller."""

    ctrlr = networks.NetworkController
diff --git a/nova/tests/api/openstack/compute/contrib/test_used_limits.py b/nova/tests/unit/api/openstack/compute/contrib/test_used_limits.py
index ee2b0d703b..ee2b0d703b 100644
--- a/nova/tests/api/openstack/compute/contrib/test_used_limits.py
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_used_limits.py
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_virtual_interfaces.py b/nova/tests/unit/api/openstack/compute/contrib/test_virtual_interfaces.py
new file mode 100644
index 0000000000..e8484d61b9
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_virtual_interfaces.py
@@ -0,0 +1,127 @@
+# Copyright (C) 2011 Midokura KK
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.contrib import virtual_interfaces
+from nova.api.openstack import wsgi
+from nova import compute
+from nova.compute import api as compute_api
+from nova import context
+from nova import exception
+from nova import network
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'


def compute_api_get(self, context, instance_id, expected_attrs=None,
                    want_objects=False):
    """Stub for compute.api.API.get returning a minimal instance record."""
    instance = {'uuid': FAKE_UUID, 'host': 'bob'}
    instance['id'] = instance_id
    instance['instance_type_id'] = 1
    return instance
+
+
def get_vifs_by_instance(self, context, instance_id):
    """Stub for network.api.API.get_vifs_by_instance: two fixed VIFs."""
    fixed = (
        ('00000000-0000-0000-0000-00000000000000000', '00-00-00-00-00-00'),
        ('11111111-1111-1111-1111-11111111111111111', '11-11-11-11-11-11'),
    )
    return [{'uuid': vif_uuid, 'address': mac} for vif_uuid, mac in fixed]
+
+
class FakeRequest(object):
    """Minimal request stand-in exposing only the WSGI environ."""

    def __init__(self, context):
        # Controllers pull the auth context from the 'nova.context' key.
        self.environ = {}
        self.environ['nova.context'] = context
+
+
class ServerVirtualInterfaceTest(test.NoDBTestCase):
    """API tests for the os-virtual-interfaces extension."""

    def setUp(self):
        super(ServerVirtualInterfaceTest, self).setUp()
        # Stub compute and network lookups with the module-level fakes.
        self.stubs.Set(compute.api.API, "get",
                       compute_api_get)
        self.stubs.Set(network.api.API, "get_vifs_by_instance",
                       get_vifs_by_instance)
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Virtual_interfaces'])

    def test_get_virtual_interfaces_list(self):
        """Listing VIFs returns id/mac pairs for the stubbed interfaces."""
        url = '/v2/fake/servers/abcd/os-virtual-interfaces'
        req = webob.Request.blank(url)
        res = req.get_response(fakes.wsgi_app(
            init_only=('os-virtual-interfaces',)))
        self.assertEqual(res.status_int, 200)
        res_dict = jsonutils.loads(res.body)
        response = {'virtual_interfaces': [
            {'id': '00000000-0000-0000-0000-00000000000000000',
             'mac_address': '00-00-00-00-00-00'},
            {'id': '11111111-1111-1111-1111-11111111111111111',
             'mac_address': '11-11-11-11-11-11'}]}
        self.assertEqual(res_dict, response)

    def test_vif_instance_not_found(self):
        """InstanceNotFound from compute get maps to HTTP 404."""
        self.mox.StubOutWithMock(compute_api.API, 'get')
        fake_context = context.RequestContext('fake', 'fake')
        fake_req = FakeRequest(fake_context)

        compute_api.API.get(fake_context, 'fake_uuid',
                            expected_attrs=None,
                            want_objects=True).AndRaise(
            exception.InstanceNotFound(instance_id='instance-0000'))

        self.mox.ReplayAll()
        self.assertRaises(
            webob.exc.HTTPNotFound,
            virtual_interfaces.ServerVirtualInterfaceController().index,
            fake_req, 'fake_uuid')
+
+
class ServerVirtualInterfaceSerializerTest(test.NoDBTestCase):
    """Tests for the virtual-interface XML serializer template."""

    def setUp(self):
        super(ServerVirtualInterfaceSerializerTest, self).setUp()
        self.namespace = wsgi.XMLNS_V11
        self.serializer = virtual_interfaces.VirtualInterfaceTemplate()

    def _tag(self, elem):
        """Strip the XML namespace from elem's tag, asserting it matches."""
        tagname = elem.tag
        self.assertEqual(tagname[0], '{')
        tmp = tagname.partition('}')
        namespace = tmp[0][1:]
        self.assertEqual(namespace, self.namespace)
        return tmp[2]

    def test_serializer(self):
        """Serialized VIFs keep their id and mac_address attributes."""
        raw_vifs = [dict(
            id='uuid1',
            mac_address='aa:bb:cc:dd:ee:ff'),
            dict(
                id='uuid2',
                mac_address='bb:aa:dd:cc:ff:ee')]
        vifs = dict(virtual_interfaces=raw_vifs)
        text = self.serializer.serialize(vifs)

        tree = etree.fromstring(text)

        self.assertEqual('virtual_interfaces', self._tag(tree))
        self.assertEqual(len(raw_vifs), len(tree))
        for idx, child in enumerate(tree):
            self.assertEqual('virtual_interface', self._tag(child))
            self.assertEqual(raw_vifs[idx]['id'], child.get('id'))
            self.assertEqual(raw_vifs[idx]['mac_address'],
                             child.get('mac_address'))
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_volumes.py b/nova/tests/unit/api/openstack/compute/contrib/test_volumes.py
new file mode 100644
index 0000000000..e3c5b8b071
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_volumes.py
@@ -0,0 +1,1083 @@
+# Copyright 2013 Josh Durgin
+# Copyright 2013 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from lxml import etree
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import webob
+from webob import exc
+
+from nova.api.openstack.compute.contrib import assisted_volume_snapshots as \
+ assisted_snaps
+from nova.api.openstack.compute.contrib import volumes
+from nova.api.openstack.compute.plugins.v3 import volumes as volumes_v3
+from nova.api.openstack import extensions
+from nova.compute import api as compute_api
+from nova.compute import flavors
+from nova import context
+from nova import db
+from nova import exception
+from nova import objects
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.volume import cinder
+
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
+
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+FAKE_UUID_A = '00000000-aaaa-aaaa-aaaa-000000000000'
+FAKE_UUID_B = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
+FAKE_UUID_C = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
+FAKE_UUID_D = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
+
+IMAGE_UUID = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+
+
def fake_get_instance(self, context, instance_id, want_objects=False,
                      expected_attrs=None):
    """Stub for compute_api.API.get: build a fake Instance with this uuid."""
    return fake_instance.fake_instance_obj(context, **{'uuid': instance_id})
+
+
def fake_get_volume(self, context, id):
    """Stub for cinder.API.get; every lookup yields the same fake volume."""
    volume_id = 'woot'
    return {'id': volume_id}
+
+
def fake_attach_volume(self, context, instance, volume_id, device):
    """No-op stub for compute_api.API.attach_volume."""
    return None
+
+
def fake_detach_volume(self, context, instance, volume):
    """No-op stub for compute_api.API.detach_volume."""
    return None
+
+
def fake_swap_volume(self, context, instance,
                     old_volume_id, new_volume_id):
    """No-op stub for compute_api.API.swap_volume."""
    return None
+
+
def fake_create_snapshot(self, context, volume, name, description):
    """Stub for cinder.API.create_snapshot returning one fixed snapshot."""
    snapshot = dict(id=123, volume_id='fakeVolId', status='available')
    snapshot.update(volume_size=123,
                    created_at='2013-01-01 00:00:01',
                    display_name='myVolumeName',
                    display_description='myVolumeDescription')
    return snapshot
+
+
def fake_delete_snapshot(self, context, snapshot_id):
    """No-op stub for cinder.API.delete_snapshot."""
    return None
+
+
def fake_compute_volume_snapshot_delete(self, context, volume_id, snapshot_id,
                                        delete_info):
    """No-op stub for compute_api.API.volume_snapshot_delete."""
    return None
+
+
def fake_compute_volume_snapshot_create(self, context, volume_id,
                                        create_info):
    """No-op stub for compute_api.API.volume_snapshot_create."""
    return None
+
+
def fake_bdms_get_all_by_instance(context, instance_uuid, use_slave=False):
    """Stub for db.block_device_mapping_get_all_by_instance.

    Returns two volume-backed BDM dicts (volumes A and B) attached to the
    given instance at /dev/fake0 and /dev/fake1.
    """
    return [fake_block_device.FakeDbBlockDeviceDict(
            {'id': 1,
             'instance_uuid': instance_uuid,
             'device_name': '/dev/fake0',
             'delete_on_termination': 'False',
             'source_type': 'volume',
             'destination_type': 'volume',
             'snapshot_id': None,
             'volume_id': FAKE_UUID_A,
             'volume_size': 1}),
            fake_block_device.FakeDbBlockDeviceDict(
            {'id': 2,
             'instance_uuid': instance_uuid,
             'device_name': '/dev/fake1',
             'delete_on_termination': 'False',
             'source_type': 'volume',
             'destination_type': 'volume',
             'snapshot_id': None,
             'volume_id': FAKE_UUID_B,
             'volume_size': 1})]
+
+
class BootFromVolumeTest(test.TestCase):
    """Tests that boot-from-volume requests pass BDMs through to compute."""

    def setUp(self):
        super(BootFromVolumeTest, self).setUp()
        self.stubs.Set(compute_api.API, 'create',
                       self._get_fake_compute_api_create())
        fakes.stub_out_nw_api(self.stubs)
        # Captured by the fake create() so tests can inspect what was sent.
        self._block_device_mapping_seen = None
        self._legacy_bdm_seen = True
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Volumes', 'Block_device_mapping_v2_boot'])

    def _get_fake_compute_api_create(self):
        """Build a compute create() stub that records the BDM kwargs."""
        def _fake_compute_api_create(cls, context, instance_type,
                                     image_href, **kwargs):
            self._block_device_mapping_seen = kwargs.get(
                'block_device_mapping')
            self._legacy_bdm_seen = kwargs.get('legacy_bdm')

            inst_type = flavors.get_flavor_by_flavor_id(2)
            resv_id = None
            return ([{'id': 1,
                      'display_name': 'test_server',
                      'uuid': FAKE_UUID,
                      'instance_type': inst_type,
                      'access_ip_v4': '1.2.3.4',
                      'access_ip_v6': 'fead::1234',
                      'image_ref': IMAGE_UUID,
                      'user_id': 'fake',
                      'project_id': 'fake',
                      'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
                      'updated_at': datetime.datetime(2010, 11, 11, 11, 0, 0),
                      'progress': 0,
                      'fixed_ips': []
                      }], resv_id)
        return _fake_compute_api_create

    def test_create_root_volume(self):
        """Legacy block_device_mapping is forwarded with legacy_bdm=True."""
        body = dict(server=dict(
            name='test_server', imageRef=IMAGE_UUID,
            flavorRef=2, min_count=1, max_count=1,
            block_device_mapping=[dict(
                volume_id=1,
                device_name='/dev/vda',
                virtual='root',
                delete_on_termination=False,
                )]
            ))
        req = webob.Request.blank('/v2/fake/os-volumes_boot')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            init_only=('os-volumes_boot', 'servers')))
        self.assertEqual(res.status_int, 202)
        server = jsonutils.loads(res.body)['server']
        self.assertEqual(FAKE_UUID, server['id'])
        self.assertEqual(CONF.password_length, len(server['adminPass']))
        self.assertEqual(len(self._block_device_mapping_seen), 1)
        self.assertTrue(self._legacy_bdm_seen)
        self.assertEqual(self._block_device_mapping_seen[0]['volume_id'], 1)
        self.assertEqual(self._block_device_mapping_seen[0]['device_name'],
                         '/dev/vda')

    def test_create_root_volume_bdm_v2(self):
        """block_device_mapping_v2 is forwarded with legacy_bdm=False."""
        body = dict(server=dict(
            name='test_server', imageRef=IMAGE_UUID,
            flavorRef=2, min_count=1, max_count=1,
            block_device_mapping_v2=[dict(
                source_type='volume',
                uuid=1,
                device_name='/dev/vda',
                boot_index=0,
                delete_on_termination=False,
                )]
            ))
        req = webob.Request.blank('/v2/fake/os-volumes_boot')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app(
            init_only=('os-volumes_boot', 'servers')))
        self.assertEqual(res.status_int, 202)
        server = jsonutils.loads(res.body)['server']
        self.assertEqual(FAKE_UUID, server['id'])
        self.assertEqual(CONF.password_length, len(server['adminPass']))
        self.assertEqual(len(self._block_device_mapping_seen), 1)
        self.assertFalse(self._legacy_bdm_seen)
        self.assertEqual(self._block_device_mapping_seen[0]['volume_id'], 1)
        self.assertEqual(self._block_device_mapping_seen[0]['boot_index'],
                         0)
        self.assertEqual(self._block_device_mapping_seen[0]['device_name'],
                         '/dev/vda')
+
+
class VolumeApiTestV21(test.TestCase):
    """API tests for os-volumes with the cinder API stubbed out (v2.1)."""

    url_prefix = '/v2/fake'

    def setUp(self):
        super(VolumeApiTestV21, self).setUp()
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)

        # Canned cinder responses for delete/get/get_all.
        self.stubs.Set(cinder.API, "delete", fakes.stub_volume_delete)
        self.stubs.Set(cinder.API, "get", fakes.stub_volume_get)
        self.stubs.Set(cinder.API, "get_all", fakes.stub_volume_get_all)
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Volumes'])

        self.context = context.get_admin_context()
        self.app = self._get_app()

    def _get_app(self):
        """Return the WSGI app under test; overridden by the v2 subclass."""
        return fakes.wsgi_app_v21()

    def test_volume_create(self):
        """POST succeeds and echoes size/name/description/AZ."""
        self.stubs.Set(cinder.API, "create", fakes.stub_volume_create)

        vol = {"size": 100,
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "zone1:host1"}
        body = {"volume": vol}
        req = webob.Request.blank(self.url_prefix + '/os-volumes')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'
        resp = req.get_response(self.app)

        self.assertEqual(resp.status_int, 200)

        resp_dict = jsonutils.loads(resp.body)
        self.assertIn('volume', resp_dict)
        self.assertEqual(resp_dict['volume']['size'],
                         vol['size'])
        self.assertEqual(resp_dict['volume']['displayName'],
                         vol['display_name'])
        self.assertEqual(resp_dict['volume']['displayDescription'],
                         vol['display_description'])
        self.assertEqual(resp_dict['volume']['availabilityZone'],
                         vol['availability_zone'])

    def test_volume_create_bad(self):
        """InvalidInput from cinder create maps to HTTP 400."""
        def fake_volume_create(self, context, size, name, description,
                               snapshot, **param):
            raise exception.InvalidInput(reason="bad request data")

        self.stubs.Set(cinder.API, "create", fake_volume_create)

        vol = {"size": '#$?',
               "display_name": "Volume Test Name",
               "display_description": "Volume Test Desc",
               "availability_zone": "zone1:host1"}
        body = {"volume": vol}

        req = fakes.HTTPRequest.blank(self.url_prefix + '/os-volumes')
        self.assertRaises(webob.exc.HTTPBadRequest,
                          volumes.VolumeController().create, req, body)

    def test_volume_index(self):
        req = webob.Request.blank(self.url_prefix + '/os-volumes')
        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)

    def test_volume_detail(self):
        req = webob.Request.blank(self.url_prefix + '/os-volumes/detail')
        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)

    def test_volume_show(self):
        req = webob.Request.blank(self.url_prefix + '/os-volumes/123')
        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 200)

    def test_volume_show_no_volume(self):
        """GET of a missing volume returns 404 with an explanatory body."""
        self.stubs.Set(cinder.API, "get", fakes.stub_volume_notfound)

        req = webob.Request.blank(self.url_prefix + '/os-volumes/456')
        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 404)
        self.assertIn('Volume 456 could not be found.', resp.body)

    def test_volume_delete(self):
        req = webob.Request.blank(self.url_prefix + '/os-volumes/123')
        req.method = 'DELETE'
        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 202)

    def test_volume_delete_no_volume(self):
        """DELETE of a missing volume returns 404 with an explanatory body."""
        self.stubs.Set(cinder.API, "delete", fakes.stub_volume_notfound)

        req = webob.Request.blank(self.url_prefix + '/os-volumes/456')
        req.method = 'DELETE'
        resp = req.get_response(self.app)
        self.assertEqual(resp.status_int, 404)
        self.assertIn('Volume 456 could not be found.', resp.body)
+
+
class VolumeApiTestV2(VolumeApiTestV21):
    """Re-runs the v2.1 volume API tests against the v2 contrib app."""

    def setUp(self):
        super(VolumeApiTestV2, self).setUp()
        # Load only the Volumes contrib extension into the v2 API.
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Volumes'])

        self.context = context.get_admin_context()
        self.app = self._get_app()

    def _get_app(self):
        # v2 uses the plain wsgi app instead of the v2.1 one.
        return fakes.wsgi_app()
+
+
class VolumeAttachTests(test.TestCase):
    """Tests for VolumeAttachmentController: show, attach, detach and swap.

    Instance, BDM and volume lookups are stubbed with the module-level
    fakes; FAKE_UUID_A is an attached volume, FAKE_UUID_C is not attached.
    """

    def setUp(self):
        super(VolumeAttachTests, self).setUp()
        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                       fake_bdms_get_all_by_instance)
        self.stubs.Set(compute_api.API, 'get', fake_get_instance)
        self.stubs.Set(cinder.API, 'get', fake_get_volume)
        self.context = context.get_admin_context()
        self.expected_show = {'volumeAttachment':
                              {'device': '/dev/fake0',
                               'serverId': FAKE_UUID,
                               'id': FAKE_UUID_A,
                               'volumeId': FAKE_UUID_A
                              }}
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.attachments = volumes.VolumeAttachmentController(self.ext_mgr)

    def test_show(self):
        req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
        req.method = 'POST'
        req.body = jsonutils.dumps({})
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context

        result = self.attachments.show(req, FAKE_UUID, FAKE_UUID_A)
        self.assertEqual(self.expected_show, result)

    @mock.patch.object(compute_api.API, 'get',
        side_effect=exception.InstanceNotFound(instance_id=FAKE_UUID))
    def test_show_no_instance(self, mock_mr):
        req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
        req.method = 'POST'
        req.body = jsonutils.dumps({})
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context

        self.assertRaises(exc.HTTPNotFound,
                          self.attachments.show,
                          req,
                          FAKE_UUID,
                          FAKE_UUID_A)

    @mock.patch.object(objects.BlockDeviceMappingList,
                       'get_by_instance_uuid', return_value=None)
    def test_show_no_bdms(self, mock_mr):
        req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
        req.method = 'POST'
        req.body = jsonutils.dumps({})
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context

        self.assertRaises(exc.HTTPNotFound,
                          self.attachments.show,
                          req,
                          FAKE_UUID,
                          FAKE_UUID_A)

    def test_show_bdms_no_mountpoint(self):
        FAKE_UUID_NOTEXIST = '00000000-aaaa-aaaa-aaaa-aaaaaaaaaaaa'

        req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
        req.method = 'POST'
        req.body = jsonutils.dumps({})
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context

        self.assertRaises(exc.HTTPNotFound,
                          self.attachments.show,
                          req,
                          FAKE_UUID,
                          FAKE_UUID_NOTEXIST)

    def test_detach(self):
        self.stubs.Set(compute_api.API,
                       'detach_volume',
                       fake_detach_volume)
        req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
        req.method = 'DELETE'
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context

        result = self.attachments.delete(req, FAKE_UUID, FAKE_UUID_A)
        self.assertEqual('202 Accepted', result.status)

    def test_detach_vol_not_found(self):
        self.stubs.Set(compute_api.API,
                       'detach_volume',
                       fake_detach_volume)
        req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
        req.method = 'DELETE'
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context

        self.assertRaises(exc.HTTPNotFound,
                          self.attachments.delete,
                          req,
                          FAKE_UUID,
                          FAKE_UUID_C)

    @mock.patch('nova.objects.BlockDeviceMapping.is_root',
                 new_callable=mock.PropertyMock)
    def test_detach_vol_root(self, mock_isroot):
        # Detaching the root device is forbidden.
        req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
        req.method = 'DELETE'
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context
        mock_isroot.return_value = True
        self.assertRaises(exc.HTTPForbidden,
                          self.attachments.delete,
                          req,
                          FAKE_UUID,
                          FAKE_UUID_A)

    def test_detach_volume_from_locked_server(self):
        def fake_detach_volume_from_locked_server(self, context,
                                                  instance, volume):
            raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])

        self.stubs.Set(compute_api.API,
                       'detach_volume',
                       fake_detach_volume_from_locked_server)
        req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
        req.method = 'DELETE'
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context

        self.assertRaises(webob.exc.HTTPConflict, self.attachments.delete,
                          req, FAKE_UUID, FAKE_UUID_A)

    def test_attach_volume(self):
        self.stubs.Set(compute_api.API,
                       'attach_volume',
                       fake_attach_volume)
        body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
                                     'device': '/dev/fake'}}
        req = webob.Request.blank('/v2/servers/id/os-volume_attachments')
        req.method = 'POST'
        req.body = jsonutils.dumps({})
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context
        result = self.attachments.create(req, FAKE_UUID, body)
        self.assertEqual(result['volumeAttachment']['id'],
            '00000000-aaaa-aaaa-aaaa-000000000000')

    def test_attach_volume_to_locked_server(self):
        def fake_attach_volume_to_locked_server(self, context, instance,
                                                volume_id, device=None):
            raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])

        self.stubs.Set(compute_api.API,
                       'attach_volume',
                       fake_attach_volume_to_locked_server)
        body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
                                     'device': '/dev/fake'}}
        req = webob.Request.blank('/v2/servers/id/os-volume_attachments')
        req.method = 'POST'
        req.body = jsonutils.dumps({})
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context

        self.assertRaises(webob.exc.HTTPConflict, self.attachments.create,
                          req, FAKE_UUID, body)

    def test_attach_volume_bad_id(self):
        self.stubs.Set(compute_api.API,
                       'attach_volume',
                       fake_attach_volume)

        body = {
            'volumeAttachment': {
                'device': None,
                'volumeId': 'TESTVOLUME',
            }
        }

        req = webob.Request.blank('/v2/servers/id/os-volume_attachments')
        req.method = 'POST'
        req.body = jsonutils.dumps({})
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context

        self.assertRaises(webob.exc.HTTPBadRequest, self.attachments.create,
                          req, FAKE_UUID, body)

    def test_attach_volume_without_volumeId(self):
        self.stubs.Set(compute_api.API,
                       'attach_volume',
                       fake_attach_volume)

        body = {
            'volumeAttachment': {
                'device': None
            }
        }

        req = webob.Request.blank('/v2/servers/id/os-volume_attachments')
        req.method = 'POST'
        req.body = jsonutils.dumps({})
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context

        self.assertRaises(webob.exc.HTTPBadRequest, self.attachments.create,
                          req, FAKE_UUID, body)

    def _test_swap(self, uuid=FAKE_UUID_A, fake_func=None, body=None):
        """PUT a swap-volume request and return the controller result."""
        fake_func = fake_func or fake_swap_volume
        self.stubs.Set(compute_api.API,
                       'swap_volume',
                       fake_func)
        body = body or {'volumeAttachment': {'volumeId': FAKE_UUID_B,
                                             'device': '/dev/fake'}}

        req = webob.Request.blank('/v2/servers/id/os-volume_attachments/uuid')
        req.method = 'PUT'
        req.body = jsonutils.dumps({})
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context
        return self.attachments.update(req, FAKE_UUID, uuid, body)

    def test_swap_volume_for_locked_server(self):
        def fake_swap_volume_for_locked_server(self, context, instance,
                                               old_volume, new_volume):
            raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])

        # NOTE(review): the extension flag used to be set twice in this
        # test; setting it once is sufficient.
        self.ext_mgr.extensions['os-volume-attachment-update'] = True
        self.assertRaises(webob.exc.HTTPConflict, self._test_swap,
                          fake_func=fake_swap_volume_for_locked_server)

    def test_swap_volume_no_extension(self):
        self.assertRaises(webob.exc.HTTPBadRequest, self._test_swap)

    def test_swap_volume(self):
        self.ext_mgr.extensions['os-volume-attachment-update'] = True
        result = self._test_swap()
        self.assertEqual('202 Accepted', result.status)

    def test_swap_volume_no_attachment(self):
        self.ext_mgr.extensions['os-volume-attachment-update'] = True

        self.assertRaises(exc.HTTPNotFound, self._test_swap, FAKE_UUID_C)

    def test_swap_volume_without_volumeId(self):
        self.ext_mgr.extensions['os-volume-attachment-update'] = True
        body = {'volumeAttachment': {'device': '/dev/fake'}}
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._test_swap,
                          body=body)
+
+
class VolumeSerializerTest(test.TestCase):
    """Checks the XML templates that serialize volumes and attachments."""

    def _verify_volume_attachment(self, attach, tree):
        # Every attachment field must round-trip as an XML attribute.
        for attr in ('id', 'volumeId', 'serverId', 'device'):
            self.assertEqual(str(attach[attr]), tree.get(attr))

    def _verify_volume(self, vol, tree):
        """Assert that *tree* is the XML rendering of the *vol* dict."""
        self.assertEqual(tree.tag, 'volume')

        for attr in ('id', 'status', 'size', 'availabilityZone', 'createdAt',
                     'displayName', 'displayDescription', 'volumeType',
                     'snapshotId'):
            self.assertEqual(str(vol[attr]), tree.get(attr))

        # Only 'attachments' and 'metadata' may appear as child elements.
        for child in tree:
            self.assertIn(child.tag, ('attachments', 'metadata'))
            if child.tag == 'attachments':
                self.assertEqual(1, len(child))
                self.assertEqual('attachment', child[0].tag)
                self._verify_volume_attachment(vol['attachments'][0], child[0])
            elif child.tag == 'metadata':
                # Track which metadata keys have been seen so both missing
                # and unexpected entries are caught.
                not_seen = set(vol['metadata'].keys())
                for gr_child in child:
                    self.assertIn(gr_child.get("key"), not_seen)
                    self.assertEqual(str(vol['metadata'][gr_child.get("key")]),
                                     gr_child.text)
                    not_seen.remove(gr_child.get("key"))
                self.assertEqual(0, len(not_seen))

    def test_attach_show_create_serializer(self):
        serializer = volumes.VolumeAttachmentTemplate()
        raw_attach = dict(
            id='vol_id',
            volumeId='vol_id',
            serverId='instance_uuid',
            device='/foo')
        text = serializer.serialize(dict(volumeAttachment=raw_attach))

        tree = etree.fromstring(text)

        self.assertEqual('volumeAttachment', tree.tag)
        self._verify_volume_attachment(raw_attach, tree)

    def test_attach_index_serializer(self):
        serializer = volumes.VolumeAttachmentsTemplate()
        raw_attaches = [dict(
                id='vol_id1',
                volumeId='vol_id1',
                serverId='instance1_uuid',
                device='/foo1'),
            dict(
                id='vol_id2',
                volumeId='vol_id2',
                serverId='instance2_uuid',
                device='/foo2')]
        text = serializer.serialize(dict(volumeAttachments=raw_attaches))

        tree = etree.fromstring(text)

        self.assertEqual('volumeAttachments', tree.tag)
        self.assertEqual(len(raw_attaches), len(tree))
        for idx, child in enumerate(tree):
            self.assertEqual('volumeAttachment', child.tag)
            self._verify_volume_attachment(raw_attaches[idx], child)

    def test_volume_show_create_serializer(self):
        serializer = volumes.VolumeTemplate()
        raw_volume = dict(
            id='vol_id',
            status='vol_status',
            size=1024,
            availabilityZone='vol_availability',
            createdAt=timeutils.utcnow(),
            attachments=[dict(
                    id='vol_id',
                    volumeId='vol_id',
                    serverId='instance_uuid',
                    device='/foo')],
            displayName='vol_name',
            displayDescription='vol_desc',
            volumeType='vol_type',
            snapshotId='snap_id',
            metadata=dict(
                foo='bar',
                baz='quux',
                ),
            )
        text = serializer.serialize(dict(volume=raw_volume))

        tree = etree.fromstring(text)

        self._verify_volume(raw_volume, tree)

    def test_volume_index_detail_serializer(self):
        serializer = volumes.VolumesTemplate()
        raw_volumes = [dict(
                id='vol1_id',
                status='vol1_status',
                size=1024,
                availabilityZone='vol1_availability',
                createdAt=timeutils.utcnow(),
                attachments=[dict(
                        id='vol1_id',
                        volumeId='vol1_id',
                        serverId='instance_uuid',
                        device='/foo1')],
                displayName='vol1_name',
                displayDescription='vol1_desc',
                volumeType='vol1_type',
                snapshotId='snap1_id',
                metadata=dict(
                    foo='vol1_foo',
                    bar='vol1_bar',
                    ),
                ),
            dict(
                id='vol2_id',
                status='vol2_status',
                size=1024,
                availabilityZone='vol2_availability',
                createdAt=timeutils.utcnow(),
                attachments=[dict(
                        id='vol2_id',
                        volumeId='vol2_id',
                        serverId='instance_uuid',
                        device='/foo2')],
                displayName='vol2_name',
                displayDescription='vol2_desc',
                volumeType='vol2_type',
                snapshotId='snap2_id',
                metadata=dict(
                    foo='vol2_foo',
                    bar='vol2_bar',
                    ),
                )]
        text = serializer.serialize(dict(volumes=raw_volumes))

        tree = etree.fromstring(text)

        self.assertEqual('volumes', tree.tag)
        self.assertEqual(len(raw_volumes), len(tree))
        for idx, child in enumerate(tree):
            self._verify_volume(raw_volumes[idx], child)
+
+
class TestVolumeCreateRequestXMLDeserializer(test.TestCase):
    """Checks mapping of <volume> XML attributes onto the create body."""

    def setUp(self):
        super(TestVolumeCreateRequestXMLDeserializer, self).setUp()
        self.deserializer = volumes.CreateDeserializer()

    def test_minimal_volume(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
            },
        }
        self.assertEqual(request['body'], expected)

    def test_display_name(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"
        display_name="Volume-xml"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
            },
        }
        self.assertEqual(request['body'], expected)

    def test_display_description(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"
        display_name="Volume-xml"
        display_description="description"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
            },
        }
        self.assertEqual(request['body'], expected)

    def test_volume_type(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"
        display_name="Volume-xml"
        display_description="description"
        volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
                "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
            },
        }
        self.assertEqual(request['body'], expected)

    def test_availability_zone(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"
        display_name="Volume-xml"
        display_description="description"
        volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
        availability_zone="us-east1"></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
                "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
                "availability_zone": "us-east1",
            },
        }
        self.assertEqual(request['body'], expected)

    def test_metadata(self):
        # Metadata comes in as child <meta> elements, not attributes.
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        display_name="Volume-xml"
        size="1">
        <metadata><meta key="Type">work</meta></metadata></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "display_name": "Volume-xml",
                "size": "1",
                "metadata": {
                    "Type": "work",
                },
            },
        }
        self.assertEqual(request['body'], expected)

    def test_full_volume(self):
        self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
        size="1"
        display_name="Volume-xml"
        display_description="description"
        volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
        availability_zone="us-east1">
        <metadata><meta key="Type">work</meta></metadata></volume>"""
        request = self.deserializer.deserialize(self_request)
        expected = {
            "volume": {
                "size": "1",
                "display_name": "Volume-xml",
                "display_description": "description",
                "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
                "availability_zone": "us-east1",
                "metadata": {
                    "Type": "work",
                },
            },
        }
        self.maxDiff = None
        self.assertEqual(request['body'], expected)
+
+
class CommonBadRequestTestCase(object):
    """Mixin asserting that malformed create() bodies yield 400 Bad Request.

    Subclasses set ``resource``, ``entity_name``, ``controller_cls`` and
    optionally ``kwargs``, and also inherit from a TestCase class.
    """
    # NOTE(review): this text used to sit *after* the class attributes,
    # where it was a dead string statement rather than the class docstring.

    resource = None
    entity_name = None
    controller_cls = None
    kwargs = {}

    def setUp(self):
        super(CommonBadRequestTestCase, self).setUp()
        self.controller = self.controller_cls()

    def _bad_request_create(self, body):
        # POST the given body and expect the controller to reject it.
        req = fakes.HTTPRequest.blank('/v2/fake/' + self.resource)
        req.method = 'POST'

        kwargs = self.kwargs.copy()
        kwargs['body'] = body
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, req, **kwargs)

    def test_create_no_body(self):
        self._bad_request_create(body=None)

    def test_create_missing_volume(self):
        self._bad_request_create(body={'foo': {'a': 'b'}})

    def test_create_malformed_entity(self):
        self._bad_request_create(body={self.entity_name: 'string'})
+
+
class BadRequestVolumeTestCaseV21(CommonBadRequestTestCase,
                                  test.TestCase):
    # Bad-request checks for the v2.1 volume controller.

    resource = 'os-volumes'
    entity_name = 'volume'
    controller_cls = volumes_v3.VolumeController
+
+
class BadRequestVolumeTestCaseV2(BadRequestVolumeTestCaseV21):
    # Same checks against the v2 contrib volume controller.
    controller_cls = volumes.VolumeController
+
+
class BadRequestAttachmentTestCase(CommonBadRequestTestCase,
                                   test.TestCase):
    # Bad-request checks for the volume attachment controller; create()
    # also requires the server_id keyword.
    resource = 'servers/' + FAKE_UUID + '/os-volume_attachments'
    entity_name = 'volumeAttachment'
    controller_cls = volumes.VolumeAttachmentController
    kwargs = {'server_id': FAKE_UUID}
+
+
class BadRequestSnapshotTestCaseV21(CommonBadRequestTestCase,
                                    test.TestCase):
    # Bad-request checks for the v2.1 snapshot controller.

    resource = 'os-snapshots'
    entity_name = 'snapshot'
    # NOTE(review): was volumes.SnapshotController, which is the v2 contrib
    # controller; every other *V21 case in this file (e.g.
    # BadRequestVolumeTestCaseV21, ShowSnapshotTestCaseV21) uses the
    # volumes_v3 plugin controller, so the V21/V2 pair appeared swapped.
    controller_cls = volumes_v3.SnapshotController
+
+
class BadRequestSnapshotTestCaseV2(BadRequestSnapshotTestCaseV21):
    # NOTE(review): was volumes_v3.SnapshotController; the *V2 cases in this
    # file use the v2 contrib controllers, so the V21/V2 pair was swapped.
    controller_cls = volumes.SnapshotController
+
+
class ShowSnapshotTestCaseV21(test.TestCase):
    """Tests for showing a single snapshot via the v2.1 controller."""

    snapshot_cls = volumes_v3.SnapshotController

    def setUp(self):
        super(ShowSnapshotTestCaseV21, self).setUp()
        self.controller = self.snapshot_cls()
        self.req = fakes.HTTPRequest.blank('/v2/fake/os-snapshots')
        self.req.method = 'GET'

    def test_show_snapshot_not_exist(self):
        """A missing snapshot maps to HTTP 404."""
        def raise_not_found(self, context, id):
            raise exception.SnapshotNotFound(snapshot_id=id)

        self.stubs.Set(cinder.API, 'get_snapshot', raise_not_found)
        self.assertRaises(exc.HTTPNotFound,
                          self.controller.show, self.req, FAKE_UUID_A)
+
+
class ShowSnapshotTestCaseV2(ShowSnapshotTestCaseV21):
    # Same checks against the v2 contrib snapshot controller.
    snapshot_cls = volumes.SnapshotController
+
+
class CreateSnapshotTestCaseV21(test.TestCase):
    """Tests for the snapshot ``force`` flag on create."""

    snapshot_cls = volumes_v3.SnapshotController

    def setUp(self):
        super(CreateSnapshotTestCaseV21, self).setUp()
        self.controller = self.snapshot_cls()
        self.stubs.Set(cinder.API, 'get', fake_get_volume)
        self.stubs.Set(cinder.API, 'create_snapshot_force',
                       fake_create_snapshot)
        self.stubs.Set(cinder.API, 'create_snapshot', fake_create_snapshot)
        self.req = fakes.HTTPRequest.blank('/v2/fake/os-snapshots')
        self.req.method = 'POST'
        self.body = {'snapshot': {'volume_id': 1}}

    def _create_with_force(self, force):
        # Set the force flag and issue the create request.
        self.body['snapshot']['force'] = force
        return self.controller.create(self.req, body=self.body)

    def test_force_true(self):
        self._create_with_force('True')

    def test_force_false(self):
        self._create_with_force('f')

    def test_force_invalid(self):
        # A non-boolean force value is rejected with 400.
        self.assertRaises(exc.HTTPBadRequest, self._create_with_force, 'foo')
+
+
class CreateSnapshotTestCaseV2(CreateSnapshotTestCaseV21):
    # Same checks against the v2 contrib snapshot controller.
    snapshot_cls = volumes.SnapshotController
+
+
class DeleteSnapshotTestCaseV21(test.TestCase):
    """Tests deleting snapshots through the v2.1 controller."""

    snapshot_cls = volumes_v3.SnapshotController

    def setUp(self):
        super(DeleteSnapshotTestCaseV21, self).setUp()
        self.controller = self.snapshot_cls()
        self.stubs.Set(cinder.API, 'get', fake_get_volume)
        self.stubs.Set(cinder.API, 'create_snapshot_force',
                       fake_create_snapshot)
        self.stubs.Set(cinder.API, 'create_snapshot', fake_create_snapshot)
        self.stubs.Set(cinder.API, 'delete_snapshot', fake_delete_snapshot)
        self.req = fakes.HTTPRequest.blank('/v2/fake/os-snapshots')

    def test_normal_delete(self):
        # Create a snapshot first so there is something to delete.
        self.req.method = 'POST'
        self.body = {'snapshot': {'volume_id': 1}}
        result = self.controller.create(self.req, body=self.body)

        self.req.method = 'DELETE'
        result = self.controller.delete(self.req, result['snapshot']['id'])

        # NOTE: on v2.1, http status code is set as wsgi_code of API
        # method instead of status_int in a response object.
        if isinstance(self.controller, volumes_v3.SnapshotController):
            status_int = self.controller.delete.wsgi_code
        else:
            status_int = result.status_int
        self.assertEqual(202, status_int)

    def test_delete_snapshot_not_exists(self):
        def fake_delete_snapshot_not_exist(self, context, snapshot_id):
            raise exception.SnapshotNotFound(snapshot_id=snapshot_id)

        self.stubs.Set(cinder.API, 'delete_snapshot',
                       fake_delete_snapshot_not_exist)
        self.req.method = 'POST'
        self.body = {'snapshot': {'volume_id': 1}}
        result = self.controller.create(self.req, body=self.body)

        self.req.method = 'DELETE'
        # Deleting a snapshot cinder cannot find maps to 404.
        self.assertRaises(exc.HTTPNotFound, self.controller.delete,
                          self.req, result['snapshot']['id'])
+
+
class DeleteSnapshotTestCaseV2(DeleteSnapshotTestCaseV21):
    # Same checks against the v2 contrib snapshot controller.
    snapshot_cls = volumes.SnapshotController
+
+
class AssistedSnapshotCreateTestCase(test.TestCase):
    """Tests for the assisted-volume-snapshots create API."""

    def setUp(self):
        super(AssistedSnapshotCreateTestCase, self).setUp()

        self.controller = assisted_snaps.AssistedVolumeSnapshotsController()
        self.stubs.Set(compute_api.API, 'volume_snapshot_create',
                       fake_compute_volume_snapshot_create)

    def _post_request(self):
        req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
        req.method = 'POST'
        return req

    def test_assisted_create(self):
        body = {'snapshot': {'volume_id': 1, 'create_info': {}}}
        self.controller.create(self._post_request(), body=body)

    def test_assisted_create_missing_create_info(self):
        # 'create_info' is required; omitting it must yield 400.
        body = {'snapshot': {'volume_id': 1}}
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self._post_request(), body=body)
+
+
class AssistedSnapshotDeleteTestCase(test.TestCase):
    """Tests for the assisted-volume-snapshots delete API."""

    def setUp(self):
        super(AssistedSnapshotDeleteTestCase, self).setUp()

        self.controller = assisted_snaps.AssistedVolumeSnapshotsController()
        self.stubs.Set(compute_api.API, 'volume_snapshot_delete',
                       fake_compute_volume_snapshot_delete)

    def test_assisted_delete(self):
        params = {
            'delete_info': jsonutils.dumps({'volume_id': 1}),
        }
        # Use items() rather than the Python-2-only iteritems() so this
        # test also runs under Python 3; behavior on Python 2 is identical.
        req = fakes.HTTPRequest.blank(
                '/v2/fake/os-assisted-volume-snapshots?%s' %
                '&'.join(['%s=%s' % (k, v) for k, v in params.items()]))
        req.method = 'DELETE'
        result = self.controller.delete(req, '5')
        self.assertEqual(204, result.status_int)

    def test_assisted_delete_missing_delete_info(self):
        # 'delete_info' is a required query parameter; omitting it is 400.
        req = fakes.HTTPRequest.blank('/v2/fake/os-assisted-volume-snapshots')
        req.method = 'DELETE'
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
                          req, '5')
diff --git a/nova/tests/api/openstack/compute/extensions/__init__.py b/nova/tests/unit/api/openstack/compute/extensions/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/api/openstack/compute/extensions/__init__.py
+++ b/nova/tests/unit/api/openstack/compute/extensions/__init__.py
diff --git a/nova/tests/api/openstack/compute/extensions/foxinsocks.py b/nova/tests/unit/api/openstack/compute/extensions/foxinsocks.py
index 7d1e273ea7..7d1e273ea7 100644
--- a/nova/tests/api/openstack/compute/extensions/foxinsocks.py
+++ b/nova/tests/unit/api/openstack/compute/extensions/foxinsocks.py
diff --git a/nova/tests/api/openstack/compute/plugins/__init__.py b/nova/tests/unit/api/openstack/compute/plugins/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/api/openstack/compute/plugins/__init__.py
+++ b/nova/tests/unit/api/openstack/compute/plugins/__init__.py
diff --git a/nova/tests/api/openstack/compute/plugins/v3/__init__.py b/nova/tests/unit/api/openstack/compute/plugins/v3/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/__init__.py
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/__init__.py
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/admin_only_action_common.py b/nova/tests/unit/api/openstack/compute/plugins/v3/admin_only_action_common.py
new file mode 100644
index 0000000000..ce99d1069b
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/admin_only_action_common.py
@@ -0,0 +1,263 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import webob
+
+from nova.compute import vm_states
+import nova.context
+from nova import exception
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit import fake_instance
+
+
class CommonMixin(object):
    """Shared mox-based helpers for exercising server-action APIs.

    Subclasses must assign ``self.compute_api`` (the mocked compute API)
    and ``self.app`` (the WSGI app under test) before using the helpers.
    """

    def setUp(self):
        super(CommonMixin, self).setUp()
        self.compute_api = None
        self.context = nova.context.RequestContext('fake', 'fake')

    def _make_request(self, url, body):
        # POST a JSON body to the v2 API and return the raw response.
        req = webob.Request.blank('/v2/fake' + url)
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.content_type = 'application/json'
        return req.get_response(self.app)

    def _stub_instance_get(self, uuid=None):
        # Record an expectation that compute_api.get() returns an ACTIVE
        # fake instance with the given (or a freshly generated) uuid.
        if uuid is None:
            uuid = uuidutils.generate_uuid()
        instance = fake_instance.fake_instance_obj(self.context,
                id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
                task_state=None, launched_at=timeutils.utcnow())
        self.compute_api.get(self.context, uuid, expected_attrs=None,
                             want_objects=True).AndReturn(instance)
        return instance

    def _stub_instance_get_failure(self, exc_info, uuid=None):
        # Record an expectation that compute_api.get() raises exc_info.
        if uuid is None:
            uuid = uuidutils.generate_uuid()
        self.compute_api.get(self.context, uuid, expected_attrs=None,
                             want_objects=True).AndRaise(exc_info)
        return uuid

    def _test_non_existing_instance(self, action, body_map=None):
        """Posting an action to an unknown instance must return 404."""
        uuid = uuidutils.generate_uuid()
        self._stub_instance_get_failure(
                exception.InstanceNotFound(instance_id=uuid), uuid=uuid)

        self.mox.ReplayAll()

        res = self._make_request('/servers/%s/action' % uuid,
                                 {action: body_map.get(action)})
        self.assertEqual(404, res.status_int)
        # Do these here instead of tearDown because this method is called
        # more than once for the same test case
        self.mox.VerifyAll()
        self.mox.UnsetStubs()

    def _test_action(self, action, body=None, method=None,
                     compute_api_args_map=None):
        """Posting *action* must invoke *method* and return 202."""
        if method is None:
            method = action

        compute_api_args_map = compute_api_args_map or {}

        instance = self._stub_instance_get()

        args, kwargs = compute_api_args_map.get(action, ((), {}))
        getattr(self.compute_api, method)(self.context, instance, *args,
                                          **kwargs)

        self.mox.ReplayAll()

        res = self._make_request('/servers/%s/action' % instance.uuid,
                                 {action: body})
        self.assertEqual(202, res.status_int)
        # Do these here instead of tearDown because this method is called
        # more than once for the same test case
        self.mox.VerifyAll()
        self.mox.UnsetStubs()

    def _test_not_implemented_state(self, action, method=None):
        """NotImplementedError from the compute API maps to HTTP 501."""
        if method is None:
            method = action

        instance = self._stub_instance_get()
        body = {}
        compute_api_args_map = {}
        args, kwargs = compute_api_args_map.get(action, ((), {}))
        getattr(self.compute_api, method)(self.context, instance,
                                          *args, **kwargs).AndRaise(
                NotImplementedError())

        self.mox.ReplayAll()

        res = self._make_request('/servers/%s/action' % instance.uuid,
                                 {action: body})
        self.assertEqual(501, res.status_int)
        # Do these here instead of tearDown because this method is called
        # more than once for the same test case
        self.mox.VerifyAll()
        self.mox.UnsetStubs()

    def _test_invalid_state(self, action, method=None, body_map=None,
                            compute_api_args_map=None):
        """InstanceInvalidState maps to HTTP 409 with an explanation."""
        if method is None:
            method = action
        if body_map is None:
            body_map = {}
        if compute_api_args_map is None:
            compute_api_args_map = {}

        instance = self._stub_instance_get()

        args, kwargs = compute_api_args_map.get(action, ((), {}))

        getattr(self.compute_api, method)(self.context, instance,
                                          *args, **kwargs).AndRaise(
                exception.InstanceInvalidState(
                    attr='vm_state', instance_uuid=instance.uuid,
                    state='foo', method=method))

        self.mox.ReplayAll()

        res = self._make_request('/servers/%s/action' % instance.uuid,
                                 {action: body_map.get(action)})
        self.assertEqual(409, res.status_int)
        self.assertIn("Cannot \'%(action)s\' instance %(id)s"
                      % {'action': action, 'id': instance.uuid}, res.body)
        # Do these here instead of tearDown because this method is called
        # more than once for the same test case
        self.mox.VerifyAll()
        self.mox.UnsetStubs()

    def _test_locked_instance(self, action, method=None, body=None,
                              compute_api_args_map=None):
        """InstanceIsLocked maps to HTTP 409."""
        if method is None:
            method = action

        compute_api_args_map = compute_api_args_map or {}
        instance = self._stub_instance_get()

        args, kwargs = compute_api_args_map.get(action, ((), {}))
        getattr(self.compute_api, method)(self.context, instance, *args,
                                          **kwargs).AndRaise(
                exception.InstanceIsLocked(instance_uuid=instance.uuid))

        self.mox.ReplayAll()

        res = self._make_request('/servers/%s/action' % instance.uuid,
                                 {action: body})
        self.assertEqual(409, res.status_int)
        # Do these here instead of tearDown because this method is called
        # more than once for the same test case
        self.mox.VerifyAll()
        self.mox.UnsetStubs()

    def _test_instance_not_found_in_compute_api(self, action,
                         method=None, body=None, compute_api_args_map=None):
        """InstanceNotFound raised by the action itself maps to 404."""
        if method is None:
            method = action

        compute_api_args_map = compute_api_args_map or {}

        instance = self._stub_instance_get()

        args, kwargs = compute_api_args_map.get(action, ((), {}))
        getattr(self.compute_api, method)(self.context, instance, *args,
                                          **kwargs).AndRaise(
                exception.InstanceNotFound(instance_id=instance.uuid))

        self.mox.ReplayAll()

        res = self._make_request('/servers/%s/action' % instance.uuid,
                                 {action: body})
        self.assertEqual(404, res.status_int)
        # Do these here instead of tearDown because this method is called
        # more than once for the same test case
        self.mox.VerifyAll()
        self.mox.UnsetStubs()
+
+
class CommonTests(CommonMixin, test.NoDBTestCase):
    """Drives the CommonMixin helpers over a batch of server actions."""

    def _test_actions(self, actions, method_translations=None, body_map=None,
                      args_map=None):
        method_translations = method_translations or {}
        body_map = body_map or {}
        args_map = args_map or {}
        for action in actions:
            method = method_translations.get(action)
            body = body_map.get(action)
            self.mox.StubOutWithMock(self.compute_api, method or action)
            self._test_action(action, method=method, body=body,
                              compute_api_args_map=args_map)
            # Re-mock this.
            self.mox.StubOutWithMock(self.compute_api, 'get')

    def _test_actions_instance_not_found_in_compute_api(self,
                  actions, method_translations=None, body_map=None,
                  args_map=None):
        method_translations = method_translations or {}
        body_map = body_map or {}
        args_map = args_map or {}
        for action in actions:
            method = method_translations.get(action)
            body = body_map.get(action)
            self.mox.StubOutWithMock(self.compute_api, method or action)
            self._test_instance_not_found_in_compute_api(
                action, method=method, body=body,
                compute_api_args_map=args_map)
            # Re-mock this.
            self.mox.StubOutWithMock(self.compute_api, 'get')

    def _test_actions_with_non_existed_instance(self, actions, body_map=None):
        body_map = body_map or {}
        for action in actions:
            self._test_non_existing_instance(action,
                                             body_map=body_map)
            # Re-mock this.
            self.mox.StubOutWithMock(self.compute_api, 'get')

    def _test_actions_raise_conflict_on_invalid_state(
            self, actions, method_translations=None, body_map=None,
            args_map=None):
        method_translations = method_translations or {}
        body_map = body_map or {}
        args_map = args_map or {}
        for action in actions:
            method = method_translations.get(action)
            self.mox.StubOutWithMock(self.compute_api, method or action)
            self._test_invalid_state(action, method=method,
                                     body_map=body_map,
                                     compute_api_args_map=args_map)
            # Re-mock this.
            self.mox.StubOutWithMock(self.compute_api, 'get')

    def _test_actions_with_locked_instance(self, actions,
                                           method_translations=None,
                                           body_map=None, args_map=None):
        method_translations = method_translations or {}
        body_map = body_map or {}
        args_map = args_map or {}
        for action in actions:
            method = method_translations.get(action)
            body = body_map.get(action)
            self.mox.StubOutWithMock(self.compute_api, method or action)
            self._test_locked_instance(action, method=method, body=body,
                                       compute_api_args_map=args_map)
            # Re-mock this.
            self.mox.StubOutWithMock(self.compute_api, 'get')
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_access_ips.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_access_ips.py
new file mode 100644
index 0000000000..44c1d5b5cd
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_access_ips.py
@@ -0,0 +1,383 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.serialization import jsonutils
+
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import access_ips
+from nova.api.openstack.compute.plugins.v3 import servers
+from nova.api.openstack import wsgi
+from nova.compute import api as compute_api
+from nova import db
+from nova import exception
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.image import fake
+
+
+class AccessIPsExtTest(test.NoDBTestCase):
+ def setUp(self):
+ super(AccessIPsExtTest, self).setUp()
+ self.access_ips_ext = access_ips.AccessIPs(None)
+
+ def _test(self, func):
+ server_dict = {access_ips.AccessIPs.v4_key: '1.1.1.1',
+ access_ips.AccessIPs.v6_key: 'fe80::'}
+ create_kwargs = {}
+ func(server_dict, create_kwargs)
+ self.assertEqual(create_kwargs, {'access_ip_v4': '1.1.1.1',
+ 'access_ip_v6': 'fe80::'})
+
+ def _test_with_ipv4_only(self, func):
+ server_dict = {access_ips.AccessIPs.v4_key: '1.1.1.1'}
+ create_kwargs = {}
+ func(server_dict, create_kwargs)
+ self.assertEqual(create_kwargs, {'access_ip_v4': '1.1.1.1'})
+
+ def _test_with_ipv6_only(self, func):
+ server_dict = {access_ips.AccessIPs.v6_key: 'fe80::'}
+ create_kwargs = {}
+ func(server_dict, create_kwargs)
+ self.assertEqual(create_kwargs, {'access_ip_v6': 'fe80::'})
+
+ def _test_without_ipv4_and_ipv6(self, func):
+ server_dict = {}
+ create_kwargs = {}
+ func(server_dict, create_kwargs)
+ self.assertEqual(create_kwargs, {})
+
+ def _test_with_ipv4_null(self, func):
+ server_dict = {access_ips.AccessIPs.v4_key: None}
+ create_kwargs = {}
+ func(server_dict, create_kwargs)
+ self.assertEqual(create_kwargs, {'access_ip_v4': None})
+
+ def _test_with_ipv6_null(self, func):
+ server_dict = {access_ips.AccessIPs.v6_key: None}
+ create_kwargs = {}
+ func(server_dict, create_kwargs)
+ self.assertEqual(create_kwargs, {'access_ip_v6': None})
+
+ def _test_with_ipv4_blank(self, func):
+ server_dict = {access_ips.AccessIPs.v4_key: ''}
+ create_kwargs = {}
+ func(server_dict, create_kwargs)
+ self.assertEqual(create_kwargs, {'access_ip_v4': None})
+
+ def _test_with_ipv6_blank(self, func):
+ server_dict = {access_ips.AccessIPs.v6_key: ''}
+ create_kwargs = {}
+ func(server_dict, create_kwargs)
+ self.assertEqual(create_kwargs, {'access_ip_v6': None})
+
+ def test_server_create(self):
+ self._test(self.access_ips_ext.server_create)
+
+ def test_server_create_with_ipv4_only(self):
+ self._test_with_ipv4_only(self.access_ips_ext.server_create)
+
+ def test_server_create_with_ipv6_only(self):
+ self._test_with_ipv6_only(self.access_ips_ext.server_create)
+
+ def test_server_create_without_ipv4_and_ipv6(self):
+ self._test_without_ipv4_and_ipv6(self.access_ips_ext.server_create)
+
+ def test_server_create_with_ipv4_null(self):
+ self._test_with_ipv4_null(self.access_ips_ext.server_create)
+
+ def test_server_create_with_ipv6_null(self):
+ self._test_with_ipv6_null(self.access_ips_ext.server_create)
+
+ def test_server_create_with_ipv4_blank(self):
+ self._test_with_ipv4_blank(self.access_ips_ext.server_create)
+
+ def test_server_create_with_ipv6_blank(self):
+ self._test_with_ipv6_blank(self.access_ips_ext.server_create)
+
+ def test_server_update(self):
+ self._test(self.access_ips_ext.server_update)
+
+ def test_server_update_with_ipv4_only(self):
+ self._test_with_ipv4_only(self.access_ips_ext.server_update)
+
+ def test_server_update_with_ipv6_only(self):
+ self._test_with_ipv6_only(self.access_ips_ext.server_update)
+
+ def test_server_update_without_ipv4_and_ipv6(self):
+ self._test_without_ipv4_and_ipv6(self.access_ips_ext.server_update)
+
+ def test_server_update_with_ipv4_null(self):
+ self._test_with_ipv4_null(self.access_ips_ext.server_update)
+
+ def test_server_update_with_ipv6_null(self):
+ self._test_with_ipv6_null(self.access_ips_ext.server_update)
+
+ def test_server_update_with_ipv4_blank(self):
+ self._test_with_ipv4_blank(self.access_ips_ext.server_update)
+
+ def test_server_update_with_ipv6_blank(self):
+ self._test_with_ipv6_blank(self.access_ips_ext.server_update)
+
+ def test_server_rebuild(self):
+ self._test(self.access_ips_ext.server_rebuild)
+
+ def test_server_rebuild_with_ipv4_only(self):
+ self._test_with_ipv4_only(self.access_ips_ext.server_rebuild)
+
+ def test_server_rebuild_with_ipv6_only(self):
+ self._test_with_ipv6_only(self.access_ips_ext.server_rebuild)
+
+ def test_server_rebuild_without_ipv4_and_ipv6(self):
+ self._test_without_ipv4_and_ipv6(self.access_ips_ext.server_rebuild)
+
+ def test_server_rebuild_with_ipv4_null(self):
+ self._test_with_ipv4_null(self.access_ips_ext.server_rebuild)
+
+ def test_server_rebuild_with_ipv6_null(self):
+ self._test_with_ipv6_null(self.access_ips_ext.server_rebuild)
+
+ def test_server_rebuild_with_ipv4_blank(self):
+ self._test_with_ipv4_blank(self.access_ips_ext.server_rebuild)
+
+ def test_server_rebuild_with_ipv6_blank(self):
+ self._test_with_ipv6_blank(self.access_ips_ext.server_rebuild)
+
+
+class AccessIPsExtAPIValidationTest(test.TestCase):
+ def setUp(self):
+ super(AccessIPsExtAPIValidationTest, self).setUp()
+
+ def fake_save(context, **kwargs):
+ pass
+
+ def fake_rebuild(*args, **kwargs):
+ pass
+
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers.ServersController(extension_info=ext_info)
+ fake.stub_out_image_service(self.stubs)
+ self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get())
+ self.stubs.Set(instance_obj.Instance, 'save', fake_save)
+ self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
+
+ def _test_create(self, params):
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ 'flavorRef': 'http://localhost/123/flavors/3',
+ },
+ }
+ body['server'].update(params)
+
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.headers['content-type'] = 'application/json'
+ req.body = jsonutils.dumps(body)
+ self.controller.create(req, body=body)
+
+ def _test_update(self, params):
+ body = {
+ 'server': {
+ },
+ }
+ body['server'].update(params)
+
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'PUT'
+ req.headers['content-type'] = 'application/json'
+ req.body = jsonutils.dumps(body)
+ self.controller.update(req, fakes.FAKE_UUID, body=body)
+
+ def _test_rebuild(self, params):
+ body = {
+ 'rebuild': {
+ 'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ },
+ }
+ body['rebuild'].update(params)
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'PUT'
+ req.headers['content-type'] = 'application/json'
+ req.body = jsonutils.dumps(body)
+ self.controller._action_rebuild(req, fakes.FAKE_UUID, body=body)
+
+ def test_create_server_with_access_ipv4(self):
+ params = {access_ips.AccessIPs.v4_key: '192.168.0.10'}
+ self._test_create(params)
+
+ def test_create_server_with_invalid_access_ipv4(self):
+ params = {access_ips.AccessIPs.v4_key: '1.1.1.1.1.1'}
+ self.assertRaises(exception.ValidationError, self._test_create, params)
+
+ def test_create_server_with_access_ipv6(self):
+ params = {access_ips.AccessIPs.v6_key: '2001:db8::9abc'}
+ self._test_create(params)
+
+ def test_create_server_with_invalid_access_ipv6(self):
+ params = {access_ips.AccessIPs.v6_key: 'fe80:::::::'}
+ self.assertRaises(exception.ValidationError, self._test_create, params)
+
+ def test_update_server_with_access_ipv4(self):
+ params = {access_ips.AccessIPs.v4_key: '192.168.0.10'}
+ self._test_update(params)
+
+ def test_update_server_with_invalid_access_ipv4(self):
+ params = {access_ips.AccessIPs.v4_key: '1.1.1.1.1.1'}
+ self.assertRaises(exception.ValidationError, self._test_update, params)
+
+ def test_update_server_with_access_ipv6(self):
+ params = {access_ips.AccessIPs.v6_key: '2001:db8::9abc'}
+ self._test_update(params)
+
+ def test_update_server_with_invalid_access_ipv6(self):
+ params = {access_ips.AccessIPs.v6_key: 'fe80:::::::'}
+ self.assertRaises(exception.ValidationError, self._test_update, params)
+
+ def test_rebuild_server_with_access_ipv4(self):
+ params = {access_ips.AccessIPs.v4_key: '192.168.0.10'}
+ self._test_rebuild(params)
+
+ def test_rebuild_server_with_invalid_access_ipv4(self):
+ params = {access_ips.AccessIPs.v4_key: '1.1.1.1.1.1'}
+ self.assertRaises(exception.ValidationError, self._test_rebuild,
+ params)
+
+ def test_rebuild_server_with_access_ipv6(self):
+ params = {access_ips.AccessIPs.v6_key: '2001:db8::9abc'}
+ self._test_rebuild(params)
+
+ def test_rebuild_server_with_invalid_access_ipv6(self):
+ params = {access_ips.AccessIPs.v6_key: 'fe80:::::::'}
+ self.assertRaises(exception.ValidationError, self._test_rebuild,
+ params)
+
+
+class AccessIPsControllerTest(test.NoDBTestCase):
+ def setUp(self):
+ super(AccessIPsControllerTest, self).setUp()
+ self.controller = access_ips.AccessIPsController()
+
+ def _test_with_access_ips(self, func, kwargs={'id': 'fake'}):
+ req = wsgi.Request({'nova.context':
+ fakes.FakeRequestContext('fake_user', 'fake',
+ is_admin=True)})
+ instance = {'uuid': 'fake',
+ 'access_ip_v4': '1.1.1.1',
+ 'access_ip_v6': 'fe80::'}
+ req.cache_db_instance(instance)
+ resp_obj = wsgi.ResponseObject(
+ {"server": {'id': 'fake'}})
+ func(req, resp_obj, **kwargs)
+ self.assertEqual(resp_obj.obj['server'][access_ips.AccessIPs.v4_key],
+ '1.1.1.1')
+ self.assertEqual(resp_obj.obj['server'][access_ips.AccessIPs.v6_key],
+ 'fe80::')
+
+ def _test_without_access_ips(self, func, kwargs={'id': 'fake'}):
+ req = wsgi.Request({'nova.context':
+ fakes.FakeRequestContext('fake_user', 'fake',
+ is_admin=True)})
+ instance = {'uuid': 'fake',
+ 'access_ip_v4': None,
+ 'access_ip_v6': None}
+ req.cache_db_instance(instance)
+ resp_obj = wsgi.ResponseObject(
+ {"server": {'id': 'fake'}})
+ func(req, resp_obj, **kwargs)
+ self.assertEqual(resp_obj.obj['server'][access_ips.AccessIPs.v4_key],
+ '')
+ self.assertEqual(resp_obj.obj['server'][access_ips.AccessIPs.v6_key],
+ '')
+
+ def test_create(self):
+ self._test_with_access_ips(self.controller.create, {'body': {}})
+
+ def test_create_without_access_ips(self):
+        self._test_without_access_ips(self.controller.create, {'body': {}})
+
+ def test_show(self):
+ self._test_with_access_ips(self.controller.show)
+
+ def test_show_without_access_ips(self):
+ self._test_without_access_ips(self.controller.show)
+
+ def test_detail(self):
+ req = wsgi.Request({'nova.context':
+ fakes.FakeRequestContext('fake_user', 'fake',
+ is_admin=True)})
+ instance1 = {'uuid': 'fake1',
+ 'access_ip_v4': '1.1.1.1',
+ 'access_ip_v6': 'fe80::'}
+ instance2 = {'uuid': 'fake2',
+ 'access_ip_v4': '1.1.1.2',
+ 'access_ip_v6': 'fe81::'}
+ req.cache_db_instance(instance1)
+ req.cache_db_instance(instance2)
+ resp_obj = wsgi.ResponseObject(
+ {"servers": [{'id': 'fake1'}, {'id': 'fake2'}]})
+ self.controller.detail(req, resp_obj)
+ self.assertEqual(
+ resp_obj.obj['servers'][0][access_ips.AccessIPs.v4_key],
+ '1.1.1.1')
+ self.assertEqual(
+ resp_obj.obj['servers'][0][access_ips.AccessIPs.v6_key],
+ 'fe80::')
+ self.assertEqual(
+ resp_obj.obj['servers'][1][access_ips.AccessIPs.v4_key],
+ '1.1.1.2')
+ self.assertEqual(
+ resp_obj.obj['servers'][1][access_ips.AccessIPs.v6_key],
+ 'fe81::')
+
+ def test_detail_without_access_ips(self):
+ req = wsgi.Request({'nova.context':
+ fakes.FakeRequestContext('fake_user', 'fake',
+ is_admin=True)})
+ instance1 = {'uuid': 'fake1',
+ 'access_ip_v4': None,
+ 'access_ip_v6': None}
+ instance2 = {'uuid': 'fake2',
+ 'access_ip_v4': None,
+ 'access_ip_v6': None}
+ req.cache_db_instance(instance1)
+ req.cache_db_instance(instance2)
+ resp_obj = wsgi.ResponseObject(
+ {"servers": [{'id': 'fake1'}, {'id': 'fake2'}]})
+ self.controller.detail(req, resp_obj)
+ self.assertEqual(
+ resp_obj.obj['servers'][0][access_ips.AccessIPs.v4_key], '')
+ self.assertEqual(
+ resp_obj.obj['servers'][0][access_ips.AccessIPs.v6_key], '')
+ self.assertEqual(
+ resp_obj.obj['servers'][1][access_ips.AccessIPs.v4_key], '')
+ self.assertEqual(
+ resp_obj.obj['servers'][1][access_ips.AccessIPs.v6_key], '')
+
+ def test_update(self):
+ self._test_with_access_ips(self.controller.update, {'id': 'fake',
+ 'body': {}})
+
+ def test_update_without_access_ips(self):
+ self._test_without_access_ips(self.controller.update, {'id': 'fake',
+ 'body': {}})
+
+ def test_rebuild(self):
+ self._test_with_access_ips(self.controller.rebuild, {'id': 'fake',
+ 'body': {}})
+
+ def test_rebuild_without_access_ips(self):
+ self._test_without_access_ips(self.controller.rebuild, {'id': 'fake',
+ 'body': {}})
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_console_auth_tokens.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_console_auth_tokens.py
new file mode 100644
index 0000000000..259906c535
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_console_auth_tokens.py
@@ -0,0 +1,95 @@
+# Copyright 2013 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.serialization import jsonutils
+
+from nova.consoleauth import rpcapi as consoleauth_rpcapi
+from nova import context
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+_FAKE_CONNECT_INFO = {'instance_uuid': 'fake_instance_uuid',
+ 'host': 'fake_host',
+ 'port': 'fake_port',
+ 'internal_access_path': 'fake_access_path',
+ 'console_type': 'rdp-html5'}
+
+
+def _fake_check_token(self, context, token):
+ return _FAKE_CONNECT_INFO
+
+
+def _fake_check_token_not_found(self, context, token):
+ return None
+
+
+def _fake_check_token_unauthorized(self, context, token):
+ connect_info = _FAKE_CONNECT_INFO
+ connect_info['console_type'] = 'unauthorized_console_type'
+ return connect_info
+
+
+class ConsoleAuthTokensExtensionTest(test.TestCase):
+
+ _FAKE_URL = '/v2/fake/os-console-auth-tokens/1'
+
+ _EXPECTED_OUTPUT = {'console': {'instance_uuid': 'fake_instance_uuid',
+ 'host': 'fake_host',
+ 'port': 'fake_port',
+ 'internal_access_path':
+ 'fake_access_path'}}
+
+ def setUp(self):
+ super(ConsoleAuthTokensExtensionTest, self).setUp()
+ self.stubs.Set(consoleauth_rpcapi.ConsoleAuthAPI, 'check_token',
+ _fake_check_token)
+
+ ctxt = self._get_admin_context()
+        self.app = fakes.wsgi_app_v21(init_only=('os-console-auth-tokens',),
+ fake_auth_context=ctxt)
+
+ def _get_admin_context(self):
+ ctxt = context.get_admin_context()
+ ctxt.user_id = 'fake'
+ ctxt.project_id = 'fake'
+ return ctxt
+
+ def _create_request(self):
+ req = fakes.HTTPRequestV3.blank(self._FAKE_URL)
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+ return req
+
+ def test_get_console_connect_info(self):
+ req = self._create_request()
+ res = req.get_response(self.app)
+ self.assertEqual(200, res.status_int)
+ output = jsonutils.loads(res.body)
+ self.assertEqual(self._EXPECTED_OUTPUT, output)
+
+ def test_get_console_connect_info_token_not_found(self):
+ self.stubs.Set(consoleauth_rpcapi.ConsoleAuthAPI, 'check_token',
+ _fake_check_token_not_found)
+ req = self._create_request()
+ res = req.get_response(self.app)
+ self.assertEqual(404, res.status_int)
+
+ def test_get_console_connect_info_unauthorized_console_type(self):
+ self.stubs.Set(consoleauth_rpcapi.ConsoleAuthAPI, 'check_token',
+ _fake_check_token_unauthorized)
+ req = self._create_request()
+ res = req.get_response(self.app)
+ self.assertEqual(401, res.status_int)
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_consoles.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_consoles.py
new file mode 100644
index 0000000000..d3ba83dcbc
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_consoles.py
@@ -0,0 +1,270 @@
+# Copyright 2010-2011 OpenStack Foundation
+# Copyright 2011 Piston Cloud Computing, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import uuid as stdlib_uuid
+
+from oslo.utils import timeutils
+import webob
+
+from nova.api.openstack.compute.plugins.v3 import consoles
+from nova.compute import vm_states
+from nova import console
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import matchers
+
+
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+
+
+class FakeInstanceDB(object):
+
+ def __init__(self):
+ self.instances_by_id = {}
+ self.ids_by_uuid = {}
+ self.max_id = 0
+
+ def return_server_by_id(self, context, id):
+ if id not in self.instances_by_id:
+ self._add_server(id=id)
+ return dict(self.instances_by_id[id])
+
+ def return_server_by_uuid(self, context, uuid):
+ if uuid not in self.ids_by_uuid:
+ self._add_server(uuid=uuid)
+ return dict(self.instances_by_id[self.ids_by_uuid[uuid]])
+
+ def _add_server(self, id=None, uuid=None):
+ if id is None:
+ id = self.max_id + 1
+ if uuid is None:
+ uuid = str(stdlib_uuid.uuid4())
+ instance = stub_instance(id, uuid=uuid)
+ self.instances_by_id[id] = instance
+ self.ids_by_uuid[uuid] = id
+ if id > self.max_id:
+ self.max_id = id
+
+
+def stub_instance(id, user_id='fake', project_id='fake', host=None,
+ vm_state=None, task_state=None,
+ reservation_id="", uuid=FAKE_UUID, image_ref="10",
+ flavor_id="1", name=None, key_name='',
+ access_ipv4=None, access_ipv6=None, progress=0):
+
+ if host is not None:
+ host = str(host)
+
+ if key_name:
+ key_data = 'FAKE'
+ else:
+ key_data = ''
+
+ # ReservationID isn't sent back, hack it in there.
+ server_name = name or "server%s" % id
+ if reservation_id != "":
+ server_name = "reservation_%s" % (reservation_id, )
+
+ instance = {
+ "id": int(id),
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "admin_password": "",
+ "user_id": user_id,
+ "project_id": project_id,
+ "image_ref": image_ref,
+ "kernel_id": "",
+ "ramdisk_id": "",
+ "launch_index": 0,
+ "key_name": key_name,
+ "key_data": key_data,
+ "vm_state": vm_state or vm_states.BUILDING,
+ "task_state": task_state,
+ "memory_mb": 0,
+ "vcpus": 0,
+ "root_gb": 0,
+ "hostname": "",
+ "host": host,
+ "instance_type": {},
+ "user_data": "",
+ "reservation_id": reservation_id,
+ "mac_address": "",
+ "scheduled_at": timeutils.utcnow(),
+ "launched_at": timeutils.utcnow(),
+ "terminated_at": timeutils.utcnow(),
+ "availability_zone": "",
+ "display_name": server_name,
+ "display_description": "",
+ "locked": False,
+ "metadata": [],
+ "access_ip_v4": access_ipv4,
+ "access_ip_v6": access_ipv6,
+ "uuid": uuid,
+ "progress": progress}
+
+ return instance
+
+
+class ConsolesControllerTest(test.NoDBTestCase):
+ def setUp(self):
+ super(ConsolesControllerTest, self).setUp()
+ self.flags(verbose=True)
+ self.instance_db = FakeInstanceDB()
+ self.stubs.Set(db, 'instance_get',
+ self.instance_db.return_server_by_id)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ self.instance_db.return_server_by_uuid)
+ self.uuid = str(stdlib_uuid.uuid4())
+ self.url = '/v3/fake/servers/%s/consoles' % self.uuid
+ self.controller = consoles.ConsolesController()
+
+ def test_create_console(self):
+ def fake_create_console(cons_self, context, instance_id):
+ self.assertEqual(instance_id, self.uuid)
+ return {}
+ self.stubs.Set(console.api.API, 'create_console', fake_create_console)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.controller.create(req, self.uuid, None)
+ self.assertEqual(self.controller.create.wsgi_code, 201)
+
+ def test_create_console_unknown_instance(self):
+ def fake_create_console(cons_self, context, instance_id):
+ raise exception.InstanceNotFound(instance_id=instance_id)
+ self.stubs.Set(console.api.API, 'create_console', fake_create_console)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
+ req, self.uuid, None)
+
+ def test_show_console(self):
+ def fake_get_console(cons_self, context, instance_id, console_id):
+ self.assertEqual(instance_id, self.uuid)
+ self.assertEqual(console_id, 20)
+ pool = dict(console_type='fake_type',
+ public_hostname='fake_hostname')
+ return dict(id=console_id, password='fake_password',
+ port='fake_port', pool=pool, instance_name='inst-0001')
+
+ expected = {'console': {'id': 20,
+ 'port': 'fake_port',
+ 'host': 'fake_hostname',
+ 'password': 'fake_password',
+ 'instance_name': 'inst-0001',
+ 'console_type': 'fake_type'}}
+
+ self.stubs.Set(console.api.API, 'get_console', fake_get_console)
+
+ req = fakes.HTTPRequestV3.blank(self.url + '/20')
+ res_dict = self.controller.show(req, self.uuid, '20')
+ self.assertThat(res_dict, matchers.DictMatches(expected))
+
+ def test_show_console_unknown_console(self):
+ def fake_get_console(cons_self, context, instance_id, console_id):
+ raise exception.ConsoleNotFound(console_id=console_id)
+
+ self.stubs.Set(console.api.API, 'get_console', fake_get_console)
+
+ req = fakes.HTTPRequestV3.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, self.uuid, '20')
+
+ def test_show_console_unknown_instance(self):
+ def fake_get_console(cons_self, context, instance_id, console_id):
+ raise exception.ConsoleNotFoundForInstance(
+ instance_uuid=instance_id)
+
+ self.stubs.Set(console.api.API, 'get_console', fake_get_console)
+
+ req = fakes.HTTPRequestV3.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, self.uuid, '20')
+
+ def test_list_consoles(self):
+ def fake_get_consoles(cons_self, context, instance_id):
+ self.assertEqual(instance_id, self.uuid)
+
+ pool1 = dict(console_type='fake_type',
+ public_hostname='fake_hostname')
+ cons1 = dict(id=10, password='fake_password',
+ port='fake_port', pool=pool1)
+ pool2 = dict(console_type='fake_type2',
+ public_hostname='fake_hostname2')
+ cons2 = dict(id=11, password='fake_password2',
+ port='fake_port2', pool=pool2)
+ return [cons1, cons2]
+
+ expected = {'consoles':
+ [{'id': 10, 'console_type': 'fake_type'},
+ {'id': 11, 'console_type': 'fake_type2'}]}
+
+ self.stubs.Set(console.api.API, 'get_consoles', fake_get_consoles)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ res_dict = self.controller.index(req, self.uuid)
+ self.assertThat(res_dict, matchers.DictMatches(expected))
+
+ def test_list_consoles_unknown_instance(self):
+ def fake_get_consoles(cons_self, context, instance_id):
+ raise exception.InstanceNotFound(instance_id=instance_id)
+ self.stubs.Set(console.api.API, 'get_consoles', fake_get_consoles)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.index,
+ req, self.uuid)
+
+ def test_delete_console(self):
+ def fake_get_console(cons_self, context, instance_id, console_id):
+ self.assertEqual(instance_id, self.uuid)
+ self.assertEqual(console_id, 20)
+ pool = dict(console_type='fake_type',
+ public_hostname='fake_hostname')
+ return dict(id=console_id, password='fake_password',
+ port='fake_port', pool=pool)
+
+ def fake_delete_console(cons_self, context, instance_id, console_id):
+ self.assertEqual(instance_id, self.uuid)
+ self.assertEqual(console_id, 20)
+
+ self.stubs.Set(console.api.API, 'get_console', fake_get_console)
+ self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
+
+ req = fakes.HTTPRequestV3.blank(self.url + '/20')
+ self.controller.delete(req, self.uuid, '20')
+
+ def test_delete_console_unknown_console(self):
+ def fake_delete_console(cons_self, context, instance_id, console_id):
+ raise exception.ConsoleNotFound(console_id=console_id)
+
+ self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
+
+ req = fakes.HTTPRequestV3.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, self.uuid, '20')
+
+ def test_delete_console_unknown_instance(self):
+ def fake_delete_console(cons_self, context, instance_id, console_id):
+ raise exception.ConsoleNotFoundForInstance(
+ instance_uuid=instance_id)
+
+ self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
+
+ req = fakes.HTTPRequestV3.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, self.uuid, '20')
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_create_backup.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_create_backup.py
new file mode 100644
index 0000000000..83701090f8
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_create_backup.py
@@ -0,0 +1,261 @@
+# Copyright 2011 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack import common
+from nova.api.openstack.compute.plugins.v3 import create_backup
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit.api.openstack.compute.plugins.v3 import \
+ admin_only_action_common
+from nova.tests.unit.api.openstack import fakes
+
+
+class CreateBackupTests(admin_only_action_common.CommonMixin,
+ test.NoDBTestCase):
+ def setUp(self):
+ super(CreateBackupTests, self).setUp()
+ self.controller = create_backup.CreateBackupController()
+ self.compute_api = self.controller.compute_api
+
+ def _fake_controller(*args, **kwargs):
+ return self.controller
+
+ self.stubs.Set(create_backup, 'CreateBackupController',
+ _fake_controller)
+ self.app = fakes.wsgi_app_v21(init_only=('servers',
+ 'os-create-backup'),
+ fake_auth_context=self.context)
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+ self.mox.StubOutWithMock(common,
+ 'check_img_metadata_properties_quota')
+ self.mox.StubOutWithMock(self.compute_api, 'backup')
+
+ def _make_url(self, uuid=None):
+ if uuid is None:
+ uuid = uuidutils.generate_uuid()
+ return '/servers/%s/action' % uuid
+
+ def test_create_backup_with_metadata(self):
+ metadata = {'123': 'asdf'}
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ 'metadata': metadata,
+ },
+ }
+
+ image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
+ properties=metadata)
+
+ common.check_img_metadata_properties_quota(self.context, metadata)
+ instance = self._stub_instance_get()
+ self.compute_api.backup(self.context, instance, 'Backup 1',
+ 'daily', 1,
+ extra_properties=metadata).AndReturn(image)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request(self._make_url(instance.uuid), body)
+ self.assertEqual(202, res.status_int)
+ self.assertIn('fake-image-id', res.headers['Location'])
+
+ def test_create_backup_no_name(self):
+ # Name is required for backups.
+ body = {
+ 'createBackup': {
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+ res = self._make_request(self._make_url(), body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_no_rotation(self):
+ # Rotation is required for backup requests.
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ },
+ }
+ res = self._make_request(self._make_url(), body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_negative_rotation(self):
+ """Rotation must be greater than or equal to zero
+ for backup requests
+ """
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': -1,
+ },
+ }
+ res = self._make_request(self._make_url(), body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_negative_rotation_with_string_number(self):
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': '-1',
+ },
+ }
+ res = self._make_request(self._make_url('fake'), body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_no_backup_type(self):
+ # Backup Type (daily or weekly) is required for backup requests.
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'rotation': 1,
+ },
+ }
+ res = self._make_request(self._make_url(), body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_non_dict_metadata(self):
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ 'metadata': 'non_dict',
+ },
+ }
+ res = self._make_request(self._make_url('fake'), body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_bad_entity(self):
+ body = {'createBackup': 'go'}
+ res = self._make_request(self._make_url(), body)
+ self.assertEqual(400, res.status_int)
+
+ def test_create_backup_rotation_is_zero(self):
+ # The happy path for creating backups if rotation is zero.
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 0,
+ },
+ }
+
+ image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
+ properties={})
+ common.check_img_metadata_properties_quota(self.context, {})
+ instance = self._stub_instance_get()
+ self.compute_api.backup(self.context, instance, 'Backup 1',
+ 'daily', 0,
+ extra_properties={}).AndReturn(image)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request(self._make_url(instance.uuid), body)
+ self.assertEqual(202, res.status_int)
+ self.assertNotIn('Location', res.headers)
+
+ def test_create_backup_rotation_is_positive(self):
+ # The happy path for creating backups if rotation is positive.
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+
+ image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
+ properties={})
+ common.check_img_metadata_properties_quota(self.context, {})
+ instance = self._stub_instance_get()
+ self.compute_api.backup(self.context, instance, 'Backup 1',
+ 'daily', 1,
+ extra_properties={}).AndReturn(image)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request(self._make_url(instance.uuid), body)
+ self.assertEqual(202, res.status_int)
+ self.assertIn('fake-image-id', res.headers['Location'])
+
+ def test_create_backup_rotation_is_string_number(self):
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': '1',
+ },
+ }
+
+ image = dict(id='fake-image-id', status='ACTIVE', name='Backup 1',
+ properties={})
+ common.check_img_metadata_properties_quota(self.context, {})
+ instance = self._stub_instance_get()
+ self.compute_api.backup(self.context, instance, 'Backup 1',
+ 'daily', 1,
+ extra_properties={}).AndReturn(image)
+
+ self.mox.ReplayAll()
+
+ res = self._make_request(self._make_url(instance['uuid']), body)
+ self.assertEqual(202, res.status_int)
+ self.assertIn('fake-image-id', res.headers['Location'])
+
+ def test_create_backup_raises_conflict_on_invalid_state(self):
+ body_map = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+ args_map = {
+ 'createBackup': (
+ ('Backup 1', 'daily', 1), {'extra_properties': {}}
+ ),
+ }
+ common.check_img_metadata_properties_quota(self.context, {})
+ self._test_invalid_state('createBackup', method='backup',
+ body_map=body_map,
+ compute_api_args_map=args_map)
+
+ def test_create_backup_with_non_existed_instance(self):
+ body_map = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+ common.check_img_metadata_properties_quota(self.context, {})
+ self._test_non_existing_instance('createBackup',
+ body_map=body_map)
+
+ def test_create_backup_with_invalid_create_backup(self):
+ body = {
+ 'createBackupup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+ res = self._make_request(self._make_url(), body)
+ self.assertEqual(400, res.status_int)
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_extended_volumes.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_extended_volumes.py
new file mode 100644
index 0000000000..dc6dd2898f
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_extended_volumes.py
@@ -0,0 +1,387 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute.plugins.v3 import extended_volumes
+from nova import compute
+from nova import context
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova import volume
+
+UUID1 = '00000000-0000-0000-0000-000000000001'
+UUID2 = '00000000-0000-0000-0000-000000000002'
+UUID3 = '00000000-0000-0000-0000-000000000003'
+
+
+def fake_compute_get(*args, **kwargs):
+ inst = fakes.stub_instance(1, uuid=UUID1)
+ return fake_instance.fake_instance_obj(args[1], **inst)
+
+
+def fake_compute_get_not_found(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id=UUID1)
+
+
+def fake_compute_get_all(*args, **kwargs):
+ db_list = [fakes.stub_instance(1), fakes.stub_instance(2)]
+ fields = instance_obj.INSTANCE_DEFAULT_FIELDS
+ return instance_obj._make_instance_list(args[1],
+ objects.InstanceList(),
+ db_list, fields)
+
+
+def fake_bdms_get_all_by_instance(*args, **kwargs):
+ return [fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': UUID1, 'source_type': 'volume',
+ 'destination_type': 'volume', 'id': 1}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': UUID2, 'source_type': 'volume',
+ 'destination_type': 'volume', 'id': 2})]
+
+
+def fake_attach_volume(self, context, instance, volume_id,
+ device, disk_bus, device_type):
+ pass
+
+
+def fake_attach_volume_not_found_vol(self, context, instance, volume_id,
+ device, disk_bus, device_type):
+ raise exception.VolumeNotFound(volume_id=volume_id)
+
+
+def fake_attach_volume_invalid_device_path(self, context, instance,
+ volume_id, device, disk_bus,
+ device_type):
+ raise exception.InvalidDevicePath(path=device)
+
+
+def fake_attach_volume_instance_invalid_state(self, context, instance,
+ volume_id, device, disk_bus,
+ device_type):
+ raise exception.InstanceInvalidState(instance_uuid=UUID1, state='',
+ method='', attr='')
+
+
+def fake_attach_volume_invalid_volume(self, context, instance,
+ volume_id, device, disk_bus,
+ device_type):
+ raise exception.InvalidVolume(reason='')
+
+
+def fake_detach_volume(self, context, instance, volume):
+ pass
+
+
+def fake_swap_volume(self, context, instance,
+ old_volume_id, new_volume_id):
+ pass
+
+
+def fake_swap_volume_invalid_volume(self, context, instance,
+ volume_id, device):
+ raise exception.InvalidVolume(reason='', volume_id=volume_id)
+
+
+def fake_swap_volume_unattached_volume(self, context, instance,
+ volume_id, device):
+ raise exception.VolumeUnattached(reason='', volume_id=volume_id)
+
+
+def fake_detach_volume_invalid_volume(self, context, instance, volume):
+ raise exception.InvalidVolume(reason='')
+
+
+def fake_swap_volume_instance_invalid_state(self, context, instance,
+ volume_id, device):
+ raise exception.InstanceInvalidState(instance_uuid=UUID1, state='',
+ method='', attr='')
+
+
+def fake_volume_get(*args, **kwargs):
+ pass
+
+
+def fake_volume_get_not_found(*args, **kwargs):
+ raise exception.VolumeNotFound(volume_id=UUID1)
+
+
+class ExtendedVolumesTest(test.TestCase):
+ content_type = 'application/json'
+ prefix = 'os-extended-volumes:'
+
+ def setUp(self):
+ super(ExtendedVolumesTest, self).setUp()
+ self.Controller = extended_volumes.ExtendedVolumesController()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
+ self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_bdms_get_all_by_instance)
+ self.stubs.Set(volume.cinder.API, 'get', fake_volume_get)
+ self.stubs.Set(compute.api.API, 'detach_volume', fake_detach_volume)
+ self.stubs.Set(compute.api.API, 'attach_volume', fake_attach_volume)
+ self.app = fakes.wsgi_app_v21(init_only=('os-extended-volumes',
+ 'servers'))
+ return_server = fakes.fake_instance_get()
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ def _make_request(self, url, body=None):
+ base_url = '/v2/fake/servers'
+ req = webob.Request.blank(base_url + url)
+ req.headers['Accept'] = self.content_type
+ if body:
+ req.body = jsonutils.dumps(body)
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ res = req.get_response(self.app)
+ return res
+
+ def _get_server(self, body):
+ return jsonutils.loads(body).get('server')
+
+ def _get_servers(self, body):
+ return jsonutils.loads(body).get('servers')
+
+ def test_show(self):
+ url = '/%s' % UUID1
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ server = self._get_server(res.body)
+ exp_volumes = [{'id': UUID1}, {'id': UUID2}]
+ if self.content_type == 'application/json':
+ actual = server.get('%svolumes_attached' % self.prefix)
+ self.assertEqual(exp_volumes, actual)
+
+ def test_detail(self):
+ url = '/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ exp_volumes = [{'id': UUID1}, {'id': UUID2}]
+ for i, server in enumerate(self._get_servers(res.body)):
+ if self.content_type == 'application/json':
+ actual = server.get('%svolumes_attached' % self.prefix)
+ self.assertEqual(exp_volumes, actual)
+
+ def test_detach(self):
+ url = "/%s/action" % UUID1
+ res = self._make_request(url, {"detach": {"volume_id": UUID1}})
+ self.assertEqual(res.status_int, 202)
+
+ def test_detach_volume_from_locked_server(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'detach_volume',
+ fakes.fake_actions_to_locked_server)
+ res = self._make_request(url, {"detach": {"volume_id": UUID1}})
+ self.assertEqual(res.status_int, 409)
+
+ def test_detach_with_non_existed_vol(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(volume.cinder.API, 'get', fake_volume_get_not_found)
+ res = self._make_request(url, {"detach": {"volume_id": UUID2}})
+ self.assertEqual(res.status_int, 404)
+
+ def test_detach_with_non_existed_instance(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get_not_found)
+ res = self._make_request(url, {"detach": {"volume_id": UUID2}})
+ self.assertEqual(res.status_int, 404)
+
+ def test_detach_with_invalid_vol(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'detach_volume',
+ fake_detach_volume_invalid_volume)
+ res = self._make_request(url, {"detach": {"volume_id": UUID2}})
+ self.assertEqual(res.status_int, 400)
+
+ def test_detach_with_bad_id(self):
+ url = "/%s/action" % UUID1
+ res = self._make_request(url, {"detach": {"volume_id": 'xxx'}})
+ self.assertEqual(res.status_int, 400)
+
+ def test_detach_without_id(self):
+ url = "/%s/action" % UUID1
+ res = self._make_request(url, {"detach": {}})
+ self.assertEqual(res.status_int, 400)
+
+ def test_detach_volume_with_invalid_request(self):
+ url = "/%s/action" % UUID1
+ res = self._make_request(url, {"detach": None})
+ self.assertEqual(res.status_int, 400)
+
+ @mock.patch('nova.objects.BlockDeviceMapping.is_root',
+ new_callable=mock.PropertyMock)
+ def test_detach_volume_root(self, mock_isroot):
+ url = "/%s/action" % UUID1
+ mock_isroot.return_value = True
+ res = self._make_request(url, {"detach": {"volume_id": UUID1}})
+ self.assertEqual(res.status_int, 403)
+
+ def test_attach_volume(self):
+ url = "/%s/action" % UUID1
+ res = self._make_request(url, {"attach": {"volume_id": UUID1}})
+ self.assertEqual(res.status_int, 202)
+
+ def test_attach_volume_to_locked_server(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'attach_volume',
+ fakes.fake_actions_to_locked_server)
+ res = self._make_request(url, {"attach": {"volume_id": UUID1}})
+ self.assertEqual(res.status_int, 409)
+
+ def test_attach_volume_disk_bus_and_disk_dev(self):
+ url = "/%s/action" % UUID1
+ self._make_request(url, {"attach": {"volume_id": UUID1,
+ "device": "/dev/vdb",
+ "disk_bus": "ide",
+ "device_type": "cdrom"}})
+
+ def test_attach_volume_with_bad_id(self):
+ url = "/%s/action" % UUID1
+ res = self._make_request(url, {"attach": {"volume_id": 'xxx'}})
+ self.assertEqual(res.status_int, 400)
+
+ def test_attach_volume_without_id(self):
+ url = "/%s/action" % UUID1
+ res = self._make_request(url, {"attach": {}})
+ self.assertEqual(res.status_int, 400)
+
+ def test_attach_volume_with_invalid_request(self):
+ url = "/%s/action" % UUID1
+ res = self._make_request(url, {"attach": None})
+ self.assertEqual(res.status_int, 400)
+
+ def test_attach_volume_with_non_existe_vol(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'attach_volume',
+ fake_attach_volume_not_found_vol)
+ res = self._make_request(url, {"attach": {"volume_id": UUID1}})
+ self.assertEqual(res.status_int, 404)
+
+ def test_attach_volume_with_non_existed_instance(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get_not_found)
+ res = self._make_request(url, {"attach": {"volume_id": UUID1}})
+ self.assertEqual(res.status_int, 404)
+
+ def test_attach_volume_with_invalid_device_path(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'attach_volume',
+ fake_attach_volume_invalid_device_path)
+ res = self._make_request(url, {"attach": {"volume_id": UUID1,
+ 'device': 'xxx'}})
+ self.assertEqual(res.status_int, 400)
+
+ def test_attach_volume_with_instance_invalid_state(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'attach_volume',
+ fake_attach_volume_instance_invalid_state)
+ res = self._make_request(url, {"attach": {"volume_id": UUID1}})
+ self.assertEqual(res.status_int, 409)
+
+ def test_attach_volume_with_invalid_volume(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'attach_volume',
+ fake_attach_volume_invalid_volume)
+ res = self._make_request(url, {"attach": {"volume_id": UUID1}})
+ self.assertEqual(res.status_int, 400)
+
+ def test_attach_volume_with_invalid_request_body(self):
+ url = "/%s/action" % UUID1
+ self.stubs.Set(compute.api.API, 'attach_volume',
+ fake_attach_volume_invalid_volume)
+ res = self._make_request(url, {"attach": None})
+ self.assertEqual(res.status_int, 400)
+
+ def _test_swap(self, uuid=UUID1, body=None):
+ body = body or {'swap_volume_attachment': {'old_volume_id': uuid,
+ 'new_volume_id': UUID2}}
+ req = webob.Request.blank('/v2/fake/servers/%s/action' % UUID1)
+ req.method = 'PUT'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = context.get_admin_context()
+ return self.Controller.swap(req, UUID1, body=body)
+
+ def test_swap_volume(self):
+ self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
+ # Check any exceptions don't happen and status code
+ self._test_swap()
+ self.assertEqual(202, self.Controller.swap.wsgi_code)
+
+ def test_swap_volume_for_locked_server(self):
+ def fake_swap_volume_for_locked_server(self, context, instance,
+ old_volume, new_volume):
+ raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
+ self.stubs.Set(compute.api.API, 'swap_volume',
+ fake_swap_volume_for_locked_server)
+ self.assertRaises(webob.exc.HTTPConflict, self._test_swap)
+
+ def test_swap_volume_for_locked_server_new(self):
+ self.stubs.Set(compute.api.API, 'swap_volume',
+ fakes.fake_actions_to_locked_server)
+ self.assertRaises(webob.exc.HTTPConflict, self._test_swap)
+
+ def test_swap_volume_instance_not_found(self):
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get_not_found)
+ self.assertRaises(webob.exc.HTTPNotFound, self._test_swap)
+
+ def test_swap_volume_with_bad_action(self):
+ self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
+ body = {'swap_volume_attachment_bad_action': None}
+ self.assertRaises(exception.ValidationError, self._test_swap,
+ body=body)
+
+ def test_swap_volume_with_invalid_body(self):
+ self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
+ body = {'swap_volume_attachment': {'bad_volume_id_body': UUID1,
+ 'new_volume_id': UUID2}}
+ self.assertRaises(exception.ValidationError, self._test_swap,
+ body=body)
+
+ def test_swap_volume_with_invalid_volume(self):
+ self.stubs.Set(compute.api.API, 'swap_volume',
+ fake_swap_volume_invalid_volume)
+ self.assertRaises(webob.exc.HTTPBadRequest, self._test_swap)
+
+ def test_swap_volume_with_unattached_volume(self):
+ self.stubs.Set(compute.api.API, 'swap_volume',
+ fake_swap_volume_unattached_volume)
+ self.assertRaises(webob.exc.HTTPNotFound, self._test_swap)
+
+ def test_swap_volume_with_bad_state_instance(self):
+ self.stubs.Set(compute.api.API, 'swap_volume',
+ fake_swap_volume_instance_invalid_state)
+ self.assertRaises(webob.exc.HTTPConflict, self._test_swap)
+
+ def test_swap_volume_no_attachment(self):
+ self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
+ self.assertRaises(webob.exc.HTTPNotFound, self._test_swap, UUID3)
+
+ def test_swap_volume_not_found(self):
+ self.stubs.Set(compute.api.API, 'swap_volume', fake_swap_volume)
+ self.stubs.Set(volume.cinder.API, 'get', fake_volume_get_not_found)
+ self.assertRaises(webob.exc.HTTPNotFound, self._test_swap)
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_extension_info.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_extension_info.py
new file mode 100644
index 0000000000..ee4e9d18b9
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_extension_info.py
@@ -0,0 +1,98 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import extension_info
+from nova import exception
+from nova import policy
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+class fake_extension(object):
+ def __init__(self, name, alias, description, version):
+ self.name = name
+ self.alias = alias
+ self.__doc__ = description
+ self.version = version
+
+
+fake_extensions = {
+ 'ext1-alias': fake_extension('ext1', 'ext1-alias', 'ext1 description', 1),
+ 'ext2-alias': fake_extension('ext2', 'ext2-alias', 'ext2 description', 2),
+ 'ext3-alias': fake_extension('ext3', 'ext3-alias', 'ext3 description', 1)
+}
+
+
+def fake_policy_enforce(context, action, target, do_raise=True):
+ return True
+
+
+def fake_policy_enforce_selective(context, action, target, do_raise=True):
+ if action == 'compute_extension:v3:ext1-alias:discoverable':
+ raise exception.Forbidden
+ else:
+ return True
+
+
+class ExtensionInfoTest(test.NoDBTestCase):
+
+ def setUp(self):
+ super(ExtensionInfoTest, self).setUp()
+ ext_info = plugins.LoadedExtensionInfo()
+ ext_info.extensions = fake_extensions
+ self.controller = extension_info.ExtensionInfoController(ext_info)
+
+ def test_extension_info_list(self):
+ self.stubs.Set(policy, 'enforce', fake_policy_enforce)
+ req = fakes.HTTPRequestV3.blank('/extensions')
+ res_dict = self.controller.index(req)
+ self.assertEqual(3, len(res_dict['extensions']))
+ for e in res_dict['extensions']:
+ self.assertIn(e['alias'], fake_extensions)
+ self.assertEqual(e['name'], fake_extensions[e['alias']].name)
+ self.assertEqual(e['alias'], fake_extensions[e['alias']].alias)
+ self.assertEqual(e['description'],
+ fake_extensions[e['alias']].__doc__)
+ self.assertEqual(e['version'],
+ fake_extensions[e['alias']].version)
+
+ def test_extension_info_show(self):
+ self.stubs.Set(policy, 'enforce', fake_policy_enforce)
+ req = fakes.HTTPRequestV3.blank('/extensions/ext1-alias')
+ res_dict = self.controller.show(req, 'ext1-alias')
+ self.assertEqual(1, len(res_dict))
+ self.assertEqual(res_dict['extension']['name'],
+ fake_extensions['ext1-alias'].name)
+ self.assertEqual(res_dict['extension']['alias'],
+ fake_extensions['ext1-alias'].alias)
+ self.assertEqual(res_dict['extension']['description'],
+ fake_extensions['ext1-alias'].__doc__)
+ self.assertEqual(res_dict['extension']['version'],
+ fake_extensions['ext1-alias'].version)
+
+ def test_extension_info_list_not_all_discoverable(self):
+ self.stubs.Set(policy, 'enforce', fake_policy_enforce_selective)
+ req = fakes.HTTPRequestV3.blank('/extensions')
+ res_dict = self.controller.index(req)
+ self.assertEqual(2, len(res_dict['extensions']))
+ for e in res_dict['extensions']:
+ self.assertNotEqual('ext1-alias', e['alias'])
+ self.assertIn(e['alias'], fake_extensions)
+ self.assertEqual(e['name'], fake_extensions[e['alias']].name)
+ self.assertEqual(e['alias'], fake_extensions[e['alias']].alias)
+ self.assertEqual(e['description'],
+ fake_extensions[e['alias']].__doc__)
+ self.assertEqual(e['version'],
+ fake_extensions[e['alias']].version)
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_lock_server.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_lock_server.py
new file mode 100644
index 0000000000..ff5817ba19
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_lock_server.py
@@ -0,0 +1,57 @@
+# Copyright 2011 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack.compute.plugins.v3 import lock_server
+from nova import exception
+from nova.tests.unit.api.openstack.compute.plugins.v3 import \
+ admin_only_action_common
+from nova.tests.unit.api.openstack import fakes
+
+
+class LockServerTests(admin_only_action_common.CommonTests):
+ def setUp(self):
+ super(LockServerTests, self).setUp()
+ self.controller = lock_server.LockServerController()
+ self.compute_api = self.controller.compute_api
+
+ def _fake_controller(*args, **kwargs):
+ return self.controller
+
+ self.stubs.Set(lock_server, 'LockServerController',
+ _fake_controller)
+ self.app = fakes.wsgi_app_v21(init_only=('servers',
+ 'os-lock-server'),
+ fake_auth_context=self.context)
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_lock_unlock(self):
+ self._test_actions(['lock', 'unlock'])
+
+ def test_lock_unlock_with_non_existed_instance(self):
+ self._test_actions_with_non_existed_instance(['lock', 'unlock'])
+
+ def test_unlock_not_authorized(self):
+ self.mox.StubOutWithMock(self.compute_api, 'unlock')
+
+ instance = self._stub_instance_get()
+
+ self.compute_api.unlock(self.context, instance).AndRaise(
+ exception.PolicyNotAuthorized(action='unlock'))
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance.uuid,
+ {'unlock': None})
+ self.assertEqual(403, res.status_int)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_migrations.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_migrations.py
index c735e87fea..c735e87fea 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_migrations.py
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_migrations.py
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_multiple_create.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_multiple_create.py
new file mode 100644
index 0000000000..35a559c668
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_multiple_create.py
@@ -0,0 +1,547 @@
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import uuid
+
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import block_device_mapping
+from nova.api.openstack.compute.plugins.v3 import multiple_create
+from nova.api.openstack.compute.plugins.v3 import servers
+from nova.compute import api as compute_api
+from nova.compute import flavors
+from nova import db
+from nova import exception
+from nova.network import manager
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.image import fake
+
+CONF = cfg.CONF
+FAKE_UUID = fakes.FAKE_UUID
+
+
+def fake_gen_uuid():
+ return FAKE_UUID
+
+
+def return_security_group(context, instance_id, security_group_id):
+ pass
+
+
+class ServersControllerCreateTest(test.TestCase):
+
+ def setUp(self):
+ """Shared implementation for tests below that create instance."""
+ super(ServersControllerCreateTest, self).setUp()
+
+ self.flags(verbose=True,
+ enable_instance_password=True)
+ self.instance_cache_num = 0
+ self.instance_cache_by_id = {}
+ self.instance_cache_by_uuid = {}
+
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers.ServersController(extension_info=ext_info)
+ CONF.set_override('extensions_blacklist', 'os-multiple-create',
+ 'osapi_v3')
+ self.no_mult_create_controller = servers.ServersController(
+ extension_info=ext_info)
+
+ def instance_create(context, inst):
+ inst_type = flavors.get_flavor_by_flavor_id(3)
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ def_image_ref = 'http://localhost/images/%s' % image_uuid
+ self.instance_cache_num += 1
+ instance = fake_instance.fake_db_instance(**{
+ 'id': self.instance_cache_num,
+ 'display_name': inst['display_name'] or 'test',
+ 'uuid': FAKE_UUID,
+ 'instance_type': inst_type,
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fead::1234',
+ 'image_ref': inst.get('image_ref', def_image_ref),
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'reservation_id': inst['reservation_id'],
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "progress": 0,
+ "fixed_ips": [],
+ "task_state": "",
+ "vm_state": "",
+ "security_groups": inst['security_groups'],
+ })
+
+ self.instance_cache_by_id[instance['id']] = instance
+ self.instance_cache_by_uuid[instance['uuid']] = instance
+ return instance
+
+ def instance_get(context, instance_id):
+ """Stub for compute/api create() pulling in instance after
+ scheduling
+ """
+ return self.instance_cache_by_id[instance_id]
+
+ def instance_update(context, uuid, values):
+ instance = self.instance_cache_by_uuid[uuid]
+ instance.update(values)
+ return instance
+
+ def server_update(context, instance_uuid, params, update_cells=True,
+ columns_to_join=None):
+ inst = self.instance_cache_by_uuid[instance_uuid]
+ inst.update(params)
+ return (inst, inst)
+
+ def fake_method(*args, **kwargs):
+ pass
+
+ def project_get_networks(context, user_id):
+ return dict(id='1', host='localhost')
+
+ def queue_get_for(context, *args):
+ return 'network_topic'
+
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
+ self.stubs.Set(db, 'instance_add_security_group',
+ return_security_group)
+ self.stubs.Set(db, 'project_get_networks',
+ project_get_networks)
+ self.stubs.Set(db, 'instance_create', instance_create)
+ self.stubs.Set(db, 'instance_system_metadata_update',
+ fake_method)
+ self.stubs.Set(db, 'instance_get', instance_get)
+ self.stubs.Set(db, 'instance_update', instance_update)
+ self.stubs.Set(db, 'instance_update_and_get_original',
+ server_update)
+ self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
+ fake_method)
+
+ def _test_create_extra(self, params, no_image=False,
+ override_controller=None):
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
+ if no_image:
+ server.pop('imageRef', None)
+ server.update(params)
+ body = dict(server=server)
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ if override_controller:
+ server = override_controller.create(req, body=body).obj['server']
+ else:
+ server = self.controller.create(req, body=body).obj['server']
+
+ def test_create_instance_with_multiple_create_disabled(self):
+ min_count = 2
+ max_count = 3
+ params = {
+ multiple_create.MIN_ATTRIBUTE_NAME: min_count,
+ multiple_create.MAX_ATTRIBUTE_NAME: max_count,
+ }
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertNotIn('min_count', kwargs)
+ self.assertNotIn('max_count', kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(
+ params,
+ override_controller=self.no_mult_create_controller)
+
+ def test_multiple_create_with_string_type_min_and_max(self):
+ min_count = '2'
+ max_count = '3'
+ params = {
+ multiple_create.MIN_ATTRIBUTE_NAME: min_count,
+ multiple_create.MAX_ATTRIBUTE_NAME: max_count,
+ }
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIsInstance(kwargs['min_count'], int)
+ self.assertIsInstance(kwargs['max_count'], int)
+ self.assertEqual(kwargs['min_count'], 2)
+ self.assertEqual(kwargs['max_count'], 3)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_multiple_create_enabled(self):
+ min_count = 2
+ max_count = 3
+ params = {
+ multiple_create.MIN_ATTRIBUTE_NAME: min_count,
+ multiple_create.MAX_ATTRIBUTE_NAME: max_count,
+ }
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['min_count'], 2)
+ self.assertEqual(kwargs['max_count'], 3)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_invalid_negative_min(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+ body = {
+ 'server': {
+ multiple_create.MIN_ATTRIBUTE_NAME: -1,
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ }
+ }
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(exception.ValidationError,
+ self.controller.create,
+ req,
+ body=body)
+
+ def test_create_instance_invalid_negative_max(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+ body = {
+ 'server': {
+ multiple_create.MAX_ATTRIBUTE_NAME: -1,
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ }
+ }
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(exception.ValidationError,
+ self.controller.create,
+ req,
+ body=body)
+
+ def test_create_instance_with_blank_min(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+ body = {
+ 'server': {
+ multiple_create.MIN_ATTRIBUTE_NAME: '',
+ 'name': 'server_test',
+ 'image_ref': image_href,
+ 'flavor_ref': flavor_ref,
+ }
+ }
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(exception.ValidationError,
+ self.controller.create,
+ req,
+ body=body)
+
+ def test_create_instance_with_blank_max(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+ body = {
+ 'server': {
+ multiple_create.MAX_ATTRIBUTE_NAME: '',
+ 'name': 'server_test',
+ 'image_ref': image_href,
+ 'flavor_ref': flavor_ref,
+ }
+ }
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(exception.ValidationError,
+ self.controller.create,
+ req,
+ body=body)
+
+ def test_create_instance_invalid_min_greater_than_max(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+ body = {
+ 'server': {
+ multiple_create.MIN_ATTRIBUTE_NAME: 4,
+ multiple_create.MAX_ATTRIBUTE_NAME: 2,
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ }
+ }
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ req,
+ body=body)
+
+ def test_create_instance_invalid_alpha_min(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+ body = {
+ 'server': {
+ multiple_create.MIN_ATTRIBUTE_NAME: 'abcd',
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ }
+ }
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(exception.ValidationError,
+ self.controller.create,
+ req,
+ body=body)
+
+ def test_create_instance_invalid_alpha_max(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+ body = {
+ 'server': {
+ multiple_create.MAX_ATTRIBUTE_NAME: 'abcd',
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ }
+ }
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(exception.ValidationError,
+ self.controller.create,
+ req,
+ body=body)
+
+ def test_create_multiple_instances(self):
+ """Test creating multiple instances but not asking for
+ reservation_id
+ """
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+ body = {
+ 'server': {
+ multiple_create.MIN_ATTRIBUTE_NAME: 2,
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {'hello': 'world',
+ 'open': 'stack'},
+ }
+ }
+
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = self.controller.create(req, body=body).obj
+
+ self.assertEqual(FAKE_UUID, res["server"]["id"])
+ self._check_admin_password_len(res["server"])
+
+ def test_create_multiple_instances_pass_disabled(self):
+ """Test creating multiple instances but not asking for
+ reservation_id
+ """
+ self.flags(enable_instance_password=False)
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+ body = {
+ 'server': {
+ multiple_create.MIN_ATTRIBUTE_NAME: 2,
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {'hello': 'world',
+ 'open': 'stack'},
+ }
+ }
+
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = self.controller.create(req, body=body).obj
+
+ self.assertEqual(FAKE_UUID, res["server"]["id"])
+ self._check_admin_password_missing(res["server"])
+
+ def _check_admin_password_len(self, server_dict):
+ """utility function - check server_dict for adminPass length."""
+ self.assertEqual(CONF.password_length,
+ len(server_dict["adminPass"]))
+
+ def _check_admin_password_missing(self, server_dict):
+ """utility function - check server_dict for admin_password absence."""
+ self.assertNotIn("admin_password", server_dict)
+
+ def _create_multiple_instances_resv_id_return(self, resv_id_return):
+ """Test creating multiple instances with asking for
+ reservation_id
+ """
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+ body = {
+ 'server': {
+ multiple_create.MIN_ATTRIBUTE_NAME: 2,
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {'hello': 'world',
+ 'open': 'stack'},
+ multiple_create.RRID_ATTRIBUTE_NAME: resv_id_return
+ }
+ }
+
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = self.controller.create(req, body=body)
+ reservation_id = res.obj['reservation_id']
+ self.assertNotEqual(reservation_id, "")
+ self.assertIsNotNone(reservation_id)
+ self.assertTrue(len(reservation_id) > 1)
+
+ def test_create_multiple_instances_with_resv_id_return(self):
+ self._create_multiple_instances_resv_id_return(True)
+
+ def test_create_multiple_instances_with_string_resv_id_return(self):
+ self._create_multiple_instances_resv_id_return("True")
+
+ def test_create_multiple_instances_with_multiple_volume_bdm(self):
+ """Test that a BadRequest is raised if multiple instances
+ are requested with a list of block device mappings for volumes.
+ """
+ min_count = 2
+ bdm = [{'source_type': 'volume', 'uuid': 'vol-xxxx'},
+ {'source_type': 'volume', 'uuid': 'vol-yyyy'}
+ ]
+ params = {
+ block_device_mapping.ATTRIBUTE_NAME: bdm,
+ multiple_create.MIN_ATTRIBUTE_NAME: min_count
+ }
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['min_count'], 2)
+ self.assertEqual(len(kwargs['block_device_mapping']), 2)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ exc = self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params, no_image=True)
+ self.assertEqual("Cannot attach one or more volumes to multiple "
+ "instances", exc.explanation)
+
+ def test_create_multiple_instances_with_single_volume_bdm(self):
+ """Test that a BadRequest is raised if multiple instances
+ are requested to boot from a single volume.
+ """
+ min_count = 2
+ bdm = [{'source_type': 'volume', 'uuid': 'vol-xxxx'}]
+ params = {
+ block_device_mapping.ATTRIBUTE_NAME: bdm,
+ multiple_create.MIN_ATTRIBUTE_NAME: min_count
+ }
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['min_count'], 2)
+ self.assertEqual(kwargs['block_device_mapping'][0]['volume_id'],
+ 'vol-xxxx')
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ exc = self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params, no_image=True)
+ self.assertEqual("Cannot attach one or more volumes to multiple "
+ "instances", exc.explanation)
+
+ def test_create_multiple_instance_with_non_integer_max_count(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+ body = {
+ 'server': {
+ multiple_create.MAX_ATTRIBUTE_NAME: 2.5,
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {'hello': 'world',
+ 'open': 'stack'},
+ }
+ }
+
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
+
+ def test_create_multiple_instance_with_non_integer_min_count(self):
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+ body = {
+ 'server': {
+ multiple_create.MIN_ATTRIBUTE_NAME: 2.5,
+ 'name': 'server_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {'hello': 'world',
+ 'open': 'stack'},
+ }
+ }
+
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_pause_server.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_pause_server.py
new file mode 100644
index 0000000000..5364fb45b3
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_pause_server.py
@@ -0,0 +1,60 @@
+# Copyright 2011 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack.compute.plugins.v3 import pause_server
+from nova.tests.unit.api.openstack.compute.plugins.v3 import \
+ admin_only_action_common
+from nova.tests.unit.api.openstack import fakes
+
+
+class PauseServerTests(admin_only_action_common.CommonTests):
+ def setUp(self):
+ super(PauseServerTests, self).setUp()
+ self.controller = pause_server.PauseServerController()
+ self.compute_api = self.controller.compute_api
+
+ def _fake_controller(*args, **kwargs):
+ return self.controller
+
+ self.stubs.Set(pause_server, 'PauseServerController',
+ _fake_controller)
+ self.app = fakes.wsgi_app_v21(init_only=('servers',
+ 'os-pause-server'),
+ fake_auth_context=self.context)
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_pause_unpause(self):
+ self._test_actions(['pause', 'unpause'])
+
+ def test_actions_raise_on_not_implemented(self):
+ for action in ['pause', 'unpause']:
+ self.mox.StubOutWithMock(self.compute_api, action)
+ self._test_not_implemented_state(action)
+ # Re-mock this.
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_pause_unpause_with_non_existed_instance(self):
+ self._test_actions_with_non_existed_instance(['pause', 'unpause'])
+
+ def test_pause_unpause_with_non_existed_instance_in_compute_api(self):
+ self._test_actions_instance_not_found_in_compute_api(['pause',
+ 'unpause'])
+
+ def test_pause_unpause_raise_conflict_on_invalid_state(self):
+ self._test_actions_raise_conflict_on_invalid_state(['pause',
+ 'unpause'])
+
+ def test_actions_with_locked_instance(self):
+ self._test_actions_with_locked_instance(['pause', 'unpause'])
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_pci.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_pci.py
new file mode 100644
index 0000000000..6ac6269195
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_pci.py
@@ -0,0 +1,236 @@
+# Copyright 2013 Intel Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from oslo.serialization import jsonutils
+from webob import exc
+
+from nova.api.openstack.compute.plugins.v3 import pci
+from nova.api.openstack import wsgi
+from nova import context
+from nova import db
+from nova import exception
+from nova import objects
+from nova.pci import device
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.objects import test_pci_device
+
+
+fake_compute_node = {
+ 'pci_stats': [{"count": 3,
+ "vendor_id": "8086",
+ "product_id": "1520",
+ "extra_info": {"phys_function": '[["0x0000", "0x04", '
+ '"0x00", "0x1"]]'}}]}
+
+
+class FakeResponse(wsgi.ResponseObject):
+ pass
+
+
+class PciServerControllerTest(test.NoDBTestCase):
+ def setUp(self):
+ super(PciServerControllerTest, self).setUp()
+ self.controller = pci.PciServerController()
+ self.fake_obj = {'server': {'addresses': {},
+ 'id': 'fb08',
+ 'name': 'a3',
+ 'status': 'ACTIVE',
+ 'tenant_id': '9a3af784c',
+ 'user_id': 'e992080ac0',
+ }}
+ self.fake_list = {'servers': [{'addresses': {},
+ 'id': 'fb08',
+ 'name': 'a3',
+ 'status': 'ACTIVE',
+ 'tenant_id': '9a3af784c',
+ 'user_id': 'e992080ac',
+ }]}
+ self._create_fake_instance()
+ self._create_fake_pci_device()
+ device.claim(self.pci_device, self.inst)
+ device.allocate(self.pci_device, self.inst)
+
+ def _create_fake_instance(self):
+ self.inst = objects.Instance()
+ self.inst.uuid = 'fake-inst-uuid'
+ self.inst.pci_devices = objects.PciDeviceList()
+
+ def _create_fake_pci_device(self):
+ def fake_pci_device_get_by_addr(ctxt, id, addr):
+ return test_pci_device.fake_db_dev
+
+ ctxt = context.get_admin_context()
+ self.stubs.Set(db, 'pci_device_get_by_addr',
+ fake_pci_device_get_by_addr)
+ self.pci_device = objects.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
+
+ def test_show(self):
+ def fake_get_db_instance(id):
+ return self.inst
+
+ resp = FakeResponse(self.fake_obj, '')
+ req = fakes.HTTPRequestV3.blank('/os-pci/1', use_admin_context=True)
+ self.stubs.Set(req, 'get_db_instance', fake_get_db_instance)
+ self.controller.show(req, resp, '1')
+ self.assertEqual([{'id': 1}],
+ resp.obj['server']['os-pci:pci_devices'])
+
+ def test_detail(self):
+ def fake_get_db_instance(id):
+ return self.inst
+
+ resp = FakeResponse(self.fake_list, '')
+ req = fakes.HTTPRequestV3.blank('/os-pci/detail',
+ use_admin_context=True)
+ self.stubs.Set(req, 'get_db_instance', fake_get_db_instance)
+ self.controller.detail(req, resp)
+ self.assertEqual([{'id': 1}],
+ resp.obj['servers'][0]['os-pci:pci_devices'])
+
+
+class PciHypervisorControllerTest(test.NoDBTestCase):
+ def setUp(self):
+ super(PciHypervisorControllerTest, self).setUp()
+ self.controller = pci.PciHypervisorController()
+ self.fake_objs = dict(hypervisors=[
+ dict(id=1,
+ service=dict(id=1, host="compute1"),
+ hypervisor_type="xen",
+ hypervisor_version=3,
+ hypervisor_hostname="hyper1")])
+ self.fake_obj = dict(hypervisor=dict(
+ id=1,
+ service=dict(id=1, host="compute1"),
+ hypervisor_type="xen",
+ hypervisor_version=3,
+ hypervisor_hostname="hyper1"))
+
+ def test_show(self):
+ def fake_get_db_compute_node(id):
+ fake_compute_node['pci_stats'] = jsonutils.dumps(
+ fake_compute_node['pci_stats'])
+ return fake_compute_node
+
+ req = fakes.HTTPRequestV3.blank('/os-hypervisors/1',
+ use_admin_context=True)
+ resp = FakeResponse(self.fake_obj, '')
+ self.stubs.Set(req, 'get_db_compute_node', fake_get_db_compute_node)
+ self.controller.show(req, resp, '1')
+ self.assertIn('os-pci:pci_stats', resp.obj['hypervisor'])
+ fake_compute_node['pci_stats'] = jsonutils.loads(
+ fake_compute_node['pci_stats'])
+ self.assertEqual(fake_compute_node['pci_stats'][0],
+ resp.obj['hypervisor']['os-pci:pci_stats'][0])
+
+ def test_detail(self):
+ def fake_get_db_compute_node(id):
+ fake_compute_node['pci_stats'] = jsonutils.dumps(
+ fake_compute_node['pci_stats'])
+ return fake_compute_node
+
+ req = fakes.HTTPRequestV3.blank('/os-hypervisors/detail',
+ use_admin_context=True)
+ resp = FakeResponse(self.fake_objs, '')
+ self.stubs.Set(req, 'get_db_compute_node', fake_get_db_compute_node)
+ self.controller.detail(req, resp)
+ fake_compute_node['pci_stats'] = jsonutils.loads(
+ fake_compute_node['pci_stats'])
+ self.assertIn('os-pci:pci_stats', resp.obj['hypervisors'][0])
+ self.assertEqual(fake_compute_node['pci_stats'][0],
+ resp.obj['hypervisors'][0]['os-pci:pci_stats'][0])
+
+
+class PciControlletest(test.NoDBTestCase):
+ def setUp(self):
+ super(PciControlletest, self).setUp()
+ self.controller = pci.PciController()
+
+ def test_show(self):
+ def fake_pci_device_get_by_id(context, id):
+ return test_pci_device.fake_db_dev
+
+ self.stubs.Set(db, 'pci_device_get_by_id', fake_pci_device_get_by_id)
+ req = fakes.HTTPRequestV3.blank('/os-pci/1', use_admin_context=True)
+ result = self.controller.show(req, '1')
+ dist = {'pci_device': {'address': 'a',
+ 'compute_node_id': 1,
+ 'dev_id': 'i',
+ 'extra_info': {},
+ 'dev_type': 't',
+ 'id': 1,
+ 'server_uuid': None,
+ 'label': 'l',
+ 'product_id': 'p',
+ 'status': 'available',
+ 'vendor_id': 'v'}}
+ self.assertEqual(dist, result)
+
+ def test_show_error_id(self):
+ def fake_pci_device_get_by_id(context, id):
+ raise exception.PciDeviceNotFoundById(id=id)
+
+ self.stubs.Set(db, 'pci_device_get_by_id', fake_pci_device_get_by_id)
+ req = fakes.HTTPRequestV3.blank('/os-pci/0', use_admin_context=True)
+ self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '0')
+
+ def _fake_compute_node_get_all(self, context):
+ return [dict(id=1,
+ service_id=1,
+ cpu_info='cpu_info',
+ disk_available_least=100)]
+
+ def _fake_pci_device_get_all_by_node(self, context, node):
+ return [test_pci_device.fake_db_dev, test_pci_device.fake_db_dev_1]
+
+ def test_index(self):
+ self.stubs.Set(db, 'compute_node_get_all',
+ self._fake_compute_node_get_all)
+ self.stubs.Set(db, 'pci_device_get_all_by_node',
+ self._fake_pci_device_get_all_by_node)
+
+ req = fakes.HTTPRequestV3.blank('/os-pci', use_admin_context=True)
+ result = self.controller.index(req)
+ dist = {'pci_devices': [test_pci_device.fake_db_dev,
+ test_pci_device.fake_db_dev_1]}
+ for i in range(len(result['pci_devices'])):
+ self.assertEqual(dist['pci_devices'][i]['vendor_id'],
+ result['pci_devices'][i]['vendor_id'])
+ self.assertEqual(dist['pci_devices'][i]['id'],
+ result['pci_devices'][i]['id'])
+ self.assertEqual(dist['pci_devices'][i]['status'],
+ result['pci_devices'][i]['status'])
+ self.assertEqual(dist['pci_devices'][i]['address'],
+ result['pci_devices'][i]['address'])
+
+ def test_detail(self):
+ self.stubs.Set(db, 'compute_node_get_all',
+ self._fake_compute_node_get_all)
+ self.stubs.Set(db, 'pci_device_get_all_by_node',
+ self._fake_pci_device_get_all_by_node)
+ req = fakes.HTTPRequestV3.blank('/os-pci/detail',
+ use_admin_context=True)
+ result = self.controller.detail(req)
+ dist = {'pci_devices': [test_pci_device.fake_db_dev,
+ test_pci_device.fake_db_dev_1]}
+ for i in range(len(result['pci_devices'])):
+ self.assertEqual(dist['pci_devices'][i]['vendor_id'],
+ result['pci_devices'][i]['vendor_id'])
+ self.assertEqual(dist['pci_devices'][i]['id'],
+ result['pci_devices'][i]['id'])
+ self.assertEqual(dist['pci_devices'][i]['label'],
+ result['pci_devices'][i]['label'])
+ self.assertEqual(dist['pci_devices'][i]['dev_id'],
+ result['pci_devices'][i]['dev_id'])
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_server_actions.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_server_actions.py
new file mode 100644
index 0000000000..0bfe0eb2d4
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_server_actions.py
@@ -0,0 +1,1131 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import mock
+import mox
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import servers
+from nova.compute import api as compute_api
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.image import glance
+from nova import objects
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit.image import fake
+
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
+FAKE_UUID = fakes.FAKE_UUID
+INSTANCE_IDS = {FAKE_UUID: 1}
+
+
+def return_server_not_found(*arg, **kwarg):
+ raise exception.InstanceNotFound(instance_id='42')
+
+
+def instance_update_and_get_original(context, instance_uuid, values,
+ update_cells=True,
+ columns_to_join=None,
+ ):
+ inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
+ inst = dict(inst, **values)
+ return (inst, inst)
+
+
+def instance_update(context, instance_uuid, kwargs, update_cells=True):
+ inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
+ return inst
+
+
+class MockSetAdminPassword(object):
+ def __init__(self):
+ self.instance_id = None
+ self.password = None
+
+ def __call__(self, context, instance, password):
+ self.instance_id = instance['uuid']
+ self.password = password
+
+
+class ServerActionsControllerTest(test.TestCase):
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ image_href = 'http://localhost/v2/fake/images/%s' % image_uuid
+
+ def setUp(self):
+ super(ServerActionsControllerTest, self).setUp()
+
+ CONF.set_override('host', 'localhost', group='glance')
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ host='fake_host'))
+ self.stubs.Set(db, 'instance_update_and_get_original',
+ instance_update_and_get_original)
+
+ fakes.stub_out_nw_api(self.stubs)
+ fakes.stub_out_compute_api_snapshot(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+ self.flags(allow_instance_snapshots=True,
+ enable_instance_password=True)
+ self.uuid = FAKE_UUID
+ self.url = '/servers/%s/action' % self.uuid
+ self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
+
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers.ServersController(extension_info=ext_info)
+ self.compute_api = self.controller.compute_api
+ self.context = context.RequestContext('fake', 'fake')
+ self.app = fakes.wsgi_app_v21(init_only=('servers',),
+ fake_auth_context=self.context)
+
+ def _make_request(self, url, body):
+ req = webob.Request.blank('/v2/fake' + url)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.content_type = 'application/json'
+ return req.get_response(self.app)
+
+ def _stub_instance_get(self, uuid=None):
+ self.mox.StubOutWithMock(compute_api.API, 'get')
+ if uuid is None:
+ uuid = uuidutils.generate_uuid()
+ instance = fake_instance.fake_db_instance(
+ id=1, uuid=uuid, vm_state=vm_states.ACTIVE, task_state=None)
+ instance = objects.Instance._from_db_object(
+ self.context, objects.Instance(), instance)
+
+ self.compute_api.get(self.context, uuid, want_objects=True,
+ expected_attrs=['pci_devices']).AndReturn(instance)
+ return instance
+
+ def _test_locked_instance(self, action, method=None, body_map=None,
+ compute_api_args_map=None):
+ if method is None:
+ method = action
+ if body_map is None:
+ body_map = {}
+ if compute_api_args_map is None:
+ compute_api_args_map = {}
+
+ instance = self._stub_instance_get()
+ args, kwargs = compute_api_args_map.get(action, ((), {}))
+
+ getattr(compute_api.API, method)(self.context, instance,
+ *args, **kwargs).AndRaise(
+ exception.InstanceIsLocked(instance_uuid=instance['uuid']))
+
+ self.mox.ReplayAll()
+
+ res = self._make_request('/servers/%s/action' % instance['uuid'],
+ {action: body_map.get(action)})
+ self.assertEqual(409, res.status_int)
+ # Do these here instead of tearDown because this method is called
+ # more than once for the same test case
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def test_actions_with_locked_instance(self):
+ actions = ['resize', 'confirmResize', 'revertResize', 'reboot',
+ 'rebuild']
+
+ method_translations = {'confirmResize': 'confirm_resize',
+ 'revertResize': 'revert_resize'}
+
+ body_map = {'resize': {'flavorRef': '2'},
+ 'reboot': {'type': 'HARD'},
+ 'rebuild': {'imageRef': self.image_uuid,
+ 'adminPass': 'TNc53Dr8s7vw'}}
+
+ args_map = {'resize': (('2'), {}),
+ 'confirmResize': ((), {}),
+ 'reboot': (('HARD',), {}),
+ 'rebuild': ((self.image_uuid, 'TNc53Dr8s7vw'), {})}
+
+ for action in actions:
+ method = method_translations.get(action)
+ self.mox.StubOutWithMock(compute_api.API, method or action)
+ self._test_locked_instance(action, method=method,
+ body_map=body_map,
+ compute_api_args_map=args_map)
+
+ def test_reboot_hard(self):
+ body = dict(reboot=dict(type="HARD"))
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.controller._action_reboot(req, FAKE_UUID, body)
+
+ def test_reboot_soft(self):
+ body = dict(reboot=dict(type="SOFT"))
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.controller._action_reboot(req, FAKE_UUID, body)
+
+ def test_reboot_incorrect_type(self):
+ body = dict(reboot=dict(type="NOT_A_TYPE"))
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
+ def test_reboot_missing_type(self):
+ body = dict(reboot=dict())
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
+ def test_reboot_none(self):
+ body = dict(reboot=dict(type=None))
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
+ def test_reboot_not_found(self):
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ return_server_not_found)
+
+ body = dict(reboot=dict(type="HARD"))
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._action_reboot,
+ req, str(uuid.uuid4()), body)
+
+ def test_reboot_raises_conflict_on_invalid_state(self):
+ body = dict(reboot=dict(type="HARD"))
+
+ def fake_reboot(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ self.stubs.Set(compute_api.API, 'reboot', fake_reboot)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
+ def test_reboot_soft_with_soft_in_progress_raises_conflict(self):
+ body = dict(reboot=dict(type="SOFT"))
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ task_state=task_states.REBOOTING))
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
+ def test_reboot_hard_with_soft_in_progress_does_not_raise(self):
+ body = dict(reboot=dict(type="HARD"))
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ task_state=task_states.REBOOTING))
+ self.controller._action_reboot(req, FAKE_UUID, body)
+
+ def test_reboot_hard_with_hard_in_progress_raises_conflict(self):
+ body = dict(reboot=dict(type="HARD"))
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ task_state=task_states.REBOOTING_HARD))
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
+ def test_rebuild_accepted_minimum(self):
+ return_server = fakes.fake_instance_get(image_ref='2',
+ vm_state=vm_states.ACTIVE, host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self_href = 'http://localhost/v3/servers/%s' % FAKE_UUID
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ robj = self.controller._action_rebuild(req, FAKE_UUID, body=body)
+ body = robj.obj
+
+ self.assertEqual(body['server']['image']['id'], '2')
+ self.assertEqual(len(body['server']['adminPass']),
+ CONF.password_length)
+
+ self.assertEqual(robj['location'], self_href)
+
+ def test_rebuild_instance_with_image_uuid(self):
+ info = dict(image_href_in_call=None)
+
+ def rebuild(self2, context, instance, image_href, *args, **kwargs):
+ info['image_href_in_call'] = image_href
+
+ self.stubs.Set(db, 'instance_get',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
+ self.stubs.Set(compute_api.API, 'rebuild', rebuild)
+
+ body = {
+ 'rebuild': {
+ 'imageRef': self.image_uuid,
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank('/v2/fake/servers/a/action')
+ self.controller._action_rebuild(req, FAKE_UUID, body=body)
+ self.assertEqual(info['image_href_in_call'], self.image_uuid)
+
+ def test_rebuild_instance_with_image_href_uses_uuid(self):
+ info = dict(image_href_in_call=None)
+
+ def rebuild(self2, context, instance, image_href, *args, **kwargs):
+ info['image_href_in_call'] = image_href
+
+ self.stubs.Set(db, 'instance_get',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
+ self.stubs.Set(compute_api.API, 'rebuild', rebuild)
+
+ body = {
+ 'rebuild': {
+ 'imageRef': self.image_href,
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank('/v2/fake/servers/a/action')
+ self.controller._action_rebuild(req, FAKE_UUID, body=body)
+ self.assertEqual(info['image_href_in_call'], self.image_uuid)
+
+ def test_rebuild_accepted_minimum_pass_disabled(self):
+ # run with enable_instance_password disabled to verify admin_password
+ # is missing from response. See lp bug 921814
+ self.flags(enable_instance_password=False)
+
+ return_server = fakes.fake_instance_get(image_ref='2',
+ vm_state=vm_states.ACTIVE, host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self_href = 'http://localhost/v3/servers/%s' % FAKE_UUID
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ robj = self.controller._action_rebuild(req, FAKE_UUID, body=body)
+ body = robj.obj
+
+ self.assertEqual(body['server']['image']['id'], '2')
+ self.assertNotIn("admin_password", body['server'])
+
+ self.assertEqual(robj['location'], self_href)
+
+ def test_rebuild_raises_conflict_on_invalid_state(self):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+
+ def fake_rebuild(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body=body)
+
+ def test_rebuild_accepted_with_metadata(self):
+ metadata = {'new': 'metadata'}
+
+ return_server = fakes.fake_instance_get(metadata=metadata,
+ vm_state=vm_states.ACTIVE, host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "metadata": metadata,
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ body = self.controller._action_rebuild(req, FAKE_UUID, body=body).obj
+
+ self.assertEqual(body['server']['metadata'], metadata)
+
+ def test_rebuild_accepted_with_bad_metadata(self):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "metadata": "stack",
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(exception.ValidationError,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body=body)
+
+ def test_rebuild_with_too_large_metadata(self):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "metadata": {
+ 256 * "k": "value"
+ }
+ }
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(exception.ValidationError,
+ self.controller._action_rebuild, req,
+ FAKE_UUID, body=body)
+
+ def test_rebuild_bad_entity(self):
+ body = {
+ "rebuild": {
+ "imageId": self._image_href,
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(exception.ValidationError,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body=body)
+
+ def test_rebuild_admin_password(self):
+ return_server = fakes.fake_instance_get(image_ref='2',
+ vm_state=vm_states.ACTIVE, host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "adminPass": "asdf",
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ body = self.controller._action_rebuild(req, FAKE_UUID, body=body).obj
+
+ self.assertEqual(body['server']['image']['id'], '2')
+ self.assertEqual(body['server']['adminPass'], 'asdf')
+
+ def test_rebuild_admin_password_pass_disabled(self):
+ # run with enable_instance_password disabled to verify admin_password
+ # is missing from response. See lp bug 921814
+ self.flags(enable_instance_password=False)
+
+ return_server = fakes.fake_instance_get(image_ref='2',
+ vm_state=vm_states.ACTIVE, host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "admin_password": "asdf",
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ body = self.controller._action_rebuild(req, FAKE_UUID, body=body).obj
+
+ self.assertEqual(body['server']['image']['id'], '2')
+ self.assertNotIn('adminPass', body['server'])
+
+ def test_rebuild_server_not_found(self):
+ def server_not_found(self, instance_id,
+ columns_to_join=None, use_slave=False):
+ raise exception.InstanceNotFound(instance_id=instance_id)
+ self.stubs.Set(db, 'instance_get_by_uuid', server_not_found)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body=body)
+
+ def test_rebuild_with_bad_image(self):
+ body = {
+ "rebuild": {
+ "imageRef": "foo",
+ },
+ }
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body=body)
+
+ def test_rebuild_when_kernel_not_exists(self):
+
+ def return_image_meta(*args, **kwargs):
+ image_meta_table = {
+ '2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
+ '155d900f-4e14-4e4c-a73d-069cbf4541e6':
+ {'id': 3, 'status': 'active', 'container_format': 'raw',
+ 'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
+ }
+ image_id = args[2]
+ try:
+ image_meta = image_meta_table[str(image_id)]
+ except KeyError:
+ raise exception.ImageNotFound(image_id=image_id)
+
+ return image_meta
+
+ self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
+ body = {
+ "rebuild": {
+ "imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ },
+ }
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body=body)
+
+ def test_rebuild_proper_kernel_ram(self):
+ instance_meta = {'kernel_id': None, 'ramdisk_id': None}
+
+ orig_get = compute_api.API.get
+
+ def wrap_get(*args, **kwargs):
+ inst = orig_get(*args, **kwargs)
+ instance_meta['instance'] = inst
+ return inst
+
+ def fake_save(context, **kwargs):
+ instance = instance_meta['instance']
+ for key in instance_meta.keys():
+ if key in instance.obj_what_changed():
+ instance_meta[key] = instance[key]
+
+ def return_image_meta(*args, **kwargs):
+ image_meta_table = {
+ '1': {'id': 1, 'status': 'active', 'container_format': 'aki'},
+ '2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
+ '155d900f-4e14-4e4c-a73d-069cbf4541e6':
+ {'id': 3, 'status': 'active', 'container_format': 'raw',
+ 'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
+ }
+ image_id = args[2]
+ try:
+ image_meta = image_meta_table[str(image_id)]
+ except KeyError:
+ raise exception.ImageNotFound(image_id=image_id)
+
+ return image_meta
+
+ self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
+ self.stubs.Set(compute_api.API, 'get', wrap_get)
+ self.stubs.Set(objects.Instance, 'save', fake_save)
+ body = {
+ "rebuild": {
+ "imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ },
+ }
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.controller._action_rebuild(req, FAKE_UUID, body=body).obj
+ self.assertEqual(instance_meta['kernel_id'], '1')
+ self.assertEqual(instance_meta['ramdisk_id'], '2')
+
+ def _test_rebuild_preserve_ephemeral(self, value=None):
+ return_server = fakes.fake_instance_get(image_ref='2',
+ vm_state=vm_states.ACTIVE,
+ host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+ if value is not None:
+ body['rebuild']['preserve_ephemeral'] = value
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ context = req.environ['nova.context']
+
+ self.mox.StubOutWithMock(compute_api.API, 'rebuild')
+ if value is not None:
+ compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
+ mox.IgnoreArg(), preserve_ephemeral=value)
+ else:
+ compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
+ mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ self.controller._action_rebuild(req, FAKE_UUID, body=body)
+
+ def test_rebuild_preserve_ephemeral_true(self):
+ self._test_rebuild_preserve_ephemeral(True)
+
+ def test_rebuild_preserve_ephemeral_false(self):
+ self._test_rebuild_preserve_ephemeral(False)
+
+ def test_rebuild_preserve_ephemeral_default(self):
+ self._test_rebuild_preserve_ephemeral()
+
+ @mock.patch.object(compute_api.API, 'rebuild',
+ side_effect=exception.AutoDiskConfigDisabledByImage(
+ image='dummy'))
+ def test_rebuild_instance_raise_auto_disk_config_exc(self, mock_rebuild):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body=body)
+
+ def test_resize_server(self):
+
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ self.resize_called = False
+
+ def resize_mock(*args):
+ self.resize_called = True
+
+ self.stubs.Set(compute_api.API, 'resize', resize_mock)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ body = self.controller._action_resize(req, FAKE_UUID, body=body)
+
+ self.assertEqual(self.resize_called, True)
+
+ def test_resize_server_no_flavor(self):
+ body = dict(resize=dict())
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(exception.ValidationError,
+ self.controller._action_resize,
+ req, FAKE_UUID, body=body)
+
+ def test_resize_server_no_flavor_ref(self):
+ body = dict(resize=dict(flavorRef=None))
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(exception.ValidationError,
+ self.controller._action_resize,
+ req, FAKE_UUID, body=body)
+
+ def test_resize_with_server_not_found(self):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ self.stubs.Set(compute_api.API, 'get', return_server_not_found)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._action_resize,
+ req, FAKE_UUID, body=body)
+
+ def test_resize_with_image_exceptions(self):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+ self.resize_called = 0
+ image_id = 'fake_image_id'
+
+ exceptions = [
+ (exception.ImageNotAuthorized(image_id=image_id),
+ webob.exc.HTTPUnauthorized),
+ (exception.ImageNotFound(image_id=image_id),
+ webob.exc.HTTPBadRequest),
+ (exception.Invalid, webob.exc.HTTPBadRequest),
+ (exception.NoValidHost(reason='Bad host'),
+ webob.exc.HTTPBadRequest),
+ (exception.AutoDiskConfigDisabledByImage(image=image_id),
+ webob.exc.HTTPBadRequest),
+ ]
+
+ raised, expected = map(iter, zip(*exceptions))
+
+ def _fake_resize(obj, context, instance, flavor_id):
+ self.resize_called += 1
+ raise raised.next()
+
+ self.stubs.Set(compute_api.API, 'resize', _fake_resize)
+
+ for call_no in range(len(exceptions)):
+ req = fakes.HTTPRequestV3.blank(self.url)
+ next_exception = expected.next()
+ actual = self.assertRaises(next_exception,
+ self.controller._action_resize,
+ req, FAKE_UUID, body=body)
+ if (isinstance(exceptions[call_no][0],
+ exception.NoValidHost)):
+ self.assertEqual(actual.explanation,
+ 'No valid host was found. Bad host')
+ elif (isinstance(exceptions[call_no][0],
+ exception.AutoDiskConfigDisabledByImage)):
+ self.assertEqual(actual.explanation,
+ 'Requested image fake_image_id has automatic'
+ ' disk resize disabled.')
+ self.assertEqual(self.resize_called, call_no + 1)
+
+ def test_resize_with_too_many_instances(self):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ def fake_resize(*args, **kwargs):
+ raise exception.TooManyInstances(message="TooManyInstance")
+
+ self.stubs.Set(compute_api.API, 'resize', fake_resize)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller._action_resize,
+ req, FAKE_UUID, body=body)
+
+ @mock.patch('nova.compute.api.API.resize',
+ side_effect=exception.CannotResizeDisk(reason=''))
+ def test_resize_raises_cannot_resize_disk(self, mock_resize):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_resize,
+ req, FAKE_UUID, body=body)
+
+ @mock.patch('nova.compute.api.API.resize',
+ side_effect=exception.FlavorNotFound(reason='',
+ flavor_id='fake_id'))
+ def test_resize_raises_flavor_not_found(self, mock_resize):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_resize,
+ req, FAKE_UUID, body=body)
+
+ def test_resize_raises_conflict_on_invalid_state(self):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ def fake_resize(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ self.stubs.Set(compute_api.API, 'resize', fake_resize)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_resize,
+ req, FAKE_UUID, body=body)
+
+ def test_confirm_resize_server(self):
+ body = dict(confirmResize=None)
+
+ self.confirm_resize_called = False
+
+ def cr_mock(*args):
+ self.confirm_resize_called = True
+
+ self.stubs.Set(compute_api.API, 'confirm_resize', cr_mock)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ body = self.controller._action_confirm_resize(req, FAKE_UUID, body)
+
+ self.assertEqual(self.confirm_resize_called, True)
+
+ def test_confirm_resize_migration_not_found(self):
+ body = dict(confirmResize=None)
+
+ def confirm_resize_mock(*args):
+ raise exception.MigrationNotFoundByStatus(instance_id=1,
+ status='finished')
+
+ self.stubs.Set(compute_api.API,
+ 'confirm_resize',
+ confirm_resize_mock)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_confirm_resize,
+ req, FAKE_UUID, body)
+
+ def test_confirm_resize_raises_conflict_on_invalid_state(self):
+ body = dict(confirmResize=None)
+
+ def fake_confirm_resize(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ self.stubs.Set(compute_api.API, 'confirm_resize',
+ fake_confirm_resize)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_confirm_resize,
+ req, FAKE_UUID, body)
+
+ def test_revert_resize_migration_not_found(self):
+ body = dict(revertResize=None)
+
+ def revert_resize_mock(*args):
+ raise exception.MigrationNotFoundByStatus(instance_id=1,
+ status='finished')
+
+ self.stubs.Set(compute_api.API,
+ 'revert_resize',
+ revert_resize_mock)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_revert_resize,
+ req, FAKE_UUID, body)
+
+ def test_revert_resize_server_not_found(self):
+ body = dict(revertResize=None)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+        self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._action_revert_resize,
+ req, "bad_server_id", body)
+
+ def test_revert_resize_server(self):
+ body = dict(revertResize=None)
+
+ self.revert_resize_called = False
+
+ def revert_mock(*args):
+ self.revert_resize_called = True
+
+ self.stubs.Set(compute_api.API, 'revert_resize', revert_mock)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ body = self.controller._action_revert_resize(req, FAKE_UUID, body)
+
+ self.assertEqual(self.revert_resize_called, True)
+
+ def test_revert_resize_raises_conflict_on_invalid_state(self):
+ body = dict(revertResize=None)
+
+ def fake_revert_resize(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ self.stubs.Set(compute_api.API, 'revert_resize',
+ fake_revert_resize)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_revert_resize,
+ req, FAKE_UUID, body)
+
+ def test_create_image(self):
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ response = self.controller._action_create_image(req, FAKE_UUID, body)
+
+ location = response.headers['Location']
+ self.assertEqual(glance.generate_image_url('123'), location)
+
+ def test_create_image_name_too_long(self):
+ long_name = 'a' * 260
+ body = {
+ 'createImage': {
+ 'name': long_name,
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image, req,
+ FAKE_UUID, body)
+
+ def _do_test_create_volume_backed_image(self, extra_properties):
+
+ def _fake_id(x):
+ return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
+
+ body = dict(createImage=dict(name='snapshot_of_volume_backed'))
+
+ if extra_properties:
+ body['createImage']['metadata'] = extra_properties
+
+ image_service = glance.get_default_image_service()
+
+ bdm = [dict(volume_id=_fake_id('a'),
+ volume_size=1,
+ device_name='vda',
+ delete_on_termination=False)]
+ props = dict(kernel_id=_fake_id('b'),
+ ramdisk_id=_fake_id('c'),
+ root_device_name='/dev/vda',
+ block_device_mapping=bdm)
+ original_image = dict(properties=props,
+ container_format='ami',
+ status='active',
+ is_public=True)
+
+ image_service.create(None, original_image)
+
+ def fake_block_device_mapping_get_all_by_instance(context, inst_id,
+ use_slave=False):
+ return [fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': _fake_id('a'),
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'volume_size': 1,
+ 'device_name': 'vda',
+ 'snapshot_id': 1,
+ 'boot_index': 0,
+ 'delete_on_termination': False,
+ 'no_device': None})]
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_block_device_mapping_get_all_by_instance)
+
+ instance = fakes.fake_instance_get(image_ref=original_image['id'],
+ vm_state=vm_states.ACTIVE,
+ root_device_name='/dev/vda')
+ self.stubs.Set(db, 'instance_get_by_uuid', instance)
+
+ volume = dict(id=_fake_id('a'),
+ size=1,
+ host='fake',
+ display_description='fake')
+ snapshot = dict(id=_fake_id('d'))
+ self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
+ volume_api = self.controller.compute_api.volume_api
+ volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
+ volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
+
+ self.mox.ReplayAll()
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ response = self.controller._action_create_image(req, FAKE_UUID, body)
+
+ location = response.headers['Location']
+ image_id = location.replace(glance.generate_image_url(''), '')
+ image = image_service.show(None, image_id)
+
+ self.assertEqual(image['name'], 'snapshot_of_volume_backed')
+ properties = image['properties']
+ self.assertEqual(properties['kernel_id'], _fake_id('b'))
+ self.assertEqual(properties['ramdisk_id'], _fake_id('c'))
+ self.assertEqual(properties['root_device_name'], '/dev/vda')
+ self.assertEqual(properties['bdm_v2'], True)
+ bdms = properties['block_device_mapping']
+ self.assertEqual(len(bdms), 1)
+ self.assertEqual(bdms[0]['boot_index'], 0)
+ self.assertEqual(bdms[0]['source_type'], 'snapshot')
+ self.assertEqual(bdms[0]['destination_type'], 'volume')
+ self.assertEqual(bdms[0]['snapshot_id'], snapshot['id'])
+ for fld in ('connection_info', 'id',
+ 'instance_uuid', 'device_name'):
+ self.assertNotIn(fld, bdms[0])
+ for k in extra_properties.keys():
+ self.assertEqual(properties[k], extra_properties[k])
+
+ def test_create_volume_backed_image_no_metadata(self):
+ self._do_test_create_volume_backed_image({})
+
+ def test_create_volume_backed_image_with_metadata(self):
+ self._do_test_create_volume_backed_image(dict(ImageType='Gold',
+ ImageVersion='2.0'))
+
+ def _test_create_volume_backed_image_with_metadata_from_volume(
+ self, extra_metadata=None):
+
+ def _fake_id(x):
+ return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
+
+ body = dict(createImage=dict(name='snapshot_of_volume_backed'))
+ if extra_metadata:
+ body['createImage']['metadata'] = extra_metadata
+
+ image_service = glance.get_default_image_service()
+
+ def fake_block_device_mapping_get_all_by_instance(context, inst_id,
+ use_slave=False):
+ return [fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': _fake_id('a'),
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'volume_size': 1,
+ 'device_name': 'vda',
+ 'snapshot_id': 1,
+ 'boot_index': 0,
+ 'delete_on_termination': False,
+ 'no_device': None})]
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_block_device_mapping_get_all_by_instance)
+
+ instance = fakes.fake_instance_get(image_ref='',
+ vm_state=vm_states.ACTIVE,
+ root_device_name='/dev/vda')
+ self.stubs.Set(db, 'instance_get_by_uuid', instance)
+
+ fake_metadata = {'test_key1': 'test_value1',
+ 'test_key2': 'test_value2'}
+ volume = dict(id=_fake_id('a'),
+ size=1,
+ host='fake',
+ display_description='fake',
+ volume_image_metadata=fake_metadata)
+ snapshot = dict(id=_fake_id('d'))
+ self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
+ volume_api = self.controller.compute_api.volume_api
+ volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
+ volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
+ volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+
+ self.mox.ReplayAll()
+ response = self.controller._action_create_image(req, FAKE_UUID, body)
+ location = response.headers['Location']
+ image_id = location.replace('http://localhost:9292/images/', '')
+ image = image_service.show(None, image_id)
+
+ properties = image['properties']
+ self.assertEqual(properties['test_key1'], 'test_value1')
+ self.assertEqual(properties['test_key2'], 'test_value2')
+ if extra_metadata:
+ for key, val in extra_metadata.items():
+ self.assertEqual(properties[key], val)
+
+ def test_create_vol_backed_img_with_meta_from_vol_without_extra_meta(self):
+ self._test_create_volume_backed_image_with_metadata_from_volume()
+
+ def test_create_vol_backed_img_with_meta_from_vol_with_extra_meta(self):
+ self._test_create_volume_backed_image_with_metadata_from_volume(
+ extra_metadata={'a': 'b'})
+
+ def test_create_image_snapshots_disabled(self):
+ """Don't permit a snapshot if the allow_instance_snapshots flag is
+ False
+ """
+ self.flags(allow_instance_snapshots=False)
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ },
+ }
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_with_metadata(self):
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ 'metadata': {'key': 'asdf'},
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ response = self.controller._action_create_image(req, FAKE_UUID, body)
+
+ location = response.headers['Location']
+ self.assertEqual(glance.generate_image_url('123'), location)
+
+ def test_create_image_with_too_much_metadata(self):
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ 'metadata': {},
+ },
+ }
+ for num in range(CONF.quota_metadata_items + 1):
+ body['createImage']['metadata']['foo%i' % num] = "bar"
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_no_name(self):
+ body = {
+ 'createImage': {},
+ }
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_blank_name(self):
+ body = {
+ 'createImage': {
+ 'name': '',
+ }
+ }
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_bad_metadata(self):
+ body = {
+ 'createImage': {
+ 'name': 'geoff',
+ 'metadata': 'henry',
+ },
+ }
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_raises_conflict_on_invalid_state(self):
+ def snapshot(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+ self.stubs.Set(compute_api.API, 'snapshot', snapshot)
+
+ body = {
+ "createImage": {
+ "name": "test_snapshot",
+ },
+ }
+
+ req = fakes.HTTPRequestV3.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_server_external_events.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_server_external_events.py
index e9bd4538a0..e9bd4538a0 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_server_external_events.py
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_server_external_events.py
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_server_password.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_server_password.py
new file mode 100644
index 0000000000..20a8c1e0a1
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_server_password.py
@@ -0,0 +1,80 @@
+# Copyright 2012 Nebula, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.metadata import password
+from nova import compute
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+
+CONF = cfg.CONF
+
+
+class ServerPasswordTest(test.TestCase):
+ content_type = 'application/json'
+
+ def setUp(self):
+ super(ServerPasswordTest, self).setUp()
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(
+ compute.api.API, 'get',
+ lambda self, ctxt, *a, **kw:
+ fake_instance.fake_instance_obj(
+ ctxt,
+ system_metadata={},
+ expected_attrs=['system_metadata']))
+ self.password = 'fakepass'
+
+ def fake_extract_password(instance):
+ return self.password
+
+ def fake_convert_password(context, password):
+ self.password = password
+ return {}
+
+ self.stubs.Set(password, 'extract_password', fake_extract_password)
+ self.stubs.Set(password, 'convert_password', fake_convert_password)
+
+ def _make_request(self, url, method='GET'):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ req.method = method
+ res = req.get_response(
+ fakes.wsgi_app_v21(init_only=('servers', 'os-server-password')))
+ return res
+
+ def _get_pass(self, body):
+ return jsonutils.loads(body).get('password')
+
+ def test_get_password(self):
+ url = '/v2/fake/servers/fake/os-server-password'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(self._get_pass(res.body), 'fakepass')
+
+ def test_reset_password(self):
+ url = '/v2/fake/servers/fake/os-server-password'
+ res = self._make_request(url, 'DELETE')
+ self.assertEqual(res.status_int, 204)
+
+ res = self._make_request(url)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(self._get_pass(res.body), '')
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_servers.py
new file mode 100644
index 0000000000..6eb92902fe
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_servers.py
@@ -0,0 +1,3353 @@
+# Copyright 2010-2011 OpenStack Foundation
+# Copyright 2011 Piston Cloud Computing, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import contextlib
+import copy
+import datetime
+import uuid
+
+import iso8601
+import mock
+import mox
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import six.moves.urllib.parse as urlparse
+import testtools
+import webob
+
+from nova.api.openstack import compute
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import disk_config
+from nova.api.openstack.compute.plugins.v3 import ips
+from nova.api.openstack.compute.plugins.v3 import keypairs
+from nova.api.openstack.compute.plugins.v3 import servers
+from nova.api.openstack.compute.schemas.v3 import disk_config as \
+ disk_config_schema
+from nova.api.openstack.compute.schemas.v3 import servers as servers_schema
+from nova.api.openstack.compute import views
+from nova.api.openstack import extensions
+from nova.compute import api as compute_api
+from nova.compute import delete_types
+from nova.compute import flavors
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import models
+from nova import exception
+from nova.i18n import _
+from nova.image import glance
+from nova.network import manager
+from nova.network.neutronv2 import api as neutron_api
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova.openstack.common import policy as common_policy
+from nova import policy
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network
+from nova.tests.unit.image import fake
+from nova.tests.unit import matchers
+from nova import utils as nova_utils
+
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
+
+FAKE_UUID = fakes.FAKE_UUID
+
+INSTANCE_IDS = {FAKE_UUID: 1}
+FIELDS = instance_obj.INSTANCE_DEFAULT_FIELDS
+
+
+def fake_gen_uuid():
+ return FAKE_UUID
+
+
+def return_servers_empty(context, *args, **kwargs):
+ return []
+
+
+def instance_update_and_get_original(context, instance_uuid, values,
+ update_cells=True,
+ columns_to_join=None,
+ ):
+ inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
+ name=values.get('display_name'))
+ inst = dict(inst, **values)
+ return (inst, inst)
+
+
+def instance_update(context, instance_uuid, values, update_cells=True):
+ inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
+ name=values.get('display_name'))
+ inst = dict(inst, **values)
+ return inst
+
+
+def fake_compute_api(cls, req, id):
+ return True
+
+
+def fake_start_stop_not_ready(self, context, instance):
+ raise exception.InstanceNotReady(instance_id=instance["uuid"])
+
+
+def fake_start_stop_invalid_state(self, context, instance):
+ raise exception.InstanceInvalidState(
+ instance_uuid=instance['uuid'], attr='fake_attr',
+ method='fake_method', state='fake_state')
+
+
+def fake_instance_get_by_uuid_not_found(context, uuid,
+ columns_to_join, use_slave=False):
+ raise exception.InstanceNotFound(instance_id=uuid)
+
+
+class MockSetAdminPassword(object):
+ def __init__(self):
+ self.instance_id = None
+ self.password = None
+
+ def __call__(self, context, instance_id, password):
+ self.instance_id = instance_id
+ self.password = password
+
+
+class Base64ValidationTest(test.TestCase):
+ def setUp(self):
+ super(Base64ValidationTest, self).setUp()
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers.ServersController(extension_info=ext_info)
+
+ def test_decode_base64(self):
+ value = "A random string"
+ result = self.controller._decode_base64(base64.b64encode(value))
+ self.assertEqual(result, value)
+
+ def test_decode_base64_binary(self):
+ value = "\x00\x12\x75\x99"
+ result = self.controller._decode_base64(base64.b64encode(value))
+ self.assertEqual(result, value)
+
+ def test_decode_base64_whitespace(self):
+ value = "A random string"
+ encoded = base64.b64encode(value)
+ white = "\n \n%s\t%s\n" % (encoded[:2], encoded[2:])
+ result = self.controller._decode_base64(white)
+ self.assertEqual(result, value)
+
+ def test_decode_base64_invalid(self):
+ invalid = "A random string"
+ result = self.controller._decode_base64(invalid)
+ self.assertIsNone(result)
+
+ def test_decode_base64_illegal_bytes(self):
+ value = "A random string"
+ encoded = base64.b64encode(value)
+ white = ">\x01%s*%s()" % (encoded[:2], encoded[2:])
+ result = self.controller._decode_base64(white)
+ self.assertIsNone(result)
+
+
+class NeutronV2Subclass(neutron_api.API):
+ """Used to ensure that API handles subclasses properly."""
+ pass
+
+
+class ControllerTest(test.TestCase):
+
+ def setUp(self):
+ super(ControllerTest, self).setUp()
+ self.flags(verbose=True, use_ipv6=False)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+ return_server = fakes.fake_instance_get()
+ return_servers = fakes.fake_instance_get_all_by_filters()
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ return_servers)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ return_server)
+ self.stubs.Set(db, 'instance_update_and_get_original',
+ instance_update_and_get_original)
+
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers.ServersController(extension_info=ext_info)
+ self.ips_controller = ips.IPsController()
+ policy.reset()
+ policy.init()
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+
+
+class ServersControllerTest(ControllerTest):
+
+ def setUp(self):
+ super(ServersControllerTest, self).setUp()
+ CONF.set_override('host', 'localhost', group='glance')
+
+ def test_requested_networks_prefix(self):
+ uuid = 'br-00000000-0000-0000-0000-000000000000'
+ requested_networks = [{'uuid': uuid}]
+ res = self.controller._get_requested_networks(requested_networks)
+ self.assertIn((uuid, None), res.as_tuples())
+
+ def test_requested_networks_neutronv2_enabled_with_port(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'port': port}]
+ res = self.controller._get_requested_networks(requested_networks)
+ self.assertEqual([(None, None, port, None)], res.as_tuples())
+
+ def test_requested_networks_neutronv2_enabled_with_network(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ requested_networks = [{'uuid': network}]
+ res = self.controller._get_requested_networks(requested_networks)
+ self.assertEqual([(network, None, None, None)], res.as_tuples())
+
+ def test_requested_networks_neutronv2_enabled_with_network_and_port(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network, 'port': port}]
+ res = self.controller._get_requested_networks(requested_networks)
+ self.assertEqual([(None, None, port, None)], res.as_tuples())
+
+ def test_requested_networks_neutronv2_enabled_conflict_on_fixed_ip(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ addr = '10.0.0.1'
+ requested_networks = [{'uuid': network,
+ 'fixed_ip': addr,
+ 'port': port}]
+ self.assertRaises(
+ webob.exc.HTTPBadRequest,
+ self.controller._get_requested_networks,
+ requested_networks)
+
+ def test_requested_networks_neutronv2_disabled_with_port(self):
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'port': port}]
+ self.assertRaises(
+ webob.exc.HTTPBadRequest,
+ self.controller._get_requested_networks,
+ requested_networks)
+
+ def test_requested_networks_api_enabled_with_v2_subclass(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network, 'port': port}]
+ res = self.controller._get_requested_networks(requested_networks)
+ self.assertEqual([(None, None, port, None)], res.as_tuples())
+
+ def test_requested_networks_neutronv2_subclass_with_port(self):
+ cls = ('nova.tests.unit.api.openstack.compute' +
+ '.test_servers.NeutronV2Subclass')
+ self.flags(network_api_class=cls)
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'port': port}]
+ res = self.controller._get_requested_networks(requested_networks)
+ self.assertEqual([(None, None, port, None)], res.as_tuples())
+
+ def test_get_server_by_uuid(self):
+ req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
+ res_dict = self.controller.show(req, FAKE_UUID)
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+
+ def test_get_server_joins_pci_devices(self):
+ self.expected_attrs = None
+
+ def fake_get(_self, *args, **kwargs):
+ self.expected_attrs = kwargs['expected_attrs']
+ ctxt = context.RequestContext('fake', 'fake')
+ return fake_instance.fake_instance_obj(ctxt)
+
+ self.stubs.Set(compute_api.API, 'get', fake_get)
+
+ req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
+ self.controller.show(req, FAKE_UUID)
+
+ self.assertIn('pci_devices', self.expected_attrs)
+
+ def test_unique_host_id(self):
+ """Create two servers with the same host and different
+ project_ids and check that the host_id's are unique.
+ """
+ def return_instance_with_host(self, *args, **kwargs):
+ project_id = str(uuid.uuid4())
+ return fakes.stub_instance(id=1, uuid=FAKE_UUID,
+ project_id=project_id,
+ host='fake_host')
+
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ return_instance_with_host)
+ self.stubs.Set(db, 'instance_get',
+ return_instance_with_host)
+
+ req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
+ server1 = self.controller.show(req, FAKE_UUID)
+ server2 = self.controller.show(req, FAKE_UUID)
+
+ self.assertNotEqual(server1['server']['hostId'],
+ server2['server']['hostId'])
+
+ def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
+ status="ACTIVE", progress=100):
+ return {
+ "server": {
+ "id": uuid,
+ "user_id": "fake_user",
+ "tenant_id": "fake_project",
+ "updated": "2010-11-11T11:00:00Z",
+ "created": "2010-10-10T12:00:00Z",
+ "progress": progress,
+ "name": "server1",
+ "status": status,
+ "hostId": '',
+ "image": {
+ "id": "10",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": image_bookmark,
+ },
+ ],
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": flavor_bookmark,
+ },
+ ],
+ },
+ "addresses": {
+ 'test1': [
+ {'version': 4, 'addr': '192.168.1.100',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
+ {'version': 6, 'addr': '2001:db8:0:1::1',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}
+ ]
+ },
+ "metadata": {
+ "seq": "1",
+ },
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v3/servers/%s" % uuid,
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/servers/%s" % uuid,
+ },
+ ],
+ }
+ }
+
+ def test_get_server_by_id(self):
+ self.flags(use_ipv6=True)
+ image_bookmark = "http://localhost/images/10"
+ flavor_bookmark = "http://localhost/flavors/1"
+
+ uuid = FAKE_UUID
+ req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
+ res_dict = self.controller.show(req, uuid)
+
+ expected_server = self._get_server_data_dict(uuid,
+ image_bookmark,
+ flavor_bookmark,
+ status="BUILD",
+ progress=0)
+
+ self.assertThat(res_dict, matchers.DictMatches(expected_server))
+
+ def test_get_server_with_active_status_by_id(self):
+ image_bookmark = "http://localhost/images/10"
+ flavor_bookmark = "http://localhost/flavors/1"
+
+ new_return_server = fakes.fake_instance_get(
+ vm_state=vm_states.ACTIVE, progress=100)
+ self.stubs.Set(db, 'instance_get_by_uuid', new_return_server)
+
+ uuid = FAKE_UUID
+ req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
+ res_dict = self.controller.show(req, uuid)
+ expected_server = self._get_server_data_dict(uuid,
+ image_bookmark,
+ flavor_bookmark)
+ self.assertThat(res_dict, matchers.DictMatches(expected_server))
+
+ def test_get_server_with_id_image_ref_by_id(self):
+ image_ref = "10"
+ image_bookmark = "http://localhost/images/10"
+ flavor_id = "1"
+ flavor_bookmark = "http://localhost/flavors/1"
+
+ new_return_server = fakes.fake_instance_get(
+ vm_state=vm_states.ACTIVE, image_ref=image_ref,
+ flavor_id=flavor_id, progress=100)
+ self.stubs.Set(db, 'instance_get_by_uuid', new_return_server)
+
+ uuid = FAKE_UUID
+ req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
+ res_dict = self.controller.show(req, uuid)
+ expected_server = self._get_server_data_dict(uuid,
+ image_bookmark,
+ flavor_bookmark)
+
+ self.assertThat(res_dict, matchers.DictMatches(expected_server))
+
+ def test_get_server_addresses_from_cache(self):
+ pub0 = ('172.19.0.1', '172.19.0.2',)
+ pub1 = ('1.2.3.4',)
+ pub2 = ('b33f::fdee:ddff:fecc:bbaa',)
+ priv0 = ('192.168.0.3', '192.168.0.4',)
+
+ def _ip(ip):
+ return {'address': ip, 'type': 'fixed'}
+
+ nw_cache = [
+ {'address': 'aa:aa:aa:aa:aa:aa',
+ 'id': 1,
+ 'network': {'bridge': 'br0',
+ 'id': 1,
+ 'label': 'public',
+ 'subnets': [{'cidr': '172.19.0.0/24',
+ 'ips': [_ip(ip) for ip in pub0]},
+ {'cidr': '1.2.3.0/16',
+ 'ips': [_ip(ip) for ip in pub1]},
+ {'cidr': 'b33f::/64',
+ 'ips': [_ip(ip) for ip in pub2]}]}},
+ {'address': 'bb:bb:bb:bb:bb:bb',
+ 'id': 2,
+ 'network': {'bridge': 'br1',
+ 'id': 2,
+ 'label': 'private',
+ 'subnets': [{'cidr': '192.168.0.0/24',
+ 'ips': [_ip(ip) for ip in priv0]}]}}]
+
+ return_server = fakes.fake_instance_get(nw_cache=nw_cache)
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ req = fakes.HTTPRequestV3.blank('/servers/%s/ips' % FAKE_UUID)
+ res_dict = self.ips_controller.index(req, FAKE_UUID)
+
+ expected = {
+ 'addresses': {
+ 'private': [
+ {'version': 4, 'addr': '192.168.0.3',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'},
+ {'version': 4, 'addr': '192.168.0.4',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'bb:bb:bb:bb:bb:bb'},
+ ],
+ 'public': [
+ {'version': 4, 'addr': '172.19.0.1',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
+ {'version': 4, 'addr': '172.19.0.2',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
+ {'version': 4, 'addr': '1.2.3.4',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
+ {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa',
+ 'OS-EXT-IPS:type': 'fixed',
+ 'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
+ ],
+ },
+ }
+ self.assertThat(res_dict, matchers.DictMatches(expected))
+
+ def test_get_server_addresses_nonexistent_network(self):
+ url = '/v3/servers/%s/ips/network_0' % FAKE_UUID
+ req = fakes.HTTPRequestV3.blank(url)
+ self.assertRaises(webob.exc.HTTPNotFound, self.ips_controller.show,
+ req, FAKE_UUID, 'network_0')
+
+ def test_get_server_addresses_nonexistent_server(self):
+ def fake_instance_get(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+
+ server_id = str(uuid.uuid4())
+ req = fakes.HTTPRequestV3.blank('/servers/%s/ips' % server_id)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.ips_controller.index, req, server_id)
+
+ def test_get_server_list_empty(self):
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ return_servers_empty)
+
+ req = fakes.HTTPRequestV3.blank('/servers')
+ res_dict = self.controller.index(req)
+
+ num_servers = len(res_dict['servers'])
+ self.assertEqual(0, num_servers)
+
+ def test_get_server_list_with_reservation_id(self):
+ req = fakes.HTTPRequestV3.blank('/servers?reservation_id=foo')
+ res_dict = self.controller.index(req)
+
+ i = 0
+ for s in res_dict['servers']:
+ self.assertEqual(s.get('name'), 'server%d' % (i + 1))
+ i += 1
+
+ def test_get_server_list_with_reservation_id_empty(self):
+ req = fakes.HTTPRequestV3.blank('/servers/detail?'
+ 'reservation_id=foo')
+ res_dict = self.controller.detail(req)
+
+ i = 0
+ for s in res_dict['servers']:
+ self.assertEqual(s.get('name'), 'server%d' % (i + 1))
+ i += 1
+
+ def test_get_server_list_with_reservation_id_details(self):
+ req = fakes.HTTPRequestV3.blank('/servers/detail?'
+ 'reservation_id=foo')
+ res_dict = self.controller.detail(req)
+
+ i = 0
+ for s in res_dict['servers']:
+ self.assertEqual(s.get('name'), 'server%d' % (i + 1))
+ i += 1
+
+ def test_get_server_list(self):
+ req = fakes.HTTPRequestV3.blank('/servers')
+ res_dict = self.controller.index(req)
+
+ self.assertEqual(len(res_dict['servers']), 5)
+ for i, s in enumerate(res_dict['servers']):
+ self.assertEqual(s['id'], fakes.get_fake_uuid(i))
+ self.assertEqual(s['name'], 'server%d' % (i + 1))
+ self.assertIsNone(s.get('image', None))
+
+ expected_links = [
+ {
+ "rel": "self",
+ "href": "http://localhost/v3/servers/%s" % s['id'],
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/servers/%s" % s['id'],
+ },
+ ]
+
+ self.assertEqual(s['links'], expected_links)
+
+ def test_get_servers_with_limit(self):
+ req = fakes.HTTPRequestV3.blank('/servers?limit=3')
+ res_dict = self.controller.index(req)
+
+ servers = res_dict['servers']
+ self.assertEqual([s['id'] for s in servers],
+ [fakes.get_fake_uuid(i) for i in xrange(len(servers))])
+
+ servers_links = res_dict['servers_links']
+ self.assertEqual(servers_links[0]['rel'], 'next')
+ href_parts = urlparse.urlparse(servers_links[0]['href'])
+ self.assertEqual('/v3/servers', href_parts.path)
+ params = urlparse.parse_qs(href_parts.query)
+ expected_params = {'limit': ['3'],
+ 'marker': [fakes.get_fake_uuid(2)]}
+ self.assertThat(params, matchers.DictMatches(expected_params))
+
+ def test_get_servers_with_limit_bad_value(self):
+ req = fakes.HTTPRequestV3.blank('/servers?limit=aaa')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.index, req)
+
+ def test_get_server_details_empty(self):
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ return_servers_empty)
+
+ req = fakes.HTTPRequestV3.blank('/servers/detail')
+ res_dict = self.controller.detail(req)
+
+ num_servers = len(res_dict['servers'])
+ self.assertEqual(0, num_servers)
+
+ def test_get_server_details_with_limit(self):
+ req = fakes.HTTPRequestV3.blank('/servers/detail?limit=3')
+ res = self.controller.detail(req)
+
+ servers = res['servers']
+ self.assertEqual([s['id'] for s in servers],
+ [fakes.get_fake_uuid(i) for i in xrange(len(servers))])
+
+ servers_links = res['servers_links']
+ self.assertEqual(servers_links[0]['rel'], 'next')
+
+ href_parts = urlparse.urlparse(servers_links[0]['href'])
+ self.assertEqual('/v3/servers/detail', href_parts.path)
+ params = urlparse.parse_qs(href_parts.query)
+ expected = {'limit': ['3'], 'marker': [fakes.get_fake_uuid(2)]}
+ self.assertThat(params, matchers.DictMatches(expected))
+
+ def test_get_server_details_with_limit_bad_value(self):
+ req = fakes.HTTPRequestV3.blank('/servers/detail?limit=aaa')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.detail, req)
+
+ def test_get_server_details_with_limit_and_other_params(self):
+ req = fakes.HTTPRequestV3.blank('/servers/detail'
+ '?limit=3&blah=2:t')
+ res = self.controller.detail(req)
+
+ servers = res['servers']
+ self.assertEqual([s['id'] for s in servers],
+ [fakes.get_fake_uuid(i) for i in xrange(len(servers))])
+
+ servers_links = res['servers_links']
+ self.assertEqual(servers_links[0]['rel'], 'next')
+
+ href_parts = urlparse.urlparse(servers_links[0]['href'])
+ self.assertEqual('/v3/servers/detail', href_parts.path)
+ params = urlparse.parse_qs(href_parts.query)
+ expected = {'limit': ['3'], 'blah': ['2:t'],
+ 'marker': [fakes.get_fake_uuid(2)]}
+ self.assertThat(params, matchers.DictMatches(expected))
+
+ def test_get_servers_with_too_big_limit(self):
+ req = fakes.HTTPRequestV3.blank('/servers?limit=30')
+ res_dict = self.controller.index(req)
+ self.assertNotIn('servers_links', res_dict)
+
+ def test_get_servers_with_bad_limit(self):
+ req = fakes.HTTPRequestV3.blank('/servers?limit=asdf')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.index, req)
+
+ def test_get_servers_with_marker(self):
+ url = '/v3/servers?marker=%s' % fakes.get_fake_uuid(2)
+ req = fakes.HTTPRequestV3.blank(url)
+ servers = self.controller.index(req)['servers']
+ self.assertEqual([s['name'] for s in servers], ["server4", "server5"])
+
+ def test_get_servers_with_limit_and_marker(self):
+ url = '/v3/servers?limit=2&marker=%s' % fakes.get_fake_uuid(1)
+ req = fakes.HTTPRequestV3.blank(url)
+ servers = self.controller.index(req)['servers']
+ self.assertEqual([s['name'] for s in servers], ['server3', 'server4'])
+
+ def test_get_servers_with_bad_marker(self):
+ req = fakes.HTTPRequestV3.blank('/servers?limit=2&marker=asdf')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.index, req)
+
+ def test_get_servers_with_bad_option(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?unknownoption=whee')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_allows_image(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('image', search_opts)
+ self.assertEqual(search_opts['image'], '12345')
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?image=12345')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_tenant_id_filter_converts_to_project_id_for_admin(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False,
+ expected_attrs=None):
+ self.assertIsNotNone(filters)
+ self.assertEqual(filters['project_id'], 'newfake')
+ self.assertFalse(filters.get('tenant_id'))
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers'
+ '?all_tenants=1&tenant_id=newfake',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_tenant_id_filter_no_admin_context(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False,
+ expected_attrs=None):
+ self.assertNotEqual(filters, None)
+ self.assertEqual(filters['project_id'], 'fake')
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?tenant_id=newfake')
+ res = self.controller.index(req)
+ self.assertIn('servers', res)
+
+ def test_tenant_id_filter_implies_all_tenants(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False,
+ expected_attrs=None):
+ self.assertNotEqual(filters, None)
+ # The project_id assertion checks that the project_id
+ # filter is set to that specified in the request url and
+ # not that of the context, verifying that the all_tenants
+ # flag was enabled
+ self.assertEqual(filters['project_id'], 'newfake')
+ self.assertFalse(filters.get('tenant_id'))
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?tenant_id=newfake',
+ use_admin_context=True)
+ res = self.controller.index(req)
+ self.assertIn('servers', res)
+
+ def test_all_tenants_param_normal(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False,
+ expected_attrs=None):
+ self.assertNotIn('project_id', filters)
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?all_tenants',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_all_tenants_param_one(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False,
+ expected_attrs=None):
+ self.assertNotIn('project_id', filters)
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?all_tenants=1',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_all_tenants_param_zero(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False,
+ expected_attrs=None):
+ self.assertNotIn('all_tenants', filters)
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?all_tenants=0',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_all_tenants_param_false(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False,
+ expected_attrs=None):
+ self.assertNotIn('all_tenants', filters)
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?all_tenants=false',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_all_tenants_param_invalid(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None,
+ expected_attrs=None):
+ self.assertNotIn('all_tenants', filters)
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?all_tenants=xxx',
+ use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.index, req)
+
+ def test_admin_restricted_tenant(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False,
+ expected_attrs=None):
+ self.assertIsNotNone(filters)
+ self.assertEqual(filters['project_id'], 'fake')
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers',
+ use_admin_context=True)
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_all_tenants_pass_policy(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None, use_slave=False,
+ expected_attrs=None):
+ self.assertIsNotNone(filters)
+ self.assertNotIn('project_id', filters)
+ return [fakes.stub_instance(100)]
+
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ rules = {
+ "compute:get_all_tenants":
+ common_policy.parse_rule("project_id:fake"),
+ "compute:get_all":
+ common_policy.parse_rule("project_id:fake"),
+ }
+
+ policy.set_rules(rules)
+
+ req = fakes.HTTPRequestV3.blank('/servers?all_tenants=1')
+ res = self.controller.index(req)
+
+ self.assertIn('servers', res)
+
+ def test_all_tenants_fail_policy(self):
+ def fake_get_all(context, filters=None, sort_key=None,
+ sort_dir='desc', limit=None, marker=None,
+ columns_to_join=None):
+ self.assertIsNotNone(filters)
+ return [fakes.stub_instance(100)]
+
+ rules = {
+ "compute:get_all_tenants":
+ common_policy.parse_rule("project_id:non_fake"),
+ "compute:get_all":
+ common_policy.parse_rule("project_id:fake"),
+ }
+
+ policy.set_rules(rules)
+ self.stubs.Set(db, 'instance_get_all_by_filters',
+ fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?all_tenants=1')
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.index, req)
+
+ def test_get_servers_allows_flavor(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('flavor', search_opts)
+ # flavor is an integer ID
+ self.assertEqual(search_opts['flavor'], '12345')
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?flavor=12345')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_with_bad_flavor(self):
+ req = fakes.HTTPRequestV3.blank('/servers?flavor=abcde')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 0)
+
+ def test_get_server_details_with_bad_flavor(self):
+ req = fakes.HTTPRequestV3.blank('/servers?flavor=abcde')
+ servers = self.controller.detail(req)['servers']
+
+ self.assertThat(servers, testtools.matchers.HasLength(0))
+
+ def test_get_servers_allows_status(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('vm_state', search_opts)
+ self.assertEqual(search_opts['vm_state'], [vm_states.ACTIVE])
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?status=active')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_allows_task_status(self):
+ server_uuid = str(uuid.uuid4())
+ task_state = task_states.REBOOTING
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('task_state', search_opts)
+ self.assertEqual([task_states.REBOOT_PENDING,
+ task_states.REBOOT_STARTED,
+ task_states.REBOOTING],
+ search_opts['task_state'])
+ db_list = [fakes.stub_instance(100, uuid=server_uuid,
+ task_state=task_state)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?status=reboot')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_resize_status(self):
+ # Test when resize status, it maps list of vm states.
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIn('vm_state', search_opts)
+ self.assertEqual(search_opts['vm_state'],
+ [vm_states.ACTIVE, vm_states.STOPPED])
+
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?status=resize')
+
+ servers = self.controller.detail(req)['servers']
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_invalid_status(self):
+ # Test getting servers by invalid status.
+ req = fakes.HTTPRequestV3.blank('/servers?status=baloney',
+ use_admin_context=False)
+ servers = self.controller.index(req)['servers']
+ self.assertEqual(len(servers), 0)
+
+ def test_get_servers_deleted_status_as_user(self):
+ req = fakes.HTTPRequestV3.blank('/servers?status=deleted',
+ use_admin_context=False)
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.detail, req)
+
+ def test_get_servers_deleted_status_as_admin(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIn('vm_state', search_opts)
+ self.assertEqual(search_opts['vm_state'], ['deleted'])
+
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?status=deleted',
+ use_admin_context=True)
+
+ servers = self.controller.detail(req)['servers']
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_allows_name(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('name', search_opts)
+ self.assertEqual(search_opts['name'], 'whee.*')
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?name=whee.*')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ @mock.patch.object(compute_api.API, 'get_all')
+ def test_get_servers_flavor_not_found(self, get_all_mock):
+ get_all_mock.side_effect = exception.FlavorNotFound(flavor_id=1)
+
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers?status=active&flavor=abc')
+ servers = self.controller.index(req)['servers']
+ self.assertEqual(0, len(servers))
+
+ def test_get_servers_allows_changes_since(self):
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('changes-since', search_opts)
+ changes_since = datetime.datetime(2011, 1, 24, 17, 8, 1,
+ tzinfo=iso8601.iso8601.UTC)
+ self.assertEqual(search_opts['changes-since'], changes_since)
+ self.assertNotIn('deleted', search_opts)
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ params = 'changes-since=2011-01-24T17:08:01Z'
+ req = fakes.HTTPRequestV3.blank('/servers?%s' % params)
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_allows_changes_since_bad_value(self):
+ params = 'changes-since=asdf'
+ req = fakes.HTTPRequestV3.blank('/servers?%s' % params)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req)
+
+ def test_get_servers_admin_filters_as_user(self):
+ """Test getting servers by admin-only or unknown options when
+ context is not admin. Make sure the admin and unknown options
+ are stripped before they get to compute_api.get_all()
+ """
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIsNotNone(search_opts)
+ # Allowed by user
+ self.assertIn('name', search_opts)
+ self.assertIn('ip', search_opts)
+ # OSAPI converts status to vm_state
+ self.assertIn('vm_state', search_opts)
+ # Allowed only by admins with admin API on
+ self.assertNotIn('unknown_option', search_opts)
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
+ req = fakes.HTTPRequest.blank('/servers?%s' % query_str)
+ res = self.controller.index(req)
+
+ servers = res['servers']
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_admin_options_as_admin(self):
+ """Test getting servers by admin-only or unknown options when
+ context is admin. All options should be passed
+ """
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIsNotNone(search_opts)
+ # Allowed by user
+ self.assertIn('name', search_opts)
+ # OSAPI converts status to vm_state
+ self.assertIn('vm_state', search_opts)
+ # Allowed only by admins with admin API on
+ self.assertIn('ip', search_opts)
+ self.assertIn('unknown_option', search_opts)
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
+ req = fakes.HTTPRequestV3.blank('/servers?%s' % query_str,
+ use_admin_context=True)
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+ def test_get_servers_allows_ip(self):
+ """Test getting servers by ip."""
+
+ server_uuid = str(uuid.uuid4())
+
+ def fake_get_all(compute_self, context, search_opts=None,
+ sort_key=None, sort_dir='desc',
+ limit=None, marker=None, want_objects=False,
+ expected_attrs=None):
+ self.assertIsNotNone(search_opts)
+ self.assertIn('ip', search_opts)
+ self.assertEqual(search_opts['ip'], '10\..*')
+ db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+ return instance_obj._make_instance_list(
+ context, objects.InstanceList(), db_list, FIELDS)
+
+ self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+ req = fakes.HTTPRequestV3.blank('/servers?ip=10\..*')
+ servers = self.controller.index(req)['servers']
+
+ self.assertEqual(len(servers), 1)
+ self.assertEqual(servers[0]['id'], server_uuid)
+
+    def test_get_servers_admin_allows_ip6(self):
+        """Test getting servers by ip6 with admin_api enabled and
+        admin context
+        """
+        server_uuid = str(uuid.uuid4())
+
+        def fake_get_all(compute_self, context, search_opts=None,
+                         sort_key=None, sort_dir='desc',
+                         limit=None, marker=None, want_objects=False,
+                         expected_attrs=None):
+            # For an admin request the 'ip6' filter must be passed
+            # through unchanged in search_opts.
+            self.assertIsNotNone(search_opts)
+            self.assertIn('ip6', search_opts)
+            self.assertEqual(search_opts['ip6'], 'ffff.*')
+            db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+            return instance_obj._make_instance_list(
+                context, objects.InstanceList(), db_list, FIELDS)
+
+        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+        req = fakes.HTTPRequestV3.blank('/servers?ip6=ffff.*',
+                                        use_admin_context=True)
+        servers = self.controller.index(req)['servers']
+
+        self.assertEqual(len(servers), 1)
+        self.assertEqual(servers[0]['id'], server_uuid)
+
+    def test_get_all_server_details(self):
+        """Detail view must render flavor/image as bookmark-link dicts
+        and expose the expected per-server fields.
+        """
+        expected_flavor = {
+            "id": "1",
+            "links": [
+                {
+                    "rel": "bookmark",
+                    "href": 'http://localhost/flavors/1',
+                },
+            ],
+        }
+        expected_image = {
+            "id": "10",
+            "links": [
+                {
+                    "rel": "bookmark",
+                    "href": 'http://localhost/images/10',
+                },
+            ],
+        }
+        req = fakes.HTTPRequestV3.blank('/servers/detail')
+        res_dict = self.controller.detail(req)
+
+        # The fake instances are generated sequentially, so each field
+        # can be checked against its index.
+        for i, s in enumerate(res_dict['servers']):
+            self.assertEqual(s['id'], fakes.get_fake_uuid(i))
+            self.assertEqual(s['hostId'], '')
+            self.assertEqual(s['name'], 'server%d' % (i + 1))
+            self.assertEqual(s['image'], expected_image)
+            self.assertEqual(s['flavor'], expected_flavor)
+            self.assertEqual(s['status'], 'BUILD')
+            self.assertEqual(s['metadata']['seq'], str(i + 1))
+
+    def test_get_all_server_details_with_host(self):
+        """We want to make sure that if two instances are on the same host,
+        then they return the same hostId. If two instances are on different
+        hosts, they should return different hostIds. In this test,
+        there are 5 instances - 2 on one host and 3 on another.
+        """
+
+        def return_servers_with_host(context, *args, **kwargs):
+            # host=i % 2 alternates the 5 instances between two hosts.
+            return [fakes.stub_instance(i + 1, 'fake', 'fake', host=i % 2,
+                                        uuid=fakes.get_fake_uuid(i))
+                    for i in xrange(5)]
+
+        self.stubs.Set(db, 'instance_get_all_by_filters',
+                       return_servers_with_host)
+
+        req = fakes.HTTPRequestV3.blank('/servers/detail')
+        res_dict = self.controller.detail(req)
+
+        server_list = res_dict['servers']
+        # Both hostIds must be non-empty and distinct from each other.
+        host_ids = [server_list[0]['hostId'], server_list[1]['hostId']]
+        self.assertTrue(host_ids[0] and host_ids[1])
+        self.assertNotEqual(host_ids[0], host_ids[1])
+
+        # Every instance on the same underlying host shares a hostId.
+        for i, s in enumerate(server_list):
+            self.assertEqual(s['id'], fakes.get_fake_uuid(i))
+            self.assertEqual(s['hostId'], host_ids[i % 2])
+            self.assertEqual(s['name'], 'server%d' % (i + 1))
+
+    def test_get_servers_joins_pci_devices(self):
+        """The index call must request 'pci_devices' in expected_attrs
+        so the attribute is joined when instances are loaded.
+        """
+        # Captured from the stub so it can be inspected after the call.
+        self.expected_attrs = None
+
+        def fake_get_all(compute_self, context, search_opts=None,
+                         sort_key=None, sort_dir='desc',
+                         limit=None, marker=None, want_objects=False,
+                         expected_attrs=None):
+            self.expected_attrs = expected_attrs
+            return []
+
+        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+        req = fakes.HTTPRequestV3.blank('/servers', use_admin_context=True)
+        self.assertIn('servers', self.controller.index(req))
+        self.assertIn('pci_devices', self.expected_attrs)
+
+
+class ServersControllerDeleteTest(ControllerTest):
+    """Tests for DELETE /servers/<uuid> in the v3 servers controller."""
+
+    def setUp(self):
+        super(ServersControllerDeleteTest, self).setUp()
+        # Flag flipped by the instance_destroy stub so individual tests
+        # can verify the delete actually reached the db layer.
+        self.server_delete_called = False
+
+        def instance_destroy_mock(*args, **kwargs):
+            self.server_delete_called = True
+            deleted_at = timeutils.utcnow()
+            return fake_instance.fake_db_instance(deleted_at=deleted_at)
+
+        self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
+
+    def _create_delete_request(self, uuid):
+        """Build a DELETE request for the given server uuid."""
+        fakes.stub_out_instance_quota(self.stubs, 0, 10)
+        req = fakes.HTTPRequestV3.blank('/servers/%s' % uuid)
+        req.method = 'DELETE'
+        return req
+
+    def _delete_server_instance(self, uuid=FAKE_UUID):
+        """Issue a delete against an ACTIVE instance with that uuid."""
+        req = self._create_delete_request(uuid)
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
+        self.controller.delete(req, uuid)
+
+    def test_delete_server_instance(self):
+        self._delete_server_instance()
+        self.assertTrue(self.server_delete_called)
+
+    def test_delete_server_instance_not_found(self):
+        # Unknown uuid must surface as a 404, not a 500.
+        self.assertRaises(webob.exc.HTTPNotFound,
+                          self._delete_server_instance,
+                          uuid='non-existent-uuid')
+
+    def test_delete_server_instance_while_building(self):
+        req = self._create_delete_request(FAKE_UUID)
+        self.controller.delete(req, FAKE_UUID)
+
+        self.assertTrue(self.server_delete_called)
+
+    def test_delete_locked_server(self):
+        # Both soft-delete and hard-delete paths raise for a locked
+        # server; the controller must translate that to 409 Conflict.
+        req = self._create_delete_request(FAKE_UUID)
+        self.stubs.Set(compute_api.API, delete_types.SOFT_DELETE,
+                       fakes.fake_actions_to_locked_server)
+        self.stubs.Set(compute_api.API, delete_types.DELETE,
+                       fakes.fake_actions_to_locked_server)
+
+        self.assertRaises(webob.exc.HTTPConflict, self.controller.delete,
+                          req, FAKE_UUID)
+
+    def test_delete_server_instance_while_resize(self):
+        req = self._create_delete_request(FAKE_UUID)
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+                                task_state=task_states.RESIZE_PREP))
+
+        self.controller.delete(req, FAKE_UUID)
+        # Delete shoud be allowed in any case, even during resizing,
+        # because it may get stuck.
+        self.assertTrue(self.server_delete_called)
+
+    def test_delete_server_instance_if_not_launched(self):
+        self.flags(reclaim_instance_interval=3600)
+        req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
+        req.method = 'DELETE'
+
+        self.server_delete_called = False
+
+        # launched_at=None marks an instance that has never been active.
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       fakes.fake_instance_get(launched_at=None))
+
+        def instance_destroy_mock(*args, **kwargs):
+            self.server_delete_called = True
+            deleted_at = timeutils.utcnow()
+            return fake_instance.fake_db_instance(deleted_at=deleted_at)
+        self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
+
+        self.controller.delete(req, FAKE_UUID)
+        # delete() should be called for instance which has never been active,
+        # even if reclaim_instance_interval has been set.
+        self.assertEqual(self.server_delete_called, True)
+
+class ServersControllerRebuildInstanceTest(ControllerTest):
+    """Tests for the rebuild action plus start/stop server actions."""
+
+    image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+    image_href = 'http://localhost/v3/fake/images/%s' % image_uuid
+
+    def setUp(self):
+        super(ServersControllerRebuildInstanceTest, self).setUp()
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
+        # Baseline valid rebuild body; individual tests mutate it to
+        # produce the invalid variants they exercise.
+        self.body = {
+            'rebuild': {
+                'name': 'new_name',
+                'imageRef': self.image_href,
+                'metadata': {
+                    'open': 'stack',
+                },
+            },
+        }
+        self.req = fakes.HTTPRequest.blank('/fake/servers/a/action')
+        self.req.method = 'POST'
+        self.req.headers["content-type"] = "application/json"
+
+    def test_rebuild_instance_with_blank_metadata_key(self):
+        # Empty-string metadata key fails schema validation.
+        self.body['rebuild']['metadata'][''] = 'world'
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(exception.ValidationError,
+                          self.controller._action_rebuild,
+                          self.req, FAKE_UUID, body=self.body)
+
+    def test_rebuild_instance_with_metadata_key_too_long(self):
+        # Keys longer than 255 characters are rejected.
+        self.body['rebuild']['metadata'][('a' * 260)] = 'world'
+
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(exception.ValidationError,
+                          self.controller._action_rebuild,
+                          self.req, FAKE_UUID, body=self.body)
+
+    def test_rebuild_instance_with_metadata_value_too_long(self):
+        # Values longer than 255 characters are rejected.
+        self.body['rebuild']['metadata']['key1'] = ('a' * 260)
+
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(exception.ValidationError,
+                          self.controller._action_rebuild, self.req,
+                          FAKE_UUID, body=self.body)
+
+    def test_rebuild_instance_with_metadata_value_not_string(self):
+        # Non-string metadata values fail schema validation.
+        self.body['rebuild']['metadata']['key1'] = 1
+
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(exception.ValidationError,
+                          self.controller._action_rebuild, self.req,
+                          FAKE_UUID, body=self.body)
+
+    def test_rebuild_instance_fails_when_min_ram_too_small(self):
+        # make min_ram larger than our instance ram size
+        def fake_get_image(self, context, image_href, **kwargs):
+            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+                        name='public image', is_public=True,
+                        status='active', properties={'key1': 'value1'},
+                        min_ram="4096", min_disk="10")
+
+        self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
+
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller._action_rebuild,
+                          self.req, FAKE_UUID, body=self.body)
+
+    def test_rebuild_instance_fails_when_min_disk_too_small(self):
+        # make min_disk larger than our instance disk size
+        def fake_get_image(self, context, image_href, **kwargs):
+            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+                        name='public image', is_public=True,
+                        status='active', properties={'key1': 'value1'},
+                        min_ram="128", min_disk="100000")
+
+        self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller._action_rebuild, self.req,
+                          FAKE_UUID, body=self.body)
+
+    def test_rebuild_instance_image_too_large(self):
+        # make image size larger than our instance disk size
+        size = str(1000 * (1024 ** 3))
+
+        def fake_get_image(self, context, image_href, **kwargs):
+            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+                        name='public image', is_public=True,
+                        status='active', size=size)
+
+        self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller._action_rebuild,
+                          self.req, FAKE_UUID, body=self.body)
+
+    def test_rebuild_instance_name_all_blank(self):
+        # A whitespace-only name must fail schema validation.
+        def fake_get_image(self, context, image_href, **kwargs):
+            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+                        name='public image', is_public=True, status='active')
+
+        self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
+        self.body['rebuild']['name'] = '     '
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(exception.ValidationError,
+                          self.controller._action_rebuild,
+                          self.req, FAKE_UUID, body=self.body)
+
+    def test_rebuild_instance_with_deleted_image(self):
+        # Rebuilding from a non-active image must be a 400.
+        def fake_get_image(self, context, image_href, **kwargs):
+            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+                        name='public image', is_public=True,
+                        status='DELETED')
+
+        self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
+
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller._action_rebuild,
+                          self.req, FAKE_UUID, body=self.body)
+
+    def test_rebuild_instance_onset_file_limit_over_quota(self):
+        # Injected-file quota errors translate to 403 Forbidden.
+        def fake_get_image(self, context, image_href, **kwargs):
+            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+                        name='public image', is_public=True, status='active')
+
+        with contextlib.nested(
+            mock.patch.object(fake._FakeImageService, 'show',
+                              side_effect=fake_get_image),
+            mock.patch.object(self.controller.compute_api, 'rebuild',
+                              side_effect=exception.OnsetFileLimitExceeded)
+        ) as (
+            show_mock, rebuild_mock
+        ):
+            self.req.body = jsonutils.dumps(self.body)
+            self.assertRaises(webob.exc.HTTPForbidden,
+                              self.controller._action_rebuild,
+                              self.req, FAKE_UUID, body=self.body)
+
+    def test_start(self):
+        self.mox.StubOutWithMock(compute_api.API, 'start')
+        compute_api.API.start(mox.IgnoreArg(), mox.IgnoreArg())
+        self.mox.ReplayAll()
+
+        req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+        body = dict(start="")
+        self.controller._start_server(req, FAKE_UUID, body)
+
+    def test_start_policy_failed(self):
+        # A non-matching policy rule must deny start and name the rule.
+        rules = {
+            "compute:v3:servers:start":
+                common_policy.parse_rule("project_id:non_fake")
+        }
+        policy.set_rules(rules)
+        req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+        body = dict(start="")
+        exc = self.assertRaises(exception.PolicyNotAuthorized,
+                                self.controller._start_server,
+                                req, FAKE_UUID, body)
+        self.assertIn("compute:v3:servers:start", exc.format_message())
+
+    def test_start_not_ready(self):
+        self.stubs.Set(compute_api.API, 'start', fake_start_stop_not_ready)
+        req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+        body = dict(start="")
+        self.assertRaises(webob.exc.HTTPConflict,
+            self.controller._start_server, req, FAKE_UUID, body)
+
+    def test_start_locked_server(self):
+        self.stubs.Set(compute_api.API, 'start',
+                       fakes.fake_actions_to_locked_server)
+        req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+        body = dict(start="")
+        self.assertRaises(webob.exc.HTTPConflict,
+            self.controller._start_server, req, FAKE_UUID, body)
+
+    def test_start_invalid(self):
+        self.stubs.Set(compute_api.API, 'start', fake_start_stop_invalid_state)
+        req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+        body = dict(start="")
+        self.assertRaises(webob.exc.HTTPConflict,
+            self.controller._start_server, req, FAKE_UUID, body)
+
+    def test_stop(self):
+        self.mox.StubOutWithMock(compute_api.API, 'stop')
+        compute_api.API.stop(mox.IgnoreArg(), mox.IgnoreArg())
+        self.mox.ReplayAll()
+
+        req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+        body = dict(stop="")
+        self.controller._stop_server(req, FAKE_UUID, body)
+
+    def test_stop_policy_failed(self):
+        rules = {
+            "compute:v3:servers:stop":
+                common_policy.parse_rule("project_id:non_fake")
+        }
+        policy.set_rules(rules)
+        req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+        body = dict(stop='')
+        exc = self.assertRaises(exception.PolicyNotAuthorized,
+                                self.controller._stop_server,
+                                req, FAKE_UUID, body)
+        self.assertIn("compute:v3:servers:stop", exc.format_message())
+
+    def test_stop_not_ready(self):
+        self.stubs.Set(compute_api.API, 'stop', fake_start_stop_not_ready)
+        req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+        body = dict(stop="")
+        self.assertRaises(webob.exc.HTTPConflict,
+            self.controller._stop_server, req, FAKE_UUID, body)
+
+    def test_stop_locked_server(self):
+        self.stubs.Set(compute_api.API, 'stop',
+                       fakes.fake_actions_to_locked_server)
+        req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+        body = dict(stop="")
+        self.assertRaises(webob.exc.HTTPConflict,
+            self.controller._stop_server, req, FAKE_UUID, body)
+
+    def test_stop_invalid_state(self):
+        self.stubs.Set(compute_api.API, 'stop', fake_start_stop_invalid_state)
+        req = fakes.HTTPRequestV3.blank('/servers/%s/action' % FAKE_UUID)
+        body = dict(start="")
+        self.assertRaises(webob.exc.HTTPConflict,
+            self.controller._stop_server, req, FAKE_UUID, body)
+
+    def test_start_with_bogus_id(self):
+        # Unknown instance uuid yields 404 for the start action.
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       fake_instance_get_by_uuid_not_found)
+        req = fakes.HTTPRequestV3.blank('/servers/test_inst/action')
+        body = dict(start="")
+        self.assertRaises(webob.exc.HTTPNotFound,
+            self.controller._start_server, req, 'test_inst', body)
+
+    def test_stop_with_bogus_id(self):
+        # Unknown instance uuid yields 404 for the stop action.
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       fake_instance_get_by_uuid_not_found)
+        req = fakes.HTTPRequestV3.blank('/servers/test_inst/action')
+        body = dict(stop="")
+        self.assertRaises(webob.exc.HTTPNotFound,
+            self.controller._stop_server, req, 'test_inst', body)
+
+class ServersControllerUpdateTest(ControllerTest):
+    """Tests for PUT /servers/<uuid> (server update)."""
+
+    def _get_request(self, body=None, options=None):
+        """Build a PUT request; options, when given, seed the stubbed
+        db instance_get result.
+        """
+        if options:
+            self.stubs.Set(db, 'instance_get',
+                           fakes.fake_instance_get(**options))
+        req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
+        req.method = 'PUT'
+        req.content_type = 'application/json'
+        req.body = jsonutils.dumps(body)
+        return req
+
+    def test_update_server_all_attributes(self):
+        body = {'server': {
+                  'name': 'server_test',
+               }}
+        req = self._get_request(body, {'name': 'server_test'})
+        res_dict = self.controller.update(req, FAKE_UUID, body=body)
+
+        self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+        self.assertEqual(res_dict['server']['name'], 'server_test')
+
+    def test_update_server_name(self):
+        body = {'server': {'name': 'server_test'}}
+        req = self._get_request(body, {'name': 'server_test'})
+        res_dict = self.controller.update(req, FAKE_UUID, body=body)
+
+        self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+        self.assertEqual(res_dict['server']['name'], 'server_test')
+
+    def test_update_server_name_too_long(self):
+        # Names over 255 characters fail schema validation.
+        body = {'server': {'name': 'x' * 256}}
+        req = self._get_request(body, {'name': 'server_test'})
+        self.assertRaises(exception.ValidationError, self.controller.update,
+                          req, FAKE_UUID, body=body)
+
+    def test_update_server_name_all_blank_spaces(self):
+        # A whitespace-only name fails schema validation.
+        self.stubs.Set(db, 'instance_get',
+                fakes.fake_instance_get(name='server_test'))
+        req = fakes.HTTPRequest.blank('/v3/servers/%s' % FAKE_UUID)
+        req.method = 'PUT'
+        req.content_type = 'application/json'
+        body = {'server': {'name': ' ' * 64}}
+        req.body = jsonutils.dumps(body)
+        self.assertRaises(exception.ValidationError, self.controller.update,
+                          req, FAKE_UUID, body=body)
+
+    def test_update_server_admin_password_ignored(self):
+        inst_dict = dict(name='server_test', admin_password='bacon')
+        body = dict(server=inst_dict)
+
+        def server_update(context, id, params):
+            # Only display_name should reach the db; admin_password
+            # must have been filtered out by the controller.
+            filtered_dict = {
+                'display_name': 'server_test',
+            }
+            self.assertEqual(params, filtered_dict)
+            filtered_dict['uuid'] = id
+            return filtered_dict
+
+        self.stubs.Set(db, 'instance_update', server_update)
+        # FIXME (comstud)
+        #        self.stubs.Set(db, 'instance_get',
+        #                return_server_with_attributes(name='server_test'))
+
+        req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
+        req.method = 'PUT'
+        req.content_type = "application/json"
+        req.body = jsonutils.dumps(body)
+        res_dict = self.controller.update(req, FAKE_UUID, body=body)
+
+        self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+        self.assertEqual(res_dict['server']['name'], 'server_test')
+
+    def test_update_server_not_found(self):
+        # InstanceNotFound from compute_api.get maps to 404.
+        def fake_get(*args, **kwargs):
+            raise exception.InstanceNotFound(instance_id='fake')
+
+        self.stubs.Set(compute_api.API, 'get', fake_get)
+        body = {'server': {'name': 'server_test'}}
+        req = self._get_request(body)
+        self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
+                          req, FAKE_UUID, body=body)
+
+    def test_update_server_not_found_on_update(self):
+        # InstanceNotFound raised during the update itself also maps
+        # to 404 (instance deleted between get and update).
+        def fake_update(*args, **kwargs):
+            raise exception.InstanceNotFound(instance_id='fake')
+
+        self.stubs.Set(db, 'instance_update_and_get_original', fake_update)
+        body = {'server': {'name': 'server_test'}}
+        req = self._get_request(body)
+        self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
+                          req, FAKE_UUID, body=body)
+
+    def test_update_server_policy_fail(self):
+        rule = {'compute:update': common_policy.parse_rule('role:admin')}
+        policy.set_rules(rule)
+        body = {'server': {'name': 'server_test'}}
+        req = self._get_request(body, {'name': 'server_test'})
+        self.assertRaises(exception.PolicyNotAuthorized,
+                self.controller.update, req, FAKE_UUID, body=body)
+
+
+class ServerStatusTest(test.TestCase):
+    """Verify vm_state/task_state pairs map to the expected API status
+    strings, and that resize/reboot actions honour policy rules.
+    """
+
+    def setUp(self):
+        super(ServerStatusTest, self).setUp()
+        fakes.stub_out_nw_api(self.stubs)
+
+        ext_info = plugins.LoadedExtensionInfo()
+        self.controller = servers.ServersController(extension_info=ext_info)
+
+    def _get_with_state(self, vm_state, task_state=None):
+        """Show the fake server with the given states and return the
+        rendered response dict.
+        """
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                fakes.fake_instance_get(vm_state=vm_state,
+                                        task_state=task_state))
+
+        request = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
+        return self.controller.show(request, FAKE_UUID)
+
+    def test_active(self):
+        response = self._get_with_state(vm_states.ACTIVE)
+        self.assertEqual(response['server']['status'], 'ACTIVE')
+
+    def test_reboot(self):
+        response = self._get_with_state(vm_states.ACTIVE,
+                                        task_states.REBOOTING)
+        self.assertEqual(response['server']['status'], 'REBOOT')
+
+    def test_reboot_hard(self):
+        response = self._get_with_state(vm_states.ACTIVE,
+                                        task_states.REBOOTING_HARD)
+        self.assertEqual(response['server']['status'], 'HARD_REBOOT')
+
+    def test_reboot_resize_policy_fail(self):
+        # Policy denial on reboot surfaces as PolicyNotAuthorized.
+        def fake_get_server(context, req, id):
+            return fakes.stub_instance(id)
+
+        self.stubs.Set(self.controller, '_get_server', fake_get_server)
+
+        rule = {'compute:reboot':
+                common_policy.parse_rule('role:admin')}
+        policy.set_rules(rule)
+        req = fakes.HTTPRequestV3.blank('/servers/1234/action')
+        self.assertRaises(exception.PolicyNotAuthorized,
+                self.controller._action_reboot, req, '1234',
+                {'reboot': {'type': 'HARD'}})
+
+    def test_rebuild(self):
+        response = self._get_with_state(vm_states.ACTIVE,
+                                        task_states.REBUILDING)
+        self.assertEqual(response['server']['status'], 'REBUILD')
+
+    def test_rebuild_error(self):
+        response = self._get_with_state(vm_states.ERROR)
+        self.assertEqual(response['server']['status'], 'ERROR')
+
+    def test_resize(self):
+        response = self._get_with_state(vm_states.ACTIVE,
+                                        task_states.RESIZE_PREP)
+        self.assertEqual(response['server']['status'], 'RESIZE')
+
+    def test_confirm_resize_policy_fail(self):
+        def fake_get_server(context, req, id):
+            return fakes.stub_instance(id)
+
+        self.stubs.Set(self.controller, '_get_server', fake_get_server)
+
+        rule = {'compute:confirm_resize':
+                common_policy.parse_rule('role:admin')}
+        policy.set_rules(rule)
+        req = fakes.HTTPRequestV3.blank('/servers/1234/action')
+        self.assertRaises(exception.PolicyNotAuthorized,
+                self.controller._action_confirm_resize, req, '1234', {})
+
+    def test_verify_resize(self):
+        response = self._get_with_state(vm_states.RESIZED, None)
+        self.assertEqual(response['server']['status'], 'VERIFY_RESIZE')
+
+    def test_revert_resize(self):
+        response = self._get_with_state(vm_states.RESIZED,
+                                        task_states.RESIZE_REVERTING)
+        self.assertEqual(response['server']['status'], 'REVERT_RESIZE')
+
+    def test_revert_resize_policy_fail(self):
+        def fake_get_server(context, req, id):
+            return fakes.stub_instance(id)
+
+        self.stubs.Set(self.controller, '_get_server', fake_get_server)
+
+        rule = {'compute:revert_resize':
+                common_policy.parse_rule('role:admin')}
+        policy.set_rules(rule)
+        req = fakes.HTTPRequestV3.blank('/servers/1234/action')
+        self.assertRaises(exception.PolicyNotAuthorized,
+                self.controller._action_revert_resize, req, '1234', {})
+
+    def test_password_update(self):
+        response = self._get_with_state(vm_states.ACTIVE,
+                                        task_states.UPDATING_PASSWORD)
+        self.assertEqual(response['server']['status'], 'PASSWORD')
+
+    def test_stopped(self):
+        response = self._get_with_state(vm_states.STOPPED)
+        self.assertEqual(response['server']['status'], 'SHUTOFF')
+
+
+class ServersControllerCreateTest(test.TestCase):
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+    def setUp(self):
+        """Shared implementation for tests below that create instance."""
+        super(ServersControllerCreateTest, self).setUp()
+
+        self.flags(verbose=True,
+                   enable_instance_password=True)
+        # In-memory instance "database" keyed both by integer id and uuid.
+        self.instance_cache_num = 0
+        self.instance_cache_by_id = {}
+        self.instance_cache_by_uuid = {}
+
+        fakes.stub_out_nw_api(self.stubs)
+
+        ext_info = plugins.LoadedExtensionInfo()
+        self.controller = servers.ServersController(extension_info=ext_info)
+
+        def instance_create(context, inst):
+            # Build a fake db row from the create parameters and record
+            # it in both caches so later get/update stubs can find it.
+            inst_type = flavors.get_flavor_by_flavor_id(3)
+            image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+            def_image_ref = 'http://localhost/images/%s' % image_uuid
+            self.instance_cache_num += 1
+            instance = fake_instance.fake_db_instance(**{
+                'id': self.instance_cache_num,
+                'display_name': inst['display_name'] or 'test',
+                'uuid': FAKE_UUID,
+                'instance_type': inst_type,
+                'image_ref': inst.get('image_ref', def_image_ref),
+                'user_id': 'fake',
+                'project_id': 'fake',
+                'reservation_id': inst['reservation_id'],
+                "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+                "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+                "config_drive": None,
+                "progress": 0,
+                "fixed_ips": [],
+                "task_state": "",
+                "vm_state": "",
+                "root_device_name": inst.get('root_device_name', 'vda'),
+            })
+
+            self.instance_cache_by_id[instance['id']] = instance
+            self.instance_cache_by_uuid[instance['uuid']] = instance
+            return instance
+
+        def instance_get(context, instance_id):
+            """Stub for compute/api create() pulling in instance after
+            scheduling
+            """
+            return self.instance_cache_by_id[instance_id]
+
+        def instance_update(context, uuid, values):
+            instance = self.instance_cache_by_uuid[uuid]
+            instance.update(values)
+            return instance
+
+        # NOTE(review): server_update and queue_get_for are defined but
+        # never stubbed in below — presumably leftovers; confirm before
+        # removing.
+        def server_update(context, instance_uuid, params, update_cells=True):
+            inst = self.instance_cache_by_uuid[instance_uuid]
+            inst.update(params)
+            return inst
+
+        def server_update_and_get_original(
+                context, instance_uuid, params, update_cells=False,
+                columns_to_join=None):
+            inst = self.instance_cache_by_uuid[instance_uuid]
+            inst.update(params)
+            return (inst, inst)
+
+        def fake_method(*args, **kwargs):
+            pass
+
+        def project_get_networks(context, user_id):
+            return dict(id='1', host='localhost')
+
+        def queue_get_for(context, *args):
+            return 'network_topic'
+
+        fakes.stub_out_rate_limiting(self.stubs)
+        fakes.stub_out_key_pair_funcs(self.stubs)
+        fake.stub_out_image_service(self.stubs)
+        # Deterministic uuid generation so created servers get FAKE_UUID.
+        self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
+        self.stubs.Set(db, 'project_get_networks',
+                       project_get_networks)
+        self.stubs.Set(db, 'instance_create', instance_create)
+        self.stubs.Set(db, 'instance_system_metadata_update',
+                       fake_method)
+        self.stubs.Set(db, 'instance_get', instance_get)
+        self.stubs.Set(db, 'instance_update', instance_update)
+        self.stubs.Set(db, 'instance_update_and_get_original',
+                       server_update_and_get_original)
+        self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
+                       fake_method)
+        # Baseline valid create body; tests mutate it per case.
+        self.body = {
+            'server': {
+                'name': 'server_test',
+                'imageRef': self.image_uuid,
+                'flavorRef': self.flavor_ref,
+                'metadata': {
+                    'hello': 'world',
+                    'open': 'stack',
+                },
+            },
+        }
+        self.bdm = [{'delete_on_termination': 1,
+                     'device_name': 123,
+                     'volume_size': 1,
+                     'volume_id': '11111111-1111-1111-1111-111111111111'}]
+
+        self.req = fakes.HTTPRequest.blank('/fake/servers')
+        self.req.method = 'POST'
+        self.req.headers["content-type"] = "application/json"
+
+    def _check_admin_password_len(self, server_dict):
+        """utility function - check server_dict for admin_password length."""
+        self.assertEqual(CONF.password_length,
+                         len(server_dict["adminPass"]))
+
+    def _check_admin_password_missing(self, server_dict):
+        """utility function - check server_dict for admin_password absence."""
+        self.assertNotIn("adminPass", server_dict)
+
+    def _test_create_instance(self, flavor=2):
+        """Create a server with the given flavor and verify the response
+        carries a password of the configured length and FAKE_UUID.
+        """
+        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+        self.body['server']['imageRef'] = image_uuid
+        self.body['server']['flavorRef'] = flavor
+        self.req.body = jsonutils.dumps(self.body)
+        server = self.controller.create(self.req, body=self.body).obj['server']
+        self._check_admin_password_len(server)
+        self.assertEqual(FAKE_UUID, server['id'])
+
+    def test_create_instance_private_flavor(self):
+        # Booting from a flavor that is not public (is_public=False)
+        # must be rejected with 400 for this tenant.
+        values = {
+            'name': 'fake_name',
+            'memory_mb': 512,
+            'vcpus': 1,
+            'root_gb': 10,
+            'ephemeral_gb': 10,
+            'flavorid': '1324',
+            'swap': 0,
+            'rxtx_factor': 0.5,
+            'vcpu_weight': 1,
+            'disabled': False,
+            'is_public': False,
+        }
+        db.flavor_create(context.get_admin_context(), values)
+        self.assertRaises(webob.exc.HTTPBadRequest, self._test_create_instance,
+                          flavor=1324)
+
+    def test_create_server_bad_image_href(self):
+        # A non-string imageRef (here a tuple, via the trailing comma)
+        # must fail schema validation.
+        image_href = 1
+        self.body['server']['min_count'] = 1
+        self.body['server']['imageRef'] = image_href,
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(exception.ValidationError,
+                          self.controller.create,
+                          self.req, body=self.body)
+ # TODO(cyeoh): bp-v3-api-unittests
+ # This needs to be ported to the os-networks extension tests
+ # def test_create_server_with_invalid_networks_parameter(self):
+ # self.ext_mgr.extensions = {'os-networks': 'fake'}
+ # image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ # flavor_ref = 'http://localhost/123/flavors/3'
+ # body = {
+ # 'server': {
+ # 'name': 'server_test',
+ # 'imageRef': image_href,
+ # 'flavorRef': flavor_ref,
+ # 'networks': {'uuid': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'},
+ # }
+ # }
+ # req = fakes.HTTPRequest.blank('/v2/fake/servers')
+ # req.method = 'POST'
+ # req.body = jsonutils.dumps(body)
+ # req.headers["content-type"] = "application/json"
+ # self.assertRaises(webob.exc.HTTPBadRequest,
+ # self.controller.create,
+ # req,
+ # body)
+
+    def test_create_server_with_deleted_image(self):
+        # Get the fake image service so we can set the status to deleted
+        (image_service, image_id) = glance.get_remote_image_service(
+            context, '')
+        image_service.update(context, self.image_uuid, {'status': 'DELETED'})
+        # Restore the image for subsequent tests.
+        self.addCleanup(image_service.update, context, self.image_uuid,
+                        {'status': 'active'})
+
+        self.body['server']['flavorRef'] = 2
+        self.req.body = jsonutils.dumps(self.body)
+        with testtools.ExpectedException(
+                webob.exc.HTTPBadRequest,
+                'Image 76fa36fc-c930-4bf3-8c8a-ea2a2420deb6 is not active.'):
+            self.controller.create(self.req, body=self.body)
+
+    def test_create_server_image_too_large(self):
+        # Get the fake image service so we can set the status to deleted
+        (image_service, image_id) = glance.get_remote_image_service(
+                                    context, self.image_uuid)
+
+        image = image_service.show(context, image_id)
+
+        # Inflate the image size beyond the flavor's disk capacity.
+        orig_size = image['size']
+        new_size = str(1000 * (1024 ** 3))
+        image_service.update(context, self.image_uuid, {'size': new_size})
+
+        self.addCleanup(image_service.update, context, self.image_uuid,
+                        {'size': orig_size})
+
+        self.body['server']['flavorRef'] = 2
+        self.req.body = jsonutils.dumps(self.body)
+
+        with testtools.ExpectedException(
+                webob.exc.HTTPBadRequest,
+                "Flavor's disk is too small for requested image."):
+            self.controller.create(self.req, body=self.body)
+
+    def test_create_instance_image_ref_is_bookmark(self):
+        # A full bookmark URL is accepted as imageRef, not just a uuid.
+        image_href = 'http://localhost/images/%s' % self.image_uuid
+        self.body['server']['imageRef'] = image_href
+        self.req.body = jsonutils.dumps(self.body)
+        res = self.controller.create(self.req, body=self.body).obj
+
+        server = res['server']
+        self.assertEqual(FAKE_UUID, server['id'])
+
+    def test_create_instance_image_ref_is_invalid(self):
+        # An imageRef URL whose trailing segment is not a uuid is a 400.
+        image_uuid = 'this_is_not_a_valid_uuid'
+        image_href = 'http://localhost/images/%s' % image_uuid
+        flavor_ref = 'http://localhost/flavors/3'
+        self.body['server']['imageRef'] = image_href
+        self.body['server']['flavorRef'] = flavor_ref
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+                          self.req, body=self.body)
+
+    def test_create_instance_no_key_pair(self):
+        # Creation must still succeed when no keypair exists.
+        fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
+        self._test_create_instance()
+
+    def _test_create_extra(self, params, no_image=False):
+        """Create a server with extra body params merged in; with
+        no_image=True the imageRef key is removed entirely.
+        """
+        self.body['server']['flavorRef'] = 2
+        if no_image:
+            self.body['server'].pop('imageRef', None)
+        self.body['server'].update(params)
+        self.req.body = jsonutils.dumps(self.body)
+        self.req.headers["content-type"] = "application/json"
+        self.controller.create(self.req, body=self.body).obj['server']
+
+ # TODO(cyeoh): bp-v3-api-unittests
+ # This needs to be ported to the os-keypairs extension tests
+ # def test_create_instance_with_keypairs_enabled(self):
+ # self.ext_mgr.extensions = {'os-keypairs': 'fake'}
+ # key_name = 'green'
+ #
+ # params = {'key_name': key_name}
+ # old_create = compute_api.API.create
+ #
+ # # NOTE(sdague): key pair goes back to the database,
+ # # so we need to stub it out for tests
+ # def key_pair_get(context, user_id, name):
+ # return {'public_key': 'FAKE_KEY',
+ # 'fingerprint': 'FAKE_FINGERPRINT',
+ # 'name': name}
+ #
+ # def create(*args, **kwargs):
+ # self.assertEqual(kwargs['key_name'], key_name)
+ # return old_create(*args, **kwargs)
+ #
+ # self.stubs.Set(db, 'key_pair_get', key_pair_get)
+ # self.stubs.Set(compute_api.API, 'create', create)
+ # self._test_create_extra(params)
+ #
+ # TODO(cyeoh): bp-v3-api-unittests
+ # This needs to be ported to the os-networks extension tests
+ # def test_create_instance_with_networks_enabled(self):
+ # self.ext_mgr.extensions = {'os-networks': 'fake'}
+ # net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ # requested_networks = [{'uuid': net_uuid}]
+ # params = {'networks': requested_networks}
+ # old_create = compute_api.API.create
+
+ # def create(*args, **kwargs):
+ # result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None)]
+ # self.assertEqual(kwargs['requested_networks'], result)
+ # return old_create(*args, **kwargs)
+
+ # self.stubs.Set(compute_api.API, 'create', create)
+ # self._test_create_extra(params)
+
+    def test_create_instance_with_port_with_no_fixed_ips(self):
+        # PortRequiresFixedIP from compute_api.create maps to 400.
+        port_id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+        requested_networks = [{'port': port_id}]
+        params = {'networks': requested_networks}
+
+        def fake_create(*args, **kwargs):
+            raise exception.PortRequiresFixedIP(port_id=port_id)
+
+        self.stubs.Set(compute_api.API, 'create', fake_create)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self._test_create_extra, params)
+
    @mock.patch.object(compute_api.API, 'create')
    def test_create_instance_raise_user_data_too_large(self, mock_create):
        """InstanceUserDataTooLarge from the compute API maps to a 400."""
        mock_create.side_effect = exception.InstanceUserDataTooLarge(
            maxsize=1, length=2)

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create,
                          self.req, body=self.body)
+
+ def test_create_instance_with_network_with_no_subnet(self):
+ network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ raise exception.NetworkRequiresSubnet(network_uuid=network)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
    def test_create_instance_with_non_unique_secgroup_name(self):
        """NoUniqueMatch on duplicate security group names maps to a 409."""
        network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
        requested_networks = [{'uuid': network}]
        params = {'networks': requested_networks,
                  'security_groups': [{'name': 'dup'}, {'name': 'dup'}]}

        def fake_create(*args, **kwargs):
            raise exception.NoUniqueMatch("No Unique match found for ...")

        self.stubs.Set(compute_api.API, 'create', fake_create)
        self.assertRaises(webob.exc.HTTPConflict,
                          self._test_create_extra, params)
+
    def test_create_instance_with_networks_disabled_neutronv2(self):
        """With neutron configured, requested networks reach compute.create."""
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        requested_networks = [{'uuid': net_uuid}]
        params = {'networks': requested_networks}
        old_create = compute_api.API.create

        def create(*args, **kwargs):
            # as_tuples() yields 4-tuples; presumably
            # (network_id, fixed_ip, port_id, pci_request_id) -- only the
            # network id is expected to be set here. TODO confirm field order.
            result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None,
                       None, None)]
            self.assertEqual(result, kwargs['requested_networks'].as_tuples())
            return old_create(*args, **kwargs)

        self.stubs.Set(compute_api.API, 'create', create)
        self._test_create_extra(params)
+
+ def test_create_instance_with_networks_disabled(self):
+ net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ requested_networks = [{'uuid': net_uuid}]
+ params = {'networks': requested_networks}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIsNone(kwargs['requested_networks'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_pass_disabled(self):
+ # test with admin passwords disabled See lp bug 921814
+ self.flags(enable_instance_password=False)
+
+ # proper local hrefs must start with 'http://localhost/v3/'
+ self.flags(enable_instance_password=False)
+ image_href = 'http://localhost/v2/fake/images/%s' % self.image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, body=self.body).obj
+
+ server = res['server']
+ self._check_admin_password_missing(server)
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_name_too_long(self):
+ # proper local hrefs must start with 'http://localhost/v3/'
+ image_href = 'http://localhost/v2/images/%s' % self.image_uuid
+ self.body['server']['name'] = 'X' * 256
+ self.body['server']['imageRef'] = image_href
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(exception.ValidationError, self.controller.create,
+ self.req, body=self.body)
+
+ def test_create_instance_name_all_blank_spaces(self):
+ # proper local hrefs must start with 'http://localhost/v2/'
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ image_href = 'http://localhost/v3/images/%s' % image_uuid
+ flavor_ref = 'http://localhost/flavors/3'
+ body = {
+ 'server': {
+ 'name': ' ' * 64,
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ },
+ }
+
+ req = fakes.HTTPRequest.blank('/v3/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
+
    def test_create_instance(self):
        """Happy path: create returns the new server id and an adminPass."""
        # proper local hrefs must start with 'http://localhost/v3/'
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.req.body = jsonutils.dumps(self.body)
        res = self.controller.create(self.req, body=self.body).obj

        server = res['server']
        self._check_admin_password_len(server)
        self.assertEqual(FAKE_UUID, server['id'])
+
    def test_create_instance_extension_create_exception(self):
        """An unexpected error in an extension's create hook yields a 500."""
        def fake_keypair_server_create(self, server_dict,
                                       create_kwargs):
            # Any unanticipated exception type from an extension should
            # surface as an internal server error.
            raise KeyError

        self.stubs.Set(keypairs.Keypairs, 'server_create',
                       fake_keypair_server_create)
        # proper local hrefs must start with 'http://localhost/v3/'
        image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        image_href = 'http://localhost/v3/images/%s' % image_uuid
        flavor_ref = 'http://localhost/123/flavors/3'
        body = {
            'server': {
                'name': 'server_test',
                'imageRef': image_href,
                'flavorRef': flavor_ref,
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
            },
        }

        req = fakes.HTTPRequestV3.blank('/servers')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        self.assertRaises(webob.exc.HTTPInternalServerError,
                          self.controller.create, req, body=body)
+
    def test_create_instance_pass_disabled(self):
        """With passwords disabled the response omits adminPass.

        NOTE(review): this is effectively a duplicate of
        test_create_instance_with_pass_disabled; consider consolidating.
        """
        self.flags(enable_instance_password=False)
        # proper local hrefs must start with 'http://localhost/v3/'
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.req.body = jsonutils.dumps(self.body)
        res = self.controller.create(self.req, body=self.body).obj

        server = res['server']
        self._check_admin_password_missing(server)
        self.assertEqual(FAKE_UUID, server['id'])
+
    def test_create_instance_too_much_metadata(self):
        """Exceeding quota_metadata_items returns a 403."""
        self.flags(quota_metadata_items=1)
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.body['server']['metadata']['vote'] = 'fiddletown'
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_metadata_key_too_long(self):
        """An over-long metadata key fails schema validation."""
        self.flags(quota_metadata_items=1)
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.body['server']['metadata'] = {('a' * 260): '12345'}

        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_metadata_value_too_long(self):
        """An over-long metadata value fails schema validation."""
        self.flags(quota_metadata_items=1)
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.body['server']['metadata'] = {'key1': ('a' * 260)}
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_metadata_key_blank(self):
        """An empty metadata key fails schema validation."""
        self.flags(quota_metadata_items=1)
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.body['server']['metadata'] = {'': 'abcd'}
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_metadata_not_dict(self):
        """Non-dict metadata fails schema validation."""
        self.flags(quota_metadata_items=1)
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.body['server']['metadata'] = 'string'
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_metadata_key_not_string(self):
        """A non-string metadata key fails schema validation."""
        self.flags(quota_metadata_items=1)
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.body['server']['metadata'] = {1: 'test'}
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_metadata_value_not_string(self):
        """A non-string metadata value fails schema validation."""
        self.flags(quota_metadata_items=1)
        image_href = 'http://localhost/v2/images/%s' % self.image_uuid
        self.body['server']['imageRef'] = image_href
        self.body['server']['metadata'] = {'test': ['a', 'list']}
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(exception.ValidationError,
                          self.controller.create, self.req, body=self.body)
+
    def test_create_user_data_malformed_bad_request(self):
        """user_data that is not valid base64 fails schema validation."""
        params = {'user_data': 'u1234'}
        self.assertRaises(exception.ValidationError,
                          self._test_create_extra, params)
+
+ def test_create_instance_invalid_key_name(self):
+ image_href = 'http://localhost/v2/images/2'
+ self.body['server']['imageRef'] = image_href
+ self.body['server']['key_name'] = 'nonexistentkey'
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, body=self.body)
+
    def test_create_instance_valid_key_name(self):
        """An existing keypair name is accepted and the server is created."""
        self.body['server']['key_name'] = 'key'
        self.req.body = jsonutils.dumps(self.body)
        res = self.controller.create(self.req, body=self.body).obj

        self.assertEqual(FAKE_UUID, res["server"]["id"])
        self._check_admin_password_len(res["server"])
+
    def test_create_instance_invalid_flavor_href(self):
        """A flavor href with a non-numeric id yields a 400 response."""
        image_href = 'http://localhost/v2/images/2'
        flavor_ref = 'http://localhost/v2/flavors/asdf'
        self.body['server']['imageRef'] = image_href
        self.body['server']['flavorRef'] = flavor_ref
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_invalid_flavor_id_int(self):
        """A negative integer flavor id yields a 400 response."""
        image_href = 'http://localhost/v2/images/2'
        flavor_ref = -1
        self.body['server']['imageRef'] = image_href
        self.body['server']['flavorRef'] = flavor_ref
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, self.req, body=self.body)

    def test_create_instance_bad_flavor_href(self):
        """A well-formed href to a nonexistent flavor yields a 400."""
        image_href = 'http://localhost/v2/images/2'
        flavor_ref = 'http://localhost/v2/flavors/17'
        self.body['server']['imageRef'] = image_href
        self.body['server']['flavorRef'] = flavor_ref
        self.req.body = jsonutils.dumps(self.body)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.create, self.req, body=self.body)
+
+ def test_create_instance_bad_href(self):
+ image_href = 'asdf'
+ self.body['server']['imageRef'] = image_href
+ self.req.body = jsonutils.dumps(self.body)
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, body=self.body)
+
    def test_create_instance_local_href(self):
        """A bare image uuid (the setUp default) is accepted."""
        self.req.body = jsonutils.dumps(self.body)
        res = self.controller.create(self.req, body=self.body).obj

        server = res['server']
        self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_admin_password(self):
+ self.body['server']['flavorRef'] = 3
+ self.body['server']['adminPass'] = 'testpass'
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, body=self.body).obj
+
+ server = res['server']
+ self.assertEqual(server['adminPass'],
+ self.body['server']['adminPass'])
+
+ def test_create_instance_admin_password_pass_disabled(self):
+ self.flags(enable_instance_password=False)
+ self.body['server']['flavorRef'] = 3
+ self.body['server']['adminPass'] = 'testpass'
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, body=self.body).obj
+
+ self.assertIn('server', res)
+ self.assertIn('adminPass', self.body['server'])
+
    def test_create_instance_admin_password_empty(self):
        """An empty adminPass string is accepted by the create action."""
        self.body['server']['flavorRef'] = 3
        self.body['server']['adminPass'] = ''
        self.req.body = jsonutils.dumps(self.body)

        # The fact that the action doesn't raise is enough validation
        self.controller.create(self.req, body=self.body)
+
+ def test_create_location(self):
+ selfhref = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
+ self.req.body = jsonutils.dumps(self.body)
+ robj = self.controller.create(self.req, body=self.body)
+
+ self.assertEqual(robj['Location'], selfhref)
+
    def _do_test_create_instance_above_quota(self, resource, allowed, quota,
                                             expected_msg):
        """Exceed the quota for *resource* and verify the 403 explanation.

        :param resource: quota resource name, e.g. 'instances'
        :param allowed: remaining headroom the stubbed quota driver reports
        :param quota: hard limit the stubbed quota driver reports
        :param expected_msg: exact HTTPForbidden explanation expected
        """
        fakes.stub_out_instance_quota(self.stubs, allowed, quota, resource)
        self.body['server']['flavorRef'] = 3
        self.req.body = jsonutils.dumps(self.body)
        try:
            self.controller.create(self.req, body=self.body).obj['server']
            self.fail('expected quota to be exceeded')
        except webob.exc.HTTPForbidden as e:
            self.assertEqual(e.explanation, expected_msg)
+
    def test_create_instance_above_quota_instances(self):
        """Instance-count quota exhaustion reports the right message."""
        msg = _('Quota exceeded for instances: Requested 1, but'
                ' already used 10 of 10 instances')
        self._do_test_create_instance_above_quota('instances', 0, 10, msg)

    def test_create_instance_above_quota_ram(self):
        """RAM quota exhaustion reports the right message."""
        msg = _('Quota exceeded for ram: Requested 4096, but'
                ' already used 8192 of 10240 ram')
        self._do_test_create_instance_above_quota('ram', 2048, 10 * 1024, msg)

    def test_create_instance_above_quota_cores(self):
        """Core quota exhaustion reports the right message."""
        msg = _('Quota exceeded for cores: Requested 2, but'
                ' already used 9 of 10 cores')
        self._do_test_create_instance_above_quota('cores', 1, 10, msg)
+
    def test_create_instance_above_quota_server_group_members(self):
        """Exceeding the server-group-members quota yields a 403."""
        ctxt = context.get_admin_context()
        fake_group = objects.InstanceGroup(ctxt)
        fake_group.create()

        def fake_count(context, name, group, user_id):
            # Verify the quota count is taken for this group and user.
            self.assertEqual(name, "server_group_members")
            self.assertEqual(group.uuid, fake_group.uuid)
            self.assertEqual(user_id,
                             self.req.environ['nova.context'].user_id)
            return 10

        def fake_limit_check(context, **kwargs):
            if 'server_group_members' in kwargs:
                raise exception.OverQuota(overs={})

        def fake_instance_destroy(context, uuid, constraint):
            # Cleanup path after the quota failure rolls back the instance.
            return fakes.stub_instance(1)

        self.stubs.Set(fakes.QUOTAS, 'count', fake_count)
        self.stubs.Set(fakes.QUOTAS, 'limit_check', fake_limit_check)
        self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
        self.body['os:scheduler_hints'] = {'group': fake_group.uuid}
        self.req.body = jsonutils.dumps(self.body)
        expected_msg = "Quota exceeded, too many servers in group"

        try:
            self.controller.create(self.req, body=self.body).obj
            self.fail('expected quota to be exceeded')
        except webob.exc.HTTPForbidden as e:
            self.assertEqual(e.explanation, expected_msg)
+
+ def test_create_instance_above_quota_server_groups(self):
+
+ def fake_reserve(contex, **deltas):
+ if 'server_groups' in deltas:
+ raise exception.OverQuota(overs={})
+
+ def fake_instance_destroy(context, uuid, constraint):
+ return fakes.stub_instance(1)
+
+ self.stubs.Set(fakes.QUOTAS, 'reserve', fake_reserve)
+ self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
+ self.body['os:scheduler_hints'] = {'group': 'fake_group'}
+ self.req.body = jsonutils.dumps(self.body)
+
+ expected_msg = "Quota exceeded, too many server groups."
+
+ try:
+ self.controller.create(self.req, body=self.body).obj
+ self.fail('expected quota to be exceeded')
+ except webob.exc.HTTPForbidden as e:
+ self.assertEqual(e.explanation, expected_msg)
+
+ def test_create_instance_with_neutronv2_port_in_use(self):
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network, 'port': port}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ raise exception.PortInUse(port_id=port)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self._test_create_extra, params)
+
    @mock.patch.object(compute_api.API, 'create')
    def test_create_instance_public_network_non_admin(self, mock_create):
        """Attaching an external network as a non-admin yields a 403."""
        public_network_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        params = {'networks': [{'uuid': public_network_uuid}]}
        self.req.body = jsonutils.dumps(self.body)
        mock_create.side_effect = exception.ExternalNetworkAttachForbidden(
            network_uuid=public_network_uuid)
        self.assertRaises(webob.exc.HTTPForbidden,
                          self._test_create_extra, params)
+
    @mock.patch.object(compute_api.API, 'create')
    def test_create_multiple_instance_with_specified_ip_neutronv2(self,
                                                                  _api_mock):
        """A fixed IP combined with max_count > 1 yields a 400."""
        _api_mock.side_effect = exception.InvalidFixedIpAndMaxCountRequest(
            reason="")
        network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
        address = '10.0.0.1'
        requested_networks = [{'uuid': network, 'fixed_ip': address,
                               'port': port}]
        params = {'networks': requested_networks}
        self.body['server']['max_count'] = 2
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._test_create_extra, params)
+
    def test_create_multiple_instance_with_neutronv2_port(self):
        """A single port combined with max_count > 1 yields a 400."""
        network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
        requested_networks = [{'uuid': network, 'port': port}]
        params = {'networks': requested_networks}
        self.body['server']['max_count'] = 2

        def fake_create(*args, **kwargs):
            msg = _("Unable to launch multiple instances with"
                    " a single configured port ID. Please launch your"
                    " instance one by one with different ports.")
            raise exception.MultiplePortsNotApplicable(reason=msg)

        self.stubs.Set(compute_api.API, 'create', fake_create)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._test_create_extra, params)
+
    # NOTE(review): "neturonv2" in the method name is a typo for "neutronv2";
    # left as-is so the discovered test name does not change.
    def test_create_instance_with_neturonv2_not_found_network(self):
        """Requesting a nonexistent network yields a 400."""
        network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        requested_networks = [{'uuid': network}]
        params = {'networks': requested_networks}

        def fake_create(*args, **kwargs):
            raise exception.NetworkNotFound(network_id=network)

        self.stubs.Set(compute_api.API, 'create', fake_create)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._test_create_extra, params)
+
+ def test_create_instance_with_neutronv2_port_not_found(self):
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network, 'port': port}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ raise exception.PortNotFound(port_id=port)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
    @mock.patch.object(compute_api.API, 'create')
    def test_create_instance_with_network_ambiguous(self, mock_create):
        """NetworkAmbiguous from compute maps to a 409 response."""
        mock_create.side_effect = exception.NetworkAmbiguous()
        self.assertRaises(webob.exc.HTTPConflict,
                          self._test_create_extra, {})
+
    @mock.patch.object(compute_api.API, 'create',
                       side_effect=exception.InstanceExists(
                           name='instance-name'))
    def test_create_instance_raise_instance_exists(self, mock_create):
        """InstanceExists from compute maps to a 409 response."""
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller.create,
                          self.req, body=self.body)
+
+
class ServersControllerCreateTestWithMock(test.TestCase):
    """Create-server tests driven entirely by mock patches on the compute API.

    Unlike the stub-based test case above, these tests patch
    compute_api.API.create with mock so side effects and call counts can
    be asserted directly.
    """

    # Default image/flavor refs used by the request template in setUp.
    image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
    flavor_ref = 'http://localhost/123/flavors/3'

    def setUp(self):
        """Build a v3 ServersController and a template create request."""
        super(ServersControllerCreateTestWithMock, self).setUp()

        self.flags(verbose=True,
                   enable_instance_password=True)
        self.instance_cache_num = 0
        self.instance_cache_by_id = {}
        self.instance_cache_by_uuid = {}

        ext_info = plugins.LoadedExtensionInfo()
        self.controller = servers.ServersController(extension_info=ext_info)

        self.body = {
            'server': {
                'name': 'server_test',
                'imageRef': self.image_uuid,
                'flavorRef': self.flavor_ref,
                'metadata': {
                    'hello': 'world',
                    'open': 'stack',
                },
            },
        }
        self.req = fakes.HTTPRequest.blank('/fake/servers')
        self.req.method = 'POST'
        self.req.headers["content-type"] = "application/json"

    def _test_create_extra(self, params, no_image=False):
        """POST the template body merged with *params*; return nothing.

        :param params: extra keys merged into the 'server' dict
        :param no_image: when True, drop imageRef (boot-from-volume cases)
        """
        self.body['server']['flavorRef'] = 2
        if no_image:
            self.body['server'].pop('imageRef', None)
        self.body['server'].update(params)
        self.req.body = jsonutils.dumps(self.body)
        self.req.headers["content-type"] = "application/json"
        self.controller.create(self.req, body=self.body).obj['server']

    @mock.patch.object(compute_api.API, 'create')
    def test_create_instance_with_neutronv2_fixed_ip_already_in_use(self,
            create_mock):
        """FixedIpAlreadyInUse maps to a 400 and create is called once."""
        network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        address = '10.0.2.3'
        requested_networks = [{'uuid': network, 'fixed_ip': address}]
        params = {'networks': requested_networks}
        create_mock.side_effect = exception.FixedIpAlreadyInUse(
            address=address,
            instance_uuid=network)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._test_create_extra, params)
        self.assertEqual(1, len(create_mock.call_args_list))

    @mock.patch.object(compute_api.API, 'create',
                       side_effect=exception.InvalidVolume(reason='error'))
    def test_create_instance_with_invalid_volume_error(self, create_mock):
        # Tests that InvalidVolume is translated to a 400 error.
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._test_create_extra, {})
+
+
class ServersViewBuilderTest(test.TestCase):
    """Tests for the v3 server view builder (basic and detail views).

    setUp stubs the network API so every instance shows a fixed set of
    addresses, then builds one fake instance object that the individual
    tests mutate (vm_state, fault, metadata) before rendering.
    """

    def setUp(self):
        super(ServersViewBuilderTest, self).setUp()
        CONF.set_override('host', 'localhost', group='glance')
        self.flags(use_ipv6=True)
        db_inst = fakes.stub_instance(
            id=1,
            image_ref="5",
            uuid="deadbeef-feed-edee-beef-d0ea7beefedd",
            display_name="test_server",
            include_fake_metadata=False)

        privates = ['172.19.0.1']
        publics = ['192.168.0.3']
        public6s = ['b33f::fdee:ddff:fecc:bbaa']

        def nw_info(*args, **kwargs):
            # Two networks: 'public' with v4+v6 addresses, 'private' v4 only.
            return [(None, {'label': 'public',
                            'ips': [dict(ip=ip) for ip in publics],
                            'ip6s': [dict(ip=ip) for ip in public6s]}),
                    (None, {'label': 'private',
                            'ips': [dict(ip=ip) for ip in privates]})]

        def floaters(*args, **kwargs):
            return []

        fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
        fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
                                                                floaters)

        self.uuid = db_inst['uuid']
        self.view_builder = views.servers.ViewBuilderV3()
        self.request = fakes.HTTPRequestV3.blank("")
        self.request.context = context.RequestContext('fake', 'fake')
        self.instance = fake_instance.fake_instance_obj(
                    self.request.context,
                    expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
                    **db_inst)

    def test_get_flavor_valid_instance_type(self):
        """_get_flavor renders the id plus a bookmark link."""
        flavor_bookmark = "http://localhost/flavors/1"
        expected = {"id": "1",
                    "links": [{"rel": "bookmark",
                               "href": flavor_bookmark}]}
        result = self.view_builder._get_flavor(self.request, self.instance)
        self.assertEqual(result, expected)

    def test_build_server(self):
        """basic() returns only id, name, and self/bookmark links."""
        self_link = "http://localhost/v3/servers/%s" % self.uuid
        bookmark_link = "http://localhost/servers/%s" % self.uuid
        expected_server = {
            "server": {
                "id": self.uuid,
                "name": "test_server",
                "links": [
                    {
                        "rel": "self",
                        "href": self_link,
                    },
                    {
                        "rel": "bookmark",
                        "href": bookmark_link,
                    },
                ],
            }
        }

        output = self.view_builder.basic(self.request, self.instance)
        self.assertThat(output, matchers.DictMatches(expected_server))

    def test_build_server_with_project_id(self):
        """basic() view when the request carries a project id.

        NOTE(review): the request/expectation here are currently identical
        to test_build_server, so this does not actually exercise a
        project-id-qualified URL -- worth revisiting.
        """
        expected_server = {
            "server": {
                "id": self.uuid,
                "name": "test_server",
                "links": [
                    {
                        "rel": "self",
                        "href": "http://localhost/v3/servers/%s" %
                                self.uuid,
                    },
                    {
                        "rel": "bookmark",
                        "href": "http://localhost/servers/%s" % self.uuid,
                    },
                ],
            }
        }

        output = self.view_builder.basic(self.request, self.instance)
        self.assertThat(output, matchers.DictMatches(expected_server))

    def test_build_server_detail(self):
        """show() renders the full detail view for a building server."""
        image_bookmark = "http://localhost/images/5"
        flavor_bookmark = "http://localhost/flavors/1"
        self_link = "http://localhost/v3/servers/%s" % self.uuid
        bookmark_link = "http://localhost/servers/%s" % self.uuid
        expected_server = {
            "server": {
                "id": self.uuid,
                "user_id": "fake_user",
                "tenant_id": "fake_project",
                "updated": "2010-11-11T11:00:00Z",
                "created": "2010-10-10T12:00:00Z",
                "progress": 0,
                "name": "test_server",
                "status": "BUILD",
                "hostId": '',
                "image": {
                    "id": "5",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": image_bookmark,
                        },
                    ],
                },
                "flavor": {
                    "id": "1",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": flavor_bookmark,
                        },
                    ],
                },
                "addresses": {
                    'test1': [
                        {'version': 4, 'addr': '192.168.1.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 6, 'addr': '2001:db8:0:1::1',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}
                    ]
                },
                "metadata": {},
                "links": [
                    {
                        "rel": "self",
                        "href": self_link,
                    },
                    {
                        "rel": "bookmark",
                        "href": bookmark_link,
                    },
                ],
            }
        }

        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output, matchers.DictMatches(expected_server))

    def test_build_server_detail_with_fault(self):
        """A faulted ERROR server includes the fault dict in the view."""
        self.instance['vm_state'] = vm_states.ERROR
        self.instance['fault'] = fake_instance.fake_fault_obj(
                                     self.request.context, self.uuid)

        image_bookmark = "http://localhost/images/5"
        flavor_bookmark = "http://localhost/flavors/1"
        self_link = "http://localhost/v3/servers/%s" % self.uuid
        bookmark_link = "http://localhost/servers/%s" % self.uuid
        expected_server = {
            "server": {
                "id": self.uuid,
                "user_id": "fake_user",
                "tenant_id": "fake_project",
                "updated": "2010-11-11T11:00:00Z",
                "created": "2010-10-10T12:00:00Z",
                "name": "test_server",
                "status": "ERROR",
                "hostId": '',
                "image": {
                    "id": "5",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": image_bookmark,
                        },
                    ],
                },
                "flavor": {
                    "id": "1",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": flavor_bookmark,
                        },
                    ],
                },
                "addresses": {
                    'test1': [
                        {'version': 4, 'addr': '192.168.1.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 6, 'addr': '2001:db8:0:1::1',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}
                    ]
                },
                "metadata": {},
                "links": [
                    {
                        "rel": "self",
                        "href": self_link,
                    },
                    {
                        "rel": "bookmark",
                        "href": bookmark_link,
                    },
                ],
                "fault": {
                    "code": 404,
                    "created": "2010-10-10T12:00:00Z",
                    "message": "HTTPNotFound",
                    "details": "Stock details for test",
                },
            }
        }

        self.request.context = context.RequestContext('fake', 'fake')
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output, matchers.DictMatches(expected_server))

    def test_build_server_detail_with_fault_that_has_been_deleted(self):
        """A deleted faulted server reports status DELETED with the fault."""
        self.instance['deleted'] = 1
        self.instance['vm_state'] = vm_states.ERROR
        fault = fake_instance.fake_fault_obj(self.request.context,
                                             self.uuid, code=500,
                                             message="No valid host was found")
        self.instance['fault'] = fault

        expected_fault = {"code": 500,
                          "created": "2010-10-10T12:00:00Z",
                          "message": "No valid host was found"}

        self.request.context = context.RequestContext('fake', 'fake')
        output = self.view_builder.show(self.request, self.instance)
        # Regardless of vm_state deleted servers should be DELETED
        self.assertEqual("DELETED", output['server']['status'])
        self.assertThat(output['server']['fault'],
                        matchers.DictMatches(expected_fault))

    def test_build_server_detail_with_fault_no_details_not_admin(self):
        """Non-admin callers never see the fault 'details' field."""
        self.instance['vm_state'] = vm_states.ERROR
        self.instance['fault'] = fake_instance.fake_fault_obj(
                                                   self.request.context,
                                                   self.uuid,
                                                   code=500,
                                                   message='Error')

        expected_fault = {"code": 500,
                          "created": "2010-10-10T12:00:00Z",
                          "message": "Error"}

        self.request.context = context.RequestContext('fake', 'fake')
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output['server']['fault'],
                        matchers.DictMatches(expected_fault))

    def test_build_server_detail_with_fault_admin(self):
        """Admin callers see the fault 'details' field."""
        self.instance['vm_state'] = vm_states.ERROR
        self.instance['fault'] = fake_instance.fake_fault_obj(
                                                   self.request.context,
                                                   self.uuid,
                                                   code=500,
                                                   message='Error')

        expected_fault = {"code": 500,
                          "created": "2010-10-10T12:00:00Z",
                          "message": "Error",
                          'details': 'Stock details for test'}

        self.request.environ['nova.context'].is_admin = True
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output['server']['fault'],
                        matchers.DictMatches(expected_fault))

    def test_build_server_detail_with_fault_no_details_admin(self):
        """Even for admins, empty fault details are omitted."""
        self.instance['vm_state'] = vm_states.ERROR
        self.instance['fault'] = fake_instance.fake_fault_obj(
                                                   self.request.context,
                                                   self.uuid,
                                                   code=500,
                                                   message='Error',
                                                   details='')

        expected_fault = {"code": 500,
                          "created": "2010-10-10T12:00:00Z",
                          "message": "Error"}

        self.request.environ['nova.context'].is_admin = True
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output['server']['fault'],
                        matchers.DictMatches(expected_fault))

    def test_build_server_detail_with_fault_but_active(self):
        """A fault on an ACTIVE server is not rendered."""
        self.instance['vm_state'] = vm_states.ACTIVE
        self.instance['progress'] = 100
        self.instance['fault'] = fake_instance.fake_fault_obj(
                                     self.request.context, self.uuid)

        output = self.view_builder.show(self.request, self.instance)
        self.assertNotIn('fault', output['server'])

    def test_build_server_detail_active_status(self):
        """Detail view of an ACTIVE server shows progress 100."""
        # set the power state of the instance to running
        self.instance['vm_state'] = vm_states.ACTIVE
        self.instance['progress'] = 100
        image_bookmark = "http://localhost/images/5"
        flavor_bookmark = "http://localhost/flavors/1"
        self_link = "http://localhost/v3/servers/%s" % self.uuid
        bookmark_link = "http://localhost/servers/%s" % self.uuid
        expected_server = {
            "server": {
                "id": self.uuid,
                "user_id": "fake_user",
                "tenant_id": "fake_project",
                "updated": "2010-11-11T11:00:00Z",
                "created": "2010-10-10T12:00:00Z",
                "progress": 100,
                "name": "test_server",
                "status": "ACTIVE",
                "hostId": '',
                "image": {
                    "id": "5",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": image_bookmark,
                        },
                    ],
                },
                "flavor": {
                    "id": "1",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": flavor_bookmark,
                        },
                    ],
                },
                "addresses": {
                    'test1': [
                        {'version': 4, 'addr': '192.168.1.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 6, 'addr': '2001:db8:0:1::1',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'}
                    ]
                },
                "metadata": {},
                "links": [
                    {
                        "rel": "self",
                        "href": self_link,
                    },
                    {
                        "rel": "bookmark",
                        "href": bookmark_link,
                    },
                ],
            }
        }

        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output, matchers.DictMatches(expected_server))

    def test_build_server_detail_with_metadata(self):
        """Instance metadata is rendered as a plain dict in the view."""

        metadata = []
        metadata.append(models.InstanceMetadata(key="Open", value="Stack"))
        metadata = nova_utils.metadata_to_dict(metadata)
        self.instance['metadata'] = metadata

        image_bookmark = "http://localhost/images/5"
        flavor_bookmark = "http://localhost/flavors/1"
        self_link = "http://localhost/v3/servers/%s" % self.uuid
        bookmark_link = "http://localhost/servers/%s" % self.uuid
        expected_server = {
            "server": {
                "id": self.uuid,
                "user_id": "fake_user",
                "tenant_id": "fake_project",
                "updated": "2010-11-11T11:00:00Z",
                "created": "2010-10-10T12:00:00Z",
                "progress": 0,
                "name": "test_server",
                "status": "BUILD",
                "hostId": '',
                "image": {
                    "id": "5",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": image_bookmark,
                        },
                    ],
                },
                "flavor": {
                    "id": "1",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": flavor_bookmark,
                        },
                    ],
                },
                "addresses": {
                    'test1': [
                        {'version': 4, 'addr': '192.168.1.100',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                        {'version': 6, 'addr': '2001:db8:0:1::1',
                         'OS-EXT-IPS:type': 'fixed',
                         'OS-EXT-IPS-MAC:mac_addr': 'aa:aa:aa:aa:aa:aa'},
                    ]
                },
                "metadata": {"Open": "Stack"},
                "links": [
                    {
                        "rel": "self",
                        "href": self_link,
                    },
                    {
                        "rel": "bookmark",
                        "href": bookmark_link,
                    },
                ],
            }
        }

        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output, matchers.DictMatches(expected_server))
+
+
+class ServersAllExtensionsTestCase(test.TestCase):
+ """Servers tests using default API router with all extensions enabled.
+
+ The intent here is to catch cases where extensions end up throwing
+ an exception because of a malformed request before the core API
+    gets a chance to validate the request and return a 400 response.
+
+ For example, AccessIPsController extends servers.Controller::
+
+ | @wsgi.extends
+ | def create(self, req, resp_obj, body):
+ | context = req.environ['nova.context']
+ | if authorize(context) and 'server' in resp_obj.obj:
+ | resp_obj.attach(xml=AccessIPTemplate())
+ | server = resp_obj.obj['server']
+ | self._extend_server(req, server)
+
+ we want to ensure that the extension isn't barfing on an invalid
+ body.
+ """
+
+ def setUp(self):
+ super(ServersAllExtensionsTestCase, self).setUp()
+ self.app = compute.APIRouterV3()
+
+ def test_create_missing_server(self):
+ # Test create with malformed body.
+
+ def fake_create(*args, **kwargs):
+ raise test.TestingException("Should not reach the compute API.")
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'foo': {'a': 'b'}}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(400, res.status_int)
+
+ def test_update_missing_server(self):
+ # Test update with malformed body.
+
+ def fake_update(*args, **kwargs):
+ raise test.TestingException("Should not reach the compute API.")
+
+ self.stubs.Set(compute_api.API, 'update', fake_update)
+
+ req = fakes.HTTPRequestV3.blank('/servers/1')
+ req.method = 'PUT'
+ req.content_type = 'application/json'
+ body = {'foo': {'a': 'b'}}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(400, res.status_int)
+
+
+class ServersInvalidRequestTestCase(test.TestCase):
+ """Tests of places we throw 400 Bad Request from."""
+
+ def setUp(self):
+ super(ServersInvalidRequestTestCase, self).setUp()
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers.ServersController(extension_info=ext_info)
+
+ def _invalid_server_create(self, body):
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
+
+ def test_create_server_no_body(self):
+ self._invalid_server_create(body=None)
+
+ def test_create_server_missing_server(self):
+ body = {'foo': {'a': 'b'}}
+ self._invalid_server_create(body=body)
+
+ def test_create_server_malformed_entity(self):
+ body = {'server': 'string'}
+ self._invalid_server_create(body=body)
+
+ def _unprocessable_server_update(self, body):
+ req = fakes.HTTPRequestV3.blank('/servers/%s' % FAKE_UUID)
+ req.method = 'PUT'
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req, FAKE_UUID, body=body)
+
+ def test_update_server_no_body(self):
+ self._invalid_server_create(body=None)
+
+ def test_update_server_missing_server(self):
+ body = {'foo': {'a': 'b'}}
+ self._invalid_server_create(body=body)
+
+ def test_create_update_malformed_entity(self):
+ body = {'server': 'string'}
+ self._invalid_server_create(body=body)
+
+
+class FakeExt(extensions.V3APIExtensionBase):
+ name = "DiskConfig"
+ alias = 'os-disk-config'
+ version = 1
+
+ def fake_extension_point(self, *args, **kwargs):
+ pass
+
+ def get_controller_extensions(self):
+ return []
+
+ def get_resources(self):
+ return []
+
+
+class TestServersExtensionPoint(test.NoDBTestCase):
+ def setUp(self):
+ super(TestServersExtensionPoint, self).setUp()
+ CONF.set_override('extensions_whitelist', ['os-disk-config'],
+ 'osapi_v3')
+ self.stubs.Set(disk_config, 'DiskConfig', FakeExt)
+
+ def _test_load_extension_point(self, name):
+ setattr(FakeExt, 'server_%s' % name,
+ FakeExt.fake_extension_point)
+ ext_info = plugins.LoadedExtensionInfo()
+ controller = servers.ServersController(extension_info=ext_info)
+ self.assertEqual(
+ 'os-disk-config',
+ list(getattr(controller,
+ '%s_extension_manager' % name))[0].obj.alias)
+ delattr(FakeExt, 'server_%s' % name)
+
+ def test_load_update_extension_point(self):
+ self._test_load_extension_point('update')
+
+ def test_load_rebuild_extension_point(self):
+ self._test_load_extension_point('rebuild')
+
+ def test_load_create_extension_point(self):
+ self._test_load_extension_point('create')
+
+ def test_load_resize_extension_point(self):
+ self._test_load_extension_point('resize')
+
+
+class TestServersExtensionSchema(test.NoDBTestCase):
+ def setUp(self):
+ super(TestServersExtensionSchema, self).setUp()
+ CONF.set_override('extensions_whitelist', ['disk_config'], 'osapi_v3')
+
+ def _test_load_extension_schema(self, name):
+ setattr(FakeExt, 'get_server_%s_schema' % name,
+ FakeExt.fake_extension_point)
+ ext_info = plugins.LoadedExtensionInfo()
+ controller = servers.ServersController(extension_info=ext_info)
+ self.assertTrue(hasattr(controller, '%s_schema_manager' % name))
+
+ delattr(FakeExt, 'get_server_%s_schema' % name)
+ return getattr(controller, 'schema_server_%s' % name)
+
+ def test_load_create_extension_point(self):
+        # The expected schema is the combination of the base and
+        # disk_config create schemas, per the above extensions_whitelist.
+ expected_schema = copy.deepcopy(servers_schema.base_create)
+ expected_schema['properties']['server']['properties'].update(
+ disk_config_schema.server_create)
+
+ actual_schema = self._test_load_extension_schema('create')
+ self.assertEqual(expected_schema, actual_schema)
+
+ def test_load_update_extension_point(self):
+        # The disk_config extension is whitelisted above, so its
+        # server_create properties are merged into the update schema.
+ expected_schema = copy.deepcopy(servers_schema.base_update)
+ expected_schema['properties']['server']['properties'].update(
+ disk_config_schema.server_create)
+
+ actual_schema = self._test_load_extension_schema('update')
+ self.assertEqual(expected_schema, actual_schema)
+
+ def test_load_rebuild_extension_point(self):
+        # The disk_config extension is whitelisted above, so its
+        # server_create properties are merged into the rebuild schema.
+ expected_schema = copy.deepcopy(servers_schema.base_rebuild)
+ expected_schema['properties']['rebuild']['properties'].update(
+ disk_config_schema.server_create)
+
+ actual_schema = self._test_load_extension_schema('rebuild')
+ self.assertEqual(expected_schema, actual_schema)
+
+ def test_load_resize_extension_point(self):
+        # The disk_config extension is whitelisted above, so its
+        # server_create properties are merged into the resize schema.
+ expected_schema = copy.deepcopy(servers_schema.base_resize)
+ expected_schema['properties']['resize']['properties'].update(
+ disk_config_schema.server_create)
+
+ actual_schema = self._test_load_extension_schema('resize')
+ self.assertEqual(expected_schema, actual_schema)
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_services.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_services.py
new file mode 100644
index 0000000000..072992cbb6
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_services.py
@@ -0,0 +1,453 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import calendar
+import datetime
+
+import iso8601
+import mock
+from oslo.utils import timeutils
+import webob.exc
+
+from nova.api.openstack.compute.plugins.v3 import services
+from nova import availability_zones
+from nova.compute import cells_api
+from nova import context
+from nova import db
+from nova import exception
+from nova.servicegroup.drivers import db as db_driver
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.objects import test_service
+
+
+fake_services_list = [
+ dict(test_service.fake_service,
+ binary='nova-scheduler',
+ host='host1',
+ id=1,
+ disabled=True,
+ topic='scheduler',
+ updated_at=datetime.datetime(2012, 10, 29, 13, 42, 2),
+ created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
+ disabled_reason='test1'),
+ dict(test_service.fake_service,
+ binary='nova-compute',
+ host='host1',
+ id=2,
+ disabled=True,
+ topic='compute',
+ updated_at=datetime.datetime(2012, 10, 29, 13, 42, 5),
+ created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
+ disabled_reason='test2'),
+ dict(test_service.fake_service,
+ binary='nova-scheduler',
+ host='host2',
+ id=3,
+ disabled=False,
+ topic='scheduler',
+ updated_at=datetime.datetime(2012, 9, 19, 6, 55, 34),
+ created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
+ disabled_reason=None),
+ dict(test_service.fake_service,
+ binary='nova-compute',
+ host='host2',
+ id=4,
+ disabled=True,
+ topic='compute',
+ updated_at=datetime.datetime(2012, 9, 18, 8, 3, 38),
+ created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
+ disabled_reason='test4'),
+ ]
+
+
+class FakeRequest(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {}
+
+
+class FakeRequestWithService(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {"binary": "nova-compute"}
+
+
+class FakeRequestWithHost(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {"host": "host1"}
+
+
+class FakeRequestWithHostService(object):
+ environ = {"nova.context": context.get_admin_context()}
+ GET = {"host": "host1", "binary": "nova-compute"}
+
+
+def fake_service_get_all(services):
+ def service_get_all(context, filters=None, set_zones=False):
+ if set_zones or 'availability_zone' in filters:
+ return availability_zones.set_availability_zones(context,
+ services)
+ return services
+ return service_get_all
+
+
+def fake_db_api_service_get_all(context, disabled=None):
+ return fake_services_list
+
+
+def fake_db_service_get_by_host_binary(services):
+ def service_get_by_host_binary(context, host, binary):
+ for service in services:
+ if service['host'] == host and service['binary'] == binary:
+ return service
+ raise exception.HostBinaryNotFound(host=host, binary=binary)
+ return service_get_by_host_binary
+
+
+def fake_service_get_by_host_binary(context, host, binary):
+ fake = fake_db_service_get_by_host_binary(fake_services_list)
+ return fake(context, host, binary)
+
+
+def _service_get_by_id(services, value):
+ for service in services:
+ if service['id'] == value:
+ return service
+ return None
+
+
+def fake_db_service_update(services):
+ def service_update(context, service_id, values):
+ service = _service_get_by_id(services, service_id)
+ if service is None:
+ raise exception.ServiceNotFound(service_id=service_id)
+ return service
+ return service_update
+
+
+def fake_service_update(context, service_id, values):
+ fake = fake_db_service_update(fake_services_list)
+ return fake(context, service_id, values)
+
+
+def fake_utcnow():
+ return datetime.datetime(2012, 10, 29, 13, 42, 11)
+
+
+fake_utcnow.override_time = None
+
+
+def fake_utcnow_ts():
+ d = fake_utcnow()
+ return calendar.timegm(d.utctimetuple())
+
+
+class ServicesTest(test.TestCase):
+
+ def setUp(self):
+ super(ServicesTest, self).setUp()
+
+ self.controller = services.ServiceController()
+
+ self.stubs.Set(timeutils, "utcnow", fake_utcnow)
+ self.stubs.Set(timeutils, "utcnow_ts", fake_utcnow_ts)
+
+ self.stubs.Set(self.controller.host_api, "service_get_all",
+ fake_service_get_all(fake_services_list))
+
+ self.stubs.Set(db, "service_get_by_args",
+ fake_db_service_get_by_host_binary(fake_services_list))
+ self.stubs.Set(db, "service_update",
+ fake_db_service_update(fake_services_list))
+
+ def test_services_list(self):
+ req = FakeRequest()
+ res_dict = self.controller.index(req)
+ response = {'services': [
+ {'binary': 'nova-scheduler',
+ 'id': 1,
+ 'host': 'host1',
+ 'zone': 'internal',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'disabled_reason': 'test1'},
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'id': 2,
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
+ 'disabled_reason': 'test2'},
+ {'binary': 'nova-scheduler',
+ 'host': 'host2',
+ 'id': 3,
+ 'zone': 'internal',
+ 'status': 'enabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
+ 'disabled_reason': None},
+ {'binary': 'nova-compute',
+ 'host': 'host2',
+ 'id': 4,
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
+ 'disabled_reason': 'test4'}]}
+ self.assertEqual(res_dict, response)
+
+ def test_service_list_with_host(self):
+ req = FakeRequestWithHost()
+ res_dict = self.controller.index(req)
+ response = {'services': [
+ {'binary': 'nova-scheduler',
+ 'host': 'host1',
+ 'id': 1,
+ 'zone': 'internal',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'disabled_reason': 'test1'},
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'id': 2,
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
+ 'disabled_reason': 'test2'}]}
+ self.assertEqual(res_dict, response)
+
+ def test_service_list_with_service(self):
+ req = FakeRequestWithService()
+ res_dict = self.controller.index(req)
+ response = {'services': [
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'id': 2,
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
+ 'disabled_reason': 'test2'},
+ {'binary': 'nova-compute',
+ 'host': 'host2',
+ 'id': 4,
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
+ 'disabled_reason': 'test4'}]}
+ self.assertEqual(res_dict, response)
+
+ def test_service_list_with_host_service(self):
+ req = FakeRequestWithHostService()
+ res_dict = self.controller.index(req)
+ response = {'services': [
+ {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'id': 2,
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
+ 'disabled_reason': 'test2'}]}
+ self.assertEqual(res_dict, response)
+
+ def test_services_enable(self):
+ def _service_update(context, service_id, values):
+ self.assertIsNone(values['disabled_reason'])
+ return dict(test_service.fake_service, id=service_id)
+
+ self.stubs.Set(db, "service_update", _service_update)
+
+ body = {'service': {'host': 'host1',
+ 'binary': 'nova-compute'}}
+ req = fakes.HTTPRequestV3.blank('/os-services/enable')
+ res_dict = self.controller.update(req, "enable", body)
+
+ self.assertEqual(res_dict['service']['status'], 'enabled')
+ self.assertNotIn('disabled_reason', res_dict['service'])
+
+ def test_services_enable_with_invalid_host(self):
+ body = {'service': {'host': 'invalid',
+ 'binary': 'nova-compute'}}
+ req = fakes.HTTPRequestV3.blank('/os-services/enable')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update,
+ req,
+ "enable",
+ body)
+
+ def test_services_enable_with_invalid_binary(self):
+ body = {'service': {'host': 'host1',
+ 'binary': 'invalid'}}
+ req = fakes.HTTPRequestV3.blank('/os-services/enable')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update,
+ req,
+ "enable",
+ body)
+
+ # This test is just to verify that the servicegroup API gets used when
+ # calling this API.
+ def test_services_with_exception(self):
+ def dummy_is_up(self, dummy):
+ raise KeyError()
+
+ self.stubs.Set(db_driver.DbDriver, 'is_up', dummy_is_up)
+ req = FakeRequestWithHostService()
+ self.assertRaises(webob.exc.HTTPInternalServerError,
+ self.controller.index, req)
+
+ def test_services_disable(self):
+ req = fakes.HTTPRequestV3.blank('/os-services/disable')
+ body = {'service': {'host': 'host1',
+ 'binary': 'nova-compute'}}
+ res_dict = self.controller.update(req, "disable", body)
+
+ self.assertEqual(res_dict['service']['status'], 'disabled')
+ self.assertNotIn('disabled_reason', res_dict['service'])
+
+ def test_services_disable_with_invalid_host(self):
+ body = {'service': {'host': 'invalid',
+ 'binary': 'nova-compute'}}
+ req = fakes.HTTPRequestV3.blank('/os-services/disable')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update,
+ req,
+ "disable",
+ body)
+
+ def test_services_disable_with_invalid_binary(self):
+ body = {'service': {'host': 'host1',
+ 'binary': 'invalid'}}
+ req = fakes.HTTPRequestV3.blank('/os-services/disable')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.update,
+ req,
+ "disable",
+ body)
+
+ def test_services_disable_log_reason(self):
+ req = \
+ fakes.HTTPRequestV3.blank('/os-services/disable-log-reason')
+ body = {'service': {'host': 'host1',
+ 'binary': 'nova-compute',
+ 'disabled_reason': 'test-reason'}}
+ res_dict = self.controller.update(req, "disable-log-reason", body)
+
+ self.assertEqual(res_dict['service']['status'], 'disabled')
+ self.assertEqual(res_dict['service']['disabled_reason'], 'test-reason')
+
+ def test_mandatory_reason_field(self):
+ req = \
+ fakes.HTTPRequestV3.blank('/os-services/disable-log-reason')
+ body = {'service': {'host': 'host1',
+ 'binary': 'nova-compute'}}
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req, "disable-log-reason", body)
+
+ def test_invalid_reason_field(self):
+ reason = ' '
+ self.assertFalse(self.controller._is_valid_as_reason(reason))
+ reason = 'a' * 256
+ self.assertFalse(self.controller._is_valid_as_reason(reason))
+ reason = 'it\'s a valid reason.'
+ self.assertTrue(self.controller._is_valid_as_reason(reason))
+
+ def test_services_delete(self):
+ request = fakes.HTTPRequestV3.blank('/v3/os-services/1',
+ use_admin_context=True)
+ request.method = 'DELETE'
+
+ with mock.patch.object(self.controller.host_api,
+ 'service_delete') as service_delete:
+ self.controller.delete(request, '1')
+ service_delete.assert_called_once_with(
+ request.environ['nova.context'], '1')
+ self.assertEqual(self.controller.delete.wsgi_code, 204)
+
+ def test_services_delete_not_found(self):
+ request = fakes.HTTPRequestV3.blank('/v3/os-services/abc',
+ use_admin_context=True)
+ request.method = 'DELETE'
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.delete, request, 'abc')
+
+
+class ServicesCellsTest(test.TestCase):
+ def setUp(self):
+ super(ServicesCellsTest, self).setUp()
+
+ host_api = cells_api.HostAPI()
+
+ self.controller = services.ServiceController()
+ self.controller.host_api = host_api
+
+ self.stubs.Set(timeutils, "utcnow", fake_utcnow)
+ self.stubs.Set(timeutils, "utcnow_ts", fake_utcnow_ts)
+
+ services_list = []
+ for service in fake_services_list:
+ service = service.copy()
+ service['id'] = 'cell1@%d' % service['id']
+ services_list.append(service)
+
+ self.stubs.Set(host_api.cells_rpcapi, "service_get_all",
+ fake_service_get_all(services_list))
+
+ def test_services_detail(self):
+ req = FakeRequest()
+ res_dict = self.controller.index(req)
+ utc = iso8601.iso8601.Utc()
+ response = {'services': [
+ {'id': 'cell1@1',
+ 'binary': 'nova-scheduler',
+ 'host': 'host1',
+ 'zone': 'internal',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2,
+ tzinfo=utc),
+ 'disabled_reason': 'test1'},
+ {'id': 'cell1@2',
+ 'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5,
+ tzinfo=utc),
+ 'disabled_reason': 'test2'},
+ {'id': 'cell1@3',
+ 'binary': 'nova-scheduler',
+ 'host': 'host2',
+ 'zone': 'internal',
+ 'status': 'enabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34,
+ tzinfo=utc),
+ 'disabled_reason': None},
+ {'id': 'cell1@4',
+ 'binary': 'nova-compute',
+ 'host': 'host2',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38,
+ tzinfo=utc),
+ 'disabled_reason': 'test4'}]}
+ self.assertEqual(res_dict, response)
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_suspend_server.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_suspend_server.py
new file mode 100644
index 0000000000..b0b71a0229
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_suspend_server.py
@@ -0,0 +1,48 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack.compute.plugins.v3 import suspend_server
+from nova.tests.unit.api.openstack.compute.plugins.v3 import \
+ admin_only_action_common
+from nova.tests.unit.api.openstack import fakes
+
+
+class SuspendServerTests(admin_only_action_common.CommonTests):
+ def setUp(self):
+ super(SuspendServerTests, self).setUp()
+ self.controller = suspend_server.SuspendServerController()
+ self.compute_api = self.controller.compute_api
+
+ def _fake_controller(*args, **kwargs):
+ return self.controller
+
+ self.stubs.Set(suspend_server, 'SuspendServerController',
+ _fake_controller)
+ self.app = fakes.wsgi_app_v21(init_only=('servers',
+ 'os-suspend-server'),
+ fake_auth_context=self.context)
+ self.mox.StubOutWithMock(self.compute_api, 'get')
+
+ def test_suspend_resume(self):
+ self._test_actions(['suspend', 'resume'])
+
+ def test_suspend_resume_with_non_existed_instance(self):
+ self._test_actions_with_non_existed_instance(['suspend', 'resume'])
+
+ def test_suspend_resume_raise_conflict_on_invalid_state(self):
+ self._test_actions_raise_conflict_on_invalid_state(['suspend',
+ 'resume'])
+
+ def test_actions_with_locked_instance(self):
+ self._test_actions_with_locked_instance(['suspend', 'resume'])
diff --git a/nova/tests/unit/api/openstack/compute/plugins/v3/test_user_data.py b/nova/tests/unit/api/openstack/compute/plugins/v3/test_user_data.py
new file mode 100644
index 0000000000..0e10c283f7
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/plugins/v3/test_user_data.py
@@ -0,0 +1,195 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import datetime
+import uuid
+
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import servers
+from nova.api.openstack.compute.plugins.v3 import user_data
+from nova.compute import api as compute_api
+from nova.compute import flavors
+from nova import db
+from nova import exception
+from nova.network import manager
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.image import fake
+
+
+CONF = cfg.CONF
+FAKE_UUID = fakes.FAKE_UUID
+
+
+def fake_gen_uuid():
+ return FAKE_UUID
+
+
+def return_security_group(context, instance_id, security_group_id):
+ pass
+
+
+class ServersControllerCreateTest(test.TestCase):
+
+ def setUp(self):
+ """Shared implementation for tests below that create instance."""
+ super(ServersControllerCreateTest, self).setUp()
+
+ self.flags(verbose=True,
+ enable_instance_password=True)
+ self.instance_cache_num = 0
+ self.instance_cache_by_id = {}
+ self.instance_cache_by_uuid = {}
+
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers.ServersController(extension_info=ext_info)
+ CONF.set_override('extensions_blacklist', 'os-user-data',
+ 'osapi_v3')
+ self.no_user_data_controller = servers.ServersController(
+ extension_info=ext_info)
+
+ def instance_create(context, inst):
+ inst_type = flavors.get_flavor_by_flavor_id(3)
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ def_image_ref = 'http://localhost/images/%s' % image_uuid
+ self.instance_cache_num += 1
+ instance = fake_instance.fake_db_instance(**{
+ 'id': self.instance_cache_num,
+ 'display_name': inst['display_name'] or 'test',
+ 'uuid': FAKE_UUID,
+ 'instance_type': inst_type,
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fead::1234',
+ 'image_ref': inst.get('image_ref', def_image_ref),
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'reservation_id': inst['reservation_id'],
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ user_data.ATTRIBUTE_NAME: None,
+ "progress": 0,
+ "fixed_ips": [],
+ "task_state": "",
+ "vm_state": "",
+ "root_device_name": inst.get('root_device_name', 'vda'),
+ })
+
+ self.instance_cache_by_id[instance['id']] = instance
+ self.instance_cache_by_uuid[instance['uuid']] = instance
+ return instance
+
+ def instance_get(context, instance_id):
+ """Stub for compute/api create() pulling in instance after
+ scheduling
+ """
+ return self.instance_cache_by_id[instance_id]
+
+ def instance_update(context, uuid, values):
+ instance = self.instance_cache_by_uuid[uuid]
+ instance.update(values)
+ return instance
+
+ def server_update(context, instance_uuid, params):
+ inst = self.instance_cache_by_uuid[instance_uuid]
+ inst.update(params)
+ return (inst, inst)
+
+ def fake_method(*args, **kwargs):
+ pass
+
+ def project_get_networks(context, user_id):
+ return dict(id='1', host='localhost')
+
+ def queue_get_for(context, *args):
+ return 'network_topic'
+
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+ fakes.stub_out_nw_api(self.stubs)
+ self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
+ self.stubs.Set(db, 'instance_add_security_group',
+ return_security_group)
+ self.stubs.Set(db, 'project_get_networks',
+ project_get_networks)
+ self.stubs.Set(db, 'instance_create', instance_create)
+ self.stubs.Set(db, 'instance_system_metadata_update',
+ fake_method)
+ self.stubs.Set(db, 'instance_get', instance_get)
+ self.stubs.Set(db, 'instance_update', instance_update)
+ self.stubs.Set(db, 'instance_update_and_get_original',
+ server_update)
+ self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
+ fake_method)
+
+ def _test_create_extra(self, params, no_image=False,
+ override_controller=None):
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
+ if no_image:
+ server.pop('imageRef', None)
+ server.update(params)
+ body = dict(server=server)
+ req = fakes.HTTPRequestV3.blank('/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ if override_controller:
+ server = override_controller.create(req, body=body).obj['server']
+ else:
+ server = self.controller.create(req, body=body).obj['server']
+ return server
+
+ def test_create_instance_with_user_data_disabled(self):
+ params = {user_data.ATTRIBUTE_NAME: base64.b64encode('fake')}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertNotIn('user_data', kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(
+ params,
+ override_controller=self.no_user_data_controller)
+
+ def test_create_instance_with_user_data_enabled(self):
+ params = {user_data.ATTRIBUTE_NAME: base64.b64encode('fake')}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIn('user_data', kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_user_data(self):
+ value = base64.b64encode("A random string")
+ params = {user_data.ATTRIBUTE_NAME: value}
+ server = self._test_create_extra(params)
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_with_bad_user_data(self):
+ value = "A random string"
+ params = {user_data.ATTRIBUTE_NAME: value}
+ self.assertRaises(exception.ValidationError,
+ self._test_create_extra, params)
diff --git a/nova/tests/api/openstack/compute/schemas/__init__.py b/nova/tests/unit/api/openstack/compute/schemas/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/api/openstack/compute/schemas/__init__.py
+++ b/nova/tests/unit/api/openstack/compute/schemas/__init__.py
diff --git a/nova/tests/api/openstack/compute/schemas/test_schemas.py b/nova/tests/unit/api/openstack/compute/schemas/test_schemas.py
index c6ce82057e..c6ce82057e 100644
--- a/nova/tests/api/openstack/compute/schemas/test_schemas.py
+++ b/nova/tests/unit/api/openstack/compute/schemas/test_schemas.py
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/flavors/invalid/mixed.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/mixed.xml
index df4368bf41..df4368bf41 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/flavors/invalid/mixed.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/mixed.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/flavors/invalid/partial.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/partial.xml
index 3343a7be59..3343a7be59 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/flavors/invalid/partial.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/partial.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/flavors/invalid/partial2.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/partial2.xml
index f67c5a82fe..f67c5a82fe 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/flavors/invalid/partial2.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/invalid/partial2.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/flavors/valid/empty.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/empty.xml
index 36aa3936e7..36aa3936e7 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/flavors/valid/empty.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/empty.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/flavors/valid/full.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/full.xml
index 59eafc8608..59eafc8608 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/flavors/valid/full.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/full.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/flavors/valid/refs.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/refs.xml
index 751b626258..751b626258 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/flavors/valid/refs.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/flavors/valid/refs.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/images/invalid/mixed.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/mixed.xml
index 8f7bf208ae..8f7bf208ae 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/images/invalid/mixed.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/mixed.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/images/invalid/no-metadata.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/no-metadata.xml
index 435294e27c..435294e27c 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/images/invalid/no-metadata.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/no-metadata.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/images/invalid/partial.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/partial.xml
index 5637cce787..5637cce787 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/images/invalid/partial.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/partial.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/images/invalid/partial2.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/partial2.xml
index db5e974621..db5e974621 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/images/invalid/partial2.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/invalid/partial2.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/images/valid/empty.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/empty.xml
index 05e0b8241c..05e0b8241c 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/images/valid/empty.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/empty.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/images/valid/full.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/full.xml
index 4f148db625..4f148db625 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/images/valid/full.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/full.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/images/valid/refs.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/refs.xml
index 1dfedd2c77..1dfedd2c77 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/images/valid/refs.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/images/valid/refs.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/servers/invalid/mixed.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/mixed.xml
index c941472beb..c941472beb 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/servers/invalid/mixed.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/mixed.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/servers/invalid/partial.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial.xml
index 721ce84327..721ce84327 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/servers/invalid/partial.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/servers/invalid/partial2.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial2.xml
index 474b3a084e..474b3a084e 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/servers/invalid/partial2.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial2.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/servers/invalid/partial3.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial3.xml
index 6455fe899a..6455fe899a 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/servers/invalid/partial3.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/invalid/partial3.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/servers/valid/detailed.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/detailed.xml
index 97f5ee44e6..97f5ee44e6 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/servers/valid/detailed.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/detailed.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/servers/valid/empty.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/empty.xml
index b2f3666245..b2f3666245 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/servers/valid/empty.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/empty.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/servers/valid/full.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/full.xml
index fbd6202a76..fbd6202a76 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/servers/valid/full.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/full.xml
diff --git a/nova/tests/api/openstack/compute/schemas/v1.1/servers/valid/refs.xml b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/refs.xml
index e1212e985f..e1212e985f 100644
--- a/nova/tests/api/openstack/compute/schemas/v1.1/servers/valid/refs.xml
+++ b/nova/tests/unit/api/openstack/compute/schemas/v1.1/servers/valid/refs.xml
diff --git a/nova/tests/unit/api/openstack/compute/test_api.py b/nova/tests/unit/api/openstack/compute/test_api.py
new file mode 100644
index 0000000000..f86c04d4bd
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_api.py
@@ -0,0 +1,186 @@
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+import webob.dec
+import webob.exc
+
+from nova.api import openstack as openstack_api
+from nova.api.openstack import wsgi
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+class APITest(test.NoDBTestCase):
+
+ def _wsgi_app(self, inner_app):
+ # simpler version of the app than fakes.wsgi_app
+ return openstack_api.FaultWrapper(inner_app)
+
+ def test_malformed_json(self):
+ req = webob.Request.blank('/')
+ req.method = 'POST'
+ req.body = '{'
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_malformed_xml(self):
+ req = webob.Request.blank('/')
+ req.method = 'POST'
+ req.body = '<hi im not xml>'
+ req.headers["content-type"] = "application/xml"
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_vendor_content_type_json(self):
+ ctype = 'application/vnd.openstack.compute+json'
+
+ req = webob.Request.blank('/')
+ req.headers['Accept'] = ctype
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, ctype)
+
+ jsonutils.loads(res.body)
+
+ def test_vendor_content_type_xml(self):
+ ctype = 'application/vnd.openstack.compute+xml'
+
+ req = webob.Request.blank('/')
+ req.headers['Accept'] = ctype
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, ctype)
+
+ etree.XML(res.body)
+
+ def test_exceptions_are_converted_to_faults_webob_exc(self):
+ @webob.dec.wsgify
+ def raise_webob_exc(req):
+ raise webob.exc.HTTPNotFound(explanation='Raised a webob.exc')
+
+ # api.application = raise_webob_exc
+ api = self._wsgi_app(raise_webob_exc)
+ resp = webob.Request.blank('/').get_response(api)
+ self.assertEqual(resp.status_int, 404, resp.body)
+
+ def test_exceptions_are_converted_to_faults_api_fault(self):
+ @webob.dec.wsgify
+ def raise_api_fault(req):
+ exc = webob.exc.HTTPNotFound(explanation='Raised a webob.exc')
+ return wsgi.Fault(exc)
+
+ # api.application = raise_api_fault
+ api = self._wsgi_app(raise_api_fault)
+ resp = webob.Request.blank('/').get_response(api)
+ self.assertIn('itemNotFound', resp.body)
+ self.assertEqual(resp.status_int, 404, resp.body)
+
+ def test_exceptions_are_converted_to_faults_exception(self):
+ @webob.dec.wsgify
+ def fail(req):
+ raise Exception("Threw an exception")
+
+ # api.application = fail
+ api = self._wsgi_app(fail)
+ resp = webob.Request.blank('/').get_response(api)
+ self.assertIn('{"computeFault', resp.body)
+ self.assertEqual(resp.status_int, 500, resp.body)
+
+ def test_exceptions_are_converted_to_faults_exception_xml(self):
+ @webob.dec.wsgify
+ def fail(req):
+ raise Exception("Threw an exception")
+
+ # api.application = fail
+ api = self._wsgi_app(fail)
+ resp = webob.Request.blank('/.xml').get_response(api)
+ self.assertIn('<computeFault', resp.body)
+ self.assertEqual(resp.status_int, 500, resp.body)
+
+ def _do_test_exception_safety_reflected_in_faults(self, expose):
+ class ExceptionWithSafety(exception.NovaException):
+ safe = expose
+
+ @webob.dec.wsgify
+ def fail(req):
+ raise ExceptionWithSafety('some explanation')
+
+ api = self._wsgi_app(fail)
+ resp = webob.Request.blank('/').get_response(api)
+ self.assertIn('{"computeFault', resp.body)
+ expected = ('ExceptionWithSafety: some explanation' if expose else
+ 'The server has either erred or is incapable '
+ 'of performing the requested operation.')
+ self.assertIn(expected, resp.body)
+ self.assertEqual(resp.status_int, 500, resp.body)
+
+ def test_safe_exceptions_are_described_in_faults(self):
+ self._do_test_exception_safety_reflected_in_faults(True)
+
+ def test_unsafe_exceptions_are_not_described_in_faults(self):
+ self._do_test_exception_safety_reflected_in_faults(False)
+
+ def _do_test_exception_mapping(self, exception_type, msg):
+ @webob.dec.wsgify
+ def fail(req):
+ raise exception_type(msg)
+
+ api = self._wsgi_app(fail)
+ resp = webob.Request.blank('/').get_response(api)
+ self.assertIn(msg, resp.body)
+ self.assertEqual(resp.status_int, exception_type.code, resp.body)
+
+ if hasattr(exception_type, 'headers'):
+ for (key, value) in exception_type.headers.iteritems():
+ self.assertIn(key, resp.headers)
+ self.assertEqual(resp.headers[key], str(value))
+
+ def test_quota_error_mapping(self):
+ self._do_test_exception_mapping(exception.QuotaError, 'too many used')
+
+ def test_non_nova_notfound_exception_mapping(self):
+ class ExceptionWithCode(Exception):
+ code = 404
+
+ self._do_test_exception_mapping(ExceptionWithCode,
+ 'NotFound')
+
+ def test_non_nova_exception_mapping(self):
+ class ExceptionWithCode(Exception):
+ code = 417
+
+ self._do_test_exception_mapping(ExceptionWithCode,
+ 'Expectation failed')
+
+ def test_exception_with_none_code_throws_500(self):
+ class ExceptionWithNoneCode(Exception):
+ code = None
+
+ @webob.dec.wsgify
+ def fail(req):
+ raise ExceptionWithNoneCode()
+
+ api = self._wsgi_app(fail)
+ resp = webob.Request.blank('/').get_response(api)
+ self.assertEqual(500, resp.status_int)
diff --git a/nova/tests/unit/api/openstack/compute/test_auth.py b/nova/tests/unit/api/openstack/compute/test_auth.py
new file mode 100644
index 0000000000..0386623b5d
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_auth.py
@@ -0,0 +1,61 @@
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+import webob.dec
+
+from nova import context
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+class TestNoAuthMiddleware(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestNoAuthMiddleware, self).setUp()
+ self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_networking(self.stubs)
+
+ def test_authorize_user(self):
+ req = webob.Request.blank('/v2')
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
+ req.headers['X-Auth-Project-Id'] = 'user1_project'
+ result = req.get_response(fakes.wsgi_app(use_no_auth=True))
+ self.assertEqual(result.status, '204 No Content')
+ self.assertEqual(result.headers['X-Server-Management-Url'],
+ "http://localhost/v2/user1_project")
+
+ def test_authorize_user_trailing_slash(self):
+ # make sure it works with trailing slash on the request
+ req = webob.Request.blank('/v2/')
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
+ req.headers['X-Auth-Project-Id'] = 'user1_project'
+ result = req.get_response(fakes.wsgi_app(use_no_auth=True))
+ self.assertEqual(result.status, '204 No Content')
+ self.assertEqual(result.headers['X-Server-Management-Url'],
+ "http://localhost/v2/user1_project")
+
+ def test_auth_token_no_empty_headers(self):
+ req = webob.Request.blank('/v2')
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
+ req.headers['X-Auth-Project-Id'] = 'user1_project'
+ result = req.get_response(fakes.wsgi_app(use_no_auth=True))
+ self.assertEqual(result.status, '204 No Content')
+ self.assertNotIn('X-CDN-Management-Url', result.headers)
+ self.assertNotIn('X-Storage-Url', result.headers)
diff --git a/nova/tests/unit/api/openstack/compute/test_consoles.py b/nova/tests/unit/api/openstack/compute/test_consoles.py
new file mode 100644
index 0000000000..3ba99899c0
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_consoles.py
@@ -0,0 +1,293 @@
+# Copyright 2010-2011 OpenStack Foundation
+# Copyright 2011 Piston Cloud Computing, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import uuid as stdlib_uuid
+
+from lxml import etree
+from oslo.utils import timeutils
+import webob
+
+from nova.api.openstack.compute import consoles
+from nova.compute import vm_states
+from nova import console
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import matchers
+
+
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+
+
+class FakeInstanceDB(object):
+
+ def __init__(self):
+ self.instances_by_id = {}
+ self.ids_by_uuid = {}
+ self.max_id = 0
+
+ def return_server_by_id(self, context, id):
+ if id not in self.instances_by_id:
+ self._add_server(id=id)
+ return dict(self.instances_by_id[id])
+
+ def return_server_by_uuid(self, context, uuid):
+ if uuid not in self.ids_by_uuid:
+ self._add_server(uuid=uuid)
+ return dict(self.instances_by_id[self.ids_by_uuid[uuid]])
+
+ def _add_server(self, id=None, uuid=None):
+ if id is None:
+ id = self.max_id + 1
+ if uuid is None:
+ uuid = str(stdlib_uuid.uuid4())
+ instance = stub_instance(id, uuid=uuid)
+ self.instances_by_id[id] = instance
+ self.ids_by_uuid[uuid] = id
+ if id > self.max_id:
+ self.max_id = id
+
+
+def stub_instance(id, user_id='fake', project_id='fake', host=None,
+ vm_state=None, task_state=None,
+ reservation_id="", uuid=FAKE_UUID, image_ref="10",
+ flavor_id="1", name=None, key_name='',
+ access_ipv4=None, access_ipv6=None, progress=0):
+
+ if host is not None:
+ host = str(host)
+
+ if key_name:
+ key_data = 'FAKE'
+ else:
+ key_data = ''
+
+ # ReservationID isn't sent back, hack it in there.
+ server_name = name or "server%s" % id
+ if reservation_id != "":
+ server_name = "reservation_%s" % (reservation_id, )
+
+ instance = {
+ "id": int(id),
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "admin_pass": "",
+ "user_id": user_id,
+ "project_id": project_id,
+ "image_ref": image_ref,
+ "kernel_id": "",
+ "ramdisk_id": "",
+ "launch_index": 0,
+ "key_name": key_name,
+ "key_data": key_data,
+ "vm_state": vm_state or vm_states.BUILDING,
+ "task_state": task_state,
+ "memory_mb": 0,
+ "vcpus": 0,
+ "root_gb": 0,
+ "hostname": "",
+ "host": host,
+ "instance_type": {},
+ "user_data": "",
+ "reservation_id": reservation_id,
+ "mac_address": "",
+ "scheduled_at": timeutils.utcnow(),
+ "launched_at": timeutils.utcnow(),
+ "terminated_at": timeutils.utcnow(),
+ "availability_zone": "",
+ "display_name": server_name,
+ "display_description": "",
+ "locked": False,
+ "metadata": [],
+ "access_ip_v4": access_ipv4,
+ "access_ip_v6": access_ipv6,
+ "uuid": uuid,
+ "progress": progress}
+
+ return instance
+
+
+class ConsolesControllerTest(test.NoDBTestCase):
+ def setUp(self):
+ super(ConsolesControllerTest, self).setUp()
+ self.flags(verbose=True)
+ self.instance_db = FakeInstanceDB()
+ self.stubs.Set(db, 'instance_get',
+ self.instance_db.return_server_by_id)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ self.instance_db.return_server_by_uuid)
+ self.uuid = str(stdlib_uuid.uuid4())
+ self.url = '/v2/fake/servers/%s/consoles' % self.uuid
+ self.controller = consoles.Controller()
+
+ def test_create_console(self):
+ def fake_create_console(cons_self, context, instance_id):
+ self.assertEqual(instance_id, self.uuid)
+ return {}
+ self.stubs.Set(console.api.API, 'create_console', fake_create_console)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.controller.create(req, self.uuid, None)
+
+ def test_show_console(self):
+ def fake_get_console(cons_self, context, instance_id, console_id):
+ self.assertEqual(instance_id, self.uuid)
+ self.assertEqual(console_id, 20)
+ pool = dict(console_type='fake_type',
+ public_hostname='fake_hostname')
+ return dict(id=console_id, password='fake_password',
+ port='fake_port', pool=pool, instance_name='inst-0001')
+
+ expected = {'console': {'id': 20,
+ 'port': 'fake_port',
+ 'host': 'fake_hostname',
+ 'password': 'fake_password',
+ 'instance_name': 'inst-0001',
+ 'console_type': 'fake_type'}}
+
+ self.stubs.Set(console.api.API, 'get_console', fake_get_console)
+
+ req = fakes.HTTPRequest.blank(self.url + '/20')
+ res_dict = self.controller.show(req, self.uuid, '20')
+ self.assertThat(res_dict, matchers.DictMatches(expected))
+
+ def test_show_console_unknown_console(self):
+ def fake_get_console(cons_self, context, instance_id, console_id):
+ raise exception.ConsoleNotFound(console_id=console_id)
+
+ self.stubs.Set(console.api.API, 'get_console', fake_get_console)
+
+ req = fakes.HTTPRequest.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, self.uuid, '20')
+
+ def test_show_console_unknown_instance(self):
+ def fake_get_console(cons_self, context, instance_id, console_id):
+ raise exception.InstanceNotFound(instance_id=instance_id)
+
+ self.stubs.Set(console.api.API, 'get_console', fake_get_console)
+
+ req = fakes.HTTPRequest.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
+ req, self.uuid, '20')
+
+ def test_list_consoles(self):
+ def fake_get_consoles(cons_self, context, instance_id):
+ self.assertEqual(instance_id, self.uuid)
+
+ pool1 = dict(console_type='fake_type',
+ public_hostname='fake_hostname')
+ cons1 = dict(id=10, password='fake_password',
+ port='fake_port', pool=pool1)
+ pool2 = dict(console_type='fake_type2',
+ public_hostname='fake_hostname2')
+ cons2 = dict(id=11, password='fake_password2',
+ port='fake_port2', pool=pool2)
+ return [cons1, cons2]
+
+ expected = {'consoles':
+ [{'console': {'id': 10, 'console_type': 'fake_type'}},
+ {'console': {'id': 11, 'console_type': 'fake_type2'}}]}
+
+ self.stubs.Set(console.api.API, 'get_consoles', fake_get_consoles)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ res_dict = self.controller.index(req, self.uuid)
+ self.assertThat(res_dict, matchers.DictMatches(expected))
+
+ def test_delete_console(self):
+ def fake_get_console(cons_self, context, instance_id, console_id):
+ self.assertEqual(instance_id, self.uuid)
+ self.assertEqual(console_id, 20)
+ pool = dict(console_type='fake_type',
+ public_hostname='fake_hostname')
+ return dict(id=console_id, password='fake_password',
+ port='fake_port', pool=pool)
+
+ def fake_delete_console(cons_self, context, instance_id, console_id):
+ self.assertEqual(instance_id, self.uuid)
+ self.assertEqual(console_id, 20)
+
+ self.stubs.Set(console.api.API, 'get_console', fake_get_console)
+ self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
+
+ req = fakes.HTTPRequest.blank(self.url + '/20')
+ self.controller.delete(req, self.uuid, '20')
+
+ def test_delete_console_unknown_console(self):
+ def fake_delete_console(cons_self, context, instance_id, console_id):
+ raise exception.ConsoleNotFound(console_id=console_id)
+
+ self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
+
+ req = fakes.HTTPRequest.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, self.uuid, '20')
+
+ def test_delete_console_unknown_instance(self):
+ def fake_delete_console(cons_self, context, instance_id, console_id):
+ raise exception.InstanceNotFound(instance_id=instance_id)
+
+ self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
+
+ req = fakes.HTTPRequest.blank(self.url + '/20')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
+ req, self.uuid, '20')
+
+
+class TestConsolesXMLSerializer(test.NoDBTestCase):
+ def test_show(self):
+ fixture = {'console': {'id': 20,
+ 'password': 'fake_password',
+ 'port': 'fake_port',
+ 'host': 'fake_hostname',
+ 'console_type': 'fake_type'}}
+
+ output = consoles.ConsoleTemplate().serialize(fixture)
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, 'console')
+ self.assertEqual(res_tree.xpath('id')[0].text, '20')
+ self.assertEqual(res_tree.xpath('port')[0].text, 'fake_port')
+ self.assertEqual(res_tree.xpath('host')[0].text, 'fake_hostname')
+ self.assertEqual(res_tree.xpath('password')[0].text, 'fake_password')
+ self.assertEqual(res_tree.xpath('console_type')[0].text, 'fake_type')
+
+ def test_index(self):
+ fixture = {'consoles': [{'console': {'id': 10,
+ 'console_type': 'fake_type'}},
+ {'console': {'id': 11,
+ 'console_type': 'fake_type2'}}]}
+
+ output = consoles.ConsolesTemplate().serialize(fixture)
+ res_tree = etree.XML(output)
+
+ self.assertEqual(res_tree.tag, 'consoles')
+ self.assertEqual(len(res_tree), 2)
+ self.assertEqual(res_tree[0].tag, 'console')
+ self.assertEqual(res_tree[1].tag, 'console')
+ self.assertEqual(len(res_tree[0]), 1)
+ self.assertEqual(res_tree[0][0].tag, 'console')
+ self.assertEqual(len(res_tree[1]), 1)
+ self.assertEqual(res_tree[1][0].tag, 'console')
+ self.assertEqual(res_tree[0][0].xpath('id')[0].text, '10')
+ self.assertEqual(res_tree[1][0].xpath('id')[0].text, '11')
+ self.assertEqual(res_tree[0][0].xpath('console_type')[0].text,
+ 'fake_type')
+ self.assertEqual(res_tree[1][0].xpath('console_type')[0].text,
+ 'fake_type2')
diff --git a/nova/tests/unit/api/openstack/compute/test_extensions.py b/nova/tests/unit/api/openstack/compute/test_extensions.py
new file mode 100644
index 0000000000..cf84fc1f84
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_extensions.py
@@ -0,0 +1,747 @@
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import iso8601
+from lxml import etree
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack import compute
+from nova.api.openstack.compute import extensions as compute_extensions
+from nova.api.openstack import extensions as base_extensions
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+from nova import exception
+import nova.policy
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import matchers
+
+CONF = cfg.CONF
+
+NS = "{http://docs.openstack.org/common/api/v1.0}"
+ATOMNS = "{http://www.w3.org/2005/Atom}"
+response_body = "Try to say this Mr. Knox, sir..."
+extension_body = "I am not a fox!"
+
+
+class StubController(object):
+
+ def __init__(self, body):
+ self.body = body
+
+ def index(self, req):
+ return self.body
+
+ def create(self, req, body):
+ msg = 'All aboard the fail train!'
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ def show(self, req, id):
+ raise webob.exc.HTTPNotFound()
+
+
+class StubActionController(wsgi.Controller):
+ def __init__(self, body):
+ self.body = body
+
+ @wsgi.action('fooAction')
+ def _action_foo(self, req, id, body):
+ return self.body
+
+
+class StubControllerExtension(base_extensions.ExtensionDescriptor):
+ name = 'twaadle'
+
+ def __init__(self):
+ pass
+
+
+class StubEarlyExtensionController(wsgi.Controller):
+ def __init__(self, body):
+ self.body = body
+
+ @wsgi.extends
+ def index(self, req):
+ yield self.body
+
+ @wsgi.extends(action='fooAction')
+ def _action_foo(self, req, id, body):
+ yield self.body
+
+
+class StubLateExtensionController(wsgi.Controller):
+ def __init__(self, body):
+ self.body = body
+
+ @wsgi.extends
+ def index(self, req, resp_obj):
+ return self.body
+
+ @wsgi.extends(action='fooAction')
+ def _action_foo(self, req, resp_obj, id, body):
+ return self.body
+
+
+class StubExtensionManager(object):
+ """Provides access to Tweedle Beetles."""
+
+ name = "Tweedle Beetle Extension"
+ alias = "TWDLBETL"
+
+ def __init__(self, resource_ext=None, action_ext=None, request_ext=None,
+ controller_ext=None):
+ self.resource_ext = resource_ext
+ self.action_ext = action_ext
+ self.request_ext = request_ext
+ self.controller_ext = controller_ext
+ self.extra_resource_ext = None
+
+ def get_resources(self):
+ resource_exts = []
+ if self.resource_ext:
+ resource_exts.append(self.resource_ext)
+ if self.extra_resource_ext:
+ resource_exts.append(self.extra_resource_ext)
+ return resource_exts
+
+ def get_actions(self):
+ action_exts = []
+ if self.action_ext:
+ action_exts.append(self.action_ext)
+ return action_exts
+
+ def get_request_extensions(self):
+ request_extensions = []
+ if self.request_ext:
+ request_extensions.append(self.request_ext)
+ return request_extensions
+
+ def get_controller_extensions(self):
+ controller_extensions = []
+ if self.controller_ext:
+ controller_extensions.append(self.controller_ext)
+ return controller_extensions
+
+
+class ExtensionTestCase(test.TestCase):
+ def setUp(self):
+ super(ExtensionTestCase, self).setUp()
+ ext_list = CONF.osapi_compute_extension[:]
+ fox = ('nova.tests.unit.api.openstack.compute.extensions.'
+ 'foxinsocks.Foxinsocks')
+ if fox not in ext_list:
+ ext_list.append(fox)
+ self.flags(osapi_compute_extension=ext_list)
+ self.fake_context = nova.context.RequestContext('fake', 'fake')
+
+ def test_extension_authorizer_throws_exception_if_policy_fails(self):
+ target = {'project_id': '1234',
+ 'user_id': '5678'}
+ self.mox.StubOutWithMock(nova.policy, 'enforce')
+ nova.policy.enforce(self.fake_context,
+ "compute_extension:used_limits_for_admin",
+ target).AndRaise(
+ exception.PolicyNotAuthorized(
+ action="compute_extension:used_limits_for_admin"))
+ self.mox.ReplayAll()
+ authorize = base_extensions.extension_authorizer('compute',
+ 'used_limits_for_admin'
+ )
+ self.assertRaises(exception.PolicyNotAuthorized, authorize,
+ self.fake_context, target=target)
+
+ def test_core_authorizer_throws_exception_if_policy_fails(self):
+ target = {'project_id': '1234',
+ 'user_id': '5678'}
+ self.mox.StubOutWithMock(nova.policy, 'enforce')
+ nova.policy.enforce(self.fake_context,
+ "compute:used_limits_for_admin",
+ target).AndRaise(
+ exception.PolicyNotAuthorized(
+ action="compute:used_limits_for_admin"))
+ self.mox.ReplayAll()
+ authorize = base_extensions.core_authorizer('compute',
+ 'used_limits_for_admin'
+ )
+ self.assertRaises(exception.PolicyNotAuthorized, authorize,
+ self.fake_context, target=target)
+
+
+class ExtensionControllerTest(ExtensionTestCase):
+
+ def setUp(self):
+ super(ExtensionControllerTest, self).setUp()
+ self.ext_list = [
+ "AdminActions",
+ "Aggregates",
+ "AssistedVolumeSnapshots",
+ "AvailabilityZone",
+ "Agents",
+ "Certificates",
+ "Cloudpipe",
+ "CloudpipeUpdate",
+ "ConsoleOutput",
+ "Consoles",
+ "Createserverext",
+ "DeferredDelete",
+ "DiskConfig",
+ "ExtendedAvailabilityZone",
+ "ExtendedFloatingIps",
+ "ExtendedIps",
+ "ExtendedIpsMac",
+ "ExtendedVIFNet",
+ "Evacuate",
+ "ExtendedStatus",
+ "ExtendedVolumes",
+ "ExtendedServerAttributes",
+ "FixedIPs",
+ "FlavorAccess",
+ "FlavorDisabled",
+ "FlavorExtraSpecs",
+ "FlavorExtraData",
+ "FlavorManage",
+ "FlavorRxtx",
+ "FlavorSwap",
+ "FloatingIps",
+ "FloatingIpDns",
+ "FloatingIpPools",
+ "FloatingIpsBulk",
+ "Fox In Socks",
+ "Hosts",
+ "ImageSize",
+ "InstanceActions",
+ "Keypairs",
+ "Multinic",
+ "MultipleCreate",
+ "QuotaClasses",
+ "Quotas",
+ "ExtendedQuotas",
+ "Rescue",
+ "SchedulerHints",
+ "SecurityGroupDefaultRules",
+ "SecurityGroups",
+ "ServerDiagnostics",
+ "ServerListMultiStatus",
+ "ServerPassword",
+ "ServerStartStop",
+ "Services",
+ "SimpleTenantUsage",
+ "UsedLimits",
+ "UserData",
+ "VirtualInterfaces",
+ "VolumeAttachmentUpdate",
+ "Volumes",
+ ]
+ self.ext_list.sort()
+
+ def test_list_extensions_json(self):
+ app = compute.APIRouter(init_only=('extensions',))
+ request = webob.Request.blank("/fake/extensions")
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+
+ # Make sure we have all the extensions, extra extensions being OK.
+ data = jsonutils.loads(response.body)
+ names = [str(x['name']) for x in data['extensions']
+ if str(x['name']) in self.ext_list]
+ names.sort()
+ self.assertEqual(names, self.ext_list)
+
+ # Ensure all the timestamps are valid according to iso8601
+ for ext in data['extensions']:
+ iso8601.parse_date(ext['updated'])
+
+ # Make sure that at least Fox in Sox is correct.
+ (fox_ext, ) = [
+ x for x in data['extensions'] if x['alias'] == 'FOXNSOX']
+ self.assertEqual(fox_ext, {
+ 'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0',
+ 'name': 'Fox In Socks',
+ 'updated': '2011-01-22T13:25:27-06:00',
+ 'description': 'The Fox In Socks Extension.',
+ 'alias': 'FOXNSOX',
+ 'links': []
+ },
+ )
+
+ for ext in data['extensions']:
+ url = '/fake/extensions/%s' % ext['alias']
+ request = webob.Request.blank(url)
+ response = request.get_response(app)
+ output = jsonutils.loads(response.body)
+ self.assertEqual(output['extension']['alias'], ext['alias'])
+
+ def test_get_extension_json(self):
+ app = compute.APIRouter(init_only=('extensions',))
+ request = webob.Request.blank("/fake/extensions/FOXNSOX")
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+
+ data = jsonutils.loads(response.body)
+ self.assertEqual(data['extension'], {
+ "namespace": "http://www.fox.in.socks/api/ext/pie/v1.0",
+ "name": "Fox In Socks",
+ "updated": "2011-01-22T13:25:27-06:00",
+ "description": "The Fox In Socks Extension.",
+ "alias": "FOXNSOX",
+ "links": []})
+
+ def test_get_non_existing_extension_json(self):
+ app = compute.APIRouter(init_only=('extensions',))
+ request = webob.Request.blank("/fake/extensions/4")
+ response = request.get_response(app)
+ self.assertEqual(404, response.status_int)
+
+ def test_list_extensions_xml(self):
+ app = compute.APIRouter(init_only=('servers', 'flavors', 'extensions'))
+ request = webob.Request.blank("/fake/extensions")
+ request.accept = "application/xml"
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+
+ root = etree.XML(response.body)
+ self.assertEqual(root.tag.split('extensions')[0], NS)
+
+ # Make sure we have all the extensions, extra extensions being OK.
+ exts = root.findall('{0}extension'.format(NS))
+ self.assertTrue(len(exts) >= len(self.ext_list))
+
+ # Make sure that at least Fox in Sox is correct.
+ (fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX']
+ self.assertEqual(fox_ext.get('name'), 'Fox In Socks')
+ self.assertEqual(fox_ext.get('namespace'),
+ 'http://www.fox.in.socks/api/ext/pie/v1.0')
+ self.assertEqual(fox_ext.get('updated'), '2011-01-22T13:25:27-06:00')
+ self.assertEqual(fox_ext.findtext('{0}description'.format(NS)),
+ 'The Fox In Socks Extension.')
+
+ xmlutil.validate_schema(root, 'extensions')
+
+ def test_get_extension_xml(self):
+ app = compute.APIRouter(init_only=('servers', 'flavors', 'extensions'))
+ request = webob.Request.blank("/fake/extensions/FOXNSOX")
+ request.accept = "application/xml"
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ xml = response.body
+
+ root = etree.XML(xml)
+ self.assertEqual(root.tag.split('extension')[0], NS)
+ self.assertEqual(root.get('alias'), 'FOXNSOX')
+ self.assertEqual(root.get('name'), 'Fox In Socks')
+ self.assertEqual(root.get('namespace'),
+ 'http://www.fox.in.socks/api/ext/pie/v1.0')
+ self.assertEqual(root.get('updated'), '2011-01-22T13:25:27-06:00')
+ self.assertEqual(root.findtext('{0}description'.format(NS)),
+ 'The Fox In Socks Extension.')
+
+ xmlutil.validate_schema(root, 'extension')
+
+
+class ResourceExtensionTest(ExtensionTestCase):
+
+ def test_no_extension_present(self):
+ manager = StubExtensionManager(None)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/blah")
+ response = request.get_response(app)
+ self.assertEqual(404, response.status_int)
+
+ def test_get_resources(self):
+ res_ext = base_extensions.ResourceExtension('tweedles',
+ StubController(response_body))
+ manager = StubExtensionManager(res_ext)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/tweedles")
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(response_body, response.body)
+
+ def test_get_resources_with_controller(self):
+ res_ext = base_extensions.ResourceExtension('tweedles',
+ StubController(response_body))
+ manager = StubExtensionManager(res_ext)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/tweedles")
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(response_body, response.body)
+
+ def test_bad_request(self):
+ res_ext = base_extensions.ResourceExtension('tweedles',
+ StubController(response_body))
+ manager = StubExtensionManager(res_ext)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/tweedles")
+ request.method = "POST"
+ response = request.get_response(app)
+ self.assertEqual(400, response.status_int)
+ self.assertEqual('application/json', response.content_type)
+ body = jsonutils.loads(response.body)
+ expected = {
+ "badRequest": {
+ "message": "All aboard the fail train!",
+ "code": 400
+ }
+ }
+ self.assertThat(expected, matchers.DictMatches(body))
+
+ def test_non_exist_resource(self):
+ res_ext = base_extensions.ResourceExtension('tweedles',
+ StubController(response_body))
+ manager = StubExtensionManager(res_ext)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/tweedles/1")
+ response = request.get_response(app)
+ self.assertEqual(404, response.status_int)
+ self.assertEqual('application/json', response.content_type)
+ body = jsonutils.loads(response.body)
+ expected = {
+ "itemNotFound": {
+ "message": "The resource could not be found.",
+ "code": 404
+ }
+ }
+ self.assertThat(expected, matchers.DictMatches(body))
+
+
+class InvalidExtension(object):
+
+ alias = "THIRD"
+
+
+class ExtensionManagerTest(ExtensionTestCase):
+
+ response_body = "Try to say this Mr. Knox, sir..."
+
+ def test_get_resources(self):
+ app = compute.APIRouter()
+ request = webob.Request.blank("/fake/foxnsocks")
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(response_body, response.body)
+
+ def test_invalid_extensions(self):
+ # Don't need the serialization middleware here because we're
+ # not testing any serialization
+ compute.APIRouter()
+ ext_mgr = compute_extensions.ExtensionManager()
+ ext_mgr.register(InvalidExtension())
+ self.assertTrue(ext_mgr.is_loaded('FOXNSOX'))
+ self.assertFalse(ext_mgr.is_loaded('THIRD'))
+
+
+class ActionExtensionTest(ExtensionTestCase):
+
+ def _send_server_action_request(self, url, body):
+ app = compute.APIRouter(init_only=('servers',))
+ request = webob.Request.blank(url)
+ request.method = 'POST'
+ request.content_type = 'application/json'
+ request.body = jsonutils.dumps(body)
+ response = request.get_response(app)
+ return response
+
+ def test_extended_action(self):
+ body = dict(add_tweedle=dict(name="test"))
+ url = "/fake/servers/abcd/action"
+ response = self._send_server_action_request(url, body)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual("Tweedle Beetle Added.", response.body)
+
+ body = dict(delete_tweedle=dict(name="test"))
+ response = self._send_server_action_request(url, body)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual("Tweedle Beetle Deleted.", response.body)
+
+ def test_invalid_action(self):
+ body = dict(blah=dict(name="test")) # Doesn't exist
+ url = "/fake/servers/abcd/action"
+ response = self._send_server_action_request(url, body)
+ self.assertEqual(400, response.status_int)
+ self.assertEqual('application/json', response.content_type)
+ body = jsonutils.loads(response.body)
+ expected = {
+ "badRequest": {
+ "message": "There is no such action: blah",
+ "code": 400
+ }
+ }
+ self.assertThat(expected, matchers.DictMatches(body))
+
+ def test_non_exist_action(self):
+ body = dict(blah=dict(name="test"))
+ url = "/fake/fdsa/1/action"
+ response = self._send_server_action_request(url, body)
+ self.assertEqual(404, response.status_int)
+
+ def test_failed_action(self):
+ body = dict(fail=dict(name="test"))
+ url = "/fake/servers/abcd/action"
+ response = self._send_server_action_request(url, body)
+ self.assertEqual(400, response.status_int)
+ self.assertEqual('application/json', response.content_type)
+ body = jsonutils.loads(response.body)
+ expected = {
+ "badRequest": {
+ "message": "Tweedle fail",
+ "code": 400
+ }
+ }
+ self.assertThat(expected, matchers.DictMatches(body))
+
+
+class RequestExtensionTest(ExtensionTestCase):
+
+ def test_get_resources_with_stub_mgr(self):
+ class GooGoose(wsgi.Controller):
+ @wsgi.extends
+ def show(self, req, resp_obj, id):
+ # only handle JSON responses
+ resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing')
+
+ req_ext = base_extensions.ControllerExtension(
+ StubControllerExtension(), 'flavors', GooGoose())
+
+ manager = StubExtensionManager(None, None, None, req_ext)
+ app = fakes.wsgi_app(ext_mgr=manager)
+ request = webob.Request.blank("/v2/fake/flavors/1?chewing=bluegoo")
+ request.environ['api.version'] = '2'
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ response_data = jsonutils.loads(response.body)
+ self.assertEqual('bluegoo', response_data['flavor']['googoose'])
+
+ def test_get_resources_with_mgr(self):
+
+ app = fakes.wsgi_app(init_only=('flavors',))
+ request = webob.Request.blank("/v2/fake/flavors/1?chewing=newblue")
+ request.environ['api.version'] = '2'
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ response_data = jsonutils.loads(response.body)
+ self.assertEqual('newblue', response_data['flavor']['googoose'])
+ self.assertEqual("Pig Bands!", response_data['big_bands'])
+
+
+class ControllerExtensionTest(ExtensionTestCase):
+ def test_controller_extension_early(self):
+ controller = StubController(response_body)
+ res_ext = base_extensions.ResourceExtension('tweedles', controller)
+ ext_controller = StubEarlyExtensionController(extension_body)
+ extension = StubControllerExtension()
+ cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
+ ext_controller)
+ manager = StubExtensionManager(resource_ext=res_ext,
+ controller_ext=cont_ext)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/tweedles")
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(extension_body, response.body)
+
+ def test_controller_extension_late(self):
+ # Need a dict for the body to convert to a ResponseObject
+ controller = StubController(dict(foo=response_body))
+ res_ext = base_extensions.ResourceExtension('tweedles', controller)
+
+ ext_controller = StubLateExtensionController(extension_body)
+ extension = StubControllerExtension()
+ cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
+ ext_controller)
+
+ manager = StubExtensionManager(resource_ext=res_ext,
+ controller_ext=cont_ext)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/tweedles")
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(extension_body, response.body)
+
+ def test_controller_extension_late_inherited_resource(self):
+ # Need a dict for the body to convert to a ResponseObject
+ controller = StubController(dict(foo=response_body))
+ parent_ext = base_extensions.ResourceExtension('tweedles', controller)
+
+ ext_controller = StubLateExtensionController(extension_body)
+ extension = StubControllerExtension()
+ cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
+ ext_controller)
+
+ manager = StubExtensionManager(resource_ext=parent_ext,
+ controller_ext=cont_ext)
+ child_ext = base_extensions.ResourceExtension('beetles', controller,
+ inherits='tweedles')
+ manager.extra_resource_ext = child_ext
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/beetles")
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(extension_body, response.body)
+
+ def test_controller_action_extension_early(self):
+ controller = StubActionController(response_body)
+ actions = dict(action='POST')
+ res_ext = base_extensions.ResourceExtension('tweedles', controller,
+ member_actions=actions)
+ ext_controller = StubEarlyExtensionController(extension_body)
+ extension = StubControllerExtension()
+ cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
+ ext_controller)
+ manager = StubExtensionManager(resource_ext=res_ext,
+ controller_ext=cont_ext)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/tweedles/foo/action")
+ request.method = 'POST'
+ request.headers['Content-Type'] = 'application/json'
+ request.body = jsonutils.dumps(dict(fooAction=True))
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(extension_body, response.body)
+
+ def test_controller_action_extension_late(self):
+ # Need a dict for the body to convert to a ResponseObject
+ controller = StubActionController(dict(foo=response_body))
+ actions = dict(action='POST')
+ res_ext = base_extensions.ResourceExtension('tweedles', controller,
+ member_actions=actions)
+
+ ext_controller = StubLateExtensionController(extension_body)
+ extension = StubControllerExtension()
+ cont_ext = base_extensions.ControllerExtension(extension, 'tweedles',
+ ext_controller)
+
+ manager = StubExtensionManager(resource_ext=res_ext,
+ controller_ext=cont_ext)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/tweedles/foo/action")
+ request.method = 'POST'
+ request.headers['Content-Type'] = 'application/json'
+ request.body = jsonutils.dumps(dict(fooAction=True))
+ response = request.get_response(app)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(extension_body, response.body)
+
+
+class ExtensionsXMLSerializerTest(test.TestCase):
+
+ def test_serialize_extension(self):
+ serializer = base_extensions.ExtensionTemplate()
+ data = {'extension': {
+ 'name': 'ext1',
+ 'namespace': 'http://docs.rack.com/servers/api/ext/pie/v1.0',
+ 'alias': 'RS-PIE',
+ 'updated': '2011-01-22T13:25:27-06:00',
+ 'description': 'Adds the capability to share an image.',
+ 'links': [{'rel': 'describedby',
+ 'type': 'application/pdf',
+ 'href': 'http://docs.rack.com/servers/api/ext/cs.pdf'},
+ {'rel': 'describedby',
+ 'type': 'application/vnd.sun.wadl+xml',
+ 'href': 'http://docs.rack.com/servers/api/ext/cs.wadl'}]}}
+
+ xml = serializer.serialize(data)
+ root = etree.XML(xml)
+ ext_dict = data['extension']
+ self.assertEqual(root.findtext('{0}description'.format(NS)),
+ ext_dict['description'])
+
+ for key in ['name', 'namespace', 'alias', 'updated']:
+ self.assertEqual(root.get(key), ext_dict[key])
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(ext_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ xmlutil.validate_schema(root, 'extension')
+
+ def test_serialize_extensions(self):
+ serializer = base_extensions.ExtensionsTemplate()
+ data = {"extensions": [{
+ "name": "Public Image Extension",
+ "namespace": "http://foo.com/api/ext/pie/v1.0",
+ "alias": "RS-PIE",
+ "updated": "2011-01-22T13:25:27-06:00",
+ "description": "Adds the capability to share an image.",
+ "links": [{"rel": "describedby",
+ "type": "application/pdf",
+ "href": "http://foo.com/api/ext/cs-pie.pdf"},
+ {"rel": "describedby",
+ "type": "application/vnd.sun.wadl+xml",
+ "href": "http://foo.com/api/ext/cs-pie.wadl"}]},
+ {"name": "Cloud Block Storage",
+ "namespace": "http://foo.com/api/ext/cbs/v1.0",
+ "alias": "RS-CBS",
+ "updated": "2011-01-12T11:22:33-06:00",
+ "description": "Allows mounting cloud block storage.",
+ "links": [{"rel": "describedby",
+ "type": "application/pdf",
+ "href": "http://foo.com/api/ext/cs-cbs.pdf"},
+ {"rel": "describedby",
+ "type": "application/vnd.sun.wadl+xml",
+ "href": "http://foo.com/api/ext/cs-cbs.wadl"}]}]}
+
+ xml = serializer.serialize(data)
+ root = etree.XML(xml)
+ ext_elems = root.findall('{0}extension'.format(NS))
+ self.assertEqual(len(ext_elems), 2)
+ for i, ext_elem in enumerate(ext_elems):
+ ext_dict = data['extensions'][i]
+ self.assertEqual(ext_elem.findtext('{0}description'.format(NS)),
+ ext_dict['description'])
+
+ for key in ['name', 'namespace', 'alias', 'updated']:
+ self.assertEqual(ext_elem.get(key), ext_dict[key])
+
+ link_nodes = ext_elem.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(ext_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ xmlutil.validate_schema(root, 'extensions')
+
+
+class ExtensionControllerIdFormatTest(test.TestCase):
+
+ def _bounce_id(self, test_id):
+
+ class BounceController(object):
+ def show(self, req, id):
+ return id
+ res_ext = base_extensions.ResourceExtension('bounce',
+ BounceController())
+ manager = StubExtensionManager(res_ext)
+ app = compute.APIRouter(manager)
+ request = webob.Request.blank("/fake/bounce/%s" % test_id)
+ response = request.get_response(app)
+ return response.body
+
+ def test_id_with_xml_format(self):
+ result = self._bounce_id('foo.xml')
+ self.assertEqual(result, 'foo')
+
+ def test_id_with_json_format(self):
+ result = self._bounce_id('foo.json')
+ self.assertEqual(result, 'foo')
+
+ def test_id_with_bad_format(self):
+ result = self._bounce_id('foo.bad')
+ self.assertEqual(result, 'foo.bad')
diff --git a/nova/tests/unit/api/openstack/compute/test_flavors.py b/nova/tests/unit/api/openstack/compute/test_flavors.py
new file mode 100644
index 0000000000..265b50ac85
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_flavors.py
@@ -0,0 +1,943 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+import six.moves.urllib.parse as urlparse
+import webob
+
+from nova.api.openstack import common
+from nova.api.openstack.compute import flavors as flavors_v2
+from nova.api.openstack.compute.plugins.v3 import flavors as flavors_v3
+from nova.api.openstack import xmlutil
+import nova.compute.flavors
+from nova import context
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import matchers
+
+NS = "{http://docs.openstack.org/compute/api/v1.1}"
+ATOMNS = "{http://www.w3.org/2005/Atom}"
+
+
+FAKE_FLAVORS = {
+ 'flavor 1': {
+ "flavorid": '1',
+ "name": 'flavor 1',
+ "memory_mb": '256',
+ "root_gb": '10',
+ "ephemeral_gb": '20',
+ "swap": '10',
+ "disabled": False,
+ "vcpus": '',
+ },
+ 'flavor 2': {
+ "flavorid": '2',
+ "name": 'flavor 2',
+ "memory_mb": '512',
+ "root_gb": '20',
+ "ephemeral_gb": '10',
+ "swap": '5',
+ "disabled": False,
+ "vcpus": '',
+ },
+}
+
+
+def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
+ return FAKE_FLAVORS['flavor %s' % flavorid]
+
+
+def fake_get_all_flavors_sorted_list(context=None, inactive=False,
+ filters=None, sort_key='flavorid',
+ sort_dir='asc', limit=None, marker=None):
+ if marker in ['99999']:
+ raise exception.MarkerNotFound(marker)
+
+ def reject_min(db_attr, filter_attr):
+ return (filter_attr in filters and
+ int(flavor[db_attr]) < int(filters[filter_attr]))
+
+ filters = filters or {}
+ res = []
+ for (flavor_name, flavor) in FAKE_FLAVORS.items():
+ if reject_min('memory_mb', 'min_memory_mb'):
+ continue
+ elif reject_min('root_gb', 'min_root_gb'):
+ continue
+
+ res.append(flavor)
+
+ res = sorted(res, key=lambda item: item[sort_key])
+ output = []
+ marker_found = True if marker is None else False
+ for flavor in res:
+ if not marker_found and marker == flavor['flavorid']:
+ marker_found = True
+ elif marker_found:
+ if limit is None or len(output) < int(limit):
+ output.append(flavor)
+
+ return output
+
+
+def fake_get_limit_and_marker(request, max_limit=1):
+ params = common.get_pagination_params(request)
+ limit = params.get('limit', max_limit)
+ limit = min(max_limit, limit)
+ marker = params.get('marker')
+
+ return limit, marker
+
+
+def empty_get_all_flavors_sorted_list(context=None, inactive=False,
+ filters=None, sort_key='flavorid',
+ sort_dir='asc', limit=None, marker=None):
+ return []
+
+
+def return_flavor_not_found(flavor_id, ctxt=None):
+ raise exception.FlavorNotFound(flavor_id=flavor_id)
+
+
+class FlavorsTestV21(test.TestCase):
+ _prefix = "/v3"
+ Controller = flavors_v3.FlavorsController
+ fake_request = fakes.HTTPRequestV3
+ _rspv = "v3"
+ _fake = ""
+
+ def setUp(self):
+ super(FlavorsTestV21, self).setUp()
+ self.flags(osapi_compute_extension=[])
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ self.stubs.Set(nova.compute.flavors, "get_all_flavors_sorted_list",
+ fake_get_all_flavors_sorted_list)
+ self.stubs.Set(nova.compute.flavors,
+ "get_flavor_by_flavor_id",
+ fake_flavor_get_by_flavor_id)
+ self.controller = self.Controller()
+
+ def _set_expected_body(self, expected, ephemeral, swap, disabled):
+ # NOTE(oomichi): On v2.1 API, some extensions of v2.0 are merged
+ # as core features and we can get the following parameters as the
+ # default.
+ expected['OS-FLV-EXT-DATA:ephemeral'] = ephemeral
+ expected['OS-FLV-DISABLED:disabled'] = disabled
+ expected['swap'] = swap
+
+ def test_get_flavor_by_invalid_id(self):
+ self.stubs.Set(nova.compute.flavors,
+ "get_flavor_by_flavor_id",
+ return_flavor_not_found)
+ req = self.fake_request.blank(self._prefix + '/flavors/asdf')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.show, req, 'asdf')
+
+ def test_get_flavor_by_id(self):
+ req = self.fake_request.blank(self._prefix + '/flavors/1')
+ flavor = self.controller.show(req, '1')
+ expected = {
+ "flavor": {
+ "id": "1",
+ "name": "flavor 1",
+ "ram": "256",
+ "disk": "10",
+ "vcpus": "",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/1",
+ },
+ ],
+ },
+ }
+ self._set_expected_body(expected['flavor'], ephemeral='20',
+ swap='10', disabled=False)
+ self.assertEqual(flavor, expected)
+
+ def test_get_flavor_with_custom_link_prefix(self):
+ self.flags(osapi_compute_link_prefix='http://zoo.com:42',
+ osapi_glance_link_prefix='http://circus.com:34')
+ req = self.fake_request.blank(self._prefix + '/flavors/1')
+ flavor = self.controller.show(req, '1')
+ expected = {
+ "flavor": {
+ "id": "1",
+ "name": "flavor 1",
+ "ram": "256",
+ "disk": "10",
+ "vcpus": "",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://zoo.com:42/" + self._rspv +
+ "/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://zoo.com:42" + self._fake +
+ "/flavors/1",
+ },
+ ],
+ },
+ }
+ self._set_expected_body(expected['flavor'], ephemeral='20',
+ swap='10', disabled=False)
+ self.assertEqual(expected, flavor)
+
+ def test_get_flavor_list(self):
+ req = self.fake_request.blank(self._prefix + '/flavors')
+ flavor = self.controller.index(req)
+ expected = {
+ "flavors": [
+ {
+ "id": "1",
+ "name": "flavor 1",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/1",
+ },
+ ],
+ },
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/2",
+ },
+ ],
+ },
+ ],
+ }
+ self.assertEqual(flavor, expected)
+
+ def test_get_flavor_list_with_marker(self):
+ self.maxDiff = None
+ url = self._prefix + '/flavors?limit=1&marker=1'
+ req = self.fake_request.blank(url)
+ flavor = self.controller.index(req)
+ expected = {
+ "flavors": [
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/2",
+ },
+ ],
+ },
+ ],
+ 'flavors_links': [
+ {'href': 'http://localhost/' + self._rspv +
+ '/flavors?limit=1&marker=2',
+ 'rel': 'next'}
+ ]
+ }
+ self.assertThat(flavor, matchers.DictMatches(expected))
+
+ def test_get_flavor_list_with_invalid_marker(self):
+ req = self.fake_request.blank(self._prefix + '/flavors?marker=99999')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.index, req)
+
+ def test_get_flavor_detail_with_limit(self):
+ url = self._prefix + '/flavors/detail?limit=1'
+ req = self.fake_request.blank(url)
+ response = self.controller.index(req)
+ response_list = response["flavors"]
+ response_links = response["flavors_links"]
+
+ expected_flavors = [
+ {
+ "id": "1",
+ "name": "flavor 1",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/1",
+ },
+ ],
+ },
+ ]
+ self.assertEqual(response_list, expected_flavors)
+ self.assertEqual(response_links[0]['rel'], 'next')
+
+ href_parts = urlparse.urlparse(response_links[0]['href'])
+ self.assertEqual('/' + self._rspv + '/flavors', href_parts.path)
+ params = urlparse.parse_qs(href_parts.query)
+ self.assertThat({'limit': ['1'], 'marker': ['1']},
+ matchers.DictMatches(params))
+
+ def test_get_flavor_with_limit(self):
+ req = self.fake_request.blank(self._prefix + '/flavors?limit=2')
+ response = self.controller.index(req)
+ response_list = response["flavors"]
+ response_links = response["flavors_links"]
+
+ expected_flavors = [
+ {
+ "id": "1",
+ "name": "flavor 1",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/1",
+ },
+ ],
+ },
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/2",
+ },
+ ],
+ }
+ ]
+ self.assertEqual(response_list, expected_flavors)
+ self.assertEqual(response_links[0]['rel'], 'next')
+
+ href_parts = urlparse.urlparse(response_links[0]['href'])
+ self.assertEqual('/' + self._rspv + '/flavors', href_parts.path)
+ params = urlparse.parse_qs(href_parts.query)
+ self.assertThat({'limit': ['2'], 'marker': ['2']},
+ matchers.DictMatches(params))
+
+ def test_get_flavor_with_default_limit(self):
+ self.stubs.Set(common, "get_limit_and_marker",
+ fake_get_limit_and_marker)
+ self.flags(osapi_max_limit=1)
+ req = fakes.HTTPRequest.blank('/v2/fake/flavors?limit=2')
+ response = self.controller.index(req)
+ response_list = response["flavors"]
+ response_links = response["flavors_links"]
+
+ expected_flavors = [
+ {
+ "id": "1",
+ "name": "flavor 1",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2/fake/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost/fake/flavors/1",
+ }
+ ]
+ }
+ ]
+
+ self.assertEqual(response_list, expected_flavors)
+ self.assertEqual(response_links[0]['rel'], 'next')
+ href_parts = urlparse.urlparse(response_links[0]['href'])
+ self.assertEqual('/v2/fake/flavors', href_parts.path)
+ params = urlparse.parse_qs(href_parts.query)
+ self.assertThat({'limit': ['2'], 'marker': ['1']},
+ matchers.DictMatches(params))
+
+ def test_get_flavor_list_detail(self):
+ req = self.fake_request.blank(self._prefix + '/flavors/detail')
+ flavor = self.controller.detail(req)
+ expected = {
+ "flavors": [
+ {
+ "id": "1",
+ "name": "flavor 1",
+ "ram": "256",
+ "disk": "10",
+ "vcpus": "",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/1",
+ },
+ ],
+ },
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "ram": "512",
+ "disk": "20",
+ "vcpus": "",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/2",
+ },
+ ],
+ },
+ ],
+ }
+ self._set_expected_body(expected['flavors'][0], ephemeral='20',
+ swap='10', disabled=False)
+ self._set_expected_body(expected['flavors'][1], ephemeral='10',
+ swap='5', disabled=False)
+ self.assertEqual(expected, flavor)
+
+ def test_get_empty_flavor_list(self):
+ self.stubs.Set(nova.compute.flavors, "get_all_flavors_sorted_list",
+ empty_get_all_flavors_sorted_list)
+
+ req = self.fake_request.blank(self._prefix + '/flavors')
+ flavors = self.controller.index(req)
+ expected = {'flavors': []}
+ self.assertEqual(flavors, expected)
+
+ def test_get_flavor_list_filter_min_ram(self):
+ # Flavor lists may be filtered by minRam.
+ req = self.fake_request.blank(self._prefix + '/flavors?minRam=512')
+ flavor = self.controller.index(req)
+ expected = {
+ "flavors": [
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/2",
+ },
+ ],
+ },
+ ],
+ }
+ self.assertEqual(flavor, expected)
+
+ def test_get_flavor_list_filter_invalid_min_ram(self):
+ # Ensure you cannot list flavors with invalid minRam param.
+ req = self.fake_request.blank(self._prefix + '/flavors?minRam=NaN')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.index, req)
+
+ def test_get_flavor_list_filter_min_disk(self):
+ # Flavor lists may be filtered by minDisk.
+ req = self.fake_request.blank(self._prefix + '/flavors?minDisk=20')
+ flavor = self.controller.index(req)
+ expected = {
+ "flavors": [
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/2",
+ },
+ ],
+ },
+ ],
+ }
+ self.assertEqual(flavor, expected)
+
+ def test_get_flavor_list_filter_invalid_min_disk(self):
+ # Ensure you cannot list flavors with invalid minDisk param.
+ req = self.fake_request.blank(self._prefix + '/flavors?minDisk=NaN')
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.index, req)
+
+ def test_get_flavor_list_detail_min_ram_and_min_disk(self):
+ """Tests that filtering work on flavor details and that minRam and
+ minDisk filters can be combined
+ """
+ req = self.fake_request.blank(self._prefix + '/flavors/detail'
+ '?minRam=256&minDisk=20')
+ flavor = self.controller.detail(req)
+ expected = {
+ "flavors": [
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "ram": "512",
+ "disk": "20",
+ "vcpus": "",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/" + self._rspv +
+ "/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "href": "http://localhost" + self._fake +
+ "/flavors/2",
+ },
+ ],
+ },
+ ],
+ }
+ self._set_expected_body(expected['flavors'][0], ephemeral='10',
+ swap='5', disabled=False)
+ self.assertEqual(expected, flavor)
+
+
+class FlavorsTestV20(FlavorsTestV21):
+ _prefix = "/v2/fake"
+ Controller = flavors_v2.Controller
+ fake_request = fakes.HTTPRequest
+ _rspv = "v2/fake"
+ _fake = "/fake"
+
+ def _set_expected_body(self, expected, ephemeral, swap, disabled):
+ pass
+
+
class FlavorsXMLSerializationTest(test.TestCase):
    """Exercises the v2 XML templates that serialize flavors.

    Each test serializes a fixture dict (mirroring what the flavors view
    builder emits) and checks the resulting XML attributes and atom:link
    children against it. The repeated fixture/verification code from the
    original tests is factored into helpers, which also removes the
    shadowing of the outer loop index ``i`` by the inner link loop.
    """

    def _show_fixture(self, flavor_id, ram, disk):
        """Build a single-flavor fixture; values may be str or int."""
        return {
            "flavor": {
                "id": flavor_id,
                "name": "asdf",
                "ram": ram,
                "disk": disk,
                "vcpus": "",
                "links": [
                    {
                        "rel": "self",
                        "href": "http://localhost/v2/fake/flavors/12",
                    },
                    {
                        "rel": "bookmark",
                        "href": "http://localhost/fake/flavors/12",
                    },
                ],
            },
        }

    def _list_fixture(self):
        """Build the two-flavor fixture shared by detail/index tests."""
        def entry(flavor_id, ram, disk):
            return {
                "id": flavor_id,
                "name": "flavor %s" % flavor_id,
                "ram": ram,
                "disk": disk,
                "vcpus": "",
                "links": [
                    {
                        "rel": "self",
                        "href": "http://localhost/v2/fake/flavors/%s"
                                % flavor_id,
                    },
                    {
                        "rel": "bookmark",
                        "href": "http://localhost/fake/flavors/%s"
                                % flavor_id,
                    },
                ],
            }
        return {"flavors": [entry("23", "512", "20"),
                            entry("13", "256", "10")]}

    def _assert_links(self, parent, expected_links):
        """Verify the atom:link children of *parent* match the fixture."""
        link_nodes = parent.findall('{0}link'.format(ATOMNS))
        self.assertEqual(len(link_nodes), 2)
        for idx, link in enumerate(expected_links):
            for key, value in link.items():
                self.assertEqual(link_nodes[idx].get(key), value)

    def _assert_flavor(self, elem, flavor_dict, keys):
        """Verify *elem* carries the expected attributes and links."""
        for key in keys:
            # The template must stringify non-string fixture values.
            self.assertEqual(elem.get(key), str(flavor_dict[key]))
        self._assert_links(elem, flavor_dict['links'])

    def test_xml_declaration(self):
        serializer = flavors_v2.FlavorTemplate()
        fixture = self._show_fixture("12", "256", "10")

        output = serializer.serialize(fixture)
        # The serializer must emit a proper XML declaration up front.
        has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
        self.assertTrue(has_dec)

    def test_show(self):
        serializer = flavors_v2.FlavorTemplate()
        fixture = self._show_fixture("12", "256", "10")

        output = serializer.serialize(fixture)
        root = etree.XML(output)
        xmlutil.validate_schema(root, 'flavor')
        self._assert_flavor(root, fixture['flavor'],
                            ['name', 'id', 'ram', 'disk'])

    def test_show_handles_integers(self):
        # Same as test_show, but id/ram/disk are ints; serialization must
        # still produce string attributes.
        serializer = flavors_v2.FlavorTemplate()
        fixture = self._show_fixture(12, 256, 10)

        output = serializer.serialize(fixture)
        root = etree.XML(output)
        xmlutil.validate_schema(root, 'flavor')
        self._assert_flavor(root, fixture['flavor'],
                            ['name', 'id', 'ram', 'disk'])

    def test_detail(self):
        serializer = flavors_v2.FlavorsTemplate()
        fixture = self._list_fixture()

        output = serializer.serialize(fixture)
        root = etree.XML(output)
        xmlutil.validate_schema(root, 'flavors')
        flavor_elems = root.findall('{0}flavor'.format(NS))
        self.assertEqual(len(flavor_elems), 2)
        for i, flavor_elem in enumerate(flavor_elems):
            self._assert_flavor(flavor_elem, fixture['flavors'][i],
                                ['name', 'id', 'ram', 'disk'])

    def test_index(self):
        # The minimal template only carries name/id (plus links).
        serializer = flavors_v2.MinimalFlavorsTemplate()
        fixture = self._list_fixture()

        output = serializer.serialize(fixture)
        root = etree.XML(output)
        xmlutil.validate_schema(root, 'flavors')
        flavor_elems = root.findall('{0}flavor'.format(NS))
        self.assertEqual(len(flavor_elems), 2)
        for i, flavor_elem in enumerate(flavor_elems):
            self._assert_flavor(flavor_elem, fixture['flavors'][i],
                                ['name', 'id'])

    def test_index_empty(self):
        serializer = flavors_v2.MinimalFlavorsTemplate()
        fixture = {
            "flavors": [],
        }

        output = serializer.serialize(fixture)
        root = etree.XML(output)
        xmlutil.validate_schema(root, 'flavors')
        flavor_elems = root.findall('{0}flavor'.format(NS))
        self.assertEqual(len(flavor_elems), 0)
+
+
class DisabledFlavorsWithRealDBTestV21(test.TestCase):
    """Tests that disabled flavors should not be shown nor listed."""
    Controller = flavors_v3.FlavorsController
    _prefix = "/v3"
    fake_request = fakes.HTTPRequestV3

    def setUp(self):
        super(DisabledFlavorsWithRealDBTestV21, self).setUp()

        # Add a new disabled type to the list of flavors
        self.req = self.fake_request.blank(self._prefix + '/flavors')
        self.context = self.req.environ['nova.context']
        self.admin_context = context.get_admin_context()

        self.disabled_type = self._create_disabled_instance_type()
        # Snapshot of all flavors (including the new disabled one) taken
        # straight from the DB; the tests use it as the expected baseline.
        self.inst_types = db.flavor_get_all(
            self.admin_context)
        self.controller = self.Controller()

    def tearDown(self):
        # Remove the extra flavor so later tests see a pristine flavor set.
        db.flavor_destroy(
            self.admin_context, self.disabled_type['name'])

        super(DisabledFlavorsWithRealDBTestV21, self).tearDown()

    def _create_disabled_instance_type(self):
        """Clone an existing flavor into a new, disabled DB row."""
        inst_types = db.flavor_get_all(self.admin_context)

        inst_type = inst_types[0]

        # Drop the primary key so flavor_create inserts a new row.
        del inst_type['id']
        inst_type['name'] += '.disabled'
        # Next free flavorid; flavorids are stored as strings.
        # NOTE(review): unicode() is Python 2 only.
        inst_type['flavorid'] = unicode(max(
            [int(flavor['flavorid']) for flavor in inst_types]) + 1)
        inst_type['disabled'] = True

        disabled_type = db.flavor_create(
            self.admin_context, inst_type)

        return disabled_type

    def test_index_should_not_list_disabled_flavors_to_user(self):
        self.context.is_admin = False

        flavor_list = self.controller.index(self.req)['flavors']
        api_flavorids = set(f['id'] for f in flavor_list)

        db_flavorids = set(i['flavorid'] for i in self.inst_types)
        disabled_flavorid = str(self.disabled_type['flavorid'])

        # The API listing must be exactly the DB set minus the disabled one.
        self.assertIn(disabled_flavorid, db_flavorids)
        self.assertEqual(db_flavorids - set([disabled_flavorid]),
                         api_flavorids)

    def test_index_should_list_disabled_flavors_to_admin(self):
        self.context.is_admin = True

        flavor_list = self.controller.index(self.req)['flavors']
        api_flavorids = set(f['id'] for f in flavor_list)

        db_flavorids = set(i['flavorid'] for i in self.inst_types)
        disabled_flavorid = str(self.disabled_type['flavorid'])

        self.assertIn(disabled_flavorid, db_flavorids)
        self.assertEqual(db_flavorids, api_flavorids)

    def test_show_should_include_disabled_flavor_for_user(self):
        """Counterintuitively we should show disabled flavors to all users and
        not just admins. The reason is that, when a user performs a server-show
        request, we want to be able to display the pretty flavor name ('512 MB
        Instance') and not just the flavor-id even if the flavor id has been
        marked disabled.
        """
        self.context.is_admin = False

        flavor = self.controller.show(
            self.req, self.disabled_type['flavorid'])['flavor']

        self.assertEqual(flavor['name'], self.disabled_type['name'])

    def test_show_should_include_disabled_flavor_for_admin(self):
        self.context.is_admin = True

        flavor = self.controller.show(
            self.req, self.disabled_type['flavorid'])['flavor']

        self.assertEqual(flavor['name'], self.disabled_type['name'])
+
+
class DisabledFlavorsWithRealDBTestV20(DisabledFlavorsWithRealDBTestV21):
    """Tests that disabled flavors should not be shown nor listed."""
    # Same test methods as the v2.1 variant, run against the legacy
    # v2.0 controller and request factory.
    Controller = flavors_v2.Controller
    _prefix = "/v2/fake"
    fake_request = fakes.HTTPRequest
+
+
class ParseIsPublicTestV21(test.TestCase):
    """Unit tests for FlavorsController._parse_is_public."""

    Controller = flavors_v3.FlavorsController

    def setUp(self):
        super(ParseIsPublicTestV21, self).setUp()
        self.controller = self.Controller()

    def assertPublic(self, expected, is_public):
        actual = self.controller._parse_is_public(is_public)
        self.assertIs(expected, actual,
                      '%s did not return %s' % (is_public, expected))

    def test_None(self):
        self.assertPublic(True, None)

    def test_truthy(self):
        for value in (True, 't', 'true', 'yes', '1'):
            self.assertPublic(True, value)

    def test_falsey(self):
        for value in (False, 'f', 'false', 'no', '0'):
            self.assertPublic(False, value)

    def test_string_none(self):
        for value in ('none', 'None'):
            self.assertPublic(None, value)

    def test_other(self):
        # Anything outside the recognized vocabulary is a bad request.
        self.assertRaises(
            webob.exc.HTTPBadRequest, self.assertPublic, None, 'other')
+
+
class ParseIsPublicTestV20(ParseIsPublicTestV21):
    # Re-run the _parse_is_public tests against the legacy v2 controller.
    Controller = flavors_v2.Controller
diff --git a/nova/tests/unit/api/openstack/compute/test_image_metadata.py b/nova/tests/unit/api/openstack/compute/test_image_metadata.py
new file mode 100644
index 0000000000..6de8ddf6f6
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_image_metadata.py
@@ -0,0 +1,366 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+import mock
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute import image_metadata
+from nova.api.openstack.compute.plugins.v3 import image_metadata \
+ as image_metadata_v21
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import image_fixtures
+
+IMAGE_FIXTURES = image_fixtures.get_image_fixtures()
+CHK_QUOTA_STR = 'nova.api.openstack.common.check_img_metadata_properties_quota'
+
+
def get_image_123():
    """Return a fresh deep copy of the first image fixture.

    Copying lets each test mutate the result without bleeding state into
    other tests. The original deep-copied the entire IMAGE_FIXTURES list
    only to keep element 0; copying just that element is equivalent and
    avoids the wasted work.
    """
    return copy.deepcopy(IMAGE_FIXTURES[0])
+
+
class ImageMetaDataTestV21(test.NoDBTestCase):
    """Tests for the v2.1 image metadata API controller.

    nova.image.api.API is mocked in every test, so these exercise only
    the WSGI controller behaviour: payload shaping, the metadata quota
    check, and translation of image-service errors into HTTP errors.

    Fix applied: ``test_too_many_metadata_items_on_put`` assigned
    ``body`` and immediately overwrote it before use; the dead first
    assignment is removed.
    """

    controller_class = image_metadata_v21.ImageMetadataController

    def setUp(self):
        super(ImageMetaDataTestV21, self).setUp()
        self.controller = self.controller_class()

    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
    def test_index(self, get_all_mocked):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
        res_dict = self.controller.index(req, '123')
        expected = {'metadata': {'key1': 'value1'}}
        self.assertEqual(res_dict, expected)
        get_all_mocked.assert_called_once_with(mock.ANY, '123')

    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
    def test_show(self, get_mocked):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
        res_dict = self.controller.show(req, '123', 'key1')
        self.assertIn('meta', res_dict)
        self.assertEqual(len(res_dict['meta']), 1)
        self.assertEqual('value1', res_dict['meta']['key1'])
        get_mocked.assert_called_once_with(mock.ANY, '123')

    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
    def test_show_not_found(self, _get_mocked):
        # Image exists but the requested metadata key does not.
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key9')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, req, '123', 'key9')

    @mock.patch('nova.image.api.API.get',
                side_effect=exception.ImageNotFound(image_id='100'))
    def test_show_image_not_found(self, _get_mocked):
        req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, req, '100', 'key9')

    @mock.patch(CHK_QUOTA_STR)
    @mock.patch('nova.image.api.API.update')
    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
    def test_create(self, get_mocked, update_mocked, quota_mocked):
        """POST merges new keys into the image's existing properties."""
        mock_result = copy.deepcopy(get_image_123())
        mock_result['properties']['key7'] = 'value7'
        update_mocked.return_value = mock_result
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
        req.method = 'POST'
        body = {"metadata": {"key7": "value7"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        res = self.controller.create(req, '123', body)
        get_mocked.assert_called_once_with(mock.ANY, '123')
        expected = copy.deepcopy(get_image_123())
        expected['properties'] = {
            'key1': 'value1',  # existing meta
            'key7': 'value7'  # new meta
        }
        quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
        update_mocked.assert_called_once_with(mock.ANY, '123', expected,
                                              data=None, purge_props=True)

        expected_output = {'metadata': {'key1': 'value1', 'key7': 'value7'}}
        self.assertEqual(expected_output, res)

    @mock.patch(CHK_QUOTA_STR)
    @mock.patch('nova.image.api.API.update')
    @mock.patch('nova.image.api.API.get',
                side_effect=exception.ImageNotFound(image_id='100'))
    def test_create_image_not_found(self, _get_mocked, update_mocked,
                                    quota_mocked):
        req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
        req.method = 'POST'
        body = {"metadata": {"key7": "value7"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.create, req, '100', body)
        # Neither quota check nor update may run for a missing image.
        self.assertFalse(quota_mocked.called)
        self.assertFalse(update_mocked.called)

    @mock.patch(CHK_QUOTA_STR)
    @mock.patch('nova.image.api.API.update')
    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
    def test_update_all(self, get_mocked, update_mocked, quota_mocked):
        """PUT on the collection replaces all properties."""
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
        req.method = 'PUT'
        body = {"metadata": {"key9": "value9"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        res = self.controller.update_all(req, '123', body)
        get_mocked.assert_called_once_with(mock.ANY, '123')
        expected = copy.deepcopy(get_image_123())
        expected['properties'] = {
            'key9': 'value9'  # replace meta
        }
        quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
        update_mocked.assert_called_once_with(mock.ANY, '123', expected,
                                              data=None, purge_props=True)

        expected_output = {'metadata': {'key9': 'value9'}}
        self.assertEqual(expected_output, res)

    @mock.patch(CHK_QUOTA_STR)
    @mock.patch('nova.image.api.API.get',
                side_effect=exception.ImageNotFound(image_id='100'))
    def test_update_all_image_not_found(self, _get_mocked, quota_mocked):
        req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata')
        req.method = 'PUT'
        body = {"metadata": {"key9": "value9"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.update_all, req, '100', body)
        self.assertFalse(quota_mocked.called)

    @mock.patch(CHK_QUOTA_STR)
    @mock.patch('nova.image.api.API.update')
    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
    def test_update_item(self, _get_mocked, update_mocked, quota_mocked):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
        req.method = 'PUT'
        body = {"meta": {"key1": "zz"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        res = self.controller.update(req, '123', 'key1', body)
        expected = copy.deepcopy(get_image_123())
        expected['properties'] = {
            'key1': 'zz'  # changed meta
        }
        quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
        update_mocked.assert_called_once_with(mock.ANY, '123', expected,
                                              data=None, purge_props=True)

        expected_output = {'meta': {'key1': 'zz'}}
        self.assertEqual(res, expected_output)

    @mock.patch(CHK_QUOTA_STR)
    @mock.patch('nova.image.api.API.get',
                side_effect=exception.ImageNotFound(image_id='100'))
    def test_update_item_image_not_found(self, _get_mocked, quota_mocked):
        req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
        req.method = 'PUT'
        body = {"meta": {"key1": "zz"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.update, req, '100', 'key1', body)
        self.assertFalse(quota_mocked.called)

    @mock.patch(CHK_QUOTA_STR)
    @mock.patch('nova.image.api.API.update')
    @mock.patch('nova.image.api.API.get')
    def test_update_item_bad_body(self, get_mocked, update_mocked,
                                  quota_mocked):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
        req.method = 'PUT'
        body = {"key1": "zz"}
        # Empty request body on purpose: the controller must 400 before
        # touching the image API at all.
        req.body = ''
        req.headers["content-type"] = "application/json"

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update, req, '123', 'key1', body)
        self.assertFalse(get_mocked.called)
        self.assertFalse(quota_mocked.called)
        self.assertFalse(update_mocked.called)

    @mock.patch(CHK_QUOTA_STR,
                side_effect=webob.exc.HTTPRequestEntityTooLarge(
                    explanation='', headers={'Retry-After': 0}))
    @mock.patch('nova.image.api.API.update')
    @mock.patch('nova.image.api.API.get')
    def test_update_item_too_many_keys(self, get_mocked, update_mocked,
                                       _quota_mocked):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
        req.method = 'PUT'
        # A "metadata" body on an item URL is malformed for update().
        body = {"metadata": {"foo": "bar"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update, req, '123', 'key1', body)
        self.assertFalse(get_mocked.called)
        self.assertFalse(update_mocked.called)

    @mock.patch(CHK_QUOTA_STR)
    @mock.patch('nova.image.api.API.update')
    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
    def test_update_item_body_uri_mismatch(self, _get_mocked, update_mocked,
                                           quota_mocked):
        # Key in the body ('key1') disagrees with the key in the URL ('bad').
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/bad')
        req.method = 'PUT'
        body = {"meta": {"key1": "value1"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update, req, '123', 'bad', body)
        self.assertFalse(quota_mocked.called)
        self.assertFalse(update_mocked.called)

    @mock.patch('nova.image.api.API.update')
    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
    def test_delete(self, _get_mocked, update_mocked):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
        req.method = 'DELETE'
        res = self.controller.delete(req, '123', 'key1')
        # Deleting the only key leaves empty properties on the image.
        expected = copy.deepcopy(get_image_123())
        expected['properties'] = {}
        update_mocked.assert_called_once_with(mock.ANY, '123', expected,
                                              data=None, purge_props=True)

        self.assertIsNone(res)

    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
    def test_delete_not_found(self, _get_mocked):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
        req.method = 'DELETE'

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.delete, req, '123', 'blah')

    @mock.patch('nova.image.api.API.get',
                side_effect=exception.ImageNotFound(image_id='100'))
    def test_delete_image_not_found(self, _get_mocked):
        req = fakes.HTTPRequest.blank('/v2/fake/images/100/metadata/key1')
        req.method = 'DELETE'

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.delete, req, '100', 'key1')

    @mock.patch(CHK_QUOTA_STR,
                side_effect=webob.exc.HTTPForbidden(
                    explanation='', headers={'Retry-After': 0}))
    @mock.patch('nova.image.api.API.update')
    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
    def test_too_many_metadata_items_on_create(self, _get_mocked,
                                               update_mocked, _quota_mocked):
        body = {"metadata": {"foo": "bar"}}
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata')
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.create, req, '123', body)
        self.assertFalse(update_mocked.called)

    @mock.patch(CHK_QUOTA_STR,
                side_effect=webob.exc.HTTPForbidden(
                    explanation='', headers={'Retry-After': 0}))
    @mock.patch('nova.image.api.API.update')
    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
    def test_too_many_metadata_items_on_put(self, _get_mocked,
                                            update_mocked, _quota_mocked):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/blah')
        req.method = 'PUT'
        body = {"meta": {"blah": "blah"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.update, req, '123', 'blah', body)
        self.assertFalse(update_mocked.called)

    @mock.patch('nova.image.api.API.get',
                side_effect=exception.ImageNotAuthorized(image_id='123'))
    def test_image_not_authorized_update(self, _get_mocked):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
        req.method = 'PUT'
        body = {"meta": {"key1": "value1"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.update, req, '123', 'key1', body)

    @mock.patch('nova.image.api.API.get',
                side_effect=exception.ImageNotAuthorized(image_id='123'))
    def test_image_not_authorized_update_all(self, _get_mocked):
        image_id = 131
        # see nova.tests.unit.api.openstack.fakes:_make_image_fixtures

        req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
                                      % image_id)
        req.method = 'PUT'
        body = {"meta": {"key1": "value1"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.update_all, req, image_id, body)

    @mock.patch('nova.image.api.API.get',
                side_effect=exception.ImageNotAuthorized(image_id='123'))
    def test_image_not_authorized_create(self, _get_mocked):
        image_id = 131
        # see nova.tests.unit.api.openstack.fakes:_make_image_fixtures

        req = fakes.HTTPRequest.blank('/v2/fake/images/%s/metadata/key1'
                                      % image_id)
        req.method = 'POST'
        body = {"meta": {"key1": "value1"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.create, req, image_id, body)
+
+
class ImageMetaDataTestV2(ImageMetaDataTestV21):
    """Re-runs the metadata tests against the legacy v2 controller."""

    controller_class = image_metadata.Controller

    # NOTE(cyeoh): This duplicate unittest is necessary for a race condition
    # with the V21 unittests. It's mock issue.
    @mock.patch('nova.image.api.API.update')
    @mock.patch('nova.image.api.API.get', return_value=get_image_123())
    def test_delete(self, _get_mocked, update_mocked):
        req = fakes.HTTPRequest.blank('/v2/fake/images/123/metadata/key1')
        req.method = 'DELETE'
        res = self.controller.delete(req, '123', 'key1')
        # Deleting the only key leaves empty properties on the image.
        expected = copy.deepcopy(get_image_123())
        expected['properties'] = {}
        update_mocked.assert_called_once_with(mock.ANY, '123', expected,
                                              data=None, purge_props=True)

        self.assertIsNone(res)
diff --git a/nova/tests/unit/api/openstack/compute/test_images.py b/nova/tests/unit/api/openstack/compute/test_images.py
new file mode 100644
index 0000000000..ad55f9a86e
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_images.py
@@ -0,0 +1,1046 @@
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests of the new image services, both as a service layer,
+and as a WSGI layer
+"""
+
+import copy
+
+from lxml import etree
+import mock
+import webob
+
+from nova.api.openstack.compute import images
+from nova.api.openstack.compute.plugins.v3 import images as images_v21
+from nova.api.openstack.compute.views import images as images_view
+from nova.api.openstack import xmlutil
+from nova import exception
+from nova.image import glance
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import image_fixtures
+from nova.tests.unit import matchers
+
+NS = "{http://docs.openstack.org/compute/api/v1.1}"
+ATOMNS = "{http://www.w3.org/2005/Atom}"
+NOW_API_FORMAT = "2010-10-11T10:30:22Z"
+IMAGE_FIXTURES = image_fixtures.get_image_fixtures()
+
+
+class ImagesControllerTestV21(test.NoDBTestCase):
+ """Test of the OpenStack API /images application controller w/Glance.
+ """
+ image_controller_class = images_v21.ImagesController
+ url_base = '/v3'
+ bookmark_base = ''
+ http_request = fakes.HTTPRequestV3
+
    def setUp(self):
        """Run before each test.

        Stubs out networking, rate limiting, keypairs and the compute
        API snapshot/backup calls, then precomputes the two expected
        image payloads used throughout the class: 123 (a plain public
        image) and 124 (an in-progress server snapshot).
        """
        super(ImagesControllerTestV21, self).setUp()
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_key_pair_funcs(self.stubs)
        fakes.stub_out_compute_api_snapshot(self.stubs)
        fakes.stub_out_compute_api_backup(self.stubs)

        self.controller = self.image_controller_class()
        self.url_prefix = "http://localhost%s/images" % self.url_base
        self.bookmark_prefix = "http://localhost%s/images" % self.bookmark_base
        self.uuid = 'fa95aaf5-ab3b-4cd8-88c0-2be7dd051aaf'
        self.server_uuid = "aa640691-d1a7-4a67-9d3c-d35ee6b3cc74"
        self.server_href = (
            "http://localhost%s/servers/%s" % (self.url_base,
                                               self.server_uuid))
        self.server_bookmark = (
            "http://localhost%s/servers/%s" % (self.bookmark_base,
                                               self.server_uuid))
        # Template for the glance "alternate" link: (glance_url, image_id).
        self.alternate = "%s/images/%s"

        # Expected show() payload for image 123: an ACTIVE public image
        # with self/bookmark/alternate links.
        self.expected_image_123 = {
            "image": {'id': '123',
                      'name': 'public image',
                      'metadata': {'key1': 'value1'},
                      'updated': NOW_API_FORMAT,
                      'created': NOW_API_FORMAT,
                      'status': 'ACTIVE',
                      'minDisk': 10,
                      'progress': 100,
                      'minRam': 128,
                      "links": [{
                          "rel": "self",
                          "href": "%s/123" % self.url_prefix
                      },
                      {
                          "rel": "bookmark",
                          "href":
                              "%s/123" % self.bookmark_prefix
                      },
                      {
                          "rel": "alternate",
                          "type": "application/vnd.openstack.image",
                          "href": self.alternate %
                                  (glance.generate_glance_url(),
                                   123),
                      }],
            },
        }

        # Expected show() payload for image 124: a SAVING snapshot that
        # additionally carries a 'server' block pointing at its instance.
        self.expected_image_124 = {
            "image": {'id': '124',
                      'name': 'queued snapshot',
                      'metadata': {
                          u'instance_uuid': self.server_uuid,
                          u'user_id': u'fake',
                      },
                      'updated': NOW_API_FORMAT,
                      'created': NOW_API_FORMAT,
                      'status': 'SAVING',
                      'progress': 25,
                      'minDisk': 0,
                      'minRam': 0,
                      'server': {
                          'id': self.server_uuid,
                          "links": [{
                              "rel": "self",
                              "href": self.server_href,
                          },
                          {
                              "rel": "bookmark",
                              "href": self.server_bookmark,
                          }],
                      },
                      "links": [{
                          "rel": "self",
                          "href": "%s/124" % self.url_prefix
                      },
                      {
                          "rel": "bookmark",
                          "href":
                              "%s/124" % self.bookmark_prefix
                      },
                      {
                          "rel": "alternate",
                          "type":
                              "application/vnd.openstack.image",
                          "href": self.alternate %
                                  (glance.generate_glance_url(),
                                   124),
                      }],
            },
        }
+
+ @mock.patch('nova.image.api.API.get', return_value=IMAGE_FIXTURES[0])
+ def test_get_image(self, get_mocked):
+ request = self.http_request.blank(self.url_base + 'images/123')
+ actual_image = self.controller.show(request, '123')
+ self.assertThat(actual_image,
+ matchers.DictMatches(self.expected_image_123))
+ get_mocked.assert_called_once_with(mock.ANY, '123')
+
    @mock.patch('nova.image.api.API.get', return_value=IMAGE_FIXTURES[1])
    def test_get_image_with_custom_prefix(self, _get_mocked):
        """Configured link prefixes must replace the default host URLs."""
        self.flags(osapi_compute_link_prefix='https://zoo.com:42',
                   osapi_glance_link_prefix='http://circus.com:34')
        fake_req = self.http_request.blank(self.url_base + 'images/124')
        actual_image = self.controller.show(fake_req, '124')

        # NOTE: this mutates self.expected_image_124 in place; that is
        # safe only because setUp rebuilds the fixture for every test.
        expected_image = self.expected_image_124
        expected_image["image"]["links"][0]["href"] = (
            "https://zoo.com:42%s/images/124" % self.url_base)
        expected_image["image"]["links"][1]["href"] = (
            "https://zoo.com:42%s/images/124" % self.bookmark_base)
        expected_image["image"]["links"][2]["href"] = (
            "http://circus.com:34/images/124")
        expected_image["image"]["server"]["links"][0]["href"] = (
            "https://zoo.com:42%s/servers/%s" % (self.url_base,
                                                 self.server_uuid))
        expected_image["image"]["server"]["links"][1]["href"] = (
            "https://zoo.com:42%s/servers/%s" % (self.bookmark_base,
                                                 self.server_uuid))

        self.assertThat(actual_image, matchers.DictMatches(expected_image))
+
+ @mock.patch('nova.image.api.API.get', side_effect=exception.NotFound)
+ def test_get_image_404(self, _get_mocked):
+ fake_req = self.http_request.blank(self.url_base + 'images/unknown')
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.show, fake_req, 'unknown')
+
+ @mock.patch('nova.image.api.API.get_all', return_value=IMAGE_FIXTURES)
+ def test_get_image_details(self, get_all_mocked):
+ request = self.http_request.blank(self.url_base + 'images/detail')
+ response = self.controller.detail(request)
+
+ get_all_mocked.assert_called_once_with(mock.ANY, filters={})
+ response_list = response["images"]
+
+ image_125 = copy.deepcopy(self.expected_image_124["image"])
+ image_125['id'] = '125'
+ image_125['name'] = 'saving snapshot'
+ image_125['progress'] = 50
+ image_125["links"][0]["href"] = "%s/125" % self.url_prefix
+ image_125["links"][1]["href"] = "%s/125" % self.bookmark_prefix
+ image_125["links"][2]["href"] = (
+ "%s/images/125" % glance.generate_glance_url())
+
+ image_126 = copy.deepcopy(self.expected_image_124["image"])
+ image_126['id'] = '126'
+ image_126['name'] = 'active snapshot'
+ image_126['status'] = 'ACTIVE'
+ image_126['progress'] = 100
+ image_126["links"][0]["href"] = "%s/126" % self.url_prefix
+ image_126["links"][1]["href"] = "%s/126" % self.bookmark_prefix
+ image_126["links"][2]["href"] = (
+ "%s/images/126" % glance.generate_glance_url())
+
+ image_127 = copy.deepcopy(self.expected_image_124["image"])
+ image_127['id'] = '127'
+ image_127['name'] = 'killed snapshot'
+ image_127['status'] = 'ERROR'
+ image_127['progress'] = 0
+ image_127["links"][0]["href"] = "%s/127" % self.url_prefix
+ image_127["links"][1]["href"] = "%s/127" % self.bookmark_prefix
+ image_127["links"][2]["href"] = (
+ "%s/images/127" % glance.generate_glance_url())
+
+ image_128 = copy.deepcopy(self.expected_image_124["image"])
+ image_128['id'] = '128'
+ image_128['name'] = 'deleted snapshot'
+ image_128['status'] = 'DELETED'
+ image_128['progress'] = 0
+ image_128["links"][0]["href"] = "%s/128" % self.url_prefix
+ image_128["links"][1]["href"] = "%s/128" % self.bookmark_prefix
+ image_128["links"][2]["href"] = (
+ "%s/images/128" % glance.generate_glance_url())
+
+ image_129 = copy.deepcopy(self.expected_image_124["image"])
+ image_129['id'] = '129'
+ image_129['name'] = 'pending_delete snapshot'
+ image_129['status'] = 'DELETED'
+ image_129['progress'] = 0
+ image_129["links"][0]["href"] = "%s/129" % self.url_prefix
+ image_129["links"][1]["href"] = "%s/129" % self.bookmark_prefix
+ image_129["links"][2]["href"] = (
+ "%s/images/129" % glance.generate_glance_url())
+
+ image_130 = copy.deepcopy(self.expected_image_123["image"])
+ image_130['id'] = '130'
+ image_130['name'] = None
+ image_130['metadata'] = {}
+ image_130['minDisk'] = 0
+ image_130['minRam'] = 0
+ image_130["links"][0]["href"] = "%s/130" % self.url_prefix
+ image_130["links"][1]["href"] = "%s/130" % self.bookmark_prefix
+ image_130["links"][2]["href"] = (
+ "%s/images/130" % glance.generate_glance_url())
+
+ image_131 = copy.deepcopy(self.expected_image_123["image"])
+ image_131['id'] = '131'
+ image_131['name'] = None
+ image_131['metadata'] = {}
+ image_131['minDisk'] = 0
+ image_131['minRam'] = 0
+ image_131["links"][0]["href"] = "%s/131" % self.url_prefix
+ image_131["links"][1]["href"] = "%s/131" % self.bookmark_prefix
+ image_131["links"][2]["href"] = (
+ "%s/images/131" % glance.generate_glance_url())
+
+ expected = [self.expected_image_123["image"],
+ self.expected_image_124["image"],
+ image_125, image_126, image_127,
+ image_128, image_129, image_130,
+ image_131]
+
+ self.assertThat(expected, matchers.DictListMatches(response_list))
+
+ @mock.patch('nova.image.api.API.get_all')
+ def test_get_image_details_with_limit(self, get_all_mocked):
+ request = self.http_request.blank(self.url_base +
+ 'images/detail?limit=2')
+ self.controller.detail(request)
+ get_all_mocked.assert_called_once_with(mock.ANY, limit=2, filters={})
+
+ @mock.patch('nova.image.api.API.get_all')
+ def test_get_image_details_with_limit_and_page_size(self, get_all_mocked):
+ request = self.http_request.blank(
+ self.url_base + 'images/detail?limit=2&page_size=1')
+ self.controller.detail(request)
+ get_all_mocked.assert_called_once_with(mock.ANY, limit=2, filters={},
+ page_size=1)
+
+ @mock.patch('nova.image.api.API.get_all')
+ def _detail_request(self, filters, request, get_all_mocked):
+ self.controller.detail(request)
+ get_all_mocked.assert_called_once_with(mock.ANY, filters=filters)
+
+ def test_image_detail_filter_with_name(self):
+ filters = {'name': 'testname'}
+ request = self.http_request.blank(self.url_base + 'images/detail'
+ '?name=testname')
+ self._detail_request(filters, request)
+
+ def test_image_detail_filter_with_status(self):
+ filters = {'status': 'active'}
+ request = self.http_request.blank(self.url_base + 'images/detail'
+ '?status=ACTIVE')
+ self._detail_request(filters, request)
+
+ def test_image_detail_filter_with_property(self):
+ filters = {'property-test': '3'}
+ request = self.http_request.blank(self.url_base + 'images/detail'
+ '?property-test=3')
+ self._detail_request(filters, request)
+
+ def test_image_detail_filter_server_href(self):
+ filters = {'property-instance_uuid': self.uuid}
+ request = self.http_request.blank(
+ self.url_base + 'images/detail?server=' + self.uuid)
+ self._detail_request(filters, request)
+
+ def test_image_detail_filter_server_uuid(self):
+ filters = {'property-instance_uuid': self.uuid}
+ request = self.http_request.blank(
+ self.url_base + 'images/detail?server=' + self.uuid)
+ self._detail_request(filters, request)
+
+ def test_image_detail_filter_changes_since(self):
+ filters = {'changes-since': '2011-01-24T17:08Z'}
+ request = self.http_request.blank(self.url_base + 'images/detail'
+ '?changes-since=2011-01-24T17:08Z')
+ self._detail_request(filters, request)
+
+ def test_image_detail_filter_with_type(self):
+ filters = {'property-image_type': 'BASE'}
+ request = self.http_request.blank(
+ self.url_base + 'images/detail?type=BASE')
+ self._detail_request(filters, request)
+
+ def test_image_detail_filter_not_supported(self):
+ filters = {'status': 'active'}
+ request = self.http_request.blank(
+ self.url_base + 'images/detail?status='
+ 'ACTIVE&UNSUPPORTEDFILTER=testname')
+ self._detail_request(filters, request)
+
+ def test_image_detail_no_filters(self):
+ filters = {}
+ request = self.http_request.blank(self.url_base + 'images/detail')
+ self._detail_request(filters, request)
+
+ @mock.patch('nova.image.api.API.get_all', side_effect=exception.Invalid)
+ def test_image_detail_invalid_marker(self, _get_all_mocked):
+ request = self.http_request.blank(self.url_base + '?marker=invalid')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.detail,
+ request)
+
+ def test_generate_alternate_link(self):
+ view = images_view.ViewBuilder()
+ request = self.http_request.blank(self.url_base + 'images/1')
+ generated_url = view._get_alternate_link(request, 1)
+ actual_url = "%s/images/1" % glance.generate_glance_url()
+ self.assertEqual(generated_url, actual_url)
+
+ def _check_response(self, controller_method, response, expected_code):
+ self.assertEqual(expected_code, controller_method.wsgi_code)
+
+ @mock.patch('nova.image.api.API.delete')
+ def test_delete_image(self, delete_mocked):
+ request = self.http_request.blank(self.url_base + 'images/124')
+ request.method = 'DELETE'
+ response = self.controller.delete(request, '124')
+ self._check_response(self.controller.delete, response, 204)
+ delete_mocked.assert_called_once_with(mock.ANY, '124')
+
+ @mock.patch('nova.image.api.API.delete',
+ side_effect=exception.ImageNotAuthorized(image_id='123'))
+ def test_delete_deleted_image(self, _delete_mocked):
+ # If you try to delete a deleted image, you get back 403 Forbidden.
+ request = self.http_request.blank(self.url_base + 'images/123')
+ request.method = 'DELETE'
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete,
+ request, '123')
+
+ @mock.patch('nova.image.api.API.delete',
+ side_effect=exception.ImageNotFound(image_id='123'))
+ def test_delete_image_not_found(self, _delete_mocked):
+ request = self.http_request.blank(self.url_base + 'images/300')
+ request.method = 'DELETE'
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.delete, request, '300')
+
+
+class ImagesControllerTestV2(ImagesControllerTestV21):
+ image_controller_class = images.Controller
+ url_base = '/v2/fake'
+ bookmark_base = '/fake'
+ http_request = fakes.HTTPRequest
+
+ def _check_response(self, controller_method, response, expected_code):
+ self.assertEqual(expected_code, response.status_int)
+
+
+class ImageXMLSerializationTest(test.NoDBTestCase):
+
+ TIMESTAMP = "2010-10-11T10:30:22Z"
+ SERVER_UUID = 'aa640691-d1a7-4a67-9d3c-d35ee6b3cc74'
+ SERVER_HREF = 'http://localhost/v2/fake/servers/' + SERVER_UUID
+ SERVER_BOOKMARK = 'http://localhost/fake/servers/' + SERVER_UUID
+ IMAGE_HREF = 'http://localhost/v2/fake/images/%s'
+ IMAGE_NEXT = 'http://localhost/v2/fake/images?limit=%s&marker=%s'
+ IMAGE_BOOKMARK = 'http://localhost/fake/images/%s'
+
+ def test_xml_declaration(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'progress': 80,
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
+ self.assertTrue(has_dec)
+
+ def test_show(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'progress': 80,
+ 'minRam': 10,
+ 'minDisk': 100,
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'image')
+ image_dict = fixture['image']
+
+ for key in ['name', 'id', 'updated', 'created', 'status', 'progress']:
+ self.assertEqual(root.get(key), str(image_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ metadata_root = root.find('{0}metadata'.format(NS))
+ metadata_elems = metadata_root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 1)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = image_dict['metadata'].items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ server_root = root.find('{0}server'.format(NS))
+ self.assertEqual(server_root.get('id'), image_dict['server']['id'])
+ link_nodes = server_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['server']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_show_zero_metadata(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'metadata': {},
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'image')
+ image_dict = fixture['image']
+
+ for key in ['name', 'id', 'updated', 'created', 'status']:
+ self.assertEqual(root.get(key), str(image_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ meta_nodes = root.findall('{0}meta'.format(ATOMNS))
+ self.assertEqual(len(meta_nodes), 0)
+
+ server_root = root.find('{0}server'.format(NS))
+ self.assertEqual(server_root.get('id'), image_dict['server']['id'])
+ link_nodes = server_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['server']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_show_image_no_metadata_key(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'image')
+ image_dict = fixture['image']
+
+ for key in ['name', 'id', 'updated', 'created', 'status']:
+ self.assertEqual(root.get(key), str(image_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ meta_nodes = root.findall('{0}meta'.format(ATOMNS))
+ self.assertEqual(len(meta_nodes), 0)
+
+ server_root = root.find('{0}server'.format(NS))
+ self.assertEqual(server_root.get('id'), image_dict['server']['id'])
+ link_nodes = server_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['server']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_show_no_server(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'image')
+ image_dict = fixture['image']
+
+ for key in ['name', 'id', 'updated', 'created', 'status']:
+ self.assertEqual(root.get(key), str(image_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ metadata_root = root.find('{0}metadata'.format(NS))
+ metadata_elems = metadata_root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 1)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = image_dict['metadata'].items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ server_root = root.find('{0}server'.format(NS))
+ self.assertIsNone(server_root)
+
+ def test_show_with_min_ram(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'progress': 80,
+ 'minRam': 256,
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'image')
+ image_dict = fixture['image']
+
+ for key in ['name', 'id', 'updated', 'created', 'status', 'progress',
+ 'minRam']:
+ self.assertEqual(root.get(key), str(image_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ metadata_root = root.find('{0}metadata'.format(NS))
+ metadata_elems = metadata_root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 1)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = image_dict['metadata'].items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ server_root = root.find('{0}server'.format(NS))
+ self.assertEqual(server_root.get('id'), image_dict['server']['id'])
+ link_nodes = server_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['server']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_show_with_min_disk(self):
+ serializer = images.ImageTemplate()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'progress': 80,
+ 'minDisk': 5,
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'image')
+ image_dict = fixture['image']
+
+ for key in ['name', 'id', 'updated', 'created', 'status', 'progress',
+ 'minDisk']:
+ self.assertEqual(root.get(key), str(image_dict[key]))
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ metadata_root = root.find('{0}metadata'.format(NS))
+ metadata_elems = metadata_root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 1)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = image_dict['metadata'].items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ server_root = root.find('{0}server'.format(NS))
+ self.assertEqual(server_root.get('id'), image_dict['server']['id'])
+ link_nodes = server_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['server']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_index(self):
+ serializer = images.MinimalImagesTemplate()
+
+ fixture = {
+ 'images': [
+ {
+ 'id': 1,
+ 'name': 'Image1',
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ {
+ 'id': 2,
+ 'name': 'Image2',
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 2,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 2,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ ]
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'images')
+ image_elems = root.findall('{0}image'.format(NS))
+ self.assertEqual(len(image_elems), 2)
+ for i, image_elem in enumerate(image_elems):
+ image_dict = fixture['images'][i]
+
+ for key in ['name', 'id']:
+ self.assertEqual(image_elem.get(key), str(image_dict[key]))
+
+ link_nodes = image_elem.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_index_with_links(self):
+ serializer = images.MinimalImagesTemplate()
+
+ fixture = {
+ 'images': [
+ {
+ 'id': 1,
+ 'name': 'Image1',
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ {
+ 'id': 2,
+ 'name': 'Image2',
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 2,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 2,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ ],
+ 'images_links': [
+ {
+ 'rel': 'next',
+ 'href': self.IMAGE_NEXT % (2, 2),
+ }
+ ],
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'images')
+ image_elems = root.findall('{0}image'.format(NS))
+ self.assertEqual(len(image_elems), 2)
+ for i, image_elem in enumerate(image_elems):
+ image_dict = fixture['images'][i]
+
+ for key in ['name', 'id']:
+ self.assertEqual(image_elem.get(key), str(image_dict[key]))
+
+ link_nodes = image_elem.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ # Check images_links
+ images_links = root.findall('{0}link'.format(ATOMNS))
+ for i, link in enumerate(fixture['images_links']):
+ for key, value in link.items():
+ self.assertEqual(images_links[i].get(key), value)
+
+ def test_index_zero_images(self):
+ serializer = images.MinimalImagesTemplate()
+
+ fixtures = {
+ 'images': [],
+ }
+
+ output = serializer.serialize(fixtures)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'images')
+ image_elems = root.findall('{0}image'.format(NS))
+ self.assertEqual(len(image_elems), 0)
+
+ def test_detail(self):
+ serializer = images.ImagesTemplate()
+
+ fixture = {
+ 'images': [
+ {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'ACTIVE',
+ 'server': {
+ 'id': self.SERVER_UUID,
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 1,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 1,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ {
+ 'id': '2',
+ 'name': 'Image2',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'status': 'SAVING',
+ 'progress': 80,
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % 2,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.IMAGE_BOOKMARK % 2,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ ]
+ }
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'images')
+ image_elems = root.findall('{0}image'.format(NS))
+ self.assertEqual(len(image_elems), 2)
+ for i, image_elem in enumerate(image_elems):
+ image_dict = fixture['images'][i]
+
+ for key in ['name', 'id', 'updated', 'created', 'status']:
+ self.assertEqual(image_elem.get(key), str(image_dict[key]))
+
+ link_nodes = image_elem.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(image_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
diff --git a/nova/tests/unit/api/openstack/compute/test_limits.py b/nova/tests/unit/api/openstack/compute/test_limits.py
new file mode 100644
index 0000000000..47da849b28
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_limits.py
@@ -0,0 +1,1016 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests dealing with HTTP rate-limiting.
+"""
+
+import httplib
+import StringIO
+from xml.dom import minidom
+
+from lxml import etree
+import mock
+from oslo.serialization import jsonutils
+import six
+import webob
+
+from nova.api.openstack.compute import limits
+from nova.api.openstack.compute.plugins.v3 import limits as limits_v3
+from nova.api.openstack.compute import views
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+import nova.context
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import matchers
+from nova import utils
+
+
+TEST_LIMITS = [
+ limits.Limit("GET", "/delayed", "^/delayed", 1,
+ utils.TIME_UNITS['MINUTE']),
+ limits.Limit("POST", "*", ".*", 7, utils.TIME_UNITS['MINUTE']),
+ limits.Limit("POST", "/servers", "^/servers", 3,
+ utils.TIME_UNITS['MINUTE']),
+ limits.Limit("PUT", "*", "", 10, utils.TIME_UNITS['MINUTE']),
+ limits.Limit("PUT", "/servers", "^/servers", 5,
+ utils.TIME_UNITS['MINUTE']),
+]
+NS = {
+ 'atom': 'http://www.w3.org/2005/Atom',
+ 'ns': 'http://docs.openstack.org/common/api/v1.0'
+}
+
+
+class BaseLimitTestSuite(test.NoDBTestCase):
+ """Base test suite which provides relevant stubs and time abstraction."""
+
+ def setUp(self):
+ super(BaseLimitTestSuite, self).setUp()
+ self.time = 0.0
+ self.stubs.Set(limits.Limit, "_get_time", self._get_time)
+ self.absolute_limits = {}
+
+ def stub_get_project_quotas(context, project_id, usages=True):
+ return dict((k, dict(limit=v))
+ for k, v in self.absolute_limits.items())
+
+ self.stubs.Set(nova.quota.QUOTAS, "get_project_quotas",
+ stub_get_project_quotas)
+
+ def _get_time(self):
+ """Return the "time" according to this test suite."""
+ return self.time
+
+
+class LimitsControllerTestV21(BaseLimitTestSuite):
+ """Tests for `limits.LimitsController` class."""
+ limits_controller = limits_v3.LimitsController
+
+ def setUp(self):
+ """Run before each test."""
+ super(LimitsControllerTestV21, self).setUp()
+ self.controller = wsgi.Resource(self.limits_controller())
+ self.ctrler = self.limits_controller()
+
+ def _get_index_request(self, accept_header="application/json",
+ tenant_id=None):
+ """Helper to set routing arguments."""
+ request = webob.Request.blank("/")
+ if tenant_id:
+ request = webob.Request.blank("/?tenant_id=%s" % tenant_id)
+
+ request.accept = accept_header
+ request.environ["wsgiorg.routing_args"] = (None, {
+ "action": "index",
+ "controller": "",
+ })
+ context = nova.context.RequestContext('testuser', 'testproject')
+ request.environ["nova.context"] = context
+ return request
+
+ def _populate_limits(self, request):
+ """Put limit info into a request."""
+ _limits = [
+ limits.Limit("GET", "*", ".*", 10, 60).display(),
+ limits.Limit("POST", "*", ".*", 5, 60 * 60).display(),
+ limits.Limit("GET", "changes-since*", "changes-since",
+ 5, 60).display(),
+ ]
+ request.environ["nova.limits"] = _limits
+ return request
+
+ def test_empty_index_json(self):
+ # Test getting empty limit details in JSON.
+ request = self._get_index_request()
+ response = request.get_response(self.controller)
+ expected = {
+ "limits": {
+ "rate": [],
+ "absolute": {},
+ },
+ }
+ body = jsonutils.loads(response.body)
+ self.assertEqual(expected, body)
+
+ def test_index_json(self):
+ self._test_index_json()
+
+ def test_index_json_by_tenant(self):
+ self._test_index_json('faketenant')
+
+ def _test_index_json(self, tenant_id=None):
+ # Test getting limit details in JSON.
+ request = self._get_index_request(tenant_id=tenant_id)
+ context = request.environ["nova.context"]
+ if tenant_id is None:
+ tenant_id = context.project_id
+
+ request = self._populate_limits(request)
+ self.absolute_limits = {
+ 'ram': 512,
+ 'instances': 5,
+ 'cores': 21,
+ 'key_pairs': 10,
+ 'floating_ips': 10,
+ 'security_groups': 10,
+ 'security_group_rules': 20,
+ }
+ expected = {
+ "limits": {
+ "rate": [
+ {
+ "regex": ".*",
+ "uri": "*",
+ "limit": [
+ {
+ "verb": "GET",
+ "next-available": "1970-01-01T00:00:00Z",
+ "unit": "MINUTE",
+ "value": 10,
+ "remaining": 10,
+ },
+ {
+ "verb": "POST",
+ "next-available": "1970-01-01T00:00:00Z",
+ "unit": "HOUR",
+ "value": 5,
+ "remaining": 5,
+ },
+ ],
+ },
+ {
+ "regex": "changes-since",
+ "uri": "changes-since*",
+ "limit": [
+ {
+ "verb": "GET",
+ "next-available": "1970-01-01T00:00:00Z",
+ "unit": "MINUTE",
+ "value": 5,
+ "remaining": 5,
+ },
+ ],
+ },
+
+ ],
+ "absolute": {
+ "maxTotalRAMSize": 512,
+ "maxTotalInstances": 5,
+ "maxTotalCores": 21,
+ "maxTotalKeypairs": 10,
+ "maxTotalFloatingIps": 10,
+ "maxSecurityGroups": 10,
+ "maxSecurityGroupRules": 20,
+ },
+ },
+ }
+
+ def _get_project_quotas(context, project_id, usages=True):
+ return dict((k, dict(limit=v))
+ for k, v in self.absolute_limits.items())
+
+ with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
+ get_project_quotas:
+ get_project_quotas.side_effect = _get_project_quotas
+
+ response = request.get_response(self.controller)
+
+ body = jsonutils.loads(response.body)
+ self.assertEqual(expected, body)
+ get_project_quotas.assert_called_once_with(context, tenant_id,
+ usages=False)
+
+
+class LimitsControllerTestV2(LimitsControllerTestV21):
+ limits_controller = limits.LimitsController
+
+ def _populate_limits_diff_regex(self, request):
+ """Put limit info into a request."""
+ _limits = [
+ limits.Limit("GET", "*", ".*", 10, 60).display(),
+ limits.Limit("GET", "*", "*.*", 10, 60).display(),
+ ]
+ request.environ["nova.limits"] = _limits
+ return request
+
+ def test_index_diff_regex(self):
+ # Test getting limit details in JSON.
+ request = self._get_index_request()
+ request = self._populate_limits_diff_regex(request)
+ response = request.get_response(self.controller)
+ expected = {
+ "limits": {
+ "rate": [
+ {
+ "regex": ".*",
+ "uri": "*",
+ "limit": [
+ {
+ "verb": "GET",
+ "next-available": "1970-01-01T00:00:00Z",
+ "unit": "MINUTE",
+ "value": 10,
+ "remaining": 10,
+ },
+ ],
+ },
+ {
+ "regex": "*.*",
+ "uri": "*",
+ "limit": [
+ {
+ "verb": "GET",
+ "next-available": "1970-01-01T00:00:00Z",
+ "unit": "MINUTE",
+ "value": 10,
+ "remaining": 10,
+ },
+ ],
+ },
+
+ ],
+ "absolute": {},
+ },
+ }
+ body = jsonutils.loads(response.body)
+ self.assertEqual(expected, body)
+
+ def _test_index_absolute_limits_json(self, expected):
+ request = self._get_index_request()
+ response = request.get_response(self.controller)
+ body = jsonutils.loads(response.body)
+ self.assertEqual(expected, body['limits']['absolute'])
+
+ def test_index_ignores_extra_absolute_limits_json(self):
+ self.absolute_limits = {'unknown_limit': 9001}
+ self._test_index_absolute_limits_json({})
+
+ def test_index_absolute_ram_json(self):
+ self.absolute_limits = {'ram': 1024}
+ self._test_index_absolute_limits_json({'maxTotalRAMSize': 1024})
+
+ def test_index_absolute_cores_json(self):
+ self.absolute_limits = {'cores': 17}
+ self._test_index_absolute_limits_json({'maxTotalCores': 17})
+
+ def test_index_absolute_instances_json(self):
+ self.absolute_limits = {'instances': 19}
+ self._test_index_absolute_limits_json({'maxTotalInstances': 19})
+
+ def test_index_absolute_metadata_json(self):
+ # NOTE: both server metadata and image metadata are overloaded
+ # into metadata_items
+ self.absolute_limits = {'metadata_items': 23}
+ expected = {
+ 'maxServerMeta': 23,
+ 'maxImageMeta': 23,
+ }
+ self._test_index_absolute_limits_json(expected)
+
+ def test_index_absolute_injected_files(self):
+ self.absolute_limits = {
+ 'injected_files': 17,
+ 'injected_file_content_bytes': 86753,
+ }
+ expected = {
+ 'maxPersonality': 17,
+ 'maxPersonalitySize': 86753,
+ }
+ self._test_index_absolute_limits_json(expected)
+
+ def test_index_absolute_security_groups(self):
+ self.absolute_limits = {
+ 'security_groups': 8,
+ 'security_group_rules': 16,
+ }
+ expected = {
+ 'maxSecurityGroups': 8,
+ 'maxSecurityGroupRules': 16,
+ }
+ self._test_index_absolute_limits_json(expected)
+
+ def test_limit_create(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/limits')
+ self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.create,
+ req, {})
+
+ def test_limit_delete(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/limits')
+ self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.delete,
+ req, 1)
+
+ def test_limit_detail(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/limits')
+ self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.detail,
+ req)
+
+ def test_limit_show(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/limits')
+ self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.show,
+ req, 1)
+
+ def test_limit_update(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/limits')
+ self.assertRaises(webob.exc.HTTPNotImplemented, self.ctrler.update,
+ req, 1, {})
+
+
+class MockLimiter(limits.Limiter):
+ pass
+
+
+class LimitMiddlewareTest(BaseLimitTestSuite):
+    """Tests for the `limits.RateLimitingMiddleware` class.
+
+    The middleware wraps a trivial WSGI app with a single limit of
+    1 GET per minute, so the second GET in each test is expected to be
+    rejected with 429 and a Retry-After of 60 seconds.
+    """
+
+    @webob.dec.wsgify
+    def _empty_app(self, request):
+        """Do-nothing WSGI app."""
+        pass
+
+    def setUp(self):
+        """Prepare middleware for use through fake WSGI app."""
+        super(LimitMiddlewareTest, self).setUp()
+        _limits = '(GET, *, .*, 1, MINUTE)'
+        # Configure a custom limiter class by dotted name so
+        # test_limit_class can check it was honoured.
+        self.app = limits.RateLimitingMiddleware(self._empty_app, _limits,
+                                                 "%s.MockLimiter" %
+                                                 self.__class__.__module__)
+
+    def test_limit_class(self):
+        # Test that middleware selected correct limiter class.
+        self.assertIsInstance(self.app._limiter, MockLimiter)
+
+    def test_good_request(self):
+        # Test successful GET request through middleware.
+        request = webob.Request.blank("/")
+        response = request.get_response(self.app)
+        self.assertEqual(200, response.status_int)
+
+    def test_limited_request_json(self):
+        # Test a rate-limited (429) GET request through middleware.
+        request = webob.Request.blank("/")
+        response = request.get_response(self.app)
+        self.assertEqual(200, response.status_int)
+
+        # Second GET within the same minute exceeds 1/minute.
+        request = webob.Request.blank("/")
+        response = request.get_response(self.app)
+        self.assertEqual(response.status_int, 429)
+
+        self.assertIn('Retry-After', response.headers)
+        retry_after = int(response.headers['Retry-After'])
+        self.assertAlmostEqual(retry_after, 60, 1)
+
+        # The JSON error body carries the same information as the headers.
+        body = jsonutils.loads(response.body)
+        expected = "Only 1 GET request(s) can be made to * every minute."
+        value = body["overLimit"]["details"].strip()
+        self.assertEqual(value, expected)
+
+        self.assertIn("retryAfter", body["overLimit"])
+        retryAfter = body["overLimit"]["retryAfter"]
+        self.assertEqual(retryAfter, "60")
+
+    def test_limited_request_xml(self):
+        # Test a rate-limited (429) response as XML.
+        request = webob.Request.blank("/")
+        response = request.get_response(self.app)
+        self.assertEqual(200, response.status_int)
+
+        request = webob.Request.blank("/")
+        request.accept = "application/xml"
+        response = request.get_response(self.app)
+        self.assertEqual(response.status_int, 429)
+
+        # The XML body mirrors the JSON one: retryAfter attribute plus
+        # a single <details> element with the human-readable message.
+        root = minidom.parseString(response.body).childNodes[0]
+        expected = "Only 1 GET request(s) can be made to * every minute."
+
+        self.assertIsNotNone(root.attributes.getNamedItem("retryAfter"))
+        retryAfter = root.attributes.getNamedItem("retryAfter").value
+        self.assertEqual(retryAfter, "60")
+
+        details = root.getElementsByTagName("details")
+        self.assertEqual(details.length, 1)
+
+        value = details.item(0).firstChild.data.strip()
+        self.assertEqual(value, expected)
+
+
+class LimitTest(BaseLimitTestSuite):
+    """Tests for the `limits.Limit` class.
+
+    Timestamps (next_request / last_request) are relative to the fake
+    clock kept in self.time — presumably stubbed in by the base suite;
+    verify against BaseLimitTestSuite.
+    """
+
+    def test_GET_no_delay(self):
+        # Test a limit handles 1 GET per second.
+        limit = limits.Limit("GET", "*", ".*", 1, 1)
+        delay = limit("GET", "/anything")
+        self.assertIsNone(delay)
+        self.assertEqual(0, limit.next_request)
+        self.assertEqual(0, limit.last_request)
+
+    def test_GET_delay(self):
+        # Test two calls to 1 GET per second limit.
+        limit = limits.Limit("GET", "*", ".*", 1, 1)
+        delay = limit("GET", "/anything")
+        self.assertIsNone(delay)
+
+        # Second call in the same second must be delayed by 1 second.
+        delay = limit("GET", "/anything")
+        self.assertEqual(1, delay)
+        self.assertEqual(1, limit.next_request)
+        self.assertEqual(0, limit.last_request)
+
+        self.time += 4
+
+        # After advancing the clock past the window the limit clears.
+        delay = limit("GET", "/anything")
+        self.assertIsNone(delay)
+        self.assertEqual(4, limit.next_request)
+        self.assertEqual(4, limit.last_request)
+
+
+class ParseLimitsTest(BaseLimitTestSuite):
+    """Tests for the default limits parser in the in-memory
+    `limits.Limiter` class.
+    """
+
+    def test_invalid(self):
+        # Test that parse_limits() handles invalid input correctly.
+        self.assertRaises(ValueError, limits.Limiter.parse_limits,
+                          ';;;;;')
+
+    def test_bad_rule(self):
+        # Test that parse_limits() handles bad rules correctly.
+        # (Rules must be parenthesized.)
+        self.assertRaises(ValueError, limits.Limiter.parse_limits,
+                          'GET, *, .*, 20, minute')
+
+    def test_missing_arg(self):
+        # Test that parse_limits() handles missing args correctly.
+        self.assertRaises(ValueError, limits.Limiter.parse_limits,
+                          '(GET, *, .*, 20)')
+
+    def test_bad_value(self):
+        # Test that parse_limits() handles bad values correctly.
+        # (The value field must be an integer.)
+        self.assertRaises(ValueError, limits.Limiter.parse_limits,
+                          '(GET, *, .*, foo, minute)')
+
+    def test_bad_unit(self):
+        # Test that parse_limits() handles bad units correctly.
+        self.assertRaises(ValueError, limits.Limiter.parse_limits,
+                          '(GET, *, .*, 20, lightyears)')
+
+    def test_multiple_rules(self):
+        # Test that parse_limits() handles multiple rules correctly.
+        try:
+            l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);'
+                                            '(PUT, /foo*, /foo.*, 10, hour);'
+                                            '(POST, /bar*, /bar.*, 5, second);'
+                                            '(Say, /derp*, /derp.*, 1, day)')
+        except ValueError as e:
+            assert False, six.text_type(e)
+
+        # Make sure the number of returned limits are correct
+        self.assertEqual(len(l), 4)
+
+        # Check all the verbs... (note they are upper-cased by the parser)
+        expected = ['GET', 'PUT', 'POST', 'SAY']
+        self.assertEqual([t.verb for t in l], expected)
+
+        # ...the URIs...
+        expected = ['*', '/foo*', '/bar*', '/derp*']
+        self.assertEqual([t.uri for t in l], expected)
+
+        # ...the regexes...
+        expected = ['.*', '/foo.*', '/bar.*', '/derp.*']
+        self.assertEqual([t.regex for t in l], expected)
+
+        # ...the values...
+        expected = [20, 10, 5, 1]
+        self.assertEqual([t.value for t in l], expected)
+
+        # ...and the units...
+        expected = [utils.TIME_UNITS['MINUTE'], utils.TIME_UNITS['HOUR'],
+                    utils.TIME_UNITS['SECOND'], utils.TIME_UNITS['DAY']]
+        self.assertEqual([t.unit for t in l], expected)
+
+
+class LimiterTest(BaseLimitTestSuite):
+    """Tests for the in-memory `limits.Limiter` class."""
+
+    def setUp(self):
+        """Run before each test.
+
+        user3 gets an explicitly empty limit set; user0 gets per-user
+        GET/PUT overrides; everyone else falls back to TEST_LIMITS.
+        """
+        super(LimiterTest, self).setUp()
+        userlimits = {'limits.user3': '',
+                      'limits.user0': '(get, *, .*, 4, minute);'
+                                      '(put, *, .*, 2, minute)'}
+        self.limiter = limits.Limiter(TEST_LIMITS, **userlimits)
+
+    def _check(self, num, verb, url, username=None):
+        """Check and yield results from checks."""
+        for x in xrange(num):
+            yield self.limiter.check_for_delay(verb, url, username)[0]
+
+    def _check_sum(self, num, verb, url, username=None):
+        """Check and sum results from checks."""
+        results = self._check(num, verb, url, username)
+        return sum(item for item in results if item)
+
+    def test_no_delay_GET(self):
+        """Simple test to ensure no delay on a single call for a limit verb we
+        didn't set.
+        """
+        delay = self.limiter.check_for_delay("GET", "/anything")
+        self.assertEqual(delay, (None, None))
+
+    def test_no_delay_PUT(self):
+        # Simple test to ensure no delay on a single call for a known limit.
+        delay = self.limiter.check_for_delay("PUT", "/anything")
+        self.assertEqual(delay, (None, None))
+
+    def test_delay_PUT(self):
+        """Ensure the 11th PUT will result in a delay of 6.0 seconds until
+        the next request will be granted.
+        """
+        expected = [None] * 10 + [6.0]
+        results = list(self._check(11, "PUT", "/anything"))
+
+        self.assertEqual(expected, results)
+
+    def test_delay_POST(self):
+        """Ensure the 8th POST will result in a delay of 6.0 seconds until
+        the next request will be granted.
+        """
+        expected = [None] * 7
+        results = list(self._check(7, "POST", "/anything"))
+        self.assertEqual(expected, results)
+
+        # 7 per minute -> the 8th must wait 60/7 seconds.
+        expected = 60.0 / 7.0
+        results = self._check_sum(1, "POST", "/anything")
+        self.assertAlmostEqual(expected, results, 8)
+
+    def test_delay_GET(self):
+        # Ensure the 11th GET will result in NO delay.
+        expected = [None] * 11
+        results = list(self._check(11, "GET", "/anything"))
+        self.assertEqual(expected, results)
+
+        # user0 has a 4/minute GET override, so the 5th GET waits 15s.
+        expected = [None] * 4 + [15.0]
+        results = list(self._check(5, "GET", "/foo", "user0"))
+        self.assertEqual(expected, results)
+
+    def test_delay_PUT_servers(self):
+        """Ensure PUT on /servers limits at 5 requests, and PUT elsewhere is
+        still OK after 5 requests...but then after 11 total requests, PUT
+        limiting kicks in.
+        """
+        # First 6 requests on PUT /servers
+        expected = [None] * 5 + [12.0]
+        results = list(self._check(6, "PUT", "/servers"))
+        self.assertEqual(expected, results)
+
+        # Next 5 requests on PUT /anything
+        expected = [None] * 4 + [6.0]
+        results = list(self._check(5, "PUT", "/anything"))
+        self.assertEqual(expected, results)
+
+    def test_delay_PUT_wait(self):
+        """Ensure after hitting the limit and then waiting for the correct
+        amount of time, the limit will be lifted.
+        """
+        expected = [None] * 10 + [6.0]
+        results = list(self._check(11, "PUT", "/anything"))
+        self.assertEqual(expected, results)
+
+        # Advance time
+        self.time += 6.0
+
+        expected = [None, 6.0]
+        results = list(self._check(2, "PUT", "/anything"))
+        self.assertEqual(expected, results)
+
+    def test_multiple_delays(self):
+        # Ensure multiple requests still get a delay.
+        expected = [None] * 10 + [6.0] * 10
+        results = list(self._check(20, "PUT", "/anything"))
+        self.assertEqual(expected, results)
+
+        self.time += 1.0
+
+        # One second has elapsed, so the remaining wait drops to 5s.
+        expected = [5.0] * 10
+        results = list(self._check(10, "PUT", "/anything"))
+        self.assertEqual(expected, results)
+
+        # user0's 2/minute PUT override kicks in after 2 requests.
+        expected = [None] * 2 + [30.0] * 8
+        results = list(self._check(10, "PUT", "/anything", "user0"))
+        self.assertEqual(expected, results)
+
+    def test_user_limit(self):
+        # Test user-specific limits.
+        self.assertEqual(self.limiter.levels['user3'], [])
+        self.assertEqual(len(self.limiter.levels['user0']), 2)
+
+    def test_multiple_users(self):
+        # Tests involving multiple users.
+        # User0
+        expected = [None] * 2 + [30.0] * 8
+        results = list(self._check(10, "PUT", "/anything", "user0"))
+        self.assertEqual(expected, results)
+
+        # User1
+        expected = [None] * 10 + [6.0] * 10
+        results = list(self._check(20, "PUT", "/anything", "user1"))
+        self.assertEqual(expected, results)
+
+        # User2
+        expected = [None] * 10 + [6.0] * 5
+        results = list(self._check(15, "PUT", "/anything", "user2"))
+        self.assertEqual(expected, results)
+
+        # User3 has an empty limit set, so is never delayed.
+        expected = [None] * 20
+        results = list(self._check(20, "PUT", "/anything", "user3"))
+        self.assertEqual(expected, results)
+
+        self.time += 1.0
+
+        # User1 again
+        expected = [5.0] * 10
+        results = list(self._check(10, "PUT", "/anything", "user1"))
+        self.assertEqual(expected, results)
+
+        self.time += 1.0
+
+        # User2 again
+        expected = [4.0] * 5
+        results = list(self._check(5, "PUT", "/anything", "user2"))
+        self.assertEqual(expected, results)
+
+        # User0 again
+        expected = [28.0]
+        results = list(self._check(1, "PUT", "/anything", "user0"))
+        self.assertEqual(expected, results)
+
+        self.time += 28.0
+
+        expected = [None, 30.0]
+        results = list(self._check(2, "PUT", "/anything", "user0"))
+        self.assertEqual(expected, results)
+
+
+class WsgiLimiterTest(BaseLimitTestSuite):
+    """Tests for `limits.WsgiLimiter` class.
+
+    The WSGI limiter accepts POSTs describing a (verb, path) pair and
+    answers 204 when the request is allowed or 403 with an
+    X-Wait-Seconds header when it is rate-limited.
+    """
+
+    def setUp(self):
+        """Run before each test."""
+        super(WsgiLimiterTest, self).setUp()
+        self.app = limits.WsgiLimiter(TEST_LIMITS)
+
+    def _request_data(self, verb, path):
+        """Get data describing a limit request verb/path."""
+        return jsonutils.dumps({"verb": verb, "path": path})
+
+    def _request(self, verb, url, username=None):
+        """Make sure that POSTing to the given url causes the given username
+        to perform the given action. Make the internal rate limiter return
+        delay and make sure that the WSGI app returns the correct response.
+
+        Returns the X-Wait-Seconds header value when limited, else None.
+        """
+        if username:
+            request = webob.Request.blank("/%s" % username)
+        else:
+            request = webob.Request.blank("/")
+
+        request.method = "POST"
+        request.body = self._request_data(verb, url)
+        response = request.get_response(self.app)
+
+        if "X-Wait-Seconds" in response.headers:
+            self.assertEqual(response.status_int, 403)
+            return response.headers["X-Wait-Seconds"]
+
+        self.assertEqual(response.status_int, 204)
+
+    def test_invalid_methods(self):
+        # Only POSTs should work.
+        for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
+            request = webob.Request.blank("/", method=method)
+            response = request.get_response(self.app)
+            self.assertEqual(response.status_int, 405)
+
+    def test_good_url(self):
+        delay = self._request("GET", "/something")
+        self.assertIsNone(delay)
+
+    def test_escaping(self):
+        # URL-encoded paths must be handled like any other path.
+        delay = self._request("GET", "/something/jump%20up")
+        self.assertIsNone(delay)
+
+    def test_response_to_delays(self):
+        delay = self._request("GET", "/delayed")
+        self.assertIsNone(delay)
+
+        # Second hit on the 1/minute-limited path must wait 60 seconds.
+        delay = self._request("GET", "/delayed")
+        self.assertEqual(delay, '60.00')
+
+    def test_response_to_delays_usernames(self):
+        # Limits are tracked per username, so each user gets their own
+        # first free request before being delayed.
+        delay = self._request("GET", "/delayed", "user1")
+        self.assertIsNone(delay)
+
+        delay = self._request("GET", "/delayed", "user2")
+        self.assertIsNone(delay)
+
+        delay = self._request("GET", "/delayed", "user1")
+        self.assertEqual(delay, '60.00')
+
+        delay = self._request("GET", "/delayed", "user2")
+        self.assertEqual(delay, '60.00')
+
+
+class FakeHttplibSocket(object):
+    """Fake `httplib.HTTPResponse` replacement.
+
+    Wraps a canned response string in a file-like buffer so that
+    httplib.HTTPResponse can parse it as if it came off a socket.
+    """
+
+    def __init__(self, response_string):
+        """Initialize new `FakeHttplibSocket`."""
+        self._buffer = StringIO.StringIO(response_string)
+
+    def makefile(self, _mode, _other):
+        """Returns the socket's internal buffer."""
+        return self._buffer
+
+
+class FakeHttplibConnection(object):
+    """Fake `httplib.HTTPConnection`."""
+
+    def __init__(self, app, host):
+        """Initialize `FakeHttplibConnection`."""
+        self.app = app
+        self.host = host
+
+    def request(self, method, path, body="", headers=None):
+        """Requests made via this connection actually get translated and
+        routed into our WSGI app, we then wait for the response and turn it
+        back into an `httplib.HTTPResponse`.
+        """
+        if not headers:
+            headers = {}
+
+        req = webob.Request.blank(path)
+        req.method = method
+        req.headers = headers
+        req.host = self.host
+        req.body = body
+
+        # str(Response) renders status line + headers + body, but without
+        # the HTTP version prefix that HTTPResponse expects, so add it.
+        resp = str(req.get_response(self.app))
+        resp = "HTTP/1.0 %s" % resp
+        sock = FakeHttplibSocket(resp)
+        self.http_response = httplib.HTTPResponse(sock)
+        self.http_response.begin()
+
+    def getresponse(self):
+        """Return our generated response from the request."""
+        return self.http_response
+
+
+def wire_HTTPConnection_to_WSGI(host, app):
+    """Monkeypatches HTTPConnection so that if you try to connect to host,
+    you are instead routed straight to the given WSGI app.
+
+    After calling this method, when any code calls
+
+    httplib.HTTPConnection(host)
+
+    the connection object will be a fake. Its requests will be sent directly
+    to the given WSGI app rather than through a socket.
+
+    Code connecting to hosts other than host will not be affected.
+
+    This method may be called multiple times to map different hosts to
+    different apps.
+
+    This method returns the original HTTPConnection object, so that the
+    caller can restore the default HTTPConnection interface (for all hosts).
+    """
+    class HTTPConnectionDecorator(object):
+        """Wraps the real HTTPConnection class so that when you instantiate
+        the class you might instead get a fake instance.
+        """
+
+        def __init__(self, wrapped):
+            self.wrapped = wrapped
+
+        def __call__(self, connection_host, *args, **kwargs):
+            # Only intercept the wired host; everything else goes through
+            # to the real (or previously-wrapped) HTTPConnection.
+            if connection_host == host:
+                return FakeHttplibConnection(app, host)
+            else:
+                return self.wrapped(connection_host, *args, **kwargs)
+
+    oldHTTPConnection = httplib.HTTPConnection
+    httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection)
+    return oldHTTPConnection
+
+
+class WsgiLimiterProxyTest(BaseLimitTestSuite):
+    """Tests for the `limits.WsgiLimiterProxy` class."""
+
+    def setUp(self):
+        """Do some nifty HTTP/WSGI magic which allows for WSGI to be called
+        directly by something like the `httplib` library.
+        """
+        super(WsgiLimiterProxyTest, self).setUp()
+        self.app = limits.WsgiLimiter(TEST_LIMITS)
+        # Route httplib connections to 169.254.0.1:80 straight into the
+        # in-process WSGI limiter, keeping the original class for tearDown.
+        self.oldHTTPConnection = (
+            wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app))
+        self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80")
+
+    def test_200(self):
+        # Successful request test.
+        delay = self.proxy.check_for_delay("GET", "/anything")
+        self.assertEqual(delay, (None, None))
+
+    def test_403(self):
+        # Forbidden request test.
+        delay = self.proxy.check_for_delay("GET", "/delayed")
+        self.assertEqual(delay, (None, None))
+
+        # Second hit trips the 1/minute limit: expect the delay plus the
+        # full 403 error text from the remote limiter.
+        delay, error = self.proxy.check_for_delay("GET", "/delayed")
+        error = error.strip()
+
+        expected = ("60.00", "403 Forbidden\n\nOnly 1 GET request(s) can be "
+                    "made to /delayed every minute.")
+
+        self.assertEqual((delay, error), expected)
+
+    def tearDown(self):
+        # restore original HTTPConnection object
+        httplib.HTTPConnection = self.oldHTTPConnection
+        super(WsgiLimiterProxyTest, self).tearDown()
+
+
+class LimitsViewBuilderTest(test.NoDBTestCase):
+    """Tests for the limits view builder (rate + absolute limit dicts)."""
+
+    def setUp(self):
+        super(LimitsViewBuilderTest, self).setUp()
+        self.view_builder = views.limits.ViewBuilder()
+        self.rate_limits = [{"URI": "*",
+                             "regex": ".*",
+                             "value": 10,
+                             "verb": "POST",
+                             "remaining": 2,
+                             "unit": "MINUTE",
+                             "resetTime": 1311272226},
+                            {"URI": "*/servers",
+                             "regex": "^/servers",
+                             "value": 50,
+                             "verb": "POST",
+                             "remaining": 10,
+                             "unit": "DAY",
+                             "resetTime": 1311272226}]
+        self.absolute_limits = {"metadata_items": 1,
+                                "injected_files": 5,
+                                "injected_file_content_bytes": 5}
+
+    def test_build_limits(self):
+        # Internal limit names are mapped to their public API names
+        # (e.g. metadata_items -> maxServerMeta/maxImageMeta) and
+        # resetTime is rendered as an ISO-8601 "next-available" value.
+        expected_limits = {"limits": {
+                "rate": [{
+                      "uri": "*",
+                      "regex": ".*",
+                      "limit": [{"value": 10,
+                                 "verb": "POST",
+                                 "remaining": 2,
+                                 "unit": "MINUTE",
+                                 "next-available": "2011-07-21T18:17:06Z"}]},
+                   {"uri": "*/servers",
+                    "regex": "^/servers",
+                    "limit": [{"value": 50,
+                               "verb": "POST",
+                               "remaining": 10,
+                               "unit": "DAY",
+                               "next-available": "2011-07-21T18:17:06Z"}]}],
+                "absolute": {"maxServerMeta": 1,
+                             "maxImageMeta": 1,
+                             "maxPersonality": 5,
+                             "maxPersonalitySize": 5}}}
+
+        output = self.view_builder.build(self.rate_limits,
+                                         self.absolute_limits)
+        self.assertThat(output, matchers.DictMatches(expected_limits))
+
+    def test_build_limits_empty_limits(self):
+        # Empty inputs must still produce the full skeleton structure.
+        expected_limits = {"limits": {"rate": [],
+                                      "absolute": {}}}
+
+        abs_limits = {}
+        rate_limits = []
+        output = self.view_builder.build(rate_limits, abs_limits)
+        self.assertThat(output, matchers.DictMatches(expected_limits))
+
+
+class LimitsXMLSerializationTest(test.NoDBTestCase):
+    """Tests for XML serialization of the limits view."""
+
+    def test_xml_declaration(self):
+        serializer = limits.LimitsTemplate()
+
+        fixture = {"limits": {
+                   "rate": [],
+                   "absolute": {}}}
+
+        output = serializer.serialize(fixture)
+        has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
+        self.assertTrue(has_dec)
+
+    def test_index(self):
+        serializer = limits.LimitsTemplate()
+        fixture = {
+            "limits": {
+                   "rate": [{
+                         "uri": "*",
+                         "regex": ".*",
+                         "limit": [{
+                                  "value": 10,
+                                  "verb": "POST",
+                                  "remaining": 2,
+                                  "unit": "MINUTE",
+                                  "next-available": "2011-12-15T22:42:45Z"}]},
+                      {"uri": "*/servers",
+                       "regex": "^/servers",
+                       "limit": [{
+                                "value": 50,
+                                "verb": "POST",
+                                "remaining": 10,
+                                "unit": "DAY",
+                                "next-available": "2011-12-15T22:42:45Z"}]}],
+                   "absolute": {"maxServerMeta": 1,
+                                "maxImageMeta": 1,
+                                "maxPersonality": 5,
+                                "maxPersonalitySize": 10240}}}
+
+        output = serializer.serialize(fixture)
+        root = etree.XML(output)
+        # Validate the document against the published limits schema first.
+        xmlutil.validate_schema(root, 'limits')
+
+        # verify absolute limits
+        absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
+        self.assertEqual(len(absolutes), 4)
+        for limit in absolutes:
+            name = limit.get('name')
+            value = limit.get('value')
+            self.assertEqual(value, str(fixture['limits']['absolute'][name]))
+
+        # verify rate limits
+        rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
+        self.assertEqual(len(rates), 2)
+        for i, rate in enumerate(rates):
+            for key in ['uri', 'regex']:
+                self.assertEqual(rate.get(key),
+                                 str(fixture['limits']['rate'][i][key]))
+            rate_limits = rate.xpath('ns:limit', namespaces=NS)
+            self.assertEqual(len(rate_limits), 1)
+            for j, limit in enumerate(rate_limits):
+                for key in ['verb', 'value', 'remaining', 'unit',
+                            'next-available']:
+                    self.assertEqual(limit.get(key),
+                          str(fixture['limits']['rate'][i]['limit'][j][key]))
+
+    def test_index_no_limits(self):
+        serializer = limits.LimitsTemplate()
+
+        fixture = {"limits": {
+                   "rate": [],
+                   "absolute": {}}}
+
+        output = serializer.serialize(fixture)
+        root = etree.XML(output)
+        xmlutil.validate_schema(root, 'limits')
+
+        # verify absolute limits
+        absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
+        self.assertEqual(len(absolutes), 0)
+
+        # verify rate limits
+        rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
+        self.assertEqual(len(rates), 0)
diff --git a/nova/tests/unit/api/openstack/compute/test_server_actions.py b/nova/tests/unit/api/openstack/compute/test_server_actions.py
new file mode 100644
index 0000000000..16f8ce14bf
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_server_actions.py
@@ -0,0 +1,1556 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import uuid
+
+import mock
+import mox
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute import servers
+from nova.compute import api as compute_api
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.image import glance
+from nova import objects
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit.image import fake
+from nova.tests.unit import matchers
+from nova.tests.unit import utils
+
+CONF = cfg.CONF
+CONF.import_opt('password_length', 'nova.utils')
+FAKE_UUID = fakes.FAKE_UUID
+INSTANCE_IDS = {FAKE_UUID: 1}
+
+
+def return_server_not_found(*arg, **kwarg):
+    """Stub for DB lookups that should behave as if the server is missing."""
+    raise exception.NotFound()
+
+
+def instance_update_and_get_original(context, instance_uuid, values,
+                                     update_cells=True,
+                                     columns_to_join=None,
+                                     ):
+    """Stub for db.instance_update_and_get_original.
+
+    Returns a (old, new) pair; here both are the same stubbed instance
+    merged with the requested values.
+    """
+    inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
+    inst = dict(inst, **values)
+    return (inst, inst)
+
+
+def instance_update(context, instance_uuid, kwargs, update_cells=True):
+    """Stub for db.instance_update returning a canned instance.
+
+    Note: kwargs is intentionally ignored here — the stub always returns
+    the unmodified canned instance.
+    """
+    inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
+    return inst
+
+
+class MockSetAdminPassword(object):
+    """Callable stand-in for compute_api.API.set_admin_password.
+
+    Records the instance uuid and password it was invoked with so tests
+    can assert on them afterwards.
+    """
+
+    def __init__(self):
+        # Set on call; None until the mock has been invoked.
+        self.instance_id = None
+        self.password = None
+
+    def __call__(self, context, instance, password):
+        self.instance_id = instance['uuid']
+        self.password = password
+
+
+class ServerActionsControllerTest(test.TestCase):
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ image_href = 'http://localhost/v2/fake/images/%s' % image_uuid
+
+    def setUp(self):
+        """Stub out the DB, network, snapshot and image layers, then build
+        a servers controller (with no extensions loaded) and a WSGI app.
+        """
+        super(ServerActionsControllerTest, self).setUp()
+
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+                                               host='fake_host'))
+        self.stubs.Set(db, 'instance_update_and_get_original',
+                       instance_update_and_get_original)
+
+        fakes.stub_out_nw_api(self.stubs)
+        fakes.stub_out_compute_api_snapshot(self.stubs)
+        fake.stub_out_image_service(self.stubs)
+        self.flags(allow_instance_snapshots=True,
+                   enable_instance_password=True)
+        self.uuid = FAKE_UUID
+        self.url = '/v2/fake/servers/%s/action' % self.uuid
+        self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
+
+        class FakeExtManager(object):
+            # Report every extension as unloaded so tests exercise the
+            # extension-free code paths by default.
+            def is_loaded(self, ext):
+                return False
+
+        self.controller = servers.Controller(ext_mgr=FakeExtManager())
+        self.compute_api = self.controller.compute_api
+        self.context = context.RequestContext('fake', 'fake')
+        self.app = fakes.wsgi_app(init_only=('servers',),
+                                  fake_auth_context=self.context)
+
+    def _make_request(self, url, body):
+        """POST the JSON-encoded body to url through the full WSGI app and
+        return the webob response.
+        """
+        req = webob.Request.blank('/v2/fake' + url)
+        req.method = 'POST'
+        req.body = jsonutils.dumps(body)
+        req.content_type = 'application/json'
+        return req.get_response(self.app)
+
+    def _stub_instance_get(self, uuid=None):
+        """Mox-stub compute_api.API.get to return an ACTIVE fake instance
+        (generating a uuid when none is given) and return that instance.
+        """
+        self.mox.StubOutWithMock(compute_api.API, 'get')
+        if uuid is None:
+            uuid = uuidutils.generate_uuid()
+        instance = fake_instance.fake_db_instance(
+            id=1, uuid=uuid, vm_state=vm_states.ACTIVE, task_state=None)
+        instance = objects.Instance._from_db_object(
+            self.context, objects.Instance(), instance)
+
+        self.compute_api.get(self.context, uuid,
+                             want_objects=True).AndReturn(instance)
+        return instance
+
+    def _test_locked_instance(self, action, method=None, body_map=None,
+                              compute_api_args_map=None):
+        """Assert that `action` on a locked instance yields HTTP 409.
+
+        :param action: API action name used in the request body
+        :param method: compute API method name when it differs from action
+        :param body_map: action -> request body payload
+        :param compute_api_args_map: action -> (args, kwargs) the compute
+            API method is expected to be called with
+        """
+        if method is None:
+            method = action
+        if body_map is None:
+            body_map = {}
+        if compute_api_args_map is None:
+            compute_api_args_map = {}
+
+        instance = self._stub_instance_get()
+        args, kwargs = compute_api_args_map.get(action, ((), {}))
+
+        # The compute API call raises InstanceIsLocked, which the API
+        # layer must translate to a 409 Conflict.
+        getattr(compute_api.API, method)(self.context, instance,
+                                         *args, **kwargs).AndRaise(
+            exception.InstanceIsLocked(instance_uuid=instance['uuid']))
+
+        self.mox.ReplayAll()
+
+        res = self._make_request('/servers/%s/action' % instance['uuid'],
+                                 {action: body_map.get(action)})
+        self.assertEqual(409, res.status_int)
+        # Do these here instead of tearDown because this method is called
+        # more than once for the same test case
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def test_actions_with_locked_instance(self):
+        # Every state-changing server action must 409 on a locked instance.
+        actions = ['resize', 'confirmResize', 'revertResize', 'reboot',
+                   'rebuild']
+
+        # Actions whose compute API method name differs from the action.
+        method_translations = {'confirmResize': 'confirm_resize',
+                               'revertResize': 'revert_resize'}
+
+        body_map = {'resize': {'flavorRef': '2'},
+                    'reboot': {'type': 'HARD'},
+                    'rebuild': {'imageRef': self.image_uuid,
+                                'adminPass': 'TNc53Dr8s7vw'}}
+
+        args_map = {'resize': (('2'), {}),
+                    'confirmResize': ((), {}),
+                    'reboot': (('HARD',), {}),
+                    'rebuild': ((self.image_uuid, 'TNc53Dr8s7vw'),
+                                {'files_to_inject': None})}
+
+        for action in actions:
+            method = method_translations.get(action)
+            self.mox.StubOutWithMock(compute_api.API, method or action)
+            self._test_locked_instance(action, method=method,
+                                       body_map=body_map,
+                                       compute_api_args_map=args_map)
+
+    def test_server_change_password(self):
+        # changePassword must forward the new password to the compute API.
+        mock_method = MockSetAdminPassword()
+        self.stubs.Set(compute_api.API, 'set_admin_password', mock_method)
+        body = {'changePassword': {'adminPass': '1234pass'}}
+
+        req = fakes.HTTPRequest.blank(self.url)
+        self.controller._action_change_password(req, FAKE_UUID, body)
+
+        self.assertEqual(mock_method.instance_id, self.uuid)
+        self.assertEqual(mock_method.password, '1234pass')
+
+    def test_server_change_password_pass_disabled(self):
+        # run with enable_instance_password disabled to verify adminPass
+        # is missing from response. See lp bug 921814
+        self.flags(enable_instance_password=False)
+
+        mock_method = MockSetAdminPassword()
+        self.stubs.Set(compute_api.API, 'set_admin_password', mock_method)
+        body = {'changePassword': {'adminPass': '1234pass'}}
+
+        req = fakes.HTTPRequest.blank(self.url)
+        self.controller._action_change_password(req, FAKE_UUID, body)
+
+        self.assertEqual(mock_method.instance_id, self.uuid)
+        # note,the mock still contains the password.
+        self.assertEqual(mock_method.password, '1234pass')
+
+    def test_server_change_password_not_a_string(self):
+        # A non-string adminPass must be rejected with 400.
+        body = {'changePassword': {'adminPass': 1234}}
+        req = fakes.HTTPRequest.blank(self.url)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller._action_change_password,
+                          req, FAKE_UUID, body)
+
<br/>
+    def test_server_change_password_bad_request(self):
+        # A body missing the adminPass key must be rejected with 400.
+        body = {'changePassword': {'pass': '12345'}}
+        req = fakes.HTTPRequest.blank(self.url)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller._action_change_password,
+                          req, FAKE_UUID, body)
+
+    def test_server_change_password_empty_string(self):
+        # An empty-string password is accepted and passed through as-is.
+        mock_method = MockSetAdminPassword()
+        self.stubs.Set(compute_api.API, 'set_admin_password', mock_method)
+        body = {'changePassword': {'adminPass': ''}}
+
+        req = fakes.HTTPRequest.blank(self.url)
+        self.controller._action_change_password(req, FAKE_UUID, body)
+
+        self.assertEqual(mock_method.instance_id, self.uuid)
+        self.assertEqual(mock_method.password, '')
+
+    def test_server_change_password_none(self):
+        # A null adminPass must be rejected with 400.
+        body = {'changePassword': {'adminPass': None}}
+        req = fakes.HTTPRequest.blank(self.url)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller._action_change_password,
+                          req, FAKE_UUID, body)
+
+    def test_reboot_hard(self):
+        # A HARD reboot on an ACTIVE instance succeeds without raising.
+        body = dict(reboot=dict(type="HARD"))
+        req = fakes.HTTPRequest.blank(self.url)
+        self.controller._action_reboot(req, FAKE_UUID, body)
+
+    def test_reboot_soft(self):
+        # A SOFT reboot on an ACTIVE instance succeeds without raising.
+        body = dict(reboot=dict(type="SOFT"))
+        req = fakes.HTTPRequest.blank(self.url)
+        self.controller._action_reboot(req, FAKE_UUID, body)
+
+    def test_reboot_incorrect_type(self):
+        # An unknown reboot type must be rejected with 400.
+        body = dict(reboot=dict(type="NOT_A_TYPE"))
+        req = fakes.HTTPRequest.blank(self.url)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller._action_reboot,
+                          req, FAKE_UUID, body)
+
+    def test_reboot_missing_type(self):
+        # A reboot body without a type must be rejected with 400.
+        body = dict(reboot=dict())
+        req = fakes.HTTPRequest.blank(self.url)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller._action_reboot,
+                          req, FAKE_UUID, body)
+
+    def test_reboot_none(self):
+        # A null reboot type must be rejected with 400.
+        body = dict(reboot=dict(type=None))
+        req = fakes.HTTPRequest.blank(self.url)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller._action_reboot,
+                          req, FAKE_UUID, body)
+
+    def test_reboot_not_found(self):
+        # Rebooting a non-existent instance must 404.
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       return_server_not_found)
+
+        body = dict(reboot=dict(type="HARD"))
+        req = fakes.HTTPRequest.blank(self.url)
+        self.assertRaises(webob.exc.HTTPNotFound,
+                          self.controller._action_reboot,
+                          req, str(uuid.uuid4()), body)
+
+    def test_reboot_raises_conflict_on_invalid_state(self):
+        # InstanceInvalidState from the compute API maps to 409 Conflict.
+        body = dict(reboot=dict(type="HARD"))
+
+        def fake_reboot(*args, **kwargs):
+            raise exception.InstanceInvalidState(attr='fake_attr',
+                state='fake_state', method='fake_method',
+                instance_uuid='fake')
+
+        self.stubs.Set(compute_api.API, 'reboot', fake_reboot)
+
+        req = fakes.HTTPRequest.blank(self.url)
+        self.assertRaises(webob.exc.HTTPConflict,
+                          self.controller._action_reboot,
+                          req, FAKE_UUID, body)
+
+    def test_reboot_soft_with_soft_in_progress_raises_conflict(self):
+        # A SOFT reboot while a soft reboot is already running must 409.
+        body = dict(reboot=dict(type="SOFT"))
+        req = fakes.HTTPRequest.blank(self.url)
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+                                        task_state=task_states.REBOOTING))
+        self.assertRaises(webob.exc.HTTPConflict,
+                          self.controller._action_reboot,
+                          req, FAKE_UUID, body)
+
+    def test_reboot_hard_with_soft_in_progress_does_not_raise(self):
+        # A HARD reboot may interrupt an in-progress soft reboot.
+        body = dict(reboot=dict(type="HARD"))
+        req = fakes.HTTPRequest.blank(self.url)
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+                                        task_state=task_states.REBOOTING))
+        self.controller._action_reboot(req, FAKE_UUID, body)
+
+    def test_reboot_hard_with_hard_in_progress_raises_conflict(self):
+        # A HARD reboot while a hard reboot is already running must 409.
+        body = dict(reboot=dict(type="HARD"))
+        req = fakes.HTTPRequest.blank(self.url)
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+                                   task_state=task_states.REBOOTING_HARD))
+        self.assertRaises(webob.exc.HTTPConflict,
+                          self.controller._action_reboot,
+                          req, FAKE_UUID, body)
+
+    def test_rebuild_preserve_ephemeral_is_ignored_when_ext_not_loaded(self):
+        # With the preserve-ephemeral extension unloaded, the flag must not
+        # be forwarded to compute_api.rebuild (no preserve_ephemeral kwarg
+        # in the mox expectation below).
+        return_server = fakes.fake_instance_get(image_ref='2',
+                                                vm_state=vm_states.ACTIVE,
+                                                host='fake_host')
+        self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+        body = {
+            "rebuild": {
+                "imageRef": self._image_href,
+                "preserve_ephemeral": False,
+            },
+        }
+        req = fakes.HTTPRequest.blank(self.url)
+        context = req.environ['nova.context']
+
+        self.mox.StubOutWithMock(compute_api.API, 'rebuild')
+        compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
+                                mox.IgnoreArg(), files_to_inject=None)
+        self.mox.ReplayAll()
+
+        self.controller._action_rebuild(req, FAKE_UUID, body)
+
+    def _test_rebuild_preserve_ephemeral(self, value=None):
+        """With the preserve-ephemeral extension loaded, verify the flag is
+        forwarded to compute_api.rebuild only when present in the body.
+
+        :param value: preserve_ephemeral body value, or None to omit it
+        """
+        def fake_is_loaded(ext):
+            return ext == 'os-preserve-ephemeral-rebuild'
+        self.stubs.Set(self.controller.ext_mgr, 'is_loaded', fake_is_loaded)
+
+        return_server = fakes.fake_instance_get(image_ref='2',
+                                                vm_state=vm_states.ACTIVE,
+                                                host='fake_host')
+        self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+        body = {
+            "rebuild": {
+                "imageRef": self._image_href,
+            },
+        }
+        if value is not None:
+            body['rebuild']['preserve_ephemeral'] = value
+
+        req = fakes.HTTPRequest.blank(self.url)
+        context = req.environ['nova.context']
+
+        self.mox.StubOutWithMock(compute_api.API, 'rebuild')
+
+        if value is not None:
+            compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
+                                    mox.IgnoreArg(), preserve_ephemeral=value,
+                                    files_to_inject=None)
+        else:
+            compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
+                                    mox.IgnoreArg(), files_to_inject=None)
+        self.mox.ReplayAll()
+
+        self.controller._action_rebuild(req, FAKE_UUID, body)
+
+    def test_rebuild_preserve_ephemeral_true(self):
+        # preserve_ephemeral=True must be forwarded to the compute API.
+        self._test_rebuild_preserve_ephemeral(True)
+    def test_rebuild_preserve_ephemeral_false(self):
+        # preserve_ephemeral=False must be forwarded to the compute API.
+        self._test_rebuild_preserve_ephemeral(False)
+ def test_rebuild_preserve_ephemeral_default(self):
+ self._test_rebuild_preserve_ephemeral()
+
+    def test_rebuild_accepted_minimum(self):
+        """A minimal rebuild body is accepted; the response carries the new
+        image id, a generated adminPass of the configured length, and a
+        Location header pointing back at the server.
+        """
+        return_server = fakes.fake_instance_get(image_ref='2',
+                vm_state=vm_states.ACTIVE, host='fake_host')
+        self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+        self_href = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
+
+        body = {
+            "rebuild": {
+                "imageRef": self._image_href,
+            },
+        }
+
+        req = fakes.HTTPRequest.blank(self.url)
+        robj = self.controller._action_rebuild(req, FAKE_UUID, body)
+        body = robj.obj
+
+        self.assertEqual(body['server']['image']['id'], '2')
+        # Password is auto-generated when none is supplied in the request.
+        self.assertEqual(len(body['server']['adminPass']),
+                         CONF.password_length)
+
+        self.assertEqual(robj['location'], self_href)
+
+ def test_rebuild_instance_with_image_uuid(self):
+ info = dict(image_href_in_call=None)
+
+ def rebuild(self2, context, instance, image_href, *args, **kwargs):
+ info['image_href_in_call'] = image_href
+
+ self.stubs.Set(db, 'instance_get',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
+ self.stubs.Set(compute_api.API, 'rebuild', rebuild)
+
+ # proper local hrefs must start with 'http://localhost/v2/'
+ body = {
+ 'rebuild': {
+ 'imageRef': self.image_uuid,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/a/action')
+ self.controller._action_rebuild(req, FAKE_UUID, body)
+ self.assertEqual(info['image_href_in_call'], self.image_uuid)
+
+    def test_rebuild_instance_with_image_href_uses_uuid(self):
+        """When the request passes a full image href, the controller strips
+        it down to the bare uuid before calling compute_api.rebuild().
+        """
+        info = dict(image_href_in_call=None)
+
+        def rebuild(self2, context, instance, image_href, *args, **kwargs):
+            # Capture the image reference the controller hands to compute.
+            info['image_href_in_call'] = image_href
+
+        self.stubs.Set(db, 'instance_get',
+                       fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
+        self.stubs.Set(compute_api.API, 'rebuild', rebuild)
+
+        # proper local hrefs must start with 'http://localhost/v2/'
+        body = {
+            'rebuild': {
+                'imageRef': self.image_href,
+            },
+        }
+
+        req = fakes.HTTPRequest.blank('/v2/fake/servers/a/action')
+        self.controller._action_rebuild(req, FAKE_UUID, body)
+        self.assertEqual(info['image_href_in_call'], self.image_uuid)
+
+ def test_rebuild_accepted_minimum_pass_disabled(self):
+ # run with enable_instance_password disabled to verify adminPass
+ # is missing from response. See lp bug 921814
+ self.flags(enable_instance_password=False)
+
+ return_server = fakes.fake_instance_get(image_ref='2',
+ vm_state=vm_states.ACTIVE, host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self_href = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ robj = self.controller._action_rebuild(req, FAKE_UUID, body)
+ body = robj.obj
+
+ self.assertEqual(body['server']['image']['id'], '2')
+ self.assertNotIn("adminPass", body['server'])
+
+ self.assertEqual(robj['location'], self_href)
+
+ def test_rebuild_raises_conflict_on_invalid_state(self):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+
+ def fake_rebuild(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body)
+
+ def test_rebuild_accepted_with_metadata(self):
+ metadata = {'new': 'metadata'}
+
+ return_server = fakes.fake_instance_get(metadata=metadata,
+ vm_state=vm_states.ACTIVE, host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "metadata": metadata,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
+
+ self.assertEqual(body['server']['metadata'], metadata)
+
+ def test_rebuild_accepted_with_bad_metadata(self):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "metadata": "stack",
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body)
+
+ def test_rebuild_with_too_large_metadata(self):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "metadata": {
+ 256 * "k": "value"
+ }
+ }
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+ self.controller._action_rebuild, req,
+ FAKE_UUID, body)
+
+ def test_rebuild_bad_entity(self):
+ body = {
+ "rebuild": {
+ "imageId": self._image_href,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body)
+
+ def test_rebuild_bad_personality(self):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "personality": [{
+ "path": "/path/to/file",
+ "contents": "INVALID b64",
+ }]
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body)
+
+ def test_rebuild_personality(self):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "personality": [{
+ "path": "/path/to/file",
+ "contents": base64.b64encode("Test String"),
+ }]
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
+
+ self.assertNotIn('personality', body['server'])
+
+ def test_rebuild_admin_pass(self):
+ return_server = fakes.fake_instance_get(image_ref='2',
+ vm_state=vm_states.ACTIVE, host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "adminPass": "asdf",
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
+
+ self.assertEqual(body['server']['image']['id'], '2')
+ self.assertEqual(body['server']['adminPass'], 'asdf')
+
+ def test_rebuild_admin_pass_pass_disabled(self):
+ # run with enable_instance_password disabled to verify adminPass
+ # is missing from response. See lp bug 921814
+ self.flags(enable_instance_password=False)
+
+ return_server = fakes.fake_instance_get(image_ref='2',
+ vm_state=vm_states.ACTIVE, host='fake_host')
+ self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ "adminPass": "asdf",
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ body = self.controller._action_rebuild(req, FAKE_UUID, body).obj
+
+ self.assertEqual(body['server']['image']['id'], '2')
+ self.assertNotIn('adminPass', body['server'])
+
+ def test_rebuild_server_not_found(self):
+ def server_not_found(self, instance_id,
+ columns_to_join=None, use_slave=False):
+ raise exception.InstanceNotFound(instance_id=instance_id)
+ self.stubs.Set(db, 'instance_get_by_uuid', server_not_found)
+
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body)
+
+ def test_rebuild_with_bad_image(self):
+ body = {
+ "rebuild": {
+ "imageRef": "foo",
+ },
+ }
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body)
+
+    def test_rebuild_accessIP(self):
+        """Rebuild with accessIPv4/accessIPv6 in the body updates the
+        instance's access_ip_* fields along with the usual rebuild fields
+        (image_ref, kernel/ramdisk reset, task_state, progress).
+        """
+        attributes = {
+            'access_ip_v4': '172.19.0.1',
+            'access_ip_v6': 'fe80::1',
+        }
+
+        body = {
+            "rebuild": {
+                "imageRef": self._image_href,
+                "accessIPv4": "172.19.0.1",
+                "accessIPv6": "fe80::1",
+            },
+        }
+
+        data = {'changes': {}}
+        orig_get = compute_api.API.get
+
+        def wrap_get(*args, **kwargs):
+            # Keep a handle on the instance object the controller fetched
+            # so fake_save can inspect its dirty fields.
+            data['instance'] = orig_get(*args, **kwargs)
+            return data['instance']
+
+        def fake_save(context, **kwargs):
+            # Record what would have been persisted instead of hitting db.
+            data['changes'].update(data['instance'].obj_get_changes())
+
+        self.stubs.Set(compute_api.API, 'get', wrap_get)
+        self.stubs.Set(objects.Instance, 'save', fake_save)
+        req = fakes.HTTPRequest.blank(self.url)
+
+        self.controller._action_rebuild(req, FAKE_UUID, body)
+
+        self.assertEqual(self._image_href, data['changes']['image_ref'])
+        self.assertEqual("", data['changes']['kernel_id'])
+        self.assertEqual("", data['changes']['ramdisk_id'])
+        self.assertEqual(task_states.REBUILDING, data['changes']['task_state'])
+        self.assertEqual(0, data['changes']['progress'])
+        for attr, value in attributes.items():
+            self.assertEqual(value, str(data['changes'][attr]))
+
+ def test_rebuild_when_kernel_not_exists(self):
+
+ def return_image_meta(*args, **kwargs):
+ image_meta_table = {
+ '2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
+ '155d900f-4e14-4e4c-a73d-069cbf4541e6':
+ {'id': 3, 'status': 'active', 'container_format': 'raw',
+ 'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
+ }
+ image_id = args[2]
+ try:
+ image_meta = image_meta_table[str(image_id)]
+ except KeyError:
+ raise exception.ImageNotFound(image_id=image_id)
+
+ return image_meta
+
+ self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
+ body = {
+ "rebuild": {
+ "imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ },
+ }
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body)
+
+    def test_rebuild_proper_kernel_ram(self):
+        """Rebuilding to an AMI-style image picks up the kernel_id and
+        ramdisk_id from the new image's properties and stores them on the
+        instance.
+        """
+        instance_meta = {'kernel_id': None, 'ramdisk_id': None}
+
+        orig_get = compute_api.API.get
+
+        def wrap_get(*args, **kwargs):
+            # Remember the instance the controller works on.
+            inst = orig_get(*args, **kwargs)
+            instance_meta['instance'] = inst
+            return inst
+
+        def fake_save(context, **kwargs):
+            # Copy only the fields we track (kernel_id/ramdisk_id) if the
+            # rebuild actually changed them.
+            instance = instance_meta['instance']
+            for key in instance_meta.keys():
+                if key in instance.obj_what_changed():
+                    instance_meta[key] = instance[key]
+
+        def return_image_meta(*args, **kwargs):
+            # Fake glance 'show': image 3 is a raw image referencing
+            # kernel image 1 (aki) and ramdisk image 2 (ari).
+            image_meta_table = {
+                '1': {'id': 1, 'status': 'active', 'container_format': 'aki'},
+                '2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
+                '155d900f-4e14-4e4c-a73d-069cbf4541e6':
+                     {'id': 3, 'status': 'active', 'container_format': 'raw',
+                      'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
+            }
+            image_id = args[2]
+            try:
+                image_meta = image_meta_table[str(image_id)]
+            except KeyError:
+                raise exception.ImageNotFound(image_id=image_id)
+
+            return image_meta
+
+        self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
+        self.stubs.Set(compute_api.API, 'get', wrap_get)
+        self.stubs.Set(objects.Instance, 'save', fake_save)
+        body = {
+            "rebuild": {
+                "imageRef": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+            },
+        }
+        req = fakes.HTTPRequest.blank(self.url)
+        self.controller._action_rebuild(req, FAKE_UUID, body).obj
+        self.assertEqual(instance_meta['kernel_id'], '1')
+        self.assertEqual(instance_meta['ramdisk_id'], '2')
+
+ @mock.patch.object(compute_api.API, 'rebuild')
+ def test_rebuild_instance_raise_auto_disk_config_exc(self, mock_rebuild):
+ body = {
+ "rebuild": {
+ "imageRef": self._image_href,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ mock_rebuild.side_effect = exception.AutoDiskConfigDisabledByImage(
+ image='dummy')
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_rebuild,
+ req, FAKE_UUID, body)
+
+ def test_resize_server(self):
+
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ self.resize_called = False
+
+ def resize_mock(*args):
+ self.resize_called = True
+
+ self.stubs.Set(compute_api.API, 'resize', resize_mock)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ body = self.controller._action_resize(req, FAKE_UUID, body)
+
+ self.assertEqual(self.resize_called, True)
+
+ def test_resize_server_no_flavor(self):
+ body = dict(resize=dict())
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+ def test_resize_server_no_flavor_ref(self):
+ body = dict(resize=dict(flavorRef=None))
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+ def test_resize_with_server_not_found(self):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ self.stubs.Set(compute_api.API, 'get', return_server_not_found)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+    def test_resize_with_image_exceptions(self):
+        """Each exception compute_api.resize can raise maps to the expected
+        HTTP error, and the NoValidHost / AutoDiskConfigDisabledByImage
+        cases surface a human-readable explanation.
+        """
+        body = dict(resize=dict(flavorRef="http://localhost/3"))
+        self.resize_called = 0
+        image_id = 'fake_image_id'
+
+        # (raised compute exception, expected webob HTTP error) pairs,
+        # consumed in order, one per resize call.
+        exceptions = [
+            (exception.ImageNotAuthorized(image_id=image_id),
+             webob.exc.HTTPUnauthorized),
+            (exception.ImageNotFound(image_id=image_id),
+             webob.exc.HTTPBadRequest),
+            (exception.Invalid, webob.exc.HTTPBadRequest),
+            (exception.NoValidHost(reason='Bad host'),
+             webob.exc.HTTPBadRequest),
+            (exception.AutoDiskConfigDisabledByImage(image=image_id),
+             webob.exc.HTTPBadRequest),
+        ]
+
+        # Split the pairs into two parallel iterators.
+        raised, expected = map(iter, zip(*exceptions))
+
+        def _fake_resize(obj, context, instance, flavor_id):
+            self.resize_called += 1
+            # Raise the next scripted exception (Python 2 iterator API).
+            raise raised.next()
+
+        self.stubs.Set(compute_api.API, 'resize', _fake_resize)
+
+        for call_no in range(len(exceptions)):
+            req = fakes.HTTPRequest.blank(self.url)
+            next_exception = expected.next()
+            actual = self.assertRaises(next_exception,
+                                       self.controller._action_resize,
+                                       req, FAKE_UUID, body)
+            if (isinstance(exceptions[call_no][0],
+                           exception.NoValidHost)):
+                self.assertEqual(actual.explanation,
+                                 'No valid host was found. Bad host')
+            elif (isinstance(exceptions[call_no][0],
+                           exception.AutoDiskConfigDisabledByImage)):
+                self.assertEqual(actual.explanation,
+                                 'Requested image fake_image_id has automatic'
+                                 ' disk resize disabled.')
+            self.assertEqual(self.resize_called, call_no + 1)
+
+ @mock.patch('nova.compute.api.API.resize',
+ side_effect=exception.CannotResizeDisk(reason=''))
+ def test_resize_raises_cannot_resize_disk(self, mock_resize):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+ @mock.patch('nova.compute.api.API.resize',
+ side_effect=exception.FlavorNotFound(reason='',
+ flavor_id='fake_id'))
+ def test_resize_raises_flavor_not_found(self, mock_resize):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+ def test_resize_with_too_many_instances(self):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ def fake_resize(*args, **kwargs):
+ raise exception.TooManyInstances(message="TooManyInstance")
+
+ self.stubs.Set(compute_api.API, 'resize', fake_resize)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+ def test_resize_raises_conflict_on_invalid_state(self):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ def fake_resize(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ self.stubs.Set(compute_api.API, 'resize', fake_resize)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+ @mock.patch('nova.compute.api.API.resize',
+ side_effect=exception.NoValidHost(reason=''))
+ def test_resize_raises_no_valid_host(self, mock_resize):
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+ @mock.patch.object(compute_api.API, 'resize')
+ def test_resize_instance_raise_auto_disk_config_exc(self, mock_resize):
+ mock_resize.side_effect = exception.AutoDiskConfigDisabledByImage(
+ image='dummy')
+
+ body = dict(resize=dict(flavorRef="http://localhost/3"))
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_resize,
+ req, FAKE_UUID, body)
+
+ def test_confirm_resize_server(self):
+ body = dict(confirmResize=None)
+
+ self.confirm_resize_called = False
+
+ def cr_mock(*args):
+ self.confirm_resize_called = True
+
+ self.stubs.Set(compute_api.API, 'confirm_resize', cr_mock)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ body = self.controller._action_confirm_resize(req, FAKE_UUID, body)
+
+ self.assertEqual(self.confirm_resize_called, True)
+
+ def test_confirm_resize_migration_not_found(self):
+ body = dict(confirmResize=None)
+
+ def confirm_resize_mock(*args):
+ raise exception.MigrationNotFoundByStatus(instance_id=1,
+ status='finished')
+
+ self.stubs.Set(compute_api.API,
+ 'confirm_resize',
+ confirm_resize_mock)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_confirm_resize,
+ req, FAKE_UUID, body)
+
+ def test_confirm_resize_raises_conflict_on_invalid_state(self):
+ body = dict(confirmResize=None)
+
+ def fake_confirm_resize(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ self.stubs.Set(compute_api.API, 'confirm_resize',
+ fake_confirm_resize)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_confirm_resize,
+ req, FAKE_UUID, body)
+
+ def test_revert_resize_migration_not_found(self):
+ body = dict(revertResize=None)
+
+ def revert_resize_mock(*args):
+ raise exception.MigrationNotFoundByStatus(instance_id=1,
+ status='finished')
+
+ self.stubs.Set(compute_api.API,
+ 'revert_resize',
+ revert_resize_mock)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_revert_resize,
+ req, FAKE_UUID, body)
+
+ def test_revert_resize_server_not_found(self):
+ body = dict(revertResize=None)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob. exc.HTTPNotFound,
+ self.controller._action_revert_resize,
+ req, "bad_server_id", body)
+
+ def test_revert_resize_server(self):
+ body = dict(revertResize=None)
+
+ self.revert_resize_called = False
+
+ def revert_mock(*args):
+ self.revert_resize_called = True
+
+ self.stubs.Set(compute_api.API, 'revert_resize', revert_mock)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ body = self.controller._action_revert_resize(req, FAKE_UUID, body)
+
+ self.assertEqual(self.revert_resize_called, True)
+
+ def test_revert_resize_raises_conflict_on_invalid_state(self):
+ body = dict(revertResize=None)
+
+ def fake_revert_resize(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+
+ self.stubs.Set(compute_api.API, 'revert_resize',
+ fake_revert_resize)
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_revert_resize,
+ req, FAKE_UUID, body)
+
+ def test_create_image(self):
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ response = self.controller._action_create_image(req, FAKE_UUID, body)
+
+ location = response.headers['Location']
+ self.assertEqual('http://localhost/v2/fake/images/123', location)
+
+ def test_create_image_glance_link_prefix(self):
+ self.flags(osapi_glance_link_prefix='https://glancehost')
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ response = self.controller._action_create_image(req, FAKE_UUID, body)
+
+ location = response.headers['Location']
+ self.assertEqual('https://glancehost/v2/fake/images/123', location)
+
+ def test_create_image_name_too_long(self):
+ long_name = 'a' * 260
+ body = {
+ 'createImage': {
+ 'name': long_name,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image, req,
+ FAKE_UUID, body)
+
+    def _do_test_create_volume_backed_image(self, extra_properties):
+        """Helper: snapshot a volume-backed instance and verify the created
+        image carries the original kernel/ramdisk/root-device properties,
+        a v2-format block_device_mapping pointing at the new volume
+        snapshot, and any ``extra_properties`` passed in the request.
+        """
+
+        def _fake_id(x):
+            # Build a deterministic uuid-shaped string from one character.
+            return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
+
+        body = dict(createImage=dict(name='snapshot_of_volume_backed'))
+
+        if extra_properties:
+            body['createImage']['metadata'] = extra_properties
+
+        image_service = glance.get_default_image_service()
+
+        bdm = [dict(volume_id=_fake_id('a'),
+                    volume_size=1,
+                    device_name='vda',
+                    delete_on_termination=False)]
+        props = dict(kernel_id=_fake_id('b'),
+                     ramdisk_id=_fake_id('c'),
+                     root_device_name='/dev/vda',
+                     block_device_mapping=bdm)
+        original_image = dict(properties=props,
+                              container_format='ami',
+                              status='active',
+                              is_public=True)
+
+        image_service.create(None, original_image)
+
+        def fake_block_device_mapping_get_all_by_instance(context, inst_id,
+                                                          use_slave=False):
+            # The instance boots from a snapshot-backed volume at vda.
+            return [fake_block_device.FakeDbBlockDeviceDict(
+                    {'volume_id': _fake_id('a'),
+                     'source_type': 'snapshot',
+                     'destination_type': 'volume',
+                     'volume_size': 1,
+                     'device_name': 'vda',
+                     'snapshot_id': 1,
+                     'boot_index': 0,
+                     'delete_on_termination': False,
+                     'no_device': None})]
+
+        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+                       fake_block_device_mapping_get_all_by_instance)
+
+        instance = fakes.fake_instance_get(image_ref=original_image['id'],
+                                           vm_state=vm_states.ACTIVE,
+                                           root_device_name='/dev/vda')
+        self.stubs.Set(db, 'instance_get_by_uuid', instance)
+
+        volume = dict(id=_fake_id('a'),
+                      size=1,
+                      host='fake',
+                      display_description='fake')
+        snapshot = dict(id=_fake_id('d'))
+        # Expect exactly one volume lookup and one forced snapshot.
+        self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
+        volume_api = self.controller.compute_api.volume_api
+        volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
+        volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
+               mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
+
+        self.mox.ReplayAll()
+
+        req = fakes.HTTPRequest.blank(self.url)
+        response = self.controller._action_create_image(req, FAKE_UUID, body)
+
+        location = response.headers['Location']
+        image_id = location.replace('http://localhost/v2/fake/images/', '')
+        image = image_service.show(None, image_id)
+
+        self.assertEqual(image['name'], 'snapshot_of_volume_backed')
+        properties = image['properties']
+        self.assertEqual(properties['kernel_id'], _fake_id('b'))
+        self.assertEqual(properties['ramdisk_id'], _fake_id('c'))
+        self.assertEqual(properties['root_device_name'], '/dev/vda')
+        self.assertEqual(properties['bdm_v2'], True)
+        bdms = properties['block_device_mapping']
+        self.assertEqual(len(bdms), 1)
+        self.assertEqual(bdms[0]['boot_index'], 0)
+        self.assertEqual(bdms[0]['source_type'], 'snapshot')
+        self.assertEqual(bdms[0]['destination_type'], 'volume')
+        self.assertEqual(bdms[0]['snapshot_id'], snapshot['id'])
+        # Host/instance specific fields must be scrubbed from the bdm.
+        for fld in ('connection_info', 'id',
+                    'instance_uuid', 'device_name'):
+            self.assertNotIn(fld, bdms[0])
+        for k in extra_properties.keys():
+            self.assertEqual(properties[k], extra_properties[k])
+
+ def test_create_volume_backed_image_no_metadata(self):
+ self._do_test_create_volume_backed_image({})
+
+ def test_create_volume_backed_image_with_metadata(self):
+ self._do_test_create_volume_backed_image(dict(ImageType='Gold',
+ ImageVersion='2.0'))
+
+    def _test_create_volume_backed_image_with_metadata_from_volume(
+            self, extra_metadata=None):
+        """Helper: snapshotting a volume-backed instance copies the volume's
+        volume_image_metadata onto the new image; ``extra_metadata`` from
+        the request is merged on top when supplied.
+        """
+
+        def _fake_id(x):
+            # Build a deterministic uuid-shaped string from one character.
+            return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
+
+        body = dict(createImage=dict(name='snapshot_of_volume_backed'))
+        if extra_metadata:
+            body['createImage']['metadata'] = extra_metadata
+
+        image_service = glance.get_default_image_service()
+
+        def fake_block_device_mapping_get_all_by_instance(context, inst_id,
+                                                          use_slave=False):
+            return [fake_block_device.FakeDbBlockDeviceDict(
+                    {'volume_id': _fake_id('a'),
+                     'source_type': 'snapshot',
+                     'destination_type': 'volume',
+                     'volume_size': 1,
+                     'device_name': 'vda',
+                     'snapshot_id': 1,
+                     'boot_index': 0,
+                     'delete_on_termination': False,
+                     'no_device': None})]
+
+        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+                       fake_block_device_mapping_get_all_by_instance)
+
+        # image_ref='' marks the instance as volume-backed.
+        instance = fakes.fake_instance_get(image_ref='',
+                                           vm_state=vm_states.ACTIVE,
+                                           root_device_name='/dev/vda')
+        self.stubs.Set(db, 'instance_get_by_uuid', instance)
+
+        fake_metadata = {'test_key1': 'test_value1',
+                         'test_key2': 'test_value2'}
+        volume = dict(id=_fake_id('a'),
+                      size=1,
+                      host='fake',
+                      display_description='fake',
+                      volume_image_metadata=fake_metadata)
+        snapshot = dict(id=_fake_id('d'))
+        self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
+        volume_api = self.controller.compute_api.volume_api
+        # The volume is looked up twice (bdm handling + metadata copy).
+        volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
+        volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
+        volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
+               mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
+
+        req = fakes.HTTPRequest.blank(self.url)
+
+        self.mox.ReplayAll()
+        response = self.controller._action_create_image(req, FAKE_UUID, body)
+        location = response.headers['Location']
+        image_id = location.replace('http://localhost/v2/fake/images/', '')
+        image = image_service.show(None, image_id)
+
+        properties = image['properties']
+        self.assertEqual(properties['test_key1'], 'test_value1')
+        self.assertEqual(properties['test_key2'], 'test_value2')
+        if extra_metadata:
+            for key, val in extra_metadata.items():
+                self.assertEqual(properties[key], val)
+
+ def test_create_vol_backed_img_with_meta_from_vol_without_extra_meta(self):
+ self._test_create_volume_backed_image_with_metadata_from_volume()
+
+ def test_create_vol_backed_img_with_meta_from_vol_with_extra_meta(self):
+ self._test_create_volume_backed_image_with_metadata_from_volume(
+ extra_metadata={'a': 'b'})
+
+ def test_create_image_snapshots_disabled(self):
+ """Don't permit a snapshot if the allow_instance_snapshots flag is
+ False
+ """
+ self.flags(allow_instance_snapshots=False)
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ },
+ }
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_with_metadata(self):
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ 'metadata': {'key': 'asdf'},
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ response = self.controller._action_create_image(req, FAKE_UUID, body)
+
+ location = response.headers['Location']
+ self.assertEqual('http://localhost/v2/fake/images/123', location)
+
+ def test_create_image_with_too_much_metadata(self):
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ 'metadata': {},
+ },
+ }
+ for num in range(CONF.quota_metadata_items + 1):
+ body['createImage']['metadata']['foo%i' % num] = "bar"
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_no_name(self):
+ body = {
+ 'createImage': {},
+ }
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_blank_name(self):
+ body = {
+ 'createImage': {
+ 'name': '',
+ }
+ }
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_bad_metadata(self):
+ body = {
+ 'createImage': {
+ 'name': 'geoff',
+ 'metadata': 'henry',
+ },
+ }
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+ def test_create_image_raises_conflict_on_invalid_state(self):
+ def snapshot(*args, **kwargs):
+ raise exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+ self.stubs.Set(compute_api.API, 'snapshot', snapshot)
+
+ body = {
+ "createImage": {
+ "name": "test_snapshot",
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_create_image,
+ req, FAKE_UUID, body)
+
+
+class TestServerActionXMLDeserializer(test.TestCase):
+    """Tests for the XML deserializer used by server action requests.
+
+    Each test feeds a raw XML action document to servers.ActionDeserializer
+    and checks the resulting request body dict (or the raised error for
+    malformed/incomplete input).
+    """
+
+    def setUp(self):
+        super(TestServerActionXMLDeserializer, self).setUp()
+        self.deserializer = servers.ActionDeserializer()
+
+    def test_create_image(self):
+        serial_request = """
+<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
+             name="new-server-test"/>"""
+        request = self.deserializer.deserialize(serial_request, 'action')
+        expected = {
+            "createImage": {
+                "name": "new-server-test",
+            },
+        }
+        self.assertEqual(request['body'], expected)
+
+    def test_create_image_with_metadata(self):
+        serial_request = """
+<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
+             name="new-server-test">
+    <metadata>
+        <meta key="key1">value1</meta>
+    </metadata>
+</createImage>"""
+        request = self.deserializer.deserialize(serial_request, 'action')
+        expected = {
+            "createImage": {
+                "name": "new-server-test",
+                "metadata": {"key1": "value1"},
+            },
+        }
+        self.assertEqual(request['body'], expected)
+
+    def test_change_pass(self):
+        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+                <changePassword
+                    xmlns="http://docs.openstack.org/compute/api/v1.1"
+                    adminPass="1234pass"/> """
+        request = self.deserializer.deserialize(serial_request, 'action')
+        expected = {
+            "changePassword": {
+                "adminPass": "1234pass",
+            },
+        }
+        self.assertEqual(request['body'], expected)
+
+    def test_change_pass_no_pass(self):
+        # Missing adminPass attribute is a deserialization error.
+        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+                <changePassword
+                    xmlns="http://docs.openstack.org/compute/api/v1.1"/> """
+        self.assertRaises(AttributeError,
+                          self.deserializer.deserialize,
+                          serial_request,
+                          'action')
+
+    def test_change_pass_empty_pass(self):
+        # An empty (but present) adminPass attribute is accepted as-is.
+        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+                <changePassword
+                    xmlns="http://docs.openstack.org/compute/api/v1.1"
+                    adminPass=""/> """
+        request = self.deserializer.deserialize(serial_request, 'action')
+        expected = {
+            "changePassword": {
+                "adminPass": "",
+            },
+        }
+        self.assertEqual(request['body'], expected)
+
+    def test_reboot(self):
+        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+                <reboot
+                    xmlns="http://docs.openstack.org/compute/api/v1.1"
+                    type="HARD"/>"""
+        request = self.deserializer.deserialize(serial_request, 'action')
+        expected = {
+            "reboot": {
+                "type": "HARD",
+            },
+        }
+        self.assertEqual(request['body'], expected)
+
+    def test_reboot_no_type(self):
+        # A reboot element without a type attribute cannot be deserialized.
+        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+                <reboot
+                    xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
+        self.assertRaises(AttributeError,
+                          self.deserializer.deserialize,
+                          serial_request,
+                          'action')
+
+    def test_resize(self):
+        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+                <resize
+                    xmlns="http://docs.openstack.org/compute/api/v1.1"
+                    flavorRef="http://localhost/flavors/3"/>"""
+        request = self.deserializer.deserialize(serial_request, 'action')
+        expected = {
+            "resize": {"flavorRef": "http://localhost/flavors/3"},
+        }
+        self.assertEqual(request['body'], expected)
+
+    def test_resize_no_flavor_ref(self):
+        # A resize element without flavorRef cannot be deserialized.
+        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+                <resize
+                    xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
+        self.assertRaises(AttributeError,
+                          self.deserializer.deserialize,
+                          serial_request,
+                          'action')
+
+    def test_confirm_resize(self):
+        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+                <confirmResize
+                    xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
+        request = self.deserializer.deserialize(serial_request, 'action')
+        expected = {
+            "confirmResize": None,
+        }
+        self.assertEqual(request['body'], expected)
+
+    def test_revert_resize(self):
+        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+                <revertResize
+                   xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
+        request = self.deserializer.deserialize(serial_request, 'action')
+        expected = {
+            "revertResize": None,
+        }
+        self.assertEqual(request['body'], expected)
+
+    def test_rebuild(self):
+        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+                <rebuild
+                    xmlns="http://docs.openstack.org/compute/api/v1.1"
+                    name="new-server-test"
+                    imageRef="http://localhost/images/1">
+                    <metadata>
+                        <meta key="My Server Name">Apache1</meta>
+                    </metadata>
+                    <personality>
+                        <file path="/etc/banner.txt">Mg==</file>
+                    </personality>
+                </rebuild>"""
+        request = self.deserializer.deserialize(serial_request, 'action')
+        expected = {
+            "rebuild": {
+                "name": "new-server-test",
+                "imageRef": "http://localhost/images/1",
+                "metadata": {
+                    "My Server Name": "Apache1",
+                },
+                "personality": [
+                    {"path": "/etc/banner.txt", "contents": "Mg=="},
+                ],
+            },
+        }
+        self.assertThat(request['body'], matchers.DictMatches(expected))
+
+    def test_rebuild_minimum(self):
+        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+                <rebuild
+                    xmlns="http://docs.openstack.org/compute/api/v1.1"
+                    imageRef="http://localhost/images/1"/>"""
+        request = self.deserializer.deserialize(serial_request, 'action')
+        expected = {
+            "rebuild": {
+                "imageRef": "http://localhost/images/1",
+            },
+        }
+        self.assertThat(request['body'], matchers.DictMatches(expected))
+
+    def test_rebuild_no_imageRef(self):
+        # imageRef is mandatory for rebuild.
+        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+                <rebuild
+                    xmlns="http://docs.openstack.org/compute/api/v1.1"
+                    name="new-server-test">
+                    <metadata>
+                        <meta key="My Server Name">Apache1</meta>
+                    </metadata>
+                    <personality>
+                        <file path="/etc/banner.txt">Mg==</file>
+                    </personality>
+                </rebuild>"""
+        self.assertRaises(AttributeError,
+                          self.deserializer.deserialize,
+                          serial_request,
+                          'action')
+
+    def test_rebuild_blank_name(self):
+        # An empty name attribute is rejected.
+        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+                <rebuild
+                    xmlns="http://docs.openstack.org/compute/api/v1.1"
+                    imageRef="http://localhost/images/1"
+                    name=""/>"""
+        self.assertRaises(AttributeError,
+                          self.deserializer.deserialize,
+                          serial_request,
+                          'action')
+
+    def test_rebuild_preserve_ephemeral_passed(self):
+        serial_request = """<?xml version="1.0" encoding="UTF-8"?>
+                <rebuild
+                    xmlns="http://docs.openstack.org/compute/api/v1.1"
+                    imageRef="http://localhost/images/1"
+                    preserve_ephemeral="true"/>"""
+        request = self.deserializer.deserialize(serial_request, 'action')
+        expected = {
+            "rebuild": {
+                "imageRef": "http://localhost/images/1",
+                "preserve_ephemeral": True,
+            },
+        }
+        self.assertThat(request['body'], matchers.DictMatches(expected))
+
+    def test_corrupt_xml(self):
+        """Should throw a 400 error on corrupt xml."""
+        self.assertRaises(
+                exception.MalformedRequestBody,
+                self.deserializer.deserialize,
+                utils.killer_xml_body())
diff --git a/nova/tests/unit/api/openstack/compute/test_server_metadata.py b/nova/tests/unit/api/openstack/compute/test_server_metadata.py
new file mode 100644
index 0000000000..ba9126f0f1
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_server_metadata.py
@@ -0,0 +1,771 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import six
+import webob
+
+from nova.api.openstack.compute.plugins.v3 import server_metadata \
+ as server_metadata_v21
+from nova.api.openstack.compute import server_metadata as server_metadata_v2
+from nova.compute import rpcapi as compute_rpcapi
+from nova.compute import vm_states
+import nova.db
+from nova import exception
+from nova import objects
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+
+
# Global oslo.config handle; quota_metadata_items is read by the stubs below.
CONF = cfg.CONF
+
+
def return_create_instance_metadata_max(context, server_id, metadata, delete):
    """Stub for instance_metadata_update that reports a quota-full result."""
    result = stub_max_server_metadata()
    return result
+
+
def return_create_instance_metadata(context, server_id, metadata, delete):
    """Stub for instance_metadata_update returning the canonical fixture."""
    result = stub_server_metadata()
    return result
+
+
def fake_instance_save(inst, **kwargs):
    """Stub for objects.Instance.save: record the metadata fixture and
    clear the dirty-field set without touching a database.
    """
    fresh = stub_server_metadata()
    inst.metadata = fresh
    inst.obj_reset_changes()
+
+
def return_server_metadata(context, server_id):
    """Stub for instance_metadata_get; insists server_id looks like a uuid."""
    looks_like_uuid = (isinstance(server_id, six.string_types)
                       and len(server_id) == 36)
    if not looks_like_uuid:
        raise Exception('id %s must be a uuid in return server metadata'
                        % server_id)
    return stub_server_metadata()
+
+
def return_empty_server_metadata(context, server_id):
    """Stub for instance_metadata_get when the server has no metadata."""
    empty = {}
    return empty
+
+
def delete_server_metadata(context, server_id, key):
    """Stub for instance_metadata_delete: succeed without touching a DB."""
    return None
+
+
def stub_server_metadata():
    """Canonical three-item metadata fixture shared by the stubs above."""
    return dict(('key%d' % n, 'value%d' % n) for n in (1, 2, 3))
+
+
def stub_max_server_metadata():
    """Build a body holding exactly quota_metadata_items entries."""
    items = dict(('key%i' % i, 'blah')
                 for i in range(CONF.quota_metadata_items))
    return {'metadata': items}
+
+
def return_server(context, server_id, columns_to_join=None):
    """Stub for db.instance_get: an ACTIVE instance with a fixed uuid."""
    fields = {
        'id': server_id,
        'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
        'name': 'fake',
        'locked': False,
        'launched_at': timeutils.utcnow(),
        'vm_state': vm_states.ACTIVE,
    }
    return fake_instance.fake_db_instance(**fields)
+
+
def return_server_by_uuid(context, server_uuid,
                          columns_to_join=None, use_slave=False):
    """Stub for db.instance_get_by_uuid: ACTIVE instance carrying the
    canonical metadata fixture.
    """
    fields = {
        'id': 1,
        'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
        'name': 'fake',
        'locked': False,
        'launched_at': timeutils.utcnow(),
        'metadata': stub_server_metadata(),
        'vm_state': vm_states.ACTIVE,
    }
    return fake_instance.fake_db_instance(**fields)
+
+
def return_server_nonexistent(context, server_id,
                              columns_to_join=None, use_slave=False):
    """Stub that behaves as if no such instance exists."""
    err = exception.InstanceNotFound(instance_id=server_id)
    raise err
+
+
def fake_change_instance_metadata(self, context, instance, diff):
    """No-op stub for ComputeAPI.change_instance_metadata (RPC suppressed)."""
    return None
+
+
class ServerMetaDataTestV21(test.TestCase):
    """Exercise the v2.1 server-metadata API controller.

    The DB layer and the compute RPC fan-out are stubbed out, so only the
    controller's request validation and HTTP error mapping are under test.
    Subclasses re-run this suite against other API versions by overriding
    the two class attributes plus ``_set_up_resources``/``_get_request``.
    """

    # Exception types the v2.1 controller raises for invalid bodies; the
    # v2 subclass replaces these with webob HTTP errors.
    validation_ex = exception.ValidationError
    validation_ex_large = validation_ex

    def setUp(self):
        super(ServerMetaDataTestV21, self).setUp()
        fakes.stub_out_key_pair_funcs(self.stubs)
        self.stubs.Set(nova.db, 'instance_get', return_server)
        self.stubs.Set(nova.db, 'instance_get_by_uuid',
                       return_server_by_uuid)

        self.stubs.Set(nova.db, 'instance_metadata_get',
                       return_server_metadata)

        # Suppress the RPC call that would otherwise follow a change.
        self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
                       fake_change_instance_metadata)
        self._set_up_resources()

    def _set_up_resources(self):
        """Build the version-specific controller and metadata URL."""
        self.controller = server_metadata_v21.ServerMetadataController()
        self.uuid = str(uuid.uuid4())
        self.url = '/fake/servers/%s/metadata' % self.uuid

    def _get_request(self, param_url=''):
        """Return a blank request for the metadata URL plus ``param_url``."""
        return fakes.HTTPRequestV3.blank(self.url + param_url)

    def test_index(self):
        req = self._get_request()
        res_dict = self.controller.index(req, self.uuid)

        expected = {
            'metadata': {
                'key1': 'value1',
                'key2': 'value2',
                'key3': 'value3',
            },
        }
        self.assertEqual(expected, res_dict)

    def test_index_nonexistent_server(self):
        self.stubs.Set(nova.db, 'instance_metadata_get',
                       return_server_nonexistent)
        req = self._get_request()
        # BUG FIX: the server id belongs here, not the request URL.  The
        # stub raised InstanceNotFound regardless, so the test passed by
        # accident, but it was exercising the wrong argument.
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.index, req, self.uuid)

    def test_index_no_data(self):
        self.stubs.Set(nova.db, 'instance_metadata_get',
                       return_empty_server_metadata)
        req = self._get_request()
        res_dict = self.controller.index(req, self.uuid)
        expected = {'metadata': {}}
        self.assertEqual(expected, res_dict)

    def test_show(self):
        req = self._get_request('/key2')
        res_dict = self.controller.show(req, self.uuid, 'key2')
        expected = {"meta": {'key2': 'value2'}}
        self.assertEqual(expected, res_dict)

    def test_show_nonexistent_server(self):
        self.stubs.Set(nova.db, 'instance_metadata_get',
                       return_server_nonexistent)
        req = self._get_request('/key2')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, req, self.uuid, 'key2')

    def test_show_meta_not_found(self):
        self.stubs.Set(nova.db, 'instance_metadata_get',
                       return_empty_server_metadata)
        req = self._get_request('/key6')
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, req, self.uuid, 'key6')

    def test_delete(self):
        self.stubs.Set(nova.db, 'instance_metadata_get',
                       return_server_metadata)
        self.stubs.Set(nova.db, 'instance_metadata_delete',
                       delete_server_metadata)
        req = self._get_request('/key2')
        req.method = 'DELETE'
        res = self.controller.delete(req, self.uuid, 'key2')

        # A successful DELETE returns no body.
        self.assertIsNone(res)

    def test_delete_nonexistent_server(self):
        self.stubs.Set(nova.db, 'instance_get_by_uuid',
                       return_server_nonexistent)
        req = self._get_request('/key1')
        req.method = 'DELETE'
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.delete, req, self.uuid, 'key1')

    def test_delete_meta_not_found(self):
        self.stubs.Set(nova.db, 'instance_metadata_get',
                       return_empty_server_metadata)
        req = self._get_request('/key6')
        req.method = 'DELETE'
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.delete, req, self.uuid, 'key6')

    def test_create(self):
        self.stubs.Set(objects.Instance, 'save', fake_instance_save)
        req = self._get_request()
        req.method = 'POST'
        req.content_type = "application/json"
        body = {"metadata": {"key9": "value9"}}
        req.body = jsonutils.dumps(body)
        res_dict = self.controller.create(req, self.uuid, body=body)

        # The response merges the new key with the existing fixture keys.
        body['metadata'].update({
            "key1": "value1",
            "key2": "value2",
            "key3": "value3",
        })
        self.assertEqual(body, res_dict)

    def test_create_empty_body(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = self._get_request()
        req.method = 'POST'
        req.headers["content-type"] = "application/json"

        self.assertRaises(self.validation_ex,
                          self.controller.create, req, self.uuid, body=None)

    def test_create_item_empty_key(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = self._get_request('/key1')
        req.method = 'PUT'
        body = {"metadata": {"": "value1"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(self.validation_ex,
                          self.controller.create, req, self.uuid, body=body)

    def test_create_item_non_dict(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = self._get_request('/key1')
        req.method = 'PUT'
        body = {"metadata": None}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(self.validation_ex,
                          self.controller.create, req, self.uuid, body=body)

    def test_create_item_key_too_long(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = self._get_request('/key1')
        req.method = 'PUT'
        # Keys are limited to 255 characters; 260 must be rejected.
        body = {"metadata": {("a" * 260): "value1"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(self.validation_ex_large,
                          self.controller.create,
                          req, self.uuid, body=body)

    def test_create_malformed_container(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = fakes.HTTPRequest.blank(self.url + '/key1')
        req.method = 'PUT'
        # create() requires a "metadata" container, not "meta".
        body = {"meta": {}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(self.validation_ex,
                          self.controller.create, req, self.uuid, body=body)

    def test_create_malformed_data(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = fakes.HTTPRequest.blank(self.url + '/key1')
        req.method = 'PUT'
        body = {"metadata": ['asdf']}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(self.validation_ex,
                          self.controller.create, req, self.uuid, body=body)

    def test_create_nonexistent_server(self):
        self.stubs.Set(nova.db, 'instance_get_by_uuid',
                       return_server_nonexistent)
        req = self._get_request()
        req.method = 'POST'
        body = {"metadata": {"key1": "value1"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.create, req, self.uuid, body=body)

    def test_update_metadata(self):
        self.stubs.Set(objects.Instance, 'save', fake_instance_save)
        req = self._get_request()
        req.method = 'POST'
        req.content_type = 'application/json'
        expected = {
            'metadata': {
                'key1': 'updatedvalue',
                'key29': 'newkey',
            }
        }
        req.body = jsonutils.dumps(expected)
        response = self.controller.update_all(req, self.uuid, body=expected)
        self.assertEqual(expected, response)

    def test_update_all(self):
        self.stubs.Set(objects.Instance, 'save', fake_instance_save)
        req = self._get_request()
        req.method = 'PUT'
        req.content_type = "application/json"
        expected = {
            'metadata': {
                'key10': 'value10',
                'key99': 'value99',
            },
        }
        req.body = jsonutils.dumps(expected)
        res_dict = self.controller.update_all(req, self.uuid, body=expected)

        self.assertEqual(expected, res_dict)

    def test_update_all_empty_container(self):
        self.stubs.Set(objects.Instance, 'save', fake_instance_save)
        req = self._get_request()
        req.method = 'PUT'
        req.content_type = "application/json"
        # PUT with an empty container wipes all metadata.
        expected = {'metadata': {}}
        req.body = jsonutils.dumps(expected)
        res_dict = self.controller.update_all(req, self.uuid, body=expected)

        self.assertEqual(expected, res_dict)

    def test_update_all_empty_body_item(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = fakes.HTTPRequest.blank(self.url + '/key1')
        req.method = 'PUT'
        req.headers["content-type"] = "application/json"

        self.assertRaises(self.validation_ex,
                          self.controller.update_all, req, self.uuid,
                          body=None)

    def test_update_all_with_non_dict_item(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = fakes.HTTPRequest.blank(self.url + '/bad')
        req.method = 'PUT'
        body = {"metadata": None}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(self.validation_ex,
                          self.controller.update_all, req, self.uuid,
                          body=body)

    def test_update_all_malformed_container(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = self._get_request()
        req.method = 'PUT'
        req.content_type = "application/json"
        expected = {'meta': {}}
        req.body = jsonutils.dumps(expected)

        self.assertRaises(self.validation_ex,
                          self.controller.update_all, req, self.uuid,
                          body=expected)

    def test_update_all_malformed_data(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = self._get_request()
        req.method = 'PUT'
        req.content_type = "application/json"
        expected = {'metadata': ['asdf']}
        req.body = jsonutils.dumps(expected)

        self.assertRaises(self.validation_ex,
                          self.controller.update_all, req, self.uuid,
                          body=expected)

    def test_update_all_nonexistent_server(self):
        self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
        req = self._get_request()
        req.method = 'PUT'
        req.content_type = "application/json"
        body = {'metadata': {'key10': 'value10'}}
        req.body = jsonutils.dumps(body)

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.update_all, req, '100', body=body)

    def test_update_all_non_dict(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = self._get_request()
        req.method = 'PUT'
        body = {"metadata": None}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(self.validation_ex, self.controller.update_all,
                          req, self.uuid, body=body)

    def test_update_item(self):
        self.stubs.Set(objects.Instance, 'save', fake_instance_save)
        req = self._get_request('/key1')
        req.method = 'PUT'
        body = {"meta": {"key1": "value1"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"
        res_dict = self.controller.update(req, self.uuid, 'key1', body=body)
        expected = {"meta": {'key1': 'value1'}}
        self.assertEqual(expected, res_dict)

    def test_update_item_nonexistent_server(self):
        self.stubs.Set(nova.db, 'instance_get_by_uuid',
                       return_server_nonexistent)
        req = self._get_request('/key1')
        req.method = 'PUT'
        body = {"meta": {"key1": "value1"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.update, req, self.uuid, 'key1',
                          body=body)

    def test_update_item_empty_body(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = self._get_request('/key1')
        req.method = 'PUT'
        req.headers["content-type"] = "application/json"

        self.assertRaises(self.validation_ex,
                          self.controller.update, req, self.uuid, 'key1',
                          body=None)

    def test_update_malformed_container(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = fakes.HTTPRequest.blank(self.url)
        req.method = 'PUT'
        # update() of a single item requires a "meta" container with data.
        expected = {'meta': {}}
        req.body = jsonutils.dumps(expected)
        req.headers["content-type"] = "application/json"

        self.assertRaises(self.validation_ex,
                          self.controller.update, req, self.uuid, 'key1',
                          body=expected)

    def test_update_malformed_data(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = fakes.HTTPRequest.blank(self.url)
        req.method = 'PUT'
        expected = {'metadata': ['asdf']}
        req.body = jsonutils.dumps(expected)
        req.headers["content-type"] = "application/json"

        self.assertRaises(self.validation_ex,
                          self.controller.update, req, self.uuid, 'key1',
                          body=expected)

    def test_update_item_empty_key(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = self._get_request('/key1')
        req.method = 'PUT'
        body = {"meta": {"": "value1"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(self.validation_ex,
                          self.controller.update, req, self.uuid, '',
                          body=body)

    def test_update_item_key_too_long(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = self._get_request('/key1')
        req.method = 'PUT'
        body = {"meta": {("a" * 260): "value1"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(self.validation_ex_large,
                          self.controller.update,
                          req, self.uuid, ("a" * 260), body=body)

    def test_update_item_value_too_long(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = self._get_request('/key1')
        req.method = 'PUT'
        body = {"meta": {"key1": ("a" * 260)}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(self.validation_ex_large,
                          self.controller.update,
                          req, self.uuid, "key1", body=body)

    def test_update_item_too_many_keys(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = self._get_request('/key1')
        req.method = 'PUT'
        # A single-item PUT must carry exactly one key.
        body = {"meta": {"key1": "value1", "key2": "value2"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(self.validation_ex,
                          self.controller.update, req, self.uuid, 'key1',
                          body=body)

    def test_update_item_body_uri_mismatch(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = self._get_request('/bad')
        req.method = 'PUT'
        body = {"meta": {"key1": "value1"}}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.update, req, self.uuid, 'bad',
                          body=body)

    def test_update_item_non_dict(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = self._get_request('/bad')
        req.method = 'PUT'
        body = {"meta": None}
        req.body = jsonutils.dumps(body)
        req.headers["content-type"] = "application/json"

        self.assertRaises(self.validation_ex,
                          self.controller.update, req, self.uuid, 'bad',
                          body=body)

    def test_update_empty_container(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = fakes.HTTPRequest.blank(self.url)
        req.method = 'PUT'
        expected = {'metadata': {}}
        req.body = jsonutils.dumps(expected)
        req.headers["content-type"] = "application/json"

        self.assertRaises(self.validation_ex,
                          self.controller.update, req, self.uuid, 'bad',
                          body=expected)

    def test_too_many_metadata_items_on_create(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        data = {"metadata": {}}
        for num in range(CONF.quota_metadata_items + 1):
            data['metadata']['key%i' % num] = "blah"
        req = self._get_request()
        req.method = 'POST'
        req.body = jsonutils.dumps(data)
        req.headers["content-type"] = "application/json"

        self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.create, req, self.uuid, body=data)

    def test_invalid_metadata_items_on_create(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = self._get_request()
        req.method = 'POST'
        req.headers["content-type"] = "application/json"

        # test for long key
        data = {"metadata": {"a" * 260: "value1"}}
        req.body = jsonutils.dumps(data)
        self.assertRaises(self.validation_ex_large,
                          self.controller.create, req, self.uuid, body=data)

        # test for long value
        data = {"metadata": {"key": "v" * 260}}
        req.body = jsonutils.dumps(data)
        self.assertRaises(self.validation_ex_large,
                          self.controller.create, req, self.uuid, body=data)

        # test for empty key.
        data = {"metadata": {"": "value1"}}
        req.body = jsonutils.dumps(data)
        self.assertRaises(self.validation_ex,
                          self.controller.create, req, self.uuid, body=data)

    def test_too_many_metadata_items_on_update_item(self):
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        data = {"metadata": {}}
        for num in range(CONF.quota_metadata_items + 1):
            data['metadata']['key%i' % num] = "blah"
        req = self._get_request()
        req.method = 'PUT'
        req.body = jsonutils.dumps(data)
        req.headers["content-type"] = "application/json"

        self.assertRaises(webob.exc.HTTPForbidden, self.controller.update_all,
                          req, self.uuid, body=data)

    def test_invalid_metadata_items_on_update_item(self):
        # CLEANUP: the original set the same stub twice and built an
        # over-quota metadata dict (plus req.body) that was immediately
        # overwritten below - both removed as dead code.
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = self._get_request()
        req.method = 'PUT'
        req.headers["content-type"] = "application/json"

        # test for long key
        data = {"metadata": {"a" * 260: "value1"}}
        req.body = jsonutils.dumps(data)
        self.assertRaises(self.validation_ex_large,
                          self.controller.update_all, req, self.uuid,
                          body=data)

        # test for long value
        data = {"metadata": {"key": "v" * 260}}
        req.body = jsonutils.dumps(data)
        self.assertRaises(self.validation_ex_large,
                          self.controller.update_all, req, self.uuid,
                          body=data)

        # test for empty key.
        data = {"metadata": {"": "value1"}}
        req.body = jsonutils.dumps(data)
        self.assertRaises(self.validation_ex,
                          self.controller.update_all, req, self.uuid,
                          body=data)
+
+
class ServerMetaDataTestV2(ServerMetaDataTestV21):
    """Re-run the metadata suite against the legacy v2 controller.

    v2 reports validation failures as webob HTTP errors rather than
    nova ValidationError.
    """

    validation_ex = webob.exc.HTTPBadRequest
    validation_ex_large = webob.exc.HTTPRequestEntityTooLarge

    def _set_up_resources(self):
        self.uuid = str(uuid.uuid4())
        self.url = '/v1.1/fake/servers/%s/metadata' % self.uuid
        self.controller = server_metadata_v2.Controller()

    def _get_request(self, param_url=''):
        target = self.url + param_url
        return fakes.HTTPRequest.blank(target)
+
+
class BadStateServerMetaDataTestV21(test.TestCase):
    """Metadata operations on an instance whose state forbids them.

    The DB stubs return an instance in the BUILDING vm_state, so the
    controller is expected to answer 409 Conflict for delete/update.
    """

    def setUp(self):
        super(BadStateServerMetaDataTestV21, self).setUp()
        fakes.stub_out_key_pair_funcs(self.stubs)
        self.stubs.Set(nova.db, 'instance_metadata_get',
                       return_server_metadata)
        # Suppress the RPC call that would follow a metadata change.
        self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
                       fake_change_instance_metadata)
        self.stubs.Set(nova.db, 'instance_get', self._return_server_in_build)
        self.stubs.Set(nova.db, 'instance_get_by_uuid',
                       self._return_server_in_build_by_uuid)
        self.stubs.Set(nova.db, 'instance_metadata_delete',
                       delete_server_metadata)
        self._set_up_resources()

    def _set_up_resources(self):
        """Build the version-specific controller and metadata URL."""
        self.controller = server_metadata_v21.ServerMetadataController()
        self.uuid = str(uuid.uuid4())
        self.url = '/fake/servers/%s/metadata' % self.uuid

    def _get_request(self, param_url=''):
        return fakes.HTTPRequestV3.blank(self.url + param_url)

    def test_invalid_state_on_delete(self):
        # BUILDING instances may not have metadata deleted -> 409.
        req = self._get_request('/key2')
        req.method = 'DELETE'
        self.assertRaises(webob.exc.HTTPConflict, self.controller.delete,
                          req, self.uuid, 'key2')

    def test_invalid_state_on_update_metadata(self):
        # BUILDING instances may not have metadata replaced -> 409.
        self.stubs.Set(nova.db, 'instance_metadata_update',
                       return_create_instance_metadata)
        req = self._get_request()
        req.method = 'POST'
        req.content_type = 'application/json'
        expected = {
            'metadata': {
                'key1': 'updatedvalue',
                'key29': 'newkey',
            }
        }
        req.body = jsonutils.dumps(expected)
        self.assertRaises(webob.exc.HTTPConflict, self.controller.update_all,
                          req, self.uuid, body=expected)

    def _return_server_in_build(self, context, server_id,
                                columns_to_join=None):
        # Stub for db.instance_get: instance stuck in BUILDING.
        return fake_instance.fake_db_instance(
            **{'id': server_id,
               'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
               'name': 'fake',
               'locked': False,
               'vm_state': vm_states.BUILDING})

    def _return_server_in_build_by_uuid(self, context, server_uuid,
                                        columns_to_join=None,
                                        use_slave=False):
        # Stub for db.instance_get_by_uuid: instance stuck in BUILDING.
        return fake_instance.fake_db_instance(
            **{'id': 1,
               'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
               'name': 'fake',
               'locked': False,
               'vm_state': vm_states.BUILDING})

    @mock.patch.object(nova.compute.api.API, 'update_instance_metadata',
                       side_effect=exception.InstanceIsLocked(instance_uuid=0))
    def test_instance_lock_update_metadata(self, mock_update):
        # A locked instance surfaces as 409 Conflict as well.
        req = self._get_request()
        req.method = 'POST'
        req.content_type = 'application/json'
        expected = {
            'metadata': {
                'keydummy': 'newkey',
            }
        }
        req.body = jsonutils.dumps(expected)
        self.assertRaises(webob.exc.HTTPConflict, self.controller.update_all,
                          req, self.uuid, body=expected)
+
+
class BadStateServerMetaDataTestV2(BadStateServerMetaDataTestV21):
    """Re-run the bad-state metadata tests against the v2 controller."""

    def _set_up_resources(self):
        self.uuid = str(uuid.uuid4())
        self.url = '/v1.1/fake/servers/%s/metadata' % self.uuid
        self.controller = server_metadata_v2.Controller()

    def _get_request(self, param_url=''):
        target = self.url + param_url
        return fakes.HTTPRequest.blank(target)
diff --git a/nova/tests/unit/api/openstack/compute/test_servers.py b/nova/tests/unit/api/openstack/compute/test_servers.py
new file mode 100644
index 0000000000..c37df741ec
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_servers.py
@@ -0,0 +1,4625 @@
+# Copyright 2010-2011 OpenStack Foundation
+# Copyright 2011 Piston Cloud Computing, Inc.
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import contextlib
+import datetime
+import urllib
+import uuid
+
+import iso8601
+from lxml import etree
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import six
+import six.moves.urllib.parse as urlparse
+import testtools
+import webob
+
+from nova.api.openstack import compute
+from nova.api.openstack.compute import ips
+from nova.api.openstack.compute import servers
+from nova.api.openstack.compute import views
+from nova.api.openstack import extensions
+from nova.api.openstack import xmlutil
+from nova.compute import api as compute_api
+from nova.compute import delete_types
+from nova.compute import flavors
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import models
+from nova import exception
+from nova.i18n import _
+from nova.image import glance
+from nova.network import manager
+from nova.network.neutronv2 import api as neutron_api
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova.openstack.common import policy as common_policy
+from nova import policy
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network
+from nova.tests.unit.image import fake
+from nova.tests.unit import matchers
+from nova.tests.unit.objects import test_keypair
+from nova.tests.unit import utils
+from nova import utils as nova_utils
+
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')

# Canonical instance uuid used throughout this module's fixtures.
FAKE_UUID = fakes.FAKE_UUID
# Namespace prefixes for parsing the XML (v1.1) API responses.
NS = "{http://docs.openstack.org/compute/api/v1.1}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
XPATH_NS = {
    'atom': 'http://www.w3.org/2005/Atom',
    'ns': 'http://docs.openstack.org/compute/api/v1.1'
}

# Maps instance uuid -> integer DB id for the stubbed DB calls below.
INSTANCE_IDS = {FAKE_UUID: 1}

FIELDS = instance_obj.INSTANCE_DEFAULT_FIELDS
+
+
def fake_gen_uuid():
    """Deterministic replacement for uuid generation in these tests."""
    return FAKE_UUID
+
+
def return_servers_empty(context, *args, **kwargs):
    """Stub DB listing that finds no servers."""
    return list()
+
+
def return_security_group(context, instance_id, security_group_id):
    """No-op stub for db.instance_add_security_group."""
    return None
+
+
def instance_update_and_get_original(context, instance_uuid, values,
                                     update_cells=True,
                                     columns_to_join=None,
                                     ):
    """Stub for the DB update that returns (old, new) instance records.

    Both elements are the same merged dict, which is enough for these
    controller tests.
    """
    base = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
                               name=values.get('display_name'))
    merged = dict(base, **values)
    return (merged, merged)
+
+
def instance_update(context, instance_uuid, values, update_cells=True):
    """Stub for db.instance_update returning the merged instance dict."""
    base = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
                               name=values.get('display_name'))
    merged = dict(base, **values)
    return merged
+
+
def fake_compute_api(cls, req, id):
    """Stub compute-API action that always reports success."""
    return True
+
+
class MockSetAdminPassword(object):
    """Callable stand-in for set_admin_password that records its arguments."""

    def __init__(self):
        # Nothing captured until the mock is actually invoked.
        self.password = None
        self.instance_id = None

    def __call__(self, context, instance_id, password):
        self.password = password
        self.instance_id = instance_id
+
+
class Base64ValidationTest(test.TestCase):
    """Unit tests for the servers controller's lenient base64 decoder.

    NOTE(review): these tests pass native ``str`` values to
    ``base64.b64encode`` and compare the round-trip result, which relies
    on Python 2 bytes/str equivalence - confirm before porting to py3.
    """

    def setUp(self):
        super(Base64ValidationTest, self).setUp()
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = servers.Controller(self.ext_mgr)

    def test_decode_base64(self):
        # A valid encoded string round-trips to the original value.
        value = "A random string"
        result = self.controller._decode_base64(base64.b64encode(value))
        self.assertEqual(result, value)

    def test_decode_base64_binary(self):
        # Binary payloads round-trip as well.
        value = "\x00\x12\x75\x99"
        result = self.controller._decode_base64(base64.b64encode(value))
        self.assertEqual(result, value)

    def test_decode_base64_whitespace(self):
        # Whitespace interleaved in the encoded payload is tolerated.
        value = "A random string"
        encoded = base64.b64encode(value)
        white = "\n \n%s\t%s\n" % (encoded[:2], encoded[2:])
        result = self.controller._decode_base64(white)
        self.assertEqual(result, value)

    def test_decode_base64_invalid(self):
        # A plain non-base64 string decodes to None rather than raising.
        invalid = "A random string"
        result = self.controller._decode_base64(invalid)
        self.assertIsNone(result)

    def test_decode_base64_illegal_bytes(self):
        # Characters outside the base64 alphabet make the decode fail.
        value = "A random string"
        encoded = base64.b64encode(value)
        white = ">\x01%s*%s()" % (encoded[:2], encoded[2:])
        result = self.controller._decode_base64(white)
        self.assertIsNone(result)
+
+
class NeutronV2Subclass(neutron_api.API):
    """Used to ensure that API handles subclasses properly."""
+
+
class ControllerTest(test.TestCase):
    """Shared fixture: a servers controller wired to stubbed DB/network."""

    def setUp(self):
        super(ControllerTest, self).setUp()
        self.flags(verbose=True, use_ipv6=False)
        fakes.stub_out_rate_limiting(self.stubs)
        fakes.stub_out_key_pair_funcs(self.stubs)
        fake.stub_out_image_service(self.stubs)
        return_server = fakes.fake_instance_get()
        return_servers = fakes.fake_instance_get_all_by_filters()
        self.stubs.Set(db, 'instance_get_all_by_filters',
                       return_servers)
        self.stubs.Set(db, 'instance_get_by_uuid',
                       return_server)
        self.stubs.Set(db, 'instance_add_security_group',
                       return_security_group)
        self.stubs.Set(db, 'instance_update_and_get_original',
                       instance_update_and_get_original)

        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = servers.Controller(self.ext_mgr)
        self.ips_controller = ips.Controller()
        # Re-initialize policy so rules loaded by earlier tests don't leak.
        policy.reset()
        policy.init()
        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+
+
+class ServersControllerTest(ControllerTest):
+ def test_can_check_loaded_extensions(self):
+ self.ext_mgr.extensions = {'os-fake': None}
+ self.assertTrue(self.controller.ext_mgr.is_loaded('os-fake'))
+ self.assertFalse(self.controller.ext_mgr.is_loaded('os-not-loaded'))
+
+    def test_requested_networks_prefix(self):
+        # nova-network path: a bridge-prefixed id is accepted verbatim.
+        uuid = 'br-00000000-0000-0000-0000-000000000000'
+        requested_networks = [{'uuid': uuid}]
+        res = self.controller._get_requested_networks(requested_networks)
+        self.assertIn((uuid, None), res.as_tuples())
+
+    def test_requested_networks_neutronv2_enabled_with_port(self):
+        # With neutron enabled, a port-only request yields a 4-tuple with
+        # only the port id populated.
+        self.flags(network_api_class='nova.network.neutronv2.api.API')
+        port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+        requested_networks = [{'port': port}]
+        res = self.controller._get_requested_networks(requested_networks)
+        self.assertEqual([(None, None, port, None)], res.as_tuples())
+
+    def test_requested_networks_neutronv2_enabled_with_network(self):
+        self.flags(network_api_class='nova.network.neutronv2.api.API')
+        network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+        requested_networks = [{'uuid': network}]
+        res = self.controller._get_requested_networks(requested_networks)
+        self.assertEqual([(network, None, None, None)], res.as_tuples())
+
+    def test_requested_networks_neutronv2_enabled_with_network_and_port(self):
+        # When both a network and a port are supplied, the port wins and
+        # the network id is dropped from the resulting tuple.
+        self.flags(network_api_class='nova.network.neutronv2.api.API')
+        network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+        port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+        requested_networks = [{'uuid': network, 'port': port}]
+        res = self.controller._get_requested_networks(requested_networks)
+        self.assertEqual([(None, None, port, None)], res.as_tuples())
+
+    def test_requested_networks_neutronv2_disabled_with_port(self):
+        # Port ids only make sense with neutron; under the default
+        # (nova-network) API class the request is rejected as a 400.
+        port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+        requested_networks = [{'port': port}]
+        self.assertRaises(
+            webob.exc.HTTPBadRequest,
+            self.controller._get_requested_networks,
+            requested_networks)
+
+    def test_requested_networks_api_enabled_with_v2_subclass(self):
+        self.flags(network_api_class='nova.network.neutronv2.api.API')
+        network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+        port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+        requested_networks = [{'uuid': network, 'port': port}]
+        res = self.controller._get_requested_networks(requested_networks)
+        self.assertEqual([(None, None, port, None)], res.as_tuples())
+
+    def test_requested_networks_neutronv2_subclass_with_port(self):
+        # A subclass of the neutron v2 API must be treated as neutron too.
+        cls = ('nova.tests.unit.api.openstack.compute' +
+               '.test_servers.NeutronV2Subclass')
+        self.flags(network_api_class=cls)
+        port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+        requested_networks = [{'port': port}]
+        res = self.controller._get_requested_networks(requested_networks)
+        self.assertEqual([(None, None, port, None)], res.as_tuples())
+
+    def test_get_server_by_uuid(self):
+        req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
+        res_dict = self.controller.show(req, FAKE_UUID)
+        self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+
+    def test_unique_host_id(self):
+        """Create two servers with the same host and different
+        project_ids and check that the hostId's are unique.
+        """
+        def return_instance_with_host(self, *args, **kwargs):
+            # NOTE(review): installed as a db-layer stub, so this 'self'
+            # parameter presumably receives the request-context positional
+            # argument, not a test instance -- confirm against the db API.
+            project_id = str(uuid.uuid4())
+            return fakes.stub_instance(id=1, uuid=FAKE_UUID,
+                                       project_id=project_id,
+                                       host='fake_host')
+
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       return_instance_with_host)
+        self.stubs.Set(db, 'instance_get',
+                       return_instance_with_host)
+
+        req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
+        server1 = self.controller.show(req, FAKE_UUID)
+        server2 = self.controller.show(req, FAKE_UUID)
+
+        # A fresh random project_id per lookup must hash to distinct hostIds.
+        self.assertNotEqual(server1['server']['hostId'],
+                            server2['server']['hostId'])
+
+    def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
+                              status="ACTIVE", progress=100):
+        # Expected full 'show' response body for the stubbed fake instance;
+        # shared by the test_get_server_*_by_id assertions below.
+        return {
+            "server": {
+                "id": uuid,
+                "user_id": "fake_user",
+                "tenant_id": "fake_project",
+                "updated": "2010-11-11T11:00:00Z",
+                "created": "2010-10-10T12:00:00Z",
+                "progress": progress,
+                "name": "server1",
+                "status": status,
+                "accessIPv4": "",
+                "accessIPv6": "",
+                "hostId": '',
+                "image": {
+                    "id": "10",
+                    "links": [
+                        {
+                            "rel": "bookmark",
+                            "href": image_bookmark,
+                        },
+                    ],
+                },
+                "flavor": {
+                    "id": "1",
+                    "links": [
+                        {
+                            "rel": "bookmark",
+                            "href": flavor_bookmark,
+                        },
+                    ],
+                },
+                "addresses": {
+                    'test1': [
+                        {'version': 4, 'addr': '192.168.1.100'},
+                        {'version': 6, 'addr': '2001:db8:0:1::1'}
+                    ]
+                },
+                "metadata": {
+                    "seq": "1",
+                },
+                "links": [
+                    {
+                        "rel": "self",
+                        "href": "http://localhost/v2/fake/servers/%s" % uuid,
+                    },
+                    {
+                        "rel": "bookmark",
+                        "href": "http://localhost/fake/servers/%s" % uuid,
+                    },
+                ],
+            }
+        }
+
+    def test_get_server_by_id(self):
+        self.flags(use_ipv6=True)
+        image_bookmark = "http://localhost/fake/images/10"
+        flavor_bookmark = "http://localhost/fake/flavors/1"
+
+        uuid = FAKE_UUID
+        req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % uuid)
+        res_dict = self.controller.show(req, uuid)
+
+        # The default stubbed instance is still building: BUILD, progress 0.
+        expected_server = self._get_server_data_dict(uuid,
+                                                     image_bookmark,
+                                                     flavor_bookmark,
+                                                     status="BUILD",
+                                                     progress=0)
+        self.assertThat(res_dict, matchers.DictMatches(expected_server))
+
+    def test_get_server_with_active_status_by_id(self):
+        image_bookmark = "http://localhost/fake/images/10"
+        flavor_bookmark = "http://localhost/fake/flavors/1"
+
+        # Re-stub the lookup so the instance reports ACTIVE at 100%.
+        new_return_server = fakes.fake_instance_get(
+            vm_state=vm_states.ACTIVE, progress=100)
+        self.stubs.Set(db, 'instance_get_by_uuid', new_return_server)
+
+        uuid = FAKE_UUID
+        req = fakes.HTTPRequest.blank('/fake/servers/%s' % uuid)
+        res_dict = self.controller.show(req, uuid)
+        expected_server = self._get_server_data_dict(uuid,
+                                                     image_bookmark,
+                                                     flavor_bookmark)
+        self.assertThat(res_dict, matchers.DictMatches(expected_server))
+
+    def test_get_server_with_id_image_ref_by_id(self):
+        image_ref = "10"
+        image_bookmark = "http://localhost/fake/images/10"
+        flavor_id = "1"
+        flavor_bookmark = "http://localhost/fake/flavors/1"
+
+        new_return_server = fakes.fake_instance_get(
+            vm_state=vm_states.ACTIVE, image_ref=image_ref,
+            flavor_id=flavor_id, progress=100)
+        self.stubs.Set(db, 'instance_get_by_uuid', new_return_server)
+
+        uuid = FAKE_UUID
+        req = fakes.HTTPRequest.blank('/fake/servers/%s' % uuid)
+        res_dict = self.controller.show(req, uuid)
+        expected_server = self._get_server_data_dict(uuid,
+                                                     image_bookmark,
+                                                     flavor_bookmark)
+        self.assertThat(res_dict, matchers.DictMatches(expected_server))
+
+    def test_get_server_addresses_from_cache(self):
+        # The ips controller must group the cached fixed IPs by their
+        # network label ('public'/'private'), tagging each with a version.
+        pub0 = ('172.19.0.1', '172.19.0.2',)
+        pub1 = ('1.2.3.4',)
+        pub2 = ('b33f::fdee:ddff:fecc:bbaa',)
+        priv0 = ('192.168.0.3', '192.168.0.4',)
+
+        def _ip(ip):
+            return {'address': ip, 'type': 'fixed'}
+
+        nw_cache = [
+            {'address': 'aa:aa:aa:aa:aa:aa',
+             'id': 1,
+             'network': {'bridge': 'br0',
+                         'id': 1,
+                         'label': 'public',
+                         'subnets': [{'cidr': '172.19.0.0/24',
+                                      'ips': [_ip(ip) for ip in pub0]},
+                                     {'cidr': '1.2.3.0/16',
+                                      'ips': [_ip(ip) for ip in pub1]},
+                                     {'cidr': 'b33f::/64',
+                                      'ips': [_ip(ip) for ip in pub2]}]}},
+            {'address': 'bb:bb:bb:bb:bb:bb',
+             'id': 2,
+             'network': {'bridge': 'br1',
+                         'id': 2,
+                         'label': 'private',
+                         'subnets': [{'cidr': '192.168.0.0/24',
+                                      'ips': [_ip(ip) for ip in priv0]}]}}]
+
+        return_server = fakes.fake_instance_get(nw_cache=nw_cache)
+        self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+
+        req = fakes.HTTPRequest.blank('/fake/servers/%s/ips' % FAKE_UUID)
+        res_dict = self.ips_controller.index(req, FAKE_UUID)
+
+        expected = {
+            'addresses': {
+                'private': [
+                    {'version': 4, 'addr': '192.168.0.3'},
+                    {'version': 4, 'addr': '192.168.0.4'},
+                ],
+                'public': [
+                    {'version': 4, 'addr': '172.19.0.1'},
+                    {'version': 4, 'addr': '172.19.0.2'},
+                    {'version': 4, 'addr': '1.2.3.4'},
+                    {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
+                ],
+            },
+        }
+        self.assertThat(res_dict, matchers.DictMatches(expected))
+
+    def test_get_server_addresses_nonexistent_network(self):
+        url = '/fake/servers/%s/ips/network_0' % FAKE_UUID
+        req = fakes.HTTPRequest.blank(url)
+        self.assertRaises(webob.exc.HTTPNotFound, self.ips_controller.show,
+                          req, FAKE_UUID, 'network_0')
+
+    def test_get_server_addresses_nonexistent_server(self):
+        # InstanceNotFound raised at the db layer must surface as HTTP 404.
+        def fake_instance_get(*args, **kwargs):
+            raise exception.InstanceNotFound(instance_id='fake')
+
+        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+
+        server_id = str(uuid.uuid4())
+        req = fakes.HTTPRequest.blank('/fake/servers/%s/ips' % server_id)
+        self.assertRaises(webob.exc.HTTPNotFound,
+                          self.ips_controller.index, req, server_id)
+
+    def test_get_server_list_empty(self):
+        self.stubs.Set(db, 'instance_get_all_by_filters',
+                       return_servers_empty)
+
+        req = fakes.HTTPRequest.blank('/fake/servers')
+        res_dict = self.controller.index(req)
+
+        num_servers = len(res_dict['servers'])
+        self.assertEqual(0, num_servers)
+
+    def test_get_server_list_with_reservation_id(self):
+        req = fakes.HTTPRequest.blank('/fake/servers?reservation_id=foo')
+        res_dict = self.controller.index(req)
+
+        i = 0
+        for s in res_dict['servers']:
+            self.assertEqual(s.get('name'), 'server%d' % (i + 1))
+            i += 1
+
+    def test_get_server_list_with_reservation_id_empty(self):
+        # NOTE(review): this test is byte-identical to
+        # test_get_server_list_with_reservation_id_details below, and
+        # despite its name it never stubs an empty result set -- one of
+        # the two should be fixed or removed.
+        req = fakes.HTTPRequest.blank('/fake/servers/detail?'
+                                      'reservation_id=foo')
+        res_dict = self.controller.detail(req)
+
+        i = 0
+        for s in res_dict['servers']:
+            self.assertEqual(s.get('name'), 'server%d' % (i + 1))
+            i += 1
+
+    def test_get_server_list_with_reservation_id_details(self):
+        req = fakes.HTTPRequest.blank('/fake/servers/detail?'
+                                      'reservation_id=foo')
+        res_dict = self.controller.detail(req)
+
+        i = 0
+        for s in res_dict['servers']:
+            self.assertEqual(s.get('name'), 'server%d' % (i + 1))
+            i += 1
+
+    def test_get_server_list(self):
+        req = fakes.HTTPRequest.blank('/fake/servers')
+        res_dict = self.controller.index(req)
+
+        # The stubbed instance_get_all_by_filters yields five servers;
+        # index returns each with self/bookmark links but no image detail.
+        self.assertEqual(len(res_dict['servers']), 5)
+        for i, s in enumerate(res_dict['servers']):
+            self.assertEqual(s['id'], fakes.get_fake_uuid(i))
+            self.assertEqual(s['name'], 'server%d' % (i + 1))
+            self.assertIsNone(s.get('image', None))
+
+            expected_links = [
+                {
+                    "rel": "self",
+                    "href": "http://localhost/v2/fake/servers/%s" % s['id'],
+                },
+                {
+                    "rel": "bookmark",
+                    "href": "http://localhost/fake/servers/%s" % s['id'],
+                },
+            ]
+
+            self.assertEqual(s['links'], expected_links)
+
+    def test_get_servers_with_limit(self):
+        req = fakes.HTTPRequest.blank('/fake/servers?limit=3')
+        res_dict = self.controller.index(req)
+
+        servers = res_dict['servers']
+        self.assertEqual([s['id'] for s in servers],
+                         [fakes.get_fake_uuid(i)
+                          for i in xrange(len(servers))])
+
+        # A truncated page must carry a 'next' link that repeats the limit
+        # and uses the last returned server's uuid as the marker.
+        servers_links = res_dict['servers_links']
+        self.assertEqual(servers_links[0]['rel'], 'next')
+        href_parts = urlparse.urlparse(servers_links[0]['href'])
+        self.assertEqual('/v2/fake/servers', href_parts.path)
+        params = urlparse.parse_qs(href_parts.query)
+        expected_params = {'limit': ['3'],
+                           'marker': [fakes.get_fake_uuid(2)]}
+        self.assertThat(params, matchers.DictMatches(expected_params))
+
+    def test_get_servers_with_limit_bad_value(self):
+        req = fakes.HTTPRequest.blank('/fake/servers?limit=aaa')
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller.index, req)
+
+    def test_get_server_details_empty(self):
+        self.stubs.Set(db, 'instance_get_all_by_filters',
+                       return_servers_empty)
+
+        req = fakes.HTTPRequest.blank('/fake/servers/detail')
+        res_dict = self.controller.detail(req)
+
+        num_servers = len(res_dict['servers'])
+        self.assertEqual(0, num_servers)
+
+    def test_get_server_details_with_limit(self):
+        req = fakes.HTTPRequest.blank('/fake/servers/detail?limit=3')
+        res = self.controller.detail(req)
+
+        servers = res['servers']
+        self.assertEqual([s['id'] for s in servers],
+                         [fakes.get_fake_uuid(i)
+                          for i in xrange(len(servers))])
+
+        # The 'next' link for detail pages must point back at /detail.
+        servers_links = res['servers_links']
+        self.assertEqual(servers_links[0]['rel'], 'next')
+
+        href_parts = urlparse.urlparse(servers_links[0]['href'])
+        self.assertEqual('/v2/fake/servers/detail', href_parts.path)
+        params = urlparse.parse_qs(href_parts.query)
+        expected = {'limit': ['3'], 'marker': [fakes.get_fake_uuid(2)]}
+        self.assertThat(params, matchers.DictMatches(expected))
+
+    def test_get_server_details_with_limit_bad_value(self):
+        req = fakes.HTTPRequest.blank('/fake/servers/detail?limit=aaa')
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller.detail, req)
+
+    def test_get_server_details_with_limit_and_other_params(self):
+        # Unrelated query params must be preserved verbatim in 'next'.
+        req = fakes.HTTPRequest.blank('/fake/servers/detail'
+                                      '?limit=3&blah=2:t')
+        res = self.controller.detail(req)
+
+        servers = res['servers']
+        self.assertEqual([s['id'] for s in servers],
+                         [fakes.get_fake_uuid(i)
+                          for i in xrange(len(servers))])
+
+        servers_links = res['servers_links']
+        self.assertEqual(servers_links[0]['rel'], 'next')
+
+        href_parts = urlparse.urlparse(servers_links[0]['href'])
+        self.assertEqual('/v2/fake/servers/detail', href_parts.path)
+        params = urlparse.parse_qs(href_parts.query)
+        expected = {'limit': ['3'], 'blah': ['2:t'],
+                    'marker': [fakes.get_fake_uuid(2)]}
+        self.assertThat(params, matchers.DictMatches(expected))
+
+    def test_get_servers_with_too_big_limit(self):
+        # A limit larger than the result set yields no pagination links.
+        req = fakes.HTTPRequest.blank('/fake/servers?limit=30')
+        res_dict = self.controller.index(req)
+        self.assertNotIn('servers_links', res_dict)
+
+    def test_get_servers_with_bad_limit(self):
+        req = fakes.HTTPRequest.blank('/fake/servers?limit=asdf')
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller.index, req)
+
+    def test_get_servers_with_marker(self):
+        # Results resume strictly after the marker uuid.
+        url = '/v2/fake/servers?marker=%s' % fakes.get_fake_uuid(2)
+        req = fakes.HTTPRequest.blank(url)
+        servers = self.controller.index(req)['servers']
+        self.assertEqual([s['name'] for s in servers], ["server4", "server5"])
+
+    def test_get_servers_with_limit_and_marker(self):
+        url = '/v2/fake/servers?limit=2&marker=%s' % fakes.get_fake_uuid(1)
+        req = fakes.HTTPRequest.blank(url)
+        servers = self.controller.index(req)['servers']
+        self.assertEqual([s['name'] for s in servers], ['server3', 'server4'])
+
+    def test_get_servers_with_bad_marker(self):
+        req = fakes.HTTPRequest.blank('/fake/servers?limit=2&marker=asdf')
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller.index, req)
+
+    def test_get_servers_with_bad_option(self):
+        # The index call must still succeed when given an unknown filter
+        # option; the stubbed result is returned unchanged.
+        server_uuid = str(uuid.uuid4())
+
+        def fake_get_all(compute_self, context, search_opts=None,
+                         sort_key=None, sort_dir='desc',
+                         limit=None, marker=None, want_objects=False):
+            db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+            return instance_obj._make_instance_list(
+                context, objects.InstanceList(), db_list, FIELDS)
+
+        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+        req = fakes.HTTPRequest.blank('/fake/servers?unknownoption=whee')
+        servers = self.controller.index(req)['servers']
+
+        self.assertEqual(len(servers), 1)
+        self.assertEqual(servers[0]['id'], server_uuid)
+
+    def test_get_servers_allows_image(self):
+        # ?image=... must be forwarded to compute_api.get_all verbatim.
+        server_uuid = str(uuid.uuid4())
+
+        def fake_get_all(compute_self, context, search_opts=None,
+                         sort_key=None, sort_dir='desc',
+                         limit=None, marker=None, want_objects=False):
+            self.assertIsNotNone(search_opts)
+            self.assertIn('image', search_opts)
+            self.assertEqual(search_opts['image'], '12345')
+            db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+            return instance_obj._make_instance_list(
+                context, objects.InstanceList(), db_list, FIELDS)
+
+        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+        req = fakes.HTTPRequest.blank('/fake/servers?image=12345')
+        servers = self.controller.index(req)['servers']
+
+        self.assertEqual(len(servers), 1)
+        self.assertEqual(servers[0]['id'], server_uuid)
+
+    def test_tenant_id_filter_converts_to_project_id_for_admin(self):
+        def fake_get_all(context, filters=None, sort_key=None,
+                         sort_dir='desc', limit=None, marker=None,
+                         columns_to_join=None, use_slave=False):
+            self.assertIsNotNone(filters)
+            # tenant_id must be rewritten into a project_id db filter.
+            self.assertEqual(filters['project_id'], 'newfake')
+            self.assertFalse(filters.get('tenant_id'))
+            return [fakes.stub_instance(100)]
+
+        self.stubs.Set(db, 'instance_get_all_by_filters',
+                       fake_get_all)
+
+        req = fakes.HTTPRequest.blank('/fake/servers'
+                                      '?all_tenants=1&tenant_id=newfake',
+                                      use_admin_context=True)
+        res = self.controller.index(req)
+
+        self.assertIn('servers', res)
+
+    def test_all_tenants_param_normal(self):
+        # Bare ?all_tenants must drop the project_id restriction.
+        def fake_get_all(context, filters=None, sort_key=None,
+                         sort_dir='desc', limit=None, marker=None,
+                         columns_to_join=None, use_slave=False):
+            self.assertNotIn('project_id', filters)
+            return [fakes.stub_instance(100)]
+
+        self.stubs.Set(db, 'instance_get_all_by_filters',
+                       fake_get_all)
+
+        req = fakes.HTTPRequest.blank('/fake/servers?all_tenants',
+                                      use_admin_context=True)
+        res = self.controller.index(req)
+
+        self.assertIn('servers', res)
+
+    def test_all_tenants_param_one(self):
+        def fake_get_all(context, filters=None, sort_key=None,
+                         sort_dir='desc', limit=None, marker=None,
+                         columns_to_join=None, use_slave=False):
+            self.assertNotIn('project_id', filters)
+            return [fakes.stub_instance(100)]
+
+        self.stubs.Set(db, 'instance_get_all_by_filters',
+                       fake_get_all)
+
+        req = fakes.HTTPRequest.blank('/fake/servers?all_tenants=1',
+                                      use_admin_context=True)
+        res = self.controller.index(req)
+
+        self.assertIn('servers', res)
+
+    def test_all_tenants_param_zero(self):
+        # all_tenants=0 disables the flag; it must not reach the db layer.
+        def fake_get_all(context, filters=None, sort_key=None,
+                         sort_dir='desc', limit=None, marker=None,
+                         columns_to_join=None, use_slave=False):
+            self.assertNotIn('all_tenants', filters)
+            return [fakes.stub_instance(100)]
+
+        self.stubs.Set(db, 'instance_get_all_by_filters',
+                       fake_get_all)
+
+        req = fakes.HTTPRequest.blank('/fake/servers?all_tenants=0',
+                                      use_admin_context=True)
+        res = self.controller.index(req)
+
+        self.assertIn('servers', res)
+
+    def test_all_tenants_param_false(self):
+        def fake_get_all(context, filters=None, sort_key=None,
+                         sort_dir='desc', limit=None, marker=None,
+                         columns_to_join=None, use_slave=False):
+            self.assertNotIn('all_tenants', filters)
+            return [fakes.stub_instance(100)]
+
+        self.stubs.Set(db, 'instance_get_all_by_filters',
+                       fake_get_all)
+
+        req = fakes.HTTPRequest.blank('/fake/servers?all_tenants=false',
+                                      use_admin_context=True)
+        res = self.controller.index(req)
+
+        self.assertIn('servers', res)
+
+    def test_all_tenants_param_invalid(self):
+        # NOTE(review): this fake lacks the use_slave kwarg the others
+        # declare -- harmless only because the 400 is raised before the
+        # stub is ever invoked.
+        def fake_get_all(context, filters=None, sort_key=None,
+                         sort_dir='desc', limit=None, marker=None,
+                         columns_to_join=None):
+            self.assertNotIn('all_tenants', filters)
+            return [fakes.stub_instance(100)]
+
+        self.stubs.Set(db, 'instance_get_all_by_filters',
+                       fake_get_all)
+
+        req = fakes.HTTPRequest.blank('/fake/servers?all_tenants=xxx',
+                                      use_admin_context=True)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller.index, req)
+
+    def test_admin_restricted_tenant(self):
+        # Without all_tenants, even an admin is scoped to its own project.
+        def fake_get_all(context, filters=None, sort_key=None,
+                         sort_dir='desc', limit=None, marker=None,
+                         columns_to_join=None, use_slave=False):
+            self.assertIsNotNone(filters)
+            self.assertEqual(filters['project_id'], 'fake')
+            return [fakes.stub_instance(100)]
+
+        self.stubs.Set(db, 'instance_get_all_by_filters',
+                       fake_get_all)
+
+        req = fakes.HTTPRequest.blank('/fake/servers',
+                                      use_admin_context=True)
+        res = self.controller.index(req)
+
+        self.assertIn('servers', res)
+
+    def test_all_tenants_pass_policy(self):
+        # A policy rule granting compute:get_all_tenants lets a non-admin
+        # list across projects (no project_id filter reaches the db).
+        def fake_get_all(context, filters=None, sort_key=None,
+                         sort_dir='desc', limit=None, marker=None,
+                         columns_to_join=None, use_slave=False):
+            self.assertIsNotNone(filters)
+            self.assertNotIn('project_id', filters)
+            return [fakes.stub_instance(100)]
+
+        self.stubs.Set(db, 'instance_get_all_by_filters',
+                       fake_get_all)
+
+        rules = {
+            "compute:get_all_tenants":
+                common_policy.parse_rule("project_id:fake"),
+            "compute:get_all":
+                common_policy.parse_rule("project_id:fake"),
+        }
+
+        policy.set_rules(rules)
+
+        req = fakes.HTTPRequest.blank('/fake/servers?all_tenants=1')
+        res = self.controller.index(req)
+
+        self.assertIn('servers', res)
+
+    def test_all_tenants_fail_policy(self):
+        # NOTE(review): the stub is never reached -- the policy check
+        # raises first -- which is also why its missing use_slave kwarg
+        # goes unnoticed.
+        def fake_get_all(context, filters=None, sort_key=None,
+                         sort_dir='desc', limit=None, marker=None,
+                         columns_to_join=None):
+            self.assertIsNotNone(filters)
+            return [fakes.stub_instance(100)]
+
+        rules = {
+            "compute:get_all_tenants":
+                common_policy.parse_rule("project_id:non_fake"),
+            "compute:get_all":
+                common_policy.parse_rule("project_id:fake"),
+        }
+
+        policy.set_rules(rules)
+        self.stubs.Set(db, 'instance_get_all_by_filters',
+                       fake_get_all)
+
+        req = fakes.HTTPRequest.blank('/fake/servers?all_tenants=1')
+        self.assertRaises(exception.PolicyNotAuthorized,
+                          self.controller.index, req)
+
+    def test_get_servers_allows_flavor(self):
+        server_uuid = str(uuid.uuid4())
+
+        def fake_get_all(compute_self, context, search_opts=None,
+                         sort_key=None, sort_dir='desc',
+                         limit=None, marker=None, want_objects=False):
+            self.assertIsNotNone(search_opts)
+            self.assertIn('flavor', search_opts)
+            # flavor is an integer ID
+            self.assertEqual(search_opts['flavor'], '12345')
+            db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+            return instance_obj._make_instance_list(
+                context, objects.InstanceList(), db_list, FIELDS)
+
+        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+        req = fakes.HTTPRequest.blank('/fake/servers?flavor=12345')
+        servers = self.controller.index(req)['servers']
+
+        self.assertEqual(len(servers), 1)
+        self.assertEqual(servers[0]['id'], server_uuid)
+
+    def test_get_servers_with_bad_flavor(self):
+        # A non-numeric flavor filter matches nothing rather than erroring.
+        req = fakes.HTTPRequest.blank('/fake/servers?flavor=abcde')
+        servers = self.controller.index(req)['servers']
+
+        self.assertEqual(len(servers), 0)
+
+    def test_get_server_details_with_bad_flavor(self):
+        req = fakes.HTTPRequest.blank('/fake/servers/detail?flavor=abcde')
+        servers = self.controller.detail(req)['servers']
+
+        self.assertThat(servers, testtools.matchers.HasLength(0))
+
+    def test_get_servers_allows_status(self):
+        # ?status=active must be translated into vm_state=[ACTIVE].
+        server_uuid = str(uuid.uuid4())
+
+        def fake_get_all(compute_self, context, search_opts=None,
+                         sort_key=None, sort_dir='desc',
+                         limit=None, marker=None, want_objects=False):
+            self.assertIsNotNone(search_opts)
+            self.assertIn('vm_state', search_opts)
+            self.assertEqual(search_opts['vm_state'], [vm_states.ACTIVE])
+            db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+            return instance_obj._make_instance_list(
+                context, objects.InstanceList(), db_list, FIELDS)
+
+        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+        req = fakes.HTTPRequest.blank('/fake/servers?status=active')
+        servers = self.controller.index(req)['servers']
+
+        self.assertEqual(len(servers), 1)
+        self.assertEqual(servers[0]['id'], server_uuid)
+
+    @mock.patch.object(compute_api.API, 'get_all')
+    def test_get_servers_allows_multi_status(self, get_all_mock):
+        # Repeated ?status params accumulate into one vm_state list.
+        # NOTE(review): uses instance_obj.InstanceList() where sibling
+        # tests use objects.InstanceList() -- presumably the same class;
+        # worth unifying.
+        server_uuid0 = str(uuid.uuid4())
+        server_uuid1 = str(uuid.uuid4())
+        db_list = [fakes.stub_instance(100, uuid=server_uuid0),
+                   fakes.stub_instance(101, uuid=server_uuid1)]
+        get_all_mock.return_value = instance_obj._make_instance_list(
+            context, instance_obj.InstanceList(), db_list, FIELDS)
+
+        req = fakes.HTTPRequest.blank(
+            '/fake/servers?status=active&status=error')
+        servers = self.controller.index(req)['servers']
+        self.assertEqual(2, len(servers))
+        self.assertEqual(server_uuid0, servers[0]['id'])
+        self.assertEqual(server_uuid1, servers[1]['id'])
+        expected_search_opts = dict(deleted=False,
+                                    vm_state=[vm_states.ACTIVE,
+                                              vm_states.ERROR],
+                                    project_id='fake')
+        get_all_mock.assert_called_once_with(mock.ANY,
+            search_opts=expected_search_opts, limit=mock.ANY,
+            marker=mock.ANY, want_objects=mock.ANY)
+
+    @mock.patch.object(compute_api.API, 'get_all')
+    def test_get_servers_system_metadata_filter(self, get_all_mock):
+        # Admins may filter on (url-encoded) system_metadata JSON.
+        server_uuid0 = str(uuid.uuid4())
+        server_uuid1 = str(uuid.uuid4())
+        expected_system_metadata = u'{"some_value": "some_key"}'
+        db_list = [fakes.stub_instance(100, uuid=server_uuid0),
+                   fakes.stub_instance(101, uuid=server_uuid1)]
+        get_all_mock.return_value = instance_obj._make_instance_list(
+            context, instance_obj.InstanceList(), db_list, FIELDS)
+
+        req = fakes.HTTPRequest.blank(
+            '/fake/servers?status=active&status=error&system_metadata=' +
+            urllib.quote(expected_system_metadata),
+            use_admin_context=True)
+        servers = self.controller.index(req)['servers']
+        self.assertEqual(2, len(servers))
+        self.assertEqual(server_uuid0, servers[0]['id'])
+        self.assertEqual(server_uuid1, servers[1]['id'])
+        expected_search_opts = dict(
+            deleted=False, vm_state=[vm_states.ACTIVE, vm_states.ERROR],
+            system_metadata=expected_system_metadata, project_id='fake')
+        get_all_mock.assert_called_once_with(mock.ANY,
+            search_opts=expected_search_opts, limit=mock.ANY,
+            marker=mock.ANY, want_objects=mock.ANY)
+
+    @mock.patch.object(compute_api.API, 'get_all')
+    def test_get_servers_flavor_not_found(self, get_all_mock):
+        # FlavorNotFound from the compute API maps to an empty result,
+        # not an error response.
+        get_all_mock.side_effect = exception.FlavorNotFound(flavor_id=1)
+
+        req = fakes.HTTPRequest.blank(
+            '/fake/servers?status=active&flavor=abc')
+        servers = self.controller.index(req)['servers']
+        self.assertEqual(0, len(servers))
+
+    @mock.patch.object(compute_api.API, 'get_all')
+    def test_get_servers_allows_invalid_status(self, get_all_mock):
+        # Unknown status values are dropped; only the valid one survives.
+        server_uuid0 = str(uuid.uuid4())
+        server_uuid1 = str(uuid.uuid4())
+        db_list = [fakes.stub_instance(100, uuid=server_uuid0),
+                   fakes.stub_instance(101, uuid=server_uuid1)]
+        get_all_mock.return_value = instance_obj._make_instance_list(
+            context, instance_obj.InstanceList(), db_list, FIELDS)
+
+        req = fakes.HTTPRequest.blank(
+            '/fake/servers?status=active&status=invalid')
+        servers = self.controller.index(req)['servers']
+        self.assertEqual(2, len(servers))
+        self.assertEqual(server_uuid0, servers[0]['id'])
+        self.assertEqual(server_uuid1, servers[1]['id'])
+        expected_search_opts = dict(deleted=False,
+                                    vm_state=[vm_states.ACTIVE],
+                                    project_id='fake')
+        get_all_mock.assert_called_once_with(mock.ANY,
+            search_opts=expected_search_opts, limit=mock.ANY,
+            marker=mock.ANY, want_objects=mock.ANY)
+
+    def test_get_servers_allows_task_status(self):
+        # status=reboot expands to the full set of reboot task states.
+        server_uuid = str(uuid.uuid4())
+        task_state = task_states.REBOOTING
+
+        def fake_get_all(compute_self, context, search_opts=None,
+                         sort_key=None, sort_dir='desc',
+                         limit=None, marker=None, want_objects=False):
+            self.assertIsNotNone(search_opts)
+            self.assertIn('task_state', search_opts)
+            self.assertEqual([task_states.REBOOT_PENDING,
+                              task_states.REBOOT_STARTED,
+                              task_states.REBOOTING],
+                             search_opts['task_state'])
+            db_list = [fakes.stub_instance(100, uuid=server_uuid,
+                                           task_state=task_state)]
+            return instance_obj._make_instance_list(
+                context, objects.InstanceList(), db_list, FIELDS)
+
+        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+        req = fakes.HTTPRequest.blank('/servers?status=reboot')
+        servers = self.controller.index(req)['servers']
+
+        self.assertEqual(len(servers), 1)
+        self.assertEqual(servers[0]['id'], server_uuid)
+
+    def test_get_servers_resize_status(self):
+        # Test when resize status, it maps list of vm states.
+        server_uuid = str(uuid.uuid4())
+
+        def fake_get_all(compute_self, context, search_opts=None,
+                         sort_key=None, sort_dir='desc',
+                         limit=None, marker=None, want_objects=False):
+            self.assertIn('vm_state', search_opts)
+            self.assertEqual(search_opts['vm_state'],
+                             [vm_states.ACTIVE, vm_states.STOPPED])
+
+            db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+            return instance_obj._make_instance_list(
+                context, objects.InstanceList(), db_list, FIELDS)
+
+        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+        req = fakes.HTTPRequest.blank('/fake/servers?status=resize')
+
+        servers = self.controller.detail(req)['servers']
+        self.assertEqual(len(servers), 1)
+        self.assertEqual(servers[0]['id'], server_uuid)
+
+    def test_get_servers_invalid_status(self):
+        # Test getting servers by invalid status.
+        req = fakes.HTTPRequest.blank('/fake/servers?status=baloney',
+                                      use_admin_context=False)
+        servers = self.controller.index(req)['servers']
+        self.assertEqual(len(servers), 0)
+
+    def test_get_servers_deleted_status_as_user(self):
+        # Non-admins may not list deleted servers: HTTP 403.
+        req = fakes.HTTPRequest.blank('/fake/servers?status=deleted',
+                                      use_admin_context=False)
+        self.assertRaises(webob.exc.HTTPForbidden,
+                          self.controller.detail, req)
+
+    def test_get_servers_deleted_status_as_admin(self):
+        server_uuid = str(uuid.uuid4())
+
+        def fake_get_all(compute_self, context, search_opts=None,
+                         sort_key=None, sort_dir='desc',
+                         limit=None, marker=None, want_objects=False):
+            self.assertIn('vm_state', search_opts)
+            self.assertEqual(search_opts['vm_state'], ['deleted'])
+
+            db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+            return instance_obj._make_instance_list(
+                context, objects.InstanceList(), db_list, FIELDS)
+
+        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+        req = fakes.HTTPRequest.blank('/fake/servers?status=deleted',
+                                      use_admin_context=True)
+
+        servers = self.controller.detail(req)['servers']
+        self.assertEqual(len(servers), 1)
+        self.assertEqual(servers[0]['id'], server_uuid)
+
+    def test_get_servers_allows_name(self):
+        # ?name is a regex filter and is passed through untouched.
+        server_uuid = str(uuid.uuid4())
+
+        def fake_get_all(compute_self, context, search_opts=None,
+                         sort_key=None, sort_dir='desc',
+                         limit=None, marker=None, want_objects=False):
+            self.assertIsNotNone(search_opts)
+            self.assertIn('name', search_opts)
+            self.assertEqual(search_opts['name'], 'whee.*')
+            db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+            return instance_obj._make_instance_list(
+                context, objects.InstanceList(), db_list, FIELDS)
+
+        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+        req = fakes.HTTPRequest.blank('/fake/servers?name=whee.*')
+        servers = self.controller.index(req)['servers']
+
+        self.assertEqual(len(servers), 1)
+        self.assertEqual(servers[0]['id'], server_uuid)
+
+    def test_get_servers_allows_changes_since(self):
+        # changes-since is parsed to an aware UTC datetime and implies
+        # that deleted instances are NOT filtered out.
+        server_uuid = str(uuid.uuid4())
+
+        def fake_get_all(compute_self, context, search_opts=None,
+                         sort_key=None, sort_dir='desc',
+                         limit=None, marker=None, want_objects=False):
+            self.assertIsNotNone(search_opts)
+            self.assertIn('changes-since', search_opts)
+            changes_since = datetime.datetime(2011, 1, 24, 17, 8, 1,
+                                              tzinfo=iso8601.iso8601.UTC)
+            self.assertEqual(search_opts['changes-since'], changes_since)
+            self.assertNotIn('deleted', search_opts)
+            db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+            return instance_obj._make_instance_list(
+                context, objects.InstanceList(), db_list, FIELDS)
+
+        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+        params = 'changes-since=2011-01-24T17:08:01Z'
+        req = fakes.HTTPRequest.blank('/fake/servers?%s' % params)
+        servers = self.controller.index(req)['servers']
+
+        self.assertEqual(len(servers), 1)
+        self.assertEqual(servers[0]['id'], server_uuid)
+
+    def test_get_servers_allows_changes_since_bad_value(self):
+        params = 'changes-since=asdf'
+        req = fakes.HTTPRequest.blank('/fake/servers?%s' % params)
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req)
+
+    def test_get_servers_admin_filters_as_user(self):
+        """Test getting servers by admin-only or unknown options when
+        context is not admin. Make sure the admin and unknown options
+        are stripped before they get to compute_api.get_all()
+        """
+        server_uuid = str(uuid.uuid4())
+
+        def fake_get_all(compute_self, context, search_opts=None,
+                         sort_key=None, sort_dir='desc',
+                         limit=None, marker=None, want_objects=False):
+            self.assertIsNotNone(search_opts)
+            # Allowed by user
+            self.assertIn('name', search_opts)
+            self.assertIn('ip', search_opts)
+            # OSAPI converts status to vm_state
+            self.assertIn('vm_state', search_opts)
+            # Allowed only by admins with admin API on
+            self.assertNotIn('unknown_option', search_opts)
+            db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+            return instance_obj._make_instance_list(
+                context, objects.InstanceList(), db_list, FIELDS)
+
+        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+        query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
+        req = fakes.HTTPRequest.blank('/fake/servers?%s' % query_str)
+        res = self.controller.index(req)
+
+        servers = res['servers']
+        self.assertEqual(len(servers), 1)
+        self.assertEqual(servers[0]['id'], server_uuid)
+
+    def test_get_servers_admin_options_as_admin(self):
+        """Test getting servers by admin-only or unknown options when
+        context is admin. All options should be passed
+        """
+        server_uuid = str(uuid.uuid4())
+
+        def fake_get_all(compute_self, context, search_opts=None,
+                         sort_key=None, sort_dir='desc',
+                         limit=None, marker=None, want_objects=False):
+            self.assertIsNotNone(search_opts)
+            # Allowed by user
+            self.assertIn('name', search_opts)
+            # OSAPI converts status to vm_state
+            self.assertIn('vm_state', search_opts)
+            # Allowed only by admins with admin API on
+            self.assertIn('ip', search_opts)
+            self.assertIn('unknown_option', search_opts)
+            db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+            return instance_obj._make_instance_list(
+                context, objects.InstanceList(), db_list, FIELDS)
+
+        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+        query_str = "name=foo&ip=10.*&status=active&unknown_option=meow"
+        req = fakes.HTTPRequest.blank('/fake/servers?%s' % query_str,
+                                      use_admin_context=True)
+        servers = self.controller.index(req)['servers']
+
+        self.assertEqual(len(servers), 1)
+        self.assertEqual(servers[0]['id'], server_uuid)
+
+    def test_get_servers_allows_ip(self):
+        """Test getting servers by ip."""
+        server_uuid = str(uuid.uuid4())
+
+        def fake_get_all(compute_self, context, search_opts=None,
+                         sort_key=None, sort_dir='desc',
+                         limit=None, marker=None, want_objects=False):
+            self.assertIsNotNone(search_opts)
+            self.assertIn('ip', search_opts)
+            # The regex-style filter value is passed through unescaped.
+            self.assertEqual(search_opts['ip'], '10\..*')
+            db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+            return instance_obj._make_instance_list(
+                context, objects.InstanceList(), db_list, FIELDS)
+
+        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+        req = fakes.HTTPRequest.blank('/fake/servers?ip=10\..*')
+        servers = self.controller.index(req)['servers']
+
+        self.assertEqual(len(servers), 1)
+        self.assertEqual(servers[0]['id'], server_uuid)
+
+    def test_get_servers_admin_allows_ip6(self):
+        """Test getting servers by ip6 with admin_api enabled and
+        admin context
+        """
+        server_uuid = str(uuid.uuid4())
+
+        def fake_get_all(compute_self, context, search_opts=None,
+                         sort_key=None, sort_dir='desc',
+                         limit=None, marker=None, want_objects=False):
+            self.assertIsNotNone(search_opts)
+            self.assertIn('ip6', search_opts)
+            self.assertEqual(search_opts['ip6'], 'ffff.*')
+            db_list = [fakes.stub_instance(100, uuid=server_uuid)]
+            return instance_obj._make_instance_list(
+                context, objects.InstanceList(), db_list, FIELDS)
+
+        self.stubs.Set(compute_api.API, 'get_all', fake_get_all)
+
+        req = fakes.HTTPRequest.blank('/fake/servers?ip6=ffff.*',
+                                      use_admin_context=True)
+        servers = self.controller.index(req)['servers']
+
+        self.assertEqual(len(servers), 1)
+        self.assertEqual(servers[0]['id'], server_uuid)
+
+    def test_get_all_server_details(self):
+        """Every server in the detail listing carries the expected
+        image/flavor sub-documents, status and metadata.
+        """
+        # Expected flavor/image representations, including the bookmark
+        # links the API builds for the fake request host.
+        expected_flavor = {
+            "id": "1",
+            "links": [
+                {
+                    "rel": "bookmark",
+                    "href": 'http://localhost/fake/flavors/1',
+                },
+            ],
+        }
+        expected_image = {
+            "id": "10",
+            "links": [
+                {
+                    "rel": "bookmark",
+                    "href": 'http://localhost/fake/images/10',
+                },
+            ],
+        }
+        req = fakes.HTTPRequest.blank('/fake/servers/detail')
+        res_dict = self.controller.detail(req)
+
+        for i, s in enumerate(res_dict['servers']):
+            self.assertEqual(s['id'], fakes.get_fake_uuid(i))
+            self.assertEqual(s['hostId'], '')
+            self.assertEqual(s['name'], 'server%d' % (i + 1))
+            self.assertEqual(s['image'], expected_image)
+            self.assertEqual(s['flavor'], expected_flavor)
+            self.assertEqual(s['status'], 'BUILD')
+            self.assertEqual(s['metadata']['seq'], str(i + 1))
+
+    def test_get_all_server_details_with_host(self):
+        """We want to make sure that if two instances are on the same host,
+        then they return the same hostId. If two instances are on different
+        hosts, they should return different hostId's. In this test, there
+        are 5 instances - 2 on one host and 3 on another.
+        """
+
+        def return_servers_with_host(context, *args, **kwargs):
+            # Alternate the 5 instances between host 0 and host 1.
+            # (xrange: this is Python 2 era code.)
+            return [fakes.stub_instance(i + 1, 'fake', 'fake', host=i % 2,
+                                        uuid=fakes.get_fake_uuid(i))
+                    for i in xrange(5)]
+
+        self.stubs.Set(db, 'instance_get_all_by_filters',
+                       return_servers_with_host)
+
+        req = fakes.HTTPRequest.blank('/fake/servers/detail')
+        res_dict = self.controller.detail(req)
+
+        server_list = res_dict['servers']
+        host_ids = [server_list[0]['hostId'], server_list[1]['hostId']]
+        # Both host ids must be non-empty and differ from each other.
+        self.assertTrue(host_ids[0] and host_ids[1])
+        self.assertNotEqual(host_ids[0], host_ids[1])
+
+        for i, s in enumerate(server_list):
+            self.assertEqual(s['id'], fakes.get_fake_uuid(i))
+            self.assertEqual(s['hostId'], host_ids[i % 2])
+            self.assertEqual(s['name'], 'server%d' % (i + 1))
+
+
+class ServersControllerUpdateTest(ControllerTest):
+
+ def _get_request(self, body=None, content_type='json', options=None):
+ if options:
+ self.stubs.Set(db, 'instance_get',
+ fakes.fake_instance_get(**options))
+ req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
+ req.method = 'PUT'
+ req.content_type = 'application/%s' % content_type
+ req.body = jsonutils.dumps(body)
+ return req
+
+ def test_update_server_all_attributes(self):
+ body = {'server': {
+ 'name': 'server_test',
+ 'accessIPv4': '0.0.0.0',
+ 'accessIPv6': 'beef::0123',
+ }}
+ req = self._get_request(body, {'name': 'server_test',
+ 'access_ipv4': '0.0.0.0',
+ 'access_ipv6': 'beef::0123'})
+ res_dict = self.controller.update(req, FAKE_UUID, body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['name'], 'server_test')
+ self.assertEqual(res_dict['server']['accessIPv4'], '0.0.0.0')
+ self.assertEqual(res_dict['server']['accessIPv6'], 'beef::123')
+
+ def test_update_server_invalid_xml_raises_lookup(self):
+ body = """<?xml version="1.0" encoding="TF-8"?>
+ <metadata
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ key="Label"></meta>"""
+ req = self._get_request(body, content_type='xml')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_update_server_invalid_xml_raises_expat(self):
+ body = """<?xml version="1.0" encoding="UTF-8"?>
+ <metadata
+ xmlns="http://docs.openstack.org/compute/api/v1.1"
+ key="Label"></meta>"""
+ req = self._get_request(body, content_type='xml')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_update_server_name(self):
+ body = {'server': {'name': 'server_test'}}
+ req = self._get_request(body, {'name': 'server_test'})
+ res_dict = self.controller.update(req, FAKE_UUID, body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['name'], 'server_test')
+
+ def test_update_server_name_too_long(self):
+ body = {'server': {'name': 'x' * 256}}
+ req = self._get_request(body, {'name': 'server_test'})
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, FAKE_UUID, body)
+
+ def test_update_server_name_all_blank_spaces(self):
+ body = {'server': {'name': ' ' * 64}}
+ req = self._get_request(body, {'name': 'server_test'})
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, FAKE_UUID, body)
+
+ def test_update_server_access_ipv4(self):
+ body = {'server': {'accessIPv4': '0.0.0.0'}}
+ req = self._get_request(body, {'access_ipv4': '0.0.0.0'})
+ res_dict = self.controller.update(req, FAKE_UUID, body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['accessIPv4'], '0.0.0.0')
+
+ def test_update_server_access_ipv4_bad_format(self):
+ body = {'server': {'accessIPv4': 'bad_format'}}
+ req = self._get_request(body, {'access_ipv4': '0.0.0.0'})
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, FAKE_UUID, body)
+
+ def test_update_server_access_ipv4_none(self):
+ body = {'server': {'accessIPv4': None}}
+ req = self._get_request(body, {'access_ipv4': '0.0.0.0'})
+ res_dict = self.controller.update(req, FAKE_UUID, body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['accessIPv4'], '')
+
+ def test_update_server_access_ipv4_blank(self):
+ body = {'server': {'accessIPv4': ''}}
+ req = self._get_request(body, {'access_ipv4': '0.0.0.0'})
+ res_dict = self.controller.update(req, FAKE_UUID, body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['accessIPv4'], '')
+
+ def test_update_server_access_ipv6(self):
+ body = {'server': {'accessIPv6': 'beef::0123'}}
+ req = self._get_request(body, {'access_ipv6': 'beef::0123'})
+ res_dict = self.controller.update(req, FAKE_UUID, body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['accessIPv6'], 'beef::123')
+
+ def test_update_server_access_ipv6_bad_format(self):
+ body = {'server': {'accessIPv6': 'bad_format'}}
+ req = self._get_request(body, {'access_ipv6': 'beef::0123'})
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ req, FAKE_UUID, body)
+
+ def test_update_server_access_ipv6_none(self):
+ body = {'server': {'accessIPv6': None}}
+ req = self._get_request(body, {'access_ipv6': 'beef::0123'})
+ res_dict = self.controller.update(req, FAKE_UUID, body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['accessIPv6'], '')
+
+ def test_update_server_access_ipv6_blank(self):
+ body = {'server': {'accessIPv6': ''}}
+ req = self._get_request(body, {'access_ipv6': 'beef::0123'})
+ res_dict = self.controller.update(req, FAKE_UUID, body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['accessIPv6'], '')
+
+ def test_update_server_personality(self):
+ body = {
+ 'server': {
+ 'personality': []
+ }
+ }
+ req = self._get_request(body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req, FAKE_UUID, body)
+
+ def test_update_server_adminPass_ignored(self):
+ inst_dict = dict(name='server_test', adminPass='bacon')
+ body = dict(server=inst_dict)
+
+ def server_update(context, id, params):
+ filtered_dict = {
+ 'display_name': 'server_test',
+ }
+ self.assertEqual(params, filtered_dict)
+ filtered_dict['uuid'] = id
+ return filtered_dict
+
+ self.stubs.Set(db, 'instance_update', server_update)
+ # FIXME (comstud)
+ # self.stubs.Set(db, 'instance_get',
+ # return_server_with_attributes(name='server_test'))
+
+ req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
+ req.method = 'PUT'
+ req.content_type = "application/json"
+ req.body = jsonutils.dumps(body)
+ res_dict = self.controller.update(req, FAKE_UUID, body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['name'], 'server_test')
+
+ def test_update_server_not_found(self):
+ def fake_get(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(compute_api.API, 'get', fake_get)
+ body = {'server': {'name': 'server_test'}}
+ req = self._get_request(body)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
+ req, FAKE_UUID, body)
+
+ def test_update_server_not_found_on_update(self):
+ def fake_update(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake')
+
+ self.stubs.Set(db, 'instance_update_and_get_original', fake_update)
+ body = {'server': {'name': 'server_test'}}
+ req = self._get_request(body)
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
+ req, FAKE_UUID, body)
+
+ def test_update_server_policy_fail(self):
+ rule = {'compute:update': common_policy.parse_rule('role:admin')}
+ policy.set_rules(rule)
+ body = {'server': {'name': 'server_test'}}
+ req = self._get_request(body, {'name': 'server_test'})
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.update, req, FAKE_UUID, body)
+
+
+class ServersControllerDeleteTest(ControllerTest):
+    """Tests for DELETE /servers/{id}."""
+
+    def setUp(self):
+        super(ServersControllerDeleteTest, self).setUp()
+        # Flipped to True by the db.instance_destroy stub below, so each
+        # test can assert whether the delete actually reached the db.
+        self.server_delete_called = False
+
+        def instance_destroy_mock(*args, **kwargs):
+            self.server_delete_called = True
+            deleted_at = timeutils.utcnow()
+            return fake_instance.fake_db_instance(deleted_at=deleted_at)
+
+        self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
+
+    def _create_delete_request(self, uuid):
+        # Also installs the instance quota stub (see
+        # fakes.stub_out_instance_quota for its exact semantics).
+        fakes.stub_out_instance_quota(self.stubs, 0, 10)
+        req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % uuid)
+        req.method = 'DELETE'
+        return req
+
+    def _delete_server_instance(self, uuid=FAKE_UUID):
+        # Delete an ACTIVE instance through the controller.
+        req = self._create_delete_request(uuid)
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
+        self.controller.delete(req, uuid)
+
+    def test_delete_server_instance(self):
+        self._delete_server_instance()
+        self.assertTrue(self.server_delete_called)
+
+    def test_delete_server_instance_not_found(self):
+        self.assertRaises(webob.exc.HTTPNotFound,
+                          self._delete_server_instance,
+                          uuid='non-existent-uuid')
+
+    def test_delete_locked_server(self):
+        req = self._create_delete_request(FAKE_UUID)
+        # Both the soft-delete and hard-delete paths are stubbed to act
+        # as on a locked server; either way a 409 is expected.
+        self.stubs.Set(compute_api.API, delete_types.SOFT_DELETE,
+                       fakes.fake_actions_to_locked_server)
+        self.stubs.Set(compute_api.API, delete_types.DELETE,
+                       fakes.fake_actions_to_locked_server)
+
+        self.assertRaises(webob.exc.HTTPConflict, self.controller.delete,
+                          req, FAKE_UUID)
+
+    def test_delete_server_instance_while_building(self):
+        fakes.stub_out_instance_quota(self.stubs, 0, 10)
+        request = self._create_delete_request(FAKE_UUID)
+        self.controller.delete(request, FAKE_UUID)
+
+        self.assertTrue(self.server_delete_called)
+
+    def test_delete_server_instance_while_deleting_host_up(self):
+        req = self._create_delete_request(FAKE_UUID)
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+                                               task_state=task_states.DELETING,
+                                               host='fake_host'))
+        self.stubs.Set(objects.Instance, 'save',
+                       lambda *args, **kwargs: None)
+
+        # A fresh updated_at makes the compute service look alive.
+        @classmethod
+        def fake_get_by_compute_host(cls, context, host):
+            return {'updated_at': timeutils.utcnow()}
+        self.stubs.Set(objects.Service, 'get_by_compute_host',
+                       fake_get_by_compute_host)
+
+        self.controller.delete(req, FAKE_UUID)
+        # Delete request can be ignored, because it's been accepted and
+        # forwarded to the compute service already.
+        self.assertFalse(self.server_delete_called)
+
+    def test_delete_server_instance_while_deleting_host_down(self):
+        fake_network.stub_out_network_cleanup(self.stubs)
+        req = self._create_delete_request(FAKE_UUID)
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+                                               task_state=task_states.DELETING,
+                                               host='fake_host'))
+        self.stubs.Set(objects.Instance, 'save',
+                       lambda *args, **kwargs: None)
+
+        # datetime.min makes the service report as long dead.
+        @classmethod
+        def fake_get_by_compute_host(cls, context, host):
+            return {'updated_at': datetime.datetime.min}
+        self.stubs.Set(objects.Service, 'get_by_compute_host',
+                       fake_get_by_compute_host)
+
+        self.controller.delete(req, FAKE_UUID)
+        # Delete request would be ignored, because it's been accepted before
+        # but since the host is down, api should remove the instance anyway.
+        self.assertTrue(self.server_delete_called)
+
+    def test_delete_server_instance_while_resize(self):
+        req = self._create_delete_request(FAKE_UUID)
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+                                               task_state=task_states.RESIZE_PREP))
+
+        self.controller.delete(req, FAKE_UUID)
+        # Delete should be allowed in any case, even during resizing,
+        # because it may get stuck.
+        self.assertTrue(self.server_delete_called)
+
+    def test_delete_server_instance_if_not_launched(self):
+        self.flags(reclaim_instance_interval=3600)
+        req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
+        req.method = 'DELETE'
+
+        # NOTE(review): the flag reset and destroy stub below duplicate
+        # what setUp already installed; kept byte-identical here.
+        self.server_delete_called = False
+
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       fakes.fake_instance_get(launched_at=None))
+
+        def instance_destroy_mock(*args, **kwargs):
+            self.server_delete_called = True
+            deleted_at = timeutils.utcnow()
+            return fake_instance.fake_db_instance(deleted_at=deleted_at)
+        self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
+
+        self.controller.delete(req, FAKE_UUID)
+        # delete() should be called for instance which has never been active,
+        # even if reclaim_instance_interval has been set.
+        self.assertEqual(self.server_delete_called, True)
+
+
+class ServersControllerRebuildInstanceTest(ControllerTest):
+    """Tests for the 'rebuild' server action (validation failures)."""
+
+    # Image used as the rebuild target throughout these tests.
+    image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+    image_href = 'http://localhost/v2/fake/images/%s' % image_uuid
+
+    def setUp(self):
+        super(ServersControllerRebuildInstanceTest, self).setUp()
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
+        # Baseline valid rebuild body; individual tests mutate it.
+        self.body = {
+            'rebuild': {
+                'name': 'new_name',
+                'imageRef': self.image_href,
+                'metadata': {
+                    'open': 'stack',
+                },
+                'personality': [
+                    {
+                        "path": "/etc/banner.txt",
+                        "contents": "MQ==",
+                    },
+                ],
+            },
+        }
+        self.req = fakes.HTTPRequest.blank('/fake/servers/a/action')
+        self.req.method = 'POST'
+        self.req.headers["content-type"] = "application/json"
+
+    def test_rebuild_instance_with_access_ipv4_bad_format(self):
+        # proper local hrefs must start with 'http://localhost/v2/'
+        self.body['rebuild']['accessIPv4'] = 'bad_format'
+        self.body['rebuild']['accessIPv6'] = 'fead::1234'
+        self.body['rebuild']['metadata']['hello'] = 'world'
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller._action_rebuild,
+                          self.req, FAKE_UUID, self.body)
+
+    def test_rebuild_instance_with_blank_metadata_key(self):
+        # An empty-string metadata key must be rejected with 400.
+        self.body['rebuild']['accessIPv4'] = '0.0.0.0'
+        self.body['rebuild']['accessIPv6'] = 'fead::1234'
+        self.body['rebuild']['metadata'][''] = 'world'
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller._action_rebuild,
+                          self.req, FAKE_UUID, self.body)
+
+    def test_rebuild_instance_with_metadata_key_too_long(self):
+        # Over-long metadata keys map to 413 Request Entity Too Large.
+        self.body['rebuild']['accessIPv4'] = '0.0.0.0'
+        self.body['rebuild']['accessIPv6'] = 'fead::1234'
+        self.body['rebuild']['metadata'][('a' * 260)] = 'world'
+
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+                          self.controller._action_rebuild,
+                          self.req, FAKE_UUID, self.body)
+
+    def test_rebuild_instance_with_metadata_value_too_long(self):
+        self.body['rebuild']['accessIPv4'] = '0.0.0.0'
+        self.body['rebuild']['accessIPv6'] = 'fead::1234'
+        self.body['rebuild']['metadata']['key1'] = ('a' * 260)
+
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+                          self.controller._action_rebuild,
+                          self.req, FAKE_UUID, self.body)
+
+    def test_rebuild_instance_fails_when_min_ram_too_small(self):
+        # make min_ram larger than our instance ram size
+        def fake_get_image(self, context, image_href, **kwargs):
+            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+                        name='public image', is_public=True,
+                        status='active', properties={'key1': 'value1'},
+                        min_ram="4096", min_disk="10")
+
+        self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
+
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller._action_rebuild,
+                          self.req, FAKE_UUID, self.body)
+
+    def test_rebuild_instance_fails_when_min_disk_too_small(self):
+        # make min_disk larger than our instance disk size
+        def fake_get_image(self, context, image_href, **kwargs):
+            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+                        name='public image', is_public=True,
+                        status='active', properties={'key1': 'value1'},
+                        min_ram="128", min_disk="100000")
+
+        self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller._action_rebuild, self.req,
+                          FAKE_UUID, self.body)
+
+    def test_rebuild_instance_image_too_large(self):
+        # make image size larger than our instance disk size
+        size = str(1000 * (1024 ** 3))
+
+        def fake_get_image(self, context, image_href, **kwargs):
+            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+                        name='public image', is_public=True,
+                        status='active', size=size)
+
+        self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller._action_rebuild, self.req, FAKE_UUID, self.body)
+
+    def test_rebuild_instance_with_deleted_image(self):
+        # A non-active (DELETED) image must be rejected with 400.
+        def fake_get_image(self, context, image_href, **kwargs):
+            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+                        name='public image', is_public=True,
+                        status='DELETED')
+
+        self.stubs.Set(fake._FakeImageService, 'show', fake_get_image)
+
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller._action_rebuild, self.req, FAKE_UUID, self.body)
+
+    def test_rebuild_instance_onset_file_limit_over_quota(self):
+        def fake_get_image(self, context, image_href, **kwargs):
+            return dict(id='76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+                        name='public image', is_public=True, status='active')
+
+        # contextlib.nested: Python 2 idiom predating multi-context 'with'.
+        with contextlib.nested(
+            mock.patch.object(fake._FakeImageService, 'show',
+                              side_effect=fake_get_image),
+            mock.patch.object(self.controller.compute_api, 'rebuild',
+                              side_effect=exception.OnsetFileLimitExceeded)
+        ) as (
+            show_mock, rebuild_mock
+        ):
+            self.req.body = jsonutils.dumps(self.body)
+            self.assertRaises(webob.exc.HTTPForbidden,
+                              self.controller._action_rebuild,
+                              self.req, FAKE_UUID, body=self.body)
+
+    def test_rebuild_instance_with_access_ipv6_bad_format(self):
+        # proper local hrefs must start with 'http://localhost/v2/'
+        self.body['rebuild']['accessIPv4'] = '1.2.3.4'
+        self.body['rebuild']['accessIPv6'] = 'bad_format'
+        self.body['rebuild']['metadata']['hello'] = 'world'
+        self.req.body = jsonutils.dumps(self.body)
+        self.req.headers["content-type"] = "application/json"
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller._action_rebuild,
+                          self.req, FAKE_UUID, self.body)
+
+    def test_rebuild_instance_with_null_image_ref(self):
+        self.body['rebuild']['imageRef'] = None
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller._action_rebuild, self.req, FAKE_UUID,
+                          self.body)
+
+
+class ServerStatusTest(test.TestCase):
+    """Verify the mapping from (vm_state, task_state) to the API 'status'
+    field, plus policy failures for the related server actions.
+    """
+
+    def setUp(self):
+        super(ServerStatusTest, self).setUp()
+        fakes.stub_out_nw_api(self.stubs)
+
+        self.ext_mgr = extensions.ExtensionManager()
+        self.ext_mgr.extensions = {}
+        self.controller = servers.Controller(self.ext_mgr)
+
+        def _fake_get_server(context, req, id):
+            return fakes.stub_instance(id)
+
+        self.stubs.Set(self.controller, '_get_server', _fake_get_server)
+
+    def _get_with_state(self, vm_state, task_state=None):
+        # Show the server while the db stub reports the given states;
+        # callers assert on the resulting 'status' value.
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       fakes.fake_instance_get(vm_state=vm_state,
+                                               task_state=task_state))
+
+        request = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
+        return self.controller.show(request, FAKE_UUID)
+
+    def _req_with_policy_fail(self, policy_rule_name):
+        # Restrict the given compute policy to admins so the non-admin
+        # request built here must fail authorization.
+        rule = {'compute:%s' % policy_rule_name:
+                    common_policy.parse_rule('role:admin')}
+        policy.set_rules(rule)
+        return fakes.HTTPRequest.blank('/fake/servers/1234/action')
+
+    def test_active(self):
+        response = self._get_with_state(vm_states.ACTIVE)
+        self.assertEqual(response['server']['status'], 'ACTIVE')
+
+    def test_reboot(self):
+        response = self._get_with_state(vm_states.ACTIVE,
+                                        task_states.REBOOTING)
+        self.assertEqual(response['server']['status'], 'REBOOT')
+
+    def test_reboot_hard(self):
+        response = self._get_with_state(vm_states.ACTIVE,
+                                        task_states.REBOOTING_HARD)
+        self.assertEqual(response['server']['status'], 'HARD_REBOOT')
+
+    def test_reboot_resize_policy_fail(self):
+        req = self._req_with_policy_fail('reboot')
+        self.assertRaises(exception.PolicyNotAuthorized,
+                          self.controller._action_reboot, req, '1234',
+                          {'reboot': {'type': 'HARD'}})
+
+    def test_rebuild(self):
+        response = self._get_with_state(vm_states.ACTIVE,
+                                        task_states.REBUILDING)
+        self.assertEqual(response['server']['status'], 'REBUILD')
+
+    def test_rebuild_error(self):
+        response = self._get_with_state(vm_states.ERROR)
+        self.assertEqual(response['server']['status'], 'ERROR')
+
+    def test_resize(self):
+        response = self._get_with_state(vm_states.ACTIVE,
+                                        task_states.RESIZE_PREP)
+        self.assertEqual(response['server']['status'], 'RESIZE')
+
+    def test_confirm_resize_policy_fail(self):
+        req = self._req_with_policy_fail('confirm_resize')
+        self.assertRaises(exception.PolicyNotAuthorized,
+                          self.controller._action_confirm_resize, req, '1234', {})
+
+    def test_verify_resize(self):
+        response = self._get_with_state(vm_states.RESIZED, None)
+        self.assertEqual(response['server']['status'], 'VERIFY_RESIZE')
+
+    def test_revert_resize(self):
+        response = self._get_with_state(vm_states.RESIZED,
+                                        task_states.RESIZE_REVERTING)
+        self.assertEqual(response['server']['status'], 'REVERT_RESIZE')
+
+    def test_revert_resize_policy_fail(self):
+        req = self._req_with_policy_fail('revert_resize')
+        self.assertRaises(exception.PolicyNotAuthorized,
+                          self.controller._action_revert_resize, req, '1234', {})
+
+    def test_password_update(self):
+        response = self._get_with_state(vm_states.ACTIVE,
+                                        task_states.UPDATING_PASSWORD)
+        self.assertEqual(response['server']['status'], 'PASSWORD')
+
+    def test_stopped(self):
+        response = self._get_with_state(vm_states.STOPPED)
+        self.assertEqual(response['server']['status'], 'SHUTOFF')
+
+
+class ServersControllerCreateTest(test.TestCase):
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/123/flavors/3'
+
+ def setUp(self):
+ """Shared implementation for tests below that create instance."""
+ super(ServersControllerCreateTest, self).setUp()
+
+ self.flags(verbose=True,
+ enable_instance_password=True)
+ self.instance_cache_num = 0
+ self.instance_cache_by_id = {}
+ self.instance_cache_by_uuid = {}
+
+ fakes.stub_out_nw_api(self.stubs)
+
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = servers.Controller(self.ext_mgr)
+
+ self.volume_id = 'fake'
+
+ def instance_create(context, inst):
+ inst_type = flavors.get_flavor_by_flavor_id(3)
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ def_image_ref = 'http://localhost/images/%s' % image_uuid
+ self.instance_cache_num += 1
+ instance = fake_instance.fake_db_instance(**{
+ 'id': self.instance_cache_num,
+ 'display_name': inst['display_name'] or 'test',
+ 'uuid': FAKE_UUID,
+ 'instance_type': inst_type,
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fead::1234',
+ 'image_ref': inst.get('image_ref', def_image_ref),
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'reservation_id': inst['reservation_id'],
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "config_drive": None,
+ "progress": 0,
+ "fixed_ips": [],
+ "task_state": "",
+ "vm_state": "",
+ "root_device_name": inst.get('root_device_name', 'vda'),
+ "security_groups": inst['security_groups'],
+ })
+
+ self.instance_cache_by_id[instance['id']] = instance
+ self.instance_cache_by_uuid[instance['uuid']] = instance
+ return instance
+
+ def instance_get(context, instance_id):
+ """Stub for compute/api create() pulling in instance after
+ scheduling
+ """
+ return self.instance_cache_by_id[instance_id]
+
+ def instance_update(context, uuid, values):
+ instance = self.instance_cache_by_uuid[uuid]
+ instance.update(values)
+ return instance
+
+ def server_update(context, instance_uuid, params, update_cells=False):
+ inst = self.instance_cache_by_uuid[instance_uuid]
+ inst.update(params)
+ return inst
+
+ def server_update_and_get_original(
+ context, instance_uuid, params, update_cells=False,
+ columns_to_join=None):
+ inst = self.instance_cache_by_uuid[instance_uuid]
+ inst.update(params)
+ return (inst, inst)
+
+ def fake_method(*args, **kwargs):
+ pass
+
+ def project_get_networks(context, user_id):
+ return dict(id='1', host='localhost')
+
+ def queue_get_for(context, *args):
+ return 'network_topic'
+
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+ self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
+ self.stubs.Set(db, 'instance_add_security_group',
+ return_security_group)
+ self.stubs.Set(db, 'project_get_networks',
+ project_get_networks)
+ self.stubs.Set(db, 'instance_create', instance_create)
+ self.stubs.Set(db, 'instance_system_metadata_update',
+ fake_method)
+ self.stubs.Set(db, 'instance_get', instance_get)
+ self.stubs.Set(db, 'instance_update', instance_update)
+ self.stubs.Set(db, 'instance_update_and_get_original',
+ server_update_and_get_original)
+ self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
+ fake_method)
+ self.body = {
+ 'server': {
+ 'min_count': 2,
+ 'name': 'server_test',
+ 'imageRef': self.image_uuid,
+ 'flavorRef': self.flavor_ref,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ 'personality': [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "MQ==",
+ },
+ ],
+ },
+ }
+ self.bdm = [{'delete_on_termination': 1,
+ 'device_name': 123,
+ 'volume_size': 1,
+ 'volume_id': '11111111-1111-1111-1111-111111111111'}]
+
+ self.req = fakes.HTTPRequest.blank('/fake/servers')
+ self.req.method = 'POST'
+ self.req.headers["content-type"] = "application/json"
+
+ def _check_admin_pass_len(self, server_dict):
+ """utility function - check server_dict for adminPass length."""
+ self.assertEqual(CONF.password_length,
+ len(server_dict["adminPass"]))
+
+ def _check_admin_pass_missing(self, server_dict):
+ """utility function - check server_dict for absence of adminPass."""
+ self.assertNotIn("adminPass", server_dict)
+
+    def _test_create_instance(self, flavor=2):
+        # Create a server with the given flavor and check the response
+        # carries the fake uuid and a generated adminPass.
+        image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+        self.body['server']['imageRef'] = image_uuid
+        self.body['server']['flavorRef'] = flavor
+        self.req.body = jsonutils.dumps(self.body)
+        server = self.controller.create(self.req, self.body).obj['server']
+        self._check_admin_pass_len(server)
+        self.assertEqual(FAKE_UUID, server['id'])
+
+    def test_create_instance_private_flavor(self):
+        # A flavor created with is_public=False must be rejected with 400.
+        values = {
+            'name': 'fake_name',
+            'memory_mb': 512,
+            'vcpus': 1,
+            'root_gb': 10,
+            'ephemeral_gb': 10,
+            'flavorid': '1324',
+            'swap': 0,
+            'rxtx_factor': 0.5,
+            'vcpu_weight': 1,
+            'disabled': False,
+            'is_public': False,
+        }
+        db.flavor_create(context.get_admin_context(), values)
+        self.assertRaises(webob.exc.HTTPBadRequest, self._test_create_instance,
+                          flavor=1324)
+
+ def test_create_server_bad_image_href(self):
+ image_href = 1
+ self.body['server']['imageRef'] = image_href,
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ self.req, self.body)
+
+    def test_create_server_with_invalid_networks_parameter(self):
+        # networks must be a list; a bare dict must yield a 400.
+        self.ext_mgr.extensions = {'os-networks': 'fake'}
+        self.body['server']['networks'] = {
+            'uuid': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'}
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller.create,
+                          self.req,
+                          self.body)
+
+    def test_create_server_with_deleted_image(self):
+        image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+        # Get the fake image service so we can set the status to deleted
+        (image_service, image_id) = glance.get_remote_image_service(
+            context, '')
+        image_service.update(context, image_uuid, {'status': 'DELETED'})
+        # Restore the shared fake image's status after the test.
+        self.addCleanup(image_service.update, context, image_uuid,
+                        {'status': 'active'})
+
+        self.body['server']['flavorRef'] = 2
+        self.req.body = jsonutils.dumps(self.body)
+        with testtools.ExpectedException(
+                webob.exc.HTTPBadRequest,
+                'Image 76fa36fc-c930-4bf3-8c8a-ea2a2420deb6 is not active.'):
+            self.controller.create(self.req, self.body)
+
+    def test_create_server_image_too_large(self):
+        image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+        # Inflate the fake image's size far beyond the flavor's disk.
+        (image_service, image_id) = glance.get_remote_image_service(context,
+                                                                    image_uuid)
+        image = image_service.show(context, image_id)
+        orig_size = image['size']
+        new_size = str(1000 * (1024 ** 3))
+        image_service.update(context, image_uuid, {'size': new_size})
+
+        # Restore the shared fake image's size after the test.
+        self.addCleanup(image_service.update, context, image_uuid,
+                        {'size': orig_size})
+
+        self.body['server']['flavorRef'] = 2
+        self.req.body = jsonutils.dumps(self.body)
+        with testtools.ExpectedException(
+                webob.exc.HTTPBadRequest,
+                "Flavor's disk is too small for requested image."):
+            self.controller.create(self.req, self.body)
+
+    def test_create_instance_invalid_negative_min(self):
+        # A negative min_count must be rejected with 400.
+        self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+        self.body['server']['min_count'] = -1
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller.create,
+                          self.req,
+                          self.body)
+
+    def test_create_instance_invalid_negative_max(self):
+        # A negative max_count must be rejected with 400.
+        self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+        self.body['server']['max_count'] = -1
+        self.req.body = jsonutils.dumps(self.body)
+        self.assertRaises(webob.exc.HTTPBadRequest,
+                          self.controller.create,
+                          self.req,
+                          self.body)
+
+ def test_create_instance_invalid_alpha_min(self):
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ self.body['server']['min_count'] = 'abcd',
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ self.req,
+ self.body)
+
+ def test_create_instance_invalid_alpha_max(self):
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ self.body['server']['max_count'] = 'abcd',
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ self.req,
+ self.body)
+
+ def test_create_multiple_instances(self):
+ """Test creating multiple instances but not asking for
+ reservation_id
+ """
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+ self.assertEqual(FAKE_UUID, res["server"]["id"])
+ self._check_admin_pass_len(res["server"])
+
+ def test_create_multiple_instances_pass_disabled(self):
+ """Test creating multiple instances but not asking for
+ reservation_id
+ """
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ self.flags(enable_instance_password=False)
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+ self.assertEqual(FAKE_UUID, res["server"]["id"])
+ self._check_admin_pass_missing(res["server"])
+
+ def test_create_multiple_instances_resv_id_return(self):
+ """Test creating multiple instances with asking for
+ reservation_id
+ """
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ self.body['server']['return_reservation_id'] = True
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body)
+ reservation_id = res.obj.get('reservation_id')
+ self.assertNotEqual(reservation_id, "")
+ self.assertIsNotNone(reservation_id)
+ self.assertTrue(len(reservation_id) > 1)
+
+ def test_create_multiple_instances_with_multiple_volume_bdm(self):
+ """Test that a BadRequest is raised if multiple instances
+ are requested with a list of block device mappings for volumes.
+ """
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ min_count = 2
+ bdm = [{'device_name': 'foo1', 'volume_id': 'vol-xxxx'},
+ {'device_name': 'foo2', 'volume_id': 'vol-yyyy'}
+ ]
+ params = {
+ 'block_device_mapping': bdm,
+ 'min_count': min_count
+ }
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['min_count'], 2)
+ self.assertEqual(len(kwargs['block_device_mapping']), 2)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params, no_image=True)
+
+ def test_create_multiple_instances_with_single_volume_bdm(self):
+ """Test that a BadRequest is raised if multiple instances
+ are requested to boot from a single volume.
+ """
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ min_count = 2
+ bdm = [{'device_name': 'foo1', 'volume_id': 'vol-xxxx'}]
+ params = {
+ 'block_device_mapping': bdm,
+ 'min_count': min_count
+ }
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['min_count'], 2)
+ self.assertEqual(kwargs['block_device_mapping']['volume_id'],
+ 'vol-xxxx')
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params, no_image=True)
+
+ def test_create_multiple_instance_with_non_integer_max_count(self):
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ self.body['server']['max_count'] = 2.5
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_multiple_instance_with_non_integer_min_count(self):
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ self.body['server']['min_count'] = 2.5
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_image_ref_is_bookmark(self):
+ image_href = 'http://localhost/fake/images/%s' % self.image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+ server = res['server']
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_image_ref_is_invalid(self):
+ image_uuid = 'this_is_not_a_valid_uuid'
+ image_href = 'http://localhost/fake/images/%s' % image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ self.req, self.body)
+
+ def test_create_instance_no_key_pair(self):
+ fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
+ self._test_create_instance()
+
+ def _test_create_extra(self, params, no_image=False):
+ self.body['server']['flavorRef'] = 2
+ if no_image:
+ self.body['server'].pop('imageRef', None)
+ self.body['server'].update(params)
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertIn('server',
+ self.controller.create(self.req, self.body).obj)
+
+ def test_create_instance_with_security_group_enabled(self):
+ self.ext_mgr.extensions = {'os-security-groups': 'fake'}
+ group = 'foo'
+ old_create = compute_api.API.create
+
+ def sec_group_get(ctx, proj, name):
+ if name == group:
+ return True
+ else:
+ raise exception.SecurityGroupNotFoundForProject(
+ project_id=proj, security_group_id=name)
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['security_group'], [group])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(db, 'security_group_get_by_name', sec_group_get)
+ # negative test
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra,
+ {'security_groups': [{'name': 'bogus'}]})
+ # positive test - extra assert in create path
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra({'security_groups': [{'name': group}]})
+
+ def test_create_instance_with_non_unique_secgroup_name(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network}]
+ params = {'networks': requested_networks,
+ 'security_groups': [{'name': 'dup'}, {'name': 'dup'}]}
+
+ def fake_create(*args, **kwargs):
+ raise exception.NoUniqueMatch("No Unique match found for ...")
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self._test_create_extra, params)
+
+ def test_create_instance_with_port_with_no_fixed_ips(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ port_id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'port': port_id}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ raise exception.PortRequiresFixedIP(port_id=port_id)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ @mock.patch.object(compute_api.API, 'create')
+ def test_create_instance_raise_user_data_too_large(self, mock_create):
+ mock_create.side_effect = exception.InstanceUserDataTooLarge(
+ maxsize=1, length=2)
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ self.req, self.body)
+
+ @mock.patch.object(compute_api.API, 'create')
+ def test_create_instance_raise_auto_disk_config_exc(self, mock_create):
+ mock_create.side_effect = exception.AutoDiskConfigDisabledByImage(
+ image='dummy')
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ self.req, self.body)
+
+ @mock.patch.object(compute_api.API, 'create',
+ side_effect=exception.InstanceExists(
+ name='instance-name'))
+ def test_create_instance_raise_instance_exists(self, mock_create):
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller.create,
+ self.req, self.body)
+
+ def test_create_instance_with_network_with_no_subnet(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ raise exception.NetworkRequiresSubnet(network_uuid=network)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ def test_create_instance_with_access_ip(self):
+ self.body['server']['accessIPv4'] = '1.2.3.4'
+ self.body['server']['accessIPv6'] = 'fead::1234'
+
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+ server = res['server']
+ self._check_admin_pass_len(server)
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_with_access_ip_pass_disabled(self):
+        # test with admin passwords disabled. See lp bug 921814
+ self.flags(enable_instance_password=False)
+ self.body['server']['accessIPv4'] = '1.2.3.4'
+ self.body['server']['accessIPv6'] = 'fead::1234'
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+
+ server = res['server']
+ self._check_admin_pass_missing(server)
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_bad_format_access_ip_v4(self):
+ self.body['server']['accessIPv4'] = 'bad_format'
+ self.body['server']['accessIPv6'] = 'fead::1234'
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ self.req, self.body)
+
+ def test_create_instance_bad_format_access_ip_v6(self):
+ self.body['server']['accessIPv4'] = '1.2.3.4'
+ self.body['server']['accessIPv6'] = 'bad_format'
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ self.req, self.body)
+
+ def test_create_instance_name_all_blank_spaces(self):
+ self.body['server']['name'] = ' ' * 64
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_name_too_long(self):
+ self.body['server']['name'] = 'X' * 256
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ self.req, self.body)
+
+ def test_create_instance(self):
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+
+ server = res['server']
+ self._check_admin_pass_len(server)
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_pass_disabled(self):
+ self.flags(enable_instance_password=False)
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+
+ server = res['server']
+ self._check_admin_pass_missing(server)
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ @mock.patch('nova.virt.hardware.VirtNUMAInstanceTopology.get_constraints')
+ def test_create_instance_numa_topology_wrong(self, numa_constraints_mock):
+ numa_constraints_mock.side_effect = (
+ exception.ImageNUMATopologyIncomplete)
+ image_href = 'http://localhost/v2/images/%s' % self.image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_too_much_metadata(self):
+ self.flags(quota_metadata_items=1)
+ self.body['server']['metadata']['vote'] = 'fiddletown'
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_metadata_key_too_long(self):
+ self.flags(quota_metadata_items=1)
+ self.body['server']['metadata'] = {('a' * 260): '12345'}
+
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_metadata_value_too_long(self):
+ self.flags(quota_metadata_items=1)
+ self.body['server']['metadata'] = {'key1': ('a' * 260)}
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_metadata_key_blank(self):
+ self.flags(quota_metadata_items=1)
+ self.body['server']['metadata'] = {'': 'abcd'}
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_metadata_not_dict(self):
+ self.flags(quota_metadata_items=1)
+ self.body['server']['metadata'] = 'string'
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_metadata_key_not_string(self):
+ self.flags(quota_metadata_items=1)
+ self.body['server']['metadata'] = {1: 'test'}
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_metadata_value_not_string(self):
+ self.flags(quota_metadata_items=1)
+ self.body['server']['metadata'] = {'test': ['a', 'list']}
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_user_data_malformed_bad_request(self):
+ self.ext_mgr.extensions = {'os-user-data': 'fake'}
+ params = {'user_data': 'u1234!'}
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ @mock.patch('nova.compute.api.API.create',
+ side_effect=exception.KeypairNotFound(name='nonexistentkey',
+ user_id=1))
+ def test_create_instance_invalid_key_name(self, mock_create):
+ self.body['server']['key_name'] = 'nonexistentkey'
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_valid_key_name(self):
+ self.body['server']['key_name'] = 'key'
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+
+ self.assertEqual(FAKE_UUID, res["server"]["id"])
+ self._check_admin_pass_len(res["server"])
+
+ def test_create_instance_invalid_flavor_href(self):
+ flavor_ref = 'http://localhost/v2/flavors/asdf'
+ self.body['server']['flavorRef'] = flavor_ref
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_invalid_flavor_id_int(self):
+ flavor_ref = -1
+ self.body['server']['flavorRef'] = flavor_ref
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_bad_flavor_href(self):
+ flavor_ref = 'http://localhost/v2/flavors/17'
+ self.body['server']['flavorRef'] = flavor_ref
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_with_config_drive(self):
+ self.ext_mgr.extensions = {'os-config-drive': 'fake'}
+ self.body['server']['config_drive'] = "true"
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+ server = res['server']
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_with_bad_config_drive(self):
+ self.ext_mgr.extensions = {'os-config-drive': 'fake'}
+ self.body['server']['config_drive'] = 'adcd'
+ self.req.body = jsonutils.dumps(self.body)
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_without_config_drive(self):
+ self.ext_mgr.extensions = {'os-config-drive': 'fake'}
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+ server = res['server']
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_with_config_drive_disabled(self):
+ config_drive = [{'config_drive': 'foo'}]
+ params = {'config_drive': config_drive}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIsNone(kwargs['config_drive'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_bad_href(self):
+ image_href = 'asdf'
+ self.body['server']['imageRef'] = image_href
+ self.req.body = jsonutils.dumps(self.body)
+
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_instance_local_href(self):
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+
+ server = res['server']
+ self.assertEqual(FAKE_UUID, server['id'])
+
+ def test_create_instance_admin_pass(self):
+ self.body['server']['flavorRef'] = 3,
+ self.body['server']['adminPass'] = 'testpass'
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+
+ server = res['server']
+ self.assertEqual(server['adminPass'], self.body['server']['adminPass'])
+
+ def test_create_instance_admin_pass_pass_disabled(self):
+ self.flags(enable_instance_password=False)
+ self.body['server']['flavorRef'] = 3,
+ self.body['server']['adminPass'] = 'testpass'
+ self.req.body = jsonutils.dumps(self.body)
+ res = self.controller.create(self.req, self.body).obj
+
+ server = res['server']
+ self.assertIn('adminPass', self.body['server'])
+ self.assertNotIn('adminPass', server)
+
+ def test_create_instance_admin_pass_empty(self):
+ self.body['server']['flavorRef'] = 3,
+ self.body['server']['adminPass'] = ''
+ self.req.body = jsonutils.dumps(self.body)
+
+ # The fact that the action doesn't raise is enough validation
+ self.controller.create(self.req, self.body)
+
+ def test_create_instance_with_security_group_disabled(self):
+ group = 'foo'
+ params = {'security_groups': [{'name': group}]}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ # NOTE(vish): if the security groups extension is not
+ # enabled, then security groups passed in
+ # are ignored.
+ self.assertEqual(kwargs['security_group'], ['default'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_disk_config_enabled(self):
+ self.ext_mgr.extensions = {'OS-DCF': 'fake'}
+ # NOTE(vish): the extension converts OS-DCF:disk_config into
+ # auto_disk_config, so we are testing with
+        #              the internal value
+ params = {'auto_disk_config': 'AUTO'}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['auto_disk_config'], 'AUTO')
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_disk_config_disabled(self):
+ params = {'auto_disk_config': True}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['auto_disk_config'], False)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_scheduler_hints_enabled(self):
+ self.ext_mgr.extensions = {'OS-SCH-HNT': 'fake'}
+ hints = {'a': 'b'}
+ params = {'scheduler_hints': hints}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['scheduler_hints'], hints)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_scheduler_hints_disabled(self):
+ hints = {'a': 'b'}
+ params = {'scheduler_hints': hints}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['scheduler_hints'], {})
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_volumes_enabled_no_image(self):
+ """Test that the create will fail if there is no image
+ and no bdms supplied in the request
+ """
+ self.ext_mgr.extensions = {'os-volumes': 'fake'}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertNotIn('imageRef', kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, {}, no_image=True)
+
+ def test_create_instance_with_bdm_v2_enabled_no_image(self):
+ self.ext_mgr.extensions = {'os-block-device-mapping-v2-boot': 'fake'}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertNotIn('imageRef', kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, {}, no_image=True)
+
+ def test_create_instance_with_user_data_enabled(self):
+ self.ext_mgr.extensions = {'os-user-data': 'fake'}
+ user_data = 'fake'
+ params = {'user_data': user_data}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['user_data'], user_data)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_user_data_disabled(self):
+ user_data = 'fake'
+ params = {'user_data': user_data}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIsNone(kwargs['user_data'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_keypairs_enabled(self):
+ self.ext_mgr.extensions = {'os-keypairs': 'fake'}
+ key_name = 'green'
+
+ params = {'key_name': key_name}
+ old_create = compute_api.API.create
+
+ # NOTE(sdague): key pair goes back to the database,
+ # so we need to stub it out for tests
+ def key_pair_get(context, user_id, name):
+ return dict(test_keypair.fake_keypair,
+ public_key='FAKE_KEY',
+ fingerprint='FAKE_FINGERPRINT',
+ name=name)
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['key_name'], key_name)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(db, 'key_pair_get', key_pair_get)
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_keypairs_disabled(self):
+ key_name = 'green'
+
+ params = {'key_name': key_name}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIsNone(kwargs['key_name'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_availability_zone_enabled(self):
+ self.ext_mgr.extensions = {'os-availability-zone': 'fake'}
+ availability_zone = 'fake'
+ params = {'availability_zone': availability_zone}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['availability_zone'], availability_zone)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+
+ try:
+ self._test_create_extra(params)
+ except webob.exc.HTTPBadRequest as e:
+ expected = 'The requested availability zone is not available'
+ self.assertEqual(e.explanation, expected)
+ admin_context = context.get_admin_context()
+ db.service_create(admin_context, {'host': 'host1_zones',
+ 'binary': "nova-compute",
+ 'topic': 'compute',
+ 'report_count': 0})
+ agg = db.aggregate_create(admin_context,
+ {'name': 'agg1'}, {'availability_zone': availability_zone})
+ db.aggregate_host_add(admin_context, agg['id'], 'host1_zones')
+ self._test_create_extra(params)
+
+ def test_create_instance_with_availability_zone_disabled(self):
+ availability_zone = 'fake'
+ params = {'availability_zone': availability_zone}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIsNone(kwargs['availability_zone'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_multiple_create_enabled(self):
+ self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
+ min_count = 2
+ max_count = 3
+ params = {
+ 'min_count': min_count,
+ 'max_count': max_count,
+ }
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['min_count'], 2)
+ self.assertEqual(kwargs['max_count'], 3)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_multiple_create_disabled(self):
+ min_count = 2
+ max_count = 3
+ params = {
+ 'min_count': min_count,
+ 'max_count': max_count,
+ }
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertEqual(kwargs['min_count'], 1)
+ self.assertEqual(kwargs['max_count'], 1)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_networks_enabled(self):
+ self.ext_mgr.extensions = {'os-networks': 'fake'}
+ net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ requested_networks = [{'uuid': net_uuid}]
+ params = {'networks': requested_networks}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None)]
+ self.assertEqual(result, kwargs['requested_networks'].as_tuples())
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_neutronv2_port_in_use(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network, 'port': port}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ raise exception.PortInUse(port_id=port)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self._test_create_extra, params)
+
+    def test_create_instance_with_neutronv2_not_found_network(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ requested_networks = [{'uuid': network}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ raise exception.NetworkNotFound(network_id=network)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ def test_create_instance_with_neutronv2_port_not_found(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ requested_networks = [{'uuid': network, 'port': port}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ raise exception.PortNotFound(port_id=port)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ @mock.patch.object(compute_api.API, 'create')
+ def test_create_multiple_instance_with_specified_ip_neutronv2(self,
+ _api_mock):
+ _api_mock.side_effect = exception.InvalidFixedIpAndMaxCountRequest(
+ reason="")
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ address = '10.0.0.1'
+ self.body['server']['max_count'] = 2
+ requested_networks = [{'uuid': network, 'fixed_ip': address,
+ 'port': port}]
+ params = {'networks': requested_networks}
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ def test_create_multiple_instance_with_neutronv2_port(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ self.body['server']['max_count'] = 2
+ requested_networks = [{'uuid': network, 'port': port}]
+ params = {'networks': requested_networks}
+
+ def fake_create(*args, **kwargs):
+ msg = _("Unable to launch multiple instances with"
+ " a single configured port ID. Please launch your"
+ " instance one by one with different ports.")
+ raise exception.MultiplePortsNotApplicable(reason=msg)
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._test_create_extra, params)
+
+ def test_create_instance_with_networks_disabled_neutronv2(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ requested_networks = [{'uuid': net_uuid}]
+ params = {'networks': requested_networks}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ result = [('76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', None,
+ None, None)]
+ self.assertEqual(result, kwargs['requested_networks'].as_tuples())
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_networks_disabled(self):
+ self.ext_mgr.extensions = {}
+ net_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ requested_networks = [{'uuid': net_uuid}]
+ params = {'networks': requested_networks}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIsNone(kwargs['requested_networks'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_invalid_personality(self):
+
+ def fake_create(*args, **kwargs):
+ codec = 'utf8'
+ content = 'b25zLiINCg0KLVJpY2hhcmQgQ$$%QQmFjaA=='
+ start_position = 19
+ end_position = 20
+ msg = 'invalid start byte'
+ raise UnicodeDecodeError(codec, content, start_position,
+ end_position, msg)
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+ self.body['server']['personality'] = [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "b25zLiINCg0KLVJpY2hhcmQgQ$$%QQmFjaA==",
+ },
+ ]
+ self.req.body = jsonutils.dumps(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, self.body)
+
+ def test_create_location(self):
+ selfhref = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
+ image_href = 'http://localhost/v2/images/%s' % self.image_uuid
+ self.body['server']['imageRef'] = image_href
+ self.req.body = jsonutils.dumps(self.body)
+ robj = self.controller.create(self.req, self.body)
+ self.assertEqual(robj['Location'], selfhref)
+
+ def _do_test_create_instance_above_quota(self, resource, allowed, quota,
+ expected_msg):
+ fakes.stub_out_instance_quota(self.stubs, allowed, quota, resource)
+ self.body['server']['flavorRef'] = 3
+ self.req.body = jsonutils.dumps(self.body)
+ try:
+ self.controller.create(self.req, self.body).obj['server']
+ self.fail('expected quota to be exceeded')
+ except webob.exc.HTTPForbidden as e:
+ self.assertEqual(e.explanation, expected_msg)
+
+ def test_create_instance_above_quota_instances(self):
+ msg = _('Quota exceeded for instances: Requested 1, but'
+ ' already used 10 of 10 instances')
+ self._do_test_create_instance_above_quota('instances', 0, 10, msg)
+
+ def test_create_instance_above_quota_ram(self):
+ msg = _('Quota exceeded for ram: Requested 4096, but'
+ ' already used 8192 of 10240 ram')
+ self._do_test_create_instance_above_quota('ram', 2048, 10 * 1024, msg)
+
+ def test_create_instance_above_quota_cores(self):
+ msg = _('Quota exceeded for cores: Requested 2, but'
+ ' already used 9 of 10 cores')
+ self._do_test_create_instance_above_quota('cores', 1, 10, msg)
+
+ def test_create_instance_above_quota_group_members(self):
+ ctxt = context.get_admin_context()
+ fake_group = objects.InstanceGroup(ctxt)
+ fake_group.create()
+
+ def fake_count(context, name, group, user_id):
+ self.assertEqual(name, "server_group_members")
+ self.assertEqual(group.uuid, fake_group.uuid)
+ self.assertEqual(user_id,
+ self.req.environ['nova.context'].user_id)
+ return 10
+
+ def fake_limit_check(context, **kwargs):
+ if 'server_group_members' in kwargs:
+ raise exception.OverQuota(overs={})
+
+ def fake_instance_destroy(context, uuid, constraint):
+ return fakes.stub_instance(1)
+
+ self.stubs.Set(fakes.QUOTAS, 'count', fake_count)
+ self.stubs.Set(fakes.QUOTAS, 'limit_check', fake_limit_check)
+ self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
+ self.ext_mgr.extensions = {'OS-SCH-HNT': 'fake',
+ 'os-server-group-quotas': 'fake'}
+ self.body['server']['scheduler_hints'] = {'group': fake_group.uuid}
+ self.req.body = jsonutils.dumps(self.body)
+
+ expected_msg = "Quota exceeded, too many servers in group"
+
+ try:
+ self.controller.create(self.req, self.body).obj['server']
+ self.fail('expected quota to be exceeded')
+ except webob.exc.HTTPForbidden as e:
+ self.assertEqual(e.explanation, expected_msg)
+
+ def test_create_instance_above_quota_server_groups(self):
+
+ def fake_reserve(contex, **deltas):
+ if 'server_groups' in deltas:
+ raise exception.OverQuota(overs={})
+
+ def fake_instance_destroy(context, uuid, constraint):
+ return fakes.stub_instance(1)
+
+ self.stubs.Set(fakes.QUOTAS, 'reserve', fake_reserve)
+ self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
+ self.ext_mgr.extensions = {'OS-SCH-HNT': 'fake',
+ 'os-server-group-quotas': 'fake'}
+ self.body['server']['scheduler_hints'] = {'group': 'fake-group'}
+ self.req.body = jsonutils.dumps(self.body)
+
+ expected_msg = "Quota exceeded, too many server groups."
+
+ try:
+ self.controller.create(self.req, self.body).obj['server']
+ self.fail('expected quota to be exceeded')
+ except webob.exc.HTTPForbidden as e:
+ self.assertEqual(e.explanation, expected_msg)
+
+
class ServersControllerCreateTestWithMock(test.TestCase):
    """Server-create tests that drive compute_api.API via mock.patch."""

    image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
    flavor_ref = 'http://localhost/123/flavors/3'

    def setUp(self):
        """Shared implementation for tests below that create instance."""
        super(ServersControllerCreateTestWithMock, self).setUp()

        self.flags(verbose=True, enable_instance_password=True)
        self.instance_cache_num = 0
        self.instance_cache_by_id = {}
        self.instance_cache_by_uuid = {}

        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = servers.Controller(self.ext_mgr)

        self.volume_id = 'fake'

        # Minimal valid create request body; individual tests tweak it.
        personality = [{"path": "/etc/banner.txt", "contents": "MQ=="}]
        self.body = {
            'server': {
                'min_count': 2,
                'name': 'server_test',
                'imageRef': self.image_uuid,
                'flavorRef': self.flavor_ref,
                'metadata': {'hello': 'world', 'open': 'stack'},
                'personality': personality,
            },
        }

        self.req = fakes.HTTPRequest.blank('/fake/servers')
        self.req.method = 'POST'
        self.req.headers["content-type"] = "application/json"

    def _test_create_extra(self, params, no_image=False):
        # Merge extra parameters into the base request body and POST it.
        self.body['server']['flavorRef'] = 2
        if no_image:
            self.body['server'].pop('imageRef', None)
        self.body['server'].update(params)
        self.req.body = jsonutils.dumps(self.body)
        self.controller.create(self.req, self.body).obj['server']

    @mock.patch.object(compute_api.API, 'create')
    def test_create_instance_with_neutronv2_fixed_ip_already_in_use(self,
            create_mock):
        # A FixedIpAlreadyInUse from compute should surface as a 400.
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        network_id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
        address = '10.0.2.3'
        params = {'networks': [{'uuid': network_id, 'fixed_ip': address}]}
        create_mock.side_effect = exception.FixedIpAlreadyInUse(
            address=address,
            instance_uuid=network_id)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._test_create_extra, params)
        self.assertEqual(1, len(create_mock.call_args_list))

    @mock.patch.object(compute_api.API, 'create',
                       side_effect=exception.InvalidVolume(reason='error'))
    def test_create_instance_with_invalid_volume_error(self, create_mock):
        # Tests that InvalidVolume is translated to a 400 error.
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._test_create_extra, {})
+
+
class TestServerCreateRequestXMLDeserializer(test.TestCase):
    """XML deserialization tests for server-create requests.

    Each test feeds a raw XML document to servers.CreateDeserializer and
    checks the resulting request body dict under the 'body' key.
    """

    def setUp(self):
        super(TestServerCreateRequestXMLDeserializer, self).setUp()
        self.deserializer = servers.CreateDeserializer()

    def test_minimal_request(self):
        # Only the three required attributes: name, imageRef, flavorRef.
        serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
 name="new-server-test"
 imageRef="1"
 flavorRef="2"/>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "server": {
                "name": "new-server-test",
                "imageRef": "1",
                "flavorRef": "2",
            },
        }
        self.assertEqual(request['body'], expected)

    def test_request_with_alternate_namespace_prefix(self):
        # A non-default namespace prefix (ns2:) must deserialize the same.
        serial_request = """
<ns2:server xmlns:ns2="http://docs.openstack.org/compute/api/v2"
 name="new-server-test"
 imageRef="1"
 flavorRef="2">
 <ns2:metadata><ns2:meta key="hello">world</ns2:meta></ns2:metadata>
 </ns2:server>
        """
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "server": {
                "name": "new-server-test",
                "imageRef": "1",
                "flavorRef": "2",
                'metadata': {"hello": "world"},
            },
        }
        self.assertEqual(request['body'], expected)

    def test_request_with_scheduler_hints_and_alternate_namespace_prefix(self):
        # Scheduler hints live in their own extension namespace and are
        # surfaced under the 'OS-SCH-HNT:scheduler_hints' key with each
        # hint value wrapped in a list.
        serial_request = """
<ns2:server xmlns:ns2="http://docs.openstack.org/compute/api/v2"
 name="new-server-test"
 imageRef="1"
 flavorRef="2">
 <ns2:metadata><ns2:meta key="hello">world</ns2:meta></ns2:metadata>
 <os:scheduler_hints
 xmlns:os="http://docs.openstack.org/compute/ext/scheduler-hints/api/v2">
 <hypervisor>xen</hypervisor>
 <near>eb999657-dd6b-464e-8713-95c532ac3b18</near>
 </os:scheduler_hints>
 </ns2:server>
        """
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "server": {
                'OS-SCH-HNT:scheduler_hints': {
                    'hypervisor': ['xen'],
                    'near': ['eb999657-dd6b-464e-8713-95c532ac3b18']
                },
                "name": "new-server-test",
                "imageRef": "1",
                "flavorRef": "2",
                "metadata": {
                    "hello": "world"
                }
            }
        }
        self.assertEqual(request['body'], expected)

    def test_access_ipv4(self):
        serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
 name="new-server-test"
 imageRef="1"
 flavorRef="2"
 accessIPv4="1.2.3.4"/>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "server": {
                "name": "new-server-test",
                "imageRef": "1",
                "flavorRef": "2",
                "accessIPv4": "1.2.3.4",
            },
        }
        self.assertEqual(request['body'], expected)

    def test_access_ipv6(self):
        serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
 name="new-server-test"
 imageRef="1"
 flavorRef="2"
 accessIPv6="fead::1234"/>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "server": {
                "name": "new-server-test",
                "imageRef": "1",
                "flavorRef": "2",
                "accessIPv6": "fead::1234",
            },
        }
        self.assertEqual(request['body'], expected)

    def test_access_ip(self):
        # Both access IP attributes present at once.
        serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
 name="new-server-test"
 imageRef="1"
 flavorRef="2"
 accessIPv4="1.2.3.4"
 accessIPv6="fead::1234"/>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "server": {
                "name": "new-server-test",
                "imageRef": "1",
                "flavorRef": "2",
                "accessIPv4": "1.2.3.4",
                "accessIPv6": "fead::1234",
            },
        }
        self.assertEqual(request['body'], expected)

    def test_admin_pass(self):
        serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
 name="new-server-test"
 imageRef="1"
 flavorRef="2"
 adminPass="1234"/>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "server": {
                "name": "new-server-test",
                "imageRef": "1",
                "flavorRef": "2",
                "adminPass": "1234",
            },
        }
        self.assertEqual(request['body'], expected)

    def test_image_link(self):
        # imageRef may be a full URL, passed through untouched.
        serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
 name="new-server-test"
 imageRef="http://localhost:8774/v2/images/2"
 flavorRef="3"/>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "server": {
                "name": "new-server-test",
                "imageRef": "http://localhost:8774/v2/images/2",
                "flavorRef": "3",
            },
        }
        self.assertEqual(request['body'], expected)

    def test_flavor_link(self):
        # flavorRef may be a full URL, passed through untouched.
        serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
 name="new-server-test"
 imageRef="1"
 flavorRef="http://localhost:8774/v2/flavors/3"/>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "server": {
                "name": "new-server-test",
                "imageRef": "1",
                "flavorRef": "http://localhost:8774/v2/flavors/3",
            },
        }
        self.assertEqual(request['body'], expected)

    def test_empty_metadata_personality(self):
        # Empty elements become an empty dict / empty list, not None.
        serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
 name="new-server-test"
 imageRef="1"
 flavorRef="2">
 <metadata/>
 <personality/>
</server>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "server": {
                "name": "new-server-test",
                "imageRef": "1",
                "flavorRef": "2",
                "metadata": {},
                "personality": [],
            },
        }
        self.assertEqual(request['body'], expected)

    def test_multiple_metadata_items(self):
        serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
 name="new-server-test"
 imageRef="1"
 flavorRef="2">
 <metadata>
 <meta key="one">two</meta>
 <meta key="open">snack</meta>
 </metadata>
</server>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "server": {
                "name": "new-server-test",
                "imageRef": "1",
                "flavorRef": "2",
                "metadata": {"one": "two", "open": "snack"},
            },
        }
        self.assertEqual(request['body'], expected)

    def test_multiple_personality_files(self):
        serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
 name="new-server-test"
 imageRef="1"
 flavorRef="2">
 <personality>
 <file path="/etc/banner.txt">MQ==</file>
 <file path="/etc/hosts">Mg==</file>
 </personality>
</server>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "server": {
                "name": "new-server-test",
                "imageRef": "1",
                "flavorRef": "2",
                "personality": [
                    {"path": "/etc/banner.txt", "contents": "MQ=="},
                    {"path": "/etc/hosts", "contents": "Mg=="},
                ],
            },
        }
        self.assertThat(request['body'], matchers.DictMatches(expected))

    def test_spec_request(self):
        # The canonical example request from the API specification.
        image_bookmark_link = ("http://servers.api.openstack.org/1234/"
                               "images/52415800-8b69-11e0-9b19-734f6f006e54")
        serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
 imageRef="%s"
 flavorRef="52415800-8b69-11e0-9b19-734f1195ff37"
 name="new-server-test">
 <metadata>
 <meta key="My Server Name">Apache1</meta>
 </metadata>
 <personality>
 <file path="/etc/banner.txt">Mg==</file>
 </personality>
</server>""" % (image_bookmark_link)
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "server": {
                "name": "new-server-test",
                "imageRef": ("http://servers.api.openstack.org/1234/"
                             "images/52415800-8b69-11e0-9b19-734f6f006e54"),
                "flavorRef": "52415800-8b69-11e0-9b19-734f1195ff37",
                "metadata": {"My Server Name": "Apache1"},
                "personality": [
                    {
                        "path": "/etc/banner.txt",
                        "contents": "Mg==",
                    },
                ],
            },
        }
        self.assertEqual(request['body'], expected)

    def test_request_with_empty_networks(self):
        serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
 name="new-server-test" imageRef="1" flavorRef="1">
 <networks/>
</server>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {"server": {
            "name": "new-server-test",
            "imageRef": "1",
            "flavorRef": "1",
            "networks": [],
            }}
        self.assertEqual(request['body'], expected)

    def test_request_with_one_network(self):
        serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
 name="new-server-test" imageRef="1" flavorRef="1">
 <networks>
 <network uuid="1" fixed_ip="10.0.1.12"/>
 </networks>
</server>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {"server": {
            "name": "new-server-test",
            "imageRef": "1",
            "flavorRef": "1",
            "networks": [{"uuid": "1", "fixed_ip": "10.0.1.12"}],
            }}
        self.assertEqual(request['body'], expected)

    def test_request_with_two_networks(self):
        serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
 name="new-server-test" imageRef="1" flavorRef="1">
 <networks>
 <network uuid="1" fixed_ip="10.0.1.12"/>
 <network uuid="2" fixed_ip="10.0.2.12"/>
 </networks>
</server>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {"server": {
            "name": "new-server-test",
            "imageRef": "1",
            "flavorRef": "1",
            "networks": [{"uuid": "1", "fixed_ip": "10.0.1.12"},
                         {"uuid": "2", "fixed_ip": "10.0.2.12"}],
            }}
        self.assertEqual(request['body'], expected)

    def test_request_with_second_network_node_ignored(self):
        # Only the first <networks> element is honored; a second sibling
        # <networks> node is silently dropped.
        serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
 name="new-server-test" imageRef="1" flavorRef="1">
 <networks>
 <network uuid="1" fixed_ip="10.0.1.12"/>
 </networks>
 <networks>
 <network uuid="2" fixed_ip="10.0.2.12"/>
 </networks>
</server>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {"server": {
            "name": "new-server-test",
            "imageRef": "1",
            "flavorRef": "1",
            "networks": [{"uuid": "1", "fixed_ip": "10.0.1.12"}],
            }}
        self.assertEqual(request['body'], expected)

    def test_request_with_one_network_missing_id(self):
        serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
 name="new-server-test" imageRef="1" flavorRef="1">
 <networks>
 <network fixed_ip="10.0.1.12"/>
 </networks>
</server>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {"server": {
            "name": "new-server-test",
            "imageRef": "1",
            "flavorRef": "1",
            "networks": [{"fixed_ip": "10.0.1.12"}],
            }}
        self.assertEqual(request['body'], expected)

    def test_request_with_one_network_missing_fixed_ip(self):
        serial_request = """
<server xmlns="http://docs.openstack.org/compute/api/v2"
 name="new-server-test" imageRef="1" flavorRef="1">
 <networks>
 <network uuid="1"/>
 </networks>
</server>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {"server": {
            "name": "new-server-test",
            "imageRef": "1",
            "flavorRef": "1",
            "networks": [{"uuid": "1"}],
            }}
        self.assertEqual(request['body'], expected)

    def test_request_with_one_network_empty_id(self):
        # An empty attribute value is preserved as an empty string.
        serial_request = """
    <server xmlns="http://docs.openstack.org/compute/api/v2"
     name="new-server-test" imageRef="1" flavorRef="1">
     <networks>
     <network uuid="" fixed_ip="10.0.1.12"/>
     </networks>
    </server>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {"server": {
            "name": "new-server-test",
            "imageRef": "1",
            "flavorRef": "1",
            "networks": [{"uuid": "", "fixed_ip": "10.0.1.12"}],
            }}
        self.assertEqual(request['body'], expected)

    def test_request_with_one_network_empty_fixed_ip(self):
        serial_request = """
    <server xmlns="http://docs.openstack.org/compute/api/v2"
     name="new-server-test" imageRef="1" flavorRef="1">
     <networks>
     <network uuid="1" fixed_ip=""/>
     </networks>
    </server>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {"server": {
            "name": "new-server-test",
            "imageRef": "1",
            "flavorRef": "1",
            "networks": [{"uuid": "1", "fixed_ip": ""}],
            }}
        self.assertEqual(request['body'], expected)

    def test_request_with_networks_duplicate_ids(self):
        # Duplicate network UUIDs are not collapsed by the deserializer.
        serial_request = """
    <server xmlns="http://docs.openstack.org/compute/api/v2"
     name="new-server-test" imageRef="1" flavorRef="1">
     <networks>
     <network uuid="1" fixed_ip="10.0.1.12"/>
     <network uuid="1" fixed_ip="10.0.2.12"/>
     </networks>
    </server>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {"server": {
            "name": "new-server-test",
            "imageRef": "1",
            "flavorRef": "1",
            "networks": [{"uuid": "1", "fixed_ip": "10.0.1.12"},
                         {"uuid": "1", "fixed_ip": "10.0.2.12"}],
            }}
        self.assertEqual(request['body'], expected)

    def test_request_with_availability_zone(self):
        serial_request = """
    <server xmlns="http://docs.openstack.org/compute/api/v2"
     name="new-server-test" imageRef="1" flavorRef="1"
     availability_zone="some_zone:some_host">
    </server>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {"server": {
            "name": "new-server-test",
            "imageRef": "1",
            "flavorRef": "1",
            "availability_zone": "some_zone:some_host",
            }}
        self.assertEqual(request['body'], expected)

    def test_request_with_multiple_create_args(self):
        # min_count/max_count stay strings, but return_reservation_id is
        # coerced to a boolean by the deserializer.
        serial_request = """
    <server xmlns="http://docs.openstack.org/compute/api/v2"
     name="new-server-test" imageRef="1" flavorRef="1"
     min_count="1" max_count="3" return_reservation_id="True">
    </server>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {"server": {
            "name": "new-server-test",
            "imageRef": "1",
            "flavorRef": "1",
            "min_count": "1",
            "max_count": "3",
            "return_reservation_id": True,
            }}
        self.assertEqual(request['body'], expected)

    def test_request_with_disk_config(self):
        serial_request = """
    <server xmlns="http://docs.openstack.org/compute/api/v2"
     xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1"
     name="new-server-test" imageRef="1" flavorRef="1"
     OS-DCF:diskConfig="AUTO">
    </server>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {"server": {
            "name": "new-server-test",
            "imageRef": "1",
            "flavorRef": "1",
            "OS-DCF:diskConfig": "AUTO",
            }}
        self.assertEqual(request['body'], expected)

    def test_request_with_scheduler_hints(self):
        # Repeated hint elements accumulate into a list.
        serial_request = """
    <server xmlns="http://docs.openstack.org/compute/api/v2"
     xmlns:OS-SCH-HNT=
     "http://docs.openstack.org/compute/ext/scheduler-hints/api/v2"
     name="new-server-test" imageRef="1" flavorRef="1">
     <OS-SCH-HNT:scheduler_hints>
     <different_host>
     7329b667-50c7-46a6-b913-cb2a09dfeee0
     </different_host>
     <different_host>
     f31efb24-34d2-43e1-8b44-316052956a39
     </different_host>
     </OS-SCH-HNT:scheduler_hints>
    </server>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {"server": {
            "name": "new-server-test",
            "imageRef": "1",
            "flavorRef": "1",
            "OS-SCH-HNT:scheduler_hints": {
                "different_host": [
                    "7329b667-50c7-46a6-b913-cb2a09dfeee0",
                    "f31efb24-34d2-43e1-8b44-316052956a39",
                ]
            }
            }}
        self.assertEqual(request['body'], expected)

    def test_request_with_config_drive(self):
        serial_request = """
    <server xmlns="http://docs.openstack.org/compute/api/v2"
     name="config_drive_test"
     imageRef="1"
     flavorRef="1"
     config_drive="true"/>"""
        request = self.deserializer.deserialize(serial_request)
        expected = {
            "server": {
                "name": "config_drive_test",
                "imageRef": "1",
                "flavorRef": "1",
                "config_drive": "true"
            },
        }
        self.assertEqual(request['body'], expected)

    def test_corrupt_xml(self):
        """Should throw a 400 error on corrupt xml."""
        self.assertRaises(
                exception.MalformedRequestBody,
                self.deserializer.deserialize,
                utils.killer_xml_body())
+
+
class TestServerActionRequestXMLDeserializer(test.TestCase):
    """XML deserialization of server actions (rebuild/resize).

    Verifies that both the extension attribute name (OS-DCF:diskConfig)
    and the legacy compatibility name (auto_disk_config) end up as the
    canonical 'OS-DCF:diskConfig' key in the parsed body.
    """

    def setUp(self):
        super(TestServerActionRequestXMLDeserializer, self).setUp()
        self.deserializer = servers.ActionDeserializer()

    def _generate_request(self, action, disk_cfg, ref):
        # Build an action document carrying a disk-config attribute and
        # a single image/flavor reference.
        template = """
<%(action)s xmlns="http://docs.openstack.org/compute/api/v1.1"
 xmlns:OS-DCF="http://docs.openstack.org/compute/ext/disk_config/api/v1.1"
 %(disk_config)s="MANUAL" %(ref)s="1"/>"""
        return template % {'action': action, 'disk_config': disk_cfg,
                           'ref': ref}

    def _generate_expected(self, action, ref):
        # Expected parsed body: the ref attribute plus the canonical
        # disk-config key.
        return {action: {ref: "1", "OS-DCF:diskConfig": "MANUAL"}}

    def _check_action(self, action, disk_cfg, ref):
        # Round-trip one action request through the deserializer.
        serial_request = self._generate_request(action, disk_cfg, ref)
        request = self.deserializer.deserialize(serial_request)
        self.assertEqual(request['body'],
                         self._generate_expected(action, ref))

    def test_rebuild_request(self):
        self._check_action("rebuild", "OS-DCF:diskConfig", "imageRef")

    def test_rebuild_request_auto_disk_config_compat(self):
        self._check_action("rebuild", "auto_disk_config", "imageRef")

    def test_resize_request(self):
        self._check_action("resize", "OS-DCF:diskConfig", "flavorRef")

    def test_resize_request_auto_disk_config_compat(self):
        self._check_action("resize", "auto_disk_config", "flavorRef")
+
+
class TestAddressesXMLSerialization(test.TestCase):
    """XML serialization of server address dicts.

    Exercises both the per-network 'show' template and the full 'index'
    (addresses) template.
    """

    index_serializer = ips.AddressesTemplate()
    show_serializer = ips.NetworkTemplate()

    def _serializer_test_data(self):
        # One network carrying an IPv4 and an IPv6 address.
        return {
            'network_2': [
                {'addr': '192.168.0.1', 'version': 4},
                {'addr': 'fe80::beef', 'version': 6},
            ],
        }

    def test_xml_declaration(self):
        # Serialized output must start with an explicit XML declaration.
        output = self.show_serializer.serialize(self._serializer_test_data())
        has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
        self.assertTrue(has_dec)

    def test_show(self):
        output = self.show_serializer.serialize(self._serializer_test_data())
        root = etree.XML(output)
        network = self._serializer_test_data()['network_2']
        self.assertEqual(str(root.get('id')), 'network_2')
        ip_elems = root.findall('{0}ip'.format(NS))
        # Each <ip> element must mirror the fixture's version/addr pairs.
        for z, ip_elem in enumerate(ip_elems):
            ip = network[z]
            self.assertEqual(str(ip_elem.get('version')),
                             str(ip['version']))
            self.assertEqual(str(ip_elem.get('addr')),
                             str(ip['addr']))

    def test_index(self):
        fixture = {
            'addresses': {
                'network_1': [
                    {'addr': '192.168.0.3', 'version': 4},
                    {'addr': '192.168.0.5', 'version': 4},
                ],
                'network_2': [
                    {'addr': '192.168.0.1', 'version': 4},
                    {'addr': 'fe80::beef', 'version': 6},
                ],
            },
        }
        output = self.index_serializer.serialize(fixture)
        root = etree.XML(output)
        xmlutil.validate_schema(root, 'addresses')
        addresses_dict = fixture['addresses']
        network_elems = root.findall('{0}network'.format(NS))
        self.assertEqual(len(network_elems), 2)
        for i, network_elem in enumerate(network_elems):
            # dict.items()[i] only works on Python 2, where items()
            # returns a list; materialize explicitly so this also runs
            # on Python 3.  NOTE(review): this still assumes the
            # serializer emits networks in dict iteration order.
            network = list(addresses_dict.items())[i]
            self.assertEqual(str(network_elem.get('id')), str(network[0]))
            ip_elems = network_elem.findall('{0}ip'.format(NS))
            for z, ip_elem in enumerate(ip_elems):
                ip = network[1][z]
                self.assertEqual(str(ip_elem.get('version')),
                                 str(ip['version']))
                self.assertEqual(str(ip_elem.get('addr')),
                                 str(ip['addr']))
+
+
class ServersViewBuilderTest(test.TestCase):
    """Tests for views.servers.ViewBuilder basic/show output.

    setUp builds one fake BUILD-state instance plus expected basic and
    detailed view dicts; individual tests mutate the instance and the
    expectations in tandem.
    """

    image_bookmark = "http://localhost/fake/images/5"
    flavor_bookmark = "http://localhost/fake/flavors/1"

    def setUp(self):
        super(ServersViewBuilderTest, self).setUp()
        self.flags(use_ipv6=True)
        db_inst = fakes.stub_instance(
            id=1,
            image_ref="5",
            uuid="deadbeef-feed-edee-beef-d0ea7beefedd",
            display_name="test_server",
            include_fake_metadata=False)

        privates = ['172.19.0.1']
        publics = ['192.168.0.3']
        public6s = ['b33f::fdee:ddff:fecc:bbaa']

        def nw_info(*args, **kwargs):
            # Fake network info: one public net (v4 + v6) and one
            # private net (v4 only).
            return [(None, {'label': 'public',
                            'ips': [dict(ip=ip) for ip in publics],
                            'ip6s': [dict(ip=ip) for ip in public6s]}),
                    (None, {'label': 'private',
                            'ips': [dict(ip=ip) for ip in privates]})]

        def floaters(*args, **kwargs):
            # No floating IPs attached.
            return []

        fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
        fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
                                                                floaters)

        self.uuid = db_inst['uuid']
        self.view_builder = views.servers.ViewBuilder()
        self.request = fakes.HTTPRequest.blank("/v2/fake")
        self.request.context = context.RequestContext('fake', 'fake')
        self.instance = fake_instance.fake_instance_obj(
                    self.request.context,
                    expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
                    **db_inst)
        self.self_link = "http://localhost/v2/fake/servers/%s" % self.uuid
        self.bookmark_link = "http://localhost/fake/servers/%s" % self.uuid
        # Full 'show' view expected for the fixture instance.
        self.expected_detailed_server = {
            "server": {
                "id": self.uuid,
                "user_id": "fake_user",
                "tenant_id": "fake_project",
                "updated": "2010-11-11T11:00:00Z",
                "created": "2010-10-10T12:00:00Z",
                "progress": 0,
                "name": "test_server",
                "status": "BUILD",
                "accessIPv4": "",
                "accessIPv6": "",
                "hostId": '',
                "image": {
                    "id": "5",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": self.image_bookmark,
                        },
                    ],
                },
                "flavor": {
                    "id": "1",
                    "links": [
                        {
                            "rel": "bookmark",
                            "href": self.flavor_bookmark,
                        },
                    ],
                },
                "addresses": {
                    'test1': [
                        {'version': 4, 'addr': '192.168.1.100'},
                        {'version': 6, 'addr': '2001:db8:0:1::1'}
                    ]
                },
                "metadata": {},
                "links": [
                    {
                        "rel": "self",
                        "href": self.self_link,
                    },
                    {
                        "rel": "bookmark",
                        "href": self.bookmark_link,
                    },
                ],
            }
        }

        # Minimal 'basic' view: id, name, and links only.
        self.expected_server = {
            "server": {
                "id": self.uuid,
                "name": "test_server",
                "links": [
                    {
                        "rel": "self",
                        "href": self.self_link,
                    },
                    {
                        "rel": "bookmark",
                        "href": self.bookmark_link,
                    },
                ],
            }
        }

    def test_get_flavor_valid_flavor(self):
        expected = {"id": "1",
                    "links": [{"rel": "bookmark",
                               "href": self.flavor_bookmark}]}
        result = self.view_builder._get_flavor(self.request, self.instance)
        self.assertEqual(result, expected)

    def test_build_server(self):
        output = self.view_builder.basic(self.request, self.instance)
        self.assertThat(output,
                        matchers.DictMatches(self.expected_server))

    def test_build_server_with_project_id(self):

        output = self.view_builder.basic(self.request, self.instance)
        self.assertThat(output,
                        matchers.DictMatches(self.expected_server))

    def test_build_server_detail(self):

        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output,
                        matchers.DictMatches(self.expected_detailed_server))

    def test_build_server_no_image(self):
        # With no image ref, the view reports an empty-string image.
        self.instance["image_ref"] = ""
        output = self.view_builder.show(self.request, self.instance)
        self.assertEqual(output['server']['image'], "")

    def test_build_server_detail_with_fault(self):
        self.instance['vm_state'] = vm_states.ERROR
        self.instance['fault'] = fake_instance.fake_fault_obj(
                                     self.request.context, self.uuid)

        self.expected_detailed_server["server"]["status"] = "ERROR"
        self.expected_detailed_server["server"]["fault"] = {
            "code": 404,
            "created": "2010-10-10T12:00:00Z",
            "message": "HTTPNotFound",
            "details": "Stock details for test",
        }
        # ERROR-state servers do not expose a progress value.
        del self.expected_detailed_server["server"]["progress"]

        self.request.context = context.RequestContext('fake', 'fake')
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output,
                        matchers.DictMatches(self.expected_detailed_server))

    def test_build_server_detail_with_fault_that_has_been_deleted(self):
        self.instance['deleted'] = 1
        self.instance['vm_state'] = vm_states.ERROR
        fault = fake_instance.fake_fault_obj(self.request.context,
                                             self.uuid, code=500,
                                             message="No valid host was found")
        self.instance['fault'] = fault

        # Regardless of the vm_state deleted servers should have DELETED
        # status.
        self.expected_detailed_server["server"]["status"] = "DELETED"
        self.expected_detailed_server["server"]["fault"] = {
            "code": 500,
            "created": "2010-10-10T12:00:00Z",
            "message": "No valid host was found",
        }
        del self.expected_detailed_server["server"]["progress"]

        self.request.context = context.RequestContext('fake', 'fake')
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output,
                        matchers.DictMatches(self.expected_detailed_server))

    def test_build_server_detail_with_fault_no_details_not_admin(self):
        # Non-admin users never see fault details.
        self.instance['vm_state'] = vm_states.ERROR
        self.instance['fault'] = fake_instance.fake_fault_obj(
                                                   self.request.context,
                                                   self.uuid,
                                                   code=500,
                                                   message='Error')

        expected_fault = {"code": 500,
                          "created": "2010-10-10T12:00:00Z",
                          "message": "Error"}

        self.request.context = context.RequestContext('fake', 'fake')
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output['server']['fault'],
                        matchers.DictMatches(expected_fault))

    def test_build_server_detail_with_fault_admin(self):
        # Admin users see the fault details field.
        self.instance['vm_state'] = vm_states.ERROR
        self.instance['fault'] = fake_instance.fake_fault_obj(
                                                   self.request.context,
                                                   self.uuid,
                                                   code=500,
                                                   message='Error')

        expected_fault = {"code": 500,
                          "created": "2010-10-10T12:00:00Z",
                          "message": "Error",
                          'details': 'Stock details for test'}

        self.request.environ['nova.context'].is_admin = True
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output['server']['fault'],
                        matchers.DictMatches(expected_fault))

    def test_build_server_detail_with_fault_no_details_admin(self):
        # Even for admins, empty details are omitted from the view.
        self.instance['vm_state'] = vm_states.ERROR
        self.instance['fault'] = fake_instance.fake_fault_obj(
                                                   self.request.context,
                                                   self.uuid,
                                                   code=500,
                                                   message='Error',
                                                   details='')

        expected_fault = {"code": 500,
                          "created": "2010-10-10T12:00:00Z",
                          "message": "Error"}

        self.request.environ['nova.context'].is_admin = True
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output['server']['fault'],
                        matchers.DictMatches(expected_fault))

    def test_build_server_detail_with_fault_but_active(self):
        # A fault on an ACTIVE instance is not exposed.
        self.instance['vm_state'] = vm_states.ACTIVE
        self.instance['progress'] = 100
        self.instance['fault'] = fake_instance.fake_fault_obj(
                                     self.request.context, self.uuid)

        output = self.view_builder.show(self.request, self.instance)
        self.assertNotIn('fault', output['server'])

    def test_build_server_detail_active_status(self):
        # set the power state of the instance to running
        self.instance['vm_state'] = vm_states.ACTIVE
        self.instance['progress'] = 100

        self.expected_detailed_server["server"]["status"] = "ACTIVE"
        self.expected_detailed_server["server"]["progress"] = 100

        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output,
                        matchers.DictMatches(self.expected_detailed_server))

    def test_build_server_detail_with_accessipv4(self):

        access_ip_v4 = '1.2.3.4'
        self.instance['access_ip_v4'] = access_ip_v4

        self.expected_detailed_server["server"]["accessIPv4"] = access_ip_v4
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output,
                        matchers.DictMatches(self.expected_detailed_server))

    def test_build_server_detail_with_accessipv6(self):

        access_ip_v6 = 'fead::1234'
        self.instance['access_ip_v6'] = access_ip_v6

        self.expected_detailed_server["server"]["accessIPv6"] = access_ip_v6

        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output,
                        matchers.DictMatches(self.expected_detailed_server))

    def test_build_server_detail_with_metadata(self):

        metadata = []
        metadata.append(models.InstanceMetadata(key="Open", value="Stack"))
        metadata = nova_utils.metadata_to_dict(metadata)
        self.instance['metadata'] = metadata

        self.expected_detailed_server["server"]["metadata"] = {"Open": "Stack"}
        output = self.view_builder.show(self.request, self.instance)
        self.assertThat(output,
                        matchers.DictMatches(self.expected_detailed_server))
+
+
+class ServerXMLSerializationTest(test.TestCase):
+
+ TIMESTAMP = "2010-10-11T10:30:22Z"
+ SERVER_HREF = 'http://localhost/v2/servers/%s' % FAKE_UUID
+ SERVER_NEXT = 'http://localhost/v2/servers?limit=%s&marker=%s'
+ SERVER_BOOKMARK = 'http://localhost/servers/%s' % FAKE_UUID
+ IMAGE_BOOKMARK = 'http://localhost/images/5'
+ FLAVOR_BOOKMARK = 'http://localhost/flavors/1'
+ USERS_ATTRIBUTES = ['name', 'id', 'created', 'accessIPv4',
+ 'updated', 'progress', 'status', 'hostId',
+ 'accessIPv6']
+ ADMINS_ATTRIBUTES = USERS_ATTRIBUTES + ['adminPass']
+
+ def setUp(self):
+ super(ServerXMLSerializationTest, self).setUp()
+ self.body = {
+ "server": {
+ 'id': FAKE_UUID,
+ 'user_id': 'fake_user_id',
+ 'tenant_id': 'fake_tenant_id',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ "progress": 0,
+ "name": "test_server-" + u'\u89e3\u7801',
+ "status": "BUILD",
+ "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0',
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "fead::1234",
+ "image": {
+ "id": "5",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": self.IMAGE_BOOKMARK,
+ },
+ ],
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": self.FLAVOR_BOOKMARK,
+ },
+ ],
+ },
+ "addresses": {
+ "network_one": [
+ {
+ "version": 4,
+ "addr": "67.23.10.138",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.138",
+ },
+ ],
+ "network_two": [
+ {
+ "version": 4,
+ "addr": "67.23.10.139",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.139",
+ },
+ ],
+ },
+ "metadata": {
+ "Open": "Stack",
+ "Number": "1",
+ },
+ 'links': [
+ {
+ 'href': self.SERVER_HREF,
+ 'rel': 'self',
+ },
+ {
+ 'href': self.SERVER_BOOKMARK,
+ 'rel': 'bookmark',
+ },
+ ],
+ }
+ }
+
+ def _validate_xml(self, root, server_dict):
+
+ link_nodes = root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(server_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ metadata_root = root.find('{0}metadata'.format(NS))
+ metadata_elems = metadata_root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 2)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = server_dict['metadata'].items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ image_root = root.find('{0}image'.format(NS))
+ self.assertEqual(image_root.get('id'), server_dict['image']['id'])
+ link_nodes = image_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 1)
+ for i, link in enumerate(server_dict['image']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ flavor_root = root.find('{0}flavor'.format(NS))
+ self.assertEqual(flavor_root.get('id'), server_dict['flavor']['id'])
+ link_nodes = flavor_root.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 1)
+ for i, link in enumerate(server_dict['flavor']['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ addresses_root = root.find('{0}addresses'.format(NS))
+ addresses_dict = server_dict['addresses']
+ network_elems = addresses_root.findall('{0}network'.format(NS))
+ self.assertEqual(len(network_elems), 2)
+ for i, network_elem in enumerate(network_elems):
+ network = addresses_dict.items()[i]
+ self.assertEqual(str(network_elem.get('id')), str(network[0]))
+ ip_elems = network_elem.findall('{0}ip'.format(NS))
+ for z, ip_elem in enumerate(ip_elems):
+ ip = network[1][z]
+ self.assertEqual(str(ip_elem.get('version')),
+ str(ip['version']))
+ self.assertEqual(str(ip_elem.get('addr')),
+ str(ip['addr']))
+
+ def _validate_required_attributes(self, root, server_dict, attributes):
+ for key in attributes:
+ expected = server_dict[key]
+ if not isinstance(expected, six.text_type):
+ expected = str(expected)
+ self.assertEqual(expected, root.get(key))
+
+ def test_xml_declaration(self):
+ serializer = servers.ServerTemplate()
+
+ output = serializer.serialize(self.body)
+ has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
+ self.assertTrue(has_dec)
+
+ def test_show(self):
+ serializer = servers.ServerTemplate()
+
+ output = serializer.serialize(self.body)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'server')
+
+ server_dict = self.body['server']
+
+ self._validate_required_attributes(root, server_dict,
+ self.USERS_ATTRIBUTES)
+ self._validate_xml(root, server_dict)
+
+ def test_create(self):
+ serializer = servers.FullServerTemplate()
+
+ self.body["server"]["adminPass"] = "test_password"
+
+ output = serializer.serialize(self.body)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'server')
+
+ server_dict = self.body['server']
+
+ self._validate_required_attributes(root, server_dict,
+ self.ADMINS_ATTRIBUTES)
+ self._validate_xml(root, server_dict)
+
+ def test_index(self):
+ serializer = servers.MinimalServersTemplate()
+
+ uuid1 = fakes.get_fake_uuid(1)
+ uuid2 = fakes.get_fake_uuid(2)
+ expected_server_href = 'http://localhost/v2/servers/%s' % uuid1
+ expected_server_bookmark = 'http://localhost/servers/%s' % uuid1
+ expected_server_href_2 = 'http://localhost/v2/servers/%s' % uuid2
+ expected_server_bookmark_2 = 'http://localhost/servers/%s' % uuid2
+ fixture = {"servers": [
+ {
+ "id": fakes.get_fake_uuid(1),
+ "name": "test_server",
+ 'links': [
+ {
+ 'href': expected_server_href,
+ 'rel': 'self',
+ },
+ {
+ 'href': expected_server_bookmark,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ {
+ "id": fakes.get_fake_uuid(2),
+ "name": "test_server_2",
+ 'links': [
+ {
+ 'href': expected_server_href_2,
+ 'rel': 'self',
+ },
+ {
+ 'href': expected_server_bookmark_2,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ ]}
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'servers')
+ server_elems = root.findall('{0}server'.format(NS))
+ self.assertEqual(len(server_elems), 2)
+ for i, server_elem in enumerate(server_elems):
+ server_dict = fixture['servers'][i]
+ for key in ['name', 'id']:
+ self.assertEqual(server_elem.get(key), str(server_dict[key]))
+
+ link_nodes = server_elem.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(server_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ def test_index_with_servers_links(self):
+ serializer = servers.MinimalServersTemplate()
+
+ uuid1 = fakes.get_fake_uuid(1)
+ uuid2 = fakes.get_fake_uuid(2)
+ expected_server_href = 'http://localhost/v2/servers/%s' % uuid1
+ expected_server_next = self.SERVER_NEXT % (2, 2)
+ expected_server_bookmark = 'http://localhost/servers/%s' % uuid1
+ expected_server_href_2 = 'http://localhost/v2/servers/%s' % uuid2
+ expected_server_bookmark_2 = 'http://localhost/servers/%s' % uuid2
+ fixture = {"servers": [
+ {
+ "id": fakes.get_fake_uuid(1),
+ "name": "test_server",
+ 'links': [
+ {
+ 'href': expected_server_href,
+ 'rel': 'self',
+ },
+ {
+ 'href': expected_server_bookmark,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ {
+ "id": fakes.get_fake_uuid(2),
+ "name": "test_server_2",
+ 'links': [
+ {
+ 'href': expected_server_href_2,
+ 'rel': 'self',
+ },
+ {
+ 'href': expected_server_bookmark_2,
+ 'rel': 'bookmark',
+ },
+ ],
+ },
+ ],
+ "servers_links": [
+ {
+ 'rel': 'next',
+ 'href': expected_server_next,
+ },
+ ]}
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'servers')
+ server_elems = root.findall('{0}server'.format(NS))
+ self.assertEqual(len(server_elems), 2)
+ for i, server_elem in enumerate(server_elems):
+ server_dict = fixture['servers'][i]
+ for key in ['name', 'id']:
+ self.assertEqual(server_elem.get(key), str(server_dict[key]))
+
+ link_nodes = server_elem.findall('{0}link'.format(ATOMNS))
+ self.assertEqual(len(link_nodes), 2)
+ for i, link in enumerate(server_dict['links']):
+ for key, value in link.items():
+ self.assertEqual(link_nodes[i].get(key), value)
+
+ # Check servers_links
+ servers_links = root.findall('{0}link'.format(ATOMNS))
+ for i, link in enumerate(fixture['servers_links']):
+ for key, value in link.items():
+ self.assertEqual(servers_links[i].get(key), value)
+
+ def test_detail(self):
+ serializer = servers.ServersTemplate()
+
+ uuid1 = fakes.get_fake_uuid(1)
+ expected_server_href = 'http://localhost/v2/servers/%s' % uuid1
+ expected_server_bookmark = 'http://localhost/servers/%s' % uuid1
+ expected_image_bookmark = self.IMAGE_BOOKMARK
+ expected_flavor_bookmark = self.FLAVOR_BOOKMARK
+
+ uuid2 = fakes.get_fake_uuid(2)
+ expected_server_href_2 = 'http://localhost/v2/servers/%s' % uuid2
+ expected_server_bookmark_2 = 'http://localhost/servers/%s' % uuid2
+ fixture = {"servers": [
+ {
+ "id": fakes.get_fake_uuid(1),
+ "user_id": "fake",
+ "tenant_id": "fake",
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ "progress": 0,
+ "name": "test_server",
+ "status": "BUILD",
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "fead::1234",
+ "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0',
+ "image": {
+ "id": "5",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": expected_image_bookmark,
+ },
+ ],
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": expected_flavor_bookmark,
+ },
+ ],
+ },
+ "addresses": {
+ "network_one": [
+ {
+ "version": 4,
+ "addr": "67.23.10.138",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.138",
+ },
+ ],
+ "network_two": [
+ {
+ "version": 4,
+ "addr": "67.23.10.139",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.139",
+ },
+ ],
+ },
+ "metadata": {
+ "Open": "Stack",
+ "Number": "1",
+ },
+ "links": [
+ {
+ "href": expected_server_href,
+ "rel": "self",
+ },
+ {
+ "href": expected_server_bookmark,
+ "rel": "bookmark",
+ },
+ ],
+ },
+ {
+ "id": fakes.get_fake_uuid(2),
+ "user_id": 'fake',
+ "tenant_id": 'fake',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ "progress": 100,
+ "name": "test_server_2",
+ "status": "ACTIVE",
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "fead::1234",
+ "hostId": 'e4d909c290d0fb1ca068ffaddf22cbd0',
+ "image": {
+ "id": "5",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": expected_image_bookmark,
+ },
+ ],
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "rel": "bookmark",
+ "href": expected_flavor_bookmark,
+ },
+ ],
+ },
+ "addresses": {
+ "network_one": [
+ {
+ "version": 4,
+ "addr": "67.23.10.138",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.138",
+ },
+ ],
+ "network_two": [
+ {
+ "version": 4,
+ "addr": "67.23.10.139",
+ },
+ {
+ "version": 6,
+ "addr": "::babe:67.23.10.139",
+ },
+ ],
+ },
+ "metadata": {
+ "Open": "Stack",
+ "Number": "2",
+ },
+ "links": [
+ {
+ "href": expected_server_href_2,
+ "rel": "self",
+ },
+ {
+ "href": expected_server_bookmark_2,
+ "rel": "bookmark",
+ },
+ ],
+ },
+ ]}
+
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'servers')
+ server_elems = root.findall('{0}server'.format(NS))
+ self.assertEqual(len(server_elems), 2)
+ for i, server_elem in enumerate(server_elems):
+ server_dict = fixture['servers'][i]
+ self._validate_required_attributes(server_elem, server_dict,
+ self.USERS_ATTRIBUTES)
+ self._validate_xml(server_elem, server_dict)
+
+ def test_update(self):
+ serializer = servers.ServerTemplate()
+
+ self.body["server"]["fault"] = {
+ "code": 500,
+ "created": self.TIMESTAMP,
+ "message": "Error Message",
+ "details": "Fault details",
+ }
+ output = serializer.serialize(self.body)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'server')
+
+ server_dict = self.body['server']
+
+ self._validate_required_attributes(root, server_dict,
+ self.USERS_ATTRIBUTES)
+
+ self._validate_xml(root, server_dict)
+ fault_root = root.find('{0}fault'.format(NS))
+ fault_dict = server_dict['fault']
+ self.assertEqual(fault_root.get("code"), str(fault_dict["code"]))
+ self.assertEqual(fault_root.get("created"), fault_dict["created"])
+ msg_elem = fault_root.find('{0}message'.format(NS))
+ self.assertEqual(msg_elem.text, fault_dict["message"])
+ det_elem = fault_root.find('{0}details'.format(NS))
+ self.assertEqual(det_elem.text, fault_dict["details"])
+
+ def test_action(self):
+ serializer = servers.FullServerTemplate()
+
+ self.body["server"]["adminPass"] = u'\u89e3\u7801'
+ output = serializer.serialize(self.body)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'server')
+
+ server_dict = self.body['server']
+
+ self._validate_required_attributes(root, server_dict,
+ self.ADMINS_ATTRIBUTES)
+
+ self._validate_xml(root, server_dict)
+
+
+class ServersAllExtensionsTestCase(test.TestCase):
+ """Servers tests using default API router with all extensions enabled.
+
+ The intent here is to catch cases where extensions end up throwing
+ an exception because of a malformed request before the core API
+ gets a chance to validate the request and return a 422 response.
+
+ For example, ServerDiskConfigController extends servers.Controller::
+
+ | @wsgi.extends
+ | def create(self, req, body):
+ | if 'server' in body:
+ | self._set_disk_config(body['server'])
+ | resp_obj = (yield)
+ | self._show(req, resp_obj)
+
+ we want to ensure that the extension isn't barfing on an invalid
+ body.
+ """
+
+ def setUp(self):
+ super(ServersAllExtensionsTestCase, self).setUp()
+ self.app = compute.APIRouter()
+
+ def test_create_missing_server(self):
+ # Test create with malformed body.
+
+ def fake_create(*args, **kwargs):
+ raise test.TestingException("Should not reach the compute API.")
+
+ self.stubs.Set(compute_api.API, 'create', fake_create)
+
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ body = {'foo': {'a': 'b'}}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(422, res.status_int)
+
+ def test_update_missing_server(self):
+ # Test update with malformed body.
+
+ def fake_update(*args, **kwargs):
+ raise test.TestingException("Should not reach the compute API.")
+
+ self.stubs.Set(compute_api.API, 'update', fake_update)
+
+ req = fakes.HTTPRequest.blank('/fake/servers/1')
+ req.method = 'PUT'
+ req.content_type = 'application/json'
+ body = {'foo': {'a': 'b'}}
+
+ req.body = jsonutils.dumps(body)
+ res = req.get_response(self.app)
+ self.assertEqual(422, res.status_int)
+
+
+class ServersUnprocessableEntityTestCase(test.TestCase):
+ """Tests of places we throw 422 Unprocessable Entity from."""
+
+ def setUp(self):
+ super(ServersUnprocessableEntityTestCase, self).setUp()
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = servers.Controller(self.ext_mgr)
+
+ def _unprocessable_server_create(self, body):
+ req = fakes.HTTPRequest.blank('/fake/servers')
+ req.method = 'POST'
+
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller.create, req, body)
+
+ def test_create_server_no_body(self):
+ self._unprocessable_server_create(body=None)
+
+ def test_create_server_missing_server(self):
+ body = {'foo': {'a': 'b'}}
+ self._unprocessable_server_create(body=body)
+
+ def test_create_server_malformed_entity(self):
+ body = {'server': 'string'}
+ self._unprocessable_server_create(body=body)
+
+ def _unprocessable_server_update(self, body):
+ req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
+ req.method = 'PUT'
+
+ self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ self.controller.update, req, FAKE_UUID, body)
+
+ def test_update_server_no_body(self):
+ self._unprocessable_server_update(body=None)
+
+ def test_update_server_missing_server(self):
+ body = {'foo': {'a': 'b'}}
+ self._unprocessable_server_update(body=body)
+
+ def test_create_update_malformed_entity(self):
+ body = {'server': 'string'}
+ self._unprocessable_server_update(body=body)
diff --git a/nova/tests/unit/api/openstack/compute/test_urlmap.py b/nova/tests/unit/api/openstack/compute/test_urlmap.py
new file mode 100644
index 0000000000..c95cb95d2c
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_urlmap.py
@@ -0,0 +1,171 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.serialization import jsonutils
+import webob
+
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+import nova.tests.unit.image.fake
+
+
+class UrlmapTest(test.NoDBTestCase):
+ def setUp(self):
+ super(UrlmapTest, self).setUp()
+ fakes.stub_out_rate_limiting(self.stubs)
+ nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
+
+ def tearDown(self):
+ super(UrlmapTest, self).tearDown()
+ nova.tests.unit.image.fake.FakeImageService_reset()
+
+ def test_path_version_v1_1(self):
+ # Test URL path specifying v1.1 returns v2 content.
+ req = webob.Request.blank('/v1.1/')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['version']['id'], 'v2.0')
+
+ def test_content_type_version_v1_1(self):
+ # Test Content-Type specifying v1.1 returns v2 content.
+ req = webob.Request.blank('/')
+ req.content_type = "application/json;version=1.1"
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['version']['id'], 'v2.0')
+
+ def test_accept_version_v1_1(self):
+ # Test Accept header specifying v1.1 returns v2 content.
+ req = webob.Request.blank('/')
+ req.accept = "application/json;version=1.1"
+ res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['version']['id'], 'v2.0')
+
+ def test_path_version_v2(self):
+ # Test URL path specifying v2 returns v2 content.
+ req = webob.Request.blank('/v2/')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['version']['id'], 'v2.0')
+
+ def test_content_type_version_v2(self):
+ # Test Content-Type specifying v2 returns v2 content.
+ req = webob.Request.blank('/')
+ req.content_type = "application/json;version=2"
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['version']['id'], 'v2.0')
+
+ def test_accept_version_v2(self):
+ # Test Accept header specifying v2 returns v2 content.
+ req = webob.Request.blank('/')
+ req.accept = "application/json;version=2"
+ res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['version']['id'], 'v2.0')
+
+ def test_path_content_type(self):
+ # Test URL path specifying JSON returns JSON content.
+ url = '/v2/fake/images/cedef40a-ed67-4d10-800e-17455edce175.json'
+ req = webob.Request.blank(url)
+ req.accept = "application/xml"
+ res = req.get_response(fakes.wsgi_app(init_only=('images',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['image']['id'],
+ 'cedef40a-ed67-4d10-800e-17455edce175')
+
+ def test_accept_content_type(self):
+ # Test Accept header specifying JSON returns JSON content.
+ url = '/v2/fake/images/cedef40a-ed67-4d10-800e-17455edce175'
+ req = webob.Request.blank(url)
+ req.accept = "application/xml;q=0.8, application/json"
+ res = req.get_response(fakes.wsgi_app(init_only=('images',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['image']['id'],
+ 'cedef40a-ed67-4d10-800e-17455edce175')
+
+ def test_path_version_v21(self):
+ # Test URL path specifying v2.1 returns v2.1 content.
+ req = webob.Request.blank('/v2.1/')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app_v21(init_only=('versions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['version']['id'], 'v2.1')
+
+ def test_content_type_version_v21(self):
+        # Test Content-Type specifying v2.1 returns v2.1 content.
+ req = webob.Request.blank('/')
+ req.content_type = "application/json;version=2.1"
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app_v21(init_only=('versions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['version']['id'], 'v2.1')
+
+ def test_accept_version_v21(self):
+ # Test Accept header specifying v2.1 returns v2.1 content.
+ req = webob.Request.blank('/')
+ req.accept = "application/json;version=2.1"
+ res = req.get_response(fakes.wsgi_app_v21(init_only=('versions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['version']['id'], 'v2.1')
+
+ def test_path_content_type_v21(self):
+ # Test URL path specifying JSON returns JSON content.
+ url = '/v2.1/fake/extensions/extensions.json'
+ req = webob.Request.blank(url)
+ req.accept = "application/xml"
+ res = req.get_response(fakes.wsgi_app_v21())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['extension']['name'], 'Extensions')
+
+ def test_accept_content_type_v21(self):
+ # Test Accept header specifying JSON returns JSON content.
+ url = '/v2.1/fake/extensions/extensions'
+ req = webob.Request.blank(url)
+ req.accept = "application/xml;q=0.8, application/json"
+ res = req.get_response(fakes.wsgi_app_v21(init_only=('extensions',)))
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ body = jsonutils.loads(res.body)
+ self.assertEqual(body['extension']['name'], 'Extensions')
diff --git a/nova/tests/api/openstack/compute/test_v21_extensions.py b/nova/tests/unit/api/openstack/compute/test_v21_extensions.py
index 7998dc82e5..7998dc82e5 100644
--- a/nova/tests/api/openstack/compute/test_v21_extensions.py
+++ b/nova/tests/unit/api/openstack/compute/test_v21_extensions.py
diff --git a/nova/tests/unit/api/openstack/compute/test_v3_auth.py b/nova/tests/unit/api/openstack/compute/test_v3_auth.py
new file mode 100644
index 0000000000..e728fa89d6
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_v3_auth.py
@@ -0,0 +1,62 @@
+# Copyright 2013 IBM Corp.
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+import webob.dec
+
+from nova import context
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+class TestNoAuthMiddlewareV3(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestNoAuthMiddlewareV3, self).setUp()
+ self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_networking(self.stubs)
+
+ def test_authorize_user(self):
+ req = webob.Request.blank('/v2/fake')
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
+ req.headers['X-Auth-Project-Id'] = 'user1_project'
+ result = req.get_response(fakes.wsgi_app_v21(use_no_auth=True))
+ self.assertEqual(result.status, '204 No Content')
+ self.assertEqual(result.headers['X-Server-Management-Url'],
+ "http://localhost/v2/fake")
+
+ def test_authorize_user_trailing_slash(self):
+ # make sure it works with trailing slash on the request
+ req = webob.Request.blank('/v2/fake/')
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
+ req.headers['X-Auth-Project-Id'] = 'user1_project'
+ result = req.get_response(fakes.wsgi_app_v21(use_no_auth=True))
+ self.assertEqual(result.status, '204 No Content')
+ self.assertEqual(result.headers['X-Server-Management-Url'],
+ "http://localhost/v2/fake")
+
+ def test_auth_token_no_empty_headers(self):
+ req = webob.Request.blank('/v2/fake')
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
+ req.headers['X-Auth-Project-Id'] = 'user1_project'
+ result = req.get_response(fakes.wsgi_app_v21(use_no_auth=True))
+ self.assertEqual(result.status, '204 No Content')
+ self.assertNotIn('X-CDN-Management-Url', result.headers)
+ self.assertNotIn('X-Storage-Url', result.headers)
diff --git a/nova/tests/api/openstack/compute/test_v3_extensions.py b/nova/tests/unit/api/openstack/compute/test_v3_extensions.py
index da6aa43d7f..da6aa43d7f 100644
--- a/nova/tests/api/openstack/compute/test_v3_extensions.py
+++ b/nova/tests/unit/api/openstack/compute/test_v3_extensions.py
diff --git a/nova/tests/unit/api/openstack/compute/test_versions.py b/nova/tests/unit/api/openstack/compute/test_versions.py
new file mode 100644
index 0000000000..fabd15e01c
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_versions.py
@@ -0,0 +1,797 @@
+# Copyright 2010-2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import uuid as stdlib_uuid
+
+import feedparser
+from lxml import etree
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.openstack.compute import versions
+from nova.api.openstack.compute import views
+from nova.api.openstack import xmlutil
+from nova import test
+from nova.tests.unit.api.openstack import common
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import matchers
+
+
+NS = {
+ 'atom': 'http://www.w3.org/2005/Atom',
+ 'ns': 'http://docs.openstack.org/common/api/v1.0'
+}
+
+
+EXP_LINKS = {
+ 'v2.0': {
+ 'html': 'http://docs.openstack.org/',
+ },
+ 'v2.1': {
+ 'html': 'http://docs.openstack.org/'
+ },
+}
+
+
+EXP_VERSIONS = {
+ "v2.0": {
+ "id": "v2.0",
+ "status": "CURRENT",
+ "updated": "2011-01-21T11:33:21Z",
+ "links": [
+ {
+ "rel": "describedby",
+ "type": "text/html",
+ "href": EXP_LINKS['v2.0']['html'],
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/vnd.openstack.compute+xml;version=2",
+ },
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.compute+json;version=2",
+ },
+ ],
+ },
+ "v2.1": {
+ "id": "v2.1",
+ "status": "EXPERIMENTAL",
+ "updated": "2013-07-23T11:33:21Z",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2.1/",
+ },
+ {
+ "rel": "describedby",
+ "type": "text/html",
+ "href": EXP_LINKS['v2.1']['html'],
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/json",
+ "type": "application/vnd.openstack.compute+json;version=2.1",
+ }
+ ],
+ }
+}
+
+
+class VersionsTestV20(test.NoDBTestCase):
+
+ def test_get_version_list(self):
+ req = webob.Request.blank('/')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ versions = jsonutils.loads(res.body)["versions"]
+ expected = [
+ {
+ "id": "v2.0",
+ "status": "CURRENT",
+ "updated": "2011-01-21T11:33:21Z",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2/",
+ }],
+ },
+ {
+ "id": "v2.1",
+ "status": "EXPERIMENTAL",
+ "updated": "2013-07-23T11:33:21Z",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2/",
+ }],
+ },
+ ]
+ self.assertEqual(versions, expected)
+
+ def test_get_version_list_302(self):
+ req = webob.Request.blank('/v2')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 302)
+ redirect_req = webob.Request.blank('/v2/')
+ self.assertEqual(res.location, redirect_req.url)
+
+ def _test_get_version_2_detail(self, url, accept=None):
+ if accept is None:
+ accept = "application/json"
+ req = webob.Request.blank(url)
+ req.accept = accept
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ version = jsonutils.loads(res.body)
+ expected = {
+ "version": {
+ "id": "v2.0",
+ "status": "CURRENT",
+ "updated": "2011-01-21T11:33:21Z",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v2/",
+ },
+ {
+ "rel": "describedby",
+ "type": "text/html",
+ "href": EXP_LINKS['v2.0']['html'],
+ },
+ ],
+ "media-types": [
+ {
+ "base": "application/xml",
+ "type": "application/"
+ "vnd.openstack.compute+xml;version=2",
+ },
+ {
+ "base": "application/json",
+ "type": "application/"
+ "vnd.openstack.compute+json;version=2",
+ },
+ ],
+ },
+ }
+ self.assertEqual(expected, version)
+
+ def test_get_version_2_detail(self):
+ self._test_get_version_2_detail('/v2/')
+
+ def test_get_version_2_detail_content_type(self):
+ accept = "application/json;version=2"
+ self._test_get_version_2_detail('/', accept=accept)
+
+ def test_get_version_2_versions_invalid(self):
+ req = webob.Request.blank('/v2/versions/1234')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
    def test_get_version_2_detail_xml(self):
        """GET /v2/ with an XML Accept header returns the version detail
        as schema-valid XML with the expected media-types and links.
        """
        req = webob.Request.blank('/v2/')
        req.accept = "application/xml"
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.content_type, "application/xml")

        version = etree.XML(res.body)
        # Response must validate against the "version" schema.
        xmlutil.validate_schema(version, 'version')

        expected = EXP_VERSIONS['v2.0']
        self.assertTrue(version.xpath('/ns:version', namespaces=NS))
        media_types = version.xpath('ns:media-types/ns:media-type',
                                    namespaces=NS)
        self.assertTrue(common.compare_media_types(media_types,
                                                   expected['media-types']))
        for key in ['id', 'status', 'updated']:
            self.assertEqual(version.get(key), expected[key])
        links = version.xpath('atom:link', namespaces=NS)
        # A self link is prepended to the expected link list.
        self.assertTrue(common.compare_links(links,
                       [{'rel': 'self', 'href': 'http://localhost/v2/'}]
                       + expected['links']))
+
    def test_get_version_list_xml(self):
        """GET / as XML lists both v2.0 and v2.1, each with a self link."""
        req = webob.Request.blank('/')
        req.accept = "application/xml"
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.content_type, "application/xml")

        root = etree.XML(res.body)
        xmlutil.validate_schema(root, 'versions')

        self.assertTrue(root.xpath('/ns:versions', namespaces=NS))
        versions = root.xpath('ns:version', namespaces=NS)
        self.assertEqual(len(versions), 2)

        # Versions must appear in order: v2.0 first, then v2.1.
        for i, v in enumerate(['v2.0', 'v2.1']):
            version = versions[i]
            expected = EXP_VERSIONS[v]
            for key in ['id', 'status', 'updated']:
                self.assertEqual(version.get(key), expected[key])
            (link,) = version.xpath('atom:link', namespaces=NS)
            self.assertTrue(common.compare_links(link,
                [{'rel': 'self', 'href': 'http://localhost/%s/' % v}]))
+
    def test_get_version_2_detail_atom(self):
        """GET /v2/ as Atom returns a schema-valid feed with one entry
        describing v2.0, including a self link and a describedby link.
        """
        req = webob.Request.blank('/v2/')
        req.accept = "application/atom+xml"
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)
        self.assertEqual("application/atom+xml", res.content_type)

        xmlutil.validate_schema(etree.XML(res.body), 'atom')

        f = feedparser.parse(res.body)
        self.assertEqual(f.feed.title, 'About This Version')
        self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z')
        self.assertEqual(f.feed.id, 'http://localhost/v2/')
        self.assertEqual(f.feed.author, 'Rackspace')
        self.assertEqual(f.feed.author_detail.href,
                         'http://www.rackspace.com/')
        self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v2/')
        self.assertEqual(f.feed.links[0]['rel'], 'self')

        # Exactly one entry: the v2.0 version itself.
        self.assertEqual(len(f.entries), 1)
        entry = f.entries[0]
        self.assertEqual(entry.id, 'http://localhost/v2/')
        self.assertEqual(entry.title, 'Version v2.0')
        self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
        self.assertEqual(len(entry.content), 1)
        self.assertEqual(entry.content[0].value,
            'Version v2.0 CURRENT (2011-01-21T11:33:21Z)')
        self.assertEqual(len(entry.links), 2)
        self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
        self.assertEqual(entry.links[0]['rel'], 'self')
        self.assertEqual(entry.links[1], {
            'href': EXP_LINKS['v2.0']['html'],
            'type': 'text/html',
            'rel': 'describedby'})
+
    def test_get_version_list_atom(self):
        """GET / as Atom lists both versions as feed entries."""
        req = webob.Request.blank('/')
        req.accept = "application/atom+xml"
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.content_type, "application/atom+xml")

        f = feedparser.parse(res.body)
        self.assertEqual(f.feed.title, 'Available API Versions')
        self.assertEqual(f.feed.updated, '2013-07-23T11:33:21Z')
        self.assertEqual(f.feed.id, 'http://localhost/')
        self.assertEqual(f.feed.author, 'Rackspace')
        self.assertEqual(f.feed.author_detail.href,
                         'http://www.rackspace.com/')
        self.assertEqual(f.feed.links[0]['href'], 'http://localhost/')
        self.assertEqual(f.feed.links[0]['rel'], 'self')

        self.assertEqual(len(f.entries), 2)
        entry = f.entries[0]
        self.assertEqual(entry.id, 'http://localhost/v2/')
        self.assertEqual(entry.title, 'Version v2.0')
        self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
        self.assertEqual(len(entry.content), 1)
        self.assertEqual(entry.content[0].value,
            'Version v2.0 CURRENT (2011-01-21T11:33:21Z)')
        self.assertEqual(len(entry.links), 1)
        self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
        self.assertEqual(entry.links[0]['rel'], 'self')

        entry = f.entries[1]
        # NOTE: the v2.1 entry id also points at /v2/ — this matches the
        # API's current output (both versions share the /v2/ endpoint).
        self.assertEqual(entry.id, 'http://localhost/v2/')
        self.assertEqual(entry.title, 'Version v2.1')
        self.assertEqual(entry.updated, '2013-07-23T11:33:21Z')
        self.assertEqual(len(entry.content), 1)
        self.assertEqual(entry.content[0].value,
            'Version v2.1 EXPERIMENTAL (2013-07-23T11:33:21Z)')
        self.assertEqual(len(entry.links), 1)
        self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
        self.assertEqual(entry.links[0]['rel'], 'self')
+
    def test_multi_choice_image(self):
        """A request under an unversioned path returns 300 Multiple Choices
        with a JSON body listing both API versions rooted at the request.
        """
        req = webob.Request.blank('/images/1')
        req.accept = "application/json"
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 300)
        self.assertEqual(res.content_type, "application/json")

        expected = {
        "choices": [
            {
                "id": "v2.0",
                "status": "CURRENT",
                "links": [
                    {
                        "href": "http://localhost/v2/images/1",
                        "rel": "self",
                    },
                ],
                "media-types": [
                    {
                        "base": "application/xml",
                        "type": "application/vnd.openstack.compute+xml"
                                ";version=2"
                    },
                    {
                        "base": "application/json",
                        "type": "application/vnd.openstack.compute+json"
                                ";version=2"
                    },
                ],
            },
            {
                "id": "v2.1",
                "status": "EXPERIMENTAL",
                "links": [
                    {
                        "href": "http://localhost/v2/images/1",
                        "rel": "self",
                    },
                ],
                "media-types": [
                    {
                        "base": "application/json",
                        "type":
                        "application/vnd.openstack.compute+json;version=2.1",
                    }
                ],
            },
        ], }

        self.assertThat(jsonutils.loads(res.body),
                        matchers.DictMatches(expected))
+
    def test_multi_choice_image_xml(self):
        """The 300 Multiple Choices response is also available as XML,
        with per-version media-types and self links.
        """
        req = webob.Request.blank('/images/1')
        req.accept = "application/xml"
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 300)
        self.assertEqual(res.content_type, "application/xml")

        root = etree.XML(res.body)
        self.assertTrue(root.xpath('/ns:choices', namespaces=NS))
        versions = root.xpath('ns:version', namespaces=NS)
        self.assertEqual(len(versions), 2)

        version = versions[0]
        self.assertEqual(version.get('id'), 'v2.0')
        self.assertEqual(version.get('status'), 'CURRENT')
        media_types = version.xpath('ns:media-types/ns:media-type',
                                    namespaces=NS)
        self.assertTrue(common.
                        compare_media_types(media_types,
                                            EXP_VERSIONS['v2.0']['media-types']
                                            ))

        links = version.xpath('atom:link', namespaces=NS)
        self.assertTrue(common.compare_links(links,
            [{'rel': 'self', 'href': 'http://localhost/v2/images/1'}]))

        version = versions[1]
        self.assertEqual(version.get('id'), 'v2.1')
        self.assertEqual(version.get('status'), 'EXPERIMENTAL')
        media_types = version.xpath('ns:media-types/ns:media-type',
                                    namespaces=NS)
        self.assertTrue(common.
                        compare_media_types(media_types,
                                            EXP_VERSIONS['v2.1']['media-types']
                                            ))

        links = version.xpath('atom:link', namespaces=NS)
        self.assertTrue(common.compare_links(links,
            [{'rel': 'self', 'href': 'http://localhost/v2/images/1'}]))
+
+ def test_multi_choice_server_atom(self):
+ """Make sure multi choice responses do not have content-type
+ application/atom+xml (should use default of json)
+ """
+ req = webob.Request.blank('/servers')
+ req.accept = "application/atom+xml"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 300)
+ self.assertEqual(res.content_type, "application/json")
+
    def test_multi_choice_server(self):
        """A server URL outside any version prefix returns 300 with both
        version choices, each self link rooted at the requested UUID.
        """
        uuid = str(stdlib_uuid.uuid4())
        req = webob.Request.blank('/servers/' + uuid)
        req.accept = "application/json"
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 300)
        self.assertEqual(res.content_type, "application/json")

        expected = {
        "choices": [
            {
                "id": "v2.0",
                "status": "CURRENT",
                "links": [
                    {
                        "href": "http://localhost/v2/servers/" + uuid,
                        "rel": "self",
                    },
                ],
                "media-types": [
                    {
                        "base": "application/xml",
                        "type": "application/vnd.openstack.compute+xml"
                                ";version=2"
                    },
                    {
                        "base": "application/json",
                        "type": "application/vnd.openstack.compute+json"
                                ";version=2"
                    },
                ],
            },
            {
                "id": "v2.1",
                "status": "EXPERIMENTAL",
                "links": [
                    {
                        "href": "http://localhost/v2/servers/" + uuid,
                        "rel": "self",
                    },
                ],
                "media-types": [
                    {
                        "base": "application/json",
                        "type":
                        "application/vnd.openstack.compute+json;version=2.1",
                    }
                ],
            },
        ], }

        self.assertThat(jsonutils.loads(res.body),
                        matchers.DictMatches(expected))
+
+
class VersionsViewBuilderTests(test.NoDBTestCase):
    """Exercises the versions view builder directly (no WSGI stack)."""

    def test_view_builder(self):
        """build_versions() decorates raw version data with self links."""
        version_data = {
            "v3.2.1": {
                "id": "3.2.1",
                "status": "CURRENT",
                "updated": "2011-07-18T11:30:00Z",
            }
        }

        expected = {
            "versions": [
                {
                    "id": "3.2.1",
                    "status": "CURRENT",
                    "updated": "2011-07-18T11:30:00Z",
                    "links": [
                        {
                            "rel": "self",
                            "href": "http://example.org/v2/",
                        },
                    ],
                }
            ]
        }

        builder = views.versions.ViewBuilder("http://example.org/")
        self.assertEqual(builder.build_versions(version_data), expected)

    def _assert_href(self, version_string):
        # Every version string — known or not — maps onto the v2 endpoint.
        builder = views.versions.ViewBuilder("http://example.org/app/")
        self.assertEqual(builder.generate_href(version_string),
                         "http://example.org/app/v2/")

    def test_generate_href(self):
        self._assert_href('v2')

    def test_generate_href_v21(self):
        self._assert_href('v2.1')

    def test_generate_href_unknown(self):
        self._assert_href('foo')
+
+
class VersionsSerializerTests(test.NoDBTestCase):
    """Drives the XML/Atom version serializers directly with canned data."""

    def test_versions_list_xml_serializer(self):
        """VersionsTemplate emits schema-valid XML with id/status attrs
        and an atom self link per version.
        """
        versions_data = {
            'versions': [
                {
                    "id": "2.7",
                    "updated": "2011-07-18T11:30:00Z",
                    "status": "DEPRECATED",
                    "links": [
                        {
                            "rel": "self",
                            "href": "http://test/v2",
                        },
                    ],
                },
            ]
        }

        serializer = versions.VersionsTemplate()
        response = serializer.serialize(versions_data)

        root = etree.XML(response)
        xmlutil.validate_schema(root, 'versions')

        self.assertTrue(root.xpath('/ns:versions', namespaces=NS))
        version_elems = root.xpath('ns:version', namespaces=NS)
        self.assertEqual(len(version_elems), 1)
        version = version_elems[0]
        self.assertEqual(version.get('id'), versions_data['versions'][0]['id'])
        self.assertEqual(version.get('status'),
                         versions_data['versions'][0]['status'])

        # The serializer adds the atom media-type to the link.
        (link,) = version.xpath('atom:link', namespaces=NS)
        self.assertTrue(common.compare_links(link, [{
            'rel': 'self',
            'href': 'http://test/v2',
            'type': 'application/atom+xml'}]))

    def test_versions_multi_xml_serializer(self):
        """ChoicesTemplate renders a <choices> document with media-types
        nested under each version.
        """
        versions_data = {
            'choices': [
                {
                    "id": "2.7",
                    "updated": "2011-07-18T11:30:00Z",
                    "status": "DEPRECATED",
                    "media-types": EXP_VERSIONS['v2.0']['media-types'],
                    "links": [
                        {
                            "rel": "self",
                            "href": "http://test/v2/images",
                        },
                    ],
                },
            ]
        }

        serializer = versions.ChoicesTemplate()
        response = serializer.serialize(versions_data)

        root = etree.XML(response)
        self.assertTrue(root.xpath('/ns:choices', namespaces=NS))
        (version,) = root.xpath('ns:version', namespaces=NS)
        self.assertEqual(version.get('id'), versions_data['choices'][0]['id'])
        self.assertEqual(version.get('status'),
                         versions_data['choices'][0]['status'])

        # media-types must be the first child element of <version>.
        media_types = list(version)[0]
        self.assertEqual(media_types.tag.split('}')[1], "media-types")

        media_types = version.xpath('ns:media-types/ns:media-type',
                                    namespaces=NS)
        self.assertTrue(common.compare_media_types(media_types,
                                        versions_data['choices'][0]['media-types']))

        (link,) = version.xpath('atom:link', namespaces=NS)
        self.assertTrue(common.compare_links(link,
                                      versions_data['choices'][0]['links']))

    def test_versions_list_atom_serializer(self):
        """VersionsAtomSerializer builds a feed with one entry per version
        and a feed id derived from the version href's host.
        """
        versions_data = {
            'versions': [
                {
                    "id": "2.9.8",
                    "updated": "2011-07-20T11:40:00Z",
                    "status": "CURRENT",
                    "links": [
                        {
                            "rel": "self",
                            "href": "http://test/2.9.8",
                        },
                    ],
                },
            ]
        }

        serializer = versions.VersionsAtomSerializer()
        response = serializer.serialize(versions_data)
        f = feedparser.parse(response)

        self.assertEqual(f.feed.title, 'Available API Versions')
        self.assertEqual(f.feed.updated, '2011-07-20T11:40:00Z')
        self.assertEqual(f.feed.id, 'http://test/')
        self.assertEqual(f.feed.author, 'Rackspace')
        self.assertEqual(f.feed.author_detail.href,
                         'http://www.rackspace.com/')
        self.assertEqual(f.feed.links[0]['href'], 'http://test/')
        self.assertEqual(f.feed.links[0]['rel'], 'self')

        self.assertEqual(len(f.entries), 1)
        entry = f.entries[0]
        self.assertEqual(entry.id, 'http://test/2.9.8')
        self.assertEqual(entry.title, 'Version 2.9.8')
        self.assertEqual(entry.updated, '2011-07-20T11:40:00Z')
        self.assertEqual(len(entry.content), 1)
        self.assertEqual(entry.content[0].value,
            'Version 2.9.8 CURRENT (2011-07-20T11:40:00Z)')
        self.assertEqual(len(entry.links), 1)
        self.assertEqual(entry.links[0]['href'], 'http://test/2.9.8')
        self.assertEqual(entry.links[0]['rel'], 'self')

    def test_version_detail_atom_serializer(self):
        """VersionAtomSerializer renders a single-version feed including
        the describedby link with its text/html type.
        """
        versions_data = {
            "version": {
                "id": "v2.0",
                "status": "CURRENT",
                "updated": "2011-01-21T11:33:21Z",
                "links": [
                    {
                        "rel": "self",
                        "href": "http://localhost/v2/",
                    },
                    {
                        "rel": "describedby",
                        "type": "text/html",
                        "href": EXP_LINKS['v2.0']['html'],
                    },
                ],
                "media-types": [
                    {
                        "base": "application/xml",
                        "type": "application/vnd.openstack.compute+xml"
                                ";version=2",
                    },
                    {
                        "base": "application/json",
                        "type": "application/vnd.openstack.compute+json"
                                ";version=2",
                    }
                ],
            },
        }

        serializer = versions.VersionAtomSerializer()
        response = serializer.serialize(versions_data)
        f = feedparser.parse(response)

        self.assertEqual(f.feed.title, 'About This Version')
        self.assertEqual(f.feed.updated, '2011-01-21T11:33:21Z')
        self.assertEqual(f.feed.id, 'http://localhost/v2/')
        self.assertEqual(f.feed.author, 'Rackspace')
        self.assertEqual(f.feed.author_detail.href,
                         'http://www.rackspace.com/')
        self.assertEqual(f.feed.links[0]['href'], 'http://localhost/v2/')
        self.assertEqual(f.feed.links[0]['rel'], 'self')

        self.assertEqual(len(f.entries), 1)
        entry = f.entries[0]
        self.assertEqual(entry.id, 'http://localhost/v2/')
        self.assertEqual(entry.title, 'Version v2.0')
        self.assertEqual(entry.updated, '2011-01-21T11:33:21Z')
        self.assertEqual(len(entry.content), 1)
        self.assertEqual(entry.content[0].value,
            'Version v2.0 CURRENT (2011-01-21T11:33:21Z)')
        self.assertEqual(len(entry.links), 2)
        self.assertEqual(entry.links[0]['href'], 'http://localhost/v2/')
        self.assertEqual(entry.links[0]['rel'], 'self')
        self.assertEqual(entry.links[1], {
            'rel': 'describedby',
            'type': 'text/html',
            'href': EXP_LINKS['v2.0']['html']})

    def test_multi_choice_image_with_body(self):
        """A POST with a body to an unversioned path still yields 300."""
        req = webob.Request.blank('/images/1')
        req.accept = "application/json"
        req.method = 'POST'
        req.content_type = "application/json"
        req.body = "{\"foo\": \"bar\"}"
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(300, res.status_int)
        self.assertEqual("application/json", res.content_type)

    def test_get_version_list_with_body(self):
        """A POST with a body to "/" still returns the version list."""
        req = webob.Request.blank('/')
        req.accept = "application/json"
        req.method = 'POST'
        req.content_type = "application/json"
        req.body = "{\"foo\": \"bar\"}"
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(200, res.status_int)
        self.assertEqual("application/json", res.content_type)
+
+
+# NOTE(oomichi): Now version API of v2.0 covers "/"(root).
+# So this class tests "/v2.1" only for v2.1 API.
class VersionsTestV21(test.NoDBTestCase):
    """Version API tests against the v2.1 router (served at /v2.1)."""

    # Deep-copy so the class-level insert below does not mutate the shared
    # module-level EXP_VERSIONS fixture.
    exp_versions = copy.deepcopy(EXP_VERSIONS)
    exp_versions['v2.0']['links'].insert(0,
        {'href': 'http://localhost/v2.1/', 'rel': 'self'},
    )

    def test_get_version_list_302(self):
        # A versioned URL without a trailing slash redirects to one with it.
        req = webob.Request.blank('/v2.1')
        req.accept = "application/json"
        res = req.get_response(fakes.wsgi_app_v21())
        self.assertEqual(res.status_int, 302)
        redirect_req = webob.Request.blank('/v2.1/')
        self.assertEqual(res.location, redirect_req.url)

    def test_get_version_21_detail(self):
        req = webob.Request.blank('/v2.1/')
        req.accept = "application/json"
        res = req.get_response(fakes.wsgi_app_v21())
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.content_type, "application/json")
        version = jsonutils.loads(res.body)
        expected = {"version": self.exp_versions['v2.1']}
        self.assertEqual(expected, version)

    def test_get_version_21_versions_v21_detail(self):
        req = webob.Request.blank('/v2.1/fake/versions/v2.1')
        req.accept = "application/json"
        res = req.get_response(fakes.wsgi_app_v21())
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.content_type, "application/json")
        version = jsonutils.loads(res.body)
        expected = {"version": self.exp_versions['v2.1']}
        self.assertEqual(expected, version)

    def test_get_version_21_versions_v20_detail(self):
        # The v2.1 endpoint can also describe v2.0.
        req = webob.Request.blank('/v2.1/fake/versions/v2.0')
        req.accept = "application/json"
        res = req.get_response(fakes.wsgi_app_v21())
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.content_type, "application/json")
        version = jsonutils.loads(res.body)
        expected = {"version": self.exp_versions['v2.0']}
        self.assertEqual(expected, version)

    def test_get_version_21_versions_invalid(self):
        req = webob.Request.blank('/v2.1/versions/1234')
        req.accept = "application/json"
        res = req.get_response(fakes.wsgi_app_v21())
        self.assertEqual(res.status_int, 404)

    def test_get_version_21_detail_content_type(self):
        # Version selection via Accept media-type parameter.
        req = webob.Request.blank('/')
        req.accept = "application/json;version=2.1"
        res = req.get_response(fakes.wsgi_app_v21())
        self.assertEqual(res.status_int, 200)
        self.assertEqual(res.content_type, "application/json")
        version = jsonutils.loads(res.body)
        expected = {"version": self.exp_versions['v2.1']}
        self.assertEqual(expected, version)
diff --git a/nova/tests/unit/api/openstack/fakes.py b/nova/tests/unit/api/openstack/fakes.py
new file mode 100644
index 0000000000..34c072a634
--- /dev/null
+++ b/nova/tests/unit/api/openstack/fakes.py
@@ -0,0 +1,662 @@
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import uuid
+
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import routes
+import six
+import webob
+import webob.dec
+import webob.request
+
+from nova.api import auth as api_auth
+from nova.api import openstack as openstack_api
+from nova.api.openstack import auth
+from nova.api.openstack import compute
+from nova.api.openstack.compute import limits
+from nova.api.openstack.compute import versions
+from nova.api.openstack import urlmap
+from nova.api.openstack import wsgi as os_wsgi
+from nova.compute import api as compute_api
+from nova.compute import flavors
+from nova.compute import vm_states
+from nova import context
+from nova.db.sqlalchemy import models
+from nova import exception as exc
+import nova.netconf
+from nova.network import api as network_api
+from nova import quota
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_network
+from nova.tests.unit.objects import test_keypair
+from nova import utils
+from nova import wsgi
+
+
+QUOTAS = quota.QUOTAS
+
+
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+FAKE_UUIDS = {}
+
+
class Context(object):
    """Empty placeholder class; tests attach attributes to instances."""
    pass
+
+
class FakeRouter(wsgi.Router):
    """Router stub answering every request with an empty 200 response."""

    def __init__(self, ext_mgr=None):
        # Deliberately skip wsgi.Router.__init__: no mapper is needed.
        pass

    @webob.dec.wsgify
    def __call__(self, req):
        response = webob.Response()
        response.status = '200'
        response.headers['X-Test-Success'] = 'True'
        return response
+
+
@webob.dec.wsgify
def fake_wsgi(self, req):
    # Transparent stand-in for a middleware __call__: forward straight to
    # the wrapped application without doing anything.
    return self.application
+
+
def wsgi_app(inner_app_v2=None, fake_auth_context=None,
             use_no_auth=False, ext_mgr=None, init_only=None):
    """Assemble a v2 API WSGI application for tests.

    Wraps the (optionally supplied) inner router with fault handling,
    context injection (or no-auth middleware) and rate limiting, then
    mounts it at /v2 and /v1.1 with the version discovery app at /.
    """
    if not inner_app_v2:
        inner_app_v2 = compute.APIRouter(ext_mgr, init_only)

    if use_no_auth:
        api_v2 = openstack_api.FaultWrapper(auth.NoAuthMiddleware(
              limits.RateLimitingMiddleware(inner_app_v2)))
    else:
        if fake_auth_context is not None:
            ctxt = fake_auth_context
        else:
            # Default fake request context for authenticated requests.
            ctxt = context.RequestContext('fake', 'fake', auth_token=True)
        api_v2 = openstack_api.FaultWrapper(api_auth.InjectContext(ctxt,
              limits.RateLimitingMiddleware(inner_app_v2)))

    mapper = urlmap.URLMap()
    mapper['/v2'] = api_v2
    mapper['/v1.1'] = api_v2
    mapper['/'] = openstack_api.FaultWrapper(versions.Versions())
    return mapper
+
+
def wsgi_app_v21(inner_app_v21=None, fake_auth_context=None,
                 use_no_auth=False, ext_mgr=None, init_only=None):
    """Assemble a v2.1 API WSGI application for tests.

    Mirrors wsgi_app() but routes /v2 and /v2.1 to the v2.1 router.
    NOTE(review): ext_mgr is accepted for signature parity with wsgi_app
    but is not used here.
    """
    if not inner_app_v21:
        inner_app_v21 = compute.APIRouterV21(init_only)

    if use_no_auth:
        api_v21 = openstack_api.FaultWrapper(auth.NoAuthMiddlewareV3(
              limits.RateLimitingMiddleware(inner_app_v21)))
    else:
        if fake_auth_context is not None:
            ctxt = fake_auth_context
        else:
            ctxt = context.RequestContext('fake', 'fake', auth_token=True)
        api_v21 = openstack_api.FaultWrapper(api_auth.InjectContext(ctxt,
              limits.RateLimitingMiddleware(inner_app_v21)))

    mapper = urlmap.URLMap()
    mapper['/v2'] = api_v21
    mapper['/v2.1'] = api_v21
    return mapper
+
+
def stub_out_key_pair_funcs(stubs, have_key_pair=True):
    """Stub the nova.db keypair lookups with a canned 'key' keypair."""

    def _all_key_pairs(context, user_id):
        return [dict(test_keypair.fake_keypair,
                     name='key', public_key='public_key')]

    def _one_key_pair(context, user_id, name):
        if name != 'key':
            raise exc.KeypairNotFound(user_id=user_id, name=name)
        return dict(test_keypair.fake_keypair,
                    name='key', public_key='public_key')

    def _no_key_pairs(context, user_id):
        return []

    if have_key_pair:
        stubs.Set(nova.db, 'key_pair_get_all_by_user', _all_key_pairs)
        stubs.Set(nova.db, 'key_pair_get', _one_key_pair)
    else:
        # Only the list call is stubbed in the no-keypair case.
        stubs.Set(nova.db, 'key_pair_get_all_by_user', _no_key_pairs)
+
+
def stub_out_rate_limiting(stubs):
    """Make the rate-limiting middleware a transparent pass-through."""
    def fake_rate_init(self, app):
        # Call the middleware's *parent* __init__ so the limit machinery
        # is never set up, then wire the app straight through.
        super(limits.RateLimitingMiddleware, self).__init__(app)
        self.application = app

    stubs.Set(nova.api.openstack.compute.limits.RateLimitingMiddleware,
        '__init__', fake_rate_init)

    stubs.Set(nova.api.openstack.compute.limits.RateLimitingMiddleware,
        '__call__', fake_wsgi)
+
+
def stub_out_instance_quota(stubs, allowed, quota, resource='instances'):
    """Stub QUOTAS.reserve so asking for more than *allowed* of *resource*
    raises OverQuota with plausible quota/usage/headroom numbers.
    """
    def fake_reserve(context, **deltas):
        requested = deltas.pop(resource, 0)
        if requested > allowed:
            quotas = dict(instances=1, cores=1, ram=1)
            quotas[resource] = quota
            usages = dict(instances=dict(in_use=0, reserved=0),
                          cores=dict(in_use=0, reserved=0),
                          ram=dict(in_use=0, reserved=0))
            # Fabricate usage so in_use + reserved leaves exactly
            # *allowed* units of headroom for the limited resource.
            usages[resource]['in_use'] = (quotas[resource] * 0.9 -
                                          allowed)
            usages[resource]['reserved'] = quotas[resource] * 0.1
            # FIX: use .items() instead of the Python-2-only .iteritems()
            # so this helper also runs under Python 3; behavior on
            # Python 2 is unchanged.
            headroom = dict(
                (res, value - (usages[res]['in_use'] +
                               usages[res]['reserved']))
                for res, value in quotas.items()
            )
            raise exc.OverQuota(overs=[resource], quotas=quotas,
                                usages=usages, headroom=headroom)
    stubs.Set(QUOTAS, 'reserve', fake_reserve)
+
+
def stub_out_networking(stubs):
    """Pin the auto-detected local IP address to 127.0.0.1."""
    stubs.Set(nova.netconf, '_get_my_ip', lambda: '127.0.0.1')
+
+
def stub_out_compute_api_snapshot(stubs):
    """Replace compute API snapshot() with a canned ACTIVE image."""

    def _fake_snapshot(self, context, instance, name, extra_properties=None):
        # emulate glance rejecting image names which are too long
        if len(name) > 256:
            raise exc.Invalid
        return dict(id='123', status='ACTIVE', name=name,
                    properties=extra_properties)

    stubs.Set(compute_api.API, 'snapshot', _fake_snapshot)
+
+
class stub_out_compute_api_backup(object):
    """Replaces compute API backup() with a canned result and records the
    most recent extra_properties (see .extra_props_last_call).
    """

    def __init__(self, stubs):
        self.stubs = stubs
        self.extra_props_last_call = None
        stubs.Set(compute_api.API, 'backup', self.backup)

    def backup(self, context, instance, name, backup_type, rotation,
               extra_properties=None):
        self.extra_props_last_call = extra_properties
        properties = dict(backup_type=backup_type, rotation=rotation)
        properties.update(extra_properties or {})
        return dict(id='123', status='ACTIVE', name=name,
                    properties=properties)
+
+
def stub_out_nw_api_get_instance_nw_info(stubs, num_networks=1, func=None):
    # NOTE(review): num_networks and func are accepted for signature
    # compatibility but ignored; the fake_network helper decides the
    # returned network info.
    fake_network.stub_out_nw_api_get_instance_nw_info(stubs)
+
+
def stub_out_nw_api_get_floating_ips_by_fixed_address(stubs, func=None):
    """Stub the floating-IP lookup; the default fake returns ['1.2.3.4']."""
    def _default(self, context, fixed_ip):
        return ['1.2.3.4']

    if func is None:
        func = _default
    stubs.Set(network_api.API, 'get_floating_ips_by_fixed_address', func)
+
+
def stub_out_nw_api(stubs, cls=None, private=None, publics=None):
    """Replace the network API class with a mostly-no-op fake."""
    private = private or '192.168.0.3'
    publics = publics or ['1.2.3.4']

    class Fake:
        def get_instance_nw_info(*args, **kwargs):
            pass

        def get_floating_ips_by_fixed_address(*args, **kwargs):
            return publics

        def validate_networks(self, context, networks, max_count):
            # Pretend every requested network is valid.
            return max_count

        def create_pci_requests_for_sriov_ports(self, context,
                                                system_metadata,
                                                requested_networks):
            pass

    stubs.Set(network_api, 'API', cls or Fake)
    fake_network.stub_out_nw_api_get_instance_nw_info(stubs)
+
+
class FakeToken(object):
    """In-memory auth token; each instance gets a unique increasing id.

    Supports dict-style access (token['field']) in addition to plain
    attribute access.
    """

    # Class-wide counter used to hand out unique ids.
    id_count = 0

    def __getitem__(self, key):
        return getattr(self, key)

    def __init__(self, **kwargs):
        FakeToken.id_count += 1
        self.id = FakeToken.id_count
        # FIX: .items() instead of the Python-2-only .iteritems() so the
        # fake also works under Python 3; identical behavior on Python 2.
        for k, v in kwargs.items():
            setattr(self, k, v)
+
+
class FakeRequestContext(context.RequestContext):
    """RequestContext that defaults auth_token to 'fake_auth_token'."""

    def __init__(self, *args, **kwargs):
        kwargs.setdefault('auth_token', 'fake_auth_token')
        super(FakeRequestContext, self).__init__(*args, **kwargs)
+
+
class HTTPRequest(os_wsgi.Request):
    """Factory for v2 test requests that carry a fake nova context."""

    @staticmethod
    def blank(*args, **kwargs):
        kwargs['base_url'] = 'http://localhost/v2'
        is_admin = kwargs.pop('use_admin_context', False)
        request = os_wsgi.Request.blank(*args, **kwargs)
        request.environ['nova.context'] = FakeRequestContext(
            'fake_user', 'fake', is_admin=is_admin)
        return request
+
+
class HTTPRequestV3(os_wsgi.Request):
    """Factory for v3 test requests that carry a fake nova context."""

    @staticmethod
    def blank(*args, **kwargs):
        kwargs['base_url'] = 'http://localhost/v3'
        is_admin = kwargs.pop('use_admin_context', False)
        request = os_wsgi.Request.blank(*args, **kwargs)
        request.environ['nova.context'] = FakeRequestContext(
            'fake_user', 'fake', is_admin=is_admin)
        return request
+
+
class TestRouter(wsgi.Router):
    """Router exposing one controller as the "tests" resource."""

    def __init__(self, controller, mapper=None):
        mapper = mapper or routes.Mapper()
        mapper.resource("test", "tests",
                        controller=os_wsgi.Resource(controller))
        super(TestRouter, self).__init__(mapper)
+
+
class FakeAuthDatabase(object):
    """Dict-backed stand-in for the auth token database API."""

    data = {}

    @staticmethod
    def auth_token_get(context, token_hash):
        return FakeAuthDatabase.data.get(token_hash)

    @staticmethod
    def auth_token_create(context, token):
        fake_token = FakeToken(created_at=timeutils.utcnow(), **token)
        store = FakeAuthDatabase.data
        # Index by hash (for lookup) and by id (for destroy).
        store[fake_token.token_hash] = fake_token
        store['id_%i' % fake_token.id] = fake_token
        return fake_token

    @staticmethod
    def auth_token_destroy(context, token_id):
        store = FakeAuthDatabase.data
        token = store.get('id_%i' % token_id)
        if token and token.token_hash in store:
            del store[token.token_hash]
            del store['id_%i' % token_id]
+
+
class FakeRateLimiter(object):
    """Rate-limiter middleware replacement that never limits anything."""

    def __init__(self, application):
        self.application = application

    @webob.dec.wsgify
    def __call__(self, req):
        # Forward every request straight to the wrapped application.
        return self.application
+
+
def create_info_cache(nw_cache):
    """Wrap network-cache data in an instance info_cache dict.

    When nw_cache is None a default single-NIC cache with one IPv4 and
    one IPv6 subnet is generated; non-string caches are JSON-encoded.
    """
    if nw_cache is None:
        def _fixed(address):
            return {'address': address, 'type': 'fixed'}

        nw_cache = [
            {'address': 'aa:aa:aa:aa:aa:aa',
             'id': 1,
             'network': {'bridge': 'br0',
                         'id': 1,
                         'label': 'test1',
                         'subnets': [
                             {'cidr': '192.168.1.0/24',
                              'ips': [_fixed('192.168.1.100')]},
                             {'cidr': 'b33f::/64',
                              'ips': [_fixed('2001:db8:0:1::1')]}]}}]

    # The DB stores network_info serialized as JSON.
    if not isinstance(nw_cache, six.string_types):
        nw_cache = jsonutils.dumps(nw_cache)

    return {
        "info_cache": {
            "network_info": nw_cache,
            "deleted": False,
            "created_at": None,
            "deleted_at": None,
            "updated_at": None,
        }
    }
+
+
def get_fake_uuid(token=0):
    """Return the uuid for *token*, stable across repeated calls."""
    try:
        return FAKE_UUIDS[token]
    except KeyError:
        return FAKE_UUIDS.setdefault(token, str(uuid.uuid4()))
+
+
def fake_instance_get(**kwargs):
    """Build an instance_get stub returning one stubbed server."""
    def _getter(context, uuid, columns_to_join=None, use_slave=False):
        return stub_instance(1, **kwargs)
    return _getter
+
+
def fake_actions_to_locked_server(self, context, instance, *args, **kwargs):
    # Stub for compute API actions: behave as if the instance were locked.
    raise exc.InstanceIsLocked(instance_uuid=instance['uuid'])
+
+
def fake_instance_get_all_by_filters(num_servers=5, **kwargs):
    """Build an instance_get_all_by_filters stub over num_servers fakes.

    The returned callable honors "marker" (raising MarkerNotFound when
    the marker uuid is never seen) and "limit" like the real DB API;
    remaining kwargs are forwarded to stub_instance().
    """
    def _return_servers(context, *args, **kwargs):
        servers_list = []
        marker = kwargs.get("marker")
        limit = kwargs.get("limit")
        found_marker = False

        # DB-layer-only arguments the stub does not use.
        kwargs.pop('columns_to_join', None)
        kwargs.pop('use_slave', None)

        # FIX: range() instead of the Python-2-only xrange(); num_servers
        # is tiny, so materializing the sequence is harmless and the
        # helper now also runs under Python 3.
        for i in range(num_servers):
            uuid = get_fake_uuid(i)
            server = stub_instance(id=i + 1, uuid=uuid,
                                   **kwargs)
            servers_list.append(server)
            if marker is not None and uuid == marker:
                # Everything up to and including the marker is discarded.
                found_marker = True
                servers_list = []
        if marker is not None and not found_marker:
            raise exc.MarkerNotFound(marker=marker)
        if limit is not None:
            servers_list = servers_list[:limit]
        return servers_list
    return _return_servers
+
+
# Sentinel so callers can still pass an explicit None for the timestamps.
_TIME_UNSET = object()


def stub_instance(id, user_id=None, project_id=None, host=None,
                  node=None, vm_state=None, task_state=None,
                  reservation_id="", uuid=FAKE_UUID, image_ref="10",
                  flavor_id="1", name=None, key_name='',
                  access_ipv4=None, access_ipv6=None, progress=0,
                  auto_disk_config=False, display_name=None,
                  include_fake_metadata=True, config_drive=None,
                  power_state=None, nw_cache=None, metadata=None,
                  security_groups=None, root_device_name=None,
                  limit=None, marker=None,
                  launched_at=_TIME_UNSET,
                  terminated_at=_TIME_UNSET,
                  availability_zone='', locked_by=None, cleaned=False,
                  memory_mb=0, vcpus=0, root_gb=0, ephemeral_gb=0):
    """Build a fake DB-layer instance dict for API tests.

    Most keyword arguments map straight onto instance fields; limit and
    marker are accepted only for signature compatibility with DB filter
    kwargs forwarded by other fakes.
    """
    # BUG FIX: launched_at/terminated_at previously defaulted to
    # timeutils.utcnow() *in the signature*, which Python evaluates once
    # at import time — every stubbed instance shared one frozen
    # timestamp.  Resolve them at call time instead; an explicit None is
    # still passed through unchanged.
    if launched_at is _TIME_UNSET:
        launched_at = timeutils.utcnow()
    if terminated_at is _TIME_UNSET:
        terminated_at = timeutils.utcnow()

    if user_id is None:
        user_id = 'fake_user'
    if project_id is None:
        project_id = 'fake_project'

    if metadata:
        metadata = [{'key': k, 'value': v} for k, v in metadata.items()]
    elif include_fake_metadata:
        metadata = [models.InstanceMetadata(key='seq', value=str(id))]
    else:
        metadata = []

    inst_type = flavors.get_flavor_by_flavor_id(int(flavor_id))
    sys_meta = flavors.save_flavor_info({}, inst_type)

    if host is not None:
        host = str(host)

    if key_name:
        key_data = 'FAKE'
    else:
        key_data = ''

    if security_groups is None:
        security_groups = [{"id": 1, "name": "test", "description": "Foo:",
                            "project_id": "project", "user_id": "user",
                            "created_at": None, "updated_at": None,
                            "deleted_at": None, "deleted": False}]

    # ReservationID isn't sent back, hack it in there.
    server_name = name or "server%s" % id
    if reservation_id != "":
        server_name = "reservation_%s" % (reservation_id, )

    info_cache = create_info_cache(nw_cache)

    instance = {
        "id": int(id),
        "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
        "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
        "deleted_at": datetime.datetime(2010, 12, 12, 10, 0, 0),
        "deleted": None,
        "user_id": user_id,
        "project_id": project_id,
        "image_ref": image_ref,
        "kernel_id": "",
        "ramdisk_id": "",
        "launch_index": 0,
        "key_name": key_name,
        "key_data": key_data,
        "config_drive": config_drive,
        "vm_state": vm_state or vm_states.BUILDING,
        "task_state": task_state,
        "power_state": power_state,
        "memory_mb": memory_mb,
        "vcpus": vcpus,
        "root_gb": root_gb,
        "ephemeral_gb": ephemeral_gb,
        "ephemeral_key_uuid": None,
        "hostname": display_name or server_name,
        "host": host,
        "node": node,
        "instance_type_id": 1,
        "instance_type": inst_type,
        "user_data": "",
        "reservation_id": reservation_id,
        "mac_address": "",
        "scheduled_at": timeutils.utcnow(),
        "launched_at": launched_at,
        "terminated_at": terminated_at,
        "availability_zone": availability_zone,
        "display_name": display_name or server_name,
        "display_description": "",
        "locked": locked_by is not None,
        "locked_by": locked_by,
        "metadata": metadata,
        "access_ip_v4": access_ipv4,
        "access_ip_v6": access_ipv6,
        "uuid": uuid,
        "progress": progress,
        "auto_disk_config": auto_disk_config,
        "name": "instance-%s" % id,
        "shutdown_terminate": True,
        "disable_terminate": False,
        "security_groups": security_groups,
        "root_device_name": root_device_name,
        "system_metadata": utils.dict_to_metadata(sys_meta),
        "pci_devices": [],
        "vm_mode": "",
        "default_swap_device": "",
        "default_ephemeral_device": "",
        "launched_on": "",
        "cell_name": "",
        "architecture": "",
        "os_type": "",
        "cleaned": cleaned}

    instance.update(info_cache)
    instance['info_cache']['instance_uuid'] = instance['uuid']

    return instance
+
+
+def stub_volume(id, **kwargs):
+ volume = {
+ 'id': id,
+ 'user_id': 'fakeuser',
+ 'project_id': 'fakeproject',
+ 'host': 'fakehost',
+ 'size': 1,
+ 'availability_zone': 'fakeaz',
+ 'instance_uuid': 'fakeuuid',
+ 'mountpoint': '/',
+ 'status': 'fakestatus',
+ 'attach_status': 'attached',
+ 'name': 'vol name',
+ 'display_name': 'displayname',
+ 'display_description': 'displaydesc',
+ 'created_at': datetime.datetime(1999, 1, 1, 1, 1, 1),
+ 'snapshot_id': None,
+ 'volume_type_id': 'fakevoltype',
+ 'volume_metadata': [],
+ 'volume_type': {'name': 'vol_type_name'}}
+
+ volume.update(kwargs)
+ return volume
+
+
+def stub_volume_create(self, context, size, name, description, snapshot,
+ **param):
+ vol = stub_volume('1')
+ vol['size'] = size
+ vol['display_name'] = name
+ vol['display_description'] = description
+ try:
+ vol['snapshot_id'] = snapshot['id']
+ except (KeyError, TypeError):
+ vol['snapshot_id'] = None
+ vol['availability_zone'] = param.get('availability_zone', 'fakeaz')
+ return vol
+
+
+def stub_volume_update(self, context, *args, **param):
+ pass
+
+
+def stub_volume_delete(self, context, *args, **param):
+ pass
+
+
+def stub_volume_get(self, context, volume_id):
+ return stub_volume(volume_id)
+
+
+def stub_volume_notfound(self, context, volume_id):
+ raise exc.VolumeNotFound(volume_id=volume_id)
+
+
+def stub_volume_get_all(context, search_opts=None):
+ return [stub_volume(100, project_id='fake'),
+ stub_volume(101, project_id='superfake'),
+ stub_volume(102, project_id='superduperfake')]
+
+
+def stub_volume_check_attach(self, context, *args, **param):
+ pass
+
+
+def stub_snapshot(id, **kwargs):
+ snapshot = {
+ 'id': id,
+ 'volume_id': 12,
+ 'status': 'available',
+ 'volume_size': 100,
+ 'created_at': timeutils.utcnow(),
+ 'display_name': 'Default name',
+ 'display_description': 'Default description',
+ 'project_id': 'fake'
+ }
+
+ snapshot.update(kwargs)
+ return snapshot
+
+
+def stub_snapshot_create(self, context, volume_id, name, description):
+ return stub_snapshot(100, volume_id=volume_id, display_name=name,
+ display_description=description)
+
+
+def stub_compute_volume_snapshot_create(self, context, volume_id, create_info):
+ return {'snapshot': {'id': 100, 'volumeId': volume_id}}
+
+
+def stub_snapshot_delete(self, context, snapshot_id):
+ if snapshot_id == '-1':
+ raise exc.NotFound
+
+
+def stub_compute_volume_snapshot_delete(self, context, volume_id, snapshot_id,
+ delete_info):
+ pass
+
+
+def stub_snapshot_get(self, context, snapshot_id):
+ if snapshot_id == '-1':
+ raise exc.NotFound
+ return stub_snapshot(snapshot_id)
+
+
+def stub_snapshot_get_all(self, context):
+ return [stub_snapshot(100, project_id='fake'),
+ stub_snapshot(101, project_id='superfake'),
+ stub_snapshot(102, project_id='superduperfake')]
+
+
+def stub_bdm_get_all_by_instance(context, instance_uuid, use_slave=False):
+ return [fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1, 'source_type': 'volume', 'destination_type': 'volume',
+ 'volume_id': 'volume_id1', 'instance_uuid': instance_uuid}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 2, 'source_type': 'volume', 'destination_type': 'volume',
+ 'volume_id': 'volume_id2', 'instance_uuid': instance_uuid})]
+
+
+def fake_get_available_languages():
+ existing_translations = ['en_GB', 'en_AU', 'de', 'zh_CN', 'en_US']
+ return existing_translations
+
+
+def fake_not_implemented(*args, **kwargs):
+ raise NotImplementedError()
diff --git a/nova/tests/unit/api/openstack/test_common.py b/nova/tests/unit/api/openstack/test_common.py
new file mode 100644
index 0000000000..a61f70cf95
--- /dev/null
+++ b/nova/tests/unit/api/openstack/test_common.py
@@ -0,0 +1,764 @@
+# Copyright 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suites for 'common' code used throughout the OpenStack HTTP API.
+"""
+
+import xml.dom.minidom as minidom
+
+from lxml import etree
+import mock
+import six
+from testtools import matchers
+import webob
+import webob.exc
+import webob.multidict
+
+from nova.api.openstack import common
+from nova.api.openstack import xmlutil
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import exception
+from nova import test
+from nova.tests.unit import utils
+
+
+NS = "{http://docs.openstack.org/compute/api/v1.1}"
+ATOMNS = "{http://www.w3.org/2005/Atom}"
+
+
+class LimiterTest(test.TestCase):
+ """Unit tests for the `nova.api.openstack.common.limited` method which
+ takes in a list of items and, depending on the 'offset' and 'limit' GET
+ params, returns a subset or complete set of the given items.
+ """
+
+ def setUp(self):
+ """Run before each test."""
+ super(LimiterTest, self).setUp()
+ self.tiny = range(1)
+ self.small = range(10)
+ self.medium = range(1000)
+ self.large = range(10000)
+
+ def test_limiter_offset_zero(self):
+ # Test offset key works with 0.
+ req = webob.Request.blank('/?offset=0')
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium)
+ self.assertEqual(common.limited(self.large, req), self.large[:1000])
+
+ def test_limiter_offset_medium(self):
+ # Test offset key works with a medium sized number.
+ req = webob.Request.blank('/?offset=10')
+ self.assertEqual(common.limited(self.tiny, req), [])
+ self.assertEqual(common.limited(self.small, req), self.small[10:])
+ self.assertEqual(common.limited(self.medium, req), self.medium[10:])
+ self.assertEqual(common.limited(self.large, req), self.large[10:1010])
+
+ def test_limiter_offset_over_max(self):
+ # Test offset key works with a number over 1000 (max_limit).
+ req = webob.Request.blank('/?offset=1001')
+ self.assertEqual(common.limited(self.tiny, req), [])
+ self.assertEqual(common.limited(self.small, req), [])
+ self.assertEqual(common.limited(self.medium, req), [])
+ self.assertEqual(
+ common.limited(self.large, req), self.large[1001:2001])
+
+ def test_limiter_offset_blank(self):
+ # Test offset key works with a blank offset.
+ req = webob.Request.blank('/?offset=')
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
+
+ def test_limiter_offset_bad(self):
+ # Test offset key works with a BAD offset.
+ req = webob.Request.blank(u'/?offset=\u0020aa')
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
+
+ def test_limiter_nothing(self):
+ # Test request with no offset or limit.
+ req = webob.Request.blank('/')
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium)
+ self.assertEqual(common.limited(self.large, req), self.large[:1000])
+
+ def test_limiter_limit_zero(self):
+ # Test limit of zero.
+ req = webob.Request.blank('/?limit=0')
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium)
+ self.assertEqual(common.limited(self.large, req), self.large[:1000])
+
+ def test_limiter_limit_medium(self):
+ # Test limit of 10.
+ req = webob.Request.blank('/?limit=10')
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium[:10])
+ self.assertEqual(common.limited(self.large, req), self.large[:10])
+
+ def test_limiter_limit_over_max(self):
+ # Test limit of 3000.
+ req = webob.Request.blank('/?limit=3000')
+ self.assertEqual(common.limited(self.tiny, req), self.tiny)
+ self.assertEqual(common.limited(self.small, req), self.small)
+ self.assertEqual(common.limited(self.medium, req), self.medium)
+ self.assertEqual(common.limited(self.large, req), self.large[:1000])
+
+ def test_limiter_limit_and_offset(self):
+ # Test request with both limit and offset.
+ items = range(2000)
+ req = webob.Request.blank('/?offset=1&limit=3')
+ self.assertEqual(common.limited(items, req), items[1:4])
+ req = webob.Request.blank('/?offset=3&limit=0')
+ self.assertEqual(common.limited(items, req), items[3:1003])
+ req = webob.Request.blank('/?offset=3&limit=1500')
+ self.assertEqual(common.limited(items, req), items[3:1003])
+ req = webob.Request.blank('/?offset=3000&limit=10')
+ self.assertEqual(common.limited(items, req), [])
+
+ def test_limiter_custom_max_limit(self):
+ # Test a max_limit other than 1000.
+ items = range(2000)
+ req = webob.Request.blank('/?offset=1&limit=3')
+ self.assertEqual(
+ common.limited(items, req, max_limit=2000), items[1:4])
+ req = webob.Request.blank('/?offset=3&limit=0')
+ self.assertEqual(
+ common.limited(items, req, max_limit=2000), items[3:])
+ req = webob.Request.blank('/?offset=3&limit=2500')
+ self.assertEqual(
+ common.limited(items, req, max_limit=2000), items[3:])
+ req = webob.Request.blank('/?offset=3000&limit=10')
+ self.assertEqual(common.limited(items, req, max_limit=2000), [])
+
+ def test_limiter_negative_limit(self):
+ # Test a negative limit.
+ req = webob.Request.blank('/?limit=-3000')
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
+
+ def test_limiter_negative_offset(self):
+ # Test a negative offset.
+ req = webob.Request.blank('/?offset=-30')
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
+
+
+class SortParamUtilsTest(test.TestCase):
+
+ def test_get_sort_params_defaults(self):
+ '''Verifies the default sort key and direction.'''
+ sort_keys, sort_dirs = common.get_sort_params({})
+ self.assertEqual(['created_at'], sort_keys)
+ self.assertEqual(['desc'], sort_dirs)
+
+ def test_get_sort_params_override_defaults(self):
+        '''Verifies that the defaults can be overridden.'''
+ sort_keys, sort_dirs = common.get_sort_params({}, default_key='key1',
+ default_dir='dir1')
+ self.assertEqual(['key1'], sort_keys)
+ self.assertEqual(['dir1'], sort_dirs)
+
+ sort_keys, sort_dirs = common.get_sort_params({}, default_key=None,
+ default_dir=None)
+ self.assertEqual([], sort_keys)
+ self.assertEqual([], sort_dirs)
+
+ def test_get_sort_params_single_value(self):
+ '''Verifies a single sort key and direction.'''
+ params = webob.multidict.MultiDict()
+ params.add('sort_key', 'key1')
+ params.add('sort_dir', 'dir1')
+ sort_keys, sort_dirs = common.get_sort_params(params)
+ self.assertEqual(['key1'], sort_keys)
+ self.assertEqual(['dir1'], sort_dirs)
+
+ def test_get_sort_params_single_with_default(self):
+ '''Verifies a single sort value with a default.'''
+ params = webob.multidict.MultiDict()
+ params.add('sort_key', 'key1')
+ sort_keys, sort_dirs = common.get_sort_params(params)
+ self.assertEqual(['key1'], sort_keys)
+ # sort_key was supplied, sort_dir should be defaulted
+ self.assertEqual(['desc'], sort_dirs)
+
+ params = webob.multidict.MultiDict()
+ params.add('sort_dir', 'dir1')
+ sort_keys, sort_dirs = common.get_sort_params(params)
+ self.assertEqual(['created_at'], sort_keys)
+ # sort_dir was supplied, sort_key should be defaulted
+ self.assertEqual(['dir1'], sort_dirs)
+
+ def test_get_sort_params_multiple_values(self):
+ '''Verifies multiple sort parameter values.'''
+ params = webob.multidict.MultiDict()
+ params.add('sort_key', 'key1')
+ params.add('sort_key', 'key2')
+ params.add('sort_key', 'key3')
+ params.add('sort_dir', 'dir1')
+ params.add('sort_dir', 'dir2')
+ params.add('sort_dir', 'dir3')
+ sort_keys, sort_dirs = common.get_sort_params(params)
+ self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
+ self.assertEqual(['dir1', 'dir2', 'dir3'], sort_dirs)
+ # Also ensure that the input parameters are not modified
+ sort_key_vals = []
+ sort_dir_vals = []
+ while 'sort_key' in params:
+ sort_key_vals.append(params.pop('sort_key'))
+ while 'sort_dir' in params:
+ sort_dir_vals.append(params.pop('sort_dir'))
+ self.assertEqual(['key1', 'key2', 'key3'], sort_key_vals)
+ self.assertEqual(['dir1', 'dir2', 'dir3'], sort_dir_vals)
+ self.assertEqual(0, len(params))
+
+
+class PaginationParamsTest(test.TestCase):
+ """Unit tests for the `nova.api.openstack.common.get_pagination_params`
+ method which takes in a request object and returns 'marker' and 'limit'
+ GET params.
+ """
+
+ def test_no_params(self):
+ # Test no params.
+ req = webob.Request.blank('/')
+ self.assertEqual(common.get_pagination_params(req), {})
+
+ def test_valid_marker(self):
+ # Test valid marker param.
+ req = webob.Request.blank(
+ '/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
+ self.assertEqual(common.get_pagination_params(req),
+ {'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'})
+
+ def test_valid_limit(self):
+ # Test valid limit param.
+ req = webob.Request.blank('/?limit=10')
+ self.assertEqual(common.get_pagination_params(req), {'limit': 10})
+
+ def test_invalid_limit(self):
+ # Test invalid limit param.
+ req = webob.Request.blank('/?limit=-2')
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.get_pagination_params, req)
+
+ def test_valid_limit_and_marker(self):
+ # Test valid limit and marker parameters.
+ marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
+ req = webob.Request.blank('/?limit=20&marker=%s' % marker)
+ self.assertEqual(common.get_pagination_params(req),
+ {'marker': marker, 'limit': 20})
+
+ def test_valid_page_size(self):
+ # Test valid page_size param.
+ req = webob.Request.blank('/?page_size=10')
+ self.assertEqual(common.get_pagination_params(req),
+ {'page_size': 10})
+
+ def test_invalid_page_size(self):
+ # Test invalid page_size param.
+ req = webob.Request.blank('/?page_size=-2')
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, common.get_pagination_params, req)
+
+ def test_valid_limit_and_page_size(self):
+ # Test valid limit and page_size parameters.
+ req = webob.Request.blank('/?limit=20&page_size=5')
+ self.assertEqual(common.get_pagination_params(req),
+ {'page_size': 5, 'limit': 20})
+
+
+class MiscFunctionsTest(test.TestCase):
+
+ def test_remove_major_version_from_href(self):
+ fixture = 'http://www.testsite.com/v1/images'
+ expected = 'http://www.testsite.com/images'
+ actual = common.remove_version_from_href(fixture)
+ self.assertEqual(actual, expected)
+
+ def test_remove_version_from_href(self):
+ fixture = 'http://www.testsite.com/v1.1/images'
+ expected = 'http://www.testsite.com/images'
+ actual = common.remove_version_from_href(fixture)
+ self.assertEqual(actual, expected)
+
+ def test_remove_version_from_href_2(self):
+ fixture = 'http://www.testsite.com/v1.1/'
+ expected = 'http://www.testsite.com/'
+ actual = common.remove_version_from_href(fixture)
+ self.assertEqual(actual, expected)
+
+ def test_remove_version_from_href_3(self):
+ fixture = 'http://www.testsite.com/v10.10'
+ expected = 'http://www.testsite.com'
+ actual = common.remove_version_from_href(fixture)
+ self.assertEqual(actual, expected)
+
+ def test_remove_version_from_href_4(self):
+ fixture = 'http://www.testsite.com/v1.1/images/v10.5'
+ expected = 'http://www.testsite.com/images/v10.5'
+ actual = common.remove_version_from_href(fixture)
+ self.assertEqual(actual, expected)
+
+ def test_remove_version_from_href_bad_request(self):
+ fixture = 'http://www.testsite.com/1.1/images'
+ self.assertRaises(ValueError,
+ common.remove_version_from_href,
+ fixture)
+
+ def test_remove_version_from_href_bad_request_2(self):
+ fixture = 'http://www.testsite.com/v/images'
+ self.assertRaises(ValueError,
+ common.remove_version_from_href,
+ fixture)
+
+ def test_remove_version_from_href_bad_request_3(self):
+ fixture = 'http://www.testsite.com/v1.1images'
+ self.assertRaises(ValueError,
+ common.remove_version_from_href,
+ fixture)
+
+ def test_get_id_from_href_with_int_url(self):
+ fixture = 'http://www.testsite.com/dir/45'
+ actual = common.get_id_from_href(fixture)
+ expected = '45'
+ self.assertEqual(actual, expected)
+
+ def test_get_id_from_href_with_int(self):
+ fixture = '45'
+ actual = common.get_id_from_href(fixture)
+ expected = '45'
+ self.assertEqual(actual, expected)
+
+ def test_get_id_from_href_with_int_url_query(self):
+ fixture = 'http://www.testsite.com/dir/45?asdf=jkl'
+ actual = common.get_id_from_href(fixture)
+ expected = '45'
+ self.assertEqual(actual, expected)
+
+ def test_get_id_from_href_with_uuid_url(self):
+ fixture = 'http://www.testsite.com/dir/abc123'
+ actual = common.get_id_from_href(fixture)
+ expected = "abc123"
+ self.assertEqual(actual, expected)
+
+ def test_get_id_from_href_with_uuid_url_query(self):
+ fixture = 'http://www.testsite.com/dir/abc123?asdf=jkl'
+ actual = common.get_id_from_href(fixture)
+ expected = "abc123"
+ self.assertEqual(actual, expected)
+
+ def test_get_id_from_href_with_uuid(self):
+ fixture = 'abc123'
+ actual = common.get_id_from_href(fixture)
+ expected = 'abc123'
+ self.assertEqual(actual, expected)
+
+ def test_raise_http_conflict_for_instance_invalid_state(self):
+ exc = exception.InstanceInvalidState(attr='fake_attr',
+ state='fake_state', method='fake_method',
+ instance_uuid='fake')
+ try:
+ common.raise_http_conflict_for_instance_invalid_state(exc,
+ 'meow', 'fake_server_id')
+ except webob.exc.HTTPConflict as e:
+ self.assertEqual(six.text_type(e),
+ "Cannot 'meow' instance fake_server_id while it is in "
+ "fake_attr fake_state")
+ else:
+ self.fail("webob.exc.HTTPConflict was not raised")
+
+ def test_check_img_metadata_properties_quota_valid_metadata(self):
+ ctxt = utils.get_test_admin_context()
+ metadata1 = {"key": "value"}
+ actual = common.check_img_metadata_properties_quota(ctxt, metadata1)
+ self.assertIsNone(actual)
+
+ metadata2 = {"key": "v" * 260}
+ actual = common.check_img_metadata_properties_quota(ctxt, metadata2)
+ self.assertIsNone(actual)
+
+ metadata3 = {"key": ""}
+ actual = common.check_img_metadata_properties_quota(ctxt, metadata3)
+ self.assertIsNone(actual)
+
+ def test_check_img_metadata_properties_quota_inv_metadata(self):
+ ctxt = utils.get_test_admin_context()
+ metadata1 = {"a" * 260: "value"}
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ common.check_img_metadata_properties_quota, ctxt, metadata1)
+
+ metadata2 = {"": "value"}
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ common.check_img_metadata_properties_quota, ctxt, metadata2)
+
+ metadata3 = "invalid metadata"
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ common.check_img_metadata_properties_quota, ctxt, metadata3)
+
+ metadata4 = None
+ self.assertIsNone(common.check_img_metadata_properties_quota(ctxt,
+ metadata4))
+ metadata5 = {}
+ self.assertIsNone(common.check_img_metadata_properties_quota(ctxt,
+ metadata5))
+
+ def test_status_from_state(self):
+ for vm_state in (vm_states.ACTIVE, vm_states.STOPPED):
+ for task_state in (task_states.RESIZE_PREP,
+ task_states.RESIZE_MIGRATING,
+ task_states.RESIZE_MIGRATED,
+ task_states.RESIZE_FINISH):
+ actual = common.status_from_state(vm_state, task_state)
+ expected = 'RESIZE'
+ self.assertEqual(expected, actual)
+
+ def test_status_rebuild_from_state(self):
+ for vm_state in (vm_states.ACTIVE, vm_states.STOPPED,
+ vm_states.ERROR):
+ for task_state in (task_states.REBUILDING,
+ task_states.REBUILD_BLOCK_DEVICE_MAPPING,
+ task_states.REBUILD_SPAWNING):
+ actual = common.status_from_state(vm_state, task_state)
+ expected = 'REBUILD'
+ self.assertEqual(expected, actual)
+
+ def test_task_and_vm_state_from_status(self):
+ fixture1 = ['reboot']
+ actual = common.task_and_vm_state_from_status(fixture1)
+ expected = [vm_states.ACTIVE], [task_states.REBOOT_PENDING,
+ task_states.REBOOT_STARTED,
+ task_states.REBOOTING]
+ self.assertEqual(expected, actual)
+
+ fixture2 = ['resize']
+ actual = common.task_and_vm_state_from_status(fixture2)
+ expected = ([vm_states.ACTIVE, vm_states.STOPPED],
+ [task_states.RESIZE_FINISH,
+ task_states.RESIZE_MIGRATED,
+ task_states.RESIZE_MIGRATING,
+ task_states.RESIZE_PREP])
+ self.assertEqual(expected, actual)
+
+ fixture3 = ['resize', 'reboot']
+ actual = common.task_and_vm_state_from_status(fixture3)
+ expected = ([vm_states.ACTIVE, vm_states.STOPPED],
+ [task_states.REBOOT_PENDING,
+ task_states.REBOOT_STARTED,
+ task_states.REBOOTING,
+ task_states.RESIZE_FINISH,
+ task_states.RESIZE_MIGRATED,
+ task_states.RESIZE_MIGRATING,
+ task_states.RESIZE_PREP])
+ self.assertEqual(expected, actual)
+
+
+class TestCollectionLinks(test.NoDBTestCase):
+ """Tests the _get_collection_links method."""
+
+ @mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
+ def test_items_less_than_limit(self, href_link_mock):
+ items = [
+ {"uuid": "123"}
+ ]
+ req = mock.MagicMock()
+ params = mock.PropertyMock(return_value=dict(limit=10))
+ type(req).params = params
+
+ builder = common.ViewBuilder()
+ results = builder._get_collection_links(req, items, "ignored", "uuid")
+
+ self.assertFalse(href_link_mock.called)
+ self.assertThat(results, matchers.HasLength(0))
+
+ @mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
+ def test_items_equals_given_limit(self, href_link_mock):
+ items = [
+ {"uuid": "123"}
+ ]
+ req = mock.MagicMock()
+ params = mock.PropertyMock(return_value=dict(limit=1))
+ type(req).params = params
+
+ builder = common.ViewBuilder()
+ results = builder._get_collection_links(req, items,
+ mock.sentinel.coll_key,
+ "uuid")
+
+ href_link_mock.assert_called_once_with(req, "123",
+ mock.sentinel.coll_key)
+ self.assertThat(results, matchers.HasLength(1))
+
+ @mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
+ def test_items_equals_default_limit(self, href_link_mock):
+ items = [
+ {"uuid": "123"}
+ ]
+ req = mock.MagicMock()
+ params = mock.PropertyMock(return_value=dict())
+ type(req).params = params
+ self.flags(osapi_max_limit=1)
+
+ builder = common.ViewBuilder()
+ results = builder._get_collection_links(req, items,
+ mock.sentinel.coll_key,
+ "uuid")
+
+ href_link_mock.assert_called_once_with(req, "123",
+ mock.sentinel.coll_key)
+ self.assertThat(results, matchers.HasLength(1))
+
+ @mock.patch('nova.api.openstack.common.ViewBuilder._get_next_link')
+ def test_items_equals_default_limit_with_given(self, href_link_mock):
+ items = [
+ {"uuid": "123"}
+ ]
+ req = mock.MagicMock()
+ # Given limit is greater than default max, only return default max
+ params = mock.PropertyMock(return_value=dict(limit=2))
+ type(req).params = params
+ self.flags(osapi_max_limit=1)
+
+ builder = common.ViewBuilder()
+ results = builder._get_collection_links(req, items,
+ mock.sentinel.coll_key,
+ "uuid")
+
+ href_link_mock.assert_called_once_with(req, "123",
+ mock.sentinel.coll_key)
+ self.assertThat(results, matchers.HasLength(1))
+
+
+class MetadataXMLDeserializationTest(test.TestCase):
+
+ deserializer = common.MetadataXMLDeserializer()
+
+ def test_create(self):
+ request_body = """
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key='123'>asdf</meta>
+ <meta key='567'>jkl;</meta>
+ </metadata>"""
+ output = self.deserializer.deserialize(request_body, 'create')
+ expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}}
+ self.assertEqual(output, expected)
+
+ def test_create_empty(self):
+ request_body = """
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"/>"""
+ output = self.deserializer.deserialize(request_body, 'create')
+ expected = {"body": {"metadata": {}}}
+ self.assertEqual(output, expected)
+
+ def test_update_all(self):
+ request_body = """
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key='123'>asdf</meta>
+ <meta key='567'>jkl;</meta>
+ </metadata>"""
+ output = self.deserializer.deserialize(request_body, 'update_all')
+ expected = {"body": {"metadata": {"123": "asdf", "567": "jkl;"}}}
+ self.assertEqual(output, expected)
+
+ def test_update(self):
+ request_body = """
+ <meta xmlns="http://docs.openstack.org/compute/api/v1.1"
+ key='123'>asdf</meta>"""
+ output = self.deserializer.deserialize(request_body, 'update')
+ expected = {"body": {"meta": {"123": "asdf"}}}
+ self.assertEqual(output, expected)
+
+
+class MetadataXMLSerializationTest(test.TestCase):
+
+ def test_xml_declaration(self):
+ serializer = common.MetadataTemplate()
+ fixture = {
+ 'metadata': {
+ 'one': 'two',
+ 'three': 'four',
+ },
+ }
+
+ output = serializer.serialize(fixture)
+ has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
+ self.assertTrue(has_dec)
+
+ def test_index(self):
+ serializer = common.MetadataTemplate()
+ fixture = {
+ 'metadata': {
+ 'one': 'two',
+ 'three': 'four',
+ },
+ }
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'metadata')
+ metadata_dict = fixture['metadata']
+ metadata_elems = root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 2)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = metadata_dict.items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ def test_index_null(self):
+ serializer = common.MetadataTemplate()
+ fixture = {
+ 'metadata': {
+ None: None,
+ },
+ }
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'metadata')
+ metadata_dict = fixture['metadata']
+ metadata_elems = root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 1)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = metadata_dict.items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ def test_index_unicode(self):
+ serializer = common.MetadataTemplate()
+ fixture = {
+ 'metadata': {
+ u'three': u'Jos\xe9',
+ },
+ }
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'metadata')
+ metadata_dict = fixture['metadata']
+ metadata_elems = root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 1)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = metadata_dict.items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(metadata_elem.text.strip(), meta_value)
+
+ def test_show(self):
+ serializer = common.MetaItemTemplate()
+ fixture = {
+ 'meta': {
+ 'one': 'two',
+ },
+ }
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ meta_dict = fixture['meta']
+ (meta_key, meta_value) = meta_dict.items()[0]
+ self.assertEqual(str(root.get('key')), str(meta_key))
+ self.assertEqual(root.text.strip(), meta_value)
+
+ def test_update_all(self):
+ serializer = common.MetadataTemplate()
+ fixture = {
+ 'metadata': {
+ 'key6': 'value6',
+ 'key4': 'value4',
+ },
+ }
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'metadata')
+ metadata_dict = fixture['metadata']
+ metadata_elems = root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 2)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = metadata_dict.items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+
+ def test_update_item(self):
+ serializer = common.MetaItemTemplate()
+ fixture = {
+ 'meta': {
+ 'one': 'two',
+ },
+ }
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ meta_dict = fixture['meta']
+ (meta_key, meta_value) = meta_dict.items()[0]
+ self.assertEqual(str(root.get('key')), str(meta_key))
+ self.assertEqual(root.text.strip(), meta_value)
+
+ def test_create(self):
+ serializer = common.MetadataTemplate()
+ fixture = {
+ 'metadata': {
+ 'key9': 'value9',
+ 'key2': 'value2',
+ 'key1': 'value1',
+ },
+ }
+ output = serializer.serialize(fixture)
+ root = etree.XML(output)
+ xmlutil.validate_schema(root, 'metadata')
+ metadata_dict = fixture['metadata']
+ metadata_elems = root.findall('{0}meta'.format(NS))
+ self.assertEqual(len(metadata_elems), 3)
+ for i, metadata_elem in enumerate(metadata_elems):
+ (meta_key, meta_value) = metadata_dict.items()[i]
+ self.assertEqual(str(metadata_elem.get('key')), str(meta_key))
+ self.assertEqual(str(metadata_elem.text).strip(), str(meta_value))
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="key2">value2</meta>
+ <meta key="key9">value9</meta>
+ <meta key="key1">value1</meta>
+ </metadata>
+ """.replace(" ", "").replace("\n", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_metadata_deserializer(self):
+ """Should throw a 400 error on corrupt xml."""
+ deserializer = common.MetadataXMLDeserializer()
+ self.assertRaises(
+ exception.MalformedRequestBody,
+ deserializer.deserialize,
+ utils.killer_xml_body())
+
+
+class LinkPrefixTest(test.NoDBTestCase):
+
+ def test_update_link_prefix(self):
+ vb = common.ViewBuilder()
+ result = vb._update_link_prefix("http://192.168.0.243:24/",
+ "http://127.0.0.1/compute")
+ self.assertEqual("http://127.0.0.1/compute", result)
+
+ result = vb._update_link_prefix("http://foo.x.com/v1",
+ "http://new.prefix.com")
+ self.assertEqual("http://new.prefix.com/v1", result)
+
+ result = vb._update_link_prefix(
+ "http://foo.x.com/v1",
+ "http://new.prefix.com:20455/new_extra_prefix")
+ self.assertEqual("http://new.prefix.com:20455/new_extra_prefix/v1",
+ result)
diff --git a/nova/tests/api/openstack/test_faults.py b/nova/tests/unit/api/openstack/test_faults.py
index b52a7e5896..b52a7e5896 100644
--- a/nova/tests/api/openstack/test_faults.py
+++ b/nova/tests/unit/api/openstack/test_faults.py
diff --git a/nova/tests/unit/api/openstack/test_mapper.py b/nova/tests/unit/api/openstack/test_mapper.py
new file mode 100644
index 0000000000..b872be546f
--- /dev/null
+++ b/nova/tests/unit/api/openstack/test_mapper.py
@@ -0,0 +1,46 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+
+from nova.api import openstack as openstack_api
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+class MapperTest(test.NoDBTestCase):
+    """Routing tests for the OpenStack API URL mappers."""
+
+    def test_resource_project_prefix(self):
+        # ProjectMapper routes URLs with a leading project-id segment
+        # ("/1234") ahead of the resource collection.
+        class Controller(object):
+            def index(self, req):
+                return 'foo'
+
+        app = fakes.TestRouter(Controller(),
+                               openstack_api.ProjectMapper())
+        req = webob.Request.blank('/1234/tests')
+        resp = req.get_response(app)
+        self.assertEqual(resp.body, 'foo')
+        self.assertEqual(resp.status_int, 200)
+
+    def test_resource_no_project_prefix(self):
+        # PlainMapper routes the bare collection with no project prefix.
+        class Controller(object):
+            def index(self, req):
+                return 'foo'
+
+        app = fakes.TestRouter(Controller(),
+                               openstack_api.PlainMapper())
+        req = webob.Request.blank('/tests')
+        resp = req.get_response(app)
+        self.assertEqual(resp.body, 'foo')
+        self.assertEqual(resp.status_int, 200)
diff --git a/nova/tests/unit/api/openstack/test_wsgi.py b/nova/tests/unit/api/openstack/test_wsgi.py
new file mode 100644
index 0000000000..7607101628
--- /dev/null
+++ b/nova/tests/unit/api/openstack/test_wsgi.py
@@ -0,0 +1,1244 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import inspect
+
+import webob
+
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova import exception
+from nova import i18n
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import utils
+
+
+class RequestTest(test.NoDBTestCase):
+    """Tests for nova.api.openstack.wsgi.Request.
+
+    Covers content-type negotiation (Content-Type header, Accept header,
+    URL extension), per-request caching of DB objects, and
+    Accept-Language matching.
+    """
+
+    def test_content_type_missing(self):
+        # No Content-Type header at all -> None, not an error.
+        request = wsgi.Request.blank('/tests/123', method='POST')
+        request.body = "<body />"
+        self.assertIsNone(request.get_content_type())
+
+    def test_content_type_unsupported(self):
+        # text/html is not an API media type -> InvalidContentType.
+        request = wsgi.Request.blank('/tests/123', method='POST')
+        request.headers["Content-Type"] = "text/html"
+        request.body = "asdf<br />"
+        self.assertRaises(exception.InvalidContentType,
+                          request.get_content_type)
+
+    def test_content_type_with_charset(self):
+        # A "; charset=..." suffix is stripped from the media type.
+        request = wsgi.Request.blank('/tests/123')
+        request.headers["Content-Type"] = "application/json; charset=UTF-8"
+        result = request.get_content_type()
+        self.assertEqual(result, "application/json")
+
+    def test_content_type_from_accept(self):
+        # Each supported Accept value is matched back verbatim.
+        for content_type in ('application/xml',
+                             'application/vnd.openstack.compute+xml',
+                             'application/json',
+                             'application/vnd.openstack.compute+json'):
+            request = wsgi.Request.blank('/tests/123')
+            request.headers["Accept"] = content_type
+            result = request.best_match_content_type()
+            self.assertEqual(result, content_type)
+
+    def test_content_type_from_accept_best(self):
+        # With equal (implicit) quality JSON wins; explicit q-values
+        # are honored.
+        request = wsgi.Request.blank('/tests/123')
+        request.headers["Accept"] = "application/xml, application/json"
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/json")
+
+        request = wsgi.Request.blank('/tests/123')
+        request.headers["Accept"] = ("application/json; q=0.3, "
+                                     "application/xml; q=0.9")
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/xml")
+
+    def test_content_type_from_query_extension(self):
+        # A ".xml"/".json" URL extension selects the content type;
+        # an unrecognized extension falls back to JSON.
+        request = wsgi.Request.blank('/tests/123.xml')
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/xml")
+
+        request = wsgi.Request.blank('/tests/123.json')
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/json")
+
+        request = wsgi.Request.blank('/tests/123.invalid')
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/json")
+
+    def test_content_type_accept_and_query_extension(self):
+        # The URL extension takes precedence over the Accept header.
+        request = wsgi.Request.blank('/tests/123.xml')
+        request.headers["Accept"] = "application/json"
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/xml")
+
+    def test_content_type_accept_default(self):
+        # Nothing usable in either extension or Accept -> JSON default.
+        request = wsgi.Request.blank('/tests/123.unsupported')
+        request.headers["Accept"] = "application/unsupported1"
+        result = request.best_match_content_type()
+        self.assertEqual(result, "application/json")
+
+    def test_cache_and_retrieve_instances(self):
+        # Instances cached singly or in bulk are retrievable by uuid;
+        # a cache miss returns None rather than raising.
+        request = wsgi.Request.blank('/foo')
+        instances = []
+        for x in xrange(3):
+            instances.append({'uuid': 'uuid%s' % x})
+        # Store 2
+        request.cache_db_instances(instances[:2])
+        # Store 1
+        request.cache_db_instance(instances[2])
+        self.assertEqual(request.get_db_instance('uuid0'),
+                         instances[0])
+        self.assertEqual(request.get_db_instance('uuid1'),
+                         instances[1])
+        self.assertEqual(request.get_db_instance('uuid2'),
+                         instances[2])
+        self.assertIsNone(request.get_db_instance('uuid3'))
+        self.assertEqual(request.get_db_instances(),
+                         {'uuid0': instances[0],
+                          'uuid1': instances[1],
+                          'uuid2': instances[2]})
+
+    def test_cache_and_retrieve_compute_nodes(self):
+        # Same caching contract as instances, keyed by compute-node id.
+        request = wsgi.Request.blank('/foo')
+        compute_nodes = []
+        for x in xrange(3):
+            compute_nodes.append({'id': 'id%s' % x})
+        # Store 2
+        request.cache_db_compute_nodes(compute_nodes[:2])
+        # Store 1
+        request.cache_db_compute_node(compute_nodes[2])
+        self.assertEqual(request.get_db_compute_node('id0'),
+                         compute_nodes[0])
+        self.assertEqual(request.get_db_compute_node('id1'),
+                         compute_nodes[1])
+        self.assertEqual(request.get_db_compute_node('id2'),
+                         compute_nodes[2])
+        self.assertIsNone(request.get_db_compute_node('id3'))
+        self.assertEqual(request.get_db_compute_nodes(),
+                         {'id0': compute_nodes[0],
+                          'id1': compute_nodes[1],
+                          'id2': compute_nodes[2]})
+
+    def test_from_request(self):
+        # Highest-q language that the (stubbed) catalog offers wins;
+        # the out-of-range q=1.1 entry is not available anyway.
+        self.stubs.Set(i18n, 'get_available_languages',
+                       fakes.fake_get_available_languages)
+
+        request = wsgi.Request.blank('/')
+        accepted = 'bogus;q=1.1, en-gb;q=0.7,en-us,en;q=.5,*;q=.7'
+        request.headers = {'Accept-Language': accepted}
+        self.assertEqual(request.best_match_language(), 'en_US')
+
+    def test_asterisk(self):
+        # asterisk should match first available if there
+        # are not any other available matches
+        self.stubs.Set(i18n, 'get_available_languages',
+                       fakes.fake_get_available_languages)
+
+        request = wsgi.Request.blank('/')
+        accepted = '*,es;q=.5'
+        request.headers = {'Accept-Language': accepted}
+        self.assertEqual(request.best_match_language(), 'en_GB')
+
+    def test_prefix(self):
+        # A bare primary tag ('zh') matches a regional variant.
+        self.stubs.Set(i18n, 'get_available_languages',
+                       fakes.fake_get_available_languages)
+
+        request = wsgi.Request.blank('/')
+        accepted = 'zh'
+        request.headers = {'Accept-Language': accepted}
+        self.assertEqual(request.best_match_language(), 'zh_CN')
+
+    def test_secondary(self):
+        # First choice unavailable -> the q=.5 fallback is used.
+        self.stubs.Set(i18n, 'get_available_languages',
+                       fakes.fake_get_available_languages)
+
+        request = wsgi.Request.blank('/')
+        accepted = 'nn,en-gb;q=.5'
+        request.headers = {'Accept-Language': accepted}
+        self.assertEqual(request.best_match_language(), 'en_GB')
+
+    def test_none_found(self):
+        # No overlap with the available languages -> None.
+        self.stubs.Set(i18n, 'get_available_languages',
+                       fakes.fake_get_available_languages)
+
+        request = wsgi.Request.blank('/')
+        accepted = 'nb-no'
+        request.headers = {'Accept-Language': accepted}
+        self.assertIs(request.best_match_language(), None)
+
+    def test_no_lang_header(self):
+        # An empty Accept-Language header -> None.
+        self.stubs.Set(i18n, 'get_available_languages',
+                       fakes.fake_get_available_languages)
+
+        request = wsgi.Request.blank('/')
+        accepted = ''
+        request.headers = {'Accept-Language': accepted}
+        self.assertIs(request.best_match_language(), None)
+
+
+class ActionDispatcherTest(test.NoDBTestCase):
+    """Tests for wsgi.ActionDispatcher method selection."""
+
+    def test_dispatch(self):
+        # A known action name routes to the same-named method.
+        serializer = wsgi.ActionDispatcher()
+        serializer.create = lambda x: 'pants'
+        self.assertEqual(serializer.dispatch({}, action='create'), 'pants')
+
+    def test_dispatch_action_None(self):
+        # action=None falls back to default().
+        serializer = wsgi.ActionDispatcher()
+        serializer.create = lambda x: 'pants'
+        serializer.default = lambda x: 'trousers'
+        self.assertEqual(serializer.dispatch({}, action=None), 'trousers')
+
+    def test_dispatch_default(self):
+        # An unknown action name also falls back to default().
+        serializer = wsgi.ActionDispatcher()
+        serializer.create = lambda x: 'pants'
+        serializer.default = lambda x: 'trousers'
+        self.assertEqual(serializer.dispatch({}, action='update'), 'trousers')
+
+
+class DictSerializerTest(test.NoDBTestCase):
+    """Tests for the base DictSerializer."""
+
+    def test_dispatch_default(self):
+        # The base class serializes any action to the empty string.
+        serializer = wsgi.DictSerializer()
+        self.assertEqual(serializer.serialize({}, 'update'), '')
+
+
+class XMLDictSerializerTest(test.NoDBTestCase):
+    """Tests for XMLDictSerializer output."""
+
+    def test_xml(self):
+        # All whitespace is stripped before comparing, so only element
+        # structure and the xmlns attribute are checked here.
+        input_dict = dict(servers=dict(a=(2, 3)))
+        expected_xml = '<serversxmlns="asdf"><a>(2,3)</a></servers>'
+        serializer = wsgi.XMLDictSerializer(xmlns="asdf")
+        result = serializer.serialize(input_dict)
+        result = result.replace('\n', '').replace(' ', '')
+        self.assertEqual(result, expected_xml)
+
+    def test_xml_contains_unicode(self):
+        # A unicode value comes out UTF-8 encoded in the XML byte string.
+        input_dict = dict(test=u'\u89e3\u7801')
+        expected_xml = '<test>\xe8\xa7\xa3\xe7\xa0\x81</test>'
+        serializer = wsgi.XMLDictSerializer()
+        result = serializer.serialize(input_dict)
+        result = result.replace('\n', '').replace(' ', '')
+        self.assertEqual(expected_xml, result)
+
+
+class JSONDictSerializerTest(test.NoDBTestCase):
+    """Tests for JSONDictSerializer output."""
+
+    def test_json(self):
+        # Note the tuple (2, 3) serializes as a JSON array [2,3];
+        # whitespace is stripped before comparing.
+        input_dict = dict(servers=dict(a=(2, 3)))
+        expected_json = '{"servers":{"a":[2,3]}}'
+        serializer = wsgi.JSONDictSerializer()
+        result = serializer.serialize(input_dict)
+        result = result.replace('\n', '').replace(' ', '')
+        self.assertEqual(result, expected_json)
+
+
+class TextDeserializerTest(test.NoDBTestCase):
+    """Tests for the base TextDeserializer."""
+
+    def test_dispatch_default(self):
+        # The base class deserializes any action to an empty dict.
+        deserializer = wsgi.TextDeserializer()
+        self.assertEqual(deserializer.deserialize({}, 'update'), {})
+
+
+class JSONDeserializerTest(test.NoDBTestCase):
+    """Tests for JSONDeserializer."""
+
+    def test_json(self):
+        # The parsed document is wrapped under a top-level 'body' key.
+        data = """{"a": {
+                "a1": "1",
+                "a2": "2",
+                "bs": ["1", "2", "3", {"c": {"c1": "1"}}],
+                "d": {"e": "1"},
+                "f": "1"}}"""
+        as_dict = {
+            'body': {
+                'a': {
+                    'a1': '1',
+                    'a2': '2',
+                    'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
+                    'd': {'e': '1'},
+                    'f': '1',
+                },
+            },
+        }
+        deserializer = wsgi.JSONDeserializer()
+        self.assertEqual(deserializer.deserialize(data), as_dict)
+
+    def test_json_valid_utf8(self):
+        # Valid UTF-8 bytes in the request body decode to unicode values.
+        data = """{"server": {"min_count": 1, "flavorRef": "1",
+                "name": "\xe6\xa6\x82\xe5\xbf\xb5",
+                "imageRef": "10bab10c-1304-47d",
+                "max_count": 1}} """
+        as_dict = {
+            'body': {
+                u'server': {
+                    u'min_count': 1, u'flavorRef': u'1',
+                    u'name': u'\u6982\u5ff5',
+                    u'imageRef': u'10bab10c-1304-47d',
+                    u'max_count': 1
+                }
+            }
+        }
+        deserializer = wsgi.JSONDeserializer()
+        self.assertEqual(deserializer.deserialize(data), as_dict)
+
+    def test_json_invalid_utf8(self):
+        """Send invalid utf-8 to JSONDeserializer.
+
+        Undecodable bytes must surface as MalformedRequestBody, not as a
+        raw UnicodeDecodeError.
+        """
+        data = """{"server": {"min_count": 1, "flavorRef": "1",
+                "name": "\xf0\x28\x8c\x28",
+                "imageRef": "10bab10c-1304-47d",
+                "max_count": 1}} """
+
+        deserializer = wsgi.JSONDeserializer()
+        self.assertRaises(exception.MalformedRequestBody,
+                          deserializer.deserialize, data)
+
+
+class XMLDeserializerTest(test.NoDBTestCase):
+    """Tests for XMLDeserializer."""
+
+    def test_xml(self):
+        # Attributes and child elements flatten into one dict; elements
+        # named in the 'plurals' metadata ('bs' -> 'b') become lists.
+        xml = """
+            <a a1="1" a2="2">
+              <bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
+              <d><e>1</e></d>
+              <f>1</f>
+            </a>
+            """.strip()
+        as_dict = {
+            'body': {
+                'a': {
+                    'a1': '1',
+                    'a2': '2',
+                    'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
+                    'd': {'e': '1'},
+                    'f': '1',
+                },
+            },
+        }
+        metadata = {'plurals': {'bs': 'b', 'ts': 't'}}
+        deserializer = wsgi.XMLDeserializer(metadata=metadata)
+        self.assertEqual(deserializer.deserialize(xml), as_dict)
+
+    def test_xml_empty(self):
+        # An empty element maps to an empty dict, still under 'body'.
+        xml = '<a></a>'
+        as_dict = {"body": {"a": {}}}
+        deserializer = wsgi.XMLDeserializer()
+        self.assertEqual(deserializer.deserialize(xml), as_dict)
+
+    def test_xml_valid_utf8(self):
+        # Valid UTF-8 bytes decode to unicode text values.
+        xml = """ <a><name>\xe6\xa6\x82\xe5\xbf\xb5</name></a> """
+        deserializer = wsgi.XMLDeserializer()
+        as_dict = {'body': {u'a': {u'name': u'\u6982\u5ff5'}}}
+        self.assertEqual(deserializer.deserialize(xml), as_dict)
+
+    def test_xml_invalid_utf8(self):
+        """Send invalid utf-8 to XMLDeserializer.
+
+        Undecodable bytes must surface as MalformedRequestBody.
+        """
+        xml = """ <a><name>\xf0\x28\x8c\x28</name></a> """
+        deserializer = wsgi.XMLDeserializer()
+        self.assertRaises(exception.MalformedRequestBody,
+                          deserializer.deserialize, xml)
+
+
+class ResourceTest(test.NoDBTestCase):
+
+    def get_req_id_header_name(self, request):
+        """Return the request-id response header name for this request.
+
+        API v3 uses 'x-openstack-request-id'; anything older uses
+        'x-compute-request-id'.
+        """
+        header_name = 'x-openstack-request-id'
+        if utils.get_api_version(request) < 3:
+            header_name = 'x-compute-request-id'
+
+        return header_name
+
+    def test_resource_call_with_method_get(self):
+        """GET dispatches to index(); any body/Content-Type is ignored."""
+        class Controller(object):
+            def index(self, req):
+                return 'success'
+
+        app = fakes.TestRouter(Controller())
+        # the default method is GET
+        req = webob.Request.blank('/tests')
+        response = req.get_response(app)
+        self.assertEqual(response.body, 'success')
+        self.assertEqual(response.status_int, 200)
+        # a body on a GET does not change the outcome
+        req.body = '{"body": {"key": "value"}}'
+        response = req.get_response(app)
+        self.assertEqual(response.body, 'success')
+        self.assertEqual(response.status_int, 200)
+        # nor does setting a Content-Type
+        req.content_type = 'application/json'
+        response = req.get_response(app)
+        self.assertEqual(response.body, 'success')
+        self.assertEqual(response.status_int, 200)
+
+    def test_resource_call_with_method_post(self):
+        """POST dispatches to create() with the deserialized body.
+
+        The nested controller reads ``expected_body`` from this test's
+        scope at call time, so reassigning it below changes what each
+        request is validated against.
+        """
+        class Controller(object):
+            @extensions.expected_errors(400)
+            def create(self, req, body):
+                if expected_body != body:
+                    msg = "The request body invalid"
+                    raise webob.exc.HTTPBadRequest(explanation=msg)
+                return "success"
+        # verify the method: POST
+        app = fakes.TestRouter(Controller())
+        req = webob.Request.blank('/tests', method="POST",
+                                  content_type='application/json')
+        req.body = '{"body": {"key": "value"}}'
+        expected_body = {'body': {
+            "key": "value"
+            }
+        }
+        response = req.get_response(app)
+        self.assertEqual(response.status_int, 200)
+        self.assertEqual(response.body, 'success')
+        # verify without body
+        expected_body = None
+        req.body = None
+        response = req.get_response(app)
+        self.assertEqual(response.status_int, 200)
+        self.assertEqual(response.body, 'success')
+        # the body is validated in the controller
+        expected_body = {'body': None}
+        response = req.get_response(app)
+        expected_unsupported_type_body = ('{"badRequest": '
+            '{"message": "The request body invalid", "code": 400}}')
+        self.assertEqual(response.status_int, 400)
+        self.assertEqual(expected_unsupported_type_body, response.body)
+
+    def test_resource_call_with_method_put(self):
+        """PUT dispatches to update(); a body without a Content-Type
+        yields a 400 "Unsupported Content-Type" fault.
+
+        As in the POST test, the controller compares against the
+        ``expected_body`` closure variable from this scope.
+        """
+        class Controller(object):
+            def update(self, req, id, body):
+                if expected_body != body:
+                    msg = "The request body invalid"
+                    raise webob.exc.HTTPBadRequest(explanation=msg)
+                return "success"
+        # verify the method: PUT
+        app = fakes.TestRouter(Controller())
+        req = webob.Request.blank('/tests/test_id', method="PUT",
+                                  content_type='application/json')
+        req.body = '{"body": {"key": "value"}}'
+        expected_body = {'body': {
+            "key": "value"
+            }
+        }
+        response = req.get_response(app)
+        self.assertEqual(response.body, 'success')
+        self.assertEqual(response.status_int, 200)
+        req.body = None
+        expected_body = None
+        response = req.get_response(app)
+        self.assertEqual(response.status_int, 200)
+        # verify no content_type is contained in the request
+        req.content_type = None
+        req.body = '{"body": {"key": "value"}}'
+        response = req.get_response(app)
+        expected_unsupported_type_body = ('{"badRequest": '
+            '{"message": "Unsupported Content-Type", "code": 400}}')
+        self.assertEqual(response.status_int, 400)
+        self.assertEqual(expected_unsupported_type_body, response.body)
+
+    def test_resource_call_with_method_delete(self):
+        """DELETE dispatches to delete(); any request body is ignored."""
+        class Controller(object):
+            def delete(self, req, id):
+                return "success"
+
+        # verify the method: DELETE
+        app = fakes.TestRouter(Controller())
+        req = webob.Request.blank('/tests/test_id', method="DELETE")
+        response = req.get_response(app)
+        self.assertEqual(response.status_int, 200)
+        self.assertEqual(response.body, 'success')
+        # ignore the body
+        req.body = '{"body": {"key": "value"}}'
+        response = req.get_response(app)
+        self.assertEqual(response.status_int, 200)
+        self.assertEqual(response.body, 'success')
+
+    def test_resource_not_authorized(self):
+        """exception.Forbidden from a controller maps to HTTP 403."""
+        class Controller(object):
+            def index(self, req):
+                raise exception.Forbidden()
+
+        req = webob.Request.blank('/tests')
+        app = fakes.TestRouter(Controller())
+        response = req.get_response(app)
+        self.assertEqual(response.status_int, 403)
+
+    def test_dispatch(self):
+        """dispatch() forwards the action-args dict as keyword args."""
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+        method, extensions = resource.get_method(None, 'index', None, '')
+        actual = resource.dispatch(method, None, {'pants': 'off'})
+        expected = 'off'
+        self.assertEqual(actual, expected)
+
+    def test_get_method_unknown_controller_method(self):
+        """Asking for a method the controller lacks raises AttributeError."""
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+        self.assertRaises(AttributeError, resource.get_method,
+                          None, 'create', None, '')
+
+    def test_get_method_action_json(self):
+        """A JSON action body selects the matching @wsgi.action method."""
+        class Controller(wsgi.Controller):
+            @wsgi.action('fooAction')
+            def _action_foo(self, req, id, body):
+                return body
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+        method, extensions = resource.get_method(None, 'action',
+                                                 'application/json',
+                                                 '{"fooAction": true}')
+        self.assertEqual(controller._action_foo, method)
+
+    def test_get_method_action_xml(self):
+        """An XML action body selects the matching @wsgi.action method."""
+        class Controller(wsgi.Controller):
+            @wsgi.action('fooAction')
+            def _action_foo(self, req, id, body):
+                return body
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+        method, extensions = resource.get_method(None, 'action',
+                                                 'application/xml',
+                                                 '<fooAction>true</fooAction>')
+        self.assertEqual(controller._action_foo, method)
+
+    def test_get_method_action_corrupt_xml(self):
+        """An entity-expansion XML action body raises MalformedRequestBody."""
+        class Controller(wsgi.Controller):
+            @wsgi.action('fooAction')
+            def _action_foo(self, req, id, body):
+                return body
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+        self.assertRaises(
+            exception.MalformedRequestBody,
+            resource.get_method,
+            None, 'action',
+            'application/xml',
+            utils.killer_xml_body())
+
+    def test_get_method_action_bad_body(self):
+        """An empty JSON object has no action key -> MalformedRequestBody."""
+        class Controller(wsgi.Controller):
+            @wsgi.action('fooAction')
+            def _action_foo(self, req, id, body):
+                return body
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+        self.assertRaises(exception.MalformedRequestBody, resource.get_method,
+                          None, 'action', 'application/json', '{}')
+
+    def test_get_method_unknown_controller_action(self):
+        """An action name with no registered handler raises KeyError."""
+        class Controller(wsgi.Controller):
+            @wsgi.action('fooAction')
+            def _action_foo(self, req, id, body):
+                return body
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+        self.assertRaises(KeyError, resource.get_method,
+                          None, 'action', 'application/json',
+                          '{"barAction": true}')
+
+    def test_get_method_action_method(self):
+        """A controller with a plain action() method gets it directly.
+
+        The body is never parsed on this path, so the truncated XML
+        literal below ('</fooAction' missing its '>') is harmless here.
+        NOTE(review): looks like a typo all the same - confirm upstream.
+        """
+        class Controller():
+            def action(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+        method, extensions = resource.get_method(None, 'action',
+                                                 'application/xml',
+                                                 '<fooAction>true</fooAction')
+        self.assertEqual(controller.action, method)
+
+    def test_get_action_args(self):
+        """get_action_args() keeps routing args but drops the
+        'controller' and 'format' entries.
+        """
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+
+        env = {
+            'wsgiorg.routing_args': [None, {
+                'controller': None,
+                'format': None,
+                'action': 'update',
+                'id': 12,
+            }],
+        }
+
+        expected = {'action': 'update', 'id': 12}
+
+        self.assertEqual(resource.get_action_args(env), expected)
+
+    def test_get_body_bad_content(self):
+        """An unrecognized Content-Type yields (None, '')."""
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+
+        request = wsgi.Request.blank('/', method='POST')
+        request.headers['Content-Type'] = 'application/none'
+        request.body = 'foo'
+
+        content_type, body = resource.get_body(request)
+        self.assertIsNone(content_type)
+        self.assertEqual(body, '')
+
+    def test_get_body_no_content_type(self):
+        """Missing Content-Type yields (None, body) - body is kept."""
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+
+        request = wsgi.Request.blank('/', method='POST')
+        request.body = 'foo'
+
+        content_type, body = resource.get_body(request)
+        self.assertIsNone(content_type)
+        self.assertEqual(body, 'foo')
+
+    def test_get_body_no_content_body(self):
+        """A Content-Type with an empty body keeps the content type."""
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+
+        request = wsgi.Request.blank('/', method='POST')
+        request.headers['Content-Type'] = 'application/json'
+        request.body = ''
+
+        content_type, body = resource.get_body(request)
+        self.assertEqual('application/json', content_type)
+        self.assertEqual(body, '')
+
+    def test_get_body(self):
+        """The normal case: both content type and raw body come back."""
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+
+        request = wsgi.Request.blank('/', method='POST')
+        request.headers['Content-Type'] = 'application/json'
+        request.body = 'foo'
+
+        content_type, body = resource.get_body(request)
+        self.assertEqual(content_type, 'application/json')
+        self.assertEqual(body, 'foo')
+
+    def test_get_request_id_with_dict_response_body(self):
+        """A dict return is serialized to JSON and a context is set up."""
+        class Controller(wsgi.Controller):
+            def index(self, req):
+                return {'foo': 'bar'}
+
+        req = fakes.HTTPRequest.blank('/tests')
+        app = fakes.TestRouter(Controller())
+        response = req.get_response(app)
+        self.assertIn('nova.context', req.environ)
+        self.assertEqual(response.body, '{"foo": "bar"}')
+        self.assertEqual(response.status_int, 200)
+
+    def test_no_request_id_with_str_response_body(self):
+        """A plain-string return must not error; it is passed through."""
+        class Controller(wsgi.Controller):
+            def index(self, req):
+                return 'foo'
+
+        req = fakes.HTTPRequest.blank('/tests')
+        app = fakes.TestRouter(Controller())
+        response = req.get_response(app)
+        # NOTE(alaski): This test is really to ensure that a str response
+        # doesn't error. Not having a request_id header is a side effect of
+        # our wsgi setup, ideally it would be there.
+        expected_header = self.get_req_id_header_name(req)
+        self.assertFalse(hasattr(response.headers, expected_header))
+        self.assertEqual(response.body, 'foo')
+        self.assertEqual(response.status_int, 200)
+
+    def test_get_request_id_no_response_body(self):
+        """A None return produces an empty 200 response."""
+        class Controller(object):
+            def index(self, req):
+                pass
+
+        req = fakes.HTTPRequest.blank('/tests')
+        app = fakes.TestRouter(Controller())
+        response = req.get_response(app)
+        self.assertIn('nova.context', req.environ)
+        self.assertEqual(response.body, '')
+        self.assertEqual(response.status_int, 200)
+
+    def test_deserialize_badtype(self):
+        """deserialize() rejects a content type with no deserializer."""
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+        self.assertRaises(exception.InvalidContentType,
+                          resource.deserialize,
+                          controller.index, 'application/none', 'foo')
+
+    def test_deserialize_default(self):
+        """With no decorator match, the Resource-level default is used."""
+        class JSONDeserializer(object):
+            def deserialize(self, body):
+                return 'json'
+
+        class XMLDeserializer(object):
+            def deserialize(self, body):
+                return 'xml'
+
+        class Controller(object):
+            @wsgi.deserializers(xml=XMLDeserializer)
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller, json=JSONDeserializer)
+
+        # application/json is not overridden on the method, so the
+        # resource's JSON deserializer handles it.
+        obj = resource.deserialize(controller.index, 'application/json', 'foo')
+        self.assertEqual(obj, 'json')
+
+    def test_deserialize_decorator(self):
+        """@wsgi.deserializers on the method overrides the default."""
+        class JSONDeserializer(object):
+            def deserialize(self, body):
+                return 'json'
+
+        class XMLDeserializer(object):
+            def deserialize(self, body):
+                return 'xml'
+
+        class Controller(object):
+            @wsgi.deserializers(xml=XMLDeserializer)
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller, json=JSONDeserializer)
+
+        # application/xml hits the decorator-registered deserializer.
+        obj = resource.deserialize(controller.index, 'application/xml', 'foo')
+        self.assertEqual(obj, 'xml')
+
+    def test_register_actions(self):
+        """register_actions() collects every @wsgi.action method."""
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        class ControllerExtended(wsgi.Controller):
+            @wsgi.action('fooAction')
+            def _action_foo(self, req, id, body):
+                return body
+
+            @wsgi.action('barAction')
+            def _action_bar(self, req, id, body):
+                return body
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+        # no actions registered until an extended controller is added
+        self.assertEqual({}, resource.wsgi_actions)
+
+        extended = ControllerExtended()
+        resource.register_actions(extended)
+        self.assertEqual({
+            'fooAction': extended._action_foo,
+            'barAction': extended._action_bar,
+        }, resource.wsgi_actions)
+
+    def test_register_extensions(self):
+        """register_extensions() records @wsgi.extends methods, split
+        between plain method extensions and action extensions.
+        """
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        class ControllerExtended(wsgi.Controller):
+            @wsgi.extends
+            def index(self, req, resp_obj, pants=None):
+                return None
+
+            @wsgi.extends(action='fooAction')
+            def _action_foo(self, req, resp, id, body):
+                return None
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+        self.assertEqual({}, resource.wsgi_extensions)
+        self.assertEqual({}, resource.wsgi_action_extensions)
+
+        extended = ControllerExtended()
+        resource.register_extensions(extended)
+        self.assertEqual({'index': [extended.index]}, resource.wsgi_extensions)
+        self.assertEqual({'fooAction': [extended._action_foo]},
+                         resource.wsgi_action_extensions)
+
+    def test_get_method_extensions(self):
+        """get_method() returns the controller method plus its
+        registered extensions.
+        """
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        class ControllerExtended(wsgi.Controller):
+            @wsgi.extends
+            def index(self, req, resp_obj, pants=None):
+                return None
+
+        controller = Controller()
+        extended = ControllerExtended()
+        resource = wsgi.Resource(controller)
+        resource.register_extensions(extended)
+        method, extensions = resource.get_method(None, 'index', None, '')
+        self.assertEqual(method, controller.index)
+        self.assertEqual(extensions, [extended.index])
+
+    def test_get_method_action_extensions(self):
+        """Action dispatch also returns that action's extensions."""
+        class Controller(wsgi.Controller):
+            def index(self, req, pants=None):
+                return pants
+
+            @wsgi.action('fooAction')
+            def _action_foo(self, req, id, body):
+                return body
+
+        class ControllerExtended(wsgi.Controller):
+            @wsgi.extends(action='fooAction')
+            def _action_foo(self, req, resp_obj, id, body):
+                return None
+
+        controller = Controller()
+        extended = ControllerExtended()
+        resource = wsgi.Resource(controller)
+        resource.register_extensions(extended)
+        method, extensions = resource.get_method(None, 'action',
+                                                 'application/json',
+                                                 '{"fooAction": true}')
+        self.assertEqual(method, controller._action_foo)
+        self.assertEqual(extensions, [extended._action_foo])
+
+    def test_get_method_action_whitelist_extensions(self):
+        """Whitelisted actions ('create', 'delete') registered via
+        @wsgi.action are dispatched like regular methods, with no
+        extensions attached.
+        """
+        class Controller(wsgi.Controller):
+            def index(self, req, pants=None):
+                return pants
+
+        class ControllerExtended(wsgi.Controller):
+            @wsgi.action('create')
+            def _create(self, req, body):
+                pass
+
+            @wsgi.action('delete')
+            def _delete(self, req, id):
+                pass
+
+        controller = Controller()
+        extended = ControllerExtended()
+        resource = wsgi.Resource(controller)
+        resource.register_actions(extended)
+
+        method, extensions = resource.get_method(None, 'create',
+                                                 'application/json',
+                                                 '{"create": true}')
+        self.assertEqual(method, extended._create)
+        self.assertEqual(extensions, [])
+
+        method, extensions = resource.get_method(None, 'delete', None, None)
+        self.assertEqual(method, extended._delete)
+        self.assertEqual(extensions, [])
+
+    def test_pre_process_extensions_regular(self):
+        """Plain (non-generator) extensions are not run in the pre phase;
+        they come back for post-processing in reversed order.
+        """
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+
+        called = []
+
+        def extension1(req, resp_obj):
+            called.append(1)
+            return None
+
+        def extension2(req, resp_obj):
+            called.append(2)
+            return None
+
+        extensions = [extension1, extension2]
+        response, post = resource.pre_process_extensions(extensions, None, {})
+        self.assertEqual(called, [])
+        self.assertIsNone(response)
+        self.assertEqual(list(post), [extension2, extension1])
+
+    def test_pre_process_extensions_generator(self):
+        """Generator extensions run up to their yield in the pre phase;
+        the suspended generators are handed back (reversed) for the post
+        phase, where resuming them runs the code after the yield.
+        """
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+
+        called = []
+
+        def extension1(req):
+            called.append('pre1')
+            yield
+            called.append('post1')
+
+        def extension2(req):
+            called.append('pre2')
+            yield
+            called.append('post2')
+
+        extensions = [extension1, extension2]
+        response, post = resource.pre_process_extensions(extensions, None, {})
+        post = list(post)
+        self.assertEqual(called, ['pre1', 'pre2'])
+        self.assertIsNone(response)
+        self.assertEqual(len(post), 2)
+        self.assertTrue(inspect.isgenerator(post[0]))
+        self.assertTrue(inspect.isgenerator(post[1]))
+
+        # resume each suspended generator to run its post-yield code
+        for gen in post:
+            try:
+                gen.send(None)
+            except StopIteration:
+                continue
+
+        self.assertEqual(called, ['pre1', 'pre2', 'post2', 'post1'])
+
+    def test_pre_process_extensions_generator_response(self):
+        """A generator that yields a value short-circuits: that value
+        becomes the response and later extensions never run.
+        """
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+
+        called = []
+
+        def extension1(req):
+            called.append('pre1')
+            yield 'foo'
+
+        def extension2(req):
+            called.append('pre2')
+
+        extensions = [extension1, extension2]
+        response, post = resource.pre_process_extensions(extensions, None, {})
+        self.assertEqual(called, ['pre1'])
+        self.assertEqual(response, 'foo')
+        self.assertEqual(post, [])
+
+    def test_post_process_extensions_regular(self):
+        """Plain extensions are called in the order given; all returning
+        None yields no replacement response.
+        """
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+
+        called = []
+
+        def extension1(req, resp_obj):
+            called.append(1)
+            return None
+
+        def extension2(req, resp_obj):
+            called.append(2)
+            return None
+
+        response = resource.post_process_extensions([extension2, extension1],
+                                                    None, None, {})
+        self.assertEqual(called, [2, 1])
+        self.assertIsNone(response)
+
+    def test_post_process_extensions_regular_response(self):
+        """A non-None return from an extension replaces the response and
+        stops further post-processing.
+        """
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+
+        called = []
+
+        def extension1(req, resp_obj):
+            called.append(1)
+            return None
+
+        def extension2(req, resp_obj):
+            called.append(2)
+            return 'foo'
+
+        response = resource.post_process_extensions([extension2, extension1],
+                                                    None, None, {})
+        self.assertEqual(called, [2])
+        self.assertEqual(response, 'foo')
+
+    def test_post_process_extensions_generator(self):
+        """Suspended generators are resumed past their yield in order."""
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+
+        called = []
+
+        def extension1(req):
+            yield
+            called.append(1)
+
+        def extension2(req):
+            yield
+            called.append(2)
+
+        # prime both generators up to their first yield (py2 .next())
+        ext1 = extension1(None)
+        ext1.next()
+        ext2 = extension2(None)
+        ext2.next()
+
+        response = resource.post_process_extensions([ext2, ext1],
+                                                    None, None, {})
+
+        self.assertEqual(called, [2, 1])
+        self.assertIsNone(response)
+
+    def test_post_process_extensions_generator_response(self):
+        """A second yield with a value replaces the response and stops
+        processing of the remaining generators.
+        """
+        class Controller(object):
+            def index(self, req, pants=None):
+                return pants
+
+        controller = Controller()
+        resource = wsgi.Resource(controller)
+
+        called = []
+
+        def extension1(req):
+            yield
+            called.append(1)
+
+        def extension2(req):
+            yield
+            called.append(2)
+            yield 'foo'
+
+        # prime both generators up to their first yield (py2 .next())
+        ext1 = extension1(None)
+        ext1.next()
+        ext2 = extension2(None)
+        ext2.next()
+
+        response = resource.post_process_extensions([ext2, ext1],
+                                                    None, None, {})
+
+        self.assertEqual(called, [2])
+        self.assertEqual(response, 'foo')
+
+ def test_resource_exception_handler_type_error(self):
+ # A TypeError should be translated to a Fault/HTTP 400.
+ def foo(a,):
+ return a
+
+ try:
+ with wsgi.ResourceExceptionHandler():
+ foo() # generate a TypeError
+ self.fail("Should have raised a Fault (HTTP 400)")
+ except wsgi.Fault as fault:
+ self.assertEqual(400, fault.status_int)
+
+ def test_resource_headers_are_utf8(self):
+ resp = webob.Response(status_int=202)
+ resp.headers['x-header1'] = 1
+ resp.headers['x-header2'] = u'header2'
+ resp.headers['x-header3'] = u'header3'
+
+ class Controller(object):
+ def index(self, req):
+ return resp
+
+ req = webob.Request.blank('/tests')
+ app = fakes.TestRouter(Controller())
+ response = req.get_response(app)
+
+ for hdr, val in response.headers.iteritems():
+ # All headers must be utf8
+ self.assertIsInstance(hdr, str)
+ self.assertIsInstance(val, str)
+ self.assertEqual(response.headers['x-header1'], '1')
+ self.assertEqual(response.headers['x-header2'], 'header2')
+ self.assertEqual(response.headers['x-header3'], 'header3')
+
+ def test_resource_valid_utf8_body(self):
+ class Controller(object):
+ def update(self, req, id, body):
+ return body
+
+ req = webob.Request.blank('/tests/test_id', method="PUT")
+ body = """ {"name": "\xe6\xa6\x82\xe5\xbf\xb5" } """
+ expected_body = '{"name": "\\u6982\\u5ff5"}'
+ req.body = body
+ req.headers['Content-Type'] = 'application/json'
+ app = fakes.TestRouter(Controller())
+ response = req.get_response(app)
+ self.assertEqual(response.body, expected_body)
+ self.assertEqual(response.status_int, 200)
+
+ def test_resource_invalid_utf8(self):
+ class Controller(object):
+ def update(self, req, id, body):
+ return body
+
+ req = webob.Request.blank('/tests/test_id', method="PUT")
+ body = """ {"name": "\xf0\x28\x8c\x28" } """
+ req.body = body
+ req.headers['Content-Type'] = 'application/json'
+ app = fakes.TestRouter(Controller())
+ self.assertRaises(UnicodeDecodeError, req.get_response, app)
+
+
+class ResponseObjectTest(test.NoDBTestCase):
+ def test_default_code(self):
+ robj = wsgi.ResponseObject({})
+ self.assertEqual(robj.code, 200)
+
+ def test_modified_code(self):
+ robj = wsgi.ResponseObject({})
+ robj._default_code = 202
+ self.assertEqual(robj.code, 202)
+
+ def test_override_default_code(self):
+ robj = wsgi.ResponseObject({}, code=404)
+ self.assertEqual(robj.code, 404)
+
+ def test_override_modified_code(self):
+ robj = wsgi.ResponseObject({}, code=404)
+ robj._default_code = 202
+ self.assertEqual(robj.code, 404)
+
+ def test_set_header(self):
+ robj = wsgi.ResponseObject({})
+ robj['Header'] = 'foo'
+ self.assertEqual(robj.headers, {'header': 'foo'})
+
+ def test_get_header(self):
+ robj = wsgi.ResponseObject({})
+ robj['Header'] = 'foo'
+ self.assertEqual(robj['hEADER'], 'foo')
+
+ def test_del_header(self):
+ robj = wsgi.ResponseObject({})
+ robj['Header'] = 'foo'
+ del robj['hEADER']
+ self.assertNotIn('header', robj.headers)
+
+ def test_header_isolation(self):
+ robj = wsgi.ResponseObject({})
+ robj['Header'] = 'foo'
+ hdrs = robj.headers
+ hdrs['hEADER'] = 'bar'
+ self.assertEqual(robj['hEADER'], 'foo')
+
+ def test_default_serializers(self):
+ robj = wsgi.ResponseObject({})
+ self.assertEqual(robj.serializers, {})
+
+ def test_bind_serializers(self):
+ robj = wsgi.ResponseObject({}, json='foo')
+ robj._bind_method_serializers(dict(xml='bar', json='baz'))
+ self.assertEqual(robj.serializers, dict(xml='bar', json='foo'))
+
+ def test_get_serializer(self):
+ robj = wsgi.ResponseObject({}, json='json', xml='xml', atom='atom')
+ for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
+ _mtype, serializer = robj.get_serializer(content_type)
+ self.assertEqual(serializer, mtype)
+
+ def test_get_serializer_defaults(self):
+ robj = wsgi.ResponseObject({})
+ default_serializers = dict(json='json', xml='xml', atom='atom')
+ for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
+ self.assertRaises(exception.InvalidContentType,
+ robj.get_serializer, content_type)
+ _mtype, serializer = robj.get_serializer(content_type,
+ default_serializers)
+ self.assertEqual(serializer, mtype)
+
+ def test_serialize(self):
+ class JSONSerializer(object):
+ def serialize(self, obj):
+ return 'json'
+
+ class XMLSerializer(object):
+ def serialize(self, obj):
+ return 'xml'
+
+ class AtomSerializer(object):
+ def serialize(self, obj):
+ return 'atom'
+
+ robj = wsgi.ResponseObject({}, code=202,
+ json=JSONSerializer,
+ xml=XMLSerializer,
+ atom=AtomSerializer)
+ robj['X-header1'] = 'header1'
+ robj['X-header2'] = 'header2'
+ robj['X-header3'] = 3
+ robj['X-header-unicode'] = u'header-unicode'
+
+ for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
+ request = wsgi.Request.blank('/tests/123')
+ response = robj.serialize(request, content_type)
+
+ self.assertEqual(response.headers['Content-Type'], content_type)
+ for hdr, val in response.headers.iteritems():
+ # All headers must be utf8
+ self.assertIsInstance(hdr, str)
+ self.assertIsInstance(val, str)
+ self.assertEqual(response.headers['X-header1'], 'header1')
+ self.assertEqual(response.headers['X-header2'], 'header2')
+ self.assertEqual(response.headers['X-header3'], '3')
+ self.assertEqual(response.status_int, 202)
+ self.assertEqual(response.body, mtype)
+
+
+class ValidBodyTest(test.NoDBTestCase):
+
+ def setUp(self):
+ super(ValidBodyTest, self).setUp()
+ self.controller = wsgi.Controller()
+
+ def test_is_valid_body(self):
+ body = {'foo': {}}
+ self.assertTrue(self.controller.is_valid_body(body, 'foo'))
+
+ def test_is_valid_body_none(self):
+ wsgi.Resource(controller=None)
+ self.assertFalse(self.controller.is_valid_body(None, 'foo'))
+
+ def test_is_valid_body_empty(self):
+ wsgi.Resource(controller=None)
+ self.assertFalse(self.controller.is_valid_body({}, 'foo'))
+
+ def test_is_valid_body_no_entity(self):
+ wsgi.Resource(controller=None)
+ body = {'bar': {}}
+ self.assertFalse(self.controller.is_valid_body(body, 'foo'))
+
+ def test_is_valid_body_malformed_entity(self):
+ wsgi.Resource(controller=None)
+ body = {'foo': 'bar'}
+ self.assertFalse(self.controller.is_valid_body(body, 'foo'))
diff --git a/nova/tests/unit/api/openstack/test_xmlutil.py b/nova/tests/unit/api/openstack/test_xmlutil.py
new file mode 100644
index 0000000000..19186889bb
--- /dev/null
+++ b/nova/tests/unit/api/openstack/test_xmlutil.py
@@ -0,0 +1,948 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from xml.dom import minidom
+
+from lxml import etree
+
+from nova.api.openstack import xmlutil
+from nova import exception
+from nova import test
+from nova.tests.unit import utils as tests_utils
+
+
+class SelectorTest(test.NoDBTestCase):
+ obj_for_test = {
+ 'test': {
+ 'name': 'test',
+ 'values': [1, 2, 3],
+ 'attrs': {
+ 'foo': 1,
+ 'bar': 2,
+ 'baz': 3,
+ },
+ },
+ }
+
+ def test_repr(self):
+ sel = xmlutil.Selector()
+ self.assertEqual(repr(sel), "Selector()")
+
+ def test_empty_selector(self):
+ sel = xmlutil.EmptyStringSelector()
+ self.assertEqual(len(sel.chain), 0)
+ self.assertEqual(sel(self.obj_for_test), self.obj_for_test)
+ self.assertEqual(
+ repr(self.obj_for_test),
+ "{'test': {'values': [1, 2, 3], 'name': 'test', 'attrs': "
+ "{'baz': 3, 'foo': 1, 'bar': 2}}}")
+
+ def test_dict_selector(self):
+ sel = xmlutil.Selector('test')
+ self.assertEqual(len(sel.chain), 1)
+ self.assertEqual(sel.chain[0], 'test')
+ self.assertEqual(sel(self.obj_for_test),
+ self.obj_for_test['test'])
+
+ def test_datum_selector(self):
+ sel = xmlutil.Selector('test', 'name')
+ self.assertEqual(len(sel.chain), 2)
+ self.assertEqual(sel.chain[0], 'test')
+ self.assertEqual(sel.chain[1], 'name')
+ self.assertEqual(sel(self.obj_for_test), 'test')
+
+ def test_list_selector(self):
+ sel = xmlutil.Selector('test', 'values', 0)
+ self.assertEqual(len(sel.chain), 3)
+ self.assertEqual(sel.chain[0], 'test')
+ self.assertEqual(sel.chain[1], 'values')
+ self.assertEqual(sel.chain[2], 0)
+ self.assertEqual(sel(self.obj_for_test), 1)
+
+ def test_items_selector(self):
+ sel = xmlutil.Selector('test', 'attrs', xmlutil.get_items)
+ self.assertEqual(len(sel.chain), 3)
+ self.assertEqual(sel.chain[2], xmlutil.get_items)
+ for key, val in sel(self.obj_for_test):
+ self.assertEqual(self.obj_for_test['test']['attrs'][key], val)
+
+ def test_missing_key_selector(self):
+ sel = xmlutil.Selector('test2', 'attrs')
+ self.assertIsNone(sel(self.obj_for_test))
+ self.assertRaises(KeyError, sel, self.obj_for_test, True)
+
+ def test_constant_selector(self):
+ sel = xmlutil.ConstantSelector('Foobar')
+ self.assertEqual(sel.value, 'Foobar')
+ self.assertEqual(sel(self.obj_for_test), 'Foobar')
+ self.assertEqual(repr(sel), "'Foobar'")
+
+
+class TemplateElementTest(test.NoDBTestCase):
+ def test_element_initial_attributes(self):
+ # Create a template element with some attributes
+ elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3),
+ c=4, d=5, e=6)
+
+ # Verify all the attributes are as expected
+ expected = dict(a=1, b=2, c=4, d=5, e=6)
+ for k, v in expected.items():
+ self.assertEqual(elem.attrib[k].chain[0], v)
+ self.assertTrue(repr(elem))
+
+ def test_element_get_attributes(self):
+ expected = dict(a=1, b=2, c=3)
+
+ # Create a template element with some attributes
+ elem = xmlutil.TemplateElement('test', attrib=expected)
+
+ # Verify that get() retrieves the attributes
+ for k, v in expected.items():
+ self.assertEqual(elem.get(k).chain[0], v)
+
+ def test_element_set_attributes(self):
+ attrs = dict(a=None, b='foo', c=xmlutil.Selector('foo', 'bar'))
+
+ # Create a bare template element with no attributes
+ elem = xmlutil.TemplateElement('test')
+
+ # Set the attribute values
+ for k, v in attrs.items():
+ elem.set(k, v)
+
+ # Now verify what got set
+ self.assertEqual(len(elem.attrib['a'].chain), 1)
+ self.assertEqual(elem.attrib['a'].chain[0], 'a')
+ self.assertEqual(len(elem.attrib['b'].chain), 1)
+ self.assertEqual(elem.attrib['b'].chain[0], 'foo')
+ self.assertEqual(elem.attrib['c'], attrs['c'])
+
+ def test_element_attribute_keys(self):
+ attrs = dict(a=1, b=2, c=3, d=4)
+ expected = set(attrs.keys())
+
+ # Create a template element with some attributes
+ elem = xmlutil.TemplateElement('test', attrib=attrs)
+
+ # Now verify keys
+ self.assertEqual(set(elem.keys()), expected)
+
+ def test_element_attribute_items(self):
+ expected = dict(a=xmlutil.Selector(1),
+ b=xmlutil.Selector(2),
+ c=xmlutil.Selector(3))
+ keys = set(expected.keys())
+
+ # Create a template element with some attributes
+ elem = xmlutil.TemplateElement('test', attrib=expected)
+
+ # Now verify items
+ for k, v in elem.items():
+ self.assertEqual(expected[k], v)
+ keys.remove(k)
+
+ # Did we visit all keys?
+ self.assertEqual(len(keys), 0)
+
+ def test_element_selector_none(self):
+ # Create a template element with no selector
+ elem = xmlutil.TemplateElement('test')
+
+ self.assertEqual(len(elem.selector.chain), 0)
+
+ def test_element_selector_string(self):
+ # Create a template element with a string selector
+ elem = xmlutil.TemplateElement('test', selector='test')
+
+ self.assertEqual(len(elem.selector.chain), 1)
+ self.assertEqual(elem.selector.chain[0], 'test')
+
+ def test_element_selector(self):
+ sel = xmlutil.Selector('a', 'b')
+
+ # Create a template element with an explicit selector
+ elem = xmlutil.TemplateElement('test', selector=sel)
+
+ self.assertEqual(elem.selector, sel)
+
+ def test_element_subselector_none(self):
+ # Create a template element with no subselector
+ elem = xmlutil.TemplateElement('test')
+
+ self.assertIsNone(elem.subselector)
+
+ def test_element_subselector_string(self):
+ # Create a template element with a string subselector
+ elem = xmlutil.TemplateElement('test', subselector='test')
+
+ self.assertEqual(len(elem.subselector.chain), 1)
+ self.assertEqual(elem.subselector.chain[0], 'test')
+
+ def test_element_subselector(self):
+ sel = xmlutil.Selector('a', 'b')
+
+ # Create a template element with an explicit subselector
+ elem = xmlutil.TemplateElement('test', subselector=sel)
+
+ self.assertEqual(elem.subselector, sel)
+
+ def test_element_append_child(self):
+ # Create an element
+ elem = xmlutil.TemplateElement('test')
+
+ # Make sure the element starts off empty
+ self.assertEqual(len(elem), 0)
+
+ # Create a child element
+ child = xmlutil.TemplateElement('child')
+
+ # Append the child to the parent
+ elem.append(child)
+
+ # Verify that the child was added
+ self.assertEqual(len(elem), 1)
+ self.assertEqual(elem[0], child)
+ self.assertIn('child', elem)
+ self.assertEqual(elem['child'], child)
+
+ # Ensure that multiple children of the same name are rejected
+ child2 = xmlutil.TemplateElement('child')
+ self.assertRaises(KeyError, elem.append, child2)
+
+ def test_element_extend_children(self):
+ # Create an element
+ elem = xmlutil.TemplateElement('test')
+
+ # Make sure the element starts off empty
+ self.assertEqual(len(elem), 0)
+
+ # Create a few children
+ children = [
+ xmlutil.TemplateElement('child1'),
+ xmlutil.TemplateElement('child2'),
+ xmlutil.TemplateElement('child3'),
+ ]
+
+ # Extend the parent by those children
+ elem.extend(children)
+
+ # Verify that the children were added
+ self.assertEqual(len(elem), 3)
+ for idx in range(len(elem)):
+ self.assertEqual(children[idx], elem[idx])
+ self.assertIn(children[idx].tag, elem)
+ self.assertEqual(elem[children[idx].tag], children[idx])
+
+ # Ensure that multiple children of the same name are rejected
+ children2 = [
+ xmlutil.TemplateElement('child4'),
+ xmlutil.TemplateElement('child1'),
+ ]
+ self.assertRaises(KeyError, elem.extend, children2)
+
+ # Also ensure that child4 was not added
+ self.assertEqual(len(elem), 3)
+ self.assertEqual(elem[-1].tag, 'child3')
+
+ def test_element_insert_child(self):
+ # Create an element
+ elem = xmlutil.TemplateElement('test')
+
+ # Make sure the element starts off empty
+ self.assertEqual(len(elem), 0)
+
+ # Create a few children
+ children = [
+ xmlutil.TemplateElement('child1'),
+ xmlutil.TemplateElement('child2'),
+ xmlutil.TemplateElement('child3'),
+ ]
+
+ # Extend the parent by those children
+ elem.extend(children)
+
+ # Create a child to insert
+ child = xmlutil.TemplateElement('child4')
+
+ # Insert it
+ elem.insert(1, child)
+
+ # Ensure the child was inserted in the right place
+ self.assertEqual(len(elem), 4)
+ children.insert(1, child)
+ for idx in range(len(elem)):
+ self.assertEqual(children[idx], elem[idx])
+ self.assertIn(children[idx].tag, elem)
+ self.assertEqual(elem[children[idx].tag], children[idx])
+
+ # Ensure that multiple children of the same name are rejected
+ child2 = xmlutil.TemplateElement('child2')
+ self.assertRaises(KeyError, elem.insert, 2, child2)
+
+ def test_element_remove_child(self):
+ # Create an element
+ elem = xmlutil.TemplateElement('test')
+
+ # Make sure the element starts off empty
+ self.assertEqual(len(elem), 0)
+
+ # Create a few children
+ children = [
+ xmlutil.TemplateElement('child1'),
+ xmlutil.TemplateElement('child2'),
+ xmlutil.TemplateElement('child3'),
+ ]
+
+ # Extend the parent by those children
+ elem.extend(children)
+
+ # Create a test child to remove
+ child = xmlutil.TemplateElement('child2')
+
+ # Try to remove it
+ self.assertRaises(ValueError, elem.remove, child)
+
+ # Ensure that no child was removed
+ self.assertEqual(len(elem), 3)
+
+ # Now remove a legitimate child
+ elem.remove(children[1])
+
+ # Ensure that the child was removed
+ self.assertEqual(len(elem), 2)
+ self.assertEqual(elem[0], children[0])
+ self.assertEqual(elem[1], children[2])
+ self.assertEqual('child2' in elem, False)
+
+ # Ensure the child cannot be retrieved by name
+ def get_key(elem, key):
+ return elem[key]
+ self.assertRaises(KeyError, get_key, elem, 'child2')
+
+ def test_element_text(self):
+ # Create an element
+ elem = xmlutil.TemplateElement('test')
+
+ # Ensure that it has no text
+ self.assertIsNone(elem.text)
+
+ # Try setting it to a string and ensure it becomes a selector
+ elem.text = 'test'
+ self.assertEqual(hasattr(elem.text, 'chain'), True)
+ self.assertEqual(len(elem.text.chain), 1)
+ self.assertEqual(elem.text.chain[0], 'test')
+
+ # Try resetting the text to None
+ elem.text = None
+ self.assertIsNone(elem.text)
+
+ # Now make up a selector and try setting the text to that
+ sel = xmlutil.Selector()
+ elem.text = sel
+ self.assertEqual(elem.text, sel)
+
+ # Finally, try deleting the text and see what happens
+ del elem.text
+ self.assertIsNone(elem.text)
+
+ def test_apply_attrs(self):
+ # Create a template element
+ attrs = dict(attr1=xmlutil.ConstantSelector(1),
+ attr2=xmlutil.ConstantSelector(2))
+ tmpl_elem = xmlutil.TemplateElement('test', attrib=attrs)
+
+ # Create an etree element
+ elem = etree.Element('test')
+
+ # Apply the template to the element
+ tmpl_elem.apply(elem, None)
+
+ # Now, verify the correct attributes were set
+ for k, v in elem.items():
+ self.assertEqual(str(attrs[k].value), v)
+
+ def test_apply_text(self):
+ # Create a template element
+ tmpl_elem = xmlutil.TemplateElement('test')
+ tmpl_elem.text = xmlutil.ConstantSelector(1)
+
+ # Create an etree element
+ elem = etree.Element('test')
+
+ # Apply the template to the element
+ tmpl_elem.apply(elem, None)
+
+ # Now, verify the text was set
+ self.assertEqual(str(tmpl_elem.text.value), elem.text)
+
+ def test__render(self):
+ attrs = dict(attr1=xmlutil.ConstantSelector(1),
+ attr2=xmlutil.ConstantSelector(2),
+ attr3=xmlutil.ConstantSelector(3))
+
+ # Create a master template element
+ master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
+
+        # Create a couple of slave template elements
+ slave_elems = [
+ xmlutil.TemplateElement('test', attr2=attrs['attr2']),
+ xmlutil.TemplateElement('test', attr3=attrs['attr3']),
+ ]
+
+ # Try the render
+ elem = master_elem._render(None, None, slave_elems, None)
+
+ # Verify the particulars of the render
+ self.assertEqual(elem.tag, 'test')
+ self.assertEqual(len(elem.nsmap), 0)
+ for k, v in elem.items():
+ self.assertEqual(str(attrs[k].value), v)
+
+ # Create a parent for the element to be rendered
+ parent = etree.Element('parent')
+
+ # Try the render again...
+ elem = master_elem._render(parent, None, slave_elems, dict(a='foo'))
+
+ # Verify the particulars of the render
+ self.assertEqual(len(parent), 1)
+ self.assertEqual(parent[0], elem)
+ self.assertEqual(len(elem.nsmap), 1)
+ self.assertEqual(elem.nsmap['a'], 'foo')
+
+ def test_render(self):
+ # Create a template element
+ tmpl_elem = xmlutil.TemplateElement('test')
+ tmpl_elem.text = xmlutil.Selector()
+
+ # Create the object we're going to render
+ obj = ['elem1', 'elem2', 'elem3', 'elem4']
+
+ # Try a render with no object
+ elems = tmpl_elem.render(None, None)
+ self.assertEqual(len(elems), 0)
+
+ # Try a render with one object
+ elems = tmpl_elem.render(None, 'foo')
+ self.assertEqual(len(elems), 1)
+ self.assertEqual(elems[0][0].text, 'foo')
+ self.assertEqual(elems[0][1], 'foo')
+
+ # Now, try rendering an object with multiple entries
+ parent = etree.Element('parent')
+ elems = tmpl_elem.render(parent, obj)
+ self.assertEqual(len(elems), 4)
+
+ # Check the results
+ for idx in range(len(obj)):
+ self.assertEqual(elems[idx][0].text, obj[idx])
+ self.assertEqual(elems[idx][1], obj[idx])
+
+ # Check with a subselector
+ tmpl_elem = xmlutil.TemplateElement(
+ 'test',
+ subselector=xmlutil.ConstantSelector('foo'))
+ parent = etree.Element('parent')
+
+ # Try a render with no object
+ elems = tmpl_elem.render(parent, obj)
+ self.assertEqual(len(elems), 4)
+
+ def test_subelement(self):
+ # Try the SubTemplateElement constructor
+ parent = xmlutil.SubTemplateElement(None, 'parent')
+ self.assertEqual(parent.tag, 'parent')
+ self.assertEqual(len(parent), 0)
+
+ # Now try it with a parent element
+ child = xmlutil.SubTemplateElement(parent, 'child')
+ self.assertEqual(child.tag, 'child')
+ self.assertEqual(len(parent), 1)
+ self.assertEqual(parent[0], child)
+
+ def test_wrap(self):
+ # These are strange methods, but they make things easier
+ elem = xmlutil.TemplateElement('test')
+ self.assertEqual(elem.unwrap(), elem)
+ self.assertEqual(elem.wrap().root, elem)
+
+ def test_dyntag(self):
+ obj = ['a', 'b', 'c']
+
+ # Create a template element with a dynamic tag
+ tmpl_elem = xmlutil.TemplateElement(xmlutil.Selector())
+
+ # Try the render
+ parent = etree.Element('parent')
+ elems = tmpl_elem.render(parent, obj)
+
+ # Verify the particulars of the render
+ self.assertEqual(len(elems), len(obj))
+ for idx in range(len(obj)):
+ self.assertEqual(elems[idx][0].tag, obj[idx])
+
+ def test_tree(self):
+ # Create a template element
+ elem = xmlutil.TemplateElement('test', attr3='attr3')
+ elem.text = 'test'
+ self.assertEqual(elem.tree(),
+ "<test !selector=Selector() "
+ "!text=Selector('test',) "
+ "attr3=Selector('attr3',)"
+ "/>")
+
+ # Create a template element
+ elem = xmlutil.TemplateElement('test2')
+
+ # Create a child element
+ child = xmlutil.TemplateElement('child')
+
+ # Append the child to the parent
+ elem.append(child)
+
+ self.assertEqual(elem.tree(),
+ "<test2 !selector=Selector()>"
+ "<child !selector=Selector()/></test2>")
+
+
+class TemplateTest(test.NoDBTestCase):
+ def test_tree(self):
+ elem = xmlutil.TemplateElement('test')
+ tmpl = xmlutil.Template(elem)
+ self.assertTrue(tmpl.tree())
+
+ def test_wrap(self):
+ # These are strange methods, but they make things easier
+ elem = xmlutil.TemplateElement('test')
+ tmpl = xmlutil.Template(elem)
+ self.assertEqual(tmpl.unwrap(), elem)
+ self.assertEqual(tmpl.wrap(), tmpl)
+
+ def test__siblings(self):
+ # Set up a basic template
+ elem = xmlutil.TemplateElement('test')
+ tmpl = xmlutil.Template(elem)
+
+ # Check that we get the right siblings
+ siblings = tmpl._siblings()
+ self.assertEqual(len(siblings), 1)
+ self.assertEqual(siblings[0], elem)
+
+ def test__nsmap(self):
+ # Set up a basic template
+ elem = xmlutil.TemplateElement('test')
+ tmpl = xmlutil.Template(elem, nsmap=dict(a="foo"))
+
+        # Check that we get the right namespace dictionary
+ nsmap = tmpl._nsmap()
+ self.assertNotEqual(id(nsmap), id(tmpl.nsmap))
+ self.assertEqual(len(nsmap), 1)
+ self.assertEqual(nsmap['a'], 'foo')
+
+ def test_master_attach(self):
+ # Set up a master template
+ elem = xmlutil.TemplateElement('test')
+ tmpl = xmlutil.MasterTemplate(elem, 1)
+
+ # Make sure it has a root but no slaves
+ self.assertEqual(tmpl.root, elem)
+ self.assertEqual(len(tmpl.slaves), 0)
+ self.assertTrue(repr(tmpl))
+
+ # Try to attach an invalid slave
+ bad_elem = xmlutil.TemplateElement('test2')
+ self.assertRaises(ValueError, tmpl.attach, bad_elem)
+ self.assertEqual(len(tmpl.slaves), 0)
+
+ # Try to attach an invalid and a valid slave
+ good_elem = xmlutil.TemplateElement('test')
+ self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem)
+ self.assertEqual(len(tmpl.slaves), 0)
+
+ # Try to attach an inapplicable template
+ class InapplicableTemplate(xmlutil.Template):
+ def apply(self, master):
+ return False
+ inapp_tmpl = InapplicableTemplate(good_elem)
+ tmpl.attach(inapp_tmpl)
+ self.assertEqual(len(tmpl.slaves), 0)
+
+ # Now try attaching an applicable template
+ tmpl.attach(good_elem)
+ self.assertEqual(len(tmpl.slaves), 1)
+ self.assertEqual(tmpl.slaves[0].root, good_elem)
+
+ def test_master_copy(self):
+ # Construct a master template
+ elem = xmlutil.TemplateElement('test')
+ tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo'))
+
+ # Give it a slave
+ slave = xmlutil.TemplateElement('test')
+ tmpl.attach(slave)
+
+ # Construct a copy
+ copy = tmpl.copy()
+
+ # Check to see if we actually managed a copy
+ self.assertNotEqual(tmpl, copy)
+ self.assertEqual(tmpl.root, copy.root)
+ self.assertEqual(tmpl.version, copy.version)
+ self.assertEqual(id(tmpl.nsmap), id(copy.nsmap))
+ self.assertNotEqual(id(tmpl.slaves), id(copy.slaves))
+ self.assertEqual(len(tmpl.slaves), len(copy.slaves))
+ self.assertEqual(tmpl.slaves[0], copy.slaves[0])
+
+ def test_slave_apply(self):
+ # Construct a master template
+ elem = xmlutil.TemplateElement('test')
+ master = xmlutil.MasterTemplate(elem, 3)
+
+ # Construct a slave template with applicable minimum version
+ slave = xmlutil.SlaveTemplate(elem, 2)
+ self.assertEqual(slave.apply(master), True)
+ self.assertTrue(repr(slave))
+
+ # Construct a slave template with equal minimum version
+ slave = xmlutil.SlaveTemplate(elem, 3)
+ self.assertEqual(slave.apply(master), True)
+
+ # Construct a slave template with inapplicable minimum version
+ slave = xmlutil.SlaveTemplate(elem, 4)
+ self.assertEqual(slave.apply(master), False)
+
+ # Construct a slave template with applicable version range
+ slave = xmlutil.SlaveTemplate(elem, 2, 4)
+ self.assertEqual(slave.apply(master), True)
+
+ # Construct a slave template with low version range
+ slave = xmlutil.SlaveTemplate(elem, 1, 2)
+ self.assertEqual(slave.apply(master), False)
+
+ # Construct a slave template with high version range
+ slave = xmlutil.SlaveTemplate(elem, 4, 5)
+ self.assertEqual(slave.apply(master), False)
+
+ # Construct a slave template with matching version range
+ slave = xmlutil.SlaveTemplate(elem, 3, 3)
+ self.assertEqual(slave.apply(master), True)
+
+ def test__serialize(self):
+ # Our test object to serialize
+ obj = {
+ 'test': {
+ 'name': 'foobar',
+ 'values': [1, 2, 3, 4],
+ 'attrs': {
+ 'a': 1,
+ 'b': 2,
+ 'c': 3,
+ 'd': 4,
+ },
+ 'image': {
+ 'name': 'image_foobar',
+ 'id': 42,
+ },
+ },
+ }
+
+ # Set up our master template
+ root = xmlutil.TemplateElement('test', selector='test',
+ name='name')
+ value = xmlutil.SubTemplateElement(root, 'value', selector='values')
+ value.text = xmlutil.Selector()
+ attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs')
+ xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items,
+ key=0, value=1)
+ master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo'))
+
+ # Set up our slave template
+ root_slave = xmlutil.TemplateElement('test', selector='test')
+ image = xmlutil.SubTemplateElement(root_slave, 'image',
+ selector='image', id='id')
+ image.text = xmlutil.Selector('name')
+ slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar'))
+
+ # Attach the slave to the master...
+ master.attach(slave)
+
+ # Try serializing our object
+ siblings = master._siblings()
+ nsmap = master._nsmap()
+ result = master._serialize(None, obj, siblings, nsmap)
+
+ # Now we get to manually walk the element tree...
+ self.assertEqual(result.tag, 'test')
+ self.assertEqual(len(result.nsmap), 2)
+ self.assertEqual(result.nsmap['f'], 'foo')
+ self.assertEqual(result.nsmap['b'], 'bar')
+ self.assertEqual(result.get('name'), obj['test']['name'])
+ for idx, val in enumerate(obj['test']['values']):
+ self.assertEqual(result[idx].tag, 'value')
+ self.assertEqual(result[idx].text, str(val))
+ idx += 1
+ self.assertEqual(result[idx].tag, 'attrs')
+ for attr in result[idx]:
+ self.assertEqual(attr.tag, 'attr')
+ self.assertEqual(attr.get('value'),
+ str(obj['test']['attrs'][attr.get('key')]))
+ idx += 1
+ self.assertEqual(result[idx].tag, 'image')
+ self.assertEqual(result[idx].get('id'),
+ str(obj['test']['image']['id']))
+ self.assertEqual(result[idx].text, obj['test']['image']['name'])
+
+ templ = xmlutil.Template(None)
+ self.assertEqual(templ.serialize(None), '')
+
+ def test_serialize_with_colon_tagname_support(self):
+ # Our test object to serialize
+ obj = {'extra_specs': {'foo:bar': '999'}}
+ expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
+ '<extra_specs><foo:bar xmlns:foo="foo">999</foo:bar>'
+ '</extra_specs>'))
+ # Set up our master template
+ root = xmlutil.TemplateElement('extra_specs', selector='extra_specs',
+ colon_ns=True)
+ value = xmlutil.SubTemplateElement(root, 'foo:bar', selector='foo:bar',
+ colon_ns=True)
+ value.text = xmlutil.Selector()
+ master = xmlutil.MasterTemplate(root, 1)
+ result = master.serialize(obj)
+ self.assertEqual(expected_xml, result)
+
+ def test__serialize_with_empty_datum_selector(self):
+ # Our test object to serialize
+ obj = {
+ 'test': {
+ 'name': 'foobar',
+ 'image': ''
+ },
+ }
+
+ root = xmlutil.TemplateElement('test', selector='test',
+ name='name')
+ master = xmlutil.MasterTemplate(root, 1)
+ root_slave = xmlutil.TemplateElement('test', selector='test')
+ image = xmlutil.SubTemplateElement(root_slave, 'image',
+ selector='image')
+ image.set('id')
+ xmlutil.make_links(image, 'links')
+ slave = xmlutil.SlaveTemplate(root_slave, 1)
+ master.attach(slave)
+
+ siblings = master._siblings()
+ result = master._serialize(None, obj, siblings)
+ self.assertEqual(result.tag, 'test')
+ self.assertEqual(result[0].tag, 'image')
+ self.assertEqual(result[0].get('id'), str(obj['test']['image']))
+
+
+class MasterTemplateBuilder(xmlutil.TemplateBuilder):
+ def construct(self):
+ elem = xmlutil.TemplateElement('test')
+ return xmlutil.MasterTemplate(elem, 1)
+
+
+class SlaveTemplateBuilder(xmlutil.TemplateBuilder):
+ def construct(self):
+ elem = xmlutil.TemplateElement('test')
+ return xmlutil.SlaveTemplate(elem, 1)
+
+
+class TemplateBuilderTest(test.NoDBTestCase):
+ def test_master_template_builder(self):
+ # Make sure the template hasn't been built yet
+ self.assertIsNone(MasterTemplateBuilder._tmpl)
+
+ # Now, construct the template
+ tmpl1 = MasterTemplateBuilder()
+
+ # Make sure that there is a template cached...
+ self.assertIsNotNone(MasterTemplateBuilder._tmpl)
+
+ # Make sure it wasn't what was returned...
+ self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1)
+
+ # Make sure it doesn't get rebuilt
+ cached = MasterTemplateBuilder._tmpl
+ tmpl2 = MasterTemplateBuilder()
+ self.assertEqual(MasterTemplateBuilder._tmpl, cached)
+
+ # Make sure we're always getting fresh copies
+ self.assertNotEqual(tmpl1, tmpl2)
+
+ # Make sure we can override the copying behavior
+ tmpl3 = MasterTemplateBuilder(False)
+ self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3)
+
+ def test_slave_template_builder(self):
+ # Make sure the template hasn't been built yet
+ self.assertIsNone(SlaveTemplateBuilder._tmpl)
+
+ # Now, construct the template
+ tmpl1 = SlaveTemplateBuilder()
+
+ # Make sure there is a template cached...
+ self.assertIsNotNone(SlaveTemplateBuilder._tmpl)
+
+ # Make sure it was what was returned...
+ self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
+
+ # Make sure it doesn't get rebuilt
+ tmpl2 = SlaveTemplateBuilder()
+ self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
+
+ # Make sure we're always getting the cached copy
+ self.assertEqual(tmpl1, tmpl2)
+
+
+class MiscellaneousXMLUtilTests(test.NoDBTestCase):
+    """Grab-bag of tests for xmlutil helper functions: schema
+    validation, link generation, flat-dict template construction and
+    safe minidom parsing.
+    """
+
+    def test_validate_schema(self):
+        xml = '''<?xml version='1.0' encoding='UTF-8'?>
+<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+<meta key="key6">value6</meta><meta key="key4">value4</meta>
+</metadata>
+'''
+        xmlutil.validate_schema(xml, 'metadata')
+        # No way to test the return value of validate_schema.
+        # It just raises an exception when something is wrong.
+        self.assertTrue(True)
+
+    def test_make_links(self):
+        # Only checks that make_links produces a repr-able element;
+        # the structure itself is not asserted here.
+        elem = xmlutil.TemplateElement('image', selector='image')
+        self.assertTrue(repr(xmlutil.make_links(elem, 'links')))
+
+    def test_make_flat_dict(self):
+        # Without a namespace...
+        expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+                        '<wrapper><a>foo</a><b>bar</b></wrapper>')
+        root = xmlutil.make_flat_dict('wrapper')
+        tmpl = xmlutil.MasterTemplate(root, 1)
+        result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
+        self.assertEqual(result, expected_xml)
+
+        # ...and with an explicit namespace prefix.
+        expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
+'<ns0:wrapper xmlns:ns0="ns"><ns0:a>foo</ns0:a><ns0:b>bar</ns0:b>'
+"</ns0:wrapper>")
+        root = xmlutil.make_flat_dict('wrapper', ns='ns')
+        tmpl = xmlutil.MasterTemplate(root, 1)
+        result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
+        self.assertEqual(result, expected_xml)
+
+    def test_make_flat_dict_with_colon_tagname_support(self):
+        # Our test object to serialize
+        obj = {'extra_specs': {'foo:bar': '999'}}
+        expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
+                        '<extra_specs><foo:bar xmlns:foo="foo">999</foo:bar>'
+                        '</extra_specs>'))
+        # Set up our master template
+        root = xmlutil.make_flat_dict('extra_specs', colon_ns=True)
+        master = xmlutil.MasterTemplate(root, 1)
+        result = master.serialize(obj)
+        self.assertEqual(expected_xml, result)
+
+    def test_make_flat_dict_with_parent(self):
+        # Our test object to serialize
+        obj = {"device": {"id": 1,
+                          "extra_info": {"key1": "value1",
+                                         "key2": "value2"}}}
+
+        expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
+                        '<device id="1"><extra_info><key2>value2</key2>'
+                        '<key1>value1</key1></extra_info></device>'))
+
+        # The flat dict is attached under an existing parent element.
+        root = xmlutil.TemplateElement('device', selector='device')
+        root.set('id')
+        extra = xmlutil.make_flat_dict('extra_info', root=root)
+        root.append(extra)
+        master = xmlutil.MasterTemplate(root, 1)
+        result = master.serialize(obj)
+        self.assertEqual(expected_xml, result)
+
+    def test_make_flat_dict_with_dicts(self):
+        # Our test object to serialize
+        obj = {"device": {"id": 1,
+                          "extra_info": {"key1": "value1",
+                                         "key2": "value2"}}}
+
+        expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
+                        '<device><id>1</id><extra_info><key2>value2</key2>'
+                        '<key1>value1</key1></extra_info></device>'))
+
+        # ignore_sub_dicts leaves nested dicts for the appended
+        # sub-template to serialize.
+        root = xmlutil.make_flat_dict('device', selector='device',
+                                      ignore_sub_dicts=True)
+        extra = xmlutil.make_flat_dict('extra_info', selector='extra_info')
+        root.append(extra)
+        master = xmlutil.MasterTemplate(root, 1)
+        result = master.serialize(obj)
+        self.assertEqual(expected_xml, result)
+
+    def test_safe_parse_xml(self):
+
+        normal_body = ('<?xml version="1.0" ?>'
+                       '<foo><bar><v1>hey</v1><v2>there</v2></bar></foo>')
+
+        dom = xmlutil.safe_minidom_parse_string(normal_body)
+        # Some versions of minidom inject extra newlines so we ignore them
+        result = str(dom.toxml()).replace('\n', '')
+        self.assertEqual(normal_body, result)
+
+        # A malicious ("billion laughs"-style) body must be rejected.
+        self.assertRaises(exception.MalformedRequestBody,
+                          xmlutil.safe_minidom_parse_string,
+                          tests_utils.killer_xml_body())
+
+
+class SafeParserTestCase(test.NoDBTestCase):
+    """Verifies ProtectedExpatParser rejects dangerous XML constructs
+    (external DTDs, external entities, notations) even when DTDs
+    themselves are allowed (forbid_dtd=False).
+    """
+
+    def test_external_dtd(self):
+        # External DTD reference must raise even with forbid_dtd=False.
+        xml_string = ("""<?xml version="1.0" encoding="utf-8"?>
+                        <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+                        "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+                        <html>
+                        <head/>
+                        <body>html with dtd</body>
+                        </html>""")
+
+        parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
+                                              forbid_entities=True)
+        self.assertRaises(ValueError,
+                          minidom.parseString,
+                          xml_string, parser)
+
+    def test_external_file(self):
+        # External entity pointing at a local file must be rejected.
+        xml_string = """<!DOCTYPE external [
+                <!ENTITY ee SYSTEM "file:///PATH/TO/root.xml">
+                ]>
+                <root>&ee;</root>"""
+
+        parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
+                                              forbid_entities=True)
+        self.assertRaises(ValueError,
+                          minidom.parseString,
+                          xml_string, parser)
+
+    def test_notation(self):
+        # NOTATION declarations are likewise forbidden.
+        xml_string = """<?xml version="1.0" standalone="no"?>
+                        <!-- comment data -->
+                        <!DOCTYPE x [
+                        <!NOTATION notation SYSTEM "notation.jpeg">
+                        ]>
+                        <root attr1="value1">
+                        </root>"""
+
+        parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
+                                              forbid_entities=True)
+        self.assertRaises(ValueError,
+                          minidom.parseString,
+                          xml_string, parser)
diff --git a/nova/tests/api/test_auth.py b/nova/tests/unit/api/test_auth.py
index e11c611b3a..e11c611b3a 100644
--- a/nova/tests/api/test_auth.py
+++ b/nova/tests/unit/api/test_auth.py
diff --git a/nova/tests/api/test_compute_req_id.py b/nova/tests/unit/api/test_compute_req_id.py
index bbdbfab726..bbdbfab726 100644
--- a/nova/tests/api/test_compute_req_id.py
+++ b/nova/tests/unit/api/test_compute_req_id.py
diff --git a/nova/tests/api/test_validator.py b/nova/tests/unit/api/test_validator.py
index e9e349194a..e9e349194a 100644
--- a/nova/tests/api/test_validator.py
+++ b/nova/tests/unit/api/test_validator.py
diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/unit/api/test_wsgi.py
index aecfb8e219..aecfb8e219 100644
--- a/nova/tests/api/test_wsgi.py
+++ b/nova/tests/unit/api/test_wsgi.py
diff --git a/nova/tests/bundle/1mb.manifest.xml b/nova/tests/unit/bundle/1mb.manifest.xml
index 01648a5441..01648a5441 100644
--- a/nova/tests/bundle/1mb.manifest.xml
+++ b/nova/tests/unit/bundle/1mb.manifest.xml
diff --git a/nova/tests/bundle/1mb.no_kernel_or_ramdisk.manifest.xml b/nova/tests/unit/bundle/1mb.no_kernel_or_ramdisk.manifest.xml
index 73d7ace006..73d7ace006 100644
--- a/nova/tests/bundle/1mb.no_kernel_or_ramdisk.manifest.xml
+++ b/nova/tests/unit/bundle/1mb.no_kernel_or_ramdisk.manifest.xml
diff --git a/nova/tests/bundle/1mb.part.0 b/nova/tests/unit/bundle/1mb.part.0
index 15a1657c57..15a1657c57 100644
--- a/nova/tests/bundle/1mb.part.0
+++ b/nova/tests/unit/bundle/1mb.part.0
Binary files differ
diff --git a/nova/tests/bundle/1mb.part.1 b/nova/tests/unit/bundle/1mb.part.1
index 2f0406e2d1..2f0406e2d1 100644
--- a/nova/tests/bundle/1mb.part.1
+++ b/nova/tests/unit/bundle/1mb.part.1
diff --git a/nova/tests/cast_as_call.py b/nova/tests/unit/cast_as_call.py
index f75600a3b5..f75600a3b5 100644
--- a/nova/tests/cast_as_call.py
+++ b/nova/tests/unit/cast_as_call.py
diff --git a/nova/tests/cells/__init__.py b/nova/tests/unit/cells/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/cells/__init__.py
+++ b/nova/tests/unit/cells/__init__.py
diff --git a/nova/tests/unit/cells/fakes.py b/nova/tests/unit/cells/fakes.py
new file mode 100644
index 0000000000..983e450262
--- /dev/null
+++ b/nova/tests/unit/cells/fakes.py
@@ -0,0 +1,207 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Fakes For Cells tests.
+"""
+
+from oslo.config import cfg
+
+from nova.cells import driver
+from nova.cells import manager as cells_manager
+from nova.cells import state as cells_state
+from nova.cells import utils as cells_utils
+import nova.db
+from nova.db import base
+from nova import exception
+
+CONF = cfg.CONF
+CONF.import_opt('name', 'nova.cells.opts', group='cells')
+
+
+# Fake Cell Hierarchy
+FAKE_TOP_LEVEL_CELL_NAME = 'api-cell'
+# Each list entry maps a child cell's name to the list of its own
+# children, giving a three-level tree rooted at the api-cell.
+FAKE_CELL_LAYOUT = [{'child-cell1': []},
+                    {'child-cell2': [{'grandchild-cell1': []}]},
+                    {'child-cell3': [{'grandchild-cell2': []},
+                                     {'grandchild-cell3': []}]},
+                    {'child-cell4': []}]
+
+# build_cell_stub_infos() below will take the above layout and create
+# a fake view of the DB from the perspective of each of the cells.
+# For each cell, a CellStubInfo will be created with this info.
+CELL_NAME_TO_STUB_INFO = {}
+
+
+class FakeDBApi(object):
+    """Cells uses a different DB in each cell. This means in order to
+    stub out things differently per cell, I need to create a fake DBApi
+    object that is instantiated by each fake cell.
+    """
+    def __init__(self, cell_db_entries):
+        # Canned rows this fake cell returns from cell_get_all().
+        self.cell_db_entries = cell_db_entries
+
+    def __getattr__(self, key):
+        # Anything not stubbed here falls through to the real DB API.
+        return getattr(nova.db, key)
+
+    def cell_get_all(self, ctxt):
+        return self.cell_db_entries
+
+    def compute_node_get_all(self, ctxt):
+        return []
+
+    def instance_get_all_by_filters(self, ctxt, *args, **kwargs):
+        return []
+
+    def instance_get_by_uuid(self, ctxt, instance_uuid):
+        # Fake cells start out with no instances at all.
+        raise exception.InstanceNotFound(instance_id=instance_uuid)
+
+
+class FakeCellsDriver(driver.BaseCellsDriver):
+    # Concrete no-op driver so the cells machinery can be instantiated
+    # in tests without any real transport.
+    pass
+
+
+class FakeCellState(cells_state.CellState):
+    def send_message(self, message):
+        # Short-circuit transport: serialize the message to JSON and
+        # hand it straight to the target cell's in-process message
+        # runner, exercising the real to_json/from_json round-trip.
+        message_runner = get_message_runner(self.name)
+        orig_ctxt = message.ctxt
+        json_message = message.to_json()
+        message = message_runner.message_from_json(json_message)
+        # Restore this so we can use mox and verify same context
+        message.ctxt = orig_ctxt
+        message.process()
+
+
+class FakeCellStateManager(cells_state.CellStateManagerDB):
+    # Force the state manager to build FakeCellState objects so that
+    # inter-cell messages are delivered in-process (see FakeCellState).
+    def __init__(self, *args, **kwargs):
+        super(FakeCellStateManager, self).__init__(*args,
+                cell_state_cls=FakeCellState, **kwargs)
+
+
+class FakeCellsManager(cells_manager.CellsManager):
+    # CellsManager wired up with the fake state manager above.
+    def __init__(self, *args, **kwargs):
+        super(FakeCellsManager, self).__init__(*args,
+                cell_state_manager=FakeCellStateManager,
+                **kwargs)
+
+
+class CellStubInfo(object):
+    """Per-cell bundle: the fake DB rows for one cell plus the
+    FakeCellsManager built from them.
+
+    NOTE: order matters below -- base.Base.__init__ must be stubbed
+    *before* FakeCellsManager() is constructed so every DB-using object
+    it creates picks up this cell's FakeDBApi.
+    """
+    def __init__(self, test_case, cell_name, db_entries):
+        self.test_case = test_case
+        self.cell_name = cell_name
+        self.db_entries = db_entries
+
+        def fake_base_init(_self, *args, **kwargs):
+            # Closure over db_entries: give this cell its own fake DB.
+            _self.db = FakeDBApi(db_entries)
+
+        test_case.stubs.Set(base.Base, '__init__', fake_base_init)
+        self.cells_manager = FakeCellsManager()
+        # Fix the cell name, as it normally uses CONF.cells.name
+        msg_runner = self.cells_manager.msg_runner
+        msg_runner.our_name = self.cell_name
+        self.cells_manager.state_manager.my_cell_state.name = self.cell_name
+
+
+def _build_cell_transport_url(cur_db_id):
+ username = 'username%s' % cur_db_id
+ password = 'password%s' % cur_db_id
+ hostname = 'rpc_host%s' % cur_db_id
+ port = 3090 + cur_db_id
+ virtual_host = 'rpc_vhost%s' % cur_db_id
+
+ return 'rabbit://%s:%s@%s:%s/%s' % (username, password, hostname, port,
+ virtual_host)
+
+
+def _build_cell_stub_info(test_case, our_name, parent_path, children):
+    # Recursively build a CellStubInfo for 'our_name' and each of its
+    # descendants.  Each cell's fake DB contains a row for its parent
+    # (if any, is_parent=True) followed by rows for its direct
+    # children; ids are assigned sequentially starting at 1 *per cell*.
+    cell_db_entries = []
+    cur_db_id = 1
+    sep_char = cells_utils.PATH_CELL_SEP
+    if parent_path:
+        cell_db_entries.append(
+            dict(id=cur_db_id,
+                 name=parent_path.split(sep_char)[-1],
+                 is_parent=True,
+                 transport_url=_build_cell_transport_url(cur_db_id)))
+        cur_db_id += 1
+        our_path = parent_path + sep_char + our_name
+    else:
+        our_path = our_name
+    for child in children:
+        for child_name, grandchildren in child.items():
+            # Depth-first: build the child's stub info before recording
+            # its row in our own fake DB.
+            _build_cell_stub_info(test_case, child_name, our_path,
+                                  grandchildren)
+            cell_entry = dict(id=cur_db_id,
+                              name=child_name,
+                              transport_url=_build_cell_transport_url(
+                                  cur_db_id),
+                              is_parent=False)
+            cell_db_entries.append(cell_entry)
+            cur_db_id += 1
+    stub_info = CellStubInfo(test_case, our_name, cell_db_entries)
+    CELL_NAME_TO_STUB_INFO[our_name] = stub_info
+
+
+def _build_cell_stub_infos(test_case):
+    # Build stub infos for the entire fake hierarchy, rooted at the
+    # top-level api cell (which has no parent path).
+    _build_cell_stub_info(test_case, FAKE_TOP_LEVEL_CELL_NAME, '',
+                          FAKE_CELL_LAYOUT)
+
+
+def init(test_case):
+    # Entry point for tests: point the cells driver at our fake, reset
+    # the module-level stub registry and rebuild the fake hierarchy so
+    # each test starts from a clean slate.
+    global CELL_NAME_TO_STUB_INFO
+    test_case.flags(driver='nova.tests.unit.cells.fakes.FakeCellsDriver',
+                    group='cells')
+    CELL_NAME_TO_STUB_INFO = {}
+    _build_cell_stub_infos(test_case)
+
+
def _get_cell_stub_info(cell_name):
    """Return the CellStubInfo registered under *cell_name*."""
    stub_info = CELL_NAME_TO_STUB_INFO[cell_name]
    return stub_info
+
+
def get_state_manager(cell_name):
    """Return the fake state manager belonging to the named cell."""
    stub_info = _get_cell_stub_info(cell_name)
    return stub_info.cells_manager.state_manager
+
+
def get_cell_state(cur_cell_name, tgt_cell_name):
    """Return tgt_cell_name's CellState as seen from cur_cell_name.

    Children are consulted first, then parents; None is returned when
    the target is unknown in both directions.
    """
    mgr = get_state_manager(cur_cell_name)
    child = mgr.child_cells.get(tgt_cell_name)
    if child is not None:
        return child
    return mgr.parent_cells.get(tgt_cell_name)
+
+
def get_cells_manager(cell_name):
    """Return the FakeCellsManager built for the named cell."""
    stub_info = _get_cell_stub_info(cell_name)
    return stub_info.cells_manager
+
+
def get_message_runner(cell_name):
    """Return the message runner owned by the named cell's manager."""
    stub_info = _get_cell_stub_info(cell_name)
    return stub_info.cells_manager.msg_runner
+
+
def stub_tgt_method(test_case, cell_name, method_name, method):
    """Replace a 'targeted' message method on one cell's runner."""
    runner = get_message_runner(cell_name)
    targeted_methods = runner.methods_by_type['targeted']
    setattr(targeted_methods, method_name, method)
+
+
def stub_bcast_method(test_case, cell_name, method_name, method):
    """Replace a 'broadcast' message method on one cell's runner."""
    runner = get_message_runner(cell_name)
    bcast_methods = runner.methods_by_type['broadcast']
    setattr(bcast_methods, method_name, method)
+
+
def stub_bcast_methods(test_case, method_name, method):
    """Stub a broadcast message method in every fake cell."""
    for name in CELL_NAME_TO_STUB_INFO.keys():
        stub_bcast_method(test_case, name, method_name, method)
diff --git a/nova/tests/unit/cells/test_cells_filters.py b/nova/tests/unit/cells/test_cells_filters.py
new file mode 100644
index 0000000000..0ae832f6c8
--- /dev/null
+++ b/nova/tests/unit/cells/test_cells_filters.py
@@ -0,0 +1,173 @@
+# Copyright (c) 2012-2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for cells scheduler filters.
+"""
+
+from nova.cells import filters
+from nova import context
+from nova.db.sqlalchemy import models
+from nova import test
+from nova.tests.unit.cells import fakes
+
+
class FiltersTestCase(test.NoDBTestCase):
    """Makes sure the proper filters are in the directory."""

    def test_all_filters(self):
        names = [cls.__name__ for cls in filters.all_filters()]
        self.assertIn("TargetCellFilter", names)
+
+
+class _FilterTestClass(test.NoDBTestCase):
+    """Base class for testing individual filter plugins."""
+    # Fully-qualified class path of the filter under test; set by
+    # subclasses before setUp() runs.
+    filter_cls_name = None
+
+    def setUp(self):
+        super(_FilterTestClass, self).setUp()
+        fakes.init(self)
+        # Filters are exercised from the top-level api cell's view.
+        self.msg_runner = fakes.get_message_runner('api-cell')
+        self.scheduler = self.msg_runner.scheduler
+        self.my_cell_state = self.msg_runner.state_manager.get_my_state()
+        self.filter_handler = filters.CellFilterHandler()
+        self.filter_classes = self.filter_handler.get_matching_classes(
+            [self.filter_cls_name])
+        # Admin context by default; individual tests may substitute a
+        # non-admin one.
+        self.context = context.RequestContext('fake', 'fake',
+                                              is_admin=True)
+
+    def _filter_cells(self, cells, filter_properties):
+        # Run the configured filter class(es) over 'cells'.
+        return self.filter_handler.get_filtered_objects(self.filter_classes,
+                                                        cells,
+                                                        filter_properties)
+
+
+class ImagePropertiesFilter(_FilterTestClass):
+    """Tests for the image-properties cell filter, which matches cells
+    by the hypervisor version the image requires.
+    """
+    filter_cls_name = \
+        'nova.cells.filters.image_properties.ImagePropertiesFilter'
+
+    def setUp(self):
+        super(ImagePropertiesFilter, self).setUp()
+        self.cell1 = models.Cell()
+        self.cell2 = models.Cell()
+        self.cell3 = models.Cell()
+        self.cells = [self.cell1, self.cell2, self.cell3]
+        for cell in self.cells:
+            cell.capabilities = {}
+        self.filter_props = {'context': self.context, 'request_spec': {}}
+
+    def test_missing_image_properties(self):
+        # No image properties at all -> no cell is filtered out.
+        self.assertEqual(self.cells,
+                         self._filter_cells(self.cells, self.filter_props))
+
+    def test_missing_hypervisor_version_requires(self):
+        # Image has properties but no version requirement -> no-op.
+        self.filter_props['request_spec'] = {'image': {'properties': {}}}
+        for cell in self.cells:
+            cell.capabilities = {"prominent_hypervisor_version": set([u"6.2"])}
+        self.assertEqual(self.cells,
+                         self._filter_cells(self.cells, self.filter_props))
+
+    def test_missing_hypervisor_version_in_cells(self):
+        # A cell that reports no hypervisor version is not excluded.
+        image = {'properties': {'hypervisor_version_requires': '>6.2.1'}}
+        self.filter_props['request_spec'] = {'image': image}
+        self.cell1.capabilities = {"prominent_hypervisor_version": set([])}
+        self.assertEqual(self.cells,
+                         self._filter_cells(self.cells, self.filter_props))
+
+    def test_cells_matching_hypervisor_version(self):
+        # Only cells whose version satisfies the range survive.
+        image = {'properties': {'hypervisor_version_requires': '>6.0, <=6.3'}}
+        self.filter_props['request_spec'] = {'image': image}
+
+        self.cell1.capabilities = {"prominent_hypervisor_version":
+                                   set([u"6.2"])}
+        self.cell2.capabilities = {"prominent_hypervisor_version":
+                                   set([u"6.3"])}
+        self.cell3.capabilities = {"prominent_hypervisor_version":
+                                   set([u"6.0"])}
+
+        self.assertEqual([self.cell1, self.cell2],
+                         self._filter_cells(self.cells, self.filter_props))
+
+        # assert again to verify filter doesn't mutate state
+        # LP bug #1325705
+        self.assertEqual([self.cell1, self.cell2],
+                         self._filter_cells(self.cells, self.filter_props))
+
+
+class TestTargetCellFilter(_FilterTestClass):
+    """Tests for the target_cell scheduler-hint filter."""
+    filter_cls_name = 'nova.cells.filters.target_cell.TargetCellFilter'
+
+    def test_missing_scheduler_hints(self):
+        cells = [1, 2, 3]
+        # No filtering
+        filter_props = {'context': self.context}
+        self.assertEqual(cells, self._filter_cells(cells, filter_props))
+
+    def test_no_target_cell_hint(self):
+        cells = [1, 2, 3]
+        filter_props = {'scheduler_hints': {},
+                        'context': self.context}
+        # No filtering
+        self.assertEqual(cells, self._filter_cells(cells, filter_props))
+
+    def test_target_cell_specified_me(self):
+        # Hint targets the cell we are running in: collapse the
+        # candidate list to just our own cell state.
+        cells = [1, 2, 3]
+        target_cell = 'fake!cell!path'
+        current_cell = 'fake!cell!path'
+        filter_props = {'scheduler_hints': {'target_cell': target_cell},
+                        'routing_path': current_cell,
+                        'scheduler': self.scheduler,
+                        'context': self.context}
+        # Only myself in the list.
+        self.assertEqual([self.my_cell_state],
+                         self._filter_cells(cells, filter_props))
+
+    def test_target_cell_specified_me_but_not_admin(self):
+        # The hint is honored only for admin contexts.
+        ctxt = context.RequestContext('fake', 'fake')
+        cells = [1, 2, 3]
+        target_cell = 'fake!cell!path'
+        current_cell = 'fake!cell!path'
+        filter_props = {'scheduler_hints': {'target_cell': target_cell},
+                        'routing_path': current_cell,
+                        'scheduler': self.scheduler,
+                        'context': ctxt}
+        # No filtering, because not an admin.
+        self.assertEqual(cells, self._filter_cells(cells, filter_props))
+
+    def test_target_cell_specified_not_me(self):
+        # Hint targets a different cell: the filter re-routes the build
+        # to that cell and aborts local scheduling.
+        info = {}
+
+        def _fake_build_instances(ctxt, cell, sched_kwargs):
+            info['ctxt'] = ctxt
+            info['cell'] = cell
+            info['sched_kwargs'] = sched_kwargs
+
+        self.stubs.Set(self.msg_runner, 'build_instances',
+                       _fake_build_instances)
+        cells = [1, 2, 3]
+        target_cell = 'fake!cell!path'
+        current_cell = 'not!the!same'
+        filter_props = {'scheduler_hints': {'target_cell': target_cell},
+                        'routing_path': current_cell,
+                        'scheduler': self.scheduler,
+                        'context': self.context,
+                        'host_sched_kwargs': 'meow'}
+        # None is returned to bypass further scheduling.
+        self.assertIsNone(self._filter_cells(cells, filter_props))
+        # The filter should have re-scheduled to the child cell itself.
+        expected_info = {'ctxt': self.context,
+                         'cell': 'fake!cell!path',
+                         'sched_kwargs': 'meow'}
+        self.assertEqual(expected_info, info)
diff --git a/nova/tests/unit/cells/test_cells_manager.py b/nova/tests/unit/cells/test_cells_manager.py
new file mode 100644
index 0000000000..ca77abd1d2
--- /dev/null
+++ b/nova/tests/unit/cells/test_cells_manager.py
@@ -0,0 +1,808 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For CellsManager
+"""
+import copy
+import datetime
+
+import mock
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+from nova.cells import messaging
+from nova.cells import utils as cells_utils
+from nova import context
+from nova import test
+from nova.tests.unit.cells import fakes
+from nova.tests.unit import fake_server_actions
+
+CONF = cfg.CONF
+CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
+
+
+# Canned service / compute-node / task-log rows shared by the tests
+# below; host3 deliberately has no compute node attached.
+FAKE_COMPUTE_NODES = [dict(id=1), dict(id=2)]
+FAKE_SERVICES = [dict(id=1, host='host1',
+                      compute_node=[FAKE_COMPUTE_NODES[0]]),
+                 dict(id=2, host='host2',
+                      compute_node=[FAKE_COMPUTE_NODES[1]]),
+                 dict(id=3, host='host3', compute_node=[])]
+FAKE_TASK_LOGS = [dict(id=1, host='host1'),
+                  dict(id=2, host='host2')]
+
+
+class CellsManagerClassTestCase(test.NoDBTestCase):
+ """Test case for CellsManager class."""
+
+    def setUp(self):
+        # Build the fake cell hierarchy and keep handles to the pieces
+        # the tests poke at.  self.ctxt is an opaque sentinel the
+        # manager just passes through.
+        super(CellsManagerClassTestCase, self).setUp()
+        fakes.init(self)
+        # pick a child cell to use for tests.
+        self.our_cell = 'grandchild-cell1'
+        self.cells_manager = fakes.get_cells_manager(self.our_cell)
+        self.msg_runner = self.cells_manager.msg_runner
+        self.state_manager = fakes.get_state_manager(self.our_cell)
+        self.driver = self.cells_manager.driver
+        self.ctxt = 'fake_context'
+
+    def _get_fake_response(self, raw_response=None, exc=False):
+        # Helper: wrap raw_response (or a TestingException when
+        # exc=True) in a cells messaging.Response.
+        if exc:
+            return messaging.Response('fake', test.TestingException(),
+                                      True)
+        if raw_response is None:
+            raw_response = 'fake-response'
+        return messaging.Response('fake', raw_response, False)
+
+    def test_get_cell_info_for_neighbors(self):
+        # Manager delegates straight to the state manager.
+        # (mox record/replay: call order matters.)
+        self.mox.StubOutWithMock(self.cells_manager.state_manager,
+                                 'get_cell_info_for_neighbors')
+        self.cells_manager.state_manager.get_cell_info_for_neighbors()
+        self.mox.ReplayAll()
+        self.cells_manager.get_cell_info_for_neighbors(self.ctxt)
+
+    def test_post_start_hook_child_cell(self):
+        # A cell with parents tells its parents about itself after the
+        # RPC servers have been started.
+        self.mox.StubOutWithMock(self.driver, 'start_servers')
+        self.mox.StubOutWithMock(context, 'get_admin_context')
+        self.mox.StubOutWithMock(self.cells_manager, '_update_our_parents')
+
+        self.driver.start_servers(self.msg_runner)
+        context.get_admin_context().AndReturn(self.ctxt)
+        self.cells_manager._update_our_parents(self.ctxt)
+        self.mox.ReplayAll()
+        self.cells_manager.post_start_hook()
+
+    def test_post_start_hook_middle_cell(self):
+        # A cell with children asks them for their capabilities and
+        # capacities after startup.
+        cells_manager = fakes.get_cells_manager('child-cell2')
+        msg_runner = cells_manager.msg_runner
+        driver = cells_manager.driver
+
+        self.mox.StubOutWithMock(driver, 'start_servers')
+        self.mox.StubOutWithMock(context, 'get_admin_context')
+        self.mox.StubOutWithMock(msg_runner,
+                                 'ask_children_for_capabilities')
+        self.mox.StubOutWithMock(msg_runner,
+                                 'ask_children_for_capacities')
+
+        driver.start_servers(msg_runner)
+        context.get_admin_context().AndReturn(self.ctxt)
+        msg_runner.ask_children_for_capabilities(self.ctxt)
+        msg_runner.ask_children_for_capacities(self.ctxt)
+        self.mox.ReplayAll()
+        cells_manager.post_start_hook()
+
+    def test_update_our_parents(self):
+        # _update_our_parents pushes both capabilities and capacities up.
+        self.mox.StubOutWithMock(self.msg_runner,
+                                 'tell_parents_our_capabilities')
+        self.mox.StubOutWithMock(self.msg_runner,
+                                 'tell_parents_our_capacities')
+
+        self.msg_runner.tell_parents_our_capabilities(self.ctxt)
+        self.msg_runner.tell_parents_our_capacities(self.ctxt)
+        self.mox.ReplayAll()
+        self.cells_manager._update_our_parents(self.ctxt)
+
+    def test_build_instances(self):
+        # Build requests are handed to the message runner, targeted at
+        # our own cell's state.
+        build_inst_kwargs = {'instances': [1, 2]}
+        self.mox.StubOutWithMock(self.msg_runner, 'build_instances')
+        our_cell = self.msg_runner.state_manager.get_my_state()
+        self.msg_runner.build_instances(self.ctxt, our_cell, build_inst_kwargs)
+        self.mox.ReplayAll()
+        self.cells_manager.build_instances(self.ctxt,
+                build_inst_kwargs=build_inst_kwargs)
+
+    def test_run_compute_api_method(self):
+        # Args should just be silently passed through
+        cell_name = 'fake-cell-name'
+        method_info = 'fake-method-info'
+
+        self.mox.StubOutWithMock(self.msg_runner,
+                                 'run_compute_api_method')
+        fake_response = self._get_fake_response()
+        self.msg_runner.run_compute_api_method(self.ctxt,
+                                               cell_name,
+                                               method_info,
+                                               True).AndReturn(fake_response)
+        self.mox.ReplayAll()
+        response = self.cells_manager.run_compute_api_method(
+                self.ctxt, cell_name=cell_name, method_info=method_info,
+                call=True)
+        # The Response wrapper is unpacked to its raw value.
+        self.assertEqual('fake-response', response)
+
+    def test_instance_update_at_top(self):
+        # Straight pass-through to the message runner.
+        self.mox.StubOutWithMock(self.msg_runner, 'instance_update_at_top')
+        self.msg_runner.instance_update_at_top(self.ctxt, 'fake-instance')
+        self.mox.ReplayAll()
+        self.cells_manager.instance_update_at_top(self.ctxt,
+                                                  instance='fake-instance')
+
+    def test_instance_destroy_at_top(self):
+        # Straight pass-through to the message runner.
+        self.mox.StubOutWithMock(self.msg_runner, 'instance_destroy_at_top')
+        self.msg_runner.instance_destroy_at_top(self.ctxt, 'fake-instance')
+        self.mox.ReplayAll()
+        self.cells_manager.instance_destroy_at_top(self.ctxt,
+                                                   instance='fake-instance')
+
+    def test_instance_delete_everywhere(self):
+        # Delete requests are forwarded with the delete type preserved.
+        self.mox.StubOutWithMock(self.msg_runner,
+                                 'instance_delete_everywhere')
+        self.msg_runner.instance_delete_everywhere(self.ctxt,
+                                                   'fake-instance',
+                                                   'fake-type')
+        self.mox.ReplayAll()
+        self.cells_manager.instance_delete_everywhere(
+                self.ctxt, instance='fake-instance',
+                delete_type='fake-type')
+
+    def test_instance_fault_create_at_top(self):
+        # Straight pass-through of the fault payload.
+        self.mox.StubOutWithMock(self.msg_runner,
+                                 'instance_fault_create_at_top')
+        self.msg_runner.instance_fault_create_at_top(self.ctxt,
+                                                     'fake-fault')
+        self.mox.ReplayAll()
+        self.cells_manager.instance_fault_create_at_top(
+                self.ctxt, instance_fault='fake-fault')
+
+    def test_bw_usage_update_at_top(self):
+        # Straight pass-through of the bandwidth-usage payload.
+        self.mox.StubOutWithMock(self.msg_runner,
+                                 'bw_usage_update_at_top')
+        self.msg_runner.bw_usage_update_at_top(self.ctxt,
+                                               'fake-bw-info')
+        self.mox.ReplayAll()
+        self.cells_manager.bw_usage_update_at_top(
+                self.ctxt, bw_update_info='fake-bw-info')
+
+    def test_heal_instances(self):
+        # The healing loop pages through instances updated since the
+        # configured threshold, syncing at most
+        # instance_update_num_instances per run and resuming where the
+        # previous run left off (wrapping around the iterator).
+        self.flags(instance_updated_at_threshold=1000,
+                   instance_update_num_instances=2,
+                   group='cells')
+
+        fake_context = context.RequestContext('fake', 'fake')
+        stalled_time = timeutils.utcnow()
+        updated_since = stalled_time - datetime.timedelta(seconds=1000)
+
+        def utcnow():
+            # Freeze time so 'updated_since' is deterministic.
+            return stalled_time
+
+        call_info = {'get_instances': 0, 'sync_instances': []}
+
+        instances = ['instance1', 'instance2', 'instance3']
+
+        def get_instances_to_sync(context, **kwargs):
+            self.assertEqual(context, fake_context)
+            call_info['shuffle'] = kwargs.get('shuffle')
+            call_info['project_id'] = kwargs.get('project_id')
+            call_info['updated_since'] = kwargs.get('updated_since')
+            call_info['get_instances'] += 1
+            return iter(instances)
+
+        def instance_get_by_uuid(context, uuid):
+            return instances[int(uuid[-1]) - 1]
+
+        def sync_instance(context, instance):
+            self.assertEqual(context, fake_context)
+            call_info['sync_instances'].append(instance)
+
+        self.stubs.Set(cells_utils, 'get_instances_to_sync',
+                       get_instances_to_sync)
+        self.stubs.Set(self.cells_manager.db, 'instance_get_by_uuid',
+                       instance_get_by_uuid)
+        self.stubs.Set(self.cells_manager, '_sync_instance',
+                       sync_instance)
+        self.stubs.Set(timeutils, 'utcnow', utcnow)
+
+        self.cells_manager._heal_instances(fake_context)
+        self.assertEqual(call_info['shuffle'], True)
+        self.assertIsNone(call_info['project_id'])
+        self.assertEqual(call_info['updated_since'], updated_since)
+        self.assertEqual(call_info['get_instances'], 1)
+        # Only first 2
+        self.assertEqual(call_info['sync_instances'],
+                         instances[:2])
+
+        call_info['sync_instances'] = []
+        self.cells_manager._heal_instances(fake_context)
+        self.assertEqual(call_info['shuffle'], True)
+        self.assertIsNone(call_info['project_id'])
+        self.assertEqual(call_info['updated_since'], updated_since)
+        self.assertEqual(call_info['get_instances'], 2)
+        # Now the last 1 and the first 1
+        self.assertEqual(call_info['sync_instances'],
+                         [instances[-1], instances[0]])
+
+    def test_sync_instances(self):
+        # Sync parameters are forwarded positionally to the runner.
+        self.mox.StubOutWithMock(self.msg_runner,
+                                 'sync_instances')
+        self.msg_runner.sync_instances(self.ctxt, 'fake-project',
+                                       'fake-time', 'fake-deleted')
+        self.mox.ReplayAll()
+        self.cells_manager.sync_instances(self.ctxt,
+                                          project_id='fake-project',
+                                          updated_since='fake-time',
+                                          deleted='fake-deleted')
+
+    def test_service_get_all(self):
+        responses = []
+        expected_response = []
+        # 3 cells... so 3 responses.  Each response is a list of services.
+        # Manager should turn these into a single list of responses.
+        # Each service also gets its cell path folded into it.
+        for i in xrange(3):
+            cell_name = 'path!to!cell%i' % i
+            services = []
+            for service in FAKE_SERVICES:
+                services.append(copy.deepcopy(service))
+                expected_service = copy.deepcopy(service)
+                cells_utils.add_cell_to_service(expected_service, cell_name)
+                expected_response.append(expected_service)
+            response = messaging.Response(cell_name, services, False)
+            responses.append(response)
+
+        self.mox.StubOutWithMock(self.msg_runner,
+                                 'service_get_all')
+        self.msg_runner.service_get_all(self.ctxt,
+                                        'fake-filters').AndReturn(responses)
+        self.mox.ReplayAll()
+        response = self.cells_manager.service_get_all(self.ctxt,
+                                                      filters='fake-filters')
+        self.assertEqual(expected_response, response)
+
+    def test_service_get_by_compute_host(self):
+        # 'cell!host' input is split and routed; the result gets the
+        # cell path added back in.
+        self.mox.StubOutWithMock(self.msg_runner,
+                                 'service_get_by_compute_host')
+        fake_cell = 'fake-cell'
+        fake_response = messaging.Response(fake_cell, FAKE_SERVICES[0],
+                                           False)
+        expected_response = copy.deepcopy(FAKE_SERVICES[0])
+        cells_utils.add_cell_to_service(expected_response, fake_cell)
+
+        cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
+        self.msg_runner.service_get_by_compute_host(self.ctxt,
+                fake_cell, 'fake-host').AndReturn(fake_response)
+        self.mox.ReplayAll()
+        response = self.cells_manager.service_get_by_compute_host(self.ctxt,
+                host_name=cell_and_host)
+        self.assertEqual(expected_response, response)
+
+    def test_get_host_uptime(self):
+        # The raw uptime string from the target cell is returned as-is.
+        fake_cell = 'parent!fake-cell'
+        fake_host = 'fake-host'
+        fake_cell_and_host = cells_utils.cell_with_item(fake_cell, fake_host)
+        host_uptime = (" 08:32:11 up 93 days, 18:25, 12 users,  load average:"
+                       " 0.20, 0.12, 0.14")
+        fake_response = messaging.Response(fake_cell, host_uptime, False)
+
+        self.mox.StubOutWithMock(self.msg_runner,
+                                 'get_host_uptime')
+        self.msg_runner.get_host_uptime(self.ctxt, fake_cell, fake_host).\
+            AndReturn(fake_response)
+        self.mox.ReplayAll()
+
+        response = self.cells_manager.get_host_uptime(self.ctxt,
+                                                      fake_cell_and_host)
+        self.assertEqual(host_uptime, response)
+
+    def test_service_update(self):
+        # Update params are routed to the named cell/host; the updated
+        # service comes back with its cell path attached.
+        fake_cell = 'fake-cell'
+        fake_response = messaging.Response(
+            fake_cell, FAKE_SERVICES[0], False)
+        expected_response = copy.deepcopy(FAKE_SERVICES[0])
+        cells_utils.add_cell_to_service(expected_response, fake_cell)
+        cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
+        params_to_update = {'disabled': True}
+
+        self.mox.StubOutWithMock(self.msg_runner, 'service_update')
+        self.msg_runner.service_update(self.ctxt,
+                fake_cell, 'fake-host', 'nova-api',
+                params_to_update).AndReturn(fake_response)
+        self.mox.ReplayAll()
+
+        response = self.cells_manager.service_update(
+            self.ctxt, host_name=cell_and_host, binary='nova-api',
+            params_to_update=params_to_update)
+        self.assertEqual(expected_response, response)
+
+    def test_service_delete(self):
+        # 'cell!service_id' is split and forwarded to the runner.
+        fake_cell = 'fake-cell'
+        service_id = '1'
+        cell_service_id = cells_utils.cell_with_item(fake_cell, service_id)
+
+        with mock.patch.object(self.msg_runner,
+                               'service_delete') as service_delete:
+            self.cells_manager.service_delete(self.ctxt, cell_service_id)
+        service_delete.assert_called_once_with(
+            self.ctxt, fake_cell, service_id)
+
+    def test_proxy_rpc_to_manager(self):
+        # The cell!host suffix is parsed out of the topic and the raw
+        # response value is returned to the caller.
+        self.mox.StubOutWithMock(self.msg_runner,
+                                 'proxy_rpc_to_manager')
+        fake_response = self._get_fake_response()
+        cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
+        topic = "%s.%s" % (CONF.compute_topic, cell_and_host)
+        self.msg_runner.proxy_rpc_to_manager(self.ctxt, 'fake-cell',
+                'fake-host', topic, 'fake-rpc-msg',
+                True, -1).AndReturn(fake_response)
+        self.mox.ReplayAll()
+        response = self.cells_manager.proxy_rpc_to_manager(self.ctxt,
+                topic=topic, rpc_message='fake-rpc-msg', call=True,
+                timeout=-1)
+        self.assertEqual('fake-response', response)
+
+ def _build_task_log_responses(self, num):
+ responses = []
+ expected_response = []
+ # 3 cells... so 3 responses. Each response is a list of task log
+ # entries. Manager should turn these into a single list of
+ # task log entries.
+ for i in xrange(num):
+ cell_name = 'path!to!cell%i' % i
+ task_logs = []
+ for task_log in FAKE_TASK_LOGS:
+ task_logs.append(copy.deepcopy(task_log))
+ expected_task_log = copy.deepcopy(task_log)
+ cells_utils.add_cell_to_task_log(expected_task_log,
+ cell_name)
+ expected_response.append(expected_task_log)
+ response = messaging.Response(cell_name, task_logs, False)
+ responses.append(response)
+ return expected_response, responses
+
+ def test_task_log_get_all(self):
+ expected_response, responses = self._build_task_log_responses(3)
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'task_log_get_all')
+ self.msg_runner.task_log_get_all(self.ctxt, None,
+ 'fake-name', 'fake-begin',
+ 'fake-end', host=None, state=None).AndReturn(responses)
+ self.mox.ReplayAll()
+ response = self.cells_manager.task_log_get_all(self.ctxt,
+ task_name='fake-name',
+ period_beginning='fake-begin', period_ending='fake-end')
+ self.assertEqual(expected_response, response)
+
+ def test_task_log_get_all_with_filters(self):
+ expected_response, responses = self._build_task_log_responses(1)
+ cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'task_log_get_all')
+ self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
+ 'fake-name', 'fake-begin', 'fake-end', host='fake-host',
+ state='fake-state').AndReturn(responses)
+ self.mox.ReplayAll()
+ response = self.cells_manager.task_log_get_all(self.ctxt,
+ task_name='fake-name',
+ period_beginning='fake-begin', period_ending='fake-end',
+ host=cell_and_host, state='fake-state')
+ self.assertEqual(expected_response, response)
+
+ def test_task_log_get_all_with_cell_but_no_host_filters(self):
+ expected_response, responses = self._build_task_log_responses(1)
+ # Host filter only has cell name.
+ cell_and_host = 'fake-cell'
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'task_log_get_all')
+ self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
+ 'fake-name', 'fake-begin', 'fake-end', host=None,
+ state='fake-state').AndReturn(responses)
+ self.mox.ReplayAll()
+ response = self.cells_manager.task_log_get_all(self.ctxt,
+ task_name='fake-name',
+ period_beginning='fake-begin', period_ending='fake-end',
+ host=cell_and_host, state='fake-state')
+ self.assertEqual(expected_response, response)
+
+ def test_compute_node_get_all(self):
+ responses = []
+ expected_response = []
+ # 3 cells... so 3 responses. Each response is a list of computes.
+ # Manager should turn these into a single list of responses.
+ for i in xrange(3):
+ cell_name = 'path!to!cell%i' % i
+ compute_nodes = []
+ for compute_node in FAKE_COMPUTE_NODES:
+ compute_nodes.append(copy.deepcopy(compute_node))
+ expected_compute_node = copy.deepcopy(compute_node)
+ cells_utils.add_cell_to_compute_node(expected_compute_node,
+ cell_name)
+ expected_response.append(expected_compute_node)
+ response = messaging.Response(cell_name, compute_nodes, False)
+ responses.append(response)
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'compute_node_get_all')
+ self.msg_runner.compute_node_get_all(self.ctxt,
+ hypervisor_match='fake-match').AndReturn(responses)
+ self.mox.ReplayAll()
+ response = self.cells_manager.compute_node_get_all(self.ctxt,
+ hypervisor_match='fake-match')
+ self.assertEqual(expected_response, response)
+
+ def test_compute_node_stats(self):
+ raw_resp1 = {'key1': 1, 'key2': 2}
+ raw_resp2 = {'key2': 1, 'key3': 2}
+ raw_resp3 = {'key3': 1, 'key4': 2}
+ responses = [messaging.Response('cell1', raw_resp1, False),
+ messaging.Response('cell2', raw_resp2, False),
+ messaging.Response('cell2', raw_resp3, False)]
+ expected_resp = {'key1': 1, 'key2': 3, 'key3': 3, 'key4': 2}
+
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'compute_node_stats')
+ self.msg_runner.compute_node_stats(self.ctxt).AndReturn(responses)
+ self.mox.ReplayAll()
+ response = self.cells_manager.compute_node_stats(self.ctxt)
+ self.assertEqual(expected_resp, response)
+
+ def test_compute_node_get(self):
+ fake_cell = 'fake-cell'
+ fake_response = messaging.Response(fake_cell,
+ FAKE_COMPUTE_NODES[0],
+ False)
+ expected_response = copy.deepcopy(FAKE_COMPUTE_NODES[0])
+ cells_utils.add_cell_to_compute_node(expected_response, fake_cell)
+ cell_and_id = cells_utils.cell_with_item(fake_cell, 'fake-id')
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'compute_node_get')
+ self.msg_runner.compute_node_get(self.ctxt,
+ 'fake-cell', 'fake-id').AndReturn(fake_response)
+ self.mox.ReplayAll()
+ response = self.cells_manager.compute_node_get(self.ctxt,
+ compute_id=cell_and_id)
+ self.assertEqual(expected_response, response)
+
+ def test_actions_get(self):
+ fake_uuid = fake_server_actions.FAKE_UUID
+ fake_req_id = fake_server_actions.FAKE_REQUEST_ID1
+ fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
+ fake_response = messaging.Response('fake-cell', [fake_act], False)
+ expected_response = [fake_act]
+ self.mox.StubOutWithMock(self.msg_runner, 'actions_get')
+ self.msg_runner.actions_get(self.ctxt, 'fake-cell',
+ 'fake-uuid').AndReturn(fake_response)
+ self.mox.ReplayAll()
+ response = self.cells_manager.actions_get(self.ctxt, 'fake-cell',
+ 'fake-uuid')
+ self.assertEqual(expected_response, response)
+
+ def test_action_get_by_request_id(self):
+ fake_uuid = fake_server_actions.FAKE_UUID
+ fake_req_id = fake_server_actions.FAKE_REQUEST_ID1
+ fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
+ fake_response = messaging.Response('fake-cell', fake_act, False)
+ expected_response = fake_act
+ self.mox.StubOutWithMock(self.msg_runner, 'action_get_by_request_id')
+ self.msg_runner.action_get_by_request_id(self.ctxt, 'fake-cell',
+ 'fake-uuid', 'req-fake').AndReturn(fake_response)
+ self.mox.ReplayAll()
+ response = self.cells_manager.action_get_by_request_id(self.ctxt,
+ 'fake-cell',
+ 'fake-uuid',
+ 'req-fake')
+ self.assertEqual(expected_response, response)
+
+ def test_action_events_get(self):
+ fake_action_id = fake_server_actions.FAKE_ACTION_ID1
+ fake_events = fake_server_actions.FAKE_EVENTS[fake_action_id]
+ fake_response = messaging.Response('fake-cell', fake_events, False)
+ expected_response = fake_events
+ self.mox.StubOutWithMock(self.msg_runner, 'action_events_get')
+ self.msg_runner.action_events_get(self.ctxt, 'fake-cell',
+ 'fake-action').AndReturn(fake_response)
+ self.mox.ReplayAll()
+ response = self.cells_manager.action_events_get(self.ctxt, 'fake-cell',
+ 'fake-action')
+ self.assertEqual(expected_response, response)
+
+ def test_consoleauth_delete_tokens(self):
+ instance_uuid = 'fake-instance-uuid'
+
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'consoleauth_delete_tokens')
+ self.msg_runner.consoleauth_delete_tokens(self.ctxt, instance_uuid)
+ self.mox.ReplayAll()
+ self.cells_manager.consoleauth_delete_tokens(self.ctxt,
+ instance_uuid=instance_uuid)
+
+ def test_get_capacities(self):
+ cell_name = 'cell_name'
+ response = {"ram_free":
+ {"units_by_mb": {"64": 20, "128": 10}, "total_mb": 1491}}
+ self.mox.StubOutWithMock(self.state_manager,
+ 'get_capacities')
+ self.state_manager.get_capacities(cell_name).AndReturn(response)
+ self.mox.ReplayAll()
+ self.assertEqual(response,
+ self.cells_manager.get_capacities(self.ctxt, cell_name))
+
+ def test_validate_console_port(self):
+ instance_uuid = 'fake-instance-uuid'
+ cell_name = 'fake-cell-name'
+ instance = {'cell_name': cell_name}
+ console_port = 'fake-console-port'
+ console_type = 'fake-console-type'
+
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'validate_console_port')
+ self.mox.StubOutWithMock(self.cells_manager.db,
+ 'instance_get_by_uuid')
+ fake_response = self._get_fake_response()
+
+ self.cells_manager.db.instance_get_by_uuid(self.ctxt,
+ instance_uuid).AndReturn(instance)
+ self.msg_runner.validate_console_port(self.ctxt, cell_name,
+ instance_uuid, console_port,
+ console_type).AndReturn(fake_response)
+ self.mox.ReplayAll()
+ response = self.cells_manager.validate_console_port(self.ctxt,
+ instance_uuid=instance_uuid, console_port=console_port,
+ console_type=console_type)
+ self.assertEqual('fake-response', response)
+
+ def test_bdm_update_or_create_at_top(self):
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'bdm_update_or_create_at_top')
+ self.msg_runner.bdm_update_or_create_at_top(self.ctxt,
+ 'fake-bdm',
+ create='foo')
+ self.mox.ReplayAll()
+ self.cells_manager.bdm_update_or_create_at_top(self.ctxt,
+ 'fake-bdm',
+ create='foo')
+
+ def test_bdm_destroy_at_top(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'bdm_destroy_at_top')
+ self.msg_runner.bdm_destroy_at_top(self.ctxt,
+ 'fake_instance_uuid',
+ device_name='fake_device_name',
+ volume_id='fake_volume_id')
+
+ self.mox.ReplayAll()
+ self.cells_manager.bdm_destroy_at_top(self.ctxt,
+ 'fake_instance_uuid',
+ device_name='fake_device_name',
+ volume_id='fake_volume_id')
+
+ def test_get_migrations(self):
+ filters = {'status': 'confirmed'}
+ cell1_migrations = [{'id': 123}]
+ cell2_migrations = [{'id': 456}]
+ fake_responses = [self._get_fake_response(cell1_migrations),
+ self._get_fake_response(cell2_migrations)]
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'get_migrations')
+ self.msg_runner.get_migrations(self.ctxt, None, False, filters).\
+ AndReturn(fake_responses)
+ self.mox.ReplayAll()
+
+ response = self.cells_manager.get_migrations(self.ctxt, filters)
+
+ self.assertEqual([cell1_migrations[0], cell2_migrations[0]], response)
+
+ def test_get_migrations_for_a_given_cell(self):
+ filters = {'status': 'confirmed', 'cell_name': 'ChildCell1'}
+ target_cell = '%s%s%s' % (CONF.cells.name, '!', filters['cell_name'])
+ migrations = [{'id': 123}]
+ fake_responses = [self._get_fake_response(migrations)]
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'get_migrations')
+ self.msg_runner.get_migrations(self.ctxt, target_cell, False,
+ filters).AndReturn(fake_responses)
+ self.mox.ReplayAll()
+
+ response = self.cells_manager.get_migrations(self.ctxt, filters)
+ self.assertEqual(migrations, response)
+
+ def test_instance_update_from_api(self):
+ self.mox.StubOutWithMock(self.msg_runner,
+ 'instance_update_from_api')
+ self.msg_runner.instance_update_from_api(self.ctxt,
+ 'fake-instance',
+ 'exp_vm', 'exp_task',
+ 'admin_reset')
+ self.mox.ReplayAll()
+ self.cells_manager.instance_update_from_api(
+ self.ctxt, instance='fake-instance',
+ expected_vm_state='exp_vm',
+ expected_task_state='exp_task',
+ admin_state_reset='admin_reset')
+
+ def test_start_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'start_instance')
+ self.msg_runner.start_instance(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.start_instance(self.ctxt, instance='fake-instance')
+
+ def test_stop_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'stop_instance')
+ self.msg_runner.stop_instance(self.ctxt, 'fake-instance',
+ do_cast='meow')
+ self.mox.ReplayAll()
+ self.cells_manager.stop_instance(self.ctxt,
+ instance='fake-instance',
+ do_cast='meow')
+
+ def test_cell_create(self):
+ values = 'values'
+ response = 'created_cell'
+ self.mox.StubOutWithMock(self.state_manager,
+ 'cell_create')
+ self.state_manager.cell_create(self.ctxt, values).\
+ AndReturn(response)
+ self.mox.ReplayAll()
+ self.assertEqual(response,
+ self.cells_manager.cell_create(self.ctxt, values))
+
+ def test_cell_update(self):
+ cell_name = 'cell_name'
+ values = 'values'
+ response = 'updated_cell'
+ self.mox.StubOutWithMock(self.state_manager,
+ 'cell_update')
+ self.state_manager.cell_update(self.ctxt, cell_name, values).\
+ AndReturn(response)
+ self.mox.ReplayAll()
+ self.assertEqual(response,
+ self.cells_manager.cell_update(self.ctxt, cell_name,
+ values))
+
+ def test_cell_delete(self):
+ cell_name = 'cell_name'
+ response = 1
+ self.mox.StubOutWithMock(self.state_manager,
+ 'cell_delete')
+ self.state_manager.cell_delete(self.ctxt, cell_name).\
+ AndReturn(response)
+ self.mox.ReplayAll()
+ self.assertEqual(response,
+ self.cells_manager.cell_delete(self.ctxt, cell_name))
+
+ def test_cell_get(self):
+ cell_name = 'cell_name'
+ response = 'cell_info'
+ self.mox.StubOutWithMock(self.state_manager,
+ 'cell_get')
+ self.state_manager.cell_get(self.ctxt, cell_name).\
+ AndReturn(response)
+ self.mox.ReplayAll()
+ self.assertEqual(response,
+ self.cells_manager.cell_get(self.ctxt, cell_name))
+
+ def test_reboot_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'reboot_instance')
+ self.msg_runner.reboot_instance(self.ctxt, 'fake-instance',
+ 'HARD')
+ self.mox.ReplayAll()
+ self.cells_manager.reboot_instance(self.ctxt,
+ instance='fake-instance',
+ reboot_type='HARD')
+
+ def test_suspend_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'suspend_instance')
+ self.msg_runner.suspend_instance(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.suspend_instance(self.ctxt,
+ instance='fake-instance')
+
+ def test_resume_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'resume_instance')
+ self.msg_runner.resume_instance(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.resume_instance(self.ctxt,
+ instance='fake-instance')
+
+ def test_terminate_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'terminate_instance')
+ self.msg_runner.terminate_instance(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.terminate_instance(self.ctxt,
+ instance='fake-instance')
+
+ def test_soft_delete_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'soft_delete_instance')
+ self.msg_runner.soft_delete_instance(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.soft_delete_instance(self.ctxt,
+ instance='fake-instance')
+
+ def test_resize_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'resize_instance')
+ self.msg_runner.resize_instance(self.ctxt, 'fake-instance',
+ 'fake-flavor', 'fake-updates')
+ self.mox.ReplayAll()
+ self.cells_manager.resize_instance(
+ self.ctxt, instance='fake-instance', flavor='fake-flavor',
+ extra_instance_updates='fake-updates')
+
+ def test_live_migrate_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'live_migrate_instance')
+ self.msg_runner.live_migrate_instance(self.ctxt, 'fake-instance',
+ 'fake-block', 'fake-commit',
+ 'fake-host')
+ self.mox.ReplayAll()
+ self.cells_manager.live_migrate_instance(
+ self.ctxt, instance='fake-instance',
+ block_migration='fake-block', disk_over_commit='fake-commit',
+ host_name='fake-host')
+
+ def test_revert_resize(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'revert_resize')
+ self.msg_runner.revert_resize(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.revert_resize(self.ctxt, instance='fake-instance')
+
+ def test_confirm_resize(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'confirm_resize')
+ self.msg_runner.confirm_resize(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.confirm_resize(self.ctxt, instance='fake-instance')
+
+ def test_reset_network(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'reset_network')
+ self.msg_runner.reset_network(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.reset_network(self.ctxt, instance='fake-instance')
+
+ def test_inject_network_info(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'inject_network_info')
+ self.msg_runner.inject_network_info(self.ctxt, 'fake-instance')
+ self.mox.ReplayAll()
+ self.cells_manager.inject_network_info(self.ctxt,
+ instance='fake-instance')
+
+ def test_snapshot_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'snapshot_instance')
+ self.msg_runner.snapshot_instance(self.ctxt, 'fake-instance',
+ 'fake-id')
+ self.mox.ReplayAll()
+ self.cells_manager.snapshot_instance(self.ctxt,
+ instance='fake-instance',
+ image_id='fake-id')
+
+ def test_backup_instance(self):
+ self.mox.StubOutWithMock(self.msg_runner, 'backup_instance')
+ self.msg_runner.backup_instance(self.ctxt, 'fake-instance',
+ 'fake-id', 'backup-type',
+ 'rotation')
+ self.mox.ReplayAll()
+ self.cells_manager.backup_instance(self.ctxt,
+ instance='fake-instance',
+ image_id='fake-id',
+ backup_type='backup-type',
+ rotation='rotation')
+
+ def test_set_admin_password(self):
+ with mock.patch.object(self.msg_runner,
+ 'set_admin_password') as set_admin_password:
+ self.cells_manager.set_admin_password(self.ctxt,
+ instance='fake-instance', new_pass='fake-password')
+ set_admin_password.assert_called_once_with(self.ctxt,
+ 'fake-instance', 'fake-password')
diff --git a/nova/tests/unit/cells/test_cells_messaging.py b/nova/tests/unit/cells/test_cells_messaging.py
new file mode 100644
index 0000000000..dc15fd1079
--- /dev/null
+++ b/nova/tests/unit/cells/test_cells_messaging.py
@@ -0,0 +1,2129 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Cells Messaging module
+"""
+
+import contextlib
+
+import mock
+import mox
+from oslo.config import cfg
+from oslo import messaging as oslo_messaging
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+
+from nova.cells import messaging
+from nova.cells import utils as cells_utils
+from nova.compute import delete_types
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.network import model as network_model
+from nova import objects
+from nova.objects import base as objects_base
+from nova.objects import fields as objects_fields
+from nova.openstack.common import uuidutils
+from nova import rpc
+from nova import test
+from nova.tests.unit.cells import fakes
+from nova.tests.unit import fake_server_actions
+
+CONF = cfg.CONF
+CONF.import_opt('name', 'nova.cells.opts', group='cells')
+
+
+class CellsMessageClassesTestCase(test.TestCase):
+ """Test case for the main Cells Message classes."""
+ def setUp(self):
+ super(CellsMessageClassesTestCase, self).setUp()
+ fakes.init(self)
+ self.ctxt = context.RequestContext('fake', 'fake')
+ self.our_name = 'api-cell'
+ self.msg_runner = fakes.get_message_runner(self.our_name)
+ self.state_manager = self.msg_runner.state_manager
+
+ def test_reverse_path(self):
+ path = 'a!b!c!d'
+ expected = 'd!c!b!a'
+ rev_path = messaging._reverse_path(path)
+ self.assertEqual(rev_path, expected)
+
+ def test_response_cell_name_from_path(self):
+ # test array with tuples of inputs/expected outputs
+ test_paths = [('cell1', 'cell1'),
+ ('cell1!cell2', 'cell2!cell1'),
+ ('cell1!cell2!cell3', 'cell3!cell2!cell1')]
+
+ for test_input, expected_output in test_paths:
+ self.assertEqual(expected_output,
+ messaging._response_cell_name_from_path(test_input))
+
+ def test_response_cell_name_from_path_neighbor_only(self):
+ # test array with tuples of inputs/expected outputs
+ test_paths = [('cell1', 'cell1'),
+ ('cell1!cell2', 'cell2!cell1'),
+ ('cell1!cell2!cell3', 'cell3!cell2')]
+
+ for test_input, expected_output in test_paths:
+ self.assertEqual(expected_output,
+ messaging._response_cell_name_from_path(test_input,
+ neighbor_only=True))
+
+ def test_targeted_message(self):
+ self.flags(max_hop_count=99, group='cells')
+ target_cell = 'api-cell!child-cell2!grandchild-cell1'
+ method = 'fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ self.assertEqual(self.ctxt, tgt_message.ctxt)
+ self.assertEqual(method, tgt_message.method_name)
+ self.assertEqual(method_kwargs, tgt_message.method_kwargs)
+ self.assertEqual(direction, tgt_message.direction)
+ self.assertEqual(target_cell, target_cell)
+ self.assertFalse(tgt_message.fanout)
+ self.assertFalse(tgt_message.need_response)
+ self.assertEqual(self.our_name, tgt_message.routing_path)
+ self.assertEqual(1, tgt_message.hop_count)
+ self.assertEqual(99, tgt_message.max_hop_count)
+ self.assertFalse(tgt_message.is_broadcast)
+ # Correct next hop?
+ next_hop = tgt_message._get_next_hop()
+ child_cell = self.state_manager.get_child_cell('child-cell2')
+ self.assertEqual(child_cell, next_hop)
+
+ def test_create_targeted_message_with_response(self):
+ self.flags(max_hop_count=99, group='cells')
+ our_name = 'child-cell1'
+ target_cell = 'child-cell1!api-cell'
+ msg_runner = fakes.get_message_runner(our_name)
+ method = 'fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'up'
+ tgt_message = messaging._TargetedMessage(msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell,
+ need_response=True)
+ self.assertEqual(self.ctxt, tgt_message.ctxt)
+ self.assertEqual(method, tgt_message.method_name)
+ self.assertEqual(method_kwargs, tgt_message.method_kwargs)
+ self.assertEqual(direction, tgt_message.direction)
+ self.assertEqual(target_cell, target_cell)
+ self.assertFalse(tgt_message.fanout)
+ self.assertTrue(tgt_message.need_response)
+ self.assertEqual(our_name, tgt_message.routing_path)
+ self.assertEqual(1, tgt_message.hop_count)
+ self.assertEqual(99, tgt_message.max_hop_count)
+ self.assertFalse(tgt_message.is_broadcast)
+ # Correct next hop?
+ next_hop = tgt_message._get_next_hop()
+ parent_cell = msg_runner.state_manager.get_parent_cell('api-cell')
+ self.assertEqual(parent_cell, next_hop)
+
+ def test_targeted_message_when_target_is_cell_state(self):
+ method = 'fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+ target_cell = self.state_manager.get_child_cell('child-cell2')
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ self.assertEqual('api-cell!child-cell2', tgt_message.target_cell)
+ # Correct next hop?
+ next_hop = tgt_message._get_next_hop()
+ self.assertEqual(target_cell, next_hop)
+
+ def test_targeted_message_when_target_cell_state_is_me(self):
+ method = 'fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+ target_cell = self.state_manager.get_my_state()
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ self.assertEqual('api-cell', tgt_message.target_cell)
+ # Correct next hop?
+ next_hop = tgt_message._get_next_hop()
+ self.assertEqual(target_cell, next_hop)
+
+ def test_create_broadcast_message(self):
+ self.flags(max_hop_count=99, group='cells')
+ self.flags(name='api-cell', max_hop_count=99, group='cells')
+ method = 'fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction)
+ self.assertEqual(self.ctxt, bcast_message.ctxt)
+ self.assertEqual(method, bcast_message.method_name)
+ self.assertEqual(method_kwargs, bcast_message.method_kwargs)
+ self.assertEqual(direction, bcast_message.direction)
+ self.assertFalse(bcast_message.fanout)
+ self.assertFalse(bcast_message.need_response)
+ self.assertEqual(self.our_name, bcast_message.routing_path)
+ self.assertEqual(1, bcast_message.hop_count)
+ self.assertEqual(99, bcast_message.max_hop_count)
+ self.assertTrue(bcast_message.is_broadcast)
+ # Correct next hops?
+ next_hops = bcast_message._get_next_hops()
+ child_cells = self.state_manager.get_child_cells()
+ self.assertEqual(child_cells, next_hops)
+
+ def test_create_broadcast_message_with_response(self):
+ self.flags(max_hop_count=99, group='cells')
+ our_name = 'child-cell1'
+ msg_runner = fakes.get_message_runner(our_name)
+ method = 'fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'up'
+ bcast_message = messaging._BroadcastMessage(msg_runner, self.ctxt,
+ method, method_kwargs, direction, need_response=True)
+ self.assertEqual(self.ctxt, bcast_message.ctxt)
+ self.assertEqual(method, bcast_message.method_name)
+ self.assertEqual(method_kwargs, bcast_message.method_kwargs)
+ self.assertEqual(direction, bcast_message.direction)
+ self.assertFalse(bcast_message.fanout)
+ self.assertTrue(bcast_message.need_response)
+ self.assertEqual(our_name, bcast_message.routing_path)
+ self.assertEqual(1, bcast_message.hop_count)
+ self.assertEqual(99, bcast_message.max_hop_count)
+ self.assertTrue(bcast_message.is_broadcast)
+ # Correct next hops?
+ next_hops = bcast_message._get_next_hops()
+ parent_cells = msg_runner.state_manager.get_parent_cells()
+ self.assertEqual(parent_cells, next_hops)
+
+ def test_self_targeted_message(self):
+ target_cell = 'api-cell'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ call_info = {}
+
+ def our_fake_method(message, **kwargs):
+ call_info['context'] = message.ctxt
+ call_info['routing_path'] = message.routing_path
+ call_info['kwargs'] = kwargs
+
+ fakes.stub_tgt_method(self, 'api-cell', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ tgt_message.process()
+
+ self.assertEqual(self.ctxt, call_info['context'])
+ self.assertEqual(method_kwargs, call_info['kwargs'])
+ self.assertEqual(target_cell, call_info['routing_path'])
+
+ def test_child_targeted_message(self):
+ target_cell = 'api-cell!child-cell1'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ call_info = {}
+
+ def our_fake_method(message, **kwargs):
+ call_info['context'] = message.ctxt
+ call_info['routing_path'] = message.routing_path
+ call_info['kwargs'] = kwargs
+
+ fakes.stub_tgt_method(self, 'child-cell1', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ tgt_message.process()
+
+ self.assertEqual(self.ctxt, call_info['context'])
+ self.assertEqual(method_kwargs, call_info['kwargs'])
+ self.assertEqual(target_cell, call_info['routing_path'])
+
+ def test_child_targeted_message_with_object(self):
+ target_cell = 'api-cell!child-cell1'
+ method = 'our_fake_method'
+ direction = 'down'
+
+ call_info = {}
+
+ class CellsMsgingTestObject(objects_base.NovaObject):
+ """Test object. We just need 1 field in order to test
+ that this gets serialized properly.
+ """
+ fields = {'test': objects_fields.StringField()}
+
+ test_obj = CellsMsgingTestObject()
+ test_obj.test = 'meow'
+
+ method_kwargs = dict(obj=test_obj, arg1=1, arg2=2)
+
+ def our_fake_method(message, **kwargs):
+ call_info['context'] = message.ctxt
+ call_info['routing_path'] = message.routing_path
+ call_info['kwargs'] = kwargs
+
+ fakes.stub_tgt_method(self, 'child-cell1', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ tgt_message.process()
+
+ self.assertEqual(self.ctxt, call_info['context'])
+ self.assertEqual(target_cell, call_info['routing_path'])
+ self.assertEqual(3, len(call_info['kwargs']))
+ self.assertEqual(1, call_info['kwargs']['arg1'])
+ self.assertEqual(2, call_info['kwargs']['arg2'])
+ # Verify we get a new object with what we expect.
+ obj = call_info['kwargs']['obj']
+ self.assertIsInstance(obj, CellsMsgingTestObject)
+ self.assertNotEqual(id(test_obj), id(obj))
+ self.assertEqual(test_obj.test, obj.test)
+
+ def test_grandchild_targeted_message(self):
+ target_cell = 'api-cell!child-cell2!grandchild-cell1'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ call_info = {}
+
+ def our_fake_method(message, **kwargs):
+ call_info['context'] = message.ctxt
+ call_info['routing_path'] = message.routing_path
+ call_info['kwargs'] = kwargs
+
+ fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell)
+ tgt_message.process()
+
+ self.assertEqual(self.ctxt, call_info['context'])
+ self.assertEqual(method_kwargs, call_info['kwargs'])
+ self.assertEqual(target_cell, call_info['routing_path'])
+
+ def test_grandchild_targeted_message_with_response(self):
+ target_cell = 'api-cell!child-cell2!grandchild-cell1'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ call_info = {}
+
+ def our_fake_method(message, **kwargs):
+ call_info['context'] = message.ctxt
+ call_info['routing_path'] = message.routing_path
+ call_info['kwargs'] = kwargs
+ return 'our_fake_response'
+
+ fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell,
+ need_response=True)
+ response = tgt_message.process()
+
+ self.assertEqual(self.ctxt, call_info['context'])
+ self.assertEqual(method_kwargs, call_info['kwargs'])
+ self.assertEqual(target_cell, call_info['routing_path'])
+ self.assertFalse(response.failure)
+ self.assertEqual(response.value_or_raise(), 'our_fake_response')
+
+ def test_grandchild_targeted_message_with_error(self):
+ target_cell = 'api-cell!child-cell2!grandchild-cell1'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ def our_fake_method(message, **kwargs):
+ raise test.TestingException('this should be returned')
+
+ fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell,
+ need_response=True)
+ response = tgt_message.process()
+ self.assertTrue(response.failure)
+ self.assertRaises(test.TestingException, response.value_or_raise)
+
+ def test_grandchild_targeted_message_max_hops(self):
+ self.flags(max_hop_count=2, group='cells')
+ target_cell = 'api-cell!child-cell2!grandchild-cell1'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ def our_fake_method(message, **kwargs):
+ raise test.TestingException('should not be reached')
+
+ fakes.stub_tgt_method(self, 'grandchild-cell1', 'our_fake_method',
+ our_fake_method)
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell,
+ need_response=True)
+ response = tgt_message.process()
+ self.assertTrue(response.failure)
+ self.assertRaises(exception.CellMaxHopCountReached,
+ response.value_or_raise)
+
+ def test_targeted_message_invalid_cell(self):
+ target_cell = 'api-cell!child-cell2!grandchild-cell4'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell,
+ need_response=True)
+ response = tgt_message.process()
+ self.assertTrue(response.failure)
+ self.assertRaises(exception.CellRoutingInconsistency,
+ response.value_or_raise)
+
+ def test_targeted_message_invalid_cell2(self):
+ target_cell = 'unknown-cell!child-cell2'
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ tgt_message = messaging._TargetedMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs, direction,
+ target_cell,
+ need_response=True)
+ response = tgt_message.process()
+ self.assertTrue(response.failure)
+ self.assertRaises(exception.CellRoutingInconsistency,
+ response.value_or_raise)
+
+ def test_broadcast_routing(self):
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ cells = set()
+
+ def our_fake_method(message, **kwargs):
+ cells.add(message.routing_path)
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs,
+ direction,
+ run_locally=True)
+ bcast_message.process()
+ # fakes creates 8 cells (including ourself).
+ self.assertEqual(len(cells), 8)
+
+ def test_broadcast_routing_up(self):
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'up'
+ msg_runner = fakes.get_message_runner('grandchild-cell3')
+
+ cells = set()
+
+ def our_fake_method(message, **kwargs):
+ cells.add(message.routing_path)
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+
+ bcast_message = messaging._BroadcastMessage(msg_runner, self.ctxt,
+ method, method_kwargs,
+ direction,
+ run_locally=True)
+ bcast_message.process()
+ # Paths are reversed, since going 'up'
+ expected = set(['grandchild-cell3', 'grandchild-cell3!child-cell3',
+ 'grandchild-cell3!child-cell3!api-cell'])
+ self.assertEqual(expected, cells)
+
+ def test_broadcast_routing_without_ourselves(self):
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ cells = set()
+
+ def our_fake_method(message, **kwargs):
+ cells.add(message.routing_path)
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs,
+ direction,
+ run_locally=False)
+ bcast_message.process()
+ # fakes creates 8 cells (including ourself). So we should see
+ # only 7 here.
+ self.assertEqual(len(cells), 7)
+
+ def test_broadcast_routing_with_response(self):
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ def our_fake_method(message, **kwargs):
+ return 'response-%s' % message.routing_path
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs,
+ direction,
+ run_locally=True,
+ need_response=True)
+ responses = bcast_message.process()
+ self.assertEqual(len(responses), 8)
+ for response in responses:
+ self.assertFalse(response.failure)
+ self.assertEqual('response-%s' % response.cell_name,
+ response.value_or_raise())
+
+ def test_broadcast_routing_with_response_max_hops(self):
+ self.flags(max_hop_count=2, group='cells')
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ def our_fake_method(message, **kwargs):
+ return 'response-%s' % message.routing_path
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs,
+ direction,
+ run_locally=True,
+ need_response=True)
+ responses = bcast_message.process()
+ # Should only get responses from our immediate children (and
+ # ourselves)
+ self.assertEqual(len(responses), 5)
+ for response in responses:
+ self.assertFalse(response.failure)
+ self.assertEqual('response-%s' % response.cell_name,
+ response.value_or_raise())
+
+ def test_broadcast_routing_with_all_erroring(self):
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ def our_fake_method(message, **kwargs):
+ raise test.TestingException('fake failure')
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs,
+ direction,
+ run_locally=True,
+ need_response=True)
+ responses = bcast_message.process()
+ self.assertEqual(len(responses), 8)
+ for response in responses:
+ self.assertTrue(response.failure)
+ self.assertRaises(test.TestingException, response.value_or_raise)
+
+ def test_broadcast_routing_with_two_erroring(self):
+ method = 'our_fake_method'
+ method_kwargs = dict(arg1=1, arg2=2)
+ direction = 'down'
+
+ def our_fake_method_failing(message, **kwargs):
+ raise test.TestingException('fake failure')
+
+ def our_fake_method(message, **kwargs):
+ return 'response-%s' % message.routing_path
+
+ fakes.stub_bcast_methods(self, 'our_fake_method', our_fake_method)
+ fakes.stub_bcast_method(self, 'child-cell2', 'our_fake_method',
+ our_fake_method_failing)
+ fakes.stub_bcast_method(self, 'grandchild-cell3', 'our_fake_method',
+ our_fake_method_failing)
+
+ bcast_message = messaging._BroadcastMessage(self.msg_runner,
+ self.ctxt, method,
+ method_kwargs,
+ direction,
+ run_locally=True,
+ need_response=True)
+ responses = bcast_message.process()
+ self.assertEqual(len(responses), 8)
+ failure_responses = [resp for resp in responses if resp.failure]
+ success_responses = [resp for resp in responses if not resp.failure]
+ self.assertEqual(len(failure_responses), 2)
+ self.assertEqual(len(success_responses), 6)
+
+ for response in success_responses:
+ self.assertFalse(response.failure)
+ self.assertEqual('response-%s' % response.cell_name,
+ response.value_or_raise())
+
+ for response in failure_responses:
+ self.assertIn(response.cell_name, ['api-cell!child-cell2',
+ 'api-cell!child-cell3!grandchild-cell3'])
+ self.assertTrue(response.failure)
+ self.assertRaises(test.TestingException, response.value_or_raise)
+
+
+class CellsTargetedMethodsTestCase(test.TestCase):
+ """Test case for _TargetedMessageMethods class. Most of these
+ tests actually test the full path from the MessageRunner through
+ to the functionality of the message method. Hits 2 birds with 1
+ stone, even though it's a little more than a unit test.
+ """
+ def setUp(self):
+ super(CellsTargetedMethodsTestCase, self).setUp()
+ fakes.init(self)
+ self.ctxt = context.RequestContext('fake', 'fake')
+ self._setup_attrs('api-cell', 'api-cell!child-cell2')
+
+ def _setup_attrs(self, source_cell, target_cell):
+ self.tgt_cell_name = target_cell
+ self.src_msg_runner = fakes.get_message_runner(source_cell)
+ self.src_state_manager = self.src_msg_runner.state_manager
+ tgt_shortname = target_cell.split('!')[-1]
+ self.tgt_cell_mgr = fakes.get_cells_manager(tgt_shortname)
+ self.tgt_msg_runner = self.tgt_cell_mgr.msg_runner
+ self.tgt_scheduler = self.tgt_msg_runner.scheduler
+ self.tgt_state_manager = self.tgt_msg_runner.state_manager
+ methods_cls = self.tgt_msg_runner.methods_by_type['targeted']
+ self.tgt_methods_cls = methods_cls
+ self.tgt_compute_api = methods_cls.compute_api
+ self.tgt_host_api = methods_cls.host_api
+ self.tgt_db_inst = methods_cls.db
+ self.tgt_c_rpcapi = methods_cls.compute_rpcapi
+
+ def test_build_instances(self):
+ build_inst_kwargs = {'filter_properties': {},
+ 'key1': 'value1',
+ 'key2': 'value2'}
+ self.mox.StubOutWithMock(self.tgt_scheduler, 'build_instances')
+ self.tgt_scheduler.build_instances(self.ctxt, build_inst_kwargs)
+ self.mox.ReplayAll()
+ self.src_msg_runner.build_instances(self.ctxt, self.tgt_cell_name,
+ build_inst_kwargs)
+
+ def test_run_compute_api_method(self):
+
+ instance_uuid = 'fake_instance_uuid'
+ method_info = {'method': 'backup',
+ 'method_args': (instance_uuid, 2, 3),
+ 'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}}
+ self.mox.StubOutWithMock(self.tgt_compute_api, 'backup')
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
+
+ self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
+ instance_uuid).AndReturn('fake_instance')
+ self.tgt_compute_api.backup(self.ctxt, 'fake_instance', 2, 3,
+ arg1='val1', arg2='val2').AndReturn('fake_result')
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.run_compute_api_method(
+ self.ctxt,
+ self.tgt_cell_name,
+ method_info,
+ True)
+ result = response.value_or_raise()
+ self.assertEqual('fake_result', result)
+
+ def _run_compute_api_method_expects_object(self, tgt_compute_api_function,
+ method_name,
+ expected_attrs=None):
+ # runs compute api methods which expects instance to be an object
+ instance_uuid = 'fake_instance_uuid'
+ method_info = {'method': method_name,
+ 'method_args': (instance_uuid, 2, 3),
+ 'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}}
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
+
+ self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
+ instance_uuid).AndReturn('fake_instance')
+
+ def get_instance_mock():
+ # NOTE(comstud): This block of code simulates the following
+ # mox code:
+ #
+ # self.mox.StubOutWithMock(objects, 'Instance',
+ # use_mock_anything=True)
+ # self.mox.StubOutWithMock(objects.Instance,
+ # '_from_db_object')
+ # instance_mock = self.mox.CreateMock(objects.Instance)
+ # objects.Instance().AndReturn(instance_mock)
+ #
+ # Unfortunately, the above code fails on py27 do to some
+ # issue with the Mock object do to similar issue as this:
+ # https://code.google.com/p/pymox/issues/detail?id=35
+ #
+ class FakeInstance(object):
+ @classmethod
+ def _from_db_object(cls, ctxt, obj, db_obj, **kwargs):
+ pass
+
+ instance_mock = FakeInstance()
+
+ def fake_instance():
+ return instance_mock
+
+ self.stubs.Set(objects, 'Instance', fake_instance)
+ self.mox.StubOutWithMock(instance_mock, '_from_db_object')
+ return instance_mock
+
+ instance = get_instance_mock()
+ instance._from_db_object(self.ctxt,
+ instance,
+ 'fake_instance',
+ expected_attrs=expected_attrs
+ ).AndReturn(instance)
+ tgt_compute_api_function(self.ctxt, instance, 2, 3,
+ arg1='val1', arg2='val2').AndReturn('fake_result')
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.run_compute_api_method(
+ self.ctxt,
+ self.tgt_cell_name,
+ method_info,
+ True)
+ result = response.value_or_raise()
+ self.assertEqual('fake_result', result)
+
+ def test_run_compute_api_method_expects_obj(self):
+ # Run compute_api start method
+ self.mox.StubOutWithMock(self.tgt_compute_api, 'start')
+ self._run_compute_api_method_expects_object(self.tgt_compute_api.start,
+ 'start')
+
+ def test_run_compute_api_method_expects_obj_with_info_cache(self):
+ # Run compute_api shelve method as it requires info_cache and
+ # metadata to be present in instance object
+ self.mox.StubOutWithMock(self.tgt_compute_api, 'shelve')
+ self._run_compute_api_method_expects_object(
+ self.tgt_compute_api.shelve, 'shelve',
+ expected_attrs=['metadata', 'info_cache'])
+
+ def test_run_compute_api_method_unknown_instance(self):
+ # Unknown instance should send a broadcast up that instance
+ # is gone.
+ instance_uuid = 'fake_instance_uuid'
+ instance = {'uuid': instance_uuid}
+ method_info = {'method': 'reboot',
+ 'method_args': (instance_uuid, 2, 3),
+ 'method_kwargs': {'arg1': 'val1', 'arg2': 'val2'}}
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'instance_destroy_at_top')
+
+ self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
+ 'fake_instance_uuid').AndRaise(
+ exception.InstanceNotFound(instance_id=instance_uuid))
+ self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, instance)
+
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.run_compute_api_method(
+ self.ctxt,
+ self.tgt_cell_name,
+ method_info,
+ True)
+ self.assertRaises(exception.InstanceNotFound,
+ response.value_or_raise)
+
+ def test_update_capabilities(self):
+ # Route up to API
+ self._setup_attrs('child-cell2', 'child-cell2!api-cell')
+ capabs = {'cap1': set(['val1', 'val2']),
+ 'cap2': set(['val3'])}
+ # The list(set([])) seems silly, but we can't assume the order
+ # of the list... This behavior should match the code we're
+ # testing... which is check that a set was converted to a list.
+ expected_capabs = {'cap1': list(set(['val1', 'val2'])),
+ 'cap2': ['val3']}
+ self.mox.StubOutWithMock(self.src_state_manager,
+ 'get_our_capabilities')
+ self.mox.StubOutWithMock(self.tgt_state_manager,
+ 'update_cell_capabilities')
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'tell_parents_our_capabilities')
+ self.src_state_manager.get_our_capabilities().AndReturn(capabs)
+ self.tgt_state_manager.update_cell_capabilities('child-cell2',
+ expected_capabs)
+ self.tgt_msg_runner.tell_parents_our_capabilities(self.ctxt)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.tell_parents_our_capabilities(self.ctxt)
+
+ def test_update_capacities(self):
+ self._setup_attrs('child-cell2', 'child-cell2!api-cell')
+ capacs = 'fake_capacs'
+ self.mox.StubOutWithMock(self.src_state_manager,
+ 'get_our_capacities')
+ self.mox.StubOutWithMock(self.tgt_state_manager,
+ 'update_cell_capacities')
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'tell_parents_our_capacities')
+ self.src_state_manager.get_our_capacities().AndReturn(capacs)
+ self.tgt_state_manager.update_cell_capacities('child-cell2',
+ capacs)
+ self.tgt_msg_runner.tell_parents_our_capacities(self.ctxt)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.tell_parents_our_capacities(self.ctxt)
+
+ def test_announce_capabilities(self):
+ self._setup_attrs('api-cell', 'api-cell!child-cell1')
+ # To make this easier to test, make us only have 1 child cell.
+ cell_state = self.src_state_manager.child_cells['child-cell1']
+ self.src_state_manager.child_cells = {'child-cell1': cell_state}
+
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'tell_parents_our_capabilities')
+ self.tgt_msg_runner.tell_parents_our_capabilities(self.ctxt)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.ask_children_for_capabilities(self.ctxt)
+
+ def test_announce_capacities(self):
+ self._setup_attrs('api-cell', 'api-cell!child-cell1')
+ # To make this easier to test, make us only have 1 child cell.
+ cell_state = self.src_state_manager.child_cells['child-cell1']
+ self.src_state_manager.child_cells = {'child-cell1': cell_state}
+
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'tell_parents_our_capacities')
+ self.tgt_msg_runner.tell_parents_our_capacities(self.ctxt)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.ask_children_for_capacities(self.ctxt)
+
+ def test_service_get_by_compute_host(self):
+ fake_host_name = 'fake-host-name'
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'service_get_by_compute_host')
+
+ self.tgt_db_inst.service_get_by_compute_host(self.ctxt,
+ fake_host_name).AndReturn('fake-service')
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.service_get_by_compute_host(
+ self.ctxt,
+ self.tgt_cell_name,
+ fake_host_name)
+ result = response.value_or_raise()
+ self.assertEqual('fake-service', result)
+
+ def test_service_update(self):
+ binary = 'nova-compute'
+ fake_service = dict(id=42, host='fake_host', binary='nova-compute',
+ topic='compute')
+ fake_compute = dict(
+ id=7116, service_id=42, host='fake_host', vcpus=0, memory_mb=0,
+ local_gb=0, vcpus_used=0, memory_mb_used=0, local_gb_used=0,
+ hypervisor_type=0, hypervisor_version=0, hypervisor_hostname=0,
+ free_ram_mb=0, free_disk_gb=0, current_workload=0, running_vms=0,
+ cpu_info='HAL', disk_available_least=0)
+ params_to_update = {'disabled': True, 'report_count': 13}
+
+ ctxt = context.RequestContext('fake_user', 'fake_project',
+ is_admin=True)
+ # We use the real DB for this test, as it's too hard to reach the
+ # host_api to mock out its DB methods
+ db.service_create(ctxt, fake_service)
+ db.compute_node_create(ctxt, fake_compute)
+
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.service_update(
+ ctxt, self.tgt_cell_name,
+ 'fake_host', binary, params_to_update)
+ result = response.value_or_raise()
+ result.pop('created_at', None)
+ result.pop('updated_at', None)
+ result.pop('disabled_reason', None)
+ expected_result = dict(
+ deleted=0, deleted_at=None,
+ binary=fake_service['binary'],
+ disabled=True, # We just updated this..
+ report_count=13, # ..and this
+ host='fake_host', id=42,
+ topic='compute')
+ self.assertEqual(expected_result, result)
+
+ def test_service_delete(self):
+ fake_service = dict(id=42, host='fake_host', binary='nova-compute',
+ topic='compute')
+
+ ctxt = self.ctxt.elevated()
+ db.service_create(ctxt, fake_service)
+
+ self.src_msg_runner.service_delete(
+ ctxt, self.tgt_cell_name, fake_service['id'])
+ self.assertRaises(exception.ServiceNotFound,
+ db.service_get, ctxt, fake_service['id'])
+
+ def test_proxy_rpc_to_manager_call(self):
+ fake_topic = 'fake-topic'
+ fake_rpc_message = {'method': 'fake_rpc_method', 'args': {}}
+ fake_host_name = 'fake-host-name'
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'service_get_by_compute_host')
+ self.tgt_db_inst.service_get_by_compute_host(self.ctxt,
+ fake_host_name)
+
+ target = oslo_messaging.Target(topic='fake-topic')
+ rpcclient = self.mox.CreateMockAnything()
+
+ self.mox.StubOutWithMock(rpc, 'get_client')
+ rpc.get_client(target).AndReturn(rpcclient)
+ rpcclient.prepare(timeout=5).AndReturn(rpcclient)
+ rpcclient.call(mox.IgnoreArg(),
+ 'fake_rpc_method').AndReturn('fake_result')
+
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.proxy_rpc_to_manager(
+ self.ctxt,
+ self.tgt_cell_name,
+ fake_host_name,
+ fake_topic,
+ fake_rpc_message, True, timeout=5)
+ result = response.value_or_raise()
+ self.assertEqual('fake_result', result)
+
+ def test_proxy_rpc_to_manager_cast(self):
+ fake_topic = 'fake-topic'
+ fake_rpc_message = {'method': 'fake_rpc_method', 'args': {}}
+ fake_host_name = 'fake-host-name'
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'service_get_by_compute_host')
+ self.tgt_db_inst.service_get_by_compute_host(self.ctxt,
+ fake_host_name)
+
+ target = oslo_messaging.Target(topic='fake-topic')
+ rpcclient = self.mox.CreateMockAnything()
+
+ self.mox.StubOutWithMock(rpc, 'get_client')
+ rpc.get_client(target).AndReturn(rpcclient)
+ rpcclient.cast(mox.IgnoreArg(), 'fake_rpc_method')
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.proxy_rpc_to_manager(
+ self.ctxt,
+ self.tgt_cell_name,
+ fake_host_name,
+ fake_topic,
+ fake_rpc_message, False, timeout=None)
+
+ def test_task_log_get_all_targeted(self):
+ task_name = 'fake_task_name'
+ begin = 'fake_begin'
+ end = 'fake_end'
+ host = 'fake_host'
+ state = 'fake_state'
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'task_log_get_all')
+ self.tgt_db_inst.task_log_get_all(self.ctxt, task_name,
+ begin, end, host=host,
+ state=state).AndReturn(['fake_result'])
+
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.task_log_get_all(self.ctxt,
+ self.tgt_cell_name, task_name, begin, end, host=host,
+ state=state)
+ self.assertIsInstance(response, list)
+ self.assertEqual(1, len(response))
+ result = response[0].value_or_raise()
+ self.assertEqual(['fake_result'], result)
+
+ def test_compute_node_get(self):
+ compute_id = 'fake-id'
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'compute_node_get')
+ self.tgt_db_inst.compute_node_get(self.ctxt,
+ compute_id).AndReturn('fake_result')
+
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.compute_node_get(self.ctxt,
+ self.tgt_cell_name, compute_id)
+ result = response.value_or_raise()
+ self.assertEqual('fake_result', result)
+
+ def test_actions_get(self):
+ fake_uuid = fake_server_actions.FAKE_UUID
+ fake_req_id = fake_server_actions.FAKE_REQUEST_ID1
+ fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'actions_get')
+ self.tgt_db_inst.actions_get(self.ctxt,
+ 'fake-uuid').AndReturn([fake_act])
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.actions_get(self.ctxt,
+ self.tgt_cell_name,
+ 'fake-uuid')
+ result = response.value_or_raise()
+ self.assertEqual([jsonutils.to_primitive(fake_act)], result)
+
+ def test_action_get_by_request_id(self):
+ fake_uuid = fake_server_actions.FAKE_UUID
+ fake_req_id = fake_server_actions.FAKE_REQUEST_ID1
+ fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'action_get_by_request_id')
+ self.tgt_db_inst.action_get_by_request_id(self.ctxt,
+ 'fake-uuid', 'req-fake').AndReturn(fake_act)
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.action_get_by_request_id(self.ctxt,
+ self.tgt_cell_name, 'fake-uuid', 'req-fake')
+ result = response.value_or_raise()
+ self.assertEqual(jsonutils.to_primitive(fake_act), result)
+
+ def test_action_events_get(self):
+ fake_action_id = fake_server_actions.FAKE_ACTION_ID1
+ fake_events = fake_server_actions.FAKE_EVENTS[fake_action_id]
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'action_events_get')
+ self.tgt_db_inst.action_events_get(self.ctxt,
+ 'fake-action').AndReturn(fake_events)
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.action_events_get(self.ctxt,
+ self.tgt_cell_name,
+ 'fake-action')
+ result = response.value_or_raise()
+ self.assertEqual(jsonutils.to_primitive(fake_events), result)
+
+ def test_validate_console_port(self):
+ instance_uuid = 'fake_instance_uuid'
+ instance = {'uuid': instance_uuid}
+ console_port = 'fake-port'
+ console_type = 'fake-type'
+
+ self.mox.StubOutWithMock(self.tgt_c_rpcapi, 'validate_console_port')
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_get_by_uuid')
+
+ self.tgt_db_inst.instance_get_by_uuid(self.ctxt,
+ instance_uuid).AndReturn(instance)
+ self.tgt_c_rpcapi.validate_console_port(self.ctxt,
+ instance, console_port, console_type).AndReturn('fake_result')
+
+ self.mox.ReplayAll()
+
+ response = self.src_msg_runner.validate_console_port(self.ctxt,
+ self.tgt_cell_name, instance_uuid, console_port,
+ console_type)
+ result = response.value_or_raise()
+ self.assertEqual('fake_result', result)
+
+ def test_get_migrations_for_a_given_cell(self):
+ filters = {'cell_name': 'child-cell2', 'status': 'confirmed'}
+ migrations_in_progress = [{'id': 123}]
+ self.mox.StubOutWithMock(self.tgt_compute_api,
+ 'get_migrations')
+
+ self.tgt_compute_api.get_migrations(self.ctxt, filters).\
+ AndReturn(migrations_in_progress)
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.get_migrations(
+ self.ctxt,
+ self.tgt_cell_name, False, filters)
+ result = responses[0].value_or_raise()
+ self.assertEqual(migrations_in_progress, result)
+
+ def test_get_migrations_for_an_invalid_cell(self):
+ filters = {'cell_name': 'invalid_Cell', 'status': 'confirmed'}
+
+ responses = self.src_msg_runner.get_migrations(
+ self.ctxt,
+ 'api_cell!invalid_cell', False, filters)
+
+ self.assertEqual(0, len(responses))
+
+ def test_call_compute_api_with_obj(self):
+ instance = objects.Instance()
+ instance.uuid = uuidutils.generate_uuid()
+ self.mox.StubOutWithMock(instance, 'refresh')
+ # Using 'snapshot' for this test, because it
+ # takes args and kwargs.
+ self.mox.StubOutWithMock(self.tgt_compute_api, 'snapshot')
+ instance.refresh(self.ctxt)
+ self.tgt_compute_api.snapshot(
+ self.ctxt, instance, 'name',
+ extra_properties='props').AndReturn('foo')
+
+ self.mox.ReplayAll()
+ result = self.tgt_methods_cls._call_compute_api_with_obj(
+ self.ctxt, instance, 'snapshot', 'name',
+ extra_properties='props')
+ self.assertEqual('foo', result)
+
+ def test_call_compute_api_with_obj_no_cache(self):
+ instance = objects.Instance()
+ instance.uuid = uuidutils.generate_uuid()
+ error = exception.InstanceInfoCacheNotFound(
+ instance_uuid=instance.uuid)
+ with mock.patch.object(instance, 'refresh', side_effect=error):
+ self.assertRaises(exception.InstanceInfoCacheNotFound,
+ self.tgt_methods_cls._call_compute_api_with_obj,
+ self.ctxt, instance, 'snapshot')
+
+ def test_call_delete_compute_api_with_obj_no_cache(self):
+ instance = objects.Instance()
+ instance.uuid = uuidutils.generate_uuid()
+ error = exception.InstanceInfoCacheNotFound(
+ instance_uuid=instance.uuid)
+ with contextlib.nested(
+ mock.patch.object(instance, 'refresh',
+ side_effect=error),
+ mock.patch.object(self.tgt_compute_api, 'delete')) as (inst,
+ delete):
+ self.tgt_methods_cls._call_compute_api_with_obj(self.ctxt,
+ instance,
+ 'delete')
+ delete.assert_called_once_with(self.ctxt, instance)
+
+ def test_call_compute_with_obj_unknown_instance(self):
+ instance = objects.Instance()
+ instance.uuid = uuidutils.generate_uuid()
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = None
+ self.mox.StubOutWithMock(instance, 'refresh')
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'instance_destroy_at_top')
+
+ instance.refresh(self.ctxt).AndRaise(
+ exception.InstanceNotFound(instance_id=instance.uuid))
+
+ self.tgt_msg_runner.instance_destroy_at_top(self.ctxt,
+ {'uuid': instance.uuid})
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.InstanceNotFound,
+ self.tgt_methods_cls._call_compute_api_with_obj,
+ self.ctxt, instance, 'snapshot', 'name')
+
+ def _instance_update_helper(self, admin_state_reset):
+ class FakeMessage(object):
+ pass
+
+ message = FakeMessage()
+ message.ctxt = self.ctxt
+
+ instance = objects.Instance()
+ instance.cell_name = self.tgt_cell_name
+ instance.obj_reset_changes()
+ instance.task_state = 'meow'
+ instance.vm_state = 'wuff'
+ instance.user_data = 'foo'
+ instance.metadata = {'meta': 'data'}
+ instance.system_metadata = {'system': 'metadata'}
+ self.assertEqual(set(['user_data', 'vm_state', 'task_state',
+ 'metadata', 'system_metadata']),
+ instance.obj_what_changed())
+
+ self.mox.StubOutWithMock(instance, 'save')
+
+ def _check_object(*args, **kwargs):
+ # task_state and vm_state changes should have been cleared
+ # before calling save()
+ if admin_state_reset:
+ self.assertEqual(
+ set(['user_data', 'vm_state', 'task_state']),
+ instance.obj_what_changed())
+ else:
+ self.assertEqual(set(['user_data']),
+ instance.obj_what_changed())
+
+ instance.save(self.ctxt, expected_task_state='exp_task',
+ expected_vm_state='exp_vm').WithSideEffects(
+ _check_object)
+
+ self.mox.ReplayAll()
+
+ self.tgt_methods_cls.instance_update_from_api(
+ message,
+ instance,
+ expected_vm_state='exp_vm',
+ expected_task_state='exp_task',
+ admin_state_reset=admin_state_reset)
+
+ def test_instance_update_from_api(self):
+ self._instance_update_helper(False)
+
+ def test_instance_update_from_api_admin_state_reset(self):
+ self._instance_update_helper(True)
+
+ def _test_instance_action_method(self, method, args, kwargs,
+ expected_args, expected_kwargs,
+ expect_result):
+ class FakeMessage(object):
+ pass
+
+ message = FakeMessage()
+ message.ctxt = self.ctxt
+ message.need_response = expect_result
+
+ meth_cls = self.tgt_methods_cls
+ self.mox.StubOutWithMock(meth_cls, '_call_compute_api_with_obj')
+
+ method_corrections = {
+ 'terminate': 'delete',
+ }
+ api_method = method_corrections.get(method, method)
+
+ meth_cls._call_compute_api_with_obj(
+ self.ctxt, 'fake-instance', api_method,
+ *expected_args, **expected_kwargs).AndReturn('meow')
+
+ self.mox.ReplayAll()
+
+ method_translations = {'revert_resize': 'revert_resize',
+ 'confirm_resize': 'confirm_resize',
+ 'reset_network': 'reset_network',
+ 'inject_network_info': 'inject_network_info',
+ 'set_admin_password': 'set_admin_password',
+ }
+ tgt_method = method_translations.get(method,
+ '%s_instance' % method)
+ result = getattr(meth_cls, tgt_method)(
+ message, 'fake-instance', *args, **kwargs)
+ if expect_result:
+ self.assertEqual('meow', result)
+
+ def test_start_instance(self):
+ self._test_instance_action_method('start', (), {}, (), {}, False)
+
+ def test_stop_instance_cast(self):
+ self._test_instance_action_method('stop', (), {}, (),
+ {'do_cast': True}, False)
+
+ def test_stop_instance_call(self):
+ self._test_instance_action_method('stop', (), {}, (),
+ {'do_cast': False}, True)
+
+ def test_reboot_instance(self):
+ kwargs = dict(reboot_type='HARD')
+ self._test_instance_action_method('reboot', (), kwargs, (),
+ kwargs, False)
+
+ def test_suspend_instance(self):
+ self._test_instance_action_method('suspend', (), {}, (), {}, False)
+
+ def test_resume_instance(self):
+ self._test_instance_action_method('resume', (), {}, (), {}, False)
+
+ def test_get_host_uptime(self):
+ host_name = "fake-host"
+ host_uptime = (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
+ " 0.20, 0.12, 0.14")
+ self.mox.StubOutWithMock(self.tgt_host_api, 'get_host_uptime')
+ self.tgt_host_api.get_host_uptime(self.ctxt, host_name).\
+ AndReturn(host_uptime)
+ self.mox.ReplayAll()
+ response = self.src_msg_runner.get_host_uptime(self.ctxt,
+ self.tgt_cell_name,
+ host_name)
+ expected_host_uptime = response.value_or_raise()
+ self.assertEqual(host_uptime, expected_host_uptime)
+
+ def test_terminate_instance(self):
+ self._test_instance_action_method('terminate',
+ (), {}, (), {}, False)
+
+ def test_soft_delete_instance(self):
+ self._test_instance_action_method(delete_types.SOFT_DELETE,
+ (), {}, (), {}, False)
+
+ def test_pause_instance(self):
+ self._test_instance_action_method('pause', (), {}, (), {}, False)
+
+ def test_unpause_instance(self):
+ self._test_instance_action_method('unpause', (), {}, (), {}, False)
+
+ def test_resize_instance(self):
+ kwargs = dict(flavor=dict(id=42, flavorid='orangemocchafrappuccino'),
+ extra_instance_updates=dict(cow='moo'))
+ expected_kwargs = dict(flavor_id='orangemocchafrappuccino', cow='moo')
+ self._test_instance_action_method('resize', (), kwargs,
+ (), expected_kwargs,
+ False)
+
+ def test_live_migrate_instance(self):
+ kwargs = dict(block_migration='fake-block-mig',
+ disk_over_commit='fake-commit',
+ host_name='fake-host')
+ expected_args = ('fake-block-mig', 'fake-commit', 'fake-host')
+ self._test_instance_action_method('live_migrate', (), kwargs,
+ expected_args, {}, False)
+
+ def test_revert_resize(self):
+ self._test_instance_action_method('revert_resize',
+ (), {}, (), {}, False)
+
+ def test_confirm_resize(self):
+ self._test_instance_action_method('confirm_resize',
+ (), {}, (), {}, False)
+
+ def test_reset_network(self):
+ self._test_instance_action_method('reset_network',
+ (), {}, (), {}, False)
+
+ def test_inject_network_info(self):
+ self._test_instance_action_method('inject_network_info',
+ (), {}, (), {}, False)
+
+ def test_snapshot_instance(self):
+ inst = objects.Instance()
+ meth_cls = self.tgt_methods_cls
+
+ self.mox.StubOutWithMock(inst, 'refresh')
+ self.mox.StubOutWithMock(inst, 'save')
+ self.mox.StubOutWithMock(meth_cls.compute_rpcapi, 'snapshot_instance')
+
+ def check_state(expected_task_state=None):
+ self.assertEqual(task_states.IMAGE_SNAPSHOT_PENDING,
+ inst.task_state)
+
+ inst.refresh()
+ inst.save(expected_task_state=[None]).WithSideEffects(check_state)
+
+ meth_cls.compute_rpcapi.snapshot_instance(self.ctxt,
+ inst, 'image-id')
+
+ self.mox.ReplayAll()
+
+ class FakeMessage(object):
+ pass
+
+ message = FakeMessage()
+ message.ctxt = self.ctxt
+ message.need_response = False
+
+ meth_cls.snapshot_instance(message, inst, image_id='image-id')
+
+ def test_backup_instance(self):
+ inst = objects.Instance()
+ meth_cls = self.tgt_methods_cls
+
+ self.mox.StubOutWithMock(inst, 'refresh')
+ self.mox.StubOutWithMock(inst, 'save')
+ self.mox.StubOutWithMock(meth_cls.compute_rpcapi, 'backup_instance')
+
+ def check_state(expected_task_state=None):
+ self.assertEqual(task_states.IMAGE_BACKUP, inst.task_state)
+
+ inst.refresh()
+ inst.save(expected_task_state=[None]).WithSideEffects(check_state)
+
+ meth_cls.compute_rpcapi.backup_instance(self.ctxt,
+ inst,
+ 'image-id',
+ 'backup-type',
+ 'rotation')
+
+ self.mox.ReplayAll()
+
+ class FakeMessage(object):
+ pass
+
+ message = FakeMessage()
+ message.ctxt = self.ctxt
+ message.need_response = False
+
+ meth_cls.backup_instance(message, inst,
+ image_id='image-id',
+ backup_type='backup-type',
+ rotation='rotation')
+
+ def test_set_admin_password(self):
+ args = ['fake-password']
+ self._test_instance_action_method('set_admin_password', args, {}, args,
+ {}, False)
+
+
+class CellsBroadcastMethodsTestCase(test.TestCase):
+ """Test case for _BroadcastMessageMethods class. Most of these
+ tests actually test the full path from the MessageRunner through
+ to the functionality of the message method. Hits 2 birds with 1
+ stone, even though it's a little more than a unit test.
+ """
+
+ def setUp(self):
+ super(CellsBroadcastMethodsTestCase, self).setUp()
+ fakes.init(self)
+ self.ctxt = context.RequestContext('fake', 'fake')
+ self._setup_attrs()
+
+ def _setup_attrs(self, up=True):
+ mid_cell = 'child-cell2'
+ if up:
+ src_cell = 'grandchild-cell1'
+ tgt_cell = 'api-cell'
+ else:
+ src_cell = 'api-cell'
+ tgt_cell = 'grandchild-cell1'
+
+ self.src_msg_runner = fakes.get_message_runner(src_cell)
+ methods_cls = self.src_msg_runner.methods_by_type['broadcast']
+ self.src_methods_cls = methods_cls
+ self.src_db_inst = methods_cls.db
+ self.src_compute_api = methods_cls.compute_api
+ self.src_ca_rpcapi = methods_cls.consoleauth_rpcapi
+
+ if not up:
+ # fudge things so we only have 1 child to broadcast to
+ state_manager = self.src_msg_runner.state_manager
+ for cell in state_manager.get_child_cells():
+ if cell.name != 'child-cell2':
+ del state_manager.child_cells[cell.name]
+
+ self.mid_msg_runner = fakes.get_message_runner(mid_cell)
+ methods_cls = self.mid_msg_runner.methods_by_type['broadcast']
+ self.mid_methods_cls = methods_cls
+ self.mid_db_inst = methods_cls.db
+ self.mid_compute_api = methods_cls.compute_api
+ self.mid_ca_rpcapi = methods_cls.consoleauth_rpcapi
+
+ self.tgt_msg_runner = fakes.get_message_runner(tgt_cell)
+ methods_cls = self.tgt_msg_runner.methods_by_type['broadcast']
+ self.tgt_methods_cls = methods_cls
+ self.tgt_db_inst = methods_cls.db
+ self.tgt_compute_api = methods_cls.compute_api
+ self.tgt_ca_rpcapi = methods_cls.consoleauth_rpcapi
+
+ def test_at_the_top(self):
+ self.assertTrue(self.tgt_methods_cls._at_the_top())
+ self.assertFalse(self.mid_methods_cls._at_the_top())
+ self.assertFalse(self.src_methods_cls._at_the_top())
+
+ def test_apply_expected_states_building(self):
+ instance_info = {'vm_state': vm_states.BUILDING}
+ expected = dict(instance_info,
+ expected_vm_state=[vm_states.BUILDING, None])
+ self.src_methods_cls._apply_expected_states(instance_info)
+ self.assertEqual(expected, instance_info)
+
+ def test_apply_expected_states_resize_finish(self):
+ instance_info = {'task_state': task_states.RESIZE_FINISH}
+ exp_states = [task_states.RESIZE_FINISH,
+ task_states.RESIZE_MIGRATED,
+ task_states.RESIZE_MIGRATING,
+ task_states.RESIZE_PREP]
+ expected = dict(instance_info, expected_task_state=exp_states)
+ self.src_methods_cls._apply_expected_states(instance_info)
+ self.assertEqual(expected, instance_info)
+
+ def _test_instance_update_at_top(self, net_info, exists=True):
+ fake_info_cache = {'id': 1,
+ 'instance': 'fake_instance',
+ 'network_info': net_info}
+ fake_sys_metadata = [{'id': 1,
+ 'key': 'key1',
+ 'value': 'value1'},
+ {'id': 2,
+ 'key': 'key2',
+ 'value': 'value2'}]
+ fake_instance = {'id': 2,
+ 'uuid': 'fake_uuid',
+ 'security_groups': 'fake',
+ 'volumes': 'fake',
+ 'cell_name': 'fake',
+ 'name': 'fake',
+ 'metadata': 'fake',
+ 'info_cache': fake_info_cache,
+ 'system_metadata': fake_sys_metadata,
+ 'other': 'meow'}
+ expected_sys_metadata = {'key1': 'value1',
+ 'key2': 'value2'}
+ expected_info_cache = {'network_info': "[]"}
+ expected_cell_name = 'api-cell!child-cell2!grandchild-cell1'
+ expected_instance = {'system_metadata': expected_sys_metadata,
+ 'cell_name': expected_cell_name,
+ 'other': 'meow',
+ 'uuid': 'fake_uuid'}
+
+ # To show these should not be called in src/mid-level cell
+ self.mox.StubOutWithMock(self.src_db_inst, 'instance_update')
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'instance_info_cache_update')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'instance_update')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'instance_info_cache_update')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_update')
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_create')
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'instance_info_cache_update')
+ mock = self.tgt_db_inst.instance_update(self.ctxt, 'fake_uuid',
+ expected_instance,
+ update_cells=False)
+ if not exists:
+ mock.AndRaise(exception.InstanceNotFound(instance_id='fake_uuid'))
+ self.tgt_db_inst.instance_create(self.ctxt,
+ expected_instance)
+ self.tgt_db_inst.instance_info_cache_update(self.ctxt, 'fake_uuid',
+ expected_info_cache)
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.instance_update_at_top(self.ctxt, fake_instance)
+
+ def test_instance_update_at_top(self):
+ self._test_instance_update_at_top("[]")
+
+ def test_instance_update_at_top_netinfo_list(self):
+ self._test_instance_update_at_top([])
+
+ def test_instance_update_at_top_netinfo_model(self):
+ self._test_instance_update_at_top(network_model.NetworkInfo())
+
+ def test_instance_update_at_top_does_not_already_exist(self):
+ self._test_instance_update_at_top([], exists=False)
+
+ def test_instance_update_at_top_with_building_state(self):
+ fake_info_cache = {'id': 1,
+ 'instance': 'fake_instance',
+ 'other': 'moo'}
+ fake_sys_metadata = [{'id': 1,
+ 'key': 'key1',
+ 'value': 'value1'},
+ {'id': 2,
+ 'key': 'key2',
+ 'value': 'value2'}]
+ fake_instance = {'id': 2,
+ 'uuid': 'fake_uuid',
+ 'security_groups': 'fake',
+ 'volumes': 'fake',
+ 'cell_name': 'fake',
+ 'name': 'fake',
+ 'metadata': 'fake',
+ 'info_cache': fake_info_cache,
+ 'system_metadata': fake_sys_metadata,
+ 'vm_state': vm_states.BUILDING,
+ 'other': 'meow'}
+ expected_sys_metadata = {'key1': 'value1',
+ 'key2': 'value2'}
+ expected_info_cache = {'other': 'moo'}
+ expected_cell_name = 'api-cell!child-cell2!grandchild-cell1'
+ expected_instance = {'system_metadata': expected_sys_metadata,
+ 'cell_name': expected_cell_name,
+ 'other': 'meow',
+ 'vm_state': vm_states.BUILDING,
+ 'expected_vm_state': [vm_states.BUILDING, None],
+ 'uuid': 'fake_uuid'}
+
+ # To show these should not be called in src/mid-level cell
+ self.mox.StubOutWithMock(self.src_db_inst, 'instance_update')
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'instance_info_cache_update')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'instance_update')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'instance_info_cache_update')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_update')
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'instance_info_cache_update')
+ self.tgt_db_inst.instance_update(self.ctxt, 'fake_uuid',
+ expected_instance,
+ update_cells=False)
+ self.tgt_db_inst.instance_info_cache_update(self.ctxt, 'fake_uuid',
+ expected_info_cache)
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.instance_update_at_top(self.ctxt, fake_instance)
+
+ def test_instance_destroy_at_top(self):
+ fake_instance = {'uuid': 'fake_uuid'}
+
+ # To show these should not be called in src/mid-level cell
+ self.mox.StubOutWithMock(self.src_db_inst, 'instance_destroy')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'instance_destroy')
+ self.tgt_db_inst.instance_destroy(self.ctxt, 'fake_uuid',
+ update_cells=False)
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.instance_destroy_at_top(self.ctxt, fake_instance)
+
+ def test_instance_hard_delete_everywhere(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+ instance = {'uuid': 'meow'}
+
+ # Should not be called in src (API cell)
+ self.mox.StubOutWithMock(self.src_compute_api, delete_types.DELETE)
+
+ self.mox.StubOutWithMock(self.mid_compute_api, delete_types.DELETE)
+ self.mox.StubOutWithMock(self.tgt_compute_api, delete_types.DELETE)
+
+ self.mid_compute_api.delete(self.ctxt, instance)
+ self.tgt_compute_api.delete(self.ctxt, instance)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.instance_delete_everywhere(self.ctxt,
+ instance, delete_types.DELETE)
+
+ def test_instance_soft_delete_everywhere(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+ instance = {'uuid': 'meow'}
+
+ # Should not be called in src (API cell)
+ self.mox.StubOutWithMock(self.src_compute_api,
+ delete_types.SOFT_DELETE)
+
+ self.mox.StubOutWithMock(self.mid_compute_api,
+ delete_types.SOFT_DELETE)
+ self.mox.StubOutWithMock(self.tgt_compute_api,
+ delete_types.SOFT_DELETE)
+
+ self.mid_compute_api.soft_delete(self.ctxt, instance)
+ self.tgt_compute_api.soft_delete(self.ctxt, instance)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.instance_delete_everywhere(self.ctxt,
+ instance, delete_types.SOFT_DELETE)
+
+ def test_instance_fault_create_at_top(self):
+ fake_instance_fault = {'id': 1,
+ 'message': 'fake-message',
+ 'details': 'fake-details'}
+
+ if_mock = mock.Mock(spec_set=objects.InstanceFault)
+
+ def _check_create():
+ self.assertEqual('fake-message', if_mock.message)
+ self.assertEqual('fake-details', if_mock.details)
+ # Should not be set
+ self.assertNotEqual(1, if_mock.id)
+
+ if_mock.create.side_effect = _check_create
+
+ with mock.patch.object(objects, 'InstanceFault') as if_obj_mock:
+ if_obj_mock.return_value = if_mock
+ self.src_msg_runner.instance_fault_create_at_top(
+ self.ctxt, fake_instance_fault)
+
+ if_obj_mock.assert_called_once_with(context=self.ctxt)
+ if_mock.create.assert_called_once_with()
+
+ def test_bw_usage_update_at_top(self):
+ fake_bw_update_info = {'uuid': 'fake_uuid',
+ 'mac': 'fake_mac',
+ 'start_period': 'fake_start_period',
+ 'bw_in': 'fake_bw_in',
+ 'bw_out': 'fake_bw_out',
+ 'last_ctr_in': 'fake_last_ctr_in',
+ 'last_ctr_out': 'fake_last_ctr_out',
+ 'last_refreshed': 'fake_last_refreshed'}
+
+ # Shouldn't be called for these 2 cells
+ self.mox.StubOutWithMock(self.src_db_inst, 'bw_usage_update')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'bw_usage_update')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'bw_usage_update')
+ self.tgt_db_inst.bw_usage_update(self.ctxt, **fake_bw_update_info)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.bw_usage_update_at_top(self.ctxt,
+ fake_bw_update_info)
+
+ def test_sync_instances(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+ project_id = 'fake_project_id'
+ updated_since_raw = 'fake_updated_since_raw'
+ updated_since_parsed = 'fake_updated_since_parsed'
+ deleted = 'fake_deleted'
+
+ instance1 = dict(uuid='fake_uuid1', deleted=False)
+ instance2 = dict(uuid='fake_uuid2', deleted=True)
+ fake_instances = [instance1, instance2]
+
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'instance_update_at_top')
+ self.mox.StubOutWithMock(self.tgt_msg_runner,
+ 'instance_destroy_at_top')
+
+ self.mox.StubOutWithMock(timeutils, 'parse_isotime')
+ self.mox.StubOutWithMock(cells_utils, 'get_instances_to_sync')
+
+ # Middle cell.
+ timeutils.parse_isotime(updated_since_raw).AndReturn(
+ updated_since_parsed)
+ cells_utils.get_instances_to_sync(self.ctxt,
+ updated_since=updated_since_parsed,
+ project_id=project_id,
+ deleted=deleted).AndReturn([])
+
+ # Bottom/Target cell
+ timeutils.parse_isotime(updated_since_raw).AndReturn(
+ updated_since_parsed)
+ cells_utils.get_instances_to_sync(self.ctxt,
+ updated_since=updated_since_parsed,
+ project_id=project_id,
+ deleted=deleted).AndReturn(fake_instances)
+ self.tgt_msg_runner.instance_update_at_top(self.ctxt, instance1)
+ self.tgt_msg_runner.instance_destroy_at_top(self.ctxt, instance2)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.sync_instances(self.ctxt,
+ project_id, updated_since_raw, deleted)
+
+ def test_service_get_all_with_disabled(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+
+ ctxt = self.ctxt.elevated()
+
+ self.mox.StubOutWithMock(self.src_db_inst, 'service_get_all')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'service_get_all')
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'service_get_all')
+
+ self.src_db_inst.service_get_all(ctxt,
+ disabled=None).AndReturn([1, 2])
+ self.mid_db_inst.service_get_all(ctxt,
+ disabled=None).AndReturn([3])
+ self.tgt_db_inst.service_get_all(ctxt,
+ disabled=None).AndReturn([4, 5])
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.service_get_all(ctxt,
+ filters={})
+ response_values = [(resp.cell_name, resp.value_or_raise())
+ for resp in responses]
+ expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
+ ('api-cell!child-cell2', [3]),
+ ('api-cell', [1, 2])]
+ self.assertEqual(expected, response_values)
+
+ def test_service_get_all_without_disabled(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+ disabled = False
+ filters = {'disabled': disabled}
+
+ ctxt = self.ctxt.elevated()
+
+ self.mox.StubOutWithMock(self.src_db_inst, 'service_get_all')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'service_get_all')
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'service_get_all')
+
+ self.src_db_inst.service_get_all(ctxt,
+ disabled=disabled).AndReturn([1, 2])
+ self.mid_db_inst.service_get_all(ctxt,
+ disabled=disabled).AndReturn([3])
+ self.tgt_db_inst.service_get_all(ctxt,
+ disabled=disabled).AndReturn([4, 5])
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.service_get_all(ctxt,
+ filters=filters)
+ response_values = [(resp.cell_name, resp.value_or_raise())
+ for resp in responses]
+ expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
+ ('api-cell!child-cell2', [3]),
+ ('api-cell', [1, 2])]
+ self.assertEqual(expected, response_values)
+
+ def test_task_log_get_all_broadcast(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+ task_name = 'fake_task_name'
+ begin = 'fake_begin'
+ end = 'fake_end'
+ host = 'fake_host'
+ state = 'fake_state'
+
+ ctxt = self.ctxt.elevated()
+
+ self.mox.StubOutWithMock(self.src_db_inst, 'task_log_get_all')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'task_log_get_all')
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'task_log_get_all')
+
+ self.src_db_inst.task_log_get_all(ctxt, task_name,
+ begin, end, host=host, state=state).AndReturn([1, 2])
+ self.mid_db_inst.task_log_get_all(ctxt, task_name,
+ begin, end, host=host, state=state).AndReturn([3])
+ self.tgt_db_inst.task_log_get_all(ctxt, task_name,
+ begin, end, host=host, state=state).AndReturn([4, 5])
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.task_log_get_all(ctxt, None,
+ task_name, begin, end, host=host, state=state)
+ response_values = [(resp.cell_name, resp.value_or_raise())
+ for resp in responses]
+ expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
+ ('api-cell!child-cell2', [3]),
+ ('api-cell', [1, 2])]
+ self.assertEqual(expected, response_values)
+
+ def test_compute_node_get_all(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+
+ ctxt = self.ctxt.elevated()
+
+ self.mox.StubOutWithMock(self.src_db_inst, 'compute_node_get_all')
+ self.mox.StubOutWithMock(self.mid_db_inst, 'compute_node_get_all')
+ self.mox.StubOutWithMock(self.tgt_db_inst, 'compute_node_get_all')
+
+ self.src_db_inst.compute_node_get_all(ctxt).AndReturn([1, 2])
+ self.mid_db_inst.compute_node_get_all(ctxt).AndReturn([3])
+ self.tgt_db_inst.compute_node_get_all(ctxt).AndReturn([4, 5])
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.compute_node_get_all(ctxt)
+ response_values = [(resp.cell_name, resp.value_or_raise())
+ for resp in responses]
+ expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
+ ('api-cell!child-cell2', [3]),
+ ('api-cell', [1, 2])]
+ self.assertEqual(expected, response_values)
+
+ def test_compute_node_get_all_with_hyp_match(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+ hypervisor_match = 'meow'
+
+ ctxt = self.ctxt.elevated()
+
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'compute_node_search_by_hypervisor')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'compute_node_search_by_hypervisor')
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'compute_node_search_by_hypervisor')
+
+ self.src_db_inst.compute_node_search_by_hypervisor(ctxt,
+ hypervisor_match).AndReturn([1, 2])
+ self.mid_db_inst.compute_node_search_by_hypervisor(ctxt,
+ hypervisor_match).AndReturn([3])
+ self.tgt_db_inst.compute_node_search_by_hypervisor(ctxt,
+ hypervisor_match).AndReturn([4, 5])
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.compute_node_get_all(ctxt,
+ hypervisor_match=hypervisor_match)
+ response_values = [(resp.cell_name, resp.value_or_raise())
+ for resp in responses]
+ expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
+ ('api-cell!child-cell2', [3]),
+ ('api-cell', [1, 2])]
+ self.assertEqual(expected, response_values)
+
+ def test_compute_node_stats(self):
+ # Reset this, as this is a broadcast down.
+ self._setup_attrs(up=False)
+
+ ctxt = self.ctxt.elevated()
+
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'compute_node_statistics')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'compute_node_statistics')
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'compute_node_statistics')
+
+ self.src_db_inst.compute_node_statistics(ctxt).AndReturn([1, 2])
+ self.mid_db_inst.compute_node_statistics(ctxt).AndReturn([3])
+ self.tgt_db_inst.compute_node_statistics(ctxt).AndReturn([4, 5])
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.compute_node_stats(ctxt)
+ response_values = [(resp.cell_name, resp.value_or_raise())
+ for resp in responses]
+ expected = [('api-cell!child-cell2!grandchild-cell1', [4, 5]),
+ ('api-cell!child-cell2', [3]),
+ ('api-cell', [1, 2])]
+ self.assertEqual(expected, response_values)
+
+ def test_consoleauth_delete_tokens(self):
+ fake_uuid = 'fake-instance-uuid'
+
+ # To show these should not be called in src/mid-level cell
+ self.mox.StubOutWithMock(self.src_ca_rpcapi,
+ 'delete_tokens_for_instance')
+ self.mox.StubOutWithMock(self.mid_ca_rpcapi,
+ 'delete_tokens_for_instance')
+
+ self.mox.StubOutWithMock(self.tgt_ca_rpcapi,
+ 'delete_tokens_for_instance')
+ self.tgt_ca_rpcapi.delete_tokens_for_instance(self.ctxt, fake_uuid)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.consoleauth_delete_tokens(self.ctxt, fake_uuid)
+
+ def test_bdm_update_or_create_with_none_create(self):
+ fake_bdm = {'id': 'fake_id',
+ 'volume_id': 'fake_volume_id'}
+ expected_bdm = fake_bdm.copy()
+ expected_bdm.pop('id')
+
+ # Shouldn't be called for these 2 cells
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'block_device_mapping_update_or_create')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'block_device_mapping_update_or_create')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'block_device_mapping_update_or_create')
+ self.tgt_db_inst.block_device_mapping_update_or_create(
+ self.ctxt, expected_bdm, legacy=False)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
+ fake_bdm,
+ create=None)
+
+ def test_bdm_update_or_create_with_true_create(self):
+ fake_bdm = {'id': 'fake_id',
+ 'volume_id': 'fake_volume_id'}
+ expected_bdm = fake_bdm.copy()
+ expected_bdm.pop('id')
+
+ # Shouldn't be called for these 2 cells
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'block_device_mapping_create')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'block_device_mapping_create')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'block_device_mapping_create')
+ self.tgt_db_inst.block_device_mapping_create(
+ self.ctxt, fake_bdm, legacy=False)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
+ fake_bdm,
+ create=True)
+
+ def test_bdm_update_or_create_with_false_create_vol_id(self):
+ fake_bdm = {'id': 'fake_id',
+ 'instance_uuid': 'fake_instance_uuid',
+ 'device_name': 'fake_device_name',
+ 'volume_id': 'fake_volume_id'}
+ expected_bdm = fake_bdm.copy()
+ expected_bdm.pop('id')
+
+ fake_inst_bdms = [{'id': 1,
+ 'volume_id': 'not-a-match',
+ 'device_name': 'not-a-match'},
+ {'id': 2,
+ 'volume_id': 'fake_volume_id',
+ 'device_name': 'not-a-match'},
+ {'id': 3,
+ 'volume_id': 'not-a-match',
+ 'device_name': 'not-a-match'}]
+
+ # Shouldn't be called for these 2 cells
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'block_device_mapping_update')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'block_device_mapping_update')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'block_device_mapping_get_all_by_instance')
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'block_device_mapping_update')
+
+ self.tgt_db_inst.block_device_mapping_get_all_by_instance(
+ self.ctxt, 'fake_instance_uuid').AndReturn(
+ fake_inst_bdms)
+ # Should try to update ID 2.
+ self.tgt_db_inst.block_device_mapping_update(
+ self.ctxt, 2, expected_bdm, legacy=False)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
+ fake_bdm,
+ create=False)
+
+ def test_bdm_update_or_create_with_false_create_dev_name(self):
+ fake_bdm = {'id': 'fake_id',
+ 'instance_uuid': 'fake_instance_uuid',
+ 'device_name': 'fake_device_name',
+ 'volume_id': 'fake_volume_id'}
+ expected_bdm = fake_bdm.copy()
+ expected_bdm.pop('id')
+
+ fake_inst_bdms = [{'id': 1,
+ 'volume_id': 'not-a-match',
+ 'device_name': 'not-a-match'},
+ {'id': 2,
+ 'volume_id': 'not-a-match',
+ 'device_name': 'fake_device_name'},
+ {'id': 3,
+ 'volume_id': 'not-a-match',
+ 'device_name': 'not-a-match'}]
+
+ # Shouldn't be called for these 2 cells
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'block_device_mapping_update')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'block_device_mapping_update')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'block_device_mapping_get_all_by_instance')
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'block_device_mapping_update')
+
+ self.tgt_db_inst.block_device_mapping_get_all_by_instance(
+ self.ctxt, 'fake_instance_uuid').AndReturn(
+ fake_inst_bdms)
+ # Should try to update ID 2.
+ self.tgt_db_inst.block_device_mapping_update(
+ self.ctxt, 2, expected_bdm, legacy=False)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.bdm_update_or_create_at_top(self.ctxt,
+ fake_bdm,
+ create=False)
+
+ def test_bdm_destroy_by_volume(self):
+ fake_instance_uuid = 'fake-instance-uuid'
+ fake_volume_id = 'fake-volume-name'
+
+ # Shouldn't be called for these 2 cells
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'block_device_mapping_destroy_by_instance_and_volume')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'block_device_mapping_destroy_by_instance_and_volume')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'block_device_mapping_destroy_by_instance_and_volume')
+ self.tgt_db_inst.block_device_mapping_destroy_by_instance_and_volume(
+ self.ctxt, fake_instance_uuid, fake_volume_id)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.bdm_destroy_at_top(self.ctxt, fake_instance_uuid,
+ volume_id=fake_volume_id)
+
+ def test_bdm_destroy_by_device(self):
+ fake_instance_uuid = 'fake-instance-uuid'
+ fake_device_name = 'fake-device-name'
+
+ # Shouldn't be called for these 2 cells
+ self.mox.StubOutWithMock(self.src_db_inst,
+ 'block_device_mapping_destroy_by_instance_and_device')
+ self.mox.StubOutWithMock(self.mid_db_inst,
+ 'block_device_mapping_destroy_by_instance_and_device')
+
+ self.mox.StubOutWithMock(self.tgt_db_inst,
+ 'block_device_mapping_destroy_by_instance_and_device')
+ self.tgt_db_inst.block_device_mapping_destroy_by_instance_and_device(
+ self.ctxt, fake_instance_uuid, fake_device_name)
+
+ self.mox.ReplayAll()
+
+ self.src_msg_runner.bdm_destroy_at_top(self.ctxt, fake_instance_uuid,
+ device_name=fake_device_name)
+
+ def test_get_migrations(self):
+ self._setup_attrs(up=False)
+ filters = {'status': 'confirmed'}
+ migrations_from_cell1 = [{'id': 123}]
+ migrations_from_cell2 = [{'id': 456}]
+ self.mox.StubOutWithMock(self.mid_compute_api,
+ 'get_migrations')
+
+ self.mid_compute_api.get_migrations(self.ctxt, filters).\
+ AndReturn(migrations_from_cell1)
+
+ self.mox.StubOutWithMock(self.tgt_compute_api,
+ 'get_migrations')
+
+ self.tgt_compute_api.get_migrations(self.ctxt, filters).\
+ AndReturn(migrations_from_cell2)
+
+ self.mox.ReplayAll()
+
+ responses = self.src_msg_runner.get_migrations(
+ self.ctxt,
+ None, False, filters)
+ self.assertEqual(2, len(responses))
+ for response in responses:
+ self.assertIn(response.value_or_raise(), [migrations_from_cell1,
+ migrations_from_cell2])
diff --git a/nova/tests/unit/cells/test_cells_rpc_driver.py b/nova/tests/unit/cells/test_cells_rpc_driver.py
new file mode 100644
index 0000000000..7efba3765b
--- /dev/null
+++ b/nova/tests/unit/cells/test_cells_rpc_driver.py
@@ -0,0 +1,207 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Cells RPC Communication Driver
+"""
+
+import mox
+from oslo.config import cfg
+from oslo import messaging as oslo_messaging
+
+from nova.cells import messaging
+from nova.cells import rpc_driver
+from nova import context
+from nova import rpc
+from nova import test
+from nova.tests.unit.cells import fakes
+
+CONF = cfg.CONF
+CONF.import_opt('rpc_driver_queue_base', 'nova.cells.rpc_driver',
+ group='cells')
+
+
+class CellsRPCDriverTestCase(test.NoDBTestCase):
+ """Test case for Cells communication via RPC."""
+
+ def setUp(self):
+ super(CellsRPCDriverTestCase, self).setUp()
+ fakes.init(self)
+ self.ctxt = context.RequestContext('fake', 'fake')
+ self.driver = rpc_driver.CellsRPCDriver()
+
+ def test_start_servers(self):
+ self.flags(rpc_driver_queue_base='cells.intercell42', group='cells')
+ fake_msg_runner = fakes.get_message_runner('api-cell')
+
+ class FakeInterCellRPCDispatcher(object):
+ def __init__(_self, msg_runner):
+ self.assertEqual(fake_msg_runner, msg_runner)
+
+ self.stubs.Set(rpc_driver, 'InterCellRPCDispatcher',
+ FakeInterCellRPCDispatcher)
+ self.mox.StubOutWithMock(rpc, 'get_server')
+
+ for message_type in messaging.MessageRunner.get_message_types():
+ topic = 'cells.intercell42.' + message_type
+ target = oslo_messaging.Target(topic=topic, server=CONF.host)
+ endpoints = [mox.IsA(FakeInterCellRPCDispatcher)]
+
+ rpcserver = self.mox.CreateMockAnything()
+ rpc.get_server(target, endpoints=endpoints).AndReturn(rpcserver)
+ rpcserver.start()
+
+ self.mox.ReplayAll()
+
+ self.driver.start_servers(fake_msg_runner)
+
+ def test_stop_servers(self):
+ call_info = {'stopped': []}
+
+ class FakeRPCServer(object):
+ def stop(self):
+ call_info['stopped'].append(self)
+
+ fake_servers = [FakeRPCServer() for x in xrange(5)]
+ self.driver.rpc_servers = fake_servers
+ self.driver.stop_servers()
+ self.assertEqual(fake_servers, call_info['stopped'])
+
+ def test_send_message_to_cell_cast(self):
+ msg_runner = fakes.get_message_runner('api-cell')
+ cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
+ message = messaging._TargetedMessage(msg_runner,
+ self.ctxt, 'fake', {}, 'down', cell_state, fanout=False)
+
+ expected_server_params = {'hostname': 'rpc_host2',
+ 'password': 'password2',
+ 'port': 3092,
+ 'username': 'username2',
+ 'virtual_host': 'rpc_vhost2'}
+ expected_url = ('rabbit://%(username)s:%(password)s@'
+ '%(hostname)s:%(port)d/%(virtual_host)s' %
+ expected_server_params)
+
+ def check_transport_url(cell_state):
+ return cell_state.db_info['transport_url'] == expected_url
+
+ rpcapi = self.driver.intercell_rpcapi
+ rpcclient = self.mox.CreateMockAnything()
+
+ self.mox.StubOutWithMock(rpcapi, '_get_client')
+ rpcapi._get_client(
+ mox.Func(check_transport_url),
+ 'cells.intercell.targeted').AndReturn(rpcclient)
+
+ rpcclient.cast(mox.IgnoreArg(), 'process_message',
+ message=message.to_json())
+
+ self.mox.ReplayAll()
+
+ self.driver.send_message_to_cell(cell_state, message)
+
+ def test_send_message_to_cell_fanout_cast(self):
+ msg_runner = fakes.get_message_runner('api-cell')
+ cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
+ message = messaging._TargetedMessage(msg_runner,
+ self.ctxt, 'fake', {}, 'down', cell_state, fanout=True)
+
+ expected_server_params = {'hostname': 'rpc_host2',
+ 'password': 'password2',
+ 'port': 3092,
+ 'username': 'username2',
+ 'virtual_host': 'rpc_vhost2'}
+ expected_url = ('rabbit://%(username)s:%(password)s@'
+ '%(hostname)s:%(port)d/%(virtual_host)s' %
+ expected_server_params)
+
+ def check_transport_url(cell_state):
+ return cell_state.db_info['transport_url'] == expected_url
+
+ rpcapi = self.driver.intercell_rpcapi
+ rpcclient = self.mox.CreateMockAnything()
+
+ self.mox.StubOutWithMock(rpcapi, '_get_client')
+ rpcapi._get_client(
+ mox.Func(check_transport_url),
+ 'cells.intercell.targeted').AndReturn(rpcclient)
+
+ rpcclient.prepare(fanout=True).AndReturn(rpcclient)
+ rpcclient.cast(mox.IgnoreArg(), 'process_message',
+ message=message.to_json())
+
+ self.mox.ReplayAll()
+
+ self.driver.send_message_to_cell(cell_state, message)
+
+ def test_rpc_topic_uses_message_type(self):
+ self.flags(rpc_driver_queue_base='cells.intercell42', group='cells')
+ msg_runner = fakes.get_message_runner('api-cell')
+ cell_state = fakes.get_cell_state('api-cell', 'child-cell2')
+ message = messaging._BroadcastMessage(msg_runner,
+ self.ctxt, 'fake', {}, 'down', fanout=True)
+ message.message_type = 'fake-message-type'
+
+ expected_server_params = {'hostname': 'rpc_host2',
+ 'password': 'password2',
+ 'port': 3092,
+ 'username': 'username2',
+ 'virtual_host': 'rpc_vhost2'}
+ expected_url = ('rabbit://%(username)s:%(password)s@'
+ '%(hostname)s:%(port)d/%(virtual_host)s' %
+ expected_server_params)
+
+ def check_transport_url(cell_state):
+ return cell_state.db_info['transport_url'] == expected_url
+
+ rpcapi = self.driver.intercell_rpcapi
+ rpcclient = self.mox.CreateMockAnything()
+
+ self.mox.StubOutWithMock(rpcapi, '_get_client')
+ rpcapi._get_client(
+ mox.Func(check_transport_url),
+ 'cells.intercell42.fake-message-type').AndReturn(rpcclient)
+
+ rpcclient.prepare(fanout=True).AndReturn(rpcclient)
+ rpcclient.cast(mox.IgnoreArg(), 'process_message',
+ message=message.to_json())
+
+ self.mox.ReplayAll()
+
+ self.driver.send_message_to_cell(cell_state, message)
+
+ def test_process_message(self):
+ msg_runner = fakes.get_message_runner('api-cell')
+ dispatcher = rpc_driver.InterCellRPCDispatcher(msg_runner)
+ message = messaging._BroadcastMessage(msg_runner,
+ self.ctxt, 'fake', {}, 'down', fanout=True)
+
+ call_info = {}
+
+ def _fake_message_from_json(json_message):
+ call_info['json_message'] = json_message
+ self.assertEqual(message.to_json(), json_message)
+ return message
+
+ def _fake_process():
+ call_info['process_called'] = True
+
+ self.stubs.Set(msg_runner, 'message_from_json',
+ _fake_message_from_json)
+ self.stubs.Set(message, 'process', _fake_process)
+
+ dispatcher.process_message(self.ctxt, message.to_json())
+ self.assertEqual(message.to_json(), call_info['json_message'])
+ self.assertTrue(call_info['process_called'])
diff --git a/nova/tests/unit/cells/test_cells_rpcapi.py b/nova/tests/unit/cells/test_cells_rpcapi.py
new file mode 100644
index 0000000000..398b96d8ae
--- /dev/null
+++ b/nova/tests/unit/cells/test_cells_rpcapi.py
@@ -0,0 +1,760 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Cells RPCAPI
+"""
+
+from oslo.config import cfg
+import six
+
+from nova.cells import rpcapi as cells_rpcapi
+from nova import exception
+from nova import test
+from nova.tests.unit import fake_instance
+
+CONF = cfg.CONF
+CONF.import_opt('topic', 'nova.cells.opts', group='cells')
+
+
+class CellsAPITestCase(test.NoDBTestCase):
+ """Test case for cells.api interfaces."""
+
+ def setUp(self):
+ super(CellsAPITestCase, self).setUp()
+ self.fake_topic = 'fake_topic'
+ self.fake_context = 'fake_context'
+ self.flags(topic=self.fake_topic, enable=True, group='cells')
+ self.cells_rpcapi = cells_rpcapi.CellsAPI()
+
+ def _stub_rpc_method(self, rpc_method, result):
+ call_info = {}
+
+ orig_prepare = self.cells_rpcapi.client.prepare
+
+ def fake_rpc_prepare(**kwargs):
+ if 'version' in kwargs:
+ call_info['version'] = kwargs.pop('version')
+ return self.cells_rpcapi.client
+
+ def fake_csv(version):
+ return orig_prepare(version).can_send_version()
+
+ def fake_rpc_method(ctxt, method, **kwargs):
+ call_info['context'] = ctxt
+ call_info['method'] = method
+ call_info['args'] = kwargs
+ return result
+
+ self.stubs.Set(self.cells_rpcapi.client, 'prepare', fake_rpc_prepare)
+ self.stubs.Set(self.cells_rpcapi.client, 'can_send_version', fake_csv)
+ self.stubs.Set(self.cells_rpcapi.client, rpc_method, fake_rpc_method)
+
+ return call_info
+
+ def _check_result(self, call_info, method, args, version=None):
+ self.assertEqual(self.cells_rpcapi.client.target.topic,
+ self.fake_topic)
+ self.assertEqual(self.fake_context, call_info['context'])
+ self.assertEqual(method, call_info['method'])
+ self.assertEqual(args, call_info['args'])
+ if version is not None:
+ self.assertIn('version', call_info)
+ self.assertIsInstance(call_info['version'], six.string_types,
+ msg="Message version %s is not a string" %
+ call_info['version'])
+ self.assertEqual(version, call_info['version'])
+ else:
+ self.assertNotIn('version', call_info)
+
+ def test_cast_compute_api_method(self):
+ fake_cell_name = 'fake_cell_name'
+ fake_method = 'fake_method'
+ fake_method_args = (1, 2)
+ fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
+
+ expected_method_info = {'method': fake_method,
+ 'method_args': fake_method_args,
+ 'method_kwargs': fake_method_kwargs}
+ expected_args = {'method_info': expected_method_info,
+ 'cell_name': fake_cell_name,
+ 'call': False}
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.cast_compute_api_method(self.fake_context,
+ fake_cell_name, fake_method,
+ *fake_method_args, **fake_method_kwargs)
+ self._check_result(call_info, 'run_compute_api_method',
+ expected_args)
+
+ def test_call_compute_api_method(self):
+ fake_cell_name = 'fake_cell_name'
+ fake_method = 'fake_method'
+ fake_method_args = (1, 2)
+ fake_method_kwargs = {'kwarg1': 10, 'kwarg2': 20}
+ fake_response = 'fake_response'
+
+ expected_method_info = {'method': fake_method,
+ 'method_args': fake_method_args,
+ 'method_kwargs': fake_method_kwargs}
+ expected_args = {'method_info': expected_method_info,
+ 'cell_name': fake_cell_name,
+ 'call': True}
+
+ call_info = self._stub_rpc_method('call', fake_response)
+
+ result = self.cells_rpcapi.call_compute_api_method(self.fake_context,
+ fake_cell_name, fake_method,
+ *fake_method_args, **fake_method_kwargs)
+ self._check_result(call_info, 'run_compute_api_method',
+ expected_args)
+ self.assertEqual(fake_response, result)
+
+ def test_build_instances(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.build_instances(
+ self.fake_context, instances=['1', '2'],
+ image={'fake': 'image'}, arg1=1, arg2=2, arg3=3)
+
+ expected_args = {'build_inst_kwargs': {'instances': ['1', '2'],
+ 'image': {'fake': 'image'},
+ 'arg1': 1,
+ 'arg2': 2,
+ 'arg3': 3}}
+ self._check_result(call_info, 'build_instances',
+ expected_args, version='1.8')
+
+ def test_get_capacities(self):
+ capacity_info = {"capacity": "info"}
+ call_info = self._stub_rpc_method('call',
+ result=capacity_info)
+ result = self.cells_rpcapi.get_capacities(self.fake_context,
+ cell_name="name")
+ self._check_result(call_info, 'get_capacities',
+ {'cell_name': 'name'}, version='1.9')
+ self.assertEqual(capacity_info, result)
+
+ def test_instance_update_at_top(self):
+ fake_info_cache = {'id': 1,
+ 'instance': 'fake_instance',
+ 'other': 'moo'}
+ fake_sys_metadata = [{'id': 1,
+ 'key': 'key1',
+ 'value': 'value1'},
+ {'id': 2,
+ 'key': 'key2',
+ 'value': 'value2'}]
+ fake_instance = {'id': 2,
+ 'security_groups': 'fake',
+ 'instance_type': 'fake',
+ 'volumes': 'fake',
+ 'cell_name': 'fake',
+ 'name': 'fake',
+ 'metadata': 'fake',
+ 'info_cache': fake_info_cache,
+ 'system_metadata': fake_sys_metadata,
+ 'other': 'meow'}
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.instance_update_at_top(
+ self.fake_context, fake_instance)
+
+ expected_args = {'instance': fake_instance}
+ self._check_result(call_info, 'instance_update_at_top',
+ expected_args)
+
+ def test_instance_destroy_at_top(self):
+ fake_instance = {'uuid': 'fake-uuid'}
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.instance_destroy_at_top(
+ self.fake_context, fake_instance)
+
+ expected_args = {'instance': fake_instance}
+ self._check_result(call_info, 'instance_destroy_at_top',
+ expected_args)
+
+ def test_instance_delete_everywhere(self):
+ instance = fake_instance.fake_instance_obj(self.fake_context)
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.instance_delete_everywhere(
+ self.fake_context, instance,
+ 'fake-type')
+
+ expected_args = {'instance': instance,
+ 'delete_type': 'fake-type'}
+ self._check_result(call_info, 'instance_delete_everywhere',
+ expected_args, version='1.27')
+
+ def test_instance_fault_create_at_top(self):
+ fake_instance_fault = {'id': 2,
+ 'other': 'meow'}
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.instance_fault_create_at_top(
+ self.fake_context, fake_instance_fault)
+
+ expected_args = {'instance_fault': fake_instance_fault}
+ self._check_result(call_info, 'instance_fault_create_at_top',
+ expected_args)
+
+ def test_bw_usage_update_at_top(self):
+ update_args = ('fake_uuid', 'fake_mac', 'fake_start_period',
+ 'fake_bw_in', 'fake_bw_out', 'fake_ctr_in',
+ 'fake_ctr_out')
+ update_kwargs = {'last_refreshed': 'fake_refreshed'}
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.bw_usage_update_at_top(
+ self.fake_context, *update_args, **update_kwargs)
+
+ bw_update_info = {'uuid': 'fake_uuid',
+ 'mac': 'fake_mac',
+ 'start_period': 'fake_start_period',
+ 'bw_in': 'fake_bw_in',
+ 'bw_out': 'fake_bw_out',
+ 'last_ctr_in': 'fake_ctr_in',
+ 'last_ctr_out': 'fake_ctr_out',
+ 'last_refreshed': 'fake_refreshed'}
+
+ expected_args = {'bw_update_info': bw_update_info}
+ self._check_result(call_info, 'bw_usage_update_at_top',
+ expected_args)
+
+ def test_get_cell_info_for_neighbors(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.get_cell_info_for_neighbors(
+ self.fake_context)
+ self._check_result(call_info, 'get_cell_info_for_neighbors', {},
+ version='1.1')
+ self.assertEqual(result, 'fake_response')
+
+ def test_sync_instances(self):
+ call_info = self._stub_rpc_method('cast', None)
+ self.cells_rpcapi.sync_instances(self.fake_context,
+ project_id='fake_project', updated_since='fake_time',
+ deleted=True)
+
+ expected_args = {'project_id': 'fake_project',
+ 'updated_since': 'fake_time',
+ 'deleted': True}
+ self._check_result(call_info, 'sync_instances', expected_args,
+ version='1.1')
+
+ def test_service_get_all(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ fake_filters = {'key1': 'val1', 'key2': 'val2'}
+ result = self.cells_rpcapi.service_get_all(self.fake_context,
+ filters=fake_filters)
+
+ expected_args = {'filters': fake_filters}
+ self._check_result(call_info, 'service_get_all', expected_args,
+ version='1.2')
+ self.assertEqual(result, 'fake_response')
+
+ def test_service_get_by_compute_host(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.service_get_by_compute_host(
+ self.fake_context, host_name='fake-host-name')
+ expected_args = {'host_name': 'fake-host-name'}
+ self._check_result(call_info, 'service_get_by_compute_host',
+ expected_args,
+ version='1.2')
+ self.assertEqual(result, 'fake_response')
+
+ def test_get_host_uptime(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.get_host_uptime(
+ self.fake_context, host_name='fake-host-name')
+ expected_args = {'host_name': 'fake-host-name'}
+ self._check_result(call_info, 'get_host_uptime',
+ expected_args,
+ version='1.17')
+ self.assertEqual(result, 'fake_response')
+
+ def test_service_update(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.service_update(
+ self.fake_context, host_name='fake-host-name',
+ binary='nova-api', params_to_update={'disabled': True})
+ expected_args = {
+ 'host_name': 'fake-host-name',
+ 'binary': 'nova-api',
+ 'params_to_update': {'disabled': True}}
+ self._check_result(call_info, 'service_update',
+ expected_args,
+ version='1.7')
+ self.assertEqual(result, 'fake_response')
+
+ def test_service_delete(self):
+ call_info = self._stub_rpc_method('call', None)
+ cell_service_id = 'cell@id'
+ result = self.cells_rpcapi.service_delete(
+ self.fake_context, cell_service_id=cell_service_id)
+ expected_args = {'cell_service_id': cell_service_id}
+ self._check_result(call_info, 'service_delete',
+ expected_args, version='1.26')
+ self.assertIsNone(result)
+
+ def test_proxy_rpc_to_manager(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.proxy_rpc_to_manager(
+ self.fake_context, rpc_message='fake-msg',
+ topic='fake-topic', call=True, timeout=-1)
+ expected_args = {'rpc_message': 'fake-msg',
+ 'topic': 'fake-topic',
+ 'call': True,
+ 'timeout': -1}
+ self._check_result(call_info, 'proxy_rpc_to_manager',
+ expected_args,
+ version='1.2')
+ self.assertEqual(result, 'fake_response')
+
+ def test_task_log_get_all(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.task_log_get_all(self.fake_context,
+ task_name='fake_name',
+ period_beginning='fake_begin',
+ period_ending='fake_end',
+ host='fake_host',
+ state='fake_state')
+
+ expected_args = {'task_name': 'fake_name',
+ 'period_beginning': 'fake_begin',
+ 'period_ending': 'fake_end',
+ 'host': 'fake_host',
+ 'state': 'fake_state'}
+ self._check_result(call_info, 'task_log_get_all', expected_args,
+ version='1.3')
+ self.assertEqual(result, 'fake_response')
+
+ def test_compute_node_get_all(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.compute_node_get_all(self.fake_context,
+ hypervisor_match='fake-match')
+
+ expected_args = {'hypervisor_match': 'fake-match'}
+ self._check_result(call_info, 'compute_node_get_all', expected_args,
+ version='1.4')
+ self.assertEqual(result, 'fake_response')
+
+ def test_compute_node_stats(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.compute_node_stats(self.fake_context)
+ expected_args = {}
+ self._check_result(call_info, 'compute_node_stats',
+ expected_args, version='1.4')
+ self.assertEqual(result, 'fake_response')
+
+ def test_compute_node_get(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.compute_node_get(self.fake_context,
+ 'fake_compute_id')
+ expected_args = {'compute_id': 'fake_compute_id'}
+ self._check_result(call_info, 'compute_node_get',
+ expected_args, version='1.4')
+ self.assertEqual(result, 'fake_response')
+
+ def test_actions_get(self):
+ fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.actions_get(self.fake_context,
+ fake_instance)
+ expected_args = {'cell_name': 'region!child',
+ 'instance_uuid': fake_instance['uuid']}
+ self._check_result(call_info, 'actions_get', expected_args,
+ version='1.5')
+ self.assertEqual(result, 'fake_response')
+
+ def test_actions_get_no_cell(self):
+ fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
+ self.assertRaises(exception.InstanceUnknownCell,
+ self.cells_rpcapi.actions_get, self.fake_context,
+ fake_instance)
+
+ def test_action_get_by_request_id(self):
+ fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.action_get_by_request_id(self.fake_context,
+ fake_instance,
+ 'req-fake')
+ expected_args = {'cell_name': 'region!child',
+ 'instance_uuid': fake_instance['uuid'],
+ 'request_id': 'req-fake'}
+ self._check_result(call_info, 'action_get_by_request_id',
+ expected_args, version='1.5')
+ self.assertEqual(result, 'fake_response')
+
+ def test_action_get_by_request_id_no_cell(self):
+ fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
+ self.assertRaises(exception.InstanceUnknownCell,
+ self.cells_rpcapi.action_get_by_request_id,
+ self.fake_context, fake_instance, 'req-fake')
+
+ def test_action_events_get(self):
+ fake_instance = {'uuid': 'fake-uuid', 'cell_name': 'region!child'}
+ call_info = self._stub_rpc_method('call', 'fake_response')
+ result = self.cells_rpcapi.action_events_get(self.fake_context,
+ fake_instance,
+ 'fake-action')
+ expected_args = {'cell_name': 'region!child',
+ 'action_id': 'fake-action'}
+ self._check_result(call_info, 'action_events_get', expected_args,
+ version='1.5')
+ self.assertEqual(result, 'fake_response')
+
+ def test_action_events_get_no_cell(self):
+ fake_instance = {'uuid': 'fake-uuid', 'cell_name': None}
+ self.assertRaises(exception.InstanceUnknownCell,
+ self.cells_rpcapi.action_events_get,
+ self.fake_context, fake_instance, 'fake-action')
+
+ def test_consoleauth_delete_tokens(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.consoleauth_delete_tokens(self.fake_context,
+ 'fake-uuid')
+
+ expected_args = {'instance_uuid': 'fake-uuid'}
+ self._check_result(call_info, 'consoleauth_delete_tokens',
+ expected_args, version='1.6')
+
+ def test_validate_console_port(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+
+ result = self.cells_rpcapi.validate_console_port(self.fake_context,
+ 'fake-uuid', 'fake-port', 'fake-type')
+
+ expected_args = {'instance_uuid': 'fake-uuid',
+ 'console_port': 'fake-port',
+ 'console_type': 'fake-type'}
+ self._check_result(call_info, 'validate_console_port',
+ expected_args, version='1.6')
+ self.assertEqual(result, 'fake_response')
+
+ def test_bdm_update_or_create_at_top(self):
+ fake_bdm = {'id': 2, 'other': 'meow'}
+
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.bdm_update_or_create_at_top(
+ self.fake_context, fake_bdm, create='fake-create')
+
+ expected_args = {'bdm': fake_bdm, 'create': 'fake-create'}
+ self._check_result(call_info, 'bdm_update_or_create_at_top',
+ expected_args, version='1.28')
+
+ def test_bdm_destroy_at_top(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.bdm_destroy_at_top(self.fake_context,
+ 'fake-uuid',
+ device_name='fake-device',
+ volume_id='fake-vol')
+
+ expected_args = {'instance_uuid': 'fake-uuid',
+ 'device_name': 'fake-device',
+ 'volume_id': 'fake-vol'}
+ self._check_result(call_info, 'bdm_destroy_at_top',
+ expected_args, version='1.10')
+
+ def test_get_migrations(self):
+ call_info = self._stub_rpc_method('call', None)
+ filters = {'cell_name': 'ChildCell', 'status': 'confirmed'}
+
+ self.cells_rpcapi.get_migrations(self.fake_context, filters)
+
+ expected_args = {'filters': filters}
+ self._check_result(call_info, 'get_migrations', expected_args,
+ version="1.11")
+
+ def test_instance_update_from_api(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.instance_update_from_api(
+ self.fake_context, 'fake-instance',
+ expected_vm_state='exp_vm',
+ expected_task_state='exp_task',
+ admin_state_reset='admin_reset')
+
+ expected_args = {'instance': 'fake-instance',
+ 'expected_vm_state': 'exp_vm',
+ 'expected_task_state': 'exp_task',
+ 'admin_state_reset': 'admin_reset'}
+ self._check_result(call_info, 'instance_update_from_api',
+ expected_args, version='1.16')
+
+ def test_start_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.start_instance(
+ self.fake_context, 'fake-instance')
+
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'start_instance',
+ expected_args, version='1.12')
+
+ def test_stop_instance_cast(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.stop_instance(
+ self.fake_context, 'fake-instance', do_cast=True)
+
+ expected_args = {'instance': 'fake-instance',
+ 'do_cast': True}
+ self._check_result(call_info, 'stop_instance',
+ expected_args, version='1.12')
+
+ def test_stop_instance_call(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+
+ result = self.cells_rpcapi.stop_instance(
+ self.fake_context, 'fake-instance', do_cast=False)
+
+ expected_args = {'instance': 'fake-instance',
+ 'do_cast': False}
+ self._check_result(call_info, 'stop_instance',
+ expected_args, version='1.12')
+ self.assertEqual(result, 'fake_response')
+
+ def test_cell_create(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+
+ result = self.cells_rpcapi.cell_create(self.fake_context, 'values')
+
+ expected_args = {'values': 'values'}
+ self._check_result(call_info, 'cell_create',
+ expected_args, version='1.13')
+ self.assertEqual(result, 'fake_response')
+
+ def test_cell_update(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+
+ result = self.cells_rpcapi.cell_update(self.fake_context,
+ 'cell_name', 'values')
+
+ expected_args = {'cell_name': 'cell_name',
+ 'values': 'values'}
+ self._check_result(call_info, 'cell_update',
+ expected_args, version='1.13')
+ self.assertEqual(result, 'fake_response')
+
+ def test_cell_delete(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+
+ result = self.cells_rpcapi.cell_delete(self.fake_context,
+ 'cell_name')
+
+ expected_args = {'cell_name': 'cell_name'}
+ self._check_result(call_info, 'cell_delete',
+ expected_args, version='1.13')
+ self.assertEqual(result, 'fake_response')
+
+ def test_cell_get(self):
+ call_info = self._stub_rpc_method('call', 'fake_response')
+
+ result = self.cells_rpcapi.cell_get(self.fake_context,
+ 'cell_name')
+
+ expected_args = {'cell_name': 'cell_name'}
+ self._check_result(call_info, 'cell_get',
+ expected_args, version='1.13')
+ self.assertEqual(result, 'fake_response')
+
+ def test_reboot_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.reboot_instance(
+ self.fake_context, 'fake-instance',
+ block_device_info='ignored', reboot_type='HARD')
+
+ expected_args = {'instance': 'fake-instance',
+ 'reboot_type': 'HARD'}
+ self._check_result(call_info, 'reboot_instance',
+ expected_args, version='1.14')
+
+ def test_pause_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.pause_instance(
+ self.fake_context, 'fake-instance')
+
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'pause_instance',
+ expected_args, version='1.19')
+
+ def test_unpause_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.unpause_instance(
+ self.fake_context, 'fake-instance')
+
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'unpause_instance',
+ expected_args, version='1.19')
+
+ def test_suspend_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.suspend_instance(
+ self.fake_context, 'fake-instance')
+
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'suspend_instance',
+ expected_args, version='1.15')
+
+ def test_resume_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.resume_instance(
+ self.fake_context, 'fake-instance')
+
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'resume_instance',
+ expected_args, version='1.15')
+
+ def test_terminate_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.terminate_instance(self.fake_context,
+ 'fake-instance', [])
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'terminate_instance',
+ expected_args, version='1.18')
+
+ def test_soft_delete_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.soft_delete_instance(self.fake_context,
+ 'fake-instance')
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'soft_delete_instance',
+ expected_args, version='1.18')
+
+ def test_resize_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.resize_instance(self.fake_context,
+ 'fake-instance',
+ dict(cow='moo'),
+ 'fake-hint',
+ 'fake-flavor',
+ 'fake-reservations')
+ expected_args = {'instance': 'fake-instance',
+ 'flavor': 'fake-flavor',
+ 'extra_instance_updates': dict(cow='moo')}
+ self._check_result(call_info, 'resize_instance',
+ expected_args, version='1.20')
+
+ def test_live_migrate_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.live_migrate_instance(self.fake_context,
+ 'fake-instance',
+ 'fake-host',
+ 'fake-block',
+ 'fake-commit')
+ expected_args = {'instance': 'fake-instance',
+ 'block_migration': 'fake-block',
+ 'disk_over_commit': 'fake-commit',
+ 'host_name': 'fake-host'}
+ self._check_result(call_info, 'live_migrate_instance',
+ expected_args, version='1.20')
+
+ def test_revert_resize(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.revert_resize(self.fake_context,
+ 'fake-instance',
+ 'fake-migration',
+ 'fake-dest',
+ 'resvs')
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'revert_resize',
+ expected_args, version='1.21')
+
+ def test_confirm_resize(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.confirm_resize(self.fake_context,
+ 'fake-instance',
+ 'fake-migration',
+ 'fake-source',
+ 'resvs')
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'confirm_resize',
+ expected_args, version='1.21')
+
+ def test_reset_network(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.reset_network(self.fake_context,
+ 'fake-instance')
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'reset_network',
+ expected_args, version='1.22')
+
+ def test_inject_network_info(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.inject_network_info(self.fake_context,
+ 'fake-instance')
+ expected_args = {'instance': 'fake-instance'}
+ self._check_result(call_info, 'inject_network_info',
+ expected_args, version='1.23')
+
+ def test_snapshot_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.snapshot_instance(self.fake_context,
+ 'fake-instance',
+ 'image-id')
+ expected_args = {'instance': 'fake-instance',
+ 'image_id': 'image-id'}
+ self._check_result(call_info, 'snapshot_instance',
+ expected_args, version='1.24')
+
+ def test_backup_instance(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.backup_instance(self.fake_context,
+ 'fake-instance',
+ 'image-id',
+ 'backup-type',
+ 'rotation')
+ expected_args = {'instance': 'fake-instance',
+ 'image_id': 'image-id',
+ 'backup_type': 'backup-type',
+ 'rotation': 'rotation'}
+ self._check_result(call_info, 'backup_instance',
+ expected_args, version='1.24')
+
+ def test_set_admin_password(self):
+ call_info = self._stub_rpc_method('cast', None)
+
+ self.cells_rpcapi.set_admin_password(self.fake_context,
+ 'fake-instance', 'fake-password')
+
+ expected_args = {'instance': 'fake-instance',
+ 'new_pass': 'fake-password'}
+ self._check_result(call_info, 'set_admin_password',
+ expected_args, version='1.29')
diff --git a/nova/tests/unit/cells/test_cells_scheduler.py b/nova/tests/unit/cells/test_cells_scheduler.py
new file mode 100644
index 0000000000..23a115eaa1
--- /dev/null
+++ b/nova/tests/unit/cells/test_cells_scheduler.py
@@ -0,0 +1,530 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For CellsScheduler
+"""
+import copy
+import time
+
+from oslo.config import cfg
+
+from nova import block_device
+from nova.cells import filters
+from nova.cells import weights
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.openstack.common import uuidutils
+from nova.scheduler import utils as scheduler_utils
+from nova import test
+from nova.tests.unit.cells import fakes
+from nova.tests.unit import fake_instance
+from nova import utils
+
+CONF = cfg.CONF
+CONF.import_opt('scheduler_retries', 'nova.cells.scheduler', group='cells')
+CONF.import_opt('scheduler_filter_classes', 'nova.cells.scheduler',
+ group='cells')
+CONF.import_opt('scheduler_weight_classes', 'nova.cells.scheduler',
+ group='cells')
+
+
class FakeFilterClass1(filters.BaseCellFilter):
    """No-op cell filter used only to exercise filter-class loading."""
+
+
class FakeFilterClass2(filters.BaseCellFilter):
    """Second no-op cell filter used only to exercise filter loading."""
+
+
class FakeWeightClass1(weights.BaseCellWeigher):
    """No-op cell weigher used only to exercise weigher-class loading."""
+
+
class FakeWeightClass2(weights.BaseCellWeigher):
    """Second no-op cell weigher used only to exercise weigher loading."""
+
+
+class CellsSchedulerTestCase(test.TestCase):
+ """Test case for CellsScheduler class."""
+
+ def setUp(self):
+ super(CellsSchedulerTestCase, self).setUp()
+ self.flags(scheduler_filter_classes=[], scheduler_weight_classes=[],
+ group='cells')
+ self._init_cells_scheduler()
+
+ def _init_cells_scheduler(self):
+ fakes.init(self)
+ self.msg_runner = fakes.get_message_runner('api-cell')
+ self.scheduler = self.msg_runner.scheduler
+ self.state_manager = self.msg_runner.state_manager
+ self.my_cell_state = self.state_manager.get_my_state()
+ self.ctxt = context.RequestContext('fake', 'fake')
+ instance_uuids = []
+ for x in xrange(3):
+ instance_uuids.append(uuidutils.generate_uuid())
+ self.instance_uuids = instance_uuids
+ self.instances = [{'uuid': uuid} for uuid in instance_uuids]
+ self.request_spec = {
+ 'instance_uuids': instance_uuids,
+ 'instance_properties': self.instances[0],
+ 'instance_type': 'fake_type',
+ 'image': 'fake_image'}
+ self.build_inst_kwargs = {
+ 'instances': self.instances,
+ 'image': 'fake_image',
+ 'filter_properties': {'instance_type': 'fake_type'},
+ 'security_groups': 'fake_sec_groups',
+ 'block_device_mapping': 'fake_bdm'}
+
    def test_create_instances_here(self):
        """_create_instances_here persists instances and casts them upward.

        Verifies each created instance lands in the DB with the expected
        metadata/hostname/display_name and that instance_update_at_top is
        invoked once per uuid.
        """
        # Just grab the first instance type
        inst_type = db.flavor_get(self.ctxt, 1)
        image = {'properties': {}}
        instance_uuids = self.instance_uuids
        # 'removed' fields should be stripped before the DB create;
        # metadata/system_metadata are given as lists of key/value rows.
        instance_props = {'id': 'removed',
                          'security_groups': 'removed',
                          'info_cache': 'removed',
                          'name': 'instance-00000001',
                          'hostname': 'meow',
                          'display_name': 'moo',
                          'image_ref': 'fake_image_ref',
                          'user_id': self.ctxt.user_id,
                          # Test these as lists
                          'metadata': [{'key': 'moo', 'value': 'cow'}],
                          'system_metadata': [{'key': 'meow', 'value': 'cat'}],
                          'project_id': self.ctxt.project_id}

        call_info = {'uuids': []}
        block_device_mapping = [block_device.create_image_bdm(
            'fake_image_ref')]

        def _fake_instance_update_at_top(_ctxt, instance):
            # Record which instances were broadcast to the parent cell.
            call_info['uuids'].append(instance['uuid'])

        self.stubs.Set(self.msg_runner, 'instance_update_at_top',
                _fake_instance_update_at_top)

        self.scheduler._create_instances_here(self.ctxt, instance_uuids,
                instance_props, inst_type, image,
                ['default'], block_device_mapping)
        self.assertEqual(instance_uuids, call_info['uuids'])

        # Every instance must be retrievable from the DB with the
        # metadata flattened and display_name suffixed with its uuid.
        for instance_uuid in instance_uuids:
            instance = db.instance_get_by_uuid(self.ctxt, instance_uuid)
            meta = utils.instance_meta(instance)
            self.assertEqual('cow', meta['moo'])
            sys_meta = utils.instance_sys_meta(instance)
            self.assertEqual('cat', sys_meta['meow'])
            self.assertEqual('meow', instance['hostname'])
            self.assertEqual('moo-%s' % instance['uuid'],
                             instance['display_name'])
            self.assertEqual('fake_image_ref', instance['image_ref'])
+
    def test_build_instances_selects_child_cell(self):
        """With no capacity info for our cell, scheduling picks a child.

        The msg_runner.build_instances stub is re-entrant: the first call is
        this test driving the scheduler; the second is the scheduler routing
        the build to its chosen target cell, which we capture and assert is
        one of the child cells.
        """
        # Make sure there's no capacity info so we're sure to
        # select a child cell
        our_cell_info = self.state_manager.get_my_state()
        our_cell_info.capacities = {}

        call_info = {'times': 0}

        orig_fn = self.msg_runner.build_instances

        def msg_runner_build_instances(ctxt, target_cell, build_inst_kwargs):
            # This gets called twice. Once for our running it
            # in this cell.. and then it'll get called when the
            # child cell is picked. So, first time.. just run it
            # like normal.
            if not call_info['times']:
                call_info['times'] += 1
                return orig_fn(ctxt, target_cell, build_inst_kwargs)
            call_info['ctxt'] = ctxt
            call_info['target_cell'] = target_cell
            call_info['build_inst_kwargs'] = build_inst_kwargs

        def fake_build_request_spec(ctxt, image, instances):
            # Minimal request spec: just uuids and the image.
            request_spec = {
                'instance_uuids': [inst['uuid'] for inst in instances],
                'image': image}
            return request_spec

        self.stubs.Set(self.msg_runner, 'build_instances',
                msg_runner_build_instances)
        self.stubs.Set(scheduler_utils, 'build_request_spec',
                fake_build_request_spec)

        self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
                self.build_inst_kwargs)

        self.assertEqual(self.ctxt, call_info['ctxt'])
        self.assertEqual(self.build_inst_kwargs,
                call_info['build_inst_kwargs'])
        # The routed target must be one of our child cells.
        child_cells = self.state_manager.get_child_cells()
        self.assertIn(call_info['target_cell'], child_cells)
+
    def test_build_instances_selects_current_cell(self):
        """With no child cells, the scheduler builds in the current cell.

        Stubs _create_instances_here and the compute task API so we can
        assert the current cell both creates the instances locally and
        forwards the original build kwargs to conductor.
        """
        # Make sure there's no child cells so that we will be
        # selected
        self.state_manager.child_cells = {}

        call_info = {}
        build_inst_kwargs = copy.deepcopy(self.build_inst_kwargs)

        def fake_create_instances_here(ctxt, instance_uuids,
                instance_properties, instance_type, image, security_groups,
                block_device_mapping):
            # Capture every argument for the assertions below and hand
            # back instance objects like the real implementation would.
            call_info['ctxt'] = ctxt
            call_info['instance_uuids'] = instance_uuids
            call_info['instance_properties'] = instance_properties
            call_info['instance_type'] = instance_type
            call_info['image'] = image
            call_info['security_groups'] = security_groups
            call_info['block_device_mapping'] = block_device_mapping
            instances = [fake_instance.fake_instance_obj(ctxt, **instance)
                         for instance in self.instances]
            return instances

        def fake_rpc_build_instances(ctxt, **build_inst_kwargs):
            call_info['build_inst_kwargs'] = build_inst_kwargs

        def fake_build_request_spec(ctxt, image, instances):
            # Minimal request spec: just uuids and the image.
            request_spec = {
                'instance_uuids': [inst['uuid'] for inst in instances],
                'image': image}
            return request_spec

        self.stubs.Set(self.scheduler, '_create_instances_here',
                fake_create_instances_here)
        self.stubs.Set(self.scheduler.compute_task_api,
                'build_instances', fake_rpc_build_instances)
        self.stubs.Set(scheduler_utils, 'build_request_spec',
                fake_build_request_spec)

        self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
                build_inst_kwargs)

        self.assertEqual(self.ctxt, call_info['ctxt'])
        self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
        self.assertEqual(self.build_inst_kwargs['instances'][0],
                call_info['instance_properties'])
        self.assertEqual(
            self.build_inst_kwargs['filter_properties']['instance_type'],
            call_info['instance_type'])
        self.assertEqual(self.build_inst_kwargs['image'], call_info['image'])
        self.assertEqual(self.build_inst_kwargs['security_groups'],
                call_info['security_groups'])
        self.assertEqual(self.build_inst_kwargs['block_device_mapping'],
                call_info['block_device_mapping'])
        self.assertEqual(build_inst_kwargs,
                call_info['build_inst_kwargs'])
        self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
+
+ def test_build_instances_retries_when_no_cells_avail(self):
+ self.flags(scheduler_retries=7, group='cells')
+
+ call_info = {'num_tries': 0, 'errored_uuids': []}
+
+ def fake_grab_target_cells(filter_properties):
+ call_info['num_tries'] += 1
+ raise exception.NoCellsAvailable()
+
+ def fake_sleep(_secs):
+ return
+
+ def fake_instance_update(ctxt, instance_uuid, values):
+ self.assertEqual(vm_states.ERROR, values['vm_state'])
+ call_info['errored_uuids'].append(instance_uuid)
+
+ def fake_build_request_spec(ctxt, image, instances):
+ request_spec = {
+ 'instance_uuids': [inst['uuid'] for inst in instances],
+ 'image': image}
+ return request_spec
+
+ self.stubs.Set(self.scheduler, '_grab_target_cells',
+ fake_grab_target_cells)
+ self.stubs.Set(time, 'sleep', fake_sleep)
+ self.stubs.Set(db, 'instance_update', fake_instance_update)
+ self.stubs.Set(scheduler_utils, 'build_request_spec',
+ fake_build_request_spec)
+
+ self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
+ self.build_inst_kwargs)
+
+ self.assertEqual(8, call_info['num_tries'])
+ self.assertEqual(self.instance_uuids, call_info['errored_uuids'])
+
+ def test_schedule_method_on_random_exception(self):
+ self.flags(scheduler_retries=7, group='cells')
+
+ instances = [{'uuid': uuid} for uuid in self.instance_uuids]
+ method_kwargs = {
+ 'image': 'fake_image',
+ 'instances': instances,
+ 'filter_properties': {}}
+
+ call_info = {'num_tries': 0,
+ 'errored_uuids1': [],
+ 'errored_uuids2': []}
+
+ def fake_grab_target_cells(filter_properties):
+ call_info['num_tries'] += 1
+ raise test.TestingException()
+
+ def fake_instance_update(ctxt, instance_uuid, values):
+ self.assertEqual(vm_states.ERROR, values['vm_state'])
+ call_info['errored_uuids1'].append(instance_uuid)
+
+ def fake_instance_update_at_top(ctxt, instance):
+ self.assertEqual(vm_states.ERROR, instance['vm_state'])
+ call_info['errored_uuids2'].append(instance['uuid'])
+
+ def fake_build_request_spec(ctxt, image, instances):
+ request_spec = {
+ 'instance_uuids': [inst['uuid'] for inst in instances],
+ 'image': image}
+ return request_spec
+
+ self.stubs.Set(self.scheduler, '_grab_target_cells',
+ fake_grab_target_cells)
+ self.stubs.Set(db, 'instance_update', fake_instance_update)
+ self.stubs.Set(self.msg_runner, 'instance_update_at_top',
+ fake_instance_update_at_top)
+ self.stubs.Set(scheduler_utils, 'build_request_spec',
+ fake_build_request_spec)
+
+ self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
+ method_kwargs)
+ # Shouldn't retry
+ self.assertEqual(1, call_info['num_tries'])
+ self.assertEqual(self.instance_uuids, call_info['errored_uuids1'])
+ self.assertEqual(self.instance_uuids, call_info['errored_uuids2'])
+
+ def test_filter_schedule_skipping(self):
+ # if a filter handles scheduling, short circuit
+
+ def _grab(filter_properties):
+ return None
+
+ self.stubs.Set(self.scheduler, '_grab_target_cells', _grab)
+
+ def _test(self, *args):
+ raise test.TestingException("shouldn't be called")
+
+ try:
+ self.scheduler._schedule_build_to_cells(None, None, None, _test,
+ None)
+ except test.TestingException:
+ self.fail("Scheduling did not properly short circuit")
+
+ def test_cells_filter_args_correct(self):
+ # Re-init our fakes with some filters.
+ our_path = 'nova.tests.unit.cells.test_cells_scheduler'
+ cls_names = [our_path + '.' + 'FakeFilterClass1',
+ our_path + '.' + 'FakeFilterClass2']
+ self.flags(scheduler_filter_classes=cls_names, group='cells')
+ self._init_cells_scheduler()
+
+ # Make sure there's no child cells so that we will be
+ # selected. Makes stubbing easier.
+ self.state_manager.child_cells = {}
+
+ call_info = {}
+
+ def fake_create_instances_here(ctxt, instance_uuids,
+ instance_properties, instance_type, image, security_groups,
+ block_device_mapping):
+ call_info['ctxt'] = ctxt
+ call_info['instance_uuids'] = instance_uuids
+ call_info['instance_properties'] = instance_properties
+ call_info['instance_type'] = instance_type
+ call_info['image'] = image
+ call_info['security_groups'] = security_groups
+ call_info['block_device_mapping'] = block_device_mapping
+
+ def fake_rpc_build_instances(ctxt, **host_sched_kwargs):
+ call_info['host_sched_kwargs'] = host_sched_kwargs
+
+ def fake_get_filtered_objs(filter_classes, cells, filt_properties):
+ call_info['filt_classes'] = filter_classes
+ call_info['filt_cells'] = cells
+ call_info['filt_props'] = filt_properties
+ return cells
+
+ def fake_build_request_spec(ctxt, image, instances):
+ request_spec = {
+ 'instance_uuids': [inst['uuid'] for inst in instances],
+ 'instance_properties': instances[0],
+ 'image': image,
+ 'instance_type': 'fake_type'}
+ return request_spec
+
+ self.stubs.Set(self.scheduler, '_create_instances_here',
+ fake_create_instances_here)
+ self.stubs.Set(self.scheduler.compute_task_api,
+ 'build_instances', fake_rpc_build_instances)
+ self.stubs.Set(scheduler_utils, 'build_request_spec',
+ fake_build_request_spec)
+ filter_handler = self.scheduler.filter_handler
+ self.stubs.Set(filter_handler, 'get_filtered_objects',
+ fake_get_filtered_objs)
+
+ host_sched_kwargs = {'image': 'fake_image',
+ 'instances': self.instances,
+ 'filter_properties':
+ {'instance_type': 'fake_type'},
+ 'security_groups': 'fake_sec_groups',
+ 'block_device_mapping': 'fake_bdm'}
+
+ self.msg_runner.build_instances(self.ctxt,
+ self.my_cell_state, host_sched_kwargs)
+ # Our cell was selected.
+ self.assertEqual(self.ctxt, call_info['ctxt'])
+ self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
+ self.assertEqual(self.request_spec['instance_properties'],
+ call_info['instance_properties'])
+ self.assertEqual(self.request_spec['instance_type'],
+ call_info['instance_type'])
+ self.assertEqual(self.request_spec['image'], call_info['image'])
+ self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])
+ # Filter args are correct
+ expected_filt_props = {'context': self.ctxt,
+ 'scheduler': self.scheduler,
+ 'routing_path': self.my_cell_state.name,
+ 'host_sched_kwargs': host_sched_kwargs,
+ 'request_spec': self.request_spec,
+ 'instance_type': 'fake_type'}
+ self.assertEqual(expected_filt_props, call_info['filt_props'])
+ self.assertEqual([FakeFilterClass1, FakeFilterClass2],
+ call_info['filt_classes'])
+ self.assertEqual([self.my_cell_state], call_info['filt_cells'])
+
+ def test_cells_filter_returning_none(self):
+ # Re-init our fakes with some filters.
+ our_path = 'nova.tests.unit.cells.test_cells_scheduler'
+ cls_names = [our_path + '.' + 'FakeFilterClass1',
+ our_path + '.' + 'FakeFilterClass2']
+ self.flags(scheduler_filter_classes=cls_names, group='cells')
+ self._init_cells_scheduler()
+
+ # Make sure there's no child cells so that we will be
+ # selected. Makes stubbing easier.
+ self.state_manager.child_cells = {}
+
+ call_info = {'scheduled': False}
+
+ def fake_create_instances_here(ctxt, request_spec):
+ # Should not be called
+ call_info['scheduled'] = True
+
+ def fake_get_filtered_objs(filter_classes, cells, filt_properties):
+ # Should cause scheduling to be skipped. Means that the
+ # filter did it.
+ return None
+
+ self.stubs.Set(self.scheduler, '_create_instances_here',
+ fake_create_instances_here)
+ filter_handler = self.scheduler.filter_handler
+ self.stubs.Set(filter_handler, 'get_filtered_objects',
+ fake_get_filtered_objs)
+
+ self.msg_runner.build_instances(self.ctxt,
+ self.my_cell_state, {})
+ self.assertFalse(call_info['scheduled'])
+
+ def test_cells_weight_args_correct(self):
+ # Re-init our fakes with some filters.
+ our_path = 'nova.tests.unit.cells.test_cells_scheduler'
+ cls_names = [our_path + '.' + 'FakeWeightClass1',
+ our_path + '.' + 'FakeWeightClass2']
+ self.flags(scheduler_weight_classes=cls_names, group='cells')
+ self._init_cells_scheduler()
+
+ # Make sure there's no child cells so that we will be
+ # selected. Makes stubbing easier.
+ self.state_manager.child_cells = {}
+
+ call_info = {}
+
+ def fake_create_instances_here(ctxt, instance_uuids,
+ instance_properties, instance_type, image, security_groups,
+ block_device_mapping):
+ call_info['ctxt'] = ctxt
+ call_info['instance_uuids'] = instance_uuids
+ call_info['instance_properties'] = instance_properties
+ call_info['instance_type'] = instance_type
+ call_info['image'] = image
+ call_info['security_groups'] = security_groups
+ call_info['block_device_mapping'] = block_device_mapping
+
+ def fake_rpc_build_instances(ctxt, **host_sched_kwargs):
+ call_info['host_sched_kwargs'] = host_sched_kwargs
+
+ def fake_get_weighed_objs(weight_classes, cells, filt_properties):
+ call_info['weight_classes'] = weight_classes
+ call_info['weight_cells'] = cells
+ call_info['weight_props'] = filt_properties
+ return [weights.WeightedCell(cells[0], 0.0)]
+
+ def fake_build_request_spec(ctxt, image, instances):
+ request_spec = {
+ 'instance_uuids': [inst['uuid'] for inst in instances],
+ 'instance_properties': instances[0],
+ 'image': image,
+ 'instance_type': 'fake_type'}
+ return request_spec
+
+ self.stubs.Set(self.scheduler, '_create_instances_here',
+ fake_create_instances_here)
+ self.stubs.Set(scheduler_utils, 'build_request_spec',
+ fake_build_request_spec)
+ self.stubs.Set(self.scheduler.compute_task_api,
+ 'build_instances', fake_rpc_build_instances)
+ weight_handler = self.scheduler.weight_handler
+ self.stubs.Set(weight_handler, 'get_weighed_objects',
+ fake_get_weighed_objs)
+
+ host_sched_kwargs = {'image': 'fake_image',
+ 'instances': self.instances,
+ 'filter_properties':
+ {'instance_type': 'fake_type'},
+ 'security_groups': 'fake_sec_groups',
+ 'block_device_mapping': 'fake_bdm'}
+
+ self.msg_runner.build_instances(self.ctxt,
+ self.my_cell_state, host_sched_kwargs)
+ # Our cell was selected.
+ self.assertEqual(self.ctxt, call_info['ctxt'])
+ self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
+ self.assertEqual(self.request_spec['instance_properties'],
+ call_info['instance_properties'])
+ self.assertEqual(self.request_spec['instance_type'],
+ call_info['instance_type'])
+ self.assertEqual(self.request_spec['image'], call_info['image'])
+ self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])
+ # Weight args are correct
+ expected_filt_props = {'context': self.ctxt,
+ 'scheduler': self.scheduler,
+ 'routing_path': self.my_cell_state.name,
+ 'host_sched_kwargs': host_sched_kwargs,
+ 'request_spec': self.request_spec,
+ 'instance_type': 'fake_type'}
+ self.assertEqual(expected_filt_props, call_info['weight_props'])
+ self.assertEqual([FakeWeightClass1, FakeWeightClass2],
+ call_info['weight_classes'])
+ self.assertEqual([self.my_cell_state], call_info['weight_cells'])
diff --git a/nova/tests/cells/test_cells_state_manager.py b/nova/tests/unit/cells/test_cells_state_manager.py
index 6c52448111..6c52448111 100644
--- a/nova/tests/cells/test_cells_state_manager.py
+++ b/nova/tests/unit/cells/test_cells_state_manager.py
diff --git a/nova/tests/cells/test_cells_utils.py b/nova/tests/unit/cells/test_cells_utils.py
index 44141150b6..44141150b6 100644
--- a/nova/tests/cells/test_cells_utils.py
+++ b/nova/tests/unit/cells/test_cells_utils.py
diff --git a/nova/tests/cells/test_cells_weights.py b/nova/tests/unit/cells/test_cells_weights.py
index 5f0a0ac783..5f0a0ac783 100644
--- a/nova/tests/cells/test_cells_weights.py
+++ b/nova/tests/unit/cells/test_cells_weights.py
diff --git a/nova/tests/cert/__init__.py b/nova/tests/unit/cert/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/cert/__init__.py
+++ b/nova/tests/unit/cert/__init__.py
diff --git a/nova/tests/cert/test_rpcapi.py b/nova/tests/unit/cert/test_rpcapi.py
index ee20c477cd..ee20c477cd 100644
--- a/nova/tests/cert/test_rpcapi.py
+++ b/nova/tests/unit/cert/test_rpcapi.py
diff --git a/nova/tests/cmd/__init__.py b/nova/tests/unit/cmd/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/cmd/__init__.py
+++ b/nova/tests/unit/cmd/__init__.py
diff --git a/nova/tests/cmd/test_idmapshift.py b/nova/tests/unit/cmd/test_idmapshift.py
index 2f0fe06bc0..2f0fe06bc0 100644
--- a/nova/tests/cmd/test_idmapshift.py
+++ b/nova/tests/unit/cmd/test_idmapshift.py
diff --git a/nova/tests/compute/__init__.py b/nova/tests/unit/compute/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/compute/__init__.py
+++ b/nova/tests/unit/compute/__init__.py
diff --git a/nova/tests/compute/eventlet_utils.py b/nova/tests/unit/compute/eventlet_utils.py
index 6d70c0a063..6d70c0a063 100644
--- a/nova/tests/compute/eventlet_utils.py
+++ b/nova/tests/unit/compute/eventlet_utils.py
diff --git a/nova/tests/compute/fake_resource_tracker.py b/nova/tests/unit/compute/fake_resource_tracker.py
index b0fec2042b..b0fec2042b 100644
--- a/nova/tests/compute/fake_resource_tracker.py
+++ b/nova/tests/unit/compute/fake_resource_tracker.py
diff --git a/nova/tests/compute/monitors/__init__.py b/nova/tests/unit/compute/monitors/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/compute/monitors/__init__.py
+++ b/nova/tests/unit/compute/monitors/__init__.py
diff --git a/nova/tests/compute/monitors/test_cpu_monitor.py b/nova/tests/unit/compute/monitors/test_cpu_monitor.py
index 04977cd47f..04977cd47f 100644
--- a/nova/tests/compute/monitors/test_cpu_monitor.py
+++ b/nova/tests/unit/compute/monitors/test_cpu_monitor.py
diff --git a/nova/tests/unit/compute/monitors/test_monitors.py b/nova/tests/unit/compute/monitors/test_monitors.py
new file mode 100644
index 0000000000..e846479483
--- /dev/null
+++ b/nova/tests/unit/compute/monitors/test_monitors.py
@@ -0,0 +1,144 @@
+# Copyright 2013 Intel Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for resource monitors."""
+
+from nova.compute import monitors
+from nova import test
+
+
+class FakeResourceMonitor(monitors.ResourceMonitorBase):
+ def _update_data(self):
+ self._data['foo.metric1'] = '1000'
+ self._data['foo.metric2'] = '99.999'
+ self._data['timestamp'] = '123'
+
+ @monitors.ResourceMonitorBase.add_timestamp
+ def _get_foo_metric1(self, **kwargs):
+ return self._data.get("foo.metric1")
+
+ @monitors.ResourceMonitorBase.add_timestamp
+ def _get_foo_metric2(self, **kwargs):
+ return self._data.get("foo.metric2")
+
+
+class FakeMonitorClass1(monitors.ResourceMonitorBase):
+ def get_metrics(self, **kwargs):
+ data = [{'timestamp': 1232,
+ 'name': 'key1',
+ 'value': 2600,
+ 'source': 'libvirt'}]
+ return data
+
+ def get_metric_names(self):
+ return ['key1']
+
+
+class FakeMonitorClass2(monitors.ResourceMonitorBase):
+ def get_metrics(self, **kwargs):
+ data = [{'timestamp': 123,
+ 'name': 'key2',
+ 'value': 1600,
+ 'source': 'libvirt'}]
+ return data
+
+ def get_metric_names(self):
+ return ['key2']
+
+
+class FakeMonitorClass3(monitors.ResourceMonitorBase):
+ def get_metrics(self, **kwargs):
+ data = [{'timestamp': 1234,
+ 'name': 'key1',
+ 'value': 1200,
+ 'source': 'libvirt'}]
+ return data
+
+ def get_metric_names(self):
+ return ['key1']
+
+
+class FakeMonitorClass4(monitors.ResourceMonitorBase):
+ def get_metrics(self, **kwargs):
+ raise test.TestingException()
+
+ def get_metric_names(self):
+ raise test.TestingException()
+
+
+class ResourceMonitorBaseTestCase(test.TestCase):
+ def setUp(self):
+ super(ResourceMonitorBaseTestCase, self).setUp()
+ self.monitor = FakeResourceMonitor(None)
+
+ def test_get_metric_names(self):
+ names = self.monitor.get_metric_names()
+ self.assertEqual(2, len(names))
+ self.assertIn("foo.metric1", names)
+ self.assertIn("foo.metric2", names)
+
+ def test_get_metrics(self):
+ metrics_raw = self.monitor.get_metrics()
+ names = self.monitor.get_metric_names()
+ metrics = {}
+ for metric in metrics_raw:
+ self.assertIn(metric['name'], names)
+ self.assertEqual(metric["timestamp"], '123')
+ metrics[metric['name']] = metric['value']
+
+ self.assertEqual(metrics["foo.metric1"], '1000')
+ self.assertEqual(metrics["foo.metric2"], '99.999')
+
+
+class ResourceMonitorsTestCase(test.TestCase):
+ """Test case for monitors."""
+
+ def setUp(self):
+ super(ResourceMonitorsTestCase, self).setUp()
+ self.monitor_handler = monitors.ResourceMonitorHandler()
+ fake_monitors = [
+ 'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass1',
+ 'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass2']
+ self.flags(compute_available_monitors=fake_monitors)
+
+ classes = self.monitor_handler.get_matching_classes(
+ ['nova.compute.monitors.all_monitors'])
+ self.class_map = {}
+ for cls in classes:
+ self.class_map[cls.__name__] = cls
+
+ def test_choose_monitors_not_found(self):
+ self.flags(compute_monitors=['FakeMonitorClass5', 'FakeMonitorClass4'])
+ monitor_classes = self.monitor_handler.choose_monitors(self)
+ self.assertEqual(len(monitor_classes), 0)
+
+ def test_choose_monitors_bad(self):
+ self.flags(compute_monitors=['FakeMonitorClass1', 'FakePluginClass3'])
+ monitor_classes = self.monitor_handler.choose_monitors(self)
+ self.assertEqual(len(monitor_classes), 1)
+
+ def test_choose_monitors(self):
+ self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass2'])
+ monitor_classes = self.monitor_handler.choose_monitors(self)
+ self.assertEqual(len(monitor_classes), 2)
+
+ def test_choose_monitors_none(self):
+ self.flags(compute_monitors=[])
+ monitor_classes = self.monitor_handler.choose_monitors(self)
+ self.assertEqual(len(monitor_classes), 0)
+
+ def test_all_monitors(self):
+ # Double check at least a couple of known monitors exist
+ self.assertIn('ComputeDriverCPUMonitor', self.class_map)
diff --git a/nova/tests/compute/test_arch.py b/nova/tests/unit/compute/test_arch.py
index 0aab95c2ae..0aab95c2ae 100644
--- a/nova/tests/compute/test_arch.py
+++ b/nova/tests/unit/compute/test_arch.py
diff --git a/nova/tests/unit/compute/test_claims.py b/nova/tests/unit/compute/test_claims.py
new file mode 100644
index 0000000000..50218f24c7
--- /dev/null
+++ b/nova/tests/unit/compute/test_claims.py
@@ -0,0 +1,320 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for resource tracker claims."""
+
+import uuid
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova.compute import claims
+from nova import db
+from nova import exception
+from nova import objects
+from nova.pci import manager as pci_manager
+from nova import test
+from nova.tests.unit.pci import fakes as pci_fakes
+from nova.virt import hardware
+
+
+class FakeResourceHandler(object):
+ test_called = False
+ usage_is_instance = False
+
+ def test_resources(self, usage, limits):
+ self.test_called = True
+ self.usage_is_itype = usage.get('name') is 'fakeitype'
+ return []
+
+
+class DummyTracker(object):
+ icalled = False
+ rcalled = False
+ pci_tracker = pci_manager.PciDevTracker()
+ ext_resources_handler = FakeResourceHandler()
+
+ def abort_instance_claim(self, *args, **kwargs):
+ self.icalled = True
+
+ def drop_resize_claim(self, *args, **kwargs):
+ self.rcalled = True
+
+ def new_pci_tracker(self):
+ self.pci_tracker = pci_manager.PciDevTracker()
+
+
+@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+class ClaimTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(ClaimTestCase, self).setUp()
+ self.resources = self._fake_resources()
+ self.tracker = DummyTracker()
+
+ def _claim(self, limits=None, overhead=None, **kwargs):
+ numa_topology = kwargs.pop('numa_topology', None)
+ instance = self._fake_instance(**kwargs)
+ if numa_topology:
+ db_numa_topology = {
+ 'id': 1, 'created_at': None, 'updated_at': None,
+ 'deleted_at': None, 'deleted': None,
+ 'instance_uuid': instance['uuid'],
+ 'numa_topology': numa_topology.to_json()
+ }
+ else:
+ db_numa_topology = None
+ if overhead is None:
+ overhead = {'memory_mb': 0}
+ with mock.patch.object(
+ db, 'instance_extra_get_by_instance_uuid',
+ return_value=db_numa_topology):
+ return claims.Claim('context', instance, self.tracker,
+ self.resources, overhead=overhead,
+ limits=limits)
+
+ def _fake_instance(self, **kwargs):
+ instance = {
+ 'uuid': str(uuid.uuid1()),
+ 'memory_mb': 1024,
+ 'root_gb': 10,
+ 'ephemeral_gb': 5,
+ 'vcpus': 1,
+ 'system_metadata': {},
+ 'numa_topology': None
+ }
+ instance.update(**kwargs)
+ return instance
+
+ def _fake_instance_type(self, **kwargs):
+ instance_type = {
+ 'id': 1,
+ 'name': 'fakeitype',
+ 'memory_mb': 1,
+ 'vcpus': 1,
+ 'root_gb': 1,
+ 'ephemeral_gb': 2
+ }
+ instance_type.update(**kwargs)
+ return instance_type
+
+ def _fake_resources(self, values=None):
+ resources = {
+ 'memory_mb': 2048,
+ 'memory_mb_used': 0,
+ 'free_ram_mb': 2048,
+ 'local_gb': 20,
+ 'local_gb_used': 0,
+ 'free_disk_gb': 20,
+ 'vcpus': 2,
+ 'vcpus_used': 0,
+ 'numa_topology': hardware.VirtNUMAHostTopology(
+ cells=[hardware.VirtNUMATopologyCellUsage(1, [1, 2], 512),
+ hardware.VirtNUMATopologyCellUsage(2, [3, 4], 512)]
+ ).to_json()
+ }
+ if values:
+ resources.update(values)
+ return resources
+
+ def test_memory_unlimited(self, mock_get):
+ self._claim(memory_mb=99999999)
+
+ def test_disk_unlimited_root(self, mock_get):
+ self._claim(root_gb=999999)
+
+ def test_disk_unlimited_ephemeral(self, mock_get):
+ self._claim(ephemeral_gb=999999)
+
+ def test_memory_with_overhead(self, mock_get):
+ overhead = {'memory_mb': 8}
+ limits = {'memory_mb': 2048}
+ self._claim(memory_mb=2040, limits=limits,
+ overhead=overhead)
+
+ def test_memory_with_overhead_insufficient(self, mock_get):
+ overhead = {'memory_mb': 9}
+ limits = {'memory_mb': 2048}
+
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self._claim, limits=limits, overhead=overhead,
+ memory_mb=2040)
+
+ def test_memory_oversubscription(self, mock_get):
+ self._claim(memory_mb=4096)
+
+ def test_memory_insufficient(self, mock_get):
+ limits = {'memory_mb': 8192}
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self._claim, limits=limits, memory_mb=16384)
+
+ def test_disk_oversubscription(self, mock_get):
+ limits = {'disk_gb': 60}
+ self._claim(root_gb=10, ephemeral_gb=40,
+ limits=limits)
+
+ def test_disk_insufficient(self, mock_get):
+ limits = {'disk_gb': 45}
+ self.assertRaisesRegexp(
+ exception.ComputeResourcesUnavailable,
+ "disk",
+ self._claim, limits=limits, root_gb=10, ephemeral_gb=40)
+
+ def test_disk_and_memory_insufficient(self, mock_get):
+ limits = {'disk_gb': 45, 'memory_mb': 8192}
+ self.assertRaisesRegexp(
+ exception.ComputeResourcesUnavailable,
+ "memory.*disk",
+ self._claim, limits=limits, root_gb=10, ephemeral_gb=40,
+ memory_mb=16384)
+
+ @pci_fakes.patch_pci_whitelist
+ def test_pci_pass(self, mock_get):
+ dev_dict = {
+ 'compute_node_id': 1,
+ 'address': 'a',
+ 'product_id': 'p',
+ 'vendor_id': 'v',
+ 'status': 'available'}
+ self.tracker.new_pci_tracker()
+ self.tracker.pci_tracker.set_hvdevs([dev_dict])
+ claim = self._claim()
+ request = objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': 'v', 'product_id': 'p'}])
+ mock_get.return_value = objects.InstancePCIRequests(
+ requests=[request])
+ self.assertIsNone(claim._test_pci())
+
+ @pci_fakes.patch_pci_whitelist
+ def test_pci_fail(self, mock_get):
+ dev_dict = {
+ 'compute_node_id': 1,
+ 'address': 'a',
+ 'product_id': 'p',
+ 'vendor_id': 'v1',
+ 'status': 'available'}
+ self.tracker.new_pci_tracker()
+ self.tracker.pci_tracker.set_hvdevs([dev_dict])
+ claim = self._claim()
+ request = objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': 'v', 'product_id': 'p'}])
+ mock_get.return_value = objects.InstancePCIRequests(
+ requests=[request])
+ claim._test_pci()
+
+ @pci_fakes.patch_pci_whitelist
+ def test_pci_pass_no_requests(self, mock_get):
+ dev_dict = {
+ 'compute_node_id': 1,
+ 'address': 'a',
+ 'product_id': 'p',
+ 'vendor_id': 'v',
+ 'status': 'available'}
+ self.tracker.new_pci_tracker()
+ self.tracker.pci_tracker.set_hvdevs([dev_dict])
+ claim = self._claim()
+ self.assertIsNone(claim._test_pci())
+
+ def test_ext_resources(self, mock_get):
+ self._claim()
+ self.assertTrue(self.tracker.ext_resources_handler.test_called)
+ self.assertFalse(self.tracker.ext_resources_handler.usage_is_itype)
+
+ def test_numa_topology_no_limit(self, mock_get):
+ huge_instance = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(
+ 1, set([1, 2, 3, 4, 5]), 2048)])
+ self._claim(numa_topology=huge_instance)
+
+ def test_numa_topology_fails(self, mock_get):
+ huge_instance = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(
+ 1, set([1, 2, 3, 4, 5]), 2048)])
+ limit_topo = hardware.VirtNUMALimitTopology(
+ cells=[hardware.VirtNUMATopologyCellLimit(
+ 1, [1, 2], 512, cpu_limit=2, memory_limit=512),
+ hardware.VirtNUMATopologyCellLimit(
+ 1, [3, 4], 512, cpu_limit=2, memory_limit=512)])
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self._claim,
+ limits={'numa_topology': limit_topo.to_json()},
+ numa_topology=huge_instance)
+
+ def test_numa_topology_passes(self, mock_get):
+ huge_instance = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(
+ 1, set([1, 2, 3, 4, 5]), 2048)])
+ limit_topo = hardware.VirtNUMALimitTopology(
+ cells=[hardware.VirtNUMATopologyCellLimit(
+ 1, [1, 2], 512, cpu_limit=5, memory_limit=4096),
+ hardware.VirtNUMATopologyCellLimit(
+ 1, [3, 4], 512, cpu_limit=5, memory_limit=4096)])
+ self._claim(limits={'numa_topology': limit_topo.to_json()},
+ numa_topology=huge_instance)
+
+ def test_abort(self, mock_get):
+ claim = self._abort()
+ self.assertTrue(claim.tracker.icalled)
+
+ def _abort(self):
+ claim = None
+ try:
+ with self._claim(memory_mb=4096) as claim:
+ raise test.TestingException("abort")
+ except test.TestingException:
+ pass
+
+ return claim
+
+
+class ResizeClaimTestCase(ClaimTestCase):
+
+ def setUp(self):
+ super(ResizeClaimTestCase, self).setUp()
+ self.instance = self._fake_instance()
+ self.get_numa_constraint_patch = None
+
+ def _claim(self, limits=None, overhead=None, **kwargs):
+ instance_type = self._fake_instance_type(**kwargs)
+ numa_constraint = kwargs.pop('numa_topology', None)
+ if overhead is None:
+ overhead = {'memory_mb': 0}
+ with mock.patch.object(
+ hardware.VirtNUMAInstanceTopology, 'get_constraints',
+ return_value=numa_constraint):
+ return claims.ResizeClaim('context', self.instance, instance_type,
+ {}, self.tracker, self.resources,
+ overhead=overhead, limits=limits)
+
+ def _set_pci_request(self, claim):
+ request = [{'count': 1,
+ 'spec': [{'vendor_id': 'v', 'product_id': 'p'}],
+ }]
+ claim.instance.update(
+ system_metadata={'new_pci_requests': jsonutils.dumps(request)})
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_ext_resources(self, mock_get):
+ self._claim()
+ self.assertTrue(self.tracker.ext_resources_handler.test_called)
+ self.assertTrue(self.tracker.ext_resources_handler.usage_is_itype)
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_abort(self, mock_get):
+ claim = self._abort()
+ self.assertTrue(claim.tracker.rcalled)
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
new file mode 100644
index 0000000000..8f4d73dd79
--- /dev/null
+++ b/nova/tests/unit/compute/test_compute.py
@@ -0,0 +1,11415 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Piston Cloud Computing, Inc.
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Tests for compute service."""
+
+import base64
+import contextlib
+import datetime
+import operator
+import sys
+import time
+import traceback
+import uuid
+
+from eventlet import greenthread
+import mock
+import mox
+from oslo.config import cfg
+from oslo import messaging
+from oslo.serialization import jsonutils
+from oslo.utils import importutils
+from oslo.utils import timeutils
+from oslo.utils import units
+import six
+import testtools
+from testtools import matchers as testtools_matchers
+
+import nova
+from nova import availability_zones
+from nova import block_device
+from nova import compute
+from nova.compute import api as compute_api
+from nova.compute import arch
+from nova.compute import delete_types
+from nova.compute import flavors
+from nova.compute import manager as compute_manager
+from nova.compute import power_state
+from nova.compute import rpcapi as compute_rpcapi
+from nova.compute import task_states
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova.conductor import manager as conductor_manager
+from nova.console import type as ctype
+from nova import context
+from nova import db
+from nova import exception
+from nova.i18n import _
+from nova.image import glance
+from nova.network import api as network_api
+from nova.network import model as network_model
+from nova.network.security_group import openstack_driver
+from nova import objects
+from nova.objects import base as obj_base
+from nova.objects import block_device as block_device_obj
+from nova.objects import instance as instance_obj
+from nova.openstack.common import log as logging
+from nova.openstack.common import uuidutils
+from nova import policy
+from nova import quota
+from nova import test
+from nova.tests.unit.compute import eventlet_utils
+from nova.tests.unit.compute import fake_resource_tracker
+from nova.tests.unit.db import fakes as db_fakes
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_network_cache_model
+from nova.tests.unit import fake_notifier
+from nova.tests.unit import fake_server_actions
+from nova.tests.unit.image import fake as fake_image
+from nova.tests.unit import matchers
+from nova.tests.unit.objects import test_flavor
+from nova.tests.unit.objects import test_migration
+from nova import utils
+from nova.virt import block_device as driver_block_device
+from nova.virt import event
+from nova.virt import fake
+from nova.virt import hardware
+from nova.volume import cinder
+
+QUOTAS = quota.QUOTAS
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
+CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
+
+
+FAKE_IMAGE_REF = 'fake-image-ref'
+
+NODENAME = 'fakenode1'
+
+
+def fake_not_implemented(*args, **kwargs):
+ raise NotImplementedError()
+
+
+def get_primitive_instance_by_uuid(context, instance_uuid):
+ """Helper method to get an instance and then convert it to
+ a primitive form using jsonutils.
+ """
+ instance = db.instance_get_by_uuid(context, instance_uuid)
+ return jsonutils.to_primitive(instance)
+
+
+def unify_instance(instance):
+ """Return a dict-like instance for both object-initiated and
+ model-initiated sources that can reasonably be compared.
+ """
+ newdict = dict()
+ for k, v in instance.iteritems():
+ if isinstance(v, datetime.datetime):
+ # NOTE(danms): DB models and Instance objects have different
+ # timezone expectations
+ v = v.replace(tzinfo=None)
+ elif k == 'fault':
+ # NOTE(danms): DB models don't have 'fault'
+ continue
+ elif k == 'pci_devices':
+ # NOTE(yonlig.he) pci devices need lazy loading
+ # fake db does not support it yet.
+ continue
+ newdict[k] = v
+ return newdict
+
+
+class FakeSchedulerAPI(object):
+
+ def run_instance(self, ctxt, request_spec, admin_password,
+ injected_files, requested_networks, is_first_time,
+ filter_properties):
+ pass
+
+ def live_migration(self, ctxt, block_migration, disk_over_commit,
+ instance, dest):
+ pass
+
+ def prep_resize(self, ctxt, instance, instance_type, image, request_spec,
+ filter_properties, reservations):
+ pass
+
+
+class FakeComputeTaskAPI(object):
+
+ def resize_instance(self, context, instance, extra_instance_updates,
+ scheduler_hint, flavor, reservations):
+ pass
+
+
+class BaseTestCase(test.TestCase):
+
+ def setUp(self):
+ super(BaseTestCase, self).setUp()
+ self.flags(network_manager='nova.network.manager.FlatManager')
+ fake.set_nodes([NODENAME])
+ self.flags(use_local=True, group='conductor')
+
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
+ self.compute = importutils.import_object(CONF.compute_manager)
+ # execute power syncing synchronously for testing:
+ self.compute._sync_power_pool = eventlet_utils.SyncPool()
+
+ # override tracker with a version that doesn't need the database:
+ fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
+ self.compute.driver, NODENAME)
+ self.compute._resource_tracker_dict[NODENAME] = fake_rt
+
+ def fake_get_compute_nodes_in_db(context, use_slave=False):
+ fake_compute_nodes = [{'local_gb': 259,
+ 'vcpus_used': 0,
+ 'deleted': 0,
+ 'hypervisor_type': 'powervm',
+ 'created_at': '2013-04-01T00:27:06.000000',
+ 'local_gb_used': 0,
+ 'updated_at': '2013-04-03T00:35:41.000000',
+ 'hypervisor_hostname': 'fake_phyp1',
+ 'memory_mb_used': 512,
+ 'memory_mb': 131072,
+ 'current_workload': 0,
+ 'vcpus': 16,
+ 'cpu_info': 'ppc64,powervm,3940',
+ 'running_vms': 0,
+ 'free_disk_gb': 259,
+ 'service_id': 7,
+ 'hypervisor_version': 7,
+ 'disk_available_least': 265856,
+ 'deleted_at': None,
+ 'free_ram_mb': 130560,
+ 'metrics': '',
+ 'stats': '',
+ 'numa_topology': '',
+ 'id': 2,
+ 'host_ip': '127.0.0.1'}]
+ return [objects.ComputeNode._from_db_object(
+ context, objects.ComputeNode(), cn)
+ for cn in fake_compute_nodes]
+
+ def fake_compute_node_delete(context, compute_node_id):
+ self.assertEqual(2, compute_node_id)
+
+ self.stubs.Set(self.compute, '_get_compute_nodes_in_db',
+ fake_get_compute_nodes_in_db)
+ self.stubs.Set(db, 'compute_node_delete',
+ fake_compute_node_delete)
+
+ self.compute.update_available_resource(
+ context.get_admin_context())
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id)
+ self.none_quotas = objects.Quotas.from_reservations(
+ self.context, None)
+
+ def fake_show(meh, context, id, **kwargs):
+ if id:
+ return {'id': id, 'min_disk': None, 'min_ram': None,
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {'kernel_id': 'fake_kernel_id',
+ 'ramdisk_id': 'fake_ramdisk_id',
+ 'something_else': 'meow'}}
+ else:
+ raise exception.ImageNotFound(image_id=id)
+
+ fake_image.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+
+ fake_rpcapi = FakeSchedulerAPI()
+ fake_taskapi = FakeComputeTaskAPI()
+ self.stubs.Set(self.compute, 'scheduler_rpcapi', fake_rpcapi)
+ self.stubs.Set(self.compute, 'compute_task_api', fake_taskapi)
+
+ fake_network.set_stub_network_methods(self.stubs)
+ fake_server_actions.stub_out_action_events(self.stubs)
+
+ def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs):
+ self.assertTrue(ctxt.is_admin)
+ return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
+
+ self.stubs.Set(network_api.API, 'get_instance_nw_info',
+ fake_get_nw_info)
+
+ def fake_allocate_for_instance(cls, ctxt, instance, *args, **kwargs):
+ self.assertFalse(ctxt.is_admin)
+ return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
+
+ self.stubs.Set(network_api.API, 'allocate_for_instance',
+ fake_allocate_for_instance)
+ self.compute_api = compute.API()
+
+ # Just to make long lines short
+ self.rt = self.compute._get_resource_tracker(NODENAME)
+
+ def tearDown(self):
+ timeutils.clear_time_override()
+ ctxt = context.get_admin_context()
+ fake_image.FakeImageService_reset()
+ instances = db.instance_get_all(ctxt)
+ for instance in instances:
+ db.instance_destroy(ctxt, instance['uuid'])
+ fake.restore_nodes()
+ super(BaseTestCase, self).tearDown()
+
+ def _create_fake_instance(self, params=None, type_name='m1.tiny',
+ services=False):
+ """Create a test instance."""
+ if not params:
+ params = {}
+
+ def make_fake_sys_meta():
+ sys_meta = params.pop("system_metadata", {})
+ inst_type = flavors.get_flavor_by_name(type_name)
+ for key in flavors.system_metadata_flavor_props:
+ sys_meta['instance_type_%s' % key] = inst_type[key]
+ return sys_meta
+
+ inst = {}
+ inst['vm_state'] = vm_states.ACTIVE
+ inst['task_state'] = None
+ inst['image_ref'] = FAKE_IMAGE_REF
+ inst['reservation_id'] = 'r-fakeres'
+ inst['user_id'] = self.user_id
+ inst['project_id'] = self.project_id
+ inst['host'] = 'fake_host'
+ inst['node'] = NODENAME
+ type_id = flavors.get_flavor_by_name(type_name)['id']
+ inst['instance_type_id'] = type_id
+ inst['ami_launch_index'] = 0
+ inst['memory_mb'] = 0
+ inst['vcpus'] = 0
+ inst['root_gb'] = 0
+ inst['ephemeral_gb'] = 0
+ inst['architecture'] = arch.X86_64
+ inst['os_type'] = 'Linux'
+ inst['system_metadata'] = make_fake_sys_meta()
+ inst['locked'] = False
+ inst['created_at'] = timeutils.utcnow()
+ inst['updated_at'] = timeutils.utcnow()
+ inst['launched_at'] = timeutils.utcnow()
+ inst['security_groups'] = []
+ inst.update(params)
+ if services:
+ _create_service_entries(self.context.elevated(),
+ [['fake_zone', [inst['host']]]])
+ return db.instance_create(self.context, inst)
+
+ def _create_fake_instance_obj(self, params=None, type_name='m1.tiny',
+ services=False):
+ db_inst = self._create_fake_instance(params, type_name=type_name,
+ services=services)
+ return objects.Instance._from_db_object(
+ self.context, objects.Instance(), db_inst,
+ expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
+
+ def _create_instance_type(self, params=None):
+ """Create a test instance type."""
+ if not params:
+ params = {}
+
+ context = self.context.elevated()
+ inst = {}
+ inst['name'] = 'm1.small'
+ inst['memory_mb'] = 1024
+ inst['vcpus'] = 1
+ inst['root_gb'] = 20
+ inst['ephemeral_gb'] = 10
+ inst['flavorid'] = '1'
+ inst['swap'] = 2048
+ inst['rxtx_factor'] = 1
+ inst.update(params)
+ return db.flavor_create(context, inst)['id']
+
+ def _create_group(self):
+ values = {'name': 'testgroup',
+ 'description': 'testgroup',
+ 'user_id': self.user_id,
+ 'project_id': self.project_id}
+ return db.security_group_create(self.context, values)
+
+ def _stub_migrate_server(self):
+ def _fake_migrate_server(*args, **kwargs):
+ pass
+
+ self.stubs.Set(conductor_manager.ComputeTaskManager,
+ 'migrate_server', _fake_migrate_server)
+
+ def _init_aggregate_with_host(self, aggr, aggr_name, zone, host):
+ if not aggr:
+ aggr = self.api.create_aggregate(self.context, aggr_name, zone)
+ aggr = self.api.add_host_to_aggregate(self.context, aggr['id'], host)
+ return aggr
+
+
+class ComputeVolumeTestCase(BaseTestCase):
+
+ def setUp(self):
+ super(ComputeVolumeTestCase, self).setUp()
+ self.volume_id = 'fake'
+ self.fetched_attempts = 0
+ self.instance = {
+ 'id': 'fake',
+ 'uuid': 'fake',
+ 'name': 'fake',
+ 'root_device_name': '/dev/vda',
+ }
+ self.fake_volume = fake_block_device.FakeDbBlockDeviceDict(
+ {'source_type': 'volume', 'destination_type': 'volume',
+ 'volume_id': self.volume_id, 'device_name': '/dev/vdb'})
+ self.instance_object = objects.Instance._from_db_object(
+ self.context, objects.Instance(),
+ fake_instance.fake_db_instance())
+ self.stubs.Set(self.compute.volume_api, 'get', lambda *a, **kw:
+ {'id': self.volume_id,
+ 'attach_status': 'detached'})
+ self.stubs.Set(self.compute.driver, 'get_volume_connector',
+ lambda *a, **kw: None)
+ self.stubs.Set(self.compute.volume_api, 'initialize_connection',
+ lambda *a, **kw: {})
+ self.stubs.Set(self.compute.volume_api, 'terminate_connection',
+ lambda *a, **kw: None)
+ self.stubs.Set(self.compute.volume_api, 'attach',
+ lambda *a, **kw: None)
+ self.stubs.Set(self.compute.volume_api, 'detach',
+ lambda *a, **kw: None)
+ self.stubs.Set(self.compute.volume_api, 'check_attach',
+ lambda *a, **kw: None)
+ self.stubs.Set(greenthread, 'sleep',
+ lambda *a, **kw: None)
+
+ def store_cinfo(context, *args, **kwargs):
+ self.cinfo = jsonutils.loads(args[-1].get('connection_info'))
+ return self.fake_volume
+
+ self.stubs.Set(self.compute.conductor_api,
+ 'block_device_mapping_update',
+ store_cinfo)
+ self.stubs.Set(self.compute.conductor_api,
+ 'block_device_mapping_update_or_create',
+ store_cinfo)
+ self.stubs.Set(db, 'block_device_mapping_create', store_cinfo)
+ self.stubs.Set(db, 'block_device_mapping_update', store_cinfo)
+
+ def test_attach_volume_serial(self):
+ fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
+ with (mock.patch.object(cinder.API, 'get_volume_encryption_metadata',
+ return_value={})):
+ instance = self._create_fake_instance_obj()
+ self.compute.attach_volume(self.context, self.volume_id,
+ '/dev/vdb', instance, bdm=fake_bdm)
+ self.assertEqual(self.cinfo.get('serial'), self.volume_id)
+
+ def test_attach_volume_raises(self):
+ fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
+ instance = self._create_fake_instance_obj()
+
+ def fake_attach(*args, **kwargs):
+ raise test.TestingException
+
+ with contextlib.nested(
+ mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
+ 'attach'),
+ mock.patch.object(cinder.API, 'unreserve_volume'),
+ mock.patch.object(objects.BlockDeviceMapping,
+ 'destroy')
+ ) as (mock_attach, mock_unreserve, mock_destroy):
+ mock_attach.side_effect = fake_attach
+ self.assertRaises(
+ test.TestingException, self.compute.attach_volume,
+ self.context, 'fake', '/dev/vdb',
+ instance, bdm=fake_bdm)
+ self.assertTrue(mock_unreserve.called)
+ self.assertTrue(mock_destroy.called)
+
+ def test_detach_volume_api_raises(self):
+ fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
+ instance = self._create_fake_instance()
+
+ with contextlib.nested(
+ mock.patch.object(self.compute, '_detach_volume'),
+ mock.patch.object(self.compute.volume_api, 'detach'),
+ mock.patch.object(objects.BlockDeviceMapping,
+ 'get_by_volume_id'),
+ mock.patch.object(fake_bdm, 'destroy')
+ ) as (mock_internal_detach, mock_detach, mock_get, mock_destroy):
+ mock_detach.side_effect = test.TestingException
+ mock_get.return_value = fake_bdm
+ self.assertRaises(
+ test.TestingException, self.compute.detach_volume,
+ self.context, 'fake', instance)
+ mock_internal_detach.assert_called_once_with(self.context,
+ instance,
+ fake_bdm)
+ self.assertTrue(mock_destroy.called)
+
+ def test_attach_volume_no_bdm(self):
+ fake_bdm = objects.BlockDeviceMapping(**self.fake_volume)
+ instance = self._create_fake_instance_obj()
+
+ with contextlib.nested(
+ mock.patch.object(objects.BlockDeviceMapping,
+ 'get_by_volume_id', return_value=fake_bdm),
+ mock.patch.object(self.compute, '_attach_volume')
+ ) as (mock_get_by_id, mock_attach):
+ self.compute.attach_volume(self.context, 'fake', '/dev/vdb',
+ instance, bdm=None)
+ mock_get_by_id.assert_called_once_with(self.context, 'fake')
+ self.assertTrue(mock_attach.called)
+
+ def test_await_block_device_created_too_slow(self):
+ self.flags(block_device_allocate_retries=2)
+ self.flags(block_device_allocate_retries_interval=0.1)
+
+ def never_get(context, vol_id):
+ return {
+ 'status': 'creating',
+ 'id': 'blah',
+ }
+
+ self.stubs.Set(self.compute.volume_api, 'get', never_get)
+ self.assertRaises(exception.VolumeNotCreated,
+ self.compute._await_block_device_map_created,
+ self.context, '1')
+
+ def test_await_block_device_created_slow(self):
+ c = self.compute
+ self.flags(block_device_allocate_retries=4)
+ self.flags(block_device_allocate_retries_interval=0.1)
+
+ def slow_get(context, vol_id):
+ if self.fetched_attempts < 2:
+ self.fetched_attempts += 1
+ return {
+ 'status': 'creating',
+ 'id': 'blah',
+ }
+ return {
+ 'status': 'available',
+ 'id': 'blah',
+ }
+
+ self.stubs.Set(c.volume_api, 'get', slow_get)
+ attempts = c._await_block_device_map_created(self.context, '1')
+ self.assertEqual(attempts, 3)
+
+ def test_await_block_device_created_retries_negative(self):
+ c = self.compute
+ self.flags(block_device_allocate_retries=-1)
+ self.flags(block_device_allocate_retries_interval=0.1)
+
+ def volume_get(context, vol_id):
+ return {
+ 'status': 'available',
+ 'id': 'blah',
+ }
+
+ self.stubs.Set(c.volume_api, 'get', volume_get)
+ attempts = c._await_block_device_map_created(self.context, '1')
+ self.assertEqual(1, attempts)
+
+ def test_await_block_device_created_retries_zero(self):
+ c = self.compute
+ self.flags(block_device_allocate_retries=0)
+ self.flags(block_device_allocate_retries_interval=0.1)
+
+ def volume_get(context, vol_id):
+ return {
+ 'status': 'available',
+ 'id': 'blah',
+ }
+
+ self.stubs.Set(c.volume_api, 'get', volume_get)
+ attempts = c._await_block_device_map_created(self.context, '1')
+ self.assertEqual(1, attempts)
+
+ def test_boot_volume_serial(self):
+ with (
+ mock.patch.object(objects.BlockDeviceMapping, 'save')
+ ) as mock_save:
+ block_device_mapping = [
+ block_device.BlockDeviceDict({
+ 'id': 1,
+ 'no_device': None,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'snapshot_id': None,
+ 'volume_id': self.volume_id,
+ 'device_name': '/dev/vdb',
+ 'delete_on_termination': False,
+ })]
+ prepped_bdm = self.compute._prep_block_device(
+ self.context, self.instance, block_device_mapping)
+ mock_save.assert_called_once_with(self.context)
+ volume_driver_bdm = prepped_bdm['block_device_mapping'][0]
+ self.assertEqual(volume_driver_bdm['connection_info']['serial'],
+ self.volume_id)
+
+ def test_boot_volume_metadata(self, metadata=True):
+ def volume_api_get(*args, **kwargs):
+ if metadata:
+ return {
+ 'size': 1,
+ 'volume_image_metadata': {'vol_test_key': 'vol_test_value',
+ 'min_ram': u'128',
+ 'min_disk': u'256',
+ 'size': u'536870912'
+ },
+ }
+ else:
+ return {}
+
+ self.stubs.Set(self.compute_api.volume_api, 'get', volume_api_get)
+
+ expected_no_metadata = {'min_disk': 0, 'min_ram': 0, 'properties': {},
+ 'size': 0, 'status': 'active'}
+
+ block_device_mapping = [{
+ 'id': 1,
+ 'device_name': 'vda',
+ 'no_device': None,
+ 'virtual_name': None,
+ 'snapshot_id': None,
+ 'volume_id': self.volume_id,
+ 'delete_on_termination': False,
+ }]
+
+ image_meta = self.compute_api._get_bdm_image_metadata(
+ self.context, block_device_mapping)
+ if metadata:
+ self.assertEqual(image_meta['properties']['vol_test_key'],
+ 'vol_test_value')
+ self.assertEqual(128, image_meta['min_ram'])
+ self.assertEqual(256, image_meta['min_disk'])
+ self.assertEqual(units.Gi, image_meta['size'])
+ else:
+ self.assertEqual(expected_no_metadata, image_meta)
+
+ # Test it with new-style BDMs
+ block_device_mapping = [{
+ 'boot_index': 0,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'volume_id': self.volume_id,
+ 'delete_on_termination': False,
+ }]
+
+ image_meta = self.compute_api._get_bdm_image_metadata(
+ self.context, block_device_mapping, legacy_bdm=False)
+ if metadata:
+ self.assertEqual(image_meta['properties']['vol_test_key'],
+ 'vol_test_value')
+ self.assertEqual(128, image_meta['min_ram'])
+ self.assertEqual(256, image_meta['min_disk'])
+ self.assertEqual(units.Gi, image_meta['size'])
+ else:
+ self.assertEqual(expected_no_metadata, image_meta)
+
+ def test_boot_volume_no_metadata(self):
+ self.test_boot_volume_metadata(metadata=False)
+
+ def test_boot_image_metadata(self, metadata=True):
+ def image_api_get(*args, **kwargs):
+ if metadata:
+ return {
+ 'properties': {'img_test_key': 'img_test_value'}
+ }
+ else:
+ return {}
+
+ self.stubs.Set(self.compute_api.image_api, 'get', image_api_get)
+
+ block_device_mapping = [{
+ 'boot_index': 0,
+ 'source_type': 'image',
+ 'destination_type': 'local',
+ 'image_id': "fake-image",
+ 'delete_on_termination': True,
+ }]
+
+ image_meta = self.compute_api._get_bdm_image_metadata(
+ self.context, block_device_mapping, legacy_bdm=False)
+
+ if metadata:
+ self.assertEqual('img_test_value',
+ image_meta['properties']['img_test_key'])
+ else:
+ self.assertEqual(image_meta, {})
+
+ def test_boot_image_no_metadata(self):
+ self.test_boot_image_metadata(metadata=False)
+
+ def test_poll_bandwidth_usage_not_implemented(self):
+ ctxt = context.get_admin_context()
+
+ self.mox.StubOutWithMock(self.compute.driver, 'get_all_bw_counters')
+ self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
+ self.mox.StubOutWithMock(time, 'time')
+ self.mox.StubOutWithMock(objects.InstanceList, 'get_by_host')
+ # Following methods will be called
+ utils.last_completed_audit_period().AndReturn((0, 0))
+ time.time().AndReturn(10)
+        # Note - time.time() is called two more times by logging
+ time.time().AndReturn(20)
+ time.time().AndReturn(21)
+ objects.InstanceList.get_by_host(ctxt, 'fake-mini',
+ use_slave=True).AndReturn([])
+ self.compute.driver.get_all_bw_counters([]).AndRaise(
+ NotImplementedError)
+ self.mox.ReplayAll()
+
+ self.flags(bandwidth_poll_interval=1)
+ self.compute._poll_bandwidth_usage(ctxt)
+ # A second call won't call the stubs again as the bandwidth
+ # poll is now disabled
+ self.compute._poll_bandwidth_usage(ctxt)
+ self.mox.UnsetStubs()
+
+ @mock.patch.object(objects.InstanceList, 'get_by_host')
+ @mock.patch.object(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')
+ def test_get_host_volume_bdms(self, mock_get_by_inst, mock_get_by_host):
+ fake_instance = mock.Mock(uuid='fake-instance-uuid')
+ mock_get_by_host.return_value = [fake_instance]
+
+ volume_bdm = mock.Mock(id=1, is_volume=True)
+ not_volume_bdm = mock.Mock(id=2, is_volume=False)
+ mock_get_by_inst.return_value = [volume_bdm, not_volume_bdm]
+
+ expected_host_bdms = [{'instance': fake_instance,
+ 'instance_bdms': [volume_bdm]}]
+
+ got_host_bdms = self.compute._get_host_volume_bdms('fake-context')
+ mock_get_by_host.assert_called_once_with('fake-context',
+ self.compute.host)
+ mock_get_by_inst.assert_called_once_with('fake-context',
+ 'fake-instance-uuid',
+ use_slave=False)
+ self.assertEqual(expected_host_bdms, got_host_bdms)
+
+ def test_poll_volume_usage_disabled(self):
+ ctxt = 'MockContext'
+ self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
+ self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
+ # None of the mocks should be called.
+ self.mox.ReplayAll()
+
+ self.flags(volume_usage_poll_interval=0)
+ self.compute._poll_volume_usage(ctxt)
+ self.mox.UnsetStubs()
+
+ def test_poll_volume_usage_returns_no_vols(self):
+ ctxt = 'MockContext'
+ self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
+ self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
+ self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
+ # Following methods are called.
+ utils.last_completed_audit_period().AndReturn((0, 0))
+ self.compute._get_host_volume_bdms(ctxt, use_slave=True).AndReturn([])
+ self.mox.ReplayAll()
+
+ self.flags(volume_usage_poll_interval=10)
+ self.compute._poll_volume_usage(ctxt)
+ self.mox.UnsetStubs()
+
+ def test_poll_volume_usage_with_data(self):
+ ctxt = 'MockContext'
+ self.mox.StubOutWithMock(utils, 'last_completed_audit_period')
+ self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
+ self.mox.StubOutWithMock(self.compute, '_update_volume_usage_cache')
+ self.stubs.Set(self.compute.driver, 'get_all_volume_usage',
+ lambda x, y: [3, 4])
+ # All the mocks are called
+ utils.last_completed_audit_period().AndReturn((10, 20))
+ self.compute._get_host_volume_bdms(ctxt,
+ use_slave=True).AndReturn([1, 2])
+ self.compute._update_volume_usage_cache(ctxt, [3, 4])
+ self.mox.ReplayAll()
+ self.flags(volume_usage_poll_interval=10)
+ self.compute._poll_volume_usage(ctxt)
+ self.mox.UnsetStubs()
+
+ def test_detach_volume_usage(self):
+        # Test that detach volume updates the volume usage cache table correctly
+ instance = self._create_fake_instance_obj()
+ bdm = fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1, 'device_name': '/dev/vdb',
+ 'connection_info': '{}', 'instance_uuid': instance['uuid'],
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'volume_id': 1})
+ host_volume_bdms = {'id': 1, 'device_name': '/dev/vdb',
+ 'connection_info': '{}', 'instance_uuid': instance['uuid'],
+ 'volume_id': 1}
+
+ self.mox.StubOutWithMock(db, 'block_device_mapping_get_by_volume_id')
+ self.mox.StubOutWithMock(self.compute.driver, 'block_stats')
+ self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
+ self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
+
+ # The following methods will be called
+ db.block_device_mapping_get_by_volume_id(self.context, 1, []).\
+ AndReturn(bdm)
+ self.compute.driver.block_stats(instance['name'], 'vdb').\
+ AndReturn([1L, 30L, 1L, 20L, None])
+ self.compute._get_host_volume_bdms(self.context,
+ use_slave=True).AndReturn(
+ host_volume_bdms)
+ self.compute.driver.get_all_volume_usage(
+ self.context, host_volume_bdms).AndReturn(
+ [{'volume': 1,
+ 'rd_req': 1,
+ 'rd_bytes': 10,
+ 'wr_req': 1,
+ 'wr_bytes': 5,
+ 'instance': instance}])
+ db.block_device_mapping_get_by_volume_id(self.context, 1, []).\
+ AndReturn(bdm)
+
+ self.mox.ReplayAll()
+
+ def fake_get_volume_encryption_metadata(self, context, volume_id):
+ return {}
+ self.stubs.Set(cinder.API, 'get_volume_encryption_metadata',
+ fake_get_volume_encryption_metadata)
+
+ self.compute.attach_volume(self.context, 1, '/dev/vdb', instance)
+
+ # Poll volume usage & then detach the volume. This will update the
+ # total fields in the volume usage cache.
+ self.flags(volume_usage_poll_interval=10)
+ self.compute._poll_volume_usage(self.context)
+ # Check that a volume.usage and volume.attach notification was sent
+ self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
+
+ self.compute.detach_volume(self.context, 1, instance)
+
+ # Check that volume.attach, 2 volume.usage, and volume.detach
+ # notifications were sent
+ self.assertEqual(4, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('compute.instance.volume.attach', msg.event_type)
+ msg = fake_notifier.NOTIFICATIONS[2]
+ self.assertEqual('volume.usage', msg.event_type)
+ payload = msg.payload
+ self.assertEqual(instance['uuid'], payload['instance_id'])
+ self.assertEqual('fake', payload['user_id'])
+ self.assertEqual('fake', payload['tenant_id'])
+ self.assertEqual(1, payload['reads'])
+ self.assertEqual(30, payload['read_bytes'])
+ self.assertEqual(1, payload['writes'])
+ self.assertEqual(20, payload['write_bytes'])
+ self.assertIsNone(payload['availability_zone'])
+ msg = fake_notifier.NOTIFICATIONS[3]
+ self.assertEqual('compute.instance.volume.detach', msg.event_type)
+
+        # Check the database for the accumulated volume usage totals
+ volume_usages = db.vol_get_usage_by_time(self.context, 0)
+ self.assertEqual(1, len(volume_usages))
+ volume_usage = volume_usages[0]
+ self.assertEqual(0, volume_usage['curr_reads'])
+ self.assertEqual(0, volume_usage['curr_read_bytes'])
+ self.assertEqual(0, volume_usage['curr_writes'])
+ self.assertEqual(0, volume_usage['curr_write_bytes'])
+ self.assertEqual(1, volume_usage['tot_reads'])
+ self.assertEqual(30, volume_usage['tot_read_bytes'])
+ self.assertEqual(1, volume_usage['tot_writes'])
+ self.assertEqual(20, volume_usage['tot_write_bytes'])
+
+ def test_prepare_image_mapping(self):
+ swap_size = 1
+ ephemeral_size = 1
+ instance_type = {'swap': swap_size,
+ 'ephemeral_gb': ephemeral_size}
+ mappings = [
+ {'virtual': 'ami', 'device': 'sda1'},
+ {'virtual': 'root', 'device': '/dev/sda1'},
+
+ {'virtual': 'swap', 'device': 'sdb4'},
+
+ {'virtual': 'ephemeral0', 'device': 'sdc1'},
+ {'virtual': 'ephemeral1', 'device': 'sdc2'},
+ ]
+
+ preped_bdm = self.compute_api._prepare_image_mapping(
+ instance_type, mappings)
+
+ expected_result = [
+ {
+ 'device_name': '/dev/sdb4',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'guest_format': 'swap',
+ 'boot_index': -1,
+ 'volume_size': swap_size
+ },
+ {
+ 'device_name': '/dev/sdc1',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'guest_format': CONF.default_ephemeral_format,
+ 'boot_index': -1,
+ 'volume_size': ephemeral_size
+ },
+ {
+ 'device_name': '/dev/sdc2',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'guest_format': CONF.default_ephemeral_format,
+ 'boot_index': -1,
+ 'volume_size': ephemeral_size
+ }
+ ]
+
+ for expected, got in zip(expected_result, preped_bdm):
+ self.assertThat(expected, matchers.IsSubDictOf(got))
+
+ def test_validate_bdm(self):
+ def fake_get(self, context, res_id):
+ return {'id': res_id}
+
+ def fake_check_attach(*args, **kwargs):
+ pass
+
+ self.stubs.Set(cinder.API, 'get', fake_get)
+ self.stubs.Set(cinder.API, 'get_snapshot', fake_get)
+ self.stubs.Set(cinder.API, 'check_attach',
+ fake_check_attach)
+
+ volume_id = '55555555-aaaa-bbbb-cccc-555555555555'
+ snapshot_id = '66666666-aaaa-bbbb-cccc-555555555555'
+ image_id = '77777777-aaaa-bbbb-cccc-555555555555'
+
+ instance = self._create_fake_instance()
+ instance_type = {'swap': 1, 'ephemeral_gb': 2}
+ mappings = [
+ {
+ 'device_name': '/dev/sdb4',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'guest_format': 'swap',
+ 'boot_index': -1,
+ 'volume_size': 1
+ },
+ {
+ 'device_name': '/dev/sda1',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_type': 'disk',
+ 'volume_id': volume_id,
+ 'guest_format': None,
+ 'boot_index': 1,
+ 'volume_size': 6
+ },
+ {
+ 'device_name': '/dev/sda2',
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'snapshot_id': snapshot_id,
+ 'device_type': 'disk',
+ 'guest_format': None,
+ 'boot_index': 0,
+ 'volume_size': 4
+ },
+ {
+ 'device_name': '/dev/sda3',
+ 'source_type': 'image',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'guest_format': None,
+ 'boot_index': 2,
+ 'volume_size': 1
+ }
+ ]
+
+ # Make sure it passes at first
+ self.compute_api._validate_bdm(self.context, instance,
+ instance_type, mappings)
+
+ # Boot sequence
+ mappings[2]['boot_index'] = 2
+ self.assertRaises(exception.InvalidBDMBootSequence,
+ self.compute_api._validate_bdm,
+ self.context, instance, instance_type,
+ mappings)
+ mappings[2]['boot_index'] = 0
+
+ # number of local block_devices
+ self.flags(max_local_block_devices=1)
+ self.assertRaises(exception.InvalidBDMLocalsLimit,
+ self.compute_api._validate_bdm,
+ self.context, instance, instance_type,
+ mappings)
+ ephemerals = [
+ {
+ 'device_name': '/dev/vdb',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'volume_id': volume_id,
+ 'guest_format': None,
+ 'boot_index': -1,
+ 'volume_size': 1
+ },
+ {
+ 'device_name': '/dev/vdc',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'volume_id': volume_id,
+ 'guest_format': None,
+ 'boot_index': -1,
+ 'volume_size': 1
+ }]
+
+ self.flags(max_local_block_devices=4)
+ # More ephemerals are OK as long as they are not over the size limit
+ self.compute_api._validate_bdm(self.context, instance,
+ instance_type, mappings + ephemerals)
+
+ # Ephemerals over the size limit
+ ephemerals[0]['volume_size'] = 3
+ self.assertRaises(exception.InvalidBDMEphemeralSize,
+ self.compute_api._validate_bdm,
+ self.context, instance, instance_type,
+ mappings + ephemerals)
+ self.assertRaises(exception.InvalidBDMEphemeralSize,
+ self.compute_api._validate_bdm,
+ self.context, instance, instance_type,
+ mappings + [ephemerals[0]])
+
+ # Swap over the size limit
+ mappings[0]['volume_size'] = 3
+ self.assertRaises(exception.InvalidBDMSwapSize,
+ self.compute_api._validate_bdm,
+ self.context, instance, instance_type,
+ mappings)
+ mappings[0]['volume_size'] = 1
+
+ additional_swap = [
+ {
+ 'device_name': '/dev/vdb',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'guest_format': 'swap',
+ 'boot_index': -1,
+ 'volume_size': 1
+ }]
+
+ # More than one swap
+ self.assertRaises(exception.InvalidBDMFormat,
+ self.compute_api._validate_bdm,
+ self.context, instance, instance_type,
+ mappings + additional_swap)
+
+ image_no_size = [
+ {
+ 'device_name': '/dev/sda4',
+ 'source_type': 'image',
+ 'image_id': image_id,
+ 'destination_type': 'volume',
+ 'boot_index': -1,
+ 'volume_size': None,
+ }]
+ self.assertRaises(exception.InvalidBDM,
+ self.compute_api._validate_bdm,
+ self.context, instance, instance_type,
+ mappings + image_no_size)
+
+ def test_validate_bdm_media_service_exceptions(self):
+ # Exercise the cinder volume-state checks in _validate_bdm for a
+ # single boot-from-volume mapping: bad status/attach_status raises
+ # InvalidVolume, a missing volume raises InvalidBDMVolume, and an
+ # 'available'/'detached' volume is accepted.
+ instance_type = {'swap': 1, 'ephemeral_gb': 1}
+ all_mappings = [{'id': 1,
+ 'no_device': None,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'snapshot_id': None,
+ 'volume_id': self.volume_id,
+ 'device_name': 'vda',
+ 'boot_index': 0,
+ 'delete_on_termination': False}]
+
+ # First we test a list of invalid status values that should result
+ # in an InvalidVolume exception being raised.
+ status_values = (
+ # First two check that the status is 'available'.
+ ('creating', 'detached'),
+ ('error', 'detached'),
+ # Checks that the attach_status is 'detached'.
+ ('available', 'attached')
+ )
+
+ for status, attach_status in status_values:
+ # The closure reads status/attach_status from the loop scope;
+ # the stub is re-set each iteration so late binding is harmless.
+ def fake_volume_get(self, ctxt, volume_id):
+ return {'id': volume_id,
+ 'status': status,
+ 'attach_status': attach_status}
+ self.stubs.Set(cinder.API, 'get', fake_volume_get)
+ self.assertRaises(exception.InvalidVolume,
+ self.compute_api._validate_bdm,
+ self.context, self.instance,
+ instance_type, all_mappings)
+
+ # Now we test a 404 case that results in InvalidBDMVolume.
+ # NOTE(review): VolumeNotFound is usually raised with
+ # volume_id=<id> as a keyword; the positional form relies on
+ # NovaException's message handling — confirm.
+ def fake_volume_get_not_found(self, context, volume_id):
+ raise exception.VolumeNotFound(volume_id)
+
+ self.stubs.Set(cinder.API, 'get', fake_volume_get_not_found)
+ self.assertRaises(exception.InvalidBDMVolume,
+ self.compute_api._validate_bdm,
+ self.context, self.instance,
+ instance_type, all_mappings)
+
+ # Check that the volume status is 'available' and attach_status is
+ # 'detached' and accept the request if so
+ def fake_volume_get_ok(self, context, volume_id):
+ return {'id': volume_id,
+ 'status': 'available',
+ 'attach_status': 'detached'}
+ self.stubs.Set(cinder.API, 'get', fake_volume_get_ok)
+
+ self.compute_api._validate_bdm(self.context, self.instance,
+ instance_type, all_mappings)
+
+ def test_volume_snapshot_create(self):
+ # volume_snapshot_create is not implemented by the test driver;
+ # over RPC the NotImplementedError is wrapped in
+ # messaging.ExpectedException, and ExceptionHelper unwraps it back
+ # to the original exception.
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.volume_snapshot_create, self.context,
+ self.instance_object, 'fake_id', {})
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(NotImplementedError,
+ self.compute.volume_snapshot_create, self.context,
+ self.instance_object, 'fake_id', {})
+
+ def test_volume_snapshot_delete(self):
+ # Same wrapped/unwrapped exception pairing as
+ # test_volume_snapshot_create, for volume_snapshot_delete.
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.volume_snapshot_delete, self.context,
+ self.instance_object, 'fake_id', 'fake_id2', {})
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(NotImplementedError,
+ self.compute.volume_snapshot_delete, self.context,
+ self.instance_object, 'fake_id', 'fake_id2', {})
+
+ @mock.patch.object(cinder.API, 'create',
+ side_effect=exception.OverQuota(overs='volumes'))
+ def test_prep_block_device_over_quota_failure(self, mock_create):
+ # When cinder volume creation fails with OverQuota during
+ # _prep_block_device, the manager must surface it as InvalidBDM.
+ instance = self._create_fake_instance()
+ bdms = [
+ block_device.BlockDeviceDict({
+ 'boot_index': 0,
+ 'guest_format': None,
+ 'connection_info': None,
+ 'device_type': u'disk',
+ 'source_type': 'image',
+ 'destination_type': 'volume',
+ 'volume_size': 1,
+ 'image_id': 1,
+ 'device_name': '/dev/vdb',
+ })]
+ self.assertRaises(exception.InvalidBDM,
+ compute_manager.ComputeManager()._prep_block_device,
+ self.context, instance, bdms)
+ # Prove the failure actually came from the mocked cinder create.
+ self.assertTrue(mock_create.called)
+
+ @mock.patch.object(nova.virt.block_device, 'get_swap')
+ @mock.patch.object(nova.virt.block_device, 'convert_blanks')
+ @mock.patch.object(nova.virt.block_device, 'convert_images')
+ @mock.patch.object(nova.virt.block_device, 'convert_snapshots')
+ @mock.patch.object(nova.virt.block_device, 'convert_volumes')
+ @mock.patch.object(nova.virt.block_device, 'convert_ephemerals')
+ @mock.patch.object(nova.virt.block_device, 'convert_swap')
+ @mock.patch.object(nova.virt.block_device, 'attach_block_devices')
+ def test_prep_block_device_with_blanks(self, attach_block_devices,
+ convert_swap, convert_ephemerals,
+ convert_volumes, convert_snapshots,
+ convert_images, convert_blanks,
+ get_swap):
+ # _prep_block_device must pass the full bdm list to every
+ # convert_* helper and aggregate their results into the
+ # block_device_info dict; blank volumes here come back via
+ # convert_volumes rather than convert_blanks.
+ instance = self._create_fake_instance()
+ instance['root_device_name'] = '/dev/vda'
+ root_volume = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'instance_uuid': 'fake-instance',
+ 'source_type': 'image',
+ 'destination_type': 'volume',
+ 'image_id': 'fake-image-id-1',
+ 'volume_size': 1,
+ 'boot_index': 0}))
+ blank_volume1 = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'instance_uuid': 'fake-instance',
+ 'source_type': 'blank',
+ 'destination_type': 'volume',
+ 'volume_size': 1,
+ 'boot_index': 1}))
+ blank_volume2 = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'instance_uuid': 'fake-instance',
+ 'source_type': 'blank',
+ 'destination_type': 'volume',
+ 'volume_size': 1,
+ 'boot_index': 2}))
+ bdms = [blank_volume1, blank_volume2, root_volume]
+
+ # Attach is stubbed to be the identity so the mappings flow
+ # through unchanged.
+ def fake_attach_block_devices(bdm, *args, **kwargs):
+ return bdm
+
+ convert_swap.return_value = []
+ convert_ephemerals.return_value = []
+ convert_volumes.return_value = [blank_volume1, blank_volume2]
+ convert_snapshots.return_value = []
+ convert_images.return_value = [root_volume]
+ convert_blanks.return_value = []
+ attach_block_devices.side_effect = fake_attach_block_devices
+ get_swap.return_value = []
+
+ expected_block_device_info = {
+ 'root_device_name': '/dev/vda',
+ 'swap': [],
+ 'ephemerals': [],
+ 'block_device_mapping': bdms
+ }
+
+ manager = compute_manager.ComputeManager()
+ manager.use_legacy_block_device_info = False
+ block_device_info = manager._prep_block_device(self.context, instance,
+ bdms)
+
+ convert_swap.assert_called_once_with(bdms)
+ convert_ephemerals.assert_called_once_with(bdms)
+ convert_volumes.assert_called_once_with(bdms)
+ convert_snapshots.assert_called_once_with(bdms)
+ convert_images.assert_called_once_with(bdms)
+ convert_blanks.assert_called_once_with(bdms)
+
+ self.assertEqual(expected_block_device_info, block_device_info)
+ # One attach per device category that returned mappings:
+ # volumes (x2 in one call set), images, swap, ephemerals —
+ # presumably four attach invocations total; verify count only.
+ self.assertEqual(4, attach_block_devices.call_count)
+ get_swap.assert_called_once_with([])
+
+
+class ComputeTestCase(BaseTestCase):
+ def test_wrap_instance_fault(self):
+ # wrap_instance_fault must record an instance fault (via
+ # compute_utils.add_instance_fault_from_exc) when the wrapped
+ # method raises, and re-raise the original exception.
+ inst = {"uuid": "fake_uuid"}
+
+ called = {'fault_added': False}
+
+ def did_it_add_fault(*args):
+ called['fault_added'] = True
+
+ self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
+ did_it_add_fault)
+
+ @compute_manager.wrap_instance_fault
+ def failer(self2, context, instance):
+ raise NotImplementedError()
+
+ self.assertRaises(NotImplementedError, failer,
+ self.compute, self.context, instance=inst)
+
+ self.assertTrue(called['fault_added'])
+
+ def test_wrap_instance_fault_instance_in_args(self):
+ # Same as test_wrap_instance_fault, but the instance is passed
+ # positionally instead of as a keyword argument.
+ inst = {"uuid": "fake_uuid"}
+
+ called = {'fault_added': False}
+
+ def did_it_add_fault(*args):
+ called['fault_added'] = True
+
+ self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
+ did_it_add_fault)
+
+ @compute_manager.wrap_instance_fault
+ def failer(self2, context, instance):
+ raise NotImplementedError()
+
+ self.assertRaises(NotImplementedError, failer,
+ self.compute, self.context, inst)
+
+ self.assertTrue(called['fault_added'])
+
+ def test_wrap_instance_fault_no_instance(self):
+ # If the wrapped method raises InstanceNotFound the instance is
+ # gone, so no fault record must be created.
+ inst = {"uuid": "fake_uuid"}
+
+ called = {'fault_added': False}
+
+ def did_it_add_fault(*args):
+ called['fault_added'] = True
+
+ self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
+ did_it_add_fault)
+
+ @compute_manager.wrap_instance_fault
+ def failer(self2, context, instance):
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ self.assertRaises(exception.InstanceNotFound, failer,
+ self.compute, self.context, inst)
+
+ self.assertFalse(called['fault_added'])
+
+ @mock.patch.object(objects.InstanceActionEvent, 'event_start')
+ @mock.patch.object(objects.InstanceActionEvent,
+ 'event_finish_with_failure')
+ def test_wrap_instance_event(self, mock_finish, mock_start):
+ # wrap_instance_event must open and close an InstanceActionEvent
+ # around a successful call.
+ inst = {"uuid": "fake_uuid"}
+
+ @compute_manager.wrap_instance_event
+ def fake_event(self, context, instance):
+ pass
+
+ fake_event(self.compute, self.context, instance=inst)
+
+ self.assertTrue(mock_start.called)
+ self.assertTrue(mock_finish.called)
+
+ @mock.patch.object(objects.InstanceActionEvent, 'event_start')
+ @mock.patch.object(objects.InstanceActionEvent,
+ 'event_finish_with_failure')
+ def test_wrap_instance_event_return(self, mock_finish, mock_start):
+ # The decorator must pass the wrapped method's return value
+ # through to the caller.
+ inst = {"uuid": "fake_uuid"}
+
+ @compute_manager.wrap_instance_event
+ def fake_event(self, context, instance):
+ return True
+
+ retval = fake_event(self.compute, self.context, instance=inst)
+
+ self.assertTrue(retval)
+ self.assertTrue(mock_start.called)
+ self.assertTrue(mock_finish.called)
+
+ @mock.patch.object(objects.InstanceActionEvent, 'event_start')
+ @mock.patch.object(objects.InstanceActionEvent,
+ 'event_finish_with_failure')
+ def test_wrap_instance_event_log_exception(self, mock_finish, mock_start):
+ # On failure the event is finished with the raised exception
+ # recorded as exc_val, and the exception is re-raised.
+ inst = {"uuid": "fake_uuid"}
+
+ @compute_manager.wrap_instance_event
+ def fake_event(self2, context, instance):
+ raise exception.NovaException()
+
+ self.assertRaises(exception.NovaException, fake_event,
+ self.compute, self.context, instance=inst)
+
+ self.assertTrue(mock_start.called)
+ self.assertTrue(mock_finish.called)
+ args, kwargs = mock_finish.call_args
+ self.assertIsInstance(kwargs['exc_val'], exception.NovaException)
+
+ def test_object_compat(self):
+ # object_compat must convert a legacy db-dict instance (passed as
+ # a keyword) into an objects.Instance before calling the method.
+ db_inst = fake_instance.fake_db_instance()
+
+ @compute_manager.object_compat
+ def test_fn(_self, context, instance):
+ self.assertIsInstance(instance, objects.Instance)
+ self.assertEqual(instance.uuid, db_inst['uuid'])
+ test_fn(None, self.context, instance=db_inst)
+
+ def test_object_compat_more_positional_args(self):
+ # Same conversion, with the instance positional and extra
+ # positional args that must be forwarded untouched.
+ db_inst = fake_instance.fake_db_instance()
+
+ @compute_manager.object_compat
+ def test_fn(_self, context, instance, pos_arg_1, pos_arg_2):
+ self.assertIsInstance(instance, objects.Instance)
+ self.assertEqual(instance.uuid, db_inst['uuid'])
+ self.assertEqual(pos_arg_1, 'fake_pos_arg1')
+ self.assertEqual(pos_arg_2, 'fake_pos_arg2')
+
+ test_fn(None, self.context, db_inst, 'fake_pos_arg1', 'fake_pos_arg2')
+
+ def test_create_instance_with_img_ref_associates_config_drive(self):
+ # Make sure create associates a config drive.
+
+ instance = self._create_fake_instance_obj(
+ params={'config_drive': '1234', })
+
+ try:
+ self.compute.run_instance(self.context, instance, {}, {},
+ [], None, None, True, None, False)
+ instances = db.instance_get_all(self.context)
+ instance = instances[0]
+
+ self.assertTrue(instance['config_drive'])
+ finally:
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_create_instance_associates_config_drive(self):
+ # Make sure create associates a config drive.
+ # NOTE(review): this body is identical to
+ # test_create_instance_with_img_ref_associates_config_drive —
+ # presumably one of the two was meant to boot without an image
+ # ref; confirm and differentiate or drop the duplicate.
+
+ instance = self._create_fake_instance_obj(
+ params={'config_drive': '1234', })
+
+ try:
+ self.compute.run_instance(self.context, instance, {}, {},
+ [], None, None, True, None, False)
+ instances = db.instance_get_all(self.context)
+ instance = instances[0]
+
+ self.assertTrue(instance['config_drive'])
+ finally:
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_create_instance_unlimited_memory(self):
+ # Default of memory limit=None is unlimited.
+ self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
+ self.rt.update_available_resource(self.context.elevated())
+ params = {"memory_mb": 999999999999}
+ filter_properties = {'limits': {'memory_mb': None}}
+ instance = self._create_fake_instance_obj(params)
+ self.compute.run_instance(self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+ # The resource tracker accounted the full (absurd) memory claim.
+ self.assertEqual(999999999999, self.rt.compute_node['memory_mb_used'])
+
+ def test_create_instance_unlimited_disk(self):
+ # A disk_gb limit of None likewise disables the disk claim check.
+ self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
+ self.rt.update_available_resource(self.context.elevated())
+ params = {"root_gb": 999999999999,
+ "ephemeral_gb": 99999999999}
+ filter_properties = {'limits': {'disk_gb': None}}
+ instance = self._create_fake_instance_obj(params)
+ self.compute.run_instance(self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+
+ def test_create_multiple_instances_then_starve(self):
+ # With finite limits, successive boots accumulate usage until a
+ # claim exceeds the limit and raises ComputeResourcesUnavailable.
+ self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
+ self.rt.update_available_resource(self.context.elevated())
+ filter_properties = {'limits': {'memory_mb': 4096, 'disk_gb': 1000}}
+ params = {"memory_mb": 1024, "root_gb": 128, "ephemeral_gb": 128}
+ instance = self._create_fake_instance_obj(params)
+ self.compute.run_instance(self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+ self.assertEqual(1024, self.rt.compute_node['memory_mb_used'])
+ # local_gb_used = root_gb + ephemeral_gb.
+ self.assertEqual(256, self.rt.compute_node['local_gb_used'])
+
+ params = {"memory_mb": 2048, "root_gb": 256, "ephemeral_gb": 256}
+ instance = self._create_fake_instance_obj(params)
+ self.compute.run_instance(self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+ self.assertEqual(3072, self.rt.compute_node['memory_mb_used'])
+ self.assertEqual(768, self.rt.compute_node['local_gb_used'])
+
+ # Third boot would exceed the 4096 MB memory limit.
+ params = {"memory_mb": 8192, "root_gb": 8192, "ephemeral_gb": 8192}
+ instance = self._create_fake_instance_obj(params)
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self.compute.run_instance, self.context, instance,
+ {}, filter_properties, [], None, None, True, None, False)
+
+ def test_create_multiple_instance_with_neutron_port(self):
+ # Requesting max_count > 1 with a specific neutron port must be
+ # rejected: one port cannot be attached to multiple instances.
+ instance_type = flavors.get_default_flavor()
+
+ def fake_is_neutron():
+ return True
+ self.stubs.Set(utils, 'is_neutron', fake_is_neutron)
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id='adadds')])
+ self.assertRaises(exception.MultiplePortsNotApplicable,
+ self.compute_api.create,
+ self.context,
+ instance_type=instance_type,
+ image_href=None,
+ max_count=2,
+ requested_networks=requested_networks)
+
+ def test_create_instance_with_oversubscribed_ram(self):
+ # Test passing of oversubscribed ram policy from the scheduler.
+
+ self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
+ self.rt.update_available_resource(self.context.elevated())
+
+ # get total memory as reported by virt driver:
+ resources = self.compute.driver.get_available_resource(NODENAME)
+ total_mem_mb = resources['memory_mb']
+
+ # Instance at 1.45x physical fits under a 1.5x oversub limit.
+ oversub_limit_mb = total_mem_mb * 1.5
+ instance_mb = int(total_mem_mb * 1.45)
+
+ # build an instance, specifying an amount of memory that exceeds
+ # total_mem_mb, but is less than the oversubscribed limit:
+ params = {"memory_mb": instance_mb, "root_gb": 128,
+ "ephemeral_gb": 128}
+ instance = self._create_fake_instance_obj(params)
+
+ limits = {'memory_mb': oversub_limit_mb}
+ filter_properties = {'limits': limits}
+ self.compute.run_instance(self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+
+ self.assertEqual(instance_mb, self.rt.compute_node['memory_mb_used'])
+
+ def test_create_instance_with_oversubscribed_ram_fail(self):
+ """Test passing of oversubscribed ram policy from the scheduler, but
+ with insufficient memory.
+ """
+ self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
+ self.rt.update_available_resource(self.context.elevated())
+
+ # get total memory as reported by virt driver:
+ resources = self.compute.driver.get_available_resource(NODENAME)
+ total_mem_mb = resources['memory_mb']
+
+ # Instance at 1.55x physical exceeds the 1.5x oversub limit.
+ oversub_limit_mb = total_mem_mb * 1.5
+ instance_mb = int(total_mem_mb * 1.55)
+
+ # build an instance, specifying an amount of memory that exceeds
+ # both total_mem_mb and the oversubscribed limit:
+ params = {"memory_mb": instance_mb, "root_gb": 128,
+ "ephemeral_gb": 128}
+ instance = self._create_fake_instance(params)
+
+ filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}
+
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self.compute.run_instance, self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+
+ def test_create_instance_with_oversubscribed_cpu(self):
+ # Test passing of oversubscribed cpu policy from the scheduler.
+
+ self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
+ self.rt.update_available_resource(self.context.elevated())
+ # Allow up to 3 vcpus on a 1-vcpu host.
+ limits = {'vcpu': 3}
+ filter_properties = {'limits': limits}
+
+ # get total memory as reported by virt driver:
+ resources = self.compute.driver.get_available_resource(NODENAME)
+ self.assertEqual(1, resources['vcpus'])
+
+ # build an instance, specifying an amount of memory that exceeds
+ # total_mem_mb, but is less than the oversubscribed limit:
+ params = {"memory_mb": 10, "root_gb": 1,
+ "ephemeral_gb": 1, "vcpus": 2}
+ instance = self._create_fake_instance_obj(params)
+ self.compute.run_instance(self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+
+ self.assertEqual(2, self.rt.compute_node['vcpus_used'])
+
+ # create one more instance:
+ params = {"memory_mb": 10, "root_gb": 1,
+ "ephemeral_gb": 1, "vcpus": 1}
+ instance = self._create_fake_instance_obj(params)
+ self.compute.run_instance(self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+
+ self.assertEqual(3, self.rt.compute_node['vcpus_used'])
+
+ # delete the instance:
+ instance['vm_state'] = vm_states.DELETED
+ self.rt.update_usage(self.context,
+ instance=instance)
+
+ self.assertEqual(2, self.rt.compute_node['vcpus_used'])
+
+ # now oversubscribe vcpus and fail:
+ # 2 more vcpus on top of the 2 still in use would exceed the
+ # limit of 3.
+ params = {"memory_mb": 10, "root_gb": 1,
+ "ephemeral_gb": 1, "vcpus": 2}
+ instance = self._create_fake_instance_obj(params)
+
+ limits = {'vcpu': 3}
+ filter_properties = {'limits': limits}
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self.compute.run_instance, self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+
+ def test_create_instance_with_oversubscribed_disk(self):
+ # Test passing of oversubscribed disk policy from the scheduler.
+
+ self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
+ self.rt.update_available_resource(self.context.elevated())
+
+ # get total disk as reported by virt driver:
+ resources = self.compute.driver.get_available_resource(NODENAME)
+ total_disk_gb = resources['local_gb']
+
+ # Instance at 1.45x physical disk fits under a 1.5x oversub limit.
+ oversub_limit_gb = total_disk_gb * 1.5
+ instance_gb = int(total_disk_gb * 1.45)
+
+ # build an instance, specifying an amount of disk that exceeds
+ # total_disk_gb, but is less than the oversubscribed limit:
+ params = {"root_gb": instance_gb, "memory_mb": 10}
+ instance = self._create_fake_instance_obj(params)
+
+ limits = {'disk_gb': oversub_limit_gb}
+ filter_properties = {'limits': limits}
+ self.compute.run_instance(self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+
+ self.assertEqual(instance_gb, self.rt.compute_node['local_gb_used'])
+
+ def test_create_instance_with_oversubscribed_disk_fail(self):
+ """Test passing of oversubscribed disk policy from the scheduler, but
+ with insufficient disk.
+ """
+ self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
+ self.rt.update_available_resource(self.context.elevated())
+
+ # get total disk as reported by virt driver:
+ resources = self.compute.driver.get_available_resource(NODENAME)
+ total_disk_gb = resources['local_gb']
+
+ # Instance at 1.55x physical disk exceeds the 1.5x oversub limit.
+ oversub_limit_gb = total_disk_gb * 1.5
+ instance_gb = int(total_disk_gb * 1.55)
+
+ # build an instance, specifying an amount of disk that exceeds
+ # both total_disk_gb and the oversubscribed limit:
+ params = {"root_gb": instance_gb, "memory_mb": 10}
+ instance = self._create_fake_instance(params)
+
+ limits = {'disk_gb': oversub_limit_gb}
+ filter_properties = {'limits': limits}
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self.compute.run_instance, self.context, instance, {},
+ filter_properties, [], None, None, True, None, False)
+
+ def test_create_instance_without_node_param(self):
+ # When no node is supplied, run_instance must fill in the node
+ # reported by the driver (NODENAME).
+ instance = self._create_fake_instance_obj({'node': None})
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ instances = db.instance_get_all(self.context)
+ instance = instances[0]
+
+ self.assertEqual(NODENAME, instance['node'])
+
+ def test_create_instance_no_image(self):
+ # Create instance with no image provided.
+ params = {'image_ref': ''}
+ instance = self._create_fake_instance_obj(params)
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ self._assert_state({'vm_state': vm_states.ACTIVE,
+ 'task_state': None})
+
+ def test_default_access_ip(self):
+ # With default_access_ip_network_name set, the access_ip_v4/v6
+ # fields must be populated from that network, in the same DB
+ # update that flips the instance to ACTIVE.
+ self.flags(default_access_ip_network_name='test1')
+ fake_network.unset_stub_network_methods(self.stubs)
+ instance = self._create_fake_instance_obj()
+
+ orig_update = self.compute._instance_update
+
+ # Make sure the access_ip_* updates happen in the same DB
+ # update as the set to ACTIVE.
+ def _instance_update(ctxt, instance_uuid, **kwargs):
+ if kwargs.get('vm_state', None) == vm_states.ACTIVE:
+ self.assertEqual(kwargs['access_ip_v4'], '192.168.1.100')
+ self.assertEqual(kwargs['access_ip_v6'], '2001:db8:0:1::1')
+ return orig_update(ctxt, instance_uuid, **kwargs)
+
+ self.stubs.Set(self.compute, '_instance_update', _instance_update)
+
+ try:
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ instances = db.instance_get_all(self.context)
+ instance = instances[0]
+
+ self.assertEqual(instance['access_ip_v4'], '192.168.1.100')
+ # NOTE(review): the stub above asserted '2001:db8:0:1::1' at
+ # update time, yet the stored value is expected to differ
+ # here — presumably a later update rewrites it from the fake
+ # network info; confirm this is intentional.
+ self.assertEqual(instance['access_ip_v6'],
+ '2001:db8:0:1:dcad:beff:feef:1')
+ finally:
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_no_default_access_ip(self):
+ # Without default_access_ip_network_name, no access IPs are set.
+ instance = self._create_fake_instance_obj()
+
+ try:
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ instances = db.instance_get_all(self.context)
+ instance = instances[0]
+
+ self.assertFalse(instance['access_ip_v4'])
+ self.assertFalse(instance['access_ip_v6'])
+ finally:
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_fail_to_schedule_persists(self):
+ # check the persistence of the ERROR(scheduling) state.
+ params = {'vm_state': vm_states.ERROR,
+ 'task_state': task_states.SCHEDULING}
+ self._create_fake_instance(params=params)
+ # check state is failed even after the periodic poll
+ self.compute.periodic_tasks(context.get_admin_context())
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': task_states.SCHEDULING})
+
+ def test_run_instance_setup_block_device_mapping_fail(self):
+ """block device mapping failure test.
+
+ Make sure that when there is a block device mapping problem,
+ the instance goes to ERROR state, keeping the task state
+ """
+ def fake(*args, **kwargs):
+ raise exception.InvalidBDM()
+ # Stub the manager class itself so the failure hits every
+ # instance of ComputeManager.
+ self.stubs.Set(nova.compute.manager.ComputeManager,
+ '_prep_block_device', fake)
+ instance = self._create_fake_instance()
+ self.assertRaises(exception.InvalidBDM, self.compute.run_instance,
+ self.context, instance=instance, request_spec={},
+ filter_properties={}, requested_networks=[],
+ injected_files=None, admin_password=None,
+ is_first_time=True, node=None,
+ legacy_bdm_in_spec=False)
+ # check state is failed even after the periodic poll
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': None})
+ self.compute.periodic_tasks(context.get_admin_context())
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': None})
+
+ @mock.patch('nova.compute.manager.ComputeManager._prep_block_device',
+ side_effect=exception.OverQuota(overs='volumes'))
+ def test_setup_block_device_over_quota_fail(self, mock_prep_block_dev):
+ """block device mapping over quota failure test.
+
+ Make sure when we're over volume quota according to Cinder client, the
+ appropriate exception is raised and the instances to ERROR state, keep
+ the task state.
+ """
+ instance = self._create_fake_instance()
+ self.assertRaises(exception.OverQuota, self.compute.run_instance,
+ self.context, instance=instance, request_spec={},
+ filter_properties={}, requested_networks=[],
+ injected_files=None, admin_password=None,
+ is_first_time=True, node=None,
+ legacy_bdm_in_spec=False)
+ # check state is failed even after the periodic poll
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': None})
+ self.compute.periodic_tasks(context.get_admin_context())
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': None})
+ self.assertTrue(mock_prep_block_dev.called)
+
+ def test_run_instance_spawn_fail(self):
+ """spawn failure test.
+
+ Make sure that when there is a spawning problem,
+ the instance goes to ERROR state, keeping the task state.
+ """
+ def fake(*args, **kwargs):
+ raise test.TestingException()
+ self.stubs.Set(self.compute.driver, 'spawn', fake)
+ instance = self._create_fake_instance_obj()
+ self.assertRaises(test.TestingException, self.compute.run_instance,
+ self.context, instance=instance, request_spec={},
+ filter_properties={}, requested_networks=[],
+ injected_files=None, admin_password=None,
+ is_first_time=True, node=None,
+ legacy_bdm_in_spec=False)
+ # check state is failed even after the periodic poll
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': None})
+ self.compute.periodic_tasks(context.get_admin_context())
+ self._assert_state({'vm_state': vm_states.ERROR,
+ 'task_state': None})
+
+ def test_run_instance_dealloc_network_instance_not_found(self):
+ """spawn network deallocate test.
+
+ Make sure that when an instance is not found during spawn
+ that the network is deallocated
+ """
+ instance = self._create_fake_instance_obj()
+
+ def fake(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id="fake")
+
+ self.stubs.Set(self.compute.driver, 'spawn', fake)
+ # Expect exactly one _deallocate_network call (mox verifies on
+ # test teardown).
+ self.mox.StubOutWithMock(self.compute, '_deallocate_network')
+ self.compute._deallocate_network(mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ def test_run_instance_bails_on_missing_instance(self):
+ # Make sure that run_instance() will quickly ignore a deleted instance
+ called = {}
+ instance = self._create_fake_instance()
+
+ def fake_instance_update(self, *a, **args):
+ called['instance_update'] = True
+ raise exception.InstanceNotFound(instance_id='foo')
+ self.stubs.Set(self.compute, '_instance_update', fake_instance_update)
+
+ # No exception must escape: the manager swallows InstanceNotFound.
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ self.assertIn('instance_update', called)
+
+ def test_run_instance_bails_on_deleting_instance(self):
+ # Make sure that run_instance() will quickly ignore a deleting instance
+ called = {}
+ instance = self._create_fake_instance()
+
+ def fake_instance_update(self, *a, **args):
+ called['instance_update'] = True
+ raise exception.UnexpectedDeletingTaskStateError(
+ expected='scheduling', actual='deleting')
+ self.stubs.Set(self.compute, '_instance_update', fake_instance_update)
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ self.assertIn('instance_update', called)
+
+ def test_run_instance_bails_on_missing_instance_2(self):
+ # Make sure that run_instance() will quickly ignore a deleted instance
+ # (this time the InstanceNotFound surfaces later, from
+ # _default_block_device_names).
+ called = {}
+ instance = self._create_fake_instance()
+
+ def fake_default_block_device_names(self, *a, **args):
+ called['default_block_device_names'] = True
+ raise exception.InstanceNotFound(instance_id='foo')
+ self.stubs.Set(self.compute, '_default_block_device_names',
+ fake_default_block_device_names)
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ self.assertIn('default_block_device_names', called)
+
+ def test_can_terminate_on_error_state(self):
+ # Make sure that the instance can be terminated in ERROR state.
+ # check failed to schedule --> terminate
+ params = {'vm_state': vm_states.ERROR}
+ instance = self._create_fake_instance_obj(params=params)
+ self.compute.terminate_instance(self.context, instance, [], [])
+ self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
+ self.context, instance['uuid'])
+ # Double check it's not there for admins, either.
+ self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
+ self.context.elevated(), instance['uuid'])
+
+ def test_run_terminate(self):
+ # Make sure it is possible to run and terminate instance.
+ instance = self._create_fake_instance_obj()
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ instances = db.instance_get_all(self.context)
+ LOG.info("Running instances: %s", instances)
+ self.assertEqual(len(instances), 1)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ instances = db.instance_get_all(self.context)
+ LOG.info("After terminating instances: %s", instances)
+ self.assertEqual(len(instances), 0)
+
+ # The soft-deleted row must still be visible with
+ # read_deleted="only" and carry the DELETED vm_state.
+ admin_deleted_context = context.get_admin_context(
+ read_deleted="only")
+ instance = db.instance_get_by_uuid(admin_deleted_context,
+ instance['uuid'])
+ self.assertEqual(instance['vm_state'], vm_states.DELETED)
+ self.assertIsNone(instance['task_state'])
+
+ def test_run_terminate_with_vol_attached(self):
+ """Make sure it is possible to run and terminate instance with volume
+ attached
+ """
+ instance = self._create_fake_instance_obj()
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ instances = db.instance_get_all(self.context)
+ LOG.info("Running instances: %s", instances)
+ self.assertEqual(len(instances), 1)
+
+ # No-op cinder stubs: this test only cares about the BDM
+ # bookkeeping, not the actual volume operations.
+ def fake_check_attach(*args, **kwargs):
+ pass
+
+ def fake_reserve_volume(*args, **kwargs):
+ pass
+
+ def fake_volume_get(self, context, volume_id):
+ return {'id': volume_id}
+
+ def fake_terminate_connection(self, context, volume_id, connector):
+ pass
+
+ def fake_detach(self, context, volume_id):
+ pass
+
+ bdms = []
+
+ # Stand in for the RPC call the API makes to the compute node;
+ # records the created BDM so terminate_instance can be handed it.
+ def fake_rpc_reserve_block_device_name(self, context, instance, device,
+ volume_id, **kwargs):
+ bdm = objects.BlockDeviceMapping(
+ **{'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'volume_id': 1,
+ 'instance_uuid': instance['uuid'],
+ 'device_name': '/dev/vdc'})
+ bdm.create(context)
+ bdms.append(bdm)
+ return bdm
+
+ self.stubs.Set(cinder.API, 'get', fake_volume_get)
+ self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
+ self.stubs.Set(cinder.API, 'reserve_volume',
+ fake_reserve_volume)
+ self.stubs.Set(cinder.API, 'terminate_connection',
+ fake_terminate_connection)
+ self.stubs.Set(cinder.API, 'detach', fake_detach)
+ self.stubs.Set(compute_rpcapi.ComputeAPI,
+ 'reserve_block_device_name',
+ fake_rpc_reserve_block_device_name)
+
+ self.compute_api.attach_volume(self.context, instance, 1,
+ '/dev/vdc')
+
+ self.compute.terminate_instance(self.context,
+ instance, bdms, [])
+
+ instances = db.instance_get_all(self.context)
+ LOG.info("After terminating instances: %s", instances)
+ self.assertEqual(len(instances), 0)
+ # Terminate must also have cleaned up the BDM rows.
+ bdms = db.block_device_mapping_get_all_by_instance(self.context,
+ instance['uuid'])
+ self.assertEqual(len(bdms), 0)
+
+ def test_run_terminate_no_image(self):
+ """Make sure instance started without image (from volume)
+        can be terminated without issues
+ """
+ params = {'image_ref': ''}
+ instance = self._create_fake_instance_obj(params)
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ self._assert_state({'vm_state': vm_states.ACTIVE,
+ 'task_state': None})
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+ instances = db.instance_get_all(self.context)
+ self.assertEqual(len(instances), 0)
+
+ def test_terminate_no_network(self):
+ # This is as reported in LP bug 1008875
+ instance = self._create_fake_instance_obj()
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ instances = db.instance_get_all(self.context)
+ LOG.info("Running instances: %s", instances)
+ self.assertEqual(len(instances), 1)
+ self.mox.ReplayAll()
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ instances = db.instance_get_all(self.context)
+ LOG.info("After terminating instances: %s", instances)
+ self.assertEqual(len(instances), 0)
+
+ def test_run_terminate_timestamps(self):
+ # Make sure timestamps are set for launched and destroyed.
+ instance = self._create_fake_instance_obj()
+ instance['launched_at'] = None
+ self.assertIsNone(instance['launched_at'])
+ self.assertIsNone(instance['deleted_at'])
+ launch = timeutils.utcnow()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ instance.refresh()
+ self.assertTrue(instance['launched_at'].replace(tzinfo=None) > launch)
+ self.assertIsNone(instance['deleted_at'])
+ terminate = timeutils.utcnow()
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ with utils.temporary_mutation(self.context, read_deleted='only'):
+ instance = db.instance_get_by_uuid(self.context,
+ instance['uuid'])
+ self.assertTrue(instance['launched_at'].replace(
+ tzinfo=None) < terminate)
+ self.assertTrue(instance['deleted_at'].replace(
+ tzinfo=None) > terminate)
+
+ def test_run_terminate_deallocate_net_failure_sets_error_state(self):
+ instance = self._create_fake_instance_obj()
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ instances = db.instance_get_all(self.context)
+ LOG.info("Running instances: %s", instances)
+ self.assertEqual(len(instances), 1)
+
+ def _fake_deallocate_network(*args, **kwargs):
+ raise test.TestingException()
+
+ self.stubs.Set(self.compute, '_deallocate_network',
+ _fake_deallocate_network)
+
+ try:
+ self.compute.terminate_instance(self.context, instance, [], [])
+ except test.TestingException:
+ pass
+
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.assertEqual(instance['vm_state'], vm_states.ERROR)
+
+ def test_stop(self):
+ # Ensure instance can be stopped.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.POWERING_OFF})
+ inst_uuid = instance['uuid']
+ extra = ['system_metadata', 'metadata']
+ inst_obj = objects.Instance.get_by_uuid(self.context,
+ inst_uuid,
+ expected_attrs=extra)
+ self.compute.stop_instance(self.context, instance=inst_obj)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_start(self):
+ # Ensure instance can be started.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.POWERING_OFF})
+ extra = ['system_metadata', 'metadata']
+ inst_uuid = instance['uuid']
+ inst_obj = objects.Instance.get_by_uuid(self.context,
+ inst_uuid,
+ expected_attrs=extra)
+ self.compute.stop_instance(self.context, instance=inst_obj)
+ inst_obj.task_state = task_states.POWERING_ON
+ inst_obj.save(self.context)
+ self.compute.start_instance(self.context, instance=inst_obj)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_stop_start_no_image(self):
+ params = {'image_ref': ''}
+ instance = self._create_fake_instance_obj(params)
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.POWERING_OFF})
+ extra = ['system_metadata', 'metadata']
+ inst_uuid = instance['uuid']
+ inst_obj = objects.Instance.get_by_uuid(self.context,
+ inst_uuid,
+ expected_attrs=extra)
+ self.compute.stop_instance(self.context, instance=inst_obj)
+ inst_obj.task_state = task_states.POWERING_ON
+ inst_obj.save(self.context)
+ self.compute.start_instance(self.context, instance=inst_obj)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_rescue(self):
+ # Ensure instance can be rescued and unrescued.
+
+ called = {'rescued': False,
+ 'unrescued': False}
+
+ def fake_rescue(self, context, instance_ref, network_info, image_meta,
+ rescue_password):
+ called['rescued'] = True
+
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)
+
+ def fake_unrescue(self, instance_ref, network_info):
+ called['unrescued'] = True
+
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
+ fake_unrescue)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ instance.task_state = task_states.RESCUING
+ instance.save()
+ self.compute.rescue_instance(self.context, instance, None)
+ self.assertTrue(called['rescued'])
+ instance.task_state = task_states.UNRESCUING
+ instance.save()
+ self.compute.unrescue_instance(self.context, instance)
+ self.assertTrue(called['unrescued'])
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_rescue_notifications(self):
+ # Ensure notifications on instance rescue.
+ def fake_rescue(self, context, instance_ref, network_info, image_meta,
+ rescue_password):
+ pass
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ fake_notifier.NOTIFICATIONS = []
+ instance.task_state = task_states.RESCUING
+ instance.save()
+ self.compute.rescue_instance(self.context, instance, None)
+
+ expected_notifications = ['compute.instance.rescue.start',
+ 'compute.instance.exists',
+ 'compute.instance.rescue.end']
+ self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS],
+ expected_notifications)
+ for n, msg in enumerate(fake_notifier.NOTIFICATIONS):
+ self.assertEqual(msg.event_type, expected_notifications[n])
+ self.assertEqual(msg.priority, 'INFO')
+ payload = msg.payload
+ self.assertEqual(payload['tenant_id'], self.project_id)
+ self.assertEqual(payload['user_id'], self.user_id)
+ self.assertEqual(payload['instance_id'], instance.uuid)
+ self.assertEqual(payload['instance_type'], 'm1.tiny')
+ type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+ self.assertEqual(str(payload['instance_type_id']), str(type_id))
+ self.assertIn('display_name', payload)
+ self.assertIn('created_at', payload)
+ self.assertIn('launched_at', payload)
+ image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
+ self.assertEqual(payload['image_ref_url'], image_ref_url)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertIn('rescue_image_name', msg.payload)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_unrescue_notifications(self):
+        # Ensure notifications on instance unrescue.
+ def fake_unrescue(self, instance_ref, network_info):
+ pass
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
+ fake_unrescue)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ fake_notifier.NOTIFICATIONS = []
+ instance.task_state = task_states.UNRESCUING
+ instance.save()
+ self.compute.unrescue_instance(self.context, instance)
+
+ expected_notifications = ['compute.instance.unrescue.start',
+ 'compute.instance.unrescue.end']
+ self.assertEqual([m.event_type for m in fake_notifier.NOTIFICATIONS],
+ expected_notifications)
+ for n, msg in enumerate(fake_notifier.NOTIFICATIONS):
+ self.assertEqual(msg.event_type, expected_notifications[n])
+ self.assertEqual(msg.priority, 'INFO')
+ payload = msg.payload
+ self.assertEqual(payload['tenant_id'], self.project_id)
+ self.assertEqual(payload['user_id'], self.user_id)
+ self.assertEqual(payload['instance_id'], instance.uuid)
+ self.assertEqual(payload['instance_type'], 'm1.tiny')
+ type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+ self.assertEqual(str(payload['instance_type_id']), str(type_id))
+ self.assertIn('display_name', payload)
+ self.assertIn('created_at', payload)
+ self.assertIn('launched_at', payload)
+ image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
+ self.assertEqual(payload['image_ref_url'], image_ref_url)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_rescue_handle_err(self):
+ # If the driver fails to rescue, instance state should remain the same
+ # and the exception should be converted to InstanceNotRescuable
+ inst_obj = self._create_fake_instance_obj()
+ self.mox.StubOutWithMock(self.compute, '_get_rescue_image')
+ self.mox.StubOutWithMock(nova.virt.fake.FakeDriver, 'rescue')
+
+ self.compute._get_rescue_image(
+ mox.IgnoreArg(), inst_obj, mox.IgnoreArg()).AndReturn({})
+ nova.virt.fake.FakeDriver.rescue(
+ mox.IgnoreArg(), inst_obj, [], mox.IgnoreArg(), 'password'
+ ).AndRaise(RuntimeError("Try again later"))
+
+ self.mox.ReplayAll()
+
+ expected_message = ('Instance %s cannot be rescued: '
+ 'Driver Error: Try again later' % inst_obj.uuid)
+ inst_obj.vm_state = 'some_random_state'
+
+ with testtools.ExpectedException(
+ exception.InstanceNotRescuable, expected_message):
+ self.compute.rescue_instance(
+ self.context, instance=inst_obj,
+ rescue_password='password')
+
+ self.assertEqual('some_random_state', inst_obj.vm_state)
+
+ @mock.patch.object(nova.compute.utils, "get_image_metadata")
+ @mock.patch.object(nova.virt.fake.FakeDriver, "rescue")
+ def test_rescue_with_image_specified(self, mock_rescue,
+ mock_get_image_metadata):
+
+ image_ref = "image-ref"
+ rescue_image_meta = {}
+ params = {"task_state": task_states.RESCUING}
+ instance = self._create_fake_instance_obj(params=params)
+
+ ctxt = context.get_admin_context()
+ mock_context = mock.Mock()
+ mock_context.elevated.return_value = ctxt
+
+ mock_get_image_metadata.return_value = rescue_image_meta
+
+ self.compute.rescue_instance(mock_context, instance=instance,
+ rescue_password="password", rescue_image_ref=image_ref)
+
+ mock_get_image_metadata.assert_called_with(ctxt,
+ self.compute.image_api,
+ image_ref, instance)
+ mock_rescue.assert_called_with(ctxt, instance, [],
+ rescue_image_meta, 'password')
+ self.compute.terminate_instance(ctxt, instance, [], [])
+
+ @mock.patch.object(nova.compute.utils, "get_image_metadata")
+ @mock.patch.object(nova.virt.fake.FakeDriver, "rescue")
+ def test_rescue_with_base_image_when_image_not_specified(self,
+ mock_rescue, mock_get_image_metadata):
+
+ image_ref = "image-ref"
+ system_meta = {"image_base_image_ref": image_ref}
+ rescue_image_meta = {}
+ params = {"task_state": task_states.RESCUING,
+ "system_metadata": system_meta}
+ instance = self._create_fake_instance_obj(params=params)
+
+ ctxt = context.get_admin_context()
+ mock_context = mock.Mock()
+ mock_context.elevated.return_value = ctxt
+
+ mock_get_image_metadata.return_value = rescue_image_meta
+
+ self.compute.rescue_instance(mock_context, instance=instance,
+ rescue_password="password")
+
+ mock_get_image_metadata.assert_called_with(ctxt,
+ self.compute.image_api,
+ image_ref, instance)
+ mock_rescue.assert_called_with(ctxt, instance, [],
+ rescue_image_meta, 'password')
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_power_on(self):
+ # Ensure instance can be powered on.
+
+ called = {'power_on': False}
+
+ def fake_driver_power_on(self, context, instance, network_info,
+ block_device_info):
+ called['power_on'] = True
+
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'power_on',
+ fake_driver_power_on)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ extra = ['system_metadata', 'metadata']
+ inst_obj = objects.Instance.get_by_uuid(self.context,
+ instance['uuid'],
+ expected_attrs=extra)
+ inst_obj.task_state = task_states.POWERING_ON
+ inst_obj.save(self.context)
+ self.compute.start_instance(self.context, instance=inst_obj)
+ self.assertTrue(called['power_on'])
+ self.compute.terminate_instance(self.context, inst_obj, [], [])
+
+ def test_power_off(self):
+ # Ensure instance can be powered off.
+
+ called = {'power_off': False}
+
+ def fake_driver_power_off(self, instance,
+ shutdown_timeout, shutdown_attempts):
+ called['power_off'] = True
+
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'power_off',
+ fake_driver_power_off)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ extra = ['system_metadata', 'metadata']
+ inst_obj = objects.Instance.get_by_uuid(self.context,
+ instance['uuid'],
+ expected_attrs=extra)
+ inst_obj.task_state = task_states.POWERING_OFF
+ inst_obj.save(self.context)
+ self.compute.stop_instance(self.context, instance=inst_obj)
+ self.assertTrue(called['power_off'])
+ self.compute.terminate_instance(self.context, inst_obj, [], [])
+
+ def test_pause(self):
+ # Ensure instance can be paused and unpaused.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None, None, True,
+ None, False)
+ instance.task_state = task_states.PAUSING
+ instance.save()
+ fake_notifier.NOTIFICATIONS = []
+ self.compute.pause_instance(self.context, instance=instance)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.pause.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.pause.end')
+ instance.task_state = task_states.UNPAUSING
+ instance.save()
+ fake_notifier.NOTIFICATIONS = []
+ self.compute.unpause_instance(self.context, instance=instance)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.unpause.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.unpause.end')
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_suspend(self):
+ # ensure instance can be suspended and resumed.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ instance.task_state = task_states.SUSPENDING
+ instance.save()
+ self.compute.suspend_instance(self.context, instance)
+ instance.task_state = task_states.RESUMING
+ instance.save()
+ self.compute.resume_instance(self.context, instance)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_suspend_error(self):
+ # Ensure vm_state is ERROR when suspend error occurs.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ with mock.patch.object(self.compute.driver, 'suspend',
+ side_effect=test.TestingException):
+ self.assertRaises(test.TestingException,
+ self.compute.suspend_instance,
+ self.context,
+ instance=instance)
+
+ instance = db.instance_get_by_uuid(self.context, instance.uuid)
+ self.assertEqual(vm_states.ERROR, instance.vm_state)
+
+ def test_suspend_not_implemented(self):
+        # Ensure the expected exception is raised and the vm_state of the
+        # instance is restored to its original value if suspend is not
+        # implemented by the driver
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ with mock.patch.object(self.compute.driver, 'suspend',
+ side_effect=NotImplementedError('suspend test')):
+ self.assertRaises(NotImplementedError,
+ self.compute.suspend_instance,
+ self.context,
+ instance=instance)
+
+ instance = db.instance_get_by_uuid(self.context, instance.uuid)
+ self.assertEqual(vm_states.ACTIVE, instance.vm_state)
+
+ def test_suspend_rescued(self):
+ # ensure rescued instance can be suspended and resumed.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ instance.vm_state = vm_states.RESCUED
+ instance.task_state = task_states.SUSPENDING
+ instance.save()
+
+ self.compute.suspend_instance(self.context, instance)
+ self.assertEqual(instance.vm_state, vm_states.SUSPENDED)
+
+ instance.task_state = task_states.RESUMING
+ instance.save()
+ self.compute.resume_instance(self.context, instance)
+ self.assertEqual(instance.vm_state, vm_states.RESCUED)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_resume_no_old_state(self):
+ # ensure a suspended instance with no old_vm_state is resumed to the
+ # ACTIVE state
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ instance.vm_state = vm_states.SUSPENDED
+ instance.task_state = task_states.RESUMING
+ instance.save()
+
+ self.compute.resume_instance(self.context, instance)
+ self.assertEqual(instance.vm_state, vm_states.ACTIVE)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_rebuild(self):
+ # Ensure instance can be rebuilt.
+ instance = self._create_fake_instance_obj()
+ image_ref = instance['image_ref']
+ sys_metadata = db.instance_system_metadata_get(self.context,
+ instance['uuid'])
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.REBUILDING})
+ self.compute.rebuild_instance(self.context, instance,
+ image_ref, image_ref,
+ injected_files=[],
+ new_pass="new_password",
+ orig_sys_metadata=sys_metadata,
+ bdms=[], recreate=False,
+ on_shared_storage=False)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_rebuild_driver(self):
+ # Make sure virt drivers can override default rebuild
+ called = {'rebuild': False}
+
+ def fake(**kwargs):
+ instance = kwargs['instance']
+ instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
+ instance.save(expected_task_state=[task_states.REBUILDING])
+ instance.task_state = task_states.REBUILD_SPAWNING
+ instance.save(
+ expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])
+ called['rebuild'] = True
+
+ self.stubs.Set(self.compute.driver, 'rebuild', fake)
+ instance = self._create_fake_instance_obj()
+ image_ref = instance['image_ref']
+ sys_metadata = db.instance_system_metadata_get(self.context,
+ instance['uuid'])
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.REBUILDING})
+ self.compute.rebuild_instance(self.context, instance,
+ image_ref, image_ref,
+ injected_files=[],
+ new_pass="new_password",
+ orig_sys_metadata=sys_metadata,
+ bdms=[], recreate=False,
+ on_shared_storage=False)
+ self.assertTrue(called['rebuild'])
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_rebuild_no_image(self):
+ # Ensure instance can be rebuilt when started with no image.
+ params = {'image_ref': ''}
+ instance = self._create_fake_instance_obj(params)
+ sys_metadata = db.instance_system_metadata_get(self.context,
+ instance['uuid'])
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.REBUILDING})
+ self.compute.rebuild_instance(self.context, instance,
+ '', '', injected_files=[],
+ new_pass="new_password",
+ orig_sys_metadata=sys_metadata, bdms=[],
+ recreate=False, on_shared_storage=False)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_rebuild_launched_at_time(self):
+ # Ensure instance can be rebuilt.
+ old_time = datetime.datetime(2012, 4, 1)
+ cur_time = datetime.datetime(2012, 12, 21, 12, 21)
+ timeutils.set_time_override(old_time)
+ instance = self._create_fake_instance_obj()
+ image_ref = instance['image_ref']
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ timeutils.set_time_override(cur_time)
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.REBUILDING})
+ self.compute.rebuild_instance(self.context, instance,
+ image_ref, image_ref,
+ injected_files=[],
+ new_pass="new_password",
+ orig_sys_metadata={},
+ bdms=[], recreate=False,
+ on_shared_storage=False)
+ instance.refresh()
+ self.assertEqual(cur_time,
+ instance['launched_at'].replace(tzinfo=None))
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_rebuild_with_injected_files(self):
+ # Ensure instance can be rebuilt with injected files.
+ injected_files = [
+ ('/a/b/c', base64.b64encode('foobarbaz')),
+ ]
+
+ self.decoded_files = [
+ ('/a/b/c', 'foobarbaz'),
+ ]
+
+ def _spawn(context, instance, image_meta, injected_files,
+ admin_password, network_info, block_device_info):
+ self.assertEqual(self.decoded_files, injected_files)
+
+ self.stubs.Set(self.compute.driver, 'spawn', _spawn)
+ instance = self._create_fake_instance_obj()
+ image_ref = instance['image_ref']
+ sys_metadata = db.instance_system_metadata_get(self.context,
+ instance['uuid'])
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": task_states.REBUILDING})
+ self.compute.rebuild_instance(self.context, instance,
+ image_ref, image_ref,
+ injected_files=injected_files,
+ new_pass="new_password",
+ orig_sys_metadata=sys_metadata,
+ bdms=[], recreate=False,
+ on_shared_storage=False)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def _test_reboot(self, soft,
+ test_delete=False, test_unrescue=False,
+ fail_reboot=False, fail_running=False):
+
+ reboot_type = soft and 'SOFT' or 'HARD'
+ task_pending = (soft and task_states.REBOOT_PENDING
+ or task_states.REBOOT_PENDING_HARD)
+ task_started = (soft and task_states.REBOOT_STARTED
+ or task_states.REBOOT_STARTED_HARD)
+ expected_task = (soft and task_states.REBOOTING
+ or task_states.REBOOTING_HARD)
+ expected_tasks = (soft and (task_states.REBOOTING,
+ task_states.REBOOT_PENDING,
+ task_states.REBOOT_STARTED)
+ or (task_states.REBOOTING_HARD,
+ task_states.REBOOT_PENDING_HARD,
+ task_states.REBOOT_STARTED_HARD))
+
+ # This is a true unit test, so we don't need the network stubs.
+ fake_network.unset_stub_network_methods(self.stubs)
+
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_block_device_info')
+ self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
+ self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
+ self.mox.StubOutWithMock(self.compute, '_instance_update')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.compute.driver, 'reboot')
+
+ # FIXME(comstud): I don't feel like the context needs to
+ # be elevated at all. Hopefully remove elevated from
+ # reboot_instance and remove the stub here in a future patch.
+ # econtext would just become self.context below then.
+ econtext = self.context.elevated()
+
+ db_instance = fake_instance.fake_db_instance(
+ **dict(uuid='fake-instance',
+ power_state=power_state.NOSTATE,
+ vm_state=vm_states.ACTIVE,
+ task_state=expected_task,
+ launched_at=timeutils.utcnow()))
+ instance = objects.Instance._from_db_object(econtext,
+ objects.Instance(),
+ db_instance)
+
+ updated_dbinstance1 = fake_instance.fake_db_instance(
+ **dict(uuid='updated-instance1',
+ power_state=10003,
+ vm_state=vm_states.ACTIVE,
+ task_state=expected_task,
+ launched_at=timeutils.utcnow()))
+ updated_dbinstance2 = fake_instance.fake_db_instance(
+ **dict(uuid='updated-instance2',
+ power_state=10003,
+ vm_state=vm_states.ACTIVE,
+ task_state=expected_task,
+ launched_at=timeutils.utcnow()))
+
+ if test_unrescue:
+ instance.vm_state = vm_states.RESCUED
+ instance.obj_reset_changes()
+
+ fake_nw_model = network_model.NetworkInfo()
+
+ fake_block_dev_info = 'fake_block_dev_info'
+ fake_power_state1 = 10001
+ fake_power_state2 = power_state.RUNNING
+ fake_power_state3 = 10002
+
+ # Beginning of calls we expect.
+
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.context.elevated().AndReturn(econtext)
+
+ self.compute._get_instance_block_device_info(
+ econtext, instance).AndReturn(fake_block_dev_info)
+ self.compute._get_instance_nw_info(econtext,
+ instance).AndReturn(
+ fake_nw_model)
+ self.compute._notify_about_instance_usage(econtext,
+ instance,
+ 'reboot.start')
+ self.compute._get_power_state(econtext,
+ instance).AndReturn(fake_power_state1)
+ db.instance_update_and_get_original(econtext, instance['uuid'],
+ {'task_state': task_pending,
+ 'expected_task_state': expected_tasks,
+ 'power_state': fake_power_state1},
+ update_cells=False,
+ columns_to_join=['system_metadata']
+ ).AndReturn((None,
+ updated_dbinstance1))
+ expected_nw_info = fake_nw_model
+ db.instance_update_and_get_original(econtext,
+ updated_dbinstance1['uuid'],
+ {'task_state': task_started,
+ 'expected_task_state': task_pending},
+ update_cells=False,
+ columns_to_join=['system_metadata']
+ ).AndReturn((None,
+ updated_dbinstance1))
+
+ # Annoying. driver.reboot is wrapped in a try/except, and
+ # doesn't re-raise. It eats exception generated by mox if
+ # this is called with the wrong args, so we have to hack
+ # around it.
+ reboot_call_info = {}
+ expected_call_info = {
+ 'args': (econtext, instance, expected_nw_info,
+ reboot_type),
+ 'kwargs': {'block_device_info': fake_block_dev_info}}
+ fault = exception.InstanceNotFound(instance_id='instance-0000')
+
+ def fake_reboot(*args, **kwargs):
+ reboot_call_info['args'] = args
+ reboot_call_info['kwargs'] = kwargs
+
+ # NOTE(sirp): Since `bad_volumes_callback` is a function defined
+ # within `reboot_instance`, we don't have access to its value and
+ # can't stub it out, thus we skip that comparison.
+ kwargs.pop('bad_volumes_callback')
+ if fail_reboot:
+ raise fault
+
+ self.stubs.Set(self.compute.driver, 'reboot', fake_reboot)
+
+ # Power state should be updated again
+ if not fail_reboot or fail_running:
+ new_power_state = fake_power_state2
+ self.compute._get_power_state(econtext,
+ instance).AndReturn(fake_power_state2)
+ else:
+ new_power_state = fake_power_state3
+ self.compute._get_power_state(econtext,
+ instance).AndReturn(fake_power_state3)
+
+ if test_delete:
+ fault = exception.InstanceNotFound(
+ instance_id=instance['uuid'])
+ db.instance_update_and_get_original(
+ econtext, updated_dbinstance1['uuid'],
+ {'power_state': new_power_state,
+ 'task_state': None,
+ 'vm_state': vm_states.ACTIVE},
+ update_cells=False,
+ columns_to_join=['system_metadata'],
+ ).AndRaise(fault)
+ self.compute._notify_about_instance_usage(
+ econtext,
+ instance,
+ 'reboot.end')
+ elif fail_reboot and not fail_running:
+ db.instance_update_and_get_original(
+ econtext, updated_dbinstance1['uuid'],
+ {'vm_state': vm_states.ERROR},
+ update_cells=False,
+ columns_to_join=['system_metadata'],
+ ).AndRaise(fault)
+ else:
+ db.instance_update_and_get_original(
+ econtext, updated_dbinstance1['uuid'],
+ {'power_state': new_power_state,
+ 'task_state': None,
+ 'vm_state': vm_states.ACTIVE},
+ update_cells=False,
+ columns_to_join=['system_metadata'],
+ ).AndReturn((None, updated_dbinstance2))
+ if fail_running:
+ self.compute._notify_about_instance_usage(econtext, instance,
+ 'reboot.error', fault=fault)
+ self.compute._notify_about_instance_usage(
+ econtext,
+ instance,
+ 'reboot.end')
+
+ self.mox.ReplayAll()
+
+ if not fail_reboot or fail_running:
+ self.compute.reboot_instance(self.context, instance=instance,
+ block_device_info=None,
+ reboot_type=reboot_type)
+ else:
+ self.assertRaises(exception.InstanceNotFound,
+ self.compute.reboot_instance,
+ self.context, instance=instance,
+ block_device_info=None,
+ reboot_type=reboot_type)
+
+ self.assertEqual(expected_call_info, reboot_call_info)
+
+ def test_reboot_soft(self):
+ self._test_reboot(True)
+
+ def test_reboot_soft_and_delete(self):
+ self._test_reboot(True, True)
+
+ def test_reboot_soft_and_rescued(self):
+ self._test_reboot(True, False, True)
+
+ def test_reboot_soft_and_delete_and_rescued(self):
+ self._test_reboot(True, True, True)
+
+ def test_reboot_hard(self):
+ self._test_reboot(False)
+
+ def test_reboot_hard_and_delete(self):
+ self._test_reboot(False, True)
+
+ def test_reboot_hard_and_rescued(self):
+ self._test_reboot(False, False, True)
+
+ def test_reboot_hard_and_delete_and_rescued(self):
+ self._test_reboot(False, True, True)
+
+ def test_reboot_fail(self):
+ self._test_reboot(False, fail_reboot=True)
+
+ def test_reboot_fail_running(self):
+ self._test_reboot(False, fail_reboot=True,
+ fail_running=True)
+
+ # --- _get_instance_block_device_info -------------------------------------
+ # These three tests exercise how the compute manager translates BDM records
+ # into the legacy block_device_info dict handed to virt drivers.
+ def test_get_instance_block_device_info_source_image(self):
+ # An image-backed volume (source_type 'image', destination 'volume')
+ # must appear in 'block_device_mapping' with its parsed connection_info.
+ bdms = block_device_obj.block_device_make_list(self.context,
+ [fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 3,
+ 'volume_id': u'4cbc9e62-6ba0-45dd-b647-934942ead7d6',
+ 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vda',
+ 'connection_info': '{"driver_volume_type": "rbd"}',
+ 'source_type': 'image',
+ 'destination_type': 'volume',
+ 'image_id': 'fake-image-id-1',
+ 'boot_index': 0
+ })])
+
+ with (mock.patch.object(
+ objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid',
+ return_value=bdms)
+ ) as mock_get_by_instance:
+ block_device_info = (
+ self.compute._get_instance_block_device_info(
+ self.context, self._create_fake_instance())
+ )
+ expected = {
+ 'swap': None,
+ 'ephemerals': [],
+ 'block_device_mapping': [{
+ 'connection_info': {
+ 'driver_volume_type': 'rbd'
+ },
+ 'mount_device': '/dev/vda',
+ 'delete_on_termination': False
+ }]
+ }
+ # No bdms argument was passed, so the DB lookup must have happened.
+ self.assertTrue(mock_get_by_instance.called)
+ self.assertEqual(block_device_info, expected)
+
+ def test_get_instance_block_device_info_passed_bdms(self):
+ # When bdms are supplied by the caller the DB must NOT be queried.
+ bdms = block_device_obj.block_device_make_list(self.context,
+ [fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 3,
+ 'volume_id': u'4cbc9e62-6ba0-45dd-b647-934942ead7d6',
+ 'device_name': '/dev/vdd',
+ 'connection_info': '{"driver_volume_type": "rbd"}',
+ 'source_type': 'volume',
+ 'destination_type': 'volume'})
+ ])
+ with (mock.patch.object(
+ objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')) as mock_get_by_instance:
+ block_device_info = (
+ self.compute._get_instance_block_device_info(
+ self.context, self._create_fake_instance(), bdms=bdms)
+ )
+ expected = {
+ 'swap': None,
+ 'ephemerals': [],
+ 'block_device_mapping': [{
+ 'connection_info': {
+ 'driver_volume_type': 'rbd'
+ },
+ 'mount_device': '/dev/vdd',
+ 'delete_on_termination': False
+ }]
+ }
+ self.assertFalse(mock_get_by_instance.called)
+ self.assertEqual(block_device_info, expected)
+
+ def test_get_instance_block_device_info_swap_and_ephemerals(self):
+ # blank/local BDMs must be classified as ephemerals, and a blank/local
+ # BDM with guest_format 'swap' must populate the 'swap' entry.
+ instance = self._create_fake_instance()
+
+ ephemeral0 = fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 1, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vdb',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'delete_on_termination': True,
+ 'guest_format': None,
+ 'volume_size': 1,
+ 'boot_index': -1
+ })
+ ephemeral1 = fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 2, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vdc',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'delete_on_termination': True,
+ 'guest_format': None,
+ 'volume_size': 2,
+ 'boot_index': -1
+ })
+ swap = fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vdd',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'delete_on_termination': True,
+ 'guest_format': 'swap',
+ 'volume_size': 1,
+ 'boot_index': -1
+ })
+
+ bdms = block_device_obj.block_device_make_list(self.context,
+ [swap, ephemeral0, ephemeral1])
+
+ with (
+ mock.patch.object(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid', return_value=bdms)
+ ) as mock_get_by_instance_uuid:
+ expected_block_device_info = {
+ 'swap': {'device_name': '/dev/vdd', 'swap_size': 1},
+ 'ephemerals': [{'device_name': '/dev/vdb', 'num': 0, 'size': 1,
+ 'virtual_name': 'ephemeral0'},
+ {'device_name': '/dev/vdc', 'num': 1, 'size': 2,
+ 'virtual_name': 'ephemeral1'}],
+ 'block_device_mapping': []
+ }
+
+ block_device_info = (
+ self.compute._get_instance_block_device_info(
+ self.context, instance)
+ )
+
+ mock_get_by_instance_uuid.assert_called_once_with(self.context,
+ instance['uuid'])
+ self.assertEqual(expected_block_device_info, block_device_info)
+
+ def test_inject_network_info(self):
+ # Ensure we can inject network info.
+ # The driver call is stubbed; the flag proves the manager delegated.
+ called = {'inject': False}
+
+ def fake_driver_inject_network(self, instance, network_info):
+ called['inject'] = True
+
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'inject_network_info',
+ fake_driver_inject_network)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ self.compute.inject_network_info(self.context, instance=instance)
+ self.assertTrue(called['inject'])
+ self.compute.terminate_instance(self.context,
+ instance, [], [])
+
+ def test_reset_network(self):
+ # Ensure we can reset networking on an instance.
+ # Counts driver invocations to verify exactly one delegation.
+ called = {'count': 0}
+
+ def fake_driver_reset_network(self, instance):
+ called['count'] += 1
+
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'reset_network',
+ fake_driver_reset_network)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.compute.reset_network(self.context, instance)
+
+ self.assertEqual(called['count'], 1)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ # --- snapshot / backup tests ----------------------------------------------
+ def _get_snapshotting_instance(self):
+ # Helper: boot a fake instance and put it in the task state the
+ # snapshot path expects (IMAGE_SNAPSHOT_PENDING).
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
+ instance.save()
+ return instance
+
+ def test_snapshot(self):
+ inst_obj = self._get_snapshotting_instance()
+ self.compute.snapshot_instance(self.context, image_id='fakesnap',
+ instance=inst_obj)
+
+ def test_snapshot_no_image(self):
+ # Snapshotting a volume-backed instance (empty image_ref) must work.
+ inst_obj = self._get_snapshotting_instance()
+ inst_obj.image_ref = ''
+ inst_obj.save()
+ self.compute.snapshot_instance(self.context, image_id='fakesnap',
+ instance=inst_obj)
+
+ def _test_snapshot_fails(self, raise_during_cleanup, method,
+ expected_state=True):
+ # Shared failure scenario: the driver snapshot raises; verify the
+ # partial image is deleted (unless rotation fails first) and that a
+ # cleanup failure is swallowed. expected_state is whether the fake
+ # image delete is expected to have been reached.
+ def fake_snapshot(*args, **kwargs):
+ raise test.TestingException()
+
+ self.fake_image_delete_called = False
+
+ def fake_delete(self_, context, image_id):
+ self.fake_image_delete_called = True
+ if raise_during_cleanup:
+ raise Exception()
+
+ self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot)
+ fake_image.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
+
+ inst_obj = self._get_snapshotting_instance()
+ if method == 'snapshot':
+ self.assertRaises(test.TestingException,
+ self.compute.snapshot_instance,
+ self.context, image_id='fakesnap',
+ instance=inst_obj)
+ else:
+ self.assertRaises(test.TestingException,
+ self.compute.backup_instance,
+ self.context, image_id='fakesnap',
+ instance=inst_obj, backup_type='fake',
+ rotation=1)
+
+ self.assertEqual(expected_state, self.fake_image_delete_called)
+ # Task state must be reset after the failure.
+ self._assert_state({'task_state': None})
+
+ @mock.patch.object(nova.compute.manager.ComputeManager, '_rotate_backups')
+ def test_backup_fails(self, mock_rotate):
+ self._test_snapshot_fails(False, 'backup')
+
+ @mock.patch.object(nova.compute.manager.ComputeManager, '_rotate_backups')
+ def test_backup_fails_cleanup_ignores_exception(self, mock_rotate):
+ self._test_snapshot_fails(True, 'backup')
+
+ @mock.patch.object(nova.compute.manager.ComputeManager, '_rotate_backups')
+ @mock.patch.object(nova.compute.manager.ComputeManager,
+ '_do_snapshot_instance')
+ def test_backup_fails_rotate_backup(self, mock_snap, mock_rotate):
+ # Rotation itself raising means the image delete is never reached.
+ mock_rotate.side_effect = test.TestingException()
+ self._test_snapshot_fails(True, 'backup', False)
+
+ def test_snapshot_fails(self):
+ self._test_snapshot_fails(False, 'snapshot')
+
+ def test_snapshot_fails_cleanup_ignores_exception(self):
+ self._test_snapshot_fails(True, 'snapshot')
+
+ def _test_snapshot_deletes_image_on_failure(self, status, exc):
+ # Shared scenario: driver snapshot raises `exc` while the snapshot
+ # image sits in `status`; callers then assert whether the image was
+ # deleted via self.fake_image_delete_called.
+ self.fake_image_delete_called = False
+
+ def fake_show(self_, context, image_id, **kwargs):
+ self.assertEqual('fakesnap', image_id)
+ image = {'id': image_id,
+ 'status': status}
+ return image
+
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+
+ def fake_delete(self_, context, image_id):
+ self.fake_image_delete_called = True
+ self.assertEqual('fakesnap', image_id)
+
+ self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
+
+ def fake_snapshot(*args, **kwargs):
+ raise exc
+
+ self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot)
+
+ fake_image.stub_out_image_service(self.stubs)
+
+ inst_obj = self._get_snapshotting_instance()
+
+ self.compute.snapshot_instance(self.context, image_id='fakesnap',
+ instance=inst_obj)
+
+ def test_snapshot_fails_with_glance_error(self):
+ # ImageNotFound: nothing to clean up, image delete must be skipped.
+ image_not_found = exception.ImageNotFound(image_id='fakesnap')
+ self._test_snapshot_deletes_image_on_failure('error', image_not_found)
+ self.assertFalse(self.fake_image_delete_called)
+ self._assert_state({'task_state': None})
+
+ def test_snapshot_fails_with_task_state_error(self):
+ # Only a snapshot image still in 'error' is cleaned up; an 'active'
+ # image is left alone.
+ deleting_state_error = exception.UnexpectedDeletingTaskStateError(
+ expected=task_states.IMAGE_SNAPSHOT, actual=task_states.DELETING)
+ self._test_snapshot_deletes_image_on_failure(
+ 'error', deleting_state_error)
+ self.assertTrue(self.fake_image_delete_called)
+ self._test_snapshot_deletes_image_on_failure(
+ 'active', deleting_state_error)
+ self.assertFalse(self.fake_image_delete_called)
+
+ def test_snapshot_fails_with_instance_not_found(self):
+ # Same cleanup rule applies when the instance vanished mid-snapshot.
+ instance_not_found = exception.InstanceNotFound(instance_id='uuid')
+ self._test_snapshot_deletes_image_on_failure(
+ 'error', instance_not_found)
+ self.assertTrue(self.fake_image_delete_called)
+ self._test_snapshot_deletes_image_on_failure(
+ 'active', instance_not_found)
+ self.assertFalse(self.fake_image_delete_called)
+
+ def test_snapshot_handles_cases_when_instance_is_deleted(self):
+ # An instance in DELETING task state must not break snapshotting.
+ inst_obj = self._get_snapshotting_instance()
+ inst_obj.task_state = task_states.DELETING
+ inst_obj.save()
+ self.compute.snapshot_instance(self.context, image_id='fakesnap',
+ instance=inst_obj)
+
+ def test_snapshot_handles_cases_when_instance_is_not_found(self):
+ # Destroy the DB record behind the object's back; snapshot_instance
+ # must tolerate the missing instance.
+ inst_obj = self._get_snapshotting_instance()
+ inst_obj2 = objects.Instance.get_by_uuid(self.context, inst_obj.uuid)
+ inst_obj2.destroy()
+ self.compute.snapshot_instance(self.context, image_id='fakesnap',
+ instance=inst_obj)
+
+ def _assert_state(self, state_dict):
+ """Assert state of VM is equal to state passed as parameter."""
+ # Assumes exactly one instance exists in the test database; compares
+ # only the keys present in state_dict (vm_state/task_state/power_state).
+ instances = db.instance_get_all(self.context)
+ self.assertEqual(len(instances), 1)
+
+ if 'vm_state' in state_dict:
+ self.assertEqual(state_dict['vm_state'], instances[0]['vm_state'])
+ if 'task_state' in state_dict:
+ self.assertEqual(state_dict['task_state'],
+ instances[0]['task_state'])
+ if 'power_state' in state_dict:
+ self.assertEqual(state_dict['power_state'],
+ instances[0]['power_state'])
+
+ # --- console output tests -------------------------------------------------
+ def test_console_output(self):
+ # Make sure we can get console output from instance.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ output = self.compute.get_console_output(self.context,
+ instance=instance, tail_length=None)
+ self.assertEqual(output, 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE')
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_console_output_tail(self):
+ # Make sure we can get console output from instance.
+ # tail_length=2 must return only the last two lines.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ output = self.compute.get_console_output(self.context,
+ instance=instance, tail_length=2)
+ self.assertEqual(output, 'ANOTHER\nLAST LINE')
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_console_output_not_implemented(self):
+ # Over RPC the NotImplementedError is wrapped as ExpectedException;
+ # ExceptionHelper unwraps it so the raw exception can be asserted too.
+ def fake_not_implemented(*args, **kwargs):
+ raise NotImplementedError()
+
+ self.stubs.Set(self.compute.driver, 'get_console_output',
+ fake_not_implemented)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.get_console_output, self.context,
+ instance, 0)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(NotImplementedError,
+ self.compute.get_console_output, self.context,
+ instance, 0)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_console_output_instance_not_found(self):
+ # Same wrap/unwrap pattern for InstanceNotFound from the driver.
+ def fake_not_found(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='fake-instance')
+
+ self.stubs.Set(self.compute.driver, 'get_console_output',
+ fake_not_found)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.get_console_output, self.context,
+ instance, 0)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.InstanceNotFound,
+ self.compute.get_console_output, self.context,
+ instance, 0)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ # --- VNC console tests ----------------------------------------------------
+ def test_novnc_vnc_console(self):
+ # Make sure we can a vnc console for an instance.
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=False, group='spice')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ # Try with the full instance
+ console = self.compute.get_vnc_console(self.context, 'novnc',
+ instance=instance)
+ self.assertTrue(console)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_validate_console_port_vnc(self):
+ # validate_console_port must accept a port matching the driver's
+ # reported VNC console port.
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=True, group='spice')
+ instance = self._create_fake_instance_obj()
+
+ def fake_driver_get_console(*args, **kwargs):
+ return ctype.ConsoleVNC(host="fake_host", port=5900)
+
+ self.stubs.Set(self.compute.driver, "get_vnc_console",
+ fake_driver_get_console)
+
+ self.assertTrue(self.compute.validate_console_port(
+ context=self.context, instance=instance, port=5900,
+ console_type="novnc"))
+
+ def test_validate_console_port_spice(self):
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=True, group='spice')
+ instance = self._create_fake_instance_obj()
+
+ def fake_driver_get_console(*args, **kwargs):
+ return ctype.ConsoleSpice(host="fake_host", port=5900, tlsPort=88)
+
+ self.stubs.Set(self.compute.driver, "get_spice_console",
+ fake_driver_get_console)
+
+ self.assertTrue(self.compute.validate_console_port(
+ context=self.context, instance=instance, port=5900,
+ console_type="spice-html5"))
+
+ def test_validate_console_port_rdp(self):
+ self.flags(enabled=True, group='rdp')
+ instance = self._create_fake_instance_obj()
+
+ def fake_driver_get_console(*args, **kwargs):
+ return ctype.ConsoleRDP(host="fake_host", port=5900)
+
+ self.stubs.Set(self.compute.driver, "get_rdp_console",
+ fake_driver_get_console)
+
+ self.assertTrue(self.compute.validate_console_port(
+ context=self.context, instance=instance, port=5900,
+ console_type="rdp-html5"))
+
+ def test_validate_console_port_wrong_port(self):
+ # A non-matching (non-numeric) port must be rejected, not raise.
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=True, group='spice')
+ instance = self._create_fake_instance_obj()
+
+ def fake_driver_get_console(*args, **kwargs):
+ return ctype.ConsoleSpice(host="fake_host", port=5900, tlsPort=88)
+
+ self.stubs.Set(self.compute.driver, "get_vnc_console",
+ fake_driver_get_console)
+
+ self.assertFalse(self.compute.validate_console_port(
+ context=self.context, instance=instance, port="wrongport",
+ console_type="spice-html5"))
+
+ def test_xvpvnc_vnc_console(self):
+ # Make sure we can a vnc console for an instance.
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=False, group='spice')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ console = self.compute.get_vnc_console(self.context, 'xvpvnc',
+ instance=instance)
+ self.assertTrue(console)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_invalid_vnc_console_type(self):
+ # Raise useful error if console type is an unrecognised string.
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=False, group='spice')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.get_vnc_console,
+ self.context, 'invalid', instance=instance)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.ConsoleTypeInvalid,
+ self.compute.get_vnc_console,
+ self.context, 'invalid', instance=instance)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_missing_vnc_console_type(self):
+ # Raise useful error is console type is None.
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=False, group='spice')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.get_vnc_console,
+ self.context, None, instance=instance)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.ConsoleTypeInvalid,
+ self.compute.get_vnc_console,
+ self.context, None, instance=instance)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_get_vnc_console_not_implemented(self):
+ # Driver without VNC support: NotImplementedError surfaces, wrapped
+ # as ExpectedException over RPC.
+ self.stubs.Set(self.compute.driver, 'get_vnc_console',
+ fake_not_implemented)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.get_vnc_console,
+ self.context, 'novnc', instance=instance)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(NotImplementedError,
+ self.compute.get_vnc_console,
+ self.context, 'novnc', instance=instance)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ # --- SPICE and RDP console tests (mirror the VNC cases above) -------------
+ def test_spicehtml5_spice_console(self):
+ # Make sure we can a spice console for an instance.
+ self.flags(vnc_enabled=False)
+ self.flags(enabled=True, group='spice')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ # Try with the full instance
+ console = self.compute.get_spice_console(self.context, 'spice-html5',
+ instance=instance)
+ self.assertTrue(console)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_invalid_spice_console_type(self):
+ # Raise useful error if console type is an unrecognised string
+ self.flags(vnc_enabled=False)
+ self.flags(enabled=True, group='spice')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.get_spice_console,
+ self.context, 'invalid', instance=instance)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.ConsoleTypeInvalid,
+ self.compute.get_spice_console,
+ self.context, 'invalid', instance=instance)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_missing_spice_console_type(self):
+ # Raise useful error is console type is None
+ self.flags(vnc_enabled=False)
+ self.flags(enabled=True, group='spice')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.get_spice_console,
+ self.context, None, instance=instance)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.ConsoleTypeInvalid,
+ self.compute.get_spice_console,
+ self.context, None, instance=instance)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_rdphtml5_rdp_console(self):
+ # Make sure we can a rdp console for an instance.
+ self.flags(vnc_enabled=False)
+ self.flags(enabled=True, group='rdp')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ # Try with the full instance
+ console = self.compute.get_rdp_console(self.context, 'rdp-html5',
+ instance=instance)
+ self.assertTrue(console)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_invalid_rdp_console_type(self):
+ # Raise useful error if console type is an unrecognised string
+ self.flags(vnc_enabled=False)
+ self.flags(enabled=True, group='rdp')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.get_rdp_console,
+ self.context, 'invalid', instance=instance)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.ConsoleTypeInvalid,
+ self.compute.get_rdp_console,
+ self.context, 'invalid', instance=instance)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_missing_rdp_console_type(self):
+ # Raise useful error is console type is None
+ self.flags(vnc_enabled=False)
+ self.flags(enabled=True, group='rdp')
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertRaises(messaging.ExpectedException,
+ self.compute.get_rdp_console,
+ self.context, None, instance=instance)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.ConsoleTypeInvalid,
+ self.compute.get_rdp_console,
+ self.context, None, instance=instance)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ # --- console error-translation tests --------------------------------------
+ # A driver-level InstanceNotFound on a BUILDING instance must surface
+ # as InstanceNotReady; a disabled console service must surface as
+ # ConsoleTypeUnavailable. One test per console flavour.
+ def test_vnc_console_instance_not_ready(self):
+ self.flags(vnc_enabled=True)
+ self.flags(enabled=False, group='spice')
+ instance = self._create_fake_instance_obj(
+ params={'vm_state': vm_states.BUILDING})
+
+ def fake_driver_get_console(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ self.stubs.Set(self.compute.driver, "get_vnc_console",
+ fake_driver_get_console)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute.get_vnc_console, self.context, 'novnc',
+ instance=instance)
+
+ def test_spice_console_instance_not_ready(self):
+ self.flags(vnc_enabled=False)
+ self.flags(enabled=True, group='spice')
+ instance = self._create_fake_instance_obj(
+ params={'vm_state': vm_states.BUILDING})
+
+ def fake_driver_get_console(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ self.stubs.Set(self.compute.driver, "get_spice_console",
+ fake_driver_get_console)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute.get_spice_console, self.context, 'spice-html5',
+ instance=instance)
+
+ def test_rdp_console_instance_not_ready(self):
+ self.flags(vnc_enabled=False)
+ self.flags(enabled=True, group='rdp')
+ instance = self._create_fake_instance_obj(
+ params={'vm_state': vm_states.BUILDING})
+
+ def fake_driver_get_console(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ self.stubs.Set(self.compute.driver, "get_rdp_console",
+ fake_driver_get_console)
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute.get_rdp_console, self.context, 'rdp-html5',
+ instance=instance)
+
+ def test_vnc_console_disabled(self):
+ self.flags(vnc_enabled=False)
+ instance = self._create_fake_instance_obj(
+ params={'vm_state': vm_states.BUILDING})
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.ConsoleTypeUnavailable,
+ self.compute.get_vnc_console, self.context, 'novnc',
+ instance=instance)
+
+ def test_spice_console_disabled(self):
+ self.flags(enabled=False, group='spice')
+ instance = self._create_fake_instance_obj(
+ params={'vm_state': vm_states.BUILDING})
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.ConsoleTypeUnavailable,
+ self.compute.get_spice_console, self.context, 'spice-html5',
+ instance=instance)
+
+ def test_rdp_console_disabled(self):
+ self.flags(enabled=False, group='rdp')
+ instance = self._create_fake_instance_obj(
+ params={'vm_state': vm_states.BUILDING})
+
+ self.compute = utils.ExceptionHelper(self.compute)
+
+ self.assertRaises(exception.ConsoleTypeUnavailable,
+ self.compute.get_rdp_console, self.context, 'rdp-html5',
+ instance=instance)
+
+ def test_diagnostics(self):
+ # Make sure we can get diagnostics for an instance.
+ # Expected values mirror what the fake virt driver reports.
+ expected_diagnostic = {'cpu0_time': 17300000000,
+ 'memory': 524288,
+ 'vda_errors': -1,
+ 'vda_read': 262144,
+ 'vda_read_req': 112,
+ 'vda_write': 5778432,
+ 'vda_write_req': 488,
+ 'vnet1_rx': 2070139,
+ 'vnet1_rx_drop': 0,
+ 'vnet1_rx_errors': 0,
+ 'vnet1_rx_packets': 26701,
+ 'vnet1_tx': 140208,
+ 'vnet1_tx_drop': 0,
+ 'vnet1_tx_errors': 0,
+ 'vnet1_tx_packets': 662,
+ }
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, [], None,
+ None, True, None, False)
+
+ diagnostics = self.compute.get_diagnostics(self.context,
+ instance=instance)
+ self.assertEqual(diagnostics, expected_diagnostic)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_instance_diagnostics(self):
+ # Make sure we can get diagnostics for an instance.
+ # Exercises the newer structured get_instance_diagnostics API.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ diagnostics = self.compute.get_instance_diagnostics(self.context,
+ instance=instance)
+ expected = {'config_drive': True,
+ 'cpu_details': [{'time': 17300000000}],
+ 'disk_details': [{'errors_count': 0,
+ 'id': 'fake-disk-id',
+ 'read_bytes': 262144,
+ 'read_requests': 112,
+ 'write_bytes': 5778432,
+ 'write_requests': 488}],
+ 'driver': 'fake',
+ 'hypervisor_os': 'fake-os',
+ 'memory_details': {'maximum': 524288, 'used': 0},
+ 'nic_details': [{'mac_address': '01:23:45:67:89:ab',
+ 'rx_drop': 0,
+ 'rx_errors': 0,
+ 'rx_octets': 2070139,
+ 'rx_packets': 26701,
+ 'tx_drop': 0,
+ 'tx_errors': 0,
+ 'tx_octets': 140208,
+ 'tx_packets': 662}],
+ 'state': 'running',
+ 'uptime': 46664,
+ 'version': '1.0'}
+ self.assertEqual(expected, diagnostics)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ # --- fixed-IP notification tests -------------------------------------------
+ # Both tests stub out the network API and the network refresh hooks so
+ # only the notification side effect is observed (expect start + end = 2).
+ def test_add_fixed_ip_usage_notification(self):
+ def dummy(*args, **kwargs):
+ pass
+
+ self.stubs.Set(network_api.API, 'add_fixed_ip_to_instance',
+ dummy)
+ self.stubs.Set(nova.compute.manager.ComputeManager,
+ 'inject_network_info', dummy)
+ self.stubs.Set(nova.compute.manager.ComputeManager,
+ 'reset_network', dummy)
+
+ instance = self._create_fake_instance_obj()
+
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
+ self.compute.add_fixed_ip_to_instance(self.context, network_id=1,
+ instance=instance)
+
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_remove_fixed_ip_usage_notification(self):
+ def dummy(*args, **kwargs):
+ pass
+
+ self.stubs.Set(network_api.API, 'remove_fixed_ip_from_instance',
+ dummy)
+ self.stubs.Set(nova.compute.manager.ComputeManager,
+ 'inject_network_info', dummy)
+ self.stubs.Set(nova.compute.manager.ComputeManager,
+ 'reset_network', dummy)
+
+ instance = self._create_fake_instance_obj()
+
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
+ self.compute.remove_fixed_ip_from_instance(self.context, 1,
+ instance=instance)
+
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ # --- run_instance notification tests ---------------------------------------
+ def test_run_instance_usage_notification(self, request_spec=None):
+ # Ensure run instance generates appropriate usage notification.
+ # Also reused by the two tests below, which pass a request_spec to
+ # vary the image metadata carried in the payload.
+ request_spec = request_spec or {}
+ instance = self._create_fake_instance_obj()
+ expected_image_name = request_spec.get('image', {}).get('name', '')
+ self.compute.run_instance(self.context, instance, request_spec,
+ {}, [], None, None, True, None, False)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ instance.refresh()
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type, 'compute.instance.create.start')
+ # The last event is the one with the sugar in it.
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.priority, 'INFO')
+ self.assertEqual(msg.event_type, 'compute.instance.create.end')
+ payload = msg.payload
+ self.assertEqual(payload['tenant_id'], self.project_id)
+ self.assertEqual(expected_image_name, payload['image_name'])
+ self.assertEqual(payload['user_id'], self.user_id)
+ self.assertEqual(payload['instance_id'], instance['uuid'])
+ self.assertEqual(payload['instance_type'], 'm1.tiny')
+ type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+ self.assertEqual(str(payload['instance_type_id']), str(type_id))
+ flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
+ self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
+ self.assertEqual(payload['state'], 'active')
+ self.assertIn('display_name', payload)
+ self.assertIn('created_at', payload)
+ self.assertIn('launched_at', payload)
+ self.assertIn('fixed_ips', payload)
+ self.assertTrue(payload['launched_at'])
+ image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
+ self.assertEqual(payload['image_ref_url'], image_ref_url)
+ self.assertEqual('Success', payload['message'])
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_run_instance_image_usage_notification(self):
+ request_spec = {'image': {'name': 'fake_name', 'key': 'value'}}
+ self.test_run_instance_usage_notification(request_spec=request_spec)
+
+ def test_run_instance_usage_notification_volume_meta(self):
+ # Volume's image metadata won't contain the image name
+ request_spec = {'image': {'key': 'value'}}
+ self.test_run_instance_usage_notification(request_spec=request_spec)
+
+ def test_run_instance_end_notification_on_abort(self):
+ # Test that an end notif is sent if the build is aborted
+ instance = self._create_fake_instance_obj()
+ instance_uuid = instance['uuid']
+
+ def build_inst_abort(*args, **kwargs):
+ raise exception.BuildAbortException(reason="already deleted",
+ instance_uuid=instance_uuid)
+
+ self.stubs.Set(self.compute, '_build_instance', build_inst_abort)
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type, 'compute.instance.create.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+
+ # Abort still ends with a create.end (INFO) carrying the reason.
+ self.assertEqual(msg.event_type, 'compute.instance.create.end')
+ self.assertEqual('INFO', msg.priority)
+ payload = msg.payload
+ message = payload['message']
+ self.assertNotEqual(-1, message.find("already deleted"))
+
+ def test_run_instance_error_notification_on_reschedule(self):
+ # Test that error notif is sent if the build got rescheduled
+ instance = self._create_fake_instance_obj()
+ instance_uuid = instance['uuid']
+
+ def build_inst_fail(*args, **kwargs):
+ raise exception.RescheduledException(instance_uuid=instance_uuid,
+ reason="something bad happened")
+
+ self.stubs.Set(self.compute, '_build_instance', build_inst_fail)
+
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+
+ self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type, 'compute.instance.create.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+
+ self.assertEqual(msg.event_type, 'compute.instance.create.error')
+ self.assertEqual('ERROR', msg.priority)
+ payload = msg.payload
+ message = payload['message']
+ self.assertNotEqual(-1, message.find("something bad happened"))
+
+ def test_run_instance_error_notification_on_failure(self):
+ # Test that error notif is sent if build fails hard
+ instance = self._create_fake_instance_obj()
+
+ def build_inst_fail(*args, **kwargs):
+ raise test.TestingException("i'm dying")
+
+ self.stubs.Set(self.compute, '_build_instance', build_inst_fail)
+
+ # Unlike the reschedule case, a hard failure re-raises to the caller.
+ self.assertRaises(test.TestingException, self.compute.run_instance,
+ self.context, instance, {}, {}, [], None, None, True, None,
+ False)
+
+ self.assertTrue(len(fake_notifier.NOTIFICATIONS) >= 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type, 'compute.instance.create.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+
+ self.assertEqual(msg.event_type, 'compute.instance.create.error')
+ self.assertEqual('ERROR', msg.priority)
+ payload = msg.payload
+ message = payload['message']
+ self.assertNotEqual(-1, message.find("i'm dying"))
+
+ def test_terminate_usage_notification(self):
+ # Ensure terminate_instance generates correct usage notification.
+ # Time is overridden twice so created_at (old_time) and
+ # terminated_at/deleted_at (cur_time) are deterministic.
+ old_time = datetime.datetime(2012, 4, 1)
+ cur_time = datetime.datetime(2012, 12, 21, 12, 21)
+
+ timeutils.set_time_override(old_time)
+
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ # Discard the create notifications; only delete/shutdown matter here.
+ fake_notifier.NOTIFICATIONS = []
+ timeutils.set_time_override(cur_time)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 4)
+
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.priority, 'INFO')
+ self.assertEqual(msg.event_type, 'compute.instance.delete.start')
+ msg1 = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg1.event_type, 'compute.instance.shutdown.start')
+ msg1 = fake_notifier.NOTIFICATIONS[2]
+ self.assertEqual(msg1.event_type, 'compute.instance.shutdown.end')
+ msg1 = fake_notifier.NOTIFICATIONS[3]
+ self.assertEqual(msg1.event_type, 'compute.instance.delete.end')
+ payload = msg1.payload
+ self.assertEqual(payload['tenant_id'], self.project_id)
+ self.assertEqual(payload['user_id'], self.user_id)
+ self.assertEqual(payload['instance_id'], instance['uuid'])
+ self.assertEqual(payload['instance_type'], 'm1.tiny')
+ type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+ self.assertEqual(str(payload['instance_type_id']), str(type_id))
+ flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
+ self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
+ self.assertIn('display_name', payload)
+ self.assertIn('created_at', payload)
+ self.assertIn('launched_at', payload)
+ self.assertIn('terminated_at', payload)
+ self.assertIn('deleted_at', payload)
+ self.assertEqual(payload['terminated_at'], timeutils.strtime(cur_time))
+ self.assertEqual(payload['deleted_at'], timeutils.strtime(cur_time))
+ image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
+ self.assertEqual(payload['image_ref_url'], image_ref_url)
+
+ def test_run_instance_existing(self):
+ # Ensure failure when running an instance that already exists.
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, instance, {}, {}, [], None,
+ None, True, None, False)
+ # A second run of the same instance must raise InstanceExists.
+ self.assertRaises(exception.InstanceExists,
+ self.compute.run_instance,
+ self.context, instance, {}, {}, [], None, None, True,
+ None, False)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_run_instance_queries_macs(self):
+ # run_instance should ask the driver for node mac addresses and pass
+ # that to the network_api in use.
+ fake_network.unset_stub_network_methods(self.stubs)
+ instance = self._create_fake_instance_obj()
+
+ macs = set(['01:23:45:67:89:ab'])
+ # Record an allocate_for_instance call that expects macs=macs,
+ # proving the driver-provided MACs reach the network API.
+ self.mox.StubOutWithMock(self.compute.network_api,
+ "allocate_for_instance")
+ self.compute.network_api.allocate_for_instance(
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ requested_networks=None,
+ vpn=False, macs=macs,
+ security_groups=[], dhcp_options=None).AndReturn(
+ fake_network.fake_get_instance_nw_info(self.stubs, 1, 1))
+
+ # The driver must be asked for the MACs for this instance.
+ self.mox.StubOutWithMock(self.compute.driver, "macs_for_instance")
+ self.compute.driver.macs_for_instance(
+ mox.IsA(instance_obj.Instance)).AndReturn(macs)
+ self.mox.ReplayAll()
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+
+ def _create_server_group(self):
+ # Helper: create an anti-affinity server group with one member on
+ # this compute host, and verify the servergroup.create notification.
+ group_instance = self._create_fake_instance_obj(
+ params=dict(host=self.compute.host))
+
+ instance_group = objects.InstanceGroup(self.context)
+ instance_group.user_id = self.user_id
+ instance_group.project_id = self.project_id
+ instance_group.name = 'messi'
+ instance_group.uuid = str(uuid.uuid4())
+ instance_group.members = [group_instance.uuid]
+ instance_group.policies = ['anti-affinity']
+ # Clear notifications so create() is the only event asserted below.
+ fake_notifier.NOTIFICATIONS = []
+ instance_group.create()
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(instance_group.name, msg.payload['name'])
+ self.assertEqual(instance_group.members, msg.payload['members'])
+ self.assertEqual(instance_group.policies, msg.payload['policies'])
+ self.assertEqual(instance_group.project_id, msg.payload['project_id'])
+ self.assertEqual(instance_group.uuid, msg.payload['uuid'])
+ self.assertEqual('servergroup.create', msg.event_type)
+ return instance_group
+
+ def _run_instance_reschedules_on_anti_affinity_violation(self, group,
+ hint):
+ # Helper: building an instance with a scheduler group hint that
+ # violates anti-affinity must raise RescheduledException.
+ # NOTE(review): the 'group' parameter is unused in the body; it is
+ # kept for the callers' symmetry (they pass group.name / group.uuid
+ # as the hint).
+ instance = self._create_fake_instance_obj()
+ filter_properties = {'scheduler_hints': {'group': hint}}
+ self.assertRaises(exception.RescheduledException,
+ self.compute._build_instance,
+ self.context, {}, filter_properties,
+ [], None, None, True, None, instance,
+ None, False)
+
+ def test_run_instance_reschedules_on_anti_affinity_violation_by_name(self):
+ # Anti-affinity violation detected when the hint is the group name.
+ group = self._create_server_group()
+ self._run_instance_reschedules_on_anti_affinity_violation(group,
+ group.name)
+
+ def test_run_instance_reschedules_on_anti_affinity_violation_by_uuid(self):
+ # Anti-affinity violation detected when the hint is the group uuid.
+ group = self._create_server_group()
+ self._run_instance_reschedules_on_anti_affinity_violation(group,
+ group.uuid)
+
+ def test_instance_set_to_error_on_uncaught_exception(self):
+ # Test that instance is set to error state when exception is raised.
+ instance = self._create_fake_instance_obj()
+
+ # allocate_for_instance raises a RemoteError; deallocate may then be
+ # called any number of times during cleanup (MultipleTimes below).
+ self.mox.StubOutWithMock(self.compute.network_api,
+ "allocate_for_instance")
+ self.mox.StubOutWithMock(self.compute.network_api,
+ "deallocate_for_instance")
+ self.compute.network_api.allocate_for_instance(
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ requested_networks=None,
+ vpn=False, macs=None,
+ security_groups=[], dhcp_options=None
+ ).AndRaise(messaging.RemoteError())
+ self.compute.network_api.deallocate_for_instance(
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ requested_networks=None).MultipleTimes()
+
+ fake_network.unset_stub_network_methods(self.stubs)
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(messaging.RemoteError,
+ self.compute.run_instance,
+ self.context, instance, {}, {}, None, None, None,
+ True, None, False)
+
+ # The uncaught error must have pushed the instance to ERROR.
+ instance.refresh()
+ self.assertEqual(vm_states.ERROR, instance.vm_state)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_delete_instance_keeps_net_on_power_off_fail(self):
+ # If driver.destroy fails with InstancePowerOffFailure, network
+ # resources must NOT be deallocated (instance may still be running).
+ self.mox.StubOutWithMock(self.compute.driver, 'destroy')
+ self.mox.StubOutWithMock(self.compute, '_deallocate_network')
+ exp = exception.InstancePowerOffFailure(reason='')
+ self.compute.driver.destroy(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg()).AndRaise(exp)
+ # mox will detect if _deallocate_network gets called unexpectedly
+ self.mox.ReplayAll()
+ instance = self._create_fake_instance_obj()
+ self.assertRaises(exception.InstancePowerOffFailure,
+ self.compute._delete_instance,
+ self.context,
+ instance,
+ [],
+ self.none_quotas)
+
+ def test_delete_instance_loses_net_on_other_fail(self):
+ # For any other destroy failure, network IS deallocated — the
+ # expectation on _deallocate_network below must be satisfied.
+ self.mox.StubOutWithMock(self.compute.driver, 'destroy')
+ self.mox.StubOutWithMock(self.compute, '_deallocate_network')
+ exp = test.TestingException()
+ self.compute.driver.destroy(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg()).AndRaise(exp)
+ self.compute._deallocate_network(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.mox.ReplayAll()
+ instance = self._create_fake_instance_obj()
+ self.assertRaises(test.TestingException,
+ self.compute._delete_instance,
+ self.context,
+ instance,
+ [],
+ self.none_quotas)
+
+ def test_delete_instance_deletes_console_auth_tokens(self):
+ # Deleting an instance with VNC enabled must also delete its
+ # consoleauth tokens (non-cells path: consoleauth_rpcapi).
+ instance = self._create_fake_instance_obj()
+ self.flags(vnc_enabled=True)
+
+ self.tokens_deleted = False
+
+ def fake_delete_tokens(*args, **kwargs):
+ self.tokens_deleted = True
+
+ cauth_rpcapi = self.compute.consoleauth_rpcapi
+ self.stubs.Set(cauth_rpcapi, 'delete_tokens_for_instance',
+ fake_delete_tokens)
+
+ self.compute._delete_instance(self.context, instance, [],
+ self.none_quotas)
+
+ self.assertTrue(self.tokens_deleted)
+
+ def test_delete_instance_deletes_console_auth_tokens_cells(self):
+ # Same as the previous test, but with cells enabled the deletion
+ # goes through cells_rpcapi.consoleauth_delete_tokens instead.
+ instance = self._create_fake_instance_obj()
+ self.flags(vnc_enabled=True)
+ self.flags(enable=True, group='cells')
+
+ self.tokens_deleted = False
+
+ def fake_delete_tokens(*args, **kwargs):
+ self.tokens_deleted = True
+
+ cells_rpcapi = self.compute.cells_rpcapi
+ self.stubs.Set(cells_rpcapi, 'consoleauth_delete_tokens',
+ fake_delete_tokens)
+
+ self.compute._delete_instance(self.context, instance,
+ [], self.none_quotas)
+
+ self.assertTrue(self.tokens_deleted)
+
+ def test_instance_termination_exception_sets_error(self):
+ """Test that we handle InstanceTerminationFailure
+ which is propagated up from the underlying driver.
+ """
+ instance = self._create_fake_instance_obj()
+
+ def fake_delete_instance(context, instance, bdms,
+ reservations=None):
+ raise exception.InstanceTerminationFailure(reason='')
+
+ self.stubs.Set(self.compute, '_delete_instance',
+ fake_delete_instance)
+
+ self.assertRaises(exception.InstanceTerminationFailure,
+ self.compute.terminate_instance,
+ self.context,
+ instance, [], [])
+ # Re-fetch from the DB to confirm the persisted state is ERROR.
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.assertEqual(instance['vm_state'], vm_states.ERROR)
+
+ def test_network_is_deallocated_on_spawn_failure(self):
+ # When a spawn fails the network must be deallocated.
+ instance = self._create_fake_instance_obj()
+
+ # Failing _prep_block_device makes spawn fail after networking has
+ # been set up, exercising the deallocation path.
+ self.mox.StubOutWithMock(self.compute, "_prep_block_device")
+ self.compute._prep_block_device(
+ mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).AndRaise(messaging.RemoteError('', '', ''))
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(messaging.RemoteError,
+ self.compute.run_instance,
+ self.context, instance, {}, {}, None, None, None,
+ True, None, False)
+
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_lock(self):
+ # FIXME(comstud): This test is such crap. This is testing
+ # compute API lock functionality in a test class for the compute
+ # manager by running an instance. Hello? We should just have
+ # unit tests in test_compute_api that test the check_instance_lock
+ # decorator and make sure that appropriate compute_api methods
+ # have the decorator.
+ instance = self._create_fake_instance_obj()
+ instance_uuid = instance['uuid']
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+
+ non_admin_context = context.RequestContext(None,
+ None,
+ is_admin=False)
+
+ def check_task_state(task_state):
+ # Assert the persisted task_state matches the expectation.
+ instance = db.instance_get_by_uuid(self.context, instance_uuid)
+ self.assertEqual(instance['task_state'], task_state)
+
+ instance.refresh()
+
+ # should fail with locked nonadmin context
+ self.compute_api.lock(self.context, instance)
+ self.assertRaises(exception.InstanceIsLocked,
+ self.compute_api.reboot,
+ non_admin_context, instance, 'SOFT')
+ check_task_state(None)
+
+ # should fail with invalid task state
+ self.compute_api.unlock(self.context, instance)
+ instance.task_state = task_states.REBOOTING
+ instance.save()
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.reboot,
+ non_admin_context, instance, 'SOFT')
+ check_task_state(task_states.REBOOTING)
+
+ # should succeed with admin context
+ instance.task_state = None
+ instance.save()
+ self.compute_api.reboot(self.context, instance, 'SOFT')
+ check_task_state(task_states.REBOOTING)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def _check_locked_by(self, instance_uuid, locked_by):
+ # Helper: assert the DB row's locked/locked_by state; locked_by is
+ # None for unlocked, otherwise the locker ('owner' or 'admin').
+ instance = db.instance_get_by_uuid(self.context, instance_uuid)
+ self.assertEqual(instance['locked'], locked_by is not None)
+ self.assertEqual(instance['locked_by'], locked_by)
+ return instance
+
+ def test_override_owner_lock(self):
+ # FIXME(comstud): This test is such crap. This is testing
+ # compute API lock functionality in a test class for the compute
+ # manager by running an instance. Hello? We should just have
+ # unit tests in test_compute_api that test the check_instance_lock
+ # decorator and make sure that appropriate compute_api methods
+ # have the decorator.
+ admin_context = context.RequestContext('admin-user',
+ 'admin-project',
+ is_admin=True)
+
+ instance = self._create_fake_instance_obj()
+ instance_uuid = instance['uuid']
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+
+ # Ensure that an admin can override the owner lock
+ self.compute_api.lock(self.context, instance)
+ self._check_locked_by(instance_uuid, 'owner')
+ self.compute_api.unlock(admin_context, instance)
+ self._check_locked_by(instance_uuid, None)
+
+ def test_upgrade_owner_lock(self):
+ # FIXME(comstud): This test is such crap. This is testing
+ # compute API lock functionality in a test class for the compute
+ # manager by running an instance. Hello? We should just have
+ # unit tests in test_compute_api that test the check_instance_lock
+ # decorator and make sure that appropriate compute_api methods
+ # have the decorator.
+ admin_context = context.RequestContext('admin-user',
+ 'admin-project',
+ is_admin=True)
+
+ instance = self._create_fake_instance_obj()
+ instance_uuid = instance['uuid']
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+
+ # Ensure that an admin can upgrade the lock and that
+ # the owner can no longer unlock
+ self.compute_api.lock(self.context, instance)
+ self.compute_api.lock(admin_context, instance)
+ self._check_locked_by(instance_uuid, 'admin')
+ instance.refresh()
+ # Owner unlocking an admin lock is a policy violation.
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.compute_api.unlock,
+ self.context, instance)
+ self._check_locked_by(instance_uuid, 'admin')
+ self.compute_api.unlock(admin_context, instance)
+ self._check_locked_by(instance_uuid, None)
+
+ def _test_state_revert(self, instance, operation, pre_task_state,
+ kwargs=None, vm_state=None):
+ # Helper: set a pre-operation task_state, force the named compute
+ # manager operation to fail, and verify task_state reverts to None
+ # (and optionally that vm_state matches the expected value).
+ if kwargs is None:
+ kwargs = {}
+
+ # The API would have set task_state, so do that here to test
+ # that the state gets reverted on failure
+ db.instance_update(self.context, instance['uuid'],
+ {"task_state": pre_task_state})
+
+ orig_elevated = self.context.elevated
+ orig_notify = self.compute._notify_about_instance_usage
+
+ def _get_an_exception(*args, **kwargs):
+ raise test.TestingException()
+
+ # Make the operation fail early via context.elevated / usage notify.
+ self.stubs.Set(self.context, 'elevated', _get_an_exception)
+ self.stubs.Set(self.compute,
+ '_notify_about_instance_usage', _get_an_exception)
+
+ func = getattr(self.compute, operation)
+
+ self.assertRaises(test.TestingException,
+ func, self.context, instance=instance, **kwargs)
+ # self.context.elevated() is called in tearDown()
+ self.stubs.Set(self.context, 'elevated', orig_elevated)
+ self.stubs.Set(self.compute,
+ '_notify_about_instance_usage', orig_notify)
+
+ # Fetch the instance's task_state and make sure it reverted to None.
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ if vm_state:
+ self.assertEqual(instance.vm_state, vm_state)
+ self.assertIsNone(instance["task_state"])
+
+ def test_state_revert(self):
+ # ensure that task_state is reverted after a failed operation.
+ migration = objects.Migration()
+ migration.instance_uuid = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
+ migration.new_instance_type_id = '1'
+
+ # Each entry: (manager method, pre-set task_state, optional kwargs,
+ # optional expected vm_state after failure) — see _test_state_revert.
+ actions = [
+ ("reboot_instance", task_states.REBOOTING,
+ {'block_device_info': [],
+ 'reboot_type': 'SOFT'}),
+ ("stop_instance", task_states.POWERING_OFF),
+ ("start_instance", task_states.POWERING_ON),
+ ("terminate_instance", task_states.DELETING,
+ {'bdms': [],
+ 'reservations': []},
+ vm_states.ERROR),
+ ("soft_delete_instance", task_states.SOFT_DELETING,
+ {'reservations': []}),
+ ("restore_instance", task_states.RESTORING),
+ ("rebuild_instance", task_states.REBUILDING,
+ {'orig_image_ref': None,
+ 'image_ref': None,
+ 'injected_files': [],
+ 'new_pass': '',
+ 'orig_sys_metadata': {},
+ 'bdms': [],
+ 'recreate': False,
+ 'on_shared_storage': False}),
+ ("set_admin_password", task_states.UPDATING_PASSWORD,
+ {'new_pass': None}),
+ ("rescue_instance", task_states.RESCUING,
+ {'rescue_password': None}),
+ ("unrescue_instance", task_states.UNRESCUING),
+ ("revert_resize", task_states.RESIZE_REVERTING,
+ {'migration': migration,
+ 'reservations': []}),
+ ("prep_resize", task_states.RESIZE_PREP,
+ {'image': {},
+ 'instance_type': {},
+ 'reservations': [],
+ 'request_spec': {},
+ 'filter_properties': {},
+ 'node': None}),
+ ("resize_instance", task_states.RESIZE_PREP,
+ {'migration': migration,
+ 'image': {},
+ 'reservations': [],
+ 'instance_type': {}}),
+ ("pause_instance", task_states.PAUSING),
+ ("unpause_instance", task_states.UNPAUSING),
+ ("suspend_instance", task_states.SUSPENDING),
+ ("resume_instance", task_states.RESUMING),
+ ]
+
+ self._stub_out_resize_network_methods()
+ instance = self._create_fake_instance_obj()
+ for operation in actions:
+ self._test_state_revert(instance, *operation)
+
+ def _ensure_quota_reservations_committed(self, instance):
+ """Mock up commit of quota reservations."""
+ # NOTE: this calls mox.ReplayAll(), so all expectations must be
+ # recorded before invoking this helper.
+ reservations = list('fake_res')
+ self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
+ nova.quota.QUOTAS.commit(mox.IgnoreArg(), reservations,
+ project_id=instance['project_id'],
+ user_id=instance['user_id'])
+ self.mox.ReplayAll()
+ return reservations
+
+ def _ensure_quota_reservations_rolledback(self, instance):
+ """Mock up rollback of quota reservations."""
+ # NOTE: this calls mox.ReplayAll(), so all expectations must be
+ # recorded before invoking this helper.
+ reservations = list('fake_res')
+ self.mox.StubOutWithMock(nova.quota.QUOTAS, 'rollback')
+ nova.quota.QUOTAS.rollback(mox.IgnoreArg(), reservations,
+ project_id=instance['project_id'],
+ user_id=instance['user_id'])
+ self.mox.ReplayAll()
+ return reservations
+
+ def test_quotas_successful_delete(self):
+ # A successful terminate commits the quota reservations.
+ instance = self._create_fake_instance_obj()
+ resvs = self._ensure_quota_reservations_committed(instance)
+ self.compute.terminate_instance(self.context, instance,
+ bdms=[], reservations=resvs)
+
+ def test_quotas_failed_delete(self):
+ # A failed terminate rolls the quota reservations back.
+ instance = self._create_fake_instance_obj()
+
+ def fake_shutdown_instance(*args, **kwargs):
+ raise test.TestingException()
+
+ self.stubs.Set(self.compute, '_shutdown_instance',
+ fake_shutdown_instance)
+
+ resvs = self._ensure_quota_reservations_rolledback(instance)
+ self.assertRaises(test.TestingException,
+ self.compute.terminate_instance,
+ self.context, instance,
+ bdms=[], reservations=resvs)
+
+ def test_quotas_successful_soft_delete(self):
+ # A successful soft delete commits the quota reservations.
+ instance = self._create_fake_instance_obj(
+ params=dict(task_state=task_states.SOFT_DELETING))
+ resvs = self._ensure_quota_reservations_committed(instance)
+ self.compute.soft_delete_instance(self.context, instance,
+ reservations=resvs)
+
+ def test_quotas_failed_soft_delete(self):
+ # A failed soft delete rolls the quota reservations back.
+ instance = self._create_fake_instance_obj(
+ params=dict(task_state=task_states.SOFT_DELETING))
+
+ def fake_soft_delete(*args, **kwargs):
+ raise test.TestingException()
+
+ # delete_types.SOFT_DELETE names the driver method being stubbed.
+ self.stubs.Set(self.compute.driver, delete_types.SOFT_DELETE,
+ fake_soft_delete)
+
+ resvs = self._ensure_quota_reservations_rolledback(instance)
+ self.assertRaises(test.TestingException,
+ self.compute.soft_delete_instance,
+ self.context, instance,
+ reservations=resvs)
+
+ def test_quotas_destroy_of_soft_deleted_instance(self):
+ instance = self._create_fake_instance_obj(
+ params=dict(vm_state=vm_states.SOFT_DELETED))
+ # Termination should be successful, but quota reservations
+ # rolled back because the instance was in SOFT_DELETED state.
+ resvs = self._ensure_quota_reservations_rolledback(instance)
+ self.compute.terminate_instance(self.context, instance,
+ bdms=[], reservations=resvs)
+
+ def _stub_out_resize_network_methods(self):
+ # Helper: no-op the network API calls made during a resize so the
+ # resize tests don't need real networking.
+ def fake(cls, ctxt, instance, *args, **kwargs):
+ pass
+
+ self.stubs.Set(network_api.API, 'setup_networks_on_host', fake)
+ self.stubs.Set(network_api.API, 'migrate_instance_start', fake)
+ self.stubs.Set(network_api.API, 'migrate_instance_finish', fake)
+
+ def _test_finish_resize(self, power_on):
+ # Contrived test to ensure finish_resize doesn't raise anything and
+ # also tests resize from ACTIVE or STOPPED state which determines
+ # if the resized instance is powered on or not.
+ vm_state = None
+ if power_on:
+ vm_state = vm_states.ACTIVE
+ else:
+ vm_state = vm_states.STOPPED
+ params = {'vm_state': vm_state}
+ instance = self._create_fake_instance_obj(params)
+ image = 'fake-image'
+ disk_info = 'fake-disk-info'
+ instance_type = flavors.get_default_flavor()
+ instance.task_state = task_states.RESIZE_PREP
+ instance.save()
+ self.compute.prep_resize(self.context, instance=instance,
+ instance_type=instance_type,
+ image={}, reservations=[], request_spec={},
+ filter_properties={}, node=None)
+ instance.task_state = task_states.RESIZE_MIGRATED
+ instance.save()
+
+ # NOTE(mriedem): make sure prep_resize set old_vm_state correctly
+ sys_meta = instance.system_metadata
+ self.assertIn('old_vm_state', sys_meta)
+ if power_on:
+ self.assertEqual(vm_states.ACTIVE, sys_meta['old_vm_state'])
+ else:
+ self.assertEqual(vm_states.STOPPED, sys_meta['old_vm_state'])
+ migration = objects.Migration.get_by_instance_and_status(
+ self.context.elevated(),
+ instance.uuid, 'pre-migrating')
+
+ # Keep originals so the save side-effects below can delegate to the
+ # real implementations after asserting intermediate state.
+ orig_mig_save = migration.save
+ orig_inst_save = instance.save
+ network_api = self.compute.network_api
+
+ self.mox.StubOutWithMock(network_api, 'setup_networks_on_host')
+ self.mox.StubOutWithMock(network_api,
+ 'migrate_instance_finish')
+ self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
+ self.mox.StubOutWithMock(self.compute,
+ '_notify_about_instance_usage')
+ self.mox.StubOutWithMock(self.compute.driver, 'finish_migration')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_block_device_info')
+ self.mox.StubOutWithMock(migration, 'save')
+ self.mox.StubOutWithMock(instance, 'save')
+ self.mox.StubOutWithMock(self.context, 'elevated')
+
+ # Side-effect checkers: each verifies the in-flight state at the
+ # moment of the corresponding save() call during finish_resize.
+ def _mig_save(context):
+ self.assertEqual(migration.status, 'finished')
+ self.assertEqual(vm_state, instance.vm_state)
+ self.assertEqual(task_states.RESIZE_FINISH, instance.task_state)
+ orig_mig_save()
+
+ def _instance_save1():
+ self.assertEqual(instance_type['id'],
+ instance.instance_type_id)
+ orig_inst_save()
+
+ def _instance_save2(expected_task_state=None):
+ self.assertEqual(task_states.RESIZE_MIGRATED,
+ expected_task_state)
+ self.assertEqual(task_states.RESIZE_FINISH, instance.task_state)
+ orig_inst_save(expected_task_state=expected_task_state)
+
+ def _instance_save3(expected_task_state=None):
+ self.assertEqual(task_states.RESIZE_FINISH,
+ expected_task_state)
+ self.assertEqual(vm_states.RESIZED, instance.vm_state)
+ self.assertIsNone(instance.task_state)
+ self.assertIn('launched_at', instance.obj_what_changed())
+ orig_inst_save(expected_task_state=expected_task_state)
+
+ # First save to update flavor
+ instance.save().WithSideEffects(_instance_save1)
+
+ network_api.setup_networks_on_host(self.context, instance,
+ 'fake-mini')
+ network_api.migrate_instance_finish(self.context,
+ mox.IsA(dict),
+ mox.IsA(dict))
+
+ self.compute._get_instance_nw_info(
+ self.context, instance).AndReturn('fake-nwinfo1')
+
+ # 2nd save to update task state
+ exp_kwargs = dict(expected_task_state=task_states.RESIZE_MIGRATED)
+ instance.save(**exp_kwargs).WithSideEffects(_instance_save2)
+
+ self.compute._notify_about_instance_usage(
+ self.context, instance, 'finish_resize.start',
+ network_info='fake-nwinfo1')
+
+ self.compute._get_instance_block_device_info(
+ self.context, instance,
+ refresh_conn_info=True).AndReturn('fake-bdminfo')
+ # nova.conf sets the default flavor to m1.small and the test
+ # sets the default flavor to m1.tiny so they should be different
+ # which makes this a resize
+ self.compute.driver.finish_migration(self.context, migration,
+ instance, disk_info,
+ 'fake-nwinfo1',
+ image, True,
+ 'fake-bdminfo', power_on)
+ # Ensure instance status updates is after the migration finish
+ self.context.elevated().AndReturn(self.context)
+ migration.save(self.context).WithSideEffects(_mig_save)
+ exp_kwargs = dict(expected_task_state=task_states.RESIZE_FINISH)
+ instance.save(**exp_kwargs).WithSideEffects(_instance_save3)
+ self.compute._notify_about_instance_usage(
+ self.context, instance, 'finish_resize.end',
+ network_info='fake-nwinfo1')
+ # NOTE(comstud): This actually does the mox.ReplayAll()
+ reservations = self._ensure_quota_reservations_committed(instance)
+
+ self.compute.finish_resize(self.context,
+ migration=migration,
+ disk_info=disk_info, image=image, instance=instance,
+ reservations=reservations)
+
+ def test_finish_resize_from_active(self):
+ # Resize from ACTIVE: resized instance is powered on.
+ self._test_finish_resize(power_on=True)
+
+ def test_finish_resize_from_stopped(self):
+ # Resize from STOPPED: resized instance stays powered off.
+ self._test_finish_resize(power_on=False)
+
+ def test_finish_resize_with_volumes(self):
+ """Contrived test to ensure finish_resize doesn't raise anything."""
+
+ # create instance
+ instance = self._create_fake_instance_obj()
+
+ # create volume
+ volume_id = 'fake'
+ volume = {'instance_uuid': None,
+ 'device_name': None,
+ 'id': volume_id,
+ 'attach_status': 'detached'}
+ bdm = objects.BlockDeviceMapping(
+ **{'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'volume_id': volume_id,
+ 'instance_uuid': instance['uuid'],
+ 'device_name': '/dev/vdc'})
+ bdm.create(self.context)
+
+ # stub out volume attach
+ def fake_volume_get(self, context, volume_id):
+ return volume
+ self.stubs.Set(cinder.API, "get", fake_volume_get)
+
+ def fake_volume_check_attach(self, context, volume_id, instance):
+ pass
+ self.stubs.Set(cinder.API, "check_attach", fake_volume_check_attach)
+
+ def fake_get_volume_encryption_metadata(self, context, volume_id):
+ return {}
+ self.stubs.Set(cinder.API, 'get_volume_encryption_metadata',
+ fake_get_volume_encryption_metadata)
+
+ # Connection data returned by the initial initialize_connection;
+ # used later to verify the bdm kept it across resize_instance.
+ orig_connection_data = {
+ 'target_discovered': True,
+ 'target_iqn': 'iqn.2010-10.org.openstack:%s.1' % volume_id,
+ 'target_portal': '127.0.0.0.1:3260',
+ 'volume_id': volume_id,
+ }
+ connection_info = {
+ 'driver_volume_type': 'iscsi',
+ 'data': orig_connection_data,
+ }
+
+ def fake_init_conn(self, context, volume_id, session):
+ return connection_info
+ self.stubs.Set(cinder.API, "initialize_connection", fake_init_conn)
+
+ def fake_attach(self, context, volume_id, instance_uuid, device_name,
+ mode='rw'):
+ volume['instance_uuid'] = instance_uuid
+ volume['device_name'] = device_name
+ self.stubs.Set(cinder.API, "attach", fake_attach)
+
+ # stub out virt driver attach
+ def fake_get_volume_connector(*args, **kwargs):
+ return {}
+ self.stubs.Set(self.compute.driver, 'get_volume_connector',
+ fake_get_volume_connector)
+
+ def fake_attach_volume(*args, **kwargs):
+ pass
+ self.stubs.Set(self.compute.driver, 'attach_volume',
+ fake_attach_volume)
+
+ # attach volume to instance
+ self.compute.attach_volume(self.context, volume['id'],
+ '/dev/vdc', instance, bdm=bdm)
+
+ # assert volume attached correctly
+ self.assertEqual(volume['device_name'], '/dev/vdc')
+ disk_info = db.block_device_mapping_get_all_by_instance(
+ self.context, instance.uuid)
+ self.assertEqual(len(disk_info), 1)
+ for bdm in disk_info:
+ self.assertEqual(bdm['device_name'], volume['device_name'])
+ self.assertEqual(bdm['connection_info'],
+ jsonutils.dumps(connection_info))
+
+ # begin resize
+ instance_type = flavors.get_default_flavor()
+ instance.task_state = task_states.RESIZE_PREP
+ instance.save()
+ self.compute.prep_resize(self.context, instance=instance,
+ instance_type=instance_type,
+ image={}, reservations=[], request_spec={},
+ filter_properties={}, node=None)
+
+ # fake out detach for prep_resize (and later terminate)
+ def fake_terminate_connection(self, context, volume, connector):
+ connection_info['data'] = None
+ self.stubs.Set(cinder.API, "terminate_connection",
+ fake_terminate_connection)
+
+ self._stub_out_resize_network_methods()
+
+ migration = objects.Migration.get_by_instance_and_status(
+ self.context.elevated(),
+ instance.uuid, 'pre-migrating')
+ self.compute.resize_instance(self.context, instance=instance,
+ migration=migration, image={}, reservations=[],
+ instance_type=jsonutils.to_primitive(instance_type))
+
+ # assert bdm is unchanged
+ disk_info = db.block_device_mapping_get_all_by_instance(
+ self.context, instance.uuid)
+ self.assertEqual(len(disk_info), 1)
+ for bdm in disk_info:
+ self.assertEqual(bdm['device_name'], volume['device_name'])
+ cached_connection_info = jsonutils.loads(bdm['connection_info'])
+ self.assertEqual(cached_connection_info['data'],
+ orig_connection_data)
+ # but connection was terminated
+ self.assertIsNone(connection_info['data'])
+
+ # stub out virt driver finish_migration
+ def fake(*args, **kwargs):
+ pass
+ self.stubs.Set(self.compute.driver, 'finish_migration', fake)
+
+ instance.task_state = task_states.RESIZE_MIGRATED
+ instance.save()
+
+ reservations = self._ensure_quota_reservations_committed(instance)
+
+ # new initialize connection
+ new_connection_data = dict(orig_connection_data)
+ new_iqn = 'iqn.2010-10.org.openstack:%s.2' % volume_id,
+ new_connection_data['target_iqn'] = new_iqn
+
+ def fake_init_conn_with_data(self, context, volume, session):
+ connection_info['data'] = new_connection_data
+ return connection_info
+ self.stubs.Set(cinder.API, "initialize_connection",
+ fake_init_conn_with_data)
+
+ self.compute.finish_resize(self.context,
+ migration=migration,
+ disk_info={}, image={}, instance=instance,
+ reservations=reservations)
+
+ # assert volume attached correctly
+ disk_info = db.block_device_mapping_get_all_by_instance(
+ self.context, instance['uuid'])
+ self.assertEqual(len(disk_info), 1)
+ for bdm in disk_info:
+ self.assertEqual(bdm['connection_info'],
+ jsonutils.dumps(connection_info))
+
+ # stub out detach
+ def fake_detach(self, context, volume_uuid):
+ volume['device_path'] = None
+ volume['instance_uuid'] = None
+ self.stubs.Set(cinder.API, "detach", fake_detach)
+
+ # clean up
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_finish_resize_handles_error(self):
+ # Make sure we don't leave the instance in RESIZE on error.
+
+ def throw_up(*args, **kwargs):
+ raise test.TestingException()
+
+ # NOTE(review): 'fake' is defined but never used in this test.
+ def fake(*args, **kwargs):
+ pass
+
+ self.stubs.Set(self.compute.driver, 'finish_migration', throw_up)
+
+ self._stub_out_resize_network_methods()
+
+ old_flavor_name = 'm1.tiny'
+ instance = self._create_fake_instance_obj(type_name=old_flavor_name)
+ reservations = self._ensure_quota_reservations_rolledback(instance)
+
+ instance_type = flavors.get_flavor_by_name('m1.small')
+
+ self.compute.prep_resize(self.context, instance=instance,
+ instance_type=instance_type,
+ image={}, reservations=reservations,
+ request_spec={}, filter_properties={},
+ node=None)
+
+ migration = objects.Migration.get_by_instance_and_status(
+ self.context.elevated(),
+ instance.uuid, 'pre-migrating')
+
+ instance.refresh()
+ instance.task_state = task_states.RESIZE_MIGRATED
+ instance.save()
+ self.assertRaises(test.TestingException, self.compute.finish_resize,
+ self.context,
+ migration=migration,
+ disk_info={}, image={}, instance=instance,
+ reservations=reservations)
+ instance.refresh()
+ self.assertEqual(vm_states.ERROR, instance.vm_state)
+
+ # On failure the instance must be reverted to the OLD flavor.
+ old_flavor = flavors.get_flavor_by_name(old_flavor_name)
+ self.assertEqual(old_flavor['memory_mb'], instance.memory_mb)
+ self.assertEqual(old_flavor['vcpus'], instance.vcpus)
+ self.assertEqual(old_flavor['root_gb'], instance.root_gb)
+ self.assertEqual(old_flavor['ephemeral_gb'], instance.ephemeral_gb)
+ self.assertEqual(old_flavor['id'], instance.instance_type_id)
+ self.assertNotEqual(instance_type['id'], instance.instance_type_id)
+
+ def test_save_instance_info(self):
+ # _save_instance_info must copy the new flavor's resource fields
+ # onto the instance.
+ old_flavor_name = 'm1.tiny'
+ new_flavor_name = 'm1.small'
+ instance = self._create_fake_instance_obj(type_name=old_flavor_name)
+ new_flavor = flavors.get_flavor_by_name(new_flavor_name)
+
+ self.compute._save_instance_info(instance, new_flavor,
+ instance.system_metadata)
+
+ self.assertEqual(new_flavor['memory_mb'], instance.memory_mb)
+ self.assertEqual(new_flavor['vcpus'], instance.vcpus)
+ self.assertEqual(new_flavor['root_gb'], instance.root_gb)
+ self.assertEqual(new_flavor['ephemeral_gb'], instance.ephemeral_gb)
+ self.assertEqual(new_flavor['id'], instance.instance_type_id)
+ # NOTE(review): the next line duplicates the previous assertion —
+ # harmless but redundant; a follow-up cleanup could drop it.
+ self.assertEqual(new_flavor['id'], instance.instance_type_id)
+
+ def test_rebuild_instance_notification(self):
+ # Ensure notifications on instance migrate/resize.
+ old_time = datetime.datetime(2012, 4, 1)
+ cur_time = datetime.datetime(2012, 12, 21, 12, 21)
+ timeutils.set_time_override(old_time)
+ inst_ref = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context, inst_ref, {}, {}, None, None,
+ None, True, None, False)
+ timeutils.set_time_override(cur_time)
+
+ # Discard run_instance notifications; rebuild with a new image ref
+ # so start/end payloads can be told apart via image_ref_url.
+ fake_notifier.NOTIFICATIONS = []
+ instance = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
+ orig_sys_metadata = db.instance_system_metadata_get(self.context,
+ inst_ref['uuid'])
+ image_ref = instance["image_ref"]
+ new_image_ref = image_ref + '-new_image_ref'
+ db.instance_update(self.context, inst_ref['uuid'],
+ {'image_ref': new_image_ref})
+
+ password = "new_password"
+
+ inst_ref.task_state = task_states.REBUILDING
+ inst_ref.save()
+ self.compute.rebuild_instance(self.context,
+ inst_ref,
+ image_ref, new_image_ref,
+ injected_files=[],
+ new_pass=password,
+ orig_sys_metadata=orig_sys_metadata,
+ bdms=[], recreate=False,
+ on_shared_storage=False)
+
+ inst_ref.refresh()
+
+ image_ref_url = glance.generate_image_url(image_ref)
+ new_image_ref_url = glance.generate_image_url(new_image_ref)
+
+ # Expect exactly: exists (old image), rebuild.start, rebuild.end
+ # (both with the new image).
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.exists')
+ self.assertEqual(msg.payload['image_ref_url'], image_ref_url)
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.rebuild.start')
+ self.assertEqual(msg.payload['image_ref_url'], new_image_ref_url)
+ self.assertEqual(msg.payload['image_name'], 'fake_name')
+ msg = fake_notifier.NOTIFICATIONS[2]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.rebuild.end')
+ self.assertEqual(msg.priority, 'INFO')
+ payload = msg.payload
+ self.assertEqual(payload['image_name'], 'fake_name')
+ self.assertEqual(payload['tenant_id'], self.project_id)
+ self.assertEqual(payload['user_id'], self.user_id)
+ self.assertEqual(payload['instance_id'], inst_ref['uuid'])
+ self.assertEqual(payload['instance_type'], 'm1.tiny')
+ type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+ self.assertEqual(str(payload['instance_type_id']), str(type_id))
+ flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
+ self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
+ self.assertIn('display_name', payload)
+ self.assertIn('created_at', payload)
+ self.assertIn('launched_at', payload)
+ # launched_at is stamped with the overridden current time.
+ self.assertEqual(payload['launched_at'], timeutils.strtime(cur_time))
+ self.assertEqual(payload['image_ref_url'], new_image_ref_url)
+ self.compute.terminate_instance(self.context, inst_ref, [], [])
+
    def test_finish_resize_instance_notification(self):
        # Ensure notifications on instance migrate/resize.
        #
        # Drive a full prep_resize/resize_instance cycle to m1.small, then
        # verify finish_resize emits start/end notifications whose payload
        # carries the new flavor.
        old_time = datetime.datetime(2012, 4, 1)
        cur_time = datetime.datetime(2012, 12, 21, 12, 21)
        timeutils.set_time_override(old_time)
        instance = self._create_fake_instance_obj()
        new_type = flavors.get_flavor_by_name('m1.small')
        new_type = jsonutils.to_primitive(new_type)
        new_type_id = new_type['id']
        flavor_id = new_type['flavorid']
        instance_p = obj_base.obj_to_primitive(instance)
        self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
                None, True, None, False)

        # Move the instance off this compute host; a same-host resize
        # would be rejected (see the same-source tests in this class).
        instance.host = 'foo'
        instance.task_state = task_states.RESIZE_PREP
        instance.save()

        self.compute.prep_resize(self.context, instance=instance,
                instance_type=new_type, image={}, reservations=[],
                request_spec={}, filter_properties={}, node=None)

        self._stub_out_resize_network_methods()

        migration = objects.Migration.get_by_instance_and_status(
                self.context.elevated(),
                instance.uuid, 'pre-migrating')
        self.compute.resize_instance(self.context, instance=instance,
                migration=migration, image={}, instance_type=new_type,
                reservations=[])
        timeutils.set_time_override(cur_time)
        # Only the notifications emitted by finish_resize matter below.
        fake_notifier.NOTIFICATIONS = []

        self.compute.finish_resize(self.context,
                migration=migration, reservations=[],
                disk_info={}, image={}, instance=instance)

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'compute.instance.finish_resize.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'compute.instance.finish_resize.end')
        self.assertEqual(msg.priority, 'INFO')
        payload = msg.payload
        self.assertEqual(payload['tenant_id'], self.project_id)
        self.assertEqual(payload['user_id'], self.user_id)
        self.assertEqual(payload['instance_id'], instance.uuid)
        # The payload must reflect the *new* flavor after the resize.
        self.assertEqual(payload['instance_type'], 'm1.small')
        self.assertEqual(str(payload['instance_type_id']), str(new_type_id))
        self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
        self.assertIn('display_name', payload)
        self.assertIn('created_at', payload)
        self.assertIn('launched_at', payload)
        self.assertEqual(payload['launched_at'], timeutils.strtime(cur_time))
        image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
        self.assertEqual(payload['image_ref_url'], image_ref_url)
        self.compute.terminate_instance(self.context, instance, [], [])
+
    def test_resize_instance_notification(self):
        # Ensure notifications on instance migrate/resize.
        #
        # prep_resize alone should emit three notifications: an 'exists'
        # audit event followed by resize.prep.start/.end.
        old_time = datetime.datetime(2012, 4, 1)
        cur_time = datetime.datetime(2012, 12, 21, 12, 21)
        timeutils.set_time_override(old_time)
        instance = self._create_fake_instance_obj()

        self.compute.run_instance(self.context, instance, {}, {}, None, None,
                None, True, None, False)
        timeutils.set_time_override(cur_time)
        # Drop the notifications generated by instance creation.
        fake_notifier.NOTIFICATIONS = []

        # Move the instance off this compute host so the resize is allowed.
        instance.host = 'foo'
        instance.task_state = task_states.RESIZE_PREP
        instance.save()

        instance_type = flavors.get_default_flavor()
        self.compute.prep_resize(self.context, instance=instance,
                instance_type=instance_type, image={}, reservations=[],
                request_spec={}, filter_properties={}, node=None)
        # prep_resize must have created a 'pre-migrating' migration record.
        db.migration_get_by_instance_and_status(self.context.elevated(),
                                                instance.uuid,
                                                'pre-migrating')

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'compute.instance.exists')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'compute.instance.resize.prep.start')
        msg = fake_notifier.NOTIFICATIONS[2]
        self.assertEqual(msg.event_type,
                         'compute.instance.resize.prep.end')
        self.assertEqual(msg.priority, 'INFO')
        payload = msg.payload
        self.assertEqual(payload['tenant_id'], self.project_id)
        self.assertEqual(payload['user_id'], self.user_id)
        self.assertEqual(payload['instance_id'], instance.uuid)
        # Still the original flavor: the resize has only been prepped.
        self.assertEqual(payload['instance_type'], 'm1.tiny')
        type_id = flavors.get_flavor_by_name('m1.tiny')['id']
        self.assertEqual(str(payload['instance_type_id']), str(type_id))
        flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
        self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
        self.assertIn('display_name', payload)
        self.assertIn('created_at', payload)
        self.assertIn('launched_at', payload)
        image_ref_url = glance.generate_image_url(FAKE_IMAGE_REF)
        self.assertEqual(payload['image_ref_url'], image_ref_url)
        self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_prep_resize_instance_migration_error_on_same_host(self):
+ """Ensure prep_resize raise a migration error if destination is set on
+ the same source host and allow_resize_to_same_host is false
+ """
+ self.flags(host="foo", allow_resize_to_same_host=False)
+
+ instance = self._create_fake_instance_obj()
+
+ reservations = self._ensure_quota_reservations_rolledback(instance)
+
+ instance_p = obj_base.obj_to_primitive(instance)
+ self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
+ None, True, None, False)
+ instance.host = self.compute.host
+ instance.save()
+ instance_type = flavors.get_default_flavor()
+
+ self.assertRaises(exception.MigrationError, self.compute.prep_resize,
+ self.context, instance=instance,
+ instance_type=instance_type, image={},
+ reservations=reservations, request_spec={},
+ filter_properties={}, node=None)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_prep_resize_instance_migration_error_on_none_host(self):
+ """Ensure prep_resize raises a migration error if destination host is
+ not defined
+ """
+ instance = self._create_fake_instance_obj()
+
+ reservations = self._ensure_quota_reservations_rolledback(instance)
+
+ instance_p = obj_base.obj_to_primitive(instance)
+ self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
+ None, True, None, False)
+ instance.host = None
+ instance.save()
+ instance_type = flavors.get_default_flavor()
+
+ self.assertRaises(exception.MigrationError, self.compute.prep_resize,
+ self.context, instance=instance,
+ instance_type=instance_type, image={},
+ reservations=reservations, request_spec={},
+ filter_properties={}, node=None)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
    def test_resize_instance_driver_error(self):
        # Ensure instance status set to Error on resize error.

        def throw_up(*args, **kwargs):
            raise test.TestingException()

        # Make the virt driver blow up during the disk-migration step.
        self.stubs.Set(self.compute.driver, 'migrate_disk_and_power_off',
                       throw_up)

        instance = self._create_fake_instance_obj()
        instance_type = flavors.get_default_flavor()

        # Quota reservations must be rolled back on the error path.
        reservations = self._ensure_quota_reservations_rolledback(instance)

        instance_p = obj_base.obj_to_primitive(instance)
        self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
                None, True, None, False)
        instance.host = 'foo'
        instance.save()
        self.compute.prep_resize(self.context, instance=instance,
                                 instance_type=instance_type, image={},
                                 reservations=reservations, request_spec={},
                                 filter_properties={}, node=None)
        instance.task_state = task_states.RESIZE_PREP
        instance.save()
        migration = objects.Migration.get_by_instance_and_status(
                self.context.elevated(),
                instance.uuid, 'pre-migrating')

        # verify
        self.assertRaises(test.TestingException, self.compute.resize_instance,
                          self.context, instance=instance,
                          migration=migration, image={},
                          reservations=reservations,
                          instance_type=jsonutils.to_primitive(instance_type))
        # NOTE(comstud): error path doesn't use objects, so our object
        # is not updated.  Refresh and compare against the DB.
        instance.refresh()
        self.assertEqual(instance.vm_state, vm_states.ERROR)
        self.compute.terminate_instance(self.context, instance, [], [])
+
    def test_resize_instance_driver_rollback(self):
        # Ensure instance status set to Running after rollback.

        def throw_up(*args, **kwargs):
            # InstanceFaultRollback signals a recoverable failure: the
            # manager should roll the instance back instead of erroring it.
            raise exception.InstanceFaultRollback(test.TestingException())

        self.stubs.Set(self.compute.driver, 'migrate_disk_and_power_off',
                       throw_up)

        instance = self._create_fake_instance_obj()
        instance_type = flavors.get_default_flavor()
        reservations = self._ensure_quota_reservations_rolledback(instance)
        instance_p = obj_base.obj_to_primitive(instance)
        self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
                None, True, None, False)
        instance.host = 'foo'
        instance.save()
        self.compute.prep_resize(self.context, instance=instance,
                                 instance_type=instance_type, image={},
                                 reservations=reservations, request_spec={},
                                 filter_properties={}, node=None)
        instance.task_state = task_states.RESIZE_PREP
        instance.save()

        migration = objects.Migration.get_by_instance_and_status(
                self.context.elevated(),
                instance.uuid, 'pre-migrating')

        # The inner TestingException still propagates to the caller...
        self.assertRaises(test.TestingException, self.compute.resize_instance,
                          self.context, instance=instance,
                          migration=migration, image={},
                          reservations=reservations,
                          instance_type=jsonutils.to_primitive(instance_type))
        # NOTE(comstud): error path doesn't use objects, so our object
        # is not updated.  Refresh and compare against the DB.
        instance.refresh()
        # ...but the instance ends up ACTIVE again rather than in ERROR.
        self.assertEqual(instance.vm_state, vm_states.ACTIVE)
        self.assertIsNone(instance.task_state)
        self.compute.terminate_instance(self.context, instance, [], [])
+
+ def _test_resize_instance(self, clean_shutdown=True):
+ # Ensure instance can be migrated/resized.
+ instance = self._create_fake_instance_obj()
+ instance_type = flavors.get_default_flavor()
+
+ instance_p = obj_base.obj_to_primitive(instance)
+ self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
+ None, True, None, False)
+ instance.host = 'foo'
+ instance.save()
+ self.compute.prep_resize(self.context, instance=instance,
+ instance_type=instance_type, image={}, reservations=[],
+ request_spec={}, filter_properties={}, node=None)
+
+ # verify 'old_vm_state' was set on system_metadata
+ instance.refresh()
+ sys_meta = instance.system_metadata
+ self.assertEqual(vm_states.ACTIVE, sys_meta['old_vm_state'])
+
+ self._stub_out_resize_network_methods()
+
+ instance.task_state = task_states.RESIZE_PREP
+ instance.save()
+
+ migration = objects.Migration.get_by_instance_and_status(
+ self.context.elevated(),
+ instance.uuid, 'pre-migrating')
+
+ with contextlib.nested(
+ mock.patch.object(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid', return_value='fake_bdms'),
+ mock.patch.object(
+ self.compute, '_get_instance_block_device_info',
+ return_value='fake_bdinfo'),
+ mock.patch.object(self.compute, '_terminate_volume_connections'),
+ mock.patch.object(self.compute, '_get_power_off_values',
+ return_value=(1, 2))
+ ) as (mock_get_by_inst_uuid, mock_get_instance_vol_bdinfo,
+ mock_terminate_vol_conn, mock_get_power_off_values):
+ self.compute.resize_instance(self.context, instance=instance,
+ migration=migration, image={}, reservations=[],
+ instance_type=jsonutils.to_primitive(instance_type),
+ clean_shutdown=clean_shutdown)
+ mock_get_instance_vol_bdinfo.assert_called_once_with(
+ self.context, instance, bdms='fake_bdms')
+ mock_terminate_vol_conn.assert_called_once_with(self.context,
+ instance, 'fake_bdms')
+ mock_get_power_off_values.assert_caleld_once_with(self.context,
+ instance, clean_shutdown)
+ self.assertEqual(migration.dest_compute, instance.host)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_resize_instance(self):
+ self._test_resize_instance()
+
+ def test_resize_instance_forced_shutdown(self):
+ self._test_resize_instance(clean_shutdown=False)
+
    def _test_confirm_resize(self, power_on):
        # Common test case method for confirm_resize
        #
        # Runs a full resize to flavor id 3 starting from either an ACTIVE
        # or a STOPPED instance (per power_on), confirms it, and verifies
        # the new flavor sticks while vm_state/power_state are restored.
        def fake(*args, **kwargs):
            pass

        def fake_confirm_migration_driver(*args, **kwargs):
            # Confirm the instance uses the new type in finish_resize
            inst = args[1]
            sys_meta = inst['system_metadata']
            self.assertEqual(sys_meta['instance_type_flavorid'], '3')

        old_vm_state = None
        p_state = None
        if power_on:
            old_vm_state = vm_states.ACTIVE
            p_state = power_state.RUNNING
        else:
            old_vm_state = vm_states.STOPPED
            p_state = power_state.SHUTDOWN
        params = {'vm_state': old_vm_state, 'power_state': p_state}
        instance = self._create_fake_instance_obj(params)

        self.flags(allow_resize_to_same_host=True)
        self.stubs.Set(self.compute.driver, 'finish_migration', fake)
        self.stubs.Set(self.compute.driver, 'confirm_migration',
                       fake_confirm_migration_driver)

        self._stub_out_resize_network_methods()

        # Confirming a resize must commit the quota reservations.
        reservations = self._ensure_quota_reservations_committed(instance)

        instance_p = obj_base.obj_to_primitive(instance)
        self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
                None, True, None, False)

        # Confirm the instance size before the resize starts
        instance.refresh()
        instance_type_ref = db.flavor_get(self.context,
                                          instance.instance_type_id)
        self.assertEqual(instance_type_ref['flavorid'], '1')

        instance.vm_state = old_vm_state
        instance.power_state = p_state
        instance.save()

        new_instance_type_ref = db.flavor_get_by_flavor_id(
                self.context, 3)
        new_instance_type_p = jsonutils.to_primitive(new_instance_type_ref)
        self.compute.prep_resize(self.context,
                instance=instance,
                instance_type=new_instance_type_p,
                image={}, reservations=reservations, request_spec={},
                filter_properties={}, node=None)

        migration = objects.Migration.get_by_instance_and_status(
                self.context.elevated(),
                instance.uuid, 'pre-migrating')

        # NOTE(mriedem): ensure prep_resize set old_vm_state in system_metadata
        sys_meta = instance.system_metadata
        self.assertEqual(old_vm_state, sys_meta['old_vm_state'])
        instance.task_state = task_states.RESIZE_PREP
        instance.save()
        self.compute.resize_instance(self.context, instance=instance,
                                     migration=migration,
                                     image={},
                                     reservations=[],
                                     instance_type=new_instance_type_p)
        self.compute.finish_resize(self.context,
                migration=migration, reservations=[],
                disk_info={}, image={}, instance=instance)

        # Prove that the instance size is now the new size
        instance_type_ref = db.flavor_get(self.context,
                                          instance.instance_type_id)
        self.assertEqual(instance_type_ref['flavorid'], '3')

        # Finally, confirm the resize and verify the new flavor is applied
        instance.task_state = None
        instance.save()
        self.compute.confirm_resize(self.context, instance=instance,
                                    reservations=reservations,
                                    migration=migration)

        instance.refresh()

        instance_type_ref = db.flavor_get(self.context,
                                          instance.instance_type_id)
        self.assertEqual(instance_type_ref['flavorid'], '3')
        self.assertEqual('fake-mini', migration.source_compute)
        # The pre-resize vm/power state must be restored after confirm.
        self.assertEqual(old_vm_state, instance.vm_state)
        self.assertIsNone(instance.task_state)
        self.assertEqual(p_state, instance.power_state)
        self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_confirm_resize_from_active(self):
+ self._test_confirm_resize(power_on=True)
+
+ def test_confirm_resize_from_stopped(self):
+ self._test_confirm_resize(power_on=False)
+
    def _test_finish_revert_resize(self, power_on,
                                   remove_old_vm_state=False):
        """Convenience method that does most of the work for the
        test_finish_revert_resize tests.
        :param power_on -- True if testing resize from ACTIVE state, False if
        testing resize from STOPPED state.
        :param remove_old_vm_state -- True if testing a case where the
        'old_vm_state' system_metadata is not present when the
        finish_revert_resize method is called.
        """
        def fake(*args, **kwargs):
            pass

        def fake_finish_revert_migration_driver(*args, **kwargs):
            # Confirm the instance uses the old type in finish_revert_resize
            inst = args[1]
            sys_meta = inst.system_metadata
            self.assertEqual(sys_meta['instance_type_flavorid'], '1')

        old_vm_state = None
        if power_on:
            old_vm_state = vm_states.ACTIVE
        else:
            old_vm_state = vm_states.STOPPED
        params = {'vm_state': old_vm_state}
        instance = self._create_fake_instance_obj(params)

        self.stubs.Set(self.compute.driver, 'finish_migration', fake)
        self.stubs.Set(self.compute.driver, 'finish_revert_migration',
                       fake_finish_revert_migration_driver)

        self._stub_out_resize_network_methods()

        # Reverting a resize still commits the quota reservations.
        reservations = self._ensure_quota_reservations_committed(instance)

        instance_p = obj_base.obj_to_primitive(instance)
        self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
                None, True, None, False)

        instance.refresh()
        instance_type_ref = db.flavor_get(self.context,
                                          instance.instance_type_id)
        self.assertEqual(instance_type_ref['flavorid'], '1')

        old_vm_state = instance['vm_state']

        # Move the instance off this compute host so the resize is allowed.
        instance.host = 'foo'
        instance.vm_state = old_vm_state
        instance.save()

        new_instance_type_ref = db.flavor_get_by_flavor_id(
                self.context, 3)
        new_instance_type_p = jsonutils.to_primitive(new_instance_type_ref)
        self.compute.prep_resize(self.context,
                instance=instance,
                instance_type=new_instance_type_p,
                image={}, reservations=reservations, request_spec={},
                filter_properties={}, node=None)

        migration = objects.Migration.get_by_instance_and_status(
                self.context.elevated(),
                instance.uuid, 'pre-migrating')

        # NOTE(mriedem): ensure prep_resize set old_vm_state in system_metadata
        sys_meta = instance.system_metadata
        self.assertEqual(old_vm_state, sys_meta['old_vm_state'])
        instance.task_state = task_states.RESIZE_PREP
        instance.save()
        self.compute.resize_instance(self.context, instance=instance,
                                     migration=migration,
                                     image={},
                                     reservations=[],
                                     instance_type=new_instance_type_p)
        self.compute.finish_resize(self.context,
                migration=migration, reservations=[],
                disk_info={}, image={}, instance=instance)

        # Prove that the instance size is now the new size
        instance_type_ref = db.flavor_get(self.context,
                                          instance['instance_type_id'])
        self.assertEqual(instance_type_ref['flavorid'], '3')

        instance.task_state = task_states.RESIZE_REVERTING
        instance.save()

        self.compute.revert_resize(self.context,
                migration=migration, instance=instance,
                reservations=reservations)

        instance.refresh()
        if remove_old_vm_state:
            # need to wipe out the old_vm_state from system_metadata
            # before calling finish_revert_resize
            sys_meta = instance.system_metadata
            sys_meta.pop('old_vm_state')
            # Have to reset for save() to work
            instance.system_metadata = sys_meta
            instance.save()

        self.compute.finish_revert_resize(self.context,
                migration=migration,
                instance=instance, reservations=reservations)

        self.assertIsNone(instance.task_state)

        # After the revert the instance is back on the old flavor...
        instance_type_ref = db.flavor_get(self.context,
                instance['instance_type_id'])
        self.assertEqual(instance_type_ref['flavorid'], '1')
        self.assertEqual(instance.host, migration.source_compute)
        # ...and the vm_state falls back to ACTIVE when old_vm_state is
        # missing, otherwise it is restored to the pre-resize state.
        if remove_old_vm_state:
            self.assertEqual(vm_states.ACTIVE, instance.vm_state)
        else:
            self.assertEqual(old_vm_state, instance.vm_state)
+
+ def test_finish_revert_resize_from_active(self):
+ self._test_finish_revert_resize(power_on=True)
+
+ def test_finish_revert_resize_from_stopped(self):
+ self._test_finish_revert_resize(power_on=False)
+
+ def test_finish_revert_resize_from_stopped_remove_old_vm_state(self):
+ # in this case we resize from STOPPED but end up with ACTIVE
+ # because the old_vm_state value is not present in
+ # finish_revert_resize
+ self._test_finish_revert_resize(power_on=False,
+ remove_old_vm_state=True)
+
    def _test_cleanup_stored_instance_types(self, old, new, revert=False):
        """Record the exact flavors.* call sequence expected from
        _cleanup_stored_instance_types and verify its return value.

        :param old: old instance type id stored in the migration record
        :param new: new instance type id stored in the migration record
        :param revert: drive the revert branch, which restores the old
            flavor info instead of deleting the stashed old/new copies
        """
        instance = self._create_fake_instance_obj()
        migration = dict(old_instance_type_id=old,
                         new_instance_type_id=new)
        instance.system_metadata = dict(instance_type_id=old)
        sys_meta = dict(instance.system_metadata)
        # NOTE: mox record/replay — the call order below is itself part of
        # the assertion.
        self.mox.StubOutWithMock(flavors, 'extract_flavor')
        self.mox.StubOutWithMock(flavors, 'delete_flavor_info')
        self.mox.StubOutWithMock(flavors, 'save_flavor_info')
        if revert:
            flavors.extract_flavor(instance, 'old_').AndReturn(
                {'instance_type_id': old})
            flavors.extract_flavor(instance).AndReturn(
                {'instance_type_id': new})
            # On revert the old flavor is written back into sys_meta.
            flavors.save_flavor_info(
                sys_meta, {'instance_type_id': old}).AndReturn(sys_meta)
        else:
            flavors.extract_flavor(instance).AndReturn(
                {'instance_type_id': new})
            flavors.extract_flavor(instance, 'old_').AndReturn(
                {'instance_type_id': old})
            # On confirm both stashed copies are removed.
            flavors.delete_flavor_info(
                sys_meta, 'old_').AndReturn(sys_meta)
            flavors.delete_flavor_info(
                sys_meta, 'new_').AndReturn(sys_meta)

        self.mox.ReplayAll()
        res = self.compute._cleanup_stored_instance_types(migration, instance,
                                                          revert)
        # Returns (sys_meta, kept flavor, dropped flavor) — which of
        # old/new is kept flips with the revert flag.
        self.assertEqual(res,
                         (sys_meta,
                          {'instance_type_id': revert and old or new},
                          {'instance_type_id': revert and new or old}))
+
+ def test_cleanup_stored_instance_types_for_resize(self):
+ self._test_cleanup_stored_instance_types('1', '2')
+
+ def test_cleanup_stored_instance_types_for_resize_with_update(self):
+ self._test_cleanup_stored_instance_types('1', '2', True)
+
+ def test_cleanup_stored_instance_types_for_migration(self):
+ self._test_cleanup_stored_instance_types('1', '1')
+
+ def test_cleanup_stored_instance_types_for_migration_with_update(self):
+ self._test_cleanup_stored_instance_types('1', '1', True)
+
+ def test_get_by_flavor_id(self):
+ flavor_type = flavors.get_flavor_by_flavor_id(1)
+ self.assertEqual(flavor_type['name'], 'm1.tiny')
+
+ def test_resize_same_source_fails(self):
+ """Ensure instance fails to migrate when source and destination are
+ the same host.
+ """
+ instance = self._create_fake_instance_obj()
+ reservations = self._ensure_quota_reservations_rolledback(instance)
+ instance_p = obj_base.obj_to_primitive(instance)
+ self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
+ None, True, None, False)
+ instance.refresh()
+ instance_type = flavors.get_default_flavor()
+ self.assertRaises(exception.MigrationError, self.compute.prep_resize,
+ self.context, instance=instance,
+ instance_type=instance_type, image={},
+ reservations=reservations, request_spec={},
+ filter_properties={}, node=None)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
    def test_resize_instance_handles_migration_error(self):
        # Ensure vm_state is ERROR when error occurs.
        def raise_migration_failure(*args):
            raise test.TestingException()
        # Fail the driver's disk-migration step.
        self.stubs.Set(self.compute.driver,
                       'migrate_disk_and_power_off',
                       raise_migration_failure)

        instance = self._create_fake_instance_obj()
        # Quota reservations must be rolled back on failure.
        reservations = self._ensure_quota_reservations_rolledback(instance)

        instance_type = flavors.get_default_flavor()

        instance_p = obj_base.obj_to_primitive(instance)
        self.compute.run_instance(self.context, instance_p, {}, {}, None, None,
                None, True, None, False)
        instance.host = 'foo'
        instance.save()
        self.compute.prep_resize(self.context, instance=instance,
                                 instance_type=instance_type,
                                 image={}, reservations=reservations,
                                 request_spec={}, filter_properties={},
                                 node=None)
        migration = objects.Migration.get_by_instance_and_status(
                self.context.elevated(),
                instance.uuid, 'pre-migrating')
        instance.task_state = task_states.RESIZE_PREP
        instance.save()
        self.assertRaises(test.TestingException, self.compute.resize_instance,
                          self.context, instance=instance,
                          migration=migration, image={},
                          reservations=reservations,
                          instance_type=jsonutils.to_primitive(instance_type))
        # NOTE(comstud): error path doesn't use objects, so our object
        # is not updated.  Refresh and compare against the DB.
        instance.refresh()
        self.assertEqual(instance.vm_state, vm_states.ERROR)
        self.compute.terminate_instance(self.context, instance, [], [])
+
    def test_pre_live_migration_instance_has_no_fixed_ip(self):
        # Confirm that no exception is raised if there is no fixed ip on
        # pre_live_migration
        instance = self._create_fake_instance_obj()
        c = context.get_admin_context()

        # NOTE(review): nothing is stubbed before ReplayAll here, so this
        # exercises the (fake) driver's pre_live_migration directly with an
        # empty block_device_mapping — presumably the fake driver tolerates
        # the missing fixed IP; confirm against the fake driver if changed.
        self.mox.ReplayAll()
        self.compute.driver.pre_live_migration(mox.IsA(c), mox.IsA(instance),
                                               {'block_device_mapping': []},
                                               mox.IgnoreArg(),
                                               mox.IgnoreArg(),
                                               mox.IgnoreArg())
+
    def test_pre_live_migration_works_correctly(self):
        # Confirm setup_compute_volume is called when volume is mounted.
        def stupid(*args, **kwargs):
            return fake_network.fake_get_instance_nw_info(self.stubs)
        self.stubs.Set(nova.compute.manager.ComputeManager,
                       '_get_instance_nw_info', stupid)

        # creating instance testdata
        instance = self._create_fake_instance_obj({'host': 'dummy'})
        c = context.get_admin_context()
        nw_info = fake_network.fake_get_instance_nw_info(self.stubs)

        # creating mocks: the driver must be asked to prepare the migration
        # and to ensure filtering rules, and networks must be set up on
        # this (destination) host.
        self.mox.StubOutWithMock(self.compute.driver, 'pre_live_migration')
        self.compute.driver.pre_live_migration(mox.IsA(c), mox.IsA(instance),
                                               {'swap': None, 'ephemerals': [],
                                                'block_device_mapping': []},
                                               mox.IgnoreArg(),
                                               mox.IgnoreArg(),
                                               mox.IgnoreArg())
        self.mox.StubOutWithMock(self.compute.driver,
                                 'ensure_filtering_rules_for_instance')
        self.compute.driver.ensure_filtering_rules_for_instance(
            mox.IsA(instance), nw_info)

        self.mox.StubOutWithMock(self.compute.network_api,
                                 'setup_networks_on_host')
        self.compute.network_api.setup_networks_on_host(c, instance,
                                                        self.compute.host)

        fake_notifier.NOTIFICATIONS = []
        # start test
        self.mox.ReplayAll()
        migrate_data = {'is_shared_instance_path': False}
        ret = self.compute.pre_live_migration(c, instance=instance,
                                              block_migration=False, disk=None,
                                              migrate_data=migrate_data)
        self.assertIsNone(ret)
        # pre_live_migration should emit a start and an end notification.
        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual(msg.event_type,
                         'compute.instance.live_migration.pre.start')
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual(msg.event_type,
                         'compute.instance.live_migration.pre.end')

        # cleanup
        db.instance_destroy(c, instance['uuid'])
+
    def test_live_migration_exception_rolls_back(self):
        # Confirm exception when pre_live_migration fails.
        #
        # When the destination's pre_live_migration blows up, the source
        # must roll back: tear down networks, remove the volume
        # connections on the destination, invoke the destination rollback
        # RPC, and leave the instance ACTIVE on the source host.
        c = context.get_admin_context()

        instance = self._create_fake_instance_obj(
            {'host': 'src_host',
             'task_state': task_states.MIGRATING})
        updated_instance = self._create_fake_instance_obj(
                                               {'host': 'fake-dest-host'})
        dest_host = updated_instance['host']
        fake_bdms = [
                objects.BlockDeviceMapping(
                    **fake_block_device.FakeDbBlockDeviceDict(
                        {'volume_id': 'vol1-id', 'source_type': 'volume',
                         'destination_type': 'volume'})),
                objects.BlockDeviceMapping(
                    **fake_block_device.FakeDbBlockDeviceDict(
                        {'volume_id': 'vol2-id', 'source_type': 'volume',
                         'destination_type': 'volume'}))
        ]

        # creating mocks
        self.mox.StubOutWithMock(self.compute.driver,
                                 'get_instance_disk_info')
        self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                                 'pre_live_migration')
        self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
                                 'get_by_instance_uuid')
        self.mox.StubOutWithMock(self.compute.network_api,
                                 'setup_networks_on_host')
        self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                                 'remove_volume_connection')
        self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                                 'rollback_live_migration_at_destination')

        block_device_info = {
                'swap': None, 'ephemerals': [], 'block_device_mapping': []}
        self.compute.driver.get_instance_disk_info(
                instance.name,
                block_device_info=block_device_info).AndReturn('fake_disk')
        # The destination-side prep is the failure being injected.
        self.compute.compute_rpcapi.pre_live_migration(c,
                instance, True, 'fake_disk', dest_host,
                {}).AndRaise(test.TestingException())

        # Rollback expectations follow.
        self.compute.network_api.setup_networks_on_host(c,
                instance, self.compute.host)
        objects.BlockDeviceMappingList.get_by_instance_uuid(c,
                instance.uuid).MultipleTimes().AndReturn(fake_bdms)
        self.compute.compute_rpcapi.remove_volume_connection(
                c, instance, 'vol1-id', dest_host)
        self.compute.compute_rpcapi.remove_volume_connection(
                c, instance, 'vol2-id', dest_host)
        self.compute.compute_rpcapi.rollback_live_migration_at_destination(
                c, instance, dest_host, destroy_disks=True, migrate_data={})

        # start test
        self.mox.ReplayAll()
        self.assertRaises(test.TestingException,
                          self.compute.live_migration,
                          c, dest=dest_host, block_migration=True,
                          instance=instance, migrate_data={})
        instance.refresh()
        # Instance stays on the source host, running, with no task state.
        self.assertEqual('src_host', instance.host)
        self.assertEqual(vm_states.ACTIVE, instance.vm_state)
        self.assertIsNone(instance.task_state)
+
    def test_live_migration_works_correctly(self):
        # Confirm live_migration() works as expected correctly.
        # creating instance testdata
        c = context.get_admin_context()
        instance = self._create_fake_instance_obj()
        instance.host = self.compute.host
        dest = 'desthost'

        migrate_data = {'is_shared_instance_path': False}

        # Expected happy-path sequence: destination prep, network
        # migration start, destination post-migration RPC, source-side
        # network teardown, and clearing of pending instance events.
        self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                                 'pre_live_migration')
        self.compute.compute_rpcapi.pre_live_migration(
            c, instance, False, None, dest, migrate_data)

        self.mox.StubOutWithMock(self.compute.network_api,
                                 'migrate_instance_start')
        migration = {'source_compute': instance['host'], 'dest_compute': dest}
        self.compute.network_api.migrate_instance_start(c, instance,
                                                        migration)
        self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                                 'post_live_migration_at_destination')
        self.compute.compute_rpcapi.post_live_migration_at_destination(
            c, instance, False, dest)

        self.mox.StubOutWithMock(self.compute.network_api,
                                 'setup_networks_on_host')
        self.compute.network_api.setup_networks_on_host(c, instance,
                                                        instance['host'],
                                                        teardown=True)
        self.mox.StubOutWithMock(self.compute.instance_events,
                                 'clear_events_for_instance')
        self.compute.instance_events.clear_events_for_instance(
            mox.IgnoreArg())

        # start test
        self.mox.ReplayAll()

        ret = self.compute.live_migration(c, dest=dest,
                                          instance=instance,
                                          block_migration=False,
                                          migrate_data=migrate_data)
        self.assertIsNone(ret)

        # cleanup
        instance.destroy(c)
+
    def test_post_live_migration_no_shared_storage_working_correctly(self):
        """Confirm post_live_migration() works correctly as expected
        for non shared storage migration.
        """
        # Create stubs
        result = {}
        # No share storage live migration don't need to destroy at source
        # server because instance has been migrated to destination, but a
        # cleanup for block device and network are needed.

        def fakecleanup(*args, **kwargs):
            # Record that the driver's cleanup hook was invoked.
            result['cleanup'] = True

        self.stubs.Set(self.compute.driver, 'cleanup', fakecleanup)
        dest = 'desthost'
        srchost = self.compute.host

        # creating testdata: an instance mid-migration on the source host.
        c = context.get_admin_context()
        instance = self._create_fake_instance_obj({
                                'host': srchost,
                                'state_description': 'migrating',
                                'state': power_state.PAUSED,
                                'task_state': task_states.MIGRATING,
                                'power_state': power_state.PAUSED})

        # creating mocks
        self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
        self.compute.driver.unfilter_instance(instance, [])
        self.mox.StubOutWithMock(self.compute.network_api,
                                 'migrate_instance_start')
        migration = {'source_compute': srchost, 'dest_compute': dest, }
        self.compute.network_api.migrate_instance_start(c, instance,
                                                        migration)

        self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                                 'post_live_migration_at_destination')
        self.compute.compute_rpcapi.post_live_migration_at_destination(
            c, instance, False, dest)

        self.mox.StubOutWithMock(self.compute.network_api,
                                 'setup_networks_on_host')
        self.compute.network_api.setup_networks_on_host(c, instance,
                                                        self.compute.host,
                                                        teardown=True)
        self.mox.StubOutWithMock(self.compute.instance_events,
                                 'clear_events_for_instance')
        self.compute.instance_events.clear_events_for_instance(
            mox.IgnoreArg())

        # start test
        self.mox.ReplayAll()
        migrate_data = {'is_shared_instance_path': False}
        self.compute._post_live_migration(c, instance, dest,
                                          migrate_data=migrate_data)
        # Without shared storage the driver cleanup must have run.
        self.assertIn('cleanup', result)
        self.assertEqual(result['cleanup'], True)
+
+ def test_post_live_migration_working_correctly(self):
+ # Confirm post_live_migration() works as expected correctly.
+ # Shared-storage variant: patches every collaborator with mock (not
+ # mox) and verifies each was called with the expected arguments.
+ dest = 'desthost'
+ srchost = self.compute.host
+
+ # creating testdata
+ c = context.get_admin_context()
+ instance = self._create_fake_instance_obj({
+ 'host': srchost,
+ 'state_description': 'migrating',
+ 'state': power_state.PAUSED})
+
+ instance.update({'task_state': task_states.MIGRATING,
+ 'power_state': power_state.PAUSED})
+ instance.save(c)
+
+ # creating mocks
+ # contextlib.nested: Python 2 idiom for stacking many patches.
+ with contextlib.nested(
+ mock.patch.object(self.compute.driver, 'post_live_migration'),
+ mock.patch.object(self.compute.driver, 'unfilter_instance'),
+ mock.patch.object(self.compute.network_api,
+ 'migrate_instance_start'),
+ mock.patch.object(self.compute.compute_rpcapi,
+ 'post_live_migration_at_destination'),
+ mock.patch.object(self.compute.driver,
+ 'post_live_migration_at_source'),
+ mock.patch.object(self.compute.network_api,
+ 'setup_networks_on_host'),
+ mock.patch.object(self.compute.instance_events,
+ 'clear_events_for_instance'),
+ mock.patch.object(self.compute, 'update_available_resource')
+ ) as (
+ post_live_migration, unfilter_instance,
+ migrate_instance_start, post_live_migration_at_destination,
+ post_live_migration_at_source, setup_networks_on_host,
+ clear_events, update_available_resource
+ ):
+ self.compute._post_live_migration(c, instance, dest)
+
+ # Empty block-device info: the fake instance carries no BDMs.
+ post_live_migration.assert_has_calls([
+ mock.call(c, instance, {'swap': None, 'ephemerals': [],
+ 'block_device_mapping': []}, None)])
+ unfilter_instance.assert_has_calls([mock.call(instance, [])])
+ migration = {'source_compute': srchost,
+ 'dest_compute': dest, }
+ migrate_instance_start.assert_has_calls([
+ mock.call(c, instance, migration)])
+ post_live_migration_at_destination.assert_has_calls([
+ mock.call(c, instance, False, dest)])
+ post_live_migration_at_source.assert_has_calls(
+ [mock.call(c, instance, [])])
+ setup_networks_on_host.assert_has_calls([
+ mock.call(c, instance, self.compute.host, teardown=True)])
+ clear_events.assert_called_once_with(instance)
+ update_available_resource.assert_has_calls([mock.call(c)])
+
+ def test_post_live_migration_terminate_volume_connections(self):
+ # _post_live_migration must terminate the cinder connection for
+ # volume-backed BDMs only; the blank/local BDM gets no call.
+ c = context.get_admin_context()
+ instance = self._create_fake_instance_obj({
+ 'host': self.compute.host,
+ 'state_description': 'migrating',
+ 'state': power_state.PAUSED})
+ instance.update({'task_state': task_states.MIGRATING,
+ 'power_state': power_state.PAUSED})
+ instance.save(c)
+
+ # One non-volume BDM and one volume BDM attached to the instance.
+ bdms = block_device_obj.block_device_make_list(c,
+ [fake_block_device.FakeDbBlockDeviceDict({
+ 'source_type': 'blank', 'guest_format': None,
+ 'destination_type': 'local'}),
+ fake_block_device.FakeDbBlockDeviceDict({
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'volume_id': 'fake-volume-id'}),
+ ])
+
+ with contextlib.nested(
+ mock.patch.object(self.compute.network_api,
+ 'migrate_instance_start'),
+ mock.patch.object(self.compute.compute_rpcapi,
+ 'post_live_migration_at_destination'),
+ mock.patch.object(self.compute.network_api,
+ 'setup_networks_on_host'),
+ mock.patch.object(self.compute.instance_events,
+ 'clear_events_for_instance'),
+ mock.patch.object(self.compute,
+ '_get_instance_block_device_info'),
+ mock.patch.object(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid'),
+ mock.patch.object(self.compute.driver, 'get_volume_connector'),
+ mock.patch.object(cinder.API, 'terminate_connection')
+ ) as (
+ migrate_instance_start, post_live_migration_at_destination,
+ setup_networks_on_host, clear_events_for_instance,
+ get_instance_volume_block_device_info, get_by_instance_uuid,
+ get_volume_connector, terminate_connection
+ ):
+ get_by_instance_uuid.return_value = bdms
+ get_volume_connector.return_value = 'fake-connector'
+
+ self.compute._post_live_migration(c, instance, 'dest_host')
+
+ # Exactly one termination: only the volume BDM qualifies.
+ terminate_connection.assert_called_once_with(
+ c, 'fake-volume-id', 'fake-connector')
+
+ def _begin_post_live_migration_at_destination(self):
+ # Shared setup for the post_live_migration_at_destination tests:
+ # creates self.instance/self.admin_ctxt and records the common mox
+ # expectations; callers add the _get_compute_info expectation and
+ # then call _finish_post_live_migration_at_destination().
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'setup_networks_on_host')
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'migrate_instance_finish')
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.compute, '_get_compute_info')
+
+ params = {'task_state': task_states.MIGRATING,
+ 'power_state': power_state.PAUSED, }
+ self.instance = self._create_fake_instance_obj(params)
+
+ self.admin_ctxt = context.get_admin_context()
+ # Re-load the instance from the DB as an Instance object.
+ # NOTE(review): _from_db_object is given self.context while the DB
+ # read uses self.admin_ctxt — presumably intentional; verify.
+ self.instance = objects.Instance._from_db_object(self.context,
+ objects.Instance(),
+ db.instance_get_by_uuid(self.admin_ctxt, self.instance['uuid']))
+
+ self.compute.network_api.setup_networks_on_host(self.admin_ctxt,
+ self.instance,
+ self.compute.host)
+ migration = {'source_compute': self.instance['host'],
+ 'dest_compute': self.compute.host, }
+ self.compute.network_api.migrate_instance_finish(
+ self.admin_ctxt, self.instance, migration)
+ fake_net_info = []
+ fake_block_dev_info = {'foo': 'bar'}
+ # NOTE(review): the driver method is not stubbed here, so this call
+ # appears to run against the fake driver directly — confirm.
+ self.compute.driver.post_live_migration_at_destination(self.admin_ctxt,
+ self.instance,
+ fake_net_info,
+ False,
+ fake_block_dev_info)
+ self.compute._get_power_state(self.admin_ctxt,
+ self.instance).AndReturn(10001)
+
+ def _finish_post_live_migration_at_destination(self):
+ # Completes the scenario started by
+ # _begin_post_live_migration_at_destination: replays the mocks,
+ # runs post_live_migration_at_destination, checks the start/end
+ # notifications, and returns the refreshed instance for callers to
+ # inspect (e.g. its 'node' field).
+ self.compute.network_api.setup_networks_on_host(self.admin_ctxt,
+ mox.IgnoreArg(), self.compute.host)
+
+ fake_notifier.NOTIFICATIONS = []
+ self.mox.ReplayAll()
+
+ self.compute.post_live_migration_at_destination(self.admin_ctxt,
+ self.instance, False)
+
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.live_migration.post.dest.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.live_migration.post.dest.end')
+
+ # Fetch again so callers see the state written by the manager.
+ return objects.Instance.get_by_uuid(self.admin_ctxt,
+ self.instance['uuid'])
+
+ def test_post_live_migration_at_destination_with_compute_info(self):
+ """The instance's node property should be updated correctly."""
+ self._begin_post_live_migration_at_destination()
+ hypervisor_hostname = 'fake_hypervisor_hostname'
+ fake_compute_info = objects.ComputeNode(
+ hypervisor_hostname=hypervisor_hostname)
+ # _get_compute_info succeeds -> node is taken from the compute node.
+ self.compute._get_compute_info(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(
+ fake_compute_info)
+ updated = self._finish_post_live_migration_at_destination()
+ self.assertEqual(updated['node'], hypervisor_hostname)
+
+ def test_post_live_migration_at_destination_without_compute_info(self):
+ """The instance's node property should be set to None if we fail to
+ get compute_info.
+ """
+ self._begin_post_live_migration_at_destination()
+ # _get_compute_info raising NotFound must not break the migration;
+ # the node field simply ends up None.
+ self.compute._get_compute_info(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndRaise(
+ exception.NotFound())
+ updated = self._finish_post_live_migration_at_destination()
+ self.assertIsNone(updated['node'])
+
+ def test_rollback_live_migration_at_destination_correctly(self):
+ # Rolling back on the destination must tear down networking, ask
+ # the driver to roll back (destroying disks), return None and emit
+ # the rollback start/end notifications.
+ # creating instance testdata
+ c = context.get_admin_context()
+ instance = self._create_fake_instance_obj({'host': 'dummy'})
+
+ fake_notifier.NOTIFICATIONS = []
+
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'setup_networks_on_host')
+ self.compute.network_api.setup_networks_on_host(c, instance,
+ self.compute.host,
+ teardown=True)
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'rollback_live_migration_at_destination')
+ self.compute.driver.rollback_live_migration_at_destination(c,
+ instance, [], {'swap': None, 'ephemerals': [],
+ 'block_device_mapping': []},
+ destroy_disks=True, migrate_data=None)
+
+ # start test
+ self.mox.ReplayAll()
+ ret = self.compute.rollback_live_migration_at_destination(c,
+ instance=instance)
+ self.assertIsNone(ret)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.live_migration.rollback.dest.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'compute.instance.live_migration.rollback.dest.end')
+
+ def test_run_kill_vm(self):
+ # Detect when a vm is terminated behind the scenes.
+ # Removes the VM directly from the fake driver, then runs the
+ # periodic power-state sync and checks the DB record survives with
+ # its task_state cleared.
+ instance = self._create_fake_instance_obj()
+
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+
+ instances = db.instance_get_all(self.context)
+ LOG.info("Running instances: %s", instances)
+ self.assertEqual(len(instances), 1)
+
+ instance_name = instances[0]['name']
+ # Simulate the hypervisor losing the VM outside of nova's control.
+ self.compute.driver.test_remove_vm(instance_name)
+
+ # Force the compute manager to do its periodic poll
+ ctxt = context.get_admin_context()
+ self.compute._sync_power_states(ctxt)
+
+ instances = db.instance_get_all(self.context)
+ LOG.info("After force-killing instances: %s", instances)
+ self.assertEqual(len(instances), 1)
+ self.assertIsNone(instances[0]['task_state'])
+
+ def _fill_fault(self, values):
+ extra = dict([(x, None) for x in ['created_at',
+ 'deleted_at',
+ 'updated_at',
+ 'deleted']])
+ extra['id'] = 1
+ extra['details'] = ''
+ extra.update(values)
+ return extra
+
+ def test_add_instance_fault(self):
+ # add_instance_fault_from_exc with exc_info records a 500 fault
+ # whose details contain the traceback of the raised exception.
+ instance = self._create_fake_instance()
+ exc_info = None
+
+ def fake_db_fault_create(ctxt, values):
+ # The traceback text must mention the raise site; details is
+ # then dropped so the remaining fields compare exactly.
+ self.assertIn('raise NotImplementedError', values['details'])
+ del values['details']
+
+ expected = {
+ 'code': 500,
+ 'message': 'test',
+ 'instance_uuid': instance['uuid'],
+ 'host': self.compute.host
+ }
+ self.assertEqual(expected, values)
+ return self._fill_fault(expected)
+
+ # Raise and catch to obtain a real exc_info triple.
+ try:
+ raise NotImplementedError('test')
+ except NotImplementedError:
+ exc_info = sys.exc_info()
+
+ self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
+
+ ctxt = context.get_admin_context()
+ compute_utils.add_instance_fault_from_exc(ctxt,
+ instance,
+ NotImplementedError('test'),
+ exc_info)
+
def test_add_instance_fault_with_remote_error(self):
    """A messaging.RemoteError becomes a 500 fault keeping the remote text.

    The recorded message embeds the remote exception type and value, and
    the details carry the traceback of the re-raised error.
    """
    instance = self._create_fake_instance()
    exc_info = None
    raised_exc = None

    def fake_db_fault_create(ctxt, values):
        # details must contain the raise site; drop it so the remaining
        # fields can be compared exactly.
        self.assertIn('raise messaging.RemoteError', values['details'])
        del values['details']

        expected = {
            'code': 500,
            'instance_uuid': instance['uuid'],
            'message': 'Remote error: test My Test Message\nNone.',
            'host': self.compute.host
        }
        self.assertEqual(expected, values)
        return self._fill_fault(expected)

    # Raise and catch to obtain a real exc_info triple.
    try:
        raise messaging.RemoteError('test', 'My Test Message')
    except messaging.RemoteError as exc:
        # Bind to a name that outlives the handler: Python 3 unbinds the
        # 'as' target when the except block exits, so reading 'exc' after
        # the block would raise NameError there.
        raised_exc = exc
        exc_info = sys.exc_info()

    self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)

    ctxt = context.get_admin_context()
    compute_utils.add_instance_fault_from_exc(ctxt,
                                              instance, raised_exc, exc_info)
+
+ def test_add_instance_fault_user_error(self):
+ # A user-facing 4xx exception keeps its code and gets empty details
+ # (no traceback is stored for user errors).
+ instance = self._create_fake_instance()
+ exc_info = None
+
+ def fake_db_fault_create(ctxt, values):
+
+ expected = {
+ 'code': 400,
+ 'message': 'fake details',
+ 'details': '',
+ 'instance_uuid': instance['uuid'],
+ 'host': self.compute.host
+ }
+ self.assertEqual(expected, values)
+ return self._fill_fault(expected)
+
+ user_exc = exception.Invalid('fake details', code=400)
+
+ # Raise and catch to obtain a real exc_info triple.
+ try:
+ raise user_exc
+ except exception.Invalid:
+ exc_info = sys.exc_info()
+
+ self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
+
+ ctxt = context.get_admin_context()
+ compute_utils.add_instance_fault_from_exc(ctxt,
+ instance, user_exc, exc_info)
+
+ def test_add_instance_fault_no_exc_info(self):
+ # Without an exc_info triple the fault is still recorded, with
+ # empty details.
+ instance = self._create_fake_instance()
+
+ def fake_db_fault_create(ctxt, values):
+ expected = {
+ 'code': 500,
+ 'message': 'test',
+ 'details': '',
+ 'instance_uuid': instance['uuid'],
+ 'host': self.compute.host
+ }
+ self.assertEqual(expected, values)
+ return self._fill_fault(expected)
+
+ self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
+
+ ctxt = context.get_admin_context()
+ compute_utils.add_instance_fault_from_exc(ctxt,
+ instance,
+ NotImplementedError('test'))
+
+ def test_add_instance_fault_long_message(self):
+ # Fault messages are truncated to 255 characters before the DB
+ # write (the message column limit).
+ instance = self._create_fake_instance()
+
+ message = 300 * 'a'
+
+ def fake_db_fault_create(ctxt, values):
+ expected = {
+ 'code': 500,
+ 'message': message[:255],
+ 'details': '',
+ 'instance_uuid': instance['uuid'],
+ 'host': self.compute.host
+ }
+ self.assertEqual(expected, values)
+ return self._fill_fault(expected)
+
+ self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
+
+ ctxt = context.get_admin_context()
+ compute_utils.add_instance_fault_from_exc(ctxt,
+ instance,
+ NotImplementedError(message))
+
+ def _test_cleanup_running(self, action):
+ # Shared setup for the _cleanup_running_deleted_instances tests:
+ # creates two instances deleted just over an hour ago, stubs the
+ # driver listing to return them, and configures the requested
+ # running_deleted_instance_action. Returns (ctxt, inst1, inst2).
+ admin_context = context.get_admin_context()
+ # Deleted 65 minutes ago, i.e. past the 3600s timeout below.
+ deleted_at = (timeutils.utcnow() -
+ datetime.timedelta(hours=1, minutes=5))
+ instance1 = self._create_fake_instance_obj({"deleted_at": deleted_at,
+ "deleted": True})
+ instance2 = self._create_fake_instance_obj({"deleted_at": deleted_at,
+ "deleted": True})
+
+ self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
+ self.compute._get_instances_on_driver(
+ admin_context, {'deleted': True,
+ 'soft_deleted': False,
+ 'host': self.compute.host}).AndReturn([instance1,
+ instance2])
+ self.flags(running_deleted_instance_timeout=3600,
+ running_deleted_instance_action=action)
+
+ return admin_context, instance1, instance2
+
+ def test_cleanup_running_deleted_instances_unrecognized_value(self):
+ # An unknown running_deleted_instance_action must make the cleanup
+ # task fail loudly instead of silently skipping instances.
+ admin_context = context.get_admin_context()
+ deleted_at = (timeutils.utcnow() -
+ datetime.timedelta(hours=1, minutes=5))
+ instance = self._create_fake_instance_obj({"deleted_at": deleted_at,
+ "deleted": True})
+ self.flags(running_deleted_instance_action='foo-action')
+
+ with mock.patch.object(
+ self.compute, '_get_instances_on_driver',
+ return_value=[instance]):
+ try:
+ # We cannot simply use an assertRaises here because the
+ # exception raised is too generally "Exception". To be sure
+ # that the exception raised is the expected one, we check
+ # the message.
+ self.compute._cleanup_running_deleted_instances(admin_context)
+ self.fail("Be sure this will never be executed.")
+ except Exception as e:
+ self.assertIn("Unrecognized value", six.text_type(e))
+
+ def test_cleanup_running_deleted_instances_reap(self):
+ # 'reap' shuts down and cleans volumes for each deleted instance;
+ # a failure on the first must not stop processing of the second.
+ ctxt, inst1, inst2 = self._test_cleanup_running('reap')
+ bdms = block_device_obj.block_device_make_list(ctxt, [])
+
+ self.mox.StubOutWithMock(self.compute, "_shutdown_instance")
+ self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
+ "get_by_instance_uuid")
+ # Simulate an error and make sure cleanup proceeds with next instance.
+ self.compute._shutdown_instance(ctxt, inst1, bdms, notify=False).\
+ AndRaise(test.TestingException)
+ objects.BlockDeviceMappingList.get_by_instance_uuid(ctxt,
+ inst1.uuid, use_slave=True).AndReturn(bdms)
+ objects.BlockDeviceMappingList.get_by_instance_uuid(ctxt,
+ inst2.uuid, use_slave=True).AndReturn(bdms)
+ self.compute._shutdown_instance(ctxt, inst2, bdms, notify=False).\
+ AndReturn(None)
+
+ # Only the successful shutdown reaches _cleanup_volumes.
+ self.mox.StubOutWithMock(self.compute, "_cleanup_volumes")
+ self.compute._cleanup_volumes(ctxt, inst1['uuid'], bdms).\
+ AndReturn(None)
+
+ self.mox.ReplayAll()
+ self.compute._cleanup_running_deleted_instances(ctxt)
+
+ def test_cleanup_running_deleted_instances_shutdown(self):
+ # 'shutdown' marks each deleted instance non-bootable and powers
+ # it off, without destroying it.
+ ctxt, inst1, inst2 = self._test_cleanup_running('shutdown')
+
+ self.mox.StubOutWithMock(self.compute.driver, 'set_bootable')
+ self.mox.StubOutWithMock(self.compute.driver, 'power_off')
+
+ self.compute.driver.set_bootable(inst1, False)
+ self.compute.driver.power_off(inst1)
+ self.compute.driver.set_bootable(inst2, False)
+ self.compute.driver.power_off(inst2)
+
+ self.mox.ReplayAll()
+ self.compute._cleanup_running_deleted_instances(ctxt)
+
+ def test_cleanup_running_deleted_instances_shutdown_notimpl(self):
+ # Drivers without set_bootable support raise NotImplementedError;
+ # cleanup logs a warning and still powers the instance off.
+ ctxt, inst1, inst2 = self._test_cleanup_running('shutdown')
+
+ self.mox.StubOutWithMock(self.compute.driver, 'set_bootable')
+ self.mox.StubOutWithMock(self.compute.driver, 'power_off')
+
+ self.compute.driver.set_bootable(inst1, False).AndRaise(
+ NotImplementedError)
+ compute_manager.LOG.warn(mox.IgnoreArg())
+ self.compute.driver.power_off(inst1)
+ self.compute.driver.set_bootable(inst2, False).AndRaise(
+ NotImplementedError)
+ compute_manager.LOG.warn(mox.IgnoreArg())
+ self.compute.driver.power_off(inst2)
+
+ self.mox.ReplayAll()
+ self.compute._cleanup_running_deleted_instances(ctxt)
+
+ def test_cleanup_running_deleted_instances_shutdown_error(self):
+ # A power_off failure on one instance is logged and must not stop
+ # the cleanup loop from handling the next instance.
+ ctxt, inst1, inst2 = self._test_cleanup_running('shutdown')
+
+ self.mox.StubOutWithMock(self.compute.driver, 'set_bootable')
+ self.mox.StubOutWithMock(self.compute.driver, 'power_off')
+
+ self.mox.StubOutWithMock(compute_manager.LOG, 'exception')
+ e = test.TestingException('bad')
+
+ self.compute.driver.set_bootable(inst1, False)
+ self.compute.driver.power_off(inst1).AndRaise(e)
+ compute_manager.LOG.warn(mox.IgnoreArg())
+
+ self.compute.driver.set_bootable(inst2, False)
+ self.compute.driver.power_off(inst2).AndRaise(e)
+ compute_manager.LOG.warn(mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+ self.compute._cleanup_running_deleted_instances(ctxt)
+
+ def test_running_deleted_instances(self):
+ # _running_deleted_instances returns the driver-visible instances
+ # that are DB-deleted and whose deleted_at is past the timeout.
+ admin_context = context.get_admin_context()
+
+ self.compute.host = 'host'
+
+ instance1 = {}
+ instance1['deleted'] = True
+ instance1['deleted_at'] = "sometimeago"
+
+ self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
+ self.compute._get_instances_on_driver(
+ admin_context, {'deleted': True,
+ 'soft_deleted': False,
+ 'host': self.compute.host}).AndReturn([instance1])
+
+ # Stub the age check so the placeholder timestamp counts as stale.
+ self.mox.StubOutWithMock(timeutils, 'is_older_than')
+ timeutils.is_older_than('sometimeago',
+ CONF.running_deleted_instance_timeout).AndReturn(True)
+
+ self.mox.ReplayAll()
+ val = self.compute._running_deleted_instances(admin_context)
+ self.assertEqual(val, [instance1])
+
+ def test_get_instance_nw_info(self):
+ # _get_instance_nw_info delegates to network_api.get_instance_nw_info
+ # with a full Instance object and returns its NetworkInfo unchanged.
+ fake_network.unset_stub_network_methods(self.stubs)
+
+ fake_inst = fake_instance.fake_db_instance(uuid='fake-instance')
+ fake_nw_info = network_model.NetworkInfo()
+
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'get_instance_nw_info')
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+
+ db.instance_get_by_uuid(self.context, fake_inst['uuid']
+ ).AndReturn(fake_inst)
+ # NOTE(danms): compute manager will re-query since we're not giving
+ # it an instance with system_metadata. We're stubbing out the
+ # subsequent call so we don't need it, but keep this to make sure it
+ # does the right thing.
+ db.instance_get_by_uuid(self.context, fake_inst['uuid'],
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ self.compute.network_api.get_instance_nw_info(self.context,
+ mox.IsA(objects.Instance)).AndReturn(fake_nw_info)
+
+ self.mox.ReplayAll()
+
+ fake_inst_obj = objects.Instance._from_db_object(
+ self.context, objects.Instance(), fake_inst, [])
+ result = self.compute._get_instance_nw_info(self.context,
+ fake_inst_obj)
+ self.assertEqual(fake_nw_info, result)
+
+ def _heal_instance_info_cache(self, _get_instance_nw_info_raise=False):
+ # Exercises the _heal_instance_info_cache periodic task across
+ # several runs: one heal per run, instances in BUILDING/DELETING
+ # or on another host are skipped, and (optionally) a nw_info
+ # failure is tolerated. Call counts are tracked via call_info.
+ # Update on every call for the test
+ self.flags(heal_instance_info_cache_interval=-1)
+ ctxt = context.get_admin_context()
+
+ instance_map = {}
+ instances = []
+ for x in xrange(8):
+ inst_uuid = 'fake-uuid-%s' % x
+ instance_map[inst_uuid] = fake_instance.fake_db_instance(
+ uuid=inst_uuid, host=CONF.host, created_at=None)
+ # These won't be in our instance since they're not requested
+ instances.append(instance_map[inst_uuid])
+
+ call_info = {'get_all_by_host': 0, 'get_by_uuid': 0,
+ 'get_nw_info': 0, 'expected_instance': None}
+
+ def fake_instance_get_all_by_host(context, host,
+ columns_to_join, use_slave=False):
+ call_info['get_all_by_host'] += 1
+ self.assertEqual([], columns_to_join)
+ return instances[:]
+
+ def fake_instance_get_by_uuid(context, instance_uuid,
+ columns_to_join, use_slave=False):
+ if instance_uuid not in instance_map:
+ raise exception.InstanceNotFound(instance_id=instance_uuid)
+ call_info['get_by_uuid'] += 1
+ self.assertEqual(['system_metadata', 'info_cache'],
+ columns_to_join)
+ return instance_map[instance_uuid]
+
+ # NOTE(comstud): Override the stub in setUp()
+ def fake_get_instance_nw_info(context, instance, use_slave=False):
+ # Note that this exception gets caught in compute/manager
+ # and is ignored. However, the below increment of
+ # 'get_nw_info' won't happen, and you'll get an assert
+ # failure checking it below.
+ self.assertEqual(call_info['expected_instance']['uuid'],
+ instance['uuid'])
+ call_info['get_nw_info'] += 1
+ if _get_instance_nw_info_raise:
+ raise exception.InstanceNotFound(instance_id=instance['uuid'])
+
+ self.stubs.Set(db, 'instance_get_all_by_host',
+ fake_instance_get_all_by_host)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fake_instance_get_by_uuid)
+ self.stubs.Set(self.compute, '_get_instance_nw_info',
+ fake_get_instance_nw_info)
+
+ # Make an instance appear to be still Building
+ instances[0]['vm_state'] = vm_states.BUILDING
+ # Make an instance appear to be Deleting
+ instances[1]['task_state'] = task_states.DELETING
+ # '0', '1' should be skipped..
+ call_info['expected_instance'] = instances[2]
+ self.compute._heal_instance_info_cache(ctxt)
+ self.assertEqual(1, call_info['get_all_by_host'])
+ self.assertEqual(0, call_info['get_by_uuid'])
+ self.assertEqual(1, call_info['get_nw_info'])
+
+ # Second run: the uuid list is cached, so the next candidate is
+ # re-fetched by uuid rather than re-listing the host.
+ call_info['expected_instance'] = instances[3]
+ self.compute._heal_instance_info_cache(ctxt)
+ self.assertEqual(1, call_info['get_all_by_host'])
+ self.assertEqual(1, call_info['get_by_uuid'])
+ self.assertEqual(2, call_info['get_nw_info'])
+
+ # Make an instance switch hosts
+ instances[4]['host'] = 'not-me'
+ # Make an instance disappear
+ instance_map.pop(instances[5]['uuid'])
+ # Make an instance switch to be Deleting
+ instances[6]['task_state'] = task_states.DELETING
+ # '4', '5', and '6' should be skipped..
+ call_info['expected_instance'] = instances[7]
+ self.compute._heal_instance_info_cache(ctxt)
+ self.assertEqual(1, call_info['get_all_by_host'])
+ self.assertEqual(4, call_info['get_by_uuid'])
+ self.assertEqual(3, call_info['get_nw_info'])
+ # Should be no more left.
+ self.assertEqual(0, len(self.compute._instance_uuids_to_heal))
+
+ # This should cause a DB query now, so get a list of instances
+ # where none can be processed to make sure we handle that case
+ # cleanly. Use just '0' (Building) and '1' (Deleting)
+ instances = instances[0:2]
+
+ self.compute._heal_instance_info_cache(ctxt)
+ # Should have called the list once more
+ self.assertEqual(2, call_info['get_all_by_host'])
+ # Stays the same because we remove invalid entries from the list
+ self.assertEqual(4, call_info['get_by_uuid'])
+ # Stays the same because we didn't find anything to process
+ self.assertEqual(3, call_info['get_nw_info'])
+
def test_heal_instance_info_cache(self):
    """Happy path: the heal task runs without a nw_info failure."""
    self._heal_instance_info_cache(_get_instance_nw_info_raise=False)
+
def test_heal_instance_info_cache_with_exception(self):
    """The heal task tolerates _get_instance_nw_info raising."""
    raise_nw_info = True
    self._heal_instance_info_cache(
        _get_instance_nw_info_raise=raise_nw_info)
+
+ @mock.patch('nova.objects.InstanceList.get_by_filters')
+ @mock.patch('nova.compute.api.API.unrescue')
+ def test_poll_rescued_instances(self, unrescue, get):
+ # The periodic _poll_rescued_instances task must unrescue only
+ # RESCUED instances older than rescue_timeout.
+ timed_out_time = timeutils.utcnow() - datetime.timedelta(minutes=5)
+ not_timed_out_time = timeutils.utcnow()
+
+ instances = [objects.Instance(uuid='fake_uuid1',
+ vm_state=vm_states.RESCUED,
+ launched_at=timed_out_time),
+ objects.Instance(uuid='fake_uuid2',
+ vm_state=vm_states.RESCUED,
+ launched_at=timed_out_time),
+ objects.Instance(uuid='fake_uuid3',
+ vm_state=vm_states.RESCUED,
+ launched_at=not_timed_out_time)]
+ # uuid3 is fresh, so only these two must be unrescued.
+ unrescued_instances = {'fake_uuid1': False, 'fake_uuid2': False}
+
+ def fake_instance_get_all_by_filters(context, filters,
+ expected_attrs=None,
+ use_slave=False):
+ self.assertEqual(["system_metadata"], expected_attrs)
+ return instances
+
+ get.side_effect = fake_instance_get_all_by_filters
+
+ def fake_unrescue(context, instance):
+ unrescued_instances[instance['uuid']] = True
+
+ unrescue.side_effect = fake_unrescue
+
+ self.flags(rescue_timeout=60)
+ ctxt = context.get_admin_context()
+
+ self.compute._poll_rescued_instances(ctxt)
+
+ for instance in unrescued_instances.values():
+ self.assertTrue(instance)
+
+ def test_poll_unconfirmed_resizes(self):
+ # _poll_unconfirmed_resizes auto-confirms RESIZED instances past
+ # the confirm window and marks unconfirmable migrations 'error';
+ # instances being deleted are left alone (bug 1301696).
+ instances = [
+ fake_instance.fake_db_instance(uuid='fake_uuid1',
+ vm_state=vm_states.RESIZED,
+ task_state=None),
+ fake_instance.fake_db_instance(uuid='noexist'),
+ fake_instance.fake_db_instance(uuid='fake_uuid2',
+ vm_state=vm_states.ERROR,
+ task_state=None),
+ fake_instance.fake_db_instance(uuid='fake_uuid3',
+ vm_state=vm_states.ACTIVE,
+ task_state=
+ task_states.REBOOTING),
+ fake_instance.fake_db_instance(uuid='fake_uuid4',
+ vm_state=vm_states.RESIZED,
+ task_state=None),
+ fake_instance.fake_db_instance(uuid='fake_uuid5',
+ vm_state=vm_states.ACTIVE,
+ task_state=None),
+ # The expceted migration result will be None instead of error
+ # since _poll_unconfirmed_resizes will not change it
+ # when the instance vm state is RESIZED and task state
+ # is deleting, see bug 1301696 for more detail
+ fake_instance.fake_db_instance(uuid='fake_uuid6',
+ vm_state=vm_states.RESIZED,
+ task_state='deleting'),
+ fake_instance.fake_db_instance(uuid='fake_uuid7',
+ vm_state=vm_states.RESIZED,
+ task_state='soft-deleting'),
+ fake_instance.fake_db_instance(uuid='fake_uuid8',
+ vm_state=vm_states.ACTIVE,
+ task_state='resize_finish')]
+ # Expected per-uuid migration status after one poll pass.
+ expected_migration_status = {'fake_uuid1': 'confirmed',
+ 'noexist': 'error',
+ 'fake_uuid2': 'error',
+ 'fake_uuid3': 'error',
+ 'fake_uuid4': None,
+ 'fake_uuid5': 'error',
+ 'fake_uuid6': None,
+ 'fake_uuid7': None,
+ 'fake_uuid8': None}
+ migrations = []
+ for i, instance in enumerate(instances, start=1):
+ fake_mig = test_migration.fake_db_migration()
+ fake_mig.update({'id': i,
+ 'instance_uuid': instance['uuid'],
+ 'status': None})
+ migrations.append(fake_mig)
+
+ def fake_instance_get_by_uuid(context, instance_uuid,
+ columns_to_join=None, use_slave=False):
+ self.assertIn('metadata', columns_to_join)
+ self.assertIn('system_metadata', columns_to_join)
+ # raise InstanceNotFound exception for uuid 'noexist'
+ if instance_uuid == 'noexist':
+ raise exception.InstanceNotFound(instance_id=instance_uuid)
+ for instance in instances:
+ if instance['uuid'] == instance_uuid:
+ return instance
+
+ def fake_migration_get_unconfirmed_by_dest_compute(context,
+ resize_confirm_window, dest_compute, use_slave=False):
+ self.assertEqual(dest_compute, CONF.host)
+ return migrations
+
+ def fake_migration_update(context, mid, updates):
+ for migration in migrations:
+ if migration['id'] == mid:
+ migration.update(updates)
+ return migration
+
+ def fake_confirm_resize(context, instance, migration=None):
+ # raise exception for 'fake_uuid4' to check migration status
+ # does not get set to 'error' on confirm_resize failure.
+ if instance['uuid'] == 'fake_uuid4':
+ raise test.TestingException('bomb')
+ self.assertIsNotNone(migration)
+ for migration2 in migrations:
+ if (migration2['instance_uuid'] ==
+ migration['instance_uuid']):
+ migration2['status'] = 'confirmed'
+
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fake_instance_get_by_uuid)
+ self.stubs.Set(db, 'migration_get_unconfirmed_by_dest_compute',
+ fake_migration_get_unconfirmed_by_dest_compute)
+ self.stubs.Set(db, 'migration_update', fake_migration_update)
+ self.stubs.Set(self.compute.compute_api, 'confirm_resize',
+ fake_confirm_resize)
+
+ def fetch_instance_migration_status(instance_uuid):
+ # Helper: current status of the migration for instance_uuid.
+ for migration in migrations:
+ if migration['instance_uuid'] == instance_uuid:
+ return migration['status']
+
+ self.flags(resize_confirm_window=60)
+ ctxt = context.get_admin_context()
+
+ self.compute._poll_unconfirmed_resizes(ctxt)
+
+ for instance_uuid, status in expected_migration_status.iteritems():
+ self.assertEqual(status,
+ fetch_instance_migration_status(instance_uuid))
+
+ def test_instance_build_timeout_mixed_instances(self):
+ # Tests that instances which failed to build within the configured
+ # instance_build_timeout value are set to error state.
+ self.flags(instance_build_timeout=30)
+ ctxt = context.get_admin_context()
+ # 60s ago with a 30s timeout -> already expired.
+ created_at = timeutils.utcnow() + datetime.timedelta(seconds=-60)
+
+ filters = {'vm_state': vm_states.BUILDING, 'host': CONF.host}
+ # these are the ones that are expired
+ old_instances = []
+ for x in xrange(4):
+ instance = {'uuid': str(uuid.uuid4()), 'created_at': created_at}
+ instance.update(filters)
+ old_instances.append(fake_instance.fake_db_instance(**instance))
+
+ # not expired
+ instances = list(old_instances) # copy the contents of old_instances
+ new_instance = {
+ 'uuid': str(uuid.uuid4()),
+ 'created_at': timeutils.utcnow(),
+ }
+ sort_key = 'created_at'
+ sort_dir = 'desc'
+ new_instance.update(filters)
+ instances.append(fake_instance.fake_db_instance(**new_instance))
+
+ # need something to return from conductor_api.instance_update
+ # that is defined outside the for loop and can be used in the mock
+ # context
+ fake_instance_ref = {'host': CONF.host, 'node': 'fake'}
+
+ # creating mocks
+ # node_is_available=False keeps the periodic task on the
+ # set-to-ERROR path for every expired instance.
+ with contextlib.nested(
+ mock.patch.object(self.compute.db.sqlalchemy.api,
+ 'instance_get_all_by_filters',
+ return_value=instances),
+ mock.patch.object(self.compute.conductor_api, 'instance_update',
+ return_value=fake_instance_ref),
+ mock.patch.object(self.compute.driver, 'node_is_available',
+ return_value=False)
+ ) as (
+ instance_get_all_by_filters,
+ conductor_instance_update,
+ node_is_available
+ ):
+ # run the code
+ self.compute._check_instance_build_time(ctxt)
+ # check our assertions
+ instance_get_all_by_filters.assert_called_once_with(
+ ctxt, filters,
+ sort_key,
+ sort_dir,
+ marker=None,
+ columns_to_join=[],
+ use_slave=True,
+ limit=None)
+ # Only the four expired instances are updated/checked; the
+ # fresh one is left alone.
+ self.assertThat(conductor_instance_update.mock_calls,
+ testtools_matchers.HasLength(len(old_instances)))
+ self.assertThat(node_is_available.mock_calls,
+ testtools_matchers.HasLength(len(old_instances)))
+ for inst in old_instances:
+ conductor_instance_update.assert_has_calls([
+ mock.call(ctxt, inst['uuid'],
+ vm_state=vm_states.ERROR)])
+ node_is_available.assert_has_calls([
+ mock.call(fake_instance_ref['node'])])
+
def test_get_resource_tracker_fail(self):
    """An unknown nodename makes _get_resource_tracker raise NovaException."""
    with self.assertRaises(exception.NovaException):
        self.compute._get_resource_tracker('invalidnodename')
+
+ def test_instance_update_host_check(self):
+ # make sure rt usage doesn't happen if the host or node is different
+ def fail_get(nodename):
+ # Any resource-tracker lookup means the host/node filter failed.
+ raise test.TestingException(_("wrong host/node"))
+ self.stubs.Set(self.compute, '_get_resource_tracker', fail_get)
+
+ # Different host only.
+ instance = self._create_fake_instance({'host': 'someotherhost'})
+ self.compute._instance_update(self.context, instance['uuid'])
+
+ # Different node only.
+ instance = self._create_fake_instance({'node': 'someothernode'})
+ self.compute._instance_update(self.context, instance['uuid'])
+
+ # Both host and node differ.
+ params = {'host': 'someotherhost', 'node': 'someothernode'}
+ instance = self._create_fake_instance(params)
+ self.compute._instance_update(self.context, instance['uuid'])
+
+ def test_destroy_evacuated_instance_on_shared_storage(self):
+ fake_context = context.get_admin_context()
+
+ # instances in central db
+ instances = [
+ # those are still related to this host
+ self._create_fake_instance_obj(
+ {'host': self.compute.host}),
+ self._create_fake_instance_obj(
+ {'host': self.compute.host}),
+ self._create_fake_instance_obj(
+ {'host': self.compute.host})
+ ]
+
+ # those are already been evacuated to other host
+ evacuated_instance = self._create_fake_instance_obj(
+ {'host': 'otherhost'})
+
+ instances.append(evacuated_instance)
+
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instances_on_driver')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_nw_info')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_block_device_info')
+ self.mox.StubOutWithMock(self.compute,
+ '_is_instance_storage_shared')
+ self.mox.StubOutWithMock(self.compute.driver, 'destroy')
+
+ self.compute._get_instances_on_driver(
+ fake_context, {'deleted': False}).AndReturn(instances)
+ self.compute._get_instance_nw_info(fake_context,
+ evacuated_instance).AndReturn(
+ 'fake_network_info')
+ self.compute._get_instance_block_device_info(
+ fake_context, evacuated_instance).AndReturn('fake_bdi')
+ self.compute._is_instance_storage_shared(fake_context,
+ evacuated_instance).AndReturn(True)
+ self.compute.driver.destroy(fake_context, evacuated_instance,
+ 'fake_network_info',
+ 'fake_bdi',
+ False)
+
+ self.mox.ReplayAll()
+ self.compute._destroy_evacuated_instances(fake_context)
+
+ def test_destroy_evacuated_instance_with_disks(self):
+ fake_context = context.get_admin_context()
+
+ # instances in central db
+ instances = [
+ # those are still related to this host
+ self._create_fake_instance_obj(
+ {'host': self.compute.host}),
+ self._create_fake_instance_obj(
+ {'host': self.compute.host}),
+ self._create_fake_instance_obj(
+ {'host': self.compute.host})
+ ]
+
+ # those are already been evacuated to other host
+ evacuated_instance = self._create_fake_instance_obj(
+ {'host': 'otherhost'})
+
+ instances.append(evacuated_instance)
+
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instances_on_driver')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_nw_info')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_block_device_info')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'check_instance_shared_storage_local')
+ self.mox.StubOutWithMock(self.compute.compute_rpcapi,
+ 'check_instance_shared_storage')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'check_instance_shared_storage_cleanup')
+ self.mox.StubOutWithMock(self.compute.driver, 'destroy')
+
+ self.compute._get_instances_on_driver(
+ fake_context, {'deleted': False}).AndReturn(instances)
+ self.compute._get_instance_nw_info(fake_context,
+ evacuated_instance).AndReturn(
+ 'fake_network_info')
+ self.compute._get_instance_block_device_info(
+ fake_context, evacuated_instance).AndReturn('fake_bdi')
+ self.compute.driver.check_instance_shared_storage_local(fake_context,
+ evacuated_instance).AndReturn({'filename': 'tmpfilename'})
+ self.compute.compute_rpcapi.check_instance_shared_storage(fake_context,
+ evacuated_instance,
+ {'filename': 'tmpfilename'}).AndReturn(False)
+ self.compute.driver.check_instance_shared_storage_cleanup(fake_context,
+ {'filename': 'tmpfilename'})
+ self.compute.driver.destroy(fake_context, evacuated_instance,
+ 'fake_network_info',
+ 'fake_bdi',
+ True)
+
+ self.mox.ReplayAll()
+ self.compute._destroy_evacuated_instances(fake_context)
+
+ def test_destroy_evacuated_instance_not_implemented(self):
+ fake_context = context.get_admin_context()
+
+ # instances in central db
+ instances = [
+ # those are still related to this host
+ self._create_fake_instance_obj(
+ {'host': self.compute.host}),
+ self._create_fake_instance_obj(
+ {'host': self.compute.host}),
+ self._create_fake_instance_obj(
+ {'host': self.compute.host})
+ ]
+
+ # those are already been evacuated to other host
+ evacuated_instance = self._create_fake_instance_obj(
+ {'host': 'otherhost'})
+
+ instances.append(evacuated_instance)
+
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instances_on_driver')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_nw_info')
+ self.mox.StubOutWithMock(self.compute,
+ '_get_instance_block_device_info')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'check_instance_shared_storage_local')
+ self.mox.StubOutWithMock(self.compute.compute_rpcapi,
+ 'check_instance_shared_storage')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'check_instance_shared_storage_cleanup')
+ self.mox.StubOutWithMock(self.compute.driver, 'destroy')
+
+ self.compute._get_instances_on_driver(
+ fake_context, {'deleted': False}).AndReturn(instances)
+ self.compute._get_instance_nw_info(fake_context,
+ evacuated_instance).AndReturn(
+ 'fake_network_info')
+ self.compute._get_instance_block_device_info(
+ fake_context, evacuated_instance).AndReturn('fake_bdi')
+ self.compute.driver.check_instance_shared_storage_local(fake_context,
+ evacuated_instance).AndRaise(NotImplementedError())
+ self.compute.driver.destroy(fake_context, evacuated_instance,
+ 'fake_network_info',
+ 'fake_bdi',
+ True)
+
+ self.mox.ReplayAll()
+ self.compute._destroy_evacuated_instances(fake_context)
+
+ def test_complete_partial_deletion(self):
+ admin_context = context.get_admin_context()
+ instance = objects.Instance()
+ instance.id = 1
+ instance.uuid = 'fake-uuid'
+ instance.vm_state = vm_states.DELETED
+ instance.task_state = None
+ instance.system_metadata = {'fake_key': 'fake_value'}
+ instance.vcpus = 1
+ instance.memory_mb = 1
+ instance.project_id = 'fake-prj'
+ instance.user_id = 'fake-user'
+ instance.deleted = False
+
+ def fake_destroy():
+ instance.deleted = True
+
+ self.stubs.Set(instance, 'destroy', fake_destroy)
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ lambda *a, **k: None)
+
+ self.stubs.Set(self.compute,
+ '_complete_deletion',
+ lambda *a, **k: None)
+
+ self.stubs.Set(objects.Quotas, 'reserve', lambda *a, **k: None)
+
+ self.compute._complete_partial_deletion(admin_context, instance)
+
+ self.assertNotEqual(0, instance.deleted)
+
+ def test_init_instance_for_partial_deletion(self):
+ admin_context = context.get_admin_context()
+ instance = objects.Instance(admin_context)
+ instance.id = 1
+ instance.vm_state = vm_states.DELETED
+ instance.deleted = False
+
+ def fake_partial_deletion(context, instance):
+ instance['deleted'] = instance['id']
+
+ self.stubs.Set(self.compute,
+ '_complete_partial_deletion',
+ fake_partial_deletion)
+ self.compute._init_instance(admin_context, instance)
+
+ self.assertNotEqual(0, instance['deleted'])
+
+ def test_partial_deletion_raise_exception(self):
+ admin_context = context.get_admin_context()
+ instance = objects.Instance(admin_context)
+ instance.uuid = str(uuid.uuid4())
+ instance.vm_state = vm_states.DELETED
+ instance.deleted = False
+
+ self.mox.StubOutWithMock(self.compute, '_complete_partial_deletion')
+ self.compute._complete_partial_deletion(
+ admin_context, instance).AndRaise(ValueError)
+ self.mox.ReplayAll()
+
+ self.compute._init_instance(admin_context, instance)
+
+ def test_add_remove_fixed_ip_updates_instance_updated_at(self):
+ def _noop(*args, **kwargs):
+ pass
+
+ self.stubs.Set(self.compute.network_api,
+ 'add_fixed_ip_to_instance', _noop)
+ self.stubs.Set(self.compute.network_api,
+ 'remove_fixed_ip_from_instance', _noop)
+
+ instance = self._create_fake_instance_obj()
+ updated_at_1 = instance['updated_at']
+
+ self.compute.add_fixed_ip_to_instance(self.context, 'fake', instance)
+ updated_at_2 = db.instance_get_by_uuid(self.context,
+ instance['uuid'])['updated_at']
+
+ self.compute.remove_fixed_ip_from_instance(self.context, 'fake',
+ instance)
+ updated_at_3 = db.instance_get_by_uuid(self.context,
+ instance['uuid'])['updated_at']
+
+ updated_ats = (updated_at_1, updated_at_2, updated_at_3)
+ self.assertEqual(len(updated_ats), len(set(updated_ats)))
+
+ def test_no_pending_deletes_for_soft_deleted_instances(self):
+ self.flags(reclaim_instance_interval=0)
+ ctxt = context.get_admin_context()
+
+ instance = self._create_fake_instance(
+ params={'host': CONF.host,
+ 'vm_state': vm_states.SOFT_DELETED,
+ 'deleted_at': timeutils.utcnow()})
+
+ self.compute._run_pending_deletes(ctxt)
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+ self.assertFalse(instance['cleaned'])
+
+ def test_reclaim_queued_deletes(self):
+ self.flags(reclaim_instance_interval=3600)
+ ctxt = context.get_admin_context()
+
+ # Active
+ self._create_fake_instance(params={'host': CONF.host})
+
+ # Deleted not old enough
+ self._create_fake_instance(params={'host': CONF.host,
+ 'vm_state': vm_states.SOFT_DELETED,
+ 'deleted_at': timeutils.utcnow()})
+
+ # Deleted old enough (only this one should be reclaimed)
+ deleted_at = (timeutils.utcnow() -
+ datetime.timedelta(hours=1, minutes=5))
+ self._create_fake_instance(
+ params={'host': CONF.host,
+ 'vm_state': vm_states.SOFT_DELETED,
+ 'deleted_at': deleted_at})
+
+ # Restoring
+ # NOTE(hanlind): This specifically tests for a race condition
+ # where restoring a previously soft deleted instance sets
+ # deleted_at back to None, causing reclaim to think it can be
+ # deleted, see LP #1186243.
+ self._create_fake_instance(
+ params={'host': CONF.host,
+ 'vm_state': vm_states.SOFT_DELETED,
+ 'task_state': task_states.RESTORING})
+
+ self.mox.StubOutWithMock(self.compute, '_delete_instance')
+ self.compute._delete_instance(
+ ctxt, mox.IsA(objects.Instance), [],
+ mox.IsA(objects.Quotas))
+
+ self.mox.ReplayAll()
+
+ self.compute._reclaim_queued_deletes(ctxt)
+
+ def test_reclaim_queued_deletes_continue_on_error(self):
+ # Verify that reclaim continues on error.
+ self.flags(reclaim_instance_interval=3600)
+ ctxt = context.get_admin_context()
+
+ deleted_at = (timeutils.utcnow() -
+ datetime.timedelta(hours=1, minutes=5))
+ instance1 = self._create_fake_instance_obj(
+ params={'host': CONF.host,
+ 'vm_state': vm_states.SOFT_DELETED,
+ 'deleted_at': deleted_at})
+ instance2 = self._create_fake_instance_obj(
+ params={'host': CONF.host,
+ 'vm_state': vm_states.SOFT_DELETED,
+ 'deleted_at': deleted_at})
+ instances = []
+ instances.append(instance1)
+ instances.append(instance2)
+
+ self.mox.StubOutWithMock(objects.InstanceList,
+ 'get_by_filters')
+ self.mox.StubOutWithMock(self.compute, '_deleted_old_enough')
+ self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')
+ self.mox.StubOutWithMock(self.compute, '_delete_instance')
+
+ objects.InstanceList.get_by_filters(
+ ctxt, mox.IgnoreArg(),
+ expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
+ use_slave=True
+ ).AndReturn(instances)
+
+ # The first instance delete fails.
+ self.compute._deleted_old_enough(instance1, 3600).AndReturn(True)
+ objects.BlockDeviceMappingList.get_by_instance_uuid(
+ ctxt, instance1.uuid).AndReturn([])
+ self.compute._delete_instance(ctxt, instance1,
+ [], self.none_quotas).AndRaise(
+ test.TestingException)
+
+ # The second instance delete that follows.
+ self.compute._deleted_old_enough(instance2, 3600).AndReturn(True)
+ objects.BlockDeviceMappingList.get_by_instance_uuid(
+ ctxt, instance2.uuid).AndReturn([])
+ self.compute._delete_instance(ctxt, instance2,
+ [], self.none_quotas)
+
+ self.mox.ReplayAll()
+
+ self.compute._reclaim_queued_deletes(ctxt)
+
+ def test_sync_power_states(self):
+ ctxt = self.context.elevated()
+ self._create_fake_instance({'host': self.compute.host})
+ self._create_fake_instance({'host': self.compute.host})
+ self._create_fake_instance({'host': self.compute.host})
+ self.mox.StubOutWithMock(self.compute.driver, 'get_info')
+ self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')
+
+ # Check to make sure task continues on error.
+ self.compute.driver.get_info(mox.IgnoreArg()).AndRaise(
+ exception.InstanceNotFound(instance_id='fake-uuid'))
+ self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
+ power_state.NOSTATE).AndRaise(
+ exception.InstanceNotFound(instance_id='fake-uuid'))
+
+ self.compute.driver.get_info(mox.IgnoreArg()).AndReturn(
+ {'state': power_state.RUNNING})
+ self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
+ power_state.RUNNING,
+ use_slave=True)
+ self.compute.driver.get_info(mox.IgnoreArg()).AndReturn(
+ {'state': power_state.SHUTDOWN})
+ self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
+ power_state.SHUTDOWN,
+ use_slave=True)
+ self.mox.ReplayAll()
+ self.compute._sync_power_states(ctxt)
+
+ def _test_lifecycle_event(self, lifecycle_event, power_state):
+ instance = self._create_fake_instance()
+ uuid = instance['uuid']
+
+ self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')
+ if power_state is not None:
+ self.compute._sync_instance_power_state(
+ mox.IgnoreArg(),
+ mox.ContainsKeyValue('uuid', uuid),
+ power_state)
+ self.mox.ReplayAll()
+ self.compute.handle_events(event.LifecycleEvent(uuid, lifecycle_event))
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
+
+ def test_lifecycle_events(self):
+ self._test_lifecycle_event(event.EVENT_LIFECYCLE_STOPPED,
+ power_state.SHUTDOWN)
+ self._test_lifecycle_event(event.EVENT_LIFECYCLE_STARTED,
+ power_state.RUNNING)
+ self._test_lifecycle_event(event.EVENT_LIFECYCLE_PAUSED,
+ power_state.PAUSED)
+ self._test_lifecycle_event(event.EVENT_LIFECYCLE_RESUMED,
+ power_state.RUNNING)
+ self._test_lifecycle_event(-1, None)
+
+ def test_lifecycle_event_non_existent_instance(self):
+ # No error raised for non-existent instance because of inherent race
+ # between database updates and hypervisor events. See bug #1180501.
+ event_instance = event.LifecycleEvent('does-not-exist',
+ event.EVENT_LIFECYCLE_STOPPED)
+ self.compute.handle_events(event_instance)
+
+ @mock.patch.object(objects.Migration, 'get_by_id')
+ @mock.patch.object(objects.Quotas, 'rollback')
+ def test_confirm_resize_roll_back_quota_migration_not_found(self,
+ mock_rollback, mock_get_by_id):
+ instance = self._create_fake_instance_obj()
+
+ migration = objects.Migration()
+ migration.instance_uuid = instance.uuid
+ migration.status = 'finished'
+ migration.id = 0
+
+ mock_get_by_id.side_effect = exception.MigrationNotFound(
+ migration_id=0)
+ self.compute.confirm_resize(self.context, instance=instance,
+ migration=migration, reservations=[])
+ self.assertTrue(mock_rollback.called)
+
+ @mock.patch.object(instance_obj.Instance, 'get_by_uuid')
+ @mock.patch.object(objects.Quotas, 'rollback')
+ def test_confirm_resize_roll_back_quota_instance_not_found(self,
+ mock_rollback, mock_get_by_id):
+ instance = self._create_fake_instance_obj()
+
+ migration = objects.Migration()
+ migration.instance_uuid = instance.uuid
+ migration.status = 'finished'
+ migration.id = 0
+
+ mock_get_by_id.side_effect = exception.InstanceNotFound(
+ instance_id=instance.uuid)
+ self.compute.confirm_resize(self.context, instance=instance,
+ migration=migration, reservations=[])
+ self.assertTrue(mock_rollback.called)
+
+ @mock.patch.object(objects.Migration, 'get_by_id')
+ @mock.patch.object(objects.Quotas, 'rollback')
+ def test_confirm_resize_roll_back_quota_status_confirmed(self,
+ mock_rollback, mock_get_by_id):
+ instance = self._create_fake_instance_obj()
+
+ migration = objects.Migration()
+ migration.instance_uuid = instance.uuid
+ migration.status = 'confirmed'
+ migration.id = 0
+
+ mock_get_by_id.return_value = migration
+ self.compute.confirm_resize(self.context, instance=instance,
+ migration=migration, reservations=[])
+ self.assertTrue(mock_rollback.called)
+
+ @mock.patch.object(objects.Migration, 'get_by_id')
+ @mock.patch.object(objects.Quotas, 'rollback')
+ def test_confirm_resize_roll_back_quota_status_dummy(self,
+ mock_rollback, mock_get_by_id):
+ instance = self._create_fake_instance_obj()
+
+ migration = objects.Migration()
+ migration.instance_uuid = instance.uuid
+ migration.status = 'dummy'
+ migration.id = 0
+
+ mock_get_by_id.return_value = migration
+ self.compute.confirm_resize(self.context, instance=instance,
+ migration=migration, reservations=[])
+ self.assertTrue(mock_rollback.called)
+
+ def test_allow_confirm_resize_on_instance_in_deleting_task_state(self):
+ instance = self._create_fake_instance_obj()
+ old_type = flavors.extract_flavor(instance)
+ new_type = flavors.get_flavor_by_flavor_id('4')
+ sys_meta = instance.system_metadata
+ sys_meta = flavors.save_flavor_info(sys_meta,
+ old_type, 'old_')
+ sys_meta = flavors.save_flavor_info(sys_meta,
+ new_type, 'new_')
+ sys_meta = flavors.save_flavor_info(sys_meta,
+ new_type)
+
+ fake_rt = self.mox.CreateMockAnything()
+
+ def fake_drop_resize_claim(*args, **kwargs):
+ pass
+
+ def fake_get_resource_tracker(self):
+ return fake_rt
+
+ def fake_setup_networks_on_host(self, *args, **kwargs):
+ pass
+
+ self.stubs.Set(fake_rt, 'drop_resize_claim', fake_drop_resize_claim)
+ self.stubs.Set(self.compute, '_get_resource_tracker',
+ fake_get_resource_tracker)
+ self.stubs.Set(self.compute.network_api, 'setup_networks_on_host',
+ fake_setup_networks_on_host)
+
+ migration = objects.Migration()
+ migration.instance_uuid = instance.uuid
+ migration.status = 'finished'
+ migration.create(self.context.elevated())
+
+ instance.task_state = task_states.DELETING
+ instance.vm_state = vm_states.RESIZED
+ instance.system_metadata = sys_meta
+ instance.save()
+
+ self.compute.confirm_resize(self.context, instance=instance,
+ migration=migration, reservations=[])
+ instance.refresh()
+ self.assertEqual(vm_states.ACTIVE, instance['vm_state'])
+
+ def _get_instance_and_bdm_for_dev_defaults_tests(self):
+ instance = self._create_fake_instance_obj(
+ params={'root_device_name': '/dev/vda'})
+ block_device_mapping = block_device_obj.block_device_make_list(
+ self.context, [fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vda',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'image_id': 'fake-image-id-1',
+ 'boot_index': 0})])
+
+ return instance, block_device_mapping
+
+ def test_default_block_device_names_empty_instance_root_dev(self):
+ instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
+ instance.root_device_name = None
+ self.mox.StubOutWithMock(objects.Instance, 'save')
+ self.mox.StubOutWithMock(self.compute,
+ '_default_device_names_for_instance')
+ self.compute._default_device_names_for_instance(instance,
+ '/dev/vda', [], [],
+ [bdm for bdm in bdms])
+ self.mox.ReplayAll()
+ self.compute._default_block_device_names(self.context,
+ instance,
+ {}, bdms)
+ self.assertEqual('/dev/vda', instance.root_device_name)
+
+ def test_default_block_device_names_empty_root_device(self):
+ instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
+ bdms[0]['device_name'] = None
+ self.mox.StubOutWithMock(self.compute,
+ '_default_device_names_for_instance')
+ self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'save')
+ bdms[0].save().AndReturn(None)
+ self.compute._default_device_names_for_instance(instance,
+ '/dev/vda', [], [],
+ [bdm for bdm in bdms])
+ self.mox.ReplayAll()
+ self.compute._default_block_device_names(self.context,
+ instance,
+ {}, bdms)
+
+ def test_default_block_device_names_no_root_device(self):
+ instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
+ instance.root_device_name = None
+ bdms[0]['device_name'] = None
+ self.mox.StubOutWithMock(objects.Instance, 'save')
+ self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'save')
+ self.mox.StubOutWithMock(self.compute,
+ '_default_root_device_name')
+ self.mox.StubOutWithMock(self.compute,
+ '_default_device_names_for_instance')
+
+ self.compute._default_root_device_name(instance, mox.IgnoreArg(),
+ bdms[0]).AndReturn('/dev/vda')
+ bdms[0].save().AndReturn(None)
+ self.compute._default_device_names_for_instance(instance,
+ '/dev/vda', [], [],
+ [bdm for bdm in bdms])
+ self.mox.ReplayAll()
+ self.compute._default_block_device_names(self.context,
+ instance,
+ {}, bdms)
+ self.assertEqual('/dev/vda', instance.root_device_name)
+
+ def test_default_block_device_names_with_blank_volumes(self):
+ instance = self._create_fake_instance_obj()
+ image_meta = {}
+ root_volume = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 1, 'instance_uuid': 'fake-instance',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'image_id': 'fake-image-id-1',
+ 'boot_index': 0}))
+ blank_volume1 = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 2, 'instance_uuid': 'fake-instance',
+ 'source_type': 'blank',
+ 'destination_type': 'volume',
+ 'boot_index': -1}))
+ blank_volume2 = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 3, 'instance_uuid': 'fake-instance',
+ 'source_type': 'blank',
+ 'destination_type': 'volume',
+ 'boot_index': -1}))
+ ephemeral = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 4, 'instance_uuid': 'fake-instance',
+ 'source_type': 'blank',
+ 'destination_type': 'local'}))
+ swap = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 5, 'instance_uuid': 'fake-instance',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'guest_format': 'swap'
+ }))
+ bdms = block_device_obj.block_device_make_list(
+ self.context, [root_volume, blank_volume1, blank_volume2,
+ ephemeral, swap])
+
+ with contextlib.nested(
+ mock.patch.object(self.compute, '_default_root_device_name',
+ return_value='/dev/vda'),
+ mock.patch.object(objects.BlockDeviceMapping, 'save'),
+ mock.patch.object(self.compute,
+ '_default_device_names_for_instance')
+ ) as (default_root_device, object_save,
+ default_device_names):
+ self.compute._default_block_device_names(self.context, instance,
+ image_meta, bdms)
+ default_root_device.assert_called_once_with(instance, image_meta,
+ bdms[0])
+ self.assertEqual('/dev/vda', instance.root_device_name)
+ self.assertTrue(object_save.called)
+ default_device_names.assert_called_once_with(instance,
+ '/dev/vda', [bdms[-2]], [bdms[-1]],
+ [bdm for bdm in bdms[:-2]])
+
+ def test_reserve_block_device_name(self):
+ instance = self._create_fake_instance_obj(
+ params={'root_device_name': '/dev/vda'})
+ bdm = objects.BlockDeviceMapping(
+ **{'source_type': 'image', 'destination_type': 'local',
+ 'image_id': 'fake-image-id', 'device_name': '/dev/vda',
+ 'instance_uuid': instance.uuid})
+ bdm.create(self.context)
+
+ self.compute.reserve_block_device_name(self.context, instance,
+ '/dev/vdb', 'fake-volume-id',
+ 'virtio', 'disk')
+
+ bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
+ self.context, instance.uuid)
+ bdms = list(bdms)
+ self.assertEqual(len(bdms), 2)
+ bdms.sort(key=operator.attrgetter('device_name'))
+ vol_bdm = bdms[1]
+ self.assertEqual(vol_bdm.source_type, 'volume')
+ self.assertEqual(vol_bdm.destination_type, 'volume')
+ self.assertEqual(vol_bdm.device_name, '/dev/vdb')
+ self.assertEqual(vol_bdm.volume_id, 'fake-volume-id')
+ self.assertEqual(vol_bdm.disk_bus, 'virtio')
+ self.assertEqual(vol_bdm.device_type, 'disk')
+
+
+class ComputeAPITestCase(BaseTestCase):
+ def setUp(self):
+ def fake_get_nw_info(cls, ctxt, instance):
+ self.assertTrue(ctxt.is_admin)
+ return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
+
+ super(ComputeAPITestCase, self).setUp()
+ self.stubs.Set(network_api.API, 'get_instance_nw_info',
+ fake_get_nw_info)
+ self.security_group_api = (
+ openstack_driver.get_openstack_security_group_driver())
+
+ self.compute_api = compute.API(
+ security_group_api=self.security_group_api)
+ self.fake_image = {
+ 'id': 1,
+ 'name': 'fake_name',
+ 'status': 'active',
+ 'properties': {'kernel_id': 'fake_kernel_id',
+ 'ramdisk_id': 'fake_ramdisk_id'},
+ }
+
+ def fake_show(obj, context, image_id, **kwargs):
+ if image_id:
+ return self.fake_image
+ else:
+ raise exception.ImageNotFound(image_id=image_id)
+
+ self.fake_show = fake_show
+
+ def _run_instance(self, params=None):
+ instance = self._create_fake_instance_obj(params, services=True)
+ instance_uuid = instance['uuid']
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+
+ instance.refresh()
+ self.assertIsNone(instance['task_state'])
+ return instance, instance_uuid
+
+ def test_ip_filtering(self):
+ info = [{
+ 'address': 'aa:bb:cc:dd:ee:ff',
+ 'id': 1,
+ 'network': {
+ 'bridge': 'br0',
+ 'id': 1,
+ 'label': 'private',
+ 'subnets': [{
+ 'cidr': '192.168.0.0/24',
+ 'ips': [{
+ 'address': '192.168.0.10',
+ 'type': 'fixed',
+ }]
+ }]
+ }
+ }]
+
+ info1 = objects.InstanceInfoCache(network_info=jsonutils.dumps(info))
+ inst1 = objects.Instance(id=1, info_cache=info1)
+ info[0]['network']['subnets'][0]['ips'][0]['address'] = '192.168.0.20'
+ info2 = objects.InstanceInfoCache(network_info=jsonutils.dumps(info))
+ inst2 = objects.Instance(id=2, info_cache=info2)
+ instances = objects.InstanceList(objects=[inst1, inst2])
+
+ instances = self.compute_api._ip_filter(instances, {'ip': '.*10'})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0].id, 1)
+
+ def test_create_with_too_little_ram(self):
+ # Test an instance type with too little memory.
+
+ inst_type = flavors.get_default_flavor()
+ inst_type['memory_mb'] = 1
+
+ self.fake_image['min_ram'] = 2
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.assertRaises(exception.FlavorMemoryTooSmall,
+ self.compute_api.create, self.context,
+ inst_type, self.fake_image['id'])
+
+ # Now increase the inst_type memory and make sure all is fine.
+ inst_type['memory_mb'] = 2
+ (refs, resv_id) = self.compute_api.create(self.context,
+ inst_type, self.fake_image['id'])
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_create_with_too_little_disk(self):
+ # Test an instance type with too little disk space.
+
+ inst_type = flavors.get_default_flavor()
+ inst_type['root_gb'] = 1
+
+ self.fake_image['min_disk'] = 2
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ self.compute_api.create, self.context,
+ inst_type, self.fake_image['id'])
+
+ # Now increase the inst_type disk space and make sure all is fine.
+ inst_type['root_gb'] = 2
+ (refs, resv_id) = self.compute_api.create(self.context,
+ inst_type, self.fake_image['id'])
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_create_with_too_large_image(self):
+ # Test an instance type with too little disk space.
+
+ inst_type = flavors.get_default_flavor()
+ inst_type['root_gb'] = 1
+
+ self.fake_image['size'] = '1073741825'
+
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ self.compute_api.create, self.context,
+ inst_type, self.fake_image['id'])
+
+ # Reduce image to 1 GB limit and ensure it works
+ self.fake_image['size'] = '1073741824'
+ (refs, resv_id) = self.compute_api.create(self.context,
+ inst_type, self.fake_image['id'])
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_create_just_enough_ram_and_disk(self):
+ # Test an instance type with just enough ram and disk space.
+
+ inst_type = flavors.get_default_flavor()
+ inst_type['root_gb'] = 2
+ inst_type['memory_mb'] = 2
+
+ self.fake_image['min_ram'] = 2
+ self.fake_image['min_disk'] = 2
+ self.fake_image['name'] = 'fake_name'
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ (refs, resv_id) = self.compute_api.create(self.context,
+ inst_type, self.fake_image['id'])
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_create_with_no_ram_and_disk_reqs(self):
+ # Test an instance type with no min_ram or min_disk.
+
+ inst_type = flavors.get_default_flavor()
+ inst_type['root_gb'] = 1
+ inst_type['memory_mb'] = 1
+
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ (refs, resv_id) = self.compute_api.create(self.context,
+ inst_type, self.fake_image['id'])
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_create_with_deleted_image(self):
+ # If we're given a deleted image by glance, we should not be able to
+ # build from it
+ inst_type = flavors.get_default_flavor()
+
+ self.fake_image['name'] = 'fake_name'
+ self.fake_image['status'] = 'DELETED'
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ expected_message = (
+ exception.ImageNotActive.msg_fmt % {'image_id':
+ self.fake_image['id']})
+ with testtools.ExpectedException(exception.ImageNotActive,
+ expected_message):
+ self.compute_api.create(self.context, inst_type,
+ self.fake_image['id'])
+
+ @mock.patch('nova.virt.hardware.VirtNUMAInstanceTopology.get_constraints')
+ def test_create_with_numa_topology(self, numa_constraints_mock):
+ inst_type = flavors.get_default_flavor()
+ # This is what the stubbed out method will return
+ fake_image_props = {'kernel_id': 'fake_kernel_id',
+ 'ramdisk_id': 'fake_ramdisk_id',
+ 'something_else': 'meow'}
+
+ numa_topology = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(0, set([1, 2]), 512),
+ hardware.VirtNUMATopologyCellInstance(1, set([3, 4]), 512)])
+ numa_constraints_mock.return_value = numa_topology
+
+ instances, resv_id = self.compute_api.create(self.context, inst_type,
+ self.fake_image['id'])
+ numa_constraints_mock.assert_called_once_with(
+ inst_type, fake_image_props)
+ self.assertThat(numa_topology._to_dict(),
+ matchers.DictMatches(
+ instances[0].numa_topology
+ .topology_from_obj()._to_dict()))
+
+ def test_create_instance_defaults_display_name(self):
+ # Verify that an instance cannot be created without a display_name.
+ cases = [dict(), dict(display_name=None)]
+ for instance in cases:
+ (ref, resv_id) = self.compute_api.create(self.context,
+ flavors.get_default_flavor(),
+ 'fake-image-uuid', **instance)
+ try:
+ self.assertIsNotNone(ref[0]['display_name'])
+ finally:
+ db.instance_destroy(self.context, ref[0]['uuid'])
+
+ def test_create_instance_sets_system_metadata(self):
+ # Make sure image properties are copied into system metadata.
+ (ref, resv_id) = self.compute_api.create(
+ self.context,
+ instance_type=flavors.get_default_flavor(),
+ image_href='fake-image-uuid')
+ try:
+ sys_metadata = db.instance_system_metadata_get(self.context,
+ ref[0]['uuid'])
+
+ image_props = {'image_kernel_id': 'fake_kernel_id',
+ 'image_ramdisk_id': 'fake_ramdisk_id',
+ 'image_something_else': 'meow', }
+ for key, value in image_props.iteritems():
+ self.assertIn(key, sys_metadata)
+ self.assertEqual(value, sys_metadata[key])
+
+ finally:
+ db.instance_destroy(self.context, ref[0]['uuid'])
+
+ def test_create_saves_type_in_system_metadata(self):
+ instance_type = flavors.get_default_flavor()
+ (ref, resv_id) = self.compute_api.create(
+ self.context,
+ instance_type=instance_type,
+ image_href='some-fake-image')
+ try:
+ sys_metadata = db.instance_system_metadata_get(self.context,
+ ref[0]['uuid'])
+
+ instance_type_props = ['name', 'memory_mb', 'vcpus', 'root_gb',
+ 'ephemeral_gb', 'flavorid', 'swap',
+ 'rxtx_factor', 'vcpu_weight']
+ for key in instance_type_props:
+ sys_meta_key = "instance_type_%s" % key
+ self.assertIn(sys_meta_key, sys_metadata)
+ self.assertEqual(str(instance_type[key]),
+ str(sys_metadata[sys_meta_key]))
+
+ finally:
+ db.instance_destroy(self.context, ref[0]['uuid'])
+
+ def test_create_instance_associates_security_groups(self):
+ # Make sure create associates security groups.
+ group = self._create_group()
+ (ref, resv_id) = self.compute_api.create(
+ self.context,
+ instance_type=flavors.get_default_flavor(),
+ image_href='some-fake-image',
+ security_group=['testgroup'])
+ try:
+ self.assertEqual(len(db.security_group_get_by_instance(
+ self.context, ref[0]['uuid'])), 1)
+ group = db.security_group_get(self.context, group['id'])
+ self.assertEqual(1, len(group['instances']))
+ finally:
+ db.security_group_destroy(self.context, group['id'])
+ db.instance_destroy(self.context, ref[0]['uuid'])
+
+ def test_create_instance_with_invalid_security_group_raises(self):
+ instance_type = flavors.get_default_flavor()
+
+ pre_build_len = len(db.instance_get_all(self.context))
+ self.assertRaises(exception.SecurityGroupNotFoundForProject,
+ self.compute_api.create,
+ self.context,
+ instance_type=instance_type,
+ image_href=None,
+ security_group=['this_is_a_fake_sec_group'])
+ self.assertEqual(pre_build_len,
+ len(db.instance_get_all(self.context)))
+
+ def test_create_with_large_user_data(self):
+ # Test an instance type with too much user data.
+
+ inst_type = flavors.get_default_flavor()
+
+ self.fake_image['min_ram'] = 2
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.assertRaises(exception.InstanceUserDataTooLarge,
+ self.compute_api.create, self.context, inst_type,
+ self.fake_image['id'], user_data=('1' * 65536))
+
+ def test_create_with_malformed_user_data(self):
+ # Test an instance type with malformed user data.
+
+ inst_type = flavors.get_default_flavor()
+
+ self.fake_image['min_ram'] = 2
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.assertRaises(exception.InstanceUserDataMalformed,
+ self.compute_api.create, self.context, inst_type,
+ self.fake_image['id'], user_data='banana')
+
+ def test_create_with_base64_user_data(self):
+ # Test an instance type with ok much user data.
+
+ inst_type = flavors.get_default_flavor()
+
+ self.fake_image['min_ram'] = 2
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ # NOTE(mikal): a string of length 48510 encodes to 65532 characters of
+ # base64
+ (refs, resv_id) = self.compute_api.create(
+ self.context, inst_type, self.fake_image['id'],
+ user_data=base64.encodestring('1' * 48510))
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_populate_instance_for_create(self):
+ base_options = {'image_ref': self.fake_image['id'],
+ 'system_metadata': {'fake': 'value'}}
+ instance = objects.Instance()
+ instance.update(base_options)
+ inst_type = flavors.get_flavor_by_name("m1.tiny")
+ instance = self.compute_api._populate_instance_for_create(
+ self.context,
+ instance,
+ self.fake_image,
+ 1,
+ security_groups=None,
+ instance_type=inst_type)
+ self.assertEqual(str(base_options['image_ref']),
+ instance['system_metadata']['image_base_image_ref'])
+ self.assertEqual(vm_states.BUILDING, instance['vm_state'])
+ self.assertEqual(task_states.SCHEDULING, instance['task_state'])
+ self.assertEqual(1, instance['launch_index'])
+ self.assertIsNotNone(instance.get('uuid'))
+ self.assertEqual([], instance.security_groups.objects)
+
+ def test_default_hostname_generator(self):
+ fake_uuids = [str(uuid.uuid4()) for x in xrange(4)]
+
+ orig_populate = self.compute_api._populate_instance_for_create
+
+ def _fake_populate(context, base_options, *args, **kwargs):
+ base_options['uuid'] = fake_uuids.pop(0)
+ return orig_populate(context, base_options, *args, **kwargs)
+
+ self.stubs.Set(self.compute_api,
+ '_populate_instance_for_create',
+ _fake_populate)
+
+ cases = [(None, 'server-%s' % fake_uuids[0]),
+ ('Hello, Server!', 'hello-server'),
+ ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello'),
+ ('hello_server', 'hello-server')]
+ for display_name, hostname in cases:
+ (ref, resv_id) = self.compute_api.create(self.context,
+ flavors.get_default_flavor(), image_href='some-fake-image',
+ display_name=display_name)
+ try:
+ self.assertEqual(ref[0]['hostname'], hostname)
+ finally:
+ db.instance_destroy(self.context, ref[0]['uuid'])
+
+ def test_instance_create_adds_to_instance_group(self):
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ group = objects.InstanceGroup(self.context)
+ group.uuid = str(uuid.uuid4())
+ group.create()
+
+ inst_type = flavors.get_default_flavor()
+ (refs, resv_id) = self.compute_api.create(
+ self.context, inst_type, self.fake_image['id'],
+ scheduler_hints={'group': group.uuid})
+
+ group = objects.InstanceGroup.get_by_uuid(self.context, group.uuid)
+ self.assertIn(refs[0]['uuid'], group.members)
+
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_instance_create_auto_creates_group(self):
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ inst_type = flavors.get_default_flavor()
+ (refs, resv_id) = self.compute_api.create(
+ self.context, inst_type, self.fake_image['id'],
+ scheduler_hints={'group': 'groupname'})
+
+ group = objects.InstanceGroup.get_by_name(self.context, 'groupname')
+ self.assertEqual('groupname', group.name)
+ self.assertIn('legacy', group.policies)
+ self.assertEqual(1, len(group.members))
+ self.assertIn(refs[0]['uuid'], group.members)
+
+ # On a second instance, make sure it gets added to the group that was
+ # auto-created above
+ (refs2, resv_id) = self.compute_api.create(
+ self.context, inst_type, self.fake_image['id'],
+ scheduler_hints={'group': 'groupname'})
+ group = objects.InstanceGroup.get_by_name(self.context, 'groupname')
+ self.assertEqual('groupname', group.name)
+ self.assertIn('legacy', group.policies)
+ self.assertEqual(2, len(group.members))
+ self.assertIn(refs[0]['uuid'], group.members)
+ self.assertIn(refs2[0]['uuid'], group.members)
+
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_destroy_instance_disassociates_security_groups(self):
+ # Make sure destroying disassociates security groups.
+ group = self._create_group()
+
+ (ref, resv_id) = self.compute_api.create(
+ self.context,
+ instance_type=flavors.get_default_flavor(),
+ image_href='some-fake-image',
+ security_group=['testgroup'])
+ try:
+ db.instance_destroy(self.context, ref[0]['uuid'])
+ group = db.security_group_get(self.context, group['id'])
+ self.assertEqual(0, len(group['instances']))
+ finally:
+ db.security_group_destroy(self.context, group['id'])
+
+ def test_destroy_security_group_disassociates_instances(self):
+ # Make sure destroying security groups disassociates instances.
+ group = self._create_group()
+
+ (ref, resv_id) = self.compute_api.create(
+ self.context,
+ instance_type=flavors.get_default_flavor(),
+ image_href='some-fake-image',
+ security_group=['testgroup'])
+
+ try:
+ db.security_group_destroy(self.context, group['id'])
+ admin_deleted_context = context.get_admin_context(
+ read_deleted="only")
+ group = db.security_group_get(admin_deleted_context, group['id'])
+ self.assertEqual(0, len(group['instances']))
+ finally:
+ db.instance_destroy(self.context, ref[0]['uuid'])
+
+ def _test_rebuild(self, vm_state):
+ instance = self._create_fake_instance_obj()
+ instance_uuid = instance['uuid']
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+
+ instance = objects.Instance.get_by_uuid(self.context,
+ instance_uuid)
+ self.assertIsNone(instance.task_state)
+ # Set some image metadata that should get wiped out and reset
+ # as well as some other metadata that should be preserved.
+ instance.system_metadata.update({
+ 'image_kernel_id': 'old-data',
+ 'image_ramdisk_id': 'old_data',
+ 'image_something_else': 'old-data',
+ 'image_should_remove': 'bye-bye',
+ 'preserved': 'preserve this!'})
+
+ instance.save()
+
+ # Make sure Compute API updates the image_ref before casting to
+ # compute manager.
+ info = {'image_ref': None, 'clean': False}
+
+ def fake_rpc_rebuild(context, **kwargs):
+ info['image_ref'] = kwargs['instance'].image_ref
+ info['clean'] = kwargs['instance'].obj_what_changed() == set()
+
+ self.stubs.Set(self.compute_api.compute_task_api, 'rebuild_instance',
+ fake_rpc_rebuild)
+
+ image_ref = instance["image_ref"] + '-new_image_ref'
+ password = "new_password"
+
+ instance.vm_state = vm_state
+ instance.save()
+
+ self.compute_api.rebuild(self.context, instance, image_ref, password)
+ self.assertEqual(info['image_ref'], image_ref)
+ self.assertTrue(info['clean'])
+
+ instance.refresh()
+ self.assertEqual(instance.task_state, task_states.REBUILDING)
+ sys_meta = dict([(k, v) for k, v in instance.system_metadata.items()
+ if not k.startswith('instance_type')])
+ self.assertEqual(sys_meta,
+ {'image_kernel_id': 'fake_kernel_id',
+ 'image_min_disk': '1',
+ 'image_ramdisk_id': 'fake_ramdisk_id',
+ 'image_something_else': 'meow',
+ 'preserved': 'preserve this!'})
+ instance.destroy()
+
+ def test_rebuild(self):
+ self._test_rebuild(vm_state=vm_states.ACTIVE)
+
+ def test_rebuild_in_error_state(self):
+ self._test_rebuild(vm_state=vm_states.ERROR)
+
+ def test_rebuild_in_error_not_launched(self):
+ instance = self._create_fake_instance_obj(params={'image_ref': ''})
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+
+ db.instance_update(self.context, instance['uuid'],
+ {"vm_state": vm_states.ERROR,
+ "launched_at": None})
+
+ instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.rebuild,
+ self.context,
+ instance,
+ instance['image_ref'],
+ "new password")
+
+ def test_rebuild_no_image(self):
+ instance = self._create_fake_instance_obj(params={'image_ref': ''})
+ instance_uuid = instance.uuid
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+ self.compute_api.rebuild(self.context, instance, '', 'new_password')
+
+ instance = db.instance_get_by_uuid(self.context, instance_uuid)
+ self.assertEqual(instance['task_state'], task_states.REBUILDING)
+
+ def test_rebuild_with_deleted_image(self):
+ # If we're given a deleted image by glance, we should not be able to
+ # rebuild from it
+ instance = self._create_fake_instance_obj(params={'image_ref': '1'})
+ self.fake_image['name'] = 'fake_name'
+ self.fake_image['status'] = 'DELETED'
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ expected_message = (
+ exception.ImageNotActive.msg_fmt % {'image_id':
+ self.fake_image['id']})
+ with testtools.ExpectedException(exception.ImageNotActive,
+ expected_message):
+ self.compute_api.rebuild(self.context, instance,
+ self.fake_image['id'], 'new_password')
+
+ def test_rebuild_with_too_little_ram(self):
+ instance = self._create_fake_instance_obj(params={'image_ref': '1'})
+
+ def fake_extract_flavor(_inst, prefix):
+ self.assertEqual('', prefix)
+ return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
+
+ self.stubs.Set(flavors, 'extract_flavor',
+ fake_extract_flavor)
+
+ self.fake_image['min_ram'] = 128
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.assertRaises(exception.FlavorMemoryTooSmall,
+ self.compute_api.rebuild, self.context,
+ instance, self.fake_image['id'], 'new_password')
+
+ # Reduce image memory requirements and make sure it works
+ self.fake_image['min_ram'] = 64
+
+ self.compute_api.rebuild(self.context,
+ instance, self.fake_image['id'], 'new_password')
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_rebuild_with_too_little_disk(self):
+ instance = self._create_fake_instance_obj(params={'image_ref': '1'})
+
+ def fake_extract_flavor(_inst, prefix):
+ self.assertEqual('', prefix)
+ return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
+
+ self.stubs.Set(flavors, 'extract_flavor',
+ fake_extract_flavor)
+
+ self.fake_image['min_disk'] = 2
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ self.compute_api.rebuild, self.context,
+ instance, self.fake_image['id'], 'new_password')
+
+ # Reduce image disk requirements and make sure it works
+ self.fake_image['min_disk'] = 1
+
+ self.compute_api.rebuild(self.context,
+ instance, self.fake_image['id'], 'new_password')
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_rebuild_with_just_enough_ram_and_disk(self):
+ instance = self._create_fake_instance_obj(params={'image_ref': '1'})
+
+ def fake_extract_flavor(_inst, prefix):
+ self.assertEqual('', prefix)
+ return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
+
+ self.stubs.Set(flavors, 'extract_flavor',
+ fake_extract_flavor)
+
+ self.fake_image['min_ram'] = 64
+ self.fake_image['min_disk'] = 1
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.compute_api.rebuild(self.context,
+ instance, self.fake_image['id'], 'new_password')
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_rebuild_with_no_ram_and_disk_reqs(self):
+ instance = self._create_fake_instance_obj(params={'image_ref': '1'})
+
+ def fake_extract_flavor(_inst, prefix):
+ self.assertEqual('', prefix)
+ return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
+
+ self.stubs.Set(flavors, 'extract_flavor',
+ fake_extract_flavor)
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.compute_api.rebuild(self.context,
+ instance, self.fake_image['id'], 'new_password')
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_rebuild_with_too_large_image(self):
+ instance = self._create_fake_instance_obj(params={'image_ref': '1'})
+
+ def fake_extract_flavor(_inst, prefix):
+ self.assertEqual('', prefix)
+ return dict(test_flavor.fake_flavor, memory_mb=64, root_gb=1)
+
+ self.stubs.Set(flavors, 'extract_flavor',
+ fake_extract_flavor)
+
+ self.fake_image['size'] = '1073741825'
+ self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ self.compute_api.rebuild, self.context,
+ instance, self.fake_image['id'], 'new_password')
+
+ # Reduce image to 1 GB limit and ensure it works
+ self.fake_image['size'] = '1073741824'
+ self.compute_api.rebuild(self.context,
+ instance, self.fake_image['id'], 'new_password')
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_hostname_create(self):
+ # Ensure instance hostname is set during creation.
+ inst_type = flavors.get_flavor_by_name('m1.tiny')
+ (instances, _) = self.compute_api.create(self.context,
+ inst_type,
+ image_href='some-fake-image',
+ display_name='test host')
+
+ self.assertEqual('test-host', instances[0]['hostname'])
+
+ def _fake_rescue_block_devices(self, instance, status="in-use"):
+ fake_bdms = block_device_obj.block_device_make_list(self.context,
+ [fake_block_device.FakeDbBlockDeviceDict(
+ {'device_name': '/dev/vda',
+ 'source_type': 'volume',
+ 'boot_index': 0,
+ 'destination_type': 'volume',
+ 'volume_id': 'bf0b6b00-a20c-11e2-9e96-0800200c9a66'})])
+
+ volume = {'id': 'bf0b6b00-a20c-11e2-9e96-0800200c9a66',
+ 'state': 'active', 'instance_uuid': instance['uuid']}
+
+ return fake_bdms, volume
+
+ @mock.patch.object(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')
+ @mock.patch.object(cinder.API, 'get')
+ def test_rescue_volume_backed_no_image(self, mock_get_vol, mock_get_bdms):
+ # Instance started without an image
+ params = {'image_ref': ''}
+ volume_backed_inst_1 = self._create_fake_instance_obj(params=params)
+ bdms, volume = self._fake_rescue_block_devices(volume_backed_inst_1)
+
+ mock_get_vol.return_value = {'id': volume['id'], 'status': "in-use"}
+ mock_get_bdms.return_value = bdms
+
+ with mock.patch.object(self.compute, '_prep_block_device'):
+ self.compute.run_instance(self.context,
+ volume_backed_inst_1, {}, {}, None, None,
+ None, True, None, False)
+
+ self.assertRaises(exception.InstanceNotRescuable,
+ self.compute_api.rescue, self.context,
+ volume_backed_inst_1)
+
+ self.compute.terminate_instance(self.context, volume_backed_inst_1,
+ [], [])
+
+ @mock.patch.object(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')
+ @mock.patch.object(cinder.API, 'get')
+ def test_rescue_volume_backed_placeholder_image(self,
+ mock_get_vol,
+ mock_get_bdms):
+ # Instance started with a placeholder image (for metadata)
+ volume_backed_inst_2 = self._create_fake_instance_obj(
+ {'image_ref': 'my_placeholder_img',
+ 'root_device_name': '/dev/vda'})
+ bdms, volume = self._fake_rescue_block_devices(volume_backed_inst_2)
+
+ mock_get_vol.return_value = {'id': volume['id'], 'status': "in-use"}
+ mock_get_bdms.return_value = bdms
+
+ with mock.patch.object(self.compute, '_prep_block_device'):
+ self.compute.run_instance(self.context,
+ volume_backed_inst_2, {}, {}, None, None,
+ None, True, None, False)
+
+ self.assertRaises(exception.InstanceNotRescuable,
+ self.compute_api.rescue, self.context,
+ volume_backed_inst_2)
+
+ self.compute.terminate_instance(self.context, volume_backed_inst_2,
+ [], [])
+
+ def test_get(self):
+ # Test get instance.
+ exp_instance = self._create_fake_instance()
+ # NOTE(danms): Transform the db object in a similar way as
+ # the API method will do.
+ expected = obj_base.obj_to_primitive(
+ objects.Instance._from_db_object(
+ self.context, objects.Instance(), exp_instance,
+ instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))
+
+ def fake_db_get(_context, _instance_uuid,
+ columns_to_join=None, use_slave=False):
+ return exp_instance
+
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get)
+
+ instance = self.compute_api.get(self.context, exp_instance['uuid'])
+ self.assertEqual(unify_instance(expected),
+ unify_instance(instance))
+
+ def test_get_with_admin_context(self):
+ # Test get instance.
+ c = context.get_admin_context()
+ exp_instance = self._create_fake_instance()
+ # NOTE(danms): Transform the db object in a similar way as
+ # the API method will do.
+ expected = obj_base.obj_to_primitive(
+ objects.Instance._from_db_object(
+ c, objects.Instance(), exp_instance,
+ instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))
+
+ def fake_db_get(context, instance_uuid,
+ columns_to_join=None, use_slave=False):
+ return exp_instance
+
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get)
+
+ instance = self.compute_api.get(c, exp_instance['uuid'])
+ self.assertEqual(unify_instance(expected),
+ unify_instance(instance))
+
+ def test_get_with_integer_id(self):
+ # Test get instance with an integer id.
+ exp_instance = self._create_fake_instance()
+ # NOTE(danms): Transform the db object in a similar way as
+ # the API method will do.
+ expected = obj_base.obj_to_primitive(
+ objects.Instance._from_db_object(
+ self.context, objects.Instance(), exp_instance,
+ instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))
+
+ def fake_db_get(_context, _instance_id, columns_to_join=None):
+ return exp_instance
+
+ self.stubs.Set(db, 'instance_get', fake_db_get)
+
+ instance = self.compute_api.get(self.context, exp_instance['id'])
+ self.assertEqual(unify_instance(expected),
+ unify_instance(instance))
+
+ def test_get_all_by_name_regexp(self):
+ # Test searching instances by name (display_name).
+ c = context.get_admin_context()
+ instance1 = self._create_fake_instance({'display_name': 'woot'})
+ instance2 = self._create_fake_instance({
+ 'display_name': 'woo'})
+ instance3 = self._create_fake_instance({
+ 'display_name': 'not-woot'})
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'name': '^woo.*'})
+ self.assertEqual(len(instances), 2)
+ instance_uuids = [instance['uuid'] for instance in instances]
+ self.assertIn(instance1['uuid'], instance_uuids)
+ self.assertIn(instance2['uuid'], instance_uuids)
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'name': '^woot.*'})
+ instance_uuids = [instance['uuid'] for instance in instances]
+ self.assertEqual(len(instances), 1)
+ self.assertIn(instance1['uuid'], instance_uuids)
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'name': '.*oot.*'})
+ self.assertEqual(len(instances), 2)
+ instance_uuids = [instance['uuid'] for instance in instances]
+ self.assertIn(instance1['uuid'], instance_uuids)
+ self.assertIn(instance3['uuid'], instance_uuids)
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'name': '^n.*'})
+ self.assertEqual(len(instances), 1)
+ instance_uuids = [instance['uuid'] for instance in instances]
+ self.assertIn(instance3['uuid'], instance_uuids)
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'name': 'noth.*'})
+ self.assertEqual(len(instances), 0)
+
+ db.instance_destroy(c, instance1['uuid'])
+ db.instance_destroy(c, instance2['uuid'])
+ db.instance_destroy(c, instance3['uuid'])
+
+ def test_get_all_by_multiple_options_at_once(self):
+ # Test searching by multiple options at once.
+ c = context.get_admin_context()
+
+ def fake_network_info(ip):
+ info = [{
+ 'address': 'aa:bb:cc:dd:ee:ff',
+ 'id': 1,
+ 'network': {
+ 'bridge': 'br0',
+ 'id': 1,
+ 'label': 'private',
+ 'subnets': [{
+ 'cidr': '192.168.0.0/24',
+ 'ips': [{
+ 'address': ip,
+ 'type': 'fixed',
+ }]
+ }]
+ }
+ }]
+ return jsonutils.dumps(info)
+
+ instance1 = self._create_fake_instance({
+ 'display_name': 'woot',
+ 'id': 1,
+ 'uuid': '00000000-0000-0000-0000-000000000010',
+ 'info_cache': {'network_info':
+ fake_network_info('192.168.0.1')}})
+ instance2 = self._create_fake_instance({
+ 'display_name': 'woo',
+ 'id': 20,
+ 'uuid': '00000000-0000-0000-0000-000000000020',
+ 'info_cache': {'network_info':
+ fake_network_info('192.168.0.2')}})
+ instance3 = self._create_fake_instance({
+ 'display_name': 'not-woot',
+ 'id': 30,
+ 'uuid': '00000000-0000-0000-0000-000000000030',
+ 'info_cache': {'network_info':
+ fake_network_info('192.168.0.3')}})
+
+ # ip ends up matching 2nd octet here.. so all 3 match ip
+ # but 'name' only matches one
+ instances = self.compute_api.get_all(c,
+ search_opts={'ip': '.*\.1', 'name': 'not.*'})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['uuid'], instance3['uuid'])
+
+ # ip ends up matching any ip with a '1' in the last octet..
+ # so instance 1 and 3.. but name should only match #1
+ # but 'name' only matches one
+ instances = self.compute_api.get_all(c,
+ search_opts={'ip': '.*\.1$', 'name': '^woo.*'})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['uuid'], instance1['uuid'])
+
+ # same as above but no match on name (name matches instance1
+ # but the ip query doesn't
+ instances = self.compute_api.get_all(c,
+ search_opts={'ip': '.*\.2$', 'name': '^woot.*'})
+ self.assertEqual(len(instances), 0)
+
+ # ip matches all 3... ipv6 matches #2+#3...name matches #3
+ instances = self.compute_api.get_all(c,
+ search_opts={'ip': '.*\.1',
+ 'name': 'not.*',
+ 'ip6': '^.*12.*34.*'})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['uuid'], instance3['uuid'])
+
+ db.instance_destroy(c, instance1['uuid'])
+ db.instance_destroy(c, instance2['uuid'])
+ db.instance_destroy(c, instance3['uuid'])
+
+ def test_get_all_by_image(self):
+ # Test searching instances by image.
+
+ c = context.get_admin_context()
+ instance1 = self._create_fake_instance({'image_ref': '1234'})
+ instance2 = self._create_fake_instance({'image_ref': '4567'})
+ instance3 = self._create_fake_instance({'image_ref': '4567'})
+
+ instances = self.compute_api.get_all(c, search_opts={'image': '123'})
+ self.assertEqual(len(instances), 0)
+
+ instances = self.compute_api.get_all(c, search_opts={'image': '1234'})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['uuid'], instance1['uuid'])
+
+ instances = self.compute_api.get_all(c, search_opts={'image': '4567'})
+ self.assertEqual(len(instances), 2)
+ instance_uuids = [instance['uuid'] for instance in instances]
+ self.assertIn(instance2['uuid'], instance_uuids)
+ self.assertIn(instance3['uuid'], instance_uuids)
+
+ # Test passing a list as search arg
+ instances = self.compute_api.get_all(c,
+ search_opts={'image': ['1234', '4567']})
+ self.assertEqual(len(instances), 3)
+
+ db.instance_destroy(c, instance1['uuid'])
+ db.instance_destroy(c, instance2['uuid'])
+ db.instance_destroy(c, instance3['uuid'])
+
+ def test_get_all_by_flavor(self):
+ # Test searching instances by image.
+
+ c = context.get_admin_context()
+ instance1 = self._create_fake_instance({'instance_type_id': 1})
+ instance2 = self._create_fake_instance({'instance_type_id': 2})
+ instance3 = self._create_fake_instance({'instance_type_id': 2})
+
+ # NOTE(comstud): Migrations set up the instance_types table
+ # for us. Therefore, we assume the following is true for
+ # these tests:
+ # instance_type_id 1 == flavor 3
+ # instance_type_id 2 == flavor 1
+ # instance_type_id 3 == flavor 4
+ # instance_type_id 4 == flavor 5
+ # instance_type_id 5 == flavor 2
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'flavor': 5})
+ self.assertEqual(len(instances), 0)
+
+ # ensure unknown filter maps to an exception
+ self.assertRaises(exception.FlavorNotFound,
+ self.compute_api.get_all, c,
+ search_opts={'flavor': 99})
+
+ instances = self.compute_api.get_all(c, search_opts={'flavor': 3})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['id'], instance1['id'])
+
+ instances = self.compute_api.get_all(c, search_opts={'flavor': 1})
+ self.assertEqual(len(instances), 2)
+ instance_uuids = [instance['uuid'] for instance in instances]
+ self.assertIn(instance2['uuid'], instance_uuids)
+ self.assertIn(instance3['uuid'], instance_uuids)
+
+ db.instance_destroy(c, instance1['uuid'])
+ db.instance_destroy(c, instance2['uuid'])
+ db.instance_destroy(c, instance3['uuid'])
+
+ def test_get_all_by_state(self):
+ # Test searching instances by state.
+
+ c = context.get_admin_context()
+ instance1 = self._create_fake_instance({
+ 'power_state': power_state.SHUTDOWN,
+ })
+ instance2 = self._create_fake_instance({
+ 'power_state': power_state.RUNNING,
+ })
+ instance3 = self._create_fake_instance({
+ 'power_state': power_state.RUNNING,
+ })
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'power_state': power_state.SUSPENDED})
+ self.assertEqual(len(instances), 0)
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'power_state': power_state.SHUTDOWN})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['uuid'], instance1['uuid'])
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'power_state': power_state.RUNNING})
+ self.assertEqual(len(instances), 2)
+ instance_uuids = [instance['uuid'] for instance in instances]
+ self.assertIn(instance2['uuid'], instance_uuids)
+ self.assertIn(instance3['uuid'], instance_uuids)
+
+ # Test passing a list as search arg
+ instances = self.compute_api.get_all(c,
+ search_opts={'power_state': [power_state.SHUTDOWN,
+ power_state.RUNNING]})
+ self.assertEqual(len(instances), 3)
+
+ db.instance_destroy(c, instance1['uuid'])
+ db.instance_destroy(c, instance2['uuid'])
+ db.instance_destroy(c, instance3['uuid'])
+
+ def test_get_all_by_metadata(self):
+ # Test searching instances by metadata.
+
+ c = context.get_admin_context()
+ instance0 = self._create_fake_instance()
+ instance1 = self._create_fake_instance({
+ 'metadata': {'key1': 'value1'}})
+ instance2 = self._create_fake_instance({
+ 'metadata': {'key2': 'value2'}})
+ instance3 = self._create_fake_instance({
+ 'metadata': {'key3': 'value3'}})
+ instance4 = self._create_fake_instance({
+ 'metadata': {'key3': 'value3',
+ 'key4': 'value4'}})
+
+ # get all instances
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': u"{}"})
+ self.assertEqual(len(instances), 5)
+
+ # wrong key/value combination
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': u'{"key1": "value3"}'})
+ self.assertEqual(len(instances), 0)
+
+ # non-existing keys
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': u'{"key5": "value1"}'})
+ self.assertEqual(len(instances), 0)
+
+ # find existing instance
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': u'{"key2": "value2"}'})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['uuid'], instance2['uuid'])
+
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': u'{"key3": "value3"}'})
+ self.assertEqual(len(instances), 2)
+ instance_uuids = [instance['uuid'] for instance in instances]
+ self.assertIn(instance3['uuid'], instance_uuids)
+ self.assertIn(instance4['uuid'], instance_uuids)
+
+ # multiple criteria as a dict
+ instances = self.compute_api.get_all(c,
+ search_opts={'metadata': u'{"key3": "value3","key4": "value4"}'})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['uuid'], instance4['uuid'])
+
+ # multiple criteria as a list
+ instances = self.compute_api.get_all(c,
+ search_opts=
+ {'metadata': u'[{"key4": "value4"},{"key3": "value3"}]'})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['uuid'], instance4['uuid'])
+
+ db.instance_destroy(c, instance0['uuid'])
+ db.instance_destroy(c, instance1['uuid'])
+ db.instance_destroy(c, instance2['uuid'])
+ db.instance_destroy(c, instance3['uuid'])
+ db.instance_destroy(c, instance4['uuid'])
+
+ def test_get_all_by_system_metadata(self):
+ # Test searching instances by system metadata.
+
+ c = context.get_admin_context()
+ instance1 = self._create_fake_instance({
+ 'system_metadata': {'key1': 'value1'}})
+
+ # find existing instance
+ instances = self.compute_api.get_all(c,
+ search_opts={'system_metadata': u'{"key1": "value1"}'})
+ self.assertEqual(len(instances), 1)
+ self.assertEqual(instances[0]['uuid'], instance1['uuid'])
+
+ def test_all_instance_metadata(self):
+ self._create_fake_instance({'metadata': {'key1': 'value1'},
+ 'user_id': 'user1',
+ 'project_id': 'project1'})
+
+ self._create_fake_instance({'metadata': {'key2': 'value2'},
+ 'user_id': 'user2',
+ 'project_id': 'project2'})
+
+ _context = self.context
+ _context.user_id = 'user1'
+ _context.project_id = 'project1'
+ metadata = self.compute_api.get_all_instance_metadata(_context,
+ search_filts=[])
+ self.assertEqual(1, len(metadata))
+ self.assertEqual(metadata[0]['key'], 'key1')
+
+ _context.user_id = 'user2'
+ _context.project_id = 'project2'
+ metadata = self.compute_api.get_all_instance_metadata(_context,
+ search_filts=[])
+ self.assertEqual(1, len(metadata))
+ self.assertEqual(metadata[0]['key'], 'key2')
+
+ _context = context.get_admin_context()
+ metadata = self.compute_api.get_all_instance_metadata(_context,
+ search_filts=[])
+ self.assertEqual(2, len(metadata))
+
    def test_instance_metadata(self):
        """Exercise get/update/delete of instance metadata via compute_api.

        Each mutation must push the metadata diff to the compute manager
        (stubbed RPC) and emit a notification whose payload carries the
        resulting metadata.
        """
        # Captures the diff handed to the (stubbed) RPC call.
        meta_changes = [None]
        self.flags(notify_on_state_change='vm_state')

        def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
                                          instance_uuid=None):
            meta_changes[0] = diff
        self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
                       fake_change_instance_metadata)

        _context = context.get_admin_context()
        instance = self._create_fake_instance_obj({'metadata':
                                                   {'key1': 'value1'}})

        metadata = self.compute_api.get_instance_metadata(_context, instance)
        self.assertEqual(metadata, {'key1': 'value1'})

        # Non-destructive update: merges with the existing metadata.
        self.compute_api.update_instance_metadata(_context, instance,
                                                  {'key2': 'value2'})
        metadata = self.compute_api.get_instance_metadata(_context, instance)
        self.assertEqual(metadata, {'key1': 'value1', 'key2': 'value2'})
        self.assertEqual(meta_changes, [{'key2': ['+', 'value2']}])

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
        msg = fake_notifier.NOTIFICATIONS[0]
        payload = msg.payload
        self.assertIn('metadata', payload)
        self.assertEqual(payload['metadata'], metadata)

        # delete=True replaces the whole metadata dict: key1 is dropped,
        # key2 is overwritten, key3 is added.
        new_metadata = {'key2': 'bah', 'key3': 'value3'}
        self.compute_api.update_instance_metadata(_context, instance,
                                                  new_metadata, delete=True)
        metadata = self.compute_api.get_instance_metadata(_context, instance)
        self.assertEqual(metadata, new_metadata)
        self.assertEqual(meta_changes, [{
            'key1': ['-'],
            'key2': ['+', 'bah'],
            'key3': ['+', 'value3'],
        }])

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
        msg = fake_notifier.NOTIFICATIONS[1]
        payload = msg.payload
        self.assertIn('metadata', payload)
        self.assertEqual(payload['metadata'], metadata)

        # Single-key delete only removes that key.
        self.compute_api.delete_instance_metadata(_context, instance, 'key2')
        metadata = self.compute_api.get_instance_metadata(_context, instance)
        self.assertEqual(metadata, {'key3': 'value3'})
        self.assertEqual(meta_changes, [{'key2': ['-']}])

        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 3)
        msg = fake_notifier.NOTIFICATIONS[2]
        payload = msg.payload
        self.assertIn('metadata', payload)
        self.assertEqual(payload['metadata'], {'key3': 'value3'})

        db.instance_destroy(_context, instance['uuid'])
+
+ def test_disallow_metadata_changes_during_building(self):
+ def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
+ instance_uuid=None):
+ pass
+ self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
+ fake_change_instance_metadata)
+
+ instance = self._create_fake_instance({'vm_state': vm_states.BUILDING})
+ instance = dict(instance)
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.delete_instance_metadata, self.context,
+ instance, "key")
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.update_instance_metadata, self.context,
+ instance, "key")
+
+ def test_get_instance_faults(self):
+ # Get an instances latest fault.
+ instance = self._create_fake_instance()
+
+ fault_fixture = {
+ 'code': 404,
+ 'instance_uuid': instance['uuid'],
+ 'message': "HTTPNotFound",
+ 'details': "Stock details for test",
+ 'created_at': datetime.datetime(2010, 10, 10, 12, 0, 0),
+ }
+
+ def return_fault(_ctxt, instance_uuids):
+ return dict.fromkeys(instance_uuids, [fault_fixture])
+
+ self.stubs.Set(nova.db,
+ 'instance_fault_get_by_instance_uuids',
+ return_fault)
+
+ _context = context.get_admin_context()
+ output = self.compute_api.get_instance_faults(_context, [instance])
+ expected = {instance['uuid']: [fault_fixture]}
+ self.assertEqual(output, expected)
+
+ db.instance_destroy(_context, instance['uuid'])
+
+ @staticmethod
+ def _parse_db_block_device_mapping(bdm_ref):
+ attr_list = ('delete_on_termination', 'device_name', 'no_device',
+ 'virtual_name', 'volume_id', 'volume_size', 'snapshot_id')
+ bdm = {}
+ for attr in attr_list:
+ val = bdm_ref.get(attr, None)
+ if val:
+ bdm[attr] = val
+
+ return bdm
+
    def test_update_block_device_mapping(self):
        """End-to-end check of _prepare_image_mapping and
        _update_block_device_mapping.

        Image-defined virtual-device mappings are stored first, then
        partially overridden by API-supplied block_device_mapping entries;
        the resulting db rows are compared as sub-dicts of the expectation.
        """
        swap_size = ephemeral_size = 1
        instance_type = {'swap': swap_size, 'ephemeral_gb': ephemeral_size}
        instance = self._create_fake_instance_obj()
        # Image-style virtual device mappings (ami/root/swap/ephemeralN).
        mappings = [
            {'virtual': 'ami', 'device': 'sda1'},
            {'virtual': 'root', 'device': '/dev/sda1'},

            {'virtual': 'swap', 'device': 'sdb4'},
            {'virtual': 'swap', 'device': 'sdb3'},
            {'virtual': 'swap', 'device': 'sdb2'},
            {'virtual': 'swap', 'device': 'sdb1'},

            {'virtual': 'ephemeral0', 'device': 'sdc1'},
            {'virtual': 'ephemeral1', 'device': 'sdc2'},
            {'virtual': 'ephemeral2', 'device': 'sdc3'}]
        # API-style BDMs that override some of the image mappings above.
        block_device_mapping = [
            # root
            {'device_name': '/dev/sda1',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
             'delete_on_termination': False},

            # overwrite swap
            {'device_name': '/dev/sdb2',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111',
             'delete_on_termination': False},
            {'device_name': '/dev/sdb3',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222'},
            {'device_name': '/dev/sdb4',
             'no_device': True},

            # overwrite ephemeral
            {'device_name': '/dev/sdc1',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333',
             'delete_on_termination': False},
            {'device_name': '/dev/sdc2',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '33333333-aaaa-bbbb-cccc-444444444444',
             'delete_on_termination': False},
            {'device_name': '/dev/sdc3',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '44444444-aaaa-bbbb-cccc-555555555555'},
            {'device_name': '/dev/sdc4',
             'no_device': True},

            # volume
            {'device_name': '/dev/sdd1',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '55555555-aaaa-bbbb-cccc-666666666666',
             'delete_on_termination': False},
            {'device_name': '/dev/sdd2',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '66666666-aaaa-bbbb-cccc-777777777777'},
            {'device_name': '/dev/sdd3',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '77777777-aaaa-bbbb-cccc-888888888888'},
            {'device_name': '/dev/sdd4',
             'no_device': True}]

        # Step 1: store only the image-defined mappings.
        image_mapping = self.compute_api._prepare_image_mapping(
            instance_type, mappings)
        self.compute_api._update_block_device_mapping(
            self.context, instance_type, instance['uuid'], image_mapping)

        bdms = [block_device.BlockDeviceDict(bdm) for bdm in
                db.block_device_mapping_get_all_by_instance(
                    self.context, instance['uuid'])]
        expected_result = [
            {'source_type': 'blank', 'destination_type': 'local',
             'guest_format': 'swap', 'device_name': '/dev/sdb1',
             'volume_size': swap_size, 'delete_on_termination': True},
            {'source_type': 'blank', 'destination_type': 'local',
             'guest_format': CONF.default_ephemeral_format,
             'device_name': '/dev/sdc3', 'delete_on_termination': True},
            {'source_type': 'blank', 'destination_type': 'local',
             'guest_format': CONF.default_ephemeral_format,
             'device_name': '/dev/sdc1', 'delete_on_termination': True},
            {'source_type': 'blank', 'destination_type': 'local',
             'guest_format': CONF.default_ephemeral_format,
             'device_name': '/dev/sdc2', 'delete_on_termination': True},
        ]
        # Compare position-by-position after sorting both sides by device.
        bdms.sort(key=operator.itemgetter('device_name'))
        expected_result.sort(key=operator.itemgetter('device_name'))
        self.assertEqual(len(bdms), len(expected_result))
        for expected, got in zip(expected_result, bdms):
            self.assertThat(expected, matchers.IsSubDictOf(got))

        # Step 2: apply the API BDMs, which override swap/ephemeral
        # devices and add the volume-backed ones.
        self.compute_api._update_block_device_mapping(
            self.context, flavors.get_default_flavor(),
            instance['uuid'], block_device_mapping)
        bdms = [block_device.BlockDeviceDict(bdm) for bdm in
                db.block_device_mapping_get_all_by_instance(
                    self.context, instance['uuid'])]
        expected_result = [
            {'snapshot_id': '00000000-aaaa-bbbb-cccc-000000000000',
             'device_name': '/dev/sda1'},

            {'source_type': 'blank', 'destination_type': 'local',
             'guest_format': 'swap', 'device_name': '/dev/sdb1',
             'volume_size': swap_size, 'delete_on_termination': True},
            {'device_name': '/dev/sdb2',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '11111111-aaaa-bbbb-cccc-111111111111',
             'delete_on_termination': False},
            {'device_name': '/dev/sdb3',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '22222222-aaaa-bbbb-cccc-222222222222'},
            {'device_name': '/dev/sdb4', 'no_device': True},

            {'device_name': '/dev/sdc1',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333',
             'delete_on_termination': False},
            {'device_name': '/dev/sdc2',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '33333333-aaaa-bbbb-cccc-444444444444',
             'delete_on_termination': False},
            {'device_name': '/dev/sdc3',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '44444444-aaaa-bbbb-cccc-555555555555'},
            {'no_device': True, 'device_name': '/dev/sdc4'},

            {'device_name': '/dev/sdd1',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '55555555-aaaa-bbbb-cccc-666666666666',
             'delete_on_termination': False},
            {'device_name': '/dev/sdd2',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '66666666-aaaa-bbbb-cccc-777777777777'},
            {'device_name': '/dev/sdd3',
             'source_type': 'snapshot', 'destination_type': 'volume',
             'snapshot_id': '77777777-aaaa-bbbb-cccc-888888888888'},
            {'no_device': True, 'device_name': '/dev/sdd4'}]
        bdms.sort(key=operator.itemgetter('device_name'))
        expected_result.sort(key=operator.itemgetter('device_name'))
        self.assertEqual(len(bdms), len(expected_result))
        for expected, got in zip(expected_result, bdms):
            self.assertThat(expected, matchers.IsSubDictOf(got))

        # Clean up the BDM rows and the instance.
        for bdm in db.block_device_mapping_get_all_by_instance(
                self.context, instance['uuid']):
            db.block_device_mapping_destroy(self.context, bdm['id'])
        instance.refresh()
        self.compute.terminate_instance(self.context, instance, [], [])
+
+ def _test_check_and_transform_bdm(self, bdms, expected_bdms,
+ image_bdms=None, base_options=None,
+ legacy_bdms=False,
+ legacy_image_bdms=False):
+ image_bdms = image_bdms or []
+ image_meta = {}
+ if image_bdms:
+ image_meta = {'properties': {'block_device_mapping': image_bdms}}
+ if not legacy_image_bdms:
+ image_meta['properties']['bdm_v2'] = True
+ base_options = base_options or {'root_device_name': 'vda',
+ 'image_ref': FAKE_IMAGE_REF}
+ transformed_bdm = self.compute_api._check_and_transform_bdm(
+ base_options, {}, image_meta, 1, 1, bdms, legacy_bdms)
+ self.assertThat(expected_bdms,
+ matchers.DictListMatches(transformed_bdm))
+
+ def test_check_and_transform_legacy_bdm_no_image_bdms(self):
+ legacy_bdms = [
+ {'device_name': '/dev/vda',
+ 'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
+ 'delete_on_termination': False}]
+ expected_bdms = [block_device.BlockDeviceDict.from_legacy(
+ legacy_bdms[0])]
+ expected_bdms[0]['boot_index'] = 0
+ self._test_check_and_transform_bdm(legacy_bdms, expected_bdms,
+ legacy_bdms=True)
+
+ def test_check_and_transform_legacy_bdm_legacy_image_bdms(self):
+ image_bdms = [
+ {'device_name': '/dev/vda',
+ 'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
+ 'delete_on_termination': False}]
+ legacy_bdms = [
+ {'device_name': '/dev/vdb',
+ 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444',
+ 'delete_on_termination': False}]
+ expected_bdms = [
+ block_device.BlockDeviceDict.from_legacy(legacy_bdms[0]),
+ block_device.BlockDeviceDict.from_legacy(image_bdms[0])]
+ expected_bdms[0]['boot_index'] = -1
+ expected_bdms[1]['boot_index'] = 0
+ self._test_check_and_transform_bdm(legacy_bdms, expected_bdms,
+ image_bdms=image_bdms,
+ legacy_bdms=True,
+ legacy_image_bdms=True)
+
+ def test_check_and_transform_legacy_bdm_image_bdms(self):
+ legacy_bdms = [
+ {'device_name': '/dev/vdb',
+ 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444',
+ 'delete_on_termination': False}]
+ image_bdms = [block_device.BlockDeviceDict(
+ {'source_type': 'volume', 'destination_type': 'volume',
+ 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444',
+ 'boot_index': 0})]
+ expected_bdms = [
+ block_device.BlockDeviceDict.from_legacy(legacy_bdms[0]),
+ image_bdms[0]]
+ expected_bdms[0]['boot_index'] = -1
+ self._test_check_and_transform_bdm(legacy_bdms, expected_bdms,
+ image_bdms=image_bdms,
+ legacy_bdms=True)
+
+ def test_check_and_transform_bdm_no_image_bdms(self):
+ bdms = [block_device.BlockDeviceDict({'source_type': 'image',
+ 'destination_type': 'local',
+ 'image_id': FAKE_IMAGE_REF,
+ 'boot_index': 0})]
+ expected_bdms = bdms
+ self._test_check_and_transform_bdm(bdms, expected_bdms)
+
+ def test_check_and_transform_bdm_image_bdms(self):
+ bdms = [block_device.BlockDeviceDict({'source_type': 'image',
+ 'destination_type': 'local',
+ 'image_id': FAKE_IMAGE_REF,
+ 'boot_index': 0})]
+ image_bdms = [block_device.BlockDeviceDict(
+ {'source_type': 'volume', 'destination_type': 'volume',
+ 'volume_id': '33333333-aaaa-bbbb-cccc-444444444444'})]
+ expected_bdms = bdms + image_bdms
+ self._test_check_and_transform_bdm(bdms, expected_bdms,
+ image_bdms=image_bdms)
+
+ def test_check_and_transform_bdm_legacy_image_bdms(self):
+ bdms = [block_device.BlockDeviceDict({'source_type': 'image',
+ 'destination_type': 'local',
+ 'image_id': FAKE_IMAGE_REF,
+ 'boot_index': 0})]
+ image_bdms = [{'device_name': '/dev/vda',
+ 'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
+ 'delete_on_termination': False}]
+ expected_bdms = [block_device.BlockDeviceDict.from_legacy(
+ image_bdms[0])]
+ expected_bdms[0]['boot_index'] = 0
+ self._test_check_and_transform_bdm(bdms, expected_bdms,
+ image_bdms=image_bdms,
+ legacy_image_bdms=True)
+
    def test_check_and_transform_image(self):
        """Rules for synthesizing an image BDM from base_options."""
        base_options = {'root_device_name': 'vdb',
                        'image_ref': FAKE_IMAGE_REF}
        fake_legacy_bdms = [
            {'device_name': '/dev/vda',
             'volume_id': '33333333-aaaa-bbbb-cccc-333333333333',
             'delete_on_termination': False}]

        image_meta = {'properties': {'block_device_mapping': [
            {'device_name': '/dev/vda',
             'snapshot_id': '33333333-aaaa-bbbb-cccc-333333333333'}]}}

        # We get an image BDM (root is vdb, not covered by the legacy BDM).
        transformed_bdm = self.compute_api._check_and_transform_bdm(
            base_options, {}, {}, 1, 1, fake_legacy_bdms, True)
        self.assertEqual(len(transformed_bdm), 2)

        # No image BDM created if image already defines a root BDM
        base_options['root_device_name'] = 'vda'
        transformed_bdm = self.compute_api._check_and_transform_bdm(
            base_options, {}, image_meta, 1, 1, [], True)
        self.assertEqual(len(transformed_bdm), 1)

        # No image BDM created (the legacy BDM already maps the root device)
        transformed_bdm = self.compute_api._check_and_transform_bdm(
            base_options, {}, {}, 1, 1, fake_legacy_bdms, True)
        self.assertEqual(len(transformed_bdm), 1)

        # Volumes with multiple instances fails
        self.assertRaises(exception.InvalidRequest,
                          self.compute_api._check_and_transform_bdm,
                          base_options, {}, {}, 1, 2, fake_legacy_bdms, True)

        # Re-transforming already-transformed BDMs is a no-op.
        checked_bdm = self.compute_api._check_and_transform_bdm(
            base_options, {}, {}, 1, 1, transformed_bdm, True)
        self.assertEqual(checked_bdm, transformed_bdm)
+
+ def test_volume_size(self):
+ ephemeral_size = 2
+ swap_size = 3
+ volume_size = 5
+
+ swap_bdm = {'source_type': 'blank', 'guest_format': 'swap'}
+ ephemeral_bdm = {'source_type': 'blank', 'guest_format': None}
+ volume_bdm = {'source_type': 'volume', 'volume_size': volume_size}
+
+ inst_type = {'ephemeral_gb': ephemeral_size, 'swap': swap_size}
+ self.assertEqual(
+ self.compute_api._volume_size(inst_type, ephemeral_bdm),
+ ephemeral_size)
+ ephemeral_bdm['volume_size'] = 42
+ self.assertEqual(
+ self.compute_api._volume_size(inst_type, ephemeral_bdm), 42)
+ self.assertEqual(
+ self.compute_api._volume_size(inst_type, swap_bdm),
+ swap_size)
+ swap_bdm['volume_size'] = 42
+ self.assertEqual(
+ self.compute_api._volume_size(inst_type, swap_bdm), 42)
+ self.assertEqual(
+ self.compute_api._volume_size(inst_type, volume_bdm),
+ volume_size)
+
    def test_is_volume_backed_instance(self):
        """is_volume_backed_instance over various BDM combinations."""
        ctxt = self.context

        # No image_ref at all => volume backed.
        instance = self._create_fake_instance({'image_ref': ''})
        self.assertTrue(
            self.compute_api.is_volume_backed_instance(ctxt, instance, None))

        # Image ref set and no BDMs => not volume backed.
        instance = self._create_fake_instance({'root_device_name': 'vda'})
        self.assertFalse(
            self.compute_api.is_volume_backed_instance(
                ctxt, instance,
                block_device_obj.block_device_make_list(ctxt, [])))

        # A boot_index-0 volume-destination BDM => volume backed.
        bdms = block_device_obj.block_device_make_list(ctxt,
                   [fake_block_device.FakeDbBlockDeviceDict(
                       {'source_type': 'volume',
                        'device_name': '/dev/vda',
                        'volume_id': 'fake_volume_id',
                        'boot_index': 0,
                        'destination_type': 'volume'})])
        self.assertTrue(
            self.compute_api.is_volume_backed_instance(ctxt, instance, bdms))

        # Boot device is destination_type 'local': the extra volume at
        # boot_index 1 does not make the instance volume backed.
        bdms = block_device_obj.block_device_make_list(ctxt,
                   [fake_block_device.FakeDbBlockDeviceDict(
                       {'source_type': 'volume',
                        'device_name': '/dev/vda',
                        'volume_id': 'fake_volume_id',
                        'destination_type': 'local',
                        'boot_index': 0,
                        'snapshot_id': None}),
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'source_type': 'volume',
                         'device_name': '/dev/vdb',
                         'boot_index': 1,
                         'destination_type': 'volume',
                         'volume_id': 'c2ec2156-d75e-11e2-985b-5254009297d6',
                         'snapshot_id': None})])
        self.assertFalse(
            self.compute_api.is_volume_backed_instance(ctxt, instance, bdms))

        # Snapshot-sourced boot volume (volume_id not yet set) still
        # counts as volume backed.
        bdms = block_device_obj.block_device_make_list(ctxt,
                   [fake_block_device.FakeDbBlockDeviceDict(
                       {'source_type': 'volume',
                        'device_name': '/dev/vda',
                        'snapshot_id': 'de8836ac-d75e-11e2-8271-5254009297d6',
                        'destination_type': 'volume',
                        'boot_index': 0,
                        'volume_id': None})])
        self.assertTrue(
            self.compute_api.is_volume_backed_instance(ctxt, instance, bdms))
+
    def test_is_volume_backed_instance_no_bdms(self):
        """When bdms is None, the BDMs are fetched by instance uuid."""
        ctxt = self.context
        instance = self._create_fake_instance()

        # Expect exactly one lookup of the instance's BDM list.
        self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
                                 'get_by_instance_uuid')
        objects.BlockDeviceMappingList.get_by_instance_uuid(
            ctxt, instance['uuid']).AndReturn(
                block_device_obj.block_device_make_list(ctxt, []))
        self.mox.ReplayAll()

        self.compute_api.is_volume_backed_instance(ctxt, instance, None)
+
+ def test_reservation_id_one_instance(self):
+ """Verify building an instance has a reservation_id that
+ matches return value from create.
+ """
+ (refs, resv_id) = self.compute_api.create(self.context,
+ flavors.get_default_flavor(), image_href='some-fake-image')
+ try:
+ self.assertEqual(len(refs), 1)
+ self.assertEqual(refs[0]['reservation_id'], resv_id)
+ finally:
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_reservation_ids_two_instances(self):
+ """Verify building 2 instances at once results in a
+ reservation_id being returned equal to reservation id set
+ in both instances.
+ """
+ (refs, resv_id) = self.compute_api.create(self.context,
+ flavors.get_default_flavor(), image_href='some-fake-image',
+ min_count=2, max_count=2)
+ try:
+ self.assertEqual(len(refs), 2)
+ self.assertIsNotNone(resv_id)
+ finally:
+ for instance in refs:
+ self.assertEqual(instance['reservation_id'], resv_id)
+
+ db.instance_destroy(self.context, refs[0]['uuid'])
+
+ def test_multi_instance_display_name_template(self):
+ self.flags(multi_instance_display_name_template='%(name)s')
+ (refs, resv_id) = self.compute_api.create(self.context,
+ flavors.get_default_flavor(), image_href='some-fake-image',
+ min_count=2, max_count=2, display_name='x')
+ self.assertEqual(refs[0]['display_name'], 'x')
+ self.assertEqual(refs[0]['hostname'], 'x')
+ self.assertEqual(refs[1]['display_name'], 'x')
+ self.assertEqual(refs[1]['hostname'], 'x')
+
+ self.flags(multi_instance_display_name_template='%(name)s-%(count)s')
+ (refs, resv_id) = self.compute_api.create(self.context,
+ flavors.get_default_flavor(), image_href='some-fake-image',
+ min_count=2, max_count=2, display_name='x')
+ self.assertEqual(refs[0]['display_name'], 'x-1')
+ self.assertEqual(refs[0]['hostname'], 'x-1')
+ self.assertEqual(refs[1]['display_name'], 'x-2')
+ self.assertEqual(refs[1]['hostname'], 'x-2')
+
+ self.flags(multi_instance_display_name_template='%(name)s-%(uuid)s')
+ (refs, resv_id) = self.compute_api.create(self.context,
+ flavors.get_default_flavor(), image_href='some-fake-image',
+ min_count=2, max_count=2, display_name='x')
+ self.assertEqual(refs[0]['display_name'], 'x-%s' % refs[0]['uuid'])
+ self.assertEqual(refs[0]['hostname'], 'x-%s' % refs[0]['uuid'])
+ self.assertEqual(refs[1]['display_name'], 'x-%s' % refs[1]['uuid'])
+ self.assertEqual(refs[1]['hostname'], 'x-%s' % refs[1]['uuid'])
+
+ def test_instance_architecture(self):
+ # Test the instance architecture.
+ i_ref = self._create_fake_instance()
+ self.assertEqual(i_ref['architecture'], arch.X86_64)
+ db.instance_destroy(self.context, i_ref['uuid'])
+
+ def test_instance_unknown_architecture(self):
+ # Test if the architecture is unknown.
+ instance = self._create_fake_instance_obj(
+ params={'architecture': ''})
+ try:
+ self.compute.run_instance(self.context, instance, {}, {}, None,
+ None, None, True, None, False)
+ instance = db.instance_get_by_uuid(self.context,
+ instance['uuid'])
+ self.assertNotEqual(instance['architecture'], 'Unknown')
+ finally:
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_instance_name_template(self):
+ # Test the instance_name template.
+ self.flags(instance_name_template='instance-%d')
+ i_ref = self._create_fake_instance()
+ self.assertEqual(i_ref['name'], 'instance-%d' % i_ref['id'])
+ db.instance_destroy(self.context, i_ref['uuid'])
+
+ self.flags(instance_name_template='instance-%(uuid)s')
+ i_ref = self._create_fake_instance()
+ self.assertEqual(i_ref['name'], 'instance-%s' % i_ref['uuid'])
+ db.instance_destroy(self.context, i_ref['uuid'])
+
+ self.flags(instance_name_template='%(id)d-%(uuid)s')
+ i_ref = self._create_fake_instance()
+ self.assertEqual(i_ref['name'], '%d-%s' %
+ (i_ref['id'], i_ref['uuid']))
+ db.instance_destroy(self.context, i_ref['uuid'])
+
+ # not allowed.. default is uuid
+ self.flags(instance_name_template='%(name)s')
+ i_ref = self._create_fake_instance()
+ self.assertEqual(i_ref['name'], i_ref['uuid'])
+ db.instance_destroy(self.context, i_ref['uuid'])
+
+ def test_add_remove_fixed_ip(self):
+ instance = self._create_fake_instance_obj(params={'host': CONF.host})
+ self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
+ lambda *a, **kw: None)
+ self.compute_api.add_fixed_ip(self.context, instance, '1')
+ self.compute_api.remove_fixed_ip(self.context,
+ instance, '192.168.1.1')
+ self.compute_api.delete(self.context, instance)
+
+ def test_attach_volume_invalid(self):
+ self.assertRaises(exception.InvalidDevicePath,
+ self.compute_api.attach_volume,
+ self.context,
+ {'locked': False, 'vm_state': vm_states.ACTIVE,
+ 'task_state': None,
+ 'launched_at': timeutils.utcnow()},
+ None,
+ '/invalid')
+
+ def test_no_attach_volume_in_rescue_state(self):
+ def fake(*args, **kwargs):
+ pass
+
+ def fake_volume_get(self, context, volume_id):
+ return {'id': volume_id}
+
+ self.stubs.Set(cinder.API, 'get', fake_volume_get)
+ self.stubs.Set(cinder.API, 'check_attach', fake)
+ self.stubs.Set(cinder.API, 'reserve_volume', fake)
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.attach_volume,
+ self.context,
+ {'uuid': 'fake_uuid', 'locked': False,
+ 'vm_state': vm_states.RESCUED},
+ None,
+ '/dev/vdb')
+
+ def test_no_attach_volume_in_suspended_state(self):
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.attach_volume,
+ self.context,
+ {'uuid': 'fake_uuid', 'locked': False,
+ 'vm_state': vm_states.SUSPENDED},
+ {'id': 'fake-volume-id'},
+ '/dev/vdb')
+
    def test_no_detach_volume_in_rescue_state(self):
        # Ensure a volume can NOT be detached while the instance is RESCUED
        # (the original comment said the opposite of what is asserted).

        params = {'vm_state': vm_states.RESCUED}
        instance = self._create_fake_instance(params=params)

        volume = {'id': 1, 'attach_status': 'in-use',
                  'instance_uuid': instance['uuid']}

        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.detach_volume,
                          self.context, instance, volume)
+
    @mock.patch.object(objects.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    @mock.patch.object(cinder.API, 'get')
    def test_no_rescue_in_volume_state_attaching(self,
                                                 mock_get_vol,
                                                 mock_get_bdms):
        # Make sure a VM cannot be rescued while volume is being attached
        instance = self._create_fake_instance_obj()
        bdms, volume = self._fake_rescue_block_devices(instance)

        # Report the attached volume as still 'attaching' so rescue
        # must refuse to proceed.
        mock_get_vol.return_value = {'id': volume['id'],
                                     'status': "attaching"}
        mock_get_bdms.return_value = bdms

        self.assertRaises(exception.InvalidVolume,
                          self.compute_api.rescue, self.context, instance)
+
    def test_vnc_console(self):
        # Make sure we can get a vnc console for an instance.

        fake_instance = {'uuid': 'fake_uuid',
                         'host': 'fake_compute_host'}
        fake_console_type = "novnc"
        fake_connect_info = {'token': 'fake_token',
                             'console_type': fake_console_type,
                             'host': 'fake_console_host',
                             'port': 'fake_console_port',
                             'internal_access_path': 'fake_access_path',
                             'instance_uuid': fake_instance['uuid'],
                             'access_url': 'fake_console_url'}

        # Expect the compute RPC call to return the connect info ...
        rpcapi = compute_rpcapi.ComputeAPI
        self.mox.StubOutWithMock(rpcapi, 'get_vnc_console')
        rpcapi.get_vnc_console(
            self.context, instance=fake_instance,
            console_type=fake_console_type).AndReturn(fake_connect_info)

        # ... and expect it to be forwarded to consoleauth.
        self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
                                 'authorize_console')
        self.compute_api.consoleauth_rpcapi.authorize_console(
            self.context, 'fake_token', fake_console_type, 'fake_console_host',
            'fake_console_port', 'fake_access_path', 'fake_uuid')

        self.mox.ReplayAll()

        console = self.compute_api.get_vnc_console(self.context,
                                                   fake_instance,
                                                   fake_console_type)
        # Only the access URL is returned to the caller.
        self.assertEqual(console, {'url': 'fake_console_url'})
+
+ def test_get_vnc_console_no_host(self):
+ instance = self._create_fake_instance(params={'host': ''})
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.get_vnc_console,
+ self.context, instance, 'novnc')
+
+ db.instance_destroy(self.context, instance['uuid'])
+
    def test_spice_console(self):
        # Make sure we can get a spice console for an instance.

        fake_instance = {'uuid': 'fake_uuid',
                         'host': 'fake_compute_host'}
        fake_console_type = "spice-html5"
        fake_connect_info = {'token': 'fake_token',
                             'console_type': fake_console_type,
                             'host': 'fake_console_host',
                             'port': 'fake_console_port',
                             'internal_access_path': 'fake_access_path',
                             'instance_uuid': fake_instance['uuid'],
                             'access_url': 'fake_console_url'}

        # Expect the compute RPC call to return the connect info ...
        rpcapi = compute_rpcapi.ComputeAPI
        self.mox.StubOutWithMock(rpcapi, 'get_spice_console')
        rpcapi.get_spice_console(
            self.context, instance=fake_instance,
            console_type=fake_console_type).AndReturn(fake_connect_info)

        # ... and expect it to be forwarded to consoleauth.
        self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
                                 'authorize_console')
        self.compute_api.consoleauth_rpcapi.authorize_console(
            self.context, 'fake_token', fake_console_type, 'fake_console_host',
            'fake_console_port', 'fake_access_path', 'fake_uuid')

        self.mox.ReplayAll()

        console = self.compute_api.get_spice_console(self.context,
                                                     fake_instance,
                                                     fake_console_type)
        # Only the access URL is returned to the caller.
        self.assertEqual(console, {'url': 'fake_console_url'})
+
+ def test_get_spice_console_no_host(self):
+ instance = self._create_fake_instance(params={'host': ''})
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.get_spice_console,
+ self.context, instance, 'spice')
+
+ db.instance_destroy(self.context, instance['uuid'])
+
    def test_rdp_console(self):
        # Make sure we can get a rdp console for an instance.

        fake_instance = {'uuid': 'fake_uuid',
                         'host': 'fake_compute_host'}
        fake_console_type = "rdp-html5"
        fake_connect_info = {'token': 'fake_token',
                             'console_type': fake_console_type,
                             'host': 'fake_console_host',
                             'port': 'fake_console_port',
                             'internal_access_path': 'fake_access_path',
                             'instance_uuid': fake_instance['uuid'],
                             'access_url': 'fake_console_url'}

        # Expect the compute RPC call to return the connect info ...
        rpcapi = compute_rpcapi.ComputeAPI
        self.mox.StubOutWithMock(rpcapi, 'get_rdp_console')
        rpcapi.get_rdp_console(
            self.context, instance=fake_instance,
            console_type=fake_console_type).AndReturn(fake_connect_info)

        # ... and expect it to be forwarded to consoleauth.
        self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
                                 'authorize_console')
        self.compute_api.consoleauth_rpcapi.authorize_console(
            self.context, 'fake_token', fake_console_type, 'fake_console_host',
            'fake_console_port', 'fake_access_path', 'fake_uuid')

        self.mox.ReplayAll()

        console = self.compute_api.get_rdp_console(self.context,
                                                   fake_instance,
                                                   fake_console_type)
        # Only the access URL is returned to the caller.
        self.assertEqual(console, {'url': 'fake_console_url'})
+
+ def test_get_rdp_console_no_host(self):
+ instance = self._create_fake_instance(params={'host': ''})
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.get_rdp_console,
+ self.context, instance, 'rdp')
+
+ db.instance_destroy(self.context, instance['uuid'])
+
    def test_serial_console(self):
        # Make sure we can get a serial proxy url for an instance.

        fake_instance = {'uuid': 'fake_uuid',
                         'host': 'fake_compute_host'}
        fake_console_type = 'serial'
        fake_connect_info = {'token': 'fake_token',
                             'console_type': fake_console_type,
                             'host': 'fake_serial_host',
                             'port': 'fake_tcp_port',
                             'internal_access_path': 'fake_access_path',
                             'instance_uuid': fake_instance['uuid'],
                             'access_url': 'fake_access_url'}

        rpcapi = compute_rpcapi.ComputeAPI

        with contextlib.nested(
            mock.patch.object(rpcapi, 'get_serial_console',
                              return_value=fake_connect_info),
            mock.patch.object(self.compute_api.consoleauth_rpcapi,
                              'authorize_console')
        ) as (mock_get_serial_console, mock_authorize_console):
            # NOTE(review): this invokes the *mocked* authorize_console
            # directly instead of asserting the API called it --
            # presumably just exercising the mock; verify intent.
            self.compute_api.consoleauth_rpcapi.authorize_console(
                self.context, 'fake_token', fake_console_type,
                'fake_serial_host', 'fake_tcp_port',
                'fake_access_path', 'fake_uuid')

            console = self.compute_api.get_serial_console(self.context,
                                                          fake_instance,
                                                          fake_console_type)
            # Only the access URL is returned to the caller.
            self.assertEqual(console, {'url': 'fake_access_url'})
+
+ def test_get_serial_console_no_host(self):
+ # Make sure an exception is raised when instance is not Active.
+ instance = self._create_fake_instance(params={'host': ''})
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.get_serial_console,
+ self.context, instance, 'serial')
+
+ db.instance_destroy(self.context, instance['uuid'])
+
    def test_console_output(self):
        """get_console_output proxies to compute RPC with tail_length."""
        fake_instance = {'uuid': 'fake_uuid',
                         'host': 'fake_compute_host'}
        fake_tail_length = 699
        fake_console_output = 'fake console output'

        rpcapi = compute_rpcapi.ComputeAPI
        self.mox.StubOutWithMock(rpcapi, 'get_console_output')
        rpcapi.get_console_output(
            self.context, instance=fake_instance,
            tail_length=fake_tail_length).AndReturn(fake_console_output)

        self.mox.ReplayAll()

        output = self.compute_api.get_console_output(self.context,
                fake_instance, tail_length=fake_tail_length)
        self.assertEqual(output, fake_console_output)
+
+ def test_console_output_no_host(self):
+ instance = self._create_fake_instance(params={'host': ''})
+
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.get_console_output,
+ self.context, instance)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
    def test_attach_interface(self):
        """attach_interface allocates a port and returns the new vif.

        Returns (nwinfo, port_id) so test_detach_interface can reuse the
        attached interface.
        """
        new_type = flavors.get_flavor_by_flavor_id('4')
        sys_meta = flavors.save_flavor_info({}, new_type)

        instance = objects.Instance(image_ref='foo',
                                    system_metadata=sys_meta)
        self.mox.StubOutWithMock(self.compute.network_api,
                                 'allocate_port_for_instance')
        nwinfo = [fake_network_cache_model.new_vif()]
        network_id = nwinfo[0]['network']['id']
        port_id = nwinfo[0]['id']
        req_ip = '1.2.3.4'
        self.compute.network_api.allocate_port_for_instance(
            self.context, instance, port_id, network_id, req_ip
            ).AndReturn(nwinfo)
        self.mox.ReplayAll()
        vif = self.compute.attach_interface(self.context,
                                            instance,
                                            network_id,
                                            port_id,
                                            req_ip)
        # NOTE(review): compares the vif id to the *network* id --
        # presumably fake_network_cache_model makes them equal; confirm.
        self.assertEqual(vif['id'], network_id)
        return nwinfo, port_id
+
    def test_detach_interface(self):
        """Detaching the interface attached above clears driver state."""
        # Reuses the interface attached by test_attach_interface.
        nwinfo, port_id = self.test_attach_interface()
        self.stubs.Set(self.compute.network_api,
                       'deallocate_port_for_instance',
                       lambda a, b, c: [])
        instance = objects.Instance()
        instance.info_cache = objects.InstanceInfoCache.new(
            self.context, 'fake-uuid')
        instance.info_cache.network_info = network_model.NetworkInfo.hydrate(
            nwinfo)
        self.compute.detach_interface(self.context, instance, port_id)
        # The fake driver should no longer track any interfaces.
        self.assertEqual(self.compute.driver._interfaces, {})
+
    def test_attach_volume(self):
        """attach_volume reserves a BDM + volume and issues the RPC call."""
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
                {'source_type': 'volume', 'destination_type': 'volume',
                 'volume_id': 'fake-volume-id', 'device_name': '/dev/vdb'})
        bdm = block_device_obj.BlockDeviceMapping()._from_db_object(
                self.context,
                block_device_obj.BlockDeviceMapping(),
                fake_bdm)
        instance = self._create_fake_instance()
        fake_volume = {'id': 'fake-volume-id'}

        with contextlib.nested(
            mock.patch.object(cinder.API, 'get', return_value=fake_volume),
            mock.patch.object(cinder.API, 'check_attach'),
            mock.patch.object(cinder.API, 'reserve_volume'),
            mock.patch.object(compute_rpcapi.ComputeAPI,
                              'reserve_block_device_name', return_value=bdm),
            mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume')
        ) as (mock_get, mock_check_attach, mock_reserve_vol, mock_reserve_bdm,
                mock_attach):

            self.compute_api.attach_volume(
                    self.context, instance, 'fake-volume-id',
                    '/dev/vdb', 'ide', 'cdrom')

            # BDM reservation carries the requested bus/device type.
            mock_reserve_bdm.assert_called_once_with(
                    self.context, instance, '/dev/vdb', 'fake-volume-id',
                    disk_bus='ide', device_type='cdrom')
            # Volume is looked up, checked and reserved exactly once.
            self.assertEqual(mock_get.call_args,
                             mock.call(self.context, 'fake-volume-id'))
            self.assertEqual(mock_check_attach.call_args,
                             mock.call(
                                 self.context, fake_volume,
                                 instance=instance))
            mock_reserve_vol.assert_called_once_with(
                    self.context, 'fake-volume-id')
            # The RPC attach gets the reserved BDM and mountpoint.
            a, kw = mock_attach.call_args
            self.assertEqual(kw['volume_id'], 'fake-volume-id')
            self.assertEqual(kw['mountpoint'], '/dev/vdb')
            self.assertEqual(kw['bdm'].device_name, '/dev/vdb')
            self.assertEqual(kw['bdm'].volume_id, 'fake-volume-id')
+
+ def test_attach_volume_no_device(self):
+
+ called = {}
+
+ def fake_check_attach(*args, **kwargs):
+ called['fake_check_attach'] = True
+
+ def fake_reserve_volume(*args, **kwargs):
+ called['fake_reserve_volume'] = True
+
+ def fake_volume_get(self, context, volume_id):
+ called['fake_volume_get'] = True
+ return {'id': volume_id}
+
+ def fake_rpc_attach_volume(self, context, **kwargs):
+ called['fake_rpc_attach_volume'] = True
+
+ def fake_rpc_reserve_block_device_name(self, context, instance, device,
+ volume_id, **kwargs):
+ called['fake_rpc_reserve_block_device_name'] = True
+ bdm = block_device_obj.BlockDeviceMapping()
+ bdm['device_name'] = '/dev/vdb'
+ return bdm
+
+ self.stubs.Set(cinder.API, 'get', fake_volume_get)
+ self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
+ self.stubs.Set(cinder.API, 'reserve_volume',
+ fake_reserve_volume)
+ self.stubs.Set(compute_rpcapi.ComputeAPI,
+ 'reserve_block_device_name',
+ fake_rpc_reserve_block_device_name)
+ self.stubs.Set(compute_rpcapi.ComputeAPI, 'attach_volume',
+ fake_rpc_attach_volume)
+
+ instance = self._create_fake_instance()
+ self.compute_api.attach_volume(self.context, instance, 1, device=None)
+ self.assertTrue(called.get('fake_check_attach'))
+ self.assertTrue(called.get('fake_reserve_volume'))
+ self.assertTrue(called.get('fake_volume_get'))
+ self.assertTrue(called.get('fake_rpc_reserve_block_device_name'))
+ self.assertTrue(called.get('fake_rpc_attach_volume'))
+
+ def test_detach_volume(self):
+ # Ensure volume can be detached from instance
+ called = {}
+ instance = self._create_fake_instance()
+ volume = {'id': 1, 'attach_status': 'in-use',
+ 'instance_uuid': instance['uuid']}
+
+ def fake_check_detach(*args, **kwargs):
+ called['fake_check_detach'] = True
+
+ def fake_begin_detaching(*args, **kwargs):
+ called['fake_begin_detaching'] = True
+
+ def fake_rpc_detach_volume(self, context, **kwargs):
+ called['fake_rpc_detach_volume'] = True
+
+ self.stubs.Set(cinder.API, 'check_detach', fake_check_detach)
+ self.stubs.Set(cinder.API, 'begin_detaching', fake_begin_detaching)
+ self.stubs.Set(compute_rpcapi.ComputeAPI, 'detach_volume',
+ fake_rpc_detach_volume)
+
+ self.compute_api.detach_volume(self.context,
+ instance, volume)
+ self.assertTrue(called.get('fake_check_detach'))
+ self.assertTrue(called.get('fake_begin_detaching'))
+ self.assertTrue(called.get('fake_rpc_detach_volume'))
+
+ def test_detach_invalid_volume(self):
+        # Ensure an exception is raised while detaching an unattached volume
+ instance = {'uuid': 'uuid1',
+ 'locked': False,
+ 'launched_at': timeutils.utcnow(),
+ 'vm_state': vm_states.ACTIVE,
+ 'task_state': None}
+ volume = {'id': 1, 'attach_status': 'detached'}
+
+ self.assertRaises(exception.InvalidVolume,
+ self.compute_api.detach_volume, self.context,
+ instance, volume)
+
+ def test_detach_unattached_volume(self):
+        # Ensure an exception is raised when the volume's record of the
+        # attached instance doesn't match the given instance.
+ instance = {'uuid': 'uuid1',
+ 'locked': False,
+ 'launched_at': timeutils.utcnow(),
+ 'vm_state': vm_states.ACTIVE,
+ 'task_state': None}
+ volume = {'id': 1, 'attach_status': 'in-use',
+ 'instance_uuid': 'uuid2'}
+
+ self.assertRaises(exception.VolumeUnattached,
+ self.compute_api.detach_volume, self.context,
+ instance, volume)
+
+ def test_detach_suspended_instance_fails(self):
+ instance = {'uuid': 'uuid1',
+ 'locked': False,
+ 'launched_at': timeutils.utcnow(),
+ 'vm_state': vm_states.SUSPENDED,
+ 'task_state': None}
+ volume = {'id': 1, 'attach_status': 'in-use',
+ 'instance_uuid': 'uuid2'}
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.detach_volume, self.context,
+ instance, volume)
+
+ def test_detach_volume_libvirt_is_down(self):
+ # Ensure rollback during detach if libvirt goes down
+
+ called = {}
+ instance = self._create_fake_instance()
+
+ fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
+ {'device_name': '/dev/vdb', 'volume_id': 1,
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'connection_info': '{"test": "test"}'})
+
+ def fake_libvirt_driver_instance_exists(_instance):
+ called['fake_libvirt_driver_instance_exists'] = True
+ return False
+
+ def fake_libvirt_driver_detach_volume_fails(*args, **kwargs):
+ called['fake_libvirt_driver_detach_volume_fails'] = True
+ raise AttributeError()
+
+ def fake_roll_detaching(*args, **kwargs):
+ called['fake_roll_detaching'] = True
+
+ self.stubs.Set(cinder.API, 'roll_detaching', fake_roll_detaching)
+ self.stubs.Set(self.compute.driver, "instance_exists",
+ fake_libvirt_driver_instance_exists)
+ self.stubs.Set(self.compute.driver, "detach_volume",
+ fake_libvirt_driver_detach_volume_fails)
+
+ self.mox.StubOutWithMock(objects.BlockDeviceMapping,
+ 'get_by_volume_id')
+ objects.BlockDeviceMapping.get_by_volume_id(
+ self.context, 1).AndReturn(objects.BlockDeviceMapping(
+ **fake_bdm))
+ self.mox.ReplayAll()
+
+ self.assertRaises(AttributeError, self.compute.detach_volume,
+ self.context, 1, instance)
+ self.assertTrue(called.get('fake_libvirt_driver_instance_exists'))
+ self.assertTrue(called.get('fake_roll_detaching'))
+
+ def test_detach_volume_not_found(self):
+        # Ensure that a volume can be detached even when it is removed
+        # from an instance but still remains in the BDM. See bug #1367964.
+
+ instance = self._create_fake_instance()
+ fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
+ {'source_type': 'volume', 'destination_type': 'volume',
+ 'volume_id': 'fake-id', 'device_name': '/dev/vdb',
+ 'connection_info': '{"test": "test"}'})
+ bdm = objects.BlockDeviceMapping(**fake_bdm)
+
+ with contextlib.nested(
+ mock.patch.object(self.compute.driver, 'detach_volume',
+ side_effect=exception.DiskNotFound('sdb')),
+ mock.patch.object(objects.BlockDeviceMapping,
+ 'get_by_volume_id', return_value=bdm),
+ mock.patch.object(cinder.API, 'terminate_connection'),
+ mock.patch.object(bdm, 'destroy'),
+ mock.patch.object(self.compute, '_notify_about_instance_usage'),
+ mock.patch.object(self.compute.volume_api, 'detach'),
+ mock.patch.object(self.compute.driver, 'get_volume_connector',
+ return_value='fake-connector')
+ ) as (mock_detach_volume, mock_volume, mock_terminate_connection,
+ mock_destroy, mock_notify, mock_detach, mock_volume_connector):
+ self.compute.detach_volume(self.context, 'fake-id', instance)
+ self.assertTrue(mock_detach_volume.called)
+ mock_terminate_connection.assert_called_once_with(self.context,
+ 'fake-id',
+ 'fake-connector')
+ mock_destroy.assert_called_once_with()
+ mock_detach.assert_called_once_with(mock.ANY, 'fake-id')
+
+ def test_terminate_with_volumes(self):
+ # Make sure that volumes get detached during instance termination.
+ admin = context.get_admin_context()
+ instance = self._create_fake_instance_obj()
+
+ volume_id = 'fake'
+ values = {'instance_uuid': instance['uuid'],
+ 'device_name': '/dev/vdc',
+ 'delete_on_termination': False,
+ 'volume_id': volume_id,
+ }
+ db.block_device_mapping_create(admin, values)
+
+ def fake_volume_get(self, context, volume_id):
+ return {'id': volume_id}
+ self.stubs.Set(cinder.API, "get", fake_volume_get)
+
+ # Stub out and record whether it gets detached
+ result = {"detached": False}
+
+ def fake_detach(self, context, volume_id_param):
+ result["detached"] = volume_id_param == volume_id
+ self.stubs.Set(cinder.API, "detach", fake_detach)
+
+ def fake_terminate_connection(self, context, volume_id, connector):
+ return {}
+ self.stubs.Set(cinder.API, "terminate_connection",
+ fake_terminate_connection)
+
+ # Kill the instance and check that it was detached
+ bdms = db.block_device_mapping_get_all_by_instance(admin,
+ instance['uuid'])
+ self.compute.terminate_instance(admin, instance, bdms, [])
+
+ self.assertTrue(result["detached"])
+
+ def test_terminate_deletes_all_bdms(self):
+ admin = context.get_admin_context()
+ instance = self._create_fake_instance_obj()
+
+ img_bdm = {'instance_uuid': instance['uuid'],
+ 'device_name': '/dev/vda',
+ 'source_type': 'image',
+ 'destination_type': 'local',
+ 'delete_on_termination': False,
+ 'boot_index': 0,
+ 'image_id': 'fake_image'}
+ vol_bdm = {'instance_uuid': instance['uuid'],
+ 'device_name': '/dev/vdc',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'delete_on_termination': False,
+ 'volume_id': 'fake_vol'}
+ bdms = []
+ for bdm in img_bdm, vol_bdm:
+ bdm_obj = objects.BlockDeviceMapping(**bdm)
+ bdm_obj.create(admin)
+ bdms.append(bdm_obj)
+
+ self.stubs.Set(self.compute, 'volume_api', mox.MockAnything())
+ self.stubs.Set(self.compute, '_prep_block_device', mox.MockAnything())
+ self.compute.run_instance(self.context, instance, {}, {}, None, None,
+ None, True, None, False)
+
+ self.compute.terminate_instance(self.context, instance, bdms, [])
+
+ bdms = db.block_device_mapping_get_all_by_instance(admin,
+ instance['uuid'])
+ self.assertEqual(len(bdms), 0)
+
+ def test_inject_network_info(self):
+ instance = self._create_fake_instance_obj(params={'host': CONF.host})
+ self.compute.run_instance(self.context,
+ instance, {}, {}, None, None,
+ None, True, None, False)
+ instance = self.compute_api.get(self.context, instance['uuid'],
+ want_objects=True)
+ self.compute_api.inject_network_info(self.context, instance)
+ self.stubs.Set(self.compute_api.network_api,
+ 'deallocate_for_instance',
+ lambda *a, **kw: None)
+ self.compute_api.delete(self.context, instance)
+
+ def test_reset_network(self):
+ instance = self._create_fake_instance_obj()
+ self.compute.run_instance(self.context,
+ instance, {}, {}, None, None,
+ None, True, None, False)
+ instance = self.compute_api.get(self.context, instance['uuid'],
+ want_objects=True)
+ self.compute_api.reset_network(self.context, instance)
+
+ def test_lock(self):
+ instance = self._create_fake_instance_obj()
+ self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
+ lambda *a, **kw: None)
+ self.compute_api.lock(self.context, instance)
+
+ def test_unlock(self):
+ instance = self._create_fake_instance_obj()
+ self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
+ lambda *a, **kw: None)
+ self.compute_api.unlock(self.context, instance)
+
+ def test_get_lock(self):
+ instance = self._create_fake_instance()
+ self.assertFalse(self.compute_api.get_lock(self.context, instance))
+ db.instance_update(self.context, instance['uuid'], {'locked': True})
+ self.assertTrue(self.compute_api.get_lock(self.context, instance))
+
+ def test_add_remove_security_group(self):
+ instance = self._create_fake_instance_obj()
+
+ self.compute.run_instance(self.context,
+ instance, {}, {}, None, None,
+ None, True, None, False)
+ instance = self.compute_api.get(self.context, instance['uuid'])
+ security_group_name = self._create_group()['name']
+
+ self.security_group_api.add_to_instance(self.context,
+ instance,
+ security_group_name)
+ self.security_group_api.remove_from_instance(self.context,
+ instance,
+ security_group_name)
+
+ def test_get_diagnostics(self):
+ instance = self._create_fake_instance_obj()
+
+ rpcapi = compute_rpcapi.ComputeAPI
+ self.mox.StubOutWithMock(rpcapi, 'get_diagnostics')
+ rpcapi.get_diagnostics(self.context, instance=instance)
+ self.mox.ReplayAll()
+
+ self.compute_api.get_diagnostics(self.context, instance)
+
+ self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
+ lambda *a, **kw: None)
+ self.compute_api.delete(self.context, instance)
+
+ def test_get_instance_diagnostics(self):
+ instance = self._create_fake_instance_obj()
+
+ rpcapi = compute_rpcapi.ComputeAPI
+ self.mox.StubOutWithMock(rpcapi, 'get_instance_diagnostics')
+ rpcapi.get_instance_diagnostics(self.context, instance=instance)
+ self.mox.ReplayAll()
+
+ self.compute_api.get_instance_diagnostics(self.context, instance)
+
+ self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
+ lambda *a, **kw: None)
+ self.compute_api.delete(self.context, instance)
+
+ def test_secgroup_refresh(self):
+ instance = self._create_fake_instance()
+
+ def rule_get(*args, **kwargs):
+ mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
+ return [mock_rule]
+
+ def group_get(*args, **kwargs):
+ mock_group = db_fakes.FakeModel({'instances': [instance]})
+ return mock_group
+
+ self.stubs.Set(
+ self.compute_api.db,
+ 'security_group_rule_get_by_security_group_grantee',
+ rule_get)
+ self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
+
+ rpcapi = self.security_group_api.security_group_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
+ rpcapi.refresh_instance_security_rules(self.context,
+ instance['host'],
+ instance)
+ self.mox.ReplayAll()
+
+ self.security_group_api.trigger_members_refresh(self.context, [1])
+
+ def test_secgroup_refresh_once(self):
+ instance = self._create_fake_instance()
+
+ def rule_get(*args, **kwargs):
+ mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
+ return [mock_rule]
+
+ def group_get(*args, **kwargs):
+ mock_group = db_fakes.FakeModel({'instances': [instance]})
+ return mock_group
+
+ self.stubs.Set(
+ self.compute_api.db,
+ 'security_group_rule_get_by_security_group_grantee',
+ rule_get)
+ self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
+
+ rpcapi = self.security_group_api.security_group_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
+ rpcapi.refresh_instance_security_rules(self.context,
+ instance['host'],
+ instance)
+ self.mox.ReplayAll()
+
+ self.security_group_api.trigger_members_refresh(self.context, [1, 2])
+
+ def test_secgroup_refresh_none(self):
+ def rule_get(*args, **kwargs):
+ mock_rule = db_fakes.FakeModel({'parent_group_id': 1})
+ return [mock_rule]
+
+ def group_get(*args, **kwargs):
+ mock_group = db_fakes.FakeModel({'instances': []})
+ return mock_group
+
+ self.stubs.Set(
+ self.compute_api.db,
+ 'security_group_rule_get_by_security_group_grantee',
+ rule_get)
+ self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
+
+ rpcapi = self.security_group_api.security_group_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
+
+ self.mox.ReplayAll()
+
+ self.security_group_api.trigger_members_refresh(self.context, [1])
+
+ def test_secrule_refresh(self):
+ instance = self._create_fake_instance()
+
+ def group_get(*args, **kwargs):
+ mock_group = db_fakes.FakeModel({'instances': [instance]})
+ return mock_group
+
+ self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
+
+ rpcapi = self.security_group_api.security_group_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
+ rpcapi.refresh_instance_security_rules(self.context,
+ instance['host'],
+ instance)
+ self.mox.ReplayAll()
+
+ self.security_group_api.trigger_rules_refresh(self.context, [1])
+
+ def test_secrule_refresh_once(self):
+ instance = self._create_fake_instance()
+
+ def group_get(*args, **kwargs):
+ mock_group = db_fakes.FakeModel({'instances': [instance]})
+ return mock_group
+
+ self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
+
+ rpcapi = self.security_group_api.security_group_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
+ rpcapi.refresh_instance_security_rules(self.context,
+ instance['host'],
+ instance)
+ self.mox.ReplayAll()
+
+ self.security_group_api.trigger_rules_refresh(self.context, [1, 2])
+
+ def test_secrule_refresh_none(self):
+ def group_get(*args, **kwargs):
+ mock_group = db_fakes.FakeModel({'instances': []})
+ return mock_group
+
+ self.stubs.Set(self.compute_api.db, 'security_group_get', group_get)
+
+ rpcapi = self.security_group_api.security_group_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'refresh_instance_security_rules')
+ self.mox.ReplayAll()
+
+ self.security_group_api.trigger_rules_refresh(self.context, [1, 2])
+
+ def test_live_migrate(self):
+ instance, instance_uuid = self._run_instance()
+
+ rpcapi = self.compute_api.compute_task_api
+ self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
+ self.mox.StubOutWithMock(rpcapi, 'live_migrate_instance')
+ self.compute_api._record_action_start(self.context, instance,
+ 'live-migration')
+ rpcapi.live_migrate_instance(self.context, instance, 'fake_dest_host',
+ block_migration=True,
+ disk_over_commit=True)
+
+ self.mox.ReplayAll()
+
+ self.compute_api.live_migrate(self.context, instance,
+ block_migration=True,
+ disk_over_commit=True,
+ host_name='fake_dest_host')
+
+ instance.refresh()
+ self.assertEqual(instance['task_state'], task_states.MIGRATING)
+
+ def test_evacuate(self):
+ instance = self._create_fake_instance_obj(services=True)
+ self.assertIsNone(instance.task_state)
+
+ def fake_service_is_up(*args, **kwargs):
+ return False
+
+ def fake_rebuild_instance(*args, **kwargs):
+ instance.host = kwargs['host']
+ instance.save()
+
+ self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up',
+ fake_service_is_up)
+ self.stubs.Set(self.compute_api.compute_task_api, 'rebuild_instance',
+ fake_rebuild_instance)
+ self.compute_api.evacuate(self.context.elevated(),
+ instance,
+ host='fake_dest_host',
+ on_shared_storage=True,
+ admin_password=None)
+
+ instance.refresh()
+ self.assertEqual(instance.task_state, task_states.REBUILDING)
+ self.assertEqual(instance.host, 'fake_dest_host')
+ instance.destroy()
+
+ def test_fail_evacuate_from_non_existing_host(self):
+ inst = {}
+ inst['vm_state'] = vm_states.ACTIVE
+ inst['launched_at'] = timeutils.utcnow()
+ inst['image_ref'] = FAKE_IMAGE_REF
+ inst['reservation_id'] = 'r-fakeres'
+ inst['user_id'] = self.user_id
+ inst['project_id'] = self.project_id
+ inst['host'] = 'fake_host'
+ inst['node'] = NODENAME
+ type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+ inst['instance_type_id'] = type_id
+ inst['ami_launch_index'] = 0
+ inst['memory_mb'] = 0
+ inst['vcpus'] = 0
+ inst['root_gb'] = 0
+ inst['ephemeral_gb'] = 0
+ inst['architecture'] = arch.X86_64
+ inst['os_type'] = 'Linux'
+ instance = self._create_fake_instance_obj(inst)
+
+ self.assertIsNone(instance.task_state)
+ self.assertRaises(exception.ComputeHostNotFound,
+ self.compute_api.evacuate, self.context.elevated(), instance,
+ host='fake_dest_host', on_shared_storage=True,
+ admin_password=None)
+ instance.destroy()
+
+ def test_fail_evacuate_from_running_host(self):
+ instance = self._create_fake_instance_obj(services=True)
+ self.assertIsNone(instance.task_state)
+
+ def fake_service_is_up(*args, **kwargs):
+ return True
+
+ self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up',
+ fake_service_is_up)
+
+ self.assertRaises(exception.ComputeServiceInUse,
+ self.compute_api.evacuate, self.context.elevated(), instance,
+ host='fake_dest_host', on_shared_storage=True,
+ admin_password=None)
+ instance.destroy()
+
+ def test_fail_evacuate_instance_in_wrong_state(self):
+ states = [vm_states.BUILDING, vm_states.PAUSED, vm_states.SUSPENDED,
+ vm_states.RESCUED, vm_states.RESIZED, vm_states.SOFT_DELETED,
+ vm_states.DELETED]
+ instances = [self._create_fake_instance_obj({'vm_state': state})
+ for state in states]
+
+ for instance in instances:
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.evacuate, self.context, instance,
+ host='fake_dest_host', on_shared_storage=True,
+ admin_password=None)
+ instance.destroy()
+
+ def test_get_migrations(self):
+ migration = test_migration.fake_db_migration(uuid="1234")
+ filters = {'host': 'host1'}
+ self.mox.StubOutWithMock(db, "migration_get_all_by_filters")
+ db.migration_get_all_by_filters(self.context,
+ filters).AndReturn([migration])
+ self.mox.ReplayAll()
+
+ migrations = self.compute_api.get_migrations(self.context,
+ filters)
+ self.assertEqual(1, len(migrations))
+ self.assertEqual(migrations[0].id, migration['id'])
+
+
+def fake_rpc_method(context, method, **kwargs):
+ pass
+
+
+def _create_service_entries(context, values=[['avail_zone1', ['fake_host1',
+ 'fake_host2']],
+ ['avail_zone2', ['fake_host3']]]):
+ for (avail_zone, hosts) in values:
+ for host in hosts:
+ db.service_create(context,
+ {'host': host,
+ 'binary': 'nova-compute',
+ 'topic': 'compute',
+ 'report_count': 0})
+ return values
+
+
+class ComputeAPIAggrTestCase(BaseTestCase):
+ """This is for unit coverage of aggregate-related methods
+ defined in nova.compute.api.
+ """
+
+ def setUp(self):
+ super(ComputeAPIAggrTestCase, self).setUp()
+ self.api = compute_api.AggregateAPI()
+ self.context = context.get_admin_context()
+ self.stubs.Set(self.api.compute_rpcapi.client, 'call', fake_rpc_method)
+ self.stubs.Set(self.api.compute_rpcapi.client, 'cast', fake_rpc_method)
+
+ def test_aggregate_no_zone(self):
+ # Ensure we can create an aggregate without an availability zone
+ aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
+ None)
+ self.api.delete_aggregate(self.context, aggr['id'])
+ db.aggregate_get(self.context.elevated(read_deleted='yes'),
+ aggr['id'])
+ self.assertRaises(exception.AggregateNotFound,
+ self.api.delete_aggregate, self.context, aggr['id'])
+
+ def test_check_az_for_aggregate(self):
+ # Ensure all conflict hosts can be returned
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ fake_host1 = values[0][1][0]
+ fake_host2 = values[0][1][1]
+ aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
+ fake_zone, fake_host1)
+ aggr1 = self._init_aggregate_with_host(aggr1, None, None, fake_host2)
+ aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
+ fake_host2)
+ aggr2 = self._init_aggregate_with_host(aggr2, None, None, fake_host1)
+ metadata = {'availability_zone': 'another_zone'}
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.api.update_aggregate,
+ self.context, aggr2['id'], metadata)
+
+ def test_update_aggregate(self):
+ # Ensure metadata can be updated.
+ aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
+ 'fake_zone')
+ fake_notifier.NOTIFICATIONS = []
+ aggr = self.api.update_aggregate(self.context, aggr['id'],
+ {'name': 'new_fake_aggregate'})
+ self.assertIsNone(availability_zones._get_cache().get('cache'))
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updateprop.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updateprop.end')
+
+ def test_update_aggregate_no_az(self):
+        # Ensure metadata without an availability zone can be
+        # updated, even when the aggregate contains hosts belonging
+        # to another availability zone
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ fake_host = values[0][1][0]
+ self._init_aggregate_with_host(None, 'fake_aggregate1',
+ fake_zone, fake_host)
+ aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
+ fake_host)
+ metadata = {'name': 'new_fake_aggregate'}
+ fake_notifier.NOTIFICATIONS = []
+ aggr2 = self.api.update_aggregate(self.context, aggr2['id'],
+ metadata)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updateprop.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updateprop.end')
+
+ def test_update_aggregate_az_change(self):
+ # Ensure availability zone can be updated,
+ # when the aggregate is the only one with
+ # availability zone
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ fake_host = values[0][1][0]
+ aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
+ fake_zone, fake_host)
+ self._init_aggregate_with_host(None, 'fake_aggregate2', None,
+ fake_host)
+ metadata = {'availability_zone': 'new_fake_zone'}
+ fake_notifier.NOTIFICATIONS = []
+ aggr1 = self.api.update_aggregate(self.context, aggr1['id'],
+ metadata)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.end')
+
+ def test_update_aggregate_az_fails(self):
+        # Ensure the aggregate's availability zone can't be updated
+        # when the aggregate has hosts in another availability zone
+ fake_notifier.NOTIFICATIONS = []
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ fake_host = values[0][1][0]
+ self._init_aggregate_with_host(None, 'fake_aggregate1',
+ fake_zone, fake_host)
+ aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
+ fake_host)
+ metadata = {'availability_zone': 'another_zone'}
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.api.update_aggregate,
+ self.context, aggr2['id'], metadata)
+ fake_host2 = values[0][1][1]
+ aggr3 = self._init_aggregate_with_host(None, 'fake_aggregate3',
+ None, fake_host2)
+ metadata = {'availability_zone': fake_zone}
+ aggr3 = self.api.update_aggregate(self.context, aggr3['id'],
+ metadata)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 15)
+ msg = fake_notifier.NOTIFICATIONS[13]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.start')
+ msg = fake_notifier.NOTIFICATIONS[14]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.end')
+
+ def test_update_aggregate_az_fails_with_nova_az(self):
+        # Ensure the aggregate's availability zone can't be updated
+        # when the aggregate has hosts in another availability zone
+ fake_notifier.NOTIFICATIONS = []
+ values = _create_service_entries(self.context)
+ fake_host = values[0][1][0]
+ self._init_aggregate_with_host(None, 'fake_aggregate1',
+ CONF.default_availability_zone,
+ fake_host)
+ aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
+ fake_host)
+ metadata = {'availability_zone': 'another_zone'}
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.api.update_aggregate,
+ self.context, aggr2['id'], metadata)
+
+ def test_update_aggregate_metadata(self):
+ # Ensure metadata can be updated.
+ aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
+ 'fake_zone')
+ metadata = {'foo_key1': 'foo_value1',
+ 'foo_key2': 'foo_value2',
+ 'availability_zone': 'fake_zone'}
+ fake_notifier.NOTIFICATIONS = []
+ availability_zones._get_cache().add('fake_key', 'fake_value')
+ aggr = self.api.update_aggregate_metadata(self.context, aggr['id'],
+ metadata)
+ self.assertIsNone(availability_zones._get_cache().get('fake_key'))
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.end')
+ fake_notifier.NOTIFICATIONS = []
+ metadata['foo_key1'] = None
+ expected_payload_meta_data = {'foo_key1': None,
+ 'foo_key2': 'foo_value2',
+ 'availability_zone': 'fake_zone'}
+ expected = self.api.update_aggregate_metadata(self.context,
+ aggr['id'], metadata)
+ self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('aggregate.updatemetadata.start', msg.event_type)
+ self.assertEqual(expected_payload_meta_data, msg.payload['meta_data'])
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual('aggregate.updatemetadata.end', msg.event_type)
+ self.assertEqual(expected_payload_meta_data, msg.payload['meta_data'])
+ self.assertThat(expected['metadata'],
+ matchers.DictMatches({'availability_zone': 'fake_zone',
+ 'foo_key2': 'foo_value2'}))
+
+ def test_update_aggregate_metadata_no_az(self):
+        # Ensure metadata without an availability zone can be
+        # updated, even when the aggregate contains hosts belonging
+        # to another availability zone
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ fake_host = values[0][1][0]
+ self._init_aggregate_with_host(None, 'fake_aggregate1',
+ fake_zone, fake_host)
+ aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
+ fake_host)
+ metadata = {'foo_key2': 'foo_value3'}
+ fake_notifier.NOTIFICATIONS = []
+ aggr2 = self.api.update_aggregate_metadata(self.context, aggr2['id'],
+ metadata)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.end')
+ self.assertThat(aggr2['metadata'],
+ matchers.DictMatches({'foo_key2': 'foo_value3'}))
+
+ def test_update_aggregate_metadata_az_change(self):
+ # Ensure availability zone can be updated,
+ # when the aggregate is the only one with
+ # availability zone
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ fake_host = values[0][1][0]
+ aggr1 = self._init_aggregate_with_host(None, 'fake_aggregate1',
+ fake_zone, fake_host)
+ self._init_aggregate_with_host(None, 'fake_aggregate2', None,
+ fake_host)
+ metadata = {'availability_zone': 'new_fake_zone'}
+ fake_notifier.NOTIFICATIONS = []
+ aggr1 = self.api.update_aggregate_metadata(self.context,
+ aggr1['id'], metadata)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.start')
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.end')
+
+ def test_update_aggregate_az_do_not_replace_existing_metadata(self):
+        # Ensure that an update of the aggregate availability zone
+        # does not replace the aggregate's existing metadata
+ aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
+ 'fake_zone')
+ metadata = {'foo_key1': 'foo_value1'}
+ aggr = self.api.update_aggregate_metadata(self.context,
+ aggr['id'],
+ metadata)
+ metadata = {'availability_zone': 'new_fake_zone'}
+ aggr = self.api.update_aggregate(self.context,
+ aggr['id'],
+ metadata)
+ self.assertThat(aggr['metadata'], matchers.DictMatches(
+ {'availability_zone': 'new_fake_zone', 'foo_key1': 'foo_value1'}))
+
+ def test_update_aggregate_metadata_az_fails(self):
+        # Ensure the aggregate's availability zone can't be updated
+        # when the aggregate has hosts in another availability zone
+ fake_notifier.NOTIFICATIONS = []
+ values = _create_service_entries(self.context)
+ fake_zone = values[0][0]
+ fake_host = values[0][1][0]
+ self._init_aggregate_with_host(None, 'fake_aggregate1',
+ fake_zone, fake_host)
+ aggr2 = self._init_aggregate_with_host(None, 'fake_aggregate2', None,
+ fake_host)
+ metadata = {'availability_zone': 'another_zone'}
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.api.update_aggregate_metadata,
+ self.context, aggr2['id'], metadata)
+ aggr3 = self._init_aggregate_with_host(None, 'fake_aggregate3',
+ None, fake_host)
+ metadata = {'availability_zone': fake_zone}
+ aggr3 = self.api.update_aggregate_metadata(self.context,
+ aggr3['id'],
+ metadata)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 15)
+ msg = fake_notifier.NOTIFICATIONS[13]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.start')
+ msg = fake_notifier.NOTIFICATIONS[14]
+ self.assertEqual(msg.event_type,
+ 'aggregate.updatemetadata.end')
+
+    def test_delete_aggregate(self):
+        # Ensure we can delete an aggregate.
+        fake_notifier.NOTIFICATIONS = []
+        aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
+                                         'fake_zone')
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+        msg = fake_notifier.NOTIFICATIONS[0]
+        self.assertEqual(msg.event_type,
+                         'aggregate.create.start')
+        msg = fake_notifier.NOTIFICATIONS[1]
+        self.assertEqual(msg.event_type,
+                         'aggregate.create.end')
+        fake_notifier.NOTIFICATIONS = []
+        self.api.delete_aggregate(self.context, aggr['id'])
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+        msg = fake_notifier.NOTIFICATIONS[0]
+        self.assertEqual(msg.event_type,
+                         'aggregate.delete.start')
+        msg = fake_notifier.NOTIFICATIONS[1]
+        self.assertEqual(msg.event_type,
+                         'aggregate.delete.end')
+        # The aggregate is soft-deleted: still readable with
+        # read_deleted='yes', but a second delete must fail.
+        db.aggregate_get(self.context.elevated(read_deleted='yes'),
+                         aggr['id'])
+        self.assertRaises(exception.AggregateNotFound,
+                          self.api.delete_aggregate, self.context, aggr['id'])
+
+    def test_delete_non_empty_aggregate(self):
+        # Ensure InvalidAggregateAction is raised when deleting a
+        # non-empty aggregate (one that still has a host in it).
+        _create_service_entries(self.context,
+                                [['fake_availability_zone', ['fake_host']]])
+        aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
+                                         'fake_availability_zone')
+        self.api.add_host_to_aggregate(self.context, aggr['id'], 'fake_host')
+        self.assertRaises(exception.InvalidAggregateAction,
+                          self.api.delete_aggregate, self.context, aggr['id'])
+
+    def test_add_host_to_aggregate(self):
+        # Ensure we can add a host to an aggregate.
+        values = _create_service_entries(self.context)
+        fake_zone = values[0][0]
+        fake_host = values[0][1][0]
+        aggr = self.api.create_aggregate(self.context,
+                                         'fake_aggregate', fake_zone)
+
+        # The aggregate handed to the RPC call must already contain the
+        # newly added host.
+        def fake_add_aggregate_host(*args, **kwargs):
+            hosts = kwargs["aggregate"]["hosts"]
+            self.assertIn(fake_host, hosts)
+
+        self.stubs.Set(self.api.compute_rpcapi, 'add_aggregate_host',
+                       fake_add_aggregate_host)
+
+        self.mox.StubOutWithMock(availability_zones,
+                                 'update_host_availability_zone_cache')
+
+        # Helper that records a mox expectation for one AZ-cache refresh.
+        def _stub_update_host_avail_zone_cache(host, az=None):
+            if az is not None:
+                availability_zones.update_host_availability_zone_cache(
+                    self.context, host, az)
+            else:
+                availability_zones.update_host_availability_zone_cache(
+                    self.context, host)
+
+        # NOTE(review): expectations below cover one refresh per service
+        # host (with the default AZ) plus one for the added host —
+        # presumably all triggered by add_host_to_aggregate; confirm.
+        for (avail_zone, hosts) in values:
+            for host in hosts:
+                _stub_update_host_avail_zone_cache(
+                    host, CONF.default_availability_zone)
+        _stub_update_host_avail_zone_cache(fake_host)
+        self.mox.ReplayAll()
+
+        fake_notifier.NOTIFICATIONS = []
+        aggr = self.api.add_host_to_aggregate(self.context,
+                                              aggr['id'], fake_host)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+        msg = fake_notifier.NOTIFICATIONS[0]
+        self.assertEqual(msg.event_type,
+                         'aggregate.addhost.start')
+        msg = fake_notifier.NOTIFICATIONS[1]
+        self.assertEqual(msg.event_type,
+                         'aggregate.addhost.end')
+        self.assertEqual(len(aggr['hosts']), 1)
+
+    def test_add_host_to_aggr_with_no_az(self):
+        # A host may belong both to an aggregate with an availability
+        # zone and to one without any AZ set.
+        values = _create_service_entries(self.context)
+        fake_zone = values[0][0]
+        fake_host = values[0][1][0]
+        aggr = self.api.create_aggregate(self.context,
+                                         'fake_aggregate', fake_zone)
+        aggr = self.api.add_host_to_aggregate(self.context, aggr['id'],
+                                              fake_host)
+        aggr_no_az = self.api.create_aggregate(self.context, 'fake_aggregate2',
+                                               None)
+        aggr_no_az = self.api.add_host_to_aggregate(self.context,
+                                                    aggr_no_az['id'],
+                                                    fake_host)
+        self.assertIn(fake_host, aggr['hosts'])
+        self.assertIn(fake_host, aggr_no_az['hosts'])
+
+    def test_add_host_no_az_metadata(self):
+        # NOTE(mtreinish) based on how create works this is not how the
+        # metadata is supposed to end up in the database but it has
+        # been seen. See lp bug #1209007. This test just confirms that
+        # the host is still added to the aggregate if there is no
+        # availability zone metadata.
+        def fake_aggregate_metadata_get_by_metadata_key(*args, **kwargs):
+            # Simulate metadata that lacks an 'availability_zone' key.
+            return {'meta_key': 'fake_value'}
+        self.stubs.Set(self.compute.db,
+                       'aggregate_metadata_get_by_metadata_key',
+                       fake_aggregate_metadata_get_by_metadata_key)
+        values = _create_service_entries(self.context)
+        fake_zone = values[0][0]
+        fake_host = values[0][1][0]
+        aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
+                                         fake_zone)
+        aggr = self.api.add_host_to_aggregate(self.context, aggr['id'],
+                                              fake_host)
+        self.assertIn(fake_host, aggr['hosts'])
+
+    def test_add_host_to_multi_az(self):
+        # Ensure we can't add a host to a different availability zone
+        # once it is already in an aggregate with one AZ.
+        values = _create_service_entries(self.context)
+        fake_zone = values[0][0]
+        fake_host = values[0][1][0]
+        aggr = self.api.create_aggregate(self.context,
+                                         'fake_aggregate', fake_zone)
+        aggr = self.api.add_host_to_aggregate(self.context,
+                                              aggr['id'], fake_host)
+        self.assertEqual(len(aggr['hosts']), 1)
+        fake_zone2 = "another_zone"
+        aggr2 = self.api.create_aggregate(self.context,
+                                          'fake_aggregate2', fake_zone2)
+        self.assertRaises(exception.InvalidAggregateAction,
+                          self.api.add_host_to_aggregate,
+                          self.context, aggr2['id'], fake_host)
+
+    def test_add_host_to_multi_az_with_nova_agg(self):
+        # Ensure we can't add a host if already existing in an agg with AZ set
+        # to default
+        values = _create_service_entries(self.context)
+        fake_host = values[0][1][0]
+        aggr = self.api.create_aggregate(self.context,
+                                         'fake_aggregate',
+                                         CONF.default_availability_zone)
+        aggr = self.api.add_host_to_aggregate(self.context,
+                                              aggr['id'], fake_host)
+        self.assertEqual(len(aggr['hosts']), 1)
+        fake_zone2 = "another_zone"
+        aggr2 = self.api.create_aggregate(self.context,
+                                          'fake_aggregate2', fake_zone2)
+        # The default AZ counts as an AZ: adding the same host to an
+        # aggregate with a different zone must be rejected.
+        self.assertRaises(exception.InvalidAggregateAction,
+                          self.api.add_host_to_aggregate,
+                          self.context, aggr2['id'], fake_host)
+
+    def test_add_host_to_aggregate_multiple(self):
+        # Ensure we can add multiple hosts to an aggregate.
+        values = _create_service_entries(self.context)
+        fake_zone = values[0][0]
+        aggr = self.api.create_aggregate(self.context,
+                                         'fake_aggregate', fake_zone)
+        # Add every host of the first service entry, one at a time.
+        for host in values[0][1]:
+            aggr = self.api.add_host_to_aggregate(self.context,
+                                                  aggr['id'], host)
+        self.assertEqual(len(aggr['hosts']), len(values[0][1]))
+
+    def test_add_host_to_aggregate_raise_not_found(self):
+        # Ensure ComputeHostNotFound is raised when adding invalid host.
+        aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
+                                         'fake_zone')
+        fake_notifier.NOTIFICATIONS = []
+        self.assertRaises(exception.ComputeHostNotFound,
+                          self.api.add_host_to_aggregate,
+                          self.context, aggr['id'], 'invalid_host')
+        # Even the failed add emits two notifications; the second one
+        # is published by the compute service.
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+        self.assertEqual(fake_notifier.NOTIFICATIONS[1].publisher_id,
+                         'compute.fake-mini')
+
+    def test_remove_host_from_aggregate_active(self):
+        # Ensure we can remove a host from an aggregate.
+        values = _create_service_entries(self.context)
+        fake_zone = values[0][0]
+        aggr = self.api.create_aggregate(self.context,
+                                         'fake_aggregate', fake_zone)
+        for host in values[0][1]:
+            aggr = self.api.add_host_to_aggregate(self.context,
+                                                  aggr['id'], host)
+        host_to_remove = values[0][1][0]
+
+        # The aggregate handed to the RPC call must no longer contain
+        # the removed host.
+        def fake_remove_aggregate_host(*args, **kwargs):
+            hosts = kwargs["aggregate"]["hosts"]
+            self.assertNotIn(host_to_remove, hosts)
+
+        self.stubs.Set(self.api.compute_rpcapi, 'remove_aggregate_host',
+                       fake_remove_aggregate_host)
+
+        # Expect exactly one AZ-cache refresh for the removed host.
+        self.mox.StubOutWithMock(availability_zones,
+                                 'update_host_availability_zone_cache')
+        availability_zones.update_host_availability_zone_cache(self.context,
+                                                               host_to_remove)
+        self.mox.ReplayAll()
+
+        fake_notifier.NOTIFICATIONS = []
+        expected = self.api.remove_host_from_aggregate(self.context,
+                                                       aggr['id'],
+                                                       host_to_remove)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+        msg = fake_notifier.NOTIFICATIONS[0]
+        self.assertEqual(msg.event_type,
+                         'aggregate.removehost.start')
+        msg = fake_notifier.NOTIFICATIONS[1]
+        self.assertEqual(msg.event_type,
+                         'aggregate.removehost.end')
+        # The returned aggregate has exactly one host fewer.
+        self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
+
+    def test_remove_host_from_aggregate_raise_not_found(self):
+        # Ensure ComputeHostNotFound is raised when removing a host that
+        # does not exist.
+        _create_service_entries(self.context, [['fake_zone', ['fake_host']]])
+        aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
+                                         'fake_zone')
+        self.assertRaises(exception.ComputeHostNotFound,
+                          self.api.remove_host_from_aggregate,
+                          self.context, aggr['id'], 'invalid_host')
+
+    def test_aggregate_list(self):
+        # get_aggregate_list must return every aggregate, with its name,
+        # availability zone and metadata.
+        aggregate = self.api.create_aggregate(self.context,
+                                              'fake_aggregate',
+                                              'fake_zone')
+        metadata = {'foo_key1': 'foo_value1',
+                    'foo_key2': 'foo_value2'}
+        meta_aggregate = self.api.create_aggregate(self.context,
+                                                   'fake_aggregate2',
+                                                   'fake_zone2')
+        self.api.update_aggregate_metadata(self.context, meta_aggregate['id'],
+                                           metadata)
+        aggregate_list = self.api.get_aggregate_list(self.context)
+        self.assertIn(aggregate['id'],
+                      map(lambda x: x['id'], aggregate_list))
+        self.assertIn(meta_aggregate['id'],
+                      map(lambda x: x['id'], aggregate_list))
+        self.assertIn('fake_aggregate',
+                      map(lambda x: x['name'], aggregate_list))
+        self.assertIn('fake_aggregate2',
+                      map(lambda x: x['name'], aggregate_list))
+        self.assertIn('fake_zone',
+                      map(lambda x: x['availability_zone'], aggregate_list))
+        self.assertIn('fake_zone2',
+                      map(lambda x: x['availability_zone'], aggregate_list))
+        # NOTE(review): assumes the list is returned in creation order,
+        # so index 1 is the aggregate that carries the metadata.
+        test_meta_aggregate = aggregate_list[1]
+        self.assertIn('foo_key1', test_meta_aggregate.get('metadata'))
+        self.assertIn('foo_key2', test_meta_aggregate.get('metadata'))
+        self.assertEqual('foo_value1',
+                         test_meta_aggregate.get('metadata')['foo_key1'])
+        self.assertEqual('foo_value2',
+                         test_meta_aggregate.get('metadata')['foo_key2'])
+
+    def test_aggregate_list_with_hosts(self):
+        # Hosts added to an aggregate must show up in the listing.
+        values = _create_service_entries(self.context)
+        fake_zone = values[0][0]
+        host_aggregate = self.api.create_aggregate(self.context,
+                                                   'fake_aggregate',
+                                                   fake_zone)
+        self.api.add_host_to_aggregate(self.context, host_aggregate['id'],
+                                       values[0][1][0])
+        aggregate_list = self.api.get_aggregate_list(self.context)
+        aggregate = aggregate_list[0]
+        self.assertIn(values[0][1][0], aggregate.get('hosts'))
+
+
+class ComputeAggrTestCase(BaseTestCase):
+    """This is for unit coverage of aggregate-related methods
+    defined in nova.compute.manager.
+    """
+
+    def setUp(self):
+        super(ComputeAggrTestCase, self).setUp()
+        self.context = context.get_admin_context()
+        # One aggregate fixture, created directly in the DB, shared by
+        # every test in this case.
+        values = {'name': 'test_aggr'}
+        az = {'availability_zone': 'test_zone'}
+        self.aggr = db.aggregate_create(self.context, values, metadata=az)
+
+    def test_add_aggregate_host(self):
+        # The manager must delegate to the virt driver's add_to_aggregate.
+        def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
+            fake_driver_add_to_aggregate.called = True
+            return {"foo": "bar"}
+        self.stubs.Set(self.compute.driver, "add_to_aggregate",
+                       fake_driver_add_to_aggregate)
+
+        self.compute.add_aggregate_host(self.context, host="host",
+            aggregate=jsonutils.to_primitive(self.aggr), slave_info=None)
+        self.assertTrue(fake_driver_add_to_aggregate.called)
+
+    def test_remove_aggregate_host(self):
+        # The manager must delegate to the driver's remove_from_aggregate
+        # and pass the host through unchanged.
+        def fake_driver_remove_from_aggregate(context, aggregate, host,
+                                              **_ignore):
+            fake_driver_remove_from_aggregate.called = True
+            self.assertEqual("host", host, "host")
+            return {"foo": "bar"}
+        self.stubs.Set(self.compute.driver, "remove_from_aggregate",
+                       fake_driver_remove_from_aggregate)
+
+        self.compute.remove_aggregate_host(self.context,
+            aggregate=jsonutils.to_primitive(self.aggr), host="host",
+            slave_info=None)
+        self.assertTrue(fake_driver_remove_from_aggregate.called)
+
+    def test_add_aggregate_host_passes_slave_info_to_driver(self):
+        # slave_info must reach the driver as a keyword argument.
+        def driver_add_to_aggregate(context, aggregate, host, **kwargs):
+            self.assertEqual(self.context, context)
+            self.assertEqual(aggregate['id'], self.aggr['id'])
+            self.assertEqual(host, "the_host")
+            self.assertEqual("SLAVE_INFO", kwargs.get("slave_info"))
+
+        self.stubs.Set(self.compute.driver, "add_to_aggregate",
+                       driver_add_to_aggregate)
+
+        self.compute.add_aggregate_host(self.context, host="the_host",
+            slave_info="SLAVE_INFO",
+            aggregate=jsonutils.to_primitive(self.aggr))
+
+    def test_remove_from_aggregate_passes_slave_info_to_driver(self):
+        # slave_info must also be forwarded on the removal path.
+        def driver_remove_from_aggregate(context, aggregate, host, **kwargs):
+            self.assertEqual(self.context, context)
+            self.assertEqual(aggregate['id'], self.aggr['id'])
+            self.assertEqual(host, "the_host")
+            self.assertEqual("SLAVE_INFO", kwargs.get("slave_info"))
+
+        self.stubs.Set(self.compute.driver, "remove_from_aggregate",
+                       driver_remove_from_aggregate)
+
+        self.compute.remove_aggregate_host(self.context,
+            aggregate=jsonutils.to_primitive(self.aggr), host="the_host",
+            slave_info="SLAVE_INFO")
+
+
+class ComputePolicyTestCase(BaseTestCase):
+    """Verify that policy rules gate compute.API calls and that denied
+    actions raise PolicyNotAuthorized.
+    """
+
+    def setUp(self):
+        super(ComputePolicyTestCase, self).setUp()
+
+        self.compute_api = compute.API()
+
+    def test_actions_are_prefixed(self):
+        # check_policy must prepend the 'compute:' prefix to the action.
+        self.mox.StubOutWithMock(policy, 'enforce')
+        nova.policy.enforce(self.context, 'compute:reboot', {})
+        self.mox.ReplayAll()
+        compute_api.check_policy(self.context, 'reboot', {})
+
+    def test_wrapped_method(self):
+        instance = self._create_fake_instance_obj(params={'host': None,
+                                                          'cell_name': 'foo'})
+
+        # force delete to fail
+        rules = {"compute:delete": [["false:false"]]}
+        self.policy.set_rules(rules)
+
+        self.assertRaises(exception.PolicyNotAuthorized,
+                          self.compute_api.delete, self.context, instance)
+
+        # reset rules to allow deletion
+        rules = {"compute:delete": []}
+        self.policy.set_rules(rules)
+
+        self.compute_api.delete(self.context, instance)
+
+    def test_create_fail(self):
+        rules = {"compute:create": [["false:false"]]}
+        self.policy.set_rules(rules)
+
+        self.assertRaises(exception.PolicyNotAuthorized,
+                          self.compute_api.create, self.context, '1', '1')
+
+    def test_create_attach_volume_fail(self):
+        # NOTE(review): the disabled rule here is attach_network, which
+        # looks swapped with test_create_attach_network_fail below —
+        # confirm the intent against the test names.
+        rules = {
+            "compute:create": [],
+            "compute:create:attach_network": [["false:false"]],
+            "compute:create:attach_volume": [],
+        }
+        self.policy.set_rules(rules)
+
+        self.assertRaises(exception.PolicyNotAuthorized,
+                          self.compute_api.create, self.context, '1', '1',
+                          requested_networks='blah',
+                          block_device_mapping='blah')
+
+    def test_create_attach_network_fail(self):
+        # NOTE(review): see the note in test_create_attach_volume_fail;
+        # here the disabled rule is attach_volume.
+        rules = {
+            "compute:create": [],
+            "compute:create:attach_network": [],
+            "compute:create:attach_volume": [["false:false"]],
+        }
+        self.policy.set_rules(rules)
+
+        self.assertRaises(exception.PolicyNotAuthorized,
+                          self.compute_api.create, self.context, '1', '1',
+                          requested_networks='blah',
+                          block_device_mapping='blah')
+
+    def test_get_fail(self):
+        instance = self._create_fake_instance()
+
+        rules = {
+            "compute:get": [["false:false"]],
+        }
+        self.policy.set_rules(rules)
+
+        self.assertRaises(exception.PolicyNotAuthorized,
+                          self.compute_api.get, self.context, instance['uuid'])
+
+    def test_get_all_fail(self):
+        rules = {
+            "compute:get_all": [["false:false"]],
+        }
+        self.policy.set_rules(rules)
+
+        self.assertRaises(exception.PolicyNotAuthorized,
+                          self.compute_api.get_all, self.context)
+
+    def test_get_instance_faults(self):
+        # Denied even for an admin context once the rule is set.
+        instance1 = self._create_fake_instance()
+        instance2 = self._create_fake_instance()
+        instances = [instance1, instance2]
+
+        rules = {
+            "compute:get_instance_faults": [["false:false"]],
+        }
+        self.policy.set_rules(rules)
+
+        self.assertRaises(exception.PolicyNotAuthorized,
+                          self.compute_api.get_instance_faults,
+                          context.get_admin_context(), instances)
+
+    def test_force_host_fail(self):
+        rules = {"compute:create": [],
+                 "compute:create:forced_host": [["role:fake"]],
+                 "network:validate_networks": []}
+        self.policy.set_rules(rules)
+
+        self.assertRaises(exception.PolicyNotAuthorized,
+                          self.compute_api.create, self.context, None, '1',
+                          availability_zone='1:1')
+
+    def test_force_host_pass(self):
+        rules = {"compute:create": [],
+                 "compute:create:forced_host": [],
+                 "network:validate_networks": []}
+        self.policy.set_rules(rules)
+
+        self.compute_api.create(self.context, None, '1',
+                                availability_zone='1:1')
+
+
+class DisabledInstanceTypesTestCase(BaseTestCase):
+    """Some instance-types are marked 'disabled' which means that they will not
+    show up in customer-facing listings. We do, however, want those
+    instance-types to be available for emergency migrations and for rebuilding
+    of existing instances.
+
+    One legitimate use of the 'disabled' field would be when phasing out a
+    particular instance-type. We still want customers to be able to use an
+    instance of the old type, and we want Ops to be able to perform
+    migrations against it, but we *don't* want customers building new
+    instances with the phased-out instance-type.
+    """
+    def setUp(self):
+        super(DisabledInstanceTypesTestCase, self).setUp()
+        self.compute_api = compute.API()
+        self.inst_type = flavors.get_default_flavor()
+
+    def test_can_build_instance_from_visible_instance_type(self):
+        self.inst_type['disabled'] = False
+        # Assert that exception.FlavorNotFound is not raised
+        self.compute_api.create(self.context, self.inst_type,
+                                image_href='some-fake-image')
+
+    def test_cannot_build_instance_from_disabled_instance_type(self):
+        self.inst_type['disabled'] = True
+        self.assertRaises(exception.FlavorNotFound,
+            self.compute_api.create, self.context, self.inst_type, None)
+
+    def test_can_resize_to_visible_instance_type(self):
+        instance = self._create_fake_instance_obj()
+        orig_get_flavor_by_flavor_id =\
+                flavors.get_flavor_by_flavor_id
+
+        # Force the looked-up target flavor to appear enabled.
+        def fake_get_flavor_by_flavor_id(flavor_id, ctxt=None,
+                                         read_deleted="yes"):
+            instance_type = orig_get_flavor_by_flavor_id(flavor_id,
+                                                         ctxt,
+                                                         read_deleted)
+            instance_type['disabled'] = False
+            return instance_type
+
+        self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
+                       fake_get_flavor_by_flavor_id)
+
+        self._stub_migrate_server()
+        self.compute_api.resize(self.context, instance, '4')
+
+    def test_cannot_resize_to_disabled_instance_type(self):
+        instance = self._create_fake_instance_obj()
+        orig_get_flavor_by_flavor_id = \
+                flavors.get_flavor_by_flavor_id
+
+        # Force the looked-up target flavor to appear disabled.
+        def fake_get_flavor_by_flavor_id(flavor_id, ctxt=None,
+                                         read_deleted="yes"):
+            instance_type = orig_get_flavor_by_flavor_id(flavor_id,
+                                                         ctxt,
+                                                         read_deleted)
+            instance_type['disabled'] = True
+            return instance_type
+
+        self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
+                       fake_get_flavor_by_flavor_id)
+
+        self.assertRaises(exception.FlavorNotFound,
+            self.compute_api.resize, self.context, instance, '4')
+
+
+class ComputeReschedulingTestCase(BaseTestCase):
+    """Tests re-scheduling logic for new build requests."""
+
+    def setUp(self):
+        super(ComputeReschedulingTestCase, self).setUp()
+
+        self.expected_task_state = task_states.SCHEDULING
+
+        # Capture the task_state written by _instance_update so the
+        # tests can assert on it.
+        def fake_update(*args, **kwargs):
+            self.updated_task_state = kwargs.get('task_state')
+        self.stubs.Set(self.compute, '_instance_update', fake_update)
+
+    def _reschedule(self, request_spec=None, filter_properties=None,
+                    exc_info=None):
+        # Drive compute manager _reschedule with a fresh fake instance
+        # and the given spec/filter properties; returns its result.
+        if not filter_properties:
+            filter_properties = {}
+
+        instance = self._create_fake_instance_obj()
+
+        admin_password = None
+        injected_files = None
+        requested_networks = None
+        is_first_time = False
+
+        scheduler_method = self.compute.scheduler_rpcapi.run_instance
+        method_args = (request_spec, admin_password, injected_files,
+                       requested_networks, is_first_time, filter_properties)
+        return self.compute._reschedule(self.context, request_spec,
+                filter_properties, instance, scheduler_method,
+                method_args, self.expected_task_state, exc_info=exc_info)
+
+    def test_reschedule_no_filter_properties(self):
+        # no filter_properties will disable re-scheduling.
+        self.assertFalse(self._reschedule())
+
+    def test_reschedule_no_retry_info(self):
+        # no retry info will also disable re-scheduling.
+        filter_properties = {}
+        self.assertFalse(self._reschedule(filter_properties=filter_properties))
+
+    def test_reschedule_no_request_spec(self):
+        # no request spec will also disable re-scheduling.
+        retry = dict(num_attempts=1)
+        filter_properties = dict(retry=retry)
+        self.assertFalse(self._reschedule(filter_properties=filter_properties))
+
+    def test_reschedule_success(self):
+        retry = dict(num_attempts=1)
+        filter_properties = dict(retry=retry)
+        request_spec = {'instance_uuids': ['foo', 'bar']}
+        # Raise and catch so a real exc_info triple is available.
+        try:
+            raise test.TestingException("just need an exception")
+        except test.TestingException:
+            exc_info = sys.exc_info()
+            exc_str = traceback.format_exception_only(exc_info[0],
+                                                      exc_info[1])
+
+        self.assertTrue(self._reschedule(filter_properties=filter_properties,
+            request_spec=request_spec, exc_info=exc_info))
+        # The spec is trimmed to a single uuid, the task state is reset,
+        # and the formatted exception is stored in the retry info.
+        self.assertEqual(1, len(request_spec['instance_uuids']))
+        self.assertEqual(self.updated_task_state, self.expected_task_state)
+        self.assertEqual(exc_str, filter_properties['retry']['exc'])
+
+
+class ComputeReschedulingResizeTestCase(ComputeReschedulingTestCase):
+    """Test re-scheduling logic for prep_resize requests."""
+
+    def setUp(self):
+        super(ComputeReschedulingResizeTestCase, self).setUp()
+        self.expected_task_state = task_states.RESIZE_PREP
+
+    def _reschedule(self, request_spec=None, filter_properties=None,
+                    exc_info=None):
+        # Same driver as the parent class, but targets the resize path
+        # (compute_task_api.resize_instance) instead of run_instance.
+        if not filter_properties:
+            filter_properties = {}
+
+        instance_uuid = str(uuid.uuid4())
+        instance = self._create_fake_instance_obj(
+            params={'uuid': instance_uuid})
+        instance_type = {}
+        reservations = None
+
+        scheduler_method = self.compute.compute_task_api.resize_instance
+        scheduler_hint = dict(filter_properties=filter_properties)
+        method_args = (instance, None, scheduler_hint, instance_type,
+                       reservations)
+
+        return self.compute._reschedule(self.context, request_spec,
+                filter_properties, instance, scheduler_method,
+                method_args, self.expected_task_state, exc_info=exc_info)
+
+
+class InnerTestingException(Exception):
+    """Distinct exception type used by these tests to simulate a
+    secondary failure inside cleanup/reschedule paths, so it can be
+    told apart from the original build error.
+    """
+    pass
+
+
+class ComputeRescheduleOrErrorTestCase(BaseTestCase):
+    """Test logic and exception handling around rescheduling or re-raising
+    original exceptions when builds fail.
+    """
+
+    def setUp(self):
+        super(ComputeRescheduleOrErrorTestCase, self).setUp()
+        self.instance = self._create_fake_instance_obj()
+
+    def test_reschedule_or_error_called(self):
+        """Basic sanity check to make sure _reschedule_or_error is called
+        when a build fails.
+        """
+        self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
+                                 'get_by_instance_uuid')
+        self.mox.StubOutWithMock(self.compute, '_spawn')
+        self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
+
+        bdms = block_device_obj.block_device_make_list(self.context, [])
+
+        objects.BlockDeviceMappingList.get_by_instance_uuid(
+                mox.IgnoreArg(), self.instance.uuid).AndReturn(bdms)
+        # _spawn blows up; _reschedule_or_error must then be invoked.
+        self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
+                [], mox.IgnoreArg(), [], None, set_access_ip=False).AndRaise(
+                        test.TestingException("BuildError"))
+        self.compute._reschedule_or_error(mox.IgnoreArg(), self.instance,
+                mox.IgnoreArg(), None, None, None,
+                False, None, {}, bdms, False).AndReturn(True)
+
+        self.mox.ReplayAll()
+        self.compute._run_instance(self.context, None, {}, None, None, None,
+                False, None, self.instance, False)
+
+    def test_shutdown_instance_fail(self):
+        """Test shutdown instance failing before re-scheduling logic can even
+        run.
+        """
+        instance_uuid = self.instance['uuid']
+        self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+
+        # Raise and catch so a real exc_info triple is available.
+        try:
+            raise test.TestingException("Original")
+        except Exception:
+            exc_info = sys.exc_info()
+
+        compute_utils.add_instance_fault_from_exc(self.context,
+                self.instance, exc_info[0], exc_info=exc_info)
+        self.compute._shutdown_instance(mox.IgnoreArg(), self.instance,
+                                        mox.IgnoreArg(),
+                                        mox.IgnoreArg()).AndRaise(InnerTestingException("Error"))
+        self.compute._log_original_error(exc_info, instance_uuid)
+
+        self.mox.ReplayAll()
+
+        # should raise the deallocation exception, not the original build
+        # error:
+        self.assertRaises(InnerTestingException,
+                self.compute._reschedule_or_error, self.context,
+                self.instance, exc_info, None, None, None, False, None, {})
+
+    def test_shutdown_instance_fail_instance_info_cache_not_found(self):
+        # Covers the case that _shutdown_instance fails with an
+        # InstanceInfoCacheNotFound exception when getting instance network
+        # information prior to calling driver.destroy.
+        elevated_context = self.context.elevated()
+        error = exception.InstanceInfoCacheNotFound(
+            instance_uuid=self.instance['uuid'])
+        with contextlib.nested(
+            mock.patch.object(self.context, 'elevated',
+                              return_value=elevated_context),
+            mock.patch.object(self.compute, '_get_instance_nw_info',
+                              side_effect=error),
+            mock.patch.object(self.compute,
+                              '_get_instance_block_device_info'),
+            mock.patch.object(self.compute.driver, 'destroy'),
+            mock.patch.object(self.compute, '_try_deallocate_network')
+        ) as (
+            elevated_mock,
+            _get_instance_nw_info_mock,
+            _get_instance_block_device_info_mock,
+            destroy_mock,
+            _try_deallocate_network_mock
+        ):
+            inst_obj = self.instance
+            self.compute._shutdown_instance(self.context, inst_obj,
+                                            bdms=[], notify=False)
+            # By asserting that _try_deallocate_network_mock was called
+            # exactly once, we know that _get_instance_nw_info raising
+            # InstanceInfoCacheNotFound did not make _shutdown_instance error
+            # out and driver.destroy was still called.
+            _try_deallocate_network_mock.assert_called_once_with(
+                elevated_context, inst_obj, None)
+
+    def test_reschedule_fail(self):
+        # Test handling of exception from _reschedule.
+        try:
+            raise test.TestingException("Original")
+        except Exception:
+            exc_info = sys.exc_info()
+
+        instance_uuid = self.instance['uuid']
+        method_args = (None, None, None, None, False, {})
+        self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+        self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
+        self.mox.StubOutWithMock(self.compute, '_reschedule')
+
+        self.compute._shutdown_instance(mox.IgnoreArg(), self.instance,
+                                        mox.IgnoreArg(),
+                                        mox.IgnoreArg())
+        self.compute._cleanup_volumes(mox.IgnoreArg(), instance_uuid,
+                                      mox.IgnoreArg())
+        # _reschedule itself raises; _reschedule_or_error must swallow
+        # the inner error and report False.
+        self.compute._reschedule(self.context, None, self.instance,
+                {}, self.compute.scheduler_rpcapi.run_instance,
+                method_args, task_states.SCHEDULING, exc_info).AndRaise(
+                        InnerTestingException("Inner"))
+
+        self.mox.ReplayAll()
+
+        self.assertFalse(self.compute._reschedule_or_error(self.context,
+            self.instance, exc_info, None, None, None, False, None, {}))
+
+    def test_reschedule_false(self):
+        # Test not-rescheduling, but no nested exception.
+        instance_uuid = self.instance['uuid']
+        method_args = (None, None, None, None, False, {})
+        self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+        self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
+        self.mox.StubOutWithMock(self.compute, '_reschedule')
+
+        try:
+            raise test.TestingException("Original")
+        except test.TestingException:
+            exc_info = sys.exc_info()
+            compute_utils.add_instance_fault_from_exc(self.context,
+                    self.instance, exc_info[0], exc_info=exc_info)
+
+            self.compute._shutdown_instance(mox.IgnoreArg(), self.instance,
+                                            mox.IgnoreArg(),
+                                            mox.IgnoreArg())
+            self.compute._cleanup_volumes(mox.IgnoreArg(), instance_uuid,
+                                          mox.IgnoreArg())
+            self.compute._reschedule(self.context, None, {}, self.instance,
+                    self.compute.scheduler_rpcapi.run_instance, method_args,
+                    task_states.SCHEDULING, exc_info).AndReturn(False)
+
+            self.mox.ReplayAll()
+
+            # re-scheduling is False, the original build error should be
+            # raised here:
+            self.assertFalse(self.compute._reschedule_or_error(self.context,
+                self.instance, exc_info, None, None, None, False, None, {}))
+
+    def test_reschedule_true(self):
+        # Test behavior when re-scheduling happens.
+        instance_uuid = self.instance['uuid']
+        method_args = (None, None, None, None, False, {})
+        self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+        self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
+        self.mox.StubOutWithMock(self.compute, '_reschedule')
+
+        try:
+            raise test.TestingException("Original")
+        except Exception:
+            exc_info = sys.exc_info()
+
+            compute_utils.add_instance_fault_from_exc(self.context,
+                    self.instance, exc_info[0], exc_info=exc_info)
+            self.compute._shutdown_instance(mox.IgnoreArg(), self.instance,
+                                            mox.IgnoreArg(),
+                                            mox.IgnoreArg())
+            self.compute._cleanup_volumes(mox.IgnoreArg(), instance_uuid,
+                                          mox.IgnoreArg())
+            self.compute._reschedule(self.context, None, {}, self.instance,
+                    self.compute.scheduler_rpcapi.run_instance,
+                    method_args, task_states.SCHEDULING, exc_info).AndReturn(
+                            True)
+            self.compute._log_original_error(exc_info, instance_uuid)
+
+            self.mox.ReplayAll()
+
+            # re-scheduling is True, original error is logged, but nothing
+            # is raised:
+            self.compute._reschedule_or_error(self.context, self.instance,
+                    exc_info, None, None, None, False, None, {})
+
+    def test_no_reschedule_on_delete_during_spawn(self):
+        # instance should not be rescheduled if instance is deleted
+        # during the build
+        self.mox.StubOutWithMock(self.compute, '_spawn')
+        self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
+
+        exc = exception.UnexpectedDeletingTaskStateError(
+            expected=task_states.SPAWNING, actual=task_states.DELETING)
+        self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
+                mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
+                mox.IgnoreArg(), set_access_ip=False).AndRaise(exc)
+
+        self.mox.ReplayAll()
+        # test succeeds if mocked method '_reschedule_or_error' is not
+        # called.
+        self.compute._run_instance(self.context, None, {}, None, None, None,
+                False, None, self.instance, False)
+
+    def test_no_reschedule_on_unexpected_task_state(self):
+        # instance shouldn't be rescheduled if unexpected task state arises.
+        # the exception should get reraised.
+        self.mox.StubOutWithMock(self.compute, '_spawn')
+        self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
+
+        exc = exception.UnexpectedTaskStateError(expected=task_states.SPAWNING,
+                actual=task_states.SCHEDULING)
+        self.compute._spawn(mox.IgnoreArg(), self.instance, mox.IgnoreArg(),
+                mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
+                mox.IgnoreArg(), set_access_ip=False).AndRaise(exc)
+
+        self.mox.ReplayAll()
+        self.assertRaises(exception.UnexpectedTaskStateError,
+                self.compute._run_instance, self.context, None, {}, None, None,
+                None, False, None, self.instance, False)
+
+    def test_no_reschedule_on_block_device_fail(self):
+        # A bad block device mapping must re-raise, not reschedule.
+        self.mox.StubOutWithMock(self.compute, '_prep_block_device')
+        self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
+
+        exc = exception.InvalidBDM()
+
+        self.compute._prep_block_device(mox.IgnoreArg(), self.instance,
+                                        mox.IgnoreArg()).AndRaise(exc)
+
+        self.mox.ReplayAll()
+        self.assertRaises(exception.InvalidBDM, self.compute._run_instance,
+                          self.context, None, {}, None, None, None, False,
+                          None, self.instance, False)
+
+
+class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
+ """Test logic and exception handling around rescheduling prep resize
+ requests
+ """
+    def setUp(self):
+        super(ComputeRescheduleResizeOrReraiseTestCase, self).setUp()
+        self.instance = self._create_fake_instance()
+        self.instance_uuid = self.instance['uuid']
+        # Shared resize-target flavor fixture for this test case.
+        self.instance_type = flavors.get_flavor_by_name(
+                "m1.tiny")
+
+    def test_reschedule_resize_or_reraise_called(self):
+        """Verify the rescheduling logic gets called when there is an error
+        during prep_resize.
+        """
+        inst_obj = self._create_fake_instance_obj()
+
+        self.mox.StubOutWithMock(self.compute.db, 'migration_create')
+        self.mox.StubOutWithMock(self.compute, '_reschedule_resize_or_reraise')
+
+        # migration_create failing must route into the reschedule path.
+        self.compute.db.migration_create(mox.IgnoreArg(),
+                                         mox.IgnoreArg()).AndRaise(test.TestingException("Original"))
+
+        self.compute._reschedule_resize_or_reraise(mox.IgnoreArg(), None,
+                inst_obj, mox.IgnoreArg(), self.instance_type,
+                mox.IgnoreArg(), {},
+                {})
+
+        self.mox.ReplayAll()
+
+        self.compute.prep_resize(self.context, image=None,
+                                 instance=inst_obj,
+                                 instance_type=self.instance_type,
+                                 reservations=[], request_spec={},
+                                 filter_properties={}, node=None)
+
+    def test_reschedule_fails_with_exception(self):
+        """Original exception should be raised if the _reschedule method
+        raises another exception
+        """
+        instance = self._create_fake_instance_obj()
+        scheduler_hint = dict(filter_properties={})
+        method_args = (instance, None, scheduler_hint, self.instance_type,
+                       None)
+        self.mox.StubOutWithMock(self.compute, "_reschedule")
+
+        self.compute._reschedule(
+                self.context, None, None, instance,
+                self.compute.compute_task_api.resize_instance, method_args,
+                task_states.RESIZE_PREP).AndRaise(
+                        InnerTestingException("Inner"))
+        self.mox.ReplayAll()
+
+        # Raise/catch so a real exc_info triple reaches the method under
+        # test; the *original* TestingException must win over the inner one.
+        try:
+            raise test.TestingException("Original")
+        except Exception:
+            exc_info = sys.exc_info()
+            self.assertRaises(test.TestingException,
+                    self.compute._reschedule_resize_or_reraise, self.context,
+                    None, instance, exc_info, self.instance_type,
+                    self.none_quotas, {}, {})
+
+ def test_reschedule_false(self):
+ """Original exception should be raised if the resize is not
+ rescheduled.
+ """
+ instance = self._create_fake_instance_obj()
+ scheduler_hint = dict(filter_properties={})
+ method_args = (instance, None, scheduler_hint, self.instance_type,
+ None)
+ self.mox.StubOutWithMock(self.compute, "_reschedule")
+
+ self.compute._reschedule(
+ self.context, None, None, instance,
+ self.compute.compute_task_api.resize_instance, method_args,
+ task_states.RESIZE_PREP).AndReturn(False)
+ self.mox.ReplayAll()
+
+ try:
+ raise test.TestingException("Original")
+ except Exception:
+ exc_info = sys.exc_info()
+ self.assertRaises(test.TestingException,
+ self.compute._reschedule_resize_or_reraise, self.context,
+ None, instance, exc_info, self.instance_type,
+ self.none_quotas, {}, {})
+
+ def test_reschedule_true(self):
+ # If rescheduled, the original resize exception should be logged.
+ instance = self._create_fake_instance_obj()
+ scheduler_hint = dict(filter_properties={})
+ method_args = (instance, None, scheduler_hint, self.instance_type,
+ None)
+
+ try:
+ raise test.TestingException("Original")
+ except Exception:
+ exc_info = sys.exc_info()
+
+ self.mox.StubOutWithMock(self.compute, "_reschedule")
+ self.mox.StubOutWithMock(self.compute, "_log_original_error")
+ self.compute._reschedule(self.context, {}, {},
+ instance,
+ self.compute.compute_task_api.resize_instance, method_args,
+ task_states.RESIZE_PREP, exc_info).AndReturn(True)
+
+ self.compute._log_original_error(exc_info, instance.uuid)
+ self.mox.ReplayAll()
+
+ self.compute._reschedule_resize_or_reraise(
+ self.context, None, instance, exc_info,
+ self.instance_type, self.none_quotas, {}, {})
+
+
+class ComputeInactiveImageTestCase(BaseTestCase):
+ def setUp(self):
+ super(ComputeInactiveImageTestCase, self).setUp()
+
+ def fake_show(meh, context, id, **kwargs):
+ return {'id': id, 'min_disk': None, 'min_ram': None,
+ 'name': 'fake_name',
+ 'status': 'deleted',
+ 'properties': {'kernel_id': 'fake_kernel_id',
+ 'ramdisk_id': 'fake_ramdisk_id',
+ 'something_else': 'meow'}}
+
+ fake_image.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ self.compute_api = compute.API()
+
+ def test_create_instance_with_deleted_image(self):
+ # Make sure we can't start an instance with a deleted image.
+ inst_type = flavors.get_flavor_by_name('m1.tiny')
+ self.assertRaises(exception.ImageNotActive,
+ self.compute_api.create,
+ self.context, inst_type, 'fake-image-uuid')
+
+
+class EvacuateHostTestCase(BaseTestCase):
+ def setUp(self):
+ super(EvacuateHostTestCase, self).setUp()
+ self.inst = self._create_fake_instance_obj(
+ {'host': 'fake_host_2', 'node': 'fakenode2'})
+ self.inst.task_state = task_states.REBUILDING
+ self.inst.save()
+
+ def tearDown(self):
+ db.instance_destroy(self.context, self.inst.uuid)
+ super(EvacuateHostTestCase, self).tearDown()
+
+ def _rebuild(self, on_shared_storage=True):
+ def fake(cls, ctxt, instance, *args, **kwargs):
+ pass
+
+ self.stubs.Set(network_api.API, 'setup_networks_on_host', fake)
+
+ orig_image_ref = None
+ image_ref = None
+ injected_files = None
+ bdms = db.block_device_mapping_get_all_by_instance(self.context,
+ self.inst.uuid)
+ self.compute.rebuild_instance(
+ self.context, self.inst, orig_image_ref,
+ image_ref, injected_files, 'newpass', {}, bdms, recreate=True,
+ on_shared_storage=on_shared_storage)
+
+ def test_rebuild_on_host_updated_target(self):
+ """Confirm evacuate scenario updates host and node."""
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+
+ def fake_get_compute_info(context, host):
+ self.assertTrue(context.is_admin)
+ self.assertEqual('fake-mini', host)
+ cn = objects.ComputeNode(hypervisor_hostname=self.rt.nodename)
+ return cn
+
+ self.stubs.Set(self.compute, '_get_compute_info',
+ fake_get_compute_info)
+ self.mox.ReplayAll()
+
+ self._rebuild()
+
+ # Should be on destination host
+ instance = db.instance_get(self.context, self.inst.id)
+ self.assertEqual(instance['host'], self.compute.host)
+ self.assertEqual(NODENAME, instance['node'])
+
+ def test_rebuild_on_host_updated_target_node_not_found(self):
+ """Confirm evacuate scenario where compute_node isn't found."""
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+
+ def fake_get_compute_info(context, host):
+ raise exception.NotFound(_("Host %s not found") % host)
+
+ self.stubs.Set(self.compute, '_get_compute_info',
+ fake_get_compute_info)
+ self.mox.ReplayAll()
+
+ self._rebuild()
+
+ # Should be on destination host
+ instance = db.instance_get(self.context, self.inst.id)
+ self.assertEqual(instance['host'], self.compute.host)
+ self.assertIsNone(instance['node'])
+
+ def test_rebuild_with_instance_in_stopped_state(self):
+ """Confirm evacuate scenario updates vm_state to stopped
+ if instance is in stopped state
+ """
+ # Initialize the VM to stopped state
+ db.instance_update(self.context, self.inst.uuid,
+ {"vm_state": vm_states.STOPPED})
+ self.inst.vm_state = vm_states.STOPPED
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+ self.mox.ReplayAll()
+
+ self._rebuild()
+
+ # Check the vm state is reset to stopped
+ instance = db.instance_get(self.context, self.inst.id)
+ self.assertEqual(instance['vm_state'], vm_states.STOPPED)
+
+ def test_rebuild_with_wrong_shared_storage(self):
+ """Confirm evacuate scenario does not update host."""
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.InvalidSharedStorage,
+ lambda: self._rebuild(on_shared_storage=False))
+
+ # Should remain on original host
+ instance = db.instance_get(self.context, self.inst.id)
+ self.assertEqual(instance['host'], 'fake_host_2')
+
+ def test_rebuild_on_host_with_volumes(self):
+ """Confirm evacuate scenario reconnects volumes."""
+ values = {'instance_uuid': self.inst.uuid,
+ 'source_type': 'volume',
+ 'device_name': '/dev/vdc',
+ 'delete_on_termination': False,
+ 'volume_id': 'fake_volume_id'}
+
+ db.block_device_mapping_create(self.context, values)
+
+ def fake_volume_get(self, context, volume):
+ return {'id': 'fake_volume_id'}
+ self.stubs.Set(cinder.API, "get", fake_volume_get)
+
+ # Stub out and record whether it gets detached
+ result = {"detached": False}
+
+ def fake_detach(self, context, volume):
+ result["detached"] = volume["id"] == 'fake_volume_id'
+ self.stubs.Set(cinder.API, "detach", fake_detach)
+
+ def fake_terminate_connection(self, context, volume, connector):
+ return {}
+ self.stubs.Set(cinder.API, "terminate_connection",
+ fake_terminate_connection)
+
+        # make sure the volume detach and block device prep calls are made
+ self.mox.StubOutWithMock(self.compute.volume_api, 'detach')
+ self.compute.volume_api.detach(mox.IsA(self.context), mox.IgnoreArg())
+
+ self.mox.StubOutWithMock(self.compute, '_prep_block_device')
+ self.compute._prep_block_device(mox.IsA(self.context),
+ mox.IsA(objects.Instance),
+ mox.IgnoreArg())
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+ self.mox.ReplayAll()
+
+ self._rebuild()
+
+ # cleanup
+ for bdms in db.block_device_mapping_get_all_by_instance(
+ self.context, self.inst.uuid):
+ db.block_device_mapping_destroy(self.context, bdms['id'])
+
+ def test_rebuild_on_host_with_shared_storage(self):
+ """Confirm evacuate scenario on shared storage."""
+ self.mox.StubOutWithMock(self.compute.driver, 'spawn')
+ self.compute.driver.spawn(mox.IsA(self.context),
+ mox.IsA(objects.Instance), {}, mox.IgnoreArg(), 'newpass',
+ network_info=mox.IgnoreArg(),
+ block_device_info=mox.IgnoreArg())
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+ self.mox.ReplayAll()
+
+ self._rebuild()
+
+ def test_rebuild_on_host_without_shared_storage(self):
+ """Confirm evacuate scenario without shared storage
+ (rebuild from image)
+ """
+ fake_image = {'id': 1,
+ 'name': 'fake_name',
+ 'properties': {'kernel_id': 'fake_kernel_id',
+ 'ramdisk_id': 'fake_ramdisk_id'}}
+
+ self.mox.StubOutWithMock(self.compute.driver, 'spawn')
+ self.compute.driver.spawn(mox.IsA(self.context),
+ mox.IsA(objects.Instance), mox.IsA(fake_image),
+ mox.IgnoreArg(), mox.IsA('newpass'),
+ network_info=mox.IgnoreArg(),
+ block_device_info=mox.IgnoreArg())
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ lambda x: False)
+ self.mox.ReplayAll()
+
+ self._rebuild(on_shared_storage=False)
+
+ def test_rebuild_on_host_instance_exists(self):
+ """Rebuild if instance exists raises an exception."""
+ db.instance_update(self.context, self.inst.uuid,
+ {"task_state": task_states.SCHEDULING})
+ self.compute.run_instance(self.context,
+ self.inst, {}, {},
+ [], None, None, True, None, False)
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+ self.assertRaises(exception.InstanceExists,
+ lambda: self._rebuild(on_shared_storage=True))
+
+ def test_driver_does_not_support_recreate(self):
+ with utils.temporary_mutation(self.compute.driver.capabilities,
+ supports_recreate=False):
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ lambda x: True)
+ self.assertRaises(exception.InstanceRecreateNotSupported,
+ lambda: self._rebuild(on_shared_storage=True))
+
+
+class ComputeInjectedFilesTestCase(BaseTestCase):
+ # Test that running instances with injected_files decodes files correctly
+
+ def setUp(self):
+ super(ComputeInjectedFilesTestCase, self).setUp()
+ self.instance = self._create_fake_instance_obj()
+ self.stubs.Set(self.compute.driver, 'spawn', self._spawn)
+
+ def _spawn(self, context, instance, image_meta, injected_files,
+ admin_password, nw_info, block_device_info, db_api=None):
+ self.assertEqual(self.expected, injected_files)
+
+ def _test(self, injected_files, decoded_files):
+ self.expected = decoded_files
+ self.compute.run_instance(self.context, self.instance, {}, {}, [],
+ injected_files, None, True, None, False)
+
+ def test_injected_none(self):
+ # test an input of None for injected_files
+ self._test(None, [])
+
+ def test_injected_empty(self):
+ # test an input of [] for injected_files
+ self._test([], [])
+
+ def test_injected_success(self):
+ # test with valid b64 encoded content.
+ injected_files = [
+ ('/a/b/c', base64.b64encode('foobarbaz')),
+ ('/d/e/f', base64.b64encode('seespotrun')),
+ ]
+
+ decoded_files = [
+ ('/a/b/c', 'foobarbaz'),
+ ('/d/e/f', 'seespotrun'),
+ ]
+ self._test(injected_files, decoded_files)
+
+ def test_injected_invalid(self):
+ # test with invalid b64 encoded content
+ injected_files = [
+ ('/a/b/c', base64.b64encode('foobarbaz')),
+ ('/d/e/f', 'seespotrun'),
+ ]
+
+ self.assertRaises(exception.Base64Exception, self.compute.run_instance,
+ self.context, self.instance, {}, {}, [], injected_files, None,
+ True, None, False)
+
+ def test_reschedule(self):
+ # test that rescheduling is done with original encoded files
+ expected = [
+ ('/a/b/c', base64.b64encode('foobarbaz')),
+ ('/d/e/f', base64.b64encode('seespotrun')),
+ ]
+
+ def _roe(context, instance, exc_info, requested_networks,
+ admin_password, injected_files, is_first_time, request_spec,
+ filter_properties, bdms=None, legacy_bdm_in_spec=False):
+ self.assertEqual(expected, injected_files)
+ return True
+
+ def spawn_explode(context, instance, image_meta, injected_files,
+ admin_password, nw_info, block_device_info):
+ # force reschedule logic to execute
+ raise test.TestingException(_("spawn error"))
+
+ self.stubs.Set(self.compute.driver, 'spawn', spawn_explode)
+ self.stubs.Set(self.compute, '_reschedule_or_error', _roe)
+
+ self.compute.run_instance(self.context, self.instance, {}, {}, [],
+ expected, None, True, None, False)
+
+
+class CheckConfigDriveTestCase(test.TestCase):
+ # NOTE(sirp): `TestCase` is far too heavyweight for this test, this should
+ # probably derive from a `test.FastTestCase` that omits DB and env
+ # handling
+ def setUp(self):
+ super(CheckConfigDriveTestCase, self).setUp()
+ self.compute_api = compute.API()
+
+ def _assertCheck(self, expected, config_drive):
+ self.assertEqual(expected,
+ self.compute_api._check_config_drive(config_drive))
+
+ def _assertInvalid(self, config_drive):
+ self.assertRaises(exception.ConfigDriveInvalidValue,
+ self.compute_api._check_config_drive,
+ config_drive)
+
+ def test_config_drive_false_values(self):
+ self._assertCheck('', None)
+ self._assertCheck('', '')
+ self._assertCheck('', 'False')
+ self._assertCheck('', 'f')
+ self._assertCheck('', '0')
+
+ def test_config_drive_true_values(self):
+ self._assertCheck(True, 'True')
+ self._assertCheck(True, 't')
+ self._assertCheck(True, '1')
+
+ def test_config_drive_bogus_values_raise(self):
+ self._assertInvalid('asd')
+ self._assertInvalid(uuidutils.generate_uuid())
+
+
+class CheckRequestedImageTestCase(test.TestCase):
+ def setUp(self):
+ super(CheckRequestedImageTestCase, self).setUp()
+ self.compute_api = compute.API()
+ self.context = context.RequestContext(
+ 'fake_user_id', 'fake_project_id')
+
+ self.instance_type = flavors.get_default_flavor()
+ self.instance_type['memory_mb'] = 64
+ self.instance_type['root_gb'] = 1
+
+ def test_no_image_specified(self):
+ self.compute_api._check_requested_image(self.context, None, None,
+ self.instance_type)
+
+ def test_image_status_must_be_active(self):
+ image = dict(id='123', status='foo')
+
+ self.assertRaises(exception.ImageNotActive,
+ self.compute_api._check_requested_image, self.context,
+ image['id'], image, self.instance_type)
+
+ image['status'] = 'active'
+ self.compute_api._check_requested_image(self.context, image['id'],
+ image, self.instance_type)
+
+ def test_image_min_ram_check(self):
+ image = dict(id='123', status='active', min_ram='65')
+
+ self.assertRaises(exception.FlavorMemoryTooSmall,
+ self.compute_api._check_requested_image, self.context,
+ image['id'], image, self.instance_type)
+
+ image['min_ram'] = '64'
+ self.compute_api._check_requested_image(self.context, image['id'],
+ image, self.instance_type)
+
+ def test_image_min_disk_check(self):
+ image = dict(id='123', status='active', min_disk='2')
+
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ self.compute_api._check_requested_image, self.context,
+ image['id'], image, self.instance_type)
+
+ image['min_disk'] = '1'
+ self.compute_api._check_requested_image(self.context, image['id'],
+ image, self.instance_type)
+
+ def test_image_too_large(self):
+ image = dict(id='123', status='active', size='1073741825')
+
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ self.compute_api._check_requested_image, self.context,
+ image['id'], image, self.instance_type)
+
+ image['size'] = '1073741824'
+ self.compute_api._check_requested_image(self.context, image['id'],
+ image, self.instance_type)
+
+ def test_root_gb_zero_disables_size_check(self):
+ self.instance_type['root_gb'] = 0
+ image = dict(id='123', status='active', size='1073741825')
+
+ self.compute_api._check_requested_image(self.context, image['id'],
+ image, self.instance_type)
+
+ def test_root_gb_zero_disables_min_disk(self):
+ self.instance_type['root_gb'] = 0
+ image = dict(id='123', status='active', min_disk='2')
+
+ self.compute_api._check_requested_image(self.context, image['id'],
+ image, self.instance_type)
+
+ def test_config_drive_option(self):
+ image = {'id': 1, 'status': 'active'}
+ image['properties'] = {'img_config_drive': 'optional'}
+ self.compute_api._check_requested_image(self.context, image['id'],
+ image, self.instance_type)
+ image['properties'] = {'img_config_drive': 'mandatory'}
+ self.compute_api._check_requested_image(self.context, image['id'],
+ image, self.instance_type)
+ image['properties'] = {'img_config_drive': 'bar'}
+ self.assertRaises(exception.InvalidImageConfigDrive,
+ self.compute_api._check_requested_image,
+ self.context, image['id'], image, self.instance_type)
+
+
+class ComputeHooksTestCase(test.BaseHookTestCase):
+ def test_delete_instance_has_hook(self):
+ delete_func = compute_manager.ComputeManager._delete_instance
+ self.assert_has_hook('delete_instance', delete_func)
+
+ def test_create_instance_has_hook(self):
+ create_func = compute_api.API.create
+ self.assert_has_hook('create_instance', create_func)
diff --git a/nova/tests/unit/compute/test_compute_api.py b/nova/tests/unit/compute/test_compute_api.py
new file mode 100644
index 0000000000..10ac29d3dd
--- /dev/null
+++ b/nova/tests/unit/compute/test_compute_api.py
@@ -0,0 +1,2635 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for compute API."""
+
+import contextlib
+import copy
+import datetime
+
+import iso8601
+import mock
+import mox
+from oslo.utils import timeutils
+
+from nova.compute import api as compute_api
+from nova.compute import arch
+from nova.compute import cells_api as compute_cells_api
+from nova.compute import delete_types
+from nova.compute import flavors
+from nova.compute import instance_actions
+from nova.compute import task_states
+from nova.compute import utils as compute_utils
+from nova.compute import vm_mode
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import base as obj_base
+from nova.objects import quotas as quotas_obj
+from nova.openstack.common import uuidutils
+from nova import quota
+from nova import test
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit.image import fake as fake_image
+from nova.tests.unit import matchers
+from nova.tests.unit.objects import test_flavor
+from nova.tests.unit.objects import test_migration
+from nova.tests.unit.objects import test_service
+from nova.volume import cinder
+
+
+FAKE_IMAGE_REF = 'fake-image-ref'
+NODENAME = 'fakenode1'
+SHELVED_IMAGE = 'fake-shelved-image'
+SHELVED_IMAGE_NOT_FOUND = 'fake-shelved-image-notfound'
+SHELVED_IMAGE_NOT_AUTHORIZED = 'fake-shelved-image-not-authorized'
+SHELVED_IMAGE_EXCEPTION = 'fake-shelved-image-exception'
+
+
+class _ComputeAPIUnitTestMixIn(object):
+ def setUp(self):
+ super(_ComputeAPIUnitTestMixIn, self).setUp()
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id)
+
+ def _get_vm_states(self, exclude_states=None):
+ vm_state = set([vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED,
+ vm_states.SUSPENDED, vm_states.RESCUED, vm_states.STOPPED,
+ vm_states.RESIZED, vm_states.SOFT_DELETED,
+ vm_states.DELETED, vm_states.ERROR, vm_states.SHELVED,
+ vm_states.SHELVED_OFFLOADED])
+ if not exclude_states:
+ exclude_states = set()
+ return vm_state - exclude_states
+
+ def _create_flavor(self, params=None):
+ flavor = {'id': 1,
+ 'flavorid': 1,
+ 'name': 'm1.tiny',
+ 'memory_mb': 512,
+ 'vcpus': 1,
+ 'vcpu_weight': None,
+ 'root_gb': 1,
+ 'ephemeral_gb': 0,
+ 'rxtx_factor': 1,
+ 'swap': 0,
+ 'deleted': 0,
+ 'disabled': False,
+ 'is_public': True,
+ }
+ if params:
+ flavor.update(params)
+ return flavor
+
+ def _create_instance_obj(self, params=None, flavor=None):
+ """Create a test instance."""
+ if not params:
+ params = {}
+
+ if flavor is None:
+ flavor = self._create_flavor()
+
+ def make_fake_sys_meta():
+ sys_meta = params.pop("system_metadata", {})
+ for key in flavors.system_metadata_flavor_props:
+ sys_meta['instance_type_%s' % key] = flavor[key]
+ return sys_meta
+
+ now = timeutils.utcnow()
+
+ instance = objects.Instance()
+ instance.metadata = {}
+ instance.metadata.update(params.pop('metadata', {}))
+ instance.system_metadata = make_fake_sys_meta()
+ instance.system_metadata.update(params.pop('system_metadata', {}))
+ instance._context = self.context
+ instance.id = 1
+ instance.uuid = uuidutils.generate_uuid()
+ instance.cell_name = 'api!child'
+ instance.vm_state = vm_states.ACTIVE
+ instance.task_state = None
+ instance.image_ref = FAKE_IMAGE_REF
+ instance.reservation_id = 'r-fakeres'
+ instance.user_id = self.user_id
+ instance.project_id = self.project_id
+ instance.host = 'fake_host'
+ instance.node = NODENAME
+ instance.instance_type_id = flavor['id']
+ instance.ami_launch_index = 0
+ instance.memory_mb = 0
+ instance.vcpus = 0
+ instance.root_gb = 0
+ instance.ephemeral_gb = 0
+ instance.architecture = arch.X86_64
+ instance.os_type = 'Linux'
+ instance.locked = False
+ instance.created_at = now
+ instance.updated_at = now
+ instance.launched_at = now
+ instance.disable_terminate = False
+ instance.info_cache = objects.InstanceInfoCache()
+
+ if params:
+ instance.update(params)
+ instance.obj_reset_changes()
+ return instance
+
+ def test_create_quota_exceeded_messages(self):
+ image_href = "image_href"
+ image_id = 0
+ instance_type = self._create_flavor()
+
+ self.mox.StubOutWithMock(self.compute_api, "_get_image")
+ self.mox.StubOutWithMock(quota.QUOTAS, "limit_check")
+ self.mox.StubOutWithMock(quota.QUOTAS, "reserve")
+
+ quotas = {'instances': 1, 'cores': 1, 'ram': 1}
+ usages = dict((r, {'in_use': 1, 'reserved': 1}) for r in
+ ['instances', 'cores', 'ram'])
+ headroom = dict((res, quotas[res] -
+ (usages[res]['in_use'] + usages[res]['reserved']))
+ for res in quotas.keys())
+ quota_exception = exception.OverQuota(quotas=quotas,
+ usages=usages, overs=['instances'], headroom=headroom)
+
+ for _unused in range(2):
+ self.compute_api._get_image(self.context, image_href).AndReturn(
+ (image_id, {}))
+ quota.QUOTAS.limit_check(self.context, metadata_items=mox.IsA(int))
+ quota.QUOTAS.reserve(self.context, instances=40,
+ cores=mox.IsA(int),
+ expire=mox.IgnoreArg(),
+ project_id=mox.IgnoreArg(),
+ user_id=mox.IgnoreArg(),
+ ram=mox.IsA(int)).AndRaise(quota_exception)
+
+ self.mox.ReplayAll()
+
+ for min_count, message in [(20, '20-40'), (40, '40')]:
+ try:
+ self.compute_api.create(self.context, instance_type,
+ "image_href", min_count=min_count,
+ max_count=40)
+ except exception.TooManyInstances as e:
+ self.assertEqual(message, e.kwargs['req'])
+ else:
+ self.fail("Exception not raised")
+
+ def test_specified_port_and_multiple_instances_neutronv2(self):
+ # Tests that if port is specified there is only one instance booting
+        # (i.e. max_count == 1) as we can't share the same port across multiple
+ # instances.
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ port = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ address = '10.0.0.1'
+ min_count = 1
+ max_count = 2
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(address=address,
+ port_id=port)])
+
+ self.assertRaises(exception.MultiplePortsNotApplicable,
+ self.compute_api.create, self.context, 'fake_flavor', 'image_id',
+ min_count=min_count, max_count=max_count,
+ requested_networks=requested_networks)
+
+ def _test_specified_ip_and_multiple_instances_helper(self,
+ requested_networks):
+ # Tests that if ip is specified there is only one instance booting
+        # (i.e. max_count == 1)
+ min_count = 1
+ max_count = 2
+ self.assertRaises(exception.InvalidFixedIpAndMaxCountRequest,
+ self.compute_api.create, self.context, "fake_flavor", 'image_id',
+ min_count=min_count, max_count=max_count,
+ requested_networks=requested_networks)
+
+ def test_specified_ip_and_multiple_instances(self):
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ address = '10.0.0.1'
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id=network,
+ address=address)])
+ self._test_specified_ip_and_multiple_instances_helper(
+ requested_networks)
+
+ def test_specified_ip_and_multiple_instances_neutronv2(self):
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ address = '10.0.0.1'
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id=network,
+ address=address)])
+ self._test_specified_ip_and_multiple_instances_helper(
+ requested_networks)
+
+ def test_suspend(self):
+ # Ensure instance can be suspended.
+ instance = self._create_instance_obj()
+ self.assertEqual(instance.vm_state, vm_states.ACTIVE)
+ self.assertIsNone(instance.task_state)
+
+ self.mox.StubOutWithMock(instance, 'save')
+ self.mox.StubOutWithMock(self.compute_api,
+ '_record_action_start')
+ if self.cell_type == 'api':
+ rpcapi = self.compute_api.cells_rpcapi
+ else:
+ rpcapi = self.compute_api.compute_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'suspend_instance')
+
+ instance.save(expected_task_state=[None])
+ self.compute_api._record_action_start(self.context,
+ instance, instance_actions.SUSPEND)
+ rpcapi.suspend_instance(self.context, instance)
+
+ self.mox.ReplayAll()
+
+ self.compute_api.suspend(self.context, instance)
+ self.assertEqual(vm_states.ACTIVE, instance.vm_state)
+ self.assertEqual(task_states.SUSPENDING,
+ instance.task_state)
+
+ def _test_suspend_fails(self, vm_state):
+ params = dict(vm_state=vm_state)
+ instance = self._create_instance_obj(params=params)
+ self.assertIsNone(instance.task_state)
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.suspend,
+ self.context, instance)
+
+ def test_suspend_fails_invalid_states(self):
+ invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE]))
+ for state in invalid_vm_states:
+ self._test_suspend_fails(state)
+
+ def test_resume(self):
+ # Ensure instance can be resumed (if suspended).
+ instance = self._create_instance_obj(
+ params=dict(vm_state=vm_states.SUSPENDED))
+ self.assertEqual(instance.vm_state, vm_states.SUSPENDED)
+ self.assertIsNone(instance.task_state)
+
+ self.mox.StubOutWithMock(instance, 'save')
+ self.mox.StubOutWithMock(self.compute_api,
+ '_record_action_start')
+ if self.cell_type == 'api':
+ rpcapi = self.compute_api.cells_rpcapi
+ else:
+ rpcapi = self.compute_api.compute_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'resume_instance')
+
+ instance.save(expected_task_state=[None])
+ self.compute_api._record_action_start(self.context,
+ instance, instance_actions.RESUME)
+ rpcapi.resume_instance(self.context, instance)
+
+ self.mox.ReplayAll()
+
+ self.compute_api.resume(self.context, instance)
+ self.assertEqual(vm_states.SUSPENDED, instance.vm_state)
+ self.assertEqual(task_states.RESUMING,
+ instance.task_state)
+
+ def test_start(self):
+ params = dict(vm_state=vm_states.STOPPED)
+ instance = self._create_instance_obj(params=params)
+
+ self.mox.StubOutWithMock(instance, 'save')
+ self.mox.StubOutWithMock(self.compute_api,
+ '_record_action_start')
+
+ instance.save(expected_task_state=[None])
+ self.compute_api._record_action_start(self.context,
+ instance, instance_actions.START)
+
+ if self.cell_type == 'api':
+ rpcapi = self.compute_api.cells_rpcapi
+ else:
+ rpcapi = self.compute_api.compute_rpcapi
+
+ self.mox.StubOutWithMock(rpcapi, 'start_instance')
+ rpcapi.start_instance(self.context, instance)
+
+ self.mox.ReplayAll()
+
+ self.compute_api.start(self.context, instance)
+ self.assertEqual(task_states.POWERING_ON,
+ instance.task_state)
+
+ def test_start_invalid_state(self):
+ instance = self._create_instance_obj()
+ self.assertEqual(instance.vm_state, vm_states.ACTIVE)
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.start,
+ self.context, instance)
+
+ def test_start_no_host(self):
+ params = dict(vm_state=vm_states.STOPPED, host='')
+ instance = self._create_instance_obj(params=params)
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.start,
+ self.context, instance)
+
+ def _test_stop(self, vm_state, force=False):
+ # Make sure 'progress' gets reset
+ params = dict(task_state=None, progress=99, vm_state=vm_state)
+ instance = self._create_instance_obj(params=params)
+
+ self.mox.StubOutWithMock(instance, 'save')
+ self.mox.StubOutWithMock(self.compute_api,
+ '_record_action_start')
+
+ instance.save(expected_task_state=[None])
+ self.compute_api._record_action_start(self.context,
+ instance, instance_actions.STOP)
+
+ if self.cell_type == 'api':
+ rpcapi = self.compute_api.cells_rpcapi
+ else:
+ rpcapi = self.compute_api.compute_rpcapi
+
+ self.mox.StubOutWithMock(rpcapi, 'stop_instance')
+ rpcapi.stop_instance(self.context, instance, do_cast=True)
+
+ self.mox.ReplayAll()
+
+ if force:
+ self.compute_api.force_stop(self.context, instance)
+ else:
+ self.compute_api.stop(self.context, instance)
+ self.assertEqual(task_states.POWERING_OFF,
+ instance.task_state)
+ self.assertEqual(0, instance.progress)
+
+ def test_stop(self):
+ self._test_stop(vm_states.ACTIVE)
+
+ def test_stop_stopped_instance_with_bypass(self):
+ self._test_stop(vm_states.STOPPED, force=True)
+
+ def _test_stop_invalid_state(self, vm_state):
+ params = dict(vm_state=vm_state)
+ instance = self._create_instance_obj(params=params)
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.stop,
+ self.context, instance)
+
+ def test_stop_fails_invalid_states(self):
+ invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE,
+ vm_states.ERROR]))
+ for state in invalid_vm_states:
+ self._test_stop_invalid_state(state)
+
+ def test_stop_a_stopped_inst(self):
+ params = {'vm_state': vm_states.STOPPED}
+ instance = self._create_instance_obj(params=params)
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.stop,
+ self.context, instance)
+
+ def test_stop_no_host(self):
+ params = {'host': ''}
+ instance = self._create_instance_obj(params=params)
+ self.assertRaises(exception.InstanceNotReady,
+ self.compute_api.stop,
+ self.context, instance)
+
+ def _test_reboot_type(self, vm_state, reboot_type, task_state=None):
+        # Ensure the instance can be rebooted with the given reboot type.
+ inst = self._create_instance_obj()
+ inst.vm_state = vm_state
+ inst.task_state = task_state
+
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
+ self.mox.StubOutWithMock(self.compute_api, 'update')
+ self.mox.StubOutWithMock(inst, 'save')
+ inst.save(expected_task_state=[None, task_states.REBOOTING,
+ task_states.REBOOT_PENDING,
+ task_states.REBOOT_STARTED])
+ self.compute_api._record_action_start(self.context, inst,
+ instance_actions.REBOOT)
+
+ if self.cell_type == 'api':
+ rpcapi = self.compute_api.cells_rpcapi
+ else:
+ rpcapi = self.compute_api.compute_rpcapi
+
+ self.mox.StubOutWithMock(rpcapi, 'reboot_instance')
+ rpcapi.reboot_instance(self.context, instance=inst,
+ block_device_info=None,
+ reboot_type=reboot_type)
+ self.mox.ReplayAll()
+
+ self.compute_api.reboot(self.context, inst, reboot_type)
+
+ def _test_reboot_type_fails(self, reboot_type, **updates):
+ inst = self._create_instance_obj()
+ inst.update(updates)
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.reboot,
+ self.context, inst, reboot_type)
+
+ # Reboot state matrix: HARD reboot is allowed from ACTIVE/ERROR and even
+ # while another reboot is in flight; SOFT reboot is only allowed from
+ # ACTIVE and is rejected from error/paused/stopped/suspended/rescued
+ # states or while any reboot task state is already set.
+ def test_reboot_hard_active(self):
+ self._test_reboot_type(vm_states.ACTIVE, 'HARD')
+
+ def test_reboot_hard_error(self):
+ self._test_reboot_type(vm_states.ERROR, 'HARD')
+
+ def test_reboot_hard_rebooting(self):
+ self._test_reboot_type(vm_states.ACTIVE, 'HARD',
+                                task_state=task_states.REBOOTING)
+
+ def test_reboot_hard_reboot_started(self):
+ self._test_reboot_type(vm_states.ACTIVE, 'HARD',
+                                task_state=task_states.REBOOT_STARTED)
+
+ def test_reboot_hard_reboot_pending(self):
+ self._test_reboot_type(vm_states.ACTIVE, 'HARD',
+                                task_state=task_states.REBOOT_PENDING)
+
+ def test_reboot_hard_rescued(self):
+ self._test_reboot_type_fails('HARD', vm_state=vm_states.RESCUED)
+
+ def test_reboot_hard_error_not_launched(self):
+ self._test_reboot_type_fails('HARD', vm_state=vm_states.ERROR,
+                                      launched_at=None)
+
+ def test_reboot_soft(self):
+ self._test_reboot_type(vm_states.ACTIVE, 'SOFT')
+
+ def test_reboot_soft_error(self):
+ self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR)
+
+ def test_reboot_soft_paused(self):
+ self._test_reboot_type_fails('SOFT', vm_state=vm_states.PAUSED)
+
+ def test_reboot_soft_stopped(self):
+ self._test_reboot_type_fails('SOFT', vm_state=vm_states.STOPPED)
+
+ def test_reboot_soft_suspended(self):
+ self._test_reboot_type_fails('SOFT', vm_state=vm_states.SUSPENDED)
+
+ def test_reboot_soft_rebooting(self):
+ self._test_reboot_type_fails('SOFT', task_state=task_states.REBOOTING)
+
+ def test_reboot_soft_rebooting_hard(self):
+ self._test_reboot_type_fails('SOFT',
+                                      task_state=task_states.REBOOTING_HARD)
+
+ def test_reboot_soft_reboot_started(self):
+ self._test_reboot_type_fails('SOFT',
+                                      task_state=task_states.REBOOT_STARTED)
+
+ def test_reboot_soft_reboot_pending(self):
+ self._test_reboot_type_fails('SOFT',
+                                      task_state=task_states.REBOOT_PENDING)
+
+ def test_reboot_soft_rescued(self):
+ self._test_reboot_type_fails('SOFT', vm_state=vm_states.RESCUED)
+
+ def test_reboot_soft_error_not_launched(self):
+ self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR,
+                                      launched_at=None)
+
+ # Helper for _test_delete(): records the extra mox expectations hit when
+ # deleting an instance mid-resize (task_state RESIZE_FINISH) — the
+ # 'post-migrating' migration is looked up, the old flavor fetched, and
+ # the quota deltas adjusted to release the OLD flavor's cores/ram.
+ def _test_delete_resizing_part(self, inst, deltas):
+ fake_db_migration = test_migration.fake_db_migration()
+ migration = objects.Migration._from_db_object(
+                 self.context, objects.Migration(),
+                 fake_db_migration)
+ inst.instance_type_id = migration.new_instance_type_id
+ old_flavor = {'vcpus': 1,
+                       'memory_mb': 512}
+ # Quotas are released against the pre-resize (old) flavor.
+ deltas['cores'] = -old_flavor['vcpus']
+ deltas['ram'] = -old_flavor['memory_mb']
+
+ self.mox.StubOutWithMock(objects.Migration,
+                                  'get_by_instance_and_status')
+ self.mox.StubOutWithMock(flavors, 'get_flavor')
+
+ self.context.elevated().AndReturn(self.context)
+ objects.Migration.get_by_instance_and_status(
+             self.context, inst.uuid, 'post-migrating').AndReturn(migration)
+ flavors.get_flavor(migration.old_instance_type_id).AndReturn(
+             old_flavor)
+
+ # Helper for _test_delete(): expectations for deleting an instance in
+ # vm_state RESIZED — the 'finished' migration is fetched, downsize quota
+ # deltas reserved, and confirm_resize is invoked synchronously
+ # (cast=False) to settle the resize before the delete proceeds.
+ def _test_delete_resized_part(self, inst):
+ migration = objects.Migration._from_db_object(
+                 self.context, objects.Migration(),
+                 test_migration.fake_db_migration())
+
+ self.mox.StubOutWithMock(objects.Migration,
+                                  'get_by_instance_and_status')
+
+ self.context.elevated().AndReturn(self.context)
+ objects.Migration.get_by_instance_and_status(
+             self.context, inst.uuid, 'finished').AndReturn(migration)
+ self.compute_api._downsize_quota_delta(self.context, inst
+                                                ).AndReturn('deltas')
+ fake_quotas = objects.Quotas.from_reservations(self.context,
+                                                        ['rsvs'])
+ self.compute_api._reserve_quota_delta(self.context, 'deltas', inst,
+                                               ).AndReturn(fake_quotas)
+ self.compute_api._record_action_start(
+             self.context, inst, instance_actions.CONFIRM_RESIZE)
+ self.compute_api.compute_rpcapi.confirm_resize(
+             self.context, inst, migration,
+             migration['source_compute'], fake_quotas.reservations, cast=False)
+
+ # Helper for _test_delete(): expectations for cleaning up the shelved
+ # snapshot image. The sentinel shelved_image_id in system_metadata
+ # selects the scenario: successful delete, ImageNotFound,
+ # ImageNotAuthorized, or an unexpected exception from the image API.
+ def _test_delete_shelved_part(self, inst):
+ image_api = self.compute_api.image_api
+ self.mox.StubOutWithMock(image_api, 'delete')
+
+ snapshot_id = inst.system_metadata.get('shelved_image_id')
+ if snapshot_id == SHELVED_IMAGE:
+ image_api.delete(self.context, snapshot_id).AndReturn(True)
+ elif snapshot_id == SHELVED_IMAGE_NOT_FOUND:
+ image_api.delete(self.context, snapshot_id).AndRaise(
+                 exception.ImageNotFound(image_id=snapshot_id))
+ elif snapshot_id == SHELVED_IMAGE_NOT_AUTHORIZED:
+ image_api.delete(self.context, snapshot_id).AndRaise(
+                 exception.ImageNotAuthorized(image_id=snapshot_id))
+ elif snapshot_id == SHELVED_IMAGE_EXCEPTION:
+ image_api.delete(self.context, snapshot_id).AndRaise(
+                 test.TestingException("Unexpected error"))
+
+ # Helper for _test_delete(): expectations for the "local delete" path
+ # taken when the instance's compute host is down — the API itself tears
+ # down networking, destroys the DB record, and emits the start/end usage
+ # notifications. Soft deletes end in SOFT_DELETED, others in DELETED.
+ def _test_downed_host_part(self, inst, updates, delete_time, delete_type):
+ inst.info_cache.delete()
+ compute_utils.notify_about_instance_usage(
+             self.compute_api.notifier, self.context, inst,
+             '%s.start' % delete_type)
+ self.context.elevated().AndReturn(self.context)
+ self.compute_api.network_api.deallocate_for_instance(
+             self.context, inst)
+ state = (delete_types.SOFT_DELETE in delete_type and
+                  vm_states.SOFT_DELETED or
+                  vm_states.DELETED)
+ updates.update({'vm_state': state,
+                         'task_state': None,
+                         'terminated_at': delete_time})
+ inst.save()
+
+ updates.update({'deleted_at': delete_time,
+                         'deleted': True})
+ fake_inst = fake_instance.fake_db_instance(**updates)
+ db.instance_destroy(self.context, inst.uuid,
+                             constraint=None).AndReturn(fake_inst)
+ compute_utils.notify_about_instance_usage(
+             self.compute_api.notifier,
+             self.context, inst, '%s.end' % delete_type,
+             system_metadata=inst.system_metadata)
+
+ # Master delete-scenario helper: builds an instance with **attrs
+ # overrides, stubs out every collaborator the delete path can touch, and
+ # then records the expectation sequence appropriate for the instance's
+ # state (resizing, resized, shelved, downed host) and for whether we are
+ # running as an API cell. After replay it invokes the compute_api method
+ # named by delete_type ('delete'/'soft_delete'/'force_delete') and
+ # verifies the accumulated field updates landed on the instance.
+ def _test_delete(self, delete_type, **attrs):
+ reservations = ['fake-resv']
+ inst = self._create_instance_obj()
+ inst.update(attrs)
+ inst._context = self.context
+ deltas = {'instances': -1,
+                   'cores': -inst.vcpus,
+                   'ram': -inst.memory_mb}
+ delete_time = datetime.datetime(1955, 11, 5, 9, 30,
+                                         tzinfo=iso8601.iso8601.Utc())
+ timeutils.set_time_override(delete_time)
+ task_state = (delete_type == delete_types.SOFT_DELETE and
+                       task_states.SOFT_DELETING or task_states.DELETING)
+ updates = {'progress': 0, 'task_state': task_state}
+ if delete_type == delete_types.SOFT_DELETE:
+ updates['deleted_at'] = delete_time
+ self.mox.StubOutWithMock(inst, 'save')
+ self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
+                                  'get_by_instance_uuid')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+ self.mox.StubOutWithMock(self.compute_api.servicegroup_api,
+                                  'service_is_up')
+ self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta')
+ self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
+ self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(inst.info_cache, 'delete')
+ self.mox.StubOutWithMock(self.compute_api.network_api,
+                                  'deallocate_for_instance')
+ self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
+ self.mox.StubOutWithMock(db, 'instance_destroy')
+ self.mox.StubOutWithMock(compute_utils,
+                                  'notify_about_instance_usage')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
+ rpcapi = self.compute_api.compute_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'confirm_resize')
+
+ if (inst.vm_state in
+             (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED)):
+ self._test_delete_shelved_part(inst)
+
+ if self.cell_type == 'api':
+ rpcapi = self.compute_api.cells_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
+ self.mox.StubOutWithMock(rpcapi, 'soft_delete_instance')
+
+ objects.BlockDeviceMappingList.get_by_instance_uuid(
+             self.context, inst.uuid).AndReturn([])
+ inst.save()
+ if inst.task_state == task_states.RESIZE_FINISH:
+ self._test_delete_resizing_part(inst, deltas)
+ quota.QUOTAS.reserve(self.context, project_id=inst.project_id,
+                              user_id=inst.user_id,
+                              expire=mox.IgnoreArg(),
+                              **deltas).AndReturn(reservations)
+
+ # NOTE(comstud): This is getting messy. But what we are wanting
+ # to test is:
+ # If cells is enabled and we're the API cell:
+ #   * Cast to cells_rpcapi.<method> with reservations=None
+ #   * Commit reservations
+ # Otherwise:
+ #   * Check for downed host
+ #   * If downed host:
+ #     * Clean up instance, destroying it, sending notifications.
+ #       (Tested in _test_downed_host_part())
+ #     * Commit reservations
+ #   * If not downed host:
+ #     * Record the action start.
+ #     * Cast to compute_rpcapi.<method> with the reservations
+
+ cast = True
+ commit_quotas = True
+ if self.cell_type != 'api':
+ if inst.vm_state == vm_states.RESIZED:
+ self._test_delete_resized_part(inst)
+
+ self.context.elevated().AndReturn(self.context)
+ db.service_get_by_compute_host(
+                     self.context, inst.host).AndReturn(
+                             test_service.fake_service)
+ self.compute_api.servicegroup_api.service_is_up(
+                     mox.IsA(objects.Service)).AndReturn(
+                             inst.host != 'down-host')
+
+ if inst.host == 'down-host':
+ self._test_downed_host_part(inst, updates, delete_time,
+                                             delete_type)
+ cast = False
+ else:
+ # Happens on the manager side
+ commit_quotas = False
+
+ if cast:
+ if self.cell_type != 'api':
+ self.compute_api._record_action_start(self.context, inst,
+                                                       instance_actions.DELETE)
+ if commit_quotas:
+ cast_reservations = None
+ else:
+ cast_reservations = reservations
+ if delete_type == delete_types.SOFT_DELETE:
+ rpcapi.soft_delete_instance(self.context, inst,
+                                             reservations=cast_reservations)
+ elif delete_type in [delete_types.DELETE,
+                                  delete_types.FORCE_DELETE]:
+ rpcapi.terminate_instance(self.context, inst, [],
+                                           reservations=cast_reservations)
+
+ if commit_quotas:
+ # Local delete or when we're testing API cell.
+ quota.QUOTAS.commit(self.context, reservations,
+                                 project_id=inst.project_id,
+                                 user_id=inst.user_id)
+
+ self.mox.ReplayAll()
+
+ getattr(self.compute_api, delete_type)(self.context, inst)
+ for k, v in updates.items():
+ self.assertEqual(inst[k], v)
+
+ self.mox.UnsetStubs()
+
+ # Delete scenarios, all delegating to _test_delete(): plain/unlaunched/
+ # mid-resize/resized deletes, the four shelved-image cleanup outcomes,
+ # deletes against a downed host, soft delete, and force delete from
+ # every vm_state (with and without an in-flight DELETING task state).
+ def test_delete(self):
+ self._test_delete(delete_types.DELETE)
+
+ def test_delete_if_not_launched(self):
+ self._test_delete(delete_types.DELETE, launched_at=None)
+
+ def test_delete_in_resizing(self):
+ self._test_delete(delete_types.DELETE,
+                           task_state=task_states.RESIZE_FINISH)
+
+ def test_delete_in_resized(self):
+ self._test_delete(delete_types.DELETE, vm_state=vm_states.RESIZED)
+
+ def test_delete_shelved(self):
+ fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
+ self._test_delete(delete_types.DELETE,
+                           vm_state=vm_states.SHELVED,
+                           system_metadata=fake_sys_meta)
+
+ def test_delete_shelved_offloaded(self):
+ fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
+ self._test_delete(delete_types.DELETE,
+                           vm_state=vm_states.SHELVED_OFFLOADED,
+                           system_metadata=fake_sys_meta)
+
+ def test_delete_shelved_image_not_found(self):
+ fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_NOT_FOUND}
+ self._test_delete(delete_types.DELETE,
+                           vm_state=vm_states.SHELVED_OFFLOADED,
+                           system_metadata=fake_sys_meta)
+
+ def test_delete_shelved_image_not_authorized(self):
+ fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_NOT_AUTHORIZED}
+ self._test_delete(delete_types.DELETE,
+                           vm_state=vm_states.SHELVED_OFFLOADED,
+                           system_metadata=fake_sys_meta)
+
+ def test_delete_shelved_exception(self):
+ fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE_EXCEPTION}
+ self._test_delete(delete_types.DELETE,
+                           vm_state=vm_states.SHELVED,
+                           system_metadata=fake_sys_meta)
+
+ def test_delete_with_down_host(self):
+ self._test_delete(delete_types.DELETE, host='down-host')
+
+ def test_delete_soft_with_down_host(self):
+ self._test_delete(delete_types.SOFT_DELETE, host='down-host')
+
+ def test_delete_soft(self):
+ self._test_delete(delete_types.SOFT_DELETE)
+
+ def test_delete_forced(self):
+ for vm_state in self._get_vm_states():
+ self._test_delete(delete_types.FORCE_DELETE, vm_state=vm_state)
+
+ def test_delete_forced_when_task_state_deleting(self):
+ for vm_state in self._get_vm_states():
+ self._test_delete(delete_types.FORCE_DELETE, vm_state=vm_state,
+                               task_state=task_states.DELETING)
+
+ # A plain delete() of an instance already in task_state 'deleting' must
+ # roll back its quota reservation and NOT cast terminate_instance, for
+ # every vm_state. Skipped in the API cell, which casts to the child cell
+ # without performing this check (see the quoted manager-side snippet).
+ # Uses mock (not mox) so call counts can be asserted per iteration.
+ def test_no_delete_when_task_state_deleting(self):
+ if self.cell_type == 'api':
+ # In 'api' cell, the callback terminate_instance will
+ # get called, and quota will be committed before returning.
+ # It doesn't check for below condition, hence skipping the test.
+ """
+             if original_task_state in (task_states.DELETING,
+                                        task_states.SOFT_DELETING):
+                 LOG.info(_('Instance is already in deleting state, '
+                            'ignoring this request'), instance=instance)
+                 quotas.rollback()
+                 return
+             """
+ self.skipTest("API cell doesn't delete instance directly.")
+
+ attrs = {}
+ fake_sys_meta = {'shelved_image_id': SHELVED_IMAGE}
+
+ for vm_state in self._get_vm_states():
+ if vm_state in (vm_states.SHELVED, vm_states.SHELVED_OFFLOADED):
+ attrs.update({'system_metadata': fake_sys_meta})
+
+ attrs.update({'vm_state': vm_state, 'task_state': 'deleting'})
+ reservations = ['fake-resv']
+ inst = self._create_instance_obj()
+ inst.update(attrs)
+ inst._context = self.context
+ deltas = {'instances': -1,
+                       'cores': -inst.vcpus,
+                       'ram': -inst.memory_mb}
+ delete_time = datetime.datetime(1955, 11, 5, 9, 30,
+                                             tzinfo=iso8601.iso8601.Utc())
+ timeutils.set_time_override(delete_time)
+ bdms = []
+ migration = objects.Migration._from_db_object(
+                 self.context, objects.Migration(),
+                 test_migration.fake_db_migration())
+
+ fake_quotas = objects.Quotas.from_reservations(self.context,
+                                                            ['rsvs'])
+
+ image_api = self.compute_api.image_api
+ rpcapi = self.compute_api.compute_rpcapi
+
+ with contextlib.nested(
+                 mock.patch.object(image_api, 'delete'),
+                 mock.patch.object(inst, 'save'),
+                 mock.patch.object(objects.BlockDeviceMappingList,
+                                   'get_by_instance_uuid',
+                                   return_value=bdms),
+                 mock.patch.object(objects.Migration,
+                                   'get_by_instance_and_status'),
+                 mock.patch.object(quota.QUOTAS, 'reserve',
+                                   return_value=reservations),
+                 mock.patch.object(self.context, 'elevated',
+                                   return_value=self.context),
+                 mock.patch.object(db, 'service_get_by_compute_host',
+                                   return_value=test_service.fake_service),
+                 mock.patch.object(self.compute_api.servicegroup_api,
+                                   'service_is_up',
+                                   return_value=inst.host != 'down-host'),
+                 mock.patch.object(self.compute_api,
+                                   '_downsize_quota_delta',
+                                   return_value=fake_quotas),
+                 mock.patch.object(self.compute_api,
+                                   '_reserve_quota_delta'),
+                 mock.patch.object(self.compute_api,
+                                   '_record_action_start'),
+                 mock.patch.object(db, 'instance_update_and_get_original'),
+                 mock.patch.object(inst.info_cache, 'delete'),
+                 mock.patch.object(self.compute_api.network_api,
+                                   'deallocate_for_instance'),
+                 mock.patch.object(db, 'instance_system_metadata_get'),
+                 mock.patch.object(db, 'instance_destroy'),
+                 mock.patch.object(compute_utils,
+                                   'notify_about_instance_usage'),
+                 mock.patch.object(quota.QUOTAS, 'commit'),
+                 mock.patch.object(quota.QUOTAS, 'rollback'),
+                 mock.patch.object(rpcapi, 'confirm_resize'),
+                 mock.patch.object(rpcapi, 'terminate_instance')
+             ) as (
+                 image_delete,
+                 save,
+                 get_by_instance_uuid,
+                 get_by_instance_and_status,
+                 reserve,
+                 elevated,
+                 service_get_by_compute_host,
+                 service_is_up,
+                 _downsize_quota_delta,
+                 _reserve_quota_delta,
+                 _record_action_start,
+                 instance_update_and_get_original,
+                 delete,
+                 deallocate_for_instance,
+                 instance_system_metadata_get,
+                 instance_destroy,
+                 notify_about_instance_usage,
+                 commit,
+                 rollback,
+                 confirm_resize,
+                 terminate_instance
+             ):
+ if (inst.vm_state in (vm_states.SHELVED,
+                                       vm_states.SHELVED_OFFLOADED)):
+ image_delete.return_value = True
+
+ if inst.vm_state == vm_states.RESIZED:
+ get_by_instance_and_status.return_value = migration
+ _downsize_quota_delta.return_value = deltas
+
+ self.compute_api.delete(self.context, inst)
+ self.assertEqual(1, rollback.call_count)
+ self.assertEqual(0, terminate_instance.call_count)
+
+ # Deleting an instance that was never scheduled (host == '') takes the
+ # fast local path: destroy the DB row under a host constraint and emit
+ # delete.start/delete.end notifications — except in the API cell, which
+ # instead casts terminate_instance down to the child cell.
+ def test_delete_fast_if_host_not_set(self):
+ inst = self._create_instance_obj()
+ inst.host = ''
+ quotas = quotas_obj.Quotas(self.context)
+ updates = {'progress': 0, 'task_state': task_states.DELETING}
+
+ self.mox.StubOutWithMock(inst, 'save')
+ self.mox.StubOutWithMock(db,
+                                  'block_device_mapping_get_all_by_instance')
+
+ self.mox.StubOutWithMock(db, 'constraint')
+ self.mox.StubOutWithMock(db, 'instance_destroy')
+ self.mox.StubOutWithMock(self.compute_api, '_create_reservations')
+ self.mox.StubOutWithMock(compute_utils,
+                                  'notify_about_instance_usage')
+ if self.cell_type == 'api':
+ rpcapi = self.compute_api.cells_rpcapi
+ else:
+ rpcapi = self.compute_api.compute_rpcapi
+ self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
+
+ db.block_device_mapping_get_all_by_instance(self.context,
+                                                     inst.uuid,
+                                                     use_slave=False).AndReturn([])
+ inst.save()
+ self.compute_api._create_reservations(self.context,
+                                               inst, inst.task_state,
+                                               inst.project_id, inst.user_id
+                                               ).AndReturn(quotas)
+
+ if self.cell_type == 'api':
+ rpcapi.terminate_instance(
+                     self.context, inst,
+                     mox.IsA(objects.BlockDeviceMappingList),
+                     reservations=None)
+ else:
+ compute_utils.notify_about_instance_usage(
+                     self.compute_api.notifier, self.context,
+                     inst, 'delete.start')
+ db.constraint(host=mox.IgnoreArg()).AndReturn('constraint')
+ delete_time = datetime.datetime(1955, 11, 5, 9, 30,
+                                             tzinfo=iso8601.iso8601.Utc())
+ updates['deleted_at'] = delete_time
+ updates['deleted'] = True
+ fake_inst = fake_instance.fake_db_instance(**updates)
+ db.instance_destroy(self.context, inst.uuid,
+                                     constraint='constraint').AndReturn(fake_inst)
+ compute_utils.notify_about_instance_usage(
+                     self.compute_api.notifier, self.context,
+                     inst, 'delete.end',
+                     system_metadata=inst.system_metadata)
+
+ self.mox.ReplayAll()
+
+ self.compute_api.delete(self.context, inst)
+ for k, v in updates.items():
+ self.assertEqual(inst[k], v)
+
+ # _local_delete() must tolerate a volume that no longer exists in
+ # Cinder: terminate_connection raising VolumeNotFound is swallowed and
+ # the BDM is still destroyed, so the instance delete completes.
+ def test_local_delete_with_deleted_volume(self):
+ bdms = [objects.BlockDeviceMapping(
+                 **fake_block_device.FakeDbBlockDeviceDict(
+                     {'id': 42, 'volume_id': 'volume_id',
+                      'source_type': 'volume', 'destination_type': 'volume',
+                      'delete_on_termination': False}))]
+
+ # No-op stand-in for the do_delete callback _local_delete invokes.
+ # NOTE(review): 'rservations' looks like a typo for 'reservations' in
+ # this fake's signature — harmless if the caller passes it
+ # positionally/never; confirm against _local_delete's call site.
+ def _fake_do_delete(context, instance, bdms,
+                             rservations=None, local=False):
+ pass
+
+ inst = self._create_instance_obj()
+ inst._context = self.context
+
+ self.mox.StubOutWithMock(inst, 'destroy')
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.mox.StubOutWithMock(inst.info_cache, 'delete')
+ self.mox.StubOutWithMock(self.compute_api.network_api,
+                                  'deallocate_for_instance')
+ self.mox.StubOutWithMock(db, 'instance_system_metadata_get')
+ self.mox.StubOutWithMock(compute_utils,
+                                  'notify_about_instance_usage')
+ self.mox.StubOutWithMock(self.compute_api.volume_api,
+                                  'terminate_connection')
+ self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'destroy')
+
+ inst.info_cache.delete()
+ compute_utils.notify_about_instance_usage(
+             self.compute_api.notifier, self.context,
+             inst, 'delete.start')
+ self.context.elevated().MultipleTimes().AndReturn(self.context)
+ if self.cell_type != 'api':
+ self.compute_api.network_api.deallocate_for_instance(
+                     self.context, inst)
+
+ self.compute_api.volume_api.terminate_connection(
+             mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).\
+            AndRaise(exception. VolumeNotFound('volume_id'))
+ bdms[0].destroy(self.context)
+
+ inst.destroy()
+ compute_utils.notify_about_instance_usage(
+             self.compute_api.notifier, self.context,
+             inst, 'delete.end',
+             system_metadata=inst.system_metadata)
+
+ self.mox.ReplayAll()
+ self.compute_api._local_delete(self.context, inst, bdms,
+                                        delete_types.DELETE,
+                                        _fake_do_delete)
+
+ # With disable_terminate set, delete() is a no-op: nothing is recorded
+ # against the stubbed db call, so replay verifies no DB update happened.
+ def test_delete_disabled(self):
+ inst = self._create_instance_obj()
+ inst.disable_terminate = True
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.ReplayAll()
+ self.compute_api.delete(self.context, inst)
+
+ # If instance.save() blows up during soft_delete, the exception must
+ # propagate to the caller (quota rollback is exercised elsewhere).
+ def test_delete_soft_rollback(self):
+ inst = self._create_instance_obj()
+ self.mox.StubOutWithMock(db,
+                                  'block_device_mapping_get_all_by_instance')
+ self.mox.StubOutWithMock(inst, 'save')
+
+ delete_time = datetime.datetime(1955, 11, 5)
+ timeutils.set_time_override(delete_time)
+
+ db.block_device_mapping_get_all_by_instance(
+             self.context, inst.uuid, use_slave=False).AndReturn([])
+ inst.save().AndRaise(test.TestingException)
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(test.TestingException,
+                           self.compute_api.soft_delete, self.context, inst)
+
+ # Helper: confirm_resize() on a RESIZED instance must look up the
+ # 'finished' migration (unless one is passed in), reserve the downsize
+ # quota delta, flip the migration to 'confirming', record the action,
+ # and cast confirm_resize to the source compute. In a cells setup the
+ # quotas are committed up front and empty reservations are cast.
+ def _test_confirm_resize(self, mig_ref_passed=False):
+ params = dict(vm_state=vm_states.RESIZED)
+ fake_inst = self._create_instance_obj(params=params)
+ fake_mig = objects.Migration._from_db_object(
+                 self.context, objects.Migration(),
+                 test_migration.fake_db_migration())
+
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.mox.StubOutWithMock(objects.Migration,
+                                  'get_by_instance_and_status')
+ self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta')
+ self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
+ self.mox.StubOutWithMock(fake_mig, 'save')
+ self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
+ self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
+                                  'confirm_resize')
+
+ self.context.elevated().AndReturn(self.context)
+ if not mig_ref_passed:
+ objects.Migration.get_by_instance_and_status(
+                     self.context, fake_inst['uuid'], 'finished').AndReturn(
+                             fake_mig)
+ self.compute_api._downsize_quota_delta(self.context,
+                                                fake_inst).AndReturn('deltas')
+
+ resvs = ['resvs']
+ fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
+
+ self.compute_api._reserve_quota_delta(self.context, 'deltas',
+                                               fake_inst).AndReturn(fake_quotas)
+
+ # Side effect: assert the migration status was set before save().
+ def _check_mig(expected_task_state=None):
+ self.assertEqual('confirming', fake_mig.status)
+
+ fake_mig.save().WithSideEffects(_check_mig)
+
+ if self.cell_type:
+ fake_quotas.commit(self.context)
+
+ self.compute_api._record_action_start(self.context, fake_inst,
+                                               'confirmResize')
+
+ self.compute_api.compute_rpcapi.confirm_resize(
+             self.context, fake_inst, fake_mig, 'compute-source',
+             [] if self.cell_type else fake_quotas.reservations)
+
+ self.mox.ReplayAll()
+
+ if mig_ref_passed:
+ self.compute_api.confirm_resize(self.context, fake_inst,
+                                             migration=fake_mig)
+ else:
+ self.compute_api.confirm_resize(self.context, fake_inst)
+
+ # confirm_resize with and without an explicit migration reference.
+ def test_confirm_resize(self):
+ self._test_confirm_resize()
+
+ def test_confirm_resize_with_migration_ref(self):
+ self._test_confirm_resize(mig_ref_passed=True)
+
+ # Helper: revert_resize() on a RESIZED instance must fetch the
+ # 'finished' migration, reserve the reverse-upsize quota delta, set
+ # task_state RESIZE_REVERTING on the instance, flip the migration to
+ # 'reverting', record the action, and cast revert_resize to the
+ # destination compute (quotas committed early when running with cells).
+ def _test_revert_resize(self):
+ params = dict(vm_state=vm_states.RESIZED)
+ fake_inst = self._create_instance_obj(params=params)
+ fake_mig = objects.Migration._from_db_object(
+                 self.context, objects.Migration(),
+                 test_migration.fake_db_migration())
+
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.mox.StubOutWithMock(objects.Migration,
+                                  'get_by_instance_and_status')
+ self.mox.StubOutWithMock(self.compute_api,
+                                  '_reverse_upsize_quota_delta')
+ self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
+ self.mox.StubOutWithMock(fake_inst, 'save')
+ self.mox.StubOutWithMock(fake_mig, 'save')
+ self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
+ self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
+                                  'revert_resize')
+
+ self.context.elevated().AndReturn(self.context)
+ objects.Migration.get_by_instance_and_status(
+             self.context, fake_inst['uuid'], 'finished').AndReturn(
+                     fake_mig)
+ self.compute_api._reverse_upsize_quota_delta(
+             self.context, fake_mig).AndReturn('deltas')
+
+ resvs = ['resvs']
+ fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
+
+ self.compute_api._reserve_quota_delta(self.context, 'deltas',
+                                               fake_inst).AndReturn(fake_quotas)
+
+ # Side effect: the task state must be set before the save lands.
+ def _check_state(expected_task_state=None):
+ self.assertEqual(task_states.RESIZE_REVERTING,
+                              fake_inst.task_state)
+
+ fake_inst.save(expected_task_state=[None]).WithSideEffects(
+             _check_state)
+
+ # Side effect: the migration must be marked 'reverting' on save.
+ def _check_mig(expected_task_state=None):
+ self.assertEqual('reverting', fake_mig.status)
+
+ fake_mig.save().WithSideEffects(_check_mig)
+
+ if self.cell_type:
+ fake_quotas.commit(self.context)
+
+ self.compute_api._record_action_start(self.context, fake_inst,
+                                               'revertResize')
+
+ self.compute_api.compute_rpcapi.revert_resize(
+             self.context, fake_inst, fake_mig, 'compute-dest',
+             [] if self.cell_type else fake_quotas.reservations)
+
+ self.mox.ReplayAll()
+
+ self.compute_api.revert_resize(self.context, fake_inst)
+
+ # Happy-path revert_resize, delegating to the helper above.
+ def test_revert_resize(self):
+ self._test_revert_resize()
+
+ # If a concurrent state change makes instance.save() raise
+ # UnexpectedTaskStateError, revert_resize must roll back the quota
+ # reservation and re-raise. (Method name keeps the original
+ # 'concurent' spelling for compatibility with test selectors.)
+ def test_revert_resize_concurent_fail(self):
+ params = dict(vm_state=vm_states.RESIZED)
+ fake_inst = self._create_instance_obj(params=params)
+ fake_mig = objects.Migration._from_db_object(
+                 self.context, objects.Migration(),
+                 test_migration.fake_db_migration())
+
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.mox.StubOutWithMock(objects.Migration,
+                                  'get_by_instance_and_status')
+ self.mox.StubOutWithMock(self.compute_api,
+                                  '_reverse_upsize_quota_delta')
+ self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
+ self.mox.StubOutWithMock(fake_inst, 'save')
+
+ self.context.elevated().AndReturn(self.context)
+ objects.Migration.get_by_instance_and_status(
+             self.context, fake_inst['uuid'], 'finished').AndReturn(fake_mig)
+
+ delta = ['delta']
+ self.compute_api._reverse_upsize_quota_delta(
+             self.context, fake_mig).AndReturn(delta)
+ resvs = ['resvs']
+ fake_quotas = objects.Quotas.from_reservations(self.context, resvs)
+ self.compute_api._reserve_quota_delta(
+             self.context, delta, fake_inst).AndReturn(fake_quotas)
+
+ exc = exception.UnexpectedTaskStateError(
+             actual=task_states.RESIZE_REVERTING, expected=None)
+ fake_inst.save(expected_task_state=[None]).AndRaise(exc)
+
+ fake_quotas.rollback(self.context)
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.UnexpectedTaskStateError,
+                           self.compute_api.revert_resize,
+                           self.context,
+                           fake_inst)
+
+ # Master resize/migrate helper. Depending on the flags it exercises
+ # resize-to-new-flavor (flavor_id_passed=True) or cold migrate
+ # (flavor_id_passed=False), optionally with same-host scheduling
+ # allowed, a different project id, extra kwargs forwarded to the
+ # conductor, or a resize to the instance's current flavor. It records
+ # quota reservation, instance save (RESIZE_PREP), migration creation,
+ # action recording, and the resize_instance cast to the conductor.
+ def _test_resize(self, flavor_id_passed=True,
+                      same_host=False, allow_same_host=False,
+                      allow_mig_same_host=False,
+                      project_id=None,
+                      extra_kwargs=None,
+                      same_flavor=False):
+ if extra_kwargs is None:
+ extra_kwargs = {}
+
+ self.flags(allow_resize_to_same_host=allow_same_host,
+                    allow_migrate_to_same_host=allow_mig_same_host)
+
+ params = {}
+ if project_id is not None:
+ # To test instance w/ different project id than context (admin)
+ params['project_id'] = project_id
+ fake_inst = self._create_instance_obj(params=params)
+
+ self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
+ self.mox.StubOutWithMock(self.compute_api, '_upsize_quota_delta')
+ self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
+ self.mox.StubOutWithMock(fake_inst, 'save')
+ self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
+ self.mox.StubOutWithMock(self.compute_api.compute_task_api,
+                                  'resize_instance')
+
+ current_flavor = flavors.extract_flavor(fake_inst)
+ if flavor_id_passed:
+ new_flavor = dict(id=200, flavorid='new-flavor-id',
+                               name='new_flavor', disabled=False)
+ if same_flavor:
+ cur_flavor = flavors.extract_flavor(fake_inst)
+ new_flavor['id'] = cur_flavor['id']
+ flavors.get_flavor_by_flavor_id(
+                     'new-flavor-id',
+                     read_deleted='no').AndReturn(new_flavor)
+ else:
+ new_flavor = current_flavor
+
+ # Quota work is skipped in the API cell when resizing to the same
+ # flavor; otherwise reserve the upsize delta.
+ if (self.cell_type == 'compute' or
+                 not (flavor_id_passed and same_flavor)):
+ resvs = ['resvs']
+ project_id, user_id = quotas_obj.ids_from_instance(self.context,
+                                                                fake_inst)
+ fake_quotas = objects.Quotas.from_reservations(self.context,
+                                                            resvs)
+
+ self.compute_api._upsize_quota_delta(
+                 self.context, new_flavor,
+                 current_flavor).AndReturn('deltas')
+ self.compute_api._reserve_quota_delta(self.context, 'deltas',
+                                                   fake_inst).AndReturn(fake_quotas)
+
+ # Side effect: verify RESIZE_PREP, zeroed progress, and any
+ # extra kwargs were applied before the save.
+ def _check_state(expected_task_state=None):
+ self.assertEqual(task_states.RESIZE_PREP,
+                                  fake_inst.task_state)
+ self.assertEqual(fake_inst.progress, 0)
+ for key, value in extra_kwargs.items():
+ self.assertEqual(value, getattr(fake_inst, key))
+
+ fake_inst.save(expected_task_state=[None]).WithSideEffects(
+                 _check_state)
+
+ if allow_same_host:
+ filter_properties = {'ignore_hosts': []}
+ else:
+ filter_properties = {'ignore_hosts': [fake_inst['host']]}
+
+ if not flavor_id_passed and not allow_mig_same_host:
+ filter_properties['ignore_hosts'].append(fake_inst['host'])
+
+ expected_reservations = fake_quotas.reservations
+ if self.cell_type == 'api':
+ fake_quotas.commit(self.context)
+ expected_reservations = []
+ mig = objects.Migration()
+
+ def _get_migration():
+ return mig
+
+ # Side effect: validate the Migration record created for the
+ # (api-cell) resize before it is persisted.
+ def _check_mig(ctxt):
+ self.assertEqual(fake_inst.uuid, mig.instance_uuid)
+ self.assertEqual(current_flavor['id'],
+                                  mig.old_instance_type_id)
+ self.assertEqual(new_flavor['id'],
+                                  mig.new_instance_type_id)
+ self.assertEqual('finished', mig.status)
+
+ # Replace the Migration class itself so the code under test gets
+ # our pre-built 'mig' when it constructs one.
+ self.stubs.Set(objects, 'Migration', _get_migration)
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.mox.StubOutWithMock(mig, 'create')
+
+ self.context.elevated().AndReturn(self.context)
+ mig.create(self.context).WithSideEffects(_check_mig)
+
+ if flavor_id_passed:
+ self.compute_api._record_action_start(self.context, fake_inst,
+                                                   'resize')
+ else:
+ self.compute_api._record_action_start(self.context, fake_inst,
+                                                   'migrate')
+
+ scheduler_hint = {'filter_properties': filter_properties}
+
+ self.compute_api.compute_task_api.resize_instance(
+             self.context, fake_inst, extra_kwargs,
+             scheduler_hint=scheduler_hint,
+             flavor=new_flavor, reservations=expected_reservations)
+
+ self.mox.ReplayAll()
+
+ if flavor_id_passed:
+ self.compute_api.resize(self.context, fake_inst,
+                                     flavor_id='new-flavor-id',
+                                     **extra_kwargs)
+ else:
+ self.compute_api.resize(self.context, fake_inst, **extra_kwargs)
+
+ # Cold migrate is just resize without a new flavor; the cases below
+ # mirror the resize cases for kwargs, same-host policy, and project id.
+ def _test_migrate(self, *args, **kwargs):
+ self._test_resize(*args, flavor_id_passed=False, **kwargs)
+
+ def test_resize(self):
+ self._test_resize()
+
+ def test_resize_with_kwargs(self):
+ self._test_resize(extra_kwargs=dict(cow='moo'))
+
+ def test_resize_same_host_and_allowed(self):
+ self._test_resize(same_host=True, allow_same_host=True)
+
+ def test_resize_same_host_and_not_allowed(self):
+ self._test_resize(same_host=True, allow_same_host=False)
+
+ def test_resize_different_project_id(self):
+ self._test_resize(project_id='different')
+
+ def test_migrate(self):
+ self._test_migrate()
+
+ def test_migrate_with_kwargs(self):
+ self._test_migrate(extra_kwargs=dict(cow='moo'))
+
+ def test_migrate_same_host_and_allowed(self):
+ self._test_migrate(same_host=True, allow_same_host=True)
+
+ def test_migrate_same_host_and_not_allowed(self):
+ self._test_migrate(same_host=True, allow_same_host=False)
+
+ def test_migrate_different_project_id(self):
+ self._test_migrate(project_id='different')
+
+ # resize() with an unknown flavor id must raise FlavorNotFound before
+ # touching quotas or the conductor (those stubs record no calls).
+ def test_resize_invalid_flavor_fails(self):
+ self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
+ # Should never reach these.
+ self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
+ self.mox.StubOutWithMock(self.compute_api, 'update')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
+ self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
+ self.mox.StubOutWithMock(self.compute_api.compute_task_api,
+                                  'resize_instance')
+
+ fake_inst = obj_base.obj_to_primitive(self._create_instance_obj())
+ exc = exception.FlavorNotFound(flavor_id='flavor-id')
+
+ flavors.get_flavor_by_flavor_id('flavor-id',
+                                         read_deleted='no').AndRaise(exc)
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.FlavorNotFound,
+                           self.compute_api.resize, self.context,
+                           fake_inst, flavor_id='flavor-id')
+
+ # A disabled flavor is treated as not found: resize() raises
+ # FlavorNotFound without reserving quota or casting to the conductor.
+ def test_resize_disabled_flavor_fails(self):
+ self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
+ # Should never reach these.
+ self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
+ self.mox.StubOutWithMock(self.compute_api, 'update')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
+ self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
+ self.mox.StubOutWithMock(self.compute_api.compute_task_api,
+                                  'resize_instance')
+
+ fake_inst = obj_base.obj_to_primitive(self._create_instance_obj())
+ fake_flavor = dict(id=200, flavorid='flavor-id', name='foo',
+                            disabled=True)
+
+ flavors.get_flavor_by_flavor_id(
+                 'flavor-id', read_deleted='no').AndReturn(fake_flavor)
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.FlavorNotFound,
+                           self.compute_api.resize, self.context,
+                           fake_inst, flavor_id='flavor-id')
+
+ # Resizing to a flavor with root_gb=0 is rejected with CannotResizeDisk.
+ @mock.patch.object(flavors, 'get_flavor_by_flavor_id')
+ def test_resize_to_zero_disk_flavor_fails(self, get_flavor_by_flavor_id):
+ fake_inst = self._create_instance_obj()
+ fake_flavor = dict(id=200, flavorid='flavor-id', name='foo',
+                            root_gb=0)
+
+ get_flavor_by_flavor_id.return_value = fake_flavor
+
+ self.assertRaises(exception.CannotResizeDisk,
+                           self.compute_api.resize, self.context,
+                           fake_inst, flavor_id='flavor-id')
+
+    def test_resize_quota_exceeds_fails(self):
+        # When reserving the upsize quota delta raises OverQuota, resize
+        # must translate it into TooManyInstances and stop before any
+        # instance update or RPC cast.
+        self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id')
+        self.mox.StubOutWithMock(self.compute_api, '_upsize_quota_delta')
+        self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta')
+        # Should never reach these.
+        self.mox.StubOutWithMock(self.compute_api, 'update')
+        self.mox.StubOutWithMock(quota.QUOTAS, 'commit')
+        self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
+        self.mox.StubOutWithMock(self.compute_api.compute_task_api,
+                                 'resize_instance')
+
+        fake_inst = obj_base.obj_to_primitive(self._create_instance_obj())
+        current_flavor = flavors.extract_flavor(fake_inst)
+        fake_flavor = dict(id=200, flavorid='flavor-id', name='foo',
+                           disabled=False)
+        flavors.get_flavor_by_flavor_id(
+                'flavor-id', read_deleted='no').AndReturn(fake_flavor)
+        deltas = dict(resource=0)
+        self.compute_api._upsize_quota_delta(
+                self.context, fake_flavor,
+                current_flavor).AndReturn(deltas)
+        usage = dict(in_use=0, reserved=0)
+        quotas = {'resource': 0}
+        usages = {'resource': usage}
+        overs = ['resource']
+        # headroom = limit - (in_use + reserved), mirroring the payload
+        # the quota engine attaches to OverQuota.
+        headroom = {'resource': quotas['resource'] -
+            (usages['resource']['in_use'] + usages['resource']['reserved'])}
+        over_quota_args = dict(quotas=quotas,
+                               usages=usages,
+                               overs=overs,
+                               headroom=headroom)
+
+        self.compute_api._reserve_quota_delta(self.context, deltas,
+                                              fake_inst).AndRaise(
+                exception.OverQuota(**over_quota_args))
+
+        self.mox.ReplayAll()
+
+        self.assertRaises(exception.TooManyInstances,
+                          self.compute_api.resize, self.context,
+                          fake_inst, flavor_id='flavor-id')
+
+    def test_pause(self):
+        # Ensure instance can be paused.
+        instance = self._create_instance_obj()
+        self.assertEqual(instance.vm_state, vm_states.ACTIVE)
+        self.assertIsNone(instance.task_state)
+
+        self.mox.StubOutWithMock(instance, 'save')
+        self.mox.StubOutWithMock(self.compute_api,
+                                 '_record_action_start')
+        # In an API cell the request is routed via the cells RPC API,
+        # otherwise straight to the compute RPC API.
+        if self.cell_type == 'api':
+            rpcapi = self.compute_api.cells_rpcapi
+        else:
+            rpcapi = self.compute_api.compute_rpcapi
+        self.mox.StubOutWithMock(rpcapi, 'pause_instance')
+
+        # Expected call sequence: save, record action, RPC cast.
+        instance.save(expected_task_state=[None])
+        self.compute_api._record_action_start(self.context,
+                                              instance, instance_actions.PAUSE)
+        rpcapi.pause_instance(self.context, instance)
+
+        self.mox.ReplayAll()
+
+        self.compute_api.pause(self.context, instance)
+        # vm_state is untouched; only task_state moves to PAUSING.
+        self.assertEqual(vm_states.ACTIVE, instance.vm_state)
+        self.assertEqual(task_states.PAUSING,
+                         instance.task_state)
+
+    def _test_pause_fails(self, vm_state):
+        # Helper: pausing an instance in the given (invalid) vm_state
+        # must raise InstanceInvalidState.
+        params = dict(vm_state=vm_state)
+        instance = self._create_instance_obj(params=params)
+        self.assertIsNone(instance.task_state)
+        self.assertRaises(exception.InstanceInvalidState,
+                          self.compute_api.pause,
+                          self.context, instance)
+
+    def test_pause_fails_invalid_states(self):
+        # Every vm_state except ACTIVE is invalid for pause.
+        invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE]))
+        for state in invalid_vm_states:
+            self._test_pause_fails(state)
+
+    def test_unpause(self):
+        # Ensure instance can be unpaused.
+        params = dict(vm_state=vm_states.PAUSED)
+        instance = self._create_instance_obj(params=params)
+        self.assertEqual(instance.vm_state, vm_states.PAUSED)
+        self.assertIsNone(instance.task_state)
+
+        self.mox.StubOutWithMock(instance, 'save')
+        self.mox.StubOutWithMock(self.compute_api,
+                                 '_record_action_start')
+        # In an API cell the request is routed via the cells RPC API,
+        # otherwise straight to the compute RPC API.
+        if self.cell_type == 'api':
+            rpcapi = self.compute_api.cells_rpcapi
+        else:
+            rpcapi = self.compute_api.compute_rpcapi
+        self.mox.StubOutWithMock(rpcapi, 'unpause_instance')
+
+        # Expected call sequence: save, record action, RPC cast.
+        instance.save(expected_task_state=[None])
+        self.compute_api._record_action_start(self.context,
+                instance, instance_actions.UNPAUSE)
+        rpcapi.unpause_instance(self.context, instance)
+
+        self.mox.ReplayAll()
+
+        self.compute_api.unpause(self.context, instance)
+        # vm_state stays PAUSED; only task_state moves to UNPAUSING.
+        self.assertEqual(vm_states.PAUSED, instance.vm_state)
+        self.assertEqual(task_states.UNPAUSING, instance.task_state)
+
+    def test_swap_volume_volume_api_usage(self):
+        # This test ensures that volume_id arguments are passed to volume_api
+        # and that volumes return to previous states in case of error.
+
+        # Fake volume_api hooks operating on the local `volumes` dict so
+        # the test can observe the status transitions swap_volume drives.
+        def fake_vol_api_begin_detaching(context, volume_id):
+            self.assertTrue(uuidutils.is_uuid_like(volume_id))
+            volumes[volume_id]['status'] = 'detaching'
+
+        def fake_vol_api_roll_detaching(context, volume_id):
+            self.assertTrue(uuidutils.is_uuid_like(volume_id))
+            if volumes[volume_id]['status'] == 'detaching':
+                volumes[volume_id]['status'] = 'in-use'
+
+        def fake_vol_api_reserve(context, volume_id):
+            self.assertTrue(uuidutils.is_uuid_like(volume_id))
+            self.assertEqual(volumes[volume_id]['status'], 'available')
+            volumes[volume_id]['status'] = 'attaching'
+
+        def fake_vol_api_unreserve(context, volume_id):
+            self.assertTrue(uuidutils.is_uuid_like(volume_id))
+            if volumes[volume_id]['status'] == 'attaching':
+                volumes[volume_id]['status'] = 'available'
+
+        def fake_swap_volume_exc(context, instance, old_volume_id,
+                                 new_volume_id):
+            raise AttributeError  # Random exception
+
+        # Should fail if VM state is not valid
+        instance = {'vm_state': vm_states.BUILDING,
+                    'launched_at': timeutils.utcnow(),
+                    'locked': False,
+                    'availability_zone': 'fake_az',
+                    'uuid': 'fake'}
+        volumes = {}
+        old_volume_id = uuidutils.generate_uuid()
+        volumes[old_volume_id] = {'id': old_volume_id,
+                                  'display_name': 'old_volume',
+                                  'attach_status': 'attached',
+                                  'instance_uuid': 'fake',
+                                  'size': 5,
+                                  'status': 'in-use'}
+        new_volume_id = uuidutils.generate_uuid()
+        volumes[new_volume_id] = {'id': new_volume_id,
+                                  'display_name': 'new_volume',
+                                  'attach_status': 'detached',
+                                  'instance_uuid': None,
+                                  'size': 5,
+                                  'status': 'available'}
+        self.assertRaises(exception.InstanceInvalidState,
+                          self.compute_api.swap_volume, self.context, instance,
+                          volumes[old_volume_id], volumes[new_volume_id])
+        instance['vm_state'] = vm_states.ACTIVE
+        instance['task_state'] = None
+
+        # Should fail if old volume is not attached
+        volumes[old_volume_id]['attach_status'] = 'detached'
+        self.assertRaises(exception.VolumeUnattached,
+                          self.compute_api.swap_volume, self.context, instance,
+                          volumes[old_volume_id], volumes[new_volume_id])
+        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
+        self.assertEqual(volumes[new_volume_id]['status'], 'available')
+        volumes[old_volume_id]['attach_status'] = 'attached'
+
+        # Should fail if old volume's instance_uuid is not that of the instance
+        volumes[old_volume_id]['instance_uuid'] = 'fake2'
+        self.assertRaises(exception.InvalidVolume,
+                          self.compute_api.swap_volume, self.context, instance,
+                          volumes[old_volume_id], volumes[new_volume_id])
+        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
+        self.assertEqual(volumes[new_volume_id]['status'], 'available')
+        volumes[old_volume_id]['instance_uuid'] = 'fake'
+
+        # Should fail if new volume is attached
+        volumes[new_volume_id]['attach_status'] = 'attached'
+        self.assertRaises(exception.InvalidVolume,
+                          self.compute_api.swap_volume, self.context, instance,
+                          volumes[old_volume_id], volumes[new_volume_id])
+        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
+        self.assertEqual(volumes[new_volume_id]['status'], 'available')
+        volumes[new_volume_id]['attach_status'] = 'detached'
+
+        # Should fail if new volume is smaller than the old volume
+        volumes[new_volume_id]['size'] = 4
+        self.assertRaises(exception.InvalidVolume,
+                          self.compute_api.swap_volume, self.context, instance,
+                          volumes[old_volume_id], volumes[new_volume_id])
+        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
+        self.assertEqual(volumes[new_volume_id]['status'], 'available')
+        volumes[new_volume_id]['size'] = 5
+
+        # Fail call to swap_volume: both volumes must be rolled back to
+        # their pre-call statuses when the RPC raises.
+        self.stubs.Set(self.compute_api.volume_api, 'begin_detaching',
+                       fake_vol_api_begin_detaching)
+        self.stubs.Set(self.compute_api.volume_api, 'roll_detaching',
+                       fake_vol_api_roll_detaching)
+        self.stubs.Set(self.compute_api.volume_api, 'reserve_volume',
+                       fake_vol_api_reserve)
+        self.stubs.Set(self.compute_api.volume_api, 'unreserve_volume',
+                       fake_vol_api_unreserve)
+        self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume',
+                       fake_swap_volume_exc)
+        self.assertRaises(AttributeError,
+                          self.compute_api.swap_volume, self.context, instance,
+                          volumes[old_volume_id], volumes[new_volume_id])
+        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
+        self.assertEqual(volumes[new_volume_id]['status'], 'available')
+
+        # Should succeed
+        self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume',
+                       lambda c, instance, old_volume_id, new_volume_id: True)
+        self.compute_api.swap_volume(self.context, instance,
+                                     volumes[old_volume_id],
+                                     volumes[new_volume_id])
+
+    def _test_snapshot_and_backup(self, is_snapshot=True,
+                                  with_base_ref=False, min_ram=None,
+                                  min_disk=None,
+                                  create_fails=False,
+                                  instance_vm_state=vm_states.ACTIVE):
+        # Shared driver for the snapshot/backup tests: verifies image
+        # metadata construction, the image-create call, task_state
+        # handling and the final RPC cast, parameterized by the knobs
+        # above.
+        # 'cache_in_nova' is for testing non-inheritable properties
+        # 'user_id' should also not be carried from sys_meta into
+        # image property...since it should be set explicitly by
+        # _create_image() in compute api.
+        fake_sys_meta = dict(image_foo='bar', blah='bug?',
+                             image_cache_in_nova='dropped',
+                             cache_in_nova='dropped',
+                             user_id='meow')
+        if with_base_ref:
+            fake_sys_meta['image_base_image_ref'] = 'fake-base-ref'
+        params = dict(system_metadata=fake_sys_meta, locked=True)
+        instance = self._create_instance_obj(params=params)
+        instance.vm_state = instance_vm_state
+        fake_sys_meta.update(instance.system_metadata)
+        extra_props = dict(cow='moo', cat='meow')
+
+        self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
+        self.mox.StubOutWithMock(self.compute_api.image_api,
+                                 'create')
+        self.mox.StubOutWithMock(instance, 'save')
+        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
+                                 'snapshot_instance')
+        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
+                                 'backup_instance')
+
+        image_type = is_snapshot and 'snapshot' or 'backup'
+
+        # Build the metadata we expect _create_image to produce:
+        # non-inheritable and internal keys dropped, 'image_' prefix
+        # stripped from the rest.
+        expected_sys_meta = dict(fake_sys_meta)
+        expected_sys_meta.pop('cache_in_nova')
+        expected_sys_meta.pop('image_cache_in_nova')
+        expected_sys_meta.pop('user_id')
+        expected_sys_meta['foo'] = expected_sys_meta.pop('image_foo')
+        if with_base_ref:
+            expected_sys_meta['base_image_ref'] = expected_sys_meta.pop(
+                'image_base_image_ref')
+
+        expected_props = {'instance_uuid': instance.uuid,
+                          'user_id': self.context.user_id,
+                          'image_type': image_type}
+        expected_props.update(extra_props)
+        expected_props.update(expected_sys_meta)
+        expected_meta = {'name': 'fake-name',
+                         'is_public': False,
+                         'properties': expected_props}
+        if is_snapshot:
+            if min_ram is not None:
+                expected_meta['min_ram'] = min_ram
+            if min_disk is not None:
+                expected_meta['min_disk'] = min_disk
+        else:
+            expected_props['backup_type'] = 'fake-backup-type'
+
+        compute_utils.get_image_metadata(
+            self.context, self.compute_api.image_api,
+            FAKE_IMAGE_REF, instance).AndReturn(expected_meta)
+
+        fake_image = dict(id='fake-image-id')
+        mock_method = self.compute_api.image_api.create(
+                self.context, expected_meta)
+        if create_fails:
+            mock_method.AndRaise(test.TestingException())
+        else:
+            mock_method.AndReturn(fake_image)
+
+        def check_state(expected_task_state=None):
+            # Side effect on instance.save(): task_state must already be
+            # set appropriately when the save happens.
+            expected_state = (is_snapshot and
+                              task_states.IMAGE_SNAPSHOT_PENDING or
+                              task_states.IMAGE_BACKUP)
+            self.assertEqual(expected_state, instance.task_state)
+
+        # save + RPC cast only happen when image creation succeeded.
+        if not create_fails:
+            instance.save(expected_task_state=[None]).WithSideEffects(
+                    check_state)
+            if is_snapshot:
+                self.compute_api.compute_rpcapi.snapshot_instance(
+                        self.context, instance, fake_image['id'])
+            else:
+                self.compute_api.compute_rpcapi.backup_instance(
+                        self.context, instance, fake_image['id'],
+                        'fake-backup-type', 'fake-rotation')
+
+        self.mox.ReplayAll()
+
+        got_exc = False
+        try:
+            if is_snapshot:
+                res = self.compute_api.snapshot(self.context, instance,
+                                                'fake-name',
+                                                extra_properties=extra_props)
+            else:
+                res = self.compute_api.backup(self.context, instance,
+                                              'fake-name',
+                                              'fake-backup-type',
+                                              'fake-rotation',
+                                              extra_properties=extra_props)
+            self.assertEqual(fake_image, res)
+        except test.TestingException:
+            got_exc = True
+        # The TestingException is expected exactly when create_fails.
+        self.assertEqual(create_fails, got_exc)
+        self.mox.UnsetStubs()
+
+    def test_snapshot(self):
+        # Happy-path snapshot via the shared helper's defaults.
+        self._test_snapshot_and_backup()
+
+    def test_snapshot_fails(self):
+        # Image creation failure during snapshot propagates the error.
+        self._test_snapshot_and_backup(create_fails=True)
+
+    def test_snapshot_invalid_state(self):
+        # Snapshot is rejected while another snapshot/backup task is in
+        # flight, or when the instance is still building.
+        instance = self._create_instance_obj()
+        instance.vm_state = vm_states.ACTIVE
+        instance.task_state = task_states.IMAGE_SNAPSHOT
+        self.assertRaises(exception.InstanceInvalidState,
+                          self.compute_api.snapshot,
+                          self.context, instance, 'fake-name')
+        instance.vm_state = vm_states.ACTIVE
+        instance.task_state = task_states.IMAGE_BACKUP
+        self.assertRaises(exception.InstanceInvalidState,
+                          self.compute_api.snapshot,
+                          self.context, instance, 'fake-name')
+        instance.vm_state = vm_states.BUILDING
+        instance.task_state = None
+        self.assertRaises(exception.InstanceInvalidState,
+                          self.compute_api.snapshot,
+                          self.context, instance, 'fake-name')
+
+    def test_snapshot_with_base_image_ref(self):
+        # base_image_ref from system metadata is carried into the image.
+        self._test_snapshot_and_backup(with_base_ref=True)
+
+    def test_snapshot_min_ram(self):
+        # min_ram is preserved in the snapshot image metadata.
+        self._test_snapshot_and_backup(min_ram=42)
+
+    def test_snapshot_min_disk(self):
+        # min_disk is preserved in the snapshot image metadata.
+        self._test_snapshot_and_backup(min_disk=42)
+
+    def test_backup(self):
+        # Backup is allowed from every stable vm_state listed here.
+        for state in [vm_states.ACTIVE, vm_states.STOPPED,
+                      vm_states.PAUSED, vm_states.SUSPENDED]:
+            self._test_snapshot_and_backup(is_snapshot=False,
+                                           instance_vm_state=state)
+
+    def test_backup_fails(self):
+        # Image creation failure during backup propagates the error.
+        self._test_snapshot_and_backup(is_snapshot=False, create_fails=True)
+
+    def test_backup_invalid_state(self):
+        # Backup is rejected while another snapshot/backup task is in
+        # flight, or when the instance is still building.
+        instance = self._create_instance_obj()
+        instance.vm_state = vm_states.ACTIVE
+        instance.task_state = task_states.IMAGE_SNAPSHOT
+        self.assertRaises(exception.InstanceInvalidState,
+                          self.compute_api.backup,
+                          self.context, instance, 'fake-name',
+                          'fake', 'fake')
+        instance.vm_state = vm_states.ACTIVE
+        instance.task_state = task_states.IMAGE_BACKUP
+        self.assertRaises(exception.InstanceInvalidState,
+                          self.compute_api.backup,
+                          self.context, instance, 'fake-name',
+                          'fake', 'fake')
+        instance.vm_state = vm_states.BUILDING
+        instance.task_state = None
+        self.assertRaises(exception.InstanceInvalidState,
+                          self.compute_api.backup,
+                          self.context, instance, 'fake-name',
+                          'fake', 'fake')
+
+    def test_backup_with_base_image_ref(self):
+        # base_image_ref from system metadata is carried into the backup.
+        self._test_snapshot_and_backup(is_snapshot=False,
+                                       with_base_ref=True)
+
+    def test_snapshot_volume_backed(self):
+        # Exercise snapshot_volume_backed three times: with no BDMs,
+        # with a volume BDM (converted to a snapshot-backed mapping in
+        # the image), and with image mappings that must be filtered.
+        params = dict(locked=True)
+        instance = self._create_instance_obj(params=params)
+        instance['root_device_name'] = 'vda'
+
+        instance_bdms = []
+
+        image_meta = {
+            'id': 'fake-image-id',
+            'properties': {'mappings': []},
+            'status': 'fake-status',
+            'location': 'far-away',
+            'owner': 'fake-tenant',
+        }
+
+        expect_meta = {
+            'name': 'test-snapshot',
+            'properties': {'root_device_name': 'vda',
+                           'mappings': 'DONTCARE'},
+            'size': 0,
+            'is_public': False
+        }
+
+        def fake_get_all_by_instance(context, instance, use_slave=False):
+            # Deep copy so the production code cannot mutate our fixture.
+            return copy.deepcopy(instance_bdms)
+
+        def fake_image_create(context, image_meta, data=None):
+            # The assertion lives in the stub: every snapshot call must
+            # produce exactly the currently-expected metadata.
+            self.assertThat(image_meta, matchers.DictMatches(expect_meta))
+
+        def fake_volume_get(context, volume_id):
+            return {'id': volume_id, 'display_description': ''}
+
+        def fake_volume_create_snapshot(context, volume_id, name, description):
+            return {'id': '%s-snapshot' % volume_id}
+
+        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+                       fake_get_all_by_instance)
+        self.stubs.Set(self.compute_api.image_api, 'create',
+                       fake_image_create)
+        self.stubs.Set(self.compute_api.volume_api, 'get',
+                       fake_volume_get)
+        self.stubs.Set(self.compute_api.volume_api, 'create_snapshot_force',
+                       fake_volume_create_snapshot)
+
+        # No block devices defined
+        self.compute_api.snapshot_volume_backed(
+            self.context, instance, copy.deepcopy(image_meta), 'test-snapshot')
+
+        bdm = fake_block_device.FakeDbBlockDeviceDict(
+                {'no_device': False, 'volume_id': '1', 'boot_index': 0,
+                 'connection_info': 'inf', 'device_name': '/dev/vda',
+                 'source_type': 'volume', 'destination_type': 'volume'})
+        instance_bdms.append(bdm)
+
+        expect_meta['properties']['bdm_v2'] = True
+        expect_meta['properties']['block_device_mapping'] = []
+        expect_meta['properties']['block_device_mapping'].append(
+            {'guest_format': None, 'boot_index': 0, 'no_device': None,
+             'image_id': None, 'volume_id': None, 'disk_bus': None,
+             'volume_size': None, 'source_type': 'snapshot',
+             'device_type': None, 'snapshot_id': '1-snapshot',
+             'destination_type': 'volume', 'delete_on_termination': None})
+
+        # All the db_only fields and the volume ones are removed
+        self.compute_api.snapshot_volume_backed(
+            self.context, instance, copy.deepcopy(image_meta), 'test-snapshot')
+
+        image_mappings = [{'virtual': 'ami', 'device': 'vda'},
+                          {'device': 'vda', 'virtual': 'ephemeral0'},
+                          {'device': 'vdb', 'virtual': 'swap'},
+                          {'device': 'vdc', 'virtual': 'ephemeral1'}]
+
+        image_meta['properties']['mappings'] = image_mappings
+
+        expect_meta['properties']['mappings'] = [
+            {'virtual': 'ami', 'device': 'vda'}]
+
+        # Check that the mappings from the image properties are included
+        self.compute_api.snapshot_volume_backed(
+            self.context, instance, copy.deepcopy(image_meta), 'test-snapshot')
+
+    def test_volume_snapshot_create(self):
+        # volume_snapshot_create looks up the BDM for the volume, casts
+        # to the owning compute host, and returns a snapshot dict built
+        # from create_info and the volume id.
+        volume_id = '1'
+        create_info = {'id': 'eyedee'}
+        fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
+                'id': 123,
+                'device_name': '/dev/sda2',
+                'source_type': 'volume',
+                'destination_type': 'volume',
+                'connection_info': "{'fake': 'connection_info'}",
+                'volume_id': 1,
+                'boot_index': -1})
+        fake_bdm['instance'] = fake_instance.fake_db_instance()
+        fake_bdm['instance_uuid'] = fake_bdm['instance']['uuid']
+        fake_bdm = objects.BlockDeviceMapping._from_db_object(
+                self.context, objects.BlockDeviceMapping(),
+                fake_bdm, expected_attrs=['instance'])
+
+        self.mox.StubOutWithMock(objects.BlockDeviceMapping,
+                                 'get_by_volume_id')
+        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
+                                 'volume_snapshot_create')
+
+        objects.BlockDeviceMapping.get_by_volume_id(
+                self.context, volume_id,
+                expected_attrs=['instance']).AndReturn(fake_bdm)
+        self.compute_api.compute_rpcapi.volume_snapshot_create(self.context,
+                fake_bdm['instance'], volume_id, create_info)
+
+        self.mox.ReplayAll()
+
+        snapshot = self.compute_api.volume_snapshot_create(self.context,
+                volume_id, create_info)
+
+        expected_snapshot = {
+            'snapshot': {
+                'id': create_info['id'],
+                'volumeId': volume_id,
+            },
+        }
+        self.assertEqual(snapshot, expected_snapshot)
+
+    def test_volume_snapshot_delete(self):
+        # volume_snapshot_delete looks up the BDM for the volume and
+        # casts the delete to the owning compute host.
+        volume_id = '1'
+        snapshot_id = '2'
+        fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
+                'id': 123,
+                'device_name': '/dev/sda2',
+                'source_type': 'volume',
+                'destination_type': 'volume',
+                'connection_info': "{'fake': 'connection_info'}",
+                'volume_id': 1,
+                'boot_index': -1})
+        fake_bdm['instance'] = fake_instance.fake_db_instance()
+        fake_bdm['instance_uuid'] = fake_bdm['instance']['uuid']
+        fake_bdm = objects.BlockDeviceMapping._from_db_object(
+                self.context, objects.BlockDeviceMapping(),
+                fake_bdm, expected_attrs=['instance'])
+
+        self.mox.StubOutWithMock(objects.BlockDeviceMapping,
+                                 'get_by_volume_id')
+        self.mox.StubOutWithMock(self.compute_api.compute_rpcapi,
+                                 'volume_snapshot_delete')
+
+        objects.BlockDeviceMapping.get_by_volume_id(
+                self.context, volume_id,
+                expected_attrs=['instance']).AndReturn(fake_bdm)
+        self.compute_api.compute_rpcapi.volume_snapshot_delete(self.context,
+                fake_bdm['instance'], volume_id, snapshot_id, {})
+
+        self.mox.ReplayAll()
+
+        self.compute_api.volume_snapshot_delete(self.context, volume_id,
+                snapshot_id, {})
+
+    def _test_boot_volume_bootable(self, is_bootable=False):
+        # Helper: _get_bdm_image_metadata must reject a non-bootable
+        # boot volume and return default image metadata for a bootable
+        # one.
+        def get_vol_data(*args, **kwargs):
+            return {'bootable': is_bootable}
+        block_device_mapping = [{
+            'id': 1,
+            'device_name': 'vda',
+            'no_device': None,
+            'virtual_name': None,
+            'snapshot_id': None,
+            'volume_id': '1',
+            'delete_on_termination': False,
+        }]
+
+        expected_meta = {'min_disk': 0, 'min_ram': 0, 'properties': {},
+                         'size': 0, 'status': 'active'}
+
+        with mock.patch.object(self.compute_api.volume_api, 'get',
+                               side_effect=get_vol_data):
+            if not is_bootable:
+                self.assertRaises(exception.InvalidBDMVolumeNotBootable,
+                                  self.compute_api._get_bdm_image_metadata,
+                                  self.context, block_device_mapping)
+            else:
+                meta = self.compute_api._get_bdm_image_metadata(self.context,
+                    block_device_mapping)
+                self.assertEqual(expected_meta, meta)
+
+    def test_boot_volume_non_bootable(self):
+        # Non-bootable boot volume raises InvalidBDMVolumeNotBootable.
+        self._test_boot_volume_bootable(False)
+
+    def test_boot_volume_bootable(self):
+        # Bootable boot volume yields the default image metadata.
+        self._test_boot_volume_bootable(True)
+
+    def test_boot_volume_basic_property(self):
+        # volume_image_metadata on the boot volume is surfaced into the
+        # bdm image metadata (min_ram/min_disk promoted, the rest under
+        # 'properties').
+        block_device_mapping = [{
+            'id': 1,
+            'device_name': 'vda',
+            'no_device': None,
+            'virtual_name': None,
+            'snapshot_id': None,
+            'volume_id': '1',
+            'delete_on_termination': False,
+        }]
+        fake_volume = {"volume_image_metadata":
+                       {"min_ram": 256, "min_disk": 128, "foo": "bar"}}
+        with mock.patch.object(self.compute_api.volume_api, 'get',
+                               return_value=fake_volume):
+            meta = self.compute_api._get_bdm_image_metadata(
+                self.context, block_device_mapping)
+            self.assertEqual(256, meta['min_ram'])
+            self.assertEqual(128, meta['min_disk'])
+            self.assertEqual('active', meta['status'])
+            self.assertEqual('bar', meta['properties']['foo'])
+
+    def test_boot_volume_snapshot_basic_property(self):
+        # When booting from a snapshot, the snapshot's source volume is
+        # looked up and its volume_image_metadata is surfaced the same
+        # way as for a direct boot volume.
+        block_device_mapping = [{
+            'id': 1,
+            'device_name': 'vda',
+            'no_device': None,
+            'virtual_name': None,
+            'snapshot_id': '2',
+            'volume_id': None,
+            'delete_on_termination': False,
+        }]
+        fake_volume = {"volume_image_metadata":
+                       {"min_ram": 256, "min_disk": 128, "foo": "bar"}}
+        fake_snapshot = {"volume_id": "1"}
+        with contextlib.nested(
+                mock.patch.object(self.compute_api.volume_api, 'get',
+                                  return_value=fake_volume),
+                mock.patch.object(self.compute_api.volume_api, 'get_snapshot',
+                                  return_value=fake_snapshot)) as (
+                volume_get, volume_get_snapshot):
+            meta = self.compute_api._get_bdm_image_metadata(
+                self.context, block_device_mapping)
+            self.assertEqual(256, meta['min_ram'])
+            self.assertEqual(128, meta['min_disk'])
+            self.assertEqual('active', meta['status'])
+            self.assertEqual('bar', meta['properties']['foo'])
+            # Snapshot is resolved first, then its backing volume.
+            volume_get_snapshot.assert_called_once_with(self.context,
+                block_device_mapping[0]['snapshot_id'])
+            volume_get.assert_called_once_with(self.context,
+                fake_snapshot['volume_id'])
+
+    def _create_instance_with_disabled_disk_config(self, object=False):
+        # Helper: instance whose image disabled auto_disk_config, either
+        # as an Instance object or as a primitive dict.
+        sys_meta = {"image_auto_disk_config": "Disabled"}
+        params = {"system_metadata": sys_meta}
+        instance = self._create_instance_obj(params=params)
+        if object:
+            return instance
+        return obj_base.obj_to_primitive(instance)
+
+    def _setup_fake_image_with_disabled_disk_config(self):
+        # Helper: stub the fake image service so that 'show' returns an
+        # image whose auto_disk_config property is Disabled; returns the
+        # image id.
+        self.fake_image = {
+            'id': 1,
+            'name': 'fake_name',
+            'status': 'active',
+            'properties': {"auto_disk_config": "Disabled"},
+        }
+
+        def fake_show(obj, context, image_id, **kwargs):
+            return self.fake_image
+        fake_image.stub_out_image_service(self.stubs)
+        self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+        return self.fake_image['id']
+
+    def test_resize_with_disabled_auto_disk_config_fails(self):
+        # Requesting auto_disk_config=True on resize must fail when the
+        # image disabled it.
+        fake_inst = self._create_instance_with_disabled_disk_config()
+
+        self.assertRaises(exception.AutoDiskConfigDisabledByImage,
+                          self.compute_api.resize,
+                          self.context, fake_inst,
+                          auto_disk_config=True)
+
+    def test_create_with_disabled_auto_disk_config_fails(self):
+        # Requesting auto_disk_config=True on create must fail when the
+        # image disabled it.
+        image_id = self._setup_fake_image_with_disabled_disk_config()
+
+        self.assertRaises(exception.AutoDiskConfigDisabledByImage,
+                          self.compute_api.create, self.context,
+                          "fake_flavor", image_id, auto_disk_config=True)
+
+    def test_rebuild_with_disabled_auto_disk_config_fails(self):
+        # Requesting auto_disk_config=True on rebuild must fail when the
+        # image disabled it.
+        fake_inst = self._create_instance_with_disabled_disk_config(
+            object=True)
+        image_id = self._setup_fake_image_with_disabled_disk_config()
+        self.assertRaises(exception.AutoDiskConfigDisabledByImage,
+                          self.compute_api.rebuild,
+                          self.context,
+                          fake_inst,
+                          image_id,
+                          "new password",
+                          auto_disk_config=True)
+
+    @mock.patch.object(objects.Instance, 'save')
+    @mock.patch.object(objects.Instance, 'get_flavor')
+    @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+    @mock.patch.object(compute_api.API, '_get_image')
+    @mock.patch.object(compute_api.API, '_check_auto_disk_config')
+    @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
+    @mock.patch.object(compute_api.API, '_record_action_start')
+    def test_rebuild(self, _record_action_start,
+            _checks_for_create_and_rebuild, _check_auto_disk_config,
+            _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save):
+        # Rebuild with the same image: verifies the conductor task API
+        # call arguments and that system_metadata gets refreshed.
+        orig_system_metadata = {}
+        instance = fake_instance.fake_instance_obj(self.context,
+                vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+                launched_at=timeutils.utcnow(),
+                system_metadata=orig_system_metadata,
+                expected_attrs=['system_metadata'])
+        get_flavor.return_value = test_flavor.fake_flavor
+        flavor = instance.get_flavor()
+        image_href = ''
+        image = {"min_ram": 10, "min_disk": 1,
+                 "properties": {'architecture': arch.X86_64}}
+        admin_pass = ''
+        files_to_inject = []
+        bdms = []
+
+        _get_image.return_value = (None, image)
+        bdm_get_by_instance_uuid.return_value = bdms
+
+        with mock.patch.object(self.compute_api.compute_task_api,
+                'rebuild_instance') as rebuild_instance:
+            self.compute_api.rebuild(self.context, instance, image_href,
+                    admin_pass, files_to_inject)
+
+            # Same image: image_ref and orig_image_ref are identical.
+            rebuild_instance.assert_called_once_with(self.context,
+                    instance=instance, new_pass=admin_pass,
+                    injected_files=files_to_inject, image_ref=image_href,
+                    orig_image_ref=image_href,
+                    orig_sys_metadata=orig_system_metadata, bdms=bdms,
+                    preserve_ephemeral=False, host=instance.host, kwargs={})
+
+        _check_auto_disk_config.assert_called_once_with(image=image)
+        _checks_for_create_and_rebuild.assert_called_once_with(self.context,
+                None, image, flavor, {}, [])
+        # Rebuild repopulates system_metadata from the image.
+        self.assertNotEqual(orig_system_metadata, instance.system_metadata)
+
+    @mock.patch.object(objects.Instance, 'save')
+    @mock.patch.object(objects.Instance, 'get_flavor')
+    @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+    @mock.patch.object(compute_api.API, '_get_image')
+    @mock.patch.object(compute_api.API, '_check_auto_disk_config')
+    @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
+    @mock.patch.object(compute_api.API, '_record_action_start')
+    def test_rebuild_change_image(self, _record_action_start,
+            _checks_for_create_and_rebuild, _check_auto_disk_config,
+            _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save):
+        # Rebuild to a different image: the new image's properties (here
+        # vm_mode) must be applied to the instance.
+        orig_system_metadata = {}
+        get_flavor.return_value = test_flavor.fake_flavor
+        orig_image_href = 'orig_image'
+        orig_image = {"min_ram": 10, "min_disk": 1,
+                      "properties": {'architecture': arch.X86_64,
+                                     'vm_mode': 'hvm'}}
+        new_image_href = 'new_image'
+        new_image = {"min_ram": 10, "min_disk": 1,
+                     "properties": {'architecture': arch.X86_64,
+                                    'vm_mode': 'xen'}}
+        admin_pass = ''
+        files_to_inject = []
+        bdms = []
+
+        instance = fake_instance.fake_instance_obj(self.context,
+                vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+                launched_at=timeutils.utcnow(),
+                system_metadata=orig_system_metadata,
+                expected_attrs=['system_metadata'],
+                image_ref=orig_image_href,
+                vm_mode=vm_mode.HVM)
+        flavor = instance.get_flavor()
+
+        def get_image(context, image_href):
+            # Return the right fake image for whichever ref is asked for.
+            if image_href == new_image_href:
+                return (None, new_image)
+            if image_href == orig_image_href:
+                return (None, orig_image)
+        _get_image.side_effect = get_image
+        bdm_get_by_instance_uuid.return_value = bdms
+
+        with mock.patch.object(self.compute_api.compute_task_api,
+                'rebuild_instance') as rebuild_instance:
+            self.compute_api.rebuild(self.context, instance, new_image_href,
+                    admin_pass, files_to_inject)
+
+            rebuild_instance.assert_called_once_with(self.context,
+                    instance=instance, new_pass=admin_pass,
+                    injected_files=files_to_inject, image_ref=new_image_href,
+                    orig_image_ref=orig_image_href,
+                    orig_sys_metadata=orig_system_metadata, bdms=bdms,
+                    preserve_ephemeral=False, host=instance.host, kwargs={})
+
+        _check_auto_disk_config.assert_called_once_with(image=new_image)
+        _checks_for_create_and_rebuild.assert_called_once_with(self.context,
+                None, new_image, flavor, {}, [])
+        # vm_mode flipped from HVM to XEN per the new image's property.
+        self.assertEqual(vm_mode.XEN, instance.vm_mode)
+
+    def _test_check_injected_file_quota_onset_file_limit_exceeded(self,
+            side_effect):
+        # Helper: run _check_injected_file_quota with quota.limit_check
+        # raising the given side_effect; the caller asserts on the
+        # translated exception.
+        injected_files = [
+            {
+                "path": "/etc/banner.txt",
+                "contents": "foo"
+            }
+        ]
+        with mock.patch.object(quota.QUOTAS, 'limit_check',
+                               side_effect=side_effect):
+            self.compute_api._check_injected_file_quota(
+                self.context, injected_files)
+
+    def test_check_injected_file_quota_onset_file_limit_exceeded(self):
+        # This is the first call to limit_check.
+        side_effect = exception.OverQuota(overs='injected_files')
+        self.assertRaises(exception.OnsetFileLimitExceeded,
+            self._test_check_injected_file_quota_onset_file_limit_exceeded,
+            side_effect)
+
+    def test_check_injected_file_quota_onset_file_path_limit(self):
+        # This is the second call to limit_check.
+        side_effect = (mock.DEFAULT,
+                       exception.OverQuota(overs='injected_file_path_bytes'))
+        self.assertRaises(exception.OnsetFilePathLimitExceeded,
+            self._test_check_injected_file_quota_onset_file_limit_exceeded,
+            side_effect)
+
+    def test_check_injected_file_quota_onset_file_content_limit(self):
+        # This is the second call to limit_check but with different overs.
+        side_effect = (mock.DEFAULT,
+                       exception.OverQuota(overs='injected_file_content_bytes'))
+        self.assertRaises(exception.OnsetFileContentLimitExceeded,
+            self._test_check_injected_file_quota_onset_file_limit_exceeded,
+            side_effect)
+
+    @mock.patch('nova.objects.Quotas.commit')
+    @mock.patch('nova.objects.Quotas.reserve')
+    @mock.patch('nova.objects.Instance.save')
+    @mock.patch('nova.objects.InstanceAction.action_start')
+    def test_restore(self, action_start, instance_save, quota_reserve,
+                     quota_commit):
+        # Restoring a soft-deleted instance casts restore_instance over
+        # RPC, moves task_state to RESTORING and commits the quota
+        # reservation exactly once.
+        instance = self._create_instance_obj()
+        instance.vm_state = vm_states.SOFT_DELETED
+        instance.task_state = None
+        instance.save()
+        with mock.patch.object(self.compute_api, 'compute_rpcapi') as rpc:
+            self.compute_api.restore(self.context, instance)
+            rpc.restore_instance.assert_called_once_with(self.context,
+                                                         instance)
+        self.assertEqual(instance.task_state, task_states.RESTORING)
+        self.assertEqual(1, quota_commit.call_count)
+
+    def test_external_instance_event(self):
+        # Events are fanned out grouped by instance host: one RPC call
+        # per host, each carrying only that host's instances/events.
+        instances = [
+            objects.Instance(uuid='uuid1', host='host1'),
+            objects.Instance(uuid='uuid2', host='host1'),
+            objects.Instance(uuid='uuid3', host='host2'),
+            ]
+        events = [
+            objects.InstanceExternalEvent(instance_uuid='uuid1'),
+            objects.InstanceExternalEvent(instance_uuid='uuid2'),
+            objects.InstanceExternalEvent(instance_uuid='uuid3'),
+            ]
+        self.compute_api.compute_rpcapi = mock.MagicMock()
+        self.compute_api.external_instance_event(self.context,
+                                                 instances, events)
+        method = self.compute_api.compute_rpcapi.external_instance_event
+        method.assert_any_call(self.context, instances[0:2], events[0:2])
+        method.assert_any_call(self.context, instances[2:], events[2:])
+        self.assertEqual(2, method.call_count)
+
+    def test_volume_ops_invalid_task_state(self):
+        # attach/detach/swap volume are all rejected while any task is
+        # in progress on the instance.
+        instance = self._create_instance_obj()
+        self.assertEqual(instance.vm_state, vm_states.ACTIVE)
+        instance.task_state = 'Any'
+        volume_id = uuidutils.generate_uuid()
+        self.assertRaises(exception.InstanceInvalidState,
+                          self.compute_api.attach_volume,
+                          self.context, instance, volume_id)
+
+        self.assertRaises(exception.InstanceInvalidState,
+                          self.compute_api.detach_volume,
+                          self.context, instance, volume_id)
+
+        new_volume_id = uuidutils.generate_uuid()
+        self.assertRaises(exception.InstanceInvalidState,
+                          self.compute_api.swap_volume,
+                          self.context, instance,
+                          volume_id, new_volume_id)
+
+    @mock.patch.object(cinder.API, 'get',
+             side_effect=exception.CinderConnectionFailed(reason='error'))
+    def test_get_bdm_image_metadata_with_cinder_down(self, mock_get):
+        # A Cinder connection failure during BDM metadata lookup is
+        # propagated unchanged to the caller.
+        bdms = [objects.BlockDeviceMapping(
+                **fake_block_device.FakeDbBlockDeviceDict(
+                {
+                 'id': 1,
+                 'volume_id': 1,
+                 'source_type': 'volume',
+                 'destination_type': 'volume',
+                 'device_name': 'vda',
+                 }))]
+        self.assertRaises(exception.CinderConnectionFailed,
+                          self.compute_api._get_bdm_image_metadata,
+                          self.context,
+                          bdms, legacy_bdm=True)
+
    @mock.patch.object(cinder.API, 'get')
    @mock.patch.object(cinder.API, 'check_attach',
                       side_effect=exception.InvalidVolume(reason='error'))
    def test_validate_bdm_with_error_volume(self, mock_check_attach, mock_get):
        # Tests that an InvalidVolume exception raised from
        # volume_api.check_attach due to the volume status not being
        # 'available' results in _validate_bdm re-raising InvalidVolume.
        instance = self._create_instance_obj()
        instance_type = self._create_flavor()
        volume_id = 'e856840e-9f5b-4894-8bde-58c6e29ac1e8'
        # Volume is in 'error' state, so check_attach (mocked above) fails.
        volume_info = {'status': 'error',
                       'attach_status': 'detached',
                       'id': volume_id}
        mock_get.return_value = volume_info
        bdms = [objects.BlockDeviceMapping(
                **fake_block_device.FakeDbBlockDeviceDict(
                    {
                     'boot_index': 0,
                     'volume_id': volume_id,
                     'source_type': 'volume',
                     'destination_type': 'volume',
                     'device_name': 'vda',
                     }))]

        self.assertRaises(exception.InvalidVolume,
                          self.compute_api._validate_bdm,
                          self.context,
                          instance, instance_type, bdms)

        # Both cinder calls must have been made exactly once with the
        # volume looked up from the BDM.
        mock_get.assert_called_once_with(self.context, volume_id)
        mock_check_attach.assert_called_once_with(
            self.context, volume_info, instance=instance)
+
    @mock.patch.object(cinder.API, 'get_snapshot',
            side_effect=exception.CinderConnectionFailed(reason='error'))
    @mock.patch.object(cinder.API, 'get',
            side_effect=exception.CinderConnectionFailed(reason='error'))
    def test_validate_bdm_with_cinder_down(self, mock_get, mock_get_snapshot):
        """_validate_bdm propagates CinderConnectionFailed for both the
        volume-backed and snapshot-backed lookup paths.
        """
        instance = self._create_instance_obj()
        instance_type = self._create_flavor()
        # BDM resolved via cinder.API.get (volume lookup).
        bdm = [objects.BlockDeviceMapping(
                **fake_block_device.FakeDbBlockDeviceDict(
                    {
                     'id': 1,
                     'volume_id': 1,
                     'source_type': 'volume',
                     'destination_type': 'volume',
                     'device_name': 'vda',
                     'boot_index': 0,
                     }))]
        # BDM resolved via cinder.API.get_snapshot (snapshot lookup).
        bdms = [objects.BlockDeviceMapping(
                **fake_block_device.FakeDbBlockDeviceDict(
                    {
                     'id': 1,
                     'snapshot_id': 1,
                     'source_type': 'volume',
                     'destination_type': 'volume',
                     'device_name': 'vda',
                     'boot_index': 0,
                     }))]
        self.assertRaises(exception.CinderConnectionFailed,
                          self.compute_api._validate_bdm,
                          self.context,
                          instance, instance_type, bdm)
        self.assertRaises(exception.CinderConnectionFailed,
                          self.compute_api._validate_bdm,
                          self.context,
                          instance, instance_type, bdms)
+
    def _test_create_db_entry_for_new_instance_with_cinder_error(self,
            expected_exception):
        """Shared helper: create_db_entry_for_new_instance must raise the
        given exception when cinder volume validation fails, and must
        destroy the partially-created instance on the way out.

        Callers patch cinder.API.get to produce the failure mode.
        """

        # Decorators apply bottom-up, so the innermost patch
        # (_populate_instance_for_create) maps to the first mock argument.
        @mock.patch.object(objects.Instance, 'create')
        @mock.patch.object(compute_api.SecurityGroupAPI, 'ensure_default')
        @mock.patch.object(compute_api.API, '_populate_instance_names')
        @mock.patch.object(compute_api.API, '_populate_instance_for_create')
        def do_test(self, mock_create, mock_names, mock_ensure,
                    mock_inst_create):
            instance = self._create_instance_obj()
            instance['display_name'] = 'FAKE_DISPLAY_NAME'
            instance['shutdown_terminate'] = False
            instance_type = self._create_flavor()
            fake_image = {
                'id': 'fake-image-id',
                'properties': {'mappings': []},
                'status': 'fake-status',
                'location': 'far-away'}
            fake_security_group = None
            fake_num_instances = 1
            fake_index = 1
            bdm = [objects.BlockDeviceMapping(
                    **fake_block_device.FakeDbBlockDeviceDict(
                        {
                         'id': 1,
                         'volume_id': 1,
                         'source_type': 'volume',
                         'destination_type': 'volume',
                         'device_name': 'vda',
                         'boot_index': 0,
                         }))]
            with mock.patch.object(instance, "destroy") as destroy:
                self.assertRaises(expected_exception,
                                  self.compute_api.
                                      create_db_entry_for_new_instance,
                                  self.context,
                                  instance_type,
                                  fake_image,
                                  instance,
                                  fake_security_group,
                                  bdm,
                                  fake_num_instances,
                                  fake_index)
                # Failure must clean up the instance record.
                destroy.assert_called_once_with(self.context)

        # We use a nested method so we can decorate with the mocks.
        do_test(self)
+
    @mock.patch.object(cinder.API, 'get',
            side_effect=exception.CinderConnectionFailed(reason='error'))
    # NOTE(review): method name is missing an underscore ("instancewith");
    # kept as-is since test discovery works off the 'test_' prefix.
    def test_create_db_entry_for_new_instancewith_cinder_down(self, mock_get):
        """Cinder being unreachable propagates CinderConnectionFailed."""
        self._test_create_db_entry_for_new_instance_with_cinder_error(
            expected_exception=exception.CinderConnectionFailed)
+
    @mock.patch.object(cinder.API, 'get',
                       return_value={'id': 1, 'status': 'error',
                                     'attach_status': 'detached'})
    # NOTE(review): method name is missing an underscore ("instancewith");
    # kept as-is since test discovery works off the 'test_' prefix.
    def test_create_db_entry_for_new_instancewith_error_volume(self, mock_get):
        """A volume in 'error' state propagates InvalidVolume."""
        self._test_create_db_entry_for_new_instance_with_cinder_error(
            expected_exception=exception.InvalidVolume)
+
    def _test_rescue(self, vm_state):
        """Shared helper: rescue() from the given vm_state should set
        task_state to RESCUING, record the action, and RPC to compute.
        """
        instance = self._create_instance_obj(params={'vm_state': vm_state})
        bdms = []
        with contextlib.nested(
            mock.patch.object(objects.BlockDeviceMappingList,
                              'get_by_instance_uuid', return_value=bdms),
            mock.patch.object(self.compute_api, 'is_volume_backed_instance',
                              return_value=False),
            mock.patch.object(instance, 'save'),
            mock.patch.object(self.compute_api, '_record_action_start'),
            mock.patch.object(self.compute_api.compute_rpcapi,
                              'rescue_instance')
        ) as (
            bdm_get_by_instance_uuid, volume_backed_inst, instance_save,
            record_action_start, rpcapi_rescue_instance
        ):
            self.compute_api.rescue(self.context, instance)
            # assert field values set on the instance object
            self.assertEqual(task_states.RESCUING, instance.task_state)
            # assert our mock calls
            bdm_get_by_instance_uuid.assert_called_once_with(
                self.context, instance.uuid)
            volume_backed_inst.assert_called_once_with(
                self.context, instance, bdms)
            instance_save.assert_called_once_with(expected_task_state=[None])
            record_action_start.assert_called_once_with(
                self.context, instance, instance_actions.RESCUE)
            rpcapi_rescue_instance.assert_called_once_with(
                self.context, instance=instance, rescue_password=None,
                rescue_image_ref=None)
+
    def test_rescue_active(self):
        # Rescue is permitted from ACTIVE.
        self._test_rescue(vm_state=vm_states.ACTIVE)
+
    def test_rescue_stopped(self):
        # Rescue is permitted from STOPPED.
        self._test_rescue(vm_state=vm_states.STOPPED)
+
    def test_rescue_error(self):
        # Rescue is permitted from ERROR.
        self._test_rescue(vm_state=vm_states.ERROR)
+
    def test_unrescue(self):
        """unrescue() on a RESCUED instance sets UNRESCUING, records the
        action and RPCs to compute.
        """
        instance = self._create_instance_obj(
            params={'vm_state': vm_states.RESCUED})
        with contextlib.nested(
            mock.patch.object(instance, 'save'),
            mock.patch.object(self.compute_api, '_record_action_start'),
            mock.patch.object(self.compute_api.compute_rpcapi,
                              'unrescue_instance')
        ) as (
            instance_save, record_action_start, rpcapi_unrescue_instance
        ):
            self.compute_api.unrescue(self.context, instance)
            # assert field values set on the instance object
            self.assertEqual(task_states.UNRESCUING, instance.task_state)
            # assert our mock calls
            instance_save.assert_called_once_with(expected_task_state=[None])
            record_action_start.assert_called_once_with(
                self.context, instance, instance_actions.UNRESCUE)
            rpcapi_unrescue_instance.assert_called_once_with(
                self.context, instance=instance)
+
    def test_set_admin_password_invalid_state(self):
        # Tests that InstanceInvalidState is raised when not ACTIVE.
        instance = self._create_instance_obj({'vm_state': vm_states.STOPPED})
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.set_admin_password,
                          self.context, instance)
+
    def test_set_admin_password(self):
        # Ensure instance can have its admin password set.
        instance = self._create_instance_obj()

        # Decorators apply bottom-up: innermost (set_admin_password RPC)
        # maps to the first mock argument.
        @mock.patch.object(objects.Instance, 'save')
        @mock.patch.object(self.compute_api, '_record_action_start')
        @mock.patch.object(self.compute_api.compute_rpcapi,
                           'set_admin_password')
        def do_test(compute_rpcapi_mock, record_mock, instance_save_mock):
            # call the API
            self.compute_api.set_admin_password(self.context, instance)
            # make our assertions
            instance_save_mock.assert_called_once_with(
                expected_task_state=[None])
            record_mock.assert_called_once_with(
                self.context, instance, instance_actions.CHANGE_PASSWORD)
            compute_rpcapi_mock.assert_called_once_with(
                self.context, instance=instance, new_pass=None)

        do_test()
+
    def _test_attach_interface_invalid_state(self, state):
        """Helper: attach_interface must reject an instance in *state*."""
        instance = self._create_instance_obj(
            params={'vm_state': state})
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.attach_interface,
                          self.context, instance, '', '', '', [])
+
+ def test_attach_interface_invalid_state(self):
+ for state in [vm_states.BUILDING, vm_states.DELETED,
+ vm_states.ERROR, vm_states.RESCUED,
+ vm_states.RESIZED, vm_states.SOFT_DELETED,
+ vm_states.SUSPENDED, vm_states.SHELVED,
+ vm_states.SHELVED_OFFLOADED]:
+ self._test_attach_interface_invalid_state(state)
+
    def _test_detach_interface_invalid_state(self, state):
        """Helper: detach_interface must reject an instance in *state*."""
        instance = self._create_instance_obj(
            params={'vm_state': state})
        self.assertRaises(exception.InstanceInvalidState,
                          self.compute_api.detach_interface,
                          self.context, instance, '', '', '', [])
+
+ def test_detach_interface_invalid_state(self):
+ for state in [vm_states.BUILDING, vm_states.DELETED,
+ vm_states.ERROR, vm_states.RESCUED,
+ vm_states.RESIZED, vm_states.SOFT_DELETED,
+ vm_states.SUSPENDED, vm_states.SHELVED,
+ vm_states.SHELVED_OFFLOADED]:
+ self._test_detach_interface_invalid_state(state)
+
+
class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
    """Run the shared compute API tests against the plain (non-cells) API."""

    def setUp(self):
        super(ComputeAPIUnitTestCase, self).setUp()
        self.compute_api = compute_api.API()
        self.cell_type = None

    def test_resize_same_flavor_fails(self):
        # Outside a compute cell, resizing to the current flavor is an error.
        self.assertRaises(exception.CannotResizeToSameFlavor,
                          self._test_resize, same_flavor=True)
+
+
class ComputeAPIAPICellUnitTestCase(_ComputeAPIUnitTestMixIn,
                                    test.NoDBTestCase):
    """Run the shared compute API tests as an API-level cell."""

    def setUp(self):
        super(ComputeAPIAPICellUnitTestCase, self).setUp()
        self.flags(cell_type='api', enable=True, group='cells')
        self.compute_api = compute_cells_api.ComputeCellsAPI()
        self.cell_type = 'api'

    def test_resize_same_flavor_fails(self):
        # The API cell also rejects same-flavor resizes.
        self.assertRaises(exception.CannotResizeToSameFlavor,
                          self._test_resize, same_flavor=True)
+
+
class ComputeAPIComputeCellUnitTestCase(_ComputeAPIUnitTestMixIn,
                                        test.NoDBTestCase):
    """Run the shared compute API tests as a compute-level cell."""

    def setUp(self):
        super(ComputeAPIComputeCellUnitTestCase, self).setUp()
        self.flags(cell_type='compute', enable=True, group='cells')
        self.compute_api = compute_api.API()
        self.cell_type = 'compute'

    def test_resize_same_flavor_passes(self):
        # Unlike the API level, a compute cell allows same-flavor resize
        # (the API cell has already validated the request).
        self._test_resize(same_flavor=True)
+
+
class DiffDictTestCase(test.NoDBTestCase):
    """Unit tests for _diff_dict()."""

    def test_no_change(self):
        """Identical dicts produce an empty diff."""
        base = {'a': 1, 'b': 2, 'c': 3}
        updated = {'a': 1, 'b': 2, 'c': 3}
        result = compute_api._diff_dict(base, updated)

        self.assertEqual(result, {})

    def test_new_key(self):
        """An added key is reported as ['+', value]."""
        base = {'a': 1, 'b': 2, 'c': 3}
        updated = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
        result = compute_api._diff_dict(base, updated)

        self.assertEqual(result, {'d': ['+', 4]})

    def test_changed_key(self):
        """A changed value is reported as ['+', new_value]."""
        base = {'a': 1, 'b': 2, 'c': 3}
        updated = {'a': 1, 'b': 4, 'c': 3}
        result = compute_api._diff_dict(base, updated)

        self.assertEqual(result, {'b': ['+', 4]})

    def test_removed_key(self):
        """A removed key is reported as ['-']."""
        base = {'a': 1, 'b': 2, 'c': 3}
        updated = {'a': 1, 'c': 3}
        result = compute_api._diff_dict(base, updated)

        self.assertEqual(result, {'b': ['-']})
+
+
class SecurityGroupAPITest(test.NoDBTestCase):
    """Tests for the nova-network SecurityGroupAPI wrapper."""

    def setUp(self):
        super(SecurityGroupAPITest, self).setUp()
        self.secgroup_api = compute_api.SecurityGroupAPI()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id)

    @mock.patch('nova.objects.security_group.SecurityGroupList.'
                'get_by_instance')
    def test_get_instance_security_groups(self, mock_get):
        """Group objects are flattened to [{'name': ...}] dicts."""
        groups = objects.SecurityGroupList()
        groups.objects = [objects.SecurityGroup(name='foo'),
                          objects.SecurityGroup(name='bar')]
        mock_get.return_value = groups
        names = self.secgroup_api.get_instance_security_groups(self.context,
                                                               'fake-uuid')
        # Sorted because the API makes no ordering guarantee.
        self.assertEqual([{'name': 'bar'}, {'name': 'foo'}], sorted(names))
        self.assertEqual(1, mock_get.call_count)
        # The lookup is made with an Instance object carrying our uuid.
        self.assertEqual('fake-uuid', mock_get.call_args_list[0][0][1].uuid)
diff --git a/nova/tests/unit/compute/test_compute_cells.py b/nova/tests/unit/compute/test_compute_cells.py
new file mode 100644
index 0000000000..9908e6aad3
--- /dev/null
+++ b/nova/tests/unit/compute/test_compute_cells.py
@@ -0,0 +1,332 @@
+# Copyright (c) 2012 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Compute w/ Cells
+"""
+import functools
+import inspect
+
+import mock
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+from nova.cells import manager
+from nova.compute import api as compute_api
+from nova.compute import cells_api as compute_cells_api
+from nova.compute import delete_types
+from nova.compute import flavors
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import objects
+from nova import quota
+from nova import test
+from nova.tests.unit.compute import test_compute
+from nova.tests.unit import fake_instance
+
+
# Populated by each test case's setUp() with the pre-cells compute API so the
# stub call/cast helpers below can delegate to the original implementation.
ORIG_COMPUTE_API = None
cfg.CONF.import_opt('enable', 'nova.cells.opts', group='cells')
+
+
def stub_call_to_cells(context, instance, method, *args, **kwargs):
    """Simulate a synchronous cell "call" by invoking the original
    (pre-cells) compute API method directly and returning its result.
    """
    fn = getattr(ORIG_COMPUTE_API, method)
    original_instance = kwargs.pop('original_instance', None)
    if original_instance:
        instance = original_instance
        # Restore this in 'child cell DB'
        db.instance_update(context, instance['uuid'],
                           dict(vm_state=instance['vm_state'],
                                task_state=instance['task_state']))

    # Use NoopQuotaDriver in child cells.
    saved_quotas = quota.QUOTAS
    quota.QUOTAS = quota.QuotaEngine(
            quota_driver_class=quota.NoopQuotaDriver())
    compute_api.QUOTAS = quota.QUOTAS
    try:
        return fn(context, instance, *args, **kwargs)
    finally:
        # Always restore the real quota engine, even on failure.
        quota.QUOTAS = saved_quotas
        compute_api.QUOTAS = saved_quotas
+
+
def stub_cast_to_cells(context, instance, method, *args, **kwargs):
    """Simulate an asynchronous cell "cast": same as stub_call_to_cells
    but fire-and-forget (the method's return value is discarded).
    """
    fn = getattr(ORIG_COMPUTE_API, method)
    original_instance = kwargs.pop('original_instance', None)
    if original_instance:
        instance = original_instance
        # Restore this in 'child cell DB'
        db.instance_update(context, instance['uuid'],
                           dict(vm_state=instance['vm_state'],
                                task_state=instance['task_state']))

    # Use NoopQuotaDriver in child cells.
    saved_quotas = quota.QUOTAS
    quota.QUOTAS = quota.QuotaEngine(
            quota_driver_class=quota.NoopQuotaDriver())
    compute_api.QUOTAS = quota.QUOTAS
    try:
        fn(context, instance, *args, **kwargs)
    finally:
        # Always restore the real quota engine, even on failure.
        quota.QUOTAS = saved_quotas
        compute_api.QUOTAS = saved_quotas
+
+
def deploy_stubs(stubs, api, original_instance=None):
    """Point the compute API's cell call/cast hooks at the local stubs.

    If original_instance is given, it is bound into the stubs so the child
    cell's view of the instance can be restored on each call.
    """
    if original_instance:
        call = functools.partial(stub_call_to_cells,
                                 original_instance=original_instance)
        cast = functools.partial(stub_cast_to_cells,
                                 original_instance=original_instance)
    else:
        call = stub_call_to_cells
        cast = stub_cast_to_cells

    stubs.Set(api, '_call_to_cells', call)
    stubs.Set(api, '_cast_to_cells', cast)
+
+
class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
    """Re-run the base compute API tests through ComputeCellsAPI.

    Cell RPC traffic is stubbed to call straight into the original
    (pre-cells) compute API; cell-incompatible tests are skipped.
    """

    def setUp(self):
        super(CellsComputeAPITestCase, self).setUp()
        global ORIG_COMPUTE_API
        ORIG_COMPUTE_API = self.compute_api
        self.flags(enable=True, group='cells')

        def _fake_cell_read_only(*args, **kwargs):
            return False

        def _fake_validate_cell(*args, **kwargs):
            return

        def _nop_update(context, instance, **kwargs):
            return instance

        self.compute_api = compute_cells_api.ComputeCellsAPI()
        self.stubs.Set(self.compute_api, '_cell_read_only',
                       _fake_cell_read_only)
        self.stubs.Set(self.compute_api, '_validate_cell',
                       _fake_validate_cell)

        # NOTE(belliott) Don't update the instance state
        # for the tests at the API layer. Let it happen after
        # the stub cast to cells so that expected_task_states
        # match.
        self.stubs.Set(self.compute_api, 'update', _nop_update)

        deploy_stubs(self.stubs, self.compute_api)

    def tearDown(self):
        # Restore the pre-cells API saved in setUp().
        global ORIG_COMPUTE_API
        self.compute_api = ORIG_COMPUTE_API
        super(CellsComputeAPITestCase, self).tearDown()

    def test_instance_metadata(self):
        self.skipTest("Test is incompatible with cells.")

    def test_evacuate(self):
        self.skipTest("Test is incompatible with cells.")

    def test_error_evacuate(self):
        self.skipTest("Test is incompatible with cells.")

    def test_delete_instance_no_cell(self):
        """delete() on an instance with no cell broadcasts the delete
        to every cell.
        """
        cells_rpcapi = self.compute_api.cells_rpcapi
        self.mox.StubOutWithMock(cells_rpcapi,
                                 'instance_delete_everywhere')
        inst = self._create_fake_instance_obj()
        cells_rpcapi.instance_delete_everywhere(self.context,
                inst, delete_types.DELETE)
        self.mox.ReplayAll()
        self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
                       lambda *a, **kw: None)
        self.compute_api.delete(self.context, inst)

    def test_soft_delete_instance_no_cell(self):
        """soft_delete() likewise broadcasts with SOFT_DELETE semantics."""
        cells_rpcapi = self.compute_api.cells_rpcapi
        self.mox.StubOutWithMock(cells_rpcapi,
                                 'instance_delete_everywhere')
        inst = self._create_fake_instance_obj()
        cells_rpcapi.instance_delete_everywhere(self.context,
                inst, delete_types.SOFT_DELETE)
        self.mox.ReplayAll()
        self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
                       lambda *a, **kw: None)
        self.compute_api.soft_delete(self.context, inst)

    def test_get_migrations(self):
        """get_migrations() is delegated to the cells RPC API verbatim."""
        filters = {'cell_name': 'ChildCell', 'status': 'confirmed'}
        migrations = {'migrations': [{'id': 1234}]}
        cells_rpcapi = self.compute_api.cells_rpcapi
        self.mox.StubOutWithMock(cells_rpcapi, 'get_migrations')
        cells_rpcapi.get_migrations(self.context,
                                    filters).AndReturn(migrations)
        self.mox.ReplayAll()

        response = self.compute_api.get_migrations(self.context, filters)

        self.assertEqual(migrations, response)

    @mock.patch('nova.cells.messaging._TargetedMessage')
    def test_rebuild_sig(self, mock_msg):
        # TODO(belliott) Cells could benefit from better testing to ensure API
        # and manager signatures stay up to date

        def wire(version):
            # wire the rpc cast directly to the manager method to make sure
            # the signature matches
            cells_mgr = manager.CellsManager()

            def cast(context, method, *args, **kwargs):
                fn = getattr(cells_mgr, method)
                fn(context, *args, **kwargs)

            cells_mgr.cast = cast
            return cells_mgr

        cells_rpcapi = self.compute_api.cells_rpcapi
        client = cells_rpcapi.client

        with mock.patch.object(client, 'prepare', side_effect=wire):
            inst = self._create_fake_instance_obj()
            inst.cell_name = 'mycell'

            cells_rpcapi.rebuild_instance(self.context, inst, 'pass', None,
                                          None, None, None, None,
                                          recreate=False,
                                          on_shared_storage=False,
                                          host='host',
                                          preserve_ephemeral=True,
                                          kwargs=None)

        # one targeted message should have been created
        self.assertEqual(1, mock_msg.call_count)
+
+
class CellsConductorAPIRPCRedirect(test.NoDBTestCase):
    """Verify that conductor-bound operations are redirected to the cells
    RPC API when cells are enabled.

    Each test only asserts that the corresponding cells_rpcapi method was
    invoked; argument checking is done in the non-cells compute tests.
    """

    def setUp(self):
        super(CellsConductorAPIRPCRedirect, self).setUp()

        self.compute_api = compute_cells_api.ComputeCellsAPI()
        # Replace the task API's cells RPC layer so calls can be observed.
        self.cells_rpcapi = mock.MagicMock()
        self.compute_api._compute_task_api.cells_rpcapi = self.cells_rpcapi

        self.context = context.RequestContext('fake', 'fake')

    @mock.patch.object(compute_api.API, '_record_action_start')
    @mock.patch.object(compute_api.API, '_provision_instances')
    @mock.patch.object(compute_api.API, '_check_and_transform_bdm')
    @mock.patch.object(compute_api.API, '_get_image')
    @mock.patch.object(compute_api.API, '_validate_and_build_base_options')
    def test_build_instances(self, _validate, _get_image, _check_bdm,
                             _provision, _record_action_start):
        _get_image.return_value = (None, 'fake-image')
        _validate.return_value = ({}, 1)
        _check_bdm.return_value = 'bdms'
        _provision.return_value = 'instances'

        self.compute_api.create(self.context, 'fake-flavor', 'fake-image')

        # Subsequent tests in class are verifying the hooking. We don't check
        # args since this is verified in compute test code.
        self.assertTrue(self.cells_rpcapi.build_instances.called)

    @mock.patch.object(compute_api.API, '_record_action_start')
    @mock.patch.object(compute_api.API, '_resize_cells_support')
    @mock.patch.object(compute_api.API, '_reserve_quota_delta')
    @mock.patch.object(compute_api.API, '_upsize_quota_delta')
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(flavors, 'extract_flavor')
    @mock.patch.object(compute_api.API, '_check_auto_disk_config')
    def test_resize_instance(self, _check, _extract, _save, _upsize, _reserve,
                             _cells, _record):
        _extract.return_value = {'name': 'fake', 'id': 'fake'}
        orig_system_metadata = {}
        instance = fake_instance.fake_instance_obj(self.context,
                vm_state=vm_states.ACTIVE, cell_name='fake-cell',
                launched_at=timeutils.utcnow(),
                system_metadata=orig_system_metadata,
                expected_attrs=['system_metadata'])

        self.compute_api.resize(self.context, instance)
        self.assertTrue(self.cells_rpcapi.resize_instance.called)

    @mock.patch.object(compute_api.API, '_record_action_start')
    @mock.patch.object(objects.Instance, 'save')
    def test_live_migrate_instance(self, instance_save, _record):
        orig_system_metadata = {}
        instance = fake_instance.fake_instance_obj(self.context,
                vm_state=vm_states.ACTIVE, cell_name='fake-cell',
                launched_at=timeutils.utcnow(),
                system_metadata=orig_system_metadata,
                expected_attrs=['system_metadata'])

        self.compute_api.live_migrate(self.context, instance,
                True, True, 'fake_dest_host')

        self.assertTrue(self.cells_rpcapi.live_migrate_instance.called)

    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(objects.Instance, 'get_flavor')
    @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
    @mock.patch.object(compute_api.API, '_get_image')
    @mock.patch.object(compute_api.API, '_check_auto_disk_config')
    @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
    @mock.patch.object(compute_api.API, '_record_action_start')
    def test_rebuild_instance(self, _record_action_start,
            _checks_for_create_and_rebuild, _check_auto_disk_config,
            _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save):
        orig_system_metadata = {}
        instance = fake_instance.fake_instance_obj(self.context,
                vm_state=vm_states.ACTIVE, cell_name='fake-cell',
                launched_at=timeutils.utcnow(),
                system_metadata=orig_system_metadata,
                expected_attrs=['system_metadata'])
        get_flavor.return_value = ''
        image_href = ''
        image = {"min_ram": 10, "min_disk": 1,
                 "properties": {'architecture': 'x86_64'}}
        admin_pass = ''
        files_to_inject = []
        bdms = []

        _get_image.return_value = (None, image)
        bdm_get_by_instance_uuid.return_value = bdms

        self.compute_api.rebuild(self.context, instance, image_href,
                                 admin_pass, files_to_inject)

        self.assertTrue(self.cells_rpcapi.rebuild_instance.called)

    def test_check_equal(self):
        # Guard test: every redirect covered above must appear in the task
        # API's cells_compatible list, and vice versa.
        task_api = self.compute_api.compute_task_api
        tests = set()
        for (name, value) in inspect.getmembers(self, inspect.ismethod):
            if name.startswith('test_') and name != 'test_check_equal':
                tests.add(name[5:])
        if tests != set(task_api.cells_compatible):
            self.fail("Testcases not equivalent to cells_compatible list")
+
+
class CellsComputePolicyTestCase(test_compute.ComputePolicyTestCase):
    """Re-run the compute policy tests through ComputeCellsAPI."""

    def setUp(self):
        super(CellsComputePolicyTestCase, self).setUp()
        global ORIG_COMPUTE_API
        ORIG_COMPUTE_API = self.compute_api
        self.compute_api = compute_cells_api.ComputeCellsAPI()
        deploy_stubs(self.stubs, self.compute_api)

    def tearDown(self):
        # Restore the pre-cells API saved in setUp().
        global ORIG_COMPUTE_API
        self.compute_api = ORIG_COMPUTE_API
        super(CellsComputePolicyTestCase, self).tearDown()
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
new file mode 100644
index 0000000000..04b9f6bdc6
--- /dev/null
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -0,0 +1,3053 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for ComputeManager()."""
+
+import contextlib
+import time
+
+from cinderclient import exceptions as cinder_exception
+from eventlet import event as eventlet_event
+import mock
+import mox
+from oslo.config import cfg
+from oslo import messaging
+from oslo.utils import importutils
+
+from nova.compute import manager
+from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova.conductor import rpcapi as conductor_rpcapi
+from nova import context
+from nova import db
+from nova import exception
+from nova.network import api as network_api
+from nova.network import model as network_model
+from nova import objects
+from nova.objects import block_device as block_device_obj
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit.compute import fake_resource_tracker
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit.objects import test_instance_fault
+from nova.tests.unit.objects import test_instance_info_cache
+from nova import utils
+
+
CONF = cfg.CONF
# Needed so CONF.compute_manager is resolvable in setUp() below.
CONF.import_opt('compute_manager', 'nova.service')
+
+
+class ComputeManagerUnitTestCase(test.NoDBTestCase):
    def setUp(self):
        super(ComputeManagerUnitTestCase, self).setUp()
        # Instantiate the configured compute manager class by import path.
        self.compute = importutils.import_object(CONF.compute_manager)
        self.context = context.RequestContext('fake', 'fake')
+
    def test_allocate_network_succeeds_after_retries(self):
        """Network allocation retries with capped exponential backoff and
        finally records 'network_allocated' in system_metadata on success.
        """
        self.flags(network_allocate_retries=8)

        nwapi = self.compute.network_api
        self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
        self.mox.StubOutWithMock(self.compute, '_instance_update')
        self.mox.StubOutWithMock(time, 'sleep')

        instance = fake_instance.fake_instance_obj(
            self.context, expected_attrs=['system_metadata'])

        is_vpn = 'fake-is-vpn'
        req_networks = 'fake-req-networks'
        macs = 'fake-macs'
        sec_groups = 'fake-sec-groups'
        final_result = 'meow'
        dhcp_options = None

        # Backoff doubles per attempt and is capped at 30 seconds.
        expected_sleep_times = [1, 2, 4, 8, 16, 30, 30, 30]

        for sleep_time in expected_sleep_times:
            nwapi.allocate_for_instance(
                self.context, instance, vpn=is_vpn,
                requested_networks=req_networks, macs=macs,
                security_groups=sec_groups,
                dhcp_options=dhcp_options).AndRaise(
                    test.TestingException())
            time.sleep(sleep_time)

        # The final (9th) attempt succeeds.
        nwapi.allocate_for_instance(
            self.context, instance, vpn=is_vpn,
            requested_networks=req_networks, macs=macs,
            security_groups=sec_groups,
            dhcp_options=dhcp_options).AndReturn(final_result)
        self.compute._instance_update(self.context, instance['uuid'],
                system_metadata={'network_allocated': 'True'})

        self.mox.ReplayAll()

        res = self.compute._allocate_network_async(self.context, instance,
                                                   req_networks,
                                                   macs,
                                                   sec_groups,
                                                   is_vpn,
                                                   dhcp_options)
        self.assertEqual(final_result, res)
+
    def test_allocate_network_maintains_context(self):
        """_allocate_network must be invoked with the caller's (non-admin)
        context, not an elevated one.
        """
        # override tracker with a version that doesn't need the database:
        class FakeResourceTracker(object):
            def instance_claim(self, context, instance, limits):
                return mox.MockAnything()

        self.mox.StubOutWithMock(self.compute, '_get_resource_tracker')
        self.mox.StubOutWithMock(self.compute, '_allocate_network')
        self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
                                 'get_by_instance_uuid')

        instance = fake_instance.fake_instance_obj(self.context)

        objects.BlockDeviceMappingList.get_by_instance_uuid(
                mox.IgnoreArg(), instance.uuid).AndReturn([])

        node = 'fake_node'
        self.compute._get_resource_tracker(node).AndReturn(
            FakeResourceTracker())

        self.admin_context = False

        def fake_allocate(context, *args, **kwargs):
            if context.is_admin:
                self.admin_context = True

        # NOTE(vish): The nice mox parameter matchers here don't work well
        #             because they raise an exception that gets wrapped by
        #             the retry exception handling, so use a side effect
        #             to keep track of whether allocate was called with admin
        #             context.
        self.compute._allocate_network(mox.IgnoreArg(), instance,
                mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
                mox.IgnoreArg()).WithSideEffects(fake_allocate)

        self.mox.ReplayAll()

        instance, nw_info = self.compute._build_instance(self.context, {}, {},
                                                         None, None, None,
                                                         True, node, instance,
                                                         {}, False)
        self.assertFalse(self.admin_context,
                         "_allocate_network called with admin context")
        self.assertEqual(vm_states.BUILDING, instance.vm_state)
        self.assertEqual(task_states.BLOCK_DEVICE_MAPPING, instance.task_state)
+
    def test_reschedule_maintains_context(self):
        """_reschedule_or_error must also run with the caller's (non-admin)
        context when the resource claim fails.
        """
        # override tracker with a version that causes a reschedule
        class FakeResourceTracker(object):
            def instance_claim(self, context, instance, limits):
                raise test.TestingException()

        self.mox.StubOutWithMock(self.compute, '_get_resource_tracker')
        self.mox.StubOutWithMock(self.compute, '_reschedule_or_error')
        self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
                                 'get_by_instance_uuid')
        instance = fake_instance.fake_instance_obj(self.context)

        objects.BlockDeviceMappingList.get_by_instance_uuid(
                mox.IgnoreArg(), instance.uuid).AndReturn([])

        node = 'fake_node'
        self.compute._get_resource_tracker(node).AndReturn(
            FakeResourceTracker())

        self.admin_context = False

        def fake_retry_or_error(context, *args, **kwargs):
            if context.is_admin:
                self.admin_context = True

        # NOTE(vish): we could use a mox parameter matcher here but it leads
        #             to a very cryptic error message, so use the same method
        #             as the allocate_network_maintains_context test.
        self.compute._reschedule_or_error(mox.IgnoreArg(), instance,
                mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg(),
                mox.IgnoreArg()).WithSideEffects(fake_retry_or_error)

        self.mox.ReplayAll()

        self.assertRaises(test.TestingException,
                          self.compute._build_instance, self.context, {}, {},
                          None, None, None, True, node, instance, {}, False)
        self.assertFalse(self.admin_context,
                         "_reschedule_or_error called with admin context")
+
    def test_allocate_network_fails(self):
        """With retries disabled (0), the first allocation failure is fatal."""
        self.flags(network_allocate_retries=0)

        nwapi = self.compute.network_api
        self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')

        instance = {}
        is_vpn = 'fake-is-vpn'
        req_networks = 'fake-req-networks'
        macs = 'fake-macs'
        sec_groups = 'fake-sec-groups'
        dhcp_options = None

        nwapi.allocate_for_instance(
            self.context, instance, vpn=is_vpn,
            requested_networks=req_networks, macs=macs,
            security_groups=sec_groups,
            dhcp_options=dhcp_options).AndRaise(test.TestingException())

        self.mox.ReplayAll()

        self.assertRaises(test.TestingException,
                          self.compute._allocate_network_async,
                          self.context, instance, req_networks, macs,
                          sec_groups, is_vpn, dhcp_options)
+
    def test_allocate_network_neg_conf_value_treated_as_zero(self):
        """A negative retry count behaves exactly like zero: one attempt."""
        self.flags(network_allocate_retries=-1)

        nwapi = self.compute.network_api
        self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')

        instance = {}
        is_vpn = 'fake-is-vpn'
        req_networks = 'fake-req-networks'
        macs = 'fake-macs'
        sec_groups = 'fake-sec-groups'
        dhcp_options = None

        # Only attempted once.
        nwapi.allocate_for_instance(
            self.context, instance, vpn=is_vpn,
            requested_networks=req_networks, macs=macs,
            security_groups=sec_groups,
            dhcp_options=dhcp_options).AndRaise(test.TestingException())

        self.mox.ReplayAll()

        self.assertRaises(test.TestingException,
                          self.compute._allocate_network_async,
                          self.context, instance, req_networks, macs,
                          sec_groups, is_vpn, dhcp_options)
+
+    @mock.patch.object(network_api.API, 'allocate_for_instance')
+    @mock.patch.object(manager.ComputeManager, '_instance_update')
+    @mock.patch.object(time, 'sleep')
+    def test_allocate_network_with_conf_value_is_one(
+            self, sleep, _instance_update, allocate_for_instance):
+        """With network_allocate_retries=1 a failed first attempt is
+        retried exactly once, sleeping in between, and the retry's
+        result is returned.
+        """
+        self.flags(network_allocate_retries=1)
+
+        instance = fake_instance.fake_instance_obj(
+            self.context, expected_attrs=['system_metadata'])
+        is_vpn = 'fake-is-vpn'
+        req_networks = 'fake-req-networks'
+        macs = 'fake-macs'
+        sec_groups = 'fake-sec-groups'
+        dhcp_options = None
+        final_result = 'zhangtralon'
+
+        # First call fails; the single retry succeeds.
+        allocate_for_instance.side_effect = [test.TestingException(),
+                                             final_result]
+        res = self.compute._allocate_network_async(self.context, instance,
+                                                   req_networks,
+                                                   macs,
+                                                   sec_groups,
+                                                   is_vpn,
+                                                   dhcp_options)
+        self.assertEqual(final_result, res)
+        self.assertEqual(1, sleep.call_count)
+
+    def test_init_host(self):
+        """init_host() initializes the driver, runs _init_instance for
+        every instance on this host, and wraps that in iptables
+        defer-apply on/off only when defer_iptables_apply is set.
+        """
+        our_host = self.compute.host
+        fake_context = 'fake-context'
+        inst = fake_instance.fake_db_instance(
+                vm_state=vm_states.ACTIVE,
+                info_cache=dict(test_instance_info_cache.fake_info_cache,
+                                network_info=None),
+                security_groups=None)
+        startup_instances = [inst, inst, inst]
+
+        def _do_mock_calls(defer_iptables_apply):
+            # Record the expected call sequence for one init_host() run.
+            self.compute.driver.init_host(host=our_host)
+            context.get_admin_context().AndReturn(fake_context)
+            db.instance_get_all_by_host(
+                    fake_context, our_host, columns_to_join=['info_cache'],
+                    use_slave=False
+                    ).AndReturn(startup_instances)
+            if defer_iptables_apply:
+                self.compute.driver.filter_defer_apply_on()
+            self.compute._destroy_evacuated_instances(fake_context)
+            # One _init_instance per instance in startup_instances.
+            self.compute._init_instance(fake_context,
+                                        mox.IsA(objects.Instance))
+            self.compute._init_instance(fake_context,
+                                        mox.IsA(objects.Instance))
+            self.compute._init_instance(fake_context,
+                                        mox.IsA(objects.Instance))
+            if defer_iptables_apply:
+                self.compute.driver.filter_defer_apply_off()
+
+        self.mox.StubOutWithMock(self.compute.driver, 'init_host')
+        self.mox.StubOutWithMock(self.compute.driver,
+                                 'filter_defer_apply_on')
+        self.mox.StubOutWithMock(self.compute.driver,
+                                 'filter_defer_apply_off')
+        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+        self.mox.StubOutWithMock(context, 'get_admin_context')
+        self.mox.StubOutWithMock(self.compute,
+                                 '_destroy_evacuated_instances')
+        self.mox.StubOutWithMock(self.compute,
+                                 '_init_instance')
+
+        # Test with defer_iptables_apply
+        self.flags(defer_iptables_apply=True)
+        _do_mock_calls(True)
+
+        self.mox.ReplayAll()
+        self.compute.init_host()
+        self.mox.VerifyAll()
+
+        # Test without defer_iptables_apply
+        self.mox.ResetAll()
+        self.flags(defer_iptables_apply=False)
+        _do_mock_calls(False)
+
+        self.mox.ReplayAll()
+        self.compute.init_host()
+        # tearDown() uses context.get_admin_context(), so we have
+        # to do the verification here and unstub it.
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    @mock.patch('nova.objects.InstanceList')
+    def test_cleanup_host(self, mock_instance_list):
+        """cleanup_host() delegates to the driver's cleanup_host with
+        this compute's hostname.
+        """
+        # just testing whether the cleanup_host method
+        # when fired will invoke the underlying driver's
+        # equivalent method.
+
+        # No instances on the host during init.
+        mock_instance_list.get_by_host.return_value = []
+
+        with mock.patch.object(self.compute, 'driver') as mock_driver:
+            self.compute.init_host()
+            mock_driver.init_host.assert_called_once_with(host='fake-mini')
+
+            self.compute.cleanup_host()
+            mock_driver.cleanup_host.assert_called_once_with(host='fake-mini')
+
+    def test_init_host_with_deleted_migration(self):
+        """An instance still present on the hypervisor but belonging to
+        another host (and whose network-info lookup raises
+        InstanceNotFound) must still get driver.destroy() during
+        init_host so dangling files are cleaned up.
+        """
+        our_host = self.compute.host
+        not_our_host = 'not-' + our_host
+        fake_context = 'fake-context'
+
+        deleted_instance = fake_instance.fake_instance_obj(
+                self.context, host=not_our_host, uuid='fake-uuid')
+
+        self.mox.StubOutWithMock(self.compute.driver, 'init_host')
+        self.mox.StubOutWithMock(self.compute.driver, 'destroy')
+        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+        self.mox.StubOutWithMock(context, 'get_admin_context')
+        self.mox.StubOutWithMock(self.compute, 'init_virt_events')
+        self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
+        self.mox.StubOutWithMock(self.compute, '_init_instance')
+        self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
+
+        self.compute.driver.init_host(host=our_host)
+        context.get_admin_context().AndReturn(fake_context)
+        # No instances of our own on this host.
+        db.instance_get_all_by_host(fake_context, our_host,
+                                    columns_to_join=['info_cache'],
+                                    use_slave=False
+                                    ).AndReturn([])
+        self.compute.init_virt_events()
+
+        # simulate failed instance
+        self.compute._get_instances_on_driver(
+            fake_context, {'deleted': False}).AndReturn([deleted_instance])
+        self.compute._get_instance_nw_info(fake_context, deleted_instance
+            ).AndRaise(exception.InstanceNotFound(
+                instance_id=deleted_instance['uuid']))
+        # ensure driver.destroy is called so that driver may
+        # clean up any dangling files
+        self.compute.driver.destroy(fake_context, deleted_instance,
+            mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
+
+        self.mox.ReplayAll()
+        self.compute.init_host()
+        # tearDown() uses context.get_admin_context(), so we have
+        # to do the verification here and unstub it.
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def test_init_instance_failed_resume_sets_error(self):
+        """If resume_state_on_host_boot() raises during host boot, the
+        instance is put into the error state.
+        """
+        instance = fake_instance.fake_instance_obj(
+                self.context,
+                uuid='fake-uuid',
+                info_cache=None,
+                power_state=power_state.RUNNING,
+                vm_state=vm_states.ACTIVE,
+                task_state=None,
+                expected_attrs=['info_cache'])
+
+        self.flags(resume_guests_state_on_host_boot=True)
+        self.mox.StubOutWithMock(self.compute, '_get_power_state')
+        self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
+        self.mox.StubOutWithMock(self.compute.driver,
+                                 'resume_state_on_host_boot')
+        self.mox.StubOutWithMock(self.compute,
+                                 '_get_instance_block_device_info')
+        self.mox.StubOutWithMock(self.compute,
+                                 '_set_instance_error_state')
+        # _init_instance polls power state several times; SHUTDOWN
+        # triggers the resume path.
+        self.compute._get_power_state(mox.IgnoreArg(),
+                instance).AndReturn(power_state.SHUTDOWN)
+        self.compute._get_power_state(mox.IgnoreArg(),
+                instance).AndReturn(power_state.SHUTDOWN)
+        self.compute._get_power_state(mox.IgnoreArg(),
+                instance).AndReturn(power_state.SHUTDOWN)
+        self.compute.driver.plug_vifs(instance, mox.IgnoreArg())
+        self.compute._get_instance_block_device_info(mox.IgnoreArg(),
+                instance).AndReturn('fake-bdm')
+        self.compute.driver.resume_state_on_host_boot(mox.IgnoreArg(),
+                instance, mox.IgnoreArg(),
+                'fake-bdm').AndRaise(test.TestingException)
+        # The failure must flip the instance to ERROR.
+        self.compute._set_instance_error_state(mox.IgnoreArg(), instance)
+        self.mox.ReplayAll()
+        self.compute._init_instance('fake-context', instance)
+
+    def test_init_instance_stuck_in_deleting(self):
+        """An ACTIVE instance stuck in the DELETING task state at init
+        time has its delete completed via _delete_instance.
+        """
+        instance = fake_instance.fake_instance_obj(
+                self.context,
+                uuid='fake-uuid',
+                power_state=power_state.RUNNING,
+                vm_state=vm_states.ACTIVE,
+                task_state=task_states.DELETING)
+
+        self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
+                                 'get_by_instance_uuid')
+        self.mox.StubOutWithMock(self.compute, '_delete_instance')
+        self.mox.StubOutWithMock(instance, 'obj_load_attr')
+
+        bdms = []
+        instance.obj_load_attr('metadata')
+        instance.obj_load_attr('system_metadata')
+        objects.BlockDeviceMappingList.get_by_instance_uuid(
+                self.context, instance.uuid).AndReturn(bdms)
+        self.compute._delete_instance(self.context, instance, bdms,
+                                      mox.IgnoreArg())
+
+        self.mox.ReplayAll()
+        self.compute._init_instance(self.context, instance)
+
+    def _test_init_instance_reverts_crashed_migrations(self,
+                                                       old_vm_state=None):
+        """Helper: an instance left in RESIZE_MIGRATING (a crashed
+        migration) is reverted via finish_revert_migration on init; the
+        guest is powered on iff the pre-resize vm_state was ACTIVE or
+        unrecorded.
+        """
+        power_on = True if (not old_vm_state or
+                            old_vm_state == vm_states.ACTIVE) else False
+        sys_meta = {
+            'old_vm_state': old_vm_state
+            }
+        instance = fake_instance.fake_instance_obj(
+                self.context,
+                uuid='foo',
+                vm_state=vm_states.ERROR,
+                task_state=task_states.RESIZE_MIGRATING,
+                power_state=power_state.SHUTDOWN,
+                system_metadata=sys_meta,
+                expected_attrs=['system_metadata'])
+
+        self.mox.StubOutWithMock(compute_utils, 'get_nw_info_for_instance')
+        self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
+        self.mox.StubOutWithMock(self.compute.driver,
+                                 'finish_revert_migration')
+        self.mox.StubOutWithMock(self.compute,
+                                 '_get_instance_block_device_info')
+        self.mox.StubOutWithMock(self.compute.driver, 'get_info')
+        self.mox.StubOutWithMock(instance, 'save')
+        self.mox.StubOutWithMock(self.compute, '_retry_reboot')
+
+        self.compute._retry_reboot(self.context, instance).AndReturn(
+                                                            (False, None))
+        compute_utils.get_nw_info_for_instance(instance).AndReturn(
+            network_model.NetworkInfo())
+        self.compute.driver.plug_vifs(instance, [])
+        self.compute._get_instance_block_device_info(
+            self.context, instance).AndReturn([])
+        self.compute.driver.finish_revert_migration(self.context, instance,
+                                                    [], [], power_on)
+        instance.save()
+        self.compute.driver.get_info(instance).AndReturn(
+            {'state': power_state.SHUTDOWN})
+        self.compute.driver.get_info(instance).AndReturn(
+            {'state': power_state.SHUTDOWN})
+
+        self.mox.ReplayAll()
+
+        self.compute._init_instance(self.context, instance)
+        # The crashed-migration task state must be cleared.
+        self.assertIsNone(instance.task_state)
+
+    def test_init_instance_reverts_crashed_migration_from_active(self):
+        self._test_init_instance_reverts_crashed_migrations(
+                                                old_vm_state=vm_states.ACTIVE)
+
+    def test_init_instance_reverts_crashed_migration_from_stopped(self):
+        self._test_init_instance_reverts_crashed_migrations(
+                                                old_vm_state=vm_states.STOPPED)
+
+    def test_init_instance_reverts_crashed_migration_no_old_state(self):
+        self._test_init_instance_reverts_crashed_migrations(old_vm_state=None)
+
+    def test_init_instance_resets_crashed_live_migration(self):
+        """An instance stuck in MIGRATING (crashed live migration) is
+        reset to ACTIVE with no task state on init.
+        """
+        instance = fake_instance.fake_instance_obj(
+                self.context,
+                uuid='foo',
+                vm_state=vm_states.ACTIVE,
+                task_state=task_states.MIGRATING)
+        with contextlib.nested(
+            mock.patch.object(instance, 'save'),
+            mock.patch('nova.compute.utils.get_nw_info_for_instance',
+                       return_value=network_model.NetworkInfo())
+        ) as (save, get_nw_info):
+            self.compute._init_instance(self.context, instance)
+            save.assert_called_once_with(expected_task_state=['migrating'])
+            get_nw_info.assert_called_once_with(instance)
+        self.assertIsNone(instance.task_state)
+        self.assertEqual(vm_states.ACTIVE, instance.vm_state)
+
+    def _test_init_instance_sets_building_error(self, vm_state,
+                                                task_state=None):
+        """Helper: _init_instance moves the given build/rebuild state to
+        ERROR and clears the task state.
+        """
+        instance = fake_instance.fake_instance_obj(
+                self.context,
+                uuid='foo',
+                vm_state=vm_state,
+                task_state=task_state)
+        with mock.patch.object(instance, 'save') as save:
+            self.compute._init_instance(self.context, instance)
+            save.assert_called_once_with()
+        self.assertIsNone(instance.task_state)
+        self.assertEqual(vm_states.ERROR, instance.vm_state)
+
+    def test_init_instance_sets_building_error(self):
+        self._test_init_instance_sets_building_error(vm_states.BUILDING)
+
+    def test_init_instance_sets_rebuilding_errors(self):
+        # Every rebuild task state, from either ACTIVE or STOPPED,
+        # must end up in ERROR.
+        tasks = [task_states.REBUILDING,
+                 task_states.REBUILD_BLOCK_DEVICE_MAPPING,
+                 task_states.REBUILD_SPAWNING]
+        vms = [vm_states.ACTIVE, vm_states.STOPPED]
+
+        for vm_state in vms:
+            for task_state in tasks:
+                self._test_init_instance_sets_building_error(
+                    vm_state, task_state)
+
+    def _test_init_instance_sets_building_tasks_error(self, instance):
+        """Helper: an instance interrupted mid-build (scheduling /
+        block-device / networking / spawning) is errored out on init.
+        """
+        with mock.patch.object(instance, 'save') as save:
+            self.compute._init_instance(self.context, instance)
+            save.assert_called_once_with()
+        self.assertIsNone(instance.task_state)
+        self.assertEqual(vm_states.ERROR, instance.vm_state)
+
+    def test_init_instance_sets_building_tasks_error_scheduling(self):
+        instance = fake_instance.fake_instance_obj(
+                self.context,
+                uuid='foo',
+                vm_state=None,
+                task_state=task_states.SCHEDULING)
+        self._test_init_instance_sets_building_tasks_error(instance)
+
+    def test_init_instance_sets_building_tasks_error_block_device(self):
+        instance = objects.Instance(self.context)
+        instance.uuid = 'foo'
+        instance.vm_state = None
+        instance.task_state = task_states.BLOCK_DEVICE_MAPPING
+        self._test_init_instance_sets_building_tasks_error(instance)
+
+    def test_init_instance_sets_building_tasks_error_networking(self):
+        instance = objects.Instance(self.context)
+        instance.uuid = 'foo'
+        instance.vm_state = None
+        instance.task_state = task_states.NETWORKING
+        self._test_init_instance_sets_building_tasks_error(instance)
+
+    def test_init_instance_sets_building_tasks_error_spawning(self):
+        instance = objects.Instance(self.context)
+        instance.uuid = 'foo'
+        instance.vm_state = None
+        instance.task_state = task_states.SPAWNING
+        self._test_init_instance_sets_building_tasks_error(instance)
+
+    def _test_init_instance_cleans_image_states(self, instance):
+        """Helper: an instance interrupted mid-snapshot/upload has its
+        task state cleared and the driver's interrupted-snapshot cleanup
+        invoked on init.
+        """
+        with mock.patch.object(instance, 'save') as save:
+            # NOTE(review): these assign plain Mocks directly rather
+            # than patching, so they stay in place after the test.
+            self.compute._get_power_state = mock.Mock()
+            self.compute.driver.post_interrupted_snapshot_cleanup = mock.Mock()
+            instance.info_cache = None
+            instance.power_state = power_state.RUNNING
+            self.compute._init_instance(self.context, instance)
+            save.assert_called_once_with()
+            self.compute.driver.post_interrupted_snapshot_cleanup.\
+                    assert_called_once_with(self.context, instance)
+        self.assertIsNone(instance.task_state)
+
+    def test_init_instance_cleans_image_state_pending_upload(self):
+        instance = objects.Instance(self.context)
+        instance.uuid = 'foo'
+        instance.vm_state = vm_states.ACTIVE
+        instance.task_state = task_states.IMAGE_PENDING_UPLOAD
+        self._test_init_instance_cleans_image_states(instance)
+
+    def test_init_instance_cleans_image_state_uploading(self):
+        instance = objects.Instance(self.context)
+        instance.uuid = 'foo'
+        instance.vm_state = vm_states.ACTIVE
+        instance.task_state = task_states.IMAGE_UPLOADING
+        self._test_init_instance_cleans_image_states(instance)
+
+    def test_init_instance_cleans_image_state_snapshot(self):
+        instance = objects.Instance(self.context)
+        instance.uuid = 'foo'
+        instance.vm_state = vm_states.ACTIVE
+        instance.task_state = task_states.IMAGE_SNAPSHOT
+        self._test_init_instance_cleans_image_states(instance)
+
+    def test_init_instance_cleans_image_state_snapshot_pending(self):
+        instance = objects.Instance(self.context)
+        instance.uuid = 'foo'
+        instance.vm_state = vm_states.ACTIVE
+        instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
+        self._test_init_instance_cleans_image_states(instance)
+
+    def test_init_instance_errors_when_not_migrating(self):
+        """An ERROR instance whose task state is not a migration makes
+        no network-info call during init (the stub records no expected
+        calls, so any call would fail VerifyAll).
+        """
+        instance = objects.Instance(self.context)
+        instance.uuid = 'foo'
+        instance.vm_state = vm_states.ERROR
+        instance.task_state = task_states.IMAGE_UPLOADING
+        self.mox.StubOutWithMock(compute_utils, 'get_nw_info_for_instance')
+        self.mox.ReplayAll()
+        self.compute._init_instance(self.context, instance)
+        self.mox.VerifyAll()
+
+    def test_init_instance_deletes_error_deleting_instance(self):
+        """An ERROR instance whose task state is DELETING has its delete
+        completed on init, same as the ACTIVE/DELETING case.
+        """
+        instance = fake_instance.fake_instance_obj(
+                self.context,
+                uuid='fake',
+                vm_state=vm_states.ERROR,
+                task_state=task_states.DELETING)
+        self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
+                                 'get_by_instance_uuid')
+        self.mox.StubOutWithMock(self.compute, '_delete_instance')
+        self.mox.StubOutWithMock(instance, 'obj_load_attr')
+
+        bdms = []
+        instance.obj_load_attr('metadata')
+        instance.obj_load_attr('system_metadata')
+        objects.BlockDeviceMappingList.get_by_instance_uuid(
+                self.context, instance.uuid).AndReturn(bdms)
+        self.compute._delete_instance(self.context, instance, bdms,
+                                      mox.IgnoreArg())
+        self.mox.ReplayAll()
+
+        self.compute._init_instance(self.context, instance)
+        self.mox.VerifyAll()
+
+    @mock.patch('nova.context.RequestContext.elevated')
+    @mock.patch('nova.compute.utils.get_nw_info_for_instance')
+    @mock.patch(
+        'nova.compute.manager.ComputeManager._get_instance_block_device_info')
+    @mock.patch('nova.virt.driver.ComputeDriver.destroy')
+    @mock.patch('nova.virt.driver.ComputeDriver.get_volume_connector')
+    def test_shutdown_instance_endpoint_not_found(self, mock_connector,
+            mock_destroy, mock_blk_device_info, mock_nw_info, mock_elevated):
+        """A cinder EndpointNotFound from get_volume_connector must not
+        abort _shutdown_instance (the test passes iff no exception
+        escapes).
+        """
+        mock_connector.side_effect = cinder_exception.EndpointNotFound
+        mock_elevated.return_value = self.context
+        instance = fake_instance.fake_instance_obj(
+                self.context,
+                uuid='fake',
+                vm_state=vm_states.ERROR,
+                task_state=task_states.DELETING)
+        bdms = [mock.Mock(id=1, is_volume=True)]
+
+        self.compute._shutdown_instance(self.context, instance, bdms,
+                notify=False, try_deallocate_networks=False)
+
+    def _test_init_instance_retries_reboot(self, instance, reboot_type,
+                                           return_power_state):
+        """Helper: an instance caught mid-reboot at init is re-rebooted
+        via the compute RPC API with the expected reboot_type.
+        """
+        with contextlib.nested(
+            mock.patch.object(self.compute, '_get_power_state',
+                               return_value=return_power_state),
+            mock.patch.object(self.compute.compute_rpcapi, 'reboot_instance'),
+            mock.patch.object(compute_utils, 'get_nw_info_for_instance')
+          ) as (
+            _get_power_state,
+            reboot_instance,
+            get_nw_info_for_instance
+          ):
+            self.compute._init_instance(self.context, instance)
+            call = mock.call(self.context, instance, block_device_info=None,
+                             reboot_type=reboot_type)
+            reboot_instance.assert_has_calls([call])
+
+    def test_init_instance_retries_reboot_pending(self):
+        instance = objects.Instance(self.context)
+        instance.uuid = 'foo'
+        instance.task_state = task_states.REBOOT_PENDING
+        # A pending soft reboot is retried as SOFT from every state
+        # that allows it.
+        for state in vm_states.ALLOW_SOFT_REBOOT:
+            instance.vm_state = state
+            self._test_init_instance_retries_reboot(instance, 'SOFT',
+                                                    power_state.RUNNING)
+
+    def test_init_instance_retries_reboot_pending_hard(self):
+        instance = objects.Instance(self.context)
+        instance.uuid = 'foo'
+        instance.task_state = task_states.REBOOT_PENDING_HARD
+        for state in vm_states.ALLOW_HARD_REBOOT:
+            # NOTE(dave-mcnally) while a reboot of a vm in error state is
+            # possible we don't attempt to recover an error during init
+            if state == vm_states.ERROR:
+                continue
+            instance.vm_state = state
+            self._test_init_instance_retries_reboot(instance, 'HARD',
+                                                    power_state.RUNNING)
+
+    def test_init_instance_retries_reboot_started(self):
+        # A reboot already started is always retried HARD.
+        instance = objects.Instance(self.context)
+        instance.uuid = 'foo'
+        instance.vm_state = vm_states.ACTIVE
+        instance.task_state = task_states.REBOOT_STARTED
+        self._test_init_instance_retries_reboot(instance, 'HARD',
+                                                power_state.NOSTATE)
+
+    def test_init_instance_retries_reboot_started_hard(self):
+        instance = objects.Instance(self.context)
+        instance.uuid = 'foo'
+        instance.vm_state = vm_states.ACTIVE
+        instance.task_state = task_states.REBOOT_STARTED_HARD
+        self._test_init_instance_retries_reboot(instance, 'HARD',
+                                                power_state.NOSTATE)
+
+    def _test_init_instance_cleans_reboot_state(self, instance):
+        """Helper: an instance found already RUNNING while marked
+        mid-reboot has its reboot task state cleared and stays ACTIVE.
+        """
+        with contextlib.nested(
+            mock.patch.object(self.compute, '_get_power_state',
+                               return_value=power_state.RUNNING),
+            mock.patch.object(instance, 'save', autospec=True),
+            mock.patch.object(compute_utils, 'get_nw_info_for_instance')
+          ) as (
+            _get_power_state,
+            instance_save,
+            get_nw_info_for_instance
+          ):
+            self.compute._init_instance(self.context, instance)
+            instance_save.assert_called_once_with()
+            self.assertIsNone(instance.task_state)
+            self.assertEqual(vm_states.ACTIVE, instance.vm_state)
+
+    def test_init_instance_cleans_image_state_reboot_started(self):
+        instance = objects.Instance(self.context)
+        instance.uuid = 'foo'
+        instance.vm_state = vm_states.ACTIVE
+        instance.task_state = task_states.REBOOT_STARTED
+        instance.power_state = power_state.RUNNING
+        self._test_init_instance_cleans_reboot_state(instance)
+
+    def test_init_instance_cleans_image_state_reboot_started_hard(self):
+        instance = objects.Instance(self.context)
+        instance.uuid = 'foo'
+        instance.vm_state = vm_states.ACTIVE
+        instance.task_state = task_states.REBOOT_STARTED_HARD
+        instance.power_state = power_state.RUNNING
+        self._test_init_instance_cleans_reboot_state(instance)
+
+    def test_init_instance_retries_power_off(self):
+        """An instance caught in POWERING_OFF at init retries the stop."""
+        instance = objects.Instance(self.context)
+        instance.uuid = 'foo'
+        instance.id = 1
+        instance.vm_state = vm_states.ACTIVE
+        instance.task_state = task_states.POWERING_OFF
+        with mock.patch.object(self.compute, 'stop_instance'):
+            self.compute._init_instance(self.context, instance)
+            call = mock.call(self.context, instance)
+            self.compute.stop_instance.assert_has_calls([call])
+
+    def test_init_instance_retries_power_on(self):
+        """An instance caught in POWERING_ON at init retries the start."""
+        instance = objects.Instance(self.context)
+        instance.uuid = 'foo'
+        instance.id = 1
+        instance.vm_state = vm_states.ACTIVE
+        instance.task_state = task_states.POWERING_ON
+        with mock.patch.object(self.compute, 'start_instance'):
+            self.compute._init_instance(self.context, instance)
+            call = mock.call(self.context, instance)
+            self.compute.start_instance.assert_has_calls([call])
+
+    def test_init_instance_retries_power_on_silent_exception(self):
+        """A failing start retry must be swallowed: _init_instance still
+        returns None.
+        """
+        instance = objects.Instance(self.context)
+        instance.uuid = 'foo'
+        instance.id = 1
+        instance.vm_state = vm_states.ACTIVE
+        instance.task_state = task_states.POWERING_ON
+        # NOTE(review): return_value=Exception returns the exception
+        # class rather than raising it — presumably side_effect was
+        # intended; confirm before relying on this test.
+        with mock.patch.object(self.compute, 'start_instance',
+                              return_value=Exception):
+            init_return = self.compute._init_instance(self.context, instance)
+            call = mock.call(self.context, instance)
+            self.compute.start_instance.assert_has_calls([call])
+            self.assertIsNone(init_return)
+
+    def test_init_instance_retries_power_off_silent_exception(self):
+        """A failing stop retry must be swallowed: _init_instance still
+        returns None.
+        """
+        instance = objects.Instance(self.context)
+        instance.uuid = 'foo'
+        instance.id = 1
+        instance.vm_state = vm_states.ACTIVE
+        instance.task_state = task_states.POWERING_OFF
+        # NOTE(review): same return_value-vs-side_effect concern as the
+        # power-on variant above.
+        with mock.patch.object(self.compute, 'stop_instance',
+                              return_value=Exception):
+            init_return = self.compute._init_instance(self.context, instance)
+            call = mock.call(self.context, instance)
+            self.compute.stop_instance.assert_has_calls([call])
+            self.assertIsNone(init_return)
+
+    def test_get_instances_on_driver(self):
+        """_get_instances_on_driver resolves driver-reported uuids to DB
+        instances via a uuid-filtered query.
+        """
+        fake_context = context.get_admin_context()
+
+        driver_instances = []
+        for x in xrange(10):
+            driver_instances.append(fake_instance.fake_db_instance())
+
+        self.mox.StubOutWithMock(self.compute.driver,
+                                 'list_instance_uuids')
+        self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
+
+        self.compute.driver.list_instance_uuids().AndReturn(
+                [inst['uuid'] for inst in driver_instances])
+        db.instance_get_all_by_filters(
+                fake_context,
+                {'uuid': [inst['uuid'] for
+                          inst in driver_instances]},
+                'created_at', 'desc', columns_to_join=None,
+                limit=None, marker=None,
+                use_slave=True).AndReturn(
+                        driver_instances)
+
+        self.mox.ReplayAll()
+
+        result = self.compute._get_instances_on_driver(fake_context)
+        self.assertEqual([x['uuid'] for x in driver_instances],
+                         [x['uuid'] for x in result])
+
+    def test_get_instances_on_driver_fallback(self):
+        """When the driver lacks list_instance_uuids, the fallback path
+        matches driver-reported instance *names* against a host-filtered
+        DB query.
+        """
+        # Test getting instances when driver doesn't support
+        # 'list_instance_uuids'
+        self.compute.host = 'host'
+        filters = {'host': self.compute.host}
+        fake_context = context.get_admin_context()
+
+        self.flags(instance_name_template='inst-%i')
+
+        all_instances = []
+        driver_instances = []
+        for x in xrange(10):
+            instance = fake_instance.fake_db_instance(name='inst-%i' % x,
+                                                      id=x)
+            # Only odd-id instances are "on" the driver.
+            if x % 2:
+                driver_instances.append(instance)
+            all_instances.append(instance)
+
+        self.mox.StubOutWithMock(self.compute.driver,
+                                 'list_instance_uuids')
+        self.mox.StubOutWithMock(self.compute.driver,
+                                 'list_instances')
+        self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
+
+        self.compute.driver.list_instance_uuids().AndRaise(
+                NotImplementedError())
+        self.compute.driver.list_instances().AndReturn(
+                [inst['name'] for inst in driver_instances])
+        db.instance_get_all_by_filters(
+                fake_context, filters,
+                'created_at', 'desc', columns_to_join=None,
+                limit=None, marker=None,
+                use_slave=True).AndReturn(all_instances)
+
+        self.mox.ReplayAll()
+
+        result = self.compute._get_instances_on_driver(fake_context, filters)
+        self.assertEqual([x['uuid'] for x in driver_instances],
+                         [x['uuid'] for x in result])
+
+    def test_instance_usage_audit(self):
+        """With instance_usage_audit enabled and no prior audit run,
+        _instance_usage_audit emits a usage-exists notification per
+        active instance.
+        """
+        instances = [objects.Instance(uuid='foo')]
+
+        @classmethod
+        def fake_get(*a, **k):
+            return instances
+
+        self.flags(instance_usage_audit=True)
+        self.stubs.Set(compute_utils, 'has_audit_been_run',
+                       lambda *a, **k: False)
+        self.stubs.Set(objects.InstanceList,
+                       'get_active_by_window_joined', fake_get)
+        self.stubs.Set(compute_utils, 'start_instance_usage_audit',
+                       lambda *a, **k: None)
+        self.stubs.Set(compute_utils, 'finish_instance_usage_audit',
+                       lambda *a, **k: None)
+
+        self.mox.StubOutWithMock(self.compute.conductor_api,
+                                 'notify_usage_exists')
+        self.compute.conductor_api.notify_usage_exists(
+            self.context, instances[0], ignore_missing_network_data=False)
+        self.mox.ReplayAll()
+        self.compute._instance_usage_audit(self.context)
+
+    def _get_sync_instance(self, power_state, vm_state, task_state=None,
+                           shutdown_terminate=False):
+        """Build an Instance for the power-state sync tests, with
+        'refresh' and 'save' pre-stubbed via mox.
+        """
+        instance = objects.Instance()
+        instance.uuid = 'fake-uuid'
+        instance.power_state = power_state
+        instance.vm_state = vm_state
+        instance.host = self.compute.host
+        instance.task_state = task_state
+        instance.shutdown_terminate = shutdown_terminate
+        self.mox.StubOutWithMock(instance, 'refresh')
+        self.mox.StubOutWithMock(instance, 'save')
+        return instance
+
+    def test_sync_instance_power_state_match(self):
+        """When the driver power state matches the DB record, only a
+        refresh happens — no save, no API calls.
+        """
+        instance = self._get_sync_instance(power_state.RUNNING,
+                                           vm_states.ACTIVE)
+        instance.refresh(use_slave=False)
+        self.mox.ReplayAll()
+        self.compute._sync_instance_power_state(self.context, instance,
+                                                power_state.RUNNING)
+
+    def test_sync_instance_power_state_running_stopped(self):
+        """A SHUTDOWN report against a RUNNING record updates and saves
+        the instance's power state.
+        """
+        instance = self._get_sync_instance(power_state.RUNNING,
+                                           vm_states.ACTIVE)
+        instance.refresh(use_slave=False)
+        instance.save()
+        self.mox.ReplayAll()
+        self.compute._sync_instance_power_state(self.context, instance,
+                                                power_state.SHUTDOWN)
+        self.assertEqual(instance.power_state, power_state.SHUTDOWN)
+
+    def _test_sync_to_stop(self, power_state, vm_state, driver_power_state,
+                           stop=True, force=False, shutdown_terminate=False):
+        """Helper: verify which compute API call (stop / force_stop /
+        delete / none) _sync_instance_power_state makes for a given
+        DB-vs-driver power state combination.
+        """
+        instance = self._get_sync_instance(
+            power_state, vm_state, shutdown_terminate=shutdown_terminate)
+        instance.refresh(use_slave=False)
+        instance.save()
+        self.mox.StubOutWithMock(self.compute.compute_api, 'stop')
+        self.mox.StubOutWithMock(self.compute.compute_api, 'delete')
+        self.mox.StubOutWithMock(self.compute.compute_api, 'force_stop')
+        if shutdown_terminate:
+            self.compute.compute_api.delete(self.context, instance)
+        elif stop:
+            if force:
+                self.compute.compute_api.force_stop(self.context, instance)
+            else:
+                self.compute.compute_api.stop(self.context, instance)
+        self.mox.ReplayAll()
+        self.compute._sync_instance_power_state(self.context, instance,
+                                                driver_power_state)
+        self.mox.VerifyAll()
+        self.mox.UnsetStubs()
+
+    def test_sync_instance_power_state_to_stop(self):
+        # ACTIVE but found shut down / crashed / suspended -> stop.
+        for ps in (power_state.SHUTDOWN, power_state.CRASHED,
+                   power_state.SUSPENDED):
+            self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, ps)
+
+        # PAUSED but found shut down / crashed -> force stop.
+        for ps in (power_state.SHUTDOWN, power_state.CRASHED):
+            self._test_sync_to_stop(power_state.PAUSED, vm_states.PAUSED, ps,
+                                    force=True)
+
+        # STOPPED but found running -> force stop.
+        self._test_sync_to_stop(power_state.SHUTDOWN, vm_states.STOPPED,
+                                power_state.RUNNING, force=True)
+
+    def test_sync_instance_power_state_to_terminate(self):
+        self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE,
+                                power_state.SHUTDOWN,
+                                force=False, shutdown_terminate=True)
+
+    def test_sync_instance_power_state_to_no_stop(self):
+        # States where no stop/delete action should be taken.
+        for ps in (power_state.PAUSED, power_state.NOSTATE):
+            self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, ps,
+                                    stop=False)
+        for vs in (vm_states.SOFT_DELETED, vm_states.DELETED):
+            for ps in (power_state.NOSTATE, power_state.SHUTDOWN):
+                self._test_sync_to_stop(power_state.RUNNING, vs, ps,
+                                        stop=False)
+
+    @mock.patch('nova.compute.manager.ComputeManager.'
+                '_sync_instance_power_state')
+    def test_query_driver_power_state_and_sync_pending_task(
+            self, mock_sync_power_state):
+        """An instance with a pending task state is skipped: neither the
+        driver query nor the sync runs.
+        """
+        with mock.patch.object(self.compute.driver,
+                               'get_info') as mock_get_info:
+            db_instance = objects.Instance(uuid='fake-uuid',
+                                           task_state=task_states.POWERING_OFF)
+            self.compute._query_driver_power_state_and_sync(self.context,
+                                                            db_instance)
+            self.assertFalse(mock_get_info.called)
+            self.assertFalse(mock_sync_power_state.called)
+
+    @mock.patch('nova.compute.manager.ComputeManager.'
+                '_sync_instance_power_state')
+    def test_query_driver_power_state_and_sync_not_found_driver(
+            self, mock_sync_power_state):
+        """If the driver no longer knows the instance, the sync runs
+        with power_state.NOSTATE.
+        """
+        error = exception.InstanceNotFound(instance_id=1)
+        with mock.patch.object(self.compute.driver,
+                               'get_info', side_effect=error) as mock_get_info:
+            db_instance = objects.Instance(uuid='fake-uuid', task_state=None)
+            self.compute._query_driver_power_state_and_sync(self.context,
+                                                            db_instance)
+            mock_get_info.assert_called_once_with(db_instance)
+            mock_sync_power_state.assert_called_once_with(self.context,
+                                                          db_instance,
+                                                          power_state.NOSTATE,
+                                                          use_slave=True)
+
+    def test_run_pending_deletes(self):
+        """_run_pending_deletes skips instances past the attempt limit,
+        marks an instance cleaned when file deletion succeeds, and bumps
+        clean_attempts when it fails.
+        """
+        self.flags(instance_delete_interval=10)
+
+        class FakeInstance(object):
+            # Minimal stand-in recording cleaned/clean_attempts state.
+            def __init__(self, uuid, name, smd):
+                self.uuid = uuid
+                self.name = name
+                self.system_metadata = smd
+                self.cleaned = False
+
+            def __getitem__(self, name):
+                return getattr(self, name)
+
+            def save(self, context):
+                pass
+
+        # NOTE(review): FakeInstanceList does not appear to be used
+        # below — likely leftover; confirm before removing.
+        class FakeInstanceList(object):
+            def get_by_filters(self, *args, **kwargs):
+                return []
+
+        # a: over the attempt limit (skipped); b: delete succeeds;
+        # c: delete fails (attempts incremented).
+        a = FakeInstance('123', 'apple', {'clean_attempts': '100'})
+        b = FakeInstance('456', 'orange', {'clean_attempts': '3'})
+        c = FakeInstance('789', 'banana', {})
+
+        self.mox.StubOutWithMock(objects.InstanceList,
+                                 'get_by_filters')
+        objects.InstanceList.get_by_filters(
+            {'read_deleted': 'yes'},
+            {'deleted': True, 'soft_deleted': False, 'host': 'fake-mini',
+             'cleaned': False},
+            expected_attrs=['info_cache', 'security_groups',
+                            'system_metadata'],
+            use_slave=True).AndReturn([a, b, c])
+
+        self.mox.StubOutWithMock(self.compute.driver, 'delete_instance_files')
+        self.compute.driver.delete_instance_files(
+            mox.IgnoreArg()).AndReturn(True)
+        self.compute.driver.delete_instance_files(
+            mox.IgnoreArg()).AndReturn(False)
+
+        self.mox.ReplayAll()
+
+        self.compute._run_pending_deletes({})
+        self.assertFalse(a.cleaned)
+        self.assertEqual('100', a.system_metadata['clean_attempts'])
+        self.assertTrue(b.cleaned)
+        self.assertEqual('4', b.system_metadata['clean_attempts'])
+        self.assertFalse(c.cleaned)
+        self.assertEqual('1', c.system_metadata['clean_attempts'])
+
+    def test_attach_interface_failure(self):
+        # Test that the fault methods are invoked when an attach fails
+        db_instance = fake_instance.fake_db_instance()
+        f_instance = objects.Instance._from_db_object(self.context,
+                                                      objects.Instance(),
+                                                      db_instance)
+        e = exception.InterfaceAttachFailed(instance_uuid=f_instance.uuid)
+
+        @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
+        @mock.patch.object(self.compute.network_api,
+                           'allocate_port_for_instance',
+                           side_effect=e)
+        def do_test(meth, add_fault):
+            # 'meth' is the allocate_port_for_instance mock injected by
+            # the inner decorator.
+            self.assertRaises(exception.InterfaceAttachFailed,
+                              self.compute.attach_interface,
+                              self.context, f_instance, 'net_id', 'port_id',
+                              None)
+            # NOTE(review): assert_has_calls normally takes a *list* of
+            # calls; passing a bare mock.call looks fragile — confirm
+            # against the mock library version in use.
+            add_fault.assert_has_calls(
+                    mock.call(self.context, f_instance, e,
+                              mock.ANY))
+
+        # Decorators supply both arguments.
+        do_test()
+
+    def test_detach_interface_failure(self):
+        # Test that the fault methods are invoked when a detach fails
+
+        # Build test data that will cause a PortNotFound exception
+        f_instance = mock.MagicMock()
+        f_instance.info_cache = mock.MagicMock()
+        f_instance.info_cache.network_info = []
+
+        @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
+        @mock.patch.object(self.compute, '_set_instance_error_state')
+        def do_test(meth, add_fault):
+            self.assertRaises(exception.PortNotFound,
+                              self.compute.detach_interface,
+                              self.context, f_instance, 'port_id')
+            add_fault.assert_has_calls(
+                   mock.call(self.context, f_instance, mock.ANY, mock.ANY))
+
+        do_test()
+
    def test_swap_volume_volume_api_usage(self):
        """Exercise swap_volume's interaction with the volume API.

        Verifies that volume *ids* (uuid-like strings, not dicts) are what
        reach each volume_api call, and that volume statuses end up correct
        on the success path as well as when the driver swap or
        initialize_connection raises mid-operation.
        """
        # This test ensures that volume_id arguments are passed to volume_api
        # and that volume states are OK
        volumes = {}
        old_volume_id = uuidutils.generate_uuid()
        volumes[old_volume_id] = {'id': old_volume_id,
                                  'display_name': 'old_volume',
                                  'status': 'detaching',
                                  'size': 1}
        new_volume_id = uuidutils.generate_uuid()
        volumes[new_volume_id] = {'id': new_volume_id,
                                  'display_name': 'new_volume',
                                  'status': 'available',
                                  'size': 2}

        def fake_vol_api_roll_detaching(context, volume_id):
            # Each fake asserts it was handed a uuid, not a volume dict.
            self.assertTrue(uuidutils.is_uuid_like(volume_id))
            if volumes[volume_id]['status'] == 'detaching':
                volumes[volume_id]['status'] = 'in-use'

        fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
            {'device_name': '/dev/vdb', 'source_type': 'volume',
             'destination_type': 'volume', 'instance_uuid': 'fake',
             'connection_info': '{"foo": "bar"}'})

        def fake_vol_api_func(context, volume, *args):
            self.assertTrue(uuidutils.is_uuid_like(volume))
            return {}

        def fake_vol_get(context, volume_id):
            self.assertTrue(uuidutils.is_uuid_like(volume_id))
            return volumes[volume_id]

        def fake_vol_unreserve(context, volume_id):
            self.assertTrue(uuidutils.is_uuid_like(volume_id))
            if volumes[volume_id]['status'] == 'attaching':
                volumes[volume_id]['status'] = 'available'

        def fake_vol_migrate_volume_completion(context, old_volume_id,
                                               new_volume_id, error=False):
            self.assertTrue(uuidutils.is_uuid_like(old_volume_id))
            self.assertTrue(uuidutils.is_uuid_like(new_volume_id))
            volumes[old_volume_id]['status'] = 'in-use'
            return {'save_volume_id': new_volume_id}

        def fake_func_exc(*args, **kwargs):
            raise AttributeError  # Random exception

        def fake_swap_volume(old_connection_info, new_connection_info,
                             instance, mountpoint, resize_to):
            # resize_to must be the new (larger) volume's size.
            self.assertEqual(resize_to, 2)

        self.stubs.Set(self.compute.volume_api, 'roll_detaching',
                       fake_vol_api_roll_detaching)
        self.stubs.Set(self.compute.volume_api, 'get', fake_vol_get)
        self.stubs.Set(self.compute.volume_api, 'initialize_connection',
                       fake_vol_api_func)
        self.stubs.Set(self.compute.volume_api, 'unreserve_volume',
                       fake_vol_unreserve)
        self.stubs.Set(self.compute.volume_api, 'terminate_connection',
                       fake_vol_api_func)
        self.stubs.Set(db, 'block_device_mapping_get_by_volume_id',
                       lambda x, y, z: fake_bdm)
        self.stubs.Set(self.compute.driver, 'get_volume_connector',
                       lambda x: {})
        self.stubs.Set(self.compute.driver, 'swap_volume',
                       fake_swap_volume)
        self.stubs.Set(self.compute.volume_api, 'migrate_volume_completion',
                       fake_vol_migrate_volume_completion)
        self.stubs.Set(db, 'block_device_mapping_update',
                       lambda *a, **k: fake_bdm)
        self.stubs.Set(db,
                       'instance_fault_create',
                       lambda x, y:
                           test_instance_fault.fake_faults['fake-uuid'][0])

        # Good path
        self.compute.swap_volume(self.context, old_volume_id, new_volume_id,
                                 fake_instance.fake_instance_obj(
                                     self.context, **{'uuid': 'fake'}))
        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')

        # Error paths: on failure the old volume must roll back to 'in-use'
        # and the new one to 'available'.
        volumes[old_volume_id]['status'] = 'detaching'
        volumes[new_volume_id]['status'] = 'attaching'
        self.stubs.Set(self.compute.driver, 'swap_volume', fake_func_exc)
        self.assertRaises(AttributeError, self.compute.swap_volume,
                          self.context, old_volume_id, new_volume_id,
                          fake_instance.fake_instance_obj(
                              self.context, **{'uuid': 'fake'}))
        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
        self.assertEqual(volumes[new_volume_id]['status'], 'available')

        volumes[old_volume_id]['status'] = 'detaching'
        volumes[new_volume_id]['status'] = 'attaching'
        self.stubs.Set(self.compute.volume_api, 'initialize_connection',
                       fake_func_exc)
        self.assertRaises(AttributeError, self.compute.swap_volume,
                          self.context, old_volume_id, new_volume_id,
                          fake_instance.fake_instance_obj(
                              self.context, **{'uuid': 'fake'}))
        self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
        self.assertEqual(volumes[new_volume_id]['status'], 'available')
+
    def test_check_can_live_migrate_source(self):
        """check_can_live_migrate_source augments the dest check data.

        The manager must add is_volume_backed to dest_check_data and hand
        the refreshed block device info to the driver check (verified via
        mox record/replay, so call order matters).
        """
        is_volume_backed = 'volume_backed'
        dest_check_data = dict(foo='bar')
        db_instance = fake_instance.fake_db_instance()
        instance = objects.Instance._from_db_object(
            self.context, objects.Instance(), db_instance)
        expected_dest_check_data = dict(dest_check_data,
                                        is_volume_backed=is_volume_backed)

        self.mox.StubOutWithMock(self.compute.compute_api,
                                 'is_volume_backed_instance')
        self.mox.StubOutWithMock(self.compute,
                                 '_get_instance_block_device_info')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'check_can_live_migrate_source')

        self.compute.compute_api.is_volume_backed_instance(
            self.context, instance).AndReturn(is_volume_backed)
        self.compute._get_instance_block_device_info(
            self.context, instance, refresh_conn_info=True
        ).AndReturn({'block_device_mapping': 'fake'})
        self.compute.driver.check_can_live_migrate_source(
            self.context, instance, expected_dest_check_data,
            {'block_device_mapping': 'fake'})

        self.mox.ReplayAll()

        self.compute.check_can_live_migrate_source(
            self.context, instance=instance,
            dest_check_data=dest_check_data)
+
    def _test_check_can_live_migrate_destination(self, do_raise=False,
                                                 has_mig_data=False):
        """Drive check_can_live_migrate_destination through one scenario.

        :param do_raise: make the source-side RPC check raise, verifying a
            fault record is created and cleanup still runs.
        :param has_mig_data: include migrate_data in the driver's dest
            check result; it must be merged into the returned mig_data.
        Uses mox record/replay, so the recorded call order is significant.
        """
        db_instance = fake_instance.fake_db_instance(host='fake-host')
        instance = objects.Instance._from_db_object(
            self.context, objects.Instance(), db_instance)
        instance.host = 'fake-host'
        block_migration = 'block_migration'
        disk_over_commit = 'disk_over_commit'
        src_info = 'src_info'
        dest_info = 'dest_info'
        dest_check_data = dict(foo='bar')
        mig_data = dict(cow='moo')
        expected_result = dict(mig_data)
        if has_mig_data:
            dest_check_data['migrate_data'] = dict(cat='meow')
            expected_result.update(cat='meow')

        self.mox.StubOutWithMock(self.compute, '_get_compute_info')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'check_can_live_migrate_destination')
        self.mox.StubOutWithMock(self.compute.compute_rpcapi,
                                 'check_can_live_migrate_source')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'check_can_live_migrate_destination_cleanup')

        # Compute info is fetched for the source host, then for this host.
        self.compute._get_compute_info(self.context,
                                       'fake-host').AndReturn(src_info)
        self.compute._get_compute_info(self.context,
                                       CONF.host).AndReturn(dest_info)
        self.compute.driver.check_can_live_migrate_destination(
            self.context, instance, src_info, dest_info,
            block_migration, disk_over_commit).AndReturn(dest_check_data)

        mock_meth = self.compute.compute_rpcapi.check_can_live_migrate_source(
            self.context, instance, dest_check_data)
        if do_raise:
            mock_meth.AndRaise(test.TestingException())
            self.mox.StubOutWithMock(db, 'instance_fault_create')
            db.instance_fault_create(
                self.context, mox.IgnoreArg()).AndReturn(
                    test_instance_fault.fake_faults['fake-uuid'][0])
        else:
            mock_meth.AndReturn(mig_data)
        # Cleanup must run whether or not the source check raised.
        self.compute.driver.check_can_live_migrate_destination_cleanup(
            self.context, dest_check_data)

        self.mox.ReplayAll()

        result = self.compute.check_can_live_migrate_destination(
            self.context, instance=instance,
            block_migration=block_migration,
            disk_over_commit=disk_over_commit)
        self.assertEqual(expected_result, result)
+
    def test_check_can_live_migrate_destination_success(self):
        # Happy path: source check succeeds, no extra migrate_data.
        self._test_check_can_live_migrate_destination()
+
    def test_check_can_live_migrate_destination_success_w_mig_data(self):
        # Driver-supplied migrate_data must be merged into the result.
        self._test_check_can_live_migrate_destination(has_mig_data=True)
+
    def test_check_can_live_migrate_destination_fail(self):
        # A failing source check propagates and records an instance fault.
        self.assertRaises(
            test.TestingException,
            self._test_check_can_live_migrate_destination,
            do_raise=True)
+
+ @mock.patch('nova.compute.manager.InstanceEvents._lock_name')
+ def test_prepare_for_instance_event(self, lock_name_mock):
+ inst_obj = objects.Instance(uuid='foo')
+ result = self.compute.instance_events.prepare_for_instance_event(
+ inst_obj, 'test-event')
+ self.assertIn('foo', self.compute.instance_events._events)
+ self.assertIn('test-event',
+ self.compute.instance_events._events['foo'])
+ self.assertEqual(
+ result,
+ self.compute.instance_events._events['foo']['test-event'])
+ self.assertTrue(hasattr(result, 'send'))
+ lock_name_mock.assert_called_once_with(inst_obj)
+
+ @mock.patch('nova.compute.manager.InstanceEvents._lock_name')
+ def test_pop_instance_event(self, lock_name_mock):
+ event = eventlet_event.Event()
+ self.compute.instance_events._events = {
+ 'foo': {
+ 'test-event': event,
+ }
+ }
+ inst_obj = objects.Instance(uuid='foo')
+ event_obj = objects.InstanceExternalEvent(name='test-event',
+ tag=None)
+ result = self.compute.instance_events.pop_instance_event(inst_obj,
+ event_obj)
+ self.assertEqual(result, event)
+ lock_name_mock.assert_called_once_with(inst_obj)
+
+ @mock.patch('nova.compute.manager.InstanceEvents._lock_name')
+ def test_clear_events_for_instance(self, lock_name_mock):
+ event = eventlet_event.Event()
+ self.compute.instance_events._events = {
+ 'foo': {
+ 'test-event': event,
+ }
+ }
+ inst_obj = objects.Instance(uuid='foo')
+ result = self.compute.instance_events.clear_events_for_instance(
+ inst_obj)
+ self.assertEqual(result, {'test-event': event})
+ lock_name_mock.assert_called_once_with(inst_obj)
+
+ def test_instance_events_lock_name(self):
+ inst_obj = objects.Instance(uuid='foo')
+ result = self.compute.instance_events._lock_name(inst_obj)
+ self.assertEqual(result, 'foo-events')
+
+ def test_prepare_for_instance_event_again(self):
+ inst_obj = objects.Instance(uuid='foo')
+ self.compute.instance_events.prepare_for_instance_event(
+ inst_obj, 'test-event')
+ # A second attempt will avoid creating a new list; make sure we
+ # get the current list
+ result = self.compute.instance_events.prepare_for_instance_event(
+ inst_obj, 'test-event')
+ self.assertIn('foo', self.compute.instance_events._events)
+ self.assertIn('test-event',
+ self.compute.instance_events._events['foo'])
+ self.assertEqual(
+ result,
+ self.compute.instance_events._events['foo']['test-event'])
+ self.assertTrue(hasattr(result, 'send'))
+
+ def test_process_instance_event(self):
+ event = eventlet_event.Event()
+ self.compute.instance_events._events = {
+ 'foo': {
+ 'test-event': event,
+ }
+ }
+ inst_obj = objects.Instance(uuid='foo')
+ event_obj = objects.InstanceExternalEvent(name='test-event', tag=None)
+ self.compute._process_instance_event(inst_obj, event_obj)
+ self.assertTrue(event.ready())
+ self.assertEqual(event_obj, event.wait())
+ self.assertEqual({}, self.compute.instance_events._events)
+
    def test_external_instance_event(self):
        """Dispatching external events routes each kind appropriately.

        A 'network-changed' event triggers a network info refresh for its
        instance, while any other event name is handed to
        _process_instance_event for its instance.
        """
        instances = [
            objects.Instance(id=1, uuid='uuid1'),
            objects.Instance(id=2, uuid='uuid2')]
        events = [
            objects.InstanceExternalEvent(name='network-changed',
                                          tag='tag1',
                                          instance_uuid='uuid1'),
            objects.InstanceExternalEvent(name='foo', instance_uuid='uuid2',
                                          tag='tag2')]

        @mock.patch.object(self.compute.network_api, 'get_instance_nw_info')
        @mock.patch.object(self.compute, '_process_instance_event')
        def do_test(_process_instance_event, get_instance_nw_info):
            self.compute.external_instance_event(self.context,
                                                 instances, events)
            get_instance_nw_info.assert_called_once_with(self.context,
                                                         instances[0])
            _process_instance_event.assert_called_once_with(instances[1],
                                                            events[1])
        do_test()
+
+ def test_retry_reboot_pending_soft(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.task_state = task_states.REBOOT_PENDING
+ instance.vm_state = vm_states.ACTIVE
+ with mock.patch.object(self.compute, '_get_power_state',
+ return_value=power_state.RUNNING):
+ allow_reboot, reboot_type = self.compute._retry_reboot(
+ context, instance)
+ self.assertTrue(allow_reboot)
+ self.assertEqual(reboot_type, 'SOFT')
+
+ def test_retry_reboot_pending_hard(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.task_state = task_states.REBOOT_PENDING_HARD
+ instance.vm_state = vm_states.ACTIVE
+ with mock.patch.object(self.compute, '_get_power_state',
+ return_value=power_state.RUNNING):
+ allow_reboot, reboot_type = self.compute._retry_reboot(
+ context, instance)
+ self.assertTrue(allow_reboot)
+ self.assertEqual(reboot_type, 'HARD')
+
+ def test_retry_reboot_starting_soft_off(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.task_state = task_states.REBOOT_STARTED
+ with mock.patch.object(self.compute, '_get_power_state',
+ return_value=power_state.NOSTATE):
+ allow_reboot, reboot_type = self.compute._retry_reboot(
+ context, instance)
+ self.assertTrue(allow_reboot)
+ self.assertEqual(reboot_type, 'HARD')
+
+ def test_retry_reboot_starting_hard_off(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.task_state = task_states.REBOOT_STARTED_HARD
+ with mock.patch.object(self.compute, '_get_power_state',
+ return_value=power_state.NOSTATE):
+ allow_reboot, reboot_type = self.compute._retry_reboot(
+ context, instance)
+ self.assertTrue(allow_reboot)
+ self.assertEqual(reboot_type, 'HARD')
+
+ def test_retry_reboot_starting_hard_on(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.task_state = task_states.REBOOT_STARTED_HARD
+ with mock.patch.object(self.compute, '_get_power_state',
+ return_value=power_state.RUNNING):
+ allow_reboot, reboot_type = self.compute._retry_reboot(
+ context, instance)
+ self.assertFalse(allow_reboot)
+ self.assertEqual(reboot_type, 'HARD')
+
+ def test_retry_reboot_no_reboot(self):
+ instance = objects.Instance(self.context)
+ instance.uuid = 'foo'
+ instance.task_state = 'bar'
+ with mock.patch.object(self.compute, '_get_power_state',
+ return_value=power_state.RUNNING):
+ allow_reboot, reboot_type = self.compute._retry_reboot(
+ context, instance)
+ self.assertFalse(allow_reboot)
+ self.assertEqual(reboot_type, 'HARD')
+
    @mock.patch('nova.objects.BlockDeviceMapping.get_by_volume_id')
    @mock.patch('nova.compute.manager.ComputeManager._detach_volume')
    @mock.patch('nova.objects.Instance._from_db_object')
    def test_remove_volume_connection(self, inst_from_db, detach, bdm_get):
        """remove_volume_connection looks up the BDM and detaches it."""
        bdm = mock.sentinel.bdm
        inst_obj = mock.sentinel.inst_obj
        bdm_get.return_value = bdm
        inst_from_db.return_value = inst_obj
        # volume_api is mocked out wholesale; only the detach call matters.
        with mock.patch.object(self.compute, 'volume_api'):
            self.compute.remove_volume_connection(self.context, 'vol',
                                                  inst_obj)
            detach.assert_called_once_with(self.context, inst_obj, bdm)
+
    def _test_rescue(self, clean_shutdown=True):
        """Drive rescue_instance end-to-end with all collaborators mocked.

        :param clean_shutdown: forwarded to rescue_instance; verified to
            reach _power_off_instance unchanged.
        The contextlib.nested patches and the following tuple unpack are
        positional — keep them in the same order.
        """
        instance = fake_instance.fake_instance_obj(
            self.context, vm_state=vm_states.ACTIVE)
        fake_nw_info = network_model.NetworkInfo()
        rescue_image_meta = {'id': 'fake', 'name': 'fake'}
        with contextlib.nested(
            mock.patch.object(objects.InstanceActionEvent, 'event_start'),
            mock.patch.object(objects.InstanceActionEvent,
                              'event_finish_with_failure'),
            mock.patch.object(self.context, 'elevated',
                              return_value=self.context),
            mock.patch.object(self.compute, '_get_instance_nw_info',
                              return_value=fake_nw_info),
            mock.patch.object(self.compute, '_get_rescue_image',
                              return_value=rescue_image_meta),
            mock.patch.object(self.compute, '_notify_about_instance_usage'),
            mock.patch.object(self.compute, '_power_off_instance'),
            mock.patch.object(self.compute.driver, 'rescue'),
            mock.patch.object(self.compute.conductor_api,
                              'notify_usage_exists'),
            mock.patch.object(self.compute, '_get_power_state',
                              return_value=power_state.RUNNING),
            mock.patch.object(instance, 'save')
        ) as (
            event_start, event_finish, elevated_context, get_nw_info,
            get_rescue_image, notify_instance_usage, power_off_instance,
            driver_rescue, notify_usage_exists, get_power_state, instance_save
        ):
            self.compute.rescue_instance(
                self.context, instance, rescue_password='verybadpass',
                rescue_image_ref=None, clean_shutdown=clean_shutdown)

            # assert the field values on the instance object
            self.assertEqual(vm_states.RESCUED, instance.vm_state)
            self.assertIsNone(instance.task_state)
            self.assertEqual(power_state.RUNNING, instance.power_state)
            self.assertIsNotNone(instance.launched_at)

            # assert our mock calls
            get_nw_info.assert_called_once_with(self.context, instance)
            get_rescue_image.assert_called_once_with(
                self.context, instance, None)

            extra_usage_info = {'rescue_image_name': 'fake'}
            notify_calls = [
                mock.call(self.context, instance, "rescue.start",
                          extra_usage_info=extra_usage_info,
                          network_info=fake_nw_info),
                mock.call(self.context, instance, "rescue.end",
                          extra_usage_info=extra_usage_info,
                          network_info=fake_nw_info)
            ]
            notify_instance_usage.assert_has_calls(notify_calls)

            power_off_instance.assert_called_once_with(self.context, instance,
                                                       clean_shutdown)

            driver_rescue.assert_called_once_with(
                self.context, instance, fake_nw_info, rescue_image_meta,
                'verybadpass')

            notify_usage_exists.assert_called_once_with(
                self.context, instance, current_period=True)

            instance_save.assert_called_once_with(
                expected_task_state=task_states.RESCUING)
+
    def test_rescue(self):
        # Default: rescue with a clean (graceful) shutdown.
        self._test_rescue()
+
    def test_rescue_forced_shutdown(self):
        # clean_shutdown=False must reach _power_off_instance unchanged.
        self._test_rescue(clean_shutdown=False)
+
    def test_unrescue(self):
        """unrescue_instance restores ACTIVE state and notifies.

        The contextlib.nested patches and the tuple unpack below are
        positional — keep them in the same order.
        """
        instance = fake_instance.fake_instance_obj(
            self.context, vm_state=vm_states.RESCUED)
        fake_nw_info = network_model.NetworkInfo()
        with contextlib.nested(
            mock.patch.object(objects.InstanceActionEvent, 'event_start'),
            mock.patch.object(objects.InstanceActionEvent,
                              'event_finish_with_failure'),
            mock.patch.object(self.context, 'elevated',
                              return_value=self.context),
            mock.patch.object(self.compute, '_get_instance_nw_info',
                              return_value=fake_nw_info),
            mock.patch.object(self.compute, '_notify_about_instance_usage'),
            mock.patch.object(self.compute.driver, 'unrescue'),
            mock.patch.object(self.compute, '_get_power_state',
                              return_value=power_state.RUNNING),
            mock.patch.object(instance, 'save')
        ) as (
            event_start, event_finish, elevated_context, get_nw_info,
            notify_instance_usage, driver_unrescue, get_power_state,
            instance_save
        ):
            self.compute.unrescue_instance(self.context, instance)

            # assert the field values on the instance object
            self.assertEqual(vm_states.ACTIVE, instance.vm_state)
            self.assertIsNone(instance.task_state)
            self.assertEqual(power_state.RUNNING, instance.power_state)

            # assert our mock calls
            get_nw_info.assert_called_once_with(self.context, instance)

            notify_calls = [
                mock.call(self.context, instance, "unrescue.start",
                          network_info=fake_nw_info),
                mock.call(self.context, instance, "unrescue.end",
                          network_info=fake_nw_info)
            ]
            notify_instance_usage.assert_has_calls(notify_calls)

            driver_unrescue.assert_called_once_with(instance, fake_nw_info)

            instance_save.assert_called_once_with(
                expected_task_state=task_states.UNRESCUING)
+
    @mock.patch.object(objects.InstanceActionEvent, 'event_start')
    @mock.patch.object(objects.InstanceActionEvent,
                       'event_finish_with_failure')
    @mock.patch('nova.compute.manager.ComputeManager._get_power_state',
                return_value=power_state.RUNNING)
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch('nova.utils.generate_password', return_value='fake-pass')
    def test_set_admin_password(self, gen_password_mock,
                                instance_save_mock, power_state_mock,
                                event_finish_mock, event_start_mock):
        """Happy path: the generated password reaches the driver.

        Mock arguments are injected bottom-up by the decorator stack, so
        the parameter order here mirrors the decorators in reverse.
        """
        # Ensure instance can have its admin password set.
        instance = fake_instance.fake_instance_obj(
            self.context,
            vm_state=vm_states.ACTIVE,
            task_state=task_states.UPDATING_PASSWORD)

        @mock.patch.object(self.context, 'elevated', return_value=self.context)
        @mock.patch.object(self.compute.driver, 'set_admin_password')
        def do_test(driver_mock, elevated_mock):
            # call the manager method
            self.compute.set_admin_password(self.context, instance, None)
            # make our assertions
            self.assertEqual(vm_states.ACTIVE, instance.vm_state)
            self.assertIsNone(instance.task_state)

            power_state_mock.assert_called_once_with(self.context, instance)
            driver_mock.assert_called_once_with(instance, 'fake-pass')
            instance_save_mock.assert_called_once_with(
                expected_task_state=task_states.UPDATING_PASSWORD)

        do_test()
+
    @mock.patch.object(objects.InstanceActionEvent, 'event_start')
    @mock.patch.object(objects.InstanceActionEvent,
                       'event_finish_with_failure')
    @mock.patch('nova.compute.manager.ComputeManager._get_power_state',
                return_value=power_state.NOSTATE)
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
    def test_set_admin_password_bad_state(self, add_fault_mock,
                                          instance_save_mock, power_state_mock,
                                          event_finish_mock, event_start_mock):
        """A non-running power state (NOSTATE) must fail with a fault."""
        # Test setting password while instance is rebuilding.
        instance = fake_instance.fake_instance_obj(self.context)
        with mock.patch.object(self.context, 'elevated',
                               return_value=self.context):
            # call the manager method
            self.assertRaises(exception.InstancePasswordSetFailed,
                              self.compute.set_admin_password,
                              self.context, instance, None)

        # make our assertions
        power_state_mock.assert_called_once_with(self.context, instance)
        instance_save_mock.assert_called_once_with(
            expected_task_state=task_states.UPDATING_PASSWORD)
        add_fault_mock.assert_called_once_with(
            self.context, instance, mock.ANY, mock.ANY)
+
    @mock.patch.object(objects.InstanceActionEvent, 'event_start')
    @mock.patch.object(objects.InstanceActionEvent,
                       'event_finish_with_failure')
    @mock.patch('nova.utils.generate_password', return_value='fake-pass')
    @mock.patch('nova.compute.manager.ComputeManager._get_power_state',
                return_value=power_state.RUNNING)
    @mock.patch('nova.compute.manager.ComputeManager._instance_update')
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
    def _do_test_set_admin_password_driver_error(self, exc,
                                                 expected_vm_state,
                                                 expected_task_state,
                                                 expected_exception,
                                                 add_fault_mock,
                                                 instance_save_mock,
                                                 update_mock,
                                                 power_state_mock,
                                                 gen_password_mock,
                                                 event_finish_mock,
                                                 event_start_mock):
        """Shared driver-failure scenario for set_admin_password.

        :param exc: exception the driver's set_admin_password raises.
        :param expected_vm_state: vm_state the instance must end in.
        :param expected_task_state: task_state the revert decorator restores.
        :param expected_exception: exception type callers should see.
        Mock parameters follow the explicit args in decorator (bottom-up)
        order.
        """
        # Ensure expected exception is raised if set_admin_password fails.
        instance = fake_instance.fake_instance_obj(
            self.context,
            vm_state=vm_states.ACTIVE,
            task_state=task_states.UPDATING_PASSWORD)

        @mock.patch.object(self.context, 'elevated', return_value=self.context)
        @mock.patch.object(self.compute.driver, 'set_admin_password',
                           side_effect=exc)
        def do_test(driver_mock, elevated_mock):
            # error raised from the driver should not reveal internal
            # information so a new error is raised
            self.assertRaises(expected_exception,
                              self.compute.set_admin_password,
                              self.context,
                              instance=instance,
                              new_pass=None)

            if expected_exception == NotImplementedError:
                instance_save_mock.assert_called_once_with(
                    expected_task_state=task_states.UPDATING_PASSWORD)
            else:
                # setting the instance to error state
                instance_save_mock.assert_called_once_with()

            self.assertEqual(expected_vm_state, instance.vm_state)
            # check revert_task_state decorator
            update_mock.assert_called_once_with(
                self.context, instance.uuid,
                task_state=expected_task_state)
            # check wrap_instance_fault decorator
            add_fault_mock.assert_called_once_with(
                self.context, instance, mock.ANY, mock.ANY)

        do_test()
+
    def test_set_admin_password_driver_not_authorized(self):
        # Ensure expected exception is raised if set_admin_password not
        # authorized: Forbidden puts the instance in ERROR and surfaces a
        # sanitized InstancePasswordSetFailed to the caller.
        exc = exception.Forbidden('Internal error')
        expected_exception = exception.InstancePasswordSetFailed
        self._do_test_set_admin_password_driver_error(
            exc, vm_states.ERROR, None, expected_exception)
+
    def test_set_admin_password_driver_not_implemented(self):
        # Ensure expected exception is raised if set_admin_password not
        # implemented by driver: the instance stays ACTIVE and the
        # NotImplementedError is passed through unchanged.
        exc = NotImplementedError()
        expected_exception = NotImplementedError
        self._do_test_set_admin_password_driver_error(
            exc, vm_states.ACTIVE, None, expected_exception)
+
    def _test_init_host_with_partial_migration(self, task_state=None,
                                               vm_state=vm_states.ACTIVE):
        """Exercise _destroy_evacuated_instances for mid-migration states.

        instance_1 carries the state under test and must be spared;
        instance_2 (plain ACTIVE on another host, non-shared storage) must
        be destroyed. NOTE(review): despite the name, this helper calls
        _destroy_evacuated_instances directly, not init_host.
        """
        our_host = self.compute.host
        instance_1 = objects.Instance(self.context)
        instance_1.uuid = 'foo'
        instance_1.task_state = task_state
        instance_1.vm_state = vm_state
        instance_1.host = 'not-' + our_host
        instance_2 = objects.Instance(self.context)
        instance_2.uuid = 'bar'
        instance_2.task_state = None
        instance_2.vm_state = vm_states.ACTIVE
        instance_2.host = 'not-' + our_host

        with contextlib.nested(
            mock.patch.object(self.compute, '_get_instances_on_driver',
                              return_value=[instance_1,
                                            instance_2]),
            mock.patch.object(self.compute, '_get_instance_nw_info',
                              return_value=None),
            mock.patch.object(self.compute, '_get_instance_block_device_info',
                              return_value={}),
            mock.patch.object(self.compute, '_is_instance_storage_shared',
                              return_value=False),
            mock.patch.object(self.compute.driver, 'destroy')
        ) as (_get_instances_on_driver, _get_instance_nw_info,
              _get_instance_block_device_info, _is_instance_storage_shared,
              destroy):
            self.compute._destroy_evacuated_instances(self.context)
            destroy.assert_called_once_with(self.context, instance_2, None,
                                            {}, True)
+
    def test_init_host_with_partial_migration_migrating(self):
        # An instance mid live-migration must not be destroyed.
        self._test_init_host_with_partial_migration(
            task_state=task_states.MIGRATING)
+
    def test_init_host_with_partial_migration_resize_migrating(self):
        # An instance mid resize-migration must not be destroyed.
        self._test_init_host_with_partial_migration(
            task_state=task_states.RESIZE_MIGRATING)
+
    def test_init_host_with_partial_migration_resize_migrated(self):
        # An instance whose resize-migration completed but wasn't
        # confirmed must not be destroyed.
        self._test_init_host_with_partial_migration(
            task_state=task_states.RESIZE_MIGRATED)
+
    def test_init_host_with_partial_migration_finish_resize(self):
        # An instance finishing a resize must not be destroyed.
        self._test_init_host_with_partial_migration(
            task_state=task_states.RESIZE_FINISH)
+
    def test_init_host_with_partial_migration_resized(self):
        # An instance in RESIZED vm_state must not be destroyed.
        self._test_init_host_with_partial_migration(
            vm_state=vm_states.RESIZED)
+
+ @mock.patch('nova.compute.manager.ComputeManager._instance_update')
+ def test_error_out_instance_on_exception_not_implemented_err(self,
+ inst_update_mock):
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ def do_test():
+ with self.compute._error_out_instance_on_exception(
+ self.context, instance, instance_state=vm_states.STOPPED):
+ raise NotImplementedError('test')
+
+ self.assertRaises(NotImplementedError, do_test)
+ inst_update_mock.assert_called_once_with(
+ self.context, instance.uuid,
+ vm_state=vm_states.STOPPED, task_state=None)
+
+ @mock.patch('nova.compute.manager.ComputeManager._instance_update')
+ def test_error_out_instance_on_exception_inst_fault_rollback(self,
+ inst_update_mock):
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ def do_test():
+ with self.compute._error_out_instance_on_exception(self.context,
+ instance):
+ raise exception.InstanceFaultRollback(
+ inner_exception=test.TestingException('test'))
+
+ self.assertRaises(test.TestingException, do_test)
+ inst_update_mock.assert_called_once_with(
+ self.context, instance.uuid,
+ vm_state=vm_states.ACTIVE, task_state=None)
+
+ @mock.patch('nova.compute.manager.ComputeManager.'
+ '_set_instance_error_state')
+ def test_error_out_instance_on_exception_unknown_with_quotas(self,
+ set_error):
+ instance = fake_instance.fake_instance_obj(self.context)
+ quotas = mock.create_autospec(objects.Quotas, spec_set=True)
+
+ def do_test():
+ with self.compute._error_out_instance_on_exception(
+ self.context, instance, quotas):
+ raise test.TestingException('test')
+
+ self.assertRaises(test.TestingException, do_test)
+ self.assertEqual(1, len(quotas.method_calls))
+ self.assertEqual(mock.call.rollback(), quotas.method_calls[0])
+ set_error.assert_called_once_with(self.context, instance)
+
+ def test_cleanup_volumes(self):
+ instance = fake_instance.fake_instance_obj(self.context)
+ bdm_do_not_delete_dict = fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': 'fake-id1', 'source_type': 'image',
+ 'delete_on_termination': False})
+ bdm_delete_dict = fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': 'fake-id2', 'source_type': 'image',
+ 'delete_on_termination': True})
+ bdms = block_device_obj.block_device_make_list(self.context,
+ [bdm_do_not_delete_dict, bdm_delete_dict])
+
+ with mock.patch.object(self.compute.volume_api,
+ 'delete') as volume_delete:
+ self.compute._cleanup_volumes(self.context, instance.uuid, bdms)
+ volume_delete.assert_called_once_with(self.context,
+ bdms[1].volume_id)
+
+ def test_cleanup_volumes_exception_do_not_raise(self):
+ instance = fake_instance.fake_instance_obj(self.context)
+ bdm_dict1 = fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': 'fake-id1', 'source_type': 'image',
+ 'delete_on_termination': True})
+ bdm_dict2 = fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': 'fake-id2', 'source_type': 'image',
+ 'delete_on_termination': True})
+ bdms = block_device_obj.block_device_make_list(self.context,
+ [bdm_dict1, bdm_dict2])
+
+ with mock.patch.object(self.compute.volume_api,
+ 'delete',
+ side_effect=[test.TestingException(), None]) as volume_delete:
+ self.compute._cleanup_volumes(self.context, instance.uuid, bdms,
+ raise_exc=False)
+ calls = [mock.call(self.context, bdm.volume_id) for bdm in bdms]
+ self.assertEqual(calls, volume_delete.call_args_list)
+
+ def test_cleanup_volumes_exception_raise(self):
+ instance = fake_instance.fake_instance_obj(self.context)
+ bdm_dict1 = fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': 'fake-id1', 'source_type': 'image',
+ 'delete_on_termination': True})
+ bdm_dict2 = fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': 'fake-id2', 'source_type': 'image',
+ 'delete_on_termination': True})
+ bdms = block_device_obj.block_device_make_list(self.context,
+ [bdm_dict1, bdm_dict2])
+
+ with mock.patch.object(self.compute.volume_api,
+ 'delete',
+ side_effect=[test.TestingException(), None]) as volume_delete:
+ self.assertRaises(test.TestingException,
+ self.compute._cleanup_volumes, self.context, instance.uuid,
+ bdms)
+ calls = [mock.call(self.context, bdm.volume_id) for bdm in bdms]
+ self.assertEqual(calls, volume_delete.call_args_list)
+
+ def test_start_building(self):
+ instance = fake_instance.fake_instance_obj(self.context)
+ with mock.patch.object(self.compute, '_instance_update') as update:
+ self.compute._start_building(self.context, instance)
+ update.assert_called_once_with(
+ self.context, instance.uuid, vm_state=vm_states.BUILDING,
+ task_state=None, expected_task_state=(task_states.SCHEDULING,
+ None))
+
    def _test_prebuild_instance_build_abort_exception(self, exc):
        """_prebuild_instance wraps *exc* into BuildAbortException.

        _start_building is stubbed to raise the given exception; the
        prebuild step must convert it and still have performed the
        existence check first.
        """
        instance = fake_instance.fake_instance_obj(self.context)
        with contextlib.nested(
            mock.patch.object(self.compute, '_check_instance_exists'),
            mock.patch.object(self.compute, '_start_building',
                              side_effect=exc)
        ) as (
            check, start
        ):
            # run the code
            self.assertRaises(exception.BuildAbortException,
                              self.compute._prebuild_instance,
                              self.context, instance)
        # assert the calls
        check.assert_called_once_with(self.context, instance)
        start.assert_called_once_with(self.context, instance)
+
    def test_prebuild_instance_instance_not_found(self):
        # InstanceNotFound during prebuild becomes BuildAbortException.
        self._test_prebuild_instance_build_abort_exception(
            exception.InstanceNotFound(instance_id='fake'))
+
    def test_prebuild_instance_unexpected_deleting_task_state_err(self):
        # A racing delete (UnexpectedDeletingTaskStateError) also becomes
        # BuildAbortException.
        self._test_prebuild_instance_build_abort_exception(
            exception.UnexpectedDeletingTaskStateError(expected='foo',
                                                       actual='bar'))
+
    def test_stop_instance_task_state_none_power_state_shutdown(self):
        # Tests that stop_instance doesn't puke when the instance power_state
        # is shutdown and the task_state is None.
        instance = fake_instance.fake_instance_obj(
            self.context, vm_state=vm_states.ACTIVE,
            task_state=None, power_state=power_state.SHUTDOWN)

        @mock.patch.object(objects.InstanceActionEvent, 'event_start')
        @mock.patch.object(objects.InstanceActionEvent,
                           'event_finish_with_failure')
        @mock.patch.object(self.compute, '_get_power_state',
                           return_value=power_state.SHUTDOWN)
        @mock.patch.object(self.compute, '_notify_about_instance_usage')
        @mock.patch.object(self.compute, '_power_off_instance')
        @mock.patch.object(instance, 'save')
        def do_test(save_mock, power_off_mock, notify_mock, get_state_mock,
                    event_finish_mock, event_start_mock):
            # run the code
            self.compute.stop_instance(self.context, instance)
            # assert the calls
            self.assertEqual(2, get_state_mock.call_count)
            notify_mock.assert_has_calls([
                mock.call(self.context, instance, 'power_off.start'),
                mock.call(self.context, instance, 'power_off.end')
            ])
            power_off_mock.assert_called_once_with(
                self.context, instance, True)
            # save() must accept either POWERING_OFF or None as the prior
            # task state.
            save_mock.assert_called_once_with(
                expected_task_state=[task_states.POWERING_OFF, None])
            self.assertEqual(power_state.SHUTDOWN, instance.power_state)
            self.assertIsNone(instance.task_state)
            self.assertEqual(vm_states.STOPPED, instance.vm_state)

        do_test()
+
    def test_reset_network_driver_not_implemented(self):
        """reset_network surfaces NotImplementedError as an expected RPC
        exception, and the raw error once the ExceptionHelper unwraps it.
        """
        instance = fake_instance.fake_instance_obj(self.context)

        @mock.patch.object(self.compute.driver, 'reset_network',
                           side_effect=NotImplementedError())
        @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
        def do_test(mock_add_fault, mock_reset):
            self.assertRaises(messaging.ExpectedException,
                              self.compute.reset_network,
                              self.context,
                              instance)

            # Rebind self.compute so the second call sees the unwrapped
            # exception instead of messaging.ExpectedException.
            self.compute = utils.ExceptionHelper(self.compute)

            self.assertRaises(NotImplementedError,
                              self.compute.reset_network,
                              self.context,
                              instance)

        do_test()
+
    def test_rebuild_default_impl(self):
        """_rebuild_default_impl with recreate=True must not destroy the
        instance and must spawn with the bdms returned by the attach
        callback.
        """
        def _detach(context, bdms):
            # no-op detach callback
            pass

        def _attach(context, instance, bdms, do_check_attach=True):
            # Sentinel value asserted again inside _spawn below.
            return {'block_device_mapping': 'shared_block_storage'}

        def _spawn(context, instance, image_meta, injected_files,
                   admin_password, network_info=None, block_device_info=None):
            self.assertEqual(block_device_info['block_device_mapping'],
                             'shared_block_storage')

        with contextlib.nested(
            mock.patch.object(self.compute.driver, 'destroy',
                              return_value=None),
            mock.patch.object(self.compute.driver, 'spawn',
                              side_effect=_spawn),
            mock.patch.object(objects.Instance, 'save',
                              return_value=None)
        ) as(
            mock_destroy,
            mock_spawn,
            mock_save
        ):
            instance = fake_instance.fake_instance_obj(self.context)
            instance.task_state = task_states.REBUILDING
            instance.save(expected_task_state=[task_states.REBUILDING])
            self.compute._rebuild_default_impl(self.context,
                                               instance,
                                               None,
                                               [],
                                               admin_password='new_pass',
                                               bdms=[],
                                               detach_block_devices=_detach,
                                               attach_block_devices=_attach,
                                               network_info=None,
                                               recreate=True,
                                               block_device_info=None,
                                               preserve_ephemeral=False)

            # recreate=True means the original VM lives elsewhere: never
            # destroy it, but spawn the replacement and save state.
            self.assertFalse(mock_destroy.called)
            self.assertTrue(mock_save.called)
            self.assertTrue(mock_spawn.called)
+
+
+class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(ComputeManagerBuildInstanceTestCase, self).setUp()
+ self.compute = importutils.import_object(CONF.compute_manager)
+ self.context = context.RequestContext('fake', 'fake')
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ vm_state=vm_states.ACTIVE,
+ expected_attrs=['metadata', 'system_metadata', 'info_cache'])
+ self.admin_pass = 'pass'
+ self.injected_files = []
+ self.image = {}
+ self.node = 'fake-node'
+ self.limits = {}
+ self.requested_networks = []
+ self.security_groups = []
+ self.block_device_mapping = []
+ self.filter_properties = {'retry': {'num_attempts': 1,
+ 'hosts': [[self.compute.host,
+ 'fake-node']]}}
+
+ def fake_network_info():
+ return network_model.NetworkInfo()
+
+ self.network_info = network_model.NetworkInfoAsyncWrapper(
+ fake_network_info)
+ self.block_device_info = self.compute._prep_block_device(context,
+ self.instance, self.block_device_mapping)
+
+ # override tracker with a version that doesn't need the database:
+ fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
+ self.compute.driver, self.node)
+ self.compute._resource_tracker_dict[self.node] = fake_rt
+
    def _do_build_instance_update(self, reschedule_update=False):
        """Record (mox) the instance.save() calls made during a build.

        :param reschedule_update: when True, expect the extra save()
                                  performed on the reschedule path.
        """
        self.mox.StubOutWithMock(self.instance, 'save')
        self.instance.save(
                expected_task_state=(task_states.SCHEDULING, None)).AndReturn(
                        self.instance)
        if reschedule_update:
            self.instance.save().AndReturn(self.instance)
+
    def _build_and_run_instance_update(self):
        """Record (mox) the saves done inside _build_and_run_instance."""
        self.mox.StubOutWithMock(self.instance, 'save')
        # _build_resources performs its own save first; stub already set up.
        self._build_resources_instance_update(stub=False)
        self.instance.save(expected_task_state=
                task_states.BLOCK_DEVICE_MAPPING).AndReturn(self.instance)
+
    def _build_resources_instance_update(self, stub=True):
        """Record (mox) the bare save() done inside _build_resources.

        :param stub: set False when the caller has already stubbed
                     instance.save to avoid double-stubbing.
        """
        if stub:
            self.mox.StubOutWithMock(self.instance, 'save')
        self.instance.save().AndReturn(self.instance)
+
    def _notify_about_instance_usage(self, event, stub=True, **kwargs):
        """Record (mox) one expected usage notification for *event*.

        :param stub: set False if _notify_about_instance_usage was already
                     stubbed by an earlier call.
        """
        if stub:
            self.mox.StubOutWithMock(self.compute,
                    '_notify_about_instance_usage')
        self.compute._notify_about_instance_usage(self.context, self.instance,
                event, **kwargs)
+
    def _instance_action_events(self):
        """Record (mox) the instance-action event start/finish pair that
        wraps every build_and_run_instance invocation.
        """
        self.mox.StubOutWithMock(objects.InstanceActionEvent, 'event_start')
        self.mox.StubOutWithMock(objects.InstanceActionEvent,
                                 'event_finish_with_failure')
        objects.InstanceActionEvent.event_start(
                self.context, self.instance.uuid, mox.IgnoreArg(),
                want_result=False)
        # finish_with_failure is used for success too: exc_val/exc_tb are
        # simply None on the happy path, hence IgnoreArg().
        objects.InstanceActionEvent.event_finish_with_failure(
                self.context, self.instance.uuid, mox.IgnoreArg(),
                exc_val=mox.IgnoreArg(), exc_tb=mox.IgnoreArg(),
                want_result=False)
+
    @mock.patch('nova.utils.spawn_n')
    def test_build_and_run_instance_called_with_proper_args(self, mock_spawn):
        """build_and_run_instance forwards all args to
        _build_and_run_instance unchanged.
        """
        # Run the spawned function synchronously so mox sees the calls.
        mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
        self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
        self._do_build_instance_update()
        self.compute._build_and_run_instance(self.context, self.instance,
                self.image, self.injected_files, self.admin_pass,
                self.requested_networks, self.security_groups,
                self.block_device_mapping, self.node, self.limits,
                self.filter_properties)
        self._instance_action_events()
        self.mox.ReplayAll()

        self.compute.build_and_run_instance(self.context, self.instance,
                self.image, request_spec={},
                filter_properties=self.filter_properties,
                injected_files=self.injected_files,
                admin_password=self.admin_pass,
                requested_networks=self.requested_networks,
                security_groups=self.security_groups,
                block_device_mapping=self.block_device_mapping, node=self.node,
                limits=self.limits)
+
    # Tests that when an icehouse-compatible RPC call reaches a juno
    # compute node, a NetworkRequest object can be loaded from a
    # three-item tuple.
    @mock.patch('nova.objects.InstanceActionEvent.event_finish_with_failure')
    @mock.patch('nova.objects.InstanceActionEvent.event_start')
    @mock.patch('nova.objects.Instance.save')
    @mock.patch('nova.compute.manager.ComputeManager._build_and_run_instance')
    @mock.patch('nova.utils.spawn_n')
    def test_build_and_run_instance_with_icehouse_requested_network(
            self, mock_spawn, mock_build_and_run, mock_save, mock_event_start,
            mock_event_finish):
        # Run the spawned function synchronously.
        mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
        mock_save.return_value = self.instance
        self.compute.build_and_run_instance(self.context, self.instance,
                self.image, request_spec={},
                filter_properties=self.filter_properties,
                injected_files=self.injected_files,
                admin_password=self.admin_pass,
                requested_networks=[('fake_network_id', '10.0.0.1',
                                     'fake_port_id')],
                security_groups=self.security_groups,
                block_device_mapping=self.block_device_mapping, node=self.node,
                limits=self.limits)
        # Positional arg 5 of _build_and_run_instance is the converted
        # requested_networks list; verify tuple -> NetworkRequest mapping.
        requested_network = mock_build_and_run.call_args[0][5][0]
        self.assertEqual('fake_network_id', requested_network.network_id)
        self.assertEqual('10.0.0.1', str(requested_network.address))
        self.assertEqual('fake_port_id', requested_network.port_id)
+
    @mock.patch('nova.utils.spawn_n')
    def test_build_abort_exception(self, mock_spawn):
        """BuildAbortException cleans up networks/volumes, records a fault
        and sets the instance to ERROR without rescheduling.
        """
        def fake_spawn(f, *args, **kwargs):
            # NOTE(danms): Simulate the detached nature of spawn so that
            # we confirm that the inner task has the fault logic
            try:
                return f(*args, **kwargs)
            except Exception:
                pass

        mock_spawn.side_effect = fake_spawn

        self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
        self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
        self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
        self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
        self.mox.StubOutWithMock(self.compute, '_set_instance_error_state')
        # build_instances is stubbed but no call recorded: rescheduling
        # must NOT happen on BuildAbortException.
        self.mox.StubOutWithMock(self.compute.compute_task_api,
                                 'build_instances')
        self._do_build_instance_update()
        self.compute._build_and_run_instance(self.context, self.instance,
                self.image, self.injected_files, self.admin_pass,
                self.requested_networks, self.security_groups,
                self.block_device_mapping, self.node, self.limits,
                self.filter_properties).AndRaise(
                        exception.BuildAbortException(reason='',
                            instance_uuid=self.instance.uuid))
        self.compute._cleanup_allocated_networks(self.context, self.instance,
                self.requested_networks)
        self.compute._cleanup_volumes(self.context, self.instance.uuid,
                self.block_device_mapping, raise_exc=False)
        compute_utils.add_instance_fault_from_exc(self.context,
                self.instance, mox.IgnoreArg(), mox.IgnoreArg())
        self.compute._set_instance_error_state(self.context, self.instance)
        self._instance_action_events()
        self.mox.ReplayAll()

        self.compute.build_and_run_instance(self.context, self.instance,
                self.image, request_spec={},
                filter_properties=self.filter_properties,
                injected_files=self.injected_files,
                admin_password=self.admin_pass,
                requested_networks=self.requested_networks,
                security_groups=self.security_groups,
                block_device_mapping=self.block_device_mapping, node=self.node,
                limits=self.limits)
+
    @mock.patch('nova.utils.spawn_n')
    def test_rescheduled_exception(self, mock_spawn):
        """RescheduledException (with retry info present) triggers a
        conductor build_instances reschedule.
        """
        mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
        self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
        # _set_instance_error_state stubbed but not expected: reschedule
        # path must not mark the instance as ERROR.
        self.mox.StubOutWithMock(self.compute, '_set_instance_error_state')
        self.mox.StubOutWithMock(self.compute.compute_task_api,
                                 'build_instances')
        self._do_build_instance_update(reschedule_update=True)
        self.compute._build_and_run_instance(self.context, self.instance,
                self.image, self.injected_files, self.admin_pass,
                self.requested_networks, self.security_groups,
                self.block_device_mapping, self.node, self.limits,
                self.filter_properties).AndRaise(
                        exception.RescheduledException(reason='',
                            instance_uuid=self.instance.uuid))
        self.compute.compute_task_api.build_instances(self.context,
                [self.instance], self.image, self.filter_properties,
                self.admin_pass, self.injected_files, self.requested_networks,
                self.security_groups, self.block_device_mapping)
        self._instance_action_events()
        self.mox.ReplayAll()

        self.compute.build_and_run_instance(self.context, self.instance,
                self.image, request_spec={},
                filter_properties=self.filter_properties,
                injected_files=self.injected_files,
                admin_password=self.admin_pass,
                requested_networks=self.requested_networks,
                security_groups=self.security_groups,
                block_device_mapping=self.block_device_mapping, node=self.node,
                limits=self.limits)
+
    def test_rescheduled_exception_with_non_ascii_exception(self):
        """A non-ASCII driver exception must still be converted into a
        RescheduledException (no UnicodeError on the error path).
        """
        exc = exception.NovaException(u's\xe9quence')
        self.mox.StubOutWithMock(self.compute.driver, 'spawn')
        self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
                                 'instance_update')
        self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
        self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
        self.compute._build_networks_for_instance(self.context, self.instance,
                self.requested_networks, self.security_groups).AndReturn(
                        self.network_info)
        self.compute._shutdown_instance(self.context, self.instance,
                self.block_device_mapping, self.requested_networks,
                try_deallocate_networks=False)
        self._notify_about_instance_usage('create.start',
                extra_usage_info={'image_name': self.image.get('name')})
        self._build_and_run_instance_update()
        self.compute.driver.spawn(self.context, self.instance, self.image,
                self.injected_files, self.admin_pass,
                network_info=self.network_info,
                block_device_info=self.block_device_info).AndRaise(exc)
        self._notify_about_instance_usage('create.error',
                fault=exc, stub=False)
        conductor_rpcapi.ConductorAPI.instance_update(
                self.context, self.instance['uuid'], mox.IgnoreArg(),
                'conductor')
        self.mox.ReplayAll()

        self.assertRaises(exception.RescheduledException,
                self.compute._build_and_run_instance, self.context,
                self.instance, self.image, self.injected_files,
                self.admin_pass, self.requested_networks, self.security_groups,
                self.block_device_mapping, self.node,
                self.limits, self.filter_properties)
+
    @mock.patch('nova.utils.spawn_n')
    def test_rescheduled_exception_without_retry(self, mock_spawn):
        """With empty filter_properties (no retry info) a
        RescheduledException cannot be rescheduled: clean up networks,
        record a fault and set the instance to ERROR.
        """
        mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
        self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
        self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
        self.mox.StubOutWithMock(self.compute, '_set_instance_error_state')
        self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
        self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
        self._do_build_instance_update()
        # Note the empty dict for filter_properties here (no retry key).
        self.compute._build_and_run_instance(self.context, self.instance,
                self.image, self.injected_files, self.admin_pass,
                self.requested_networks, self.security_groups,
                self.block_device_mapping, self.node, self.limits,
                {}).AndRaise(
                        exception.RescheduledException(reason='',
                            instance_uuid=self.instance.uuid))
        self.compute._cleanup_allocated_networks(self.context, self.instance,
                self.requested_networks)
        compute_utils.add_instance_fault_from_exc(self.context, self.instance,
                mox.IgnoreArg(), mox.IgnoreArg())
        self.compute._set_instance_error_state(self.context,
                self.instance)
        self._instance_action_events()
        self.mox.ReplayAll()

        self.compute.build_and_run_instance(self.context, self.instance,
                self.image, request_spec={},
                filter_properties={},
                injected_files=self.injected_files,
                admin_password=self.admin_pass,
                requested_networks=self.requested_networks,
                security_groups=self.security_groups,
                block_device_mapping=self.block_device_mapping, node=self.node,
                limits=self.limits)
+
    @mock.patch('nova.utils.spawn_n')
    def test_rescheduled_exception_do_not_deallocate_network(self, mock_spawn):
        """When the driver says networks need not be deallocated on
        reschedule, _cleanup_allocated_networks is never called.
        """
        mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
        self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'deallocate_networks_on_reschedule')
        # Stubbed but intentionally never recorded as an expected call.
        self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
        self.mox.StubOutWithMock(self.compute.compute_task_api,
                                 'build_instances')
        self._do_build_instance_update(reschedule_update=True)
        self.compute._build_and_run_instance(self.context, self.instance,
                self.image, self.injected_files, self.admin_pass,
                self.requested_networks, self.security_groups,
                self.block_device_mapping, self.node, self.limits,
                self.filter_properties).AndRaise(
                        exception.RescheduledException(reason='',
                            instance_uuid=self.instance.uuid))
        self.compute.driver.deallocate_networks_on_reschedule(
                self.instance).AndReturn(False)
        self.compute.compute_task_api.build_instances(self.context,
                [self.instance], self.image, self.filter_properties,
                self.admin_pass, self.injected_files, self.requested_networks,
                self.security_groups, self.block_device_mapping)
        self._instance_action_events()
        self.mox.ReplayAll()

        self.compute.build_and_run_instance(self.context, self.instance,
                self.image, request_spec={},
                filter_properties=self.filter_properties,
                injected_files=self.injected_files,
                admin_password=self.admin_pass,
                requested_networks=self.requested_networks,
                security_groups=self.security_groups,
                block_device_mapping=self.block_device_mapping, node=self.node,
                limits=self.limits)
+
    @mock.patch('nova.utils.spawn_n')
    def test_rescheduled_exception_deallocate_network(self, mock_spawn):
        """When the driver requests network deallocation on reschedule,
        _cleanup_allocated_networks runs before the reschedule RPC.
        """
        mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
        self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
        self.mox.StubOutWithMock(self.compute.driver,
                                 'deallocate_networks_on_reschedule')
        self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
        self.mox.StubOutWithMock(self.compute.compute_task_api,
                                 'build_instances')
        self._do_build_instance_update(reschedule_update=True)
        self.compute._build_and_run_instance(self.context, self.instance,
                self.image, self.injected_files, self.admin_pass,
                self.requested_networks, self.security_groups,
                self.block_device_mapping, self.node, self.limits,
                self.filter_properties).AndRaise(
                        exception.RescheduledException(reason='',
                            instance_uuid=self.instance.uuid))
        self.compute.driver.deallocate_networks_on_reschedule(
                self.instance).AndReturn(True)
        self.compute._cleanup_allocated_networks(self.context, self.instance,
                self.requested_networks)
        self.compute.compute_task_api.build_instances(self.context,
                [self.instance], self.image, self.filter_properties,
                self.admin_pass, self.injected_files, self.requested_networks,
                self.security_groups, self.block_device_mapping)
        self._instance_action_events()
        self.mox.ReplayAll()

        self.compute.build_and_run_instance(self.context, self.instance,
                self.image, request_spec={},
                filter_properties=self.filter_properties,
                injected_files=self.injected_files,
                admin_password=self.admin_pass,
                requested_networks=self.requested_networks,
                security_groups=self.security_groups,
                block_device_mapping=self.block_device_mapping, node=self.node,
                limits=self.limits)
+
    def _test_build_and_run_exceptions(self, exc, set_error=False,
            cleanup_volumes=False):
        """Drive build_and_run_instance with *exc* raised from the build.

        :param exc: exception raised from _build_and_run_instance.
        :param set_error: expect a fault record + ERROR state transition.
        :param cleanup_volumes: expect _cleanup_volumes to be invoked.
        """
        self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
        self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
        self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
        # Stubbed so that a reschedule would blow up if attempted.
        self.mox.StubOutWithMock(self.compute.compute_task_api,
                                 'build_instances')
        self._do_build_instance_update()
        self.compute._build_and_run_instance(self.context, self.instance,
                self.image, self.injected_files, self.admin_pass,
                self.requested_networks, self.security_groups,
                self.block_device_mapping, self.node, self.limits,
                self.filter_properties).AndRaise(exc)
        self.compute._cleanup_allocated_networks(self.context, self.instance,
                self.requested_networks)
        if cleanup_volumes:
            self.compute._cleanup_volumes(self.context, self.instance.uuid,
                    self.block_device_mapping, raise_exc=False)
        if set_error:
            self.mox.StubOutWithMock(self.compute, '_set_instance_error_state')
            self.mox.StubOutWithMock(compute_utils,
                                     'add_instance_fault_from_exc')
            compute_utils.add_instance_fault_from_exc(self.context,
                    self.instance, mox.IgnoreArg(), mox.IgnoreArg())
            self.compute._set_instance_error_state(self.context, self.instance)
        self._instance_action_events()
        self.mox.ReplayAll()

        # Run the spawned function synchronously.
        with mock.patch('nova.utils.spawn_n') as mock_spawn:
            mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
            self.compute.build_and_run_instance(self.context, self.instance,
                    self.image, request_spec={},
                    filter_properties=self.filter_properties,
                    injected_files=self.injected_files,
                    admin_password=self.admin_pass,
                    requested_networks=self.requested_networks,
                    security_groups=self.security_groups,
                    block_device_mapping=self.block_device_mapping,
                    node=self.node,
                    limits=self.limits)
+
    def test_build_and_run_notfound_exception(self):
        """InstanceNotFound: cleanup only, no fault/ERROR state."""
        self._test_build_and_run_exceptions(exception.InstanceNotFound(
            instance_id=''))
+
    def test_build_and_run_unexpecteddeleting_exception(self):
        """UnexpectedDeletingTaskStateError: cleanup only, no ERROR state."""
        self._test_build_and_run_exceptions(
            exception.UnexpectedDeletingTaskStateError(expected='',
                                                       actual=''))
+
    def test_build_and_run_buildabort_exception(self):
        """BuildAbortException: cleanup volumes, fault and ERROR state."""
        self._test_build_and_run_exceptions(exception.BuildAbortException(
            instance_uuid='', reason=''), set_error=True, cleanup_volumes=True)
+
    def test_build_and_run_unhandled_exception(self):
        """An unexpected exception: cleanup volumes, fault and ERROR state."""
        self._test_build_and_run_exceptions(test.TestingException(),
                set_error=True, cleanup_volumes=True)
+
    def test_instance_not_found(self):
        """InstanceNotFound raised from the driver spawn is re-raised
        as-is after instance shutdown.
        """
        exc = exception.InstanceNotFound(instance_id=1)
        self.mox.StubOutWithMock(self.compute.driver, 'spawn')
        self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
                                 'instance_update')
        self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
        self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
        self.compute._build_networks_for_instance(self.context, self.instance,
                self.requested_networks, self.security_groups).AndReturn(
                        self.network_info)
        self.compute._shutdown_instance(self.context, self.instance,
                self.block_device_mapping, self.requested_networks,
                try_deallocate_networks=False)
        self._notify_about_instance_usage('create.start',
                extra_usage_info={'image_name': self.image.get('name')})
        self._build_and_run_instance_update()
        self.compute.driver.spawn(self.context, self.instance, self.image,
                self.injected_files, self.admin_pass,
                network_info=self.network_info,
                block_device_info=self.block_device_info).AndRaise(exc)
        # NOTE(review): 'create.end' (not 'create.error') is expected here
        # because the instance has gone away — confirm against the manager.
        self._notify_about_instance_usage('create.end',
                fault=exc, stub=False)
        conductor_rpcapi.ConductorAPI.instance_update(
                self.context, self.instance.uuid, mox.IgnoreArg(),
                'conductor')
        self.mox.ReplayAll()

        self.assertRaises(exception.InstanceNotFound,
                self.compute._build_and_run_instance, self.context,
                self.instance, self.image, self.injected_files,
                self.admin_pass, self.requested_networks, self.security_groups,
                self.block_device_mapping, self.node,
                self.limits, self.filter_properties)
+
    def test_reschedule_on_exception(self):
        """A generic spawn failure is converted into a
        RescheduledException after shutdown and error notification.
        """
        self.mox.StubOutWithMock(self.compute.driver, 'spawn')
        self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
                                 'instance_update')
        self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
        self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
        self.compute._build_networks_for_instance(self.context, self.instance,
                self.requested_networks, self.security_groups).AndReturn(
                        self.network_info)
        self.compute._shutdown_instance(self.context, self.instance,
                self.block_device_mapping, self.requested_networks,
                try_deallocate_networks=False)
        self._notify_about_instance_usage('create.start',
                extra_usage_info={'image_name': self.image.get('name')})
        self._build_and_run_instance_update()
        exc = test.TestingException()
        self.compute.driver.spawn(self.context, self.instance, self.image,
                self.injected_files, self.admin_pass,
                network_info=self.network_info,
                block_device_info=self.block_device_info).AndRaise(exc)
        conductor_rpcapi.ConductorAPI.instance_update(
                self.context, self.instance.uuid, mox.IgnoreArg(),
                'conductor')
        self._notify_about_instance_usage('create.error',
                fault=exc, stub=False)
        self.mox.ReplayAll()

        self.assertRaises(exception.RescheduledException,
                self.compute._build_and_run_instance, self.context,
                self.instance, self.image, self.injected_files,
                self.admin_pass, self.requested_networks, self.security_groups,
                self.block_device_mapping, self.node,
                self.limits, self.filter_properties)
+
    def test_spawn_network_alloc_failure(self):
        # Because network allocation is asynchronous, failures may not present
        # themselves until the virt spawn method is called.
        self._test_build_and_run_spawn_exceptions(exception.NoMoreNetworks())
+
    def test_build_and_run_flavor_disk_too_small_exception(self):
        """FlavorDiskTooSmall from spawn aborts the build."""
        self._test_build_and_run_spawn_exceptions(
            exception.FlavorDiskTooSmall())
+
    def test_build_and_run_flavor_memory_too_small_exception(self):
        """FlavorMemoryTooSmall from spawn aborts the build."""
        self._test_build_and_run_spawn_exceptions(
            exception.FlavorMemoryTooSmall())
+
    def test_build_and_run_image_not_active_exception(self):
        """ImageNotActive from spawn aborts the build."""
        self._test_build_and_run_spawn_exceptions(
            exception.ImageNotActive(image_id=self.image.get('id')))
+
    def test_build_and_run_image_unacceptable_exception(self):
        """ImageUnacceptable from spawn aborts the build."""
        self._test_build_and_run_spawn_exceptions(
            exception.ImageUnacceptable(image_id=self.image.get('id'),
                                        reason=""))
+
    def _test_build_and_run_spawn_exceptions(self, exc):
        """Raise *exc* from driver.spawn and assert the build is aborted.

        Verifies the full error path: policy validation, network build,
        start/error notifications, both instance saves, the conductor
        update and the final shutdown.
        """
        with contextlib.nested(
                mock.patch.object(self.compute.driver, 'spawn',
                    side_effect=exc),
                mock.patch.object(conductor_rpcapi.ConductorAPI,
                    'instance_update'),
                mock.patch.object(self.instance, 'save',
                    side_effect=[self.instance, self.instance]),
                mock.patch.object(self.compute,
                    '_build_networks_for_instance',
                    return_value=self.network_info),
                mock.patch.object(self.compute,
                    '_notify_about_instance_usage'),
                mock.patch.object(self.compute,
                    '_shutdown_instance'),
                mock.patch.object(self.compute,
                    '_validate_instance_group_policy')
        ) as (spawn, instance_update, save,
                _build_networks_for_instance, _notify_about_instance_usage,
                _shutdown_instance, _validate_instance_group_policy):

            self.assertRaises(exception.BuildAbortException,
                    self.compute._build_and_run_instance, self.context,
                    self.instance, self.image, self.injected_files,
                    self.admin_pass, self.requested_networks,
                    self.security_groups, self.block_device_mapping, self.node,
                    self.limits, self.filter_properties)

            _validate_instance_group_policy.assert_called_once_with(
                    self.context, self.instance, self.filter_properties)
            _build_networks_for_instance.assert_has_calls(
                    mock.call(self.context, self.instance,
                        self.requested_networks, self.security_groups))

            _notify_about_instance_usage.assert_has_calls([
                mock.call(self.context, self.instance, 'create.start',
                    extra_usage_info={'image_name': self.image.get('name')}),
                mock.call(self.context, self.instance, 'create.error',
                    fault=exc)])

            # First save has no expectation; second pins the bdm task state.
            save.assert_has_calls([
                mock.call(),
                mock.call(
                    expected_task_state=task_states.BLOCK_DEVICE_MAPPING)])

            spawn.assert_has_calls(mock.call(self.context, self.instance,
                self.image, self.injected_files, self.admin_pass,
                network_info=self.network_info,
                block_device_info=self.block_device_info))

            instance_update.assert_has_calls(mock.call(self.context,
                self.instance.uuid, mock.ANY, 'conductor'))

            _shutdown_instance.assert_called_once_with(self.context,
                    self.instance, self.block_device_mapping,
                    self.requested_networks, try_deallocate_networks=False)
+
+ @mock.patch('nova.compute.manager.ComputeManager._get_power_state')
+ def test_spawn_waits_for_network_and_saves_info_cache(self, gps):
+ inst = mock.MagicMock()
+ network_info = mock.MagicMock()
+ with mock.patch.object(self.compute, 'driver'):
+ self.compute._spawn(self.context, inst, {}, network_info, None,
+ None, None)
+ network_info.wait.assert_called_once_with(do_raise=True)
+ self.assertEqual(network_info, inst.info_cache.network_info)
+ inst.save.assert_called_with(expected_task_state=task_states.SPAWNING)
+
    @mock.patch('nova.utils.spawn_n')
    def test_reschedule_on_resources_unavailable(self, mock_spawn):
        """A failed resource claim triggers a reschedule via conductor."""
        mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
        reason = 'resource unavailable'
        exc = exception.ComputeResourcesUnavailable(reason=reason)

        class FakeResourceTracker(object):
            # Always fail the claim to force the reschedule path.
            def instance_claim(self, context, instance, limits):
                raise exc

        self.mox.StubOutWithMock(self.compute, '_get_resource_tracker')
        self.mox.StubOutWithMock(self.compute.compute_task_api,
                                 'build_instances')
        self.compute._get_resource_tracker(self.node).AndReturn(
                FakeResourceTracker())
        self._do_build_instance_update(reschedule_update=True)
        self._notify_about_instance_usage('create.start',
                extra_usage_info={'image_name': self.image.get('name')})
        self._notify_about_instance_usage('create.error',
                fault=exc, stub=False)
        self.compute.compute_task_api.build_instances(self.context,
                [self.instance], self.image, self.filter_properties,
                self.admin_pass, self.injected_files, self.requested_networks,
                self.security_groups, self.block_device_mapping)
        self._instance_action_events()
        self.mox.ReplayAll()

        self.compute.build_and_run_instance(self.context, self.instance,
                self.image, request_spec={},
                filter_properties=self.filter_properties,
                injected_files=self.injected_files,
                admin_password=self.admin_pass,
                requested_networks=self.requested_networks,
                security_groups=self.security_groups,
                block_device_mapping=self.block_device_mapping, node=self.node,
                limits=self.limits)
+
    def test_build_resources_buildabort_reraise(self):
        """A BuildAbortException from _build_resources is re-raised by
        _build_and_run_instance after the error notification.
        """
        exc = exception.BuildAbortException(
                instance_uuid=self.instance.uuid, reason='')
        self.mox.StubOutWithMock(self.compute, '_build_resources')
        self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
                                 'instance_update')
        conductor_rpcapi.ConductorAPI.instance_update(
                self.context, self.instance.uuid, mox.IgnoreArg(),
                'conductor')
        self._notify_about_instance_usage('create.start',
                extra_usage_info={'image_name': self.image.get('name')})
        self.compute._build_resources(self.context, self.instance,
                self.requested_networks, self.security_groups, self.image,
                self.block_device_mapping).AndRaise(exc)
        self._notify_about_instance_usage('create.error',
                fault=exc, stub=False)
        self.mox.ReplayAll()
        self.assertRaises(exception.BuildAbortException,
                self.compute._build_and_run_instance, self.context,
                self.instance, self.image, self.injected_files,
                self.admin_pass, self.requested_networks,
                self.security_groups, self.block_device_mapping, self.node,
                self.limits, self.filter_properties)
+
+ def test_build_resources_reraises_on_failed_bdm_prep(self):
+ self.mox.StubOutWithMock(self.compute, '_prep_block_device')
+ self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
+ self.compute._build_networks_for_instance(self.context, self.instance,
+ self.requested_networks, self.security_groups).AndReturn(
+ self.network_info)
+ self._build_resources_instance_update()
+ self.compute._prep_block_device(self.context, self.instance,
+ self.block_device_mapping).AndRaise(test.TestingException())
+ self.mox.ReplayAll()
+
+ try:
+ with self.compute._build_resources(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.image, self.block_device_mapping):
+ pass
+ except Exception as e:
+ self.assertIsInstance(e, exception.BuildAbortException)
+
    def test_failed_bdm_prep_from_delete_raises_unexpected(self):
        """UnexpectedDeletingTaskStateError from instance.save during
        resource build must propagate unchanged (not be wrapped).
        """
        with contextlib.nested(
                mock.patch.object(self.compute,
                    '_build_networks_for_instance',
                    return_value=self.network_info),
                mock.patch.object(self.instance, 'save',
                    side_effect=exception.UnexpectedDeletingTaskStateError(
                        actual=task_states.DELETING, expected='None')),
        ) as (_build_networks_for_instance, save):

            # NOTE(review): this pattern passes silently if nothing raises;
            # consider an else: self.fail() clause.
            try:
                with self.compute._build_resources(self.context, self.instance,
                        self.requested_networks, self.security_groups,
                        self.image, self.block_device_mapping):
                    pass
            except Exception as e:
                self.assertIsInstance(e,
                        exception.UnexpectedDeletingTaskStateError)

            _build_networks_for_instance.assert_has_calls(
                    mock.call(self.context, self.instance,
                        self.requested_networks, self.security_groups))

            save.assert_has_calls(mock.call())
+
    def test_build_resources_aborts_on_failed_network_alloc(self):
        """A network allocation failure inside _build_resources is
        converted to a BuildAbortException.
        """
        self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
        self.compute._build_networks_for_instance(self.context, self.instance,
                self.requested_networks, self.security_groups).AndRaise(
                        test.TestingException())
        self.mox.ReplayAll()

        # NOTE(review): passes silently if nothing raises; consider an
        # else: self.fail() clause.
        try:
            with self.compute._build_resources(self.context, self.instance,
                    self.requested_networks, self.security_groups, self.image,
                    self.block_device_mapping):
                pass
        except Exception as e:
            self.assertIsInstance(e, exception.BuildAbortException)
+
    def test_failed_network_alloc_from_delete_raises_unexpected(self):
        """UnexpectedDeletingTaskStateError from network allocation must
        propagate unchanged (the instance is being deleted, not failing).
        """
        with mock.patch.object(self.compute,
                '_build_networks_for_instance') as _build_networks:

            exc = exception.UnexpectedDeletingTaskStateError
            _build_networks.side_effect = exc(actual=task_states.DELETING,
                    expected='None')

            # NOTE(review): passes silently if nothing raises; consider an
            # else: self.fail() clause.
            try:
                with self.compute._build_resources(self.context, self.instance,
                        self.requested_networks, self.security_groups,
                        self.image, self.block_device_mapping):
                    pass
            except Exception as e:
                self.assertIsInstance(e, exc)

            _build_networks.assert_has_calls(
                    mock.call(self.context, self.instance,
                        self.requested_networks, self.security_groups))
+
+    def test_build_resources_with_network_info_obj_on_spawn_failure(self):
+        # Spawn failure with a plain NetworkInfo (not the async wrapper):
+        # resources are torn down via _shutdown_instance without network
+        # deallocation, and the original spawn exception is re-raised.
+        self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
+        self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+        self.compute._build_networks_for_instance(self.context, self.instance,
+                self.requested_networks, self.security_groups).AndReturn(
+                        network_model.NetworkInfo())
+        self.compute._shutdown_instance(self.context, self.instance,
+                self.block_device_mapping, self.requested_networks,
+                try_deallocate_networks=False)
+        self._build_resources_instance_update()
+        self.mox.ReplayAll()
+
+        test_exception = test.TestingException()
+
+        def fake_spawn():
+            raise test_exception
+
+        try:
+            with self.compute._build_resources(self.context, self.instance,
+                    self.requested_networks, self.security_groups,
+                    self.image, self.block_device_mapping):
+                fake_spawn()
+        except Exception as e:
+            # The exact exception object must be re-raised, not a wrapper.
+            self.assertEqual(test_exception, e)
+
+    def test_build_resources_cleans_up_and_reraises_on_spawn_failure(self):
+        # Spawn failure with the fixture's default network_info: cleanup
+        # (_shutdown_instance, instance update) runs and the spawn
+        # exception is re-raised unchanged.
+        self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
+        self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+        self.compute._build_networks_for_instance(self.context, self.instance,
+                self.requested_networks, self.security_groups).AndReturn(
+                        self.network_info)
+        self.compute._shutdown_instance(self.context, self.instance,
+                self.block_device_mapping, self.requested_networks,
+                try_deallocate_networks=False)
+        self._build_resources_instance_update()
+        self.mox.ReplayAll()
+
+        test_exception = test.TestingException()
+
+        def fake_spawn():
+            raise test_exception
+
+        try:
+            with self.compute._build_resources(self.context, self.instance,
+                    self.requested_networks, self.security_groups,
+                    self.image, self.block_device_mapping):
+                fake_spawn()
+        except Exception as e:
+            self.assertEqual(test_exception, e)
+
+    @mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait')
+    @mock.patch(
+        'nova.compute.manager.ComputeManager._build_networks_for_instance')
+    @mock.patch('nova.objects.Instance.save')
+    def test_build_resources_instance_not_found_before_yield(
+            self, mock_save, mock_build_network, mock_info_wait):
+        # InstanceNotFound raised by instance.save() before the context
+        # manager yields must propagate as-is, and the async network info
+        # must be waited on with do_raise=False so its error is swallowed.
+        mock_build_network.return_value = self.network_info
+        expected_exc = exception.InstanceNotFound(
+            instance_id=self.instance.uuid)
+        mock_save.side_effect = expected_exc
+        try:
+            with self.compute._build_resources(self.context, self.instance,
+                    self.requested_networks, self.security_groups,
+                    self.image, self.block_device_mapping):
+                # save() raises before the yield, so this is never reached.
+                raise
+        except Exception as e:
+            self.assertEqual(expected_exc, e)
+        mock_build_network.assert_called_once_with(self.context, self.instance,
+                self.requested_networks, self.security_groups)
+        mock_info_wait.assert_called_once_with(do_raise=False)
+
+    @mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait')
+    @mock.patch(
+        'nova.compute.manager.ComputeManager._build_networks_for_instance')
+    @mock.patch('nova.objects.Instance.save')
+    def test_build_resources_unexpected_task_error_before_yield(
+            self, mock_save, mock_build_network, mock_info_wait):
+        # UnexpectedTaskStateError from save() before the yield is
+        # converted to BuildAbortException; async network info is still
+        # waited on quietly.
+        mock_build_network.return_value = self.network_info
+        mock_save.side_effect = exception.UnexpectedTaskStateError(
+            expected='', actual='')
+        try:
+            with self.compute._build_resources(self.context, self.instance,
+                    self.requested_networks, self.security_groups,
+                    self.image, self.block_device_mapping):
+                # save() raises before the yield, so this is never reached.
+                raise
+        except exception.BuildAbortException:
+            pass
+        mock_build_network.assert_called_once_with(self.context, self.instance,
+                self.requested_networks, self.security_groups)
+        mock_info_wait.assert_called_once_with(do_raise=False)
+
+    @mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait')
+    @mock.patch(
+        'nova.compute.manager.ComputeManager._build_networks_for_instance')
+    @mock.patch('nova.objects.Instance.save')
+    def test_build_resources_exception_before_yield(
+            self, mock_save, mock_build_network, mock_info_wait):
+        # Any other exception from save() before the yield is also turned
+        # into BuildAbortException.
+        mock_build_network.return_value = self.network_info
+        mock_save.side_effect = Exception()
+        try:
+            with self.compute._build_resources(self.context, self.instance,
+                    self.requested_networks, self.security_groups,
+                    self.image, self.block_device_mapping):
+                # save() raises before the yield, so this is never reached.
+                raise
+        except exception.BuildAbortException:
+            pass
+        mock_build_network.assert_called_once_with(self.context, self.instance,
+                self.requested_networks, self.security_groups)
+        mock_info_wait.assert_called_once_with(do_raise=False)
+
+    def test_build_resources_aborts_on_cleanup_failure(self):
+        # Even when cleanup itself (_shutdown_instance) fails after a
+        # failed spawn, the caller must still get BuildAbortException.
+        self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
+        self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+        self.compute._build_networks_for_instance(self.context, self.instance,
+                self.requested_networks, self.security_groups).AndReturn(
+                        self.network_info)
+        self.compute._shutdown_instance(self.context, self.instance,
+                self.block_device_mapping, self.requested_networks,
+                try_deallocate_networks=False).AndRaise(
+                        test.TestingException())
+        self._build_resources_instance_update()
+        self.mox.ReplayAll()
+
+        def fake_spawn():
+            raise test.TestingException()
+
+        try:
+            with self.compute._build_resources(self.context, self.instance,
+                    self.requested_networks, self.security_groups,
+                    self.image, self.block_device_mapping):
+                fake_spawn()
+        except Exception as e:
+            self.assertIsInstance(e, exception.BuildAbortException)
+
+    def test_build_networks_if_not_allocated(self):
+        # No 'network_allocated' key in system_metadata: networks must be
+        # allocated via _allocate_network (recorded expectation below).
+        instance = fake_instance.fake_instance_obj(self.context,
+                system_metadata={},
+                expected_attrs=['system_metadata'])
+
+        self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
+        self.mox.StubOutWithMock(self.compute, '_allocate_network')
+        self.compute._allocate_network(self.context, instance,
+                self.requested_networks, None, self.security_groups, None)
+        self.mox.ReplayAll()
+
+        self.compute._build_networks_for_instance(self.context, instance,
+                self.requested_networks, self.security_groups)
+
+    def test_build_networks_if_allocated_false(self):
+        # network_allocated explicitly 'False': behaves like the
+        # not-allocated case and triggers _allocate_network.
+        instance = fake_instance.fake_instance_obj(self.context,
+                system_metadata=dict(network_allocated='False'),
+                expected_attrs=['system_metadata'])
+
+        self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
+        self.mox.StubOutWithMock(self.compute, '_allocate_network')
+        self.compute._allocate_network(self.context, instance,
+                self.requested_networks, None, self.security_groups, None)
+        self.mox.ReplayAll()
+
+        self.compute._build_networks_for_instance(self.context, instance,
+                self.requested_networks, self.security_groups)
+
+    def test_return_networks_if_found(self):
+        # network_allocated 'True': existing network info is fetched via
+        # _get_instance_nw_info; _allocate_network is stubbed with no
+        # expectation recorded, so any call to it would fail verification.
+        instance = fake_instance.fake_instance_obj(self.context,
+                system_metadata=dict(network_allocated='True'),
+                expected_attrs=['system_metadata'])
+
+        def fake_network_info():
+            return network_model.NetworkInfo([{'address': '123.123.123.123'}])
+
+        self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
+        self.mox.StubOutWithMock(self.compute, '_allocate_network')
+        self.compute._get_instance_nw_info(self.context, instance).AndReturn(
+                network_model.NetworkInfoAsyncWrapper(fake_network_info))
+        self.mox.ReplayAll()
+
+        self.compute._build_networks_for_instance(self.context, instance,
+                self.requested_networks, self.security_groups)
+
+    def test_cleanup_allocated_networks_instance_not_found(self):
+        # InstanceNotFound from save() during network cleanup must be
+        # swallowed, with the allocation flag still recorded as 'False'.
+        with contextlib.nested(
+                mock.patch.object(self.compute, '_deallocate_network'),
+                mock.patch.object(self.instance, 'save',
+                    side_effect=exception.InstanceNotFound(instance_id=''))
+        ) as (_deallocate_network, save):
+            # Testing that this doesn't raise an exception
+            self.compute._cleanup_allocated_networks(self.context,
+                    self.instance, self.requested_networks)
+            save.assert_called_once_with()
+            self.assertEqual('False',
+                    self.instance.system_metadata['network_allocated'])
+
+    @mock.patch.object(conductor_rpcapi.ConductorAPI, 'instance_update')
+    def test_launched_at_in_create_end_notification(self,
+            mock_instance_update):
+
+        # launched_at must already be set on the instance by the time the
+        # create.end notification is emitted.
+        def fake_notify(*args, **kwargs):
+            if args[2] == 'create.end':
+                # Check that launched_at is set on the instance
+                self.assertIsNotNone(args[1].launched_at)
+
+        with contextlib.nested(
+                mock.patch.object(self.compute.driver, 'spawn'),
+                mock.patch.object(self.compute,
+                    '_build_networks_for_instance', return_value=[]),
+                mock.patch.object(self.instance, 'save'),
+                mock.patch.object(self.compute, '_notify_about_instance_usage',
+                    side_effect=fake_notify)
+        ) as (mock_spawn, mock_networks, mock_save, mock_notify):
+            self.compute._build_and_run_instance(self.context, self.instance,
+                    self.image, self.injected_files, self.admin_pass,
+                    self.requested_networks, self.security_groups,
+                    self.block_device_mapping, self.node, self.limits,
+                    self.filter_properties)
+            expected_call = mock.call(self.context, self.instance,
+                    'create.end', extra_usage_info={'message': u'Success'},
+                    network_info=[])
+            # create.end must be the last notification emitted.
+            create_end_call = mock_notify.call_args_list[
+                    mock_notify.call_count - 1]
+            self.assertEqual(expected_call, create_end_call)
+
+    @mock.patch.object(conductor_rpcapi.ConductorAPI, 'instance_update')
+    def test_create_end_on_instance_delete(self, mock_instance_update):
+
+        # If the instance is deleted during build (the third save() raises
+        # InstanceNotFound), a create.end notification carrying the fault
+        # must still be emitted before the exception propagates.
+        def fake_notify(*args, **kwargs):
+            if args[2] == 'create.end':
+                # Check that launched_at is set on the instance
+                self.assertIsNotNone(args[1].launched_at)
+
+        exc = exception.InstanceNotFound(instance_id='')
+
+        with contextlib.nested(
+                mock.patch.object(self.compute.driver, 'spawn'),
+                mock.patch.object(self.compute,
+                    '_build_networks_for_instance', return_value=[]),
+                mock.patch.object(self.instance, 'save',
+                    side_effect=[None, None, exc]),
+                mock.patch.object(self.compute, '_notify_about_instance_usage',
+                    side_effect=fake_notify)
+        ) as (mock_spawn, mock_networks, mock_save, mock_notify):
+            self.assertRaises(exception.InstanceNotFound,
+                    self.compute._build_and_run_instance, self.context,
+                    self.instance, self.image, self.injected_files,
+                    self.admin_pass, self.requested_networks,
+                    self.security_groups, self.block_device_mapping, self.node,
+                    self.limits, self.filter_properties)
+            expected_call = mock.call(self.context, self.instance,
+                    'create.end', fault=exc)
+            # create.end must be the last notification emitted.
+            create_end_call = mock_notify.call_args_list[
+                    mock_notify.call_count - 1]
+            self.assertEqual(expected_call, create_end_call)
+
+
+class ComputeManagerMigrationTestCase(test.NoDBTestCase):
+    """Failure-path tests for resize/migration in the compute manager."""
+
+    def setUp(self):
+        super(ComputeManagerMigrationTestCase, self).setUp()
+        self.compute = importutils.import_object(CONF.compute_manager)
+        self.context = context.RequestContext('fake', 'fake')
+        self.image = {}
+        self.instance = fake_instance.fake_instance_obj(self.context,
+                vm_state=vm_states.ACTIVE,
+                expected_attrs=['metadata', 'system_metadata', 'info_cache'])
+        # Migration object in flight; tests assert it flips to 'error'.
+        self.migration = objects.Migration()
+        self.migration.status = 'migrating'
+
+    def test_finish_resize_failure(self):
+        # A failure in _finish_resize must set migration.status to 'error',
+        # save it under the elevated context, and re-raise ResizeError.
+        elevated_context = self.context.elevated()
+        with contextlib.nested(
+                mock.patch.object(self.compute, '_finish_resize',
+                    side_effect=exception.ResizeError(reason='')),
+                mock.patch.object(objects.InstanceActionEvent, 'event_start'),
+                mock.patch.object(objects.InstanceActionEvent,
+                    'event_finish_with_failure'),
+                mock.patch.object(db, 'instance_fault_create'),
+                mock.patch.object(self.compute, '_instance_update'),
+                mock.patch.object(self.migration, 'save'),
+                mock.patch.object(self.context, 'elevated',
+                    return_value=elevated_context)
+        ) as (meth, event_start, event_finish, fault_create, instance_update,
+              migration_save, context_elevated):
+            fault_create.return_value = (
+                test_instance_fault.fake_faults['fake-uuid'][0])
+            self.assertRaises(
+                exception.ResizeError, self.compute.finish_resize,
+                context=self.context, disk_info=[], image=self.image,
+                instance=self.instance, reservations=[],
+                migration=self.migration
+            )
+            self.assertEqual("error", self.migration.status)
+            migration_save.assert_has_calls([mock.call(elevated_context)])
+
+    def test_resize_instance_failure(self):
+        # Same status handling when migrate_disk_and_power_off fails
+        # during resize_instance.
+        elevated_context = self.context.elevated()
+        self.migration.dest_host = None
+        with contextlib.nested(
+                mock.patch.object(self.compute.driver,
+                    'migrate_disk_and_power_off',
+                    side_effect=exception.ResizeError(reason='')),
+                mock.patch.object(objects.InstanceActionEvent, 'event_start'),
+                mock.patch.object(objects.InstanceActionEvent,
+                    'event_finish_with_failure'),
+                mock.patch.object(db, 'instance_fault_create'),
+                mock.patch.object(self.compute, '_instance_update'),
+                mock.patch.object(self.migration, 'save'),
+                mock.patch.object(self.context, 'elevated',
+                    return_value=elevated_context),
+                mock.patch.object(self.compute, '_get_instance_nw_info',
+                    return_value=None),
+                mock.patch.object(self.instance, 'save'),
+                mock.patch.object(self.compute, '_notify_about_instance_usage'),
+                mock.patch.object(self.compute,
+                    '_get_instance_block_device_info',
+                    return_value=None),
+                mock.patch.object(objects.BlockDeviceMappingList,
+                    'get_by_instance_uuid',
+                    return_value=None)
+        ) as (meth, event_start, event_finish, fault_create, instance_update,
+              migration_save, context_elevated, nw_info, save_inst, notify,
+              vol_block_info, bdm):
+            fault_create.return_value = (
+                test_instance_fault.fake_faults['fake-uuid'][0])
+            self.assertRaises(
+                exception.ResizeError, self.compute.resize_instance,
+                context=self.context, instance=self.instance, image=self.image,
+                reservations=[], migration=self.migration, instance_type='type'
+            )
+            self.assertEqual("error", self.migration.status)
+            migration_save.assert_has_calls([mock.call(elevated_context)])
diff --git a/nova/tests/unit/compute/test_compute_utils.py b/nova/tests/unit/compute/test_compute_utils.py
new file mode 100644
index 0000000000..6234ae30f6
--- /dev/null
+++ b/nova/tests/unit/compute/test_compute_utils.py
@@ -0,0 +1,827 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests For miscellaneous util methods used with compute."""
+
+import copy
+import string
+
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import importutils
+import six
+import testtools
+
+from nova.compute import flavors
+from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import utils as compute_utils
+from nova import context
+from nova import db
+from nova import exception
+from nova.image import glance
+from nova.network import api as network_api
+from nova import objects
+from nova.objects import block_device as block_device_obj
+from nova.objects import instance as instance_obj
+from nova import rpc
+from nova import test
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_notifier
+from nova.tests.unit import fake_server_actions
+import nova.tests.unit.image.fake
+from nova.tests.unit import matchers
+from nova import utils
+from nova.virt import driver
+
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+
+
+class ComputeValidateDeviceTestCase(test.TestCase):
+    """Exercise device-name selection for attached volumes
+    (compute_utils.get_device_name_for_instance).
+    """
+
+    def setUp(self):
+        super(ComputeValidateDeviceTestCase, self).setUp()
+        self.context = context.RequestContext('fake', 'fake')
+        # check if test name includes "xen"
+        if 'xen' in self.id():
+            self.flags(compute_driver='xenapi.XenAPIDriver')
+            self.instance = {
+                'uuid': 'fake',
+                'root_device_name': None,
+                'instance_type_id': 'fake',
+            }
+        else:
+            self.instance = {
+                'uuid': 'fake',
+                'root_device_name': '/dev/vda',
+                'default_ephemeral_device': '/dev/vdb',
+                'instance_type_id': 'fake',
+            }
+        # Each test shapes self.data; the DB stub below serves it as the
+        # instance's block device mappings.
+        self.data = []
+
+        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+                       lambda context, instance, use_slave=False: self.data)
+
+    def _update_flavor(self, flavor_info):
+        # Build a flavor dict and mirror it into the instance's
+        # system_metadata under instance_type_* keys.
+        self.flavor = {
+            'id': 1,
+            'name': 'foo',
+            'memory_mb': 128,
+            'vcpus': 1,
+            'root_gb': 10,
+            'ephemeral_gb': 10,
+            'flavorid': 1,
+            'swap': 0,
+            'rxtx_factor': 1.0,
+            'vcpu_weight': 1,
+        }
+        self.flavor.update(flavor_info)
+        self.instance['system_metadata'] = [{'key': 'instance_type_%s' % key,
+                                             'value': value}
+                                            for key, value in
+                                            self.flavor.items()]
+
+    def _validate_device(self, device=None):
+        # Returns the next free device name for self.instance given the
+        # current self.data BDMs (and an optional requested device).
+        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
+            self.context, self.instance['uuid'])
+        return compute_utils.get_device_name_for_instance(
+            self.context, self.instance, bdms, device)
+
+    @staticmethod
+    def _fake_bdm(device):
+        # Minimal volume-backed BDM dict occupying the given device name.
+        return fake_block_device.FakeDbBlockDeviceDict({
+            'source_type': 'volume',
+            'destination_type': 'volume',
+            'device_name': device,
+            'no_device': None,
+            'volume_id': 'fake',
+            'snapshot_id': None,
+            'guest_format': None
+        })
+
+    def test_wrap(self):
+        # vdc..vdz occupied: next name wraps to the two-letter vdaa.
+        self.data = []
+        for letter in string.ascii_lowercase[2:]:
+            self.data.append(self._fake_bdm('/dev/vd' + letter))
+        device = self._validate_device()
+        self.assertEqual(device, '/dev/vdaa')
+
+    def test_wrap_plus_one(self):
+        # vdc..vdz plus vdaa occupied: next is vdab.
+        self.data = []
+        for letter in string.ascii_lowercase[2:]:
+            self.data.append(self._fake_bdm('/dev/vd' + letter))
+        self.data.append(self._fake_bdm('/dev/vdaa'))
+        device = self._validate_device()
+        self.assertEqual(device, '/dev/vdab')
+
+    def test_later(self):
+        # Allocation continues after the highest used name.
+        self.data = [
+            self._fake_bdm('/dev/vdc'),
+            self._fake_bdm('/dev/vdd'),
+            self._fake_bdm('/dev/vde'),
+        ]
+        device = self._validate_device()
+        self.assertEqual(device, '/dev/vdf')
+
+    def test_gap(self):
+        # Gaps in the used names are filled first.
+        self.data = [
+            self._fake_bdm('/dev/vdc'),
+            self._fake_bdm('/dev/vde'),
+        ]
+        device = self._validate_device()
+        self.assertEqual(device, '/dev/vdd')
+
+    def test_no_bdms(self):
+        # Only root (vda) and default ephemeral (vdb) used: first free
+        # name is vdc.
+        self.data = []
+        device = self._validate_device()
+        self.assertEqual(device, '/dev/vdc')
+
+    def test_lxc_names_work(self):
+        # Single-letter (LXC-style) device names are handled too.
+        self.instance['root_device_name'] = '/dev/a'
+        self.instance['ephemeral_device_name'] = '/dev/b'
+        self.data = []
+        device = self._validate_device()
+        self.assertEqual(device, '/dev/c')
+
+    def test_name_conversion(self):
+        # Requested names are normalized onto the instance's device prefix.
+        self.data = []
+        device = self._validate_device('/dev/c')
+        self.assertEqual(device, '/dev/vdc')
+        device = self._validate_device('/dev/sdc')
+        self.assertEqual(device, '/dev/vdc')
+        device = self._validate_device('/dev/xvdc')
+        self.assertEqual(device, '/dev/vdc')
+
+    def test_invalid_device_prefix(self):
+        self.assertRaises(exception.InvalidDevicePath,
+                          self._validate_device, '/baddata/vdc')
+
+    def test_device_in_use(self):
+        # Requesting the root device must fail and name it in the message.
+        exc = self.assertRaises(exception.DevicePathInUse,
+                          self._validate_device, '/dev/vda')
+        self.assertIn('/dev/vda', six.text_type(exc))
+
+    def test_swap(self):
+        self.instance['default_swap_device'] = "/dev/vdc"
+        device = self._validate_device()
+        self.assertEqual(device, '/dev/vdd')
+
+    def test_swap_no_ephemeral(self):
+        del self.instance['default_ephemeral_device']
+        self.instance['default_swap_device'] = "/dev/vdb"
+        device = self._validate_device()
+        self.assertEqual(device, '/dev/vdc')
+
+    def test_ephemeral_xenapi(self):
+        # xen tests (driver selected in setUp via test name) use the xvd
+        # prefix; ephemeral implicitly takes xvdb.
+        self._update_flavor({
+            'ephemeral_gb': 10,
+            'swap': 0,
+        })
+        self.stubs.Set(flavors, 'get_flavor',
+                       lambda instance_type_id, ctxt=None: self.flavor)
+        device = self._validate_device()
+        self.assertEqual(device, '/dev/xvdc')
+
+    def test_swap_xenapi(self):
+        self._update_flavor({
+            'ephemeral_gb': 0,
+            'swap': 10,
+        })
+        self.stubs.Set(flavors, 'get_flavor',
+                       lambda instance_type_id, ctxt=None: self.flavor)
+        device = self._validate_device()
+        self.assertEqual(device, '/dev/xvdb')
+
+    def test_swap_and_ephemeral_xenapi(self):
+        self._update_flavor({
+            'ephemeral_gb': 10,
+            'swap': 10,
+        })
+        self.stubs.Set(flavors, 'get_flavor',
+                       lambda instance_type_id, ctxt=None: self.flavor)
+        device = self._validate_device()
+        self.assertEqual(device, '/dev/xvdd')
+
+    def test_swap_and_one_attachment_xenapi(self):
+        self._update_flavor({
+            'ephemeral_gb': 0,
+            'swap': 10,
+        })
+        self.stubs.Set(flavors, 'get_flavor',
+                       lambda instance_type_id, ctxt=None: self.flavor)
+        device = self._validate_device()
+        self.assertEqual(device, '/dev/xvdb')
+        # Attach the returned device, then the next pick must skip past
+        # the implicit swap slot as well.
+        self.data.append(self._fake_bdm(device))
+        device = self._validate_device()
+        self.assertEqual(device, '/dev/xvdd')
+
+    def test_no_dev_root_device_name_get_next_name(self):
+        # root_device_name without the /dev/ prefix is still understood.
+        self.instance['root_device_name'] = 'vda'
+        device = self._validate_device()
+        self.assertEqual('/dev/vdc', device)
+
+
+class DefaultDeviceNamesForInstanceTestCase(test.NoDBTestCase):
+    """Tests for compute_utils.default_device_names_for_instance, which
+    fills in missing device_name values on ephemeral/swap/volume BDMs.
+    """
+
+    def setUp(self):
+        super(DefaultDeviceNamesForInstanceTestCase, self).setUp()
+        self.context = context.RequestContext('fake', 'fake')
+        # One ephemeral disk pre-named /dev/vdb.
+        self.ephemerals = block_device_obj.block_device_make_list(
+                self.context,
+                [fake_block_device.FakeDbBlockDeviceDict(
+                    {'id': 1, 'instance_uuid': 'fake-instance',
+                     'device_name': '/dev/vdb',
+                     'source_type': 'blank',
+                     'destination_type': 'local',
+                     'delete_on_termination': True,
+                     'guest_format': None,
+                     'boot_index': -1})])
+
+        # One swap disk pre-named /dev/vdc.
+        self.swap = block_device_obj.block_device_make_list(
+                self.context,
+                [fake_block_device.FakeDbBlockDeviceDict(
+                    {'id': 2, 'instance_uuid': 'fake-instance',
+                     'device_name': '/dev/vdc',
+                     'source_type': 'blank',
+                     'destination_type': 'local',
+                     'delete_on_termination': True,
+                     'guest_format': 'swap',
+                     'boot_index': -1})])
+
+        # Boot volume (vda) plus two data volumes (vdd, vde).
+        self.block_device_mapping = block_device_obj.block_device_make_list(
+                self.context,
+                [fake_block_device.FakeDbBlockDeviceDict(
+                    {'id': 3, 'instance_uuid': 'fake-instance',
+                     'device_name': '/dev/vda',
+                     'source_type': 'volume',
+                     'destination_type': 'volume',
+                     'volume_id': 'fake-volume-id-1',
+                     'boot_index': 0}),
+                 fake_block_device.FakeDbBlockDeviceDict(
+                    {'id': 4, 'instance_uuid': 'fake-instance',
+                     'device_name': '/dev/vdd',
+                     'source_type': 'snapshot',
+                     'destination_type': 'volume',
+                     'snapshot_id': 'fake-snapshot-id-1',
+                     'boot_index': -1}),
+                 fake_block_device.FakeDbBlockDeviceDict(
+                    {'id': 5, 'instance_uuid': 'fake-instance',
+                     'device_name': '/dev/vde',
+                     'source_type': 'blank',
+                     'destination_type': 'volume',
+                     'boot_index': -1})])
+        self.flavor = {'swap': 4}
+        self.instance = {'uuid': 'fake_instance', 'ephemeral_gb': 2}
+        self.is_libvirt = False
+        self.root_device_name = '/dev/vda'
+        self.update_called = False
+
+        def fake_extract_flavor(instance):
+            return self.flavor
+
+        def fake_driver_matches(driver_string):
+            if driver_string == 'libvirt.LibvirtDriver':
+                return self.is_libvirt
+            return False
+
+        # Patch BDM persistence and flavor/driver lookups for all tests;
+        # stopped again in tearDown.
+        self.patchers = []
+        self.patchers.append(
+                mock.patch.object(objects.BlockDeviceMapping, 'save'))
+        self.patchers.append(
+                mock.patch.object(
+                    flavors, 'extract_flavor',
+                    new=mock.Mock(side_effect=fake_extract_flavor)))
+        self.patchers.append(
+                mock.patch.object(driver,
+                                  'compute_driver_matches',
+                                  new=mock.Mock(
+                                      side_effect=fake_driver_matches)))
+        for patcher in self.patchers:
+            patcher.start()
+
+    def tearDown(self):
+        super(DefaultDeviceNamesForInstanceTestCase, self).tearDown()
+        for patcher in self.patchers:
+            patcher.stop()
+
+    def _test_default_device_names(self, *block_device_lists):
+        # Thin wrapper over the function under test.
+        compute_utils.default_device_names_for_instance(self.instance,
+                                                        self.root_device_name,
+                                                        *block_device_lists)
+
+    def test_only_block_device_mapping(self):
+        # Test no-op
+        original_bdm = copy.deepcopy(self.block_device_mapping)
+        self._test_default_device_names([], [], self.block_device_mapping)
+        for original, new in zip(original_bdm, self.block_device_mapping):
+            self.assertEqual(original.device_name, new.device_name)
+
+        # Assert it defaults the missing one as expected
+        self.block_device_mapping[1]['device_name'] = None
+        self.block_device_mapping[2]['device_name'] = None
+        self._test_default_device_names([], [], self.block_device_mapping)
+        self.assertEqual('/dev/vdb',
+                         self.block_device_mapping[1]['device_name'])
+        self.assertEqual('/dev/vdc',
+                         self.block_device_mapping[2]['device_name'])
+
+    def test_with_ephemerals(self):
+        # Test ephemeral gets assigned
+        self.ephemerals[0]['device_name'] = None
+        self._test_default_device_names(self.ephemerals, [],
+                                        self.block_device_mapping)
+        self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')
+
+        # Volumes come after the ephemeral slot.
+        self.block_device_mapping[1]['device_name'] = None
+        self.block_device_mapping[2]['device_name'] = None
+        self._test_default_device_names(self.ephemerals, [],
+                                        self.block_device_mapping)
+        self.assertEqual('/dev/vdc',
+                         self.block_device_mapping[1]['device_name'])
+        self.assertEqual('/dev/vdd',
+                         self.block_device_mapping[2]['device_name'])
+
+    def test_with_swap(self):
+        # Test swap only
+        self.swap[0]['device_name'] = None
+        self._test_default_device_names([], self.swap, [])
+        self.assertEqual(self.swap[0]['device_name'], '/dev/vdb')
+
+        # Test swap and block_device_mapping
+        self.swap[0]['device_name'] = None
+        self.block_device_mapping[1]['device_name'] = None
+        self.block_device_mapping[2]['device_name'] = None
+        self._test_default_device_names([], self.swap,
+                                        self.block_device_mapping)
+        self.assertEqual(self.swap[0]['device_name'], '/dev/vdb')
+        self.assertEqual('/dev/vdc',
+                         self.block_device_mapping[1]['device_name'])
+        self.assertEqual('/dev/vdd',
+                         self.block_device_mapping[2]['device_name'])
+
+    def test_all_together(self):
+        # Test swap missing
+        self.swap[0]['device_name'] = None
+        self._test_default_device_names(self.ephemerals,
+                                        self.swap, self.block_device_mapping)
+        self.assertEqual(self.swap[0]['device_name'], '/dev/vdc')
+
+        # Test swap and eph missing
+        self.swap[0]['device_name'] = None
+        self.ephemerals[0]['device_name'] = None
+        self._test_default_device_names(self.ephemerals,
+                                        self.swap, self.block_device_mapping)
+        self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')
+        self.assertEqual(self.swap[0]['device_name'], '/dev/vdc')
+
+        # Test all missing
+        self.swap[0]['device_name'] = None
+        self.ephemerals[0]['device_name'] = None
+        self.block_device_mapping[1]['device_name'] = None
+        self.block_device_mapping[2]['device_name'] = None
+        self._test_default_device_names(self.ephemerals,
+                                        self.swap, self.block_device_mapping)
+        self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')
+        self.assertEqual(self.swap[0]['device_name'], '/dev/vdc')
+        self.assertEqual('/dev/vdd',
+                         self.block_device_mapping[1]['device_name'])
+        self.assertEqual('/dev/vde',
+                         self.block_device_mapping[2]['device_name'])
+
+
+class UsageInfoTestCase(test.TestCase):
+
+    def setUp(self):
+        # Network info lookups must happen with an admin context.
+        def fake_get_nw_info(cls, ctxt, instance):
+            self.assertTrue(ctxt.is_admin)
+            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
+
+        super(UsageInfoTestCase, self).setUp()
+        self.stubs.Set(network_api.API, 'get_instance_nw_info',
+                       fake_get_nw_info)
+
+        # Capture notifications in-process instead of emitting over RPC.
+        fake_notifier.stub_notifier(self.stubs)
+        self.addCleanup(fake_notifier.reset)
+
+        self.flags(use_local=True, group='conductor')
+        self.flags(compute_driver='nova.virt.fake.FakeDriver',
+                   network_manager='nova.network.manager.FlatManager')
+        self.compute = importutils.import_object(CONF.compute_manager)
+        self.user_id = 'fake'
+        self.project_id = 'fake'
+        self.context = context.RequestContext(self.user_id, self.project_id)
+
+        def fake_show(meh, context, id, **kwargs):
+            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+        self.stubs.Set(nova.tests.unit.image.fake._FakeImageService,
+                       'show', fake_show)
+        fake_network.set_stub_network_methods(self.stubs)
+        fake_server_actions.stub_out_action_events(self.stubs)
+
+    def _create_instance(self, params=None):
+        """Create a test instance and return its database id."""
+        params = params or {}
+        flavor = flavors.get_flavor_by_name('m1.tiny')
+        sys_meta = flavors.save_flavor_info({}, flavor)
+        inst = {}
+        inst['image_ref'] = 1
+        inst['reservation_id'] = 'r-fakeres'
+        inst['user_id'] = self.user_id
+        inst['project_id'] = self.project_id
+        inst['instance_type_id'] = flavor['id']
+        inst['system_metadata'] = sys_meta
+        inst['ami_launch_index'] = 0
+        inst['root_gb'] = 0
+        inst['ephemeral_gb'] = 0
+        inst['info_cache'] = {'network_info': '[]'}
+        # Caller-supplied fields override the defaults above.
+        inst.update(params)
+        return db.instance_create(self.context, inst)['id']
+
+    def test_notify_usage_exists(self):
+        # Ensure 'exists' notification generates appropriate usage data.
+        instance_id = self._create_instance()
+        instance = objects.Instance.get_by_id(self.context, instance_id)
+        # Set some system metadata
+        sys_metadata = {'image_md_key1': 'val1',
+                        'image_md_key2': 'val2',
+                        'other_data': 'meow'}
+        instance.system_metadata.update(sys_metadata)
+        instance.save()
+        compute_utils.notify_usage_exists(
+            rpc.get_notifier('compute'), self.context, instance)
+        self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
+        msg = fake_notifier.NOTIFICATIONS[0]
+        self.assertEqual(msg.priority, 'INFO')
+        self.assertEqual(msg.event_type, 'compute.instance.exists')
+        payload = msg.payload
+        self.assertEqual(payload['tenant_id'], self.project_id)
+        self.assertEqual(payload['user_id'], self.user_id)
+        self.assertEqual(payload['instance_id'], instance['uuid'])
+        self.assertEqual(payload['instance_type'], 'm1.tiny')
+        type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+        self.assertEqual(str(payload['instance_type_id']), str(type_id))
+        flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
+        self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
+        for attr in ('display_name', 'created_at', 'launched_at',
+                     'state', 'state_description',
+                     'bandwidth', 'audit_period_beginning',
+                     'audit_period_ending', 'image_meta'):
+            self.assertTrue(attr in payload,
+                            msg="Key %s not in payload" % attr)
+        # Only image_md_* system metadata appears (prefix stripped);
+        # 'other_data' must be filtered out.
+        self.assertEqual(payload['image_meta'],
+                         {'md_key1': 'val1', 'md_key2': 'val2'})
+        image_ref_url = "%s/images/1" % glance.generate_glance_url()
+        self.assertEqual(payload['image_ref_url'], image_ref_url)
+        self.compute.terminate_instance(self.context, instance, [], [])
+
+    def test_notify_usage_exists_deleted_instance(self):
+        # Ensure 'exists' notification generates appropriate usage data
+        # even for an instance that has already been deleted (looked up
+        # with read_deleted='yes').
+        instance_id = self._create_instance()
+        instance = objects.Instance.get_by_id(self.context, instance_id,
+                expected_attrs=['metadata', 'system_metadata', 'info_cache'])
+        # Set some system metadata
+        sys_metadata = {'image_md_key1': 'val1',
+                        'image_md_key2': 'val2',
+                        'other_data': 'meow'}
+        instance.system_metadata.update(sys_metadata)
+        instance.save()
+        self.compute.terminate_instance(self.context, instance, [], [])
+        instance = objects.Instance.get_by_id(
+                self.context.elevated(read_deleted='yes'), instance_id,
+                expected_attrs=['system_metadata'])
+        compute_utils.notify_usage_exists(
+            rpc.get_notifier('compute'), self.context, instance)
+        msg = fake_notifier.NOTIFICATIONS[-1]
+        self.assertEqual(msg.priority, 'INFO')
+        self.assertEqual(msg.event_type, 'compute.instance.exists')
+        payload = msg.payload
+        self.assertEqual(payload['tenant_id'], self.project_id)
+        self.assertEqual(payload['user_id'], self.user_id)
+        self.assertEqual(payload['instance_id'], instance['uuid'])
+        self.assertEqual(payload['instance_type'], 'm1.tiny')
+        type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+        self.assertEqual(str(payload['instance_type_id']), str(type_id))
+        flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
+        self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
+        for attr in ('display_name', 'created_at', 'launched_at',
+                     'state', 'state_description',
+                     'bandwidth', 'audit_period_beginning',
+                     'audit_period_ending', 'image_meta'):
+            self.assertTrue(attr in payload,
+                            msg="Key %s not in payload" % attr)
+        self.assertEqual(payload['image_meta'],
+                         {'md_key1': 'val1', 'md_key2': 'val2'})
+        image_ref_url = "%s/images/1" % glance.generate_glance_url()
+        self.assertEqual(payload['image_ref_url'], image_ref_url)
+
+    def test_notify_usage_exists_instance_not_found(self):
+        # Ensure 'exists' notification generates appropriate usage data
+        # when the terminated instance carries no image system metadata:
+        # image_meta in the payload is then empty.
+        instance_id = self._create_instance()
+        instance = objects.Instance.get_by_id(self.context, instance_id,
+                expected_attrs=['metadata', 'system_metadata', 'info_cache'])
+        self.compute.terminate_instance(self.context, instance, [], [])
+        compute_utils.notify_usage_exists(
+            rpc.get_notifier('compute'), self.context, instance)
+        msg = fake_notifier.NOTIFICATIONS[-1]
+        self.assertEqual(msg.priority, 'INFO')
+        self.assertEqual(msg.event_type, 'compute.instance.exists')
+        payload = msg.payload
+        self.assertEqual(payload['tenant_id'], self.project_id)
+        self.assertEqual(payload['user_id'], self.user_id)
+        self.assertEqual(payload['instance_id'], instance['uuid'])
+        self.assertEqual(payload['instance_type'], 'm1.tiny')
+        type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+        self.assertEqual(str(payload['instance_type_id']), str(type_id))
+        flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
+        self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
+        for attr in ('display_name', 'created_at', 'launched_at',
+                     'state', 'state_description',
+                     'bandwidth', 'audit_period_beginning',
+                     'audit_period_ending', 'image_meta'):
+            self.assertTrue(attr in payload,
+                            msg="Key %s not in payload" % attr)
+        self.assertEqual(payload['image_meta'], {})
+        image_ref_url = "%s/images/1" % glance.generate_glance_url()
+        self.assertEqual(payload['image_ref_url'], image_ref_url)
+
+ def test_notify_about_instance_usage(self):
+ instance_id = self._create_instance()
+ instance = objects.Instance.get_by_id(self.context, instance_id,
+ expected_attrs=['metadata', 'system_metadata', 'info_cache'])
+ # Set some system metadata
+ sys_metadata = {'image_md_key1': 'val1',
+ 'image_md_key2': 'val2',
+ 'other_data': 'meow'}
+ instance.system_metadata.update(sys_metadata)
+ instance.save()
+ extra_usage_info = {'image_name': 'fake_name'}
+ compute_utils.notify_about_instance_usage(
+ rpc.get_notifier('compute'),
+ self.context, instance, 'create.start',
+ extra_usage_info=extra_usage_info)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.priority, 'INFO')
+ self.assertEqual(msg.event_type, 'compute.instance.create.start')
+ payload = msg.payload
+ self.assertEqual(payload['tenant_id'], self.project_id)
+ self.assertEqual(payload['user_id'], self.user_id)
+ self.assertEqual(payload['instance_id'], instance['uuid'])
+ self.assertEqual(payload['instance_type'], 'm1.tiny')
+ type_id = flavors.get_flavor_by_name('m1.tiny')['id']
+ self.assertEqual(str(payload['instance_type_id']), str(type_id))
+ flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid']
+ self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id))
+ for attr in ('display_name', 'created_at', 'launched_at',
+ 'state', 'state_description', 'image_meta'):
+ self.assertTrue(attr in payload,
+ msg="Key %s not in payload" % attr)
+ self.assertEqual(payload['image_meta'],
+ {'md_key1': 'val1', 'md_key2': 'val2'})
+ self.assertEqual(payload['image_name'], 'fake_name')
+ image_ref_url = "%s/images/1" % glance.generate_glance_url()
+ self.assertEqual(payload['image_ref_url'], image_ref_url)
+ self.compute.terminate_instance(self.context, instance, [], [])
+
+ def test_notify_about_aggregate_update_with_id(self):
+ # Set aggregate payload
+ aggregate_payload = {'aggregate_id': 1}
+ compute_utils.notify_about_aggregate_update(self.context,
+ "create.end",
+ aggregate_payload)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.priority, 'INFO')
+ self.assertEqual(msg.event_type, 'aggregate.create.end')
+ payload = msg.payload
+ self.assertEqual(payload['aggregate_id'], 1)
+
+ def test_notify_about_aggregate_update_with_name(self):
+ # Set aggregate payload
+ aggregate_payload = {'name': 'fakegroup'}
+ compute_utils.notify_about_aggregate_update(self.context,
+ "create.start",
+ aggregate_payload)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(msg.priority, 'INFO')
+ self.assertEqual(msg.event_type, 'aggregate.create.start')
+ payload = msg.payload
+ self.assertEqual(payload['name'], 'fakegroup')
+
+ def test_notify_about_aggregate_update_without_name_id(self):
+ # Set empty aggregate payload
+ aggregate_payload = {}
+ compute_utils.notify_about_aggregate_update(self.context,
+ "create.start",
+ aggregate_payload)
+ self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
+
+
class ComputeGetImageMetadataTestCase(test.TestCase):
    """Tests for compute_utils.get_image_metadata.

    Covers the normal image-API path, the fallback to instance
    system_metadata when the image API fails, and the combinations of
    missing image / missing system metadata.
    """

    def setUp(self):
        super(ComputeGetImageMetadataTestCase, self).setUp()
        self.context = context.RequestContext('fake', 'fake')

        # Canonical image record returned by the mocked image API.
        self.image = {
            "min_ram": 10,
            "min_disk": 1,
            "disk_format": "raw",
            "container_format": "bare",
            "properties": {},
        }

        self.mock_image_api = mock.Mock()
        self.mock_image_api.get.return_value = self.image

        self.ctx = context.RequestContext('fake', 'fake')

        # System metadata mirroring self.image plus flavor data, used as
        # the fallback source when the image API cannot be consulted.
        sys_meta = {
            'image_min_ram': 10,
            'image_min_disk': 1,
            'image_disk_format': 'raw',
            'image_container_format': 'bare',
            'instance_type_id': 0,
            'instance_type_name': 'm1.fake',
            'instance_type_memory_mb': 10,
            'instance_type_vcpus': 1,
            'instance_type_root_gb': 1,
            'instance_type_ephemeral_gb': 1,
            'instance_type_flavorid': '0',
            'instance_type_swap': 1,
            'instance_type_rxtx_factor': 0.0,
            'instance_type_vcpu_weight': None,
        }

        self.instance = fake_instance.fake_db_instance(
            memory_mb=0, root_gb=0,
            system_metadata=sys_meta)

    @property
    def instance_obj(self):
        # Build a fresh Instance object from the fake DB record on every
        # access so tests cannot leak state through a shared object.
        return objects.Instance._from_db_object(
            self.ctx, objects.Instance(), self.instance,
            expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)

    def test_get_image_meta(self):
        image_meta = compute_utils.get_image_metadata(
            self.ctx, self.mock_image_api, 'fake-image', self.instance_obj)

        self.image['properties'] = 'DONTCARE'
        self.assertThat(self.image, matchers.DictMatches(image_meta))

    def test_get_image_meta_with_image_id_none(self):
        """image_id=None skips the image API entirely; 0 is a valid id."""
        self.image['properties'] = {'fake_property': 'fake_value'}

        with mock.patch.object(flavors,
                               "extract_flavor") as mock_extract_flavor:
            with mock.patch.object(utils, "get_system_metadata_from_image"
                                   ) as mock_get_sys_metadata:
                image_meta = compute_utils.get_image_metadata(
                    self.ctx, self.mock_image_api, None, self.instance_obj)

                self.assertEqual(0, self.mock_image_api.get.call_count)
                self.assertEqual(0, mock_extract_flavor.call_count)
                self.assertEqual(0, mock_get_sys_metadata.call_count)
                self.assertNotIn('fake_property', image_meta['properties'])

        # Checking mock_image_api_get is called with 0 image_id
        # as 0 is a valid image ID
        image_meta = compute_utils.get_image_metadata(self.ctx,
                                                      self.mock_image_api,
                                                      0, self.instance_obj)
        self.assertEqual(1, self.mock_image_api.get.call_count)
        self.assertIn('fake_property', image_meta['properties'])

    def _test_get_image_meta_exception(self, error):
        # When the image API raises, metadata is rebuilt from the
        # instance's system_metadata instead.
        self.mock_image_api.get.side_effect = error

        image_meta = compute_utils.get_image_metadata(
            self.ctx, self.mock_image_api, 'fake-image', self.instance_obj)

        self.image['properties'] = 'DONTCARE'
        # NOTE(danms): The trip through system_metadata will stringify things
        for key in self.image:
            self.image[key] = str(self.image[key])
        self.assertThat(self.image, matchers.DictMatches(image_meta))

    def test_get_image_meta_no_image(self):
        error = exception.ImageNotFound(image_id='fake-image')
        self._test_get_image_meta_exception(error)

    def test_get_image_meta_not_authorized(self):
        error = exception.ImageNotAuthorized(image_id='fake-image')
        self._test_get_image_meta_exception(error)

    def test_get_image_meta_bad_request(self):
        error = exception.Invalid()
        self._test_get_image_meta_exception(error)

    def test_get_image_meta_unexpected_exception(self):
        # Unexpected errors must propagate, not fall back silently.
        error = test.TestingException()
        with testtools.ExpectedException(test.TestingException):
            self._test_get_image_meta_exception(error)

    def test_get_image_meta_no_image_system_meta(self):
        # Snapshot the keys first: deleting from a dict while iterating
        # its keys() view raises RuntimeError on Python 3.
        for k in list(self.instance['system_metadata']):
            if k.startswith('image_'):
                del self.instance['system_metadata'][k]

        image_meta = compute_utils.get_image_metadata(
            self.ctx, self.mock_image_api, 'fake-image', self.instance_obj)

        self.image['properties'] = 'DONTCARE'
        self.assertThat(self.image, matchers.DictMatches(image_meta))

    def test_get_image_meta_no_image_no_image_system_meta(self):
        e = exception.ImageNotFound(image_id='fake-image')
        self.mock_image_api.get.side_effect = e

        # Snapshot the keys first: deleting from a dict while iterating
        # its keys() view raises RuntimeError on Python 3.
        for k in list(self.instance['system_metadata']):
            if k.startswith('image_'):
                del self.instance['system_metadata'][k]

        image_meta = compute_utils.get_image_metadata(
            self.ctx, self.mock_image_api, 'fake-image', self.instance_obj)

        expected = {'properties': 'DONTCARE'}
        self.assertThat(expected, matchers.DictMatches(image_meta))
+
+
class ComputeUtilsGetValFromSysMetadata(test.TestCase):

    def test_get_value_from_system_metadata(self):
        """Values are coerced with the supplied type; unparsable values
        fall back to the given default.
        """
        instance = fake_instance.fake_instance_obj('fake-context')
        instance.system_metadata = {'int_val': 1,
                                    'int_string': '2',
                                    'not_int': 'Nope'}

        cases = [('int_val', 1),      # already an int
                 ('int_string', 2),   # numeric string is converted
                 ('not_int', 0)]      # conversion fails -> default
        for key, expected in cases:
            actual = compute_utils.get_value_from_system_metadata(
                instance, key, int, 0)
            self.assertEqual(expected, actual)
+
+
class ComputeUtilsGetNWInfo(test.TestCase):
    """get_nw_info_for_instance returns an empty NetworkInfo when the
    instance has no info_cache, whether given an object or a dict.
    """

    def test_instance_object_none_info_cache(self):
        instance = fake_instance.fake_instance_obj(
            'fake-context', expected_attrs=['info_cache'])
        self.assertIsNone(instance.info_cache)
        nw_info = compute_utils.get_nw_info_for_instance(instance)
        self.assertEqual(jsonutils.dumps([]), nw_info.json())

    def test_instance_dict_none_info_cache(self):
        instance = fake_instance.fake_db_instance(info_cache=None)
        self.assertIsNone(instance['info_cache'])
        nw_info = compute_utils.get_nw_info_for_instance(instance)
        self.assertEqual(jsonutils.dumps([]), nw_info.json())
+
+
class ComputeUtilsGetRebootTypes(test.TestCase):
    """get_reboot_type is SOFT only for an in-progress reboot task on a
    running VM; every other combination is HARD.
    """

    def setUp(self):
        super(ComputeUtilsGetRebootTypes, self).setUp()
        self.context = context.RequestContext('fake', 'fake')

    def _reboot_type(self, task_state, vm_power_state):
        # Thin wrapper so each test reads as a single expectation.
        return compute_utils.get_reboot_type(task_state, vm_power_state)

    def test_get_reboot_type_started_soft(self):
        result = self._reboot_type(task_states.REBOOT_STARTED,
                                   power_state.RUNNING)
        self.assertEqual(result, 'SOFT')

    def test_get_reboot_type_pending_soft(self):
        result = self._reboot_type(task_states.REBOOT_PENDING,
                                   power_state.RUNNING)
        self.assertEqual(result, 'SOFT')

    def test_get_reboot_type_hard(self):
        self.assertEqual(self._reboot_type('foo', power_state.RUNNING),
                         'HARD')

    def test_get_reboot_not_running_hard(self):
        self.assertEqual(self._reboot_type('foo', 'bar'), 'HARD')
diff --git a/nova/tests/unit/compute/test_compute_xen.py b/nova/tests/unit/compute/test_compute_xen.py
new file mode 100644
index 0000000000..90a81e9d13
--- /dev/null
+++ b/nova/tests/unit/compute/test_compute_xen.py
@@ -0,0 +1,67 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for expectations of behaviour from the Xen driver."""
+
+from oslo.config import cfg
+from oslo.utils import importutils
+
+from nova.compute import power_state
+from nova import context
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova.tests.unit.compute import eventlet_utils
+from nova.tests.unit import fake_instance
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import vm_utils
+
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+
+
class ComputeXenTestCase(stubs.XenAPITestBaseNoDB):
    """Behavioural tests for the compute manager running on the XenAPI
    driver, using stubbed XenAPI sessions (no real hypervisor or DB).
    """

    def setUp(self):
        super(ComputeXenTestCase, self).setUp()
        self.flags(compute_driver='xenapi.XenAPIDriver')
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')

        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.compute = importutils.import_object(CONF.compute_manager)
        # execute power syncing synchronously for testing:
        self.compute._sync_power_pool = eventlet_utils.SyncPool()

    def test_sync_power_states_instance_not_found(self):
        """When vm_utils.lookup finds no VM for a known instance, the
        power sync reports NOSTATE for it rather than failing.
        """
        db_instance = fake_instance.fake_db_instance()
        ctxt = context.get_admin_context()
        instance_list = instance_obj._make_instance_list(ctxt,
                objects.InstanceList(), [db_instance], None)
        instance = instance_list[0]

        # mox record phase: expectations below are order- and
        # argument-sensitive.
        self.mox.StubOutWithMock(objects.InstanceList, 'get_by_host')
        self.mox.StubOutWithMock(self.compute.driver, 'get_num_instances')
        self.mox.StubOutWithMock(vm_utils, 'lookup')
        self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')

        objects.InstanceList.get_by_host(ctxt,
                self.compute.host, use_slave=True).AndReturn(instance_list)
        self.compute.driver.get_num_instances().AndReturn(1)
        # lookup returning None simulates the VM missing from the host.
        vm_utils.lookup(self.compute.driver._session, instance['name'],
                        False).AndReturn(None)
        self.compute._sync_instance_power_state(ctxt, instance,
                                                power_state.NOSTATE)

        self.mox.ReplayAll()

        self.compute._sync_power_states(ctxt)
diff --git a/nova/tests/compute/test_flavors.py b/nova/tests/unit/compute/test_flavors.py
index cece4b3f39..cece4b3f39 100644
--- a/nova/tests/compute/test_flavors.py
+++ b/nova/tests/unit/compute/test_flavors.py
diff --git a/nova/tests/unit/compute/test_host_api.py b/nova/tests/unit/compute/test_host_api.py
new file mode 100644
index 0000000000..348d2dea3d
--- /dev/null
+++ b/nova/tests/unit/compute/test_host_api.py
@@ -0,0 +1,480 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+import mock
+
+from nova.cells import utils as cells_utils
+from nova import compute
+from nova import context
+from nova import exception
+from nova import objects
+from nova import test
+from nova.tests.unit import fake_notifier
+from nova.tests.unit.objects import test_objects
+from nova.tests.unit.objects import test_service
+
+
class ComputeHostAPITestCase(test.TestCase):
    """Exercise compute.HostAPI host-level operations against mocked RPC
    and DB layers (non-cells deployment).

    The cells variant below subclasses this and overrides _mock_rpc_call,
    so most tests here run in both configurations.
    """

    def setUp(self):
        super(ComputeHostAPITestCase, self).setUp()
        self.host_api = compute.HostAPI()
        self.ctxt = context.get_admin_context()
        # Capture notifications in-process instead of emitting them.
        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)

    def _compare_obj(self, obj, db_obj):
        # Compare an object against its DB dict, tolerating fields the
        # Service object may legitimately leave unset.
        test_objects.compare_obj(self, obj, db_obj,
                                 allow_missing=test_service.OPTIONAL)

    def _compare_objs(self, obj_list, db_obj_list):
        for index, obj in enumerate(obj_list):
            self._compare_obj(obj, db_obj_list[index])

    def _mock_rpc_call(self, method, **kwargs):
        # Expect exactly one compute RPC call of `method` returning
        # 'fake-result' (mox record phase).
        self.mox.StubOutWithMock(self.host_api.rpcapi, method)
        getattr(self.host_api.rpcapi, method)(
            self.ctxt, **kwargs).AndReturn('fake-result')

    def _mock_assert_host_exists(self):
        """Sets it so that the host API always thinks that 'fake_host'
        exists.
        """
        def fake_assert_host_exists(context, host_name, must_be_up=False):
            return 'fake_host'
        self.stubs.Set(self.host_api, '_assert_host_exists',
                       fake_assert_host_exists)

    def test_set_host_enabled(self):
        """set_host_enabled proxies over RPC and emits start/end
        notifications carrying the host name and enabled flag.
        """
        self._mock_assert_host_exists()
        self._mock_rpc_call('set_host_enabled',
                            host='fake_host',
                            enabled='fake_enabled')
        self.mox.ReplayAll()
        fake_notifier.NOTIFICATIONS = []
        result = self.host_api.set_host_enabled(self.ctxt, 'fake_host',
                                                'fake_enabled')
        self.assertEqual('fake-result', result)
        self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual('HostAPI.set_enabled.start', msg.event_type)
        self.assertEqual('api.fake_host', msg.publisher_id)
        self.assertEqual('INFO', msg.priority)
        self.assertEqual('fake_enabled', msg.payload['enabled'])
        self.assertEqual('fake_host', msg.payload['host_name'])
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual('HostAPI.set_enabled.end', msg.event_type)
        self.assertEqual('api.fake_host', msg.publisher_id)
        self.assertEqual('INFO', msg.priority)
        self.assertEqual('fake_enabled', msg.payload['enabled'])
        self.assertEqual('fake_host', msg.payload['host_name'])

    def test_host_name_from_assert_hosts_exists(self):
        # The canonical name returned by _assert_host_exists (not the
        # caller-supplied 'fake_hosT') is what goes over RPC.
        self._mock_assert_host_exists()
        self._mock_rpc_call('set_host_enabled',
                            host='fake_host',
                            enabled='fake_enabled')
        self.mox.ReplayAll()
        result = self.host_api.set_host_enabled(self.ctxt, 'fake_hosT',
                                                'fake_enabled')
        self.assertEqual('fake-result', result)

    def test_get_host_uptime(self):
        self._mock_assert_host_exists()
        self._mock_rpc_call('get_host_uptime',
                            host='fake_host')
        self.mox.ReplayAll()
        result = self.host_api.get_host_uptime(self.ctxt, 'fake_host')
        self.assertEqual('fake-result', result)

    def test_get_host_uptime_service_down(self):
        # A down compute service must surface as ComputeServiceUnavailable
        # rather than an RPC timeout.
        def fake_service_get_by_compute_host(context, host_name):
            return dict(test_service.fake_service, id=1)
        self.stubs.Set(self.host_api.db, 'service_get_by_compute_host',
                       fake_service_get_by_compute_host)

        def fake_service_is_up(service):
            return False
        self.stubs.Set(self.host_api.servicegroup_api,
                       'service_is_up', fake_service_is_up)

        self.assertRaises(exception.ComputeServiceUnavailable,
                          self.host_api.get_host_uptime, self.ctxt,
                          'fake_host')

    def test_host_power_action(self):
        """host_power_action proxies over RPC and emits start/end
        notifications carrying the host name and action.
        """
        self._mock_assert_host_exists()
        self._mock_rpc_call('host_power_action',
                            host='fake_host',
                            action='fake_action')
        self.mox.ReplayAll()
        fake_notifier.NOTIFICATIONS = []
        result = self.host_api.host_power_action(self.ctxt, 'fake_host',
                                                 'fake_action')
        self.assertEqual('fake-result', result)
        self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual('HostAPI.power_action.start', msg.event_type)
        self.assertEqual('api.fake_host', msg.publisher_id)
        self.assertEqual('INFO', msg.priority)
        self.assertEqual('fake_action', msg.payload['action'])
        self.assertEqual('fake_host', msg.payload['host_name'])
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual('HostAPI.power_action.end', msg.event_type)
        self.assertEqual('api.fake_host', msg.publisher_id)
        self.assertEqual('INFO', msg.priority)
        self.assertEqual('fake_action', msg.payload['action'])
        self.assertEqual('fake_host', msg.payload['host_name'])

    def test_set_host_maintenance(self):
        """set_host_maintenance proxies over RPC and emits start/end
        notifications carrying the host name and mode.
        """
        self._mock_assert_host_exists()
        self._mock_rpc_call('host_maintenance_mode',
                            host='fake_host',
                            host_param='fake_host',
                            mode='fake_mode')
        self.mox.ReplayAll()
        fake_notifier.NOTIFICATIONS = []
        result = self.host_api.set_host_maintenance(self.ctxt, 'fake_host',
                                                    'fake_mode')
        self.assertEqual('fake-result', result)
        self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual('HostAPI.set_maintenance.start', msg.event_type)
        self.assertEqual('api.fake_host', msg.publisher_id)
        self.assertEqual('INFO', msg.priority)
        self.assertEqual('fake_host', msg.payload['host_name'])
        self.assertEqual('fake_mode', msg.payload['mode'])
        msg = fake_notifier.NOTIFICATIONS[1]
        self.assertEqual('HostAPI.set_maintenance.end', msg.event_type)
        self.assertEqual('api.fake_host', msg.publisher_id)
        self.assertEqual('INFO', msg.priority)
        self.assertEqual('fake_host', msg.payload['host_name'])
        self.assertEqual('fake_mode', msg.payload['mode'])

    def test_service_get_all_no_zones(self):
        """service_get_all filters in Python on top of the DB result;
        without set_zones no availability_zone is added.
        """
        services = [dict(test_service.fake_service,
                         id=1, topic='compute', host='host1'),
                    dict(test_service.fake_service,
                         topic='compute', host='host2')]

        self.mox.StubOutWithMock(self.host_api.db,
                                 'service_get_all')

        # Test no filters
        self.host_api.db.service_get_all(self.ctxt,
                                         disabled=None).AndReturn(services)
        self.mox.ReplayAll()
        result = self.host_api.service_get_all(self.ctxt)
        self.mox.VerifyAll()
        self._compare_objs(result, services)

        # Test no filters #2
        self.mox.ResetAll()
        self.host_api.db.service_get_all(self.ctxt,
                                         disabled=None).AndReturn(services)
        self.mox.ReplayAll()
        result = self.host_api.service_get_all(self.ctxt, filters={})
        self.mox.VerifyAll()
        self._compare_objs(result, services)

        # Test w/ filter
        self.mox.ResetAll()
        self.host_api.db.service_get_all(self.ctxt,
                                         disabled=None).AndReturn(services)
        self.mox.ReplayAll()
        result = self.host_api.service_get_all(self.ctxt,
                                               filters=dict(host='host2'))
        self.mox.VerifyAll()
        self._compare_objs(result, [services[1]])

    def test_service_get_all(self):
        """With set_zones=True (or a zone filter) each service gains an
        availability_zone field.
        """
        services = [dict(test_service.fake_service,
                         topic='compute', host='host1'),
                    dict(test_service.fake_service,
                         topic='compute', host='host2')]
        exp_services = []
        for service in services:
            exp_service = {}
            exp_service.update(availability_zone='nova', **service)
            exp_services.append(exp_service)

        self.mox.StubOutWithMock(self.host_api.db,
                                 'service_get_all')

        # Test no filters
        self.host_api.db.service_get_all(self.ctxt,
                                         disabled=None).AndReturn(services)
        self.mox.ReplayAll()
        result = self.host_api.service_get_all(self.ctxt, set_zones=True)
        self.mox.VerifyAll()
        self._compare_objs(result, exp_services)

        # Test no filters #2
        self.mox.ResetAll()
        self.host_api.db.service_get_all(self.ctxt,
                                         disabled=None).AndReturn(services)
        self.mox.ReplayAll()
        result = self.host_api.service_get_all(self.ctxt, filters={},
                                               set_zones=True)
        self.mox.VerifyAll()
        self._compare_objs(result, exp_services)

        # Test w/ filter
        self.mox.ResetAll()
        self.host_api.db.service_get_all(self.ctxt,
                                         disabled=None).AndReturn(services)
        self.mox.ReplayAll()
        result = self.host_api.service_get_all(self.ctxt,
                                               filters=dict(host='host2'),
                                               set_zones=True)
        self.mox.VerifyAll()
        self._compare_objs(result, [exp_services[1]])

        # Test w/ zone filter but no set_zones arg.
        self.mox.ResetAll()
        self.host_api.db.service_get_all(self.ctxt,
                                         disabled=None).AndReturn(services)
        self.mox.ReplayAll()
        filters = {'availability_zone': 'nova'}
        result = self.host_api.service_get_all(self.ctxt,
                                               filters=filters)
        self.mox.VerifyAll()
        self._compare_objs(result, exp_services)

    def test_service_get_by_compute_host(self):
        self.mox.StubOutWithMock(self.host_api.db,
                                 'service_get_by_compute_host')

        self.host_api.db.service_get_by_compute_host(self.ctxt,
                'fake-host').AndReturn(test_service.fake_service)
        self.mox.ReplayAll()
        result = self.host_api.service_get_by_compute_host(self.ctxt,
                                                           'fake-host')
        self.assertEqual(test_service.fake_service['id'], result.id)

    def test_service_update(self):
        # service_update first resolves the service via host+binary and
        # then updates by its id.
        host_name = 'fake-host'
        binary = 'nova-compute'
        params_to_update = dict(disabled=True)
        service_id = 42
        expected_result = dict(test_service.fake_service, id=service_id)

        self.mox.StubOutWithMock(self.host_api.db, 'service_get_by_args')
        self.host_api.db.service_get_by_args(self.ctxt,
                host_name, binary).AndReturn(expected_result)

        self.mox.StubOutWithMock(self.host_api.db, 'service_update')
        self.host_api.db.service_update(
            self.ctxt, service_id, params_to_update).AndReturn(expected_result)

        self.mox.ReplayAll()

        result = self.host_api.service_update(
            self.ctxt, host_name, binary, params_to_update)
        self._compare_obj(result, expected_result)

    def test_instance_get_all_by_host(self):
        self.mox.StubOutWithMock(self.host_api.db,
                                 'instance_get_all_by_host')

        self.host_api.db.instance_get_all_by_host(self.ctxt,
                'fake-host').AndReturn(['fake-responses'])
        self.mox.ReplayAll()
        result = self.host_api.instance_get_all_by_host(self.ctxt,
                                                        'fake-host')
        self.assertEqual(['fake-responses'], result)

    def test_task_log_get_all(self):
        self.mox.StubOutWithMock(self.host_api.db, 'task_log_get_all')

        self.host_api.db.task_log_get_all(self.ctxt,
                'fake-name', 'fake-begin', 'fake-end', host='fake-host',
                state='fake-state').AndReturn('fake-response')
        self.mox.ReplayAll()
        result = self.host_api.task_log_get_all(self.ctxt, 'fake-name',
                'fake-begin', 'fake-end', host='fake-host',
                state='fake-state')
        self.assertEqual('fake-response', result)

    def test_service_delete(self):
        # service_delete looks the Service up by id and destroys it.
        with contextlib.nested(
            mock.patch.object(objects.Service, 'get_by_id',
                              return_value=objects.Service()),
            mock.patch.object(objects.Service, 'destroy')
        ) as (
            get_by_id, destroy
        ):
            self.host_api.service_delete(self.ctxt, 1)
            get_by_id.assert_called_once_with(self.ctxt, 1)
            destroy.assert_called_once_with()
+
+
class ComputeHostAPICellsTestCase(ComputeHostAPITestCase):
    """Re-run the HostAPI tests with cells enabled (api cell), where
    host operations are proxied through the cells RPC API instead of
    going straight to compute.
    """

    def setUp(self):
        self.flags(enable=True, group='cells')
        self.flags(cell_type='api', group='cells')
        super(ComputeHostAPICellsTestCase, self).setUp()

    def _mock_rpc_call(self, method, **kwargs):
        # In cells mode the compute RPC is wrapped in a
        # proxy_rpc_to_manager message routed to 'compute.<host>'.
        if 'host_param' in kwargs:
            kwargs.pop('host_param')
        else:
            kwargs.pop('host')
        rpc_message = {
            'method': method,
            'namespace': None,
            'args': kwargs,
            'version': self.host_api.rpcapi.client.target.version,
        }
        cells_rpcapi = self.host_api.rpcapi.client.cells_rpcapi
        self.mox.StubOutWithMock(cells_rpcapi, 'proxy_rpc_to_manager')
        cells_rpcapi.proxy_rpc_to_manager(self.ctxt,
                                          rpc_message,
                                          'compute.fake_host',
                                          call=True).AndReturn('fake-result')

    def test_service_get_all_no_zones(self):
        # Cells-qualified ids ('cell1@1') come back from the cells API
        # and filtering happens there, not locally.
        services = [dict(test_service.fake_service,
                         id='cell1@1', topic='compute', host='host1'),
                    dict(test_service.fake_service,
                         id='cell1@2', topic='compute', host='host2')]
        exp_services = [s.copy() for s in services]

        fake_filters = {'host': 'host1'}
        self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
                                 'service_get_all')
        self.host_api.cells_rpcapi.service_get_all(self.ctxt,
                filters=fake_filters).AndReturn(services)
        self.mox.ReplayAll()
        result = self.host_api.service_get_all(self.ctxt,
                                               filters=fake_filters)
        self._compare_objs(result, exp_services)

    def _test_service_get_all(self, fake_filters, **kwargs):
        # Shared body for the filter/set_zones variants below.
        services = [dict(test_service.fake_service,
                         id='cell1@1', key1='val1', key2='val2',
                         topic='compute', host='host1'),
                    dict(test_service.fake_service,
                         id='cell1@2', key1='val2', key3='val3',
                         topic='compute', host='host2')]
        exp_services = []
        for service in services:
            exp_service = {}
            exp_service.update(availability_zone='nova', **service)
            exp_services.append(exp_service)

        self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
                                 'service_get_all')
        self.host_api.cells_rpcapi.service_get_all(self.ctxt,
                filters=fake_filters).AndReturn(services)
        self.mox.ReplayAll()
        result = self.host_api.service_get_all(self.ctxt,
                                               filters=fake_filters,
                                               **kwargs)
        self.mox.VerifyAll()
        self._compare_objs(result, exp_services)

    def test_service_get_all(self):
        fake_filters = {'availability_zone': 'nova'}
        self._test_service_get_all(fake_filters)

    def test_service_get_all_set_zones(self):
        fake_filters = {'key1': 'val1'}
        self._test_service_get_all(fake_filters, set_zones=True)

    def test_service_get_by_compute_host(self):
        self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
                                 'service_get_by_compute_host')

        self.host_api.cells_rpcapi.service_get_by_compute_host(self.ctxt,
                'fake-host').AndReturn(test_service.fake_service)
        self.mox.ReplayAll()
        result = self.host_api.service_get_by_compute_host(self.ctxt,
                                                           'fake-host')
        self._compare_obj(result, test_service.fake_service)

    def test_service_update(self):
        # Unlike the parent, the cells API resolves host+binary itself.
        host_name = 'fake-host'
        binary = 'nova-compute'
        params_to_update = dict(disabled=True)
        service_id = 42
        expected_result = dict(test_service.fake_service, id=service_id)

        self.mox.StubOutWithMock(self.host_api.cells_rpcapi, 'service_update')
        self.host_api.cells_rpcapi.service_update(
            self.ctxt, host_name,
            binary, params_to_update).AndReturn(expected_result)

        self.mox.ReplayAll()

        result = self.host_api.service_update(
            self.ctxt, host_name, binary, params_to_update)
        self._compare_obj(result, expected_result)

    def test_service_delete(self):
        # Deletion is delegated to the cells API with the cell-qualified
        # service id.
        cell_service_id = cells_utils.cell_with_item('cell1', 1)
        with mock.patch.object(self.host_api.cells_rpcapi,
                               'service_delete') as service_delete:
            self.host_api.service_delete(self.ctxt, cell_service_id)
            service_delete.assert_called_once_with(
                self.ctxt, cell_service_id)

    def test_instance_get_all_by_host(self):
        # Only instances in the requested cell ('cell1') are returned.
        instances = [dict(id=1, cell_name='cell1', host='host1'),
                     dict(id=2, cell_name='cell2', host='host1'),
                     dict(id=3, cell_name='cell1', host='host2')]

        self.mox.StubOutWithMock(self.host_api.db,
                                 'instance_get_all_by_host')

        self.host_api.db.instance_get_all_by_host(self.ctxt,
                'fake-host').AndReturn(instances)
        self.mox.ReplayAll()
        expected_result = [instances[0], instances[2]]
        cell_and_host = cells_utils.cell_with_item('cell1', 'fake-host')
        result = self.host_api.instance_get_all_by_host(self.ctxt,
                                                        cell_and_host)
        self.assertEqual(expected_result, result)

    def test_task_log_get_all(self):
        self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
                                 'task_log_get_all')

        self.host_api.cells_rpcapi.task_log_get_all(self.ctxt,
                'fake-name', 'fake-begin', 'fake-end', host='fake-host',
                state='fake-state').AndReturn('fake-response')
        self.mox.ReplayAll()
        result = self.host_api.task_log_get_all(self.ctxt, 'fake-name',
                'fake-begin', 'fake-end', host='fake-host',
                state='fake-state')
        self.assertEqual('fake-response', result)

    def test_get_host_uptime_service_down(self):
        # The corresponding Compute test case depends on the
        # _assert_host_exists which is a no-op in the cells api
        pass

    def test_get_host_uptime(self):
        self.mox.StubOutWithMock(self.host_api.cells_rpcapi,
                                 'get_host_uptime')

        self.host_api.cells_rpcapi.get_host_uptime(self.ctxt,
                'fake-host').AndReturn('fake-response')
        self.mox.ReplayAll()
        result = self.host_api.get_host_uptime(self.ctxt, 'fake-host')
        self.assertEqual('fake-response', result)
diff --git a/nova/tests/compute/test_hvtype.py b/nova/tests/unit/compute/test_hvtype.py
index 93cb245e10..93cb245e10 100644
--- a/nova/tests/compute/test_hvtype.py
+++ b/nova/tests/unit/compute/test_hvtype.py
diff --git a/nova/tests/unit/compute/test_keypairs.py b/nova/tests/unit/compute/test_keypairs.py
new file mode 100644
index 0000000000..ecdbcff103
--- /dev/null
+++ b/nova/tests/unit/compute/test_keypairs.py
@@ -0,0 +1,221 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Tests for keypair API."""
+
+from oslo.config import cfg
+import six
+
+from nova.compute import api as compute_api
+from nova import context
+from nova import db
+from nova import exception
+from nova.i18n import _
+from nova import quota
+from nova.tests.unit.compute import test_compute
+from nova.tests.unit import fake_notifier
+from nova.tests.unit.objects import test_keypair
+
+CONF = cfg.CONF
+QUOTAS = quota.QUOTAS
+
+
+class KeypairAPITestCase(test_compute.BaseTestCase):
+    """Base fixture for KeypairAPI tests.
+
+    Stubs out the four keypair DB calls with in-memory fakes so the
+    tests never touch a real database, and provides one pre-existing
+    keypair (self.existing_key_name) with a known public key and
+    fingerprint.
+    """
+
+    def setUp(self):
+        super(KeypairAPITestCase, self).setUp()
+        self.keypair_api = compute_api.KeypairAPI()
+        self.ctxt = context.RequestContext('fake', 'fake')
+        self._keypair_db_call_stubs()
+        # The one keypair the fake DB layer knows about.
+        self.existing_key_name = 'fake existing key name'
+        self.pub_key = ('ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLnVkqJu9WVf'
+                        '/5StU3JCrBR2r1s1j8K1tux+5XeSvdqaM8lMFNorzbY5iyoBbR'
+                        'S56gy1jmm43QsMPJsrpfUZKcJpRENSe3OxIIwWXRoiapZe78u/'
+                        'a9xKwj0avFYMcws9Rk9iAB7W4K1nEJbyCPl5lRBoyqeHBqrnnu'
+                        'XWEgGxJCK0Ah6wcOzwlEiVjdf4kxzXrwPHyi7Ea1qvnNXTziF8'
+                        'yYmUlH4C8UXfpTQckwSwpDyxZUc63P8q+vPbs3Q2kw+/7vvkCK'
+                        'HJAXVI+oCiyMMfffoTq16M1xfV58JstgtTqAXG+ZFpicGajREU'
+                        'E/E3hO5MGgcHmyzIrWHKpe1n3oEGuz')
+        # Fingerprint matching self.pub_key above.
+        self.fingerprint = '4e:48:c6:a0:4a:f9:dd:b5:4c:85:54:5a:af:43:47:5a'
+        # Flipped to True by the fake destroy stub; makes subsequent
+        # db_key_pair_get calls raise KeypairNotFound.
+        self.key_destroyed = False
+
+    def _keypair_db_call_stubs(self):
+        """Replace the keypair DB API with closures over this fixture."""
+
+        def db_key_pair_get_all_by_user(context, user_id):
+            # Every user "owns" exactly the one pre-existing keypair.
+            return [dict(test_keypair.fake_keypair,
+                         name=self.existing_key_name,
+                         public_key=self.pub_key,
+                         fingerprint=self.fingerprint)]
+
+        def db_key_pair_create(context, keypair):
+            # Echo the creation request on top of the fake row template.
+            return dict(test_keypair.fake_keypair, **keypair)
+
+        def db_key_pair_destroy(context, user_id, name):
+            if name == self.existing_key_name:
+                self.key_destroyed = True
+
+        def db_key_pair_get(context, user_id, name):
+            # Only the pre-existing key is findable, and only until it
+            # has been "destroyed".
+            if name == self.existing_key_name and not self.key_destroyed:
+                return dict(test_keypair.fake_keypair,
+                            name=self.existing_key_name,
+                            public_key=self.pub_key,
+                            fingerprint=self.fingerprint)
+            else:
+                raise exception.KeypairNotFound(user_id=user_id, name=name)
+
+        self.stubs.Set(db, "key_pair_get_all_by_user",
+                       db_key_pair_get_all_by_user)
+        self.stubs.Set(db, "key_pair_create",
+                       db_key_pair_create)
+        self.stubs.Set(db, "key_pair_destroy",
+                       db_key_pair_destroy)
+        self.stubs.Set(db, "key_pair_get",
+                       db_key_pair_get)
+
+    def _check_notifications(self, action='create', key_name='foo'):
+        """Assert exactly one start/end notification pair was emitted
+        for the given keypair action, with the expected payload.
+        """
+        self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
+
+        n1 = fake_notifier.NOTIFICATIONS[0]
+        self.assertEqual('INFO', n1.priority)
+        self.assertEqual('keypair.%s.start' % action, n1.event_type)
+        self.assertEqual('api.%s' % CONF.host, n1.publisher_id)
+        self.assertEqual('fake', n1.payload['user_id'])
+        self.assertEqual('fake', n1.payload['tenant_id'])
+        self.assertEqual(key_name, n1.payload['key_name'])
+
+        n2 = fake_notifier.NOTIFICATIONS[1]
+        self.assertEqual('INFO', n2.priority)
+        self.assertEqual('keypair.%s.end' % action, n2.event_type)
+        self.assertEqual('api.%s' % CONF.host, n2.publisher_id)
+        self.assertEqual('fake', n2.payload['user_id'])
+        self.assertEqual('fake', n2.payload['tenant_id'])
+        self.assertEqual(key_name, n2.payload['key_name'])
+
+
+class CreateImportSharedTestMixIn(object):
+    """Tests shared between create and import_key.
+
+    Mix-in pattern is used here so that these `test_*` methods aren't picked
+    up by the test runner unless they are part of a 'concrete' test case.
+    Concrete subclasses must define ``func_name`` ('create_key_pair' or
+    'import_key_pair') naming the KeypairAPI method under test.
+    """
+
+    def assertKeyNameRaises(self, exc_class, expected_message, name):
+        """Call the method under test with ``name`` and assert it raises
+        ``exc_class`` with exactly ``expected_message``.
+        """
+        func = getattr(self.keypair_api, self.func_name)
+
+        args = []
+        if self.func_name == 'import_key_pair':
+            # import_key_pair takes the public key as an extra argument.
+            args.append(self.pub_key)
+
+        exc = self.assertRaises(exc_class, func, self.ctxt, self.ctxt.user_id,
+                                name, *args)
+        self.assertEqual(expected_message, six.text_type(exc))
+
+    def assertInvalidKeypair(self, expected_message, name):
+        """Shorthand for asserting an InvalidKeypair failure message."""
+        msg = _('Keypair data is invalid: %s') % expected_message
+        self.assertKeyNameRaises(exception.InvalidKeypair, msg, name)
+
+    def test_name_too_short(self):
+        msg = _('Keypair name must be string and between 1 '
+                'and 255 characters long')
+        self.assertInvalidKeypair(msg, '')
+
+    def test_name_too_long(self):
+        msg = _('Keypair name must be string and between 1 '
+                'and 255 characters long')
+        self.assertInvalidKeypair(msg, 'x' * 256)
+
+    def test_invalid_chars(self):
+        msg = _("Keypair name contains unsafe characters")
+        self.assertInvalidKeypair(msg, '* BAD CHARACTERS! *')
+
+    def test_already_exists(self):
+        def db_key_pair_create_duplicate(context, keypair):
+            # Simulate the DB unique-constraint failure path.
+            raise exception.KeyPairExists(key_name=keypair.get('name', ''))
+
+        self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate)
+
+        msg = (_("Key pair '%(key_name)s' already exists.") %
+               {'key_name': self.existing_key_name})
+        self.assertKeyNameRaises(exception.KeyPairExists, msg,
+                                 self.existing_key_name)
+
+    def test_quota_limit(self):
+        def fake_quotas_count(self, context, resource, *args, **kwargs):
+            # Report usage already at the configured quota ceiling.
+            return CONF.quota_key_pairs
+
+        self.stubs.Set(QUOTAS, "count", fake_quotas_count)
+
+        msg = _("Maximum number of key pairs exceeded")
+        self.assertKeyNameRaises(exception.KeypairLimitExceeded, msg, 'foo')
+
+
+class CreateKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
+    """Happy path and shared validation tests for create_key_pair."""
+
+    func_name = 'create_key_pair'
+
+    def test_success(self):
+        keypair, private_key = self.keypair_api.create_key_pair(
+            self.ctxt, self.ctxt.user_id, 'foo')
+        self.assertEqual('foo', keypair['name'])
+        self._check_notifications()
+
+
+class ImportKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
+    """Happy path, bad-data, and shared validation tests for
+    import_key_pair.
+    """
+
+    func_name = 'import_key_pair'
+
+    def test_success(self):
+        keypair = self.keypair_api.import_key_pair(self.ctxt,
+                                                   self.ctxt.user_id,
+                                                   'foo',
+                                                   self.pub_key)
+
+        # The imported key must round-trip the supplied public key and
+        # derive the known fingerprint for it.
+        self.assertEqual('foo', keypair['name'])
+        self.assertEqual(self.fingerprint, keypair['fingerprint'])
+        self.assertEqual(self.pub_key, keypair['public_key'])
+        self._check_notifications(action='import')
+
+    def test_bad_key_data(self):
+        exc = self.assertRaises(exception.InvalidKeypair,
+                                self.keypair_api.import_key_pair,
+                                self.ctxt, self.ctxt.user_id, 'foo',
+                                'bad key data')
+        msg = u'Keypair data is invalid: failed to generate fingerprint'
+        self.assertEqual(msg, six.text_type(exc))
+
+
+class GetKeypairTestCase(KeypairAPITestCase):
+    """get_key_pair should return the stubbed pre-existing keypair."""
+
+    def test_success(self):
+        keypair = self.keypair_api.get_key_pair(self.ctxt,
+                                                self.ctxt.user_id,
+                                                self.existing_key_name)
+        self.assertEqual(self.existing_key_name, keypair['name'])
+
+
+class GetKeypairsTestCase(KeypairAPITestCase):
+    """get_key_pairs should list exactly the one stubbed keypair."""
+
+    def test_success(self):
+        keypairs = self.keypair_api.get_key_pairs(self.ctxt, self.ctxt.user_id)
+        self.assertEqual([self.existing_key_name],
+                         [k['name'] for k in keypairs])
+
+
+class DeleteKeypairTestCase(KeypairAPITestCase):
+    """delete_key_pair should remove the key and notify."""
+
+    def test_success(self):
+        # Key is retrievable before deletion...
+        self.keypair_api.get_key_pair(self.ctxt, self.ctxt.user_id,
+                                      self.existing_key_name)
+        self.keypair_api.delete_key_pair(self.ctxt, self.ctxt.user_id,
+                                         self.existing_key_name)
+        # ...and gone afterwards.
+        self.assertRaises(exception.KeypairNotFound,
+                self.keypair_api.get_key_pair, self.ctxt, self.ctxt.user_id,
+                self.existing_key_name)
+
+        self._check_notifications(action='delete',
+                                  key_name=self.existing_key_name)
diff --git a/nova/tests/compute/test_multiple_nodes.py b/nova/tests/unit/compute/test_multiple_nodes.py
index 7362534b44..7362534b44 100644
--- a/nova/tests/compute/test_multiple_nodes.py
+++ b/nova/tests/unit/compute/test_multiple_nodes.py
diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py
new file mode 100644
index 0000000000..e646fb19ad
--- /dev/null
+++ b/nova/tests/unit/compute/test_resource_tracker.py
@@ -0,0 +1,1539 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for compute resource tracking."""
+
+import uuid
+
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+
+from nova.compute import flavors
+from nova.compute import resource_tracker
+from nova.compute import resources
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import objects
+from nova.objects import base as obj_base
+from nova import rpc
+from nova import test
+from nova.tests.unit.compute.monitors import test_monitors
+from nova.tests.unit.objects import test_migration
+from nova.tests.unit.pci import fakes as pci_fakes
+from nova.virt import driver
+from nova.virt import hardware
+
+
+FAKE_VIRT_MEMORY_MB = 5
+FAKE_VIRT_MEMORY_OVERHEAD = 1
+FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
+ FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
+FAKE_VIRT_NUMA_TOPOLOGY = hardware.VirtNUMAHostTopology(
+ cells=[hardware.VirtNUMATopologyCellUsage(0, set([1, 2]), 3072),
+ hardware.VirtNUMATopologyCellUsage(1, set([3, 4]), 3072)])
+FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = hardware.VirtNUMALimitTopology(
+ cells=[hardware.VirtNUMATopologyCellLimit(
+ 0, set([1, 2]), 3072, 4, 10240),
+ hardware.VirtNUMATopologyCellLimit(
+ 1, set([3, 4]), 3072, 4, 10240)])
+ROOT_GB = 5
+EPHEMERAL_GB = 1
+FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
+FAKE_VIRT_VCPUS = 1
+FAKE_VIRT_STATS = {'virt_stat': 10}
+FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
+RESOURCE_NAMES = ['vcpu']
+CONF = cfg.CONF
+
+
+class UnsupportedVirtDriver(driver.ComputeDriver):
+    """Pretend version of a lame virt driver.
+
+    Reports no available resources at all, which the resource tracker
+    treats as "resource tracking unsupported" (tracker.disabled).
+    """
+
+    def __init__(self):
+        super(UnsupportedVirtDriver, self).__init__(None)
+
+    def get_host_ip_addr(self):
+        return '127.0.0.1'
+
+    def get_available_resource(self, nodename):
+        # no support for getting resource usage info
+        return {}
+
+
+class FakeVirtDriver(driver.ComputeDriver):
+    """Fake virt driver reporting fixed, test-controlled resources.
+
+    Exposes the module-level FAKE_VIRT_* constants as its inventory,
+    optionally advertises one PCI device, and adds a constant memory
+    overhead per instance via estimate_instance_overhead().
+    """
+
+    def __init__(self, pci_support=False, stats=None,
+                 numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
+        super(FakeVirtDriver, self).__init__(None)
+        self.memory_mb = FAKE_VIRT_MEMORY_MB
+        self.local_gb = FAKE_VIRT_LOCAL_GB
+        self.vcpus = FAKE_VIRT_VCPUS
+        self.numa_topology = numa_topology
+
+        self.memory_mb_used = 0
+        self.local_gb_used = 0
+        self.pci_support = pci_support
+        # One fake passthrough device / matching pool entry when PCI
+        # support is requested, otherwise empty lists.
+        self.pci_devices = [{
+            'label': 'forza-napoli',
+            'dev_type': 'foo',
+            'compute_node_id': 1,
+            'address': '0000:00:00.1',
+            'product_id': 'p1',
+            'vendor_id': 'v1',
+            'status': 'available',
+            'extra_k1': 'v1'}] if self.pci_support else []
+        self.pci_stats = [{
+            'count': 1,
+            'vendor_id': 'v1',
+            'product_id': 'p1'}] if self.pci_support else []
+        if stats is not None:
+            self.stats = stats
+
+    def get_host_ip_addr(self):
+        return '127.0.0.1'
+
+    def get_available_resource(self, nodename):
+        """Return the fixed fake inventory in the dict form the
+        resource tracker consumes.
+        """
+        d = {
+            'vcpus': self.vcpus,
+            'memory_mb': self.memory_mb,
+            'local_gb': self.local_gb,
+            'vcpus_used': 0,
+            'memory_mb_used': self.memory_mb_used,
+            'local_gb_used': self.local_gb_used,
+            'hypervisor_type': 'fake',
+            'hypervisor_version': 0,
+            'hypervisor_hostname': 'fakehost',
+            'cpu_info': '',
+            'numa_topology': (
+                self.numa_topology.to_json() if self.numa_topology else None),
+        }
+        if self.pci_support:
+            d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
+        if hasattr(self, 'stats'):
+            d['stats'] = self.stats
+        return d
+
+    def estimate_instance_overhead(self, instance_info):
+        instance_info['memory_mb']  # make sure memory value is present
+        overhead = {
+            'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
+        }
+        return overhead  # just return a constant value for testing
+
+
+class BaseTestCase(test.TestCase):
+    """Common plumbing for resource tracker tests.
+
+    Keeps fake instances, their NUMA topologies, and flavors in plain
+    in-memory dicts, and stubs the conductor/db lookups the tracker
+    performs to read them.
+    """
+
+    def setUp(self):
+        super(BaseTestCase, self).setUp()
+
+        # Zero reservations so the full fake inventory is schedulable.
+        self.flags(reserved_host_disk_mb=0,
+                   reserved_host_memory_mb=0)
+
+        self.context = context.get_admin_context()
+
+        self.flags(use_local=True, group='conductor')
+        self.conductor = self.start_service('conductor',
+                                            manager=CONF.conductor.manager)
+
+        # In-memory "tables", keyed by instance uuid / flavor id.
+        self._instances = {}
+        self._numa_topologies = {}
+        self._instance_types = {}
+
+        self.stubs.Set(self.conductor.db,
+                       'instance_get_all_by_host_and_node',
+                       self._fake_instance_get_all_by_host_and_node)
+        self.stubs.Set(db, 'instance_extra_get_by_instance_uuid',
+                       self._fake_instance_extra_get_by_instance_uuid)
+        self.stubs.Set(self.conductor.db,
+                       'instance_update_and_get_original',
+                       self._fake_instance_update_and_get_original)
+        self.stubs.Set(self.conductor.db,
+                       'flavor_get', self._fake_flavor_get)
+
+        self.host = 'fakehost'
+
+    def _create_compute_node(self, values=None):
+        """Return a fake compute_node row, optionally overridden by
+        ``values``.
+        """
+        compute = {
+            "id": 1,
+            "service_id": 1,
+            "vcpus": 1,
+            "memory_mb": 1,
+            "local_gb": 1,
+            "vcpus_used": 1,
+            "memory_mb_used": 1,
+            "local_gb_used": 1,
+            "free_ram_mb": 1,
+            "free_disk_gb": 1,
+            "current_workload": 1,
+            "running_vms": 0,
+            "cpu_info": None,
+            "numa_topology": None,
+            "stats": {
+                "num_instances": "1",
+            },
+            "hypervisor_hostname": "fakenode",
+        }
+        if values:
+            compute.update(values)
+        return compute
+
+    def _create_service(self, host="fakehost", compute=None):
+        """Return a fake service row, with ``compute`` (if given)
+        attached as the joined compute_node list.
+        """
+        if compute:
+            compute = [compute]
+
+        service = {
+            "id": 1,
+            "host": host,
+            "binary": "nova-compute",
+            "topic": "compute",
+            "compute_node": compute,
+        }
+        return service
+
+    def _fake_instance_system_metadata(self, instance_type, prefix=''):
+        """Build the system_metadata rows that stash an instance type,
+        optionally namespaced with ``prefix`` (e.g. 'new_'/'old_').
+        """
+        sys_meta = []
+        for key in flavors.system_metadata_flavor_props.keys():
+            sys_meta.append({'key': '%sinstance_type_%s' % (prefix, key),
+                             'value': instance_type[key]})
+        return sys_meta
+
+    def _fake_instance(self, stash=True, flavor=None, **kwargs):
+        """Create and register a fake instance dict.
+
+        With ``stash`` (default), current/new/old instance types are all
+        stashed in system_metadata, i.e. the instance looks ready to
+        resize to or from the same flavor. Any ``numa_topology`` kwarg
+        is stored separately in self._numa_topologies; remaining kwargs
+        override instance fields.
+        """
+
+        # Default to an instance ready to resize to or from the same
+        # instance_type
+        flavor = flavor or self._fake_flavor_create()
+        sys_meta = self._fake_instance_system_metadata(flavor)
+
+        if stash:
+            # stash instance types in system metadata.
+            sys_meta = (sys_meta +
+                        self._fake_instance_system_metadata(flavor, 'new_') +
+                        self._fake_instance_system_metadata(flavor, 'old_'))
+
+        instance_uuid = str(uuid.uuid1())
+        instance = {
+            'uuid': instance_uuid,
+            'vm_state': vm_states.RESIZED,
+            'task_state': None,
+            'ephemeral_key_uuid': None,
+            'os_type': 'Linux',
+            'project_id': '123456',
+            'host': None,
+            'node': None,
+            'instance_type_id': flavor['id'],
+            'memory_mb': flavor['memory_mb'],
+            'vcpus': flavor['vcpus'],
+            'root_gb': flavor['root_gb'],
+            'ephemeral_gb': flavor['ephemeral_gb'],
+            'launched_on': None,
+            'system_metadata': sys_meta,
+            'availability_zone': None,
+            'vm_mode': None,
+            'reservation_id': None,
+            'display_name': None,
+            'default_swap_device': None,
+            'power_state': None,
+            'scheduled_at': None,
+            'access_ip_v6': None,
+            'access_ip_v4': None,
+            'key_name': None,
+            'updated_at': None,
+            'cell_name': None,
+            'locked': None,
+            'locked_by': None,
+            'launch_index': None,
+            'architecture': None,
+            'auto_disk_config': None,
+            'terminated_at': None,
+            'ramdisk_id': None,
+            'user_data': None,
+            'cleaned': None,
+            'deleted_at': None,
+            'id': 333,
+            'disable_terminate': None,
+            'hostname': None,
+            'display_description': None,
+            'key_data': None,
+            'deleted': None,
+            'default_ephemeral_device': None,
+            'progress': None,
+            'launched_at': None,
+            'config_drive': None,
+            'kernel_id': None,
+            'user_id': None,
+            'shutdown_terminate': None,
+            'created_at': None,
+            'image_ref': None,
+            'root_device_name': None,
+        }
+        numa_topology = kwargs.pop('numa_topology', None)
+        if numa_topology:
+            # Mimic an instance_extra row holding the serialized topology.
+            numa_topology = {
+                'id': 1, 'created_at': None, 'updated_at': None,
+                'deleted_at': None, 'deleted': None,
+                'instance_uuid': instance['uuid'],
+                'numa_topology': numa_topology.to_json()
+            }
+        instance.update(kwargs)
+
+        self._instances[instance_uuid] = instance
+        self._numa_topologies[instance_uuid] = numa_topology
+        return instance
+
+    def _fake_flavor_create(self, **kwargs):
+        """Create and register a fake flavor; kwargs override fields."""
+        instance_type = {
+            'id': 1,
+            'created_at': None,
+            'updated_at': None,
+            'deleted_at': None,
+            'deleted': False,
+            'disabled': False,
+            'is_public': True,
+            'name': 'fakeitype',
+            'memory_mb': FAKE_VIRT_MEMORY_MB,
+            'vcpus': FAKE_VIRT_VCPUS,
+            'root_gb': ROOT_GB,
+            'ephemeral_gb': EPHEMERAL_GB,
+            'swap': 0,
+            'rxtx_factor': 1.0,
+            'vcpu_weight': 1,
+            'flavorid': 'fakeflavor',
+            'extra_specs': {},
+        }
+        instance_type.update(**kwargs)
+
+        id_ = instance_type['id']
+        self._instance_types[id_] = instance_type
+        return instance_type
+
+    def _fake_instance_get_all_by_host_and_node(self, context, host, nodename):
+        # NOTE(review): filters on host only, ignoring nodename — fine
+        # here since these tests use a single node per host.
+        return [i for i in self._instances.values() if i['host'] == host]
+
+    def _fake_instance_extra_get_by_instance_uuid(self, context,
+                                                  instance_uuid, columns=None):
+        return self._numa_topologies.get(instance_uuid)
+
+    def _fake_flavor_get(self, ctxt, id_):
+        return self._instance_types[id_]
+
+    def _fake_instance_update_and_get_original(self, context, instance_uuid,
+                                               values):
+        instance = self._instances[instance_uuid]
+        instance.update(values)
+        # the test doesn't care what the original instance values are, it's
+        # only used in the subsequent notification:
+        return (instance, instance)
+
+    def _driver(self):
+        # Hook for subclasses to substitute a different fake driver.
+        return FakeVirtDriver()
+
+    def _tracker(self, host=None):
+        """Build a ResourceTracker for ``host`` (default self.host) on
+        node 'fakenode', wired to self._driver() and the 'vcpu'
+        external-resource handler.
+        """
+
+        if host is None:
+            host = self.host
+
+        node = "fakenode"
+
+        driver = self._driver()
+
+        tracker = resource_tracker.ResourceTracker(host, driver, node)
+        tracker.ext_resources_handler = \
+            resources.ResourceHandler(RESOURCE_NAMES, True)
+        return tracker
+
+
+class UnsupportedDriverTestCase(BaseTestCase):
+    """Resource tracking should be disabled when the virt driver doesn't
+    support it.
+
+    With tracking disabled, every claim variant must succeed trivially
+    (memory_mb == 0) instead of enforcing limits.
+    """
+    def setUp(self):
+        super(UnsupportedDriverTestCase, self).setUp()
+        self.tracker = self._tracker()
+        # seed tracker with data:
+        self.tracker.update_available_resource(self.context)
+
+    def _driver(self):
+        return UnsupportedVirtDriver()
+
+    def test_disabled(self):
+        # disabled = no compute node stats
+        self.assertTrue(self.tracker.disabled)
+        self.assertIsNone(self.tracker.compute_node)
+
+    def test_disabled_claim(self):
+        # basic claim:
+        instance = self._fake_instance()
+        claim = self.tracker.instance_claim(self.context, instance)
+        self.assertEqual(0, claim.memory_mb)
+
+    def test_disabled_instance_claim(self):
+        # instance variation:
+        instance = self._fake_instance()
+        claim = self.tracker.instance_claim(self.context, instance)
+        self.assertEqual(0, claim.memory_mb)
+
+    def test_disabled_instance_context_claim(self):
+        # instance context manager variation:
+        # NOTE(review): the first instance_claim result is immediately
+        # shadowed by the `with` target — presumably intentional to also
+        # exercise a double claim; confirm before "cleaning up".
+        instance = self._fake_instance()
+        claim = self.tracker.instance_claim(self.context, instance)
+        with self.tracker.instance_claim(self.context, instance) as claim:
+            self.assertEqual(0, claim.memory_mb)
+
+    def test_disabled_updated_usage(self):
+        # update_usage must be a harmless no-op while disabled.
+        instance = self._fake_instance(host='fakehost', memory_mb=5,
+                                       root_gb=10)
+        self.tracker.update_usage(self.context, instance)
+
+    def test_disabled_resize_claim(self):
+        # Even while disabled, a resize claim records migration linkage.
+        instance = self._fake_instance()
+        instance_type = self._fake_flavor_create()
+        claim = self.tracker.resize_claim(self.context, instance,
+                                          instance_type)
+        self.assertEqual(0, claim.memory_mb)
+        self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
+        self.assertEqual(instance_type['id'],
+                         claim.migration['new_instance_type_id'])
+
+    def test_disabled_resize_context_claim(self):
+        instance = self._fake_instance()
+        instance_type = self._fake_flavor_create()
+        with self.tracker.resize_claim(self.context, instance, instance_type) \
+                as claim:
+            self.assertEqual(0, claim.memory_mb)
+
+
+class MissingServiceTestCase(BaseTestCase):
+    """Tracker must disable itself when no service row can be found
+    for the compute host.
+    """
+    def setUp(self):
+        super(MissingServiceTestCase, self).setUp()
+        self.context = context.get_admin_context()
+        self.tracker = self._tracker()
+
+    def test_missing_service(self):
+        # No service_get_by_compute_host stub -> lookup fails -> disabled.
+        self.tracker.update_available_resource(self.context)
+        self.assertTrue(self.tracker.disabled)
+
+
+class MissingComputeNodeTestCase(BaseTestCase):
+    """When the service exists but has no joined compute node, the
+    tracker should create one and stay enabled.
+    """
+    def setUp(self):
+        super(MissingComputeNodeTestCase, self).setUp()
+        self.tracker = self._tracker()
+
+        self.stubs.Set(db, 'service_get_by_compute_host',
+                       self._fake_service_get_by_compute_host)
+        self.stubs.Set(db, 'compute_node_create',
+                       self._fake_create_compute_node)
+        self.tracker.scheduler_client.update_resource_stats = mock.Mock()
+
+    def _fake_create_compute_node(self, context, values):
+        # Record that the tracker asked for a compute node to be created.
+        self.created = True
+        return self._create_compute_node()
+
+    def _fake_service_get_by_compute_host(self, ctx, host):
+        # return a service with no joined compute
+        service = self._create_service()
+        return service
+
+    def test_create_compute_node(self):
+        self.tracker.update_available_resource(self.context)
+        self.assertTrue(self.created)
+
+    def test_enabled(self):
+        self.tracker.update_available_resource(self.context)
+        self.assertFalse(self.tracker.disabled)
+
+
+class BaseTrackerTestCase(BaseTestCase):
+    """Fixture providing a fully-working (enabled) resource tracker.
+
+    Stubs the service/compute-node/migration DB calls so the tracker's
+    writes land in self.compute, and records update/delete activity in
+    self.updated / self.deleted / self.update_call_count.
+    """
+
+    def setUp(self):
+        # setup plumbing for a working resource tracker with required
+        # database models and a compatible compute driver:
+        super(BaseTrackerTestCase, self).setUp()
+
+        self.updated = False
+        self.deleted = False
+        self.update_call_count = 0
+
+        self.tracker = self._tracker()
+        # Fake migrations "table", keyed by migration id.
+        self._migrations = {}
+
+        self.stubs.Set(db, 'service_get_by_compute_host',
+                       self._fake_service_get_by_compute_host)
+        self.stubs.Set(db, 'compute_node_update',
+                       self._fake_compute_node_update)
+        self.stubs.Set(db, 'compute_node_delete',
+                       self._fake_compute_node_delete)
+        self.stubs.Set(db, 'migration_update',
+                       self._fake_migration_update)
+        self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
+                       self._fake_migration_get_in_progress_by_host_and_node)
+
+        # Note that this must be called before the call to _init_tracker()
+        patcher = pci_fakes.fake_pci_whitelist()
+        self.addCleanup(patcher.stop)
+
+        self._init_tracker()
+        self.limits = self._limits()
+
+    def _fake_service_get_by_compute_host(self, ctx, host):
+        # (Re)create the compute/service rows the tracker will update.
+        self.compute = self._create_compute_node()
+        self.service = self._create_service(host, compute=self.compute)
+        return self.service
+
+    def _fake_compute_node_update(self, ctx, compute_node_id, values,
+                                  prune_stats=False):
+        self.update_call_count += 1
+        self.updated = True
+        self.compute.update(values)
+        return self.compute
+
+    def _fake_compute_node_delete(self, ctx, compute_node_id):
+        self.deleted = True
+        self.compute.update({'deleted': 1})
+        return self.compute
+
+    def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
+                                                         node):
+        # Only migrations that are not finished count as "in progress".
+        status = ['confirmed', 'reverted', 'error']
+        migrations = []
+
+        for migration in self._migrations.values():
+            migration = obj_base.obj_to_primitive(migration)
+            if migration['status'] in status:
+                continue
+
+            uuid = migration['instance_uuid']
+            migration['instance'] = self._instances[uuid]
+            migrations.append(migration)
+
+        return migrations
+
+    def _fake_migration_update(self, ctxt, migration_id, values):
+        # cheat and assume there's only 1 migration present
+        # NOTE(review): dict.values()[0] is Python-2-only indexing;
+        # would need list(...) under Python 3.
+        migration = self._migrations.values()[0]
+        migration.update(values)
+        return migration
+
+    def _init_tracker(self):
+        self.tracker.update_available_resource(self.context)
+
+    def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
+                disk_gb=FAKE_VIRT_LOCAL_GB,
+                vcpus=FAKE_VIRT_VCPUS,
+                numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
+        """Create limits dictionary used for oversubscribing resources."""
+
+        return {
+            'memory_mb': memory_mb,
+            'disk_gb': disk_gb,
+            'vcpu': vcpus,
+            'numa_topology': numa_topology.to_json() if numa_topology else None
+        }
+
+    def assertEqualNUMAHostTopology(self, expected, got):
+        """Deep-compare two host NUMA topologies cell by cell, raising
+        AssertionError with both values on any mismatch (including a
+        None/non-None mismatch or differing cell counts).
+        """
+        attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
+        if None in (expected, got):
+            if expected != got:
+                raise AssertionError("Topologies don't match. Expected: "
+                                     "%(expected)s, but got: %(got)s" %
+                                     {'expected': expected, 'got': got})
+            else:
+                return
+
+        if len(expected) != len(got):
+            raise AssertionError("Topologies don't match due to different "
+                                 "number of cells. Expected: "
+                                 "%(expected)s, but got: %(got)s" %
+                                 {'expected': expected, 'got': got})
+        for exp_cell, got_cell in zip(expected.cells, got.cells):
+            for attr in attrs:
+                if getattr(exp_cell, attr) != getattr(got_cell, attr):
+                    raise AssertionError("Topologies don't match. Expected: "
+                                         "%(expected)s, but got: %(got)s" %
+                                         {'expected': expected, 'got': got})
+
+    def _assert(self, value, field, tracker=None):
+        """Assert a single compute_node field on ``tracker`` (default
+        self.tracker) equals ``value``; NUMA topologies are compared
+        structurally after deserializing from JSON.
+        """
+
+        if tracker is None:
+            tracker = self.tracker
+
+        if field not in tracker.compute_node:
+            raise test.TestingException(
+                "'%(field)s' not in compute node." % {'field': field})
+        x = tracker.compute_node[field]
+
+        if field == 'numa_topology':
+            self.assertEqualNUMAHostTopology(
+                value, hardware.VirtNUMAHostTopology.from_json(x))
+        else:
+            self.assertEqual(value, x)
+
+
+class TrackerTestCase(BaseTrackerTestCase):
+    """Sanity checks of the tracker's initial state with the default
+    (non-PCI) fake driver.
+    """
+
+    def test_free_ram_resource_value(self):
+        driver = FakeVirtDriver()
+        mem_free = driver.memory_mb - driver.memory_mb_used
+        self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])
+
+    def test_free_disk_resource_value(self):
+        driver = FakeVirtDriver()
+        mem_free = driver.local_gb - driver.local_gb_used
+        self.assertEqual(mem_free, self.tracker.compute_node['free_disk_gb'])
+
+    def test_update_compute_node(self):
+        self.assertFalse(self.tracker.disabled)
+        self.assertTrue(self.updated)
+
+    def test_init(self):
+        # After the initial update, all usage counters are zero and the
+        # totals mirror the fake driver's inventory.
+        driver = self._driver()
+        self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
+        self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
+        self._assert(FAKE_VIRT_VCPUS, 'vcpus')
+        self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
+        self._assert(0, 'memory_mb_used')
+        self._assert(0, 'local_gb_used')
+        self._assert(0, 'vcpus_used')
+        self._assert(0, 'running_vms')
+        self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
+        self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
+        self.assertFalse(self.tracker.disabled)
+        self.assertEqual(0, self.tracker.compute_node['current_workload'])
+        # Empty pci_stats for the non-PCI driver.
+        self.assertEqual(driver.pci_stats,
+                jsonutils.loads(self.tracker.compute_node['pci_stats']))
+
+
+class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
+    """Verify the tracker reports resource stats to the scheduler
+    client on both create and update paths, with stats serialized to
+    JSON and the compute-node id included.
+    """
+
+    def setUp(self):
+        super(SchedulerClientTrackerTestCase, self).setUp()
+        self.tracker.scheduler_client.update_resource_stats = mock.Mock()
+
+    def test_create_resource(self):
+        self.tracker._write_ext_resources = mock.Mock()
+        self.tracker.conductor_api.compute_node_create = mock.Mock(
+            return_value=dict(id=1))
+        values = {'stats': {}, 'foo': 'bar', 'baz_count': 0}
+        self.tracker._create(self.context, values)
+
+        # 'stats' dict is JSON-encoded; id comes from the create result.
+        expected = {'stats': '{}', 'foo': 'bar', 'baz_count': 0,
+                    'id': 1}
+        self.tracker.scheduler_client.update_resource_stats.\
+            assert_called_once_with(self.context,
+                                    ("fakehost", "fakenode"),
+                                    expected)
+
+    def test_update_resource(self):
+        self.tracker._write_ext_resources = mock.Mock()
+        values = {'stats': {}, 'foo': 'bar', 'baz_count': 0}
+        self.tracker._update(self.context, values)
+
+        expected = {'stats': '{}', 'foo': 'bar', 'baz_count': 0,
+                    'id': 1}
+        self.tracker.scheduler_client.update_resource_stats.\
+            assert_called_once_with(self.context,
+                                    ("fakehost", "fakenode"),
+                                    expected)
+
+
+class TrackerPciStatsTestCase(BaseTrackerTestCase):
+    """Same initial-state checks as TrackerTestCase, but with a driver
+    that advertises one PCI device, so pci_stats must be non-empty.
+    """
+
+    def test_update_compute_node(self):
+        self.assertFalse(self.tracker.disabled)
+        self.assertTrue(self.updated)
+
+    def test_init(self):
+        driver = self._driver()
+        self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
+        self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
+        self._assert(FAKE_VIRT_VCPUS, 'vcpus')
+        self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
+        self._assert(0, 'memory_mb_used')
+        self._assert(0, 'local_gb_used')
+        self._assert(0, 'vcpus_used')
+        self._assert(0, 'running_vms')
+        self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
+        self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
+        self.assertFalse(self.tracker.disabled)
+        self.assertEqual(0, self.tracker.compute_node['current_workload'])
+        # pci_stats reflects the fake driver's advertised PCI pool.
+        self.assertEqual(driver.pci_stats,
+                jsonutils.loads(self.tracker.compute_node['pci_stats']))
+
+    def _driver(self):
+        return FakeVirtDriver(pci_support=True)
+
+
+class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
+    """Tests for merging external-resource handler output into the
+    'stats' entry of the driver-reported resources.
+    """
+
+    def setUp(self):
+        super(TrackerExtraResourcesTestCase, self).setUp()
+        self.driver = self._driver()
+
+    def _driver(self):
+        return FakeVirtDriver()
+
+    def test_set_empty_ext_resources(self):
+        # Even with no handler output, _write_ext_resources must ensure
+        # a 'stats' key exists.
+        resources = self.driver.get_available_resource(self.tracker.nodename)
+        self.assertNotIn('stats', resources)
+        self.tracker._write_ext_resources(resources)
+        self.assertIn('stats', resources)
+
+    def test_set_extra_resources(self):
+        def fake_write_resources(resources):
+            # Handler contributes two entries into resources['stats'].
+            resources['stats']['resA'] = '123'
+            resources['stats']['resB'] = 12
+
+        self.stubs.Set(self.tracker.ext_resources_handler,
+                       'write_resources',
+                       fake_write_resources)
+
+        resources = self.driver.get_available_resource(self.tracker.nodename)
+        self.tracker._write_ext_resources(resources)
+
+        expected = {"resA": "123", "resB": 12}
+        self.assertEqual(sorted(expected),
+                         sorted(resources['stats']))
+
+
+class InstanceClaimTestCase(BaseTrackerTestCase):
+    def _instance_topology(self, mem):
+        """Build a two-cell instance NUMA topology with ``mem`` (GB,
+        converted to MB here) per cell, pinned to cpus 1 and 3.
+        """
+        mem = mem * 1024
+        return hardware.VirtNUMAInstanceTopology(
+            cells=[hardware.VirtNUMATopologyCellInstance(0, set([1]), mem),
+                   hardware.VirtNUMATopologyCellInstance(1, set([3]), mem)])
+
+    def _claim_topology(self, mem, cpus=1):
+        """Expected host NUMA topology after a claim: the fake host's
+        two cells with ``mem`` (GB, converted to MB) and ``cpus`` of
+        usage applied to each. Returns None when the driver reports no
+        NUMA topology.
+        """
+        if self.tracker.driver.numa_topology is None:
+            return None
+        mem = mem * 1024
+        return hardware.VirtNUMAHostTopology(
+            cells=[hardware.VirtNUMATopologyCellUsage(
+                       0, set([1, 2]), 3072, cpu_usage=cpus,
+                       memory_usage=mem),
+                   hardware.VirtNUMATopologyCellUsage(
+                       1, set([3, 4]), 3072, cpu_usage=cpus,
+                       memory_usage=mem)])
+
+    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+                return_value=objects.InstancePCIRequests(requests=[]))
+    def test_update_usage_only_for_tracked(self, mock_get):
+        """update_usage must ignore instances that have no claim yet;
+        after instance_claim the usage is tracked and further
+        update_usage calls take effect.
+        """
+        flavor = self._fake_flavor_create()
+        claim_mem = flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD
+        claim_gb = flavor['root_gb'] + flavor['ephemeral_gb']
+        claim_topology = self._claim_topology(claim_mem / 2)
+
+        instance_topology = self._instance_topology(claim_mem / 2)
+
+        instance = self._fake_instance(
+                flavor=flavor, task_state=None,
+                numa_topology=instance_topology)
+        # Untracked instance: usage must remain zero.
+        self.tracker.update_usage(self.context, instance)
+
+        self._assert(0, 'memory_mb_used')
+        self._assert(0, 'local_gb_used')
+        self._assert(0, 'current_workload')
+        self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
+
+        claim = self.tracker.instance_claim(self.context, instance,
+                self.limits)
+        self.assertNotEqual(0, claim.memory_mb)
+        self._assert(claim_mem, 'memory_mb_used')
+        self._assert(claim_gb, 'local_gb_used')
+        self._assert(claim_topology, 'numa_topology')
+
+        # now update should actually take effect
+        instance['task_state'] = task_states.SCHEDULING
+        self.tracker.update_usage(self.context, instance)
+
+        self._assert(claim_mem, 'memory_mb_used')
+        self._assert(claim_gb, 'local_gb_used')
+        self._assert(claim_topology, 'numa_topology')
+        self._assert(1, 'current_workload')
+
+    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+                return_value=objects.InstancePCIRequests(requests=[]))
+    def test_claim_and_audit(self, mock_get):
+        """After a claim, a subsequent audit (update_available_resource)
+        must derive usage from tracked instances, not from the virt
+        layer's reported usage.
+        """
+        claim_mem = 3
+        claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
+        claim_disk = 2
+        claim_topology = self._claim_topology(claim_mem_total / 2)
+
+        instance_topology = self._instance_topology(claim_mem_total / 2)
+        instance = self._fake_instance(memory_mb=claim_mem, root_gb=claim_disk,
+                ephemeral_gb=0, numa_topology=instance_topology)
+
+        self.tracker.instance_claim(self.context, instance, self.limits)
+
+        self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["memory_mb"])
+        self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
+        self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
+                         self.compute["free_ram_mb"])
+        self.assertEqualNUMAHostTopology(
+                claim_topology, hardware.VirtNUMAHostTopology.from_json(
+                    self.compute['numa_topology']))
+
+        self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["local_gb"])
+        self.assertEqual(claim_disk, self.compute["local_gb_used"])
+        self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
+                         self.compute["free_disk_gb"])
+
+        # 1st pretend that the compute operation finished and claimed the
+        # desired resources from the virt layer
+        driver = self.tracker.driver
+        driver.memory_mb_used = claim_mem
+        driver.local_gb_used = claim_disk
+
+        self.tracker.update_available_resource(self.context)
+
+        # confirm tracker is adding in host_ip
+        self.assertIsNotNone(self.compute.get('host_ip'))
+
+        # confirm that resource usage is derived from instance usages,
+        # not virt layer:
+        self.assertEqual(claim_mem_total, self.compute['memory_mb_used'])
+        self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
+                         self.compute['free_ram_mb'])
+        self.assertEqualNUMAHostTopology(
+                claim_topology, hardware.VirtNUMAHostTopology.from_json(
+                    self.compute['numa_topology']))
+
+        self.assertEqual(claim_disk, self.compute['local_gb_used'])
+        self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
+                         self.compute['free_disk_gb'])
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_claim_and_abort(self, mock_get):
+ claim_mem = 3
+ claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
+ claim_disk = 2
+ claim_topology = self._claim_topology(claim_mem_total / 2)
+
+ instance_topology = self._instance_topology(claim_mem_total / 2)
+ instance = self._fake_instance(memory_mb=claim_mem,
+ root_gb=claim_disk, ephemeral_gb=0,
+ numa_topology=instance_topology)
+
+ claim = self.tracker.instance_claim(self.context, instance,
+ self.limits)
+ self.assertIsNotNone(claim)
+
+ self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
+ self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
+ self.compute["free_ram_mb"])
+ self.assertEqualNUMAHostTopology(
+ claim_topology, hardware.VirtNUMAHostTopology.from_json(
+ self.compute['numa_topology']))
+
+ self.assertEqual(claim_disk, self.compute["local_gb_used"])
+ self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
+ self.compute["free_disk_gb"])
+
+ claim.abort()
+
+ self.assertEqual(0, self.compute["memory_mb_used"])
+ self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["free_ram_mb"])
+ self.assertEqualNUMAHostTopology(
+ FAKE_VIRT_NUMA_TOPOLOGY,
+ hardware.VirtNUMAHostTopology.from_json(
+ self.compute['numa_topology']))
+
+ self.assertEqual(0, self.compute["local_gb_used"])
+ self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["free_disk_gb"])
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_instance_claim_with_oversubscription(self, mock_get):
+ memory_mb = FAKE_VIRT_MEMORY_MB * 2
+ root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
+ vcpus = FAKE_VIRT_VCPUS * 2
+ claim_topology = self._claim_topology(memory_mb)
+ instance_topology = self._instance_topology(memory_mb)
+
+ limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
+ 'disk_gb': root_gb * 2,
+ 'vcpu': vcpus,
+ 'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD.to_json()}
+
+ instance = self._fake_instance(memory_mb=memory_mb,
+ root_gb=root_gb, ephemeral_gb=ephemeral_gb,
+ numa_topology=instance_topology)
+
+ self.tracker.instance_claim(self.context, instance, limits)
+ self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
+ self.tracker.compute_node['memory_mb_used'])
+ self.assertEqualNUMAHostTopology(
+ claim_topology,
+ hardware.VirtNUMAHostTopology.from_json(
+ self.compute['numa_topology']))
+ self.assertEqual(root_gb * 2,
+ self.tracker.compute_node['local_gb_used'])
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_additive_claims(self, mock_get):
+ self.limits['vcpu'] = 2
+ claim_topology = self._claim_topology(2, cpus=2)
+
+ flavor = self._fake_flavor_create(
+ memory_mb=1, root_gb=1, ephemeral_gb=0)
+ instance_topology = self._instance_topology(1)
+ instance = self._fake_instance(
+ flavor=flavor, numa_topology=instance_topology)
+ with self.tracker.instance_claim(self.context, instance, self.limits):
+ pass
+ instance = self._fake_instance(
+ flavor=flavor, numa_topology=instance_topology)
+ with self.tracker.instance_claim(self.context, instance, self.limits):
+ pass
+
+ self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
+ self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
+ self.tracker.compute_node['local_gb_used'])
+ self.assertEqual(2 * flavor['vcpus'],
+ self.tracker.compute_node['vcpus_used'])
+
+ self.assertEqualNUMAHostTopology(
+ claim_topology,
+ hardware.VirtNUMAHostTopology.from_json(
+ self.compute['numa_topology']))
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_context_claim_with_exception(self, mock_get):
+ instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1)
+ try:
+ with self.tracker.instance_claim(self.context, instance):
+ # <insert exciting things that utilize resources>
+ raise test.TestingException()
+ except test.TestingException:
+ pass
+
+ self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
+ self.assertEqual(0, self.compute['memory_mb_used'])
+ self.assertEqual(0, self.compute['local_gb_used'])
+ self.assertEqualNUMAHostTopology(
+ FAKE_VIRT_NUMA_TOPOLOGY,
+ hardware.VirtNUMAHostTopology.from_json(
+ self.compute['numa_topology']))
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_instance_context_claim(self, mock_get):
+ flavor = self._fake_flavor_create(
+ memory_mb=1, root_gb=2, ephemeral_gb=3)
+ claim_topology = self._claim_topology(1)
+
+ instance_topology = self._instance_topology(1)
+ instance = self._fake_instance(
+ flavor=flavor, numa_topology=instance_topology)
+ with self.tracker.instance_claim(self.context, instance):
+ # <insert exciting things that utilize resources>
+ self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
+ self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
+ self.tracker.compute_node['local_gb_used'])
+ self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
+ self.compute['memory_mb_used'])
+ self.assertEqualNUMAHostTopology(
+ claim_topology,
+ hardware.VirtNUMAHostTopology.from_json(
+ self.compute['numa_topology']))
+ self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
+ self.compute['local_gb_used'])
+
+ # after exiting claim context, build is marked as finished. usage
+ # totals should be same:
+ self.tracker.update_available_resource(self.context)
+ self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
+ self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
+ self.tracker.compute_node['local_gb_used'])
+ self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
+ self.compute['memory_mb_used'])
+ self.assertEqualNUMAHostTopology(
+ claim_topology,
+ hardware.VirtNUMAHostTopology.from_json(
+ self.compute['numa_topology']))
+ self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
+ self.compute['local_gb_used'])
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_update_load_stats_for_instance(self, mock_get):
+ instance = self._fake_instance(task_state=task_states.SCHEDULING)
+ with self.tracker.instance_claim(self.context, instance):
+ pass
+
+ self.assertEqual(1, self.tracker.compute_node['current_workload'])
+
+ instance['vm_state'] = vm_states.ACTIVE
+ instance['task_state'] = None
+ instance['host'] = 'fakehost'
+
+ self.tracker.update_usage(self.context, instance)
+ self.assertEqual(0, self.tracker.compute_node['current_workload'])
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_cpu_stats(self, mock_get):
+ limits = {'disk_gb': 100, 'memory_mb': 100}
+ self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
+
+ vcpus = 1
+ instance = self._fake_instance(vcpus=vcpus)
+
+ # should not do anything until a claim is made:
+ self.tracker.update_usage(self.context, instance)
+ self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
+
+ with self.tracker.instance_claim(self.context, instance, limits):
+ pass
+ self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
+
+ # instance state can change without modifying vcpus in use:
+ instance['task_state'] = task_states.SCHEDULING
+ self.tracker.update_usage(self.context, instance)
+ self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
+
+ add_vcpus = 10
+ vcpus += add_vcpus
+ instance = self._fake_instance(vcpus=add_vcpus)
+ with self.tracker.instance_claim(self.context, instance, limits):
+ pass
+ self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
+
+ instance['vm_state'] = vm_states.DELETED
+ self.tracker.update_usage(self.context, instance)
+ vcpus -= add_vcpus
+ self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
+
+ def test_skip_deleted_instances(self):
+ # ensure that the audit process skips instances that have vm_state
+ # DELETED, but the DB record is not yet deleted.
+ self._fake_instance(vm_state=vm_states.DELETED, host=self.host)
+ self.tracker.update_available_resource(self.context)
+
+ self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
+
+
+class ResizeClaimTestCase(BaseTrackerTestCase):
+
+ def setUp(self):
+ super(ResizeClaimTestCase, self).setUp()
+
+ def _fake_migration_create(mig_self, ctxt):
+ self._migrations[mig_self.instance_uuid] = mig_self
+ mig_self.obj_reset_changes()
+
+ self.stubs.Set(objects.Migration, 'create',
+ _fake_migration_create)
+
+ self.instance = self._fake_instance()
+ self.instance_type = self._fake_flavor_create()
+
+ def _fake_migration_create(self, context, values=None):
+ instance_uuid = str(uuid.uuid1())
+ mig_dict = test_migration.fake_db_migration()
+ mig_dict.update({
+ 'id': 1,
+ 'source_compute': 'host1',
+ 'source_node': 'fakenode',
+ 'dest_compute': 'host2',
+ 'dest_node': 'fakenode',
+ 'dest_host': '127.0.0.1',
+ 'old_instance_type_id': 1,
+ 'new_instance_type_id': 2,
+ 'instance_uuid': instance_uuid,
+ 'status': 'pre-migrating',
+ 'updated_at': timeutils.utcnow()
+ })
+ if values:
+ mig_dict.update(values)
+
+ migration = objects.Migration()
+ migration.update(mig_dict)
+ # This hits the stub in setUp()
+ migration.create('fake')
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_claim(self, mock_get):
+ self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, self.limits)
+ self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
+ self.assertEqual(1, len(self.tracker.tracked_migrations))
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_abort(self, mock_get):
+ try:
+ with self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, self.limits):
+ raise test.TestingException("abort")
+ except test.TestingException:
+ pass
+
+ self._assert(0, 'memory_mb_used')
+ self._assert(0, 'local_gb_used')
+ self._assert(0, 'vcpus_used')
+ self.assertEqual(0, len(self.tracker.tracked_migrations))
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_additive_claims(self, mock_get):
+
+ limits = self._limits(
+ 2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
+ 2 * FAKE_VIRT_LOCAL_GB,
+ 2 * FAKE_VIRT_VCPUS)
+ self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, limits)
+ instance2 = self._fake_instance()
+ self.tracker.resize_claim(self.context, instance2, self.instance_type,
+ limits)
+
+ self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
+ self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_claim_and_audit(self, mock_get):
+ self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, self.limits)
+
+ self.tracker.update_available_resource(self.context)
+
+ self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_same_host(self, mock_get):
+ self.limits['vcpu'] = 3
+
+ src_dict = {
+ 'memory_mb': 1, 'root_gb': 1, 'ephemeral_gb': 0, 'vcpus': 1}
+ dest_dict = dict((k, v + 1) for (k, v) in src_dict.iteritems())
+ src_type = self._fake_flavor_create(
+ id=10, name="srcflavor", **src_dict)
+ dest_type = self._fake_flavor_create(
+ id=11, name="destflavor", **dest_dict)
+
+ # make an instance of src_type:
+ instance = self._fake_instance(flavor=src_type)
+ instance['system_metadata'] = self._fake_instance_system_metadata(
+ dest_type)
+ self.tracker.instance_claim(self.context, instance, self.limits)
+
+ # resize to dest_type:
+ claim = self.tracker.resize_claim(self.context, instance,
+ dest_type, self.limits)
+
+ self._assert(src_dict['memory_mb'] + dest_dict['memory_mb']
+ + 2 * FAKE_VIRT_MEMORY_OVERHEAD, 'memory_mb_used')
+ self._assert(src_dict['root_gb'] + src_dict['ephemeral_gb']
+ + dest_dict['root_gb'] + dest_dict['ephemeral_gb'],
+ 'local_gb_used')
+ self._assert(src_dict['vcpus'] + dest_dict['vcpus'], 'vcpus_used')
+
+ self.tracker.update_available_resource(self.context)
+ claim.abort()
+
+ # only the original instance should remain, not the migration:
+ self._assert(src_dict['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
+ 'memory_mb_used')
+ self._assert(src_dict['root_gb'] + src_dict['ephemeral_gb'],
+ 'local_gb_used')
+ self._assert(src_dict['vcpus'], 'vcpus_used')
+ self.assertEqual(1, len(self.tracker.tracked_instances))
+ self.assertEqual(0, len(self.tracker.tracked_migrations))
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_revert(self, mock_get):
+ self.tracker.resize_claim(self.context, self.instance,
+ self.instance_type, {}, self.limits)
+ self.tracker.drop_resize_claim(self.context, self.instance)
+
+ self.assertEqual(0, len(self.tracker.tracked_instances))
+ self.assertEqual(0, len(self.tracker.tracked_migrations))
+ self._assert(0, 'memory_mb_used')
+ self._assert(0, 'local_gb_used')
+ self._assert(0, 'vcpus_used')
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_revert_reserve_source(self, mock_get):
+ # if a revert has started at the API and audit runs on
+ # the source compute before the instance flips back to source,
+ # resources should still be held at the source based on the
+ # migration:
+ dest = "desthost"
+ dest_tracker = self._tracker(host=dest)
+ dest_tracker.update_available_resource(self.context)
+
+ self.instance = self._fake_instance(memory_mb=FAKE_VIRT_MEMORY_MB,
+ root_gb=FAKE_VIRT_LOCAL_GB, ephemeral_gb=0,
+ vcpus=FAKE_VIRT_VCPUS, instance_type_id=1)
+
+ values = {'source_compute': self.host, 'dest_compute': dest,
+ 'old_instance_type_id': 1, 'new_instance_type_id': 1,
+ 'status': 'post-migrating',
+ 'instance_uuid': self.instance['uuid']}
+ self._fake_migration_create(self.context, values)
+
+ # attach an instance to the destination host tracker:
+ dest_tracker.instance_claim(self.context, self.instance)
+
+ self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD,
+ 'memory_mb_used', tracker=dest_tracker)
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
+ tracker=dest_tracker)
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
+ tracker=dest_tracker)
+
+ # audit and recheck to confirm migration doesn't get double counted
+ # on dest:
+ dest_tracker.update_available_resource(self.context)
+
+ self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD,
+ 'memory_mb_used', tracker=dest_tracker)
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
+ tracker=dest_tracker)
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
+ tracker=dest_tracker)
+
+ # apply the migration to the source host tracker:
+ self.tracker.update_available_resource(self.context)
+
+ self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
+
+ # flag the instance and migration as reverting and re-audit:
+ self.instance['vm_state'] = vm_states.RESIZED
+ self.instance['task_state'] = task_states.RESIZE_REVERTING
+ self.tracker.update_available_resource(self.context)
+
+ self._assert(FAKE_VIRT_MEMORY_MB + 1, 'memory_mb_used')
+ self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
+ self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
+
+ def test_resize_filter(self):
+ instance = self._fake_instance(vm_state=vm_states.ACTIVE,
+ task_state=task_states.SUSPENDING)
+ self.assertFalse(self.tracker._instance_in_resize_state(instance))
+
+ instance = self._fake_instance(vm_state=vm_states.RESIZED,
+ task_state=task_states.SUSPENDING)
+ self.assertTrue(self.tracker._instance_in_resize_state(instance))
+
+ states = [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING,
+ task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH]
+ for vm_state in [vm_states.ACTIVE, vm_states.STOPPED]:
+ for task_state in states:
+ instance = self._fake_instance(vm_state=vm_state,
+ task_state=task_state)
+ result = self.tracker._instance_in_resize_state(instance)
+ self.assertTrue(result)
+
+ def test_dupe_filter(self):
+ instance = self._fake_instance(host=self.host)
+
+ values = {'source_compute': self.host, 'dest_compute': self.host,
+ 'instance_uuid': instance['uuid'], 'new_instance_type_id': 2}
+ self._fake_flavor_create(id=2)
+ self._fake_migration_create(self.context, values)
+ self._fake_migration_create(self.context, values)
+
+ self.tracker.update_available_resource(self.context)
+ self.assertEqual(1, len(self.tracker.tracked_migrations))
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
+ return_value=objects.InstancePCIRequests(requests=[]))
+ def test_set_instance_host_and_node(self, mock_get):
+ instance = self._fake_instance()
+ self.assertIsNone(instance['host'])
+ self.assertIsNone(instance['launched_on'])
+ self.assertIsNone(instance['node'])
+
+ claim = self.tracker.instance_claim(self.context, instance)
+ self.assertNotEqual(0, claim.memory_mb)
+
+ self.assertEqual('fakehost', instance['host'])
+ self.assertEqual('fakehost', instance['launched_on'])
+ self.assertEqual('fakenode', instance['node'])
+
+
+class NoInstanceTypesInSysMetadata(ResizeClaimTestCase):
+ """Make sure we handle the case where the following are true:
+
+ #) Compute node C gets upgraded to code that looks for instance types in
+ system metadata. AND
+ #) C already has instances in the process of migrating that do not have
+ stashed instance types.
+
+ bug 1164110
+ """
+ def setUp(self):
+ super(NoInstanceTypesInSysMetadata, self).setUp()
+ self.instance = self._fake_instance(stash=False)
+
+ def test_get_instance_type_stash_false(self):
+ with (mock.patch.object(objects.Flavor, 'get_by_id',
+ return_value=self.instance_type)):
+ flavor = self.tracker._get_instance_type(self.context,
+ self.instance, "new_")
+ self.assertEqual(self.instance_type, flavor)
+
+
+class OrphanTestCase(BaseTrackerTestCase):
+ def _driver(self):
+ class OrphanVirtDriver(FakeVirtDriver):
+ def get_per_instance_usage(self):
+ return {
+ '1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
+ 'uuid': '1-2-3-4-5'},
+ '2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
+ 'uuid': '2-3-4-5-6'},
+ }
+
+ return OrphanVirtDriver()
+
+ def test_usage(self):
+ self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
+ self.tracker.compute_node['memory_mb_used'])
+
+ def test_find(self):
+ # create one legit instance and verify the 2 orphans remain
+ self._fake_instance()
+ orphans = self.tracker._find_orphaned_instances()
+
+ self.assertEqual(2, len(orphans))
+
+
+class ComputeMonitorTestCase(BaseTestCase):
+ def setUp(self):
+ super(ComputeMonitorTestCase, self).setUp()
+ fake_monitors = [
+ 'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass1',
+ 'nova.tests.unit.compute.monitors.test_monitors.FakeMonitorClass2']
+ self.flags(compute_available_monitors=fake_monitors)
+ self.tracker = self._tracker()
+ self.node_name = 'nodename'
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.info = {}
+ self.context = context.RequestContext(self.user_id,
+ self.project_id)
+
+ def test_get_host_metrics_none(self):
+ self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
+ self.tracker.monitors = []
+ metrics = self.tracker._get_host_metrics(self.context,
+ self.node_name)
+ self.assertEqual(len(metrics), 0)
+
+ def test_get_host_metrics_one_failed(self):
+ self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
+ class1 = test_monitors.FakeMonitorClass1(self.tracker)
+ class4 = test_monitors.FakeMonitorClass4(self.tracker)
+ self.tracker.monitors = [class1, class4]
+ metrics = self.tracker._get_host_metrics(self.context,
+ self.node_name)
+ self.assertTrue(len(metrics) > 0)
+
+ def test_get_host_metrics(self):
+ self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass2'])
+ class1 = test_monitors.FakeMonitorClass1(self.tracker)
+ class2 = test_monitors.FakeMonitorClass2(self.tracker)
+ self.tracker.monitors = [class1, class2]
+
+ mock_notifier = mock.Mock()
+
+ with mock.patch.object(rpc, 'get_notifier',
+ return_value=mock_notifier) as mock_get:
+ metrics = self.tracker._get_host_metrics(self.context,
+ self.node_name)
+ mock_get.assert_called_once_with(service='compute',
+ host=self.node_name)
+
+ expected_metrics = [{
+ 'timestamp': 1232,
+ 'name': 'key1',
+ 'value': 2600,
+ 'source': 'libvirt'
+ }, {
+ 'name': 'key2',
+ 'source': 'libvirt',
+ 'timestamp': 123,
+ 'value': 1600
+ }]
+
+ payload = {
+ 'metrics': expected_metrics,
+ 'host': self.tracker.host,
+ 'host_ip': CONF.my_ip,
+ 'nodename': self.node_name
+ }
+
+ mock_notifier.info.assert_called_once_with(
+ self.context, 'compute.metrics.update', payload)
+
+ self.assertEqual(metrics, expected_metrics)
+
+
+class TrackerPeriodicTestCase(BaseTrackerTestCase):
+
+ def test_periodic_status_update(self):
+ # verify update called on instantiation
+ self.assertEqual(1, self.update_call_count)
+
+ # verify update not called if no change to resources
+ self.tracker.update_available_resource(self.context)
+ self.assertEqual(1, self.update_call_count)
+
+ # verify update is called when resources change
+ driver = self.tracker.driver
+ driver.memory_mb += 1
+ self.tracker.update_available_resource(self.context)
+ self.assertEqual(2, self.update_call_count)
+
+ def test_update_available_resource_calls_locked_inner(self):
+ @mock.patch.object(self.tracker, 'driver')
+ @mock.patch.object(self.tracker,
+ '_update_available_resource')
+ @mock.patch.object(self.tracker, '_verify_resources')
+ @mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
+ def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
+ resources = {'there is someone in my head': 'but it\'s not me'}
+ mock_driver.get_available_resource.return_value = resources
+ self.tracker.update_available_resource(self.context)
+ mock_uar.assert_called_once_with(self.context, resources)
+
+ _test()
+
+
+class StatsDictTestCase(BaseTrackerTestCase):
+ """Test stats handling for a virt driver that provides
+ stats as a dictionary.
+ """
+ def _driver(self):
+ return FakeVirtDriver(stats=FAKE_VIRT_STATS)
+
+ def _get_stats(self):
+ return jsonutils.loads(self.tracker.compute_node['stats'])
+
+ def test_virt_stats(self):
+ # start with virt driver stats
+ stats = self._get_stats()
+ self.assertEqual(FAKE_VIRT_STATS, stats)
+
+ # adding an instance should keep virt driver stats
+ self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
+ self.tracker.update_available_resource(self.context)
+
+ stats = self._get_stats()
+ expected_stats = {}
+ expected_stats.update(FAKE_VIRT_STATS)
+ expected_stats.update(self.tracker.stats)
+ self.assertEqual(expected_stats, stats)
+
+ # removing the instances should keep only virt driver stats
+ self._instances = {}
+ self.tracker.update_available_resource(self.context)
+
+ stats = self._get_stats()
+ self.assertEqual(FAKE_VIRT_STATS, stats)
+
+
+class StatsJsonTestCase(BaseTrackerTestCase):
+ """Test stats handling for a virt driver that provides
+ stats as a json string.
+ """
+ def _driver(self):
+ return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)
+
+ def _get_stats(self):
+ return jsonutils.loads(self.tracker.compute_node['stats'])
+
+ def test_virt_stats(self):
+ # start with virt driver stats
+ stats = self._get_stats()
+ self.assertEqual(FAKE_VIRT_STATS, stats)
+
+ # adding an instance should keep virt driver stats
+ # and add rt stats
+ self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
+ self.tracker.update_available_resource(self.context)
+
+ stats = self._get_stats()
+ expected_stats = {}
+ expected_stats.update(FAKE_VIRT_STATS)
+ expected_stats.update(self.tracker.stats)
+ self.assertEqual(expected_stats, stats)
+
+ # removing the instances should keep only virt driver stats
+ self._instances = {}
+ self.tracker.update_available_resource(self.context)
+ stats = self._get_stats()
+ self.assertEqual(FAKE_VIRT_STATS, stats)
+
+
+class StatsInvalidJsonTestCase(BaseTrackerTestCase):
+ """Test stats handling for a virt driver that provides
+ an invalid type for stats.
+ """
+ def _driver(self):
+ return FakeVirtDriver(stats='this is not json')
+
+ def _init_tracker(self):
+ # do not do initial update in setup
+ pass
+
+ def test_virt_stats(self):
+ # should throw exception for string that does not parse as json
+ self.assertRaises(ValueError,
+ self.tracker.update_available_resource,
+ context=self.context)
+
+
+class StatsInvalidTypeTestCase(BaseTrackerTestCase):
+ """Test stats handling for a virt driver that provides
+ an invalid type for stats.
+ """
+ def _driver(self):
+ return FakeVirtDriver(stats=10)
+
+ def _init_tracker(self):
+ # do not do initial update in setup
+ pass
+
+ def test_virt_stats(self):
+ # should throw exception for incorrect stats value type
+ self.assertRaises(ValueError,
+ self.tracker.update_available_resource,
+ context=self.context)
diff --git a/nova/tests/unit/compute/test_resources.py b/nova/tests/unit/compute/test_resources.py
new file mode 100644
index 0000000000..cdd1585e34
--- /dev/null
+++ b/nova/tests/unit/compute/test_resources.py
@@ -0,0 +1,344 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the compute extra resources framework."""
+
+
+from oslo.config import cfg
+from stevedore import extension
+from stevedore import named
+
+from nova.compute import resources
+from nova.compute.resources import base
+from nova.compute.resources import vcpu
+from nova import context
+from nova.i18n import _
+from nova.objects import flavor as flavor_obj
+from nova import test
+from nova.tests.unit.fake_instance import fake_instance_obj
+
+CONF = cfg.CONF
+
+
class FakeResourceHandler(resources.ResourceHandler):
    """ResourceHandler wired directly to a pre-built list of test
    extensions instead of loading plugins from entry points.
    """

    def __init__(self, extensions):
        mgr = named.NamedExtensionManager.make_test_instance(extensions)
        self._mgr = mgr
+
+
class FakeResource(base.Resource):
    """Minimal resource plugin used to exercise the handler in tests.

    Tracks a single counter pair (total/used); the amount an instance
    consumes is read from the flavor's ``extra_specs`` under
    ``self.resource_name``.
    """

    def __init__(self):
        self.total_res = 0
        self.used_res = 0

    def _get_requested(self, usage):
        # Amount of this resource requested via extra_specs, or None
        # when the spec is absent.
        if ('extra_specs' in usage and
                self.resource_name in usage['extra_specs']):
            return int(usage['extra_specs'][self.resource_name])

    def _get_limit(self, limits):
        # Upper bound for this resource, or None when unlimited.
        if self.resource_name in limits:
            return int(limits[self.resource_name])

    def reset(self, resources, driver):
        self.total_res = 0
        self.used_res = 0

    def test(self, usage, limits):
        # Returns None on success, or a human-readable failure message
        # when the request does not fit under the limit.
        wanted = self._get_requested(usage)
        if not wanted:
            return

        cap = self._get_limit(limits)
        if not cap:
            return

        available = cap - self.used_res
        if wanted > available:
            return (_('Free %(free)d < requested %(requested)d ') %
                    {'free': available, 'requested': wanted})

    def add_instance(self, usage):
        wanted = self._get_requested(usage)
        if wanted:
            self.used_res += wanted

    def remove_instance(self, usage):
        wanted = self._get_requested(usage)
        if wanted:
            self.used_res -= wanted

    def write(self, resources):
        pass

    def report_free(self):
        remaining = self.total_res - self.used_res
        return "Free %s" % remaining
+
+
class ResourceA(FakeResource):
    """Fake resource whose total is taken from the CONF.resA option."""

    def reset(self, resources, driver):
        # ResourceA uses a configuration option
        self.resource_name = 'resource:resA'
        self.total_res = int(CONF.resA)
        self.used_res = 0

    def write(self, resources):
        resources['used_resA'] = self.used_res
        resources['resA'] = self.total_res
+
+
class ResourceB(FakeResource):
    """Fake resource whose total is taken from the driver-supplied
    resources dict passed to reset().
    """

    def reset(self, resources, driver):
        # ResourceB uses resource details passed in parameter resources
        self.resource_name = 'resource:resB'
        self.total_res = resources['resB']
        self.used_res = 0

    def write(self, resources):
        resources['used_resB'] = self.used_res
        resources['resB'] = self.total_res
+
+
def fake_flavor_obj(**updates):
    """Build a Flavor object with canned test values.

    Any keyword arguments override the defaults via Flavor.update().
    """
    flavor = flavor_obj.Flavor()
    defaults = {
        'id': 1,
        'name': 'fakeflavor',
        'memory_mb': 8000,
        'vcpus': 3,
        'root_gb': 11,
        'ephemeral_gb': 4,
        'swap': 0,
        'rxtx_factor': 1.0,
        'vcpu_weight': 1,
    }
    for attr, value in defaults.items():
        setattr(flavor, attr, value)
    if updates:
        flavor.update(updates)
    return flavor
+
+
class BaseTestCase(test.TestCase):
    """Exercise ResourceHandler against two fake plugins (resA, resB).

    Flavor 1 has no extra_specs; flavor 2 requests resA/resB/resC.
    Only resA and resB plugins are loaded, so resC specs are ignored.
    """

    def _initialize_used_res_counter(self):
        # Initialize the value for the used resource
        for ext in self.r_handler._mgr.extensions:
            ext.obj.used_res = 0

    def setUp(self):
        super(BaseTestCase, self).setUp()

        # initialize flavors and stub get_by_id to
        # get flavors from here
        self._flavors = {}
        self.ctxt = context.get_admin_context()

        # Create a flavor without extra_specs defined
        _flavor_id = 1
        _flavor = fake_flavor_obj(id=_flavor_id)
        self._flavors[_flavor_id] = _flavor

        # Create a flavor with extra_specs defined
        _flavor_id = 2
        requested_resA = 5
        requested_resB = 7
        requested_resC = 7
        # resC has no matching plugin below, so it should be ignored
        _extra_specs = {'resource:resA': requested_resA,
                        'resource:resB': requested_resB,
                        'resource:resC': requested_resC}
        _flavor = fake_flavor_obj(id=_flavor_id,
                                  extra_specs=_extra_specs)
        self._flavors[_flavor_id] = _flavor

        # create fake resource extensions and resource handler
        _extensions = [
            extension.Extension('resA', None, ResourceA, ResourceA()),
            extension.Extension('resB', None, ResourceB, ResourceB()),
        ]
        self.r_handler = FakeResourceHandler(_extensions)

        # Resources details can be passed to each plugin or can be specified as
        # configuration options
        driver_resources = {'resB': 5}
        CONF.resA = '10'

        # initialise the resources
        self.r_handler.reset_resources(driver_resources, None)

    def test_update_from_instance_with_extra_specs(self):
        # Flavor with extra_specs
        _flavor_id = 2
        sign = 1
        self.r_handler.update_from_instance(self._flavors[_flavor_id], sign)

        # both plugins should account the usage the flavor requested
        expected_resA = self._flavors[_flavor_id].extra_specs['resource:resA']
        expected_resB = self._flavors[_flavor_id].extra_specs['resource:resB']
        self.assertEqual(int(expected_resA),
                         self.r_handler._mgr['resA'].obj.used_res)
        self.assertEqual(int(expected_resB),
                         self.r_handler._mgr['resB'].obj.used_res)

    def test_update_from_instance_without_extra_specs(self):
        # Flavor id without extra spec
        _flavor_id = 1
        self._initialize_used_res_counter()
        self.r_handler.resource_list = []
        sign = 1
        self.r_handler.update_from_instance(self._flavors[_flavor_id], sign)
        # nothing was requested, so usage must remain zero
        self.assertEqual(0, self.r_handler._mgr['resA'].obj.used_res)
        self.assertEqual(0, self.r_handler._mgr['resB'].obj.used_res)

    def test_write_resources(self):
        self._initialize_used_res_counter()
        extra_resources = {}
        expected = {'resA': 10, 'used_resA': 0, 'resB': 5, 'used_resB': 0}
        self.r_handler.write_resources(extra_resources)
        self.assertEqual(expected, extra_resources)

    def test_test_resources_without_extra_specs(self):
        limits = {}
        # Flavor id without extra_specs
        flavor = self._flavors[1]
        result = self.r_handler.test_resources(flavor, limits)
        # one None per loaded plugin means "no objection"
        self.assertEqual([None, None], result)

    def test_test_resources_with_limits_for_different_resource(self):
        limits = {'resource:resC': 20}
        # Flavor id with extra_specs
        flavor = self._flavors[2]
        result = self.r_handler.test_resources(flavor, limits)
        self.assertEqual([None, None], result)

    def test_passing_test_resources(self):
        limits = {'resource:resA': 10, 'resource:resB': 20}
        # Flavor id with extra_specs
        flavor = self._flavors[2]
        self._initialize_used_res_counter()
        result = self.r_handler.test_resources(flavor, limits)
        self.assertEqual([None, None], result)

    def test_failing_test_resources_for_single_resource(self):
        # resA limit (4) is below the requested amount (5); resB fits
        limits = {'resource:resA': 4, 'resource:resB': 20}
        # Flavor id with extra_specs
        flavor = self._flavors[2]
        self._initialize_used_res_counter()
        result = self.r_handler.test_resources(flavor, limits)
        expected = ['Free 4 < requested 5 ', None]
        self.assertEqual(sorted(expected),
                         sorted(result))

    def test_empty_resource_handler(self):
        """An empty resource handler has no resource extensions,
        should have no effect, and should raise no exceptions.
        """
        empty_r_handler = FakeResourceHandler([])

        resources = {}
        empty_r_handler.reset_resources(resources, None)

        flavor = self._flavors[1]
        sign = 1
        empty_r_handler.update_from_instance(flavor, sign)

        limits = {}
        test_result = empty_r_handler.test_resources(flavor, limits)
        self.assertEqual([], test_result)

        sign = -1
        empty_r_handler.update_from_instance(flavor, sign)

        extra_resources = {}
        expected_extra_resources = extra_resources
        empty_r_handler.write_resources(extra_resources)
        self.assertEqual(expected_extra_resources, extra_resources)

        empty_r_handler.report_free_resources()

    def test_vcpu_resource_load(self):
        # load the vcpu example
        names = ['vcpu']
        real_r_handler = resources.ResourceHandler(names)
        ext_names = real_r_handler._mgr.names()
        self.assertEqual(names, ext_names)

        # check the extension loaded is the one we expect
        # and an instance of the object has been created
        ext = real_r_handler._mgr['vcpu']
        self.assertIsInstance(ext.obj, vcpu.VCPU)
+
+
class TestVCPU(test.TestCase):
    """Unit tests for the vcpu example resource plugin."""

    def setUp(self):
        super(TestVCPU, self).setUp()
        self._vcpu = vcpu.VCPU()
        self._vcpu._total = 10
        self._vcpu._used = 0
        self._flavor = fake_flavor_obj(vcpus=5)
        self._big_flavor = fake_flavor_obj(vcpus=20)
        # NOTE(review): _instance does not appear to be used by any test
        # in this class -- confirm whether it can be removed.
        self._instance = fake_instance_obj(None)

    def test_reset(self):
        # set vcpu values to something different to test reset
        self._vcpu._total = 10
        self._vcpu._used = 5

        # reset() should take the total from the driver resources and
        # zero the usage counter
        driver_resources = {'vcpus': 20}
        self._vcpu.reset(driver_resources, None)
        self.assertEqual(20, self._vcpu._total)
        self.assertEqual(0, self._vcpu._used)

    def test_add_and_remove_instance(self):
        # adding consumes the flavor's vcpus; removing releases them
        self._vcpu.add_instance(self._flavor)
        self.assertEqual(10, self._vcpu._total)
        self.assertEqual(5, self._vcpu._used)

        self._vcpu.remove_instance(self._flavor)
        self.assertEqual(10, self._vcpu._total)
        self.assertEqual(0, self._vcpu._used)

    def test_test_pass_limited(self):
        # 5 vcpus requested against a limit of 10: no error expected
        result = self._vcpu.test(self._flavor, {'vcpu': 10})
        self.assertIsNone(result, 'vcpu test failed when it should pass')

    def test_test_pass_unlimited(self):
        # no limit supplied, so even an oversized flavor passes
        result = self._vcpu.test(self._big_flavor, {})
        self.assertIsNone(result, 'vcpu test failed when it should pass')

    def test_test_fail(self):
        # 5 vcpus requested against a limit of 2: expect a message
        result = self._vcpu.test(self._flavor, {'vcpu': 2})
        expected = _('Free CPUs 2.00 VCPUs < requested 5 VCPUs')
        self.assertEqual(expected, result)

    def test_write(self):
        resources = {'stats': {}}
        self._vcpu.write(resources)
        expected = {
            'vcpus': 10,
            'vcpus_used': 0,
            'stats': {
                'num_vcpus': 10,
                'num_vcpus_used': 0
            }
        }
        # NOTE(review): sorted() over a dict yields only its sorted keys,
        # so this compares the key sets and never the values; a plain
        # assertEqual(expected, resources) would be a stronger check --
        # confirm VCPU.write's output before tightening.
        self.assertEqual(sorted(expected),
                         sorted(resources))
diff --git a/nova/tests/unit/compute/test_rpcapi.py b/nova/tests/unit/compute/test_rpcapi.py
new file mode 100644
index 0000000000..bf8e41215e
--- /dev/null
+++ b/nova/tests/unit/compute/test_rpcapi.py
@@ -0,0 +1,486 @@
+# Copyright 2012, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit Tests for nova.compute.rpcapi
+"""
+
+import contextlib
+
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+
+from nova.compute import rpcapi as compute_rpcapi
+from nova import context
+from nova.objects import block_device as objects_block_dev
+from nova.objects import network_request as objects_network_request
+from nova import test
+from nova.tests.unit import fake_block_device
+from nova.tests.unit.fake_instance import fake_instance_obj
+
+CONF = cfg.CONF
+
+
class ComputeRpcAPITestCase(test.TestCase):
    """Verify that each ComputeAPI method issues the expected RPC.

    Every test funnels through _test_compute_api, which mocks the rpc
    client and asserts on the rpc method ('call'/'cast'), the target
    version, the target server and the forwarded arguments.
    """

    def setUp(self):
        super(ComputeRpcAPITestCase, self).setUp()
        self.context = context.get_admin_context()
        instance_attr = {'host': 'fake_host',
                         'instance_type_id': 1}
        self.fake_instance_obj = fake_instance_obj(self.context,
                                                   **instance_attr)
        self.fake_instance = jsonutils.to_primitive(self.fake_instance_obj)
        self.fake_volume_bdm = jsonutils.to_primitive(
                fake_block_device.FakeDbBlockDeviceDict(
                    {'source_type': 'volume', 'destination_type': 'volume',
                     'instance_uuid': self.fake_instance['uuid'],
                     'volume_id': 'fake-volume-id'}))

    def test_serialized_instance_has_name(self):
        self.assertIn('name', self.fake_instance)

    def _test_compute_api(self, method, rpc_method, **kwargs):
        """Call ComputeAPI.``method`` and assert the resulting RPC.

        :param method: name of the ComputeAPI method to invoke
        :param rpc_method: 'call' or 'cast' -- the rpc primitive expected
        :param kwargs: arguments forwarded to the API method; special keys
            ``rpcapi_class``, ``version``, ``host_param``, ``destination``
            and ``return_bdm_object`` steer the assertions instead.
        """
        ctxt = context.RequestContext('fake_user', 'fake_project')

        rpcapi = kwargs.pop('rpcapi_class', compute_rpcapi.ComputeAPI)()
        self.assertIsNotNone(rpcapi.client)
        self.assertEqual(rpcapi.client.target.topic, CONF.compute_topic)

        orig_prepare = rpcapi.client.prepare
        expected_version = kwargs.pop('version', rpcapi.client.target.version)

        expected_kwargs = kwargs.copy()
        # version 3.23 still sends requested networks as plain tuples
        if ('requested_networks' in expected_kwargs and
               expected_version == '3.23'):
            expected_kwargs['requested_networks'] = []
            for requested_network in kwargs['requested_networks']:
                expected_kwargs['requested_networks'].append(
                    (requested_network.network_id,
                     str(requested_network.address),
                     requested_network.port_id))
        if 'host_param' in expected_kwargs:
            expected_kwargs['host'] = expected_kwargs.pop('host_param')
        else:
            expected_kwargs.pop('host', None)
        expected_kwargs.pop('destination', None)

        # these two methods take an extra flag when invoked as a 'call'
        cast_and_call = ['confirm_resize', 'stop_instance']
        if rpc_method == 'call' and method in cast_and_call:
            if method == 'confirm_resize':
                kwargs['cast'] = False
            else:
                kwargs['do_cast'] = False
        # figure out which host the rpc should be targeted at
        if 'host' in kwargs:
            host = kwargs['host']
        elif 'destination' in kwargs:
            host = kwargs['destination']
        elif 'instances' in kwargs:
            host = kwargs['instances'][0]['host']
        else:
            host = kwargs['instance']['host']

        with contextlib.nested(
            mock.patch.object(rpcapi.client, rpc_method),
            mock.patch.object(rpcapi.client, 'prepare'),
            mock.patch.object(rpcapi.client, 'can_send_version'),
        ) as (
            rpc_mock, prepare_mock, csv_mock
        ):
            prepare_mock.return_value = rpcapi.client
            if 'return_bdm_object' in kwargs:
                del kwargs['return_bdm_object']
                rpc_mock.return_value = objects_block_dev.BlockDeviceMapping()
            elif rpc_method == 'call':
                rpc_mock.return_value = 'foo'
            else:
                rpc_mock.return_value = None
            # delegate version checks to the real (unmocked) client
            csv_mock.side_effect = (
                lambda v: orig_prepare(version=v).can_send_version())

            retval = getattr(rpcapi, method)(ctxt, **kwargs)
            self.assertEqual(retval, rpc_mock.return_value)

            prepare_mock.assert_called_once_with(version=expected_version,
                                                 server=host)
            rpc_mock.assert_called_once_with(ctxt, method, **expected_kwargs)

    def test_add_aggregate_host(self):
        self._test_compute_api('add_aggregate_host', 'cast',
                aggregate={'id': 'fake_id'}, host_param='host', host='host',
                slave_info={})

    def test_add_fixed_ip_to_instance(self):
        self._test_compute_api('add_fixed_ip_to_instance', 'cast',
                instance=self.fake_instance_obj, network_id='id',
                version='3.12')

    def test_attach_interface(self):
        self._test_compute_api('attach_interface', 'call',
                instance=self.fake_instance_obj, network_id='id',
                port_id='id2', version='3.17', requested_ip='192.168.1.50')

    def test_attach_volume(self):
        self._test_compute_api('attach_volume', 'cast',
                instance=self.fake_instance_obj, volume_id='id',
                mountpoint='mp', bdm=self.fake_volume_bdm, version='3.16')

    def test_change_instance_metadata(self):
        self._test_compute_api('change_instance_metadata', 'cast',
                instance=self.fake_instance_obj, diff={}, version='3.7')

    def test_check_can_live_migrate_destination(self):
        self._test_compute_api('check_can_live_migrate_destination', 'call',
                instance=self.fake_instance_obj,
                destination='dest', block_migration=True,
                disk_over_commit=True, version='3.32')

    def test_check_can_live_migrate_source(self):
        self._test_compute_api('check_can_live_migrate_source', 'call',
                instance=self.fake_instance_obj,
                dest_check_data={"test": "data"}, version='3.32')

    def test_check_instance_shared_storage(self):
        self._test_compute_api('check_instance_shared_storage', 'call',
                instance=self.fake_instance_obj, data='foo',
                version='3.29')

    def test_confirm_resize_cast(self):
        self._test_compute_api('confirm_resize', 'cast',
                instance=self.fake_instance_obj, migration={'id': 'foo'},
                host='host', reservations=list('fake_res'))

    def test_confirm_resize_call(self):
        self._test_compute_api('confirm_resize', 'call',
                instance=self.fake_instance_obj, migration={'id': 'foo'},
                host='host', reservations=list('fake_res'))

    def test_detach_interface(self):
        self._test_compute_api('detach_interface', 'cast',
                version='3.17', instance=self.fake_instance_obj,
                port_id='fake_id')

    def test_detach_volume(self):
        self._test_compute_api('detach_volume', 'cast',
                instance=self.fake_instance_obj, volume_id='id',
                version='3.25')

    def test_finish_resize(self):
        self._test_compute_api('finish_resize', 'cast',
                instance=self.fake_instance_obj, migration={'id': 'foo'},
                image='image', disk_info='disk_info', host='host',
                reservations=list('fake_res'))

    def test_finish_revert_resize(self):
        self._test_compute_api('finish_revert_resize', 'cast',
                instance=self.fake_instance_obj, migration={'id': 'fake_id'},
                host='host', reservations=list('fake_res'))

    def test_get_console_output(self):
        self._test_compute_api('get_console_output', 'call',
                instance=self.fake_instance_obj, tail_length='tl',
                version='3.28')

    def test_get_console_pool_info(self):
        self._test_compute_api('get_console_pool_info', 'call',
                console_type='type', host='host')

    def test_get_console_topic(self):
        self._test_compute_api('get_console_topic', 'call', host='host')

    def test_get_diagnostics(self):
        self._test_compute_api('get_diagnostics', 'call',
                instance=self.fake_instance_obj, version='3.18')

    def test_get_instance_diagnostics(self):
        self._test_compute_api('get_instance_diagnostics', 'call',
                instance=self.fake_instance, version='3.31')

    def test_get_vnc_console(self):
        self._test_compute_api('get_vnc_console', 'call',
                instance=self.fake_instance_obj, console_type='type',
                version='3.2')

    def test_get_spice_console(self):
        self._test_compute_api('get_spice_console', 'call',
                instance=self.fake_instance_obj, console_type='type',
                version='3.1')

    def test_get_rdp_console(self):
        self._test_compute_api('get_rdp_console', 'call',
                instance=self.fake_instance_obj, console_type='type',
                version='3.10')

    def test_get_serial_console(self):
        self._test_compute_api('get_serial_console', 'call',
                instance=self.fake_instance, console_type='serial',
                version='3.34')

    def test_validate_console_port(self):
        self._test_compute_api('validate_console_port', 'call',
                instance=self.fake_instance_obj, port="5900",
                console_type="novnc", version='3.3')

    def test_host_maintenance_mode(self):
        self._test_compute_api('host_maintenance_mode', 'call',
                host_param='param', mode='mode', host='host')

    def test_host_power_action(self):
        self._test_compute_api('host_power_action', 'call', action='action',
                host='host')

    def test_inject_network_info(self):
        self._test_compute_api('inject_network_info', 'cast',
                instance=self.fake_instance_obj)

    def test_live_migration(self):
        self._test_compute_api('live_migration', 'cast',
                instance=self.fake_instance_obj, dest='dest',
                block_migration='blockity_block', host='tsoh',
                migrate_data={}, version='3.26')

    def test_post_live_migration_at_destination(self):
        self._test_compute_api('post_live_migration_at_destination', 'cast',
                instance=self.fake_instance_obj,
                block_migration='block_migration', host='host', version='3.14')

    def test_pause_instance(self):
        self._test_compute_api('pause_instance', 'cast',
                instance=self.fake_instance_obj)

    def test_soft_delete_instance(self):
        self._test_compute_api('soft_delete_instance', 'cast',
                instance=self.fake_instance_obj,
                reservations=['uuid1', 'uuid2'])

    def test_swap_volume(self):
        self._test_compute_api('swap_volume', 'cast',
                instance=self.fake_instance_obj, old_volume_id='oldid',
                new_volume_id='newid')

    def test_restore_instance(self):
        self._test_compute_api('restore_instance', 'cast',
                instance=self.fake_instance_obj, version='3.20')

    def test_pre_live_migration(self):
        self._test_compute_api('pre_live_migration', 'call',
                instance=self.fake_instance_obj,
                block_migration='block_migration', disk='disk', host='host',
                migrate_data=None, version='3.19')

    def test_prep_resize(self):
        self._test_compute_api('prep_resize', 'cast',
                instance=self.fake_instance_obj, instance_type='fake_type',
                image='fake_image', host='host',
                reservations=list('fake_res'),
                request_spec='fake_spec',
                filter_properties={'fakeprop': 'fakeval'},
                node='node')

    def test_reboot_instance(self):
        self.maxDiff = None
        self._test_compute_api('reboot_instance', 'cast',
                instance=self.fake_instance_obj,
                block_device_info={},
                reboot_type='type')

    def test_rebuild_instance(self):
        self._test_compute_api('rebuild_instance', 'cast', new_pass='None',
                injected_files='None', image_ref='None', orig_image_ref='None',
                bdms=[], instance=self.fake_instance_obj, host='new_host',
                orig_sys_metadata=None, recreate=True, on_shared_storage=True,
                preserve_ephemeral=True, version='3.21')

    def test_reserve_block_device_name(self):
        self._test_compute_api('reserve_block_device_name', 'call',
                instance=self.fake_instance_obj, device='device',
                volume_id='id', disk_bus='ide', device_type='cdrom',
                version='3.35', return_bdm_object=True)

    # NOTE: previously named 'refresh_provider_fw_rules' (no 'test_'
    # prefix), so the test runner silently never executed it.
    def test_refresh_provider_fw_rules(self):
        self._test_compute_api('refresh_provider_fw_rules', 'cast',
                host='host')

    def test_refresh_security_group_rules(self):
        self._test_compute_api('refresh_security_group_rules', 'cast',
                rpcapi_class=compute_rpcapi.SecurityGroupAPI,
                security_group_id='id', host='host')

    def test_refresh_security_group_members(self):
        self._test_compute_api('refresh_security_group_members', 'cast',
                rpcapi_class=compute_rpcapi.SecurityGroupAPI,
                security_group_id='id', host='host')

    def test_remove_aggregate_host(self):
        self._test_compute_api('remove_aggregate_host', 'cast',
                aggregate={'id': 'fake_id'}, host_param='host', host='host',
                slave_info={})

    def test_remove_fixed_ip_from_instance(self):
        self._test_compute_api('remove_fixed_ip_from_instance', 'cast',
                instance=self.fake_instance_obj, address='addr',
                version='3.13')

    def test_remove_volume_connection(self):
        self._test_compute_api('remove_volume_connection', 'call',
                instance=self.fake_instance, volume_id='id', host='host',
                version='3.30')

    def test_rescue_instance(self):
        self.flags(compute='3.9', group='upgrade_levels')
        self._test_compute_api('rescue_instance', 'cast',
            instance=self.fake_instance_obj, rescue_password='pw',
            version='3.9')

    def test_rescue_instance_with_rescue_image_ref_passed(self):
        self._test_compute_api('rescue_instance', 'cast',
            instance=self.fake_instance_obj, rescue_password='pw',
            rescue_image_ref='fake_image_ref', version='3.24')

    def test_reset_network(self):
        self._test_compute_api('reset_network', 'cast',
                instance=self.fake_instance_obj)

    def test_resize_instance(self):
        self._test_compute_api('resize_instance', 'cast',
                instance=self.fake_instance_obj, migration={'id': 'fake_id'},
                image='image', instance_type={'id': 1},
                reservations=list('fake_res'))

    def test_resume_instance(self):
        self._test_compute_api('resume_instance', 'cast',
                               instance=self.fake_instance_obj)

    def test_revert_resize(self):
        self._test_compute_api('revert_resize', 'cast',
                instance=self.fake_instance_obj, migration={'id': 'fake_id'},
                host='host', reservations=list('fake_res'))

    def test_rollback_live_migration_at_destination(self):
        self._test_compute_api('rollback_live_migration_at_destination',
                'cast', instance=self.fake_instance_obj, host='host',
                destroy_disks=True, migrate_data=None, version='3.32')

    def test_run_instance(self):
        self._test_compute_api('run_instance', 'cast',
                instance=self.fake_instance_obj, host='fake_host',
                request_spec='fake_spec', filter_properties={},
                requested_networks='networks', injected_files='files',
                admin_password='pw', is_first_time=True, node='node',
                legacy_bdm_in_spec=False, version='3.27')

    def test_set_admin_password(self):
        self._test_compute_api('set_admin_password', 'call',
                instance=self.fake_instance_obj, new_pass='pw',
                version='3.8')

    def test_set_host_enabled(self):
        self._test_compute_api('set_host_enabled', 'call',
                enabled='enabled', host='host')

    def test_get_host_uptime(self):
        self._test_compute_api('get_host_uptime', 'call', host='host')

    def test_backup_instance(self):
        self._test_compute_api('backup_instance', 'cast',
                instance=self.fake_instance_obj, image_id='id',
                backup_type='type', rotation='rotation')

    def test_snapshot_instance(self):
        self._test_compute_api('snapshot_instance', 'cast',
                instance=self.fake_instance_obj, image_id='id')

    def test_start_instance(self):
        self._test_compute_api('start_instance', 'cast',
                instance=self.fake_instance_obj)

    def test_stop_instance_cast(self):
        self._test_compute_api('stop_instance', 'cast',
                instance=self.fake_instance_obj)

    def test_stop_instance_call(self):
        self._test_compute_api('stop_instance', 'call',
                instance=self.fake_instance_obj)

    def test_suspend_instance(self):
        self._test_compute_api('suspend_instance', 'cast',
                               instance=self.fake_instance_obj)

    def test_terminate_instance(self):
        self._test_compute_api('terminate_instance', 'cast',
                instance=self.fake_instance_obj, bdms=[],
                reservations=['uuid1', 'uuid2'], version='3.22')

    def test_unpause_instance(self):
        self._test_compute_api('unpause_instance', 'cast',
                instance=self.fake_instance_obj)

    def test_unrescue_instance(self):
        self._test_compute_api('unrescue_instance', 'cast',
                instance=self.fake_instance_obj, version='3.11')

    def test_shelve_instance(self):
        self._test_compute_api('shelve_instance', 'cast',
                instance=self.fake_instance_obj, image_id='image_id')

    def test_shelve_offload_instance(self):
        self._test_compute_api('shelve_offload_instance', 'cast',
                instance=self.fake_instance_obj)

    def test_unshelve_instance(self):
        self._test_compute_api('unshelve_instance', 'cast',
                instance=self.fake_instance_obj, host='host', image='image',
                filter_properties={'fakeprop': 'fakeval'}, node='node',
                version='3.15')

    def test_volume_snapshot_create(self):
        self._test_compute_api('volume_snapshot_create', 'cast',
                instance=self.fake_instance, volume_id='fake_id',
                create_info={}, version='3.6')

    def test_volume_snapshot_delete(self):
        self._test_compute_api('volume_snapshot_delete', 'cast',
                instance=self.fake_instance_obj, volume_id='fake_id',
                snapshot_id='fake_id2', delete_info={}, version='3.6')

    def test_external_instance_event(self):
        self._test_compute_api('external_instance_event', 'cast',
                               instances=[self.fake_instance_obj],
                               events=['event'],
                               version='3.23')

    def test_build_and_run_instance(self):
        self._test_compute_api('build_and_run_instance', 'cast',
                instance=self.fake_instance_obj, host='host', image='image',
                request_spec={'request': 'spec'}, filter_properties=[],
                admin_password='passwd', injected_files=None,
                requested_networks=['network1'], security_groups=None,
                block_device_mapping=None, node='node', limits=[],
                version='3.33')

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_build_and_run_instance_icehouse_compat(self, is_neutron):
        self.flags(compute='icehouse', group='upgrade_levels')
        self._test_compute_api('build_and_run_instance', 'cast',
                instance=self.fake_instance_obj, host='host', image='image',
                request_spec={'request': 'spec'}, filter_properties=[],
                admin_password='passwd', injected_files=None,
                requested_networks=objects_network_request.NetworkRequestList(
                    objects=[objects_network_request.NetworkRequest(
                        network_id="fake_network_id", address="10.0.0.1",
                        port_id="fake_port_id")]),
                security_groups=None,
                block_device_mapping=None, node='node', limits=[],
                version='3.23')
diff --git a/nova/tests/unit/compute/test_shelve.py b/nova/tests/unit/compute/test_shelve.py
new file mode 100644
index 0000000000..3e792ae893
--- /dev/null
+++ b/nova/tests/unit/compute/test_shelve.py
@@ -0,0 +1,414 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import iso8601
+import mock
+import mox
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+from nova.compute import claims
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import db
+from nova import objects
+from nova.objects import base as obj_base
+from nova.tests.unit.compute import test_compute
+from nova.tests.unit.image import fake as fake_image
+from nova import utils
+
+CONF = cfg.CONF
+CONF.import_opt('shelved_offload_time', 'nova.compute.manager')
+
+
+def _fake_resources():
+ resources = {
+ 'memory_mb': 2048,
+ 'memory_mb_used': 0,
+ 'free_ram_mb': 2048,
+ 'local_gb': 20,
+ 'local_gb_used': 0,
+ 'free_disk_gb': 20,
+ 'vcpus': 2,
+ 'vcpus_used': 0
+ }
+ return resources
+
+
+class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
+ def _shelve_instance(self, shelved_offload_time, clean_shutdown=True):
+ CONF.set_override('shelved_offload_time', shelved_offload_time)
+ instance = self._create_fake_instance_obj()
+ db_instance = obj_base.obj_to_primitive(instance)
+ image_id = 'fake_image_id'
+ host = 'fake-mini'
+ cur_time = timeutils.utcnow()
+ timeutils.set_time_override(cur_time)
+ instance.task_state = task_states.SHELVING
+ instance.save()
+ sys_meta = dict(instance.system_metadata)
+ sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
+ sys_meta['shelved_image_id'] = image_id
+ sys_meta['shelved_host'] = host
+ db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta)
+
+ self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
+ self.mox.StubOutWithMock(self.compute.driver, 'snapshot')
+ self.mox.StubOutWithMock(self.compute.driver, 'power_off')
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ self.compute._notify_about_instance_usage(self.context, instance,
+ 'shelve.start')
+ if clean_shutdown:
+ self.compute.driver.power_off(instance,
+ CONF.shutdown_timeout,
+ self.compute.SHUTDOWN_RETRY_INTERVAL)
+ else:
+ self.compute.driver.power_off(instance, 0, 0)
+ self.compute._get_power_state(self.context,
+ instance).AndReturn(123)
+ self.compute.driver.snapshot(self.context, instance, 'fake_image_id',
+ mox.IgnoreArg())
+
+ update_values = {'power_state': 123,
+ 'vm_state': vm_states.SHELVED,
+ 'task_state': None,
+ 'expected_task_state': [task_states.SHELVING,
+ task_states.SHELVING_IMAGE_UPLOADING],
+ 'system_metadata': sys_meta}
+ if CONF.shelved_offload_time == 0:
+ update_values['task_state'] = task_states.SHELVING_OFFLOADING
+ db.instance_update_and_get_original(self.context, instance['uuid'],
+ update_values, update_cells=False,
+ columns_to_join=['metadata', 'system_metadata', 'info_cache',
+ 'security_groups'],
+ ).AndReturn((db_instance,
+ db_instance))
+ self.compute._notify_about_instance_usage(self.context,
+ instance, 'shelve.end')
+ if CONF.shelved_offload_time == 0:
+ self.compute._notify_about_instance_usage(self.context, instance,
+ 'shelve_offload.start')
+ self.compute.driver.power_off(instance)
+ self.compute._get_power_state(self.context,
+ instance).AndReturn(123)
+ db.instance_update_and_get_original(self.context,
+ instance['uuid'],
+ {'power_state': 123, 'host': None, 'node': None,
+ 'vm_state': vm_states.SHELVED_OFFLOADED,
+ 'task_state': None,
+ 'expected_task_state': [task_states.SHELVING,
+ task_states.SHELVING_OFFLOADING]},
+ update_cells=False,
+ columns_to_join=['metadata', 'system_metadata',
+ 'info_cache',
+ 'security_groups'],
+ ).AndReturn((db_instance, db_instance))
+ self.compute._notify_about_instance_usage(self.context, instance,
+ 'shelve_offload.end')
+ self.mox.ReplayAll()
+
+ self.compute.shelve_instance(self.context, instance,
+ image_id=image_id, clean_shutdown=clean_shutdown)
+
+ def test_shelve(self):
+ self._shelve_instance(-1)
+
+ def test_shelve_forced_shutdown(self):
+ self._shelve_instance(-1, clean_shutdown=False)
+
+ def test_shelve_offload(self):
+ self._shelve_instance(0)
+
+ def test_shelve_volume_backed(self):
+ instance = self._create_fake_instance_obj()
+ instance.task_state = task_states.SHELVING
+ instance.save()
+ db_instance = obj_base.obj_to_primitive(instance)
+ host = 'fake-mini'
+ cur_time = timeutils.utcnow()
+ timeutils.set_time_override(cur_time)
+ sys_meta = dict(instance.system_metadata)
+ sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
+ sys_meta['shelved_image_id'] = None
+ sys_meta['shelved_host'] = host
+ db_instance['system_metadata'] = utils.dict_to_metadata(sys_meta)
+
+ self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
+ self.mox.StubOutWithMock(self.compute.driver, 'power_off')
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ self.compute._notify_about_instance_usage(self.context, instance,
+ 'shelve_offload.start')
+ self.compute.driver.power_off(instance)
+ self.compute._get_power_state(self.context,
+ instance).AndReturn(123)
+ db.instance_update_and_get_original(self.context, instance['uuid'],
+ {'power_state': 123, 'host': None, 'node': None,
+ 'vm_state': vm_states.SHELVED_OFFLOADED,
+ 'task_state': None,
+ 'expected_task_state': [task_states.SHELVING,
+ task_states.SHELVING_OFFLOADING]},
+ update_cells=False,
+ columns_to_join=['metadata', 'system_metadata',
+ 'info_cache', 'security_groups'],
+ ).AndReturn((db_instance, db_instance))
+ self.compute._notify_about_instance_usage(self.context, instance,
+ 'shelve_offload.end')
+ self.mox.ReplayAll()
+
+ self.compute.shelve_offload_instance(self.context, instance)
+
+ def test_unshelve(self):
+ db_instance = self._create_fake_instance()
+ instance = objects.Instance.get_by_uuid(
+ self.context, db_instance['uuid'],
+ expected_attrs=['metadata', 'system_metadata'])
+ instance.task_state = task_states.UNSHELVING
+ instance.save()
+ image = {'id': 'fake_id'}
+ host = 'fake-mini'
+ node = test_compute.NODENAME
+ limits = {}
+ filter_properties = {'limits': limits}
+ cur_time = timeutils.utcnow()
+ cur_time_tz = cur_time.replace(tzinfo=iso8601.iso8601.Utc())
+ timeutils.set_time_override(cur_time)
+ sys_meta = dict(instance.system_metadata)
+ sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
+ sys_meta['shelved_image_id'] = image['id']
+ sys_meta['shelved_host'] = host
+
+ self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
+ self.mox.StubOutWithMock(self.compute, '_prep_block_device')
+ self.mox.StubOutWithMock(self.compute.driver, 'spawn')
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.rt, 'instance_claim')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'migrate_instance_finish')
+
+ self.deleted_image_id = None
+
+ def fake_delete(self2, ctxt, image_id):
+ self.deleted_image_id = image_id
+
+ def fake_claim(context, instance, limits):
+ instance.host = self.compute.host
+ return claims.Claim(context, db_instance,
+ self.rt, _fake_resources())
+
+ fake_image.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
+
+ self.compute._notify_about_instance_usage(self.context, instance,
+ 'unshelve.start')
+ db.instance_update_and_get_original(self.context, instance['uuid'],
+ {'task_state': task_states.SPAWNING},
+ update_cells=False,
+ columns_to_join=['metadata', 'system_metadata'],
+ ).AndReturn((db_instance, db_instance))
+ self.compute._prep_block_device(self.context, instance,
+ mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm')
+ db_instance['key_data'] = None
+ db_instance['auto_disk_config'] = None
+ self.compute.network_api.migrate_instance_finish(
+ self.context, instance, {'source_compute': '',
+ 'dest_compute': self.compute.host})
+ self.compute.driver.spawn(self.context, instance, image,
+ injected_files=[], admin_password=None,
+ network_info=[],
+ block_device_info='fake_bdm')
+ self.compute._get_power_state(self.context, instance).AndReturn(123)
+ db.instance_update_and_get_original(self.context, instance['uuid'],
+ {'power_state': 123,
+ 'vm_state': vm_states.ACTIVE,
+ 'task_state': None,
+ 'image_ref': instance['image_ref'],
+ 'key_data': None,
+ 'host': self.compute.host, # rt.instance_claim set this
+ 'auto_disk_config': False,
+ 'expected_task_state': task_states.SPAWNING,
+ 'launched_at': cur_time_tz},
+ update_cells=False,
+ columns_to_join=['metadata', 'system_metadata']
+ ).AndReturn((db_instance,
+ dict(db_instance,
+ host=self.compute.host,
+ metadata={})))
+ self.compute._notify_about_instance_usage(self.context, instance,
+ 'unshelve.end')
+ self.mox.ReplayAll()
+
+ with mock.patch.object(self.rt, 'instance_claim',
+ side_effect=fake_claim):
+ self.compute.unshelve_instance(self.context, instance, image=image,
+ filter_properties=filter_properties, node=node)
+ self.assertEqual(image['id'], self.deleted_image_id)
+ self.assertEqual(instance.host, self.compute.host)
+
+ def test_unshelve_volume_backed(self):
+ db_instance = self._create_fake_instance()
+ node = test_compute.NODENAME
+ limits = {}
+ filter_properties = {'limits': limits}
+ cur_time = timeutils.utcnow()
+ cur_time_tz = cur_time.replace(tzinfo=iso8601.iso8601.Utc())
+ timeutils.set_time_override(cur_time)
+ instance = objects.Instance.get_by_uuid(
+ self.context, db_instance['uuid'],
+ expected_attrs=['metadata', 'system_metadata'])
+ instance.task_state = task_states.UNSHELVING
+ instance.save()
+
+ self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
+ self.mox.StubOutWithMock(self.compute, '_prep_block_device')
+ self.mox.StubOutWithMock(self.compute.driver, 'spawn')
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.rt, 'instance_claim')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'migrate_instance_finish')
+
+ self.compute._notify_about_instance_usage(self.context, instance,
+ 'unshelve.start')
+ db.instance_update_and_get_original(self.context, instance['uuid'],
+ {'task_state': task_states.SPAWNING},
+ update_cells=False,
+ columns_to_join=['metadata', 'system_metadata']
+ ).AndReturn((db_instance, db_instance))
+ self.compute._prep_block_device(self.context, instance,
+ mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm')
+ db_instance['key_data'] = None
+ db_instance['auto_disk_config'] = None
+ self.compute.network_api.migrate_instance_finish(
+ self.context, instance, {'source_compute': '',
+ 'dest_compute': self.compute.host})
+ self.rt.instance_claim(self.context, instance, limits).AndReturn(
+ claims.Claim(self.context, db_instance, self.rt,
+ _fake_resources()))
+ self.compute.driver.spawn(self.context, instance, None,
+ injected_files=[], admin_password=None,
+ network_info=[],
+ block_device_info='fake_bdm')
+ self.compute._get_power_state(self.context, instance).AndReturn(123)
+ db.instance_update_and_get_original(self.context, instance['uuid'],
+ {'power_state': 123,
+ 'vm_state': vm_states.ACTIVE,
+ 'task_state': None,
+ 'key_data': None,
+ 'auto_disk_config': False,
+ 'expected_task_state': task_states.SPAWNING,
+ 'launched_at': cur_time_tz},
+ update_cells=False,
+ columns_to_join=['metadata', 'system_metadata']
+ ).AndReturn((db_instance, db_instance))
+ self.compute._notify_about_instance_usage(self.context, instance,
+ 'unshelve.end')
+ self.mox.ReplayAll()
+
+ self.compute.unshelve_instance(self.context, instance, image=None,
+ filter_properties=filter_properties, node=node)
+
+ def test_shelved_poll_none_exist(self):
+ self.mox.StubOutWithMock(self.compute.driver, 'destroy')
+ self.mox.StubOutWithMock(timeutils, 'is_older_than')
+ self.mox.ReplayAll()
+ self.compute._poll_shelved_instances(self.context)
+
+ def test_shelved_poll_not_timedout(self):
+ instance = self._create_fake_instance_obj()
+ sys_meta = instance.system_metadata
+ shelved_time = timeutils.utcnow()
+ timeutils.set_time_override(shelved_time)
+ timeutils.advance_time_seconds(CONF.shelved_offload_time - 1)
+ sys_meta['shelved_at'] = timeutils.strtime(at=shelved_time)
+ db.instance_update_and_get_original(self.context, instance['uuid'],
+ {'vm_state': vm_states.SHELVED, 'system_metadata': sys_meta})
+
+ self.mox.StubOutWithMock(self.compute.driver, 'destroy')
+ self.mox.ReplayAll()
+ self.compute._poll_shelved_instances(self.context)
+
+ def test_shelved_poll_timedout(self):
+ instance = self._create_fake_instance_obj()
+ sys_meta = instance.system_metadata
+ shelved_time = timeutils.utcnow()
+ timeutils.set_time_override(shelved_time)
+ timeutils.advance_time_seconds(CONF.shelved_offload_time + 1)
+ sys_meta['shelved_at'] = timeutils.strtime(at=shelved_time)
+ (old, instance) = db.instance_update_and_get_original(self.context,
+ instance['uuid'], {'vm_state': vm_states.SHELVED,
+ 'system_metadata': sys_meta})
+
+ def fake_destroy(inst, nw_info, bdm):
+ # NOTE(alaski) There are too many differences between an instance
+ # as returned by instance_update_and_get_original and
+ # instance_get_all_by_filters so just compare the uuid.
+ self.assertEqual(instance['uuid'], inst['uuid'])
+
+ self.stubs.Set(self.compute.driver, 'destroy', fake_destroy)
+ self.compute._poll_shelved_instances(self.context)
+
+
+class ShelveComputeAPITestCase(test_compute.BaseTestCase):
+ def test_shelve(self):
+ # Ensure instance can be shelved.
+ fake_instance = self._create_fake_instance_obj(
+ {'display_name': 'vm01'})
+ instance = fake_instance
+
+ self.assertIsNone(instance['task_state'])
+
+ def fake_init(self2):
+ # In original _FakeImageService.__init__(), some fake images are
+ # created. To verify the snapshot name of this test only, here
+ # sets a fake method.
+ self2.images = {}
+
+ def fake_create(self2, ctxt, metadata, data=None):
+ self.assertEqual(metadata['name'], 'vm01-shelved')
+ metadata['id'] = '8b24ed3f-ee57-43bc-bc2e-fb2e9482bc42'
+ return metadata
+
+ fake_image.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake_image._FakeImageService, '__init__', fake_init)
+ self.stubs.Set(fake_image._FakeImageService, 'create', fake_create)
+
+ self.compute_api.shelve(self.context, instance)
+
+ instance.refresh()
+ self.assertEqual(instance.task_state, task_states.SHELVING)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_unshelve(self):
+ # Ensure instance can be unshelved.
+ instance = self._create_fake_instance_obj()
+
+ self.assertIsNone(instance['task_state'])
+
+ self.compute_api.shelve(self.context, instance)
+
+ instance.refresh()
+ instance.task_state = None
+ instance.vm_state = vm_states.SHELVED
+ instance.save()
+
+ self.compute_api.unshelve(self.context, instance)
+
+ instance.refresh()
+ self.assertEqual(instance.task_state, task_states.UNSHELVING)
+
+ db.instance_destroy(self.context, instance['uuid'])
diff --git a/nova/tests/compute/test_stats.py b/nova/tests/unit/compute/test_stats.py
index c90314b0fc..c90314b0fc 100644
--- a/nova/tests/compute/test_stats.py
+++ b/nova/tests/unit/compute/test_stats.py
diff --git a/nova/tests/compute/test_tracker.py b/nova/tests/unit/compute/test_tracker.py
index 0a49b93b0c..0a49b93b0c 100644
--- a/nova/tests/compute/test_tracker.py
+++ b/nova/tests/unit/compute/test_tracker.py
diff --git a/nova/tests/compute/test_virtapi.py b/nova/tests/unit/compute/test_virtapi.py
index 5e58ba05d1..5e58ba05d1 100644
--- a/nova/tests/compute/test_virtapi.py
+++ b/nova/tests/unit/compute/test_virtapi.py
diff --git a/nova/tests/compute/test_vmmode.py b/nova/tests/unit/compute/test_vmmode.py
index 67475ecbb3..67475ecbb3 100644
--- a/nova/tests/compute/test_vmmode.py
+++ b/nova/tests/unit/compute/test_vmmode.py
diff --git a/nova/tests/conductor/__init__.py b/nova/tests/unit/conductor/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/conductor/__init__.py
+++ b/nova/tests/unit/conductor/__init__.py
diff --git a/nova/tests/conductor/tasks/__init__.py b/nova/tests/unit/conductor/tasks/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/conductor/tasks/__init__.py
+++ b/nova/tests/unit/conductor/tasks/__init__.py
diff --git a/nova/tests/unit/conductor/tasks/test_live_migrate.py b/nova/tests/unit/conductor/tasks/test_live_migrate.py
new file mode 100644
index 0000000000..1d7c0340b9
--- /dev/null
+++ b/nova/tests/unit/conductor/tasks/test_live_migrate.py
@@ -0,0 +1,384 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mox
+
+from nova.compute import power_state
+from nova.compute import utils as compute_utils
+from nova.conductor.tasks import live_migrate
+from nova import db
+from nova import exception
+from nova import objects
+from nova.scheduler import utils as scheduler_utils
+from nova import test
+from nova.tests.unit import fake_instance
+
+
+class LiveMigrationTaskTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(LiveMigrationTaskTestCase, self).setUp()
+ self.context = "context"
+ self.instance_host = "host"
+ self.instance_uuid = "uuid"
+ self.instance_image = "image_ref"
+ db_instance = fake_instance.fake_db_instance(
+ host=self.instance_host,
+ uuid=self.instance_uuid,
+ power_state=power_state.RUNNING,
+ memory_mb=512,
+ image_ref=self.instance_image)
+ self.instance = objects.Instance._from_db_object(
+ self.context, objects.Instance(), db_instance)
+ self.destination = "destination"
+ self.block_migration = "bm"
+ self.disk_over_commit = "doc"
+ self._generate_task()
+
+ def _generate_task(self):
+ self.task = live_migrate.LiveMigrationTask(self.context,
+ self.instance, self.destination, self.block_migration,
+ self.disk_over_commit)
+
+ def test_execute_with_destination(self):
+ self.mox.StubOutWithMock(self.task, '_check_host_is_up')
+ self.mox.StubOutWithMock(self.task, '_check_requested_destination')
+ self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration')
+
+ self.task._check_host_is_up(self.instance_host)
+ self.task._check_requested_destination()
+ self.task.compute_rpcapi.live_migration(self.context,
+ host=self.instance_host,
+ instance=self.instance,
+ dest=self.destination,
+ block_migration=self.block_migration,
+ migrate_data=None).AndReturn("bob")
+
+ self.mox.ReplayAll()
+ self.assertEqual("bob", self.task.execute())
+
+ def test_execute_without_destination(self):
+ self.destination = None
+ self._generate_task()
+ self.assertIsNone(self.task.destination)
+
+ self.mox.StubOutWithMock(self.task, '_check_host_is_up')
+ self.mox.StubOutWithMock(self.task, '_find_destination')
+ self.mox.StubOutWithMock(self.task.compute_rpcapi, 'live_migration')
+
+ self.task._check_host_is_up(self.instance_host)
+ self.task._find_destination().AndReturn("found_host")
+ self.task.compute_rpcapi.live_migration(self.context,
+ host=self.instance_host,
+ instance=self.instance,
+ dest="found_host",
+ block_migration=self.block_migration,
+ migrate_data=None).AndReturn("bob")
+
+ self.mox.ReplayAll()
+ self.assertEqual("bob", self.task.execute())
+
+ def test_check_instance_is_running_passes(self):
+ self.task._check_instance_is_running()
+
+ def test_check_instance_is_running_fails_when_shutdown(self):
+ self.task.instance['power_state'] = power_state.SHUTDOWN
+ self.assertRaises(exception.InstanceNotRunning,
+ self.task._check_instance_is_running)
+
+ def test_check_instance_host_is_up(self):
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+ self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
+
+ db.service_get_by_compute_host(self.context,
+ "host").AndReturn("service")
+ self.task.servicegroup_api.service_is_up("service").AndReturn(True)
+
+ self.mox.ReplayAll()
+ self.task._check_host_is_up("host")
+
+ def test_check_instance_host_is_up_fails_if_not_up(self):
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+ self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
+
+ db.service_get_by_compute_host(self.context,
+ "host").AndReturn("service")
+ self.task.servicegroup_api.service_is_up("service").AndReturn(False)
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ComputeServiceUnavailable,
+ self.task._check_host_is_up, "host")
+
+ def test_check_instance_host_is_up_fails_if_not_found(self):
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+
+ db.service_get_by_compute_host(self.context,
+ "host").AndRaise(exception.NotFound)
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ComputeServiceUnavailable,
+ self.task._check_host_is_up, "host")
+
+ def test_check_requested_destination(self):
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+ self.mox.StubOutWithMock(self.task, '_get_compute_info')
+ self.mox.StubOutWithMock(self.task.servicegroup_api, 'service_is_up')
+ self.mox.StubOutWithMock(self.task.compute_rpcapi,
+ 'check_can_live_migrate_destination')
+
+ db.service_get_by_compute_host(self.context,
+ self.destination).AndReturn("service")
+ self.task.servicegroup_api.service_is_up("service").AndReturn(True)
+ hypervisor_details = {
+ "hypervisor_type": "a",
+ "hypervisor_version": 6.1,
+ "free_ram_mb": 513
+ }
+ self.task._get_compute_info(self.destination)\
+ .AndReturn(hypervisor_details)
+ self.task._get_compute_info(self.instance_host)\
+ .AndReturn(hypervisor_details)
+ self.task._get_compute_info(self.destination)\
+ .AndReturn(hypervisor_details)
+
+ self.task.compute_rpcapi.check_can_live_migrate_destination(
+ self.context, self.instance, self.destination,
+ self.block_migration, self.disk_over_commit).AndReturn(
+ "migrate_data")
+
+ self.mox.ReplayAll()
+ self.task._check_requested_destination()
+ self.assertEqual("migrate_data", self.task.migrate_data)
+
+ def test_check_requested_destination_fails_with_same_dest(self):
+ self.task.destination = "same"
+ self.task.source = "same"
+ self.assertRaises(exception.UnableToMigrateToSelf,
+ self.task._check_requested_destination)
+
+ def test_check_requested_destination_fails_when_destination_is_up(self):
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+
+ db.service_get_by_compute_host(self.context,
+ self.destination).AndRaise(exception.NotFound)
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ComputeServiceUnavailable,
+ self.task._check_requested_destination)
+
+ def test_check_requested_destination_fails_with_not_enough_memory(self):
+ self.mox.StubOutWithMock(self.task, '_check_host_is_up')
+ self.mox.StubOutWithMock(db, 'service_get_by_compute_host')
+
+ self.task._check_host_is_up(self.destination)
+ db.service_get_by_compute_host(self.context,
+ self.destination).AndReturn({
+ "compute_node": [{"free_ram_mb": 511}]
+ })
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.MigrationPreCheckError,
+ self.task._check_requested_destination)
+
+ def test_check_requested_destination_fails_with_hypervisor_diff(self):
+ self.mox.StubOutWithMock(self.task, '_check_host_is_up')
+ self.mox.StubOutWithMock(self.task,
+ '_check_destination_has_enough_memory')
+ self.mox.StubOutWithMock(self.task, '_get_compute_info')
+
+ self.task._check_host_is_up(self.destination)
+ self.task._check_destination_has_enough_memory()
+ self.task._get_compute_info(self.instance_host).AndReturn({
+ "hypervisor_type": "b"
+ })
+ self.task._get_compute_info(self.destination).AndReturn({
+ "hypervisor_type": "a"
+ })
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.InvalidHypervisorType,
+ self.task._check_requested_destination)
+
+ def test_check_requested_destination_fails_with_hypervisor_too_old(self):
+ self.mox.StubOutWithMock(self.task, '_check_host_is_up')
+ self.mox.StubOutWithMock(self.task,
+ '_check_destination_has_enough_memory')
+ self.mox.StubOutWithMock(self.task, '_get_compute_info')
+
+ self.task._check_host_is_up(self.destination)
+ self.task._check_destination_has_enough_memory()
+ self.task._get_compute_info(self.instance_host).AndReturn({
+ "hypervisor_type": "a",
+ "hypervisor_version": 7
+ })
+ self.task._get_compute_info(self.destination).AndReturn({
+ "hypervisor_type": "a",
+ "hypervisor_version": 6
+ })
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.DestinationHypervisorTooOld,
+ self.task._check_requested_destination)
+
+ def test_find_destination_works(self):
+ self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(self.task.scheduler_client,
+ 'select_destinations')
+ self.mox.StubOutWithMock(self.task,
+ '_check_compatible_with_source_hypervisor')
+ self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
+
+ compute_utils.get_image_metadata(self.context,
+ self.task.image_api, self.instance_image,
+ self.instance).AndReturn("image")
+ scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn({})
+ self.task.scheduler_client.select_destinations(self.context,
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
+ [{'host': 'host1'}])
+ self.task._check_compatible_with_source_hypervisor("host1")
+ self.task._call_livem_checks_on_host("host1")
+
+ self.mox.ReplayAll()
+ self.assertEqual("host1", self.task._find_destination())
+
+ def test_find_destination_no_image_works(self):
+ self.instance['image_ref'] = ''
+
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(self.task.scheduler_client,
+ 'select_destinations')
+ self.mox.StubOutWithMock(self.task,
+ '_check_compatible_with_source_hypervisor')
+ self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
+
+ scheduler_utils.build_request_spec(self.context, None,
+ mox.IgnoreArg()).AndReturn({})
+ self.task.scheduler_client.select_destinations(self.context,
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
+ [{'host': 'host1'}])
+ self.task._check_compatible_with_source_hypervisor("host1")
+ self.task._call_livem_checks_on_host("host1")
+
+ self.mox.ReplayAll()
+ self.assertEqual("host1", self.task._find_destination())
+
+ def _test_find_destination_retry_hypervisor_raises(self, error):
+ self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(self.task.scheduler_client,
+ 'select_destinations')
+ self.mox.StubOutWithMock(self.task,
+ '_check_compatible_with_source_hypervisor')
+ self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
+
+ compute_utils.get_image_metadata(self.context,
+ self.task.image_api, self.instance_image,
+ self.instance).AndReturn("image")
+ scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn({})
+ self.task.scheduler_client.select_destinations(self.context,
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
+ [{'host': 'host1'}])
+ self.task._check_compatible_with_source_hypervisor("host1")\
+ .AndRaise(error)
+
+ self.task.scheduler_client.select_destinations(self.context,
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
+ [{'host': 'host2'}])
+ self.task._check_compatible_with_source_hypervisor("host2")
+ self.task._call_livem_checks_on_host("host2")
+
+ self.mox.ReplayAll()
+ self.assertEqual("host2", self.task._find_destination())
+
+ def test_find_destination_retry_with_old_hypervisor(self):
+ self._test_find_destination_retry_hypervisor_raises(
+ exception.DestinationHypervisorTooOld)
+
+ def test_find_destination_retry_with_invalid_hypervisor_type(self):
+ self._test_find_destination_retry_hypervisor_raises(
+ exception.InvalidHypervisorType)
+
+ def test_find_destination_retry_with_invalid_livem_checks(self):
+ self.flags(migrate_max_retries=1)
+ self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(self.task.scheduler_client,
+ 'select_destinations')
+ self.mox.StubOutWithMock(self.task,
+ '_check_compatible_with_source_hypervisor')
+ self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
+
+ compute_utils.get_image_metadata(self.context,
+ self.task.image_api, self.instance_image,
+ self.instance).AndReturn("image")
+ scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn({})
+ self.task.scheduler_client.select_destinations(self.context,
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
+ [{'host': 'host1'}])
+ self.task._check_compatible_with_source_hypervisor("host1")
+ self.task._call_livem_checks_on_host("host1")\
+ .AndRaise(exception.Invalid)
+
+ self.task.scheduler_client.select_destinations(self.context,
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
+ [{'host': 'host2'}])
+ self.task._check_compatible_with_source_hypervisor("host2")
+ self.task._call_livem_checks_on_host("host2")
+
+ self.mox.ReplayAll()
+ self.assertEqual("host2", self.task._find_destination())
+
+ def test_find_destination_retry_exceeds_max(self):
+ self.flags(migrate_max_retries=0)
+ self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(self.task.scheduler_client,
+ 'select_destinations')
+ self.mox.StubOutWithMock(self.task,
+ '_check_compatible_with_source_hypervisor')
+
+ compute_utils.get_image_metadata(self.context,
+ self.task.image_api, self.instance_image,
+ self.instance).AndReturn("image")
+ scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn({})
+ self.task.scheduler_client.select_destinations(self.context,
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
+ [{'host': 'host1'}])
+ self.task._check_compatible_with_source_hypervisor("host1")\
+ .AndRaise(exception.DestinationHypervisorTooOld)
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NoValidHost, self.task._find_destination)
+
+ def test_find_destination_when_runs_out_of_hosts(self):
+ self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(self.task.scheduler_client,
+ 'select_destinations')
+ compute_utils.get_image_metadata(self.context,
+ self.task.image_api, self.instance_image,
+ self.instance).AndReturn("image")
+ scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn({})
+ self.task.scheduler_client.select_destinations(self.context,
+ mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(
+ exception.NoValidHost(reason=""))
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NoValidHost, self.task._find_destination)
+
+ def test_not_implemented_rollback(self):
+ self.assertRaises(NotImplementedError, self.task.rollback)
diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py
new file mode 100644
index 0000000000..0570ada217
--- /dev/null
+++ b/nova/tests/unit/conductor/test_conductor.py
@@ -0,0 +1,2151 @@
+# Copyright 2012 IBM Corp.
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the conductor service."""
+
+import contextlib
+
+import mock
+import mox
+from oslo.config import cfg
+from oslo import messaging
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+
+from nova.api.ec2 import ec2utils
+from nova.compute import arch
+from nova.compute import flavors
+from nova.compute import task_states
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova import conductor
+from nova.conductor import api as conductor_api
+from nova.conductor import manager as conductor_manager
+from nova.conductor import rpcapi as conductor_rpcapi
+from nova.conductor.tasks import live_migrate
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import models
+from nova import exception as exc
+from nova import notifications
+from nova import objects
+from nova.objects import base as obj_base
+from nova.objects import block_device as block_device_obj
+from nova.objects import fields
+from nova.objects import quotas as quotas_obj
+from nova import quota
+from nova import rpc
+from nova.scheduler import driver as scheduler_driver
+from nova.scheduler import utils as scheduler_utils
+from nova import test
+from nova.tests.unit import cast_as_call
+from nova.tests.unit.compute import test_compute
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_notifier
+from nova.tests.unit import fake_server_actions
+from nova.tests.unit import fake_utils
+from nova import utils
+
+
+CONF = cfg.CONF
+CONF.import_opt('report_interval', 'nova.service')
+
+
+FAKE_IMAGE_REF = 'fake-image-ref'
+
+
class FakeContext(context.RequestContext):
    """RequestContext whose elevated() is memoized.

    Returning the same object on every call lets tests assert that a
    particular elevated context was passed through by identity.
    """

    def elevated(self):
        """Return one consistent elevated context per instance."""
        try:
            return self._elevated
        except AttributeError:
            self._elevated = super(FakeContext, self).elevated()
            return self._elevated
+
+
class _BaseTestCase(object):
    """Tests shared by the conductor manager, RPC API and API test cases.

    Subclasses provide ``self.conductor`` (the object under test) and
    ``self.conductor_manager`` (the backing manager) in their setUp.
    """

    def setUp(self):
        super(_BaseTestCase, self).setUp()
        self.db = None
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = FakeContext(self.user_id, self.project_id)

        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)

        def fake_deserialize_context(serializer, ctxt_dict):
            # Verify contexts crossing the RPC serializer carry this
            # test's user/project, and hand back the same context object.
            self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
            self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
            return self.context

        self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
                       fake_deserialize_context)

        fake_utils.stub_out_utils_spawn_n(self.stubs)

    def _create_fake_instance(self, params=None, type_name='m1.tiny'):
        """Create a minimal ACTIVE instance in the DB and return it.

        ``params`` overrides any of the default fields below.
        """
        if not params:
            params = {}

        inst = {}
        inst['vm_state'] = vm_states.ACTIVE
        inst['image_ref'] = FAKE_IMAGE_REF
        inst['reservation_id'] = 'r-fakeres'
        inst['user_id'] = self.user_id
        inst['project_id'] = self.project_id
        inst['host'] = 'fake_host'
        type_id = flavors.get_flavor_by_name(type_name)['id']
        inst['instance_type_id'] = type_id
        inst['ami_launch_index'] = 0
        inst['memory_mb'] = 0
        inst['vcpus'] = 0
        inst['root_gb'] = 0
        inst['ephemeral_gb'] = 0
        inst['architecture'] = arch.X86_64
        inst['os_type'] = 'Linux'
        inst['availability_zone'] = 'fake-az'
        inst.update(params)
        return db.instance_create(self.context, inst)

    def _do_update(self, instance_uuid, **updates):
        # The manager/RPC signature takes the updates as a positional
        # dict; ConductorAPITestCase overrides this for the kwargs form.
        return self.conductor.instance_update(self.context, instance_uuid,
                                              updates, None)

    def test_instance_update(self):
        """instance_update persists the change and returns the new state."""
        instance = self._create_fake_instance()
        new_inst = self._do_update(instance['uuid'],
                                   vm_state=vm_states.STOPPED)
        instance = db.instance_get_by_uuid(self.context, instance['uuid'])
        self.assertEqual(instance['vm_state'], vm_states.STOPPED)
        self.assertEqual(new_inst['vm_state'], instance['vm_state'])

    def test_instance_update_invalid_key(self):
        # NOTE(danms): the real DB API call ignores invalid keys
        if self.db is None:
            # ExceptionHelper presumably unwraps the RPC-serialized
            # exception so the raw KeyError surfaces here — see usage in
            # ConductorTestCase._test_stubbed.
            self.conductor = utils.ExceptionHelper(self.conductor)
        self.assertRaises(KeyError,
                          self._do_update, 'any-uuid', foobar=1)

    def test_migration_get_in_progress_by_host_and_node(self):
        """The call is a straight passthrough to the DB API."""
        self.mox.StubOutWithMock(db,
                                 'migration_get_in_progress_by_host_and_node')
        db.migration_get_in_progress_by_host_and_node(
            self.context, 'fake-host', 'fake-node').AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.migration_get_in_progress_by_host_and_node(
            self.context, 'fake-host', 'fake-node')
        self.assertEqual(result, 'fake-result')

    def test_aggregate_metadata_get_by_host(self):
        """Passthrough to db.aggregate_metadata_get_by_host."""
        self.mox.StubOutWithMock(db, 'aggregate_metadata_get_by_host')
        db.aggregate_metadata_get_by_host(self.context, 'host',
                                          'key').AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.aggregate_metadata_get_by_host(self.context,
                                                               'host', 'key')
        self.assertEqual(result, 'result')

    def test_bw_usage_update(self):
        """bw_usage_update writes the usage then re-reads and returns it."""
        self.mox.StubOutWithMock(db, 'bw_usage_update')
        self.mox.StubOutWithMock(db, 'bw_usage_get')

        update_args = (self.context, 'uuid', 'mac', 0, 10, 20, 5, 10, 20)
        get_args = (self.context, 'uuid', 0, 'mac')

        db.bw_usage_update(*update_args, update_cells=True)
        db.bw_usage_get(*get_args).AndReturn('foo')

        self.mox.ReplayAll()
        result = self.conductor.bw_usage_update(*update_args,
                                                update_cells=True)
        self.assertEqual(result, 'foo')

    def test_provider_fw_rule_get_all(self):
        """Passthrough to db.provider_fw_rule_get_all."""
        fake_rules = ['a', 'b', 'c']
        self.mox.StubOutWithMock(db, 'provider_fw_rule_get_all')
        db.provider_fw_rule_get_all(self.context).AndReturn(fake_rules)
        self.mox.ReplayAll()
        result = self.conductor.provider_fw_rule_get_all(self.context)
        self.assertEqual(result, fake_rules)

    def test_block_device_mapping_get_all_by_instance(self):
        """The conductor extracts the uuid from the instance dict."""
        fake_inst = {'uuid': 'fake-uuid'}
        self.mox.StubOutWithMock(db,
                                 'block_device_mapping_get_all_by_instance')
        db.block_device_mapping_get_all_by_instance(
            self.context, fake_inst['uuid']).AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.block_device_mapping_get_all_by_instance(
            self.context, fake_inst, legacy=False)
        self.assertEqual(result, 'fake-result')

    def test_vol_usage_update(self):
        """vol_usage_update records usage and emits a volume.usage
        notification built from compute_utils.usage_volume_info.
        """
        self.mox.StubOutWithMock(db, 'vol_usage_update')
        self.mox.StubOutWithMock(compute_utils, 'usage_volume_info')

        fake_inst = {'uuid': 'fake-uuid',
                     'project_id': 'fake-project',
                     'user_id': 'fake-user',
                     'availability_zone': 'fake-az',
                     }

        db.vol_usage_update(self.context, 'fake-vol', 22, 33, 44, 55,
                            fake_inst['uuid'],
                            fake_inst['project_id'],
                            fake_inst['user_id'],
                            fake_inst['availability_zone'],
                            False).AndReturn('fake-usage')
        compute_utils.usage_volume_info('fake-usage').AndReturn('fake-info')

        self.mox.ReplayAll()

        self.conductor.vol_usage_update(self.context, 'fake-vol',
                                        22, 33, 44, 55, fake_inst, None, False)

        # Exactly one INFO-level volume.usage notification, published by
        # this conductor host with the computed usage payload.
        self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual('conductor.%s' % self.conductor_manager.host,
                         msg.publisher_id)
        self.assertEqual('volume.usage', msg.event_type)
        self.assertEqual('INFO', msg.priority)
        self.assertEqual('fake-info', msg.payload)

    def test_compute_node_create(self):
        """Passthrough to db.compute_node_create."""
        self.mox.StubOutWithMock(db, 'compute_node_create')
        db.compute_node_create(self.context, 'fake-values').AndReturn(
            'fake-result')
        self.mox.ReplayAll()
        result = self.conductor.compute_node_create(self.context,
                                                    'fake-values')
        self.assertEqual(result, 'fake-result')

    def test_compute_node_update(self):
        """The conductor passes only the node's id to the DB layer."""
        node = {'id': 'fake-id'}
        self.mox.StubOutWithMock(db, 'compute_node_update')
        db.compute_node_update(self.context, node['id'], {'fake': 'values'}).\
            AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.compute_node_update(self.context, node,
                                                    {'fake': 'values'})
        self.assertEqual(result, 'fake-result')

    def test_compute_node_delete(self):
        """compute_node_delete returns None on success."""
        node = {'id': 'fake-id'}
        self.mox.StubOutWithMock(db, 'compute_node_delete')
        db.compute_node_delete(self.context, node['id']).AndReturn(None)
        self.mox.ReplayAll()
        result = self.conductor.compute_node_delete(self.context, node)
        self.assertIsNone(result)

    def test_task_log_get(self):
        """Passthrough to db.task_log_get with an explicit state."""
        self.mox.StubOutWithMock(db, 'task_log_get')
        db.task_log_get(self.context, 'task', 'begin', 'end', 'host',
                        'state').AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.task_log_get(self.context, 'task', 'begin',
                                             'end', 'host', 'state')
        self.assertEqual(result, 'result')

    def test_task_log_get_with_no_state(self):
        """The optional state argument defaults through as None."""
        self.mox.StubOutWithMock(db, 'task_log_get')
        db.task_log_get(self.context, 'task', 'begin', 'end',
                        'host', None).AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.task_log_get(self.context, 'task', 'begin',
                                             'end', 'host', None)
        self.assertEqual(result, 'result')

    def test_task_log_begin_task(self):
        """The DB call is made with an elevated (admin) context."""
        self.mox.StubOutWithMock(db, 'task_log_begin_task')
        db.task_log_begin_task(self.context.elevated(), 'task', 'begin',
                               'end', 'host', 'items',
                               'message').AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.task_log_begin_task(
            self.context, 'task', 'begin', 'end', 'host', 'items', 'message')
        self.assertEqual(result, 'result')

    def test_task_log_end_task(self):
        """The DB call is made with an elevated (admin) context."""
        self.mox.StubOutWithMock(db, 'task_log_end_task')
        db.task_log_end_task(self.context.elevated(), 'task', 'begin', 'end',
                             'host', 'errors', 'message').AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.task_log_end_task(
            self.context, 'task', 'begin', 'end', 'host', 'errors', 'message')
        self.assertEqual(result, 'result')

    def test_notify_usage_exists(self):
        """notify_usage_exists combines audit bounds, bandwidth usage and
        extra info into a compute 'exists' notification.
        """
        info = {
            'audit_period_beginning': 'start',
            'audit_period_ending': 'end',
            'bandwidth': 'bw_usage',
            'image_meta': {},
            'extra': 'info',
            }
        instance = {
            'system_metadata': [],
            }

        self.mox.StubOutWithMock(notifications, 'audit_period_bounds')
        self.mox.StubOutWithMock(notifications, 'bandwidth_usage')
        self.mox.StubOutWithMock(compute_utils, 'notify_about_instance_usage')

        notifications.audit_period_bounds(False).AndReturn(('start', 'end'))
        notifications.bandwidth_usage(instance, 'start', True).AndReturn(
            'bw_usage')
        notifier = self.conductor_manager.notifier
        compute_utils.notify_about_instance_usage(notifier,
                                                  self.context, instance,
                                                  'exists',
                                                  system_metadata={},
                                                  extra_usage_info=info)

        self.mox.ReplayAll()

        self.conductor.notify_usage_exists(self.context, instance, False, True,
                                           system_metadata={},
                                           extra_usage_info=dict(extra='info'))

    def test_security_groups_trigger_members_refresh(self):
        """Passthrough to the security group API's member refresh."""
        self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
                                 'trigger_members_refresh')
        self.conductor_manager.security_group_api.trigger_members_refresh(
            self.context, [1, 2, 3])
        self.mox.ReplayAll()
        self.conductor.security_groups_trigger_members_refresh(self.context,
                                                               [1, 2, 3])

    def test_get_ec2_ids(self):
        """get_ec2_ids maps the instance, image, kernel and ramdisk ids to
        their EC2-style identifiers via ec2utils.
        """
        expected = {
            'instance-id': 'ec2-inst-id',
            'ami-id': 'ec2-ami-id',
            'kernel-id': 'ami-kernel-ec2-kernelid',
            'ramdisk-id': 'ami-ramdisk-ec2-ramdiskid',
            }
        inst = {
            'uuid': 'fake-uuid',
            'kernel_id': 'ec2-kernelid',
            'ramdisk_id': 'ec2-ramdiskid',
            'image_ref': 'fake-image',
            }
        self.mox.StubOutWithMock(ec2utils, 'id_to_ec2_inst_id')
        self.mox.StubOutWithMock(ec2utils, 'glance_id_to_ec2_id')
        self.mox.StubOutWithMock(ec2utils, 'image_type')

        ec2utils.id_to_ec2_inst_id(inst['uuid']).AndReturn(
            expected['instance-id'])
        ec2utils.glance_id_to_ec2_id(self.context,
                                     inst['image_ref']).AndReturn(
            expected['ami-id'])
        # kernel and ramdisk go through the same glance-id translation,
        # each prefixed with its EC2 image type.
        for image_type in ['kernel', 'ramdisk']:
            image_id = inst['%s_id' % image_type]
            ec2utils.image_type(image_type).AndReturn('ami-' + image_type)
            ec2utils.glance_id_to_ec2_id(self.context, image_id,
                                         'ami-' + image_type).AndReturn(
                'ami-%s-ec2-%sid' % (image_type, image_type))

        self.mox.ReplayAll()
        result = self.conductor.get_ec2_ids(self.context, inst)
        self.assertEqual(result, expected)
+
+
class ConductorTestCase(_BaseTestCase, test.TestCase):
    """Conductor Manager Tests.

    Runs the shared _BaseTestCase tests directly against
    ConductorManager (no RPC layer in between).
    """
    def setUp(self):
        super(ConductorTestCase, self).setUp()
        self.conductor = conductor_manager.ConductorManager()
        self.conductor_manager = self.conductor

    def test_instance_get_by_uuid(self):
        """instance_get_by_uuid returns the stored instance."""
        orig_instance = self._create_fake_instance()
        copy_instance = self.conductor.instance_get_by_uuid(
            self.context, orig_instance['uuid'], None)
        self.assertEqual(orig_instance['name'],
                         copy_instance['name'])

    def test_block_device_mapping_update_or_create(self):
        """create=True/False routes to bdm create/update respectively, and
        each result is pushed to the cells API as a BlockDeviceMapping
        object.
        """
        fake_bdm = {'id': 1, 'device_name': 'foo',
                    'source_type': 'volume', 'volume_id': 'fake-vol-id',
                    'destination_type': 'volume'}
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict(fake_bdm)
        fake_bdm2 = {'id': 1, 'device_name': 'foo2',
                     'source_type': 'volume', 'volume_id': 'fake-vol-id',
                     'destination_type': 'volume'}
        fake_bdm2 = fake_block_device.FakeDbBlockDeviceDict(fake_bdm2)
        cells_rpcapi = self.conductor.cells_rpcapi
        self.mox.StubOutWithMock(db, 'block_device_mapping_create')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
        self.mox.StubOutWithMock(cells_rpcapi,
                                 'bdm_update_or_create_at_top')
        db.block_device_mapping_create(self.context,
                                       fake_bdm).AndReturn(fake_bdm2)
        cells_rpcapi.bdm_update_or_create_at_top(
            self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
            create=True)
        db.block_device_mapping_update(self.context, fake_bdm['id'],
                                       fake_bdm).AndReturn(fake_bdm2)
        cells_rpcapi.bdm_update_or_create_at_top(
            self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
            create=False)
        self.mox.ReplayAll()
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm,
                                                             create=True)
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm,
                                                             create=False)

    def test_instance_get_all_by_filters(self):
        """Positional args are forwarded; columns_to_join/use_slave become
        keyword args to the DB call.
        """
        filters = {'foo': 'bar'}
        self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
        db.instance_get_all_by_filters(self.context, filters,
                                       'fake-key', 'fake-sort',
                                       columns_to_join=None, use_slave=False)
        self.mox.ReplayAll()
        self.conductor.instance_get_all_by_filters(self.context, filters,
                                                   'fake-key', 'fake-sort',
                                                   None, False)

    def test_instance_get_all_by_filters_use_slave(self):
        """use_slave=True is forwarded to the DB layer."""
        filters = {'foo': 'bar'}
        self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
        db.instance_get_all_by_filters(self.context, filters,
                                       'fake-key', 'fake-sort',
                                       columns_to_join=None, use_slave=True)
        self.mox.ReplayAll()
        self.conductor.instance_get_all_by_filters(self.context, filters,
                                                   'fake-key', 'fake-sort',
                                                   columns_to_join=None,
                                                   use_slave=True)

    def test_instance_get_all_by_host(self):
        """node=None uses the by-host query; a node uses by-host-and-node.
        Both run with an elevated context.
        """
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
        db.instance_get_all_by_host(self.context.elevated(),
                                    'host', None).AndReturn('result')
        db.instance_get_all_by_host_and_node(self.context.elevated(), 'host',
                                             'node').AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.instance_get_all_by_host(self.context, 'host',
                                                         None, None)
        self.assertEqual(result, 'result')
        result = self.conductor.instance_get_all_by_host(self.context, 'host',
                                                         'node', None)
        self.assertEqual(result, 'result')

    def _test_stubbed(self, name, dbargs, condargs,
                      db_result_listified=False, db_exception=None):
        """Stub DB method ``name`` and drive service_get_all_by with
        ``condargs``, checking either the result or the exception path.
        """
        self.mox.StubOutWithMock(db, name)
        if db_exception:
            # Recorded twice: the manager is called once expecting the
            # RPC-wrapped ExpectedException, then once more through
            # ExceptionHelper, which unwraps to the original exception.
            getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
            getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
        else:
            getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
        self.mox.ReplayAll()
        if db_exception:
            self.assertRaises(messaging.ExpectedException,
                              self.conductor.service_get_all_by,
                              self.context, **condargs)

            self.conductor = utils.ExceptionHelper(self.conductor)

            self.assertRaises(db_exception.__class__,
                              self.conductor.service_get_all_by,
                              self.context, **condargs)
        else:
            result = self.conductor.service_get_all_by(self.context,
                                                       **condargs)
            if db_result_listified:
                self.assertEqual(['fake-result'], result)
            else:
                self.assertEqual('fake-result', result)

    def test_service_get_all(self):
        self._test_stubbed('service_get_all', (),
                           dict(host=None, topic=None, binary=None))

    def test_service_get_by_host_and_topic(self):
        self._test_stubbed('service_get_by_host_and_topic',
                           ('host', 'topic'),
                           dict(topic='topic', host='host', binary=None))

    def test_service_get_all_by_topic(self):
        self._test_stubbed('service_get_all_by_topic',
                           ('topic',),
                           dict(topic='topic', host=None, binary=None))

    def test_service_get_all_by_host(self):
        self._test_stubbed('service_get_all_by_host',
                           ('host',),
                           dict(host='host', topic=None, binary=None))

    def test_service_get_by_compute_host(self):
        # The compute-host variant wraps the single result in a list.
        self._test_stubbed('service_get_by_compute_host',
                           ('host',),
                           dict(topic='compute', host='host', binary=None),
                           db_result_listified=True)

    def test_service_get_by_args(self):
        self._test_stubbed('service_get_by_args',
                           ('host', 'binary'),
                           dict(host='host', binary='binary', topic=None))

    def test_service_get_by_compute_host_not_found(self):
        self._test_stubbed('service_get_by_compute_host',
                           ('host',),
                           dict(topic='compute', host='host', binary=None),
                           db_exception=exc.ComputeHostNotFound(host='host'))

    def test_service_get_by_args_not_found(self):
        self._test_stubbed('service_get_by_args',
                           ('host', 'binary'),
                           dict(host='host', binary='binary', topic=None),
                           db_exception=exc.HostBinaryNotFound(binary='binary',
                                                               host='host'))

    def test_security_groups_trigger_handler(self):
        """The args list is unpacked before reaching the handler."""
        self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
                                 'trigger_handler')
        self.conductor_manager.security_group_api.trigger_handler('event',
                                                                  self.context,
                                                                  'args')
        self.mox.ReplayAll()
        self.conductor.security_groups_trigger_handler(self.context,
                                                       'event', ['args'])

    def _test_object_action(self, is_classmethod, raise_exception):
        """Invoke a method or classmethod on a NovaObject via the
        conductor's object_action/object_class_action entry points.
        """
        class TestObject(obj_base.NovaObject):
            def foo(self, context, raise_exception=False):
                if raise_exception:
                    raise Exception('test')
                else:
                    return 'test'

            @classmethod
            def bar(cls, context, raise_exception=False):
                if raise_exception:
                    raise Exception('test')
                else:
                    return 'test'

        obj = TestObject()
        if is_classmethod:
            result = self.conductor.object_class_action(
                self.context, TestObject.obj_name(), 'bar', '1.0',
                tuple(), {'raise_exception': raise_exception})
        else:
            updates, result = self.conductor.object_action(
                self.context, obj, 'foo', tuple(),
                {'raise_exception': raise_exception})
        self.assertEqual('test', result)

    def test_object_action(self):
        self._test_object_action(False, False)

    def test_object_action_on_raise(self):
        self.assertRaises(messaging.ExpectedException,
                          self._test_object_action, False, True)

    def test_object_class_action(self):
        self._test_object_action(True, False)

    def test_object_class_action_on_raise(self):
        self.assertRaises(messaging.ExpectedException,
                          self._test_object_action, True, True)

    def test_object_action_copies_object(self):
        class TestObject(obj_base.NovaObject):
            fields = {'dict': fields.DictOfStringsField()}

            def touch_dict(self, context):
                self.dict['foo'] = 'bar'
                self.obj_reset_changes()

        obj = TestObject()
        obj.dict = {}
        obj.obj_reset_changes()
        updates, result = self.conductor.object_action(
            self.context, obj, 'touch_dict', tuple(), {})
        # NOTE(danms): If conductor did not properly copy the object, then
        # the new and reference copies of the nested dict object will be
        # the same, and thus 'dict' will not be reported as changed
        self.assertIn('dict', updates)
        self.assertEqual({'foo': 'bar'}, updates['dict'])

    def _test_expected_exceptions(self, db_method, conductor_method, errors,
                                  *args, **kwargs):
        # Tests that expected exceptions are handled properly.
        for error in errors:
            with mock.patch.object(db, db_method, side_effect=error):
                self.assertRaises(messaging.ExpectedException,
                                  conductor_method,
                                  self.context, *args, **kwargs)

    def test_action_event_start_expected_exceptions(self):
        error = exc.InstanceActionNotFound(request_id='1', instance_uuid='2')
        self._test_expected_exceptions(
            'action_event_start', self.conductor.action_event_start, [error],
            {'foo': 'bar'})

    def test_action_event_finish_expected_exceptions(self):
        errors = (exc.InstanceActionNotFound(request_id='1',
                                             instance_uuid='2'),
                  exc.InstanceActionEventNotFound(event='1', action_id='2'))
        self._test_expected_exceptions(
            'action_event_finish', self.conductor.action_event_finish,
            errors, {'foo': 'bar'})

    def test_instance_update_expected_exceptions(self):
        errors = (exc.InvalidUUID(uuid='foo'),
                  exc.InstanceNotFound(instance_id=1),
                  exc.UnexpectedTaskStateError(expected='foo',
                                               actual='bar'))
        self._test_expected_exceptions(
            'instance_update', self.conductor.instance_update,
            errors, None, {'foo': 'bar'}, None)

    def test_instance_get_by_uuid_expected_exceptions(self):
        error = exc.InstanceNotFound(instance_id=1)
        self._test_expected_exceptions(
            'instance_get_by_uuid', self.conductor.instance_get_by_uuid,
            [error], None, [])

    def test_aggregate_host_add_expected_exceptions(self):
        error = exc.AggregateHostExists(aggregate_id=1, host='foo')
        self._test_expected_exceptions(
            'aggregate_host_add', self.conductor.aggregate_host_add,
            [error], {'id': 1}, None)

    def test_aggregate_host_delete_expected_exceptions(self):
        error = exc.AggregateHostNotFound(aggregate_id=1, host='foo')
        self._test_expected_exceptions(
            'aggregate_host_delete', self.conductor.aggregate_host_delete,
            [error], {'id': 1}, None)

    def test_service_update_expected_exceptions(self):
        error = exc.ServiceNotFound(service_id=1)
        self._test_expected_exceptions(
            'service_update',
            self.conductor.service_update,
            [error], {'id': 1}, None)

    def test_service_destroy_expected_exceptions(self):
        error = exc.ServiceNotFound(service_id=1)
        self._test_expected_exceptions(
            'service_destroy',
            self.conductor.service_destroy,
            [error], 1)

    def _setup_aggregate_with_host(self):
        """Create an aggregate with one host ('bar') and return the
        refreshed aggregate record.
        """
        aggregate_ref = db.aggregate_create(self.context.elevated(),
                {'name': 'foo'}, metadata={'availability_zone': 'foo'})

        self.conductor.aggregate_host_add(self.context, aggregate_ref, 'bar')

        aggregate_ref = db.aggregate_get(self.context.elevated(),
                                         aggregate_ref['id'])

        return aggregate_ref

    def test_aggregate_host_add(self):
        aggregate_ref = self._setup_aggregate_with_host()

        self.assertIn('bar', aggregate_ref['hosts'])

        db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])

    def test_aggregate_host_delete(self):
        aggregate_ref = self._setup_aggregate_with_host()

        self.conductor.aggregate_host_delete(self.context, aggregate_ref,
                'bar')

        aggregate_ref = db.aggregate_get(self.context.elevated(),
                                         aggregate_ref['id'])

        self.assertNotIn('bar', aggregate_ref['hosts'])

        db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])

    def test_network_migrate_instance_start(self):
        """Passthrough to the network API's migrate_instance_start."""
        self.mox.StubOutWithMock(self.conductor_manager.network_api,
                                 'migrate_instance_start')
        self.conductor_manager.network_api.migrate_instance_start(self.context,
                                                                  'instance',
                                                                  'migration')
        self.mox.ReplayAll()
        self.conductor.network_migrate_instance_start(self.context,
                                                      'instance',
                                                      'migration')

    def test_network_migrate_instance_finish(self):
        """Passthrough to the network API's migrate_instance_finish."""
        self.mox.StubOutWithMock(self.conductor_manager.network_api,
                                 'migrate_instance_finish')
        self.conductor_manager.network_api.migrate_instance_finish(
            self.context, 'instance', 'migration')
        self.mox.ReplayAll()
        self.conductor.network_migrate_instance_finish(self.context,
                                                       'instance',
                                                       'migration')

    def test_instance_destroy(self):
        """The conductor extracts the uuid before calling the DB."""
        self.mox.StubOutWithMock(db, 'instance_destroy')
        db.instance_destroy(self.context, 'fake-uuid').AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.instance_destroy(self.context,
                                                 {'uuid': 'fake-uuid'})
        self.assertEqual(result, 'fake-result')

    def test_compute_unrescue(self):
        """Passthrough to the compute API's unrescue."""
        self.mox.StubOutWithMock(self.conductor_manager.compute_api,
                                 'unrescue')
        self.conductor_manager.compute_api.unrescue(self.context, 'instance')
        self.mox.ReplayAll()
        self.conductor.compute_unrescue(self.context, 'instance')

    def test_instance_get_active_by_window_joined(self):
        """Passthrough to db.instance_get_active_by_window_joined."""
        self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
        db.instance_get_active_by_window_joined(self.context, 'fake-begin',
                                                'fake-end', 'fake-proj',
                                                'fake-host')
        self.mox.ReplayAll()
        self.conductor.instance_get_active_by_window_joined(
            self.context, 'fake-begin', 'fake-end', 'fake-proj', 'fake-host')

    def test_instance_fault_create(self):
        """Passthrough to db.instance_fault_create."""
        self.mox.StubOutWithMock(db, 'instance_fault_create')
        db.instance_fault_create(self.context, 'fake-values').AndReturn(
            'fake-result')
        self.mox.ReplayAll()
        result = self.conductor.instance_fault_create(self.context,
                                                      'fake-values')
        self.assertEqual(result, 'fake-result')

    def test_action_event_start(self):
        self.mox.StubOutWithMock(db, 'action_event_start')
        db.action_event_start(self.context, mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conductor.action_event_start(self.context, {})

    def test_action_event_finish(self):
        self.mox.StubOutWithMock(db, 'action_event_finish')
        db.action_event_finish(self.context, mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conductor.action_event_finish(self.context, {})

    def test_agent_build_get_by_triple(self):
        """Passthrough to db.agent_build_get_by_triple."""
        self.mox.StubOutWithMock(db, 'agent_build_get_by_triple')
        db.agent_build_get_by_triple(self.context, 'fake-hv', 'fake-os',
                                     'fake-arch').AndReturn('it worked')
        self.mox.ReplayAll()
        result = self.conductor.agent_build_get_by_triple(self.context,
                                                          'fake-hv',
                                                          'fake-os',
                                                          'fake-arch')
        self.assertEqual(result, 'it worked')
+
+
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
    """Conductor RPC API Tests.

    Runs the shared tests through ConductorAPI (the RPC client) against
    a started conductor service, so calls traverse the RPC layer.
    """
    def setUp(self):
        super(ConductorRPCAPITestCase, self).setUp()
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor_manager = self.conductor_service.manager
        self.conductor = conductor_rpcapi.ConductorAPI()

    def test_block_device_mapping_update_or_create(self):
        """create=True/False/unset route to bdm create/update/
        update_or_create; each DB result is hydrated into a
        BlockDeviceMapping object.
        """
        fake_bdm = {'id': 'fake-id'}
        self.mox.StubOutWithMock(db, 'block_device_mapping_create')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
        self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
                                 '_from_db_object')
        db.block_device_mapping_create(self.context, fake_bdm)
        block_device_obj.BlockDeviceMapping._from_db_object(
            self.context, mox.IgnoreArg(), mox.IgnoreArg())
        db.block_device_mapping_update(self.context, fake_bdm['id'], fake_bdm)
        block_device_obj.BlockDeviceMapping._from_db_object(
            self.context, mox.IgnoreArg(), mox.IgnoreArg())
        db.block_device_mapping_update_or_create(self.context, fake_bdm)
        block_device_obj.BlockDeviceMapping._from_db_object(
            self.context, mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm,
                                                             create=True)
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm,
                                                             create=False)
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm)

    def _test_stubbed(self, name, dbargs, condargs,
                      db_result_listified=False, db_exception=None):
        """Like ConductorTestCase._test_stubbed, but through the RPC
        client the DB exception is already unwrapped, so only one call
        is recorded and the original exception class is expected.
        """
        self.mox.StubOutWithMock(db, name)
        if db_exception:
            getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
        else:
            getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
        self.mox.ReplayAll()
        if db_exception:
            self.assertRaises(db_exception.__class__,
                              self.conductor.service_get_all_by,
                              self.context, **condargs)
        else:
            result = self.conductor.service_get_all_by(self.context,
                                                       **condargs)
            if db_result_listified:
                self.assertEqual(['fake-result'], result)
            else:
                self.assertEqual('fake-result', result)

    def test_service_get_all(self):
        self._test_stubbed('service_get_all', (),
                           dict(topic=None, host=None, binary=None))

    def test_service_get_by_host_and_topic(self):
        self._test_stubbed('service_get_by_host_and_topic',
                           ('host', 'topic'),
                           dict(topic='topic', host='host', binary=None))

    def test_service_get_all_by_topic(self):
        self._test_stubbed('service_get_all_by_topic',
                           ('topic',),
                           dict(topic='topic', host=None, binary=None))

    def test_service_get_all_by_host(self):
        self._test_stubbed('service_get_all_by_host',
                           ('host',),
                           dict(host='host', topic=None, binary=None))

    def test_service_get_by_compute_host(self):
        # The compute-host variant wraps the single result in a list.
        self._test_stubbed('service_get_by_compute_host',
                           ('host',),
                           dict(topic='compute', host='host', binary=None),
                           db_result_listified=True)

    def test_service_get_by_args(self):
        self._test_stubbed('service_get_by_args',
                           ('host', 'binary'),
                           dict(host='host', binary='binary', topic=None))

    def test_service_get_by_compute_host_not_found(self):
        self._test_stubbed('service_get_by_compute_host',
                           ('host',),
                           dict(topic='compute', host='host', binary=None),
                           db_exception=exc.ComputeHostNotFound(host='host'))

    def test_service_get_by_args_not_found(self):
        self._test_stubbed('service_get_by_args',
                           ('host', 'binary'),
                           dict(host='host', binary='binary', topic=None),
                           db_exception=exc.HostBinaryNotFound(binary='binary',
                                                               host='host'))

    def test_security_groups_trigger_handler(self):
        """The args list is unpacked before reaching the handler."""
        self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
                                 'trigger_handler')
        self.conductor_manager.security_group_api.trigger_handler('event',
                                                                  self.context,
                                                                  'arg')
        self.mox.ReplayAll()
        self.conductor.security_groups_trigger_handler(self.context,
                                                       'event', ['arg'])

    @mock.patch.object(db, 'service_update')
    @mock.patch('oslo.messaging.RPCClient.prepare')
    def test_service_update_time_big(self, mock_prepare, mock_update):
        """report_interval > 3 yields an RPC timeout of interval - 1."""
        CONF.set_override('report_interval', 10)
        services = {'id': 1}
        self.conductor.service_update(self.context, services, {})
        mock_prepare.assert_called_once_with(timeout=9)

    @mock.patch.object(db, 'service_update')
    @mock.patch('oslo.messaging.RPCClient.prepare')
    def test_service_update_time_small(self, mock_prepare, mock_update):
        """Small report_interval values are used as the timeout as-is."""
        CONF.set_override('report_interval', 3)
        services = {'id': 1}
        self.conductor.service_update(self.context, services, {})
        mock_prepare.assert_called_once_with(timeout=3)

    @mock.patch.object(db, 'service_update')
    @mock.patch('oslo.messaging.RPCClient.prepare')
    def test_service_update_no_time(self, mock_prepare, mock_update):
        """Without report_interval no explicit timeout is set."""
        CONF.set_override('report_interval', None)
        services = {'id': 1}
        self.conductor.service_update(self.context, services, {})
        mock_prepare.assert_called_once_with()
+
+
+class ConductorAPITestCase(_BaseTestCase, test.TestCase):
+ """Conductor API Tests."""
    def setUp(self):
        """Start a real conductor service and test through the
        nova.conductor.api.API facade; self.db stays None so the base
        tests take their RPC-facing branches.
        """
        super(ConductorAPITestCase, self).setUp()
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor = conductor_api.API()
        self.conductor_manager = self.conductor_service.manager
        self.db = None
+
+ def _do_update(self, instance_uuid, **updates):
+ # NOTE(danms): the public API takes actual keyword arguments,
+ # so override the base class here to make the call correctly
+ return self.conductor.instance_update(self.context, instance_uuid,
+ **updates)
+
+ def test_bw_usage_get(self):
+ self.mox.StubOutWithMock(db, 'bw_usage_update')
+ self.mox.StubOutWithMock(db, 'bw_usage_get')
+
+ get_args = (self.context, 'uuid', 0, 'mac')
+
+ db.bw_usage_get(*get_args).AndReturn('foo')
+
+ self.mox.ReplayAll()
+ result = self.conductor.bw_usage_get(*get_args)
+ self.assertEqual(result, 'foo')
+
+ def test_block_device_mapping_update_or_create(self):
+ self.mox.StubOutWithMock(db, 'block_device_mapping_create')
+ self.mox.StubOutWithMock(db, 'block_device_mapping_update')
+ self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
+ self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
+ '_from_db_object')
+ db.block_device_mapping_create(self.context, 'fake-bdm')
+ block_device_obj.BlockDeviceMapping._from_db_object(
+ self.context, mox.IgnoreArg(), mox.IgnoreArg())
+ db.block_device_mapping_update(self.context,
+ 'fake-id', {'id': 'fake-id'})
+ block_device_obj.BlockDeviceMapping._from_db_object(
+ self.context, mox.IgnoreArg(), mox.IgnoreArg())
+ db.block_device_mapping_update_or_create(self.context, 'fake-bdm')
+ block_device_obj.BlockDeviceMapping._from_db_object(
+ self.context, mox.IgnoreArg(), mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+ self.conductor.block_device_mapping_create(self.context, 'fake-bdm')
+ self.conductor.block_device_mapping_update(self.context, 'fake-id', {})
+ self.conductor.block_device_mapping_update_or_create(self.context,
+ 'fake-bdm')
+
+ def _test_stubbed(self, name, *args, **kwargs):
+ if args and isinstance(args[0], FakeContext):
+ ctxt = args[0]
+ args = args[1:]
+ else:
+ ctxt = self.context
+ db_exception = kwargs.get('db_exception')
+ self.mox.StubOutWithMock(db, name)
+ if db_exception:
+ getattr(db, name)(ctxt, *args).AndRaise(db_exception)
+ else:
+ getattr(db, name)(ctxt, *args).AndReturn('fake-result')
+ if name == 'service_destroy':
+ # TODO(russellb) This is a hack ... SetUp() starts the conductor()
+ # service. There is a cleanup step that runs after this test which
+ # also deletes the associated service record. This involves a call
+ # to db.service_destroy(), which we have stubbed out.
+ db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+ if db_exception:
+ self.assertRaises(db_exception.__class__,
+ getattr(self.conductor, name),
+ self.context, *args)
+ else:
+ result = getattr(self.conductor, name)(self.context, *args)
+ self.assertEqual(
+ result, 'fake-result' if kwargs.get('returns', True) else None)
+
+ def test_service_get_all(self):
+ self._test_stubbed('service_get_all')
+
+ def test_service_get_by_host_and_topic(self):
+ self._test_stubbed('service_get_by_host_and_topic', 'host', 'topic')
+
+ def test_service_get_all_by_topic(self):
+ self._test_stubbed('service_get_all_by_topic', 'topic')
+
+ def test_service_get_all_by_host(self):
+ self._test_stubbed('service_get_all_by_host', 'host')
+
+ def test_service_get_by_compute_host(self):
+ self._test_stubbed('service_get_by_compute_host', 'host')
+
+ def test_service_get_by_args(self):
+ self._test_stubbed('service_get_by_args', 'host', 'binary')
+
+ def test_service_get_by_compute_host_not_found(self):
+ self._test_stubbed('service_get_by_compute_host', 'host',
+ db_exception=exc.ComputeHostNotFound(host='host'))
+
+ def test_service_get_by_args_not_found(self):
+ self._test_stubbed('service_get_by_args', 'host', 'binary',
+ db_exception=exc.HostBinaryNotFound(binary='binary',
+ host='host'))
+
+ def test_service_create(self):
+ self._test_stubbed('service_create', {})
+
+ def test_service_destroy(self):
+ self._test_stubbed('service_destroy', '', returns=False)
+
+ def test_service_update(self):
+ ctxt = self.context
+ self.mox.StubOutWithMock(db, 'service_update')
+ db.service_update(ctxt, '', {}).AndReturn('fake-result')
+ self.mox.ReplayAll()
+ result = self.conductor.service_update(self.context, {'id': ''}, {})
+ self.assertEqual(result, 'fake-result')
+
+ def test_instance_get_all_by_host_and_node(self):
+ self._test_stubbed('instance_get_all_by_host_and_node',
+ self.context.elevated(), 'host', 'node')
+
+ def test_instance_get_all_by_host(self):
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
+ db.instance_get_all_by_host(self.context.elevated(), 'host',
+ None).AndReturn('fake-result')
+ self.mox.ReplayAll()
+ result = self.conductor.instance_get_all_by_host(self.context,
+ 'host', None)
+ self.assertEqual(result, 'fake-result')
+
+ def test_wait_until_ready(self):
+ timeouts = []
+ calls = dict(count=0)
+
+ def fake_ping(context, message, timeout):
+ timeouts.append(timeout)
+ calls['count'] += 1
+ if calls['count'] < 15:
+ raise messaging.MessagingTimeout("fake")
+
+ self.stubs.Set(self.conductor.base_rpcapi, 'ping', fake_ping)
+
+ self.conductor.wait_until_ready(self.context)
+
+ self.assertEqual(timeouts.count(10), 10)
+ self.assertIn(None, timeouts)
+
+ def test_security_groups_trigger_handler(self):
+ self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
+ 'trigger_handler')
+ self.conductor_manager.security_group_api.trigger_handler('event',
+ self.context,
+ 'arg')
+ self.mox.ReplayAll()
+ self.conductor.security_groups_trigger_handler(self.context,
+ 'event', 'arg')
+
+
class ConductorLocalAPITestCase(ConductorAPITestCase):
    """Conductor LocalAPI Tests.

    Re-runs the ConductorAPITestCase suite against the in-process
    LocalAPI (no RPC round trip).
    """
    def setUp(self):
        super(ConductorLocalAPITestCase, self).setUp()
        # Point the conductor at the in-process implementation; the manager
        # under test is the LocalAPI's own dispatch target.
        self.conductor = conductor_api.LocalAPI()
        self.conductor_manager = self.conductor._manager._target
        self.db = db

    def test_client_exceptions(self):
        instance = self._create_fake_instance()
        # NOTE(danms): The LocalAPI should not raise exceptions wrapped
        # in ClientException. KeyError should be raised if an invalid
        # update key is passed, so use that to validate.
        self.assertRaises(KeyError,
                          self._do_update, instance['uuid'], foo='bar')

    def test_wait_until_ready(self):
        # Override test in ConductorAPITestCase
        pass
+
+
class ConductorImportTest(test.TestCase):
    """Verify nova.conductor hands back the right API flavor."""

    def test_import_conductor_local(self):
        # use_local=True must yield the in-process implementations.
        self.flags(use_local=True, group='conductor')
        api = conductor.API()
        task_api = conductor.ComputeTaskAPI()
        self.assertIsInstance(api, conductor_api.LocalAPI)
        self.assertIsInstance(task_api, conductor_api.LocalComputeTaskAPI)

    def test_import_conductor_rpc(self):
        # use_local=False must yield the RPC-backed implementations.
        self.flags(use_local=False, group='conductor')
        api = conductor.API()
        task_api = conductor.ComputeTaskAPI()
        self.assertIsInstance(api, conductor_api.API)
        self.assertIsInstance(task_api, conductor_api.ComputeTaskAPI)

    def test_import_conductor_override_to_local(self):
        # An explicit use_local=True argument wins over the config option.
        self.flags(use_local=False, group='conductor')
        api = conductor.API(use_local=True)
        task_api = conductor.ComputeTaskAPI(use_local=True)
        self.assertIsInstance(api, conductor_api.LocalAPI)
        self.assertIsInstance(task_api, conductor_api.LocalComputeTaskAPI)
+
+
class ConductorPolicyTest(test.TestCase):
    """Sanity checks on the conductor's instance-update key whitelist."""

    def test_all_allowed_keys(self):
        # Stub out the DB write; only the key filtering is exercised here.
        def fake_db_instance_update(self, *args, **kwargs):
            return None, None
        self.stubs.Set(db, 'instance_update_and_get_original',
                       fake_db_instance_update)

        ctxt = context.RequestContext('fake-user', 'fake-project')
        conductor = conductor_api.LocalAPI()
        # Every whitelisted key must be accepted; datetime fields need a
        # real datetime value, everything else takes an arbitrary string.
        updates = dict((key,
                        timeutils.utcnow()
                        if key in conductor_manager.datetime_fields
                        else 'foo')
                       for key in conductor_manager.allowed_updates)
        conductor.instance_update(ctxt, 'fake-instance', **updates)

    def test_allowed_keys_are_real(self):
        instance = models.Instance()
        keys = list(conductor_manager.allowed_updates)

        # NOTE(danms): expected_task_state is a parameter that gets
        # passed to the db layer, but is not actually an instance attribute
        keys.remove('expected_task_state')

        for key in keys:
            self.assertTrue(hasattr(instance, key))
+
+
class _BaseTaskTestCase(object):
    """Shared compute-task tests.

    Mixed into concrete test cases that provide ``self.conductor`` (the
    object under test) and ``self.conductor_manager`` (the
    ComputeTaskManager that ultimately services the calls).
    """
    def setUp(self):
        super(_BaseTaskTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = FakeContext(self.user_id, self.project_id)
        fake_server_actions.stub_out_action_events(self.stubs)

        # RPC context deserialization must round-trip our fake context.
        def fake_deserialize_context(serializer, ctxt_dict):
            self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
            self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
            return self.context

        self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
                       fake_deserialize_context)

    def _prepare_rebuild_args(self, update_args=None):
        # Canonical keyword set for rebuild_instance(); callers override
        # individual entries via update_args.
        rebuild_args = {'new_pass': 'admin_password',
                        'injected_files': 'files_to_inject',
                        'image_ref': 'image_ref',
                        'orig_image_ref': 'orig_image_ref',
                        'orig_sys_metadata': 'orig_sys_meta',
                        'bdms': {},
                        'recreate': False,
                        'on_shared_storage': False,
                        'preserve_ephemeral': False,
                        'host': 'compute-host'}
        if update_args:
            rebuild_args.update(update_args)
        return rebuild_args

    def test_live_migrate(self):
        inst = fake_instance.fake_db_instance()
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst, [])

        self.mox.StubOutWithMock(live_migrate, 'execute')
        live_migrate.execute(self.context,
                             mox.IsA(objects.Instance),
                             'destination',
                             'block_migration',
                             'disk_over_commit')
        self.mox.ReplayAll()

        if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
                                       conductor_api.LocalComputeTaskAPI)):
            # The API method is actually 'live_migrate_instance'. It gets
            # converted into 'migrate_server' when doing RPC.
            self.conductor.live_migrate_instance(self.context, inst_obj,
                'destination', 'block_migration', 'disk_over_commit')
        else:
            self.conductor.migrate_server(self.context, inst_obj,
                {'host': 'destination'}, True, False, None,
                'block_migration', 'disk_over_commit')

    def test_cold_migrate(self):
        # Full happy path: image metadata -> request spec -> scheduler
        # destination -> prep_resize RPC to the chosen host.
        self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(
            self.conductor_manager.compute_rpcapi, 'prep_resize')
        self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                                 'select_destinations')
        inst = fake_instance.fake_db_instance(image_ref='image_ref')
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst, [])
        flavor = flavors.get_default_flavor()
        flavor['extra_specs'] = 'extra_specs'
        request_spec = {'instance_type': flavor,
                        'instance_properties': {}}
        compute_utils.get_image_metadata(
            self.context, self.conductor_manager.image_api,
            'image_ref', mox.IsA(objects.Instance)).AndReturn('image')

        scheduler_utils.build_request_spec(
            self.context, 'image',
            [mox.IsA(objects.Instance)],
            instance_type=flavor).AndReturn(request_spec)

        hosts = [dict(host='host1', nodename=None, limits={})]
        self.conductor_manager.scheduler_client.select_destinations(
            self.context, request_spec,
            {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(hosts)

        # The scheduled host must be recorded in the retry list.
        filter_properties = {'limits': {},
                             'retry': {'num_attempts': 1,
                                       'hosts': [['host1', None]]}}

        self.conductor_manager.compute_rpcapi.prep_resize(
            self.context, 'image', mox.IsA(objects.Instance),
            mox.IsA(dict), 'host1', [], request_spec=request_spec,
            filter_properties=filter_properties, node=None)

        self.mox.ReplayAll()

        scheduler_hint = {'filter_properties': {}}

        if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
                                       conductor_api.LocalComputeTaskAPI)):
            # The API method is actually 'resize_instance'. It gets
            # converted into 'migrate_server' when doing RPC.
            self.conductor.resize_instance(
                self.context, inst_obj, {}, scheduler_hint, flavor, [])
        else:
            self.conductor.migrate_server(
                self.context, inst_obj, scheduler_hint,
                False, False, flavor, None, None, [])

    def test_build_instances(self):
        # Two instances, two scheduler destinations: each instance should be
        # dispatched via build_and_run_instance to its own host/node with a
        # retry entry recording that host.
        system_metadata = flavors.save_flavor_info({},
                                                   flavors.get_default_flavor())
        instances = [fake_instance.fake_instance_obj(
            self.context,
            system_metadata=system_metadata,
            expected_attrs=['system_metadata']) for i in xrange(2)]
        instance_type = flavors.extract_flavor(instances[0])
        instance_type['extra_specs'] = 'fake-specs'
        instance_properties = jsonutils.to_primitive(instances[0])

        self.mox.StubOutWithMock(db, 'flavor_extra_specs_get')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(db,
                                 'block_device_mapping_get_all_by_instance')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'build_and_run_instance')

        db.flavor_extra_specs_get(
            self.context,
            instance_type['flavorid']).AndReturn('fake-specs')
        scheduler_utils.setup_instance_group(self.context, None, None)
        self.conductor_manager.scheduler_client.select_destinations(
            self.context, {'image': {'fake_data': 'should_pass_silently'},
                           'instance_properties': jsonutils.to_primitive(
                               instances[0]),
                           'instance_type': instance_type,
                           'instance_uuids': [inst.uuid for inst in instances],
                           'num_instances': 2},
            {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
                [{'host': 'host1', 'nodename': 'node1', 'limits': []},
                 {'host': 'host2', 'nodename': 'node2', 'limits': []}])
        db.instance_get_by_uuid(self.context, instances[0].uuid,
                                columns_to_join=['system_metadata'],
                                use_slave=False).AndReturn(
                                    jsonutils.to_primitive(instances[0]))
        db.block_device_mapping_get_all_by_instance(self.context,
                instances[0].uuid, use_slave=False).AndReturn([])
        self.conductor_manager.compute_rpcapi.build_and_run_instance(
            self.context,
            instance=mox.IgnoreArg(),
            host='host1',
            image={'fake_data': 'should_pass_silently'},
            request_spec={
                'image': {'fake_data': 'should_pass_silently'},
                'instance_properties': instance_properties,
                'instance_type': instance_type,
                'instance_uuids': [inst.uuid for inst in instances],
                'num_instances': 2},
            filter_properties={'retry': {'num_attempts': 1,
                                         'hosts': [['host1', 'node1']]},
                               'limits': []},
            admin_password='admin_password',
            injected_files='injected_files',
            requested_networks=None,
            security_groups='security_groups',
            block_device_mapping=mox.IgnoreArg(),
            node='node1', limits=[])
        db.instance_get_by_uuid(self.context, instances[1].uuid,
                                columns_to_join=['system_metadata'],
                                use_slave=False).AndReturn(
                                    jsonutils.to_primitive(instances[1]))
        db.block_device_mapping_get_all_by_instance(self.context,
                instances[1].uuid, use_slave=False).AndReturn([])
        self.conductor_manager.compute_rpcapi.build_and_run_instance(
            self.context,
            instance=mox.IgnoreArg(),
            host='host2',
            image={'fake_data': 'should_pass_silently'},
            request_spec={
                'image': {'fake_data': 'should_pass_silently'},
                'instance_properties': instance_properties,
                'instance_type': instance_type,
                'instance_uuids': [inst.uuid for inst in instances],
                'num_instances': 2},
            filter_properties={'limits': [],
                               'retry': {'num_attempts': 1,
                                         'hosts': [['host2', 'node2']]}},
            admin_password='admin_password',
            injected_files='injected_files',
            requested_networks=None,
            security_groups='security_groups',
            block_device_mapping=mox.IgnoreArg(),
            node='node2', limits=[])
        self.mox.ReplayAll()

        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(cast_as_call.CastAsCall(self.stubs))

        self.conductor.build_instances(self.context,
                instances=instances,
                image={'fake_data': 'should_pass_silently'},
                filter_properties={},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping='block_device_mapping',
                legacy_bdm=False)

    def test_build_instances_scheduler_failure(self):
        # When the scheduler raises NoValidHost, the error must be reported
        # for every instance in the request.
        instances = [fake_instance.fake_instance_obj(self.context)
                     for i in xrange(2)]
        image = {'fake-data': 'should_pass_silently'}
        spec = {'fake': 'specs',
                'instance_properties': instances[0]}
        exception = exc.NoValidHost(reason='fake-reason')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(scheduler_driver, 'handle_schedule_error')
        self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                                 'select_destinations')

        scheduler_utils.build_request_spec(self.context, image,
                                           mox.IgnoreArg()).AndReturn(spec)
        scheduler_utils.setup_instance_group(self.context, None, None)
        self.conductor_manager.scheduler_client.select_destinations(
            self.context, spec,
            {'retry': {'num_attempts': 1,
                       'hosts': []}}).AndRaise(exception)
        for instance in instances:
            scheduler_driver.handle_schedule_error(self.context, exception,
                                                   instance.uuid, spec)
        self.mox.ReplayAll()

        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(cast_as_call.CastAsCall(self.stubs))

        self.conductor.build_instances(self.context,
                instances=instances,
                image=image,
                filter_properties={},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping='block_device_mapping',
                legacy_bdm=False)

    def test_unshelve_instance_on_host(self):
        # A SHELVED (not offloaded) instance is simply started on its host
        # and the shelve snapshot image is deleted; no scheduling happens.
        db_instance = self._create_fake_instance()
        instance = objects.Instance.get_by_uuid(self.context,
                db_instance['uuid'], expected_attrs=['system_metadata'])
        instance.vm_state = vm_states.SHELVED
        instance.task_state = task_states.UNSHELVING
        instance.save()
        system_metadata = instance.system_metadata

        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'start_instance')
        self.mox.StubOutWithMock(self.conductor_manager, '_delete_image')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'unshelve_instance')

        self.conductor_manager.compute_rpcapi.start_instance(self.context,
                instance)
        self.conductor_manager._delete_image(self.context,
                'fake_image_id')
        self.mox.ReplayAll()

        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_image_id'] = 'fake_image_id'
        system_metadata['shelved_host'] = 'fake-mini'
        self.conductor_manager.unshelve_instance(self.context, instance)

    def test_unshelve_offloaded_instance_glance_image_not_found(self):
        # A vanished shelve image must produce UnshelveException and leave
        # the instance in ERROR.
        shelved_image_id = "image_not_found"

        db_instance = self._create_fake_instance()
        instance = objects.Instance.get_by_uuid(
            self.context,
            db_instance['uuid'],
            expected_attrs=['system_metadata'])
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.task_state = task_states.UNSHELVING
        instance.save()
        system_metadata = instance.system_metadata

        self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')

        e = exc.ImageNotFound(image_id=shelved_image_id)
        self.conductor_manager.image_api.get(
            self.context, shelved_image_id, show_deleted=False).AndRaise(e)
        self.mox.ReplayAll()

        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_host'] = 'fake-mini'
        system_metadata['shelved_image_id'] = shelved_image_id

        self.assertRaises(
            exc.UnshelveException,
            self.conductor_manager.unshelve_instance,
            self.context, instance)
        self.assertEqual(instance.vm_state, vm_states.ERROR)

    def test_unshelve_offloaded_instance_image_id_is_none(self):
        # A missing (None) shelved_image_id likewise fails the unshelve and
        # errors the instance.
        db_instance = jsonutils.to_primitive(self._create_fake_instance())
        instance = objects.Instance.get_by_uuid(
            self.context,
            db_instance['uuid'],
            expected_attrs=['system_metadata'])
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.task_state = task_states.UNSHELVING
        system_metadata = instance.system_metadata
        system_metadata['shelved_image_id'] = None
        instance.save()

        self.assertRaises(
            exc.UnshelveException,
            self.conductor_manager.unshelve_instance,
            self.context, instance)
        self.assertEqual(instance.vm_state, vm_states.ERROR)

    def test_unshelve_instance_schedule_and_rebuild(self):
        # An offloaded instance has no host: the conductor must schedule a
        # destination and then unshelve there with the shelve image.
        db_instance = self._create_fake_instance()
        instance = objects.Instance.get_by_uuid(self.context,
                db_instance['uuid'], expected_attrs=['system_metadata'])
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.save()
        filter_properties = {}
        system_metadata = instance.system_metadata

        self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
        self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'unshelve_instance')

        self.conductor_manager.image_api.get(self.context,
                'fake_image_id', show_deleted=False).AndReturn('fake_image')
        self.conductor_manager._schedule_instances(self.context,
                'fake_image', filter_properties, instance).AndReturn(
                    [{'host': 'fake_host',
                      'nodename': 'fake_node',
                      'limits': {}}])
        self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
                instance, 'fake_host', image='fake_image',
                filter_properties={'limits': {}}, node='fake_node')
        self.mox.ReplayAll()

        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_image_id'] = 'fake_image_id'
        system_metadata['shelved_host'] = 'fake-mini'
        self.conductor_manager.unshelve_instance(self.context, instance)

    def test_unshelve_instance_schedule_and_rebuild_novalid_host(self):
        # NoValidHost during unshelve scheduling leaves the instance
        # SHELVED_OFFLOADED (not errored).
        db_instance = self._create_fake_instance()
        instance = objects.Instance.get_by_uuid(self.context,
                db_instance['uuid'], expected_attrs=['system_metadata'])
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.save()
        system_metadata = instance.system_metadata

        def fake_schedule_instances(context, image, filter_properties,
                                    *instances):
            raise exc.NoValidHost(reason='')

        with contextlib.nested(
            mock.patch.object(self.conductor_manager.image_api, 'get',
                              return_value='fake_image'),
            mock.patch.object(self.conductor_manager, '_schedule_instances',
                              fake_schedule_instances)
        ) as (_get_image, _schedule_instances):
            system_metadata['shelved_at'] = timeutils.utcnow()
            system_metadata['shelved_image_id'] = 'fake_image_id'
            system_metadata['shelved_host'] = 'fake-mini'
            self.conductor_manager.unshelve_instance(self.context, instance)
            _get_image.assert_has_calls([mock.call(self.context,
                    system_metadata['shelved_image_id'],
                    show_deleted=False)])
            self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)

    def test_unshelve_instance_schedule_and_rebuild_volume_backed(self):
        # Volume-backed instance: image lookup returns None and unshelve is
        # dispatched with image=None.
        db_instance = self._create_fake_instance()
        instance = objects.Instance.get_by_uuid(self.context,
                db_instance['uuid'], expected_attrs=['system_metadata'])
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.save()
        filter_properties = {}
        system_metadata = instance.system_metadata

        self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
        self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'unshelve_instance')

        self.conductor_manager.image_api.get(self.context,
                'fake_image_id', show_deleted=False).AndReturn(None)
        self.conductor_manager._schedule_instances(self.context,
                None, filter_properties, instance).AndReturn(
                    [{'host': 'fake_host',
                      'nodename': 'fake_node',
                      'limits': {}}])
        self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
                instance, 'fake_host', image=None,
                filter_properties={'limits': {}}, node='fake_node')
        self.mox.ReplayAll()

        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_image_id'] = 'fake_image_id'
        system_metadata['shelved_host'] = 'fake-mini'
        self.conductor_manager.unshelve_instance(self.context, instance)

    def test_rebuild_instance(self):
        # When a host is supplied the scheduler must not be consulted.
        db_instance = self._create_fake_instance()
        inst_obj = objects.Instance.get_by_uuid(self.context,
                                                db_instance['uuid'])
        rebuild_args = self._prepare_rebuild_args({'host': inst_obj.host})

        with contextlib.nested(
            mock.patch.object(self.conductor_manager.compute_rpcapi,
                              'rebuild_instance'),
            mock.patch.object(self.conductor_manager.scheduler_client,
                              'select_destinations')
        ) as (rebuild_mock, select_dest_mock):
            self.conductor_manager.rebuild_instance(context=self.context,
                                                    instance=inst_obj,
                                                    **rebuild_args)
            self.assertFalse(select_dest_mock.called)
            rebuild_mock.assert_called_once_with(self.context,
                                                 instance=inst_obj,
                                                 **rebuild_args)

    def test_rebuild_instance_with_scheduler(self):
        # With host=None the scheduler picks a destination, ignoring the
        # instance's current host.
        db_instance = self._create_fake_instance()
        inst_obj = objects.Instance.get_by_uuid(self.context,
                                                db_instance['uuid'])
        inst_obj.host = 'noselect'
        rebuild_args = self._prepare_rebuild_args({'host': None})
        expected_host = 'thebesthost'
        request_spec = {}
        filter_properties = {'ignore_hosts': [(inst_obj.host)]}

        with contextlib.nested(
            mock.patch.object(self.conductor_manager.compute_rpcapi,
                              'rebuild_instance'),
            mock.patch.object(self.conductor_manager.scheduler_client,
                              'select_destinations',
                              return_value=[{'host': expected_host}]),
            mock.patch('nova.scheduler.utils.build_request_spec',
                       return_value=request_spec)
        ) as (rebuild_mock, select_dest_mock, bs_mock):
            self.conductor_manager.rebuild_instance(context=self.context,
                                                    instance=inst_obj,
                                                    **rebuild_args)
            select_dest_mock.assert_called_once_with(self.context,
                                                     request_spec,
                                                     filter_properties)
            rebuild_args['host'] = expected_host
            rebuild_mock.assert_called_once_with(self.context,
                                                 instance=inst_obj,
                                                 **rebuild_args)

    def test_rebuild_instance_with_scheduler_no_host(self):
        # NoValidHost from the scheduler propagates and rebuild is never
        # dispatched.
        db_instance = self._create_fake_instance()
        inst_obj = objects.Instance.get_by_uuid(self.context,
                                                db_instance['uuid'])
        inst_obj.host = 'noselect'
        rebuild_args = self._prepare_rebuild_args({'host': None})
        request_spec = {}
        filter_properties = {'ignore_hosts': [(inst_obj.host)]}

        with contextlib.nested(
            mock.patch.object(self.conductor_manager.compute_rpcapi,
                              'rebuild_instance'),
            mock.patch.object(self.conductor_manager.scheduler_client,
                              'select_destinations',
                              side_effect=exc.NoValidHost(reason='')),
            mock.patch('nova.scheduler.utils.build_request_spec',
                       return_value=request_spec)
        ) as (rebuild_mock, select_dest_mock, bs_mock):
            self.assertRaises(exc.NoValidHost,
                              self.conductor_manager.rebuild_instance,
                              context=self.context, instance=inst_obj,
                              **rebuild_args)
            select_dest_mock.assert_called_once_with(self.context,
                                                     request_spec,
                                                     filter_properties)
            self.assertFalse(rebuild_mock.called)
+
+
+class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
+ """ComputeTaskManager Tests."""
    def setUp(self):
        super(ConductorTaskTestCase, self).setUp()
        # Exercise the ComputeTaskManager directly (no RPC/API wrapper).
        self.conductor = conductor_manager.ComputeTaskManager()
        self.conductor_manager = self.conductor
+
    def test_migrate_server_fails_with_rebuild(self):
        # live=True combined with rebuild=True is not implemented.
        self.assertRaises(NotImplementedError, self.conductor.migrate_server,
            self.context, None, None, True, True, None, None, None)
+
    def test_migrate_server_fails_with_flavor(self):
        # live=True with a flavor (resize) is not implemented.
        self.assertRaises(NotImplementedError, self.conductor.migrate_server,
            self.context, None, None, True, False, "dummy", None, None)
+
+ def _build_request_spec(self, instance):
+ return {
+ 'instance_properties': {
+ 'uuid': instance['uuid'], },
+ }
+
    def _test_migrate_server_deals_with_expected_exceptions(self, ex):
        # Shared scenario: live_migrate.execute raises ``ex``; the manager
        # must reset the VM state to ACTIVE (clearing MIGRATING), notify,
        # and re-raise the same exception type to the caller.
        instance = fake_instance.fake_db_instance(uuid='uuid',
                                                  vm_state=vm_states.ACTIVE)
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), instance, [])
        self.mox.StubOutWithMock(live_migrate, 'execute')
        self.mox.StubOutWithMock(scheduler_utils,
                                 'set_vm_state_and_notify')

        live_migrate.execute(self.context, mox.IsA(objects.Instance),
                             'destination', 'block_migration',
                             'disk_over_commit').AndRaise(ex)

        scheduler_utils.set_vm_state_and_notify(self.context,
                'compute_task', 'migrate_server',
                {'vm_state': vm_states.ACTIVE,
                 'task_state': None,
                 'expected_task_state': task_states.MIGRATING},
                ex, self._build_request_spec(inst_obj),
                self.conductor_manager.db)
        self.mox.ReplayAll()

        # ExceptionHelper unwraps ExpectedException wrappers so
        # assertRaises sees the original exception type.
        self.conductor = utils.ExceptionHelper(self.conductor)

        self.assertRaises(type(ex),
            self.conductor.migrate_server, self.context, inst_obj,
            {'host': 'destination'}, True, False, None, 'block_migration',
            'disk_over_commit')
+
+ def test_migrate_server_deals_with_invalidcpuinfo_exception(self):
+ instance = fake_instance.fake_db_instance(uuid='uuid',
+ vm_state=vm_states.ACTIVE)
+ inst_obj = objects.Instance._from_db_object(
+ self.context, objects.Instance(), instance, [])
+ self.mox.StubOutWithMock(live_migrate, 'execute')
+ self.mox.StubOutWithMock(scheduler_utils,
+ 'set_vm_state_and_notify')
+
+ ex = exc.InvalidCPUInfo(reason="invalid cpu info.")
+ live_migrate.execute(self.context, mox.IsA(objects.Instance),
+ 'destination', 'block_migration',
+ 'disk_over_commit').AndRaise(ex)
+
+ scheduler_utils.set_vm_state_and_notify(self.context,
+ 'compute_task', 'migrate_server',
+ {'vm_state': vm_states.ACTIVE,
+ 'task_state': None,
+ 'expected_task_state': task_states.MIGRATING},
+ ex, self._build_request_spec(inst_obj),
+ self.conductor_manager.db)
+ self.mox.ReplayAll()
+
+ self.conductor = utils.ExceptionHelper(self.conductor)
+
+ self.assertRaises(exc.InvalidCPUInfo,
+ self.conductor.migrate_server, self.context, inst_obj,
+ {'host': 'destination'}, True, False, None, 'block_migration',
+ 'disk_over_commit')
+
    @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify')
    @mock.patch.object(live_migrate, 'execute')
    def test_migrate_server_deals_with_instancenotrunning_exception(self,
                mock_live_migrate, mock_set_state):
        # InstanceNotRunning: re-raised to the caller, and the notify call
        # preserves the instance's current vm_state (no forced ACTIVE).
        inst = fake_instance.fake_db_instance()
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst, [])

        error = exc.InstanceNotRunning(instance_id="fake")
        mock_live_migrate.side_effect = error

        self.conductor = utils.ExceptionHelper(self.conductor)

        self.assertRaises(exc.InstanceNotRunning,
            self.conductor.migrate_server, self.context, inst_obj,
            {'host': 'destination'}, True, False, None,
            'block_migration', 'disk_over_commit')

        request_spec = self._build_request_spec(inst_obj)
        mock_set_state.assert_called_once_with(self.context, 'compute_task',
                'migrate_server',
                dict(vm_state=inst_obj.vm_state,
                     task_state=None,
                     expected_task_state=task_states.MIGRATING),
                error, request_spec, self.conductor_manager.db)
+
    def test_migrate_server_deals_with_DestinationHypervisorTooOld(self):
        # Expected failure: state reset to ACTIVE, exception re-raised.
        ex = exc.DestinationHypervisorTooOld()
        self._test_migrate_server_deals_with_expected_exceptions(ex)
+
    def test_migrate_server_deals_with_HypervisorUnavailable(self):
        # Expected failure: state reset to ACTIVE, exception re-raised.
        ex = exc.HypervisorUnavailable(host='dummy')
        self._test_migrate_server_deals_with_expected_exceptions(ex)
+
    def test_migrate_server_deals_with_unexpected_exceptions(self):
        # An exception outside the expected set (plain IOError) must be
        # surfaced to the caller as MigrationError.
        instance = fake_instance.fake_db_instance()
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), instance, [])
        self.mox.StubOutWithMock(live_migrate, 'execute')
        self.mox.StubOutWithMock(scheduler_utils,
                                 'set_vm_state_and_notify')

        ex = IOError()
        live_migrate.execute(self.context, mox.IsA(objects.Instance),
                             'destination', 'block_migration',
                             'disk_over_commit').AndRaise(ex)
        self.mox.ReplayAll()

        self.conductor = utils.ExceptionHelper(self.conductor)

        self.assertRaises(exc.MigrationError,
            self.conductor.migrate_server, self.context, inst_obj,
            {'host': 'destination'}, True, False, None, 'block_migration',
            'disk_over_commit')
+
    def test_set_vm_state_and_notify(self):
        # The manager's private wrapper should forward verbatim to
        # scheduler_utils.set_vm_state_and_notify with the 'compute_task'
        # service name and its own db handle.
        self.mox.StubOutWithMock(scheduler_utils,
                                 'set_vm_state_and_notify')
        scheduler_utils.set_vm_state_and_notify(
                self.context, 'compute_task', 'method', 'updates',
                'ex', 'request_spec', self.conductor.db)

        self.mox.ReplayAll()

        self.conductor._set_vm_state_and_notify(
                self.context, 'method', 'updates', 'ex', 'request_spec')
+
    def test_cold_migrate_no_valid_host_back_in_active_state(self):
        # When the scheduler finds no valid host, the instance must be put
        # back to ACTIVE, quota reservations rolled back for the right
        # project/user, and NoValidHost re-raised to the caller.
        flavor = flavors.get_flavor_by_name('m1.tiny')
        inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                              instance_type_id=flavor['id'])
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst,
            expected_attrs=[])
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'

        self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(self.conductor.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.conductor,
                                 '_set_vm_state_and_notify')
        self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')

        compute_utils.get_image_metadata(
            self.context, self.conductor_manager.image_api,
            'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)

        scheduler_utils.build_request_spec(
            self.context, image, [inst_obj],
            instance_type=flavor).AndReturn(request_spec)

        exc_info = exc.NoValidHost(reason="")

        self.conductor.scheduler_client.select_destinations(
            self.context, request_spec,
            filter_props).AndRaise(exc_info)

        updates = {'vm_state': vm_states.ACTIVE,
                   'task_state': None}

        self.conductor._set_vm_state_and_notify(self.context,
                                                'migrate_server',
                                                updates, exc_info,
                                                request_spec)
        # NOTE(mriedem): Validate that the quota rollback is using
        # the correct project_id and user_id.
        project_id, user_id = quotas_obj.ids_from_instance(self.context,
                                                           inst_obj)
        quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
                              user_id=user_id)

        self.mox.ReplayAll()

        self.assertRaises(exc.NoValidHost,
                          self.conductor._cold_migrate,
                          self.context, inst_obj,
                          flavor, filter_props, [resvs])
+
+ def test_cold_migrate_no_valid_host_back_in_stopped_state(self):
+ flavor = flavors.get_flavor_by_name('m1.tiny')
+ inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
+ vm_state=vm_states.STOPPED,
+ instance_type_id=flavor['id'])
+ inst_obj = objects.Instance._from_db_object(
+ self.context, objects.Instance(), inst,
+ expected_attrs=[])
+ request_spec = dict(instance_type=dict(extra_specs=dict()),
+ instance_properties=dict())
+ filter_props = dict(context=None)
+ resvs = 'fake-resvs'
+ image = 'fake-image'
+
+ self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(self.conductor.scheduler_client,
+ 'select_destinations')
+ self.mox.StubOutWithMock(self.conductor,
+ '_set_vm_state_and_notify')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
+
+ compute_utils.get_image_metadata(
+ self.context, self.conductor_manager.image_api,
+ 'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
+
+ scheduler_utils.build_request_spec(
+ self.context, image, [inst_obj],
+ instance_type=flavor).AndReturn(request_spec)
+
+ exc_info = exc.NoValidHost(reason="")
+
+ self.conductor.scheduler_client.select_destinations(
+ self.context, request_spec,
+ filter_props).AndRaise(exc_info)
+
+ updates = {'vm_state': vm_states.STOPPED,
+ 'task_state': None}
+
+ self.conductor._set_vm_state_and_notify(self.context,
+ 'migrate_server',
+ updates, exc_info,
+ request_spec)
+ # NOTE(mriedem): Validate that the quota rollback is using
+ # the correct project_id and user_id.
+ project_id, user_id = quotas_obj.ids_from_instance(self.context,
+ inst_obj)
+ quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
+ user_id=user_id)
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exc.NoValidHost,
+ self.conductor._cold_migrate, self.context,
+ inst_obj, flavor, filter_props, [resvs])
+
+ def test_cold_migrate_no_valid_host_error_msg(self):
+ flavor = flavors.get_flavor_by_name('m1.tiny')
+ inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
+ vm_state=vm_states.STOPPED,
+ instance_type_id=flavor['id'])
+ inst_obj = objects.Instance._from_db_object(
+ self.context, objects.Instance(), inst,
+ expected_attrs=[])
+ request_spec = dict(instance_type=dict(extra_specs=dict()),
+ instance_properties=dict())
+ filter_props = dict(context=None)
+ resvs = 'fake-resvs'
+ image = 'fake-image'
+
+ with contextlib.nested(
+ mock.patch.object(compute_utils, 'get_image_metadata',
+ return_value=image),
+ mock.patch.object(scheduler_utils, 'build_request_spec',
+ return_value=request_spec),
+ mock.patch.object(self.conductor.scheduler_client,
+ 'select_destinations',
+ side_effect=exc.NoValidHost(reason=""))
+ ) as (image_mock, brs_mock, select_dest_mock):
+ nvh = self.assertRaises(exc.NoValidHost,
+ self.conductor._cold_migrate, self.context,
+ inst_obj, flavor, filter_props, [resvs])
+ self.assertIn('cold migrate', nvh.message)
+
+ def test_cold_migrate_exception_host_in_error_state_and_raise(self):
+ inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
+ vm_state=vm_states.STOPPED)
+ inst_obj = objects.Instance._from_db_object(
+ self.context, objects.Instance(), inst,
+ expected_attrs=[])
+ request_spec = dict(instance_type=dict(extra_specs=dict()),
+ instance_properties=dict())
+ filter_props = dict(context=None)
+ resvs = 'fake-resvs'
+ image = 'fake-image'
+ hosts = [dict(host='host1', nodename=None, limits={})]
+
+ self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(self.conductor.scheduler_client,
+ 'select_destinations')
+ self.mox.StubOutWithMock(scheduler_utils,
+ 'populate_filter_properties')
+ self.mox.StubOutWithMock(self.conductor.compute_rpcapi,
+ 'prep_resize')
+ self.mox.StubOutWithMock(self.conductor,
+ '_set_vm_state_and_notify')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
+
+ compute_utils.get_image_metadata(
+ self.context, self.conductor_manager.image_api,
+ 'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)
+
+ scheduler_utils.build_request_spec(
+ self.context, image, [inst_obj],
+ instance_type='flavor').AndReturn(request_spec)
+
+ expected_filter_props = {'retry': {'num_attempts': 1,
+ 'hosts': []},
+ 'context': None}
+ self.conductor.scheduler_client.select_destinations(
+ self.context, request_spec,
+ expected_filter_props).AndReturn(hosts)
+
+ scheduler_utils.populate_filter_properties(filter_props,
+ hosts[0])
+ exc_info = test.TestingException('something happened')
+
+ expected_filter_props = {'retry': {'num_attempts': 1,
+ 'hosts': []}}
+
+ self.conductor.compute_rpcapi.prep_resize(
+ self.context, image, inst_obj,
+ 'flavor', hosts[0]['host'], [resvs],
+ request_spec=request_spec,
+ filter_properties=expected_filter_props,
+ node=hosts[0]['nodename']).AndRaise(exc_info)
+
+ updates = {'vm_state': vm_states.STOPPED,
+ 'task_state': None}
+
+ self.conductor._set_vm_state_and_notify(self.context,
+ 'migrate_server',
+ updates, exc_info,
+ request_spec)
+ # NOTE(mriedem): Validate that the quota rollback is using
+ # the correct project_id and user_id.
+ project_id, user_id = quotas_obj.ids_from_instance(self.context,
+ inst_obj)
+ quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
+ user_id=user_id)
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(test.TestingException,
+ self.conductor._cold_migrate,
+ self.context, inst_obj, 'flavor',
+ filter_props, [resvs])
+
+ def test_resize_no_valid_host_error_msg(self):
+ flavor = flavors.get_flavor_by_name('m1.tiny')
+ flavor_new = flavors.get_flavor_by_name('m1.small')
+ inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
+ vm_state=vm_states.STOPPED,
+ instance_type_id=flavor['id'])
+ inst_obj = objects.Instance._from_db_object(
+ self.context, objects.Instance(), inst,
+ expected_attrs=[])
+ request_spec = dict(instance_type=dict(extra_specs=dict()),
+ instance_properties=dict())
+ filter_props = dict(context=None)
+ resvs = 'fake-resvs'
+ image = 'fake-image'
+
+ with contextlib.nested(
+ mock.patch.object(compute_utils, 'get_image_metadata',
+ return_value=image),
+ mock.patch.object(scheduler_utils, 'build_request_spec',
+ return_value=request_spec),
+ mock.patch.object(self.conductor.scheduler_client,
+ 'select_destinations',
+ side_effect=exc.NoValidHost(reason=""))
+ ) as (image_mock, brs_mock, select_dest_mock):
+ nvh = self.assertRaises(exc.NoValidHost,
+ self.conductor._cold_migrate, self.context,
+ inst_obj, flavor_new, filter_props,
+ [resvs])
+ self.assertIn('resize', nvh.message)
+
+ def test_build_instances_instance_not_found(self):
+ instances = [fake_instance.fake_instance_obj(self.context)
+ for i in xrange(2)]
+ self.mox.StubOutWithMock(instances[0], 'refresh')
+ self.mox.StubOutWithMock(instances[1], 'refresh')
+ image = {'fake-data': 'should_pass_silently'}
+ spec = {'fake': 'specs',
+ 'instance_properties': instances[0]}
+ self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+ self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
+ self.mox.StubOutWithMock(scheduler_driver, 'handle_schedule_error')
+ self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
+ 'select_destinations')
+ self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
+ 'build_and_run_instance')
+
+ scheduler_utils.build_request_spec(self.context, image,
+ mox.IgnoreArg()).AndReturn(spec)
+ scheduler_utils.setup_instance_group(self.context, None, None)
+ self.conductor_manager.scheduler_client.select_destinations(
+ self.context, spec,
+ {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
+ [{'host': 'host1', 'nodename': 'node1', 'limits': []},
+ {'host': 'host2', 'nodename': 'node2', 'limits': []}])
+ instances[0].refresh().AndRaise(
+ exc.InstanceNotFound(instance_id=instances[0].uuid))
+ instances[1].refresh()
+ self.conductor_manager.compute_rpcapi.build_and_run_instance(
+ self.context, instance=instances[1], host='host2',
+ image={'fake-data': 'should_pass_silently'}, request_spec=spec,
+ filter_properties={'limits': [],
+ 'retry': {'num_attempts': 1,
+ 'hosts': [['host2',
+ 'node2']]}},
+ admin_password='admin_password',
+ injected_files='injected_files',
+ requested_networks=None,
+ security_groups='security_groups',
+ block_device_mapping=mox.IsA(objects.BlockDeviceMappingList),
+ node='node2', limits=[])
+ self.mox.ReplayAll()
+
+ # build_instances() is a cast, we need to wait for it to complete
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ self.conductor.build_instances(self.context,
+ instances=instances,
+ image=image,
+ filter_properties={},
+ admin_password='admin_password',
+ injected_files='injected_files',
+ requested_networks=None,
+ security_groups='security_groups',
+ block_device_mapping='block_device_mapping',
+ legacy_bdm=False)
+
+ @mock.patch.object(scheduler_utils, 'setup_instance_group')
+ @mock.patch.object(scheduler_utils, 'build_request_spec')
+ def test_build_instances_info_cache_not_found(self, build_request_spec,
+ setup_instance_group):
+ instances = [fake_instance.fake_instance_obj(self.context)
+ for i in xrange(2)]
+ image = {'fake-data': 'should_pass_silently'}
+ destinations = [{'host': 'host1', 'nodename': 'node1', 'limits': []},
+ {'host': 'host2', 'nodename': 'node2', 'limits': []}]
+ spec = {'fake': 'specs',
+ 'instance_properties': instances[0]}
+ build_request_spec.return_value = spec
+ with contextlib.nested(
+ mock.patch.object(instances[0], 'refresh',
+ side_effect=exc.InstanceInfoCacheNotFound(
+ instance_uuid=instances[0].uuid)),
+ mock.patch.object(instances[1], 'refresh'),
+ mock.patch.object(self.conductor_manager.scheduler_client,
+ 'select_destinations', return_value=destinations),
+ mock.patch.object(self.conductor_manager.compute_rpcapi,
+ 'build_and_run_instance')
+ ) as (inst1_refresh, inst2_refresh, select_destinations,
+ build_and_run_instance):
+
+ # build_instances() is a cast, we need to wait for it to complete
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ self.conductor.build_instances(self.context,
+ instances=instances,
+ image=image,
+ filter_properties={},
+ admin_password='admin_password',
+ injected_files='injected_files',
+ requested_networks=None,
+ security_groups='security_groups',
+ block_device_mapping='block_device_mapping',
+ legacy_bdm=False)
+
+ setup_instance_group.assert_called_once_with(
+ self.context, None, None)
+ build_and_run_instance.assert_called_once_with(self.context,
+ instance=instances[1], host='host2', image={'fake-data':
+ 'should_pass_silently'}, request_spec=spec,
+ filter_properties={'limits': [],
+ 'retry': {'num_attempts': 1,
+ 'hosts': [['host2',
+ 'node2']]}},
+ admin_password='admin_password',
+ injected_files='injected_files',
+ requested_networks=None,
+ security_groups='security_groups',
+ block_device_mapping=mock.ANY,
+ node='node2', limits=[])
+
+
+class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
+ test_compute.BaseTestCase):
+ """Conductor compute_task RPC namespace Tests."""
+ def setUp(self):
+ super(ConductorTaskRPCAPITestCase, self).setUp()
+ self.conductor_service = self.start_service(
+ 'conductor', manager='nova.conductor.manager.ConductorManager')
+ self.conductor = conductor_rpcapi.ComputeTaskAPI()
+ service_manager = self.conductor_service.manager
+ self.conductor_manager = service_manager.compute_task_mgr
+
+
+class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
+ """Compute task API Tests."""
+ def setUp(self):
+ super(ConductorTaskAPITestCase, self).setUp()
+ self.conductor_service = self.start_service(
+ 'conductor', manager='nova.conductor.manager.ConductorManager')
+ self.conductor = conductor_api.ComputeTaskAPI()
+ service_manager = self.conductor_service.manager
+ self.conductor_manager = service_manager.compute_task_mgr
+
+
+class ConductorLocalComputeTaskAPITestCase(ConductorTaskAPITestCase):
+ """Conductor LocalComputeTaskAPI Tests."""
+ def setUp(self):
+ super(ConductorLocalComputeTaskAPITestCase, self).setUp()
+ self.conductor = conductor_api.LocalComputeTaskAPI()
+ self.conductor_manager = self.conductor._manager._target
diff --git a/nova/tests/unit/conf_fixture.py b/nova/tests/unit/conf_fixture.py
new file mode 100644
index 0000000000..336ba61daf
--- /dev/null
+++ b/nova/tests/unit/conf_fixture.py
@@ -0,0 +1,64 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+
+from nova import config
+from nova import ipv6
+from nova.openstack.common.fixture import config as config_fixture
+from nova import paths
+from nova.tests.unit import utils
+
+CONF = cfg.CONF
+CONF.import_opt('use_ipv6', 'nova.netconf')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('scheduler_driver', 'nova.scheduler.manager')
+CONF.import_opt('fake_network', 'nova.network.linux_net')
+CONF.import_opt('network_size', 'nova.network.manager')
+CONF.import_opt('num_networks', 'nova.network.manager')
+CONF.import_opt('floating_ip_dns_manager', 'nova.network.floating_ips')
+CONF.import_opt('instance_dns_manager', 'nova.network.floating_ips')
+CONF.import_opt('policy_file', 'nova.openstack.common.policy')
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+CONF.import_opt('api_paste_config', 'nova.wsgi')
+
+
+class ConfFixture(config_fixture.Config):
+ """Fixture to manage global conf settings."""
+ def setUp(self):
+ super(ConfFixture, self).setUp()
+ self.conf.set_default('api_paste_config',
+ paths.state_path_def('etc/nova/api-paste.ini'))
+ self.conf.set_default('host', 'fake-mini')
+ self.conf.set_default('compute_driver',
+ 'nova.virt.fake.SmallFakeDriver')
+ self.conf.set_default('fake_network', True)
+ self.conf.set_default('flat_network_bridge', 'br100')
+ self.conf.set_default('floating_ip_dns_manager',
+ 'nova.tests.unit.utils.dns_manager')
+ self.conf.set_default('instance_dns_manager',
+ 'nova.tests.unit.utils.dns_manager')
+ self.conf.set_default('network_size', 8)
+ self.conf.set_default('num_networks', 2)
+ self.conf.set_default('use_ipv6', True)
+ self.conf.set_default('vlan_interface', 'eth0')
+ self.conf.set_default('auth_strategy', 'noauth')
+ config.parse_args([], default_config_files=[])
+ self.conf.set_default('connection', "sqlite://", group='database')
+ self.conf.set_default('sqlite_synchronous', False, group='database')
+ self.addCleanup(utils.cleanup_dns_managers)
+ self.addCleanup(ipv6.api.reset_backend)
diff --git a/nova/tests/console/__init__.py b/nova/tests/unit/console/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/console/__init__.py
+++ b/nova/tests/unit/console/__init__.py
diff --git a/nova/tests/console/test_console.py b/nova/tests/unit/console/test_console.py
index ba09272978..ba09272978 100644
--- a/nova/tests/console/test_console.py
+++ b/nova/tests/unit/console/test_console.py
diff --git a/nova/tests/console/test_rpcapi.py b/nova/tests/unit/console/test_rpcapi.py
index 690c4bb103..690c4bb103 100644
--- a/nova/tests/console/test_rpcapi.py
+++ b/nova/tests/unit/console/test_rpcapi.py
diff --git a/nova/tests/console/test_serial.py b/nova/tests/unit/console/test_serial.py
index ebdc52dafa..ebdc52dafa 100644
--- a/nova/tests/console/test_serial.py
+++ b/nova/tests/unit/console/test_serial.py
diff --git a/nova/tests/console/test_type.py b/nova/tests/unit/console/test_type.py
index d9a82d7658..d9a82d7658 100644
--- a/nova/tests/console/test_type.py
+++ b/nova/tests/unit/console/test_type.py
diff --git a/nova/tests/console/test_websocketproxy.py b/nova/tests/unit/console/test_websocketproxy.py
index c0526a2cf1..c0526a2cf1 100644
--- a/nova/tests/console/test_websocketproxy.py
+++ b/nova/tests/unit/console/test_websocketproxy.py
diff --git a/nova/tests/consoleauth/__init__.py b/nova/tests/unit/consoleauth/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/consoleauth/__init__.py
+++ b/nova/tests/unit/consoleauth/__init__.py
diff --git a/nova/tests/consoleauth/test_consoleauth.py b/nova/tests/unit/consoleauth/test_consoleauth.py
index 571d54fd92..571d54fd92 100644
--- a/nova/tests/consoleauth/test_consoleauth.py
+++ b/nova/tests/unit/consoleauth/test_consoleauth.py
diff --git a/nova/tests/consoleauth/test_rpcapi.py b/nova/tests/unit/consoleauth/test_rpcapi.py
index eb76acbf34..eb76acbf34 100644
--- a/nova/tests/consoleauth/test_rpcapi.py
+++ b/nova/tests/unit/consoleauth/test_rpcapi.py
diff --git a/nova/tests/db/__init__.py b/nova/tests/unit/db/__init__.py
index fdf33be941..fdf33be941 100644
--- a/nova/tests/db/__init__.py
+++ b/nova/tests/unit/db/__init__.py
diff --git a/nova/tests/db/fakes.py b/nova/tests/unit/db/fakes.py
index 250c664d1e..250c664d1e 100644
--- a/nova/tests/db/fakes.py
+++ b/nova/tests/unit/db/fakes.py
diff --git a/nova/tests/unit/db/test_db_api.py b/nova/tests/unit/db/test_db_api.py
new file mode 100644
index 0000000000..f103dd49ce
--- /dev/null
+++ b/nova/tests/unit/db/test_db_api.py
@@ -0,0 +1,7517 @@
+# encoding=UTF8
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for the DB API."""
+
+import copy
+import datetime
+import types
+import uuid as stdlib_uuid
+
+import iso8601
+import mock
+import netaddr
+from oslo.config import cfg
+from oslo.db import exception as db_exc
+from oslo.db.sqlalchemy import test_base
+from oslo.db.sqlalchemy import utils as sqlalchemyutils
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import six
+from sqlalchemy import Column
+from sqlalchemy.dialects import sqlite
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy.orm import query
+from sqlalchemy import sql
+from sqlalchemy import Table
+
+from nova import block_device
+from nova.compute import arch
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import api as sqlalchemy_api
+from nova.db.sqlalchemy import models
+from nova.db.sqlalchemy import types as col_types
+from nova.db.sqlalchemy import utils as db_utils
+from nova import exception
+from nova.openstack.common import uuidutils
+from nova import quota
+from nova import test
+from nova.tests.unit import matchers
+from nova import utils
+
+CONF = cfg.CONF
+CONF.import_opt('reserved_host_memory_mb', 'nova.compute.resource_tracker')
+CONF.import_opt('reserved_host_disk_mb', 'nova.compute.resource_tracker')
+
+get_engine = sqlalchemy_api.get_engine
+get_session = sqlalchemy_api.get_session
+
+
+def _reservation_get(context, uuid):
+ result = sqlalchemy_api.model_query(context, models.Reservation,
+ read_deleted="no").filter_by(uuid=uuid).first()
+
+ if not result:
+ raise exception.ReservationNotFound(uuid=uuid)
+
+ return result
+
+
+def _quota_reserve(context, project_id, user_id):
+ """Create sample Quota, QuotaUsage and Reservation objects.
+
+ There is no method db.quota_usage_create(), so we have to use
+ db.quota_reserve() for creating QuotaUsage objects.
+
+ Returns reservations uuids.
+
+ """
+ def get_sync(resource, usage):
+ def sync(elevated, project_id, user_id, session):
+ return {resource: usage}
+ return sync
+ quotas = {}
+ user_quotas = {}
+ resources = {}
+ deltas = {}
+ for i in range(3):
+ resource = 'resource%d' % i
+ if i == 2:
+ # test for project level resources
+ resource = 'fixed_ips'
+ quotas[resource] = db.quota_create(context,
+ project_id, resource, i)
+ user_quotas[resource] = quotas[resource]
+ else:
+ quotas[resource] = db.quota_create(context,
+ project_id, resource, i)
+ user_quotas[resource] = db.quota_create(context, project_id,
+ resource, i,
+ user_id=user_id)
+ sync_name = '_sync_%s' % resource
+ resources[resource] = quota.ReservableResource(
+ resource, sync_name, 'quota_res_%d' % i)
+ deltas[resource] = i
+ setattr(sqlalchemy_api, sync_name, get_sync(resource, i))
+ sqlalchemy_api.QUOTA_SYNC_FUNCTIONS[sync_name] = getattr(
+ sqlalchemy_api, sync_name)
+ return db.quota_reserve(context, resources, quotas, user_quotas, deltas,
+ timeutils.utcnow(), CONF.until_refresh,
+ datetime.timedelta(days=1), project_id, user_id)
+
+
+class DbTestCase(test.TestCase):
+ def setUp(self):
+ super(DbTestCase, self).setUp()
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+
+ def create_instance_with_args(self, **kwargs):
+ args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1',
+ 'node': 'node1', 'project_id': self.project_id,
+ 'vm_state': 'fake'}
+ if 'context' in kwargs:
+ ctxt = kwargs.pop('context')
+ args['project_id'] = ctxt.project_id
+ else:
+ ctxt = self.context
+ args.update(kwargs)
+ return db.instance_create(ctxt, args)
+
+ def fake_metadata(self, content):
+ meta = {}
+ for i in range(0, 10):
+ meta["foo%i" % i] = "this is %s item %i" % (content, i)
+ return meta
+
+ def create_metadata_for_instance(self, instance_uuid):
+ meta = self.fake_metadata('metadata')
+ db.instance_metadata_update(self.context, instance_uuid, meta, False)
+ sys_meta = self.fake_metadata('system_metadata')
+ db.instance_system_metadata_update(self.context, instance_uuid,
+ sys_meta, False)
+ return meta, sys_meta
+
+
+class DecoratorTestCase(test.TestCase):
+ def _test_decorator_wraps_helper(self, decorator):
+ def test_func():
+ """Test docstring."""
+
+ decorated_func = decorator(test_func)
+
+ self.assertEqual(test_func.func_name, decorated_func.func_name)
+ self.assertEqual(test_func.__doc__, decorated_func.__doc__)
+ self.assertEqual(test_func.__module__, decorated_func.__module__)
+
+ def test_require_context_decorator_wraps_functions_properly(self):
+ self._test_decorator_wraps_helper(sqlalchemy_api.require_context)
+
+ def test_require_admin_context_decorator_wraps_functions_properly(self):
+ self._test_decorator_wraps_helper(sqlalchemy_api.require_admin_context)
+
+ def test_require_deadlock_retry_wraps_functions_properly(self):
+ self._test_decorator_wraps_helper(sqlalchemy_api._retry_on_deadlock)
+
+
+def _get_fake_aggr_values():
+ return {'name': 'fake_aggregate'}
+
+
+def _get_fake_aggr_metadata():
+ return {'fake_key1': 'fake_value1',
+ 'fake_key2': 'fake_value2',
+ 'availability_zone': 'fake_avail_zone'}
+
+
+def _get_fake_aggr_hosts():
+ return ['foo.openstack.org']
+
+
+def _create_aggregate(context=context.get_admin_context(),
+ values=_get_fake_aggr_values(),
+ metadata=_get_fake_aggr_metadata()):
+ return db.aggregate_create(context, values, metadata)
+
+
+def _create_aggregate_with_hosts(context=context.get_admin_context(),
+ values=_get_fake_aggr_values(),
+ metadata=_get_fake_aggr_metadata(),
+ hosts=_get_fake_aggr_hosts()):
+ result = _create_aggregate(context=context,
+ values=values, metadata=metadata)
+ for host in hosts:
+ db.aggregate_host_add(context, result['id'], host)
+ return result
+
+
+class NotDbApiTestCase(DbTestCase):
+ def setUp(self):
+ super(NotDbApiTestCase, self).setUp()
+ self.flags(connection='notdb://', group='database')
+
+ def test_instance_get_all_by_filters_regex_unsupported_db(self):
+ # Ensure that the 'LIKE' operator is used for unsupported dbs.
+ self.create_instance_with_args(display_name='test1')
+ self.create_instance_with_args(display_name='test2')
+ self.create_instance_with_args(display_name='diff')
+ result = db.instance_get_all_by_filters(self.context,
+ {'display_name': 'test'})
+ self.assertEqual(2, len(result))
+ result = db.instance_get_all_by_filters(self.context,
+ {'display_name': 'di'})
+ self.assertEqual(1, len(result))
+
+ def test_instance_get_all_by_filters_paginate(self):
+ test1 = self.create_instance_with_args(display_name='test1')
+ test2 = self.create_instance_with_args(display_name='test2')
+ test3 = self.create_instance_with_args(display_name='test3')
+
+ result = db.instance_get_all_by_filters(self.context,
+ {'display_name': '%test%'},
+ marker=None)
+ self.assertEqual(3, len(result))
+ result = db.instance_get_all_by_filters(self.context,
+ {'display_name': '%test%'},
+ sort_dir="asc",
+ marker=test1['uuid'])
+ self.assertEqual(2, len(result))
+ result = db.instance_get_all_by_filters(self.context,
+ {'display_name': '%test%'},
+ sort_dir="asc",
+ marker=test2['uuid'])
+ self.assertEqual(1, len(result))
+ result = db.instance_get_all_by_filters(self.context,
+ {'display_name': '%test%'},
+ sort_dir="asc",
+ marker=test3['uuid'])
+ self.assertEqual(0, len(result))
+
+ self.assertRaises(exception.MarkerNotFound,
+ db.instance_get_all_by_filters,
+ self.context, {'display_name': '%test%'},
+ marker=str(stdlib_uuid.uuid4()))
+
+ def test_convert_objects_related_datetimes(self):
+
+ t1 = timeutils.utcnow()
+ t2 = t1 + datetime.timedelta(seconds=10)
+ t3 = t2 + datetime.timedelta(hours=1)
+
+ t2_utc = t2.replace(tzinfo=iso8601.iso8601.Utc())
+ t3_utc = t3.replace(tzinfo=iso8601.iso8601.Utc())
+
+ datetime_keys = ('created_at', 'deleted_at')
+
+ test1 = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
+ expected_dict = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
+ sqlalchemy_api.convert_objects_related_datetimes(test1, *datetime_keys)
+ self.assertEqual(test1, expected_dict)
+
+ test2 = {'created_at': t1, 'deleted_at': t2_utc, 'updated_at': t3}
+ expected_dict = {'created_at': t1, 'deleted_at': t2, 'updated_at': t3}
+ sqlalchemy_api.convert_objects_related_datetimes(test2, *datetime_keys)
+ self.assertEqual(test2, expected_dict)
+
+ test3 = {'deleted_at': t2_utc, 'updated_at': t3_utc}
+ expected_dict = {'deleted_at': t2, 'updated_at': t3_utc}
+ sqlalchemy_api.convert_objects_related_datetimes(test3, *datetime_keys)
+ self.assertEqual(test3, expected_dict)
+
+
+class AggregateDBApiTestCase(test.TestCase):
+ def setUp(self):
+ super(AggregateDBApiTestCase, self).setUp()
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+
+ def test_aggregate_create_no_metadata(self):
+ result = _create_aggregate(metadata=None)
+ self.assertEqual(result['name'], 'fake_aggregate')
+
+ def test_aggregate_create_avoid_name_conflict(self):
+ r1 = _create_aggregate(metadata=None)
+ db.aggregate_delete(context.get_admin_context(), r1['id'])
+ values = {'name': r1['name']}
+ metadata = {'availability_zone': 'new_zone'}
+ r2 = _create_aggregate(values=values, metadata=metadata)
+ self.assertEqual(r2['name'], values['name'])
+ self.assertEqual(r2['availability_zone'],
+ metadata['availability_zone'])
+
+ def test_aggregate_create_raise_exist_exc(self):
+ _create_aggregate(metadata=None)
+ self.assertRaises(exception.AggregateNameExists,
+ _create_aggregate, metadata=None)
+
+ def test_aggregate_get_raise_not_found(self):
+ ctxt = context.get_admin_context()
+ # this does not exist!
+ aggregate_id = 1
+ self.assertRaises(exception.AggregateNotFound,
+ db.aggregate_get,
+ ctxt, aggregate_id)
+
+ def test_aggregate_metadata_get_raise_not_found(self):
+ ctxt = context.get_admin_context()
+ # this does not exist!
+ aggregate_id = 1
+ self.assertRaises(exception.AggregateNotFound,
+ db.aggregate_metadata_get,
+ ctxt, aggregate_id)
+
+ def test_aggregate_create_with_metadata(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt)
+ expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
+ self.assertThat(expected_metadata,
+ matchers.DictMatches(_get_fake_aggr_metadata()))
+
+ def test_aggregate_create_delete_create_with_metadata(self):
+ # test for bug 1052479
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt)
+ expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
+ self.assertThat(expected_metadata,
+ matchers.DictMatches(_get_fake_aggr_metadata()))
+ db.aggregate_delete(ctxt, result['id'])
+ result = _create_aggregate(metadata={'availability_zone':
+ 'fake_avail_zone'})
+ expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
+ self.assertEqual(expected_metadata, {'availability_zone':
+ 'fake_avail_zone'})
+
+ def test_aggregate_get(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate_with_hosts(context=ctxt)
+ expected = db.aggregate_get(ctxt, result['id'])
+ self.assertEqual(_get_fake_aggr_hosts(), expected['hosts'])
+ self.assertEqual(_get_fake_aggr_metadata(), expected['metadetails'])
+
    def test_aggregate_get_by_host(self):
        # Only aggregates that currently contain the queried host are
        # returned; empty, non-matching, and host-removed aggregates are not.
        ctxt = context.get_admin_context()
        values2 = {'name': 'fake_aggregate2'}
        values3 = {'name': 'fake_aggregate3'}
        values4 = {'name': 'fake_aggregate4'}
        values5 = {'name': 'fake_aggregate5'}
        # a1/a2 use the default hosts, which include 'foo.openstack.org'
        # (presumably via _get_fake_aggr_hosts — see the helper).
        a1 = _create_aggregate_with_hosts(context=ctxt)
        a2 = _create_aggregate_with_hosts(context=ctxt, values=values2)
        # a3 has no hosts and should not be in the results.
        _create_aggregate(context=ctxt, values=values3)
        # a4 has no matching hosts.
        _create_aggregate_with_hosts(context=ctxt, values=values4,
                                     hosts=['foo4.openstack.org'])
        # a5 has no matching hosts after deleting the only matching host.
        a5 = _create_aggregate_with_hosts(context=ctxt, values=values5,
                                          hosts=['foo5.openstack.org',
                                                 'foo.openstack.org'])
        db.aggregate_host_delete(ctxt, a5['id'],
                                 'foo.openstack.org')
        r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org')
        self.assertEqual([a1['id'], a2['id']], [x['id'] for x in r1])
+
    def test_aggregate_get_by_host_with_key(self):
        # With a metadata key supplied, only aggregates for the host that
        # also define that key are returned.
        ctxt = context.get_admin_context()
        values2 = {'name': 'fake_aggregate2'}
        values3 = {'name': 'fake_aggregate3'}
        values4 = {'name': 'fake_aggregate4'}
        a1 = _create_aggregate_with_hosts(context=ctxt,
                                          metadata={'goodkey': 'good'})
        # a2 matches the host but lacks 'goodkey'; a3 has no hosts; a4 has
        # the key but a different host.
        _create_aggregate_with_hosts(context=ctxt, values=values2)
        _create_aggregate(context=ctxt, values=values3)
        _create_aggregate_with_hosts(context=ctxt, values=values4,
                                     hosts=['foo4.openstack.org'],
                                     metadata={'goodkey': 'bad'})
        # filter result by key
        r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org', key='goodkey')
        self.assertEqual([a1['id']], [x['id'] for x in r1])
+
    def test_aggregate_metadata_get_by_host(self):
        # Metadata is aggregated across the host's aggregates only; keys
        # from aggregates on other hosts must not leak in.
        ctxt = context.get_admin_context()
        values = {'name': 'fake_aggregate2'}
        values2 = {'name': 'fake_aggregate3'}
        _create_aggregate_with_hosts(context=ctxt)
        _create_aggregate_with_hosts(context=ctxt, values=values)
        _create_aggregate_with_hosts(context=ctxt, values=values2,
                                     hosts=['bar.openstack.org'],
                                     metadata={'badkey': 'bad'})
        r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org')
        # Values are collected into sets keyed by metadata key.
        self.assertEqual(r1['fake_key1'], set(['fake_value1']))
        self.assertNotIn('badkey', r1)
+
    def test_aggregate_metadata_get_by_metadata_key(self):
        # Looking up a single metadata key returns only that key, mapped to
        # the set of its values.
        ctxt = context.get_admin_context()
        values = {'aggregate_id': 'fake_id',
                  'name': 'fake_aggregate'}
        aggr = _create_aggregate_with_hosts(context=ctxt, values=values,
                                            hosts=['bar.openstack.org'],
                                            metadata={'availability_zone':
                                                      'az1'})
        r1 = db.aggregate_metadata_get_by_metadata_key(ctxt, aggr['id'],
                                                       'availability_zone')
        self.assertEqual(r1['availability_zone'], set(['az1']))
        self.assertIn('availability_zone', r1)
        # 'name' is an aggregate column, not metadata, so it must be absent.
        self.assertNotIn('name', r1)
+
    def test_aggregate_metadata_get_by_host_with_key(self):
        # With key= given, only that key is returned, merged across every
        # aggregate containing the host; deleting the metadata removes it.
        ctxt = context.get_admin_context()
        values2 = {'name': 'fake_aggregate12'}
        values3 = {'name': 'fake_aggregate23'}
        a2_hosts = ['foo1.openstack.org', 'foo2.openstack.org']
        a2_metadata = {'good': 'value12', 'bad': 'badvalue12'}
        a3_hosts = ['foo2.openstack.org', 'foo3.openstack.org']
        a3_metadata = {'good': 'value23', 'bad': 'badvalue23'}
        _create_aggregate_with_hosts(context=ctxt)
        _create_aggregate_with_hosts(context=ctxt, values=values2,
                                     hosts=a2_hosts, metadata=a2_metadata)
        a3 = _create_aggregate_with_hosts(context=ctxt, values=values3,
                                          hosts=a3_hosts,
                                          metadata=a3_metadata)
        # foo2 is in both a2 and a3, so 'good' collects both values.
        r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo2.openstack.org',
                                               key='good')
        self.assertEqual(r1['good'], set(['value12', 'value23']))
        self.assertNotIn('fake_key1', r1)
        self.assertNotIn('bad', r1)
        # Delete metadata
        db.aggregate_metadata_delete(ctxt, a3['id'], 'good')
        r2 = db.aggregate_metadata_get_by_host(ctxt, 'foo3.openstack.org',
                                               key='good')
        self.assertNotIn('good', r2)
+
    def test_aggregate_host_get_by_metadata_key(self):
        # Maps each host in any aggregate defining the key to the set of
        # that key's values across the host's aggregates.
        ctxt = context.get_admin_context()
        values2 = {'name': 'fake_aggregate12'}
        values3 = {'name': 'fake_aggregate23'}
        a2_hosts = ['foo1.openstack.org', 'foo2.openstack.org']
        a2_metadata = {'good': 'value12', 'bad': 'badvalue12'}
        a3_hosts = ['foo2.openstack.org', 'foo3.openstack.org']
        a3_metadata = {'good': 'value23', 'bad': 'badvalue23'}
        _create_aggregate_with_hosts(context=ctxt)
        _create_aggregate_with_hosts(context=ctxt, values=values2,
                                     hosts=a2_hosts, metadata=a2_metadata)
        _create_aggregate_with_hosts(context=ctxt, values=values3,
                                     hosts=a3_hosts, metadata=a3_metadata)
        r1 = db.aggregate_host_get_by_metadata_key(ctxt, key='good')
        # foo2 belongs to both a2 and a3, so it accumulates both values.
        self.assertEqual({
            'foo1.openstack.org': set(['value12']),
            'foo2.openstack.org': set(['value12', 'value23']),
            'foo3.openstack.org': set(['value23']),
        }, r1)
        self.assertNotIn('fake_key1', r1)
+
+ def test_aggregate_get_by_host_not_found(self):
+ ctxt = context.get_admin_context()
+ _create_aggregate_with_hosts(context=ctxt)
+ self.assertEqual([], db.aggregate_get_by_host(ctxt, 'unknown_host'))
+
+ def test_aggregate_delete_raise_not_found(self):
+ ctxt = context.get_admin_context()
+ # this does not exist!
+ aggregate_id = 1
+ self.assertRaises(exception.AggregateNotFound,
+ db.aggregate_delete,
+ ctxt, aggregate_id)
+
    def test_aggregate_delete(self):
        # Deletion is a soft delete: the row disappears from normal reads
        # but remains readable with read_deleted='yes'.
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        db.aggregate_delete(ctxt, result['id'])
        expected = db.aggregate_get_all(ctxt)
        self.assertEqual(0, len(expected))
        aggregate = db.aggregate_get(ctxt.elevated(read_deleted='yes'),
                                     result['id'])
        # Soft delete stores the row's own id in the 'deleted' column.
        self.assertEqual(aggregate['deleted'], result['id'])
+
    def test_aggregate_update(self):
        # aggregate_update must change the availability zone.
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata={'availability_zone':
                                                           'fake_avail_zone'})
        self.assertEqual(result['availability_zone'], 'fake_avail_zone')
        new_values = _get_fake_aggr_values()
        new_values['availability_zone'] = 'different_avail_zone'
        updated = db.aggregate_update(ctxt, result['id'], new_values)
        self.assertNotEqual(result['availability_zone'],
                            updated['availability_zone'])
+
    def test_aggregate_update_with_metadata(self):
        # A 'metadata' key inside the update values is applied as aggregate
        # metadata alongside the column updates.
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        values = _get_fake_aggr_values()
        values['metadata'] = _get_fake_aggr_metadata()
        values['availability_zone'] = 'different_avail_zone'
        db.aggregate_update(ctxt, result['id'], values)
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        updated = db.aggregate_get(ctxt, result['id'])
        self.assertThat(values['metadata'],
                        matchers.DictMatches(expected))
        self.assertNotEqual(result['availability_zone'],
                            updated['availability_zone'])
+
    def test_aggregate_update_with_existing_metadata(self):
        # Updating an existing metadata key overwrites its value rather
        # than creating a duplicate entry.
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        values = _get_fake_aggr_values()
        values['metadata'] = _get_fake_aggr_metadata()
        values['metadata']['fake_key1'] = 'foo'
        db.aggregate_update(ctxt, result['id'], values)
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(values['metadata'], matchers.DictMatches(expected))
+
    def test_aggregate_update_zone_with_existing_metadata(self):
        # Updating only the availability zone must preserve the aggregate's
        # other metadata entries.
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        new_zone = {'availability_zone': 'fake_avail_zone_2'}
        metadata = _get_fake_aggr_metadata()
        metadata.update(new_zone)
        db.aggregate_update(ctxt, result['id'], new_zone)
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(metadata, matchers.DictMatches(expected))
+
+ def test_aggregate_update_raise_not_found(self):
+ ctxt = context.get_admin_context()
+ # this does not exist!
+ aggregate_id = 1
+ new_values = _get_fake_aggr_values()
+ self.assertRaises(exception.AggregateNotFound,
+ db.aggregate_update, ctxt, aggregate_id, new_values)
+
    def test_aggregate_update_raise_name_exist(self):
        # Renaming an aggregate to a name already in use raises
        # AggregateNameExists.
        ctxt = context.get_admin_context()
        _create_aggregate(context=ctxt, values={'name': 'test1'},
                          metadata={'availability_zone': 'fake_avail_zone'})
        _create_aggregate(context=ctxt, values={'name': 'test2'},
                          metadata={'availability_zone': 'fake_avail_zone'})
        # NOTE(review): assumes the first created aggregate gets id 1 —
        # relies on a fresh DB with sequential ids.
        aggregate_id = 1
        new_values = {'name': 'test2'}
        self.assertRaises(exception.AggregateNameExists,
                          db.aggregate_update, ctxt, aggregate_id, new_values)
+
+ def test_aggregate_get_all(self):
+ ctxt = context.get_admin_context()
+ counter = 3
+ for c in range(counter):
+ _create_aggregate(context=ctxt,
+ values={'name': 'fake_aggregate_%d' % c},
+ metadata=None)
+ results = db.aggregate_get_all(ctxt)
+ self.assertEqual(len(results), counter)
+
    def test_aggregate_get_all_non_deleted(self):
        # aggregate_get_all excludes soft-deleted aggregates.
        ctxt = context.get_admin_context()
        add_counter = 5
        remove_counter = 2
        aggregates = []
        # Creates add_counter - 1 aggregates (range starts at 1) ...
        for c in range(1, add_counter):
            values = {'name': 'fake_aggregate_%d' % c}
            aggregates.append(_create_aggregate(context=ctxt,
                                                values=values, metadata=None))
        # ... then deletes remove_counter - 1 of them, so the arithmetic
        # below (add_counter - remove_counter) balances out.
        for c in range(1, remove_counter):
            db.aggregate_delete(ctxt, aggregates[c - 1]['id'])
        results = db.aggregate_get_all(ctxt)
        self.assertEqual(len(results), add_counter - remove_counter)
+
    def test_aggregate_metadata_add(self):
        # Metadata added after creation is readable via aggregate_metadata_get.
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)
        metadata = _get_fake_aggr_metadata()
        db.aggregate_metadata_add(ctxt, result['id'], metadata)
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        self.assertThat(metadata, matchers.DictMatches(expected))
+
+ def test_aggregate_metadata_add_and_update(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt)
+ metadata = _get_fake_aggr_metadata()
+ key = metadata.keys()[0]
+ new_metadata = {key: 'foo',
+ 'fake_new_key': 'fake_new_value'}
+ metadata.update(new_metadata)
+ db.aggregate_metadata_add(ctxt, result['id'], new_metadata)
+ expected = db.aggregate_metadata_get(ctxt, result['id'])
+ self.assertThat(metadata, matchers.DictMatches(expected))
+
    def test_aggregate_metadata_add_retry(self):
        # If the metadata query keeps raising DBDuplicateEntry,
        # aggregate_metadata_add retries max_retries times and then
        # re-raises the error.
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata=None)

        def counted():
            # Build a stub that counts its invocations via a function
            # attribute and always fails with DBDuplicateEntry.
            def get_query(context, id, session, read_deleted):
                get_query.counter += 1
                raise db_exc.DBDuplicateEntry
            get_query.counter = 0
            return get_query

        get_query = counted()
        self.stubs.Set(sqlalchemy_api,
                       '_aggregate_metadata_get_query', get_query)
        self.assertRaises(db_exc.DBDuplicateEntry, sqlalchemy_api.
                          aggregate_metadata_add, ctxt, result['id'], {},
                          max_retries=5)
        # One query attempt per retry.
        self.assertEqual(get_query.counter, 5)
+
+ def test_aggregate_metadata_update(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt)
+ metadata = _get_fake_aggr_metadata()
+ key = metadata.keys()[0]
+ db.aggregate_metadata_delete(ctxt, result['id'], key)
+ new_metadata = {key: 'foo'}
+ db.aggregate_metadata_add(ctxt, result['id'], new_metadata)
+ expected = db.aggregate_metadata_get(ctxt, result['id'])
+ metadata[key] = 'foo'
+ self.assertThat(metadata, matchers.DictMatches(expected))
+
+ def test_aggregate_metadata_delete(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt, metadata=None)
+ metadata = _get_fake_aggr_metadata()
+ db.aggregate_metadata_add(ctxt, result['id'], metadata)
+ db.aggregate_metadata_delete(ctxt, result['id'], metadata.keys()[0])
+ expected = db.aggregate_metadata_get(ctxt, result['id'])
+ del metadata[metadata.keys()[0]]
+ self.assertThat(metadata, matchers.DictMatches(expected))
+
    def test_aggregate_remove_availability_zone(self):
        # Deleting the 'availability_zone' metadata also clears the
        # aggregate's availability_zone attribute.
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt, metadata={'availability_zone':
                                                           'fake_avail_zone'})
        db.aggregate_metadata_delete(ctxt, result['id'], 'availability_zone')
        expected = db.aggregate_metadata_get(ctxt, result['id'])
        aggregate = db.aggregate_get(ctxt, result['id'])
        self.assertIsNone(aggregate['availability_zone'])
        self.assertThat({}, matchers.DictMatches(expected))
+
+ def test_aggregate_metadata_delete_raise_not_found(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate(context=ctxt)
+ self.assertRaises(exception.AggregateMetadataNotFound,
+ db.aggregate_metadata_delete,
+ ctxt, result['id'], 'foo_key')
+
    def test_aggregate_host_add(self):
        # Hosts added at creation time are returned by aggregate_host_get_all.
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        expected = db.aggregate_host_get_all(ctxt, result['id'])
        self.assertEqual(_get_fake_aggr_hosts(), expected)
+
    def test_aggregate_host_re_add(self):
        # A host can be re-added after deletion without creating a
        # duplicate entry.
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        host = _get_fake_aggr_hosts()[0]
        db.aggregate_host_delete(ctxt, result['id'], host)
        db.aggregate_host_add(ctxt, result['id'], host)
        expected = db.aggregate_host_get_all(ctxt, result['id'])
        # NOTE(review): asserting exactly 1 assumes _get_fake_aggr_hosts()
        # provides a single host — confirm against the helper.
        self.assertEqual(len(expected), 1)
+
    def test_aggregate_host_add_duplicate_works(self):
        # The same host may belong to two different aggregates (here with
        # different availability zones); both report the same host list.
        ctxt = context.get_admin_context()
        r1 = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        r2 = _create_aggregate_with_hosts(ctxt,
                                          values={'name': 'fake_aggregate2'},
                                          metadata={'availability_zone':
                                                    'fake_avail_zone2'})
        h1 = db.aggregate_host_get_all(ctxt, r1['id'])
        h2 = db.aggregate_host_get_all(ctxt, r2['id'])
        self.assertEqual(h1, h2)
+
+ def test_aggregate_host_add_duplicate_raise_exist_exc(self):
+ ctxt = context.get_admin_context()
+ result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
+ self.assertRaises(exception.AggregateHostExists,
+ db.aggregate_host_add,
+ ctxt, result['id'], _get_fake_aggr_hosts()[0])
+
    def test_aggregate_host_add_raise_not_found(self):
        # Adding a host to a nonexistent aggregate raises AggregateNotFound.
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        host = _get_fake_aggr_hosts()[0]
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_host_add,
                          ctxt, aggregate_id, host)
+
    def test_aggregate_host_delete(self):
        # Removing the only host leaves the aggregate's host list empty.
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        db.aggregate_host_delete(ctxt, result['id'],
                                 _get_fake_aggr_hosts()[0])
        expected = db.aggregate_host_get_all(ctxt, result['id'])
        self.assertEqual(0, len(expected))
+
    def test_aggregate_host_delete_raise_not_found(self):
        # Deleting a host the aggregate does not contain raises
        # AggregateHostNotFound.
        ctxt = context.get_admin_context()
        result = _create_aggregate(context=ctxt)
        self.assertRaises(exception.AggregateHostNotFound,
                          db.aggregate_host_delete,
                          ctxt, result['id'], _get_fake_aggr_hosts()[0])
+
+
class SqlAlchemyDbApiTestCase(DbTestCase):
    """Tests exercising db/sqlalchemy_api instance query helpers."""

    def test_instance_get_all_by_host(self):
        # Only the two instances on 'host1' are returned; the instance on
        # 'host2' is filtered out.
        ctxt = context.get_admin_context()

        self.create_instance_with_args()
        self.create_instance_with_args()
        self.create_instance_with_args(host='host2')
        # NOTE: this previously called _instance_get_all_uuids_by_host,
        # making it an exact duplicate of the next test; exercise the
        # public instance_get_all_by_host API the test name refers to.
        result = db.instance_get_all_by_host(ctxt, 'host1')
        self.assertEqual(2, len(result))

    def test_instance_get_all_uuids_by_host(self):
        # The private helper returns unicode instance uuids for the host.
        ctxt = context.get_admin_context()
        self.create_instance_with_args()
        self.create_instance_with_args()
        self.create_instance_with_args(host='host2')
        result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1')
        self.assertEqual(2, len(result))
        self.assertEqual(types.UnicodeType, type(result[0]))

    def test_instance_get_active_by_window_joined(self):
        # Instances are "active in the window" when launched before the
        # window's end and not terminated before its start; columns_to_join
        # controls which relations are eagerly loaded.
        now = datetime.datetime(2013, 10, 10, 17, 16, 37, 156701)
        start_time = now - datetime.timedelta(minutes=10)
        now1 = now + datetime.timedelta(minutes=1)
        now2 = now + datetime.timedelta(minutes=2)
        now3 = now + datetime.timedelta(minutes=3)
        ctxt = context.get_admin_context()
        # used for testing columns_to_join
        network_info = jsonutils.dumps({'ckey': 'cvalue'})
        sample_data = {
            'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
            'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
            'info_cache': {'network_info': network_info},
        }
        self.create_instance_with_args(launched_at=now, **sample_data)
        self.create_instance_with_args(launched_at=now1, terminated_at=now2,
                                       **sample_data)
        self.create_instance_with_args(launched_at=now2, terminated_at=now3,
                                       **sample_data)
        self.create_instance_with_args(launched_at=now3, terminated_at=None,
                                       **sample_data)

        result = sqlalchemy_api.instance_get_active_by_window_joined(
            ctxt, begin=now)
        self.assertEqual(4, len(result))
        # verify that all default columns are joined
        meta = utils.metadata_to_dict(result[0]['metadata'])
        self.assertEqual(sample_data['metadata'], meta)
        sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
        self.assertEqual(sample_data['system_metadata'], sys_meta)
        self.assertIn('info_cache', result[0])

        result = sqlalchemy_api.instance_get_active_by_window_joined(
            ctxt, begin=now3, columns_to_join=['info_cache'])
        self.assertEqual(2, len(result))
        # verify that only info_cache is loaded
        meta = utils.metadata_to_dict(result[0]['metadata'])
        self.assertEqual({}, meta)
        self.assertIn('info_cache', result[0])

        result = sqlalchemy_api.instance_get_active_by_window_joined(
            ctxt, begin=start_time, end=now)
        self.assertEqual(0, len(result))

        result = sqlalchemy_api.instance_get_active_by_window_joined(
            ctxt, begin=start_time, end=now2,
            columns_to_join=['system_metadata'])
        self.assertEqual(2, len(result))
        # verify that only system_metadata is loaded
        meta = utils.metadata_to_dict(result[0]['metadata'])
        self.assertEqual({}, meta)
        sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
        self.assertEqual(sample_data['system_metadata'], sys_meta)
        self.assertNotIn('info_cache', result[0])

        result = sqlalchemy_api.instance_get_active_by_window_joined(
            ctxt, begin=now2, end=now3,
            columns_to_join=['metadata', 'info_cache'])
        self.assertEqual(2, len(result))
        # verify that only metadata and info_cache are loaded
        meta = utils.metadata_to_dict(result[0]['metadata'])
        self.assertEqual(sample_data['metadata'], meta)
        sys_meta = utils.metadata_to_dict(result[0]['system_metadata'])
        self.assertEqual({}, sys_meta)
        self.assertIn('info_cache', result[0])
        self.assertEqual(network_info, result[0]['info_cache']['network_info'])
+
+
class ProcessSortParamTestCase(test.TestCase):
    """Unit tests for sqlalchemy_api.process_sort_params.

    The function merges caller-supplied sort keys/directions with default
    keys, padding missing directions from either the supplied list or the
    default direction.
    """

    def test_process_sort_params_defaults(self):
        '''Verifies default sort parameters.'''
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params([], [])
        self.assertEqual(['created_at', 'id'], sort_keys)
        self.assertEqual(['asc', 'asc'], sort_dirs)

        # None behaves the same as empty lists.
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(None, None)
        self.assertEqual(['created_at', 'id'], sort_keys)
        self.assertEqual(['asc', 'asc'], sort_dirs)

    def test_process_sort_params_override_default_keys(self):
        '''Verifies that the default keys can be overridden.'''
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            [], [], default_keys=['key1', 'key2', 'key3'])
        self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
        self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)

    def test_process_sort_params_override_default_dir(self):
        '''Verifies that the default direction can be overridden.'''
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            [], [], default_dir='dir1')
        self.assertEqual(['created_at', 'id'], sort_keys)
        self.assertEqual(['dir1', 'dir1'], sort_dirs)

    def test_process_sort_params_override_default_key_and_dir(self):
        '''Verifies that the default key and dir can be overridden.'''
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            [], [], default_keys=['key1', 'key2', 'key3'],
            default_dir='dir1')
        self.assertEqual(['key1', 'key2', 'key3'], sort_keys)
        self.assertEqual(['dir1', 'dir1', 'dir1'], sort_dirs)

        # Empty default keys yield empty results.
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            [], [], default_keys=[], default_dir='dir1')
        self.assertEqual([], sort_keys)
        self.assertEqual([], sort_dirs)

    def test_process_sort_params_non_default(self):
        '''Verifies that non-default keys are added correctly.'''
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['key1', 'key2'], ['asc', 'desc'])
        self.assertEqual(['key1', 'key2', 'created_at', 'id'], sort_keys)
        # First sort_dir in list is used when adding the default keys
        self.assertEqual(['asc', 'desc', 'asc', 'asc'], sort_dirs)

    def test_process_sort_params_default(self):
        '''Verifies that default keys are added correctly.'''
        # 'id' is already supplied, so only 'created_at' is appended.
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2'], ['asc', 'desc'])
        self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
        self.assertEqual(['asc', 'desc', 'asc'], sort_dirs)

        # Include default key value, rely on default direction
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2'], [])
        self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
        self.assertEqual(['asc', 'asc', 'asc'], sort_dirs)

    def test_process_sort_params_default_dir(self):
        '''Verifies that the default dir is applied to all keys.'''
        # Direction is set, ignore default dir
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2'], ['desc'], default_dir='dir')
        self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
        self.assertEqual(['desc', 'desc', 'desc'], sort_dirs)

        # But should be used if no direction is set
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2'], [], default_dir='dir')
        self.assertEqual(['id', 'key2', 'created_at'], sort_keys)
        self.assertEqual(['dir', 'dir', 'dir'], sort_dirs)

    def test_process_sort_params_unequal_length(self):
        '''Verifies that a sort direction list is applied correctly.'''
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2', 'key3'], ['desc'])
        self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
        self.assertEqual(['desc', 'desc', 'desc', 'desc'], sort_dirs)

        # Missing directions are padded with the first element of the
        # supplied sort_dirs list.
        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2', 'key3'], ['desc', 'asc'])
        self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
        self.assertEqual(['desc', 'asc', 'desc', 'desc'], sort_dirs)

        sort_keys, sort_dirs = sqlalchemy_api.process_sort_params(
            ['id', 'key2', 'key3'], ['desc', 'asc', 'asc'])
        self.assertEqual(['id', 'key2', 'key3', 'created_at'], sort_keys)
        self.assertEqual(['desc', 'asc', 'asc', 'desc'], sort_dirs)

    def test_process_sort_params_extra_dirs_lengths(self):
        '''InvalidInput raised if more directions are given.'''
        self.assertRaises(exception.InvalidInput,
                          sqlalchemy_api.process_sort_params,
                          ['key1', 'key2'],
                          ['asc', 'desc', 'desc'])
+
+
class MigrationTestCase(test.TestCase):
    """Tests for db.api.migration_* methods.

    setUp seeds a fixed set of migrations; the per-test expected counts
    below depend directly on that fixture, so keep both in sync.
    """

    def setUp(self):
        super(MigrationTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

        # Fixture: 2 in-progress host1->host2 (a->b), 3 finished/failed
        # ones (reverted/confirmed/error), one reversed-direction migration
        # and two migrations not involving host1 at all.
        self._create()
        self._create()
        self._create(status='reverted')
        self._create(status='confirmed')
        self._create(status='error')
        self._create(source_compute='host2', source_node='b',
                     dest_compute='host1', dest_node='a')
        self._create(source_compute='host2', dest_compute='host3')
        self._create(source_compute='host3', dest_compute='host4')

    def _create(self, status='migrating', source_compute='host1',
                source_node='a', dest_compute='host2', dest_node='b',
                system_metadata=None):
        # Create a backing instance (on the source host), optionally with
        # system metadata, then the migration row that references it.
        values = {'host': source_compute}
        instance = db.instance_create(self.ctxt, values)
        if system_metadata:
            db.instance_system_metadata_update(self.ctxt, instance['uuid'],
                                               system_metadata, False)

        values = {'status': status, 'source_compute': source_compute,
                  'source_node': source_node, 'dest_compute': dest_compute,
                  'dest_node': dest_node, 'instance_uuid': instance['uuid']}
        db.migration_create(self.ctxt, values)

    def _assert_in_progress(self, migrations):
        # "In progress" means none of the terminal statuses.
        for migration in migrations:
            self.assertNotEqual('confirmed', migration['status'])
            self.assertNotEqual('reverted', migration['status'])
            self.assertNotEqual('error', migration['status'])

    def test_migration_get_in_progress_joins(self):
        # The instance (and its system_metadata) is join-loaded onto the
        # returned migration.
        self._create(source_compute='foo', system_metadata={'foo': 'bar'})
        migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
                                                                   'foo', 'a')
        system_metadata = migrations[0]['instance']['system_metadata'][0]
        self.assertEqual(system_metadata['key'], 'foo')
        self.assertEqual(system_metadata['value'], 'bar')

    def test_in_progress_host1_nodea(self):
        migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
                                                                   'host1', 'a')
        # 2 as source + 1 as dest
        self.assertEqual(3, len(migrations))
        self._assert_in_progress(migrations)

    def test_in_progress_host1_nodeb(self):
        migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
                                                                   'host1', 'b')
        # some migrations are to/from host1, but none with a node 'b'
        self.assertEqual(0, len(migrations))

    def test_in_progress_host2_nodeb(self):
        migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
                                                                   'host2', 'b')
        # 2 as dest, 1 as source
        self.assertEqual(3, len(migrations))
        self._assert_in_progress(migrations)

    def test_instance_join(self):
        # Each returned migration carries its joined instance.
        migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
                                                                   'host2', 'b')
        for migration in migrations:
            instance = migration['instance']
            self.assertEqual(migration['instance_uuid'], instance['uuid'])

    def test_get_migrations_by_filters(self):
        # The 'host' filter matches either source or destination compute.
        filters = {"status": "migrating", "host": "host3"}
        migrations = db.migration_get_all_by_filters(self.ctxt, filters)
        self.assertEqual(2, len(migrations))
        for migration in migrations:
            self.assertEqual(filters["status"], migration['status'])
            hosts = [migration['source_compute'], migration['dest_compute']]
            self.assertIn(filters["host"], hosts)

    def test_only_admin_can_get_all_migrations_by_filters(self):
        user_ctxt = context.RequestContext(user_id=None, project_id=None,
                                           is_admin=False, read_deleted="no",
                                           overwrite=False)

        self.assertRaises(exception.AdminRequired,
                          db.migration_get_all_by_filters, user_ctxt, {})

    def test_migration_get_unconfirmed_by_dest_compute(self):
        # Returns 'finished' migrations to the given dest host whose last
        # update is older than the confirm window (here 10 seconds).
        # Ensure no migrations are returned.
        results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
                                                               'fake_host')
        self.assertEqual(0, len(results))

        # Ensure no migrations are returned.
        results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
                                                               'fake_host2')
        self.assertEqual(0, len(results))

        updated_at = datetime.datetime(2000, 1, 1, 12, 0, 0)
        values = {"status": "finished", "updated_at": updated_at,
                  "dest_compute": "fake_host2"}
        migration = db.migration_create(self.ctxt, values)

        # Ensure different host is not returned
        results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
                                                               'fake_host')
        self.assertEqual(0, len(results))

        # Ensure one migration older than 10 seconds is returned.
        results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
                                                               'fake_host2')
        self.assertEqual(1, len(results))
        db.migration_update(self.ctxt, migration['id'],
                            {"status": "CONFIRMED"})

        # Ensure the new migration is not returned.
        updated_at = timeutils.utcnow()
        values = {"status": "finished", "updated_at": updated_at,
                  "dest_compute": "fake_host2"}
        migration = db.migration_create(self.ctxt, values)
        results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
                                                               "fake_host2")
        self.assertEqual(0, len(results))
        db.migration_update(self.ctxt, migration['id'],
                            {"status": "CONFIRMED"})

    def test_migration_update_not_found(self):
        self.assertRaises(exception.MigrationNotFound,
                          db.migration_update, self.ctxt, 42, {})
+
+
class ModelsObjectComparatorMixin(object):
    """Assertion helpers for comparing DB model objects field by field."""

    def _dict_from_object(self, obj, ignored_keys):
        """Return obj as a dict, dropping any keys in ignored_keys."""
        if ignored_keys is None:
            ignored_keys = []
        # NOTE: items() instead of the Python 2-only iteritems() keeps the
        # helper working on both interpreters (assumes the model objects
        # expose items(), as oslo.db ModelBase does — confirm).
        return dict([(k, v) for k, v in obj.items()
                     if k not in ignored_keys])

    def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
        """Assert two objects have identical non-ignored fields."""
        obj1 = self._dict_from_object(obj1, ignored_keys)
        obj2 = self._dict_from_object(obj2, ignored_keys)

        self.assertEqual(len(obj1),
                         len(obj2),
                         "Keys mismatch: %s" %
                         str(set(obj1.keys()) ^ set(obj2.keys())))
        for key, value in obj1.items():
            self.assertEqual(value, obj2[key])

    def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
        """Assert two sequences hold equal objects, ignoring order."""
        obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
        sort_key = lambda d: [d[k] for k in sorted(d)]
        conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj),
                                           key=sort_key)

        self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2))

    def _assertEqualOrderedListOfObjects(self, objs1, objs2,
                                         ignored_keys=None):
        """Assert two sequences hold equal objects in the same order."""
        obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
        # list() so the comparison also works where map() returns an
        # iterator (Python 3): two distinct iterators never compare equal.
        conv = lambda obj: list(map(obj_to_dict, obj))

        self.assertEqual(conv(objs1), conv(objs2))

    def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2):
        """Assert two lists of primitives hold the same elements."""
        self.assertEqual(len(primitives1), len(primitives2))
        for primitive in primitives1:
            self.assertIn(primitive, primitives2)

        for primitive in primitives2:
            self.assertIn(primitive, primitives1)
+
+
class InstanceSystemMetadataTestCase(test.TestCase):

    """Tests for db.api.instance_system_metadata_* methods."""

    def setUp(self):
        # Fixture: one instance created with a single system metadata
        # pair {'key': 'value'}.
        super(InstanceSystemMetadataTestCase, self).setUp()
        values = {'host': 'h1', 'project_id': 'p1',
                  'system_metadata': {'key': 'value'}}
        self.ctxt = context.get_admin_context()
        self.instance = db.instance_create(self.ctxt, values)

    def test_instance_system_metadata_get(self):
        metadata = db.instance_system_metadata_get(self.ctxt,
                                                   self.instance['uuid'])
        self.assertEqual(metadata, {'key': 'value'})

    def test_instance_system_metadata_update_new_pair(self):
        # delete=False: new pairs are merged with the existing ones.
        db.instance_system_metadata_update(
            self.ctxt, self.instance['uuid'],
            {'new_key': 'new_value'}, False)
        metadata = db.instance_system_metadata_get(self.ctxt,
                                                   self.instance['uuid'])
        self.assertEqual(metadata, {'key': 'value', 'new_key': 'new_value'})

    def test_instance_system_metadata_update_existent_pair(self):
        # Updating an existing key replaces its value.
        db.instance_system_metadata_update(
            self.ctxt, self.instance['uuid'],
            {'key': 'new_value'}, True)
        metadata = db.instance_system_metadata_get(self.ctxt,
                                                   self.instance['uuid'])
        self.assertEqual(metadata, {'key': 'new_value'})

    def test_instance_system_metadata_update_delete_true(self):
        # delete=True: the supplied dict replaces the metadata wholesale,
        # removing keys not present in it.
        db.instance_system_metadata_update(
            self.ctxt, self.instance['uuid'],
            {'new_key': 'new_value'}, True)
        metadata = db.instance_system_metadata_get(self.ctxt,
                                                   self.instance['uuid'])
        self.assertEqual(metadata, {'new_key': 'new_value'})

    @test.testtools.skip("bug 1189462")
    def test_instance_system_metadata_update_nonexistent(self):
        # Skipped until bug 1189462 is fixed: updating metadata of an
        # unknown instance should raise InstanceNotFound.
        self.assertRaises(exception.InstanceNotFound,
                          db.instance_system_metadata_update,
                          self.ctxt, 'nonexistent-uuid',
                          {'key': 'value'}, True)
+
+
class ReservationTestCase(test.TestCase, ModelsObjectComparatorMixin):

    """Tests for db.api.reservation_* methods."""

    def setUp(self):
        # _quota_reserve seeds reservations for project1/user1; the
        # expected quota-usage dicts in the tests below mirror that seed.
        super(ReservationTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

        self.reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
        usage = db.quota_usage_get(self.ctxt, 'project1', 'resource1', 'user1')

        self.values = {'uuid': 'sample-uuid',
                       'project_id': 'project1',
                       'user_id': 'user1',
                       'resource': 'resource1',
                       'delta': 42,
                       'expire': timeutils.utcnow() + datetime.timedelta(days=1),
                       'usage': {'id': usage.id}}

    def test_reservation_commit(self):
        # Commit converts 'reserved' amounts into 'in_use' and removes the
        # reservation rows.
        expected = {'project_id': 'project1', 'user_id': 'user1',
                    'resource0': {'reserved': 0, 'in_use': 0},
                    'resource1': {'reserved': 1, 'in_use': 1},
                    'fixed_ips': {'reserved': 2, 'in_use': 2}}
        self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
            self.ctxt, 'project1', 'user1'))
        _reservation_get(self.ctxt, self.reservations[0])
        db.reservation_commit(self.ctxt, self.reservations, 'project1',
                              'user1')
        self.assertRaises(exception.ReservationNotFound,
                          _reservation_get, self.ctxt, self.reservations[0])
        expected = {'project_id': 'project1', 'user_id': 'user1',
                    'resource0': {'reserved': 0, 'in_use': 0},
                    'resource1': {'reserved': 0, 'in_use': 2},
                    'fixed_ips': {'reserved': 0, 'in_use': 4}}
        self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
            self.ctxt, 'project1', 'user1'))

    def test_reservation_rollback(self):
        # Rollback discards 'reserved' amounts, leaving 'in_use' untouched,
        # and removes the reservation rows.
        expected = {'project_id': 'project1', 'user_id': 'user1',
                    'resource0': {'reserved': 0, 'in_use': 0},
                    'resource1': {'reserved': 1, 'in_use': 1},
                    'fixed_ips': {'reserved': 2, 'in_use': 2}}
        self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
            self.ctxt, 'project1', 'user1'))
        _reservation_get(self.ctxt, self.reservations[0])
        db.reservation_rollback(self.ctxt, self.reservations, 'project1',
                                'user1')
        self.assertRaises(exception.ReservationNotFound,
                          _reservation_get, self.ctxt, self.reservations[0])
        expected = {'project_id': 'project1', 'user_id': 'user1',
                    'resource0': {'reserved': 0, 'in_use': 0},
                    'resource1': {'reserved': 0, 'in_use': 1},
                    'fixed_ips': {'reserved': 0, 'in_use': 2}}
        self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
            self.ctxt, 'project1', 'user1'))

    def test_reservation_expire(self):
        # Expiring behaves like rollback for reservations past their
        # expiry time.
        db.reservation_expire(self.ctxt)

        expected = {'project_id': 'project1', 'user_id': 'user1',
                    'resource0': {'reserved': 0, 'in_use': 0},
                    'resource1': {'reserved': 0, 'in_use': 1},
                    'fixed_ips': {'reserved': 0, 'in_use': 2}}
        self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
            self.ctxt, 'project1', 'user1'))
+
+
+class SecurityGroupRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
+ def setUp(self):
+ super(SecurityGroupRuleTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ def _get_base_values(self):
+ return {
+ 'name': 'fake_sec_group',
+ 'description': 'fake_sec_group_descr',
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'instances': []
+ }
+
+ def _get_base_rule_values(self):
+ return {
+ 'protocol': "tcp",
+ 'from_port': 80,
+ 'to_port': 8080,
+ 'cidr': None,
+ 'deleted': 0,
+ 'deleted_at': None,
+ 'grantee_group': None,
+ 'updated_at': None
+ }
+
+ def _create_security_group(self, values):
+ v = self._get_base_values()
+ v.update(values)
+ return db.security_group_create(self.ctxt, v)
+
+ def _create_security_group_rule(self, values):
+ v = self._get_base_rule_values()
+ v.update(values)
+ return db.security_group_rule_create(self.ctxt, v)
+
+ def test_security_group_rule_create(self):
+ security_group_rule = self._create_security_group_rule({})
+ self.assertIsNotNone(security_group_rule['id'])
+ for key, value in self._get_base_rule_values().items():
+ self.assertEqual(value, security_group_rule[key])
+
+ def _test_security_group_rule_get_by_security_group(self, columns=None):
+ instance = db.instance_create(self.ctxt,
+ {'system_metadata': {'foo': 'bar'}})
+ security_group = self._create_security_group({
+ 'instances': [instance]})
+ security_group_rule = self._create_security_group_rule(
+ {'parent_group': security_group, 'grantee_group': security_group})
+ security_group_rule1 = self._create_security_group_rule(
+ {'parent_group': security_group, 'grantee_group': security_group})
+ found_rules = db.security_group_rule_get_by_security_group(
+ self.ctxt, security_group['id'], columns_to_join=columns)
+ self.assertEqual(len(found_rules), 2)
+ rules_ids = [security_group_rule['id'], security_group_rule1['id']]
+ for rule in found_rules:
+ if columns is None:
+ self.assertIn('grantee_group', dict(rule.iteritems()))
+ self.assertIn('instances',
+ dict(rule.grantee_group.iteritems()))
+ self.assertIn(
+ 'system_metadata',
+ dict(rule.grantee_group.instances[0].iteritems()))
+ self.assertIn(rule['id'], rules_ids)
+ else:
+ self.assertNotIn('grantee_group', dict(rule.iteritems()))
+
+ def test_security_group_rule_get_by_security_group(self):
+ self._test_security_group_rule_get_by_security_group()
+
+ def test_security_group_rule_get_by_security_group_no_joins(self):
+ self._test_security_group_rule_get_by_security_group(columns=[])
+
+ def test_security_group_rule_get_by_security_group_grantee(self):
+ security_group = self._create_security_group({})
+ security_group_rule = self._create_security_group_rule(
+ {'grantee_group': security_group})
+ rules = db.security_group_rule_get_by_security_group_grantee(self.ctxt,
+ security_group['id'])
+ self.assertEqual(len(rules), 1)
+ self.assertEqual(rules[0]['id'], security_group_rule['id'])
+
+ def test_security_group_rule_destroy(self):
+ self._create_security_group({'name': 'fake1'})
+ self._create_security_group({'name': 'fake2'})
+ security_group_rule1 = self._create_security_group_rule({})
+ security_group_rule2 = self._create_security_group_rule({})
+ db.security_group_rule_destroy(self.ctxt, security_group_rule1['id'])
+ self.assertRaises(exception.SecurityGroupNotFound,
+ db.security_group_rule_get,
+ self.ctxt, security_group_rule1['id'])
+ self._assertEqualObjects(db.security_group_rule_get(self.ctxt,
+ security_group_rule2['id']),
+ security_group_rule2, ['grantee_group'])
+
+ def test_security_group_rule_destroy_not_found_exception(self):
+ self.assertRaises(exception.SecurityGroupNotFound,
+ db.security_group_rule_destroy, self.ctxt, 100500)
+
+ def test_security_group_rule_get(self):
+ security_group_rule1 = (
+ self._create_security_group_rule({}))
+ self._create_security_group_rule({})
+ real_security_group_rule = db.security_group_rule_get(self.ctxt,
+ security_group_rule1['id'])
+ self._assertEqualObjects(security_group_rule1,
+ real_security_group_rule, ['grantee_group'])
+
+ def test_security_group_rule_get_not_found_exception(self):
+ self.assertRaises(exception.SecurityGroupNotFound,
+ db.security_group_rule_get, self.ctxt, 100500)
+
+ def test_security_group_rule_count_by_group(self):
+ sg1 = self._create_security_group({'name': 'fake1'})
+ sg2 = self._create_security_group({'name': 'fake2'})
+ rules_by_group = {sg1: [], sg2: []}
+ for group in rules_by_group:
+ rules = rules_by_group[group]
+ for i in range(0, 10):
+ rules.append(
+ self._create_security_group_rule({'parent_group_id':
+ group['id']}))
+ db.security_group_rule_destroy(self.ctxt,
+ rules_by_group[sg1][0]['id'])
+ counted_groups = [db.security_group_rule_count_by_group(self.ctxt,
+ group['id'])
+ for group in [sg1, sg2]]
+ expected = [9, 10]
+ self.assertEqual(counted_groups, expected)
+
+
+class SecurityGroupTestCase(test.TestCase, ModelsObjectComparatorMixin):
+ def setUp(self):
+ super(SecurityGroupTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ def _get_base_values(self):
+ return {
+ 'name': 'fake_sec_group',
+ 'description': 'fake_sec_group_descr',
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'instances': []
+ }
+
+ def _create_security_group(self, values):
+ v = self._get_base_values()
+ v.update(values)
+ return db.security_group_create(self.ctxt, v)
+
+ def test_security_group_create(self):
+ security_group = self._create_security_group({})
+ self.assertIsNotNone(security_group['id'])
+ for key, value in self._get_base_values().iteritems():
+ self.assertEqual(value, security_group[key])
+
+ def test_security_group_destroy(self):
+ security_group1 = self._create_security_group({})
+ security_group2 = \
+ self._create_security_group({'name': 'fake_sec_group2'})
+
+ db.security_group_destroy(self.ctxt, security_group1['id'])
+ self.assertRaises(exception.SecurityGroupNotFound,
+ db.security_group_get,
+ self.ctxt, security_group1['id'])
+ self._assertEqualObjects(db.security_group_get(
+ self.ctxt, security_group2['id'],
+ columns_to_join=['instances']), security_group2)
+
+ def test_security_group_get(self):
+ security_group1 = self._create_security_group({})
+ self._create_security_group({'name': 'fake_sec_group2'})
+ real_security_group = db.security_group_get(self.ctxt,
+ security_group1['id'],
+ columns_to_join=['instances'])
+ self._assertEqualObjects(security_group1,
+ real_security_group)
+
+ def test_security_group_get_with_instance_columns(self):
+ instance = db.instance_create(self.ctxt,
+ {'system_metadata': {'foo': 'bar'}})
+ secgroup = self._create_security_group({'instances': [instance]})
+ secgroup = db.security_group_get(
+ self.ctxt, secgroup['id'],
+ columns_to_join=['instances.system_metadata'])
+ inst = secgroup.instances[0]
+ self.assertIn('system_metadata', dict(inst.iteritems()).keys())
+
+ def test_security_group_get_no_instances(self):
+ instance = db.instance_create(self.ctxt, {})
+ sid = self._create_security_group({'instances': [instance]})['id']
+
+ security_group = db.security_group_get(self.ctxt, sid,
+ columns_to_join=['instances'])
+ self.assertIn('instances', security_group.__dict__)
+
+ security_group = db.security_group_get(self.ctxt, sid)
+ self.assertNotIn('instances', security_group.__dict__)
+
+ def test_security_group_get_not_found_exception(self):
+ self.assertRaises(exception.SecurityGroupNotFound,
+ db.security_group_get, self.ctxt, 100500)
+
+ def test_security_group_get_by_name(self):
+ security_group1 = self._create_security_group({'name': 'fake1'})
+ security_group2 = self._create_security_group({'name': 'fake2'})
+
+ real_security_group1 = db.security_group_get_by_name(
+ self.ctxt,
+ security_group1['project_id'],
+ security_group1['name'],
+ columns_to_join=None)
+ real_security_group2 = db.security_group_get_by_name(
+ self.ctxt,
+ security_group2['project_id'],
+ security_group2['name'],
+ columns_to_join=None)
+ self._assertEqualObjects(security_group1, real_security_group1)
+ self._assertEqualObjects(security_group2, real_security_group2)
+
+ def test_security_group_get_by_project(self):
+ security_group1 = self._create_security_group(
+ {'name': 'fake1', 'project_id': 'fake_proj1'})
+ security_group2 = self._create_security_group(
+ {'name': 'fake2', 'project_id': 'fake_proj2'})
+
+ real1 = db.security_group_get_by_project(
+ self.ctxt,
+ security_group1['project_id'])
+ real2 = db.security_group_get_by_project(
+ self.ctxt,
+ security_group2['project_id'])
+
+ expected1, expected2 = [security_group1], [security_group2]
+ self._assertEqualListsOfObjects(expected1, real1,
+ ignored_keys=['instances'])
+ self._assertEqualListsOfObjects(expected2, real2,
+ ignored_keys=['instances'])
+
+ def test_security_group_get_by_instance(self):
+ instance = db.instance_create(self.ctxt, dict(host='foo'))
+ values = [
+ {'name': 'fake1', 'instances': [instance]},
+ {'name': 'fake2', 'instances': [instance]},
+ {'name': 'fake3', 'instances': []},
+ ]
+ security_groups = [self._create_security_group(vals)
+ for vals in values]
+
+ real = db.security_group_get_by_instance(self.ctxt,
+ instance['uuid'])
+ expected = security_groups[:2]
+ self._assertEqualListsOfObjects(expected, real,
+ ignored_keys=['instances'])
+
+ def test_security_group_get_all(self):
+ values = [
+ {'name': 'fake1', 'project_id': 'fake_proj1'},
+ {'name': 'fake2', 'project_id': 'fake_proj2'},
+ ]
+ security_groups = [self._create_security_group(vals)
+ for vals in values]
+
+ real = db.security_group_get_all(self.ctxt)
+
+ self._assertEqualListsOfObjects(security_groups, real,
+ ignored_keys=['instances'])
+
+ def test_security_group_in_use(self):
+ instance = db.instance_create(self.ctxt, dict(host='foo'))
+ values = [
+ {'instances': [instance],
+ 'name': 'fake_in_use'},
+ {'instances': []},
+ ]
+
+ security_groups = [self._create_security_group(vals)
+ for vals in values]
+
+ real = []
+ for security_group in security_groups:
+ in_use = db.security_group_in_use(self.ctxt,
+ security_group['id'])
+ real.append(in_use)
+ expected = [True, False]
+
+ self.assertEqual(expected, real)
+
+ def test_security_group_ensure_default(self):
+ self.ctxt.project_id = 'fake'
+ self.ctxt.user_id = 'fake'
+ self.assertEqual(0, len(db.security_group_get_by_project(
+ self.ctxt,
+ self.ctxt.project_id)))
+
+ db.security_group_ensure_default(self.ctxt)
+
+ security_groups = db.security_group_get_by_project(
+ self.ctxt,
+ self.ctxt.project_id)
+
+ self.assertEqual(1, len(security_groups))
+ self.assertEqual("default", security_groups[0]["name"])
+
+ usage = db.quota_usage_get(self.ctxt,
+ self.ctxt.project_id,
+ 'security_groups',
+ self.ctxt.user_id)
+ self.assertEqual(1, usage.in_use)
+
+ @mock.patch.object(db.sqlalchemy.api, '_security_group_get_by_names')
+ def test_security_group_ensure_default_called_concurrently(self, sg_mock):
+ # make sure NotFound is always raised here to trick Nova to insert the
+ # duplicate security group entry
+ sg_mock.side_effect = exception.NotFound
+
+ # create the first db entry
+ self.ctxt.project_id = 1
+ db.security_group_ensure_default(self.ctxt)
+ security_groups = db.security_group_get_by_project(
+ self.ctxt,
+ self.ctxt.project_id)
+ self.assertEqual(1, len(security_groups))
+
+ # create the second one and ensure the exception is handled properly
+ default_group = db.security_group_ensure_default(self.ctxt)
+ self.assertEqual('default', default_group.name)
+
+ def test_security_group_update(self):
+ security_group = self._create_security_group({})
+ new_values = {
+ 'name': 'sec_group1',
+ 'description': 'sec_group_descr1',
+ 'user_id': 'fake_user1',
+ 'project_id': 'fake_proj1',
+ }
+
+ updated_group = db.security_group_update(self.ctxt,
+ security_group['id'],
+ new_values,
+ columns_to_join=['rules.grantee_group'])
+ for key, value in new_values.iteritems():
+ self.assertEqual(updated_group[key], value)
+ self.assertEqual(updated_group['rules'], [])
+
+ def test_security_group_update_to_duplicate(self):
+ self._create_security_group(
+ {'name': 'fake1', 'project_id': 'fake_proj1'})
+ security_group2 = self._create_security_group(
+ {'name': 'fake1', 'project_id': 'fake_proj2'})
+
+ self.assertRaises(exception.SecurityGroupExists,
+ db.security_group_update,
+ self.ctxt, security_group2['id'],
+ {'project_id': 'fake_proj1'})
+
+
+class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
+
+ """Tests for db.api.instance_* methods."""
+
+ sample_data = {
+ 'project_id': 'project1',
+ 'hostname': 'example.com',
+ 'host': 'h1',
+ 'node': 'n1',
+ 'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
+ 'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
+ 'info_cache': {'ckey': 'cvalue'},
+ }
+
+ def setUp(self):
+ super(InstanceTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+
+ def _assertEqualInstances(self, instance1, instance2):
+ self._assertEqualObjects(instance1, instance2,
+ ignored_keys=['metadata', 'system_metadata', 'info_cache'])
+
+ def _assertEqualListsOfInstances(self, list1, list2):
+ self._assertEqualListsOfObjects(list1, list2,
+ ignored_keys=['metadata', 'system_metadata', 'info_cache'])
+
+ def create_instance_with_args(self, **kwargs):
+ if 'context' in kwargs:
+ context = kwargs.pop('context')
+ else:
+ context = self.ctxt
+ args = self.sample_data.copy()
+ args.update(kwargs)
+ return db.instance_create(context, args)
+
+ def test_instance_create(self):
+ instance = self.create_instance_with_args()
+ self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
+
+ def test_instance_create_with_object_values(self):
+ values = {
+ 'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
+ 'access_ip_v6': netaddr.IPAddress('::1'),
+ }
+ dt_keys = ('created_at', 'deleted_at', 'updated_at',
+ 'launched_at', 'terminated_at', 'scheduled_at')
+ dt = timeutils.utcnow()
+ dt_utc = dt.replace(tzinfo=iso8601.iso8601.Utc())
+ for key in dt_keys:
+ values[key] = dt_utc
+ inst = db.instance_create(self.ctxt, values)
+ self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
+ self.assertEqual(inst['access_ip_v6'], '::1')
+ for key in dt_keys:
+ self.assertEqual(inst[key], dt)
+
+ def test_instance_update_with_object_values(self):
+ values = {
+ 'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
+ 'access_ip_v6': netaddr.IPAddress('::1'),
+ }
+ dt_keys = ('created_at', 'deleted_at', 'updated_at',
+ 'launched_at', 'terminated_at', 'scheduled_at')
+ dt = timeutils.utcnow()
+ dt_utc = dt.replace(tzinfo=iso8601.iso8601.Utc())
+ for key in dt_keys:
+ values[key] = dt_utc
+ inst = db.instance_create(self.ctxt, {})
+ inst = db.instance_update(self.ctxt, inst['uuid'], values)
+ self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
+ self.assertEqual(inst['access_ip_v6'], '::1')
+ for key in dt_keys:
+ self.assertEqual(inst[key], dt)
+
+ def test_instance_update_no_metadata_clobber(self):
+ meta = {'foo': 'bar'}
+ sys_meta = {'sfoo': 'sbar'}
+ values = {
+ 'metadata': meta,
+ 'system_metadata': sys_meta,
+ }
+ inst = db.instance_create(self.ctxt, {})
+ inst = db.instance_update(self.ctxt, inst['uuid'], values)
+ self.assertEqual({'foo': 'bar'}, meta)
+ self.assertEqual({'sfoo': 'sbar'}, sys_meta)
+
+ def test_instance_get_all_with_meta(self):
+ inst = self.create_instance_with_args()
+ for inst in db.instance_get_all(self.ctxt):
+ meta = utils.metadata_to_dict(inst['metadata'])
+ self.assertEqual(meta, self.sample_data['metadata'])
+ sys_meta = utils.metadata_to_dict(inst['system_metadata'])
+ self.assertEqual(sys_meta, self.sample_data['system_metadata'])
+
+ def test_instance_update(self):
+ instance = self.create_instance_with_args()
+ metadata = {'host': 'bar', 'key2': 'wuff'}
+ system_metadata = {'original_image_ref': 'baz'}
+ # Update the metadata
+ db.instance_update(self.ctxt, instance['uuid'], {'metadata': metadata,
+ 'system_metadata': system_metadata})
+ # Retrieve the user-provided metadata to ensure it was successfully
+ # updated
+ self.assertEqual(metadata,
+ db.instance_metadata_get(self.ctxt, instance['uuid']))
+ self.assertEqual(system_metadata,
+ db.instance_system_metadata_get(self.ctxt, instance['uuid']))
+
+ def test_instance_update_bad_str_dates(self):
+ instance = self.create_instance_with_args()
+ values = {'created_at': '123'}
+ self.assertRaises(ValueError,
+ db.instance_update,
+ self.ctxt, instance['uuid'], values)
+
+ def test_instance_update_good_str_dates(self):
+ instance = self.create_instance_with_args()
+ values = {'created_at': '2011-01-31T00:00:00.0'}
+ actual = db.instance_update(self.ctxt, instance['uuid'], values)
+ expected = datetime.datetime(2011, 1, 31)
+ self.assertEqual(expected, actual["created_at"])
+
+ def test_create_instance_unique_hostname(self):
+ context1 = context.RequestContext('user1', 'p1')
+ context2 = context.RequestContext('user2', 'p2')
+ self.create_instance_with_args(hostname='h1', project_id='p1')
+
+ # With scope 'global' any duplicate should fail, be it this project:
+ self.flags(osapi_compute_unique_server_name_scope='global')
+ self.assertRaises(exception.InstanceExists,
+ self.create_instance_with_args,
+ context=context1,
+ hostname='h1', project_id='p3')
+ # or another:
+ self.assertRaises(exception.InstanceExists,
+ self.create_instance_with_args,
+ context=context2,
+ hostname='h1', project_id='p2')
+ # With scope 'project' a duplicate in the project should fail:
+ self.flags(osapi_compute_unique_server_name_scope='project')
+ self.assertRaises(exception.InstanceExists,
+ self.create_instance_with_args,
+ context=context1,
+ hostname='h1', project_id='p1')
+ # With scope 'project' a duplicate in a different project should work:
+ self.flags(osapi_compute_unique_server_name_scope='project')
+ self.create_instance_with_args(context=context2, hostname='h2')
+ self.flags(osapi_compute_unique_server_name_scope=None)
+
+ def test_instance_get_all_by_filters_with_meta(self):
+ inst = self.create_instance_with_args()
+ for inst in db.instance_get_all_by_filters(self.ctxt, {}):
+ meta = utils.metadata_to_dict(inst['metadata'])
+ self.assertEqual(meta, self.sample_data['metadata'])
+ sys_meta = utils.metadata_to_dict(inst['system_metadata'])
+ self.assertEqual(sys_meta, self.sample_data['system_metadata'])
+
+ def test_instance_get_all_by_filters_without_meta(self):
+ inst = self.create_instance_with_args()
+ result = db.instance_get_all_by_filters(self.ctxt, {},
+ columns_to_join=[])
+ for inst in result:
+ meta = utils.metadata_to_dict(inst['metadata'])
+ self.assertEqual(meta, {})
+ sys_meta = utils.metadata_to_dict(inst['system_metadata'])
+ self.assertEqual(sys_meta, {})
+
+ def test_instance_get_all_by_filters(self):
+ instances = [self.create_instance_with_args() for i in range(3)]
+ filtered_instances = db.instance_get_all_by_filters(self.ctxt, {})
+ self._assertEqualListsOfInstances(instances, filtered_instances)
+
+ def test_instance_get_all_by_filters_zero_limit(self):
+ self.create_instance_with_args()
+ instances = db.instance_get_all_by_filters(self.ctxt, {}, limit=0)
+ self.assertEqual([], instances)
+
+ def test_instance_metadata_get_multi(self):
+ uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
+ meta = sqlalchemy_api._instance_metadata_get_multi(self.ctxt, uuids)
+ for row in meta:
+ self.assertIn(row['instance_uuid'], uuids)
+
+ def test_instance_metadata_get_multi_no_uuids(self):
+ self.mox.StubOutWithMock(query.Query, 'filter')
+ self.mox.ReplayAll()
+ sqlalchemy_api._instance_metadata_get_multi(self.ctxt, [])
+
+ def test_instance_system_system_metadata_get_multi(self):
+ uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
+ sys_meta = sqlalchemy_api._instance_system_metadata_get_multi(
+ self.ctxt, uuids)
+ for row in sys_meta:
+ self.assertIn(row['instance_uuid'], uuids)
+
+ def test_instance_system_metadata_get_multi_no_uuids(self):
+ self.mox.StubOutWithMock(query.Query, 'filter')
+ self.mox.ReplayAll()
+ sqlalchemy_api._instance_system_metadata_get_multi(self.ctxt, [])
+
+ def test_instance_get_all_by_filters_regex(self):
+ i1 = self.create_instance_with_args(display_name='test1')
+ i2 = self.create_instance_with_args(display_name='teeeest2')
+ self.create_instance_with_args(display_name='diff')
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'display_name': 't.*st.'})
+ self._assertEqualListsOfInstances(result, [i1, i2])
+
+ def test_instance_get_all_by_filters_changes_since(self):
+ i1 = self.create_instance_with_args(updated_at=
+ '2013-12-05T15:03:25.000000')
+ i2 = self.create_instance_with_args(updated_at=
+ '2013-12-05T15:03:26.000000')
+ changes_since = iso8601.parse_date('2013-12-05T15:03:25.000000')
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'changes-since':
+ changes_since})
+ self._assertEqualListsOfInstances([i1, i2], result)
+
+ changes_since = iso8601.parse_date('2013-12-05T15:03:26.000000')
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'changes-since':
+ changes_since})
+ self._assertEqualListsOfInstances([i2], result)
+
+ def test_instance_get_all_by_filters_exact_match(self):
+ instance = self.create_instance_with_args(host='host1')
+ self.create_instance_with_args(host='host12')
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'host': 'host1'})
+ self._assertEqualListsOfInstances([instance], result)
+
+ def test_instance_get_all_by_filters_metadata(self):
+ instance = self.create_instance_with_args(metadata={'foo': 'bar'})
+ self.create_instance_with_args()
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'metadata': {'foo': 'bar'}})
+ self._assertEqualListsOfInstances([instance], result)
+
+ def test_instance_get_all_by_filters_system_metadata(self):
+ instance = self.create_instance_with_args(
+ system_metadata={'foo': 'bar'})
+ self.create_instance_with_args()
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'system_metadata': {'foo': 'bar'}})
+ self._assertEqualListsOfInstances([instance], result)
+
+ def test_instance_get_all_by_filters_unicode_value(self):
+ instance = self.create_instance_with_args(display_name=u'test♥')
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'display_name': u'test'})
+ self._assertEqualListsOfInstances([instance], result)
+
+ def test_instance_get_all_by_filters_tags(self):
+ instance = self.create_instance_with_args(
+ metadata={'foo': 'bar'})
+ self.create_instance_with_args()
+ # For format 'tag-'
+ result = db.instance_get_all_by_filters(
+ self.ctxt, {'filter': [
+ {'name': 'tag-key', 'value': 'foo'},
+ {'name': 'tag-value', 'value': 'bar'},
+ ]})
+ self._assertEqualListsOfInstances([instance], result)
+ # For format 'tag:'
+ result = db.instance_get_all_by_filters(
+ self.ctxt, {'filter': [
+ {'name': 'tag:foo', 'value': 'bar'},
+ ]})
+ self._assertEqualListsOfInstances([instance], result)
+ # For non-existent tag
+ result = db.instance_get_all_by_filters(
+ self.ctxt, {'filter': [
+ {'name': 'tag:foo', 'value': 'barred'},
+ ]})
+ self.assertEqual([], result)
+
+ # Confirm with deleted tags
+ db.instance_metadata_delete(self.ctxt, instance['uuid'], 'foo')
+ # For format 'tag-'
+ result = db.instance_get_all_by_filters(
+ self.ctxt, {'filter': [
+ {'name': 'tag-key', 'value': 'foo'},
+ ]})
+ self.assertEqual([], result)
+ result = db.instance_get_all_by_filters(
+ self.ctxt, {'filter': [
+ {'name': 'tag-value', 'value': 'bar'}
+ ]})
+ self.assertEqual([], result)
+ # For format 'tag:'
+ result = db.instance_get_all_by_filters(
+ self.ctxt, {'filter': [
+ {'name': 'tag:foo', 'value': 'bar'},
+ ]})
+ self.assertEqual([], result)
+
+ def test_instance_get_by_uuid(self):
+ inst = self.create_instance_with_args()
+ result = db.instance_get_by_uuid(self.ctxt, inst['uuid'])
+ self._assertEqualInstances(inst, result)
+
+ def test_instance_get_by_uuid_join_empty(self):
+ inst = self.create_instance_with_args()
+ result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
+ columns_to_join=[])
+ meta = utils.metadata_to_dict(result['metadata'])
+ self.assertEqual(meta, {})
+ sys_meta = utils.metadata_to_dict(result['system_metadata'])
+ self.assertEqual(sys_meta, {})
+
+ def test_instance_get_by_uuid_join_meta(self):
+ inst = self.create_instance_with_args()
+ result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
+ columns_to_join=['metadata'])
+ meta = utils.metadata_to_dict(result['metadata'])
+ self.assertEqual(meta, self.sample_data['metadata'])
+ sys_meta = utils.metadata_to_dict(result['system_metadata'])
+ self.assertEqual(sys_meta, {})
+
+ def test_instance_get_by_uuid_join_sys_meta(self):
+ inst = self.create_instance_with_args()
+ result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
+ columns_to_join=['system_metadata'])
+ meta = utils.metadata_to_dict(result['metadata'])
+ self.assertEqual(meta, {})
+ sys_meta = utils.metadata_to_dict(result['system_metadata'])
+ self.assertEqual(sys_meta, self.sample_data['system_metadata'])
+
+ def test_instance_get_all_by_filters_deleted(self):
+ inst1 = self.create_instance_with_args()
+ inst2 = self.create_instance_with_args(reservation_id='b')
+ db.instance_destroy(self.ctxt, inst1['uuid'])
+ result = db.instance_get_all_by_filters(self.ctxt, {})
+ self._assertEqualListsOfObjects([inst1, inst2], result,
+ ignored_keys=['metadata', 'system_metadata',
+ 'deleted', 'deleted_at', 'info_cache',
+ 'pci_devices'])
+
+ def test_instance_get_all_by_filters_deleted_and_soft_deleted(self):
+ inst1 = self.create_instance_with_args()
+ inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
+ self.create_instance_with_args()
+ db.instance_destroy(self.ctxt, inst1['uuid'])
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'deleted': True})
+ self._assertEqualListsOfObjects([inst1, inst2], result,
+ ignored_keys=['metadata', 'system_metadata',
+ 'deleted', 'deleted_at', 'info_cache',
+ 'pci_devices'])
+
+ def test_instance_get_all_by_filters_deleted_no_soft_deleted(self):
+ inst1 = self.create_instance_with_args()
+ self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
+ self.create_instance_with_args()
+ db.instance_destroy(self.ctxt, inst1['uuid'])
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'deleted': True,
+ 'soft_deleted': False})
+ self._assertEqualListsOfObjects([inst1], result,
+ ignored_keys=['deleted', 'deleted_at', 'metadata',
+ 'system_metadata', 'info_cache', 'pci_devices'])
+
+ def test_instance_get_all_by_filters_alive_and_soft_deleted(self):
+ inst1 = self.create_instance_with_args()
+ inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
+ inst3 = self.create_instance_with_args()
+ db.instance_destroy(self.ctxt, inst1['uuid'])
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'deleted': False,
+ 'soft_deleted': True})
+ self._assertEqualListsOfInstances([inst2, inst3], result)
+
+ def test_instance_get_all_by_filters_not_deleted(self):
+ inst1 = self.create_instance_with_args()
+ self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
+ inst3 = self.create_instance_with_args()
+ inst4 = self.create_instance_with_args(vm_state=vm_states.ACTIVE)
+ db.instance_destroy(self.ctxt, inst1['uuid'])
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'deleted': False})
+ self.assertIsNone(inst3.vm_state)
+ self._assertEqualListsOfInstances([inst3, inst4], result)
+
+ def test_instance_get_all_by_filters_cleaned(self):
+ inst1 = self.create_instance_with_args()
+ inst2 = self.create_instance_with_args(reservation_id='b')
+ db.instance_update(self.ctxt, inst1['uuid'], {'cleaned': 1})
+ result = db.instance_get_all_by_filters(self.ctxt, {})
+ self.assertEqual(2, len(result))
+ self.assertIn(inst1['uuid'], [result[0]['uuid'], result[1]['uuid']])
+ self.assertIn(inst2['uuid'], [result[0]['uuid'], result[1]['uuid']])
+ if inst1['uuid'] == result[0]['uuid']:
+ self.assertTrue(result[0]['cleaned'])
+ self.assertFalse(result[1]['cleaned'])
+ else:
+ self.assertTrue(result[1]['cleaned'])
+ self.assertFalse(result[0]['cleaned'])
+
+ def test_instance_get_all_by_host_and_node_no_join(self):
+ instance = self.create_instance_with_args()
+ result = db.instance_get_all_by_host_and_node(self.ctxt, 'h1', 'n1')
+ self.assertEqual(result[0]['uuid'], instance['uuid'])
+ self.assertEqual(result[0]['system_metadata'], [])
+
+ def test_instance_get_all_hung_in_rebooting(self):
+ # Ensure no instances are returned.
+ results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
+ self.assertEqual([], results)
+
+ # Ensure one rebooting instance with updated_at older than 10 seconds
+ # is returned.
+ instance = self.create_instance_with_args(task_state="rebooting",
+ updated_at=datetime.datetime(2000, 1, 1, 12, 0, 0))
+ results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
+ self._assertEqualListsOfObjects([instance], results,
+ ignored_keys=['task_state', 'info_cache', 'security_groups',
+ 'metadata', 'system_metadata', 'pci_devices'])
+ db.instance_update(self.ctxt, instance['uuid'], {"task_state": None})
+
+ # Ensure the newly rebooted instance is not returned.
+ instance = self.create_instance_with_args(task_state="rebooting",
+ updated_at=timeutils.utcnow())
+ results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
+ self.assertEqual([], results)
+
+ def test_instance_update_with_expected_vm_state(self):
+ instance = self.create_instance_with_args(vm_state='foo')
+ db.instance_update(self.ctxt, instance['uuid'], {'host': 'h1',
+ 'expected_vm_state': ('foo', 'bar')})
+
+ def test_instance_update_with_unexpected_vm_state(self):
+ instance = self.create_instance_with_args(vm_state='foo')
+ self.assertRaises(exception.UnexpectedVMStateError,
+ db.instance_update, self.ctxt, instance['uuid'],
+ {'host': 'h1', 'expected_vm_state': ('spam', 'bar')})
+
+ def test_instance_update_with_instance_uuid(self):
+ # test instance_update() works when an instance UUID is passed.
+ ctxt = context.get_admin_context()
+
+ # Create an instance with some metadata
+ values = {'metadata': {'host': 'foo', 'key1': 'meow'},
+ 'system_metadata': {'original_image_ref': 'blah'}}
+ instance = db.instance_create(ctxt, values)
+
+ # Update the metadata
+ values = {'metadata': {'host': 'bar', 'key2': 'wuff'},
+ 'system_metadata': {'original_image_ref': 'baz'}}
+ db.instance_update(ctxt, instance['uuid'], values)
+
+ # Retrieve the user-provided metadata to ensure it was successfully
+ # updated
+ instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
+ self.assertEqual('bar', instance_meta['host'])
+ self.assertEqual('wuff', instance_meta['key2'])
+ self.assertNotIn('key1', instance_meta)
+
+ # Retrieve the system metadata to ensure it was successfully updated
+ system_meta = db.instance_system_metadata_get(ctxt, instance['uuid'])
+ self.assertEqual('baz', system_meta['original_image_ref'])
+
+ def test_delete_instance_metadata_on_instance_destroy(self):
+ ctxt = context.get_admin_context()
+ # Create an instance with some metadata
+ values = {'metadata': {'host': 'foo', 'key1': 'meow'},
+ 'system_metadata': {'original_image_ref': 'blah'}}
+ instance = db.instance_create(ctxt, values)
+ instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
+ self.assertEqual('foo', instance_meta['host'])
+ self.assertEqual('meow', instance_meta['key1'])
+ db.instance_destroy(ctxt, instance['uuid'])
+ instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
+ # Make sure instance metadata is deleted as well
+ self.assertEqual({}, instance_meta)
+
+ def test_delete_instance_faults_on_instance_destroy(self):
+ ctxt = context.get_admin_context()
+ uuid = str(stdlib_uuid.uuid4())
+ # Create faults
+ db.instance_create(ctxt, {'uuid': uuid})
+
+ fault_values = {
+ 'message': 'message',
+ 'details': 'detail',
+ 'instance_uuid': uuid,
+ 'code': 404,
+ 'host': 'localhost'
+ }
+ fault = db.instance_fault_create(ctxt, fault_values)
+
+ # Retrieve the fault to ensure it was successfully added
+ faults = db.instance_fault_get_by_instance_uuids(ctxt, [uuid])
+ self.assertEqual(1, len(faults[uuid]))
+ self._assertEqualObjects(fault, faults[uuid][0])
+ db.instance_destroy(ctxt, uuid)
+ faults = db.instance_fault_get_by_instance_uuids(ctxt, [uuid])
+ # Make sure instance faults is deleted as well
+ self.assertEqual(0, len(faults[uuid]))
+
+ def test_instance_update_with_and_get_original(self):
+ instance = self.create_instance_with_args(vm_state='building')
+ (old_ref, new_ref) = db.instance_update_and_get_original(self.ctxt,
+ instance['uuid'], {'vm_state': 'needscoffee'})
+ self.assertEqual('building', old_ref['vm_state'])
+ self.assertEqual('needscoffee', new_ref['vm_state'])
+
+ def test_instance_update_and_get_original_metadata(self):
+ instance = self.create_instance_with_args()
+ columns_to_join = ['metadata']
+ (old_ref, new_ref) = db.instance_update_and_get_original(
+ self.ctxt, instance['uuid'], {'vm_state': 'needscoffee'},
+ columns_to_join=columns_to_join)
+ meta = utils.metadata_to_dict(new_ref['metadata'])
+ self.assertEqual(meta, self.sample_data['metadata'])
+ sys_meta = utils.metadata_to_dict(new_ref['system_metadata'])
+ self.assertEqual(sys_meta, {})
+
+ def test_instance_update_and_get_original_metadata_none_join(self):
+ instance = self.create_instance_with_args()
+ (old_ref, new_ref) = db.instance_update_and_get_original(
+ self.ctxt, instance['uuid'], {'metadata': {'mk1': 'mv3'}})
+ meta = utils.metadata_to_dict(new_ref['metadata'])
+ self.assertEqual(meta, {'mk1': 'mv3'})
+
    def test_instance_update_unique_name(self):
        """Hostname uniqueness on update follows the configured scope.

        Exercises osapi_compute_unique_server_name_scope unset, 'global'
        and 'project' against instances spread over two projects.
        """
        context1 = context.RequestContext('user1', 'p1')
        context2 = context.RequestContext('user2', 'p2')

        inst1 = self.create_instance_with_args(context=context1,
                                               project_id='p1',
                                               hostname='fake_name1')
        inst2 = self.create_instance_with_args(context=context1,
                                               project_id='p1',
                                               hostname='fake_name2')
        inst3 = self.create_instance_with_args(context=context2,
                                               project_id='p2',
                                               hostname='fake_name3')
        # osapi_compute_unique_server_name_scope is unset so this should work:
        db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_name2'})
        db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_name1'})

        # With scope 'global' any duplicate should fail.
        self.flags(osapi_compute_unique_server_name_scope='global')
        self.assertRaises(exception.InstanceExists,
                          db.instance_update,
                          context1,
                          inst2['uuid'],
                          {'hostname': 'fake_name1'})
        self.assertRaises(exception.InstanceExists,
                          db.instance_update,
                          context2,
                          inst3['uuid'],
                          {'hostname': 'fake_name1'})
        # But we should definitely be able to update our name if we aren't
        # really changing it.  ('fake_name1' -> 'fake_NAME' succeeding here,
        # and 'fake_NAME' colliding below, shows a case-insensitive check.)
        db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_NAME'})

        # With scope 'project' a duplicate in the project should fail:
        self.flags(osapi_compute_unique_server_name_scope='project')
        self.assertRaises(exception.InstanceExists, db.instance_update,
                          context1, inst2['uuid'], {'hostname': 'fake_NAME'})

        # With scope 'project' a duplicate in a different project should work:
        self.flags(osapi_compute_unique_server_name_scope='project')
        db.instance_update(context2, inst3['uuid'], {'hostname': 'fake_NAME'})
+
+ def _test_instance_update_updates_metadata(self, metadata_type):
+ instance = self.create_instance_with_args()
+
+ def set_and_check(meta):
+ inst = db.instance_update(self.ctxt, instance['uuid'],
+ {metadata_type: dict(meta)})
+ _meta = utils.metadata_to_dict(inst[metadata_type])
+ self.assertEqual(meta, _meta)
+
+ meta = {'speed': '88', 'units': 'MPH'}
+ set_and_check(meta)
+ meta['gigawatts'] = '1.21'
+ set_and_check(meta)
+ del meta['gigawatts']
+ set_and_check(meta)
+
    def test_security_group_in_use(self):
        # NOTE(review): this test only creates an instance and makes no
        # assertions; the name suggests it should exercise security-group
        # "in use" behavior -- confirm the intent or extend the test.
        db.instance_create(self.ctxt, dict(host='foo'))
+
+ def test_instance_update_updates_system_metadata(self):
+ # Ensure that system_metadata is updated during instance_update
+ self._test_instance_update_updates_metadata('system_metadata')
+
+ def test_instance_update_updates_metadata(self):
+ # Ensure that metadata is updated during instance_update
+ self._test_instance_update_updates_metadata('metadata')
+
+ def test_instance_floating_address_get_all(self):
+ ctxt = context.get_admin_context()
+
+ instance1 = db.instance_create(ctxt, {'host': 'h1', 'hostname': 'n1'})
+ instance2 = db.instance_create(ctxt, {'host': 'h2', 'hostname': 'n2'})
+
+ fixed_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
+ float_addresses = ['2.1.1.1', '2.1.1.2', '2.1.1.3']
+ instance_uuids = [instance1['uuid'], instance1['uuid'],
+ instance2['uuid']]
+
+ for fixed_addr, float_addr, instance_uuid in zip(fixed_addresses,
+ float_addresses,
+ instance_uuids):
+ db.fixed_ip_create(ctxt, {'address': fixed_addr,
+ 'instance_uuid': instance_uuid})
+ fixed_id = db.fixed_ip_get_by_address(ctxt, fixed_addr)['id']
+ db.floating_ip_create(ctxt,
+ {'address': float_addr,
+ 'fixed_ip_id': fixed_id})
+
+ real_float_addresses = \
+ db.instance_floating_address_get_all(ctxt, instance_uuids[0])
+ self.assertEqual(set(float_addresses[:2]), set(real_float_addresses))
+ real_float_addresses = \
+ db.instance_floating_address_get_all(ctxt, instance_uuids[2])
+ self.assertEqual(set([float_addresses[2]]), set(real_float_addresses))
+
+ self.assertRaises(exception.InvalidUUID,
+ db.instance_floating_address_get_all,
+ ctxt, 'invalid_uuid')
+
+ def test_instance_stringified_ips(self):
+ instance = self.create_instance_with_args()
+ instance = db.instance_update(
+ self.ctxt, instance['uuid'],
+ {'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
+ 'access_ip_v6': netaddr.IPAddress('::1')})
+ self.assertIsInstance(instance['access_ip_v4'], six.string_types)
+ self.assertIsInstance(instance['access_ip_v6'], six.string_types)
+ instance = db.instance_get_by_uuid(self.ctxt, instance['uuid'])
+ self.assertIsInstance(instance['access_ip_v4'], six.string_types)
+ self.assertIsInstance(instance['access_ip_v6'], six.string_types)
+
+ def test_instance_destroy(self):
+ ctxt = context.get_admin_context()
+ values = {
+ 'metadata': {'key': 'value'}
+ }
+ inst_uuid = self.create_instance_with_args(**values)['uuid']
+ db.instance_destroy(ctxt, inst_uuid)
+
+ self.assertRaises(exception.InstanceNotFound,
+ db.instance_get, ctxt, inst_uuid)
+ self.assertIsNone(db.instance_info_cache_get(ctxt, inst_uuid))
+ self.assertEqual({}, db.instance_metadata_get(ctxt, inst_uuid))
+
+ def test_instance_destroy_already_destroyed(self):
+ ctxt = context.get_admin_context()
+ instance = self.create_instance_with_args()
+ db.instance_destroy(ctxt, instance['uuid'])
+ self.assertRaises(exception.InstanceNotFound,
+ db.instance_destroy, ctxt, instance['uuid'])
+
+
class InstanceMetadataTestCase(test.TestCase):

    """Tests for db.api.instance_metadata_* methods."""

    def setUp(self):
        super(InstanceMetadataTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def test_instance_metadata_get(self):
        """Metadata stored at create time is returned by the getter."""
        inst = db.instance_create(self.ctxt,
                                  {'metadata': {'key': 'value'}})
        self.assertEqual({'key': 'value'},
                         db.instance_metadata_get(self.ctxt, inst['uuid']))

    def test_instance_metadata_delete(self):
        """Deleting one key leaves the remaining metadata intact."""
        inst = db.instance_create(self.ctxt,
                                  {'metadata': {'key': 'val',
                                                'key1': 'val1'}})
        db.instance_metadata_delete(self.ctxt, inst['uuid'], 'key1')
        self.assertEqual({'key': 'val'},
                         db.instance_metadata_get(self.ctxt, inst['uuid']))

    def test_instance_metadata_update(self):
        """Update merges by default and replaces when delete=True."""
        inst = db.instance_create(self.ctxt, {'host': 'h1',
                                              'project_id': 'p1',
                                              'metadata': {'key': 'value'}})

        # delete=False merges the new pair into the existing metadata.
        db.instance_metadata_update(self.ctxt, inst['uuid'],
                                    {'new_key': 'new_value'}, False)
        self.assertEqual({'key': 'value', 'new_key': 'new_value'},
                         db.instance_metadata_get(self.ctxt, inst['uuid']))

        # delete=True replaces the whole mapping with the new one.
        db.instance_metadata_update(self.ctxt, inst['uuid'],
                                    {'new_key': 'new_value'}, True)
        self.assertEqual({'new_key': 'new_value'},
                         db.instance_metadata_get(self.ctxt, inst['uuid']))
+
+
class InstanceExtraTestCase(test.TestCase):
    """Tests for db.api.instance_extra_* methods."""

    def setUp(self):
        super(InstanceExtraTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        self.instance = db.instance_create(self.ctxt, {})

    def test_instance_extra_get_by_uuid_instance_create(self):
        """instance_create() implicitly creates an instance_extra row."""
        extra = db.instance_extra_get_by_instance_uuid(
            self.ctxt, self.instance['uuid'])
        self.assertIsNotNone(extra)

    def test_instance_extra_update_by_uuid(self):
        """Updates by UUID are visible on a subsequent read."""
        db.instance_extra_update_by_uuid(self.ctxt, self.instance['uuid'],
                                         {'numa_topology': 'changed'})
        extra = db.instance_extra_get_by_instance_uuid(
            self.ctxt, self.instance['uuid'])
        self.assertEqual('changed', extra.numa_topology)

    def test_instance_extra_get_with_columns(self):
        """Only the requested columns are loaded onto the result."""
        extra = db.instance_extra_get_by_instance_uuid(
            self.ctxt, self.instance['uuid'],
            columns=['numa_topology'])
        self.assertNotIn('pci_requests', extra)
        self.assertIn('numa_topology', extra)
+
+
class ServiceTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Tests for db.api.service_* methods."""

    def setUp(self):
        super(ServiceTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def _get_base_values(self):
        # Minimal valid set of service columns; tests override as needed.
        return {
            'host': 'fake_host',
            'binary': 'fake_binary',
            'topic': 'fake_topic',
            'report_count': 3,
            'disabled': False
        }

    def _create_service(self, values):
        """Create a service from the base values merged with *values*."""
        v = self._get_base_values()
        v.update(values)
        return db.service_create(self.ctxt, v)

    def test_service_create(self):
        """service_create() persists every provided column."""
        service = self._create_service({})
        self.assertIsNotNone(service['id'])
        # NOTE: items() rather than the Python 2-only iteritems() so the
        # test also runs under Python 3.
        for key, value in self._get_base_values().items():
            self.assertEqual(value, service[key])

    def test_service_destroy(self):
        """Destroying one service leaves other services untouched."""
        service1 = self._create_service({})
        service2 = self._create_service({'host': 'fake_host2'})

        db.service_destroy(self.ctxt, service1['id'])
        self.assertRaises(exception.ServiceNotFound,
                          db.service_get, self.ctxt, service1['id'])
        self._assertEqualObjects(db.service_get(self.ctxt, service2['id']),
                                 service2, ignored_keys=['compute_node'])

    def test_service_update(self):
        """service_update() persists every updated column."""
        service = self._create_service({})
        new_values = {
            'host': 'fake_host1',
            'binary': 'fake_binary1',
            'topic': 'fake_topic1',
            'report_count': 4,
            'disabled': True
        }
        db.service_update(self.ctxt, service['id'], new_values)
        updated_service = db.service_get(self.ctxt, service['id'])
        # items() instead of iteritems() for Python 3 compatibility.
        for key, value in new_values.items():
            self.assertEqual(value, updated_service[key])

    def test_service_update_not_found_exception(self):
        """Updating a nonexistent service raises ServiceNotFound."""
        self.assertRaises(exception.ServiceNotFound,
                          db.service_update, self.ctxt, 100500, {})

    def test_service_get(self):
        """service_get() returns only the requested service."""
        service1 = self._create_service({})
        self._create_service({'host': 'some_other_fake_host'})
        real_service1 = db.service_get(self.ctxt, service1['id'])
        self._assertEqualObjects(service1, real_service1,
                                 ignored_keys=['compute_node'])

    def test_service_get_with_compute_node(self):
        """with_compute_node=True joins the compute node record."""
        service = self._create_service({})
        compute_values = dict(vcpus=2, memory_mb=1024, local_gb=2048,
                              vcpus_used=0, memory_mb_used=0,
                              local_gb_used=0, free_ram_mb=1024,
                              free_disk_gb=2048, hypervisor_type="xen",
                              hypervisor_version=1, cpu_info="",
                              running_vms=0, current_workload=0,
                              service_id=service['id'])
        compute = db.compute_node_create(self.ctxt, compute_values)
        real_service = db.service_get(self.ctxt, service['id'],
                                      with_compute_node=True)
        real_compute = real_service['compute_node'][0]
        self.assertEqual(compute['id'], real_compute['id'])

    def test_service_get_not_found_exception(self):
        """Fetching a nonexistent service raises ServiceNotFound."""
        self.assertRaises(exception.ServiceNotFound,
                          db.service_get, self.ctxt, 100500)

    def test_service_get_by_host_and_topic(self):
        """Lookup matches on both host and topic."""
        service1 = self._create_service({'host': 'host1', 'topic': 'topic1'})
        self._create_service({'host': 'host2', 'topic': 'topic2'})

        real_service1 = db.service_get_by_host_and_topic(self.ctxt,
                                                         host='host1',
                                                         topic='topic1')
        self._assertEqualObjects(service1, real_service1)

    def test_service_get_all(self):
        """service_get_all() honors the disabled filter."""
        values = [
            {'host': 'host1', 'topic': 'topic1'},
            {'host': 'host2', 'topic': 'topic2'},
            {'disabled': True}
        ]
        services = [self._create_service(vals) for vals in values]
        disabled_services = [services[-1]]
        non_disabled_services = services[:-1]

        compares = [
            (services, db.service_get_all(self.ctxt)),
            (disabled_services, db.service_get_all(self.ctxt, True)),
            (non_disabled_services, db.service_get_all(self.ctxt, False))
        ]
        for comp in compares:
            self._assertEqualListsOfObjects(*comp)

    def test_service_get_all_by_topic(self):
        """Only services on the requested topic are returned."""
        values = [
            {'host': 'host1', 'topic': 't1'},
            {'host': 'host2', 'topic': 't1'},
            {'disabled': True, 'topic': 't1'},
            {'host': 'host3', 'topic': 't2'}
        ]
        services = [self._create_service(vals) for vals in values]
        # Disabled services and other topics are filtered out.
        expected = services[:2]
        real = db.service_get_all_by_topic(self.ctxt, 't1')
        self._assertEqualListsOfObjects(expected, real)

    def test_service_get_all_by_host(self):
        """Only services on the requested host are returned."""
        values = [
            {'host': 'host1', 'topic': 't11', 'binary': 'b11'},
            {'host': 'host1', 'topic': 't12', 'binary': 'b12'},
            {'host': 'host2', 'topic': 't1'},
            {'host': 'host3', 'topic': 't1'}
        ]
        services = [self._create_service(vals) for vals in values]

        expected = services[:2]
        real = db.service_get_all_by_host(self.ctxt, 'host1')
        self._assertEqualListsOfObjects(expected, real)

    def test_service_get_by_compute_host(self):
        """Lookup is restricted to services on the compute topic."""
        values = [
            {'host': 'host1', 'topic': CONF.compute_topic},
            {'host': 'host2', 'topic': 't1'},
            {'host': 'host3', 'topic': CONF.compute_topic}
        ]
        services = [self._create_service(vals) for vals in values]

        real_service = db.service_get_by_compute_host(self.ctxt, 'host1')
        self._assertEqualObjects(services[0], real_service,
                                 ignored_keys=['compute_node'])

        self.assertRaises(exception.ComputeHostNotFound,
                          db.service_get_by_compute_host,
                          self.ctxt, 'non-exists-host')

    def test_service_get_by_compute_host_not_found(self):
        """An unknown compute host raises ComputeHostNotFound."""
        self.assertRaises(exception.ComputeHostNotFound,
                          db.service_get_by_compute_host,
                          self.ctxt, 'non-exists-host')

    def test_service_get_by_args(self):
        """Lookup matches on both host and binary."""
        values = [
            {'host': 'host1', 'binary': 'a'},
            {'host': 'host2', 'binary': 'b'}
        ]
        services = [self._create_service(vals) for vals in values]

        service1 = db.service_get_by_args(self.ctxt, 'host1', 'a')
        self._assertEqualObjects(services[0], service1)

        service2 = db.service_get_by_args(self.ctxt, 'host2', 'b')
        self._assertEqualObjects(services[1], service2)

    def test_service_get_by_args_not_found_exception(self):
        """An unknown host/binary pair raises HostBinaryNotFound."""
        self.assertRaises(exception.HostBinaryNotFound,
                          db.service_get_by_args,
                          self.ctxt, 'non-exists-host', 'a')

    def test_service_binary_exists_exception(self):
        """A duplicate host/binary pair is rejected even with a new topic."""
        db.service_create(self.ctxt, self._get_base_values())
        values = self._get_base_values()
        values.update({'topic': 'top1'})
        self.assertRaises(exception.ServiceBinaryExists, db.service_create,
                          self.ctxt, values)

    def test_service_topic_exists_exceptions(self):
        """A duplicate host/topic pair is rejected even with a new binary."""
        db.service_create(self.ctxt, self._get_base_values())
        values = self._get_base_values()
        values.update({'binary': 'bin1'})
        self.assertRaises(exception.ServiceTopicExists, db.service_create,
                          self.ctxt, values)
+
+
class BaseInstanceTypeTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Shared fixtures for the flavor (instance type) DB API tests."""

    def setUp(self):
        super(BaseInstanceTypeTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        self.user_ctxt = context.RequestContext('user', 'user')

    def _get_base_values(self):
        # A minimal valid flavor definition; tests override fields.
        return {
            'name': 'fake_name',
            'memory_mb': 512,
            'vcpus': 1,
            'root_gb': 10,
            'ephemeral_gb': 10,
            'flavorid': 'fake_flavor',
            'swap': 0,
            'rxtx_factor': 0.5,
            'vcpu_weight': 1,
            'disabled': False,
            'is_public': True
        }

    def _create_flavor(self, values, projects=None):
        """Create a flavor from the base values merged with *values*."""
        base = self._get_base_values()
        base.update(values)
        return db.flavor_create(self.ctxt, base, projects)
+
+
class InstanceActionTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Tests for db.api action_* and action_event_* methods."""

    # Columns set by the DB layer itself; ignored when comparing the
    # values passed in against the rows that come back.
    IGNORED_FIELDS = [
        'id',
        'created_at',
        'updated_at',
        'deleted_at',
        'deleted'
    ]

    def setUp(self):
        super(InstanceActionTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def _create_action_values(self, uuid, action='run_instance',
                              ctxt=None, extra=None):
        """Build action values (and the backing instance row) for tests."""
        if ctxt is None:
            ctxt = self.ctxt

        # The action references an instance, so create one first.
        db.instance_create(ctxt, {'uuid': uuid})

        values = {
            'action': action,
            'instance_uuid': uuid,
            'request_id': ctxt.request_id,
            'user_id': ctxt.user_id,
            'project_id': ctxt.project_id,
            'start_time': timeutils.utcnow(),
            'message': 'action-message'
        }
        if extra is not None:
            values.update(extra)
        return values

    def _create_event_values(self, uuid, event='schedule',
                             ctxt=None, extra=None):
        """Build action event values for tests."""
        if ctxt is None:
            ctxt = self.ctxt
        values = {
            'event': event,
            'instance_uuid': uuid,
            'request_id': ctxt.request_id,
            'start_time': timeutils.utcnow(),
            'host': 'fake-host',
            'details': 'fake-details',
        }
        if extra is not None:
            values.update(extra)
        return values

    def _assertActionSaved(self, action, uuid):
        """Retrieve the action to ensure it was successfully added."""
        actions = db.actions_get(self.ctxt, uuid)
        self.assertEqual(1, len(actions))
        self._assertEqualObjects(action, actions[0])

    def _assertActionEventSaved(self, event, action_id):
        # Retrieve the event to ensure it was successfully added
        events = db.action_events_get(self.ctxt, action_id)
        self.assertEqual(1, len(events))
        self._assertEqualObjects(event, events[0],
                                 ['instance_uuid', 'request_id'])

    def test_instance_action_start(self):
        """Create an instance action."""
        uuid = str(stdlib_uuid.uuid4())

        action_values = self._create_action_values(uuid)
        action = db.action_start(self.ctxt, action_values)

        ignored_keys = self.IGNORED_FIELDS + ['finish_time']
        self._assertEqualObjects(action_values, action, ignored_keys)

        self._assertActionSaved(action, uuid)

    def test_instance_action_finish(self):
        """Create an instance action and mark it finished."""
        uuid = str(stdlib_uuid.uuid4())

        action_values = self._create_action_values(uuid)
        db.action_start(self.ctxt, action_values)

        action_values['finish_time'] = timeutils.utcnow()
        action = db.action_finish(self.ctxt, action_values)
        self._assertEqualObjects(action_values, action, self.IGNORED_FIELDS)

        self._assertActionSaved(action, uuid)

    def test_instance_action_finish_without_started_event(self):
        """Finishing an action that was never started must fail."""
        uuid = str(stdlib_uuid.uuid4())

        action_values = self._create_action_values(uuid)
        action_values['finish_time'] = timeutils.utcnow()
        self.assertRaises(exception.InstanceActionNotFound, db.action_finish,
                          self.ctxt, action_values)

    def test_instance_actions_get_by_instance(self):
        """Ensure we can get actions by UUID."""
        uuid1 = str(stdlib_uuid.uuid4())

        expected = []

        action_values = self._create_action_values(uuid1)
        action = db.action_start(self.ctxt, action_values)
        expected.append(action)

        action_values['action'] = 'resize'
        action = db.action_start(self.ctxt, action_values)
        expected.append(action)

        # Create some extra actions
        uuid2 = str(stdlib_uuid.uuid4())
        ctxt2 = context.get_admin_context()
        action_values = self._create_action_values(uuid2, 'reboot', ctxt2)
        db.action_start(ctxt2, action_values)
        db.action_start(ctxt2, action_values)

        # Retrieve the action to ensure it was successfully added
        actions = db.actions_get(self.ctxt, uuid1)
        self._assertEqualListsOfObjects(expected, actions)

    def test_instance_actions_get_are_in_order(self):
        """Ensure retrieved actions are in order."""
        uuid1 = str(stdlib_uuid.uuid4())

        extra = {
            'created_at': timeutils.utcnow()
        }

        action_values = self._create_action_values(uuid1, extra=extra)
        action1 = db.action_start(self.ctxt, action_values)

        action_values['action'] = 'delete'
        action2 = db.action_start(self.ctxt, action_values)

        actions = db.actions_get(self.ctxt, uuid1)
        self.assertEqual(2, len(actions))

        # Most recent action first.
        self._assertEqualOrderedListOfObjects([action2, action1], actions)

    def test_instance_action_get_by_instance_and_action(self):
        """Ensure we can get an action by instance UUID and action id."""
        ctxt2 = context.get_admin_context()
        uuid1 = str(stdlib_uuid.uuid4())
        uuid2 = str(stdlib_uuid.uuid4())

        action_values = self._create_action_values(uuid1)
        db.action_start(self.ctxt, action_values)
        request_id = action_values['request_id']

        # NOTE(rpodolyaka): ensure we use a different req id for the 2nd req
        action_values['action'] = 'resize'
        action_values['request_id'] = 'req-00000000-7522-4d99-7ff-111111111111'
        db.action_start(self.ctxt, action_values)

        action_values = self._create_action_values(uuid2, 'reboot', ctxt2)
        db.action_start(ctxt2, action_values)
        db.action_start(ctxt2, action_values)

        action = db.action_get_by_request_id(self.ctxt, uuid1, request_id)
        self.assertEqual('run_instance', action['action'])
        self.assertEqual(self.ctxt.request_id, action['request_id'])

    def test_instance_action_event_start(self):
        """Create an instance action event."""
        uuid = str(stdlib_uuid.uuid4())

        action_values = self._create_action_values(uuid)
        action = db.action_start(self.ctxt, action_values)

        event_values = self._create_event_values(uuid)
        event = db.action_event_start(self.ctxt, event_values)
        event_values['action_id'] = action['id']
        ignored = self.IGNORED_FIELDS + ['finish_time', 'traceback', 'result']
        self._assertEqualObjects(event_values, event, ignored)

        self._assertActionEventSaved(event, action['id'])

    def test_instance_action_event_start_without_action(self):
        """Starting an event for a nonexistent action must fail."""
        uuid = str(stdlib_uuid.uuid4())

        event_values = self._create_event_values(uuid)
        self.assertRaises(exception.InstanceActionNotFound,
                          db.action_event_start, self.ctxt, event_values)

    def test_instance_action_event_finish_without_started_event(self):
        """Finishing an event that was never started must fail."""
        uuid = str(stdlib_uuid.uuid4())

        db.action_start(self.ctxt, self._create_action_values(uuid))

        event_values = {
            'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
            'result': 'Success'
        }
        event_values = self._create_event_values(uuid, extra=event_values)
        self.assertRaises(exception.InstanceActionEventNotFound,
                          db.action_event_finish, self.ctxt, event_values)

    def test_instance_action_event_finish_without_action(self):
        """Finishing an event for a nonexistent action must fail."""
        uuid = str(stdlib_uuid.uuid4())

        event_values = {
            'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
            'result': 'Success'
        }
        event_values = self._create_event_values(uuid, extra=event_values)
        self.assertRaises(exception.InstanceActionNotFound,
                          db.action_event_finish, self.ctxt, event_values)

    def test_instance_action_event_finish_success(self):
        """Finish an instance action event."""
        uuid = str(stdlib_uuid.uuid4())

        action = db.action_start(self.ctxt, self._create_action_values(uuid))

        db.action_event_start(self.ctxt, self._create_event_values(uuid))

        event_values = {
            'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
            'result': 'Success'
        }
        event_values = self._create_event_values(uuid, extra=event_values)
        event = db.action_event_finish(self.ctxt, event_values)

        self._assertActionEventSaved(event, action['id'])
        action = db.action_get_by_request_id(self.ctxt, uuid,
                                             self.ctxt.request_id)
        # A successful event must not flip the action message to Error.
        self.assertNotEqual('Error', action['message'])

    def test_instance_action_event_finish_error(self):
        """Finish an instance action event with an error."""
        uuid = str(stdlib_uuid.uuid4())

        action = db.action_start(self.ctxt, self._create_action_values(uuid))

        db.action_event_start(self.ctxt, self._create_event_values(uuid))

        event_values = {
            'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
            'result': 'Error'
        }
        event_values = self._create_event_values(uuid, extra=event_values)
        event = db.action_event_finish(self.ctxt, event_values)

        self._assertActionEventSaved(event, action['id'])
        action = db.action_get_by_request_id(self.ctxt, uuid,
                                             self.ctxt.request_id)
        # An Error result must be propagated to the action message.
        self.assertEqual('Error', action['message'])

    def test_instance_action_and_event_start_string_time(self):
        """Create an instance action and event with a string start_time."""
        uuid = str(stdlib_uuid.uuid4())

        action = db.action_start(self.ctxt, self._create_action_values(uuid))

        event_values = {'start_time': timeutils.strtime(timeutils.utcnow())}
        event_values = self._create_event_values(uuid, extra=event_values)
        event = db.action_event_start(self.ctxt, event_values)

        self._assertActionEventSaved(event, action['id'])

    def test_instance_action_events_get_are_in_order(self):
        """Ensure retrieved action events are in order."""
        uuid1 = str(stdlib_uuid.uuid4())

        action = db.action_start(self.ctxt,
                                 self._create_action_values(uuid1))

        extra1 = {
            'created_at': timeutils.utcnow()
        }
        extra2 = {
            'created_at': timeutils.utcnow() + datetime.timedelta(seconds=5)
        }

        event_val1 = self._create_event_values(uuid1, 'schedule', extra=extra1)
        event_val2 = self._create_event_values(uuid1, 'run', extra=extra1)
        event_val3 = self._create_event_values(uuid1, 'stop', extra=extra2)

        event1 = db.action_event_start(self.ctxt, event_val1)
        event2 = db.action_event_start(self.ctxt, event_val2)
        event3 = db.action_event_start(self.ctxt, event_val3)

        events = db.action_events_get(self.ctxt, action['id'])
        self.assertEqual(3, len(events))

        # Most recent event first.
        self._assertEqualOrderedListOfObjects([event3, event2, event1], events,
                                              ['instance_uuid', 'request_id'])

    def test_instance_action_event_get_by_id(self):
        """Get a specific instance action event."""
        ctxt2 = context.get_admin_context()
        uuid1 = str(stdlib_uuid.uuid4())
        uuid2 = str(stdlib_uuid.uuid4())

        action = db.action_start(self.ctxt,
                                 self._create_action_values(uuid1))

        db.action_start(ctxt2,
                        self._create_action_values(uuid2, 'reboot', ctxt2))

        event = db.action_event_start(self.ctxt,
                                      self._create_event_values(uuid1))

        event_values = self._create_event_values(uuid2, 'reboot', ctxt2)
        db.action_event_start(ctxt2, event_values)

        # Retrieve the event to ensure it was successfully added
        saved_event = db.action_event_get_by_id(self.ctxt,
                                                action['id'],
                                                event['id'])
        self._assertEqualObjects(event, saved_event,
                                 ['instance_uuid', 'request_id'])
+
+
class InstanceFaultTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Tests for db.api.instance_fault_* methods."""

    def setUp(self):
        super(InstanceFaultTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def _create_fault_values(self, uuid, code=404):
        # A representative fault row for the given instance.
        return {
            'message': 'message',
            'details': 'detail',
            'instance_uuid': uuid,
            'code': code,
            'host': 'localhost'
        }

    def test_instance_fault_create(self):
        """A created fault is returned by the per-instance lookup."""
        instance_uuid = str(stdlib_uuid.uuid4())

        # No faults exist for the instance yet.
        faults = db.instance_fault_get_by_instance_uuids(self.ctxt,
                                                         [instance_uuid])
        self.assertEqual(0, len(faults[instance_uuid]))

        db.instance_create(self.ctxt, {'uuid': instance_uuid})
        fault_values = self._create_fault_values(instance_uuid)
        fault = db.instance_fault_create(self.ctxt, fault_values)

        skip = ['deleted', 'created_at', 'updated_at', 'deleted_at', 'id']
        self._assertEqualObjects(fault_values, fault, skip)

        # The stored fault matches what was created.
        faults = db.instance_fault_get_by_instance_uuids(self.ctxt,
                                                         [instance_uuid])
        self.assertEqual(1, len(faults[instance_uuid]))
        self._assertEqualObjects(fault, faults[instance_uuid][0])

    def test_instance_fault_get_by_instance(self):
        """Faults for several instances come back grouped by UUID."""
        uuids = [str(stdlib_uuid.uuid4()), str(stdlib_uuid.uuid4())]
        expected = {}

        for uuid in uuids:
            db.instance_create(self.ctxt, {'uuid': uuid})
            expected[uuid] = []
            for code in (404, 500):
                fault = db.instance_fault_create(
                    self.ctxt, self._create_fault_values(uuid, code))
                expected[uuid].append(fault)

        faults = db.instance_fault_get_by_instance_uuids(self.ctxt, uuids)
        self.assertEqual(len(expected), len(faults))
        for uuid in uuids:
            self._assertEqualListsOfObjects(expected[uuid], faults[uuid])

    def test_instance_faults_get_by_instance_uuids_no_faults(self):
        """An empty fault list is returned when no faults exist."""
        uuid = str(stdlib_uuid.uuid4())
        faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
        self.assertEqual({uuid: []}, faults)

    def test_instance_faults_get_by_instance_uuids_no_uuids(self):
        """With no UUIDs the query must not even reach the filter."""
        self.mox.StubOutWithMock(query.Query, 'filter')
        self.mox.ReplayAll()
        faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [])
        self.assertEqual({}, faults)
+
+
+class InstanceTypeTestCase(BaseInstanceTypeTestCase):
+
+ def test_flavor_create(self):
+ flavor = self._create_flavor({})
+ ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
+ 'created_at', 'extra_specs']
+
+ self.assertIsNotNone(flavor['id'])
+ self._assertEqualObjects(flavor, self._get_base_values(),
+ ignored_keys)
+
+ def test_flavor_create_with_projects(self):
+ projects = ['fake-project1', 'fake-project2']
+ flavor = self._create_flavor({}, projects + ['fake-project2'])
+ access = db.flavor_access_get_by_flavor_id(self.ctxt,
+ flavor['flavorid'])
+ self.assertEqual(projects, [x.project_id for x in access])
+
+ def test_flavor_destroy(self):
+ specs1 = {'a': '1', 'b': '2'}
+ flavor1 = self._create_flavor({'name': 'name1', 'flavorid': 'a1',
+ 'extra_specs': specs1})
+ specs2 = {'c': '4', 'd': '3'}
+ flavor2 = self._create_flavor({'name': 'name2', 'flavorid': 'a2',
+ 'extra_specs': specs2})
+
+ db.flavor_destroy(self.ctxt, 'name1')
+
+ self.assertRaises(exception.FlavorNotFound,
+ db.flavor_get, self.ctxt, flavor1['id'])
+ real_specs1 = db.flavor_extra_specs_get(self.ctxt, flavor1['flavorid'])
+ self._assertEqualObjects(real_specs1, {})
+
+ r_flavor2 = db.flavor_get(self.ctxt, flavor2['id'])
+ self._assertEqualObjects(flavor2, r_flavor2, 'extra_specs')
+
+ def test_flavor_destroy_not_found(self):
+ self.assertRaises(exception.FlavorNotFound,
+ db.flavor_destroy, self.ctxt, 'nonexists')
+
+ def test_flavor_create_duplicate_name(self):
+ self._create_flavor({})
+ self.assertRaises(exception.FlavorExists,
+ self._create_flavor,
+ {'flavorid': 'some_random_flavor'})
+
+ def test_flavor_create_duplicate_flavorid(self):
+ self._create_flavor({})
+ self.assertRaises(exception.FlavorIdExists,
+ self._create_flavor,
+ {'name': 'some_random_name'})
+
+ def test_flavor_create_with_extra_specs(self):
+ extra_specs = dict(a='abc', b='def', c='ghi')
+ flavor = self._create_flavor({'extra_specs': extra_specs})
+ ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
+ 'created_at', 'extra_specs']
+
+ self._assertEqualObjects(flavor, self._get_base_values(),
+ ignored_keys)
+ self._assertEqualObjects(extra_specs, flavor['extra_specs'])
+
+ def test_flavor_get_all(self):
+ # Exercise flavor_get_all with every supported filter, alone and
+ # in all combinations, against a fixed set of six flavors.
+ # NOTE(boris-42): Remove base instance types
+ for it in db.flavor_get_all(self.ctxt):
+ db.flavor_destroy(self.ctxt, it['name'])
+
+ flavors = [
+ {'root_gb': 600, 'memory_mb': 100, 'disabled': True,
+ 'is_public': True, 'name': 'a1', 'flavorid': 'f1'},
+ {'root_gb': 500, 'memory_mb': 200, 'disabled': True,
+ 'is_public': True, 'name': 'a2', 'flavorid': 'f2'},
+ {'root_gb': 400, 'memory_mb': 300, 'disabled': False,
+ 'is_public': True, 'name': 'a3', 'flavorid': 'f3'},
+ {'root_gb': 300, 'memory_mb': 400, 'disabled': False,
+ 'is_public': False, 'name': 'a4', 'flavorid': 'f4'},
+ {'root_gb': 200, 'memory_mb': 500, 'disabled': True,
+ 'is_public': False, 'name': 'a5', 'flavorid': 'f5'},
+ {'root_gb': 100, 'memory_mb': 600, 'disabled': True,
+ 'is_public': False, 'name': 'a6', 'flavorid': 'f6'}
+ ]
+ flavors = [self._create_flavor(it) for it in flavors]
+
+ # In-Python predicates mirroring the DB-side filter semantics;
+ # is_public=None means "no visibility filtering".
+ lambda_filters = {
+ 'min_memory_mb': lambda it, v: it['memory_mb'] >= v,
+ 'min_root_gb': lambda it, v: it['root_gb'] >= v,
+ 'disabled': lambda it, v: it['disabled'] == v,
+ 'is_public': lambda it, v: (v is None or it['is_public'] == v)
+ }
+
+ mem_filts = [{'min_memory_mb': x} for x in [100, 350, 550, 650]]
+ root_filts = [{'min_root_gb': x} for x in [100, 350, 550, 650]]
+ disabled_filts = [{'disabled': x} for x in [True, False]]
+ is_public_filts = [{'is_public': x} for x in [True, False, None]]
+
+ # NOTE(review): iteritems/filter/reduce below are Python 2 idioms,
+ # consistent with the rest of this file at the time of the patch.
+ def assert_multi_filter_flavor_get(filters=None):
+ if filters is None:
+ filters = {}
+
+ # Compute the expected result locally, then compare with the DB.
+ expected_it = flavors
+ for name, value in filters.iteritems():
+ filt = lambda it: lambda_filters[name](it, value)
+ expected_it = filter(filt, expected_it)
+
+ real_it = db.flavor_get_all(self.ctxt, filters=filters)
+ self._assertEqualListsOfObjects(expected_it, real_it)
+
+ # no filter
+ assert_multi_filter_flavor_get()
+
+ # test only with one filter
+ for filt in mem_filts:
+ assert_multi_filter_flavor_get(filt)
+ for filt in root_filts:
+ assert_multi_filter_flavor_get(filt)
+ for filt in disabled_filts:
+ assert_multi_filter_flavor_get(filt)
+ for filt in is_public_filts:
+ assert_multi_filter_flavor_get(filt)
+
+ # test all filters together
+ for mem in mem_filts:
+ for root in root_filts:
+ for disabled in disabled_filts:
+ for is_public in is_public_filts:
+ # Merge the four single-entry filter dicts into one.
+ filts = [f.items() for f in
+ [mem, root, disabled, is_public]]
+ filts = dict(reduce(lambda x, y: x + y, filts, []))
+ assert_multi_filter_flavor_get(filts)
+
+ def test_flavor_get_all_limit_sort(self):
+ def assert_sorted_by_key_dir(sort_key, asc=True):
+ sort_dir = 'asc' if asc else 'desc'
+ # NOTE(review): the sort_key parameter is ignored here — the
+ # query always passes sort_key='name', so the attribute loop
+ # below re-tests name sorting only. Looks like a bug; confirm.
+ results = db.flavor_get_all(self.ctxt, sort_key='name',
+ sort_dir=sort_dir)
+ # Manually sort the results as we would expect them
+ expected_results = sorted(results,
+ key=lambda item: item['name'],
+ reverse=(not asc))
+ self.assertEqual(expected_results, results)
+
+ def assert_sorted_by_key_both_dir(sort_key):
+ assert_sorted_by_key_dir(sort_key, True)
+ assert_sorted_by_key_dir(sort_key, False)
+
+ for attr in ['memory_mb', 'root_gb', 'deleted_at', 'name', 'deleted',
+ 'created_at', 'ephemeral_gb', 'updated_at', 'disabled',
+ 'vcpus', 'swap', 'rxtx_factor', 'is_public', 'flavorid',
+ 'vcpu_weight', 'id']:
+ assert_sorted_by_key_both_dir(attr)
+
+ def test_flavor_get_all_limit(self):
+ # limit=2 caps the result set (relies on default flavors existing).
+ limited_flavors = db.flavor_get_all(self.ctxt, limit=2)
+ self.assertEqual(2, len(limited_flavors))
+
+ def test_flavor_get_all_list_marker(self):
+ # Pagination: results after the marker flavorid are returned.
+ all_flavors = db.flavor_get_all(self.ctxt)
+
+ # Set the 3rd result as the marker
+ marker_flavorid = all_flavors[2]['flavorid']
+ marked_flavors = db.flavor_get_all(self.ctxt, marker=marker_flavorid)
+ # We expect everything /after/ the 3rd result
+ expected_results = all_flavors[3:]
+ self.assertEqual(expected_results, marked_flavors)
+
+ def test_flavor_get_all_marker_not_found(self):
+ # An unknown pagination marker must raise MarkerNotFound.
+ self.assertRaises(exception.MarkerNotFound,
+ db.flavor_get_all, self.ctxt, marker='invalid')
+
+ def test_flavor_get(self):
+ # flavor_get by primary key returns each created flavor intact.
+ flavors = [{'name': 'abc', 'flavorid': '123'},
+ {'name': 'def', 'flavorid': '456'},
+ {'name': 'ghi', 'flavorid': '789'}]
+ flavors = [self._create_flavor(t) for t in flavors]
+
+ for flavor in flavors:
+ flavor_by_id = db.flavor_get(self.ctxt, flavor['id'])
+ self._assertEqualObjects(flavor, flavor_by_id)
+
+ def test_flavor_get_non_public(self):
+ # Visibility of a non-public flavor: admin always sees it, a
+ # regular user only after flavor_access_add grants the project.
+ flavor = self._create_flavor({'name': 'abc', 'flavorid': '123',
+ 'is_public': False})
+
+ # Admin can see it
+ flavor_by_id = db.flavor_get(self.ctxt, flavor['id'])
+ self._assertEqualObjects(flavor, flavor_by_id)
+
+ # Regular user can not
+ self.assertRaises(exception.FlavorNotFound, db.flavor_get,
+ self.user_ctxt, flavor['id'])
+
+ # Regular user can see it after being granted access
+ db.flavor_access_add(self.ctxt, flavor['flavorid'],
+ self.user_ctxt.project_id)
+ flavor_by_id = db.flavor_get(self.user_ctxt, flavor['id'])
+ self._assertEqualObjects(flavor, flavor_by_id)
+
+ def test_flavor_get_by_name(self):
+ # flavor_get_by_name returns each created flavor intact.
+ flavors = [{'name': 'abc', 'flavorid': '123'},
+ {'name': 'def', 'flavorid': '456'},
+ {'name': 'ghi', 'flavorid': '789'}]
+ flavors = [self._create_flavor(t) for t in flavors]
+
+ for flavor in flavors:
+ flavor_by_name = db.flavor_get_by_name(self.ctxt, flavor['name'])
+ self._assertEqualObjects(flavor, flavor_by_name)
+
+ def test_flavor_get_by_name_not_found(self):
+ # Unknown name raises the name-specific FlavorNotFoundByName.
+ self._create_flavor({})
+ self.assertRaises(exception.FlavorNotFoundByName,
+ db.flavor_get_by_name, self.ctxt, 'nonexists')
+
+ def test_flavor_get_by_name_non_public(self):
+ # Same visibility rules as test_flavor_get_non_public, via name
+ # lookup (raises FlavorNotFoundByName for the denied user).
+ flavor = self._create_flavor({'name': 'abc', 'flavorid': '123',
+ 'is_public': False})
+
+ # Admin can see it
+ flavor_by_name = db.flavor_get_by_name(self.ctxt, flavor['name'])
+ self._assertEqualObjects(flavor, flavor_by_name)
+
+ # Regular user can not
+ self.assertRaises(exception.FlavorNotFoundByName,
+ db.flavor_get_by_name, self.user_ctxt,
+ flavor['name'])
+
+ # Regular user can see it after being granted access
+ db.flavor_access_add(self.ctxt, flavor['flavorid'],
+ self.user_ctxt.project_id)
+ flavor_by_name = db.flavor_get_by_name(self.user_ctxt, flavor['name'])
+ self._assertEqualObjects(flavor, flavor_by_name)
+
+ def test_flavor_get_by_flavor_id(self):
+ # flavor_get_by_flavor_id returns each created flavor intact.
+ flavors = [{'name': 'abc', 'flavorid': '123'},
+ {'name': 'def', 'flavorid': '456'},
+ {'name': 'ghi', 'flavorid': '789'}]
+ flavors = [self._create_flavor(t) for t in flavors]
+
+ for flavor in flavors:
+ params = (self.ctxt, flavor['flavorid'])
+ flavor_by_flavorid = db.flavor_get_by_flavor_id(*params)
+ self._assertEqualObjects(flavor, flavor_by_flavorid)
+
+ def test_flavor_get_by_flavor_not_found(self):
+ # Unknown flavorid raises FlavorNotFound.
+ self._create_flavor({})
+ self.assertRaises(exception.FlavorNotFound,
+ db.flavor_get_by_flavor_id,
+ self.ctxt, 'nonexists')
+
+ def test_flavor_get_by_flavor_id_non_public(self):
+ # Same visibility rules as test_flavor_get_non_public, via
+ # flavorid lookup.
+ flavor = self._create_flavor({'name': 'abc', 'flavorid': '123',
+ 'is_public': False})
+
+ # Admin can see it
+ flavor_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
+ flavor['flavorid'])
+ self._assertEqualObjects(flavor, flavor_by_fid)
+
+ # Regular user can not
+ self.assertRaises(exception.FlavorNotFound,
+ db.flavor_get_by_flavor_id, self.user_ctxt,
+ flavor['flavorid'])
+
+ # Regular user can see it after being granted access
+ db.flavor_access_add(self.ctxt, flavor['flavorid'],
+ self.user_ctxt.project_id)
+ flavor_by_fid = db.flavor_get_by_flavor_id(self.user_ctxt,
+ flavor['flavorid'])
+ self._assertEqualObjects(flavor, flavor_by_fid)
+
+ def test_flavor_get_by_flavor_id_deleted(self):
+ # read_deleted='yes' must return a soft-deleted flavor.
+ flavor = self._create_flavor({'name': 'abc', 'flavorid': '123'})
+
+ db.flavor_destroy(self.ctxt, 'abc')
+
+ flavor_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
+ flavor['flavorid'], read_deleted='yes')
+ self.assertEqual(flavor['id'], flavor_by_fid['id'])
+
+ def test_flavor_get_by_flavor_id_deleted_and_recreat(self):
+ # NOTE(review): method name has a typo ('recreat' -> 'recreate');
+ # kept as-is since the name is the test's identifier.
+ # After destroy + recreate with identical params, read_deleted='yes'
+ # must return the *new* row, not the soft-deleted one.
+ # NOTE(wingwj): Aims to test difference between mysql and postgresql
+ # for bug 1288636
+ param_dict = {'name': 'abc', 'flavorid': '123'}
+
+ self._create_flavor(param_dict)
+ db.flavor_destroy(self.ctxt, 'abc')
+
+ # Recreate the flavor with the same params
+ flavor = self._create_flavor(param_dict)
+
+ flavor_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
+ flavor['flavorid'], read_deleted='yes')
+ self.assertEqual(flavor['id'], flavor_by_fid['id'])
+
+
+class InstanceTypeExtraSpecsTestCase(BaseInstanceTypeTestCase):
+ # Tests for the flavor extra-specs DB API (get/get_item/delete/
+ # update_or_create) against two fixture flavors created in setUp.
+
+ def setUp(self):
+ super(InstanceTypeExtraSpecsTestCase, self).setUp()
+ values = ({'name': 'n1', 'flavorid': 'f1',
+ 'extra_specs': dict(a='a', b='b', c='c')},
+ {'name': 'n2', 'flavorid': 'f2',
+ 'extra_specs': dict(d='d', e='e', f='f')})
+
+ # NOTE(boris-42): We have already tested flavor_create method
+ # with extra_specs in InstanceTypeTestCase.
+ self.flavors = [self._create_flavor(v) for v in values]
+
+ def test_flavor_extra_specs_get(self):
+ # Stored specs round-trip through flavor_extra_specs_get.
+ for it in self.flavors:
+ real_specs = db.flavor_extra_specs_get(self.ctxt, it['flavorid'])
+ self._assertEqualObjects(it['extra_specs'], real_specs)
+
+ def test_flavor_extra_specs_get_item(self):
+ # Each individual key is retrievable via flavor_extra_specs_get_item.
+ expected = dict(f1=dict(a='a', b='b', c='c'),
+ f2=dict(d='d', e='e', f='f'))
+
+ for flavor, specs in expected.iteritems():
+ for key, val in specs.iteritems():
+ spec = db.flavor_extra_specs_get_item(self.ctxt, flavor, key)
+ self.assertEqual(spec[key], val)
+
+ def test_flavor_extra_specs_delete(self):
+ # Deleting one key removes exactly that key from the stored specs.
+ # (specs.keys()[0] is Python 2 — dict.keys() returns a list there.)
+ for it in self.flavors:
+ specs = it['extra_specs']
+ key = specs.keys()[0]
+ del specs[key]
+ db.flavor_extra_specs_delete(self.ctxt, it['flavorid'], key)
+ real_specs = db.flavor_extra_specs_get(self.ctxt, it['flavorid'])
+ self._assertEqualObjects(it['extra_specs'], real_specs)
+
+ def test_flavor_extra_specs_delete_failed(self):
+ # Deleting a nonexistent key raises FlavorExtraSpecsNotFound.
+ for it in self.flavors:
+ self.assertRaises(exception.FlavorExtraSpecsNotFound,
+ db.flavor_extra_specs_delete,
+ self.ctxt, it['flavorid'], 'dummy')
+
+ def test_flavor_extra_specs_update_or_create(self):
+ # update_or_create overwrites existing keys and adds new ones.
+ for it in self.flavors:
+ current_specs = it['extra_specs']
+ current_specs.update(dict(b='b1', c='c1', d='d1', e='e1'))
+ params = (self.ctxt, it['flavorid'], current_specs)
+ db.flavor_extra_specs_update_or_create(*params)
+ real_specs = db.flavor_extra_specs_get(self.ctxt, it['flavorid'])
+ self._assertEqualObjects(current_specs, real_specs)
+
+ def test_flavor_extra_specs_update_or_create_flavor_not_found(self):
+ self.assertRaises(exception.FlavorNotFound,
+ db.flavor_extra_specs_update_or_create,
+ self.ctxt, 'nonexists', {})
+
+ def test_flavor_extra_specs_update_or_create_retry(self):
+ # Force the flavorid lookup to always raise DBDuplicateEntry and
+ # verify update_or_create retries exactly max_retries (5) times
+ # before giving up with FlavorExtraSpecUpdateCreateFailed.
+
+ def counted():
+ def get_id(context, flavorid, session):
+ get_id.counter += 1
+ raise db_exc.DBDuplicateEntry
+ get_id.counter = 0
+ return get_id
+
+ get_id = counted()
+ self.stubs.Set(sqlalchemy_api, '_flavor_get_id_from_flavor', get_id)
+ self.assertRaises(exception.FlavorExtraSpecUpdateCreateFailed,
+ sqlalchemy_api.flavor_extra_specs_update_or_create,
+ self.ctxt, 1, {}, 5)
+ self.assertEqual(get_id.counter, 5)
+
+
+class InstanceTypeAccessTestCase(BaseInstanceTypeTestCase):
+ # Tests for flavor access grants: add/remove/list per-project access
+ # records and their cleanup when a flavor is destroyed.
+
+ def _create_flavor_access(self, flavor_id, project_id):
+ # Convenience wrapper around db.flavor_access_add.
+ return db.flavor_access_add(self.ctxt, flavor_id, project_id)
+
+ def test_flavor_access_get_by_flavor_id(self):
+ flavors = ({'name': 'n1', 'flavorid': 'f1'},
+ {'name': 'n2', 'flavorid': 'f2'})
+ it1, it2 = tuple((self._create_flavor(v) for v in flavors))
+
+ access_it1 = [self._create_flavor_access(it1['flavorid'], 'pr1'),
+ self._create_flavor_access(it1['flavorid'], 'pr2')]
+
+ access_it2 = [self._create_flavor_access(it2['flavorid'], 'pr1')]
+
+ # Each flavor reports exactly its own access records.
+ for it, access_it in zip((it1, it2), (access_it1, access_it2)):
+ params = (self.ctxt, it['flavorid'])
+ real_access_it = db.flavor_access_get_by_flavor_id(*params)
+ self._assertEqualListsOfObjects(access_it, real_access_it)
+
+ def test_flavor_access_get_by_flavor_id_flavor_not_found(self):
+ self.assertRaises(exception.FlavorNotFound,
+ db.flavor_get_by_flavor_id,
+ self.ctxt, 'nonexists')
+
+ def test_flavor_access_add(self):
+ flavor = self._create_flavor({'flavorid': 'f1'})
+ project_id = 'p1'
+
+ access = self._create_flavor_access(flavor['flavorid'], project_id)
+ # NOTE(boris-42): Check that flavor_access_add doesn't fail and
+ # returns correct value. This is enough because other
+ # logic is checked by other methods.
+ self.assertIsNotNone(access['id'])
+ self.assertEqual(access['instance_type_id'], flavor['id'])
+ self.assertEqual(access['project_id'], project_id)
+
+ def test_flavor_access_add_to_non_existing_flavor(self):
+ self.assertRaises(exception.FlavorNotFound,
+ self._create_flavor_access,
+ 'nonexists', 'does_not_matter')
+
+ def test_flavor_access_add_duplicate_project_id_flavor(self):
+ # A duplicate (flavor, project) grant raises FlavorAccessExists.
+ flavor = self._create_flavor({'flavorid': 'f1'})
+ params = (flavor['flavorid'], 'p1')
+
+ self._create_flavor_access(*params)
+ self.assertRaises(exception.FlavorAccessExists,
+ self._create_flavor_access, *params)
+
+ def test_flavor_access_remove(self):
+ flavors = ({'name': 'n1', 'flavorid': 'f1'},
+ {'name': 'n2', 'flavorid': 'f2'})
+ it1, it2 = tuple((self._create_flavor(v) for v in flavors))
+
+ access_it1 = [self._create_flavor_access(it1['flavorid'], 'pr1'),
+ self._create_flavor_access(it1['flavorid'], 'pr2')]
+
+ access_it2 = [self._create_flavor_access(it2['flavorid'], 'pr1')]
+
+ # Remove the second grant on flavor 1 only.
+ db.flavor_access_remove(self.ctxt, it1['flavorid'],
+ access_it1[1]['project_id'])
+
+ # Flavor 1 keeps its first grant; flavor 2 is untouched.
+ for it, access_it in zip((it1, it2), (access_it1[:1], access_it2)):
+ params = (self.ctxt, it['flavorid'])
+ real_access_it = db.flavor_access_get_by_flavor_id(*params)
+ self._assertEqualListsOfObjects(access_it, real_access_it)
+
+ def test_flavor_access_remove_flavor_not_found(self):
+ self.assertRaises(exception.FlavorNotFound,
+ db.flavor_access_remove,
+ self.ctxt, 'nonexists', 'does_not_matter')
+
+ def test_flavor_access_remove_access_not_found(self):
+ flavor = self._create_flavor({'flavorid': 'f1'})
+ params = (flavor['flavorid'], 'p1')
+ self._create_flavor_access(*params)
+ self.assertRaises(exception.FlavorAccessNotFound,
+ db.flavor_access_remove,
+ self.ctxt, flavor['flavorid'], 'p2')
+
+ def test_flavor_access_removed_after_flavor_destroy(self):
+ # Destroying a flavor must cascade-remove its access records,
+ # leaving other flavors' records intact.
+ flavor1 = self._create_flavor({'flavorid': 'f1', 'name': 'n1'})
+ flavor2 = self._create_flavor({'flavorid': 'f2', 'name': 'n2'})
+ values = [
+ (flavor1['flavorid'], 'p1'),
+ (flavor1['flavorid'], 'p2'),
+ (flavor2['flavorid'], 'p3')
+ ]
+ for v in values:
+ self._create_flavor_access(*v)
+
+ db.flavor_destroy(self.ctxt, flavor1['name'])
+
+ p = (self.ctxt, flavor1['flavorid'])
+ self.assertEqual(0, len(db.flavor_access_get_by_flavor_id(*p)))
+ p = (self.ctxt, flavor2['flavorid'])
+ self.assertEqual(1, len(db.flavor_access_get_by_flavor_id(*p)))
+ db.flavor_destroy(self.ctxt, flavor2['name'])
+ self.assertEqual(0, len(db.flavor_access_get_by_flavor_id(*p)))
+
+
+class FixedIPTestCase(BaseInstanceTypeTestCase):
+ # Tests for the fixed-IP DB API: create/bulk_create, lookups by
+ # address/host/instance/virtual-interface, (dis)associate, timeout
+ # deallocation, and update.
+
+ def _timeout_test(self, ctxt, timeout, multi_host):
+ # Seed one instance on host 'foo', one network on host 'bar', and
+ # four fixed IPs of which exactly one (unallocated, on the network,
+ # updated before the timeout) is eligible for deallocation.
+ instance = db.instance_create(ctxt, dict(host='foo'))
+ net = db.network_create_safe(ctxt, dict(multi_host=multi_host,
+ host='bar'))
+ old = timeout - datetime.timedelta(seconds=5)
+ new = timeout + datetime.timedelta(seconds=5)
+ # should deallocate
+ db.fixed_ip_create(ctxt, dict(allocated=False,
+ instance_uuid=instance['uuid'],
+ network_id=net['id'],
+ updated_at=old))
+ # still allocated
+ db.fixed_ip_create(ctxt, dict(allocated=True,
+ instance_uuid=instance['uuid'],
+ network_id=net['id'],
+ updated_at=old))
+ # wrong network
+ db.fixed_ip_create(ctxt, dict(allocated=False,
+ instance_uuid=instance['uuid'],
+ network_id=None,
+ updated_at=old))
+ # too new
+ db.fixed_ip_create(ctxt, dict(allocated=False,
+ instance_uuid=instance['uuid'],
+ network_id=None,
+ updated_at=new))
+
+ def mock_db_query_first_to_raise_data_error_exception(self):
+ # Stub Query.first (via mox) to raise DBError so callers can
+ # verify their error translation paths.
+ self.mox.StubOutWithMock(query.Query, 'first')
+ query.Query.first().AndRaise(db_exc.DBError())
+ self.mox.ReplayAll()
+
+ def test_fixed_ip_disassociate_all_by_timeout_single_host(self):
+ # Single-host network: only the network host ('bar') deallocates.
+ now = timeutils.utcnow()
+ self._timeout_test(self.ctxt, now, False)
+ result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now)
+ self.assertEqual(result, 0)
+ result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now)
+ self.assertEqual(result, 1)
+
+ def test_fixed_ip_disassociate_all_by_timeout_multi_host(self):
+ # Multi-host network: the instance host ('foo') deallocates instead.
+ now = timeutils.utcnow()
+ self._timeout_test(self.ctxt, now, True)
+ result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now)
+ self.assertEqual(result, 1)
+ result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now)
+ self.assertEqual(result, 0)
+
+ def test_fixed_ip_get_by_floating_address(self):
+ # A floating IP's fixed_ip_id links back to the fixed IP row.
+ fixed_ip = db.fixed_ip_create(self.ctxt, {'address': '192.168.0.2'})
+ values = {'address': '8.7.6.5',
+ 'fixed_ip_id': fixed_ip['id']}
+ floating = db.floating_ip_create(self.ctxt, values)['address']
+ fixed_ip_ref = db.fixed_ip_get_by_floating_address(self.ctxt, floating)
+ self._assertEqualObjects(fixed_ip, fixed_ip_ref)
+
+ def test_fixed_ip_get_by_host(self):
+ # IPs associated with instances on a host are returned per host.
+ host_ips = {
+ 'host1': ['1.1.1.1', '1.1.1.2', '1.1.1.3'],
+ 'host2': ['1.1.1.4', '1.1.1.5'],
+ 'host3': ['1.1.1.6']
+ }
+
+ for host, ips in host_ips.iteritems():
+ for ip in ips:
+ instance_uuid = self._create_instance(host=host)
+ db.fixed_ip_create(self.ctxt, {'address': ip})
+ db.fixed_ip_associate(self.ctxt, ip, instance_uuid)
+
+ for host, ips in host_ips.iteritems():
+ ips_on_host = map(lambda x: x['address'],
+ db.fixed_ip_get_by_host(self.ctxt, host))
+ self._assertEqualListsOfPrimitivesAsSets(ips_on_host, ips)
+
+ def test_fixed_ip_get_by_network_host_not_found_exception(self):
+ self.assertRaises(
+ exception.FixedIpNotFoundForNetworkHost,
+ db.fixed_ip_get_by_network_host,
+ self.ctxt, 1, 'ignore')
+
+ def test_fixed_ip_get_by_network_host_fixed_ip_found(self):
+ db.fixed_ip_create(self.ctxt, dict(network_id=1, host='host'))
+
+ fip = db.fixed_ip_get_by_network_host(self.ctxt, 1, 'host')
+
+ self.assertEqual(1, fip['network_id'])
+ self.assertEqual('host', fip['host'])
+
+ def _create_instance(self, **kwargs):
+ # Helper: create an instance with the given values, return its uuid.
+ instance = db.instance_create(self.ctxt, kwargs)
+ return instance['uuid']
+
+ def test_fixed_ip_get_by_instance_fixed_ip_found(self):
+ instance_uuid = self._create_instance()
+
+ FIXED_IP_ADDRESS = '192.168.1.5'
+ db.fixed_ip_create(self.ctxt, dict(
+ instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS))
+
+ ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
+ self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS],
+ [ips_list[0].address])
+
+ def test_fixed_ip_get_by_instance_multiple_fixed_ips_found(self):
+ instance_uuid = self._create_instance()
+
+ FIXED_IP_ADDRESS_1 = '192.168.1.5'
+ db.fixed_ip_create(self.ctxt, dict(
+ instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1))
+ FIXED_IP_ADDRESS_2 = '192.168.1.6'
+ db.fixed_ip_create(self.ctxt, dict(
+ instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2))
+
+ ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
+ self._assertEqualListsOfPrimitivesAsSets(
+ [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
+ [ips_list[0].address, ips_list[1].address])
+
+ def test_fixed_ip_get_by_instance_inappropriate_ignored(self):
+ # IPs belonging to a different instance must not leak into results.
+ instance_uuid = self._create_instance()
+
+ FIXED_IP_ADDRESS_1 = '192.168.1.5'
+ db.fixed_ip_create(self.ctxt, dict(
+ instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1))
+ FIXED_IP_ADDRESS_2 = '192.168.1.6'
+ db.fixed_ip_create(self.ctxt, dict(
+ instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2))
+
+ another_instance = db.instance_create(self.ctxt, {})
+ db.fixed_ip_create(self.ctxt, dict(
+ instance_uuid=another_instance['uuid'], address="192.168.1.7"))
+
+ ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
+ self._assertEqualListsOfPrimitivesAsSets(
+ [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
+ [ips_list[0].address, ips_list[1].address])
+
+ def test_fixed_ip_get_by_instance_not_found_exception(self):
+ instance_uuid = self._create_instance()
+
+ self.assertRaises(exception.FixedIpNotFoundForInstance,
+ db.fixed_ip_get_by_instance,
+ self.ctxt, instance_uuid)
+
+ def test_fixed_ips_by_virtual_interface_fixed_ip_found(self):
+ instance_uuid = self._create_instance()
+
+ vif = db.virtual_interface_create(
+ self.ctxt, dict(instance_uuid=instance_uuid))
+
+ FIXED_IP_ADDRESS = '192.168.1.5'
+ db.fixed_ip_create(self.ctxt, dict(
+ virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS))
+
+ ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
+ self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS],
+ [ips_list[0].address])
+
+ def test_fixed_ips_by_virtual_interface_multiple_fixed_ips_found(self):
+ instance_uuid = self._create_instance()
+
+ vif = db.virtual_interface_create(
+ self.ctxt, dict(instance_uuid=instance_uuid))
+
+ FIXED_IP_ADDRESS_1 = '192.168.1.5'
+ db.fixed_ip_create(self.ctxt, dict(
+ virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1))
+ FIXED_IP_ADDRESS_2 = '192.168.1.6'
+ db.fixed_ip_create(self.ctxt, dict(
+ virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2))
+
+ ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
+ self._assertEqualListsOfPrimitivesAsSets(
+ [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
+ [ips_list[0].address, ips_list[1].address])
+
+ def test_fixed_ips_by_virtual_interface_inappropriate_ignored(self):
+ # IPs on a different VIF of the same instance must be excluded.
+ instance_uuid = self._create_instance()
+
+ vif = db.virtual_interface_create(
+ self.ctxt, dict(instance_uuid=instance_uuid))
+
+ FIXED_IP_ADDRESS_1 = '192.168.1.5'
+ db.fixed_ip_create(self.ctxt, dict(
+ virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1))
+ FIXED_IP_ADDRESS_2 = '192.168.1.6'
+ db.fixed_ip_create(self.ctxt, dict(
+ virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2))
+
+ another_vif = db.virtual_interface_create(
+ self.ctxt, dict(instance_uuid=instance_uuid))
+ db.fixed_ip_create(self.ctxt, dict(
+ virtual_interface_id=another_vif.id, address="192.168.1.7"))
+
+ ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
+ self._assertEqualListsOfPrimitivesAsSets(
+ [FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
+ [ips_list[0].address, ips_list[1].address])
+
+ def test_fixed_ips_by_virtual_interface_no_ip_found(self):
+ instance_uuid = self._create_instance()
+
+ vif = db.virtual_interface_create(
+ self.ctxt, dict(instance_uuid=instance_uuid))
+
+ ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
+ self.assertEqual(0, len(ips_list))
+
+ def create_fixed_ip(self, **params):
+ # Helper: create a fixed IP (default address 192.168.0.1) and
+ # return its address.
+ default_params = {'address': '192.168.0.1'}
+ default_params.update(params)
+ return db.fixed_ip_create(self.ctxt, default_params)['address']
+
+ def test_fixed_ip_associate_fails_if_ip_not_in_network(self):
+ instance_uuid = self._create_instance()
+ self.assertRaises(exception.FixedIpNotFoundForNetwork,
+ db.fixed_ip_associate,
+ self.ctxt, None, instance_uuid)
+
+ def test_fixed_ip_associate_fails_if_ip_in_use(self):
+ instance_uuid = self._create_instance()
+
+ address = self.create_fixed_ip(instance_uuid=instance_uuid)
+ self.assertRaises(exception.FixedIpAlreadyInUse,
+ db.fixed_ip_associate,
+ self.ctxt, address, instance_uuid)
+
+ def test_fixed_ip_associate_succeeds(self):
+ instance_uuid = self._create_instance()
+ network = db.network_create_safe(self.ctxt, {})
+
+ address = self.create_fixed_ip(network_id=network['id'])
+ db.fixed_ip_associate(self.ctxt, address, instance_uuid,
+ network_id=network['id'])
+ fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
+ self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
+
+ def test_fixed_ip_associate_succeeds_and_sets_network(self):
+ # Associating an IP with no network also stamps the network_id.
+ instance_uuid = self._create_instance()
+ network = db.network_create_safe(self.ctxt, {})
+
+ address = self.create_fixed_ip()
+ db.fixed_ip_associate(self.ctxt, address, instance_uuid,
+ network_id=network['id'])
+ fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
+ self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
+ self.assertEqual(fixed_ip['network_id'], network['id'])
+
+ def test_fixed_ip_associate_pool_invalid_uuid(self):
+ instance_uuid = '123'
+ self.assertRaises(exception.InvalidUUID, db.fixed_ip_associate_pool,
+ self.ctxt, None, instance_uuid)
+
+ def test_fixed_ip_associate_pool_no_more_fixed_ips(self):
+ instance_uuid = self._create_instance()
+ self.assertRaises(exception.NoMoreFixedIps, db.fixed_ip_associate_pool,
+ self.ctxt, None, instance_uuid)
+
+ def test_fixed_ip_associate_pool_succeeds(self):
+ instance_uuid = self._create_instance()
+ network = db.network_create_safe(self.ctxt, {})
+
+ address = self.create_fixed_ip(network_id=network['id'])
+ db.fixed_ip_associate_pool(self.ctxt, network['id'], instance_uuid)
+ fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
+ self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
+
+ def test_fixed_ip_create_same_address(self):
+ # Duplicate address raises FixedIpExists.
+ address = '192.168.1.5'
+ params = {'address': address}
+ db.fixed_ip_create(self.ctxt, params)
+ self.assertRaises(exception.FixedIpExists, db.fixed_ip_create,
+ self.ctxt, params)
+
+ def test_fixed_ip_create_success(self):
+ instance_uuid = self._create_instance()
+ network_id = db.network_create_safe(self.ctxt, {})['id']
+ param = {
+ 'reserved': False,
+ 'deleted': 0,
+ 'leased': False,
+ 'host': '127.0.0.1',
+ 'address': '192.168.1.5',
+ 'allocated': False,
+ 'instance_uuid': instance_uuid,
+ 'network_id': network_id,
+ 'virtual_interface_id': None
+ }
+
+ ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
+ fixed_ip_data = db.fixed_ip_create(self.ctxt, param)
+ self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
+
+ def test_fixed_ip_bulk_create_same_address(self):
+ # A duplicate address anywhere in the batch rolls back the whole
+ # bulk insert.
+ address_1 = '192.168.1.5'
+ address_2 = '192.168.1.6'
+ instance_uuid = self._create_instance()
+ network_id_1 = db.network_create_safe(self.ctxt, {})['id']
+ network_id_2 = db.network_create_safe(self.ctxt, {})['id']
+ params = [
+ {'reserved': False, 'deleted': 0, 'leased': False,
+ 'host': '127.0.0.1', 'address': address_2, 'allocated': False,
+ 'instance_uuid': instance_uuid, 'network_id': network_id_1,
+ 'virtual_interface_id': None},
+ {'reserved': False, 'deleted': 0, 'leased': False,
+ 'host': '127.0.0.1', 'address': address_1, 'allocated': False,
+ 'instance_uuid': instance_uuid, 'network_id': network_id_1,
+ 'virtual_interface_id': None},
+ {'reserved': False, 'deleted': 0, 'leased': False,
+ 'host': 'localhost', 'address': address_2, 'allocated': True,
+ 'instance_uuid': instance_uuid, 'network_id': network_id_2,
+ 'virtual_interface_id': None},
+ ]
+
+ self.assertRaises(exception.FixedIpExists, db.fixed_ip_bulk_create,
+ self.ctxt, params)
+ # In this case the transaction will be rolled back and none of the ips
+ # will make it to the database.
+ self.assertRaises(exception.FixedIpNotFoundForAddress,
+ db.fixed_ip_get_by_address, self.ctxt, address_1)
+ self.assertRaises(exception.FixedIpNotFoundForAddress,
+ db.fixed_ip_get_by_address, self.ctxt, address_2)
+
+ def test_fixed_ip_bulk_create_success(self):
+ address_1 = '192.168.1.5'
+ address_2 = '192.168.1.6'
+
+ instance_uuid = self._create_instance()
+ network_id_1 = db.network_create_safe(self.ctxt, {})['id']
+ network_id_2 = db.network_create_safe(self.ctxt, {})['id']
+ params = [
+ {'reserved': False, 'deleted': 0, 'leased': False,
+ 'host': '127.0.0.1', 'address': address_1, 'allocated': False,
+ 'instance_uuid': instance_uuid, 'network_id': network_id_1,
+ 'virtual_interface_id': None},
+ {'reserved': False, 'deleted': 0, 'leased': False,
+ 'host': 'localhost', 'address': address_2, 'allocated': True,
+ 'instance_uuid': instance_uuid, 'network_id': network_id_2,
+ 'virtual_interface_id': None}
+ ]
+
+ db.fixed_ip_bulk_create(self.ctxt, params)
+ ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at',
+ 'virtual_interface', 'network', 'floating_ips']
+ fixed_ip_data = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
+
+ # we have no `id` in incoming data so we can not use
+ # _assertEqualListsOfObjects to compare incoming data and received
+ # objects
+ fixed_ip_data = sorted(fixed_ip_data, key=lambda i: i['network_id'])
+ params = sorted(params, key=lambda i: i['network_id'])
+ for param, ip in zip(params, fixed_ip_data):
+ self._assertEqualObjects(param, ip, ignored_keys)
+
+ def test_fixed_ip_disassociate(self):
+ # Disassociation clears instance_uuid and virtual_interface_id
+ # while leaving the rest of the row untouched.
+ address = '192.168.1.5'
+ instance_uuid = self._create_instance()
+ network_id = db.network_create_safe(self.ctxt, {})['id']
+ values = {'address': '192.168.1.5', 'instance_uuid': instance_uuid}
+ vif = db.virtual_interface_create(self.ctxt, values)
+ param = {
+ 'reserved': False,
+ 'deleted': 0,
+ 'leased': False,
+ 'host': '127.0.0.1',
+ 'address': address,
+ 'allocated': False,
+ 'instance_uuid': instance_uuid,
+ 'network_id': network_id,
+ 'virtual_interface_id': vif['id']
+ }
+ db.fixed_ip_create(self.ctxt, param)
+
+ db.fixed_ip_disassociate(self.ctxt, address)
+ fixed_ip_data = db.fixed_ip_get_by_address(self.ctxt, address)
+ ignored_keys = ['created_at', 'id', 'deleted_at',
+ 'updated_at', 'instance_uuid',
+ 'virtual_interface_id']
+ self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
+ self.assertIsNone(fixed_ip_data['instance_uuid'])
+ self.assertIsNone(fixed_ip_data['virtual_interface_id'])
+
+ def test_fixed_ip_get_not_found_exception(self):
+ self.assertRaises(exception.FixedIpNotFound,
+ db.fixed_ip_get, self.ctxt, 0)
+
+ def test_fixed_ip_get_success2(self):
+ # NOTE(review): despite the 'success' name, this verifies that a
+ # non-admin context is refused (Forbidden) — consider renaming.
+ address = '192.168.1.5'
+ instance_uuid = self._create_instance()
+ network_id = db.network_create_safe(self.ctxt, {})['id']
+ param = {
+ 'reserved': False,
+ 'deleted': 0,
+ 'leased': False,
+ 'host': '127.0.0.1',
+ 'address': address,
+ 'allocated': False,
+ 'instance_uuid': instance_uuid,
+ 'network_id': network_id,
+ 'virtual_interface_id': None
+ }
+ fixed_ip_id = db.fixed_ip_create(self.ctxt, param)
+
+ self.ctxt.is_admin = False
+ self.assertRaises(exception.Forbidden, db.fixed_ip_get,
+ self.ctxt, fixed_ip_id)
+
+ def test_fixed_ip_get_success(self):
+ address = '192.168.1.5'
+ instance_uuid = self._create_instance()
+ network_id = db.network_create_safe(self.ctxt, {})['id']
+ param = {
+ 'reserved': False,
+ 'deleted': 0,
+ 'leased': False,
+ 'host': '127.0.0.1',
+ 'address': address,
+ 'allocated': False,
+ 'instance_uuid': instance_uuid,
+ 'network_id': network_id,
+ 'virtual_interface_id': None
+ }
+ db.fixed_ip_create(self.ctxt, param)
+
+ fixed_ip_id = db.fixed_ip_get_by_address(self.ctxt, address)['id']
+ fixed_ip_data = db.fixed_ip_get(self.ctxt, fixed_ip_id)
+ ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
+ self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
+
+ def test_fixed_ip_get_by_address(self):
+ # columns_to_join=['instance'] eagerly loads the instance relation.
+ instance_uuid = self._create_instance()
+ db.fixed_ip_create(self.ctxt, {'address': '1.2.3.4',
+ 'instance_uuid': instance_uuid,
+ })
+ fixed_ip = db.fixed_ip_get_by_address(self.ctxt, '1.2.3.4',
+ columns_to_join=['instance'])
+ self.assertIn('instance', fixed_ip.__dict__)
+ self.assertEqual(instance_uuid, fixed_ip.instance.uuid)
+
+ def test_fixed_ip_get_by_address_detailed_not_found_exception(self):
+ self.assertRaises(exception.FixedIpNotFoundForAddress,
+ db.fixed_ip_get_by_address_detailed, self.ctxt,
+ '192.168.1.5')
+
+ def test_fixed_ip_get_by_address_with_data_error_exception(self):
+ # A low-level DBError must be translated to FixedIpInvalid.
+ self.mock_db_query_first_to_raise_data_error_exception()
+ self.assertRaises(exception.FixedIpInvalid,
+ db.fixed_ip_get_by_address_detailed, self.ctxt,
+ '192.168.1.6')
+
+ def test_fixed_ip_get_by_address_detailed_sucsess(self):
+ # NOTE(review): 'sucsess' typo in the method name — kept as-is.
+ # The detailed lookup returns a (fixed_ip, network, instance) triple.
+ address = '192.168.1.5'
+ instance_uuid = self._create_instance()
+ network_id = db.network_create_safe(self.ctxt, {})['id']
+ param = {
+ 'reserved': False,
+ 'deleted': 0,
+ 'leased': False,
+ 'host': '127.0.0.1',
+ 'address': address,
+ 'allocated': False,
+ 'instance_uuid': instance_uuid,
+ 'network_id': network_id,
+ 'virtual_interface_id': None
+ }
+ db.fixed_ip_create(self.ctxt, param)
+
+ fixed_ip_data = db.fixed_ip_get_by_address_detailed(self.ctxt, address)
+ # fixed ip check here
+ ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
+ self._assertEqualObjects(param, fixed_ip_data[0], ignored_keys)
+
+ # network model check here
+ network_data = db.network_get(self.ctxt, network_id)
+ self._assertEqualObjects(network_data, fixed_ip_data[1])
+
+ # Instance check here
+ instance_data = db.instance_get_by_uuid(self.ctxt, instance_uuid)
+ ignored_keys = ['info_cache', 'system_metadata',
+ 'security_groups', 'metadata',
+ 'pci_devices'] # HOW ????
+ self._assertEqualObjects(instance_data, fixed_ip_data[2], ignored_keys)
+
+ def test_fixed_ip_update_not_found_for_address(self):
+ self.assertRaises(exception.FixedIpNotFoundForAddress,
+ db.fixed_ip_update, self.ctxt,
+ '192.168.1.5', {})
+
+ def test_fixed_ip_update(self):
+ # Updating by the original address replaces all mutable fields,
+ # including the address itself.
+ instance_uuid_1 = self._create_instance()
+ instance_uuid_2 = self._create_instance()
+ network_id_1 = db.network_create_safe(self.ctxt, {})['id']
+ network_id_2 = db.network_create_safe(self.ctxt, {})['id']
+ param_1 = {
+ 'reserved': True, 'deleted': 0, 'leased': True,
+ 'host': '192.168.133.1', 'address': '10.0.0.2',
+ 'allocated': True, 'instance_uuid': instance_uuid_1,
+ 'network_id': network_id_1, 'virtual_interface_id': '123',
+ }
+
+ param_2 = {
+ 'reserved': False, 'deleted': 0, 'leased': False,
+ 'host': '127.0.0.1', 'address': '10.0.0.3', 'allocated': False,
+ 'instance_uuid': instance_uuid_2, 'network_id': network_id_2,
+ 'virtual_interface_id': None
+ }
+
+ ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
+ fixed_ip_addr = db.fixed_ip_create(self.ctxt, param_1)['address']
+ db.fixed_ip_update(self.ctxt, fixed_ip_addr, param_2)
+ fixed_ip_after_update = db.fixed_ip_get_by_address(self.ctxt,
+ param_2['address'])
+ self._assertEqualObjects(param_2, fixed_ip_after_update, ignored_keys)
+
+
class FloatingIpTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Tests for the floating IP DB API.

    Covers creation (single and bulk), allocation from pools,
    association with fixed IPs, lookups, deallocation and updates.
    """

    def setUp(self):
        super(FloatingIpTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def _get_base_values(self):
        # Default column values used by _create_floating_ip; individual
        # tests override only the fields they care about.
        return {
            'address': '1.1.1.1',
            'fixed_ip_id': None,
            'project_id': 'fake_project',
            'host': 'fake_host',
            'auto_assigned': False,
            'pool': 'fake_pool',
            'interface': 'fake_interface',
        }

    def mock_db_query_first_to_raise_data_error_exception(self):
        # Stub Query.first() so it raises DBError, letting tests check how
        # the DB API translates low-level data errors.
        self.mox.StubOutWithMock(query.Query, 'first')
        query.Query.first().AndRaise(db_exc.DBError())
        self.mox.ReplayAll()

    def _create_floating_ip(self, values):
        """Create a floating IP from base values overridden by *values*."""
        if not values:
            values = {}
        vals = self._get_base_values()
        vals.update(values)
        return db.floating_ip_create(self.ctxt, vals)

    def test_floating_ip_get(self):
        values = [{'address': '0.0.0.0'}, {'address': '1.1.1.1'}]
        floating_ips = [self._create_floating_ip(val) for val in values]

        for floating_ip in floating_ips:
            real_floating_ip = db.floating_ip_get(self.ctxt, floating_ip['id'])
            self._assertEqualObjects(floating_ip, real_floating_ip,
                                     ignored_keys=['fixed_ip'])

    def test_floating_ip_get_not_found(self):
        self.assertRaises(exception.FloatingIpNotFound,
                          db.floating_ip_get, self.ctxt, 100500)

    def test_floating_ip_get_with_long_id_not_found(self):
        # An id too large for the column makes the query layer raise
        # DBError, which the API reports as InvalidID.
        self.mock_db_query_first_to_raise_data_error_exception()
        self.assertRaises(exception.InvalidID,
                          db.floating_ip_get, self.ctxt, 123456789101112)

    def test_floating_ip_get_pools(self):
        values = [
            {'address': '0.0.0.0', 'pool': 'abc'},
            {'address': '1.1.1.1', 'pool': 'abc'},
            {'address': '2.2.2.2', 'pool': 'def'},
            {'address': '3.3.3.3', 'pool': 'ghi'},
        ]
        for val in values:
            self._create_floating_ip(val)
        # Each distinct pool name should be reported exactly once.
        expected_pools = [{'name': x}
                          for x in set(map(lambda x: x['pool'], values))]
        real_pools = db.floating_ip_get_pools(self.ctxt)
        self._assertEqualListsOfPrimitivesAsSets(real_pools, expected_pools)

    def test_floating_ip_allocate_address(self):
        pools = {
            'pool1': ['0.0.0.0', '1.1.1.1'],
            'pool2': ['2.2.2.2'],
            'pool3': ['3.3.3.3', '4.4.4.4', '5.5.5.5']
        }
        for pool, addresses in pools.iteritems():
            for address in addresses:
                vals = {'pool': pool, 'address': address, 'project_id': None}
                self._create_floating_ip(vals)

        # Allocating as many times as each pool has addresses must hand out
        # every address of that pool exactly once.
        project_id = self._get_base_values()['project_id']
        for pool, addresses in pools.iteritems():
            alloc_addrs = []
            for i in addresses:
                float_addr = db.floating_ip_allocate_address(self.ctxt,
                                                             project_id, pool)
                alloc_addrs.append(float_addr)
            self._assertEqualListsOfPrimitivesAsSets(alloc_addrs, addresses)

    def test_floating_ip_allocate_auto_assigned(self):
        addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']

        # First two IPs keep the default auto_assigned=False, the last two
        # are created with auto_assigned=True.
        float_ips = []
        for i in range(0, 2):
            float_ips.append(self._create_floating_ip(
                {"address": addresses[i]}))
        for i in range(2, 4):
            float_ips.append(self._create_floating_ip({"address": addresses[i],
                                                       "auto_assigned": True}))

        for i in range(0, 2):
            float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
            self.assertFalse(float_ip.auto_assigned)
        for i in range(2, 4):
            float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
            self.assertTrue(float_ip.auto_assigned)

    def test_floating_ip_allocate_address_no_more_floating_ips(self):
        self.assertRaises(exception.NoMoreFloatingIps,
                          db.floating_ip_allocate_address,
                          self.ctxt, 'any_project_id', 'no_such_pool')

    def test_floating_ip_allocate_not_authorized(self):
        # A non-admin context may only allocate for its own project.
        ctxt = context.RequestContext(user_id='a', project_id='abc',
                                      is_admin=False)
        self.assertRaises(exception.Forbidden,
                          db.floating_ip_allocate_address,
                          ctxt, 'other_project_id', 'any_pool')

    def _get_existing_ips(self):
        """Return the addresses of all floating IPs currently in the DB."""
        return [ip['address'] for ip in db.floating_ip_get_all(self.ctxt)]

    def test_floating_ip_bulk_create(self):
        expected_ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
        db.floating_ip_bulk_create(self.ctxt,
                                   map(lambda x: {'address': x}, expected_ips))
        self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
                                                 expected_ips)

    def test_floating_ip_bulk_create_duplicate(self):
        ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
        prepare_ips = lambda x: {'address': x}

        result = db.floating_ip_bulk_create(self.ctxt, map(prepare_ips, ips))
        self.assertEqual('1.1.1.1', result[0].address)
        self.assertRaises(exception.FloatingIpExists,
                          db.floating_ip_bulk_create,
                          self.ctxt, map(prepare_ips, ['1.1.1.5', '1.1.1.4']))
        # The failed bulk create must be rolled back: '1.1.1.5' (the new
        # address in the failed batch) must not have been committed.
        self.assertRaises(exception.FloatingIpNotFoundForAddress,
                          db.floating_ip_get_by_address,
                          self.ctxt, '1.1.1.5')

    def test_floating_ip_bulk_destroy(self):
        ips_for_delete = []
        ips_for_non_delete = []

        def create_ips(i, j):
            return [{'address': '1.1.%s.%s' % (i, k)} for k in range(1, j + 1)]

        # NOTE(boris-42): Create more than 256 ip to check that
        #                 _ip_range_splitter works properly.
        for i in range(1, 3):
            ips_for_delete.extend(create_ips(i, 255))
        ips_for_non_delete.extend(create_ips(3, 255))

        db.floating_ip_bulk_create(self.ctxt,
                                   ips_for_delete + ips_for_non_delete)

        # IPs created one-by-one are counted against the project quota
        # (bulk-created ones are not), so destroying them should decrement
        # the quota usage below.
        non_bulk_ips_for_delete = create_ips(4, 3)
        non_bulk_ips_for_non_delete = create_ips(5, 3)
        non_bulk_ips = non_bulk_ips_for_delete + non_bulk_ips_for_non_delete
        project_id = 'fake_project'
        reservations = quota.QUOTAS.reserve(self.ctxt,
                                            floating_ips=len(non_bulk_ips),
                                            project_id=project_id)
        for dct in non_bulk_ips:
            self._create_floating_ip(dct)
        quota.QUOTAS.commit(self.ctxt, reservations, project_id=project_id)
        self.assertEqual(db.quota_usage_get_all_by_project(
                            self.ctxt, project_id),
                         {'project_id': project_id,
                          'floating_ips': {'in_use': 6, 'reserved': 0}})
        ips_for_delete.extend(non_bulk_ips_for_delete)
        ips_for_non_delete.extend(non_bulk_ips_for_non_delete)

        db.floating_ip_bulk_destroy(self.ctxt, ips_for_delete)

        expected_addresses = map(lambda x: x['address'], ips_for_non_delete)
        self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
                                                 expected_addresses)
        self.assertEqual(db.quota_usage_get_all_by_project(
                            self.ctxt, project_id),
                         {'project_id': project_id,
                          'floating_ips': {'in_use': 3, 'reserved': 0}})

    def test_floating_ip_create(self):
        floating_ip = self._create_floating_ip({})
        ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
                        'created_at']

        self.assertIsNotNone(floating_ip['id'])
        self._assertEqualObjects(floating_ip, self._get_base_values(),
                                 ignored_keys)

    def test_floating_ip_create_duplicate(self):
        self._create_floating_ip({})
        self.assertRaises(exception.FloatingIpExists,
                          self._create_floating_ip, {})

    def _create_fixed_ip(self, params):
        """Create a fixed IP and return its address."""
        default_params = {'address': '192.168.0.1'}
        default_params.update(params)
        return db.fixed_ip_create(self.ctxt, default_params)['address']

    def test_floating_ip_fixed_ip_associate(self):
        float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
        fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']

        float_ips = [self._create_floating_ip({'address': address})
                     for address in float_addresses]
        fixed_addrs = [self._create_fixed_ip({'address': address})
                       for address in fixed_addresses]

        for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
            fixed_ip = db.floating_ip_fixed_ip_associate(self.ctxt,
                                                         float_ip.address,
                                                         fixed_addr, 'host')
            self.assertEqual(fixed_ip.address, fixed_addr)

            updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
            self.assertEqual(fixed_ip.id, updated_float_ip.fixed_ip_id)
            self.assertEqual('host', updated_float_ip.host)

        # Test that already allocated float_ip returns None
        result = db.floating_ip_fixed_ip_associate(self.ctxt,
                                                   float_addresses[0],
                                                   fixed_addresses[0], 'host')
        self.assertIsNone(result)

    def test_floating_ip_fixed_ip_associate_float_ip_not_found(self):
        self.assertRaises(exception.FloatingIpNotFoundForAddress,
                          db.floating_ip_fixed_ip_associate,
                          self.ctxt, '10.10.10.10', 'some', 'some')

    def test_floating_ip_deallocate(self):
        values = {'address': '1.1.1.1', 'project_id': 'fake', 'host': 'fake'}
        float_ip = self._create_floating_ip(values)
        rows_updated = db.floating_ip_deallocate(self.ctxt, float_ip.address)
        self.assertEqual(1, rows_updated)

        # Deallocation clears ownership and placement fields.
        updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
        self.assertIsNone(updated_float_ip.project_id)
        self.assertIsNone(updated_float_ip.host)
        self.assertFalse(updated_float_ip.auto_assigned)

    def test_floating_ip_deallocate_address_not_found(self):
        # Deallocating an unknown address is a no-op reported as 0 rows.
        self.assertEqual(0, db.floating_ip_deallocate(self.ctxt, '2.2.2.2'))

    def test_floating_ip_destroy(self):
        addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
        float_ips = [self._create_floating_ip({'address': addr})
                     for addr in addresses]

        expected_len = len(addresses)
        for float_ip in float_ips:
            db.floating_ip_destroy(self.ctxt, float_ip.address)
            self.assertRaises(exception.FloatingIpNotFound,
                              db.floating_ip_get, self.ctxt, float_ip.id)
            expected_len -= 1
            if expected_len > 0:
                self.assertEqual(expected_len,
                                 len(db.floating_ip_get_all(self.ctxt)))
            else:
                # floating_ip_get_all raises once the table is empty.
                self.assertRaises(exception.NoFloatingIpsDefined,
                                  db.floating_ip_get_all, self.ctxt)

    def test_floating_ip_disassociate(self):
        float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
        fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']

        float_ips = [self._create_floating_ip({'address': address})
                     for address in float_addresses]
        fixed_addrs = [self._create_fixed_ip({'address': address})
                       for address in fixed_addresses]

        for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
            db.floating_ip_fixed_ip_associate(self.ctxt,
                                              float_ip.address,
                                              fixed_addr, 'host')

        for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
            # disassociate returns the fixed IP that was detached and
            # clears the link/host columns on the floating IP.
            fixed = db.floating_ip_disassociate(self.ctxt, float_ip.address)
            self.assertEqual(fixed.address, fixed_addr)
            updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
            self.assertIsNone(updated_float_ip.fixed_ip_id)
            self.assertIsNone(updated_float_ip.host)

    def test_floating_ip_disassociate_not_found(self):
        self.assertRaises(exception.FloatingIpNotFoundForAddress,
                          db.floating_ip_disassociate, self.ctxt,
                          '11.11.11.11')

    def test_floating_ip_set_auto_assigned(self):
        addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
        float_ips = [self._create_floating_ip({'address': addr,
                                               'auto_assigned': False})
                     for addr in addresses]

        # Flag only the first two; the third must remain untouched.
        for i in range(2):
            db.floating_ip_set_auto_assigned(self.ctxt, float_ips[i].address)
        for i in range(2):
            float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
            self.assertTrue(float_ip.auto_assigned)

        float_ip = db.floating_ip_get(self.ctxt, float_ips[2].id)
        self.assertFalse(float_ip.auto_assigned)

    def test_floating_ip_get_all(self):
        addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
        float_ips = [self._create_floating_ip({'address': addr})
                     for addr in addresses]
        self._assertEqualListsOfObjects(float_ips,
                                        db.floating_ip_get_all(self.ctxt))

    def test_floating_ip_get_all_not_found(self):
        self.assertRaises(exception.NoFloatingIpsDefined,
                          db.floating_ip_get_all, self.ctxt)

    def test_floating_ip_get_all_by_host(self):
        hosts = {
            'host1': ['1.1.1.1', '1.1.1.2'],
            'host2': ['2.1.1.1', '2.1.1.2'],
            'host3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
        }

        hosts_with_float_ips = {}
        for host, addresses in hosts.iteritems():
            hosts_with_float_ips[host] = []
            for address in addresses:
                float_ip = self._create_floating_ip({'host': host,
                                                     'address': address})
                hosts_with_float_ips[host].append(float_ip)

        for host, float_ips in hosts_with_float_ips.iteritems():
            real_float_ips = db.floating_ip_get_all_by_host(self.ctxt, host)
            self._assertEqualListsOfObjects(float_ips, real_float_ips)

    def test_floating_ip_get_all_by_host_not_found(self):
        self.assertRaises(exception.FloatingIpNotFoundForHost,
                          db.floating_ip_get_all_by_host,
                          self.ctxt, 'non_exists_host')

    def test_floating_ip_get_all_by_project(self):
        projects = {
            'pr1': ['1.1.1.1', '1.1.1.2'],
            'pr2': ['2.1.1.1', '2.1.1.2'],
            'pr3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
        }

        projects_with_float_ips = {}
        for project_id, addresses in projects.iteritems():
            projects_with_float_ips[project_id] = []
            for address in addresses:
                float_ip = self._create_floating_ip({'project_id': project_id,
                                                     'address': address})
                projects_with_float_ips[project_id].append(float_ip)

        for project_id, float_ips in projects_with_float_ips.iteritems():
            real_float_ips = db.floating_ip_get_all_by_project(self.ctxt,
                                                               project_id)
            self._assertEqualListsOfObjects(float_ips, real_float_ips,
                                            ignored_keys='fixed_ip')

    def test_floating_ip_get_all_by_project_not_authorized(self):
        # A non-admin context may only list its own project's IPs.
        ctxt = context.RequestContext(user_id='a', project_id='abc',
                                      is_admin=False)
        self.assertRaises(exception.Forbidden,
                          db.floating_ip_get_all_by_project,
                          ctxt, 'other_project')

    def test_floating_ip_get_by_address(self):
        addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
        float_ips = [self._create_floating_ip({'address': addr})
                     for addr in addresses]

        for float_ip in float_ips:
            real_float_ip = db.floating_ip_get_by_address(self.ctxt,
                                                          float_ip.address)
            self._assertEqualObjects(float_ip, real_float_ip,
                                     ignored_keys='fixed_ip')

    def test_floating_ip_get_by_address_not_found(self):
        self.assertRaises(exception.FloatingIpNotFoundForAddress,
                          db.floating_ip_get_by_address,
                          self.ctxt, '20.20.20.20')

    def test_floating_ip_get_by_invalid_address(self):
        # A malformed address makes the query layer raise DBError, which
        # the API reports as InvalidIpAddressError.
        self.mock_db_query_first_to_raise_data_error_exception()
        self.assertRaises(exception.InvalidIpAddressError,
                          db.floating_ip_get_by_address,
                          self.ctxt, 'non_exists_host')

    def test_floating_ip_get_by_fixed_address(self):
        fixed_float = [
            ('1.1.1.1', '2.2.2.1'),
            ('1.1.1.2', '2.2.2.2'),
            ('1.1.1.3', '2.2.2.3')
        ]

        for fixed_addr, float_addr in fixed_float:
            self._create_floating_ip({'address': float_addr})
            self._create_fixed_ip({'address': fixed_addr})
            db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
                                              fixed_addr, 'some_host')

        for fixed_addr, float_addr in fixed_float:
            float_ip = db.floating_ip_get_by_fixed_address(self.ctxt,
                                                           fixed_addr)
            self.assertEqual(float_addr, float_ip[0]['address'])

    def test_floating_ip_get_by_fixed_ip_id(self):
        fixed_float = [
            ('1.1.1.1', '2.2.2.1'),
            ('1.1.1.2', '2.2.2.2'),
            ('1.1.1.3', '2.2.2.3')
        ]

        for fixed_addr, float_addr in fixed_float:
            self._create_floating_ip({'address': float_addr})
            self._create_fixed_ip({'address': fixed_addr})
            db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
                                              fixed_addr, 'some_host')

        for fixed_addr, float_addr in fixed_float:
            fixed_ip = db.fixed_ip_get_by_address(self.ctxt, fixed_addr)
            float_ip = db.floating_ip_get_by_fixed_ip_id(self.ctxt,
                                                         fixed_ip['id'])
            self.assertEqual(float_addr, float_ip[0]['address'])

    def test_floating_ip_update(self):
        float_ip = self._create_floating_ip({})

        values = {
            'project_id': 'some_pr',
            'host': 'some_host',
            'auto_assigned': True,
            'interface': 'some_interface',
            'pool': 'some_pool'
        }
        floating_ref = db.floating_ip_update(self.ctxt, float_ip['address'],
                                             values)
        self.assertIsNotNone(floating_ref)
        updated_float_ip = db.floating_ip_get(self.ctxt, float_ip['id'])
        self._assertEqualObjects(updated_float_ip, values,
                                 ignored_keys=['id', 'address', 'updated_at',
                                               'deleted_at', 'created_at',
                                               'deleted', 'fixed_ip_id',
                                               'fixed_ip'])

    def test_floating_ip_update_to_duplicate(self):
        float_ip1 = self._create_floating_ip({'address': '1.1.1.1'})
        float_ip2 = self._create_floating_ip({'address': '1.1.1.2'})

        self.assertRaises(exception.FloatingIpExists,
                          db.floating_ip_update,
                          self.ctxt, float_ip2['address'],
                          {'address': float_ip1['address']})
+
+
class InstanceDestroyConstraints(test.TestCase):
    """Exercise conditional instance_destroy with equal_any/not_equal."""

    def test_destroy_with_equal_any_constraint_met_single_value(self):
        """Destroy succeeds when the single allowed task_state matches."""
        admin_ctx = context.get_admin_context()
        inst = db.instance_create(admin_ctx, {'task_state': 'deleting'})
        allow_deleting = db.constraint(task_state=db.equal_any('deleting'))
        db.instance_destroy(admin_ctx, inst['uuid'], allow_deleting)
        self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
                          admin_ctx, inst['uuid'])

    def test_destroy_with_equal_any_constraint_met(self):
        """Destroy succeeds when task_state matches one of several values."""
        admin_ctx = context.get_admin_context()
        inst = db.instance_create(admin_ctx, {'task_state': 'deleting'})
        allow_either = db.constraint(
            task_state=db.equal_any('deleting', 'error'))
        db.instance_destroy(admin_ctx, inst['uuid'], allow_either)
        self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
                          admin_ctx, inst['uuid'])

    def test_destroy_with_equal_any_constraint_not_met(self):
        """Destroy fails and leaves the row when vm_state does not match."""
        admin_ctx = context.get_admin_context()
        inst = db.instance_create(admin_ctx, {'vm_state': 'resize'})
        require_state = db.constraint(
            vm_state=db.equal_any('active', 'error'))
        self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
                          admin_ctx, inst['uuid'], require_state)
        survivor = db.instance_get_by_uuid(admin_ctx, inst['uuid'])
        self.assertFalse(survivor['deleted'])

    def test_destroy_with_not_equal_constraint_met(self):
        """Destroy succeeds when task_state avoids the forbidden values."""
        admin_ctx = context.get_admin_context()
        inst = db.instance_create(admin_ctx, {'task_state': 'deleting'})
        forbid_states = db.constraint(
            task_state=db.not_equal('error', 'resize'))
        db.instance_destroy(admin_ctx, inst['uuid'], forbid_states)
        self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
                          admin_ctx, inst['uuid'])

    def test_destroy_with_not_equal_constraint_not_met(self):
        """Destroy fails and leaves the row when vm_state is forbidden."""
        admin_ctx = context.get_admin_context()
        inst = db.instance_create(admin_ctx, {'vm_state': 'active'})
        forbid_states = db.constraint(
            vm_state=db.not_equal('active', 'error'))
        self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
                          admin_ctx, inst['uuid'], forbid_states)
        survivor = db.instance_get_by_uuid(admin_ctx, inst['uuid'])
        self.assertFalse(survivor['deleted'])
+
+
class VolumeUsageDBApiTestCase(test.TestCase):
    """Tests for vol_usage_update / vol_get_usage_by_time.

    The volume usage cache keeps per-volume "curr_*" counters for the
    current period and "tot_*" counters that accumulate when a period is
    rolled over via ``update_totals=True``.
    """

    def setUp(self):
        super(VolumeUsageDBApiTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)

        # Freeze time so curr/tot_last_refreshed values are predictable.
        self.useFixture(test.TimeOverride())

    def test_vol_usage_update_no_totals_update(self):
        ctxt = context.get_admin_context()
        now = timeutils.utcnow()
        timeutils.set_time_override(now)
        start_time = now - datetime.timedelta(seconds=10)

        expected_vol_usages = {
            u'1': {'volume_id': u'1',
                   'instance_uuid': 'fake-instance-uuid1',
                   'project_id': 'fake-project-uuid1',
                   'user_id': 'fake-user-uuid1',
                   'curr_reads': 1000,
                   'curr_read_bytes': 2000,
                   'curr_writes': 3000,
                   'curr_write_bytes': 4000,
                   'curr_last_refreshed': now,
                   'tot_reads': 0,
                   'tot_read_bytes': 0,
                   'tot_writes': 0,
                   'tot_write_bytes': 0,
                   'tot_last_refreshed': None},
            u'2': {'volume_id': u'2',
                   'instance_uuid': 'fake-instance-uuid2',
                   'project_id': 'fake-project-uuid2',
                   'user_id': 'fake-user-uuid2',
                   'curr_reads': 100,
                   'curr_read_bytes': 200,
                   'curr_writes': 300,
                   'curr_write_bytes': 400,
                   'tot_reads': 0,
                   'tot_read_bytes': 0,
                   'tot_writes': 0,
                   'tot_write_bytes': 0,
                   'tot_last_refreshed': None}
        }

        def _compare(vol_usage, expected):
            for key, value in expected.items():
                self.assertEqual(vol_usage[key], value)

        vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
        self.assertEqual(len(vol_usages), 0)

        # Without update_totals a second update for the same volume simply
        # replaces the curr_* counters; tot_* stays at zero.
        db.vol_usage_update(ctxt, u'1', rd_req=10, rd_bytes=20,
                            wr_req=30, wr_bytes=40,
                            instance_id='fake-instance-uuid1',
                            project_id='fake-project-uuid1',
                            user_id='fake-user-uuid1',
                            availability_zone='fake-az')
        db.vol_usage_update(ctxt, u'2', rd_req=100, rd_bytes=200,
                            wr_req=300, wr_bytes=400,
                            instance_id='fake-instance-uuid2',
                            project_id='fake-project-uuid2',
                            user_id='fake-user-uuid2',
                            availability_zone='fake-az')
        db.vol_usage_update(ctxt, u'1', rd_req=1000, rd_bytes=2000,
                            wr_req=3000, wr_bytes=4000,
                            instance_id='fake-instance-uuid1',
                            project_id='fake-project-uuid1',
                            user_id='fake-user-uuid1',
                            availability_zone='fake-az')

        vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
        self.assertEqual(len(vol_usages), 2)
        for usage in vol_usages:
            _compare(usage, expected_vol_usages[usage.volume_id])

    def test_vol_usage_update_totals_update(self):
        ctxt = context.get_admin_context()
        now = datetime.datetime(1, 1, 1, 1, 0, 0)
        start_time = now - datetime.timedelta(seconds=10)
        now1 = now + datetime.timedelta(minutes=1)
        now2 = now + datetime.timedelta(minutes=2)
        now3 = now + datetime.timedelta(minutes=3)

        # t=now: plain update -> counters land in curr_*.
        timeutils.set_time_override(now)
        db.vol_usage_update(ctxt, u'1', rd_req=100, rd_bytes=200,
                            wr_req=300, wr_bytes=400,
                            instance_id='fake-instance-uuid',
                            project_id='fake-project-uuid',
                            user_id='fake-user-uuid',
                            availability_zone='fake-az')
        current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
        self.assertEqual(current_usage['tot_reads'], 0)
        self.assertEqual(current_usage['curr_reads'], 100)

        # t=now1: update_totals folds curr_* into tot_* and resets curr_*.
        timeutils.set_time_override(now1)
        db.vol_usage_update(ctxt, u'1', rd_req=200, rd_bytes=300,
                            wr_req=400, wr_bytes=500,
                            instance_id='fake-instance-uuid',
                            project_id='fake-project-uuid',
                            user_id='fake-user-uuid',
                            availability_zone='fake-az',
                            update_totals=True)
        current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
        self.assertEqual(current_usage['tot_reads'], 200)
        self.assertEqual(current_usage['curr_reads'], 0)

        # t=now2: plain update starts a fresh curr_* period.
        timeutils.set_time_override(now2)
        db.vol_usage_update(ctxt, u'1', rd_req=300, rd_bytes=400,
                            wr_req=500, wr_bytes=600,
                            instance_id='fake-instance-uuid',
                            project_id='fake-project-uuid',
                            availability_zone='fake-az',
                            user_id='fake-user-uuid')
        current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
        self.assertEqual(current_usage['tot_reads'], 200)
        self.assertEqual(current_usage['curr_reads'], 300)

        # t=now3: second rollover accumulates into tot_* again.
        timeutils.set_time_override(now3)
        db.vol_usage_update(ctxt, u'1', rd_req=400, rd_bytes=500,
                            wr_req=600, wr_bytes=700,
                            instance_id='fake-instance-uuid',
                            project_id='fake-project-uuid',
                            user_id='fake-user-uuid',
                            availability_zone='fake-az',
                            update_totals=True)

        vol_usages = db.vol_get_usage_by_time(ctxt, start_time)

        # tot_last_refreshed tracks the last rollover (now3),
        # curr_last_refreshed the last plain update (now2).
        expected_vol_usages = {'volume_id': u'1',
                               'project_id': 'fake-project-uuid',
                               'user_id': 'fake-user-uuid',
                               'instance_uuid': 'fake-instance-uuid',
                               'availability_zone': 'fake-az',
                               'tot_reads': 600,
                               'tot_read_bytes': 800,
                               'tot_writes': 1000,
                               'tot_write_bytes': 1200,
                               'tot_last_refreshed': now3,
                               'curr_reads': 0,
                               'curr_read_bytes': 0,
                               'curr_writes': 0,
                               'curr_write_bytes': 0,
                               'curr_last_refreshed': now2}

        self.assertEqual(1, len(vol_usages))
        for key, value in expected_vol_usages.items():
            self.assertEqual(vol_usages[0][key], value, key)

    def test_vol_usage_update_when_blockdevicestats_reset(self):
        ctxt = context.get_admin_context()
        now = timeutils.utcnow()
        start_time = now - datetime.timedelta(seconds=10)

        vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
        self.assertEqual(len(vol_usages), 0)

        db.vol_usage_update(ctxt, u'1',
                            rd_req=10000, rd_bytes=20000,
                            wr_req=30000, wr_bytes=40000,
                            instance_id='fake-instance-uuid1',
                            project_id='fake-project-uuid1',
                            availability_zone='fake-az',
                            user_id='fake-user-uuid1')

        # Instance rebooted or crashed. block device stats were reset and are
        # less than the previous values
        db.vol_usage_update(ctxt, u'1',
                            rd_req=100, rd_bytes=200,
                            wr_req=300, wr_bytes=400,
                            instance_id='fake-instance-uuid1',
                            project_id='fake-project-uuid1',
                            availability_zone='fake-az',
                            user_id='fake-user-uuid1')

        db.vol_usage_update(ctxt, u'1',
                            rd_req=200, rd_bytes=300,
                            wr_req=400, wr_bytes=500,
                            instance_id='fake-instance-uuid1',
                            project_id='fake-project-uuid1',
                            availability_zone='fake-az',
                            user_id='fake-user-uuid1')

        # The counter reset is detected: the pre-reset values were rolled
        # into tot_* and the newest post-reset values become curr_*.
        vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
        expected_vol_usage = {'volume_id': u'1',
                              'instance_uuid': 'fake-instance-uuid1',
                              'project_id': 'fake-project-uuid1',
                              'availability_zone': 'fake-az',
                              'user_id': 'fake-user-uuid1',
                              'curr_reads': 200,
                              'curr_read_bytes': 300,
                              'curr_writes': 400,
                              'curr_write_bytes': 500,
                              'tot_reads': 10000,
                              'tot_read_bytes': 20000,
                              'tot_writes': 30000,
                              'tot_write_bytes': 40000}
        for key, value in expected_vol_usage.items():
            self.assertEqual(vol_usage[key], value, key)

    def test_vol_usage_update_totals_update_when_blockdevicestats_reset(self):
        # This is unlikely to happen, but could when a volume is detached
        # right after a instance has rebooted / recovered and before
        # the system polled and updated the volume usage cache table.
        ctxt = context.get_admin_context()
        now = timeutils.utcnow()
        start_time = now - datetime.timedelta(seconds=10)

        vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
        self.assertEqual(len(vol_usages), 0)

        db.vol_usage_update(ctxt, u'1',
                            rd_req=10000, rd_bytes=20000,
                            wr_req=30000, wr_bytes=40000,
                            instance_id='fake-instance-uuid1',
                            project_id='fake-project-uuid1',
                            availability_zone='fake-az',
                            user_id='fake-user-uuid1')

        # Instance rebooted or crashed. block device stats were reset and are
        # less than the previous values
        db.vol_usage_update(ctxt, u'1',
                            rd_req=100, rd_bytes=200,
                            wr_req=300, wr_bytes=400,
                            instance_id='fake-instance-uuid1',
                            project_id='fake-project-uuid1',
                            availability_zone='fake-az',
                            user_id='fake-user-uuid1',
                            update_totals=True)

        # tot_* holds the pre-reset totals plus the post-reset delta
        # (10000+100 etc.) and curr_* is cleared by the rollover.
        vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
        expected_vol_usage = {'volume_id': u'1',
                              'instance_uuid': 'fake-instance-uuid1',
                              'project_id': 'fake-project-uuid1',
                              'availability_zone': 'fake-az',
                              'user_id': 'fake-user-uuid1',
                              'curr_reads': 0,
                              'curr_read_bytes': 0,
                              'curr_writes': 0,
                              'curr_write_bytes': 0,
                              'tot_reads': 10100,
                              'tot_read_bytes': 20200,
                              'tot_writes': 30300,
                              'tot_write_bytes': 40400}
        for key, value in expected_vol_usage.items():
            self.assertEqual(vol_usage[key], value, key)
+
+
class TaskLogTestCase(test.TestCase):
    """Tests for the task_log_* DB API."""

    def setUp(self):
        super(TaskLogTestCase, self).setUp()
        self.context = context.get_admin_context()
        self.task_name = 'fake-task-name'
        self.host = 'fake-host'
        self.message = 'Fake task message'
        now = timeutils.utcnow()
        self.begin = now - datetime.timedelta(seconds=10)
        self.end = now - datetime.timedelta(seconds=5)
        # Every test starts with a single running task-log entry.
        db.task_log_begin_task(self.context, self.task_name, self.begin,
                               self.end, self.host, message=self.message)

    def test_task_log_get(self):
        """The entry created in setUp is retrievable with all its fields."""
        entry = db.task_log_get(self.context, self.task_name, self.begin,
                                self.end, self.host)
        expected = {'task_name': self.task_name,
                    'period_beginning': self.begin,
                    'period_ending': self.end,
                    'host': self.host,
                    'message': self.message}
        for key, value in expected.items():
            self.assertEqual(value, entry[key])

    def test_task_log_get_all(self):
        """Filtering by a non-matching state yields an empty result."""
        matched = db.task_log_get_all(self.context, self.task_name,
                                      self.begin, self.end, host=self.host)
        self.assertEqual(1, len(matched))
        unmatched = db.task_log_get_all(self.context, self.task_name,
                                        self.begin, self.end,
                                        host=self.host, state='')
        self.assertEqual(0, len(unmatched))

    def test_task_log_begin_task(self):
        """A second task with a different name can be started and fetched."""
        db.task_log_begin_task(self.context, 'fake', self.begin,
                               self.end, self.host, task_items=42,
                               message=self.message)
        entry = db.task_log_get(self.context, 'fake', self.begin,
                                self.end, self.host)
        self.assertEqual('fake', entry['task_name'])

    def test_task_log_begin_task_duplicate(self):
        """Starting the same task twice raises TaskAlreadyRunning."""
        args = (self.context, 'fake', self.begin, self.end, self.host)
        db.task_log_begin_task(*args, message=self.message)
        self.assertRaises(exception.TaskAlreadyRunning,
                          db.task_log_begin_task,
                          *args, message=self.message)

    def test_task_log_end_task(self):
        """Ending a task records the error count."""
        db.task_log_end_task(self.context, self.task_name, self.begin,
                             self.end, self.host, 1, message=self.message)
        entry = db.task_log_get(self.context, self.task_name, self.begin,
                                self.end, self.host)
        self.assertEqual(1, entry['errors'])

    def test_task_log_end_task_task_not_running(self):
        """Ending a task that was never started raises TaskNotRunning."""
        self.assertRaises(exception.TaskNotRunning,
                          db.task_log_end_task, self.context,
                          'nonexistent', self.begin, self.end,
                          self.host, 42, message=self.message)
+
+
+class BlockDeviceMappingTestCase(test.TestCase):
+ def setUp(self):
+ super(BlockDeviceMappingTestCase, self).setUp()
+ self.ctxt = context.get_admin_context()
+ self.instance = db.instance_create(self.ctxt, {})
+
+ def _create_bdm(self, values):
+ values.setdefault('instance_uuid', self.instance['uuid'])
+ values.setdefault('device_name', 'fake_device')
+ values.setdefault('source_type', 'volume')
+ values.setdefault('destination_type', 'volume')
+ block_dev = block_device.BlockDeviceDict(values)
+ db.block_device_mapping_create(self.ctxt, block_dev, legacy=False)
+ uuid = block_dev['instance_uuid']
+
+ bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+
+ for bdm in bdms:
+ if bdm['device_name'] == values['device_name']:
+ return bdm
+
+ def test_scrub_empty_str_values_no_effect(self):
+ values = {'volume_size': 5}
+ expected = copy.copy(values)
+ sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
+ self.assertEqual(values, expected)
+
+ def test_scrub_empty_str_values_empty_string(self):
+ values = {'volume_size': ''}
+ sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
+ self.assertEqual(values, {})
+
+ def test_scrub_empty_str_values_empty_unicode(self):
+ values = {'volume_size': u''}
+ sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
+ self.assertEqual(values, {})
+
+ def test_block_device_mapping_create(self):
+ bdm = self._create_bdm({})
+ self.assertIsNotNone(bdm)
+
+ def test_block_device_mapping_update(self):
+ bdm = self._create_bdm({})
+ result = db.block_device_mapping_update(
+ self.ctxt, bdm['id'], {'destination_type': 'moon'},
+ legacy=False)
+ uuid = bdm['instance_uuid']
+ bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(bdm_real[0]['destination_type'], 'moon')
+ # Also make sure the update call returned correct data
+ self.assertEqual(dict(bdm_real[0].iteritems()),
+ dict(result.iteritems()))
+
    def test_block_device_mapping_update_or_create(self):
        # Exercises the three behaviors of update_or_create: create a new
        # row, update an existing row matched by device_name, and always
        # create when device_name is None (no match possible).
        values = {
            'instance_uuid': self.instance['uuid'],
            'device_name': 'fake_name',
            'source_type': 'volume',
            'destination_type': 'volume'
        }
        # check create
        db.block_device_mapping_update_or_create(self.ctxt, values,
                                                 legacy=False)
        uuid = values['instance_uuid']
        bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
        self.assertEqual(len(bdm_real), 1)
        self.assertEqual(bdm_real[0]['device_name'], 'fake_name')

        # check update: the same device_name must update in place, not
        # create a second row.
        values['destination_type'] = 'camelot'
        db.block_device_mapping_update_or_create(self.ctxt, values,
                                                 legacy=False)
        bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
        self.assertEqual(len(bdm_real), 1)
        bdm_real = bdm_real[0]
        self.assertEqual(bdm_real['device_name'], 'fake_name')
        self.assertEqual(bdm_real['destination_type'], 'camelot')

        # check create without device_name
        bdm1 = dict(values)
        bdm1['device_name'] = None
        db.block_device_mapping_update_or_create(self.ctxt, bdm1, legacy=False)
        bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
        self.assertEqual(len(bdm_real), 2)
        bdm_real = bdm_real[1]
        self.assertIsNone(bdm_real['device_name'])

        # check create multiple devices without device_name: each None-named
        # entry creates a fresh row instead of updating the previous one.
        bdm2 = dict(values)
        bdm2['device_name'] = None
        db.block_device_mapping_update_or_create(self.ctxt, bdm2, legacy=False)
        bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
        self.assertEqual(len(bdm_real), 3)
        bdm_real = bdm_real[2]
        self.assertIsNone(bdm_real['device_name'])
+
+ def test_block_device_mapping_update_or_create_multiple_ephemeral(self):
+ uuid = self.instance['uuid']
+ values = {
+ 'instance_uuid': uuid,
+ 'source_type': 'blank',
+ 'guest_format': 'myformat',
+ }
+
+ bdm1 = dict(values)
+ bdm1['device_name'] = '/dev/sdb'
+ db.block_device_mapping_update_or_create(self.ctxt, bdm1, legacy=False)
+
+ bdm2 = dict(values)
+ bdm2['device_name'] = '/dev/sdc'
+ db.block_device_mapping_update_or_create(self.ctxt, bdm2, legacy=False)
+
+ bdm_real = sorted(
+ db.block_device_mapping_get_all_by_instance(self.ctxt, uuid),
+ key=lambda bdm: bdm['device_name']
+ )
+
+ self.assertEqual(len(bdm_real), 2)
+ for bdm, device_name in zip(bdm_real, ['/dev/sdb', '/dev/sdc']):
+ self.assertEqual(bdm['device_name'], device_name)
+ self.assertEqual(bdm['guest_format'], 'myformat')
+
+ def test_block_device_mapping_update_or_create_check_remove_virt(self):
+ uuid = self.instance['uuid']
+ values = {
+ 'instance_uuid': uuid,
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'guest_format': 'swap',
+ }
+
+ # check that old swap bdms are deleted on create
+ val1 = dict(values)
+ val1['device_name'] = 'device1'
+ db.block_device_mapping_create(self.ctxt, val1, legacy=False)
+ val2 = dict(values)
+ val2['device_name'] = 'device2'
+ db.block_device_mapping_update_or_create(self.ctxt, val2, legacy=False)
+ bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(len(bdm_real), 1)
+ bdm_real = bdm_real[0]
+ self.assertEqual(bdm_real['device_name'], 'device2')
+ self.assertEqual(bdm_real['source_type'], 'blank')
+ self.assertEqual(bdm_real['guest_format'], 'swap')
+ db.block_device_mapping_destroy(self.ctxt, bdm_real['id'])
+
+ def test_block_device_mapping_get_all_by_instance(self):
+ uuid1 = self.instance['uuid']
+ uuid2 = db.instance_create(self.ctxt, {})['uuid']
+
+ bmds_values = [{'instance_uuid': uuid1,
+ 'device_name': '/dev/vda'},
+ {'instance_uuid': uuid2,
+ 'device_name': '/dev/vdb'},
+ {'instance_uuid': uuid2,
+ 'device_name': '/dev/vdc'}]
+
+ for bdm in bmds_values:
+ self._create_bdm(bdm)
+
+ bmd = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid1)
+ self.assertEqual(len(bmd), 1)
+ self.assertEqual(bmd[0]['device_name'], '/dev/vda')
+
+ bmd = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid2)
+ self.assertEqual(len(bmd), 2)
+
+ def test_block_device_mapping_destroy(self):
+ bdm = self._create_bdm({})
+ db.block_device_mapping_destroy(self.ctxt, bdm['id'])
+ bdm = db.block_device_mapping_get_all_by_instance(self.ctxt,
+ bdm['instance_uuid'])
+ self.assertEqual(len(bdm), 0)
+
+ def test_block_device_mapping_destroy_by_instance_and_volume(self):
+ vol_id1 = '69f5c254-1a5b-4fff-acf7-cb369904f58f'
+ vol_id2 = '69f5c254-1a5b-4fff-acf7-cb369904f59f'
+
+ self._create_bdm({'device_name': '/dev/vda', 'volume_id': vol_id1})
+ self._create_bdm({'device_name': '/dev/vdb', 'volume_id': vol_id2})
+
+ uuid = self.instance['uuid']
+ db.block_device_mapping_destroy_by_instance_and_volume(self.ctxt, uuid,
+ vol_id1)
+ bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(len(bdms), 1)
+ self.assertEqual(bdms[0]['device_name'], '/dev/vdb')
+
+ def test_block_device_mapping_destroy_by_instance_and_device(self):
+ self._create_bdm({'device_name': '/dev/vda'})
+ self._create_bdm({'device_name': '/dev/vdb'})
+
+ uuid = self.instance['uuid']
+ params = (self.ctxt, uuid, '/dev/vdb')
+ db.block_device_mapping_destroy_by_instance_and_device(*params)
+
+ bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
+ self.assertEqual(len(bdms), 1)
+ self.assertEqual(bdms[0]['device_name'], '/dev/vda')
+
+ def test_block_device_mapping_get_by_volume_id(self):
+ self._create_bdm({'volume_id': 'fake_id'})
+ bdm = db.block_device_mapping_get_by_volume_id(self.ctxt, 'fake_id')
+ self.assertEqual(bdm['volume_id'], 'fake_id')
+
+ def test_block_device_mapping_get_by_volume_id_join_instance(self):
+ self._create_bdm({'volume_id': 'fake_id'})
+ bdm = db.block_device_mapping_get_by_volume_id(self.ctxt, 'fake_id',
+ ['instance'])
+ self.assertEqual(bdm['volume_id'], 'fake_id')
+ self.assertEqual(bdm['instance']['uuid'], self.instance['uuid'])
+
+
class AgentBuildTestCase(test.TestCase, ModelsObjectComparatorMixin):

    """Tests for db.api.agent_build_* methods."""

    def setUp(self):
        super(AgentBuildTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def test_agent_build_create_and_get_all(self):
        # The table starts empty; after one create it holds exactly
        # the created row.
        self.assertEqual(0, len(db.agent_build_get_all(self.ctxt)))
        created = db.agent_build_create(self.ctxt, {'os': 'GNU/HURD'})
        builds = db.agent_build_get_all(self.ctxt)
        self.assertEqual(1, len(builds))
        self._assertEqualObjects(created, builds[0])

    def test_agent_build_get_by_triple(self):
        created = db.agent_build_create(
            self.ctxt, {'hypervisor': 'kvm', 'os': 'FreeBSD',
                        'architecture': arch.X86_64})
        # A non-matching architecture yields no result.
        self.assertIsNone(db.agent_build_get_by_triple(
            self.ctxt, 'kvm', 'FreeBSD', 'i386'))
        self._assertEqualObjects(created, db.agent_build_get_by_triple(
            self.ctxt, 'kvm', 'FreeBSD', arch.X86_64))

    def test_agent_build_destroy(self):
        build = db.agent_build_create(self.ctxt, {})
        self.assertEqual(1, len(db.agent_build_get_all(self.ctxt)))
        db.agent_build_destroy(self.ctxt, build.id)
        self.assertEqual(0, len(db.agent_build_get_all(self.ctxt)))

    def test_agent_build_update(self):
        build = db.agent_build_create(self.ctxt, {'os': 'HaikuOS'})
        db.agent_build_update(self.ctxt, build.id, {'os': 'ReactOS'})
        self.assertEqual('ReactOS', db.agent_build_get_all(self.ctxt)[0].os)

    def test_agent_build_destroy_destroyed(self):
        # Destroying an already-destroyed build must raise.
        build = db.agent_build_create(self.ctxt, {})
        db.agent_build_destroy(self.ctxt, build.id)
        self.assertRaises(exception.AgentBuildNotFound,
                          db.agent_build_destroy, self.ctxt, build.id)

    def test_agent_build_update_destroyed(self):
        # Updating a destroyed build must raise.
        build = db.agent_build_create(self.ctxt, {'os': 'HaikuOS'})
        db.agent_build_destroy(self.ctxt, build.id)
        self.assertRaises(exception.AgentBuildNotFound,
                          db.agent_build_update,
                          self.ctxt, build.id, {'os': 'OS/2'})

    def test_agent_build_exists(self):
        # Duplicate (hypervisor, os, architecture) triples are rejected.
        values = {'hypervisor': 'kvm', 'os': 'FreeBSD',
                  'architecture': arch.X86_64}
        db.agent_build_create(self.ctxt, values)
        self.assertRaises(exception.AgentBuildExists, db.agent_build_create,
                          self.ctxt, values)

    def test_agent_build_get_all_by_hypervisor(self):
        values = {'hypervisor': 'kvm', 'os': 'FreeBSD',
                  'architecture': arch.X86_64}
        created = db.agent_build_create(self.ctxt, values)
        self._assertEqualListsOfObjects(
            [created], db.agent_build_get_all(self.ctxt, hypervisor='kvm'))
+
+
class VirtualInterfaceTestCase(test.TestCase, ModelsObjectComparatorMixin):

    """Tests for db.api.virtual_interface_* methods."""

    def setUp(self):
        super(VirtualInterfaceTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        # Every VIF needs an owning instance and a network to attach to.
        self.instance_uuid = db.instance_create(self.ctxt, {})['uuid']
        values = {'host': 'localhost', 'project_id': 'project1'}
        self.network = db.network_create_safe(self.ctxt, values)

    def _get_base_values(self):
        # Minimal valid VIF values; a fresh uuid per call.
        return {
            'instance_uuid': self.instance_uuid,
            'address': 'fake_address',
            'network_id': self.network['id'],
            'uuid': str(stdlib_uuid.uuid4())
        }

    def mock_db_query_first_to_raise_data_error_exception(self):
        # Make any Query.first() call raise a DBError so the API's
        # error-translation path can be exercised.
        self.mox.StubOutWithMock(query.Query, 'first')
        query.Query.first().AndRaise(db_exc.DBError())
        self.mox.ReplayAll()

    def _create_virt_interface(self, values):
        # Merge overrides into the base values and create the VIF.
        v = self._get_base_values()
        v.update(values)
        return db.virtual_interface_create(self.ctxt, v)

    def test_virtual_interface_create(self):
        vif = self._create_virt_interface({})
        self.assertIsNotNone(vif['id'])
        # uuid is ignored: _get_base_values generates a new one per call.
        ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
                        'created_at', 'uuid']
        self._assertEqualObjects(vif, self._get_base_values(), ignored_keys)

    def test_virtual_interface_create_with_duplicate_address(self):
        # Re-using an existing address (same uuid) must fail.
        vif = self._create_virt_interface({})
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          self._create_virt_interface, {"uuid": vif['uuid']})

    def test_virtual_interface_get(self):
        vifs = [self._create_virt_interface({'address': 'a'}),
                self._create_virt_interface({'address': 'b'})]

        for vif in vifs:
            real_vif = db.virtual_interface_get(self.ctxt, vif['id'])
            self._assertEqualObjects(vif, real_vif)

    def test_virtual_interface_get_by_address(self):
        vifs = [self._create_virt_interface({'address': 'first'}),
                self._create_virt_interface({'address': 'second'})]
        for vif in vifs:
            real_vif = db.virtual_interface_get_by_address(self.ctxt,
                                                           vif['address'])
            self._assertEqualObjects(vif, real_vif)

    def test_virtual_interface_get_by_address_not_found(self):
        # An unknown address returns None rather than raising.
        self.assertIsNone(db.virtual_interface_get_by_address(self.ctxt,
                          "i.nv.ali.ip"))

    def test_virtual_interface_get_by_address_data_error_exception(self):
        # A low-level DBError is translated to InvalidIpAddressError.
        self.mock_db_query_first_to_raise_data_error_exception()
        self.assertRaises(exception.InvalidIpAddressError,
                          db.virtual_interface_get_by_address,
                          self.ctxt,
                          "i.nv.ali.ip")

    def test_virtual_interface_get_by_uuid(self):
        vifs = [self._create_virt_interface({"address": "address_1"}),
                self._create_virt_interface({"address": "address_2"})]
        for vif in vifs:
            real_vif = db.virtual_interface_get_by_uuid(self.ctxt, vif['uuid'])
            self._assertEqualObjects(vif, real_vif)

    def test_virtual_interface_get_by_instance(self):
        inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
        vifs1 = [self._create_virt_interface({'address': 'fake1'}),
                 self._create_virt_interface({'address': 'fake2'})]
        # multiple nic of same instance
        vifs2 = [self._create_virt_interface({'address': 'fake3',
                                              'instance_uuid': inst_uuid2}),
                 self._create_virt_interface({'address': 'fake4',
                                              'instance_uuid': inst_uuid2})]
        vifs1_real = db.virtual_interface_get_by_instance(self.ctxt,
                                                          self.instance_uuid)
        vifs2_real = db.virtual_interface_get_by_instance(self.ctxt,
                                                          inst_uuid2)
        self._assertEqualListsOfObjects(vifs1, vifs1_real)
        # NOTE(review): the second check is order-sensitive, presumably to
        # assert creation order is preserved for multi-NIC instances.
        self._assertEqualOrderedListOfObjects(vifs2, vifs2_real)

    def test_virtual_interface_get_by_instance_and_network(self):
        inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
        values = {'host': 'localhost', 'project_id': 'project2'}
        network_id = db.network_create_safe(self.ctxt, values)['id']

        vifs = [self._create_virt_interface({'address': 'fake1'}),
                self._create_virt_interface({'address': 'fake2',
                                             'network_id': network_id,
                                             'instance_uuid': inst_uuid2}),
                self._create_virt_interface({'address': 'fake3',
                                             'instance_uuid': inst_uuid2})]
        for vif in vifs:
            params = (self.ctxt, vif['instance_uuid'], vif['network_id'])
            r_vif = db.virtual_interface_get_by_instance_and_network(*params)
            self._assertEqualObjects(r_vif, vif)

    def test_virtual_interface_delete_by_instance(self):
        # Deleting by instance removes only that instance's VIFs.
        inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']

        values = [dict(address='fake1'), dict(address='fake2'),
                  dict(address='fake3', instance_uuid=inst_uuid2)]
        for vals in values:
            self._create_virt_interface(vals)

        db.virtual_interface_delete_by_instance(self.ctxt, self.instance_uuid)

        real_vifs1 = db.virtual_interface_get_by_instance(self.ctxt,
                                                          self.instance_uuid)
        real_vifs2 = db.virtual_interface_get_by_instance(self.ctxt,
                                                          inst_uuid2)
        self.assertEqual(len(real_vifs1), 0)
        self.assertEqual(len(real_vifs2), 1)

    def test_virtual_interface_get_all(self):
        inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
        values = [dict(address='fake1'), dict(address='fake2'),
                  dict(address='fake3', instance_uuid=inst_uuid2)]

        vifs = [self._create_virt_interface(val) for val in values]
        real_vifs = db.virtual_interface_get_all(self.ctxt)
        self._assertEqualListsOfObjects(vifs, real_vifs)
+
+
class NetworkTestCase(test.TestCase, ModelsObjectComparatorMixin):

    """Tests for db.api.network_* methods."""

    def setUp(self):
        super(NetworkTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def _get_associated_fixed_ip(self, host, cidr, ip):
        # Build a network + instance + VIF + allocated fixed IP and
        # associate them; returns (network, instance) for assertions.
        network = db.network_create_safe(self.ctxt,
            {'project_id': 'project1', 'cidr': cidr})
        self.assertFalse(db.network_in_use_on_host(self.ctxt, network.id,
            host))
        instance = db.instance_create(self.ctxt,
            {'project_id': 'project1', 'host': host})
        virtual_interface = db.virtual_interface_create(self.ctxt,
            {'instance_uuid': instance.uuid, 'network_id': network.id,
            'address': ip})
        db.fixed_ip_create(self.ctxt, {'address': ip,
            'network_id': network.id, 'allocated': True,
            'virtual_interface_id': virtual_interface.id})
        db.fixed_ip_associate(self.ctxt, ip, instance.uuid,
            network.id)
        return network, instance

    def test_network_get_associated_default_route(self):
        # The first associated network carries the default route; a
        # second association on another network does not.
        network, instance = self._get_associated_fixed_ip('host.net',
            '192.0.2.0/30', '192.0.2.1')
        network2 = db.network_create_safe(self.ctxt,
            {'project_id': 'project1', 'cidr': '192.0.3.0/30'})
        ip = '192.0.3.1'
        virtual_interface = db.virtual_interface_create(self.ctxt,
            {'instance_uuid': instance.uuid, 'network_id': network2.id,
            'address': ip})
        db.fixed_ip_create(self.ctxt, {'address': ip,
            'network_id': network2.id, 'allocated': True,
            'virtual_interface_id': virtual_interface.id})
        db.fixed_ip_associate(self.ctxt, ip, instance.uuid,
            network2.id)
        data = db.network_get_associated_fixed_ips(self.ctxt, network.id)
        self.assertEqual(1, len(data))
        self.assertTrue(data[0]['default_route'])
        data = db.network_get_associated_fixed_ips(self.ctxt, network2.id)
        self.assertEqual(1, len(data))
        self.assertFalse(data[0]['default_route'])

    def test_network_get_associated_fixed_ips(self):
        network, instance = self._get_associated_fixed_ip('host.net',
            '192.0.2.0/30', '192.0.2.1')
        data = db.network_get_associated_fixed_ips(self.ctxt, network.id)
        self.assertEqual(1, len(data))
        self.assertEqual('192.0.2.1', data[0]['address'])
        self.assertEqual('192.0.2.1', data[0]['vif_address'])
        self.assertEqual(instance.uuid, data[0]['instance_uuid'])
        self.assertTrue(data[0]['allocated'])

    def test_network_create_safe(self):
        values = {'host': 'localhost', 'project_id': 'project1'}
        network = db.network_create_safe(self.ctxt, values)
        # A canonical 36-character uuid is generated automatically.
        self.assertEqual(36, len(network['uuid']))
        db_network = db.network_get(self.ctxt, network['id'])
        self._assertEqualObjects(network, db_network)

    def test_network_create_with_duplicate_vlan(self):
        # Two networks may not share a vlan id.
        values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
        values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 1}
        db.network_create_safe(self.ctxt, values1)
        self.assertRaises(exception.DuplicateVlan,
                          db.network_create_safe, self.ctxt, values2)

    def test_network_delete_safe(self):
        # Deletion is refused while any fixed IP is allocated; once the
        # allocation is cleared, the network and its IPs are soft-deleted.
        values = {'host': 'localhost', 'project_id': 'project1'}
        network = db.network_create_safe(self.ctxt, values)
        db.network_get(self.ctxt, network['id'])
        values = {'network_id': network['id'], 'address': '192.168.1.5'}
        address1 = db.fixed_ip_create(self.ctxt, values)['address']
        values = {'network_id': network['id'],
                  'address': '192.168.1.6',
                  'allocated': True}
        address2 = db.fixed_ip_create(self.ctxt, values)['address']
        self.assertRaises(exception.NetworkInUse,
                          db.network_delete_safe, self.ctxt, network['id'])
        db.fixed_ip_update(self.ctxt, address2, {'allocated': False})
        network = db.network_delete_safe(self.ctxt, network['id'])
        self.assertRaises(exception.FixedIpNotFoundForAddress,
                          db.fixed_ip_get_by_address, self.ctxt, address1)
        # The deleted fixed IP is still visible with read_deleted='yes'.
        ctxt = self.ctxt.elevated(read_deleted='yes')
        fixed_ip = db.fixed_ip_get_by_address(ctxt, address1)
        self.assertTrue(fixed_ip['deleted'])

    def test_network_in_use_on_host(self):
        # A network counts as in use on a host when an instance on that
        # host holds an allocated fixed IP in it.
        values = {'host': 'foo', 'hostname': 'myname'}
        instance = db.instance_create(self.ctxt, values)
        values = {'address': '192.168.1.5', 'instance_uuid': instance['uuid']}
        vif = db.virtual_interface_create(self.ctxt, values)
        values = {'address': '192.168.1.6',
                  'network_id': 1,
                  'allocated': True,
                  'instance_uuid': instance['uuid'],
                  'virtual_interface_id': vif['id']}
        db.fixed_ip_create(self.ctxt, values)
        self.assertEqual(db.network_in_use_on_host(self.ctxt, 1, 'foo'), True)
        self.assertEqual(db.network_in_use_on_host(self.ctxt, 1, 'bar'), False)

    def test_network_update_nonexistent(self):
        self.assertRaises(exception.NetworkNotFound,
            db.network_update, self.ctxt, 123456, {})

    def test_network_update_with_duplicate_vlan(self):
        # Updating a network onto another network's vlan must fail.
        values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
        values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 2}
        network_ref = db.network_create_safe(self.ctxt, values1)
        db.network_create_safe(self.ctxt, values2)
        self.assertRaises(exception.DuplicateVlan,
                          db.network_update, self.ctxt,
                          network_ref["id"], values2)

    def test_network_update(self):
        network = db.network_create_safe(self.ctxt, {'project_id': 'project1',
            'vlan': 1, 'host': 'test.com'})
        db.network_update(self.ctxt, network.id, {'vlan': 2})
        network_new = db.network_get(self.ctxt, network.id)
        self.assertEqual(2, network_new.vlan)

    def test_network_set_host_nonexistent_network(self):
        self.assertRaises(exception.NetworkNotFound,
                          db.network_set_host, self.ctxt, 123456, 'nonexistent')

    def test_network_set_host_with_initially_no_host(self):
        # network_set_host returns the host that is now set; an existing
        # host is kept in preference to the new one.
        values = {'host': 'example.com', 'project_id': 'project1'}
        network = db.network_create_safe(self.ctxt, values)
        self.assertEqual(
            db.network_set_host(self.ctxt, network.id, 'new.example.com'),
            'example.com')

    def test_network_set_host(self):
        values = {'project_id': 'project1'}
        network = db.network_create_safe(self.ctxt, values)
        self.assertEqual(
            db.network_set_host(self.ctxt, network.id, 'example.com'),
            'example.com')
        self.assertEqual('example.com',
            db.network_get(self.ctxt, network.id).host)

    def test_network_get_all_by_host(self):
        # A network matches a host via its own host column, via a fixed
        # IP on that host, or via an instance on that host.
        self.assertEqual([],
            db.network_get_all_by_host(self.ctxt, 'example.com'))
        host = 'h1.example.com'
        # network with host set
        net1 = db.network_create_safe(self.ctxt, {'host': host})
        self._assertEqualListsOfObjects([net1],
            db.network_get_all_by_host(self.ctxt, host))
        # network with fixed ip with host set
        net2 = db.network_create_safe(self.ctxt, {})
        db.fixed_ip_create(self.ctxt, {'host': host, 'network_id': net2.id})
        db.network_get_all_by_host(self.ctxt, host)
        self._assertEqualListsOfObjects([net1, net2],
            db.network_get_all_by_host(self.ctxt, host))
        # network with instance with host set
        net3 = db.network_create_safe(self.ctxt, {})
        instance = db.instance_create(self.ctxt, {'host': host})
        db.fixed_ip_create(self.ctxt, {'network_id': net3.id,
            'instance_uuid': instance.uuid})
        self._assertEqualListsOfObjects([net1, net2, net3],
            db.network_get_all_by_host(self.ctxt, host))

    def test_network_get_by_cidr(self):
        # Lookup works by either the IPv4 or the IPv6 cidr.
        cidr = '192.0.2.0/30'
        cidr_v6 = '2001:db8:1::/64'
        network = db.network_create_safe(self.ctxt,
            {'project_id': 'project1', 'cidr': cidr, 'cidr_v6': cidr_v6})
        self._assertEqualObjects(network,
            db.network_get_by_cidr(self.ctxt, cidr))
        self._assertEqualObjects(network,
            db.network_get_by_cidr(self.ctxt, cidr_v6))

    def test_network_get_by_cidr_nonexistent(self):
        self.assertRaises(exception.NetworkNotFoundForCidr,
            db.network_get_by_cidr, self.ctxt, '192.0.2.0/30')

    def test_network_get_by_uuid(self):
        network = db.network_create_safe(self.ctxt,
            {'project_id': 'project_1'})
        self._assertEqualObjects(network,
            db.network_get_by_uuid(self.ctxt, network.uuid))

    def test_network_get_by_uuid_nonexistent(self):
        self.assertRaises(exception.NetworkNotFoundForUUID,
            db.network_get_by_uuid, self.ctxt, 'non-existent-uuid')

    def test_network_get_all_by_uuids_no_networks(self):
        self.assertRaises(exception.NoNetworksFound,
            db.network_get_all_by_uuids, self.ctxt, ['non-existent-uuid'])

    def test_network_get_all_by_uuids(self):
        net1 = db.network_create_safe(self.ctxt, {})
        net2 = db.network_create_safe(self.ctxt, {})
        self._assertEqualListsOfObjects([net1, net2],
            db.network_get_all_by_uuids(self.ctxt, [net1.uuid, net2.uuid]))

    def test_network_get_all_no_networks(self):
        # An empty table raises rather than returning an empty list.
        self.assertRaises(exception.NoNetworksFound,
            db.network_get_all, self.ctxt)

    def test_network_get_all(self):
        network = db.network_create_safe(self.ctxt, {})
        network_db = db.network_get_all(self.ctxt)
        self.assertEqual(1, len(network_db))
        self._assertEqualObjects(network, network_db[0])

    def test_network_get_all_admin_user(self):
        # With project_only=True an admin still sees all networks.
        network1 = db.network_create_safe(self.ctxt, {})
        network2 = db.network_create_safe(self.ctxt,
                                          {'project_id': 'project1'})
        self._assertEqualListsOfObjects([network1, network2],
                                        db.network_get_all(self.ctxt,
                                                           project_only=True))

    def test_network_get_all_normal_user(self):
        # With project_only=True a non-admin sees only their project's
        # networks.
        normal_ctxt = context.RequestContext('fake', 'fake')
        db.network_create_safe(self.ctxt, {})
        db.network_create_safe(self.ctxt, {'project_id': 'project1'})
        network1 = db.network_create_safe(self.ctxt,
                                          {'project_id': 'fake'})
        network_db = db.network_get_all(normal_ctxt, project_only=True)
        self.assertEqual(1, len(network_db))
        self._assertEqualObjects(network1, network_db[0])

    def test_network_get(self):
        network = db.network_create_safe(self.ctxt, {})
        self._assertEqualObjects(db.network_get(self.ctxt, network.id),
            network)
        db.network_delete_safe(self.ctxt, network.id)
        self.assertRaises(exception.NetworkNotFound,
            db.network_get, self.ctxt, network.id)

    def test_network_associate(self):
        network = db.network_create_safe(self.ctxt, {})
        self.assertIsNone(network.project_id)
        db.network_associate(self.ctxt, "project1", network.id)
        self.assertEqual("project1", db.network_get(self.ctxt,
            network.id).project_id)

    def test_network_diassociate(self):
        # network_disassociate(ctxt, id, disassociate_host,
        # disassociate_project) clears the selected association only.
        network = db.network_create_safe(self.ctxt,
            {'project_id': 'project1', 'host': 'test.net'})
        # disassociate project
        db.network_disassociate(self.ctxt, network.id, False, True)
        self.assertIsNone(db.network_get(self.ctxt, network.id).project_id)
        # disassociate host
        db.network_disassociate(self.ctxt, network.id, True, False)
        self.assertIsNone(db.network_get(self.ctxt, network.id).host)

    def test_network_count_reserved_ips(self):
        net = db.network_create_safe(self.ctxt, {})
        self.assertEqual(0, db.network_count_reserved_ips(self.ctxt, net.id))
        db.fixed_ip_create(self.ctxt, {'network_id': net.id,
            'reserved': True})
        self.assertEqual(1, db.network_count_reserved_ips(self.ctxt, net.id))
+
+
class KeyPairTestCase(test.TestCase, ModelsObjectComparatorMixin):

    """Tests for db.api.key_pair_* methods."""

    def setUp(self):
        super(KeyPairTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def _create_key_pair(self, values):
        return db.key_pair_create(self.ctxt, values)

    def test_key_pair_create(self):
        param = {
            'name': 'test_1',
            'user_id': 'test_user_id_1',
            'public_key': 'test_public_key_1',
            'fingerprint': 'test_fingerprint_1'
        }
        key_pair = self._create_key_pair(param)

        self.assertIsNotNone(key_pair['id'])
        ignored_keys = ['deleted', 'created_at', 'updated_at',
                        'deleted_at', 'id']
        self._assertEqualObjects(key_pair, param, ignored_keys)

    def test_key_pair_create_with_duplicate_name(self):
        # A (name, user_id) pair must be unique.
        params = {'name': 'test_name', 'user_id': 'test_user_id'}
        self._create_key_pair(params)
        self.assertRaises(exception.KeyPairExists, self._create_key_pair,
                          params)

    def test_key_pair_get(self):
        params = [
            {'name': 'test_1', 'user_id': 'test_user_id_1'},
            {'name': 'test_2', 'user_id': 'test_user_id_2'},
            {'name': 'test_3', 'user_id': 'test_user_id_3'}
        ]
        key_pairs = [self._create_key_pair(p) for p in params]

        for key in key_pairs:
            real_key = db.key_pair_get(self.ctxt, key['user_id'], key['name'])
            self._assertEqualObjects(key, real_key)

    def test_key_pair_get_no_results(self):
        param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
        self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
                          self.ctxt, param['user_id'], param['name'])

    def test_key_pair_get_deleted(self):
        # A destroyed key pair is invisible normally but retrievable
        # with read_deleted='yes'.
        param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
        key_pair_created = self._create_key_pair(param)

        db.key_pair_destroy(self.ctxt, param['user_id'], param['name'])
        self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
                          self.ctxt, param['user_id'], param['name'])

        ctxt = self.ctxt.elevated(read_deleted='yes')
        key_pair_deleted = db.key_pair_get(ctxt, param['user_id'],
                                           param['name'])
        ignored_keys = ['deleted', 'created_at', 'updated_at', 'deleted_at']
        self._assertEqualObjects(key_pair_deleted, key_pair_created,
                                 ignored_keys)
        # Soft delete sets the deleted column to the row's id.
        self.assertEqual(key_pair_deleted['deleted'], key_pair_deleted['id'])

    def test_key_pair_get_all_by_user(self):
        params = [
            {'name': 'test_1', 'user_id': 'test_user_id_1'},
            {'name': 'test_2', 'user_id': 'test_user_id_1'},
            {'name': 'test_3', 'user_id': 'test_user_id_2'}
        ]
        key_pairs_user_1 = [self._create_key_pair(p) for p in params
                            if p['user_id'] == 'test_user_id_1']
        key_pairs_user_2 = [self._create_key_pair(p) for p in params
                            if p['user_id'] == 'test_user_id_2']

        real_keys_1 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_1')
        real_keys_2 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_2')

        self._assertEqualListsOfObjects(key_pairs_user_1, real_keys_1)
        self._assertEqualListsOfObjects(key_pairs_user_2, real_keys_2)

    def test_key_pair_count_by_user(self):
        params = [
            {'name': 'test_1', 'user_id': 'test_user_id_1'},
            {'name': 'test_2', 'user_id': 'test_user_id_1'},
            {'name': 'test_3', 'user_id': 'test_user_id_2'}
        ]
        for p in params:
            self._create_key_pair(p)

        count_1 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_1')
        self.assertEqual(count_1, 2)

        count_2 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_2')
        self.assertEqual(count_2, 1)

    def test_key_pair_destroy(self):
        param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
        self._create_key_pair(param)

        db.key_pair_destroy(self.ctxt, param['user_id'], param['name'])
        self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
                          self.ctxt, param['user_id'], param['name'])

    def test_key_pair_destroy_no_such_key(self):
        param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
        self.assertRaises(exception.KeypairNotFound,
                          db.key_pair_destroy, self.ctxt,
                          param['user_id'], param['name'])
+
+
+class QuotaTestCase(test.TestCase, ModelsObjectComparatorMixin):
+
+ """Tests for db.api.quota_* methods."""
+
    def setUp(self):
        super(QuotaTestCase, self).setUp()
        # All quota APIs under test require an admin context.
        self.ctxt = context.get_admin_context()
+
+ def test_quota_create(self):
+ quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
+ self.assertEqual(quota.resource, 'resource')
+ self.assertEqual(quota.hard_limit, 99)
+ self.assertEqual(quota.project_id, 'project1')
+
+ def test_quota_get(self):
+ quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
+ quota_db = db.quota_get(self.ctxt, 'project1', 'resource')
+ self._assertEqualObjects(quota, quota_db)
+
+ def test_quota_get_all_by_project(self):
+ for i in range(3):
+ for j in range(3):
+ db.quota_create(self.ctxt, 'proj%d' % i, 'resource%d' % j, j)
+ for i in range(3):
+ quotas_db = db.quota_get_all_by_project(self.ctxt, 'proj%d' % i)
+ self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
+ 'resource0': 0,
+ 'resource1': 1,
+ 'resource2': 2})
+
+ def test_quota_get_all_by_project_and_user(self):
+ for i in range(3):
+ for j in range(3):
+ db.quota_create(self.ctxt, 'proj%d' % i, 'resource%d' % j,
+ j - 1, user_id='user%d' % i)
+ for i in range(3):
+ quotas_db = db.quota_get_all_by_project_and_user(self.ctxt,
+ 'proj%d' % i,
+ 'user%d' % i)
+ self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
+ 'user_id': 'user%d' % i,
+ 'resource0': -1,
+ 'resource1': 0,
+ 'resource2': 1})
+
+ def test_quota_update(self):
+ db.quota_create(self.ctxt, 'project1', 'resource1', 41)
+ db.quota_update(self.ctxt, 'project1', 'resource1', 42)
+ quota = db.quota_get(self.ctxt, 'project1', 'resource1')
+ self.assertEqual(quota.hard_limit, 42)
+ self.assertEqual(quota.resource, 'resource1')
+ self.assertEqual(quota.project_id, 'project1')
+
    def test_quota_update_nonexistent(self):
        # Updating a quota that was never created must raise.
        self.assertRaises(exception.ProjectQuotaNotFound,
            db.quota_update, self.ctxt, 'project1', 'resource1', 42)
+
    def test_quota_get_nonexistent(self):
        # Fetching a quota that was never created must raise.
        self.assertRaises(exception.ProjectQuotaNotFound,
            db.quota_get, self.ctxt, 'project1', 'resource1')
+
+ def test_quota_reserve_all_resources(self):
+ quotas = {}
+ deltas = {}
+ reservable_resources = {}
+ for i, resource in enumerate(quota.resources):
+ if isinstance(resource, quota.ReservableResource):
+ quotas[resource.name] = db.quota_create(self.ctxt, 'project1',
+ resource.name, 100)
+ deltas[resource.name] = i
+ reservable_resources[resource.name] = resource
+
+ usages = {'instances': 3, 'cores': 6, 'ram': 9}
+ instances = []
+ for i in range(3):
+ instances.append(db.instance_create(self.ctxt,
+ {'vcpus': 2, 'memory_mb': 3,
+ 'project_id': 'project1'}))
+
+ usages['fixed_ips'] = 2
+ network = db.network_create_safe(self.ctxt, {})
+ for i in range(2):
+ address = '192.168.0.%d' % i
+ db.fixed_ip_create(self.ctxt, {'project_id': 'project1',
+ 'address': address,
+ 'network_id': network['id']})
+ db.fixed_ip_associate(self.ctxt, address,
+ instances[0].uuid, network['id'])
+
+ usages['floating_ips'] = 5
+ for i in range(5):
+ db.floating_ip_create(self.ctxt, {'project_id': 'project1'})
+
+ usages['security_groups'] = 3
+ for i in range(3):
+ db.security_group_create(self.ctxt, {'project_id': 'project1'})
+
+ usages['server_groups'] = 4
+ for i in range(4):
+ db.instance_group_create(self.ctxt, {'uuid': str(i),
+ 'project_id': 'project1'})
+
+ reservations_uuids = db.quota_reserve(self.ctxt, reservable_resources,
+ quotas, quotas, deltas, None,
+ None, None, 'project1')
+ resources_names = reservable_resources.keys()
+ for reservation_uuid in reservations_uuids:
+ reservation = _reservation_get(self.ctxt, reservation_uuid)
+ usage = db.quota_usage_get(self.ctxt, 'project1',
+ reservation.resource)
+ self.assertEqual(usage.in_use, usages[reservation.resource],
+ 'Resource: %s' % reservation.resource)
+ self.assertEqual(usage.reserved, deltas[reservation.resource])
+ self.assertIn(reservation.resource, resources_names)
+ resources_names.remove(reservation.resource)
+ self.assertEqual(len(resources_names), 0)
+
+ def test_quota_destroy_all_by_project(self):
+ reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
+ db.quota_destroy_all_by_project(self.ctxt, 'project1')
+ self.assertEqual(db.quota_get_all_by_project(self.ctxt, 'project1'),
+ {'project_id': 'project1'})
+ self.assertEqual(db.quota_get_all_by_project_and_user(self.ctxt,
+ 'project1', 'user1'),
+ {'project_id': 'project1', 'user_id': 'user1'})
+ self.assertEqual(db.quota_usage_get_all_by_project(
+ self.ctxt, 'project1'),
+ {'project_id': 'project1'})
+ for r in reservations:
+ self.assertRaises(exception.ReservationNotFound,
+ _reservation_get, self.ctxt, r)
+
    def test_quota_destroy_all_by_project_and_user(self):
        """Destroying per-user quotas removes that user's quota rows and
        reservations, while the project-scoped fixed_ips usage survives.
        """
        reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
        db.quota_destroy_all_by_project_and_user(self.ctxt, 'project1',
                                                 'user1')
        self.assertEqual(db.quota_get_all_by_project_and_user(self.ctxt,
                         'project1', 'user1'),
                         {'project_id': 'project1',
                          'user_id': 'user1'})
        # fixed_ips usage remains: it is tracked per project, not per user.
        self.assertEqual(db.quota_usage_get_all_by_project_and_user(
                         self.ctxt, 'project1', 'user1'),
                         {'project_id': 'project1',
                          'user_id': 'user1',
                          'fixed_ips': {'in_use': 2, 'reserved': 2}})
        for r in reservations:
            self.assertRaises(exception.ReservationNotFound,
                              _reservation_get, self.ctxt, r)
+
    def test_quota_usage_get_nonexistent(self):
        # Fetching usage for a resource that was never reserved raises.
        self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_get,
                          self.ctxt, 'p1', 'nonexitent_resource')
+
+ def test_quota_usage_get(self):
+ _quota_reserve(self.ctxt, 'p1', 'u1')
+ quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'resource0')
+ expected = {'resource': 'resource0', 'project_id': 'p1',
+ 'in_use': 0, 'reserved': 0, 'total': 0}
+ for key, value in expected.iteritems():
+ self.assertEqual(value, quota_usage[key])
+
    def test_quota_usage_get_all_by_project(self):
        # Usages are keyed by resource name; the counter values follow the
        # deltas set up by the shared _quota_reserve helper.
        _quota_reserve(self.ctxt, 'p1', 'u1')
        expected = {'project_id': 'p1',
                    'resource0': {'in_use': 0, 'reserved': 0},
                    'resource1': {'in_use': 1, 'reserved': 1},
                    'fixed_ips': {'in_use': 2, 'reserved': 2}}
        self.assertEqual(expected, db.quota_usage_get_all_by_project(
                         self.ctxt, 'p1'))
+
    def test_quota_usage_get_all_by_project_and_user(self):
        # Same as the project-level lookup, but the result also identifies
        # the user the usages belong to.
        _quota_reserve(self.ctxt, 'p1', 'u1')
        expected = {'project_id': 'p1',
                    'user_id': 'u1',
                    'resource0': {'in_use': 0, 'reserved': 0},
                    'resource1': {'in_use': 1, 'reserved': 1},
                    'fixed_ips': {'in_use': 2, 'reserved': 2}}
        self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
                         self.ctxt, 'p1', 'u1'))
+
    def test_quota_usage_update_nonexistent(self):
        # Updating a usage row that does not exist must raise, not create.
        self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_update,
                          self.ctxt, 'p1', 'u1', 'resource', in_use=42)
+
    def test_quota_usage_update(self):
        """quota_usage_update overwrites in_use/reserved; the returned
        'total' is their sum (42 + 43 == 85).
        """
        _quota_reserve(self.ctxt, 'p1', 'u1')
        db.quota_usage_update(self.ctxt, 'p1', 'u1', 'resource0', in_use=42,
                              reserved=43)
        quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'resource0', 'u1')
        expected = {'resource': 'resource0', 'project_id': 'p1',
                    'user_id': 'u1', 'in_use': 42, 'reserved': 43, 'total': 85}
        for key, value in expected.iteritems():
            self.assertEqual(value, quota_usage[key])
+
    def test_quota_create_exists(self):
        # A second limit for the same (project, resource) pair must raise
        # QuotaExists rather than silently replace the first value.
        db.quota_create(self.ctxt, 'project1', 'resource1', 41)
        self.assertRaises(exception.QuotaExists, db.quota_create, self.ctxt,
                          'project1', 'resource1', 42)
+
+
class QuotaReserveNoDbTestCase(test.NoDBTestCase):
    """Tests quota reserve/refresh operations using mock.

    These tests exercise the private helpers in sqlalchemy_api directly,
    with mocks instead of a real database.
    """

    def test_create_quota_usage_if_missing_not_created(self):
        # Tests that QuotaUsage isn't created if it's already in user_usages.
        resource = 'fake-resource'
        project_id = 'fake-project'
        user_id = 'fake_user'
        session = mock.sentinel
        quota_usage = mock.sentinel
        user_usages = {resource: quota_usage}
        with mock.patch.object(sqlalchemy_api, '_quota_usage_create') as quc:
            self.assertFalse(sqlalchemy_api._create_quota_usage_if_missing(
                                user_usages, resource, None,
                                project_id, user_id, session))
        # The creation helper must not have been invoked at all.
        self.assertFalse(quc.called)

    def _test_create_quota_usage_if_missing_created(self, per_project_quotas):
        # Tests that the QuotaUsage is created.
        user_usages = {}
        if per_project_quotas:
            resource = sqlalchemy_api.PER_PROJECT_QUOTAS[0]
        else:
            resource = 'fake-resource'
        project_id = 'fake-project'
        user_id = 'fake_user'
        session = mock.sentinel
        quota_usage = mock.sentinel
        with mock.patch.object(sqlalchemy_api, '_quota_usage_create',
                               return_value=quota_usage) as quc:
            self.assertTrue(sqlalchemy_api._create_quota_usage_if_missing(
                                user_usages, resource, None,
                                project_id, user_id, session))
        # The freshly created usage must be cached in user_usages.
        self.assertEqual(quota_usage, user_usages[resource])
        # Now test if the QuotaUsage was created with a user_id or not.
        if per_project_quotas:
            quc.assert_called_once_with(
                project_id, None, resource, 0, 0, None, session=session)
        else:
            quc.assert_called_once_with(
                project_id, user_id, resource, 0, 0, None, session=session)

    def test_create_quota_usage_if_missing_created_per_project_quotas(self):
        self._test_create_quota_usage_if_missing_created(True)

    def test_create_quota_usage_if_missing_created_user_quotas(self):
        self._test_create_quota_usage_if_missing_created(False)

    def test_is_quota_refresh_needed_in_use(self):
        # Tests when a quota refresh is needed based on the in_use value:
        # a negative in_use forces a refresh, a non-negative one does not.
        for in_use in range(-1, 1):
            # We have to set until_refresh=None otherwise mock will give it
            # a value which runs some code we don't want.
            quota_usage = mock.MagicMock(in_use=in_use, until_refresh=None)
            if in_use < 0:
                self.assertTrue(sqlalchemy_api._is_quota_refresh_needed(
                                        quota_usage, max_age=0))
            else:
                self.assertFalse(sqlalchemy_api._is_quota_refresh_needed(
                                        quota_usage, max_age=0))

    def test_is_quota_refresh_needed_until_refresh_none(self):
        # With in_use >= 0 and no until_refresh counter, no refresh is due.
        quota_usage = mock.MagicMock(in_use=0, until_refresh=None)
        self.assertFalse(sqlalchemy_api._is_quota_refresh_needed(quota_usage,
                                                                 max_age=0))

    def test_is_quota_refresh_needed_until_refresh_not_none(self):
        # Tests different values for the until_refresh counter: the helper
        # decrements the counter and reports a refresh when it hits zero.
        for until_refresh in range(3):
            quota_usage = mock.MagicMock(in_use=0, until_refresh=until_refresh)
            refresh = sqlalchemy_api._is_quota_refresh_needed(quota_usage,
                                                              max_age=0)
            until_refresh -= 1
            if until_refresh <= 0:
                self.assertTrue(refresh)
            else:
                self.assertFalse(refresh)
            self.assertEqual(until_refresh, quota_usage.until_refresh)

    def test_refresh_quota_usages(self):
        # _refresh_quota_usages must overwrite in_use and reset the
        # until_refresh countdown on the usage record.
        quota_usage = mock.Mock(spec=models.QuotaUsage)
        quota_usage.in_use = 5
        quota_usage.until_refresh = None
        sqlalchemy_api._refresh_quota_usages(quota_usage, until_refresh=5,
                                             in_use=6)
        self.assertEqual(6, quota_usage.in_use)
        self.assertEqual(5, quota_usage.until_refresh)

    def test_calculate_overquota_no_delta(self):
        # A negative delta (release) can never push a resource over quota.
        deltas = {'foo': -1}
        user_quotas = {'foo': 10}
        overs = sqlalchemy_api._calculate_overquota({}, user_quotas, deltas,
                                                    {}, {})
        self.assertFalse(overs)

    def test_calculate_overquota_unlimited_quota(self):
        # A quota of -1 means unlimited, so no over-quota is reported.
        deltas = {'foo': 1}
        project_quotas = {}
        user_quotas = {'foo': -1}
        project_usages = {}
        user_usages = {'foo': 10}
        overs = sqlalchemy_api._calculate_overquota(
            project_quotas, user_quotas, deltas, project_usages, user_usages)
        self.assertFalse(overs)

    def _test_calculate_overquota(self, resource, project_usages, user_usages):
        # Shared driver: with limits of 10 and a delta of 1, expects the
        # given resource to be reported as over quota.
        deltas = {resource: 1}
        project_quotas = {resource: 10}
        user_quotas = {resource: 10}
        overs = sqlalchemy_api._calculate_overquota(
            project_quotas, user_quotas, deltas, project_usages, user_usages)
        self.assertEqual(resource, overs[0])

    def test_calculate_overquota_per_project_quota_overquota(self):
        # In this test, user quotas are fine but project quotas are over.
        resource = 'foo'
        project_usages = {resource: {'total': 10}}
        user_usages = {resource: {'total': 5}}
        self._test_calculate_overquota(resource, project_usages, user_usages)

    def test_calculate_overquota_per_user_quota_overquota(self):
        # In this test, project quotas are fine but user quotas are over.
        resource = 'foo'
        project_usages = {resource: {'total': 5}}
        user_usages = {resource: {'total': 10}}
        self._test_calculate_overquota(resource, project_usages, user_usages)
+
+
class QuotaClassTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Tests for the quota-class DB API (named sets of default limits)."""

    def setUp(self):
        super(QuotaClassTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def test_quota_class_get_default(self):
        # Limits registered under the special 'default' class come back from
        # quota_class_get_default, converted from strings to integers.
        params = {
            'test_resource1': '10',
            'test_resource2': '20',
            'test_resource3': '30',
        }
        for res, limit in params.items():
            db.quota_class_create(self.ctxt, 'default', res, limit)

        defaults = db.quota_class_get_default(self.ctxt)
        self.assertEqual(defaults, dict(class_name='default',
                                        test_resource1=10,
                                        test_resource2=20,
                                        test_resource3=30))

    def test_quota_class_create(self):
        qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
        self.assertEqual(qc.class_name, 'class name')
        self.assertEqual(qc.resource, 'resource')
        self.assertEqual(qc.hard_limit, 42)

    def test_quota_class_get(self):
        qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
        qc_db = db.quota_class_get(self.ctxt, 'class name', 'resource')
        self._assertEqualObjects(qc, qc_db)

    def test_quota_class_get_nonexistent(self):
        self.assertRaises(exception.QuotaClassNotFound, db.quota_class_get,
                          self.ctxt, 'nonexistent', 'resource')

    def test_quota_class_get_all_by_name(self):
        # Three classes, each with resource0..resource2 limited to 0..2.
        for i in range(3):
            for j in range(3):
                db.quota_class_create(self.ctxt, 'class%d' % i,
                                      'resource%d' % j, j)
        for i in range(3):
            classes = db.quota_class_get_all_by_name(self.ctxt, 'class%d' % i)
            self.assertEqual(classes, {'class_name': 'class%d' % i,
                            'resource0': 0, 'resource1': 1, 'resource2': 2})

    def test_quota_class_update(self):
        db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
        db.quota_class_update(self.ctxt, 'class name', 'resource', 43)
        self.assertEqual(db.quota_class_get(self.ctxt, 'class name',
                                    'resource').hard_limit, 43)

    def test_quota_class_update_nonexistent(self):
        self.assertRaises(exception.QuotaClassNotFound, db.quota_class_update,
                          self.ctxt, 'class name', 'resource', 42)

    def test_refresh_quota_usages(self):
        # Previously this test called _refresh_quota_usages without any
        # assertion, so it could never fail. Verify the usage record is
        # actually updated (matching the equivalent test in
        # QuotaReserveNoDbTestCase).
        quota_usages = mock.Mock()
        sqlalchemy_api._refresh_quota_usages(quota_usages, until_refresh=5,
                                             in_use=6)
        self.assertEqual(6, quota_usages.in_use)
        self.assertEqual(5, quota_usages.until_refresh)
+
+
class S3ImageTestCase(test.TestCase):
    """Tests the s3_image_* DB API (S3 id <-> image uuid mapping)."""

    def setUp(self):
        super(S3ImageTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        # Three random image uuids and their corresponding s3_images rows.
        self.values = [uuidutils.generate_uuid() for i in xrange(3)]
        self.images = [db.s3_image_create(self.ctxt, uuid)
                       for uuid in self.values]

    def test_s3_image_create(self):
        for ref in self.images:
            self.assertTrue(uuidutils.is_uuid_like(ref.uuid))
        self.assertEqual(sorted(self.values),
                         sorted([ref.uuid for ref in self.images]))

    def test_s3_image_get_by_uuid(self):
        for uuid in self.values:
            ref = db.s3_image_get_by_uuid(self.ctxt, uuid)
            self.assertTrue(uuidutils.is_uuid_like(ref.uuid))
            self.assertEqual(uuid, ref.uuid)

    def test_s3_image_get(self):
        # Looking each row up by its integer id round-trips the uuid.
        self.assertEqual(sorted(self.values),
                         sorted([db.s3_image_get(self.ctxt, ref.id).uuid
                                 for ref in self.images]))

    def test_s3_image_get_not_found(self):
        # 100500 is an arbitrary id that setUp never creates.
        self.assertRaises(exception.ImageNotFound, db.s3_image_get, self.ctxt,
                          100500)

    def test_s3_image_get_by_uuid_not_found(self):
        self.assertRaises(exception.ImageNotFound, db.s3_image_get_by_uuid,
                          self.ctxt, uuidutils.generate_uuid())
+
+
class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Tests the compute_node_* DB API around one pre-created node."""

    # DB-generated columns ignored when comparing records.
    _ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']

    def setUp(self):
        super(ComputeNodeTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        # One nova-compute service plus one compute node attached to it.
        self.service_dict = dict(host='host1', binary='nova-compute',
                                 topic=CONF.compute_topic, report_count=1,
                                 disabled=False)
        self.service = db.service_create(self.ctxt, self.service_dict)
        self.compute_node_dict = dict(vcpus=2, memory_mb=1024, local_gb=2048,
                                      vcpus_used=0, memory_mb_used=0,
                                      local_gb_used=0, free_ram_mb=1024,
                                      free_disk_gb=2048, hypervisor_type="xen",
                                      hypervisor_version=1, cpu_info="",
                                      running_vms=0, current_workload=0,
                                      service_id=self.service['id'],
                                      disk_available_least=100,
                                      hypervisor_hostname='abracadabra104',
                                      host_ip='127.0.0.1',
                                      supported_instances='',
                                      pci_stats='',
                                      metrics='',
                                      extra_resources='',
                                      stats='', numa_topology='')
        # add some random stats
        self.stats = dict(num_instances=3, num_proj_12345=2,
                          num_proj_23456=2, num_vm_building=3)
        self.compute_node_dict['stats'] = jsonutils.dumps(self.stats)
        self.flags(reserved_host_memory_mb=0)
        self.flags(reserved_host_disk_mb=0)
        self.item = db.compute_node_create(self.ctxt, self.compute_node_dict)

    def test_compute_node_create(self):
        # 'stats' is stored as a JSON string, so compare it after decoding.
        self._assertEqualObjects(self.compute_node_dict, self.item,
                                ignored_keys=self._ignored_keys + ['stats'])
        new_stats = jsonutils.loads(self.item['stats'])
        self.assertEqual(self.stats, new_stats)

    def test_compute_node_get_all(self):
        # Exercise both values of the no_date_fields flag: when set, the
        # timestamp/deleted columns must be stripped from the result.
        date_fields = set(['created_at', 'updated_at',
                           'deleted_at', 'deleted'])
        for no_date_fields in [False, True]:
            nodes = db.compute_node_get_all(self.ctxt, no_date_fields)
            self.assertEqual(1, len(nodes))
            node = nodes[0]
            self._assertEqualObjects(self.compute_node_dict, node,
                                     ignored_keys=self._ignored_keys +
                                                  ['stats', 'service'])
            node_fields = set(node.keys())
            if no_date_fields:
                self.assertFalse(date_fields & node_fields)
            else:
                self.assertTrue(date_fields <= node_fields)
            new_stats = jsonutils.loads(node['stats'])
            self.assertEqual(self.stats, new_stats)

    def test_compute_node_get_all_deleted_compute_node(self):
        # Create a service and compute node and ensure we can find its stats;
        # delete the service and compute node when done and loop again
        for x in range(2, 5):
            # Create a service
            service_data = self.service_dict.copy()
            service_data['host'] = 'host-%s' % x
            service = db.service_create(self.ctxt, service_data)

            # Create a compute node
            compute_node_data = self.compute_node_dict.copy()
            compute_node_data['service_id'] = service['id']
            compute_node_data['stats'] = jsonutils.dumps(self.stats.copy())
            compute_node_data['hypervisor_hostname'] = 'hypervisor-%s' % x
            node = db.compute_node_create(self.ctxt, compute_node_data)

            # Ensure the "new" compute node is found
            nodes = db.compute_node_get_all(self.ctxt, False)
            self.assertEqual(2, len(nodes))
            found = None
            for n in nodes:
                if n['id'] == node['id']:
                    found = n
                    break
            self.assertIsNotNone(found)
            # Now ensure the match has stats!
            self.assertNotEqual(jsonutils.loads(found['stats']), {})

            # Now delete the newly-created compute node to ensure the related
            # compute node stats are wiped in a cascaded fashion
            db.compute_node_delete(self.ctxt, node['id'])

            # Clean up the service
            db.service_destroy(self.ctxt, service['id'])

    def test_compute_node_get_all_mult_compute_nodes_one_service_entry(self):
        service_data = self.service_dict.copy()
        service_data['host'] = 'host2'
        service = db.service_create(self.ctxt, service_data)

        existing_node = dict(self.item.iteritems())
        existing_node['service'] = dict(self.service.iteritems())
        expected = [existing_node]

        for name in ['bm_node1', 'bm_node2']:
            compute_node_data = self.compute_node_dict.copy()
            compute_node_data['service_id'] = service['id']
            compute_node_data['stats'] = jsonutils.dumps(self.stats)
            # NOTE(review): the hostname is hard-coded to 'bm_node_1' for
            # both loop iterations ('name' is never used) -- possibly meant
            # to be 'name'; confirm before changing, as the sort below
            # depends on hypervisor_hostname.
            compute_node_data['hypervisor_hostname'] = 'bm_node_1'
            node = db.compute_node_create(self.ctxt, compute_node_data)

            node = dict(node.iteritems())
            node['service'] = dict(service.iteritems())

            expected.append(node)

        result = sorted(db.compute_node_get_all(self.ctxt, False),
                        key=lambda n: n['hypervisor_hostname'])

        self._assertEqualListsOfObjects(expected, result,
                                        ignored_keys=['stats'])

    def test_compute_node_get(self):
        compute_node_id = self.item['id']
        node = db.compute_node_get(self.ctxt, compute_node_id)
        self._assertEqualObjects(self.compute_node_dict, node,
                ignored_keys=self._ignored_keys + ['stats', 'service'])
        new_stats = jsonutils.loads(node['stats'])
        self.assertEqual(self.stats, new_stats)

    def test_compute_node_update(self):
        compute_node_id = self.item['id']
        stats = jsonutils.loads(self.item['stats'])
        # change some values:
        stats['num_instances'] = 8
        stats['num_tribbles'] = 1
        values = {
            'vcpus': 4,
            'stats': jsonutils.dumps(stats),
        }
        item_updated = db.compute_node_update(self.ctxt, compute_node_id,
                                              values)
        self.assertEqual(4, item_updated['vcpus'])
        new_stats = jsonutils.loads(item_updated['stats'])
        self.assertEqual(stats, new_stats)

    def test_compute_node_delete(self):
        compute_node_id = self.item['id']
        db.compute_node_delete(self.ctxt, compute_node_id)
        nodes = db.compute_node_get_all(self.ctxt)
        self.assertEqual(len(nodes), 0)

    def test_compute_node_search_by_hypervisor(self):
        # Create three more nodes named testhost0..2; a substring search
        # for 'host' must match all three (but not 'abracadabra104').
        nodes_created = []
        new_service = copy.copy(self.service_dict)
        for i in xrange(3):
            new_service['binary'] += str(i)
            new_service['topic'] += str(i)
            service = db.service_create(self.ctxt, new_service)
            self.compute_node_dict['service_id'] = service['id']
            self.compute_node_dict['hypervisor_hostname'] = 'testhost' + str(i)
            self.compute_node_dict['stats'] = jsonutils.dumps(self.stats)
            node = db.compute_node_create(self.ctxt, self.compute_node_dict)
            nodes_created.append(node)
        nodes = db.compute_node_search_by_hypervisor(self.ctxt, 'host')
        self.assertEqual(3, len(nodes))
        self._assertEqualListsOfObjects(nodes_created, nodes,
                        ignored_keys=self._ignored_keys + ['stats', 'service'])

    def test_compute_node_statistics(self):
        # The aggregate of a single node equals that node's own values.
        stats = db.compute_node_statistics(self.ctxt)
        self.assertEqual(stats.pop('count'), 1)
        for k, v in stats.iteritems():
            self.assertEqual(v, self.item[k])

    def test_compute_node_statistics_disabled_service(self):
        # Nodes whose service is disabled are excluded from the statistics.
        serv = db.service_get_by_host_and_topic(
            self.ctxt, 'host1', CONF.compute_topic)
        db.service_update(self.ctxt, serv['id'], {'disabled': True})
        stats = db.compute_node_statistics(self.ctxt)
        self.assertEqual(stats.pop('count'), 0)

    def test_compute_node_not_found(self):
        self.assertRaises(exception.ComputeHostNotFound, db.compute_node_get,
                          self.ctxt, 100500)

    def test_compute_node_update_always_updates_updated_at(self):
        # Even an empty update must bump the updated_at timestamp.
        item_updated = db.compute_node_update(self.ctxt,
                self.item['id'], {})
        self.assertNotEqual(self.item['updated_at'],
                            item_updated['updated_at'])

    def test_compute_node_update_override_updated_at(self):
        # Update the record once so updated_at is set.
        first = db.compute_node_update(self.ctxt, self.item['id'],
                                       {'free_ram_mb': '12'})
        self.assertIsNotNone(first['updated_at'])

        # Update a second time. Make sure that the updated_at value we send
        # is overridden.
        second = db.compute_node_update(self.ctxt, self.item['id'],
                                        {'updated_at': first.updated_at,
                                         'free_ram_mb': '13'})
        self.assertNotEqual(first['updated_at'], second['updated_at'])
+
+
class ProviderFwRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Tests the provider firewall rule DB API."""

    def setUp(self):
        super(ProviderFwRuleTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        self.values = self._get_rule_values()
        self.rules = [db.provider_fw_rule_create(self.ctxt, rule)
                      for rule in self.values]

    def _get_rule_values(self):
        # One rule per sample CIDR, covering IPv4 and IPv6 prefixes.
        cidr_samples = ['192.168.0.0/24', '10.1.2.3/32',
                        '2001:4f8:3:ba::/64',
                        '2001:4f8:3:ba:2e0:81ff:fe22:d1f1/128']
        values = []
        for i in xrange(len(cidr_samples)):
            rule = {}
            rule['protocol'] = 'foo' + str(i)
            rule['from_port'] = 9999 + i
            rule['to_port'] = 9898 + i
            rule['cidr'] = cidr_samples[i]
            values.append(rule)
        return values

    def test_provider_fw_rule_create(self):
        ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
                        'updated_at']
        for i, rule in enumerate(self.values):
            self._assertEqualObjects(self.rules[i], rule,
                                     ignored_keys=ignored_keys)

    def test_provider_fw_rule_get_all(self):
        self._assertEqualListsOfObjects(self.rules,
                                        db.provider_fw_rule_get_all(self.ctxt))

    def test_provider_fw_rule_destroy(self):
        # After destroying every rule, the table must read back empty.
        for rule in self.rules:
            db.provider_fw_rule_destroy(self.ctxt, rule.id)
        self.assertEqual([], db.provider_fw_rule_get_all(self.ctxt))
+
+
class CertificateTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Tests the certificate DB API."""

    def setUp(self):
        super(CertificateTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        self.created = self._certificates_create()

    def _get_certs_values(self):
        # Three value dicts: user1/project1/filename1 .. user3/...
        base_values = {
            'user_id': 'user',
            'project_id': 'project',
            'file_name': 'filename'
        }
        return [dict((k, v + str(x)) for k, v in base_values.iteritems())
                for x in xrange(1, 4)]

    def _certificates_create(self):
        return [db.certificate_create(self.ctxt, cert)
                                      for cert in self._get_certs_values()]

    def test_certificate_create(self):
        ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
                        'updated_at']
        for i, cert in enumerate(self._get_certs_values()):
            self._assertEqualObjects(self.created[i], cert,
                                     ignored_keys=ignored_keys)

    def test_certificate_get_all_by_project(self):
        # Each fixture has a unique project, so exactly one match expected.
        cert = db.certificate_get_all_by_project(self.ctxt,
                                                 self.created[1].project_id)
        self._assertEqualObjects(self.created[1], cert[0])

    def test_certificate_get_all_by_user(self):
        cert = db.certificate_get_all_by_user(self.ctxt,
                                              self.created[1].user_id)
        self._assertEqualObjects(self.created[1], cert[0])

    def test_certificate_get_all_by_user_and_project(self):
        cert = db.certificate_get_all_by_user_and_project(self.ctxt,
                           self.created[1].user_id, self.created[1].project_id)
        self._assertEqualObjects(self.created[1], cert[0])
+
+
class ConsoleTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Tests the console DB API using two pools with one console each."""

    def setUp(self):
        super(ConsoleTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        pools_data = [
            {'address': '192.168.10.10',
             'username': 'user1',
             'password': 'passwd1',
             'console_type': 'type1',
             'public_hostname': 'public_host1',
             'host': 'host1',
             'compute_host': 'compute_host1',
            },
            {'address': '192.168.10.11',
             'username': 'user2',
             'password': 'passwd2',
             'console_type': 'type2',
             'public_hostname': 'public_host2',
             'host': 'host2',
             'compute_host': 'compute_host2',
            },
        ]
        self.console_pools = [db.console_pool_create(self.ctxt, val)
                              for val in pools_data]
        # Both consoles belong to the same (single) instance, one per pool.
        instance_uuid = uuidutils.generate_uuid()
        db.instance_create(self.ctxt, {'uuid': instance_uuid})
        self.console_data = [dict([('instance_name', 'name' + str(x)),
                                   ('instance_uuid', instance_uuid),
                                   ('password', 'pass' + str(x)),
                                   ('port', 7878 + x),
                                   ('pool_id', self.console_pools[x]['id'])])
                             for x in xrange(len(pools_data))]
        self.consoles = [db.console_create(self.ctxt, val)
                         for val in self.console_data]

    def test_console_create(self):
        ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
                        'updated_at']
        for console in self.consoles:
            self.assertIsNotNone(console['id'])
        self._assertEqualListsOfObjects(self.console_data, self.consoles,
                                        ignored_keys=ignored_keys)

    def test_console_get_by_id(self):
        console = self.consoles[0]
        console_get = db.console_get(self.ctxt, console['id'])
        self._assertEqualObjects(console, console_get,
                                 ignored_keys=['pool'])

    def test_console_get_by_id_uuid(self):
        # Lookup with both the console id and the owning instance uuid.
        console = self.consoles[0]
        console_get = db.console_get(self.ctxt, console['id'],
                                     console['instance_uuid'])
        self._assertEqualObjects(console, console_get,
                                 ignored_keys=['pool'])

    def test_console_get_by_pool_instance(self):
        console = self.consoles[0]
        console_get = db.console_get_by_pool_instance(self.ctxt,
                            console['pool_id'], console['instance_uuid'])
        self._assertEqualObjects(console, console_get,
                                 ignored_keys=['pool'])

    def test_console_get_all_by_instance(self):
        # Both consoles share one instance, so both must be returned.
        instance_uuid = self.consoles[0]['instance_uuid']
        consoles_get = db.console_get_all_by_instance(self.ctxt, instance_uuid)
        self._assertEqualListsOfObjects(self.consoles, consoles_get)

    def test_console_get_all_by_instance_with_pool(self):
        # With columns_to_join=['pool'] the pool record is eagerly loaded.
        instance_uuid = self.consoles[0]['instance_uuid']
        consoles_get = db.console_get_all_by_instance(self.ctxt, instance_uuid,
                                                      columns_to_join=['pool'])
        self._assertEqualListsOfObjects(self.consoles, consoles_get,
                                        ignored_keys=['pool'])
        self._assertEqualListsOfObjects([pool for pool in self.console_pools],
                                        [c['pool'] for c in consoles_get])

    def test_console_get_all_by_instance_empty(self):
        consoles_get = db.console_get_all_by_instance(self.ctxt,
                                                uuidutils.generate_uuid())
        self.assertEqual(consoles_get, [])

    def test_console_delete(self):
        console_id = self.consoles[0]['id']
        db.console_delete(self.ctxt, console_id)
        self.assertRaises(exception.ConsoleNotFound, db.console_get,
                          self.ctxt, console_id)

    def test_console_get_by_pool_instance_not_found(self):
        self.assertRaises(exception.ConsoleNotFoundInPoolForInstance,
                          db.console_get_by_pool_instance, self.ctxt,
                          self.consoles[0]['pool_id'],
                          uuidutils.generate_uuid())

    def test_console_get_not_found(self):
        self.assertRaises(exception.ConsoleNotFound, db.console_get,
                          self.ctxt, 100500)

    def test_console_get_not_found_instance(self):
        # A valid console id paired with the wrong instance uuid raises the
        # instance-specific not-found exception.
        self.assertRaises(exception.ConsoleNotFoundForInstance, db.console_get,
                          self.ctxt, self.consoles[0]['id'],
                          uuidutils.generate_uuid())
+
+
class CellTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Tests the cell DB API (create/update/get/delete of cell records)."""

    # DB-generated columns ignored when comparing records.
    _ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']

    def setUp(self):
        super(CellTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    def _get_cell_base_values(self):
        return {
            'name': 'myname',
            'api_url': 'apiurl',
            'transport_url': 'transporturl',
            'weight_offset': 0.5,
            'weight_scale': 1.5,
            'is_parent': True,
        }

    def _cell_value_modify(self, value, step):
        # Derive a per-step variant of a base value; the bool check must
        # stay after the int/float checks conceptually, but note the order
        # here tests floats before bools before ints.
        if isinstance(value, str):
            return value + str(step)
        elif isinstance(value, float):
            return value + step + 0.6
        elif isinstance(value, bool):
            return bool(step % 2)
        elif isinstance(value, int):
            return value + step

    def _create_cells(self):
        # Create three distinct cells and return the value dicts used.
        test_values = []
        for x in xrange(1, 4):
            modified_val = dict([(k, self._cell_value_modify(v, x))
                        for k, v in self._get_cell_base_values().iteritems()])
            db.cell_create(self.ctxt, modified_val)
            test_values.append(modified_val)
        return test_values

    def test_cell_create(self):
        cell = db.cell_create(self.ctxt, self._get_cell_base_values())
        self.assertIsNotNone(cell['id'])
        self._assertEqualObjects(cell, self._get_cell_base_values(),
                                 ignored_keys=self._ignored_keys)

    def test_cell_update(self):
        db.cell_create(self.ctxt, self._get_cell_base_values())
        new_values = {
            'api_url': 'apiurl1',
            'transport_url': 'transporturl1',
            'weight_offset': 0.6,
            'weight_scale': 1.6,
            'is_parent': False,
        }
        test_cellname = self._get_cell_base_values()['name']
        updated_cell = db.cell_update(self.ctxt, test_cellname, new_values)
        self._assertEqualObjects(updated_cell, new_values,
                                 ignored_keys=self._ignored_keys + ['name'])

    def test_cell_delete(self):
        new_cells = self._create_cells()
        for cell in new_cells:
            test_cellname = cell['name']
            db.cell_delete(self.ctxt, test_cellname)
            self.assertRaises(exception.CellNotFound, db.cell_get, self.ctxt,
                              test_cellname)

    def test_cell_get(self):
        new_cells = self._create_cells()
        for cell in new_cells:
            cell_get = db.cell_get(self.ctxt, cell['name'])
            self._assertEqualObjects(cell_get, cell,
                                     ignored_keys=self._ignored_keys)

    def test_cell_get_all(self):
        new_cells = self._create_cells()
        cells = db.cell_get_all(self.ctxt)
        self.assertEqual(len(new_cells), len(cells))
        # Match returned cells to the fixtures by their unique names.
        cells_byname = dict([(newcell['name'],
                              newcell) for newcell in new_cells])
        for cell in cells:
            self._assertEqualObjects(cell, cells_byname[cell['name']],
                                     self._ignored_keys)

    def test_cell_get_not_found(self):
        self._create_cells()
        self.assertRaises(exception.CellNotFound, db.cell_get, self.ctxt,
                          'cellnotinbase')

    def test_cell_update_not_found(self):
        self._create_cells()
        self.assertRaises(exception.CellNotFound, db.cell_update, self.ctxt,
                          'cellnotinbase', self._get_cell_base_values())

    def test_cell_create_exists(self):
        # Cell names are unique; duplicate creation raises CellExists.
        db.cell_create(self.ctxt, self._get_cell_base_values())
        self.assertRaises(exception.CellExists, db.cell_create,
                          self.ctxt, self._get_cell_base_values())
+
+
class ConsolePoolTestCase(test.TestCase, ModelsObjectComparatorMixin):
    """Tests the console pool DB API."""

    def setUp(self):
        super(ConsolePoolTestCase, self).setUp()

        self.ctxt = context.get_admin_context()
        # Pools 2 and 3 share host/console_type so the "get all by host
        # type" lookup has more than one match.
        self.test_console_pool_1 = {
            'address': '192.168.2.10',
            'username': 'user_1',
            'password': 'secret_123',
            'console_type': 'type_1',
            'public_hostname': 'public_hostname_123',
            'host': 'localhost',
            'compute_host': '127.0.0.1',
        }
        self.test_console_pool_2 = {
            'address': '192.168.2.11',
            'username': 'user_2',
            'password': 'secret_1234',
            'console_type': 'type_2',
            'public_hostname': 'public_hostname_1234',
            'host': '127.0.0.1',
            'compute_host': 'localhost',
        }
        self.test_console_pool_3 = {
            'address': '192.168.2.12',
            'username': 'user_3',
            'password': 'secret_12345',
            'console_type': 'type_2',
            'public_hostname': 'public_hostname_12345',
            'host': '127.0.0.1',
            'compute_host': '192.168.1.1',
        }

    def test_console_pool_create(self):
        console_pool = db.console_pool_create(
            self.ctxt, self.test_console_pool_1)
        self.assertIsNotNone(console_pool.get('id'))
        ignored_keys = ['deleted', 'created_at', 'updated_at',
                        'deleted_at', 'id']
        self._assertEqualObjects(
            console_pool, self.test_console_pool_1, ignored_keys)

    def test_console_pool_create_duplicate(self):
        db.console_pool_create(self.ctxt, self.test_console_pool_1)
        self.assertRaises(exception.ConsolePoolExists, db.console_pool_create,
                          self.ctxt, self.test_console_pool_1)

    def test_console_pool_get_by_host_type(self):
        params = [
            self.test_console_pool_1,
            self.test_console_pool_2,
        ]

        for p in params:
            db.console_pool_create(self.ctxt, p)

        ignored_keys = ['deleted', 'created_at', 'updated_at',
                        'deleted_at', 'id', 'consoles']

        cp = self.test_console_pool_1
        db_cp = db.console_pool_get_by_host_type(
            self.ctxt, cp['compute_host'], cp['host'], cp['console_type']
        )
        self._assertEqualObjects(cp, db_cp, ignored_keys)

    def test_console_pool_get_by_host_type_no_results(self):
        # NOTE: renamed from 'no_resuls' to fix the typo; discovery via the
        # 'test_' prefix is unaffected.
        self.assertRaises(
            exception.ConsolePoolNotFoundForHostType,
            db.console_pool_get_by_host_type, self.ctxt, 'compute_host',
            'host', 'console_type')

    def test_console_pool_get_all_by_host_type(self):
        params = [
            self.test_console_pool_1,
            self.test_console_pool_2,
            self.test_console_pool_3,
        ]
        for p in params:
            db.console_pool_create(self.ctxt, p)
        ignored_keys = ['deleted', 'created_at', 'updated_at',
                        'deleted_at', 'id', 'consoles']

        cp = self.test_console_pool_2
        db_cp = db.console_pool_get_all_by_host_type(
            self.ctxt, cp['host'], cp['console_type'])

        # Pools 2 and 3 share host/console_type, so both must come back.
        self._assertEqualListsOfObjects(
            db_cp, [self.test_console_pool_2, self.test_console_pool_3],
            ignored_keys)

    def test_console_pool_get_all_by_host_type_no_results(self):
        res = db.console_pool_get_all_by_host_type(
            self.ctxt, 'cp_host', 'cp_console_type')
        self.assertEqual([], res)
+
+
class DnsdomainTestCase(test.TestCase):
    """Exercises registration, lookup and removal of DNS domains."""

    def setUp(self):
        super(DnsdomainTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        self.domain = 'test.domain'
        self.testzone = 'testzone'
        self.project = 'fake'

    def test_dnsdomain_register_for_zone(self):
        # A zone registration produces a private-scope record bound to
        # the availability zone.
        db.dnsdomain_register_for_zone(self.ctxt, self.domain, self.testzone)
        rec = db.dnsdomain_get(self.ctxt, self.domain)
        self.assertEqual(rec['domain'], self.domain)
        self.assertEqual(rec['availability_zone'], self.testzone)
        self.assertEqual(rec['scope'], 'private')

    def test_dnsdomain_register_for_project(self):
        # A project registration produces a public-scope record bound to
        # the project id.
        db.dnsdomain_register_for_project(self.ctxt, self.domain, self.project)
        rec = db.dnsdomain_get(self.ctxt, self.domain)
        self.assertEqual(rec['domain'], self.domain)
        self.assertEqual(rec['project_id'], self.project)
        self.assertEqual(rec['scope'], 'public')

    def test_dnsdomain_list(self):
        # One zone-scoped and one project-scoped domain both show up.
        names = ['test.domain.one', 'test.domain.two']
        db.dnsdomain_register_for_zone(self.ctxt, names[0], self.testzone)
        db.dnsdomain_register_for_project(self.ctxt, names[1], self.project)
        listed = db.dnsdomain_list(self.ctxt)
        self.assertEqual(sorted(names), sorted(listed))

    def test_dnsdomain_unregister(self):
        # After unregistering, the lookup returns None rather than raising.
        db.dnsdomain_register_for_zone(self.ctxt, self.domain, self.testzone)
        db.dnsdomain_unregister(self.ctxt, self.domain)
        self.assertIsNone(db.dnsdomain_get(self.ctxt, self.domain))

    def test_dnsdomain_get_all(self):
        names = ['test.domain.one', 'test.domain.two']
        db.dnsdomain_register_for_zone(self.ctxt, names[0], 'zone')
        db.dnsdomain_register_for_zone(self.ctxt, names[1], 'zone')
        found = [rec.domain for rec in db.dnsdomain_get_all(self.ctxt)]
        self.assertEqual(sorted(names), sorted(found))
+
+
+class BwUsageTestCase(test.TestCase, ModelsObjectComparatorMixin):
+    """Tests for the bandwidth-usage DB API: bw_usage_update/get/get_by_uuids."""
+
+    # DB-generated bookkeeping columns that are irrelevant to the comparisons.
+    _ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
+
+    def setUp(self):
+        super(BwUsageTestCase, self).setUp()
+        self.ctxt = context.get_admin_context()
+        # NOTE(review): TimeOverride presumably pins timeutils.utcnow() so
+        # the implicit 'last_refreshed' timestamps are deterministic --
+        # confirm against nova.test.
+        self.useFixture(test.TimeOverride())
+
+    def test_bw_usage_get_by_uuids(self):
+        now = timeutils.utcnow()
+        start_period = now - datetime.timedelta(seconds=10)
+        uuid3_refreshed = now - datetime.timedelta(seconds=5)
+
+        # fake_uuid2's expected values reflect its *second* update below;
+        # fake_uuid3 carries an explicitly supplied last_refreshed.
+        expected_bw_usages = {
+            'fake_uuid1': {'uuid': 'fake_uuid1',
+                           'mac': 'fake_mac1',
+                           'start_period': start_period,
+                           'bw_in': 100,
+                           'bw_out': 200,
+                           'last_ctr_in': 12345,
+                           'last_ctr_out': 67890,
+                           'last_refreshed': now},
+            'fake_uuid2': {'uuid': 'fake_uuid2',
+                           'mac': 'fake_mac2',
+                           'start_period': start_period,
+                           'bw_in': 200,
+                           'bw_out': 300,
+                           'last_ctr_in': 22345,
+                           'last_ctr_out': 77890,
+                           'last_refreshed': now},
+            'fake_uuid3': {'uuid': 'fake_uuid3',
+                           'mac': 'fake_mac3',
+                           'start_period': start_period,
+                           'bw_in': 400,
+                           'bw_out': 500,
+                           'last_ctr_in': 32345,
+                           'last_ctr_out': 87890,
+                           'last_refreshed': uuid3_refreshed}
+        }
+
+        bw_usages = db.bw_usage_get_by_uuids(self.ctxt,
+                ['fake_uuid1', 'fake_uuid2'], start_period)
+        # No matches
+        self.assertEqual(len(bw_usages), 0)
+
+        # Add 3 entries
+        db.bw_usage_update(self.ctxt, 'fake_uuid1',
+                'fake_mac1', start_period,
+                100, 200, 12345, 67890)
+        db.bw_usage_update(self.ctxt, 'fake_uuid2',
+                'fake_mac2', start_period,
+                100, 200, 42, 42)
+        # Test explicit refreshed time
+        db.bw_usage_update(self.ctxt, 'fake_uuid3',
+                'fake_mac3', start_period,
+                400, 500, 32345, 87890,
+                last_refreshed=uuid3_refreshed)
+        # Update 2nd entry
+        db.bw_usage_update(self.ctxt, 'fake_uuid2',
+                'fake_mac2', start_period,
+                200, 300, 22345, 77890)
+
+        bw_usages = db.bw_usage_get_by_uuids(self.ctxt,
+                ['fake_uuid1', 'fake_uuid2', 'fake_uuid3'], start_period)
+        self.assertEqual(len(bw_usages), 3)
+        for usage in bw_usages:
+            self._assertEqualObjects(expected_bw_usages[usage['uuid']], usage,
+                                     ignored_keys=self._ignored_keys)
+
+    def test_bw_usage_get(self):
+        now = timeutils.utcnow()
+        start_period = now - datetime.timedelta(seconds=10)
+
+        expected_bw_usage = {'uuid': 'fake_uuid1',
+                             'mac': 'fake_mac1',
+                             'start_period': start_period,
+                             'bw_in': 100,
+                             'bw_out': 200,
+                             'last_ctr_in': 12345,
+                             'last_ctr_out': 67890,
+                             'last_refreshed': now}
+
+        # Lookup before any update returns None rather than raising.
+        bw_usage = db.bw_usage_get(self.ctxt, 'fake_uuid1', start_period,
+                                   'fake_mac1')
+        self.assertIsNone(bw_usage)
+
+        db.bw_usage_update(self.ctxt, 'fake_uuid1',
+                'fake_mac1', start_period,
+                100, 200, 12345, 67890)
+
+        bw_usage = db.bw_usage_get(self.ctxt, 'fake_uuid1', start_period,
+                                   'fake_mac1')
+        self._assertEqualObjects(bw_usage, expected_bw_usage,
+                                 ignored_keys=self._ignored_keys)
+
+
+class Ec2TestCase(test.TestCase):
+
+ def setUp(self):
+ super(Ec2TestCase, self).setUp()
+ self.ctxt = context.RequestContext('fake_user', 'fake_project')
+
+ def test_ec2_ids_not_found_are_printable(self):
+ def check_exc_format(method, value):
+ try:
+ method(self.ctxt, value)
+ except exception.NotFound as exc:
+ self.assertIn(six.text_type(value), six.text_type(exc))
+
+ check_exc_format(db.get_ec2_instance_id_by_uuid, 'fake')
+ check_exc_format(db.get_instance_uuid_by_ec2_id, 123456)
+ check_exc_format(db.ec2_snapshot_get_by_ec2_id, 123456)
+ check_exc_format(db.ec2_snapshot_get_by_uuid, 'fake')
+
+ def test_ec2_volume_create(self):
+ vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
+ self.assertIsNotNone(vol['id'])
+ self.assertEqual(vol['uuid'], 'fake-uuid')
+
+ def test_ec2_volume_get_by_id(self):
+ vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
+ vol2 = db.ec2_volume_get_by_id(self.ctxt, vol['id'])
+ self.assertEqual(vol2['uuid'], vol['uuid'])
+
+ def test_ec2_volume_get_by_uuid(self):
+ vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
+ vol2 = db.ec2_volume_get_by_uuid(self.ctxt, vol['uuid'])
+ self.assertEqual(vol2['id'], vol['id'])
+
+ def test_ec2_snapshot_create(self):
+ snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
+ self.assertIsNotNone(snap['id'])
+ self.assertEqual(snap['uuid'], 'fake-uuid')
+
+ def test_ec2_snapshot_get_by_ec2_id(self):
+ snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
+ snap2 = db.ec2_snapshot_get_by_ec2_id(self.ctxt, snap['id'])
+ self.assertEqual(snap2['uuid'], 'fake-uuid')
+
+ def test_ec2_snapshot_get_by_uuid(self):
+ snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
+ snap2 = db.ec2_snapshot_get_by_uuid(self.ctxt, 'fake-uuid')
+ self.assertEqual(snap['id'], snap2['id'])
+
+ def test_ec2_snapshot_get_by_ec2_id_not_found(self):
+ self.assertRaises(exception.SnapshotNotFound,
+ db.ec2_snapshot_get_by_ec2_id,
+ self.ctxt, 123456)
+
+ def test_ec2_snapshot_get_by_uuid_not_found(self):
+ self.assertRaises(exception.SnapshotNotFound,
+ db.ec2_snapshot_get_by_uuid,
+ self.ctxt, 'fake-uuid')
+
+ def test_ec2_instance_create(self):
+ inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
+ self.assertIsNotNone(inst['id'])
+ self.assertEqual(inst['uuid'], 'fake-uuid')
+
+ def test_ec2_instance_get_by_uuid(self):
+ inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
+ inst2 = db.ec2_instance_get_by_uuid(self.ctxt, 'fake-uuid')
+ self.assertEqual(inst['id'], inst2['id'])
+
+ def test_ec2_instance_get_by_id(self):
+ inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
+ inst2 = db.ec2_instance_get_by_id(self.ctxt, inst['id'])
+ self.assertEqual(inst['id'], inst2['id'])
+
+ def test_ec2_instance_get_by_uuid_not_found(self):
+ self.assertRaises(exception.InstanceNotFound,
+ db.ec2_instance_get_by_uuid,
+ self.ctxt, 'uuid-not-present')
+
+ def test_ec2_instance_get_by_id_not_found(self):
+ self.assertRaises(exception.InstanceNotFound,
+ db.ec2_instance_get_by_uuid,
+ self.ctxt, 12345)
+
+ def test_get_ec2_instance_id_by_uuid(self):
+ inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
+ inst_id = db.get_ec2_instance_id_by_uuid(self.ctxt, 'fake-uuid')
+ self.assertEqual(inst['id'], inst_id)
+
+ def test_get_instance_uuid_by_ec2_id(self):
+ inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
+ inst_uuid = db.get_instance_uuid_by_ec2_id(self.ctxt, inst['id'])
+ self.assertEqual(inst_uuid, 'fake-uuid')
+
+ def test_get_ec2_instance_id_by_uuid_not_found(self):
+ self.assertRaises(exception.InstanceNotFound,
+ db.get_ec2_instance_id_by_uuid,
+ self.ctxt, 'uuid-not-present')
+
+ def test_get_instance_uuid_by_ec2_id_not_found(self):
+ self.assertRaises(exception.InstanceNotFound,
+ db.get_instance_uuid_by_ec2_id,
+ self.ctxt, 100500)
+
+
+class ArchiveTestCase(test.TestCase):
+    """Tests for archiving soft-deleted rows into the shadow_* tables.
+
+    Works directly against the SQLAlchemy engine: inserts rows, marks a
+    subset deleted, then exercises db.archive_deleted_rows[_for_table].
+    """
+
+    def setUp(self):
+        super(ArchiveTestCase, self).setUp()
+        self.context = context.get_admin_context()
+        self.engine = get_engine()
+        self.conn = self.engine.connect()
+        # Table handles for the main tables and their shadow counterparts
+        # used by the tests below.
+        self.instance_id_mappings = sqlalchemyutils.get_table(
+            self.engine, "instance_id_mappings")
+        self.shadow_instance_id_mappings = sqlalchemyutils.get_table(
+            self.engine, "shadow_instance_id_mappings")
+        self.dns_domains = sqlalchemyutils.get_table(
+            self.engine, "dns_domains")
+        self.shadow_dns_domains = sqlalchemyutils.get_table(
+            self.engine, "shadow_dns_domains")
+        self.consoles = sqlalchemyutils.get_table(self.engine, "consoles")
+        self.console_pools = sqlalchemyutils.get_table(
+            self.engine, "console_pools")
+        self.shadow_consoles = sqlalchemyutils.get_table(
+            self.engine, "shadow_consoles")
+        self.shadow_console_pools = sqlalchemyutils.get_table(
+            self.engine, "shadow_console_pools")
+        self.instances = sqlalchemyutils.get_table(self.engine, "instances")
+        self.shadow_instances = sqlalchemyutils.get_table(
+            self.engine, "shadow_instances")
+        # Six random hex uuids double as uuid values and dns domain names.
+        self.uuidstrs = []
+        for unused in range(6):
+            self.uuidstrs.append(stdlib_uuid.uuid4().hex)
+        self.ids = []
+        # Tables cleaned up in tearDown, keyed by the column the test rows
+        # can be identified by (id, uuid or domain).
+        self.id_tablenames_to_cleanup = set(["console_pools", "consoles"])
+        self.uuid_tablenames_to_cleanup = set(["instance_id_mappings",
+                                               "instances"])
+        self.domain_tablenames_to_cleanup = set(["dns_domains"])
+
+    def tearDown(self):
+        super(ArchiveTestCase, self).tearDown()
+        # Delete the rows this test created from both the main table and
+        # its shadow, since archiving may have moved them.
+        for tablename in self.id_tablenames_to_cleanup:
+            for name in [tablename, "shadow_" + tablename]:
+                table = sqlalchemyutils.get_table(self.engine, name)
+                del_statement = table.delete(table.c.id.in_(self.ids))
+                self.conn.execute(del_statement)
+        for tablename in self.uuid_tablenames_to_cleanup:
+            for name in [tablename, "shadow_" + tablename]:
+                table = sqlalchemyutils.get_table(self.engine, name)
+                del_statement = table.delete(table.c.uuid.in_(self.uuidstrs))
+                self.conn.execute(del_statement)
+        for tablename in self.domain_tablenames_to_cleanup:
+            for name in [tablename, "shadow_" + tablename]:
+                table = sqlalchemyutils.get_table(self.engine, name)
+                del_statement = table.delete(table.c.domain.in_(self.uuidstrs))
+                self.conn.execute(del_statement)
+
+    def test_shadow_tables(self):
+        # Every main table must have a matching shadow_ table and vice
+        # versa, as checked by db_utils.check_shadow_table.
+        metadata = MetaData(bind=self.engine)
+        metadata.reflect()
+        for table_name in metadata.tables:
+            # NOTE(rpodolyaka): migration 209 introduced a few new tables,
+            #                   which don't have shadow tables and it's
+            #                   completely OK, so we should skip them here
+            if table_name.startswith("dump_"):
+                continue
+
+            if table_name.startswith("shadow_"):
+                self.assertIn(table_name[7:], metadata.tables)
+                continue
+            self.assertTrue(db_utils.check_shadow_table(self.engine,
+                                                        table_name))
+
+    def test_archive_deleted_rows(self):
+        # Add 6 rows to table
+        for uuidstr in self.uuidstrs:
+            ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
+            self.conn.execute(ins_stmt)
+        # Set 4 to deleted
+        update_statement = self.instance_id_mappings.update().\
+                where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
+                .values(deleted=1)
+        self.conn.execute(update_statement)
+        qiim = sql.select([self.instance_id_mappings]).where(self.
+                instance_id_mappings.c.uuid.in_(self.uuidstrs))
+        rows = self.conn.execute(qiim).fetchall()
+        # Verify we have 6 in main
+        self.assertEqual(len(rows), 6)
+        qsiim = sql.select([self.shadow_instance_id_mappings]).\
+                where(self.shadow_instance_id_mappings.c.uuid.in_(
+                                                            self.uuidstrs))
+        rows = self.conn.execute(qsiim).fetchall()
+        # Verify we have 0 in shadow
+        self.assertEqual(len(rows), 0)
+        # Archive 2 rows
+        db.archive_deleted_rows(self.context, max_rows=2)
+        rows = self.conn.execute(qiim).fetchall()
+        # Verify we have 4 left in main
+        self.assertEqual(len(rows), 4)
+        rows = self.conn.execute(qsiim).fetchall()
+        # Verify we have 2 in shadow
+        self.assertEqual(len(rows), 2)
+        # Archive 2 more rows
+        db.archive_deleted_rows(self.context, max_rows=2)
+        rows = self.conn.execute(qiim).fetchall()
+        # Verify we have 2 left in main
+        self.assertEqual(len(rows), 2)
+        rows = self.conn.execute(qsiim).fetchall()
+        # Verify we have 4 in shadow
+        self.assertEqual(len(rows), 4)
+        # Try to archive more, but there are no deleted rows left.
+        db.archive_deleted_rows(self.context, max_rows=2)
+        rows = self.conn.execute(qiim).fetchall()
+        # Verify we still have 2 left in main
+        self.assertEqual(len(rows), 2)
+        rows = self.conn.execute(qsiim).fetchall()
+        # Verify we still have 4 in shadow
+        self.assertEqual(len(rows), 4)
+
+    def test_archive_deleted_rows_for_every_uuid_table(self):
+        # Run the generic uuid-table archive scenario against every model
+        # table; only tables that succeeded (ret == 0) need cleanup.
+        tablenames = []
+        for model_class in models.__dict__.itervalues():
+            if hasattr(model_class, "__tablename__"):
+                tablenames.append(model_class.__tablename__)
+        tablenames.sort()
+        for tablename in tablenames:
+            ret = self._test_archive_deleted_rows_for_one_uuid_table(tablename)
+            if ret == 0:
+                self.uuid_tablenames_to_cleanup.add(tablename)
+
+    def _test_archive_deleted_rows_for_one_uuid_table(self, tablename):
+        """:returns: 0 on success, 1 if no uuid column, 2 if insert failed."""
+        main_table = sqlalchemyutils.get_table(self.engine, tablename)
+        if not hasattr(main_table.c, "uuid"):
+            # Not a uuid table, so skip it.
+            return 1
+        shadow_table = sqlalchemyutils.get_table(
+            self.engine, "shadow_" + tablename)
+        # Add 6 rows to table
+        for uuidstr in self.uuidstrs:
+            ins_stmt = main_table.insert().values(uuid=uuidstr)
+            try:
+                self.conn.execute(ins_stmt)
+            except db_exc.DBError:
+                # This table has constraints that require a table-specific
+                # insert, so skip it.
+                return 2
+        # Set 4 to deleted
+        update_statement = main_table.update().\
+                where(main_table.c.uuid.in_(self.uuidstrs[:4]))\
+                .values(deleted=1)
+        self.conn.execute(update_statement)
+        qmt = sql.select([main_table]).where(main_table.c.uuid.in_(
+                self.uuidstrs))
+        rows = self.conn.execute(qmt).fetchall()
+        # Verify we have 6 in main
+        self.assertEqual(len(rows), 6)
+        qst = sql.select([shadow_table]).\
+                where(shadow_table.c.uuid.in_(self.uuidstrs))
+        rows = self.conn.execute(qst).fetchall()
+        # Verify we have 0 in shadow
+        self.assertEqual(len(rows), 0)
+        # Archive 2 rows
+        db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
+        # Verify we have 4 left in main
+        rows = self.conn.execute(qmt).fetchall()
+        self.assertEqual(len(rows), 4)
+        # Verify we have 2 in shadow
+        rows = self.conn.execute(qst).fetchall()
+        self.assertEqual(len(rows), 2)
+        # Archive 2 more rows
+        db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
+        # Verify we have 2 left in main
+        rows = self.conn.execute(qmt).fetchall()
+        self.assertEqual(len(rows), 2)
+        # Verify we have 4 in shadow
+        rows = self.conn.execute(qst).fetchall()
+        self.assertEqual(len(rows), 4)
+        # Try to archive more, but there are no deleted rows left.
+        db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
+        # Verify we still have 2 left in main
+        rows = self.conn.execute(qmt).fetchall()
+        self.assertEqual(len(rows), 2)
+        # Verify we still have 4 in shadow
+        rows = self.conn.execute(qst).fetchall()
+        self.assertEqual(len(rows), 4)
+        return 0
+
+    def test_archive_deleted_rows_no_id_column(self):
+        # dns_domains has no 'id' column; archiving must still work, keyed
+        # on the 'domain' column instead.
+        uuidstr0 = self.uuidstrs[0]
+        ins_stmt = self.dns_domains.insert().values(domain=uuidstr0)
+        self.conn.execute(ins_stmt)
+        update_statement = self.dns_domains.update().\
+                          where(self.dns_domains.c.domain == uuidstr0).\
+                          values(deleted=True)
+        self.conn.execute(update_statement)
+        qdd = sql.select([self.dns_domains], self.dns_domains.c.domain ==
+                                             uuidstr0)
+        rows = self.conn.execute(qdd).fetchall()
+        self.assertEqual(len(rows), 1)
+        qsdd = sql.select([self.shadow_dns_domains],
+                          self.shadow_dns_domains.c.domain == uuidstr0)
+        rows = self.conn.execute(qsdd).fetchall()
+        self.assertEqual(len(rows), 0)
+        db.archive_deleted_rows(self.context, max_rows=1)
+        rows = self.conn.execute(qdd).fetchall()
+        self.assertEqual(len(rows), 0)
+        rows = self.conn.execute(qsdd).fetchall()
+        self.assertEqual(len(rows), 1)
+
+    def test_archive_deleted_rows_fk_constraint(self):
+        # consoles.pool_id depends on console_pools.id
+        # SQLite doesn't enforce foreign key constraints without a pragma.
+        dialect = self.engine.url.get_dialect()
+        if dialect == sqlite.dialect:
+            # We're seeing issues with foreign key support in SQLite 3.6.20
+            # SQLAlchemy doesn't support it at all with < SQLite 3.6.19
+            # It works fine in SQLite 3.7.
+            # So return early to skip this test if running SQLite < 3.7
+            import sqlite3
+            tup = sqlite3.sqlite_version_info
+            if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7):
+                self.skipTest(
+                    'sqlite version too old for reliable SQLA foreign_keys')
+            self.conn.execute("PRAGMA foreign_keys = ON")
+        ins_stmt = self.console_pools.insert().values(deleted=1)
+        result = self.conn.execute(ins_stmt)
+        id1 = result.inserted_primary_key[0]
+        self.ids.append(id1)
+        ins_stmt = self.consoles.insert().values(deleted=1,
+                                                 pool_id=id1)
+        result = self.conn.execute(ins_stmt)
+        id2 = result.inserted_primary_key[0]
+        self.ids.append(id2)
+        # The first try to archive console_pools should fail, due to FK.
+        num = db.archive_deleted_rows_for_table(self.context, "console_pools")
+        self.assertEqual(num, 0)
+        # Then archiving consoles should work.
+        num = db.archive_deleted_rows_for_table(self.context, "consoles")
+        self.assertEqual(num, 1)
+        # Then archiving console_pools should work.
+        num = db.archive_deleted_rows_for_table(self.context, "console_pools")
+        self.assertEqual(num, 1)
+
+    def test_archive_deleted_rows_2_tables(self):
+        # Add 6 rows to each table
+        for uuidstr in self.uuidstrs:
+            ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
+            self.conn.execute(ins_stmt)
+            ins_stmt2 = self.instances.insert().values(uuid=uuidstr)
+            self.conn.execute(ins_stmt2)
+        # Set 4 of each to deleted
+        update_statement = self.instance_id_mappings.update().\
+                where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
+                .values(deleted=1)
+        self.conn.execute(update_statement)
+        update_statement2 = self.instances.update().\
+                where(self.instances.c.uuid.in_(self.uuidstrs[:4]))\
+                .values(deleted=1)
+        self.conn.execute(update_statement2)
+        # Verify we have 6 in each main table
+        qiim = sql.select([self.instance_id_mappings]).where(
+                         self.instance_id_mappings.c.uuid.in_(self.uuidstrs))
+        rows = self.conn.execute(qiim).fetchall()
+        self.assertEqual(len(rows), 6)
+        qi = sql.select([self.instances]).where(self.instances.c.uuid.in_(
+                                             self.uuidstrs))
+        rows = self.conn.execute(qi).fetchall()
+        self.assertEqual(len(rows), 6)
+        # Verify we have 0 in each shadow table
+        qsiim = sql.select([self.shadow_instance_id_mappings]).\
+                where(self.shadow_instance_id_mappings.c.uuid.in_(
+                                                            self.uuidstrs))
+        rows = self.conn.execute(qsiim).fetchall()
+        self.assertEqual(len(rows), 0)
+        qsi = sql.select([self.shadow_instances]).\
+                where(self.shadow_instances.c.uuid.in_(self.uuidstrs))
+        rows = self.conn.execute(qsi).fetchall()
+        self.assertEqual(len(rows), 0)
+        # Archive 7 rows, which should be 4 in one table and 3 in the other.
+        db.archive_deleted_rows(self.context, max_rows=7)
+        # Verify we have 5 left in the two main tables combined
+        iim_rows = self.conn.execute(qiim).fetchall()
+        i_rows = self.conn.execute(qi).fetchall()
+        self.assertEqual(len(iim_rows) + len(i_rows), 5)
+        # Verify we have 7 in the two shadow tables combined.
+        siim_rows = self.conn.execute(qsiim).fetchall()
+        si_rows = self.conn.execute(qsi).fetchall()
+        self.assertEqual(len(siim_rows) + len(si_rows), 7)
+        # Archive the remaining deleted rows.
+        db.archive_deleted_rows(self.context, max_rows=1)
+        # Verify we have 4 total left in both main tables.
+        iim_rows = self.conn.execute(qiim).fetchall()
+        i_rows = self.conn.execute(qi).fetchall()
+        self.assertEqual(len(iim_rows) + len(i_rows), 4)
+        # Verify we have 8 in shadow
+        siim_rows = self.conn.execute(qsiim).fetchall()
+        si_rows = self.conn.execute(qsi).fetchall()
+        self.assertEqual(len(siim_rows) + len(si_rows), 8)
+        # Try to archive more, but there are no deleted rows left.
+        db.archive_deleted_rows(self.context, max_rows=500)
+        # Verify we have 4 total left in both main tables.
+        iim_rows = self.conn.execute(qiim).fetchall()
+        i_rows = self.conn.execute(qi).fetchall()
+        self.assertEqual(len(iim_rows) + len(i_rows), 4)
+        # Verify we have 8 in shadow
+        siim_rows = self.conn.execute(qsiim).fetchall()
+        si_rows = self.conn.execute(qsi).fetchall()
+        self.assertEqual(len(siim_rows) + len(si_rows), 8)
+
+
+class InstanceGroupDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
+    """Tests for the instance_group_* DB API (create, get, update, delete)."""
+
+    def setUp(self):
+        super(InstanceGroupDBApiTestCase, self).setUp()
+        self.user_id = 'fake_user'
+        self.project_id = 'fake_project'
+        self.context = context.RequestContext(self.user_id, self.project_id)
+
+    def _get_default_values(self):
+        # Minimal valid values dict for instance_group_create.
+        return {'name': 'fake_name',
+                'user_id': self.user_id,
+                'project_id': self.project_id}
+
+    def _create_instance_group(self, context, values, policies=None,
+                               members=None):
+        return db.instance_group_create(context, values, policies=policies,
+                                        members=members)
+
+    def test_instance_group_create_no_key(self):
+        # When no uuid is supplied, the DB layer generates one.
+        values = self._get_default_values()
+        result = self._create_instance_group(self.context, values)
+        ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
+                        'created_at']
+        self._assertEqualObjects(result, values, ignored_keys)
+        self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
+
+    def test_instance_group_create_with_key(self):
+        # A caller-supplied uuid is stored as-is.
+        values = self._get_default_values()
+        values['uuid'] = 'fake_id'
+        result = self._create_instance_group(self.context, values)
+        ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
+                        'created_at']
+        self._assertEqualObjects(result, values, ignored_keys)
+
+    def test_instance_group_create_with_same_key(self):
+        # Creating two groups with the same uuid raises.
+        values = self._get_default_values()
+        values['uuid'] = 'fake_id'
+        self._create_instance_group(self.context, values)
+        self.assertRaises(exception.InstanceGroupIdExists,
+                          self._create_instance_group, self.context, values)
+
+    def test_instance_group_get(self):
+        values = self._get_default_values()
+        result1 = self._create_instance_group(self.context, values)
+        result2 = db.instance_group_get(self.context, result1['uuid'])
+        self._assertEqualObjects(result1, result2)
+
+    def test_instance_group_update_simple(self):
+        values = self._get_default_values()
+        result1 = self._create_instance_group(self.context, values)
+        values = {'name': 'new_name', 'user_id': 'new_user',
+                  'project_id': 'new_project'}
+        db.instance_group_update(self.context, result1['uuid'],
+                                 values)
+        result2 = db.instance_group_get(self.context, result1['uuid'])
+        self.assertEqual(result1['uuid'], result2['uuid'])
+        ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
+                        'created_at']
+        self._assertEqualObjects(result2, values, ignored_keys)
+
+    def test_instance_group_delete(self):
+        # Deleting twice raises on the second attempt.
+        values = self._get_default_values()
+        result = self._create_instance_group(self.context, values)
+        db.instance_group_delete(self.context, result['uuid'])
+        self.assertRaises(exception.InstanceGroupNotFound,
+                          db.instance_group_delete, self.context,
+                          result['uuid'])
+
+    def test_instance_group_get_nonexistent(self):
+        self.assertRaises(exception.InstanceGroupNotFound,
+                          db.instance_group_get,
+                          self.context,
+                          'nonexistent')
+
+    def test_instance_group_delete_nonexistent(self):
+        self.assertRaises(exception.InstanceGroupNotFound,
+                          db.instance_group_delete,
+                          self.context,
+                          'nonexistent')
+
+    def test_instance_group_get_all(self):
+        groups = db.instance_group_get_all(self.context)
+        self.assertEqual(0, len(groups))
+        value = self._get_default_values()
+        result1 = self._create_instance_group(self.context, value)
+        groups = db.instance_group_get_all(self.context)
+        self.assertEqual(1, len(groups))
+        value = self._get_default_values()
+        result2 = self._create_instance_group(self.context, value)
+        groups = db.instance_group_get_all(self.context)
+        results = [result1, result2]
+        self._assertEqualListsOfObjects(results, groups)
+
+    def test_instance_group_get_all_by_project_id(self):
+        groups = db.instance_group_get_all_by_project_id(self.context,
+                                                         'invalid_project_id')
+        self.assertEqual(0, len(groups))
+        values = self._get_default_values()
+        result1 = self._create_instance_group(self.context, values)
+        groups = db.instance_group_get_all_by_project_id(self.context,
+                                                         'fake_project')
+        self.assertEqual(1, len(groups))
+        values = self._get_default_values()
+        values['project_id'] = 'new_project_id'
+        result2 = self._create_instance_group(self.context, values)
+        groups = db.instance_group_get_all(self.context)
+        results = [result1, result2]
+        self._assertEqualListsOfObjects(results, groups)
+        # Each project should only see its own group.
+        projects = [{'name': 'fake_project', 'value': [result1]},
+                    {'name': 'new_project_id', 'value': [result2]}]
+        for project in projects:
+            groups = db.instance_group_get_all_by_project_id(self.context,
+                                                             project['name'])
+            self._assertEqualListsOfObjects(project['value'], groups)
+
+    def test_instance_group_update(self):
+        values = self._get_default_values()
+        result = self._create_instance_group(self.context, values)
+        ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
+                        'created_at']
+        self._assertEqualObjects(result, values, ignored_keys)
+        self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
+        id = result['uuid']
+        values = self._get_default_values()
+        values['name'] = 'new_fake_name'
+        db.instance_group_update(self.context, id, values)
+        result = db.instance_group_get(self.context, id)
+        self.assertEqual(result['name'], 'new_fake_name')
+        # update members
+        values = self._get_default_values()
+        members = ['instance_id1', 'instance_id2']
+        values['members'] = members
+        db.instance_group_update(self.context, id, values)
+        result = db.instance_group_get(self.context, id)
+        self._assertEqualListsOfPrimitivesAsSets(result['members'], members)
+        # update policies
+        values = self._get_default_values()
+        policies = ['policy1', 'policy2']
+        values['policies'] = policies
+        db.instance_group_update(self.context, id, values)
+        result = db.instance_group_get(self.context, id)
+        self._assertEqualListsOfPrimitivesAsSets(result['policies'], policies)
+        # test invalid ID
+        self.assertRaises(exception.InstanceGroupNotFound,
+                          db.instance_group_update, self.context,
+                          'invalid_id', values)
+
+    def test_instance_group_get_by_instance(self):
+        # A group can be looked up by any of its member instance ids.
+        values = self._get_default_values()
+        group1 = self._create_instance_group(self.context, values)
+
+        members = ['instance_id1', 'instance_id2']
+        db.instance_group_members_add(self.context, group1.uuid, members)
+
+        group2 = db.instance_group_get_by_instance(self.context,
+                                                   'instance_id1')
+
+        self.assertEqual(group2.uuid, group1.uuid)
+
+
+class InstanceGroupMembersDBApiTestCase(InstanceGroupDBApiTestCase):
+    """Tests for instance group member add/get/delete.
+
+    Subclasses InstanceGroupDBApiTestCase for its fixtures and helpers
+    (and so also re-runs the parent's tests).
+    """
+
+    def test_instance_group_members_on_create(self):
+        # Members passed at create time end up on the returned group.
+        values = self._get_default_values()
+        values['uuid'] = 'fake_id'
+        members = ['instance_id1', 'instance_id2']
+        result = self._create_instance_group(self.context, values,
+                                             members=members)
+        ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
+                        'created_at']
+        self._assertEqualObjects(result, values, ignored_keys)
+        self._assertEqualListsOfPrimitivesAsSets(result['members'], members)
+
+    def test_instance_group_members_add(self):
+        values = self._get_default_values()
+        values['uuid'] = 'fake_id'
+        result = self._create_instance_group(self.context, values)
+        id = result['uuid']
+        members = db.instance_group_members_get(self.context, id)
+        self.assertEqual(members, [])
+        members2 = ['instance_id1', 'instance_id2']
+        db.instance_group_members_add(self.context, id, members2)
+        members = db.instance_group_members_get(self.context, id)
+        self._assertEqualListsOfPrimitivesAsSets(members, members2)
+
+    def test_instance_group_members_update(self):
+        values = self._get_default_values()
+        values['uuid'] = 'fake_id'
+        result = self._create_instance_group(self.context, values)
+        id = result['uuid']
+        members2 = ['instance_id1', 'instance_id2']
+        db.instance_group_members_add(self.context, id, members2)
+        members = db.instance_group_members_get(self.context, id)
+        self._assertEqualListsOfPrimitivesAsSets(members, members2)
+        # check add with existing keys
+        members3 = ['instance_id1', 'instance_id2', 'instance_id3']
+        db.instance_group_members_add(self.context, id, members3)
+        members = db.instance_group_members_get(self.context, id)
+        self._assertEqualListsOfPrimitivesAsSets(members, members3)
+
+    def test_instance_group_members_delete(self):
+        values = self._get_default_values()
+        values['uuid'] = 'fake_id'
+        result = self._create_instance_group(self.context, values)
+        id = result['uuid']
+        members3 = ['instance_id1', 'instance_id2', 'instance_id3']
+        db.instance_group_members_add(self.context, id, members3)
+        members = db.instance_group_members_get(self.context, id)
+        self._assertEqualListsOfPrimitivesAsSets(members, members3)
+        # Delete one member at a time and re-check the remaining set.
+        for instance_id in members3[:]:
+            db.instance_group_member_delete(self.context, id, instance_id)
+            members3.remove(instance_id)
+            members = db.instance_group_members_get(self.context, id)
+            self._assertEqualListsOfPrimitivesAsSets(members, members3)
+
+    def test_instance_group_members_invalid_ids(self):
+        values = self._get_default_values()
+        result = self._create_instance_group(self.context, values)
+        id = result['uuid']
+        self.assertRaises(exception.InstanceGroupNotFound,
+                          db.instance_group_members_get,
+                          self.context, 'invalid')
+        self.assertRaises(exception.InstanceGroupNotFound,
+                          db.instance_group_member_delete, self.context,
+                          'invalidid', 'instance_id1')
+        members = ['instance_id1', 'instance_id2']
+        db.instance_group_members_add(self.context, id, members)
+        self.assertRaises(exception.InstanceGroupMemberNotFound,
+                          db.instance_group_member_delete,
+                          self.context, id, 'invalid_id')
+
+
+class InstanceGroupPoliciesDBApiTestCase(InstanceGroupDBApiTestCase):
+    """Tests for instance group policy add/get/delete.
+
+    Subclasses InstanceGroupDBApiTestCase for its fixtures and helpers
+    (and so also re-runs the parent's tests).
+    """
+
+    def test_instance_group_policies_on_create(self):
+        # Policies passed at create time end up on the returned group.
+        values = self._get_default_values()
+        values['uuid'] = 'fake_id'
+        policies = ['policy1', 'policy2']
+        result = self._create_instance_group(self.context, values,
+                                             policies=policies)
+        ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
+                        'created_at']
+        self._assertEqualObjects(result, values, ignored_keys)
+        self._assertEqualListsOfPrimitivesAsSets(result['policies'], policies)
+
+    def test_instance_group_policies_add(self):
+        values = self._get_default_values()
+        values['uuid'] = 'fake_id'
+        result = self._create_instance_group(self.context, values)
+        id = result['uuid']
+        policies = db.instance_group_policies_get(self.context, id)
+        self.assertEqual(policies, [])
+        policies2 = ['policy1', 'policy2']
+        db.instance_group_policies_add(self.context, id, policies2)
+        policies = db.instance_group_policies_get(self.context, id)
+        self._assertEqualListsOfPrimitivesAsSets(policies, policies2)
+
+    def test_instance_group_policies_update(self):
+        # Adding a superset that contains existing policies must not
+        # duplicate them.
+        values = self._get_default_values()
+        values['uuid'] = 'fake_id'
+        result = self._create_instance_group(self.context, values)
+        id = result['uuid']
+        policies2 = ['policy1', 'policy2']
+        db.instance_group_policies_add(self.context, id, policies2)
+        policies = db.instance_group_policies_get(self.context, id)
+        self._assertEqualListsOfPrimitivesAsSets(policies, policies2)
+        policies3 = ['policy1', 'policy2', 'policy3']
+        db.instance_group_policies_add(self.context, id, policies3)
+        policies = db.instance_group_policies_get(self.context, id)
+        self._assertEqualListsOfPrimitivesAsSets(policies, policies3)
+
+    def test_instance_group_policies_delete(self):
+        values = self._get_default_values()
+        values['uuid'] = 'fake_id'
+        result = self._create_instance_group(self.context, values)
+        id = result['uuid']
+        policies3 = ['policy1', 'policy2', 'policy3']
+        db.instance_group_policies_add(self.context, id, policies3)
+        policies = db.instance_group_policies_get(self.context, id)
+        self._assertEqualListsOfPrimitivesAsSets(policies, policies3)
+        # Delete one policy at a time and re-check the remaining set.
+        for policy in policies3[:]:
+            db.instance_group_policy_delete(self.context, id, policy)
+            policies3.remove(policy)
+            policies = db.instance_group_policies_get(self.context, id)
+            self._assertEqualListsOfPrimitivesAsSets(policies, policies3)
+
+    def test_instance_group_policies_invalid_ids(self):
+        values = self._get_default_values()
+        result = self._create_instance_group(self.context, values)
+        id = result['uuid']
+        self.assertRaises(exception.InstanceGroupNotFound,
+                          db.instance_group_policies_get,
+                          self.context, 'invalid')
+        self.assertRaises(exception.InstanceGroupNotFound,
+                          db.instance_group_policy_delete, self.context,
+                          'invalidid', 'policy1')
+        policies = ['policy1', 'policy2']
+        db.instance_group_policies_add(self.context, id, policies)
+        self.assertRaises(exception.InstanceGroupPolicyNotFound,
+                          db.instance_group_policy_delete,
+                          self.context, id, 'invalid_policy')
+
+
+class PciDeviceDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
+ def setUp(self):
+ super(PciDeviceDBApiTestCase, self).setUp()
+ self.user_id = 'fake_user'
+ self.project_id = 'fake_project'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+ self.admin_context = context.get_admin_context()
+ self.ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
+ 'created_at']
+
+ def _get_fake_pci_devs(self):
+ return {'id': 3353,
+ 'compute_node_id': 1,
+ 'address': '0000:0f:08.7',
+ 'vendor_id': '8086',
+ 'product_id': '1520',
+ 'dev_type': 'type-VF',
+ 'dev_id': 'pci_0000:0f:08.7',
+ 'extra_info': None,
+ 'label': 'label_8086_1520',
+ 'status': 'available',
+ 'instance_uuid': '00000000-0000-0000-0000-000000000010',
+ 'request_id': None,
+ }, {'id': 3356,
+ 'compute_node_id': 1,
+ 'address': '0000:0f:03.7',
+ 'vendor_id': '8083',
+ 'product_id': '1523',
+ 'dev_type': 'type-VF',
+ 'dev_id': 'pci_0000:0f:08.7',
+ 'extra_info': None,
+ 'label': 'label_8086_1520',
+ 'status': 'available',
+ 'instance_uuid': '00000000-0000-0000-0000-000000000010',
+ 'request_id': None,
+ }
+
+ def _create_fake_pci_devs(self):
+ v1, v2 = self._get_fake_pci_devs()
+ db.pci_device_update(self.admin_context, v1['compute_node_id'],
+ v1['address'], v1)
+ db.pci_device_update(self.admin_context, v2['compute_node_id'],
+ v2['address'], v2)
+ return (v1, v2)
+
+ def test_pci_device_get_by_addr(self):
+ v1, v2 = self._create_fake_pci_devs()
+ result = db.pci_device_get_by_addr(self.admin_context, 1,
+ '0000:0f:08.7')
+ self._assertEqualObjects(v1, result, self.ignored_keys)
+
+ def test_pci_device_get_by_addr_not_found(self):
+ self._create_fake_pci_devs()
+ self.assertRaises(exception.PciDeviceNotFound,
+ db.pci_device_get_by_addr, self.admin_context,
+ 1, '0000:0f:08:09')
+
+ def test_pci_device_get_by_addr_low_priv(self):
+ self._create_fake_pci_devs()
+ self.assertRaises(exception.AdminRequired,
+ db.pci_device_get_by_addr,
+ self.context, 1, '0000:0f:08.7')
+
+ def test_pci_device_get_by_id(self):
+ v1, v2 = self._create_fake_pci_devs()
+ result = db.pci_device_get_by_id(self.admin_context, 3353)
+ self._assertEqualObjects(v1, result, self.ignored_keys)
+
+ def test_pci_device_get_by_id_not_found(self):
+ self._create_fake_pci_devs()
+ self.assertRaises(exception.PciDeviceNotFoundById,
+ db.pci_device_get_by_id,
+ self.admin_context, 3354)
+
+ def test_pci_device_get_by_id_low_priv(self):
+ self._create_fake_pci_devs()
+ self.assertRaises(exception.AdminRequired,
+ db.pci_device_get_by_id,
+ self.context, 3553)
+
+ def test_pci_device_get_all_by_node(self):
+ v1, v2 = self._create_fake_pci_devs()
+ results = db.pci_device_get_all_by_node(self.admin_context, 1)
+ self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
+
+ def test_pci_device_get_all_by_node_empty(self):
+ v1, v2 = self._get_fake_pci_devs()
+ results = db.pci_device_get_all_by_node(self.admin_context, 9)
+ self.assertEqual(len(results), 0)
+
+ def test_pci_device_get_all_by_node_low_priv(self):
+ self._create_fake_pci_devs()
+ self.assertRaises(exception.AdminRequired,
+ db.pci_device_get_all_by_node,
+ self.context, 1)
+
+ def test_pci_device_get_by_instance_uuid(self):
+ v1, v2 = self._get_fake_pci_devs()
+ v1['status'] = 'allocated'
+ v2['status'] = 'allocated'
+ db.pci_device_update(self.admin_context, v1['compute_node_id'],
+ v1['address'], v1)
+ db.pci_device_update(self.admin_context, v2['compute_node_id'],
+ v2['address'], v2)
+ results = db.pci_device_get_all_by_instance_uuid(
+ self.context,
+ '00000000-0000-0000-0000-000000000010')
+ self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
+
+ def test_pci_device_get_by_instance_uuid_check_status(self):
+ v1, v2 = self._get_fake_pci_devs()
+ v1['status'] = 'allocated'
+ v2['status'] = 'claimed'
+ db.pci_device_update(self.admin_context, v1['compute_node_id'],
+ v1['address'], v1)
+ db.pci_device_update(self.admin_context, v2['compute_node_id'],
+ v2['address'], v2)
+ results = db.pci_device_get_all_by_instance_uuid(
+ self.context,
+ '00000000-0000-0000-0000-000000000010')
+ self._assertEqualListsOfObjects(results, [v1], self.ignored_keys)
+
+ def test_pci_device_update(self):
+ v1, v2 = self._get_fake_pci_devs()
+ v1['status'] = 'allocated'
+ db.pci_device_update(self.admin_context, v1['compute_node_id'],
+ v1['address'], v1)
+ result = db.pci_device_get_by_addr(
+ self.admin_context, 1, '0000:0f:08.7')
+ self._assertEqualObjects(v1, result, self.ignored_keys)
+
+ v1['status'] = 'claimed'
+ db.pci_device_update(self.admin_context, v1['compute_node_id'],
+ v1['address'], v1)
+ result = db.pci_device_get_by_addr(
+ self.admin_context, 1, '0000:0f:08.7')
+ self._assertEqualObjects(v1, result, self.ignored_keys)
+
+ def test_pci_device_update_low_priv(self):
+ v1, v2 = self._get_fake_pci_devs()
+ self.assertRaises(exception.AdminRequired,
+ db.pci_device_update, self.context,
+ v1['compute_node_id'], v1['address'], v1)
+
+ def test_pci_device_destroy(self):
+ v1, v2 = self._create_fake_pci_devs()
+ results = db.pci_device_get_all_by_node(self.admin_context, 1)
+ self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
+ db.pci_device_destroy(self.admin_context, v1['compute_node_id'],
+ v1['address'])
+ results = db.pci_device_get_all_by_node(self.admin_context, 1)
+ self._assertEqualListsOfObjects(results, [v2], self.ignored_keys)
+
+ def test_pci_device_destroy_exception(self):
+ v1, v2 = self._get_fake_pci_devs()
+ db.pci_device_update(self.admin_context, v1['compute_node_id'],
+ v1['address'], v1)
+ results = db.pci_device_get_all_by_node(self.admin_context, 1)
+ self._assertEqualListsOfObjects(results, [v1], self.ignored_keys)
+ self.assertRaises(exception.PciDeviceNotFound,
+ db.pci_device_destroy,
+ self.admin_context,
+ v2['compute_node_id'],
+ v2['address'])
+
+
+class RetryOnDeadlockTestCase(test.TestCase):
+ def test_without_deadlock(self):
+ @sqlalchemy_api._retry_on_deadlock
+ def call_api(*args, **kwargs):
+ return True
+ self.assertTrue(call_api())
+
+ def test_raise_deadlock(self):
+ self.attempts = 2
+
+ @sqlalchemy_api._retry_on_deadlock
+ def call_api(*args, **kwargs):
+ while self.attempts:
+ self.attempts = self.attempts - 1
+ raise db_exc.DBDeadlock("fake exception")
+ return True
+ self.assertTrue(call_api())
+
+
+class TestSqlalchemyTypesRepr(test_base.DbTestCase):
+ def setUp(self):
+ super(TestSqlalchemyTypesRepr, self).setUp()
+ meta = MetaData(bind=self.engine)
+ self.table = Table(
+ 'cidr_tbl',
+ meta,
+ Column('id', Integer, primary_key=True),
+ Column('addr', col_types.CIDR())
+ )
+ self.table.create()
+ self.addCleanup(meta.drop_all)
+
+ def test_cidr_repr(self):
+ addrs = [('192.168.3.0/24', '192.168.3.0/24'),
+ ('2001:db8::/64', '2001:db8::/64'),
+ ('192.168.3.0', '192.168.3.0/32'),
+ ('2001:db8::', '2001:db8::/128'),
+ (None, None)]
+ with self.engine.begin() as conn:
+ for i in addrs:
+ conn.execute(self.table.insert(), {'addr': i[0]})
+
+ query = self.table.select().order_by(self.table.c.id)
+ result = conn.execute(query)
+ for idx, row in enumerate(result):
+ self.assertEqual(addrs[idx][1], row.addr)
+
+
+class TestMySQLSqlalchemyTypesRepr(TestSqlalchemyTypesRepr,
+ test_base.MySQLOpportunisticTestCase):
+ pass
+
+
+class TestPostgreSQLSqlalchemyTypesRepr(TestSqlalchemyTypesRepr,
+ test_base.PostgreSQLOpportunisticTestCase):
+ pass
diff --git a/nova/tests/unit/db/test_migration_utils.py b/nova/tests/unit/db/test_migration_utils.py
new file mode 100644
index 0000000000..1d5d155894
--- /dev/null
+++ b/nova/tests/unit/db/test_migration_utils.py
@@ -0,0 +1,256 @@
+# Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me).
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from oslo.db.sqlalchemy import utils as oslodbutils
+import sqlalchemy
+from sqlalchemy import Integer, String
+from sqlalchemy import MetaData, Table, Column
+from sqlalchemy.exc import NoSuchTableError
+from sqlalchemy import sql
+from sqlalchemy.types import UserDefinedType
+
+from nova.db.sqlalchemy import api as db
+from nova.db.sqlalchemy import utils
+from nova import exception
+from nova.tests.unit.db import test_migrations
+
+
+SA_VERSION = tuple(map(int, sqlalchemy.__version__.split('.')))
+
+
+class CustomType(UserDefinedType):
+ """Dummy column type for testing unsupported types."""
+ def get_col_spec(self):
+ return "CustomType"
+
+
+class TestMigrationUtils(test_migrations.BaseMigrationTestCase):
+ """Class for testing utils that are used in db migrations."""
+
+ def test_delete_from_select(self):
+ table_name = "__test_deletefromselect_table__"
+ uuidstrs = []
+ for unused in range(10):
+ uuidstrs.append(uuid.uuid4().hex)
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+ conn = engine.connect()
+ test_table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True,
+ nullable=False, autoincrement=True),
+ Column('uuid', String(36), nullable=False))
+ test_table.create()
+ # Add 10 rows to table
+ for uuidstr in uuidstrs:
+ ins_stmt = test_table.insert().values(uuid=uuidstr)
+ conn.execute(ins_stmt)
+
+ # Delete 4 rows in one chunk
+ column = test_table.c.id
+ query_delete = sql.select([column],
+ test_table.c.id < 5).order_by(column)
+ delete_statement = utils.DeleteFromSelect(test_table,
+ query_delete, column)
+ result_delete = conn.execute(delete_statement)
+ # Verify we delete 4 rows
+ self.assertEqual(result_delete.rowcount, 4)
+
+ query_all = sql.select([test_table]).\
+ where(test_table.c.uuid.in_(uuidstrs))
+ rows = conn.execute(query_all).fetchall()
+ # Verify we still have 6 rows in table
+ self.assertEqual(len(rows), 6)
+
+ test_table.drop()
+
+ def test_check_shadow_table(self):
+ table_name = 'test_check_shadow_table'
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+
+ table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer),
+ Column('c', String(256)))
+ table.create()
+
+ # check missing shadow table
+ self.assertRaises(NoSuchTableError,
+ utils.check_shadow_table, engine, table_name)
+
+ shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
+ Column('id', Integer),
+ Column('a', Integer))
+ shadow_table.create()
+
+ # check missing column
+ self.assertRaises(exception.NovaException,
+ utils.check_shadow_table, engine, table_name)
+
+ # check when all is ok
+ c = Column('c', String(256))
+ shadow_table.create_column(c)
+ self.assertTrue(utils.check_shadow_table(engine, table_name))
+
+ # check extra column
+ d = Column('d', Integer)
+ shadow_table.create_column(d)
+ self.assertRaises(exception.NovaException,
+ utils.check_shadow_table, engine, table_name)
+
+ table.drop()
+ shadow_table.drop()
+
+ def test_check_shadow_table_different_types(self):
+ table_name = 'test_check_shadow_table_different_types'
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+
+ table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer))
+ table.create()
+
+ shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', String(256)))
+ shadow_table.create()
+ self.assertRaises(exception.NovaException,
+ utils.check_shadow_table, engine, table_name)
+
+ table.drop()
+ shadow_table.drop()
+
+ def test_check_shadow_table_with_unsupported_sqlite_type(self):
+ if 'sqlite' not in self.engines:
+ self.skipTest('sqlite is not configured')
+ table_name = 'test_check_shadow_table_with_unsupported_sqlite_type'
+ engine = self.engines['sqlite']
+ meta = MetaData(bind=engine)
+
+ table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer),
+ Column('c', CustomType))
+ table.create()
+
+ shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer),
+ Column('c', CustomType))
+ shadow_table.create()
+ self.assertTrue(utils.check_shadow_table(engine, table_name))
+ shadow_table.drop()
+
+ def test_create_shadow_table_by_table_instance(self):
+ table_name = 'test_create_shadow_table_by_table_instance'
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+ table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer),
+ Column('b', String(256)))
+ table.create()
+ shadow_table = utils.create_shadow_table(engine, table=table)
+ self.assertTrue(utils.check_shadow_table(engine, table_name))
+ table.drop()
+ shadow_table.drop()
+
+ def test_create_shadow_table_by_name(self):
+ table_name = 'test_create_shadow_table_by_name'
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+
+ table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer),
+ Column('b', String(256)))
+ table.create()
+ shadow_table = utils.create_shadow_table(engine,
+ table_name=table_name)
+ self.assertTrue(utils.check_shadow_table(engine, table_name))
+ table.drop()
+ shadow_table.drop()
+
+ def test_create_shadow_table_not_supported_type(self):
+ if 'sqlite' in self.engines:
+ table_name = 'test_create_shadow_table_not_supported_type'
+ engine = self.engines['sqlite']
+ meta = MetaData()
+ meta.bind = engine
+ table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', CustomType))
+ table.create()
+
+ # reflection of custom types has been fixed upstream
+ if SA_VERSION < (0, 9, 0):
+ self.assertRaises(oslodbutils.ColumnError,
+ utils.create_shadow_table,
+ engine, table_name=table_name)
+
+ shadow_table = utils.create_shadow_table(engine,
+ table_name=table_name,
+ a=Column('a', CustomType())
+ )
+ self.assertTrue(utils.check_shadow_table(engine, table_name))
+ table.drop()
+ shadow_table.drop()
+
+ def test_create_shadow_both_table_and_table_name_are_none(self):
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+ self.assertRaises(exception.NovaException,
+ utils.create_shadow_table, engine)
+
+ def test_create_shadow_both_table_and_table_name_are_specified(self):
+ table_name = ('test_create_shadow_both_table_and_table_name_are_'
+ 'specified')
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+ table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer))
+ table.create()
+ self.assertRaises(exception.NovaException,
+ utils.create_shadow_table,
+ engine, table=table, table_name=table_name)
+ table.drop()
+
+ def test_create_duplicate_shadow_table(self):
+ table_name = 'test_create_duplicate_shadow_table'
+ for key, engine in self.engines.items():
+ meta = MetaData()
+ meta.bind = engine
+ table = Table(table_name, meta,
+ Column('id', Integer, primary_key=True),
+ Column('a', Integer))
+ table.create()
+ shadow_table = utils.create_shadow_table(engine,
+ table_name=table_name)
+ self.assertRaises(exception.ShadowTableExists,
+ utils.create_shadow_table,
+ engine, table_name=table_name)
+ table.drop()
+ shadow_table.drop()
diff --git a/nova/tests/db/test_migrations.conf b/nova/tests/unit/db/test_migrations.conf
index 310b7055c4..310b7055c4 100644
--- a/nova/tests/db/test_migrations.conf
+++ b/nova/tests/unit/db/test_migrations.conf
diff --git a/nova/tests/db/test_migrations.py b/nova/tests/unit/db/test_migrations.py
index cd1c3a7fdf..cd1c3a7fdf 100644
--- a/nova/tests/db/test_migrations.py
+++ b/nova/tests/unit/db/test_migrations.py
diff --git a/nova/tests/db/test_sqlite.py b/nova/tests/unit/db/test_sqlite.py
index e6a0951017..e6a0951017 100644
--- a/nova/tests/db/test_sqlite.py
+++ b/nova/tests/unit/db/test_sqlite.py
diff --git a/nova/tests/fake_block_device.py b/nova/tests/unit/fake_block_device.py
index 6f27eb3749..6f27eb3749 100644
--- a/nova/tests/fake_block_device.py
+++ b/nova/tests/unit/fake_block_device.py
diff --git a/nova/tests/fake_crypto.py b/nova/tests/unit/fake_crypto.py
index cac79a36bc..cac79a36bc 100644
--- a/nova/tests/fake_crypto.py
+++ b/nova/tests/unit/fake_crypto.py
diff --git a/nova/tests/unit/fake_hosts.py b/nova/tests/unit/fake_hosts.py
new file mode 100644
index 0000000000..78fa414ac7
--- /dev/null
+++ b/nova/tests/unit/fake_hosts.py
@@ -0,0 +1,37 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Provides some fake hosts to test host and service related functions
+"""
+
+from nova.tests.unit.objects import test_service
+
+
+HOST_LIST = [
+ {"host_name": "host_c1", "service": "compute", "zone": "nova"},
+ {"host_name": "host_c2", "service": "compute", "zone": "nova"}]
+
+OS_API_HOST_LIST = {"hosts": HOST_LIST}
+
+HOST_LIST_NOVA_ZONE = [
+ {"host_name": "host_c1", "service": "compute", "zone": "nova"},
+ {"host_name": "host_c2", "service": "compute", "zone": "nova"}]
+
+service_base = test_service.fake_service
+
+SERVICES_LIST = [
+ dict(service_base, host='host_c1', topic='compute'),
+ dict(service_base, host='host_c2', topic='compute')]
diff --git a/nova/tests/fake_instance.py b/nova/tests/unit/fake_instance.py
index b1a080269d..b1a080269d 100644
--- a/nova/tests/fake_instance.py
+++ b/nova/tests/unit/fake_instance.py
diff --git a/nova/tests/fake_ldap.py b/nova/tests/unit/fake_ldap.py
index dd69d42961..dd69d42961 100644
--- a/nova/tests/fake_ldap.py
+++ b/nova/tests/unit/fake_ldap.py
diff --git a/nova/tests/fake_loadables/__init__.py b/nova/tests/unit/fake_loadables/__init__.py
index a74c55d21a..a74c55d21a 100644
--- a/nova/tests/fake_loadables/__init__.py
+++ b/nova/tests/unit/fake_loadables/__init__.py
diff --git a/nova/tests/unit/fake_loadables/fake_loadable1.py b/nova/tests/unit/fake_loadables/fake_loadable1.py
new file mode 100644
index 0000000000..a30b66bdf7
--- /dev/null
+++ b/nova/tests/unit/fake_loadables/fake_loadable1.py
@@ -0,0 +1,44 @@
+# Copyright 2012 OpenStack Foundation. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Fake Loadable subclasses module #1
+"""
+
+from nova.tests.unit import fake_loadables
+
+
+class FakeLoadableSubClass1(fake_loadables.FakeLoadable):
+ pass
+
+
+class FakeLoadableSubClass2(fake_loadables.FakeLoadable):
+ pass
+
+
+class _FakeLoadableSubClass3(fake_loadables.FakeLoadable):
+ """Classes beginning with '_' will be ignored."""
+ pass
+
+
+class FakeLoadableSubClass4(object):
+ """Not a correct subclass."""
+
+
+def return_valid_classes():
+ return [FakeLoadableSubClass1, FakeLoadableSubClass2]
+
+
+def return_invalid_classes():
+ return [FakeLoadableSubClass1, _FakeLoadableSubClass3,
+ FakeLoadableSubClass4]
diff --git a/nova/tests/unit/fake_loadables/fake_loadable2.py b/nova/tests/unit/fake_loadables/fake_loadable2.py
new file mode 100644
index 0000000000..a70ab5f952
--- /dev/null
+++ b/nova/tests/unit/fake_loadables/fake_loadable2.py
@@ -0,0 +1,39 @@
+# Copyright 2012 OpenStack Foundation. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Fake Loadable subclasses module #2
+"""
+
+from nova.tests.unit import fake_loadables
+
+
+class FakeLoadableSubClass5(fake_loadables.FakeLoadable):
+ pass
+
+
+class FakeLoadableSubClass6(fake_loadables.FakeLoadable):
+ pass
+
+
+class _FakeLoadableSubClass7(fake_loadables.FakeLoadable):
+ """Classes beginning with '_' will be ignored."""
+ pass
+
+
+class FakeLoadableSubClass8(BaseException):
+ """Not a correct subclass."""
+
+
+def return_valid_class():
+ return [FakeLoadableSubClass6]
diff --git a/nova/tests/unit/fake_network.py b/nova/tests/unit/fake_network.py
new file mode 100644
index 0000000000..09f54b13d3
--- /dev/null
+++ b/nova/tests/unit/fake_network.py
@@ -0,0 +1,457 @@
+# Copyright 2011 Rackspace
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+
+from nova.compute import api as compute_api
+from nova.compute import manager as compute_manager
+import nova.context
+from nova import db
+from nova import exception
+from nova.network import api as network_api
+from nova.network import manager as network_manager
+from nova.network import model as network_model
+from nova.network import rpcapi as network_rpcapi
+from nova import objects
+from nova.objects import base as obj_base
+from nova.objects import virtual_interface as vif_obj
+from nova.pci import device as pci_device
+from nova.tests.unit.objects import test_fixed_ip
+from nova.tests.unit.objects import test_instance_info_cache
+from nova.tests.unit.objects import test_pci_device
+
+
+HOST = "testhost"
+CONF = cfg.CONF
+CONF.import_opt('use_ipv6', 'nova.netconf')
+
+
+class FakeModel(dict):
+ """Represent a model from the db."""
+ def __init__(self, *args, **kwargs):
+ self.update(kwargs)
+
+
+class FakeNetworkManager(network_manager.NetworkManager):
+ """This NetworkManager doesn't call the base class so we can bypass all
+ inherited service cruft and just perform unit tests.
+ """
+
+ class FakeDB:
+ vifs = [{'id': 0,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'instance_uuid': '00000000-0000-0000-0000-000000000010',
+ 'network_id': 1,
+ 'uuid': 'fake-uuid',
+ 'address': 'DC:AD:BE:FF:EF:01'},
+ {'id': 1,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'instance_uuid': '00000000-0000-0000-0000-000000000020',
+ 'network_id': 21,
+ 'uuid': 'fake-uuid2',
+ 'address': 'DC:AD:BE:FF:EF:02'},
+ {'id': 2,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'instance_uuid': '00000000-0000-0000-0000-000000000030',
+ 'network_id': 31,
+ 'uuid': 'fake-uuid3',
+ 'address': 'DC:AD:BE:FF:EF:03'}]
+
+ floating_ips = [dict(address='172.16.1.1',
+ fixed_ip_id=100),
+ dict(address='172.16.1.2',
+ fixed_ip_id=200),
+ dict(address='173.16.1.2',
+ fixed_ip_id=210)]
+
+ fixed_ips = [dict(test_fixed_ip.fake_fixed_ip,
+ id=100,
+ address='172.16.0.1',
+ virtual_interface_id=0),
+ dict(test_fixed_ip.fake_fixed_ip,
+ id=200,
+ address='172.16.0.2',
+ virtual_interface_id=1),
+ dict(test_fixed_ip.fake_fixed_ip,
+ id=210,
+ address='173.16.0.2',
+ virtual_interface_id=2)]
+
+ def fixed_ip_get_by_instance(self, context, instance_uuid):
+ return [dict(address='10.0.0.0'), dict(address='10.0.0.1'),
+ dict(address='10.0.0.2')]
+
+ def network_get_by_cidr(self, context, cidr):
+ raise exception.NetworkNotFoundForCidr(cidr=cidr)
+
+ def network_create_safe(self, context, net):
+ fakenet = dict(net)
+ fakenet['id'] = 999
+ return fakenet
+
+ def network_get(self, context, network_id, project_only="allow_none"):
+ return {'cidr_v6': '2001:db8:69:%x::/64' % network_id}
+
+ def network_get_by_uuid(self, context, network_uuid):
+ raise exception.NetworkNotFoundForUUID(uuid=network_uuid)
+
+ def network_get_all(self, context):
+ raise exception.NoNetworksFound()
+
+ def network_get_all_by_uuids(self, context, project_only="allow_none"):
+ raise exception.NoNetworksFound()
+
+ def network_disassociate(self, context, network_id):
+ return True
+
+ def virtual_interface_get_all(self, context):
+ return self.vifs
+
+ def fixed_ips_by_virtual_interface(self, context, vif_id):
+ return [ip for ip in self.fixed_ips
+ if ip['virtual_interface_id'] == vif_id]
+
+ def fixed_ip_disassociate(self, context, address):
+ return True
+
+ def __init__(self, stubs=None):
+ self.db = self.FakeDB()
+ if stubs:
+ stubs.Set(vif_obj, 'db', self.db)
+ self.deallocate_called = None
+ self.deallocate_fixed_ip_calls = []
+ self.network_rpcapi = network_rpcapi.NetworkAPI()
+
+ # TODO(matelakat) method signature should align with the faked one's
+ def deallocate_fixed_ip(self, context, address=None, host=None,
+ instance=None):
+ self.deallocate_fixed_ip_calls.append((context, address, host))
+ # TODO(matelakat) use the deallocate_fixed_ip_calls instead
+ self.deallocate_called = address
+
+ def _create_fixed_ips(self, context, network_id, fixed_cidr=None,
+ extra_reserved=None, bottom_reserved=0,
+ top_reserved=0):
+ pass
+
+ def get_instance_nw_info(context, instance_id, rxtx_factor,
+ host, instance_uuid=None, **kwargs):
+ pass
+
+
+def fake_network(network_id, ipv6=None):
+ if ipv6 is None:
+ ipv6 = CONF.use_ipv6
+ fake_network = {'id': network_id,
+ 'uuid': '00000000-0000-0000-0000-00000000000000%02d' % network_id,
+ 'label': 'test%d' % network_id,
+ 'injected': False,
+ 'multi_host': False,
+ 'cidr': '192.168.%d.0/24' % network_id,
+ 'cidr_v6': None,
+ 'netmask': '255.255.255.0',
+ 'netmask_v6': None,
+ 'bridge': 'fake_br%d' % network_id,
+ 'bridge_interface': 'fake_eth%d' % network_id,
+ 'gateway': '192.168.%d.1' % network_id,
+ 'gateway_v6': None,
+ 'broadcast': '192.168.%d.255' % network_id,
+ 'dns1': '192.168.%d.3' % network_id,
+ 'dns2': '192.168.%d.4' % network_id,
+ 'dns3': '192.168.%d.3' % network_id,
+ 'vlan': None,
+ 'host': None,
+ 'project_id': 'fake_project',
+ 'vpn_public_address': '192.168.%d.2' % network_id,
+ 'vpn_public_port': None,
+ 'vpn_private_address': None,
+ 'dhcp_start': None,
+ 'rxtx_base': network_id * 10,
+ 'priority': None,
+ 'deleted': False,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'mtu': None,
+ 'dhcp_server': '192.168.%d.1' % network_id,
+ 'enable_dhcp': True,
+ 'share_address': False}
+ if ipv6:
+ fake_network['cidr_v6'] = '2001:db8:0:%x::/64' % network_id
+ fake_network['gateway_v6'] = '2001:db8:0:%x::1' % network_id
+ fake_network['netmask_v6'] = '64'
+ if CONF.flat_injected:
+ fake_network['injected'] = True
+
+ return fake_network
+
+
+def fake_vif(x):
+ return{'id': x,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'address': 'DE:AD:BE:EF:00:%02x' % x,
+ 'uuid': '00000000-0000-0000-0000-00000000000000%02d' % x,
+ 'network_id': x,
+ 'instance_uuid': 'fake-uuid'}
+
+
+def floating_ip_ids():
+ for i in xrange(1, 100):
+ yield i
+
+
+def fixed_ip_ids():
+ for i in xrange(1, 100):
+ yield i
+
+
+floating_ip_id = floating_ip_ids()
+fixed_ip_id = fixed_ip_ids()
+
+
+def next_fixed_ip(network_id, num_floating_ips=0):
+ next_id = fixed_ip_id.next()
+ f_ips = [FakeModel(**next_floating_ip(next_id))
+ for i in xrange(num_floating_ips)]
+ return {'id': next_id,
+ 'network_id': network_id,
+ 'address': '192.168.%d.%03d' % (network_id, (next_id + 99)),
+ 'instance_uuid': 1,
+ 'allocated': False,
+ 'reserved': False,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'leased': True,
+ 'host': HOST,
+ 'deleted': 0,
+ 'network': fake_network(network_id),
+ 'virtual_interface': fake_vif(network_id),
+ # and since network_id and vif_id happen to be equivalent
+ 'virtual_interface_id': network_id,
+ 'floating_ips': f_ips}
+
+
+def next_floating_ip(fixed_ip_id):
+ next_id = floating_ip_id.next()
+ return {'id': next_id,
+ 'address': '10.10.10.%03d' % (next_id + 99),
+ 'fixed_ip_id': fixed_ip_id,
+ 'project_id': None,
+ 'auto_assigned': False}
+
+
+def ipv4_like(ip, match_string):
+ ip = ip.split('.')
+ match_octets = match_string.split('.')
+
+ for i, octet in enumerate(match_octets):
+ if octet == '*':
+ continue
+ if octet != ip[i]:
+ return False
+ return True
+
+
+def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2,
+ floating_ips_per_fixed_ip=0):
+ # stubs is the self.stubs from the test
+ # ips_per_vif is the number of ips each vif will have
+ # num_floating_ips is number of float ips for each fixed ip
+ network = network_manager.FlatManager(host=HOST)
+ network.db = db
+
+ # reset the fixed and floating ip generators
+ global floating_ip_id, fixed_ip_id, fixed_ips
+ floating_ip_id = floating_ip_ids()
+ fixed_ip_id = fixed_ip_ids()
+ fixed_ips = []
+
+ def fixed_ips_fake(*args, **kwargs):
+ global fixed_ips
+ ips = [next_fixed_ip(i, floating_ips_per_fixed_ip)
+ for i in xrange(1, num_networks + 1)
+ for j in xrange(ips_per_vif)]
+ fixed_ips = ips
+ return ips
+
+ def update_cache_fake(*args, **kwargs):
+ pass
+
+ stubs.Set(db, 'fixed_ip_get_by_instance', fixed_ips_fake)
+ stubs.Set(db, 'instance_info_cache_update', update_cache_fake)
+
+ class FakeContext(nova.context.RequestContext):
+ def is_admin(self):
+ return True
+
+ nw_model = network.get_instance_nw_info(
+ FakeContext('fakeuser', 'fake_project'),
+ 0, 3, None)
+ return nw_model
+
+
+def stub_out_nw_api_get_instance_nw_info(stubs, func=None,
+ num_networks=1,
+ ips_per_vif=1,
+ floating_ips_per_fixed_ip=0):
+
+ def get_instance_nw_info(self, context, instance, conductor_api=None):
+ return fake_get_instance_nw_info(stubs, num_networks=num_networks,
+ ips_per_vif=ips_per_vif,
+ floating_ips_per_fixed_ip=floating_ips_per_fixed_ip)
+
+ if func is None:
+ func = get_instance_nw_info
+ stubs.Set(network_api.API, 'get_instance_nw_info', func)
+
+
+def stub_out_network_cleanup(stubs):
+ stubs.Set(network_api.API, 'deallocate_for_instance',
+ lambda *args, **kwargs: None)
+
+
+_real_functions = {}
+
+
+def set_stub_network_methods(stubs):
+ global _real_functions
+ cm = compute_manager.ComputeManager
+ if not _real_functions:
+ _real_functions = {
+ '_get_instance_nw_info': cm._get_instance_nw_info,
+ '_allocate_network': cm._allocate_network,
+ '_deallocate_network': cm._deallocate_network}
+
+ def fake_networkinfo(*args, **kwargs):
+ return network_model.NetworkInfo()
+
+ def fake_async_networkinfo(*args, **kwargs):
+ return network_model.NetworkInfoAsyncWrapper(fake_networkinfo)
+
+ stubs.Set(cm, '_get_instance_nw_info', fake_networkinfo)
+ stubs.Set(cm, '_allocate_network', fake_async_networkinfo)
+ stubs.Set(cm, '_deallocate_network', lambda *args, **kwargs: None)
+
+
+def unset_stub_network_methods(stubs):
+ global _real_functions
+ if _real_functions:
+ cm = compute_manager.ComputeManager
+ for name in _real_functions:
+ stubs.Set(cm, name, _real_functions[name])
+
+
+def stub_compute_with_ips(stubs):
+ orig_get = compute_api.API.get
+ orig_get_all = compute_api.API.get_all
+ orig_create = compute_api.API.create
+
+ def fake_get(*args, **kwargs):
+ return _get_instances_with_cached_ips(orig_get, *args, **kwargs)
+
+ def fake_get_all(*args, **kwargs):
+ return _get_instances_with_cached_ips(orig_get_all, *args, **kwargs)
+
+ def fake_create(*args, **kwargs):
+ return _create_instances_with_cached_ips(orig_create, *args, **kwargs)
+
+ def fake_pci_device_get_by_addr(context, node_id, dev_addr):
+ return test_pci_device.fake_db_dev
+
+ stubs.Set(db, 'pci_device_get_by_addr', fake_pci_device_get_by_addr)
+ stubs.Set(compute_api.API, 'get', fake_get)
+ stubs.Set(compute_api.API, 'get_all', fake_get_all)
+ stubs.Set(compute_api.API, 'create', fake_create)
+
+
+def _get_fake_cache():
+ def _ip(ip, fixed=True, floats=None):
+ ip_dict = {'address': ip, 'type': 'fixed'}
+ if not fixed:
+ ip_dict['type'] = 'floating'
+ if fixed and floats:
+ ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
+ return ip_dict
+
+ info = [{'address': 'aa:bb:cc:dd:ee:ff',
+ 'id': 1,
+ 'network': {'bridge': 'br0',
+ 'id': 1,
+ 'label': 'private',
+ 'subnets': [{'cidr': '192.168.0.0/24',
+ 'ips': [_ip('192.168.0.3')]}]}}]
+ if CONF.use_ipv6:
+ ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
+ info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
+ 'ips': [_ip(ipv6_addr)]})
+ return jsonutils.dumps(info)
+
+
+def _get_instances_with_cached_ips(orig_func, *args, **kwargs):
+ """Kludge the cache into instance(s) without having to create DB
+ entries
+ """
+ instances = orig_func(*args, **kwargs)
+ context = args[0]
+ fake_device = objects.PciDevice.get_by_dev_addr(context, 1, 'a')
+
+ def _info_cache_for(instance):
+ info_cache = dict(test_instance_info_cache.fake_info_cache,
+ network_info=_get_fake_cache(),
+ instance_uuid=instance['uuid'])
+ if isinstance(instance, obj_base.NovaObject):
+ _info_cache = objects.InstanceInfoCache(context)
+ objects.InstanceInfoCache._from_db_object(context, _info_cache,
+ info_cache)
+ info_cache = _info_cache
+ instance['info_cache'] = info_cache
+
+ if isinstance(instances, (list, obj_base.ObjectListBase)):
+ for instance in instances:
+ _info_cache_for(instance)
+ pci_device.claim(fake_device, instance)
+ pci_device.allocate(fake_device, instance)
+ else:
+ _info_cache_for(instances)
+ pci_device.claim(fake_device, instances)
+ pci_device.allocate(fake_device, instances)
+ return instances
+
+
+def _create_instances_with_cached_ips(orig_func, *args, **kwargs):
+ """Kludge the above kludge so that the database doesn't get out
+ of sync with the actual instance.
+ """
+ instances, reservation_id = orig_func(*args, **kwargs)
+ fake_cache = _get_fake_cache()
+ for instance in instances:
+ instance['info_cache']['network_info'] = fake_cache
+ db.instance_info_cache_update(args[1], instance['uuid'],
+ {'network_info': fake_cache})
+ return (instances, reservation_id)
diff --git a/nova/tests/fake_network_cache_model.py b/nova/tests/unit/fake_network_cache_model.py
index 9757773ba9..9757773ba9 100644
--- a/nova/tests/fake_network_cache_model.py
+++ b/nova/tests/unit/fake_network_cache_model.py
diff --git a/nova/tests/fake_notifier.py b/nova/tests/unit/fake_notifier.py
index 110418215d..110418215d 100644
--- a/nova/tests/fake_notifier.py
+++ b/nova/tests/unit/fake_notifier.py
diff --git a/nova/tests/fake_policy.py b/nova/tests/unit/fake_policy.py
index 8344af475d..8344af475d 100644
--- a/nova/tests/fake_policy.py
+++ b/nova/tests/unit/fake_policy.py
diff --git a/nova/tests/fake_processutils.py b/nova/tests/unit/fake_processutils.py
index 111540d1d1..111540d1d1 100644
--- a/nova/tests/fake_processutils.py
+++ b/nova/tests/unit/fake_processutils.py
diff --git a/nova/tests/fake_server_actions.py b/nova/tests/unit/fake_server_actions.py
index 63047bfbae..63047bfbae 100644
--- a/nova/tests/fake_server_actions.py
+++ b/nova/tests/unit/fake_server_actions.py
diff --git a/nova/tests/fake_utils.py b/nova/tests/unit/fake_utils.py
index 7a97866d20..7a97866d20 100644
--- a/nova/tests/fake_utils.py
+++ b/nova/tests/unit/fake_utils.py
diff --git a/nova/tests/fake_volume.py b/nova/tests/unit/fake_volume.py
index 6fbe560162..6fbe560162 100644
--- a/nova/tests/fake_volume.py
+++ b/nova/tests/unit/fake_volume.py
diff --git a/nova/tests/functional/__init__.py b/nova/tests/unit/functional/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/functional/__init__.py
+++ b/nova/tests/unit/functional/__init__.py
diff --git a/nova/tests/image/__init__.py b/nova/tests/unit/image/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/image/__init__.py
+++ b/nova/tests/unit/image/__init__.py
diff --git a/nova/tests/image/abs.tar.gz b/nova/tests/unit/image/abs.tar.gz
index 4d39507340..4d39507340 100644
--- a/nova/tests/image/abs.tar.gz
+++ b/nova/tests/unit/image/abs.tar.gz
Binary files differ
diff --git a/nova/tests/image/fake.py b/nova/tests/unit/image/fake.py
index 0292afba60..0292afba60 100644
--- a/nova/tests/image/fake.py
+++ b/nova/tests/unit/image/fake.py
diff --git a/nova/tests/image/rel.tar.gz b/nova/tests/unit/image/rel.tar.gz
index b54f55aa79..b54f55aa79 100644
--- a/nova/tests/image/rel.tar.gz
+++ b/nova/tests/unit/image/rel.tar.gz
Binary files differ
diff --git a/nova/tests/unit/image/test_fake.py b/nova/tests/unit/image/test_fake.py
new file mode 100644
index 0000000000..0f985ee16e
--- /dev/null
+++ b/nova/tests/unit/image/test_fake.py
@@ -0,0 +1,117 @@
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import StringIO
+
+from nova import context
+from nova import exception
+from nova import test
+import nova.tests.unit.image.fake
+
+
+class FakeImageServiceTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(FakeImageServiceTestCase, self).setUp()
+ self.image_service = nova.tests.unit.image.fake.FakeImageService()
+ self.context = context.get_admin_context()
+
+ def tearDown(self):
+ super(FakeImageServiceTestCase, self).tearDown()
+ nova.tests.unit.image.fake.FakeImageService_reset()
+
+ def test_detail(self):
+ res = self.image_service.detail(self.context)
+ for image in res:
+ keys = set(image.keys())
+ self.assertEqual(keys, set(['id', 'name', 'created_at',
+ 'updated_at', 'deleted_at', 'deleted',
+ 'status', 'is_public', 'properties',
+ 'disk_format', 'container_format',
+ 'size']))
+ self.assertIsInstance(image['created_at'], datetime.datetime)
+ self.assertIsInstance(image['updated_at'], datetime.datetime)
+
+ if not (isinstance(image['deleted_at'], datetime.datetime) or
+ image['deleted_at'] is None):
+ self.fail('image\'s "deleted_at" attribute was neither a '
+ 'datetime object nor None')
+
+ def check_is_bool(image, key):
+ val = image.get('deleted')
+ if not isinstance(val, bool):
+ self.fail('image\'s "%s" attribute wasn\'t '
+ 'a bool: %r' % (key, val))
+
+ check_is_bool(image, 'deleted')
+ check_is_bool(image, 'is_public')
+
+ def test_show_raises_imagenotfound_for_invalid_id(self):
+ self.assertRaises(exception.ImageNotFound,
+ self.image_service.show,
+ self.context,
+ 'this image does not exist')
+
+ def test_create_adds_id(self):
+ index = self.image_service.detail(self.context)
+ image_count = len(index)
+
+ self.image_service.create(self.context, {})
+
+ index = self.image_service.detail(self.context)
+ self.assertEqual(len(index), image_count + 1)
+
+ self.assertTrue(index[0]['id'])
+
+ def test_create_keeps_id(self):
+ self.image_service.create(self.context, {'id': '34'})
+ self.image_service.show(self.context, '34')
+
+ def test_create_rejects_duplicate_ids(self):
+ self.image_service.create(self.context, {'id': '34'})
+ self.assertRaises(exception.CouldNotUploadImage,
+ self.image_service.create,
+ self.context,
+ {'id': '34'})
+
+ # Make sure there's still one left
+ self.image_service.show(self.context, '34')
+
+ def test_update(self):
+ self.image_service.create(self.context,
+ {'id': '34', 'foo': 'bar'})
+
+ self.image_service.update(self.context, '34',
+ {'id': '34', 'foo': 'baz'})
+
+ img = self.image_service.show(self.context, '34')
+ self.assertEqual(img['foo'], 'baz')
+
+ def test_delete(self):
+ self.image_service.create(self.context, {'id': '34', 'foo': 'bar'})
+ self.image_service.delete(self.context, '34')
+ self.assertRaises(exception.NotFound,
+ self.image_service.show,
+ self.context,
+ '34')
+
+ def test_create_then_get(self):
+ blob = 'some data'
+ s1 = StringIO.StringIO(blob)
+ self.image_service.create(self.context,
+ {'id': '32', 'foo': 'bar'},
+ data=s1)
+ s2 = StringIO.StringIO()
+ self.image_service.download(self.context, '32', data=s2)
+ self.assertEqual(s2.getvalue(), blob, 'Did not get blob back intact')
diff --git a/nova/tests/image/test_glance.py b/nova/tests/unit/image/test_glance.py
index 63b4d22e1e..63b4d22e1e 100644
--- a/nova/tests/image/test_glance.py
+++ b/nova/tests/unit/image/test_glance.py
diff --git a/nova/tests/unit/image/test_s3.py b/nova/tests/unit/image/test_s3.py
new file mode 100644
index 0000000000..d9ef08d3fe
--- /dev/null
+++ b/nova/tests/unit/image/test_s3.py
@@ -0,0 +1,267 @@
+# Copyright 2011 Isaku Yamahata
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import binascii
+import os
+import tempfile
+
+import eventlet
+import fixtures
+import mox
+
+from nova.api.ec2 import ec2utils
+from nova import context
+from nova import db
+from nova import exception
+from nova.image import s3
+from nova import test
+from nova.tests.unit.image import fake
+
+
+ami_manifest_xml = """<?xml version="1.0" ?>
+<manifest>
+ <version>2011-06-17</version>
+ <bundler>
+ <name>test-s3</name>
+ <version>0</version>
+ <release>0</release>
+ </bundler>
+ <machine_configuration>
+ <architecture>x86_64</architecture>
+ <block_device_mapping>
+ <mapping>
+ <virtual>ami</virtual>
+ <device>sda1</device>
+ </mapping>
+ <mapping>
+ <virtual>root</virtual>
+ <device>/dev/sda1</device>
+ </mapping>
+ <mapping>
+ <virtual>ephemeral0</virtual>
+ <device>sda2</device>
+ </mapping>
+ <mapping>
+ <virtual>swap</virtual>
+ <device>sda3</device>
+ </mapping>
+ </block_device_mapping>
+ <kernel_id>aki-00000001</kernel_id>
+ <ramdisk_id>ari-00000001</ramdisk_id>
+ </machine_configuration>
+</manifest>
+"""
+
+file_manifest_xml = """<?xml version="1.0" ?>
+<manifest>
+ <image>
+ <ec2_encrypted_key>foo</ec2_encrypted_key>
+ <user_encrypted_key>foo</user_encrypted_key>
+ <ec2_encrypted_iv>foo</ec2_encrypted_iv>
+ <parts count="1">
+ <part index="0">
+ <filename>foo</filename>
+ </part>
+ </parts>
+ </image>
+</manifest>
+"""
+
+
+class TestS3ImageService(test.TestCase):
+ def setUp(self):
+ super(TestS3ImageService, self).setUp()
+ self.context = context.RequestContext(None, None)
+ self.useFixture(fixtures.FakeLogger('boto'))
+
+ # set up 3 fixtures to test shows, should have id '1', '2', and '3'
+ db.s3_image_create(self.context,
+ '155d900f-4e14-4e4c-a73d-069cbf4541e6')
+ db.s3_image_create(self.context,
+ 'a2459075-d96c-40d5-893e-577ff92e721c')
+ db.s3_image_create(self.context,
+ '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
+
+ fake.stub_out_image_service(self.stubs)
+ self.image_service = s3.S3ImageService()
+ ec2utils.reset_cache()
+
+ def tearDown(self):
+ super(TestS3ImageService, self).tearDown()
+ fake.FakeImageService_reset()
+
+ def _assertEqualList(self, list0, list1, keys):
+ self.assertEqual(len(list0), len(list1))
+ key = keys[0]
+ for x in list0:
+ self.assertEqual(len(x), len(keys))
+ self.assertIn(key, x)
+ for y in list1:
+ self.assertIn(key, y)
+ if x[key] == y[key]:
+ for k in keys:
+ self.assertEqual(x[k], y[k])
+
+ def test_show_cannot_use_uuid(self):
+ self.assertRaises(exception.ImageNotFound,
+ self.image_service.show, self.context,
+ '155d900f-4e14-4e4c-a73d-069cbf4541e6')
+
+ def test_show_translates_correctly(self):
+ self.image_service.show(self.context, '1')
+
+ def test_show_translates_image_state_correctly(self):
+ def my_fake_show(self, context, image_id, **kwargs):
+ fake_state_map = {
+ '155d900f-4e14-4e4c-a73d-069cbf4541e6': 'downloading',
+ 'a2459075-d96c-40d5-893e-577ff92e721c': 'failed_decrypt',
+ '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6': 'available'}
+ return {'id': image_id,
+ 'name': 'fakeimage123456',
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'raw',
+ 'disk_format': 'raw',
+ 'size': '25165824',
+ 'properties': {'image_state': fake_state_map[image_id]}}
+
+ # Override part of the fake image service as well just for
+ # this test so we can set the image_state to various values
+ # and test that S3ImageService does the correct mapping for
+ # us. We can't put fake bad or pending states in the real fake
+ # image service as it causes other tests to fail
+ self.stubs.Set(fake._FakeImageService, 'show', my_fake_show)
+ ret_image = self.image_service.show(self.context, '1')
+ self.assertEqual(ret_image['properties']['image_state'], 'pending')
+ ret_image = self.image_service.show(self.context, '2')
+ self.assertEqual(ret_image['properties']['image_state'], 'failed')
+ ret_image = self.image_service.show(self.context, '3')
+ self.assertEqual(ret_image['properties']['image_state'], 'available')
+
+ def test_detail(self):
+ self.image_service.detail(self.context)
+
+ def test_s3_create(self):
+ metadata = {'properties': {
+ 'root_device_name': '/dev/sda1',
+ 'block_device_mapping': [
+ {'device_name': '/dev/sda1',
+ 'snapshot_id': 'snap-12345678',
+ 'delete_on_termination': True},
+ {'device_name': '/dev/sda2',
+ 'virtual_name': 'ephemeral0'},
+ {'device_name': '/dev/sdb0',
+ 'no_device': True}]}}
+ _manifest, image, image_uuid = self.image_service._s3_parse_manifest(
+ self.context, metadata, ami_manifest_xml)
+
+ ret_image = self.image_service.show(self.context, image['id'])
+ self.assertIn('properties', ret_image)
+ properties = ret_image['properties']
+
+ self.assertIn('mappings', properties)
+ mappings = properties['mappings']
+ expected_mappings = [
+ {"device": "sda1", "virtual": "ami"},
+ {"device": "/dev/sda1", "virtual": "root"},
+ {"device": "sda2", "virtual": "ephemeral0"},
+ {"device": "sda3", "virtual": "swap"}]
+ self._assertEqualList(mappings, expected_mappings,
+ ['device', 'virtual'])
+
+ self.assertIn('block_device_mapping', properties)
+ block_device_mapping = properties['block_device_mapping']
+ expected_bdm = [
+ {'device_name': '/dev/sda1',
+ 'snapshot_id': 'snap-12345678',
+ 'delete_on_termination': True},
+ {'device_name': '/dev/sda2',
+ 'virtual_name': 'ephemeral0'},
+ {'device_name': '/dev/sdb0',
+ 'no_device': True}]
+ self.assertEqual(block_device_mapping, expected_bdm)
+
+ def _initialize_mocks(self):
+ handle, tempf = tempfile.mkstemp(dir='/tmp')
+ ignore = mox.IgnoreArg()
+ mockobj = self.mox.CreateMockAnything()
+ self.stubs.Set(self.image_service, '_conn', mockobj)
+ mockobj(ignore).AndReturn(mockobj)
+ self.stubs.Set(mockobj, 'get_bucket', mockobj)
+ mockobj(ignore).AndReturn(mockobj)
+ self.stubs.Set(mockobj, 'get_key', mockobj)
+ mockobj(ignore).AndReturn(mockobj)
+ self.stubs.Set(mockobj, 'get_contents_as_string', mockobj)
+ mockobj().AndReturn(file_manifest_xml)
+ self.stubs.Set(self.image_service, '_download_file', mockobj)
+ mockobj(ignore, ignore, ignore).AndReturn(tempf)
+ self.stubs.Set(binascii, 'a2b_hex', mockobj)
+ mockobj(ignore).AndReturn('foo')
+ mockobj(ignore).AndReturn('foo')
+ self.stubs.Set(self.image_service, '_decrypt_image', mockobj)
+ mockobj(ignore, ignore, ignore, ignore, ignore).AndReturn(mockobj)
+ self.stubs.Set(self.image_service, '_untarzip_image', mockobj)
+ mockobj(ignore, ignore).AndReturn(tempf)
+ self.mox.ReplayAll()
+
+ def test_s3_create_image_locations(self):
+ image_location_1 = 'testbucket_1/test.img.manifest.xml'
+ # Use another location that starts with a '/'
+ image_location_2 = '/testbucket_2/test.img.manifest.xml'
+
+ metadata = [{'properties': {'image_location': image_location_1}},
+ {'properties': {'image_location': image_location_2}}]
+
+ for mdata in metadata:
+ self._initialize_mocks()
+ image = self.image_service._s3_create(self.context, mdata)
+ eventlet.sleep()
+ translated = self.image_service._translate_id_to_uuid(self.context,
+ image)
+ uuid = translated['id']
+ image_service = fake.FakeImageService()
+ updated_image = image_service.update(self.context, uuid,
+ {'properties': {'image_state': 'available'}},
+ purge_props=False)
+ self.assertEqual(updated_image['properties']['image_state'],
+ 'available')
+
+ def test_s3_create_is_public(self):
+ self._initialize_mocks()
+ metadata = {'properties': {
+ 'image_location': 'mybucket/my.img.manifest.xml'},
+ 'name': 'mybucket/my.img'}
+ img = self.image_service._s3_create(self.context, metadata)
+ eventlet.sleep()
+ translated = self.image_service._translate_id_to_uuid(self.context,
+ img)
+ uuid = translated['id']
+ image_service = fake.FakeImageService()
+ updated_image = image_service.update(self.context, uuid,
+ {'is_public': True}, purge_props=False)
+ self.assertTrue(updated_image['is_public'])
+ self.assertEqual(updated_image['status'], 'active')
+ self.assertEqual(updated_image['properties']['image_state'],
+ 'available')
+
+ def test_s3_malicious_tarballs(self):
+ self.assertRaises(exception.NovaException,
+ self.image_service._test_for_malicious_tarball,
+ "/unused", os.path.join(os.path.dirname(__file__), 'abs.tar.gz'))
+ self.assertRaises(exception.NovaException,
+ self.image_service._test_for_malicious_tarball,
+ "/unused", os.path.join(os.path.dirname(__file__), 'rel.tar.gz'))
diff --git a/nova/tests/image/test_transfer_modules.py b/nova/tests/unit/image/test_transfer_modules.py
index 51920c36aa..51920c36aa 100644
--- a/nova/tests/image/test_transfer_modules.py
+++ b/nova/tests/unit/image/test_transfer_modules.py
diff --git a/nova/tests/image_fixtures.py b/nova/tests/unit/image_fixtures.py
index 9ab09b989a..9ab09b989a 100644
--- a/nova/tests/image_fixtures.py
+++ b/nova/tests/unit/image_fixtures.py
diff --git a/nova/tests/integrated/__init__.py b/nova/tests/unit/integrated/__init__.py
index 16b4b921b7..16b4b921b7 100644
--- a/nova/tests/integrated/__init__.py
+++ b/nova/tests/unit/integrated/__init__.py
diff --git a/nova/tests/integrated/api/__init__.py b/nova/tests/unit/integrated/api/__init__.py
index 6168280c24..6168280c24 100644
--- a/nova/tests/integrated/api/__init__.py
+++ b/nova/tests/unit/integrated/api/__init__.py
diff --git a/nova/tests/unit/integrated/api/client.py b/nova/tests/unit/integrated/api/client.py
new file mode 100644
index 0000000000..733592ec26
--- /dev/null
+++ b/nova/tests/unit/integrated/api/client.py
@@ -0,0 +1,304 @@
+# Copyright (c) 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import urllib
+
+from oslo.serialization import jsonutils
+import requests
+
+from nova.i18n import _
+from nova.openstack.common import log as logging
+from nova.tests.unit.image import fake
+
+
+LOG = logging.getLogger(__name__)
+
+
+class OpenStackApiException(Exception):
+ def __init__(self, message=None, response=None):
+ self.response = response
+ if not message:
+ message = 'Unspecified error'
+
+ if response:
+ _status = response.status_code
+ _body = response.content
+
+ message = (_('%(message)s\nStatus Code: %(_status)s\n'
+ 'Body: %(_body)s') %
+ {'message': message, '_status': _status,
+ '_body': _body})
+
+ super(OpenStackApiException, self).__init__(message)
+
+
+class OpenStackApiAuthenticationException(OpenStackApiException):
+ def __init__(self, response=None, message=None):
+ if not message:
+ message = _("Authentication error")
+ super(OpenStackApiAuthenticationException, self).__init__(message,
+ response)
+
+
+class OpenStackApiAuthorizationException(OpenStackApiException):
+ def __init__(self, response=None, message=None):
+ if not message:
+ message = _("Authorization error")
+ super(OpenStackApiAuthorizationException, self).__init__(message,
+ response)
+
+
+class OpenStackApiNotFoundException(OpenStackApiException):
+ def __init__(self, response=None, message=None):
+ if not message:
+ message = _("Item not found")
+ super(OpenStackApiNotFoundException, self).__init__(message, response)
+
+
+class TestOpenStackClient(object):
+ """Simple OpenStack API Client.
+
+ This is a really basic OpenStack API client that is under our control,
+ so we can make changes / insert hooks for testing
+
+ """
+
+ def __init__(self, auth_user, auth_key, auth_uri):
+ super(TestOpenStackClient, self).__init__()
+ self.auth_result = None
+ self.auth_user = auth_user
+ self.auth_key = auth_key
+ self.auth_uri = auth_uri
+ # default project_id
+ self.project_id = 'openstack'
+
+ def request(self, url, method='GET', body=None, headers=None):
+ _headers = {'Content-Type': 'application/json'}
+ _headers.update(headers or {})
+
+ response = requests.request(method, url, data=body, headers=_headers)
+ return response
+
+ def _authenticate(self):
+ if self.auth_result:
+ return self.auth_result
+
+ auth_uri = self.auth_uri
+ headers = {'X-Auth-User': self.auth_user,
+ 'X-Auth-Key': self.auth_key,
+ 'X-Auth-Project-Id': self.project_id}
+ response = self.request(auth_uri,
+ headers=headers)
+
+ http_status = response.status_code
+ LOG.debug("%(auth_uri)s => code %(http_status)s",
+ {'auth_uri': auth_uri, 'http_status': http_status})
+
+ if http_status == 401:
+ raise OpenStackApiAuthenticationException(response=response)
+
+ self.auth_result = response.headers
+ return self.auth_result
+
+ def api_request(self, relative_uri, check_response_status=None,
+ strip_version=False, **kwargs):
+ auth_result = self._authenticate()
+
+ # NOTE(justinsb): httplib 'helpfully' converts headers to lower case
+ base_uri = auth_result['x-server-management-url']
+ if strip_version:
+ # NOTE(vish): cut out version number and tenant_id
+ base_uri = '/'.join(base_uri.split('/', 3)[:-1])
+
+ full_uri = '%s/%s' % (base_uri, relative_uri)
+
+ headers = kwargs.setdefault('headers', {})
+ headers['X-Auth-Token'] = auth_result['x-auth-token']
+
+ response = self.request(full_uri, **kwargs)
+
+ http_status = response.status_code
+ LOG.debug("%(relative_uri)s => code %(http_status)s",
+ {'relative_uri': relative_uri, 'http_status': http_status})
+
+ if check_response_status:
+ if http_status not in check_response_status:
+ if http_status == 404:
+ raise OpenStackApiNotFoundException(response=response)
+ elif http_status == 401:
+ raise OpenStackApiAuthorizationException(response=response)
+ else:
+ raise OpenStackApiException(
+ message=_("Unexpected status code"),
+ response=response)
+
+ return response
+
+ def _decode_json(self, response):
+ body = response.content
+ LOG.debug("Decoding JSON: %s", body)
+ if body:
+ return jsonutils.loads(body)
+ else:
+ return ""
+
+ def api_get(self, relative_uri, **kwargs):
+ kwargs.setdefault('check_response_status', [200])
+ response = self.api_request(relative_uri, **kwargs)
+ return self._decode_json(response)
+
+ def api_post(self, relative_uri, body, **kwargs):
+ kwargs['method'] = 'POST'
+ if body:
+ headers = kwargs.setdefault('headers', {})
+ headers['Content-Type'] = 'application/json'
+ kwargs['body'] = jsonutils.dumps(body)
+
+ kwargs.setdefault('check_response_status', [200, 202])
+ response = self.api_request(relative_uri, **kwargs)
+ return self._decode_json(response)
+
+ def api_put(self, relative_uri, body, **kwargs):
+ kwargs['method'] = 'PUT'
+ if body:
+ headers = kwargs.setdefault('headers', {})
+ headers['Content-Type'] = 'application/json'
+ kwargs['body'] = jsonutils.dumps(body)
+
+ kwargs.setdefault('check_response_status', [200, 202, 204])
+ response = self.api_request(relative_uri, **kwargs)
+ return self._decode_json(response)
+
+ def api_delete(self, relative_uri, **kwargs):
+ kwargs['method'] = 'DELETE'
+ kwargs.setdefault('check_response_status', [200, 202, 204])
+ return self.api_request(relative_uri, **kwargs)
+
+ def get_server(self, server_id):
+ return self.api_get('/servers/%s' % server_id)['server']
+
+ def get_servers(self, detail=True, search_opts=None):
+ rel_url = '/servers/detail' if detail else '/servers'
+
+ if search_opts is not None:
+ qparams = {}
+ for opt, val in search_opts.iteritems():
+ qparams[opt] = val
+ if qparams:
+ query_string = "?%s" % urllib.urlencode(qparams)
+ rel_url += query_string
+ return self.api_get(rel_url)['servers']
+
+ def post_server(self, server):
+ response = self.api_post('/servers', server)
+ if 'reservation_id' in response:
+ return response
+ else:
+ return response['server']
+
+ def put_server(self, server_id, server):
+ return self.api_put('/servers/%s' % server_id, server)
+
+ def post_server_action(self, server_id, data):
+ return self.api_post('/servers/%s/action' % server_id, data)
+
+ def delete_server(self, server_id):
+ return self.api_delete('/servers/%s' % server_id)
+
+ def get_image(self, image_id):
+ return self.api_get('/images/%s' % image_id)['image']
+
+ def get_images(self, detail=True):
+ rel_url = '/images/detail' if detail else '/images'
+ return self.api_get(rel_url)['images']
+
+ def post_image(self, image):
+ return self.api_post('/images', image)['image']
+
+ def delete_image(self, image_id):
+ return self.api_delete('/images/%s' % image_id)
+
+ def get_flavor(self, flavor_id):
+ return self.api_get('/flavors/%s' % flavor_id)['flavor']
+
+ def get_flavors(self, detail=True):
+ rel_url = '/flavors/detail' if detail else '/flavors'
+ return self.api_get(rel_url)['flavors']
+
+ def post_flavor(self, flavor):
+ return self.api_post('/flavors', flavor)['flavor']
+
+ def delete_flavor(self, flavor_id):
+ return self.api_delete('/flavors/%s' % flavor_id)
+
+ def get_volume(self, volume_id):
+ return self.api_get('/volumes/%s' % volume_id)['volume']
+
+ def get_volumes(self, detail=True):
+ rel_url = '/volumes/detail' if detail else '/volumes'
+ return self.api_get(rel_url)['volumes']
+
+ def post_volume(self, volume):
+ return self.api_post('/volumes', volume)['volume']
+
+ def delete_volume(self, volume_id):
+ return self.api_delete('/volumes/%s' % volume_id)
+
+ def get_server_volume(self, server_id, attachment_id):
+ return self.api_get('/servers/%s/os-volume_attachments/%s' %
+ (server_id, attachment_id))['volumeAttachment']
+
+ def get_server_volumes(self, server_id):
+ return self.api_get('/servers/%s/os-volume_attachments' %
+ (server_id))['volumeAttachments']
+
+ def post_server_volume(self, server_id, volume_attachment):
+ return self.api_post('/servers/%s/os-volume_attachments' %
+ (server_id), volume_attachment
+ )['volumeAttachment']
+
+ def delete_server_volume(self, server_id, attachment_id):
+ return self.api_delete('/servers/%s/os-volume_attachments/%s' %
+ (server_id, attachment_id))
+
+
+class TestOpenStackClientV3(TestOpenStackClient):
+ """Simple OpenStack v3 API Client.
+
+ This is a really basic OpenStack API client that is under our control,
+ so we can make changes / insert hooks for testing.
+
+ Note that the V3 API does not have an image API and so it is
+ not possible to query the api for the image information.
+ So instead we just access the fake image service used by the unittests
+ directly.
+
+ """
+
+ def get_image(self, image_id):
+ return fake._fakeImageService.show(None, image_id)
+
+ def get_images(self, detail=True):
+ return fake._fakeImageService.detail(None)
+
+ def post_image(self, image):
+ raise NotImplementedError
+
+ def delete_image(self, image_id):
+ return fake._fakeImageService.delete(None, image_id)
+
+
+class TestOpenStackClientV3Mixin(object):
+ def _get_test_client(self):
+ return TestOpenStackClientV3('fake', 'fake', self.auth_url)
diff --git a/nova/tests/integrated/api_samples/NMN/multinic-add-fixed-ip-req.json.tpl b/nova/tests/unit/integrated/api_samples/NMN/multinic-add-fixed-ip-req.json.tpl
index b9744ab2c7..b9744ab2c7 100644
--- a/nova/tests/integrated/api_samples/NMN/multinic-add-fixed-ip-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/NMN/multinic-add-fixed-ip-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/NMN/multinic-add-fixed-ip-req.xml.tpl b/nova/tests/unit/integrated/api_samples/NMN/multinic-add-fixed-ip-req.xml.tpl
index ad11129129..ad11129129 100644
--- a/nova/tests/integrated/api_samples/NMN/multinic-add-fixed-ip-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/NMN/multinic-add-fixed-ip-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.json.tpl b/nova/tests/unit/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.json.tpl
index 7367e1242c..7367e1242c 100644
--- a/nova/tests/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.xml.tpl b/nova/tests/unit/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.xml.tpl
index 10b722220f..10b722220f 100644
--- a/nova/tests/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/NMN/multinic-remove-fixed-ip-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/NMN/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/NMN/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/NMN/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/NMN/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/NMN/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/NMN/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/NMN/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/NMN/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/NMN/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/NMN/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/NMN/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/NMN/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/NMN/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/NMN/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/NMN/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/NMN/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/image-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/image-get-resp.json.tpl
index 72ff4448b8..72ff4448b8 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/image-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/image-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/image-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/image-get-resp.xml.tpl
index bf82d296d1..bf82d296d1 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/image-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/image-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/image-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/image-list-resp.json.tpl
index 62f02287f8..62f02287f8 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/image-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/image-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/image-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/image-list-resp.xml.tpl
index b4213312d7..b4213312d7 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/image-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/image-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/list-servers-detail-get.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/list-servers-detail-get.json.tpl
index 37c8e57dba..37c8e57dba 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/list-servers-detail-get.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/list-servers-detail-get.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/list-servers-detail-get.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/list-servers-detail-get.xml.tpl
index 36b53957c6..36b53957c6 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/list-servers-detail-get.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/list-servers-detail-get.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-req.json.tpl
index b239818a8a..b239818a8a 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-req.xml.tpl
index 93bfb0d4e9..93bfb0d4e9 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-resp.json.tpl
index 9b9f188023..9b9f188023 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-resp.xml.tpl
index 5835392c31..5835392c31 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/server-action-rebuild-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-action-rebuild-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-get-resp.json.tpl
index d9cc795728..d9cc795728 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-get-resp.xml.tpl
index b9e8a2b365..b9e8a2b365 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/server-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-req.json.tpl
index 81b89adf23..81b89adf23 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-req.xml.tpl
index fcfada031b..fcfada031b 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-resp.json.tpl
index 7c8371f161..7c8371f161 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-resp.xml.tpl
index 1309e6dfee..1309e6dfee 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-resize-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-resize-post-req.json.tpl
index a290485e1c..a290485e1c 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/server-resize-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-resize-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-resize-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-resize-post-req.xml.tpl
index aa0b0b67a3..aa0b0b67a3 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/server-resize-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-resize-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-update-put-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-req.json.tpl
index 4ac22820df..4ac22820df 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/server-update-put-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-update-put-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-req.xml.tpl
index 8088846987..8088846987 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/server-update-put-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-update-put-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-resp.json.tpl
index d9cc795728..d9cc795728 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/server-update-put-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-DCF/server-update-put-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-resp.xml.tpl
index cb8c662442..cb8c662442 100644
--- a/nova/tests/integrated/api_samples/OS-DCF/server-update-put-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-DCF/server-update-put-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-get-resp.json.tpl
index a852da1207..a852da1207 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-get-resp.xml.tpl
index 2c4cdc07f2..2c4cdc07f2 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-AZ/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.json.tpl
index b0ddc7c051..b0ddc7c051 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.xml.tpl
index 35e1618678..35e1618678 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-AZ/servers-detail-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json.tpl
index c460cd0260..c460cd0260 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml.tpl
index e8dffa3ba4..e8dffa3ba4 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/image-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json.tpl
index 16d62deb4d..16d62deb4d 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml.tpl
index 586c8ed46d..586c8ed46d 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IMG-SIZE/images-details-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.json.tpl
index 7b9f1ba519..7b9f1ba519 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.xml.tpl
index 49d8a8e2bf..49d8a8e2bf 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.json.tpl
index 743abc7c70..743abc7c70 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.xml.tpl
index 23dda7c583..23dda7c583 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS-MAC/servers-detail-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-get-resp.json.tpl
index acc784fb18..acc784fb18 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IPS/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-get-resp.xml.tpl
index aa78378f84..aa78378f84 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IPS/server-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IPS/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.json.tpl
index 26d812ceb8..26d812ceb8 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.xml.tpl
index d3b5c524d3..d3b5c524d3 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-IPS/servers-detail-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json.tpl
index 398f0f7027..398f0f7027 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml.tpl
index 10495ff9d5..10495ff9d5 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json.tpl
index 81f247192a..81f247192a 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml.tpl
index f7da4086a3..f7da4086a3 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-SRV-ATTR/servers-detail-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-get-resp.json.tpl
index fc48b73a4e..fc48b73a4e 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-get-resp.xml.tpl
index 6b28dde2da..6b28dde2da 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-STS/server-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-STS/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/servers-detail-resp.json.tpl
index 94b3e2200c..94b3e2200c 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-STS/servers-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/servers-detail-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-STS/servers-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/servers-detail-resp.xml.tpl
index 89c3b9396e..89c3b9396e 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-STS/servers-detail-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-STS/servers-detail-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.json.tpl
index 684b93448d..684b93448d 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.xml.tpl
index adf7d7baa3..adf7d7baa3 100644
--- a/nova/tests/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-EXT-VIF-NET/vifs-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.json.tpl
index 04083d063c..04083d063c 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.xml.tpl
index 5d73195fad..5d73195fad 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-detail-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.json.tpl
index a47af7b187..a47af7b187 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.xml.tpl
index 13908e2ac6..13908e2ac6 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-DISABLED/flavor-show-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.json.tpl
index b86db0a461..b86db0a461 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.xml.tpl
index da45536c37..da45536c37 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.json.tpl
index a798262f35..a798262f35 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.xml.tpl
index 5ba4631884..5ba4631884 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json.tpl
index 64385ad682..64385ad682 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml.tpl
index df74ab383f..df74ab383f 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.json.tpl
index 3a92dabca8..3a92dabca8 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml.tpl
index df7fc07a32..df7fc07a32 100644
--- a/nova/tests/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-FLV-EXT-DATA/flavors-extra-data-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.json.tpl
index 1a19960c24..1a19960c24 100644
--- a/nova/tests/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.xml.tpl
index a680e3476e..a680e3476e 100644
--- a/nova/tests/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.xml.tpl
index 5a11c73c3c..5a11c73c3c 100644
--- a/nova/tests/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-SCH-HNT/scheduler-hints-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-SRV-USG/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-get-resp.json.tpl
index 2b2121d34b..2b2121d34b 100644
--- a/nova/tests/integrated/api_samples/OS-SRV-USG/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-SRV-USG/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-get-resp.xml.tpl
index ee35f36fa0..ee35f36fa0 100644
--- a/nova/tests/integrated/api_samples/OS-SRV-USG/server-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-SRV-USG/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/OS-SRV-USG/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-SRV-USG/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/OS-SRV-USG/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-SRV-USG/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/OS-SRV-USG/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-SRV-USG/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/OS-SRV-USG/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/OS-SRV-USG/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/servers-detail-resp.json.tpl
index bef5a2002a..bef5a2002a 100644
--- a/nova/tests/integrated/api_samples/OS-SRV-USG/servers-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/servers-detail-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/OS-SRV-USG/servers-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/servers-detail-resp.xml.tpl
index bce5e2eabe..bce5e2eabe 100644
--- a/nova/tests/integrated/api_samples/OS-SRV-USG/servers-detail-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/OS-SRV-USG/servers-detail-resp.xml.tpl
diff --git a/nova/tests/unit/integrated/api_samples/README.rst b/nova/tests/unit/integrated/api_samples/README.rst
new file mode 100644
index 0000000000..a08cac3a42
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples/README.rst
@@ -0,0 +1,29 @@
+API Samples
+===========
+
+This part of the tree contains templates for API samples. The
+documentation in doc/api_samples is completely autogenerated from the
+tests in this directory.
+
+To add a new api sample, add tests for the common passing and failing
+cases in this directory for your extension, and modify test_samples.py
+for your tests. There should be both JSON and XML tests included.
+
+Then run the following command:
+
+ GENERATE_SAMPLES=True tox -epy27 nova.tests.unit.integrated
+
+This command will create the files in doc/api_samples.
+
+If new tests are added or the .tpl files are changed due to bug fixes, the
+samples must be regenerated so they are in sync with the templates, as
+there is an additional test which reloads the documentation and
+ensures that it's in sync.
+
+Debugging sample generation
+---------------------------
+
+If a .tpl is changed, its matching .xml and .json must be removed,
+or else the samples won't be generated. If an entirely new extension is
+added, a directory for it must be created before its samples will
+be generated.
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
index 668e282e2b..668e282e2b 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
index eaa679f35f..eaa679f35f 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/flavor-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/flavor-get-resp.json.tpl
index b68bc3c979..b68bc3c979 100644
--- a/nova/tests/integrated/api_samples/all_extensions/flavor-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/flavor-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/flavor-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/flavor-get-resp.xml.tpl
index 53f870ec4d..53f870ec4d 100644
--- a/nova/tests/integrated/api_samples/all_extensions/flavor-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/flavor-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/flavors-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/flavors-list-resp.json.tpl
index ab86d2a52a..ab86d2a52a 100644
--- a/nova/tests/integrated/api_samples/all_extensions/flavors-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/flavors-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/flavors-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/flavors-list-resp.xml.tpl
index 435f96be56..435f96be56 100644
--- a/nova/tests/integrated/api_samples/all_extensions/flavors-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/flavors-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-action-changepassword.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-changepassword.json.tpl
index da615718fe..da615718fe 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-action-changepassword.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-changepassword.json.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-action-changepassword.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-changepassword.xml.tpl
index 6c343024e2..6c343024e2 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-action-changepassword.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-changepassword.xml.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-action-confirmresize.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-confirmresize.json.tpl
index 432f6126e9..432f6126e9 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-action-confirmresize.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-confirmresize.json.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-action-confirmresize.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-confirmresize.xml.tpl
index 18f07bd67b..18f07bd67b 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-action-confirmresize.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-confirmresize.xml.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-action-createimage.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-createimage.json.tpl
index 0b9e39ffb3..0b9e39ffb3 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-action-createimage.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-createimage.json.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-action-createimage.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-createimage.xml.tpl
index aa1eccf8a5..aa1eccf8a5 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-action-createimage.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-createimage.xml.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-action-reboot.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-reboot.json.tpl
index 18eda9b9ab..18eda9b9ab 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-action-reboot.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-reboot.json.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-action-reboot.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-reboot.xml.tpl
index d4cfe198c7..d4cfe198c7 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-action-reboot.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-reboot.xml.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-action-rebuild-resp.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild-resp.json.tpl
index 8705a8749c..8705a8749c 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-action-rebuild-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-action-rebuild-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild-resp.xml.tpl
index 6fa0505367..6fa0505367 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-action-rebuild-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-action-rebuild.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild.json.tpl
index 273906a349..273906a349 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-action-rebuild.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild.json.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-action-rebuild.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild.xml.tpl
index bd42f88b22..bd42f88b22 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-action-rebuild.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-rebuild.xml.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-action-resize.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-resize.json.tpl
index 468a88da24..468a88da24 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-action-resize.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-resize.json.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-action-resize.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-resize.xml.tpl
index cbe49ea59a..cbe49ea59a 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-action-resize.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-resize.xml.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-action-revertresize.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-revertresize.json.tpl
index 2ddf6e5ab0..2ddf6e5ab0 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-action-revertresize.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-revertresize.json.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-action-revertresize.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-revertresize.xml.tpl
index 5c13bbdc0c..5c13bbdc0c 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-action-revertresize.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-action-revertresize.xml.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-get-resp.json.tpl
index c83ab91068..c83ab91068 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-get-resp.xml.tpl
index 1bd75a99b9..1bd75a99b9 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-post-resp.json.tpl
index c931eb3fdc..c931eb3fdc 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/server-post-resp.xml.tpl
index d7dc316552..d7dc316552 100644
--- a/nova/tests/integrated/api_samples/all_extensions/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/servers-details-resp.json.tpl
index d2aea31149..d2aea31149 100644
--- a/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/servers-details-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl
index bf8dc083cd..bf8dc083cd 100644
--- a/nova/tests/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/servers-details-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/servers-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/servers-list-resp.json.tpl
index 8b97dc28d7..8b97dc28d7 100644
--- a/nova/tests/integrated/api_samples/all_extensions/servers-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/servers-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/all_extensions/servers-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/all_extensions/servers-list-resp.xml.tpl
index 03bee03a6e..03bee03a6e 100644
--- a/nova/tests/integrated/api_samples/all_extensions/servers-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/all_extensions/servers-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/flavor-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/flavor-get-resp.json.tpl
index 723be2898f..723be2898f 100644
--- a/nova/tests/integrated/api_samples/flavor-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/flavor-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/flavor-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/flavor-get-resp.xml.tpl
index 5925c588d0..5925c588d0 100644
--- a/nova/tests/integrated/api_samples/flavor-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/flavor-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/flavors-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/flavors-list-resp.json.tpl
index fb9a8ff1f6..fb9a8ff1f6 100644
--- a/nova/tests/integrated/api_samples/flavors-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/flavors-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/flavors-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/flavors-list-resp.xml.tpl
index 435f96be56..435f96be56 100644
--- a/nova/tests/integrated/api_samples/flavors-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/flavors-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/image-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/image-get-resp.json.tpl
index 3d260b7e90..3d260b7e90 100644
--- a/nova/tests/integrated/api_samples/image-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/image-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/image-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/image-get-resp.xml.tpl
index 2a69728071..2a69728071 100644
--- a/nova/tests/integrated/api_samples/image-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/image-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/image-meta-key-get.json.tpl b/nova/tests/unit/integrated/api_samples/image-meta-key-get.json.tpl
index 6d022eb97d..6d022eb97d 100644
--- a/nova/tests/integrated/api_samples/image-meta-key-get.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/image-meta-key-get.json.tpl
diff --git a/nova/tests/integrated/api_samples/image-meta-key-get.xml.tpl b/nova/tests/unit/integrated/api_samples/image-meta-key-get.xml.tpl
index 1de6b40781..1de6b40781 100644
--- a/nova/tests/integrated/api_samples/image-meta-key-get.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/image-meta-key-get.xml.tpl
diff --git a/nova/tests/integrated/api_samples/image-meta-key-put-req.json.tpl b/nova/tests/unit/integrated/api_samples/image-meta-key-put-req.json.tpl
index 7d8ab69a51..7d8ab69a51 100644
--- a/nova/tests/integrated/api_samples/image-meta-key-put-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/image-meta-key-put-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/image-meta-key-put-req.xml.tpl b/nova/tests/unit/integrated/api_samples/image-meta-key-put-req.xml.tpl
index 319e075eef..319e075eef 100644
--- a/nova/tests/integrated/api_samples/image-meta-key-put-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/image-meta-key-put-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/image-meta-key-put-resp.json.tpl b/nova/tests/unit/integrated/api_samples/image-meta-key-put-resp.json.tpl
index 3db563ec14..3db563ec14 100644
--- a/nova/tests/integrated/api_samples/image-meta-key-put-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/image-meta-key-put-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/image-meta-key-put-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/image-meta-key-put-resp.xml.tpl
index c989c38a2d..c989c38a2d 100644
--- a/nova/tests/integrated/api_samples/image-meta-key-put-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/image-meta-key-put-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/image-metadata-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-get-resp.json.tpl
index 588f688d5a..588f688d5a 100644
--- a/nova/tests/integrated/api_samples/image-metadata-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/image-metadata-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-get-resp.xml.tpl
index 8409016bf4..8409016bf4 100644
--- a/nova/tests/integrated/api_samples/image-metadata-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/image-metadata-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-post-req.json.tpl
index b51e5f00fc..b51e5f00fc 100644
--- a/nova/tests/integrated/api_samples/image-metadata-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/image-metadata-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-post-req.xml.tpl
index 6170aab5ae..6170aab5ae 100644
--- a/nova/tests/integrated/api_samples/image-metadata-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/image-metadata-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-post-resp.json.tpl
index 9479bb3395..9479bb3395 100644
--- a/nova/tests/integrated/api_samples/image-metadata-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/image-metadata-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-post-resp.xml.tpl
index 1d96bd9af5..1d96bd9af5 100644
--- a/nova/tests/integrated/api_samples/image-metadata-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/image-metadata-put-req.json.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-put-req.json.tpl
index eec6152d77..eec6152d77 100644
--- a/nova/tests/integrated/api_samples/image-metadata-put-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-put-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/image-metadata-put-req.xml.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-put-req.xml.tpl
index e5f5d8991c..e5f5d8991c 100644
--- a/nova/tests/integrated/api_samples/image-metadata-put-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-put-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/image-metadata-put-resp.json.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-put-resp.json.tpl
index c8c5ee9c4a..c8c5ee9c4a 100644
--- a/nova/tests/integrated/api_samples/image-metadata-put-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-put-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/image-metadata-put-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-put-resp.xml.tpl
index 7011871cc9..7011871cc9 100644
--- a/nova/tests/integrated/api_samples/image-metadata-put-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-put-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/image-metadata-resp.json.tpl b/nova/tests/unit/integrated/api_samples/image-metadata-resp.json.tpl
index 657f0b1974..657f0b1974 100644
--- a/nova/tests/integrated/api_samples/image-metadata-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/image-metadata-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/images-details-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/images-details-get-resp.json.tpl
index e353b98956..e353b98956 100644
--- a/nova/tests/integrated/api_samples/images-details-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/images-details-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/images-details-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/images-details-get-resp.xml.tpl
index 2194789790..2194789790 100644
--- a/nova/tests/integrated/api_samples/images-details-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/images-details-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/images-details-resp.json.tpl b/nova/tests/unit/integrated/api_samples/images-details-resp.json.tpl
index 5cd76d6c6b..5cd76d6c6b 100644
--- a/nova/tests/integrated/api_samples/images-details-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/images-details-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/images-details-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/images-details-resp.xml.tpl
index aa1d973b9a..aa1d973b9a 100644
--- a/nova/tests/integrated/api_samples/images-details-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/images-details-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/images-list-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/images-list-get-resp.json.tpl
index 83e6accada..83e6accada 100644
--- a/nova/tests/integrated/api_samples/images-list-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/images-list-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/images-list-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/images-list-get-resp.xml.tpl
index 71b9bfc8bf..71b9bfc8bf 100644
--- a/nova/tests/integrated/api_samples/images-list-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/images-list-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/images-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/images-list-resp.json.tpl
index 6ed1616770..6ed1616770 100644
--- a/nova/tests/integrated/api_samples/images-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/images-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/images-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/images-list-resp.xml.tpl
index 701e958926..701e958926 100644
--- a/nova/tests/integrated/api_samples/images-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/images-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/limit-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/limit-get-resp.json.tpl
index f5b30047da..f5b30047da 100644
--- a/nova/tests/integrated/api_samples/limit-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/limit-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/limit-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/limit-get-resp.xml.tpl
index ecc7b3b5da..ecc7b3b5da 100644
--- a/nova/tests/integrated/api_samples/limit-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/limit-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-backup-server.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-backup-server.json.tpl
index 60f5e1d9fe..60f5e1d9fe 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-backup-server.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-backup-server.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-backup-server.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-backup-server.xml.tpl
index f2f9024bd7..f2f9024bd7 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-backup-server.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-backup-server.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl
index 62e16737b0..62e16737b0 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.xml.tpl
index e5b71ffcdb..e5b71ffcdb 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-inject-network-info.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.json.tpl
index 4800d4aa11..4800d4aa11 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.xml.tpl
index 88ead85f20..88ead85f20 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-live-migrate.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-lock-server.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-lock-server.json.tpl
index a1863f2f39..a1863f2f39 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-lock-server.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-lock-server.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-lock-server.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-lock-server.xml.tpl
index f86b130547..f86b130547 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-lock-server.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-lock-server.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-migrate.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-migrate.json.tpl
index a9bf8c483e..a9bf8c483e 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-migrate.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-migrate.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-migrate.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-migrate.xml.tpl
index 431284448d..431284448d 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-migrate.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-migrate.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-pause.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-pause.json.tpl
index 2e7c1fad30..2e7c1fad30 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-pause.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-pause.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-pause.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-pause.xml.tpl
index a37359338c..a37359338c 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-pause.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-pause.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl
index 7c79cb68a5..7c79cb68a5 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-network.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-network.xml.tpl
index 6034983911..6034983911 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-network.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-network.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl
index 013aed4824..013aed4824 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.xml.tpl
index 435c1c7d76..435c1c7d76 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-server-state.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl
index 72d9478678..72d9478678 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-state.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-state.xml.tpl
index 435c1c7d76..435c1c7d76 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-reset-state.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-reset-state.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-resume.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-resume.json.tpl
index ff00d97a14..ff00d97a14 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-resume.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-resume.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-resume.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-resume.xml.tpl
index 4d6aaa750c..4d6aaa750c 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-resume.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-resume.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-suspend.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-suspend.json.tpl
index 8c2206a063..8c2206a063 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-suspend.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-suspend.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-suspend.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-suspend.xml.tpl
index 02aeac572a..02aeac572a 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-suspend.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-suspend.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.json.tpl
index 9e905ca2b9..9e905ca2b9 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.xml.tpl
index 8331e2258a..8331e2258a 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unlock-server.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unpause.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unpause.json.tpl
index ce5024f0c9..ce5024f0c9 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unpause.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unpause.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unpause.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unpause.xml.tpl
index b674f09269..b674f09269 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/admin-actions-unpause.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/admin-actions-unpause.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-admin-actions/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-admin-actions/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-admin-actions/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-agents/agent-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agent-post-req.json.tpl
index 6dbd2f17cb..6dbd2f17cb 100644
--- a/nova/tests/integrated/api_samples/os-agents/agent-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agent-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-agents/agent-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agent-post-req.xml.tpl
index 5c777749a2..5c777749a2 100644
--- a/nova/tests/integrated/api_samples/os-agents/agent-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agent-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-agents/agent-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agent-post-resp.json.tpl
index 79e41ceafc..79e41ceafc 100644
--- a/nova/tests/integrated/api_samples/os-agents/agent-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agent-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-agents/agent-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agent-post-resp.xml.tpl
index ecf97b91e9..ecf97b91e9 100644
--- a/nova/tests/integrated/api_samples/os-agents/agent-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agent-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-agents/agent-update-put-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-req.json.tpl
index d447350e0d..d447350e0d 100644
--- a/nova/tests/integrated/api_samples/os-agents/agent-update-put-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-agents/agent-update-put-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-req.xml.tpl
index 19751dc807..19751dc807 100644
--- a/nova/tests/integrated/api_samples/os-agents/agent-update-put-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-agents/agent-update-put-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-resp.json.tpl
index 110e52cd33..110e52cd33 100644
--- a/nova/tests/integrated/api_samples/os-agents/agent-update-put-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-agents/agent-update-put-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-resp.xml.tpl
index 2c9e50572c..2c9e50572c 100644
--- a/nova/tests/integrated/api_samples/os-agents/agent-update-put-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agent-update-put-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-agents/agents-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agents-get-resp.json.tpl
index 30562289fc..30562289fc 100644
--- a/nova/tests/integrated/api_samples/os-agents/agents-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agents-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-agents/agents-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-agents/agents-get-resp.xml.tpl
index fbbbdad288..fbbbdad288 100644
--- a/nova/tests/integrated/api_samples/os-agents/agents-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-agents/agents-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl
index 2a84101a16..2a84101a16 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.xml.tpl
index 4454134efb..4454134efb 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-add-host-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl
index 63a2921cac..63a2921cac 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.xml.tpl
index 72b1e742aa..72b1e742aa 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-metadata-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregate-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-req.json.tpl
index fc806061e8..fc806061e8 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregate-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregate-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-req.xml.tpl
index 4931476ae5..4931476ae5 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregate-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregate-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-resp.json.tpl
index 935643d03c..935643d03c 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregate-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregate-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-resp.xml.tpl
index 2a1bee5868..2a1bee5868 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregate-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl
index 66ecf30cd6..66ecf30cd6 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.xml.tpl
index bc2896835f..bc2896835f 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-remove-host-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-req.json.tpl
index 55e4b09346..55e4b09346 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-req.xml.tpl
index 04ce4fba58..04ce4fba58 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl
index 2e229a473a..2e229a473a 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl
index 1ff22bc0e0..1ff22bc0e0 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregate-update-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl
index e5775c206d..e5775c206d 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl
index 7412dee66d..7412dee66d 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-add-host-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl
index b91781fae2..b91781fae2 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl
index 7f44a231cb..7f44a231cb 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl
index 642653d1e6..642653d1e6 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl
index 79af4a8d89..79af4a8d89 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-list-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl
index b15c40fa5d..b15c40fa5d 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl
index 01245a4dbb..01245a4dbb 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-metadata-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl
index b91781fae2..b91781fae2 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl
index 7f44a231cb..7f44a231cb 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/aggregates-remove-host-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-aggregates/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-aggregates/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-aggregates/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json.tpl
index defa10203e..defa10203e 100644
--- a/nova/tests/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.xml.tpl
index 772bb43d92..772bb43d92 100644
--- a/nova/tests/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.json.tpl
index 8d4e7f5709..8d4e7f5709 100644
--- a/nova/tests/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.xml.tpl
index 5da7d148b1..5da7d148b1 100644
--- a/nova/tests/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl
index 11dcf64373..11dcf64373 100644
--- a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.xml.tpl
index 75e9b97c8c..75e9b97c8c 100644
--- a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl
index d882cdc612..d882cdc612 100644
--- a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.xml.tpl
index b391e59733..b391e59733 100644
--- a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-create-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl
index 47dcf2dc64..47dcf2dc64 100644
--- a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.xml.tpl
index f3262e948e..f3262e948e 100644
--- a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl
index 3333bb4999..3333bb4999 100644
--- a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.xml.tpl
index a3393448d4..a3393448d4 100644
--- a/nova/tests/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/attach-interfaces-show-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-attach-interfaces/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-attach-interfaces/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-details-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-details-resp.json.tpl
index 07529dfc93..07529dfc93 100644
--- a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-details-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-details-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-details-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-details-resp.xml.tpl
index 856a649577..856a649577 100644
--- a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-details-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-details-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-get-resp.json.tpl
index c512d182fb..c512d182fb 100644
--- a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-get-resp.xml.tpl
index 1eff177dee..1eff177dee 100644
--- a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-req.json.tpl
index f013ba0796..f013ba0796 100644
--- a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-req.xml.tpl
index 9c55b49cbe..9c55b49cbe 100644
--- a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-resp.xml.tpl
index 2ad5c102b0..2ad5c102b0 100644
--- a/nova/tests/integrated/api_samples/os-availability-zone/availability-zone-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-availability-zone/availability-zone-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.json.tpl
index 48912edfc0..48912edfc0 100644
--- a/nova/tests/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.xml.tpl
index 962b507658..962b507658 100644
--- a/nova/tests/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-block-device-mapping-v2-boot/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-cell-capacities/cells-capacities-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-cell-capacities/cells-capacities-resp.json.tpl
index b926f8d1df..b926f8d1df 100644
--- a/nova/tests/integrated/api_samples/os-cell-capacities/cells-capacities-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-cell-capacities/cells-capacities-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-cell-capacities/cells-capacities-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-cell-capacities/cells-capacities-resp.xml.tpl
index 63672b00bd..63672b00bd 100644
--- a/nova/tests/integrated/api_samples/os-cell-capacities/cells-capacities-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-cell-capacities/cells-capacities-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-cells/cells-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-cells/cells-get-resp.json.tpl
index 2993b1df88..2993b1df88 100644
--- a/nova/tests/integrated/api_samples/os-cells/cells-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-cells/cells-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-cells/cells-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-cells/cells-get-resp.xml.tpl
index d31a674a2f..d31a674a2f 100644
--- a/nova/tests/integrated/api_samples/os-cells/cells-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-cells/cells-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl
index b16e12cd69..b16e12cd69 100644
--- a/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-cells/cells-list-empty-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl
index 32fef4f048..32fef4f048 100644
--- a/nova/tests/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-cells/cells-list-empty-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-cells/cells-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-cells/cells-list-resp.json.tpl
index 3d7a6c207c..3d7a6c207c 100644
--- a/nova/tests/integrated/api_samples/os-cells/cells-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-cells/cells-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-cells/cells-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-cells/cells-list-resp.xml.tpl
index 58312201f6..58312201f6 100644
--- a/nova/tests/integrated/api_samples/os-cells/cells-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-cells/cells-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-certificates/certificate-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-req.json.tpl
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/integrated/api_samples/os-certificates/certificate-create-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-certificates/certificate-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-req.xml.tpl
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/integrated/api_samples/os-certificates/certificate-create-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-certificates/certificate-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-resp.json.tpl
index 35c063c820..35c063c820 100644
--- a/nova/tests/integrated/api_samples/os-certificates/certificate-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-certificates/certificate-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-resp.xml.tpl
index 75f2d5d7f4..75f2d5d7f4 100644
--- a/nova/tests/integrated/api_samples/os-certificates/certificate-create-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-create-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-certificates/certificate-get-root-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-get-root-resp.json.tpl
index 4938e92fba..4938e92fba 100644
--- a/nova/tests/integrated/api_samples/os-certificates/certificate-get-root-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-get-root-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-certificates/certificate-get-root-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-get-root-resp.xml.tpl
index bbc54284a5..bbc54284a5 100644
--- a/nova/tests/integrated/api_samples/os-certificates/certificate-get-root-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-certificates/certificate-get-root-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.json.tpl
index 0ab9141aea..0ab9141aea 100644
--- a/nova/tests/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.xml.tpl
index 34d2be9dfc..34d2be9dfc 100644
--- a/nova/tests/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-cloudpipe-update/cloud-pipe-update-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl
index c8fc75995a..c8fc75995a 100644
--- a/nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.xml.tpl
index b0a60b896c..b0a60b896c 100644
--- a/nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl
index 6aa2ff60e2..6aa2ff60e2 100644
--- a/nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.xml.tpl
index 63064cc51a..63064cc51a 100644
--- a/nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-create-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl
index 698008802e..698008802e 100644
--- a/nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.xml.tpl
index 63eb40be4f..63eb40be4f 100644
--- a/nova/tests/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-cloudpipe/cloud-pipe-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl
index 723714bf73..723714bf73 100644
--- a/nova/tests/integrated/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-config-drive/server-config-drive-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-config-drive/server-config-drive-get-resp.xml.tpl
index 6cd025045e..6cd025045e 100644
--- a/nova/tests/integrated/api_samples/os-config-drive/server-config-drive-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-config-drive/server-config-drive-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-config-drive/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-config-drive/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-config-drive/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-config-drive/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-config-drive/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-config-drive/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-config-drive/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-config-drive/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-config-drive/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl
index f3ae979ecb..f3ae979ecb 100644
--- a/nova/tests/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.xml.tpl
index 1882ba835a..1882ba835a 100644
--- a/nova/tests/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-config-drive/servers-config-drive-details-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl
index f5be11801e..f5be11801e 100644
--- a/nova/tests/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.xml.tpl
index de81f08fe8..de81f08fe8 100644
--- a/nova/tests/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl
index 00956b90e4..00956b90e4 100644
--- a/nova/tests/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.xml.tpl
index b761d78b67..b761d78b67 100644
--- a/nova/tests/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/get-rdp-console-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-console-auth-tokens/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-console-auth-tokens/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-console-auth-tokens/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-console-auth-tokens/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-console-auth-tokens/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-console-auth-tokens/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-console-auth-tokens/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-console-auth-tokens/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-console-auth-tokens/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-console-output/console-output-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-req.json.tpl
index caeb2a5502..caeb2a5502 100644
--- a/nova/tests/integrated/api_samples/os-console-output/console-output-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-console-output/console-output-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-req.xml.tpl
index af477004df..af477004df 100644
--- a/nova/tests/integrated/api_samples/os-console-output/console-output-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-console-output/console-output-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-resp.json.tpl
index fae6b128e9..fae6b128e9 100644
--- a/nova/tests/integrated/api_samples/os-console-output/console-output-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-console-output/console-output-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-resp.xml.tpl
index e93d81df35..e93d81df35 100644
--- a/nova/tests/integrated/api_samples/os-console-output/console-output-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-console-output/console-output-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-console-output/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-console-output/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-console-output/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-console-output/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-console-output/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-console-output/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-console-output/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-console-output/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-console-output/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-console-output/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-console-output/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-console-output/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-console-output/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-console-output/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-console-output/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-console-output/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-rdp-console-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-req.json.tpl
index 00956b90e4..00956b90e4 100644
--- a/nova/tests/integrated/api_samples/os-consoles/get-rdp-console-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-rdp-console-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-req.xml.tpl
index b761d78b67..b761d78b67 100644
--- a/nova/tests/integrated/api_samples/os-consoles/get-rdp-console-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-rdp-console-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-resp.json.tpl
index b8272ca5c0..b8272ca5c0 100644
--- a/nova/tests/integrated/api_samples/os-consoles/get-rdp-console-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-rdp-console-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-resp.xml.tpl
index 24fc3cd848..24fc3cd848 100644
--- a/nova/tests/integrated/api_samples/os-consoles/get-rdp-console-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-rdp-console-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-serial-console-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-req.json.tpl
index 1d754d6608..1d754d6608 100644
--- a/nova/tests/integrated/api_samples/os-consoles/get-serial-console-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-serial-console-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-req.xml.tpl
index 71eb3ae555..71eb3ae555 100644
--- a/nova/tests/integrated/api_samples/os-consoles/get-serial-console-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-serial-console-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-resp.json.tpl
index 67fbfec5b4..67fbfec5b4 100644
--- a/nova/tests/integrated/api_samples/os-consoles/get-serial-console-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-serial-console-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-resp.xml.tpl
index 1bef48769d..1bef48769d 100644
--- a/nova/tests/integrated/api_samples/os-consoles/get-serial-console-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-serial-console-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-req.json.tpl
index d04f7c7ae9..d04f7c7ae9 100644
--- a/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-req.xml.tpl
index c8cd2df9f4..c8cd2df9f4 100644
--- a/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-resp.json.tpl
index 20e260e9ef..20e260e9ef 100644
--- a/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-resp.xml.tpl
index 77e35ae5b8..77e35ae5b8 100644
--- a/nova/tests/integrated/api_samples/os-consoles/get-spice-console-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-spice-console-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl
index 1926119ced..1926119ced 100644
--- a/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl
index c1f73180e8..c1f73180e8 100644
--- a/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl
index 3cf7255759..3cf7255759 100644
--- a/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl
index d4904aa9a5..d4904aa9a5 100644
--- a/nova/tests/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/get-vnc-console-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-consoles/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-consoles/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-consoles/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-consoles/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-consoles/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-consoles/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-consoles/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-deferred-delete/force-delete-post-req.json.tpl
index d3562d390d..d3562d390d 100644
--- a/nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-deferred-delete/force-delete-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-deferred-delete/force-delete-post-req.xml.tpl
index 31928207e8..31928207e8 100644
--- a/nova/tests/integrated/api_samples/os-deferred-delete/force-delete-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-deferred-delete/force-delete-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-deferred-delete/restore-post-req.json.tpl
index d38291fe08..d38291fe08 100644
--- a/nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-deferred-delete/restore-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-deferred-delete/restore-post-req.xml.tpl
index 8a95b4fccf..8a95b4fccf 100644
--- a/nova/tests/integrated/api_samples/os-deferred-delete/restore-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-deferred-delete/restore-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-deferred-delete/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-deferred-delete/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-deferred-delete/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-req.json.tpl
index 179cddce73..179cddce73 100644
--- a/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-req.xml.tpl
index b0471f9162..b0471f9162 100644
--- a/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-resp.json.tpl
index 0da07da5b8..0da07da5b8 100644
--- a/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-resp.xml.tpl
index 2a779af6d1..2a779af6d1 100644
--- a/nova/tests/integrated/api_samples/os-evacuate/server-evacuate-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-evacuate/server-evacuate-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-evacuate/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-evacuate/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-evacuate/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-evacuate/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-evacuate/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-evacuate/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-evacuate/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-evacuate/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-evacuate/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json.tpl
index 5e2c2e6ef0..5e2c2e6ef0 100644
--- a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml.tpl
index a86c9e5c8a..a86c9e5c8a 100644
--- a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json.tpl
index 0da07da5b8..0da07da5b8 100644
--- a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml.tpl
index b3b95fdde4..b3b95fdde4 100644
--- a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.json.tpl
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.xml.tpl
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-nopool-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.json.tpl
index 24129f4958..24129f4958 100644
--- a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.xml.tpl
index a80147389d..a80147389d 100644
--- a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.json.tpl
index 10ee8d9bd4..10ee8d9bd4 100644
--- a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.xml.tpl
index e0f68ef503..e0f68ef503 100644
--- a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-create-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.json.tpl
index 10ee8d9bd4..10ee8d9bd4 100644
--- a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.xml.tpl
index e0f68ef503..e0f68ef503 100644
--- a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.json.tpl
index 12f118da50..12f118da50 100644
--- a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.xml.tpl
index da6f0d4ce9..da6f0d4ce9 100644
--- a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-empty-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.json.tpl
index 06f57451c9..06f57451c9 100644
--- a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.xml.tpl
index bbd0b117ef..bbd0b117ef 100644
--- a/nova/tests/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-floating-ips/floating-ips-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json.tpl
index a1e5f2080b..a1e5f2080b 100644
--- a/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml.tpl
index ed2a8b0829..ed2a8b0829 100644
--- a/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-networks/network-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-req.json.tpl
index 18515bd6c4..18515bd6c4 100644
--- a/nova/tests/integrated/api_samples/os-extended-networks/network-create-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-networks/network-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-req.xml.tpl
index 3cc79bd837..3cc79bd837 100644
--- a/nova/tests/integrated/api_samples/os-extended-networks/network-create-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-resp.json.tpl
index 5cf155b13f..5cf155b13f 100644
--- a/nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-resp.xml.tpl
index 3a757c5f2f..3a757c5f2f 100644
--- a/nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-create-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-show-resp.json.tpl
index ac75fe7fb1..ac75fe7fb1 100644
--- a/nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-show-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-show-resp.xml.tpl
index 3139ca88a8..3139ca88a8 100644
--- a/nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-networks/network-show-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-networks/networks-list-resp.json.tpl
index ccdd586a0f..ccdd586a0f 100644
--- a/nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-networks/networks-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-networks/networks-list-resp.xml.tpl
index 0b7f456402..0b7f456402 100644
--- a/nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-networks/networks-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-quotas/quotas-update-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-req.json.tpl
index a58a179123..a58a179123 100644
--- a/nova/tests/integrated/api_samples/os-extended-quotas/quotas-update-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-quotas/quotas-update-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-req.xml.tpl
index 499b890f03..499b890f03 100644
--- a/nova/tests/integrated/api_samples/os-extended-quotas/quotas-update-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.json.tpl
index c882a8cb12..c882a8cb12 100644
--- a/nova/tests/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.xml.tpl
index b8c4c0d831..b8c4c0d831 100644
--- a/nova/tests/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-quotas/quotas-update-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.json.tpl
index d9a355319e..d9a355319e 100755
--- a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.xml.tpl
index fee8326e0c..fee8326e0c 100755
--- a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-get-resp-rescue.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100755
--- a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-req.xml.tpl
index f926149842..f926149842 100755
--- a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100755
--- a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100755
--- a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.json.tpl
index 0f2e751fcf..0f2e751fcf 100755
--- a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.xml.tpl
index 75666d81a2..75666d81a2 100755
--- a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-rescue.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue.json.tpl
index 1922e4db1b..1922e4db1b 100755
--- a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-rescue.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-rescue.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue.xml.tpl
index b3b95fdde4..b3b95fdde4 100755
--- a/nova/tests/integrated/api_samples/os-extended-rescue-with-image/server-rescue.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-rescue-with-image/server-rescue.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-services-delete/services-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-services-delete/services-get-resp.json.tpl
index d91fe367f9..d91fe367f9 100644
--- a/nova/tests/integrated/api_samples/os-extended-services-delete/services-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-services-delete/services-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-services-delete/services-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-services-delete/services-get-resp.xml.tpl
index 8e87af9173..8e87af9173 100644
--- a/nova/tests/integrated/api_samples/os-extended-services-delete/services-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-services-delete/services-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-volumes/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-get-resp.json.tpl
index 70b1deabe2..70b1deabe2 100644
--- a/nova/tests/integrated/api_samples/os-extended-volumes/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-volumes/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-get-resp.xml.tpl
index 75a8fb10cc..75a8fb10cc 100644
--- a/nova/tests/integrated/api_samples/os-extended-volumes/server-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-volumes/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-extended-volumes/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-volumes/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-extended-volumes/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-volumes/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-extended-volumes/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-volumes/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-extended-volumes/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-volumes/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-volumes/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-extended-volumes/servers-detail-resp.json.tpl
index 1962d6a6af..1962d6a6af 100644
--- a/nova/tests/integrated/api_samples/os-extended-volumes/servers-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-volumes/servers-detail-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-extended-volumes/servers-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-extended-volumes/servers-detail-resp.xml.tpl
index a2ecf018eb..a2ecf018eb 100644
--- a/nova/tests/integrated/api_samples/os-extended-volumes/servers-detail-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-extended-volumes/servers-detail-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedip-post-req.json.tpl
index 85ae4890ad..85ae4890ad 100644
--- a/nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedip-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedip-post-req.xml.tpl
index 3896b24eb6..3896b24eb6 100644
--- a/nova/tests/integrated/api_samples/os-fixed-ips/fixedip-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedip-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl
index a3d11475bf..a3d11475bf 100644
--- a/nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedips-get-resp.xml.tpl
index 3e9598f347..3e9598f347 100644
--- a/nova/tests/integrated/api_samples/os-fixed-ips/fixedips-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-fixed-ips/fixedips-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.json.tpl
index 94f5439e04..94f5439e04 100644
--- a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.xml.tpl
index 312819dadb..312819dadb 100644
--- a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.json.tpl
index d797155795..d797155795 100644
--- a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.xml.tpl
index 2223052aae..2223052aae 100644
--- a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-add-tenant-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-req.json.tpl
index 02ac4e695d..02ac4e695d 100644
--- a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-create-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-req.xml.tpl
index 5714fb9d0d..5714fb9d0d 100644
--- a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-create-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-resp.json.tpl
index 4110795ec9..4110795ec9 100644
--- a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-resp.xml.tpl
index c3a8994078..c3a8994078 100644
--- a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-create-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-create-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.json.tpl
index b5f1eea542..b5f1eea542 100644
--- a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.xml.tpl
index 8ee66226a5..8ee66226a5 100644
--- a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-detail-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-list-resp.json.tpl
index a6b6dbdcda..a6b6dbdcda 100644
--- a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-list-resp.xml.tpl
index 1e55ad2f95..1e55ad2f95 100644
--- a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.json.tpl
index 20711e02b4..20711e02b4 100644
--- a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.xml.tpl
index 490de3e315..490de3e315 100644
--- a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.json.tpl
index 5cab03334d..5cab03334d 100644
--- a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.xml.tpl
index 80d1ecc48c..80d1ecc48c 100644
--- a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-remove-tenant-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-show-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-show-resp.json.tpl
index 2e991a4cef..2e991a4cef 100644
--- a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-show-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-show-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-show-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-show-resp.xml.tpl
index ae18daba22..ae18daba22 100644
--- a/nova/tests/integrated/api_samples/os-flavor-access/flavor-access-show-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-access/flavor-access-show-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.json.tpl
index dd858e76c5..dd858e76c5 100644
--- a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.xml.tpl
index c94595cad1..c94595cad1 100644
--- a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl
index dd858e76c5..dd858e76c5 100644
--- a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.xml.tpl
index 1008b5bb0e..1008b5bb0e 100644
--- a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-create-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl
index adfa77008f..adfa77008f 100644
--- a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.xml.tpl
index e3de59a342..e3de59a342 100644
--- a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl
index dd858e76c5..dd858e76c5 100644
--- a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.xml.tpl
index 1008b5bb0e..1008b5bb0e 100644
--- a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.json.tpl
index adfa77008f..adfa77008f 100644
--- a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.xml.tpl
index 6421e59592..6421e59592 100644
--- a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl
index adfa77008f..adfa77008f 100644
--- a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.xml.tpl
index e3de59a342..e3de59a342 100644
--- a/nova/tests/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-extra-specs/flavor-extra-specs-update-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-req.json.tpl
index 5383e5d15e..5383e5d15e 100644
--- a/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-req.xml.tpl
index 764cebe8e4..764cebe8e4 100644
--- a/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.json.tpl
index ae0ce80ba2..ae0ce80ba2 100644
--- a/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.xml.tpl
index 156ef215e6..156ef215e6 100644
--- a/nova/tests/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-manage/flavor-create-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl
index 241cf7c800..241cf7c800 100644
--- a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml.tpl
index d461b443ed..d461b443ed 100644
--- a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl
index 035c860c9a..035c860c9a 100644
--- a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml.tpl
index ee937b974b..ee937b974b 100644
--- a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl
index 70d0a57de8..70d0a57de8 100644
--- a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml.tpl
index a87b47670e..a87b47670e 100644
--- a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl
index abf652fae3..abf652fae3 100644
--- a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml.tpl
index d24623c555..d24623c555 100644
--- a/nova/tests/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl
index 9b7e57c8a9..9b7e57c8a9 100644
--- a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl
index 9375b14b5e..9375b14b5e 100644
--- a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl
index 1367e75de5..1367e75de5 100644
--- a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl
index 7c9c589bef..7c9c589bef 100644
--- a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl
index ca86aeb4e4..ca86aeb4e4 100644
--- a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl
index 5f54df5cd2..5f54df5cd2 100644
--- a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl
index e61a08dc17..e61a08dc17 100644
--- a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl
index e8c69ecee7..e8c69ecee7 100644
--- a/nova/tests/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-flavor-swap/flavor-swap-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl
index 7dc33ddb10..7dc33ddb10 100644
--- a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml.tpl
index bd62d34186..bd62d34186 100644
--- a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl
index 3ec0743ba7..3ec0743ba7 100644
--- a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml.tpl
index 38a659b78e..38a659b78e 100644
--- a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl
index db73be14a8..db73be14a8 100644
--- a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml.tpl
index 40866a5373..40866a5373 100644
--- a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl
index a14d395d23..a14d395d23 100644
--- a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml.tpl
index 1759c403af..1759c403af 100644
--- a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl
index 8edd0603f7..8edd0603f7 100644
--- a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml.tpl
index a889ef6e2c..a889ef6e2c 100644
--- a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl
index 831cda7b55..831cda7b55 100644
--- a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml.tpl
index bf7788f94d..bf7788f94d 100644
--- a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl
index a6055cfecc..a6055cfecc 100644
--- a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml.tpl
index e57c290cb8..e57c290cb8 100644
--- a/nova/tests/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl
index 607109d70d..607109d70d 100644
--- a/nova/tests/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.xml.tpl
index ae4b3a4bb3..ae4b3a4bb3 100644
--- a/nova/tests/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ip-pools/floatingippools-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl
index 426f07e989..426f07e989 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.xml.tpl
index ebe0b9aa9a..ebe0b9aa9a 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl
index ef1cbfb17f..ef1cbfb17f 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.xml.tpl
index db80bbfc10..db80bbfc10 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl
index d630d669cd..d630d669cd 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.xml.tpl
index 27a6b0e95a..27a6b0e95a 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl
index 166984b24a..166984b24a 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.xml.tpl
index 3d77af334a..3d77af334a 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl
index 0eaaf75ae0..0eaaf75ae0 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.xml.tpl
index 4c3c8cd9ca..4c3c8cd9ca 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl
index de1e622bb1..de1e622bb1 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.xml.tpl
index 6ef85bd874..6ef85bd874 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.json.tpl
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.xml.tpl
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-nopool-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-req.json.tpl
index 24129f4958..24129f4958 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-create-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-req.xml.tpl
index a80147389d..a80147389d 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-create-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-resp.json.tpl
index 10ee8d9bd4..10ee8d9bd4 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-resp.xml.tpl
index e0f68ef503..e0f68ef503 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-create-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-create-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-get-resp.json.tpl
index 10ee8d9bd4..10ee8d9bd4 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-get-resp.xml.tpl
index e0f68ef503..e0f68ef503 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.json.tpl
index 12f118da50..12f118da50 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.xml.tpl
index da6f0d4ce9..da6f0d4ce9 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-empty-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-resp.json.tpl
index 06f57451c9..06f57451c9 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-resp.xml.tpl
index bbd0b117ef..bbd0b117ef 100644
--- a/nova/tests/integrated/api_samples/os-floating-ips/floating-ips-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-floating-ips/floating-ips-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-fping/fping-get-details-resp.json.tpl
index f3b222c399..f3b222c399 100644
--- a/nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-fping/fping-get-details-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-fping/fping-get-details-resp.xml.tpl
index 758519b60e..758519b60e 100644
--- a/nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-fping/fping-get-details-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-fping/fping-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-fping/fping-get-resp.json.tpl
index b33e80668b..b33e80668b 100644
--- a/nova/tests/integrated/api_samples/os-fping/fping-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-fping/fping-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-fping/fping-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-fping/fping-get-resp.xml.tpl
index 290ad6ca68..290ad6ca68 100644
--- a/nova/tests/integrated/api_samples/os-fping/fping-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-fping/fping-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-fping/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-fping/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-fping/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-fping/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-fping/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-fping/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-fping/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-fping/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-fping/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-fping/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-fping/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-fping/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-fping/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-fping/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-fping/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-fping/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-get-resp.json.tpl
index 4ac6374529..4ac6374529 100644
--- a/nova/tests/integrated/api_samples/os-hide-server-addresses/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-get-resp.xml.tpl
index cee28db35c..cee28db35c 100644
--- a/nova/tests/integrated/api_samples/os-hide-server-addresses/server-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-hide-server-addresses/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl
index 81afe431c0..81afe431c0 100644
--- a/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-details-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-details-resp.xml.tpl
index da0472dbcf..da0472dbcf 100644
--- a/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-details-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-details-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl
index 8b97dc28d7..8b97dc28d7 100644
--- a/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-list-resp.xml.tpl
index 03bee03a6e..03bee03a6e 100644
--- a/nova/tests/integrated/api_samples/os-hide-server-addresses/servers-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hide-server-addresses/servers-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-get-reboot.json.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-reboot.json.tpl
index 4ed89a182d..4ed89a182d 100644
--- a/nova/tests/integrated/api_samples/os-hosts/host-get-reboot.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-reboot.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-get-reboot.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-reboot.xml.tpl
index 4f9c8e4378..4f9c8e4378 100644
--- a/nova/tests/integrated/api_samples/os-hosts/host-get-reboot.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-reboot.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-resp.json.tpl
index efb234b436..efb234b436 100644
--- a/nova/tests/integrated/api_samples/os-hosts/host-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-resp.xml.tpl
index e162734ba3..e162734ba3 100644
--- a/nova/tests/integrated/api_samples/os-hosts/host-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-get-shutdown.json.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-shutdown.json.tpl
index c0df4481a2..c0df4481a2 100644
--- a/nova/tests/integrated/api_samples/os-hosts/host-get-shutdown.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-shutdown.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-get-shutdown.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-shutdown.xml.tpl
index d78bd32a5d..d78bd32a5d 100644
--- a/nova/tests/integrated/api_samples/os-hosts/host-get-shutdown.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-shutdown.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-get-startup.json.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-startup.json.tpl
index 90f5ac7bcb..90f5ac7bcb 100644
--- a/nova/tests/integrated/api_samples/os-hosts/host-get-startup.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-startup.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-get-startup.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-startup.xml.tpl
index 581f7cf07f..581f7cf07f 100644
--- a/nova/tests/integrated/api_samples/os-hosts/host-get-startup.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-get-startup.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-req.json.tpl
index 6accac1644..6accac1644 100644
--- a/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-req.xml.tpl
index d127a7a26b..d127a7a26b 100644
--- a/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-resp.json.tpl
index 92f73892b3..92f73892b3 100644
--- a/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-resp.xml.tpl
index e9c99512b8..e9c99512b8 100644
--- a/nova/tests/integrated/api_samples/os-hosts/host-put-maintenance-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/host-put-maintenance-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl
index 9fb47106db..9fb47106db 100644
--- a/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/hosts-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl
index a031c9b661..a031c9b661 100644
--- a/nova/tests/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hosts/hosts-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json.tpl
index 14464ccf4d..14464ccf4d 100644
--- a/nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml.tpl
index 6cfd860af5..6cfd860af5 100644
--- a/nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl
index 9ccda9c7e6..9ccda9c7e6 100644
--- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl
index 1169ce1e01..1169ce1e01 100644
--- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl
index 8d94021274..8d94021274 100644
--- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-list-resp.xml.tpl
index 6b7d9d7ca1..6b7d9d7ca1 100644
--- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl
index 8d94021274..8d94021274 100644
--- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-search-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-search-resp.xml.tpl
index 6b7d9d7ca1..6b7d9d7ca1 100644
--- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-search-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-search-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl
index 8d94021274..8d94021274 100644
--- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.xml.tpl
index 7782732ba6..7782732ba6 100644
--- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-servers-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl
index 356316d61f..356316d61f 100644
--- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl
index 090f720398..090f720398 100644
--- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl
index 2cfb51e703..2cfb51e703 100644
--- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.xml.tpl
index 5d10411e3a..5d10411e3a 100644
--- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-statistics-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl
index 8a36c65f23..8a36c65f23 100644
--- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.xml.tpl
index 04219f5b5d..04219f5b5d 100644
--- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-hypervisors/hypervisors-uptime-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-instance-actions/instance-action-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-action-get-resp.json.tpl
index 7cd5325239..7cd5325239 100644
--- a/nova/tests/integrated/api_samples/os-instance-actions/instance-action-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-action-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-instance-actions/instance-action-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-action-get-resp.xml.tpl
index 61c0ac8c76..61c0ac8c76 100644
--- a/nova/tests/integrated/api_samples/os-instance-actions/instance-action-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-action-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl
index 0fdc33916a..0fdc33916a 100644
--- a/nova/tests/integrated/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-instance-actions/instance-actions-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-actions-list-resp.xml.tpl
index 87a8726cce..87a8726cce 100644
--- a/nova/tests/integrated/api_samples/os-instance-actions/instance-actions-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-instance-actions/instance-actions-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json.tpl
index 81b0d6c341..81b0d6c341 100644
--- a/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml.tpl
index 8b670b0c91..8b670b0c91 100644
--- a/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-index-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json.tpl
index 71549c156b..71549c156b 100644
--- a/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml.tpl
index 9ceb1c26c8..9ceb1c26c8 100644
--- a/nova/tests/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-instance_usage_audit_log/inst-usage-audit-log-show-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-keypairs/keypairs-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-get-resp.json.tpl
index 4fde60f14b..4fde60f14b 100644
--- a/nova/tests/integrated/api_samples/os-keypairs/keypairs-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-keypairs/keypairs-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-get-resp.xml.tpl
index 3442f1ed62..3442f1ed62 100644
--- a/nova/tests/integrated/api_samples/os-keypairs/keypairs-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-keypairs/keypairs-import-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-req.json.tpl
index 2301fa05b2..2301fa05b2 100644
--- a/nova/tests/integrated/api_samples/os-keypairs/keypairs-import-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-keypairs/keypairs-import-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-req.xml.tpl
index 0516de3035..0516de3035 100644
--- a/nova/tests/integrated/api_samples/os-keypairs/keypairs-import-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-keypairs/keypairs-import-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-resp.json.tpl
index ca7192d5dc..ca7192d5dc 100644
--- a/nova/tests/integrated/api_samples/os-keypairs/keypairs-import-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-keypairs/keypairs-import-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-resp.xml.tpl
index ed2543c107..ed2543c107 100644
--- a/nova/tests/integrated/api_samples/os-keypairs/keypairs-import-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-import-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-keypairs/keypairs-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-list-resp.json.tpl
index 29ba63c00b..29ba63c00b 100644
--- a/nova/tests/integrated/api_samples/os-keypairs/keypairs-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-keypairs/keypairs-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-list-resp.xml.tpl
index 493bfa3161..493bfa3161 100644
--- a/nova/tests/integrated/api_samples/os-keypairs/keypairs-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-keypairs/keypairs-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-req.json.tpl
index 68e2f03487..68e2f03487 100644
--- a/nova/tests/integrated/api_samples/os-keypairs/keypairs-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-keypairs/keypairs-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-req.xml.tpl
index e14935d314..e14935d314 100644
--- a/nova/tests/integrated/api_samples/os-keypairs/keypairs-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-keypairs/keypairs-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-resp.json.tpl
index aace6f5ccc..aace6f5ccc 100644
--- a/nova/tests/integrated/api_samples/os-keypairs/keypairs-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-keypairs/keypairs-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-resp.xml.tpl
index 4f041e0c9e..4f041e0c9e 100644
--- a/nova/tests/integrated/api_samples/os-keypairs/keypairs-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-keypairs/keypairs-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-migrations/migrations-get.json.tpl b/nova/tests/unit/integrated/api_samples/os-migrations/migrations-get.json.tpl
index 91775be775..91775be775 100644
--- a/nova/tests/integrated/api_samples/os-migrations/migrations-get.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-migrations/migrations-get.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-migrations/migrations-get.xml.tpl b/nova/tests/unit/integrated/api_samples/os-migrations/migrations-get.xml.tpl
index f5c59c7f1b..f5c59c7f1b 100644
--- a/nova/tests/integrated/api_samples/os-migrations/migrations-get.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-migrations/migrations-get.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl
index a7690d7b69..a7690d7b69 100644
--- a/nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.xml.tpl
index 1548974da3..1548974da3 100644
--- a/nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.xml.tpl
index 2ad5c102b0..2ad5c102b0 100644
--- a/nova/tests/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-req.json.tpl
index 9984c05884..9984c05884 100644
--- a/nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-req.xml.tpl
index 7ac9b23d67..7ac9b23d67 100644
--- a/nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl
index 22d2880feb..22d2880feb 100644
--- a/nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-resp.xml.tpl
index e5ba2cc56e..e5ba2cc56e 100644
--- a/nova/tests/integrated/api_samples/os-multiple-create/multiple-create-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-multiple-create/multiple-create-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-associate-host-req.json.tpl
index 762e881751..762e881751 100644
--- a/nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-associate-host-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-associate-host-req.xml.tpl
index 7c96c96a12..7c96c96a12 100644
--- a/nova/tests/integrated/api_samples/os-networks-associate/network-associate-host-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-associate-host-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl
index 46f69b3e81..46f69b3e81 100644
--- a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-host-req.xml.tpl
index 910504a44a..910504a44a 100644
--- a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-host-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-host-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl
index 63b6eb6839..63b6eb6839 100644
--- a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-project-req.xml.tpl
index d4162c19e0..d4162c19e0 100644
--- a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-project-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-project-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-req.json.tpl
index 2e09d15a60..2e09d15a60 100644
--- a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-req.xml.tpl
index c26f7b61a8..c26f7b61a8 100644
--- a/nova/tests/integrated/api_samples/os-networks-associate/network-disassociate-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks-associate/network-disassociate-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks/network-add-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks/network-add-req.json.tpl
index 6489f6e1b5..6489f6e1b5 100644
--- a/nova/tests/integrated/api_samples/os-networks/network-add-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks/network-add-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks/network-add-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks/network-add-req.xml.tpl
index 9e5822a9e8..9e5822a9e8 100644
--- a/nova/tests/integrated/api_samples/os-networks/network-add-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks/network-add-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks/network-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks/network-create-req.json.tpl
index 5e2be031cb..5e2be031cb 100644
--- a/nova/tests/integrated/api_samples/os-networks/network-create-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks/network-create-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks/network-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks/network-create-req.xml.tpl
index d5222f9e8f..d5222f9e8f 100644
--- a/nova/tests/integrated/api_samples/os-networks/network-create-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks/network-create-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks/network-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks/network-create-resp.json.tpl
index e178ab50cb..e178ab50cb 100644
--- a/nova/tests/integrated/api_samples/os-networks/network-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks/network-create-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks/network-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks/network-create-resp.xml.tpl
index d709952cda..d709952cda 100644
--- a/nova/tests/integrated/api_samples/os-networks/network-create-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks/network-create-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks/network-show-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks/network-show-resp.json.tpl
index 66e7122105..66e7122105 100644
--- a/nova/tests/integrated/api_samples/os-networks/network-show-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks/network-show-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks/network-show-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks/network-show-resp.xml.tpl
index aeab222391..aeab222391 100644
--- a/nova/tests/integrated/api_samples/os-networks/network-show-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks/network-show-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-disassociate-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks/networks-disassociate-req.json.tpl
index df99b889c4..df99b889c4 100644
--- a/nova/tests/integrated/api_samples/os-networks/networks-disassociate-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks/networks-disassociate-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-disassociate-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks/networks-disassociate-req.xml.tpl
index 63c0300904..63c0300904 100644
--- a/nova/tests/integrated/api_samples/os-networks/networks-disassociate-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks/networks-disassociate-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-networks/networks-list-resp.json.tpl
index 4e359c6171..4e359c6171 100644
--- a/nova/tests/integrated/api_samples/os-networks/networks-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks/networks-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-networks/networks-list-resp.xml.tpl
index 7ac19a8137..7ac19a8137 100644
--- a/nova/tests/integrated/api_samples/os-networks/networks-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-networks/networks-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.json.tpl
index cd7fdcf2d3..cd7fdcf2d3 100644
--- a/nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.xml.tpl
index 254745649c..254745649c 100644
--- a/nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.json.tpl b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.json.tpl
index 2f06fd7008..2f06fd7008 100644
--- a/nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.xml.tpl b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.xml.tpl
index 6d469d40ea..6d469d40ea 100644
--- a/nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-action-rebuild.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-preserve-ephemeral-rebuild/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl
index f9a94e760a..f9a94e760a 100644
--- a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl
index fb8e7992a5..fb8e7992a5 100644
--- a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl
index 483fda8c53..483fda8c53 100644
--- a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl
index 150fb6a42a..150fb6a42a 100644
--- a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl
index c36783f2f0..c36783f2f0 100644
--- a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl
index cd674a24da..cd674a24da 100644
--- a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
index 2f0fd98572..2f0fd98572 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl
index f56987563c..f56987563c 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
index 2f0fd98572..2f0fd98572 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl
index f56987563c..f56987563c 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-req.json.tpl
index 1f12caa045..1f12caa045 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-req.xml.tpl
index 596ce56ac3..596ce56ac3 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
index 34df1fe01e..34df1fe01e 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl
index 91ac3a0dda..91ac3a0dda 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-rescue/server-get-resp-rescue.json.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-rescue.json.tpl
index d9a355319e..d9a355319e 100644
--- a/nova/tests/integrated/api_samples/os-rescue/server-get-resp-rescue.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-rescue.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-rescue/server-get-resp-rescue.xml.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-rescue.xml.tpl
index 5b134dcee0..5b134dcee0 100644
--- a/nova/tests/integrated/api_samples/os-rescue/server-get-resp-rescue.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-rescue.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-rescue/server-get-resp-unrescue.json.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-unrescue.json.tpl
index 5a017d8da1..5a017d8da1 100644
--- a/nova/tests/integrated/api_samples/os-rescue/server-get-resp-unrescue.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-unrescue.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-rescue/server-get-resp-unrescue.xml.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-unrescue.xml.tpl
index 145dd0be28..145dd0be28 100644
--- a/nova/tests/integrated/api_samples/os-rescue/server-get-resp-unrescue.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-get-resp-unrescue.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-rescue/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-rescue/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-rescue/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-rescue/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-rescue/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-rescue/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-rescue/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-rescue/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-rescue/server-rescue-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue-req.json.tpl
index d712347537..d712347537 100644
--- a/nova/tests/integrated/api_samples/os-rescue/server-rescue-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-rescue/server-rescue-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue-req.xml.tpl
index 09acae072a..09acae072a 100644
--- a/nova/tests/integrated/api_samples/os-rescue/server-rescue-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-rescue/server-rescue.json.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue.json.tpl
index 1922e4db1b..1922e4db1b 100644
--- a/nova/tests/integrated/api_samples/os-rescue/server-rescue.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-rescue/server-rescue.xml.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue.xml.tpl
index b3b95fdde4..b3b95fdde4 100644
--- a/nova/tests/integrated/api_samples/os-rescue/server-rescue.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-rescue.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-rescue/server-unrescue-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-unrescue-req.json.tpl
index cafc9b13a8..cafc9b13a8 100644
--- a/nova/tests/integrated/api_samples/os-rescue/server-unrescue-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-unrescue-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-rescue/server-unrescue-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-rescue/server-unrescue-req.xml.tpl
index 6a87f8fb21..6a87f8fb21 100644
--- a/nova/tests/integrated/api_samples/os-rescue/server-unrescue-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-rescue/server-unrescue-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl
index 8836d0eecc..8836d0eecc 100644
--- a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.xml.tpl
index daee122905..daee122905 100644
--- a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl
index ae6c62bfd6..ae6c62bfd6 100644
--- a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.xml.tpl
index 9e700969ff..9e700969ff 100644
--- a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl
index c083640c3e..c083640c3e 100644
--- a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.xml.tpl
index f009bf80f1..f009bf80f1 100644
--- a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl
index 97b5259a18..97b5259a18 100644
--- a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.xml.tpl
index 9181abd387..9181abd387 100644
--- a/nova/tests/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-groups/security-group-add-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-add-post-req.json.tpl
index 41ae659135..41ae659135 100644
--- a/nova/tests/integrated/api_samples/os-security-groups/security-group-add-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-add-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-groups/security-group-add-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-add-post-req.xml.tpl
index 7540245bc3..7540245bc3 100644
--- a/nova/tests/integrated/api_samples/os-security-groups/security-group-add-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-add-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-groups/security-group-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-post-req.json.tpl
index 3f54ab6856..3f54ab6856 100644
--- a/nova/tests/integrated/api_samples/os-security-groups/security-group-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-groups/security-group-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-post-req.xml.tpl
index c62b14c495..c62b14c495 100644
--- a/nova/tests/integrated/api_samples/os-security-groups/security-group-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-groups/security-group-remove-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-remove-post-req.json.tpl
index a3f545785f..a3f545785f 100644
--- a/nova/tests/integrated/api_samples/os-security-groups/security-group-remove-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-remove-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-groups/security-group-remove-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-remove-post-req.xml.tpl
index 9a64a5debc..9a64a5debc 100644
--- a/nova/tests/integrated/api_samples/os-security-groups/security-group-remove-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-group-remove-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-groups/security-groups-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-create-resp.json.tpl
index b9325e2e7a..b9325e2e7a 100644
--- a/nova/tests/integrated/api_samples/os-security-groups/security-groups-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-create-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-groups/security-groups-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-create-resp.xml.tpl
index c641fd60fc..c641fd60fc 100644
--- a/nova/tests/integrated/api_samples/os-security-groups/security-groups-create-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-create-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-groups/security-groups-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-get-resp.json.tpl
index 0372512744..0372512744 100644
--- a/nova/tests/integrated/api_samples/os-security-groups/security-groups-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-groups/security-groups-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-get-resp.xml.tpl
index 2b19797101..2b19797101 100644
--- a/nova/tests/integrated/api_samples/os-security-groups/security-groups-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl
index 1771f2dff1..1771f2dff1 100644
--- a/nova/tests/integrated/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-groups/security-groups-list-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-list-get-resp.xml.tpl
index 8f6e201bdd..8f6e201bdd 100644
--- a/nova/tests/integrated/api_samples/os-security-groups/security-groups-list-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/security-groups-list-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-groups/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-security-groups/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-groups/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-req.xml.tpl
index 4f0444219c..4f0444219c 100644
--- a/nova/tests/integrated/api_samples/os-security-groups/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-groups/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-resp.json.tpl
index 2133d3f890..2133d3f890 100644
--- a/nova/tests/integrated/api_samples/os-security-groups/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-groups/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-resp.xml.tpl
index 1ca430955b..1ca430955b 100644
--- a/nova/tests/integrated/api_samples/os-security-groups/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl
index 1771f2dff1..1771f2dff1 100644
--- a/nova/tests/integrated/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-security-groups/server-security-groups-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-security-groups/server-security-groups-list-resp.xml.tpl
index 8f6e201bdd..8f6e201bdd 100644
--- a/nova/tests/integrated/api_samples/os-security-groups/server-security-groups-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-security-groups/server-security-groups-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl
index 1afedaee9c..1afedaee9c 100644
--- a/nova/tests/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.xml.tpl
index 776419f82f..776419f82f 100644
--- a/nova/tests/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-diagnostics-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-diagnostics/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-server-diagnostics/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-diagnostics/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-server-diagnostics/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-diagnostics/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-server-diagnostics/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-diagnostics/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-server-diagnostics/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-diagnostics/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-external-events/event-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-req.json.tpl
index 43c3b6b407..43c3b6b407 100644
--- a/nova/tests/integrated/api_samples/os-server-external-events/event-create-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-external-events/event-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-req.xml.tpl
index a9029857cf..a9029857cf 100644
--- a/nova/tests/integrated/api_samples/os-server-external-events/event-create-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-external-events/event-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-resp.json.tpl
index aa11b62c83..aa11b62c83 100644
--- a/nova/tests/integrated/api_samples/os-server-external-events/event-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-external-events/event-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-resp.xml.tpl
index 24cf59ccba..24cf59ccba 100644
--- a/nova/tests/integrated/api_samples/os-server-external-events/event-create-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-external-events/event-create-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-external-events/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-server-external-events/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-external-events/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-server-external-events/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-external-events/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-server-external-events/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-external-events/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-server-external-events/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-external-events/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-group-quotas/limit-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/limit-get-resp.json.tpl
index 939bbd7cd8..939bbd7cd8 100644
--- a/nova/tests/integrated/api_samples/os-server-group-quotas/limit-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/limit-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-group-quotas/limit-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/limit-get-resp.xml.tpl
index 91fd5e0b9f..91fd5e0b9f 100644
--- a/nova/tests/integrated/api_samples/os-server-group-quotas/limit-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/limit-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.json.tpl
index 06a007e5e7..06a007e5e7 100644
--- a/nova/tests/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.xml.tpl
index 5951360f60..5951360f60 100644
--- a/nova/tests/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-show-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.json.tpl
index 32df1e4b2b..32df1e4b2b 100644
--- a/nova/tests/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.xml.tpl
index e32e3d44c1..e32e3d44c1 100644
--- a/nova/tests/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.json.tpl
index c08c585df8..c08c585df8 100644
--- a/nova/tests/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.xml.tpl
index d0c34e50a6..d0c34e50a6 100644
--- a/nova/tests/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quota-classes-update-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.json.tpl
index f66f22cd2d..f66f22cd2d 100644
--- a/nova/tests/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.xml.tpl
index e6076286ca..e6076286ca 100644
--- a/nova/tests/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-defaults-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.json.tpl
index f66f22cd2d..f66f22cd2d 100644
--- a/nova/tests/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.xml.tpl
index e6076286ca..e6076286ca 100644
--- a/nova/tests/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-show-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.json.tpl
index 1f12caa045..1f12caa045 100644
--- a/nova/tests/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.xml.tpl
index 596ce56ac3..596ce56ac3 100644
--- a/nova/tests/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.json.tpl
index 605857f39e..605857f39e 100644
--- a/nova/tests/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.xml.tpl
index dfaddfd969..dfaddfd969 100644
--- a/nova/tests/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/quotas-update-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.json.tpl
index 3bd6b42432..3bd6b42432 100644
--- a/nova/tests/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.xml.tpl
index 4b4ea91539..4b4ea91539 100644
--- a/nova/tests/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-group-quotas/usedlimits-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-groups/server-groups-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-get-resp.json.tpl
index ba72643b6d..ba72643b6d 100644
--- a/nova/tests/integrated/api_samples/os-server-groups/server-groups-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-groups/server-groups-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-get-resp.xml.tpl
index dc4651aab7..dc4651aab7 100644
--- a/nova/tests/integrated/api_samples/os-server-groups/server-groups-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-groups/server-groups-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-list-resp.json.tpl
index f01d451dd2..f01d451dd2 100644
--- a/nova/tests/integrated/api_samples/os-server-groups/server-groups-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-groups/server-groups-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-list-resp.xml.tpl
index bda7562118..bda7562118 100644
--- a/nova/tests/integrated/api_samples/os-server-groups/server-groups-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-groups/server-groups-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-req.json.tpl
index 1cc2328320..1cc2328320 100644
--- a/nova/tests/integrated/api_samples/os-server-groups/server-groups-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-groups/server-groups-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-req.xml.tpl
index abe8459549..abe8459549 100644
--- a/nova/tests/integrated/api_samples/os-server-groups/server-groups-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-groups/server-groups-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-resp.json.tpl
index ee9c37e82c..ee9c37e82c 100644
--- a/nova/tests/integrated/api_samples/os-server-groups/server-groups-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-groups/server-groups-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-resp.xml.tpl
index dc4651aab7..dc4651aab7 100644
--- a/nova/tests/integrated/api_samples/os-server-groups/server-groups-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-groups/server-groups-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/servers-list-resp.json.tpl
index 8b97dc28d7..8b97dc28d7 100644
--- a/nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/servers-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/servers-list-resp.xml.tpl
index 03bee03a6e..03bee03a6e 100644
--- a/nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-list-multi-status/servers-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-password/get-password-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-password/get-password-resp.json.tpl
index 026f15d46a..026f15d46a 100644
--- a/nova/tests/integrated/api_samples/os-server-password/get-password-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-password/get-password-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-password/get-password-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-password/get-password-resp.xml.tpl
index 046eed30fb..046eed30fb 100644
--- a/nova/tests/integrated/api_samples/os-server-password/get-password-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-password/get-password-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-password/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-password/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-server-password/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-password/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-password/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-password/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-server-password/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-password/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-password/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-password/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-server-password/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-password/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-password/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-password/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-server-password/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-password/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-start-stop/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-server-start-stop/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-start-stop/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-server-start-stop/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-start-stop/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-server-start-stop/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-start-stop/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-server-start-stop/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-start-stop/server_start_stop.json.tpl b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server_start_stop.json.tpl
index a993b3f684..a993b3f684 100644
--- a/nova/tests/integrated/api_samples/os-server-start-stop/server_start_stop.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server_start_stop.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-server-start-stop/server_start_stop.xml.tpl b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server_start_stop.xml.tpl
index 35cc3c2045..35cc3c2045 100644
--- a/nova/tests/integrated/api_samples/os-server-start-stop/server_start_stop.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-server-start-stop/server_start_stop.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-services/service-disable-log-put-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-req.json.tpl
index 13ba2f11ca..13ba2f11ca 100644
--- a/nova/tests/integrated/api_samples/os-services/service-disable-log-put-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-services/service-disable-log-put-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-req.xml.tpl
index a1ffd7e205..a1ffd7e205 100644
--- a/nova/tests/integrated/api_samples/os-services/service-disable-log-put-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-services/service-disable-log-put-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-resp.json.tpl
index 5266b0b623..5266b0b623 100644
--- a/nova/tests/integrated/api_samples/os-services/service-disable-log-put-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-services/service-disable-log-put-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-resp.xml.tpl
index f7255d3851..f7255d3851 100644
--- a/nova/tests/integrated/api_samples/os-services/service-disable-log-put-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-disable-log-put-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-services/service-disable-put-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-req.json.tpl
index 57182e935c..57182e935c 100644
--- a/nova/tests/integrated/api_samples/os-services/service-disable-put-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-services/service-disable-put-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-req.xml.tpl
index fc297bcd34..fc297bcd34 100644
--- a/nova/tests/integrated/api_samples/os-services/service-disable-put-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-services/service-disable-put-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-resp.json.tpl
index 47a8b3d816..47a8b3d816 100644
--- a/nova/tests/integrated/api_samples/os-services/service-disable-put-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-services/service-disable-put-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-resp.xml.tpl
index cc03298c53..cc03298c53 100644
--- a/nova/tests/integrated/api_samples/os-services/service-disable-put-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-disable-put-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-services/service-enable-put-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-req.json.tpl
index 57182e935c..57182e935c 100644
--- a/nova/tests/integrated/api_samples/os-services/service-enable-put-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-services/service-enable-put-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-req.xml.tpl
index fc297bcd34..fc297bcd34 100644
--- a/nova/tests/integrated/api_samples/os-services/service-enable-put-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-services/service-enable-put-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-resp.json.tpl
index 24f72311d1..24f72311d1 100644
--- a/nova/tests/integrated/api_samples/os-services/service-enable-put-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-services/service-enable-put-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-resp.xml.tpl
index 3cbf51b778..3cbf51b778 100644
--- a/nova/tests/integrated/api_samples/os-services/service-enable-put-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-services/service-enable-put-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-services/services-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-services/services-get-resp.json.tpl
index 80be294dd6..80be294dd6 100644
--- a/nova/tests/integrated/api_samples/os-services/services-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-services/services-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-services/services-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-services/services-get-resp.xml.tpl
index 365f02e573..365f02e573 100644
--- a/nova/tests/integrated/api_samples/os-services/services-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-services/services-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-services/services-list-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-services/services-list-get-resp.json.tpl
index 09564520e7..09564520e7 100644
--- a/nova/tests/integrated/api_samples/os-services/services-list-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-services/services-list-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-services/services-list-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-services/services-list-get-resp.xml.tpl
index 1c6b0113b7..1c6b0113b7 100644
--- a/nova/tests/integrated/api_samples/os-services/services-list-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-services/services-list-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-shelve/os-shelve-offload.json.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve-offload.json.tpl
index 5a19f85cff..5a19f85cff 100644
--- a/nova/tests/integrated/api_samples/os-shelve/os-shelve-offload.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve-offload.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-shelve/os-shelve-offload.xml.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve-offload.xml.tpl
index 41d18bdac0..41d18bdac0 100644
--- a/nova/tests/integrated/api_samples/os-shelve/os-shelve-offload.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve-offload.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-shelve/os-shelve.json.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve.json.tpl
index 5a19f85cff..5a19f85cff 100644
--- a/nova/tests/integrated/api_samples/os-shelve/os-shelve.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-shelve/os-shelve.xml.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve.xml.tpl
index 41d18bdac0..41d18bdac0 100644
--- a/nova/tests/integrated/api_samples/os-shelve/os-shelve.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/os-shelve.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-shelve/os-unshelve.json.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/os-unshelve.json.tpl
index 5a19f85cff..5a19f85cff 100644
--- a/nova/tests/integrated/api_samples/os-shelve/os-unshelve.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/os-unshelve.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-shelve/os-unshelve.xml.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/os-unshelve.xml.tpl
index 41d18bdac0..41d18bdac0 100644
--- a/nova/tests/integrated/api_samples/os-shelve/os-unshelve.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/os-unshelve.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-shelve/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-shelve/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-shelve/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-shelve/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-shelve/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-shelve/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-shelve/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-shelve/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-shelve/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-shelve/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-simple-tenant-usage/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl
index f37083013d..f37083013d 100644
--- a/nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.xml.tpl b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.xml.tpl
index 014c2f9d64..014c2f9d64 100644
--- a/nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl
index 25b5ff2b84..25b5ff2b84 100644
--- a/nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.xml.tpl b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.xml.tpl
index b1bb63f1c4..b1bb63f1c4 100644
--- a/nova/tests/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl
index 757084d2f3..757084d2f3 100644
--- a/nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.xml.tpl b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-list-res.xml.tpl
index 0562ebae7c..0562ebae7c 100644
--- a/nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-list-res.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl
index fb1c2d3d06..fb1c2d3d06 100644
--- a/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-req.xml.tpl
index 0493de3872..0493de3872 100644
--- a/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl
index ff9e2273d3..ff9e2273d3 100644
--- a/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.xml.tpl b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-res.xml.tpl
index 9c6c2f28b2..9c6c2f28b2 100644
--- a/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-tenant-networks/networks-post-res.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.json.tpl
index dcf861c4ef..dcf861c4ef 100644
--- a/nova/tests/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.xml.tpl
index 9f6a2d9f7a..9f6a2d9f7a 100644
--- a/nova/tests/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-used-limits-for-admin/usedlimitsforadmin-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl
index dcf861c4ef..dcf861c4ef 100644
--- a/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-used-limits/usedlimits-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl
index 9f6a2d9f7a..9f6a2d9f7a 100644
--- a/nova/tests/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-used-limits/usedlimits-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-user-data/userdata-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-req.json.tpl
index fb7744a3d0..fb7744a3d0 100644
--- a/nova/tests/integrated/api_samples/os-user-data/userdata-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-user-data/userdata-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-req.xml.tpl
index 22ec4d5c5f..22ec4d5c5f 100644
--- a/nova/tests/integrated/api_samples/os-user-data/userdata-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-user-data/userdata-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-user-data/userdata-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-user-data/userdata-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-resp.xml.tpl
index 2ad5c102b0..2ad5c102b0 100644
--- a/nova/tests/integrated/api_samples/os-user-data/userdata-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-user-data/userdata-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.json.tpl
index 2f0fd98572..2f0fd98572 100644
--- a/nova/tests/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.xml.tpl
index f56987563c..f56987563c 100644
--- a/nova/tests/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-show-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.json.tpl
index b322b2a870..b322b2a870 100644
--- a/nova/tests/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.xml.tpl
index c5084d44e6..c5084d44e6 100644
--- a/nova/tests/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.json.tpl
index 5539332927..5539332927 100644
--- a/nova/tests/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.xml.tpl
index 43c36c7da3..43c36c7da3 100644
--- a/nova/tests/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-user-quotas/user-quotas-update-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-virtual-interfaces/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-virtual-interfaces/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-virtual-interfaces/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-virtual-interfaces/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-virtual-interfaces/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-virtual-interfaces/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-virtual-interfaces/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-virtual-interfaces/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.json.tpl
index af0b7e05a7..af0b7e05a7 100644
--- a/nova/tests/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.xml.tpl
index 74d0c6f394..74d0c6f394 100644
--- a/nova/tests/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-virtual-interfaces/vifs-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-volume-attachment-update/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-volume-attachment-update/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-volume-attachment-update/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-volume-attachment-update/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-volume-attachment-update/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-volume-attachment-update/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-volume-attachment-update/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-volume-attachment-update/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-volume-attachment-update/update-volume-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/update-volume-req.json.tpl
index 3d360a57bc..3d360a57bc 100644
--- a/nova/tests/integrated/api_samples/os-volume-attachment-update/update-volume-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/update-volume-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-volume-attachment-update/update-volume-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/update-volume-req.xml.tpl
index ffb20ad1ea..ffb20ad1ea 100644
--- a/nova/tests/integrated/api_samples/os-volume-attachment-update/update-volume-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volume-attachment-update/update-volume-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/attach-volume-to-server-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-req.json.tpl
index 3d360a57bc..3d360a57bc 100644
--- a/nova/tests/integrated/api_samples/os-volumes/attach-volume-to-server-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/attach-volume-to-server-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-req.xml.tpl
index ffb20ad1ea..ffb20ad1ea 100644
--- a/nova/tests/integrated/api_samples/os-volumes/attach-volume-to-server-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/attach-volume-to-server-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-resp.json.tpl
index 4730b3c197..4730b3c197 100644
--- a/nova/tests/integrated/api_samples/os-volumes/attach-volume-to-server-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/attach-volume-to-server-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-resp.xml.tpl
index efad2fd02a..efad2fd02a 100644
--- a/nova/tests/integrated/api_samples/os-volumes/attach-volume-to-server-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/attach-volume-to-server-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/list-volume-attachments-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/list-volume-attachments-resp.json.tpl
index 6c1da07ef6..6c1da07ef6 100644
--- a/nova/tests/integrated/api_samples/os-volumes/list-volume-attachments-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/list-volume-attachments-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/list-volume-attachments-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/list-volume-attachments-resp.xml.tpl
index 351646d81e..351646d81e 100644
--- a/nova/tests/integrated/api_samples/os-volumes/list-volume-attachments-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/list-volume-attachments-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/os-volumes-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-detail-resp.json.tpl
index 82a63eda5f..82a63eda5f 100644
--- a/nova/tests/integrated/api_samples/os-volumes/os-volumes-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-detail-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/os-volumes-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-detail-resp.xml.tpl
index bd8f324ee0..bd8f324ee0 100644
--- a/nova/tests/integrated/api_samples/os-volumes/os-volumes-detail-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-detail-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/os-volumes-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-get-resp.json.tpl
index 84bfdd2a5b..84bfdd2a5b 100644
--- a/nova/tests/integrated/api_samples/os-volumes/os-volumes-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/os-volumes-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-get-resp.xml.tpl
index cb3c5edf90..cb3c5edf90 100644
--- a/nova/tests/integrated/api_samples/os-volumes/os-volumes-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/os-volumes-index-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-index-resp.json.tpl
index 82a63eda5f..82a63eda5f 100644
--- a/nova/tests/integrated/api_samples/os-volumes/os-volumes-index-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-index-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/os-volumes-index-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-index-resp.xml.tpl
index bd8f324ee0..bd8f324ee0 100644
--- a/nova/tests/integrated/api_samples/os-volumes/os-volumes-index-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-index-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/os-volumes-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-req.json.tpl
index db7fbff4d4..db7fbff4d4 100644
--- a/nova/tests/integrated/api_samples/os-volumes/os-volumes-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/os-volumes-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-req.xml.tpl
index bb115cc61b..bb115cc61b 100644
--- a/nova/tests/integrated/api_samples/os-volumes/os-volumes-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/os-volumes-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-resp.json.tpl
index d13ce20cc3..d13ce20cc3 100644
--- a/nova/tests/integrated/api_samples/os-volumes/os-volumes-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/os-volumes-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-resp.xml.tpl
index cb3c5edf90..cb3c5edf90 100644
--- a/nova/tests/integrated/api_samples/os-volumes/os-volumes-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/os-volumes-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/os-volumes/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/os-volumes/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/os-volumes/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/os-volumes/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/snapshot-create-req.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-req.json.tpl
index a8d47ea031..a8d47ea031 100644
--- a/nova/tests/integrated/api_samples/os-volumes/snapshot-create-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/snapshot-create-req.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-req.xml.tpl
index a5b670bc2f..a5b670bc2f 100644
--- a/nova/tests/integrated/api_samples/os-volumes/snapshot-create-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/snapshot-create-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-resp.json.tpl
index 6153e8140e..6153e8140e 100644
--- a/nova/tests/integrated/api_samples/os-volumes/snapshot-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/snapshot-create-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-resp.xml.tpl
index 78268c822c..78268c822c 100644
--- a/nova/tests/integrated/api_samples/os-volumes/snapshot-create-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshot-create-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/snapshots-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-detail-resp.json.tpl
index 1b509d54f8..1b509d54f8 100644
--- a/nova/tests/integrated/api_samples/os-volumes/snapshots-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-detail-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/snapshots-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-detail-resp.xml.tpl
index 730921f4cf..730921f4cf 100644
--- a/nova/tests/integrated/api_samples/os-volumes/snapshots-detail-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-detail-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/snapshots-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-list-resp.json.tpl
index c65d073ad7..c65d073ad7 100644
--- a/nova/tests/integrated/api_samples/os-volumes/snapshots-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/snapshots-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-list-resp.xml.tpl
index 730921f4cf..730921f4cf 100644
--- a/nova/tests/integrated/api_samples/os-volumes/snapshots-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/snapshots-show-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-show-resp.json.tpl
index a9ab6240d6..a9ab6240d6 100644
--- a/nova/tests/integrated/api_samples/os-volumes/snapshots-show-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-show-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/snapshots-show-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-show-resp.xml.tpl
index c42bf41b3c..c42bf41b3c 100644
--- a/nova/tests/integrated/api_samples/os-volumes/snapshots-show-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/snapshots-show-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/volume-attachment-detail-resp.json.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/volume-attachment-detail-resp.json.tpl
index 86099eeb87..86099eeb87 100644
--- a/nova/tests/integrated/api_samples/os-volumes/volume-attachment-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/volume-attachment-detail-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-volumes/volume-attachment-detail-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/os-volumes/volume-attachment-detail-resp.xml.tpl
index 45fd199793..45fd199793 100644
--- a/nova/tests/integrated/api_samples/os-volumes/volume-attachment-detail-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/os-volumes/volume-attachment-detail-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/server-action-changepassword.json.tpl b/nova/tests/unit/integrated/api_samples/server-action-changepassword.json.tpl
index da615718fe..da615718fe 100644
--- a/nova/tests/integrated/api_samples/server-action-changepassword.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-action-changepassword.json.tpl
diff --git a/nova/tests/integrated/api_samples/server-action-changepassword.xml.tpl b/nova/tests/unit/integrated/api_samples/server-action-changepassword.xml.tpl
index 6c343024e2..6c343024e2 100644
--- a/nova/tests/integrated/api_samples/server-action-changepassword.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-action-changepassword.xml.tpl
diff --git a/nova/tests/integrated/api_samples/server-action-confirmresize.json.tpl b/nova/tests/unit/integrated/api_samples/server-action-confirmresize.json.tpl
index 432f6126e9..432f6126e9 100644
--- a/nova/tests/integrated/api_samples/server-action-confirmresize.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-action-confirmresize.json.tpl
diff --git a/nova/tests/integrated/api_samples/server-action-confirmresize.xml.tpl b/nova/tests/unit/integrated/api_samples/server-action-confirmresize.xml.tpl
index 18f07bd67b..18f07bd67b 100644
--- a/nova/tests/integrated/api_samples/server-action-confirmresize.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-action-confirmresize.xml.tpl
diff --git a/nova/tests/integrated/api_samples/server-action-createimage.json.tpl b/nova/tests/unit/integrated/api_samples/server-action-createimage.json.tpl
index 0b9e39ffb3..0b9e39ffb3 100644
--- a/nova/tests/integrated/api_samples/server-action-createimage.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-action-createimage.json.tpl
diff --git a/nova/tests/integrated/api_samples/server-action-createimage.xml.tpl b/nova/tests/unit/integrated/api_samples/server-action-createimage.xml.tpl
index aa1eccf8a5..aa1eccf8a5 100644
--- a/nova/tests/integrated/api_samples/server-action-createimage.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-action-createimage.xml.tpl
diff --git a/nova/tests/integrated/api_samples/server-action-reboot.json.tpl b/nova/tests/unit/integrated/api_samples/server-action-reboot.json.tpl
index 18eda9b9ab..18eda9b9ab 100644
--- a/nova/tests/integrated/api_samples/server-action-reboot.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-action-reboot.json.tpl
diff --git a/nova/tests/integrated/api_samples/server-action-reboot.xml.tpl b/nova/tests/unit/integrated/api_samples/server-action-reboot.xml.tpl
index d4cfe198c7..d4cfe198c7 100644
--- a/nova/tests/integrated/api_samples/server-action-reboot.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-action-reboot.xml.tpl
diff --git a/nova/tests/integrated/api_samples/server-action-rebuild-resp.json.tpl b/nova/tests/unit/integrated/api_samples/server-action-rebuild-resp.json.tpl
index cd7fdcf2d3..cd7fdcf2d3 100644
--- a/nova/tests/integrated/api_samples/server-action-rebuild-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-action-rebuild-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/server-action-rebuild-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/server-action-rebuild-resp.xml.tpl
index 254745649c..254745649c 100644
--- a/nova/tests/integrated/api_samples/server-action-rebuild-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-action-rebuild-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/server-action-rebuild.json.tpl b/nova/tests/unit/integrated/api_samples/server-action-rebuild.json.tpl
index 273906a349..273906a349 100644
--- a/nova/tests/integrated/api_samples/server-action-rebuild.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-action-rebuild.json.tpl
diff --git a/nova/tests/integrated/api_samples/server-action-rebuild.xml.tpl b/nova/tests/unit/integrated/api_samples/server-action-rebuild.xml.tpl
index 84f0b98961..84f0b98961 100644
--- a/nova/tests/integrated/api_samples/server-action-rebuild.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-action-rebuild.xml.tpl
diff --git a/nova/tests/integrated/api_samples/server-action-resize.json.tpl b/nova/tests/unit/integrated/api_samples/server-action-resize.json.tpl
index 468a88da24..468a88da24 100644
--- a/nova/tests/integrated/api_samples/server-action-resize.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-action-resize.json.tpl
diff --git a/nova/tests/integrated/api_samples/server-action-resize.xml.tpl b/nova/tests/unit/integrated/api_samples/server-action-resize.xml.tpl
index cbe49ea59a..cbe49ea59a 100644
--- a/nova/tests/integrated/api_samples/server-action-resize.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-action-resize.xml.tpl
diff --git a/nova/tests/integrated/api_samples/server-action-revertresize.json.tpl b/nova/tests/unit/integrated/api_samples/server-action-revertresize.json.tpl
index 2ddf6e5ab0..2ddf6e5ab0 100644
--- a/nova/tests/integrated/api_samples/server-action-revertresize.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-action-revertresize.json.tpl
diff --git a/nova/tests/integrated/api_samples/server-action-revertresize.xml.tpl b/nova/tests/unit/integrated/api_samples/server-action-revertresize.xml.tpl
index 5c13bbdc0c..5c13bbdc0c 100644
--- a/nova/tests/integrated/api_samples/server-action-revertresize.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-action-revertresize.xml.tpl
diff --git a/nova/tests/integrated/api_samples/server-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/server-get-resp.json.tpl
index 4ac6374529..4ac6374529 100644
--- a/nova/tests/integrated/api_samples/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/server-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/server-get-resp.xml.tpl
index cee28db35c..cee28db35c 100644
--- a/nova/tests/integrated/api_samples/server-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-get-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/server-ips-network-resp.json.tpl b/nova/tests/unit/integrated/api_samples/server-ips-network-resp.json.tpl
index 29d2370a74..29d2370a74 100644
--- a/nova/tests/integrated/api_samples/server-ips-network-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-ips-network-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/server-ips-network-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/server-ips-network-resp.xml.tpl
index 153dca9b54..153dca9b54 100644
--- a/nova/tests/integrated/api_samples/server-ips-network-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-ips-network-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/server-ips-resp.json.tpl b/nova/tests/unit/integrated/api_samples/server-ips-resp.json.tpl
index 259eabea72..259eabea72 100644
--- a/nova/tests/integrated/api_samples/server-ips-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-ips-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/server-ips-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/server-ips-resp.xml.tpl
index 62d804b2af..62d804b2af 100644
--- a/nova/tests/integrated/api_samples/server-ips-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-ips-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/server-metadata-all-req.json.tpl b/nova/tests/unit/integrated/api_samples/server-metadata-all-req.json.tpl
index 2278d2afd8..2278d2afd8 100644
--- a/nova/tests/integrated/api_samples/server-metadata-all-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-metadata-all-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/server-metadata-all-req.xml.tpl b/nova/tests/unit/integrated/api_samples/server-metadata-all-req.xml.tpl
index e742706736..e742706736 100644
--- a/nova/tests/integrated/api_samples/server-metadata-all-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-metadata-all-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/server-metadata-all-resp.json.tpl b/nova/tests/unit/integrated/api_samples/server-metadata-all-resp.json.tpl
index 2278d2afd8..2278d2afd8 100644
--- a/nova/tests/integrated/api_samples/server-metadata-all-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-metadata-all-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/server-metadata-all-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/server-metadata-all-resp.xml.tpl
index e742706736..e742706736 100644
--- a/nova/tests/integrated/api_samples/server-metadata-all-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-metadata-all-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/server-metadata-req.json.tpl b/nova/tests/unit/integrated/api_samples/server-metadata-req.json.tpl
index 35872e95fc..35872e95fc 100644
--- a/nova/tests/integrated/api_samples/server-metadata-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-metadata-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/server-metadata-req.xml.tpl b/nova/tests/unit/integrated/api_samples/server-metadata-req.xml.tpl
index fa9d6ad480..fa9d6ad480 100644
--- a/nova/tests/integrated/api_samples/server-metadata-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-metadata-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/server-metadata-resp.json.tpl b/nova/tests/unit/integrated/api_samples/server-metadata-resp.json.tpl
index 85d69ec956..85d69ec956 100644
--- a/nova/tests/integrated/api_samples/server-metadata-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-metadata-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/server-metadata-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/server-metadata-resp.xml.tpl
index fa9d6ad480..fa9d6ad480 100644
--- a/nova/tests/integrated/api_samples/server-metadata-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-metadata-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/server-post-req.json.tpl b/nova/tests/unit/integrated/api_samples/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/api_samples/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/server-post-req.xml.tpl b/nova/tests/unit/integrated/api_samples/server-post-req.xml.tpl
index f926149842..f926149842 100644
--- a/nova/tests/integrated/api_samples/server-post-req.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-post-req.xml.tpl
diff --git a/nova/tests/integrated/api_samples/server-post-resp.json.tpl b/nova/tests/unit/integrated/api_samples/server-post-resp.json.tpl
index d5f030c873..d5f030c873 100644
--- a/nova/tests/integrated/api_samples/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/server-post-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/server-post-resp.xml.tpl
index 3bb13e69bd..3bb13e69bd 100644
--- a/nova/tests/integrated/api_samples/server-post-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/server-post-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/servers-details-resp.json.tpl b/nova/tests/unit/integrated/api_samples/servers-details-resp.json.tpl
index 81afe431c0..81afe431c0 100644
--- a/nova/tests/integrated/api_samples/servers-details-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/servers-details-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/servers-details-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/servers-details-resp.xml.tpl
index da0472dbcf..da0472dbcf 100644
--- a/nova/tests/integrated/api_samples/servers-details-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/servers-details-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/servers-list-resp.json.tpl b/nova/tests/unit/integrated/api_samples/servers-list-resp.json.tpl
index 8b97dc28d7..8b97dc28d7 100644
--- a/nova/tests/integrated/api_samples/servers-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/servers-list-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/servers-list-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/servers-list-resp.xml.tpl
index 03bee03a6e..03bee03a6e 100644
--- a/nova/tests/integrated/api_samples/servers-list-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/servers-list-resp.xml.tpl
diff --git a/nova/tests/integrated/api_samples/versions-get-resp.json.tpl b/nova/tests/unit/integrated/api_samples/versions-get-resp.json.tpl
index 5c3b1ec05b..5c3b1ec05b 100644
--- a/nova/tests/integrated/api_samples/versions-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/api_samples/versions-get-resp.json.tpl
diff --git a/nova/tests/integrated/api_samples/versions-get-resp.xml.tpl b/nova/tests/unit/integrated/api_samples/versions-get-resp.xml.tpl
index 09c4a52f9e..09c4a52f9e 100644
--- a/nova/tests/integrated/api_samples/versions-get-resp.xml.tpl
+++ b/nova/tests/unit/integrated/api_samples/versions-get-resp.xml.tpl
diff --git a/nova/tests/unit/integrated/api_samples_test_base.py b/nova/tests/unit/integrated/api_samples_test_base.py
new file mode 100644
index 0000000000..69f5f3eb2b
--- /dev/null
+++ b/nova/tests/unit/integrated/api_samples_test_base.py
@@ -0,0 +1,323 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import re
+
+from lxml import etree
+from oslo.serialization import jsonutils
+from oslo.utils import importutils
+import six
+
+from nova.i18n import _
+from nova import test
+from nova.tests.unit.integrated import integrated_helpers
+
+
+class NoMatch(test.TestingException):
+ pass
+
+
+class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
+ ctype = 'json'
+ all_extensions = False
+ extension_name = None
+
+ def _pretty_data(self, data):
+ if self.ctype == 'json':
+ data = jsonutils.dumps(jsonutils.loads(data), sort_keys=True,
+ indent=4)
+
+ else:
+ if data is None:
+ # Likely from missing XML file.
+ return ""
+ xml = etree.XML(data)
+ data = etree.tostring(xml, encoding="UTF-8",
+ xml_declaration=True, pretty_print=True)
+ return '\n'.join(line.rstrip() for line in data.split('\n')).strip()
+
+ def _objectify(self, data):
+ if not data:
+ return {}
+ if self.ctype == 'json':
+ # NOTE(vish): allow non-quoted replacements to survive json
+ data = re.sub(r'([^"])%\((.+)\)s([^"])', r'\1"%(int:\2)s"\3', data)
+ return jsonutils.loads(data)
+ else:
+ def to_dict(node):
+ ret = {}
+ if node.items():
+ ret.update(dict(node.items()))
+ if node.text:
+ ret['__content__'] = node.text
+ if node.tag:
+ ret['__tag__'] = node.tag
+ if node.nsmap:
+ ret['__nsmap__'] = node.nsmap
+ for element in node:
+ ret.setdefault(node.tag, [])
+ ret[node.tag].append(to_dict(element))
+ return ret
+ return to_dict(etree.fromstring(data))
+
+ @classmethod
+ def _get_sample_path(cls, name, dirname, suffix=''):
+ parts = [dirname]
+ parts.append('api_samples')
+ if cls.all_extensions:
+ parts.append('all_extensions')
+ if cls.extension_name:
+ alias = importutils.import_class(cls.extension_name).alias
+ parts.append(alias)
+ parts.append(name + "." + cls.ctype + suffix)
+ return os.path.join(*parts)
+
+ @classmethod
+ def _get_sample(cls, name):
+ dirname = os.path.dirname(os.path.abspath(__file__))
+ dirname = os.path.normpath(os.path.join(dirname, "../../../../doc"))
+ return cls._get_sample_path(name, dirname)
+
+ @classmethod
+ def _get_template(cls, name):
+ dirname = os.path.dirname(os.path.abspath(__file__))
+ return cls._get_sample_path(name, dirname, suffix='.tpl')
+
+ def _read_template(self, name):
+ template = self._get_template(name)
+ with open(template) as inf:
+ return inf.read().strip()
+
+ def _write_template(self, name, data):
+ with open(self._get_template(name), 'w') as outf:
+ outf.write(data)
+
+ def _write_sample(self, name, data):
+ with open(self._get_sample(name), 'w') as outf:
+ outf.write(data)
+
+ def _compare_result(self, subs, expected, result, result_str):
+ matched_value = None
+ if isinstance(expected, dict):
+ if not isinstance(result, dict):
+ raise NoMatch(_('%(result_str)s: %(result)s is not a dict.')
+ % {'result_str': result_str, 'result': result})
+ ex_keys = sorted(expected.keys())
+ res_keys = sorted(result.keys())
+ if ex_keys != res_keys:
+ ex_delta = []
+ res_delta = []
+ for key in ex_keys:
+ if key not in res_keys:
+ ex_delta.append(key)
+ for key in res_keys:
+ if key not in ex_keys:
+ res_delta.append(key)
+ raise NoMatch(
+ _('Dictionary key mismatch:\n'
+ 'Extra key(s) in template:\n%(ex_delta)s\n'
+ 'Extra key(s) in %(result_str)s:\n%(res_delta)s\n') %
+ {'ex_delta': ex_delta, 'result_str': result_str,
+ 'res_delta': res_delta})
+ for key in ex_keys:
+ res = self._compare_result(subs, expected[key], result[key],
+ result_str)
+ matched_value = res or matched_value
+ elif isinstance(expected, list):
+ if not isinstance(result, list):
+ raise NoMatch(
+ _('%(result_str)s: %(result)s is not a list.') %
+ {'result_str': result_str, 'result': result})
+
+ expected = expected[:]
+ extra = []
+ for res_obj in result:
+ for i, ex_obj in enumerate(expected):
+ try:
+ matched_value = self._compare_result(subs, ex_obj,
+ res_obj,
+ result_str)
+ del expected[i]
+ break
+ except NoMatch:
+ pass
+ else:
+ extra.append(res_obj)
+
+ error = []
+ if expected:
+ error.append(_('Extra list items in template:'))
+ error.extend([repr(o) for o in expected])
+
+ if extra:
+ error.append(_('Extra list items in %(result_str)s:') %
+ {'result_str': result_str})
+ error.extend([repr(o) for o in extra])
+
+ if error:
+ raise NoMatch('\n'.join(error))
+ elif isinstance(expected, six.string_types) and '%' in expected:
+ # NOTE(vish): escape stuff for regex
+ for char in '[]<>?':
+ expected = expected.replace(char, '\\%s' % char)
+ # NOTE(vish): special handling of subs that are not quoted. We are
+ # expecting an int but we had to pass in a string
+ # so the json would parse properly.
+ if expected.startswith("%(int:"):
+ result = str(result)
+ expected = expected.replace('int:', '')
+ expected = expected % subs
+ expected = '^%s$' % expected
+ match = re.match(expected, result)
+ if not match:
+ raise NoMatch(
+ _('Values do not match:\n'
+ 'Template: %(expected)s\n%(result_str)s: %(result)s') %
+ {'expected': expected, 'result_str': result_str,
+ 'result': result})
+ try:
+ matched_value = match.group('id')
+ except IndexError:
+ if match.groups():
+ matched_value = match.groups()[0]
+ else:
+ if isinstance(expected, six.string_types):
+ # NOTE(danms): Ignore whitespace in this comparison
+ expected = expected.strip()
+ if isinstance(result, six.string_types):
+ result = result.strip()
+ if expected != result:
+ raise NoMatch(
+ _('Values do not match:\n'
+ 'Template: %(expected)s\n%(result_str)s: '
+ '%(result)s') % {'expected': expected,
+ 'result_str': result_str,
+ 'result': result})
+ return matched_value
+
+ def generalize_subs(self, subs, vanilla_regexes):
+ """Give the test a chance to modify subs after the server response
+ was verified, and before the on-disk doc/api_samples file is checked.
+ This may be needed by some tests to convert exact matches expected
+ from the server into pattern matches to verify what is in the
+ sample file.
+
+ If there are no changes to be made, subs is returned unharmed.
+ """
+ return subs
+
+ def _verify_response(self, name, subs, response, exp_code):
+ self.assertEqual(response.status_code, exp_code)
+ response_data = response.content
+ response_data = self._pretty_data(response_data)
+ if not os.path.exists(self._get_template(name)):
+ self._write_template(name, response_data)
+ template_data = response_data
+ else:
+ template_data = self._read_template(name)
+
+ if (self.generate_samples and
+ not os.path.exists(self._get_sample(name))):
+ self._write_sample(name, response_data)
+ sample_data = response_data
+ else:
+ with file(self._get_sample(name)) as sample:
+ sample_data = sample.read()
+
+ try:
+ template_data = self._objectify(template_data)
+ response_data = self._objectify(response_data)
+ response_result = self._compare_result(subs, template_data,
+ response_data, "Response")
+ # NOTE(danms): replace some of the subs with patterns for the
+ # doc/api_samples check, which won't have things like the
+ # correct compute host name. Also let the test do some of its
+ # own generalization, if necessary
+ vanilla_regexes = self._get_regexes()
+ subs['compute_host'] = vanilla_regexes['host_name']
+ subs['id'] = vanilla_regexes['id']
+ subs = self.generalize_subs(subs, vanilla_regexes)
+ sample_data = self._objectify(sample_data)
+ self._compare_result(subs, template_data, sample_data, "Sample")
+ return response_result
+ except NoMatch:
+ raise
+
+ def _get_host(self):
+ return 'http://openstack.example.com'
+
+ def _get_glance_host(self):
+ return 'http://glance.openstack.example.com'
+
+ def _get_regexes(self):
+ if self.ctype == 'json':
+ text = r'(\\"|[^"])*'
+ else:
+ text = r'[^<]*'
+ isotime_re = '\d{4}-[0,1]\d-[0-3]\dT\d{2}:\d{2}:\d{2}Z'
+ strtime_re = '\d{4}-[0,1]\d-[0-3]\dT\d{2}:\d{2}:\d{2}\.\d{6}'
+ xmltime_re = ('\d{4}-[0,1]\d-[0-3]\d '
+ '\d{2}:\d{2}:\d{2}'
+ '(\.\d{6})?(\+00:00)?')
+ return {
+ 'isotime': isotime_re,
+ 'strtime': strtime_re,
+ 'strtime_or_none': r'None|%s' % strtime_re,
+ 'xmltime': xmltime_re,
+ 'password': '[0-9a-zA-Z]{1,12}',
+ 'ip': '[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}',
+ 'ip6': '([0-9a-zA-Z]{1,4}:){1,7}:?[0-9a-zA-Z]{1,4}',
+ 'id': '(?P<id>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
+ '-[0-9a-f]{4}-[0-9a-f]{12})',
+ 'uuid': '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
+ '-[0-9a-f]{4}-[0-9a-f]{12}',
+ 'reservation_id': 'r-[0-9a-zA-Z]{8}',
+ 'private_key': '-----BEGIN RSA PRIVATE KEY-----'
+ '[a-zA-Z0-9\n/+=]*'
+ '-----END RSA PRIVATE KEY-----',
+ 'public_key': 'ssh-rsa[ a-zA-Z0-9/+=]*'
+ 'Generated-by-Nova',
+ 'fingerprint': '([0-9a-f]{2}:){15}[0-9a-f]{2}',
+ 'host': self._get_host(),
+ 'host_name': '[0-9a-z]{32}',
+ 'glance_host': self._get_glance_host(),
+ 'compute_host': self.compute.host,
+ 'text': text,
+ 'int': '[0-9]+',
+ }
+
+ def _get_response(self, url, method, body=None, strip_version=False):
+ headers = {}
+ headers['Content-Type'] = 'application/' + self.ctype
+ headers['Accept'] = 'application/' + self.ctype
+ return self.api.api_request(url, body=body, method=method,
+ headers=headers, strip_version=strip_version)
+
+ def _do_get(self, url, strip_version=False):
+ return self._get_response(url, 'GET', strip_version=strip_version)
+
+ def _do_post(self, url, name, subs, method='POST'):
+ body = self._read_template(name) % subs
+ sample = self._get_sample(name)
+ if self.generate_samples and not os.path.exists(sample):
+ self._write_sample(name, body)
+ return self._get_response(url, method, body)
+
+ def _do_put(self, url, name, subs):
+ return self._do_post(url, name, subs, method='PUT')
+
+ def _do_delete(self, url):
+ return self._get_response(url, 'DELETE')
diff --git a/nova/tests/unit/integrated/integrated_helpers.py b/nova/tests/unit/integrated/integrated_helpers.py
new file mode 100644
index 0000000000..e62d84f642
--- /dev/null
+++ b/nova/tests/unit/integrated/integrated_helpers.py
@@ -0,0 +1,160 @@
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Provides common functionality for integrated unit tests
+"""
+
+import random
+import string
+import uuid
+
+from oslo.config import cfg
+
+import nova.image.glance
+from nova.openstack.common import log as logging
+from nova import service
+from nova import test
+from nova.tests.unit import cast_as_call
+from nova.tests.unit import fake_crypto
+import nova.tests.unit.image.fake
+from nova.tests.unit.integrated.api import client
+
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+CONF.import_opt('manager', 'nova.cells.opts', group='cells')
+
+
+def generate_random_alphanumeric(length):
+ """Creates a random alphanumeric string of specified length."""
+ return ''.join(random.choice(string.ascii_uppercase + string.digits)
+ for _x in range(length))
+
+
+def generate_random_numeric(length):
+ """Creates a random numeric string of specified length."""
+ return ''.join(random.choice(string.digits)
+ for _x in range(length))
+
+
+def generate_new_element(items, prefix, numeric=False):
+ """Creates a random string with prefix, that is not in 'items' list."""
+ while True:
+ if numeric:
+ candidate = prefix + generate_random_numeric(8)
+ else:
+ candidate = prefix + generate_random_alphanumeric(8)
+ if candidate not in items:
+ return candidate
+ LOG.debug("Random collision on %s" % candidate)
+
+
+class _IntegratedTestBase(test.TestCase):
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(_IntegratedTestBase, self).setUp()
+
+ f = self._get_flags()
+ self.flags(**f)
+ self.flags(verbose=True)
+
+ self.useFixture(test.ReplaceModule('crypto', fake_crypto))
+ nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
+ self.flags(scheduler_driver='nova.scheduler.'
+ 'chance.ChanceScheduler')
+ self._setup_services()
+ self._start_api_service()
+
+ self.api = self._get_test_client()
+
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+
+ def _setup_services(self):
+ self.conductor = self.start_service('conductor',
+ manager=CONF.conductor.manager)
+ self.compute = self.start_service('compute')
+ self.cert = self.start_service('cert')
+ self.consoleauth = self.start_service('consoleauth')
+ self.network = self.start_service('network')
+ self.scheduler = self.start_service('scheduler')
+ self.cells = self.start_service('cells', manager=CONF.cells.manager)
+
+ def tearDown(self):
+ self.osapi.stop()
+ nova.tests.unit.image.fake.FakeImageService_reset()
+ super(_IntegratedTestBase, self).tearDown()
+
+ def _get_test_client(self):
+ return client.TestOpenStackClient('fake', 'fake', self.auth_url)
+
+ def _start_api_service(self):
+ self.osapi = service.WSGIService("osapi_compute")
+ self.osapi.start()
+ self.auth_url = 'http://%(host)s:%(port)s/%(api_version)s' % ({
+ 'host': self.osapi.host, 'port': self.osapi.port,
+ 'api_version': self._api_version})
+
+ def _get_flags(self):
+ """An opportunity to setup flags, before the services are started."""
+ f = {}
+
+ # Ensure tests only listen on localhost
+ f['ec2_listen'] = '127.0.0.1'
+ f['osapi_compute_listen'] = '127.0.0.1'
+ f['metadata_listen'] = '127.0.0.1'
+
+ # Auto-assign ports to allow concurrent tests
+ f['ec2_listen_port'] = 0
+ f['osapi_compute_listen_port'] = 0
+ f['metadata_listen_port'] = 0
+
+ f['fake_network'] = True
+ return f
+
+ def get_unused_server_name(self):
+ servers = self.api.get_servers()
+ server_names = [server['name'] for server in servers]
+ return generate_new_element(server_names, 'server')
+
+ def get_invalid_image(self):
+ return str(uuid.uuid4())
+
+ def _build_minimal_create_server_request(self):
+ server = {}
+
+ image = self.api.get_images()[0]
+ LOG.debug("Image: %s" % image)
+
+ if self._image_ref_parameter in image:
+ image_href = image[self._image_ref_parameter]
+ else:
+ image_href = image['id']
+ image_href = 'http://fake.server/%s' % image_href
+
+ # We now have a valid imageId
+ server[self._image_ref_parameter] = image_href
+
+ # Set a valid flavorId
+ flavor = self.api.get_flavors()[0]
+ LOG.debug("Using flavor: %s" % flavor)
+ server[self._flavor_ref_parameter] = ('http://fake.server/%s'
+ % flavor['id'])
+
+ # Set a valid server name
+ server_name = self.get_unused_server_name()
+ server['name'] = server_name
+ return server
diff --git a/nova/tests/unit/integrated/test_api_samples.py b/nova/tests/unit/integrated/test_api_samples.py
new file mode 100644
index 0000000000..676c7ee0e9
--- /dev/null
+++ b/nova/tests/unit/integrated/test_api_samples.py
@@ -0,0 +1,4433 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import copy
+import datetime
+import inspect
+import os
+import re
+import urllib
+import uuid as uuid_lib
+
+from lxml import etree
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import importutils
+from oslo.utils import timeutils
+
+from nova.api.metadata import password
+from nova.api.openstack.compute.contrib import fping
+from nova.api.openstack.compute import extensions
+# Import extensions to pull in osapi_compute_extension CONF option used below.
+from nova.cells import rpcapi as cells_rpcapi
+from nova.cells import state
+from nova.cloudpipe import pipelib
+from nova.compute import api as compute_api
+from nova.compute import cells_api as cells_api
+from nova.compute import manager as compute_manager
+from nova.compute import rpcapi as compute_rpcapi
+from nova.conductor import manager as conductor_manager
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import models
+from nova import exception
+from nova.network import api as network_api
+from nova import objects
+from nova.openstack.common import log as logging
+import nova.quota
+from nova.servicegroup import api as service_group_api
+from nova import test
+from nova.tests.unit.api.openstack.compute.contrib import test_fping
+from nova.tests.unit.api.openstack.compute.contrib import test_networks
+from nova.tests.unit.api.openstack.compute.contrib import test_services
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_network_cache_model
+from nova.tests.unit import fake_server_actions
+from nova.tests.unit import fake_utils
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated import api_samples_test_base
+from nova.tests.unit.integrated import integrated_helpers
+from nova.tests.unit.objects import test_network
+from nova.tests.unit import utils as test_utils
+from nova import utils
+from nova.volume import cinder
+
+CONF = cfg.CONF
+CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
+CONF.import_opt('shelved_offload_time', 'nova.compute.manager')
+CONF.import_opt('enable_network_quota',
+ 'nova.api.openstack.compute.contrib.os_tenant_networks')
+CONF.import_opt('osapi_compute_extension',
+ 'nova.api.openstack.compute.extensions')
+CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
+CONF.import_opt('osapi_compute_link_prefix', 'nova.api.openstack.common')
+CONF.import_opt('osapi_glance_link_prefix', 'nova.api.openstack.common')
+CONF.import_opt('enable', 'nova.cells.opts', group='cells')
+CONF.import_opt('cell_type', 'nova.cells.opts', group='cells')
+CONF.import_opt('db_check_interval', 'nova.cells.state', group='cells')
+LOG = logging.getLogger(__name__)
+
+
class ApiSampleTestBaseV2(api_samples_test_base.ApiSampleTestBase):
    """Base class for v2 API sample tests."""

    _api_version = 'v2'

    def setUp(self):
        """Configure extensions and stub helpers before services start."""
        extends = []
        # Flags must be set before super().setUp() starts the services.
        self.flags(use_ipv6=False,
                   osapi_compute_link_prefix=self._get_host(),
                   osapi_glance_link_prefix=self._get_glance_host())
        if not self.all_extensions:
            # Load only the extension under test (plus any extension it
            # extends) rather than the full default extension list.
            if hasattr(self, 'extends_name'):
                extends = [self.extends_name]
            ext = [self.extension_name] if self.extension_name else []
            self.flags(osapi_compute_extension=ext + extends)
        super(ApiSampleTestBaseV2, self).setUp()
        self.useFixture(test.SampleNetworks(host=self.network.host))
        fake_network.stub_compute_with_ips(self.stubs)
        fake_utils.stub_out_utils_spawn_n(self.stubs)
        # Regenerate sample files only when GENERATE_SAMPLES is set.
        self.generate_samples = os.getenv('GENERATE_SAMPLES') is not None
+
+
class ApiSamplesTrap(ApiSampleTestBaseV2):
    """Make sure extensions don't get added without tests."""

    all_extensions = True

    def _get_extensions_tested(self):
        """Return aliases of every extension some test class covers."""
        tests = []
        for attr in globals().values():
            if not inspect.isclass(attr):
                continue  # Skip non-class objects
            if not issubclass(attr, integrated_helpers._IntegratedTestBase):
                continue  # Skip non-test classes
            if attr.extension_name is None:
                continue  # Skip base tests
            cls = importutils.import_class(attr.extension_name)
            tests.append(cls.alias)
        return tests

    def _get_extensions(self):
        """Return aliases of every extension the API reports as loaded."""
        extensions = []
        response = self._do_get('extensions')
        for extension in jsonutils.loads(response.content)['extensions']:
            extensions.append(str(extension['alias']))
        return extensions

    def test_all_extensions_have_samples(self):
        # NOTE(danms): This is a list of extensions which are currently
        # in the tree but that don't (yet) have tests. This list should
        # NOT be allowed to grow, and should shrink to zero (and be
        # removed) soon.
        do_not_approve_additions = []
        do_not_approve_additions.append('os-create-server-ext')
        do_not_approve_additions.append('os-baremetal-ext-status')
        do_not_approve_additions.append('os-baremetal-nodes')

        tests = self._get_extensions_tested()
        extensions = self._get_extensions()
        missing_tests = []
        for extension in extensions:
            # NOTE(danms): if you add tests, remove it from the
            # exclusions list
            self.assertFalse(extension in do_not_approve_additions and
                             extension in tests)

            # NOTE(danms): if you add an extension, it must come with
            # api_samples tests!
            if (extension not in tests and
                    extension not in do_not_approve_additions):
                missing_tests.append(extension)

        if missing_tests:
            # Defer string interpolation to the logging subsystem
            # instead of formatting eagerly with %.
            LOG.error("Extensions are missing tests: %s", missing_tests)
        self.assertEqual(missing_tests, [])
+
+
class VersionsSampleJsonTest(ApiSampleTestBaseV2):
    """Sample test for the version discovery resource."""

    def test_versions_get(self):
        # The version list lives at the URL root, so strip the prefix.
        resp = self._do_get('', strip_version=True)
        self._verify_response('versions-get-resp',
                              self._get_regexes(), resp, 200)
+
+
class VersionsSampleXmlTest(VersionsSampleJsonTest):
    """Repeat the versions sample tests with the XML content type."""
    ctype = 'xml'
+
+
class ServersSampleBase(ApiSampleTestBaseV2):
    """Shared helper for sample tests that need a booted server."""

    def _post_server(self):
        """Create a server from the sample request; return the new id."""
        request_subs = {
            'image_id': fake.get_valid_image_id(),
            'host': self._get_host(),
        }
        resp = self._do_post('servers', 'server-post-req', request_subs)
        return self._verify_response('server-post-resp',
                                     self._get_regexes(), resp, 202)
+
+
class ServersSampleJsonTest(ServersSampleBase):
    """Sample tests for basic server create/show/list operations."""

    def test_servers_post(self):
        return self._post_server()

    def test_servers_get(self):
        server_id = self.test_servers_post()
        resp = self._do_get('servers/%s' % server_id)
        subs = self._get_regexes()
        subs.update({'hostid': '[a-f0-9]+',
                     'id': server_id,
                     'hypervisor_hostname': r'[\w\.\-]+',
                     'mac_addr': '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'})
        self._verify_response('server-get-resp', subs, resp, 200)

    def test_servers_list(self):
        server_id = self._post_server()
        resp = self._do_get('servers')
        subs = self._get_regexes()
        subs['id'] = server_id
        self._verify_response('servers-list-resp', subs, resp, 200)

    def test_servers_details(self):
        server_id = self._post_server()
        resp = self._do_get('servers/detail')
        subs = self._get_regexes()
        subs.update({'hostid': '[a-f0-9]+',
                     'id': server_id,
                     'hypervisor_hostname': r'[\w\.\-]+',
                     'mac_addr': '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'})
        self._verify_response('servers-details-resp', subs, resp, 200)
+
+
class ServersSampleXmlTest(ServersSampleJsonTest):
    """Repeat the server sample tests with the XML content type."""
    ctype = 'xml'
+
+
class ServersSampleAllExtensionJsonTest(ServersSampleJsonTest):
    """Rerun the server sample tests with all extensions loaded."""
    all_extensions = True
+
+
class ServersSampleAllExtensionXmlTest(ServersSampleXmlTest):
    """Rerun the XML server sample tests with all extensions loaded."""
    all_extensions = True
+
+
class ServersSampleHideAddressesJsonTest(ServersSampleJsonTest):
    """Rerun the server sample tests with hide_server_addresses loaded."""
    extension_name = '.'.join(('nova.api.openstack.compute.contrib',
                               'hide_server_addresses',
                               'Hide_server_addresses'))
+
+
class ServersSampleHideAddressesXMLTest(ServersSampleHideAddressesJsonTest):
    """Repeat the hide-addresses tests with the XML content type."""
    ctype = 'xml'
+
+
class ServersSampleMultiStatusJsonTest(ServersSampleBase):
    """Sample test for listing servers filtered by multiple statuses."""

    extension_name = '.'.join(('nova.api.openstack.compute.contrib',
                               'server_list_multi_status',
                               'Server_list_multi_status'))

    def test_servers_list(self):
        server_id = self._post_server()
        # Repeating the status query parameter exercises the filter.
        resp = self._do_get('servers?status=active&status=error')
        subs = dict(self._get_regexes(), id=server_id)
        self._verify_response('servers-list-resp', subs, resp, 200)
+
+
class ServersSampleMultiStatusXMLTest(ServersSampleMultiStatusJsonTest):
    """Repeat the multi-status tests with the XML content type."""
    ctype = 'xml'
+
+
class ServersMetadataJsonTest(ServersSampleBase):
    """Sample tests for the server metadata resource."""

    def _create_and_set(self, subs):
        # Boot a server and replace its whole metadata collection.
        server_id = self._post_server()
        resp = self._do_put('servers/%s/metadata' % server_id,
                            'server-metadata-all-req', subs)
        self._verify_response('server-metadata-all-resp', subs, resp, 200)
        return server_id

    def generalize_subs(self, subs, vanilla_regexes):
        # Either of the two values these tests use may appear in samples.
        subs['value'] = '(Foo|Bar) Value'
        return subs

    def test_metadata_put_all(self):
        # Test setting all metadata for a server.
        self._create_and_set({'value': 'Foo Value'})

    def test_metadata_post_all(self):
        # Test updating all metadata for a server.
        subs = {'value': 'Foo Value'}
        server_id = self._create_and_set(subs)
        subs['value'] = 'Bar Value'
        resp = self._do_post('servers/%s/metadata' % server_id,
                             'server-metadata-all-req', subs)
        self._verify_response('server-metadata-all-resp', subs, resp, 200)

    def test_metadata_get_all(self):
        # Test getting all metadata for a server.
        subs = {'value': 'Foo Value'}
        server_id = self._create_and_set(subs)
        resp = self._do_get('servers/%s/metadata' % server_id)
        self._verify_response('server-metadata-all-resp', subs, resp, 200)

    def test_metadata_put(self):
        # Test putting an individual metadata item for a server.
        subs = {'value': 'Foo Value'}
        server_id = self._create_and_set(subs)
        subs['value'] = 'Bar Value'
        resp = self._do_put('servers/%s/metadata/foo' % server_id,
                            'server-metadata-req', subs)
        self._verify_response('server-metadata-resp', subs, resp, 200)

    def test_metadata_get(self):
        # Test getting an individual metadata item for a server.
        subs = {'value': 'Foo Value'}
        server_id = self._create_and_set(subs)
        resp = self._do_get('servers/%s/metadata/foo' % server_id)
        self._verify_response('server-metadata-resp', subs, resp, 200)

    def test_metadata_delete(self):
        # Test deleting an individual metadata item for a server.
        subs = {'value': 'Foo Value'}
        server_id = self._create_and_set(subs)
        resp = self._do_delete('servers/%s/metadata/foo' % server_id)
        self.assertEqual(resp.status_code, 204)
        self.assertEqual(resp.content, '')
+
+
class ServersMetadataXmlTest(ServersMetadataJsonTest):
    """Repeat the server metadata tests with the XML content type."""
    ctype = 'xml'
+
+
class ServersIpsJsonTest(ServersSampleBase):
    """Sample tests for the server addresses (ips) resource."""

    def test_get(self):
        # Test getting a server's IP information.
        server_id = self._post_server()
        resp = self._do_get('servers/%s/ips' % server_id)
        self._verify_response('server-ips-resp',
                              self._get_regexes(), resp, 200)

    def test_get_by_network(self):
        # Test getting a server's IP information by network id.
        server_id = self._post_server()
        resp = self._do_get('servers/%s/ips/private' % server_id)
        self._verify_response('server-ips-network-resp',
                              self._get_regexes(), resp, 200)
+
+
class ServersIpsXmlTest(ServersIpsJsonTest):
    """Repeat the server ips tests with the XML content type."""
    ctype = 'xml'
+
+
class ExtensionsSampleJsonTest(ApiSampleTestBaseV2):
    """Sample test for listing the loaded extensions."""

    all_extensions = True

    def test_extensions_get(self):
        resp = self._do_get('extensions')
        self._verify_response('extensions-get-resp',
                              self._get_regexes(), resp, 200)
+
+
class ExtensionsSampleXmlTest(ExtensionsSampleJsonTest):
    """Repeat the extensions listing test with the XML content type."""
    ctype = 'xml'
+
+
class FlavorsSampleJsonTest(ApiSampleTestBaseV2):
    """Sample tests for the flavors resource."""

    def test_flavors_get(self):
        resp = self._do_get('flavors/1')
        self._verify_response('flavor-get-resp',
                              self._get_regexes(), resp, 200)

    def test_flavors_list(self):
        resp = self._do_get('flavors')
        self._verify_response('flavors-list-resp',
                              self._get_regexes(), resp, 200)
+
+
class FlavorsSampleXmlTest(FlavorsSampleJsonTest):
    """Repeat the flavors sample tests with the XML content type."""
    ctype = 'xml'
+
+
class HostsSampleJsonTest(ApiSampleTestBaseV2):
    """Sample tests for the os-hosts extension."""

    extension_name = "nova.api.openstack.compute.contrib.hosts.Hosts"

    def _get_host_resource(self, path_suffix, template):
        # GET os-hosts/<compute host><suffix> and verify the sample.
        resp = self._do_get('os-hosts/%s%s' % (self.compute.host,
                                               path_suffix))
        self._verify_response(template, self._get_regexes(), resp, 200)

    def test_host_startup(self):
        self._get_host_resource('/startup', 'host-get-startup')

    def test_host_reboot(self):
        self._get_host_resource('/reboot', 'host-get-reboot')

    def test_host_shutdown(self):
        self._get_host_resource('/shutdown', 'host-get-shutdown')

    def test_host_maintenance(self):
        resp = self._do_put('os-hosts/%s' % self.compute.host,
                            'host-put-maintenance-req', {})
        self._verify_response('host-put-maintenance-resp',
                              self._get_regexes(), resp, 200)

    def test_host_get(self):
        self._get_host_resource('', 'host-get-resp')

    def test_hosts_list(self):
        resp = self._do_get('os-hosts')
        self._verify_response('hosts-list-resp',
                              self._get_regexes(), resp, 200)
+
+
class HostsSampleXmlTest(HostsSampleJsonTest):
    """Repeat the os-hosts sample tests with the XML content type."""
    ctype = 'xml'
+
+
class FlavorsSampleAllExtensionJsonTest(FlavorsSampleJsonTest):
    """Rerun the flavors sample tests with all extensions loaded."""
    all_extensions = True
+
+
class FlavorsSampleAllExtensionXmlTest(FlavorsSampleXmlTest):
    """Rerun the XML flavors sample tests with all extensions loaded."""
    all_extensions = True
+
+
class ImagesSampleJsonTest(ApiSampleTestBaseV2):
    """Sample tests for the images proxy resource.

    Image ids come from the fake image service via
    fake.get_valid_image_id().
    """

    def test_images_list(self):
        # Get api sample of images get list request.
        response = self._do_get('images')
        subs = self._get_regexes()
        self._verify_response('images-list-get-resp', subs, response, 200)

    def test_image_get(self):
        # Get api sample of one single image details request.
        image_id = fake.get_valid_image_id()
        response = self._do_get('images/%s' % image_id)
        subs = self._get_regexes()
        subs['image_id'] = image_id
        self._verify_response('image-get-resp', subs, response, 200)

    def test_images_details(self):
        # Get api sample of all images details request.
        response = self._do_get('images/detail')
        subs = self._get_regexes()
        self._verify_response('images-details-get-resp', subs, response, 200)

    def test_image_metadata_get(self):
        # Get api sample of an image metadata request.
        image_id = fake.get_valid_image_id()
        response = self._do_get('images/%s/metadata' % image_id)
        subs = self._get_regexes()
        subs['image_id'] = image_id
        self._verify_response('image-metadata-get-resp', subs, response, 200)

    def test_image_metadata_post(self):
        # Get api sample to update metadata of an image metadata request.
        image_id = fake.get_valid_image_id()
        response = self._do_post(
            'images/%s/metadata' % image_id,
            'image-metadata-post-req', {})
        subs = self._get_regexes()
        self._verify_response('image-metadata-post-resp', subs, response, 200)

    def test_image_metadata_put(self):
        # Get api sample of image metadata put request.
        image_id = fake.get_valid_image_id()
        response = self._do_put('images/%s/metadata' % image_id,
                                'image-metadata-put-req', {})
        subs = self._get_regexes()
        self._verify_response('image-metadata-put-resp', subs, response, 200)

    def test_image_meta_key_get(self):
        # Get api sample of an image metadata key request.
        image_id = fake.get_valid_image_id()
        key = "kernel_id"
        response = self._do_get('images/%s/metadata/%s' % (image_id, key))
        subs = self._get_regexes()
        self._verify_response('image-meta-key-get', subs, response, 200)

    def test_image_meta_key_put(self):
        # Get api sample of image metadata key put request.
        image_id = fake.get_valid_image_id()
        key = "auto_disk_config"
        response = self._do_put('images/%s/metadata/%s' % (image_id, key),
                                'image-meta-key-put-req', {})
        subs = self._get_regexes()
        self._verify_response('image-meta-key-put-resp', subs, response, 200)
+
+
class ImagesSampleXmlTest(ImagesSampleJsonTest):
    """Repeat the images sample tests with the XML content type."""
    ctype = 'xml'
+
+
class LimitsSampleJsonTest(ApiSampleTestBaseV2):
    """Sample test for the limits resource."""

    def test_limits_get(self):
        resp = self._do_get('limits')
        self._verify_response('limit-get-resp',
                              self._get_regexes(), resp, 200)
+
+
class LimitsSampleXmlTest(LimitsSampleJsonTest):
    """Repeat the limits sample test with the XML content type."""
    ctype = 'xml'
+
+
class ServersActionsJsonTest(ServersSampleBase):
    """Sample tests for server actions posted to servers/<id>/action."""

    def _test_server_action(self, uuid, action,
                            subs=None, resp_tpl=None, code=202):
        """POST one action request built from the sample templates.

        The request template name is derived from the action name; when
        resp_tpl is None the action is expected to return an empty body.
        """
        subs = subs or {}
        subs.update({'action': action})
        response = self._do_post('servers/%s/action' % uuid,
                                 'server-action-%s' % action.lower(),
                                 subs)
        if resp_tpl:
            subs.update(self._get_regexes())
            self._verify_response(resp_tpl, subs, response, code)
        else:
            self.assertEqual(response.status_code, code)
            self.assertEqual(response.content, "")

    def test_server_password(self):
        uuid = self._post_server()
        self._test_server_action(uuid, "changePassword",
                                 {"password": "foo"})

    def test_server_reboot_hard(self):
        uuid = self._post_server()
        self._test_server_action(uuid, "reboot",
                                 {"type": "HARD"})

    def test_server_reboot_soft(self):
        uuid = self._post_server()
        self._test_server_action(uuid, "reboot",
                                 {"type": "SOFT"})

    def test_server_rebuild(self):
        uuid = self._post_server()
        image = self.api.get_images()[0]['id']
        subs = {'host': self._get_host(),
                'uuid': image,
                'name': 'foobar',
                'pass': 'seekr3t',
                'ip': '1.2.3.4',
                'ip6': 'fe80::100',
                'hostid': '[a-f0-9]+',
                }
        self._test_server_action(uuid, 'rebuild', subs,
                                 'server-action-rebuild-resp')

    def test_server_resize(self):
        # Resizing to the same host requires an explicit opt-in flag.
        self.flags(allow_resize_to_same_host=True)
        uuid = self._post_server()
        self._test_server_action(uuid, "resize",
                                 {"id": 2,
                                  "host": self._get_host()})
        return uuid

    def test_server_revert_resize(self):
        uuid = self.test_server_resize()
        self._test_server_action(uuid, "revertResize")

    def test_server_confirm_resize(self):
        uuid = self.test_server_resize()
        self._test_server_action(uuid, "confirmResize", code=204)

    def test_server_create_image(self):
        uuid = self._post_server()
        self._test_server_action(uuid, 'createImage',
                                 {'name': 'foo-image',
                                  'meta_var': 'myvar',
                                  'meta_val': 'foobar'})
+
+
class ServersActionsXmlTest(ServersActionsJsonTest):
    """Repeat the server action tests with the XML content type."""
    ctype = 'xml'
+
+
class ServersActionsAllJsonTest(ServersActionsJsonTest):
    """Rerun the server action tests with all extensions loaded."""
    all_extensions = True
+
+
class ServersActionsAllXmlTest(ServersActionsXmlTest):
    """Rerun the XML server action tests with all extensions loaded."""
    all_extensions = True
+
+
class ServerStartStopJsonTest(ServersSampleBase):
    """Sample tests for the os-start/os-stop server actions."""

    extension_name = "nova.api.openstack.compute.contrib" + \
        ".server_start_stop.Server_start_stop"

    def _test_server_action(self, server_id, action):
        resp = self._do_post('servers/%s/action' % server_id,
                             'server_start_stop',
                             {'action': action})
        self.assertEqual(resp.status_code, 202)
        self.assertEqual(resp.content, "")

    def test_server_start(self):
        # A server must be stopped before it can be started again.
        server_id = self._post_server()
        self._test_server_action(server_id, 'os-stop')
        self._test_server_action(server_id, 'os-start')

    def test_server_stop(self):
        server_id = self._post_server()
        self._test_server_action(server_id, 'os-stop')
+
+
class ServerStartStopXmlTest(ServerStartStopJsonTest):
    """Repeat the start/stop tests with the XML content type."""
    ctype = 'xml'
+
+
class UserDataJsonTest(ApiSampleTestBaseV2):
    """Sample test for booting a server with user_data."""

    extension_name = "nova.api.openstack.compute.contrib.user_data.User_data"

    def test_user_data_post(self):
        user_data_contents = '#!/bin/bash\n/bin/su\necho "I am in you!"\n'
        subs = {
            'image_id': fake.get_valid_image_id(),
            'host': self._get_host(),
            # The API expects the user data to arrive base64 encoded.
            'user_data': base64.b64encode(user_data_contents),
        }
        resp = self._do_post('servers', 'userdata-post-req', subs)
        subs.update(self._get_regexes())
        self._verify_response('userdata-post-resp', subs, resp, 202)
+
+
class UserDataXmlTest(UserDataJsonTest):
    """Repeat the user_data test with the XML content type."""
    ctype = 'xml'
+
+
class FlavorsExtraDataJsonTest(ApiSampleTestBaseV2):
    """Sample tests for the Flavorextradata extension."""

    extension_name = ('nova.api.openstack.compute.contrib.flavorextradata.'
                      'Flavorextradata')

    def _get_flags(self):
        flags = super(FlavorsExtraDataJsonTest, self)._get_flags()
        flags['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        # Flavorextradata extension also needs Flavormanage to be loaded.
        flags['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
        return flags

    def test_flavors_extra_data_get(self):
        resp = self._do_get('flavors/1')
        subs = {'flavor_id': 1, 'flavor_name': 'm1.tiny'}
        subs.update(self._get_regexes())
        self._verify_response('flavors-extra-data-get-resp',
                              subs, resp, 200)

    def test_flavors_extra_data_list(self):
        resp = self._do_get('flavors/detail')
        self._verify_response('flavors-extra-data-list-resp',
                              self._get_regexes(), resp, 200)

    def test_flavors_extra_data_create(self):
        subs = {'flavor_id': 666, 'flavor_name': 'flavortest'}
        resp = self._do_post('flavors',
                             'flavors-extra-data-post-req',
                             subs)
        subs.update(self._get_regexes())
        self._verify_response('flavors-extra-data-post-resp',
                              subs, resp, 200)
+
+
class FlavorsExtraDataXmlTest(FlavorsExtraDataJsonTest):
    """Repeat the flavor extra-data tests with the XML content type."""
    ctype = 'xml'
+
+
class FlavorRxtxJsonTest(ApiSampleTestBaseV2):
    """Sample tests for the Flavor_rxtx extension."""

    extension_name = ('nova.api.openstack.compute.contrib.flavor_rxtx.'
                      'Flavor_rxtx')

    def _get_flags(self):
        flags = super(FlavorRxtxJsonTest, self)._get_flags()
        flags['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        # FlavorRxtx extension also needs Flavormanage to be loaded.
        flags['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
        return flags

    def test_flavor_rxtx_get(self):
        resp = self._do_get('flavors/1')
        subs = {'flavor_id': 1, 'flavor_name': 'm1.tiny'}
        subs.update(self._get_regexes())
        self._verify_response('flavor-rxtx-get-resp', subs, resp, 200)

    def test_flavors_rxtx_list(self):
        resp = self._do_get('flavors/detail')
        self._verify_response('flavor-rxtx-list-resp',
                              self._get_regexes(), resp, 200)

    def test_flavors_rxtx_create(self):
        subs = {'flavor_id': 100, 'flavor_name': 'flavortest'}
        resp = self._do_post('flavors',
                             'flavor-rxtx-post-req',
                             subs)
        subs.update(self._get_regexes())
        self._verify_response('flavor-rxtx-post-resp', subs, resp, 200)
+
+
class FlavorRxtxXmlTest(FlavorRxtxJsonTest):
    """Repeat the flavor rxtx tests with the XML content type."""
    ctype = 'xml'
+
+
class FlavorSwapJsonTest(ApiSampleTestBaseV2):
    """Sample tests for the Flavor_swap extension."""

    extension_name = ('nova.api.openstack.compute.contrib.flavor_swap.'
                      'Flavor_swap')

    def _get_flags(self):
        flags = super(FlavorSwapJsonTest, self)._get_flags()
        flags['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        # FlavorSwap extension also needs Flavormanage to be loaded.
        flags['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
        return flags

    def test_flavor_swap_get(self):
        resp = self._do_get('flavors/1')
        subs = {'flavor_id': 1, 'flavor_name': 'm1.tiny'}
        subs.update(self._get_regexes())
        self._verify_response('flavor-swap-get-resp', subs, resp, 200)

    def test_flavor_swap_list(self):
        resp = self._do_get('flavors/detail')
        self._verify_response('flavor-swap-list-resp',
                              self._get_regexes(), resp, 200)

    def test_flavor_swap_create(self):
        subs = {'flavor_id': 100, 'flavor_name': 'flavortest'}
        resp = self._do_post('flavors',
                             'flavor-swap-post-req',
                             subs)
        subs.update(self._get_regexes())
        self._verify_response('flavor-swap-post-resp', subs, resp, 200)
+
+
class FlavorSwapXmlTest(FlavorSwapJsonTest):
    """Repeat the flavor swap tests with the XML content type."""
    ctype = 'xml'
+
+
class SecurityGroupsSampleJsonTest(ServersSampleBase):
    """Sample tests for the security_groups extension."""

    extension_name = "nova.api.openstack.compute.contrib" + \
        ".security_groups.Security_groups"

    def _get_create_subs(self):
        return {'group_name': 'test',
                'description': 'description'}

    def _create_security_group(self):
        return self._do_post('os-security-groups',
                             'security-group-post-req',
                             self._get_create_subs())

    def _add_group(self, server_id):
        return self._do_post('servers/%s/action' % server_id,
                             'security-group-add-post-req',
                             {'group_name': 'test'})

    def test_security_group_create(self):
        resp = self._create_security_group()
        self._verify_response('security-groups-create-resp',
                              self._get_create_subs(), resp, 200)

    def test_security_groups_list(self):
        # Get api sample of security groups get list request.
        resp = self._do_get('os-security-groups')
        self._verify_response('security-groups-list-get-resp',
                              self._get_regexes(), resp, 200)

    def test_security_groups_get(self):
        # Get api sample of security groups get request.
        resp = self._do_get('os-security-groups/1')
        self._verify_response('security-groups-get-resp',
                              self._get_regexes(), resp, 200)

    def test_security_groups_list_server(self):
        # Get api sample of security groups for a specific server.
        server_id = self._post_server()
        resp = self._do_get('servers/%s/os-security-groups' % server_id)
        self._verify_response('server-security-groups-list-resp',
                              self._get_regexes(), resp, 200)

    def test_security_groups_add(self):
        self._create_security_group()
        server_id = self._post_server()
        resp = self._add_group(server_id)
        self.assertEqual(resp.status_code, 202)
        self.assertEqual(resp.content, '')

    def test_security_groups_remove(self):
        self._create_security_group()
        server_id = self._post_server()
        self._add_group(server_id)
        resp = self._do_post('servers/%s/action' % server_id,
                             'security-group-remove-post-req',
                             {'group_name': 'test'})
        self.assertEqual(resp.status_code, 202)
        self.assertEqual(resp.content, '')
+
+
class SecurityGroupsSampleXmlTest(SecurityGroupsSampleJsonTest):
    """Repeat the security group tests with the XML content type."""
    ctype = 'xml'
+
+
class SecurityGroupDefaultRulesSampleJsonTest(ServersSampleBase):
    """Sample tests for the security group default rules extension."""

    extension_name = ('nova.api.openstack.compute.contrib'
                      '.security_group_default_rules'
                      '.Security_group_default_rules')

    def test_security_group_default_rules_create(self):
        resp = self._do_post('os-security-group-default-rules',
                             'security-group-default-rules-create-req',
                             {})
        self._verify_response('security-group-default-rules-create-resp',
                              {}, resp, 200)

    def test_security_group_default_rules_list(self):
        # A rule must exist before it can be listed.
        self.test_security_group_default_rules_create()
        resp = self._do_get('os-security-group-default-rules')
        self._verify_response('security-group-default-rules-list-resp',
                              {}, resp, 200)

    def test_security_group_default_rules_show(self):
        self.test_security_group_default_rules_create()
        resp = self._do_get('os-security-group-default-rules/1')
        self._verify_response('security-group-default-rules-show-resp',
                              {}, resp, 200)
+
+
class SecurityGroupDefaultRulesSampleXmlTest(
        SecurityGroupDefaultRulesSampleJsonTest):
    """Repeat the default rules tests with the XML content type."""
    ctype = 'xml'
+
+
class SchedulerHintsJsonTest(ApiSampleTestBaseV2):
    """Sample test for booting a server with scheduler hints."""

    extension_name = ("nova.api.openstack.compute.contrib.scheduler_hints."
                      "Scheduler_hints")

    def test_scheduler_hints_post(self):
        # Get api sample of scheduler hint post request.
        hints = {'image_id': fake.get_valid_image_id(),
                 'image_near': str(uuid_lib.uuid4())}
        resp = self._do_post('servers', 'scheduler-hints-post-req', hints)
        self._verify_response('scheduler-hints-post-resp',
                              self._get_regexes(), resp, 202)
+
+
class SchedulerHintsXmlTest(SchedulerHintsJsonTest):
    """Repeat the scheduler hints test with the XML content type."""
    ctype = 'xml'
+
+
class ConsoleOutputSampleJsonTest(ServersSampleBase):
    """Sample test for the os-getConsoleOutput server action."""

    extension_name = "nova.api.openstack.compute.contrib" + \
        ".console_output.Console_output"

    def test_get_console_output(self):
        server_id = self._post_server()
        resp = self._do_post('servers/%s/action' % server_id,
                             'console-output-post-req',
                             {'action': 'os-getConsoleOutput'})
        self._verify_response('console-output-post-resp',
                              self._get_regexes(), resp, 200)
+
+
class ConsoleOutputSampleXmlTest(ConsoleOutputSampleJsonTest):
    """Repeat the console output test with the XML content type."""
    ctype = 'xml'
+
+
class ExtendedServerAttributesJsonTest(ServersSampleBase):
    """Sample tests for the Extended_server_attributes extension."""

    extension_name = "nova.api.openstack.compute.contrib" + \
        ".extended_server_attributes" + \
        ".Extended_server_attributes"

    def test_show(self):
        uuid = self._post_server()

        response = self._do_get('servers/%s' % uuid)
        subs = self._get_regexes()
        subs['hostid'] = '[a-f0-9]+'
        subs['id'] = uuid
        # Raw string: \d is a regex escape, not a valid string escape.
        subs['instance_name'] = r'instance-\d{8}'
        subs['hypervisor_hostname'] = r'[\w\.\-]+'
        self._verify_response('server-get-resp', subs, response, 200)

    def test_detail(self):
        uuid = self._post_server()

        response = self._do_get('servers/detail')
        subs = self._get_regexes()
        subs['hostid'] = '[a-f0-9]+'
        subs['id'] = uuid
        subs['instance_name'] = r'instance-\d{8}'
        subs['hypervisor_hostname'] = r'[\w\.\-]+'
        self._verify_response('servers-detail-resp', subs, response, 200)
+
+
class ExtendedServerAttributesXmlTest(ExtendedServerAttributesJsonTest):
    """Repeat the extended attributes tests with the XML content type."""
    ctype = 'xml'
+
+
class FloatingIpsJsonTest(ApiSampleTestBaseV2):
    """Sample tests for the os-floating-ips extension."""

    extension_name = "nova.api.openstack.compute.contrib." \
                     "floating_ips.Floating_ips"

    def setUp(self):
        """Seed the database with a small pool of floating IPs."""
        super(FloatingIpsJsonTest, self).setUp()
        pool = CONF.default_floating_pool
        interface = CONF.public_interface

        self.ip_pool = [
            {
                'address': "10.10.10.1",
                'pool': pool,
                'interface': interface
            },
            {
                'address': "10.10.10.2",
                'pool': pool,
                'interface': interface
            },
            {
                'address': "10.10.10.3",
                'pool': pool,
                'interface': interface
            },
        ]
        self.compute.db.floating_ip_bulk_create(
            context.get_admin_context(), self.ip_pool)

    def tearDown(self):
        """Remove the floating IPs created in setUp."""
        self.compute.db.floating_ip_bulk_destroy(
            context.get_admin_context(), self.ip_pool)
        super(FloatingIpsJsonTest, self).tearDown()

    def test_floating_ips_list_empty(self):
        response = self._do_get('os-floating-ips')

        subs = self._get_regexes()
        self._verify_response('floating-ips-list-empty-resp',
                              subs, response, 200)

    def test_floating_ips_list(self):
        # Allocate two IPs so the list response is non-empty.
        self._do_post('os-floating-ips',
                      'floating-ips-create-nopool-req',
                      {})
        self._do_post('os-floating-ips',
                      'floating-ips-create-nopool-req',
                      {})

        response = self._do_get('os-floating-ips')
        subs = self._get_regexes()
        self._verify_response('floating-ips-list-resp',
                              subs, response, 200)

    def test_floating_ips_create_nopool(self):
        response = self._do_post('os-floating-ips',
                                 'floating-ips-create-nopool-req',
                                 {})
        subs = self._get_regexes()
        self._verify_response('floating-ips-create-resp',
                              subs, response, 200)

    def test_floating_ips_create(self):
        response = self._do_post('os-floating-ips',
                                 'floating-ips-create-req',
                                 {"pool": CONF.default_floating_pool})
        subs = self._get_regexes()
        self._verify_response('floating-ips-create-resp', subs, response, 200)

    def test_floating_ips_get(self):
        self.test_floating_ips_create()
        # NOTE(sdague): the first floating ip will always have 1 as an id,
        # but it would be better if we could get this from the create
        response = self._do_get('os-floating-ips/%d' % 1)
        subs = self._get_regexes()
        self._verify_response('floating-ips-create-resp', subs, response, 200)

    def test_floating_ips_delete(self):
        self.test_floating_ips_create()
        response = self._do_delete('os-floating-ips/%d' % 1)
        self.assertEqual(response.status_code, 202)
        self.assertEqual(response.content, "")
+
+
class ExtendedFloatingIpsJsonTest(FloatingIpsJsonTest):
    # Re-run all floating-IP samples with the extended_floating_ips
    # extension loaded on top of the base floating_ips extension.
    extends_name = ("nova.api.openstack.compute.contrib."
                    "floating_ips.Floating_ips")
    extension_name = ("nova.api.openstack.compute.contrib."
                      "extended_floating_ips.Extended_floating_ips")
+
+
class FloatingIpsXmlTest(FloatingIpsJsonTest):
    # Re-run the floating-IP samples with an XML content type.
    ctype = 'xml'
+
+
class ExtendedFloatingIpsXmlTest(ExtendedFloatingIpsJsonTest):
    # Re-run the extended floating-IP samples with an XML content type.
    ctype = 'xml'
+
+
class FloatingIpsBulkJsonTest(ApiSampleTestBaseV2):
    """API samples for the os-floating-ips-bulk extension (JSON)."""

    extension_name = "nova.api.openstack.compute.contrib." \
        "floating_ips_bulk.Floating_ips_bulk"

    def setUp(self):
        """Seed the DB with three floating IPs; the third is pinned to
        "testHost" so the list-by-host sample has a match.
        """
        super(FloatingIpsBulkJsonTest, self).setUp()
        pool = CONF.default_floating_pool
        interface = CONF.public_interface

        self.ip_pool = [
            {
                'address': "10.10.10.1",
                'pool': pool,
                'interface': interface
            },
            {
                'address': "10.10.10.2",
                'pool': pool,
                'interface': interface
            },
            {
                'address': "10.10.10.3",
                'pool': pool,
                'interface': interface,
                'host': "testHost"
            },
        ]
        self.compute.db.floating_ip_bulk_create(
            context.get_admin_context(), self.ip_pool)

    def tearDown(self):
        # Remove the addresses created in setUp so they cannot leak into
        # other tests.
        self.compute.db.floating_ip_bulk_destroy(
            context.get_admin_context(), self.ip_pool)
        super(FloatingIpsBulkJsonTest, self).tearDown()

    def test_floating_ips_bulk_list(self):
        response = self._do_get('os-floating-ips-bulk')
        subs = self._get_regexes()
        self._verify_response('floating-ips-bulk-list-resp',
                              subs, response, 200)

    def test_floating_ips_bulk_list_by_host(self):
        # Only the third IP from setUp is associated with "testHost".
        response = self._do_get('os-floating-ips-bulk/testHost')
        subs = self._get_regexes()
        self._verify_response('floating-ips-bulk-list-by-host-resp',
                              subs, response, 200)

    def test_floating_ips_bulk_create(self):
        response = self._do_post('os-floating-ips-bulk',
                                 'floating-ips-bulk-create-req',
                                 {"ip_range": "192.168.1.0/24",
                                  "pool": CONF.default_floating_pool,
                                  "interface": CONF.public_interface})
        subs = self._get_regexes()
        self._verify_response('floating-ips-bulk-create-resp', subs,
                              response, 200)

    def test_floating_ips_bulk_delete(self):
        response = self._do_put('os-floating-ips-bulk/delete',
                                'floating-ips-bulk-delete-req',
                                {"ip_range": "192.168.1.0/24"})
        subs = self._get_regexes()
        self._verify_response('floating-ips-bulk-delete-resp', subs,
                              response, 200)
+
+
class FloatingIpsBulkXmlTest(FloatingIpsBulkJsonTest):
    # Re-run the bulk floating-IP samples with an XML content type.
    ctype = 'xml'
+
+
class KeyPairsSampleJsonTest(ApiSampleTestBaseV2):
    """API samples for the os-keypairs extension (JSON)."""

    extension_name = "nova.api.openstack.compute.contrib.keypairs.Keypairs"

    def generalize_subs(self, subs, vanilla_regexes):
        # Key names are generated per test run; match any of them.
        subs['keypair_name'] = 'keypair-[0-9a-f-]+'
        return subs

    def test_keypairs_post(self, public_key=None):
        """Get api sample of key pairs post request."""
        name = 'keypair-' + str(uuid_lib.uuid4())
        resp = self._do_post('os-keypairs', 'keypairs-post-req',
                             {'keypair_name': name})
        regexes = self._get_regexes()
        regexes['keypair_name'] = '(%s)' % name
        self._verify_response('keypairs-post-resp', regexes, resp, 200)
        # NOTE(maurosr): returning the key name is necessary because the
        # verification yields the label of the last compared item in the
        # response, not necessarily the key name.
        return name

    def test_keypairs_import_key_post(self):
        # Get api sample of key pairs post to import user's key.
        name = 'keypair-' + str(uuid_lib.uuid4())
        req_subs = {
            'keypair_name': name,
            'public_key': "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGg"
                          "B4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0l"
                          "RE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv"
                          "9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYc"
                          "pSxsIbECHw== Generated-by-Nova"
        }
        resp = self._do_post('os-keypairs', 'keypairs-import-post-req',
                             req_subs)
        regexes = self._get_regexes()
        regexes['keypair_name'] = '(%s)' % name
        self._verify_response('keypairs-import-post-resp', regexes, resp, 200)

    def test_keypairs_list(self):
        # Get api sample of key pairs list request.
        name = self.test_keypairs_post()
        resp = self._do_get('os-keypairs')
        regexes = self._get_regexes()
        regexes['keypair_name'] = '(%s)' % name
        self._verify_response('keypairs-list-resp', regexes, resp, 200)

    def test_keypairs_get(self):
        # Get api sample of key pairs get request.
        name = self.test_keypairs_post()
        resp = self._do_get('os-keypairs/%s' % name)
        regexes = self._get_regexes()
        regexes['keypair_name'] = '(%s)' % name
        self._verify_response('keypairs-get-resp', regexes, resp, 200)
+
+
class KeyPairsSampleXmlTest(KeyPairsSampleJsonTest):
    # Re-run the keypair samples with an XML content type.
    ctype = 'xml'
+
+
class RescueJsonTest(ServersSampleBase):
    """API samples for the os-rescue server actions (JSON)."""

    extension_name = ("nova.api.openstack.compute.contrib"
                      ".rescue.Rescue")

    def _rescue(self, uuid):
        subs = {'password': 'MySecretPass'}
        resp = self._do_post('servers/%s/action' % uuid,
                             'server-rescue-req', subs)
        self._verify_response('server-rescue', subs, resp, 200)

    def _unrescue(self, uuid):
        resp = self._do_post('servers/%s/action' % uuid,
                             'server-unrescue-req', {})
        self.assertEqual(202, resp.status_code)

    def _check_server_status(self, uuid, status, template):
        # Fetch the server and verify the expected state is reported.
        resp = self._do_get('servers/%s' % uuid)
        subs = self._get_regexes()
        subs['hostid'] = '[a-f0-9]+'
        subs['id'] = uuid
        subs['status'] = status
        self._verify_response(template, subs, resp, 200)

    def test_server_rescue(self):
        uuid = self._post_server()
        self._rescue(uuid)
        # The 'RESCUE' state must now be reported for the server.
        self._check_server_status(uuid, 'RESCUE', 'server-get-resp-rescue')

    def test_server_unrescue(self):
        uuid = self._post_server()
        self._rescue(uuid)
        self._unrescue(uuid)
        # The server must be back in the 'ACTIVE' state.
        self._check_server_status(uuid, 'ACTIVE', 'server-get-resp-unrescue')
+
+
class RescueXmlTest(RescueJsonTest):
    # Re-run the rescue samples with an XML content type.
    ctype = 'xml'
+
+
class ExtendedRescueWithImageJsonTest(ServersSampleBase):
    """API samples for rescuing a server with an explicit rescue image."""

    extension_name = ("nova.api.openstack.compute.contrib"
                      ".extended_rescue_with_image.Extended_rescue_with_image")

    def _get_flags(self):
        f = super(ExtendedRescueWithImageJsonTest, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        # ExtendedRescueWithImage extension also needs Rescue to be loaded.
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.rescue.Rescue')
        return f

    def _rescue(self, uuid):
        # Unlike the base Rescue extension, the request also names the
        # image to boot the rescue environment from.
        req_subs = {
            'password': 'MySecretPass',
            'rescue_image_ref': fake.get_valid_image_id()
        }
        response = self._do_post('servers/%s/action' % uuid,
                                 'server-rescue-req', req_subs)
        self._verify_response('server-rescue', req_subs, response, 200)

    def test_server_rescue(self):
        uuid = self._post_server()

        self._rescue(uuid)

        # Do a server get to make sure that the 'RESCUE' state is set
        response = self._do_get('servers/%s' % uuid)
        subs = self._get_regexes()
        subs['hostid'] = '[a-f0-9]+'
        subs['id'] = uuid
        subs['status'] = 'RESCUE'

        self._verify_response('server-get-resp-rescue', subs, response, 200)
+
+
class ExtendedRescueWithImageXmlTest(ExtendedRescueWithImageJsonTest):
    # Re-run the rescue-with-image samples with an XML content type.
    ctype = 'xml'
+
+
class ShelveJsonTest(ServersSampleBase):
    """API samples for the os-shelve server actions (JSON)."""

    extension_name = "nova.api.openstack.compute.contrib.shelve.Shelve"

    def setUp(self):
        super(ShelveJsonTest, self).setUp()
        # Don't offload instance, so we can test the offload call.
        # Use self.flags() rather than CONF.set_override() directly so
        # the override is automatically reverted at test cleanup and
        # cannot leak into other tests.
        self.flags(shelved_offload_time=-1)

    def _test_server_action(self, uuid, template, action):
        """POST the given action template and expect an empty 202."""
        response = self._do_post('servers/%s/action' % uuid,
                                 template, {'action': action})
        self.assertEqual(response.status_code, 202)
        self.assertEqual(response.content, "")

    def test_shelve(self):
        uuid = self._post_server()
        self._test_server_action(uuid, 'os-shelve', 'shelve')

    def test_shelve_offload(self):
        # Shelve first; only a shelved server can be offloaded.
        uuid = self._post_server()
        self._test_server_action(uuid, 'os-shelve', 'shelve')
        self._test_server_action(uuid, 'os-shelve-offload', 'shelveOffload')

    def test_unshelve(self):
        uuid = self._post_server()
        self._test_server_action(uuid, 'os-shelve', 'shelve')
        self._test_server_action(uuid, 'os-unshelve', 'unshelve')
+
+
class ShelveXmlTest(ShelveJsonTest):
    # Re-run the shelve samples with an XML content type.
    ctype = 'xml'
+
+
class VirtualInterfacesJsonTest(ServersSampleBase):
    """API samples for the os-virtual-interfaces extension (JSON)."""

    extension_name = ("nova.api.openstack.compute.contrib"
                      ".virtual_interfaces.Virtual_interfaces")

    def test_vifs_list(self):
        server_id = self._post_server()
        resp = self._do_get('servers/%s/os-virtual-interfaces' % server_id)
        subs = self._get_regexes()
        # MAC addresses are generated, so match any well-formed one.
        subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
        self._verify_response('vifs-list-resp', subs, resp, 200)
+
+
class VirtualInterfacesXmlTest(VirtualInterfacesJsonTest):
    # Re-run the virtual-interface samples with an XML content type.
    ctype = 'xml'
+
+
class CloudPipeSampleJsonTest(ApiSampleTestBaseV2):
    """API samples for the os-cloudpipe extension (JSON)."""

    extension_name = "nova.api.openstack.compute.contrib.cloudpipe.Cloudpipe"

    def setUp(self):
        super(CloudPipeSampleJsonTest, self).setUp()

        def get_user_data(self, project_id):
            """Stub method to generate user data for cloudpipe tests."""
            return "VVNFUiBEQVRB\n"

        def network_api_get(self, context, network_uuid):
            """Stub to get a valid network and its information."""
            return {'vpn_public_address': '127.0.0.1',
                    'vpn_public_port': 22}

        self.stubs.Set(pipelib.CloudPipe, 'get_encoded_zip', get_user_data)
        self.stubs.Set(network_api.API, "get",
                       network_api_get)

    def generalize_subs(self, subs, vanilla_regexes):
        # Project ids are generated per test; match any of them.
        subs['project_id'] = 'cloudpipe-[0-9a-f-]+'
        return subs

    def test_cloud_pipe_create(self):
        # Get api samples of cloud pipe extension creation.
        self.flags(vpn_image_id=fake.get_valid_image_id())
        project = {'project_id': 'cloudpipe-' + str(uuid_lib.uuid4())}
        response = self._do_post('os-cloudpipe', 'cloud-pipe-create-req',
                                 project)
        subs = self._get_regexes()
        subs.update(project)
        subs['image_id'] = CONF.vpn_image_id
        self._verify_response('cloud-pipe-create-resp', subs, response, 200)
        # Returned so test_cloud_pipe_list can reuse the created project.
        return project

    def test_cloud_pipe_list(self):
        # Get api samples of cloud pipe extension get request.
        project = self.test_cloud_pipe_create()
        response = self._do_get('os-cloudpipe')
        subs = self._get_regexes()
        subs.update(project)
        subs['image_id'] = CONF.vpn_image_id
        self._verify_response('cloud-pipe-get-resp', subs, response, 200)
+
+
class CloudPipeSampleXmlTest(CloudPipeSampleJsonTest):
    # Re-run the cloudpipe samples with an XML content type.
    ctype = "xml"
+
+
class CloudPipeUpdateJsonTest(ApiSampleTestBaseV2):
    """API samples for the cloudpipe configure-project call (JSON)."""

    extension_name = ("nova.api.openstack.compute.contrib"
                      ".cloudpipe_update.Cloudpipe_update")

    def _get_flags(self):
        flags = super(CloudPipeUpdateJsonTest, self)._get_flags()
        flags['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        # Cloudpipe_update also needs cloudpipe to be loaded
        flags['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.cloudpipe.Cloudpipe')
        return flags

    def test_cloud_pipe_update(self):
        body = {'vpn_ip': '192.168.1.1',
                'vpn_port': 2000}
        resp = self._do_put('os-cloudpipe/configure-project',
                            'cloud-pipe-update-req', body)
        self.assertEqual(202, resp.status_code)
        self.assertEqual("", resp.content)
+
+
class CloudPipeUpdateXmlTest(CloudPipeUpdateJsonTest):
    # Re-run the cloudpipe-update samples with an XML content type.
    ctype = "xml"
+
+
class AgentsJsonTest(ApiSampleTestBaseV2):
    """API samples for the os-agents extension (JSON)."""

    extension_name = "nova.api.openstack.compute.contrib.agents.Agents"

    def _get_flags(self):
        f = super(AgentsJsonTest, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        return f

    def setUp(self):
        """Stub out the agent-build DB API with an in-memory fake."""
        super(AgentsJsonTest, self).setUp()

        # The single canned agent build served by the fake DB layer.
        fake_agents_list = [{'url': 'http://example.com/path/to/resource',
                             'hypervisor': 'hypervisor',
                             'architecture': 'x86',
                             'os': 'os',
                             'version': '8.0',
                             'md5hash': 'add6bb58e139be103324d04d82d8f545',
                             'id': 1}]

        def fake_agent_build_create(context, values):
            # Every created build gets id 1, matching the canned data.
            values['id'] = 1
            agent_build_ref = models.AgentBuild()
            agent_build_ref.update(values)
            return agent_build_ref

        def fake_agent_build_get_all(context, hypervisor):
            # Filter the canned list by hypervisor when one is given.
            agent_build_all = []
            for agent in fake_agents_list:
                if hypervisor and hypervisor != agent['hypervisor']:
                    continue
                agent_build_ref = models.AgentBuild()
                agent_build_ref.update(agent)
                agent_build_all.append(agent_build_ref)
            return agent_build_all

        def fake_agent_build_update(context, agent_build_id, values):
            # Update results are verified through the HTTP response only.
            pass

        def fake_agent_build_destroy(context, agent_update_id):
            pass

        self.stubs.Set(db, "agent_build_create",
                       fake_agent_build_create)
        self.stubs.Set(db, "agent_build_get_all",
                       fake_agent_build_get_all)
        self.stubs.Set(db, "agent_build_update",
                       fake_agent_build_update)
        self.stubs.Set(db, "agent_build_destroy",
                       fake_agent_build_destroy)

    def test_agent_create(self):
        # Creates a new agent build.
        project = {'url': 'http://example.com/path/to/resource',
                   'hypervisor': 'hypervisor',
                   'architecture': 'x86',
                   'os': 'os',
                   'version': '8.0',
                   'md5hash': 'add6bb58e139be103324d04d82d8f545'
                   }
        response = self._do_post('os-agents', 'agent-post-req',
                                 project)
        project['agent_id'] = 1
        self._verify_response('agent-post-resp', project, response, 200)
        return project

    def test_agent_list(self):
        # Return a list of all agent builds.
        response = self._do_get('os-agents')
        project = {'url': 'http://example.com/path/to/resource',
                   'hypervisor': 'hypervisor',
                   'architecture': 'x86',
                   'os': 'os',
                   'version': '8.0',
                   'md5hash': 'add6bb58e139be103324d04d82d8f545',
                   'agent_id': 1
                   }
        self._verify_response('agents-get-resp', project, response, 200)

    def test_agent_update(self):
        # Update an existing agent build.
        agent_id = 1
        subs = {'version': '7.0',
                'url': 'http://example.com/path/to/resource',
                'md5hash': 'add6bb58e139be103324d04d82d8f545'}
        response = self._do_put('os-agents/%s' % agent_id,
                                'agent-update-put-req', subs)
        subs['agent_id'] = 1
        self._verify_response('agent-update-put-resp', subs, response, 200)

    def test_agent_delete(self):
        # Deletes an existing agent build.
        agent_id = 1
        response = self._do_delete('os-agents/%s' % agent_id)
        self.assertEqual(response.status_code, 200)
+
+
class AgentsXmlTest(AgentsJsonTest):
    # Re-run the agent samples with an XML content type.
    ctype = "xml"
+
+
class FixedIpJsonTest(ApiSampleTestBaseV2):
    """API samples for the os-fixed-ips extension (JSON)."""

    extension_name = "nova.api.openstack.compute.contrib.fixed_ips.Fixed_ips"

    def _get_flags(self):
        f = super(FixedIpJsonTest, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        return f

    def setUp(self):
        """Stub the fixed-IP DB API with two canned addresses."""
        super(FixedIpJsonTest, self).setUp()

        instance = dict(test_utils.get_test_instance(),
                        hostname='openstack', host='host')
        fake_fixed_ips = [{'id': 1,
                           'address': '192.168.1.1',
                           'network_id': 1,
                           'virtual_interface_id': 1,
                           'instance_uuid': '1',
                           'allocated': False,
                           'leased': False,
                           'reserved': False,
                           'created_at': None,
                           'deleted_at': None,
                           'updated_at': None,
                           'deleted': None,
                           'instance': instance,
                           'network': test_network.fake_network,
                           'host': None},
                          {'id': 2,
                           'address': '192.168.1.2',
                           'network_id': 1,
                           'virtual_interface_id': 2,
                           'instance_uuid': '2',
                           'allocated': False,
                           'leased': False,
                           'reserved': False,
                           'created_at': None,
                           'deleted_at': None,
                           'updated_at': None,
                           'deleted': None,
                           'instance': instance,
                           'network': test_network.fake_network,
                           'host': None},
                          ]

        def fake_fixed_ip_get_by_address(context, address,
                                         columns_to_join=None):
            for fixed_ip in fake_fixed_ips:
                if fixed_ip['address'] == address:
                    return fixed_ip
            raise exception.FixedIpNotFoundForAddress(address=address)

        def fake_fixed_ip_get_by_address_detailed(context, address):
            network = {'id': 1,
                       'cidr': "192.168.1.0/24"}
            host = {'host': "host",
                    'hostname': 'openstack'}
            for fixed_ip in fake_fixed_ips:
                if fixed_ip['address'] == address:
                    return (fixed_ip, network, host)
            raise exception.FixedIpNotFoundForAddress(address=address)

        def fake_fixed_ip_update(context, address, values):
            # NOTE(review): fake_fixed_ip_get_by_address raises instead of
            # returning None for unknown addresses, so the None branch
            # below is effectively unreachable; kept as-is to preserve
            # the original logic.
            fixed_ip = fake_fixed_ip_get_by_address(context, address)
            if fixed_ip is None:
                raise exception.FixedIpNotFoundForAddress(address=address)
            else:
                for key in values:
                    fixed_ip[key] = values[key]

        self.stubs.Set(db, "fixed_ip_get_by_address",
                       fake_fixed_ip_get_by_address)
        self.stubs.Set(db, "fixed_ip_get_by_address_detailed",
                       fake_fixed_ip_get_by_address_detailed)
        self.stubs.Set(db, "fixed_ip_update", fake_fixed_ip_update)

    def test_fixed_ip_reserve(self):
        # Reserve a Fixed IP.
        project = {'reserve': None}
        response = self._do_post('os-fixed-ips/192.168.1.1/action',
                                 'fixedip-post-req',
                                 project)
        self.assertEqual(response.status_code, 202)
        self.assertEqual(response.content, "")

    def test_get_fixed_ip(self):
        # Return data about the given fixed ip.
        response = self._do_get('os-fixed-ips/192.168.1.1')
        project = {'cidr': '192.168.1.0/24',
                   'hostname': 'openstack',
                   'host': 'host',
                   'address': '192.168.1.1'}
        self._verify_response('fixedips-get-resp', project, response, 200)
+
+
class FixedIpXmlTest(FixedIpJsonTest):
    # Re-run the fixed-IP samples with an XML content type.
    ctype = "xml"
+
+
class AggregatesSampleJsonTest(ServersSampleBase):
    """API samples for the os-aggregates extension (JSON)."""

    extension_name = "nova.api.openstack.compute.contrib" + \
                     ".aggregates.Aggregates"
    # Raw string: '\d' is a regex escape, not a valid Python string escape.
    create_subs = {
        "aggregate_id": r'(?P<id>\d+)'
    }

    def _create_aggregate(self):
        """POST the aggregate-create sample and return the raw response."""
        return self._do_post('os-aggregates', 'aggregate-post-req',
                             self.create_subs)

    def test_aggregate_create(self):
        response = self._create_aggregate()
        # Work on a copy: updating self.create_subs in place would mutate
        # the shared class attribute and bleed regexes into other tests.
        subs = dict(self.create_subs)
        subs.update(self._get_regexes())
        return self._verify_response('aggregate-post-resp',
                                     subs, response, 200)

    def test_list_aggregates(self):
        self._create_aggregate()
        response = self._do_get('os-aggregates')
        subs = self._get_regexes()
        self._verify_response('aggregates-list-get-resp', subs, response, 200)

    def test_aggregate_get(self):
        # The first created aggregate always has id 1.
        self._create_aggregate()
        response = self._do_get('os-aggregates/%s' % 1)
        subs = self._get_regexes()
        self._verify_response('aggregates-get-resp', subs, response, 200)

    def test_add_metadata(self):
        self._create_aggregate()
        response = self._do_post('os-aggregates/%s/action' % 1,
                                 'aggregate-metadata-post-req',
                                 {'action': 'set_metadata'})
        subs = self._get_regexes()
        self._verify_response('aggregates-metadata-post-resp', subs,
                              response, 200)

    def test_add_host(self):
        self._create_aggregate()
        subs = {
            "host_name": self.compute.host,
        }
        response = self._do_post('os-aggregates/%s/action' % 1,
                                 'aggregate-add-host-post-req', subs)
        subs.update(self._get_regexes())
        self._verify_response('aggregates-add-host-post-resp', subs,
                              response, 200)

    def test_remove_host(self):
        # Reuse test_add_host so there is a host to remove.
        self.test_add_host()
        subs = {
            "host_name": self.compute.host,
        }
        response = self._do_post('os-aggregates/1/action',
                                 'aggregate-remove-host-post-req', subs)
        subs.update(self._get_regexes())
        self._verify_response('aggregates-remove-host-post-resp',
                              subs, response, 200)

    def test_update_aggregate(self):
        self._create_aggregate()
        response = self._do_put('os-aggregates/%s' % 1,
                                'aggregate-update-post-req', {})
        subs = self._get_regexes()
        self._verify_response('aggregate-update-post-resp',
                              subs, response, 200)
+
+
class AggregatesSampleXmlTest(AggregatesSampleJsonTest):
    # Re-run the aggregate samples with an XML content type.
    ctype = 'xml'
+
+
class CertificatesSamplesJsonTest(ApiSampleTestBaseV2):
    """API samples for the os-certificates extension (JSON)."""

    extension_name = ("nova.api.openstack.compute.contrib.certificates."
                      "Certificates")

    def test_create_certificates(self):
        resp = self._do_post('os-certificates',
                             'certificate-create-req', {})
        self._verify_response('certificate-create-resp',
                              self._get_regexes(), resp, 200)

    def test_get_root_certificate(self):
        resp = self._do_get('os-certificates/root')
        self._verify_response('certificate-get-root-resp',
                              self._get_regexes(), resp, 200)
+
+
class CertificatesSamplesXmlTest(CertificatesSamplesJsonTest):
    # Re-run the certificate samples with an XML content type.
    ctype = 'xml'
+
+
class UsedLimitsSamplesJsonTest(ApiSampleTestBaseV2):
    """API samples for the used_limits extension (JSON)."""

    extension_name = ("nova.api.openstack.compute.contrib.used_limits."
                      "Used_limits")

    def test_get_used_limits(self):
        """Fetch the limits document, which includes usage data."""
        resp = self._do_get('limits')
        self._verify_response('usedlimits-get-resp',
                              self._get_regexes(), resp, 200)
+
+
class UsedLimitsSamplesXmlTest(UsedLimitsSamplesJsonTest):
    # Re-run the used-limits samples with an XML content type.
    ctype = "xml"
+
+
class UsedLimitsForAdminSamplesJsonTest(ApiSampleTestBaseV2):
    """API samples for per-tenant limits as queried by an admin."""

    extends_name = ("nova.api.openstack.compute.contrib.used_limits."
                    "Used_limits")
    extension_name = (
        "nova.api.openstack.compute.contrib.used_limits_for_admin."
        "Used_limits_for_admin")

    def test_get_used_limits_for_admin(self):
        # Query another tenant's limits, which only an admin may do.
        resp = self._do_get('limits?tenant_id=%s' % 'openstack')
        return self._verify_response('usedlimitsforadmin-get-resp',
                                     self._get_regexes(), resp, 200)
+
+
class UsedLimitsForAdminSamplesXmlTest(UsedLimitsForAdminSamplesJsonTest):
    # Re-run the admin used-limits samples with an XML content type.
    ctype = "xml"
+
+
class MultipleCreateJsonTest(ServersSampleBase):
    """API samples for the multiple-create extension (JSON)."""

    extension_name = ("nova.api.openstack.compute.contrib.multiple_create."
                      "Multiple_create")

    def _boot_subs(self):
        # Common request substitutions: boot 2-3 servers from a valid image.
        return {
            'image_id': fake.get_valid_image_id(),
            'host': self._get_host(),
            'min_count': "2",
            'max_count': "3",
        }

    def test_multiple_create(self):
        subs = self._boot_subs()
        resp = self._do_post('servers', 'multiple-create-post-req', subs)
        subs.update(self._get_regexes())
        self._verify_response('multiple-create-post-resp', subs, resp, 202)

    def test_multiple_create_without_reservation_id(self):
        subs = self._boot_subs()
        resp = self._do_post('servers', 'multiple-create-no-resv-post-req',
                             subs)
        subs.update(self._get_regexes())
        self._verify_response('multiple-create-no-resv-post-resp', subs,
                              resp, 202)
+
+
class MultipleCreateXmlTest(MultipleCreateJsonTest):
    # Re-run the multiple-create samples with an XML content type.
    ctype = 'xml'
+
+
class ServicesJsonTest(ApiSampleTestBaseV2):
    """API samples for the os-services extension (JSON)."""

    extension_name = "nova.api.openstack.compute.contrib.services.Services"

    def setUp(self):
        super(ServicesJsonTest, self).setUp()
        # Stub the service DB API and freeze time so service state is
        # deterministic.
        self.stubs.Set(db, "service_get_all",
                       test_services.fake_db_api_service_get_all)
        self.stubs.Set(timeutils, "utcnow", test_services.fake_utcnow)
        self.stubs.Set(timeutils, "utcnow_ts", test_services.fake_utcnow_ts)
        self.stubs.Set(db, "service_get_by_args",
                       test_services.fake_service_get_by_host_binary)
        self.stubs.Set(db, "service_update",
                       test_services.fake_service_update)

    def tearDown(self):
        # Clear the time override before the base-class teardown so it is
        # always reset, even if a later teardown step raises, and cannot
        # leak into subsequent tests.
        timeutils.clear_time_override()
        super(ServicesJsonTest, self).tearDown()

    def fake_load(self, service_name):
        # Pretend only the os-extended-services extension is loaded.
        return service_name == 'os-extended-services'

    def test_services_list(self):
        """Return a list of all running services."""
        response = self._do_get('os-services')
        subs = {'binary': 'nova-compute',
                'host': 'host1',
                'zone': 'nova',
                'status': 'disabled',
                'state': 'up'}
        subs.update(self._get_regexes())
        self._verify_response('services-list-get-resp', subs, response, 200)

    def test_service_enable(self):
        """Enable an existing service."""
        subs = {"host": "host1",
                'binary': 'nova-compute'}
        response = self._do_put('os-services/enable',
                                'service-enable-put-req', subs)
        subs = {"host": "host1",
                "binary": "nova-compute"}
        self._verify_response('service-enable-put-resp', subs, response, 200)

    def test_service_disable(self):
        """Disable an existing service."""
        subs = {"host": "host1",
                'binary': 'nova-compute'}
        response = self._do_put('os-services/disable',
                                'service-disable-put-req', subs)
        subs = {"host": "host1",
                "binary": "nova-compute"}
        self._verify_response('service-disable-put-resp', subs, response, 200)

    def test_service_detail(self):
        """Return a list of all running services with the disable reason
        information if that exists.
        """
        self.stubs.Set(extensions.ExtensionManager, "is_loaded",
                       self.fake_load)
        response = self._do_get('os-services')
        self.assertEqual(response.status_code, 200)
        subs = {'binary': 'nova-compute',
                'host': 'host1',
                'zone': 'nova',
                'status': 'disabled',
                'state': 'up'}
        subs.update(self._get_regexes())
        self._verify_response('services-get-resp',
                              subs, response, 200)

    def test_service_disable_log_reason(self):
        """Disable an existing service and log the reason."""
        self.stubs.Set(extensions.ExtensionManager, "is_loaded",
                       self.fake_load)
        subs = {"host": "host1",
                'binary': 'nova-compute',
                'disabled_reason': 'test2'}
        response = self._do_put('os-services/disable-log-reason',
                                'service-disable-log-put-req', subs)
        return self._verify_response('service-disable-log-put-resp',
                                     subs, response, 200)
+
+
class ServicesXmlTest(ServicesJsonTest):
    # Re-run the service samples with an XML content type.
    ctype = 'xml'
+
+
class ExtendedServicesJsonTest(ApiSampleTestBaseV2):
    """This extension is extending the functionalities of the
    Services extension so the functionalities introduced by this extension
    are tested in the ServicesJsonTest and ServicesXmlTest classes.
    """

    extension_name = ("nova.api.openstack.compute.contrib."
                      "extended_services.Extended_services")
+
+
class ExtendedServicesXmlTest(ExtendedServicesJsonTest):
    """This extension is tested in the ServicesXmlTest class."""
    # Only switches the content type; no extra behavior to sample here.
    ctype = 'xml'
+
+
@mock.patch.object(db, 'service_get_all',
                   side_effect=test_services.fake_db_api_service_get_all)
@mock.patch.object(db, 'service_get_by_args',
                   side_effect=test_services.fake_service_get_by_host_binary)
class ExtendedServicesDeleteJsonTest(ApiSampleTestBaseV2):
    """API samples for the extended-services-delete extension (JSON)."""

    extends_name = ("nova.api.openstack.compute.contrib.services.Services")
    extension_name = ("nova.api.openstack.compute.contrib."
                      "extended_services_delete.Extended_services_delete")

    def setUp(self):
        super(ExtendedServicesDeleteJsonTest, self).setUp()
        timeutils.set_time_override(test_services.fake_utcnow())

    def tearDown(self):
        # Clear the override set in setUp before the base-class teardown
        # so it is always reset, even if a later teardown step raises,
        # and cannot leak into other tests.
        timeutils.clear_time_override()
        super(ExtendedServicesDeleteJsonTest, self).tearDown()

    def test_service_detail(self, *mocks):
        """Return a list of all running services with the disable reason
        information if that exists.
        """
        response = self._do_get('os-services')
        self.assertEqual(response.status_code, 200)
        subs = {'id': 1,
                'binary': 'nova-compute',
                'host': 'host1',
                'zone': 'nova',
                'status': 'disabled',
                'state': 'up'}
        subs.update(self._get_regexes())
        return self._verify_response('services-get-resp',
                                     subs, response, 200)

    def test_service_delete(self, *mocks):
        """Deleting a service returns an empty 204."""
        response = self._do_delete('os-services/1')
        self.assertEqual(response.status_code, 204)
        self.assertEqual(response.content, "")
+
+
class ExtendedServicesDeleteXmlTest(ExtendedServicesDeleteJsonTest):
    """This extension is tested in the ExtendedServicesDeleteJsonTest class."""
    # Only switches the content type; all cases are inherited.
    ctype = 'xml'
+
+
class SimpleTenantUsageSampleJsonTest(ServersSampleBase):
    """API samples for the os-simple-tenant-usage extension (JSON)."""

    extension_name = ("nova.api.openstack.compute.contrib.simple_tenant_usage."
                      "Simple_tenant_usage")

    def setUp(self):
        """Boot a server inside a frozen one-hour window so the usage
        report covers a deterministic interval.
        """
        super(SimpleTenantUsageSampleJsonTest, self).setUp()

        started = timeutils.utcnow()
        now = started + datetime.timedelta(hours=1)

        timeutils.set_time_override(started)
        self._post_server()
        timeutils.set_time_override(now)

        self.query = {
            'start': str(started),
            'end': str(now)
        }

    def tearDown(self):
        """tearDown method for simple tenant usage.

        Clear the time override before the base-class teardown so it is
        always reset, even if a later teardown step raises, and cannot
        leak into other tests.
        """
        timeutils.clear_time_override()
        super(SimpleTenantUsageSampleJsonTest, self).tearDown()

    def test_get_tenants_usage(self):
        # Get api sample to get all tenants usage request.
        response = self._do_get('os-simple-tenant-usage?%s' % (
            urllib.urlencode(self.query)))
        subs = self._get_regexes()
        self._verify_response('simple-tenant-usage-get', subs, response, 200)

    def test_get_tenant_usage_details(self):
        # Get api sample to get specific tenant usage request.
        tenant_id = 'openstack'
        response = self._do_get('os-simple-tenant-usage/%s?%s' % (
            tenant_id, urllib.urlencode(self.query)))
        subs = self._get_regexes()
        self._verify_response('simple-tenant-usage-get-specific', subs,
                              response, 200)
+
+
class SimpleTenantUsageSampleXmlTest(SimpleTenantUsageSampleJsonTest):
    # Re-run the tenant-usage samples with an XML content type.
    ctype = "xml"
+
+
class ServerDiagnosticsSamplesJsonTest(ServersSampleBase):
    """API samples for the server-diagnostics extension (JSON)."""

    extension_name = ("nova.api.openstack.compute.contrib.server_diagnostics."
                      "Server_diagnostics")

    def test_server_diagnostics_get(self):
        server_id = self._post_server()
        resp = self._do_get('servers/%s/diagnostics' % server_id)
        self._verify_response('server-diagnostics-get-resp',
                              self._get_regexes(), resp, 200)
+
+
class ServerDiagnosticsSamplesXmlTest(ServerDiagnosticsSamplesJsonTest):
    # Re-run the diagnostics samples with an XML content type.
    ctype = "xml"
+
+
class AvailabilityZoneJsonTest(ServersSampleBase):
    """API samples for booting a server into an availability zone."""

    extension_name = ("nova.api.openstack.compute.contrib.availability_zone."
                      "Availability_zone")

    def test_create_availability_zone(self):
        # Boot a server while explicitly naming the "nova" zone.
        subs = {
            'image_id': fake.get_valid_image_id(),
            'host': self._get_host(),
            "availability_zone": "nova",
        }
        resp = self._do_post('servers', 'availability-zone-post-req', subs)
        subs.update(self._get_regexes())
        self._verify_response('availability-zone-post-resp', subs, resp, 202)
+
+
class AvailabilityZoneXmlTest(AvailabilityZoneJsonTest):
    # Re-run the availability-zone samples with an XML content type.
    ctype = "xml"
+
+
+class AdminActionsSamplesJsonTest(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib.admin_actions."
+ "Admin_actions")
+
+ def setUp(self):
+ """setUp Method for AdminActions api samples extension
+
+ This method creates the server that will be used in each tests
+ """
+ super(AdminActionsSamplesJsonTest, self).setUp()
+ self.uuid = self._post_server()
+
+ def test_post_pause(self):
+ # Get api samples to pause server request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-pause', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_unpause(self):
+ # Get api samples to unpause server request.
+ self.test_post_pause()
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-unpause', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_suspend(self):
+ # Get api samples to suspend server request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-suspend', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_resume(self):
+ # Get api samples to server resume request.
+ self.test_post_suspend()
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-resume', {})
+ self.assertEqual(response.status_code, 202)
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager._cold_migrate')
+ def test_post_migrate(self, mock_cold_migrate):
+ # Get api samples to migrate server request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-migrate', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_reset_network(self):
+ # Get api samples to reset server network request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-reset-network', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_inject_network_info(self):
+ # Get api samples to inject network info request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-inject-network-info', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_lock_server(self):
+ # Get api samples to lock server request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-lock-server', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_unlock_server(self):
+ # Get api samples to unlock server request.
+ self.test_post_lock_server()
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-unlock-server', {})
+ self.assertEqual(response.status_code, 202)
+
+    def test_post_backup_server(self):
+        # Get api samples to backup server request.
+        # NOTE: the 'self' parameter below is the fake image service
+        # instance the stub is bound to, not this test case; it shadows
+        # the enclosing test's self, which is intentional here.
+        def image_details(self, context, **kwargs):
+            """This stub is specifically used on the backup action."""
+            # NOTE(maurosr): I've added this simple stub cause backup action
+            # was trapped in infinite loop during fetch image phase since the
+            # fake Image Service always returns the same set of images
+            return []
+
+        self.stubs.Set(fake._FakeImageService, 'detail', image_details)
+
+        response = self._do_post('servers/%s/action' % self.uuid,
+                                 'admin-actions-backup-server', {})
+        self.assertEqual(response.status_code, 202)
+
+    def test_post_live_migrate_server(self):
+        # Get api samples to server live migrate request.
+        # Replace the conductor's live-migrate task with a stub that just
+        # checks the instance uuid and target host were passed through.
+        def fake_live_migrate(_self, context, instance, scheduler_hint,
+                              block_migration, disk_over_commit):
+            self.assertEqual(self.uuid, instance["uuid"])
+            host = scheduler_hint["host"]
+            self.assertEqual(self.compute.host, host)
+
+        self.stubs.Set(conductor_manager.ComputeTaskManager,
+                       '_live_migrate',
+                       fake_live_migrate)
+
+        # Stub the compute-host service lookup so the API's host check
+        # succeeds without a real service record in the DB.
+        def fake_get_compute(context, host):
+            service = dict(host=host,
+                           binary='nova-compute',
+                           topic='compute',
+                           report_count=1,
+                           updated_at='foo',
+                           hypervisor_type='bar',
+                           hypervisor_version=
+                                utils.convert_version_to_int('1.0'),
+                           disabled=False)
+            return {'compute_node': [service]}
+        self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute)
+
+        response = self._do_post('servers/%s/action' % self.uuid,
+                                 'admin-actions-live-migrate',
+                                 {'hostname': self.compute.host})
+        self.assertEqual(response.status_code, 202)
+
+    def test_post_reset_state(self):
+        # get api samples to server reset state request.
+        response = self._do_post('servers/%s/action' % self.uuid,
+                                 'admin-actions-reset-server-state', {})
+        self.assertEqual(response.status_code, 202)
+
+
+class AdminActionsSamplesXmlTest(AdminActionsSamplesJsonTest):
+    """Re-run the admin-actions sample tests with XML content type."""
+    ctype = 'xml'
+
+
+class ConsolesSampleJsonTests(ServersSampleBase):
+    """API samples for the os-getXXXConsole server actions."""
+
+    extension_name = ("nova.api.openstack.compute.contrib"
+                      ".consoles.Consoles")
+
+    def setUp(self):
+        super(ConsolesSampleJsonTests, self).setUp()
+        # Enable every console type so each action below is accepted.
+        self.flags(vnc_enabled=True)
+        self.flags(enabled=True, group='spice')
+        self.flags(enabled=True, group='rdp')
+        self.flags(enabled=True, group='serial_console')
+
+    # NOTE(review): the url regexes below are non-raw strings containing
+    # sequences like \w and \d; they rely on Python preserving unknown
+    # escapes verbatim.  Converting them to raw strings would change the
+    # literal value (\\\\ etc.), so they are left as-is.
+
+    def test_get_vnc_console(self):
+        uuid = self._post_server()
+        response = self._do_post('servers/%s/action' % uuid,
+                                 'get-vnc-console-post-req',
+                                {'action': 'os-getVNCConsole'})
+        subs = self._get_regexes()
+        subs["url"] = \
+            "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
+        self._verify_response('get-vnc-console-post-resp', subs, response, 200)
+
+    def test_get_spice_console(self):
+        uuid = self._post_server()
+        response = self._do_post('servers/%s/action' % uuid,
+                                 'get-spice-console-post-req',
+                                {'action': 'os-getSPICEConsole'})
+        subs = self._get_regexes()
+        subs["url"] = \
+            "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
+        self._verify_response('get-spice-console-post-resp', subs,
+                              response, 200)
+
+    def test_get_rdp_console(self):
+        uuid = self._post_server()
+        response = self._do_post('servers/%s/action' % uuid,
+                                 'get-rdp-console-post-req',
+                                {'action': 'os-getRDPConsole'})
+        subs = self._get_regexes()
+        subs["url"] = \
+            "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
+        self._verify_response('get-rdp-console-post-resp', subs,
+                              response, 200)
+
+    def test_get_serial_console(self):
+        uuid = self._post_server()
+        response = self._do_post('servers/%s/action' % uuid,
+                                 'get-serial-console-post-req',
+                                {'action': 'os-getSerialConsole'})
+        subs = self._get_regexes()
+        # Serial consoles are exposed over websockets, hence ws:// here.
+        subs["url"] = \
+            "((ws?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
+        self._verify_response('get-serial-console-post-resp', subs,
+                              response, 200)
+
+
+class ConsolesSampleXmlTests(ConsolesSampleJsonTests):
+    """Re-run the console sample tests with XML content type."""
+    ctype = 'xml'
+
+
+class ConsoleAuthTokensSampleJsonTests(ServersSampleBase):
+    """API samples for the os-console-auth-tokens extension."""
+
+    extends_name = ("nova.api.openstack.compute.contrib.consoles.Consoles")
+    extension_name = ("nova.api.openstack.compute.contrib.console_auth_tokens."
+                      "Console_auth_tokens")
+
+    def _get_console_url(self, data):
+        # JSON response body; the XML subclass overrides this parsing.
+        return jsonutils.loads(data)["console"]["url"]
+
+    def _get_console_token(self, uuid):
+        # Request an RDP console and pull the auth token out of the
+        # returned URL's query string.
+        response = self._do_post('servers/%s/action' % uuid,
+                                 'get-rdp-console-post-req',
+                                {'action': 'os-getRDPConsole'})
+
+        url = self._get_console_url(response.content)
+        return re.match('.+?token=([^&]+)', url).groups()[0]
+
+    def test_get_console_connect_info(self):
+        self.flags(enabled=True, group='rdp')
+
+        uuid = self._post_server()
+        token = self._get_console_token(uuid)
+
+        response = self._do_get('os-console-auth-tokens/%s' % token)
+
+        subs = self._get_regexes()
+        subs["uuid"] = uuid
+        subs["host"] = r"[\w\.\-]+"
+        subs["port"] = "[0-9]+"
+        subs["internal_access_path"] = ".*"
+        self._verify_response('get-console-connect-info-get-resp', subs,
+                              response, 200)
+
+
+class ConsoleAuthTokensSampleXmlTests(ConsoleAuthTokensSampleJsonTests):
+    """XML variant; only the console-url parsing differs."""
+    ctype = 'xml'
+
+    def _get_console_url(self, data):
+        return etree.fromstring(data).find('url').text
+
+
+class DeferredDeleteSampleJsonTests(ServersSampleBase):
+    """API samples for the deferred-delete (restore/forceDelete) actions."""
+
+    extension_name = ("nova.api.openstack.compute.contrib"
+                      ".deferred_delete.Deferred_delete")
+
+    def setUp(self):
+        super(DeferredDeleteSampleJsonTests, self).setUp()
+        # A non-zero reclaim interval makes DELETE soft-delete the server,
+        # which is what restore/forceDelete operate on.
+        self.flags(reclaim_instance_interval=1)
+
+    def test_restore(self):
+        uuid = self._post_server()
+        response = self._do_delete('servers/%s' % uuid)
+
+        response = self._do_post('servers/%s/action' % uuid,
+                                 'restore-post-req', {})
+        self.assertEqual(response.status_code, 202)
+        self.assertEqual(response.content, '')
+
+    def test_force_delete(self):
+        uuid = self._post_server()
+        response = self._do_delete('servers/%s' % uuid)
+
+        response = self._do_post('servers/%s/action' % uuid,
+                                 'force-delete-post-req', {})
+        self.assertEqual(response.status_code, 202)
+        self.assertEqual(response.content, '')
+
+
+class DeferredDeleteSampleXmlTests(DeferredDeleteSampleJsonTests):
+    """Re-run the deferred-delete sample tests with XML content type."""
+    ctype = 'xml'
+
+
+class QuotasSampleJsonTests(ApiSampleTestBaseV2):
+    """API samples for the os-quota-sets extension."""
+
+    extension_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
+
+    def test_show_quotas(self):
+        # Get api sample to show quotas.
+        response = self._do_get('os-quota-sets/fake_tenant')
+        self._verify_response('quotas-show-get-resp', {}, response, 200)
+
+    def test_show_quotas_defaults(self):
+        # Get api sample to show quotas defaults.
+        response = self._do_get('os-quota-sets/fake_tenant/defaults')
+        self._verify_response('quotas-show-defaults-get-resp',
+                              {}, response, 200)
+
+    def test_update_quotas(self):
+        # Get api sample to update quotas.
+        response = self._do_put('os-quota-sets/fake_tenant',
+                                'quotas-update-post-req',
+                                {})
+        self._verify_response('quotas-update-post-resp', {}, response, 200)
+
+
+class QuotasSampleXmlTests(QuotasSampleJsonTests):
+    """Re-run the quota sample tests with XML content type."""
+    ctype = "xml"
+
+
+class ExtendedQuotasSampleJsonTests(ApiSampleTestBaseV2):
+    """API samples for the extended-quotas (delete/update) extension."""
+
+    extends_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
+    extension_name = ("nova.api.openstack.compute.contrib"
+                      ".extended_quotas.Extended_quotas")
+
+    def test_delete_quotas(self):
+        # Get api sample to delete quota.
+        response = self._do_delete('os-quota-sets/fake_tenant')
+        self.assertEqual(response.status_code, 202)
+        self.assertEqual(response.content, '')
+
+    def test_update_quotas(self):
+        # Get api sample to update quotas.
+        response = self._do_put('os-quota-sets/fake_tenant',
+                                'quotas-update-post-req',
+                                {})
+        return self._verify_response('quotas-update-post-resp', {},
+                                     response, 200)
+
+
+class ExtendedQuotasSampleXmlTests(ExtendedQuotasSampleJsonTests):
+    """Re-run the extended-quota sample tests with XML content type."""
+    ctype = "xml"
+
+
+class UserQuotasSampleJsonTests(ApiSampleTestBaseV2):
+    """API samples for per-user quota operations (user_quotas extension)."""
+
+    extends_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
+    extension_name = ("nova.api.openstack.compute.contrib"
+                      ".user_quotas.User_quotas")
+
+    def fake_load(self, *args):
+        # Stub for ExtensionManager.is_loaded: pretend every extension
+        # is loaded so the user-scoped delete path is reachable.
+        return True
+
+    def test_show_quotas_for_user(self):
+        # Get api sample to show quotas for user.
+        response = self._do_get('os-quota-sets/fake_tenant?user_id=1')
+        self._verify_response('user-quotas-show-get-resp', {}, response, 200)
+
+    def test_delete_quotas_for_user(self):
+        # Get api sample to delete quota for user.
+        self.stubs.Set(extensions.ExtensionManager, "is_loaded",
+                       self.fake_load)
+        response = self._do_delete('os-quota-sets/fake_tenant?user_id=1')
+        self.assertEqual(response.status_code, 202)
+        self.assertEqual(response.content, '')
+
+    def test_update_quotas_for_user(self):
+        # Get api sample to update quotas for user.
+        response = self._do_put('os-quota-sets/fake_tenant?user_id=1',
+                                'user-quotas-update-post-req',
+                                {})
+        return self._verify_response('user-quotas-update-post-resp', {},
+                                     response, 200)
+
+
+class UserQuotasSampleXmlTests(UserQuotasSampleJsonTests):
+    """Re-run the user-quota sample tests with XML content type."""
+    ctype = "xml"
+
+
+class ExtendedIpsSampleJsonTests(ServersSampleBase):
+    """API samples for the extended_ips server attribute extension."""
+
+    extension_name = ("nova.api.openstack.compute.contrib"
+                      ".extended_ips.Extended_ips")
+
+    def test_show(self):
+        uuid = self._post_server()
+        response = self._do_get('servers/%s' % uuid)
+        subs = self._get_regexes()
+        subs['hostid'] = '[a-f0-9]+'
+        subs['id'] = uuid
+        subs['hypervisor_hostname'] = r'[\w\.\-]+'
+        self._verify_response('server-get-resp', subs, response, 200)
+
+    def test_detail(self):
+        uuid = self._post_server()
+        response = self._do_get('servers/detail')
+        subs = self._get_regexes()
+        subs['id'] = uuid
+        subs['hostid'] = '[a-f0-9]+'
+        self._verify_response('servers-detail-resp', subs, response, 200)
+
+
+class ExtendedIpsSampleXmlTests(ExtendedIpsSampleJsonTests):
+    """Re-run the extended_ips sample tests with XML content type."""
+    ctype = 'xml'
+
+
+class ExtendedIpsMacSampleJsonTests(ServersSampleBase):
+    """API samples for the extended_ips_mac server attribute extension."""
+
+    extension_name = ("nova.api.openstack.compute.contrib"
+                      ".extended_ips_mac.Extended_ips_mac")
+
+    def test_show(self):
+        uuid = self._post_server()
+        response = self._do_get('servers/%s' % uuid)
+        self.assertEqual(response.status_code, 200)
+        subs = self._get_regexes()
+        subs['hostid'] = '[a-f0-9]+'
+        subs['id'] = uuid
+        subs['hypervisor_hostname'] = r'[\w\.\-]+'
+        # Colon-separated lowercase-hex MAC address.
+        subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+        self._verify_response('server-get-resp', subs, response, 200)
+
+    def test_detail(self):
+        uuid = self._post_server()
+        response = self._do_get('servers/detail')
+        self.assertEqual(response.status_code, 200)
+        subs = self._get_regexes()
+        subs['id'] = uuid
+        subs['hostid'] = '[a-f0-9]+'
+        subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+        self._verify_response('servers-detail-resp', subs, response, 200)
+
+
+class ExtendedIpsMacSampleXmlTests(ExtendedIpsMacSampleJsonTests):
+    """Re-run the extended_ips_mac sample tests with XML content type."""
+    ctype = 'xml'
+
+
+class ExtendedStatusSampleJsonTests(ServersSampleBase):
+    """API samples for the extended_status server attribute extension."""
+
+    extension_name = ("nova.api.openstack.compute.contrib"
+                      ".extended_status.Extended_status")
+
+    def test_show(self):
+        uuid = self._post_server()
+        response = self._do_get('servers/%s' % uuid)
+        subs = self._get_regexes()
+        subs['hostid'] = '[a-f0-9]+'
+        self._verify_response('server-get-resp', subs, response, 200)
+
+    def test_detail(self):
+        uuid = self._post_server()
+        response = self._do_get('servers/detail')
+        subs = self._get_regexes()
+        subs['id'] = uuid
+        subs['hostid'] = '[a-f0-9]+'
+        self._verify_response('servers-detail-resp', subs, response, 200)
+
+
+class ExtendedStatusSampleXmlTests(ExtendedStatusSampleJsonTests):
+    """Re-run the extended_status sample tests with XML content type."""
+    ctype = 'xml'
+
+
+class ExtendedVolumesSampleJsonTests(ServersSampleBase):
+    """API samples for the extended_volumes server attribute extension."""
+
+    extension_name = ("nova.api.openstack.compute.contrib"
+                      ".extended_volumes.Extended_volumes")
+
+    def test_show(self):
+        uuid = self._post_server()
+        # Stub the BDM lookup so the server appears to have attached
+        # volumes for the extension to report.
+        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+                       fakes.stub_bdm_get_all_by_instance)
+        response = self._do_get('servers/%s' % uuid)
+        subs = self._get_regexes()
+        subs['hostid'] = '[a-f0-9]+'
+        self._verify_response('server-get-resp', subs, response, 200)
+
+    def test_detail(self):
+        uuid = self._post_server()
+        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+                       fakes.stub_bdm_get_all_by_instance)
+        response = self._do_get('servers/detail')
+        subs = self._get_regexes()
+        subs['id'] = uuid
+        subs['hostid'] = '[a-f0-9]+'
+        self._verify_response('servers-detail-resp', subs, response, 200)
+
+
+class ExtendedVolumesSampleXmlTests(ExtendedVolumesSampleJsonTests):
+    """Re-run the extended_volumes sample tests with XML content type."""
+    ctype = 'xml'
+
+
+class ServerUsageSampleJsonTests(ServersSampleBase):
+    """API samples for the server_usage attribute extension."""
+
+    extension_name = ("nova.api.openstack.compute.contrib"
+                      ".server_usage.Server_usage")
+
+    def test_show(self):
+        uuid = self._post_server()
+        response = self._do_get('servers/%s' % uuid)
+        subs = self._get_regexes()
+        subs['hostid'] = '[a-f0-9]+'
+        return self._verify_response('server-get-resp', subs, response, 200)
+
+    def test_detail(self):
+        self._post_server()
+        response = self._do_get('servers/detail')
+        subs = self._get_regexes()
+        subs['hostid'] = '[a-f0-9]+'
+        return self._verify_response('servers-detail-resp', subs,
+                                     response, 200)
+
+
+class ServerUsageSampleXmlTests(ServerUsageSampleJsonTests):
+    """Re-run the server_usage sample tests with XML content type."""
+    ctype = 'xml'
+
+
+class ExtendedVIFNetSampleJsonTests(ServersSampleBase):
+    """API samples for extended_virtual_interfaces_net."""
+
+    extension_name = ("nova.api.openstack.compute.contrib"
+                      ".extended_virtual_interfaces_net.Extended_virtual_interfaces_net")
+
+    def _get_flags(self):
+        f = super(ExtendedVIFNetSampleJsonTests, self)._get_flags()
+        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+        # extended_virtual_interfaces_net_update also
+        # needs virtual_interfaces to be loaded
+        f['osapi_compute_extension'].append(
+            ('nova.api.openstack.compute.contrib'
+             '.virtual_interfaces.Virtual_interfaces'))
+        return f
+
+    def test_vifs_list(self):
+        uuid = self._post_server()
+
+        response = self._do_get('servers/%s/os-virtual-interfaces' % uuid)
+        self.assertEqual(response.status_code, 200)
+
+        subs = self._get_regexes()
+        subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+
+        self._verify_response('vifs-list-resp', subs, response, 200)
+
+
+class ExtendedVIFNetSampleXmlTests(ExtendedIpsSampleJsonTests):
+ ctype = 'xml'
+
+
+class FlavorManageSampleJsonTests(ApiSampleTestBaseV2):
+    """API samples for the flavormanage (create/delete flavor) extension."""
+
+    extension_name = ("nova.api.openstack.compute.contrib.flavormanage."
+                      "Flavormanage")
+
+    def _create_flavor(self):
+        """Create a flavor."""
+        subs = {
+            'flavor_id': 10,
+            'flavor_name': "test_flavor"
+        }
+        response = self._do_post("flavors",
+                                 "flavor-create-post-req",
+                                 subs)
+        subs.update(self._get_regexes())
+        self._verify_response("flavor-create-post-resp", subs, response, 200)
+
+    def test_create_flavor(self):
+        # Get api sample to create a flavor.
+        self._create_flavor()
+
+    def test_delete_flavor(self):
+        # Get api sample to delete a flavor.
+        self._create_flavor()
+        response = self._do_delete("flavors/10")
+        self.assertEqual(response.status_code, 202)
+        self.assertEqual(response.content, '')
+
+
+class FlavorManageSampleXmlTests(FlavorManageSampleJsonTests):
+    """Re-run the flavormanage sample tests with XML content type."""
+    ctype = "xml"
+
+
+class ServerPasswordSampleJsonTests(ServersSampleBase):
+    """API samples for the os-server-password extension."""
+
+    extension_name = ("nova.api.openstack.compute.contrib.server_password."
+                      "Server_password")
+
+    def test_get_password(self):
+
+        # Mock password since there is no api to set it
+        def fake_ext_password(*args, **kwargs):
+            return ("xlozO3wLCBRWAa2yDjCCVx8vwNPypxnypmRYDa/zErlQ+EzPe1S/"
+                    "Gz6nfmC52mOlOSCRuUOmG7kqqgejPof6M7bOezS387zjq4LSvvwp"
+                    "28zUknzy4YzfFGhnHAdai3TxUJ26pfQCYrq8UTzmKF2Bq8ioSEtV"
+                    "VzM0A96pDh8W2i7BOz6MdoiVyiev/I1K2LsuipfxSJR7Wdke4zNX"
+                    "JjHHP2RfYsVbZ/k9ANu+Nz4iIH8/7Cacud/pphH7EjrY6a4RZNrj"
+                    "QskrhKYed0YERpotyjYk1eDtRe72GrSiXteqCM4biaQ5w3ruS+Ac"
+                    "X//PXk3uJ5kC7d67fPXaVz4WaQRYMg==")
+        self.stubs.Set(password, "extract_password", fake_ext_password)
+        uuid = self._post_server()
+        response = self._do_get('servers/%s/os-server-password' % uuid)
+        subs = self._get_regexes()
+        # '+' is a regex metacharacter; escape it so the sample matcher
+        # treats the base64 password literally.
+        subs['encrypted_password'] = fake_ext_password().replace('+', '\\+')
+        self._verify_response('get-password-resp', subs, response, 200)
+
+    def test_reset_password(self):
+        uuid = self._post_server()
+        response = self._do_delete('servers/%s/os-server-password' % uuid)
+        self.assertEqual(response.status_code, 204)
+
+
+class ServerPasswordSampleXmlTests(ServerPasswordSampleJsonTests):
+    """Re-run the server-password sample tests with XML content type."""
+    ctype = "xml"
+
+
+class DiskConfigJsonTest(ServersSampleBase):
+    """API samples for the disk_config extension across server/image APIs."""
+
+    extension_name = ("nova.api.openstack.compute.contrib.disk_config."
+                      "Disk_config")
+
+    def test_list_servers_detail(self):
+        uuid = self._post_server()
+        response = self._do_get('servers/detail')
+        subs = self._get_regexes()
+        subs['hostid'] = '[a-f0-9]+'
+        subs['id'] = uuid
+        self._verify_response('list-servers-detail-get', subs, response, 200)
+
+    def test_get_server(self):
+        uuid = self._post_server()
+        response = self._do_get('servers/%s' % uuid)
+        subs = self._get_regexes()
+        subs['hostid'] = '[a-f0-9]+'
+        self._verify_response('server-get-resp', subs, response, 200)
+
+    def test_update_server(self):
+        uuid = self._post_server()
+        response = self._do_put('servers/%s' % uuid,
+                                'server-update-put-req', {})
+        subs = self._get_regexes()
+        subs['hostid'] = '[a-f0-9]+'
+        self._verify_response('server-update-put-resp', subs, response, 200)
+
+    def test_resize_server(self):
+        # Allow resizing onto the single test host.
+        self.flags(allow_resize_to_same_host=True)
+        uuid = self._post_server()
+        response = self._do_post('servers/%s/action' % uuid,
+                                 'server-resize-post-req', {})
+        self.assertEqual(response.status_code, 202)
+        # NOTE(tmello): Resize does not return response body
+        # Bug #1085213.
+        self.assertEqual(response.content, "")
+
+    def test_rebuild_server(self):
+        uuid = self._post_server()
+        subs = {
+            'image_id': fake.get_valid_image_id(),
+            'host': self._get_host(),
+        }
+        response = self._do_post('servers/%s/action' % uuid,
+                                 'server-action-rebuild-req', subs)
+        subs = self._get_regexes()
+        subs['hostid'] = '[a-f0-9]+'
+        self._verify_response('server-action-rebuild-resp',
+                              subs, response, 202)
+
+    def test_get_image(self):
+        image_id = fake.get_valid_image_id()
+        response = self._do_get('images/%s' % image_id)
+        subs = self._get_regexes()
+        subs['image_id'] = image_id
+        self._verify_response('image-get-resp', subs, response, 200)
+
+    def test_list_images(self):
+        response = self._do_get('images/detail')
+        subs = self._get_regexes()
+        self._verify_response('image-list-resp', subs, response, 200)
+
+
+class DiskConfigXmlTest(DiskConfigJsonTest):
+    """Re-run the disk_config sample tests with XML content type."""
+    ctype = 'xml'
+
+
+class OsNetworksJsonTests(ApiSampleTestBaseV2):
+    """API samples for the os-tenant-networks extension."""
+
+    extension_name = ("nova.api.openstack.compute.contrib.os_tenant_networks"
+                      ".Os_tenant_networks")
+
+    def setUp(self):
+        super(OsNetworksJsonTests, self).setUp()
+        CONF.set_override("enable_network_quota", True)
+
+        # No-op all quota reserve/commit/rollback calls so network quota
+        # accounting never interferes with the sample requests.
+        def fake(*args, **kwargs):
+            pass
+
+        self.stubs.Set(nova.quota.QUOTAS, "reserve", fake)
+        self.stubs.Set(nova.quota.QUOTAS, "commit", fake)
+        self.stubs.Set(nova.quota.QUOTAS, "rollback", fake)
+        self.stubs.Set(nova.quota.QuotaEngine, "reserve", fake)
+        self.stubs.Set(nova.quota.QuotaEngine, "commit", fake)
+        self.stubs.Set(nova.quota.QuotaEngine, "rollback", fake)
+
+    def test_list_networks(self):
+        response = self._do_get('os-tenant-networks')
+        subs = self._get_regexes()
+        self._verify_response('networks-list-res', subs, response, 200)
+
+    def test_create_network(self):
+        response = self._do_post('os-tenant-networks', "networks-post-req", {})
+        subs = self._get_regexes()
+        self._verify_response('networks-post-res', subs, response, 200)
+
+    def test_delete_network(self):
+        # Create a network first so there is something to delete.
+        response = self._do_post('os-tenant-networks', "networks-post-req", {})
+        net = jsonutils.loads(response.content)
+        response = self._do_delete('os-tenant-networks/%s' %
+                                   net["network"]["id"])
+        self.assertEqual(response.status_code, 202)
+        self.assertEqual(response.content, "")
+
+
+class OsNetworksXmlTests(OsNetworksJsonTests):
+    """XML variant; delete needs XML parsing to extract the network id."""
+    ctype = 'xml'
+
+    def test_delete_network(self):
+        response = self._do_post('os-tenant-networks', "networks-post-req", {})
+        net = etree.fromstring(response.content)
+        network_id = net.find('id').text
+        response = self._do_delete('os-tenant-networks/%s' % network_id)
+        self.assertEqual(response.status_code, 202)
+        self.assertEqual(response.content, "")
+
+
+class NetworksJsonTests(ApiSampleTestBaseV2):
+    """API samples for the os-networks (admin networks) extension."""
+
+    extension_name = ("nova.api.openstack.compute.contrib"
+                      ".os_networks.Os_networks")
+
+    def setUp(self):
+        super(NetworksJsonTests, self).setUp()
+        # Replace the whole network API surface with an in-memory fake
+        # so responses are deterministic.
+        fake_network_api = test_networks.FakeNetworkAPI()
+        self.stubs.Set(network_api.API, "get_all",
+                       fake_network_api.get_all)
+        self.stubs.Set(network_api.API, "get",
+                       fake_network_api.get)
+        self.stubs.Set(network_api.API, "associate",
+                       fake_network_api.associate)
+        self.stubs.Set(network_api.API, "delete",
+                       fake_network_api.delete)
+        self.stubs.Set(network_api.API, "create",
+                       fake_network_api.create)
+        self.stubs.Set(network_api.API, "add_network_to_project",
+                       fake_network_api.add_network_to_project)
+
+    def test_network_list(self):
+        response = self._do_get('os-networks')
+        subs = self._get_regexes()
+        self._verify_response('networks-list-resp', subs, response, 200)
+
+    def test_network_disassociate(self):
+        uuid = test_networks.FAKE_NETWORKS[0]['uuid']
+        response = self._do_post('os-networks/%s/action' % uuid,
+                                 'networks-disassociate-req', {})
+        self.assertEqual(response.status_code, 202)
+        self.assertEqual(response.content, "")
+
+    def test_network_show(self):
+        uuid = test_networks.FAKE_NETWORKS[0]['uuid']
+        response = self._do_get('os-networks/%s' % uuid)
+        subs = self._get_regexes()
+        self._verify_response('network-show-resp', subs, response, 200)
+
+    def test_network_create(self):
+        response = self._do_post("os-networks",
+                                 'network-create-req', {})
+        subs = self._get_regexes()
+        self._verify_response('network-create-resp', subs, response, 200)
+
+    def test_network_add(self):
+        response = self._do_post("os-networks/add",
+                                 'network-add-req', {})
+        self.assertEqual(response.status_code, 202)
+        self.assertEqual(response.content, "")
+
+    def test_network_delete(self):
+        response = self._do_delete('os-networks/always_delete')
+        self.assertEqual(response.status_code, 202)
+        self.assertEqual(response.content, "")
+
+
+class NetworksXmlTests(NetworksJsonTests):
+    """Re-run the os-networks sample tests with XML content type."""
+    ctype = 'xml'
+
+
+class ExtendedNetworksJsonTests(ApiSampleTestBaseV2):
+    """API samples for the extended_networks attribute extension."""
+
+    extends_name = ("nova.api.openstack.compute.contrib."
+                    "os_networks.Os_networks")
+    extension_name = ("nova.api.openstack.compute.contrib."
+                      "extended_networks.Extended_networks")
+
+    def setUp(self):
+        super(ExtendedNetworksJsonTests, self).setUp()
+        # Same fake network API as NetworksJsonTests.
+        fake_network_api = test_networks.FakeNetworkAPI()
+        self.stubs.Set(network_api.API, "get_all",
+                       fake_network_api.get_all)
+        self.stubs.Set(network_api.API, "get",
+                       fake_network_api.get)
+        self.stubs.Set(network_api.API, "associate",
+                       fake_network_api.associate)
+        self.stubs.Set(network_api.API, "delete",
+                       fake_network_api.delete)
+        self.stubs.Set(network_api.API, "create",
+                       fake_network_api.create)
+        self.stubs.Set(network_api.API, "add_network_to_project",
+                       fake_network_api.add_network_to_project)
+
+    def test_network_list(self):
+        response = self._do_get('os-networks')
+        subs = self._get_regexes()
+        self._verify_response('networks-list-resp', subs, response, 200)
+
+    def test_network_show(self):
+        uuid = test_networks.FAKE_NETWORKS[0]['uuid']
+        response = self._do_get('os-networks/%s' % uuid)
+        subs = self._get_regexes()
+        self._verify_response('network-show-resp', subs, response, 200)
+
+    def test_network_create(self):
+        response = self._do_post("os-networks",
+                                 'network-create-req', {})
+        subs = self._get_regexes()
+        self._verify_response('network-create-resp', subs, response, 200)
+
+
+class ExtendedNetworksXmlTests(ExtendedNetworksJsonTests):
+    """Re-run the extended_networks sample tests with XML content type."""
+    ctype = 'xml'
+
+
+class NetworksAssociateJsonTests(ApiSampleTestBaseV2):
+    """API samples for the networks_associate extension."""
+
+    extension_name = ("nova.api.openstack.compute.contrib"
+                      ".networks_associate.Networks_associate")
+
+    # Sentinel default distinguishing "argument not passed" from a real
+    # host/project value in the fake associate below.
+    _sentinel = object()
+
+    def _get_flags(self):
+        f = super(NetworksAssociateJsonTests, self)._get_flags()
+        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+        # Networks_associate requires Networks to be update
+        f['osapi_compute_extension'].append(
+            'nova.api.openstack.compute.contrib.os_networks.Os_networks')
+        return f
+
+    def setUp(self):
+        super(NetworksAssociateJsonTests, self).setUp()
+
+        def fake_associate(self, context, network_id,
+                           host=NetworksAssociateJsonTests._sentinel,
+                           project=NetworksAssociateJsonTests._sentinel):
+            return True
+
+        self.stubs.Set(network_api.API, "associate", fake_associate)
+
+    def test_disassociate(self):
+        response = self._do_post('os-networks/1/action',
+                                 'network-disassociate-req',
+                                 {})
+        self.assertEqual(response.status_code, 202)
+        self.assertEqual(response.content, "")
+
+    def test_disassociate_host(self):
+        response = self._do_post('os-networks/1/action',
+                                 'network-disassociate-host-req',
+                                 {})
+        self.assertEqual(response.status_code, 202)
+        self.assertEqual(response.content, "")
+
+    def test_disassociate_project(self):
+        response = self._do_post('os-networks/1/action',
+                                 'network-disassociate-project-req',
+                                 {})
+        self.assertEqual(response.status_code, 202)
+        self.assertEqual(response.content, "")
+
+    def test_associate_host(self):
+        response = self._do_post('os-networks/1/action',
+                                 'network-associate-host-req',
+                                 {"host": "testHost"})
+        self.assertEqual(response.status_code, 202)
+        self.assertEqual(response.content, "")
+
+
+class NetworksAssociateXmlTests(NetworksAssociateJsonTests):
+    """Re-run the networks_associate sample tests with XML content type."""
+    ctype = 'xml'
+
+
+class FlavorDisabledSampleJsonTests(ApiSampleTestBaseV2):
+    """API samples for the flavor_disabled attribute extension."""
+
+    extension_name = ("nova.api.openstack.compute.contrib.flavor_disabled."
+                      "Flavor_disabled")
+
+    def test_show_flavor(self):
+        # Get api sample to show flavor_disabled attr. of a flavor.
+        flavor_id = 1
+        response = self._do_get('flavors/%s' % flavor_id)
+        subs = self._get_regexes()
+        subs['flavor_id'] = flavor_id
+        self._verify_response('flavor-show-get-resp', subs, response, 200)
+
+    def test_detail_flavor(self):
+        # Get api sample to show details of a flavor.
+        response = self._do_get('flavors/detail')
+        subs = self._get_regexes()
+        self._verify_response('flavor-detail-get-resp', subs, response, 200)
+
+
+class FlavorDisabledSampleXmlTests(FlavorDisabledSampleJsonTests):
+    """Re-run the flavor_disabled sample tests with XML content type."""
+    ctype = "xml"
+
+
+class QuotaClassesSampleJsonTests(ApiSampleTestBaseV2):
+    """API samples for the os-quota-class-sets extension."""
+
+    extension_name = ("nova.api.openstack.compute.contrib.quota_classes."
+                      "Quota_classes")
+    # Quota-class identifier used by both tests below.
+    set_id = 'test_class'
+
+    def test_show_quota_classes(self):
+        # Get api sample to show quota classes.
+        response = self._do_get('os-quota-class-sets/%s' % self.set_id)
+        subs = {'set_id': self.set_id}
+        self._verify_response('quota-classes-show-get-resp', subs,
+                              response, 200)
+
+    def test_update_quota_classes(self):
+        # Get api sample to update quota classes.
+        response = self._do_put('os-quota-class-sets/%s' % self.set_id,
+                                'quota-classes-update-post-req',
+                                {})
+        self._verify_response('quota-classes-update-post-resp',
+                              {}, response, 200)
+
+
+class QuotaClassesSampleXmlTests(QuotaClassesSampleJsonTests):
+    """Re-run the quota-class sample tests with XML content type."""
+    ctype = "xml"
+
+
+class CellsSampleJsonTest(ApiSampleTestBaseV2):
+    """API samples for the os-cells extension."""
+
+    extension_name = "nova.api.openstack.compute.contrib.cells.Cells"
+
+    def setUp(self):
+        # db_check_interval < 0 makes cells manager always hit the DB
+        self.flags(enable=True, db_check_interval=-1, group='cells')
+        super(CellsSampleJsonTest, self).setUp()
+        self._stub_cells()
+
+    def _stub_cells(self, num_cells=5):
+        """Replace cell lookups with num_cells in-memory fake cells."""
+        self.cells = []
+        self.cells_next_id = 1
+
+        def _fake_cell_get_all(context):
+            return self.cells
+
+        def _fake_cell_get(inst, context, cell_name):
+            for cell in self.cells:
+                if cell['name'] == cell_name:
+                    return cell
+            raise exception.CellNotFound(cell_name=cell_name)
+
+        for x in xrange(num_cells):
+            cell = models.Cell()
+            our_id = self.cells_next_id
+            self.cells_next_id += 1
+            cell.update({'id': our_id,
+                         'name': 'cell%s' % our_id,
+                         'transport_url': 'rabbit://username%s@/' % our_id,
+                         'is_parent': our_id % 2 == 0})
+            self.cells.append(cell)
+
+        self.stubs.Set(db, 'cell_get_all', _fake_cell_get_all)
+        self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_get', _fake_cell_get)
+
+    def test_cells_empty_list(self):
+        # Override this
+        self._stub_cells(num_cells=0)
+        response = self._do_get('os-cells')
+        subs = self._get_regexes()
+        self._verify_response('cells-list-empty-resp', subs, response, 200)
+
+    def test_cells_list(self):
+        response = self._do_get('os-cells')
+        subs = self._get_regexes()
+        self._verify_response('cells-list-resp', subs, response, 200)
+
+    def test_cells_get(self):
+        response = self._do_get('os-cells/cell3')
+        subs = self._get_regexes()
+        self._verify_response('cells-get-resp', subs, response, 200)
+
+
+class CellsSampleXmlTest(CellsSampleJsonTest):
+    """Re-run the cells sample tests with XML content type."""
+    ctype = 'xml'
+
+
+class CellsCapacitySampleJsonTest(ApiSampleTestBaseV2):
+    """API samples for the cell_capacities extension."""
+
+    extends_name = ("nova.api.openstack.compute.contrib.cells.Cells")
+    extension_name = ("nova.api.openstack.compute.contrib."
+                      "cell_capacities.Cell_capacities")
+
+    def setUp(self):
+        self.flags(enable=True, db_check_interval=-1, group='cells')
+        super(CellsCapacitySampleJsonTest, self).setUp()
+        # (navneetk/kaushikc) : Mock cell capacity to avoid the capacity
+        # being calculated from the compute nodes in the environment
+        self._mock_cell_capacity()
+
+    def test_get_cell_capacity(self):
+        state_manager = state.CellStateManager()
+        my_state = state_manager.get_my_state()
+        response = self._do_get('os-cells/%s/capacities' %
+                my_state.name)
+        subs = self._get_regexes()
+        return self._verify_response('cells-capacities-resp',
+                                     subs, response, 200)
+
+    def test_get_all_cells_capacity(self):
+        response = self._do_get('os-cells/capacities')
+        subs = self._get_regexes()
+        return self._verify_response('cells-capacities-resp',
+                                     subs, response, 200)
+
+    def _mock_cell_capacity(self):
+        """Stub get_our_capacities with a fixed ram/disk capacity dict."""
+        self.mox.StubOutWithMock(self.cells.manager.state_manager,
+                                 'get_our_capacities')
+        response = {"ram_free":
+                        {"units_by_mb": {"8192": 0, "512": 13,
+                                         "4096": 1, "2048": 3, "16384": 0},
+                         "total_mb": 7680},
+                    "disk_free":
+                        {"units_by_mb": {"81920": 11, "20480": 46,
+                                         "40960": 23, "163840": 5, "0": 0},
+                         "total_mb": 1052672}
+        }
+        self.cells.manager.state_manager.get_our_capacities(). \
+            AndReturn(response)
+        self.mox.ReplayAll()
+
+
+class CellsCapacitySampleXmlTest(CellsCapacitySampleJsonTest):
+    """Re-run the cell-capacity sample tests with XML content type."""
+    ctype = 'xml'
+
+
+class BlockDeviceMappingV2BootJsonTest(ServersSampleBase):
+    """API samples for booting a server with block_device_mapping_v2."""
+
+    extension_name = ('nova.api.openstack.compute.contrib.'
+                      'block_device_mapping_v2_boot.'
+                      'Block_device_mapping_v2_boot')
+
+    def _get_flags(self):
+        f = super(BlockDeviceMappingV2BootJsonTest, self)._get_flags()
+        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+        # We need the volumes extension as well
+        f['osapi_compute_extension'].append(
+            'nova.api.openstack.compute.contrib.volumes.Volumes')
+        return f
+
+    def test_servers_post_with_bdm_v2(self):
+        # Stub cinder so the volume referenced in the BDM "exists" and
+        # passes the attach check.
+        self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
+        self.stubs.Set(cinder.API, 'check_attach',
+                       fakes.stub_volume_check_attach)
+        return self._post_server()
+
+
+class BlockDeviceMappingV2BootXmlTest(BlockDeviceMappingV2BootJsonTest):
+    """Re-run the BDM-v2 boot sample tests with XML content type."""
+    ctype = 'xml'
+
+
+class FloatingIPPoolsSampleJsonTests(ApiSampleTestBaseV2):
+    """API samples for the os-floating-ip-pools extension."""
+
+    extension_name = ("nova.api.openstack.compute.contrib.floating_ip_pools."
+                      "Floating_ip_pools")
+
+    def test_list_floatingippools(self):
+        pool_list = ["pool1", "pool2"]
+
+        def fake_get_floating_ip_pools(self, context):
+            return pool_list
+
+        self.stubs.Set(network_api.API, "get_floating_ip_pools",
+                       fake_get_floating_ip_pools)
+        response = self._do_get('os-floating-ip-pools')
+        subs = {
+            'pool1': pool_list[0],
+            'pool2': pool_list[1]
+        }
+        self._verify_response('floatingippools-list-resp', subs, response, 200)
+
+
+class FloatingIPPoolsSampleXmlTests(FloatingIPPoolsSampleJsonTests):
+    """Re-run the floating-IP-pool sample tests with XML content type."""
+    ctype = 'xml'
+
+
+class MultinicSampleJsonTest(ServersSampleBase):
+    """API samples for the multinic add/remove fixed-IP actions."""
+
+    extension_name = "nova.api.openstack.compute.contrib.multinic.Multinic"
+
+    def _disable_instance_dns_manager(self):
+        # NOTE(markmc): it looks like multinic and instance_dns_manager are
+        # incompatible. See:
+        #   https://bugs.launchpad.net/nova/+bug/1213251
+        self.flags(
+            instance_dns_manager='nova.network.noop_dns_driver.NoopDNSDriver')
+
+    def setUp(self):
+        # Must be set before the service stack comes up in super().setUp().
+        self._disable_instance_dns_manager()
+        super(MultinicSampleJsonTest, self).setUp()
+        self.uuid = self._post_server()
+
+    def _add_fixed_ip(self):
+        subs = {"networkId": 1}
+        response = self._do_post('servers/%s/action' % (self.uuid),
+                                 'multinic-add-fixed-ip-req', subs)
+        self.assertEqual(response.status_code, 202)
+
+    def test_add_fixed_ip(self):
+        self._add_fixed_ip()
+
+    def test_remove_fixed_ip(self):
+        # Add one first so there is an IP to remove.
+        self._add_fixed_ip()
+
+        subs = {"ip": "10.0.0.4"}
+        response = self._do_post('servers/%s/action' % (self.uuid),
+                                 'multinic-remove-fixed-ip-req', subs)
+        self.assertEqual(response.status_code, 202)
+
+
+class MultinicSampleXmlTest(MultinicSampleJsonTest):
+ ctype = "xml"
+
+
+class InstanceUsageAuditLogJsonTest(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "instance_usage_audit_log.Instance_usage_audit_log")
+
+ def test_show_instance_usage_audit_log(self):
+ response = self._do_get('os-instance_usage_audit_log/%s' %
+ urllib.quote('2012-07-05 10:00:00'))
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('inst-usage-audit-log-show-get-resp',
+ subs, response, 200)
+
+ def test_index_instance_usage_audit_log(self):
+ response = self._do_get('os-instance_usage_audit_log')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('inst-usage-audit-log-index-get-resp',
+ subs, response, 200)
+
+
+class InstanceUsageAuditLogXmlTest(InstanceUsageAuditLogJsonTest):
+ ctype = "xml"
+
+
+class FlavorExtraSpecsSampleJsonTests(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.flavorextraspecs."
+ "Flavorextraspecs")
+
+ def _flavor_extra_specs_create(self):
+ subs = {'value1': 'value1',
+ 'value2': 'value2'
+ }
+ response = self._do_post('flavors/1/os-extra_specs',
+ 'flavor-extra-specs-create-req', subs)
+ self._verify_response('flavor-extra-specs-create-resp',
+ subs, response, 200)
+
+ def test_flavor_extra_specs_get(self):
+ subs = {'value1': 'value1'}
+ self._flavor_extra_specs_create()
+ response = self._do_get('flavors/1/os-extra_specs/key1')
+ self._verify_response('flavor-extra-specs-get-resp',
+ subs, response, 200)
+
+ def test_flavor_extra_specs_list(self):
+ subs = {'value1': 'value1',
+ 'value2': 'value2'
+ }
+ self._flavor_extra_specs_create()
+ response = self._do_get('flavors/1/os-extra_specs')
+ self._verify_response('flavor-extra-specs-list-resp',
+ subs, response, 200)
+
+ def test_flavor_extra_specs_create(self):
+ self._flavor_extra_specs_create()
+
+ def test_flavor_extra_specs_update(self):
+ subs = {'value1': 'new_value1'}
+ self._flavor_extra_specs_create()
+ response = self._do_put('flavors/1/os-extra_specs/key1',
+ 'flavor-extra-specs-update-req', subs)
+ self._verify_response('flavor-extra-specs-update-resp',
+ subs, response, 200)
+
+ def test_flavor_extra_specs_delete(self):
+ self._flavor_extra_specs_create()
+ response = self._do_delete('flavors/1/os-extra_specs/key1')
+ self.assertEqual(response.status_code, 200)
+ self.assertEqual(response.content, '')
+
+
+class FlavorExtraSpecsSampleXmlTests(FlavorExtraSpecsSampleJsonTests):
+ ctype = 'xml'
+
+
+class FpingSampleJsonTests(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib.fping.Fping")
+
+ def setUp(self):
+ super(FpingSampleJsonTests, self).setUp()
+
+ def fake_check_fping(self):
+ pass
+ self.stubs.Set(utils, "execute", test_fping.execute)
+ self.stubs.Set(fping.FpingController, "check_fping",
+ fake_check_fping)
+
+ def test_get_fping(self):
+ self._post_server()
+ response = self._do_get('os-fping')
+ subs = self._get_regexes()
+ self._verify_response('fping-get-resp', subs, response, 200)
+
+ def test_get_fping_details(self):
+ uuid = self._post_server()
+ response = self._do_get('os-fping/%s' % (uuid))
+ subs = self._get_regexes()
+ self._verify_response('fping-get-details-resp', subs, response, 200)
+
+
+class FpingSampleXmlTests(FpingSampleJsonTests):
+ ctype = 'xml'
+
+
+class ExtendedAvailabilityZoneJsonTests(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".extended_availability_zone"
+ ".Extended_availability_zone")
+
+ def test_show(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('servers-detail-resp', subs, response, 200)
+
+
+class ExtendedAvailabilityZoneXmlTests(ExtendedAvailabilityZoneJsonTests):
+ ctype = 'xml'
+
+
+class EvacuateJsonTest(ServersSampleBase):
+
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".evacuate.Evacuate")
+
+ def test_server_evacuate(self):
+ uuid = self._post_server()
+
+ req_subs = {
+ 'host': 'testHost',
+ "adminPass": "MySecretPass",
+ "onSharedStorage": 'False'
+ }
+
+ def fake_service_is_up(self, service):
+            """Simulate that the instance's host is reported as down."""
+ return False
+
+ def fake_service_get_by_compute_host(self, context, host):
+            """Simulate that the given host is a valid compute host."""
+ return {
+ 'host_name': host,
+ 'service': 'compute',
+ 'zone': 'nova'
+ }
+
+ def fake_rebuild_instance(self, ctxt, instance, new_pass,
+ injected_files, image_ref, orig_image_ref,
+ orig_sys_metadata, bdms, recreate=False,
+ on_shared_storage=False, host=None,
+ preserve_ephemeral=False, kwargs=None):
+ return {
+ 'adminPass': new_pass
+ }
+
+ self.stubs.Set(service_group_api.API, 'service_is_up',
+ fake_service_is_up)
+ self.stubs.Set(compute_api.HostAPI, 'service_get_by_compute_host',
+ fake_service_get_by_compute_host)
+ self.stubs.Set(compute_rpcapi.ComputeAPI, 'rebuild_instance',
+ fake_rebuild_instance)
+
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-evacuate-req', req_subs)
+ subs = self._get_regexes()
+ self._verify_response('server-evacuate-resp', subs, response, 200)
+
+
+class EvacuateXmlTest(EvacuateJsonTest):
+ ctype = 'xml'
+
+
+class EvacuateFindHostSampleJsonTest(ServersSampleBase):
+ extends_name = ("nova.api.openstack.compute.contrib"
+ ".evacuate.Evacuate")
+
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".extended_evacuate_find_host.Extended_evacuate_find_host")
+
+ @mock.patch('nova.compute.manager.ComputeManager._check_instance_exists')
+ @mock.patch('nova.compute.api.HostAPI.service_get_by_compute_host')
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate(self, rebuild_mock, service_get_mock,
+ check_instance_mock):
+ self.uuid = self._post_server()
+
+ req_subs = {
+ "adminPass": "MySecretPass",
+ "onSharedStorage": 'False'
+ }
+
+ check_instance_mock.return_value = False
+
+ def fake_service_get_by_compute_host(self, context, host):
+ return {
+ 'host_name': host,
+ 'service': 'compute',
+ 'zone': 'nova'
+ }
+ service_get_mock.side_effect = fake_service_get_by_compute_host
+ with mock.patch.object(service_group_api.API, 'service_is_up',
+ return_value=False):
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'server-evacuate-find-host-req', req_subs)
+ subs = self._get_regexes()
+ self._verify_response('server-evacuate-find-host-resp', subs,
+ response, 200)
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=False, preserve_ephemeral=mock.ANY,
+ host=None)
+
+
+class EvacuateFindHostSampleXmlTests(EvacuateFindHostSampleJsonTest):
+ ctype = "xml"
+
+
+class FloatingIpDNSJsonTest(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.floating_ip_dns."
+ "Floating_ip_dns")
+
+ domain = 'domain1.example.org'
+ name = 'instance1'
+ scope = 'public'
+ project = 'project1'
+ dns_type = 'A'
+ ip = '192.168.1.1'
+
+ def _create_or_update(self):
+ subs = {'domain': self.domain,
+ 'project': self.project,
+ 'scope': self.scope}
+ response = self._do_put('os-floating-ip-dns/%s' % self.domain,
+ 'floating-ip-dns-create-or-update-req', subs)
+ self._verify_response('floating-ip-dns-create-or-update-resp', subs,
+ response, 200)
+
+ def _create_or_update_entry(self):
+ subs = {'ip': self.ip, 'dns_type': self.dns_type}
+ response = self._do_put('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.name),
+ 'floating-ip-dns-create-or-update-entry-req',
+ subs)
+ subs.update({'name': self.name, 'domain': self.domain})
+ self._verify_response('floating-ip-dns-create-or-update-entry-resp',
+ subs, response, 200)
+
+ def test_floating_ip_dns_list(self):
+ self._create_or_update()
+ response = self._do_get('os-floating-ip-dns')
+ subs = {'domain': self.domain,
+ 'project': self.project,
+ 'scope': self.scope}
+ self._verify_response('floating-ip-dns-list-resp', subs,
+ response, 200)
+
+ def test_floating_ip_dns_create_or_update(self):
+ self._create_or_update()
+
+ def test_floating_ip_dns_delete(self):
+ self._create_or_update()
+ response = self._do_delete('os-floating-ip-dns/%s' % self.domain)
+ self.assertEqual(response.status_code, 202)
+
+ def test_floating_ip_dns_create_or_update_entry(self):
+ self._create_or_update_entry()
+
+ def test_floating_ip_dns_entry_get(self):
+ self._create_or_update_entry()
+ response = self._do_get('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.name))
+ subs = {'domain': self.domain,
+ 'ip': self.ip,
+ 'name': self.name}
+ self._verify_response('floating-ip-dns-entry-get-resp', subs,
+ response, 200)
+
+ def test_floating_ip_dns_entry_delete(self):
+ self._create_or_update_entry()
+ response = self._do_delete('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.name))
+ self.assertEqual(response.status_code, 202)
+
+ def test_floating_ip_dns_entry_list(self):
+ self._create_or_update_entry()
+ response = self._do_get('os-floating-ip-dns/%s/entries/%s'
+ % (self.domain, self.ip))
+ subs = {'domain': self.domain,
+ 'ip': self.ip,
+ 'name': self.name}
+ self._verify_response('floating-ip-dns-entry-list-resp', subs,
+ response, 200)
+
+
+class FloatingIpDNSXmlTest(FloatingIpDNSJsonTest):
+ ctype = 'xml'
+
+
+class InstanceActionsSampleJsonTest(ApiSampleTestBaseV2):
+ extension_name = ('nova.api.openstack.compute.contrib.instance_actions.'
+ 'Instance_actions')
+
+ def setUp(self):
+ super(InstanceActionsSampleJsonTest, self).setUp()
+ self.actions = fake_server_actions.FAKE_ACTIONS
+ self.events = fake_server_actions.FAKE_EVENTS
+ self.instance = test_utils.get_test_instance()
+
+ def fake_server_action_get_by_request_id(context, uuid, request_id):
+ return copy.deepcopy(self.actions[uuid][request_id])
+
+ def fake_server_actions_get(context, uuid):
+ return [copy.deepcopy(value) for value in
+ self.actions[uuid].itervalues()]
+
+ def fake_server_action_events_get(context, action_id):
+ return copy.deepcopy(self.events[action_id])
+
+ def fake_instance_get_by_uuid(context, instance_id):
+ return self.instance
+
+ def fake_get(self, context, instance_uuid, expected_attrs=None,
+ want_objects=True):
+ return {'uuid': instance_uuid}
+
+ self.stubs.Set(db, 'action_get_by_request_id',
+ fake_server_action_get_by_request_id)
+ self.stubs.Set(db, 'actions_get', fake_server_actions_get)
+ self.stubs.Set(db, 'action_events_get',
+ fake_server_action_events_get)
+ self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ self.stubs.Set(compute_api.API, 'get', fake_get)
+
+ def test_instance_action_get(self):
+ fake_uuid = fake_server_actions.FAKE_UUID
+ fake_request_id = fake_server_actions.FAKE_REQUEST_ID1
+ fake_action = self.actions[fake_uuid][fake_request_id]
+
+ response = self._do_get('servers/%s/os-instance-actions/%s' %
+ (fake_uuid, fake_request_id))
+ subs = self._get_regexes()
+ subs['action'] = '(reboot)|(resize)'
+ subs['instance_uuid'] = fake_uuid
+ subs['integer_id'] = '[0-9]+'
+ subs['request_id'] = fake_action['request_id']
+ subs['start_time'] = fake_action['start_time']
+ subs['result'] = '(Success)|(Error)'
+ subs['event'] = '(schedule)|(compute_create)'
+ self._verify_response('instance-action-get-resp', subs, response, 200)
+
+ def test_instance_actions_list(self):
+ fake_uuid = fake_server_actions.FAKE_UUID
+ response = self._do_get('servers/%s/os-instance-actions' % (fake_uuid))
+ subs = self._get_regexes()
+ subs['action'] = '(reboot)|(resize)'
+ subs['integer_id'] = '[0-9]+'
+ subs['request_id'] = ('req-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
+ '-[0-9a-f]{4}-[0-9a-f]{12}')
+ self._verify_response('instance-actions-list-resp', subs,
+ response, 200)
+
+
+class InstanceActionsSampleXmlTest(InstanceActionsSampleJsonTest):
+ ctype = 'xml'
+
+
+class ImageSizeSampleJsonTests(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".image_size.Image_size")
+
+ def test_show(self):
+        # Get API sample of a single image details request.
+ image_id = fake.get_valid_image_id()
+ response = self._do_get('images/%s' % image_id)
+ subs = self._get_regexes()
+ subs['image_id'] = image_id
+ self._verify_response('image-get-resp', subs, response, 200)
+
+ def test_detail(self):
+        # Get API sample of the all-images details request.
+ response = self._do_get('images/detail')
+ subs = self._get_regexes()
+ self._verify_response('images-details-get-resp', subs, response, 200)
+
+
+class ImageSizeSampleXmlTests(ImageSizeSampleJsonTests):
+ ctype = 'xml'
+
+
+class ConfigDriveSampleJsonTest(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib.config_drive."
+ "Config_drive")
+
+ def setUp(self):
+ super(ConfigDriveSampleJsonTest, self).setUp()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+
+ def test_config_drive_show(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ # config drive can be a string for True or empty value for False
+ subs['cdrive'] = '.*'
+ self._verify_response('server-config-drive-get-resp', subs,
+ response, 200)
+
+ def test_config_drive_detail(self):
+ self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ # config drive can be a string for True or empty value for False
+ subs['cdrive'] = '.*'
+ self._verify_response('servers-config-drive-details-resp',
+ subs, response, 200)
+
+
+class ConfigDriveSampleXmlTest(ConfigDriveSampleJsonTest):
+ ctype = 'xml'
+
+
+class FlavorAccessSampleJsonTests(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.flavor_access."
+ "Flavor_access")
+
+ def _get_flags(self):
+ f = super(FlavorAccessSampleJsonTests, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+ # FlavorAccess extension also needs Flavormanage to be loaded.
+ f['osapi_compute_extension'].append(
+ 'nova.api.openstack.compute.contrib.flavormanage.Flavormanage')
+ return f
+
+ def _add_tenant(self):
+ subs = {
+ 'tenant_id': 'fake_tenant',
+ 'flavor_id': 10
+ }
+ response = self._do_post('flavors/10/action',
+ 'flavor-access-add-tenant-req',
+ subs)
+ self._verify_response('flavor-access-add-tenant-resp',
+ subs, response, 200)
+
+ def _create_flavor(self):
+ subs = {
+ 'flavor_id': 10,
+ 'flavor_name': 'test_flavor'
+ }
+ response = self._do_post("flavors",
+ "flavor-access-create-req",
+ subs)
+ subs.update(self._get_regexes())
+ self._verify_response("flavor-access-create-resp", subs, response, 200)
+
+ def test_flavor_access_create(self):
+ self._create_flavor()
+
+ def test_flavor_access_detail(self):
+ response = self._do_get('flavors/detail')
+ subs = self._get_regexes()
+ self._verify_response('flavor-access-detail-resp', subs, response, 200)
+
+ def test_flavor_access_list(self):
+ self._create_flavor()
+ self._add_tenant()
+ flavor_id = 10
+ response = self._do_get('flavors/%s/os-flavor-access' % flavor_id)
+ subs = {
+ 'flavor_id': flavor_id,
+ 'tenant_id': 'fake_tenant',
+ }
+ self._verify_response('flavor-access-list-resp', subs, response, 200)
+
+ def test_flavor_access_show(self):
+ flavor_id = 1
+ response = self._do_get('flavors/%s' % flavor_id)
+ subs = {
+ 'flavor_id': flavor_id
+ }
+ subs.update(self._get_regexes())
+ self._verify_response('flavor-access-show-resp', subs, response, 200)
+
+ def test_flavor_access_add_tenant(self):
+ self._create_flavor()
+ self._add_tenant()
+
+ def test_flavor_access_remove_tenant(self):
+ self._create_flavor()
+ self._add_tenant()
+ subs = {
+ 'tenant_id': 'fake_tenant',
+ }
+ response = self._do_post('flavors/10/action',
+ "flavor-access-remove-tenant-req",
+ subs)
+ exp_subs = {
+ "tenant_id": self.api.project_id,
+ "flavor_id": "10"
+ }
+ self._verify_response('flavor-access-remove-tenant-resp',
+ exp_subs, response, 200)
+
+
+class FlavorAccessSampleXmlTests(FlavorAccessSampleJsonTests):
+ ctype = 'xml'
+
+
+@mock.patch.object(service_group_api.API, "service_is_up", lambda _: True)
+class HypervisorsSampleJsonTests(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.hypervisors."
+ "Hypervisors")
+
+ def test_hypervisors_list(self):
+ response = self._do_get('os-hypervisors')
+ self._verify_response('hypervisors-list-resp', {}, response, 200)
+
+ def test_hypervisors_search(self):
+ response = self._do_get('os-hypervisors/fake/search')
+ self._verify_response('hypervisors-search-resp', {}, response, 200)
+
+ def test_hypervisors_servers(self):
+ response = self._do_get('os-hypervisors/fake/servers')
+ self._verify_response('hypervisors-servers-resp', {}, response, 200)
+
+ def test_hypervisors_show(self):
+ hypervisor_id = 1
+ subs = {
+ 'hypervisor_id': hypervisor_id
+ }
+ response = self._do_get('os-hypervisors/%s' % hypervisor_id)
+ subs.update(self._get_regexes())
+ self._verify_response('hypervisors-show-resp', subs, response, 200)
+
+ def test_hypervisors_statistics(self):
+ response = self._do_get('os-hypervisors/statistics')
+ self._verify_response('hypervisors-statistics-resp', {}, response, 200)
+
+ def test_hypervisors_uptime(self):
+ def fake_get_host_uptime(self, context, hyp):
+ return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
+ " 0.20, 0.12, 0.14")
+
+ self.stubs.Set(compute_api.HostAPI,
+ 'get_host_uptime', fake_get_host_uptime)
+ hypervisor_id = 1
+ response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
+ subs = {
+ 'hypervisor_id': hypervisor_id,
+ }
+ self._verify_response('hypervisors-uptime-resp', subs, response, 200)
+
+
+class HypervisorsSampleXmlTests(HypervisorsSampleJsonTests):
+ ctype = "xml"
+
+
+class ExtendedHypervisorsJsonTest(ApiSampleTestBaseV2):
+ extends_name = ("nova.api.openstack.compute.contrib."
+ "hypervisors.Hypervisors")
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "extended_hypervisors.Extended_hypervisors")
+
+ def test_hypervisors_show_with_ip(self):
+ hypervisor_id = 1
+ subs = {
+ 'hypervisor_id': hypervisor_id
+ }
+ response = self._do_get('os-hypervisors/%s' % hypervisor_id)
+ subs.update(self._get_regexes())
+ self._verify_response('hypervisors-show-with-ip-resp',
+ subs, response, 200)
+
+
+class ExtendedHypervisorsXmlTest(ExtendedHypervisorsJsonTest):
+ ctype = "xml"
+
+
+class HypervisorStatusJsonTest(ApiSampleTestBaseV2):
+ extends_name = ("nova.api.openstack.compute.contrib."
+ "hypervisors.Hypervisors")
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "hypervisor_status.Hypervisor_status")
+
+ def test_hypervisors_show_with_status(self):
+ hypervisor_id = 1
+ subs = {
+ 'hypervisor_id': hypervisor_id
+ }
+ response = self._do_get('os-hypervisors/%s' % hypervisor_id)
+ subs.update(self._get_regexes())
+ self._verify_response('hypervisors-show-with-status-resp',
+ subs, response, 200)
+
+
+class HypervisorStatusXmlTest(HypervisorStatusJsonTest):
+ ctype = 'xml'
+
+
+@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
+class HypervisorsCellsSampleJsonTests(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.hypervisors."
+ "Hypervisors")
+
+ def setUp(self):
+ self.flags(enable=True, cell_type='api', group='cells')
+ super(HypervisorsCellsSampleJsonTests, self).setUp()
+
+ def test_hypervisor_uptime(self, mocks):
+ fake_hypervisor = {'service': {'host': 'fake-mini',
+ 'disabled': False,
+ 'disabled_reason': None},
+ 'id': 1, 'hypervisor_hostname': 'fake-mini'}
+
+ def fake_get_host_uptime(self, context, hyp):
+ return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
+ " 0.20, 0.12, 0.14")
+
+ def fake_compute_node_get(self, context, hyp):
+ return fake_hypervisor
+
+ self.stubs.Set(cells_api.HostAPI, 'compute_node_get',
+ fake_compute_node_get)
+
+ self.stubs.Set(cells_api.HostAPI,
+ 'get_host_uptime', fake_get_host_uptime)
+ hypervisor_id = fake_hypervisor['id']
+ response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
+ subs = {'hypervisor_id': hypervisor_id}
+ self._verify_response('hypervisors-uptime-resp', subs, response, 200)
+
+
+class HypervisorsCellsSampleXmlTests(HypervisorsCellsSampleJsonTests):
+ ctype = "xml"
+
+
+class AttachInterfacesSampleJsonTest(ServersSampleBase):
+ extension_name = ('nova.api.openstack.compute.contrib.attach_interfaces.'
+ 'Attach_interfaces')
+
+ def setUp(self):
+ super(AttachInterfacesSampleJsonTest, self).setUp()
+
+ def fake_list_ports(self, *args, **kwargs):
+ uuid = kwargs.get('device_id', None)
+ if not uuid:
+ raise exception.InstanceNotFound(instance_id=None)
+ port_data = {
+ "id": "ce531f90-199f-48c0-816c-13e38010b442",
+ "network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "fa:16:3e:4c:2c:30",
+ "fixed_ips": [
+ {
+ "ip_address": "192.168.1.3",
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+ }
+ ],
+ "device_id": uuid,
+ }
+ ports = {'ports': [port_data]}
+ return ports
+
+ def fake_show_port(self, context, port_id=None):
+ if not port_id:
+ raise exception.PortNotFound(port_id=None)
+ port_data = {
+ "id": port_id,
+ "network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "fa:16:3e:4c:2c:30",
+ "fixed_ips": [
+ {
+ "ip_address": "192.168.1.3",
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+ }
+ ],
+ "device_id": 'bece68a3-2f8b-4e66-9092-244493d6aba7',
+ }
+ port = {'port': port_data}
+ return port
+
+ def fake_attach_interface(self, context, instance,
+ network_id, port_id,
+ requested_ip='192.168.1.3'):
+ if not network_id:
+ network_id = "fake_net_uuid"
+ if not port_id:
+ port_id = "fake_port_uuid"
+ vif = fake_network_cache_model.new_vif()
+ vif['id'] = port_id
+ vif['network']['id'] = network_id
+ vif['network']['subnets'][0]['ips'][0] = requested_ip
+ return vif
+
+ def fake_detach_interface(self, context, instance, port_id):
+ pass
+
+ self.stubs.Set(network_api.API, 'list_ports', fake_list_ports)
+ self.stubs.Set(network_api.API, 'show_port', fake_show_port)
+ self.stubs.Set(compute_api.API, 'attach_interface',
+ fake_attach_interface)
+ self.stubs.Set(compute_api.API, 'detach_interface',
+ fake_detach_interface)
+ self.flags(auth_strategy=None, group='neutron')
+ self.flags(url='http://anyhost/', group='neutron')
+ self.flags(url_timeout=30, group='neutron')
+
+ def generalize_subs(self, subs, vanilla_regexes):
+ subs['subnet_id'] = vanilla_regexes['uuid']
+ subs['net_id'] = vanilla_regexes['uuid']
+ subs['port_id'] = vanilla_regexes['uuid']
+ subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+ subs['ip_address'] = vanilla_regexes['ip']
+ return subs
+
+ def test_list_interfaces(self):
+ instance_uuid = self._post_server()
+ response = self._do_get('servers/%s/os-interface' % instance_uuid)
+ subs = {
+ 'ip_address': '192.168.1.3',
+ 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
+ 'mac_addr': 'fa:16:3e:4c:2c:30',
+ 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
+ 'port_id': 'ce531f90-199f-48c0-816c-13e38010b442',
+ 'port_state': 'ACTIVE'
+ }
+ self._verify_response('attach-interfaces-list-resp', subs,
+ response, 200)
+
+ def _stub_show_for_instance(self, instance_uuid, port_id):
+ show_port = network_api.API().show_port(None, port_id)
+ show_port['port']['device_id'] = instance_uuid
+ self.stubs.Set(network_api.API, 'show_port', lambda *a, **k: show_port)
+
+ def test_show_interfaces(self):
+ instance_uuid = self._post_server()
+ port_id = 'ce531f90-199f-48c0-816c-13e38010b442'
+ self._stub_show_for_instance(instance_uuid, port_id)
+ response = self._do_get('servers/%s/os-interface/%s' %
+ (instance_uuid, port_id))
+ subs = {
+ 'ip_address': '192.168.1.3',
+ 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
+ 'mac_addr': 'fa:16:3e:4c:2c:30',
+ 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
+ 'port_id': port_id,
+ 'port_state': 'ACTIVE'
+ }
+ self._verify_response('attach-interfaces-show-resp', subs,
+ response, 200)
+
+ def test_create_interfaces(self, instance_uuid=None):
+ if instance_uuid is None:
+ instance_uuid = self._post_server()
+ subs = {
+ 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
+ 'port_id': 'ce531f90-199f-48c0-816c-13e38010b442',
+ 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
+ 'ip_address': '192.168.1.3',
+ 'port_state': 'ACTIVE',
+ 'mac_addr': 'fa:16:3e:4c:2c:30',
+ }
+ self._stub_show_for_instance(instance_uuid, subs['port_id'])
+ response = self._do_post('servers/%s/os-interface' % instance_uuid,
+ 'attach-interfaces-create-req', subs)
+ subs.update(self._get_regexes())
+ self._verify_response('attach-interfaces-create-resp', subs,
+ response, 200)
+
+ def test_delete_interfaces(self):
+ instance_uuid = self._post_server()
+ port_id = 'ce531f90-199f-48c0-816c-13e38010b442'
+ response = self._do_delete('servers/%s/os-interface/%s' %
+ (instance_uuid, port_id))
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+
+class AttachInterfacesSampleXmlTest(AttachInterfacesSampleJsonTest):
+ ctype = 'xml'
+
+
+class SnapshotsSampleJsonTests(ApiSampleTestBaseV2):
+ extension_name = "nova.api.openstack.compute.contrib.volumes.Volumes"
+
+ create_subs = {
+ 'snapshot_name': 'snap-001',
+ 'description': 'Daily backup',
+ 'volume_id': '521752a6-acf6-4b2d-bc7a-119f9148cd8c'
+ }
+
+ def setUp(self):
+ super(SnapshotsSampleJsonTests, self).setUp()
+ self.stubs.Set(cinder.API, "get_all_snapshots",
+ fakes.stub_snapshot_get_all)
+ self.stubs.Set(cinder.API, "get_snapshot", fakes.stub_snapshot_get)
+
+ def _create_snapshot(self):
+ self.stubs.Set(cinder.API, "create_snapshot",
+ fakes.stub_snapshot_create)
+
+ response = self._do_post("os-snapshots",
+ "snapshot-create-req",
+ self.create_subs)
+ return response
+
+ def test_snapshots_create(self):
+ response = self._create_snapshot()
+ self.create_subs.update(self._get_regexes())
+ self._verify_response("snapshot-create-resp",
+ self.create_subs, response, 200)
+
+ def test_snapshots_delete(self):
+ self.stubs.Set(cinder.API, "delete_snapshot",
+ fakes.stub_snapshot_delete)
+ self._create_snapshot()
+ response = self._do_delete('os-snapshots/100')
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+ def test_snapshots_detail(self):
+ response = self._do_get('os-snapshots/detail')
+ subs = self._get_regexes()
+ self._verify_response('snapshots-detail-resp', subs, response, 200)
+
+ def test_snapshots_list(self):
+ response = self._do_get('os-snapshots')
+ subs = self._get_regexes()
+ self._verify_response('snapshots-list-resp', subs, response, 200)
+
+ def test_snapshots_show(self):
+ response = self._do_get('os-snapshots/100')
+ subs = {
+ 'snapshot_name': 'Default name',
+ 'description': 'Default description'
+ }
+ subs.update(self._get_regexes())
+ self._verify_response('snapshots-show-resp', subs, response, 200)
+
+
+class SnapshotsSampleXmlTests(SnapshotsSampleJsonTests):
+ ctype = "xml"
+
+
+class AssistedVolumeSnapshotsJsonTest(ApiSampleTestBaseV2):
+ """Assisted volume snapshots."""
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "assisted_volume_snapshots.Assisted_volume_snapshots")
+
+ def _create_assisted_snapshot(self, subs):
+ self.stubs.Set(compute_api.API, 'volume_snapshot_create',
+ fakes.stub_compute_volume_snapshot_create)
+
+ response = self._do_post("os-assisted-volume-snapshots",
+ "snapshot-create-assisted-req",
+ subs)
+ return response
+
+ def test_snapshots_create_assisted(self):
+ subs = {
+ 'snapshot_name': 'snap-001',
+ 'description': 'Daily backup',
+ 'volume_id': '521752a6-acf6-4b2d-bc7a-119f9148cd8c',
+ 'snapshot_id': '421752a6-acf6-4b2d-bc7a-119f9148cd8c',
+ 'type': 'qcow2',
+ 'new_file': 'new_file_name'
+ }
+ subs.update(self._get_regexes())
+ response = self._create_assisted_snapshot(subs)
+ self._verify_response("snapshot-create-assisted-resp",
+ subs, response, 200)
+
+ def test_snapshots_delete_assisted(self):
+ self.stubs.Set(compute_api.API, 'volume_snapshot_delete',
+ fakes.stub_compute_volume_snapshot_delete)
+ snapshot_id = '100'
+ response = self._do_delete(
+ 'os-assisted-volume-snapshots/%s?delete_info='
+ '{"volume_id":"521752a6-acf6-4b2d-bc7a-119f9148cd8c"}'
+ % snapshot_id)
+ self.assertEqual(response.status_code, 204)
+ self.assertEqual(response.content, '')
+
+
+class AssistedVolumeSnapshotsXmlTest(AssistedVolumeSnapshotsJsonTest):
+ ctype = "xml"
+
+
+class VolumeAttachmentsSampleBase(ServersSampleBase):
+ def _stub_db_bdms_get_all_by_instance(self, server_id):
+
+ def fake_bdms_get_all_by_instance(context, instance_uuid,
+ use_slave=False):
+ bdms = [
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f803',
+ 'instance_uuid': server_id, 'source_type': 'volume',
+ 'destination_type': 'volume', 'device_name': '/dev/sdd'}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 2, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f804',
+ 'instance_uuid': server_id, 'source_type': 'volume',
+ 'destination_type': 'volume', 'device_name': '/dev/sdc'})
+ ]
+ return bdms
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_bdms_get_all_by_instance)
+
+ def _stub_compute_api_get(self):
+
+ def fake_compute_api_get(self, context, instance_id,
+ want_objects=False, expected_attrs=None):
+ if want_objects:
+ return fake_instance.fake_instance_obj(
+ context, **{'uuid': instance_id})
+ else:
+ return {'uuid': instance_id}
+
+ self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
+
+
+class VolumeAttachmentsSampleJsonTest(VolumeAttachmentsSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib.volumes.Volumes")
+
+ def test_attach_volume_to_server(self):
+ self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
+ self.stubs.Set(cinder.API, 'check_attach', lambda *a, **k: None)
+ self.stubs.Set(cinder.API, 'reserve_volume', lambda *a, **k: None)
+ device_name = '/dev/vdd'
+ bdm = objects.BlockDeviceMapping()
+ bdm['device_name'] = device_name
+ self.stubs.Set(compute_manager.ComputeManager,
+ "reserve_block_device_name",
+ lambda *a, **k: bdm)
+ self.stubs.Set(compute_manager.ComputeManager,
+ 'attach_volume',
+ lambda *a, **k: None)
+ self.stubs.Set(objects.BlockDeviceMapping, 'get_by_volume_id',
+ classmethod(lambda *a, **k: None))
+
+ volume = fakes.stub_volume_get(None, context.get_admin_context(),
+ 'a26887c6-c47b-4654-abb5-dfadf7d3f803')
+ subs = {
+ 'volume_id': volume['id'],
+ 'device': device_name
+ }
+ server_id = self._post_server()
+ response = self._do_post('servers/%s/os-volume_attachments'
+ % server_id,
+ 'attach-volume-to-server-req', subs)
+
+ subs.update(self._get_regexes())
+ self._verify_response('attach-volume-to-server-resp', subs,
+ response, 200)
+
+ def test_list_volume_attachments(self):
+ server_id = self._post_server()
+
+ self._stub_db_bdms_get_all_by_instance(server_id)
+
+ response = self._do_get('servers/%s/os-volume_attachments'
+ % server_id)
+ subs = self._get_regexes()
+ self._verify_response('list-volume-attachments-resp', subs,
+ response, 200)
+
+ def test_volume_attachment_detail(self):
+ server_id = self._post_server()
+ attach_id = "a26887c6-c47b-4654-abb5-dfadf7d3f803"
+ self._stub_db_bdms_get_all_by_instance(server_id)
+ self._stub_compute_api_get()
+ response = self._do_get('servers/%s/os-volume_attachments/%s'
+ % (server_id, attach_id))
+ subs = self._get_regexes()
+ self._verify_response('volume-attachment-detail-resp', subs,
+ response, 200)
+
+ def test_volume_attachment_delete(self):
+ server_id = self._post_server()
+ attach_id = "a26887c6-c47b-4654-abb5-dfadf7d3f803"
+ self._stub_db_bdms_get_all_by_instance(server_id)
+ self._stub_compute_api_get()
+ self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
+ self.stubs.Set(compute_api.API, 'detach_volume', lambda *a, **k: None)
+ response = self._do_delete('servers/%s/os-volume_attachments/%s'
+ % (server_id, attach_id))
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+
+class VolumeAttachmentsSampleXmlTest(VolumeAttachmentsSampleJsonTest):
+ ctype = 'xml'
+
+
+class VolumeAttachUpdateSampleJsonTest(VolumeAttachmentsSampleBase):
+ extends_name = ("nova.api.openstack.compute.contrib.volumes.Volumes")
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "volume_attachment_update.Volume_attachment_update")
+
+ def test_volume_attachment_update(self):
+ self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
+ subs = {
+ 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f805',
+ 'device': '/dev/sdd'
+ }
+ server_id = self._post_server()
+ attach_id = 'a26887c6-c47b-4654-abb5-dfadf7d3f803'
+ self._stub_db_bdms_get_all_by_instance(server_id)
+ self._stub_compute_api_get()
+ self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
+ self.stubs.Set(compute_api.API, 'swap_volume', lambda *a, **k: None)
+ response = self._do_put('servers/%s/os-volume_attachments/%s'
+ % (server_id, attach_id),
+ 'update-volume-req',
+ subs)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+
+class VolumeAttachUpdateSampleXmlTest(VolumeAttachUpdateSampleJsonTest):
+ ctype = 'xml'
+
+
+class VolumesSampleJsonTest(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib.volumes.Volumes")
+
+ def _get_volume_id(self):
+ return 'a26887c6-c47b-4654-abb5-dfadf7d3f803'
+
+ def _stub_volume(self, id, displayname="Volume Name",
+ displaydesc="Volume Description", size=100):
+ volume = {
+ 'id': id,
+ 'size': size,
+ 'availability_zone': 'zone1:host1',
+ 'instance_uuid': '3912f2b4-c5ba-4aec-9165-872876fe202e',
+ 'mountpoint': '/',
+ 'status': 'in-use',
+ 'attach_status': 'attached',
+ 'name': 'vol name',
+ 'display_name': displayname,
+ 'display_description': displaydesc,
+ 'created_at': datetime.datetime(2008, 12, 1, 11, 1, 55),
+ 'snapshot_id': None,
+ 'volume_type_id': 'fakevoltype',
+ 'volume_metadata': [],
+ 'volume_type': {'name': 'Backup'}
+ }
+ return volume
+
+ def _stub_volume_get(self, context, volume_id):
+ return self._stub_volume(volume_id)
+
+ def _stub_volume_delete(self, context, *args, **param):
+ pass
+
+ def _stub_volume_get_all(self, context, search_opts=None):
+ id = self._get_volume_id()
+ return [self._stub_volume(id)]
+
+ def _stub_volume_create(self, context, size, name, description, snapshot,
+ **param):
+ id = self._get_volume_id()
+ return self._stub_volume(id)
+
+ def setUp(self):
+ super(VolumesSampleJsonTest, self).setUp()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+
+ self.stubs.Set(cinder.API, "delete", self._stub_volume_delete)
+ self.stubs.Set(cinder.API, "get", self._stub_volume_get)
+ self.stubs.Set(cinder.API, "get_all", self._stub_volume_get_all)
+
+ def _post_volume(self):
+ subs_req = {
+ 'volume_name': "Volume Name",
+ 'volume_desc': "Volume Description",
+ }
+
+ self.stubs.Set(cinder.API, "create", self._stub_volume_create)
+ response = self._do_post('os-volumes', 'os-volumes-post-req',
+ subs_req)
+ subs = self._get_regexes()
+ subs.update(subs_req)
+ self._verify_response('os-volumes-post-resp', subs, response, 200)
+
+ def test_volumes_show(self):
+ subs = {
+ 'volume_name': "Volume Name",
+ 'volume_desc': "Volume Description",
+ }
+ vol_id = self._get_volume_id()
+ response = self._do_get('os-volumes/%s' % vol_id)
+ subs.update(self._get_regexes())
+ self._verify_response('os-volumes-get-resp', subs, response, 200)
+
+ def test_volumes_index(self):
+ subs = {
+ 'volume_name': "Volume Name",
+ 'volume_desc': "Volume Description",
+ }
+ response = self._do_get('os-volumes')
+ subs.update(self._get_regexes())
+ self._verify_response('os-volumes-index-resp', subs, response, 200)
+
+ def test_volumes_detail(self):
+ # For now, index and detail are the same.
+ # See the volumes api
+ subs = {
+ 'volume_name': "Volume Name",
+ 'volume_desc': "Volume Description",
+ }
+ response = self._do_get('os-volumes/detail')
+ subs.update(self._get_regexes())
+ self._verify_response('os-volumes-detail-resp', subs, response, 200)
+
+ def test_volumes_create(self):
+ self._post_volume()
+
+ def test_volumes_delete(self):
+ self._post_volume()
+ vol_id = self._get_volume_id()
+ response = self._do_delete('os-volumes/%s' % vol_id)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+
+class VolumesSampleXmlTest(VolumesSampleJsonTest):
+ ctype = 'xml'
+
+
+class MigrationsSamplesJsonTest(ApiSampleTestBaseV2):
+ extension_name = ("nova.api.openstack.compute.contrib.migrations."
+ "Migrations")
+
+ def _stub_migrations(self, context, filters):
+ fake_migrations = [
+ {
+ 'id': 1234,
+ 'source_node': 'node1',
+ 'dest_node': 'node2',
+ 'source_compute': 'compute1',
+ 'dest_compute': 'compute2',
+ 'dest_host': '1.2.3.4',
+ 'status': 'Done',
+ 'instance_uuid': 'instance_id_123',
+ 'old_instance_type_id': 1,
+ 'new_instance_type_id': 2,
+ 'created_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'deleted_at': None,
+ 'deleted': False
+ },
+ {
+ 'id': 5678,
+ 'source_node': 'node10',
+ 'dest_node': 'node20',
+ 'source_compute': 'compute10',
+ 'dest_compute': 'compute20',
+ 'dest_host': '5.6.7.8',
+ 'status': 'Done',
+ 'instance_uuid': 'instance_id_456',
+ 'old_instance_type_id': 5,
+ 'new_instance_type_id': 6,
+ 'created_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
+ 'updated_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
+ 'deleted_at': None,
+ 'deleted': False
+ }
+ ]
+ return fake_migrations
+
+ def setUp(self):
+ super(MigrationsSamplesJsonTest, self).setUp()
+ self.stubs.Set(compute_api.API, 'get_migrations',
+ self._stub_migrations)
+
+ def test_get_migrations(self):
+ response = self._do_get('os-migrations')
+ subs = self._get_regexes()
+
+ self.assertEqual(response.status_code, 200)
+ self._verify_response('migrations-get', subs, response, 200)
+
+
+class MigrationsSamplesXmlTest(MigrationsSamplesJsonTest):
+ ctype = 'xml'
+
+
+class PreserveEphemeralOnRebuildJsonTest(ServersSampleBase):
+ extension_name = ('nova.api.openstack.compute.contrib.'
+ 'preserve_ephemeral_rebuild.'
+ 'Preserve_ephemeral_rebuild')
+
+ def _test_server_action(self, uuid, action,
+ subs=None, resp_tpl=None, code=202):
+ subs = subs or {}
+ subs.update({'action': action})
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-action-%s' % action.lower(),
+ subs)
+ if resp_tpl:
+ subs.update(self._get_regexes())
+ self._verify_response(resp_tpl, subs, response, code)
+ else:
+ self.assertEqual(response.status_code, code)
+ self.assertEqual(response.content, "")
+
+ def test_rebuild_server_preserve_ephemeral_false(self):
+ uuid = self._post_server()
+ image = self.api.get_images()[0]['id']
+ subs = {'host': self._get_host(),
+ 'uuid': image,
+ 'name': 'foobar',
+ 'pass': 'seekr3t',
+ 'ip': '1.2.3.4',
+ 'ip6': 'fe80::100',
+ 'hostid': '[a-f0-9]+',
+ 'preserve_ephemeral': 'false'}
+ self._test_server_action(uuid, 'rebuild', subs,
+ 'server-action-rebuild-resp')
+
+ def test_rebuild_server_preserve_ephemeral_true(self):
+ image = self.api.get_images()[0]['id']
+ subs = {'host': self._get_host(),
+ 'uuid': image,
+ 'name': 'new-server-test',
+ 'pass': 'seekr3t',
+ 'ip': '1.2.3.4',
+ 'ip6': 'fe80::100',
+ 'hostid': '[a-f0-9]+',
+ 'preserve_ephemeral': 'true'}
+
+ def fake_rebuild(self_, context, instance, image_href, admin_password,
+ **kwargs):
+ self.assertTrue(kwargs['preserve_ephemeral'])
+ self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
+
+ instance_uuid = self._post_server()
+ response = self._do_post('servers/%s/action' % instance_uuid,
+ 'server-action-rebuild', subs)
+ self.assertEqual(response.status_code, 202)
+
+
+class PreserveEphemeralOnRebuildXmlTest(PreserveEphemeralOnRebuildJsonTest):
+ ctype = 'xml'
+
+
+class ServerExternalEventsJsonTest(ServersSampleBase):
+ extension_name = ('nova.api.openstack.compute.contrib.'
+ 'server_external_events.Server_external_events')
+
+ def test_create_event(self):
+ instance_uuid = self._post_server()
+ subs = {
+ 'uuid': instance_uuid,
+ 'name': 'network-changed',
+ 'status': 'completed',
+ 'tag': 'foo',
+ }
+ response = self._do_post('os-server-external-events',
+ 'event-create-req',
+ subs)
+ subs.update(self._get_regexes())
+ self._verify_response('event-create-resp', subs, response, 200)
+
+
+class ServerExternalEventsXmlTest(ServerExternalEventsJsonTest):
+ ctype = 'xml'
+
+
+class ServerGroupsSampleJsonTest(ServersSampleBase):
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".server_groups.Server_groups")
+
+ def _get_create_subs(self):
+ return {'name': 'test'}
+
+ def _post_server_group(self):
+ """Verify the response status code and returns the UUID of the
+ newly created server group.
+ """
+ subs = self._get_create_subs()
+ response = self._do_post('os-server-groups',
+ 'server-groups-post-req', subs)
+ subs = self._get_regexes()
+ subs['name'] = 'test'
+ return self._verify_response('server-groups-post-resp',
+ subs, response, 200)
+
+ def _create_server_group(self):
+ subs = self._get_create_subs()
+ return self._do_post('os-server-groups',
+ 'server-groups-post-req', subs)
+
+ def test_server_groups_post(self):
+ return self._post_server_group()
+
+ def test_server_groups_list(self):
+ subs = self._get_create_subs()
+ uuid = self._post_server_group()
+ response = self._do_get('os-server-groups')
+ subs.update(self._get_regexes())
+ subs['id'] = uuid
+ self._verify_response('server-groups-list-resp',
+ subs, response, 200)
+
+ def test_server_groups_get(self):
+ # Get api sample of server groups get request.
+ subs = {'name': 'test'}
+ uuid = self._post_server_group()
+ subs['id'] = uuid
+ response = self._do_get('os-server-groups/%s' % uuid)
+
+ self._verify_response('server-groups-get-resp', subs, response, 200)
+
+ def test_server_groups_delete(self):
+ uuid = self._post_server_group()
+ response = self._do_delete('os-server-groups/%s' % uuid)
+ self.assertEqual(response.status_code, 204)
+
+
+class ServerGroupsSampleXmlTest(ServerGroupsSampleJsonTest):
+ ctype = 'xml'
+
+
+class ServerGroupQuotas_LimitsSampleJsonTest(LimitsSampleJsonTest):
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "server_group_quotas.Server_group_quotas")
+
+
+class ServerGroupQuotas_LimitsSampleXmlTest(LimitsSampleXmlTest):
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "server_group_quotas.Server_group_quotas")
+
+
+class ServerGroupQuotas_UsedLimitsSamplesJsonTest(UsedLimitsSamplesJsonTest):
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "server_group_quotas.Server_group_quotas")
+ extends_name = ("nova.api.openstack.compute.contrib.used_limits."
+ "Used_limits")
+
+
+class ServerGroupQuotas_UsedLimitsSamplesXmlTest(UsedLimitsSamplesXmlTest):
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "server_group_quotas.Server_group_quotas")
+ extends_name = ("nova.api.openstack.compute.contrib.used_limits."
+ "Used_limits")
+
+
+class ServerGroupQuotas_QuotasSampleJsonTests(QuotasSampleJsonTests):
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "server_group_quotas.Server_group_quotas")
+ extends_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
+
+
+class ServerGroupQuotas_QuotasSampleXmlTests(QuotasSampleXmlTests):
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "server_group_quotas.Server_group_quotas")
+ extends_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
+
+
+class ServerGroupQuotasQuota_ClassesSampleJsonTests(
+ QuotaClassesSampleJsonTests):
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "server_group_quotas.Server_group_quotas")
+ extends_name = ("nova.api.openstack.compute.contrib.quota_classes."
+ "Quota_classes")
+
+
+class ServerGroupQuotas_QuotaClassesSampleXmlTests(
+ QuotaClassesSampleXmlTests):
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "server_group_quotas.Server_group_quotas")
+ extends_name = ("nova.api.openstack.compute.contrib.quota_classes."
+ "Quota_classes")
diff --git a/nova/tests/unit/integrated/test_extensions.py b/nova/tests/unit/integrated/test_extensions.py
new file mode 100644
index 0000000000..927b51b453
--- /dev/null
+++ b/nova/tests/unit/integrated/test_extensions.py
@@ -0,0 +1,42 @@
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+# Import extensions to pull in osapi_compute_extension CONF option used below.
+from nova.openstack.common import log as logging
+from nova.tests.unit.integrated import integrated_helpers
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+class ExtensionsTest(integrated_helpers._IntegratedTestBase):
+ _api_version = 'v2'
+
+ def _get_flags(self):
+ f = super(ExtensionsTest, self)._get_flags()
+ f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
+ f['osapi_compute_extension'].append(
+ 'nova.tests.unit.api.openstack.compute.extensions.'
+ 'foxinsocks.Foxinsocks')
+ return f
+
+ def test_get_foxnsocks(self):
+ # Simple check that fox-n-socks works.
+ response = self.api.api_request('/foxnsocks')
+ foxnsocks = response.content
+ LOG.debug("foxnsocks: %s" % foxnsocks)
+ self.assertEqual('Try to say this Mr. Knox, sir...', foxnsocks)
diff --git a/nova/tests/unit/integrated/test_login.py b/nova/tests/unit/integrated/test_login.py
new file mode 100644
index 0000000000..851282000d
--- /dev/null
+++ b/nova/tests/unit/integrated/test_login.py
@@ -0,0 +1,36 @@
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.openstack.common import log as logging
+from nova.tests.unit.integrated.api import client
+from nova.tests.unit.integrated import integrated_helpers
+
+
+LOG = logging.getLogger(__name__)
+
+
+class LoginTest(integrated_helpers._IntegratedTestBase):
+ _api_version = 'v2'
+
+ def test_login(self):
+ # Simple check - we list flavors - so we know we're logged in.
+ flavors = self.api.get_flavors()
+ for flavor in flavors:
+ LOG.debug("flavor: %s", flavor)
+
+
+class LoginTestV3(client.TestOpenStackClientV3Mixin, LoginTest):
+ _api_version = 'v3'
diff --git a/nova/tests/unit/integrated/test_servers.py b/nova/tests/unit/integrated/test_servers.py
new file mode 100644
index 0000000000..97f80d7813
--- /dev/null
+++ b/nova/tests/unit/integrated/test_servers.py
@@ -0,0 +1,522 @@
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import time
+import zlib
+
+from oslo.utils import timeutils
+
+from nova import context
+from nova import exception
+from nova.openstack.common import log as logging
+from nova.tests.unit import fake_network
+from nova.tests.unit.integrated.api import client
+from nova.tests.unit.integrated import integrated_helpers
+import nova.virt.fake
+
+
+LOG = logging.getLogger(__name__)
+
+
+class ServersTest(integrated_helpers._IntegratedTestBase):
+ _api_version = 'v2'
+ _force_delete_parameter = 'forceDelete'
+ _image_ref_parameter = 'imageRef'
+ _flavor_ref_parameter = 'flavorRef'
+ _access_ipv4_parameter = 'accessIPv4'
+ _access_ipv6_parameter = 'accessIPv6'
+ _return_resv_id_parameter = 'return_reservation_id'
+ _min_count_parameter = 'min_count'
+
+ def setUp(self):
+ super(ServersTest, self).setUp()
+ self.conductor = self.start_service(
+ 'conductor', manager='nova.conductor.manager.ConductorManager')
+
+ def _wait_for_state_change(self, server, from_status):
+ for i in xrange(0, 50):
+ server = self.api.get_server(server['id'])
+ if server['status'] != from_status:
+ break
+ time.sleep(.1)
+
+ return server
+
+ def _restart_compute_service(self, *args, **kwargs):
+ """restart compute service. NOTE: fake driver forgets all instances."""
+ self.compute.kill()
+ self.compute = self.start_service('compute', *args, **kwargs)
+
+ def test_get_servers(self):
+ # Simple check that listing servers works.
+ servers = self.api.get_servers()
+ for server in servers:
+ LOG.debug("server: %s" % server)
+
+ def test_create_server_with_error(self):
+ # Create a server which will enter error state.
+ fake_network.set_stub_network_methods(self.stubs)
+
+ def throw_error(*args, **kwargs):
+ raise exception.BuildAbortException(reason='',
+ instance_uuid='fake')
+
+ self.stubs.Set(nova.virt.fake.FakeDriver, 'spawn', throw_error)
+
+ server = self._build_minimal_create_server_request()
+ created_server = self.api.post_server({"server": server})
+ created_server_id = created_server['id']
+
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+
+ found_server = self._wait_for_state_change(found_server, 'BUILD')
+
+ self.assertEqual('ERROR', found_server['status'])
+ self._delete_server(created_server_id)
+
+ def test_create_and_delete_server(self):
+ # Creates and deletes a server.
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # Create server
+ # Build the server data gradually, checking errors along the way
+ server = {}
+ good_server = self._build_minimal_create_server_request()
+
+ post = {'server': server}
+
+ # Without an imageRef, this throws 500.
+ # TODO(justinsb): Check whatever the spec says should be thrown here
+ self.assertRaises(client.OpenStackApiException,
+ self.api.post_server, post)
+
+ # With an invalid imageRef, this throws 500.
+ server[self._image_ref_parameter] = self.get_invalid_image()
+ # TODO(justinsb): Check whatever the spec says should be thrown here
+ self.assertRaises(client.OpenStackApiException,
+ self.api.post_server, post)
+
+ # Add a valid imageRef
+ server[self._image_ref_parameter] = good_server.get(
+ self._image_ref_parameter)
+
+ # Without flavorRef, this throws 500
+ # TODO(justinsb): Check whatever the spec says should be thrown here
+ self.assertRaises(client.OpenStackApiException,
+ self.api.post_server, post)
+
+ server[self._flavor_ref_parameter] = good_server.get(
+ self._flavor_ref_parameter)
+
+ # Without a name, this throws 500
+ # TODO(justinsb): Check whatever the spec says should be thrown here
+ self.assertRaises(client.OpenStackApiException,
+ self.api.post_server, post)
+
+ # Set a valid server name
+ server['name'] = good_server['name']
+
+ created_server = self.api.post_server(post)
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Check it's there
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+
+ # It should also be in the all-servers list
+ servers = self.api.get_servers()
+ server_ids = [s['id'] for s in servers]
+ self.assertIn(created_server_id, server_ids)
+
+ found_server = self._wait_for_state_change(found_server, 'BUILD')
+ # It should be available...
+ # TODO(justinsb): Mock doesn't yet do this...
+ self.assertEqual('ACTIVE', found_server['status'])
+ servers = self.api.get_servers(detail=True)
+ for server in servers:
+ self.assertIn("image", server)
+ self.assertIn("flavor", server)
+
+ self._delete_server(created_server_id)
+
+ def _force_reclaim(self):
+ # Make sure that compute manager thinks the instance is
+ # old enough to be expired
+ the_past = timeutils.utcnow() + datetime.timedelta(hours=1)
+ timeutils.set_time_override(override_time=the_past)
+ ctxt = context.get_admin_context()
+ self.compute._reclaim_queued_deletes(ctxt)
+
+ def test_deferred_delete(self):
+ # Creates, deletes and waits for server to be reclaimed.
+ self.flags(reclaim_instance_interval=1)
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # Create server
+ server = self._build_minimal_create_server_request()
+
+ created_server = self.api.post_server({'server': server})
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Wait for it to finish being created
+ found_server = self._wait_for_state_change(created_server, 'BUILD')
+
+ # It should be available...
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ # Cannot restore unless instance is deleted
+ self.assertRaises(client.OpenStackApiException,
+ self.api.post_server_action, created_server_id,
+ {'restore': {}})
+
+ # Delete the server
+ self.api.delete_server(created_server_id)
+
+ # Wait for queued deletion
+ found_server = self._wait_for_state_change(found_server, 'ACTIVE')
+ self.assertEqual('SOFT_DELETED', found_server['status'])
+
+ self._force_reclaim()
+
+ # Wait for real deletion
+ self._wait_for_deletion(created_server_id)
+
+ def test_deferred_delete_restore(self):
+ # Creates, deletes and restores a server.
+ self.flags(reclaim_instance_interval=3600)
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # Create server
+ server = self._build_minimal_create_server_request()
+
+ created_server = self.api.post_server({'server': server})
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Wait for it to finish being created
+ found_server = self._wait_for_state_change(created_server, 'BUILD')
+
+ # It should be available...
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ # Delete the server
+ self.api.delete_server(created_server_id)
+
+ # Wait for queued deletion
+ found_server = self._wait_for_state_change(found_server, 'ACTIVE')
+ self.assertEqual('SOFT_DELETED', found_server['status'])
+
+ # Restore server
+ self.api.post_server_action(created_server_id, {'restore': {}})
+
+ # Wait for server to become active again
+ found_server = self._wait_for_state_change(found_server, 'DELETED')
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ def test_deferred_delete_force(self):
+ # Creates, deletes and force deletes a server.
+ self.flags(reclaim_instance_interval=3600)
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # Create server
+ server = self._build_minimal_create_server_request()
+
+ created_server = self.api.post_server({'server': server})
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Wait for it to finish being created
+ found_server = self._wait_for_state_change(created_server, 'BUILD')
+
+ # It should be available...
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ # Delete the server
+ self.api.delete_server(created_server_id)
+
+ # Wait for queued deletion
+ found_server = self._wait_for_state_change(found_server, 'ACTIVE')
+ self.assertEqual('SOFT_DELETED', found_server['status'])
+
+ # Force delete server
+ self.api.post_server_action(created_server_id,
+ {self._force_delete_parameter: {}})
+
+ # Wait for real deletion
+ self._wait_for_deletion(created_server_id)
+
+ def _wait_for_deletion(self, server_id):
+ # Wait (briefly) for deletion
+ for _retries in range(50):
+ try:
+ found_server = self.api.get_server(server_id)
+ except client.OpenStackApiNotFoundException:
+ found_server = None
+ LOG.debug("Got 404, proceeding")
+ break
+
+ LOG.debug("Found_server=%s" % found_server)
+
+ # TODO(justinsb): Mock doesn't yet do accurate state changes
+ # if found_server['status'] != 'deleting':
+ # break
+ time.sleep(.1)
+
+ # Should be gone
+ self.assertFalse(found_server)
+
+ def _delete_server(self, server_id):
+ # Delete the server
+ self.api.delete_server(server_id)
+ self._wait_for_deletion(server_id)
+
+ def test_create_server_with_metadata(self):
+ # Creates a server with metadata.
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # Build the server data gradually, checking errors along the way
+ server = self._build_minimal_create_server_request()
+
+ metadata = {}
+ for i in range(30):
+ metadata['key_%s' % i] = 'value_%s' % i
+
+ server['metadata'] = metadata
+
+ post = {'server': server}
+ created_server = self.api.post_server(post)
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+ self.assertEqual(metadata, found_server.get('metadata'))
+
+ # The server should also be in the all-servers details list
+ servers = self.api.get_servers(detail=True)
+ server_map = dict((server['id'], server) for server in servers)
+ found_server = server_map.get(created_server_id)
+ self.assertTrue(found_server)
+ # Details do include metadata
+ self.assertEqual(metadata, found_server.get('metadata'))
+
+ # The server should also be in the all-servers summary list
+ servers = self.api.get_servers(detail=False)
+ server_map = dict((server['id'], server) for server in servers)
+ found_server = server_map.get(created_server_id)
+ self.assertTrue(found_server)
+ # Summary should not include metadata
+ self.assertFalse(found_server.get('metadata'))
+
+ # Cleanup
+ self._delete_server(created_server_id)
+
+ def test_create_and_rebuild_server(self):
+ # Rebuild a server with metadata.
+ fake_network.set_stub_network_methods(self.stubs)
+
+        # create a server which initially has no metadata
+ server = self._build_minimal_create_server_request()
+ server_post = {'server': server}
+
+ metadata = {}
+ for i in range(30):
+ metadata['key_%s' % i] = 'value_%s' % i
+
+ server_post['server']['metadata'] = metadata
+
+ created_server = self.api.post_server(server_post)
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ created_server = self._wait_for_state_change(created_server, 'BUILD')
+
+ # rebuild the server with metadata and other server attributes
+ post = {}
+ post['rebuild'] = {
+ self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "name": "blah",
+ self._access_ipv4_parameter: "172.19.0.2",
+ self._access_ipv6_parameter: "fe80::2",
+ "metadata": {'some': 'thing'},
+ }
+ post['rebuild'].update(self._get_access_ips_params())
+
+ self.api.post_server_action(created_server_id, post)
+ LOG.debug("rebuilt server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+ self.assertEqual({'some': 'thing'}, found_server.get('metadata'))
+ self.assertEqual('blah', found_server.get('name'))
+ self.assertEqual(post['rebuild'][self._image_ref_parameter],
+ found_server.get('image')['id'])
+ self._verify_access_ips(found_server)
+
+ # rebuild the server with empty metadata and nothing else
+ post = {}
+ post['rebuild'] = {
+ self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "metadata": {},
+ }
+
+ self.api.post_server_action(created_server_id, post)
+ LOG.debug("rebuilt server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+ self.assertEqual({}, found_server.get('metadata'))
+ self.assertEqual('blah', found_server.get('name'))
+ self.assertEqual(post['rebuild'][self._image_ref_parameter],
+ found_server.get('image')['id'])
+ self._verify_access_ips(found_server)
+
+ # Cleanup
+ self._delete_server(created_server_id)
+
+ def _get_access_ips_params(self):
+ return {self._access_ipv4_parameter: "172.19.0.2",
+ self._access_ipv6_parameter: "fe80::2"}
+
+ def _verify_access_ips(self, server):
+ self.assertEqual('172.19.0.2',
+ server[self._access_ipv4_parameter])
+ self.assertEqual('fe80::2', server[self._access_ipv6_parameter])
+
+ def test_rename_server(self):
+ # Test building and renaming a server.
+ fake_network.set_stub_network_methods(self.stubs)
+
+ # Create a server
+ server = self._build_minimal_create_server_request()
+ created_server = self.api.post_server({'server': server})
+ LOG.debug("created_server: %s" % created_server)
+ server_id = created_server['id']
+ self.assertTrue(server_id)
+
+ # Rename the server to 'new-name'
+ self.api.put_server(server_id, {'server': {'name': 'new-name'}})
+
+ # Check the name of the server
+ created_server = self.api.get_server(server_id)
+ self.assertEqual(created_server['name'], 'new-name')
+
+ # Cleanup
+ self._delete_server(server_id)
+
+ def test_create_multiple_servers(self):
+ # Creates multiple servers and checks for reservation_id.
+
+        # Create 2 servers, setting 'return_reservation_id', which should
+        # return a reservation_id
+ server = self._build_minimal_create_server_request()
+ server[self._min_count_parameter] = 2
+ server[self._return_resv_id_parameter] = True
+ post = {'server': server}
+ response = self.api.post_server(post)
+ self.assertIn('reservation_id', response)
+ reservation_id = response['reservation_id']
+ self.assertNotIn(reservation_id, ['', None])
+
+ # Create 1 more server, which should not return a reservation_id
+ server = self._build_minimal_create_server_request()
+ post = {'server': server}
+ created_server = self.api.post_server(post)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # lookup servers created by the first request.
+ servers = self.api.get_servers(detail=True,
+ search_opts={'reservation_id': reservation_id})
+ server_map = dict((server['id'], server) for server in servers)
+ found_server = server_map.get(created_server_id)
+ # The server from the 2nd request should not be there.
+ self.assertIsNone(found_server)
+ # Should have found 2 servers.
+ self.assertEqual(len(server_map), 2)
+
+ # Cleanup
+ self._delete_server(created_server_id)
+ for server_id in server_map.iterkeys():
+ self._delete_server(server_id)
+
+ def test_create_server_with_injected_files(self):
+ # Creates a server with injected_files.
+ fake_network.set_stub_network_methods(self.stubs)
+ personality = []
+
+ # Inject a text file
+ data = 'Hello, World!'
+ personality.append({
+ 'path': '/helloworld.txt',
+ 'contents': data.encode('base64'),
+ })
+
+ # Inject a binary file
+ data = zlib.compress('Hello, World!')
+ personality.append({
+ 'path': '/helloworld.zip',
+ 'contents': data.encode('base64'),
+ })
+
+ # Create server
+ server = self._build_minimal_create_server_request()
+ server['personality'] = personality
+
+ post = {'server': server}
+
+ created_server = self.api.post_server(post)
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Check it's there
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+
+ found_server = self._wait_for_state_change(found_server, 'BUILD')
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ # Cleanup
+ self._delete_server(created_server_id)
+
+
+class ServersTestV3(client.TestOpenStackClientV3Mixin, ServersTest):
+ _force_delete_parameter = 'forceDelete'
+ _api_version = 'v3'
+ _image_ref_parameter = 'imageRef'
+ _flavor_ref_parameter = 'flavorRef'
+ _access_ipv4_parameter = None
+ _access_ipv6_parameter = None
+
+ def _get_access_ips_params(self):
+ return {}
+
+ def _verify_access_ips(self, server):
+ # NOTE(alexxu): access_ips was demoted as extensions in v3 api.
+ # So skips verifying access_ips
+ pass
diff --git a/nova/tests/unit/integrated/test_xml.py b/nova/tests/unit/integrated/test_xml.py
new file mode 100644
index 0000000000..822a1db88b
--- /dev/null
+++ b/nova/tests/unit/integrated/test_xml.py
@@ -0,0 +1,51 @@
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+
+from nova.api.openstack import common
+from nova.api.openstack import xmlutil
+from nova.openstack.common import log as logging
+from nova.tests.unit.integrated import integrated_helpers
+
+
+LOG = logging.getLogger(__name__)
+
+
class XmlTests(integrated_helpers._IntegratedTestBase):
    """Some basic XML sanity checks."""
    # (Fixed: the original docstring opened with four quotes, leaving a
    # stray leading '"' in the docstring text.)

    _api_version = 'v2'

    def _get_xml_root(self, path):
        """GET *path* with an XML Accept header and return the parsed root.

        Shared by both namespace tests; the request/parse boilerplate
        was previously duplicated in each.
        """
        headers = {}
        headers['Accept'] = 'application/xml'

        response = self.api.api_request(path, headers=headers)
        data = response.content
        # Lazy %-args: formatting only happens if debug logging is enabled.
        LOG.debug("data: %s", data)
        return etree.XML(data)

    def test_namespace_limits(self):
        # /limits responses use the common v1.0 namespace.
        root = self._get_xml_root('/limits')
        self.assertEqual(root.nsmap.get(None), xmlutil.XMLNS_COMMON_V10)

    def test_namespace_servers(self):
        # /servers should have v1.1 namespace (has changed in 1.1).
        root = self._get_xml_root('/servers')
        self.assertEqual(root.nsmap.get(None), common.XML_NS_V11)
diff --git a/nova/tests/integrated/v3/__init__.py b/nova/tests/unit/integrated/v3/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/integrated/v3/__init__.py
+++ b/nova/tests/unit/integrated/v3/__init__.py
diff --git a/nova/tests/unit/integrated/v3/api_sample_base.py b/nova/tests/unit/integrated/v3/api_sample_base.py
new file mode 100644
index 0000000000..dad804328d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/api_sample_base.py
@@ -0,0 +1,79 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from oslo.config import cfg
+
+from nova.api.openstack import API_V3_CORE_EXTENSIONS # noqa
+from nova import test
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_utils
+from nova.tests.unit.integrated import api_samples_test_base
+
+CONF = cfg.CONF
+
+
class ApiSampleTestBaseV3(api_samples_test_base.ApiSampleTestBase):
    """Base class for v3 API sample tests.

    Restricts the loaded extensions via the ``osapi_v3`` whitelist so
    generated samples only include data from the extensions under
    test, and resolves sample/template file paths.
    """

    _api_version = 'v3'
    # Optional override: directory name under api_samples/ to use for
    # sample files (see _get_sample_path precedence below).
    sample_dir = None
    # Optional list of extra extension aliases to whitelist in
    # addition to extension_name.
    extra_extensions_to_load = None

    def setUp(self):
        # Config flags must be set before the base setUp starts the
        # API service.  (all_extensions / extension_name presumably
        # come from the api_samples_test_base base class — confirm.)
        self.flags(use_ipv6=False,
                   osapi_compute_link_prefix=self._get_host(),
                   osapi_glance_link_prefix=self._get_glance_host())
        if not self.all_extensions:
            # Set the whitelist to ensure only the extensions we are
            # interested in are loaded so the api samples don't include
            # data from extensions we are not interested in
            whitelist = API_V3_CORE_EXTENSIONS.copy()
            if self.extension_name:
                whitelist.add(self.extension_name)
            if self.extra_extensions_to_load:
                whitelist.update(set(self.extra_extensions_to_load))

            # NOTE(review): override is applied before super().setUp(),
            # presumably so it is seen when extensions are resolved.
            CONF.set_override('extensions_whitelist', whitelist,
                              'osapi_v3')

        super(ApiSampleTestBaseV3, self).setUp()
        self.useFixture(test.SampleNetworks(host=self.network.host))
        fake_network.stub_compute_with_ips(self.stubs)
        fake_utils.stub_out_utils_spawn_n(self.stubs)
        # True when GENERATE_SAMPLES is present in the environment,
        # regardless of its value (even empty string).
        self.generate_samples = os.getenv('GENERATE_SAMPLES') is not None

    @classmethod
    def _get_sample_path(cls, name, dirname, suffix=''):
        """Build the path of a sample file under *dirname*.

        Subdirectory precedence: all_extensions, then sample_dir, then
        extension_name.  The filename is ``<name>.<ctype><suffix>``.
        """
        parts = [dirname]
        parts.append('api_samples')
        if cls.all_extensions:
            parts.append('all_extensions')
        elif cls.sample_dir:
            parts.append(cls.sample_dir)
        elif cls.extension_name:
            parts.append(cls.extension_name)
        parts.append(name + "." + cls.ctype + suffix)
        return os.path.join(*parts)

    @classmethod
    def _get_sample(cls, name):
        """Return the path of the expected sample under doc/v3."""
        dirname = os.path.dirname(os.path.abspath(__file__))
        # Walk up from nova/tests/unit/integrated/v3 to the repo's
        # doc/v3 directory.
        dirname = os.path.normpath(os.path.join(dirname,
                                                "../../../../../doc/v3"))
        return cls._get_sample_path(name, dirname)

    @classmethod
    def _get_template(cls, name):
        """Return the path of the .tpl template beside this module."""
        dirname = os.path.dirname(os.path.abspath(__file__))
        return cls._get_sample_path(name, dirname, suffix='.tpl')
diff --git a/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl
index add1a44c32..add1a44c32 100644
--- a/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/all_extensions/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/all_extensions/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/all_extensions/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/all_extensions/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/all_extensions/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/all_extensions/server-post-resp.json.tpl
index 6f1d0b498e..6f1d0b498e 100644
--- a/nova/tests/integrated/v3/api_samples/all_extensions/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/all_extensions/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl
index 1e9edd0592..1e9edd0592 100644
--- a/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/all_extensions/servers-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/all_extensions/servers-list-resp.json.tpl
index 10a98858bf..10a98858bf 100644
--- a/nova/tests/integrated/v3/api_samples/all_extensions/servers-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/all_extensions/servers-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/consoles/consoles-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/consoles/consoles-create-req.json.tpl
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/integrated/v3/api_samples/consoles/consoles-create-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/consoles/consoles-create-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/consoles/consoles-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/consoles/consoles-get-resp.json.tpl
index a2a6de6ed4..a2a6de6ed4 100644
--- a/nova/tests/integrated/v3/api_samples/consoles/consoles-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/consoles/consoles-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/consoles/consoles-list-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/consoles/consoles-list-get-resp.json.tpl
index 9d908ad123..9d908ad123 100644
--- a/nova/tests/integrated/v3/api_samples/consoles/consoles-list-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/consoles/consoles-list-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/consoles/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/consoles/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/consoles/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/consoles/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/consoles/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/consoles/server-post-resp.json.tpl
index 71654b4b8a..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/consoles/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/consoles/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/extension-info/extensions-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/extension-info/extensions-get-resp.json.tpl
index 75d286fb03..75d286fb03 100644
--- a/nova/tests/integrated/v3/api_samples/extension-info/extensions-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/extension-info/extensions-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/extension-info/extensions-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/extension-info/extensions-list-resp.json.tpl
index 8ddbe20ac7..8ddbe20ac7 100644
--- a/nova/tests/integrated/v3/api_samples/extension-info/extensions-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/extension-info/extensions-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-req.json.tpl
index 94f5439e04..94f5439e04 100644
--- a/nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-resp.json.tpl
index d797155795..d797155795 100644
--- a/nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-add-tenant-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-create-req.json.tpl
index 02ac4e695d..02ac4e695d 100644
--- a/nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-create-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-create-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-create-resp.json.tpl
index bd01300043..bd01300043 100644
--- a/nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-create-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-detail-resp.json.tpl
index 5d593b4d62..5d593b4d62 100644
--- a/nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-detail-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-list-resp.json.tpl
index a6b6dbdcda..a6b6dbdcda 100644
--- a/nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-req.json.tpl
index 20711e02b4..20711e02b4 100644
--- a/nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-resp.json.tpl
index 5cab03334d..5cab03334d 100644
--- a/nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-remove-tenant-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-show-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-show-resp.json.tpl
index 255b122b7a..255b122b7a 100644
--- a/nova/tests/integrated/v3/api_samples/flavor-access/flavor-access-show-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-access/flavor-access-show-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json.tpl
index dd858e76c5..dd858e76c5 100644
--- a/nova/tests/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl
index dd858e76c5..dd858e76c5 100644
--- a/nova/tests/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-create-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl
index adfa77008f..adfa77008f 100644
--- a/nova/tests/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl
index dd858e76c5..dd858e76c5 100644
--- a/nova/tests/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json.tpl
index adfa77008f..adfa77008f 100644
--- a/nova/tests/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl
index adfa77008f..adfa77008f 100644
--- a/nova/tests/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-extra-specs/flavor-extra-specs-update-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavor-manage/flavor-create-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-manage/flavor-create-post-req.json.tpl
index 5383e5d15e..5383e5d15e 100644
--- a/nova/tests/integrated/v3/api_samples/flavor-manage/flavor-create-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-manage/flavor-create-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavor-manage/flavor-create-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavor-manage/flavor-create-post-resp.json.tpl
index 3f4690b5bf..3f4690b5bf 100644
--- a/nova/tests/integrated/v3/api_samples/flavor-manage/flavor-create-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavor-manage/flavor-create-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavors/flavor-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavors/flavor-get-resp.json.tpl
index 5f8a90b5f6..5f8a90b5f6 100644
--- a/nova/tests/integrated/v3/api_samples/flavors/flavor-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavors/flavor-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavors/flavors-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavors/flavors-detail-resp.json.tpl
index 5d593b4d62..5d593b4d62 100644
--- a/nova/tests/integrated/v3/api_samples/flavors/flavors-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavors/flavors-detail-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/flavors/flavors-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/flavors/flavors-list-resp.json.tpl
index fed9966909..fed9966909 100644
--- a/nova/tests/integrated/v3/api_samples/flavors/flavors-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/flavors/flavors-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/image-size/image-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/image-size/image-get-resp.json.tpl
index 9a5ebfbc11..9a5ebfbc11 100644
--- a/nova/tests/integrated/v3/api_samples/image-size/image-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/image-size/image-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/image-size/images-details-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/image-size/images-details-get-resp.json.tpl
index 2eba334009..2eba334009 100644
--- a/nova/tests/integrated/v3/api_samples/image-size/images-details-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/image-size/images-details-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/images/image-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/image-get-resp.json.tpl
index 57ae88548d..57ae88548d 100644
--- a/nova/tests/integrated/v3/api_samples/images/image-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/images/image-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/images/image-meta-key-get.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-get.json.tpl
index 6d022eb97d..6d022eb97d 100644
--- a/nova/tests/integrated/v3/api_samples/images/image-meta-key-get.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-get.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/images/image-meta-key-put-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-put-req.json.tpl
index 01528f1ce6..01528f1ce6 100644
--- a/nova/tests/integrated/v3/api_samples/images/image-meta-key-put-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-put-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/images/image-meta-key-put-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-put-resp.json.tpl
index 3db563ec14..3db563ec14 100644
--- a/nova/tests/integrated/v3/api_samples/images/image-meta-key-put-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/images/image-meta-key-put-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/images/image-metadata-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-get-resp.json.tpl
index 588f688d5a..588f688d5a 100644
--- a/nova/tests/integrated/v3/api_samples/images/image-metadata-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/images/image-metadata-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-post-req.json.tpl
index b51e5f00fc..b51e5f00fc 100644
--- a/nova/tests/integrated/v3/api_samples/images/image-metadata-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/images/image-metadata-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-post-resp.json.tpl
index 9479bb3395..9479bb3395 100644
--- a/nova/tests/integrated/v3/api_samples/images/image-metadata-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/images/image-metadata-put-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-put-req.json.tpl
index eec6152d77..eec6152d77 100644
--- a/nova/tests/integrated/v3/api_samples/images/image-metadata-put-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-put-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/images/image-metadata-put-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-put-resp.json.tpl
index c8c5ee9c4a..c8c5ee9c4a 100644
--- a/nova/tests/integrated/v3/api_samples/images/image-metadata-put-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/images/image-metadata-put-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/images/images-details-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/images-details-get-resp.json.tpl
index df8ecad0b8..df8ecad0b8 100644
--- a/nova/tests/integrated/v3/api_samples/images/images-details-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/images/images-details-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/images/images-list-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/images/images-list-get-resp.json.tpl
index 32ebd60cfa..32ebd60cfa 100644
--- a/nova/tests/integrated/v3/api_samples/images/images-list-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/images/images-list-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/keypairs/keypairs-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-get-resp.json.tpl
index 4fde60f14b..4fde60f14b 100644
--- a/nova/tests/integrated/v3/api_samples/keypairs/keypairs-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/keypairs/keypairs-import-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-import-post-req.json.tpl
index 2301fa05b2..2301fa05b2 100644
--- a/nova/tests/integrated/v3/api_samples/keypairs/keypairs-import-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-import-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/keypairs/keypairs-import-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-import-post-resp.json.tpl
index ca7192d5dc..ca7192d5dc 100644
--- a/nova/tests/integrated/v3/api_samples/keypairs/keypairs-import-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-import-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/keypairs/keypairs-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-list-resp.json.tpl
index 29ba63c00b..29ba63c00b 100644
--- a/nova/tests/integrated/v3/api_samples/keypairs/keypairs-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/keypairs/keypairs-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-post-req.json.tpl
index 68e2f03487..68e2f03487 100644
--- a/nova/tests/integrated/v3/api_samples/keypairs/keypairs-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/keypairs/keypairs-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-post-resp.json.tpl
index aace6f5ccc..aace6f5ccc 100644
--- a/nova/tests/integrated/v3/api_samples/keypairs/keypairs-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/keypairs/keypairs-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl
index e9b7921f30..e9b7921f30 100644
--- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-action-rebuild.json.tpl
index 603363b409..603363b409 100644
--- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-action-rebuild.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-get-resp.json.tpl
index efe7801174..efe7801174 100644
--- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-post-req.json.tpl
index 780f764cf5..780f764cf5 100644
--- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-post-resp.json.tpl
index fb0c23b504..fb0c23b504 100644
--- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-put-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-put-req.json.tpl
index d38d967042..d38d967042 100644
--- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-put-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-put-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-put-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-put-resp.json.tpl
index b3e8c665e8..b3e8c665e8 100644
--- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-put-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/server-put-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/servers-details-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/servers-details-resp.json.tpl
index 041f1a1056..041f1a1056 100644
--- a/nova/tests/integrated/v3/api_samples/os-access-ips/servers-details-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/servers-details-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/servers-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/servers-list-resp.json.tpl
index 8797266b68..8797266b68 100644
--- a/nova/tests/integrated/v3/api_samples/os-access-ips/servers-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-access-ips/servers-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl
index 62e16737b0..62e16737b0 100644
--- a/nova/tests/integrated/v3/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-inject-network-info.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl
index 7c79cb68a5..7c79cb68a5 100644
--- a/nova/tests/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-network.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl
index 013aed4824..013aed4824 100644
--- a/nova/tests/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-server-state.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl
index 72d9478678..72d9478678 100644
--- a/nova/tests/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/admin-actions-reset-state.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-admin-actions/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-admin-password/admin-password-change-password.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-admin-password/admin-password-change-password.json.tpl
index da615718fe..da615718fe 100644
--- a/nova/tests/integrated/v3/api_samples/os-admin-password/admin-password-change-password.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-admin-password/admin-password-change-password.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-admin-password/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-admin-password/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-admin-password/server-post-resp.json.tpl
index 71654b4b8a..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-admin-password/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-agents/agent-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-post-req.json.tpl
index 6dbd2f17cb..6dbd2f17cb 100644
--- a/nova/tests/integrated/v3/api_samples/os-agents/agent-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-agents/agent-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-post-resp.json.tpl
index 24ddede90b..24ddede90b 100644
--- a/nova/tests/integrated/v3/api_samples/os-agents/agent-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-agents/agent-update-put-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-update-put-req.json.tpl
index d447350e0d..d447350e0d 100644
--- a/nova/tests/integrated/v3/api_samples/os-agents/agent-update-put-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-update-put-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-agents/agent-update-put-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-update-put-resp.json.tpl
index 2919d21388..2919d21388 100644
--- a/nova/tests/integrated/v3/api_samples/os-agents/agent-update-put-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-agents/agent-update-put-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-agents/agents-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-agents/agents-get-resp.json.tpl
index 92e14e1dc5..92e14e1dc5 100644
--- a/nova/tests/integrated/v3/api_samples/os-agents/agents-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-agents/agents-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl
index 97395bf2f2..97395bf2f2 100644
--- a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-add-host-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl
index 63a2921cac..63a2921cac 100644
--- a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-metadata-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-post-req.json.tpl
index fc806061e8..fc806061e8 100644
--- a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-post-resp.json.tpl
index 935643d03c..935643d03c 100644
--- a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl
index 4663e52931..4663e52931 100644
--- a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-remove-host-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-update-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-update-post-req.json.tpl
index 55e4b09346..55e4b09346 100644
--- a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-update-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-update-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl
index 2e229a473a..2e229a473a 100644
--- a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregate-update-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl
index e5775c206d..e5775c206d 100644
--- a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-add-host-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregates-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-get-resp.json.tpl
index b91781fae2..b91781fae2 100644
--- a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregates-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl
index 642653d1e6..642653d1e6 100644
--- a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-list-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl
index b15c40fa5d..b15c40fa5d 100644
--- a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-metadata-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl
index b91781fae2..b91781fae2 100644
--- a/nova/tests/integrated/v3/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-aggregates/aggregates-remove-host-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl
index 11dcf64373..11dcf64373 100644
--- a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl
index 9dff234366..9dff234366 100644
--- a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl
index 192f9a6487..192f9a6487 100644
--- a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl
index 9dff234366..9dff234366 100644
--- a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/server-post-resp.json.tpl
index 71654b4b8a..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-attach-interfaces/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-availability-zone/availability-zone-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/availability-zone-detail-resp.json.tpl
index d1b610e944..d1b610e944 100644
--- a/nova/tests/integrated/v3/api_samples/os-availability-zone/availability-zone-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/availability-zone-detail-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-availability-zone/availability-zone-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/availability-zone-list-resp.json.tpl
index 8190c5492f..8190c5492f 100644
--- a/nova/tests/integrated/v3/api_samples/os-availability-zone/availability-zone-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/availability-zone-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/server-post-req.json.tpl
index dcc1142f47..dcc1142f47 100644
--- a/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/server-post-resp.json.tpl
index 71654b4b8a..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-availability-zone/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-cells/cells-capacities-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-capacities-resp.json.tpl
index 5e067dd3aa..5e067dd3aa 100644
--- a/nova/tests/integrated/v3/api_samples/os-cells/cells-capacities-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-capacities-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-cells/cells-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-get-resp.json.tpl
index 62eb8ec31d..62eb8ec31d 100644
--- a/nova/tests/integrated/v3/api_samples/os-cells/cells-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-cells/cells-list-empty-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-list-empty-resp.json.tpl
index 5325a4e855..5325a4e855 100644
--- a/nova/tests/integrated/v3/api_samples/os-cells/cells-list-empty-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-list-empty-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-cells/cells-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-list-resp.json.tpl
index 97ea4c6dd3..97ea4c6dd3 100644
--- a/nova/tests/integrated/v3/api_samples/os-cells/cells-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-cells/cells-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-certificates/certificate-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-create-req.json.tpl
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/integrated/v3/api_samples/os-certificates/certificate-create-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-create-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-certificates/certificate-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-create-resp.json.tpl
index 35c063c820..35c063c820 100644
--- a/nova/tests/integrated/v3/api_samples/os-certificates/certificate-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-create-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-certificates/certificate-get-root-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-get-root-resp.json.tpl
index 4938e92fba..4938e92fba 100644
--- a/nova/tests/integrated/v3/api_samples/os-certificates/certificate-get-root-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-certificates/certificate-get-root-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl
index c8fc75995a..c8fc75995a 100644
--- a/nova/tests/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl
index 6aa2ff60e2..6aa2ff60e2 100644
--- a/nova/tests/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-create-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl
index 698008802e..698008802e 100644
--- a/nova/tests/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-update-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-update-req.json.tpl
index 0ab9141aea..0ab9141aea 100644
--- a/nova/tests/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-update-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-cloudpipe/cloud-pipe-update-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl
index a9e9bc6564..a9e9bc6564 100644
--- a/nova/tests/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-post-resp.json.tpl
index 71654b4b8a..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-config-drive/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl
index 21ed41cf7d..21ed41cf7d 100644
--- a/nova/tests/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl
index f5be11801e..f5be11801e 100644
--- a/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/get-console-connect-info-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl
index 00956b90e4..00956b90e4 100644
--- a/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/get-rdp-console-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-console-auth-tokens/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-console-output/console-output-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-console-output/console-output-post-req.json.tpl
index caeb2a5502..caeb2a5502 100644
--- a/nova/tests/integrated/v3/api_samples/os-console-output/console-output-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-console-output/console-output-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-console-output/console-output-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-console-output/console-output-post-resp.json.tpl
index 27ffe7d4c2..27ffe7d4c2 100644
--- a/nova/tests/integrated/v3/api_samples/os-console-output/console-output-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-console-output/console-output-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-console-output/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-console-output/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-console-output/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-console-output/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-console-output/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-console-output/server-post-resp.json.tpl
index 71654b4b8a..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-console-output/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-console-output/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-create-backup/create-backup-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-create-backup/create-backup-req.json.tpl
index 60f5e1d9fe..60f5e1d9fe 100644
--- a/nova/tests/integrated/v3/api_samples/os-create-backup/create-backup-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-create-backup/create-backup-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-create-backup/server-post-req.json.tpl
index 27557a3e9f..27557a3e9f 100644
--- a/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-create-backup/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-create-backup/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-create-backup/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-deferred-delete/force-delete-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/force-delete-post-req.json.tpl
index d3562d390d..d3562d390d 100644
--- a/nova/tests/integrated/v3/api_samples/os-deferred-delete/force-delete-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/force-delete-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-deferred-delete/restore-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/restore-post-req.json.tpl
index d38291fe08..d38291fe08 100644
--- a/nova/tests/integrated/v3/api_samples/os-deferred-delete/restore-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/restore-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-deferred-delete/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-disk-config/image-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/image-get-resp.json.tpl
index b79c7c857e..b79c7c857e 100644
--- a/nova/tests/integrated/v3/api_samples/os-disk-config/image-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/image-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-disk-config/image-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/image-list-resp.json.tpl
index f74aeb7c85..f74aeb7c85 100644
--- a/nova/tests/integrated/v3/api_samples/os-disk-config/image-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/image-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-disk-config/list-servers-detail-get.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/list-servers-detail-get.json.tpl
index 8a08b3e011..8a08b3e011 100644
--- a/nova/tests/integrated/v3/api_samples/os-disk-config/list-servers-detail-get.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/list-servers-detail-get.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-disk-config/server-action-rebuild-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-action-rebuild-req.json.tpl
index 3d98b99bcb..3d98b99bcb 100644
--- a/nova/tests/integrated/v3/api_samples/os-disk-config/server-action-rebuild-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-action-rebuild-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-disk-config/server-action-rebuild-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-action-rebuild-resp.json.tpl
index ebb5f3d8a0..ebb5f3d8a0 100644
--- a/nova/tests/integrated/v3/api_samples/os-disk-config/server-action-rebuild-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-action-rebuild-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-disk-config/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-get-resp.json.tpl
index f60c4af52f..f60c4af52f 100644
--- a/nova/tests/integrated/v3/api_samples/os-disk-config/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-disk-config/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-post-req.json.tpl
index c012c48318..c012c48318 100644
--- a/nova/tests/integrated/v3/api_samples/os-disk-config/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-disk-config/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-post-resp.json.tpl
index 91aa3432ea..91aa3432ea 100644
--- a/nova/tests/integrated/v3/api_samples/os-disk-config/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-disk-config/server-resize-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-resize-post-req.json.tpl
index a290485e1c..a290485e1c 100644
--- a/nova/tests/integrated/v3/api_samples/os-disk-config/server-resize-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-resize-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-disk-config/server-update-put-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-update-put-req.json.tpl
index 4ac22820df..4ac22820df 100644
--- a/nova/tests/integrated/v3/api_samples/os-disk-config/server-update-put-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-update-put-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-disk-config/server-update-put-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-update-put-resp.json.tpl
index 26cf74e80a..26cf74e80a 100644
--- a/nova/tests/integrated/v3/api_samples/os-disk-config/server-update-put-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-disk-config/server-update-put-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json.tpl
index 5e2c2e6ef0..5e2c2e6ef0 100644
--- a/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tpl
index 0da07da5b8..0da07da5b8 100644
--- a/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-req.json.tpl
index 179cddce73..179cddce73 100644
--- a/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-resp.json.tpl
index 0da07da5b8..0da07da5b8 100644
--- a/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-evacuate-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-evacuate/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-get-resp.json.tpl
index d6436738ef..d6436738ef 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl
index c81acaf935..c81acaf935 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl
index a4c68a3e8b..a4c68a3e8b 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl
index 8fc7dbcc4e..8fc7dbcc4e 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl
index a0b73e41d2..a0b73e41d2 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-status/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl
index b8e17cd8cf..b8e17cd8cf 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-volumes/attach-volume-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/attach-volume-req.json.tpl
index 683f67c98b..683f67c98b 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-volumes/attach-volume-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/attach-volume-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-volumes/detach-volume-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/detach-volume-req.json.tpl
index c56f3c3b83..c56f3c3b83 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-volumes/detach-volume-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/detach-volume-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-get-resp.json.tpl
index 1a28dd80ef..1a28dd80ef 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-volumes/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/servers-detail-resp.json.tpl
index bb8a9bb9cb..bb8a9bb9cb 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-volumes/servers-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/servers-detail-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-volumes/swap-volume-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/swap-volume-req.json.tpl
index 07a3268421..07a3268421 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-volumes/swap-volume-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-extended-volumes/swap-volume-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-fixed-ips/fixedip-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-fixed-ips/fixedip-post-req.json.tpl
index 85ae4890ad..85ae4890ad 100644
--- a/nova/tests/integrated/v3/api_samples/os-fixed-ips/fixedip-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-fixed-ips/fixedip-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl
index a3d11475bf..a3d11475bf 100644
--- a/nova/tests/integrated/v3/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-fixed-ips/fixedips-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl
index 75d8f5aacd..75d8f5aacd 100644
--- a/nova/tests/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl
index 1f7ea26cb0..1f7ea26cb0 100644
--- a/nova/tests/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl
index 70d0a57de8..70d0a57de8 100644
--- a/nova/tests/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl
index d3b4c231b2..d3b4c231b2 100644
--- a/nova/tests/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-flavor-rxtx/flavor-rxtx-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl
index 000c5e1231..000c5e1231 100644
--- a/nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl
index 3ec0743ba7..3ec0743ba7 100644
--- a/nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-entry-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl
index f6685f5d3f..f6685f5d3f 100644
--- a/nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl
index a14d395d23..a14d395d23 100644
--- a/nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-create-or-update-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl
index 8edd0603f7..8edd0603f7 100644
--- a/nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl
index 831cda7b55..831cda7b55 100644
--- a/nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-entry-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl
index a6055cfecc..a6055cfecc 100644
--- a/nova/tests/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-dns/floating-ip-dns-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl
index 607109d70d..607109d70d 100644
--- a/nova/tests/integrated/v3/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ip-pools/floatingippools-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl
index 2f16cf07cb..2f16cf07cb 100644
--- a/nova/tests/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl
index ef1cbfb17f..ef1cbfb17f 100644
--- a/nova/tests/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-create-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl
index d630d669cd..d630d669cd 100644
--- a/nova/tests/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl
index 166984b24a..166984b24a 100644
--- a/nova/tests/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-delete-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl
index 0eaaf75ae0..0eaaf75ae0 100644
--- a/nova/tests/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-by-host-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl
index de1e622bb1..de1e622bb1 100644
--- a/nova/tests/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-floating-ips-bulk/floating-ips-bulk-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-fping/fping-get-details-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-fping/fping-get-details-resp.json.tpl
index f3b222c399..f3b222c399 100644
--- a/nova/tests/integrated/v3/api_samples/os-fping/fping-get-details-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-fping/fping-get-details-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-fping/fping-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-fping/fping-get-resp.json.tpl
index b33e80668b..b33e80668b 100644
--- a/nova/tests/integrated/v3/api_samples/os-fping/fping-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-fping/fping-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-fping/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-fping/server-post-req.json.tpl
index 3271a58a7d..3271a58a7d 100644
--- a/nova/tests/integrated/v3/api_samples/os-fping/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-fping/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-fping/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-fping/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-fping/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-fping/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-get-resp.json.tpl
index 3a69fcd321..3a69fcd321 100644
--- a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-post-resp.json.tpl
index 71654b4b8a..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl
index 353d29f480..353d29f480 100644
--- a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl
index 8797266b68..8797266b68 100644
--- a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hide-server-addresses/servers-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hosts/host-get-reboot.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-reboot.json.tpl
index 4ed89a182d..4ed89a182d 100644
--- a/nova/tests/integrated/v3/api_samples/os-hosts/host-get-reboot.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-reboot.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hosts/host-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-resp.json.tpl
index efb234b436..efb234b436 100644
--- a/nova/tests/integrated/v3/api_samples/os-hosts/host-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hosts/host-get-shutdown.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-shutdown.json.tpl
index c0df4481a2..c0df4481a2 100644
--- a/nova/tests/integrated/v3/api_samples/os-hosts/host-get-shutdown.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-shutdown.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hosts/host-get-startup.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-startup.json.tpl
index 90f5ac7bcb..90f5ac7bcb 100644
--- a/nova/tests/integrated/v3/api_samples/os-hosts/host-get-startup.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-get-startup.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hosts/host-put-maintenance-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-put-maintenance-req.json.tpl
index c1da8f4f9d..c1da8f4f9d 100644
--- a/nova/tests/integrated/v3/api_samples/os-hosts/host-put-maintenance-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-put-maintenance-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hosts/host-put-maintenance-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-put-maintenance-resp.json.tpl
index 92f73892b3..92f73892b3 100644
--- a/nova/tests/integrated/v3/api_samples/os-hosts/host-put-maintenance-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hosts/host-put-maintenance-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hosts/hosts-list-compute-service-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hosts/hosts-list-compute-service-resp.json.tpl
index 846988bd80..846988bd80 100644
--- a/nova/tests/integrated/v3/api_samples/os-hosts/hosts-list-compute-service-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hosts/hosts-list-compute-service-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hosts/hosts-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hosts/hosts-list-resp.json.tpl
index cd5bfdf999..cd5bfdf999 100644
--- a/nova/tests/integrated/v3/api_samples/os-hosts/hosts-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hosts/hosts-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl
index 2777eb4887..2777eb4887 100644
--- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl
index 710cdfcf9c..710cdfcf9c 100644
--- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl
index 375627499d..375627499d 100644
--- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl
index 857a1b2166..857a1b2166 100644
--- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl
index f125da01af..f125da01af 100644
--- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl
index 2cfb51e703..2cfb51e703 100644
--- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-statistics-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl
index e2f6d2e47e..e2f6d2e47e 100644
--- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-instance-actions/instance-action-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-action-get-resp.json.tpl
index 7cd5325239..7cd5325239 100644
--- a/nova/tests/integrated/v3/api_samples/os-instance-actions/instance-action-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-action-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl
index 0fdc33916a..0fdc33916a 100644
--- a/nova/tests/integrated/v3/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-actions-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-instance-actions/instance-instance-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-instance-get-resp.json.tpl
index f259deefdb..f259deefdb 100644
--- a/nova/tests/integrated/v3/api_samples/os-instance-actions/instance-instance-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-instance-actions/instance-instance-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-lock-server/lock-server.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-lock-server/lock-server.json.tpl
index a1863f2f39..a1863f2f39 100644
--- a/nova/tests/integrated/v3/api_samples/os-lock-server/lock-server.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-lock-server/lock-server.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-lock-server/server-post-req.json.tpl
index 27557a3e9f..27557a3e9f 100644
--- a/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-lock-server/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-lock-server/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-lock-server/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-lock-server/unlock-server.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-lock-server/unlock-server.json.tpl
index 9e905ca2b9..9e905ca2b9 100644
--- a/nova/tests/integrated/v3/api_samples/os-lock-server/unlock-server.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-lock-server/unlock-server.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-migrate-server/live-migrate-server.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/live-migrate-server.json.tpl
index 4800d4aa11..4800d4aa11 100644
--- a/nova/tests/integrated/v3/api_samples/os-migrate-server/live-migrate-server.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/live-migrate-server.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-migrate-server/migrate-server.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/migrate-server.json.tpl
index a9bf8c483e..a9bf8c483e 100644
--- a/nova/tests/integrated/v3/api_samples/os-migrate-server/migrate-server.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/migrate-server.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-migrate-server/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-migrations/migrations-get.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-migrations/migrations-get.json.tpl
index 91775be775..91775be775 100644
--- a/nova/tests/integrated/v3/api_samples/os-migrations/migrations-get.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-migrations/migrations-get.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-multinic/multinic-add-fixed-ip-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-multinic/multinic-add-fixed-ip-req.json.tpl
index b9744ab2c7..b9744ab2c7 100644
--- a/nova/tests/integrated/v3/api_samples/os-multinic/multinic-add-fixed-ip-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-multinic/multinic-add-fixed-ip-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-multinic/multinic-remove-fixed-ip-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-multinic/multinic-remove-fixed-ip-req.json.tpl
index 7367e1242c..7367e1242c 100644
--- a/nova/tests/integrated/v3/api_samples/os-multinic/multinic-remove-fixed-ip-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-multinic/multinic-remove-fixed-ip-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-multinic/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-multinic/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-multinic/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-multinic/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-multinic/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-multinic/server-post-resp.json.tpl
index 71654b4b8a..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-multinic/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-multinic/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl
index 19ede54ec2..19ede54ec2 100644
--- a/nova/tests/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-no-resv-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-multiple-create/multiple-create-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-post-req.json.tpl
index e2f949f09d..e2f949f09d 100644
--- a/nova/tests/integrated/v3/api_samples/os-multiple-create/multiple-create-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl
index 22d2880feb..22d2880feb 100644
--- a/nova/tests/integrated/v3/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-multiple-create/multiple-create-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-networks-associate/network-associate-host-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-associate-host-req.json.tpl
index 762e881751..762e881751 100644
--- a/nova/tests/integrated/v3/api_samples/os-networks-associate/network-associate-host-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-associate-host-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl
index 46f69b3e81..46f69b3e81 100644
--- a/nova/tests/integrated/v3/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-host-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl
index 63b6eb6839..63b6eb6839 100644
--- a/nova/tests/integrated/v3/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-project-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-networks-associate/network-disassociate-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-req.json.tpl
index 2e09d15a60..2e09d15a60 100644
--- a/nova/tests/integrated/v3/api_samples/os-networks-associate/network-disassociate-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks-associate/network-disassociate-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-networks/network-add-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks/network-add-req.json.tpl
index aca6770b3b..aca6770b3b 100644
--- a/nova/tests/integrated/v3/api_samples/os-networks/network-add-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks/network-add-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-networks/network-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks/network-create-req.json.tpl
index 18515bd6c4..18515bd6c4 100644
--- a/nova/tests/integrated/v3/api_samples/os-networks/network-create-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks/network-create-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-networks/network-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks/network-create-resp.json.tpl
index 5cf155b13f..5cf155b13f 100644
--- a/nova/tests/integrated/v3/api_samples/os-networks/network-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks/network-create-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-networks/network-show-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks/network-show-resp.json.tpl
index ac75fe7fb1..ac75fe7fb1 100644
--- a/nova/tests/integrated/v3/api_samples/os-networks/network-show-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks/network-show-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-networks/networks-disassociate-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks/networks-disassociate-req.json.tpl
index 2e09d15a60..2e09d15a60 100644
--- a/nova/tests/integrated/v3/api_samples/os-networks/networks-disassociate-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks/networks-disassociate-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-networks/networks-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-networks/networks-list-resp.json.tpl
index ccdd586a0f..ccdd586a0f 100644
--- a/nova/tests/integrated/v3/api_samples/os-networks/networks-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-networks/networks-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-pause-server/pause-server.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pause-server/pause-server.json.tpl
index 2e7c1fad30..2e7c1fad30 100644
--- a/nova/tests/integrated/v3/api_samples/os-pause-server/pause-server.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pause-server/pause-server.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pause-server/server-post-req.json.tpl
index 27557a3e9f..27557a3e9f 100644
--- a/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pause-server/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pause-server/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pause-server/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-pause-server/unpause-server.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pause-server/unpause-server.json.tpl
index ce5024f0c9..ce5024f0c9 100644
--- a/nova/tests/integrated/v3/api_samples/os-pause-server/unpause-server.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pause-server/unpause-server.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl
index f2bf2bc02c..f2bf2bc02c 100644
--- a/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl
index 3c0fc0abcd..3c0fc0abcd 100644
--- a/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-pci/pci-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pci/pci-detail-resp.json.tpl
index 61cb17c6b4..61cb17c6b4 100644
--- a/nova/tests/integrated/v3/api_samples/os-pci/pci-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pci/pci-detail-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-pci/pci-index-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pci/pci-index-resp.json.tpl
index 6268f316df..6268f316df 100644
--- a/nova/tests/integrated/v3/api_samples/os-pci/pci-index-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pci/pci-index-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-pci/pci-show-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pci/pci-show-resp.json.tpl
index 9977769881..9977769881 100644
--- a/nova/tests/integrated/v3/api_samples/os-pci/pci-show-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pci/pci-show-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-pci/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pci/server-get-resp.json.tpl
index b94f013f28..b94f013f28 100644
--- a/nova/tests/integrated/v3/api_samples/os-pci/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pci/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-pci/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pci/server-post-req.json.tpl
index 27557a3e9f..27557a3e9f 100644
--- a/nova/tests/integrated/v3/api_samples/os-pci/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pci/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-pci/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pci/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-pci/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pci/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-pci/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-pci/servers-detail-resp.json.tpl
index d152ae31ec..d152ae31ec 100644
--- a/nova/tests/integrated/v3/api_samples/os-pci/servers-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-pci/servers-detail-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
index f66f22cd2d..f66f22cd2d 100644
--- a/nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
index f66f22cd2d..f66f22cd2d 100644
--- a/nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-req.json.tpl
index a58a179123..a58a179123 100644
--- a/nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-resp.json.tpl
index 97c456d4d4..97c456d4d4 100644
--- a/nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-update-force-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-resp.json.tpl
index ff23ff6ae4..ff23ff6ae4 100644
--- a/nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-update-force-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-force-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-update-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-post-req.json.tpl
index 1f12caa045..1f12caa045 100644
--- a/nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-update-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
index f7c276e3f7..f7c276e3f7 100644
--- a/nova/tests/integrated/v3/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-quota-sets/user-quotas-show-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-show-get-resp.json.tpl
index f66f22cd2d..f66f22cd2d 100644
--- a/nova/tests/integrated/v3/api_samples/os-quota-sets/user-quotas-show-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-show-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-req.json.tpl
index b322b2a870..b322b2a870 100644
--- a/nova/tests/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-resp.json.tpl
index a17757aafe..a17757aafe 100644
--- a/nova/tests/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-quota-sets/user-quotas-update-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-req.json.tpl
index 00956b90e4..00956b90e4 100644
--- a/nova/tests/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-resp.json.tpl
index c3955d6ac0..c3955d6ac0 100644
--- a/nova/tests/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-rdp-console-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-req.json.tpl
index 1d754d6608..1d754d6608 100644
--- a/nova/tests/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-resp.json.tpl
index 721ce2b2ea..721ce2b2ea 100644
--- a/nova/tests/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-serial-console-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-req.json.tpl
index d04f7c7ae9..d04f7c7ae9 100644
--- a/nova/tests/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-resp.json.tpl
index 65b72a866f..65b72a866f 100644
--- a/nova/tests/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-spice-console-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-req.json.tpl
index 1926119ced..1926119ced 100644
--- a/nova/tests/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-resp.json.tpl
index 2eeee7c543..2eeee7c543 100644
--- a/nova/tests/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/get-vnc-console-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-remote-consoles/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-rescue.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-get-resp-rescue.json.tpl
index 02547e994f..02547e994f 100644
--- a/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-rescue.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-get-resp-rescue.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-unrescue.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-get-resp-unrescue.json.tpl
index cd6ded9be3..cd6ded9be3 100644
--- a/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-unrescue.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-get-resp-unrescue.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-rescue/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-rescue/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-rescue/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-rescue/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-rescue/server-rescue-req-with-image-ref.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue-req-with-image-ref.json.tpl
index 8a4ad0d52a..8a4ad0d52a 100644
--- a/nova/tests/integrated/v3/api_samples/os-rescue/server-rescue-req-with-image-ref.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue-req-with-image-ref.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-rescue/server-rescue-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue-req.json.tpl
index f946b74f53..f946b74f53 100644
--- a/nova/tests/integrated/v3/api_samples/os-rescue/server-rescue-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-rescue/server-rescue.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue.json.tpl
index 0da07da5b8..0da07da5b8 100644
--- a/nova/tests/integrated/v3/api_samples/os-rescue/server-rescue.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-rescue.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-rescue/server-unrescue-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-unrescue-req.json.tpl
index cafc9b13a8..cafc9b13a8 100644
--- a/nova/tests/integrated/v3/api_samples/os-rescue/server-unrescue-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-rescue/server-unrescue-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl
index a381df7444..a381df7444 100644
--- a/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl
index 71654b4b8a..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl
index 8836d0eecc..8836d0eecc 100644
--- a/nova/tests/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl
index ae6c62bfd6..ae6c62bfd6 100644
--- a/nova/tests/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-create-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl
index c083640c3e..c083640c3e 100644
--- a/nova/tests/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl
index 97b5259a18..97b5259a18 100644
--- a/nova/tests/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-group-default-rules/security-group-default-rules-show-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/security-group-add-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-add-post-req.json.tpl
index 19a6ed2cb8..19a6ed2cb8 100644
--- a/nova/tests/integrated/v3/api_samples/os-security-groups/security-group-add-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-add-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/security-group-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-post-req.json.tpl
index 3f54ab6856..3f54ab6856 100644
--- a/nova/tests/integrated/v3/api_samples/os-security-groups/security-group-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/security-group-remove-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-remove-post-req.json.tpl
index 7f550036b8..7f550036b8 100644
--- a/nova/tests/integrated/v3/api_samples/os-security-groups/security-group-remove-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-group-remove-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-create-resp.json.tpl
index e51714e3ee..e51714e3ee 100644
--- a/nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-create-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-get-resp.json.tpl
index 0372512744..0372512744 100644
--- a/nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl
index 1771f2dff1..1771f2dff1 100644
--- a/nova/tests/integrated/v3/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/security-groups-list-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-get-resp.json.tpl
index 47ed3c1f22..47ed3c1f22 100644
--- a/nova/tests/integrated/v3/api_samples/os-security-groups/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-post-req.json.tpl
index 6657700682..6657700682 100644
--- a/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-post-resp.json.tpl
index c87c1ee064..c87c1ee064 100644
--- a/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl
index 1771f2dff1..1771f2dff1 100644
--- a/nova/tests/integrated/v3/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/server-security-groups-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/servers-detail-resp.json.tpl
index 519292d50a..519292d50a 100644
--- a/nova/tests/integrated/v3/api_samples/os-security-groups/servers-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-security-groups/servers-detail-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl
index 1afedaee9c..1afedaee9c 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-diagnostics-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-diagnostics/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-server-external-events/event-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/event-create-req.json.tpl
index 43c3b6b407..43c3b6b407 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-external-events/event-create-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/event-create-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-server-external-events/event-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/event-create-resp.json.tpl
index aa11b62c83..aa11b62c83 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-external-events/event-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/event-create-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-external-events/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-server-groups/server-groups-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-get-resp.json.tpl
index ba72643b6d..ba72643b6d 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-groups/server-groups-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-server-groups/server-groups-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-list-resp.json.tpl
index f01d451dd2..f01d451dd2 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-groups/server-groups-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-server-groups/server-groups-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-post-req.json.tpl
index 1cc2328320..1cc2328320 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-groups/server-groups-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-server-groups/server-groups-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-post-resp.json.tpl
index ee9c37e82c..ee9c37e82c 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-groups/server-groups-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-groups/server-groups-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-server-usage/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-get-resp.json.tpl
index 90e75947e5..90e75947e5 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-usage/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-post-resp.json.tpl
index 71654b4b8a..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-usage/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-server-usage/servers-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-server-usage/servers-detail-resp.json.tpl
index ae20daabf7..ae20daabf7 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-usage/servers-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-server-usage/servers-detail-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-services/service-disable-log-put-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-log-put-req.json.tpl
index f11cca3739..f11cca3739 100644
--- a/nova/tests/integrated/v3/api_samples/os-services/service-disable-log-put-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-log-put-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-services/service-disable-log-put-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-log-put-resp.json.tpl
index 442e2099f9..442e2099f9 100644
--- a/nova/tests/integrated/v3/api_samples/os-services/service-disable-log-put-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-log-put-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-services/service-disable-put-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-put-req.json.tpl
index 1323ef50f5..1323ef50f5 100644
--- a/nova/tests/integrated/v3/api_samples/os-services/service-disable-put-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-put-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-services/service-disable-put-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-put-resp.json.tpl
index d7fe948d01..d7fe948d01 100644
--- a/nova/tests/integrated/v3/api_samples/os-services/service-disable-put-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-services/service-disable-put-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-services/service-enable-put-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-services/service-enable-put-req.json.tpl
index 1323ef50f5..1323ef50f5 100644
--- a/nova/tests/integrated/v3/api_samples/os-services/service-enable-put-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-services/service-enable-put-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-services/service-enable-put-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-services/service-enable-put-resp.json.tpl
index 0a6b2668df..0a6b2668df 100644
--- a/nova/tests/integrated/v3/api_samples/os-services/service-enable-put-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-services/service-enable-put-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-services/services-list-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-services/services-list-get-resp.json.tpl
index 174b443d0b..174b443d0b 100644
--- a/nova/tests/integrated/v3/api_samples/os-services/services-list-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-services/services-list-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-shelve/os-shelve-offload.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-shelve/os-shelve-offload.json.tpl
index 5a19f85cff..5a19f85cff 100644
--- a/nova/tests/integrated/v3/api_samples/os-shelve/os-shelve-offload.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-shelve/os-shelve-offload.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-shelve/os-shelve.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-shelve/os-shelve.json.tpl
index 5a19f85cff..5a19f85cff 100644
--- a/nova/tests/integrated/v3/api_samples/os-shelve/os-shelve.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-shelve/os-shelve.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-shelve/os-unshelve.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-shelve/os-unshelve.json.tpl
index 5a19f85cff..5a19f85cff 100644
--- a/nova/tests/integrated/v3/api_samples/os-shelve/os-unshelve.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-shelve/os-unshelve.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-shelve/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-shelve/server-post-req.json.tpl
index 6f9336d3c0..6f9336d3c0 100644
--- a/nova/tests/integrated/v3/api_samples/os-shelve/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-shelve/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-shelve/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-shelve/server-post-resp.json.tpl
index 71654b4b8a..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-shelve/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-shelve/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-simple-tenant-usage/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/server-post-req.json.tpl
index d3916d1aa6..d3916d1aa6 100644
--- a/nova/tests/integrated/v3/api_samples/os-simple-tenant-usage/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl
index f37083013d..f37083013d 100644
--- a/nova/tests/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get-specific.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl
index 25b5ff2b84..25b5ff2b84 100644
--- a/nova/tests/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-simple-tenant-usage/simple-tenant-usage-get.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-suspend-server/server-resume.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-resume.json.tpl
index ff00d97a14..ff00d97a14 100644
--- a/nova/tests/integrated/v3/api_samples/os-suspend-server/server-resume.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-resume.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-suspend-server/server-suspend.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-suspend.json.tpl
index 8c2206a063..8c2206a063 100644
--- a/nova/tests/integrated/v3/api_samples/os-suspend-server/server-suspend.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-suspend-server/server-suspend.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-tenant-networks/networks-list-res.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-list-res.json.tpl
index 757084d2f3..757084d2f3 100644
--- a/nova/tests/integrated/v3/api_samples/os-tenant-networks/networks-list-res.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-list-res.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-tenant-networks/networks-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-post-req.json.tpl
index fb1c2d3d06..fb1c2d3d06 100644
--- a/nova/tests/integrated/v3/api_samples/os-tenant-networks/networks-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-tenant-networks/networks-post-res.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-post-res.json.tpl
index ff9e2273d3..ff9e2273d3 100644
--- a/nova/tests/integrated/v3/api_samples/os-tenant-networks/networks-post-res.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-tenant-networks/networks-post-res.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-used-limits/usedlimits-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-used-limits/usedlimits-get-resp.json.tpl
index 28309af04c..28309af04c 100644
--- a/nova/tests/integrated/v3/api_samples/os-used-limits/usedlimits-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-used-limits/usedlimits-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-user-data/userdata-post-req.json.tpl
index 37f0a75d0a..37f0a75d0a 100644
--- a/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-user-data/userdata-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-user-data/userdata-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-user-data/userdata-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-volumes/os-volumes-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-detail-resp.json.tpl
index 82a63eda5f..82a63eda5f 100644
--- a/nova/tests/integrated/v3/api_samples/os-volumes/os-volumes-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-detail-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-volumes/os-volumes-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-get-resp.json.tpl
index 84bfdd2a5b..84bfdd2a5b 100644
--- a/nova/tests/integrated/v3/api_samples/os-volumes/os-volumes-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-volumes/os-volumes-index-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-index-resp.json.tpl
index 82a63eda5f..82a63eda5f 100644
--- a/nova/tests/integrated/v3/api_samples/os-volumes/os-volumes-index-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-index-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-volumes/os-volumes-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-post-req.json.tpl
index 33e9a68944..33e9a68944 100644
--- a/nova/tests/integrated/v3/api_samples/os-volumes/os-volumes-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-volumes/os-volumes-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-post-resp.json.tpl
index d13ce20cc3..d13ce20cc3 100644
--- a/nova/tests/integrated/v3/api_samples/os-volumes/os-volumes-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/os-volumes-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-volumes/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/server-post-req.json.tpl
index 3271a58a7d..3271a58a7d 100644
--- a/nova/tests/integrated/v3/api_samples/os-volumes/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-volumes/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/server-post-resp.json.tpl
index adfaaa381e..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-volumes/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-volumes/snapshot-create-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshot-create-req.json.tpl
index a8d47ea031..a8d47ea031 100644
--- a/nova/tests/integrated/v3/api_samples/os-volumes/snapshot-create-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshot-create-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-volumes/snapshot-create-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshot-create-resp.json.tpl
index 6153e8140e..6153e8140e 100644
--- a/nova/tests/integrated/v3/api_samples/os-volumes/snapshot-create-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshot-create-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-volumes/snapshots-detail-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-detail-resp.json.tpl
index 1b509d54f8..1b509d54f8 100644
--- a/nova/tests/integrated/v3/api_samples/os-volumes/snapshots-detail-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-detail-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-volumes/snapshots-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-list-resp.json.tpl
index c65d073ad7..c65d073ad7 100644
--- a/nova/tests/integrated/v3/api_samples/os-volumes/snapshots-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-list-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/os-volumes/snapshots-show-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-show-resp.json.tpl
index a9ab6240d6..a9ab6240d6 100644
--- a/nova/tests/integrated/v3/api_samples/os-volumes/snapshots-show-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/os-volumes/snapshots-show-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/server-ips/server-ips-network-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-ips/server-ips-network-resp.json.tpl
index 404649a36e..404649a36e 100644
--- a/nova/tests/integrated/v3/api_samples/server-ips/server-ips-network-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/server-ips/server-ips-network-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/server-ips/server-ips-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-ips/server-ips-resp.json.tpl
index 322ff19802..322ff19802 100644
--- a/nova/tests/integrated/v3/api_samples/server-ips/server-ips-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/server-ips/server-ips-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/server-ips/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-ips/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/server-ips/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/server-ips/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/server-ips/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-ips/server-post-resp.json.tpl
index 71654b4b8a..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/server-ips/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/server-ips/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/server-metadata/server-metadata-all-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-all-req.json.tpl
index 3812a26c88..3812a26c88 100644
--- a/nova/tests/integrated/v3/api_samples/server-metadata/server-metadata-all-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-all-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/server-metadata/server-metadata-all-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-all-resp.json.tpl
index 3812a26c88..3812a26c88 100644
--- a/nova/tests/integrated/v3/api_samples/server-metadata/server-metadata-all-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-all-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/server-metadata/server-metadata-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-req.json.tpl
index 85d69ec956..85d69ec956 100644
--- a/nova/tests/integrated/v3/api_samples/server-metadata/server-metadata-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/server-metadata/server-metadata-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-resp.json.tpl
index 85d69ec956..85d69ec956 100644
--- a/nova/tests/integrated/v3/api_samples/server-metadata/server-metadata-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-metadata-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/server-metadata/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/server-metadata/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/server-metadata/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-post-resp.json.tpl
index 71654b4b8a..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/server-metadata/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/server-metadata/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-confirm-resize.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-confirm-resize.json.tpl
index 432f6126e9..432f6126e9 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-action-confirm-resize.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-confirm-resize.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-create-image.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-create-image.json.tpl
index 19c2c489a4..19c2c489a4 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-action-create-image.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-create-image.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-reboot.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-reboot.json.tpl
index 18eda9b9ab..18eda9b9ab 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-action-reboot.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-reboot.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl
index 3c44eb8d7e..3c44eb8d7e 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl
index 8f38088c19..8f38088c19 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl
index 3c44eb8d7e..3c44eb8d7e 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl
index 6385f10593..6385f10593 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-resize.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-resize.json.tpl
index 468a88da24..468a88da24 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-action-resize.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-resize.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-revert-resize.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-revert-resize.json.tpl
index 2ddf6e5ab0..2ddf6e5ab0 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-action-revert-resize.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-revert-resize.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-start.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-start.json.tpl
index 883d0247a2..883d0247a2 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-action-start.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-start.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-stop.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-stop.json.tpl
index 883d0247a2..883d0247a2 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-action-stop.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-action-stop.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-get-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-get-resp.json.tpl
index 3c7cc62999..3c7cc62999 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-get-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-get-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-post-req.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-post-req.json.tpl
index ab0a3bb797..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-post-req.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-post-req.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-post-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/server-post-resp.json.tpl
index 71654b4b8a..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-post-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/server-post-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/servers/servers-details-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/servers-details-resp.json.tpl
index 1d4f8d9031..1d4f8d9031 100644
--- a/nova/tests/integrated/v3/api_samples/servers/servers-details-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/servers-details-resp.json.tpl
diff --git a/nova/tests/integrated/v3/api_samples/servers/servers-list-resp.json.tpl b/nova/tests/unit/integrated/v3/api_samples/servers/servers-list-resp.json.tpl
index 8797266b68..8797266b68 100644
--- a/nova/tests/integrated/v3/api_samples/servers/servers-list-resp.json.tpl
+++ b/nova/tests/unit/integrated/v3/api_samples/servers/servers-list-resp.json.tpl
diff --git a/nova/tests/unit/integrated/v3/test_access_ips.py b/nova/tests/unit/integrated/v3/test_access_ips.py
new file mode 100644
index 0000000000..d0af25f281
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_access_ips.py
@@ -0,0 +1,93 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class AccessIPsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = 'os-access-ips'
+
+ def _servers_post(self, subs):
+ response = self._do_post('servers', 'server-post-req', subs)
+ subs.update(self._get_regexes())
+ return self._verify_response('server-post-resp', subs, response, 202)
+
+ def test_servers_post(self):
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fe80::'
+ }
+ self._servers_post(subs)
+
+ def test_servers_get(self):
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fe80::'
+ }
+ uuid = self._servers_post(subs)
+ response = self._do_get('servers/%s' % uuid)
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_servers_details(self):
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fe80::'
+ }
+ uuid = self._servers_post(subs)
+ response = self._do_get('servers/detail')
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ self._verify_response('servers-details-resp', subs, response, 200)
+
+ def test_servers_rebuild(self):
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fe80::'
+ }
+ uuid = self._servers_post(subs)
+ subs['access_ip_v4'] = "4.3.2.1"
+ subs['access_ip_v6'] = '80fe::'
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-action-rebuild', subs)
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ self._verify_response('server-action-rebuild-resp',
+ subs, response, 202)
+
+ def test_servers_update(self):
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fe80::'
+ }
+ uuid = self._servers_post(subs)
+ subs['access_ip_v4'] = "4.3.2.1"
+ subs['access_ip_v6'] = '80fe::'
+ response = self._do_put('servers/%s' % uuid, 'server-put-req', subs)
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ self._verify_response('server-put-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_admin_actions.py b/nova/tests/unit/integrated/v3/test_admin_actions.py
new file mode 100644
index 0000000000..7530066438
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_admin_actions.py
@@ -0,0 +1,46 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class AdminActionsSamplesJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-admin-actions"
+
+ def setUp(self):
+ """setUp Method for AdminActions api samples extension
+
+ This method creates the server that will be used in each tests
+ """
+ super(AdminActionsSamplesJsonTest, self).setUp()
+ self.uuid = self._post_server()
+
+ def test_post_reset_network(self):
+ # Get api samples to reset server network request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-reset-network', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_inject_network_info(self):
+ # Get api samples to inject network info request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-inject-network-info', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_reset_state(self):
+ # get api samples to server reset state request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'admin-actions-reset-server-state', {})
+ self.assertEqual(response.status_code, 202)
diff --git a/nova/tests/unit/integrated/v3/test_admin_password.py b/nova/tests/unit/integrated/v3/test_admin_password.py
new file mode 100644
index 0000000000..2062e857df
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_admin_password.py
@@ -0,0 +1,29 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class AdminPasswordJsonTest(test_servers.ServersSampleBase):
+ extension_name = 'os-admin-password'
+
+ def test_server_password(self):
+ uuid = self._post_server()
+ subs = {"password": "foo"}
+ response = self._do_post('servers/%s/action' % uuid,
+ 'admin-password-change-password',
+ subs)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
diff --git a/nova/tests/unit/integrated/v3/test_agents.py b/nova/tests/unit/integrated/v3/test_agents.py
new file mode 100644
index 0000000000..ade59e6ec0
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_agents.py
@@ -0,0 +1,98 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import db
+from nova.db.sqlalchemy import models
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class AgentsJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-agents"
+
+ def setUp(self):
+ super(AgentsJsonTest, self).setUp()
+
+ fake_agents_list = [{'url': 'http://example.com/path/to/resource',
+ 'hypervisor': 'hypervisor',
+ 'architecture': 'x86',
+ 'os': 'os',
+ 'version': '8.0',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545',
+ 'id': 1}]
+
+ def fake_agent_build_create(context, values):
+ values['id'] = 1
+ agent_build_ref = models.AgentBuild()
+ agent_build_ref.update(values)
+ return agent_build_ref
+
+ def fake_agent_build_get_all(context, hypervisor):
+ agent_build_all = []
+ for agent in fake_agents_list:
+ if hypervisor and hypervisor != agent['hypervisor']:
+ continue
+ agent_build_ref = models.AgentBuild()
+ agent_build_ref.update(agent)
+ agent_build_all.append(agent_build_ref)
+ return agent_build_all
+
+ def fake_agent_build_update(context, agent_build_id, values):
+ pass
+
+ def fake_agent_build_destroy(context, agent_update_id):
+ pass
+
+ self.stubs.Set(db, "agent_build_create",
+ fake_agent_build_create)
+ self.stubs.Set(db, "agent_build_get_all",
+ fake_agent_build_get_all)
+ self.stubs.Set(db, "agent_build_update",
+ fake_agent_build_update)
+ self.stubs.Set(db, "agent_build_destroy",
+ fake_agent_build_destroy)
+
+ def test_agent_create(self):
+ # Creates a new agent build.
+ project = {'url': 'http://example.com/path/to/resource',
+ 'hypervisor': 'hypervisor',
+ 'architecture': 'x86',
+ 'os': 'os',
+ 'version': '8.0',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'
+ }
+ response = self._do_post('os-agents', 'agent-post-req',
+ project)
+ self._verify_response('agent-post-resp', project, response, 200)
+
+ def test_agent_list(self):
+ # Return a list of all agent builds.
+ response = self._do_get('os-agents')
+ self._verify_response('agents-get-resp', {}, response, 200)
+
+ def test_agent_update(self):
+ # Update an existing agent build.
+ agent_id = 1
+ subs = {'version': '7.0',
+ 'url': 'http://example.com/path/to/resource',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}
+ response = self._do_put('os-agents/%s' % agent_id,
+ 'agent-update-put-req', subs)
+ self._verify_response('agent-update-put-resp', subs, response, 200)
+
+ def test_agent_delete(self):
+ # Deletes an existing agent build.
+ agent_id = 1
+ response = self._do_delete('os-agents/%s' % agent_id)
+ self.assertEqual(response.status_code, 200)
diff --git a/nova/tests/unit/integrated/v3/test_aggregates.py b/nova/tests/unit/integrated/v3/test_aggregates.py
new file mode 100644
index 0000000000..6e29ea0403
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_aggregates.py
@@ -0,0 +1,80 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class AggregatesSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-aggregates"
+
+ def test_aggregate_create(self):
+ subs = {
+ "aggregate_id": '(?P<id>\d+)'
+ }
+ response = self._do_post('os-aggregates', 'aggregate-post-req', subs)
+ subs.update(self._get_regexes())
+ return self._verify_response('aggregate-post-resp',
+ subs, response, 200)
+
+ def test_list_aggregates(self):
+ self.test_aggregate_create()
+ response = self._do_get('os-aggregates')
+ subs = self._get_regexes()
+ self._verify_response('aggregates-list-get-resp', subs, response, 200)
+
+ def test_aggregate_get(self):
+ agg_id = self.test_aggregate_create()
+ response = self._do_get('os-aggregates/%s' % agg_id)
+ subs = self._get_regexes()
+ self._verify_response('aggregates-get-resp', subs, response, 200)
+
+ def test_add_metadata(self):
+ agg_id = self.test_aggregate_create()
+ response = self._do_post('os-aggregates/%s/action' % agg_id,
+ 'aggregate-metadata-post-req',
+ {'action': 'set_metadata'})
+ subs = self._get_regexes()
+ self._verify_response('aggregates-metadata-post-resp', subs,
+ response, 200)
+
+ def test_add_host(self):
+ aggregate_id = self.test_aggregate_create()
+ subs = {
+ "host_name": self.compute.host,
+ }
+ response = self._do_post('os-aggregates/%s/action' % aggregate_id,
+ 'aggregate-add-host-post-req', subs)
+ subs.update(self._get_regexes())
+ self._verify_response('aggregates-add-host-post-resp', subs,
+ response, 200)
+
+ def test_remove_host(self):
+ self.test_add_host()
+ subs = {
+ "host_name": self.compute.host,
+ }
+ response = self._do_post('os-aggregates/1/action',
+ 'aggregate-remove-host-post-req', subs)
+ subs.update(self._get_regexes())
+ self._verify_response('aggregates-remove-host-post-resp',
+ subs, response, 200)
+
+ def test_update_aggregate(self):
+ aggregate_id = self.test_aggregate_create()
+ response = self._do_put('os-aggregates/%s' % aggregate_id,
+ 'aggregate-update-post-req', {})
+ subs = self._get_regexes()
+ self._verify_response('aggregate-update-post-resp',
+ subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_attach_interfaces.py b/nova/tests/unit/integrated/v3/test_attach_interfaces.py
new file mode 100644
index 0000000000..f35edcb740
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_attach_interfaces.py
@@ -0,0 +1,166 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute import api as compute_api
+from nova import exception
+from nova.network import api as network_api
+from nova.tests.unit import fake_network_cache_model
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class AttachInterfacesSampleJsonTest(test_servers.ServersSampleBase):
+ extension_name = 'os-attach-interfaces'
+
+ def setUp(self):
+ super(AttachInterfacesSampleJsonTest, self).setUp()
+
+ def fake_list_ports(self, *args, **kwargs):
+ uuid = kwargs.get('device_id', None)
+ if not uuid:
+ raise exception.InstanceNotFound(instance_id=None)
+ port_data = {
+ "id": "ce531f90-199f-48c0-816c-13e38010b442",
+ "network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "fa:16:3e:4c:2c:30",
+ "fixed_ips": [
+ {
+ "ip_address": "192.168.1.3",
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+ }
+ ],
+ "device_id": uuid,
+ }
+ ports = {'ports': [port_data]}
+ return ports
+
+ def fake_show_port(self, context, port_id=None):
+ if not port_id:
+ raise exception.PortNotFound(port_id=None)
+ port_data = {
+ "id": port_id,
+ "network_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "mac_address": "fa:16:3e:4c:2c:30",
+ "fixed_ips": [
+ {
+ "ip_address": "192.168.1.3",
+ "subnet_id": "f8a6e8f8-c2ec-497c-9f23-da9616de54ef"
+ }
+ ],
+ "device_id": 'bece68a3-2f8b-4e66-9092-244493d6aba7',
+ }
+ port = {'port': port_data}
+ return port
+
+ def fake_attach_interface(self, context, instance,
+ network_id, port_id,
+ requested_ip='192.168.1.3'):
+ if not network_id:
+ network_id = "fake_net_uuid"
+ if not port_id:
+ port_id = "fake_port_uuid"
+ vif = fake_network_cache_model.new_vif()
+ vif['id'] = port_id
+ vif['network']['id'] = network_id
+ vif['network']['subnets'][0]['ips'][0] = requested_ip
+ return vif
+
+ def fake_detach_interface(self, context, instance, port_id):
+ pass
+
+ self.stubs.Set(network_api.API, 'list_ports', fake_list_ports)
+ self.stubs.Set(network_api.API, 'show_port', fake_show_port)
+ self.stubs.Set(compute_api.API, 'attach_interface',
+ fake_attach_interface)
+ self.stubs.Set(compute_api.API, 'detach_interface',
+ fake_detach_interface)
+ self.flags(auth_strategy=None, group='neutron')
+ self.flags(url='http://anyhost/', group='neutron')
+ self.flags(url_timeout=30, group='neutron')
+
+ def generalize_subs(self, subs, vanilla_regexes):
+ subs['subnet_id'] = vanilla_regexes['uuid']
+ subs['net_id'] = vanilla_regexes['uuid']
+ subs['port_id'] = vanilla_regexes['uuid']
+ subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+ subs['ip_address'] = vanilla_regexes['ip']
+ return subs
+
+ def test_list_interfaces(self):
+ instance_uuid = self._post_server()
+ response = self._do_get('servers/%s/os-interface'
+ % instance_uuid)
+ subs = {
+ 'ip_address': '192.168.1.3',
+ 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
+ 'mac_addr': 'fa:16:3e:4c:2c:30',
+ 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
+ 'port_id': 'ce531f90-199f-48c0-816c-13e38010b442',
+ 'port_state': 'ACTIVE'
+ }
+ self._verify_response('attach-interfaces-list-resp', subs,
+ response, 200)
+
+ def _stub_show_for_instance(self, instance_uuid, port_id):
+ show_port = network_api.API().show_port(None, port_id)
+ show_port['port']['device_id'] = instance_uuid
+ self.stubs.Set(network_api.API, 'show_port', lambda *a, **k: show_port)
+
+ def test_show_interfaces(self):
+ instance_uuid = self._post_server()
+ port_id = 'ce531f90-199f-48c0-816c-13e38010b442'
+ self._stub_show_for_instance(instance_uuid, port_id)
+ response = self._do_get('servers/%s/os-interface/%s' %
+ (instance_uuid, port_id))
+ subs = {
+ 'ip_address': '192.168.1.3',
+ 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
+ 'mac_addr': 'fa:16:3e:4c:2c:30',
+ 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
+ 'port_id': port_id,
+ 'port_state': 'ACTIVE'
+ }
+ self._verify_response('attach-interfaces-show-resp', subs,
+ response, 200)
+
+ def test_create_interfaces(self, instance_uuid=None):
+ if instance_uuid is None:
+ instance_uuid = self._post_server()
+ subs = {
+ 'net_id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
+ 'port_id': 'ce531f90-199f-48c0-816c-13e38010b442',
+ 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef',
+ 'ip_address': '192.168.1.3',
+ 'port_state': 'ACTIVE',
+ 'mac_addr': 'fa:16:3e:4c:2c:30',
+ }
+ self._stub_show_for_instance(instance_uuid, subs['port_id'])
+ response = self._do_post('servers/%s/os-interface'
+ % instance_uuid,
+ 'attach-interfaces-create-req', subs)
+ subs.update(self._get_regexes())
+ self._verify_response('attach-interfaces-create-resp', subs,
+ response, 200)
+
+ def test_delete_interfaces(self):
+ instance_uuid = self._post_server()
+ port_id = 'ce531f90-199f-48c0-816c-13e38010b442'
+ response = self._do_delete('servers/%s/os-interface/%s' %
+ (instance_uuid, port_id))
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
diff --git a/nova/tests/unit/integrated/v3/test_availability_zone.py b/nova/tests/unit/integrated/v3/test_availability_zone.py
new file mode 100644
index 0000000000..6f59e2c264
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_availability_zone.py
@@ -0,0 +1,49 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+CONF = cfg.CONF
+CONF.import_opt('manager', 'nova.cells.opts', group='cells')
+
+
+class AvailabilityZoneJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-availability-zone"
+
+ def _setup_services(self):
+ self.conductor = self.start_service('conductor',
+ host='conductor', manager=CONF.conductor.manager)
+ self.compute = self.start_service('compute', host='compute')
+ self.cert = self.start_service('cert', host='cert')
+ self.consoleauth = self.start_service('consoleauth',
+ host='consoleauth')
+ self.network = self.start_service('network', host='network')
+ self.scheduler = self.start_service('scheduler', host='scheduler')
+ self.cells = self.start_service('cells', host='cells',
+ manager=CONF.cells.manager)
+
+ def test_availability_zone_list(self):
+ response = self._do_get('os-availability-zone')
+ self._verify_response('availability-zone-list-resp', {}, response, 200)
+
+ def test_availability_zone_detail(self):
+ response = self._do_get('os-availability-zone/detail')
+ subs = self._get_regexes()
+ self._verify_response('availability-zone-detail-resp', subs, response,
+ 200)
+
+ def test_availability_zone_post(self):
+ self._post_server()
diff --git a/nova/tests/unit/integrated/v3/test_cells.py b/nova/tests/unit/integrated/v3/test_cells.py
new file mode 100644
index 0000000000..2d7aea9542
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_cells.py
@@ -0,0 +1,107 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.cells import rpcapi as cells_rpcapi
+from nova.cells import state
+from nova import db
+from nova.db.sqlalchemy import models
+from nova import exception
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class CellsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-cells"
+
+ def setUp(self):
+ # db_check_interval < 0 makes cells manager always hit the DB
+ self.flags(enable=True, db_check_interval=-1, group='cells')
+ super(CellsSampleJsonTest, self).setUp()
+ self._stub_cells()
+
+ def _stub_cells(self, num_cells=5):
+ self.cell_list = []
+ self.cells_next_id = 1
+
+ def _fake_cell_get_all(context):
+ return self.cell_list
+
+ def _fake_cell_get(inst, context, cell_name):
+ for cell in self.cell_list:
+ if cell['name'] == cell_name:
+ return cell
+ raise exception.CellNotFound(cell_name=cell_name)
+
+ for x in xrange(num_cells):
+ cell = models.Cell()
+ our_id = self.cells_next_id
+ self.cells_next_id += 1
+ cell.update({'id': our_id,
+ 'name': 'cell%s' % our_id,
+ 'transport_url': 'rabbit://username%s@/' % our_id,
+ 'is_parent': our_id % 2 == 0})
+ self.cell_list.append(cell)
+
+ self.stubs.Set(db, 'cell_get_all', _fake_cell_get_all)
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_get', _fake_cell_get)
+
+ def test_cells_empty_list(self):
+ # Override this
+ self._stub_cells(num_cells=0)
+ response = self._do_get('os-cells')
+ subs = self._get_regexes()
+ self._verify_response('cells-list-empty-resp', subs, response, 200)
+
+ def test_cells_list(self):
+ response = self._do_get('os-cells')
+ subs = self._get_regexes()
+ self._verify_response('cells-list-resp', subs, response, 200)
+
+ def test_cells_get(self):
+ response = self._do_get('os-cells/cell3')
+ subs = self._get_regexes()
+ self._verify_response('cells-get-resp', subs, response, 200)
+
+ def test_get_cell_capacity(self):
+ self._mock_cell_capacity()
+ state_manager = state.CellStateManager()
+ my_state = state_manager.get_my_state()
+ response = self._do_get('os-cells/%s/capacities' %
+ my_state.name)
+ subs = self._get_regexes()
+ return self._verify_response('cells-capacities-resp',
+ subs, response, 200)
+
+ def test_get_all_cells_capacity(self):
+ self._mock_cell_capacity()
+ response = self._do_get('os-cells/capacities')
+ subs = self._get_regexes()
+ return self._verify_response('cells-capacities-resp',
+ subs, response, 200)
+
+ def _mock_cell_capacity(self):
+ self.mox.StubOutWithMock(self.cells.manager.state_manager,
+ 'get_our_capacities')
+ response = {"ram_free":
+ {"units_by_mb": {"8192": 0, "512": 13,
+ "4096": 1, "2048": 3, "16384": 0},
+ "total_mb": 7680},
+ "disk_free":
+ {"units_by_mb": {"81920": 11, "20480": 46,
+ "40960": 23, "163840": 5, "0": 0},
+ "total_mb": 1052672}
+ }
+ self.cells.manager.state_manager.get_our_capacities(). \
+ AndReturn(response)
+ self.mox.ReplayAll()
diff --git a/nova/tests/unit/integrated/v3/test_certificates.py b/nova/tests/unit/integrated/v3/test_certificates.py
new file mode 100644
index 0000000000..96cbbc711c
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_certificates.py
@@ -0,0 +1,31 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class CertificatesSamplesJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-certificates"
+
+ def test_create_certificates(self):
+ response = self._do_post('os-certificates',
+ 'certificate-create-req', {})
+ subs = self._get_regexes()
+ self._verify_response('certificate-create-resp', subs, response, 200)
+
+ def test_get_root_certificate(self):
+ response = self._do_get('os-certificates/root')
+ subs = self._get_regexes()
+ self._verify_response('certificate-get-root-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_cloudpipe.py b/nova/tests/unit/integrated/v3/test_cloudpipe.py
new file mode 100644
index 0000000000..b8cb28d077
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_cloudpipe.py
@@ -0,0 +1,80 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid as uuid_lib
+
+from oslo.config import cfg
+
+from nova.cloudpipe import pipelib
+from nova.network import api as network_api
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+CONF = cfg.CONF
+CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
+
+
+class CloudPipeSampleTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-cloudpipe"
+
+ def setUp(self):
+ super(CloudPipeSampleTest, self).setUp()
+
+ def get_user_data(self, project_id):
+ """Stub method to generate user data for cloudpipe tests."""
+ return "VVNFUiBEQVRB\n"
+
+ def network_api_get(self, context, network_uuid):
+ """Stub to get a valid network and its information."""
+ return {'vpn_public_address': '127.0.0.1',
+ 'vpn_public_port': 22}
+
+ self.stubs.Set(pipelib.CloudPipe, 'get_encoded_zip', get_user_data)
+ self.stubs.Set(network_api.API, "get",
+ network_api_get)
+
+ def generalize_subs(self, subs, vanilla_regexes):
+ subs['project_id'] = '[0-9a-f-]+'
+ return subs
+
+ def test_cloud_pipe_create(self):
+ # Get api samples of cloud pipe extension creation.
+ self.flags(vpn_image_id=fake.get_valid_image_id())
+ project = {'project_id': str(uuid_lib.uuid4().hex)}
+ response = self._do_post('os-cloudpipe', 'cloud-pipe-create-req',
+ project)
+ subs = self._get_regexes()
+ subs.update(project)
+ subs['image_id'] = CONF.vpn_image_id
+ self._verify_response('cloud-pipe-create-resp', subs, response, 200)
+ return project
+
+ def test_cloud_pipe_list(self):
+ # Get api samples of cloud pipe extension get request.
+ project = self.test_cloud_pipe_create()
+ response = self._do_get('os-cloudpipe')
+ subs = self._get_regexes()
+ subs.update(project)
+ subs['image_id'] = CONF.vpn_image_id
+ self._verify_response('cloud-pipe-get-resp', subs, response, 200)
+
+ def test_cloud_pipe_update(self):
+ subs = {'vpn_ip': '192.168.1.1',
+ 'vpn_port': 2000}
+ response = self._do_put('os-cloudpipe/configure-project',
+ 'cloud-pipe-update-req',
+ subs)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
diff --git a/nova/tests/unit/integrated/v3/test_config_drive.py b/nova/tests/unit/integrated/v3/test_config_drive.py
new file mode 100644
index 0000000000..b8e7fc207a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_config_drive.py
@@ -0,0 +1,48 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ConfigDriveSampleJsonTest(test_servers.ServersSampleBase):
+ extension_name = 'os-config-drive'
+
+ def setUp(self):
+ super(ConfigDriveSampleJsonTest, self).setUp()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fake.stub_out_image_service(self.stubs)
+
+ def test_config_drive_show(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ # config drive can be a string for True or empty value for False
+ subs['cdrive'] = '.*'
+ self._verify_response('server-config-drive-get-resp', subs,
+ response, 200)
+
+ def test_config_drive_detail(self):
+ self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ # config drive can be a string for True or empty value for False
+ subs['cdrive'] = '.*'
+ self._verify_response('servers-config-drive-details-resp',
+ subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_console_auth_tokens.py b/nova/tests/unit/integrated/v3/test_console_auth_tokens.py
new file mode 100644
index 0000000000..d286458678
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_console_auth_tokens.py
@@ -0,0 +1,51 @@
+# Copyright 2013 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+
+from oslo.serialization import jsonutils
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ConsoleAuthTokensSampleJsonTests(test_servers.ServersSampleBase):
+ extension_name = "os-console-auth-tokens"
+ extra_extensions_to_load = ["os-remote-consoles"]
+
+ def _get_console_url(self, data):
+ return jsonutils.loads(data)["console"]["url"]
+
+ def _get_console_token(self, uuid):
+ response = self._do_post('servers/%s/action' % uuid,
+ 'get-rdp-console-post-req',
+ {'action': 'os-getRDPConsole'})
+
+ url = self._get_console_url(response.content)
+ return re.match('.+?token=([^&]+)', url).groups()[0]
+
+ def test_get_console_connect_info(self):
+ self.flags(enabled=True, group='rdp')
+
+ uuid = self._post_server()
+ token = self._get_console_token(uuid)
+
+ response = self._do_get('os-console-auth-tokens/%s' % token)
+
+ subs = self._get_regexes()
+ subs["uuid"] = uuid
+ subs["host"] = r"[\w\.\-]+"
+ subs["port"] = "[0-9]+"
+ subs["internal_access_path"] = ".*"
+ self._verify_response('get-console-connect-info-get-resp', subs,
+ response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_console_output.py b/nova/tests/unit/integrated/v3/test_console_output.py
new file mode 100644
index 0000000000..6ad9a1d9e6
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_console_output.py
@@ -0,0 +1,27 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ConsoleOutputSampleJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-console-output"
+
+ def test_get_console_output(self):
+ uuid = self._post_server()
+ response = self._do_post('servers/%s/action' % uuid,
+ 'console-output-post-req', {})
+ subs = self._get_regexes()
+ self._verify_response('console-output-post-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_consoles.py b/nova/tests/unit/integrated/v3/test_consoles.py
new file mode 100644
index 0000000000..7a889aa4cf
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_consoles.py
@@ -0,0 +1,55 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ConsolesSamplesJsonTest(test_servers.ServersSampleBase):
+ sample_dir = "consoles"
+
+ def setUp(self):
+ super(ConsolesSamplesJsonTest, self).setUp()
+ self.flags(console_public_hostname='fake')
+ self.flags(console_host='fake')
+ self.flags(console_driver='nova.console.fake.FakeConsoleProxy')
+ self.console = self.start_service('console', host='fake')
+
+ def _create_consoles(self, server_uuid):
+ response = self._do_post('servers/%s/consoles' % server_uuid,
+ 'consoles-create-req', {})
+ self.assertEqual(response.status_code, 201)
+
+ def test_create_consoles(self):
+ uuid = self._post_server()
+ self._create_consoles(uuid)
+
+ def test_list_consoles(self):
+ uuid = self._post_server()
+ self._create_consoles(uuid)
+ response = self._do_get('servers/%s/consoles' % uuid)
+ self._verify_response('consoles-list-get-resp', {}, response, 200)
+
+ def test_console_get(self):
+ uuid = self._post_server()
+ self._create_consoles(uuid)
+ response = self._do_get('servers/%s/consoles/1' % uuid)
+ subs = self._get_regexes()
+ self._verify_response('consoles-get-resp', subs, response, 200)
+
+ def test_console_delete(self):
+ uuid = self._post_server()
+ self._create_consoles(uuid)
+ response = self._do_delete('servers/%s/consoles/1' % uuid)
+ self.assertEqual(response.status_code, 202)
diff --git a/nova/tests/unit/integrated/v3/test_create_backup.py b/nova/tests/unit/integrated/v3/test_create_backup.py
new file mode 100644
index 0000000000..089a61fb5f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_create_backup.py
@@ -0,0 +1,38 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class CreateBackupSamplesJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-create-backup"
+
+ def setUp(self):
+        """setUp Method for CreateBackup api samples extension
+
+        This method creates the server that will be used in each test
+        """
+ super(CreateBackupSamplesJsonTest, self).setUp()
+ self.uuid = self._post_server()
+
+ @mock.patch.object(fake._FakeImageService, 'detail', return_value=[])
+ def test_post_backup_server(self, mock_method):
+ # Get api samples to backup server request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'create-backup-req', {})
+ self.assertEqual(202, response.status_code)
diff --git a/nova/tests/unit/integrated/v3/test_deferred_delete.py b/nova/tests/unit/integrated/v3/test_deferred_delete.py
new file mode 100644
index 0000000000..0b8d970900
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_deferred_delete.py
@@ -0,0 +1,42 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class DeferredDeleteSampleJsonTests(test_servers.ServersSampleBase):
+ extension_name = "os-deferred-delete"
+
+ def setUp(self):
+ super(DeferredDeleteSampleJsonTests, self).setUp()
+ self.flags(reclaim_instance_interval=1)
+
+ def test_restore(self):
+ uuid = self._post_server()
+ response = self._do_delete('servers/%s' % uuid)
+
+ response = self._do_post('servers/%s/action' % uuid,
+ 'restore-post-req', {})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+ def test_force_delete(self):
+ uuid = self._post_server()
+ response = self._do_delete('servers/%s' % uuid)
+
+ response = self._do_post('servers/%s/action' % uuid,
+ 'force-delete-post-req', {})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
diff --git a/nova/tests/unit/integrated/v3/test_disk_config.py b/nova/tests/unit/integrated/v3/test_disk_config.py
new file mode 100644
index 0000000000..97eeb31ace
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_disk_config.py
@@ -0,0 +1,80 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class DiskConfigJsonTest(test_servers.ServersSampleBase):
+ extension_name = 'os-disk-config'
+ extra_extensions_to_load = ["images"]
+
+ def test_list_servers_detail(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ self._verify_response('list-servers-detail-get', subs, response, 200)
+
+ def test_get_server(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_update_server(self):
+ uuid = self._post_server()
+ response = self._do_put('servers/%s' % uuid,
+ 'server-update-put-req', {})
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-update-put-resp', subs, response, 200)
+
+ def test_resize_server(self):
+ self.flags(allow_resize_to_same_host=True)
+ uuid = self._post_server()
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-resize-post-req', {})
+ self.assertEqual(response.status_code, 202)
+ # NOTE(tmello): Resize does not return response body
+ # Bug #1085213.
+ self.assertEqual(response.content, "")
+
+ def test_rebuild_server(self):
+ uuid = self._post_server()
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ }
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-action-rebuild-req', subs)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-action-rebuild-resp',
+ subs, response, 202)
+
+ def test_get_image(self):
+ image_id = fake.get_valid_image_id()
+ response = self._do_get('images/%s' % image_id)
+ subs = self._get_regexes()
+ subs['image_id'] = image_id
+ self._verify_response('image-get-resp', subs, response, 200)
+
+ def test_list_images(self):
+ response = self._do_get('images/detail')
+ subs = self._get_regexes()
+ self._verify_response('image-list-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_evacuate.py b/nova/tests/unit/integrated/v3/test_evacuate.py
new file mode 100644
index 0000000000..1d63404b6d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_evacuate.py
@@ -0,0 +1,91 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.compute import api as compute_api
+from nova.compute import manager as compute_manager
+from nova.servicegroup import api as service_group_api
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class EvacuateJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-evacuate"
+
+ def _test_evacuate(self, req_subs, server_req, server_resp,
+ expected_resp_code):
+ self.uuid = self._post_server()
+
+ def fake_service_is_up(self, service):
+ """Simulate validation of instance host is down."""
+ return False
+
+ def fake_service_get_by_compute_host(self, context, host):
+ """Simulate that given host is a valid host."""
+ return {
+ 'host_name': host,
+ 'service': 'compute',
+ 'zone': 'nova'
+ }
+
+ def fake_check_instance_exists(self, context, instance):
+ """Simulate validation of instance does not exist."""
+ return False
+
+ self.stubs.Set(service_group_api.API, 'service_is_up',
+ fake_service_is_up)
+ self.stubs.Set(compute_api.HostAPI, 'service_get_by_compute_host',
+ fake_service_get_by_compute_host)
+ self.stubs.Set(compute_manager.ComputeManager,
+ '_check_instance_exists',
+ fake_check_instance_exists)
+
+ response = self._do_post('servers/%s/action' % self.uuid,
+ server_req, req_subs)
+ subs = self._get_regexes()
+ self._verify_response(server_resp, subs, response, expected_resp_code)
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate(self, rebuild_mock):
+ # Note (wingwj): The host can't be the same one
+ req_subs = {
+ 'host': 'testHost',
+ "adminPass": "MySecretPass",
+ "onSharedStorage": 'False'
+ }
+ self._test_evacuate(req_subs, 'server-evacuate-req',
+ 'server-evacuate-resp', 200)
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=False, preserve_ephemeral=mock.ANY,
+ host='testHost')
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate_find_host(self, rebuild_mock):
+ req_subs = {
+ "adminPass": "MySecretPass",
+ "onSharedStorage": 'False'
+ }
+ self._test_evacuate(req_subs, 'server-evacuate-find-host-req',
+ 'server-evacuate-find-host-resp', 200)
+
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=False, preserve_ephemeral=mock.ANY,
+ host=None)
diff --git a/nova/tests/unit/integrated/v3/test_extended_availability_zone.py b/nova/tests/unit/integrated/v3/test_extended_availability_zone.py
new file mode 100644
index 0000000000..accd4a2cdf
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_extended_availability_zone.py
@@ -0,0 +1,34 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ExtendedAvailabilityZoneJsonTests(test_servers.ServersSampleBase):
+ extension_name = "os-extended-availability-zone"
+
+ def test_show(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('servers-detail-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_extended_server_attributes.py b/nova/tests/unit/integrated/v3/test_extended_server_attributes.py
new file mode 100644
index 0000000000..1a00f45237
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_extended_server_attributes.py
@@ -0,0 +1,42 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ExtendedServerAttributesJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-extended-server-attributes"
+
+ def test_show(self):
+ uuid = self._post_server()
+
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+        subs['instance_name'] = r'instance-\d{8}'
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ uuid = self._post_server()
+
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+        subs['instance_name'] = r'instance-\d{8}'
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
+ self._verify_response('servers-detail-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_extended_status.py b/nova/tests/unit/integrated/v3/test_extended_status.py
new file mode 100644
index 0000000000..8f952eaacc
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_extended_status.py
@@ -0,0 +1,35 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ExtendedStatusSampleJsonTests(test_servers.ServersSampleBase):
+ extension_name = "os-extended-status"
+
+ def test_show(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['id'] = uuid
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('servers-detail-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_extended_volumes.py b/nova/tests/unit/integrated/v3/test_extended_volumes.py
new file mode 100644
index 0000000000..f6500eaac6
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_extended_volumes.py
@@ -0,0 +1,151 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute import api as compute_api
+from nova.compute import manager as compute_manager
+from nova import context
+from nova import db
+from nova import objects
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit.integrated.v3 import test_servers
+from nova.volume import cinder
+
+
+class ExtendedVolumesSampleJsonTests(test_servers.ServersSampleBase):
+ extension_name = "os-extended-volumes"
+
+ def _stub_compute_api_get_instance_bdms(self, server_id):
+
+ def fake_bdms_get_all_by_instance(context, instance_uuid,
+ use_slave=False):
+ bdms = [
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f803',
+ 'instance_uuid': server_id, 'source_type': 'volume',
+ 'destination_type': 'volume', 'device_name': '/dev/sdd'}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 2, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f804',
+ 'instance_uuid': server_id, 'source_type': 'volume',
+ 'destination_type': 'volume', 'device_name': '/dev/sdc'})
+ ]
+ return bdms
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_bdms_get_all_by_instance)
+
+ def _stub_compute_api_get(self):
+ def fake_compute_api_get(self, context, instance_id, **kwargs):
+ want_objects = kwargs.get('want_objects')
+ if want_objects:
+ return fake_instance.fake_instance_obj(
+ context, **{'uuid': instance_id})
+ else:
+ return {'uuid': instance_id}
+
+ self.stubs.Set(compute_api.API, 'get', fake_compute_api_get)
+
+ def test_show(self):
+ uuid = self._post_server()
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fakes.stub_bdm_get_all_by_instance)
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ uuid = self._post_server()
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fakes.stub_bdm_get_all_by_instance)
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['id'] = uuid
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('servers-detail-resp', subs, response, 200)
+
+ def test_attach_volume(self):
+ bdm = objects.BlockDeviceMapping()
+ device_name = '/dev/vdd'
+ bdm['device_name'] = device_name
+ self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
+ self.stubs.Set(cinder.API, 'check_attach', lambda *a, **k: None)
+ self.stubs.Set(cinder.API, 'reserve_volume', lambda *a, **k: None)
+ self.stubs.Set(compute_manager.ComputeManager,
+ "reserve_block_device_name",
+ lambda *a, **k: bdm)
+ self.stubs.Set(compute_manager.ComputeManager,
+ 'attach_volume',
+ lambda *a, **k: None)
+
+ volume = fakes.stub_volume_get(None, context.get_admin_context(),
+ 'a26887c6-c47b-4654-abb5-dfadf7d3f803')
+ subs = {
+ 'volume_id': volume['id'],
+ 'device': device_name,
+ 'disk_bus': 'ide',
+ 'device_type': 'cdrom'
+ }
+ server_id = self._post_server()
+ response = self._do_post('servers/%s/action'
+ % server_id,
+ 'attach-volume-req', subs)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+ def test_detach_volume(self):
+ server_id = self._post_server()
+ attach_id = "a26887c6-c47b-4654-abb5-dfadf7d3f803"
+ self._stub_compute_api_get_instance_bdms(server_id)
+ self._stub_compute_api_get()
+ self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
+ self.stubs.Set(compute_api.API, 'detach_volume', lambda *a, **k: None)
+ subs = {
+ 'volume_id': attach_id,
+ }
+ response = self._do_post('servers/%s/action'
+ % server_id, 'detach-volume-req', subs)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+ def test_swap_volume(self):
+ server_id = self._post_server()
+ old_volume_id = "a26887c6-c47b-4654-abb5-dfadf7d3f803"
+ old_new_volume = 'a26887c6-c47b-4654-abb5-dfadf7d3f805'
+ self._stub_compute_api_get_instance_bdms(server_id)
+
+ def stub_volume_get(self, context, volume_id):
+ if volume_id == old_volume_id:
+ return fakes.stub_volume(volume_id, instance_uuid=server_id)
+ else:
+ return fakes.stub_volume(volume_id, instance_uuid=None,
+ attach_status='detached')
+
+ self.stubs.Set(cinder.API, 'get', stub_volume_get)
+ self.stubs.Set(cinder.API, 'begin_detaching', lambda *a, **k: None)
+ self.stubs.Set(cinder.API, 'check_attach', lambda *a, **k: None)
+ self.stubs.Set(cinder.API, 'check_detach', lambda *a, **k: None)
+ self.stubs.Set(cinder.API, 'reserve_volume', lambda *a, **k: None)
+ self.stubs.Set(compute_manager.ComputeManager, 'swap_volume',
+ lambda *a, **k: None)
+ subs = {
+ 'old_volume_id': old_volume_id,
+ 'new_volume_id': old_new_volume
+ }
+ response = self._do_post('servers/%s/action' % server_id,
+ 'swap-volume-req', subs)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
diff --git a/nova/tests/unit/integrated/v3/test_extension_info.py b/nova/tests/unit/integrated/v3/test_extension_info.py
new file mode 100644
index 0000000000..c23339b96e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_extension_info.py
@@ -0,0 +1,71 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova.api.openstack import extensions as api_extensions
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class ExtensionInfoSamplesJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ sample_dir = "extension-info"
+
+ def test_list_extensions(self):
+ response = self._do_get('extensions')
+ subs = self._get_regexes()
+ self._verify_response('extensions-list-resp', subs, response, 200)
+
+ def test_get_extensions(self):
+ response = self._do_get('extensions/flavors')
+ subs = self._get_regexes()
+ self._verify_response('extensions-get-resp', subs, response, 200)
+
+
+class ExtensionInfoFormatTest(api_sample_base.ApiSampleTestBaseV3):
+ # NOTE: To check all extension formats, here makes authorize() return True
+ # always instead of fake_policy.py because most extensions are not set as
+ # "discoverable" in fake_policy.py.
+ all_extensions = True
+
+ def _test_list_extensions(self, key, pattern):
+ with mock.patch.object(api_extensions,
+ 'soft_extension_authorizer') as api_mock:
+ def fake_soft_extension_authorizer(api_name, extension_name):
+ def authorize(context, action=None):
+ return True
+ return authorize
+
+ api_mock.side_effect = fake_soft_extension_authorizer
+ response = self._do_get('extensions')
+ response = jsonutils.loads(response.content)
+ extensions = response['extensions']
+ pattern_comp = re.compile(pattern)
+ for ext in extensions:
+ self.assertIsNotNone(pattern_comp.match(ext[key]),
+ '%s does not match with %s' % (ext[key],
+ pattern))
+
+ def test_list_extensions_name_format(self):
+ # name should be CamelCase.
+ pattern = '^[A-Z]{1}[a-z]{1}[a-zA-Z]*$'
+ self._test_list_extensions('name', pattern)
+
+ def test_list_extensions_alias_format(self):
+ # alias should contain lowercase chars and '-' only.
+ pattern = '^[a-z-]+$'
+ self._test_list_extensions('alias', pattern)
diff --git a/nova/tests/unit/integrated/v3/test_fixed_ips.py b/nova/tests/unit/integrated/v3/test_fixed_ips.py
new file mode 100644
index 0000000000..cabeac018a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_fixed_ips.py
@@ -0,0 +1,109 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import db
+from nova import exception
+from nova.tests.unit.integrated.v3 import test_servers
+from nova.tests.unit.objects import test_network
+from nova.tests.unit import utils as test_utils
+
+
+class FixedIpTest(test_servers.ServersSampleBase):
+ extension_name = "os-fixed-ips"
+
+ def setUp(self):
+ super(FixedIpTest, self).setUp()
+
+ instance = dict(test_utils.get_test_instance(),
+ hostname='openstack', host='host')
+ fake_fixed_ips = [{'id': 1,
+ 'address': '192.168.1.1',
+ 'network_id': 1,
+ 'virtual_interface_id': 1,
+ 'instance_uuid': '1',
+ 'allocated': False,
+ 'leased': False,
+ 'reserved': False,
+ 'created_at': None,
+ 'deleted_at': None,
+ 'updated_at': None,
+ 'deleted': None,
+ 'instance': instance,
+ 'network': test_network.fake_network,
+ 'host': None},
+ {'id': 2,
+ 'address': '192.168.1.2',
+ 'network_id': 1,
+ 'virtual_interface_id': 2,
+ 'instance_uuid': '2',
+ 'allocated': False,
+ 'leased': False,
+ 'reserved': False,
+ 'created_at': None,
+ 'deleted_at': None,
+ 'updated_at': None,
+ 'deleted': None,
+ 'instance': instance,
+ 'network': test_network.fake_network,
+ 'host': None},
+ ]
+
+ def fake_fixed_ip_get_by_address(context, address,
+ columns_to_join=None):
+ for fixed_ip in fake_fixed_ips:
+ if fixed_ip['address'] == address:
+ return fixed_ip
+ raise exception.FixedIpNotFoundForAddress(address=address)
+
+ def fake_fixed_ip_get_by_address_detailed(context, address):
+ network = {'id': 1,
+ 'cidr': "192.168.1.0/24"}
+ host = {'host': "host",
+ 'hostname': 'openstack'}
+ for fixed_ip in fake_fixed_ips:
+ if fixed_ip['address'] == address:
+ return (fixed_ip, network, host)
+ raise exception.FixedIpNotFoundForAddress(address=address)
+
+ def fake_fixed_ip_update(context, address, values):
+ fixed_ip = fake_fixed_ip_get_by_address(context, address)
+ if fixed_ip is None:
+ raise exception.FixedIpNotFoundForAddress(address=address)
+ else:
+ for key in values:
+ fixed_ip[key] = values[key]
+
+ self.stubs.Set(db, "fixed_ip_get_by_address",
+ fake_fixed_ip_get_by_address)
+ self.stubs.Set(db, "fixed_ip_get_by_address_detailed",
+ fake_fixed_ip_get_by_address_detailed)
+ self.stubs.Set(db, "fixed_ip_update", fake_fixed_ip_update)
+
+ def test_fixed_ip_reserve(self):
+ # Reserve a Fixed IP.
+ project = {'reserve': None}
+ response = self._do_post('os-fixed-ips/192.168.1.1/action',
+ 'fixedip-post-req',
+ project)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+ def test_get_fixed_ip(self):
+ # Return data about the given fixed ip.
+ response = self._do_get('os-fixed-ips/192.168.1.1')
+ project = {'cidr': '192.168.1.0/24',
+ 'hostname': 'openstack',
+ 'host': 'host',
+ 'address': '192.168.1.1'}
+ self._verify_response('fixedips-get-resp', project, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_flavor_access.py b/nova/tests/unit/integrated/v3/test_flavor_access.py
new file mode 100644
index 0000000000..66316856a3
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_flavor_access.py
@@ -0,0 +1,89 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class FlavorAccessSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = 'flavor-access'
+
+ def _add_tenant(self):
+ subs = {
+ 'tenant_id': 'fake_tenant',
+ 'flavor_id': 10,
+ }
+ response = self._do_post('flavors/10/action',
+ 'flavor-access-add-tenant-req',
+ subs)
+ self._verify_response('flavor-access-add-tenant-resp',
+ subs, response, 200)
+
+ def _create_flavor(self):
+ subs = {
+ 'flavor_id': 10,
+ 'flavor_name': 'test_flavor'
+ }
+ response = self._do_post("flavors",
+ "flavor-access-create-req",
+ subs)
+ subs.update(self._get_regexes())
+ self._verify_response("flavor-access-create-resp", subs, response, 200)
+
+ def test_flavor_access_create(self):
+ self._create_flavor()
+
+ def test_flavor_access_detail(self):
+ response = self._do_get('flavors/detail')
+ subs = self._get_regexes()
+ self._verify_response('flavor-access-detail-resp', subs, response, 200)
+
+ def test_flavor_access_list(self):
+ self._create_flavor()
+ self._add_tenant()
+ flavor_id = 10
+ response = self._do_get('flavors/%s/os-flavor-access' % flavor_id)
+ subs = {
+ 'flavor_id': flavor_id,
+ 'tenant_id': 'fake_tenant',
+ }
+ self._verify_response('flavor-access-list-resp', subs, response, 200)
+
+ def test_flavor_access_show(self):
+ flavor_id = 1
+ response = self._do_get('flavors/%s' % flavor_id)
+ subs = {
+ 'flavor_id': flavor_id
+ }
+ subs.update(self._get_regexes())
+ self._verify_response('flavor-access-show-resp', subs, response, 200)
+
+ def test_flavor_access_add_tenant(self):
+ self._create_flavor()
+ self._add_tenant()
+
+ def test_flavor_access_remove_tenant(self):
+ self._create_flavor()
+ self._add_tenant()
+ subs = {
+ 'tenant_id': 'fake_tenant',
+ }
+ response = self._do_post('flavors/10/action',
+ "flavor-access-remove-tenant-req",
+ subs)
+ exp_subs = {
+ "tenant_id": self.api.project_id,
+ "flavor_id": "10"
+ }
+ self._verify_response('flavor-access-remove-tenant-resp',
+ exp_subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_flavor_extraspecs.py b/nova/tests/unit/integrated/v3/test_flavor_extraspecs.py
new file mode 100644
index 0000000000..ba823c7c24
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_flavor_extraspecs.py
@@ -0,0 +1,62 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
class FlavorExtraSpecsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
    """API samples for the flavor extra-specs extension."""

    extension_name = 'flavor-extra-specs'

    def _flavor_extra_specs_create(self):
        """Create two extra-spec keys on flavor 1 and verify the response."""
        subs = {'value1': 'value1', 'value2': 'value2'}
        resp = self._do_post('flavors/1/os-extra_specs',
                             'flavor-extra-specs-create-req', subs)
        self._verify_response('flavor-extra-specs-create-resp',
                              subs, resp, 200)

    def test_flavor_extra_specs_get(self):
        """Fetch a single extra-spec key."""
        self._flavor_extra_specs_create()
        resp = self._do_get('flavors/1/os-extra_specs/key1')
        self._verify_response('flavor-extra-specs-get-resp',
                              {'value1': 'value1'}, resp, 200)

    def test_flavor_extra_specs_list(self):
        """List every extra spec on a flavor."""
        self._flavor_extra_specs_create()
        resp = self._do_get('flavors/1/os-extra_specs')
        self._verify_response('flavor-extra-specs-list-resp',
                              {'value1': 'value1', 'value2': 'value2'},
                              resp, 200)

    def test_flavor_extra_specs_create(self):
        """Create extra specs (sample generation only)."""
        self._flavor_extra_specs_create()

    def test_flavor_extra_specs_update(self):
        """Update a single extra-spec key."""
        self._flavor_extra_specs_create()
        subs = {'value1': 'new_value1'}
        resp = self._do_put('flavors/1/os-extra_specs/key1',
                            'flavor-extra-specs-update-req', subs)
        self._verify_response('flavor-extra-specs-update-resp',
                              subs, resp, 200)

    def test_flavor_extra_specs_delete(self):
        """Delete an extra-spec key; expect an empty 200 response."""
        self._flavor_extra_specs_create()
        resp = self._do_delete('flavors/1/os-extra_specs/key1')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, '')
diff --git a/nova/tests/unit/integrated/v3/test_flavor_manage.py b/nova/tests/unit/integrated/v3/test_flavor_manage.py
new file mode 100644
index 0000000000..e7911ef0a6
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_flavor_manage.py
@@ -0,0 +1,43 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
class FlavorManageSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
    """API samples for the flavor-manage extension."""

    extension_name = 'flavor-manage'

    def _create_flavor(self):
        """Create flavor 10 and verify the create-response sample."""
        subs = {'flavor_id': 10, 'flavor_name': "test_flavor"}
        resp = self._do_post("flavors", "flavor-create-post-req", subs)
        # Merge in the common regex substitutions before verifying.
        subs.update(self._get_regexes())
        self._verify_response("flavor-create-post-resp", subs, resp, 200)

    def test_create_flavor(self):
        """API sample: create a flavor."""
        self._create_flavor()

    def test_delete_flavor(self):
        """API sample: delete a flavor; expect an empty 202 response."""
        self._create_flavor()
        resp = self._do_delete("flavors/10")
        self.assertEqual(resp.status_code, 202)
        self.assertEqual(resp.content, '')
diff --git a/nova/tests/unit/integrated/v3/test_flavor_rxtx.py b/nova/tests/unit/integrated/v3/test_flavor_rxtx.py
new file mode 100644
index 0000000000..2b0840259e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_flavor_rxtx.py
@@ -0,0 +1,46 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
class FlavorRxtxJsonTest(api_sample_base.ApiSampleTestBaseV3):
    """API samples for the os-flavor-rxtx extension."""

    extension_name = 'os-flavor-rxtx'

    def test_flavor_rxtx_get(self):
        """Show a single flavor including its rxtx factor."""
        fid = 1
        resp = self._do_get('flavors/%s' % fid)
        subs = {'flavor_id': fid, 'flavor_name': 'm1.tiny'}
        subs.update(self._get_regexes())
        self._verify_response('flavor-rxtx-get-resp', subs, resp, 200)

    def test_flavors_rxtx_detail(self):
        """Flavor detail listing including rxtx attributes."""
        resp = self._do_get('flavors/detail')
        self._verify_response('flavor-rxtx-list-resp',
                              self._get_regexes(), resp, 200)

    def test_flavors_rxtx_create(self):
        """Create a flavor carrying an rxtx factor."""
        subs = {'flavor_id': 100, 'flavor_name': 'flavortest'}
        resp = self._do_post('flavors', 'flavor-rxtx-post-req', subs)
        subs.update(self._get_regexes())
        self._verify_response('flavor-rxtx-post-resp', subs, resp, 200)
diff --git a/nova/tests/unit/integrated/v3/test_flavors.py b/nova/tests/unit/integrated/v3/test_flavors.py
new file mode 100644
index 0000000000..e8db9bc5a1
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_flavors.py
@@ -0,0 +1,35 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
class FlavorsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
    """API samples for the core flavors resource."""

    sample_dir = 'flavors'

    def test_flavors_get(self):
        """Show a single flavor."""
        resp = self._do_get('flavors/1')
        self._verify_response('flavor-get-resp',
                              self._get_regexes(), resp, 200)

    def test_flavors_list(self):
        """List flavor summaries."""
        resp = self._do_get('flavors')
        self._verify_response('flavors-list-resp',
                              self._get_regexes(), resp, 200)

    def test_flavors_detail(self):
        """List flavors with full details."""
        resp = self._do_get('flavors/detail')
        self._verify_response('flavors-detail-resp',
                              self._get_regexes(), resp, 200)
diff --git a/nova/tests/unit/integrated/v3/test_floating_ip_dns.py b/nova/tests/unit/integrated/v3/test_floating_ip_dns.py
new file mode 100644
index 0000000000..d0326b6535
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_floating_ip_dns.py
@@ -0,0 +1,91 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
class FloatingIpDNSTest(api_sample_base.ApiSampleTestBaseV3):
    """API samples for the os-floating-ip-dns extension."""

    extension_name = "os-floating-ip-dns"

    # Fixture values shared by every test below.
    domain = 'domain1.example.org'
    name = 'instance1'
    scope = 'public'
    project = 'project1'
    dns_type = 'A'
    ip = '192.168.1.1'

    def _create_or_update(self):
        """PUT the fixture DNS domain and verify the sample response."""
        body = {'project': self.project, 'scope': self.scope}
        resp = self._do_put('os-floating-ip-dns/%s' % self.domain,
                            'floating-ip-dns-create-or-update-req', body)
        body['domain'] = self.domain
        self._verify_response('floating-ip-dns-create-or-update-resp',
                              body, resp, 200)

    def _create_or_update_entry(self):
        """PUT a DNS entry under the fixture domain and verify it."""
        body = {'ip': self.ip, 'dns_type': self.dns_type}
        resp = self._do_put('os-floating-ip-dns/%s/entries/%s'
                            % (self.domain, self.name),
                            'floating-ip-dns-create-or-update-entry-req',
                            body)
        body['name'] = self.name
        body['domain'] = self.domain
        self._verify_response('floating-ip-dns-create-or-update-entry-resp',
                              body, resp, 200)

    def test_floating_ip_dns_list(self):
        """List DNS domains."""
        self._create_or_update()
        resp = self._do_get('os-floating-ip-dns')
        self._verify_response('floating-ip-dns-list-resp',
                              {'domain': self.domain,
                               'project': self.project,
                               'scope': self.scope},
                              resp, 200)

    def test_floating_ip_dns_create_or_update(self):
        """Create or update a DNS domain."""
        self._create_or_update()

    def test_floating_ip_dns_delete(self):
        """Delete a DNS domain; expect 202."""
        self._create_or_update()
        resp = self._do_delete('os-floating-ip-dns/%s' % self.domain)
        self.assertEqual(resp.status_code, 202)

    def test_floating_ip_dns_create_or_update_entry(self):
        """Create or update a DNS entry."""
        self._create_or_update_entry()

    def test_floating_ip_dns_entry_get(self):
        """Show a DNS entry by name."""
        self._create_or_update_entry()
        resp = self._do_get('os-floating-ip-dns/%s/entries/%s'
                            % (self.domain, self.name))
        self._verify_response('floating-ip-dns-entry-get-resp',
                              {'domain': self.domain,
                               'ip': self.ip,
                               'name': self.name},
                              resp, 200)

    def test_floating_ip_dns_entry_delete(self):
        """Delete a DNS entry; expect 202."""
        self._create_or_update_entry()
        resp = self._do_delete('os-floating-ip-dns/%s/entries/%s'
                               % (self.domain, self.name))
        self.assertEqual(resp.status_code, 202)

    def test_floating_ip_dns_entry_list(self):
        """List DNS entries matching the fixture IP."""
        self._create_or_update_entry()
        resp = self._do_get('os-floating-ip-dns/%s/entries/%s'
                            % (self.domain, self.ip))
        self._verify_response('floating-ip-dns-entry-list-resp',
                              {'domain': self.domain,
                               'ip': self.ip,
                               'name': self.name},
                              resp, 200)
diff --git a/nova/tests/unit/integrated/v3/test_floating_ip_pools.py b/nova/tests/unit/integrated/v3/test_floating_ip_pools.py
new file mode 100644
index 0000000000..4db76801c7
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_floating_ip_pools.py
@@ -0,0 +1,35 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.network import api as network_api
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
class FloatingIPPoolsSampleTests(api_sample_base.ApiSampleTestBaseV3):
    """API sample for listing floating IP pools."""

    extension_name = "os-floating-ip-pools"

    def test_list_floatingippools(self):
        """List pools with the network API stubbed to a fixed pair."""
        pools = ["pool1", "pool2"]

        def fake_get_floating_ip_pools(self, context):
            # Return canned pool names instead of touching the network.
            return pools

        self.stubs.Set(network_api.API, "get_floating_ip_pools",
                       fake_get_floating_ip_pools)
        resp = self._do_get('os-floating-ip-pools')
        self._verify_response('floatingippools-list-resp',
                              {'pool1': pools[0], 'pool2': pools[1]},
                              resp, 200)
diff --git a/nova/tests/unit/integrated/v3/test_floating_ips_bulk.py b/nova/tests/unit/integrated/v3/test_floating_ips_bulk.py
new file mode 100644
index 0000000000..9459b1bf61
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_floating_ips_bulk.py
@@ -0,0 +1,86 @@
+# Copyright 2014 IBM Corp.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from nova import context
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+CONF = cfg.CONF
+CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
+CONF.import_opt('public_interface', 'nova.network.linux_net')
+
+
class FloatingIpsBulkTest(api_sample_base.ApiSampleTestBaseV3):
    """API samples for the os-floating-ips-bulk admin extension.

    setUp seeds the database with a small, fixed set of floating IPs so
    the list samples have deterministic content.
    """

    extension_name = "os-floating-ips-bulk"

    def setUp(self):
        super(FloatingIpsBulkTest, self).setUp()
        pool = CONF.default_floating_pool
        interface = CONF.public_interface

        # Three addresses in the default pool; the last is pinned to
        # "testHost" so the list-by-host sample returns a non-empty result.
        self.ip_pool = [
            {
                'address': "10.10.10.1",
                'pool': pool,
                'interface': interface
            },
            {
                'address': "10.10.10.2",
                'pool': pool,
                'interface': interface
            },
            {
                'address': "10.10.10.3",
                'pool': pool,
                'interface': interface,
                'host': "testHost"
            },
        ]
        self.compute.db.floating_ip_bulk_create(
            context.get_admin_context(), self.ip_pool)

        # Remove the seeded rows even if a test fails part-way.
        self.addCleanup(self.compute.db.floating_ip_bulk_destroy,
                        context.get_admin_context(), self.ip_pool)

    def test_floating_ips_bulk_list(self):
        # API sample: list every floating IP in the database.
        response = self._do_get('os-floating-ips-bulk')
        subs = self._get_regexes()
        self._verify_response('floating-ips-bulk-list-resp',
                              subs, response, 200)

    def test_floating_ips_bulk_list_by_host(self):
        # API sample: list only the addresses assigned to "testHost".
        response = self._do_get('os-floating-ips-bulk/testHost')
        subs = self._get_regexes()
        self._verify_response('floating-ips-bulk-list-by-host-resp',
                              subs, response, 200)

    def test_floating_ips_bulk_create(self):
        # API sample: create a whole CIDR range of floating IPs at once.
        response = self._do_post('os-floating-ips-bulk',
                                 'floating-ips-bulk-create-req',
                                 {"ip_range": "192.168.1.0/24",
                                  "pool": CONF.default_floating_pool,
                                  "interface": CONF.public_interface})
        subs = self._get_regexes()
        self._verify_response('floating-ips-bulk-create-resp', subs,
                              response, 200)

    def test_floating_ips_bulk_delete(self):
        # API sample: bulk delete is a PUT on the "delete" action resource.
        response = self._do_put('os-floating-ips-bulk/delete',
                                'floating-ips-bulk-delete-req',
                                {"ip_range": "192.168.1.0/24"})
        subs = self._get_regexes()
        self._verify_response('floating-ips-bulk-delete-resp', subs,
                              response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_fping.py b/nova/tests/unit/integrated/v3/test_fping.py
new file mode 100644
index 0000000000..000c6d2484
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_fping.py
@@ -0,0 +1,45 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.api.openstack.compute.plugins.v3 import fping
+from nova.tests.unit.api.openstack.compute.contrib import test_fping
+from nova.tests.unit.integrated.v3 import test_servers
+from nova import utils
+
+
class FpingSampleJsonTests(test_servers.ServersSampleBase):
    """API samples for the os-fping extension."""

    extension_name = "os-fping"

    def setUp(self):
        super(FpingSampleJsonTests, self).setUp()

        def fake_check_fping(self):
            # Skip the controller's check that the fping binary exists.
            pass
        # Reuse the canned fping output from the contrib unit tests so no
        # real process is executed.
        self.stubs.Set(utils, "execute", test_fping.execute)
        self.stubs.Set(fping.FpingController, "check_fping",
                       fake_check_fping)

    def test_get_fping(self):
        # API sample: ping status for all servers.
        self._post_server()
        response = self._do_get('os-fping')
        subs = self._get_regexes()
        self._verify_response('fping-get-resp', subs, response, 200)

    def test_get_fping_details(self):
        # API sample: ping status for a single server.
        uuid = self._post_server()
        response = self._do_get('os-fping/%s' % (uuid))
        subs = self._get_regexes()
        self._verify_response('fping-get-details-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_hide_server_addresses.py b/nova/tests/unit/integrated/v3/test_hide_server_addresses.py
new file mode 100644
index 0000000000..908fef62d1
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_hide_server_addresses.py
@@ -0,0 +1,39 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from nova.compute import vm_states
+from nova.tests.unit.integrated.v3 import test_servers
+
+CONF = cfg.CONF
+CONF.import_opt('osapi_hide_server_address_states',
+ 'nova.api.openstack.compute.plugins.v3.hide_server_addresses')
+
+
class ServersSampleHideAddressesJsonTest(test_servers.ServersSampleJsonTest):
    """Re-run the server samples with address hiding enabled."""

    extension_name = 'os-hide-server-addresses'
    # Override the sample dirname because
    # test_servers.ServersSampleJsonTest does and so it won't default
    # to the extension name
    sample_dir = extension_name

    def setUp(self):
        # We override osapi_hide_server_address_states in order
        # to have an example in the json samples of the
        # addresses being hidden
        # NOTE(review): the override is applied before super().setUp(),
        # presumably so the API service reads it at startup — confirm.
        CONF.set_override("osapi_hide_server_address_states",
                          [vm_states.ACTIVE])
        super(ServersSampleHideAddressesJsonTest, self).setUp()
diff --git a/nova/tests/unit/integrated/v3/test_hosts.py b/nova/tests/unit/integrated/v3/test_hosts.py
new file mode 100644
index 0000000000..7142ee885a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_hosts.py
@@ -0,0 +1,57 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
class HostsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
    """API samples for the os-hosts admin extension."""

    extension_name = "os-hosts"

    def _get_and_verify(self, url, template):
        """GET *url* and verify the response against *template*."""
        resp = self._do_get(url)
        self._verify_response(template, self._get_regexes(), resp, 200)

    def test_host_startup(self):
        self._get_and_verify('os-hosts/%s/startup' % self.compute.host,
                             'host-get-startup')

    def test_host_reboot(self):
        self._get_and_verify('os-hosts/%s/reboot' % self.compute.host,
                             'host-get-reboot')

    def test_host_shutdown(self):
        self._get_and_verify('os-hosts/%s/shutdown' % self.compute.host,
                             'host-get-shutdown')

    def test_host_maintenance(self):
        resp = self._do_put('os-hosts/%s' % self.compute.host,
                            'host-put-maintenance-req', {})
        self._verify_response('host-put-maintenance-resp',
                              self._get_regexes(), resp, 200)

    def test_host_get(self):
        self._get_and_verify('os-hosts/%s' % self.compute.host,
                             'host-get-resp')

    def test_hosts_list(self):
        self._get_and_verify('os-hosts', 'hosts-list-resp')

    def test_hosts_list_compute_service(self):
        self._get_and_verify('os-hosts?service=compute',
                             'hosts-list-compute-service-resp')
diff --git a/nova/tests/unit/integrated/v3/test_hypervisors.py b/nova/tests/unit/integrated/v3/test_hypervisors.py
new file mode 100644
index 0000000000..f36f35ec84
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_hypervisors.py
@@ -0,0 +1,69 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute import api as compute_api
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
class HypervisorsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
    """API samples for the os-hypervisors admin extension."""

    extension_name = "os-hypervisors"

    def test_hypervisors_list(self):
        resp = self._do_get('os-hypervisors')
        self._verify_response('hypervisors-list-resp', {}, resp, 200)

    def test_hypervisors_search(self):
        resp = self._do_get('os-hypervisors/fake/search')
        self._verify_response('hypervisors-search-resp', {}, resp, 200)

    def test_hypervisors_servers(self):
        resp = self._do_get('os-hypervisors/fake/servers')
        self._verify_response('hypervisors-servers-resp', {}, resp, 200)

    def test_hypervisors_detail(self):
        subs = {'hypervisor_id': 1}
        resp = self._do_get('os-hypervisors/detail')
        subs.update(self._get_regexes())
        self._verify_response('hypervisors-detail-resp', subs, resp, 200)

    def test_hypervisors_show(self):
        hyp_id = 1
        subs = {'hypervisor_id': hyp_id}
        resp = self._do_get('os-hypervisors/%s' % hyp_id)
        subs.update(self._get_regexes())
        self._verify_response('hypervisors-show-resp', subs, resp, 200)

    def test_hypervisors_statistics(self):
        resp = self._do_get('os-hypervisors/statistics')
        self._verify_response('hypervisors-statistics-resp', {}, resp, 200)

    def test_hypervisors_uptime(self):
        def fake_get_host_uptime(self, context, hyp):
            # Canned uptime string so the sample is deterministic.
            return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
                    " 0.20, 0.12, 0.14")

        self.stubs.Set(compute_api.HostAPI,
                       'get_host_uptime', fake_get_host_uptime)
        hyp_id = 1
        resp = self._do_get('os-hypervisors/%s/uptime' % hyp_id)
        self._verify_response('hypervisors-uptime-resp',
                              {'hypervisor_id': hyp_id}, resp, 200)
diff --git a/nova/tests/unit/integrated/v3/test_image_size.py b/nova/tests/unit/integrated/v3/test_image_size.py
new file mode 100644
index 0000000000..8aeb08e9d8
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_image_size.py
@@ -0,0 +1,37 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
class ImageSizeSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
    """API samples for the image-size extension."""

    extension_name = "image-size"
    extra_extensions_to_load = ["images", "image-metadata"]

    def test_show(self):
        """Show one image, including its size attribute."""
        img_id = fake.get_valid_image_id()
        resp = self._do_get('images/%s' % img_id)
        subs = self._get_regexes()
        subs['image_id'] = img_id
        self._verify_response('image-get-resp', subs, resp, 200)

    def test_detail(self):
        """List all images with details."""
        resp = self._do_get('images/detail')
        self._verify_response('images-details-get-resp',
                              self._get_regexes(), resp, 200)
diff --git a/nova/tests/unit/integrated/v3/test_images.py b/nova/tests/unit/integrated/v3/test_images.py
new file mode 100644
index 0000000000..95dd0c971b
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_images.py
@@ -0,0 +1,85 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
class ImagesSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
    """API samples for the images proxy and image-metadata extensions."""

    extension_name = 'images'
    extra_extensions_to_load = ["image-metadata"]

    def test_images_list(self):
        """List image summaries."""
        resp = self._do_get('images')
        self._verify_response('images-list-get-resp',
                              self._get_regexes(), resp, 200)

    def test_image_get(self):
        """Show a single image."""
        img_id = fake.get_valid_image_id()
        resp = self._do_get('images/%s' % img_id)
        subs = self._get_regexes()
        subs['image_id'] = img_id
        self._verify_response('image-get-resp', subs, resp, 200)

    def test_images_details(self):
        """List all images with details."""
        resp = self._do_get('images/detail')
        self._verify_response('images-details-get-resp',
                              self._get_regexes(), resp, 200)

    def test_image_metadata_get(self):
        """Show the metadata of one image."""
        img_id = fake.get_valid_image_id()
        resp = self._do_get('images/%s/metadata' % img_id)
        subs = self._get_regexes()
        subs['image_id'] = img_id
        self._verify_response('image-metadata-get-resp', subs, resp, 200)

    def test_image_metadata_post(self):
        """Merge new metadata into an image."""
        img_id = fake.get_valid_image_id()
        resp = self._do_post(
            'images/%s/metadata' % img_id,
            'image-metadata-post-req', {})
        self._verify_response('image-metadata-post-resp',
                              self._get_regexes(), resp, 200)

    def test_image_metadata_put(self):
        """Replace the metadata of an image."""
        img_id = fake.get_valid_image_id()
        resp = self._do_put('images/%s/metadata' % img_id,
                            'image-metadata-put-req', {})
        self._verify_response('image-metadata-put-resp',
                              self._get_regexes(), resp, 200)

    def test_image_meta_key_get(self):
        """Show a single metadata key."""
        img_id = fake.get_valid_image_id()
        meta_key = "kernel_id"
        resp = self._do_get('images/%s/metadata/%s' % (img_id, meta_key))
        self._verify_response('image-meta-key-get',
                              self._get_regexes(), resp, 200)

    def test_image_meta_key_put(self):
        """Set a single metadata key."""
        img_id = fake.get_valid_image_id()
        meta_key = "auto_disk_config"
        resp = self._do_put('images/%s/metadata/%s' % (img_id, meta_key),
                            'image-meta-key-put-req', {})
        self._verify_response('image-meta-key-put-resp',
                              self._get_regexes(), resp, 200)
diff --git a/nova/tests/unit/integrated/v3/test_instance_actions.py b/nova/tests/unit/integrated/v3/test_instance_actions.py
new file mode 100644
index 0000000000..3285fa4a69
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_instance_actions.py
@@ -0,0 +1,84 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from nova.compute import api as compute_api
+from nova import db
+from nova.tests.unit import fake_server_actions
+from nova.tests.unit.integrated.v3 import api_sample_base
+from nova.tests.unit import utils as test_utils
+
+
class ServerActionsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
    """API samples for the os-instance-actions extension."""

    extension_name = 'os-instance-actions'

    def setUp(self):
        super(ServerActionsSampleJsonTest, self).setUp()
        # Canned fixtures: actions keyed by instance uuid / request id,
        # events keyed by action id.
        self.actions = fake_server_actions.FAKE_ACTIONS
        self.events = fake_server_actions.FAKE_EVENTS
        self.instance = test_utils.get_test_instance()

        def fake_instance_action_get_by_request_id(context, uuid, request_id):
            # Deep-copy so the handler cannot mutate the shared fixture.
            return copy.deepcopy(self.actions[uuid][request_id])

        def fake_server_actions_get(context, uuid):
            return [copy.deepcopy(value) for value in
                    self.actions[uuid].itervalues()]

        def fake_instance_action_events_get(context, action_id):
            return copy.deepcopy(self.events[action_id])

        def fake_instance_get_by_uuid(context, instance_id):
            return self.instance

        def fake_get(self, context, instance_uuid, **kwargs):
            return {'uuid': instance_uuid}

        # Stub every DB/compute call the extension makes so the samples
        # are generated entirely from the fixtures above.
        self.stubs.Set(db, 'action_get_by_request_id',
                       fake_instance_action_get_by_request_id)
        self.stubs.Set(db, 'actions_get', fake_server_actions_get)
        self.stubs.Set(db, 'action_events_get',
                       fake_instance_action_events_get)
        self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
        self.stubs.Set(compute_api.API, 'get', fake_get)

    def test_instance_action_get(self):
        # API sample: show a single action together with its events.
        fake_uuid = fake_server_actions.FAKE_UUID
        fake_request_id = fake_server_actions.FAKE_REQUEST_ID1
        fake_action = self.actions[fake_uuid][fake_request_id]

        response = self._do_get('servers/%s/os-instance-actions/%s' %
                                (fake_uuid, fake_request_id))
        subs = self._get_regexes()
        subs['action'] = '(reboot)|(resize)'
        subs['instance_uuid'] = fake_uuid
        subs['integer_id'] = '[0-9]+'
        subs['request_id'] = fake_action['request_id']
        subs['start_time'] = fake_action['start_time']
        subs['result'] = '(Success)|(Error)'
        subs['event'] = '(schedule)|(compute_create)'
        self._verify_response('instance-action-get-resp', subs, response, 200)

    def test_instance_actions_list(self):
        # API sample: list every action recorded for one instance.
        fake_uuid = fake_server_actions.FAKE_UUID
        response = self._do_get('servers/%s/os-instance-actions' % (fake_uuid))
        subs = self._get_regexes()
        subs['action'] = '(reboot)|(resize)'
        subs['integer_id'] = '[0-9]+'
        subs['request_id'] = ('req-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
                              '-[0-9a-f]{4}-[0-9a-f]{12}')
        self._verify_response('instance-actions-list-resp', subs,
                              response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_keypairs.py b/nova/tests/unit/integrated/v3/test_keypairs.py
new file mode 100644
index 0000000000..d079407985
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_keypairs.py
@@ -0,0 +1,72 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
class KeyPairsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
    """API samples for the os-keypairs resource."""

    sample_dir = "keypairs"

    def generalize_subs(self, subs, vanilla_regexes):
        # Generated key names are random, so match any generated name.
        subs['keypair_name'] = 'keypair-[0-9a-f-]+'
        return subs

    def test_keypairs_post(self, public_key=None):
        """Get api sample of key pairs post request."""
        name = 'keypair-' + str(uuid.uuid4())
        response = self._do_post('os-keypairs', 'keypairs-post-req',
                                 {'keypair_name': name})
        subs = self._get_regexes()
        subs['keypair_name'] = '(%s)' % name
        self._verify_response('keypairs-post-resp', subs, response, 200)
        # NOTE(maurosr): return the key_name is necessary cause the
        # verification returns the label of the last compared information in
        # the response, not necessarily the key name.
        return name

    def test_keypairs_import_key_post(self):
        # Get api sample of key pairs post to import user's key.
        name = 'keypair-' + str(uuid.uuid4())
        req_subs = {
            'keypair_name': name,
            'public_key': "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGg"
                          "B4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0l"
                          "RE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv"
                          "9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYc"
                          "pSxsIbECHw== Generated-by-Nova"
        }
        response = self._do_post('os-keypairs', 'keypairs-import-post-req',
                                 req_subs)
        subs = self._get_regexes()
        subs['keypair_name'] = '(%s)' % name
        self._verify_response('keypairs-import-post-resp', subs, response, 200)

    def test_keypairs_list(self):
        # Get api sample of key pairs list request.
        name = self.test_keypairs_post()
        response = self._do_get('os-keypairs')
        subs = self._get_regexes()
        subs['keypair_name'] = '(%s)' % name
        self._verify_response('keypairs-list-resp', subs, response, 200)

    def test_keypairs_get(self):
        # Get api sample of key pairs get request.
        name = self.test_keypairs_post()
        response = self._do_get('os-keypairs/%s' % name)
        subs = self._get_regexes()
        subs['keypair_name'] = '(%s)' % name
        self._verify_response('keypairs-get-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_lock_server.py b/nova/tests/unit/integrated/v3/test_lock_server.py
new file mode 100644
index 0000000000..0eb9676fbf
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_lock_server.py
@@ -0,0 +1,41 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
class LockServerSamplesJsonTest(test_servers.ServersSampleBase):
    """API samples for locking and unlocking a server."""

    extension_name = "os-lock-server"

    def setUp(self):
        # Every test acts on the same freshly-created server.
        super(LockServerSamplesJsonTest, self).setUp()
        self.uuid = self._post_server()

    def _server_action(self, sample):
        # POST the named sample body (no substitutions) to the server's
        # action endpoint and return the raw response.
        return self._do_post('servers/%s/action' % self.uuid, sample, {})

    def test_post_lock_server(self):
        # Get api samples to lock server request.
        self.assertEqual(202, self._server_action('lock-server').status_code)

    def test_post_unlock_server(self):
        # Lock first, then verify unlock also replies 202.
        self.test_post_lock_server()
        self.assertEqual(202,
                         self._server_action('unlock-server').status_code)
diff --git a/nova/tests/unit/integrated/v3/test_migrate_server.py b/nova/tests/unit/integrated/v3/test_migrate_server.py
new file mode 100644
index 0000000000..a43703fbc2
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_migrate_server.py
@@ -0,0 +1,71 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.conductor import manager as conductor_manager
+from nova import db
+from nova.tests.unit.integrated.v3 import test_servers
+from nova import utils
+
+
class MigrateServerSamplesJsonTest(test_servers.ServersSampleBase):
    """API samples for cold and live server migration."""

    extension_name = "os-migrate-server"
    ctype = 'json'

    def setUp(self):
        # Create the server that each migration test acts on.
        super(MigrateServerSamplesJsonTest, self).setUp()
        self.uuid = self._post_server()

    @mock.patch('nova.conductor.manager.ComputeTaskManager._cold_migrate')
    def test_post_migrate(self, mock_cold_migrate):
        # Get api samples to migrate server request; the conductor's cold
        # migrate path is mocked out so no real migration happens.
        response = self._do_post('servers/%s/action' % self.uuid,
                                 'migrate-server', {})
        self.assertEqual(202, response.status_code)

    def test_post_live_migrate_server(self):
        # Get api samples to server live migrate request.
        def fake_live_migrate(_self, context, instance, scheduler_hint,
                              block_migration, disk_over_commit):
            # The stub only checks that the right instance and target
            # host reached the conductor.
            self.assertEqual(self.uuid, instance["uuid"])
            self.assertEqual(self.compute.host, scheduler_hint["host"])

        self.stubs.Set(conductor_manager.ComputeTaskManager,
                       '_live_migrate', fake_live_migrate)

        def fake_get_compute(context, host):
            # Minimal compute service record for the target host lookup.
            service = {
                'host': host,
                'binary': 'nova-compute',
                'topic': 'compute',
                'report_count': 1,
                'updated_at': 'foo',
                'hypervisor_type': 'bar',
                'hypervisor_version': utils.convert_version_to_int('1.0'),
                'disabled': False,
            }
            return {'compute_node': [service]}

        self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute)

        response = self._do_post('servers/%s/action' % self.uuid,
                                 'live-migrate-server',
                                 {'hostname': self.compute.host})
        self.assertEqual(202, response.status_code)
diff --git a/nova/tests/unit/integrated/v3/test_migrations.py b/nova/tests/unit/integrated/v3/test_migrations.py
new file mode 100644
index 0000000000..ab8b214f6e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_migrations.py
@@ -0,0 +1,72 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from nova.compute import api as compute_api
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
class MigrationsSamplesJsonTest(api_sample_base.ApiSampleTestBaseV3):
    """API samples for listing migrations."""

    extension_name = "os-migrations"

    def _stub_migrations(self, context, filters):
        # Canned data standing in for compute_api.API.get_migrations();
        # the supplied filters are deliberately ignored.
        common = {'status': 'Done', 'deleted_at': None, 'deleted': False}
        first = dict(common,
                     id=1234,
                     source_node='node1',
                     dest_node='node2',
                     source_compute='compute1',
                     dest_compute='compute2',
                     dest_host='1.2.3.4',
                     instance_uuid='instance_id_123',
                     old_instance_type_id=1,
                     new_instance_type_id=2,
                     created_at=datetime.datetime(2012, 10, 29, 13, 42, 2),
                     updated_at=datetime.datetime(2012, 10, 29, 13, 42, 2))
        second = dict(common,
                      id=5678,
                      source_node='node10',
                      dest_node='node20',
                      source_compute='compute10',
                      dest_compute='compute20',
                      dest_host='5.6.7.8',
                      instance_uuid='instance_id_456',
                      old_instance_type_id=5,
                      new_instance_type_id=6,
                      created_at=datetime.datetime(2013, 10, 22, 13, 42, 2),
                      updated_at=datetime.datetime(2013, 10, 22, 13, 42, 2))
        return [first, second]

    def setUp(self):
        super(MigrationsSamplesJsonTest, self).setUp()
        self.stubs.Set(compute_api.API, 'get_migrations',
                       self._stub_migrations)

    def test_get_migrations(self):
        response = self._do_get('os-migrations')
        subs = self._get_regexes()

        self.assertEqual(200, response.status_code)
        self._verify_response('migrations-get', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_multinic.py b/nova/tests/unit/integrated/v3/test_multinic.py
new file mode 100644
index 0000000000..3d55387632
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_multinic.py
@@ -0,0 +1,49 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
class MultinicSampleJsonTest(test_servers.ServersSampleBase):
    """API samples for adding/removing fixed IPs on a server."""

    extension_name = "os-multinic"

    def _disable_instance_dns_manager(self):
        # NOTE(markmc): it looks like multinic and instance_dns_manager are
        # incompatible. See:
        # https://bugs.launchpad.net/nova/+bug/1213251
        self.flags(
            instance_dns_manager='nova.network.noop_dns_driver.NoopDNSDriver')

    def setUp(self):
        # DNS manager must be disabled before the base setUp runs.
        self._disable_instance_dns_manager()
        super(MultinicSampleJsonTest, self).setUp()
        self.uuid = self._post_server()

    def _add_fixed_ip(self):
        # Attach a fixed IP from network 1 to the server.
        response = self._do_post('servers/%s/action' % self.uuid,
                                 'multinic-add-fixed-ip-req',
                                 {"networkId": 1})
        self.assertEqual(202, response.status_code)

    def test_add_fixed_ip(self):
        self._add_fixed_ip()

    def test_remove_fixed_ip(self):
        # Add an IP first so there is something to remove.
        self._add_fixed_ip()
        response = self._do_post('servers/%s/action' % self.uuid,
                                 'multinic-remove-fixed-ip-req',
                                 {"ip": "10.0.0.4"})
        self.assertEqual(202, response.status_code)
diff --git a/nova/tests/unit/integrated/v3/test_multiple_create.py b/nova/tests/unit/integrated/v3/test_multiple_create.py
new file mode 100644
index 0000000000..76c2083b0d
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_multiple_create.py
@@ -0,0 +1,45 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import test_servers
+
+
class MultipleCreateJsonTest(test_servers.ServersSampleBase):
    """API samples for creating several servers in a single request."""

    extension_name = "os-multiple-create"

    def _create_subs(self):
        # Template substitutions shared by both multiple-create requests;
        # returned fresh each call so tests may update() it freely.
        return {
            'image_id': fake.get_valid_image_id(),
            'host': self._get_host(),
            'min_count': "2",
            'max_count': "3",
        }

    def test_multiple_create(self):
        # Multiple-create with a reservation id in the response.
        subs = self._create_subs()
        response = self._do_post('servers', 'multiple-create-post-req', subs)
        subs.update(self._get_regexes())
        self._verify_response('multiple-create-post-resp', subs, response, 202)

    def test_multiple_create_without_reservation_id(self):
        # Same request without asking for a reservation id back.
        subs = self._create_subs()
        response = self._do_post('servers', 'multiple-create-no-resv-post-req',
                                 subs)
        subs.update(self._get_regexes())
        self._verify_response('multiple-create-no-resv-post-resp', subs,
                              response, 202)
diff --git a/nova/tests/unit/integrated/v3/test_networks.py b/nova/tests/unit/integrated/v3/test_networks.py
new file mode 100644
index 0000000000..555c682c78
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_networks.py
@@ -0,0 +1,73 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.network import api as network_api
+from nova.tests.unit.api.openstack.compute.contrib import test_networks
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
class NetworksJsonTests(api_sample_base.ApiSampleTestBaseV3):
    """API samples for the os-networks extension."""

    extension_name = "os-networks"

    def setUp(self):
        super(NetworksJsonTests, self).setUp()
        # Route every network API call the extension makes to the fake
        # implementation shared with the contrib unit tests.
        fake_api = test_networks.FakeNetworkAPI()
        for name in ('get_all', 'get', 'associate', 'delete', 'create',
                     'add_network_to_project'):
            self.stubs.Set(network_api.API, name, getattr(fake_api, name))

    def test_network_list(self):
        response = self._do_get('os-networks')
        self._verify_response('networks-list-resp', self._get_regexes(),
                              response, 200)

    def test_network_disassociate(self):
        uuid = test_networks.FAKE_NETWORKS[0]['uuid']
        response = self._do_post('os-networks/%s/action' % uuid,
                                 'networks-disassociate-req', {})
        self.assertEqual(202, response.status_code)
        self.assertEqual("", response.content)

    def test_network_show(self):
        uuid = test_networks.FAKE_NETWORKS[0]['uuid']
        response = self._do_get('os-networks/%s' % uuid)
        self._verify_response('network-show-resp', self._get_regexes(),
                              response, 200)

    def test_network_create(self):
        response = self._do_post("os-networks", 'network-create-req', {})
        self._verify_response('network-create-resp', self._get_regexes(),
                              response, 200)

    def test_network_add(self):
        response = self._do_post("os-networks/add", 'network-add-req', {})
        self.assertEqual(202, response.status_code)
        self.assertEqual("", response.content)

    def test_network_delete(self):
        response = self._do_delete('os-networks/always_delete')
        self.assertEqual(202, response.status_code)
        self.assertEqual("", response.content)
diff --git a/nova/tests/unit/integrated/v3/test_networks_associate.py b/nova/tests/unit/integrated/v3/test_networks_associate.py
new file mode 100644
index 0000000000..fe109d4d6c
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_networks_associate.py
@@ -0,0 +1,76 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from nova.network import api as network_api
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+CONF = cfg.CONF
+CONF.import_opt('osapi_compute_extension',
+ 'nova.api.openstack.compute.extensions')
+
+
class NetworksAssociateJsonTests(api_sample_base.ApiSampleTestBaseV3):
    """API samples for the os-networks-associate actions."""

    extension_name = "os-networks-associate"
    extra_extensions_to_load = ["os-networks"]

    # Sentinel distinguishing "argument not supplied" from an explicit None
    # in the stubbed associate() signature.
    _sentinel = object()

    def _get_flags(self):
        f = super(NetworksAssociateJsonTests, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        # Networks_associate requires Networks to be update
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.os_networks.Os_networks')
        return f

    def setUp(self):
        super(NetworksAssociateJsonTests, self).setUp()

        def fake_associate(self, context, network_id,
                           host=NetworksAssociateJsonTests._sentinel,
                           project=NetworksAssociateJsonTests._sentinel):
            return True

        self.stubs.Set(network_api.API, "associate", fake_associate)

    def _post_action(self, sample, subs=None):
        # POST the named sample to network 1's action endpoint and check
        # for an empty 202 response; shared by all four tests below.
        response = self._do_post('os-networks/1/action', sample, subs or {})
        self.assertEqual(response.status_code, 202)
        self.assertEqual(response.content, "")

    def test_disassociate(self):
        self._post_action('network-disassociate-req')

    def test_disassociate_host(self):
        self._post_action('network-disassociate-host-req')

    def test_disassociate_project(self):
        self._post_action('network-disassociate-project-req')

    def test_associate_host(self):
        self._post_action('network-associate-host-req', {"host": "testHost"})
diff --git a/nova/tests/unit/integrated/v3/test_pause_server.py b/nova/tests/unit/integrated/v3/test_pause_server.py
new file mode 100644
index 0000000000..4993dc1048
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_pause_server.py
@@ -0,0 +1,41 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
class PauseServerSamplesJsonTest(test_servers.ServersSampleBase):
    """API samples for pausing and unpausing a server."""

    extension_name = "os-pause-server"

    def setUp(self):
        # Create the server each pause/unpause test acts on.
        super(PauseServerSamplesJsonTest, self).setUp()
        self.uuid = self._post_server()

    def _server_action(self, sample):
        # POST the named sample body to the server's action endpoint.
        return self._do_post('servers/%s/action' % self.uuid, sample, {})

    def test_post_pause(self):
        # Get api samples to pause server request.
        self.assertEqual(202, self._server_action('pause-server').status_code)

    def test_post_unpause(self):
        # Pause first, then verify unpause also replies 202.
        self.test_post_pause()
        self.assertEqual(202,
                         self._server_action('unpause-server').status_code)
diff --git a/nova/tests/unit/integrated/v3/test_pci.py b/nova/tests/unit/integrated/v3/test_pci.py
new file mode 100644
index 0000000000..bb655a0ef0
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_pci.py
@@ -0,0 +1,182 @@
+# Copyright 2013 Intel.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova import db
+from nova.tests.unit.integrated.v3 import api_sample_base
+from nova.tests.unit.integrated.v3 import test_servers
+
+
# Canned rows shaped like nova's pci_devices DB table; returned by the
# stubbed db calls in PciSampleJsonTest below in place of real lookups.
# NOTE(review): 'extra_info' holds a JSON-encoded string, not a dict —
# mirroring how the column is stored.
fake_db_dev_1 = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': None,
    'id': 1,
    'compute_node_id': 1,
    'address': '0000:04:10.0',
    'vendor_id': '8086',
    'product_id': '1520',
    'dev_type': 'type-VF',
    'status': 'available',
    'dev_id': 'pci_0000_04_10_0',
    'label': 'label_8086_1520',
    'instance_uuid': '69ba1044-0766-4ec0-b60d-09595de034a1',
    'request_id': None,
    'extra_info': '{"key1": "value1", "key2": "value2"}'
    }

# Second device on the same compute node (compute_node_id 1), attached to
# a different instance.
fake_db_dev_2 = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': None,
    'id': 2,
    'compute_node_id': 1,
    'address': '0000:04:10.1',
    'vendor_id': '8086',
    'product_id': '1520',
    'dev_type': 'type-VF',
    'status': 'available',
    'dev_id': 'pci_0000_04_10_1',
    'label': 'label_8086_1520',
    'instance_uuid': 'd5b446a6-a1b4-4d01-b4f0-eac37b3a62fc',
    'request_id': None,
    'extra_info': '{"key3": "value3", "key4": "value4"}'
    }
+
+
class ExtendedServerPciSampleJsonTest(test_servers.ServersSampleBase):
    """Server show/detail samples with the os-pci extension loaded."""

    extension_name = "os-pci"

    def test_show(self):
        server_id = self._post_server()
        response = self._do_get('servers/%s' % server_id)
        subs = dict(self._get_regexes(), hostid='[a-f0-9]+')
        self._verify_response('server-get-resp', subs, response, 200)

    def test_detail(self):
        self._post_server()
        response = self._do_get('servers/detail')
        subs = dict(self._get_regexes(), hostid='[a-f0-9]+')
        self._verify_response('servers-detail-resp', subs, response, 200)
+
+
# NOTE(review): class name misspells "Hypervisor"; left unchanged since
# test selection may reference it by name.
class ExtendedHyervisorPciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
    """Hypervisor show/detail samples with PCI stats attached."""

    extra_extensions_to_load = ['os-hypervisors']
    extension_name = 'os-pci'

    def setUp(self):
        super(ExtendedHyervisorPciSampleJsonTest, self).setUp()
        # Canned compute-node row returned by the stubbed db calls below.
        # pci_stats starts as a list of dicts; each test JSON-encodes it
        # before handing the row to the mocked db call, matching how the
        # column is stored. setUp runs per test, so the in-place mutation
        # in the tests does not leak between them.
        self.fake_compute_node = {"cpu_info": "?",
                                  "current_workload": 0,
                                  "disk_available_least": 0,
                                  "host_ip": "1.1.1.1",
                                  "state": "up",
                                  "status": "enabled",
                                  "free_disk_gb": 1028,
                                  "free_ram_mb": 7680,
                                  "hypervisor_hostname": "fake-mini",
                                  "hypervisor_type": "fake",
                                  "hypervisor_version": 1000,
                                  "id": 1,
                                  "local_gb": 1028,
                                  "local_gb_used": 0,
                                  "memory_mb": 8192,
                                  "memory_mb_used": 512,
                                  "running_vms": 0,
                                  "service": {"host": '043b3cacf6f34c90a'
                                                      '7245151fc8ebcda',
                                              "disabled": False,
                                              "disabled_reason": None},
                                  "vcpus": 1,
                                  "vcpus_used": 0,
                                  "service_id": 2,
                                  "pci_stats": [
                                      {"count": 5,
                                       "vendor_id": "8086",
                                       "product_id": "1520",
                                       "keya": "valuea",
                                       "extra_info": {
                                           "phys_function": '[["0x0000", '
                                                            '"0x04", "0x00",'
                                                            ' "0x1"]]',
                                           "key1": "value1"}}]}

    # Decorators apply bottom-up: mock_db patches compute_node_get,
    # mock_service patches service_is_up.
    @mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
    @mock.patch("nova.db.compute_node_get")
    def test_pci_show(self, mock_db, mock_service):
        self.fake_compute_node['pci_stats'] = jsonutils.dumps(
            self.fake_compute_node['pci_stats'])
        mock_db.return_value = self.fake_compute_node
        hypervisor_id = 1
        response = self._do_get('os-hypervisors/%s' % hypervisor_id)
        subs = {
            'hypervisor_id': hypervisor_id,
        }
        subs.update(self._get_regexes())
        self._verify_response('hypervisors-pci-show-resp',
                              subs, response, 200)

    @mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
    @mock.patch("nova.db.compute_node_get_all")
    def test_pci_detail(self, mock_db, mock_service):
        self.fake_compute_node['pci_stats'] = jsonutils.dumps(
            self.fake_compute_node['pci_stats'])

        mock_db.return_value = [self.fake_compute_node]
        hypervisor_id = 1
        subs = {
            'hypervisor_id': hypervisor_id
        }
        response = self._do_get('os-hypervisors/detail')

        subs.update(self._get_regexes())
        self._verify_response('hypervisors-pci-detail-resp',
                              subs, response, 200)
+
+
class PciSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
    """API samples for the os-pci device listing endpoints."""

    extension_name = "os-pci"

    def _fake_pci_device_get_by_id(self, context, id):
        return fake_db_dev_1

    def _fake_pci_device_get_all_by_node(self, context, id):
        return [fake_db_dev_1, fake_db_dev_2]

    def _verify_get(self, url, sample):
        # GET *url* and compare the response with the stored *sample*.
        response = self._do_get(url)
        self._verify_response(sample, self._get_regexes(), response, 200)

    def test_pci_show(self):
        self.stubs.Set(db, 'pci_device_get_by_id',
                       self._fake_pci_device_get_by_id)
        self._verify_get('os-pci/1', 'pci-show-resp')

    def test_pci_index(self):
        self.stubs.Set(db, 'pci_device_get_all_by_node',
                       self._fake_pci_device_get_all_by_node)
        self._verify_get('os-pci', 'pci-index-resp')

    def test_pci_detail(self):
        self.stubs.Set(db, 'pci_device_get_all_by_node',
                       self._fake_pci_device_get_all_by_node)
        self._verify_get('os-pci/detail', 'pci-detail-resp')
diff --git a/nova/tests/unit/integrated/v3/test_quota_sets.py b/nova/tests/unit/integrated/v3/test_quota_sets.py
new file mode 100644
index 0000000000..8848fd9d2a
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_quota_sets.py
@@ -0,0 +1,70 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
class QuotaSetsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
    """API samples for tenant- and user-level quota sets."""

    extension_name = "os-quota-sets"

    def test_show_quotas(self):
        # Get api sample to show quotas.
        response = self._do_get('os-quota-sets/fake_tenant')
        self._verify_response('quotas-show-get-resp', {}, response, 200)

    def test_show_quotas_defaults(self):
        # Get api sample to show quotas defaults.
        response = self._do_get('os-quota-sets/fake_tenant/defaults')
        self._verify_response('quotas-show-defaults-get-resp',
                              {}, response, 200)

    def test_update_quotas(self):
        # Get api sample to update quotas.
        response = self._do_put('os-quota-sets/fake_tenant',
                                'quotas-update-post-req', {})
        self._verify_response('quotas-update-post-resp', {}, response, 200)

    def test_delete_quotas(self):
        # Deleting a tenant's quotas replies 202 with an empty body.
        response = self._do_delete('os-quota-sets/fake_tenant')
        self.assertEqual(202, response.status_code)
        self.assertEqual('', response.content)

    def test_update_quotas_force(self):
        # Force-update quotas, bypassing the usage checks.
        response = self._do_put('os-quota-sets/fake_tenant',
                                'quotas-update-force-post-req', {})
        return self._verify_response('quotas-update-force-post-resp', {},
                                     response, 200)

    def test_show_quotas_for_user(self):
        # Get api sample to show quotas for user.
        response = self._do_get('os-quota-sets/fake_tenant?user_id=1')
        self._verify_response('user-quotas-show-get-resp', {}, response, 200)

    def test_delete_quotas_for_user(self):
        response = self._do_delete('os-quota-sets/fake_tenant?user_id=1')
        self.assertEqual(202, response.status_code)
        self.assertEqual('', response.content)

    def test_update_quotas_for_user(self):
        # Get api sample to update quotas for user.
        response = self._do_put('os-quota-sets/fake_tenant?user_id=1',
                                'user-quotas-update-post-req', {})
        return self._verify_response('user-quotas-update-post-resp', {},
                                     response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_remote_consoles.py b/nova/tests/unit/integrated/v3/test_remote_consoles.py
new file mode 100644
index 0000000000..6f35aafb34
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_remote_consoles.py
@@ -0,0 +1,70 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
class ConsolesSampleJsonTests(test_servers.ServersSampleBase):
    """API samples for the four remote-console types."""

    extension_name = "os-remote-consoles"

    # Matches the http(s) console access URL returned by the API; the
    # same pattern was previously duplicated in three tests.
    _HTTP_URL_RE = ("((https?):((//)|(\\\\))+"
                    "([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)")
    # Serial consoles hand back a websocket URL instead.
    _WS_URL_RE = ("((ws?):((//)|(\\\\))+"
                  "([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)")

    def setUp(self):
        super(ConsolesSampleJsonTests, self).setUp()
        # Every console type must be switched on for its API to respond.
        self.flags(vnc_enabled=True)
        self.flags(enabled=True, group='spice')
        self.flags(enabled=True, group='rdp')
        self.flags(enabled=True, group='serial_console')

    def _get_console(self, action, req_sample, resp_sample, url_re):
        # Create a server, request a console of the given type, and
        # verify the response against the stored sample.
        uuid = self._post_server()
        response = self._do_post('servers/%s/action' % uuid,
                                 req_sample, {'action': action})
        subs = self._get_regexes()
        subs["url"] = url_re
        self._verify_response(resp_sample, subs, response, 200)

    def test_get_vnc_console(self):
        self._get_console('os-getVNCConsole',
                          'get-vnc-console-post-req',
                          'get-vnc-console-post-resp',
                          self._HTTP_URL_RE)

    def test_get_spice_console(self):
        self._get_console('os-getSPICEConsole',
                          'get-spice-console-post-req',
                          'get-spice-console-post-resp',
                          self._HTTP_URL_RE)

    def test_get_rdp_console(self):
        self._get_console('os-getRDPConsole',
                          'get-rdp-console-post-req',
                          'get-rdp-console-post-resp',
                          self._HTTP_URL_RE)

    def test_get_serial_console(self):
        self._get_console('os-getSerialConsole',
                          'get-serial-console-post-req',
                          'get-serial-console-post-resp',
                          self._WS_URL_RE)
diff --git a/nova/tests/unit/integrated/v3/test_rescue.py b/nova/tests/unit/integrated/v3/test_rescue.py
new file mode 100644
index 0000000000..65532607d5
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_rescue.py
@@ -0,0 +1,82 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class RescueJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-rescue"
+
+ def _rescue(self, uuid):
+ req_subs = {
+ 'password': 'MySecretPass'
+ }
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-rescue-req', req_subs)
+ self._verify_response('server-rescue', req_subs, response, 200)
+
+ def _unrescue(self, uuid):
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-unrescue-req', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_server_rescue(self):
+ uuid = self._post_server()
+
+ self._rescue(uuid)
+
+ # Do a server get to make sure that the 'RESCUE' state is set
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['status'] = 'RESCUE'
+
+ self._verify_response('server-get-resp-rescue', subs, response, 200)
+
+ def test_server_rescue_with_image_ref_specified(self):
+ uuid = self._post_server()
+
+ req_subs = {
+ 'password': 'MySecretPass',
+ 'image_ref': '2341-Abc'
+ }
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-rescue-req-with-image-ref', req_subs)
+ self._verify_response('server-rescue', req_subs, response, 200)
+
+ # Do a server get to make sure that the 'RESCUE' state is set
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['status'] = 'RESCUE'
+
+ self._verify_response('server-get-resp-rescue', subs, response, 200)
+
+ def test_server_unrescue(self):
+ uuid = self._post_server()
+
+ self._rescue(uuid)
+ self._unrescue(uuid)
+
+ # Do a server get to make sure that the 'ACTIVE' state is back
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['status'] = 'ACTIVE'
+
+ self._verify_response('server-get-resp-unrescue', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_scheduler_hints.py b/nova/tests/unit/integrated/v3/test_scheduler_hints.py
new file mode 100644
index 0000000000..6ecea5efc7
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_scheduler_hints.py
@@ -0,0 +1,32 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class SchedulerHintsJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-scheduler-hints"
+
+ def test_scheduler_hints_post(self):
+ # Get api sample of scheduler hint post request.
+ subs = self._get_regexes()
+ subs.update({'image_id': fake.get_valid_image_id(),
+ 'image_near': str(uuid.uuid4())})
+ response = self._do_post('servers', 'scheduler-hints-post-req',
+ subs)
+ self._verify_response('scheduler-hints-post-resp', subs, response, 202)
diff --git a/nova/tests/unit/integrated/v3/test_security_group_default_rules.py b/nova/tests/unit/integrated/v3/test_security_group_default_rules.py
new file mode 100644
index 0000000000..e0c2ec8132
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_security_group_default_rules.py
@@ -0,0 +1,40 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class SecurityGroupDefaultRulesSampleJsonTest(
+ api_sample_base.ApiSampleTestBaseV3):
+ extension_name = 'os-security-group-default-rules'
+
+ def test_security_group_default_rules_create(self):
+ response = self._do_post('os-security-group-default-rules',
+ 'security-group-default-rules-create-req',
+ {})
+ self._verify_response('security-group-default-rules-create-resp',
+ {}, response, 200)
+
+ def test_security_group_default_rules_list(self):
+ self.test_security_group_default_rules_create()
+ response = self._do_get('os-security-group-default-rules')
+ self._verify_response('security-group-default-rules-list-resp',
+ {}, response, 200)
+
+ def test_security_group_default_rules_show(self):
+ self.test_security_group_default_rules_create()
+ rule_id = '1'
+ response = self._do_get('os-security-group-default-rules/%s' % rule_id)
+ self._verify_response('security-group-default-rules-show-resp',
+ {}, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_security_groups.py b/nova/tests/unit/integrated/v3/test_security_groups.py
new file mode 100644
index 0000000000..3afb26a06f
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_security_groups.py
@@ -0,0 +1,166 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.network.security_group import neutron_driver
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+def fake_get(*args, **kwargs):
+ nova_group = {}
+ nova_group['id'] = 1
+ nova_group['description'] = 'default'
+ nova_group['name'] = 'default'
+ nova_group['project_id'] = 'openstack'
+ nova_group['rules'] = []
+ return nova_group
+
+
+def fake_get_instances_security_groups_bindings(self, context, servers,
+ detailed=False):
+ result = {}
+ for s in servers:
+ result[s.get('id')] = [{'name': 'test'}]
+ return result
+
+
+def fake_add_to_instance(self, context, instance, security_group_name):
+ pass
+
+
+def fake_remove_from_instance(self, context, instance, security_group_name):
+ pass
+
+
+def fake_list(self, context, names=None, ids=None, project=None,
+ search_opts=None):
+ return [fake_get()]
+
+
+def fake_get_instance_security_groups(self, context, instance_uuid,
+ detailed=False):
+ return [fake_get()]
+
+
+def fake_create_security_group(self, context, name, description):
+ return fake_get()
+
+
+class SecurityGroupsJsonTest(test_servers.ServersSampleBase):
+ extension_name = 'os-security-groups'
+
+ def setUp(self):
+ self.flags(security_group_api=('neutron'))
+ super(SecurityGroupsJsonTest, self).setUp()
+ self.stubs.Set(neutron_driver.SecurityGroupAPI, 'get', fake_get)
+ self.stubs.Set(neutron_driver.SecurityGroupAPI,
+ 'get_instances_security_groups_bindings',
+ fake_get_instances_security_groups_bindings)
+ self.stubs.Set(neutron_driver.SecurityGroupAPI,
+ 'add_to_instance',
+ fake_add_to_instance)
+ self.stubs.Set(neutron_driver.SecurityGroupAPI,
+ 'remove_from_instance',
+ fake_remove_from_instance)
+ self.stubs.Set(neutron_driver.SecurityGroupAPI,
+ 'list',
+ fake_list)
+ self.stubs.Set(neutron_driver.SecurityGroupAPI,
+ 'get_instance_security_groups',
+ fake_get_instance_security_groups)
+ self.stubs.Set(neutron_driver.SecurityGroupAPI,
+ 'create_security_group',
+ fake_create_security_group)
+
+ def test_server_create(self):
+ self._post_server()
+
+ def test_server_get(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_server_detail(self):
+ self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('servers-detail-resp', subs, response, 200)
+
+ def _get_create_subs(self):
+ return {
+ 'group_name': 'default',
+ "description": "default",
+ }
+
+ def _create_security_group(self):
+ subs = self._get_create_subs()
+ return self._do_post('os-security-groups',
+ 'security-group-post-req', subs)
+
+ def _add_group(self, uuid):
+ subs = {
+ 'group_name': 'test'
+ }
+ return self._do_post('servers/%s/action' % uuid,
+ 'security-group-add-post-req', subs)
+
+ def test_security_group_create(self):
+ response = self._create_security_group()
+ subs = self._get_create_subs()
+ self._verify_response('security-groups-create-resp', subs,
+ response, 200)
+
+ def test_security_groups_list(self):
+ # Get api sample of security groups get list request.
+ response = self._do_get('os-security-groups')
+ subs = self._get_regexes()
+ self._verify_response('security-groups-list-get-resp',
+ subs, response, 200)
+
+ def test_security_groups_get(self):
+ # Get api sample of security groups get request.
+ security_group_id = '11111111-1111-1111-1111-111111111111'
+ response = self._do_get('os-security-groups/%s' % security_group_id)
+ subs = self._get_regexes()
+ self._verify_response('security-groups-get-resp', subs, response, 200)
+
+ def test_security_groups_list_server(self):
+ # Get api sample of security groups for a specific server.
+ uuid = self._post_server()
+ response = self._do_get('servers/%s/os-security-groups' % uuid)
+ subs = self._get_regexes()
+ self._verify_response('server-security-groups-list-resp',
+ subs, response, 200)
+
+ def test_security_groups_add(self):
+ self._create_security_group()
+ uuid = self._post_server()
+ response = self._add_group(uuid)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+ def test_security_groups_remove(self):
+ self._create_security_group()
+ uuid = self._post_server()
+ self._add_group(uuid)
+ subs = {
+ 'group_name': 'test'
+ }
+ response = self._do_post('servers/%s/action' % uuid,
+ 'security-group-remove-post-req', subs)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
diff --git a/nova/tests/unit/integrated/v3/test_server_diagnostics.py b/nova/tests/unit/integrated/v3/test_server_diagnostics.py
new file mode 100644
index 0000000000..b2c41225e3
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_server_diagnostics.py
@@ -0,0 +1,27 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ServerDiagnosticsSamplesJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-server-diagnostics"
+
+ def test_server_diagnostics_get(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/%s/diagnostics' % uuid)
+ subs = self._get_regexes()
+ self._verify_response('server-diagnostics-get-resp', subs,
+ response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_server_external_events.py b/nova/tests/unit/integrated/v3/test_server_external_events.py
new file mode 100644
index 0000000000..9d2675a11c
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_server_external_events.py
@@ -0,0 +1,40 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ServerExternalEventsSamplesJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-server-external-events"
+
+ def setUp(self):
+        """setUp method for the server external events api samples extension
+
+        This method creates the server that will be used in each test
+        """
+ super(ServerExternalEventsSamplesJsonTest, self).setUp()
+ self.uuid = self._post_server()
+
+ def test_create_event(self):
+ subs = {
+ 'uuid': self.uuid,
+ 'name': 'network-changed',
+ 'status': 'completed',
+ 'tag': 'foo',
+ }
+ response = self._do_post('os-server-external-events',
+ 'event-create-req',
+ subs)
+ subs.update(self._get_regexes())
+ self._verify_response('event-create-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_server_groups.py b/nova/tests/unit/integrated/v3/test_server_groups.py
new file mode 100644
index 0000000000..f5cc253cc9
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_server_groups.py
@@ -0,0 +1,66 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ServerGroupsSampleJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-server-groups"
+
+ def _get_create_subs(self):
+ return {'name': 'test'}
+
+ def _post_server_group(self):
+        """Verify the response status and return the UUID of the
+        newly created server group.
+        """
+ subs = self._get_create_subs()
+ response = self._do_post('os-server-groups',
+ 'server-groups-post-req', subs)
+ subs = self._get_regexes()
+ subs['name'] = 'test'
+ return self._verify_response('server-groups-post-resp',
+ subs, response, 200)
+
+ def _create_server_group(self):
+ subs = self._get_create_subs()
+ return self._do_post('os-server-groups',
+ 'server-groups-post-req', subs)
+
+ def test_server_groups_post(self):
+ return self._post_server_group()
+
+ def test_server_groups_list(self):
+ subs = self._get_create_subs()
+ uuid = self._post_server_group()
+ response = self._do_get('os-server-groups')
+ subs.update(self._get_regexes())
+ subs['id'] = uuid
+ self._verify_response('server-groups-list-resp',
+ subs, response, 200)
+
+ def test_server_groups_get(self):
+ # Get api sample of server groups get request.
+ subs = {'name': 'test'}
+ uuid = self._post_server_group()
+ subs['id'] = uuid
+ response = self._do_get('os-server-groups/%s' % uuid)
+
+ self._verify_response('server-groups-get-resp', subs, response, 200)
+
+ def test_server_groups_delete(self):
+ uuid = self._post_server_group()
+ response = self._do_delete('os-server-groups/%s' % uuid)
+ self.assertEqual(response.status_code, 204)
diff --git a/nova/tests/unit/integrated/v3/test_server_metadata.py b/nova/tests/unit/integrated/v3/test_server_metadata.py
new file mode 100644
index 0000000000..9b45af3d07
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_server_metadata.py
@@ -0,0 +1,80 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ServersMetadataJsonTest(test_servers.ServersSampleBase):
+ extends_name = 'core_only'
+ sample_dir = 'server-metadata'
+
+ def _create_and_set(self, subs):
+ uuid = self._post_server()
+ response = self._do_put('servers/%s/metadata' % uuid,
+ 'server-metadata-all-req',
+ subs)
+ self._verify_response('server-metadata-all-resp', subs, response, 200)
+ return uuid
+
+ def generalize_subs(self, subs, vanilla_regexes):
+ subs['value'] = '(Foo|Bar) Value'
+ return subs
+
+ def test_metadata_put_all(self):
+ # Test setting all metadata for a server.
+ subs = {'value': 'Foo Value'}
+ self._create_and_set(subs)
+
+ def test_metadata_post_all(self):
+ # Test updating all metadata for a server.
+ subs = {'value': 'Foo Value'}
+ uuid = self._create_and_set(subs)
+ subs['value'] = 'Bar Value'
+ response = self._do_post('servers/%s/metadata' % uuid,
+ 'server-metadata-all-req',
+ subs)
+ self._verify_response('server-metadata-all-resp', subs, response, 200)
+
+ def test_metadata_get_all(self):
+ # Test getting all metadata for a server.
+ subs = {'value': 'Foo Value'}
+ uuid = self._create_and_set(subs)
+ response = self._do_get('servers/%s/metadata' % uuid)
+ self._verify_response('server-metadata-all-resp', subs, response, 200)
+
+ def test_metadata_put(self):
+ # Test putting an individual metadata item for a server.
+ subs = {'value': 'Foo Value'}
+ uuid = self._create_and_set(subs)
+ subs['value'] = 'Bar Value'
+ response = self._do_put('servers/%s/metadata/foo' % uuid,
+ 'server-metadata-req',
+ subs)
+ self._verify_response('server-metadata-resp', subs, response, 200)
+
+ def test_metadata_get(self):
+ # Test getting an individual metadata item for a server.
+ subs = {'value': 'Foo Value'}
+ uuid = self._create_and_set(subs)
+ response = self._do_get('servers/%s/metadata/foo' % uuid)
+ self._verify_response('server-metadata-resp', subs, response, 200)
+
+ def test_metadata_delete(self):
+ # Test deleting an individual metadata item for a server.
+ subs = {'value': 'Foo Value'}
+ uuid = self._create_and_set(subs)
+ response = self._do_delete('servers/%s/metadata/foo' % uuid)
+ self.assertEqual(response.status_code, 204)
+ self.assertEqual(response.content, '')
diff --git a/nova/tests/unit/integrated/v3/test_server_usage.py b/nova/tests/unit/integrated/v3/test_server_usage.py
new file mode 100644
index 0000000000..1b6358bd43
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_server_usage.py
@@ -0,0 +1,39 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ServerUsageSampleJsonTest(test_servers.ServersSampleBase):
+ extension_name = 'os-server-usage'
+
+ def setUp(self):
+ """setUp method for server usage."""
+ super(ServerUsageSampleJsonTest, self).setUp()
+ self.uuid = self._post_server()
+
+ def test_show(self):
+ response = self._do_get('servers/%s' % self.uuid)
+ subs = self._get_regexes()
+ subs['id'] = self.uuid
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_details(self):
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['id'] = self.uuid
+ subs['hostid'] = '[a-f0-9]+'
+ self._verify_response('servers-detail-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_servers.py b/nova/tests/unit/integrated/v3/test_servers.py
new file mode 100644
index 0000000000..dfa8f5a9d9
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_servers.py
@@ -0,0 +1,188 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute import api as compute_api
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class ServersSampleBase(api_sample_base.ApiSampleTestBaseV3):
+ def _post_server(self):
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ 'glance_host': self._get_glance_host()
+ }
+ response = self._do_post('servers', 'server-post-req', subs)
+ subs = self._get_regexes()
+ return self._verify_response('server-post-resp', subs, response, 202)
+
+
+class ServersSampleJsonTest(ServersSampleBase):
+ sample_dir = 'servers'
+
+ def test_servers_post(self):
+ return self._post_server()
+
+ def test_servers_get(self):
+ uuid = self.test_servers_post()
+ response = self._do_get('servers/%s' % uuid)
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
+ subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_servers_list(self):
+ uuid = self._post_server()
+ response = self._do_get('servers')
+ subs = self._get_regexes()
+ subs['id'] = uuid
+ self._verify_response('servers-list-resp', subs, response, 200)
+
+ def test_servers_details(self):
+ uuid = self._post_server()
+ response = self._do_get('servers/detail')
+ subs = self._get_regexes()
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
+ subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
+ self._verify_response('servers-details-resp', subs, response, 200)
+
+
+class ServersSampleAllExtensionJsonTest(ServersSampleJsonTest):
+ all_extensions = True
+
+
+class ServersActionsJsonTest(ServersSampleBase):
+ sample_dir = 'servers'
+
+ def _test_server_action(self, uuid, action, req_tpl,
+ subs=None, resp_tpl=None, code=202):
+ subs = subs or {}
+ subs.update({'action': action,
+ 'glance_host': self._get_glance_host()})
+ response = self._do_post('servers/%s/action' % uuid,
+ req_tpl,
+ subs)
+ if resp_tpl:
+ subs.update(self._get_regexes())
+ self._verify_response(resp_tpl, subs, response, code)
+ else:
+ self.assertEqual(response.status_code, code)
+ self.assertEqual(response.content, "")
+
+ def test_server_reboot_hard(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, "reboot",
+ 'server-action-reboot',
+ {"type": "HARD"})
+
+ def test_server_reboot_soft(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, "reboot",
+ 'server-action-reboot',
+ {"type": "SOFT"})
+
+ def test_server_rebuild(self):
+ uuid = self._post_server()
+ image = fake.get_valid_image_id()
+ subs = {'host': self._get_host(),
+ 'uuid': image,
+ 'name': 'foobar',
+ 'pass': 'seekr3t',
+ 'hostid': '[a-f0-9]+',
+ }
+ self._test_server_action(uuid, 'rebuild',
+ 'server-action-rebuild',
+ subs,
+ 'server-action-rebuild-resp')
+
+ def _test_server_rebuild_preserve_ephemeral(self, value):
+ uuid = self._post_server()
+ image = fake.get_valid_image_id()
+ subs = {'host': self._get_host(),
+ 'uuid': image,
+ 'name': 'foobar',
+ 'pass': 'seekr3t',
+ 'hostid': '[a-f0-9]+',
+ 'preserve_ephemeral': str(value).lower(),
+ 'action': 'rebuild',
+ 'glance_host': self._get_glance_host(),
+ }
+
+ def fake_rebuild(self_, context, instance, image_href, admin_password,
+ files_to_inject=None, **kwargs):
+ self.assertEqual(kwargs['preserve_ephemeral'], value)
+ self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
+
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-action-rebuild-preserve-ephemeral',
+ subs)
+ self.assertEqual(response.status_code, 202)
+
+ def test_server_rebuild_preserve_ephemeral_true(self):
+ self._test_server_rebuild_preserve_ephemeral(True)
+
+ def test_server_rebuild_preserve_ephemeral_false(self):
+ self._test_server_rebuild_preserve_ephemeral(False)
+
+ def test_server_resize(self):
+ self.flags(allow_resize_to_same_host=True)
+ uuid = self._post_server()
+ self._test_server_action(uuid, "resize",
+ 'server-action-resize',
+ {"id": 2,
+ "host": self._get_host()})
+ return uuid
+
+ def test_server_revert_resize(self):
+ uuid = self.test_server_resize()
+ self._test_server_action(uuid, "revertResize",
+ 'server-action-revert-resize')
+
+ def test_server_confirm_resize(self):
+ uuid = self.test_server_resize()
+ self._test_server_action(uuid, "confirmResize",
+ 'server-action-confirm-resize',
+ code=204)
+
+ def test_server_create_image(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'createImage',
+ 'server-action-create-image',
+ {'name': 'foo-image'})
+
+
+class ServerStartStopJsonTest(ServersSampleBase):
+ sample_dir = 'servers'
+
+ def _test_server_action(self, uuid, action, req_tpl):
+ response = self._do_post('servers/%s/action' % uuid,
+ req_tpl,
+ {'action': action})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+ def test_server_start(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-stop', 'server-action-stop')
+ self._test_server_action(uuid, 'os-start', 'server-action-start')
+
+ def test_server_stop(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-stop', 'server-action-stop')
diff --git a/nova/tests/unit/integrated/v3/test_servers_ips.py b/nova/tests/unit/integrated/v3/test_servers_ips.py
new file mode 100644
index 0000000000..7c0b24b66b
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_servers_ips.py
@@ -0,0 +1,35 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class ServersIpsJsonTest(test_servers.ServersSampleBase):
+ extends_name = 'core_only'
+ sample_dir = 'server-ips'
+
+ def test_get(self):
+ # Test getting a server's IP information.
+ uuid = self._post_server()
+ response = self._do_get('servers/%s/ips' % uuid)
+ subs = self._get_regexes()
+ self._verify_response('server-ips-resp', subs, response, 200)
+
+ def test_get_by_network(self):
+ # Test getting a server's IP information by network id.
+ uuid = self._post_server()
+ response = self._do_get('servers/%s/ips/private' % uuid)
+ subs = self._get_regexes()
+ self._verify_response('server-ips-network-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_services.py b/nova/tests/unit/integrated/v3/test_services.py
new file mode 100644
index 0000000000..9ce9ffdbe7
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_services.py
@@ -0,0 +1,87 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.utils import timeutils
+
+from nova import db
+from nova.tests.unit.api.openstack.compute.plugins.v3 import test_services
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class ServicesJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-services"
+
+ def setUp(self):
+ super(ServicesJsonTest, self).setUp()
+ self.stubs.Set(db, "service_get_all",
+ test_services.fake_db_api_service_get_all)
+ self.stubs.Set(timeutils, "utcnow", test_services.fake_utcnow)
+ self.stubs.Set(timeutils, "utcnow_ts",
+ test_services.fake_utcnow_ts)
+ self.stubs.Set(db, "service_get_by_args",
+ test_services.fake_service_get_by_host_binary)
+ self.stubs.Set(db, "service_update",
+ test_services.fake_service_update)
+
+ def tearDown(self):
+ super(ServicesJsonTest, self).tearDown()
+ timeutils.clear_time_override()
+
+ def test_services_list(self):
+        """Return a list of all services."""
+ response = self._do_get('os-services')
+ subs = {'binary': 'nova-compute',
+ 'host': 'host1',
+ 'zone': 'nova',
+ 'status': 'disabled',
+ 'state': 'up'}
+ subs.update(self._get_regexes())
+ self._verify_response('services-list-get-resp', subs, response, 200)
+
+ def test_service_enable(self):
+        """Enable an existing service."""
+ subs = {"host": "host1",
+ 'binary': 'nova-compute'}
+ response = self._do_put('os-services/enable',
+ 'service-enable-put-req', subs)
+ subs = {"host": "host1",
+ "binary": "nova-compute"}
+ self._verify_response('service-enable-put-resp', subs, response, 200)
+
+ def test_service_disable(self):
+        """Disable an existing service."""
+ subs = {"host": "host1",
+ 'binary': 'nova-compute'}
+ response = self._do_put('os-services/disable',
+ 'service-disable-put-req', subs)
+ subs = {"host": "host1",
+ "binary": "nova-compute"}
+ self._verify_response('service-disable-put-resp', subs, response, 200)
+
+ def test_service_disable_log_reason(self):
+ """Disable an existing service and log the reason."""
+ subs = {"host": "host1",
+ 'binary': 'nova-compute',
+ 'disabled_reason': 'test2'}
+ response = self._do_put('os-services/disable-log-reason',
+ 'service-disable-log-put-req', subs)
+ return self._verify_response('service-disable-log-put-resp',
+ subs, response, 200)
+
+ def test_service_delete(self):
+ """Delete an existing service."""
+ response = self._do_delete('os-services/1')
+ self.assertEqual(response.status_code, 204)
+ self.assertEqual(response.content, "")
diff --git a/nova/tests/unit/integrated/v3/test_shelve.py b/nova/tests/unit/integrated/v3/test_shelve.py
new file mode 100644
index 0000000000..4a2224e783
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_shelve.py
@@ -0,0 +1,50 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+CONF = cfg.CONF
+CONF.import_opt('shelved_offload_time', 'nova.compute.manager')
+
+
+class ShelveJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-shelve"
+
+ def setUp(self):
+ super(ShelveJsonTest, self).setUp()
+ # Don't offload instance, so we can test the offload call.
+ CONF.set_override('shelved_offload_time', -1)
+
+ def _test_server_action(self, uuid, template, action):
+ response = self._do_post('servers/%s/action' % uuid,
+ template, {'action': action})
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
+
+ def test_shelve(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+
+ def test_shelve_offload(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ self._test_server_action(uuid, 'os-shelve-offload', 'shelveOffload')
+
+ def test_unshelve(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ self._test_server_action(uuid, 'os-unshelve', 'unshelve')
diff --git a/nova/tests/unit/integrated/v3/test_simple_tenant_usage.py b/nova/tests/unit/integrated/v3/test_simple_tenant_usage.py
new file mode 100644
index 0000000000..4508a36f8b
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_simple_tenant_usage.py
@@ -0,0 +1,61 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import urllib
+
+from oslo.utils import timeutils
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class SimpleTenantUsageSampleJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-simple-tenant-usage"
+
+ def setUp(self):
+ """setUp method for simple tenant usage."""
+ super(SimpleTenantUsageSampleJsonTest, self).setUp()
+
+ started = timeutils.utcnow()
+ now = started + datetime.timedelta(hours=1)
+
+ timeutils.set_time_override(started)
+ self._post_server()
+ timeutils.set_time_override(now)
+
+ self.query = {
+ 'start': str(started),
+ 'end': str(now)
+ }
+
+ def tearDown(self):
+ """tearDown method for simple tenant usage."""
+ super(SimpleTenantUsageSampleJsonTest, self).tearDown()
+ timeutils.clear_time_override()
+
+ def test_get_tenants_usage(self):
+ # Get api sample to get all tenants usage request.
+ response = self._do_get('os-simple-tenant-usage?%s' % (
+ urllib.urlencode(self.query)))
+ subs = self._get_regexes()
+ self._verify_response('simple-tenant-usage-get', subs, response, 200)
+
+ def test_get_tenant_usage_details(self):
+ # Get api sample to get specific tenant usage request.
+ tenant_id = 'openstack'
+ response = self._do_get('os-simple-tenant-usage/%s?%s' % (tenant_id,
+ urllib.urlencode(self.query)))
+ subs = self._get_regexes()
+ self._verify_response('simple-tenant-usage-get-specific', subs,
+ response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_suspend_server.py b/nova/tests/unit/integrated/v3/test_suspend_server.py
new file mode 100644
index 0000000000..11053b3e3e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_suspend_server.py
@@ -0,0 +1,41 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.integrated.v3 import test_servers
+
+
+class SuspendServerSamplesJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-suspend-server"
+ ctype = 'json'
+
+ def setUp(self):
+ """setUp Method for SuspendServer api samples extension
+
+        This method creates the server that will be used in each test
+ """
+ super(SuspendServerSamplesJsonTest, self).setUp()
+ self.uuid = self._post_server()
+
+ def test_post_suspend(self):
+ # Get api samples to suspend server request.
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'server-suspend', {})
+ self.assertEqual(response.status_code, 202)
+
+ def test_post_resume(self):
+ # Get api samples to server resume request.
+ self.test_post_suspend()
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'server-resume', {})
+ self.assertEqual(response.status_code, 202)
diff --git a/nova/tests/unit/integrated/v3/test_tenant_networks.py b/nova/tests/unit/integrated/v3/test_tenant_networks.py
new file mode 100644
index 0000000000..72a888ba93
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_tenant_networks.py
@@ -0,0 +1,61 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+
+import nova.quota
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+CONF = cfg.CONF
+CONF.import_opt('enable_network_quota',
+ 'nova.api.openstack.compute.contrib.os_tenant_networks')
+
+
+class TenantNetworksJsonTests(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-tenant-networks"
+
+ def setUp(self):
+ super(TenantNetworksJsonTests, self).setUp()
+ CONF.set_override("enable_network_quota", True)
+
+ def fake(*args, **kwargs):
+ pass
+
+ self.stubs.Set(nova.quota.QUOTAS, "reserve", fake)
+ self.stubs.Set(nova.quota.QUOTAS, "commit", fake)
+ self.stubs.Set(nova.quota.QUOTAS, "rollback", fake)
+ self.stubs.Set(nova.quota.QuotaEngine, "reserve", fake)
+ self.stubs.Set(nova.quota.QuotaEngine, "commit", fake)
+ self.stubs.Set(nova.quota.QuotaEngine, "rollback", fake)
+
+ def test_list_networks(self):
+ response = self._do_get('os-tenant-networks')
+ subs = self._get_regexes()
+ self._verify_response('networks-list-res', subs, response, 200)
+
+ def test_create_network(self):
+ response = self._do_post('os-tenant-networks', "networks-post-req", {})
+ subs = self._get_regexes()
+ self._verify_response('networks-post-res', subs, response, 200)
+
+ def test_delete_network(self):
+ response = self._do_post('os-tenant-networks', "networks-post-req", {})
+ net = jsonutils.loads(response.content)
+ response = self._do_delete('os-tenant-networks/%s' %
+ net["network"]["id"])
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, "")
diff --git a/nova/tests/unit/integrated/v3/test_used_limits.py b/nova/tests/unit/integrated/v3/test_used_limits.py
new file mode 100644
index 0000000000..6682246c9e
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_used_limits.py
@@ -0,0 +1,34 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class UsedLimitsSamplesJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-used-limits"
+ extra_extensions_to_load = ["limits"]
+
+ def test_get_used_limits(self):
+        # Get api sample of used limits.
+ response = self._do_get('limits')
+ subs = self._get_regexes()
+ self._verify_response('usedlimits-get-resp', subs, response, 200)
+
+ def test_get_used_limits_for_admin(self):
+ tenant_id = 'openstack'
+ response = self._do_get('limits?tenant_id=%s' % tenant_id)
+ subs = self._get_regexes()
+ self._verify_response('usedlimits-get-resp', subs, response, 200)
diff --git a/nova/tests/unit/integrated/v3/test_user_data.py b/nova/tests/unit/integrated/v3/test_user_data.py
new file mode 100644
index 0000000000..6410fa5a24
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_user_data.py
@@ -0,0 +1,36 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+
+from nova.tests.unit.image import fake
+from nova.tests.unit.integrated.v3 import api_sample_base
+
+
+class UserDataJsonTest(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-user-data"
+
+ def test_user_data_post(self):
+ user_data_contents = '#!/bin/bash\n/bin/su\necho "I am in you!"\n'
+ user_data = base64.b64encode(user_data_contents)
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'host': self._get_host(),
+ 'user_data': user_data
+ }
+ response = self._do_post('servers', 'userdata-post-req', subs)
+
+ subs.update(self._get_regexes())
+ self._verify_response('userdata-post-resp', subs, response, 202)
diff --git a/nova/tests/unit/integrated/v3/test_volumes.py b/nova/tests/unit/integrated/v3/test_volumes.py
new file mode 100644
index 0000000000..3c7ff6e460
--- /dev/null
+++ b/nova/tests/unit/integrated/v3/test_volumes.py
@@ -0,0 +1,184 @@
+# Copyright 2012 Nebula, Inc.
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.integrated.v3 import api_sample_base
+from nova.tests.unit.integrated.v3 import test_servers
+from nova.volume import cinder
+
+
+class SnapshotsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
+ extension_name = "os-volumes"
+
+ create_subs = {
+ 'snapshot_name': 'snap-001',
+ 'description': 'Daily backup',
+ 'volume_id': '521752a6-acf6-4b2d-bc7a-119f9148cd8c'
+ }
+
+ def setUp(self):
+ super(SnapshotsSampleJsonTests, self).setUp()
+ self.stubs.Set(cinder.API, "get_all_snapshots",
+ fakes.stub_snapshot_get_all)
+ self.stubs.Set(cinder.API, "get_snapshot", fakes.stub_snapshot_get)
+
+ def _create_snapshot(self):
+ self.stubs.Set(cinder.API, "create_snapshot",
+ fakes.stub_snapshot_create)
+
+ response = self._do_post("os-snapshots",
+ "snapshot-create-req",
+ self.create_subs)
+ return response
+
+ def test_snapshots_create(self):
+ response = self._create_snapshot()
+ self.create_subs.update(self._get_regexes())
+ self._verify_response("snapshot-create-resp",
+ self.create_subs, response, 200)
+
+ def test_snapshots_delete(self):
+ self.stubs.Set(cinder.API, "delete_snapshot",
+ fakes.stub_snapshot_delete)
+ self._create_snapshot()
+ response = self._do_delete('os-snapshots/100')
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
+
+ def test_snapshots_detail(self):
+ response = self._do_get('os-snapshots/detail')
+ subs = self._get_regexes()
+ self._verify_response('snapshots-detail-resp', subs, response, 200)
+
+ def test_snapshots_list(self):
+ response = self._do_get('os-snapshots')
+ subs = self._get_regexes()
+ self._verify_response('snapshots-list-resp', subs, response, 200)
+
+ def test_snapshots_show(self):
+ response = self._do_get('os-snapshots/100')
+ subs = {
+ 'snapshot_name': 'Default name',
+ 'description': 'Default description'
+ }
+ subs.update(self._get_regexes())
+ self._verify_response('snapshots-show-resp', subs, response, 200)
+
+
+class VolumesSampleJsonTest(test_servers.ServersSampleBase):
+ extension_name = "os-volumes"
+
+ def _get_volume_id(self):
+ return 'a26887c6-c47b-4654-abb5-dfadf7d3f803'
+
+ def _stub_volume(self, id, displayname="Volume Name",
+ displaydesc="Volume Description", size=100):
+ volume = {
+ 'id': id,
+ 'size': size,
+ 'availability_zone': 'zone1:host1',
+ 'instance_uuid': '3912f2b4-c5ba-4aec-9165-872876fe202e',
+ 'mountpoint': '/',
+ 'status': 'in-use',
+ 'attach_status': 'attached',
+ 'name': 'vol name',
+ 'display_name': displayname,
+ 'display_description': displaydesc,
+ 'created_at': datetime.datetime(2008, 12, 1, 11, 1, 55),
+ 'snapshot_id': None,
+ 'volume_type_id': 'fakevoltype',
+ 'volume_metadata': [],
+ 'volume_type': {'name': 'Backup'}
+ }
+ return volume
+
+ def _stub_volume_get(self, context, volume_id):
+ return self._stub_volume(volume_id)
+
+ def _stub_volume_delete(self, context, *args, **param):
+ pass
+
+ def _stub_volume_get_all(self, context, search_opts=None):
+ id = self._get_volume_id()
+ return [self._stub_volume(id)]
+
+ def _stub_volume_create(self, context, size, name, description, snapshot,
+ **param):
+ id = self._get_volume_id()
+ return self._stub_volume(id)
+
+ def setUp(self):
+ super(VolumesSampleJsonTest, self).setUp()
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+
+ self.stubs.Set(cinder.API, "delete", self._stub_volume_delete)
+ self.stubs.Set(cinder.API, "get", self._stub_volume_get)
+ self.stubs.Set(cinder.API, "get_all", self._stub_volume_get_all)
+
+ def _post_volume(self):
+ subs_req = {
+ 'volume_name': "Volume Name",
+ 'volume_desc': "Volume Description",
+ }
+
+ self.stubs.Set(cinder.API, "create", self._stub_volume_create)
+ response = self._do_post('os-volumes', 'os-volumes-post-req',
+ subs_req)
+ subs = self._get_regexes()
+ subs.update(subs_req)
+ self._verify_response('os-volumes-post-resp', subs, response, 200)
+
+ def test_volumes_show(self):
+ subs = {
+ 'volume_name': "Volume Name",
+ 'volume_desc': "Volume Description",
+ }
+ vol_id = self._get_volume_id()
+ response = self._do_get('os-volumes/%s' % vol_id)
+ subs.update(self._get_regexes())
+ self._verify_response('os-volumes-get-resp', subs, response, 200)
+
+ def test_volumes_index(self):
+ subs = {
+ 'volume_name': "Volume Name",
+ 'volume_desc': "Volume Description",
+ }
+ response = self._do_get('os-volumes')
+ subs.update(self._get_regexes())
+ self._verify_response('os-volumes-index-resp', subs, response, 200)
+
+ def test_volumes_detail(self):
+ # For now, index and detail are the same.
+ # See the volumes api
+ subs = {
+ 'volume_name': "Volume Name",
+ 'volume_desc': "Volume Description",
+ }
+ response = self._do_get('os-volumes/detail')
+ subs.update(self._get_regexes())
+ self._verify_response('os-volumes-detail-resp', subs, response, 200)
+
+ def test_volumes_create(self):
+ self._post_volume()
+
+ def test_volumes_delete(self):
+ self._post_volume()
+ vol_id = self._get_volume_id()
+ response = self._do_delete('os-volumes/%s' % vol_id)
+ self.assertEqual(response.status_code, 202)
+ self.assertEqual(response.content, '')
diff --git a/nova/tests/keymgr/__init__.py b/nova/tests/unit/keymgr/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/keymgr/__init__.py
+++ b/nova/tests/unit/keymgr/__init__.py
diff --git a/nova/tests/keymgr/fake.py b/nova/tests/unit/keymgr/fake.py
index 25fb300c51..25fb300c51 100644
--- a/nova/tests/keymgr/fake.py
+++ b/nova/tests/unit/keymgr/fake.py
diff --git a/nova/tests/unit/keymgr/test_conf_key_mgr.py b/nova/tests/unit/keymgr/test_conf_key_mgr.py
new file mode 100644
index 0000000000..38bed78acf
--- /dev/null
+++ b/nova/tests/unit/keymgr/test_conf_key_mgr.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test cases for the conf key manager.
+"""
+
+import array
+
+from oslo.config import cfg
+
+from nova.keymgr import conf_key_mgr
+from nova.keymgr import key
+from nova.tests.unit.keymgr import test_single_key_mgr
+
+
+CONF = cfg.CONF
+CONF.import_opt('fixed_key', 'nova.keymgr.conf_key_mgr', group='keymgr')
+
+
+class ConfKeyManagerTestCase(test_single_key_mgr.SingleKeyManagerTestCase):
+ def __init__(self, *args, **kwargs):
+ super(ConfKeyManagerTestCase, self).__init__(*args, **kwargs)
+
+ self._hex_key = '0' * 64
+
+ def _create_key_manager(self):
+ CONF.set_default('fixed_key', default=self._hex_key, group='keymgr')
+ return conf_key_mgr.ConfKeyManager()
+
+ def setUp(self):
+ super(ConfKeyManagerTestCase, self).setUp()
+
+ encoded_key = array.array('B', self._hex_key.decode('hex')).tolist()
+ self.key = key.SymmetricKey('AES', encoded_key)
+
+ def test_init(self):
+ key_manager = self._create_key_manager()
+ self.assertEqual(self._hex_key, key_manager._hex_key)
+
+ def test_init_value_error(self):
+ CONF.set_default('fixed_key', default=None, group='keymgr')
+ self.assertRaises(ValueError, conf_key_mgr.ConfKeyManager)
+
+ def test_generate_hex_key(self):
+ key_manager = self._create_key_manager()
+ self.assertEqual(self._hex_key, key_manager._generate_hex_key())
diff --git a/nova/tests/keymgr/test_key.py b/nova/tests/unit/keymgr/test_key.py
index 14766fd201..14766fd201 100644
--- a/nova/tests/keymgr/test_key.py
+++ b/nova/tests/unit/keymgr/test_key.py
diff --git a/nova/tests/keymgr/test_key_mgr.py b/nova/tests/unit/keymgr/test_key_mgr.py
index cffcfc7a0c..cffcfc7a0c 100644
--- a/nova/tests/keymgr/test_key_mgr.py
+++ b/nova/tests/unit/keymgr/test_key_mgr.py
diff --git a/nova/tests/unit/keymgr/test_mock_key_mgr.py b/nova/tests/unit/keymgr/test_mock_key_mgr.py
new file mode 100644
index 0000000000..9d0c2174d1
--- /dev/null
+++ b/nova/tests/unit/keymgr/test_mock_key_mgr.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test cases for the mock key manager.
+"""
+
+import array
+
+from nova import context
+from nova import exception
+from nova.keymgr import key as keymgr_key
+from nova.keymgr import mock_key_mgr
+from nova.tests.unit.keymgr import test_key_mgr
+
+
+class MockKeyManagerTestCase(test_key_mgr.KeyManagerTestCase):
+
+ def _create_key_manager(self):
+ return mock_key_mgr.MockKeyManager()
+
+ def setUp(self):
+ super(MockKeyManagerTestCase, self).setUp()
+
+ self.ctxt = context.RequestContext('fake', 'fake')
+
+ def test_create_key(self):
+ key_id_1 = self.key_mgr.create_key(self.ctxt)
+ key_id_2 = self.key_mgr.create_key(self.ctxt)
+ # ensure that the UUIDs are unique
+ self.assertNotEqual(key_id_1, key_id_2)
+
+ def test_create_key_with_length(self):
+ for length in [64, 128, 256]:
+ key_id = self.key_mgr.create_key(self.ctxt, key_length=length)
+ key = self.key_mgr.get_key(self.ctxt, key_id)
+ self.assertEqual(length / 8, len(key.get_encoded()))
+
+ def test_create_null_context(self):
+ self.assertRaises(exception.Forbidden,
+ self.key_mgr.create_key, None)
+
+ def test_store_key(self):
+ secret_key = array.array('B', ('0' * 64).decode('hex')).tolist()
+ _key = keymgr_key.SymmetricKey('AES', secret_key)
+ key_id = self.key_mgr.store_key(self.ctxt, _key)
+
+ actual_key = self.key_mgr.get_key(self.ctxt, key_id)
+ self.assertEqual(_key, actual_key)
+
+ def test_store_null_context(self):
+ self.assertRaises(exception.Forbidden,
+ self.key_mgr.store_key, None, None)
+
+ def test_copy_key(self):
+ key_id = self.key_mgr.create_key(self.ctxt)
+ key = self.key_mgr.get_key(self.ctxt, key_id)
+
+ copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id)
+ copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id)
+
+ self.assertNotEqual(key_id, copied_key_id)
+ self.assertEqual(key, copied_key)
+
+ def test_copy_null_context(self):
+ self.assertRaises(exception.Forbidden,
+ self.key_mgr.copy_key, None, None)
+
+ def test_get_key(self):
+ pass
+
+ def test_get_null_context(self):
+ self.assertRaises(exception.Forbidden,
+ self.key_mgr.get_key, None, None)
+
+ def test_get_unknown_key(self):
+ self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, None)
+
+ def test_delete_key(self):
+ key_id = self.key_mgr.create_key(self.ctxt)
+ self.key_mgr.delete_key(self.ctxt, key_id)
+
+ self.assertRaises(KeyError, self.key_mgr.get_key, self.ctxt, key_id)
+
+ def test_delete_null_context(self):
+ self.assertRaises(exception.Forbidden,
+ self.key_mgr.delete_key, None, None)
+
+ def test_delete_unknown_key(self):
+ self.assertRaises(KeyError, self.key_mgr.delete_key, self.ctxt, None)
diff --git a/nova/tests/unit/keymgr/test_not_implemented_key_mgr.py b/nova/tests/unit/keymgr/test_not_implemented_key_mgr.py
new file mode 100644
index 0000000000..8e6d0c8a27
--- /dev/null
+++ b/nova/tests/unit/keymgr/test_not_implemented_key_mgr.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test cases for the not implemented key manager.
+"""
+
+from nova.keymgr import not_implemented_key_mgr
+from nova.tests.unit.keymgr import test_key_mgr
+
+
+class NotImplementedKeyManagerTestCase(test_key_mgr.KeyManagerTestCase):
+
+ def _create_key_manager(self):
+ return not_implemented_key_mgr.NotImplementedKeyManager()
+
+ def test_create_key(self):
+ self.assertRaises(NotImplementedError,
+ self.key_mgr.create_key, None)
+
+ def test_store_key(self):
+ self.assertRaises(NotImplementedError,
+ self.key_mgr.store_key, None, None)
+
+ def test_copy_key(self):
+ self.assertRaises(NotImplementedError,
+ self.key_mgr.copy_key, None, None)
+
+ def test_get_key(self):
+ self.assertRaises(NotImplementedError,
+ self.key_mgr.get_key, None, None)
+
+ def test_delete_key(self):
+ self.assertRaises(NotImplementedError,
+ self.key_mgr.delete_key, None, None)
diff --git a/nova/tests/unit/keymgr/test_single_key_mgr.py b/nova/tests/unit/keymgr/test_single_key_mgr.py
new file mode 100644
index 0000000000..3cf1de8da2
--- /dev/null
+++ b/nova/tests/unit/keymgr/test_single_key_mgr.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test cases for the single key manager.
+"""
+
+import array
+
+from nova import exception
+from nova.keymgr import key
+from nova.keymgr import single_key_mgr
+from nova.tests.unit.keymgr import test_mock_key_mgr
+
+
+class SingleKeyManagerTestCase(test_mock_key_mgr.MockKeyManagerTestCase):
+
+ def _create_key_manager(self):
+ return single_key_mgr.SingleKeyManager()
+
+ def setUp(self):
+ super(SingleKeyManagerTestCase, self).setUp()
+
+ self.key_id = '00000000-0000-0000-0000-000000000000'
+ encoded = array.array('B', ('0' * 64).decode('hex')).tolist()
+ self.key = key.SymmetricKey('AES', encoded)
+
+ def test___init__(self):
+ self.assertEqual(self.key,
+ self.key_mgr.get_key(self.ctxt, self.key_id))
+
+ def test_create_key(self):
+ key_id_1 = self.key_mgr.create_key(self.ctxt)
+ key_id_2 = self.key_mgr.create_key(self.ctxt)
+ # ensure that the UUIDs are the same
+ self.assertEqual(key_id_1, key_id_2)
+
+ def test_create_key_with_length(self):
+ pass
+
+ def test_store_null_context(self):
+ self.assertRaises(exception.Forbidden,
+ self.key_mgr.store_key, None, self.key)
+
+ def test_copy_key(self):
+ key_id = self.key_mgr.create_key(self.ctxt)
+ key = self.key_mgr.get_key(self.ctxt, key_id)
+
+ copied_key_id = self.key_mgr.copy_key(self.ctxt, key_id)
+ copied_key = self.key_mgr.get_key(self.ctxt, copied_key_id)
+
+ self.assertEqual(key_id, copied_key_id)
+ self.assertEqual(key, copied_key)
+
+ def test_delete_key(self):
+ pass
+
+ def test_delete_unknown_key(self):
+ self.assertRaises(exception.KeyManagerError,
+ self.key_mgr.delete_key, self.ctxt, None)
diff --git a/nova/tests/matchers.py b/nova/tests/unit/matchers.py
index b392e3e852..b392e3e852 100644
--- a/nova/tests/matchers.py
+++ b/nova/tests/unit/matchers.py
diff --git a/nova/tests/monkey_patch_example/__init__.py b/nova/tests/unit/monkey_patch_example/__init__.py
index bf0a9e4214..bf0a9e4214 100644
--- a/nova/tests/monkey_patch_example/__init__.py
+++ b/nova/tests/unit/monkey_patch_example/__init__.py
diff --git a/nova/tests/monkey_patch_example/example_a.py b/nova/tests/unit/monkey_patch_example/example_a.py
index 3fdb4dcc05..3fdb4dcc05 100644
--- a/nova/tests/monkey_patch_example/example_a.py
+++ b/nova/tests/unit/monkey_patch_example/example_a.py
diff --git a/nova/tests/monkey_patch_example/example_b.py b/nova/tests/unit/monkey_patch_example/example_b.py
index 2515fd2be4..2515fd2be4 100644
--- a/nova/tests/monkey_patch_example/example_b.py
+++ b/nova/tests/unit/monkey_patch_example/example_b.py
diff --git a/nova/tests/network/__init__.py b/nova/tests/unit/network/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/network/__init__.py
+++ b/nova/tests/unit/network/__init__.py
diff --git a/nova/tests/network/security_group/__init__.py b/nova/tests/unit/network/security_group/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/network/security_group/__init__.py
+++ b/nova/tests/unit/network/security_group/__init__.py
diff --git a/nova/tests/network/security_group/test_neutron_driver.py b/nova/tests/unit/network/security_group/test_neutron_driver.py
index 6a86c6df1a..6a86c6df1a 100644
--- a/nova/tests/network/security_group/test_neutron_driver.py
+++ b/nova/tests/unit/network/security_group/test_neutron_driver.py
diff --git a/nova/tests/unit/network/test_api.py b/nova/tests/unit/network/test_api.py
new file mode 100644
index 0000000000..efc7d29a4a
--- /dev/null
+++ b/nova/tests/unit/network/test_api.py
@@ -0,0 +1,589 @@
+# Copyright 2012 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for network API."""
+
+import contextlib
+import itertools
+
+import mock
+import mox
+
+from nova.compute import flavors
+from nova import context
+from nova import exception
+from nova import network
+from nova.network import api
+from nova.network import base_api
+from nova.network import floating_ips
+from nova.network import model as network_model
+from nova.network import rpcapi as network_rpcapi
+from nova import objects
+from nova.objects import fields
+from nova import policy
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.objects import test_fixed_ip
+from nova.tests.unit.objects import test_flavor
+from nova.tests.unit.objects import test_virtual_interface
+from nova import utils
+
+FAKE_UUID = 'a47ae74e-ab08-547f-9eee-ffd23fc46c16'
+
+
+class NetworkPolicyTestCase(test.TestCase):
+ def setUp(self):
+ super(NetworkPolicyTestCase, self).setUp()
+
+ policy.reset()
+ policy.init()
+
+ self.context = context.get_admin_context()
+
+ def tearDown(self):
+ super(NetworkPolicyTestCase, self).tearDown()
+ policy.reset()
+
+ def test_check_policy(self):
+ self.mox.StubOutWithMock(policy, 'enforce')
+ target = {
+ 'project_id': self.context.project_id,
+ 'user_id': self.context.user_id,
+ }
+ policy.enforce(self.context, 'network:get_all', target)
+ self.mox.ReplayAll()
+ api.check_policy(self.context, 'get_all')
+
+
+class ApiTestCase(test.TestCase):
+ def setUp(self):
+ super(ApiTestCase, self).setUp()
+ self.network_api = network.API()
+ self.context = context.RequestContext('fake-user',
+ 'fake-project')
+
+ @mock.patch('nova.objects.NetworkList.get_all')
+ def test_get_all(self, mock_get_all):
+ mock_get_all.return_value = mock.sentinel.get_all
+ self.assertEqual(mock.sentinel.get_all,
+ self.network_api.get_all(self.context))
+ mock_get_all.assert_called_once_with(self.context,
+ project_only=True)
+
+ @mock.patch('nova.objects.NetworkList.get_all')
+ def test_get_all_liberal(self, mock_get_all):
+ self.flags(network_manager='nova.network.manager.FlatDHCPManaager')
+ mock_get_all.return_value = mock.sentinel.get_all
+ self.assertEqual(mock.sentinel.get_all,
+ self.network_api.get_all(self.context))
+ mock_get_all.assert_called_once_with(self.context,
+ project_only="allow_none")
+
+ @mock.patch('nova.objects.NetworkList.get_all')
+ def test_get_all_no_networks(self, mock_get_all):
+ mock_get_all.side_effect = exception.NoNetworksFound
+ self.assertEqual([], self.network_api.get_all(self.context))
+ mock_get_all.assert_called_once_with(self.context,
+ project_only=True)
+
+ @mock.patch('nova.objects.Network.get_by_uuid')
+ def test_get(self, mock_get):
+ mock_get.return_value = mock.sentinel.get_by_uuid
+ with mock.patch.object(self.context, 'elevated') as elevated:
+ elevated.return_value = mock.sentinel.elevated_context
+ self.assertEqual(mock.sentinel.get_by_uuid,
+ self.network_api.get(self.context, 'fake-uuid'))
+ mock_get.assert_called_once_with(mock.sentinel.elevated_context,
+ 'fake-uuid')
+
+ @mock.patch('nova.objects.Network.get_by_id')
+ @mock.patch('nova.db.virtual_interface_get_by_instance')
+ def test_get_vifs_by_instance(self, mock_get_by_instance,
+ mock_get_by_id):
+ mock_get_by_instance.return_value = [
+ dict(test_virtual_interface.fake_vif,
+ network_id=123)]
+ mock_get_by_id.return_value = objects.Network()
+ mock_get_by_id.return_value.uuid = mock.sentinel.network_uuid
+ instance = objects.Instance(uuid=mock.sentinel.inst_uuid)
+ vifs = self.network_api.get_vifs_by_instance(self.context,
+ instance)
+ self.assertEqual(1, len(vifs))
+ self.assertEqual(123, vifs[0].network_id)
+ self.assertEqual(str(mock.sentinel.network_uuid), vifs[0].net_uuid)
+ mock_get_by_instance.assert_called_once_with(
+ self.context, str(mock.sentinel.inst_uuid), use_slave=False)
+ mock_get_by_id.assert_called_once_with(self.context, 123,
+ project_only='allow_none')
+
+ @mock.patch('nova.objects.Network.get_by_id')
+ @mock.patch('nova.db.virtual_interface_get_by_address')
+ def test_get_vif_by_mac_address(self, mock_get_by_address,
+ mock_get_by_id):
+ mock_get_by_address.return_value = dict(
+ test_virtual_interface.fake_vif, network_id=123)
+ mock_get_by_id.return_value = objects.Network(
+ uuid=mock.sentinel.network_uuid)
+ vif = self.network_api.get_vif_by_mac_address(self.context,
+ mock.sentinel.mac)
+ self.assertEqual(123, vif.network_id)
+ self.assertEqual(str(mock.sentinel.network_uuid), vif.net_uuid)
+ mock_get_by_address.assert_called_once_with(self.context,
+ mock.sentinel.mac)
+ mock_get_by_id.assert_called_once_with(self.context, 123,
+ project_only='allow_none')
+
+ def test_allocate_for_instance_handles_macs_passed(self):
+ # If a macs argument is supplied to the 'nova-network' API, it is just
+ # ignored. This test checks that the call down to the rpcapi layer
+ # doesn't pass macs down: nova-network doesn't support hypervisor
+ # mac address limits (today anyhow).
+ macs = set(['ab:cd:ef:01:23:34'])
+ self.mox.StubOutWithMock(
+ self.network_api.network_rpcapi, "allocate_for_instance")
+ kwargs = dict(zip(['host', 'instance_id', 'project_id',
+ 'requested_networks', 'rxtx_factor', 'vpn', 'macs',
+ 'dhcp_options'],
+ itertools.repeat(mox.IgnoreArg())))
+ self.network_api.network_rpcapi.allocate_for_instance(
+ mox.IgnoreArg(), **kwargs).AndReturn([])
+ self.mox.ReplayAll()
+ flavor = flavors.get_default_flavor()
+ flavor['rxtx_factor'] = 0
+ sys_meta = flavors.save_flavor_info({}, flavor)
+ instance = dict(id=1, uuid='uuid', project_id='project_id',
+ host='host', system_metadata=utils.dict_to_metadata(sys_meta))
+ instance = fake_instance.fake_instance_obj(
+ self.context, expected_attrs=['system_metadata'], **instance)
+ self.network_api.allocate_for_instance(
+ self.context, instance, 'vpn', 'requested_networks', macs=macs)
+
+ def _do_test_associate_floating_ip(self, orig_instance_uuid):
+ """Test post-association logic."""
+
+ new_instance = {'uuid': 'new-uuid'}
+
+ def fake_associate(*args, **kwargs):
+ return orig_instance_uuid
+
+ self.stubs.Set(floating_ips.FloatingIP, 'associate_floating_ip',
+ fake_associate)
+
+ def fake_instance_get_by_uuid(context, instance_uuid,
+ columns_to_join=None,
+ use_slave=None):
+ return fake_instance.fake_db_instance(uuid=instance_uuid)
+
+ self.stubs.Set(self.network_api.db, 'instance_get_by_uuid',
+ fake_instance_get_by_uuid)
+
+ def fake_get_nw_info(ctxt, instance):
+ class FakeNWInfo(object):
+ def json(self):
+ pass
+ return FakeNWInfo()
+
+ self.stubs.Set(self.network_api, '_get_instance_nw_info',
+ fake_get_nw_info)
+
+ if orig_instance_uuid:
+ expected_updated_instances = [new_instance['uuid'],
+ orig_instance_uuid]
+ else:
+ expected_updated_instances = [new_instance['uuid']]
+
+ def fake_instance_info_cache_update(context, instance_uuid, cache):
+ self.assertEqual(instance_uuid,
+ expected_updated_instances.pop())
+
+ self.stubs.Set(self.network_api.db, 'instance_info_cache_update',
+ fake_instance_info_cache_update)
+
+ def fake_update_instance_cache_with_nw_info(api, context, instance,
+ nw_info=None,
+ update_cells=True):
+ return
+
+ self.stubs.Set(base_api, "update_instance_cache_with_nw_info",
+ fake_update_instance_cache_with_nw_info)
+
+ self.network_api.associate_floating_ip(self.context,
+ new_instance,
+ '172.24.4.225',
+ '10.0.0.2')
+
+ def test_associate_preassociated_floating_ip(self):
+ self._do_test_associate_floating_ip('orig-uuid')
+
+ def test_associate_unassociated_floating_ip(self):
+ self._do_test_associate_floating_ip(None)
+
+ def test_get_floating_ip_invalid_id(self):
+ self.assertRaises(exception.InvalidID,
+ self.network_api.get_floating_ip,
+ self.context, '123zzz')
+
+ @mock.patch('nova.objects.FloatingIP.get_by_id')
+ def test_get_floating_ip(self, mock_get):
+ floating = mock.sentinel.floating
+ mock_get.return_value = floating
+ self.assertEqual(floating,
+ self.network_api.get_floating_ip(self.context, 123))
+ mock_get.assert_called_once_with(self.context, 123)
+
+ @mock.patch('nova.objects.FloatingIP.get_pool_names')
+ def test_get_floating_ip_pools(self, mock_get):
+ pools = ['foo', 'bar']
+ mock_get.return_value = pools
+ self.assertEqual(pools,
+ self.network_api.get_floating_ip_pools(
+ self.context))
+
+ @mock.patch('nova.objects.FloatingIP.get_by_address')
+ def test_get_floating_ip_by_address(self, mock_get):
+ floating = mock.sentinel.floating
+ mock_get.return_value = floating
+ self.assertEqual(floating,
+ self.network_api.get_floating_ip_by_address(
+ self.context, mock.sentinel.address))
+ mock_get.assert_called_once_with(self.context,
+ mock.sentinel.address)
+
+ @mock.patch('nova.objects.FloatingIPList.get_by_project')
+ def test_get_floating_ips_by_project(self, mock_get):
+ floatings = mock.sentinel.floating_ips
+ mock_get.return_value = floatings
+ self.assertEqual(floatings,
+ self.network_api.get_floating_ips_by_project(
+ self.context))
+ mock_get.assert_called_once_with(self.context,
+ self.context.project_id)
+
+ @mock.patch('nova.objects.FloatingIPList.get_by_fixed_address')
+ def test_get_floating_ips_by_fixed_address(self, mock_get):
+ floatings = [objects.FloatingIP(id=1, address='1.2.3.4'),
+ objects.FloatingIP(id=2, address='5.6.7.8')]
+ mock_get.return_value = floatings
+ self.assertEqual(['1.2.3.4', '5.6.7.8'],
+ self.network_api.get_floating_ips_by_fixed_address(
+ self.context, mock.sentinel.fixed_address))
+ mock_get.assert_called_once_with(self.context,
+ mock.sentinel.fixed_address)
+
+ def _stub_migrate_instance_calls(self, method, multi_host, info):
+ fake_flavor = flavors.get_default_flavor()
+ fake_flavor['rxtx_factor'] = 1.21
+ sys_meta = utils.dict_to_metadata(
+ flavors.save_flavor_info({}, fake_flavor))
+ fake_instance = {'uuid': 'fake_uuid',
+ 'instance_type_id': fake_flavor['id'],
+ 'project_id': 'fake_project_id',
+ 'system_metadata': sys_meta}
+ fake_migration = {'source_compute': 'fake_compute_source',
+ 'dest_compute': 'fake_compute_dest'}
+
+ def fake_mig_inst_method(*args, **kwargs):
+ info['kwargs'] = kwargs
+
+ def fake_get_multi_addresses(*args, **kwargs):
+ return multi_host, ['fake_float1', 'fake_float2']
+
+ self.stubs.Set(network_rpcapi.NetworkAPI, method,
+ fake_mig_inst_method)
+ self.stubs.Set(self.network_api, '_get_multi_addresses',
+ fake_get_multi_addresses)
+
+ expected = {'instance_uuid': 'fake_uuid',
+ 'source_compute': 'fake_compute_source',
+ 'dest_compute': 'fake_compute_dest',
+ 'rxtx_factor': 1.21,
+ 'project_id': 'fake_project_id',
+ 'floating_addresses': None}
+ if multi_host:
+ expected['floating_addresses'] = ['fake_float1', 'fake_float2']
+ return fake_instance, fake_migration, expected
+
+ def test_migrate_instance_start_with_multhost(self):
+ info = {'kwargs': {}}
+ arg1, arg2, expected = self._stub_migrate_instance_calls(
+ 'migrate_instance_start', True, info)
+ expected['host'] = 'fake_compute_source'
+ self.network_api.migrate_instance_start(self.context, arg1, arg2)
+ self.assertEqual(info['kwargs'], expected)
+
+ def test_migrate_instance_start_without_multhost(self):
+ info = {'kwargs': {}}
+ arg1, arg2, expected = self._stub_migrate_instance_calls(
+ 'migrate_instance_start', False, info)
+ self.network_api.migrate_instance_start(self.context, arg1, arg2)
+ self.assertEqual(info['kwargs'], expected)
+
+ def test_migrate_instance_finish_with_multhost(self):
+ info = {'kwargs': {}}
+ arg1, arg2, expected = self._stub_migrate_instance_calls(
+ 'migrate_instance_finish', True, info)
+ expected['host'] = 'fake_compute_dest'
+ self.network_api.migrate_instance_finish(self.context, arg1, arg2)
+ self.assertEqual(info['kwargs'], expected)
+
+ def test_migrate_instance_finish_without_multhost(self):
+ info = {'kwargs': {}}
+ arg1, arg2, expected = self._stub_migrate_instance_calls(
+ 'migrate_instance_finish', False, info)
+ self.network_api.migrate_instance_finish(self.context, arg1, arg2)
+ self.assertEqual(info['kwargs'], expected)
+
+ def test_is_multi_host_instance_has_no_fixed_ip(self):
+ def fake_fixed_ip_get_by_instance(ctxt, uuid):
+ raise exception.FixedIpNotFoundForInstance(instance_uuid=uuid)
+ self.stubs.Set(self.network_api.db, 'fixed_ip_get_by_instance',
+ fake_fixed_ip_get_by_instance)
+ instance = {'uuid': FAKE_UUID}
+ result, floats = self.network_api._get_multi_addresses(self.context,
+ instance)
+ self.assertFalse(result)
+
+ @mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
+ def _test_is_multi_host_network_has_no_project_id(self, is_multi_host,
+ fip_get):
+ network = objects.Network(
+ id=123, project_id=None,
+ multi_host=is_multi_host)
+ fip_get.return_value = [
+ objects.FixedIP(instance_uuid=FAKE_UUID, network=network,
+ floating_ips=objects.FloatingIPList())]
+ instance = {'uuid': FAKE_UUID}
+ result, floats = self.network_api._get_multi_addresses(self.context,
+ instance)
+ self.assertEqual(is_multi_host, result)
+
+ def test_is_multi_host_network_has_no_project_id_multi(self):
+ self._test_is_multi_host_network_has_no_project_id(True)
+
+ def test_is_multi_host_network_has_no_project_id_non_multi(self):
+ self._test_is_multi_host_network_has_no_project_id(False)
+
+ @mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
+ def _test_is_multi_host_network_has_project_id(self, is_multi_host,
+ fip_get):
+ network = objects.Network(
+ id=123, project_id=self.context.project_id,
+ multi_host=is_multi_host)
+ fip_get.return_value = [
+ objects.FixedIP(instance_uuid=FAKE_UUID, network=network,
+ floating_ips=objects.FloatingIPList())]
+ instance = {'uuid': FAKE_UUID}
+ result, floats = self.network_api._get_multi_addresses(self.context,
+ instance)
+ self.assertEqual(is_multi_host, result)
+
+ def test_is_multi_host_network_has_project_id_multi(self):
+ self._test_is_multi_host_network_has_project_id(True)
+
+ def test_is_multi_host_network_has_project_id_non_multi(self):
+ self._test_is_multi_host_network_has_project_id(False)
+
+ @mock.patch('nova.objects.Network.get_by_uuid')
+ @mock.patch('nova.objects.Network.disassociate')
+ def test_network_disassociate_project(self, mock_disassociate, mock_get):
+ net_obj = objects.Network(context=self.context, id=1)
+ mock_get.return_value = net_obj
+ self.network_api.associate(self.context, FAKE_UUID, project=None)
+ mock_disassociate.assert_called_once_with(self.context, net_obj.id,
+ host=False, project=True)
+
+ @mock.patch('nova.objects.Network.get_by_uuid')
+ @mock.patch('nova.objects.Network.disassociate')
+ def test_network_disassociate_host(self, mock_disassociate, mock_get):
+ net_obj = objects.Network(context=self.context, id=1)
+ mock_get.return_value = net_obj
+ self.network_api.associate(self.context, FAKE_UUID, host=None)
+ mock_disassociate.assert_called_once_with(self.context, net_obj.id,
+ host=True, project=False)
+
+ @mock.patch('nova.objects.Network.get_by_uuid')
+ @mock.patch('nova.objects.Network.associate')
+ def test_network_associate_project(self, mock_associate, mock_get):
+ net_obj = objects.Network(context=self.context, id=1)
+ mock_get.return_value = net_obj
+ project = mock.sentinel.project
+ self.network_api.associate(self.context, FAKE_UUID, project=project)
+ mock_associate.assert_called_once_with(self.context, project,
+ network_id=net_obj.id,
+ force=True)
+
+ @mock.patch('nova.objects.Network.get_by_uuid')
+ @mock.patch('nova.objects.Network.save')
+ def test_network_associate_host(self, mock_save, mock_get):
+ net_obj = objects.Network(context=self.context, id=1)
+ mock_get.return_value = net_obj
+ host = str(mock.sentinel.host)
+ self.network_api.associate(self.context, FAKE_UUID, host=host)
+ mock_save.assert_called_once_with()
+ self.assertEqual(host, net_obj.host)
+
+ @mock.patch('nova.objects.Network.get_by_uuid')
+ @mock.patch('nova.objects.Network.disassociate')
+ def test_network_disassociate(self, mock_disassociate, mock_get):
+ mock_get.return_value = objects.Network(context=self.context, id=123)
+ self.network_api.disassociate(self.context, FAKE_UUID)
+ mock_disassociate.assert_called_once_with(self.context, 123,
+ project=True, host=True)
+
+ def _test_refresh_cache(self, method, *args, **kwargs):
+ # This test verifies that no call to get_instance_nw_info() is made
+ # from the @refresh_cache decorator for the tested method.
+ with contextlib.nested(
+ mock.patch.object(self.network_api.network_rpcapi, method),
+ mock.patch.object(self.network_api.network_rpcapi,
+ 'get_instance_nw_info'),
+ mock.patch.object(network_model.NetworkInfo, 'hydrate'),
+ ) as (
+ method_mock, nwinfo_mock, hydrate_mock
+ ):
+ nw_info = network_model.NetworkInfo([])
+ method_mock.return_value = nw_info
+ hydrate_mock.return_value = nw_info
+ getattr(self.network_api, method)(*args, **kwargs)
+ hydrate_mock.assert_called_once_with(nw_info)
+ self.assertFalse(nwinfo_mock.called)
+
+ def test_allocate_for_instance_refresh_cache(self):
+ sys_meta = flavors.save_flavor_info({}, test_flavor.fake_flavor)
+ instance = fake_instance.fake_instance_obj(
+ self.context, expected_attrs=['system_metadata'],
+ system_metadata=sys_meta)
+ vpn = 'fake-vpn'
+ requested_networks = 'fake-networks'
+ self._test_refresh_cache('allocate_for_instance', self.context,
+ instance, vpn, requested_networks)
+
+ def test_add_fixed_ip_to_instance_refresh_cache(self):
+ sys_meta = flavors.save_flavor_info({}, test_flavor.fake_flavor)
+ instance = fake_instance.fake_instance_obj(
+ self.context, expected_attrs=['system_metadata'],
+ system_metadata=sys_meta)
+ network_id = 'fake-network-id'
+ self._test_refresh_cache('add_fixed_ip_to_instance', self.context,
+ instance, network_id)
+
+ def test_remove_fixed_ip_from_instance_refresh_cache(self):
+ sys_meta = flavors.save_flavor_info({}, test_flavor.fake_flavor)
+ instance = fake_instance.fake_instance_obj(
+ self.context, expected_attrs=['system_metadata'],
+ system_metadata=sys_meta)
+ address = 'fake-address'
+ self._test_refresh_cache('remove_fixed_ip_from_instance', self.context,
+ instance, address)
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ def test_get_fixed_ip_by_address(self, fip_get):
+ fip_get.return_value = test_fixed_ip.fake_fixed_ip
+ fip = self.network_api.get_fixed_ip_by_address(self.context,
+ 'fake-addr')
+ self.assertIsInstance(fip, objects.FixedIP)
+
+ @mock.patch('nova.objects.FixedIP.get_by_id')
+ def test_get_fixed_ip(self, mock_get_by_id):
+ mock_get_by_id.return_value = mock.sentinel.fixed_ip
+ self.assertEqual(mock.sentinel.fixed_ip,
+ self.network_api.get_fixed_ip(self.context,
+ mock.sentinel.id))
+ mock_get_by_id.assert_called_once_with(self.context, mock.sentinel.id)
+
+ @mock.patch('nova.objects.FixedIP.get_by_floating_address')
+ def test_get_instance_by_floating_address(self, mock_get_by_floating):
+ mock_get_by_floating.return_value = objects.FixedIP(
+ instance_uuid = mock.sentinel.instance_uuid)
+ self.assertEqual(str(mock.sentinel.instance_uuid),
+ self.network_api.get_instance_id_by_floating_address(
+ self.context, mock.sentinel.floating))
+ mock_get_by_floating.assert_called_once_with(self.context,
+ mock.sentinel.floating)
+
+ @mock.patch('nova.objects.FixedIP.get_by_floating_address')
+ def test_get_instance_by_floating_address_none(self, mock_get_by_floating):
+ mock_get_by_floating.return_value = None
+ self.assertIsNone(
+ self.network_api.get_instance_id_by_floating_address(
+ self.context, mock.sentinel.floating))
+ mock_get_by_floating.assert_called_once_with(self.context,
+ mock.sentinel.floating)
+
+
+@mock.patch('nova.network.api.API')
+@mock.patch('nova.db.instance_info_cache_update')
+class TestUpdateInstanceCache(test.TestCase):
+ def setUp(self):
+ super(TestUpdateInstanceCache, self).setUp()
+ self.context = context.get_admin_context()
+ self.instance = {'uuid': FAKE_UUID}
+ vifs = [network_model.VIF(id='super_vif')]
+ self.nw_info = network_model.NetworkInfo(vifs)
+ self.nw_json = fields.NetworkModel.to_primitive(self, 'network_info',
+ self.nw_info)
+
+ def test_update_nw_info_none(self, db_mock, api_mock):
+ api_mock._get_instance_nw_info.return_value = self.nw_info
+
+ base_api.update_instance_cache_with_nw_info(api_mock, self.context,
+ self.instance, None)
+ api_mock._get_instance_nw_info.assert_called_once_with(self.context,
+ self.instance)
+ db_mock.assert_called_once_with(self.context, self.instance['uuid'],
+ {'network_info': self.nw_json})
+
+ def test_update_nw_info_one_network(self, db_mock, api_mock):
+ api_mock._get_instance_nw_info.return_value = self.nw_info
+ base_api.update_instance_cache_with_nw_info(api_mock, self.context,
+ self.instance, self.nw_info)
+ self.assertFalse(api_mock._get_instance_nw_info.called)
+ db_mock.assert_called_once_with(self.context, self.instance['uuid'],
+ {'network_info': self.nw_json})
+
+ def test_update_nw_info_empty_list(self, db_mock, api_mock):
+ api_mock._get_instance_nw_info.return_value = self.nw_info
+ base_api.update_instance_cache_with_nw_info(api_mock, self.context,
+ self.instance,
+ network_model.NetworkInfo([]))
+ self.assertFalse(api_mock._get_instance_nw_info.called)
+ db_mock.assert_called_once_with(self.context, self.instance['uuid'],
+ {'network_info': '[]'})
+
+ def test_decorator_return_object(self, db_mock, api_mock):
+ @base_api.refresh_cache
+ def func(self, context, instance):
+ return network_model.NetworkInfo([])
+ func(api_mock, self.context, self.instance)
+ self.assertFalse(api_mock._get_instance_nw_info.called)
+ db_mock.assert_called_once_with(self.context, self.instance['uuid'],
+ {'network_info': '[]'})
+
+ def test_decorator_return_none(self, db_mock, api_mock):
+ @base_api.refresh_cache
+ def func(self, context, instance):
+ pass
+ api_mock._get_instance_nw_info.return_value = self.nw_info
+ func(api_mock, self.context, self.instance)
+ api_mock._get_instance_nw_info.assert_called_once_with(self.context,
+ self.instance)
+ db_mock.assert_called_once_with(self.context, self.instance['uuid'],
+ {'network_info': self.nw_json})
+
+
+class NetworkHooksTestCase(test.BaseHookTestCase):
+ def test_instance_network_info_hook(self):
+ info_func = base_api.update_instance_cache_with_nw_info
+ self.assert_has_hook('instance_network_info', info_func)
diff --git a/nova/tests/network/test_linux_net.py b/nova/tests/unit/network/test_linux_net.py
index c07d43b2f3..c07d43b2f3 100644
--- a/nova/tests/network/test_linux_net.py
+++ b/nova/tests/unit/network/test_linux_net.py
diff --git a/nova/tests/unit/network/test_manager.py b/nova/tests/unit/network/test_manager.py
new file mode 100644
index 0000000000..776160cd0c
--- /dev/null
+++ b/nova/tests/unit/network/test_manager.py
@@ -0,0 +1,3358 @@
+# Copyright 2011 Rackspace
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+import fixtures
+import mock
+import mox
+import netaddr
+from oslo.concurrency import processutils
+from oslo.config import cfg
+from oslo.db import exception as db_exc
+from oslo import messaging
+from oslo.utils import importutils
+import six
+
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import models
+from nova import exception
+from nova import ipv6
+from nova.network import floating_ips
+from nova.network import linux_net
+from nova.network import manager as network_manager
+from nova.network import model as net_model
+from nova import objects
+from nova.objects import quotas as quotas_obj
+from nova.objects import virtual_interface as vif_obj
+from nova.openstack.common import log as logging
+from nova import quota
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_ldap
+from nova.tests.unit import fake_network
+from nova.tests.unit import matchers
+from nova.tests.unit.objects import test_fixed_ip
+from nova.tests.unit.objects import test_floating_ip
+from nova.tests.unit.objects import test_network
+from nova.tests.unit.objects import test_service
+from nova import utils
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+HOST = "testhost"
+FAKEUUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
+
+
+fake_inst = fake_instance.fake_db_instance
+
+
+networks = [{'id': 0,
+ 'uuid': FAKEUUID,
+ 'label': 'test0',
+ 'injected': False,
+ 'multi_host': False,
+ 'cidr': '192.168.0.0/24',
+ 'cidr_v6': '2001:db8::/64',
+ 'gateway_v6': '2001:db8::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge': 'fa0',
+ 'bridge_interface': 'fake_fa0',
+ 'gateway': '192.168.0.1',
+ 'dhcp_server': '192.168.0.1',
+ 'broadcast': '192.168.0.255',
+ 'dns1': '192.168.0.1',
+ 'dns2': '192.168.0.2',
+ 'vlan': None,
+ 'host': HOST,
+ 'project_id': 'fake_project',
+ 'vpn_public_address': '192.168.0.2',
+ 'vpn_public_port': '22',
+ 'vpn_private_address': '10.0.0.2'},
+ {'id': 1,
+ 'uuid': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
+ 'label': 'test1',
+ 'injected': False,
+ 'multi_host': False,
+ 'cidr': '192.168.1.0/24',
+ 'cidr_v6': '2001:db9::/64',
+ 'gateway_v6': '2001:db9::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge': 'fa1',
+ 'bridge_interface': 'fake_fa1',
+ 'gateway': '192.168.1.1',
+ 'dhcp_server': '192.168.1.1',
+ 'broadcast': '192.168.1.255',
+ 'dns1': '192.168.0.1',
+ 'dns2': '192.168.0.2',
+ 'vlan': None,
+ 'host': HOST,
+ 'project_id': 'fake_project',
+ 'vpn_public_address': '192.168.1.2',
+ 'vpn_public_port': '22',
+ 'vpn_private_address': '10.0.0.2'}]
+
+fixed_ips = [{'id': 0,
+ 'network_id': 0,
+ 'address': '192.168.0.100',
+ 'instance_uuid': 0,
+ 'allocated': False,
+ 'virtual_interface_id': 0,
+ 'floating_ips': []},
+ {'id': 0,
+ 'network_id': 1,
+ 'address': '192.168.1.100',
+ 'instance_uuid': 0,
+ 'allocated': False,
+ 'virtual_interface_id': 0,
+ 'floating_ips': []},
+ {'id': 0,
+ 'network_id': 1,
+ 'address': '2001:db9:0:1::10',
+ 'instance_uuid': 0,
+ 'allocated': False,
+ 'virtual_interface_id': 0,
+ 'floating_ips': []}]
+
+
+flavor = {'id': 0,
+ 'rxtx_cap': 3}
+
+
+floating_ip_fields = {'id': 0,
+ 'address': '192.168.10.100',
+ 'pool': 'nova',
+ 'interface': 'eth0',
+ 'fixed_ip_id': 0,
+ 'project_id': None,
+ 'auto_assigned': False}
+
+vifs = [{'id': 0,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'address': 'DE:AD:BE:EF:00:00',
+ 'uuid': '00000000-0000-0000-0000-0000000000000000',
+ 'network_id': 0,
+ 'instance_uuid': 0},
+ {'id': 1,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'address': 'DE:AD:BE:EF:00:01',
+ 'uuid': '00000000-0000-0000-0000-0000000000000001',
+ 'network_id': 1,
+ 'instance_uuid': 0},
+ {'id': 2,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'address': 'DE:AD:BE:EF:00:02',
+ 'uuid': '00000000-0000-0000-0000-0000000000000002',
+ 'network_id': 2,
+ 'instance_uuid': 0}]
+
+
+class FlatNetworkTestCase(test.TestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(FlatNetworkTestCase, self).setUp()
+ self.tempdir = self.useFixture(fixtures.TempDir()).path
+ self.flags(log_dir=self.tempdir)
+ self.flags(use_local=True, group='conductor')
+ self.network = network_manager.FlatManager(host=HOST)
+ self.network.instance_dns_domain = ''
+ self.network.db = db
+ self.context = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ def test_get_instance_nw_info(self):
+ fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info
+
+ nw_info = fake_get_instance_nw_info(self.stubs, 0, 2)
+ self.assertFalse(nw_info)
+
+ nw_info = fake_get_instance_nw_info(self.stubs, 1, 2)
+
+ for i, vif in enumerate(nw_info):
+ nid = i + 1
+ check = {'bridge': 'fake_br%d' % nid,
+ 'cidr': '192.168.%s.0/24' % nid,
+ 'cidr_v6': '2001:db8:0:%x::/64' % nid,
+ 'id': '00000000-0000-0000-0000-00000000000000%02d' % nid,
+ 'multi_host': False,
+ 'injected': False,
+ 'bridge_interface': None,
+ 'vlan': None,
+ 'broadcast': '192.168.%d.255' % nid,
+ 'dhcp_server': '192.168.1.1',
+ 'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid],
+ 'gateway': '192.168.%d.1' % nid,
+ 'gateway_v6': '2001:db8:0:1::1',
+ 'label': 'test%d' % nid,
+ 'mac': 'DE:AD:BE:EF:00:%02x' % nid,
+ 'rxtx_cap': 30,
+ 'vif_type': net_model.VIF_TYPE_BRIDGE,
+ 'vif_devname': None,
+ 'vif_uuid':
+ '00000000-0000-0000-0000-00000000000000%02d' % nid,
+ 'ovs_interfaceid': None,
+ 'qbh_params': None,
+ 'qbg_params': None,
+ 'should_create_vlan': False,
+ 'should_create_bridge': False,
+ 'ip': '192.168.%d.%03d' % (nid, nid + 99),
+ 'ip_v6': '2001:db8:0:1:dcad:beff:feef:%x' % nid,
+ 'netmask': '255.255.255.0',
+ 'netmask_v6': 64,
+ 'physical_network': None,
+ }
+
+ network = vif['network']
+ net_v4 = vif['network']['subnets'][0]
+ net_v6 = vif['network']['subnets'][1]
+
+ vif_dict = dict(bridge=network['bridge'],
+ cidr=net_v4['cidr'],
+ cidr_v6=net_v6['cidr'],
+ id=vif['id'],
+ multi_host=network.get_meta('multi_host', False),
+ injected=network.get_meta('injected', False),
+ bridge_interface=
+ network.get_meta('bridge_interface'),
+ vlan=network.get_meta('vlan'),
+ broadcast=str(net_v4.as_netaddr().broadcast),
+ dhcp_server=network.get_meta('dhcp_server',
+ net_v4['gateway']['address']),
+ dns=[ip['address'] for ip in net_v4['dns']],
+ gateway=net_v4['gateway']['address'],
+ gateway_v6=net_v6['gateway']['address'],
+ label=network['label'],
+ mac=vif['address'],
+ rxtx_cap=vif.get_meta('rxtx_cap'),
+ vif_type=vif['type'],
+ vif_devname=vif.get('devname'),
+ vif_uuid=vif['id'],
+ ovs_interfaceid=vif.get('ovs_interfaceid'),
+ qbh_params=vif.get('qbh_params'),
+ qbg_params=vif.get('qbg_params'),
+ should_create_vlan=
+ network.get_meta('should_create_vlan', False),
+ should_create_bridge=
+ network.get_meta('should_create_bridge',
+ False),
+ ip=net_v4['ips'][i]['address'],
+ ip_v6=net_v6['ips'][i]['address'],
+ netmask=str(net_v4.as_netaddr().netmask),
+ netmask_v6=net_v6.as_netaddr()._prefixlen,
+ physical_network=
+ network.get_meta('physical_network', None))
+
+ self.assertThat(vif_dict, matchers.DictMatches(check))
+
    def test_validate_networks(self):
        """Happy path: both requested (uuid, fixed_ip) pairs validate."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')

        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '192.168.1.100'),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '192.168.0.100')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
            [dict(test_network.fake_network, **net)
             for net in networks])

        # First expected address lookup: an unallocated
        # (instance_uuid=None) fixed IP on networks[1].
        ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[1])
        ip['network'] = dict(test_network.fake_network,
                             **networks[1])
        ip['instance_uuid'] = None
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   columns_to_join=mox.IgnoreArg()
                                   ).AndReturn(ip)
        # Second expected address lookup: an unallocated fixed IP on
        # networks[0].
        ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[0])
        ip['network'] = dict(test_network.fake_network,
                             **networks[0])
        ip['instance_uuid'] = None
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   columns_to_join=mox.IgnoreArg()
                                   ).AndReturn(ip)

        self.mox.ReplayAll()
        # Should not raise.
        self.network.validate_networks(self.context, requested_networks)

    def test_validate_networks_valid_fixed_ipv6(self):
        """An IPv6 fixed address validates the same way as IPv4."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')

        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '2001:db9:0:1::10')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
            [dict(test_network.fake_network, **networks[1])])

        # The v6 address is returned unallocated on networks[1].
        ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[2])
        ip['network'] = dict(test_network.fake_network,
                             **networks[1])
        ip['instance_uuid'] = None
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   columns_to_join=mox.IgnoreArg()
                                   ).AndReturn(ip)

        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)
+
    def test_validate_reserved(self):
        """A freshly created /24 network reserves 4 addresses by default."""
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        nets = self.network.create_networks(context_admin, 'fake',
                                            '192.168.0.0/24', False, 1,
                                            256, None, None, None, None, None)
        self.assertEqual(1, len(nets))
        network = nets[0]
        self.assertEqual(4, db.network_count_reserved_ips(context_admin,
                                                          network['id']))

    def test_validate_reserved_start_end(self):
        """allowed_start/allowed_end shrink the usable range.

        Gateway, vpn and dhcp addresses are placed around the allowed
        window without colliding with each other.
        """
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        nets = self.network.create_networks(context_admin, 'fake',
                                            '192.168.0.0/24', False, 1,
                                            256, dhcp_server='192.168.0.11',
                                            allowed_start='192.168.0.10',
                                            allowed_end='192.168.0.245')
        self.assertEqual(1, len(nets))
        network = nets[0]
        # gateway defaults to beginning of allowed_start
        self.assertEqual('192.168.0.10', network['gateway'])
        # vpn_server doesn't conflict with dhcp_start
        self.assertEqual('192.168.0.12', network['vpn_private_address'])
        # dhcp_start doesn't conflict with dhcp_server
        self.assertEqual('192.168.0.13', network['dhcp_start'])
        # NOTE(vish): 10 from the beginning, 10 from the end, and
        #             1 for the gateway, 1 for the dhcp server,
        #             1 for the vpn server
        self.assertEqual(23, db.network_count_reserved_ips(context_admin,
                                                           network['id']))

    def test_validate_reserved_start_out_of_range(self):
        """allowed_start outside the network CIDR is rejected."""
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        self.assertRaises(exception.AddressOutOfRange,
                          self.network.create_networks,
                          context_admin, 'fake', '192.168.0.0/24', False,
                          1, 256, allowed_start='192.168.1.10')

    def test_validate_reserved_end_invalid(self):
        """A non-address allowed_end string is rejected."""
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        self.assertRaises(exception.InvalidAddress,
                          self.network.create_networks,
                          context_admin, 'fake', '192.168.0.0/24', False,
                          1, 256, allowed_end='invalid')

    def test_validate_cidr_invalid(self):
        """A malformed CIDR string is rejected."""
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        self.assertRaises(exception.InvalidCidr,
                          self.network.create_networks,
                          context_admin, 'fake', 'invalid', False,
                          1, 256)

    def test_validate_non_int_size(self):
        """network_size must be an integer."""
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        self.assertRaises(exception.InvalidIntValue,
                          self.network.create_networks,
                          context_admin, 'fake', '192.168.0.0/24', False,
                          1, 'invalid')
+
+ def test_validate_networks_none_requested_networks(self):
+ self.network.validate_networks(self.context, None)
+
+ def test_validate_networks_empty_requested_networks(self):
+ requested_networks = []
+ self.mox.ReplayAll()
+
+ self.network.validate_networks(self.context, requested_networks)
+
    def test_validate_networks_invalid_fixed_ip(self):
        """A malformed fixed IP (five octets) raises FixedIpInvalid."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '192.168.1.100.1'),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '192.168.0.100.1')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
            [dict(test_network.fake_network, **net)
             for net in networks])
        self.mox.ReplayAll()

        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks, self.context,
                          requested_networks)

    def test_validate_networks_empty_fixed_ip(self):
        """An empty-string fixed IP raises FixedIpInvalid."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')

        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               ''),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
            [dict(test_network.fake_network, **net)
             for net in networks])
        self.mox.ReplayAll()

        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks,
                          self.context, requested_networks)

    def test_validate_networks_none_fixed_ip(self):
        """fixed_ip=None means 'no specific address' and validates fine."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')

        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               None),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               None)]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
            [dict(test_network.fake_network, **net)
             for net in networks])
        self.mox.ReplayAll()

        # Should not raise.
        self.network.validate_networks(self.context, requested_networks)
+
    @mock.patch('nova.objects.quotas.Quotas.reserve')
    def test_add_fixed_ip_instance_using_id_without_vpn(self, reserve):
        """add_fixed_ip_to_instance by numeric network id reserves quota.

        Stubs the whole db round-trip (associate pool -> vif lookup ->
        fixed_ip/network update -> nw_info refresh), then verifies that
        Quotas.reserve was charged exactly one fixed IP against the
        instance's project/user.
        """
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                lambda *a, **kw: None)
        self.mox.StubOutWithMock(db, 'network_get')
        self.mox.StubOutWithMock(db, 'network_update')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db,
                'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')

        fixed = dict(test_fixed_ip.fake_fixed_ip,
                     address='192.168.0.101')
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   instance_uuid=mox.IgnoreArg(),
                                   host=None).AndReturn(fixed)

        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])

        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())

        inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
        db.instance_get_by_uuid(self.context,
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(inst)

        # The numeric-id variant resolves the network via network_get
        # (the uuid variant below uses network_get_by_uuid instead).
        db.network_get(mox.IgnoreArg(),
                       mox.IgnoreArg(),
                       project_only=mox.IgnoreArg()
                       ).AndReturn(dict(test_network.fake_network,
                                        **networks[0]))
        db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())

        self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
                                          mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
                                              networks[0]['id'])
        # One fixed IP must have been reserved for the instance's
        # project/user quota.
        exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
                                                             inst)
        reserve.assert_called_once_with(self.context, fixed_ips=1,
                                        project_id=exp_project,
                                        user_id=exp_user)

    @mock.patch('nova.objects.quotas.Quotas.reserve')
    def test_add_fixed_ip_instance_using_uuid_without_vpn(self, reserve):
        """Same as the id variant but the network is looked up by uuid."""
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                lambda *a, **kw: None)
        self.mox.StubOutWithMock(db, 'network_get_by_uuid')
        self.mox.StubOutWithMock(db, 'network_update')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db,
                'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')

        fixed = dict(test_fixed_ip.fake_fixed_ip,
                     address='192.168.0.101')
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   instance_uuid=mox.IgnoreArg(),
                                   host=None).AndReturn(fixed)

        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])

        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())

        inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
        db.instance_get_by_uuid(self.context,
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(inst)

        db.network_get_by_uuid(mox.IgnoreArg(),
                               mox.IgnoreArg()
                               ).AndReturn(dict(test_network.fake_network,
                                                **networks[0]))
        db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())

        self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
                                          mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
                                              networks[0]['uuid'])
        # One fixed IP must have been reserved for the instance's
        # project/user quota.
        exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
                                                             inst)
        reserve.assert_called_once_with(self.context, fixed_ips=1,
                                        project_id=exp_project,
                                        user_id=exp_user)
+
    def test_mini_dns_driver(self):
        """Exercise the instance DNS driver: create/delete/modify/lookups."""
        zone1 = "example.org"
        zone2 = "example.com"
        driver = self.network.instance_dns_manager
        driver.create_entry("hostone", "10.0.0.1", "A", zone1)
        driver.create_entry("hosttwo", "10.0.0.2", "A", zone1)
        driver.create_entry("hostthree", "10.0.0.3", "A", zone1)
        driver.create_entry("hostfour", "10.0.0.4", "A", zone1)
        driver.create_entry("hostfive", "10.0.0.5", "A", zone2)

        # After deleting hostone and repointing hostthree/hostfour to
        # 10.0.0.1, exactly those two names resolve from that address.
        driver.delete_entry("hostone", zone1)
        driver.modify_address("hostfour", "10.0.0.1", zone1)
        driver.modify_address("hostthree", "10.0.0.1", zone1)
        names = driver.get_entries_by_address("10.0.0.1", zone1)
        self.assertEqual(len(names), 2)
        self.assertIn('hostthree', names)
        self.assertIn('hostfour', names)

        # Lookups are scoped per zone.
        names = driver.get_entries_by_address("10.0.0.5", zone2)
        self.assertEqual(len(names), 1)
        self.assertIn('hostfive', names)

        addresses = driver.get_entries_by_name("hosttwo", zone1)
        self.assertEqual(len(addresses), 1)
        self.assertIn('10.0.0.2', addresses)

        # Only known record types are accepted.
        self.assertRaises(exception.InvalidInput,
                          driver.create_entry,
                          "hostname",
                          "10.10.10.10",
                          "invalidtype",
                          zone1)

    def test_mini_dns_driver_with_mixed_case(self):
        """Names with mixed case can be created, found and deleted."""
        zone1 = "example.org"
        driver = self.network.instance_dns_manager
        driver.create_entry("HostTen", "10.0.0.10", "A", zone1)
        addresses = driver.get_entries_by_address("10.0.0.10", zone1)
        self.assertEqual(len(addresses), 1)
        for n in addresses:
            driver.delete_entry(n, zone1)
        addresses = driver.get_entries_by_address("10.0.0.10", zone1)
        self.assertEqual(len(addresses), 0)
+
    @mock.patch('nova.objects.quotas.Quotas.reserve')
    def test_instance_dns(self, reserve):
        """Adding a fixed IP registers DNS entries for name and uuid.

        After add_fixed_ip_to_instance, the instance DNS manager must
        resolve both the display name (HOST) and the instance uuid to the
        allocated address, and one fixed IP must be reserved from quota.
        """
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                lambda *a, **kw: None)
        fixedip = dict(test_fixed_ip.fake_fixed_ip,
                       address='192.168.0.101')
        self.mox.StubOutWithMock(db, 'network_get_by_uuid')
        self.mox.StubOutWithMock(db, 'network_update')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db,
                'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')

        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   instance_uuid=mox.IgnoreArg(),
                                   host=None
                                   ).AndReturn(fixedip)

        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])

        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())

        inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
        db.instance_get_by_uuid(self.context,
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(inst)

        db.network_get_by_uuid(mox.IgnoreArg(),
                               mox.IgnoreArg()
                               ).AndReturn(dict(test_network.fake_network,
                                                **networks[0]))
        db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())

        self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
                                          mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
                                              networks[0]['uuid'])

        # Both the display name and the uuid must map to the address.
        instance_manager = self.network.instance_dns_manager
        addresses = instance_manager.get_entries_by_name(HOST,
                self.network.instance_dns_domain)
        self.assertEqual(len(addresses), 1)
        self.assertEqual(addresses[0], fixedip['address'])
        addresses = instance_manager.get_entries_by_name(FAKEUUID,
                self.network.instance_dns_domain)
        self.assertEqual(len(addresses), 1)
        self.assertEqual(addresses[0], fixedip['address'])
        exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
                                                             inst)
        reserve.assert_called_once_with(self.context, fixed_ips=1,
                                        project_id=exp_project,
                                        user_id=exp_user)
+
+ def test_allocate_floating_ip(self):
+ self.assertIsNone(self.network.allocate_floating_ip(self.context,
+ 1, None))
+
+ def test_deallocate_floating_ip(self):
+ self.assertIsNone(self.network.deallocate_floating_ip(self.context,
+ 1, None))
+
+ def test_associate_floating_ip(self):
+ self.assertIsNone(self.network.associate_floating_ip(self.context,
+ None, None))
+
+ def test_disassociate_floating_ip(self):
+ self.assertIsNone(self.network.disassociate_floating_ip(self.context,
+ None, None))
+
    def test_get_networks_by_uuids_ordering(self):
        """_get_networks_by_uuids returns networks in the requested order.

        The db stub returns `networks` in its natural order; the result
        must instead follow requested_networks ('bbbb...' first, which is
        id 1, then 'aaaa...', id 0).
        """
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')

        requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                              'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
            [dict(test_network.fake_network, **net)
             for net in networks])

        self.mox.ReplayAll()
        res = self.network._get_networks_by_uuids(self.context,
                                                  requested_networks)

        self.assertEqual(res[0]['id'], 1)
        self.assertEqual(res[1]['id'], 0)
+
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.quotas.Quotas.reserve')
    @mock.patch('nova.objects.quotas.ids_from_instance')
    def test_allocate_calculates_quota_auth(self, util_method, reserve,
                                            get_by_uuid):
        """An over-quota reserve surfaces as FixedIpLimitExceeded and the
        quota owner is derived via ids_from_instance."""
        inst = objects.Instance()
        inst['uuid'] = 'nosuch'
        get_by_uuid.return_value = inst
        reserve.side_effect = exception.OverQuota(overs='testing',
                                                  quotas={'fixed_ips': 10},
                                                  headroom={'fixed_ips': 0})
        util_method.return_value = ('foo', 'bar')
        self.assertRaises(exception.FixedIpLimitExceeded,
                          self.network.allocate_fixed_ip,
                          self.context, 123, {'uuid': 'nosuch'})
        util_method.assert_called_once_with(self.context, inst)

    @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address')
    @mock.patch('nova.objects.quotas.Quotas.reserve')
    @mock.patch('nova.objects.quotas.ids_from_instance')
    def test_deallocate_calculates_quota_auth(self, util_method, reserve,
                                              get_by_address):
        """deallocate_fixed_ip derives the quota owner from the instance."""
        inst = objects.Instance(uuid='fake-uuid')
        fip = objects.FixedIP(instance_uuid='fake-uuid',
                              virtual_interface_id=1)
        get_by_address.return_value = fip
        util_method.return_value = ('foo', 'bar')
        # This will fail right after the reserve call when it tries
        # to look up the fake instance we created above
        self.assertRaises(exception.InstanceNotFound,
                          self.network.deallocate_fixed_ip,
                          self.context, '1.2.3.4', instance=inst)
        util_method.assert_called_once_with(self.context, inst)

    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
    def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
                                                     mock_get):
        """A netaddr.IPAddress is stringified before FixedIP.associate."""
        mock_associate.side_effect = test.TestingException
        instance = objects.Instance(context=self.context)
        instance.create()
        mock_get.return_value = instance
        self.assertRaises(test.TestingException,
                          self.network.allocate_fixed_ip,
                          self.context, instance.uuid,
                          {'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
                          address=netaddr.IPAddress('1.2.3.4'))
        # associate must receive the plain string, not the IPAddress.
        mock_associate.assert_called_once_with(self.context,
                                               '1.2.3.4',
                                               instance.uuid,
                                               1)
+
    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.virtual_interface.VirtualInterface'
                '.get_by_instance_and_network')
    @mock.patch('nova.objects.fixed_ip.FixedIP.disassociate')
    @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
    @mock.patch('nova.objects.fixed_ip.FixedIP.save')
    def test_allocate_fixed_ip_cleanup(self,
                                       mock_fixedip_save,
                                       mock_fixedip_associate,
                                       mock_fixedip_disassociate,
                                       mock_vif_get,
                                       mock_instance_get):
        """A failure after association rolls back DNS and the fixed IP.

        _setup_network_on_host is made to blow up; allocate_fixed_ip must
        then delete both DNS entries (display name and uuid) and
        disassociate the already-associated fixed IP.
        """
        address = netaddr.IPAddress('1.2.3.4')

        fip = objects.FixedIP(instance_uuid='fake-uuid',
                              address=address,
                              virtual_interface_id=1)
        mock_fixedip_associate.return_value = fip

        instance = objects.Instance(context=self.context)
        instance.create()
        mock_instance_get.return_value = instance

        mock_vif_get.return_value = vif_obj.VirtualInterface(
            instance_uuid='fake-uuid', id=1)

        # NOTE(review): contextlib.nested is Python 2 only.
        with contextlib.nested(
            mock.patch.object(self.network, '_setup_network_on_host'),
            mock.patch.object(self.network, 'instance_dns_manager'),
            mock.patch.object(self.network,
                '_do_trigger_security_group_members_refresh_for_instance')
        ) as (mock_setup_network, mock_dns_manager, mock_ignored):
            # Force the failure after the fixed IP has been associated.
            mock_setup_network.side_effect = test.TestingException
            self.assertRaises(test.TestingException,
                              self.network.allocate_fixed_ip,
                              self.context, instance.uuid,
                              {'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
                              address=address)

            # Both DNS entries created during allocation must be removed.
            mock_dns_manager.delete_entry.assert_has_calls([
                mock.call(instance.display_name, ''),
                mock.call(instance.uuid, '')
            ])

        # And the fixed IP itself must have been disassociated again.
        mock_fixedip_disassociate.assert_called_once_with(self.context)
+
+
class FlatDHCPNetworkTestCase(test.TestCase):
    """Tests for nova.network.manager.FlatDHCPManager."""

    REQUIRES_LOCKING = True

    def setUp(self):
        super(FlatDHCPNetworkTestCase, self).setUp()
        self.useFixture(test.SampleNetworks())
        self.flags(use_local=True, group='conductor')
        self.network = network_manager.FlatDHCPManager(host=HOST)
        self.network.db = db
        self.context = context.RequestContext('testuser', 'testproject',
                                              is_admin=False)
        self.context_admin = context.RequestContext('testuser', 'testproject',
                                                    is_admin=True)

    @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
    @mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
    @mock.patch('nova.network.linux_net.iptables_manager._apply')
    def test_init_host_iptables_defer_apply(self, iptable_apply,
                                            floating_get_by_host,
                                            fixed_get_by_id):
        """init_host must batch iptables changes into a single _apply."""
        def get_by_id(context, fixed_ip_id, **kwargs):
            # Two fixed IPs on the same bridge/network.
            net = objects.Network(bridge='testbridge',
                                  cidr='192.168.1.0/24')
            if fixed_ip_id == 1:
                return objects.FixedIP(address='192.168.1.4',
                                       network=net)
            elif fixed_ip_id == 2:
                return objects.FixedIP(address='192.168.1.5',
                                       network=net)

        def fake_apply():
            # Counts how often iptables rules are actually flushed.
            fake_apply.count += 1

        fake_apply.count = 0
        ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
        float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
        float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
        float1._context = ctxt
        float2._context = ctxt

        iptable_apply.side_effect = fake_apply
        floating_get_by_host.return_value = [float1, float2]
        fixed_get_by_id.side_effect = get_by_id

        self.network.init_host()
        # Deferred apply: exactly one flush despite two floating IPs.
        self.assertEqual(1, fake_apply.count)
+
+
+class VlanNetworkTestCase(test.TestCase):
+
+ REQUIRES_LOCKING = True
+
    def setUp(self):
        """Build a VlanManager wired to the real db layer with sample
        networks, plus an admin and a non-admin request context."""
        super(VlanNetworkTestCase, self).setUp()
        self.useFixture(test.SampleNetworks())
        self.flags(use_local=True, group='conductor')
        self.network = network_manager.VlanManager(host=HOST)
        self.network.db = db
        self.context = context.RequestContext('testuser', 'testproject',
                                              is_admin=False)
        self.context_admin = context.RequestContext('testuser', 'testproject',
                                                    is_admin=True)

    def test_quota_driver_type(self):
        """VlanManager uses the no-op quota driver class."""
        self.assertEqual(objects.QuotasNoOp,
                         self.network.quotas_cls)
+
    def test_vpn_allocate_fixed_ip(self):
        """vpn=True allocates via fixed_ip_associate(..., reserved=True)."""
        self.mox.StubOutWithMock(db, 'fixed_ip_associate')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db,
                'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')

        fixed = dict(test_fixed_ip.fake_fixed_ip,
                     address='192.168.0.1')
        # The vpn path must ask for the reserved address.
        db.fixed_ip_associate(mox.IgnoreArg(),
                              mox.IgnoreArg(),
                              mox.IgnoreArg(),
                              network_id=mox.IgnoreArg(),
                              reserved=True).AndReturn(fixed)
        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
        db.instance_get_by_uuid(mox.IgnoreArg(),
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst(display_name=HOST,
                                                      uuid=FAKEUUID))
        self.mox.ReplayAll()

        network = objects.Network._from_db_object(
            self.context, objects.Network(),
            dict(test_network.fake_network, **networks[0]))
        network.vpn_private_address = '192.168.0.2'
        self.network.allocate_fixed_ip(self.context, FAKEUUID, network,
                                       vpn=True)

    def test_vpn_allocate_fixed_ip_no_network_id(self):
        """A network dict with id=None cannot satisfy a vpn allocation."""
        network = dict(networks[0])
        network['vpn_private_address'] = '192.168.0.2'
        network['id'] = None
        instance = db.instance_create(self.context, {})
        self.assertRaises(exception.FixedIpNotFoundForNetwork,
                          self.network.allocate_fixed_ip,
                          self.context_admin,
                          instance['uuid'],
                          network,
                          vpn=True)
+
    def test_allocate_fixed_ip(self):
        """Non-vpn allocation pulls an address from the pool."""
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                lambda *a, **kw: None)
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db,
                'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')

        fixed = dict(test_fixed_ip.fake_fixed_ip,
                     address='192.168.0.1')
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   instance_uuid=mox.IgnoreArg(),
                                   host=None).AndReturn(fixed)
        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
        db.instance_get_by_uuid(mox.IgnoreArg(),
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst(display_name=HOST,
                                                      uuid=FAKEUUID))
        self.mox.ReplayAll()

        network = objects.Network._from_db_object(
            self.context, objects.Network(),
            dict(test_network.fake_network, **networks[0]))
        network.vpn_private_address = '192.168.0.2'
        self.network.allocate_fixed_ip(self.context, FAKEUUID, network)

    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
    def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
                                                     mock_get):
        """A netaddr.IPAddress is stringified before FixedIP.associate."""
        mock_associate.side_effect = test.TestingException
        instance = objects.Instance(context=self.context)
        instance.create()
        mock_get.return_value = instance
        self.assertRaises(test.TestingException,
                          self.network.allocate_fixed_ip,
                          self.context, instance.uuid,
                          {'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
                          address=netaddr.IPAddress('1.2.3.4'))
        mock_associate.assert_called_once_with(self.context,
                                               '1.2.3.4',
                                               instance.uuid,
                                               1)

    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
    def test_allocate_fixed_ip_passes_string_address_vpn(self, mock_associate,
                                                         mock_get):
        """The vpn path also stringifies the reserved vpn address."""
        mock_associate.side_effect = test.TestingException
        instance = objects.Instance(context=self.context)
        instance.create()
        mock_get.return_value = instance
        self.assertRaises(test.TestingException,
                          self.network.allocate_fixed_ip,
                          self.context, instance.uuid,
                          {'cidr': '24', 'id': 1, 'uuid': 'nosuch',
                           'vpn_private_address': netaddr.IPAddress('1.2.3.4')
                           }, vpn=1)
        mock_associate.assert_called_once_with(self.context,
                                               '1.2.3.4',
                                               instance.uuid,
                                               1, reserved=True)
+
+ def test_create_networks_too_big(self):
+ self.assertRaises(ValueError, self.network.create_networks, None,
+ num_networks=4094, vlan_start=1)
+
+ def test_create_networks_too_many(self):
+ self.assertRaises(ValueError, self.network.create_networks, None,
+ num_networks=100, vlan_start=1,
+ cidr='192.168.0.1/24', network_size=100)
+
+ def test_duplicate_vlan_raises(self):
+ # VLAN 100 is already used and we force the network to be created
+ # in that vlan (vlan=100).
+ self.assertRaises(exception.DuplicateVlan,
+ self.network.create_networks,
+ self.context_admin, label="fake", num_networks=1,
+ vlan=100, cidr='192.168.0.1/24', network_size=100)
+
    def test_vlan_start(self):
        # VLAN 100 and 101 are used, so this network should be created in 102
        networks = self.network.create_networks(
            self.context_admin, label="fake", num_networks=1,
            vlan_start=100, cidr='192.168.3.1/24',
            network_size=100)

        self.assertEqual(networks[0]["vlan"], 102)

    def test_vlan_start_multiple(self):
        # VLAN 100 and 101 are used, so these networks should be created
        # in 102 and 103
        networks = self.network.create_networks(
            self.context_admin, label="fake", num_networks=2,
            vlan_start=100, cidr='192.168.3.1/24',
            network_size=100)

        self.assertEqual(networks[0]["vlan"], 102)
        self.assertEqual(networks[1]["vlan"], 103)

    def test_vlan_start_used(self):
        # VLAN 100 and 101 are used, but vlan_start=99; allocation still
        # skips ahead to the first free vlan (102).
        networks = self.network.create_networks(
            self.context_admin, label="fake", num_networks=1,
            vlan_start=99, cidr='192.168.3.1/24',
            network_size=100)

        self.assertEqual(networks[0]["vlan"], 102)
+
    def test_vlan_parameter(self):
        # the vlan parameter cannot be greater than 4094
        exc = self.assertRaises(ValueError,
                                self.network.create_networks,
                                self.context_admin, label="fake",
                                num_networks=1,
                                vlan=4095, cidr='192.168.0.1/24')
        error_msg = 'The vlan number cannot be greater than 4094'
        self.assertIn(error_msg, six.text_type(exc))

        # the vlan parameter cannot be less than 1
        exc = self.assertRaises(ValueError,
                                self.network.create_networks,
                                self.context_admin, label="fake",
                                num_networks=1,
                                vlan=0, cidr='192.168.0.1/24')
        error_msg = 'The vlan number cannot be less than 1'
        self.assertIn(error_msg, six.text_type(exc))

    def test_vlan_be_integer(self):
        # vlan must be an integer
        exc = self.assertRaises(ValueError,
                                self.network.create_networks,
                                self.context_admin, label="fake",
                                num_networks=1,
                                vlan='fake', cidr='192.168.0.1/24')
        error_msg = 'vlan must be an integer'
        self.assertIn(error_msg, six.text_type(exc))
+
    @mock.patch('nova.db.network_get')
    def test_validate_networks(self, net_get):
        """Happy path for the vlan manager's validate_networks."""
        def network_get(_context, network_id, project_only='allow_none'):
            return dict(test_network.fake_network, **networks[network_id])

        net_get.side_effect = network_get
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")

        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '192.168.1.100'),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '192.168.0.100')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
            [dict(test_network.fake_network, **net)
             for net in networks])

        # Both address lookups answer with unallocated
        # (instance_uuid=None) fixed IPs on their respective networks.
        db_fixed1 = dict(test_fixed_ip.fake_fixed_ip,
                         network_id=networks[1]['id'],
                         network=dict(test_network.fake_network,
                                      **networks[1]),
                         instance_uuid=None)
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   columns_to_join=mox.IgnoreArg()
                                   ).AndReturn(db_fixed1)
        db_fixed2 = dict(test_fixed_ip.fake_fixed_ip,
                         network_id=networks[0]['id'],
                         network=dict(test_network.fake_network,
                                      **networks[0]),
                         instance_uuid=None)
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   columns_to_join=mox.IgnoreArg()
                                   ).AndReturn(db_fixed2)

        self.mox.ReplayAll()
        # Should not raise.
        self.network.validate_networks(self.context, requested_networks)
+
+ def test_validate_networks_none_requested_networks(self):
+ self.network.validate_networks(self.context, None)
+
+ def test_validate_networks_empty_requested_networks(self):
+ requested_networks = []
+ self.mox.ReplayAll()
+
+ self.network.validate_networks(self.context, requested_networks)
+
    def test_validate_networks_invalid_fixed_ip(self):
        """A malformed fixed IP (five octets) raises FixedIpInvalid."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '192.168.1.100.1'),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '192.168.0.100.1')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
            [dict(test_network.fake_network, **net)
             for net in networks])
        self.mox.ReplayAll()

        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks, self.context,
                          requested_networks)

    def test_validate_networks_empty_fixed_ip(self):
        """An empty-string fixed IP raises FixedIpInvalid."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')

        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', ''),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
            [dict(test_network.fake_network, **net)
             for net in networks])
        self.mox.ReplayAll()

        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks,
                          self.context, requested_networks)

    def test_validate_networks_none_fixed_ip(self):
        """fixed_ip=None means 'no specific address' and validates fine."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')

        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', None),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
            [dict(test_network.fake_network, **net)
             for net in networks])
        self.mox.ReplayAll()
        # Should not raise.
        self.network.validate_networks(self.context, requested_networks)
+
+ def test_floating_ip_owned_by_project(self):
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ # raises because floating_ip project_id is None
+ floating_ip = objects.FloatingIP(address='10.0.0.1',
+ project_id=None)
+ self.assertRaises(exception.Forbidden,
+ self.network._floating_ip_owned_by_project,
+ ctxt,
+ floating_ip)
+
+ # raises because floating_ip project_id is not equal to ctxt project_id
+ floating_ip = objects.FloatingIP(address='10.0.0.1',
+ project_id=ctxt.project_id + '1')
+ self.assertRaises(exception.Forbidden,
+ self.network._floating_ip_owned_by_project,
+ ctxt,
+ floating_ip)
+
+ # does not raise (floating ip is owned by ctxt project)
+ floating_ip = objects.FloatingIP(address='10.0.0.1',
+ project_id=ctxt.project_id)
+ self.network._floating_ip_owned_by_project(ctxt, floating_ip)
+
+ ctxt = context.RequestContext(None, None,
+ is_admin=True)
+
+ # does not raise (ctxt is admin)
+ floating_ip = objects.FloatingIP(address='10.0.0.1',
+ project_id=None)
+ self.network._floating_ip_owned_by_project(ctxt, floating_ip)
+
+ # does not raise (ctxt is admin)
+ floating_ip = objects.FloatingIP(address='10.0.0.1',
+ project_id='testproject')
+ self.network._floating_ip_owned_by_project(ctxt, floating_ip)
+
+ def test_allocate_floating_ip(self):
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ def fake_allocate_address(*args, **kwargs):
+ return {'address': '10.0.0.1', 'project_id': ctxt.project_id}
+
+ self.stubs.Set(self.network.db, 'floating_ip_allocate_address',
+ fake_allocate_address)
+
+ self.network.allocate_floating_ip(ctxt, ctxt.project_id)
+
    @mock.patch('nova.quota.QUOTAS.reserve')
    @mock.patch('nova.quota.QUOTAS.commit')
    def test_deallocate_floating_ip(self, mock_commit, mock_reserve):
        """Deallocation refuses associated IPs, commits quota otherwise."""
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)

        def fake1(*args, **kwargs):
            # Generic stub: a plain fake floating ip record.
            return dict(test_floating_ip.fake_floating_ip)

        def fake2(*args, **kwargs):
            # Floating IP still associated with a fixed IP.
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1', fixed_ip_id=1)

        def fake3(*args, **kwargs):
            # Unassociated floating IP owned by the caller's project.
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1', fixed_ip_id=None,
                        project_id=ctxt.project_id)

        self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1)
        self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)

        # this time should raise because floating ip is associated to fixed_ip
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
        self.assertRaises(exception.FloatingIpAssociated,
                          self.network.deallocate_floating_ip,
                          ctxt,
                          mox.IgnoreArg())

        mock_reserve.return_value = 'reserve'
        # this time should not raise
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
        self.network.deallocate_floating_ip(ctxt, ctxt.project_id)

        # The reservation returned by reserve() must be committed.
        mock_commit.assert_called_once_with(ctxt, 'reserve',
                                            project_id='testproject')
+
+ @mock.patch('nova.db.fixed_ip_get')
+ def test_associate_floating_ip(self, fixed_get):
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ def fake1(*args, **kwargs):
+ return dict(test_fixed_ip.fake_fixed_ip,
+ address='10.0.0.1',
+ network=test_network.fake_network)
+
+ # floating ip that's already associated
+ def fake2(*args, **kwargs):
+ return dict(test_floating_ip.fake_floating_ip,
+ address='10.0.0.1',
+ pool='nova',
+ interface='eth0',
+ fixed_ip_id=1)
+
+ # floating ip that isn't associated
+ def fake3(*args, **kwargs):
+ return dict(test_floating_ip.fake_floating_ip,
+ address='10.0.0.1',
+ pool='nova',
+ interface='eth0',
+ fixed_ip_id=None)
+
+ # fixed ip with remote host
+ def fake4(*args, **kwargs):
+ return dict(test_fixed_ip.fake_fixed_ip,
+ address='10.0.0.1',
+ pool='nova',
+ instance_uuid=FAKEUUID,
+ interface='eth0',
+ network_id=123)
+
+ def fake4_network(*args, **kwargs):
+ return dict(test_network.fake_network,
+ multi_host=False, host='jibberjabber')
+
+ # fixed ip with local host
+ def fake5(*args, **kwargs):
+ return dict(test_fixed_ip.fake_fixed_ip,
+ address='10.0.0.1',
+ pool='nova',
+ instance_uuid=FAKEUUID,
+ interface='eth0',
+ network_id=1234)
+
+ def fake5_network(*args, **kwargs):
+ return dict(test_network.fake_network,
+ multi_host=False, host='testhost')
+
+ def fake6(ctxt, method, **kwargs):
+ self.local = False
+
+ def fake7(*args, **kwargs):
+ self.local = True
+
+ def fake8(*args, **kwargs):
+ raise processutils.ProcessExecutionError('',
+ 'Cannot find device "em0"\n')
+
+ def fake9(*args, **kwargs):
+ raise test.TestingException()
+
+ # raises because interface doesn't exist
+ self.stubs.Set(self.network.db,
+ 'floating_ip_fixed_ip_associate',
+ fake1)
+ self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1)
+ self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake8)
+ self.assertRaises(exception.NoFloatingIpInterface,
+ self.network._associate_floating_ip,
+ ctxt,
+ '1.2.3.4',
+ '1.2.3.5',
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+
+ self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
+
+ # raises because floating_ip is already associated to a fixed_ip
+ self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
+ self.stubs.Set(self.network, 'disassociate_floating_ip', fake9)
+
+ fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
+ address='1.2.3.4',
+ instance_uuid='fake_uuid',
+ network=test_network.fake_network)
+
+ # doesn't raise because we exit early if the address is the same
+ self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), '1.2.3.4')
+
+ # raises because we call disassociate which is mocked
+ self.assertRaises(test.TestingException,
+ self.network.associate_floating_ip,
+ ctxt,
+ mox.IgnoreArg(),
+ 'new')
+
+ self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
+
+ # does not raise and makes call remotely
+ self.local = True
+ self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4)
+ self.stubs.Set(self.network.db, 'network_get', fake4_network)
+ self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
+ lambda **kw: self.network.network_rpcapi.client)
+ self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
+ self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.assertFalse(self.local)
+
+ # does not raise and makes call locally
+ self.local = False
+ self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5)
+ self.stubs.Set(self.network.db, 'network_get', fake5_network)
+ self.stubs.Set(self.network, '_associate_floating_ip', fake7)
+ self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.assertTrue(self.local)
+
+ def test_add_floating_ip_nat_before_bind(self):
+ # Tried to verify order with documented mox record/verify
+ # functionality, but it doesn't seem to work since I can't make it
+ # fail. I'm using stubs and a flag for now, but if this mox feature
+ # can be made to work, it would be a better way to test this.
+ #
+ # self.mox.StubOutWithMock(self.network.driver,
+ # 'ensure_floating_forward')
+ # self.mox.StubOutWithMock(self.network.driver, 'bind_floating_ip')
+ #
+ # self.network.driver.ensure_floating_forward(mox.IgnoreArg(),
+ # mox.IgnoreArg(),
+ # mox.IgnoreArg(),
+ # mox.IgnoreArg())
+ # self.network.driver.bind_floating_ip(mox.IgnoreArg(),
+ # mox.IgnoreArg())
+ # self.mox.ReplayAll()
+
+ nat_called = [False]
+
+ def fake_nat(*args, **kwargs):
+ nat_called[0] = True
+
+ def fake_bind(*args, **kwargs):
+ self.assertTrue(nat_called[0])
+
+ self.stubs.Set(self.network.driver,
+ 'ensure_floating_forward',
+ fake_nat)
+ self.stubs.Set(self.network.driver, 'bind_floating_ip', fake_bind)
+
+ self.network.l3driver.add_floating_ip('fakefloat',
+ 'fakefixed',
+ 'fakeiface',
+ 'fakenet')
+
+ @mock.patch('nova.db.floating_ip_get_all_by_host')
+ @mock.patch('nova.db.fixed_ip_get')
+ def _test_floating_ip_init_host(self, fixed_get, floating_get,
+ public_interface, expected_arg):
+
+ floating_get.return_value = [
+ dict(test_floating_ip.fake_floating_ip,
+ interface='foo',
+ address='1.2.3.4'),
+ dict(test_floating_ip.fake_floating_ip,
+ interface='fakeiface',
+ address='1.2.3.5',
+ fixed_ip_id=1),
+ dict(test_floating_ip.fake_floating_ip,
+ interface='bar',
+ address='1.2.3.6',
+ fixed_ip_id=2),
+ ]
+
+ def fixed_ip_get(_context, fixed_ip_id, get_network):
+ if fixed_ip_id == 1:
+ return dict(test_fixed_ip.fake_fixed_ip,
+ address='1.2.3.4',
+ network=test_network.fake_network)
+ raise exception.FixedIpNotFound(id=fixed_ip_id)
+ fixed_get.side_effect = fixed_ip_get
+
+ self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip')
+ self.flags(public_interface=public_interface)
+ self.network.l3driver.add_floating_ip(netaddr.IPAddress('1.2.3.5'),
+ netaddr.IPAddress('1.2.3.4'),
+ expected_arg,
+ mox.IsA(objects.Network))
+ self.mox.ReplayAll()
+ self.network.init_host_floating_ips()
+ self.mox.UnsetStubs()
+ self.mox.VerifyAll()
+
+ def test_floating_ip_init_host_without_public_interface(self):
+ self._test_floating_ip_init_host(public_interface=False,
+ expected_arg='fakeiface')
+
+ def test_floating_ip_init_host_with_public_interface(self):
+ self._test_floating_ip_init_host(public_interface='fooiface',
+ expected_arg='fooiface')
+
+ def test_disassociate_floating_ip(self):
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ def fake1(*args, **kwargs):
+ pass
+
+ # floating ip that isn't associated
+ def fake2(*args, **kwargs):
+ return dict(test_floating_ip.fake_floating_ip,
+ address='10.0.0.1',
+ pool='nova',
+ interface='eth0',
+ fixed_ip_id=None)
+
+ # floating ip that is associated
+ def fake3(*args, **kwargs):
+ return dict(test_floating_ip.fake_floating_ip,
+ address='10.0.0.1',
+ pool='nova',
+ interface='eth0',
+ fixed_ip_id=1,
+ project_id=ctxt.project_id)
+
+ # fixed ip with remote host
+ def fake4(*args, **kwargs):
+ return dict(test_fixed_ip.fake_fixed_ip,
+ address='10.0.0.1',
+ pool='nova',
+ instance_uuid=FAKEUUID,
+ interface='eth0',
+ network_id=123)
+
+ def fake4_network(*args, **kwargs):
+ return dict(test_network.fake_network,
+ multi_host=False,
+ host='jibberjabber')
+
+ # fixed ip with local host
+ def fake5(*args, **kwargs):
+ return dict(test_fixed_ip.fake_fixed_ip,
+ address='10.0.0.1',
+ pool='nova',
+ instance_uuid=FAKEUUID,
+ interface='eth0',
+ network_id=1234)
+
+ def fake5_network(*args, **kwargs):
+ return dict(test_network.fake_network,
+ multi_host=False, host='testhost')
+
+ def fake6(ctxt, method, **kwargs):
+ self.local = False
+
+ def fake7(*args, **kwargs):
+ self.local = True
+
+ def fake8(*args, **kwargs):
+ return dict(test_floating_ip.fake_floating_ip,
+ address='10.0.0.1',
+ pool='nova',
+ interface='eth0',
+ fixed_ip_id=1,
+ auto_assigned=True,
+ project_id=ctxt.project_id)
+
+ self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
+
+ # raises because floating_ip is not associated to a fixed_ip
+ self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
+ self.assertRaises(exception.FloatingIpNotAssociated,
+ self.network.disassociate_floating_ip,
+ ctxt,
+ mox.IgnoreArg())
+
+ self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
+
+ # does not raise and makes call remotely
+ self.local = True
+ self.stubs.Set(self.network.db, 'fixed_ip_get', fake4)
+ self.stubs.Set(self.network.db, 'network_get', fake4_network)
+ self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
+ lambda **kw: self.network.network_rpcapi.client)
+ self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
+ self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
+ self.assertFalse(self.local)
+
+ # does not raise and makes call locally
+ self.local = False
+ self.stubs.Set(self.network.db, 'fixed_ip_get', fake5)
+ self.stubs.Set(self.network.db, 'network_get', fake5_network)
+ self.stubs.Set(self.network, '_disassociate_floating_ip', fake7)
+ self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
+ self.assertTrue(self.local)
+
+ # raises because auto_assigned floating IP cannot be disassociated
+ self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake8)
+ self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
+ self.network.disassociate_floating_ip,
+ ctxt,
+ mox.IgnoreArg())
+
+ def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
+ self.stubs.Set(self.network,
+ '_do_trigger_security_group_members_refresh_for_instance',
+ lambda *a, **kw: None)
+ self.mox.StubOutWithMock(db, 'network_get')
+ self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
+ self.mox.StubOutWithMock(db,
+ 'virtual_interface_get_by_instance_and_network')
+ self.mox.StubOutWithMock(db, 'fixed_ip_update')
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
+
+ db.fixed_ip_update(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
+
+ fixed = dict(test_fixed_ip.fake_fixed_ip,
+ address='192.168.0.101')
+ db.fixed_ip_associate_pool(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ instance_uuid=mox.IgnoreArg(),
+ host=None).AndReturn(fixed)
+ db.network_get(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ project_only=mox.IgnoreArg()
+ ).AndReturn(dict(test_network.fake_network,
+ **networks[0]))
+ db.instance_get_by_uuid(mox.IgnoreArg(),
+ mox.IgnoreArg(), use_slave=False,
+ columns_to_join=['info_cache',
+ 'security_groups']
+ ).AndReturn(fake_inst(display_name=HOST,
+ uuid=FAKEUUID))
+ self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
+ networks[0]['id'])
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ @mock.patch('nova.db.network_get')
+ def test_ip_association_and_allocation_of_other_project(self, net_get,
+ fixed_get):
+ """Makes sure that we cannot deallocaate or disassociate
+ a public ip of other project.
+ """
+ net_get.return_value = dict(test_network.fake_network,
+ **networks[1])
+
+ context1 = context.RequestContext('user', 'project1')
+ context2 = context.RequestContext('user', 'project2')
+
+ float_ip = db.floating_ip_create(context1.elevated(),
+ {'address': '1.2.3.4',
+ 'project_id': context1.project_id})
+
+ float_addr = float_ip['address']
+
+ instance = db.instance_create(context1,
+ {'project_id': 'project1'})
+
+ fix_addr = db.fixed_ip_associate_pool(context1.elevated(),
+ 1, instance['uuid']).address
+ fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
+ address=fix_addr,
+ instance_uuid=instance.uuid,
+ network=dict(test_network.fake_network,
+ **networks[1]))
+
+ # Associate the IP with non-admin user context
+ self.assertRaises(exception.Forbidden,
+ self.network.associate_floating_ip,
+ context2,
+ float_addr,
+ fix_addr)
+
+ # Deallocate address from other project
+ self.assertRaises(exception.Forbidden,
+ self.network.deallocate_floating_ip,
+ context2,
+ float_addr)
+
+ # Now Associates the address to the actual project
+ self.network.associate_floating_ip(context1, float_addr, fix_addr)
+
+ # Now try dis-associating from other project
+ self.assertRaises(exception.Forbidden,
+ self.network.disassociate_floating_ip,
+ context2,
+ float_addr)
+
+ # Clean up the ip addresses
+ self.network.disassociate_floating_ip(context1, float_addr)
+ self.network.deallocate_floating_ip(context1, float_addr)
+ self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
+ db.floating_ip_destroy(context1.elevated(), float_addr)
+ db.fixed_ip_disassociate(context1.elevated(), fix_addr)
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.fixed_ip_update')
+ def test_deallocate_fixed(self, fixed_update, net_get, fixed_get):
+ """Verify that release is called properly.
+
+ Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't return
+ """
+ net_get.return_value = dict(test_network.fake_network,
+ **networks[1])
+
+ def vif_get(_context, _vif_id):
+ return vifs[0]
+
+ self.stubs.Set(db, 'virtual_interface_get', vif_get)
+ context1 = context.RequestContext('user', 'project1')
+
+ instance = db.instance_create(context1,
+ {'project_id': 'project1'})
+
+ elevated = context1.elevated()
+ fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
+ fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
+ address=fix_addr.address,
+ instance_uuid=instance.uuid,
+ allocated=True,
+ virtual_interface_id=3,
+ network=dict(test_network.fake_network,
+ **networks[1]))
+
+ self.flags(force_dhcp_release=True)
+ self.mox.StubOutWithMock(linux_net, 'release_dhcp')
+ linux_net.release_dhcp(networks[1]['bridge'], fix_addr.address,
+ 'DE:AD:BE:EF:00:00')
+ self.mox.ReplayAll()
+ self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
+ fixed_update.assert_called_once_with(context1, fix_addr.address,
+ {'allocated': False})
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.fixed_ip_update')
+ def test_deallocate_fixed_with_dhcp_exception(self, fixed_update, net_get,
+ fixed_get):
+ net_get.return_value = dict(test_network.fake_network,
+ **networks[1])
+
+ def vif_get(_context, _vif_id):
+ return vifs[0]
+
+ with contextlib.nested(
+ mock.patch.object(db, 'virtual_interface_get', vif_get),
+ mock.patch.object(
+ utils, 'execute',
+ side_effect=processutils.ProcessExecutionError()),
+ ) as (_vif_get, _execute):
+ context1 = context.RequestContext('user', 'project1')
+
+ instance = db.instance_create(context1,
+ {'project_id': 'project1'})
+
+ elevated = context1.elevated()
+ fix_addr = db.fixed_ip_associate_pool(elevated, 1,
+ instance['uuid'])
+ fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
+ address=fix_addr.address,
+ instance_uuid=instance.uuid,
+ allocated=True,
+ virtual_interface_id=3,
+ network=dict(
+ test_network.fake_network,
+ **networks[1]))
+ self.flags(force_dhcp_release=True)
+ self.network.deallocate_fixed_ip(context1, fix_addr.address,
+ 'fake')
+ fixed_update.assert_called_once_with(context1, fix_addr.address,
+ {'allocated': False})
+ _execute.assert_called_once_with('dhcp_release',
+ networks[1]['bridge'],
+ fix_addr.address,
+ 'DE:AD:BE:EF:00:00',
+ run_as_root=True)
+
+ def test_deallocate_fixed_deleted(self):
+ # Verify doesn't deallocate deleted fixed_ip from deleted network.
+
+ def teardown_network_on_host(_context, network):
+ if network['id'] == 0:
+ raise test.TestingException()
+
+ self.stubs.Set(self.network, '_teardown_network_on_host',
+ teardown_network_on_host)
+
+ context1 = context.RequestContext('user', 'project1')
+ elevated = context1.elevated()
+
+ instance = db.instance_create(context1,
+ {'project_id': 'project1'})
+ network = db.network_create_safe(elevated, networks[0])
+
+ _fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
+ fix_addr = _fix_addr.address
+ db.fixed_ip_update(elevated, fix_addr, {'deleted': 1})
+ elevated.read_deleted = 'yes'
+ delfixed = db.fixed_ip_get_by_address(elevated, fix_addr)
+ values = {'address': fix_addr,
+ 'network_id': network.id,
+ 'instance_uuid': delfixed['instance_uuid']}
+ db.fixed_ip_create(elevated, values)
+ elevated.read_deleted = 'no'
+ elevated.read_deleted = 'yes'
+
+ deallocate = self.network.deallocate_fixed_ip
+ self.assertRaises(test.TestingException, deallocate, context1,
+ fix_addr, 'fake')
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.fixed_ip_update')
+ def test_deallocate_fixed_no_vif(self, fixed_update, net_get, fixed_get):
+ """Verify that deallocate doesn't raise when no vif is returned.
+
+ Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't return
+ """
+ net_get.return_value = dict(test_network.fake_network,
+ **networks[1])
+
+ def vif_get(_context, _vif_id):
+ return None
+
+ self.stubs.Set(db, 'virtual_interface_get', vif_get)
+ context1 = context.RequestContext('user', 'project1')
+
+ instance = db.instance_create(context1,
+ {'project_id': 'project1'})
+
+ elevated = context1.elevated()
+ fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
+ fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
+ address=fix_addr.address,
+ allocated=True,
+ virtual_interface_id=3,
+ instance_uuid=instance.uuid,
+ network=dict(test_network.fake_network,
+ **networks[1]))
+ self.flags(force_dhcp_release=True)
+ fixed_update.return_value = fixed_get.return_value
+ self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
+ fixed_update.assert_called_once_with(context1, fix_addr.address,
+ {'allocated': False})
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.fixed_ip_update')
+ def test_fixed_ip_cleanup_fail(self, fixed_update, net_get, fixed_get):
+ # Verify IP is not deallocated if the security group refresh fails.
+ net_get.return_value = dict(test_network.fake_network,
+ **networks[1])
+ context1 = context.RequestContext('user', 'project1')
+
+ instance = db.instance_create(context1,
+ {'project_id': 'project1'})
+
+ elevated = context1.elevated()
+ fix_addr = objects.FixedIP.associate_pool(elevated, 1,
+ instance['uuid'])
+
+ def fake_refresh(instance_uuid):
+ raise test.TestingException()
+ self.stubs.Set(self.network,
+ '_do_trigger_security_group_members_refresh_for_instance',
+ fake_refresh)
+ fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
+ address=fix_addr.address,
+ allocated=True,
+ virtual_interface_id=3,
+ instance_uuid=instance.uuid,
+ network=dict(test_network.fake_network,
+ **networks[1]))
+ self.assertRaises(test.TestingException,
+ self.network.deallocate_fixed_ip,
+ context1, str(fix_addr.address), 'fake')
+ self.assertFalse(fixed_update.called)
+
+ def test_get_networks_by_uuids_ordering(self):
+ self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
+
+ requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
+ 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
+ db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(
+ [dict(test_network.fake_network, **net)
+ for net in networks])
+
+ self.mox.ReplayAll()
+ res = self.network._get_networks_by_uuids(self.context,
+ requested_networks)
+
+ self.assertEqual(res[0]['id'], 1)
+ self.assertEqual(res[1]['id'], 0)
+
+ @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
+ @mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
+ @mock.patch('nova.network.linux_net.iptables_manager._apply')
+ def test_init_host_iptables_defer_apply(self, iptable_apply,
+ floating_get_by_host,
+ fixed_get_by_id):
+ def get_by_id(context, fixed_ip_id, **kwargs):
+ net = objects.Network(bridge='testbridge',
+ cidr='192.168.1.0/24')
+ if fixed_ip_id == 1:
+ return objects.FixedIP(address='192.168.1.4',
+ network=net)
+ elif fixed_ip_id == 2:
+ return objects.FixedIP(address='192.168.1.5',
+ network=net)
+
+ def fake_apply():
+ fake_apply.count += 1
+
+ fake_apply.count = 0
+ ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
+ float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
+ float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
+ float1._context = ctxt
+ float2._context = ctxt
+
+ iptable_apply.side_effect = fake_apply
+ floating_get_by_host.return_value = [float1, float2]
+ fixed_get_by_id.side_effect = get_by_id
+
+ self.network.init_host()
+ self.assertEqual(1, fake_apply.count)
+
+
+class _TestDomainObject(object):
+ def __init__(self, **kwargs):
+ for k, v in kwargs.iteritems():
+ self.__setattr__(k, v)
+
+
+class FakeNetwork(object):
+ def __init__(self, **kwargs):
+ self.vlan = None
+ for k, v in kwargs.iteritems():
+ self.__setattr__(k, v)
+
+ def __getitem__(self, item):
+ return getattr(self, item)
+
+
+class CommonNetworkTestCase(test.TestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(CommonNetworkTestCase, self).setUp()
+ self.context = context.RequestContext('fake', 'fake')
+ self.flags(ipv6_backend='rfc2462')
+ self.flags(use_local=True, group='conductor')
+ ipv6.reset_backend()
+
+ def test_validate_instance_zone_for_dns_domain(self):
+ domain = 'example.com'
+ az = 'test_az'
+ domains = {
+ domain: _TestDomainObject(
+ domain=domain,
+ availability_zone=az)}
+
+ def dnsdomain_get(context, instance_domain):
+ return domains.get(instance_domain)
+
+ self.stubs.Set(db, 'dnsdomain_get', dnsdomain_get)
+ fake_instance = {'uuid': FAKEUUID,
+ 'availability_zone': az}
+
+ manager = network_manager.NetworkManager()
+ res = manager._validate_instance_zone_for_dns_domain(self.context,
+ fake_instance)
+ self.assertTrue(res)
+
+ def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None,
+ extra_reserved=None, bottom_reserved=0,
+ top_reserved=0):
+ return None
+
+ def test_get_instance_nw_info_client_exceptions(self):
+ manager = network_manager.NetworkManager()
+ self.mox.StubOutWithMock(manager.db,
+ 'fixed_ip_get_by_instance')
+ manager.db.fixed_ip_get_by_instance(
+ self.context, FAKEUUID).AndRaise(exception.InstanceNotFound(
+ instance_id=FAKEUUID))
+ self.mox.ReplayAll()
+ self.assertRaises(messaging.ExpectedException,
+ manager.get_instance_nw_info,
+ self.context, FAKEUUID, 'fake_rxtx_factor', HOST)
+
+ @mock.patch('nova.db.instance_get')
+ @mock.patch('nova.db.fixed_ip_get_by_instance')
+ def test_deallocate_for_instance_passes_host_info(self, fixed_get,
+ instance_get):
+ manager = fake_network.FakeNetworkManager()
+ db = manager.db
+ instance_get.return_value = fake_inst(uuid='ignoreduuid')
+ db.virtual_interface_delete_by_instance = lambda _x, _y: None
+ ctx = context.RequestContext('igonre', 'igonre')
+
+ fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip,
+ address='1.2.3.4',
+ network_id=123)]
+
+ manager.deallocate_for_instance(
+ ctx, instance=objects.Instance._from_db_object(self.context,
+ objects.Instance(), instance_get.return_value))
+
+ self.assertEqual([
+ (ctx, '1.2.3.4', 'fake-host')
+ ], manager.deallocate_fixed_ip_calls)
+
+ def test_deallocate_for_instance_with_requested_networks(self):
+ manager = fake_network.FakeNetworkManager()
+ db = manager.db
+ db.virtual_interface_delete_by_instance = mock.Mock()
+ ctx = context.RequestContext('igonre', 'igonre')
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest.from_tuple(t)
+ for t in [('123', '1.2.3.4'), ('123', '4.3.2.1')]])
+ manager.deallocate_for_instance(
+ ctx,
+ instance=fake_instance.fake_instance_obj(ctx),
+ requested_networks=requested_networks)
+
+ self.assertEqual([
+ (ctx, '1.2.3.4', 'fake-host'), (ctx, '4.3.2.1', 'fake-host')
+ ], manager.deallocate_fixed_ip_calls)
+
+ @mock.patch('nova.db.fixed_ip_get_by_instance')
+ @mock.patch('nova.db.fixed_ip_disassociate')
+ def test_remove_fixed_ip_from_instance(self, disassociate, get):
+ manager = fake_network.FakeNetworkManager()
+ get.return_value = [
+ dict(test_fixed_ip.fake_fixed_ip, **x)
+ for x in manager.db.fixed_ip_get_by_instance(None,
+ FAKEUUID)]
+ manager.remove_fixed_ip_from_instance(self.context, FAKEUUID,
+ HOST,
+ '10.0.0.1')
+
+ self.assertEqual(manager.deallocate_called, '10.0.0.1')
+ disassociate.assert_called_once_with(self.context, '10.0.0.1')
+
+ @mock.patch('nova.db.fixed_ip_get_by_instance')
+ def test_remove_fixed_ip_from_instance_bad_input(self, get):
+ manager = fake_network.FakeNetworkManager()
+ get.return_value = []
+ self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
+ manager.remove_fixed_ip_from_instance,
+ self.context, 99, HOST, 'bad input')
+
+ def test_validate_cidrs(self):
+ manager = fake_network.FakeNetworkManager()
+ nets = manager.create_networks(self.context.elevated(), 'fake',
+ '192.168.0.0/24',
+ False, 1, 256, None, None, None,
+ None, None)
+ self.assertEqual(1, len(nets))
+ cidrs = [str(net['cidr']) for net in nets]
+ self.assertIn('192.168.0.0/24', cidrs)
+
+ def test_validate_cidrs_split_exact_in_half(self):
+ manager = fake_network.FakeNetworkManager()
+ nets = manager.create_networks(self.context.elevated(), 'fake',
+ '192.168.0.0/24',
+ False, 2, 128, None, None, None,
+ None, None)
+ self.assertEqual(2, len(nets))
+ cidrs = [str(net['cidr']) for net in nets]
+ self.assertIn('192.168.0.0/25', cidrs)
+ self.assertIn('192.168.0.128/25', cidrs)
+
+ @mock.patch('nova.db.network_get_all')
+ def test_validate_cidrs_split_cidr_in_use_middle_of_range(self, get_all):
+ manager = fake_network.FakeNetworkManager()
+ get_all.return_value = [dict(test_network.fake_network,
+ id=1, cidr='192.168.2.0/24')]
+ nets = manager.create_networks(self.context.elevated(), 'fake',
+ '192.168.0.0/16',
+ False, 4, 256, None, None, None,
+ None, None)
+ self.assertEqual(4, len(nets))
+ cidrs = [str(net['cidr']) for net in nets]
+ exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
+ '192.168.4.0/24']
+ for exp_cidr in exp_cidrs:
+ self.assertIn(exp_cidr, cidrs)
+ self.assertNotIn('192.168.2.0/24', cidrs)
+
+ @mock.patch('nova.db.network_get_all')
+ def test_validate_cidrs_smaller_subnet_in_use(self, get_all):
+ manager = fake_network.FakeNetworkManager()
+ get_all.return_value = [dict(test_network.fake_network,
+ id=1, cidr='192.168.2.9/25')]
+ # CidrConflict: requested cidr (192.168.2.0/24) conflicts with
+ # existing smaller cidr
+ args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
+ 1, 256, None, None, None, None, None)
+ self.assertRaises(exception.CidrConflict,
+ manager.create_networks, *args)
+
+ @mock.patch('nova.db.network_get_all')
+ def test_validate_cidrs_split_smaller_cidr_in_use(self, get_all):
+ manager = fake_network.FakeNetworkManager()
+ get_all.return_value = [dict(test_network.fake_network,
+ id=1, cidr='192.168.2.0/25')]
+ nets = manager.create_networks(self.context.elevated(), 'fake',
+ '192.168.0.0/16',
+ False, 4, 256, None, None, None, None,
+ None)
+ self.assertEqual(4, len(nets))
+ cidrs = [str(net['cidr']) for net in nets]
+ exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
+ '192.168.4.0/24']
+ for exp_cidr in exp_cidrs:
+ self.assertIn(exp_cidr, cidrs)
+ self.assertNotIn('192.168.2.0/24', cidrs)
+
+ @mock.patch('nova.db.network_get_all')
+ def test_validate_cidrs_split_smaller_cidr_in_use2(self, get_all):
+ manager = fake_network.FakeNetworkManager()
+ self.mox.StubOutWithMock(manager.db, 'network_get_all')
+ get_all.return_value = [dict(test_network.fake_network, id=1,
+ cidr='192.168.2.9/29')]
+ nets = manager.create_networks(self.context.elevated(), 'fake',
+ '192.168.2.0/24',
+ False, 3, 32, None, None, None, None,
+ None)
+ self.assertEqual(3, len(nets))
+ cidrs = [str(net['cidr']) for net in nets]
+ exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
+ for exp_cidr in exp_cidrs:
+ self.assertIn(exp_cidr, cidrs)
+ self.assertNotIn('192.168.2.0/27', cidrs)
+
+ @mock.patch('nova.db.network_get_all')
+ def test_validate_cidrs_split_all_in_use(self, get_all):
+ manager = fake_network.FakeNetworkManager()
+ in_use = [dict(test_network.fake_network, **values) for values in
+ [{'id': 1, 'cidr': '192.168.2.9/29'},
+ {'id': 2, 'cidr': '192.168.2.64/26'},
+ {'id': 3, 'cidr': '192.168.2.128/26'}]]
+ get_all.return_value = in_use
+ args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
+ 3, 64, None, None, None, None, None)
+ # CidrConflict: Not enough subnets avail to satisfy requested num_
+ # networks - some subnets in requested range already
+ # in use
+ self.assertRaises(exception.CidrConflict,
+ manager.create_networks, *args)
+
+ def test_validate_cidrs_one_in_use(self):
+ manager = fake_network.FakeNetworkManager()
+ args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
+ None, None, None)
+ # ValueError: network_size * num_networks exceeds cidr size
+ self.assertRaises(ValueError, manager.create_networks, *args)
+
+ @mock.patch('nova.db.network_get_all')
+ def test_validate_cidrs_already_used(self, get_all):
+ manager = fake_network.FakeNetworkManager()
+ get_all.return_value = [dict(test_network.fake_network,
+ cidr='192.168.0.0/24')]
+ # CidrConflict: cidr already in use
+ args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
+ 1, 256, None, None, None, None, None)
+ self.assertRaises(exception.CidrConflict,
+ manager.create_networks, *args)
+
+ def test_validate_cidrs_too_many(self):
+ manager = fake_network.FakeNetworkManager()
+ args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
+ None, None, None)
+ # ValueError: Not enough subnets avail to satisfy requested
+ # num_networks
+ self.assertRaises(ValueError, manager.create_networks, *args)
+
+ def test_validate_cidrs_split_partial(self):
+ manager = fake_network.FakeNetworkManager()
+ nets = manager.create_networks(self.context.elevated(), 'fake',
+ '192.168.0.0/16',
+ False, 2, 256, None, None, None, None,
+ None)
+ returned_cidrs = [str(net['cidr']) for net in nets]
+ self.assertIn('192.168.0.0/24', returned_cidrs)
+ self.assertIn('192.168.1.0/24', returned_cidrs)
+
+ @mock.patch('nova.db.network_get_all')
+ def test_validate_cidrs_conflict_existing_supernet(self, get_all):
+ manager = fake_network.FakeNetworkManager()
+ get_all.return_value = [dict(test_network.fake_network,
+ id=1, cidr='192.168.0.0/8')]
+ args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
+ 1, 256, None, None, None, None, None)
+ # CidrConflict: requested cidr (192.168.0.0/24) conflicts
+ # with existing supernet
+ self.assertRaises(exception.CidrConflict,
+ manager.create_networks, *args)
+
+ def test_create_networks(self):
+ cidr = '192.168.0.0/24'
+ manager = fake_network.FakeNetworkManager()
+ self.stubs.Set(manager, '_create_fixed_ips',
+ self.fake_create_fixed_ips)
+ args = [self.context.elevated(), 'foo', cidr, None, 1, 256,
+ 'fd00::/48', None, None, None, None, None]
+ self.assertTrue(manager.create_networks(*args))
+
+ @mock.patch('nova.db.network_get_all')
+ def test_create_networks_cidr_already_used(self, get_all):
+ manager = fake_network.FakeNetworkManager()
+ get_all.return_value = [dict(test_network.fake_network,
+ id=1, cidr='192.168.0.0/24')]
+ args = [self.context.elevated(), 'foo', '192.168.0.0/24', None, 1, 256,
+ 'fd00::/48', None, None, None, None, None]
+ self.assertRaises(exception.CidrConflict,
+ manager.create_networks, *args)
+
+ def test_create_networks_many(self):
+ cidr = '192.168.0.0/16'
+ manager = fake_network.FakeNetworkManager()
+ self.stubs.Set(manager, '_create_fixed_ips',
+ self.fake_create_fixed_ips)
+ args = [self.context.elevated(), 'foo', cidr, None, 10, 256,
+ 'fd00::/48', None, None, None, None, None]
+ self.assertTrue(manager.create_networks(*args))
+
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.fixed_ips_by_virtual_interface')
+ def test_get_instance_uuids_by_ip_regex(self, fixed_get, network_get):
+ manager = fake_network.FakeNetworkManager(self.stubs)
+ fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
+ _vifs = manager.db.virtual_interface_get_all(None)
+ fake_context = context.RequestContext('user', 'project')
+ network_get.return_value = dict(test_network.fake_network,
+ **manager.db.network_get(None, 1))
+
+        # Greedily get everything
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip': '.*'})
+ self.assertEqual(len(res), len(_vifs))
+
+ # Doesn't exist
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip': '10.0.0.1'})
+ self.assertFalse(res)
+
+ # Get instance 1
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip': '172.16.0.2'})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
+
+ # Get instance 2
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip': '173.16.0.2'})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
+
+ # Get instance 0 and 1
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip': '172.16.0.*'})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 2)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
+ self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
+
+ # Get instance 1 and 2
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip': '17..16.0.2'})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 2)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
+ self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
+
+ @mock.patch('nova.db.network_get')
+ def test_get_instance_uuids_by_ipv6_regex(self, network_get):
+ manager = fake_network.FakeNetworkManager(self.stubs)
+ _vifs = manager.db.virtual_interface_get_all(None)
+ fake_context = context.RequestContext('user', 'project')
+
+ def _network_get(context, network_id, **args):
+ return dict(test_network.fake_network,
+ **manager.db.network_get(context, network_id))
+ network_get.side_effect = _network_get
+
+        # Greedily get everything
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip6': '.*'})
+ self.assertEqual(len(res), len(_vifs))
+
+ # Doesn't exist
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip6': '.*1034.*'})
+ self.assertFalse(res)
+
+ # Get instance 1
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip6': '2001:.*2'})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
+
+ # Get instance 2
+ ip6 = '2001:db8:69:1f:dead:beff:feff:ef03'
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip6': ip6})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
+
+ # Get instance 0 and 1
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip6': '.*ef0[1,2]'})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 2)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
+ self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
+
+ # Get instance 1 and 2
+ ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.'
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'ip6': ip6})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 2)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
+ self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
+
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.fixed_ips_by_virtual_interface')
+ def test_get_instance_uuids_by_ip(self, fixed_get, network_get):
+ manager = fake_network.FakeNetworkManager(self.stubs)
+ fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
+ _vifs = manager.db.virtual_interface_get_all(None)
+ fake_context = context.RequestContext('user', 'project')
+ network_get.return_value = dict(test_network.fake_network,
+ **manager.db.network_get(None, 1))
+
+ # No regex for you!
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'fixed_ip': '.*'})
+ self.assertFalse(res)
+
+ # Doesn't exist
+ ip = '10.0.0.1'
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'fixed_ip': ip})
+ self.assertFalse(res)
+
+ # Get instance 1
+ ip = '172.16.0.2'
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'fixed_ip': ip})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
+
+ # Get instance 2
+ ip = '173.16.0.2'
+ res = manager.get_instance_uuids_by_ip_filter(fake_context,
+ {'fixed_ip': ip})
+ self.assertTrue(res)
+ self.assertEqual(len(res), 1)
+ self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
+
+ @mock.patch('nova.db.network_get_by_uuid')
+ def test_get_network(self, get):
+ manager = fake_network.FakeNetworkManager()
+ fake_context = context.RequestContext('user', 'project')
+ get.return_value = dict(test_network.fake_network, **networks[0])
+ uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ network = manager.get_network(fake_context, uuid)
+ self.assertEqual(network['uuid'], uuid)
+
+ @mock.patch('nova.db.network_get_by_uuid')
+ def test_get_network_not_found(self, get):
+ manager = fake_network.FakeNetworkManager()
+ fake_context = context.RequestContext('user', 'project')
+ get.side_effect = exception.NetworkNotFoundForUUID(uuid='foo')
+ uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ self.assertRaises(exception.NetworkNotFound,
+ manager.get_network, fake_context, uuid)
+
+ @mock.patch('nova.db.network_get_all')
+ def test_get_all_networks(self, get_all):
+ manager = fake_network.FakeNetworkManager()
+ fake_context = context.RequestContext('user', 'project')
+ get_all.return_value = [dict(test_network.fake_network, **net)
+ for net in networks]
+ output = manager.get_all_networks(fake_context)
+ self.assertEqual(len(networks), 2)
+ self.assertEqual(output[0]['uuid'],
+ 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
+ self.assertEqual(output[1]['uuid'],
+ 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')
+
+ @mock.patch('nova.db.network_get_by_uuid')
+ @mock.patch('nova.db.network_disassociate')
+ def test_disassociate_network(self, disassociate, get):
+ manager = fake_network.FakeNetworkManager()
+ disassociate.return_value = True
+ fake_context = context.RequestContext('user', 'project')
+ get.return_value = dict(test_network.fake_network,
+ **networks[0])
+ uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ manager.disassociate_network(fake_context, uuid)
+
+ @mock.patch('nova.db.network_get_by_uuid')
+ def test_disassociate_network_not_found(self, get):
+ manager = fake_network.FakeNetworkManager()
+ fake_context = context.RequestContext('user', 'project')
+ get.side_effect = exception.NetworkNotFoundForUUID(uuid='fake')
+ uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
+ self.assertRaises(exception.NetworkNotFound,
+ manager.disassociate_network, fake_context, uuid)
+
+ def _test_init_host_dynamic_fixed_range(self, net_manager):
+ self.flags(fake_network=True,
+ routing_source_ip='172.16.0.1',
+ metadata_host='172.16.0.1',
+ public_interface='eth1',
+ dmz_cidr=['10.0.3.0/24'])
+ binary_name = linux_net.get_binary_name()
+
+ # Stub out calls we don't want to really run, mock the db
+ self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None)
+ self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips',
+ lambda *args: None)
+ self.stubs.Set(net_manager.l3driver, 'initialize_gateway',
+ lambda *args: None)
+ self.mox.StubOutWithMock(db, 'network_get_all_by_host')
+ fake_networks = [dict(test_network.fake_network, **n)
+ for n in networks]
+ db.network_get_all_by_host(mox.IgnoreArg(),
+ mox.IgnoreArg()
+ ).MultipleTimes().AndReturn(fake_networks)
+ self.mox.ReplayAll()
+
+ net_manager.init_host()
+
+ # Get the iptables rules that got created
+ current_lines = []
+ new_lines = linux_net.iptables_manager._modify_rules(current_lines,
+ linux_net.iptables_manager.ipv4['nat'],
+ table_name='nat')
+
+ expected_lines = ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
+ '-j SNAT --to-source %s -o %s'
+ % (binary_name, networks[0]['cidr'],
+ CONF.routing_source_ip,
+ CONF.public_interface),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
+ % (binary_name, networks[0]['cidr'],
+ CONF.metadata_host),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
+ % (binary_name, networks[0]['cidr'],
+ CONF.dmz_cidr[0]),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
+ '--ctstate DNAT -j ACCEPT' % (binary_name,
+ networks[0]['cidr'],
+ networks[0]['cidr']),
+ '[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
+ '-j SNAT --to-source %s -o %s'
+ % (binary_name, networks[1]['cidr'],
+ CONF.routing_source_ip,
+ CONF.public_interface),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
+ % (binary_name, networks[1]['cidr'],
+ CONF.metadata_host),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
+ % (binary_name, networks[1]['cidr'],
+ CONF.dmz_cidr[0]),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
+ '--ctstate DNAT -j ACCEPT' % (binary_name,
+ networks[1]['cidr'],
+ networks[1]['cidr'])]
+
+ # Compare the expected rules against the actual ones
+ for line in expected_lines:
+ self.assertIn(line, new_lines)
+
+ # Add an additional network and ensure the rules get configured
+ new_network = {'id': 2,
+ 'uuid': 'cccccccc-cccc-cccc-cccc-cccccccc',
+ 'label': 'test2',
+ 'injected': False,
+ 'multi_host': False,
+ 'cidr': '192.168.2.0/24',
+ 'cidr_v6': '2001:dba::/64',
+ 'gateway_v6': '2001:dba::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge': 'fa1',
+ 'bridge_interface': 'fake_fa1',
+ 'gateway': '192.168.2.1',
+ 'dhcp_server': '192.168.2.1',
+ 'broadcast': '192.168.2.255',
+ 'dns1': '192.168.2.1',
+ 'dns2': '192.168.2.2',
+ 'vlan': None,
+ 'host': HOST,
+ 'project_id': 'fake_project',
+ 'vpn_public_address': '192.168.2.2',
+ 'vpn_public_port': '22',
+ 'vpn_private_address': '10.0.0.2'}
+ new_network_obj = objects.Network._from_db_object(
+ self.context, objects.Network(),
+ dict(test_network.fake_network, **new_network))
+
+ ctxt = context.get_admin_context()
+ net_manager._setup_network_on_host(ctxt, new_network_obj)
+
+ # Get the new iptables rules that got created from adding a new network
+ current_lines = []
+ new_lines = linux_net.iptables_manager._modify_rules(current_lines,
+ linux_net.iptables_manager.ipv4['nat'],
+ table_name='nat')
+
+ # Add the new expected rules to the old ones
+ expected_lines += ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
+ '-j SNAT --to-source %s -o %s'
+ % (binary_name, new_network['cidr'],
+ CONF.routing_source_ip,
+ CONF.public_interface),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
+ % (binary_name, new_network['cidr'],
+ CONF.metadata_host),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
+ % (binary_name, new_network['cidr'],
+ CONF.dmz_cidr[0]),
+ '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack '
+ '! --ctstate DNAT -j ACCEPT' % (binary_name,
+ new_network['cidr'],
+ new_network['cidr'])]
+
+ # Compare the expected rules (with new network) against the actual ones
+ for line in expected_lines:
+ self.assertIn(line, new_lines)
+
+ def test_flatdhcpmanager_dynamic_fixed_range(self):
+ """Test FlatDHCPManager NAT rules for fixed_range."""
+ # Set the network manager
+ self.network = network_manager.FlatDHCPManager(host=HOST)
+ self.network.db = db
+
+ # Test new behavior:
+ # CONF.fixed_range is not set, defaults to None
+ # Determine networks to NAT based on lookup
+ self._test_init_host_dynamic_fixed_range(self.network)
+
+ def test_vlanmanager_dynamic_fixed_range(self):
+ """Test VlanManager NAT rules for fixed_range."""
+ # Set the network manager
+ self.network = network_manager.VlanManager(host=HOST)
+ self.network.db = db
+
+ # Test new behavior:
+ # CONF.fixed_range is not set, defaults to None
+ # Determine networks to NAT based on lookup
+ self._test_init_host_dynamic_fixed_range(self.network)
+
+ @mock.patch('nova.objects.quotas.Quotas.rollback')
+ @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address')
+ @mock.patch('nova.network.manager.NetworkManager.'
+ '_do_trigger_security_group_members_refresh_for_instance')
+ def test_fixed_ip_cleanup_rollback(self, fake_trig,
+ fixed_get, rollback):
+ manager = network_manager.NetworkManager()
+
+ fake_trig.side_effect = test.TestingException
+
+ self.assertRaises(test.TestingException,
+ manager.deallocate_fixed_ip,
+ self.context, 'fake', 'fake',
+ instance=fake_inst(uuid='ignoreduuid'))
+ rollback.assert_called_once_with(self.context)
+
+ def test_fixed_cidr_out_of_range(self):
+ manager = network_manager.NetworkManager()
+ ctxt = context.get_admin_context()
+ self.assertRaises(exception.AddressOutOfRange,
+ manager.create_networks, ctxt, label="fake",
+ cidr='10.1.0.0/24', fixed_cidr='10.1.1.0/25')
+
+
+class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
+ network_manager.NetworkManager):
+ """Dummy manager that implements RPCAllocateFixedIP."""
+
+
+class RPCAllocateTestCase(test.TestCase):
+ """Tests nova.network.manager.RPCAllocateFixedIP."""
+ def setUp(self):
+ super(RPCAllocateTestCase, self).setUp()
+ self.flags(use_local=True, group='conductor')
+ self.rpc_fixed = TestRPCFixedManager()
+ self.context = context.RequestContext('fake', 'fake')
+
+ def test_rpc_allocate(self):
+ """Test to verify bug 855030 doesn't resurface.
+
+        Makes sure _rpc_allocate_fixed_ip returns a value so the call
+ returns properly and the greenpool completes.
+ """
+ address = '10.10.10.10'
+
+ def fake_allocate(*args, **kwargs):
+ return address
+
+ def fake_network_get(*args, **kwargs):
+ return test_network.fake_network
+
+ self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate)
+ self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get)
+ rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context,
+ 'fake_instance',
+ 'fake_network')
+ self.assertEqual(rval, address)
+
+
+class TestFloatingIPManager(floating_ips.FloatingIP,
+ network_manager.NetworkManager):
+ """Dummy manager that implements FloatingIP."""
+
+
+class AllocateTestCase(test.TestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(AllocateTestCase, self).setUp()
+ dns = 'nova.network.noop_dns_driver.NoopDNSDriver'
+ self.flags(instance_dns_manager=dns)
+ self.useFixture(test.SampleNetworks())
+ self.conductor = self.start_service(
+ 'conductor', manager=CONF.conductor.manager)
+ self.compute = self.start_service('compute')
+ self.network = self.start_service('network')
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id,
+ is_admin=True)
+ self.user_context = context.RequestContext('testuser',
+ 'testproject')
+
+ def test_allocate_for_instance(self):
+ address = "10.10.10.10"
+ self.flags(auto_assign_floating_ip=True)
+
+ db.floating_ip_create(self.context,
+ {'address': address,
+ 'pool': 'nova'})
+ inst = objects.Instance()
+ inst.host = self.compute.host
+ inst.display_name = HOST
+ inst.instance_type_id = 1
+ inst.uuid = FAKEUUID
+ inst.create(self.context)
+ networks = db.network_get_all(self.context)
+ for network in networks:
+ db.network_update(self.context, network['id'],
+ {'host': self.network.host})
+ project_id = self.user_context.project_id
+ nw_info = self.network.allocate_for_instance(self.user_context,
+ instance_id=inst['id'], instance_uuid=inst['uuid'],
+ host=inst['host'], vpn=None, rxtx_factor=3,
+ project_id=project_id, macs=None)
+ self.assertEqual(1, len(nw_info))
+ fixed_ip = nw_info.fixed_ips()[0]['address']
+ self.assertTrue(utils.is_valid_ipv4(fixed_ip))
+ self.network.deallocate_for_instance(self.context,
+ instance=inst)
+
+ def test_allocate_for_instance_illegal_network(self):
+ networks = db.network_get_all(self.context)
+ requested_networks = []
+ for network in networks:
+ # set all networks to other projects
+ db.network_update(self.context, network['id'],
+ {'host': self.network.host,
+ 'project_id': 'otherid'})
+ requested_networks.append((network['uuid'], None))
+ # set the first network to our project
+ db.network_update(self.context, networks[0]['id'],
+ {'project_id': self.user_context.project_id})
+
+ inst = objects.Instance()
+ inst.host = self.compute.host
+ inst.display_name = HOST
+ inst.instance_type_id = 1
+ inst.uuid = FAKEUUID
+ inst.create(self.context)
+ self.assertRaises(exception.NetworkNotFoundForProject,
+ self.network.allocate_for_instance, self.user_context,
+ instance_id=inst['id'], instance_uuid=inst['uuid'],
+ host=inst['host'], vpn=None, rxtx_factor=3,
+ project_id=self.context.project_id, macs=None,
+ requested_networks=requested_networks)
+
+ def test_allocate_for_instance_with_mac(self):
+ available_macs = set(['ca:fe:de:ad:be:ef'])
+ inst = db.instance_create(self.context, {'host': self.compute.host,
+ 'display_name': HOST,
+ 'instance_type_id': 1})
+ networks = db.network_get_all(self.context)
+ for network in networks:
+ db.network_update(self.context, network['id'],
+ {'host': self.network.host})
+ project_id = self.context.project_id
+ nw_info = self.network.allocate_for_instance(self.user_context,
+ instance_id=inst['id'], instance_uuid=inst['uuid'],
+ host=inst['host'], vpn=None, rxtx_factor=3,
+ project_id=project_id, macs=available_macs)
+ assigned_macs = [vif['address'] for vif in nw_info]
+ self.assertEqual(1, len(assigned_macs))
+ self.assertEqual(available_macs.pop(), assigned_macs[0])
+ self.network.deallocate_for_instance(self.context,
+ instance_id=inst['id'],
+ host=self.network.host,
+ project_id=project_id)
+
+ def test_allocate_for_instance_not_enough_macs(self):
+ available_macs = set()
+ inst = db.instance_create(self.context, {'host': self.compute.host,
+ 'display_name': HOST,
+ 'instance_type_id': 1})
+ networks = db.network_get_all(self.context)
+ for network in networks:
+ db.network_update(self.context, network['id'],
+ {'host': self.network.host})
+ project_id = self.context.project_id
+ self.assertRaises(exception.VirtualInterfaceCreateException,
+ self.network.allocate_for_instance,
+ self.user_context,
+ instance_id=inst['id'], instance_uuid=inst['uuid'],
+ host=inst['host'], vpn=None, rxtx_factor=3,
+ project_id=project_id, macs=available_macs)
+
+
+class FloatingIPTestCase(test.TestCase):
+ """Tests nova.network.manager.FloatingIP."""
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(FloatingIPTestCase, self).setUp()
+ self.tempdir = self.useFixture(fixtures.TempDir()).path
+ self.flags(log_dir=self.tempdir)
+ self.flags(use_local=True, group='conductor')
+ self.network = TestFloatingIPManager()
+ self.network.db = db
+ self.project_id = 'testproject'
+ self.context = context.RequestContext('testuser', self.project_id,
+ is_admin=False)
+
+ @mock.patch('nova.db.fixed_ip_get')
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.instance_get_by_uuid')
+ @mock.patch('nova.db.service_get_by_host_and_topic')
+ @mock.patch('nova.db.floating_ip_get_by_address')
+ def test_disassociate_floating_ip_multi_host_calls(self, floating_get,
+ service_get,
+ inst_get, net_get,
+ fixed_get):
+ floating_ip = dict(test_floating_ip.fake_floating_ip,
+ fixed_ip_id=12)
+
+ fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
+ network_id=None,
+ instance_uuid='instance-uuid')
+
+ network = dict(test_network.fake_network,
+ multi_host=True)
+
+ instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
+
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ self.stubs.Set(self.network,
+ '_floating_ip_owned_by_project',
+ lambda _x, _y: True)
+
+ floating_get.return_value = floating_ip
+ fixed_get.return_value = fixed_ip
+ net_get.return_value = network
+ inst_get.return_value = instance
+ service_get.return_value = test_service.fake_service
+
+ self.stubs.Set(self.network.servicegroup_api,
+ 'service_is_up',
+ lambda _x: True)
+
+ self.mox.StubOutWithMock(
+ self.network.network_rpcapi, '_disassociate_floating_ip')
+
+ self.network.network_rpcapi._disassociate_floating_ip(
+ ctxt, 'fl_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid')
+ self.mox.ReplayAll()
+
+ self.network.disassociate_floating_ip(ctxt, 'fl_ip', True)
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.instance_get_by_uuid')
+ @mock.patch('nova.db.floating_ip_get_by_address')
+ def test_associate_floating_ip_multi_host_calls(self, floating_get,
+ inst_get, net_get,
+ fixed_get):
+ floating_ip = dict(test_floating_ip.fake_floating_ip,
+ fixed_ip_id=None)
+
+ fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
+ network_id=None,
+ instance_uuid='instance-uuid')
+
+ network = dict(test_network.fake_network,
+ multi_host=True)
+
+ instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
+
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ self.stubs.Set(self.network,
+ '_floating_ip_owned_by_project',
+ lambda _x, _y: True)
+
+ floating_get.return_value = floating_ip
+ fixed_get.return_value = fixed_ip
+ net_get.return_value = network
+ inst_get.return_value = instance
+
+ self.mox.StubOutWithMock(
+ self.network.network_rpcapi, '_associate_floating_ip')
+
+ self.network.network_rpcapi._associate_floating_ip(
+ ctxt, 'fl_ip', 'fix_ip', mox.IgnoreArg(), 'some-other-host',
+ 'instance-uuid')
+ self.mox.ReplayAll()
+
+ self.network.associate_floating_ip(ctxt, 'fl_ip', 'fix_ip', True)
+
+ def test_double_deallocation(self):
+ instance_ref = db.instance_create(self.context,
+ {"project_id": self.project_id})
+ # Run it twice to make it fault if it does not handle
+ # instances without fixed networks
+ # If this fails in either, it does not handle having no addresses
+ self.network.deallocate_for_instance(self.context,
+ instance_id=instance_ref['id'])
+ self.network.deallocate_for_instance(self.context,
+ instance_id=instance_ref['id'])
+
+ def test_deallocate_floating_ip_quota_rollback(self):
+ ctxt = context.RequestContext('testuser', 'testproject',
+ is_admin=False)
+
+ def fake(*args, **kwargs):
+ return dict(test_floating_ip.fake_floating_ip,
+ address='10.0.0.1', fixed_ip_id=None,
+ project_id=ctxt.project_id)
+
+ self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake)
+ self.mox.StubOutWithMock(db, 'floating_ip_deallocate')
+ self.mox.StubOutWithMock(self.network,
+ '_floating_ip_owned_by_project')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
+ self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
+ quota.QUOTAS.reserve(self.context,
+ floating_ips=-1,
+ project_id='testproject').AndReturn('fake-rsv')
+ self.network._floating_ip_owned_by_project(self.context,
+ mox.IgnoreArg())
+ db.floating_ip_deallocate(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(None)
+ quota.QUOTAS.rollback(self.context, 'fake-rsv',
+ project_id='testproject')
+
+ self.mox.ReplayAll()
+ self.network.deallocate_floating_ip(self.context, '10.0.0.1')
+
+ def test_deallocation_deleted_instance(self):
+ self.stubs.Set(self.network, '_teardown_network_on_host',
+ lambda *args, **kwargs: None)
+ instance = objects.Instance()
+ instance.project_id = self.project_id
+ instance.deleted = True
+ instance.create(self.context)
+ network = db.network_create_safe(self.context.elevated(), {
+ 'project_id': self.project_id,
+ 'host': CONF.host,
+ 'label': 'foo'})
+ fixed = db.fixed_ip_create(self.context, {'allocated': True,
+ 'instance_uuid': instance.uuid, 'address': '10.1.1.1',
+ 'network_id': network['id']})
+ db.floating_ip_create(self.context, {
+ 'address': '10.10.10.10', 'instance_uuid': instance.uuid,
+ 'fixed_ip_id': fixed['id'],
+ 'project_id': self.project_id})
+ self.network.deallocate_for_instance(self.context, instance=instance)
+
+ def test_deallocation_duplicate_floating_ip(self):
+ self.stubs.Set(self.network, '_teardown_network_on_host',
+ lambda *args, **kwargs: None)
+ instance = objects.Instance()
+ instance.project_id = self.project_id
+ instance.create(self.context)
+ network = db.network_create_safe(self.context.elevated(), {
+ 'project_id': self.project_id,
+ 'host': CONF.host,
+ 'label': 'foo'})
+ fixed = db.fixed_ip_create(self.context, {'allocated': True,
+ 'instance_uuid': instance.uuid, 'address': '10.1.1.1',
+ 'network_id': network['id']})
+ db.floating_ip_create(self.context, {
+ 'address': '10.10.10.10',
+ 'deleted': True})
+ db.floating_ip_create(self.context, {
+ 'address': '10.10.10.10', 'instance_uuid': instance.uuid,
+ 'fixed_ip_id': fixed['id'],
+ 'project_id': self.project_id})
+ self.network.deallocate_for_instance(self.context, instance=instance)
+
+ @mock.patch('nova.db.fixed_ip_get')
+ @mock.patch('nova.db.floating_ip_get_by_address')
+ @mock.patch('nova.db.floating_ip_update')
+ def test_migrate_instance_start(self, floating_update, floating_get,
+ fixed_get):
+ called = {'count': 0}
+
+ def fake_floating_ip_get_by_address(context, address):
+ return dict(test_floating_ip.fake_floating_ip,
+ address=address,
+ fixed_ip_id=0)
+
+ def fake_is_stale_floating_ip_address(context, floating_ip):
+ return str(floating_ip.address) == '172.24.4.23'
+
+ floating_get.side_effect = fake_floating_ip_get_by_address
+ fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
+ instance_uuid='fake_uuid',
+ address='10.0.0.2',
+ network=test_network.fake_network)
+ floating_update.return_value = fake_floating_ip_get_by_address(
+ None, '1.2.3.4')
+
+ def fake_remove_floating_ip(floating_addr, fixed_addr, interface,
+ network):
+ called['count'] += 1
+
+ def fake_clean_conntrack(fixed_ip):
+ if not str(fixed_ip) == "10.0.0.2":
+ raise exception.FixedIpInvalid(address=fixed_ip)
+
+ self.stubs.Set(self.network, '_is_stale_floating_ip_address',
+ fake_is_stale_floating_ip_address)
+ self.stubs.Set(self.network.l3driver, 'remove_floating_ip',
+ fake_remove_floating_ip)
+ self.stubs.Set(self.network.driver, 'clean_conntrack',
+ fake_clean_conntrack)
+ self.mox.ReplayAll()
+ addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
+ self.network.migrate_instance_start(self.context,
+ instance_uuid=FAKEUUID,
+ floating_addresses=addresses,
+ rxtx_factor=3,
+ project_id=self.project_id,
+ source='fake_source',
+ dest='fake_dest')
+
+ self.assertEqual(called['count'], 2)
+
+ @mock.patch('nova.db.fixed_ip_get')
+ @mock.patch('nova.db.floating_ip_update')
+ def test_migrate_instance_finish(self, floating_update, fixed_get):
+ called = {'count': 0}
+
+ def fake_floating_ip_get_by_address(context, address):
+ return dict(test_floating_ip.fake_floating_ip,
+ address=address,
+ fixed_ip_id=0)
+
+ def fake_is_stale_floating_ip_address(context, floating_ip):
+ return str(floating_ip.address) == '172.24.4.23'
+
+ fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
+ instance_uuid='fake_uuid',
+ address='10.0.0.2',
+ network=test_network.fake_network)
+ floating_update.return_value = fake_floating_ip_get_by_address(
+ None, '1.2.3.4')
+
+ def fake_add_floating_ip(floating_addr, fixed_addr, interface,
+ network):
+ called['count'] += 1
+
+ self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
+ fake_floating_ip_get_by_address)
+ self.stubs.Set(self.network, '_is_stale_floating_ip_address',
+ fake_is_stale_floating_ip_address)
+ self.stubs.Set(self.network.l3driver, 'add_floating_ip',
+ fake_add_floating_ip)
+ self.mox.ReplayAll()
+ addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
+ self.network.migrate_instance_finish(self.context,
+ instance_uuid=FAKEUUID,
+ floating_addresses=addresses,
+ host='fake_dest',
+ rxtx_factor=3,
+ project_id=self.project_id,
+ source='fake_source')
+
+ self.assertEqual(called['count'], 2)
+
+ def test_floating_dns_create_conflict(self):
+ zone = "example.org"
+ address1 = "10.10.10.11"
+ name1 = "foo"
+
+ self.network.add_dns_entry(self.context, address1, name1, "A", zone)
+
+ self.assertRaises(exception.FloatingIpDNSExists,
+ self.network.add_dns_entry, self.context,
+ address1, name1, "A", zone)
+
+ def test_floating_create_and_get(self):
+ zone = "example.org"
+ address1 = "10.10.10.11"
+ name1 = "foo"
+ name2 = "bar"
+ entries = self.network.get_dns_entries_by_address(self.context,
+ address1, zone)
+ self.assertFalse(entries)
+
+ self.network.add_dns_entry(self.context, address1, name1, "A", zone)
+ self.network.add_dns_entry(self.context, address1, name2, "A", zone)
+ entries = self.network.get_dns_entries_by_address(self.context,
+ address1, zone)
+ self.assertEqual(len(entries), 2)
+ self.assertEqual(entries[0], name1)
+ self.assertEqual(entries[1], name2)
+
+ entries = self.network.get_dns_entries_by_name(self.context,
+ name1, zone)
+ self.assertEqual(len(entries), 1)
+ self.assertEqual(entries[0], address1)
+
+ def test_floating_dns_delete(self):
+ zone = "example.org"
+ address1 = "10.10.10.11"
+ name1 = "foo"
+ name2 = "bar"
+
+ self.network.add_dns_entry(self.context, address1, name1, "A", zone)
+ self.network.add_dns_entry(self.context, address1, name2, "A", zone)
+ self.network.delete_dns_entry(self.context, name1, zone)
+
+ entries = self.network.get_dns_entries_by_address(self.context,
+ address1, zone)
+ self.assertEqual(len(entries), 1)
+ self.assertEqual(entries[0], name2)
+
+ self.assertRaises(exception.NotFound,
+ self.network.delete_dns_entry, self.context,
+ name1, zone)
+
+ def test_floating_dns_domains_public(self):
+ zone1 = "testzone"
+ domain1 = "example.org"
+ domain2 = "example.com"
+ address1 = '10.10.10.10'
+ entryname = 'testentry'
+
+ context_admin = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+
+ self.assertRaises(exception.AdminRequired,
+ self.network.create_public_dns_domain, self.context,
+ domain1, zone1)
+ self.network.create_public_dns_domain(context_admin, domain1,
+ 'testproject')
+ self.network.create_public_dns_domain(context_admin, domain2,
+ 'fakeproject')
+
+ domains = self.network.get_dns_domains(self.context)
+ self.assertEqual(len(domains), 2)
+ self.assertEqual(domains[0]['domain'], domain1)
+ self.assertEqual(domains[1]['domain'], domain2)
+ self.assertEqual(domains[0]['project'], 'testproject')
+ self.assertEqual(domains[1]['project'], 'fakeproject')
+
+ self.network.add_dns_entry(self.context, address1, entryname,
+ 'A', domain1)
+ entries = self.network.get_dns_entries_by_name(self.context,
+ entryname, domain1)
+ self.assertEqual(len(entries), 1)
+ self.assertEqual(entries[0], address1)
+
+ self.assertRaises(exception.AdminRequired,
+ self.network.delete_dns_domain, self.context,
+ domain1)
+ self.network.delete_dns_domain(context_admin, domain1)
+ self.network.delete_dns_domain(context_admin, domain2)
+
+ # Verify that deleting the domain deleted the associated entry
+ entries = self.network.get_dns_entries_by_name(self.context,
+ entryname, domain1)
+ self.assertFalse(entries)
+
+    def test_delete_all_by_ip(self):
+        """Deleting by IP removes the address's entries from every domain."""
+        domain1 = "example.org"
+        domain2 = "example.com"
+        address = "10.10.10.10"
+        name1 = "foo"
+        name2 = "bar"
+
+        def fake_domains(context):
+            # Stub the domain lookup so the manager iterates a fixed set of
+            # three public domains (including a subdomain of example.org).
+            return [{'domain': 'example.org', 'scope': 'public'},
+                    {'domain': 'example.com', 'scope': 'public'},
+                    {'domain': 'test.example.org', 'scope': 'public'}]
+
+        self.stubs.Set(self.network, 'get_dns_domains', fake_domains)
+
+        context_admin = context.RequestContext('testuser', 'testproject',
+                                               is_admin=True)
+
+        self.network.create_public_dns_domain(context_admin, domain1,
+                                              'testproject')
+        self.network.create_public_dns_domain(context_admin, domain2,
+                                              'fakeproject')
+
+        domains = self.network.get_dns_domains(self.context)
+        # Register two names for the same address in every stubbed domain.
+        for domain in domains:
+            self.network.add_dns_entry(self.context, address,
+                                       name1, "A", domain['domain'])
+            self.network.add_dns_entry(self.context, address,
+                                       name2, "A", domain['domain'])
+            entries = self.network.get_dns_entries_by_address(self.context,
+                                                              address,
+                                                              domain['domain'])
+            self.assertEqual(len(entries), 2)
+
+        self.network._delete_all_entries_for_ip(self.context, address)
+
+        # After deletion no domain may still resolve the address.
+        for domain in domains:
+            entries = self.network.get_dns_entries_by_address(self.context,
+                                                              address,
+                                                              domain['domain'])
+            self.assertFalse(entries)
+
+        self.network.delete_dns_domain(context_admin, domain1)
+        self.network.delete_dns_domain(context_admin, domain2)
+
+    def test_mac_conflicts(self):
+        # Make sure MAC collisions are retried.
+        self.flags(create_unique_mac_address_attempts=3)
+        ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
+        macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']
+
+        # Create a VIF with aa:aa:aa:aa:aa:aa
+        crash_test_dummy_vif = {
+            'address': macs[1],
+            'instance_uuid': 'fake_uuid',
+            'network_id': 123,
+            'uuid': 'fake_uuid',
+        }
+        self.network.db.virtual_interface_create(ctxt, crash_test_dummy_vif)
+
+        # Hand out a collision first, then a legit MAC.  pop() takes from the
+        # end of the list, so 'aa:...' (the conflicting one) is handed out
+        # first and 'bb:...' second.
+        def fake_gen_mac():
+            return macs.pop()
+        self.stubs.Set(utils, 'generate_mac_address', fake_gen_mac)
+
+        # SQLite doesn't seem to honor the uniqueness constraint on the
+        # address column, so fake the collision-avoidance here
+        def fake_vif_save(vif):
+            if vif.address == crash_test_dummy_vif['address']:
+                raise db_exc.DBError("If you're smart, you'll retry!")
+            # NOTE(russellb) The VirtualInterface object requires an ID to be
+            # set, and we expect it to get set automatically when we do the
+            # save.
+            vif.id = 1
+        self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save)
+
+        # Attempt to add another and make sure that both MACs are consumed
+        # by the retry loop
+        self.network._add_virtual_interface(ctxt, 'fake_uuid', 123)
+        self.assertEqual(macs, [])
+
+    def test_deallocate_client_exceptions(self):
+        # Ensure that FloatingIpNotFoundForAddress is wrapped.
+        # Wrapping in messaging.ExpectedException lets RPC callers receive
+        # the original exception instead of a generic remote error.
+        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
+        self.network.db.floating_ip_get_by_address(
+            self.context, '1.2.3.4').AndRaise(
+                exception.FloatingIpNotFoundForAddress(address='fake'))
+        self.mox.ReplayAll()
+        self.assertRaises(messaging.ExpectedException,
+                          self.network.deallocate_floating_ip,
+                          self.context, '1.2.3.4')
+
+    def test_associate_client_exceptions(self):
+        # Ensure that FloatingIpNotFoundForAddress is wrapped.
+        # associate_floating_ip must surface the not-found error to RPC
+        # callers via messaging.ExpectedException.
+        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
+        self.network.db.floating_ip_get_by_address(
+            self.context, '1.2.3.4').AndRaise(
+                exception.FloatingIpNotFoundForAddress(address='fake'))
+        self.mox.ReplayAll()
+        self.assertRaises(messaging.ExpectedException,
+                          self.network.associate_floating_ip,
+                          self.context, '1.2.3.4', '10.0.0.1')
+
+    def test_disassociate_client_exceptions(self):
+        # Ensure that FloatingIpNotFoundForAddress is wrapped.
+        # Same wrapping contract as deallocate/associate above.
+        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
+        self.network.db.floating_ip_get_by_address(
+            self.context, '1.2.3.4').AndRaise(
+                exception.FloatingIpNotFoundForAddress(address='fake'))
+        self.mox.ReplayAll()
+        self.assertRaises(messaging.ExpectedException,
+                          self.network.disassociate_floating_ip,
+                          self.context, '1.2.3.4')
+
+    def test_get_floating_ip_client_exceptions(self):
+        # Ensure that FloatingIpNotFoundForAddress is wrapped.
+        # Here the lookup is by id, so FloatingIpNotFound is the raised type.
+        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
+        self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
+            exception.FloatingIpNotFound(id='fake'))
+        self.mox.ReplayAll()
+        self.assertRaises(messaging.ExpectedException,
+                          self.network.get_floating_ip,
+                          self.context, 'fake-id')
+
+    def _test_associate_floating_ip_failure(self, stdout, expected_exception):
+        """Helper: force the L3 driver's add_floating_ip to fail.
+
+        The DB associate/disassociate calls are stubbed to succeed, the
+        driver raises ProcessExecutionError carrying *stdout*, and the test
+        asserts _associate_floating_ip surfaces *expected_exception*.
+        """
+        def _fake_catchall(*args, **kwargs):
+            return dict(test_fixed_ip.fake_fixed_ip,
+                        network=test_network.fake_network)
+
+        def _fake_add_floating_ip(*args, **kwargs):
+            raise processutils.ProcessExecutionError(stdout)
+
+        self.stubs.Set(self.network.db, 'floating_ip_fixed_ip_associate',
+                       _fake_catchall)
+        self.stubs.Set(self.network.db, 'floating_ip_disassociate',
+                       _fake_catchall)
+        self.stubs.Set(self.network.l3driver, 'add_floating_ip',
+                       _fake_add_floating_ip)
+
+        self.assertRaises(expected_exception,
+                          self.network._associate_floating_ip, self.context,
+                          '1.2.3.4', '1.2.3.5', '', '')
+
+    def test_associate_floating_ip_failure(self):
+        # A generic driver failure is re-raised unchanged.
+        self._test_associate_floating_ip_failure(None,
+            processutils.ProcessExecutionError)
+
+    def test_associate_floating_ip_failure_interface_not_found(self):
+        # 'Cannot find device' in the driver output is translated into the
+        # more specific NoFloatingIpInterface exception.
+        self._test_associate_floating_ip_failure('Cannot find device',
+            exception.NoFloatingIpInterface)
+
+    @mock.patch('nova.objects.FloatingIP.get_by_address')
+    def test_get_floating_ip_by_address(self, mock_get):
+        # The manager method is a thin pass-through to the object API.
+        mock_get.return_value = mock.sentinel.floating
+        self.assertEqual(mock.sentinel.floating,
+                         self.network.get_floating_ip_by_address(
+                             self.context,
+                             mock.sentinel.address))
+        mock_get.assert_called_once_with(self.context, mock.sentinel.address)
+
+    @mock.patch('nova.objects.FloatingIPList.get_by_project')
+    def test_get_floating_ips_by_project(self, mock_get):
+        # The project id is taken from the request context, not a parameter.
+        mock_get.return_value = mock.sentinel.floatings
+        self.assertEqual(mock.sentinel.floatings,
+                         self.network.get_floating_ips_by_project(
+                             self.context))
+        mock_get.assert_called_once_with(self.context, self.context.project_id)
+
+    @mock.patch('nova.objects.FloatingIPList.get_by_fixed_address')
+    def test_get_floating_ips_by_fixed_address(self, mock_get):
+        # The manager flattens the FloatingIP objects into bare address
+        # strings for its callers.
+        mock_get.return_value = [objects.FloatingIP(address='1.2.3.4'),
+                                 objects.FloatingIP(address='5.6.7.8')]
+        self.assertEqual(['1.2.3.4', '5.6.7.8'],
+                         self.network.get_floating_ips_by_fixed_address(
+                             self.context, mock.sentinel.address))
+        mock_get.assert_called_once_with(self.context, mock.sentinel.address)
+
+
+class InstanceDNSTestCase(test.TestCase):
+    """Tests nova.network.manager instance DNS."""
+    def setUp(self):
+        super(InstanceDNSTestCase, self).setUp()
+        # Redirect log output into a per-test temp dir and run the conductor
+        # in-process so the manager's DB calls go straight through.
+        self.tempdir = self.useFixture(fixtures.TempDir()).path
+        self.flags(log_dir=self.tempdir)
+        self.flags(use_local=True, group='conductor')
+        self.network = TestFloatingIPManager()
+        self.network.db = db
+        self.project_id = 'testproject'
+        # Non-admin context; admin-only paths build their own admin context.
+        self.context = context.RequestContext('testuser', self.project_id,
+                                              is_admin=False)
+
+    def test_dns_domains_private(self):
+        """Private DNS domains are admin-only to create and delete."""
+        zone1 = 'testzone'
+        domain1 = 'example.org'
+
+        context_admin = context.RequestContext('testuser', 'testproject',
+                                               is_admin=True)
+
+        self.assertRaises(exception.AdminRequired,
+                          self.network.create_private_dns_domain, self.context,
+                          domain1, zone1)
+
+        self.network.create_private_dns_domain(context_admin, domain1, zone1)
+        domains = self.network.get_dns_domains(self.context)
+        self.assertEqual(len(domains), 1)
+        self.assertEqual(domains[0]['domain'], domain1)
+        self.assertEqual(domains[0]['availability_zone'], zone1)
+
+        self.assertRaises(exception.AdminRequired,
+                          self.network.delete_dns_domain, self.context,
+                          domain1)
+        self.network.delete_dns_domain(context_admin, domain1)
+
+
+domain1 = "example.org"
+domain2 = "example.com"
+
+
+class LdapDNSTestCase(test.TestCase):
+ """Tests nova.network.ldapdns.LdapDNS."""
+ def setUp(self):
+ super(LdapDNSTestCase, self).setUp()
+
+ self.useFixture(test.ReplaceModule('ldap', fake_ldap))
+ dns_class = 'nova.network.ldapdns.LdapDNS'
+ self.driver = importutils.import_object(dns_class)
+
+ attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
+ 'domain', 'dcobject', 'top'],
+ 'associateddomain': ['root'],
+ 'dc': ['root']}
+ self.driver.lobj.add_s("ou=hosts,dc=example,dc=org", attrs.items())
+ self.driver.create_domain(domain1)
+ self.driver.create_domain(domain2)
+
+ def tearDown(self):
+ self.driver.delete_domain(domain1)
+ self.driver.delete_domain(domain2)
+ super(LdapDNSTestCase, self).tearDown()
+
+ def test_ldap_dns_domains(self):
+ domains = self.driver.get_domains()
+ self.assertEqual(len(domains), 2)
+ self.assertIn(domain1, domains)
+ self.assertIn(domain2, domains)
+
+ def test_ldap_dns_create_conflict(self):
+ address1 = "10.10.10.11"
+ name1 = "foo"
+
+ self.driver.create_entry(name1, address1, "A", domain1)
+
+ self.assertRaises(exception.FloatingIpDNSExists,
+ self.driver.create_entry,
+ name1, address1, "A", domain1)
+
+ def test_ldap_dns_create_and_get(self):
+ address1 = "10.10.10.11"
+ name1 = "foo"
+ name2 = "bar"
+ entries = self.driver.get_entries_by_address(address1, domain1)
+ self.assertFalse(entries)
+
+ self.driver.create_entry(name1, address1, "A", domain1)
+ self.driver.create_entry(name2, address1, "A", domain1)
+ entries = self.driver.get_entries_by_address(address1, domain1)
+ self.assertEqual(len(entries), 2)
+ self.assertEqual(entries[0], name1)
+ self.assertEqual(entries[1], name2)
+
+ entries = self.driver.get_entries_by_name(name1, domain1)
+ self.assertEqual(len(entries), 1)
+ self.assertEqual(entries[0], address1)
+
+ def test_ldap_dns_delete(self):
+ address1 = "10.10.10.11"
+ name1 = "foo"
+ name2 = "bar"
+
+ self.driver.create_entry(name1, address1, "A", domain1)
+ self.driver.create_entry(name2, address1, "A", domain1)
+ entries = self.driver.get_entries_by_address(address1, domain1)
+ self.assertEqual(len(entries), 2)
+
+ self.driver.delete_entry(name1, domain1)
+ entries = self.driver.get_entries_by_address(address1, domain1)
+ LOG.debug("entries: %s" % entries)
+ self.assertEqual(len(entries), 1)
+ self.assertEqual(entries[0], name2)
+
+ self.assertRaises(exception.NotFound,
+ self.driver.delete_entry,
+ name1, domain1)
diff --git a/nova/tests/unit/network/test_network_info.py b/nova/tests/unit/network/test_network_info.py
new file mode 100644
index 0000000000..456d4c3a18
--- /dev/null
+++ b/nova/tests/unit/network/test_network_info.py
@@ -0,0 +1,800 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import exception
+from nova.network import model
+from nova import test
+from nova.tests.unit import fake_network_cache_model
+from nova.virt import netutils
+
+
+class RouteTests(test.NoDBTestCase):
+    """Tests for the network-cache Route model (equality and hydration)."""
+
+    def test_create_route_with_attrs(self):
+        route = fake_network_cache_model.new_route()
+        fake_network_cache_model.new_ip(dict(address='192.168.1.1'))
+        self.assertEqual(route['cidr'], '0.0.0.0/24')
+        self.assertEqual(route['gateway']['address'], '192.168.1.1')
+        self.assertEqual(route['interface'], 'eth0')
+
+    def test_routes_equal(self):
+        route1 = model.Route()
+        route2 = model.Route()
+        self.assertEqual(route1, route2)
+
+    def test_routes_not_equal(self):
+        # Any of cidr, gateway or interface differing breaks equality.
+        route1 = model.Route(cidr='1.1.1.0/24')
+        route2 = model.Route(cidr='2.2.2.0/24')
+        self.assertNotEqual(route1, route2)
+
+        route1 = model.Route(cidr='1.1.1.1/24', gateway='1.1.1.1')
+        route2 = model.Route(cidr='1.1.1.1/24', gateway='1.1.1.2')
+        self.assertNotEqual(route1, route2)
+
+        route1 = model.Route(cidr='1.1.1.1/24', interface='tap0')
+        route2 = model.Route(cidr='1.1.1.1/24', interface='tap1')
+        self.assertNotEqual(route1, route2)
+
+    def test_hydrate(self):
+        # Keys absent from the input dict hydrate to None.
+        route = model.Route.hydrate(
+            {'gateway': fake_network_cache_model.new_ip(
+                dict(address='192.168.1.1'))})
+        self.assertIsNone(route['cidr'])
+        self.assertEqual(route['gateway']['address'], '192.168.1.1')
+        self.assertIsNone(route['interface'])
+
+
+class IPTests(test.NoDBTestCase):
+ def test_ip_equal(self):
+ ip1 = model.IP(address='127.0.0.1')
+ ip2 = model.IP(address='127.0.0.1')
+ self.assertEqual(ip1, ip2)
+
+ def test_ip_not_equal(self):
+ ip1 = model.IP(address='127.0.0.1')
+ ip2 = model.IP(address='172.0.0.3')
+ self.assertNotEqual(ip1, ip2)
+
+ ip1 = model.IP(address='127.0.0.1', type=1)
+ ip2 = model.IP(address='172.0.0.1', type=2)
+ self.assertNotEqual(ip1, ip2)
+
+ ip1 = model.IP(address='127.0.0.1', version=4)
+ ip2 = model.IP(address='172.0.0.1', version=6)
+ self.assertNotEqual(ip1, ip2)
+
+
+class FixedIPTests(test.NoDBTestCase):
+    """Tests the FixedIP model: construction, equality and floating IPs."""
+
+    def test_createnew_fixed_ip_with_attrs(self):
+        fixed_ip = model.FixedIP(address='192.168.1.100')
+        self.assertEqual(fixed_ip['address'], '192.168.1.100')
+        self.assertEqual(fixed_ip['floating_ips'], [])
+        self.assertEqual(fixed_ip['type'], 'fixed')
+        # IP version is derived from the address format.
+        self.assertEqual(fixed_ip['version'], 4)
+
+    def test_create_fixed_ipv6(self):
+        fixed_ip = model.FixedIP(address='::1')
+        self.assertEqual(fixed_ip['address'], '::1')
+        self.assertEqual(fixed_ip['floating_ips'], [])
+        self.assertEqual(fixed_ip['type'], 'fixed')
+        self.assertEqual(fixed_ip['version'], 6)
+
+    def test_create_fixed_bad_ip_fails(self):
+        self.assertRaises(exception.InvalidIpAddressError,
+                          model.FixedIP,
+                          address='picklespicklespickles')
+
+    def test_equate_two_fixed_ips(self):
+        fixed_ip = model.FixedIP(address='::1')
+        fixed_ip2 = model.FixedIP(address='::1')
+        self.assertEqual(fixed_ip, fixed_ip2)
+
+    def test_equate_two_dissimilar_fixed_ips_fails(self):
+        # Each attribute participates in equality.
+        fixed_ip = model.FixedIP(address='::1')
+        fixed_ip2 = model.FixedIP(address='::2')
+        self.assertNotEqual(fixed_ip, fixed_ip2)
+
+        fixed_ip = model.FixedIP(address='::1', type='1')
+        fixed_ip2 = model.FixedIP(address='::1', type='2')
+        self.assertNotEqual(fixed_ip, fixed_ip2)
+
+        fixed_ip = model.FixedIP(address='::1', version='6')
+        fixed_ip2 = model.FixedIP(address='::1', version='4')
+        self.assertNotEqual(fixed_ip, fixed_ip2)
+
+        fixed_ip = model.FixedIP(address='::1', floating_ips='1.1.1.1')
+        fixed_ip2 = model.FixedIP(address='::1', floating_ips='8.8.8.8')
+        self.assertNotEqual(fixed_ip, fixed_ip2)
+
+    def test_hydrate(self):
+        # Hydrating an empty dict yields the documented defaults.
+        fixed_ip = model.FixedIP.hydrate({})
+        self.assertEqual(fixed_ip['floating_ips'], [])
+        self.assertIsNone(fixed_ip['address'])
+        self.assertEqual(fixed_ip['type'], 'fixed')
+        self.assertIsNone(fixed_ip['version'])
+
+    def test_add_floating_ip(self):
+        fixed_ip = model.FixedIP(address='192.168.1.100')
+        fixed_ip.add_floating_ip('192.168.1.101')
+        self.assertEqual(fixed_ip['floating_ips'], ['192.168.1.101'])
+
+    def test_add_floating_ip_repeatedly_only_one_instance(self):
+        # Adding the same floating IP many times must be idempotent.
+        fixed_ip = model.FixedIP(address='192.168.1.100')
+        for i in xrange(10):
+            fixed_ip.add_floating_ip('192.168.1.101')
+        self.assertEqual(fixed_ip['floating_ips'], ['192.168.1.101'])
+
+
+class SubnetTests(test.NoDBTestCase):
+    """Tests the Subnet model: attributes, equality, mutators, hydration."""
+
+    def test_create_subnet_with_attrs(self):
+        subnet = fake_network_cache_model.new_subnet()
+
+        route1 = fake_network_cache_model.new_route()
+
+        self.assertEqual(subnet['cidr'], '10.10.0.0/24')
+        self.assertEqual(subnet['dns'],
+                [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
+                 fake_network_cache_model.new_ip(dict(address='2.3.4.5'))])
+        self.assertEqual(subnet['gateway']['address'], '10.10.0.1')
+        self.assertEqual(subnet['ips'],
+                [fake_network_cache_model.new_fixed_ip(
+                        dict(address='10.10.0.2')),
+                 fake_network_cache_model.new_fixed_ip(
+                        dict(address='10.10.0.3'))])
+        self.assertEqual(subnet['routes'], [route1])
+        self.assertEqual(subnet['version'], 4)
+
+    def test_subnet_equal(self):
+        subnet1 = fake_network_cache_model.new_subnet()
+        subnet2 = fake_network_cache_model.new_subnet()
+        self.assertEqual(subnet1, subnet2)
+
+    def test_subnet_not_equal(self):
+        # Each attribute participates in equality.
+        subnet1 = model.Subnet(cidr='1.1.1.0/24')
+        subnet2 = model.Subnet(cidr='2.2.2.0/24')
+        self.assertNotEqual(subnet1, subnet2)
+
+        subnet1 = model.Subnet(dns='1.1.1.0/24')
+        subnet2 = model.Subnet(dns='2.2.2.0/24')
+        self.assertNotEqual(subnet1, subnet2)
+
+        subnet1 = model.Subnet(gateway='1.1.1.1/24')
+        subnet2 = model.Subnet(gateway='2.2.2.1/24')
+        self.assertNotEqual(subnet1, subnet2)
+
+        subnet1 = model.Subnet(ips='1.1.1.0/24')
+        subnet2 = model.Subnet(ips='2.2.2.0/24')
+        self.assertNotEqual(subnet1, subnet2)
+
+        subnet1 = model.Subnet(routes='1.1.1.0/24')
+        subnet2 = model.Subnet(routes='2.2.2.0/24')
+        self.assertNotEqual(subnet1, subnet2)
+
+        subnet1 = model.Subnet(version='4')
+        subnet2 = model.Subnet(version='6')
+        self.assertNotEqual(subnet1, subnet2)
+
+    def test_add_route(self):
+        subnet = fake_network_cache_model.new_subnet()
+        route1 = fake_network_cache_model.new_route()
+        route2 = fake_network_cache_model.new_route({'cidr': '1.1.1.1/24'})
+        subnet.add_route(route2)
+        self.assertEqual(subnet['routes'], [route1, route2])
+
+    def test_add_route_a_lot(self):
+        # Repeated adds of the same route are idempotent.
+        subnet = fake_network_cache_model.new_subnet()
+        route1 = fake_network_cache_model.new_route()
+        route2 = fake_network_cache_model.new_route({'cidr': '1.1.1.1/24'})
+        for i in xrange(10):
+            subnet.add_route(route2)
+        self.assertEqual(subnet['routes'], [route1, route2])
+
+    def test_add_dns(self):
+        subnet = fake_network_cache_model.new_subnet()
+        dns = fake_network_cache_model.new_ip(dict(address='9.9.9.9'))
+        subnet.add_dns(dns)
+        self.assertEqual(subnet['dns'],
+                [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
+                 fake_network_cache_model.new_ip(dict(address='2.3.4.5')),
+                 fake_network_cache_model.new_ip(dict(address='9.9.9.9'))])
+
+    def test_add_dns_a_lot(self):
+        # Repeated adds of the same DNS server are idempotent.
+        subnet = fake_network_cache_model.new_subnet()
+        for i in xrange(10):
+            subnet.add_dns(fake_network_cache_model.new_ip(
+                    dict(address='9.9.9.9')))
+        self.assertEqual(subnet['dns'],
+                [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
+                 fake_network_cache_model.new_ip(dict(address='2.3.4.5')),
+                 fake_network_cache_model.new_ip(dict(address='9.9.9.9'))])
+
+    def test_add_ip(self):
+        subnet = fake_network_cache_model.new_subnet()
+        subnet.add_ip(fake_network_cache_model.new_ip(
+                dict(address='192.168.1.102')))
+        self.assertEqual(subnet['ips'],
+                [fake_network_cache_model.new_fixed_ip(
+                        dict(address='10.10.0.2')),
+                 fake_network_cache_model.new_fixed_ip(
+                        dict(address='10.10.0.3')),
+                 fake_network_cache_model.new_ip(
+                        dict(address='192.168.1.102'))])
+
+    def test_add_ip_a_lot(self):
+        # Repeated adds of the same fixed IP are idempotent.
+        subnet = fake_network_cache_model.new_subnet()
+        for i in xrange(10):
+            subnet.add_ip(fake_network_cache_model.new_fixed_ip(
+                    dict(address='192.168.1.102')))
+        self.assertEqual(subnet['ips'],
+                [fake_network_cache_model.new_fixed_ip(
+                        dict(address='10.10.0.2')),
+                 fake_network_cache_model.new_fixed_ip(
+                        dict(address='10.10.0.3')),
+                 fake_network_cache_model.new_fixed_ip(
+                        dict(address='192.168.1.102'))])
+
+    def test_hydrate(self):
+        subnet_dict = {
+            'cidr': '255.255.255.0',
+            'dns': [fake_network_cache_model.new_ip(dict(address='1.1.1.1'))],
+            'ips': [fake_network_cache_model.new_fixed_ip(
+                dict(address='2.2.2.2'))],
+            'routes': [fake_network_cache_model.new_route()],
+            'version': 4,
+            'gateway': fake_network_cache_model.new_ip(
+                dict(address='3.3.3.3'))}
+        subnet = model.Subnet.hydrate(subnet_dict)
+
+        self.assertEqual(subnet['cidr'], '255.255.255.0')
+        self.assertEqual(subnet['dns'], [fake_network_cache_model.new_ip(
+                dict(address='1.1.1.1'))])
+        self.assertEqual(subnet['gateway']['address'], '3.3.3.3')
+        self.assertEqual(subnet['ips'], [fake_network_cache_model.new_fixed_ip(
+                dict(address='2.2.2.2'))])
+        self.assertEqual(subnet['routes'], [
+                fake_network_cache_model.new_route()])
+        self.assertEqual(subnet['version'], 4)
+
+
+class NetworkTests(test.NoDBTestCase):
+    """Tests the Network model: subnets list, equality, hydration."""
+
+    def test_create_network(self):
+        network = fake_network_cache_model.new_network()
+        self.assertEqual(network['id'], 1)
+        self.assertEqual(network['bridge'], 'br0')
+        self.assertEqual(network['label'], 'public')
+        self.assertEqual(network['subnets'],
+                [fake_network_cache_model.new_subnet(),
+                 fake_network_cache_model.new_subnet(
+                        dict(cidr='255.255.255.255'))])
+
+    def test_add_subnet(self):
+        network = fake_network_cache_model.new_network()
+        network.add_subnet(fake_network_cache_model.new_subnet(
+                dict(cidr='0.0.0.0')))
+        self.assertEqual(network['subnets'],
+                [fake_network_cache_model.new_subnet(),
+                 fake_network_cache_model.new_subnet(
+                        dict(cidr='255.255.255.255')),
+                 fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))])
+
+    def test_add_subnet_a_lot(self):
+        # Repeated adds of the same subnet are idempotent.
+        network = fake_network_cache_model.new_network()
+        for i in xrange(10):
+            network.add_subnet(fake_network_cache_model.new_subnet(
+                    dict(cidr='0.0.0.0')))
+        self.assertEqual(network['subnets'],
+                [fake_network_cache_model.new_subnet(),
+                 fake_network_cache_model.new_subnet(
+                        dict(cidr='255.255.255.255')),
+                 fake_network_cache_model.new_subnet(dict(cidr='0.0.0.0'))])
+
+    def test_network_equal(self):
+        network1 = model.Network()
+        network2 = model.Network()
+        self.assertEqual(network1, network2)
+
+    def test_network_not_equal(self):
+        # Each attribute participates in equality.
+        network1 = model.Network(id='1')
+        network2 = model.Network(id='2')
+        self.assertNotEqual(network1, network2)
+
+        network1 = model.Network(bridge='br-int')
+        network2 = model.Network(bridge='br0')
+        self.assertNotEqual(network1, network2)
+
+        network1 = model.Network(label='net1')
+        network2 = model.Network(label='net2')
+        self.assertNotEqual(network1, network2)
+
+        network1 = model.Network(subnets='1.1.1.0/24')
+        network2 = model.Network(subnets='2.2.2.0/24')
+        self.assertNotEqual(network1, network2)
+
+    def test_hydrate(self):
+        fake_network_cache_model.new_subnet()
+        fake_network_cache_model.new_subnet(dict(cidr='255.255.255.255'))
+        network = model.Network.hydrate(fake_network_cache_model.new_network())
+
+        self.assertEqual(network['id'], 1)
+        self.assertEqual(network['bridge'], 'br0')
+        self.assertEqual(network['label'], 'public')
+        self.assertEqual(network['subnets'],
+                [fake_network_cache_model.new_subnet(),
+                 fake_network_cache_model.new_subnet(
+                        dict(cidr='255.255.255.255'))])
+
+
+class VIFTests(test.NoDBTestCase):
+    """Tests the VIF model: equality, IP accessors and hydration."""
+
+    def test_create_vif(self):
+        vif = fake_network_cache_model.new_vif()
+        self.assertEqual(vif['id'], 1)
+        self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
+        self.assertEqual(vif['network'],
+                fake_network_cache_model.new_network())
+
+    def test_vif_equal(self):
+        vif1 = model.VIF()
+        vif2 = model.VIF()
+        self.assertEqual(vif1, vif2)
+
+    def test_vif_not_equal(self):
+        # Every attribute participates in equality, including SR-IOV
+        # related ones (vnic_type, profile).
+        vif1 = model.VIF(id=1)
+        vif2 = model.VIF(id=2)
+        self.assertNotEqual(vif1, vif2)
+
+        vif1 = model.VIF(address='00:00:00:00:00:11')
+        vif2 = model.VIF(address='00:00:00:00:00:22')
+        self.assertNotEqual(vif1, vif2)
+
+        vif1 = model.VIF(network='net1')
+        vif2 = model.VIF(network='net2')
+        self.assertNotEqual(vif1, vif2)
+
+        vif1 = model.VIF(type='ovs')
+        vif2 = model.VIF(type='linuxbridge')
+        self.assertNotEqual(vif1, vif2)
+
+        vif1 = model.VIF(devname='ovs1234')
+        vif2 = model.VIF(devname='linuxbridge1234')
+        self.assertNotEqual(vif1, vif2)
+
+        vif1 = model.VIF(qbh_params=1)
+        vif2 = model.VIF(qbh_params=None)
+        self.assertNotEqual(vif1, vif2)
+
+        vif1 = model.VIF(qbg_params=1)
+        vif2 = model.VIF(qbg_params=None)
+        self.assertNotEqual(vif1, vif2)
+
+        vif1 = model.VIF(active=True)
+        vif2 = model.VIF(active=False)
+        self.assertNotEqual(vif1, vif2)
+
+        vif1 = model.VIF(vnic_type=model.VNIC_TYPE_NORMAL)
+        vif2 = model.VIF(vnic_type=model.VNIC_TYPE_DIRECT)
+        self.assertNotEqual(vif1, vif2)
+
+        vif1 = model.VIF(profile={'pci_slot': '0000:0a:00.1'})
+        vif2 = model.VIF(profile={'pci_slot': '0000:0a:00.2'})
+        self.assertNotEqual(vif1, vif2)
+
+    def test_create_vif_with_type(self):
+        vif_dict = dict(
+            id=1,
+            address='aa:aa:aa:aa:aa:aa',
+            network=fake_network_cache_model.new_network(),
+            type='bridge')
+        vif = fake_network_cache_model.new_vif(vif_dict)
+        self.assertEqual(vif['id'], 1)
+        self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
+        self.assertEqual(vif['type'], 'bridge')
+        self.assertEqual(vif['network'],
+                fake_network_cache_model.new_network())
+
+    def test_vif_get_fixed_ips(self):
+        vif = fake_network_cache_model.new_vif()
+        fixed_ips = vif.fixed_ips()
+        # The fake VIF carries two subnets each holding the same two fixed
+        # IPs, hence the '* 2'.
+        ips = [
+            fake_network_cache_model.new_fixed_ip(dict(address='10.10.0.2')),
+            fake_network_cache_model.new_fixed_ip(dict(address='10.10.0.3'))
+        ] * 2
+        self.assertEqual(fixed_ips, ips)
+
+    def test_vif_get_floating_ips(self):
+        vif = fake_network_cache_model.new_vif()
+        vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1')
+        floating_ips = vif.floating_ips()
+        self.assertEqual(floating_ips, ['192.168.1.1'])
+
+    def test_vif_get_labeled_ips(self):
+        vif = fake_network_cache_model.new_vif()
+        labeled_ips = vif.labeled_ips()
+        ip_dict = {
+            'network_id': 1,
+            'ips': [fake_network_cache_model.new_ip(
+                        {'address': '10.10.0.2', 'type': 'fixed'}),
+                    fake_network_cache_model.new_ip(
+                        {'address': '10.10.0.3', 'type': 'fixed'})] * 2,
+            'network_label': 'public'}
+        self.assertEqual(labeled_ips, ip_dict)
+
+    def test_hydrate(self):
+        fake_network_cache_model.new_network()
+        vif = model.VIF.hydrate(fake_network_cache_model.new_vif())
+        self.assertEqual(vif['id'], 1)
+        self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
+        self.assertEqual(vif['network'],
+                fake_network_cache_model.new_network())
+
+    def test_hydrate_vif_with_type(self):
+        vif_dict = dict(
+            id=1,
+            address='aa:aa:aa:aa:aa:aa',
+            network=fake_network_cache_model.new_network(),
+            type='bridge')
+        vif = model.VIF.hydrate(fake_network_cache_model.new_vif(vif_dict))
+        self.assertEqual(vif['id'], 1)
+        self.assertEqual(vif['address'], 'aa:aa:aa:aa:aa:aa')
+        self.assertEqual(vif['type'], 'bridge')
+        self.assertEqual(vif['network'],
+                fake_network_cache_model.new_network())
+
+
+class NetworkInfoTests(test.NoDBTestCase):
+    """Tests NetworkInfo, its async wrapper and network-template injection.
+
+    The '* 4' in the fixed-IP expectations comes from: two VIFs, each with
+    two subnets, each subnet holding the same two fixed IPs.
+    """
+
+    def test_create_model(self):
+        ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(),
+                fake_network_cache_model.new_vif(
+                    {'address': 'bb:bb:bb:bb:bb:bb'})])
+        self.assertEqual(ninfo.fixed_ips(),
+                [fake_network_cache_model.new_fixed_ip(
+                    {'address': '10.10.0.2'}),
+                 fake_network_cache_model.new_fixed_ip(
+                        {'address': '10.10.0.3'})] * 4)
+
+    def test_create_async_model(self):
+        def async_wrapper():
+            return model.NetworkInfo(
+                    [fake_network_cache_model.new_vif(),
+                     fake_network_cache_model.new_vif(
+                            {'address': 'bb:bb:bb:bb:bb:bb'})])
+
+        ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
+        self.assertEqual(ninfo.fixed_ips(),
+                [fake_network_cache_model.new_fixed_ip(
+                        {'address': '10.10.0.2'}),
+                 fake_network_cache_model.new_fixed_ip(
+                        {'address': '10.10.0.3'})] * 4)
+
+    def test_create_async_model_exceptions(self):
+        def async_wrapper():
+            raise test.TestingException()
+
+        ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
+        self.assertRaises(test.TestingException, ninfo.wait)
+        # 2nd one doesn't raise
+        self.assertIsNone(ninfo.wait())
+        # Test that do_raise=False works on .wait()
+        ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
+        self.assertIsNone(ninfo.wait(do_raise=False))
+        # Test we also raise calling a method
+        ninfo = model.NetworkInfoAsyncWrapper(async_wrapper)
+        self.assertRaises(test.TestingException, ninfo.fixed_ips)
+
+    def test_get_floating_ips(self):
+        vif = fake_network_cache_model.new_vif()
+        vif['network']['subnets'][0]['ips'][0].add_floating_ip('192.168.1.1')
+        ninfo = model.NetworkInfo([vif,
+                fake_network_cache_model.new_vif(
+                    {'address': 'bb:bb:bb:bb:bb:bb'})])
+        self.assertEqual(ninfo.floating_ips(), ['192.168.1.1'])
+
+    def test_hydrate(self):
+        # Hydrating an already-built NetworkInfo must round-trip.
+        ninfo = model.NetworkInfo([fake_network_cache_model.new_vif(),
+                fake_network_cache_model.new_vif(
+                        {'address': 'bb:bb:bb:bb:bb:bb'})])
+        model.NetworkInfo.hydrate(ninfo)
+        self.assertEqual(ninfo.fixed_ips(),
+                [fake_network_cache_model.new_fixed_ip(
+                        {'address': '10.10.0.2'}),
+                 fake_network_cache_model.new_fixed_ip(
+                        {'address': '10.10.0.3'})] * 4)
+
+    def _setup_injected_network_scenario(self, should_inject=True,
+                                        use_ipv4=True, use_ipv6=False,
+                                        gateway=True, dns=True,
+                                        two_interfaces=False,
+                                        libvirt_virt_type=None):
+        """Check that netutils properly decides whether to inject based on
+        whether the supplied subnet is static or dynamic.
+        """
+        network = fake_network_cache_model.new_network({'subnets': []})
+
+        subnet_dict = {}
+        if not gateway:
+            subnet_dict['gateway'] = None
+
+        if not dns:
+            subnet_dict['dns'] = None
+
+        if not should_inject:
+            # A dhcp_server on the subnet marks it dynamic -> no injection.
+            subnet_dict['dhcp_server'] = '10.10.0.1'
+
+        if use_ipv4:
+            network.add_subnet(
+                fake_network_cache_model.new_subnet(subnet_dict))
+
+        if should_inject and use_ipv6:
+            gateway_ip = fake_network_cache_model.new_ip(dict(
+                address='1234:567::1'))
+            ip = fake_network_cache_model.new_ip(dict(
+                address='1234:567::2'))
+            ipv6_subnet_dict = dict(
+                cidr='1234:567::/48',
+                gateway=gateway_ip,
+                dns=[fake_network_cache_model.new_ip(
+                        dict(address='2001:4860:4860::8888')),
+                     fake_network_cache_model.new_ip(
+                        dict(address='2001:4860:4860::8844'))],
+                ips=[ip])
+            if not gateway:
+                ipv6_subnet_dict['gateway'] = None
+            network.add_subnet(fake_network_cache_model.new_subnet(
+                ipv6_subnet_dict))
+
+        # Behave as though CONF.flat_injected is True
+        network['meta']['injected'] = True
+        vif = fake_network_cache_model.new_vif({'network': network})
+        vifs = [vif]
+        if two_interfaces:
+            vifs.append(vif)
+
+        nwinfo = model.NetworkInfo(vifs)
+        return netutils.get_injected_network_template(
+            nwinfo, use_ipv6=use_ipv6, libvirt_virt_type=libvirt_virt_type)
+
+    def test_injection_dynamic(self):
+        # Dynamic (DHCP) subnets yield no injected template.
+        expected = None
+        template = self._setup_injected_network_scenario(should_inject=False)
+        self.assertEqual(expected, template)
+
+    def test_injection_static(self):
+        expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+    address 10.10.0.2
+    netmask 255.255.255.0
+    broadcast 10.10.0.255
+    gateway 10.10.0.1
+    dns-nameservers 1.2.3.4 2.3.4.5
+"""
+        template = self._setup_injected_network_scenario()
+        self.assertEqual(expected, template)
+
+    def test_injection_static_no_gateway(self):
+        expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+    address 10.10.0.2
+    netmask 255.255.255.0
+    broadcast 10.10.0.255
+    dns-nameservers 1.2.3.4 2.3.4.5
+"""
+        template = self._setup_injected_network_scenario(gateway=False)
+        self.assertEqual(expected, template)
+
+    def test_injection_static_no_dns(self):
+        expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+    address 10.10.0.2
+    netmask 255.255.255.0
+    broadcast 10.10.0.255
+    gateway 10.10.0.1
+"""
+        template = self._setup_injected_network_scenario(dns=False)
+        self.assertEqual(expected, template)
+
+    def test_injection_static_ipv6(self):
+        expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+    address 10.10.0.2
+    netmask 255.255.255.0
+    broadcast 10.10.0.255
+    gateway 10.10.0.1
+    dns-nameservers 1.2.3.4 2.3.4.5
+iface eth0 inet6 static
+    address 1234:567::2
+    netmask 48
+    gateway 1234:567::1
+    dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
+"""
+        template = self._setup_injected_network_scenario(use_ipv6=True)
+        self.assertEqual(expected, template)
+
+    def test_injection_static_ipv6_no_gateway(self):
+        expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+    address 10.10.0.2
+    netmask 255.255.255.0
+    broadcast 10.10.0.255
+    dns-nameservers 1.2.3.4 2.3.4.5
+iface eth0 inet6 static
+    address 1234:567::2
+    netmask 48
+    dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
+"""
+        template = self._setup_injected_network_scenario(use_ipv6=True,
+                                                         gateway=False)
+        self.assertEqual(expected, template)
+
+    def test_injection_static_with_ipv4_off(self):
+        # No IPv4 subnet and no IPv6 requested -> nothing to inject.
+        expected = None
+        template = self._setup_injected_network_scenario(use_ipv4=False)
+        self.assertEqual(expected, template)
+
+    def test_injection_ipv6_two_interfaces(self):
+        expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+    address 10.10.0.2
+    netmask 255.255.255.0
+    broadcast 10.10.0.255
+    gateway 10.10.0.1
+    dns-nameservers 1.2.3.4 2.3.4.5
+iface eth0 inet6 static
+    address 1234:567::2
+    netmask 48
+    gateway 1234:567::1
+    dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
+
+auto eth1
+iface eth1 inet static
+    address 10.10.0.2
+    netmask 255.255.255.0
+    broadcast 10.10.0.255
+    gateway 10.10.0.1
+    dns-nameservers 1.2.3.4 2.3.4.5
+iface eth1 inet6 static
+    address 1234:567::2
+    netmask 48
+    gateway 1234:567::1
+    dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
+"""
+        template = self._setup_injected_network_scenario(use_ipv6=True,
+                                                         two_interfaces=True)
+        self.assertEqual(expected, template)
+
+    def test_injection_ipv6_with_lxc(self):
+        # For lxc the ipv6 config is emitted as post-up commands instead of
+        # an inet6 stanza.
+        expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+    address 10.10.0.2
+    netmask 255.255.255.0
+    broadcast 10.10.0.255
+    gateway 10.10.0.1
+    dns-nameservers 1.2.3.4 2.3.4.5
+    post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
+    post-up ip -6 route add default via 1234:567::1 dev ${IFACE}
+
+auto eth1
+iface eth1 inet static
+    address 10.10.0.2
+    netmask 255.255.255.0
+    broadcast 10.10.0.255
+    gateway 10.10.0.1
+    dns-nameservers 1.2.3.4 2.3.4.5
+    post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
+    post-up ip -6 route add default via 1234:567::1 dev ${IFACE}
+"""
+        template = self._setup_injected_network_scenario(
+                use_ipv6=True, two_interfaces=True, libvirt_virt_type='lxc')
+        self.assertEqual(expected, template)
+
+    def test_injection_ipv6_with_lxc_no_gateway(self):
+        expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+    address 10.10.0.2
+    netmask 255.255.255.0
+    broadcast 10.10.0.255
+    dns-nameservers 1.2.3.4 2.3.4.5
+    post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
+
+auto eth1
+iface eth1 inet static
+    address 10.10.0.2
+    netmask 255.255.255.0
+    broadcast 10.10.0.255
+    dns-nameservers 1.2.3.4 2.3.4.5
+    post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
+"""
+        template = self._setup_injected_network_scenario(
+                use_ipv6=True, gateway=False, two_interfaces=True,
+                libvirt_virt_type='lxc')
+        self.assertEqual(expected, template)
diff --git a/nova/tests/unit/network/test_neutronv2.py b/nova/tests/unit/network/test_neutronv2.py
new file mode 100644
index 0000000000..a34c8cc899
--- /dev/null
+++ b/nova/tests/unit/network/test_neutronv2.py
@@ -0,0 +1,3194 @@
+# Copyright 2012 OpenStack Foundation
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import collections
+import contextlib
+import copy
+import uuid
+
+import mock
+import mox
+from neutronclient.common import exceptions
+from neutronclient.v2_0 import client
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import six
+
+from nova.compute import flavors
+from nova import context
+from nova import exception
+from nova.network import model
+from nova.network import neutronv2
+from nova.network.neutronv2 import api as neutronapi
+from nova.network.neutronv2 import constants
+from nova import objects
+from nova.openstack.common import policy as common_policy
+from nova.pci import manager as pci_manager
+from nova.pci import whitelist as pci_whitelist
+from nova import policy
+from nova import test
+from nova.tests.unit import fake_instance
+from nova import utils
+
+CONF = cfg.CONF
+
+# NOTE: Neutron client raises Exception which is discouraged by HACKING.
+# We set this variable here and use it for assertions below to avoid
+# the hacking checks until we can make neutron client throw a custom
+# exception class instead.
+NEUTRON_CLIENT_EXCEPTION = Exception
+
+
+class MyComparator(mox.Comparator):
+ def __init__(self, lhs):
+ self.lhs = lhs
+
+ def _com_dict(self, lhs, rhs):
+ if len(lhs) != len(rhs):
+ return False
+ for key, value in lhs.iteritems():
+ if key not in rhs:
+ return False
+ rhs_value = rhs[key]
+ if not self._com(value, rhs_value):
+ return False
+ return True
+
+ def _com_list(self, lhs, rhs):
+ if len(lhs) != len(rhs):
+ return False
+ for lhs_value in lhs:
+ if lhs_value not in rhs:
+ return False
+ return True
+
+ def _com(self, lhs, rhs):
+ if lhs is None:
+ return rhs is None
+ if isinstance(lhs, dict):
+ if not isinstance(rhs, dict):
+ return False
+ return self._com_dict(lhs, rhs)
+ if isinstance(lhs, list):
+ if not isinstance(rhs, list):
+ return False
+ return self._com_list(lhs, rhs)
+ if isinstance(lhs, tuple):
+ if not isinstance(rhs, tuple):
+ return False
+ return self._com_list(lhs, rhs)
+ return lhs == rhs
+
+ def equals(self, rhs):
+ return self._com(self.lhs, rhs)
+
+ def __repr__(self):
+ return str(self.lhs)
+
+
+class TestNeutronClient(test.TestCase):
+ def test_withtoken(self):
+ self.flags(url='http://anyhost/', group='neutron')
+ self.flags(url_timeout=30, group='neutron')
+ my_context = context.RequestContext('userid',
+ 'my_tenantid',
+ auth_token='token')
+ self.mox.StubOutWithMock(client.Client, "__init__")
+ client.Client.__init__(
+ auth_strategy=CONF.neutron.auth_strategy,
+ endpoint_url=CONF.neutron.url,
+ token=my_context.auth_token,
+ timeout=CONF.neutron.url_timeout,
+ insecure=False,
+ ca_cert=None).AndReturn(None)
+ self.mox.ReplayAll()
+ neutronv2.get_client(my_context)
+
+ def test_withouttoken(self):
+ my_context = context.RequestContext('userid', 'my_tenantid')
+ self.assertRaises(exceptions.Unauthorized,
+ neutronv2.get_client,
+ my_context)
+
+ def test_withtoken_context_is_admin(self):
+ self.flags(url='http://anyhost/', group='neutron')
+ self.flags(url_timeout=30, group='neutron')
+ my_context = context.RequestContext('userid',
+ 'my_tenantid',
+ auth_token='token',
+ is_admin=True)
+ self.mox.StubOutWithMock(client.Client, "__init__")
+ client.Client.__init__(
+ auth_strategy=CONF.neutron.auth_strategy,
+ endpoint_url=CONF.neutron.url,
+ token=my_context.auth_token,
+ timeout=CONF.neutron.url_timeout,
+ insecure=False,
+ ca_cert=None).AndReturn(None)
+ self.mox.ReplayAll()
+ # Note that although we have admin set in the context we
+ # are not asking for an admin client, and so we auth with
+ # our own token
+ neutronv2.get_client(my_context)
+
+ def test_withouttoken_keystone_connection_error(self):
+ self.flags(auth_strategy='keystone', group='neutron')
+ self.flags(url='http://anyhost/', group='neutron')
+ my_context = context.RequestContext('userid', 'my_tenantid')
+ self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
+ neutronv2.get_client,
+ my_context)
+
+ def test_reuse_admin_token(self):
+ self.flags(url='http://anyhost/', group='neutron')
+ self.flags(url_timeout=30, group='neutron')
+ token_store = neutronv2.AdminTokenStore.get()
+ token_store.admin_auth_token = 'new_token'
+ my_context = context.RequestContext('userid', 'my_tenantid',
+ auth_token='token')
+ with contextlib.nested(
+ mock.patch.object(client.Client, "list_networks",
+ side_effect=mock.Mock),
+ mock.patch.object(client.Client, 'get_auth_info',
+ return_value={'auth_token': 'new_token1'}),
+ ):
+ client1 = neutronv2.get_client(my_context, True)
+ client1.list_networks(retrieve_all=False)
+ self.assertEqual('new_token1', token_store.admin_auth_token)
+ client1 = neutronv2.get_client(my_context, True)
+ client1.list_networks(retrieve_all=False)
+ self.assertEqual('new_token1', token_store.admin_auth_token)
+
+ def test_admin_token_updated(self):
+ self.flags(url='http://anyhost/', group='neutron')
+ self.flags(url_timeout=30, group='neutron')
+ token_store = neutronv2.AdminTokenStore.get()
+ token_store.admin_auth_token = 'new_token'
+ tokens = [{'auth_token': 'new_token1'}, {'auth_token': 'new_token'}]
+ my_context = context.RequestContext('userid', 'my_tenantid',
+ auth_token='token')
+ with contextlib.nested(
+ mock.patch.object(client.Client, "list_networks",
+ side_effect=mock.Mock),
+ mock.patch.object(client.Client, 'get_auth_info',
+ side_effect=tokens.pop),
+ ):
+ client1 = neutronv2.get_client(my_context, True)
+ client1.list_networks(retrieve_all=False)
+ self.assertEqual('new_token', token_store.admin_auth_token)
+ client1 = neutronv2.get_client(my_context, True)
+ client1.list_networks(retrieve_all=False)
+ self.assertEqual('new_token1', token_store.admin_auth_token)
+
+
+class TestNeutronv2Base(test.TestCase):
+
+ def setUp(self):
+ super(TestNeutronv2Base, self).setUp()
+ self.context = context.RequestContext('userid', 'my_tenantid')
+ setattr(self.context,
+ 'auth_token',
+ 'bff4a5a6b9eb4ea2a6efec6eefb77936')
+ self.instance = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
+ 'uuid': str(uuid.uuid4()),
+ 'display_name': 'test_instance',
+ 'availability_zone': 'nova',
+ 'host': 'some_host',
+ 'security_groups': []}
+ self.instance2 = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
+ 'uuid': str(uuid.uuid4()),
+ 'display_name': 'test_instance2',
+ 'availability_zone': 'nova',
+ 'security_groups': []}
+ self.nets1 = [{'id': 'my_netid1',
+ 'name': 'my_netname1',
+ 'subnets': ['mysubnid1'],
+ 'tenant_id': 'my_tenantid'}]
+ self.nets2 = []
+ self.nets2.append(self.nets1[0])
+ self.nets2.append({'id': 'my_netid2',
+ 'name': 'my_netname2',
+ 'subnets': ['mysubnid2'],
+ 'tenant_id': 'my_tenantid'})
+ self.nets3 = self.nets2 + [{'id': 'my_netid3',
+ 'name': 'my_netname3',
+ 'tenant_id': 'my_tenantid'}]
+ self.nets4 = [{'id': 'his_netid4',
+ 'name': 'his_netname4',
+ 'tenant_id': 'his_tenantid'}]
+ # A network request with external networks
+ self.nets5 = self.nets1 + [{'id': 'the-external-one',
+ 'name': 'out-of-this-world',
+ 'router:external': True,
+ 'tenant_id': 'should-be-an-admin'}]
+ # A network request with a duplicate
+ self.nets6 = []
+ self.nets6.append(self.nets1[0])
+ self.nets6.append(self.nets1[0])
+ # A network request with a combo
+ self.nets7 = []
+ self.nets7.append(self.nets2[1])
+ self.nets7.append(self.nets1[0])
+ self.nets7.append(self.nets2[1])
+ self.nets7.append(self.nets1[0])
+ # A network request with only external network
+ self.nets8 = [self.nets5[1]]
+
+ self.nets = [self.nets1, self.nets2, self.nets3, self.nets4,
+ self.nets5, self.nets6, self.nets7, self.nets8]
+
+ self.port_address = '10.0.1.2'
+ self.port_data1 = [{'network_id': 'my_netid1',
+ 'device_id': self.instance2['uuid'],
+ 'device_owner': 'compute:nova',
+ 'id': 'my_portid1',
+ 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
+ 'status': 'DOWN',
+ 'admin_state_up': True,
+ 'fixed_ips': [{'ip_address': self.port_address,
+ 'subnet_id': 'my_subid1'}],
+ 'mac_address': 'my_mac1', }]
+ self.float_data1 = [{'port_id': 'my_portid1',
+ 'fixed_ip_address': self.port_address,
+ 'floating_ip_address': '172.0.1.2'}]
+ self.dhcp_port_data1 = [{'fixed_ips': [{'ip_address': '10.0.1.9',
+ 'subnet_id': 'my_subid1'}],
+ 'status': 'ACTIVE',
+ 'admin_state_up': True}]
+ self.port_address2 = '10.0.2.2'
+ self.port_data2 = []
+ self.port_data2.append(self.port_data1[0])
+ self.port_data2.append({'network_id': 'my_netid2',
+ 'device_id': self.instance['uuid'],
+ 'admin_state_up': True,
+ 'status': 'ACTIVE',
+ 'device_owner': 'compute:nova',
+ 'id': 'my_portid2',
+ 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
+ 'fixed_ips':
+ [{'ip_address': self.port_address2,
+ 'subnet_id': 'my_subid2'}],
+ 'mac_address': 'my_mac2', })
+ self.float_data2 = []
+ self.float_data2.append(self.float_data1[0])
+ self.float_data2.append({'port_id': 'my_portid2',
+ 'fixed_ip_address': '10.0.2.2',
+ 'floating_ip_address': '172.0.2.2'})
+ self.port_data3 = [{'network_id': 'my_netid1',
+ 'device_id': 'device_id3',
+ 'status': 'DOWN',
+ 'admin_state_up': True,
+ 'device_owner': 'compute:nova',
+ 'id': 'my_portid3',
+ 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
+ 'fixed_ips': [], # no fixed ip
+ 'mac_address': 'my_mac3', }]
+ self.subnet_data1 = [{'id': 'my_subid1',
+ 'cidr': '10.0.1.0/24',
+ 'network_id': 'my_netid1',
+ 'gateway_ip': '10.0.1.1',
+ 'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
+ self.subnet_data2 = []
+ self.subnet_data_n = [{'id': 'my_subid1',
+ 'cidr': '10.0.1.0/24',
+ 'network_id': 'my_netid1',
+ 'gateway_ip': '10.0.1.1',
+ 'dns_nameservers': ['8.8.1.1', '8.8.1.2']},
+ {'id': 'my_subid2',
+ 'cidr': '20.0.1.0/24',
+ 'network_id': 'my_netid2',
+ 'gateway_ip': '20.0.1.1',
+ 'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
+ self.subnet_data2.append({'id': 'my_subid2',
+ 'cidr': '10.0.2.0/24',
+ 'network_id': 'my_netid2',
+ 'gateway_ip': '10.0.2.1',
+ 'dns_nameservers': ['8.8.2.1', '8.8.2.2']})
+
+ self.fip_pool = {'id': '4fdbfd74-eaf8-4884-90d9-00bd6f10c2d3',
+ 'name': 'ext_net',
+ 'router:external': True,
+ 'tenant_id': 'admin_tenantid'}
+ self.fip_pool_nova = {'id': '435e20c3-d9f1-4f1b-bee5-4611a1dd07db',
+ 'name': 'nova',
+ 'router:external': True,
+ 'tenant_id': 'admin_tenantid'}
+ self.fip_unassociated = {'tenant_id': 'my_tenantid',
+ 'id': 'fip_id1',
+ 'floating_ip_address': '172.24.4.227',
+ 'floating_network_id': self.fip_pool['id'],
+ 'port_id': None,
+ 'fixed_ip_address': None,
+ 'router_id': None}
+ fixed_ip_address = self.port_data2[1]['fixed_ips'][0]['ip_address']
+ self.fip_associated = {'tenant_id': 'my_tenantid',
+ 'id': 'fip_id2',
+ 'floating_ip_address': '172.24.4.228',
+ 'floating_network_id': self.fip_pool['id'],
+ 'port_id': self.port_data2[1]['id'],
+ 'fixed_ip_address': fixed_ip_address,
+ 'router_id': 'router_id1'}
+ self._returned_nw_info = []
+ self.mox.StubOutWithMock(neutronv2, 'get_client')
+ self.moxed_client = self.mox.CreateMock(client.Client)
+ self.addCleanup(CONF.reset)
+ self.addCleanup(self.mox.VerifyAll)
+ self.addCleanup(self.mox.UnsetStubs)
+ self.addCleanup(self.stubs.UnsetAll)
+
+ def _stub_allocate_for_instance(self, net_idx=1, **kwargs):
+ # TODO(mriedem): Remove this conversion when all neutronv2 APIs are
+ # converted to handling instance objects.
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ **self.instance)
+ self.instance2 = fake_instance.fake_instance_obj(self.context,
+ **self.instance2)
+
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api, 'get_instance_nw_info')
+ has_portbinding = False
+ has_extra_dhcp_opts = False
+ dhcp_options = kwargs.get('dhcp_options')
+ if dhcp_options is not None:
+ has_extra_dhcp_opts = True
+
+ if kwargs.get('portbinding'):
+ has_portbinding = True
+ api.extensions[constants.PORTBINDING_EXT] = 1
+ self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache')
+ neutronv2.get_client(mox.IgnoreArg()).AndReturn(
+ self.moxed_client)
+ neutronv2.get_client(
+ mox.IgnoreArg(), admin=True).AndReturn(
+ self.moxed_client)
+ api._refresh_neutron_extensions_cache(mox.IgnoreArg(),
+ neutron=self.moxed_client)
+ self.mox.StubOutWithMock(api, '_has_port_binding_extension')
+ api._has_port_binding_extension(mox.IgnoreArg(),
+ neutron=self.moxed_client,
+ refresh_cache=True).AndReturn(has_portbinding)
+ else:
+ self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache')
+ api._refresh_neutron_extensions_cache(mox.IgnoreArg(),
+ neutron=self.moxed_client)
+ self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
+ # Net idx is 1-based for compatibility with existing unit tests
+ nets = self.nets[net_idx - 1]
+ ports = {}
+ fixed_ips = {}
+ macs = kwargs.get('macs')
+ if macs:
+ macs = set(macs)
+ req_net_ids = []
+ ordered_networks = []
+ port = {}
+ if 'requested_networks' in kwargs:
+ for request in kwargs['requested_networks']:
+ if request.port_id:
+ if request.port_id == 'my_portid3':
+ self.moxed_client.show_port(request.port_id
+ ).AndReturn(
+ {'port': {'id': 'my_portid3',
+ 'network_id': 'my_netid1',
+ 'mac_address': 'my_mac1',
+ 'device_id': kwargs.get('_device') and
+ self.instance2.uuid or
+ ''}})
+ ports['my_netid1'] = [self.port_data1[0],
+ self.port_data3[0]]
+ ports[request.port_id] = self.port_data3[0]
+ request.network_id = 'my_netid1'
+ if macs is not None:
+ macs.discard('my_mac1')
+ else:
+ self.moxed_client.show_port(request.port_id).AndReturn(
+ {'port': {'id': 'my_portid1',
+ 'network_id': 'my_netid1',
+ 'mac_address': 'my_mac1',
+ 'device_id': kwargs.get('_device') and
+ self.instance2.uuid or
+ ''}})
+ ports[request.port_id] = self.port_data1[0]
+ request.network_id = 'my_netid1'
+ if macs is not None:
+ macs.discard('my_mac1')
+ else:
+ fixed_ips[request.network_id] = request.address
+ req_net_ids.append(request.network_id)
+ ordered_networks.append(request)
+ else:
+ for n in nets:
+ ordered_networks.append(
+ objects.NetworkRequest(network_id=n['id']))
+ if kwargs.get('_break') == 'pre_list_networks':
+ self.mox.ReplayAll()
+ return api
+ # search all req_net_ids as in api.py
+ search_ids = req_net_ids
+ if search_ids:
+ mox_list_params = {'id': mox.SameElementsAs(search_ids)}
+ self.moxed_client.list_networks(
+ **mox_list_params).AndReturn({'networks': nets})
+ else:
+ mox_list_params = {'tenant_id': self.instance.project_id,
+ 'shared': False}
+ self.moxed_client.list_networks(
+ **mox_list_params).AndReturn({'networks': nets})
+ mox_list_params = {'shared': True}
+ self.moxed_client.list_networks(
+ **mox_list_params).AndReturn({'networks': []})
+
+ if (('requested_networks' not in kwargs or
+ kwargs['requested_networks'].as_tuples() == [(None, None, None)])
+ and len(nets) > 1):
+ self.mox.ReplayAll()
+ return api
+
+ ports_in_requested_net_order = []
+ nets_in_requested_net_order = []
+ for request in ordered_networks:
+ port_req_body = {
+ 'port': {
+ 'device_id': self.instance.uuid,
+ 'device_owner': 'compute:nova',
+ },
+ }
+ # Network lookup for available network_id
+ network = None
+ for net in nets:
+ if net['id'] == request.network_id:
+ network = net
+ break
+            # if net_id did not pass validate_networks() and is not available
+            # here, skip it safely rather than continuing with a None network
+ else:
+ continue
+ if has_portbinding:
+ port_req_body['port']['binding:host_id'] = (
+ self.instance.get('host'))
+ if not has_portbinding:
+ api._populate_neutron_extension_values(mox.IgnoreArg(),
+ self.instance, mox.IgnoreArg(),
+ mox.IgnoreArg(), neutron=self.moxed_client).AndReturn(None)
+ else:
+ # since _populate_neutron_extension_values() will call
+ # _has_port_binding_extension()
+ api._has_port_binding_extension(mox.IgnoreArg(),
+ neutron=self.moxed_client).\
+ AndReturn(has_portbinding)
+ if request.port_id:
+ port = ports[request.port_id]
+ self.moxed_client.update_port(request.port_id,
+ MyComparator(port_req_body)
+ ).AndReturn(
+ {'port': port})
+ ports_in_requested_net_order.append(request.port_id)
+ else:
+ request.address = fixed_ips.get(request.network_id)
+ if request.address:
+ port_req_body['port']['fixed_ips'] = [{'ip_address':
+ request.address}]
+ port_req_body['port']['network_id'] = request.network_id
+ port_req_body['port']['admin_state_up'] = True
+ port_req_body['port']['tenant_id'] = \
+ self.instance.project_id
+ if macs:
+ port_req_body['port']['mac_address'] = macs.pop()
+ if has_portbinding:
+ port_req_body['port']['binding:host_id'] = (
+ self.instance.get('host'))
+ res_port = {'port': {'id': 'fake'}}
+ if has_extra_dhcp_opts:
+ port_req_body['port']['extra_dhcp_opts'] = dhcp_options
+ if kwargs.get('_break') == 'mac' + request.network_id:
+ self.mox.ReplayAll()
+ return api
+ self.moxed_client.create_port(
+ MyComparator(port_req_body)).AndReturn(res_port)
+ ports_in_requested_net_order.append(res_port['port']['id'])
+
+ nets_in_requested_net_order.append(network)
+
+ api.get_instance_nw_info(mox.IgnoreArg(),
+ self.instance,
+ networks=nets_in_requested_net_order,
+ port_ids=ports_in_requested_net_order,
+ admin_client=None
+ ).AndReturn(self._returned_nw_info)
+ self.mox.ReplayAll()
+ return api
+
+ def _verify_nw_info(self, nw_inf, index=0):
+ id_suffix = index + 1
+ self.assertEqual('10.0.%s.2' % id_suffix,
+ nw_inf.fixed_ips()[index]['address'])
+ self.assertEqual('172.0.%s.2' % id_suffix,
+ nw_inf.fixed_ips()[index].floating_ip_addresses()[0])
+ self.assertEqual('my_netname%s' % id_suffix,
+ nw_inf[index]['network']['label'])
+ self.assertEqual('my_portid%s' % id_suffix, nw_inf[index]['id'])
+ self.assertEqual('my_mac%s' % id_suffix, nw_inf[index]['address'])
+ self.assertEqual('10.0.%s.0/24' % id_suffix,
+ nw_inf[index]['network']['subnets'][0]['cidr'])
+
+ ip_addr = model.IP(address='8.8.%s.1' % id_suffix,
+ version=4, type='dns')
+ self.assertIn(ip_addr, nw_inf[index]['network']['subnets'][0]['dns'])
+
+ def _get_instance_nw_info(self, number):
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
+ api.db.instance_info_cache_update(mox.IgnoreArg(),
+ self.instance['uuid'],
+ mox.IgnoreArg())
+ port_data = number == 1 and self.port_data1 or self.port_data2
+ nets = number == 1 and self.nets1 or self.nets2
+ net_info_cache = []
+ for port in port_data:
+ net_info_cache.append({"network": {"id": port['network_id']},
+ "id": port['id']})
+
+ instance = copy.copy(self.instance)
+        # This line intentionally does not wrap net_info_cache in
+        # jsonutils.dumps(), to test the code path where it is not unicode.
+ instance['info_cache'] = {'network_info': net_info_cache}
+
+ self.moxed_client.list_ports(
+ tenant_id=self.instance['project_id'],
+ device_id=self.instance['uuid']).AndReturn(
+ {'ports': port_data})
+ net_ids = [port['network_id'] for port in port_data]
+ nets = number == 1 and self.nets1 or self.nets2
+ self.moxed_client.list_networks(
+ id=net_ids).AndReturn({'networks': nets})
+ for i in xrange(1, number + 1):
+ float_data = number == 1 and self.float_data1 or self.float_data2
+ for ip in port_data[i - 1]['fixed_ips']:
+ float_data = [x for x in float_data
+ if x['fixed_ip_address'] == ip['ip_address']]
+ self.moxed_client.list_floatingips(
+ fixed_ip_address=ip['ip_address'],
+ port_id=port_data[i - 1]['id']).AndReturn(
+ {'floatingips': float_data})
+ subnet_data = i == 1 and self.subnet_data1 or self.subnet_data2
+ self.moxed_client.list_subnets(
+ id=mox.SameElementsAs(['my_subid%s' % i])).AndReturn(
+ {'subnets': subnet_data})
+ self.moxed_client.list_ports(
+ network_id=subnet_data[0]['network_id'],
+ device_owner='network:dhcp').AndReturn(
+ {'ports': []})
+ self.mox.ReplayAll()
+ nw_inf = api.get_instance_nw_info(self.context, instance)
+ for i in xrange(0, number):
+ self._verify_nw_info(nw_inf, i)
+
+ def _allocate_for_instance(self, net_idx=1, **kwargs):
+ api = self._stub_allocate_for_instance(net_idx, **kwargs)
+ return api.allocate_for_instance(self.context, self.instance, **kwargs)
+
+
+class TestNeutronv2(TestNeutronv2Base):
+
+ def setUp(self):
+ super(TestNeutronv2, self).setUp()
+ neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
+ self.moxed_client)
+
+ def test_get_instance_nw_info_1(self):
+ # Test to get one port in one network and subnet.
+ neutronv2.get_client(mox.IgnoreArg(),
+ admin=True).MultipleTimes().AndReturn(
+ self.moxed_client)
+ self._get_instance_nw_info(1)
+
+ def test_get_instance_nw_info_2(self):
+ # Test to get one port in each of two networks and subnets.
+ neutronv2.get_client(mox.IgnoreArg(),
+ admin=True).MultipleTimes().AndReturn(
+ self.moxed_client)
+ self._get_instance_nw_info(2)
+
+ def test_get_instance_nw_info_with_nets_add_interface(self):
+        # This tests that adding an interface to an instance does not
+        # remove the first interface from the instance.
+ network_model = model.Network(id='network_id',
+ bridge='br-int',
+ injected='injected',
+ label='fake_network',
+ tenant_id='fake_tenant')
+ network_cache = {'info_cache': {
+ 'network_info': [{'id': self.port_data2[0]['id'],
+ 'address': 'mac_address',
+ 'network': network_model,
+ 'type': 'ovs',
+ 'ovs_interfaceid': 'ovs_interfaceid',
+ 'devname': 'devname'}]}}
+
+ self._fake_get_instance_nw_info_helper(network_cache,
+ self.port_data2,
+ self.nets2,
+ [self.port_data2[1]['id']])
+
+ def test_get_instance_nw_info_remove_ports_from_neutron(self):
+        # This tests that when a port is removed in neutron it
+        # is also removed from nova's network info cache.
+ network_model = model.Network(id=self.port_data2[0]['network_id'],
+ bridge='br-int',
+ injected='injected',
+ label='fake_network',
+ tenant_id='fake_tenant')
+ network_cache = {'info_cache': {
+ 'network_info': [{'id': 'network_id',
+ 'address': 'mac_address',
+ 'network': network_model,
+ 'type': 'ovs',
+ 'ovs_interfaceid': 'ovs_interfaceid',
+ 'devname': 'devname'}]}}
+
+ self._fake_get_instance_nw_info_helper(network_cache,
+ self.port_data2,
+ None,
+ None)
+
+ def test_get_instance_nw_info_ignores_neturon_ports(self):
+ # Tests that only ports in the network_cache are updated
+ # and ports returned from neutron that match the same
+ # instance_id/device_id are ignored.
+ port_data2 = copy.copy(self.port_data2)
+
+ # set device_id on the ports to be the same.
+ port_data2[1]['device_id'] = port_data2[0]['device_id']
+ network_model = model.Network(id='network_id',
+ bridge='br-int',
+ injected='injected',
+ label='fake_network',
+ tenant_id='fake_tenant')
+ network_cache = {'info_cache': {
+ 'network_info': [{'id': 'network_id',
+ 'address': 'mac_address',
+ 'network': network_model,
+ 'type': 'ovs',
+ 'ovs_interfaceid': 'ovs_interfaceid',
+ 'devname': 'devname'}]}}
+
+ self._fake_get_instance_nw_info_helper(network_cache,
+ port_data2,
+ None,
+ None)
+
+ def _fake_get_instance_nw_info_helper(self, network_cache,
+ current_neutron_ports,
+ networks=None, port_ids=None):
+ """Helper function to test get_instance_nw_info.
+
+ :param network_cache - data already in the nova network cache.
+ :param current_neutron_ports - updated list of ports from neutron.
+ :param networks - networks of ports being added to instance.
+ :param port_ids - new ports being added to instance.
+ """
+
+ # keep a copy of the original ports/networks to pass to
+ # get_instance_nw_info() as the code below changes them.
+ original_port_ids = copy.copy(port_ids)
+ original_networks = copy.copy(networks)
+
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
+ api.db.instance_info_cache_update(
+ mox.IgnoreArg(),
+ self.instance['uuid'], mox.IgnoreArg())
+ neutronv2.get_client(mox.IgnoreArg(),
+ admin=True).MultipleTimes().AndReturn(
+ self.moxed_client)
+ self.moxed_client.list_ports(
+ tenant_id=self.instance['project_id'],
+ device_id=self.instance['uuid']).AndReturn(
+ {'ports': current_neutron_ports})
+
+ ifaces = network_cache['info_cache']['network_info']
+
+ if port_ids is None:
+ port_ids = [iface['id'] for iface in ifaces]
+ net_ids = [iface['network']['id'] for iface in ifaces]
+ nets = [{'id': iface['network']['id'],
+ 'name': iface['network']['label'],
+ 'tenant_id': iface['network']['meta']['tenant_id']}
+ for iface in ifaces]
+ if networks is None:
+ self.moxed_client.list_networks(
+ id=net_ids).AndReturn({'networks': nets})
+ else:
+ networks = networks + [
+ dict(id=iface['network']['id'],
+ name=iface['network']['label'],
+ tenant_id=iface['network']['meta']['tenant_id'])
+ for iface in ifaces]
+ port_ids = [iface['id'] for iface in ifaces] + port_ids
+
+ index = 0
+
+ current_neutron_port_map = {}
+ for current_neutron_port in current_neutron_ports:
+ current_neutron_port_map[current_neutron_port['id']] = (
+ current_neutron_port)
+ for port_id in port_ids:
+ current_neutron_port = current_neutron_port_map.get(port_id)
+ if current_neutron_port:
+ for ip in current_neutron_port['fixed_ips']:
+ self.moxed_client.list_floatingips(
+ fixed_ip_address=ip['ip_address'],
+ port_id=current_neutron_port['id']).AndReturn(
+ {'floatingips': [self.float_data2[index]]})
+ self.moxed_client.list_subnets(
+ id=mox.SameElementsAs([ip['subnet_id']])
+ ).AndReturn(
+ {'subnets': [self.subnet_data_n[index]]})
+ self.moxed_client.list_ports(
+ network_id=current_neutron_port['network_id'],
+ device_owner='network:dhcp').AndReturn(
+ {'ports': self.dhcp_port_data1})
+ index += 1
+ self.mox.ReplayAll()
+
+ self.instance['info_cache'] = network_cache
+ instance = copy.copy(self.instance)
+ instance['info_cache'] = network_cache['info_cache']
+ nw_infs = api.get_instance_nw_info(self.context,
+ instance,
+ networks=original_networks,
+ port_ids=original_port_ids)
+
+ self.assertEqual(index, len(nw_infs))
+ # ensure that nic ordering is preserved
+ for iface_index in range(index):
+ self.assertEqual(nw_infs[iface_index]['id'],
+ port_ids[iface_index])
+
+ def test_get_instance_nw_info_without_subnet(self):
+ # Test get instance_nw_info for a port without subnet.
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
+ api.db.instance_info_cache_update(
+ mox.IgnoreArg(),
+ self.instance['uuid'], mox.IgnoreArg())
+ self.moxed_client.list_ports(
+ tenant_id=self.instance['project_id'],
+ device_id=self.instance['uuid']).AndReturn(
+ {'ports': self.port_data3})
+ self.moxed_client.list_networks(
+ id=[self.port_data1[0]['network_id']]).AndReturn(
+ {'networks': self.nets1})
+ neutronv2.get_client(mox.IgnoreArg(),
+ admin=True).MultipleTimes().AndReturn(
+ self.moxed_client)
+
+ net_info_cache = []
+ for port in self.port_data3:
+ net_info_cache.append({"network": {"id": port['network_id']},
+ "id": port['id']})
+ instance = copy.copy(self.instance)
+ instance['info_cache'] = {'network_info':
+ six.text_type(
+ jsonutils.dumps(net_info_cache))}
+
+ self.mox.ReplayAll()
+
+ nw_inf = api.get_instance_nw_info(self.context,
+ instance)
+
+ id_suffix = 3
+ self.assertEqual(0, len(nw_inf.fixed_ips()))
+ self.assertEqual('my_netname1', nw_inf[0]['network']['label'])
+ self.assertEqual('my_portid%s' % id_suffix, nw_inf[0]['id'])
+ self.assertEqual('my_mac%s' % id_suffix, nw_inf[0]['address'])
+ self.assertEqual(0, len(nw_inf[0]['network']['subnets']))
+
+ def test_refresh_neutron_extensions_cache(self):
+ api = neutronapi.API()
+
+ # Note: Don't want the default get_client from setUp()
+ self.mox.ResetAll()
+ neutronv2.get_client(mox.IgnoreArg()).AndReturn(
+ self.moxed_client)
+ self.moxed_client.list_extensions().AndReturn(
+ {'extensions': [{'name': constants.QOS_QUEUE}]})
+ self.mox.ReplayAll()
+ api._refresh_neutron_extensions_cache(mox.IgnoreArg())
+ self.assertEqual(
+ {constants.QOS_QUEUE: {'name': constants.QOS_QUEUE}},
+ api.extensions)
+
+ def test_populate_neutron_extension_values_rxtx_factor(self):
+ api = neutronapi.API()
+
+ # Note: Don't want the default get_client from setUp()
+ self.mox.ResetAll()
+ neutronv2.get_client(mox.IgnoreArg()).AndReturn(
+ self.moxed_client)
+ self.moxed_client.list_extensions().AndReturn(
+ {'extensions': [{'name': constants.QOS_QUEUE}]})
+ self.mox.ReplayAll()
+ flavor = flavors.get_default_flavor()
+ flavor['rxtx_factor'] = 1
+ sys_meta = utils.dict_to_metadata(
+ flavors.save_flavor_info({}, flavor))
+ instance = {'system_metadata': sys_meta}
+ port_req_body = {'port': {}}
+ api._populate_neutron_extension_values(self.context, instance,
+ None, port_req_body)
+ self.assertEqual(port_req_body['port']['rxtx_factor'], 1)
+
+ def test_allocate_for_instance_1(self):
+ # Allocate one port in one network env.
+ self._allocate_for_instance(1)
+
+ def test_allocate_for_instance_2(self):
+ # Allocate one port in two networks env.
+ api = self._stub_allocate_for_instance(net_idx=2)
+ self.assertRaises(exception.NetworkAmbiguous,
+ api.allocate_for_instance,
+ self.context, self.instance)
+
+ def test_allocate_for_instance_accepts_macs_kwargs_None(self):
+ # The macs kwarg should be accepted as None.
+ self._allocate_for_instance(1, macs=None)
+
+ def test_allocate_for_instance_accepts_macs_kwargs_set(self):
+ # The macs kwarg should be accepted, as a set, the
+ # _allocate_for_instance helper checks that the mac is used to create a
+ # port.
+ self._allocate_for_instance(1, macs=set(['ab:cd:ef:01:23:45']))
+
+ def test_allocate_for_instance_accepts_only_portid(self):
+ # Make sure allocate_for_instance works when only a portid is provided
+ self._returned_nw_info = self.port_data1
+ result = self._allocate_for_instance(
+ requested_networks=objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id='my_portid1')]))
+ self.assertEqual(self.port_data1, result)
+
+ def test_allocate_for_instance_not_enough_macs_via_ports(self):
+ # using a hypervisor MAC via a pre-created port will stop it being
+ # used to dynamically create a port on a network. We put the network
+ # first in requested_networks so that if the code were to not pre-check
+ # requested ports, it would incorrectly assign the mac and not fail.
+ requested_networks = objects.NetworkRequestList(
+ objects = [
+ objects.NetworkRequest(network_id=self.nets2[1]['id']),
+ objects.NetworkRequest(port_id='my_portid1')])
+ api = self._stub_allocate_for_instance(
+ net_idx=2, requested_networks=requested_networks,
+ macs=set(['my_mac1']),
+ _break='mac' + self.nets2[1]['id'])
+ self.assertRaises(exception.PortNotFree,
+ api.allocate_for_instance, self.context,
+ self.instance, requested_networks=requested_networks,
+ macs=set(['my_mac1']))
+
+ def test_allocate_for_instance_not_enough_macs(self):
+ # If not enough MAC addresses are available to allocate to networks, an
+ # error should be raised.
+ # We could pass in macs=set(), but that wouldn't tell us that
+ # allocate_for_instance tracks used macs properly, so we pass in one
+ # mac, and ask for two networks.
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id=self.nets2[1]['id']),
+ objects.NetworkRequest(network_id=self.nets2[0]['id'])])
+ api = self._stub_allocate_for_instance(
+ net_idx=2, requested_networks=requested_networks,
+ macs=set(['my_mac2']),
+ _break='mac' + self.nets2[0]['id'])
+ with mock.patch.object(api, '_delete_ports'):
+ self.assertRaises(exception.PortNotFree,
+ api.allocate_for_instance, self.context,
+ self.instance,
+ requested_networks=requested_networks,
+ macs=set(['my_mac2']))
+
+    def test_allocate_for_instance_two_macs_two_networks(self):
+        # If two MACs are available and two networks requested, two new ports
+        # get made and no exceptions raised.
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(network_id=self.nets2[1]['id']),
+                     objects.NetworkRequest(network_id=self.nets2[0]['id'])])
+        # Happy path: one MAC can be consumed per created port.
+        self._allocate_for_instance(
+            net_idx=2, requested_networks=requested_networks,
+            macs=set(['my_mac2', 'my_mac1']))
+
+    def test_allocate_for_instance_mac_conflicting_requested_port(self):
+        # Request a single pre-created port while supplying a MAC set that
+        # does not include that port's address.
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(port_id='my_portid1')])
+        api = self._stub_allocate_for_instance(
+            net_idx=1, requested_networks=requested_networks,
+            macs=set(['unknown:mac']),
+            _break='pre_list_networks')
+        # The pre-created port's MAC is not in the allowed set, so the port
+        # must be rejected as unusable.
+        self.assertRaises(exception.PortNotUsable,
+                          api.allocate_for_instance, self.context,
+                          self.instance, requested_networks=requested_networks,
+                          macs=set(['unknown:mac']))
+
+    def test_allocate_for_instance_without_requested_networks(self):
+        # With several networks available (net_idx=3) and no explicit
+        # request, the network choice is ambiguous.
+        api = self._stub_allocate_for_instance(net_idx=3)
+        self.assertRaises(exception.NetworkAmbiguous,
+                          api.allocate_for_instance,
+                          self.context, self.instance)
+
+    def test_allocate_for_instance_with_requested_non_available_network(self):
+        """Verify that a non available network is ignored.
+
+        self.nets2 (net_idx=2) is composed of self.nets3[0] and self.nets3[1]
+        Do not create a port on a non available network self.nets3[2].
+        """
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(network_id=net['id'])
+                     for net in (self.nets3[0], self.nets3[2], self.nets3[1])])
+        self._allocate_for_instance(net_idx=2,
+                                    requested_networks=requested_networks)
+
+    def test_allocate_for_instance_with_requested_networks(self):
+        # Request all three networks explicitly, in a shuffled order;
+        # allocation should succeed for each of them.
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(network_id=net['id'])
+                     for net in (self.nets3[1], self.nets3[0], self.nets3[2])])
+        self._allocate_for_instance(net_idx=3,
+                                    requested_networks=requested_networks)
+
+    def test_allocate_for_instance_with_requested_networks_with_fixedip(self):
+        # Request a single network with an explicit fixed IP address.
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(network_id=self.nets1[0]['id'],
+                                            address='10.0.1.0')])
+        self._allocate_for_instance(net_idx=1,
+                                    requested_networks=requested_networks)
+
+    def test_allocate_for_instance_with_requested_networks_with_port(self):
+        # Request allocation against a single pre-created port.
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(port_id='my_portid1')])
+        self._allocate_for_instance(net_idx=1,
+                                    requested_networks=requested_networks)
+
+    def test_allocate_for_instance_no_networks(self):
+        """Verify that empty network info is returned (not an exception)
+        when no networks are defined for the tenant.
+        """
+        self.instance = fake_instance.fake_instance_obj(self.context,
+                                                        **self.instance)
+        api = neutronapi.API()
+        self.moxed_client.list_extensions().AndReturn({'extensions': []})
+        # Neither tenant-owned nor shared networks exist.
+        self.moxed_client.list_networks(
+            tenant_id=self.instance.project_id,
+            shared=False).AndReturn(
+                {'networks': model.NetworkInfo([])})
+        self.moxed_client.list_networks(shared=True).AndReturn(
+            {'networks': model.NetworkInfo([])})
+        self.mox.ReplayAll()
+        nwinfo = api.allocate_for_instance(self.context, self.instance)
+        self.assertEqual(len(nwinfo), 0)
+
+    def test_allocate_for_instance_ex1(self):
+        """verify we will delete created ports
+        if we fail to allocate all net resources.
+
+        Mox to raise exception when creating a second port.
+        In this case, the code should delete the first created port.
+        """
+        self.instance = fake_instance.fake_instance_obj(self.context,
+                                                        **self.instance)
+        api = neutronapi.API()
+        self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
+        self.mox.StubOutWithMock(api, '_has_port_binding_extension')
+        api._has_port_binding_extension(mox.IgnoreArg(),
+                                        neutron=self.moxed_client,
+                                        refresh_cache=True).AndReturn(False)
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(network_id=net['id'])
+                     for net in (self.nets2[0], self.nets2[1])])
+        self.moxed_client.list_networks(
+            id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2})
+        # Record one create_port expectation per requested network: the
+        # first succeeds, the second raises OverQuota.
+        index = 0
+        for network in self.nets2:
+            binding_port_req_body = {
+                'port': {
+                    'device_id': self.instance.uuid,
+                    'device_owner': 'compute:nova',
+                },
+            }
+            port_req_body = {
+                'port': {
+                    'network_id': network['id'],
+                    'admin_state_up': True,
+                    'tenant_id': self.instance.project_id,
+                },
+            }
+            port_req_body['port'].update(binding_port_req_body['port'])
+            port = {'id': 'portid_' + network['id']}
+
+            api._populate_neutron_extension_values(self.context,
+                self.instance, None, binding_port_req_body,
+                neutron=self.moxed_client).AndReturn(None)
+            if index == 0:
+                self.moxed_client.create_port(
+                    MyComparator(port_req_body)).AndReturn({'port': port})
+            else:
+                NeutronOverQuota = exceptions.OverQuotaClient()
+                self.moxed_client.create_port(
+                    MyComparator(port_req_body)).AndRaise(NeutronOverQuota)
+            index += 1
+        # Cleanup: the successfully created first port must be deleted.
+        self.moxed_client.delete_port('portid_' + self.nets2[0]['id'])
+        self.mox.ReplayAll()
+        self.assertRaises(exception.PortLimitExceeded,
+                          api.allocate_for_instance,
+                          self.context, self.instance,
+                          requested_networks=requested_networks)
+
+    def test_allocate_for_instance_ex2(self):
+        """verify we have no port to delete
+        if we fail to allocate the first net resource.
+
+        Mox to raise exception when creating the first port.
+        In this case, the code should not delete any ports.
+        """
+        self.instance = fake_instance.fake_instance_obj(self.context,
+                                                        **self.instance)
+        api = neutronapi.API()
+        self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
+        self.mox.StubOutWithMock(api, '_has_port_binding_extension')
+        api._has_port_binding_extension(mox.IgnoreArg(),
+                                        neutron=self.moxed_client,
+                                        refresh_cache=True).AndReturn(False)
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(network_id=net['id'])
+                     for net in (self.nets2[0], self.nets2[1])])
+        self.moxed_client.list_networks(
+            id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2})
+        binding_port_req_body = {
+            'port': {
+                'device_id': self.instance.uuid,
+                'device_owner': 'compute:nova',
+            },
+        }
+        port_req_body = {
+            'port': {
+                'network_id': self.nets2[0]['id'],
+                'admin_state_up': True,
+                'device_id': self.instance.uuid,
+                'tenant_id': self.instance.project_id,
+            },
+        }
+        api._populate_neutron_extension_values(self.context,
+            self.instance, None, binding_port_req_body,
+            neutron=self.moxed_client).AndReturn(None)
+        # The very first create_port fails, so no delete_port expectation
+        # is recorded: there is nothing to clean up.
+        self.moxed_client.create_port(
+            MyComparator(port_req_body)).AndRaise(
+                Exception("fail to create port"))
+        self.mox.ReplayAll()
+        self.assertRaises(NEUTRON_CLIENT_EXCEPTION, api.allocate_for_instance,
+                          self.context, self.instance,
+                          requested_networks=requested_networks)
+
+    def test_allocate_for_instance_no_port_or_network(self):
+        # Sentinel exception used to stop execution once the call of
+        # interest has been made.
+        class BailOutEarly(Exception):
+            pass
+        self.instance = fake_instance.fake_instance_obj(self.context,
+                                                        **self.instance)
+        api = neutronapi.API()
+        self.moxed_client.list_extensions().AndReturn({'extensions': []})
+        self.mox.StubOutWithMock(api, '_get_available_networks')
+        # Make sure we get an empty list and then bail out of the rest
+        # of the function
+        api._get_available_networks(self.context, self.instance.project_id,
+                                    [],
+                                    neutron=self.moxed_client).\
+            AndRaise(BailOutEarly)
+        self.mox.ReplayAll()
+        # A NetworkRequest with neither port nor network set must result
+        # in an empty net-id list being passed down.
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest()])
+        self.assertRaises(BailOutEarly,
+                          api.allocate_for_instance,
+                          self.context, self.instance,
+                          requested_networks=requested_networks)
+
+    def test_allocate_for_instance_second_time(self):
+        # Make sure that allocate_for_instance only returns ports that it
+        # allocated during _that_ run.
+        # port_data1 simulates ports left over from a previous allocation;
+        # only the newly created 'fake' port should be returned.
+        new_port = {'id': 'fake'}
+        self._returned_nw_info = self.port_data1 + [new_port]
+        nw_info = self._allocate_for_instance()
+        self.assertEqual(nw_info, [new_port])
+
+    def test_allocate_for_instance_port_in_use(self):
+        # If a port is already in use, an exception should be raised.
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(port_id='my_portid1')])
+        # NOTE(review): _device=True appears to make the stubbed port carry
+        # a device_id (helper not shown in this hunk) — confirm there.
+        api = self._stub_allocate_for_instance(
+            requested_networks=requested_networks,
+            _break='pre_list_networks',
+            _device=True)
+        self.assertRaises(exception.PortInUse,
+                          api.allocate_for_instance, self.context,
+                          self.instance, requested_networks=requested_networks)
+
+    def test_allocate_for_instance_with_externalnet_forbidden(self):
+        """Only one network is available, it's external, and the client
+        is unauthorized to use it.
+        """
+        self.instance = fake_instance.fake_instance_obj(self.context,
+                                                        **self.instance)
+        self.moxed_client.list_extensions().AndReturn({'extensions': []})
+        # no networks in the tenant
+        self.moxed_client.list_networks(
+            tenant_id=self.instance.project_id,
+            shared=False).AndReturn(
+                {'networks': model.NetworkInfo([])})
+        # external network is shared
+        self.moxed_client.list_networks(shared=True).AndReturn(
+            {'networks': self.nets8})
+        self.mox.ReplayAll()
+        api = neutronapi.API()
+        # A non-admin context may not attach to the external network.
+        self.assertRaises(exception.ExternalNetworkAttachForbidden,
+                          api.allocate_for_instance,
+                          self.context, self.instance)
+
+    def test_allocate_for_instance_with_externalnet_multiple(self):
+        """Multiple networks are available, one the client is authorized
+        to use, and an external one the client is unauthorized to use.
+        """
+        self.instance = fake_instance.fake_instance_obj(self.context,
+                                                        **self.instance)
+        self.moxed_client.list_extensions().AndReturn({'extensions': []})
+        # network found in the tenant
+        self.moxed_client.list_networks(
+            tenant_id=self.instance.project_id,
+            shared=False).AndReturn(
+                {'networks': self.nets1})
+        # external network is shared
+        self.moxed_client.list_networks(shared=True).AndReturn(
+            {'networks': self.nets8})
+        self.mox.ReplayAll()
+        api = neutronapi.API()
+        # More than one candidate network and no explicit request means
+        # the choice is ambiguous.
+        self.assertRaises(
+            exception.NetworkAmbiguous,
+            api.allocate_for_instance,
+            self.context, self.instance)
+
+    def test_allocate_for_instance_with_externalnet_admin_ctx(self):
+        """Only one network is available, it's external, and the client
+        is authorized.
+        """
+        admin_ctx = context.RequestContext('userid', 'my_tenantid',
+                                           is_admin=True)
+        api = self._stub_allocate_for_instance(net_idx=8)
+        # An admin context is allowed to attach to the external network,
+        # so no exception is expected.
+        api.allocate_for_instance(admin_ctx, self.instance)
+
+    def _deallocate_for_instance(self, number, requested_networks=None):
+        # Helper: record expectations for deallocating `number` ports,
+        # optionally combined with explicitly requested networks/ports
+        # (which are updated rather than deleted).
+        # TODO(mriedem): Remove this conversion when all neutronv2 APIs are
+        # converted to handling instance objects.
+        self.instance = fake_instance.fake_instance_obj(self.context,
+                                                        **self.instance)
+        api = neutronapi.API()
+        port_data = number == 1 and self.port_data1 or self.port_data2
+        ret_data = copy.deepcopy(port_data)
+        if requested_networks:
+            if isinstance(requested_networks, objects.NetworkRequestList):
+                # NOTE(danms): Temporary and transitional
+                with mock.patch('nova.utils.is_neutron', return_value=True):
+                    requested_networks = requested_networks.as_tuples()
+            for net, fip, port, request_id in requested_networks:
+                ret_data.append({'network_id': net,
+                                 'device_id': self.instance.uuid,
+                                 'device_owner': 'compute:nova',
+                                 'id': port,
+                                 'status': 'DOWN',
+                                 'admin_state_up': True,
+                                 'fixed_ips': [],
+                                 'mac_address': 'fake_mac', })
+        self.moxed_client.list_ports(
+            device_id=self.instance.uuid).AndReturn(
+                {'ports': ret_data})
+        if requested_networks:
+            # Explicitly requested ports are detached (update), not deleted.
+            for net, fip, port, request_id in requested_networks:
+                self.moxed_client.update_port(port)
+        for port in reversed(port_data):
+            self.moxed_client.delete_port(port['id'])
+
+        self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
+        api.db.instance_info_cache_update(self.context,
+                                          self.instance.uuid,
+                                          {'network_info': '[]'})
+        self.mox.ReplayAll()
+
+        # NOTE(review): a fresh API object is created here; the earlier one
+        # was only needed to reach api.db for stubbing above.
+        api = neutronapi.API()
+        api.deallocate_for_instance(self.context, self.instance,
+                                    requested_networks=requested_networks)
+
+    def test_deallocate_for_instance_1_with_requested(self):
+        requested = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(network_id='fake-net',
+                                            address='1.2.3.4',
+                                            port_id='fake-port')])
+        # Deallocate in a one-port environment with a requested port present.
+        self._deallocate_for_instance(1, requested_networks=requested)
+
+    def test_deallocate_for_instance_2_with_requested(self):
+        requested = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(network_id='fake-net',
+                                            address='1.2.3.4',
+                                            port_id='fake-port')])
+        # Deallocate in a two-port environment with a requested port present.
+        self._deallocate_for_instance(2, requested_networks=requested)
+
+    def test_deallocate_for_instance_1(self):
+        # Test deallocation when the instance has a single port.
+        self._deallocate_for_instance(1)
+
+    def test_deallocate_for_instance_2(self):
+        # Test deallocation when the instance has two ports.
+        self._deallocate_for_instance(2)
+
+    def test_deallocate_for_instance_port_not_found(self):
+        # TODO(mriedem): Remove this conversion when all neutronv2 APIs are
+        # converted to handling instance objects.
+        self.instance = fake_instance.fake_instance_obj(self.context,
+                                                        **self.instance)
+        port_data = self.port_data1
+        self.moxed_client.list_ports(
+            device_id=self.instance.uuid).AndReturn(
+                {'ports': port_data})
+
+        NeutronNotFound = exceptions.NeutronClientException(status_code=404)
+        for port in reversed(port_data):
+            self.moxed_client.delete_port(port['id']).AndRaise(
+                NeutronNotFound)
+        self.mox.ReplayAll()
+
+        api = neutronapi.API()
+        # A 404 on delete_port (port already gone) must not raise.
+        api.deallocate_for_instance(self.context, self.instance)
+
+    def _test_deallocate_port_for_instance(self, number):
+        # Helper: deallocate the first of `number` ports and verify the
+        # returned network info only covers the remaining ports.
+        port_data = number == 1 and self.port_data1 or self.port_data2
+        nets = number == 1 and self.nets1 or self.nets2
+        self.moxed_client.delete_port(port_data[0]['id'])
+
+        # Seed the instance's info cache with all ports so the refresh
+        # after deallocation has something to rebuild from.
+        net_info_cache = []
+        for port in port_data:
+            net_info_cache.append({"network": {"id": port['network_id']},
+                                   "id": port['id']})
+        instance = copy.copy(self.instance)
+        instance['info_cache'] = {'network_info':
+                                  six.text_type(
+                                      jsonutils.dumps(net_info_cache))}
+        api = neutronapi.API()
+        neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
+            self.moxed_client)
+        self.moxed_client.list_ports(
+            tenant_id=self.instance['project_id'],
+            device_id=self.instance['uuid']).AndReturn(
+                {'ports': port_data[1:]})
+        neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
+            self.moxed_client)
+        net_ids = [port['network_id'] for port in port_data]
+        self.moxed_client.list_networks(id=net_ids).AndReturn(
+            {'networks': nets})
+        float_data = number == 1 and self.float_data1 or self.float_data2
+        for data in port_data[1:]:
+            for ip in data['fixed_ips']:
+                self.moxed_client.list_floatingips(
+                    fixed_ip_address=ip['ip_address'],
+                    port_id=data['id']).AndReturn(
+                        {'floatingips': float_data[1:]})
+        for port in port_data[1:]:
+            self.moxed_client.list_subnets(id=['my_subid2']).AndReturn({})
+
+        self.mox.ReplayAll()
+
+        nwinfo = api.deallocate_port_for_instance(self.context, instance,
+                                                  port_data[0]['id'])
+        self.assertEqual(len(nwinfo), len(port_data[1:]))
+        if len(port_data) > 1:
+            self.assertEqual(nwinfo[0]['network']['id'], 'my_netid2')
+
+    def test_deallocate_port_for_instance_1(self):
+        # Test deallocating the first and only port.
+        self._test_deallocate_port_for_instance(1)
+
+    def test_deallocate_port_for_instance_2(self):
+        # Test deallocating the first port of two.
+        self._test_deallocate_port_for_instance(2)
+
+    def test_list_ports(self):
+        # Search options must be passed through verbatim to the client.
+        search_opts = {'parm': 'value'}
+        self.moxed_client.list_ports(**search_opts)
+        self.mox.ReplayAll()
+        neutronapi.API().list_ports(self.context, **search_opts)
+
+    def test_show_port(self):
+        # show_port is a straight pass-through of the port id.
+        self.moxed_client.show_port('foo')
+        self.mox.ReplayAll()
+        neutronapi.API().show_port(self.context, 'foo')
+
+    def test_validate_networks(self):
+        # Both requested networks exist and the port quota (50) leaves
+        # plenty of headroom, so validation passes.
+        requested_networks = [('my_netid1', None, None, None),
+                              ('my_netid2', None, None, None)]
+        ids = ['my_netid1', 'my_netid2']
+        self.moxed_client.list_networks(
+            id=mox.SameElementsAs(ids)).AndReturn(
+                {'networks': self.nets2})
+        self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
+            {'ports': []})
+        self.moxed_client.show_quota(
+            tenant_id='my_tenantid').AndReturn(
+                {'quota': {'port': 50}})
+        self.mox.ReplayAll()
+        api = neutronapi.API()
+        api.validate_networks(self.context, requested_networks, 1)
+
+    def test_validate_networks_without_port_quota_on_network_side(self):
+        # An empty quota dict (no 'port' key) must be treated as no limit.
+        requested_networks = [('my_netid1', None, None, None),
+                              ('my_netid2', None, None, None)]
+        ids = ['my_netid1', 'my_netid2']
+        self.moxed_client.list_networks(
+            id=mox.SameElementsAs(ids)).AndReturn(
+                {'networks': self.nets2})
+        self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
+            {'ports': []})
+        self.moxed_client.show_quota(
+            tenant_id='my_tenantid').AndReturn(
+                {'quota': {}})
+        self.mox.ReplayAll()
+        api = neutronapi.API()
+        api.validate_networks(self.context, requested_networks, 1)
+
+    def test_validate_networks_ex_1(self):
+        requested_networks = [('my_netid1', None, None, None)]
+        self.moxed_client.list_networks(
+            id=mox.SameElementsAs(['my_netid1'])).AndReturn(
+                {'networks': self.nets1})
+        self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
+            {'ports': []})
+        self.moxed_client.show_quota(
+            tenant_id='my_tenantid').AndReturn(
+                {'quota': {'port': 50}})
+        self.mox.ReplayAll()
+        api = neutronapi.API()
+        # NOTE(review): only my_netid1 is requested and returned, so the
+        # except branch (which checks for "my_netid2") may never fire, and
+        # there is no self.fail() if nothing raises — confirm intent.
+        try:
+            api.validate_networks(self.context, requested_networks, 1)
+        except exception.NetworkNotFound as ex:
+            self.assertIn("my_netid2", six.text_type(ex))
+
+ def test_validate_networks_ex_2(self):
+ requested_networks = [('my_netid1', None, None, None),
+ ('my_netid2', None, None, None),
+ ('my_netid3', None, None, None)]
+ ids = ['my_netid1', 'my_netid2', 'my_netid3']
+ self.moxed_client.list_networks(
+ id=mox.SameElementsAs(ids)).AndReturn(
+ {'networks': self.nets1})
+ self.mox.ReplayAll()
+ api = neutronapi.API()
+ try:
+ api.validate_networks(self.context, requested_networks, 1)
+ except exception.NetworkNotFound as ex:
+ self.assertIn("my_netid2, my_netid3", six.text_type(ex))
+
+    def test_validate_networks_duplicate_disable(self):
+        """Verify that the correct exception is thrown when duplicate
+        network ids are passed to validate_networks, when nova config flag
+        allow_duplicate_networks is set to its default value: False
+        """
+        requested_networks = [('my_netid1', None, None, None),
+                              ('my_netid1', None, None, None)]
+        self.mox.ReplayAll()
+        # Expected call from setUp.
+        neutronv2.get_client(None)
+        api = neutronapi.API()
+        # The duplicate is rejected before any Neutron calls are made.
+        self.assertRaises(exception.NetworkDuplicated,
+                          api.validate_networks,
+                          self.context, requested_networks, 1)
+
+    def test_validate_networks_duplicate_enable(self):
+        """Verify that no duplicateNetworks exception is thrown when duplicate
+        network ids are passed to validate_networks, when nova config flag
+        allow_duplicate_networks is set to its non default value: True
+        """
+        self.flags(allow_duplicate_networks=True, group='neutron')
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(network_id='my_netid1'),
+                     objects.NetworkRequest(network_id='my_netid1')])
+        ids = ['my_netid1', 'my_netid1']
+
+        self.moxed_client.list_networks(
+            id=mox.SameElementsAs(ids)).AndReturn(
+                {'networks': self.nets1})
+        self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
+            {'ports': []})
+        self.moxed_client.show_quota(
+            tenant_id='my_tenantid').AndReturn(
+                {'quota': {'port': 50}})
+        self.mox.ReplayAll()
+        api = neutronapi.API()
+        # With duplicates allowed, validation proceeds to the quota check
+        # and passes.
+        api.validate_networks(self.context, requested_networks, 1)
+
+    def test_allocate_for_instance_with_requested_networks_duplicates(self):
+        # specify a duplicate network to allocate to instance
+        self.flags(allow_duplicate_networks=True, group='neutron')
+        # nets6 contains the duplicated network entries for this scenario.
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(network_id=net['id'])
+                     for net in (self.nets6[0], self.nets6[1])])
+        self._allocate_for_instance(net_idx=6,
+                                    requested_networks=requested_networks)
+
+    def test_allocate_for_instance_requested_networks_duplicates_port(self):
+        # specify first port and last port that are in same network
+        self.flags(allow_duplicate_networks=True, group='neutron')
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(port_id=port['id'])
+                     for port in (self.port_data1[0], self.port_data3[0])])
+        self._allocate_for_instance(net_idx=6,
+                                    requested_networks=requested_networks)
+
+    def test_allocate_for_instance_requested_networks_duplicates_combo(self):
+        # specify a combo net_idx=7 : net2, port in net1, net2, port in net1
+        # i.e. duplicates both by network id and by ports sharing a network.
+        self.flags(allow_duplicate_networks=True, group='neutron')
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(network_id='my_netid2'),
+                     objects.NetworkRequest(port_id=self.port_data1[0]['id']),
+                     objects.NetworkRequest(network_id='my_netid2'),
+                     objects.NetworkRequest(port_id=self.port_data3[0]['id'])])
+        self._allocate_for_instance(net_idx=7,
+                                    requested_networks=requested_networks)
+
+    def test_validate_networks_not_specified(self):
+        # An empty request with more than one candidate network (tenant's
+        # nets1 plus shared nets2) is ambiguous.
+        requested_networks = objects.NetworkRequestList(objects=[])
+        self.moxed_client.list_networks(
+            tenant_id=self.context.project_id,
+            shared=False).AndReturn(
+                {'networks': self.nets1})
+        self.moxed_client.list_networks(
+            shared=True).AndReturn(
+                {'networks': self.nets2})
+        self.mox.ReplayAll()
+        api = neutronapi.API()
+        self.assertRaises(exception.NetworkAmbiguous,
+                          api.validate_networks,
+                          self.context, requested_networks, 1)
+
+    def test_validate_networks_port_not_found(self):
+        # Verify that the correct exception is thrown when a non existent
+        # port is passed to validate_networks.
+
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(
+                network_id='my_netid1',
+                port_id='3123-ad34-bc43-32332ca33e')])
+
+        # A 404 from show_port is translated to nova's PortNotFound.
+        NeutronNotFound = exceptions.NeutronClientException(status_code=404)
+        self.moxed_client.show_port(requested_networks[0].port_id).AndRaise(
+            NeutronNotFound)
+        self.mox.ReplayAll()
+        # Expected call from setUp.
+        neutronv2.get_client(None)
+        api = neutronapi.API()
+        self.assertRaises(exception.PortNotFound,
+                          api.validate_networks,
+                          self.context, requested_networks, 1)
+
+ def test_validate_networks_port_show_rasies_non404(self):
+ # Verify that the correct exception is thrown when a non existent
+ # port is passed to validate_networks.
+
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(
+ network_id='my_netid1',
+ port_id='3123-ad34-bc43-32332ca33e')])
+
+ NeutronNotFound = exceptions.NeutronClientException(status_code=0)
+ self.moxed_client.show_port(requested_networks[0].port_id).AndRaise(
+ NeutronNotFound)
+ self.mox.ReplayAll()
+ # Expected call from setUp.
+ neutronv2.get_client(None)
+ api = neutronapi.API()
+ self.assertRaises(exceptions.NeutronClientException,
+ api.validate_networks,
+ self.context, requested_networks, 1)
+
+    def test_validate_networks_port_in_use(self):
+        # NOTE(review): port_data3[0] presumably carries a device_id
+        # (fixture defined outside this hunk), making the port in-use.
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(port_id=self.port_data3[0]['id'])])
+        self.moxed_client.show_port(self.port_data3[0]['id']).\
+            AndReturn({'port': self.port_data3[0]})
+
+        self.mox.ReplayAll()
+
+        api = neutronapi.API()
+        self.assertRaises(exception.PortInUse,
+                          api.validate_networks,
+                          self.context, requested_networks, 1)
+
+    def test_validate_networks_port_no_subnet_id(self):
+        # A free port (no device) without a fixed IP cannot be used.
+        port_a = self.port_data3[0]
+        port_a['device_id'] = None
+        port_a['device_owner'] = None
+
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(port_id=port_a['id'])])
+        self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
+
+        self.mox.ReplayAll()
+
+        api = neutronapi.API()
+        self.assertRaises(exception.PortRequiresFixedIP,
+                          api.validate_networks,
+                          self.context, requested_networks, 1)
+
+    def test_validate_networks_no_subnet_id(self):
+        # nets4 has no subnets, so a port could not get an address there.
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(network_id='his_netid4')])
+        ids = ['his_netid4']
+        self.moxed_client.list_networks(
+            id=mox.SameElementsAs(ids)).AndReturn(
+                {'networks': self.nets4})
+        self.mox.ReplayAll()
+        api = neutronapi.API()
+        self.assertRaises(exception.NetworkRequiresSubnet,
+                          api.validate_networks,
+                          self.context, requested_networks, 1)
+
+    def test_validate_networks_ports_in_same_network_disable(self):
+        """Verify that duplicateNetworks exception is thrown when ports on same
+        duplicate network are passed to validate_networks, when nova config
+        flag allow_duplicate_networks is set to its default False
+        """
+        self.flags(allow_duplicate_networks=False, group='neutron')
+        port_a = self.port_data3[0]
+        port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
+                               'subnet_id': 'subnet_id'}
+        port_b = self.port_data1[0]
+        self.assertEqual(port_a['network_id'], port_b['network_id'])
+        # Free both ports so the duplicate-network check is what fires,
+        # not PortInUse.
+        for port in [port_a, port_b]:
+            port['device_id'] = None
+            port['device_owner'] = None
+
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(port_id=port_a['id']),
+                     objects.NetworkRequest(port_id=port_b['id'])])
+        self.moxed_client.show_port(port_a['id']).AndReturn(
+            {'port': port_a})
+        self.moxed_client.show_port(port_b['id']).AndReturn(
+            {'port': port_b})
+
+        self.mox.ReplayAll()
+
+        api = neutronapi.API()
+        self.assertRaises(exception.NetworkDuplicated,
+                          api.validate_networks,
+                          self.context, requested_networks, 1)
+
+    def test_validate_networks_ports_in_same_network_enable(self):
+        """Verify that duplicateNetworks exception is not thrown when ports
+        on same duplicate network are passed to validate_networks, when nova
+        config flag allow_duplicate_networks is set to its True
+        """
+        self.flags(allow_duplicate_networks=True, group='neutron')
+        port_a = self.port_data3[0]
+        port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
+                               'subnet_id': 'subnet_id'}
+        port_b = self.port_data1[0]
+        self.assertEqual(port_a['network_id'], port_b['network_id'])
+        # Free both ports so PortInUse does not interfere.
+        for port in [port_a, port_b]:
+            port['device_id'] = None
+            port['device_owner'] = None
+
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(port_id=port_a['id']),
+                     objects.NetworkRequest(port_id=port_b['id'])])
+        self.moxed_client.show_port(port_a['id']).AndReturn(
+            {'port': port_a})
+        self.moxed_client.show_port(port_b['id']).AndReturn(
+            {'port': port_b})
+
+        self.mox.ReplayAll()
+
+        api = neutronapi.API()
+        api.validate_networks(self.context, requested_networks, 1)
+
+    def test_validate_networks_ports_not_in_same_network(self):
+        # Two free ports on different networks must validate without any
+        # duplicate-network complaint.
+        port_a = self.port_data3[0]
+        port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
+                               'subnet_id': 'subnet_id'}
+        port_b = self.port_data2[1]
+        self.assertNotEqual(port_a['network_id'], port_b['network_id'])
+        for port in [port_a, port_b]:
+            port['device_id'] = None
+            port['device_owner'] = None
+
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(port_id=port_a['id']),
+                     objects.NetworkRequest(port_id=port_b['id'])])
+        self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
+        self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
+        self.mox.ReplayAll()
+
+        api = neutronapi.API()
+        api.validate_networks(self.context, requested_networks, 1)
+
+    def test_validate_networks_no_quota(self):
+        # Test validation for a request for one instance needing
+        # two ports, where the quota is 2 and 2 ports are in use
+        # => instances which can be created = 0
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(network_id='my_netid1'),
+                     objects.NetworkRequest(network_id='my_netid2')])
+        ids = ['my_netid1', 'my_netid2']
+        self.moxed_client.list_networks(
+            id=mox.SameElementsAs(ids)).AndReturn(
+                {'networks': self.nets2})
+        self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
+            {'ports': self.port_data2})
+        self.moxed_client.show_quota(
+            tenant_id='my_tenantid').AndReturn(
+                {'quota': {'port': 2}})
+        self.mox.ReplayAll()
+        api = neutronapi.API()
+        # validate_networks returns how many instances the quota permits.
+        max_count = api.validate_networks(self.context,
+                                          requested_networks, 1)
+        self.assertEqual(max_count, 0)
+
+    def test_validate_networks_with_ports_and_networks(self):
+        # Test validation for a request for one instance needing
+        # one port allocated via nova with another port being passed in.
+        # The pre-created port does not count against the port quota.
+        port_b = self.port_data2[1]
+        port_b['device_id'] = None
+        port_b['device_owner'] = None
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(network_id='my_netid1'),
+                     objects.NetworkRequest(port_id=port_b['id'])])
+        self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
+        ids = ['my_netid1']
+        self.moxed_client.list_networks(
+            id=mox.SameElementsAs(ids)).AndReturn(
+                {'networks': self.nets1})
+        self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
+            {'ports': self.port_data2})
+        self.moxed_client.show_quota(
+            tenant_id='my_tenantid').AndReturn(
+                {'quota': {'port': 5}})
+        self.mox.ReplayAll()
+        api = neutronapi.API()
+        max_count = api.validate_networks(self.context,
+                                          requested_networks, 1)
+        self.assertEqual(max_count, 1)
+
+    def test_validate_networks_one_port_and_no_networks(self):
+        # Test that show quota is not called if no networks are
+        # passed in and only ports.
+        port_b = self.port_data2[1]
+        port_b['device_id'] = None
+        port_b['device_owner'] = None
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(port_id=port_b['id'])])
+        self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
+        # No list_ports/show_quota expectations recorded: mox would fail
+        # the test if validate_networks made those calls.
+        self.mox.ReplayAll()
+        api = neutronapi.API()
+        max_count = api.validate_networks(self.context,
+                                          requested_networks, 1)
+        self.assertEqual(max_count, 1)
+
+    def test_validate_networks_some_quota(self):
+        # Test validation for a request for two instance needing
+        # two ports each, where the quota is 5 and 2 ports are in use
+        # => instances which can be created = 1
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(network_id='my_netid1'),
+                     objects.NetworkRequest(network_id='my_netid2')])
+        ids = ['my_netid1', 'my_netid2']
+        self.moxed_client.list_networks(
+            id=mox.SameElementsAs(ids)).AndReturn(
+                {'networks': self.nets2})
+        self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
+            {'ports': self.port_data2})
+        self.moxed_client.show_quota(
+            tenant_id='my_tenantid').AndReturn(
+                {'quota': {'port': 5}})
+        self.mox.ReplayAll()
+        api = neutronapi.API()
+        # (5 - 2 used) // 2 ports per instance = 1 instance.
+        max_count = api.validate_networks(self.context,
+                                          requested_networks, 2)
+        self.assertEqual(max_count, 1)
+
+    def test_validate_networks_unlimited_quota(self):
+        # Test validation for a request for two instance needing
+        # two ports each, where the quota is -1 (unlimited)
+        # => both requested instances can be created
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(network_id='my_netid1'),
+                     objects.NetworkRequest(network_id='my_netid2')])
+        ids = ['my_netid1', 'my_netid2']
+        self.moxed_client.list_networks(
+            id=mox.SameElementsAs(ids)).AndReturn(
+                {'networks': self.nets2})
+        self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
+            {'ports': self.port_data2})
+        self.moxed_client.show_quota(
+            tenant_id='my_tenantid').AndReturn(
+                {'quota': {'port': -1}})
+        self.mox.ReplayAll()
+        api = neutronapi.API()
+        max_count = api.validate_networks(self.context,
+                                          requested_networks, 2)
+        self.assertEqual(max_count, 2)
+
+    def test_validate_networks_no_quota_but_ports_supplied(self):
+        # All ports are pre-created, so no quota lookup is needed and the
+        # full requested count is permitted.
+        port_a = self.port_data3[0]
+        port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
+                               'subnet_id': 'subnet_id'}
+        port_b = self.port_data2[1]
+        self.assertNotEqual(port_a['network_id'], port_b['network_id'])
+        for port in [port_a, port_b]:
+            port['device_id'] = None
+            port['device_owner'] = None
+
+        requested_networks = objects.NetworkRequestList(
+            objects=[objects.NetworkRequest(port_id=port_a['id']),
+                     objects.NetworkRequest(port_id=port_b['id'])])
+        self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
+        self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
+
+        self.mox.ReplayAll()
+
+        api = neutronapi.API()
+        max_count = api.validate_networks(self.context,
+                                          requested_networks, 1)
+        self.assertEqual(max_count, 1)
+
+    def _mock_list_ports(self, port_data=None):
+        """Mox list_ports to return *port_data* for self.port_address.
+
+        Returns the address used, for convenience in callers.
+        """
+        if port_data is None:
+            port_data = self.port_data2
+        address = self.port_address
+        self.moxed_client.list_ports(
+            fixed_ips=MyComparator('ip_address=%s' % address)).AndReturn(
+                {'ports': port_data})
+        self.mox.ReplayAll()
+        return address
+
+ def test_get_instance_uuids_by_ip_filter(self):
+ self._mock_list_ports()
+ filters = {'ip': '^10\\.0\\.1\\.2$'}
+ api = neutronapi.API()
+ result = api.get_instance_uuids_by_ip_filter(self.context, filters)
+ self.assertEqual(self.instance2['uuid'], result[0]['instance_uuid'])
+ self.assertEqual(self.instance['uuid'], result[1]['instance_uuid'])
+
+ def test_get_fixed_ip_by_address_fails_for_no_ports(self):
+ address = self._mock_list_ports(port_data=[])
+ api = neutronapi.API()
+ self.assertRaises(exception.FixedIpNotFoundForAddress,
+ api.get_fixed_ip_by_address,
+ self.context, address)
+
+ def test_get_fixed_ip_by_address_succeeds_for_1_port(self):
+ address = self._mock_list_ports(port_data=self.port_data1)
+ api = neutronapi.API()
+ result = api.get_fixed_ip_by_address(self.context, address)
+ self.assertEqual(self.instance2['uuid'], result['instance_uuid'])
+
+ def test_get_fixed_ip_by_address_fails_for_more_than_1_port(self):
+ address = self._mock_list_ports()
+ api = neutronapi.API()
+ self.assertRaises(exception.FixedIpAssociatedWithMultipleInstances,
+ api.get_fixed_ip_by_address,
+ self.context, address)
+
+ def _get_available_networks(self, prv_nets, pub_nets,
+ req_ids=None, context=None):
+ api = neutronapi.API()
+ nets = prv_nets + pub_nets
+ if req_ids:
+ mox_list_params = {'id': req_ids}
+ self.moxed_client.list_networks(
+ **mox_list_params).AndReturn({'networks': nets})
+ else:
+ mox_list_params = {'tenant_id': self.instance['project_id'],
+ 'shared': False}
+ self.moxed_client.list_networks(
+ **mox_list_params).AndReturn({'networks': prv_nets})
+ mox_list_params = {'shared': True}
+ self.moxed_client.list_networks(
+ **mox_list_params).AndReturn({'networks': pub_nets})
+
+ self.mox.ReplayAll()
+ rets = api._get_available_networks(
+ context if context else self.context,
+ self.instance['project_id'],
+ req_ids)
+ self.assertEqual(rets, nets)
+
+ def test_get_available_networks_all_private(self):
+ self._get_available_networks(prv_nets=self.nets2, pub_nets=[])
+
+ def test_get_available_networks_all_public(self):
+ self._get_available_networks(prv_nets=[], pub_nets=self.nets2)
+
+ def test_get_available_networks_private_and_public(self):
+ self._get_available_networks(prv_nets=self.nets1, pub_nets=self.nets4)
+
+ def test_get_available_networks_with_network_ids(self):
+ prv_nets = [self.nets3[0]]
+ pub_nets = [self.nets3[-1]]
+ # specify only first and last network
+ req_ids = [net['id'] for net in (self.nets3[0], self.nets3[-1])]
+ self._get_available_networks(prv_nets, pub_nets, req_ids)
+
+ def test_get_available_networks_with_custom_policy(self):
+ rules = {'network:attach_external_network':
+ common_policy.parse_rule('')}
+ policy.set_rules(rules)
+ req_ids = [net['id'] for net in self.nets5]
+ self._get_available_networks(self.nets5, pub_nets=[], req_ids=req_ids)
+
+ def test_get_floating_ip_pools(self):
+ api = neutronapi.API()
+ search_opts = {'router:external': True}
+ self.moxed_client.list_networks(**search_opts).\
+ AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
+ self.mox.ReplayAll()
+ pools = api.get_floating_ip_pools(self.context)
+ expected = [self.fip_pool['name'], self.fip_pool_nova['name']]
+ self.assertEqual(expected, pools)
+
+ def _get_expected_fip_model(self, fip_data, idx=0):
+ expected = {'id': fip_data['id'],
+ 'address': fip_data['floating_ip_address'],
+ 'pool': self.fip_pool['name'],
+ 'project_id': fip_data['tenant_id'],
+ 'fixed_ip_id': fip_data['port_id'],
+ 'fixed_ip':
+ {'address': fip_data['fixed_ip_address']},
+ 'instance': ({'uuid': self.port_data2[idx]['device_id']}
+ if fip_data['port_id']
+ else None)}
+ return expected
+
+ def _test_get_floating_ip(self, fip_data, idx=0, by_address=False):
+ api = neutronapi.API()
+ fip_id = fip_data['id']
+ net_id = fip_data['floating_network_id']
+ address = fip_data['floating_ip_address']
+ if by_address:
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': [fip_data]})
+ else:
+ self.moxed_client.show_floatingip(fip_id).\
+ AndReturn({'floatingip': fip_data})
+ self.moxed_client.show_network(net_id).\
+ AndReturn({'network': self.fip_pool})
+ if fip_data['port_id']:
+ self.moxed_client.show_port(fip_data['port_id']).\
+ AndReturn({'port': self.port_data2[idx]})
+ self.mox.ReplayAll()
+
+ expected = self._get_expected_fip_model(fip_data, idx)
+
+ if by_address:
+ fip = api.get_floating_ip_by_address(self.context, address)
+ else:
+ fip = api.get_floating_ip(self.context, fip_id)
+ self.assertEqual(expected, fip)
+
+ def test_get_floating_ip_unassociated(self):
+ self._test_get_floating_ip(self.fip_unassociated, idx=0)
+
+ def test_get_floating_ip_associated(self):
+ self._test_get_floating_ip(self.fip_associated, idx=1)
+
+ def test_get_floating_ip_by_address(self):
+ self._test_get_floating_ip(self.fip_unassociated, idx=0,
+ by_address=True)
+
+ def test_get_floating_ip_by_address_associated(self):
+ self._test_get_floating_ip(self.fip_associated, idx=1,
+ by_address=True)
+
+ def test_get_floating_ip_by_address_not_found(self):
+ api = neutronapi.API()
+ address = self.fip_unassociated['floating_ip_address']
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': []})
+ self.mox.ReplayAll()
+ self.assertRaises(exception.FloatingIpNotFoundForAddress,
+ api.get_floating_ip_by_address,
+ self.context, address)
+
+ def test_get_floating_ip_by_id_not_found(self):
+ api = neutronapi.API()
+ NeutronNotFound = exceptions.NeutronClientException(status_code=404)
+ floating_ip_id = self.fip_unassociated['id']
+ self.moxed_client.show_floatingip(floating_ip_id).\
+ AndRaise(NeutronNotFound)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.FloatingIpNotFound,
+ api.get_floating_ip,
+ self.context, floating_ip_id)
+
+ def test_get_floating_ip_raises_non404(self):
+ api = neutronapi.API()
+ NeutronNotFound = exceptions.NeutronClientException(status_code=0)
+ floating_ip_id = self.fip_unassociated['id']
+ self.moxed_client.show_floatingip(floating_ip_id).\
+ AndRaise(NeutronNotFound)
+ self.mox.ReplayAll()
+ self.assertRaises(exceptions.NeutronClientException,
+ api.get_floating_ip,
+ self.context, floating_ip_id)
+
+ def test_get_floating_ip_by_address_multiple_found(self):
+ api = neutronapi.API()
+ address = self.fip_unassociated['floating_ip_address']
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': [self.fip_unassociated] * 2})
+ self.mox.ReplayAll()
+ self.assertRaises(exception.FloatingIpMultipleFoundForAddress,
+ api.get_floating_ip_by_address,
+ self.context, address)
+
+ def test_get_floating_ips_by_project(self):
+ api = neutronapi.API()
+ project_id = self.context.project_id
+ self.moxed_client.list_floatingips(tenant_id=project_id).\
+ AndReturn({'floatingips': [self.fip_unassociated,
+ self.fip_associated]})
+ search_opts = {'router:external': True}
+ self.moxed_client.list_networks(**search_opts).\
+ AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
+ self.moxed_client.list_ports(tenant_id=project_id).\
+ AndReturn({'ports': self.port_data2})
+ self.mox.ReplayAll()
+
+ expected = [self._get_expected_fip_model(self.fip_unassociated),
+ self._get_expected_fip_model(self.fip_associated, idx=1)]
+ fips = api.get_floating_ips_by_project(self.context)
+ self.assertEqual(expected, fips)
+
+ def _test_get_instance_id_by_floating_address(self, fip_data,
+ associated=False):
+ api = neutronapi.API()
+ address = fip_data['floating_ip_address']
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': [fip_data]})
+ if associated:
+ self.moxed_client.show_port(fip_data['port_id']).\
+ AndReturn({'port': self.port_data2[1]})
+ self.mox.ReplayAll()
+
+ if associated:
+ expected = self.port_data2[1]['device_id']
+ else:
+ expected = None
+ fip = api.get_instance_id_by_floating_address(self.context, address)
+ self.assertEqual(expected, fip)
+
+ def test_get_instance_id_by_floating_address(self):
+ self._test_get_instance_id_by_floating_address(self.fip_unassociated)
+
+ def test_get_instance_id_by_floating_address_associated(self):
+ self._test_get_instance_id_by_floating_address(self.fip_associated,
+ associated=True)
+
+ def test_allocate_floating_ip(self):
+ api = neutronapi.API()
+ pool_name = self.fip_pool['name']
+ pool_id = self.fip_pool['id']
+ search_opts = {'router:external': True,
+ 'fields': 'id',
+ 'name': pool_name}
+ self.moxed_client.list_networks(**search_opts).\
+ AndReturn({'networks': [self.fip_pool]})
+ self.moxed_client.create_floatingip(
+ {'floatingip': {'floating_network_id': pool_id}}).\
+ AndReturn({'floatingip': self.fip_unassociated})
+ self.mox.ReplayAll()
+ fip = api.allocate_floating_ip(self.context, 'ext_net')
+ self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
+
+ def test_allocate_floating_ip_addr_gen_fail(self):
+ api = neutronapi.API()
+ pool_name = self.fip_pool['name']
+ pool_id = self.fip_pool['id']
+ search_opts = {'router:external': True,
+ 'fields': 'id',
+ 'name': pool_name}
+ self.moxed_client.list_networks(**search_opts).\
+ AndReturn({'networks': [self.fip_pool]})
+ self.moxed_client.create_floatingip(
+ {'floatingip': {'floating_network_id': pool_id}}).\
+ AndRaise(exceptions.IpAddressGenerationFailureClient)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NoMoreFloatingIps,
+ api.allocate_floating_ip, self.context, 'ext_net')
+
+ def test_allocate_floating_ip_exhausted_fail(self):
+ api = neutronapi.API()
+ pool_name = self.fip_pool['name']
+ pool_id = self.fip_pool['id']
+ search_opts = {'router:external': True,
+ 'fields': 'id',
+ 'name': pool_name}
+ self.moxed_client.list_networks(**search_opts).\
+ AndReturn({'networks': [self.fip_pool]})
+ self.moxed_client.create_floatingip(
+ {'floatingip': {'floating_network_id': pool_id}}).\
+ AndRaise(exceptions.ExternalIpAddressExhaustedClient)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NoMoreFloatingIps,
+ api.allocate_floating_ip, self.context, 'ext_net')
+
+ def test_allocate_floating_ip_with_pool_id(self):
+ api = neutronapi.API()
+ pool_id = self.fip_pool['id']
+ search_opts = {'router:external': True,
+ 'fields': 'id',
+ 'id': pool_id}
+ self.moxed_client.list_networks(**search_opts).\
+ AndReturn({'networks': [self.fip_pool]})
+ self.moxed_client.create_floatingip(
+ {'floatingip': {'floating_network_id': pool_id}}).\
+ AndReturn({'floatingip': self.fip_unassociated})
+ self.mox.ReplayAll()
+ fip = api.allocate_floating_ip(self.context, pool_id)
+ self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
+
+ def test_allocate_floating_ip_with_default_pool(self):
+ api = neutronapi.API()
+ pool_name = self.fip_pool_nova['name']
+ pool_id = self.fip_pool_nova['id']
+ search_opts = {'router:external': True,
+ 'fields': 'id',
+ 'name': pool_name}
+ self.moxed_client.list_networks(**search_opts).\
+ AndReturn({'networks': [self.fip_pool_nova]})
+ self.moxed_client.create_floatingip(
+ {'floatingip': {'floating_network_id': pool_id}}).\
+ AndReturn({'floatingip': self.fip_unassociated})
+ self.mox.ReplayAll()
+ fip = api.allocate_floating_ip(self.context)
+ self.assertEqual(fip, self.fip_unassociated['floating_ip_address'])
+
+ def test_release_floating_ip(self):
+ api = neutronapi.API()
+ address = self.fip_unassociated['floating_ip_address']
+ fip_id = self.fip_unassociated['id']
+
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': [self.fip_unassociated]})
+ self.moxed_client.delete_floatingip(fip_id)
+ self.mox.ReplayAll()
+ api.release_floating_ip(self.context, address)
+
+ def test_disassociate_and_release_floating_ip(self):
+ api = neutronapi.API()
+ address = self.fip_unassociated['floating_ip_address']
+ fip_id = self.fip_unassociated['id']
+ floating_ip = {'address': address}
+
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': [self.fip_unassociated]})
+ self.moxed_client.delete_floatingip(fip_id)
+ self.mox.ReplayAll()
+ api.disassociate_and_release_floating_ip(self.context, None,
+ floating_ip)
+
+ def test_release_floating_ip_associated(self):
+ api = neutronapi.API()
+ address = self.fip_associated['floating_ip_address']
+
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': [self.fip_associated]})
+ self.mox.ReplayAll()
+ self.assertRaises(exception.FloatingIpAssociated,
+ api.release_floating_ip, self.context, address)
+
+ def _setup_mock_for_refresh_cache(self, api, instances):
+ nw_info = self.mox.CreateMock(model.NetworkInfo)
+ self.mox.StubOutWithMock(api, '_get_instance_nw_info')
+ self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
+ for instance in instances:
+ nw_info.json()
+ api._get_instance_nw_info(mox.IgnoreArg(), instance).\
+ AndReturn(nw_info)
+ api.db.instance_info_cache_update(mox.IgnoreArg(),
+ instance['uuid'],
+ mox.IgnoreArg())
+
+ def test_associate_floating_ip(self):
+ api = neutronapi.API()
+ address = self.fip_unassociated['floating_ip_address']
+ fixed_address = self.port_address2
+ fip_id = self.fip_unassociated['id']
+
+ search_opts = {'device_owner': 'compute:nova',
+ 'device_id': self.instance['uuid']}
+ self.moxed_client.list_ports(**search_opts).\
+ AndReturn({'ports': [self.port_data2[1]]})
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': [self.fip_unassociated]})
+ self.moxed_client.update_floatingip(
+ fip_id, {'floatingip': {'port_id': self.fip_associated['port_id'],
+ 'fixed_ip_address': fixed_address}})
+ self._setup_mock_for_refresh_cache(api, [self.instance])
+
+ self.mox.ReplayAll()
+ api.associate_floating_ip(self.context, self.instance,
+ address, fixed_address)
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ def test_reassociate_floating_ip(self, mock_get):
+ api = neutronapi.API()
+ address = self.fip_associated['floating_ip_address']
+ new_fixed_address = self.port_address
+ fip_id = self.fip_associated['id']
+
+ search_opts = {'device_owner': 'compute:nova',
+ 'device_id': self.instance2['uuid']}
+ self.moxed_client.list_ports(**search_opts).\
+ AndReturn({'ports': [self.port_data2[0]]})
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': [self.fip_associated]})
+ self.moxed_client.update_floatingip(
+ fip_id, {'floatingip': {'port_id': 'my_portid1',
+ 'fixed_ip_address': new_fixed_address}})
+ self.moxed_client.show_port(self.fip_associated['port_id']).\
+ AndReturn({'port': self.port_data2[1]})
+
+ mock_get.return_value = fake_instance.fake_instance_obj(
+ self.context, **self.instance)
+ self._setup_mock_for_refresh_cache(api, [mock_get.return_value,
+ self.instance2])
+
+ self.mox.ReplayAll()
+ api.associate_floating_ip(self.context, self.instance2,
+ address, new_fixed_address)
+
+ def test_associate_floating_ip_not_found_fixed_ip(self):
+ api = neutronapi.API()
+ address = self.fip_associated['floating_ip_address']
+ fixed_address = self.fip_associated['fixed_ip_address']
+
+ search_opts = {'device_owner': 'compute:nova',
+ 'device_id': self.instance['uuid']}
+ self.moxed_client.list_ports(**search_opts).\
+ AndReturn({'ports': [self.port_data2[0]]})
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.FixedIpNotFoundForAddress,
+ api.associate_floating_ip, self.context,
+ self.instance, address, fixed_address)
+
+ def test_disassociate_floating_ip(self):
+ api = neutronapi.API()
+ address = self.fip_associated['floating_ip_address']
+ fip_id = self.fip_associated['id']
+
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': [self.fip_associated]})
+ self.moxed_client.update_floatingip(
+ fip_id, {'floatingip': {'port_id': None}})
+ self._setup_mock_for_refresh_cache(api, [self.instance])
+
+ self.mox.ReplayAll()
+ api.disassociate_floating_ip(self.context, self.instance, address)
+
+ def test_add_fixed_ip_to_instance(self):
+ api = neutronapi.API()
+ self._setup_mock_for_refresh_cache(api, [self.instance])
+ network_id = 'my_netid1'
+ search_opts = {'network_id': network_id}
+ self.moxed_client.list_subnets(
+ **search_opts).AndReturn({'subnets': self.subnet_data_n})
+
+ search_opts = {'device_id': self.instance['uuid'],
+ 'device_owner': 'compute:nova',
+ 'network_id': network_id}
+ self.moxed_client.list_ports(
+ **search_opts).AndReturn({'ports': self.port_data1})
+ port_req_body = {
+ 'port': {
+ 'fixed_ips': [{'subnet_id': 'my_subid1'},
+ {'subnet_id': 'my_subid1'}],
+ },
+ }
+ port = self.port_data1[0]
+ port['fixed_ips'] = [{'subnet_id': 'my_subid1'}]
+ self.moxed_client.update_port('my_portid1',
+ MyComparator(port_req_body)).AndReturn({'port': port})
+
+ self.mox.ReplayAll()
+ api.add_fixed_ip_to_instance(self.context, self.instance, network_id)
+
+ def test_remove_fixed_ip_from_instance(self):
+ api = neutronapi.API()
+ self._setup_mock_for_refresh_cache(api, [self.instance])
+ address = '10.0.0.3'
+ zone = 'compute:%s' % self.instance['availability_zone']
+ search_opts = {'device_id': self.instance['uuid'],
+ 'device_owner': zone,
+ 'fixed_ips': 'ip_address=%s' % address}
+ self.moxed_client.list_ports(
+ **search_opts).AndReturn({'ports': self.port_data1})
+ port_req_body = {
+ 'port': {
+ 'fixed_ips': [],
+ },
+ }
+ port = self.port_data1[0]
+ port['fixed_ips'] = []
+ self.moxed_client.update_port('my_portid1',
+ MyComparator(port_req_body)).AndReturn({'port': port})
+
+ self.mox.ReplayAll()
+ api.remove_fixed_ip_from_instance(self.context, self.instance, address)
+
+ def test_list_floating_ips_without_l3_support(self):
+ api = neutronapi.API()
+ NeutronNotFound = exceptions.NeutronClientException(
+ status_code=404)
+ self.moxed_client.list_floatingips(
+ fixed_ip_address='1.1.1.1', port_id=1).AndRaise(NeutronNotFound)
+ self.mox.ReplayAll()
+ neutronv2.get_client('fake')
+ floatingips = api._get_floating_ips_by_fixed_and_port(
+ self.moxed_client, '1.1.1.1', 1)
+ self.assertEqual(floatingips, [])
+
+ def test_nw_info_get_ips(self):
+ fake_port = {
+ 'fixed_ips': [
+ {'ip_address': '1.1.1.1'}],
+ 'id': 'port-id',
+ }
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port')
+ api._get_floating_ips_by_fixed_and_port(
+ self.moxed_client, '1.1.1.1', 'port-id').AndReturn(
+ [{'floating_ip_address': '10.0.0.1'}])
+ self.mox.ReplayAll()
+ neutronv2.get_client('fake')
+ result = api._nw_info_get_ips(self.moxed_client, fake_port)
+ self.assertEqual(len(result), 1)
+ self.assertEqual(result[0]['address'], '1.1.1.1')
+ self.assertEqual(result[0]['floating_ips'][0]['address'], '10.0.0.1')
+
+ def test_nw_info_get_subnets(self):
+ fake_port = {
+ 'fixed_ips': [
+ {'ip_address': '1.1.1.1'},
+ {'ip_address': '2.2.2.2'}],
+ 'id': 'port-id',
+ }
+ fake_subnet = model.Subnet(cidr='1.0.0.0/8')
+ fake_ips = [model.IP(x['ip_address']) for x in fake_port['fixed_ips']]
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api, '_get_subnets_from_port')
+ api._get_subnets_from_port(self.context, fake_port).AndReturn(
+ [fake_subnet])
+ self.mox.ReplayAll()
+ neutronv2.get_client('fake')
+ subnets = api._nw_info_get_subnets(self.context, fake_port, fake_ips)
+ self.assertEqual(len(subnets), 1)
+ self.assertEqual(len(subnets[0]['ips']), 1)
+ self.assertEqual(subnets[0]['ips'][0]['address'], '1.1.1.1')
+
+ def _test_nw_info_build_network(self, vif_type):
+ fake_port = {
+ 'fixed_ips': [{'ip_address': '1.1.1.1'}],
+ 'id': 'port-id',
+ 'network_id': 'net-id',
+ 'binding:vif_type': vif_type,
+ }
+ fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
+ fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}]
+ api = neutronapi.API()
+ self.mox.ReplayAll()
+ neutronv2.get_client('fake')
+ net, iid = api._nw_info_build_network(fake_port, fake_nets,
+ fake_subnets)
+ self.assertEqual(net['subnets'], fake_subnets)
+ self.assertEqual(net['id'], 'net-id')
+ self.assertEqual(net['label'], 'foo')
+ self.assertEqual(net.get_meta('tenant_id'), 'tenant')
+ self.assertEqual(net.get_meta('injected'), CONF.flat_injected)
+ return net, iid
+
+ def test_nw_info_build_network_ovs(self):
+ net, iid = self._test_nw_info_build_network(model.VIF_TYPE_OVS)
+ self.assertEqual(net['bridge'], CONF.neutron.ovs_bridge)
+ self.assertNotIn('should_create_bridge', net)
+ self.assertEqual(iid, 'port-id')
+
+ def test_nw_info_build_network_dvs(self):
+ net, iid = self._test_nw_info_build_network(model.VIF_TYPE_DVS)
+ self.assertEqual('foo-net-id', net['bridge'])
+ self.assertNotIn('should_create_bridge', net)
+ self.assertNotIn('ovs_interfaceid', net)
+ self.assertIsNone(iid)
+
+ def test_nw_info_build_network_bridge(self):
+ net, iid = self._test_nw_info_build_network(model.VIF_TYPE_BRIDGE)
+ self.assertEqual(net['bridge'], 'brqnet-id')
+ self.assertTrue(net['should_create_bridge'])
+ self.assertIsNone(iid)
+
+ def test_nw_info_build_network_other(self):
+ net, iid = self._test_nw_info_build_network(None)
+ self.assertIsNone(net['bridge'])
+ self.assertNotIn('should_create_bridge', net)
+ self.assertIsNone(iid)
+
+ def test_nw_info_build_no_match(self):
+ fake_port = {
+ 'fixed_ips': [{'ip_address': '1.1.1.1'}],
+ 'id': 'port-id',
+ 'network_id': 'net-id1',
+ 'tenant_id': 'tenant',
+ 'binding:vif_type': model.VIF_TYPE_OVS,
+ }
+ fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
+ fake_nets = [{'id': 'net-id2', 'name': 'foo', 'tenant_id': 'tenant'}]
+ api = neutronapi.API()
+ self.mox.ReplayAll()
+ neutronv2.get_client('fake')
+ net, iid = api._nw_info_build_network(fake_port, fake_nets,
+ fake_subnets)
+ self.assertEqual(fake_subnets, net['subnets'])
+ self.assertEqual('net-id1', net['id'])
+ self.assertEqual('net-id1', net['id'])
+ self.assertEqual('tenant', net['meta']['tenant_id'])
+
+ def test_build_network_info_model(self):
+ api = neutronapi.API()
+ fake_inst = {'project_id': 'fake', 'uuid': 'uuid',
+ 'info_cache': {'network_info': []}}
+ fake_ports = [
+ # admin_state_up=True and status='ACTIVE' thus vif.active=True
+ {'id': 'port1',
+ 'network_id': 'net-id',
+ 'admin_state_up': True,
+ 'status': 'ACTIVE',
+ 'fixed_ips': [{'ip_address': '1.1.1.1'}],
+ 'mac_address': 'de:ad:be:ef:00:01',
+ 'binding:vif_type': model.VIF_TYPE_BRIDGE,
+ 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
+ 'binding:vif_details': {},
+ },
+ # admin_state_up=False and status='DOWN' thus vif.active=True
+ {'id': 'port2',
+ 'network_id': 'net-id',
+ 'admin_state_up': False,
+ 'status': 'DOWN',
+ 'fixed_ips': [{'ip_address': '1.1.1.1'}],
+ 'mac_address': 'de:ad:be:ef:00:02',
+ 'binding:vif_type': model.VIF_TYPE_BRIDGE,
+ 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
+ 'binding:vif_details': {},
+ },
+ # admin_state_up=True and status='DOWN' thus vif.active=False
+ {'id': 'port0',
+ 'network_id': 'net-id',
+ 'admin_state_up': True,
+ 'status': 'DOWN',
+ 'fixed_ips': [{'ip_address': '1.1.1.1'}],
+ 'mac_address': 'de:ad:be:ef:00:03',
+ 'binding:vif_type': model.VIF_TYPE_BRIDGE,
+ 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
+ 'binding:vif_details': {},
+ },
+ # admin_state_up=True and status='ACTIVE' thus vif.active=True
+ {'id': 'port3',
+ 'network_id': 'net-id',
+ 'admin_state_up': True,
+ 'status': 'ACTIVE',
+ 'fixed_ips': [{'ip_address': '1.1.1.1'}],
+ 'mac_address': 'de:ad:be:ef:00:04',
+ 'binding:vif_type': model.VIF_TYPE_HW_VEB,
+ 'binding:vnic_type': model.VNIC_TYPE_DIRECT,
+ 'binding:profile': {'pci_vendor_info': '1137:0047',
+ 'pci_slot': '0000:0a:00.1',
+ 'physical_network': 'phynet1'},
+ 'binding:vif_details': {model.VIF_DETAILS_PROFILEID: 'pfid'},
+ },
+ # admin_state_up=True and status='ACTIVE' thus vif.active=True
+ {'id': 'port4',
+ 'network_id': 'net-id',
+ 'admin_state_up': True,
+ 'status': 'ACTIVE',
+ 'fixed_ips': [{'ip_address': '1.1.1.1'}],
+ 'mac_address': 'de:ad:be:ef:00:05',
+ 'binding:vif_type': model.VIF_TYPE_802_QBH,
+ 'binding:vnic_type': model.VNIC_TYPE_MACVTAP,
+ 'binding:profile': {'pci_vendor_info': '1137:0047',
+ 'pci_slot': '0000:0a:00.2',
+ 'physical_network': 'phynet1'},
+ 'binding:vif_details': {model.VIF_DETAILS_PROFILEID: 'pfid'},
+ },
+ # admin_state_up=True and status='ACTIVE' thus vif.active=True
+ # This port has no binding:vnic_type to verify default is assumed
+ {'id': 'port5',
+ 'network_id': 'net-id',
+ 'admin_state_up': True,
+ 'status': 'ACTIVE',
+ 'fixed_ips': [{'ip_address': '1.1.1.1'}],
+ 'mac_address': 'de:ad:be:ef:00:06',
+ 'binding:vif_type': model.VIF_TYPE_BRIDGE,
+ # No binding:vnic_type
+ 'binding:vif_details': {},
+ },
+ # This does not match the networks we provide below,
+ # so it should be ignored (and is here to verify that)
+ {'id': 'port6',
+ 'network_id': 'other-net-id',
+ 'admin_state_up': True,
+ 'status': 'DOWN',
+ 'binding:vnic_type': model.VNIC_TYPE_NORMAL,
+ },
+ ]
+ fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
+ fake_nets = [
+ {'id': 'net-id',
+ 'name': 'foo',
+ 'tenant_id': 'fake',
+ }
+ ]
+ neutronv2.get_client(mox.IgnoreArg(), admin=True).MultipleTimes(
+ ).AndReturn(self.moxed_client)
+ self.moxed_client.list_ports(
+ tenant_id='fake', device_id='uuid').AndReturn(
+ {'ports': fake_ports})
+
+ self.mox.StubOutWithMock(api, '_get_floating_ips_by_fixed_and_port')
+ self.mox.StubOutWithMock(api, '_get_subnets_from_port')
+ requested_ports = [fake_ports[2], fake_ports[0], fake_ports[1],
+ fake_ports[3], fake_ports[4], fake_ports[5]]
+ for requested_port in requested_ports:
+ api._get_floating_ips_by_fixed_and_port(
+ self.moxed_client, '1.1.1.1', requested_port['id']).AndReturn(
+ [{'floating_ip_address': '10.0.0.1'}])
+ for requested_port in requested_ports:
+ api._get_subnets_from_port(self.context, requested_port
+ ).AndReturn(fake_subnets)
+
+ self.mox.ReplayAll()
+ neutronv2.get_client('fake')
+ nw_infos = api._build_network_info_model(self.context, fake_inst,
+ fake_nets,
+ [fake_ports[2]['id'],
+ fake_ports[0]['id'],
+ fake_ports[1]['id'],
+ fake_ports[3]['id'],
+ fake_ports[4]['id'],
+ fake_ports[5]['id']])
+ self.assertEqual(len(nw_infos), 6)
+ index = 0
+ for nw_info in nw_infos:
+ self.assertEqual(nw_info['address'],
+ requested_ports[index]['mac_address'])
+ self.assertEqual(nw_info['devname'], 'tapport' + str(index))
+ self.assertIsNone(nw_info['ovs_interfaceid'])
+ self.assertEqual(nw_info['type'],
+ requested_ports[index]['binding:vif_type'])
+ if nw_info['type'] == model.VIF_TYPE_BRIDGE:
+ self.assertEqual(nw_info['network']['bridge'], 'brqnet-id')
+ self.assertEqual(nw_info['vnic_type'],
+ requested_ports[index].get('binding:vnic_type',
+ model.VNIC_TYPE_NORMAL))
+ self.assertEqual(nw_info.get('details'),
+ requested_ports[index].get('binding:vif_details'))
+ self.assertEqual(nw_info.get('profile'),
+ requested_ports[index].get('binding:profile'))
+ index += 1
+
+ self.assertEqual(nw_infos[0]['active'], False)
+ self.assertEqual(nw_infos[1]['active'], True)
+ self.assertEqual(nw_infos[2]['active'], True)
+ self.assertEqual(nw_infos[3]['active'], True)
+ self.assertEqual(nw_infos[4]['active'], True)
+ self.assertEqual(nw_infos[5]['active'], True)
+
+ self.assertEqual(nw_infos[0]['id'], 'port0')
+ self.assertEqual(nw_infos[1]['id'], 'port1')
+ self.assertEqual(nw_infos[2]['id'], 'port2')
+ self.assertEqual(nw_infos[3]['id'], 'port3')
+ self.assertEqual(nw_infos[4]['id'], 'port4')
+ self.assertEqual(nw_infos[5]['id'], 'port5')
+
+ def test_get_subnets_from_port(self):
+ api = neutronapi.API()
+
+ port_data = copy.copy(self.port_data1[0])
+ subnet_data1 = copy.copy(self.subnet_data1)
+ subnet_data1[0]['host_routes'] = [
+ {'destination': '192.168.0.0/24', 'nexthop': '1.0.0.10'}
+ ]
+
+ self.moxed_client.list_subnets(
+ id=[port_data['fixed_ips'][0]['subnet_id']]
+ ).AndReturn({'subnets': subnet_data1})
+ self.moxed_client.list_ports(
+ network_id=subnet_data1[0]['network_id'],
+ device_owner='network:dhcp').AndReturn({'ports': []})
+ self.mox.ReplayAll()
+
+ subnets = api._get_subnets_from_port(self.context, port_data)
+
+ self.assertEqual(len(subnets), 1)
+ self.assertEqual(len(subnets[0]['routes']), 1)
+ self.assertEqual(subnets[0]['routes'][0]['cidr'],
+ subnet_data1[0]['host_routes'][0]['destination'])
+ self.assertEqual(subnets[0]['routes'][0]['gateway']['address'],
+ subnet_data1[0]['host_routes'][0]['nexthop'])
+
+ def test_get_all_empty_list_networks(self):
+ api = neutronapi.API()
+ self.moxed_client.list_networks().AndReturn({'networks': []})
+ self.mox.ReplayAll()
+ networks = api.get_all(self.context)
+ self.assertEqual(networks, [])
+
+ def test_get_floating_ips_by_fixed_address(self):
+ # NOTE(lbragstad): We need to reset the mocks in order to assert
+ # a NotImplementedError is raised when calling the method under test.
+ self.mox.ResetAll()
+ fake_fixed = '192.168.1.4'
+ api = neutronapi.API()
+ self.assertRaises(NotImplementedError,
+ api.get_floating_ips_by_fixed_address,
+ self.context, fake_fixed)
+
+ @mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
+ def test_get_port_vnic_info_1(self, mock_get_client):
+ api = neutronapi.API()
+ self.mox.ResetAll()
+ test_port = {
+ 'port': {'id': 'my_port_id1',
+ 'network_id': 'net-id',
+ 'binding:vnic_type': model.VNIC_TYPE_DIRECT,
+ },
+ }
+ test_net = {'network': {'provider:physical_network': 'phynet1'}}
+
+ mock_client = mock_get_client()
+ mock_client.show_port.return_value = test_port
+ mock_client.show_network.return_value = test_net
+ vnic_type, phynet_name = api._get_port_vnic_info(
+ self.context, mock_client, test_port['port']['id'])
+
+ mock_client.show_port.assert_called_once_with(test_port['port']['id'],
+ fields=['binding:vnic_type', 'network_id'])
+ mock_client.show_network.assert_called_once_with(
+ test_port['port']['network_id'],
+ fields='provider:physical_network')
+ self.assertEqual(model.VNIC_TYPE_DIRECT, vnic_type)
+ self.assertEqual(phynet_name, 'phynet1')
+
+ def _test_get_port_vnic_info(self, mock_get_client,
+ binding_vnic_type=None):
+ api = neutronapi.API()
+ self.mox.ResetAll()
+ test_port = {
+ 'port': {'id': 'my_port_id2',
+ 'network_id': 'net-id',
+ },
+ }
+
+ if binding_vnic_type:
+ test_port['port']['binding:vnic_type'] = binding_vnic_type
+
+ mock_client = mock_get_client()
+ mock_client.show_port.return_value = test_port
+ vnic_type, phynet_name = api._get_port_vnic_info(
+ self.context, mock_client, test_port['port']['id'])
+
+ mock_client.show_port.assert_called_once_with(test_port['port']['id'],
+ fields=['binding:vnic_type', 'network_id'])
+ self.assertEqual(model.VNIC_TYPE_NORMAL, vnic_type)
+ self.assertFalse(phynet_name)
+
+ @mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
+ def test_get_port_vnic_info_2(self, mock_get_client):
+ self._test_get_port_vnic_info(mock_get_client,
+ binding_vnic_type=model.VNIC_TYPE_NORMAL)
+
+ @mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
+ def test_get_port_vnic_info_3(self, mock_get_client):
+ self._test_get_port_vnic_info(mock_get_client)
+
+ @mock.patch.object(neutronapi.API, "_get_port_vnic_info")
+ @mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
+ def test_create_pci_requests_for_sriov_ports(self, mock_get_client,
+ mock_get_port_vnic_info):
+ api = neutronapi.API()
+ self.mox.ResetAll()
+ requested_networks = objects.NetworkRequestList(
+ objects = [
+ objects.NetworkRequest(port_id='my_portid1'),
+ objects.NetworkRequest(network_id='net1'),
+ objects.NetworkRequest(port_id='my_portid2'),
+ objects.NetworkRequest(port_id='my_portid3'),
+ objects.NetworkRequest(port_id='my_portid4')])
+ pci_requests = objects.InstancePCIRequests(requests=[])
+ mock_get_port_vnic_info.side_effect = [
+ (model.VNIC_TYPE_DIRECT, 'phynet1'),
+ (model.VNIC_TYPE_NORMAL, ''),
+ (model.VNIC_TYPE_MACVTAP, 'phynet1'),
+ (model.VNIC_TYPE_MACVTAP, 'phynet2')
+ ]
+ api.create_pci_requests_for_sriov_ports(
+ None, pci_requests, requested_networks)
+ self.assertEqual(3, len(pci_requests.requests))
+ has_pci_request_id = [net.pci_request_id is not None for net in
+ requested_networks.objects]
+ expected_results = [True, False, False, True, True]
+ self.assertEqual(expected_results, has_pci_request_id)
+
+
+class TestNeutronv2WithMock(test.TestCase):
+ """Used to test Neutron V2 API with mock."""
+
+ def setUp(self):
+ super(TestNeutronv2WithMock, self).setUp()
+ self.api = neutronapi.API()
+ self.context = context.RequestContext(
+ 'fake-user', 'fake-project',
+ auth_token='bff4a5a6b9eb4ea2a6efec6eefb77936')
+
+ @mock.patch('oslo.concurrency.lockutils.lock')
+ def test_get_instance_nw_info_locks_per_instance(self, mock_lock):
+ instance = objects.Instance(uuid=uuid.uuid4())
+ api = neutronapi.API()
+ mock_lock.side_effect = test.TestingException
+ self.assertRaises(test.TestingException,
+ api.get_instance_nw_info, 'context', instance)
+ mock_lock.assert_called_once_with('refresh_cache-%s' % instance.uuid)
+
+ def _test_validate_networks_fixed_ip_no_dup(self, nets, requested_networks,
+ ids, list_port_values):
+
+ def _fake_list_ports(**search_opts):
+ for args, return_value in list_port_values:
+ if args == search_opts:
+ return return_value
+ self.fail('Unexpected call to list_ports %s' % search_opts)
+
+ with contextlib.nested(
+ mock.patch.object(client.Client, 'list_ports',
+ side_effect=_fake_list_ports),
+ mock.patch.object(client.Client, 'list_networks',
+ return_value={'networks': nets}),
+ mock.patch.object(client.Client, 'show_quota',
+ return_value={'quota': {'port': 50}})) as (
+ list_ports_mock, list_networks_mock, show_quota_mock):
+
+ self.api.validate_networks(self.context, requested_networks, 1)
+
+ self.assertEqual(len(list_port_values),
+ len(list_ports_mock.call_args_list))
+ list_networks_mock.assert_called_once_with(id=ids)
+ show_quota_mock.assert_called_once_with(tenant_id='fake-project')
+
+ def test_validate_networks_fixed_ip_no_dup1(self):
+ # Test validation for a request for a network with a
+ # fixed IP that is not already in use because no fixed IPs are in use
+
+ nets1 = [{'id': 'my_netid1',
+ 'name': 'my_netname1',
+ 'subnets': ['mysubnid1'],
+ 'tenant_id': 'fake-project'}]
+
+ requested_networks = [('my_netid1', '10.0.1.2', None, None)]
+ ids = ['my_netid1']
+ list_port_values = [({'network_id': 'my_netid1',
+ 'fixed_ips': 'ip_address=10.0.1.2',
+ 'fields': 'device_id'},
+ {'ports': []}),
+ ({'tenant_id': 'fake-project'},
+ {'ports': []})]
+ self._test_validate_networks_fixed_ip_no_dup(nets1, requested_networks,
+ ids, list_port_values)
+
+ def test_validate_networks_fixed_ip_no_dup2(self):
+ # Test validation for a request for a network with a
+ # fixed IP that is not already in use because it is not used on this net id
+
+ nets2 = [{'id': 'my_netid1',
+ 'name': 'my_netname1',
+ 'subnets': ['mysubnid1'],
+ 'tenant_id': 'fake-project'},
+ {'id': 'my_netid2',
+ 'name': 'my_netname2',
+ 'subnets': ['mysubnid2'],
+ 'tenant_id': 'fake-project'}]
+
+ requested_networks = [('my_netid1', '10.0.1.2', None, None),
+ ('my_netid2', '10.0.1.3', None, None)]
+ ids = ['my_netid1', 'my_netid2']
+ list_port_values = [({'network_id': 'my_netid1',
+ 'fixed_ips': 'ip_address=10.0.1.2',
+ 'fields': 'device_id'},
+ {'ports': []}),
+ ({'network_id': 'my_netid2',
+ 'fixed_ips': 'ip_address=10.0.1.3',
+ 'fields': 'device_id'},
+ {'ports': []}),
+
+ ({'tenant_id': 'fake-project'},
+ {'ports': []})]
+
+ self._test_validate_networks_fixed_ip_no_dup(nets2, requested_networks,
+ ids, list_port_values)
+
+ def test_validate_networks_fixed_ip_dup(self):
+ # Test validation for a request for a network with a
+ # fixed ip that is already in use
+
+ requested_networks = [('my_netid1', '10.0.1.2', None, None)]
+ list_port_mock_params = {'network_id': 'my_netid1',
+ 'fixed_ips': 'ip_address=10.0.1.2',
+ 'fields': 'device_id'}
+ list_port_mock_return = {'ports': [({'device_id': 'my_deviceid'})]}
+
+ with mock.patch.object(client.Client, 'list_ports',
+ return_value=list_port_mock_return) as (
+ list_ports_mock):
+
+ self.assertRaises(exception.FixedIpAlreadyInUse,
+ self.api.validate_networks,
+ self.context, requested_networks, 1)
+
+ list_ports_mock.assert_called_once_with(**list_port_mock_params)
+
+ def test_allocate_floating_ip_exceed_limit(self):
+ # Verify that the correct exception is raised when the quota is exceeded
+ pool_name = 'dummy'
+ api = neutronapi.API()
+ with contextlib.nested(
+ mock.patch.object(client.Client, 'create_floatingip'),
+ mock.patch.object(api,
+ '_get_floating_ip_pool_id_by_name_or_id')) as (
+ create_mock, get_mock):
+ create_mock.side_effect = exceptions.OverQuotaClient()
+
+ self.assertRaises(exception.FloatingIpLimitExceeded,
+ api.allocate_floating_ip,
+ self.context, pool_name)
+
+ def test_create_port_for_instance_no_more_ip(self):
+ instance = fake_instance.fake_instance_obj(self.context)
+ net = {'id': 'my_netid1',
+ 'name': 'my_netname1',
+ 'subnets': ['mysubnid1'],
+ 'tenant_id': instance['project_id']}
+
+ with mock.patch.object(client.Client, 'create_port',
+ side_effect=exceptions.IpAddressGenerationFailureClient()) as (
+ create_port_mock):
+ zone = 'compute:%s' % instance['availability_zone']
+ port_req_body = {'port': {'device_id': instance['uuid'],
+ 'device_owner': zone}}
+ self.assertRaises(exception.NoMoreFixedIps,
+ self.api._create_port,
+ neutronv2.get_client(self.context),
+ instance, net['id'], port_req_body)
+ create_port_mock.assert_called_once_with(port_req_body)
+
+ @mock.patch.object(client.Client, 'create_port',
+ side_effect=exceptions.MacAddressInUseClient())
+ def test_create_port_for_instance_mac_address_in_use(self,
+ create_port_mock):
+ # Create fake data.
+ instance = fake_instance.fake_instance_obj(self.context)
+ net = {'id': 'my_netid1',
+ 'name': 'my_netname1',
+ 'subnets': ['mysubnid1'],
+ 'tenant_id': instance['project_id']}
+ zone = 'compute:%s' % instance['availability_zone']
+ port_req_body = {'port': {'device_id': instance['uuid'],
+ 'device_owner': zone,
+ 'mac_address': 'XX:XX:XX:XX:XX:XX'}}
+ available_macs = set(['XX:XX:XX:XX:XX:XX'])
+ # Run the code.
+ self.assertRaises(exception.PortInUse,
+ self.api._create_port,
+ neutronv2.get_client(self.context),
+ instance, net['id'], port_req_body,
+ available_macs=available_macs)
+ # Assert the calls.
+ create_port_mock.assert_called_once_with(port_req_body)
+
+ @mock.patch.object(client.Client, 'create_port',
+ side_effect=exceptions.IpAddressInUseClient())
+ def test_create_port_for_fixed_ip_in_use(self, create_port_mock):
+ # Create fake data.
+ instance = fake_instance.fake_instance_obj(self.context)
+ net = {'id': 'my_netid1',
+ 'name': 'my_netname1',
+ 'subnets': ['mysubnid1'],
+ 'tenant_id': instance['project_id']}
+ zone = 'compute:%s' % instance['availability_zone']
+ port_req_body = {'port': {'device_id': instance['uuid'],
+ 'device_owner': zone,
+ 'mac_address': 'XX:XX:XX:XX:XX:XX'}}
+ fake_ip = '1.1.1.1'
+ # Run the code.
+ self.assertRaises(exception.FixedIpAlreadyInUse,
+ self.api._create_port,
+ neutronv2.get_client(self.context),
+ instance, net['id'], port_req_body,
+ fixed_ip=fake_ip)
+ # Assert the calls.
+ create_port_mock.assert_called_once_with(port_req_body)
+
+ def test_get_network_detail_not_found(self):
+ api = neutronapi.API()
+ expected_exc = exceptions.NetworkNotFoundClient()
+ network_uuid = '02cacbca-7d48-4a2c-8011-43eecf8a9786'
+ with mock.patch.object(client.Client, 'show_network',
+ side_effect=expected_exc) as (
+ fake_show_network):
+ self.assertRaises(exception.NetworkNotFound,
+ api.get,
+ self.context,
+ network_uuid)
+ fake_show_network.assert_called_once_with(network_uuid)
+
+ def test_deallocate_for_instance_uses_delete_helper(self):
+ # setup fake data
+ instance = fake_instance.fake_instance_obj(self.context)
+ port_data = {'ports': [{'id': str(uuid.uuid4())}]}
+ ports = set([port['id'] for port in port_data.get('ports')])
+ api = neutronapi.API()
+ # setup mocks
+ mock_client = mock.Mock()
+ mock_client.list_ports.return_value = port_data
+ with contextlib.nested(
+ mock.patch.object(neutronv2, 'get_client',
+ return_value=mock_client),
+ mock.patch.object(api, '_delete_ports')
+ ) as (
+ mock_get_client, mock_delete
+ ):
+ # run the code
+ api.deallocate_for_instance(self.context, instance)
+ # assert the calls
+ mock_client.list_ports.assert_called_once_with(
+ device_id=instance.uuid)
+ mock_delete.assert_called_once_with(
+ mock_client, instance, ports, raise_if_fail=True)
+
+ def _test_delete_ports(self, expect_raise):
+ results = [exceptions.NeutronClientException, None]
+ mock_client = mock.Mock()
+ with mock.patch.object(mock_client, 'delete_port',
+ side_effect=results):
+ api = neutronapi.API()
+ api._delete_ports(mock_client, {'uuid': 'foo'}, ['port1', 'port2'],
+ raise_if_fail=expect_raise)
+
+ def test_delete_ports_raise(self):
+ self.assertRaises(exceptions.NeutronClientException,
+ self._test_delete_ports, True)
+
+ def test_delete_ports_no_raise(self):
+ self._test_delete_ports(False)
+
+ def test_delete_ports_never_raise_404(self):
+ mock_client = mock.Mock()
+ mock_client.delete_port.side_effect = exceptions.PortNotFoundClient
+ api = neutronapi.API()
+ api._delete_ports(mock_client, {'uuid': 'foo'}, ['port1'],
+ raise_if_fail=True)
+ mock_client.delete_port.assert_called_once_with('port1')
+
+ def test_deallocate_port_for_instance_fails(self):
+ mock_client = mock.Mock()
+ api = neutronapi.API()
+ with contextlib.nested(
+ mock.patch.object(neutronv2, 'get_client',
+ return_value=mock_client),
+ mock.patch.object(api, '_delete_ports',
+ side_effect=exceptions.Unauthorized),
+ mock.patch.object(api, 'get_instance_nw_info')
+ ) as (
+ get_client, delete_ports, get_nw_info
+ ):
+ self.assertRaises(exceptions.Unauthorized,
+ api.deallocate_port_for_instance,
+ self.context, instance={'uuid': 'fake'},
+ port_id='fake')
+ # make sure that we didn't try to reload nw info
+ self.assertFalse(get_nw_info.called)
+
+ @mock.patch.object(neutronv2, 'get_client', return_value=mock.Mock())
+ def _test_show_port_exceptions(self, client_exc, expected_nova_exc,
+ get_client_mock):
+ show_port_mock = mock.Mock(side_effect=client_exc)
+ get_client_mock.return_value.show_port = show_port_mock
+ self.assertRaises(expected_nova_exc, self.api.show_port,
+ self.context, 'fake_port_id')
+
+ def test_show_port_not_found(self):
+ self._test_show_port_exceptions(exceptions.PortNotFoundClient,
+ exception.PortNotFound)
+
+ def test_show_port_forbidden(self):
+ self._test_show_port_exceptions(exceptions.Unauthorized,
+ exception.Forbidden)
+
+
+class TestNeutronv2ModuleMethods(test.TestCase):
+
+ def test_gather_port_ids_and_networks_wrong_params(self):
+ api = neutronapi.API()
+
+ # Test with networks not None and port_ids is None
+ self.assertRaises(exception.NovaException,
+ api._gather_port_ids_and_networks,
+ 'fake_context', 'fake_instance',
+ [{'network': {'name': 'foo'}}], None)
+
+ # Test with networks is None and port_ids not None
+ self.assertRaises(exception.NovaException,
+ api._gather_port_ids_and_networks,
+ 'fake_context', 'fake_instance',
+ None, ['list', 'of', 'port_ids'])
+
+ def test_ensure_requested_network_ordering_no_preference_ids(self):
+ l = [1, 2, 3]
+
+ neutronapi._ensure_requested_network_ordering(
+ lambda x: x,
+ l,
+ None)
+
+ def test_ensure_requested_network_ordering_no_preference_hashes(self):
+ l = [{'id': 3}, {'id': 1}, {'id': 2}]
+
+ neutronapi._ensure_requested_network_ordering(
+ lambda x: x['id'],
+ l,
+ None)
+
+ self.assertEqual(l, [{'id': 3}, {'id': 1}, {'id': 2}])
+
+ def test_ensure_requested_network_ordering_with_preference(self):
+ l = [{'id': 3}, {'id': 1}, {'id': 2}]
+
+ neutronapi._ensure_requested_network_ordering(
+ lambda x: x['id'],
+ l,
+ [1, 2, 3])
+
+ self.assertEqual(l, [{'id': 1}, {'id': 2}, {'id': 3}])
+
+
+class TestNeutronv2Portbinding(TestNeutronv2Base):
+
+ def test_allocate_for_instance_portbinding(self):
+ self._allocate_for_instance(1, portbinding=True)
+
+ def test_populate_neutron_extension_values_binding(self):
+ api = neutronapi.API()
+ neutronv2.get_client(mox.IgnoreArg()).AndReturn(
+ self.moxed_client)
+ self.moxed_client.list_extensions().AndReturn(
+ {'extensions': [{'name': constants.PORTBINDING_EXT}]})
+ self.mox.ReplayAll()
+ host_id = 'my_host_id'
+ instance = {'host': host_id}
+ port_req_body = {'port': {}}
+ api._populate_neutron_extension_values(self.context, instance,
+ None, port_req_body)
+ self.assertEqual(port_req_body['port']['binding:host_id'], host_id)
+ self.assertFalse(port_req_body['port'].get('binding:profile'))
+
+ @mock.patch.object(pci_whitelist, 'get_pci_device_devspec')
+ @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ def test_populate_neutron_extension_values_binding_sriov(self,
+ mock_get_instance_pci_devs,
+ mock_get_pci_device_devspec):
+ api = neutronapi.API()
+ host_id = 'my_host_id'
+ instance = {'host': host_id}
+ port_req_body = {'port': {}}
+ pci_req_id = 'my_req_id'
+ pci_dev = {'vendor_id': '1377',
+ 'product_id': '0047',
+ 'address': '0000:0a:00.1',
+ }
+ PciDevice = collections.namedtuple('PciDevice',
+ ['vendor_id', 'product_id', 'address'])
+ mydev = PciDevice(**pci_dev)
+ profile = {'pci_vendor_info': '1377:0047',
+ 'pci_slot': '0000:0a:00.1',
+ 'physical_network': 'phynet1',
+ }
+
+ mock_get_instance_pci_devs.return_value = [mydev]
+ devspec = mock.Mock()
+ devspec.get_tags.return_value = {'physical_network': 'phynet1'}
+ mock_get_pci_device_devspec.return_value = devspec
+ api._populate_neutron_binding_profile(instance,
+ pci_req_id, port_req_body)
+
+ self.assertEqual(port_req_body['port']['binding:profile'], profile)
+
+ def test_migrate_instance_finish_binding_false(self):
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api, '_has_port_binding_extension')
+ api._has_port_binding_extension(mox.IgnoreArg(),
+ refresh_cache=True).AndReturn(False)
+ self.mox.ReplayAll()
+ api.migrate_instance_finish(self.context, None, None)
+
+ def test_migrate_instance_finish_binding_true(self):
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api, '_has_port_binding_extension')
+ api._has_port_binding_extension(mox.IgnoreArg(),
+ refresh_cache=True).AndReturn(True)
+ neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
+ self.moxed_client)
+ search_opts = {'device_id': self.instance['uuid'],
+ 'tenant_id': self.instance['project_id']}
+ ports = {'ports': [{'id': 'test1'}]}
+ self.moxed_client.list_ports(**search_opts).AndReturn(ports)
+ migration = {'source_compute': self.instance.get('host'),
+ 'dest_compute': 'dest_host', }
+ port_req_body = {'port':
+ {'binding:host_id': migration['dest_compute']}}
+ self.moxed_client.update_port('test1',
+ port_req_body).AndReturn(None)
+ self.mox.ReplayAll()
+ api.migrate_instance_finish(self.context, self.instance, migration)
+
+ def test_migrate_instance_finish_binding_true_exception(self):
+ api = neutronapi.API()
+ self.mox.StubOutWithMock(api, '_has_port_binding_extension')
+ api._has_port_binding_extension(mox.IgnoreArg(),
+ refresh_cache=True).AndReturn(True)
+ neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
+ self.moxed_client)
+ search_opts = {'device_id': self.instance['uuid'],
+ 'tenant_id': self.instance['project_id']}
+ ports = {'ports': [{'id': 'test1'}]}
+ self.moxed_client.list_ports(**search_opts).AndReturn(ports)
+ migration = {'source_compute': self.instance.get('host'),
+ 'dest_compute': 'dest_host', }
+ port_req_body = {'port':
+ {'binding:host_id': migration['dest_compute']}}
+ self.moxed_client.update_port('test1',
+ port_req_body).AndRaise(
+ Exception("fail to update port"))
+ self.mox.ReplayAll()
+ self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
+ api.migrate_instance_finish,
+ self.context, self.instance, migration)
+
+ def test_associate_not_implemented(self):
+ api = neutronapi.API()
+ self.assertRaises(NotImplementedError,
+ api.associate,
+ self.context, 'id')
+
+
+class TestNeutronv2ExtraDhcpOpts(TestNeutronv2Base):
+ def setUp(self):
+ super(TestNeutronv2ExtraDhcpOpts, self).setUp()
+ neutronv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
+ self.moxed_client)
+
+ def test_allocate_for_instance_1_with_extra_dhcp_opts_turned_off(self):
+ self._allocate_for_instance(1, extra_dhcp_opts=False)
+
+ def test_allocate_for_instance_extradhcpopts(self):
+ dhcp_opts = [{'opt_name': 'bootfile-name',
+ 'opt_value': 'pxelinux.0'},
+ {'opt_name': 'tftp-server',
+ 'opt_value': '123.123.123.123'},
+ {'opt_name': 'server-ip-address',
+ 'opt_value': '123.123.123.456'}]
+
+ self._allocate_for_instance(1, dhcp_options=dhcp_opts)
+
+
+class TestNeutronClientForAdminScenarios(test.TestCase):
+
+ def _test_get_client_for_admin(self, use_id=False, admin_context=False):
+
+ def client_mock(*args, **kwargs):
+ client.Client.httpclient = mock.MagicMock()
+
+ self.flags(auth_strategy=None, group='neutron')
+ self.flags(url='http://anyhost/', group='neutron')
+ self.flags(url_timeout=30, group='neutron')
+ if use_id:
+ self.flags(admin_tenant_id='admin_tenant_id', group='neutron')
+ self.flags(admin_user_id='admin_user_id', group='neutron')
+
+ if admin_context:
+ my_context = context.get_admin_context()
+ else:
+ my_context = context.RequestContext('userid', 'my_tenantid',
+ auth_token='token')
+ self.mox.StubOutWithMock(client.Client, "__init__")
+ kwargs = {
+ 'auth_url': CONF.neutron.admin_auth_url,
+ 'password': CONF.neutron.admin_password,
+ 'endpoint_url': CONF.neutron.url,
+ 'auth_strategy': None,
+ 'timeout': CONF.neutron.url_timeout,
+ 'insecure': False,
+ 'ca_cert': None,
+ 'token': None}
+ if use_id:
+ kwargs['tenant_id'] = CONF.neutron.admin_tenant_id
+ kwargs['user_id'] = CONF.neutron.admin_user_id
+ else:
+ kwargs['tenant_name'] = CONF.neutron.admin_tenant_name
+ kwargs['username'] = CONF.neutron.admin_username
+ client.Client.__init__(**kwargs).WithSideEffects(client_mock)
+ self.mox.ReplayAll()
+
+ # clean global
+ token_store = neutronv2.AdminTokenStore.get()
+ token_store.admin_auth_token = None
+ if admin_context:
+ # Note that the context does not contain a token but is
+ # an admin context which will force an elevation to admin
+ # credentials.
+ neutronv2.get_client(my_context)
+ else:
+ # Note that the context is not elevated, but the True is passed in
+ # which will force an elevation to admin credentials even though
+ # the context has an auth_token.
+ neutronv2.get_client(my_context, True)
+
+ def test_get_client_for_admin(self):
+ self._test_get_client_for_admin()
+
+ def test_get_client_for_admin_with_id(self):
+ self._test_get_client_for_admin(use_id=True)
+
+ def test_get_client_for_admin_context(self):
+ self._test_get_client_for_admin(admin_context=True)
+
+ def test_get_client_for_admin_context_with_id(self):
+ self._test_get_client_for_admin(use_id=True, admin_context=True)
diff --git a/nova/tests/unit/network/test_rpcapi.py b/nova/tests/unit/network/test_rpcapi.py
new file mode 100644
index 0000000000..f24fdd02d2
--- /dev/null
+++ b/nova/tests/unit/network/test_rpcapi.py
@@ -0,0 +1,353 @@
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit Tests for nova.network.rpcapi
+"""
+
+import collections
+
+import mox
+from oslo.config import cfg
+
+from nova import context
+from nova.network import rpcapi as network_rpcapi
+from nova import test
+from nova.tests.unit import fake_instance
+
+CONF = cfg.CONF
+
+
+class NetworkRpcAPITestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(NetworkRpcAPITestCase, self).setUp()
+ self.flags(multi_host=True)
+
+ # Used to specify the default value expected if no real value is passed
+ DefaultArg = collections.namedtuple('DefaultArg', ['value'])
+
+ def _test_network_api(self, method, rpc_method, **kwargs):
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+
+ rpcapi = network_rpcapi.NetworkAPI()
+ self.assertIsNotNone(rpcapi.client)
+ self.assertEqual(rpcapi.client.target.topic, CONF.network_topic)
+
+ expected_retval = 'foo' if rpc_method == 'call' else None
+ expected_version = kwargs.pop('version', None)
+ expected_fanout = kwargs.pop('fanout', None)
+ expected_kwargs = kwargs.copy()
+
+ for k, v in expected_kwargs.items():
+ if isinstance(v, self.DefaultArg):
+ expected_kwargs[k] = v.value
+ kwargs.pop(k)
+
+ prepare_kwargs = {}
+ if expected_version:
+ prepare_kwargs['version'] = expected_version
+ if expected_fanout:
+ prepare_kwargs['fanout'] = True
+
+ if 'source_compute' in expected_kwargs:
+ # Fix up for migrate_instance_* calls.
+ expected_kwargs['source'] = expected_kwargs.pop('source_compute')
+ expected_kwargs['dest'] = expected_kwargs.pop('dest_compute')
+
+ targeted_methods = [
+ 'lease_fixed_ip', 'release_fixed_ip', 'rpc_setup_network_on_host',
+ '_rpc_allocate_fixed_ip', 'deallocate_fixed_ip', 'update_dns',
+ '_associate_floating_ip', '_disassociate_floating_ip',
+ 'lease_fixed_ip', 'release_fixed_ip', 'migrate_instance_start',
+ 'migrate_instance_finish',
+ 'allocate_for_instance', 'deallocate_for_instance',
+ ]
+ targeted_by_instance = ['deallocate_for_instance']
+ if method in targeted_methods and ('host' in expected_kwargs or
+ 'instance' in expected_kwargs):
+ if method in targeted_by_instance:
+ host = expected_kwargs['instance']['host']
+ else:
+ host = expected_kwargs['host']
+ if method not in ['allocate_for_instance',
+ 'deallocate_fixed_ip']:
+ expected_kwargs.pop('host')
+ if CONF.multi_host:
+ prepare_kwargs['server'] = host
+
+ self.mox.StubOutWithMock(rpcapi, 'client')
+
+ version_check = [
+ 'deallocate_for_instance', 'deallocate_fixed_ip',
+ 'allocate_for_instance',
+ ]
+ if method in version_check:
+ rpcapi.client.can_send_version(mox.IgnoreArg()).AndReturn(True)
+
+ if prepare_kwargs:
+ rpcapi.client.prepare(**prepare_kwargs).AndReturn(rpcapi.client)
+
+ rpc_method = getattr(rpcapi.client, rpc_method)
+ rpc_method(ctxt, method, **expected_kwargs).AndReturn('foo')
+
+ self.mox.ReplayAll()
+
+ retval = getattr(rpcapi, method)(ctxt, **kwargs)
+ self.assertEqual(retval, expected_retval)
+
+ def test_create_networks(self):
+ self._test_network_api('create_networks', rpc_method='call',
+ arg1='arg', arg2='arg')
+
+ def test_delete_network(self):
+ self._test_network_api('delete_network', rpc_method='call',
+ uuid='fake_uuid', fixed_range='range')
+
+ def test_disassociate_network(self):
+ self._test_network_api('disassociate_network', rpc_method='call',
+ network_uuid='fake_uuid')
+
+ def test_associate_host_and_project(self):
+ self._test_network_api('associate', rpc_method='call',
+ network_uuid='fake_uuid',
+ associations={'host': "testHost",
+ 'project': 'testProject'},
+ version="1.5")
+
+ def test_get_fixed_ip(self):
+ self._test_network_api('get_fixed_ip', rpc_method='call', id='id')
+
+ def test_get_fixed_ip_by_address(self):
+ self._test_network_api('get_fixed_ip_by_address', rpc_method='call',
+ address='a.b.c.d')
+
+ def test_get_floating_ip(self):
+ self._test_network_api('get_floating_ip', rpc_method='call', id='id')
+
+ def test_get_floating_ip_pools(self):
+ self._test_network_api('get_floating_ip_pools', rpc_method='call',
+ version="1.7")
+
+ def test_get_floating_ip_by_address(self):
+ self._test_network_api('get_floating_ip_by_address', rpc_method='call',
+ address='a.b.c.d')
+
+ def test_get_floating_ips_by_project(self):
+ self._test_network_api('get_floating_ips_by_project',
+ rpc_method='call')
+
+ def test_get_floating_ips_by_fixed_address(self):
+ self._test_network_api('get_floating_ips_by_fixed_address',
+ rpc_method='call', fixed_address='w.x.y.z')
+
+ def test_get_instance_id_by_floating_address(self):
+ self._test_network_api('get_instance_id_by_floating_address',
+ rpc_method='call', address='w.x.y.z')
+
+ def test_allocate_floating_ip(self):
+ self._test_network_api('allocate_floating_ip', rpc_method='call',
+ project_id='fake_id', pool='fake_pool', auto_assigned=False)
+
+ def test_deallocate_floating_ip(self):
+ self._test_network_api('deallocate_floating_ip', rpc_method='call',
+ address='addr', affect_auto_assigned=True)
+
+ def test_allocate_floating_ip_no_multi(self):
+ self.flags(multi_host=False)
+ self._test_network_api('allocate_floating_ip', rpc_method='call',
+ project_id='fake_id', pool='fake_pool', auto_assigned=False)
+
+ def test_deallocate_floating_ip_no_multi(self):
+ self.flags(multi_host=False)
+ self._test_network_api('deallocate_floating_ip', rpc_method='call',
+ address='addr', affect_auto_assigned=True)
+
+ def test_associate_floating_ip(self):
+ self._test_network_api('associate_floating_ip', rpc_method='call',
+ floating_address='blah', fixed_address='foo',
+ affect_auto_assigned=True)
+
+ def test_disassociate_floating_ip(self):
+ self._test_network_api('disassociate_floating_ip', rpc_method='call',
+ address='addr', affect_auto_assigned=True)
+
+ def test_allocate_for_instance(self):
+ self._test_network_api('allocate_for_instance', rpc_method='call',
+ instance_id='fake_id', project_id='fake_id', host='fake_host',
+ rxtx_factor='fake_factor', vpn=False, requested_networks={},
+ macs=[], version='1.13')
+
+ def test_deallocate_for_instance(self):
+ instance = fake_instance.fake_instance_obj(context.get_admin_context())
+ self._test_network_api('deallocate_for_instance', rpc_method='call',
+ requested_networks=self.DefaultArg(None), instance=instance,
+ version='1.11')
+
+ def test_deallocate_for_instance_with_expected_networks(self):
+ instance = fake_instance.fake_instance_obj(context.get_admin_context())
+ self._test_network_api('deallocate_for_instance', rpc_method='call',
+ instance=instance, requested_networks={}, version='1.11')
+
+ def test_add_fixed_ip_to_instance(self):
+ self._test_network_api('add_fixed_ip_to_instance', rpc_method='call',
+ instance_id='fake_id', rxtx_factor='fake_factor',
+ host='fake_host', network_id='fake_id', version='1.9')
+
+ def test_remove_fixed_ip_from_instance(self):
+ self._test_network_api('remove_fixed_ip_from_instance',
+ rpc_method='call', instance_id='fake_id',
+ rxtx_factor='fake_factor', host='fake_host',
+ address='fake_address', version='1.9')
+
+ def test_add_network_to_project(self):
+ self._test_network_api('add_network_to_project', rpc_method='call',
+ project_id='fake_id', network_uuid='fake_uuid')
+
+ def test_get_instance_nw_info(self):
+ self._test_network_api('get_instance_nw_info', rpc_method='call',
+ instance_id='fake_id', rxtx_factor='fake_factor',
+ host='fake_host', project_id='fake_id', version='1.9')
+
+ def test_validate_networks(self):
+ self._test_network_api('validate_networks', rpc_method='call',
+ networks={})
+
+ def test_get_instance_uuids_by_ip_filter(self):
+ self._test_network_api('get_instance_uuids_by_ip_filter',
+ rpc_method='call', filters={})
+
+ def test_get_dns_domains(self):
+ self._test_network_api('get_dns_domains', rpc_method='call')
+
+ def test_add_dns_entry(self):
+ self._test_network_api('add_dns_entry', rpc_method='call',
+ address='addr', name='name', dns_type='foo', domain='domain')
+
+ def test_modify_dns_entry(self):
+ self._test_network_api('modify_dns_entry', rpc_method='call',
+ address='addr', name='name', domain='domain')
+
+ def test_delete_dns_entry(self):
+ self._test_network_api('delete_dns_entry', rpc_method='call',
+ name='name', domain='domain')
+
+ def test_delete_dns_domain(self):
+ self._test_network_api('delete_dns_domain', rpc_method='call',
+ domain='fake_domain')
+
+ def test_get_dns_entries_by_address(self):
+ self._test_network_api('get_dns_entries_by_address', rpc_method='call',
+ address='fake_address', domain='fake_domain')
+
+ def test_get_dns_entries_by_name(self):
+ self._test_network_api('get_dns_entries_by_name', rpc_method='call',
+ name='fake_name', domain='fake_domain')
+
+ def test_create_private_dns_domain(self):
+ self._test_network_api('create_private_dns_domain', rpc_method='call',
+ domain='fake_domain', av_zone='fake_zone')
+
+ def test_create_public_dns_domain(self):
+ self._test_network_api('create_public_dns_domain', rpc_method='call',
+ domain='fake_domain', project='fake_project')
+
+ def test_setup_networks_on_host(self):
+ self._test_network_api('setup_networks_on_host', rpc_method='call',
+ instance_id='fake_id', host='fake_host', teardown=False)
+
+ def test_lease_fixed_ip(self):
+ self._test_network_api('lease_fixed_ip', rpc_method='cast',
+ host='fake_host', address='fake_addr')
+
+ def test_release_fixed_ip(self):
+ self._test_network_api('release_fixed_ip', rpc_method='cast',
+ host='fake_host', address='fake_addr')
+
+ def test_set_network_host(self):
+ self._test_network_api('set_network_host', rpc_method='call',
+ network_ref={})
+
+ def test_rpc_setup_network_on_host(self):
+ self._test_network_api('rpc_setup_network_on_host', rpc_method='call',
+ network_id='fake_id', teardown=False, host='fake_host')
+
+ def test_rpc_allocate_fixed_ip(self):
+ self._test_network_api('_rpc_allocate_fixed_ip', rpc_method='call',
+ instance_id='fake_id', network_id='fake_id', address='addr',
+ vpn=True, host='fake_host')
+
+ def test_deallocate_fixed_ip(self):
+ instance = fake_instance.fake_db_instance()
+ self._test_network_api('deallocate_fixed_ip', rpc_method='call',
+ address='fake_addr', host='fake_host', instance=instance,
+ version='1.12')
+
+ def test_update_dns(self):
+ self._test_network_api('update_dns', rpc_method='cast', fanout=True,
+ network_ids='fake_id', version='1.3')
+
+ def test__associate_floating_ip(self):
+ self._test_network_api('_associate_floating_ip', rpc_method='call',
+ floating_address='fake_addr', fixed_address='fixed_address',
+ interface='fake_interface', host='fake_host',
+ instance_uuid='fake_uuid', version='1.6')
+
+ def test__disassociate_floating_ip(self):
+ self._test_network_api('_disassociate_floating_ip', rpc_method='call',
+ address='fake_addr', interface='fake_interface',
+ host='fake_host', instance_uuid='fake_uuid', version='1.6')
+
+ def test_migrate_instance_start(self):
+ self._test_network_api('migrate_instance_start', rpc_method='call',
+ instance_uuid='fake_instance_uuid',
+ rxtx_factor='fake_factor',
+ project_id='fake_project',
+ source_compute='fake_src_compute',
+ dest_compute='fake_dest_compute',
+ floating_addresses='fake_floating_addresses',
+ host=self.DefaultArg(None),
+ version='1.2')
+
+ def test_migrate_instance_start_multi_host(self):
+ self._test_network_api('migrate_instance_start', rpc_method='call',
+ instance_uuid='fake_instance_uuid',
+ rxtx_factor='fake_factor',
+ project_id='fake_project',
+ source_compute='fake_src_compute',
+ dest_compute='fake_dest_compute',
+ floating_addresses='fake_floating_addresses',
+ host='fake_host',
+ version='1.2')
+
+ def test_migrate_instance_finish(self):
+ self._test_network_api('migrate_instance_finish', rpc_method='call',
+ instance_uuid='fake_instance_uuid',
+ rxtx_factor='fake_factor',
+ project_id='fake_project',
+ source_compute='fake_src_compute',
+ dest_compute='fake_dest_compute',
+ floating_addresses='fake_floating_addresses',
+ host=self.DefaultArg(None),
+ version='1.2')
+
+ def test_migrate_instance_finish_multi_host(self):
+ self._test_network_api('migrate_instance_finish', rpc_method='call',
+ instance_uuid='fake_instance_uuid',
+ rxtx_factor='fake_factor',
+ project_id='fake_project',
+ source_compute='fake_src_compute',
+ dest_compute='fake_dest_compute',
+ floating_addresses='fake_floating_addresses',
+ host='fake_host',
+ version='1.2')
diff --git a/nova/tests/objects/__init__.py b/nova/tests/unit/objects/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/objects/__init__.py
+++ b/nova/tests/unit/objects/__init__.py
diff --git a/nova/tests/unit/objects/test_agent.py b/nova/tests/unit/objects/test_agent.py
new file mode 100644
index 0000000000..86be0cd361
--- /dev/null
+++ b/nova/tests/unit/objects/test_agent.py
@@ -0,0 +1,103 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import exception
+from nova.objects import agent as agent_obj
+from nova.tests.unit.objects import test_objects
+
+
+fake_agent = {
+ 'id': 1,
+ 'hypervisor': 'novavm',
+ 'os': 'linux',
+ 'architecture': 'DISC',
+ 'version': '1.0',
+ 'url': 'http://openstack.org/novavm/agents/novavm_agent_v1.0.rpm',
+ 'md5hash': '8cb151f3adc23a92db8ddbe084796823',
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+}
+
+
+class _TestAgent(object):
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+ test.assertEqual(db[field], obj[field])
+
+ @mock.patch('nova.db.agent_build_get_by_triple')
+ def test_get_by_triple(self, mock_get):
+ mock_get.return_value = fake_agent
+ agent = agent_obj.Agent.get_by_triple(self.context,
+ 'novavm', 'linux', 'DISC')
+ self._compare(self, fake_agent, agent)
+
+ @mock.patch('nova.db.agent_build_get_by_triple')
+ def test_get_by_triple_none(self, mock_get):
+ mock_get.return_value = None
+ agent = agent_obj.Agent.get_by_triple(self.context,
+ 'novavm', 'linux', 'DISC')
+ self.assertIsNone(agent)
+
+ @mock.patch('nova.db.agent_build_create')
+ def test_create(self, mock_create):
+ mock_create.return_value = fake_agent
+ agent = agent_obj.Agent(context=self.context)
+ agent.hypervisor = 'novavm'
+ agent.create()
+ mock_create.assert_called_once_with(self.context,
+ {'hypervisor': 'novavm'})
+ self._compare(self, fake_agent, agent)
+
+ @mock.patch('nova.db.agent_build_create')
+ def test_create_with_id(self, mock_create):
+ agent = agent_obj.Agent(context=self.context, id=123)
+ self.assertRaises(exception.ObjectActionError, agent.create)
+ self.assertFalse(mock_create.called)
+
+ @mock.patch('nova.db.agent_build_destroy')
+ def test_destroy(self, mock_destroy):
+ agent = agent_obj.Agent(context=self.context, id=123)
+ agent.destroy()
+ mock_destroy.assert_called_once_with(self.context, 123)
+
+ @mock.patch('nova.db.agent_build_update')
+ def test_save(self, mock_update):
+ mock_update.return_value = fake_agent
+ agent = agent_obj.Agent(context=self.context, id=123)
+ agent.obj_reset_changes()
+ agent.hypervisor = 'novavm'
+ agent.save()
+ mock_update.assert_called_once_with(self.context, 123,
+ {'hypervisor': 'novavm'})
+
+ @mock.patch('nova.db.agent_build_get_all')
+ def test_get_all(self, mock_get_all):
+ mock_get_all.return_value = [fake_agent]
+ agents = agent_obj.AgentList.get_all(self.context, hypervisor='novavm')
+ self.assertEqual(1, len(agents))
+ self._compare(self, fake_agent, agents[0])
+ mock_get_all.assert_called_once_with(self.context, hypervisor='novavm')
+
+
+class TestAgent(test_objects._LocalTest, _TestAgent):
+ pass
+
+
+class TestAgentRemote(test_objects._RemoteTest, _TestAgent):
+ pass
diff --git a/nova/tests/unit/objects/test_aggregate.py b/nova/tests/unit/objects/test_aggregate.py
new file mode 100644
index 0000000000..67ea514bc7
--- /dev/null
+++ b/nova/tests/unit/objects/test_aggregate.py
@@ -0,0 +1,199 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.utils import timeutils
+
+from nova import db
+from nova import exception
+from nova.objects import aggregate
+from nova.tests.unit import fake_notifier
+from nova.tests.unit.objects import test_objects
+
+
+NOW = timeutils.utcnow().replace(microsecond=0)
+fake_aggregate = {
+ 'created_at': NOW,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'id': 123,
+ 'name': 'fake-aggregate',
+ 'hosts': ['foo', 'bar'],
+ 'metadetails': {'this': 'that'},
+ }
+
+SUBS = {'metadata': 'metadetails'}
+
+
+class _TestAggregateObject(object):
+ def test_get_by_id(self):
+ self.mox.StubOutWithMock(db, 'aggregate_get')
+ db.aggregate_get(self.context, 123).AndReturn(fake_aggregate)
+ self.mox.ReplayAll()
+ agg = aggregate.Aggregate.get_by_id(self.context, 123)
+ self.compare_obj(agg, fake_aggregate, subs=SUBS)
+
+ def test_create(self):
+ self.mox.StubOutWithMock(db, 'aggregate_create')
+ db.aggregate_create(self.context, {'name': 'foo'},
+ metadata={'one': 'two'}).AndReturn(fake_aggregate)
+ self.mox.ReplayAll()
+ agg = aggregate.Aggregate()
+ agg.name = 'foo'
+ agg.metadata = {'one': 'two'}
+ agg.create(self.context)
+ self.compare_obj(agg, fake_aggregate, subs=SUBS)
+
+ def test_recreate_fails(self):
+ self.mox.StubOutWithMock(db, 'aggregate_create')
+ db.aggregate_create(self.context, {'name': 'foo'},
+ metadata={'one': 'two'}).AndReturn(fake_aggregate)
+ self.mox.ReplayAll()
+ agg = aggregate.Aggregate()
+ agg.name = 'foo'
+ agg.metadata = {'one': 'two'}
+ agg.create(self.context)
+ self.assertRaises(exception.ObjectActionError, agg.create,
+ self.context)
+
+ def test_save(self):
+ self.mox.StubOutWithMock(db, 'aggregate_update')
+ db.aggregate_update(self.context, 123, {'name': 'baz'}).AndReturn(
+ fake_aggregate)
+ self.mox.ReplayAll()
+ agg = aggregate.Aggregate()
+ agg.id = 123
+ agg.name = 'baz'
+ agg.save(self.context)
+ self.compare_obj(agg, fake_aggregate, subs=SUBS)
+
+ def test_save_and_create_no_hosts(self):
+ agg = aggregate.Aggregate()
+ agg.id = 123
+ agg.hosts = ['foo', 'bar']
+ self.assertRaises(exception.ObjectActionError,
+ agg.create, self.context)
+ self.assertRaises(exception.ObjectActionError,
+ agg.save, self.context)
+
+ def test_update_metadata(self):
+ self.mox.StubOutWithMock(db, 'aggregate_metadata_delete')
+ self.mox.StubOutWithMock(db, 'aggregate_metadata_add')
+ db.aggregate_metadata_delete(self.context, 123, 'todelete')
+ db.aggregate_metadata_add(self.context, 123, {'toadd': 'myval'})
+ self.mox.ReplayAll()
+ fake_notifier.NOTIFICATIONS = []
+ agg = aggregate.Aggregate()
+ agg._context = self.context
+ agg.id = 123
+ agg.metadata = {'foo': 'bar'}
+ agg.obj_reset_changes()
+ agg.update_metadata({'todelete': None, 'toadd': 'myval'})
+ self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('aggregate.updatemetadata.start', msg.event_type)
+ self.assertEqual({'todelete': None, 'toadd': 'myval'},
+ msg.payload['meta_data'])
+ msg = fake_notifier.NOTIFICATIONS[1]
+ self.assertEqual('aggregate.updatemetadata.end', msg.event_type)
+ self.assertEqual({'todelete': None, 'toadd': 'myval'},
+ msg.payload['meta_data'])
+ self.assertEqual({'foo': 'bar', 'toadd': 'myval'}, agg.metadata)
+
+ def test_destroy(self):
+ self.mox.StubOutWithMock(db, 'aggregate_delete')
+ db.aggregate_delete(self.context, 123)
+ self.mox.ReplayAll()
+ agg = aggregate.Aggregate()
+ agg.id = 123
+ agg.destroy(self.context)
+
+ def test_add_host(self):
+ self.mox.StubOutWithMock(db, 'aggregate_host_add')
+ db.aggregate_host_add(self.context, 123, 'bar'
+ ).AndReturn({'host': 'bar'})
+ self.mox.ReplayAll()
+ agg = aggregate.Aggregate()
+ agg.id = 123
+ agg.hosts = ['foo']
+ agg._context = self.context
+ agg.add_host('bar')
+ self.assertEqual(agg.hosts, ['foo', 'bar'])
+
+ def test_delete_host(self):
+ self.mox.StubOutWithMock(db, 'aggregate_host_delete')
+ db.aggregate_host_delete(self.context, 123, 'foo')
+ self.mox.ReplayAll()
+ agg = aggregate.Aggregate()
+ agg.id = 123
+ agg.hosts = ['foo', 'bar']
+ agg._context = self.context
+ agg.delete_host('foo')
+ self.assertEqual(agg.hosts, ['bar'])
+
+ def test_availability_zone(self):
+ agg = aggregate.Aggregate()
+ agg.metadata = {'availability_zone': 'foo'}
+ self.assertEqual('foo', agg.availability_zone)
+
+ def test_get_all(self):
+ self.mox.StubOutWithMock(db, 'aggregate_get_all')
+ db.aggregate_get_all(self.context).AndReturn([fake_aggregate])
+ self.mox.ReplayAll()
+ aggs = aggregate.AggregateList.get_all(self.context)
+ self.assertEqual(1, len(aggs))
+ self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
+
+ def test_by_host(self):
+ self.mox.StubOutWithMock(db, 'aggregate_get_by_host')
+ db.aggregate_get_by_host(self.context, 'fake-host', key=None,
+ ).AndReturn([fake_aggregate])
+ self.mox.ReplayAll()
+ aggs = aggregate.AggregateList.get_by_host(self.context, 'fake-host')
+ self.assertEqual(1, len(aggs))
+ self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
+
+ @mock.patch('nova.db.aggregate_get_by_metadata_key')
+ def test_get_by_metadata_key(self, get_by_metadata_key):
+ get_by_metadata_key.return_value = [fake_aggregate]
+ aggs = aggregate.AggregateList.get_by_metadata_key(
+ self.context, 'this')
+ self.assertEqual(1, len(aggs))
+ self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
+
+ @mock.patch('nova.db.aggregate_get_by_metadata_key')
+ def test_get_by_metadata_key_and_hosts_no_match(self, get_by_metadata_key):
+ get_by_metadata_key.return_value = [fake_aggregate]
+ aggs = aggregate.AggregateList.get_by_metadata_key(
+ self.context, 'this', hosts=['baz'])
+ self.assertEqual(0, len(aggs))
+
+ @mock.patch('nova.db.aggregate_get_by_metadata_key')
+ def test_get_by_metadata_key_and_hosts_match(self, get_by_metadata_key):
+ get_by_metadata_key.return_value = [fake_aggregate]
+ aggs = aggregate.AggregateList.get_by_metadata_key(
+ self.context, 'this', hosts=['foo', 'bar'])
+ self.assertEqual(1, len(aggs))
+ self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
+
+
+class TestAggregateObject(test_objects._LocalTest,
+ _TestAggregateObject):
+ pass
+
+
+class TestRemoteAggregateObject(test_objects._RemoteTest,
+ _TestAggregateObject):
+ pass
diff --git a/nova/tests/unit/objects/test_bandwidth_usage.py b/nova/tests/unit/objects/test_bandwidth_usage.py
new file mode 100644
index 0000000000..933e7ff643
--- /dev/null
+++ b/nova/tests/unit/objects/test_bandwidth_usage.py
@@ -0,0 +1,124 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+import iso8601
+import mock
+from oslo.utils import timeutils
+
+from nova import context
+from nova import db
+from nova.objects import bandwidth_usage
+from nova import test
+from nova.tests.unit.objects import test_objects
+
+
+class _TestBandwidthUsage(test.TestCase):
+
+ def setUp(self):
+ super(_TestBandwidthUsage, self).setUp()
+ self.user_id = 'fake_user'
+ self.project_id = 'fake_project'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+ now, start_period = self._time_now_and_start_period()
+ self.expected_bw_usage = self._fake_bw_usage(
+ time=now, start_period=start_period)
+
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+ test.assertEqual(db[field], obj[field])
+
+ @staticmethod
+ def _fake_bw_usage(time=None, start_period=None, bw_in=100,
+ bw_out=200, last_ctr_in=12345, last_ctr_out=67890):
+ fake_bw_usage = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'instance_uuid': 'fake_uuid1',
+ 'mac': 'fake_mac1',
+ 'start_period': start_period,
+ 'bw_in': bw_in,
+ 'bw_out': bw_out,
+ 'last_ctr_in': last_ctr_in,
+ 'last_ctr_out': last_ctr_out,
+ 'last_refreshed': time
+ }
+ return fake_bw_usage
+
+ @staticmethod
+ def _time_now_and_start_period():
+ now = timeutils.utcnow().replace(tzinfo=iso8601.iso8601.Utc(),
+ microsecond=0)
+ start_period = now - datetime.timedelta(seconds=10)
+ return now, start_period
+
+ @mock.patch.object(db, 'bw_usage_get')
+ def test_get_by_instance_uuid_and_mac(self, mock_get):
+ mock_get.return_value = self.expected_bw_usage
+ bw_usage = bandwidth_usage.BandwidthUsage.get_by_instance_uuid_and_mac(
+ self.context, 'fake_uuid', 'fake_mac',
+ start_period=self.expected_bw_usage['start_period'])
+ self._compare(self, self.expected_bw_usage, bw_usage)
+
+ @mock.patch.object(db, 'bw_usage_get_by_uuids')
+ def test_get_by_uuids(self, mock_get_by_uuids):
+ mock_get_by_uuids.return_value = [self.expected_bw_usage]
+
+ bw_usages = bandwidth_usage.BandwidthUsageList.get_by_uuids(
+ self.context, ['fake_uuid'],
+ start_period=self.expected_bw_usage['start_period'])
+ self.assertEqual(len(bw_usages), 1)
+ self._compare(self, self.expected_bw_usage, bw_usages[0])
+
+ @mock.patch.object(db, 'bw_usage_update')
+ def test_create(self, mock_create):
+ mock_create.return_value = self.expected_bw_usage
+
+ bw_usage = bandwidth_usage.BandwidthUsage()
+ bw_usage.create(self.context, 'fake_uuid', 'fake_mac',
+ 100, 200, 12345, 67890,
+ start_period=self.expected_bw_usage['start_period'])
+
+ self._compare(self, self.expected_bw_usage, bw_usage)
+
+ @mock.patch.object(db, 'bw_usage_update')
+ def test_update(self, mock_update):
+ expected_bw_usage1 = self._fake_bw_usage(
+ time=self.expected_bw_usage['last_refreshed'],
+ start_period=self.expected_bw_usage['start_period'],
+ last_ctr_in=42, last_ctr_out=42)
+
+ mock_update.side_effect = [expected_bw_usage1, self.expected_bw_usage]
+
+ bw_usage = bandwidth_usage.BandwidthUsage()
+ bw_usage.create(self.context, 'fake_uuid1', 'fake_mac1',
+ 100, 200, 42, 42,
+ start_period=self.expected_bw_usage['start_period'])
+ self._compare(self, expected_bw_usage1, bw_usage)
+ bw_usage.create(self.context, 'fake_uuid1', 'fake_mac1',
+ 100, 200, 12345, 67890,
+ start_period=self.expected_bw_usage['start_period'])
+ self._compare(self, self.expected_bw_usage, bw_usage)
+
+
+class TestBandwidthUsageObject(test_objects._LocalTest,
+ _TestBandwidthUsage):
+ pass
+
+
+class TestRemoteBandwidthUsageObject(test_objects._RemoteTest,
+ _TestBandwidthUsage):
+ pass
diff --git a/nova/tests/unit/objects/test_block_device.py b/nova/tests/unit/objects/test_block_device.py
new file mode 100644
index 0000000000..32bb51fe96
--- /dev/null
+++ b/nova/tests/unit/objects/test_block_device.py
@@ -0,0 +1,333 @@
+# Copyright 2013 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+import mock
+
+from nova.cells import rpcapi as cells_rpcapi
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import block_device as block_device_obj
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit.objects import test_objects
+
+
+class _TestBlockDeviceMappingObject(object):
+ def fake_bdm(self, instance=None):
+ instance = instance or {}
+ fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 123,
+ 'instance_uuid': instance.get('uuid') or 'fake-instance',
+ 'device_name': '/dev/sda2',
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'snapshot_id': 'fake-snapshot-id-1',
+ 'boot_index': -1
+ })
+ if instance:
+ fake_bdm['instance'] = instance
+ return fake_bdm
+
+ def _test_save(self, cell_type=None):
+ if cell_type:
+ self.flags(enable=True, cell_type=cell_type, group='cells')
+ else:
+ self.flags(enable=False, group='cells')
+
+ fake_bdm = self.fake_bdm()
+ with contextlib.nested(
+ mock.patch.object(
+ db, 'block_device_mapping_update', return_value=fake_bdm),
+ mock.patch.object(
+ cells_rpcapi.CellsAPI, 'bdm_update_or_create_at_top')
+ ) as (bdm_update_mock, cells_update_mock):
+ bdm_object = objects.BlockDeviceMapping()
+ bdm_object.id = 123
+ bdm_object.volume_id = 'fake_volume_id'
+ bdm_object.save(self.context)
+
+ bdm_update_mock.assert_called_once_with(
+ self.context, 123, {'volume_id': 'fake_volume_id'},
+ legacy=False)
+ if cell_type != 'compute':
+ self.assertFalse(cells_update_mock.called)
+ else:
+ self.assertEqual(1, cells_update_mock.call_count)
+ self.assertTrue(len(cells_update_mock.call_args[0]) > 1)
+ self.assertIsInstance(cells_update_mock.call_args[0][1],
+ block_device_obj.BlockDeviceMapping)
+ self.assertEqual(cells_update_mock.call_args[1], {})
+
+ def test_save_nocells(self):
+ self._test_save()
+
+ def test_save_apicell(self):
+ self._test_save(cell_type='api')
+
+ def test_save_computecell(self):
+ self._test_save(cell_type='compute')
+
+ def test_save_instance_changed(self):
+ bdm_object = objects.BlockDeviceMapping()
+ bdm_object.instance = objects.Instance()
+ self.assertRaises(exception.ObjectActionError,
+ bdm_object.save, self.context)
+
+ @mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
+ def test_get_by_volume_id(self, get_by_vol_id):
+ get_by_vol_id.return_value = self.fake_bdm()
+
+ vol_bdm = objects.BlockDeviceMapping.get_by_volume_id(
+ self.context, 'fake-volume-id')
+ for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS:
+ self.assertFalse(vol_bdm.obj_attr_is_set(attr))
+ self.assertRemotes()
+
+ @mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
+ def test_get_by_volume_id_not_found(self, get_by_vol_id):
+ get_by_vol_id.return_value = None
+
+ self.assertRaises(exception.VolumeBDMNotFound,
+ objects.BlockDeviceMapping.get_by_volume_id,
+ self.context, 'fake-volume-id')
+
+ @mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
+ def test_get_by_volume_instance_uuid_missmatch(self, get_by_vol_id):
+ fake_bdm_vol = self.fake_bdm(instance={'uuid': 'other-fake-instance'})
+ get_by_vol_id.return_value = fake_bdm_vol
+
+ self.assertRaises(exception.InvalidVolume,
+ objects.BlockDeviceMapping.get_by_volume_id,
+ self.context, 'fake-volume-id',
+ instance_uuid='fake-instance')
+
+ @mock.patch.object(db, 'block_device_mapping_get_by_volume_id')
+ def test_get_by_volume_id_with_expected(self, get_by_vol_id):
+ get_by_vol_id.return_value = self.fake_bdm(
+ fake_instance.fake_db_instance())
+
+ vol_bdm = objects.BlockDeviceMapping.get_by_volume_id(
+ self.context, 'fake-volume-id', expected_attrs=['instance'])
+ for attr in block_device_obj.BLOCK_DEVICE_OPTIONAL_ATTRS:
+ self.assertTrue(vol_bdm.obj_attr_is_set(attr))
+ get_by_vol_id.assert_called_once_with(self.context, 'fake-volume-id',
+ ['instance'])
+ self.assertRemotes()
+
+ def _test_create_mocked(self, cell_type=None):
+ if cell_type:
+ self.flags(enable=True, cell_type=cell_type, group='cells')
+ else:
+ self.flags(enable=False, group='cells')
+ values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
+ 'destination_type': 'volume',
+ 'instance_uuid': 'fake-instance'}
+ fake_bdm = fake_block_device.FakeDbBlockDeviceDict(values)
+
+ with contextlib.nested(
+ mock.patch.object(
+ db, 'block_device_mapping_create', return_value=fake_bdm),
+ mock.patch.object(cells_rpcapi.CellsAPI,
+ 'bdm_update_or_create_at_top')
+ ) as (bdm_create_mock, cells_update_mock):
+ bdm = objects.BlockDeviceMapping(**values)
+
+ if cell_type == 'api':
+ self.assertRaises(exception.ObjectActionError,
+ bdm.create, self.context)
+ elif cell_type == 'compute':
+ bdm.create(self.context)
+ bdm_create_mock.assert_called_once_with(
+ self.context, values, legacy=False)
+ self.assertEqual(1, cells_update_mock.call_count)
+ self.assertTrue(len(cells_update_mock.call_args[0]) > 1)
+ self.assertIsInstance(cells_update_mock.call_args[0][1],
+ block_device_obj.BlockDeviceMapping)
+ self.assertEqual(cells_update_mock.call_args[1],
+ {'create': True})
+ else:
+ bdm.create(self.context)
+ self.assertFalse(cells_update_mock.called)
+ bdm_create_mock.assert_called_once_with(
+ self.context, values, legacy=False)
+
+ def test_create_nocells(self):
+ self._test_create_mocked()
+
+ def test_create_apicell(self):
+ self._test_create_mocked(cell_type='api')
+
+ def test_create_computecell(self):
+ self._test_create_mocked(cell_type='compute')
+
+ def test_create(self):
+ values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
+ 'destination_type': 'volume',
+ 'instance_uuid': 'fake-instance'}
+ bdm = objects.BlockDeviceMapping(**values)
+ with mock.patch.object(cells_rpcapi.CellsAPI,
+ 'bdm_update_or_create_at_top'):
+ bdm.create(self.context)
+
+ for k, v in values.iteritems():
+ self.assertEqual(v, getattr(bdm, k))
+
+ def test_create_fails(self):
+ values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
+ 'destination_type': 'volume',
+ 'instance_uuid': 'fake-instance'}
+ bdm = objects.BlockDeviceMapping(**values)
+ bdm.create(self.context)
+
+ self.assertRaises(exception.ObjectActionError,
+ bdm.create, self.context)
+
+ def test_create_fails_instance(self):
+ values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
+ 'destination_type': 'volume',
+ 'instance_uuid': 'fake-instance',
+ 'instance': objects.Instance()}
+ bdm = objects.BlockDeviceMapping(**values)
+ self.assertRaises(exception.ObjectActionError,
+ bdm.create, self.context)
+
+ def _test_destroy_mocked(self, cell_type=None):
+ values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
+ 'destination_type': 'volume', 'id': 1,
+ 'instance_uuid': 'fake-instance', 'device_name': 'fake'}
+ if cell_type:
+ self.flags(enable=True, cell_type=cell_type, group='cells')
+ else:
+ self.flags(enable=False, group='cells')
+ with contextlib.nested(
+ mock.patch.object(db, 'block_device_mapping_destroy'),
+ mock.patch.object(cells_rpcapi.CellsAPI, 'bdm_destroy_at_top')
+ ) as (bdm_del, cells_destroy):
+ bdm = objects.BlockDeviceMapping(**values)
+ bdm.destroy(self.context)
+ bdm_del.assert_called_once_with(self.context, values['id'])
+ if cell_type != 'compute':
+ self.assertFalse(cells_destroy.called)
+ else:
+ cells_destroy.assert_called_once_with(
+ self.context, values['instance_uuid'],
+ device_name=values['device_name'],
+ volume_id=values['volume_id'])
+
+ def test_destroy_nocells(self):
+ self._test_destroy_mocked()
+
+ def test_destroy_apicell(self):
+ self._test_destroy_mocked(cell_type='api')
+
+ def test_destroy_computecell(self):
+ self._test_destroy_mocked(cell_type='compute')
+
+
+class TestBlockDeviceMappingObject(test_objects._LocalTest,
+ _TestBlockDeviceMappingObject):
+ pass
+
+
+class TestRemoteBlockDeviceMappingObject(test_objects._RemoteTest,
+ _TestBlockDeviceMappingObject):
+ pass
+
+
+class _TestBlockDeviceMappingListObject(object):
+ def fake_bdm(self, bdm_id):
+ fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
+ 'id': bdm_id, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda2',
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'snapshot_id': 'fake-snapshot-id-1',
+ 'boot_index': -1,
+ })
+ return fake_bdm
+
+ @mock.patch.object(db, 'block_device_mapping_get_all_by_instance')
+ def test_get_by_instance_uuid(self, get_all_by_inst):
+ fakes = [self.fake_bdm(123), self.fake_bdm(456)]
+ get_all_by_inst.return_value = fakes
+ bdm_list = (
+ objects.BlockDeviceMappingList.get_by_instance_uuid(
+ self.context, 'fake_instance_uuid'))
+ for faked, got in zip(fakes, bdm_list):
+ self.assertIsInstance(got, objects.BlockDeviceMapping)
+ self.assertEqual(faked['id'], got.id)
+
+ @mock.patch.object(db, 'block_device_mapping_get_all_by_instance')
+ def test_get_by_instance_uuid_no_result(self, get_all_by_inst):
+ get_all_by_inst.return_value = None
+ bdm_list = (
+ objects.BlockDeviceMappingList.get_by_instance_uuid(
+ self.context, 'fake_instance_uuid'))
+ self.assertEqual(0, len(bdm_list))
+
+ def test_root_volume_metadata(self):
+ fake_volume = {
+ 'volume_image_metadata': {'vol_test_key': 'vol_test_value'}}
+
+ class FakeVolumeApi(object):
+ def get(*args, **kwargs):
+ return fake_volume
+
+ block_device_mapping = block_device_obj.block_device_make_list(None, [
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1,
+ 'boot_index': 0,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'volume_id': 'fake_volume_id',
+ 'delete_on_termination': False})])
+
+ volume_meta = block_device_mapping.root_metadata(
+ self.context, None, FakeVolumeApi())
+ self.assertEqual(fake_volume['volume_image_metadata'], volume_meta)
+
+ def test_root_image_metadata(self):
+ fake_image = {'properties': {'img_test_key': 'img_test_value'}}
+
+ class FakeImageApi(object):
+ def show(*args, **kwargs):
+ return fake_image
+
+ block_device_mapping = block_device_obj.block_device_make_list(None, [
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1,
+ 'boot_index': 0,
+ 'source_type': 'image',
+ 'destination_type': 'local',
+ 'image_id': "fake-image",
+ 'delete_on_termination': True})])
+
+ image_meta = block_device_mapping.root_metadata(
+ self.context, FakeImageApi(), None)
+ self.assertEqual(fake_image['properties'], image_meta)
+
+
+class TestBlockDeviceMappingListObject(test_objects._LocalTest,
+ _TestBlockDeviceMappingListObject):
+ pass
+
+
+class TestRemoteBlockDeviceMappingListObject(
+ test_objects._RemoteTest, _TestBlockDeviceMappingListObject):
+ pass
diff --git a/nova/tests/unit/objects/test_compute_node.py b/nova/tests/unit/objects/test_compute_node.py
new file mode 100644
index 0000000000..0bbf8050c8
--- /dev/null
+++ b/nova/tests/unit/objects/test_compute_node.py
@@ -0,0 +1,240 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+
+from nova import db
+from nova import exception
+from nova.objects import compute_node
+from nova.objects import hv_spec
+from nova.objects import service
+from nova.tests.unit.objects import test_objects
+from nova.virt import hardware
+
+NOW = timeutils.utcnow().replace(microsecond=0)
+fake_stats = {'num_foo': '10'}
+fake_stats_db_format = jsonutils.dumps(fake_stats)
+# host_ip is coerced from a string to an IPAddress
+# but needs to be converted to a string for the database format
+fake_host_ip = '127.0.0.1'
+fake_numa_topology = hardware.VirtNUMAHostTopology(
+ cells=[hardware.VirtNUMATopologyCellUsage(0, set([1, 2]), 512),
+ hardware.VirtNUMATopologyCellUsage(1, set([3, 4]), 512)])
+fake_numa_topology_db_format = fake_numa_topology.to_json()
+fake_hv_spec = hv_spec.HVSpec(arch='foo', hv_type='bar', vm_mode='foobar')
+fake_supported_hv_specs = [fake_hv_spec]
+# for backward compatibility, each supported instance object
+# is stored as a list in the database
+fake_supported_hv_specs_db_format = jsonutils.dumps([fake_hv_spec.to_list()])
+fake_compute_node = {
+ 'created_at': NOW,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'id': 123,
+ 'service_id': 456,
+ 'vcpus': 4,
+ 'memory_mb': 4096,
+ 'local_gb': 1024,
+ 'vcpus_used': 2,
+ 'memory_mb_used': 2048,
+ 'local_gb_used': 512,
+ 'hypervisor_type': 'Hyper-Dan-VM-ware',
+ 'hypervisor_version': 1001,
+ 'hypervisor_hostname': 'vm.danplanet.com',
+ 'free_ram_mb': 1024,
+ 'free_disk_gb': 256,
+ 'current_workload': 100,
+ 'running_vms': 2013,
+ 'cpu_info': 'Schmintel i786',
+ 'disk_available_least': 256,
+ 'metrics': '',
+ 'stats': fake_stats_db_format,
+ 'host_ip': fake_host_ip,
+ 'numa_topology': fake_numa_topology_db_format,
+ 'supported_instances': fake_supported_hv_specs_db_format,
+ }
+
+
+class _TestComputeNodeObject(object):
+ def supported_hv_specs_comparator(self, expected, obj_val):
+ obj_val = [inst.to_list() for inst in obj_val]
+ self.json_comparator(expected, obj_val)
+
+ def comparators(self):
+ return {'stats': self.json_comparator,
+ 'host_ip': self.str_comparator,
+ 'supported_hv_specs': self.supported_hv_specs_comparator}
+
+ def subs(self):
+ return {'supported_hv_specs': 'supported_instances'}
+
+ def test_get_by_id(self):
+ self.mox.StubOutWithMock(db, 'compute_node_get')
+ db.compute_node_get(self.context, 123).AndReturn(fake_compute_node)
+ self.mox.ReplayAll()
+ compute = compute_node.ComputeNode.get_by_id(self.context, 123)
+ self.compare_obj(compute, fake_compute_node,
+ subs=self.subs(),
+ comparators=self.comparators())
+
+ def test_get_by_service_id(self):
+ self.mox.StubOutWithMock(db, 'compute_node_get_by_service_id')
+ db.compute_node_get_by_service_id(self.context, 456).AndReturn(
+ fake_compute_node)
+ self.mox.ReplayAll()
+ compute = compute_node.ComputeNode.get_by_service_id(self.context, 456)
+ self.compare_obj(compute, fake_compute_node,
+ subs=self.subs(),
+ comparators=self.comparators())
+
+ def test_create(self):
+ self.mox.StubOutWithMock(db, 'compute_node_create')
+ db.compute_node_create(
+ self.context,
+ {
+ 'service_id': 456,
+ 'stats': fake_stats_db_format,
+ 'host_ip': fake_host_ip,
+ 'supported_instances': fake_supported_hv_specs_db_format,
+ }).AndReturn(fake_compute_node)
+ self.mox.ReplayAll()
+ compute = compute_node.ComputeNode()
+ compute.service_id = 456
+ compute.stats = fake_stats
+ # NOTE (pmurray): host_ip is coerced to an IPAddress
+ compute.host_ip = fake_host_ip
+ compute.supported_hv_specs = fake_supported_hv_specs
+ compute.create(self.context)
+ self.compare_obj(compute, fake_compute_node,
+ subs=self.subs(),
+ comparators=self.comparators())
+
+ def test_recreate_fails(self):
+ self.mox.StubOutWithMock(db, 'compute_node_create')
+ db.compute_node_create(self.context, {'service_id': 456}).AndReturn(
+ fake_compute_node)
+ self.mox.ReplayAll()
+ compute = compute_node.ComputeNode()
+ compute.service_id = 456
+ compute.create(self.context)
+ self.assertRaises(exception.ObjectActionError, compute.create,
+ self.context)
+
+ def test_save(self):
+ self.mox.StubOutWithMock(db, 'compute_node_update')
+ db.compute_node_update(
+ self.context, 123,
+ {
+ 'vcpus_used': 3,
+ 'stats': fake_stats_db_format,
+ 'host_ip': fake_host_ip,
+ 'supported_instances': fake_supported_hv_specs_db_format,
+ }).AndReturn(fake_compute_node)
+ self.mox.ReplayAll()
+ compute = compute_node.ComputeNode()
+ compute.id = 123
+ compute.vcpus_used = 3
+ compute.stats = fake_stats
+ # NOTE (pmurray): host_ip is coerced to an IPAddress
+ compute.host_ip = fake_host_ip
+ compute.supported_hv_specs = fake_supported_hv_specs
+ compute.save(self.context)
+ self.compare_obj(compute, fake_compute_node,
+ subs=self.subs(),
+ comparators=self.comparators())
+
+ @mock.patch.object(db, 'compute_node_create',
+ return_value=fake_compute_node)
+ def test_set_id_failure(self, db_mock):
+ compute = compute_node.ComputeNode()
+ compute.create(self.context)
+ self.assertRaises(exception.ReadOnlyFieldError, setattr,
+ compute, 'id', 124)
+
+ def test_destroy(self):
+ self.mox.StubOutWithMock(db, 'compute_node_delete')
+ db.compute_node_delete(self.context, 123)
+ self.mox.ReplayAll()
+ compute = compute_node.ComputeNode()
+ compute.id = 123
+ compute.destroy(self.context)
+
+ def test_service(self):
+ self.mox.StubOutWithMock(service.Service, 'get_by_id')
+ service.Service.get_by_id(self.context, 456).AndReturn('my-service')
+ self.mox.ReplayAll()
+ compute = compute_node.ComputeNode()
+ compute._context = self.context
+ compute.id = 123
+ compute.service_id = 456
+ self.assertEqual('my-service', compute.service)
+ # Make sure it doesn't call Service.get_by_id() again
+ self.assertEqual('my-service', compute.service)
+
+ def test_get_all(self):
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ db.compute_node_get_all(self.context).AndReturn([fake_compute_node])
+ self.mox.ReplayAll()
+ computes = compute_node.ComputeNodeList.get_all(self.context)
+ self.assertEqual(1, len(computes))
+ self.compare_obj(computes[0], fake_compute_node,
+ subs=self.subs(),
+ comparators=self.comparators())
+
+ def test_get_by_hypervisor(self):
+ self.mox.StubOutWithMock(db, 'compute_node_search_by_hypervisor')
+ db.compute_node_search_by_hypervisor(self.context, 'hyper').AndReturn(
+ [fake_compute_node])
+ self.mox.ReplayAll()
+ computes = compute_node.ComputeNodeList.get_by_hypervisor(self.context,
+ 'hyper')
+ self.assertEqual(1, len(computes))
+ self.compare_obj(computes[0], fake_compute_node,
+ subs=self.subs(),
+ comparators=self.comparators())
+
+ @mock.patch('nova.db.service_get')
+ def test_get_by_service(self, service_get):
+ service_get.return_value = {'compute_node': [fake_compute_node]}
+ fake_service = service.Service(id=123)
+ computes = compute_node.ComputeNodeList.get_by_service(self.context,
+ fake_service)
+ self.assertEqual(1, len(computes))
+ self.compare_obj(computes[0], fake_compute_node,
+ subs=self.subs(),
+ comparators=self.comparators())
+
+ def test_compat_numa_topology(self):
+ compute = compute_node.ComputeNode()
+ primitive = compute.obj_to_primitive(target_version='1.4')
+ self.assertNotIn('numa_topology', primitive)
+
+ def test_compat_supported_hv_specs(self):
+ compute = compute_node.ComputeNode()
+ compute.supported_hv_specs = fake_supported_hv_specs
+ primitive = compute.obj_to_primitive(target_version='1.5')
+ self.assertNotIn('supported_hv_specs', primitive)
+
+
+class TestComputeNodeObject(test_objects._LocalTest,
+ _TestComputeNodeObject):
+ pass
+
+
+class TestRemoteComputeNodeObject(test_objects._RemoteTest,
+ _TestComputeNodeObject):
+ pass
diff --git a/nova/tests/unit/objects/test_dns_domain.py b/nova/tests/unit/objects/test_dns_domain.py
new file mode 100644
index 0000000000..45f42ff237
--- /dev/null
+++ b/nova/tests/unit/objects/test_dns_domain.py
@@ -0,0 +1,85 @@
+# Copyright (C) 2014, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import db
+from nova.objects import dns_domain
+from nova.tests.unit.objects import test_objects
+
+
+fake_dnsd = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'domain': 'blah.example.com',
+ 'scope': 'private',
+ 'availability_zone': 'overthere',
+ 'project_id': '867530niner',
+}
+
+
+class _TestDNSDomain(object):
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+ test.assertEqual(db[field], obj[field])
+
+ def test_get_by_domain(self):
+ with mock.patch.object(db, 'dnsdomain_get') as get:
+ get.return_value = fake_dnsd
+ dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
+ self._compare(self, fake_dnsd, dnsd)
+
+ def test_register_for_zone(self):
+ dns_domain.DNSDomain.register_for_zone(self.context.elevated(),
+ 'domain', 'zone')
+ dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
+ self.assertEqual('domain', dnsd.domain)
+ self.assertEqual('zone', dnsd.availability_zone)
+
+ def test_register_for_project(self):
+ dns_domain.DNSDomain.register_for_project(self.context.elevated(),
+ 'domain', 'project')
+ dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
+ self.assertEqual('domain', dnsd.domain)
+ self.assertEqual('project', dnsd.project_id)
+
+ def test_delete_by_domain(self):
+ dns_domain.DNSDomain.register_for_zone(self.context.elevated(),
+ 'domain', 'zone')
+ dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
+ self.assertEqual('domain', dnsd.domain)
+ self.assertEqual('zone', dnsd.availability_zone)
+
+ dns_domain.DNSDomain.delete_by_domain(self.context.elevated(),
+ 'domain')
+ dnsd = dns_domain.DNSDomain.get_by_domain(self.context, 'domain')
+ self.assertIsNone(dnsd)
+
+ def test_get_all(self):
+ with mock.patch.object(db, 'dnsdomain_get_all') as get:
+ get.return_value = [fake_dnsd]
+ dns_domain.DNSDomainList.get_all(self.context)
+
+
+class TestDNSDomainObject(test_objects._LocalTest,
+ _TestDNSDomain):
+ pass
+
+
+class TestRemoteDNSDomainObject(test_objects._RemoteTest,
+ _TestDNSDomain):
+ pass
diff --git a/nova/tests/unit/objects/test_ec2.py b/nova/tests/unit/objects/test_ec2.py
new file mode 100644
index 0000000000..cc79cb1e49
--- /dev/null
+++ b/nova/tests/unit/objects/test_ec2.py
@@ -0,0 +1,192 @@
+# Copyright (C) 2014, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import db
+from nova.objects import ec2 as ec2_obj
+from nova.tests.unit.objects import test_objects
+
+
+fake_map = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'id': 1,
+ 'uuid': 'fake-uuid-2',
+}
+
+
+class _TestEC2InstanceMapping(object):
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+ test.assertEqual(db[field], obj[field])
+
+ def test_create(self):
+ imap = ec2_obj.EC2InstanceMapping()
+ imap.uuid = 'fake-uuid-2'
+
+ with mock.patch.object(db, 'ec2_instance_create') as create:
+ create.return_value = fake_map
+ imap.create(self.context)
+
+ self.assertEqual(self.context, imap._context)
+ imap._context = None
+ self._compare(self, fake_map, imap)
+
+ def test_get_by_uuid(self):
+ with mock.patch.object(db, 'ec2_instance_get_by_uuid') as get:
+ get.return_value = fake_map
+ imap = ec2_obj.EC2InstanceMapping.get_by_uuid(self.context,
+ 'fake-uuid-2')
+ self._compare(self, fake_map, imap)
+
+ def test_get_by_ec2_id(self):
+ with mock.patch.object(db, 'ec2_instance_get_by_id') as get:
+ get.return_value = fake_map
+ imap = ec2_obj.EC2InstanceMapping.get_by_id(self.context, 1)
+ self._compare(self, fake_map, imap)
+
+
+class TestEC2InstanceMapping(test_objects._LocalTest, _TestEC2InstanceMapping):
+ pass
+
+
+class TestRemoteEC2InstanceMapping(test_objects._RemoteTest,
+ _TestEC2InstanceMapping):
+ pass
+
+
+class _TestEC2VolumeMapping(object):
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+ test.assertEqual(db[field], obj[field])
+
+ def test_create(self):
+ vmap = ec2_obj.EC2VolumeMapping()
+ vmap.uuid = 'fake-uuid-2'
+
+ with mock.patch.object(db, 'ec2_volume_create') as create:
+ create.return_value = fake_map
+ vmap.create(self.context)
+
+ self.assertEqual(self.context, vmap._context)
+ vmap._context = None
+ self._compare(self, fake_map, vmap)
+
+ def test_get_by_uuid(self):
+ with mock.patch.object(db, 'ec2_volume_get_by_uuid') as get:
+ get.return_value = fake_map
+ vmap = ec2_obj.EC2VolumeMapping.get_by_uuid(self.context,
+ 'fake-uuid-2')
+ self._compare(self, fake_map, vmap)
+
+ def test_get_by_ec2_id(self):
+ with mock.patch.object(db, 'ec2_volume_get_by_id') as get:
+ get.return_value = fake_map
+ vmap = ec2_obj.EC2VolumeMapping.get_by_id(self.context, 1)
+ self._compare(self, fake_map, vmap)
+
+
+class TestEC2VolumeMapping(test_objects._LocalTest, _TestEC2VolumeMapping):
+ pass
+
+
+class TestRemoteEC2VolumeMapping(test_objects._RemoteTest,
+ _TestEC2VolumeMapping):
+ pass
+
+
+class _TestEC2SnapshotMapping(object):
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+ test.assertEqual(db[field], obj[field])
+
+ def test_create(self):
+ smap = ec2_obj.EC2SnapshotMapping()
+ smap.uuid = 'fake-uuid-2'
+
+ with mock.patch.object(db, 'ec2_snapshot_create') as create:
+ create.return_value = fake_map
+ smap.create(self.context)
+
+ self.assertEqual(self.context, smap._context)
+ smap._context = None
+ self._compare(self, fake_map, smap)
+
+ def test_get_by_uuid(self):
+ with mock.patch.object(db, 'ec2_snapshot_get_by_uuid') as get:
+ get.return_value = fake_map
+ smap = ec2_obj.EC2SnapshotMapping.get_by_uuid(self.context,
+ 'fake-uuid-2')
+ self._compare(self, fake_map, smap)
+
+ def test_get_by_ec2_id(self):
+ with mock.patch.object(db, 'ec2_snapshot_get_by_ec2_id') as get:
+ get.return_value = fake_map
+ smap = ec2_obj.EC2SnapshotMapping.get_by_id(self.context, 1)
+ self._compare(self, fake_map, smap)
+
+
+class TestEC2SnapshotMapping(test_objects._LocalTest, _TestEC2SnapshotMapping):
+ pass
+
+
+class TestRemoteEC2SnapshotMapping(test_objects._RemoteTest,
+ _TestEC2SnapshotMapping):
+ pass
+
+
+class _TestS3ImageMapping(object):
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+ test.assertEqual(db[field], obj[field])
+
+ def test_create(self):
+ s3imap = ec2_obj.S3ImageMapping()
+ s3imap.uuid = 'fake-uuid-2'
+
+ with mock.patch.object(db, 's3_image_create') as create:
+ create.return_value = fake_map
+ s3imap.create(self.context)
+
+ self.assertEqual(self.context, s3imap._context)
+ s3imap._context = None
+ self._compare(self, fake_map, s3imap)
+
+ def test_get_by_uuid(self):
+ with mock.patch.object(db, 's3_image_get_by_uuid') as get:
+ get.return_value = fake_map
+ s3imap = ec2_obj.S3ImageMapping.get_by_uuid(self.context,
+ 'fake-uuid-2')
+ self._compare(self, fake_map, s3imap)
+
+ def test_get_by_s3_id(self):
+ with mock.patch.object(db, 's3_image_get') as get:
+ get.return_value = fake_map
+ s3imap = ec2_obj.S3ImageMapping.get_by_id(self.context, 1)
+ self._compare(self, fake_map, s3imap)
+
+
+class TestS3ImageMapping(test_objects._LocalTest, _TestS3ImageMapping):
+ pass
+
+
+class TestRemoteS3ImageMapping(test_objects._RemoteTest, _TestS3ImageMapping):
+ pass
diff --git a/nova/tests/unit/objects/test_external_event.py b/nova/tests/unit/objects/test_external_event.py
new file mode 100644
index 0000000000..c3e319243f
--- /dev/null
+++ b/nova/tests/unit/objects/test_external_event.py
@@ -0,0 +1,46 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.objects import external_event as external_event_obj
+from nova.tests.unit.objects import test_objects
+
+
+class _TestInstanceExternalEventObject(object):
+ def test_make_key(self):
+ key = external_event_obj.InstanceExternalEvent.make_key('foo', 'bar')
+ self.assertEqual('foo-bar', key)
+
+ def test_make_key_no_tag(self):
+ key = external_event_obj.InstanceExternalEvent.make_key('foo')
+ self.assertEqual('foo', key)
+
+ def test_key(self):
+ event = external_event_obj.InstanceExternalEvent(name='foo',
+ tag='bar')
+ with mock.patch.object(event, 'make_key') as make_key:
+ make_key.return_value = 'key'
+ self.assertEqual('key', event.key)
+ make_key.assert_called_once_with('foo', 'bar')
+
+
+class TestInstanceExternalEventObject(test_objects._LocalTest,
+ _TestInstanceExternalEventObject):
+ pass
+
+
+class TestRemoteInstanceExternalEventObject(test_objects._RemoteTest,
+ _TestInstanceExternalEventObject):
+ pass
diff --git a/nova/tests/objects/test_fields.py b/nova/tests/unit/objects/test_fields.py
index 806d97773a..806d97773a 100644
--- a/nova/tests/objects/test_fields.py
+++ b/nova/tests/unit/objects/test_fields.py
diff --git a/nova/tests/unit/objects/test_fixed_ip.py b/nova/tests/unit/objects/test_fixed_ip.py
new file mode 100644
index 0000000000..116827416d
--- /dev/null
+++ b/nova/tests/unit/objects/test_fixed_ip.py
@@ -0,0 +1,339 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+import iso8601
+import mock
+import netaddr
+from oslo.utils import timeutils
+
+from nova import exception
+from nova.objects import fixed_ip
+from nova.tests.unit import fake_instance
+from nova.tests.unit.objects import test_network
+from nova.tests.unit.objects import test_objects
+
+
+fake_fixed_ip = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'id': 123,
+ 'address': '192.168.1.100',
+ 'network_id': None,
+ 'virtual_interface_id': None,
+ 'instance_uuid': None,
+ 'allocated': False,
+ 'leased': False,
+ 'reserved': False,
+ 'host': None,
+ 'network': None,
+ 'virtual_interface': None,
+ 'floating_ips': [],
+ }
+
+
+class _TestFixedIPObject(object):
+ def _compare(self, obj, db_obj):
+ for field in obj.fields:
+ if field in ('default_route', 'floating_ips'):
+ continue
+ if field in fixed_ip.FIXED_IP_OPTIONAL_ATTRS:
+ if obj.obj_attr_is_set(field) and db_obj[field] is not None:
+ obj_val = obj[field].uuid
+ db_val = db_obj[field]['uuid']
+ else:
+ continue
+ else:
+ obj_val = obj[field]
+ db_val = db_obj[field]
+ if isinstance(obj_val, netaddr.IPAddress):
+ obj_val = str(obj_val)
+ self.assertEqual(db_val, obj_val)
+
+ @mock.patch('nova.db.fixed_ip_get')
+ def test_get_by_id(self, get):
+ get.return_value = fake_fixed_ip
+ fixedip = fixed_ip.FixedIP.get_by_id(self.context, 123)
+ get.assert_called_once_with(self.context, 123, get_network=False)
+ self._compare(fixedip, fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ip_get')
+ @mock.patch('nova.db.network_get')
+ def test_get_by_id_with_extras(self, network_get, fixed_get):
+ db_fixed = dict(fake_fixed_ip,
+ network=test_network.fake_network)
+ fixed_get.return_value = db_fixed
+ fixedip = fixed_ip.FixedIP.get_by_id(self.context, 123,
+ expected_attrs=['network'])
+ fixed_get.assert_called_once_with(self.context, 123, get_network=True)
+ self._compare(fixedip, db_fixed)
+ self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid)
+ self.assertFalse(network_get.called)
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ def test_get_by_address(self, get):
+ get.return_value = fake_fixed_ip
+ fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4')
+ get.assert_called_once_with(self.context, '1.2.3.4',
+ columns_to_join=[])
+ self._compare(fixedip, fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.instance_get')
+ def test_get_by_address_with_extras(self, instance_get, network_get,
+ fixed_get):
+ db_fixed = dict(fake_fixed_ip, network=test_network.fake_network,
+ instance=fake_instance.fake_db_instance())
+ fixed_get.return_value = db_fixed
+ fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4',
+ expected_attrs=['network',
+ 'instance'])
+ fixed_get.assert_called_once_with(self.context, '1.2.3.4',
+ columns_to_join=['network',
+ 'instance'])
+ self._compare(fixedip, db_fixed)
+ self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid)
+ self.assertEqual(db_fixed['instance']['uuid'], fixedip.instance.uuid)
+ self.assertFalse(network_get.called)
+ self.assertFalse(instance_get.called)
+
+ @mock.patch('nova.db.fixed_ip_get_by_address')
+ @mock.patch('nova.db.network_get')
+ @mock.patch('nova.db.instance_get')
+ def test_get_by_address_with_extras_deleted_instance(self, instance_get,
+ network_get,
+ fixed_get):
+ db_fixed = dict(fake_fixed_ip, network=test_network.fake_network,
+ instance=None)
+ fixed_get.return_value = db_fixed
+ fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4',
+ expected_attrs=['network',
+ 'instance'])
+ fixed_get.assert_called_once_with(self.context, '1.2.3.4',
+ columns_to_join=['network',
+ 'instance'])
+ self._compare(fixedip, db_fixed)
+ self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid)
+ self.assertIsNone(fixedip.instance)
+ self.assertFalse(network_get.called)
+ self.assertFalse(instance_get.called)
+
+ @mock.patch('nova.db.fixed_ip_get_by_floating_address')
+ def test_get_by_floating_address(self, get):
+ get.return_value = fake_fixed_ip
+ fixedip = fixed_ip.FixedIP.get_by_floating_address(self.context,
+ '1.2.3.4')
+ get.assert_called_once_with(self.context, '1.2.3.4')
+ self._compare(fixedip, fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ip_get_by_floating_address')
+ def test_get_by_floating_address_none(self, get):
+ get.return_value = None
+ fixedip = fixed_ip.FixedIP.get_by_floating_address(self.context,
+ '1.2.3.4')
+ get.assert_called_once_with(self.context, '1.2.3.4')
+ self.assertIsNone(fixedip)
+
+ @mock.patch('nova.db.fixed_ip_get_by_network_host')
+ def test_get_by_network_and_host(self, get):
+ get.return_value = fake_fixed_ip
+ fixedip = fixed_ip.FixedIP.get_by_network_and_host(self.context,
+ 123, 'host')
+ get.assert_called_once_with(self.context, 123, 'host')
+ self._compare(fixedip, fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ip_associate')
+ def test_associate(self, associate):
+ associate.return_value = fake_fixed_ip
+ fixedip = fixed_ip.FixedIP.associate(self.context, '1.2.3.4',
+ 'fake-uuid')
+ associate.assert_called_with(self.context, '1.2.3.4', 'fake-uuid',
+ network_id=None, reserved=False)
+ self._compare(fixedip, fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ip_associate_pool')
+ def test_associate_pool(self, associate):
+ associate.return_value = fake_fixed_ip
+ fixedip = fixed_ip.FixedIP.associate_pool(self.context, 123,
+ 'fake-uuid', 'host')
+ associate.assert_called_with(self.context, 123,
+ instance_uuid='fake-uuid',
+ host='host')
+ self._compare(fixedip, fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ip_disassociate')
+ def test_disassociate_by_address(self, disassociate):
+ fixed_ip.FixedIP.disassociate_by_address(self.context, '1.2.3.4')
+ disassociate.assert_called_with(self.context, '1.2.3.4')
+
+ @mock.patch('nova.db.fixed_ip_disassociate_all_by_timeout')
+ def test_disassociate_all_by_timeout(self, disassociate):
+ now = timeutils.utcnow()
+ now_tz = timeutils.parse_isotime(
+ timeutils.isotime(now)).replace(
+ tzinfo=iso8601.iso8601.Utc())
+ disassociate.return_value = 123
+ result = fixed_ip.FixedIP.disassociate_all_by_timeout(self.context,
+ 'host', now)
+ self.assertEqual(123, result)
+ # NOTE(danms): be pedantic about timezone stuff
+ args, kwargs = disassociate.call_args_list[0]
+ self.assertEqual(now_tz, args[2])
+ self.assertEqual((self.context, 'host'), args[:2])
+ self.assertEqual({}, kwargs)
+
+ @mock.patch('nova.db.fixed_ip_create')
+ def test_create(self, create):
+ create.return_value = fake_fixed_ip
+ fixedip = fixed_ip.FixedIP(address='1.2.3.4')
+ fixedip.create(self.context)
+ create.assert_called_once_with(
+ self.context, {'address': '1.2.3.4'})
+ self._compare(fixedip, fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ip_update')
+ def test_save(self, update):
+ update.return_value = fake_fixed_ip
+ fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4',
+ instance_uuid='fake-uuid')
+ self.assertRaises(exception.ObjectActionError, fixedip.save)
+ fixedip.obj_reset_changes(['address'])
+ fixedip.save()
+ update.assert_called_once_with(self.context, '1.2.3.4',
+ {'instance_uuid': 'fake-uuid'})
+
+ @mock.patch('nova.db.fixed_ip_disassociate')
+ def test_disassociate(self, disassociate):
+ fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4',
+ instance_uuid='fake-uuid')
+ fixedip.obj_reset_changes()
+ fixedip.disassociate()
+ disassociate.assert_called_once_with(self.context, '1.2.3.4')
+ self.assertIsNone(fixedip.instance_uuid)
+
+ @mock.patch('nova.db.fixed_ip_get_all')
+ def test_get_all(self, get_all):
+ get_all.return_value = [fake_fixed_ip]
+ fixedips = fixed_ip.FixedIPList.get_all(self.context)
+ self.assertEqual(1, len(fixedips))
+ get_all.assert_called_once_with(self.context)
+ self._compare(fixedips[0], fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ip_get_by_instance')
+ def test_get_by_instance(self, get):
+ get.return_value = [fake_fixed_ip]
+ fixedips = fixed_ip.FixedIPList.get_by_instance_uuid(self.context,
+ 'fake-uuid')
+ self.assertEqual(1, len(fixedips))
+ get.assert_called_once_with(self.context, 'fake-uuid')
+ self._compare(fixedips[0], fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ip_get_by_host')
+ def test_get_by_host(self, get):
+ get.return_value = [fake_fixed_ip]
+ fixedips = fixed_ip.FixedIPList.get_by_host(self.context, 'host')
+ self.assertEqual(1, len(fixedips))
+ get.assert_called_once_with(self.context, 'host')
+ self._compare(fixedips[0], fake_fixed_ip)
+
+ @mock.patch('nova.db.fixed_ips_by_virtual_interface')
+ def test_get_by_virtual_interface_id(self, get):
+ get.return_value = [fake_fixed_ip]
+ fixedips = fixed_ip.FixedIPList.get_by_virtual_interface_id(
+ self.context, 123)
+ self.assertEqual(1, len(fixedips))
+ get.assert_called_once_with(self.context, 123)
+ self._compare(fixedips[0], fake_fixed_ip)
+
+ def test_floating_ips_do_not_lazy_load(self):
+ fixedip = fixed_ip.FixedIP()
+ self.assertRaises(NotImplementedError, lambda: fixedip.floating_ips)
+
+ @mock.patch('nova.db.fixed_ip_bulk_create')
+ def test_bulk_create(self, bulk):
+ fixed_ips = [fixed_ip.FixedIP(address='192.168.1.1'),
+ fixed_ip.FixedIP(address='192.168.1.2')]
+ fixed_ip.FixedIPList.bulk_create(self.context, fixed_ips)
+ bulk.assert_called_once_with(self.context,
+ [{'address': '192.168.1.1'},
+ {'address': '192.168.1.2'}])
+
+ @mock.patch('nova.db.network_get_associated_fixed_ips')
+ def test_get_by_network(self, get):
+ info = {'address': '1.2.3.4',
+ 'instance_uuid': 'fake-uuid',
+ 'network_id': 0,
+ 'vif_id': 1,
+ 'vif_address': 'de:ad:be:ee:f0:00',
+ 'instance_hostname': 'fake-host',
+ 'instance_updated': datetime.datetime(1955, 11, 5),
+ 'instance_created': datetime.datetime(1955, 11, 5),
+ 'allocated': True,
+ 'leased': True,
+ 'default_route': True,
+ }
+ get.return_value = [info]
+ fixed_ips = fixed_ip.FixedIPList.get_by_network(
+ self.context, {'id': 0}, host='fake-host')
+ get.assert_called_once_with(self.context, 0, host='fake-host')
+ self.assertEqual(1, len(fixed_ips))
+ fip = fixed_ips[0]
+ self.assertEqual('1.2.3.4', str(fip.address))
+ self.assertEqual('fake-uuid', fip.instance_uuid)
+ self.assertEqual(0, fip.network_id)
+ self.assertEqual(1, fip.virtual_interface_id)
+ self.assertTrue(fip.allocated)
+ self.assertTrue(fip.leased)
+ self.assertEqual('fake-uuid', fip.instance.uuid)
+ self.assertEqual('fake-host', fip.instance.hostname)
+ self.assertIsInstance(fip.instance.created_at, datetime.datetime)
+ self.assertIsInstance(fip.instance.updated_at, datetime.datetime)
+ self.assertEqual(1, fip.virtual_interface.id)
+ self.assertEqual(info['vif_address'], fip.virtual_interface.address)
+
+ @mock.patch('nova.db.network_get_associated_fixed_ips')
+ def test_backport_default_route(self, mock_get):
+ info = {'address': '1.2.3.4',
+ 'instance_uuid': 'fake-uuid',
+ 'network_id': 0,
+ 'vif_id': 1,
+ 'vif_address': 'de:ad:be:ee:f0:00',
+ 'instance_hostname': 'fake-host',
+ 'instance_updated': datetime.datetime(1955, 11, 5),
+ 'instance_created': datetime.datetime(1955, 11, 5),
+ 'allocated': True,
+ 'leased': True,
+ 'default_route': True,
+ }
+ mock_get.return_value = [info]
+ fixed_ips = fixed_ip.FixedIPList.get_by_network(
+ self.context, {'id': 0}, host='fake-host')
+ primitive = fixed_ips[0].obj_to_primitive()
+ self.assertIn('default_route', primitive['nova_object.data'])
+ fixed_ips[0].obj_make_compatible(primitive['nova_object.data'], '1.1')
+ self.assertNotIn('default_route', primitive['nova_object.data'])
+
+
+class TestFixedIPObject(test_objects._LocalTest,
+ _TestFixedIPObject):
+ pass
+
+
+class TestRemoteFixedIPObject(test_objects._RemoteTest,
+ _TestFixedIPObject):
+ pass
diff --git a/nova/tests/unit/objects/test_flavor.py b/nova/tests/unit/objects/test_flavor.py
new file mode 100644
index 0000000000..a7189d4caa
--- /dev/null
+++ b/nova/tests/unit/objects/test_flavor.py
@@ -0,0 +1,253 @@
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import db
+from nova import exception
+from nova.objects import flavor as flavor_obj
+from nova.tests.unit.objects import test_objects
+
+
+fake_flavor = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'id': 1,
+ 'name': 'm1.foo',
+ 'memory_mb': 1024,
+ 'vcpus': 4,
+ 'root_gb': 20,
+ 'ephemeral_gb': 0,
+ 'flavorid': 'm1.foo',
+ 'swap': 0,
+ 'rxtx_factor': 1.0,
+ 'vcpu_weight': 1,
+ 'disabled': False,
+ 'is_public': True,
+ 'extra_specs': {'foo': 'bar'},
+ }
+
+
+class _TestFlavor(object):
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+ test.assertEqual(db[field], obj[field])
+
+ def test_get_by_id(self):
+ with mock.patch.object(db, 'flavor_get') as get:
+ get.return_value = fake_flavor
+ flavor = flavor_obj.Flavor.get_by_id(self.context, 1)
+ self._compare(self, fake_flavor, flavor)
+
+ def test_get_by_name(self):
+ with mock.patch.object(db, 'flavor_get_by_name') as get_by_name:
+ get_by_name.return_value = fake_flavor
+ flavor = flavor_obj.Flavor.get_by_name(self.context, 'm1.foo')
+ self._compare(self, fake_flavor, flavor)
+
+ def test_get_by_flavor_id(self):
+ with mock.patch.object(db, 'flavor_get_by_flavor_id') as get_by_id:
+ get_by_id.return_value = fake_flavor
+ flavor = flavor_obj.Flavor.get_by_flavor_id(self.context,
+ 'm1.foo')
+ self._compare(self, fake_flavor, flavor)
+
+ def test_add_access(self):
+ elevated = self.context.elevated()
+ flavor = flavor_obj.Flavor(context=elevated, flavorid='123')
+ with mock.patch.object(db, 'flavor_access_add') as add:
+ flavor.add_access('456')
+ add.assert_called_once_with(elevated, '123', '456')
+
+ def test_add_access_with_dirty_projects(self):
+ flavor = flavor_obj.Flavor(context=self.context, projects=['1'])
+ self.assertRaises(exception.ObjectActionError,
+ flavor.add_access, '2')
+
+ def test_remove_access(self):
+ elevated = self.context.elevated()
+ flavor = flavor_obj.Flavor(context=elevated, flavorid='123')
+ with mock.patch.object(db, 'flavor_access_remove') as remove:
+ flavor.remove_access('456')
+ remove.assert_called_once_with(elevated, '123', '456')
+
+ def test_create(self):
+ flavor = flavor_obj.Flavor()
+ flavor.name = 'm1.foo'
+ flavor.extra_specs = fake_flavor['extra_specs']
+
+ with mock.patch.object(db, 'flavor_create') as create:
+ create.return_value = fake_flavor
+ flavor.create(self.context)
+
+ self.assertEqual(self.context, flavor._context)
+ # NOTE(danms): Orphan this to avoid lazy-loads
+ flavor._context = None
+ self._compare(self, fake_flavor, flavor)
+
+ def test_create_with_projects(self):
+ context = self.context.elevated()
+ flavor = flavor_obj.Flavor()
+ flavor.name = 'm1.foo'
+ flavor.extra_specs = fake_flavor['extra_specs']
+ flavor.projects = ['project-1', 'project-2']
+
+ db_flavor = dict(fake_flavor, projects=list(flavor.projects))
+
+ with mock.patch.multiple(db, flavor_create=mock.DEFAULT,
+ flavor_access_get_by_flavor_id=mock.DEFAULT
+ ) as methods:
+ methods['flavor_create'].return_value = db_flavor
+ methods['flavor_access_get_by_flavor_id'].return_value = [
+ {'project_id': 'project-1'},
+ {'project_id': 'project-2'}]
+ flavor.create(context)
+ methods['flavor_create'].assert_called_once_with(
+ context,
+ {'name': 'm1.foo',
+ 'extra_specs': fake_flavor['extra_specs']},
+ projects=['project-1', 'project-2'])
+
+ self.assertEqual(context, flavor._context)
+ # NOTE(danms): Orphan this to avoid lazy-loads
+ flavor._context = None
+ self._compare(self, fake_flavor, flavor)
+ self.assertEqual(['project-1', 'project-2'], flavor.projects)
+
+ def test_create_with_id(self):
+ flavor = flavor_obj.Flavor(id=123)
+ self.assertRaises(exception.ObjectActionError, flavor.create,
+ self.context)
+
+ @mock.patch('nova.db.flavor_access_add')
+ @mock.patch('nova.db.flavor_access_remove')
+ @mock.patch('nova.db.flavor_extra_specs_delete')
+ @mock.patch('nova.db.flavor_extra_specs_update_or_create')
+ def test_save(self, mock_update, mock_delete, mock_remove, mock_add):
+ ctxt = self.context.elevated()
+ extra_specs = {'key1': 'value1', 'key2': 'value2'}
+ projects = ['project-1', 'project-2']
+ flavor = flavor_obj.Flavor(context=ctxt, flavorid='foo',
+ extra_specs=extra_specs, projects=projects)
+ flavor.obj_reset_changes()
+
+ # Test deleting an extra_specs key and project
+ del flavor.extra_specs['key1']
+ del flavor.projects[-1]
+ self.assertEqual(set(['extra_specs', 'projects']),
+ flavor.obj_what_changed())
+ flavor.save()
+ self.assertEqual({'key2': 'value2'}, flavor.extra_specs)
+ mock_delete.assert_called_once_with(ctxt, 'foo', 'key1')
+ self.assertEqual(['project-1'], flavor.projects)
+ mock_remove.assert_called_once_with(ctxt, 'foo', 'project-2')
+
+ # Test updating an extra_specs key value
+ flavor.extra_specs['key2'] = 'foobar'
+ self.assertEqual(set(['extra_specs']), flavor.obj_what_changed())
+ flavor.save()
+ self.assertEqual({'key2': 'foobar'}, flavor.extra_specs)
+ mock_update.assert_called_with(ctxt, 'foo', {'key2': 'foobar'})
+
+ # Test adding an extra_specs and project
+ flavor.extra_specs['key3'] = 'value3'
+ flavor.projects.append('project-3')
+ self.assertEqual(set(['extra_specs', 'projects']),
+ flavor.obj_what_changed())
+ flavor.save()
+ self.assertEqual({'key2': 'foobar', 'key3': 'value3'},
+ flavor.extra_specs)
+ mock_update.assert_called_with(ctxt, 'foo', {'key2': 'foobar',
+ 'key3': 'value3'})
+ self.assertEqual(['project-1', 'project-3'], flavor.projects)
+ mock_add.assert_called_once_with(ctxt, 'foo', 'project-3')
+
+ @mock.patch('nova.db.flavor_create')
+ @mock.patch('nova.db.flavor_extra_specs_delete')
+ @mock.patch('nova.db.flavor_extra_specs_update_or_create')
+ def test_save_deleted_extra_specs(self, mock_update, mock_delete,
+ mock_create):
+ mock_create.return_value = dict(fake_flavor,
+ extra_specs={'key1': 'value1'})
+ ctxt = self.context.elevated()
+ flavor = flavor_obj.Flavor(context=ctxt)
+ flavor.flavorid = 'test'
+ flavor.extra_specs = {'key1': 'value1'}
+ flavor.create()
+ flavor.extra_specs = {}
+ flavor.save()
+ mock_delete.assert_called_once_with(ctxt, flavor.flavorid,
+ 'key1')
+ self.assertFalse(mock_update.called)
+
+ def test_save_invalid_fields(self):
+ flavor = flavor_obj.Flavor(id=123)
+ self.assertRaises(exception.ObjectActionError, flavor.save)
+
+ def test_destroy(self):
+ flavor = flavor_obj.Flavor(id=123, name='foo')
+ with mock.patch.object(db, 'flavor_destroy') as destroy:
+ flavor.destroy(self.context)
+ destroy.assert_called_once_with(self.context, flavor.name)
+
+ def test_load_projects(self):
+ flavor = flavor_obj.Flavor(context=self.context, flavorid='foo')
+ with mock.patch.object(db, 'flavor_access_get_by_flavor_id') as get:
+ get.return_value = [{'project_id': 'project-1'}]
+ projects = flavor.projects
+
+ self.assertEqual(['project-1'], projects)
+ self.assertNotIn('projects', flavor.obj_what_changed())
+
+ def test_load_anything_else(self):
+ flavor = flavor_obj.Flavor()
+ self.assertRaises(exception.ObjectActionError,
+ getattr, flavor, 'name')
+
+
+class TestFlavor(test_objects._LocalTest, _TestFlavor):
+ pass
+
+
+class TestFlavorRemote(test_objects._RemoteTest, _TestFlavor):
+ pass
+
+
+class _TestFlavorList(object):
+ def test_get_all(self):
+ with mock.patch.object(db, 'flavor_get_all') as get_all:
+ get_all.return_value = [fake_flavor]
+ filters = {'min_memory_mb': 4096}
+ flavors = flavor_obj.FlavorList.get_all(self.context,
+ inactive=False,
+ filters=filters,
+ sort_key='id',
+ sort_dir='asc')
+ self.assertEqual(1, len(flavors))
+ _TestFlavor._compare(self, fake_flavor, flavors[0])
+ get_all.assert_called_once_with(self.context, inactive=False,
+ filters=filters, sort_key='id',
+ sort_dir='asc', limit=None,
+ marker=None)
+
+
+class TestFlavorList(test_objects._LocalTest, _TestFlavorList):
+ pass
+
+
+class TestFlavorListRemote(test_objects._RemoteTest, _TestFlavorList):
+ pass
diff --git a/nova/tests/unit/objects/test_floating_ip.py b/nova/tests/unit/objects/test_floating_ip.py
new file mode 100644
index 0000000000..8454505bb0
--- /dev/null
+++ b/nova/tests/unit/objects/test_floating_ip.py
@@ -0,0 +1,259 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import netaddr
+
+from nova import exception
+from nova import objects
+from nova.objects import floating_ip
+from nova.tests.unit.objects import test_fixed_ip
+from nova.tests.unit.objects import test_network
+from nova.tests.unit.objects import test_objects
+
+fake_floating_ip = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'id': 123,
+ 'address': '172.17.0.1',
+ 'fixed_ip_id': None,
+ 'project_id': None,
+ 'host': None,
+ 'auto_assigned': False,
+ 'pool': None,
+ 'interface': None,
+ 'fixed_ip': None,
+}
+
+
+class _TestFloatingIPObject(object):
+ def _compare(self, obj, db_obj):
+ for field in obj.fields:
+ if field in floating_ip.FLOATING_IP_OPTIONAL_ATTRS:
+ if obj.obj_attr_is_set(field):
+ obj_val = obj[field].id
+ db_val = db_obj[field]['id']
+ else:
+ continue
+ else:
+ obj_val = obj[field]
+ db_val = db_obj[field]
+ if isinstance(obj_val, netaddr.IPAddress):
+ obj_val = str(obj_val)
+ self.assertEqual(db_val, obj_val)
+
+ @mock.patch('nova.db.floating_ip_get')
+ def test_get_by_id(self, get):
+ db_floatingip = dict(fake_floating_ip,
+ fixed_ip=test_fixed_ip.fake_fixed_ip)
+ get.return_value = db_floatingip
+ floatingip = floating_ip.FloatingIP.get_by_id(self.context, 123)
+ get.assert_called_once_with(self.context, 123)
+ self._compare(floatingip, db_floatingip)
+
+ @mock.patch('nova.db.floating_ip_get_by_address')
+ def test_get_by_address(self, get):
+ get.return_value = fake_floating_ip
+ floatingip = floating_ip.FloatingIP.get_by_address(self.context,
+ '1.2.3.4')
+ get.assert_called_once_with(self.context, '1.2.3.4')
+ self._compare(floatingip, fake_floating_ip)
+
+ @mock.patch('nova.db.floating_ip_get_pools')
+ def test_get_pool_names(self, get):
+ get.return_value = [{'name': 'a'}, {'name': 'b'}]
+ self.assertEqual(['a', 'b'],
+ floating_ip.FloatingIP.get_pool_names(self.context))
+
+ @mock.patch('nova.db.floating_ip_allocate_address')
+ def test_allocate_address(self, allocate):
+ allocate.return_value = '1.2.3.4'
+ self.assertEqual('1.2.3.4',
+ floating_ip.FloatingIP.allocate_address(self.context,
+ 'project',
+ 'pool'))
+ allocate.assert_called_with(self.context, 'project', 'pool',
+ auto_assigned=False)
+
+ @mock.patch('nova.db.floating_ip_fixed_ip_associate')
+ def test_associate(self, associate):
+ db_fixed = dict(test_fixed_ip.fake_fixed_ip,
+ network=test_network.fake_network)
+ associate.return_value = db_fixed
+ floatingip = floating_ip.FloatingIP.associate(self.context,
+ '172.17.0.1',
+ '192.168.1.1',
+ 'host')
+ associate.assert_called_with(self.context, '172.17.0.1',
+ '192.168.1.1', 'host')
+ self.assertEqual(db_fixed['id'], floatingip.fixed_ip.id)
+ self.assertEqual('172.17.0.1', str(floatingip.address))
+ self.assertEqual('host', floatingip.host)
+
+ @mock.patch('nova.db.floating_ip_deallocate')
+ def test_deallocate(self, deallocate):
+ floating_ip.FloatingIP.deallocate(self.context, '1.2.3.4')
+ deallocate.assert_called_with(self.context, '1.2.3.4')
+
+ @mock.patch('nova.db.floating_ip_destroy')
+ def test_destroy(self, destroy):
+ floating_ip.FloatingIP.destroy(self.context, '1.2.3.4')
+ destroy.assert_called_with(self.context, '1.2.3.4')
+
+ @mock.patch('nova.db.floating_ip_disassociate')
+ def test_disassociate(self, disassociate):
+ db_fixed = dict(test_fixed_ip.fake_fixed_ip,
+ network=test_network.fake_network)
+ disassociate.return_value = db_fixed
+ floatingip = floating_ip.FloatingIP.disassociate(self.context,
+ '1.2.3.4')
+ disassociate.assert_called_with(self.context, '1.2.3.4')
+ self.assertEqual(db_fixed['id'], floatingip.fixed_ip.id)
+ self.assertEqual('1.2.3.4', str(floatingip.address))
+
+ @mock.patch('nova.db.floating_ip_update')
+ def test_save(self, update):
+ update.return_value = fake_floating_ip
+ floatingip = floating_ip.FloatingIP(context=self.context,
+ id=123, address='1.2.3.4',
+ host='foo')
+ floatingip.obj_reset_changes(['address', 'id'])
+ floatingip.save()
+ self.assertEqual(set(), floatingip.obj_what_changed())
+ update.assert_called_with(self.context, '1.2.3.4',
+ {'host': 'foo'})
+
+ def test_save_errors(self):
+ floatingip = floating_ip.FloatingIP(context=self.context,
+ id=123, host='foo')
+ floatingip.obj_reset_changes()
+        floatingip.address = '1.2.3.4'
+ self.assertRaises(exception.ObjectActionError, floatingip.save)
+
+ floatingip.obj_reset_changes()
+ floatingip.fixed_ip_id = 1
+ self.assertRaises(exception.ObjectActionError, floatingip.save)
+
+    @mock.patch('nova.db.floating_ip_update')
+    def test_save_no_fixedip(self, update):
+        update.return_value = fake_floating_ip
+        floatingip = floating_ip.FloatingIP(context=self.context,
+                                            id=123, address='1.2.3.4')
+        floatingip.obj_reset_changes(['address', 'id'])
+        floatingip.fixed_ip = objects.FixedIP(id=456)
+        floatingip.save(); self.assertNotIn('fixed_ip', update.call_args[0][2])
+
+ @mock.patch('nova.db.floating_ip_get_all')
+ def test_get_all(self, get):
+ get.return_value = [fake_floating_ip]
+ floatingips = floating_ip.FloatingIPList.get_all(self.context)
+ self.assertEqual(1, len(floatingips))
+ self._compare(floatingips[0], fake_floating_ip)
+ get.assert_called_with(self.context)
+
+ @mock.patch('nova.db.floating_ip_get_all_by_host')
+ def test_get_by_host(self, get):
+ get.return_value = [fake_floating_ip]
+ floatingips = floating_ip.FloatingIPList.get_by_host(self.context,
+ 'host')
+ self.assertEqual(1, len(floatingips))
+ self._compare(floatingips[0], fake_floating_ip)
+ get.assert_called_with(self.context, 'host')
+
+ @mock.patch('nova.db.floating_ip_get_all_by_project')
+ def test_get_by_project(self, get):
+ get.return_value = [fake_floating_ip]
+ floatingips = floating_ip.FloatingIPList.get_by_project(self.context,
+ 'project')
+ self.assertEqual(1, len(floatingips))
+ self._compare(floatingips[0], fake_floating_ip)
+ get.assert_called_with(self.context, 'project')
+
+ @mock.patch('nova.db.floating_ip_get_by_fixed_address')
+ def test_get_by_fixed_address(self, get):
+ get.return_value = [fake_floating_ip]
+ floatingips = floating_ip.FloatingIPList.get_by_fixed_address(
+ self.context, '1.2.3.4')
+ self.assertEqual(1, len(floatingips))
+ self._compare(floatingips[0], fake_floating_ip)
+ get.assert_called_with(self.context, '1.2.3.4')
+
+ @mock.patch('nova.db.floating_ip_get_by_fixed_ip_id')
+ def test_get_by_fixed_ip_id(self, get):
+ get.return_value = [fake_floating_ip]
+ floatingips = floating_ip.FloatingIPList.get_by_fixed_ip_id(
+ self.context, 123)
+ self.assertEqual(1, len(floatingips))
+ self._compare(floatingips[0], fake_floating_ip)
+ get.assert_called_with(self.context, 123)
+
+ @mock.patch('nova.db.instance_floating_address_get_all')
+ def test_get_addresses_by_instance(self, get_all):
+ expected = ['1.2.3.4', '4.5.6.7']
+ get_all.return_value = list(expected)
+ ips = floating_ip.FloatingIP.get_addresses_by_instance(
+ self.context, {'uuid': '1234'})
+ self.assertEqual(expected, ips)
+ get_all.assert_called_once_with(self.context, '1234')
+
+ def test_make_ip_info(self):
+ result = objects.FloatingIPList.make_ip_info('1.2.3.4', 'pool', 'eth0')
+ self.assertEqual({'address': '1.2.3.4', 'pool': 'pool',
+ 'interface': 'eth0'},
+ result)
+
+ @mock.patch('nova.db.floating_ip_bulk_create')
+ def test_bulk_create(self, create_mock):
+ def fake_create(ctxt, ip_info):
+ return [{'id': 1, 'address': ip['address'], 'fixed_ip_id': 1,
+ 'project_id': 'foo', 'host': 'host',
+ 'auto_assigned': False, 'pool': ip['pool'],
+ 'interface': ip['interface'], 'fixed_ip': None,
+ 'created_at': None, 'updated_at': None,
+ 'deleted_at': None, 'deleted': False}
+ for ip in ip_info]
+
+ create_mock.side_effect = fake_create
+ ips = [objects.FloatingIPList.make_ip_info('1.1.1.1', 'pool', 'eth0'),
+ objects.FloatingIPList.make_ip_info('1.1.1.2', 'loop', 'eth1')]
+ result = objects.FloatingIPList.create(None, ips)
+ self.assertIs(result, None)
+ result = objects.FloatingIPList.create(None, ips, want_result=True)
+ self.assertEqual('1.1.1.2', str(result[1].address))
+
+ @mock.patch('nova.db.floating_ip_bulk_destroy')
+ def test_bulk_destroy(self, destroy_mock):
+ ips = [{'address': '1.2.3.4'}, {'address': '4.5.6.7'}]
+ objects.FloatingIPList.destroy(None, ips)
+ destroy_mock.assert_called_once_with(None, ips)
+
+ def test_backport_fixedip_1_1(self):
+ floating = objects.FloatingIP()
+ fixed = objects.FixedIP()
+ floating.fixed_ip = fixed
+ primitive = floating.obj_to_primitive(target_version='1.1')
+ self.assertEqual('1.1',
+ primitive['nova_object.data']['fixed_ip']['nova_object.version'])
+
+
+class TestFloatingIPObject(test_objects._LocalTest,
+ _TestFloatingIPObject):
+ pass
+
+
+class TestRemoteFloatingIPObject(test_objects._RemoteTest,
+ _TestFloatingIPObject):
+ pass
diff --git a/nova/tests/unit/objects/test_hv_spec.py b/nova/tests/unit/objects/test_hv_spec.py
new file mode 100644
index 0000000000..94782cd3a1
--- /dev/null
+++ b/nova/tests/unit/objects/test_hv_spec.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute import arch
+from nova.compute import hvtype
+from nova.compute import vm_mode
+from nova import objects
+from nova.tests.unit.objects import test_objects
+
+
+spec_dict = {
+ 'arch': arch.I686,
+ 'hv_type': hvtype.KVM,
+ 'vm_mode': vm_mode.HVM
+}
+
+spec_list = [
+ arch.I686,
+ hvtype.KVM,
+ vm_mode.HVM
+]
+
+
+class _TestHVSpecObject(object):
+
+ def test_hv_spec_from_list(self):
+ spec_obj = objects.HVSpec.from_list(spec_list)
+ self.compare_obj(spec_obj, spec_dict)
+
+ def test_hv_spec_to_list(self):
+ spec_obj = objects.HVSpec()
+ spec_obj.arch = arch.I686
+ spec_obj.hv_type = hvtype.KVM
+ spec_obj.vm_mode = vm_mode.HVM
+ spec = spec_obj.to_list()
+ self.assertEqual(spec_list, spec)
+
+
+class TestHVSpecObject(test_objects._LocalTest,
+ _TestHVSpecObject):
+ pass
+
+
+class TestRemoteHVSpecObject(test_objects._RemoteTest,
+ _TestHVSpecObject):
+ pass
diff --git a/nova/tests/unit/objects/test_instance.py b/nova/tests/unit/objects/test_instance.py
new file mode 100644
index 0000000000..b24fd0143d
--- /dev/null
+++ b/nova/tests/unit/objects/test_instance.py
@@ -0,0 +1,1196 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+import iso8601
+import mock
+import mox
+import netaddr
+from oslo.utils import timeutils
+
+from nova.cells import rpcapi as cells_rpcapi
+from nova.compute import flavors
+from nova import db
+from nova import exception
+from nova.network import model as network_model
+from nova import notifications
+from nova import objects
+from nova.objects import instance
+from nova.objects import instance_info_cache
+from nova.objects import instance_numa_topology
+from nova.objects import pci_device
+from nova.objects import security_group
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit.objects import test_instance_fault
+from nova.tests.unit.objects import test_instance_info_cache
+from nova.tests.unit.objects import test_instance_numa_topology
+from nova.tests.unit.objects import test_instance_pci_requests
+from nova.tests.unit.objects import test_objects
+from nova.tests.unit.objects import test_security_group
+from nova import utils
+
+
+class _TestInstanceObject(object):
+ @property
+ def fake_instance(self):
+ fake_instance = fakes.stub_instance(id=2,
+ access_ipv4='1.2.3.4',
+ access_ipv6='::1')
+ fake_instance['cell_name'] = 'api!child'
+ fake_instance['scheduled_at'] = None
+ fake_instance['terminated_at'] = None
+ fake_instance['deleted_at'] = None
+ fake_instance['created_at'] = None
+ fake_instance['updated_at'] = None
+ fake_instance['launched_at'] = (
+ fake_instance['launched_at'].replace(
+ tzinfo=iso8601.iso8601.Utc(), microsecond=0))
+ fake_instance['deleted'] = False
+ fake_instance['info_cache']['instance_uuid'] = fake_instance['uuid']
+ fake_instance['security_groups'] = []
+ fake_instance['pci_devices'] = []
+ fake_instance['user_id'] = self.context.user_id
+ fake_instance['project_id'] = self.context.project_id
+ return fake_instance
+
+ def test_datetime_deserialization(self):
+ red_letter_date = timeutils.parse_isotime(
+ timeutils.isotime(datetime.datetime(1955, 11, 5)))
+ inst = instance.Instance(uuid='fake-uuid', launched_at=red_letter_date)
+ primitive = inst.obj_to_primitive()
+ expected = {'nova_object.name': 'Instance',
+ 'nova_object.namespace': 'nova',
+ 'nova_object.version': '1.16',
+ 'nova_object.data':
+ {'uuid': 'fake-uuid',
+ 'launched_at': '1955-11-05T00:00:00Z'},
+ 'nova_object.changes': ['launched_at', 'uuid']}
+ self.assertEqual(primitive, expected)
+ inst2 = instance.Instance.obj_from_primitive(primitive)
+ self.assertIsInstance(inst2.launched_at, datetime.datetime)
+ self.assertEqual(inst2.launched_at, red_letter_date)
+
+ def test_ip_deserialization(self):
+ inst = instance.Instance(uuid='fake-uuid', access_ip_v4='1.2.3.4',
+ access_ip_v6='::1')
+ primitive = inst.obj_to_primitive()
+ expected = {'nova_object.name': 'Instance',
+ 'nova_object.namespace': 'nova',
+ 'nova_object.version': '1.16',
+ 'nova_object.data':
+ {'uuid': 'fake-uuid',
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': '::1'},
+ 'nova_object.changes': ['uuid', 'access_ip_v6',
+ 'access_ip_v4']}
+ self.assertEqual(primitive, expected)
+ inst2 = instance.Instance.obj_from_primitive(primitive)
+ self.assertIsInstance(inst2.access_ip_v4, netaddr.IPAddress)
+ self.assertIsInstance(inst2.access_ip_v6, netaddr.IPAddress)
+ self.assertEqual(inst2.access_ip_v4, netaddr.IPAddress('1.2.3.4'))
+ self.assertEqual(inst2.access_ip_v6, netaddr.IPAddress('::1'))
+
+ def test_get_without_expected(self):
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ db.instance_get_by_uuid(self.context, 'uuid',
+ columns_to_join=[],
+ use_slave=False
+ ).AndReturn(self.fake_instance)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, 'uuid',
+ expected_attrs=[])
+ for attr in instance.INSTANCE_OPTIONAL_ATTRS:
+ self.assertFalse(inst.obj_attr_is_set(attr))
+ self.assertRemotes()
+
+ def test_get_with_expected(self):
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
+ self.mox.StubOutWithMock(
+ db, 'instance_extra_get_by_instance_uuid')
+
+ exp_cols = instance.INSTANCE_OPTIONAL_ATTRS[:]
+ exp_cols.remove('fault')
+ exp_cols.remove('numa_topology')
+ exp_cols.remove('pci_requests')
+
+ db.instance_get_by_uuid(
+ self.context, 'uuid',
+ columns_to_join=exp_cols,
+ use_slave=False
+ ).AndReturn(self.fake_instance)
+ fake_faults = test_instance_fault.fake_faults
+ db.instance_fault_get_by_instance_uuids(
+ self.context, [self.fake_instance['uuid']]
+ ).AndReturn(fake_faults)
+ fake_topology = test_instance_numa_topology.fake_db_topology
+ db.instance_extra_get_by_instance_uuid(
+ self.context, self.fake_instance['uuid'],
+ columns=['numa_topology']
+ ).AndReturn(fake_topology)
+ fake_requests = test_instance_pci_requests.fake_pci_requests
+ db.instance_extra_get_by_instance_uuid(
+ self.context, self.fake_instance['uuid'],
+ columns=['pci_requests']
+ ).AndReturn(fake_requests)
+
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(
+ self.context, 'uuid',
+ expected_attrs=instance.INSTANCE_OPTIONAL_ATTRS)
+ for attr in instance.INSTANCE_OPTIONAL_ATTRS:
+ self.assertTrue(inst.obj_attr_is_set(attr))
+ self.assertRemotes()
+
+ def test_get_by_id(self):
+ self.mox.StubOutWithMock(db, 'instance_get')
+ db.instance_get(self.context, 'instid',
+ columns_to_join=['info_cache',
+ 'security_groups']
+ ).AndReturn(self.fake_instance)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_id(self.context, 'instid')
+ self.assertEqual(inst.uuid, self.fake_instance['uuid'])
+ self.assertRemotes()
+
+ def test_load(self):
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ fake_uuid = self.fake_instance['uuid']
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(self.fake_instance)
+ fake_inst2 = dict(self.fake_instance,
+ system_metadata=[{'key': 'foo', 'value': 'bar'}])
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['system_metadata'],
+ use_slave=False
+ ).AndReturn(fake_inst2)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
+ self.assertFalse(hasattr(inst, '_system_metadata'))
+ sys_meta = inst.system_metadata
+ self.assertEqual(sys_meta, {'foo': 'bar'})
+ self.assertTrue(hasattr(inst, '_system_metadata'))
+ # Make sure we don't run load again
+ sys_meta2 = inst.system_metadata
+ self.assertEqual(sys_meta2, {'foo': 'bar'})
+ self.assertRemotes()
+
+ def test_load_invalid(self):
+ inst = instance.Instance(context=self.context, uuid='fake-uuid')
+ self.assertRaises(exception.ObjectActionError,
+ inst.obj_load_attr, 'foo')
+
+ def test_get_remote(self):
+ # isotime doesn't have microseconds and is always UTC
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ fake_instance = self.fake_instance
+ db.instance_get_by_uuid(self.context, 'fake-uuid',
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(fake_instance)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, 'fake-uuid')
+ self.assertEqual(inst.id, fake_instance['id'])
+ self.assertEqual(inst.launched_at, fake_instance['launched_at'])
+ self.assertEqual(str(inst.access_ip_v4),
+ fake_instance['access_ip_v4'])
+ self.assertEqual(str(inst.access_ip_v6),
+ fake_instance['access_ip_v6'])
+ self.assertRemotes()
+
+ def test_refresh(self):
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ fake_uuid = self.fake_instance['uuid']
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(dict(self.fake_instance,
+ host='orig-host'))
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(dict(self.fake_instance,
+ host='new-host'))
+ self.mox.StubOutWithMock(instance_info_cache.InstanceInfoCache,
+ 'refresh')
+ instance_info_cache.InstanceInfoCache.refresh()
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
+ self.assertEqual(inst.host, 'orig-host')
+ inst.refresh()
+ self.assertEqual(inst.host, 'new-host')
+ self.assertRemotes()
+ self.assertEqual(set([]), inst.obj_what_changed())
+
+ def test_refresh_does_not_recurse(self):
+ inst = instance.Instance(context=self.context, uuid='fake-uuid',
+ metadata={})
+ inst_copy = instance.Instance()
+ inst_copy.uuid = inst.uuid
+ self.mox.StubOutWithMock(instance.Instance, 'get_by_uuid')
+ instance.Instance.get_by_uuid(self.context, uuid=inst.uuid,
+ expected_attrs=['metadata'],
+ use_slave=False
+ ).AndReturn(inst_copy)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.OrphanedObjectError, inst.refresh)
+
+ def _save_test_helper(self, cell_type, save_kwargs):
+ """Common code for testing save() for cells/non-cells."""
+ if cell_type:
+ self.flags(enable=True, cell_type=cell_type, group='cells')
+ else:
+ self.flags(enable=False, group='cells')
+
+ old_ref = dict(self.fake_instance, host='oldhost', user_data='old',
+ vm_state='old', task_state='old')
+ fake_uuid = old_ref['uuid']
+
+ expected_updates = dict(vm_state='meow', task_state='wuff',
+ user_data='new')
+
+ new_ref = dict(old_ref, host='newhost', **expected_updates)
+ exp_vm_state = save_kwargs.get('expected_vm_state')
+ exp_task_state = save_kwargs.get('expected_task_state')
+ admin_reset = save_kwargs.get('admin_state_reset', False)
+ if exp_vm_state:
+ expected_updates['expected_vm_state'] = exp_vm_state
+ if exp_task_state:
+ if (exp_task_state == 'image_snapshot' and
+ 'instance_version' in save_kwargs and
+ save_kwargs['instance_version'] == '1.9'):
+ expected_updates['expected_task_state'] = [
+ 'image_snapshot', 'image_snapshot_pending']
+ else:
+ expected_updates['expected_task_state'] = exp_task_state
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(db, 'instance_info_cache_update')
+ cells_api_mock = self.mox.CreateMock(cells_rpcapi.CellsAPI)
+ self.mox.StubOutWithMock(cells_api_mock,
+ 'instance_update_at_top')
+ self.mox.StubOutWithMock(cells_api_mock,
+ 'instance_update_from_api')
+ self.mox.StubOutWithMock(cells_rpcapi, 'CellsAPI',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(notifications, 'send_update')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(old_ref)
+ db.instance_update_and_get_original(
+ self.context, fake_uuid, expected_updates,
+ update_cells=False,
+ columns_to_join=['info_cache', 'security_groups',
+ 'system_metadata']
+ ).AndReturn((old_ref, new_ref))
+ if cell_type == 'api':
+ cells_rpcapi.CellsAPI().AndReturn(cells_api_mock)
+ cells_api_mock.instance_update_from_api(
+ self.context, mox.IsA(instance.Instance),
+ exp_vm_state, exp_task_state, admin_reset)
+ elif cell_type == 'compute':
+ cells_rpcapi.CellsAPI().AndReturn(cells_api_mock)
+ cells_api_mock.instance_update_at_top(self.context, new_ref)
+ notifications.send_update(self.context, mox.IgnoreArg(),
+ mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ inst = instance.Instance.get_by_uuid(self.context, old_ref['uuid'])
+ if 'instance_version' in save_kwargs:
+ inst.VERSION = save_kwargs.pop('instance_version')
+ self.assertEqual('old', inst.task_state)
+ self.assertEqual('old', inst.vm_state)
+ self.assertEqual('old', inst.user_data)
+ inst.vm_state = 'meow'
+ inst.task_state = 'wuff'
+ inst.user_data = 'new'
+ inst.save(**save_kwargs)
+ self.assertEqual('newhost', inst.host)
+ self.assertEqual('meow', inst.vm_state)
+ self.assertEqual('wuff', inst.task_state)
+ self.assertEqual('new', inst.user_data)
+ self.assertEqual(set([]), inst.obj_what_changed())
+
+ def test_save(self):
+ self._save_test_helper(None, {})
+
+ def test_save_in_api_cell(self):
+ self._save_test_helper('api', {})
+
+ def test_save_in_compute_cell(self):
+ self._save_test_helper('compute', {})
+
+ def test_save_exp_vm_state(self):
+ self._save_test_helper(None, {'expected_vm_state': ['meow']})
+
+ def test_save_exp_task_state(self):
+ self._save_test_helper(None, {'expected_task_state': ['meow']})
+
+ def test_save_exp_task_state_havana(self):
+ self._save_test_helper(None, {
+ 'expected_task_state': 'image_snapshot',
+ 'instance_version': '1.9'})
+
+ def test_save_exp_vm_state_api_cell(self):
+ self._save_test_helper('api', {'expected_vm_state': ['meow']})
+
+ def test_save_exp_task_state_api_cell(self):
+ self._save_test_helper('api', {'expected_task_state': ['meow']})
+
+ def test_save_exp_task_state_api_cell_admin_reset(self):
+ self._save_test_helper('api', {'admin_state_reset': True})
+
+ def test_save_rename_sends_notification(self):
+ # Tests that simply changing the 'display_name' on the instance
+ # will send a notification.
+ self.flags(enable=False, group='cells')
+ old_ref = dict(self.fake_instance, display_name='hello')
+ fake_uuid = old_ref['uuid']
+ expected_updates = dict(display_name='goodbye')
+ new_ref = dict(old_ref, **expected_updates)
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(notifications, 'send_update')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(old_ref)
+ db.instance_update_and_get_original(
+ self.context, fake_uuid, expected_updates, update_cells=False,
+ columns_to_join=['info_cache', 'security_groups',
+ 'system_metadata']
+ ).AndReturn((old_ref, new_ref))
+ notifications.send_update(self.context, mox.IgnoreArg(),
+ mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ inst = instance.Instance.get_by_uuid(self.context, old_ref['uuid'],
+ use_slave=False)
+ self.assertEqual('hello', inst.display_name)
+ inst.display_name = 'goodbye'
+ inst.save()
+ self.assertEqual('goodbye', inst.display_name)
+ self.assertEqual(set([]), inst.obj_what_changed())
+
+ @mock.patch('nova.db.instance_update_and_get_original')
+ @mock.patch('nova.objects.Instance._from_db_object')
+ def test_save_does_not_refresh_pci_devices(self, mock_fdo, mock_update):
+ # NOTE(danms): This tests that we don't update the pci_devices
+ # field from the contents of the database. This is not because we
+ # don't necessarily want to, but because the way pci_devices is
+ # currently implemented it causes versioning issues. When that is
+ # resolved, this test should go away.
+ mock_update.return_value = None, None
+ inst = instance.Instance(context=self.context, id=123)
+ inst.uuid = 'foo'
+ inst.pci_devices = pci_device.PciDeviceList()
+ inst.save()
+ self.assertNotIn('pci_devices',
+ mock_fdo.call_args_list[0][1]['expected_attrs'])
+
+ def test_get_deleted(self):
+ fake_inst = dict(self.fake_instance, id=123, deleted=123)
+ fake_uuid = fake_inst['uuid']
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
+ # NOTE(danms): Make sure it's actually a bool
+        self.assertIs(inst.deleted, True)
+
+ def test_get_not_cleaned(self):
+ fake_inst = dict(self.fake_instance, id=123, cleaned=None)
+ fake_uuid = fake_inst['uuid']
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
+ # NOTE(mikal): Make sure it's actually a bool
+        self.assertIs(inst.cleaned, False)
+
+ def test_get_cleaned(self):
+ fake_inst = dict(self.fake_instance, id=123, cleaned=1)
+ fake_uuid = fake_inst['uuid']
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
+ # NOTE(mikal): Make sure it's actually a bool
+        self.assertIs(inst.cleaned, True)
+
+ def test_with_info_cache(self):
+ fake_inst = dict(self.fake_instance)
+ fake_uuid = fake_inst['uuid']
+ nwinfo1 = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
+ nwinfo2 = network_model.NetworkInfo.hydrate([{'address': 'bar'}])
+ nwinfo1_json = nwinfo1.json()
+ nwinfo2_json = nwinfo2.json()
+ fake_inst['info_cache'] = dict(
+ test_instance_info_cache.fake_info_cache,
+ network_info=nwinfo1_json,
+ instance_uuid=fake_uuid)
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(db, 'instance_info_cache_update')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ db.instance_info_cache_update(self.context, fake_uuid,
+ {'network_info': nwinfo2_json})
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
+ self.assertEqual(inst.info_cache.network_info, nwinfo1)
+ self.assertEqual(inst.info_cache.instance_uuid, fake_uuid)
+ inst.info_cache.network_info = nwinfo2
+ inst.save()
+
+ def test_with_info_cache_none(self):
+ fake_inst = dict(self.fake_instance, info_cache=None)
+ fake_uuid = fake_inst['uuid']
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
+ ['info_cache'])
+ self.assertIsNone(inst.info_cache)
+
+ def test_with_security_groups(self):
+ fake_inst = dict(self.fake_instance)
+ fake_uuid = fake_inst['uuid']
+ fake_inst['security_groups'] = [
+ {'id': 1, 'name': 'secgroup1', 'description': 'fake-desc',
+ 'user_id': 'fake-user', 'project_id': 'fake_project',
+ 'created_at': None, 'updated_at': None, 'deleted_at': None,
+ 'deleted': False},
+ {'id': 2, 'name': 'secgroup2', 'description': 'fake-desc',
+ 'user_id': 'fake-user', 'project_id': 'fake_project',
+ 'created_at': None, 'updated_at': None, 'deleted_at': None,
+ 'deleted': False},
+ ]
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(db, 'security_group_update')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ db.security_group_update(self.context, 1, {'description': 'changed'}
+ ).AndReturn(fake_inst['security_groups'][0])
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
+ self.assertEqual(len(inst.security_groups), 2)
+ for index, group in enumerate(fake_inst['security_groups']):
+ for key in group:
+ self.assertEqual(group[key],
+ inst.security_groups[index][key])
+ self.assertIsInstance(inst.security_groups[index],
+ security_group.SecurityGroup)
+ self.assertEqual(inst.security_groups.obj_what_changed(), set())
+ inst.security_groups[0].description = 'changed'
+ inst.save()
+ self.assertEqual(inst.security_groups.obj_what_changed(), set())
+
+ def test_with_empty_security_groups(self):
+ fake_inst = dict(self.fake_instance, security_groups=[])
+ fake_uuid = fake_inst['uuid']
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
+ self.assertEqual(0, len(inst.security_groups))
+
+ def test_with_empty_pci_devices(self):
+ fake_inst = dict(self.fake_instance, pci_devices=[])
+ fake_uuid = fake_inst['uuid']
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['pci_devices'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
+ ['pci_devices'])
+ self.assertEqual(len(inst.pci_devices), 0)
+
+ def test_with_pci_devices(self):
+ fake_inst = dict(self.fake_instance)
+ fake_uuid = fake_inst['uuid']
+ fake_inst['pci_devices'] = [
+ {'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': None,
+ 'id': 2,
+ 'compute_node_id': 1,
+ 'address': 'a1',
+ 'vendor_id': 'v1',
+ 'product_id': 'p1',
+ 'dev_type': 't',
+ 'status': 'allocated',
+ 'dev_id': 'i',
+ 'label': 'l',
+ 'instance_uuid': fake_uuid,
+ 'request_id': None,
+ 'extra_info': '{}'},
+ {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': None,
+ 'id': 1,
+ 'compute_node_id': 1,
+ 'address': 'a',
+ 'vendor_id': 'v',
+ 'product_id': 'p',
+ 'dev_type': 't',
+ 'status': 'allocated',
+ 'dev_id': 'i',
+ 'label': 'l',
+ 'instance_uuid': fake_uuid,
+ 'request_id': None,
+ 'extra_info': '{}'},
+ ]
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=['pci_devices'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
+ ['pci_devices'])
+ self.assertEqual(len(inst.pci_devices), 2)
+ self.assertEqual(inst.pci_devices[0].instance_uuid, fake_uuid)
+ self.assertEqual(inst.pci_devices[1].instance_uuid, fake_uuid)
+
+ def test_with_fault(self):
+ fake_inst = dict(self.fake_instance)
+ fake_uuid = fake_inst['uuid']
+ fake_faults = [dict(x, instance_uuid=fake_uuid)
+ for x in test_instance_fault.fake_faults['fake-uuid']]
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
+ db.instance_get_by_uuid(self.context, fake_uuid,
+ columns_to_join=[],
+ use_slave=False
+ ).AndReturn(self.fake_instance)
+ db.instance_fault_get_by_instance_uuids(
+ self.context, [fake_uuid]).AndReturn({fake_uuid: fake_faults})
+ self.mox.ReplayAll()
+ inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
+ expected_attrs=['fault'])
+ self.assertEqual(fake_faults[0], dict(inst.fault.items()))
+ self.assertRemotes()
+
+ def test_iteritems_with_extra_attrs(self):
+ self.stubs.Set(instance.Instance, 'name', 'foo')
+ inst = instance.Instance(uuid='fake-uuid')
+ self.assertEqual(inst.items(),
+ {'uuid': 'fake-uuid',
+ 'name': 'foo',
+ }.items())
+
+ def _test_metadata_change_tracking(self, which):
+ inst = instance.Instance(uuid='fake-uuid')
+ setattr(inst, which, {})
+ inst.obj_reset_changes()
+ getattr(inst, which)['foo'] = 'bar'
+ self.assertEqual(set([which]), inst.obj_what_changed())
+ inst.obj_reset_changes()
+ self.assertEqual(set(), inst.obj_what_changed())
+
+ def test_metadata_change_tracking(self):
+ self._test_metadata_change_tracking('metadata')
+
+ def test_system_metadata_change_tracking(self):
+ self._test_metadata_change_tracking('system_metadata')
+
+ def test_create_stubbed(self):
+ self.mox.StubOutWithMock(db, 'instance_create')
+ vals = {'host': 'foo-host',
+ 'memory_mb': 128,
+ 'system_metadata': {'foo': 'bar'}}
+ fake_inst = fake_instance.fake_db_instance(**vals)
+ db.instance_create(self.context, vals).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ inst = instance.Instance(host='foo-host', memory_mb=128,
+ system_metadata={'foo': 'bar'})
+ inst.create(self.context)
+
+ def test_create(self):
+ self.mox.StubOutWithMock(db, 'instance_create')
+ db.instance_create(self.context, {}).AndReturn(self.fake_instance)
+ self.mox.ReplayAll()
+ inst = instance.Instance()
+ inst.create(self.context)
+ self.assertEqual(self.fake_instance['id'], inst.id)
+
+ def test_create_with_values(self):
+ inst1 = instance.Instance(user_id=self.context.user_id,
+ project_id=self.context.project_id,
+ host='foo-host')
+ inst1.create(self.context)
+ self.assertEqual(inst1.host, 'foo-host')
+ inst2 = instance.Instance.get_by_uuid(self.context, inst1.uuid)
+ self.assertEqual(inst2.host, 'foo-host')
+
+ def test_create_with_numa_topology(self):
+ inst = instance.Instance(uuid=self.fake_instance['uuid'],
+ numa_topology=instance_numa_topology.InstanceNUMATopology
+ .obj_from_topology(
+ test_instance_numa_topology.fake_numa_topology))
+
+ inst.create(self.context)
+ self.assertIsNotNone(inst.numa_topology)
+ got_numa_topo = (
+ instance_numa_topology.InstanceNUMATopology
+ .get_by_instance_uuid(self.context, inst.uuid))
+ self.assertEqual(inst.numa_topology.id, got_numa_topo.id)
+
+ def test_recreate_fails(self):
+ inst = instance.Instance(user_id=self.context.user_id,
+ project_id=self.context.project_id,
+ host='foo-host')
+ inst.create(self.context)
+ self.assertRaises(exception.ObjectActionError, inst.create,
+ self.context)
+
+ def test_create_with_special_things(self):
+ self.mox.StubOutWithMock(db, 'instance_create')
+ fake_inst = fake_instance.fake_db_instance()
+ db.instance_create(self.context,
+ {'host': 'foo-host',
+ 'security_groups': ['foo', 'bar'],
+ 'info_cache': {'network_info': '[]'},
+ }
+ ).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ secgroups = security_group.SecurityGroupList()
+ secgroups.objects = []
+ for name in ('foo', 'bar'):
+ secgroup = security_group.SecurityGroup()
+ secgroup.name = name
+ secgroups.objects.append(secgroup)
+ info_cache = instance_info_cache.InstanceInfoCache()
+ info_cache.network_info = network_model.NetworkInfo()
+ inst = instance.Instance(host='foo-host', security_groups=secgroups,
+ info_cache=info_cache)
+ inst.create(self.context)
+
+ def test_destroy_stubbed(self):
+ self.mox.StubOutWithMock(db, 'instance_destroy')
+ deleted_at = datetime.datetime(1955, 11, 6)
+ fake_inst = fake_instance.fake_db_instance(deleted_at=deleted_at,
+ deleted=True)
+ db.instance_destroy(self.context, 'fake-uuid',
+ constraint=None).AndReturn(fake_inst)
+ self.mox.ReplayAll()
+ inst = instance.Instance(id=1, uuid='fake-uuid', host='foo')
+ inst.destroy(self.context)
+ self.assertEqual(timeutils.normalize_time(inst.deleted_at),
+ timeutils.normalize_time(deleted_at))
+ self.assertTrue(inst.deleted)
+
+ def test_destroy(self):
+ values = {'user_id': self.context.user_id,
+ 'project_id': self.context.project_id}
+ db_inst = db.instance_create(self.context, values)
+ inst = instance.Instance(id=db_inst['id'], uuid=db_inst['uuid'])
+ inst.destroy(self.context)
+ self.assertRaises(exception.InstanceNotFound,
+ db.instance_get_by_uuid, self.context,
+ db_inst['uuid'])
+
+ def test_destroy_host_constraint(self):
+ values = {'user_id': self.context.user_id,
+ 'project_id': self.context.project_id,
+ 'host': 'foo'}
+ db_inst = db.instance_create(self.context, values)
+ inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
+ inst.host = None
+ self.assertRaises(exception.ObjectActionError,
+ inst.destroy)
+
+ def test_name_does_not_trigger_lazy_loads(self):
+ values = {'user_id': self.context.user_id,
+ 'project_id': self.context.project_id,
+ 'host': 'foo'}
+ db_inst = db.instance_create(self.context, values)
+ inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
+ self.assertFalse(inst.obj_attr_is_set('fault'))
+ self.flags(instance_name_template='foo-%(uuid)s')
+ self.assertEqual('foo-%s' % db_inst['uuid'], inst.name)
+ self.assertFalse(inst.obj_attr_is_set('fault'))
+
+ def test_from_db_object_not_overwrite_info_cache(self):
+ info_cache = instance_info_cache.InstanceInfoCache()
+ inst = instance.Instance(context=self.context,
+ info_cache=info_cache)
+ db_inst = fake_instance.fake_db_instance()
+ db_inst['info_cache'] = dict(
+ test_instance_info_cache.fake_info_cache)
+ inst._from_db_object(self.context, inst, db_inst,
+ expected_attrs=['info_cache'])
+ self.assertIs(info_cache, inst.info_cache)
+
+ def test_compat_strings(self):
+ unicode_attributes = ['user_id', 'project_id', 'image_ref',
+ 'kernel_id', 'ramdisk_id', 'hostname',
+ 'key_name', 'key_data', 'host', 'node',
+ 'user_data', 'availability_zone',
+ 'display_name', 'display_description',
+ 'launched_on', 'locked_by', 'os_type',
+ 'architecture', 'vm_mode', 'root_device_name',
+ 'default_ephemeral_device',
+ 'default_swap_device', 'config_drive',
+ 'cell_name']
+ inst = instance.Instance()
+ expected = {}
+ for key in unicode_attributes:
+ inst[key] = u'\u2603'
+ expected[key] = '?'
+ primitive = inst.obj_to_primitive(target_version='1.6')
+ self.assertEqual(expected, primitive['nova_object.data'])
+ self.assertEqual('1.6', primitive['nova_object.version'])
+
+ def test_compat_pci_devices(self):
+ inst = instance.Instance()
+ inst.pci_devices = pci_device.PciDeviceList()
+ primitive = inst.obj_to_primitive(target_version='1.5')
+ self.assertNotIn('pci_devices', primitive)
+
+ def test_compat_info_cache(self):
+ inst = instance.Instance()
+ inst.info_cache = instance_info_cache.InstanceInfoCache()
+ primitive = inst.obj_to_primitive(target_version='1.9')
+ self.assertEqual(
+ '1.4',
+ primitive['nova_object.data']['info_cache']['nova_object.version'])
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
+ def test_get_with_pci_requests(self, mock_get):
+ mock_get.return_value = objects.InstancePCIRequests()
+ db_instance = db.instance_create(self.context, {
+ 'user_id': self.context.user_id,
+ 'project_id': self.context.project_id})
+ instance = objects.Instance.get_by_uuid(
+ self.context, db_instance['uuid'],
+ expected_attrs=['pci_requests'])
+ self.assertTrue(instance.obj_attr_is_set('pci_requests'))
+ self.assertIsNotNone(instance.pci_requests)
+
+ def _test_get_flavor(self, namespace):
+ prefix = '%s_' % namespace if namespace is not None else ''
+ db_inst = db.instance_create(self.context, {
+ 'user_id': self.context.user_id,
+ 'project_id': self.context.project_id,
+ 'system_metadata': flavors.save_flavor_info(
+ {}, flavors.get_default_flavor(), prefix)})
+ db_flavor = flavors.extract_flavor(db_inst, prefix)
+ inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
+ flavor = inst.get_flavor(namespace)
+ self.assertEqual(db_flavor['flavorid'], flavor.flavorid)
+
+ def test_get_flavor(self):
+ self._test_get_flavor(None)
+ self._test_get_flavor('foo')
+
+ def _test_set_flavor(self, namespace):
+ prefix = '%s_' % namespace if namespace is not None else ''
+ db_inst = db.instance_create(self.context, {
+ 'user_id': self.context.user_id,
+ 'project_id': self.context.project_id,
+ })
+ inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
+ db_flavor = flavors.get_default_flavor()
+ inst.set_flavor(db_flavor, namespace)
+ db_inst = db.instance_get(self.context, db_inst['id'])
+ self.assertEqual(
+ db_flavor['flavorid'], flavors.extract_flavor(
+ db_inst, prefix)['flavorid'])
+
+ def test_set_flavor(self):
+ self._test_set_flavor(None)
+ self._test_set_flavor('foo')
+
+ def test_delete_flavor(self):
+ namespace = 'foo'
+ prefix = '%s_' % namespace
+ db_inst = db.instance_create(self.context, {
+ 'user_id': self.context.user_id,
+ 'project_id': self.context.project_id,
+ 'system_metadata': flavors.save_flavor_info(
+ {}, flavors.get_default_flavor(), prefix)})
+ inst = instance.Instance.get_by_uuid(self.context, db_inst['uuid'])
+ inst.delete_flavor(namespace)
+ db_inst = db.instance_get(self.context, db_inst['id'])
+ self.assertEqual({}, utils.instance_sys_meta(db_inst))
+
+ def test_delete_flavor_no_namespace_fails(self):
+ inst = instance.Instance(system_metadata={})
+ self.assertRaises(KeyError, inst.delete_flavor, None)
+ self.assertRaises(KeyError, inst.delete_flavor, '')
+
+ @mock.patch.object(db, 'instance_metadata_delete')
+ def test_delete_metadata_key(self, db_delete):
+ inst = instance.Instance(context=self.context,
+ id=1, uuid='fake-uuid')
+ inst.metadata = {'foo': '1', 'bar': '2'}
+ inst.obj_reset_changes()
+ inst.delete_metadata_key('foo')
+ self.assertEqual({'bar': '2'}, inst.metadata)
+ self.assertEqual({}, inst.obj_get_changes())
+ db_delete.assert_called_once_with(self.context, inst.uuid, 'foo')
+
+ def test_reset_changes(self):
+ inst = instance.Instance()
+ inst.metadata = {'1985': 'present'}
+ inst.system_metadata = {'1955': 'past'}
+ self.assertEqual({}, inst._orig_metadata)
+ inst.obj_reset_changes(['metadata'])
+ self.assertEqual({'1985': 'present'}, inst._orig_metadata)
+ self.assertEqual({}, inst._orig_system_metadata)
+
+ def test_load_generic_calls_handler(self):
+ inst = instance.Instance(context=self.context,
+ uuid='fake-uuid')
+ with mock.patch.object(inst, '_load_generic') as mock_load:
+ def fake_load(name):
+ inst.system_metadata = {}
+
+ mock_load.side_effect = fake_load
+ inst.system_metadata
+ mock_load.assert_called_once_with('system_metadata')
+
+ def test_load_fault_calls_handler(self):
+ inst = instance.Instance(context=self.context,
+ uuid='fake-uuid')
+ with mock.patch.object(inst, '_load_fault') as mock_load:
+ def fake_load():
+ inst.fault = None
+
+ mock_load.side_effect = fake_load
+ inst.fault
+ mock_load.assert_called_once_with()
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ def test_load_generic(self, mock_get):
+ inst2 = instance.Instance(metadata={'foo': 'bar'})
+ mock_get.return_value = inst2
+ inst = instance.Instance(context=self.context,
+ uuid='fake-uuid')
+ inst.metadata
+ self.assertEqual({'foo': 'bar'}, inst.metadata)
+ mock_get.assert_called_once_with(self.context,
+ uuid='fake-uuid',
+ expected_attrs=['metadata'])
+ self.assertNotIn('metadata', inst.obj_what_changed())
+
+ @mock.patch('nova.db.instance_fault_get_by_instance_uuids')
+ def test_load_fault(self, mock_get):
+ fake_fault = test_instance_fault.fake_faults['fake-uuid'][0]
+ mock_get.return_value = {'fake': [fake_fault]}
+ inst = instance.Instance(context=self.context, uuid='fake')
+ fault = inst.fault
+ mock_get.assert_called_once_with(self.context, ['fake'])
+ self.assertEqual(fake_fault['id'], fault.id)
+ self.assertNotIn('metadata', inst.obj_what_changed())
+
+
+class TestInstanceObject(test_objects._LocalTest,
+ _TestInstanceObject):
+ pass
+
+
+class TestRemoteInstanceObject(test_objects._RemoteTest,
+ _TestInstanceObject):
+ pass
+
+
+class _TestInstanceListObject(object):
+ def fake_instance(self, id, updates=None):
+ fake_instance = fakes.stub_instance(id=2,
+ access_ipv4='1.2.3.4',
+ access_ipv6='::1')
+ fake_instance['scheduled_at'] = None
+ fake_instance['terminated_at'] = None
+ fake_instance['deleted_at'] = None
+ fake_instance['created_at'] = None
+ fake_instance['updated_at'] = None
+ fake_instance['launched_at'] = (
+ fake_instance['launched_at'].replace(
+ tzinfo=iso8601.iso8601.Utc(), microsecond=0))
+ fake_instance['info_cache'] = {'network_info': '[]',
+ 'instance_uuid': fake_instance['uuid']}
+ fake_instance['security_groups'] = []
+ fake_instance['deleted'] = 0
+ if updates:
+ fake_instance.update(updates)
+ return fake_instance
+
+ def test_get_all_by_filters(self):
+ fakes = [self.fake_instance(1), self.fake_instance(2)]
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
+ db.instance_get_all_by_filters(self.context, {'foo': 'bar'}, 'uuid',
+ 'asc', limit=None, marker=None,
+ columns_to_join=['metadata'],
+ use_slave=False).AndReturn(fakes)
+ self.mox.ReplayAll()
+ inst_list = instance.InstanceList.get_by_filters(
+ self.context, {'foo': 'bar'}, 'uuid', 'asc',
+ expected_attrs=['metadata'], use_slave=False)
+
+ for i in range(0, len(fakes)):
+ self.assertIsInstance(inst_list.objects[i], instance.Instance)
+ self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
+ self.assertRemotes()
+
+ def test_get_all_by_filters_works_for_cleaned(self):
+ fakes = [self.fake_instance(1),
+ self.fake_instance(2, updates={'deleted': 2,
+ 'cleaned': None})]
+ self.context.read_deleted = 'yes'
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
+ db.instance_get_all_by_filters(self.context,
+ {'deleted': True, 'cleaned': False},
+ 'uuid', 'asc', limit=None, marker=None,
+ columns_to_join=['metadata'],
+ use_slave=False).AndReturn(
+ [fakes[1]])
+ self.mox.ReplayAll()
+ inst_list = instance.InstanceList.get_by_filters(
+ self.context, {'deleted': True, 'cleaned': False}, 'uuid', 'asc',
+ expected_attrs=['metadata'], use_slave=False)
+
+ self.assertEqual(1, len(inst_list))
+ self.assertIsInstance(inst_list.objects[0], instance.Instance)
+ self.assertEqual(inst_list.objects[0].uuid, fakes[1]['uuid'])
+ self.assertRemotes()
+
+ def test_get_by_host(self):
+ fakes = [self.fake_instance(1),
+ self.fake_instance(2)]
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+ db.instance_get_all_by_host(self.context, 'foo',
+ columns_to_join=None,
+ use_slave=False).AndReturn(fakes)
+ self.mox.ReplayAll()
+ inst_list = instance.InstanceList.get_by_host(self.context, 'foo')
+ for i in range(0, len(fakes)):
+ self.assertIsInstance(inst_list.objects[i], instance.Instance)
+ self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
+ self.assertEqual(inst_list.objects[i]._context, self.context)
+ self.assertEqual(inst_list.obj_what_changed(), set())
+ self.assertRemotes()
+
+ def test_get_by_host_and_node(self):
+ fakes = [self.fake_instance(1),
+ self.fake_instance(2)]
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
+ db.instance_get_all_by_host_and_node(self.context, 'foo', 'bar'
+ ).AndReturn(fakes)
+ self.mox.ReplayAll()
+ inst_list = instance.InstanceList.get_by_host_and_node(self.context,
+ 'foo', 'bar')
+ for i in range(0, len(fakes)):
+ self.assertIsInstance(inst_list.objects[i], instance.Instance)
+ self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
+ self.assertRemotes()
+
+ def test_get_by_host_and_not_type(self):
+ fakes = [self.fake_instance(1),
+ self.fake_instance(2)]
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_not_type')
+ db.instance_get_all_by_host_and_not_type(self.context, 'foo',
+ type_id='bar').AndReturn(
+ fakes)
+ self.mox.ReplayAll()
+ inst_list = instance.InstanceList.get_by_host_and_not_type(
+ self.context, 'foo', 'bar')
+ for i in range(0, len(fakes)):
+ self.assertIsInstance(inst_list.objects[i], instance.Instance)
+ self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
+ self.assertRemotes()
+
+ def test_get_hung_in_rebooting(self):
+ fakes = [self.fake_instance(1),
+ self.fake_instance(2)]
+ dt = timeutils.isotime()
+ self.mox.StubOutWithMock(db, 'instance_get_all_hung_in_rebooting')
+ db.instance_get_all_hung_in_rebooting(self.context, dt).AndReturn(
+ fakes)
+ self.mox.ReplayAll()
+ inst_list = instance.InstanceList.get_hung_in_rebooting(self.context,
+ dt)
+ for i in range(0, len(fakes)):
+ self.assertIsInstance(inst_list.objects[i], instance.Instance)
+ self.assertEqual(inst_list.objects[i].uuid, fakes[i]['uuid'])
+ self.assertRemotes()
+
+ def test_get_active_by_window_joined(self):
+ fakes = [self.fake_instance(1), self.fake_instance(2)]
+ # NOTE(mriedem): Send in a timezone-naive datetime since the
+ # InstanceList.get_active_by_window_joined method should convert it
+ # to tz-aware for the DB API call, which we'll assert with our stub.
+ dt = timeutils.utcnow()
+
+ def fake_instance_get_active_by_window_joined(context, begin, end,
+ project_id, host):
+ # make sure begin is tz-aware
+ self.assertIsNotNone(begin.utcoffset())
+ self.assertIsNone(end)
+ return fakes
+
+ with mock.patch.object(db, 'instance_get_active_by_window_joined',
+ fake_instance_get_active_by_window_joined):
+ inst_list = instance.InstanceList.get_active_by_window_joined(
+ self.context, dt)
+
+ for fake, obj in zip(fakes, inst_list.objects):
+ self.assertIsInstance(obj, instance.Instance)
+ self.assertEqual(obj.uuid, fake['uuid'])
+ self.assertRemotes()
+
+ def test_with_fault(self):
+ fake_insts = [
+ fake_instance.fake_db_instance(uuid='fake-uuid', host='host'),
+ fake_instance.fake_db_instance(uuid='fake-inst2', host='host'),
+ ]
+ fake_faults = test_instance_fault.fake_faults
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+ self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
+ db.instance_get_all_by_host(self.context, 'host',
+ columns_to_join=[],
+ use_slave=False
+ ).AndReturn(fake_insts)
+ db.instance_fault_get_by_instance_uuids(
+ self.context, [x['uuid'] for x in fake_insts]
+ ).AndReturn(fake_faults)
+ self.mox.ReplayAll()
+ instances = instance.InstanceList.get_by_host(self.context, 'host',
+ expected_attrs=['fault'],
+ use_slave=False)
+ self.assertEqual(2, len(instances))
+ self.assertEqual(fake_faults['fake-uuid'][0],
+ dict(instances[0].fault.iteritems()))
+ self.assertIsNone(instances[1].fault)
+
+ def test_fill_faults(self):
+ self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
+
+ inst1 = instance.Instance(uuid='uuid1')
+ inst2 = instance.Instance(uuid='uuid2')
+ insts = [inst1, inst2]
+ for inst in insts:
+ inst.obj_reset_changes()
+ db_faults = {
+ 'uuid1': [{'id': 123,
+ 'instance_uuid': 'uuid1',
+ 'code': 456,
+ 'message': 'Fake message',
+ 'details': 'No details',
+ 'host': 'foo',
+ 'deleted': False,
+ 'deleted_at': None,
+ 'updated_at': None,
+ 'created_at': None,
+ }
+ ]}
+
+ db.instance_fault_get_by_instance_uuids(self.context,
+ [x.uuid for x in insts],
+ ).AndReturn(db_faults)
+ self.mox.ReplayAll()
+ inst_list = instance.InstanceList()
+ inst_list._context = self.context
+ inst_list.objects = insts
+ faulty = inst_list.fill_faults()
+ self.assertEqual(faulty, ['uuid1'])
+ self.assertEqual(inst_list[0].fault.message,
+ db_faults['uuid1'][0]['message'])
+ self.assertIsNone(inst_list[1].fault)
+ for inst in inst_list:
+ self.assertEqual(inst.obj_what_changed(), set())
+
+ def test_get_by_security_group(self):
+ fake_secgroup = dict(test_security_group.fake_secgroup)
+ fake_secgroup['instances'] = [
+ fake_instance.fake_db_instance(id=1,
+ system_metadata={'foo': 'bar'}),
+ fake_instance.fake_db_instance(id=2),
+ ]
+
+ with mock.patch.object(db, 'security_group_get') as sgg:
+ sgg.return_value = fake_secgroup
+ secgroup = security_group.SecurityGroup()
+ secgroup.id = fake_secgroup['id']
+ instances = instance.InstanceList.get_by_security_group(
+ self.context, secgroup)
+
+ self.assertEqual(2, len(instances))
+ self.assertEqual([1, 2], [x.id for x in instances])
+ self.assertTrue(instances[0].obj_attr_is_set('system_metadata'))
+ self.assertEqual({'foo': 'bar'}, instances[0].system_metadata)
+
+
+class TestInstanceListObject(test_objects._LocalTest,
+ _TestInstanceListObject):
+ pass
+
+
+class TestRemoteInstanceListObject(test_objects._RemoteTest,
+ _TestInstanceListObject):
+ pass
+
+
+class TestInstanceObjectMisc(test.NoDBTestCase):
+ def test_expected_cols(self):
+ self.stubs.Set(instance, '_INSTANCE_OPTIONAL_JOINED_FIELDS', ['bar'])
+ self.assertEqual(['bar'], instance._expected_cols(['foo', 'bar']))
+ self.assertIsNone(instance._expected_cols(None))
diff --git a/nova/tests/unit/objects/test_instance_action.py b/nova/tests/unit/objects/test_instance_action.py
new file mode 100644
index 0000000000..488ba6fa2a
--- /dev/null
+++ b/nova/tests/unit/objects/test_instance_action.py
@@ -0,0 +1,365 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import traceback
+
+import mock
+from oslo.utils import timeutils
+
+from nova import db
+from nova.objects import instance_action
+from nova import test
+from nova.tests.unit.objects import test_objects
+
+
+NOW = timeutils.utcnow().replace(microsecond=0)
+fake_action = {
+ 'created_at': NOW,
+ 'deleted_at': None,
+ 'updated_at': None,
+ 'deleted': False,
+ 'id': 123,
+ 'action': 'fake-action',
+ 'instance_uuid': 'fake-uuid',
+ 'request_id': 'fake-request',
+ 'user_id': 'fake-user',
+ 'project_id': 'fake-project',
+ 'start_time': NOW,
+ 'finish_time': None,
+ 'message': 'foo',
+}
+fake_event = {
+ 'created_at': NOW,
+ 'deleted_at': None,
+ 'updated_at': None,
+ 'deleted': False,
+ 'id': 123,
+ 'event': 'fake-event',
+ 'action_id': 123,
+ 'start_time': NOW,
+ 'finish_time': None,
+ 'result': 'fake-result',
+ 'traceback': 'fake-tb',
+}
+
+
+class _TestInstanceActionObject(object):
+ @mock.patch.object(db, 'action_get_by_request_id')
+ def test_get_by_request_id(self, mock_get):
+ context = self.context
+ mock_get.return_value = fake_action
+ action = instance_action.InstanceAction.get_by_request_id(
+ context, 'fake-uuid', 'fake-request')
+ self.compare_obj(action, fake_action)
+ mock_get.assert_called_once_with(context,
+ 'fake-uuid', 'fake-request')
+
+ def test_pack_action_start(self):
+ values = instance_action.InstanceAction.pack_action_start(
+ self.context, 'fake-uuid', 'fake-action')
+ self.assertEqual(values['request_id'], self.context.request_id)
+ self.assertEqual(values['user_id'], self.context.user_id)
+ self.assertEqual(values['project_id'], self.context.project_id)
+ self.assertEqual(values['instance_uuid'], 'fake-uuid')
+ self.assertEqual(values['action'], 'fake-action')
+ self.assertEqual(values['start_time'].replace(tzinfo=None),
+ self.context.timestamp)
+
+ def test_pack_action_finish(self):
+ timeutils.set_time_override(override_time=NOW)
+ values = instance_action.InstanceAction.pack_action_finish(
+ self.context, 'fake-uuid')
+ self.assertEqual(values['request_id'], self.context.request_id)
+ self.assertEqual(values['instance_uuid'], 'fake-uuid')
+ self.assertEqual(values['finish_time'].replace(tzinfo=None), NOW)
+
+ @mock.patch.object(db, 'action_start')
+ def test_action_start(self, mock_start):
+ test_class = instance_action.InstanceAction
+ expected_packed_values = test_class.pack_action_start(
+ self.context, 'fake-uuid', 'fake-action')
+ mock_start.return_value = fake_action
+ action = instance_action.InstanceAction.action_start(
+ self.context, 'fake-uuid', 'fake-action', want_result=True)
+ mock_start.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.compare_obj(action, fake_action)
+
+ @mock.patch.object(db, 'action_start')
+ def test_action_start_no_result(self, mock_start):
+ test_class = instance_action.InstanceAction
+ expected_packed_values = test_class.pack_action_start(
+ self.context, 'fake-uuid', 'fake-action')
+ mock_start.return_value = fake_action
+ action = instance_action.InstanceAction.action_start(
+ self.context, 'fake-uuid', 'fake-action', want_result=False)
+ mock_start.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.assertIsNone(action)
+
+ @mock.patch.object(db, 'action_finish')
+ def test_action_finish(self, mock_finish):
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceAction
+ expected_packed_values = test_class.pack_action_finish(
+ self.context, 'fake-uuid')
+ mock_finish.return_value = fake_action
+ action = instance_action.InstanceAction.action_finish(
+ self.context, 'fake-uuid', want_result=True)
+ mock_finish.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.compare_obj(action, fake_action)
+
+ @mock.patch.object(db, 'action_finish')
+ def test_action_finish_no_result(self, mock_finish):
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceAction
+ expected_packed_values = test_class.pack_action_finish(
+ self.context, 'fake-uuid')
+ mock_finish.return_value = fake_action
+ action = instance_action.InstanceAction.action_finish(
+ self.context, 'fake-uuid', want_result=False)
+ mock_finish.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.assertIsNone(action)
+
+ @mock.patch.object(db, 'action_finish')
+ @mock.patch.object(db, 'action_start')
+ def test_finish(self, mock_start, mock_finish):
+ timeutils.set_time_override(override_time=NOW)
+ expected_packed_action_start = {
+ 'request_id': self.context.request_id,
+ 'user_id': self.context.user_id,
+ 'project_id': self.context.project_id,
+ 'instance_uuid': 'fake-uuid',
+ 'action': 'fake-action',
+ 'start_time': self.context.timestamp,
+ }
+ expected_packed_action_finish = {
+ 'request_id': self.context.request_id,
+ 'instance_uuid': 'fake-uuid',
+ 'finish_time': NOW,
+ }
+ mock_start.return_value = fake_action
+ mock_finish.return_value = fake_action
+ action = instance_action.InstanceAction.action_start(
+ self.context, 'fake-uuid', 'fake-action')
+ action.finish(self.context)
+ mock_start.assert_called_once_with(self.context,
+ expected_packed_action_start)
+ mock_finish.assert_called_once_with(self.context,
+ expected_packed_action_finish)
+ self.compare_obj(action, fake_action)
+
+ @mock.patch.object(db, 'actions_get')
+ def test_get_list(self, mock_get):
+ fake_actions = [dict(fake_action, id=1234),
+ dict(fake_action, id=5678)]
+ mock_get.return_value = fake_actions
+ obj_list = instance_action.InstanceActionList.get_by_instance_uuid(
+ self.context, 'fake-uuid')
+ for index, action in enumerate(obj_list):
+ self.compare_obj(action, fake_actions[index])
+ mock_get.assert_called_once_with(self.context, 'fake-uuid')
+
+
+class TestInstanceActionObject(test_objects._LocalTest,
+ _TestInstanceActionObject):
+ pass
+
+
+class TestRemoteInstanceActionObject(test_objects._RemoteTest,
+ _TestInstanceActionObject):
+ pass
+
+
+class _TestInstanceActionEventObject(object):
+ @mock.patch.object(db, 'action_event_get_by_id')
+ def test_get_by_id(self, mock_get):
+ mock_get.return_value = fake_event
+ event = instance_action.InstanceActionEvent.get_by_id(
+ self.context, 'fake-action-id', 'fake-event-id')
+ self.compare_obj(event, fake_event)
+ mock_get.assert_called_once_with(self.context,
+ 'fake-action-id', 'fake-event-id')
+
+ @mock.patch.object(db, 'action_event_start')
+ def test_event_start(self, mock_start):
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceActionEvent
+ expected_packed_values = test_class.pack_action_event_start(
+ self.context, 'fake-uuid', 'fake-event')
+ mock_start.return_value = fake_event
+ event = instance_action.InstanceActionEvent.event_start(
+ self.context, 'fake-uuid', 'fake-event', want_result=True)
+ mock_start.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.compare_obj(event, fake_event)
+
+ @mock.patch.object(db, 'action_event_start')
+ def test_event_start_no_result(self, mock_start):
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceActionEvent
+ expected_packed_values = test_class.pack_action_event_start(
+ self.context, 'fake-uuid', 'fake-event')
+ mock_start.return_value = fake_event
+ event = instance_action.InstanceActionEvent.event_start(
+ self.context, 'fake-uuid', 'fake-event', want_result=False)
+ mock_start.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.assertIsNone(event)
+
+ @mock.patch.object(db, 'action_event_finish')
+ def test_event_finish(self, mock_finish):
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceActionEvent
+ expected_packed_values = test_class.pack_action_event_finish(
+ self.context, 'fake-uuid', 'fake-event')
+ expected_packed_values['finish_time'] = timeutils.utcnow()
+ mock_finish.return_value = fake_event
+ event = instance_action.InstanceActionEvent.event_finish(
+ self.context, 'fake-uuid', 'fake-event', want_result=True)
+ mock_finish.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.compare_obj(event, fake_event)
+
+ @mock.patch.object(db, 'action_event_finish')
+ def test_event_finish_no_result(self, mock_finish):
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceActionEvent
+ expected_packed_values = test_class.pack_action_event_finish(
+ self.context, 'fake-uuid', 'fake-event')
+ expected_packed_values['finish_time'] = timeutils.utcnow()
+ mock_finish.return_value = fake_event
+ event = instance_action.InstanceActionEvent.event_finish(
+ self.context, 'fake-uuid', 'fake-event', want_result=False)
+ mock_finish.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.assertIsNone(event)
+
+ @mock.patch.object(traceback, 'format_tb')
+ @mock.patch.object(db, 'action_event_finish')
+ def test_event_finish_with_failure(self, mock_finish, mock_tb):
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceActionEvent
+ expected_packed_values = test_class.pack_action_event_finish(
+ self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb')
+ expected_packed_values['finish_time'] = timeutils.utcnow()
+
+ mock_finish.return_value = fake_event
+ event = test_class.event_finish_with_failure(
+ self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb',
+ want_result=True)
+ mock_finish.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.compare_obj(event, fake_event)
+
+ @mock.patch.object(traceback, 'format_tb')
+ @mock.patch.object(db, 'action_event_finish')
+ def test_event_finish_with_failure_legacy(self, mock_finish, mock_tb):
+ # Tests that exc_tb is serialized when it's not a string type.
+ mock_tb.return_value = 'fake-tb'
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceActionEvent
+ expected_packed_values = test_class.pack_action_event_finish(
+ self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb')
+ expected_packed_values['finish_time'] = timeutils.utcnow()
+
+ mock_finish.return_value = fake_event
+ fake_tb = mock.sentinel.fake_tb
+ event = test_class.event_finish_with_failure(
+ self.context, 'fake-uuid', 'fake-event', exc_val='val',
+ exc_tb=fake_tb, want_result=True)
+ mock_finish.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.compare_obj(event, fake_event)
+ mock_tb.assert_called_once_with(fake_tb)
+
+ @mock.patch.object(db, 'action_event_finish')
+ def test_event_finish_with_failure_legacy_unicode(self, mock_finish):
+ # Tests that traceback.format_tb is not called when exc_tb is unicode.
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceActionEvent
+ expected_packed_values = test_class.pack_action_event_finish(
+ self.context, 'fake-uuid', 'fake-event', 'val', unicode('fake-tb'))
+ expected_packed_values['finish_time'] = timeutils.utcnow()
+
+ mock_finish.return_value = fake_event
+ event = test_class.event_finish_with_failure(
+ self.context, 'fake-uuid', 'fake-event', exc_val='val',
+ exc_tb=unicode('fake-tb'), want_result=True)
+ mock_finish.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.compare_obj(event, fake_event)
+
+ @mock.patch.object(traceback, 'format_tb')
+ @mock.patch.object(db, 'action_event_finish')
+ def test_event_finish_with_failure_no_result(self, mock_finish, mock_tb):
+ # Tests that traceback.format_tb is not called when exc_tb is a str
+ # and want_result is False, so no event should come back.
+ mock_tb.return_value = 'fake-tb'
+ timeutils.set_time_override(override_time=NOW)
+ test_class = instance_action.InstanceActionEvent
+ expected_packed_values = test_class.pack_action_event_finish(
+ self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb')
+ expected_packed_values['finish_time'] = timeutils.utcnow()
+
+ mock_finish.return_value = fake_event
+ event = test_class.event_finish_with_failure(
+ self.context, 'fake-uuid', 'fake-event', 'val', 'fake-tb',
+ want_result=False)
+ mock_finish.assert_called_once_with(self.context,
+ expected_packed_values)
+ self.assertIsNone(event)
+ self.assertFalse(mock_tb.called)
+
+ @mock.patch.object(db, 'action_events_get')
+ def test_get_by_action(self, mock_get):
+ fake_events = [dict(fake_event, id=1234),
+ dict(fake_event, id=5678)]
+ mock_get.return_value = fake_events
+ obj_list = instance_action.InstanceActionEventList.get_by_action(
+ self.context, 'fake-action-id')
+ for index, event in enumerate(obj_list):
+ self.compare_obj(event, fake_events[index])
+ mock_get.assert_called_once_with(self.context, 'fake-action-id')
+
+ @mock.patch('nova.objects.instance_action.InstanceActionEvent.'
+ 'pack_action_event_finish')
+ @mock.patch('traceback.format_tb')
+ def test_event_finish_with_failure_serialized(self, mock_format,
+ mock_pack):
+ mock_format.return_value = 'traceback'
+ mock_pack.side_effect = test.TestingException
+ self.assertRaises(
+ test.TestingException,
+ instance_action.InstanceActionEvent.event_finish_with_failure,
+ self.context, 'fake-uuid', 'fake-event',
+ exc_val=mock.sentinel.exc_val,
+ exc_tb=mock.sentinel.exc_tb)
+ mock_pack.assert_called_once_with(self.context, 'fake-uuid',
+ 'fake-event',
+ exc_val=str(mock.sentinel.exc_val),
+ exc_tb='traceback')
+ mock_format.assert_called_once_with(mock.sentinel.exc_tb)
+
+
+class TestInstanceActionEventObject(test_objects._LocalTest,
+ _TestInstanceActionEventObject):
+ pass
+
+
+class TestRemoteInstanceActionEventObject(test_objects._RemoteTest,
+ _TestInstanceActionEventObject):
+ pass
diff --git a/nova/tests/unit/objects/test_instance_fault.py b/nova/tests/unit/objects/test_instance_fault.py
new file mode 100644
index 0000000000..97716d42d3
--- /dev/null
+++ b/nova/tests/unit/objects/test_instance_fault.py
@@ -0,0 +1,126 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import db
+from nova import exception
+from nova.objects import instance_fault
+from nova.tests.unit.objects import test_objects
+
+
+fake_faults = {
+ 'fake-uuid': [
+ {'id': 1, 'instance_uuid': 'fake-uuid', 'code': 123, 'message': 'msg1',
+ 'details': 'details', 'host': 'host', 'deleted': False,
+ 'created_at': None, 'updated_at': None, 'deleted_at': None},
+ {'id': 2, 'instance_uuid': 'fake-uuid', 'code': 456, 'message': 'msg2',
+ 'details': 'details', 'host': 'host', 'deleted': False,
+ 'created_at': None, 'updated_at': None, 'deleted_at': None},
+ ]
+ }
+
+
+class _TestInstanceFault(object):
+ def test_get_latest_for_instance(self):
+ self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
+ db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid']
+ ).AndReturn(fake_faults)
+ self.mox.ReplayAll()
+ fault = instance_fault.InstanceFault.get_latest_for_instance(
+ self.context, 'fake-uuid')
+ for key in fake_faults['fake-uuid'][0]:
+ self.assertEqual(fake_faults['fake-uuid'][0][key], fault[key])
+
+ def test_get_latest_for_instance_with_none(self):
+ self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
+ db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid']
+ ).AndReturn({})
+ self.mox.ReplayAll()
+ fault = instance_fault.InstanceFault.get_latest_for_instance(
+ self.context, 'fake-uuid')
+ self.assertIsNone(fault)
+
+ def test_get_by_instance(self):
+ self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
+ db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid']
+ ).AndReturn(fake_faults)
+ self.mox.ReplayAll()
+ faults = instance_fault.InstanceFaultList.get_by_instance_uuids(
+ self.context, ['fake-uuid'])
+ for index, db_fault in enumerate(fake_faults['fake-uuid']):
+ for key in db_fault:
+ self.assertEqual(fake_faults['fake-uuid'][index][key],
+ faults[index][key])
+
+ def test_get_by_instance_with_none(self):
+ self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
+ db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid']
+ ).AndReturn({})
+ self.mox.ReplayAll()
+ faults = instance_fault.InstanceFaultList.get_by_instance_uuids(
+ self.context, ['fake-uuid'])
+ self.assertEqual(0, len(faults))
+
+ @mock.patch('nova.cells.rpcapi.CellsAPI.instance_fault_create_at_top')
+ @mock.patch('nova.db.instance_fault_create')
+ def _test_create(self, update_cells, mock_create, cells_fault_create):
+ mock_create.return_value = fake_faults['fake-uuid'][1]
+ fault = instance_fault.InstanceFault()
+ fault.instance_uuid = 'fake-uuid'
+ fault.code = 456
+ fault.message = 'foo'
+ fault.details = 'you screwed up'
+ fault.host = 'myhost'
+ fault.create(self.context)
+ self.assertEqual(2, fault.id)
+ mock_create.assert_called_once_with(self.context,
+ {'instance_uuid': 'fake-uuid',
+ 'code': 456,
+ 'message': 'foo',
+ 'details': 'you screwed up',
+ 'host': 'myhost'})
+ if update_cells:
+ cells_fault_create.assert_called_once_with(
+ self.context, fake_faults['fake-uuid'][1])
+ else:
+ self.assertFalse(cells_fault_create.called)
+
+ def test_create_no_cells(self):
+ self.flags(enable=False, group='cells')
+ self._test_create(False)
+
+ def test_create_api_cell(self):
+ self.flags(cell_type='api', enable=True, group='cells')
+ self._test_create(False)
+
+ def test_create_compute_cell(self):
+ self.flags(cell_type='compute', enable=True, group='cells')
+ self._test_create(True)
+
+ def test_create_already_created(self):
+ fault = instance_fault.InstanceFault()
+ fault.id = 1
+ self.assertRaises(exception.ObjectActionError,
+ fault.create, self.context)
+
+
+class TestInstanceFault(test_objects._LocalTest,
+ _TestInstanceFault):
+ pass
+
+
+class TestInstanceFaultRemote(test_objects._RemoteTest,
+ _TestInstanceFault):
+ pass
diff --git a/nova/tests/unit/objects/test_instance_group.py b/nova/tests/unit/objects/test_instance_group.py
new file mode 100644
index 0000000000..0e20f54145
--- /dev/null
+++ b/nova/tests/unit/objects/test_instance_group.py
@@ -0,0 +1,350 @@
+# Copyright (c) 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from nova.compute import flavors
+from nova import context
+from nova import db
+from nova import exception
+from nova.objects import instance_group
+from nova.tests.unit import fake_notifier
+from nova.tests.unit.objects import test_objects
+from nova.tests.unit import utils as tests_utils
+
+
+class _TestInstanceGroupObjects(object):
+
+ def setUp(self):
+ super(_TestInstanceGroupObjects, self).setUp()
+ self.user_id = 'fake_user'
+ self.project_id = 'fake_project'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+
+ def _get_default_values(self):
+ return {'name': 'fake_name',
+ 'user_id': self.user_id,
+ 'project_id': self.project_id}
+
+ def _create_instance_group(self, context, values, policies=None,
+ members=None):
+ return db.instance_group_create(context, values, policies=policies,
+ members=members)
+
+ def test_get_by_uuid(self):
+ values = self._get_default_values()
+ policies = ['policy1', 'policy2']
+ members = ['instance_id1', 'instance_id2']
+ db_result = self._create_instance_group(self.context, values,
+ policies=policies,
+ members=members)
+ obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
+ db_result.uuid)
+ self.assertEqual(obj_result.members, members)
+ self.assertEqual(obj_result.policies, policies)
+
+ def test_get_by_instance_uuid(self):
+ values = self._get_default_values()
+ policies = ['policy1', 'policy2']
+ members = ['instance_id1', 'instance_id2']
+ db_result = self._create_instance_group(self.context, values,
+ policies=policies,
+ members=members)
+ obj_result = instance_group.InstanceGroup.get_by_instance_uuid(
+ self.context, 'instance_id1')
+ self.assertEqual(obj_result.uuid, db_result.uuid)
+
+ def test_refresh(self):
+ values = self._get_default_values()
+ db_result = self._create_instance_group(self.context, values)
+ obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
+ db_result.uuid)
+ self.assertEqual(obj_result.name, 'fake_name')
+ values = {'name': 'new_name', 'user_id': 'new_user',
+ 'project_id': 'new_project'}
+ db.instance_group_update(self.context, db_result['uuid'],
+ values)
+ obj_result.refresh()
+ self.assertEqual(obj_result.name, 'new_name')
+ self.assertEqual(set([]), obj_result.obj_what_changed())
+
+ def test_save_simple(self):
+ values = self._get_default_values()
+ db_result = self._create_instance_group(self.context, values)
+ obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
+ db_result.uuid)
+ self.assertEqual(obj_result.name, 'fake_name')
+ obj_result.name = 'new_name'
+ obj_result.save()
+ result = db.instance_group_get(self.context, db_result['uuid'])
+ self.assertEqual(result['name'], 'new_name')
+
+ def test_save_policies(self):
+ values = self._get_default_values()
+ db_result = self._create_instance_group(self.context, values)
+ obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
+ db_result.uuid)
+ policies = ['policy1', 'policy2']
+ obj_result.policies = policies
+ obj_result.save()
+ result = db.instance_group_get(self.context, db_result['uuid'])
+ self.assertEqual(result['policies'], policies)
+
+ def test_save_members(self):
+ values = self._get_default_values()
+ db_result = self._create_instance_group(self.context, values)
+ obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
+ db_result.uuid)
+ members = ['instance1', 'instance2']
+ obj_result.members = members
+ fake_notifier.NOTIFICATIONS = []
+ obj_result.save()
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('servergroup.update', msg.event_type)
+ self.assertEqual(members, msg.payload['members'])
+ result = db.instance_group_get(self.context, db_result['uuid'])
+ self.assertEqual(result['members'], members)
+
+ def test_create(self):
+ group1 = instance_group.InstanceGroup()
+ group1.uuid = 'fake-uuid'
+ group1.name = 'fake-name'
+ fake_notifier.NOTIFICATIONS = []
+ group1.create(self.context)
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(group1.name, msg.payload['name'])
+ self.assertEqual(group1.uuid, msg.payload['server_group_id'])
+ self.assertEqual('servergroup.create', msg.event_type)
+ group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
+ group1.uuid)
+ self.assertEqual(group1.id, group2.id)
+ self.assertEqual(group1.uuid, group2.uuid)
+ self.assertEqual(group1.name, group2.name)
+ result = db.instance_group_get(self.context, group1.uuid)
+ self.assertEqual(group1.id, result.id)
+ self.assertEqual(group1.uuid, result.uuid)
+ self.assertEqual(group1.name, result.name)
+
+ def test_create_with_policies(self):
+ group1 = instance_group.InstanceGroup()
+ group1.policies = ['policy1', 'policy2']
+ group1.create(self.context)
+ group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
+ group1.uuid)
+ self.assertEqual(group1.id, group2.id)
+ self.assertEqual(group1.policies, group2.policies)
+
+ def test_create_with_members(self):
+ group1 = instance_group.InstanceGroup()
+ group1.members = ['instance1', 'instance2']
+ group1.create(self.context)
+ group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
+ group1.uuid)
+ self.assertEqual(group1.id, group2.id)
+ self.assertEqual(group1.members, group2.members)
+
+ def test_recreate_fails(self):
+ group = instance_group.InstanceGroup()
+ group.create(self.context)
+ self.assertRaises(exception.ObjectActionError, group.create,
+ self.context)
+
+ def test_destroy(self):
+ values = self._get_default_values()
+ result = self._create_instance_group(self.context, values)
+ group = instance_group.InstanceGroup()
+ group.id = result.id
+ group.uuid = result.uuid
+ fake_notifier.NOTIFICATIONS = []
+ group.destroy(self.context)
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('servergroup.delete', msg.event_type)
+ self.assertEqual(group.uuid, msg.payload['server_group_id'])
+ self.assertRaises(exception.InstanceGroupNotFound,
+ db.instance_group_get, self.context, result['uuid'])
+
+ def _populate_instances(self):
+ instances = [(str(uuid.uuid4()), 'f1', 'p1'),
+ (str(uuid.uuid4()), 'f2', 'p1'),
+ (str(uuid.uuid4()), 'f3', 'p2'),
+ (str(uuid.uuid4()), 'f4', 'p2')]
+ for instance in instances:
+ values = self._get_default_values()
+ values['uuid'] = instance[0]
+ values['name'] = instance[1]
+ values['project_id'] = instance[2]
+ self._create_instance_group(self.context, values)
+ return instances
+
+ def test_list_all(self):
+ self._populate_instances()
+ inst_list = instance_group.InstanceGroupList.get_all(self.context)
+ groups = db.instance_group_get_all(self.context)
+ self.assertEqual(len(groups), len(inst_list.objects))
+ self.assertEqual(len(groups), 4)
+ for i in range(0, len(groups)):
+ self.assertIsInstance(inst_list.objects[i],
+ instance_group.InstanceGroup)
+ self.assertEqual(inst_list.objects[i].uuid, groups[i]['uuid'])
+
+ def test_list_by_project_id(self):
+ self._populate_instances()
+ project_ids = ['p1', 'p2']
+ for id in project_ids:
+ il = instance_group.InstanceGroupList.get_by_project_id(
+ self.context, id)
+ groups = db.instance_group_get_all_by_project_id(self.context, id)
+ self.assertEqual(len(groups), len(il.objects))
+ self.assertEqual(len(groups), 2)
+ for i in range(0, len(groups)):
+ self.assertIsInstance(il.objects[i],
+ instance_group.InstanceGroup)
+ self.assertEqual(il.objects[i].uuid, groups[i]['uuid'])
+ self.assertEqual(il.objects[i].name, groups[i]['name'])
+ self.assertEqual(il.objects[i].project_id, id)
+
+ def test_get_by_name(self):
+ self._populate_instances()
+ ctxt = context.RequestContext('fake_user', 'p1')
+ ig = instance_group.InstanceGroup.get_by_name(ctxt, 'f1')
+ self.assertEqual('f1', ig.name)
+
+ def test_get_by_hint(self):
+ instances = self._populate_instances()
+ for instance in instances:
+ ctxt = context.RequestContext('fake_user', instance[2])
+ ig = instance_group.InstanceGroup.get_by_hint(ctxt, instance[1])
+ self.assertEqual(instance[1], ig.name)
+ ig = instance_group.InstanceGroup.get_by_hint(ctxt, instance[0])
+ self.assertEqual(instance[0], ig.uuid)
+
+ def test_add_members(self):
+ instance_ids = ['fakeid1', 'fakeid2']
+ values = self._get_default_values()
+ group = self._create_instance_group(self.context, values)
+ fake_notifier.NOTIFICATIONS = []
+ members = instance_group.InstanceGroup.add_members(self.context,
+ group.uuid, instance_ids)
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ msg = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('servergroup.addmember', msg.event_type)
+ self.assertEqual(group.uuid, msg.payload['server_group_id'])
+ self.assertEqual(instance_ids, msg.payload['instance_uuids'])
+ group = instance_group.InstanceGroup.get_by_uuid(self.context,
+ group.uuid)
+ for instance in instance_ids:
+ self.assertIn(instance, members)
+ self.assertIn(instance, group.members)
+
+ def test_get_hosts(self):
+ instance1 = tests_utils.get_test_instance(self.context,
+ flavor=flavors.get_default_flavor(), obj=True)
+ instance1.host = 'hostA'
+ instance1.save()
+ instance2 = tests_utils.get_test_instance(self.context,
+ flavor=flavors.get_default_flavor(), obj=True)
+ instance2.host = 'hostB'
+ instance2.save()
+ instance3 = tests_utils.get_test_instance(self.context,
+ flavor=flavors.get_default_flavor(), obj=True)
+ instance3.host = 'hostB'
+ instance3.save()
+
+ instance_ids = [instance1.uuid, instance2.uuid, instance3.uuid]
+ values = self._get_default_values()
+ group = self._create_instance_group(self.context, values)
+ instance_group.InstanceGroup.add_members(self.context, group.uuid,
+ instance_ids)
+
+ group = instance_group.InstanceGroup.get_by_uuid(self.context,
+ group.uuid)
+ hosts = group.get_hosts(self.context)
+ self.assertEqual(2, len(hosts))
+ self.assertIn('hostA', hosts)
+ self.assertIn('hostB', hosts)
+ hosts = group.get_hosts(self.context, exclude=[instance1.uuid])
+ self.assertEqual(1, len(hosts))
+ self.assertIn('hostB', hosts)
+
+ def test_get_hosts_with_some_none(self):
+ instance1 = tests_utils.get_test_instance(self.context,
+ flavor=flavors.get_default_flavor(), obj=True)
+ instance1.host = None
+ instance1.save()
+ instance2 = tests_utils.get_test_instance(self.context,
+ flavor=flavors.get_default_flavor(), obj=True)
+ instance2.host = 'hostB'
+ instance2.save()
+
+ instance_ids = [instance1.uuid, instance2.uuid]
+ values = self._get_default_values()
+ group = self._create_instance_group(self.context, values)
+ instance_group.InstanceGroup.add_members(self.context, group.uuid,
+ instance_ids)
+
+ group = instance_group.InstanceGroup.get_by_uuid(self.context,
+ group.uuid)
+ hosts = group.get_hosts(self.context)
+ self.assertEqual(1, len(hosts))
+ self.assertIn('hostB', hosts)
+
+ def test_obj_make_compatible(self):
+ group = instance_group.InstanceGroup(uuid='fake-uuid',
+ name='fake-name')
+ group.create(self.context)
+ group_primitive = group.obj_to_primitive()
+ group.obj_make_compatible(group_primitive, '1.6')
+ self.assertEqual({}, group_primitive['metadetails'])
+
+ def test_count_members_by_user(self):
+ instance1 = tests_utils.get_test_instance(self.context,
+ flavor=flavors.get_default_flavor(), obj=True)
+ instance1.user_id = 'user1'
+ instance1.save()
+ instance2 = tests_utils.get_test_instance(self.context,
+ flavor=flavors.get_default_flavor(), obj=True)
+ instance2.user_id = 'user2'
+ instance2.save()
+ instance3 = tests_utils.get_test_instance(self.context,
+ flavor=flavors.get_default_flavor(), obj=True)
+ instance3.user_id = 'user2'
+ instance3.save()
+
+ instance_ids = [instance1.uuid, instance2.uuid, instance3.uuid]
+ values = self._get_default_values()
+ group = self._create_instance_group(self.context, values)
+ instance_group.InstanceGroup.add_members(self.context, group.uuid,
+ instance_ids)
+
+ group = instance_group.InstanceGroup.get_by_uuid(self.context,
+ group.uuid)
+ count_user1 = group.count_members_by_user(self.context, 'user1')
+ count_user2 = group.count_members_by_user(self.context, 'user2')
+ count_user3 = group.count_members_by_user(self.context, 'user3')
+ self.assertEqual(1, count_user1)
+ self.assertEqual(2, count_user2)
+ self.assertEqual(0, count_user3)
+
+
+class TestInstanceGroupObject(test_objects._LocalTest,
+ _TestInstanceGroupObjects):
+ pass
+
+
+class TestRemoteInstanceGroupObject(test_objects._RemoteTest,
+ _TestInstanceGroupObjects):
+ pass
diff --git a/nova/tests/unit/objects/test_instance_info_cache.py b/nova/tests/unit/objects/test_instance_info_cache.py
new file mode 100644
index 0000000000..9a72772030
--- /dev/null
+++ b/nova/tests/unit/objects/test_instance_info_cache.py
@@ -0,0 +1,117 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.cells import opts as cells_opts
+from nova.cells import rpcapi as cells_rpcapi
+from nova import db
+from nova import exception
+from nova.network import model as network_model
+from nova.objects import instance_info_cache
+from nova.tests.unit.objects import test_objects
+
+
+fake_info_cache = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'instance_uuid': 'fake-uuid',
+ 'network_info': '[]',
+ }
+
+
+class _TestInstanceInfoCacheObject(object):
+ def test_get_by_instance_uuid(self):
+ nwinfo = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
+ self.mox.StubOutWithMock(db, 'instance_info_cache_get')
+ db.instance_info_cache_get(self.context, 'fake-uuid').AndReturn(
+ dict(fake_info_cache, network_info=nwinfo.json()))
+ self.mox.ReplayAll()
+ obj = instance_info_cache.InstanceInfoCache.get_by_instance_uuid(
+ self.context, 'fake-uuid')
+ self.assertEqual(obj.instance_uuid, 'fake-uuid')
+ self.assertEqual(obj.network_info, nwinfo)
+ self.assertRemotes()
+
+ def test_get_by_instance_uuid_no_entries(self):
+ self.mox.StubOutWithMock(db, 'instance_info_cache_get')
+ db.instance_info_cache_get(self.context, 'fake-uuid').AndReturn(None)
+ self.mox.ReplayAll()
+ self.assertRaises(
+ exception.InstanceInfoCacheNotFound,
+ instance_info_cache.InstanceInfoCache.get_by_instance_uuid,
+ self.context, 'fake-uuid')
+
+ def test_new(self):
+ obj = instance_info_cache.InstanceInfoCache.new(self.context,
+ 'fake-uuid')
+ self.assertEqual(set(['instance_uuid', 'network_info']),
+ obj.obj_what_changed())
+ self.assertEqual('fake-uuid', obj.instance_uuid)
+ self.assertIsNone(obj.network_info)
+
+ def _save_helper(self, cell_type, update_cells):
+ obj = instance_info_cache.InstanceInfoCache()
+ cells_api = cells_rpcapi.CellsAPI()
+
+ self.mox.StubOutWithMock(db, 'instance_info_cache_update')
+ self.mox.StubOutWithMock(cells_opts, 'get_cell_type')
+ self.mox.StubOutWithMock(cells_rpcapi, 'CellsAPI',
+ use_mock_anything=True)
+ self.mox.StubOutWithMock(cells_api,
+ 'instance_info_cache_update_at_top')
+ nwinfo = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
+ db.instance_info_cache_update(
+ self.context, 'fake-uuid',
+ {'network_info': nwinfo.json()}).AndReturn('foo')
+ if update_cells:
+ cells_opts.get_cell_type().AndReturn(cell_type)
+ if cell_type == 'compute':
+ cells_rpcapi.CellsAPI().AndReturn(cells_api)
+ cells_api.instance_info_cache_update_at_top(
+ self.context, 'foo')
+ self.mox.ReplayAll()
+ obj._context = self.context
+ obj.instance_uuid = 'fake-uuid'
+ obj.network_info = nwinfo
+ obj.save(update_cells=update_cells)
+
+ def test_save_with_update_cells_and_compute_cell(self):
+ self._save_helper('compute', True)
+
+ def test_save_with_update_cells_and_non_compute_cell(self):
+ self._save_helper(None, True)
+
+ def test_save_without_update_cells(self):
+ self._save_helper(None, False)
+
+ def test_refresh(self):
+ obj = instance_info_cache.InstanceInfoCache.new(self.context,
+ 'fake-uuid1')
+ self.mox.StubOutWithMock(db, 'instance_info_cache_get')
+ db.instance_info_cache_get(self.context, 'fake-uuid1').AndReturn(
+ fake_info_cache)
+ self.mox.ReplayAll()
+ obj.refresh()
+ self.assertEqual(fake_info_cache['instance_uuid'], obj.instance_uuid)
+
+
+class TestInstanceInfoCacheObject(test_objects._LocalTest,
+ _TestInstanceInfoCacheObject):
+ pass
+
+
+class TestInstanceInfoCacheObjectRemote(test_objects._RemoteTest,
+ _TestInstanceInfoCacheObject):
+ pass
diff --git a/nova/tests/unit/objects/test_instance_numa_topology.py b/nova/tests/unit/objects/test_instance_numa_topology.py
new file mode 100644
index 0000000000..82c34ccda2
--- /dev/null
+++ b/nova/tests/unit/objects/test_instance_numa_topology.py
@@ -0,0 +1,78 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import mock
+
+from nova import exception
+from nova import objects
+from nova.tests.unit.objects import test_objects
+from nova.virt import hardware
+
+fake_numa_topology = hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(
+ 0, set([1, 2]), 512, hardware.VirtPageSize(2048)),
+ hardware.VirtNUMATopologyCellInstance(
+ 1, set([3, 4]), 512, hardware.VirtPageSize(2048))])
+
+fake_db_topology = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'id': 1,
+ 'instance_uuid': str(uuid.uuid4()),
+ 'numa_topology': fake_numa_topology.to_json()
+ }
+
+
+class _TestInstanceNUMATopology(object):
+ @mock.patch('nova.db.instance_extra_update_by_uuid')
+ def test_create(self, mock_update):
+ topo_obj = objects.InstanceNUMATopology.obj_from_topology(
+ fake_numa_topology)
+ topo_obj.instance_uuid = fake_db_topology['instance_uuid']
+ topo_obj.create(self.context)
+ self.assertEqual(1, len(mock_update.call_args_list))
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid')
+ def test_get_by_instance_uuid(self, mock_get):
+ mock_get.return_value = fake_db_topology
+ numa_topology = objects.InstanceNUMATopology.get_by_instance_uuid(
+ self.context, 'fake_uuid')
+ self.assertEqual(fake_db_topology['instance_uuid'],
+ numa_topology.instance_uuid)
+ for obj_cell, topo_cell in zip(
+ numa_topology.cells, fake_numa_topology.cells):
+ self.assertIsInstance(obj_cell, objects.InstanceNUMACell)
+ self.assertEqual(topo_cell.cpuset, obj_cell.cpuset)
+ self.assertEqual(topo_cell.memory, obj_cell.memory)
+ self.assertEqual(topo_cell.pagesize.size_kb, obj_cell.pagesize)
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid')
+ def test_get_by_instance_uuid_missing(self, mock_get):
+ mock_get.return_value = None
+ self.assertRaises(
+ exception.NumaTopologyNotFound,
+ objects.InstanceNUMATopology.get_by_instance_uuid,
+ self.context, 'fake_uuid')
+
+
+class TestInstanceNUMATopology(test_objects._LocalTest,
+ _TestInstanceNUMATopology):
+ pass
+
+
+class TestInstanceNUMATopologyRemote(test_objects._RemoteTest,
+ _TestInstanceNUMATopology):
+ pass
diff --git a/nova/tests/unit/objects/test_instance_pci_requests.py b/nova/tests/unit/objects/test_instance_pci_requests.py
new file mode 100644
index 0000000000..541d503ff4
--- /dev/null
+++ b/nova/tests/unit/objects/test_instance_pci_requests.py
@@ -0,0 +1,191 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova import objects
+from nova.tests.unit.objects import test_objects
+
+
+FAKE_UUID = '79a53d6b-0893-4838-a971-15f4f382e7c2'
+FAKE_REQUEST_UUID = '69b53d6b-0793-4839-c981-f5c4f382e7d2'
+
+# NOTE(danms): Yes, these are the same right now, but going forward,
+# we have changes to make which will be reflected in the format
+# in instance_extra, but not in system_metadata.
+fake_pci_requests = [
+ {'count': 2,
+ 'spec': [{'vendor_id': '8086',
+ 'device_id': '1502'}],
+ 'alias_name': 'alias_1',
+ 'is_new': False,
+ 'request_id': FAKE_REQUEST_UUID},
+ {'count': 2,
+ 'spec': [{'vendor_id': '6502',
+ 'device_id': '07B5'}],
+ 'alias_name': 'alias_2',
+ 'is_new': True,
+ 'request_id': FAKE_REQUEST_UUID},
+ ]
+
+fake_legacy_pci_requests = [
+ {'count': 2,
+ 'spec': [{'vendor_id': '8086',
+ 'device_id': '1502'}],
+ 'alias_name': 'alias_1'},
+ {'count': 1,
+ 'spec': [{'vendor_id': '6502',
+ 'device_id': '07B5'}],
+ 'alias_name': 'alias_2'},
+ ]
+
+
+class _TestInstancePCIRequests(object):
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid')
+ def test_get_by_instance_uuid(self, mock_get):
+ mock_get.return_value = {
+ 'instance_uuid': FAKE_UUID,
+ 'pci_requests': jsonutils.dumps(fake_pci_requests),
+ }
+ requests = objects.InstancePCIRequests.get_by_instance_uuid(
+ self.context, FAKE_UUID)
+ self.assertEqual(2, len(requests.requests))
+ for index, request in enumerate(requests.requests):
+ self.assertEqual(fake_pci_requests[index]['alias_name'],
+ request.alias_name)
+ self.assertEqual(fake_pci_requests[index]['count'],
+ request.count)
+ self.assertEqual(fake_pci_requests[index]['spec'],
+ [dict(x.items()) for x in request.spec])
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
+ def test_get_by_instance_uuid_and_newness(self, mock_get):
+ pcir = objects.InstancePCIRequests
+ mock_get.return_value = objects.InstancePCIRequests(
+ instance_uuid='fake-uuid',
+ requests=[objects.InstancePCIRequest(count=1, is_new=False),
+ objects.InstancePCIRequest(count=2, is_new=True)])
+ old_req = pcir.get_by_instance_uuid_and_newness(self.context,
+ 'fake-uuid',
+ False)
+ mock_get.return_value = objects.InstancePCIRequests(
+ instance_uuid='fake-uuid',
+ requests=[objects.InstancePCIRequest(count=1, is_new=False),
+ objects.InstancePCIRequest(count=2, is_new=True)])
+ new_req = pcir.get_by_instance_uuid_and_newness(self.context,
+ 'fake-uuid',
+ True)
+ self.assertEqual(1, old_req.requests[0].count)
+ self.assertEqual(2, new_req.requests[0].count)
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
+ def test_get_by_instance_current(self, mock_get):
+ instance = objects.Instance(uuid='fake-uuid',
+ system_metadata={})
+ objects.InstancePCIRequests.get_by_instance(self.context,
+ instance)
+ mock_get.assert_called_once_with(self.context, 'fake-uuid')
+
+ def test_get_by_instance_legacy(self):
+ fakesysmeta = {
+ 'pci_requests': jsonutils.dumps([fake_legacy_pci_requests[0]]),
+ 'new_pci_requests': jsonutils.dumps([fake_legacy_pci_requests[1]]),
+ }
+ instance = objects.Instance(uuid='fake-uuid',
+ system_metadata=fakesysmeta)
+ requests = objects.InstancePCIRequests.get_by_instance(self.context,
+ instance)
+ self.assertEqual(2, len(requests.requests))
+ self.assertEqual('alias_1', requests.requests[0].alias_name)
+ self.assertFalse(requests.requests[0].is_new)
+ self.assertEqual('alias_2', requests.requests[1].alias_name)
+ self.assertTrue(requests.requests[1].is_new)
+
+ @mock.patch('nova.db.instance_extra_update_by_uuid')
+ def test_save(self, mock_update):
+ requests = objects.InstancePCIRequests(
+ context=self.context,
+ instance_uuid=FAKE_UUID,
+ requests=[objects.InstancePCIRequest(
+ count=1,
+ spec=[{'foo': 'bar'}, {'baz': 'bat'}],
+ alias_name='alias_1',
+ is_new=False,
+ request_id=FAKE_REQUEST_UUID)])
+ requests.save()
+ self.assertEqual(FAKE_UUID, mock_update.call_args_list[0][0][1])
+ self.assertEqual(
+ [{'count': 1, 'is_new': False,
+ 'alias_name': 'alias_1',
+ 'spec': [{'foo': 'bar'}, {'baz': 'bat'}],
+ 'request_id': FAKE_REQUEST_UUID}],
+ jsonutils.loads(
+ mock_update.call_args_list[0][0][2]['pci_requests']))
+
+ @mock.patch('nova.db.instance_extra_update_by_uuid')
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid')
+ def test_save_and_reload(self, mock_get, mock_update):
+ database = {}
+
+ def _save(context, uuid, values):
+ database.setdefault(uuid, {'instance_uuid': uuid})
+ database[uuid].update(values)
+
+ def _get(context, uuid, columns):
+ return database.get(uuid, {})
+
+ mock_update.side_effect = _save
+ mock_get.side_effect = _get
+
+ requests = objects.InstancePCIRequests(
+ context=self.context,
+ instance_uuid=FAKE_UUID,
+ requests=[objects.InstancePCIRequest(
+ count=1, is_new=False, alias_name='alias_1',
+ spec=[{'foo': 'bar'}])])
+ requests.save()
+ _requests = objects.InstancePCIRequests.get_by_instance_uuid(
+ self.context, FAKE_UUID)
+
+ self.assertEqual(requests.instance_uuid, _requests.instance_uuid)
+ self.assertEqual(len(requests.requests), len(_requests.requests))
+ self.assertEqual(requests.requests[0].alias_name,
+ _requests.requests[0].alias_name)
+
+ def test_new_compatibility(self):
+ request = objects.InstancePCIRequest(is_new=False)
+ self.assertFalse(request.new)
+
+ def test_backport_1_0(self):
+ requests = objects.InstancePCIRequests(
+ requests=[objects.InstancePCIRequest(count=1,
+ request_id=FAKE_UUID),
+ objects.InstancePCIRequest(count=2,
+ request_id=FAKE_UUID)])
+ primitive = requests.obj_to_primitive(target_version='1.0')
+ backported = objects.InstancePCIRequests.obj_from_primitive(
+ primitive)
+ self.assertEqual('1.0', backported.VERSION)
+ self.assertEqual(2, len(backported.requests))
+ self.assertFalse(backported.requests[0].obj_attr_is_set('request_id'))
+ self.assertFalse(backported.requests[1].obj_attr_is_set('request_id'))
+
+
+class TestInstancePCIRequests(test_objects._LocalTest,
+ _TestInstancePCIRequests):
+ pass
+
+
+class TestRemoteInstancePCIRequests(test_objects._RemoteTest,
+ _TestInstancePCIRequests):
+ pass
diff --git a/nova/tests/unit/objects/test_keypair.py b/nova/tests/unit/objects/test_keypair.py
new file mode 100644
index 0000000000..da0d52831d
--- /dev/null
+++ b/nova/tests/unit/objects/test_keypair.py
@@ -0,0 +1,109 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.utils import timeutils
+
+from nova import db
+from nova import exception
+from nova.objects import keypair
+from nova.tests.unit.objects import test_objects
+
+NOW = timeutils.utcnow().replace(microsecond=0)
+fake_keypair = {
+ 'created_at': NOW,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'id': 123,
+ 'name': 'foo-keypair',
+ 'user_id': 'fake-user',
+ 'fingerprint': 'fake-fingerprint',
+ 'public_key': 'fake\npublic\nkey',
+ }
+
+
+class _TestKeyPairObject(object):
+ def test_get_by_name(self):
+ self.mox.StubOutWithMock(db, 'key_pair_get')
+ db.key_pair_get(self.context, 'fake-user', 'foo-keypair').AndReturn(
+ fake_keypair)
+ self.mox.ReplayAll()
+ keypair_obj = keypair.KeyPair.get_by_name(self.context, 'fake-user',
+ 'foo-keypair')
+ self.compare_obj(keypair_obj, fake_keypair)
+
+ def test_create(self):
+ self.mox.StubOutWithMock(db, 'key_pair_create')
+ db.key_pair_create(self.context,
+ {'name': 'foo-keypair',
+ 'public_key': 'keydata'}).AndReturn(fake_keypair)
+ self.mox.ReplayAll()
+ keypair_obj = keypair.KeyPair()
+ keypair_obj.name = 'foo-keypair'
+ keypair_obj.public_key = 'keydata'
+ keypair_obj.create(self.context)
+ self.compare_obj(keypair_obj, fake_keypair)
+
+ def test_recreate_fails(self):
+ self.mox.StubOutWithMock(db, 'key_pair_create')
+ db.key_pair_create(self.context,
+ {'name': 'foo-keypair',
+ 'public_key': 'keydata'}).AndReturn(fake_keypair)
+ self.mox.ReplayAll()
+ keypair_obj = keypair.KeyPair()
+ keypair_obj.name = 'foo-keypair'
+ keypair_obj.public_key = 'keydata'
+ keypair_obj.create(self.context)
+ self.assertRaises(exception.ObjectActionError, keypair_obj.create,
+ self.context)
+
+ def test_destroy(self):
+ self.mox.StubOutWithMock(db, 'key_pair_destroy')
+ db.key_pair_destroy(self.context, 'fake-user', 'foo-keypair')
+ self.mox.ReplayAll()
+ keypair_obj = keypair.KeyPair()
+ keypair_obj.id = 123
+ keypair_obj.user_id = 'fake-user'
+ keypair_obj.name = 'foo-keypair'
+ keypair_obj.destroy(self.context)
+
+ def test_destroy_by_name(self):
+ self.mox.StubOutWithMock(db, 'key_pair_destroy')
+ db.key_pair_destroy(self.context, 'fake-user', 'foo-keypair')
+ self.mox.ReplayAll()
+ keypair.KeyPair.destroy_by_name(self.context, 'fake-user',
+ 'foo-keypair')
+
+ def test_get_by_user(self):
+ self.mox.StubOutWithMock(db, 'key_pair_get_all_by_user')
+ self.mox.StubOutWithMock(db, 'key_pair_count_by_user')
+ db.key_pair_get_all_by_user(self.context, 'fake-user').AndReturn(
+ [fake_keypair])
+ db.key_pair_count_by_user(self.context, 'fake-user').AndReturn(1)
+ self.mox.ReplayAll()
+ keypairs = keypair.KeyPairList.get_by_user(self.context, 'fake-user')
+ self.assertEqual(1, len(keypairs))
+ self.compare_obj(keypairs[0], fake_keypair)
+ self.assertEqual(1, keypair.KeyPairList.get_count_by_user(self.context,
+ 'fake-user'))
+
+
+class TestMigrationObject(test_objects._LocalTest,
+ _TestKeyPairObject):
+ pass
+
+
+class TestRemoteMigrationObject(test_objects._RemoteTest,
+ _TestKeyPairObject):
+ pass
diff --git a/nova/tests/unit/objects/test_migration.py b/nova/tests/unit/objects/test_migration.py
new file mode 100644
index 0000000000..eeb57db618
--- /dev/null
+++ b/nova/tests/unit/objects/test_migration.py
@@ -0,0 +1,184 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.utils import timeutils
+
+from nova import context
+from nova import db
+from nova import exception
+from nova.objects import migration
+from nova.tests.unit import fake_instance
+from nova.tests.unit.objects import test_objects
+
+
+NOW = timeutils.utcnow().replace(microsecond=0)
+
+
+def fake_db_migration(**updates):
+ db_instance = {
+ 'created_at': NOW,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'id': 123,
+ 'source_compute': 'compute-source',
+ 'dest_compute': 'compute-dest',
+ 'source_node': 'node-source',
+ 'dest_node': 'node-dest',
+ 'dest_host': 'host-dest',
+ 'old_instance_type_id': 42,
+ 'new_instance_type_id': 84,
+ 'instance_uuid': 'fake-uuid',
+ 'status': 'migrating',
+ }
+
+ if updates:
+ db_instance.update(updates)
+ return db_instance
+
+
+class _TestMigrationObject(object):
+ def test_get_by_id(self):
+ ctxt = context.get_admin_context()
+ fake_migration = fake_db_migration()
+ self.mox.StubOutWithMock(db, 'migration_get')
+ db.migration_get(ctxt, fake_migration['id']).AndReturn(fake_migration)
+ self.mox.ReplayAll()
+ mig = migration.Migration.get_by_id(ctxt, fake_migration['id'])
+ self.compare_obj(mig, fake_migration)
+
+ def test_get_by_instance_and_status(self):
+ ctxt = context.get_admin_context()
+ fake_migration = fake_db_migration()
+ self.mox.StubOutWithMock(db, 'migration_get_by_instance_and_status')
+ db.migration_get_by_instance_and_status(ctxt,
+ fake_migration['id'],
+ 'migrating'
+ ).AndReturn(fake_migration)
+ self.mox.ReplayAll()
+ mig = migration.Migration.get_by_instance_and_status(
+ ctxt, fake_migration['id'], 'migrating')
+ self.compare_obj(mig, fake_migration)
+
+ def test_create(self):
+ ctxt = context.get_admin_context()
+ fake_migration = fake_db_migration()
+ self.mox.StubOutWithMock(db, 'migration_create')
+ db.migration_create(ctxt, {'source_compute': 'foo'}).AndReturn(
+ fake_migration)
+ self.mox.ReplayAll()
+ mig = migration.Migration()
+ mig.source_compute = 'foo'
+ mig.create(ctxt)
+ self.assertEqual(fake_migration['dest_compute'], mig.dest_compute)
+
+ def test_recreate_fails(self):
+ ctxt = context.get_admin_context()
+ fake_migration = fake_db_migration()
+ self.mox.StubOutWithMock(db, 'migration_create')
+ db.migration_create(ctxt, {'source_compute': 'foo'}).AndReturn(
+ fake_migration)
+ self.mox.ReplayAll()
+ mig = migration.Migration()
+ mig.source_compute = 'foo'
+ mig.create(ctxt)
+ self.assertRaises(exception.ObjectActionError, mig.create,
+ self.context)
+
+ def test_save(self):
+ ctxt = context.get_admin_context()
+ fake_migration = fake_db_migration()
+ self.mox.StubOutWithMock(db, 'migration_update')
+ db.migration_update(ctxt, 123, {'source_compute': 'foo'}
+ ).AndReturn(fake_migration)
+ self.mox.ReplayAll()
+ mig = migration.Migration()
+ mig.id = 123
+ mig.source_compute = 'foo'
+ mig.save(ctxt)
+ self.assertEqual(fake_migration['dest_compute'], mig.dest_compute)
+
+ def test_instance(self):
+ ctxt = context.get_admin_context()
+ fake_migration = fake_db_migration()
+ fake_inst = fake_instance.fake_db_instance()
+ self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
+ db.instance_get_by_uuid(ctxt, fake_migration['instance_uuid'],
+ columns_to_join=['info_cache',
+ 'security_groups'],
+ use_slave=False
+ ).AndReturn(fake_inst)
+ mig = migration.Migration._from_db_object(ctxt,
+ migration.Migration(),
+ fake_migration)
+ mig._context = ctxt
+ self.mox.ReplayAll()
+ self.assertEqual(mig.instance.host, fake_inst['host'])
+
+ def test_get_unconfirmed_by_dest_compute(self):
+ ctxt = context.get_admin_context()
+ fake_migration = fake_db_migration()
+ db_migrations = [fake_migration, dict(fake_migration, id=456)]
+ self.mox.StubOutWithMock(
+ db, 'migration_get_unconfirmed_by_dest_compute')
+ db.migration_get_unconfirmed_by_dest_compute(
+ ctxt, 'window', 'foo',
+ use_slave=False).AndReturn(db_migrations)
+ self.mox.ReplayAll()
+ migrations = (
+ migration.MigrationList.get_unconfirmed_by_dest_compute(
+ ctxt, 'window', 'foo', use_slave=False))
+ self.assertEqual(2, len(migrations))
+ for index, db_migration in enumerate(db_migrations):
+ self.compare_obj(migrations[index], db_migration)
+
+ def test_get_in_progress_by_host_and_node(self):
+ ctxt = context.get_admin_context()
+ fake_migration = fake_db_migration()
+ db_migrations = [fake_migration, dict(fake_migration, id=456)]
+ self.mox.StubOutWithMock(
+ db, 'migration_get_in_progress_by_host_and_node')
+ db.migration_get_in_progress_by_host_and_node(
+ ctxt, 'host', 'node').AndReturn(db_migrations)
+ self.mox.ReplayAll()
+ migrations = (
+ migration.MigrationList.get_in_progress_by_host_and_node(
+ ctxt, 'host', 'node'))
+ self.assertEqual(2, len(migrations))
+ for index, db_migration in enumerate(db_migrations):
+ self.compare_obj(migrations[index], db_migration)
+
+ def test_get_by_filters(self):
+ ctxt = context.get_admin_context()
+ fake_migration = fake_db_migration()
+ db_migrations = [fake_migration, dict(fake_migration, id=456)]
+ self.mox.StubOutWithMock(
+ db, 'migration_get_all_by_filters')
+ filters = {'foo': 'bar'}
+ db.migration_get_all_by_filters(ctxt, filters).AndReturn(db_migrations)
+ self.mox.ReplayAll()
+ migrations = migration.MigrationList.get_by_filters(ctxt, filters)
+ self.assertEqual(2, len(migrations))
+ for index, db_migration in enumerate(db_migrations):
+ self.compare_obj(migrations[index], db_migration)
+
+
+class TestMigrationObject(test_objects._LocalTest,
+ _TestMigrationObject):
+ pass
+
+
+class TestRemoteMigrationObject(test_objects._RemoteTest,
+ _TestMigrationObject):
+ pass
diff --git a/nova/tests/unit/objects/test_network.py b/nova/tests/unit/objects/test_network.py
new file mode 100644
index 0000000000..0ba6ed06e9
--- /dev/null
+++ b/nova/tests/unit/objects/test_network.py
@@ -0,0 +1,232 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import netaddr
+
+from nova.objects import network as network_obj
+from nova.tests.unit.objects import test_objects
+
+
+fake_network = {
+ 'deleted': False,
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'id': 1,
+ 'label': 'Fake Network',
+ 'injected': False,
+ 'cidr': '192.168.1.0/24',
+ 'cidr_v6': '1234::/64',
+ 'multi_host': False,
+ 'netmask': '255.255.255.0',
+ 'gateway': '192.168.1.1',
+ 'broadcast': '192.168.1.255',
+ 'netmask_v6': 64,
+ 'gateway_v6': '1234::1',
+ 'bridge': 'br100',
+ 'bridge_interface': 'eth0',
+ 'dns1': '8.8.8.8',
+ 'dns2': '8.8.4.4',
+ 'vlan': None,
+ 'vpn_public_address': None,
+ 'vpn_public_port': None,
+ 'vpn_private_address': None,
+ 'dhcp_start': '192.168.1.10',
+ 'rxtx_base': None,
+ 'project_id': None,
+ 'priority': None,
+ 'host': None,
+ 'uuid': 'fake-uuid',
+ 'mtu': None,
+ 'dhcp_server': '192.168.1.1',
+ 'enable_dhcp': True,
+ 'share_address': False,
+}
+
+
+class _TestNetworkObject(object):
+ def _compare(self, obj, db_obj):
+ for field in obj.fields:
+ db_val = db_obj[field]
+ obj_val = obj[field]
+ if isinstance(obj_val, netaddr.IPAddress):
+ obj_val = str(obj_val)
+ if isinstance(obj_val, netaddr.IPNetwork):
+ obj_val = str(obj_val)
+ if field == 'netmask_v6':
+ db_val = str(netaddr.IPNetwork('1::/%i' % db_val).netmask)
+ self.assertEqual(db_val, obj_val)
+
+ @mock.patch('nova.db.network_get')
+ def test_get_by_id(self, get):
+ get.return_value = fake_network
+ network = network_obj.Network.get_by_id(self.context, 'foo')
+ self._compare(network, fake_network)
+ get.assert_called_once_with(self.context, 'foo',
+ project_only='allow_none')
+
+ @mock.patch('nova.db.network_get_by_uuid')
+ def test_get_by_uuid(self, get):
+ get.return_value = fake_network
+ network = network_obj.Network.get_by_uuid(self.context, 'foo')
+ self._compare(network, fake_network)
+ get.assert_called_once_with(self.context, 'foo')
+
+ @mock.patch('nova.db.network_get_by_cidr')
+ def test_get_by_cidr(self, get):
+ get.return_value = fake_network
+ network = network_obj.Network.get_by_cidr(self.context,
+ '192.168.1.0/24')
+ self._compare(network, fake_network)
+ get.assert_called_once_with(self.context, '192.168.1.0/24')
+
+ @mock.patch('nova.db.network_update')
+ @mock.patch('nova.db.network_set_host')
+ def test_save(self, set_host, update):
+ result = dict(fake_network, injected=True)
+ network = network_obj.Network._from_db_object(self.context,
+ network_obj.Network(),
+ fake_network)
+ network.obj_reset_changes()
+ network.save()
+ network.label = 'bar'
+ update.return_value = result
+ network.save()
+ update.assert_called_once_with(self.context, network.id,
+ {'label': 'bar'})
+ self.assertFalse(set_host.called)
+ self._compare(network, result)
+
+ @mock.patch('nova.db.network_update')
+ @mock.patch('nova.db.network_set_host')
+ @mock.patch('nova.db.network_get')
+ def test_save_with_host(self, get, set_host, update):
+ result = dict(fake_network, injected=True)
+ network = network_obj.Network._from_db_object(self.context,
+ network_obj.Network(),
+ fake_network)
+ network.obj_reset_changes()
+ network.host = 'foo'
+ get.return_value = result
+ network.save()
+ set_host.assert_called_once_with(self.context, network.id, 'foo')
+ self.assertFalse(update.called)
+ self._compare(network, result)
+
+ @mock.patch('nova.db.network_update')
+ @mock.patch('nova.db.network_set_host')
+ def test_save_with_host_and_other(self, set_host, update):
+ result = dict(fake_network, injected=True)
+ network = network_obj.Network._from_db_object(self.context,
+ network_obj.Network(),
+ fake_network)
+ network.obj_reset_changes()
+ network.host = 'foo'
+ network.label = 'bar'
+ update.return_value = result
+ network.save()
+ set_host.assert_called_once_with(self.context, network.id, 'foo')
+ update.assert_called_once_with(self.context, network.id,
+ {'label': 'bar'})
+ self._compare(network, result)
+
+ @mock.patch('nova.db.network_associate')
+ def test_associate(self, associate):
+ network_obj.Network.associate(self.context, 'project',
+ network_id=123)
+ associate.assert_called_once_with(self.context, 'project',
+ network_id=123, force=False)
+
+ @mock.patch('nova.db.network_disassociate')
+ def test_disassociate(self, disassociate):
+ network_obj.Network.disassociate(self.context, 123,
+ host=True, project=True)
+ disassociate.assert_called_once_with(self.context, 123, True, True)
+
+ @mock.patch('nova.db.network_create_safe')
+ def test_create(self, create):
+ create.return_value = fake_network
+ network = network_obj.Network(context=self.context, label='foo')
+ network.create()
+ create.assert_called_once_with(self.context, {'label': 'foo'})
+ self._compare(network, fake_network)
+
+ @mock.patch('nova.db.network_delete_safe')
+ def test_destroy(self, delete):
+ network = network_obj.Network(context=self.context, id=123)
+ network.destroy()
+ delete.assert_called_once_with(self.context, 123)
+ self.assertTrue(network.deleted)
+ self.assertNotIn('deleted', network.obj_what_changed())
+
+ @mock.patch('nova.db.network_get_all')
+ def test_get_all(self, get_all):
+ get_all.return_value = [fake_network]
+ networks = network_obj.NetworkList.get_all(self.context)
+ self.assertEqual(1, len(networks))
+ get_all.assert_called_once_with(self.context, 'allow_none')
+ self._compare(networks[0], fake_network)
+
+ @mock.patch('nova.db.network_get_all_by_uuids')
+ def test_get_all_by_uuids(self, get_all):
+ get_all.return_value = [fake_network]
+ networks = network_obj.NetworkList.get_by_uuids(self.context,
+ ['foo'])
+ self.assertEqual(1, len(networks))
+ get_all.assert_called_once_with(self.context, ['foo'], 'allow_none')
+ self._compare(networks[0], fake_network)
+
+ @mock.patch('nova.db.network_get_all_by_host')
+ def test_get_all_by_host(self, get_all):
+ get_all.return_value = [fake_network]
+ networks = network_obj.NetworkList.get_by_host(self.context, 'host')
+ self.assertEqual(1, len(networks))
+ get_all.assert_called_once_with(self.context, 'host')
+ self._compare(networks[0], fake_network)
+
+ @mock.patch('nova.db.network_in_use_on_host')
+ def test_in_use_on_host(self, in_use):
+ in_use.return_value = True
+ self.assertTrue(network_obj.Network.in_use_on_host(self.context,
+ 123, 'foo'))
+ in_use.assert_called_once_with(self.context, 123, 'foo')
+
+ @mock.patch('nova.db.project_get_networks')
+ def test_get_all_by_project(self, get_nets):
+ get_nets.return_value = [fake_network]
+ networks = network_obj.NetworkList.get_by_project(self.context, 123)
+ self.assertEqual(1, len(networks))
+ get_nets.assert_called_once_with(self.context, 123, associate=True)
+ self._compare(networks[0], fake_network)
+
+ def test_compat_version_1_1(self):
+ network = network_obj.Network._from_db_object(self.context,
+ network_obj.Network(),
+ fake_network)
+ primitive = network.obj_to_primitive(target_version='1.1')
+ self.assertNotIn('mtu', primitive)
+ self.assertNotIn('enable_dhcp', primitive)
+ self.assertNotIn('dhcp_server', primitive)
+ self.assertNotIn('share_address', primitive)
+
+
+class TestNetworkObject(test_objects._LocalTest,
+ _TestNetworkObject):
+ pass
+
+
+class TestRemoteNetworkObject(test_objects._RemoteTest,
+ _TestNetworkObject):
+ pass
diff --git a/nova/tests/unit/objects/test_network_request.py b/nova/tests/unit/objects/test_network_request.py
new file mode 100644
index 0000000000..bbe6010226
--- /dev/null
+++ b/nova/tests/unit/objects/test_network_request.py
@@ -0,0 +1,102 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import objects
+from nova.tests.unit.objects import test_objects
+
+
+FAKE_UUID = '0C5C9AD2-F967-4E92-A7F3-24410F697440'
+
+
+class _TestNetworkRequestObject(object):
+ def test_basic(self):
+ request = objects.NetworkRequest()
+ request.network_id = '456'
+ request.address = '1.2.3.4'
+ request.port_id = FAKE_UUID
+
+ def test_load(self):
+ request = objects.NetworkRequest()
+ self.assertIsNone(request.port_id)
+
+ def test_to_tuple_neutron(self):
+ request = objects.NetworkRequest(network_id='123',
+ address='1.2.3.4',
+ port_id=FAKE_UUID,
+ )
+ with mock.patch('nova.utils.is_neutron', return_value=True):
+ self.assertEqual(('123', '1.2.3.4', FAKE_UUID, None),
+ request.to_tuple())
+
+ def test_to_tuple_nova(self):
+ request = objects.NetworkRequest(network_id='123',
+ address='1.2.3.4',
+ port_id=FAKE_UUID)
+ with mock.patch('nova.utils.is_neutron', return_value=False):
+ self.assertEqual(('123', '1.2.3.4'),
+ request.to_tuple())
+
+ def test_from_tuple_neutron(self):
+ request = objects.NetworkRequest.from_tuple(
+ ('123', '1.2.3.4', FAKE_UUID, None))
+ self.assertEqual('123', request.network_id)
+ self.assertEqual('1.2.3.4', str(request.address))
+ self.assertEqual(FAKE_UUID, request.port_id)
+
+ def test_from_tuple_neutron_without_pci_request_id(self):
+ request = objects.NetworkRequest.from_tuple(
+ ('123', '1.2.3.4', FAKE_UUID))
+ self.assertEqual('123', request.network_id)
+ self.assertEqual('1.2.3.4', str(request.address))
+ self.assertEqual(FAKE_UUID, request.port_id)
+
+ def test_from_tuple_nova(self):
+ request = objects.NetworkRequest.from_tuple(
+ ('123', '1.2.3.4'))
+ self.assertEqual('123', request.network_id)
+ self.assertEqual('1.2.3.4', str(request.address))
+ self.assertIsNone(request.port_id)
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_list_as_tuples(self, is_neutron):
+ requests = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id='123'),
+ objects.NetworkRequest(network_id='456')])
+ self.assertEqual(
+ [('123', None, None, None), ('456', None, None, None)],
+ requests.as_tuples())
+
+ def test_is_single_unspecified(self):
+ requests = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(network_id='123')])
+ self.assertFalse(requests.is_single_unspecified)
+ requests = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(),
+ objects.NetworkRequest()])
+ self.assertFalse(requests.is_single_unspecified)
+ requests = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest()])
+ self.assertTrue(requests.is_single_unspecified)
+
+
+class TestNetworkRequestObject(test_objects._LocalTest,
+ _TestNetworkRequestObject):
+ pass
+
+
+class TestNetworkRequestRemoteObject(test_objects._RemoteTest,
+ _TestNetworkRequestObject):
+ pass
diff --git a/nova/tests/unit/objects/test_objects.py b/nova/tests/unit/objects/test_objects.py
new file mode 100644
index 0000000000..f7eb53808b
--- /dev/null
+++ b/nova/tests/unit/objects/test_objects.py
@@ -0,0 +1,1126 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import datetime
+import hashlib
+import inspect
+import os
+import pprint
+
+import mock
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import six
+from testtools import matchers
+
+from nova.conductor import rpcapi as conductor_rpcapi
+from nova import context
+from nova import exception
+from nova import objects
+from nova.objects import base
+from nova.objects import fields
+from nova.openstack.common import log
+from nova import rpc
+from nova import test
+from nova.tests.unit import fake_notifier
+from nova import utils
+
+
+LOG = log.getLogger(__name__)
+
+
+class MyOwnedObject(base.NovaPersistentObject, base.NovaObject):
+ VERSION = '1.0'
+ fields = {'baz': fields.Field(fields.Integer())}
+
+
+class MyObj(base.NovaPersistentObject, base.NovaObject):
+ VERSION = '1.6'
+ fields = {'foo': fields.Field(fields.Integer()),
+ 'bar': fields.Field(fields.String()),
+ 'missing': fields.Field(fields.String()),
+ 'readonly': fields.Field(fields.Integer(), read_only=True),
+ 'rel_object': fields.ObjectField('MyOwnedObject', nullable=True)
+ }
+
+ @staticmethod
+ def _from_db_object(context, obj, db_obj):
+ self = MyObj()
+ self.foo = db_obj['foo']
+ self.bar = db_obj['bar']
+ self.missing = db_obj['missing']
+ self.readonly = 1
+ return self
+
+ def obj_load_attr(self, attrname):
+ setattr(self, attrname, 'loaded!')
+
+ @base.remotable_classmethod
+ def query(cls, context):
+ obj = cls(foo=1, bar='bar')
+ obj.obj_reset_changes()
+ return obj
+
+ @base.remotable
+ def marco(self, context):
+ return 'polo'
+
+ @base.remotable
+ def _update_test(self, context):
+ if context.project_id == 'alternate':
+ self.bar = 'alternate-context'
+ else:
+ self.bar = 'updated'
+
+ @base.remotable
+ def save(self, context):
+ self.obj_reset_changes()
+
+ @base.remotable
+ def refresh(self, context):
+ self.foo = 321
+ self.bar = 'refreshed'
+ self.obj_reset_changes()
+
+ @base.remotable
+ def modify_save_modify(self, context):
+ self.bar = 'meow'
+ self.save()
+ self.foo = 42
+ self.rel_object = MyOwnedObject(baz=42)
+
+ def obj_make_compatible(self, primitive, target_version):
+ # NOTE(danms): Simulate an older version that had a different
+ # format for the 'bar' attribute
+ if target_version == '1.1' and 'bar' in primitive:
+ primitive['bar'] = 'old%s' % primitive['bar']
+
+
+class MyObjDiffVers(MyObj):
+ VERSION = '1.5'
+
+ @classmethod
+ def obj_name(cls):
+ return 'MyObj'
+
+
+class MyObj2(object):
+ @classmethod
+ def obj_name(cls):
+ return 'MyObj'
+
+ @base.remotable_classmethod
+ def query(cls, *args, **kwargs):
+ pass
+
+
+class RandomMixInWithNoFields(object):
+ """Used to test object inheritance using a mixin that has no fields."""
+ pass
+
+
+class TestSubclassedObject(RandomMixInWithNoFields, MyObj):
+ fields = {'new_field': fields.Field(fields.String())}
+
+
+class TestMetaclass(test.TestCase):
+ def test_obj_tracking(self):
+
+ @six.add_metaclass(base.NovaObjectMetaclass)
+ class NewBaseClass(object):
+ VERSION = '1.0'
+ fields = {}
+
+ @classmethod
+ def obj_name(cls):
+ return cls.__name__
+
+ class Fake1TestObj1(NewBaseClass):
+ @classmethod
+ def obj_name(cls):
+ return 'fake1'
+
+ class Fake1TestObj2(Fake1TestObj1):
+ pass
+
+ class Fake1TestObj3(Fake1TestObj1):
+ VERSION = '1.1'
+
+ class Fake2TestObj1(NewBaseClass):
+ @classmethod
+ def obj_name(cls):
+ return 'fake2'
+
+ class Fake1TestObj4(Fake1TestObj3):
+ VERSION = '1.2'
+
+ class Fake2TestObj2(Fake2TestObj1):
+ VERSION = '1.1'
+
+ class Fake1TestObj5(Fake1TestObj1):
+ VERSION = '1.1'
+
+ # Newest versions first in the list. Duplicate versions take the
+ # newest object.
+ expected = {'fake1': [Fake1TestObj4, Fake1TestObj5, Fake1TestObj2],
+ 'fake2': [Fake2TestObj2, Fake2TestObj1]}
+ self.assertEqual(expected, NewBaseClass._obj_classes)
+ # The following should work, also.
+ self.assertEqual(expected, Fake1TestObj1._obj_classes)
+ self.assertEqual(expected, Fake1TestObj2._obj_classes)
+ self.assertEqual(expected, Fake1TestObj3._obj_classes)
+ self.assertEqual(expected, Fake1TestObj4._obj_classes)
+ self.assertEqual(expected, Fake1TestObj5._obj_classes)
+ self.assertEqual(expected, Fake2TestObj1._obj_classes)
+ self.assertEqual(expected, Fake2TestObj2._obj_classes)
+
+ def test_field_checking(self):
+ def create_class(field):
+ class TestField(base.NovaObject):
+ VERSION = '1.5'
+ fields = {'foo': field()}
+ return TestField
+
+ create_class(fields.IPV4AndV6AddressField)
+ self.assertRaises(exception.ObjectFieldInvalid,
+ create_class, fields.IPV4AndV6Address)
+ self.assertRaises(exception.ObjectFieldInvalid,
+ create_class, int)
+
+
+class TestObjToPrimitive(test.TestCase):
+
+ def test_obj_to_primitive_list(self):
+ class MyObjElement(base.NovaObject):
+ fields = {'foo': fields.IntegerField()}
+
+ def __init__(self, foo):
+ super(MyObjElement, self).__init__()
+ self.foo = foo
+
+ class MyList(base.ObjectListBase, base.NovaObject):
+ fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
+
+ mylist = MyList()
+ mylist.objects = [MyObjElement(1), MyObjElement(2), MyObjElement(3)]
+ self.assertEqual([1, 2, 3],
+ [x['foo'] for x in base.obj_to_primitive(mylist)])
+
+ def test_obj_to_primitive_dict(self):
+ myobj = MyObj(foo=1, bar='foo')
+ self.assertEqual({'foo': 1, 'bar': 'foo'},
+ base.obj_to_primitive(myobj))
+
+ def test_obj_to_primitive_recursive(self):
+ class MyList(base.ObjectListBase, base.NovaObject):
+ fields = {'objects': fields.ListOfObjectsField('MyObj')}
+
+ mylist = MyList(objects=[MyObj(), MyObj()])
+ for i, value in enumerate(mylist):
+ value.foo = i
+ self.assertEqual([{'foo': 0}, {'foo': 1}],
+ base.obj_to_primitive(mylist))
+
+ def test_obj_to_primitive_with_ip_addr(self):
+ class TestObject(base.NovaObject):
+ fields = {'addr': fields.IPAddressField(),
+ 'cidr': fields.IPNetworkField()}
+
+ obj = TestObject(addr='1.2.3.4', cidr='1.1.1.1/16')
+ self.assertEqual({'addr': '1.2.3.4', 'cidr': '1.1.1.1/16'},
+ base.obj_to_primitive(obj))
+
+
+class TestObjMakeList(test.TestCase):
+
+ def test_obj_make_list(self):
+ class MyList(base.ObjectListBase, base.NovaObject):
+ pass
+
+ db_objs = [{'foo': 1, 'bar': 'baz', 'missing': 'banana'},
+ {'foo': 2, 'bar': 'bat', 'missing': 'apple'},
+ ]
+ mylist = base.obj_make_list('ctxt', MyList(), MyObj, db_objs)
+ self.assertEqual(2, len(mylist))
+ self.assertEqual('ctxt', mylist._context)
+ for index, item in enumerate(mylist):
+ self.assertEqual(db_objs[index]['foo'], item.foo)
+ self.assertEqual(db_objs[index]['bar'], item.bar)
+ self.assertEqual(db_objs[index]['missing'], item.missing)
+
+
+def compare_obj(test, obj, db_obj, subs=None, allow_missing=None,
+ comparators=None):
+ """Compare a NovaObject and a dict-like database object.
+
+ This automatically converts TZ-aware datetimes and iterates over
+ the fields of the object.
+
+ :param:test: The TestCase doing the comparison
+ :param:obj: The NovaObject to examine
+ :param:db_obj: The dict-like database object to use as reference
+ :param:subs: A dict of objkey=dbkey field substitutions
+ :param:allow_missing: A list of fields that may not be in db_obj
+ :param:comparators: Map of comparator functions to use for certain fields
+ """
+
+ if subs is None:
+ subs = {}
+ if allow_missing is None:
+ allow_missing = []
+ if comparators is None:
+ comparators = {}
+
+ for key in obj.fields:
+ if key in allow_missing and not obj.obj_attr_is_set(key):
+ continue
+ obj_val = obj[key]
+ db_key = subs.get(key, key)
+ db_val = db_obj[db_key]
+ if isinstance(obj_val, datetime.datetime):
+ obj_val = obj_val.replace(tzinfo=None)
+
+ if key in comparators:
+ comparator = comparators[key]
+ comparator(db_val, obj_val)
+ else:
+ test.assertEqual(db_val, obj_val)
+
+
+class _BaseTestCase(test.TestCase):
+ def setUp(self):
+ super(_BaseTestCase, self).setUp()
+ self.remote_object_calls = list()
+ self.user_id = 'fake-user'
+ self.project_id = 'fake-project'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
+ def compare_obj(self, obj, db_obj, subs=None, allow_missing=None,
+ comparators=None):
+ compare_obj(self, obj, db_obj, subs=subs, allow_missing=allow_missing,
+ comparators=comparators)
+
+ def json_comparator(self, expected, obj_val):
+ # json-ify an object field for comparison with its db str
+ # equivalent
+ self.assertEqual(expected, jsonutils.dumps(obj_val))
+
+ def str_comparator(self, expected, obj_val):
+ """Compare an object field to a string in the db by performing
+ a simple coercion on the object field value.
+ """
+ self.assertEqual(expected, str(obj_val))
+
+ def assertNotIsInstance(self, obj, cls, msg=None):
+ """Python < v2.7 compatibility. Assert 'not isinstance(obj, cls)."""
+ try:
+ f = super(_BaseTestCase, self).assertNotIsInstance
+ except AttributeError:
+ self.assertThat(obj,
+ matchers.Not(matchers.IsInstance(cls)),
+ message=msg or '')
+ else:
+ f(obj, cls, msg=msg)
+
+
+class _LocalTest(_BaseTestCase):
+ def setUp(self):
+ super(_LocalTest, self).setUp()
+ # Just in case
+ base.NovaObject.indirection_api = None
+
+ def assertRemotes(self):
+ self.assertEqual(self.remote_object_calls, [])
+
+
+@contextlib.contextmanager
+def things_temporarily_local():
+ # Temporarily go non-remote so the conductor handles
+ # this request directly
+ _api = base.NovaObject.indirection_api
+ base.NovaObject.indirection_api = None
+ yield
+ base.NovaObject.indirection_api = _api
+
+
+class _RemoteTest(_BaseTestCase):
+ def _testable_conductor(self):
+ self.conductor_service = self.start_service(
+ 'conductor', manager='nova.conductor.manager.ConductorManager')
+ self.remote_object_calls = list()
+
+ orig_object_class_action = \
+ self.conductor_service.manager.object_class_action
+ orig_object_action = \
+ self.conductor_service.manager.object_action
+
+ def fake_object_class_action(*args, **kwargs):
+ self.remote_object_calls.append((kwargs.get('objname'),
+ kwargs.get('objmethod')))
+ with things_temporarily_local():
+ result = orig_object_class_action(*args, **kwargs)
+ return (base.NovaObject.obj_from_primitive(result, context=args[0])
+ if isinstance(result, base.NovaObject) else result)
+ self.stubs.Set(self.conductor_service.manager, 'object_class_action',
+ fake_object_class_action)
+
+ def fake_object_action(*args, **kwargs):
+ self.remote_object_calls.append((kwargs.get('objinst'),
+ kwargs.get('objmethod')))
+ with things_temporarily_local():
+ result = orig_object_action(*args, **kwargs)
+ return result
+ self.stubs.Set(self.conductor_service.manager, 'object_action',
+ fake_object_action)
+
+ # Things are remoted by default in this session
+ base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI()
+
+ # To make sure local and remote contexts match
+ self.stubs.Set(rpc.RequestContextSerializer,
+ 'serialize_context',
+ lambda s, c: c)
+ self.stubs.Set(rpc.RequestContextSerializer,
+ 'deserialize_context',
+ lambda s, c: c)
+
+ def setUp(self):
+ super(_RemoteTest, self).setUp()
+ self._testable_conductor()
+
+ def assertRemotes(self):
+ self.assertNotEqual(self.remote_object_calls, [])
+
+
+class _TestObject(object):
+ def test_object_attrs_in_init(self):
+ # Spot check a few
+ objects.Instance
+ objects.InstanceInfoCache
+ objects.SecurityGroup
+ # Now check the test one in this file. Should be newest version
+ self.assertEqual('1.6', objects.MyObj.VERSION)
+
+ def test_hydration_type_error(self):
+ primitive = {'nova_object.name': 'MyObj',
+ 'nova_object.namespace': 'nova',
+ 'nova_object.version': '1.5',
+ 'nova_object.data': {'foo': 'a'}}
+ self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)
+
+ def test_hydration(self):
+ primitive = {'nova_object.name': 'MyObj',
+ 'nova_object.namespace': 'nova',
+ 'nova_object.version': '1.5',
+ 'nova_object.data': {'foo': 1}}
+ real_method = MyObj._obj_from_primitive
+
+ def _obj_from_primitive(*args):
+ return real_method(*args)
+
+ with mock.patch.object(MyObj, '_obj_from_primitive') as ofp:
+ ofp.side_effect = _obj_from_primitive
+ obj = MyObj.obj_from_primitive(primitive)
+ ofp.assert_called_once_with(None, '1.5', primitive)
+ self.assertEqual(obj.foo, 1)
+
+ def test_hydration_version_different(self):
+ primitive = {'nova_object.name': 'MyObj',
+ 'nova_object.namespace': 'nova',
+ 'nova_object.version': '1.2',
+ 'nova_object.data': {'foo': 1}}
+ obj = MyObj.obj_from_primitive(primitive)
+ self.assertEqual(obj.foo, 1)
+ self.assertEqual('1.2', obj.VERSION)
+
+ def test_hydration_bad_ns(self):
+ primitive = {'nova_object.name': 'MyObj',
+ 'nova_object.namespace': 'foo',
+ 'nova_object.version': '1.5',
+ 'nova_object.data': {'foo': 1}}
+ self.assertRaises(exception.UnsupportedObjectError,
+ MyObj.obj_from_primitive, primitive)
+
+ def test_dehydration(self):
+ expected = {'nova_object.name': 'MyObj',
+ 'nova_object.namespace': 'nova',
+ 'nova_object.version': '1.6',
+ 'nova_object.data': {'foo': 1}}
+ obj = MyObj(foo=1)
+ obj.obj_reset_changes()
+ self.assertEqual(obj.obj_to_primitive(), expected)
+
+ def test_object_property(self):
+ obj = MyObj(foo=1)
+ self.assertEqual(obj.foo, 1)
+
+ def test_object_property_type_error(self):
+ obj = MyObj()
+
+ def fail():
+ obj.foo = 'a'
+ self.assertRaises(ValueError, fail)
+
+ def test_object_dict_syntax(self):
+ obj = MyObj(foo=123, bar='bar')
+ self.assertEqual(obj['foo'], 123)
+ self.assertEqual(sorted(obj.items(), key=lambda x: x[0]),
+ [('bar', 'bar'), ('foo', 123)])
+ self.assertEqual(sorted(list(obj.iteritems()), key=lambda x: x[0]),
+ [('bar', 'bar'), ('foo', 123)])
+
+ def test_load(self):
+ obj = MyObj()
+ self.assertEqual(obj.bar, 'loaded!')
+
+ def test_load_in_base(self):
+ class Foo(base.NovaObject):
+ fields = {'foobar': fields.Field(fields.Integer())}
+ obj = Foo()
+ with self.assertRaisesRegexp(NotImplementedError, ".*foobar.*"):
+ obj.foobar
+
+ def test_loaded_in_primitive(self):
+ obj = MyObj(foo=1)
+ obj.obj_reset_changes()
+ self.assertEqual(obj.bar, 'loaded!')
+ expected = {'nova_object.name': 'MyObj',
+ 'nova_object.namespace': 'nova',
+ 'nova_object.version': '1.6',
+ 'nova_object.changes': ['bar'],
+ 'nova_object.data': {'foo': 1,
+ 'bar': 'loaded!'}}
+ self.assertEqual(obj.obj_to_primitive(), expected)
+
+ def test_changes_in_primitive(self):
+ obj = MyObj(foo=123)
+ self.assertEqual(obj.obj_what_changed(), set(['foo']))
+ primitive = obj.obj_to_primitive()
+ self.assertIn('nova_object.changes', primitive)
+ obj2 = MyObj.obj_from_primitive(primitive)
+ self.assertEqual(obj2.obj_what_changed(), set(['foo']))
+ obj2.obj_reset_changes()
+ self.assertEqual(obj2.obj_what_changed(), set())
+
+ def test_obj_class_from_name(self):
+ obj = base.NovaObject.obj_class_from_name('MyObj', '1.5')
+ self.assertEqual('1.5', obj.VERSION)
+
+ def test_obj_class_from_name_latest_compatible(self):
+ obj = base.NovaObject.obj_class_from_name('MyObj', '1.1')
+ self.assertEqual('1.6', obj.VERSION)
+
+ def test_unknown_objtype(self):
+ self.assertRaises(exception.UnsupportedObjectError,
+ base.NovaObject.obj_class_from_name, 'foo', '1.0')
+
+ def test_obj_class_from_name_supported_version(self):
+ error = None
+ try:
+ base.NovaObject.obj_class_from_name('MyObj', '1.25')
+ except exception.IncompatibleObjectVersion as error:
+ pass
+
+ self.assertIsNotNone(error)
+ self.assertEqual('1.6', error.kwargs['supported'])
+
+ def test_with_alternate_context(self):
+ ctxt1 = context.RequestContext('foo', 'foo')
+ ctxt2 = context.RequestContext('bar', 'alternate')
+ obj = MyObj.query(ctxt1)
+ obj._update_test(ctxt2)
+ self.assertEqual(obj.bar, 'alternate-context')
+ self.assertRemotes()
+
+ def test_orphaned_object(self):
+ obj = MyObj.query(self.context)
+ obj._context = None
+ self.assertRaises(exception.OrphanedObjectError,
+ obj._update_test)
+ self.assertRemotes()
+
+ def test_changed_1(self):
+ obj = MyObj.query(self.context)
+ obj.foo = 123
+ self.assertEqual(obj.obj_what_changed(), set(['foo']))
+ obj._update_test(self.context)
+ self.assertEqual(obj.obj_what_changed(), set(['foo', 'bar']))
+ self.assertEqual(obj.foo, 123)
+ self.assertRemotes()
+
+ def test_changed_2(self):
+ obj = MyObj.query(self.context)
+ obj.foo = 123
+ self.assertEqual(obj.obj_what_changed(), set(['foo']))
+ obj.save(self.context)
+ self.assertEqual(obj.obj_what_changed(), set([]))
+ self.assertEqual(obj.foo, 123)
+ self.assertRemotes()
+
+ def test_changed_3(self):
+ obj = MyObj.query(self.context)
+ obj.foo = 123
+ self.assertEqual(obj.obj_what_changed(), set(['foo']))
+ obj.refresh(self.context)
+ self.assertEqual(obj.obj_what_changed(), set([]))
+ self.assertEqual(obj.foo, 321)
+ self.assertEqual(obj.bar, 'refreshed')
+ self.assertRemotes()
+
+ def test_changed_4(self):
+ obj = MyObj.query(self.context)
+ obj.bar = 'something'
+ self.assertEqual(obj.obj_what_changed(), set(['bar']))
+ obj.modify_save_modify(self.context)
+ self.assertEqual(obj.obj_what_changed(), set(['foo', 'rel_object']))
+ self.assertEqual(obj.foo, 42)
+ self.assertEqual(obj.bar, 'meow')
+ self.assertIsInstance(obj.rel_object, MyOwnedObject)
+ self.assertRemotes()
+
+ def test_changed_with_sub_object(self):
+ class ParentObject(base.NovaObject):
+ fields = {'foo': fields.IntegerField(),
+ 'bar': fields.ObjectField('MyObj'),
+ }
+ obj = ParentObject()
+ self.assertEqual(set(), obj.obj_what_changed())
+ obj.foo = 1
+ self.assertEqual(set(['foo']), obj.obj_what_changed())
+ bar = MyObj()
+ obj.bar = bar
+ self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
+ obj.obj_reset_changes()
+ self.assertEqual(set(), obj.obj_what_changed())
+ bar.foo = 1
+ self.assertEqual(set(['bar']), obj.obj_what_changed())
+
+ def test_static_result(self):
+ obj = MyObj.query(self.context)
+ self.assertEqual(obj.bar, 'bar')
+ result = obj.marco()
+ self.assertEqual(result, 'polo')
+ self.assertRemotes()
+
+ def test_updates(self):
+ obj = MyObj.query(self.context)
+ self.assertEqual(obj.foo, 1)
+ obj._update_test()
+ self.assertEqual(obj.bar, 'updated')
+ self.assertRemotes()
+
+ def test_base_attributes(self):
+ dt = datetime.datetime(1955, 11, 5)
+ obj = MyObj(created_at=dt, updated_at=dt, deleted_at=None,
+ deleted=False)
+ expected = {'nova_object.name': 'MyObj',
+ 'nova_object.namespace': 'nova',
+ 'nova_object.version': '1.6',
+ 'nova_object.changes':
+ ['deleted', 'created_at', 'deleted_at', 'updated_at'],
+ 'nova_object.data':
+ {'created_at': timeutils.isotime(dt),
+ 'updated_at': timeutils.isotime(dt),
+ 'deleted_at': None,
+ 'deleted': False,
+ }
+ }
+ self.assertEqual(obj.obj_to_primitive(), expected)
+
+ def test_contains(self):
+ obj = MyObj()
+ self.assertNotIn('foo', obj)
+ obj.foo = 1
+ self.assertIn('foo', obj)
+ self.assertNotIn('does_not_exist', obj)
+
+ def test_obj_attr_is_set(self):
+ obj = MyObj(foo=1)
+ self.assertTrue(obj.obj_attr_is_set('foo'))
+ self.assertFalse(obj.obj_attr_is_set('bar'))
+ self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang')
+
+ def test_get(self):
+ obj = MyObj(foo=1)
+ # Foo has value, should not get the default
+ self.assertEqual(obj.get('foo', 2), 1)
+ # Foo has value, should return the value without error
+ self.assertEqual(obj.get('foo'), 1)
+ # Bar is not loaded, so we should get the default
+ self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded')
+ # Bar without a default should lazy-load
+ self.assertEqual(obj.get('bar'), 'loaded!')
+ # Bar now has a default, but loaded value should be returned
+ self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!')
+ # Invalid attribute should raise AttributeError
+ self.assertRaises(AttributeError, obj.get, 'nothing')
+ # ...even with a default
+ self.assertRaises(AttributeError, obj.get, 'nothing', 3)
+
+ def test_object_inheritance(self):
+ base_fields = base.NovaPersistentObject.fields.keys()
+ myobj_fields = ['foo', 'bar', 'missing',
+ 'readonly', 'rel_object'] + base_fields
+ myobj3_fields = ['new_field']
+ self.assertTrue(issubclass(TestSubclassedObject, MyObj))
+ self.assertEqual(len(myobj_fields), len(MyObj.fields))
+ self.assertEqual(set(myobj_fields), set(MyObj.fields.keys()))
+ self.assertEqual(len(myobj_fields) + len(myobj3_fields),
+ len(TestSubclassedObject.fields))
+ self.assertEqual(set(myobj_fields) | set(myobj3_fields),
+ set(TestSubclassedObject.fields.keys()))
+
+ def test_get_changes(self):
+ obj = MyObj()
+ self.assertEqual({}, obj.obj_get_changes())
+ obj.foo = 123
+ self.assertEqual({'foo': 123}, obj.obj_get_changes())
+ obj.bar = 'test'
+ self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
+ obj.obj_reset_changes()
+ self.assertEqual({}, obj.obj_get_changes())
+
+ def test_obj_fields(self):
+ class TestObj(base.NovaObject):
+ fields = {'foo': fields.Field(fields.Integer())}
+ obj_extra_fields = ['bar']
+
+ @property
+ def bar(self):
+ return 'this is bar'
+
+ obj = TestObj()
+ self.assertEqual(['foo', 'bar'], obj.obj_fields)
+
+ def test_obj_constructor(self):
+ obj = MyObj(context=self.context, foo=123, bar='abc')
+ self.assertEqual(123, obj.foo)
+ self.assertEqual('abc', obj.bar)
+ self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
+
+ def test_obj_read_only(self):
+ obj = MyObj(context=self.context, foo=123, bar='abc')
+ obj.readonly = 1
+ self.assertRaises(exception.ReadOnlyFieldError, setattr,
+ obj, 'readonly', 2)
+
+ def test_obj_repr(self):
+ obj = MyObj(foo=123)
+ self.assertEqual('MyObj(bar=<?>,created_at=<?>,deleted=<?>,'
+ 'deleted_at=<?>,foo=123,missing=<?>,readonly=<?>,'
+ 'rel_object=<?>,updated_at=<?>)', repr(obj))
+
+
+class TestObject(_LocalTest, _TestObject):
+ pass
+
+
+class TestRemoteObject(_RemoteTest, _TestObject):
+ def test_major_version_mismatch(self):
+ MyObj2.VERSION = '2.0'
+ self.assertRaises(exception.IncompatibleObjectVersion,
+ MyObj2.query, self.context)
+
+ def test_minor_version_greater(self):
+ MyObj2.VERSION = '1.7'
+ self.assertRaises(exception.IncompatibleObjectVersion,
+ MyObj2.query, self.context)
+
+ def test_minor_version_less(self):
+ MyObj2.VERSION = '1.2'
+ obj = MyObj2.query(self.context)
+ self.assertEqual(obj.bar, 'bar')
+ self.assertRemotes()
+
+ def test_compat(self):
+ MyObj2.VERSION = '1.1'
+ obj = MyObj2.query(self.context)
+ self.assertEqual('oldbar', obj.bar)
+
+
+class TestObjectListBase(test.TestCase):
+ def test_list_like_operations(self):
+ class MyElement(base.NovaObject):
+ fields = {'foo': fields.IntegerField()}
+
+ def __init__(self, foo):
+ super(MyElement, self).__init__()
+ self.foo = foo
+
+ class Foo(base.ObjectListBase, base.NovaObject):
+ fields = {'objects': fields.ListOfObjectsField('MyElement')}
+
+ objlist = Foo(context='foo',
+ objects=[MyElement(1), MyElement(2), MyElement(3)])
+ self.assertEqual(list(objlist), objlist.objects)
+ self.assertEqual(len(objlist), 3)
+ self.assertIn(objlist.objects[0], objlist)
+ self.assertEqual(list(objlist[:1]), [objlist.objects[0]])
+ self.assertEqual(objlist[:1]._context, 'foo')
+ self.assertEqual(objlist[2], objlist.objects[2])
+ self.assertEqual(objlist.count(objlist.objects[0]), 1)
+ self.assertEqual(objlist.index(objlist.objects[1]), 1)
+ objlist.sort(key=lambda x: x.foo, reverse=True)
+ self.assertEqual([3, 2, 1],
+ [x.foo for x in objlist])
+
+ def test_serialization(self):
+ class Foo(base.ObjectListBase, base.NovaObject):
+ fields = {'objects': fields.ListOfObjectsField('Bar')}
+
+ class Bar(base.NovaObject):
+ fields = {'foo': fields.Field(fields.String())}
+
+ obj = Foo(objects=[])
+ for i in 'abc':
+ bar = Bar(foo=i)
+ obj.objects.append(bar)
+
+ obj2 = base.NovaObject.obj_from_primitive(obj.obj_to_primitive())
+ self.assertFalse(obj is obj2)
+ self.assertEqual([x.foo for x in obj],
+ [y.foo for y in obj2])
+
+ def _test_object_list_version_mappings(self, list_obj_class):
+ # Figure out what sort of object this list is for
+ list_field = list_obj_class.fields['objects']
+ item_obj_field = list_field._type._element_type
+ item_obj_name = item_obj_field._type._obj_name
+
+ # Look through all object classes of this type and make sure that
+ # the versions we find are covered by the parent list class
+ for item_class in base.NovaObject._obj_classes[item_obj_name]:
+ self.assertIn(
+ item_class.VERSION,
+ list_obj_class.child_versions.values(),
+ 'Version mapping is incomplete for %s' % (
+ list_obj_class.__name__))
+
+ def test_object_version_mappings(self):
+ # Find all object list classes and make sure that they at least handle
+ # all the current object versions
+ for obj_classes in base.NovaObject._obj_classes.values():
+ for obj_class in obj_classes:
+ if issubclass(obj_class, base.ObjectListBase):
+ self._test_object_list_version_mappings(obj_class)
+
+ def test_list_changes(self):
+ class Foo(base.ObjectListBase, base.NovaObject):
+ fields = {'objects': fields.ListOfObjectsField('Bar')}
+
+ class Bar(base.NovaObject):
+ fields = {'foo': fields.StringField()}
+
+ obj = Foo(objects=[])
+ self.assertEqual(set(['objects']), obj.obj_what_changed())
+ obj.objects.append(Bar(foo='test'))
+ self.assertEqual(set(['objects']), obj.obj_what_changed())
+ obj.obj_reset_changes()
+ # This should still look dirty because the child is dirty
+ self.assertEqual(set(['objects']), obj.obj_what_changed())
+ obj.objects[0].obj_reset_changes()
+ # This should now look clean because the child is clean
+ self.assertEqual(set(), obj.obj_what_changed())
+
+ def test_initialize_objects(self):
+ class Foo(base.ObjectListBase, base.NovaObject):
+ fields = {'objects': fields.ListOfObjectsField('Bar')}
+
+ class Bar(base.NovaObject):
+ fields = {'foo': fields.StringField()}
+
+ obj = Foo()
+ self.assertEqual([], obj.objects)
+ self.assertEqual(set(), obj.obj_what_changed())
+
+ def test_obj_repr(self):
+ class Foo(base.ObjectListBase, base.NovaObject):
+ fields = {'objects': fields.ListOfObjectsField('Bar')}
+
+ class Bar(base.NovaObject):
+ fields = {'uuid': fields.StringField()}
+
+ obj = Foo(objects=[Bar(uuid='fake-uuid')])
+ self.assertEqual('Foo(objects=[Bar(fake-uuid)])', repr(obj))
+
+
+class TestObjectSerializer(_BaseTestCase):
+ def test_serialize_entity_primitive(self):
+ ser = base.NovaObjectSerializer()
+ for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
+ self.assertEqual(thing, ser.serialize_entity(None, thing))
+
+ def test_deserialize_entity_primitive(self):
+ ser = base.NovaObjectSerializer()
+ for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
+ self.assertEqual(thing, ser.deserialize_entity(None, thing))
+
+ def test_deserialize_entity_newer_version(self):
+ ser = base.NovaObjectSerializer()
+ ser._conductor = mock.Mock()
+ ser._conductor.object_backport.return_value = 'backported'
+ obj = MyObj()
+ obj.VERSION = '1.25'
+ primitive = obj.obj_to_primitive()
+ result = ser.deserialize_entity(self.context, primitive)
+ self.assertEqual('backported', result)
+ ser._conductor.object_backport.assert_called_with(self.context,
+ primitive,
+ '1.6')
+
+ def test_object_serialization(self):
+ ser = base.NovaObjectSerializer()
+ obj = MyObj()
+ primitive = ser.serialize_entity(self.context, obj)
+ self.assertIn('nova_object.name', primitive)
+ obj2 = ser.deserialize_entity(self.context, primitive)
+ self.assertIsInstance(obj2, MyObj)
+ self.assertEqual(self.context, obj2._context)
+
+ def test_object_serialization_iterables(self):
+ ser = base.NovaObjectSerializer()
+ obj = MyObj()
+ for iterable in (list, tuple, set):
+ thing = iterable([obj])
+ primitive = ser.serialize_entity(self.context, thing)
+ self.assertEqual(1, len(primitive))
+ for item in primitive:
+ self.assertNotIsInstance(item, base.NovaObject)
+ thing2 = ser.deserialize_entity(self.context, primitive)
+ self.assertEqual(1, len(thing2))
+ for item in thing2:
+ self.assertIsInstance(item, MyObj)
+ # dict case
+ thing = {'key': obj}
+ primitive = ser.serialize_entity(self.context, thing)
+ self.assertEqual(1, len(primitive))
+ for item in primitive.itervalues():
+ self.assertNotIsInstance(item, base.NovaObject)
+ thing2 = ser.deserialize_entity(self.context, primitive)
+ self.assertEqual(1, len(thing2))
+ for item in thing2.itervalues():
+ self.assertIsInstance(item, MyObj)
+
+ # object-action updates dict case
+ thing = {'foo': obj.obj_to_primitive()}
+ primitive = ser.serialize_entity(self.context, thing)
+ self.assertEqual(thing, primitive)
+ thing2 = ser.deserialize_entity(self.context, thing)
+ self.assertIsInstance(thing2['foo'], base.NovaObject)
+
+
+# NOTE(danms): The hashes in this list should only be changed if
+# they come with a corresponding version bump in the affected
+# objects
+object_data = {
+ 'Agent': '1.0-c4ff8a833aee8ae44ab8aed1a171273d',
+ 'AgentList': '1.0-31f07426a729311a42ff7f6246e76e25',
+ 'Aggregate': '1.1-f5d477be06150529a9b2d27cc49030b5',
+ 'AggregateList': '1.2-4b02a285b8612bfb86a96ff80052fb0a',
+ 'BandwidthUsage': '1.1-bdab751673947f0ac7de108540a1a8ce',
+ 'BandwidthUsageList': '1.1-76898106a9db393cd5f42c557389c507',
+ 'BlockDeviceMapping': '1.4-9968ffe513e7672484b0f528b034cd0f',
+ 'BlockDeviceMappingList': '1.5-83767968de6e91e9705bddaae02bc649',
+ 'ComputeNode': '1.6-d2ea9b8f4a6e95ff6a683266eebddbff',
+ 'ComputeNodeList': '1.6-205aa2ea08d49f6ce87df1fcd2407b4e',
+ 'DNSDomain': '1.0-5bdc288d7c3b723ce86ede998fd5c9ba',
+ 'DNSDomainList': '1.0-cfb3e7e82be661501c31099523154db4',
+ 'EC2InstanceMapping': '1.0-627baaf4b12c9067200979bdc4558a99',
+ 'EC2SnapshotMapping': '1.0-26cf315be1f8abab4289d4147671c836',
+ 'EC2VolumeMapping': '1.0-2f8c3bf077c65a425294ec2b361c9143',
+ 'FixedIP': '1.6-2472964d39e50da67202109eb85cd173',
+ 'FixedIPList': '1.6-f2f740de66bc2d90627004bd311690ad',
+ 'Flavor': '1.1-096cfd023c35d07542cf732fb29b45e4',
+ 'FlavorList': '1.1-a3d5551267cb8f62ff38ded125900721',
+ 'FloatingIP': '1.6-27eb68b7c9c620dd5f0561b5a3be0e82',
+ 'FloatingIPList': '1.7-f376f63ed99243f9d90841b7f6732bbf',
+ 'HVSpec': '1.0-c4d8377cc4fe519930e60c1d8265a142',
+ 'Instance': '1.16-b00c09fb92ae80b393943f56e84abd9c',
+ 'InstanceAction': '1.1-6b1d0a6dbd522b5a83c20757ec659663',
+ 'InstanceActionEvent': '1.1-42dbdba74bd06e0619ca75cd3397cd1b',
+ 'InstanceActionEventList': '1.0-1d5cc958171d6ce07383c2ad6208318e',
+ 'InstanceActionList': '1.0-368410fdb8d69ae20c495308535d6266',
+ 'InstanceExternalEvent': '1.0-f1134523654407a875fd59b80f759ee7',
+ 'InstanceFault': '1.2-313438e37e9d358f3566c85f6ddb2d3e',
+ 'InstanceFaultList': '1.1-aeb598ffd0cd6aa61fca7adf0f5e900d',
+ 'InstanceGroup': '1.9-95ece99f092e8f4f88327cdbb44162c9',
+ 'InstanceGroupList': '1.6-c6b78f3c9d9080d33c08667e80589817',
+ 'InstanceInfoCache': '1.5-ef64b604498bfa505a8c93747a9d8b2f',
+ 'InstanceList': '1.10-03dd7839cd11cff75c3661c9e4227900',
+ 'InstanceNUMACell': '1.1-8d2a13c8360cc9ea1b68c9c6c4476857',
+ 'InstanceNUMATopology': '1.1-86b95d263c4c68411d44c6741b8d2bb0',
+ 'InstancePCIRequest': '1.1-e082d174f4643e5756ba098c47c1510f',
+ 'InstancePCIRequests': '1.1-bc7c6684d8579ee49d6a3b8aef756918',
+ 'KeyPair': '1.1-3410f51950d052d861c11946a6ae621a',
+ 'KeyPairList': '1.0-71132a568cc5d078ba1748a9c02c87b8',
+ 'Migration': '1.1-67c47726c2c71422058cd9d149d6d3ed',
+ 'MigrationList': '1.1-8c5f678edc72a592d591a13b35e54353',
+ 'MyObj': '1.6-55bfc22259fd3df239e4a49fa3552c93',
+ 'MyOwnedObject': '1.0-0f3d6c028543d7f3715d121db5b8e298',
+ 'Network': '1.2-2ea21ede5e45bb80e7b7ac7106915c4e',
+ 'NetworkList': '1.2-aa4ad23f035b97a41732ea8b3445fc5e',
+ 'NetworkRequest': '1.1-f31192f5a725017707f989585e12d7dc',
+ 'NetworkRequestList': '1.1-beeab521ac9450f1f5ef4eaa945a783c',
+ 'PciDevice': '1.2-29e35c3199f3b98ce66e5d1212612818',
+ 'PciDeviceList': '1.1-2896df4f5b06579e5f35adba5fcae9db',
+ 'Quotas': '1.1-7897deef00e6cd3095c8916f68d24418',
+ 'QuotasNoOp': '1.1-4b06fd721c586b907ddd6543a00d6c2f',
+ 'S3ImageMapping': '1.0-9225943a44a91ad0349b9fd8bd3f3ce2',
+ 'SecurityGroup': '1.1-bba0e72865e0953793e796571692453b',
+ 'SecurityGroupList': '1.0-528e6448adfeeb78921ebeda499ab72f',
+ 'SecurityGroupRule': '1.1-a9175baf7664439af1a16c2010b55576',
+ 'SecurityGroupRuleList': '1.1-667fca3a9928f23d2d10e61962c55f3c',
+ 'Service': '1.5-82bbfd46a744a9c89bc44b47a1b81683',
+ 'ServiceList': '1.3-4a1a5822dea268d0d7f892f5106bb2e1',
+ 'TestSubclassedObject': '1.6-c63feb2f2533b7d075490c04a2cc10dd',
+ 'VirtualInterface': '1.0-10fdac4c704102b6d57d6936d6d790d2',
+ 'VirtualInterfaceList': '1.0-accbf02628a8063c1d885077a2bf49b6',
+}
+
+
# Frozen map of parent object name -> {child object name: child VERSION}.
# test_relationships below compares this against the relationships computed
# from the live object registry, so a child-object version bump that is not
# reflected here (plus a parent bump) fails the test.
object_relationships = {
    'BlockDeviceMapping': {'Instance': '1.16'},
    'FixedIP': {'Instance': '1.16', 'Network': '1.2',
                'VirtualInterface': '1.0',
                'FloatingIPList': '1.7'},
    'FloatingIP': {'FixedIP': '1.6'},
    'Instance': {'InstanceFault': '1.2',
                 'InstanceInfoCache': '1.5',
                 'InstanceNUMATopology': '1.1',
                 'PciDeviceList': '1.1',
                 'SecurityGroupList': '1.0',
                 'InstancePCIRequests': '1.1'},
    'MyObj': {'MyOwnedObject': '1.0'},
    'SecurityGroupRule': {'SecurityGroup': '1.1'},
    'Service': {'ComputeNode': '1.6'},
    'TestSubclassedObject': {'MyOwnedObject': '1.0'}
}
+
+
class TestObjectVersions(test.TestCase):
    """Detect mechanical changes to NovaObjects that require version bumps.

    Each registered object is reduced to a fingerprint built from its
    fields and the signatures of its remotable methods; the fingerprints
    and the parent/child relationships are compared against the frozen
    copies stored in object_data and object_relationships above.
    """

    def _find_remotable_method(self, cls, thing, parent_was_remotable=False):
        """Follow a chain of remotable things down to the original function."""
        if isinstance(thing, classmethod):
            return self._find_remotable_method(cls, thing.__get__(None, cls))
        elif inspect.ismethod(thing) and hasattr(thing, 'remotable'):
            return self._find_remotable_method(cls, thing.original_fn,
                                               parent_was_remotable=True)
        elif parent_was_remotable:
            # We must be the first non-remotable thing underneath a stack of
            # remotable things (i.e. the actual implementation method)
            return thing
        else:
            # This means the top-level thing never hit a remotable layer
            return None

    def _get_fingerprint(self, obj_name):
        """Return '<VERSION>-<md5>' for the registered object obj_name."""
        obj_class = base.NovaObject._obj_classes[obj_name][0]
        # NOTE: named obj_fields (not fields) to avoid shadowing the
        # fields module used by _build_tree() below.
        obj_fields = sorted(obj_class.fields.items())
        methods = []
        for name in dir(obj_class):
            thing = getattr(obj_class, name)
            if inspect.ismethod(thing) or isinstance(thing, classmethod):
                method = self._find_remotable_method(obj_class, thing)
                if method:
                    methods.append((name, inspect.getargspec(method)))
        methods.sort()
        # NOTE(danms): Things that need a version bump are any fields
        # and their types, or the signatures of any remotable methods.
        # Of course, these are just the mechanical changes we can detect,
        # but many other things may require a version bump (method behavior
        # and return value changes, for example).
        if hasattr(obj_class, 'child_versions'):
            relevant_data = (obj_fields, methods, obj_class.child_versions)
        else:
            relevant_data = (obj_fields, methods)
        fingerprint = '%s-%s' % (obj_class.VERSION,
                                 hashlib.md5(str(relevant_data)).hexdigest())
        return fingerprint

    def test_versions(self):
        """Compare every object's fingerprint against the stored hash."""
        fingerprints = {}
        for obj_name in base.NovaObject._obj_classes:
            fingerprints[obj_name] = self._get_fingerprint(obj_name)

        if os.getenv('GENERATE_HASHES'):
            # NOTE: use open() in a context manager instead of the
            # deprecated py2-only file() builtin, which also leaked the
            # handle since it was never closed.
            with open('object_hashes.txt', 'w') as hashes_file:
                hashes_file.write(pprint.pformat(fingerprints))
            raise test.TestingException(
                'Generated hashes in object_hashes.txt')

        stored = set(object_data.items())
        computed = set(fingerprints.items())
        changed = stored.symmetric_difference(computed)
        expected = {}
        actual = {}
        # Only the names of changed objects matter here; the hash half of
        # each pair is unused (renamed from 'hash' to avoid shadowing the
        # builtin).
        for name, _hash in changed:
            expected[name] = object_data.get(name)
            actual[name] = fingerprints.get(name)

        self.assertEqual(expected, actual,
                         'Some objects have changed; please make sure the '
                         'versions have been bumped, and then update their '
                         'hashes here.')

    def _build_tree(self, tree, obj_class):
        """Recursively record obj_class's child objects and their versions."""
        obj_name = obj_class.obj_name()
        if obj_name in tree:
            return

        for name, field in obj_class.fields.items():
            if isinstance(field._type, fields.Object):
                sub_obj_name = field._type._obj_name
                sub_obj_class = base.NovaObject._obj_classes[sub_obj_name][0]
                self._build_tree(tree, sub_obj_class)
                tree.setdefault(obj_name, {})
                tree[obj_name][sub_obj_name] = sub_obj_class.VERSION

    def test_relationships(self):
        """Compare computed parent->child versions with the stored map."""
        tree = {}
        for obj_name in base.NovaObject._obj_classes.keys():
            self._build_tree(tree, base.NovaObject._obj_classes[obj_name][0])

        stored = set([(x, str(y)) for x, y in object_relationships.items()])
        computed = set([(x, str(y)) for x, y in tree.items()])
        changed = stored.symmetric_difference(computed)
        expected = {}
        actual = {}
        for name, deps in changed:
            expected[name] = object_relationships.get(name)
            actual[name] = tree.get(name)
        self.assertEqual(expected, actual,
                         'Some objects have changed dependencies. '
                         'Please make sure to bump the versions of '
                         'parent objects and provide a rule in their '
                         'obj_make_compatible() routines to backlevel '
                         'the child object.')

    def test_obj_make_compatible(self):
        # Iterate all object classes and verify that we can run
        # obj_make_compatible with every older version than current.
        # This doesn't actually test the data conversions, but it at least
        # makes sure the method doesn't blow up on something basic like
        # expecting the wrong version format.
        for obj_name in base.NovaObject._obj_classes:
            obj_class = base.NovaObject._obj_classes[obj_name][0]
            version = utils.convert_version_to_tuple(obj_class.VERSION)
            for n in range(version[1]):
                test_version = '%d.%d' % (version[0], n)
                LOG.info('testing obj: %s version: %s' %
                         (obj_name, test_version))
                obj_class().obj_to_primitive(target_version=test_version)
diff --git a/nova/tests/unit/objects/test_pci_device.py b/nova/tests/unit/objects/test_pci_device.py
new file mode 100644
index 0000000000..804709a262
--- /dev/null
+++ b/nova/tests/unit/objects/test_pci_device.py
@@ -0,0 +1,254 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from nova import context
+from nova import db
+from nova.objects import instance
+from nova.objects import pci_device
+from nova.tests.unit.objects import test_objects
+
# Minimal user-supplied field set accepted by PciDevice.create() in the
# tests below; keys beyond these land in extra_info (see
# test_pci_device_extra_info).
dev_dict = {
    'compute_node_id': 1,
    'address': 'a',
    'product_id': 'p',
    'vendor_id': 'v',
    'status': 'available'}
+
+
# Fake pci_devices table row as returned by the db API, including the
# common bookkeeping columns (created_at/updated_at/deleted_at/deleted).
fake_db_dev = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': None,
    'id': 1,
    'compute_node_id': 1,
    'address': 'a',
    'vendor_id': 'v',
    'product_id': 'p',
    'dev_type': 't',
    'status': 'available',
    'dev_id': 'i',
    'label': 'l',
    'instance_uuid': None,
    'extra_info': '{}',
    'request_id': None,
    }
+
+
# Second fake DB row (distinct id/address/vendor/product) so list tests can
# verify per-row hydration.
fake_db_dev_1 = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': None,
    'id': 2,
    'compute_node_id': 1,
    'address': 'a1',
    'vendor_id': 'v1',
    'product_id': 'p1',
    'dev_type': 't',
    'status': 'available',
    'dev_id': 'i',
    'label': 'l',
    'instance_uuid': None,
    'extra_info': '{}',
    'request_id': None,
    }
+
+
class _TestPciDeviceObject(object):
    """Shared PciDevice object tests, mixed into local and remote variants."""

    def _create_fake_instance(self):
        # Build an in-memory Instance carrying an empty PCI device list.
        self.inst = instance.Instance()
        self.inst.uuid = 'fake-inst-uuid'
        self.inst.pci_devices = pci_device.PciDeviceList()

    def _create_fake_pci_device(self):
        """Load self.pci_device from the stubbed DB row fake_db_dev."""
        ctxt = context.get_admin_context()
        self.mox.StubOutWithMock(db, 'pci_device_get_by_addr')
        db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev)
        self.mox.ReplayAll()
        self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a')

    def test_create_pci_device(self):
        """create() populates fields and marks them all as changed."""
        self.pci_device = pci_device.PciDevice.create(dev_dict)
        self.assertEqual(self.pci_device.product_id, 'p')
        self.assertEqual(self.pci_device.obj_what_changed(),
                         set(['compute_node_id', 'product_id', 'vendor_id',
                              'status', 'address', 'extra_info']))

    def test_pci_device_extra_info(self):
        """Unknown keys in the input dict are collected into extra_info."""
        self.dev_dict = copy.copy(dev_dict)
        self.dev_dict['k1'] = 'v1'
        self.dev_dict['k2'] = 'v2'
        self.pci_device = pci_device.PciDevice.create(self.dev_dict)
        extra_value = self.pci_device.extra_info
        self.assertEqual(extra_value.get('k1'), 'v1')
        self.assertEqual(set(extra_value.keys()), set(('k1', 'k2')))
        self.assertEqual(self.pci_device.obj_what_changed(),
                         set(['compute_node_id', 'address', 'product_id',
                              'vendor_id', 'status', 'extra_info']))

    def test_update_device(self):
        """update_device() applies changes and tracks the changed fields."""
        self.pci_device = pci_device.PciDevice.create(dev_dict)
        self.pci_device.obj_reset_changes()
        changes = {'product_id': 'p2', 'vendor_id': 'v2'}
        self.pci_device.update_device(changes)
        self.assertEqual(self.pci_device.vendor_id, 'v2')
        self.assertEqual(self.pci_device.obj_what_changed(),
                         set(['vendor_id', 'product_id']))

    def test_update_device_same_value(self):
        # NOTE(review): a key whose value is unchanged ('product_id' stays
        # 'p') still appears in obj_what_changed() after update_device().
        self.pci_device = pci_device.PciDevice.create(dev_dict)
        self.pci_device.obj_reset_changes()
        changes = {'product_id': 'p', 'vendor_id': 'v2'}
        self.pci_device.update_device(changes)
        self.assertEqual(self.pci_device.product_id, 'p')
        self.assertEqual(self.pci_device.vendor_id, 'v2')
        self.assertEqual(self.pci_device.obj_what_changed(),
                         set(['vendor_id', 'product_id']))

    def test_get_by_dev_addr(self):
        """get_by_dev_addr() hydrates from the DB with no pending changes."""
        ctxt = context.get_admin_context()
        self.mox.StubOutWithMock(db, 'pci_device_get_by_addr')
        db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev)
        self.mox.ReplayAll()
        self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
        self.assertEqual(self.pci_device.product_id, 'p')
        self.assertEqual(self.pci_device.obj_what_changed(), set())
        self.assertRemotes()

    def test_get_by_dev_id(self):
        """get_by_dev_id() hydrates from the DB with no pending changes."""
        ctxt = context.get_admin_context()
        self.mox.StubOutWithMock(db, 'pci_device_get_by_id')
        db.pci_device_get_by_id(ctxt, 1).AndReturn(fake_db_dev)
        self.mox.ReplayAll()
        self.pci_device = pci_device.PciDevice.get_by_dev_id(ctxt, 1)
        self.assertEqual(self.pci_device.product_id, 'p')
        self.assertEqual(self.pci_device.obj_what_changed(), set())
        self.assertRemotes()

    def test_save(self):
        """save() sends only the changed fields and adopts the DB result."""
        ctxt = context.get_admin_context()
        self._create_fake_pci_device()
        # The DB reply differs from what was written; the object must end
        # up reflecting the returned row, not the local values.
        return_dev = dict(fake_db_dev, status='available',
                          instance_uuid='fake-uuid-3')
        self.pci_device.status = 'allocated'
        self.pci_device.instance_uuid = 'fake-uuid-2'
        expected_updates = dict(status='allocated',
                                instance_uuid='fake-uuid-2')
        self.mox.StubOutWithMock(db, 'pci_device_update')
        db.pci_device_update(ctxt, 1, 'a',
                             expected_updates).AndReturn(return_dev)
        self.mox.ReplayAll()
        self.pci_device.save(ctxt)
        self.assertEqual(self.pci_device.status, 'available')
        self.assertEqual(self.pci_device.instance_uuid,
                         'fake-uuid-3')
        self.assertRemotes()

    def test_save_no_extra_info(self):
        """save() of a freshly created device persists extra_info as '{}'."""
        return_dev = dict(fake_db_dev, status='available',
                          instance_uuid='fake-uuid-3')

        def _fake_update(ctxt, node_id, addr, updates):
            # Capture what would be written so the test can inspect it.
            self.extra_info = updates.get('extra_info')
            return return_dev

        ctxt = context.get_admin_context()
        self.stubs.Set(db, 'pci_device_update', _fake_update)
        self.pci_device = pci_device.PciDevice.create(dev_dict)
        self.pci_device.save(ctxt)
        self.assertEqual(self.extra_info, '{}')

    def test_save_removed(self):
        """Saving a 'removed' device destroys the row; status -> 'deleted'."""
        ctxt = context.get_admin_context()
        self._create_fake_pci_device()
        self.pci_device.status = 'removed'
        self.mox.StubOutWithMock(db, 'pci_device_destroy')
        db.pci_device_destroy(ctxt, 1, 'a')
        self.mox.ReplayAll()
        self.pci_device.save(ctxt)
        self.assertEqual(self.pci_device.status, 'deleted')
        self.assertRemotes()

    def test_save_deleted(self):
        """Saving an already-'deleted' device touches neither DB call."""
        def _fake_destroy(ctxt, node_id, addr):
            self.called = True

        def _fake_update(ctxt, node_id, addr, updates):
            self.called = True
        ctxt = context.get_admin_context()
        self.stubs.Set(db, 'pci_device_destroy', _fake_destroy)
        self.stubs.Set(db, 'pci_device_update', _fake_update)
        self._create_fake_pci_device()
        self.pci_device.status = 'deleted'
        self.called = False
        self.pci_device.save(ctxt)
        self.assertEqual(self.called, False)
+
+
class TestPciDeviceObject(test_objects._LocalTest,
                          _TestPciDeviceObject):
    """Run the shared PciDevice tests with the _LocalTest base."""
    pass
+
+
class TestPciDeviceObjectRemote(test_objects._RemoteTest,
                                _TestPciDeviceObject):
    """Run the shared PciDevice tests with the _RemoteTest base."""
    pass
+
+
+fake_pci_devs = [fake_db_dev, fake_db_dev_1]
+
+
class _TestPciDeviceListObject(object):
    """Shared PciDeviceList tests, mixed into local and remote variants."""

    def test_get_by_compute_node(self):
        """get_by_compute_node() wraps each DB row in a PciDevice."""
        ctxt = context.get_admin_context()
        self.mox.StubOutWithMock(db, 'pci_device_get_all_by_node')
        db.pci_device_get_all_by_node(ctxt, 1).AndReturn(fake_pci_devs)
        self.mox.ReplayAll()
        devs = pci_device.PciDeviceList.get_by_compute_node(ctxt, 1)
        for i in range(len(fake_pci_devs)):
            self.assertIsInstance(devs[i], pci_device.PciDevice)
            self.assertEqual(fake_pci_devs[i]['vendor_id'], devs[i].vendor_id)
        self.assertRemotes()

    def test_get_by_instance_uuid(self):
        """get_by_instance_uuid() returns one PciDevice per DB row."""
        ctxt = context.get_admin_context()
        fake_db_1 = dict(fake_db_dev, address='a1',
                         status='allocated', instance_uuid='1')
        fake_db_2 = dict(fake_db_dev, address='a2',
                         status='allocated', instance_uuid='1')
        self.mox.StubOutWithMock(db, 'pci_device_get_all_by_instance_uuid')
        db.pci_device_get_all_by_instance_uuid(ctxt, '1').AndReturn(
            [fake_db_1, fake_db_2])
        self.mox.ReplayAll()
        devs = pci_device.PciDeviceList.get_by_instance_uuid(ctxt, '1')
        self.assertEqual(len(devs), 2)
        for i in range(len(fake_pci_devs)):
            self.assertIsInstance(devs[i], pci_device.PciDevice)
        # Both fakes are derived from fake_db_dev, so vendor_id is 'v'.
        self.assertEqual(devs[0].vendor_id, 'v')
        self.assertEqual(devs[1].vendor_id, 'v')
        self.assertRemotes()
+
+
class TestPciDeviceListObject(test_objects._LocalTest,
                              _TestPciDeviceListObject):
    """Run the shared PciDeviceList tests with the _LocalTest base."""
    pass
+
+
class TestPciDeviceListObjectRemote(test_objects._RemoteTest,
                                    _TestPciDeviceListObject):
    """Run the shared PciDeviceList tests with the _RemoteTest base."""
    pass
diff --git a/nova/tests/unit/objects/test_quotas.py b/nova/tests/unit/objects/test_quotas.py
new file mode 100644
index 0000000000..02781a7cd5
--- /dev/null
+++ b/nova/tests/unit/objects/test_quotas.py
@@ -0,0 +1,167 @@
+# Copyright 2013 Rackspace Hosting.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import context
+from nova.objects import quotas as quotas_obj
+from nova import quota
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.objects import test_objects
+
+
+QUOTAS = quota.QUOTAS
+
+
class TestQuotasModule(test.NoDBTestCase):
    """Tests for the module-level ids_from_instance() helper."""

    def setUp(self):
        super(TestQuotasModule, self).setUp()
        # The context's project/user ids deliberately differ from the
        # instance's, so each test can tell which source a value came from.
        self.context = context.RequestContext('fake_user1', 'fake_proj1')
        self.instance = fake_instance.fake_db_instance(
            project_id='fake_proj2', user_id='fake_user2')

    def test_ids_from_instance_non_admin(self):
        """Non-admin: project comes from the context, user from instance."""
        proj, user = quotas_obj.ids_from_instance(
            self.context, self.instance)
        self.assertEqual('fake_user2', user)
        self.assertEqual('fake_proj1', proj)

    def test_ids_from_instance_admin(self):
        """Admin: both project and user come from the instance itself."""
        admin_ctxt = self.context.elevated()
        proj, user = quotas_obj.ids_from_instance(
            admin_ctxt, self.instance)
        self.assertEqual('fake_user2', user)
        self.assertEqual('fake_proj2', proj)
+
+
class _TestQuotasObject(object):
    """Shared Quotas object tests, mixed into local and remote variants."""

    def setUp(self):
        super(_TestQuotasObject, self).setUp()
        # Context ids deliberately differ from the instance's ids so the
        # tests can tell which source each value was taken from.
        self.context = context.RequestContext('fake_user1', 'fake_proj1')
        self.instance = fake_instance.fake_db_instance(
            project_id='fake_proj2', user_id='fake_user2')

    def test_from_reservations(self):
        """Bare reservations yield a Quotas with no project/user ids."""
        fake_reservations = ['1', '2']
        quotas = quotas_obj.Quotas.from_reservations(
            self.context, fake_reservations)
        self.assertEqual(self.context, quotas._context)
        self.assertEqual(fake_reservations, quotas.reservations)
        self.assertIsNone(quotas.project_id)
        self.assertIsNone(quotas.user_id)

    def test_from_reservations_bogus(self):
        """Non-string reservation entries are rejected with ValueError."""
        fake_reservations = [_TestQuotasObject, _TestQuotasObject]
        self.assertRaises(ValueError,
                          quotas_obj.Quotas.from_reservations,
                          self.context, fake_reservations)

    def test_from_reservations_instance(self):
        """Non-admin + instance: project from context, user from instance."""
        fake_reservations = ['1', '2']
        quotas = quotas_obj.Quotas.from_reservations(
            self.context, fake_reservations,
            instance=self.instance)
        self.assertEqual(self.context, quotas._context)
        self.assertEqual(fake_reservations, quotas.reservations)
        self.assertEqual('fake_proj1', quotas.project_id)
        self.assertEqual('fake_user2', quotas.user_id)

    def test_from_reservations_instance_admin(self):
        """Admin + instance: both ids come from the instance."""
        fake_reservations = ['1', '2']
        elevated = self.context.elevated()
        quotas = quotas_obj.Quotas.from_reservations(
            elevated, fake_reservations,
            instance=self.instance)
        self.assertEqual(elevated, quotas._context)
        self.assertEqual(fake_reservations, quotas.reservations)
        self.assertEqual('fake_proj2', quotas.project_id)
        self.assertEqual('fake_user2', quotas.user_id)

    def test_reserve(self):
        """reserve() forwards all kwargs to QUOTAS and records the result."""
        fake_reservations = ['1', '2']
        quotas = quotas_obj.Quotas()

        self.mox.StubOutWithMock(QUOTAS, 'reserve')
        QUOTAS.reserve(self.context, expire='expire',
                       project_id='project_id', user_id='user_id',
                       moo='cow').AndReturn(fake_reservations)

        self.mox.ReplayAll()
        quotas.reserve(self.context, expire='expire',
                       project_id='project_id', user_id='user_id',
                       moo='cow')
        self.assertEqual(self.context, quotas._context)
        self.assertEqual(fake_reservations, quotas.reservations)
        self.assertEqual('project_id', quotas.project_id)
        self.assertEqual('user_id', quotas.user_id)

    def test_commit(self):
        """commit() commits the reservations and clears them on the object."""
        fake_reservations = ['1', '2']
        quotas = quotas_obj.Quotas.from_reservations(
            self.context, fake_reservations)

        self.mox.StubOutWithMock(QUOTAS, 'commit')
        QUOTAS.commit(self.context, fake_reservations,
                      project_id=None, user_id=None)

        self.mox.ReplayAll()
        quotas.commit()
        self.assertIsNone(quotas.reservations)

    def test_commit_none_reservations(self):
        # No expectations recorded: QUOTAS.commit must not be called when
        # there are no reservations (mox verify fails otherwise).
        quotas = quotas_obj.Quotas.from_reservations(self.context, None)
        self.mox.StubOutWithMock(QUOTAS, 'commit')
        self.mox.ReplayAll()
        quotas.commit()

    def test_rollback(self):
        """rollback() cancels the reservations and clears them."""
        fake_reservations = ['1', '2']
        quotas = quotas_obj.Quotas.from_reservations(
            self.context, fake_reservations)

        self.mox.StubOutWithMock(QUOTAS, 'rollback')
        QUOTAS.rollback(self.context, fake_reservations,
                        project_id=None, user_id=None)

        self.mox.ReplayAll()
        quotas.rollback()
        self.assertIsNone(quotas.reservations)

    def test_rollback_none_reservations(self):
        # No expectations recorded: QUOTAS.rollback must not be called when
        # there are no reservations.
        quotas = quotas_obj.Quotas.from_reservations(self.context, None)
        self.mox.StubOutWithMock(QUOTAS, 'rollback')
        self.mox.ReplayAll()
        quotas.rollback()

    @mock.patch('nova.db.quota_create')
    def test_create_limit(self, mock_create):
        """create_limit() is a thin pass-through to db.quota_create()."""
        quotas_obj.Quotas.create_limit(self.context, 'fake-project',
                                       'foo', 10, user_id='user')
        mock_create.assert_called_once_with(self.context, 'fake-project',
                                            'foo', 10, user_id='user')

    @mock.patch('nova.db.quota_update')
    def test_update_limit(self, mock_update):
        """update_limit() is a thin pass-through to db.quota_update()."""
        quotas_obj.Quotas.update_limit(self.context, 'fake-project',
                                       'foo', 10, user_id='user')
        mock_update.assert_called_once_with(self.context, 'fake-project',
                                            'foo', 10, user_id='user')
+
+
class TestQuotasObject(_TestQuotasObject, test_objects._LocalTest):
    """Run the shared Quotas tests with the _LocalTest base."""
    pass
+
+
class TestRemoteQuotasObject(_TestQuotasObject, test_objects._RemoteTest):
    """Run the shared Quotas tests with the _RemoteTest base."""
    pass
diff --git a/nova/tests/unit/objects/test_security_group.py b/nova/tests/unit/objects/test_security_group.py
new file mode 100644
index 0000000000..91966d0676
--- /dev/null
+++ b/nova/tests/unit/objects/test_security_group.py
@@ -0,0 +1,175 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import db
+from nova.objects import instance
+from nova.objects import security_group
+from nova.tests.unit.objects import test_objects
+
+
# Fake security_groups table row; note 'deleted' is None here, while the
# object layer exposes it as False (see _fix_deleted below).
fake_secgroup = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': None,
    'id': 1,
    'name': 'fake-name',
    'description': 'fake-desc',
    'user_id': 'fake-user',
    'project_id': 'fake-project',
    }
+
+
class _TestSecurityGroupObject(object):
    """Shared SecurityGroup object tests, mixed into local/remote variants."""

    def _fix_deleted(self, db_secgroup):
        # NOTE(danms): Account for the difference in 'deleted'
        return dict(db_secgroup.items(), deleted=False)

    def test_get(self):
        """get() hydrates all fields and leaves nothing marked changed."""
        self.mox.StubOutWithMock(db, 'security_group_get')
        db.security_group_get(self.context, 1).AndReturn(fake_secgroup)
        self.mox.ReplayAll()
        secgroup = security_group.SecurityGroup.get(self.context, 1)
        self.assertEqual(self._fix_deleted(fake_secgroup),
                         dict(secgroup.items()))
        self.assertEqual(secgroup.obj_what_changed(), set())
        self.assertRemotes()

    def test_get_by_name(self):
        """get_by_name() looks up by project id and group name."""
        self.mox.StubOutWithMock(db, 'security_group_get_by_name')
        db.security_group_get_by_name(self.context, 'fake-project',
                                      'fake-name').AndReturn(fake_secgroup)
        self.mox.ReplayAll()
        secgroup = security_group.SecurityGroup.get_by_name(self.context,
                                                            'fake-project',
                                                            'fake-name')
        self.assertEqual(self._fix_deleted(fake_secgroup),
                         dict(secgroup.items()))
        self.assertEqual(secgroup.obj_what_changed(), set())
        self.assertRemotes()

    def test_in_use(self):
        """in_use() proxies straight to db.security_group_in_use()."""
        self.mox.StubOutWithMock(db, 'security_group_in_use')
        db.security_group_in_use(self.context, 123).AndReturn(True)
        self.mox.ReplayAll()
        secgroup = security_group.SecurityGroup()
        secgroup.id = 123
        self.assertTrue(secgroup.in_use(self.context))
        self.assertRemotes()

    def test_save(self):
        """save() sends only changed fields and adopts the DB's response."""
        self.mox.StubOutWithMock(db, 'security_group_update')
        # The DB reply also changes project_id; the object must pick it up.
        updated_secgroup = dict(fake_secgroup, project_id='changed')
        db.security_group_update(self.context, 1,
                                 {'description': 'foobar'}).AndReturn(
                                     updated_secgroup)
        self.mox.ReplayAll()
        secgroup = security_group.SecurityGroup._from_db_object(
            self.context, security_group.SecurityGroup(), fake_secgroup)
        secgroup.description = 'foobar'
        secgroup.save(self.context)
        self.assertEqual(self._fix_deleted(updated_secgroup),
                         dict(secgroup.items()))
        self.assertEqual(secgroup.obj_what_changed(), set())
        self.assertRemotes()

    def test_save_no_changes(self):
        # No expectations recorded: db.security_group_update must not be
        # called when nothing changed (mox verify fails otherwise).
        self.mox.StubOutWithMock(db, 'security_group_update')
        self.mox.ReplayAll()
        secgroup = security_group.SecurityGroup._from_db_object(
            self.context, security_group.SecurityGroup(), fake_secgroup)
        secgroup.save(self.context)

    def test_refresh(self):
        """refresh() re-reads the DB row and clears pending changes."""
        updated_secgroup = dict(fake_secgroup, description='changed')
        self.mox.StubOutWithMock(db, 'security_group_get')
        db.security_group_get(self.context, 1).AndReturn(updated_secgroup)
        self.mox.ReplayAll()
        secgroup = security_group.SecurityGroup._from_db_object(
            self.context, security_group.SecurityGroup(), fake_secgroup)
        secgroup.refresh(self.context)
        self.assertEqual(self._fix_deleted(updated_secgroup),
                         dict(secgroup.items()))
        self.assertEqual(secgroup.obj_what_changed(), set())
        self.assertRemotes()
+
+
class TestSecurityGroupObject(test_objects._LocalTest,
                              _TestSecurityGroupObject):
    """Run the shared SecurityGroup tests with the _LocalTest base."""
    pass
+
+
class TestSecurityGroupObjectRemote(test_objects._RemoteTest,
                                    _TestSecurityGroupObject):
    """Run the shared SecurityGroup tests with the _RemoteTest base."""
    pass
+
+
# Two fake DB rows with distinct ids/names for the list tests below.
fake_secgroups = [
    dict(fake_secgroup, id=1, name='secgroup1'),
    dict(fake_secgroup, id=2, name='secgroup2'),
    ]
+
+
class _TestSecurityGroupListObject(object):
    """Shared SecurityGroupList tests, mixed into local/remote variants."""

    def test_get_all(self):
        """get_all() wraps each DB row in a context-carrying SecurityGroup."""
        self.mox.StubOutWithMock(db, 'security_group_get_all')
        db.security_group_get_all(self.context).AndReturn(fake_secgroups)
        self.mox.ReplayAll()
        secgroup_list = security_group.SecurityGroupList.get_all(self.context)
        for i in range(len(fake_secgroups)):
            self.assertIsInstance(secgroup_list[i],
                                  security_group.SecurityGroup)
            self.assertEqual(fake_secgroups[i]['id'],
                             secgroup_list[i]['id'])
            self.assertEqual(secgroup_list[i]._context, self.context)

    def test_get_by_project(self):
        """get_by_project() filters groups by project id via the DB API."""
        self.mox.StubOutWithMock(db, 'security_group_get_by_project')
        db.security_group_get_by_project(self.context,
                                         'fake-project').AndReturn(
                                             fake_secgroups)
        self.mox.ReplayAll()
        secgroup_list = security_group.SecurityGroupList.get_by_project(
            self.context, 'fake-project')
        for i in range(len(fake_secgroups)):
            self.assertIsInstance(secgroup_list[i],
                                  security_group.SecurityGroup)
            self.assertEqual(fake_secgroups[i]['id'],
                             secgroup_list[i]['id'])

    def test_get_by_instance(self):
        """get_by_instance() queries groups by the instance's uuid."""
        inst = instance.Instance()
        inst.uuid = 'fake-inst-uuid'
        self.mox.StubOutWithMock(db, 'security_group_get_by_instance')
        db.security_group_get_by_instance(self.context,
                                          'fake-inst-uuid').AndReturn(
                                              fake_secgroups)
        self.mox.ReplayAll()
        secgroup_list = security_group.SecurityGroupList.get_by_instance(
            self.context, inst)
        for i in range(len(fake_secgroups)):
            self.assertIsInstance(secgroup_list[i],
                                  security_group.SecurityGroup)
            self.assertEqual(fake_secgroups[i]['id'],
                             secgroup_list[i]['id'])
+
+
class TestSecurityGroupListObject(test_objects._LocalTest,
                                  _TestSecurityGroupListObject):
    """Run the shared SecurityGroupList tests with the _LocalTest base."""
    pass
+
+
class TestSecurityGroupListObjectRemote(test_objects._RemoteTest,
                                        _TestSecurityGroupListObject):
    """Run the shared SecurityGroupList tests with the _RemoteTest base."""
    pass
diff --git a/nova/tests/unit/objects/test_security_group_rule.py b/nova/tests/unit/objects/test_security_group_rule.py
new file mode 100644
index 0000000000..481be189a5
--- /dev/null
+++ b/nova/tests/unit/objects/test_security_group_rule.py
@@ -0,0 +1,95 @@
+# Copyright 2013 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import db
+from nova import exception
+from nova import objects
+from nova.tests.unit.objects import test_objects
+from nova.tests.unit.objects import test_security_group
+
# Fake security_group_rules table row (SSH-style tcp/22 rule open to all).
fake_rule = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': False,
    'id': 1,
    'protocol': 'tcp',
    'from_port': 22,
    'to_port': 22,
    'cidr': '0.0.0.0/0',
    }
+
+
class _TestSecurityGroupRuleObject(object):
    """Shared SecurityGroupRule tests, mixed into local/remote variants."""

    def test_get_by_id(self):
        """get_by_id() hydrates every field from the DB row."""
        with mock.patch.object(db, 'security_group_rule_get') as db_get:
            db_get.return_value = fake_rule
            rule = objects.SecurityGroupRule.get_by_id(
                self.context, 1)
            for field in fake_rule:
                if field == 'cidr':
                    # cidr round-trips through a typed field; compare as str.
                    self.assertEqual(fake_rule[field], str(rule[field]))
                else:
                    self.assertEqual(fake_rule[field], rule[field])
            db_get.assert_called_with(self.context, 1)

    def test_get_by_security_group(self):
        """Listing by group hydrates each rule's grantee_group."""
        secgroup = objects.SecurityGroup()
        secgroup.id = 123
        rule = dict(fake_rule,
                    grantee_group=dict(test_security_group.fake_secgroup,
                                       id=123))
        db_method = 'security_group_rule_get_by_security_group'
        with mock.patch.object(db, db_method) as db_list:
            db_list.return_value = [rule]
            rules = objects.SecurityGroupRuleList.get_by_security_group(
                self.context, secgroup)
        self.assertEqual(1, len(rules))
        self.assertEqual(123, rules[0].grantee_group.id)

    @mock.patch.object(db, 'security_group_rule_create',
                       return_value=fake_rule)
    def test_create(self, create_mock):
        """create() sends the group ids to the DB and adopts the new id."""
        grantee = objects.SecurityGroup()
        grantee.id = 123
        parent = objects.SecurityGroup()
        parent.id = 223
        rule = objects.SecurityGroupRule()
        rule.protocol = 'tcp'
        rule.grantee_group = grantee
        rule.parent_group = parent
        rule.create(self.context)
        updates = create_mock.call_args[0][1]
        self.assertEqual(fake_rule['id'], rule.id)
        self.assertEqual(updates['group_id'], rule.grantee_group.id)
        self.assertEqual(updates['parent_group_id'], rule.parent_group.id)

    @mock.patch.object(db, 'security_group_rule_create',
                       return_value=fake_rule)
    def test_set_id_failure(self, create_mock):
        """Once created, the rule's id field is read-only."""
        rule = objects.SecurityGroupRule()
        rule.create(self.context)
        self.assertRaises(exception.ReadOnlyFieldError, setattr,
                          rule, 'id', 124)
+
+
class TestSecurityGroupRuleObject(test_objects._LocalTest,
                                  _TestSecurityGroupRuleObject):
    """Run the shared SecurityGroupRule tests with the _LocalTest base."""
    pass
+
+
class TestSecurityGroupRuleObjectRemote(test_objects._RemoteTest,
                                        _TestSecurityGroupRuleObject):
    """Run the shared SecurityGroupRule tests with the _RemoteTest base."""
    pass
diff --git a/nova/tests/unit/objects/test_service.py b/nova/tests/unit/objects/test_service.py
new file mode 100644
index 0000000000..d8a72056a5
--- /dev/null
+++ b/nova/tests/unit/objects/test_service.py
@@ -0,0 +1,226 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.utils import timeutils
+
+from nova import db
+from nova import exception
+from nova.objects import aggregate
+from nova.objects import service
+from nova.tests.unit.objects import test_compute_node
+from nova.tests.unit.objects import test_objects
+
+NOW = timeutils.utcnow().replace(microsecond=0)
+fake_service = {
+ 'created_at': NOW,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'id': 123,
+ 'host': 'fake-host',
+ 'binary': 'fake-service',
+ 'topic': 'fake-service-topic',
+ 'report_count': 1,
+ 'disabled': False,
+ 'disabled_reason': None,
+ }
+
+OPTIONAL = ['availability_zone', 'compute_node']
+
+
+class _TestServiceObject(object):
+ def supported_hv_specs_comparator(self, expected, obj_val):
+ obj_val = [inst.to_list() for inst in obj_val]
+ self.json_comparator(expected, obj_val)
+
+ def comparators(self):
+ return {'stats': self.json_comparator,
+ 'host_ip': self.str_comparator,
+ 'supported_hv_specs': self.supported_hv_specs_comparator}
+
+ def subs(self):
+ return {'supported_hv_specs': 'supported_instances'}
+
+ def _test_query(self, db_method, obj_method, *args, **kwargs):
+ self.mox.StubOutWithMock(db, db_method)
+ getattr(db, db_method)(self.context, *args, **kwargs).AndReturn(
+ fake_service)
+ self.mox.ReplayAll()
+ obj = getattr(service.Service, obj_method)(self.context, *args,
+ **kwargs)
+ self.compare_obj(obj, fake_service, allow_missing=OPTIONAL)
+
+ def test_get_by_id(self):
+ self._test_query('service_get', 'get_by_id', 123)
+
+ def test_get_by_host_and_topic(self):
+ self._test_query('service_get_by_host_and_topic',
+ 'get_by_host_and_topic', 'fake-host', 'fake-topic')
+
+ def test_get_by_compute_host(self):
+ self._test_query('service_get_by_compute_host', 'get_by_compute_host',
+ 'fake-host')
+
+ def test_get_by_args(self):
+ self._test_query('service_get_by_args', 'get_by_args', 'fake-host',
+ 'fake-service')
+
+ def test_with_compute_node(self):
+ self.mox.StubOutWithMock(db, 'service_get')
+ self.mox.StubOutWithMock(db, 'compute_node_get_by_service_id')
+ _fake_service = dict(
+ fake_service, compute_node=[test_compute_node.fake_compute_node])
+ db.service_get(self.context, 123).AndReturn(_fake_service)
+ self.mox.ReplayAll()
+ service_obj = service.Service.get_by_id(self.context, 123)
+ self.assertTrue(service_obj.obj_attr_is_set('compute_node'))
+ self.compare_obj(service_obj.compute_node,
+ test_compute_node.fake_compute_node,
+ subs=self.subs(),
+ allow_missing=OPTIONAL,
+ comparators=self.comparators())
+
+ def test_create(self):
+ self.mox.StubOutWithMock(db, 'service_create')
+ db.service_create(self.context, {'host': 'fake-host'}).AndReturn(
+ fake_service)
+ self.mox.ReplayAll()
+ service_obj = service.Service()
+ service_obj.host = 'fake-host'
+ service_obj.create(self.context)
+ self.assertEqual(fake_service['id'], service_obj.id)
+
+ def test_recreate_fails(self):
+ self.mox.StubOutWithMock(db, 'service_create')
+ db.service_create(self.context, {'host': 'fake-host'}).AndReturn(
+ fake_service)
+ self.mox.ReplayAll()
+ service_obj = service.Service()
+ service_obj.host = 'fake-host'
+ service_obj.create(self.context)
+ self.assertRaises(exception.ObjectActionError, service_obj.create,
+ self.context)
+
+ def test_save(self):
+ self.mox.StubOutWithMock(db, 'service_update')
+ db.service_update(self.context, 123, {'host': 'fake-host'}).AndReturn(
+ fake_service)
+ self.mox.ReplayAll()
+ service_obj = service.Service()
+ service_obj.id = 123
+ service_obj.host = 'fake-host'
+ service_obj.save(self.context)
+
+ @mock.patch.object(db, 'service_create',
+ return_value=fake_service)
+ def test_set_id_failure(self, db_mock):
+ service_obj = service.Service()
+ service_obj.create(self.context)
+ self.assertRaises(exception.ReadOnlyFieldError, setattr,
+ service_obj, 'id', 124)
+
+ def _test_destroy(self):
+ self.mox.StubOutWithMock(db, 'service_destroy')
+ db.service_destroy(self.context, 123)
+ self.mox.ReplayAll()
+ service_obj = service.Service()
+ service_obj.id = 123
+ service_obj.destroy(self.context)
+
+ def test_destroy(self):
+ # The test harness needs db.service_destroy to work,
+ # so avoid leaving it broken here after we're done
+ orig_service_destroy = db.service_destroy
+ try:
+ self._test_destroy()
+ finally:
+ db.service_destroy = orig_service_destroy
+
+ def test_get_by_topic(self):
+ self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
+ db.service_get_all_by_topic(self.context, 'fake-topic').AndReturn(
+ [fake_service])
+ self.mox.ReplayAll()
+ services = service.ServiceList.get_by_topic(self.context, 'fake-topic')
+ self.assertEqual(1, len(services))
+ self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
+
+ def test_get_by_host(self):
+ self.mox.StubOutWithMock(db, 'service_get_all_by_host')
+ db.service_get_all_by_host(self.context, 'fake-host').AndReturn(
+ [fake_service])
+ self.mox.ReplayAll()
+ services = service.ServiceList.get_by_host(self.context, 'fake-host')
+ self.assertEqual(1, len(services))
+ self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
+
+ def test_get_all(self):
+ self.mox.StubOutWithMock(db, 'service_get_all')
+ db.service_get_all(self.context, disabled=False).AndReturn(
+ [fake_service])
+ self.mox.ReplayAll()
+ services = service.ServiceList.get_all(self.context, disabled=False)
+ self.assertEqual(1, len(services))
+ self.compare_obj(services[0], fake_service, allow_missing=OPTIONAL)
+
+ def test_get_all_with_az(self):
+ self.mox.StubOutWithMock(db, 'service_get_all')
+ self.mox.StubOutWithMock(aggregate.AggregateList,
+ 'get_by_metadata_key')
+ db.service_get_all(self.context, disabled=None).AndReturn(
+ [dict(fake_service, topic='compute')])
+ agg = aggregate.Aggregate()
+ agg.name = 'foo'
+ agg.metadata = {'availability_zone': 'test-az'}
+ agg.create(self.context)
+ agg.hosts = [fake_service['host']]
+ aggregate.AggregateList.get_by_metadata_key(self.context,
+ 'availability_zone', hosts=set(agg.hosts)).AndReturn([agg])
+ self.mox.ReplayAll()
+ services = service.ServiceList.get_all(self.context, set_zones=True)
+ self.assertEqual(1, len(services))
+ self.assertEqual('test-az', services[0].availability_zone)
+
+ def test_compute_node(self):
+ self.mox.StubOutWithMock(db, 'compute_node_get_by_service_id')
+ db.compute_node_get_by_service_id(self.context, 123).AndReturn(
+ test_compute_node.fake_compute_node)
+ self.mox.ReplayAll()
+ service_obj = service.Service()
+ service_obj._context = self.context
+ service_obj.id = 123
+ self.compare_obj(service_obj.compute_node,
+ test_compute_node.fake_compute_node,
+ subs=self.subs(),
+ allow_missing=OPTIONAL,
+ comparators=self.comparators())
+ # Make sure it doesn't re-fetch this
+ service_obj.compute_node
+
+ def test_load_when_orphaned(self):
+ service_obj = service.Service()
+ service_obj.id = 123
+ self.assertRaises(exception.OrphanedObjectError,
+ getattr, service_obj, 'compute_node')
+
+
+class TestServiceObject(test_objects._LocalTest,
+ _TestServiceObject):
+ pass
+
+
+class TestRemoteServiceObject(test_objects._RemoteTest,
+ _TestServiceObject):
+ pass
diff --git a/nova/tests/unit/objects/test_virtual_interface.py b/nova/tests/unit/objects/test_virtual_interface.py
new file mode 100644
index 0000000000..6c416315c4
--- /dev/null
+++ b/nova/tests/unit/objects/test_virtual_interface.py
@@ -0,0 +1,126 @@
+# Copyright (C) 2014, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import db
+from nova.objects import virtual_interface as vif_obj
+from nova.tests.unit.objects import test_objects
+
+
+fake_vif = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'id': 1,
+ 'address': '00:00:00:00:00:00',
+ 'network_id': 123,
+ 'instance_uuid': 'fake-uuid',
+ 'uuid': 'fake-uuid-2',
+}
+
+
+class _TestVirtualInterface(object):
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+ test.assertEqual(db[field], obj[field])
+
+ def test_get_by_id(self):
+ with mock.patch.object(db, 'virtual_interface_get') as get:
+ get.return_value = fake_vif
+ vif = vif_obj.VirtualInterface.get_by_id(self.context, 1)
+ self._compare(self, fake_vif, vif)
+
+ def test_get_by_uuid(self):
+ with mock.patch.object(db, 'virtual_interface_get_by_uuid') as get:
+ get.return_value = fake_vif
+ vif = vif_obj.VirtualInterface.get_by_uuid(self.context,
+ 'fake-uuid-2')
+ self._compare(self, fake_vif, vif)
+
+ def test_get_by_address(self):
+ with mock.patch.object(db, 'virtual_interface_get_by_address') as get:
+ get.return_value = fake_vif
+ vif = vif_obj.VirtualInterface.get_by_address(self.context,
+ '00:00:00:00:00:00')
+ self._compare(self, fake_vif, vif)
+
+ def test_get_by_instance_and_network(self):
+ with mock.patch.object(db,
+ 'virtual_interface_get_by_instance_and_network') as get:
+ get.return_value = fake_vif
+ vif = vif_obj.VirtualInterface.get_by_instance_and_network(
+ self.context, 'fake-uuid', 123)
+ self._compare(self, fake_vif, vif)
+
+ def test_create(self):
+ vif = vif_obj.VirtualInterface()
+ vif.address = '00:00:00:00:00:00'
+ vif.network_id = 123
+ vif.instance_uuid = 'fake-uuid'
+ vif.uuid = 'fake-uuid-2'
+
+ with mock.patch.object(db, 'virtual_interface_create') as create:
+ create.return_value = fake_vif
+ vif.create(self.context)
+
+ self.assertEqual(self.context, vif._context)
+ vif._context = None
+ self._compare(self, fake_vif, vif)
+
+ def test_delete_by_instance_uuid(self):
+ with mock.patch.object(db,
+ 'virtual_interface_delete_by_instance') as delete:
+ vif_obj.VirtualInterface.delete_by_instance_uuid(self.context,
+ 'fake-uuid')
+ delete.assert_called_with(self.context, 'fake-uuid')
+
+
+class TestVirtualInterfaceObject(test_objects._LocalTest,
+ _TestVirtualInterface):
+ pass
+
+
+class TestRemoteVirtualInterfaceObject(test_objects._RemoteTest,
+ _TestVirtualInterface):
+ pass
+
+
+class _TestVirtualInterfaceList(object):
+ def test_get_all(self):
+ with mock.patch.object(db, 'virtual_interface_get_all') as get:
+ get.return_value = [fake_vif]
+ vifs = vif_obj.VirtualInterfaceList.get_all(self.context)
+ self.assertEqual(1, len(vifs))
+ _TestVirtualInterface._compare(self, fake_vif, vifs[0])
+
+ def test_get_by_instance_uuid(self):
+ with mock.patch.object(db, 'virtual_interface_get_by_instance') as get:
+ get.return_value = [fake_vif]
+ vifs = vif_obj.VirtualInterfaceList.get_by_instance_uuid(
+ self.context, 'fake-uuid')
+ self.assertEqual(1, len(vifs))
+ _TestVirtualInterface._compare(self, fake_vif, vifs[0])
+
+
+class TestVirtualInterfaceList(test_objects._LocalTest,
+ _TestVirtualInterfaceList):
+ pass
+
+
+class TestRemoteVirtualInterfaceList(test_objects._RemoteTest,
+ _TestVirtualInterfaceList):
+ pass
diff --git a/nova/tests/pci/__init__.py b/nova/tests/unit/pci/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/pci/__init__.py
+++ b/nova/tests/unit/pci/__init__.py
diff --git a/nova/tests/pci/fakes.py b/nova/tests/unit/pci/fakes.py
index b56dfc20a8..b56dfc20a8 100644
--- a/nova/tests/pci/fakes.py
+++ b/nova/tests/unit/pci/fakes.py
diff --git a/nova/tests/pci/test_device.py b/nova/tests/unit/pci/test_device.py
index 2406ac254b..2406ac254b 100644
--- a/nova/tests/pci/test_device.py
+++ b/nova/tests/unit/pci/test_device.py
diff --git a/nova/tests/pci/test_devspec.py b/nova/tests/unit/pci/test_devspec.py
index d7b6098871..d7b6098871 100644
--- a/nova/tests/pci/test_devspec.py
+++ b/nova/tests/unit/pci/test_devspec.py
diff --git a/nova/tests/unit/pci/test_manager.py b/nova/tests/unit/pci/test_manager.py
new file mode 100644
index 0000000000..787ea41bd2
--- /dev/null
+++ b/nova/tests/unit/pci/test_manager.py
@@ -0,0 +1,364 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+import mock
+
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova import objects
+from nova.pci import device
+from nova.pci import manager
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+from nova.tests.unit.pci import fakes as pci_fakes
+
+
+fake_pci = {
+ 'compute_node_id': 1,
+ 'address': '0000:00:00.1',
+ 'product_id': 'p',
+ 'vendor_id': 'v',
+ 'request_id': None,
+ 'status': 'available'}
+fake_pci_1 = dict(fake_pci, address='0000:00:00.2',
+ product_id='p1', vendor_id='v1')
+fake_pci_2 = dict(fake_pci, address='0000:00:00.3')
+
+
+fake_db_dev = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': None,
+ 'id': 1,
+ 'compute_node_id': 1,
+ 'address': '0000:00:00.1',
+ 'vendor_id': 'v',
+ 'product_id': 'p',
+ 'dev_type': 't',
+ 'status': 'available',
+ 'dev_id': 'i',
+ 'label': 'l',
+ 'instance_uuid': None,
+ 'extra_info': '{}',
+ 'request_id': None,
+ }
+fake_db_dev_1 = dict(fake_db_dev, vendor_id='v1',
+ product_id='p1', id=2,
+ address='0000:00:00.2')
+fake_db_dev_2 = dict(fake_db_dev, id=3, address='0000:00:00.3')
+fake_db_devs = [fake_db_dev, fake_db_dev_1, fake_db_dev_2]
+
+
+fake_pci_requests = [
+ {'count': 1,
+ 'spec': [{'vendor_id': 'v'}]},
+ {'count': 1,
+ 'spec': [{'vendor_id': 'v1'}]}]
+
+
+class PciDevTrackerTestCase(test.TestCase):
+ def _create_fake_instance(self):
+ self.inst = objects.Instance()
+ self.inst.uuid = 'fake-inst-uuid'
+ self.inst.pci_devices = objects.PciDeviceList()
+ self.inst.vm_state = vm_states.ACTIVE
+ self.inst.task_state = None
+
+ def _fake_get_pci_devices(self, ctxt, node_id):
+ return fake_db_devs[:]
+
+ def _fake_pci_device_update(self, ctxt, node_id, address, value):
+ self.update_called += 1
+ self.called_values = value
+ fake_return = copy.deepcopy(fake_db_dev)
+ return fake_return
+
+ def _fake_pci_device_destroy(self, ctxt, node_id, address):
+ self.destroy_called += 1
+
+ def _create_pci_requests_object(self, mock_get, requests):
+ pci_reqs = []
+ for request in requests:
+ pci_req_obj = objects.InstancePCIRequest(count=request['count'],
+ spec=request['spec'])
+ pci_reqs.append(pci_req_obj)
+ mock_get.return_value = objects.InstancePCIRequests(requests=pci_reqs)
+
+ def setUp(self):
+ super(PciDevTrackerTestCase, self).setUp()
+ self.stubs.Set(db, 'pci_device_get_all_by_node',
+ self._fake_get_pci_devices)
+ # The fake_pci_whitelist must be called before creating the fake
+ # devices
+ patcher = pci_fakes.fake_pci_whitelist()
+ self.addCleanup(patcher.stop)
+ self._create_fake_instance()
+ self.tracker = manager.PciDevTracker(1)
+
+ def test_pcidev_tracker_create(self):
+ self.assertEqual(len(self.tracker.pci_devs), 3)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 3)
+ self.assertEqual(self.tracker.stale.keys(), [])
+ self.assertEqual(len(self.tracker.stats.pools), 2)
+ self.assertEqual(self.tracker.node_id, 1)
+
+ def test_pcidev_tracker_create_no_nodeid(self):
+ self.tracker = manager.PciDevTracker()
+ self.assertEqual(len(self.tracker.pci_devs), 0)
+
+ def test_set_hvdev_new_dev(self):
+ fake_pci_3 = dict(fake_pci, address='0000:00:00.4', vendor_id='v2')
+ fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1),
+ copy.deepcopy(fake_pci_2), copy.deepcopy(fake_pci_3)]
+ self.tracker.set_hvdevs(fake_pci_devs)
+ self.assertEqual(len(self.tracker.pci_devs), 4)
+ self.assertEqual(set([dev['address'] for
+ dev in self.tracker.pci_devs]),
+ set(['0000:00:00.1', '0000:00:00.2',
+ '0000:00:00.3', '0000:00:00.4']))
+ self.assertEqual(set([dev['vendor_id'] for
+ dev in self.tracker.pci_devs]),
+ set(['v', 'v1', 'v2']))
+
+ def test_set_hvdev_changed(self):
+ fake_pci_v2 = dict(fake_pci, address='0000:00:00.2', vendor_id='v1')
+ fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2),
+ copy.deepcopy(fake_pci_v2)]
+ self.tracker.set_hvdevs(fake_pci_devs)
+ self.assertEqual(set([dev['vendor_id'] for
+ dev in self.tracker.pci_devs]),
+ set(['v', 'v1']))
+
+ def test_set_hvdev_remove(self):
+ self.tracker.set_hvdevs([fake_pci])
+ self.assertEqual(len([dev for dev in self.tracker.pci_devs
+ if dev['status'] == 'removed']),
+ 2)
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_set_hvdev_changed_stal(self, mock_get):
+ self._create_pci_requests_object(mock_get,
+ [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}])
+ self.tracker._claim_instance(mock.sentinel.context, self.inst)
+ fake_pci_3 = dict(fake_pci, address='0000:00:00.2', vendor_id='v2')
+ fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2),
+ copy.deepcopy(fake_pci_3)]
+ self.tracker.set_hvdevs(fake_pci_devs)
+ self.assertEqual(len(self.tracker.stale), 1)
+ self.assertEqual(self.tracker.stale['0000:00:00.2']['vendor_id'], 'v2')
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_update_pci_for_instance_active(self, mock_get):
+ self._create_pci_requests_object(mock_get, fake_pci_requests)
+ self.tracker.update_pci_for_instance(None, self.inst)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
+ self.assertEqual(free_devs[0]['vendor_id'], 'v')
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_update_pci_for_instance_fail(self, mock_get):
+ pci_requests = copy.deepcopy(fake_pci_requests)
+ pci_requests[0]['count'] = 4
+ self._create_pci_requests_object(mock_get, pci_requests)
+ self.assertRaises(exception.PciDeviceRequestFailed,
+ self.tracker.update_pci_for_instance,
+ None,
+ self.inst)
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_update_pci_for_instance_deleted(self, mock_get):
+ self._create_pci_requests_object(mock_get, fake_pci_requests)
+ self.tracker.update_pci_for_instance(None, self.inst)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
+ self.inst.vm_state = vm_states.DELETED
+ self.tracker.update_pci_for_instance(None, self.inst)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 3)
+ self.assertEqual(set([dev['vendor_id'] for
+ dev in self.tracker.pci_devs]),
+ set(['v', 'v1']))
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_update_pci_for_instance_resize_source(self, mock_get):
+ self._create_pci_requests_object(mock_get, fake_pci_requests)
+ self.tracker.update_pci_for_instance(None, self.inst)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
+ self.inst.task_state = task_states.RESIZE_MIGRATED
+ self.tracker.update_pci_for_instance(None, self.inst)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 3)
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_update_pci_for_instance_resize_dest(self, mock_get):
+ self._create_pci_requests_object(mock_get, fake_pci_requests)
+ self.tracker.update_pci_for_migration(None, self.inst)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
+ self.assertEqual(len(self.tracker.claims['fake-inst-uuid']), 2)
+ self.assertNotIn('fake-inst-uuid', self.tracker.allocations)
+ self.inst.task_state = task_states.RESIZE_FINISH
+ self.tracker.update_pci_for_instance(None, self.inst)
+ self.assertEqual(len(self.tracker.allocations['fake-inst-uuid']), 2)
+ self.assertNotIn('fake-inst-uuid', self.tracker.claims)
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_update_pci_for_migration_in(self, mock_get):
+ self._create_pci_requests_object(mock_get, fake_pci_requests)
+ self.tracker.update_pci_for_migration(None, self.inst)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
+ self.assertEqual(free_devs[0]['vendor_id'], 'v')
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_update_pci_for_migration_out(self, mock_get):
+ self._create_pci_requests_object(mock_get, fake_pci_requests)
+ self.tracker.update_pci_for_migration(None, self.inst)
+ self.tracker.update_pci_for_migration(None, self.inst, sign=-1)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 3)
+ self.assertEqual(set([dev['vendor_id'] for
+ dev in self.tracker.pci_devs]),
+ set(['v', 'v1']))
+
+ def test_save(self):
+ self.stubs.Set(db, "pci_device_update", self._fake_pci_device_update)
+ ctxt = context.get_admin_context()
+ fake_pci_v3 = dict(fake_pci, address='0000:00:00.2', vendor_id='v3')
+ fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2),
+ copy.deepcopy(fake_pci_v3)]
+ self.tracker.set_hvdevs(fake_pci_devs)
+ self.update_called = 0
+ self.tracker.save(ctxt)
+ self.assertEqual(self.update_called, 3)
+
+ def test_save_removed(self):
+ self.stubs.Set(db, "pci_device_update", self._fake_pci_device_update)
+ self.stubs.Set(db, "pci_device_destroy", self._fake_pci_device_destroy)
+ self.destroy_called = 0
+ ctxt = context.get_admin_context()
+ self.assertEqual(len(self.tracker.pci_devs), 3)
+ dev = self.tracker.pci_devs[0]
+ self.update_called = 0
+ device.remove(dev)
+ self.tracker.save(ctxt)
+ self.assertEqual(len(self.tracker.pci_devs), 2)
+ self.assertEqual(self.destroy_called, 1)
+
+ def test_set_compute_node_id(self):
+ self.tracker = manager.PciDevTracker()
+ fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1),
+ copy.deepcopy(fake_pci_2)]
+ self.tracker.set_hvdevs(fake_pci_devs)
+ self.tracker.set_compute_node_id(1)
+ self.assertEqual(self.tracker.node_id, 1)
+ self.assertEqual(self.tracker.pci_devs[0].compute_node_id, 1)
+ fake_pci_3 = dict(fake_pci, address='0000:00:00.4', vendor_id='v2')
+ fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1),
+ copy.deepcopy(fake_pci_3), copy.deepcopy(fake_pci_3)]
+ self.tracker.set_hvdevs(fake_pci_devs)
+ for dev in self.tracker.pci_devs:
+ self.assertEqual(dev.compute_node_id, 1)
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_clean_usage(self, mock_get):
+ inst_2 = copy.copy(self.inst)
+ inst_2.uuid = 'uuid5'
+ migr = {'instance_uuid': 'uuid2', 'vm_state': vm_states.BUILDING}
+ orph = {'uuid': 'uuid3', 'vm_state': vm_states.BUILDING}
+
+ self._create_pci_requests_object(mock_get,
+ [{'count': 1, 'spec': [{'vendor_id': 'v'}]}])
+ self.tracker.update_pci_for_instance(None, self.inst)
+ self._create_pci_requests_object(mock_get,
+ [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}])
+ self.tracker.update_pci_for_instance(None, inst_2)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
+ self.assertEqual(free_devs[0]['vendor_id'], 'v')
+
+ self.tracker.clean_usage([self.inst], [migr], [orph])
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 2)
+ self.assertEqual(
+ set([dev['vendor_id'] for dev in free_devs]),
+ set(['v', 'v1']))
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_clean_usage_claims(self, mock_get):
+ inst_2 = copy.copy(self.inst)
+ inst_2.uuid = 'uuid5'
+ migr = {'instance_uuid': 'uuid2', 'vm_state': vm_states.BUILDING}
+ orph = {'uuid': 'uuid3', 'vm_state': vm_states.BUILDING}
+
+ self._create_pci_requests_object(mock_get,
+ [{'count': 1, 'spec': [{'vendor_id': 'v'}]}])
+ self.tracker.update_pci_for_instance(None, self.inst)
+ self._create_pci_requests_object(mock_get,
+ [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}])
+ self.tracker.update_pci_for_migration(None, inst_2)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
+ self.tracker.clean_usage([self.inst], [migr], [orph])
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 2)
+ self.assertEqual(
+ set([dev['vendor_id'] for dev in free_devs]),
+ set(['v', 'v1']))
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_clean_usage_no_request_match_no_claims(self, mock_get):
+ # Tests the case that there is no match for the request so the
+ # claims mapping is set to None for the instance when the tracker
+ # calls clean_usage.
+ self._create_pci_requests_object(mock_get, [])
+ self.tracker.update_pci_for_migration(None, instance=self.inst, sign=1)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(3, len(free_devs))
+ self.tracker.clean_usage([], [], [])
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(3, len(free_devs))
+ self.assertEqual(
+ set([dev['address'] for dev in free_devs]),
+ set(['0000:00:00.1', '0000:00:00.2', '0000:00:00.3']))
+
+
+class PciGetInstanceDevs(test.TestCase):
+ def test_get_devs_object(self):
+ def _fake_obj_load_attr(foo, attrname):
+ if attrname == 'pci_devices':
+ self.load_attr_called = True
+ foo.pci_devices = objects.PciDeviceList()
+
+ inst = fakes.stub_instance(id='1')
+ ctxt = context.get_admin_context()
+ self.mox.StubOutWithMock(db, 'instance_get')
+ db.instance_get(ctxt, '1', columns_to_join=[]
+ ).AndReturn(inst)
+ self.mox.ReplayAll()
+ inst = objects.Instance.get_by_id(ctxt, '1', expected_attrs=[])
+ self.stubs.Set(objects.Instance, 'obj_load_attr', _fake_obj_load_attr)
+
+ self.load_attr_called = False
+ manager.get_instance_pci_devs(inst)
+ self.assertEqual(self.load_attr_called, True)
diff --git a/nova/tests/pci/test_request.py b/nova/tests/unit/pci/test_request.py
index 32c768b0c0..32c768b0c0 100644
--- a/nova/tests/pci/test_request.py
+++ b/nova/tests/unit/pci/test_request.py
diff --git a/nova/tests/unit/pci/test_stats.py b/nova/tests/unit/pci/test_stats.py
new file mode 100644
index 0000000000..6960cf93cf
--- /dev/null
+++ b/nova/tests/unit/pci/test_stats.py
@@ -0,0 +1,267 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova import exception
+from nova import objects
+from nova.pci import stats
+from nova.pci import whitelist
+from nova import test
+from nova.tests.unit.pci import fakes
+
+fake_pci_1 = {
+ 'compute_node_id': 1,
+ 'address': '0000:00:00.1',
+ 'product_id': 'p1',
+ 'vendor_id': 'v1',
+ 'status': 'available',
+ 'extra_k1': 'v1',
+ 'request_id': None,
+ }
+
+
+fake_pci_2 = dict(fake_pci_1, vendor_id='v2',
+ product_id='p2',
+ address='0000:00:00.2')
+
+
+fake_pci_3 = dict(fake_pci_1, address='0000:00:00.3')
+
+
+pci_requests = [objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': 'v1'}]),
+ objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': 'v2'}])]
+
+
+pci_requests_multiple = [objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': 'v1'}]),
+ objects.InstancePCIRequest(count=3,
+ spec=[{'vendor_id': 'v2'}])]
+
+
+class PciDeviceStatsTestCase(test.NoDBTestCase):
+ def _create_fake_devs(self):
+ self.fake_dev_1 = objects.PciDevice.create(fake_pci_1)
+ self.fake_dev_2 = objects.PciDevice.create(fake_pci_2)
+ self.fake_dev_3 = objects.PciDevice.create(fake_pci_3)
+
+ map(self.pci_stats.add_device,
+ [self.fake_dev_1, self.fake_dev_2, self.fake_dev_3])
+
+ def setUp(self):
+ super(PciDeviceStatsTestCase, self).setUp()
+ self.pci_stats = stats.PciDeviceStats()
+ # The following two calls need to be made before adding the devices.
+ patcher = fakes.fake_pci_whitelist()
+ self.addCleanup(patcher.stop)
+ self._create_fake_devs()
+
+ def test_add_device(self):
+ self.assertEqual(len(self.pci_stats.pools), 2)
+ self.assertEqual(set([d['vendor_id'] for d in self.pci_stats]),
+ set(['v1', 'v2']))
+ self.assertEqual(set([d['count'] for d in self.pci_stats]),
+ set([1, 2]))
+
+ def test_remove_device(self):
+ self.pci_stats.remove_device(self.fake_dev_2)
+ self.assertEqual(len(self.pci_stats.pools), 1)
+ self.assertEqual(self.pci_stats.pools[0]['count'], 2)
+ self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
+
+ def test_remove_device_exception(self):
+ self.pci_stats.remove_device(self.fake_dev_2)
+ self.assertRaises(exception.PciDevicePoolEmpty,
+ self.pci_stats.remove_device,
+ self.fake_dev_2)
+
+ def test_json_creat(self):
+ m = jsonutils.dumps(self.pci_stats)
+ new_stats = stats.PciDeviceStats(m)
+
+ self.assertEqual(len(new_stats.pools), 2)
+ self.assertEqual(set([d['count'] for d in new_stats]),
+ set([1, 2]))
+ self.assertEqual(set([d['vendor_id'] for d in new_stats]),
+ set(['v1', 'v2']))
+
+ def test_support_requests(self):
+ self.assertEqual(self.pci_stats.support_requests(pci_requests),
+ True)
+ self.assertEqual(len(self.pci_stats.pools), 2)
+ self.assertEqual(set([d['count'] for d in self.pci_stats]),
+ set((1, 2)))
+
+ def test_support_requests_failed(self):
+ self.assertEqual(
+ self.pci_stats.support_requests(pci_requests_multiple), False)
+ self.assertEqual(len(self.pci_stats.pools), 2)
+ self.assertEqual(set([d['count'] for d in self.pci_stats]),
+ set([1, 2]))
+
+ def test_apply_requests(self):
+ self.pci_stats.apply_requests(pci_requests)
+ self.assertEqual(len(self.pci_stats.pools), 1)
+ self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
+ self.assertEqual(self.pci_stats.pools[0]['count'], 1)
+
+ def test_apply_requests_failed(self):
+ self.assertRaises(exception.PciDeviceRequestFailed,
+ self.pci_stats.apply_requests,
+ pci_requests_multiple)
+
+ def test_consume_requests(self):
+ devs = self.pci_stats.consume_requests(pci_requests)
+ self.assertEqual(2, len(devs))
+ self.assertEqual(set(['v1', 'v2']),
+ set([dev['vendor_id'] for dev in devs]))
+
+ def test_consume_requests_empty(self):
+ devs = self.pci_stats.consume_requests([])
+ self.assertEqual(0, len(devs))
+
+ def test_consume_requests_failed(self):
+ self.assertRaises(exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests_multiple)
+
+
+@mock.patch.object(whitelist, 'get_pci_devices_filter')
+class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(PciDeviceStatsWithTagsTestCase, self).setUp()
+ self.pci_stats = stats.PciDeviceStats()
+ self._create_whitelist()
+
+ def _create_whitelist(self):
+ white_list = ['{"vendor_id":"1137","product_id":"0071",'
+ '"address":"*:0a:00.*","physical_network":"physnet1"}',
+ '{"vendor_id":"1137","product_id":"0072"}']
+ self.pci_wlist = whitelist.PciHostDevicesWhiteList(white_list)
+
+ def _create_pci_devices(self):
+ self.pci_tagged_devices = []
+ for dev in range(4):
+ pci_dev = {'compute_node_id': 1,
+ 'address': '0000:0a:00.%d' % dev,
+ 'vendor_id': '1137',
+ 'product_id': '0071',
+ 'status': 'available',
+ 'request_id': None}
+ self.pci_tagged_devices.append(objects.PciDevice.create(pci_dev))
+
+ self.pci_untagged_devices = []
+ for dev in range(3):
+ pci_dev = {'compute_node_id': 1,
+ 'address': '0000:0b:00.%d' % dev,
+ 'vendor_id': '1137',
+ 'product_id': '0072',
+ 'status': 'available',
+ 'request_id': None}
+ self.pci_untagged_devices.append(objects.PciDevice.create(pci_dev))
+
+ map(self.pci_stats.add_device, self.pci_tagged_devices)
+ map(self.pci_stats.add_device, self.pci_untagged_devices)
+
+ def _assertPoolContent(self, pool, vendor_id, product_id, count, **tags):
+ self.assertEqual(vendor_id, pool['vendor_id'])
+ self.assertEqual(product_id, pool['product_id'])
+ self.assertEqual(count, pool['count'])
+ if tags:
+ for k, v in tags.iteritems():
+ self.assertEqual(v, pool[k])
+
+ def _assertPools(self):
+ # Pools are ordered based on the number of keys. 'product_id',
+ # 'vendor_id' are always part of the keys. When tags are present,
+ # they are also part of the keys. In this test class, we have
+ # two pools with the second one having the tag 'physical_network'
+ # and the value 'physnet1'
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072',
+ len(self.pci_untagged_devices))
+ self.assertEqual(self.pci_untagged_devices,
+ self.pci_stats.pools[0]['devices'])
+ self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071',
+ len(self.pci_tagged_devices),
+ physical_network='physnet1')
+ self.assertEqual(self.pci_tagged_devices,
+ self.pci_stats.pools[1]['devices'])
+
+ def test_add_devices(self, mock_get_dev_filter):
+ mock_get_dev_filter.return_value = self.pci_wlist
+ self._create_pci_devices()
+ self._assertPools()
+
+ def test_consume_requests(self, mock_get_dev_filter):
+ mock_get_dev_filter.return_value = self.pci_wlist
+ self._create_pci_devices()
+ pci_requests = [objects.InstancePCIRequest(count=1,
+ spec=[{'physical_network': 'physnet1'}]),
+ objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': '1137',
+ 'product_id': '0072'}])]
+ devs = self.pci_stats.consume_requests(pci_requests)
+ self.assertEqual(2, len(devs))
+ self.assertEqual(set(['0071', '0072']),
+ set([dev['product_id'] for dev in devs]))
+ self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072', 2)
+ self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071', 3,
+ physical_network='physnet1')
+
+ def test_add_device_no_devspec(self, mock_get_dev_filter):
+ mock_get_dev_filter.return_value = self.pci_wlist
+ self._create_pci_devices()
+ pci_dev = {'compute_node_id': 1,
+ 'address': '0000:0c:00.1',
+ 'vendor_id': '2345',
+ 'product_id': '0172',
+ 'status': 'available',
+ 'request_id': None}
+ pci_dev_obj = objects.PciDevice.create(pci_dev)
+ self.pci_stats.add_device(pci_dev_obj)
+ # There should be no change
+ self.assertIsNone(
+ self.pci_stats._create_pool_keys_from_dev(pci_dev_obj))
+ self._assertPools()
+
+ def test_remove_device_no_devspec(self, mock_get_dev_filter):
+ mock_get_dev_filter.return_value = self.pci_wlist
+ self._create_pci_devices()
+ pci_dev = {'compute_node_id': 1,
+ 'address': '0000:0c:00.1',
+ 'vendor_id': '2345',
+ 'product_id': '0172',
+ 'status': 'available',
+ 'request_id': None}
+ pci_dev_obj = objects.PciDevice.create(pci_dev)
+ self.pci_stats.remove_device(pci_dev_obj)
+ # There should be no change
+ self.assertIsNone(
+ self.pci_stats._create_pool_keys_from_dev(pci_dev_obj))
+ self._assertPools()
+
+ def test_remove_device(self, mock_get_dev_filter):
+ mock_get_dev_filter.return_value = self.pci_wlist
+ self._create_pci_devices()
+ dev1 = self.pci_untagged_devices.pop()
+ self.pci_stats.remove_device(dev1)
+ dev2 = self.pci_tagged_devices.pop()
+ self.pci_stats.remove_device(dev2)
+ self._assertPools()
diff --git a/nova/tests/pci/test_utils.py b/nova/tests/unit/pci/test_utils.py
index 77a0ce24f5..77a0ce24f5 100644
--- a/nova/tests/pci/test_utils.py
+++ b/nova/tests/unit/pci/test_utils.py
diff --git a/nova/tests/pci/test_whitelist.py b/nova/tests/unit/pci/test_whitelist.py
index cb5891dffb..cb5891dffb 100644
--- a/nova/tests/pci/test_whitelist.py
+++ b/nova/tests/unit/pci/test_whitelist.py
diff --git a/nova/tests/unit/policy_fixture.py b/nova/tests/unit/policy_fixture.py
new file mode 100644
index 0000000000..cf28875240
--- /dev/null
+++ b/nova/tests/unit/policy_fixture.py
@@ -0,0 +1,73 @@
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import fixtures
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+
+from nova.openstack.common import policy as common_policy
+import nova.policy
+from nova.tests.unit import fake_policy
+
+CONF = cfg.CONF
+
+
+class PolicyFixture(fixtures.Fixture):
+
+ def setUp(self):
+ super(PolicyFixture, self).setUp()
+ self.policy_dir = self.useFixture(fixtures.TempDir())
+ self.policy_file_name = os.path.join(self.policy_dir.path,
+ 'policy.json')
+ with open(self.policy_file_name, 'w') as policy_file:
+ policy_file.write(fake_policy.policy_data)
+ CONF.set_override('policy_file', self.policy_file_name)
+ nova.policy.reset()
+ nova.policy.init()
+ self.addCleanup(nova.policy.reset)
+
+ def set_rules(self, rules):
+ policy = nova.policy._ENFORCER
+ policy.set_rules(dict((k, common_policy.parse_rule(v))
+ for k, v in rules.items()))
+
+
+class RoleBasedPolicyFixture(fixtures.Fixture):
+
+ def __init__(self, role="admin", *args, **kwargs):
+ super(RoleBasedPolicyFixture, self).__init__(*args, **kwargs)
+ self.role = role
+
+ def setUp(self):
+ """Copy live policy.json file and convert all actions to
+ allow users of the specified role only
+ """
+ super(RoleBasedPolicyFixture, self).setUp()
+ policy = jsonutils.load(open(CONF.policy_file))
+
+ # Convert all actions to require specified role
+ for action, rule in policy.iteritems():
+ policy[action] = 'role:%s' % self.role
+
+ self.policy_dir = self.useFixture(fixtures.TempDir())
+ self.policy_file_name = os.path.join(self.policy_dir.path,
+ 'policy.json')
+ with open(self.policy_file_name, 'w') as policy_file:
+ jsonutils.dump(policy, policy_file)
+ CONF.set_override('policy_file', self.policy_file_name)
+ nova.policy.reset()
+ nova.policy.init()
+ self.addCleanup(nova.policy.reset)
diff --git a/nova/tests/scheduler/__init__.py b/nova/tests/unit/scheduler/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/scheduler/__init__.py
+++ b/nova/tests/unit/scheduler/__init__.py
diff --git a/nova/tests/scheduler/fakes.py b/nova/tests/unit/scheduler/fakes.py
index d1b2918d33..d1b2918d33 100644
--- a/nova/tests/scheduler/fakes.py
+++ b/nova/tests/unit/scheduler/fakes.py
diff --git a/nova/tests/scheduler/filters/__init__.py b/nova/tests/unit/scheduler/filters/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/scheduler/filters/__init__.py
+++ b/nova/tests/unit/scheduler/filters/__init__.py
diff --git a/nova/tests/unit/scheduler/filters/test_affinity_filters.py b/nova/tests/unit/scheduler/filters/test_affinity_filters.py
new file mode 100644
index 0000000000..d47d10a57d
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_affinity_filters.py
@@ -0,0 +1,258 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.config import cfg
+
+from nova.scheduler.filters import affinity_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+CONF = cfg.CONF
+
+CONF.import_opt('my_ip', 'nova.netconf')
+
+
+@mock.patch('nova.compute.api.API.get_all')
+class TestDifferentHostFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestDifferentHostFilter, self).setUp()
+ self.filt_cls = affinity_filter.DifferentHostFilter()
+
+ def test_affinity_different_filter_passes(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = []
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'different_host': ['fake'], }}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_different_filter_no_list_passes(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = []
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'different_host': 'fake'}}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_different_filter_fails(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = [mock.sentinel.instances]
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'different_host': ['fake'], }}
+
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_different_filter_handles_none(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': None}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(get_all_mock.called)
+
+
+@mock.patch('nova.compute.api.API.get_all')
+class TestSameHostFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestSameHostFilter, self).setUp()
+ self.filt_cls = affinity_filter.SameHostFilter()
+
+ def test_affinity_same_filter_passes(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = [mock.sentinel.images]
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'same_host': ['fake'], }}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_same_filter_no_list_passes(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = [mock.sentinel.images]
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'same_host': 'fake'}}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_same_filter_fails(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ get_all_mock.return_value = []
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'same_host': ['fake'], }}
+
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+ get_all_mock.assert_called_once_with(mock.sentinel.ctx,
+ {'host': 'host1',
+ 'uuid': ['fake'],
+ 'deleted': False})
+
+ def test_affinity_same_filter_handles_none(self, get_all_mock):
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': None}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(get_all_mock.called)
+
+
+class TestSimpleCIDRAffinityFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestSimpleCIDRAffinityFilter, self).setUp()
+ self.filt_cls = affinity_filter.SimpleCIDRAffinityFilter()
+
+ def test_affinity_simple_cidr_filter_passes(self):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ host.host_ip = '10.8.1.1'
+
+ affinity_ip = "10.8.1.100"
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'cidr': '/24',
+ 'build_near_host_ip': affinity_ip}}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_affinity_simple_cidr_filter_fails(self):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ host.host_ip = '10.8.1.1'
+
+ affinity_ip = "10.8.1.100"
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': {
+ 'cidr': '/32',
+ 'build_near_host_ip': affinity_ip}}
+
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_affinity_simple_cidr_filter_handles_none(self):
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ affinity_ip = CONF.my_ip.split('.')[0:3]
+ affinity_ip.append('100')
+ affinity_ip = str.join('.', affinity_ip)
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'scheduler_hints': None}
+
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+
+class TestGroupAffinityFilter(test.NoDBTestCase):
+
+ def _test_group_anti_affinity_filter_passes(self, filt_cls, policy):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ filter_properties = {'group_policies': ['affinity']}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ filter_properties = {'group_policies': [policy]}
+ filter_properties['group_hosts'] = []
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ filter_properties['group_hosts'] = ['host2']
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_group_anti_affinity_filter_passes(self):
+ self._test_group_anti_affinity_filter_passes(
+ affinity_filter.ServerGroupAntiAffinityFilter(),
+ 'anti-affinity')
+
+ def test_group_anti_affinity_filter_passes_legacy(self):
+ self._test_group_anti_affinity_filter_passes(
+ affinity_filter.GroupAntiAffinityFilter(),
+ 'legacy')
+
+ def _test_group_anti_affinity_filter_fails(self, filt_cls, policy):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {'group_policies': [policy],
+ 'group_hosts': ['host1']}
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_group_anti_affinity_filter_fails(self):
+ self._test_group_anti_affinity_filter_fails(
+ affinity_filter.ServerGroupAntiAffinityFilter(),
+ 'anti-affinity')
+
+ def test_group_anti_affinity_filter_fails_legacy(self):
+ self._test_group_anti_affinity_filter_fails(
+ affinity_filter.GroupAntiAffinityFilter(),
+ 'legacy')
+
+ def _test_group_affinity_filter_passes(self, filt_cls, policy):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ filter_properties = {'group_policies': ['anti-affinity']}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ filter_properties = {'group_policies': ['affinity'],
+ 'group_hosts': ['host1']}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_group_affinity_filter_passes(self):
+ self._test_group_affinity_filter_passes(
+ affinity_filter.ServerGroupAffinityFilter(), 'affinity')
+
+ def test_group_affinity_filter_passes_legacy(self):
+ self._test_group_affinity_filter_passes(
+ affinity_filter.GroupAffinityFilter(), 'legacy')
+
+ def _test_group_affinity_filter_fails(self, filt_cls, policy):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ filter_properties = {'group_policies': [policy],
+ 'group_hosts': ['host2']}
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ def test_group_affinity_filter_fails(self):
+ self._test_group_affinity_filter_fails(
+ affinity_filter.ServerGroupAffinityFilter(), 'affinity')
+
+ def test_group_affinity_filter_fails_legacy(self):
+ self._test_group_affinity_filter_fails(
+ affinity_filter.GroupAffinityFilter(), 'legacy')
diff --git a/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py b/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py
new file mode 100644
index 0000000000..b4eacf321f
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py
@@ -0,0 +1,98 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import aggregate_image_properties_isolation as aipi
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+@mock.patch('nova.db.aggregate_metadata_get_by_host')
+class TestAggImagePropsIsolationFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAggImagePropsIsolationFilter, self).setUp()
+ self.filt_cls = aipi.AggregateImagePropertiesIsolation()
+
+ def test_aggregate_image_properties_isolation_passes(self, agg_mock):
+ agg_mock.return_value = {'foo': 'bar'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {'foo': 'bar'}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_image_properties_isolation_multi_props_passes(self,
+ agg_mock):
+ agg_mock.return_value = {'foo': 'bar', 'foo2': 'bar2'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {'foo': 'bar',
+ 'foo2': 'bar2'}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_image_properties_isolation_props_with_meta_passes(self,
+ agg_mock):
+ agg_mock.return_value = {'foo': 'bar'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_image_properties_isolation_props_imgprops_passes(self,
+ agg_mock):
+ agg_mock.return_value = {}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {'foo': 'bar'}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_image_properties_isolation_props_not_match_fails(self,
+ agg_mock):
+ agg_mock.return_value = {'foo': 'bar'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {'foo': 'no-bar'}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_image_properties_isolation_props_not_match2_fails(self,
+ agg_mock):
+ agg_mock.return_value = {'foo': 'bar', 'foo2': 'bar2'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {'foo': 'bar',
+ 'foo2': 'bar3'}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_image_properties_isolation_props_namespace(self,
+ agg_mock):
+ self.flags(aggregate_image_properties_isolation_namespace="np")
+ agg_mock.return_value = {'np.foo': 'bar', 'foo2': 'bar2'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'image': {
+ 'properties': {'np.foo': 'bar',
+ 'foo2': 'bar3'}}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py b/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py
new file mode 100644
index 0000000000..4512841062
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py
@@ -0,0 +1,72 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import aggregate_instance_extra_specs as agg_specs
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+@mock.patch('nova.db.aggregate_metadata_get_by_host')
+class TestAggregateInstanceExtraSpecsFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAggregateInstanceExtraSpecsFilter, self).setUp()
+ self.filt_cls = agg_specs.AggregateInstanceExtraSpecsFilter()
+
+ def test_aggregate_filter_passes_no_extra_specs(self, agg_mock):
+ capabilities = {'opt1': 1, 'opt2': 2}
+
+ filter_properties = {'context': mock.sentinel.ctx, 'instance_type':
+ {'memory_mb': 1024}}
+ host = fakes.FakeHostState('host1', 'node1', capabilities)
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(agg_mock.called)
+
+ def _do_test_aggregate_filter_extra_specs(self, especs, passes):
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'memory_mb': 1024, 'extra_specs': especs}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024})
+ assertion = self.assertTrue if passes else self.assertFalse
+ assertion(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_filter_passes_extra_specs_simple(self, agg_mock):
+ agg_mock.return_value = {'opt1': '1', 'opt2': '2'}
+ especs = {
+ # Un-scoped extra spec
+ 'opt1': '1',
+ # Scoped extra spec that applies to this filter
+ 'aggregate_instance_extra_specs:opt2': '2',
+ # Scoped extra spec that does not apply to this filter
+ 'trust:trusted_host': 'true',
+ }
+ self._do_test_aggregate_filter_extra_specs(especs, passes=True)
+
+ def test_aggregate_filter_passes_with_key_same_as_scope(self, agg_mock):
+ agg_mock.return_value = {'aggregate_instance_extra_specs': '1'}
+ especs = {
+ # Un-scoped extra spec, make sure we don't blow up if it
+ # happens to match our scope.
+ 'aggregate_instance_extra_specs': '1',
+ }
+ self._do_test_aggregate_filter_extra_specs(especs, passes=True)
+
+ def test_aggregate_filter_fails_extra_specs_simple(self, agg_mock):
+ agg_mock.return_value = {'opt1': '1', 'opt2': '2'}
+ especs = {
+ 'opt1': '1',
+ 'opt2': '222',
+ 'trust:trusted_host': 'true'
+ }
+ self._do_test_aggregate_filter_extra_specs(especs, passes=False)
diff --git a/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py b/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py
new file mode 100644
index 0000000000..70fe5e2d41
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py
@@ -0,0 +1,53 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import aggregate_multitenancy_isolation as ami
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+@mock.patch('nova.db.aggregate_metadata_get_by_host')
+class TestAggregateMultitenancyIsolationFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAggregateMultitenancyIsolationFilter, self).setUp()
+ self.filt_cls = ami.AggregateMultiTenancyIsolation()
+
+ def test_aggregate_multi_tenancy_isolation_with_meta_passes(self,
+ agg_mock):
+ agg_mock.return_value = {'filter_tenant_id': 'my_tenantid'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'instance_properties': {
+ 'project_id': 'my_tenantid'}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_multi_tenancy_isolation_fails(self, agg_mock):
+ agg_mock.return_value = {'filter_tenant_id': 'other_tenantid'}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'instance_properties': {
+ 'project_id': 'my_tenantid'}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_multi_tenancy_isolation_no_meta_passes(self, agg_mock):
+ agg_mock.return_value = {}
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'instance_properties': {
+ 'project_id': 'my_tenantid'}}}
+ host = fakes.FakeHostState('host1', 'compute', {})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py b/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py
new file mode 100644
index 0000000000..3cf860dfb2
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py
@@ -0,0 +1,48 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import availability_zone_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+@mock.patch('nova.db.aggregate_metadata_get_by_host')
+class TestAvailabilityZoneFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAvailabilityZoneFilter, self).setUp()
+ self.filt_cls = availability_zone_filter.AvailabilityZoneFilter()
+
+ @staticmethod
+ def _make_zone_request(zone):
+ return {
+ 'context': mock.sentinel.ctx,
+ 'request_spec': {
+ 'instance_properties': {
+ 'availability_zone': zone
+ }
+ }
+ }
+
+ def test_availability_zone_filter_same(self, agg_mock):
+ agg_mock.return_value = {'availability_zone': 'nova'}
+ request = self._make_zone_request('nova')
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertTrue(self.filt_cls.host_passes(host, request))
+
+ def test_availability_zone_filter_different(self, agg_mock):
+ agg_mock.return_value = {'availability_zone': 'nova'}
+ request = self._make_zone_request('bad')
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertFalse(self.filt_cls.host_passes(host, request))
diff --git a/nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py b/nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py
new file mode 100644
index 0000000000..506b207d2a
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py
@@ -0,0 +1,99 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import six
+
+from nova.scheduler.filters import compute_capabilities_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestComputeCapabilitiesFilter(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestComputeCapabilitiesFilter, self).setUp()
+ self.filt_cls = compute_capabilities_filter.ComputeCapabilitiesFilter()
+
+ def _do_test_compute_filter_extra_specs(self, ecaps, especs, passes):
+ # In real OpenStack runtime environment, compute capabilities
+ # value may be number, so we should use number to do unit test.
+ capabilities = {}
+ capabilities.update(ecaps)
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'extra_specs': especs}}
+ host_state = {'free_ram_mb': 1024}
+ host_state.update(capabilities)
+ host = fakes.FakeHostState('host1', 'node1', host_state)
+ assertion = self.assertTrue if passes else self.assertFalse
+ assertion(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_compute_filter_pass_cpu_info_as_text_type(self):
+ cpu_info = """ { "vendor": "Intel", "model": "core2duo",
+ "arch": "i686","features": ["lahf_lm", "rdtscp"], "topology":
+ {"cores": 1, "threads":1, "sockets": 1}} """
+
+ cpu_info = six.text_type(cpu_info)
+
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'cpu_info': cpu_info},
+ especs={'capabilities:cpu_info:vendor': 'Intel'},
+ passes=True)
+
+ def test_compute_filter_fail_cpu_info_as_text_type_not_valid(self):
+ cpu_info = "cpu_info"
+
+ cpu_info = six.text_type(cpu_info)
+
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'cpu_info': cpu_info},
+ especs={'capabilities:cpu_info:vendor': 'Intel'},
+ passes=False)
+
+ def test_compute_filter_passes_extra_specs_simple(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'stats': {'opt1': 1, 'opt2': 2}},
+ especs={'opt1': '1', 'opt2': '2', 'trust:trusted_host': 'true'},
+ passes=True)
+
+ def test_compute_filter_fails_extra_specs_simple(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'stats': {'opt1': 1, 'opt2': 2}},
+ especs={'opt1': '1', 'opt2': '222', 'trust:trusted_host': 'true'},
+ passes=False)
+
+ def test_compute_filter_pass_extra_specs_simple_with_scope(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'stats': {'opt1': 1, 'opt2': 2}},
+ especs={'capabilities:opt1': '1',
+ 'trust:trusted_host': 'true'},
+ passes=True)
+
+ def test_compute_filter_pass_extra_specs_same_as_scope(self):
+ # Make sure this still works even if the key is the same as the scope
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'capabilities': 1},
+ especs={'capabilities': '1'},
+ passes=True)
+
+ def test_compute_filter_extra_specs_simple_with_wrong_scope(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'opt1': 1, 'opt2': 2},
+ especs={'wrong_scope:opt1': '1',
+ 'trust:trusted_host': 'true'},
+ passes=True)
+
+ def test_compute_filter_extra_specs_pass_multi_level_with_scope(self):
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'stats': {'opt1': {'a': 1, 'b': {'aa': 2}}, 'opt2': 2}},
+ especs={'opt1:a': '1', 'capabilities:opt1:b:aa': '2',
+ 'trust:trusted_host': 'true'},
+ passes=True)
diff --git a/nova/tests/unit/scheduler/filters/test_compute_filters.py b/nova/tests/unit/scheduler/filters/test_compute_filters.py
new file mode 100644
index 0000000000..7e31e1ef5a
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_compute_filters.py
@@ -0,0 +1,50 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import compute_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
@mock.patch('nova.servicegroup.API.service_is_up')
class TestComputeFilter(test.NoDBTestCase):
    """Tests for ComputeFilter: disabled services and liveness checks."""

    def _host_with_service(self, service):
        # Build a host whose state carries the given compute service record.
        return fakes.FakeHostState('host1', 'node1',
                                   {'free_ram_mb': 1024, 'service': service})

    def test_compute_filter_manual_disable(self, service_up_mock):
        # An administratively disabled service fails without ever
        # consulting the servicegroup API.
        filt = compute_filter.ComputeFilter()
        props = {'instance_type': {'memory_mb': 1024}}
        host = self._host_with_service({'disabled': True})
        self.assertFalse(filt.host_passes(host, props))
        self.assertFalse(service_up_mock.called)

    def test_compute_filter_sgapi_passes(self, service_up_mock):
        # An enabled, live service passes the filter.
        filt = compute_filter.ComputeFilter()
        props = {'instance_type': {'memory_mb': 1024}}
        service = {'disabled': False}
        host = self._host_with_service(service)
        service_up_mock.return_value = True
        self.assertTrue(filt.host_passes(host, props))
        service_up_mock.assert_called_once_with(service)

    def test_compute_filter_sgapi_fails(self, service_up_mock):
        # An enabled service that the servicegroup API reports as down
        # is rejected.
        filt = compute_filter.ComputeFilter()
        props = {'instance_type': {'memory_mb': 1024}}
        service = {'disabled': False, 'updated_at': 'now'}
        host = self._host_with_service(service)
        service_up_mock.return_value = False
        self.assertFalse(filt.host_passes(host, props))
        service_up_mock.assert_called_once_with(service)
diff --git a/nova/tests/unit/scheduler/filters/test_core_filters.py b/nova/tests/unit/scheduler/filters/test_core_filters.py
new file mode 100644
index 0000000000..cfe2c51be6
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_core_filters.py
@@ -0,0 +1,87 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import core_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
class TestCoreFilter(test.NoDBTestCase):
    """Tests for CoreFilter and AggregateCoreFilter vCPU accounting."""

    def test_core_filter_passes(self):
        # 4 physical vcpus * ratio 2 = 8 schedulable; 7 used + 1
        # requested still fits.
        filt = core_filter.CoreFilter()
        self.flags(cpu_allocation_ratio=2)
        host = fakes.FakeHostState('host1', 'node1',
                                   {'vcpus_total': 4, 'vcpus_used': 7})
        props = {'instance_type': {'vcpus': 1}}
        self.assertTrue(filt.host_passes(host, props))

    def test_core_filter_fails_safe(self):
        # Hosts that report no vcpu data are not filtered out.
        filt = core_filter.CoreFilter()
        host = fakes.FakeHostState('host1', 'node1', {})
        props = {'instance_type': {'vcpus': 1}}
        self.assertTrue(filt.host_passes(host, props))

    def test_core_filter_fails(self):
        # All 8 schedulable vcpus already used -> no room left.
        filt = core_filter.CoreFilter()
        self.flags(cpu_allocation_ratio=2)
        host = fakes.FakeHostState('host1', 'node1',
                                   {'vcpus_total': 4, 'vcpus_used': 8})
        props = {'instance_type': {'vcpus': 1}}
        self.assertFalse(filt.host_passes(host, props))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
    def test_aggregate_core_filter_value_error(self, agg_mock):
        # A non-numeric aggregate value falls back to the config ratio.
        filt = core_filter.AggregateCoreFilter()
        props = {'context': mock.sentinel.ctx,
                 'instance_type': {'vcpus': 1}}
        self.flags(cpu_allocation_ratio=2)
        host = fakes.FakeHostState('host1', 'node1',
                                   {'vcpus_total': 4, 'vcpus_used': 7})
        agg_mock.return_value = set(['XXX'])
        self.assertTrue(filt.host_passes(host, props))
        agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
                                         'cpu_allocation_ratio')
        self.assertEqual(4 * 2, host.limits['vcpu'])

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
    def test_aggregate_core_filter_default_value(self, agg_mock):
        filt = core_filter.AggregateCoreFilter()
        props = {'context': mock.sentinel.ctx,
                 'instance_type': {'vcpus': 1}}
        self.flags(cpu_allocation_ratio=2)
        host = fakes.FakeHostState('host1', 'node1',
                                   {'vcpus_total': 4, 'vcpus_used': 8})
        # No aggregate metadata: fall back to the config flag and fail.
        agg_mock.return_value = set([])
        self.assertFalse(filt.host_passes(host, props))
        agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
                                         'cpu_allocation_ratio')
        # An aggregate-supplied larger ratio lets the host pass.
        agg_mock.return_value = set(['3'])
        self.assertTrue(filt.host_passes(host, props))
        self.assertEqual(4 * 3, host.limits['vcpu'])

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
    def test_aggregate_core_filter_conflict_values(self, agg_mock):
        # Conflicting aggregate values: the minimum ratio wins.
        filt = core_filter.AggregateCoreFilter()
        props = {'context': mock.sentinel.ctx,
                 'instance_type': {'vcpus': 1}}
        self.flags(cpu_allocation_ratio=1)
        host = fakes.FakeHostState('host1', 'node1',
                                   {'vcpus_total': 4, 'vcpus_used': 8})
        agg_mock.return_value = set(['2', '3'])
        self.assertFalse(filt.host_passes(host, props))
        self.assertEqual(4 * 2, host.limits['vcpu'])
diff --git a/nova/tests/unit/scheduler/filters/test_disk_filters.py b/nova/tests/unit/scheduler/filters/test_disk_filters.py
new file mode 100644
index 0000000000..14e9328732
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_disk_filters.py
@@ -0,0 +1,100 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import disk_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
class TestDiskFilter(test.NoDBTestCase):
    """Tests for DiskFilter and AggregateDiskFilter disk accounting.

    Note: the original class carried a setUp() that only called
    super(); it added nothing, so it has been removed.
    """

    def test_disk_filter_passes(self):
        """Request fits within free disk at a 1.0 allocation ratio."""
        self.flags(disk_allocation_ratio=1.0)
        filt_cls = disk_filter.DiskFilter()
        # root 1 GB + ephemeral 1 GB + 512 MB swap < 11 GB free.
        filter_properties = {'instance_type': {'root_gb': 1,
            'ephemeral_gb': 1, 'swap': 512}}
        host = fakes.FakeHostState('host1', 'node1',
            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    def test_disk_filter_fails(self):
        """Request exceeding free disk is rejected."""
        self.flags(disk_allocation_ratio=1.0)
        filt_cls = disk_filter.DiskFilter()
        # root 10 GB + ephemeral 1 GB + 1 GB swap > 11 GB free.
        filter_properties = {'instance_type': {'root_gb': 10,
            'ephemeral_gb': 1, 'swap': 1024}}
        host = fakes.FakeHostState('host1', 'node1',
            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    def test_disk_filter_oversubscribe(self):
        """Allocation ratio > 1 allows oversubscription of disk."""
        self.flags(disk_allocation_ratio=10.0)
        filt_cls = disk_filter.DiskFilter()
        filter_properties = {'instance_type': {'root_gb': 100,
            'ephemeral_gb': 18, 'swap': 1024}}
        # 1GB used... so 119GB allowed...
        host = fakes.FakeHostState('host1', 'node1',
            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        # The filter records the oversubscribed limit on the host.
        self.assertEqual(12 * 10.0, host.limits['disk_gb'])

    def test_disk_filter_oversubscribe_fail(self):
        """Even with oversubscription, too large a request fails."""
        self.flags(disk_allocation_ratio=10.0)
        filt_cls = disk_filter.DiskFilter()
        filter_properties = {'instance_type': {'root_gb': 100,
            'ephemeral_gb': 19, 'swap': 1024}}
        # 1GB used... so 119GB allowed...
        host = fakes.FakeHostState('host1', 'node1',
            {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
        self.assertFalse(filt_cls.host_passes(host, filter_properties))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
    def test_aggregate_disk_filter_value_error(self, agg_mock):
        """Non-numeric aggregate value falls back to the config ratio."""
        filt_cls = disk_filter.AggregateDiskFilter()
        self.flags(disk_allocation_ratio=1.0)
        filter_properties = {
            'context': mock.sentinel.ctx,
            'instance_type': {'root_gb': 1,
                              'ephemeral_gb': 1,
                              'swap': 1024}}
        host = fakes.FakeHostState('host1', 'node1',
                                   {'free_disk_mb': 3 * 1024,
                                    'total_usable_disk_gb': 1})
        agg_mock.return_value = set(['XXX'])
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
                                         'disk_allocation_ratio')

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
    def test_aggregate_disk_filter_default_value(self, agg_mock):
        """Empty aggregate metadata falls back to the config ratio."""
        filt_cls = disk_filter.AggregateDiskFilter()
        self.flags(disk_allocation_ratio=1.0)
        filter_properties = {
            'context': mock.sentinel.ctx,
            'instance_type': {'root_gb': 2,
                              'ephemeral_gb': 1,
                              'swap': 1024}}
        host = fakes.FakeHostState('host1', 'node1',
                                   {'free_disk_mb': 3 * 1024,
                                    'total_usable_disk_gb': 1})
        # Uses global conf.
        agg_mock.return_value = set([])
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
        agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
                                         'disk_allocation_ratio')

        # An aggregate-supplied larger ratio lets the host pass.
        agg_mock.return_value = set(['2'])
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/scheduler/filters/test_extra_specs_ops.py b/nova/tests/unit/scheduler/filters/test_extra_specs_ops.py
index 5f8f912a81..5f8f912a81 100644
--- a/nova/tests/scheduler/filters/test_extra_specs_ops.py
+++ b/nova/tests/unit/scheduler/filters/test_extra_specs_ops.py
diff --git a/nova/tests/unit/scheduler/filters/test_image_props_filters.py b/nova/tests/unit/scheduler/filters/test_image_props_filters.py
new file mode 100644
index 0000000000..ee3a175dce
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_image_props_filters.py
@@ -0,0 +1,189 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute import arch
+from nova.compute import hvtype
+from nova.compute import vm_mode
+from nova.scheduler.filters import image_props_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+from nova import utils
+
+
class TestImagePropsFilter(test.NoDBTestCase):
    """Tests for ImagePropertiesFilter.

    The filter matches image properties (architecture, hypervisor type,
    vm mode, and a required hypervisor version range) against the
    instance types a host reports it can support.
    """

    def setUp(self):
        super(TestImagePropsFilter, self).setUp()
        self.filt_cls = image_props_filter.ImagePropertiesFilter()

    def test_image_properties_filter_passes_same_inst_props_and_version(self):
        # FIX: was '_architecture' (leading underscore), which is not a
        # recognized image property, so the architecture check was never
        # actually exercised by this test.
        img_props = {'properties': {'architecture': arch.X86_64,
                                    'hypervisor_type': hvtype.KVM,
                                    'vm_mode': vm_mode.HVM,
                                    'hypervisor_version_requires': '>=6.0,<6.2'
                                    }}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances':
                        [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_fails_different_inst_props(self):
        """Host does not support the image's (arch, hvtype) combination."""
        img_props = {'properties': {'architecture': arch.ARMV7,
                                    'hypervisor_type': hvtype.QEMU,
                                    'vm_mode': vm_mode.HVM}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances':
                        [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(self.filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_fails_different_hyper_version(self):
        """Host hypervisor version is outside the required range."""
        img_props = {'properties': {'architecture': arch.X86_64,
                                    'hypervisor_type': hvtype.KVM,
                                    'vm_mode': vm_mode.HVM,
                                    'hypervisor_version_requires': '>=6.2'}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'enabled': True,
                        'supported_instances':
                        [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(self.filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_passes_partial_inst_props(self):
        """An image may specify only a subset of the properties."""
        img_props = {'properties': {'architecture': arch.X86_64,
                                    'vm_mode': vm_mode.HVM}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances':
                        [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_fails_partial_inst_props(self):
        """A partial property set must still match the host's support."""
        img_props = {'properties': {'architecture': arch.X86_64,
                                    'vm_mode': vm_mode.HVM}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances':
                        [(arch.X86_64, hvtype.XEN, vm_mode.XEN)],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(self.filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_passes_without_inst_props(self):
        """Requests without image properties are not filtered."""
        filter_properties = {'request_spec': {}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances':
                        [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_fails_without_host_props(self):
        """A host that advertises no supported instances is rejected."""
        img_props = {'properties': {'architecture': arch.X86_64,
                                    'hypervisor_type': hvtype.KVM,
                                    'vm_mode': vm_mode.HVM}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'enabled': True,
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(self.filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_passes_without_hyper_version(self):
        """A version requirement is ignored if the host reports none."""
        img_props = {'properties': {'architecture': arch.X86_64,
                                    'hypervisor_type': hvtype.KVM,
                                    'vm_mode': vm_mode.HVM,
                                    'hypervisor_version_requires': '>=6.0'}}
        filter_properties = {'request_spec': {'image': img_props}}
        capabilities = {'enabled': True,
                        'supported_instances':
                        [(arch.X86_64, hvtype.KVM, vm_mode.HVM)]}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_fails_with_unsupported_hyper_ver(self):
        """A host hypervisor version below the requirement fails."""
        img_props = {'properties': {'architecture': arch.X86_64,
                                    'hypervisor_type': hvtype.KVM,
                                    'vm_mode': vm_mode.HVM,
                                    'hypervisor_version_requires': '>=6.0'}}
        filter_properties = {'request_spec': {'image': img_props}}
        capabilities = {'enabled': True,
                        'supported_instances':
                        [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
                        'hypervisor_version': 5000}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertFalse(self.filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_pv_mode_compat(self):
        # if an old image has 'pv' for a vm_mode it should be treated as xen
        img_props = {'properties': {'vm_mode': 'pv'}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances':
                        [(arch.X86_64, hvtype.XEN, vm_mode.XEN)],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_hvm_mode_compat(self):
        # if an old image has 'hv' for a vm_mode it should be treated as hvm
        # (comment fix: previously said "xen", but the host here supports
        # HVM on KVM, which is what the test verifies)
        img_props = {'properties': {'vm_mode': 'hv'}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances':
                        [(arch.X86_64, hvtype.KVM, vm_mode.HVM)],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_xen_arch_compat(self):
        # if an old image has 'x86_32' for arch it should be treated as i686
        img_props = {'properties': {'architecture': 'x86_32'}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances':
                        [(arch.I686, hvtype.KVM, vm_mode.HVM)],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_xen_hvtype_compat(self):
        # if an old image has 'xapi' for hvtype it should be treated as xen
        img_props = {'properties': {'hypervisor_type': 'xapi'}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances':
                        [(arch.I686, hvtype.XEN, vm_mode.HVM)],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))

    def test_image_properties_filter_baremetal_vmmode_compat(self):
        # if an old image has 'baremetal' for vmmode it should be
        # treated as hvm
        img_props = {'properties': {'vm_mode': 'baremetal'}}
        filter_properties = {'request_spec': {'image': img_props}}
        hypervisor_version = utils.convert_version_to_int('6.0.0')
        capabilities = {'supported_instances':
                        [(arch.I686, hvtype.BAREMETAL, vm_mode.HVM)],
                        'hypervisor_version': hypervisor_version}
        host = fakes.FakeHostState('host1', 'node1', capabilities)
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/unit/scheduler/filters/test_io_ops_filters.py b/nova/tests/unit/scheduler/filters/test_io_ops_filters.py
new file mode 100644
index 0000000000..c558b7711f
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_io_ops_filters.py
@@ -0,0 +1,63 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import mock
+
+from nova.scheduler.filters import io_ops_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
class TestNumInstancesFilter(test.NoDBTestCase):
    """Tests for IoOpsFilter and AggregateIoOpsFilter."""
    # NOTE(review): the class name looks copied from the num_instances
    # tests -- it actually covers the io_ops filters. Confirm before
    # renaming, since test runners discover tests by class name.

    def test_filter_num_iops_passes(self):
        # 7 concurrent I/O ops is below the configured limit of 8.
        self.flags(max_io_ops_per_host=8)
        filt = io_ops_filter.IoOpsFilter()
        host = fakes.FakeHostState('host1', 'node1', {'num_io_ops': 7})
        self.assertTrue(filt.host_passes(host, {}))

    def test_filter_num_iops_fails(self):
        # At the configured limit the host is rejected.
        self.flags(max_io_ops_per_host=8)
        filt = io_ops_filter.IoOpsFilter()
        host = fakes.FakeHostState('host1', 'node1', {'num_io_ops': 8})
        self.assertFalse(filt.host_passes(host, {}))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
    def test_aggregate_filter_num_iops_value(self, agg_mock):
        self.flags(max_io_ops_per_host=7)
        filt = io_ops_filter.AggregateIoOpsFilter()
        host = fakes.FakeHostState('host1', 'node1', {'num_io_ops': 7})
        props = {'context': mock.sentinel.ctx}
        # No aggregate override: the config limit of 7 rejects the host.
        agg_mock.return_value = set([])
        self.assertFalse(filt.host_passes(host, props))
        agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
                                         'max_io_ops_per_host')
        # The aggregate raises the limit to 8, so the host now passes.
        agg_mock.return_value = set(['8'])
        self.assertTrue(filt.host_passes(host, props))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
    def test_aggregate_filter_num_iops_value_error(self, agg_mock):
        # A non-numeric aggregate value falls back to the config limit.
        self.flags(max_io_ops_per_host=8)
        filt = io_ops_filter.AggregateIoOpsFilter()
        host = fakes.FakeHostState('host1', 'node1', {'num_io_ops': 7})
        agg_mock.return_value = set(['XXX'])
        props = {'context': mock.sentinel.ctx}
        self.assertTrue(filt.host_passes(host, props))
        agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
                                         'max_io_ops_per_host')
diff --git a/nova/tests/unit/scheduler/filters/test_isolated_hosts_filter.py b/nova/tests/unit/scheduler/filters/test_isolated_hosts_filter.py
new file mode 100644
index 0000000000..343c86264c
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_isolated_hosts_filter.py
@@ -0,0 +1,90 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.scheduler.filters import isolated_hosts_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
class TestIsolatedHostsFilter(test.NoDBTestCase):
    """Tests for IsolatedHostsFilter host/image isolation rules."""

    def setUp(self):
        super(TestIsolatedHostsFilter, self).setUp()
        self.filt_cls = isolated_hosts_filter.IsolatedHostsFilter()

    def _do_test_isolated_hosts(self, host_in_list, image_in_list,
                                set_flags=True,
                                restrict_isolated_hosts_to_isolated_images=True):
        # Run the filter for one (host, image) combination and return
        # whether the host passed.
        if set_flags:
            restrict = restrict_isolated_hosts_to_isolated_images
            self.flags(isolated_images=['isolated_image'],
                       isolated_hosts=['isolated_host'],
                       restrict_isolated_hosts_to_isolated_images=restrict)
        host_name = 'isolated_host' if host_in_list else 'free_host'
        image_ref = 'isolated_image' if image_in_list else 'free_image'
        props = {'request_spec':
                 {'instance_properties': {'image_ref': image_ref}}}
        host = fakes.FakeHostState(host_name, 'node', {})
        return self.filt_cls.host_passes(host, props)

    def test_isolated_hosts_fails_isolated_on_non_isolated(self):
        self.assertFalse(self._do_test_isolated_hosts(False, True))

    def test_isolated_hosts_fails_non_isolated_on_isolated(self):
        self.assertFalse(self._do_test_isolated_hosts(True, False))

    def test_isolated_hosts_passes_isolated_on_isolated(self):
        self.assertTrue(self._do_test_isolated_hosts(True, True))

    def test_isolated_hosts_passes_non_isolated_on_non_isolated(self):
        self.assertTrue(self._do_test_isolated_hosts(False, False))

    def test_isolated_hosts_no_config(self):
        # If there are no hosts nor isolated images in the config, it should
        # not filter at all. This is the default config.
        for host_in, image_in in ((False, True), (True, False),
                                  (True, True), (False, False)):
            self.assertTrue(
                self._do_test_isolated_hosts(host_in, image_in, False))

    def test_isolated_hosts_no_hosts_config(self):
        self.flags(isolated_images=['isolated_image'])
        # With no hosts configured, only the listed image is restricted.
        self.assertFalse(self._do_test_isolated_hosts(False, True, False))
        self.assertTrue(self._do_test_isolated_hosts(True, False, False))
        self.assertFalse(self._do_test_isolated_hosts(True, True, False))
        self.assertTrue(self._do_test_isolated_hosts(False, False, False))

    def test_isolated_hosts_no_images_config(self):
        self.flags(isolated_hosts=['isolated_host'])
        # With no images configured, only the isolated hosts are
        # filtered out.
        self.assertTrue(self._do_test_isolated_hosts(False, True, False))
        self.assertFalse(self._do_test_isolated_hosts(True, False, False))
        self.assertFalse(self._do_test_isolated_hosts(True, True, False))
        self.assertTrue(self._do_test_isolated_hosts(False, False, False))

    def test_isolated_hosts_less_restrictive(self):
        # With the restriction relaxed, isolated hosts may also run
        # non-isolated images.
        self.assertTrue(self._do_test_isolated_hosts(True, False, True, False))
        self.assertTrue(self._do_test_isolated_hosts(True, True, True, False))
        self.assertTrue(self._do_test_isolated_hosts(False, False, True,
                                                     False))
        # ...but isolated images are still pinned to isolated hosts.
        self.assertFalse(self._do_test_isolated_hosts(False, True, True,
                                                      False))
diff --git a/nova/tests/unit/scheduler/filters/test_json_filters.py b/nova/tests/unit/scheduler/filters/test_json_filters.py
new file mode 100644
index 0000000000..c5ddca7520
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_json_filters.py
@@ -0,0 +1,289 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.serialization import jsonutils
+
+from nova.scheduler.filters import json_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestJsonFilter(test.NoDBTestCase):
+
    def setUp(self):
        """Create the filter and a reusable ram+disk query.

        The query requires at least 1024 MB of free RAM and 200 GB of
        free disk on the host.
        """
        super(TestJsonFilter, self).setUp()
        self.filt_cls = json_filter.JsonFilter()
        self.json_query = jsonutils.dumps(
            ['and', ['>=', '$free_ram_mb', 1024],
             ['>=', '$free_disk_mb', 200 * 1024]])
+
+ def test_json_filter_passes(self):
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'root_gb': 200,
+ 'ephemeral_gb': 0},
+ 'scheduler_hints': {'query': self.json_query}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024,
+ 'free_disk_mb': 200 * 1024})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_passes_with_no_query(self):
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'root_gb': 200,
+ 'ephemeral_gb': 0}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 0,
+ 'free_disk_mb': 0})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_fails_on_memory(self):
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'root_gb': 200,
+ 'ephemeral_gb': 0},
+ 'scheduler_hints': {'query': self.json_query}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1023,
+ 'free_disk_mb': 200 * 1024})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_fails_on_disk(self):
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'root_gb': 200,
+ 'ephemeral_gb': 0},
+ 'scheduler_hints': {'query': self.json_query}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024,
+ 'free_disk_mb': (200 * 1024) - 1})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_fails_on_service_disabled(self):
+ json_query = jsonutils.dumps(
+ ['and', ['>=', '$free_ram_mb', 1024],
+ ['>=', '$free_disk_mb', 200 * 1024],
+ ['not', '$service.disabled']])
+ filter_properties = {'instance_type': {'memory_mb': 1024,
+ 'local_gb': 200},
+ 'scheduler_hints': {'query': json_query}}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 1024,
+ 'free_disk_mb': 200 * 1024})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_happy_day(self):
+ # Test json filter more thoroughly.
+ raw = ['and',
+ '$capabilities.enabled',
+ ['=', '$capabilities.opt1', 'match'],
+ ['or',
+ ['and',
+ ['<', '$free_ram_mb', 30],
+ ['<', '$free_disk_mb', 300]],
+ ['and',
+ ['>', '$free_ram_mb', 30],
+ ['>', '$free_disk_mb', 300]]]]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+
+ # Passes
+ capabilities = {'opt1': 'match'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 10,
+ 'free_disk_mb': 200,
+ 'capabilities': capabilities,
+ 'service': service})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ # Passes
+ capabilities = {'opt1': 'match'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 40,
+ 'free_disk_mb': 400,
+ 'capabilities': capabilities,
+ 'service': service})
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ # Fails due to capabilities being disabled
+ capabilities = {'enabled': False, 'opt1': 'match'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 40,
+ 'free_disk_mb': 400,
+ 'capabilities': capabilities,
+ 'service': service})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ # Fails due to being exact memory/disk we don't want
+ capabilities = {'enabled': True, 'opt1': 'match'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 30,
+ 'free_disk_mb': 300,
+ 'capabilities': capabilities,
+ 'service': service})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ # Fails due to memory lower but disk higher
+ capabilities = {'enabled': True, 'opt1': 'match'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 20,
+ 'free_disk_mb': 400,
+ 'capabilities': capabilities,
+ 'service': service})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ # Fails due to capabilities 'opt1' not equal
+ capabilities = {'enabled': True, 'opt1': 'no-match'}
+ service = {'enabled': True}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_ram_mb': 20,
+ 'free_disk_mb': 400,
+ 'capabilities': capabilities,
+ 'service': service})
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_basic_operators(self):
+ host = fakes.FakeHostState('host1', 'node1', {})
+ # (operator, arguments, expected_result)
+ ops_to_test = [
+ ['=', [1, 1], True],
+ ['=', [1, 2], False],
+ ['<', [1, 2], True],
+ ['<', [1, 1], False],
+ ['<', [2, 1], False],
+ ['>', [2, 1], True],
+ ['>', [2, 2], False],
+ ['>', [2, 3], False],
+ ['<=', [1, 2], True],
+ ['<=', [1, 1], True],
+ ['<=', [2, 1], False],
+ ['>=', [2, 1], True],
+ ['>=', [2, 2], True],
+ ['>=', [2, 3], False],
+ ['in', [1, 1], True],
+ ['in', [1, 1, 2, 3], True],
+ ['in', [4, 1, 2, 3], False],
+ ['not', [True], False],
+ ['not', [False], True],
+ ['or', [True, False], True],
+ ['or', [False, False], False],
+ ['and', [True, True], True],
+ ['and', [False, False], False],
+ ['and', [True, False], False],
+ # Nested ((True or False) and (2 > 1)) == Passes
+ ['and', [['or', True, False], ['>', 2, 1]], True]]
+
+ for (op, args, expected) in ops_to_test:
+ raw = [op] + args
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertEqual(expected,
+ self.filt_cls.host_passes(host, filter_properties))
+
+ # This results in [False, True, False, True] and if any are True
+ # then it passes...
+ raw = ['not', True, False, True, False]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ # This results in [False, False, False] and if any are True
+ # then it passes...which this doesn't
+ raw = ['not', True, True, True]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_unknown_operator_raises(self):
+ raw = ['!=', 1, 2]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ host = fakes.FakeHostState('host1', 'node1',
+ {})
+ self.assertRaises(KeyError,
+ self.filt_cls.host_passes, host, filter_properties)
+
+ def test_json_filter_empty_filters_pass(self):
+ host = fakes.FakeHostState('host1', 'node1',
+ {})
+
+ raw = []
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ raw = {}
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_invalid_num_arguments_fails(self):
+ host = fakes.FakeHostState('host1', 'node1',
+ {})
+
+ raw = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ raw = ['>', 1]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ def test_json_filter_unknown_variable_ignored(self):
+ host = fakes.FakeHostState('host1', 'node1',
+ {})
+
+ raw = ['=', '$........', 1, 1]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+
+ raw = ['=', '$foo', 2, 2]
+ filter_properties = {
+ 'scheduler_hints': {
+ 'query': jsonutils.dumps(raw),
+ },
+ }
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/unit/scheduler/filters/test_metrics_filters.py b/nova/tests/unit/scheduler/filters/test_metrics_filters.py
new file mode 100644
index 0000000000..9ae0f6c77c
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_metrics_filters.py
@@ -0,0 +1,34 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.scheduler.filters import metrics_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
class TestMetricsFilter(test.NoDBTestCase):
    """Tests for MetricsFilter, which requires a host to report every
    metric named in the [metrics]weight_setting option.
    """

    def test_metrics_filter_pass(self):
        # Host reports both configured metrics -> accepted.
        self.flags(weight_setting=['foo=1', 'bar=2'], group='metrics')
        filt_cls = metrics_filter.MetricsFilter()
        host_state = fakes.FakeHostState(
            'host1', 'node1',
            attribute_dict={'metrics': {'foo': 1, 'bar': 2}})
        self.assertTrue(filt_cls.host_passes(host_state, None))

    def test_metrics_filter_missing_metrics(self):
        # Host lacks 'bar' -> filtered out.
        self.flags(weight_setting=['foo=1', 'bar=2'], group='metrics')
        filt_cls = metrics_filter.MetricsFilter()
        host_state = fakes.FakeHostState(
            'host1', 'node1',
            attribute_dict={'metrics': {'foo': 1}})
        self.assertFalse(filt_cls.host_passes(host_state, None))
diff --git a/nova/tests/unit/scheduler/filters/test_num_instances_filters.py b/nova/tests/unit/scheduler/filters/test_num_instances_filters.py
new file mode 100644
index 0000000000..3db0eeb6e7
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_num_instances_filters.py
@@ -0,0 +1,63 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import num_instances_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
class TestNumInstancesFilter(test.NoDBTestCase):
    """Tests for (Aggregate)NumInstancesFilter, which caps the number of
    instances a single host may hold.
    """

    def test_filter_num_instances_passes(self):
        # 4 instances is under the limit of 5 -> accepted.
        self.flags(max_instances_per_host=5)
        filt_cls = num_instances_filter.NumInstancesFilter()
        host = fakes.FakeHostState('host1', 'node1', {'num_instances': 4})
        self.assertTrue(filt_cls.host_passes(host, {}))

    def test_filter_num_instances_fails(self):
        # Already at the limit of 5 -> rejected.
        self.flags(max_instances_per_host=5)
        filt_cls = num_instances_filter.NumInstancesFilter()
        host = fakes.FakeHostState('host1', 'node1', {'num_instances': 5})
        self.assertFalse(filt_cls.host_passes(host, {}))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
    def test_filter_aggregate_num_instances_value(self, agg_mock):
        self.flags(max_instances_per_host=4)
        filt_cls = num_instances_filter.AggregateNumInstancesFilter()
        host = fakes.FakeHostState('host1', 'node1', {'num_instances': 5})
        filter_properties = {'context': mock.sentinel.ctx}
        # No aggregate defined for that host: the flag value (4) applies
        # and 5 instances exceed it.
        agg_mock.return_value = set([])
        self.assertFalse(filt_cls.host_passes(host, filter_properties))
        agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
                                         'max_instances_per_host')
        # Aggregate defined for that host: its value (6) overrides the
        # flag, so 5 instances now fit.
        agg_mock.return_value = set(['6'])
        self.assertTrue(filt_cls.host_passes(host, filter_properties))

    @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
    def test_filter_aggregate_num_instances_value_error(self, agg_mock):
        # A non-numeric aggregate value falls back to the flag (6).
        self.flags(max_instances_per_host=6)
        filt_cls = num_instances_filter.AggregateNumInstancesFilter()
        host = fakes.FakeHostState('host1', 'node1', {})
        filter_properties = {'context': mock.sentinel.ctx}
        agg_mock.return_value = set(['XXX'])
        self.assertTrue(filt_cls.host_passes(host, filter_properties))
        agg_mock.assert_called_once_with(mock.sentinel.ctx, 'host1',
                                         'max_instances_per_host')
diff --git a/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
new file mode 100644
index 0000000000..3c8eb049c8
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
@@ -0,0 +1,151 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova import objects
+from nova.objects import base as obj_base
+from nova.scheduler.filters import numa_topology_filter
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.scheduler import fakes
+from nova.virt import hardware
+
+
class TestNUMATopologyFilter(test.NoDBTestCase):
    """Tests for NUMATopologyFilter, which fits an instance's requested
    NUMA topology onto the host's reported topology, honouring the CPU
    and RAM allocation (overcommit) ratios.

    The original tests repeated the same instance/request_spec plumbing
    in every method; that construction now lives in private helpers so
    each test states only its topology and its expectation.
    """

    def setUp(self):
        super(TestNUMATopologyFilter, self).setUp()
        self.filt_cls = numa_topology_filter.NUMATopologyFilter()

    def _filter_properties(self, instance):
        # Every test feeds the filter the same request_spec shape; build
        # it in one place from the primitived instance.
        return {
            'request_spec': {
                'instance_properties': jsonutils.to_primitive(
                    obj_base.obj_to_primitive(instance))}}

    def _instance(self, cells):
        # Fake instance whose numa_topology object is built from the
        # given VirtNUMATopologyCellInstance list; None means the
        # instance makes no NUMA request.
        instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
        if cells is None:
            instance.numa_topology = None
        else:
            topology = hardware.VirtNUMAInstanceTopology(cells=cells)
            instance.numa_topology = (
                objects.InstanceNUMATopology.obj_from_topology(topology))
        return instance

    def _numa_host(self):
        # Host state advertising the shared fake NUMA topology.
        return fakes.FakeHostState('host1', 'node1',
                                   {'numa_topology': fakes.NUMA_TOPOLOGY})

    def test_numa_topology_filter_pass(self):
        instance = self._instance(
            [hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
             hardware.VirtNUMATopologyCellInstance(1, set([3]), 512)])
        self.assertTrue(self.filt_cls.host_passes(
            self._numa_host(), self._filter_properties(instance)))

    def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
        # Instance requests NUMA placement but the host reports no
        # topology -> reject.
        instance = self._instance(
            [hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
             hardware.VirtNUMATopologyCellInstance(1, set([3]), 512)])
        host = fakes.FakeHostState('host1', 'node1', {})
        self.assertFalse(self.filt_cls.host_passes(
            host, self._filter_properties(instance)))

    def test_numa_topology_filter_numa_host_no_numa_instance_pass(self):
        # Instance without a NUMA request fits on a NUMA host.
        instance = self._instance(None)
        self.assertTrue(self.filt_cls.host_passes(
            self._numa_host(), self._filter_properties(instance)))

    def test_numa_topology_filter_fail_fit(self):
        # Three instance cells cannot be fitted onto the fake host
        # topology -> reject.
        instance = self._instance(
            [hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
             hardware.VirtNUMATopologyCellInstance(1, set([2]), 512),
             hardware.VirtNUMATopologyCellInstance(2, set([3]), 512)])
        self.assertFalse(self.filt_cls.host_passes(
            self._numa_host(), self._filter_properties(instance)))

    def test_numa_topology_filter_fail_memory(self):
        # With no RAM overcommit, a 1024MB instance cell exceeds what
        # the fake host cell offers -> reject.
        self.flags(ram_allocation_ratio=1)
        instance = self._instance(
            [hardware.VirtNUMATopologyCellInstance(0, set([1]), 1024),
             hardware.VirtNUMATopologyCellInstance(1, set([3]), 512)])
        self.assertFalse(self.filt_cls.host_passes(
            self._numa_host(), self._filter_properties(instance)))

    def test_numa_topology_filter_fail_cpu(self):
        # With no CPU overcommit, a three-vCPU instance cell exceeds
        # what the fake host cell offers -> reject.
        self.flags(cpu_allocation_ratio=1)
        instance = self._instance(
            [hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
             hardware.VirtNUMATopologyCellInstance(
                 1, set([3, 4, 5]), 512)])
        self.assertFalse(self.filt_cls.host_passes(
            self._numa_host(), self._filter_properties(instance)))

    def test_numa_topology_filter_pass_set_limit(self):
        # A passing host must record per-cell cpu/memory limits, scaled
        # by the allocation ratios (42 and 665 follow from the
        # fakes.NUMA_TOPOLOGY cell sizes times 21 and 1.3).
        self.flags(cpu_allocation_ratio=21)
        self.flags(ram_allocation_ratio=1.3)
        instance = self._instance(
            [hardware.VirtNUMATopologyCellInstance(0, set([1]), 512),
             hardware.VirtNUMATopologyCellInstance(1, set([3]), 512)])
        host = self._numa_host()
        self.assertTrue(self.filt_cls.host_passes(
            host, self._filter_properties(instance)))
        limits_topology = hardware.VirtNUMALimitTopology.from_json(
            host.limits['numa_topology'])
        self.assertEqual(limits_topology.cells[0].cpu_limit, 42)
        self.assertEqual(limits_topology.cells[1].cpu_limit, 42)
        self.assertEqual(limits_topology.cells[0].memory_limit, 665)
        self.assertEqual(limits_topology.cells[1].memory_limit, 665)
diff --git a/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py b/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
new file mode 100644
index 0000000000..57dd5ebc02
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
@@ -0,0 +1,67 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import objects
+from nova.scheduler.filters import pci_passthrough_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
class TestPCIPassthroughFilter(test.NoDBTestCase):
    """Tests for PciPassthroughFilter, which delegates to the host's
    pci_stats pool to decide whether the instance's PCI device
    requests can be satisfied.
    """

    def setUp(self):
        super(TestPCIPassthroughFilter, self).setUp()
        self.filt_cls = pci_passthrough_filter.PciPassthroughFilter()

    def _pci_requests(self):
        # One request for a single Intel (vendor 8086) device.
        request = objects.InstancePCIRequest(count=1,
                                             spec=[{'vendor_id': '8086'}])
        return objects.InstancePCIRequests(requests=[request])

    def _host_with_stats(self, pci_stats):
        return fakes.FakeHostState('host1', 'node1',
                                   attribute_dict={'pci_stats': pci_stats})

    def test_pci_passthrough_pass(self):
        stats = mock.MagicMock()
        stats.support_requests.return_value = True
        requests = self._pci_requests()
        host = self._host_with_stats(stats)
        self.assertTrue(self.filt_cls.host_passes(
            host, {'pci_requests': requests}))
        stats.support_requests.assert_called_once_with(requests.requests)

    def test_pci_passthrough_fail(self):
        stats = mock.MagicMock()
        stats.support_requests.return_value = False
        requests = self._pci_requests()
        host = self._host_with_stats(stats)
        self.assertFalse(self.filt_cls.host_passes(
            host, {'pci_requests': requests}))
        stats.support_requests.assert_called_once_with(requests.requests)

    def test_pci_passthrough_no_pci_request(self):
        # No PCI requests at all -> nothing to check, host passes.
        host = fakes.FakeHostState('h1', 'n1', {})
        self.assertTrue(self.filt_cls.host_passes(host, {}))

    def test_pci_passthrough_compute_stats(self):
        # A host state built without pci_stats raises AttributeError
        # when a request is present.
        raw_requests = [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]
        host = fakes.FakeHostState('host1', 'node1', attribute_dict={})
        self.assertRaises(AttributeError, self.filt_cls.host_passes,
                          host, {'pci_requests': raw_requests})
diff --git a/nova/tests/unit/scheduler/filters/test_ram_filters.py b/nova/tests/unit/scheduler/filters/test_ram_filters.py
new file mode 100644
index 0000000000..c7a6df58c9
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_ram_filters.py
@@ -0,0 +1,89 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import ram_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
class TestRamFilter(test.NoDBTestCase):
    """Tests for RamFilter, which admits hosts with enough free RAM
    for the requested flavor given the overcommit ratio.

    NOTE(review): these tests set RamFilter.ram_allocation_ratio on the
    class itself, mirroring how the filter reads it; the attribute is
    not restored afterwards — confirm cross-test isolation is handled
    elsewhere.
    """

    def setUp(self):
        super(TestRamFilter, self).setUp()
        self.filt_cls = ram_filter.RamFilter()

    def _host(self, free_ram_mb, total_usable_ram_mb):
        return fakes.FakeHostState(
            'host1', 'node1',
            {'free_ram_mb': free_ram_mb,
             'total_usable_ram_mb': total_usable_ram_mb})

    def test_ram_filter_fails_on_memory(self):
        # 1023MB free < 1024MB requested at ratio 1.0 -> reject.
        ram_filter.RamFilter.ram_allocation_ratio = 1.0
        filter_properties = {'instance_type': {'memory_mb': 1024}}
        host = self._host(1023, 1024)
        self.assertFalse(self.filt_cls.host_passes(host, filter_properties))

    def test_ram_filter_passes(self):
        # Exactly enough free RAM -> accept.
        ram_filter.RamFilter.ram_allocation_ratio = 1.0
        filter_properties = {'instance_type': {'memory_mb': 1024}}
        host = self._host(1024, 1024)
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))

    def test_ram_filter_oversubscribe(self):
        # Ratio 2.0 lets the host pass despite negative free RAM, and
        # the oversubscription limit is recorded on the host state.
        ram_filter.RamFilter.ram_allocation_ratio = 2.0
        filter_properties = {'instance_type': {'memory_mb': 1024}}
        host = self._host(-1024, 2048)
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
        self.assertEqual(2048 * 2.0, host.limits['memory_mb'])
+
+
@mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
class TestAggregateRamFilter(test.NoDBTestCase):
    """Tests for AggregateRamFilter, which lets host-aggregate metadata
    override the global ram_allocation_ratio flag.
    """

    def setUp(self):
        super(TestAggregateRamFilter, self).setUp()
        self.filt_cls = ram_filter.AggregateRamFilter()

    def _host(self, free_ram_mb):
        # All tests use a host with 1024MB usable RAM.
        return fakes.FakeHostState(
            'host1', 'node1',
            {'free_ram_mb': free_ram_mb,
             'total_usable_ram_mb': 1024})

    def _props(self):
        return {'context': mock.sentinel.ctx,
                'instance_type': {'memory_mb': 1024}}

    def test_aggregate_ram_filter_value_error(self, agg_mock):
        # A non-numeric aggregate value falls back to the flag (1.0).
        self.flags(ram_allocation_ratio=1.0)
        agg_mock.return_value = set(['XXX'])
        host = self._host(1024)
        self.assertTrue(self.filt_cls.host_passes(host, self._props()))
        self.assertEqual(1024 * 1.0, host.limits['memory_mb'])

    def test_aggregate_ram_filter_default_value(self, agg_mock):
        self.flags(ram_allocation_ratio=1.0)
        host = self._host(1023)
        # False: fallback to default flag w/o aggregates, one MB short.
        agg_mock.return_value = set()
        self.assertFalse(self.filt_cls.host_passes(host, self._props()))
        # True: use ratio from aggregates, which doubles the limit.
        agg_mock.return_value = set(['2.0'])
        self.assertTrue(self.filt_cls.host_passes(host, self._props()))
        self.assertEqual(1024 * 2.0, host.limits['memory_mb'])

    def test_aggregate_ram_filter_conflict_values(self, agg_mock):
        # With conflicting aggregate values the minimum ratio wins.
        self.flags(ram_allocation_ratio=1.0)
        agg_mock.return_value = set(['1.5', '2.0'])
        host = self._host(1023)
        self.assertTrue(self.filt_cls.host_passes(host, self._props()))
        self.assertEqual(1024 * 1.5, host.limits['memory_mb'])
diff --git a/nova/tests/unit/scheduler/filters/test_retry_filters.py b/nova/tests/unit/scheduler/filters/test_retry_filters.py
new file mode 100644
index 0000000000..04510cd419
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_retry_filters.py
@@ -0,0 +1,46 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.scheduler.filters import retry_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
class TestRetryFilter(test.NoDBTestCase):
    """Tests for RetryFilter, which skips (host, node) pairs that a
    previous scheduling attempt for the same request already tried.
    """

    def setUp(self):
        super(TestRetryFilter, self).setUp()
        self.filt_cls = retry_filter.RetryFilter()

    def test_retry_filter_disabled(self):
        # No 'retry' key in filter_properties -> the filter is a no-op.
        host = fakes.FakeHostState('host1', 'node1', {})
        self.assertTrue(self.filt_cls.host_passes(host, {}))

    def test_retry_filter_pass(self):
        # This exact (host, node) pair was not tried before; the same
        # host name with a different node does not count.
        host = fakes.FakeHostState('host1', 'nodeX', {})
        tried = [['host1', 'node1'],  # same host, different node
                 ['host2', 'node2']]  # different host and node
        filter_properties = {'retry': {'num_attempts': 2, 'hosts': tried}}
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))

    def test_retry_filter_fail(self):
        # Exact (host, node) pair already tried -> reject.
        host = fakes.FakeHostState('host1', 'node1', {})
        filter_properties = {'retry': {'num_attempts': 1,
                                       'hosts': [['host1', 'node1']]}}
        self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/unit/scheduler/filters/test_trusted_filters.py b/nova/tests/unit/scheduler/filters/test_trusted_filters.py
new file mode 100644
index 0000000000..b6afb92ae0
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_trusted_filters.py
@@ -0,0 +1,203 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.config import cfg
+from oslo.utils import timeutils
+import requests
+
+from nova.scheduler.filters import trusted_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+CONF = cfg.CONF
+
+
@mock.patch.object(trusted_filter.AttestationService, '_request')
class TestTrustedFilter(test.NoDBTestCase):
    """Tests for TrustedFilter.

    The filter asks an external attestation (OAT) service whether a
    host's trust level matches the flavor's 'trust:trusted_host' extra
    spec.  AttestationService._request is mocked in every test, so each
    test method receives a ``req_mock`` argument from the class
    decorator.
    """

    def setUp(self):
        super(TestTrustedFilter, self).setUp()
        # TrustedFilter's constructor creates the attestation cache, which
        # calls to get a list of all the compute nodes.
        fake_compute_nodes = [
            {'hypervisor_hostname': 'node1',
             'service': {'host': 'host1'},
             }
        ]
        with mock.patch('nova.db.compute_node_get_all') as mocked:
            mocked.return_value = fake_compute_nodes
            self.filt_cls = trusted_filter.TrustedFilter()

    def test_trusted_filter_default_passes(self, req_mock):
        # No trust extra spec on the flavor: the host passes and the
        # attestation service is never contacted.
        filter_properties = {'context': mock.sentinel.ctx,
                             'instance_type': {'memory_mb': 1024}}
        host = fakes.FakeHostState('host1', 'node1', {})
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
        self.assertFalse(req_mock.called)

    def test_trusted_filter_trusted_and_trusted_passes(self, req_mock):
        # Flavor requires 'trusted' and the service reports the node as
        # trusted -> accept, after a single PollHosts call.
        oat_data = {"hosts": [{"host_name": "node1",
                               "trust_lvl": "trusted",
                               "vtime": timeutils.isotime()}]}
        req_mock.return_value = requests.codes.OK, oat_data

        extra_specs = {'trust:trusted_host': 'trusted'}
        filter_properties = {'context': mock.sentinel.ctx,
                             'instance_type': {'memory_mb': 1024,
                                               'extra_specs': extra_specs}}
        host = fakes.FakeHostState('host1', 'node1', {})
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
        req_mock.assert_called_once_with("POST", "PollHosts", ["node1"])

    def test_trusted_filter_trusted_and_untrusted_fails(self, req_mock):
        # Flavor requires 'trusted' but the service reports 'untrusted'
        # -> reject.
        oat_data = {"hosts": [{"host_name": "node1",
                               "trust_lvl": "untrusted",
                               "vtime": timeutils.isotime()}]}
        req_mock.return_value = requests.codes.OK, oat_data
        extra_specs = {'trust:trusted_host': 'trusted'}
        filter_properties = {'context': mock.sentinel.ctx,
                             'instance_type': {'memory_mb': 1024,
                                               'extra_specs': extra_specs}}
        host = fakes.FakeHostState('host1', 'node1', {})
        self.assertFalse(self.filt_cls.host_passes(host, filter_properties))

    def test_trusted_filter_untrusted_and_trusted_fails(self, req_mock):
        # NOTE(review): the attestation reply names "node", not the
        # queried "node1" — presumably the missing entry is what makes
        # the 'untrusted' requirement fail here; confirm against the
        # filter implementation.
        oat_data = {"hosts": [{"host_name": "node",
                               "trust_lvl": "trusted",
                               "vtime": timeutils.isotime()}]}
        req_mock.return_value = requests.codes.OK, oat_data
        extra_specs = {'trust:trusted_host': 'untrusted'}
        filter_properties = {'context': mock.sentinel.ctx,
                             'instance_type': {'memory_mb': 1024,
                                               'extra_specs': extra_specs}}
        host = fakes.FakeHostState('host1', 'node1', {})
        self.assertFalse(self.filt_cls.host_passes(host, filter_properties))

    def test_trusted_filter_untrusted_and_untrusted_passes(self, req_mock):
        # Flavor explicitly asks for an untrusted host and gets one.
        oat_data = {"hosts": [{"host_name": "node1",
                               "trust_lvl": "untrusted",
                               "vtime": timeutils.isotime()}]}
        req_mock.return_value = requests.codes.OK, oat_data
        extra_specs = {'trust:trusted_host': 'untrusted'}
        filter_properties = {'context': mock.sentinel.ctx,
                             'instance_type': {'memory_mb': 1024,
                                               'extra_specs': extra_specs}}
        host = fakes.FakeHostState('host1', 'node1', {})
        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))

    def test_trusted_filter_update_cache(self, req_mock):
        # The attestation answer is cached: a second host_passes within
        # the attestation_auth_timeout window must not hit the service,
        # while one past the window must re-query.
        oat_data = {"hosts": [{"host_name": "node1",
                               "trust_lvl": "untrusted",
                               "vtime": timeutils.isotime()}]}

        req_mock.return_value = requests.codes.OK, oat_data
        extra_specs = {'trust:trusted_host': 'untrusted'}
        filter_properties = {'context': mock.sentinel.ctx,
                             'instance_type': {'memory_mb': 1024,
                                               'extra_specs': extra_specs}}
        host = fakes.FakeHostState('host1', 'node1', {})

        self.filt_cls.host_passes(host, filter_properties)  # Fill the caches

        req_mock.reset_mock()
        self.filt_cls.host_passes(host, filter_properties)
        self.assertFalse(req_mock.called)

        req_mock.reset_mock()

        # Advance the (overridden) clock beyond the cache lifetime.
        timeutils.set_time_override(timeutils.utcnow())
        timeutils.advance_time_seconds(
            CONF.trusted_computing.attestation_auth_timeout + 80)
        self.filt_cls.host_passes(host, filter_properties)
        self.assertTrue(req_mock.called)

        timeutils.clear_time_override()

    def test_trusted_filter_update_cache_timezone(self, req_mock):
        # Same caching check, but the service reports vtime with a
        # non-UTC offset; the cache must normalize it correctly.
        oat_data = {"hosts": [{"host_name": "node1",
                               "trust_lvl": "untrusted",
                               "vtime": "2012-09-09T05:10:40-04:00"}]}
        req_mock.return_value = requests.codes.OK, oat_data
        extra_specs = {'trust:trusted_host': 'untrusted'}
        filter_properties = {'context': mock.sentinel.ctx,
                             'instance_type': {'memory_mb': 1024,
                                               'extra_specs': extra_specs}}
        host = fakes.FakeHostState('host1', 'node1', {})

        # Pin "now" to the same instant as the reported vtime (09:10:40Z
        # == 05:10:40-04:00).
        timeutils.set_time_override(
            timeutils.normalize_time(
                timeutils.parse_isotime("2012-09-09T09:10:40Z")))

        self.filt_cls.host_passes(host, filter_properties)  # Fill the caches

        req_mock.reset_mock()
        self.filt_cls.host_passes(host, filter_properties)
        self.assertFalse(req_mock.called)

        req_mock.reset_mock()
        # Still inside the cache window -> no new request.
        timeutils.advance_time_seconds(
            CONF.trusted_computing.attestation_auth_timeout - 10)
        self.filt_cls.host_passes(host, filter_properties)
        self.assertFalse(req_mock.called)

        timeutils.clear_time_override()

    def test_trusted_filter_combine_hosts(self, req_mock):
        # With two compute nodes known at construction time, a single
        # PollHosts request must cover both.
        fake_compute_nodes = [
            {'hypervisor_hostname': 'node1',
             'service': {'host': 'host1'},
             },
            {'hypervisor_hostname': 'node2',
             'service': {'host': 'host2'},
             },
        ]
        with mock.patch('nova.db.compute_node_get_all') as mocked:
            mocked.return_value = fake_compute_nodes
            self.filt_cls = trusted_filter.TrustedFilter()
        oat_data = {"hosts": [{"host_name": "node1",
                               "trust_lvl": "untrusted",
                               "vtime": "2012-09-09T05:10:40-04:00"}]}
        req_mock.return_value = requests.codes.OK, oat_data
        extra_specs = {'trust:trusted_host': 'trusted'}
        filter_properties = {'context': mock.sentinel.ctx,
                             'instance_type': {'memory_mb': 1024,
                                               'extra_specs': extra_specs}}
        host = fakes.FakeHostState('host1', 'node1', {})

        self.filt_cls.host_passes(host, filter_properties)  # Fill the caches
        req_mock.assert_called_once_with("POST", "PollHosts",
                                         ["node1", "node2"])

    def test_trusted_filter_trusted_and_locale_formated_vtime_passes(self,
            req_mock):
        # host1 reports a parseable locale vtime (%c) and passes; host2
        # reports a %D-formatted date, which serves as a broken date to
        # ensure that we're not just arbitrarily accepting any date
        # format, and so fails.
        oat_data = {"hosts": [{"host_name": "host1",
                               "trust_lvl": "trusted",
                               "vtime": timeutils.strtime(fmt="%c")},
                              {"host_name": "host2",
                               "trust_lvl": "trusted",
                               "vtime": timeutils.strtime(fmt="%D")},
                              ]}
        req_mock.return_value = requests.codes.OK, oat_data
        extra_specs = {'trust:trusted_host': 'trusted'}
        filter_properties = {'context': mock.sentinel.ctx,
                             'instance_type': {'memory_mb': 1024,
                                               'extra_specs': extra_specs}}
        host = fakes.FakeHostState('host1', 'host1', {})
        bad_host = fakes.FakeHostState('host2', 'host2', {})

        self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
        self.assertFalse(self.filt_cls.host_passes(bad_host,
                                                   filter_properties))
diff --git a/nova/tests/unit/scheduler/filters/test_type_filters.py b/nova/tests/unit/scheduler/filters/test_type_filters.py
new file mode 100644
index 0000000000..3aebba1a76
--- /dev/null
+++ b/nova/tests/unit/scheduler/filters/test_type_filters.py
@@ -0,0 +1,56 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.scheduler.filters import type_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class TestTypeFilter(test.NoDBTestCase):
+
+ @mock.patch('nova.db.instance_get_all_by_host_and_not_type')
+ def test_type_filter(self, get_mock):
+ self.filt_cls = type_filter.TypeAffinityFilter()
+
+ host = fakes.FakeHostState('fake_host', 'fake_node', {})
+ filter_properties = {'context': mock.MagicMock(),
+ 'instance_type': {'id': 'fake1'}}
+ get_mock.return_value = []
+ # True since empty
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ get_mock.assert_called_once_with(
+ mock.ANY, # context...
+ 'fake_host',
+ 'fake1'
+ )
+ get_mock.return_value = [mock.sentinel.instances]
+ # False since not empty
+ self.assertFalse(self.filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('nova.scheduler.filters.utils.aggregate_values_from_db')
+ def test_aggregate_type_filter(self, agg_mock):
+ self.filt_cls = type_filter.AggregateTypeAffinityFilter()
+
+ filter_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'name': 'fake1'}}
+ filter2_properties = {'context': mock.sentinel.ctx,
+ 'instance_type': {'name': 'fake2'}}
+ host = fakes.FakeHostState('fake_host', 'fake_node', {})
+ agg_mock.return_value = set(['fake1'])
+ # True since no aggregates
+ self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
+ agg_mock.assert_called_once_with(mock.sentinel.ctx, 'fake_host',
+ 'instance_type')
+ # False since type matches aggregate, metadata
+ self.assertFalse(self.filt_cls.host_passes(host, filter2_properties))
diff --git a/nova/tests/scheduler/ironic_fakes.py b/nova/tests/unit/scheduler/ironic_fakes.py
index 5c63afafe3..5c63afafe3 100644
--- a/nova/tests/scheduler/ironic_fakes.py
+++ b/nova/tests/unit/scheduler/ironic_fakes.py
diff --git a/nova/tests/scheduler/test_baremetal_host_manager.py b/nova/tests/unit/scheduler/test_baremetal_host_manager.py
index 1f6e2d70fa..1f6e2d70fa 100644
--- a/nova/tests/scheduler/test_baremetal_host_manager.py
+++ b/nova/tests/unit/scheduler/test_baremetal_host_manager.py
diff --git a/nova/tests/unit/scheduler/test_caching_scheduler.py b/nova/tests/unit/scheduler/test_caching_scheduler.py
new file mode 100644
index 0000000000..15525f1b20
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_caching_scheduler.py
@@ -0,0 +1,199 @@
+# Copyright (c) 2014 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.utils import timeutils
+
+from nova import exception
+from nova.scheduler import caching_scheduler
+from nova.scheduler import host_manager
+from nova.tests.unit.scheduler import test_scheduler
+
+ENABLE_PROFILER = False
+
+
+class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
+ """Test case for Caching Scheduler."""
+
+ driver_cls = caching_scheduler.CachingScheduler
+
+ @mock.patch.object(caching_scheduler.CachingScheduler,
+ "_get_up_hosts")
+ def test_run_periodic_tasks_loads_hosts(self, mock_up_hosts):
+ mock_up_hosts.return_value = []
+ context = mock.Mock()
+
+ self.driver.run_periodic_tasks(context)
+
+ self.assertTrue(mock_up_hosts.called)
+ self.assertEqual([], self.driver.all_host_states)
+ context.elevated.assert_called_with()
+
+ @mock.patch.object(caching_scheduler.CachingScheduler,
+ "_get_up_hosts")
+ def test_get_all_host_states_returns_cached_value(self, mock_up_hosts):
+ self.driver.all_host_states = []
+
+ self.driver._get_all_host_states(self.context)
+
+ self.assertFalse(mock_up_hosts.called)
+ self.assertEqual([], self.driver.all_host_states)
+
+ @mock.patch.object(caching_scheduler.CachingScheduler,
+ "_get_up_hosts")
+ def test_get_all_host_states_loads_hosts(self, mock_up_hosts):
+ mock_up_hosts.return_value = ["asdf"]
+
+ result = self.driver._get_all_host_states(self.context)
+
+ self.assertTrue(mock_up_hosts.called)
+ self.assertEqual(["asdf"], self.driver.all_host_states)
+ self.assertEqual(["asdf"], result)
+
+ def test_get_up_hosts(self):
+ with mock.patch.object(self.driver.host_manager,
+ "get_all_host_states") as mock_get_hosts:
+ mock_get_hosts.return_value = ["asdf"]
+
+ result = self.driver._get_up_hosts(self.context)
+
+ self.assertTrue(mock_get_hosts.called)
+ self.assertEqual(mock_get_hosts.return_value, result)
+
+ def test_select_destination_raises_with_no_hosts(self):
+ fake_request_spec = self._get_fake_request_spec()
+ self.driver.all_host_states = []
+
+ self.assertRaises(exception.NoValidHost,
+ self.driver.select_destinations,
+ self.context, fake_request_spec, {})
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_select_destination_works(self, mock_get_extra):
+ fake_request_spec = self._get_fake_request_spec()
+ fake_host = self._get_fake_host_state()
+ self.driver.all_host_states = [fake_host]
+
+ result = self._test_select_destinations(fake_request_spec)
+
+ self.assertEqual(1, len(result))
+ self.assertEqual(result[0]["host"], fake_host.host)
+
+ def _test_select_destinations(self, request_spec):
+ return self.driver.select_destinations(
+ self.context, request_spec, {})
+
+ def _get_fake_request_spec(self):
+ flavor = {
+ "flavorid": "small",
+ "memory_mb": 512,
+ "root_gb": 1,
+ "ephemeral_gb": 1,
+ "vcpus": 1,
+ }
+ instance_properties = {
+ "os_type": "linux",
+ "project_id": "1234",
+ "memory_mb": 512,
+ "root_gb": 1,
+ "ephemeral_gb": 1,
+ "vcpus": 1,
+ "uuid": 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+ }
+ request_spec = {
+ "instance_type": flavor,
+ "instance_properties": instance_properties,
+ "num_instances": 1,
+ }
+ return request_spec
+
+ def _get_fake_host_state(self, index=0):
+ host_state = host_manager.HostState(
+ 'host_%s' % index,
+ 'node_%s' % index)
+ host_state.free_ram_mb = 50000
+ host_state.service = {
+ "disabled": False,
+ "updated_at": timeutils.utcnow(),
+ "created_at": timeutils.utcnow(),
+ }
+ return host_state
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_performance_check_select_destination(self, mock_get_extra):
+ hosts = 2
+ requests = 1
+
+ self.flags(service_down_time=240)
+
+ request_spec = self._get_fake_request_spec()
+ host_states = []
+ for x in xrange(hosts):
+ host_state = self._get_fake_host_state(x)
+ host_states.append(host_state)
+ self.driver.all_host_states = host_states
+
+ def run_test():
+ a = timeutils.utcnow()
+
+ for x in xrange(requests):
+ self.driver.select_destinations(
+ self.context, request_spec, {})
+
+ b = timeutils.utcnow()
+ c = b - a
+
+ seconds = (c.days * 24 * 60 * 60 + c.seconds)
+ microseconds = seconds * 1000 + c.microseconds / 1000.0
+ per_request_ms = microseconds / requests
+ return per_request_ms
+
+ per_request_ms = None
+ if ENABLE_PROFILER:
+ import pycallgraph
+ from pycallgraph import output
+ config = pycallgraph.Config(max_depth=10)
+ config.trace_filter = pycallgraph.GlobbingFilter(exclude=[
+ 'pycallgraph.*',
+ 'unittest.*',
+ 'nova.tests.unit.*',
+ ])
+ graphviz = output.GraphvizOutput(output_file='scheduler.png')
+
+ with pycallgraph.PyCallGraph(output=graphviz):
+ per_request_ms = run_test()
+
+ else:
+ per_request_ms = run_test()
+
+ # This has proved to be around 1 ms on a random dev box
+ # But this is here so you can do simple performance testing easily.
+ self.assertTrue(per_request_ms < 1000)
+
+
+if __name__ == '__main__':
+ # A handy tool to help profile the scheduler's performance
+ ENABLE_PROFILER = True
+ import unittest
+ suite = unittest.TestSuite()
+ test = "test_performance_check_select_destination"
+ test_case = CachingSchedulerTestCase(test)
+ suite.addTest(test_case)
+ runner = unittest.TextTestRunner()
+ runner.run(suite)
diff --git a/nova/tests/unit/scheduler/test_chance_scheduler.py b/nova/tests/unit/scheduler/test_chance_scheduler.py
new file mode 100644
index 0000000000..73a4696ec3
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_chance_scheduler.py
@@ -0,0 +1,182 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Chance Scheduler.
+"""
+
+import random
+
+import mox
+
+from nova.compute import rpcapi as compute_rpcapi
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.scheduler import chance
+from nova.scheduler import driver
+from nova.tests.unit.scheduler import test_scheduler
+
+
+class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
+ """Test case for Chance Scheduler."""
+
+ driver_cls = chance.ChanceScheduler
+
+ def test_filter_hosts_avoid(self):
+ """Test to make sure _filter_hosts() filters original hosts if
+ avoid_original_host is True.
+ """
+
+ hosts = ['host1', 'host2', 'host3']
+ request_spec = dict(instance_properties=dict(host='host2'))
+ filter_properties = {'ignore_hosts': ['host2']}
+
+ filtered = self.driver._filter_hosts(request_spec, hosts,
+ filter_properties=filter_properties)
+ self.assertEqual(filtered, ['host1', 'host3'])
+
+ def test_filter_hosts_no_avoid(self):
+ """Test to make sure _filter_hosts() does not filter original
+ hosts if avoid_original_host is False.
+ """
+
+ hosts = ['host1', 'host2', 'host3']
+ request_spec = dict(instance_properties=dict(host='host2'))
+ filter_properties = {'ignore_hosts': []}
+
+ filtered = self.driver._filter_hosts(request_spec, hosts,
+ filter_properties=filter_properties)
+ self.assertEqual(filtered, hosts)
+
+ def test_basic_schedule_run_instance(self):
+ ctxt = context.RequestContext('fake', 'fake', False)
+ ctxt_elevated = 'fake-context-elevated'
+ instance_opts = {'fake_opt1': 'meow', 'launch_index': -1}
+ instance1 = {'uuid': 'fake-uuid1'}
+ instance2 = {'uuid': 'fake-uuid2'}
+ request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
+ 'instance_properties': instance_opts}
+
+ def inc_launch_index(*args):
+ request_spec['instance_properties']['launch_index'] = (
+ request_spec['instance_properties']['launch_index'] + 1)
+
+ self.mox.StubOutWithMock(ctxt, 'elevated')
+ self.mox.StubOutWithMock(self.driver, 'hosts_up')
+ self.mox.StubOutWithMock(random, 'choice')
+ self.mox.StubOutWithMock(driver, 'instance_update_db')
+ self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')
+
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ # instance 1
+ hosts_full = ['host1', 'host2', 'host3', 'host4']
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
+ random.choice(hosts_full).AndReturn('host3')
+ driver.instance_update_db(ctxt, instance1['uuid']).WithSideEffects(
+ inc_launch_index).AndReturn(instance1)
+ compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host3',
+ instance=instance1, requested_networks=None,
+ injected_files=None, admin_password=None, is_first_time=None,
+ request_spec=request_spec, filter_properties={},
+ legacy_bdm_in_spec=False)
+
+ # instance 2
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
+ random.choice(hosts_full).AndReturn('host1')
+ driver.instance_update_db(ctxt, instance2['uuid']).WithSideEffects(
+ inc_launch_index).AndReturn(instance2)
+ compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host1',
+ instance=instance2, requested_networks=None,
+ injected_files=None, admin_password=None, is_first_time=None,
+ request_spec=request_spec, filter_properties={},
+ legacy_bdm_in_spec=False)
+
+ self.mox.ReplayAll()
+ self.driver.schedule_run_instance(ctxt, request_spec,
+ None, None, None, None, {}, False)
+
+ def test_basic_schedule_run_instance_no_hosts(self):
+ ctxt = context.RequestContext('fake', 'fake', False)
+ ctxt_elevated = 'fake-context-elevated'
+ uuid = 'fake-uuid1'
+ instance_opts = {'fake_opt1': 'meow', 'launch_index': -1}
+ request_spec = {'instance_uuids': [uuid],
+ 'instance_properties': instance_opts}
+
+ self.mox.StubOutWithMock(ctxt, 'elevated')
+ self.mox.StubOutWithMock(self.driver, 'hosts_up')
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ # instance 1
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn([])
+ old_ref, new_ref = db.instance_update_and_get_original(ctxt, uuid,
+ {'vm_state': vm_states.ERROR,
+ 'task_state': None}).AndReturn(({}, {}))
+ compute_utils.add_instance_fault_from_exc(ctxt, new_ref,
+ mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+ self.driver.schedule_run_instance(
+ ctxt, request_spec, None, None, None, None, {}, False)
+
+ def test_select_destinations(self):
+ ctxt = context.RequestContext('fake', 'fake', False)
+ ctxt_elevated = 'fake-context-elevated'
+ request_spec = {'num_instances': 2}
+
+ self.mox.StubOutWithMock(ctxt, 'elevated')
+ self.mox.StubOutWithMock(self.driver, 'hosts_up')
+ self.mox.StubOutWithMock(random, 'choice')
+
+ hosts_full = ['host1', 'host2', 'host3', 'host4']
+
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
+ random.choice(hosts_full).AndReturn('host3')
+
+ ctxt.elevated().AndReturn(ctxt_elevated)
+ self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
+ random.choice(hosts_full).AndReturn('host2')
+
+ self.mox.ReplayAll()
+ dests = self.driver.select_destinations(ctxt, request_spec, {})
+ self.assertEqual(2, len(dests))
+ (host, node) = (dests[0]['host'], dests[0]['nodename'])
+ self.assertEqual('host3', host)
+ self.assertIsNone(node)
+ (host, node) = (dests[1]['host'], dests[1]['nodename'])
+ self.assertEqual('host2', host)
+ self.assertIsNone(node)
+
+ def test_select_destinations_no_valid_host(self):
+
+ def _return_no_host(*args, **kwargs):
+ return []
+
+ self.mox.StubOutWithMock(self.driver, 'hosts_up')
+ self.driver.hosts_up(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn([1, 2])
+ self.stubs.Set(self.driver, '_filter_hosts', _return_no_host)
+ self.mox.ReplayAll()
+
+ request_spec = {'num_instances': 1}
+ self.assertRaises(exception.NoValidHost,
+ self.driver.select_destinations, self.context,
+ request_spec, {})
diff --git a/nova/tests/scheduler/test_client.py b/nova/tests/unit/scheduler/test_client.py
index 5ea915c4f6..5ea915c4f6 100644
--- a/nova/tests/scheduler/test_client.py
+++ b/nova/tests/unit/scheduler/test_client.py
diff --git a/nova/tests/unit/scheduler/test_filter_scheduler.py b/nova/tests/unit/scheduler/test_filter_scheduler.py
new file mode 100644
index 0000000000..96231ef13a
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_filter_scheduler.py
@@ -0,0 +1,596 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Filter Scheduler.
+"""
+
+import mock
+import mox
+
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.scheduler import driver
+from nova.scheduler import filter_scheduler
+from nova.scheduler import host_manager
+from nova.scheduler import utils as scheduler_utils
+from nova.scheduler import weights
+from nova.tests.unit.scheduler import fakes
+from nova.tests.unit.scheduler import test_scheduler
+
+
+def fake_get_filtered_hosts(hosts, filter_properties, index):
+ return list(hosts)
+
+
+class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
+ """Test case for Filter Scheduler."""
+
+ driver_cls = filter_scheduler.FilterScheduler
+
+ def test_run_instance_no_hosts(self):
+ sched = fakes.FakeFilterScheduler()
+ uuid = 'fake-uuid1'
+ fake_context = context.RequestContext('user', 'project')
+ instance_properties = {'project_id': 1, 'os_type': 'Linux'}
+ request_spec = {'instance_type': {'memory_mb': 1, 'root_gb': 1,
+ 'ephemeral_gb': 0},
+ 'instance_properties': instance_properties,
+ 'instance_uuids': [uuid]}
+
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ old_ref, new_ref = db.instance_update_and_get_original(fake_context,
+ uuid, {'vm_state': vm_states.ERROR, 'task_state':
+ None}).AndReturn(({}, {}))
+ compute_utils.add_instance_fault_from_exc(fake_context, new_ref,
+ mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ db.compute_node_get_all(mox.IgnoreArg()).AndReturn([])
+
+ self.mox.ReplayAll()
+ sched.schedule_run_instance(
+ fake_context, request_spec, None, None,
+ None, None, {}, False)
+
+ def test_run_instance_non_admin(self):
+ self.was_admin = False
+
+ def fake_get(context, *args, **kwargs):
+ # make sure this is called with admin context, even though
+ # we're using user context below
+ self.was_admin = context.is_admin
+ return {}
+
+ sched = fakes.FakeFilterScheduler()
+ self.stubs.Set(sched.host_manager, 'get_all_host_states', fake_get)
+
+ fake_context = context.RequestContext('user', 'project')
+
+ uuid = 'fake-uuid1'
+ instance_properties = {'project_id': 1, 'os_type': 'Linux'}
+ request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
+ 'instance_properties': instance_properties,
+ 'instance_uuids': [uuid]}
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ old_ref, new_ref = db.instance_update_and_get_original(fake_context,
+ uuid, {'vm_state': vm_states.ERROR, 'task_state':
+ None}).AndReturn(({}, {}))
+ compute_utils.add_instance_fault_from_exc(fake_context, new_ref,
+ mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+ self.mox.ReplayAll()
+ sched.schedule_run_instance(
+ fake_context, request_spec, None, None, None, None, {}, False)
+ self.assertTrue(self.was_admin)
+
+ def test_scheduler_includes_launch_index(self):
+ fake_context = context.RequestContext('user', 'project')
+ instance_opts = {'fake_opt1': 'meow'}
+ request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
+ 'instance_properties': instance_opts}
+ instance1 = {'uuid': 'fake-uuid1'}
+ instance2 = {'uuid': 'fake-uuid2'}
+
+ def _has_launch_index(expected_index):
+ """Return a function that verifies the expected index."""
+ def _check_launch_index(value):
+ if 'instance_properties' in value:
+ if 'launch_index' in value['instance_properties']:
+ index = value['instance_properties']['launch_index']
+ if index == expected_index:
+ return True
+ return False
+ return _check_launch_index
+
+ self.mox.StubOutWithMock(self.driver, '_schedule')
+ self.mox.StubOutWithMock(self.driver, '_provision_resource')
+
+ expected_filter_properties = {'retry': {'num_attempts': 1,
+ 'hosts': []}}
+ self.driver._schedule(fake_context, request_spec,
+ expected_filter_properties).AndReturn(['host1', 'host2'])
+ # instance 1
+ self.driver._provision_resource(
+ fake_context, 'host1',
+ mox.Func(_has_launch_index(0)), expected_filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid1',
+ legacy_bdm_in_spec=False).AndReturn(instance1)
+ # instance 2
+ self.driver._provision_resource(
+ fake_context, 'host2',
+ mox.Func(_has_launch_index(1)), expected_filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid2',
+ legacy_bdm_in_spec=False).AndReturn(instance2)
+ self.mox.ReplayAll()
+
+ self.driver.schedule_run_instance(fake_context, request_spec,
+ None, None, None, None, {}, False)
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_schedule_happy_day(self, mock_get_extra):
+ """Make sure there's nothing glaringly wrong with _schedule()
+ by doing a happy day pass through.
+ """
+
+ self.next_weight = 1.0
+
+ def _fake_weigh_objects(_self, functions, hosts, options):
+ self.next_weight += 2.0
+ host_state = hosts[0]
+ return [weights.WeighedHost(host_state, self.next_weight)]
+
+ sched = fakes.FakeFilterScheduler()
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ self.stubs.Set(weights.HostWeightHandler,
+ 'get_weighed_objects', _fake_weigh_objects)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ request_spec = {'num_instances': 10,
+ 'instance_type': {'memory_mb': 512, 'root_gb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1},
+ 'instance_properties': {'project_id': 1,
+ 'root_gb': 512,
+ 'memory_mb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1,
+ 'os_type': 'Linux',
+ 'uuid': 'fake-uuid'}}
+ self.mox.ReplayAll()
+ weighed_hosts = sched._schedule(fake_context, request_spec, {})
+ self.assertEqual(len(weighed_hosts), 10)
+ for weighed_host in weighed_hosts:
+ self.assertIsNotNone(weighed_host.obj)
+
+ def test_max_attempts(self):
+ self.flags(scheduler_max_attempts=4)
+ self.assertEqual(4, scheduler_utils._max_attempts())
+
+ def test_invalid_max_attempts(self):
+ self.flags(scheduler_max_attempts=0)
+ self.assertRaises(exception.NovaException,
+ scheduler_utils._max_attempts)
+
+ def test_retry_disabled(self):
+ # Retry info should not get populated when re-scheduling is off.
+ self.flags(scheduler_max_attempts=1)
+ sched = fakes.FakeFilterScheduler()
+ request_spec = dict(instance_properties={},
+ instance_uuids=['fake-uuid1'])
+ filter_properties = {}
+
+ self.mox.StubOutWithMock(sched, '_schedule')
+ self.mox.StubOutWithMock(sched, '_provision_resource')
+
+ sched._schedule(self.context, request_spec,
+ filter_properties).AndReturn(['host1'])
+ sched._provision_resource(
+ self.context, 'host1',
+ request_spec, filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid1',
+ legacy_bdm_in_spec=False)
+
+ self.mox.ReplayAll()
+
+ sched.schedule_run_instance(self.context, request_spec, None, None,
+ None, None, filter_properties, False)
+
+ def test_retry_force_hosts(self):
+ # Retry info should not get populated when re-scheduling is off.
+ self.flags(scheduler_max_attempts=2)
+ sched = fakes.FakeFilterScheduler()
+ request_spec = dict(instance_properties={},
+ instance_uuids=['fake-uuid1'])
+ filter_properties = {'force_hosts': ['force_host']}
+
+ self.mox.StubOutWithMock(sched, '_schedule')
+ self.mox.StubOutWithMock(sched, '_provision_resource')
+
+ sched._schedule(self.context, request_spec,
+ filter_properties).AndReturn(['host1'])
+ sched._provision_resource(
+ self.context, 'host1',
+ request_spec, filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid1',
+ legacy_bdm_in_spec=False)
+
+ self.mox.ReplayAll()
+
+ sched.schedule_run_instance(self.context, request_spec, None, None,
+ None, None, filter_properties, False)
+
+ def test_retry_force_nodes(self):
+ # Retry info should not get populated when re-scheduling is off.
+ self.flags(scheduler_max_attempts=2)
+ sched = fakes.FakeFilterScheduler()
+ request_spec = dict(instance_properties={},
+ instance_uuids=['fake-uuid1'])
+ filter_properties = {'force_nodes': ['force_node']}
+
+ self.mox.StubOutWithMock(sched, '_schedule')
+ self.mox.StubOutWithMock(sched, '_provision_resource')
+
+ sched._schedule(self.context, request_spec,
+ filter_properties).AndReturn(['host1'])
+ sched._provision_resource(
+ self.context, 'host1',
+ request_spec, filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid1',
+ legacy_bdm_in_spec=False)
+
+ self.mox.ReplayAll()
+
+ sched.schedule_run_instance(self.context, request_spec, None, None,
+ None, None, filter_properties, False)
+
+ def test_retry_attempt_one(self):
+ # Test retry logic on initial scheduling attempt.
+ self.flags(scheduler_max_attempts=2)
+ sched = fakes.FakeFilterScheduler()
+ request_spec = dict(instance_properties={},
+ instance_uuids=['fake-uuid1'])
+ filter_properties = {}
+ expected_filter_properties = {'retry': {'num_attempts': 1,
+ 'hosts': []}}
+ self.mox.StubOutWithMock(sched, '_schedule')
+ self.mox.StubOutWithMock(sched, '_provision_resource')
+
+ sched._schedule(self.context, request_spec,
+ expected_filter_properties).AndReturn(['host1'])
+ sched._provision_resource(
+ self.context, 'host1',
+ request_spec, expected_filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid1',
+ legacy_bdm_in_spec=False)
+
+ self.mox.ReplayAll()
+
+ sched.schedule_run_instance(self.context, request_spec, None, None,
+ None, None, filter_properties, False)
+
+ def test_retry_attempt_two(self):
+ # Test retry logic when re-scheduling.
+ self.flags(scheduler_max_attempts=2)
+ sched = fakes.FakeFilterScheduler()
+ request_spec = dict(instance_properties={},
+ instance_uuids=['fake-uuid1'])
+ filter_properties = {'retry': {'num_attempts': 1}}
+ expected_filter_properties = {'retry': {'num_attempts': 2}}
+ self.mox.StubOutWithMock(sched, '_schedule')
+ self.mox.StubOutWithMock(sched, '_provision_resource')
+
+ sched._schedule(self.context, request_spec,
+ expected_filter_properties).AndReturn(['host1'])
+ sched._provision_resource(
+ self.context, 'host1',
+ request_spec, expected_filter_properties,
+ None, None, None, None,
+ instance_uuid='fake-uuid1',
+ legacy_bdm_in_spec=False)
+
+ self.mox.ReplayAll()
+
+ sched.schedule_run_instance(self.context, request_spec, None, None,
+ None, None, filter_properties, False)
+
+ def test_retry_exceeded_max_attempts(self):
+ # Test for necessary explosion when max retries is exceeded and that
+ # the information needed in request_spec is still present for error
+ # handling
+ self.flags(scheduler_max_attempts=2)
+ sched = fakes.FakeFilterScheduler()
+ request_spec = dict(instance_properties={},
+ instance_uuids=['fake-uuid1'])
+ filter_properties = {'retry': {'num_attempts': 2}}
+
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
+ self.context, request_spec, None, None,
+ None, None, filter_properties, False)
+
+ def test_add_retry_host(self):
+ retry = dict(num_attempts=1, hosts=[])
+ filter_properties = dict(retry=retry)
+ host = "fakehost"
+ node = "fakenode"
+
+ scheduler_utils._add_retry_host(filter_properties, host, node)
+
+ hosts = filter_properties['retry']['hosts']
+ self.assertEqual(1, len(hosts))
+ self.assertEqual([host, node], hosts[0])
+
+ def test_post_select_populate(self):
+ # Test addition of certain filter props after a node is selected.
+ retry = {'hosts': [], 'num_attempts': 1}
+ filter_properties = {'retry': retry}
+
+ host_state = host_manager.HostState('host', 'node')
+ host_state.limits['vcpus'] = 5
+ scheduler_utils.populate_filter_properties(filter_properties,
+ host_state)
+
+ self.assertEqual(['host', 'node'],
+ filter_properties['retry']['hosts'][0])
+
+ self.assertEqual({'vcpus': 5}, host_state.limits)
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_schedule_host_pool(self, mock_get_extra):
+ """Make sure the scheduler_host_subset_size property works properly."""
+
+ self.flags(scheduler_host_subset_size=2)
+ sched = fakes.FakeFilterScheduler()
+
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ instance_properties = {'project_id': 1,
+ 'root_gb': 512,
+ 'memory_mb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1,
+ 'os_type': 'Linux',
+ 'uuid': 'fake-uuid'}
+
+ request_spec = dict(instance_properties=instance_properties,
+ instance_type={})
+ filter_properties = {}
+ self.mox.ReplayAll()
+ hosts = sched._schedule(self.context, request_spec,
+ filter_properties=filter_properties)
+
+ # one host should be chosen
+ self.assertEqual(len(hosts), 1)
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_schedule_large_host_pool(self, mock_get_extra):
+ """Hosts should still be chosen if pool size
+ is larger than number of filtered hosts.
+ """
+
+ sched = fakes.FakeFilterScheduler()
+
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+ self.flags(scheduler_host_subset_size=20)
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ instance_properties = {'project_id': 1,
+ 'root_gb': 512,
+ 'memory_mb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1,
+ 'os_type': 'Linux',
+ 'uuid': 'fake-uuid'}
+ request_spec = dict(instance_properties=instance_properties,
+ instance_type={})
+ filter_properties = {}
+ self.mox.ReplayAll()
+ hosts = sched._schedule(self.context, request_spec,
+ filter_properties=filter_properties)
+
+ # one host should be chosen
+ self.assertEqual(len(hosts), 1)
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_schedule_chooses_best_host(self, mock_get_extra):
+ """If scheduler_host_subset_size is 1, the largest host with greatest
+ weight should be returned.
+ """
+
+ self.flags(scheduler_host_subset_size=1)
+
+ sched = fakes.FakeFilterScheduler()
+
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ self.next_weight = 50
+
+ def _fake_weigh_objects(_self, functions, hosts, options):
+ this_weight = self.next_weight
+ self.next_weight = 0
+ host_state = hosts[0]
+ return [weights.WeighedHost(host_state, this_weight)]
+
+ instance_properties = {'project_id': 1,
+ 'root_gb': 512,
+ 'memory_mb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1,
+ 'os_type': 'Linux',
+ 'uuid': 'fake-uuid'}
+
+ request_spec = dict(instance_properties=instance_properties,
+ instance_type={})
+
+ self.stubs.Set(weights.HostWeightHandler,
+ 'get_weighed_objects', _fake_weigh_objects)
+
+ filter_properties = {}
+ self.mox.ReplayAll()
+ hosts = sched._schedule(self.context, request_spec,
+ filter_properties=filter_properties)
+
+ # one host should be chosen
+ self.assertEqual(1, len(hosts))
+
+ self.assertEqual(50, hosts[0].weight)
+
+ @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
+ return_value={'numa_topology': None,
+ 'pci_requests': None})
+ def test_select_destinations(self, mock_get_extra):
+ """select_destinations is basically a wrapper around _schedule().
+
+ Similar to the _schedule tests, this just does a happy path test to
+ ensure there is nothing glaringly wrong.
+ """
+
+ self.next_weight = 1.0
+
+ selected_hosts = []
+ selected_nodes = []
+
+ def _fake_weigh_objects(_self, functions, hosts, options):
+ self.next_weight += 2.0
+ host_state = hosts[0]
+ selected_hosts.append(host_state.host)
+ selected_nodes.append(host_state.nodename)
+ return [weights.WeighedHost(host_state, self.next_weight)]
+
+ sched = fakes.FakeFilterScheduler()
+ fake_context = context.RequestContext('user', 'project',
+ is_admin=True)
+
+ self.stubs.Set(sched.host_manager, 'get_filtered_hosts',
+ fake_get_filtered_hosts)
+ self.stubs.Set(weights.HostWeightHandler,
+ 'get_weighed_objects', _fake_weigh_objects)
+ fakes.mox_host_manager_db_calls(self.mox, fake_context)
+
+ request_spec = {'instance_type': {'memory_mb': 512, 'root_gb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1},
+ 'instance_properties': {'project_id': 1,
+ 'root_gb': 512,
+ 'memory_mb': 512,
+ 'ephemeral_gb': 0,
+ 'vcpus': 1,
+ 'os_type': 'Linux',
+ 'uuid': 'fake-uuid'},
+ 'num_instances': 1}
+ self.mox.ReplayAll()
+ dests = sched.select_destinations(fake_context, request_spec, {})
+ (host, node) = (dests[0]['host'], dests[0]['nodename'])
+ self.assertEqual(host, selected_hosts[0])
+ self.assertEqual(node, selected_nodes[0])
+
+ @mock.patch.object(filter_scheduler.FilterScheduler, '_schedule')
+ def test_select_destinations_notifications(self, mock_schedule):
+ mock_schedule.return_value = [mock.Mock()]
+
+ with mock.patch.object(self.driver.notifier, 'info') as mock_info:
+ request_spec = {'num_instances': 1}
+
+ self.driver.select_destinations(self.context, request_spec, {})
+
+ expected = [
+ mock.call(self.context, 'scheduler.select_destinations.start',
+ dict(request_spec=request_spec)),
+ mock.call(self.context, 'scheduler.select_destinations.end',
+ dict(request_spec=request_spec))]
+ self.assertEqual(expected, mock_info.call_args_list)
+
+ def test_select_destinations_no_valid_host(self):
+
+ def _return_no_host(*args, **kwargs):
+ return []
+
+ self.stubs.Set(self.driver, '_schedule', _return_no_host)
+ self.assertRaises(exception.NoValidHost,
+ self.driver.select_destinations, self.context,
+ {'num_instances': 1}, {})
+
+ def test_select_destinations_no_valid_host_not_enough(self):
+ # Tests that we have fewer hosts available than number of instances
+ # requested to build.
+ with mock.patch.object(self.driver, '_schedule',
+ return_value=[mock.sentinel.host1]):
+ try:
+ self.driver.select_destinations(
+ self.context, {'num_instances': 2}, {})
+ self.fail('Expected NoValidHost to be raised.')
+ except exception.NoValidHost as e:
+ # Make sure that we provided a reason why NoValidHost.
+ self.assertIn('reason', e.kwargs)
+ self.assertTrue(len(e.kwargs['reason']) > 0)
+
+ def test_handles_deleted_instance(self):
+ """Test instance deletion while being scheduled."""
+
+ def _raise_instance_not_found(*args, **kwargs):
+ raise exception.InstanceNotFound(instance_id='123')
+
+ self.stubs.Set(driver, 'instance_update_db',
+ _raise_instance_not_found)
+
+ sched = fakes.FakeFilterScheduler()
+
+ fake_context = context.RequestContext('user', 'project')
+ host_state = host_manager.HostState('host2', 'node2')
+ weighted_host = weights.WeighedHost(host_state, 1.42)
+ filter_properties = {}
+
+ uuid = 'fake-uuid1'
+ instance_properties = {'project_id': 1, 'os_type': 'Linux'}
+ request_spec = {'instance_type': {'memory_mb': 1, 'local_gb': 1},
+ 'instance_properties': instance_properties,
+ 'instance_uuids': [uuid]}
+ sched._provision_resource(fake_context, weighted_host,
+ request_spec, filter_properties,
+ None, None, None, None)
diff --git a/nova/tests/scheduler/test_filters.py b/nova/tests/unit/scheduler/test_filters.py
index 6469829078..6469829078 100644
--- a/nova/tests/scheduler/test_filters.py
+++ b/nova/tests/unit/scheduler/test_filters.py
diff --git a/nova/tests/scheduler/test_filters_utils.py b/nova/tests/unit/scheduler/test_filters_utils.py
index 48792fae35..48792fae35 100644
--- a/nova/tests/scheduler/test_filters_utils.py
+++ b/nova/tests/unit/scheduler/test_filters_utils.py
diff --git a/nova/tests/unit/scheduler/test_host_filters.py b/nova/tests/unit/scheduler/test_host_filters.py
new file mode 100644
index 0000000000..caed938aa3
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_host_filters.py
@@ -0,0 +1,38 @@
+# Copyright 2011 OpenStack Foundation. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler Host Filters.
+"""
+
+from nova.scheduler import filters
+from nova.scheduler.filters import all_hosts_filter
+from nova.scheduler.filters import compute_filter
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class HostFiltersTestCase(test.NoDBTestCase):
+
+ def test_filter_handler(self):
+ # Double check at least a couple of known filters exist
+ filter_handler = filters.HostFilterHandler()
+ classes = filter_handler.get_matching_classes(
+ ['nova.scheduler.filters.all_filters'])
+ self.assertIn(all_hosts_filter.AllHostsFilter, classes)
+ self.assertIn(compute_filter.ComputeFilter, classes)
+
+ def test_all_host_filter(self):
+ filt_cls = all_hosts_filter.AllHostsFilter()
+ host = fakes.FakeHostState('host1', 'node1', {})
+ self.assertTrue(filt_cls.host_passes(host, {}))
diff --git a/nova/tests/unit/scheduler/test_host_manager.py b/nova/tests/unit/scheduler/test_host_manager.py
new file mode 100644
index 0000000000..b891baf7b4
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_host_manager.py
@@ -0,0 +1,545 @@
+# Copyright (c) 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For HostManager
+"""
+
+import mock
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+import six
+
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import db
+from nova import exception
+from nova.i18n import _LW
+from nova.scheduler import filters
+from nova.scheduler import host_manager
+from nova import test
+from nova.tests.unit import matchers
+from nova.tests.unit.scheduler import fakes
+from nova import utils
+from nova.virt import hardware
+
+
+class FakeFilterClass1(filters.BaseHostFilter):
+ def host_passes(self, host_state, filter_properties):
+ pass
+
+
+class FakeFilterClass2(filters.BaseHostFilter):
+ def host_passes(self, host_state, filter_properties):
+ pass
+
+
+class HostManagerTestCase(test.NoDBTestCase):
+ """Test case for HostManager class."""
+
+ def setUp(self):
+ super(HostManagerTestCase, self).setUp()
+ self.host_manager = host_manager.HostManager()
+ self.fake_hosts = [host_manager.HostState('fake_host%s' % x,
+ 'fake-node') for x in xrange(1, 5)]
+ self.fake_hosts += [host_manager.HostState('fake_multihost',
+ 'fake-node%s' % x) for x in xrange(1, 5)]
+ self.addCleanup(timeutils.clear_time_override)
+
+ def test_choose_host_filters_not_found(self):
+ self.flags(scheduler_default_filters='FakeFilterClass3')
+ self.host_manager.filter_classes = [FakeFilterClass1,
+ FakeFilterClass2]
+ self.assertRaises(exception.SchedulerHostFilterNotFound,
+ self.host_manager._choose_host_filters, None)
+
+ def test_choose_host_filters(self):
+ self.flags(scheduler_default_filters=['FakeFilterClass2'])
+ self.host_manager.filter_classes = [FakeFilterClass1,
+ FakeFilterClass2]
+
+ # Test that we return 1 correct filter class
+ filter_classes = self.host_manager._choose_host_filters(None)
+ self.assertEqual(len(filter_classes), 1)
+ self.assertEqual(filter_classes[0].__name__, 'FakeFilterClass2')
+
+ def _mock_get_filtered_hosts(self, info, specified_filters=None):
+ self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
+
+ info['got_objs'] = []
+ info['got_fprops'] = []
+
+ def fake_filter_one(_self, obj, filter_props):
+ info['got_objs'].append(obj)
+ info['got_fprops'].append(filter_props)
+ return True
+
+ self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
+ self.host_manager._choose_host_filters(specified_filters).AndReturn(
+ [FakeFilterClass1])
+
+ def _verify_result(self, info, result, filters=True):
+ for x in info['got_fprops']:
+ self.assertEqual(x, info['expected_fprops'])
+ if filters:
+ self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
+ self.assertEqual(set(info['expected_objs']), set(result))
+
+ def test_get_filtered_hosts(self):
+ fake_properties = {'moo': 1, 'cow': 2}
+
+ info = {'expected_objs': self.fake_hosts,
+ 'expected_fprops': fake_properties}
+
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_specified_filters(self):
+ fake_properties = {'moo': 1, 'cow': 2}
+
+ specified_filters = ['FakeFilterClass1', 'FakeFilterClass2']
+ info = {'expected_objs': self.fake_hosts,
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info, specified_filters)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties, filter_class_names=specified_filters)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_ignore(self):
+ fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
+ 'fake_host5', 'fake_multihost']}
+
+ # [1] and [3] are host2 and host4
+ info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_force_hosts(self):
+ fake_properties = {'force_hosts': ['fake_host1', 'fake_host3',
+ 'fake_host5']}
+
+ # [0] and [2] are host1 and host3
+ info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_no_matching_force_hosts(self):
+ fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
+ # Ensure ignore_hosts processed before force_hosts in host filters.
+ fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
+ 'ignore_hosts': ['fake_host1']}
+
+ # only fake_host3 should be left.
+ info = {'expected_objs': [self.fake_hosts[2]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
+ # Ensure all nodes returned for a host with many nodes
+ fake_properties = {'force_hosts': ['fake_multihost']}
+
+ info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
+ self.fake_hosts[6], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_nodes(self):
+ fake_properties = {'force_nodes': ['fake-node2', 'fake-node4',
+ 'fake-node9']}
+
+ # [5] is fake-node2, [7] is fake-node4
+ info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
+ # Ensure only overlapping results if both force host and node
+ fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'],
+ 'force_nodes': ['fake-node2', 'fake-node9']}
+
+ # [5] is fake-node2
+ info = {'expected_objs': [self.fake_hosts[5]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
+ # Ensure non-overlapping force_node and force_host yield no result
+ fake_properties = {'force_hosts': ['fake_multihost'],
+ 'force_nodes': ['fake-node']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
+ # Ensure ignore_hosts can coexist with force_nodes
+ fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
+ 'ignore_hosts': ['fake_host1', 'fake_host2']}
+
+ info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
+ # Ensure ignore_hosts is processed before force_nodes
+ fake_properties = {'force_nodes': ['fake_node4', 'fake_node2'],
+ 'ignore_hosts': ['fake_multihost']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_all_host_states(self):
+
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ self.mox.StubOutWithMock(host_manager.LOG, 'warn')
+
+ db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
+ # node 3 host physical disk space is greater than database
+ host_manager.LOG.warn(_LW("Host %(hostname)s has more disk space than "
+ "database expected (%(physical)sgb > "
+ "%(database)sgb)"),
+ {'physical': 3333, 'database': 3072,
+ 'hostname': 'node3'})
+ # Invalid service
+ host_manager.LOG.warn(_LW("No service for compute ID %s"), 5)
+
+ self.mox.ReplayAll()
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+
+ self.assertEqual(len(host_states_map), 4)
+ # Check that .service is set properly
+ for i in xrange(4):
+ compute_node = fakes.COMPUTE_NODES[i]
+ host = compute_node['service']['host']
+ node = compute_node['hypervisor_hostname']
+ state_key = (host, node)
+ self.assertEqual(host_states_map[state_key].service,
+ compute_node['service'])
+ self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb,
+ 512)
+ # 511GB
+ self.assertEqual(host_states_map[('host1', 'node1')].free_disk_mb,
+ 524288)
+ self.assertEqual(host_states_map[('host2', 'node2')].free_ram_mb,
+ 1024)
+ # 1023GB
+ self.assertEqual(host_states_map[('host2', 'node2')].free_disk_mb,
+ 1048576)
+ self.assertEqual(host_states_map[('host3', 'node3')].free_ram_mb,
+ 3072)
+ # 3071GB
+ self.assertEqual(host_states_map[('host3', 'node3')].free_disk_mb,
+ 3145728)
+ self.assertThat(
+ hardware.VirtNUMAHostTopology.from_json(
+ host_states_map[('host3', 'node3')].numa_topology
+ )._to_dict(),
+ matchers.DictMatches(fakes.NUMA_TOPOLOGY._to_dict()))
+ self.assertEqual(host_states_map[('host4', 'node4')].free_ram_mb,
+ 8192)
+ # 8191GB
+ self.assertEqual(host_states_map[('host4', 'node4')].free_disk_mb,
+ 8388608)
+
+
+class HostManagerChangedNodesTestCase(test.NoDBTestCase):
+ """Test case for HostManager class."""
+
+ def setUp(self):
+ super(HostManagerChangedNodesTestCase, self).setUp()
+ self.host_manager = host_manager.HostManager()
+ self.fake_hosts = [
+ host_manager.HostState('host1', 'node1'),
+ host_manager.HostState('host2', 'node2'),
+ host_manager.HostState('host3', 'node3'),
+ host_manager.HostState('host4', 'node4')
+ ]
+ self.addCleanup(timeutils.clear_time_override)
+
+ def test_get_all_host_states(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(len(host_states_map), 4)
+
+ def test_get_all_host_states_after_delete_one(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ # all nodes active for first call
+ db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
+ # remove node4 for second call
+ running_nodes = [n for n in fakes.COMPUTE_NODES
+ if n.get('hypervisor_hostname') != 'node4']
+ db.compute_node_get_all(context).AndReturn(running_nodes)
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(len(host_states_map), 3)
+
+ def test_get_all_host_states_after_delete_all(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ # all nodes active for first call
+ db.compute_node_get_all(context).AndReturn(fakes.COMPUTE_NODES)
+ # remove all nodes for second call
+ db.compute_node_get_all(context).AndReturn([])
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(len(host_states_map), 0)
+
+
+class HostStateTestCase(test.NoDBTestCase):
+ """Test case for HostState class."""
+
+ # update_from_compute_node() and consume_from_instance() are tested
+ # in HostManagerTestCase.test_get_all_host_states()
+
+ def test_stat_consumption_from_compute_node(self):
+ stats = {
+ 'num_instances': '5',
+ 'num_proj_12345': '3',
+ 'num_proj_23456': '1',
+ 'num_vm_%s' % vm_states.BUILDING: '2',
+ 'num_vm_%s' % vm_states.SUSPENDED: '1',
+ 'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
+ 'num_task_%s' % task_states.MIGRATING: '2',
+ 'num_os_type_linux': '4',
+ 'num_os_type_windoze': '1',
+ 'io_workload': '42',
+ }
+ stats = jsonutils.dumps(stats)
+
+ hyper_ver_int = utils.convert_version_to_int('6.0.0')
+ compute = dict(stats=stats, memory_mb=1, free_disk_gb=0, local_gb=0,
+ local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
+ updated_at=None, host_ip='127.0.0.1',
+ hypervisor_type='htype',
+ hypervisor_hostname='hostname', cpu_info='cpu_info',
+ supported_instances='{}',
+ hypervisor_version=hyper_ver_int, numa_topology=None)
+
+ host = host_manager.HostState("fakehost", "fakenode")
+ host.update_from_compute_node(compute)
+
+ self.assertEqual(5, host.num_instances)
+ self.assertEqual(42, host.num_io_ops)
+ self.assertEqual(10, len(host.stats))
+
+ self.assertEqual('127.0.0.1', host.host_ip)
+ self.assertEqual('htype', host.hypervisor_type)
+ self.assertEqual('hostname', host.hypervisor_hostname)
+ self.assertEqual('cpu_info', host.cpu_info)
+ self.assertEqual({}, host.supported_instances)
+ self.assertEqual(hyper_ver_int, host.hypervisor_version)
+
+ def test_stat_consumption_from_compute_node_non_pci(self):
+ stats = {
+ 'num_instances': '5',
+ 'num_proj_12345': '3',
+ 'num_proj_23456': '1',
+ 'num_vm_%s' % vm_states.BUILDING: '2',
+ 'num_vm_%s' % vm_states.SUSPENDED: '1',
+ 'num_task_%s' % task_states.RESIZE_MIGRATING: '1',
+ 'num_task_%s' % task_states.MIGRATING: '2',
+ 'num_os_type_linux': '4',
+ 'num_os_type_windoze': '1',
+ 'io_workload': '42',
+ }
+ stats = jsonutils.dumps(stats)
+
+ hyper_ver_int = utils.convert_version_to_int('6.0.0')
+ compute = dict(stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
+ local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
+ updated_at=None, host_ip='127.0.0.1',
+ hypervisor_version=hyper_ver_int, numa_topology=None)
+
+ host = host_manager.HostState("fakehost", "fakenode")
+ host.update_from_compute_node(compute)
+ self.assertIsNone(host.pci_stats)
+ self.assertEqual(hyper_ver_int, host.hypervisor_version)
+
+ def test_stat_consumption_from_compute_node_rescue_unshelving(self):
+ stats = {
+ 'num_instances': '5',
+ 'num_proj_12345': '3',
+ 'num_proj_23456': '1',
+ 'num_vm_%s' % vm_states.BUILDING: '2',
+ 'num_vm_%s' % vm_states.SUSPENDED: '1',
+ 'num_task_%s' % task_states.UNSHELVING: '1',
+ 'num_task_%s' % task_states.RESCUING: '2',
+ 'num_os_type_linux': '4',
+ 'num_os_type_windoze': '1',
+ 'io_workload': '42',
+ }
+ stats = jsonutils.dumps(stats)
+
+ hyper_ver_int = utils.convert_version_to_int('6.0.0')
+ compute = dict(stats=stats, memory_mb=0, free_disk_gb=0, local_gb=0,
+ local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
+ updated_at=None, host_ip='127.0.0.1',
+ hypervisor_version=hyper_ver_int, numa_topology=None)
+
+ host = host_manager.HostState("fakehost", "fakenode")
+ host.update_from_compute_node(compute)
+
+ self.assertEqual(5, host.num_instances)
+ self.assertEqual(42, host.num_io_ops)
+ self.assertEqual(10, len(host.stats))
+
+ self.assertIsNone(host.pci_stats)
+ self.assertEqual(hyper_ver_int, host.hypervisor_version)
+
+ @mock.patch('nova.virt.hardware.get_host_numa_usage_from_instance')
+ def test_stat_consumption_from_instance(self, numa_usage_mock):
+ numa_usage_mock.return_value = 'fake-consumed-once'
+ host = host_manager.HostState("fakehost", "fakenode")
+
+ instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
+ project_id='12345', vm_state=vm_states.BUILDING,
+ task_state=task_states.SCHEDULING, os_type='Linux',
+ uuid='fake-uuid')
+ host.consume_from_instance(instance)
+ numa_usage_mock.assert_called_once_with(host, instance)
+ self.assertEqual('fake-consumed-once', host.numa_topology)
+
+ numa_usage_mock.return_value = 'fake-consumed-twice'
+ instance = dict(root_gb=0, ephemeral_gb=0, memory_mb=0, vcpus=0,
+ project_id='12345', vm_state=vm_states.PAUSED,
+ task_state=None, os_type='Linux',
+ uuid='fake-uuid')
+ host.consume_from_instance(instance)
+
+ self.assertEqual(2, host.num_instances)
+ self.assertEqual(1, host.num_io_ops)
+ self.assertEqual(2, numa_usage_mock.call_count)
+ self.assertEqual(((host, instance),), numa_usage_mock.call_args)
+ self.assertEqual('fake-consumed-twice', host.numa_topology)
+
+ def test_resources_consumption_from_compute_node(self):
+ metrics = [
+ dict(name='res1',
+ value=1.0,
+ source='source1',
+ timestamp=None),
+ dict(name='res2',
+ value="string2",
+ source='source2',
+ timestamp=None),
+ ]
+ hyper_ver_int = utils.convert_version_to_int('6.0.0')
+ compute = dict(metrics=jsonutils.dumps(metrics),
+ memory_mb=0, free_disk_gb=0, local_gb=0,
+ local_gb_used=0, free_ram_mb=0, vcpus=0, vcpus_used=0,
+ updated_at=None, host_ip='127.0.0.1',
+ hypervisor_version=hyper_ver_int,
+ numa_topology=fakes.NUMA_TOPOLOGY.to_json())
+ host = host_manager.HostState("fakehost", "fakenode")
+ host.update_from_compute_node(compute)
+
+ self.assertEqual(len(host.metrics), 2)
+ self.assertEqual(set(['res1', 'res2']), set(host.metrics.keys()))
+ self.assertEqual(1.0, host.metrics['res1'].value)
+ self.assertEqual('source1', host.metrics['res1'].source)
+ self.assertEqual('string2', host.metrics['res2'].value)
+ self.assertEqual('source2', host.metrics['res2'].source)
+ self.assertIsInstance(host.numa_topology, six.string_types)
diff --git a/nova/tests/unit/scheduler/test_ironic_host_manager.py b/nova/tests/unit/scheduler/test_ironic_host_manager.py
new file mode 100644
index 0000000000..50ec038cb3
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_ironic_host_manager.py
@@ -0,0 +1,430 @@
+# Copyright (c) 2014 OpenStack Foundation
+# Copyright (c) 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For IronicHostManager
+"""
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova import db
+from nova import exception
+from nova.scheduler import filters
+from nova.scheduler import host_manager
+from nova.scheduler import ironic_host_manager
+from nova import test
+from nova.tests.unit.scheduler import ironic_fakes
+
+
+class FakeFilterClass1(filters.BaseHostFilter):
+    """No-op host filter; tests stub its _filter_one to observe calls."""
+
+    def host_passes(self, host_state, filter_properties):
+        # Intentionally empty: never exercised directly by these tests.
+        pass
+
+
+class FakeFilterClass2(filters.BaseHostFilter):
+    """Second no-op host filter, used for filter-selection tests."""
+
+    def host_passes(self, host_state, filter_properties):
+        # Intentionally empty: only the class name is matched in tests.
+        pass
+
+
+class IronicHostManagerTestCase(test.NoDBTestCase):
+    """Test case for IronicHostManager class."""
+
+    def setUp(self):
+        super(IronicHostManagerTestCase, self).setUp()
+        self.host_manager = ironic_host_manager.IronicHostManager()
+
+    def test_manager_public_api_signatures(self):
+        # IronicHostManager must remain call-compatible with the base
+        # HostManager so the scheduler can use either interchangeably.
+        self.assertPublicAPISignatures(host_manager.HostManager(),
+                                       self.host_manager)
+
+    def test_state_public_api_signatures(self):
+        # Same signature-compatibility contract for the per-node state.
+        self.assertPublicAPISignatures(
+            host_manager.HostState("dummy",
+                                   "dummy"),
+            ironic_host_manager.IronicNodeState("dummy",
+                                                "dummy")
+        )
+
+    def test_get_all_host_states(self):
+        # Ensure .service is set and we have the values we expect to.
+        context = 'fake_context'
+
+        self.mox.StubOutWithMock(db, 'compute_node_get_all')
+        db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
+        self.mox.ReplayAll()
+
+        self.host_manager.get_all_host_states(context)
+        host_states_map = self.host_manager.host_state_map
+
+        # One state entry per fake compute node, keyed by (host, node).
+        self.assertEqual(len(host_states_map), 4)
+        for i in range(4):
+            compute_node = ironic_fakes.COMPUTE_NODES[i]
+            host = compute_node['service']['host']
+            node = compute_node['hypervisor_hostname']
+            state_key = (host, node)
+            self.assertEqual(compute_node['service'],
+                             host_states_map[state_key].service)
+            self.assertEqual(jsonutils.loads(compute_node['stats']),
+                             host_states_map[state_key].stats)
+            self.assertEqual(compute_node['free_ram_mb'],
+                             host_states_map[state_key].free_ram_mb)
+            # Disk is reported in GB by the driver but tracked in MB.
+            self.assertEqual(compute_node['free_disk_gb'] * 1024,
+                             host_states_map[state_key].free_disk_mb)
+
+
+class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
+    """Test case for IronicHostManager tracking added/removed nodes."""
+
+    def setUp(self):
+        super(IronicHostManagerChangedNodesTestCase, self).setUp()
+        self.host_manager = ironic_host_manager.IronicHostManager()
+        ironic_driver = "nova.virt.ironic.driver.IronicDriver"
+        supported_instances = '[["i386", "baremetal", "baremetal"]]'
+        # Canned compute-node record shaped like what the ironic virt
+        # driver reports for a single bare-metal node.
+        # NOTE: keyword arguments use no spaces around '=' (PEP 8 E251);
+        # the original had 'hypervisor_version = 1' style here.
+        self.compute_node = dict(id=1, local_gb=10, memory_mb=1024, vcpus=1,
+                                 vcpus_used=0, local_gb_used=0,
+                                 memory_mb_used=0,
+                                 updated_at=None, cpu_info='baremetal cpu',
+                                 stats=jsonutils.dumps(dict(
+                                     ironic_driver=ironic_driver,
+                                     cpu_arch='i386')),
+                                 supported_instances=supported_instances,
+                                 free_disk_gb=10, free_ram_mb=1024,
+                                 hypervisor_type='ironic',
+                                 hypervisor_version=1,
+                                 hypervisor_hostname='fake_host')
+
+    @mock.patch.object(ironic_host_manager.IronicNodeState, '__init__')
+    def test_create_ironic_node_state(self, init_mock):
+        # A compute node advertising a bare-metal CPU gets the ironic
+        # node-state class.
+        init_mock.return_value = None
+        compute = {'cpu_info': 'baremetal cpu'}
+        host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
+                                                      compute=compute)
+        self.assertIs(ironic_host_manager.IronicNodeState, type(host_state))
+
+    @mock.patch.object(host_manager.HostState, '__init__')
+    def test_create_non_ironic_host_state(self, init_mock):
+        # Any other CPU info falls back to the generic HostState.
+        init_mock.return_value = None
+        compute = {'cpu_info': 'other cpu'}
+        host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
+                                                      compute=compute)
+        self.assertIs(host_manager.HostState, type(host_state))
+
+    def test_get_all_host_states_after_delete_one(self):
+        context = 'fake_context'
+
+        self.mox.StubOutWithMock(db, 'compute_node_get_all')
+        # all nodes active for first call
+        db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
+        # remove node4 for second call
+        running_nodes = [n for n in ironic_fakes.COMPUTE_NODES
+                         if n.get('hypervisor_hostname') != 'node4uuid']
+        db.compute_node_get_all(context).AndReturn(running_nodes)
+        self.mox.ReplayAll()
+
+        self.host_manager.get_all_host_states(context)
+        self.host_manager.get_all_host_states(context)
+        host_states_map = self.host_manager.host_state_map
+        # The deleted node must be dropped from the state map.
+        self.assertEqual(3, len(host_states_map))
+
+    def test_get_all_host_states_after_delete_all(self):
+        context = 'fake_context'
+
+        self.mox.StubOutWithMock(db, 'compute_node_get_all')
+        # all nodes active for first call
+        db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
+        # remove all nodes for second call
+        db.compute_node_get_all(context).AndReturn([])
+        self.mox.ReplayAll()
+
+        self.host_manager.get_all_host_states(context)
+        self.host_manager.get_all_host_states(context)
+        host_states_map = self.host_manager.host_state_map
+        self.assertEqual(0, len(host_states_map))
+
+    def test_update_from_compute_node(self):
+        host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+        host.update_from_compute_node(self.compute_node)
+
+        self.assertEqual(1024, host.free_ram_mb)
+        self.assertEqual(1024, host.total_usable_ram_mb)
+        self.assertEqual(10240, host.free_disk_mb)
+        self.assertEqual(1, host.vcpus_total)
+        self.assertEqual(0, host.vcpus_used)
+        self.assertEqual(jsonutils.loads(self.compute_node['stats']),
+                         host.stats)
+        self.assertEqual('ironic', host.hypervisor_type)
+        self.assertEqual(1, host.hypervisor_version)
+        self.assertEqual('fake_host', host.hypervisor_hostname)
+
+    def test_consume_identical_instance_from_compute(self):
+        # Bare-metal nodes are consumed whole: an instance that exactly
+        # matches the node leaves zero free resources.
+        host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+        host.update_from_compute_node(self.compute_node)
+
+        instance = dict(root_gb=10, ephemeral_gb=0, memory_mb=1024, vcpus=1)
+        host.consume_from_instance(instance)
+
+        self.assertEqual(1, host.vcpus_used)
+        self.assertEqual(0, host.free_ram_mb)
+        self.assertEqual(0, host.free_disk_mb)
+
+    def test_consume_larger_instance_from_compute(self):
+        # Even an over-sized request consumes the whole node (no partial
+        # accounting on bare metal).
+        host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+        host.update_from_compute_node(self.compute_node)
+
+        instance = dict(root_gb=20, ephemeral_gb=0, memory_mb=2048, vcpus=2)
+        host.consume_from_instance(instance)
+
+        self.assertEqual(1, host.vcpus_used)
+        self.assertEqual(0, host.free_ram_mb)
+        self.assertEqual(0, host.free_disk_mb)
+
+    def test_consume_smaller_instance_from_compute(self):
+        # A smaller request also consumes the whole node.
+        host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+        host.update_from_compute_node(self.compute_node)
+
+        instance = dict(root_gb=5, ephemeral_gb=0, memory_mb=512, vcpus=1)
+        host.consume_from_instance(instance)
+
+        self.assertEqual(1, host.vcpus_used)
+        self.assertEqual(0, host.free_ram_mb)
+        self.assertEqual(0, host.free_disk_mb)
+
+
+class IronicHostManagerTestFilters(test.NoDBTestCase):
+    """Test filters work for IronicHostManager."""
+
+    def setUp(self):
+        super(IronicHostManagerTestFilters, self).setUp()
+        self.host_manager = ironic_host_manager.IronicHostManager()
+        # hosts [0..3]: single-node hosts fake_host1..fake_host4, each with
+        # node 'fake-node'; hosts [4..7]: one host 'fake_multihost' with
+        # nodes fake-node1..fake-node4.
+        self.fake_hosts = [ironic_host_manager.IronicNodeState(
+            'fake_host%s' % x, 'fake-node') for x in range(1, 5)]
+        self.fake_hosts += [ironic_host_manager.IronicNodeState(
+            'fake_multihost', 'fake-node%s' % x) for x in range(1, 5)]
+
+    def test_choose_host_filters_not_found(self):
+        # An unknown filter name in the config must raise, not be ignored.
+        self.flags(scheduler_default_filters='FakeFilterClass3')
+        self.host_manager.filter_classes = [FakeFilterClass1,
+                                            FakeFilterClass2]
+        self.assertRaises(exception.SchedulerHostFilterNotFound,
+                          self.host_manager._choose_host_filters, None)
+
+    def test_choose_host_filters(self):
+        self.flags(scheduler_default_filters=['FakeFilterClass2'])
+        self.host_manager.filter_classes = [FakeFilterClass1,
+                                            FakeFilterClass2]
+
+        # Test that we return exactly one correct filter class.
+        filter_classes = self.host_manager._choose_host_filters(None)
+        self.assertEqual(1, len(filter_classes))
+        self.assertEqual('FakeFilterClass2', filter_classes[0].__name__)
+
+    def _mock_get_filtered_hosts(self, info, specified_filters=None):
+        # Stub filter selection to always use FakeFilterClass1, and record
+        # every (object, filter_properties) pair the filter sees in 'info'.
+        self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
+
+        info['got_objs'] = []
+        info['got_fprops'] = []
+
+        def fake_filter_one(_self, obj, filter_props):
+            info['got_objs'].append(obj)
+            info['got_fprops'].append(filter_props)
+            return True
+
+        self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
+        self.host_manager._choose_host_filters(specified_filters).AndReturn(
+            [FakeFilterClass1])
+
+    def _verify_result(self, info, result, filters=True):
+        # 'filters=False' means the filter chain was bypassed (force_hosts /
+        # force_nodes short-circuit), so got_objs is not checked.
+        for x in info['got_fprops']:
+            self.assertEqual(x, info['expected_fprops'])
+        if filters:
+            self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
+        self.assertEqual(set(info['expected_objs']), set(result))
+
+    def test_get_filtered_hosts(self):
+        fake_properties = {'moo': 1, 'cow': 2}
+
+        info = {'expected_objs': self.fake_hosts,
+                'expected_fprops': fake_properties}
+
+        self._mock_get_filtered_hosts(info)
+
+        self.mox.ReplayAll()
+        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+                fake_properties)
+        self._verify_result(info, result)
+
+    def test_get_filtered_hosts_with_specified_filters(self):
+        fake_properties = {'moo': 1, 'cow': 2}
+
+        specified_filters = ['FakeFilterClass1', 'FakeFilterClass2']
+        info = {'expected_objs': self.fake_hosts,
+                'expected_fprops': fake_properties}
+        self._mock_get_filtered_hosts(info, specified_filters)
+
+        self.mox.ReplayAll()
+
+        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+                fake_properties, filter_class_names=specified_filters)
+        self._verify_result(info, result)
+
+    def test_get_filtered_hosts_with_ignore(self):
+        fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
+                                            'fake_host5', 'fake_multihost']}
+
+        # [1] and [3] are host2 and host4
+        info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
+                'expected_fprops': fake_properties}
+        self._mock_get_filtered_hosts(info)
+
+        self.mox.ReplayAll()
+
+        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+                fake_properties)
+        self._verify_result(info, result)
+
+    def test_get_filtered_hosts_with_force_hosts(self):
+        fake_properties = {'force_hosts': ['fake_host1', 'fake_host3',
+                                           'fake_host5']}
+
+        # [0] and [2] are host1 and host3
+        info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
+                'expected_fprops': fake_properties}
+        self._mock_get_filtered_hosts(info)
+
+        self.mox.ReplayAll()
+
+        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+                fake_properties)
+        self._verify_result(info, result, False)
+
+    def test_get_filtered_hosts_with_no_matching_force_hosts(self):
+        fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
+
+        info = {'expected_objs': [],
+                'expected_fprops': fake_properties}
+        self._mock_get_filtered_hosts(info)
+
+        self.mox.ReplayAll()
+
+        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+                fake_properties)
+        self._verify_result(info, result, False)
+
+    def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
+        # Ensure ignore_hosts processed before force_hosts in host filters.
+        fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
+                           'ignore_hosts': ['fake_host1']}
+
+        # only fake_host3 should be left.
+        info = {'expected_objs': [self.fake_hosts[2]],
+                'expected_fprops': fake_properties}
+        self._mock_get_filtered_hosts(info)
+
+        self.mox.ReplayAll()
+
+        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+                fake_properties)
+        self._verify_result(info, result, False)
+
+    def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
+        # Ensure all nodes returned for a host with many nodes
+        fake_properties = {'force_hosts': ['fake_multihost']}
+
+        info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
+                                  self.fake_hosts[6], self.fake_hosts[7]],
+                'expected_fprops': fake_properties}
+        self._mock_get_filtered_hosts(info)
+
+        self.mox.ReplayAll()
+
+        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+                fake_properties)
+        self._verify_result(info, result, False)
+
+    def test_get_filtered_hosts_with_force_nodes(self):
+        fake_properties = {'force_nodes': ['fake-node2', 'fake-node4',
+                                           'fake-node9']}
+
+        # [5] is fake-node2, [7] is fake-node4
+        info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+                'expected_fprops': fake_properties}
+        self._mock_get_filtered_hosts(info)
+
+        self.mox.ReplayAll()
+
+        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+                fake_properties)
+        self._verify_result(info, result, False)
+
+    def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
+        # Ensure only overlapping results if both force host and node
+        fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'],
+                           'force_nodes': ['fake-node2', 'fake-node9']}
+
+        # [5] is fake-node2
+        info = {'expected_objs': [self.fake_hosts[5]],
+                'expected_fprops': fake_properties}
+        self._mock_get_filtered_hosts(info)
+
+        self.mox.ReplayAll()
+
+        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+                fake_properties)
+        self._verify_result(info, result, False)
+
+    def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
+        # Ensure non-overlapping force_node and force_host yield no result
+        fake_properties = {'force_hosts': ['fake_multihost'],
+                           'force_nodes': ['fake-node']}
+
+        info = {'expected_objs': [],
+                'expected_fprops': fake_properties}
+        self._mock_get_filtered_hosts(info)
+
+        self.mox.ReplayAll()
+
+        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+                fake_properties)
+        self._verify_result(info, result, False)
+
+    def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
+        # Ensure ignore_hosts can coexist with force_nodes
+        fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
+                           'ignore_hosts': ['fake_host1', 'fake_host2']}
+
+        info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+                'expected_fprops': fake_properties}
+        self._mock_get_filtered_hosts(info)
+
+        self.mox.ReplayAll()
+
+        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+                fake_properties)
+        self._verify_result(info, result, False)
+
+    def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
+        # Ensure ignore_hosts is processed before force_nodes.
+        # NOTE: node names use hyphens ('fake-nodeX') to match the fixture;
+        # the original used 'fake_node4'/'fake_node2', which match nothing
+        # and made this test pass vacuously.
+        fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
+                           'ignore_hosts': ['fake_multihost']}
+
+        # All fake-nodeX nodes belong to the ignored 'fake_multihost',
+        # so nothing survives even though the nodes were forced.
+        info = {'expected_objs': [],
+                'expected_fprops': fake_properties}
+        self._mock_get_filtered_hosts(info)
+
+        self.mox.ReplayAll()
+
+        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+                fake_properties)
+        self._verify_result(info, result, False)
diff --git a/nova/tests/scheduler/test_rpcapi.py b/nova/tests/unit/scheduler/test_rpcapi.py
index 0ba0feb540..0ba0feb540 100644
--- a/nova/tests/scheduler/test_rpcapi.py
+++ b/nova/tests/unit/scheduler/test_rpcapi.py
diff --git a/nova/tests/unit/scheduler/test_scheduler.py b/nova/tests/unit/scheduler/test_scheduler.py
new file mode 100644
index 0000000000..2435d60343
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_scheduler.py
@@ -0,0 +1,378 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler
+"""
+
+import mox
+from oslo.config import cfg
+
+from nova.compute import api as compute_api
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.image import glance
+from nova import objects
+from nova import rpc
+from nova.scheduler import driver
+from nova.scheduler import manager
+from nova import servicegroup
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_server_actions
+from nova.tests.unit.image import fake as fake_image
+from nova.tests.unit.objects import test_instance_fault
+from nova.tests.unit.scheduler import fakes
+
+CONF = cfg.CONF
+
+
+class SchedulerManagerTestCase(test.NoDBTestCase):
+    """Test case for scheduler manager."""
+
+    # Subclasses can swap these to re-run the suite against another
+    # manager/driver pairing.
+    manager_cls = manager.SchedulerManager
+    driver_cls = driver.Scheduler
+    driver_cls_name = 'nova.scheduler.driver.Scheduler'
+
+    def setUp(self):
+        super(SchedulerManagerTestCase, self).setUp()
+        self.flags(scheduler_driver=self.driver_cls_name)
+        self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
+        self.manager = self.manager_cls()
+        self.context = context.RequestContext('fake_user', 'fake_project')
+        self.topic = 'fake_topic'
+        self.fake_args = (1, 2, 3)
+        self.fake_kwargs = {'cat': 'meow', 'dog': 'woof'}
+        fake_server_actions.stub_out_action_events(self.stubs)
+
+    def test_1_correct_init(self):
+        # Correct scheduler driver
+        manager = self.manager
+        self.assertIsInstance(manager.driver, self.driver_cls)
+
+    def _mox_schedule_method_helper(self, method_name):
+        # Make sure the method exists that we're going to test call
+        def stub_method(*args, **kwargs):
+            pass
+
+        setattr(self.manager.driver, method_name, stub_method)
+
+        self.mox.StubOutWithMock(self.manager.driver,
+                                 method_name)
+
+    def test_run_instance_exception_puts_instance_in_error_state(self):
+        # When the driver raises NoValidHost, the manager must set the
+        # instance to ERROR and record an instance fault.
+        fake_instance_uuid = 'fake-instance-id'
+        inst = {"vm_state": "", "task_state": ""}
+
+        self._mox_schedule_method_helper('schedule_run_instance')
+        self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+        request_spec = {'instance_properties': inst,
+                        'instance_uuids': [fake_instance_uuid]}
+
+        self.manager.driver.schedule_run_instance(self.context,
+                request_spec, None, None, None, None, {}, False).AndRaise(
+                        exception.NoValidHost(reason=""))
+        old, new_ref = db.instance_update_and_get_original(self.context,
+                fake_instance_uuid,
+                {"vm_state": vm_states.ERROR,
+                 "task_state": None}).AndReturn((inst, inst))
+        compute_utils.add_instance_fault_from_exc(self.context,
+                new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+
+        self.mox.ReplayAll()
+        self.manager.run_instance(self.context, request_spec,
+                None, None, None, None, {}, False)
+
+    def test_prep_resize_no_valid_host_back_in_active_state(self):
+        # NoValidHost during resize of a running instance rolls the
+        # vm_state back to ACTIVE.
+        fake_instance_uuid = 'fake-instance-id'
+        fake_instance = {'uuid': fake_instance_uuid}
+        inst = {"vm_state": "", "task_state": ""}
+
+        self._mox_schedule_method_helper('select_destinations')
+
+        self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+        request_spec = {'instance_type': 'fake_type',
+                        'instance_uuids': [fake_instance_uuid],
+                        'instance_properties': {'uuid': fake_instance_uuid}}
+        kwargs = {
+                'context': self.context,
+                'image': 'fake_image',
+                'request_spec': request_spec,
+                'filter_properties': 'fake_props',
+                'instance': fake_instance,
+                'instance_type': 'fake_type',
+                'reservations': list('fake_res'),
+        }
+        self.manager.driver.select_destinations(
+            self.context, request_spec, 'fake_props').AndRaise(
+                exception.NoValidHost(reason=""))
+        old_ref, new_ref = db.instance_update_and_get_original(self.context,
+                fake_instance_uuid,
+                {"vm_state": vm_states.ACTIVE, "task_state": None}).AndReturn(
+                        (inst, inst))
+        compute_utils.add_instance_fault_from_exc(self.context, new_ref,
+                mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+
+        self.mox.ReplayAll()
+        self.manager.prep_resize(**kwargs)
+
+    def test_prep_resize_no_valid_host_back_in_shutoff_state(self):
+        # NoValidHost during resize of a stopped instance rolls the
+        # vm_state back to STOPPED rather than ACTIVE.
+        fake_instance_uuid = 'fake-instance-id'
+        fake_instance = {'uuid': fake_instance_uuid, "vm_state": "stopped"}
+        inst = {"vm_state": "stopped", "task_state": ""}
+
+        self._mox_schedule_method_helper('select_destinations')
+
+        self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+        request_spec = {'instance_type': 'fake_type',
+                        'instance_uuids': [fake_instance_uuid],
+                        'instance_properties': {'uuid': fake_instance_uuid}}
+        kwargs = {
+                'context': self.context,
+                'image': 'fake_image',
+                'request_spec': request_spec,
+                'filter_properties': 'fake_props',
+                'instance': fake_instance,
+                'instance_type': 'fake_type',
+                'reservations': list('fake_res'),
+        }
+        self.manager.driver.select_destinations(
+            self.context, request_spec, 'fake_props').AndRaise(
+                exception.NoValidHost(reason=""))
+        old_ref, new_ref = db.instance_update_and_get_original(self.context,
+                fake_instance_uuid,
+                {"vm_state": vm_states.STOPPED, "task_state": None}).AndReturn(
+                        (inst, inst))
+        compute_utils.add_instance_fault_from_exc(self.context, new_ref,
+                mox.IsA(exception.NoValidHost), mox.IgnoreArg())
+
+        self.mox.ReplayAll()
+        self.manager.prep_resize(**kwargs)
+
+    def test_prep_resize_exception_host_in_error_state_and_raise(self):
+        # An unexpected exception sets the instance to ERROR, records a
+        # fault, and is re-raised to the caller.
+        fake_instance_uuid = 'fake-instance-id'
+        fake_instance = {'uuid': fake_instance_uuid}
+
+        self._mox_schedule_method_helper('select_destinations')
+
+        self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+        request_spec = {
+            'instance_properties': {'uuid': fake_instance_uuid},
+            'instance_uuids': [fake_instance_uuid]
+        }
+        kwargs = {
+                'context': self.context,
+                'image': 'fake_image',
+                'request_spec': request_spec,
+                'filter_properties': 'fake_props',
+                'instance': fake_instance,
+                'instance_type': 'fake_type',
+                'reservations': list('fake_res'),
+        }
+
+        self.manager.driver.select_destinations(
+            self.context, request_spec, 'fake_props').AndRaise(
+                test.TestingException('something happened'))
+
+        inst = {
+            "vm_state": "",
+            "task_state": "",
+        }
+        old_ref, new_ref = db.instance_update_and_get_original(self.context,
+                fake_instance_uuid,
+                {"vm_state": vm_states.ERROR,
+                 "task_state": None}).AndReturn((inst, inst))
+        compute_utils.add_instance_fault_from_exc(self.context, new_ref,
+                mox.IsA(test.TestingException), mox.IgnoreArg())
+
+        self.mox.ReplayAll()
+
+        self.assertRaises(test.TestingException, self.manager.prep_resize,
+                          **kwargs)
+
+    def test_set_vm_state_and_notify_adds_instance_fault(self):
+        # _set_vm_state_and_notify must create an instance fault and emit
+        # an error notification on the 'scheduler' topic.
+        request = {'instance_properties': {'uuid': 'fake-uuid'}}
+        updates = {'vm_state': 'foo'}
+        fake_inst = {'uuid': 'fake-uuid'}
+
+        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+        self.mox.StubOutWithMock(db, 'instance_fault_create')
+        self.mox.StubOutWithMock(rpc, 'get_notifier')
+        notifier = self.mox.CreateMockAnything()
+        rpc.get_notifier('scheduler').AndReturn(notifier)
+        db.instance_update_and_get_original(self.context, 'fake-uuid',
+                                            updates).AndReturn((None,
+                                                                fake_inst))
+        db.instance_fault_create(self.context, mox.IgnoreArg()).AndReturn(
+            test_instance_fault.fake_faults['fake-uuid'][0])
+        notifier.error(self.context, 'scheduler.foo', mox.IgnoreArg())
+        self.mox.ReplayAll()
+
+        self.manager._set_vm_state_and_notify('foo', {'vm_state': 'foo'},
+                                              self.context, None, request)
+
+    def test_prep_resize_post_populates_retry(self):
+        # A successful prep_resize must append the chosen (host, node) to
+        # filter_properties['retry']['hosts'] for retry bookkeeping.
+        self.manager.driver = fakes.FakeFilterScheduler()
+
+        image = 'image'
+        instance_uuid = 'fake-instance-id'
+        instance = fake_instance.fake_db_instance(uuid=instance_uuid)
+
+        instance_properties = {'project_id': 'fake', 'os_type': 'Linux'}
+        instance_type = "m1.tiny"
+        request_spec = {'instance_properties': instance_properties,
+                        'instance_type': instance_type,
+                        'instance_uuids': [instance_uuid]}
+        retry = {'hosts': [], 'num_attempts': 1}
+        filter_properties = {'retry': retry}
+        reservations = None
+
+        hosts = [dict(host='host', nodename='node', limits={})]
+
+        self._mox_schedule_method_helper('select_destinations')
+        self.manager.driver.select_destinations(
+            self.context, request_spec, filter_properties).AndReturn(hosts)
+
+        self.mox.StubOutWithMock(self.manager.compute_rpcapi, 'prep_resize')
+        self.manager.compute_rpcapi.prep_resize(self.context, image,
+                mox.IsA(objects.Instance),
+                instance_type, 'host', reservations, request_spec=request_spec,
+                filter_properties=filter_properties, node='node')
+
+        self.mox.ReplayAll()
+        self.manager.prep_resize(self.context, image, request_spec,
+                filter_properties, instance, instance_type, reservations)
+
+        self.assertEqual([['host', 'node']],
+                         filter_properties['retry']['hosts'])
+
+
+class SchedulerTestCase(test.NoDBTestCase):
+    """Test case for base scheduler driver class."""
+
+    # So we can subclass this test and re-use tests if we need.
+    driver_cls = driver.Scheduler
+
+    def setUp(self):
+        super(SchedulerTestCase, self).setUp()
+        self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
+
+        def fake_show(meh, context, id, **kwargs):
+            # Minimal glance 'show' stub: any truthy id yields a canned
+            # active image; a falsy id raises ImageNotFound.
+            if id:
+                return {'id': id, 'min_disk': None, 'min_ram': None,
+                        'name': 'fake_name',
+                        'status': 'active',
+                        'properties': {'kernel_id': 'fake_kernel_id',
+                                       'ramdisk_id': 'fake_ramdisk_id',
+                                       'something_else': 'meow'}}
+            else:
+                raise exception.ImageNotFound(image_id=id)
+
+        fake_image.stub_out_image_service(self.stubs)
+        self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+        self.image_service = glance.get_default_image_service()
+
+        self.driver = self.driver_cls()
+        self.context = context.RequestContext('fake_user', 'fake_project')
+        self.topic = 'fake_topic'
+        self.servicegroup_api = servicegroup.API()
+
+    def test_hosts_up(self):
+        # Only hosts whose service the servicegroup API reports as up
+        # should be returned.
+        service1 = {'host': 'host1'}
+        service2 = {'host': 'host2'}
+        services = [service1, service2]
+
+        self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
+        self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
+
+        db.service_get_all_by_topic(self.context,
+                self.topic).AndReturn(services)
+        self.servicegroup_api.service_is_up(service1).AndReturn(False)
+        self.servicegroup_api.service_is_up(service2).AndReturn(True)
+
+        self.mox.ReplayAll()
+        result = self.driver.hosts_up(self.context, self.topic)
+        self.assertEqual(result, ['host2'])
+
+    def test_handle_schedule_error_adds_instance_fault(self):
+        # handle_schedule_error must persist a fault record and notify.
+        instance = {'uuid': 'fake-uuid'}
+        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+        self.mox.StubOutWithMock(db, 'instance_fault_create')
+        db.instance_update_and_get_original(self.context, instance['uuid'],
+                                            mox.IgnoreArg()).AndReturn(
+                                                (None, instance))
+        db.instance_fault_create(self.context, mox.IgnoreArg()).AndReturn(
+            test_instance_fault.fake_faults['fake-uuid'][0])
+        self.mox.StubOutWithMock(rpc, 'get_notifier')
+        notifier = self.mox.CreateMockAnything()
+        rpc.get_notifier('scheduler').AndReturn(notifier)
+        notifier.error(self.context, 'scheduler.run_instance', mox.IgnoreArg())
+        self.mox.ReplayAll()
+
+        driver.handle_schedule_error(self.context,
+                                     exception.NoValidHost('test'),
+                                     instance['uuid'], {})
+
+
+class SchedulerDriverBaseTestCase(SchedulerTestCase):
+    """Test cases for base scheduler driver class methods
+       that will fail if the driver is changed.
+    """
+
+    def test_unimplemented_schedule_run_instance(self):
+        # The abstract base driver must not silently accept scheduling.
+        fake_request_spec = {'instance_properties':
+                             {'uuid': 'uuid'}}
+
+        self.assertRaises(NotImplementedError,
+                          self.driver.schedule_run_instance,
+                          self.context, fake_request_spec, None, None, None,
+                          None, None, False)
+
+    def test_unimplemented_select_destinations(self):
+        self.assertRaises(NotImplementedError,
+                          self.driver.select_destinations, self.context, {}, {})
+
+
+class SchedulerInstanceGroupData(test.TestCase):
+    """Fixture base providing instance-group helpers; uses a real DB
+    (test.TestCase rather than NoDBTestCase).
+    """
+
+    driver_cls = driver.Scheduler
+
+    def setUp(self):
+        super(SchedulerInstanceGroupData, self).setUp()
+        self.user_id = 'fake_user'
+        self.project_id = 'fake_project'
+        self.context = context.RequestContext(self.user_id, self.project_id)
+        self.driver = self.driver_cls()
+
+    def _get_default_values(self):
+        # Minimal valid values for an instance_group row.
+        return {'name': 'fake_name',
+                'user_id': self.user_id,
+                'project_id': self.project_id}
+
+    def _create_instance_group(self, context, values, policies=None,
+                               metadata=None, members=None):
+        """Create an instance group via the db API and return the row."""
+        return db.instance_group_create(context, values, policies=policies,
+                                        metadata=metadata, members=members)
diff --git a/nova/tests/scheduler/test_scheduler_options.py b/nova/tests/unit/scheduler/test_scheduler_options.py
index 29d42ccd2f..29d42ccd2f 100644
--- a/nova/tests/scheduler/test_scheduler_options.py
+++ b/nova/tests/unit/scheduler/test_scheduler_options.py
diff --git a/nova/tests/unit/scheduler/test_scheduler_utils.py b/nova/tests/unit/scheduler/test_scheduler_utils.py
new file mode 100644
index 0000000000..0dfade7deb
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_scheduler_utils.py
@@ -0,0 +1,314 @@
+# Copyright (c) 2013 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler Utils
+"""
+import contextlib
+import uuid
+
+import mock
+import mox
+from oslo.config import cfg
+
+from nova.compute import flavors
+from nova.compute import utils as compute_utils
+from nova import db
+from nova import exception
+from nova import notifications
+from nova import objects
+from nova import rpc
+from nova.scheduler import utils as scheduler_utils
+from nova import test
+from nova.tests.unit import fake_instance
+
+CONF = cfg.CONF
+
+
+class SchedulerUtilsTestCase(test.NoDBTestCase):
+ """Test case for scheduler utils methods."""
+ def setUp(self):
+ super(SchedulerUtilsTestCase, self).setUp()
+ self.context = 'fake-context'
+
+ def test_build_request_spec_without_image(self):
+ image = None
+ instance = {'uuid': 'fake-uuid'}
+ instance_type = {'flavorid': 'fake-id'}
+
+ self.mox.StubOutWithMock(flavors, 'extract_flavor')
+ self.mox.StubOutWithMock(db, 'flavor_extra_specs_get')
+ flavors.extract_flavor(mox.IgnoreArg()).AndReturn(instance_type)
+ db.flavor_extra_specs_get(self.context, mox.IgnoreArg()).AndReturn([])
+ self.mox.ReplayAll()
+
+ request_spec = scheduler_utils.build_request_spec(self.context, image,
+ [instance])
+ self.assertEqual({}, request_spec['image'])
+
+ @mock.patch.object(flavors, 'extract_flavor')
+ @mock.patch.object(db, 'flavor_extra_specs_get')
+ def test_build_request_spec_with_object(self, flavor_extra_specs_get,
+ extract_flavor):
+ instance_type = {'flavorid': 'fake-id'}
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ extract_flavor.return_value = instance_type
+ flavor_extra_specs_get.return_value = []
+
+ request_spec = scheduler_utils.build_request_spec(self.context, None,
+ [instance])
+ self.assertIsInstance(request_spec['instance_properties'], dict)
+
+ def _test_set_vm_state_and_notify(self, request_spec,
+ expected_uuids):
+ updates = dict(vm_state='fake-vm-state')
+ service = 'fake-service'
+ method = 'fake-method'
+ exc_info = 'exc_info'
+
+ self.mox.StubOutWithMock(compute_utils,
+ 'add_instance_fault_from_exc')
+ self.mox.StubOutWithMock(notifications, 'send_update')
+ self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+
+ self.mox.StubOutWithMock(rpc, 'get_notifier')
+ notifier = self.mox.CreateMockAnything()
+ rpc.get_notifier(service).AndReturn(notifier)
+
+ old_ref = 'old_ref'
+ new_ref = 'new_ref'
+
+ for _uuid in expected_uuids:
+ db.instance_update_and_get_original(
+ self.context, _uuid, updates).AndReturn((old_ref, new_ref))
+ notifications.send_update(self.context, old_ref, new_ref,
+ service=service)
+ compute_utils.add_instance_fault_from_exc(
+ self.context,
+ new_ref, exc_info, mox.IsA(tuple))
+
+ payload = dict(request_spec=request_spec,
+ instance_properties=request_spec.get(
+ 'instance_properties', {}),
+ instance_id=_uuid,
+ state='fake-vm-state',
+ method=method,
+ reason=exc_info)
+ event_type = '%s.%s' % (service, method)
+ notifier.error(self.context, event_type, payload)
+
+ self.mox.ReplayAll()
+
+ scheduler_utils.set_vm_state_and_notify(self.context,
+ service,
+ method,
+ updates,
+ exc_info,
+ request_spec,
+ db)
+
+ def test_set_vm_state_and_notify_rs_uuids(self):
+ expected_uuids = ['1', '2', '3']
+ request_spec = dict(instance_uuids=expected_uuids)
+ self._test_set_vm_state_and_notify(request_spec, expected_uuids)
+
+ def test_set_vm_state_and_notify_uuid_from_instance_props(self):
+ expected_uuids = ['fake-uuid']
+ request_spec = dict(instance_properties=dict(uuid='fake-uuid'))
+ self._test_set_vm_state_and_notify(request_spec, expected_uuids)
+
+ def _test_populate_filter_props(self, host_state_obj=True,
+ with_retry=True,
+ force_hosts=None,
+ force_nodes=None):
+ if force_hosts is None:
+ force_hosts = []
+ if force_nodes is None:
+ force_nodes = []
+ if with_retry:
+ if not force_hosts and not force_nodes:
+ filter_properties = dict(retry=dict(hosts=[]))
+ else:
+ filter_properties = dict(force_hosts=force_hosts,
+ force_nodes=force_nodes)
+ else:
+ filter_properties = dict()
+
+ if host_state_obj:
+ class host_state(object):
+ host = 'fake-host'
+ nodename = 'fake-node'
+ limits = 'fake-limits'
+ else:
+ host_state = dict(host='fake-host',
+ nodename='fake-node',
+ limits='fake-limits')
+
+ scheduler_utils.populate_filter_properties(filter_properties,
+ host_state)
+ if with_retry and not force_hosts and not force_nodes:
+ # So we can check for 2 hosts
+ scheduler_utils.populate_filter_properties(filter_properties,
+ host_state)
+
+ if force_hosts:
+ expected_limits = None
+ else:
+ expected_limits = 'fake-limits'
+ self.assertEqual(expected_limits,
+ filter_properties.get('limits'))
+
+ if with_retry and not force_hosts and not force_nodes:
+ self.assertEqual([['fake-host', 'fake-node'],
+ ['fake-host', 'fake-node']],
+ filter_properties['retry']['hosts'])
+ else:
+ self.assertNotIn('retry', filter_properties)
+
+ def test_populate_filter_props(self):
+ self._test_populate_filter_props()
+
+ def test_populate_filter_props_host_dict(self):
+ self._test_populate_filter_props(host_state_obj=False)
+
+ def test_populate_filter_props_no_retry(self):
+ self._test_populate_filter_props(with_retry=False)
+
+ def test_populate_filter_props_force_hosts_no_retry(self):
+ self._test_populate_filter_props(force_hosts=['force-host'])
+
+ def test_populate_filter_props_force_nodes_no_retry(self):
+ self._test_populate_filter_props(force_nodes=['force-node'])
+
+ @mock.patch.object(scheduler_utils, '_max_attempts')
+ def test_populate_retry_exception_at_max_attempts(self, _max_attempts):
+ _max_attempts.return_value = 2
+ msg = 'The exception text was preserved!'
+ filter_properties = dict(retry=dict(num_attempts=2, hosts=[],
+ exc=[msg]))
+ nvh = self.assertRaises(exception.NoValidHost,
+ scheduler_utils.populate_retry,
+ filter_properties, 'fake-uuid')
+ # make sure 'msg' is a substring of the complete exception text
+ self.assertIn(msg, nvh.message)
+
+ def _check_parse_options(self, opts, sep, converter, expected):
+ good = scheduler_utils.parse_options(opts,
+ sep=sep,
+ converter=converter)
+ for item in expected:
+ self.assertIn(item, good)
+
+ def test_parse_options(self):
+ # check normal
+ self._check_parse_options(['foo=1', 'bar=-2.1'],
+ '=',
+ float,
+ [('foo', 1.0), ('bar', -2.1)])
+ # check convert error
+ self._check_parse_options(['foo=a1', 'bar=-2.1'],
+ '=',
+ float,
+ [('bar', -2.1)])
+ # check separator missing
+ self._check_parse_options(['foo', 'bar=-2.1'],
+ '=',
+ float,
+ [('bar', -2.1)])
+ # check key missing
+ self._check_parse_options(['=5', 'bar=-2.1'],
+ '=',
+ float,
+ [('bar', -2.1)])
+
+ def test_validate_filters_configured(self):
+ self.flags(scheduler_default_filters='FakeFilter1,FakeFilter2')
+ self.assertTrue(scheduler_utils.validate_filter('FakeFilter1'))
+ self.assertTrue(scheduler_utils.validate_filter('FakeFilter2'))
+ self.assertFalse(scheduler_utils.validate_filter('FakeFilter3'))
+
+ def _create_server_group(self, policy='anti-affinity'):
+ instance = fake_instance.fake_instance_obj(self.context,
+ params={'host': 'hostA'})
+
+ group = objects.InstanceGroup()
+ group.name = 'pele'
+ group.uuid = str(uuid.uuid4())
+ group.members = [instance.uuid]
+ group.policies = [policy]
+ return group
+
+ def _group_details_in_filter_properties(self, group, func='get_by_uuid',
+ hint=None, policy=None):
+ group_hint = hint
+ group_hosts = ['hostB']
+
+ with contextlib.nested(
+ mock.patch.object(objects.InstanceGroup, func, return_value=group),
+ mock.patch.object(objects.InstanceGroup, 'get_hosts',
+ return_value=['hostA']),
+ ) as (get_group, get_hosts):
+ scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
+ scheduler_utils._SUPPORTS_AFFINITY = None
+ group_info = scheduler_utils.setup_instance_group(
+ self.context, group_hint, group_hosts)
+ self.assertEqual(
+ (set(['hostA', 'hostB']), [policy]),
+ group_info)
+
+ def test_group_details_in_filter_properties(self):
+ for policy in ['affinity', 'anti-affinity']:
+ group = self._create_server_group(policy)
+ self._group_details_in_filter_properties(group, func='get_by_uuid',
+ hint=group.uuid,
+ policy=policy)
+
+ def _group_filter_with_filter_not_configured(self, policy):
+ self.flags(scheduler_default_filters=['f1', 'f2'])
+
+ instance = fake_instance.fake_instance_obj(self.context,
+ params={'host': 'hostA'})
+
+ group = objects.InstanceGroup()
+ group.uuid = str(uuid.uuid4())
+ group.members = [instance.uuid]
+ group.policies = [policy]
+
+ with contextlib.nested(
+ mock.patch.object(objects.InstanceGroup, 'get_by_uuid',
+ return_value=group),
+ mock.patch.object(objects.InstanceGroup, 'get_hosts',
+ return_value=['hostA']),
+ ) as (get_group, get_hosts):
+ scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
+ scheduler_utils._SUPPORTS_AFFINITY = None
+ self.assertRaises(exception.NoValidHost,
+ scheduler_utils.setup_instance_group,
+ self.context, group.uuid)
+
+ def test_group_filter_with_filter_not_configured(self):
+ policies = ['anti-affinity', 'affinity']
+ for policy in policies:
+ self._group_filter_with_filter_not_configured(policy)
+
+ def test_group_uuid_details_in_filter_properties(self):
+ group = self._create_server_group()
+ self._group_details_in_filter_properties(group, 'get_by_uuid',
+ group.uuid, 'anti-affinity')
+
+ def test_group_name_details_in_filter_properties(self):
+ group = self._create_server_group()
+ self._group_details_in_filter_properties(group, 'get_by_name',
+ group.name, 'anti-affinity')
diff --git a/nova/tests/unit/scheduler/test_weights.py b/nova/tests/unit/scheduler/test_weights.py
new file mode 100644
index 0000000000..5f168bf5df
--- /dev/null
+++ b/nova/tests/unit/scheduler/test_weights.py
@@ -0,0 +1,338 @@
+# Copyright 2011-2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler weights.
+"""
+
+from oslo.serialization import jsonutils
+
+from nova import context
+from nova import exception
+from nova.openstack.common.fixture import mockpatch
+from nova.scheduler import weights
+from nova import test
+from nova.tests.unit import matchers
+from nova.tests.unit.scheduler import fakes
+
+
+class TestWeighedHost(test.NoDBTestCase):
+ def test_dict_conversion(self):
+ host_state = fakes.FakeHostState('somehost', None, {})
+ host = weights.WeighedHost(host_state, 'someweight')
+ expected = {'weight': 'someweight',
+ 'host': 'somehost'}
+ self.assertThat(host.to_dict(), matchers.DictMatches(expected))
+
+ def test_all_weighers(self):
+ classes = weights.all_weighers()
+ class_names = [cls.__name__ for cls in classes]
+ self.assertIn('RAMWeigher', class_names)
+ self.assertIn('MetricsWeigher', class_names)
+ self.assertIn('IoOpsWeigher', class_names)
+
+
+class RamWeigherTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(RamWeigherTestCase, self).setUp()
+ self.useFixture(mockpatch.Patch(
+ 'nova.db.compute_node_get_all',
+ return_value=fakes.COMPUTE_NODES))
+ self.host_manager = fakes.FakeHostManager()
+ self.weight_handler = weights.HostWeightHandler()
+ self.weight_classes = self.weight_handler.get_matching_classes(
+ ['nova.scheduler.weights.ram.RAMWeigher'])
+
+ def _get_weighed_host(self, hosts, weight_properties=None):
+ if weight_properties is None:
+ weight_properties = {}
+ return self.weight_handler.get_weighed_objects(self.weight_classes,
+ hosts, weight_properties)[0]
+
+ def _get_all_hosts(self):
+ ctxt = context.get_admin_context()
+ return self.host_manager.get_all_host_states(ctxt)
+
+ def test_default_of_spreading_first(self):
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_ram_mb=512
+ # host2: free_ram_mb=1024
+ # host3: free_ram_mb=3072
+ # host4: free_ram_mb=8192
+
+ # so, host4 should win:
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(1.0, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
+
+ def test_ram_filter_multiplier1(self):
+ self.flags(ram_weight_multiplier=0.0)
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_ram_mb=512
+ # host2: free_ram_mb=1024
+ # host3: free_ram_mb=3072
+ # host4: free_ram_mb=8192
+
+        # With a zero multiplier, all hosts have the same weight, so no
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(0.0, weighed_host.weight)
+
+ def test_ram_filter_multiplier2(self):
+ self.flags(ram_weight_multiplier=2.0)
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_ram_mb=512
+ # host2: free_ram_mb=1024
+ # host3: free_ram_mb=3072
+ # host4: free_ram_mb=8192
+
+ # so, host4 should win:
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(1.0 * 2, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
+
+ def test_ram_filter_negative(self):
+ self.flags(ram_weight_multiplier=1.0)
+ hostinfo_list = self._get_all_hosts()
+ host_attr = {'id': 100, 'memory_mb': 8192, 'free_ram_mb': -512}
+ host_state = fakes.FakeHostState('negative', 'negative', host_attr)
+ hostinfo_list = list(hostinfo_list) + [host_state]
+
+ # host1: free_ram_mb=512
+ # host2: free_ram_mb=1024
+ # host3: free_ram_mb=3072
+ # host4: free_ram_mb=8192
+ # negativehost: free_ram_mb=-512
+
+ # so, host4 should win
+ weights = self.weight_handler.get_weighed_objects(self.weight_classes,
+ hostinfo_list, {})
+
+ weighed_host = weights[0]
+ self.assertEqual(1, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
+
+ # and negativehost should lose
+ weighed_host = weights[-1]
+ self.assertEqual(0, weighed_host.weight)
+ self.assertEqual('negative', weighed_host.obj.host)
+
+
+class MetricsWeigherTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(MetricsWeigherTestCase, self).setUp()
+ self.useFixture(mockpatch.Patch(
+ 'nova.db.compute_node_get_all',
+ return_value=fakes.COMPUTE_NODES_METRICS))
+ self.host_manager = fakes.FakeHostManager()
+ self.weight_handler = weights.HostWeightHandler()
+ self.weight_classes = self.weight_handler.get_matching_classes(
+ ['nova.scheduler.weights.metrics.MetricsWeigher'])
+
+ def _get_weighed_host(self, hosts, setting, weight_properties=None):
+ if not weight_properties:
+ weight_properties = {}
+ self.flags(weight_setting=setting, group='metrics')
+ return self.weight_handler.get_weighed_objects(self.weight_classes,
+ hosts, weight_properties)[0]
+
+ def _get_all_hosts(self):
+ ctxt = context.get_admin_context()
+ return self.host_manager.get_all_host_states(ctxt)
+
+ def _do_test(self, settings, expected_weight, expected_host):
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list, settings)
+ self.assertEqual(expected_weight, weighed_host.weight)
+ self.assertEqual(expected_host, weighed_host.obj.host)
+
+ def test_single_resource(self):
+ # host1: foo=512
+ # host2: foo=1024
+ # host3: foo=3072
+ # host4: foo=8192
+ # so, host4 should win:
+ setting = ['foo=1']
+ self._do_test(setting, 1.0, 'host4')
+
+ def test_multiple_resource(self):
+ # host1: foo=512, bar=1
+ # host2: foo=1024, bar=2
+ # host3: foo=3072, bar=1
+ # host4: foo=8192, bar=0
+ # so, host2 should win:
+ setting = ['foo=0.0001', 'bar=1']
+ self._do_test(setting, 1.0, 'host2')
+
+    def test_single_resource_negative_ratio(self):
+ # host1: foo=512
+ # host2: foo=1024
+ # host3: foo=3072
+ # host4: foo=8192
+ # so, host1 should win:
+ setting = ['foo=-1']
+ self._do_test(setting, 1.0, 'host1')
+
+ def test_multiple_resource_missing_ratio(self):
+ # host1: foo=512, bar=1
+ # host2: foo=1024, bar=2
+ # host3: foo=3072, bar=1
+ # host4: foo=8192, bar=0
+ # so, host4 should win:
+ setting = ['foo=0.0001', 'bar']
+ self._do_test(setting, 1.0, 'host4')
+
+ def test_multiple_resource_wrong_ratio(self):
+ # host1: foo=512, bar=1
+ # host2: foo=1024, bar=2
+ # host3: foo=3072, bar=1
+ # host4: foo=8192, bar=0
+ # so, host4 should win:
+ setting = ['foo=0.0001', 'bar = 2.0t']
+ self._do_test(setting, 1.0, 'host4')
+
+ def _check_parsing_result(self, weigher, setting, results):
+ self.flags(weight_setting=setting, group='metrics')
+ weigher._parse_setting()
+ self.assertEqual(len(weigher.setting), len(results))
+ for item in results:
+ self.assertIn(item, weigher.setting)
+
+ def test_parse_setting(self):
+ weigher = self.weight_classes[0]()
+ self._check_parsing_result(weigher,
+ ['foo=1'],
+ [('foo', 1.0)])
+ self._check_parsing_result(weigher,
+ ['foo=1', 'bar=-2.1'],
+ [('foo', 1.0), ('bar', -2.1)])
+ self._check_parsing_result(weigher,
+ ['foo=a1', 'bar=-2.1'],
+ [('bar', -2.1)])
+ self._check_parsing_result(weigher,
+ ['foo', 'bar=-2.1'],
+ [('bar', -2.1)])
+ self._check_parsing_result(weigher,
+ ['=5', 'bar=-2.1'],
+ [('bar', -2.1)])
+
+ def test_metric_not_found_required(self):
+ setting = ['foo=1', 'zot=2']
+ self.assertRaises(exception.ComputeHostMetricNotFound,
+ self._do_test,
+ setting,
+ 8192,
+ 'host4')
+
+ def test_metric_not_found_non_required(self):
+ # host1: foo=512, bar=1
+ # host2: foo=1024, bar=2
+ # host3: foo=3072, bar=1
+ # host4: foo=8192, bar=0
+ # host5: foo=768, bar=0, zot=1
+ # host6: foo=2048, bar=0, zot=2
+ # so, host5 should win:
+ self.flags(required=False, group='metrics')
+ setting = ['foo=0.0001', 'zot=-1']
+ self._do_test(setting, 1.0, 'host5')
+
+
+COMPUTE_NODES_IO_OPS = [
+ # host1: num_io_ops=1
+ dict(id=1, local_gb=1024, memory_mb=1024, vcpus=1,
+ disk_available_least=None, free_ram_mb=512, vcpus_used=1,
+ free_disk_gb=512, local_gb_used=0, updated_at=None,
+ service=dict(host='host1', disabled=False),
+ hypervisor_hostname='node1', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ stats=jsonutils.dumps({'io_workload': '1'})),
+ # host2: num_io_ops=2
+ dict(id=2, local_gb=2048, memory_mb=2048, vcpus=2,
+ disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
+ free_disk_gb=1024, local_gb_used=0, updated_at=None,
+ service=dict(host='host2', disabled=True),
+ hypervisor_hostname='node2', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ stats=jsonutils.dumps({'io_workload': '2'})),
+ # host3: num_io_ops=0, so host3 should win in the case of default
+ # io_ops_weight_multiplier configure.
+ dict(id=3, local_gb=4096, memory_mb=4096, vcpus=4,
+ disk_available_least=3333, free_ram_mb=3072, vcpus_used=1,
+ free_disk_gb=3072, local_gb_used=0, updated_at=None,
+ service=dict(host='host3', disabled=False),
+ hypervisor_hostname='node3', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ stats=jsonutils.dumps({'io_workload': '0'})),
+ # host4: num_io_ops=4, so host4 should win in the case of positive
+ # io_ops_weight_multiplier configure.
+ dict(id=4, local_gb=8192, memory_mb=8192, vcpus=8,
+ disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
+ free_disk_gb=8888, local_gb_used=0, updated_at=None,
+ service=dict(host='host4', disabled=False),
+ hypervisor_hostname='node4', host_ip='127.0.0.1',
+ hypervisor_version=0, numa_topology=None,
+ stats=jsonutils.dumps({'io_workload': '4'})),
+ # Broken entry
+ dict(id=5, local_gb=1024, memory_mb=1024, vcpus=1, service=None),
+]
+
+
+class IoOpsWeigherTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(IoOpsWeigherTestCase, self).setUp()
+ self.useFixture(mockpatch.Patch(
+ 'nova.db.compute_node_get_all',
+ return_value=COMPUTE_NODES_IO_OPS))
+ self.host_manager = fakes.FakeHostManager()
+ self.weight_handler = weights.HostWeightHandler()
+ self.weight_classes = self.weight_handler.get_matching_classes(
+ ['nova.scheduler.weights.io_ops.IoOpsWeigher'])
+
+ def _get_weighed_host(self, hosts, io_ops_weight_multiplier):
+ if io_ops_weight_multiplier is not None:
+ self.flags(io_ops_weight_multiplier=io_ops_weight_multiplier)
+ return self.weight_handler.get_weighed_objects(self.weight_classes,
+ hosts, {})[0]
+
+ def _get_all_hosts(self):
+ ctxt = context.get_admin_context()
+ return self.host_manager.get_all_host_states(ctxt)
+
+ def _do_test(self, io_ops_weight_multiplier, expected_weight,
+ expected_host):
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list,
+ io_ops_weight_multiplier)
+ self.assertEqual(weighed_host.weight, expected_weight)
+ if expected_host:
+ self.assertEqual(weighed_host.obj.host, expected_host)
+
+ def test_io_ops_weight_multiplier_by_default(self):
+ self._do_test(io_ops_weight_multiplier=None,
+ expected_weight=0.0,
+ expected_host='host3')
+
+ def test_io_ops_weight_multiplier_zero_value(self):
+        # With a zero multiplier, all hosts have the same weight, so no
+ self._do_test(io_ops_weight_multiplier=0.0,
+ expected_weight=0.0,
+ expected_host=None)
+
+ def test_io_ops_weight_multiplier_positive_value(self):
+ self._do_test(io_ops_weight_multiplier=2.0,
+ expected_weight=2.0,
+ expected_host='host4')
diff --git a/nova/tests/servicegroup/__init__.py b/nova/tests/unit/servicegroup/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/servicegroup/__init__.py
+++ b/nova/tests/unit/servicegroup/__init__.py
diff --git a/nova/tests/unit/servicegroup/test_db_servicegroup.py b/nova/tests/unit/servicegroup/test_db_servicegroup.py
new file mode 100644
index 0000000000..1cb47a6ce4
--- /dev/null
+++ b/nova/tests/unit/servicegroup/test_db_servicegroup.py
@@ -0,0 +1,144 @@
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+import fixtures
+from oslo.utils import timeutils
+
+from nova import context
+from nova import db
+from nova import service
+from nova import servicegroup
+from nova import test
+
+
+class ServiceFixture(fixtures.Fixture):
+
+ def __init__(self, host, binary, topic):
+ super(ServiceFixture, self).__init__()
+ self.host = host
+ self.binary = binary
+ self.topic = topic
+ self.serv = None
+
+ def setUp(self):
+ super(ServiceFixture, self).setUp()
+ self.serv = service.Service(self.host,
+ self.binary,
+ self.topic,
+ 'nova.tests.unit.test_service.FakeManager',
+ 1, 1)
+ self.addCleanup(self.serv.kill)
+
+
+class DBServiceGroupTestCase(test.TestCase):
+
+ def setUp(self):
+ super(DBServiceGroupTestCase, self).setUp()
+ servicegroup.API._driver = None
+ self.flags(servicegroup_driver='db')
+ self.down_time = 15
+ self.flags(enable_new_services=True)
+ self.flags(service_down_time=self.down_time)
+ self.servicegroup_api = servicegroup.API()
+ self._host = 'foo'
+ self._binary = 'nova-fake'
+ self._topic = 'unittest'
+ self._ctx = context.get_admin_context()
+
+ def test_DB_driver(self):
+ serv = self.useFixture(
+ ServiceFixture(self._host, self._binary, self._topic)).serv
+ serv.start()
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+
+ self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
+ self.useFixture(test.TimeOverride())
+ timeutils.advance_time_seconds(self.down_time + 1)
+ self.servicegroup_api._driver._report_state(serv)
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+
+ self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
+ serv.stop()
+ timeutils.advance_time_seconds(self.down_time + 1)
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+ self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
+
+ def test_get_all(self):
+ host1 = self._host + '_1'
+ host2 = self._host + '_2'
+
+ serv1 = self.useFixture(
+ ServiceFixture(host1, self._binary, self._topic)).serv
+ serv1.start()
+
+ serv2 = self.useFixture(
+ ServiceFixture(host2, self._binary, self._topic)).serv
+ serv2.start()
+
+ service_ref1 = db.service_get_by_args(self._ctx,
+ host1,
+ self._binary)
+ service_ref2 = db.service_get_by_args(self._ctx,
+ host2,
+ self._binary)
+
+ services = self.servicegroup_api.get_all(self._topic)
+
+ self.assertIn(service_ref1['host'], services)
+ self.assertIn(service_ref2['host'], services)
+
+ service_id = self.servicegroup_api.get_one(self._topic)
+ self.assertIn(service_id, services)
+
+ def test_service_is_up(self):
+ fts_func = datetime.datetime.fromtimestamp
+ fake_now = 1000
+ down_time = 15
+ self.flags(service_down_time=down_time)
+ self.mox.StubOutWithMock(timeutils, 'utcnow')
+ self.servicegroup_api = servicegroup.API()
+
+ # Up (equal)
+ timeutils.utcnow().AndReturn(fts_func(fake_now))
+ service = {'updated_at': fts_func(fake_now - self.down_time),
+ 'created_at': fts_func(fake_now - self.down_time)}
+ self.mox.ReplayAll()
+ result = self.servicegroup_api.service_is_up(service)
+ self.assertTrue(result)
+
+ self.mox.ResetAll()
+ # Up
+ timeutils.utcnow().AndReturn(fts_func(fake_now))
+ service = {'updated_at': fts_func(fake_now - self.down_time + 1),
+ 'created_at': fts_func(fake_now - self.down_time + 1)}
+ self.mox.ReplayAll()
+ result = self.servicegroup_api.service_is_up(service)
+ self.assertTrue(result)
+
+ self.mox.ResetAll()
+ # Down
+ timeutils.utcnow().AndReturn(fts_func(fake_now))
+ service = {'updated_at': fts_func(fake_now - self.down_time - 3),
+ 'created_at': fts_func(fake_now - self.down_time - 3)}
+ self.mox.ReplayAll()
+ result = self.servicegroup_api.service_is_up(service)
+ self.assertFalse(result)
diff --git a/nova/tests/unit/servicegroup/test_mc_servicegroup.py b/nova/tests/unit/servicegroup/test_mc_servicegroup.py
new file mode 100644
index 0000000000..b04d86de7d
--- /dev/null
+++ b/nova/tests/unit/servicegroup/test_mc_servicegroup.py
@@ -0,0 +1,213 @@
+# Copyright (c) 2013 Akira Yoshiyama <akirayoshiyama at gmail dot com>
+#
+# This is derived from test_db_servicegroup.py.
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+from oslo.utils import timeutils
+
+from nova import context
+from nova import db
+from nova import service
+from nova import servicegroup
+from nova import test
+
+
+class ServiceFixture(fixtures.Fixture):
+
+ def __init__(self, host, binary, topic):
+ super(ServiceFixture, self).__init__()
+ self.host = host
+ self.binary = binary
+ self.topic = topic
+ self.serv = None
+
+ def setUp(self):
+ super(ServiceFixture, self).setUp()
+ self.serv = service.Service(self.host,
+ self.binary,
+ self.topic,
+ 'nova.tests.unit.test_service.FakeManager',
+ 1, 1)
+ self.addCleanup(self.serv.kill)
+
+
+class MemcachedServiceGroupTestCase(test.TestCase):
+
+ def setUp(self):
+ super(MemcachedServiceGroupTestCase, self).setUp()
+ servicegroup.API._driver = None
+ self.flags(servicegroup_driver='mc')
+ self.down_time = 15
+ self.flags(enable_new_services=True)
+ self.flags(service_down_time=self.down_time)
+ self.servicegroup_api = servicegroup.API(test=True)
+ self._host = 'foo'
+ self._binary = 'nova-fake'
+ self._topic = 'unittest'
+ self._ctx = context.get_admin_context()
+
+ def test_memcached_driver(self):
+ serv = self.useFixture(
+ ServiceFixture(self._host, self._binary, self._topic)).serv
+ serv.start()
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+ hostkey = str("%s:%s" % (self._topic, self._host))
+ self.servicegroup_api._driver.mc.set(hostkey,
+ timeutils.utcnow(),
+ time=self.down_time)
+
+ self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
+ self.useFixture(test.TimeOverride())
+ timeutils.advance_time_seconds(self.down_time + 1)
+ self.servicegroup_api._driver._report_state(serv)
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+
+ self.assertTrue(self.servicegroup_api.service_is_up(service_ref))
+ serv.stop()
+ timeutils.advance_time_seconds(self.down_time + 1)
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+ self.assertFalse(self.servicegroup_api.service_is_up(service_ref))
+
+ def test_get_all(self):
+ host1 = self._host + '_1'
+ host2 = self._host + '_2'
+ host3 = self._host + '_3'
+
+ serv1 = self.useFixture(
+ ServiceFixture(host1, self._binary, self._topic)).serv
+ serv1.start()
+
+ serv2 = self.useFixture(
+ ServiceFixture(host2, self._binary, self._topic)).serv
+ serv2.start()
+
+ serv3 = self.useFixture(
+ ServiceFixture(host3, self._binary, self._topic)).serv
+ serv3.start()
+
+ db.service_get_by_args(self._ctx, host1, self._binary)
+ db.service_get_by_args(self._ctx, host2, self._binary)
+ db.service_get_by_args(self._ctx, host3, self._binary)
+
+ host1key = str("%s:%s" % (self._topic, host1))
+ host2key = str("%s:%s" % (self._topic, host2))
+ host3key = str("%s:%s" % (self._topic, host3))
+ self.servicegroup_api._driver.mc.set(host1key,
+ timeutils.utcnow(),
+ time=self.down_time)
+ self.servicegroup_api._driver.mc.set(host2key,
+ timeutils.utcnow(),
+ time=self.down_time)
+ self.servicegroup_api._driver.mc.set(host3key,
+ timeutils.utcnow(),
+ time=-1)
+
+ services = self.servicegroup_api.get_all(self._topic)
+
+ self.assertIn(host1, services)
+ self.assertIn(host2, services)
+ self.assertNotIn(host3, services)
+
+ service_id = self.servicegroup_api.get_one(self._topic)
+ self.assertIn(service_id, services)
+
+ def test_service_is_up(self):
+ serv = self.useFixture(
+ ServiceFixture(self._host, self._binary, self._topic)).serv
+ serv.start()
+ service_ref = db.service_get_by_args(self._ctx,
+ self._host,
+ self._binary)
+ fake_now = 1000
+ down_time = 15
+ self.flags(service_down_time=down_time)
+ self.mox.StubOutWithMock(timeutils, 'utcnow_ts')
+ self.servicegroup_api = servicegroup.API()
+ hostkey = str("%s:%s" % (self._topic, self._host))
+
+ # Up (equal)
+ timeutils.utcnow_ts().AndReturn(fake_now)
+ timeutils.utcnow_ts().AndReturn(fake_now + down_time - 1)
+ self.mox.ReplayAll()
+ self.servicegroup_api._driver.mc.set(hostkey,
+ timeutils.utcnow(),
+ time=down_time)
+ result = self.servicegroup_api.service_is_up(service_ref)
+ self.assertTrue(result)
+
+ self.mox.ResetAll()
+ # Up
+ timeutils.utcnow_ts().AndReturn(fake_now)
+ timeutils.utcnow_ts().AndReturn(fake_now + down_time - 2)
+ self.mox.ReplayAll()
+ self.servicegroup_api._driver.mc.set(hostkey,
+ timeutils.utcnow(),
+ time=down_time)
+ result = self.servicegroup_api.service_is_up(service_ref)
+ self.assertTrue(result)
+
+ self.mox.ResetAll()
+ # Down
+ timeutils.utcnow_ts().AndReturn(fake_now)
+ timeutils.utcnow_ts().AndReturn(fake_now + down_time)
+ self.mox.ReplayAll()
+ self.servicegroup_api._driver.mc.set(hostkey,
+ timeutils.utcnow(),
+ time=down_time)
+ result = self.servicegroup_api.service_is_up(service_ref)
+ self.assertFalse(result)
+
+ self.mox.ResetAll()
+ # Down
+ timeutils.utcnow_ts().AndReturn(fake_now)
+ timeutils.utcnow_ts().AndReturn(fake_now + down_time + 1)
+ self.mox.ReplayAll()
+ self.servicegroup_api._driver.mc.set(hostkey,
+ timeutils.utcnow(),
+ time=down_time)
+ result = self.servicegroup_api.service_is_up(service_ref)
+ self.assertFalse(result)
+
+ self.mox.ResetAll()
+
+ def test_report_state(self):
+ serv = self.useFixture(
+ ServiceFixture(self._host, self._binary, self._topic)).serv
+ serv.start()
+ db.service_get_by_args(self._ctx, self._host, self._binary)
+ self.servicegroup_api = servicegroup.API()
+
+ # updating model_disconnected
+ serv.model_disconnected = True
+ self.servicegroup_api._driver._report_state(serv)
+ self.assertFalse(serv.model_disconnected)
+
+ # handling exception
+ serv.model_disconnected = True
+ self.servicegroup_api._driver.mc = None
+ self.servicegroup_api._driver._report_state(serv)
+ self.assertTrue(serv.model_disconnected)
+
+ delattr(serv, 'model_disconnected')
+ self.servicegroup_api._driver.mc = None
+ self.servicegroup_api._driver._report_state(serv)
+ self.assertTrue(serv.model_disconnected)
diff --git a/nova/tests/unit/servicegroup/test_zk_driver.py b/nova/tests/unit/servicegroup/test_zk_driver.py
new file mode 100644
index 0000000000..5a9f23f5e0
--- /dev/null
+++ b/nova/tests/unit/servicegroup/test_zk_driver.py
@@ -0,0 +1,65 @@
+# Copyright (c) AT&T 2012-2013 Yun Mao <yunmao@gmail.com>
+# Copyright 2012 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Test the ZooKeeper driver for servicegroup.
+
+You need to install ZooKeeper locally and related dependencies
+to run the test. It's unclear how to install python-zookeeper lib
+in venv so you might have to run the test without it.
+
+To set up in Ubuntu 12.04:
+$ sudo apt-get install zookeeper zookeeperd python-zookeeper
+$ sudo pip install evzookeeper
+$ nosetests nova.tests.unit.servicegroup.test_zk_driver
+"""
+
+import eventlet
+
+from nova import servicegroup
+from nova import test
+
+
+class ZKServiceGroupTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(ZKServiceGroupTestCase, self).setUp()
+ servicegroup.API._driver = None
+ from nova.servicegroup.drivers import zk
+ self.flags(servicegroup_driver='zk')
+ self.flags(address='localhost:2181', group="zookeeper")
+ try:
+ zk.ZooKeeperDriver()
+ except ImportError:
+ self.skipTest("Unable to test due to lack of ZooKeeper")
+
+ def test_join_leave(self):
+ self.servicegroup_api = servicegroup.API()
+ service_id = {'topic': 'unittest', 'host': 'serviceA'}
+ self.servicegroup_api.join(service_id['host'], service_id['topic'])
+ self.assertTrue(self.servicegroup_api.service_is_up(service_id))
+ self.servicegroup_api.leave(service_id['host'], service_id['topic'])
+ # make sure zookeeper is updated and watcher is triggered
+ eventlet.sleep(1)
+ self.assertFalse(self.servicegroup_api.service_is_up(service_id))
+
+ def test_stop(self):
+ self.servicegroup_api = servicegroup.API()
+ service_id = {'topic': 'unittest', 'host': 'serviceA'}
+ pulse = self.servicegroup_api.join(service_id['host'],
+ service_id['topic'], None)
+ self.assertTrue(self.servicegroup_api.service_is_up(service_id))
+ pulse.stop()
+ eventlet.sleep(1)
+ self.assertFalse(self.servicegroup_api.service_is_up(service_id))
diff --git a/nova/tests/ssl_cert/ca.crt b/nova/tests/unit/ssl_cert/ca.crt
index 9d66ca6270..9d66ca6270 100644
--- a/nova/tests/ssl_cert/ca.crt
+++ b/nova/tests/unit/ssl_cert/ca.crt
diff --git a/nova/tests/ssl_cert/certificate.crt b/nova/tests/unit/ssl_cert/certificate.crt
index 3c1aa6363b..3c1aa6363b 100644
--- a/nova/tests/ssl_cert/certificate.crt
+++ b/nova/tests/unit/ssl_cert/certificate.crt
diff --git a/nova/tests/ssl_cert/privatekey.key b/nova/tests/unit/ssl_cert/privatekey.key
index b63df3d29d..b63df3d29d 100644
--- a/nova/tests/ssl_cert/privatekey.key
+++ b/nova/tests/unit/ssl_cert/privatekey.key
diff --git a/nova/tests/test_api_validation.py b/nova/tests/unit/test_api_validation.py
index bc694f4d70..bc694f4d70 100644
--- a/nova/tests/test_api_validation.py
+++ b/nova/tests/unit/test_api_validation.py
diff --git a/nova/tests/unit/test_availability_zones.py b/nova/tests/unit/test_availability_zones.py
new file mode 100644
index 0000000000..2066a8f370
--- /dev/null
+++ b/nova/tests/unit/test_availability_zones.py
@@ -0,0 +1,255 @@
+# Copyright 2013 Netease Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests for availability zones
+"""
+
+from oslo.config import cfg
+
+from nova import availability_zones as az
+from nova import context
+from nova import db
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+CONF = cfg.CONF
+CONF.import_opt('internal_service_availability_zone',
+ 'nova.availability_zones')
+CONF.import_opt('default_availability_zone',
+ 'nova.availability_zones')
+
+
+class AvailabilityZoneTestCases(test.TestCase):
+ """Test case for aggregate based availability zone."""
+
+ def setUp(self):
+ super(AvailabilityZoneTestCases, self).setUp()
+ self.host = 'me'
+ self.availability_zone = 'nova-test'
+ self.default_az = CONF.default_availability_zone
+ self.default_in_az = CONF.internal_service_availability_zone
+ self.context = context.get_admin_context()
+ self.agg = self._create_az('az_agg', self.availability_zone)
+
+ def tearDown(self):
+ db.aggregate_delete(self.context, self.agg['id'])
+ super(AvailabilityZoneTestCases, self).tearDown()
+
+ def _create_az(self, agg_name, az_name):
+ agg_meta = {'name': agg_name}
+ agg = db.aggregate_create(self.context, agg_meta)
+
+ metadata = {'availability_zone': az_name}
+ db.aggregate_metadata_add(self.context, agg['id'], metadata)
+
+ return agg
+
+ def _update_az(self, aggregate, az_name):
+ metadata = {'availability_zone': az_name}
+ db.aggregate_update(self.context, aggregate['id'], metadata)
+
+ def _create_service_with_topic(self, topic, host, disabled=False):
+ values = {
+ 'binary': 'bin',
+ 'host': host,
+ 'topic': topic,
+ 'disabled': disabled,
+ }
+ return db.service_create(self.context, values)
+
+ def _destroy_service(self, service):
+ return db.service_destroy(self.context, service['id'])
+
+ def _add_to_aggregate(self, service, aggregate):
+ return db.aggregate_host_add(self.context,
+ aggregate['id'], service['host'])
+
+ def _delete_from_aggregate(self, service, aggregate):
+ return db.aggregate_host_delete(self.context,
+ aggregate['id'], service['host'])
+
+ def test_rest_availability_zone_reset_cache(self):
+ az._get_cache().add('cache', 'fake_value')
+ az.reset_cache()
+ self.assertIsNone(az._get_cache().get('cache'))
+
+ def test_update_host_availability_zone_cache(self):
+ """Test availability zone cache could be update."""
+ service = self._create_service_with_topic('compute', self.host)
+
+ # Create a new aggregate with an AZ and add the host to the AZ
+ az_name = 'az1'
+ cache_key = az._make_cache_key(self.host)
+ agg_az1 = self._create_az('agg-az1', az_name)
+ self._add_to_aggregate(service, agg_az1)
+ az.update_host_availability_zone_cache(self.context, self.host)
+ self.assertEqual(az._get_cache().get(cache_key), 'az1')
+ az.update_host_availability_zone_cache(self.context, self.host, 'az2')
+ self.assertEqual(az._get_cache().get(cache_key), 'az2')
+
+ def test_set_availability_zone_compute_service(self):
+ """Test for compute service get right availability zone."""
+ service = self._create_service_with_topic('compute', self.host)
+ services = db.service_get_all(self.context)
+
+        # The service is not added to an aggregate, so confirm it is in the
+        # default availability zone.
+ new_service = az.set_availability_zones(self.context, services)[0]
+ self.assertEqual(new_service['availability_zone'],
+ self.default_az)
+
+ # The service is added into aggregate, confirm return the aggregate
+ # availability zone.
+ self._add_to_aggregate(service, self.agg)
+ new_service = az.set_availability_zones(self.context, services)[0]
+ self.assertEqual(new_service['availability_zone'],
+ self.availability_zone)
+
+ self._destroy_service(service)
+
+ def test_set_availability_zone_unicode_key(self):
+ """Test set availability zone cache key is unicode."""
+ service = self._create_service_with_topic('network', self.host)
+ services = db.service_get_all(self.context)
+ az.set_availability_zones(self.context, services)
+ self.assertIsInstance(services[0]['host'], unicode)
+ cached_key = az._make_cache_key(services[0]['host'])
+ self.assertIsInstance(cached_key, str)
+ self._destroy_service(service)
+
+ def test_set_availability_zone_not_compute_service(self):
+ """Test not compute service get right availability zone."""
+ service = self._create_service_with_topic('network', self.host)
+ services = db.service_get_all(self.context)
+ new_service = az.set_availability_zones(self.context, services)[0]
+ self.assertEqual(new_service['availability_zone'],
+ self.default_in_az)
+ self._destroy_service(service)
+
+ def test_get_host_availability_zone(self):
+ """Test get right availability zone by given host."""
+ self.assertEqual(self.default_az,
+ az.get_host_availability_zone(self.context, self.host))
+
+ service = self._create_service_with_topic('compute', self.host)
+ self._add_to_aggregate(service, self.agg)
+
+ self.assertEqual(self.availability_zone,
+ az.get_host_availability_zone(self.context, self.host))
+
+ def test_update_host_availability_zone(self):
+ """Test availability zone could be update by given host."""
+ service = self._create_service_with_topic('compute', self.host)
+
+ # Create a new aggregate with an AZ and add the host to the AZ
+ az_name = 'az1'
+ agg_az1 = self._create_az('agg-az1', az_name)
+ self._add_to_aggregate(service, agg_az1)
+ self.assertEqual(az_name,
+ az.get_host_availability_zone(self.context, self.host))
+ # Update AZ
+ new_az_name = 'az2'
+ self._update_az(agg_az1, new_az_name)
+ self.assertEqual(new_az_name,
+ az.get_host_availability_zone(self.context, self.host))
+
+ def test_delete_host_availability_zone(self):
+ """Test availability zone could be deleted successfully."""
+ service = self._create_service_with_topic('compute', self.host)
+
+ # Create a new aggregate with an AZ and add the host to the AZ
+ az_name = 'az1'
+ agg_az1 = self._create_az('agg-az1', az_name)
+ self._add_to_aggregate(service, agg_az1)
+ self.assertEqual(az_name,
+ az.get_host_availability_zone(self.context, self.host))
+ # Delete the AZ via deleting the aggregate
+ self._delete_from_aggregate(service, agg_az1)
+ self.assertEqual(self.default_az,
+ az.get_host_availability_zone(self.context, self.host))
+
+ def test_get_availability_zones(self):
+ """Test get_availability_zones."""
+
+        # When the get_only_available param of get_availability_zones is left
+        # at its default of False, it returns two lists: zones with at least
+        # one enabled service, and zones with no enabled services. When
+        # get_only_available is set to True, it returns only the list of
+        # zones with at least one enabled service.
+ # Use the following test data:
+ #
+ # zone host enabled
+ # nova-test host1 Yes
+ # nova-test host2 No
+ # nova-test2 host3 Yes
+ # nova-test3 host4 No
+ # <default> host5 No
+
+ agg2 = self._create_az('agg-az2', 'nova-test2')
+ agg3 = self._create_az('agg-az3', 'nova-test3')
+
+ service1 = self._create_service_with_topic('compute', 'host1',
+ disabled=False)
+ service2 = self._create_service_with_topic('compute', 'host2',
+ disabled=True)
+ service3 = self._create_service_with_topic('compute', 'host3',
+ disabled=False)
+ service4 = self._create_service_with_topic('compute', 'host4',
+ disabled=True)
+ self._create_service_with_topic('compute', 'host5',
+ disabled=True)
+
+ self._add_to_aggregate(service1, self.agg)
+ self._add_to_aggregate(service2, self.agg)
+ self._add_to_aggregate(service3, agg2)
+ self._add_to_aggregate(service4, agg3)
+
+ zones, not_zones = az.get_availability_zones(self.context)
+
+ self.assertEqual(zones, ['nova-test', 'nova-test2'])
+ self.assertEqual(not_zones, ['nova-test3', 'nova'])
+
+ zones = az.get_availability_zones(self.context, True)
+
+ self.assertEqual(zones, ['nova-test', 'nova-test2'])
+
+ zones, not_zones = az.get_availability_zones(self.context,
+ with_hosts=True)
+
+ self.assertEqual(zones, [(u'nova-test2', set([u'host3'])),
+ (u'nova-test', set([u'host1']))])
+ self.assertEqual(not_zones, [(u'nova-test3', set([u'host4'])),
+ (u'nova', set([u'host5']))])
+
+ def test_get_instance_availability_zone_default_value(self):
+ """Test get right availability zone by given an instance."""
+ fake_inst_id = 162
+ fake_inst = fakes.stub_instance(fake_inst_id, host=self.host)
+
+ self.assertEqual(self.default_az,
+ az.get_instance_availability_zone(self.context, fake_inst))
+
+ def test_get_instance_availability_zone_from_aggregate(self):
+ """Test get availability zone from aggregate by given an instance."""
+ host = 'host170'
+ service = self._create_service_with_topic('compute', host)
+ self._add_to_aggregate(service, self.agg)
+
+ fake_inst_id = 174
+ fake_inst = fakes.stub_instance(fake_inst_id, host=host)
+
+ self.assertEqual(self.availability_zone,
+ az.get_instance_availability_zone(self.context, fake_inst))
diff --git a/nova/tests/test_baserpc.py b/nova/tests/unit/test_baserpc.py
index d9013fb99e..d9013fb99e 100644
--- a/nova/tests/test_baserpc.py
+++ b/nova/tests/unit/test_baserpc.py
diff --git a/nova/tests/unit/test_bdm.py b/nova/tests/unit/test_bdm.py
new file mode 100644
index 0000000000..52a0ca45ef
--- /dev/null
+++ b/nova/tests/unit/test_bdm.py
@@ -0,0 +1,248 @@
+# Copyright 2011 Isaku Yamahata
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests for Block Device Mapping Code.
+"""
+
+from nova.api.ec2 import cloud
+from nova.api.ec2 import ec2utils
+from nova import test
+from nova.tests.unit import matchers
+
+
+class BlockDeviceMappingEc2CloudTestCase(test.NoDBTestCase):
+ """Test Case for Block Device Mapping."""
+
+ def fake_ec2_vol_id_to_uuid(obj, ec2_id):
+ if ec2_id == 'vol-87654321':
+ return '22222222-3333-4444-5555-666666666666'
+ elif ec2_id == 'vol-98765432':
+ return '77777777-8888-9999-0000-aaaaaaaaaaaa'
+ else:
+ return 'OhNoooo'
+
+ def fake_ec2_snap_id_to_uuid(obj, ec2_id):
+ if ec2_id == 'snap-12345678':
+ return '00000000-1111-2222-3333-444444444444'
+ elif ec2_id == 'snap-23456789':
+ return '11111111-2222-3333-4444-555555555555'
+ else:
+ return 'OhNoooo'
+
+ def _assertApply(self, action, bdm_list):
+ for bdm, expected_result in bdm_list:
+ self.assertThat(action(bdm), matchers.DictMatches(expected_result))
+
+ def test_parse_block_device_mapping(self):
+ self.stubs.Set(ec2utils,
+ 'ec2_vol_id_to_uuid',
+ self.fake_ec2_vol_id_to_uuid)
+ self.stubs.Set(ec2utils,
+ 'ec2_snap_id_to_uuid',
+ self.fake_ec2_snap_id_to_uuid)
+ bdm_list = [
+ ({'device_name': '/dev/fake0',
+ 'ebs': {'snapshot_id': 'snap-12345678',
+ 'volume_size': 1}},
+ {'device_name': '/dev/fake0',
+ 'snapshot_id': '00000000-1111-2222-3333-444444444444',
+ 'volume_size': 1,
+ 'delete_on_termination': True}),
+
+ ({'device_name': '/dev/fake1',
+ 'ebs': {'snapshot_id': 'snap-23456789',
+ 'delete_on_termination': False}},
+ {'device_name': '/dev/fake1',
+ 'snapshot_id': '11111111-2222-3333-4444-555555555555',
+ 'delete_on_termination': False}),
+
+ ({'device_name': '/dev/fake2',
+ 'ebs': {'snapshot_id': 'vol-87654321',
+ 'volume_size': 2}},
+ {'device_name': '/dev/fake2',
+ 'volume_id': '22222222-3333-4444-5555-666666666666',
+ 'volume_size': 2,
+ 'delete_on_termination': True}),
+
+ ({'device_name': '/dev/fake3',
+ 'ebs': {'snapshot_id': 'vol-98765432',
+ 'delete_on_termination': False}},
+ {'device_name': '/dev/fake3',
+ 'volume_id': '77777777-8888-9999-0000-aaaaaaaaaaaa',
+ 'delete_on_termination': False}),
+
+ ({'device_name': '/dev/fake4',
+ 'ebs': {'no_device': True}},
+ {'device_name': '/dev/fake4',
+ 'no_device': True}),
+
+ ({'device_name': '/dev/fake5',
+ 'virtual_name': 'ephemeral0'},
+ {'device_name': '/dev/fake5',
+ 'virtual_name': 'ephemeral0'}),
+
+ ({'device_name': '/dev/fake6',
+ 'virtual_name': 'swap'},
+ {'device_name': '/dev/fake6',
+ 'virtual_name': 'swap'}),
+ ]
+ self._assertApply(cloud._parse_block_device_mapping, bdm_list)
+
+ def test_format_block_device_mapping(self):
+ bdm_list = [
+ ({'device_name': '/dev/fake0',
+ 'snapshot_id': 0x12345678,
+ 'volume_size': 1,
+ 'delete_on_termination': True},
+ {'deviceName': '/dev/fake0',
+ 'ebs': {'snapshotId': 'snap-12345678',
+ 'volumeSize': 1,
+ 'deleteOnTermination': True}}),
+
+ ({'device_name': '/dev/fake1',
+ 'snapshot_id': 0x23456789},
+ {'deviceName': '/dev/fake1',
+ 'ebs': {'snapshotId': 'snap-23456789'}}),
+
+ ({'device_name': '/dev/fake2',
+ 'snapshot_id': 0x23456789,
+ 'delete_on_termination': False},
+ {'deviceName': '/dev/fake2',
+ 'ebs': {'snapshotId': 'snap-23456789',
+ 'deleteOnTermination': False}}),
+
+ ({'device_name': '/dev/fake3',
+ 'volume_id': 0x12345678,
+ 'volume_size': 1,
+ 'delete_on_termination': True},
+ {'deviceName': '/dev/fake3',
+ 'ebs': {'snapshotId': 'vol-12345678',
+ 'volumeSize': 1,
+ 'deleteOnTermination': True}}),
+
+ ({'device_name': '/dev/fake4',
+ 'volume_id': 0x23456789},
+ {'deviceName': '/dev/fake4',
+ 'ebs': {'snapshotId': 'vol-23456789'}}),
+
+ ({'device_name': '/dev/fake5',
+ 'volume_id': 0x23456789,
+ 'delete_on_termination': False},
+ {'deviceName': '/dev/fake5',
+ 'ebs': {'snapshotId': 'vol-23456789',
+ 'deleteOnTermination': False}}),
+ ]
+ self._assertApply(cloud._format_block_device_mapping, bdm_list)
+
+ def test_format_mapping(self):
+ properties = {
+ 'mappings': [
+ {'virtual': 'ami',
+ 'device': 'sda1'},
+ {'virtual': 'root',
+ 'device': '/dev/sda1'},
+
+ {'virtual': 'swap',
+ 'device': 'sdb1'},
+ {'virtual': 'swap',
+ 'device': 'sdb2'},
+ {'virtual': 'swap',
+ 'device': 'sdb3'},
+ {'virtual': 'swap',
+ 'device': 'sdb4'},
+
+ {'virtual': 'ephemeral0',
+ 'device': 'sdc1'},
+ {'virtual': 'ephemeral1',
+ 'device': 'sdc2'},
+ {'virtual': 'ephemeral2',
+ 'device': 'sdc3'},
+ ],
+
+ 'block_device_mapping': [
+ # root
+ {'device_name': '/dev/sda1',
+ 'snapshot_id': 0x12345678,
+ 'delete_on_termination': False},
+
+
+ # overwrite swap
+ {'device_name': '/dev/sdb2',
+ 'snapshot_id': 0x23456789,
+ 'delete_on_termination': False},
+ {'device_name': '/dev/sdb3',
+ 'snapshot_id': 0x3456789A},
+ {'device_name': '/dev/sdb4',
+ 'no_device': True},
+
+ # overwrite ephemeral
+ {'device_name': '/dev/sdc2',
+ 'snapshot_id': 0x3456789A,
+ 'delete_on_termination': False},
+ {'device_name': '/dev/sdc3',
+ 'snapshot_id': 0x456789AB},
+ {'device_name': '/dev/sdc4',
+ 'no_device': True},
+
+ # volume
+ {'device_name': '/dev/sdd1',
+ 'snapshot_id': 0x87654321,
+ 'delete_on_termination': False},
+ {'device_name': '/dev/sdd2',
+ 'snapshot_id': 0x98765432},
+ {'device_name': '/dev/sdd3',
+ 'snapshot_id': 0xA9875463},
+ {'device_name': '/dev/sdd4',
+ 'no_device': True}]}
+
+ expected_result = {
+ 'blockDeviceMapping': [
+ # root
+ {'deviceName': '/dev/sda1',
+ 'ebs': {'snapshotId': 'snap-12345678',
+ 'deleteOnTermination': False}},
+
+ # swap
+ {'deviceName': '/dev/sdb1',
+ 'virtualName': 'swap'},
+ {'deviceName': '/dev/sdb2',
+ 'ebs': {'snapshotId': 'snap-23456789',
+ 'deleteOnTermination': False}},
+ {'deviceName': '/dev/sdb3',
+ 'ebs': {'snapshotId': 'snap-3456789a'}},
+
+ # ephemeral
+ {'deviceName': '/dev/sdc1',
+ 'virtualName': 'ephemeral0'},
+ {'deviceName': '/dev/sdc2',
+ 'ebs': {'snapshotId': 'snap-3456789a',
+ 'deleteOnTermination': False}},
+ {'deviceName': '/dev/sdc3',
+ 'ebs': {'snapshotId': 'snap-456789ab'}},
+
+ # volume
+ {'deviceName': '/dev/sdd1',
+ 'ebs': {'snapshotId': 'snap-87654321',
+ 'deleteOnTermination': False}},
+ {'deviceName': '/dev/sdd2',
+ 'ebs': {'snapshotId': 'snap-98765432'}},
+ {'deviceName': '/dev/sdd3',
+ 'ebs': {'snapshotId': 'snap-a9875463'}}]}
+
+ result = {}
+ cloud._format_mappings(properties, result)
+ self.assertEqual(result['blockDeviceMapping'].sort(),
+ expected_result['blockDeviceMapping'].sort())
diff --git a/nova/tests/unit/test_block_device.py b/nova/tests/unit/test_block_device.py
new file mode 100644
index 0000000000..2dff327e88
--- /dev/null
+++ b/nova/tests/unit/test_block_device.py
@@ -0,0 +1,604 @@
+# Copyright 2011 Isaku Yamahata
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests for Block Device utility functions.
+"""
+
+from nova import block_device
+from nova import exception
+from nova import objects
+from nova import test
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import matchers
+
+
+class BlockDeviceTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(BlockDeviceTestCase, self).setUp()
+ BDM = block_device.BlockDeviceDict
+
+ self.new_mapping = [
+ BDM({'id': 1, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdb1',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'delete_on_termination': True,
+ 'volume_size': 1,
+ 'guest_format': 'swap',
+ 'boot_index': -1}),
+ BDM({'id': 2, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdc1',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'volume_size': 10,
+ 'delete_on_termination': True,
+ 'boot_index': -1}),
+ BDM({'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda1',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'volume_id': 'fake-volume-id-1',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'boot_index': 0}),
+ BDM({'id': 4, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda2',
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'snapshot_id': 'fake-snapshot-id-1',
+ 'volume_id': 'fake-volume-id-2',
+ 'boot_index': -1}),
+ BDM({'id': 5, 'instance_uuid': 'fake-instance',
+ 'no_device': True,
+ 'device_name': '/dev/vdc'}),
+ ]
+
+ def test_properties(self):
+ root_device0 = '/dev/sda'
+ root_device1 = '/dev/sdb'
+ mappings = [{'virtual': 'root',
+ 'device': root_device0}]
+
+ properties0 = {'mappings': mappings}
+ properties1 = {'mappings': mappings,
+ 'root_device_name': root_device1}
+
+ self.assertIsNone(block_device.properties_root_device_name({}))
+ self.assertEqual(
+ block_device.properties_root_device_name(properties0),
+ root_device0)
+ self.assertEqual(
+ block_device.properties_root_device_name(properties1),
+ root_device1)
+
+ def test_ephemeral(self):
+ self.assertFalse(block_device.is_ephemeral('ephemeral'))
+ self.assertTrue(block_device.is_ephemeral('ephemeral0'))
+ self.assertTrue(block_device.is_ephemeral('ephemeral1'))
+ self.assertTrue(block_device.is_ephemeral('ephemeral11'))
+ self.assertFalse(block_device.is_ephemeral('root'))
+ self.assertFalse(block_device.is_ephemeral('swap'))
+ self.assertFalse(block_device.is_ephemeral('/dev/sda1'))
+
+ self.assertEqual(block_device.ephemeral_num('ephemeral0'), 0)
+ self.assertEqual(block_device.ephemeral_num('ephemeral1'), 1)
+ self.assertEqual(block_device.ephemeral_num('ephemeral11'), 11)
+
+ self.assertFalse(block_device.is_swap_or_ephemeral('ephemeral'))
+ self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral0'))
+ self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral1'))
+ self.assertTrue(block_device.is_swap_or_ephemeral('swap'))
+ self.assertFalse(block_device.is_swap_or_ephemeral('root'))
+ self.assertFalse(block_device.is_swap_or_ephemeral('/dev/sda1'))
+
+ def test_mappings_prepend_dev(self):
+ mapping = [
+ {'virtual': 'ami', 'device': '/dev/sda'},
+ {'virtual': 'root', 'device': 'sda'},
+ {'virtual': 'ephemeral0', 'device': 'sdb'},
+ {'virtual': 'swap', 'device': 'sdc'},
+ {'virtual': 'ephemeral1', 'device': 'sdd'},
+ {'virtual': 'ephemeral2', 'device': 'sde'}]
+
+ expected = [
+ {'virtual': 'ami', 'device': '/dev/sda'},
+ {'virtual': 'root', 'device': 'sda'},
+ {'virtual': 'ephemeral0', 'device': '/dev/sdb'},
+ {'virtual': 'swap', 'device': '/dev/sdc'},
+ {'virtual': 'ephemeral1', 'device': '/dev/sdd'},
+ {'virtual': 'ephemeral2', 'device': '/dev/sde'}]
+
+ prepended = block_device.mappings_prepend_dev(mapping)
+ self.assertEqual(prepended.sort(), expected.sort())
+
+ def test_strip_dev(self):
+ self.assertEqual(block_device.strip_dev('/dev/sda'), 'sda')
+ self.assertEqual(block_device.strip_dev('sda'), 'sda')
+
+ def test_strip_prefix(self):
+ self.assertEqual(block_device.strip_prefix('/dev/sda'), 'a')
+ self.assertEqual(block_device.strip_prefix('a'), 'a')
+ self.assertEqual(block_device.strip_prefix('xvda'), 'a')
+ self.assertEqual(block_device.strip_prefix('vda'), 'a')
+
+ def test_get_device_letter(self):
+ self.assertEqual(block_device.get_device_letter(''), '')
+ self.assertEqual(block_device.get_device_letter('/dev/sda1'), 'a')
+ self.assertEqual(block_device.get_device_letter('/dev/xvdb'), 'b')
+ self.assertEqual(block_device.get_device_letter('/dev/d'), 'd')
+ self.assertEqual(block_device.get_device_letter('a'), 'a')
+ self.assertEqual(block_device.get_device_letter('sdb2'), 'b')
+ self.assertEqual(block_device.get_device_letter('vdc'), 'c')
+
+ def test_volume_in_mapping(self):
+ swap = {'device_name': '/dev/sdb',
+ 'swap_size': 1}
+ ephemerals = [{'num': 0,
+ 'virtual_name': 'ephemeral0',
+ 'device_name': '/dev/sdc1',
+ 'size': 1},
+ {'num': 2,
+ 'virtual_name': 'ephemeral2',
+ 'device_name': '/dev/sdd',
+ 'size': 1}]
+ block_device_mapping = [{'mount_device': '/dev/sde',
+ 'device_path': 'fake_device'},
+ {'mount_device': '/dev/sdf',
+ 'device_path': 'fake_device'}]
+ block_device_info = {
+ 'root_device_name': '/dev/sda',
+ 'swap': swap,
+ 'ephemerals': ephemerals,
+ 'block_device_mapping': block_device_mapping}
+
+ def _assert_volume_in_mapping(device_name, true_or_false):
+ in_mapping = block_device.volume_in_mapping(
+ device_name, block_device_info)
+ self.assertEqual(in_mapping, true_or_false)
+
+ _assert_volume_in_mapping('sda', False)
+ _assert_volume_in_mapping('sdb', True)
+ _assert_volume_in_mapping('sdc1', True)
+ _assert_volume_in_mapping('sdd', True)
+ _assert_volume_in_mapping('sde', True)
+ _assert_volume_in_mapping('sdf', True)
+ _assert_volume_in_mapping('sdg', False)
+ _assert_volume_in_mapping('sdh1', False)
+
+ def test_get_root_bdm(self):
+ root_bdm = {'device_name': 'vda', 'boot_index': 0}
+ bdms = [root_bdm,
+ {'device_name': 'vdb', 'boot_index': 1},
+ {'device_name': 'vdc', 'boot_index': -1},
+ {'device_name': 'vdd'}]
+ self.assertEqual(root_bdm, block_device.get_root_bdm(bdms))
+ self.assertEqual(root_bdm, block_device.get_root_bdm([bdms[0]]))
+ self.assertIsNone(block_device.get_root_bdm(bdms[1:]))
+ self.assertIsNone(block_device.get_root_bdm(bdms[2:]))
+ self.assertIsNone(block_device.get_root_bdm(bdms[3:]))
+ self.assertIsNone(block_device.get_root_bdm([]))
+
+ def test_get_bdm_ephemeral_disk_size(self):
+ size = block_device.get_bdm_ephemeral_disk_size(self.new_mapping)
+ self.assertEqual(10, size)
+
+ def test_get_bdm_swap_list(self):
+ swap_list = block_device.get_bdm_swap_list(self.new_mapping)
+ self.assertEqual(1, len(swap_list))
+ self.assertEqual(1, swap_list[0].get('id'))
+
+ def test_get_bdm_local_disk_num(self):
+ size = block_device.get_bdm_local_disk_num(self.new_mapping)
+ self.assertEqual(2, size)
+
+ def test_new_format_is_swap(self):
+ expected_results = [True, False, False, False, False]
+ for expected, bdm in zip(expected_results, self.new_mapping):
+ res = block_device.new_format_is_swap(bdm)
+ self.assertEqual(expected, res)
+
+ def test_new_format_is_ephemeral(self):
+ expected_results = [False, True, False, False, False]
+ for expected, bdm in zip(expected_results, self.new_mapping):
+ res = block_device.new_format_is_ephemeral(bdm)
+ self.assertEqual(expected, res)
+
+ def test_validate_device_name(self):
+ for value in [' ', 10, None, 'a' * 260]:
+ self.assertRaises(exception.InvalidBDMFormat,
+ block_device.validate_device_name,
+ value)
+
+ def test_validate_and_default_volume_size(self):
+ bdm = {}
+ for value in [-1, 'a', 2.5]:
+ bdm['volume_size'] = value
+ self.assertRaises(exception.InvalidBDMFormat,
+ block_device.validate_and_default_volume_size,
+ bdm)
+
+ def test_get_bdms_to_connect(self):
+ root_bdm = {'device_name': 'vda', 'boot_index': 0}
+ bdms = [root_bdm,
+ {'device_name': 'vdb', 'boot_index': 1},
+ {'device_name': 'vdc', 'boot_index': -1},
+ {'device_name': 'vde', 'boot_index': None},
+ {'device_name': 'vdd'}]
+ self.assertNotIn(root_bdm, block_device.get_bdms_to_connect(bdms,
+ exclude_root_mapping=True))
+ self.assertIn(root_bdm, block_device.get_bdms_to_connect(bdms))
+
+
+class TestBlockDeviceDict(test.NoDBTestCase):
+ def setUp(self):
+ super(TestBlockDeviceDict, self).setUp()
+
+ BDM = block_device.BlockDeviceDict
+
+ self.api_mapping = [
+ {'id': 1, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdb1',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'delete_on_termination': True,
+ 'guest_format': 'swap',
+ 'boot_index': -1},
+ {'id': 2, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdc1',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'delete_on_termination': True,
+ 'boot_index': -1},
+ {'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda1',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'uuid': 'fake-volume-id-1',
+ 'boot_index': 0},
+ {'id': 4, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda2',
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'uuid': 'fake-snapshot-id-1',
+ 'boot_index': -1},
+ {'id': 5, 'instance_uuid': 'fake-instance',
+ 'no_device': True,
+ 'device_name': '/dev/vdc'},
+ ]
+
+ self.new_mapping = [
+ BDM({'id': 1, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdb1',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'delete_on_termination': True,
+ 'guest_format': 'swap',
+ 'boot_index': -1}),
+ BDM({'id': 2, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdc1',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'delete_on_termination': True,
+ 'boot_index': -1}),
+ BDM({'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda1',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'volume_id': 'fake-volume-id-1',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'boot_index': 0}),
+ BDM({'id': 4, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda2',
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'snapshot_id': 'fake-snapshot-id-1',
+ 'volume_id': 'fake-volume-id-2',
+ 'boot_index': -1}),
+ BDM({'id': 5, 'instance_uuid': 'fake-instance',
+ 'no_device': True,
+ 'device_name': '/dev/vdc'}),
+ ]
+
+ self.legacy_mapping = [
+ {'id': 1, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdb1',
+ 'delete_on_termination': True,
+ 'virtual_name': 'swap'},
+ {'id': 2, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdc1',
+ 'delete_on_termination': True,
+ 'virtual_name': 'ephemeral0'},
+ {'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda1',
+ 'volume_id': 'fake-volume-id-1',
+ 'connection_info': "{'fake': 'connection_info'}"},
+ {'id': 4, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda2',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'snapshot_id': 'fake-snapshot-id-1',
+ 'volume_id': 'fake-volume-id-2'},
+ {'id': 5, 'instance_uuid': 'fake-instance',
+ 'no_device': True,
+ 'device_name': '/dev/vdc'},
+ ]
+
+ self.new_mapping_source_image = [
+ BDM({'id': 6, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda3',
+ 'source_type': 'image',
+ 'destination_type': 'volume',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'volume_id': 'fake-volume-id-3',
+ 'boot_index': -1}),
+ BDM({'id': 7, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda4',
+ 'source_type': 'image',
+ 'destination_type': 'local',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'image_id': 'fake-image-id-2',
+ 'boot_index': -1}),
+ ]
+
+ self.legacy_mapping_source_image = [
+ {'id': 6, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda3',
+ 'connection_info': "{'fake': 'connection_info'}",
+ 'volume_id': 'fake-volume-id-3'},
+ ]
+
+ def test_init(self):
+ def fake_validate(obj, dct):
+ pass
+
+ self.stubs.Set(block_device.BlockDeviceDict, '_fields',
+ set(['field1', 'field2']))
+ self.stubs.Set(block_device.BlockDeviceDict, '_db_only_fields',
+ set(['db_field1', 'db_field2']))
+ self.stubs.Set(block_device.BlockDeviceDict, '_validate',
+ fake_validate)
+
+ # Make sure db fields are not picked up if they are not
+ # in the original dict
+ dev_dict = block_device.BlockDeviceDict({'field1': 'foo',
+ 'field2': 'bar',
+ 'db_field1': 'baz'})
+ self.assertIn('field1', dev_dict)
+ self.assertIn('field2', dev_dict)
+ self.assertIn('db_field1', dev_dict)
+ self.assertNotIn('db_field2', dev_dict)
+
+ # Make sure all expected fields are defaulted
+ dev_dict = block_device.BlockDeviceDict({'field1': 'foo'})
+ self.assertIn('field1', dev_dict)
+ self.assertIn('field2', dev_dict)
+ self.assertIsNone(dev_dict['field2'])
+ self.assertNotIn('db_field1', dev_dict)
+ self.assertNotIn('db_field2', dev_dict)
+
+ # Unless they are not meant to be
+ dev_dict = block_device.BlockDeviceDict({'field1': 'foo'},
+ do_not_default=set(['field2']))
+ self.assertIn('field1', dev_dict)
+ self.assertNotIn('field2', dev_dict)
+ self.assertNotIn('db_field1', dev_dict)
+ self.assertNotIn('db_field2', dev_dict)
+
+ # Passing kwargs to constructor works
+ dev_dict = block_device.BlockDeviceDict(field1='foo')
+ self.assertIn('field1', dev_dict)
+ self.assertIn('field2', dev_dict)
+ self.assertIsNone(dev_dict['field2'])
+ dev_dict = block_device.BlockDeviceDict(
+ {'field1': 'foo'}, field2='bar')
+ self.assertEqual('foo', dev_dict['field1'])
+ self.assertEqual('bar', dev_dict['field2'])
+
+ def test_init_prepend_dev_to_device_name(self):
+ bdm = {'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': 'vda',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'volume_id': 'fake-volume-id-1',
+ 'boot_index': 0}
+ bdm_dict = block_device.BlockDeviceDict(bdm)
+ self.assertEqual('/dev/vda', bdm_dict['device_name'])
+
+ bdm['device_name'] = '/dev/vdb'
+ bdm_dict = block_device.BlockDeviceDict(bdm)
+ self.assertEqual('/dev/vdb', bdm_dict['device_name'])
+
+ bdm['device_name'] = None
+ bdm_dict = block_device.BlockDeviceDict(bdm)
+ self.assertIsNone(bdm_dict['device_name'])
+
+ def test_validate(self):
+ self.assertRaises(exception.InvalidBDMFormat,
+ block_device.BlockDeviceDict,
+ {'bogus_field': 'lame_val'})
+
+ lame_bdm = dict(self.new_mapping[2])
+ del lame_bdm['source_type']
+ self.assertRaises(exception.InvalidBDMFormat,
+ block_device.BlockDeviceDict,
+ lame_bdm)
+
+ lame_bdm['no_device'] = True
+ block_device.BlockDeviceDict(lame_bdm)
+
+ lame_dev_bdm = dict(self.new_mapping[2])
+ lame_dev_bdm['device_name'] = "not a valid name"
+ self.assertRaises(exception.InvalidBDMFormat,
+ block_device.BlockDeviceDict,
+ lame_dev_bdm)
+
+ lame_dev_bdm['device_name'] = ""
+ self.assertRaises(exception.InvalidBDMFormat,
+ block_device.BlockDeviceDict,
+ lame_dev_bdm)
+
+ cool_volume_size_bdm = dict(self.new_mapping[2])
+ cool_volume_size_bdm['volume_size'] = '42'
+ cool_volume_size_bdm = block_device.BlockDeviceDict(
+ cool_volume_size_bdm)
+ self.assertEqual(cool_volume_size_bdm['volume_size'], 42)
+
+ lame_volume_size_bdm = dict(self.new_mapping[2])
+ lame_volume_size_bdm['volume_size'] = 'some_non_int_string'
+ self.assertRaises(exception.InvalidBDMFormat,
+ block_device.BlockDeviceDict,
+ lame_volume_size_bdm)
+
+ truthy_bdm = dict(self.new_mapping[2])
+ truthy_bdm['delete_on_termination'] = '1'
+ truthy_bdm = block_device.BlockDeviceDict(truthy_bdm)
+ self.assertEqual(truthy_bdm['delete_on_termination'], True)
+
+ verbose_bdm = dict(self.new_mapping[2])
+ verbose_bdm['boot_index'] = 'first'
+ self.assertRaises(exception.InvalidBDMFormat,
+ block_device.BlockDeviceDict,
+ verbose_bdm)
+
+ def test_from_legacy(self):
+ for legacy, new in zip(self.legacy_mapping, self.new_mapping):
+ self.assertThat(
+ block_device.BlockDeviceDict.from_legacy(legacy),
+ matchers.IsSubDictOf(new))
+
+ def test_from_legacy_mapping(self):
+ def _get_image_bdms(bdms):
+ return [bdm for bdm in bdms if bdm['source_type'] == 'image']
+
+ def _get_bootable_bdms(bdms):
+ return [bdm for bdm in bdms if bdm['boot_index'] >= 0]
+
+ new_no_img = block_device.from_legacy_mapping(self.legacy_mapping)
+ self.assertEqual(len(_get_image_bdms(new_no_img)), 0)
+
+ for new, expected in zip(new_no_img, self.new_mapping):
+ self.assertThat(new, matchers.IsSubDictOf(expected))
+
+ new_with_img = block_device.from_legacy_mapping(
+ self.legacy_mapping, 'fake_image_ref')
+ image_bdms = _get_image_bdms(new_with_img)
+ boot_bdms = _get_bootable_bdms(new_with_img)
+ self.assertEqual(len(image_bdms), 1)
+ self.assertEqual(len(boot_bdms), 1)
+ self.assertEqual(image_bdms[0]['boot_index'], 0)
+ self.assertEqual(boot_bdms[0]['source_type'], 'image')
+
+ new_with_img_and_root = block_device.from_legacy_mapping(
+ self.legacy_mapping, 'fake_image_ref', 'sda1')
+ image_bdms = _get_image_bdms(new_with_img_and_root)
+ boot_bdms = _get_bootable_bdms(new_with_img_and_root)
+ self.assertEqual(len(image_bdms), 0)
+ self.assertEqual(len(boot_bdms), 1)
+ self.assertEqual(boot_bdms[0]['boot_index'], 0)
+ self.assertEqual(boot_bdms[0]['source_type'], 'volume')
+
+ new_no_root = block_device.from_legacy_mapping(
+ self.legacy_mapping, 'fake_image_ref', 'sda1', no_root=True)
+ self.assertEqual(len(_get_image_bdms(new_no_root)), 0)
+ self.assertEqual(len(_get_bootable_bdms(new_no_root)), 0)
+
+ def test_from_api(self):
+ for api, new in zip(self.api_mapping, self.new_mapping):
+ new['connection_info'] = None
+ if new['snapshot_id']:
+ new['volume_id'] = None
+ self.assertThat(
+ block_device.BlockDeviceDict.from_api(api),
+ matchers.IsSubDictOf(new))
+
+ def test_from_api_invalid_blank_id(self):
+ api_dict = {'id': 1,
+ 'source_type': 'blank',
+ 'destination_type': 'volume',
+ 'uuid': 'fake-volume-id-1',
+ 'delete_on_termination': True,
+ 'boot_index': -1}
+ self.assertRaises(exception.InvalidBDMFormat,
+ block_device.BlockDeviceDict.from_api, api_dict)
+
+ def test_legacy(self):
+ for legacy, new in zip(self.legacy_mapping, self.new_mapping):
+ self.assertThat(
+ legacy,
+ matchers.IsSubDictOf(new.legacy()))
+
+ def test_legacy_mapping(self):
+ got_legacy = block_device.legacy_mapping(self.new_mapping)
+
+ for legacy, expected in zip(got_legacy, self.legacy_mapping):
+ self.assertThat(expected, matchers.IsSubDictOf(legacy))
+
+ def test_legacy_source_image(self):
+ for legacy, new in zip(self.legacy_mapping_source_image,
+ self.new_mapping_source_image):
+ if new['destination_type'] == 'volume':
+ self.assertThat(legacy, matchers.IsSubDictOf(new.legacy()))
+ else:
+ self.assertRaises(exception.InvalidBDMForLegacy, new.legacy)
+
+ def test_legacy_mapping_source_image(self):
+ got_legacy = block_device.legacy_mapping(self.new_mapping)
+
+ for legacy, expected in zip(got_legacy, self.legacy_mapping):
+ self.assertThat(expected, matchers.IsSubDictOf(legacy))
+
+ def test_legacy_mapping_from_object_list(self):
+ bdm1 = objects.BlockDeviceMapping()
+ bdm1 = objects.BlockDeviceMapping._from_db_object(
+ None, bdm1, fake_block_device.FakeDbBlockDeviceDict(
+ self.new_mapping[0]))
+ bdm2 = objects.BlockDeviceMapping()
+ bdm2 = objects.BlockDeviceMapping._from_db_object(
+ None, bdm2, fake_block_device.FakeDbBlockDeviceDict(
+ self.new_mapping[1]))
+ bdmlist = objects.BlockDeviceMappingList()
+ bdmlist.objects = [bdm1, bdm2]
+ block_device.legacy_mapping(bdmlist)
+
+ def test_image_mapping(self):
+ removed_fields = ['id', 'instance_uuid', 'connection_info',
+ 'device_name', 'created_at', 'updated_at',
+ 'deleted_at', 'deleted']
+ for bdm in self.new_mapping:
+ mapping_bdm = fake_block_device.FakeDbBlockDeviceDict(
+ bdm).get_image_mapping()
+ for fld in removed_fields:
+ self.assertNotIn(fld, mapping_bdm)
+
+ def _test_snapshot_from_bdm(self, template):
+ snapshot = block_device.snapshot_from_bdm('new-snapshot-id', template)
+ self.assertEqual(snapshot['snapshot_id'], 'new-snapshot-id')
+ self.assertEqual(snapshot['source_type'], 'snapshot')
+ self.assertEqual(snapshot['destination_type'], 'volume')
+ for key in ['disk_bus', 'device_type', 'boot_index']:
+ self.assertEqual(snapshot[key], template[key])
+
+ def test_snapshot_from_bdm(self):
+ for bdm in self.new_mapping:
+ self._test_snapshot_from_bdm(bdm)
+
+ def test_snapshot_from_object(self):
+ for bdm in self.new_mapping[:-1]:
+ obj = objects.BlockDeviceMapping()
+ obj = objects.BlockDeviceMapping._from_db_object(
+ None, obj, fake_block_device.FakeDbBlockDeviceDict(
+ bdm))
+ self._test_snapshot_from_bdm(obj)
diff --git a/nova/tests/test_cinder.py b/nova/tests/unit/test_cinder.py
index 913b4e4de7..913b4e4de7 100644
--- a/nova/tests/test_cinder.py
+++ b/nova/tests/unit/test_cinder.py
diff --git a/nova/tests/unit/test_configdrive2.py b/nova/tests/unit/test_configdrive2.py
new file mode 100644
index 0000000000..f6bcaea99d
--- /dev/null
+++ b/nova/tests/unit/test_configdrive2.py
@@ -0,0 +1,104 @@
+# Copyright 2012 Michael Still and Canonical Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import os
+import tempfile
+
+import mox
+from oslo.config import cfg
+
+from nova import context
+from nova.openstack.common import fileutils
+from nova import test
+from nova.tests.unit import fake_instance
+from nova import utils
+from nova.virt import configdrive
+
+CONF = cfg.CONF
+
+
class FakeInstanceMD(object):
    """Minimal stand-in for instance metadata fed to ConfigDriveBuilder."""

    def metadata_for_config_drive(self):
        """Yield (path, contents) pairs to be written to the drive."""
        entries = [('this/is/a/path/hello', 'This is some content')]
        for entry in entries:
            yield entry
+
+
class ConfigDriveTestCase(test.NoDBTestCase):
    """Tests for nova.virt.configdrive building and required_by()."""

    def test_create_configdrive_iso(self):
        # Building an iso9660 drive should shell out to genisoimage once,
        # without root privileges.
        CONF.set_override('config_drive_format', 'iso9660')
        imagefile = None

        try:
            self.mox.StubOutWithMock(utils, 'execute')

            utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots',
                          '-allow-lowercase', '-allow-multidot', '-l',
                          '-publisher', mox.IgnoreArg(), '-quiet', '-J', '-r',
                          '-V', 'config-2', mox.IgnoreArg(), attempts=1,
                          run_as_root=False).AndReturn(None)

            self.mox.ReplayAll()

            with configdrive.ConfigDriveBuilder(FakeInstanceMD()) as c:
                (fd, imagefile) = tempfile.mkstemp(prefix='cd_iso_')
                os.close(fd)
                c.make_drive(imagefile)

        finally:
            # Always remove the temp image, even if the mocked expectations
            # fail.
            if imagefile:
                fileutils.delete_if_exists(imagefile)

    def test_create_configdrive_vfat(self):
        # Building a vfat drive should mkfs, mount, populate and unmount;
        # the root-requiring steps are all mocked out.
        CONF.set_override('config_drive_format', 'vfat')
        imagefile = None
        try:
            self.mox.StubOutWithMock(utils, 'mkfs')
            self.mox.StubOutWithMock(utils, 'execute')
            self.mox.StubOutWithMock(utils, 'trycmd')

            utils.mkfs('vfat', mox.IgnoreArg(),
                       label='config-2').AndReturn(None)
            utils.trycmd('mount', '-o', mox.IgnoreArg(), mox.IgnoreArg(),
                         mox.IgnoreArg(),
                         run_as_root=True).AndReturn((None, None))
            utils.execute('umount', mox.IgnoreArg(),
                          run_as_root=True).AndReturn(None)

            self.mox.ReplayAll()

            with configdrive.ConfigDriveBuilder(FakeInstanceMD()) as c:
                (fd, imagefile) = tempfile.mkstemp(prefix='cd_vfat_')
                os.close(fd)
                c.make_drive(imagefile)

            # NOTE(mikal): we can't check for a VFAT output here because the
            # filesystem creation stuff has been mocked out because it
            # requires root permissions

        finally:
            if imagefile:
                fileutils.delete_if_exists(imagefile)

    def test_config_drive_required_by_image_property(self):
        # required_by() honours the img_config_drive image property stored
        # in the instance system metadata.
        inst = fake_instance.fake_instance_obj(context.get_admin_context())
        inst.config_drive = ''
        inst.system_metadata = {
            utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive': 'mandatory'}
        self.assertTrue(configdrive.required_by(inst))

        inst.system_metadata = {
            utils.SM_IMAGE_PROP_PREFIX + 'img_config_drive': 'optional'}
        self.assertFalse(configdrive.required_by(inst))
diff --git a/nova/tests/test_context.py b/nova/tests/unit/test_context.py
index 773f9e77f5..773f9e77f5 100644
--- a/nova/tests/test_context.py
+++ b/nova/tests/unit/test_context.py
diff --git a/nova/tests/test_crypto.py b/nova/tests/unit/test_crypto.py
index 49634626a3..49634626a3 100644
--- a/nova/tests/test_crypto.py
+++ b/nova/tests/unit/test_crypto.py
diff --git a/nova/tests/test_exception.py b/nova/tests/unit/test_exception.py
index 6b1617047c..6b1617047c 100644
--- a/nova/tests/test_exception.py
+++ b/nova/tests/unit/test_exception.py
diff --git a/nova/tests/test_flavors.py b/nova/tests/unit/test_flavors.py
index 46fd81d6db..46fd81d6db 100644
--- a/nova/tests/test_flavors.py
+++ b/nova/tests/unit/test_flavors.py
diff --git a/nova/tests/test_hacking.py b/nova/tests/unit/test_hacking.py
index 69089c0cd3..69089c0cd3 100644
--- a/nova/tests/test_hacking.py
+++ b/nova/tests/unit/test_hacking.py
diff --git a/nova/tests/test_hooks.py b/nova/tests/unit/test_hooks.py
index 9017787151..9017787151 100644
--- a/nova/tests/test_hooks.py
+++ b/nova/tests/unit/test_hooks.py
diff --git a/nova/tests/test_instance_types_extra_specs.py b/nova/tests/unit/test_instance_types_extra_specs.py
index 8031376045..8031376045 100644
--- a/nova/tests/test_instance_types_extra_specs.py
+++ b/nova/tests/unit/test_instance_types_extra_specs.py
diff --git a/nova/tests/test_iptables_network.py b/nova/tests/unit/test_iptables_network.py
index bd20b101bb..bd20b101bb 100644
--- a/nova/tests/test_iptables_network.py
+++ b/nova/tests/unit/test_iptables_network.py
diff --git a/nova/tests/test_ipv6.py b/nova/tests/unit/test_ipv6.py
index 4aa6c2a803..4aa6c2a803 100644
--- a/nova/tests/test_ipv6.py
+++ b/nova/tests/unit/test_ipv6.py
diff --git a/nova/tests/test_linuxscsi.py b/nova/tests/unit/test_linuxscsi.py
index 8b1a26a546..8b1a26a546 100644
--- a/nova/tests/test_linuxscsi.py
+++ b/nova/tests/unit/test_linuxscsi.py
diff --git a/nova/tests/unit/test_loadables.py b/nova/tests/unit/test_loadables.py
new file mode 100644
index 0000000000..9f29d850e9
--- /dev/null
+++ b/nova/tests/unit/test_loadables.py
@@ -0,0 +1,113 @@
+# Copyright 2012 OpenStack Foundation # All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Loadable class handling.
+"""
+
+from nova import exception
+from nova import test
+from nova.tests.unit import fake_loadables
+
+
class LoadablesTestCase(test.NoDBTestCase):
    """Exercise the loader machinery via the fake_loadables test package."""

    def setUp(self):
        super(LoadablesTestCase, self).setUp()
        self.fake_loader = fake_loadables.FakeLoader()
        # Dotted path of the package imported above for testing.
        self.test_package = 'nova.tests.unit.fake_loadables'

    def test_loader_init(self):
        """Loader records the package name, path and loadable base class."""
        self.assertEqual(self.test_package, self.fake_loader.package)
        expected_suffix = '/' + self.test_package.replace('.', '/')
        self.assertTrue(self.fake_loader.path.endswith(expected_suffix))
        self.assertEqual(fake_loadables.FakeLoadable,
                         self.fake_loader.loadable_cls_type)

    def _compare_classes(self, classes, expected):
        """Assert *classes* are exactly the classes named in *expected*."""
        found_names = set(cls.__name__ for cls in classes)
        self.assertEqual(set(expected), found_names)

    def test_get_all_classes(self):
        found = self.fake_loader.get_all_classes()
        self._compare_classes(found, ['FakeLoadableSubClass1',
                                      'FakeLoadableSubClass2',
                                      'FakeLoadableSubClass5',
                                      'FakeLoadableSubClass6'])

    def test_get_matching_classes(self):
        pkg = self.test_package
        requested = [pkg + '.fake_loadable1.FakeLoadableSubClass1',
                     pkg + '.fake_loadable2.FakeLoadableSubClass5']
        found = self.fake_loader.get_matching_classes(requested)
        self._compare_classes(found, ['FakeLoadableSubClass1',
                                      'FakeLoadableSubClass5'])

    def test_get_matching_classes_with_underscore(self):
        # Underscore-prefixed (private) classes may not be requested.
        pkg = self.test_package
        requested = [pkg + '.fake_loadable1.FakeLoadableSubClass1',
                     pkg + '.fake_loadable2._FakeLoadableSubClass7']
        self.assertRaises(exception.ClassNotFound,
                          self.fake_loader.get_matching_classes,
                          requested)

    def test_get_matching_classes_with_wrong_type1(self):
        pkg = self.test_package
        requested = [pkg + '.fake_loadable1.FakeLoadableSubClass4',
                     pkg + '.fake_loadable2.FakeLoadableSubClass5']
        self.assertRaises(exception.ClassNotFound,
                          self.fake_loader.get_matching_classes,
                          requested)

    def test_get_matching_classes_with_wrong_type2(self):
        pkg = self.test_package
        requested = [pkg + '.fake_loadable1.FakeLoadableSubClass1',
                     pkg + '.fake_loadable2.FakeLoadableSubClass8']
        self.assertRaises(exception.ClassNotFound,
                          self.fake_loader.get_matching_classes,
                          requested)

    def test_get_matching_classes_with_one_function(self):
        pkg = self.test_package
        requested = [pkg + '.fake_loadable1.return_valid_classes',
                     pkg + '.fake_loadable2.FakeLoadableSubClass5']
        found = self.fake_loader.get_matching_classes(requested)
        self._compare_classes(found, ['FakeLoadableSubClass1',
                                      'FakeLoadableSubClass2',
                                      'FakeLoadableSubClass5'])

    def test_get_matching_classes_with_two_functions(self):
        pkg = self.test_package
        requested = [pkg + '.fake_loadable1.return_valid_classes',
                     pkg + '.fake_loadable2.return_valid_class']
        found = self.fake_loader.get_matching_classes(requested)
        self._compare_classes(found, ['FakeLoadableSubClass1',
                                      'FakeLoadableSubClass2',
                                      'FakeLoadableSubClass6'])

    def test_get_matching_classes_with_function_including_invalids(self):
        # When a function is named, its returned classes are not validated.
        pkg = self.test_package
        requested = [pkg + '.fake_loadable1.return_invalid_classes',
                     pkg + '.fake_loadable2.return_valid_class']
        found = self.fake_loader.get_matching_classes(requested)
        self._compare_classes(found, ['FakeLoadableSubClass1',
                                      '_FakeLoadableSubClass3',
                                      'FakeLoadableSubClass4',
                                      'FakeLoadableSubClass6'])
diff --git a/nova/tests/unit/test_matchers.py b/nova/tests/unit/test_matchers.py
new file mode 100644
index 0000000000..77fefafca8
--- /dev/null
+++ b/nova/tests/unit/test_matchers.py
@@ -0,0 +1,349 @@
+# Copyright 2012 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import testtools
+from testtools.tests.matchers import helpers
+
+from nova.tests.unit import matchers
+
+
class TestDictMatches(testtools.TestCase, helpers.TestMatchersInterface):
    """Interface tests for matchers.DictMatches.

    The TestMatchersInterface mixin drives the matcher from the class
    attributes below: every entry in matches_matches must match, every
    entry in matches_mismatches must not, str_examples pins str(matcher),
    and describe_examples pins the mismatch descriptions.
    """

    matches_matcher = matchers.DictMatches(
        {'foo': 'bar', 'baz': 'DONTCARE',
         'cat': {'tabby': True, 'fluffy': False}}
    )

    # 'DONTCARE' in the reference dict matches any value.
    matches_matches = [
        {'foo': 'bar', 'baz': 'noox', 'cat': {'tabby': True, 'fluffy': False}},
        {'foo': 'bar', 'baz': 'quux', 'cat': {'tabby': True, 'fluffy': False}},
    ]

    matches_mismatches = [
        {},
        {'foo': 'bar', 'baz': 'qux'},
        {'foo': 'bop', 'baz': 'qux',
         'cat': {'tabby': True, 'fluffy': False}},
        {'foo': 'bar', 'baz': 'quux',
         'cat': {'tabby': True, 'fluffy': True}},
        {'foo': 'bar', 'cat': {'tabby': True, 'fluffy': False}},
    ]

    str_examples = [
        ("DictMatches({'baz': 'DONTCARE', 'cat':"
         " {'fluffy': False, 'tabby': True}, 'foo': 'bar'})",
         matches_matcher),
    ]

    # NOTE(review): the expected descriptions use Python 2 set reprs
    # (set([...])) — confirm before running under Python 3.
    describe_examples = [
        ("Keys in d1 and not d2: set(['foo', 'baz', 'cat'])."
         " Keys in d2 and not d1: set([])", {}, matches_matcher),
        ("Dictionaries do not match at fluffy. d1: False d2: True",
         {'foo': 'bar', 'baz': 'quux',
          'cat': {'tabby': True, 'fluffy': True}}, matches_matcher),
        ("Dictionaries do not match at foo. d1: bar d2: bop",
         {'foo': 'bop', 'baz': 'quux',
          'cat': {'tabby': True, 'fluffy': False}}, matches_matcher),
    ]
+
+
class TestDictListMatches(testtools.TestCase, helpers.TestMatchersInterface):
    """Interface tests for matchers.DictListMatches.

    Attributes are consumed by the TestMatchersInterface mixin; see
    TestDictMatches for the protocol.
    """

    matches_matcher = matchers.DictListMatches(
        [{'foo': 'bar', 'baz': 'DONTCARE',
          'cat': {'tabby': True, 'fluffy': False}},
         {'dog': 'yorkie'},
         ])

    # Lists must match element-wise; 'DONTCARE' matches any value.
    matches_matches = [
        [{'foo': 'bar', 'baz': 'qoox',
          'cat': {'tabby': True, 'fluffy': False}},
         {'dog': 'yorkie'}],
        [{'foo': 'bar', 'baz': False,
          'cat': {'tabby': True, 'fluffy': False}},
         {'dog': 'yorkie'}],
    ]

    matches_mismatches = [
        [],
        {},
        [{'foo': 'bar', 'baz': 'qoox',
          'cat': {'tabby': True, 'fluffy': True}},
         {'dog': 'yorkie'}],
        [{'foo': 'bar', 'baz': False,
          'cat': {'tabby': True, 'fluffy': False}},
         {'cat': 'yorkie'}],
        [{'foo': 'bop', 'baz': False,
          'cat': {'tabby': True, 'fluffy': False}},
         {'dog': 'yorkie'}],
    ]

    str_examples = [
        ("DictListMatches([{'baz': 'DONTCARE', 'cat':"
         " {'fluffy': False, 'tabby': True}, 'foo': 'bar'},\n"
         " {'dog': 'yorkie'}])",
         matches_matcher),
    ]

    describe_examples = [
        ("Length mismatch: len(L1)=2 != len(L2)=0", {}, matches_matcher),
        ("Dictionaries do not match at fluffy. d1: True d2: False",
         [{'foo': 'bar', 'baz': 'qoox',
           'cat': {'tabby': True, 'fluffy': True}},
          {'dog': 'yorkie'}],
         matches_matcher),
    ]
+
+
class TestIsSubDictOf(testtools.TestCase, helpers.TestMatchersInterface):
    """Interface tests for matchers.IsSubDictOf.

    Attributes are consumed by the TestMatchersInterface mixin; see
    TestDictMatches for the protocol. Unlike DictMatches, missing keys
    in the candidate are allowed — extra keys are not.
    """

    matches_matcher = matchers.IsSubDictOf(
        {'foo': 'bar', 'baz': 'DONTCARE',
         'cat': {'tabby': True, 'fluffy': False}}
    )

    matches_matches = [
        {'foo': 'bar', 'baz': 'noox', 'cat': {'tabby': True, 'fluffy': False}},
        {'foo': 'bar', 'baz': 'quux'}
    ]

    matches_mismatches = [
        {'foo': 'bop', 'baz': 'qux',
         'cat': {'tabby': True, 'fluffy': False}},
        {'foo': 'bar', 'baz': 'quux',
         'cat': {'tabby': True, 'fluffy': True}},
        {'foo': 'bar', 'cat': {'tabby': True, 'fluffy': False}, 'dog': None},
    ]

    str_examples = [
        ("IsSubDictOf({'foo': 'bar', 'baz': 'DONTCARE',"
         " 'cat': {'fluffy': False, 'tabby': True}})",
         matches_matcher),
    ]

    describe_examples = [
        ("Dictionaries do not match at fluffy. d1: False d2: True",
         {'foo': 'bar', 'baz': 'quux',
          'cat': {'tabby': True, 'fluffy': True}}, matches_matcher),
        ("Dictionaries do not match at foo. d1: bar d2: bop",
         {'foo': 'bop', 'baz': 'quux',
          'cat': {'tabby': True, 'fluffy': False}}, matches_matcher),
    ]
+
+
class TestXMLMatches(testtools.TestCase, helpers.TestMatchersInterface):
    """Interface tests for matchers.XMLMatches.

    Attributes are consumed by the TestMatchersInterface mixin; see
    TestDictMatches for the protocol. The reference document below uses
    'DONTCARE' as a wildcard for text and attribute values; comments and
    processing instructions in it are ignored during matching.
    """

    matches_matcher = matchers.XMLMatches("""<?xml version="1.0"?>
<root>
  <text>some text here</text>
  <text>some other text here</text>
  <attrs key1="spam" key2="DONTCARE"/>
  <children>
    <!--This is a comment-->
    <child1>child 1</child1>
    <child2>child 2</child2>
    <child3>DONTCARE</child3>
    <?spam processing instruction?>
  </children>
</root>""")

    # Attribute order and whitespace differences must not matter.
    matches_matches = ["""<?xml version="1.0"?>
<root>
  <text>some text here</text>
  <text>some other text here</text>
  <attrs key2="spam" key1="spam"/>
  <children>
    <child1>child 1</child1>
    <child2>child 2</child2>
    <child3>child 3</child3>
  </children>
</root>""",
                       """<?xml version="1.0"?>
<root>
  <text>some text here</text>
  <text>some other text here</text>
  <attrs key1="spam" key2="quux"/>
  <children><child1>child 1</child1>
<child2>child 2</child2>
<child3>blah</child3>
  </children>
</root>""",
                       ]

    # Text mismatch, attribute set/value mismatch, and child ordering,
    # missing-child and extra-child cases — one per document.
    matches_mismatches = ["""<?xml version="1.0"?>
<root>
  <text>some text here</text>
  <text>mismatch text</text>
  <attrs key1="spam" key2="quux"/>
  <children>
    <child1>child 1</child1>
    <child2>child 2</child2>
    <child3>child 3</child3>
  </children>
</root>""",
                          """<?xml version="1.0"?>
<root>
  <text>some text here</text>
  <text>some other text here</text>
  <attrs key1="spam" key3="quux"/>
  <children>
    <child1>child 1</child1>
    <child2>child 2</child2>
    <child3>child 3</child3>
  </children>
</root>""",
                          """<?xml version="1.0"?>
<root>
  <text>some text here</text>
  <text>some other text here</text>
  <attrs key1="quux" key2="quux"/>
  <children>
    <child1>child 1</child1>
    <child2>child 2</child2>
    <child3>child 3</child3>
  </children>
</root>""",
                          """<?xml version="1.0"?>
<root>
  <text>some text here</text>
  <text>some other text here</text>
  <attrs key1="spam" key2="quux"/>
  <children>
    <child1>child 1</child1>
    <child4>child 4</child4>
    <child2>child 2</child2>
    <child3>child 3</child3>
  </children>
</root>""",
                          """<?xml version="1.0"?>
<root>
  <text>some text here</text>
  <text>some other text here</text>
  <attrs key1="spam" key2="quux"/>
  <children>
    <child1>child 1</child1>
    <child2>child 2</child2>
  </children>
</root>""",
                          """<?xml version="1.0"?>
<root>
  <text>some text here</text>
  <text>some other text here</text>
  <attrs key1="spam" key2="quux"/>
  <children>
    <child1>child 1</child1>
    <child2>child 2</child2>
    <child3>child 3</child3>
    <child4>child 4</child4>
  </children>
</root>""",
                          ]

    str_examples = [
        ("XMLMatches('<?xml version=\"1.0\"?>\\n"
         "<root>\\n"
         "  <text>some text here</text>\\n"
         "  <text>some other text here</text>\\n"
         "  <attrs key1=\"spam\" key2=\"DONTCARE\"/>\\n"
         "  <children>\\n"
         "    <!--This is a comment-->\\n"
         "    <child1>child 1</child1>\\n"
         "    <child2>child 2</child2>\\n"
         "    <child3>DONTCARE</child3>\\n"
         "    <?spam processing instruction?>\\n"
         "  </children>\\n"
         "</root>')", matches_matcher),
    ]

    describe_examples = [
        ("/root/text[1]: XML text value mismatch: expected text value: "
         "'some other text here'; actual value: 'mismatch text'",
         """<?xml version="1.0"?>
<root>
  <text>some text here</text>
  <text>mismatch text</text>
  <attrs key1="spam" key2="quux"/>
  <children>
    <child1>child 1</child1>
    <child2>child 2</child2>
    <child3>child 3</child3>
  </children>
</root>""", matches_matcher),
        ("/root/attrs[2]: XML attributes mismatch: keys only in expected: "
         "key2; keys only in actual: key3",
         """<?xml version="1.0"?>
<root>
  <text>some text here</text>
  <text>some other text here</text>
  <attrs key1="spam" key3="quux"/>
  <children>
    <child1>child 1</child1>
    <child2>child 2</child2>
    <child3>child 3</child3>
  </children>
</root>""", matches_matcher),
        ("/root/attrs[2]: XML attribute value mismatch: expected value of "
         "attribute key1: 'spam'; actual value: 'quux'",
         """<?xml version="1.0"?>
<root>
  <text>some text here</text>
  <text>some other text here</text>
  <attrs key1="quux" key2="quux"/>
  <children>
    <child1>child 1</child1>
    <child2>child 2</child2>
    <child3>child 3</child3>
  </children>
</root>""", matches_matcher),
        ("/root/children[3]: XML tag mismatch at index 1: expected tag "
         "<child2>; actual tag <child4>",
         """<?xml version="1.0"?>
<root>
  <text>some text here</text>
  <text>some other text here</text>
  <attrs key1="spam" key2="quux"/>
  <children>
    <child1>child 1</child1>
    <child4>child 4</child4>
    <child2>child 2</child2>
    <child3>child 3</child3>
  </children>
</root>""", matches_matcher),
        ("/root/children[3]: XML expected child element <child3> not "
         "present at index 2",
         """<?xml version="1.0"?>
<root>
  <text>some text here</text>
  <text>some other text here</text>
  <attrs key1="spam" key2="quux"/>
  <children>
    <child1>child 1</child1>
    <child2>child 2</child2>
  </children>
</root>""", matches_matcher),
        ("/root/children[3]: XML unexpected child element <child4> "
         "present at index 3",
         """<?xml version="1.0"?>
<root>
  <text>some text here</text>
  <text>some other text here</text>
  <attrs key1="spam" key2="quux"/>
  <children>
    <child1>child 1</child1>
    <child2>child 2</child2>
    <child3>child 3</child3>
    <child4>child 4</child4>
  </children>
</root>""", matches_matcher),
    ]
diff --git a/nova/tests/unit/test_metadata.py b/nova/tests/unit/test_metadata.py
new file mode 100644
index 0000000000..90f57f1af2
--- /dev/null
+++ b/nova/tests/unit/test_metadata.py
@@ -0,0 +1,865 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for metadata service."""
+
+import base64
+import hashlib
+import hmac
+import re
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+import webob
+
+from nova.api.metadata import base
+from nova.api.metadata import handler
+from nova.api.metadata import password
+from nova import block_device
+from nova.compute import flavors
+from nova.conductor import api as conductor_api
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import api
+from nova import exception
+from nova.network import api as network_api
+from nova import objects
+from nova import test
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network
+from nova.tests.unit.objects import test_instance_info_cache
+from nova.tests.unit.objects import test_security_group
+from nova.virt import netutils
+
+CONF = cfg.CONF
+
+USER_DATA_STRING = ("This is an encoded string")
+ENCODE_USER_DATA_STRING = base64.b64encode(USER_DATA_STRING)
+
+INSTANCE = fake_instance.fake_db_instance(**
+ {'id': 1,
+ 'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2',
+ 'name': 'fake',
+ 'project_id': 'test',
+ 'key_name': "mykey",
+ 'key_data': "ssh-rsa AAAAB3Nzai....N3NtHw== someuser@somehost",
+ 'host': 'test',
+ 'launch_index': 1,
+ 'instance_type': {'name': 'm1.tiny'},
+ 'reservation_id': 'r-xxxxxxxx',
+ 'user_data': ENCODE_USER_DATA_STRING,
+ 'image_ref': 7,
+ 'vcpus': 1,
+ 'fixed_ips': [],
+ 'root_device_name': '/dev/sda1',
+ 'info_cache': test_instance_info_cache.fake_info_cache,
+ 'hostname': 'test.novadomain',
+ 'display_name': 'my_displayname',
+ 'metadata': {},
+ 'system_metadata': {},
+ })
+
+
+def fake_inst_obj(context):
+ return objects.Instance._from_db_object(
+ context, objects.Instance(), INSTANCE,
+ expected_attrs=['metadata', 'system_metadata',
+ 'info_cache'])
+
+
+def get_default_sys_meta():
+ return flavors.save_flavor_info(
+ {}, flavors.get_default_flavor())
+
+
+def return_non_existing_address(*args, **kwarg):
+ raise exception.NotFound()
+
+
+def fake_InstanceMetadata(stubs, inst_data, address=None,
+ sgroups=None, content=None, extra_md=None,
+ vd_driver=None, network_info=None):
+ content = content or []
+ extra_md = extra_md or {}
+ if sgroups is None:
+ sgroups = [dict(test_security_group.fake_secgroup,
+ name='default')]
+
+ def sg_get(*args, **kwargs):
+ return sgroups
+
+ stubs.Set(api, 'security_group_get_by_instance', sg_get)
+ return base.InstanceMetadata(inst_data, address=address,
+ content=content, extra_md=extra_md,
+ vd_driver=vd_driver, network_info=network_info)
+
+
+def fake_request(stubs, mdinst, relpath, address="127.0.0.1",
+ fake_get_metadata=None, headers=None,
+ fake_get_metadata_by_instance_id=None):
+
+ def get_metadata_by_remote_address(address):
+ return mdinst
+
+ app = handler.MetadataRequestHandler()
+
+ if fake_get_metadata is None:
+ fake_get_metadata = get_metadata_by_remote_address
+
+ if stubs:
+ stubs.Set(app, 'get_metadata_by_remote_address', fake_get_metadata)
+
+ if fake_get_metadata_by_instance_id:
+ stubs.Set(app, 'get_metadata_by_instance_id',
+ fake_get_metadata_by_instance_id)
+
+ request = webob.Request.blank(relpath)
+ request.remote_addr = address
+
+ if headers is not None:
+ request.headers.update(headers)
+
+ response = request.get_response(app)
+ return response
+
+
+class MetadataTestCase(test.TestCase):
+ def setUp(self):
+ super(MetadataTestCase, self).setUp()
+ self.context = context.RequestContext('fake', 'fake')
+ self.instance = fake_inst_obj(self.context)
+ self.instance.system_metadata = get_default_sys_meta()
+ self.flags(use_local=True, group='conductor')
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+
+ def test_can_pickle_metadata(self):
+ # Make sure that InstanceMetadata is possible to pickle. This is
+ # required for memcache backend to work correctly.
+ md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone())
+ pickle.dumps(md, protocol=0)
+
+ def test_user_data(self):
+ inst = self.instance.obj_clone()
+ inst['user_data'] = base64.b64encode("happy")
+ md = fake_InstanceMetadata(self.stubs, inst)
+ self.assertEqual(
+ md.get_ec2_metadata(version='2009-04-04')['user-data'], "happy")
+
+ def test_no_user_data(self):
+ inst = self.instance.obj_clone()
+ inst.user_data = None
+ md = fake_InstanceMetadata(self.stubs, inst)
+ obj = object()
+ self.assertEqual(
+ md.get_ec2_metadata(version='2009-04-04').get('user-data', obj),
+ obj)
+
+ def test_security_groups(self):
+ inst = self.instance.obj_clone()
+ sgroups = [dict(test_security_group.fake_secgroup, name='default'),
+ dict(test_security_group.fake_secgroup, name='other')]
+ expected = ['default', 'other']
+
+ md = fake_InstanceMetadata(self.stubs, inst, sgroups=sgroups)
+ data = md.get_ec2_metadata(version='2009-04-04')
+ self.assertEqual(data['meta-data']['security-groups'], expected)
+
+ def test_local_hostname_fqdn(self):
+ md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone())
+ data = md.get_ec2_metadata(version='2009-04-04')
+ self.assertEqual(data['meta-data']['local-hostname'],
+ "%s.%s" % (self.instance['hostname'], CONF.dhcp_domain))
+
+ def test_format_instance_mapping(self):
+        # Make sure that _format_instance_mapping works.
+ ctxt = None
+ instance_ref0 = objects.Instance(**{'id': 0,
+ 'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
+ 'root_device_name': None,
+ 'default_ephemeral_device': None,
+ 'default_swap_device': None})
+ instance_ref1 = objects.Instance(**{'id': 0,
+ 'uuid': 'b65cee2f-8c69-4aeb-be2f-f79742548fc2',
+ 'root_device_name': '/dev/sda1',
+ 'default_ephemeral_device': None,
+ 'default_swap_device': None})
+
+ def fake_bdm_get(ctxt, uuid, use_slave=False):
+ return [fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': 87654321,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'delete_on_termination': True,
+ 'device_name': '/dev/sdh'}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'guest_format': 'swap',
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sdc'}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'volume_id': None,
+ 'snapshot_id': None,
+ 'no_device': None,
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'guest_format': None,
+ 'delete_on_termination': None,
+ 'device_name': '/dev/sdb'})]
+
+ self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
+ fake_bdm_get)
+
+ expected = {'ami': 'sda1',
+ 'root': '/dev/sda1',
+ 'ephemeral0': '/dev/sdb',
+ 'swap': '/dev/sdc',
+ 'ebs0': '/dev/sdh'}
+
+ conductor_api.LocalAPI()
+
+ self.assertEqual(base._format_instance_mapping(ctxt,
+ instance_ref0), block_device._DEFAULT_MAPPINGS)
+ self.assertEqual(base._format_instance_mapping(ctxt,
+ instance_ref1), expected)
+
+ def test_pubkey(self):
+ md = fake_InstanceMetadata(self.stubs, self.instance.obj_clone())
+ pubkey_ent = md.lookup("/2009-04-04/meta-data/public-keys")
+
+ self.assertEqual(base.ec2_md_print(pubkey_ent),
+ "0=%s" % self.instance['key_name'])
+ self.assertEqual(base.ec2_md_print(pubkey_ent['0']['openssh-key']),
+ self.instance['key_data'])
+
+ def test_image_type_ramdisk(self):
+ inst = self.instance.obj_clone()
+ inst['ramdisk_id'] = 'ari-853667c0'
+ md = fake_InstanceMetadata(self.stubs, inst)
+ data = md.lookup("/latest/meta-data/ramdisk-id")
+
+ self.assertIsNotNone(data)
+ self.assertTrue(re.match('ari-[0-9a-f]{8}', data))
+
+ def test_image_type_kernel(self):
+ inst = self.instance.obj_clone()
+ inst['kernel_id'] = 'aki-c2e26ff2'
+ md = fake_InstanceMetadata(self.stubs, inst)
+ data = md.lookup("/2009-04-04/meta-data/kernel-id")
+
+ self.assertTrue(re.match('aki-[0-9a-f]{8}', data))
+
+ self.assertEqual(
+ md.lookup("/ec2/2009-04-04/meta-data/kernel-id"), data)
+
+ inst.kernel_id = None
+ md = fake_InstanceMetadata(self.stubs, inst)
+ self.assertRaises(base.InvalidMetadataPath,
+ md.lookup, "/2009-04-04/meta-data/kernel-id")
+
+ def test_check_version(self):
+ inst = self.instance.obj_clone()
+ md = fake_InstanceMetadata(self.stubs, inst)
+
+ self.assertTrue(md._check_version('1.0', '2009-04-04'))
+ self.assertFalse(md._check_version('2009-04-04', '1.0'))
+
+ self.assertFalse(md._check_version('2009-04-04', '2008-09-01'))
+ self.assertTrue(md._check_version('2008-09-01', '2009-04-04'))
+
+ self.assertTrue(md._check_version('2009-04-04', '2009-04-04'))
+
+ def test_InstanceMetadata_uses_passed_network_info(self):
+ network_info = []
+
+ self.mox.StubOutWithMock(netutils, "get_injected_network_template")
+ netutils.get_injected_network_template(network_info).AndReturn(False)
+ self.mox.ReplayAll()
+
+ base.InstanceMetadata(fake_inst_obj(self.context),
+ network_info=network_info)
+
+ def test_InstanceMetadata_invoke_metadata_for_config_drive(self):
+ inst = self.instance.obj_clone()
+ inst_md = base.InstanceMetadata(inst)
+ for (path, value) in inst_md.metadata_for_config_drive():
+ self.assertIsNotNone(path)
+
+ def test_InstanceMetadata_queries_network_API_when_needed(self):
+ network_info_from_api = []
+
+ self.mox.StubOutWithMock(netutils, "get_injected_network_template")
+
+ netutils.get_injected_network_template(
+ network_info_from_api).AndReturn(False)
+
+ self.mox.ReplayAll()
+
+ base.InstanceMetadata(fake_inst_obj(self.context))
+
+ def test_local_ipv4_from_nw_info(self):
+ nw_info = fake_network.fake_get_instance_nw_info(self.stubs,
+ num_networks=2)
+ expected_local = "192.168.1.100"
+ md = fake_InstanceMetadata(self.stubs, self.instance,
+ network_info=nw_info)
+ data = md.get_ec2_metadata(version='2009-04-04')
+ self.assertEqual(data['meta-data']['local-ipv4'], expected_local)
+
+ def test_local_ipv4_from_address(self):
+ nw_info = fake_network.fake_get_instance_nw_info(self.stubs,
+ num_networks=2)
+ expected_local = "fake"
+ md = fake_InstanceMetadata(self.stubs, self.instance,
+ network_info=nw_info, address="fake")
+ data = md.get_ec2_metadata(version='2009-04-04')
+ self.assertEqual(data['meta-data']['local-ipv4'], expected_local)
+
+ def test_local_ipv4_from_nw_none(self):
+ md = fake_InstanceMetadata(self.stubs, self.instance,
+ network_info=[])
+ data = md.get_ec2_metadata(version='2009-04-04')
+ self.assertEqual(data['meta-data']['local-ipv4'], '')
+
+
+class OpenStackMetadataTestCase(test.TestCase):
+ def setUp(self):
+ super(OpenStackMetadataTestCase, self).setUp()
+ self.context = context.RequestContext('fake', 'fake')
+ self.instance = fake_inst_obj(self.context)
+ self.instance['system_metadata'] = get_default_sys_meta()
+ self.flags(use_local=True, group='conductor')
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+
+ def test_with_primitive_instance(self):
+ mdinst = fake_InstanceMetadata(self.stubs, INSTANCE)
+ result = mdinst.lookup('/openstack')
+ self.assertIn('latest', result)
+
+ def test_top_level_listing(self):
+        # request for /openstack/ should list the available versions
+ inst = self.instance.obj_clone()
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+
+ result = mdinst.lookup("/openstack")
+
+ # trailing / should not affect anything
+ self.assertEqual(result, mdinst.lookup("/openstack/"))
+
+ # the 'content' should not show up in directory listing
+ self.assertNotIn(base.CONTENT_DIR, result)
+ self.assertIn('2012-08-10', result)
+ self.assertIn('latest', result)
+
+ def test_version_content_listing(self):
+        # request for /openstack/<version>/ should show meta_data.json
+ inst = self.instance.obj_clone()
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+
+ listing = mdinst.lookup("/openstack/2012-08-10")
+ self.assertIn("meta_data.json", listing)
+
+ def test_returns_apis_supported_in_havana_version(self):
+ mdinst = fake_InstanceMetadata(self.stubs, self.instance)
+ havana_supported_apis = mdinst.lookup("/openstack/2013-10-17")
+
+ self.assertEqual([base.MD_JSON_NAME, base.UD_NAME, base.PASS_NAME,
+ base.VD_JSON_NAME], havana_supported_apis)
+
+ def test_returns_apis_supported_in_folsom_version(self):
+ mdinst = fake_InstanceMetadata(self.stubs, self.instance)
+ folsom_supported_apis = mdinst.lookup("/openstack/2012-08-10")
+
+ self.assertEqual([base.MD_JSON_NAME, base.UD_NAME],
+ folsom_supported_apis)
+
+ def test_returns_apis_supported_in_grizzly_version(self):
+ mdinst = fake_InstanceMetadata(self.stubs, self.instance)
+ grizzly_supported_apis = mdinst.lookup("/openstack/2013-04-04")
+
+ self.assertEqual([base.MD_JSON_NAME, base.UD_NAME, base.PASS_NAME],
+ grizzly_supported_apis)
+
+ def test_metadata_json(self):
+ inst = self.instance.obj_clone()
+ content = [
+ ('/etc/my.conf', "content of my.conf"),
+ ('/root/hello', "content of /root/hello"),
+ ]
+
+ mdinst = fake_InstanceMetadata(self.stubs, inst,
+ content=content)
+ mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
+ mdjson = mdinst.lookup("/openstack/latest/meta_data.json")
+
+ mddict = jsonutils.loads(mdjson)
+
+ self.assertEqual(mddict['uuid'], self.instance['uuid'])
+ self.assertIn('files', mddict)
+
+ self.assertIn('public_keys', mddict)
+ self.assertEqual(mddict['public_keys'][self.instance['key_name']],
+ self.instance['key_data'])
+
+ self.assertIn('launch_index', mddict)
+ self.assertEqual(mddict['launch_index'], self.instance['launch_index'])
+
+ # verify that each of the things we put in content
+ # resulted in an entry in 'files', that their content
+ # there is as expected, and that /content lists them.
+ for (path, content) in content:
+ fent = [f for f in mddict['files'] if f['path'] == path]
+ self.assertEqual(1, len(fent))
+ fent = fent[0]
+ found = mdinst.lookup("/openstack%s" % fent['content_path'])
+ self.assertEqual(found, content)
+
+ def test_extra_md(self):
+ # make sure extra_md makes it through to metadata
+ inst = self.instance.obj_clone()
+ extra = {'foo': 'bar', 'mylist': [1, 2, 3],
+ 'mydict': {"one": 1, "two": 2}}
+ mdinst = fake_InstanceMetadata(self.stubs, inst, extra_md=extra)
+
+ mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
+ mddict = jsonutils.loads(mdjson)
+
+ for key, val in extra.iteritems():
+ self.assertEqual(mddict[key], val)
+
+ def test_password(self):
+        # make sure the password handler is exposed via the metadata lookup
+ inst = self.instance.obj_clone()
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+
+ result = mdinst.lookup("/openstack/latest/password")
+ self.assertEqual(result, password.handle_password)
+
+ def test_userdata(self):
+ inst = self.instance.obj_clone()
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+
+ userdata_found = mdinst.lookup("/openstack/2012-08-10/user_data")
+ self.assertEqual(USER_DATA_STRING, userdata_found)
+
+ # since we had user-data in this instance, it should be in listing
+ self.assertIn('user_data', mdinst.lookup("/openstack/2012-08-10"))
+
+ inst.user_data = None
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+
+ # since this instance had no user-data it should not be there.
+ self.assertNotIn('user_data', mdinst.lookup("/openstack/2012-08-10"))
+
+ self.assertRaises(base.InvalidMetadataPath,
+ mdinst.lookup, "/openstack/2012-08-10/user_data")
+
+ def test_random_seed(self):
+ inst = self.instance.obj_clone()
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+
+ # verify that 2013-04-04 has the 'random' field
+ mdjson = mdinst.lookup("/openstack/2013-04-04/meta_data.json")
+ mddict = jsonutils.loads(mdjson)
+
+ self.assertIn("random_seed", mddict)
+ self.assertEqual(len(base64.b64decode(mddict["random_seed"])), 512)
+
+        # verify that older versions do not have it
+ mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
+ self.assertNotIn("random_seed", jsonutils.loads(mdjson))
+
+ def test_no_dashes_in_metadata(self):
+ # top level entries in meta_data should not contain '-' in their name
+ inst = self.instance.obj_clone()
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+ mdjson = jsonutils.loads(
+ mdinst.lookup("/openstack/latest/meta_data.json"))
+
+ self.assertEqual([], [k for k in mdjson.keys() if k.find("-") != -1])
+
+ def test_vendor_data_presence(self):
+ inst = self.instance.obj_clone()
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+
+ # verify that 2013-10-17 has the vendor_data.json file
+ result = mdinst.lookup("/openstack/2013-10-17")
+ self.assertIn('vendor_data.json', result)
+
+        # verify that older versions do not have it
+ result = mdinst.lookup("/openstack/2013-04-04")
+ self.assertNotIn('vendor_data.json', result)
+
+ def test_vendor_data_response(self):
+ inst = self.instance.obj_clone()
+
+ mydata = {'mykey1': 'value1', 'mykey2': 'value2'}
+
+ class myVdriver(base.VendorDataDriver):
+ def __init__(self, *args, **kwargs):
+ super(myVdriver, self).__init__(*args, **kwargs)
+ data = mydata.copy()
+ uuid = kwargs['instance']['uuid']
+ data.update({'inst_uuid': uuid})
+ self.data = data
+
+ def get(self):
+ return self.data
+
+ mdinst = fake_InstanceMetadata(self.stubs, inst, vd_driver=myVdriver)
+
+ # verify that 2013-10-17 has the vendor_data.json file
+ vdpath = "/openstack/2013-10-17/vendor_data.json"
+ vd = jsonutils.loads(mdinst.lookup(vdpath))
+
+ # the instance should be passed through, and our class copies the
+ # uuid through to 'inst_uuid'.
+ self.assertEqual(vd['inst_uuid'], inst['uuid'])
+
+ # check the other expected values
+ for k, v in mydata.items():
+ self.assertEqual(vd[k], v)
+
+
+class MetadataHandlerTestCase(test.TestCase):
+ """Test that metadata is returning proper values."""
+
+ def setUp(self):
+ super(MetadataHandlerTestCase, self).setUp()
+
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+ self.context = context.RequestContext('fake', 'fake')
+ self.instance = fake_inst_obj(self.context)
+ self.instance.system_metadata = get_default_sys_meta()
+ self.flags(use_local=True, group='conductor')
+ self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
+ address=None, sgroups=None)
+
+ def test_callable(self):
+
+ def verify(req, meta_data):
+ self.assertIsInstance(meta_data, CallableMD)
+ return "foo"
+
+ class CallableMD(object):
+ def lookup(self, path_info):
+ return verify
+
+ response = fake_request(self.stubs, CallableMD(), "/bar")
+ self.assertEqual(response.status_int, 200)
+ self.assertEqual(response.body, "foo")
+
+ def test_root(self):
+ expected = "\n".join(base.VERSIONS) + "\nlatest"
+ response = fake_request(self.stubs, self.mdinst, "/")
+ self.assertEqual(response.body, expected)
+
+ response = fake_request(self.stubs, self.mdinst, "/foo/../")
+ self.assertEqual(response.body, expected)
+
+ def test_root_metadata_proxy_enabled(self):
+ self.flags(service_metadata_proxy=True,
+ group='neutron')
+
+ expected = "\n".join(base.VERSIONS) + "\nlatest"
+ response = fake_request(self.stubs, self.mdinst, "/")
+ self.assertEqual(response.body, expected)
+
+ response = fake_request(self.stubs, self.mdinst, "/foo/../")
+ self.assertEqual(response.body, expected)
+
+ def test_version_root(self):
+ response = fake_request(self.stubs, self.mdinst, "/2009-04-04")
+ response_ctype = response.headers['Content-Type']
+ self.assertTrue(response_ctype.startswith("text/plain"))
+ self.assertEqual(response.body, 'meta-data/\nuser-data')
+
+ response = fake_request(self.stubs, self.mdinst, "/9999-99-99")
+ self.assertEqual(response.status_int, 404)
+
+ def test_json_data(self):
+ response = fake_request(self.stubs, self.mdinst,
+ "/openstack/latest/meta_data.json")
+ response_ctype = response.headers['Content-Type']
+ self.assertTrue(response_ctype.startswith("application/json"))
+
+ response = fake_request(self.stubs, self.mdinst,
+ "/openstack/latest/vendor_data.json")
+ response_ctype = response.headers['Content-Type']
+ self.assertTrue(response_ctype.startswith("application/json"))
+
+ def test_user_data_non_existing_fixed_address(self):
+ self.stubs.Set(network_api.API, 'get_fixed_ip_by_address',
+ return_non_existing_address)
+ response = fake_request(None, self.mdinst, "/2009-04-04/user-data",
+ "127.1.1.1")
+ self.assertEqual(response.status_int, 404)
+
+ def test_fixed_address_none(self):
+ response = fake_request(None, self.mdinst,
+ relpath="/2009-04-04/user-data", address=None)
+ self.assertEqual(response.status_int, 500)
+
+ def test_invalid_path_is_404(self):
+ response = fake_request(self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data-invalid")
+ self.assertEqual(response.status_int, 404)
+
+ def test_user_data_with_use_forwarded_header(self):
+ expected_addr = "192.192.192.2"
+
+ def fake_get_metadata(address):
+ if address == expected_addr:
+ return self.mdinst
+ else:
+ raise Exception("Expected addr of %s, got %s" %
+ (expected_addr, address))
+
+ self.flags(use_forwarded_for=True)
+ response = fake_request(self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="168.168.168.1",
+ fake_get_metadata=fake_get_metadata,
+ headers={'X-Forwarded-For': expected_addr})
+
+ self.assertEqual(response.status_int, 200)
+ response_ctype = response.headers['Content-Type']
+ self.assertTrue(response_ctype.startswith("text/plain"))
+ self.assertEqual(response.body,
+ base64.b64decode(self.instance['user_data']))
+
+ response = fake_request(self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="168.168.168.1",
+ fake_get_metadata=fake_get_metadata,
+ headers=None)
+ self.assertEqual(response.status_int, 500)
+
+ @mock.patch('nova.utils.constant_time_compare')
+ def test_by_instance_id_uses_constant_time_compare(self, mock_compare):
+ mock_compare.side_effect = test.TestingException
+
+ req = webob.Request.blank('/')
+ hnd = handler.MetadataRequestHandler()
+
+ req.headers['X-Instance-ID'] = 'fake-inst'
+ req.headers['X-Tenant-ID'] = 'fake-proj'
+
+ self.assertRaises(test.TestingException,
+ hnd._handle_instance_id_request, req)
+
+ self.assertEqual(1, mock_compare.call_count)
+
+ def test_user_data_with_neutron_instance_id(self):
+ expected_instance_id = 'a-b-c-d'
+
+ def fake_get_metadata(instance_id, remote_address):
+ if remote_address is None:
+ raise Exception('Expected X-Forwared-For header')
+ elif instance_id == expected_instance_id:
+ return self.mdinst
+ else:
+ # raise the exception to aid with 500 response code test
+ raise Exception("Expected instance_id of %s, got %s" %
+ (expected_instance_id, instance_id))
+
+ signed = hmac.new(
+ CONF.neutron.metadata_proxy_shared_secret,
+ expected_instance_id,
+ hashlib.sha256).hexdigest()
+
+ # try a request with service disabled
+ response = fake_request(
+ self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ headers={'X-Instance-ID': 'a-b-c-d',
+ 'X-Tenant-ID': 'test',
+ 'X-Instance-ID-Signature': signed})
+ self.assertEqual(response.status_int, 200)
+
+ # now enable the service
+ self.flags(service_metadata_proxy=True,
+ group='neutron')
+ response = fake_request(
+ self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=fake_get_metadata,
+ headers={'X-Forwarded-For': '192.192.192.2',
+ 'X-Instance-ID': 'a-b-c-d',
+ 'X-Tenant-ID': 'test',
+ 'X-Instance-ID-Signature': signed})
+
+ self.assertEqual(response.status_int, 200)
+ response_ctype = response.headers['Content-Type']
+ self.assertTrue(response_ctype.startswith("text/plain"))
+ self.assertEqual(response.body,
+ base64.b64decode(self.instance['user_data']))
+
+ # mismatched signature
+ response = fake_request(
+ self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=fake_get_metadata,
+ headers={'X-Forwarded-For': '192.192.192.2',
+ 'X-Instance-ID': 'a-b-c-d',
+ 'X-Tenant-ID': 'test',
+ 'X-Instance-ID-Signature': ''})
+
+ self.assertEqual(response.status_int, 403)
+
+ # missing X-Tenant-ID from request
+ response = fake_request(
+ self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=fake_get_metadata,
+ headers={'X-Forwarded-For': '192.192.192.2',
+ 'X-Instance-ID': 'a-b-c-d',
+ 'X-Instance-ID-Signature': signed})
+
+ self.assertEqual(response.status_int, 400)
+
+ # mismatched X-Tenant-ID
+ response = fake_request(
+ self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=fake_get_metadata,
+ headers={'X-Forwarded-For': '192.192.192.2',
+ 'X-Instance-ID': 'a-b-c-d',
+ 'X-Tenant-ID': 'FAKE',
+ 'X-Instance-ID-Signature': signed})
+
+ self.assertEqual(response.status_int, 404)
+
+ # without X-Forwarded-For
+ response = fake_request(
+ self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=fake_get_metadata,
+ headers={'X-Instance-ID': 'a-b-c-d',
+ 'X-Tenant-ID': 'test',
+ 'X-Instance-ID-Signature': signed})
+
+ self.assertEqual(response.status_int, 500)
+
+ # unexpected Instance-ID
+ signed = hmac.new(
+ CONF.neutron.metadata_proxy_shared_secret,
+ 'z-z-z-z',
+ hashlib.sha256).hexdigest()
+
+ response = fake_request(
+ self.stubs, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=fake_get_metadata,
+ headers={'X-Forwarded-For': '192.192.192.2',
+ 'X-Instance-ID': 'z-z-z-z',
+ 'X-Tenant-ID': 'test',
+ 'X-Instance-ID-Signature': signed})
+ self.assertEqual(response.status_int, 500)
+
+ def test_get_metadata(self):
+ def _test_metadata_path(relpath):
+ # recursively confirm a http 200 from all meta-data elements
+ # available at relpath.
+ response = fake_request(self.stubs, self.mdinst,
+ relpath=relpath)
+ for item in response.body.split('\n'):
+ if 'public-keys' in relpath:
+ # meta-data/public-keys/0=keyname refers to
+ # meta-data/public-keys/0
+ item = item.split('=')[0]
+ if item.endswith('/'):
+ path = relpath + '/' + item
+ _test_metadata_path(path)
+ continue
+
+ path = relpath + '/' + item
+ response = fake_request(self.stubs, self.mdinst, relpath=path)
+ self.assertEqual(response.status_int, 200, message=path)
+
+ _test_metadata_path('/2009-04-04/meta-data')
+
+
+class MetadataPasswordTestCase(test.TestCase):
+ def setUp(self):
+ super(MetadataPasswordTestCase, self).setUp()
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+ self.context = context.RequestContext('fake', 'fake')
+ self.instance = fake_inst_obj(self.context)
+ self.instance.system_metadata = get_default_sys_meta()
+ self.flags(use_local=True, group='conductor')
+ self.mdinst = fake_InstanceMetadata(self.stubs, self.instance,
+ address=None, sgroups=None)
+ self.flags(use_local=True, group='conductor')
+
+ def test_get_password(self):
+ request = webob.Request.blank('')
+ self.mdinst.password = 'foo'
+ result = password.handle_password(request, self.mdinst)
+ self.assertEqual(result, 'foo')
+
+ def test_bad_method(self):
+ request = webob.Request.blank('')
+ request.method = 'PUT'
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ password.handle_password, request, self.mdinst)
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ def _try_set_password(self, get_by_uuid, val='bar'):
+ request = webob.Request.blank('')
+ request.method = 'POST'
+ request.body = val
+ get_by_uuid.return_value = self.instance
+
+ with mock.patch.object(self.instance, 'save') as save:
+ password.handle_password(request, self.mdinst)
+ save.assert_called_once_with()
+
+ self.assertIn('password_0', self.instance.system_metadata)
+
+ def test_set_password(self):
+ self.mdinst.password = ''
+ self._try_set_password()
+
+ def test_conflict(self):
+ self.mdinst.password = 'foo'
+ self.assertRaises(webob.exc.HTTPConflict,
+ self._try_set_password)
+
+ def test_too_large(self):
+ self.mdinst.password = ''
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._try_set_password,
+ val=('a' * (password.MAX_SIZE + 1)))
diff --git a/nova/tests/unit/test_notifications.py b/nova/tests/unit/test_notifications.py
new file mode 100644
index 0000000000..bce03da1c3
--- /dev/null
+++ b/nova/tests/unit/test_notifications.py
@@ -0,0 +1,394 @@
+# Copyright (c) 2012 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for common notifications."""
+
+import copy
+
+import mock
+from oslo.config import cfg
+
+from nova.compute import flavors
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova.network import api as network_api
+from nova import notifications
+from nova import test
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_notifier
+
+CONF = cfg.CONF
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+
+
+class NotificationsTestCase(test.TestCase):
+
+ def setUp(self):
+ super(NotificationsTestCase, self).setUp()
+
+ self.net_info = fake_network.fake_get_instance_nw_info(self.stubs, 1,
+ 1)
+
+ def fake_get_nw_info(cls, ctxt, instance):
+ self.assertTrue(ctxt.is_admin)
+ return self.net_info
+
+ self.stubs.Set(network_api.API, 'get_instance_nw_info',
+ fake_get_nw_info)
+ fake_network.set_stub_network_methods(self.stubs)
+
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
+ self.flags(compute_driver='nova.virt.fake.FakeDriver',
+ network_manager='nova.network.manager.FlatManager',
+ notify_on_state_change="vm_and_task_state",
+ host='testhost')
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+
+ self.instance = self._wrapped_create()
+
+ def _wrapped_create(self, params=None):
+ instance_type = flavors.get_flavor_by_name('m1.tiny')
+ sys_meta = flavors.save_flavor_info({}, instance_type)
+ inst = {}
+ inst['image_ref'] = 1
+ inst['user_id'] = self.user_id
+ inst['project_id'] = self.project_id
+ inst['instance_type_id'] = instance_type['id']
+ inst['root_gb'] = 0
+ inst['ephemeral_gb'] = 0
+ inst['access_ip_v4'] = '1.2.3.4'
+ inst['access_ip_v6'] = 'feed:5eed'
+ inst['display_name'] = 'test_instance'
+ inst['hostname'] = 'test_instance_hostname'
+ inst['node'] = 'test_instance_node'
+ inst['system_metadata'] = sys_meta
+ if params:
+ inst.update(params)
+ return db.instance_create(self.context, inst)
+
+ def test_send_api_fault_disabled(self):
+ self.flags(notify_api_faults=False)
+ notifications.send_api_fault("http://example.com/foo", 500, None)
+ self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
+
+ def test_send_api_fault(self):
+ self.flags(notify_api_faults=True)
+ exception = None
+ try:
+ # Get a real exception with a call stack.
+ raise test.TestingException("junk")
+ except test.TestingException as e:
+ exception = e
+
+ notifications.send_api_fault("http://example.com/foo", 500, exception)
+
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ n = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual(n.priority, 'ERROR')
+ self.assertEqual(n.event_type, 'api.fault')
+ self.assertEqual(n.payload['url'], 'http://example.com/foo')
+ self.assertEqual(n.payload['status'], 500)
+ self.assertIsNotNone(n.payload['exception'])
+
+ def test_notif_disabled(self):
+
+ # test config disable of the notifications
+ self.flags(notify_on_state_change=None)
+
+ old = copy.copy(self.instance)
+ self.instance["vm_state"] = vm_states.ACTIVE
+
+ old_vm_state = old['vm_state']
+ new_vm_state = self.instance["vm_state"]
+ old_task_state = old['task_state']
+ new_task_state = self.instance["task_state"]
+
+ notifications.send_update_with_states(self.context, self.instance,
+ old_vm_state, new_vm_state, old_task_state, new_task_state,
+ verify_states=True)
+
+ notifications.send_update(self.context, old, self.instance)
+ self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
+
+ def test_task_notif(self):
+
+ # test config disable of just the task state notifications
+ self.flags(notify_on_state_change="vm_state")
+
+ # we should not get a notification on task state change now
+ old = copy.copy(self.instance)
+ self.instance["task_state"] = task_states.SPAWNING
+
+ old_vm_state = old['vm_state']
+ new_vm_state = self.instance["vm_state"]
+ old_task_state = old['task_state']
+ new_task_state = self.instance["task_state"]
+
+ notifications.send_update_with_states(self.context, self.instance,
+ old_vm_state, new_vm_state, old_task_state, new_task_state,
+ verify_states=True)
+
+ self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
+
+ # ok now enable task state notifications and re-try
+ self.flags(notify_on_state_change="vm_and_task_state")
+
+ notifications.send_update(self.context, old, self.instance)
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+
+ def test_send_no_notif(self):
+
+ # test notification on send no initial vm state:
+ old_vm_state = self.instance['vm_state']
+ new_vm_state = self.instance['vm_state']
+ old_task_state = self.instance['task_state']
+ new_task_state = self.instance['task_state']
+
+ notifications.send_update_with_states(self.context, self.instance,
+ old_vm_state, new_vm_state, old_task_state, new_task_state,
+ service="compute", host=None, verify_states=True)
+
+ self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
+
+ def test_send_on_vm_change(self):
+
+ # pretend we just transitioned to ACTIVE:
+ params = {"vm_state": vm_states.ACTIVE}
+ (old_ref, new_ref) = db.instance_update_and_get_original(self.context,
+ self.instance['uuid'], params)
+ notifications.send_update(self.context, old_ref, new_ref)
+
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+
+ def test_send_on_task_change(self):
+
+ # pretend we just transitioned to task SPAWNING:
+ params = {"task_state": task_states.SPAWNING}
+ (old_ref, new_ref) = db.instance_update_and_get_original(self.context,
+ self.instance['uuid'], params)
+ notifications.send_update(self.context, old_ref, new_ref)
+
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+
+ def test_no_update_with_states(self):
+
+ notifications.send_update_with_states(self.context, self.instance,
+ vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
+ task_states.SPAWNING, verify_states=True)
+ self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
+
+ def test_vm_update_with_states(self):
+
+ notifications.send_update_with_states(self.context, self.instance,
+ vm_states.BUILDING, vm_states.ACTIVE, task_states.SPAWNING,
+ task_states.SPAWNING, verify_states=True)
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ notif = fake_notifier.NOTIFICATIONS[0]
+ payload = notif.payload
+ access_ip_v4 = self.instance["access_ip_v4"]
+ access_ip_v6 = self.instance["access_ip_v6"]
+ display_name = self.instance["display_name"]
+ hostname = self.instance["hostname"]
+ node = self.instance["node"]
+
+ self.assertEqual(vm_states.BUILDING, payload["old_state"])
+ self.assertEqual(vm_states.ACTIVE, payload["state"])
+ self.assertEqual(task_states.SPAWNING, payload["old_task_state"])
+ self.assertEqual(task_states.SPAWNING, payload["new_task_state"])
+ self.assertEqual(payload["access_ip_v4"], access_ip_v4)
+ self.assertEqual(payload["access_ip_v6"], access_ip_v6)
+ self.assertEqual(payload["display_name"], display_name)
+ self.assertEqual(payload["hostname"], hostname)
+ self.assertEqual(payload["node"], node)
+
+ def test_task_update_with_states(self):
+ self.flags(notify_on_state_change="vm_and_task_state")
+
+ notifications.send_update_with_states(self.context, self.instance,
+ vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
+ None, verify_states=True)
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ notif = fake_notifier.NOTIFICATIONS[0]
+ payload = notif.payload
+ access_ip_v4 = self.instance["access_ip_v4"]
+ access_ip_v6 = self.instance["access_ip_v6"]
+ display_name = self.instance["display_name"]
+ hostname = self.instance["hostname"]
+
+ self.assertEqual(vm_states.BUILDING, payload["old_state"])
+ self.assertEqual(vm_states.BUILDING, payload["state"])
+ self.assertEqual(task_states.SPAWNING, payload["old_task_state"])
+ self.assertIsNone(payload["new_task_state"])
+ self.assertEqual(payload["access_ip_v4"], access_ip_v4)
+ self.assertEqual(payload["access_ip_v6"], access_ip_v6)
+ self.assertEqual(payload["display_name"], display_name)
+ self.assertEqual(payload["hostname"], hostname)
+
+ def test_update_no_service_name(self):
+ notifications.send_update_with_states(self.context, self.instance,
+ vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
+ None)
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+
+ # service name should default to 'compute'
+ notif = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('compute.testhost', notif.publisher_id)
+
+ def test_update_with_service_name(self):
+ notifications.send_update_with_states(self.context, self.instance,
+ vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
+ None, service="testservice")
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+
+ # service name should be the explicitly passed 'testservice'
+ notif = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('testservice.testhost', notif.publisher_id)
+
+ def test_update_with_host_name(self):
+ notifications.send_update_with_states(self.context, self.instance,
+ vm_states.BUILDING, vm_states.BUILDING, task_states.SPAWNING,
+ None, host="someotherhost")
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+
+ # host should be the explicitly passed 'someotherhost'
+ notif = fake_notifier.NOTIFICATIONS[0]
+ self.assertEqual('compute.someotherhost', notif.publisher_id)
+
+ def test_payload_has_fixed_ip_labels(self):
+ info = notifications.info_from_instance(self.context, self.instance,
+ self.net_info, None)
+ self.assertIn("fixed_ips", info)
+ self.assertEqual(info["fixed_ips"][0]["label"], "test1")
+
+ def test_payload_has_vif_mac_address(self):
+ info = notifications.info_from_instance(self.context, self.instance,
+ self.net_info, None)
+ self.assertIn("fixed_ips", info)
+ self.assertEqual(self.net_info[0]['address'],
+ info["fixed_ips"][0]["vif_mac"])
+
+ def test_payload_has_cell_name_empty(self):
+ info = notifications.info_from_instance(self.context, self.instance,
+ self.net_info, None)
+ self.assertIn("cell_name", info)
+ self.assertIsNone(self.instance['cell_name'])
+ self.assertEqual("", info["cell_name"])
+
+ def test_payload_has_cell_name(self):
+ self.instance['cell_name'] = "cell1"
+ info = notifications.info_from_instance(self.context, self.instance,
+ self.net_info, None)
+ self.assertIn("cell_name", info)
+ self.assertEqual("cell1", info["cell_name"])
+
+ def test_payload_has_progress_empty(self):
+ info = notifications.info_from_instance(self.context, self.instance,
+ self.net_info, None)
+ self.assertIn("progress", info)
+ self.assertIsNone(self.instance['progress'])
+ self.assertEqual("", info["progress"])
+
+ def test_payload_has_progress(self):
+ self.instance['progress'] = 50
+ info = notifications.info_from_instance(self.context, self.instance,
+ self.net_info, None)
+ self.assertIn("progress", info)
+ self.assertEqual(50, info["progress"])
+
+ def test_send_access_ip_update(self):
+ notifications.send_update(self.context, self.instance, self.instance)
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ notif = fake_notifier.NOTIFICATIONS[0]
+ payload = notif.payload
+ access_ip_v4 = self.instance["access_ip_v4"]
+ access_ip_v6 = self.instance["access_ip_v6"]
+
+ self.assertEqual(payload["access_ip_v4"], access_ip_v4)
+ self.assertEqual(payload["access_ip_v6"], access_ip_v6)
+
+ def test_send_name_update(self):
+ param = {"display_name": "new_display_name"}
+ new_name_inst = self._wrapped_create(params=param)
+ notifications.send_update(self.context, self.instance, new_name_inst)
+ self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
+ notif = fake_notifier.NOTIFICATIONS[0]
+ payload = notif.payload
+ old_display_name = self.instance["display_name"]
+ new_display_name = new_name_inst["display_name"]
+
+ self.assertEqual(payload["old_display_name"], old_display_name)
+ self.assertEqual(payload["display_name"], new_display_name)
+
+ def test_send_no_state_change(self):
+ called = [False]
+
+ def sending_no_state_change(context, instance, **kwargs):
+ called[0] = True
+ self.stubs.Set(notifications, '_send_instance_update_notification',
+ sending_no_state_change)
+ notifications.send_update(self.context, self.instance, self.instance)
+ self.assertTrue(called[0])
+
+ def test_fail_sending_update(self):
+ def fail_sending(context, instance, **kwargs):
+ raise Exception('failed to notify')
+ self.stubs.Set(notifications, '_send_instance_update_notification',
+ fail_sending)
+
+ notifications.send_update(self.context, self.instance, self.instance)
+ self.assertEqual(0, len(fake_notifier.NOTIFICATIONS))
+
+
+class NotificationsFormatTestCase(test.NoDBTestCase):
+
+ def test_state_computation(self):
+ instance = {'vm_state': mock.sentinel.vm_state,
+ 'task_state': mock.sentinel.task_state}
+ states = notifications._compute_states_payload(instance)
+ self.assertEqual(mock.sentinel.vm_state, states['state'])
+ self.assertEqual(mock.sentinel.vm_state, states['old_state'])
+ self.assertEqual(mock.sentinel.task_state, states['old_task_state'])
+ self.assertEqual(mock.sentinel.task_state, states['new_task_state'])
+
+ states = notifications._compute_states_payload(
+ instance,
+ old_vm_state=mock.sentinel.old_vm_state,
+ )
+ self.assertEqual(mock.sentinel.vm_state, states['state'])
+ self.assertEqual(mock.sentinel.old_vm_state, states['old_state'])
+ self.assertEqual(mock.sentinel.task_state, states['old_task_state'])
+ self.assertEqual(mock.sentinel.task_state, states['new_task_state'])
+
+ states = notifications._compute_states_payload(
+ instance,
+ old_vm_state=mock.sentinel.old_vm_state,
+ old_task_state=mock.sentinel.old_task_state,
+ new_vm_state=mock.sentinel.new_vm_state,
+ new_task_state=mock.sentinel.new_task_state,
+ )
+
+ self.assertEqual(mock.sentinel.new_vm_state, states['state'])
+ self.assertEqual(mock.sentinel.old_vm_state, states['old_state'])
+ self.assertEqual(mock.sentinel.old_task_state,
+ states['old_task_state'])
+ self.assertEqual(mock.sentinel.new_task_state,
+ states['new_task_state'])
diff --git a/nova/tests/unit/test_nova_manage.py b/nova/tests/unit/test_nova_manage.py
new file mode 100644
index 0000000000..9ffaf66e81
--- /dev/null
+++ b/nova/tests/unit/test_nova_manage.py
@@ -0,0 +1,467 @@
+# Copyright 2011 OpenStack Foundation
+# Copyright 2011 Ilya Alekseyev
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import StringIO
+import sys
+
+import fixtures
+import mock
+
+from nova.cmd import manage
+from nova import context
+from nova import db
+from nova import exception
+from nova.i18n import _
+from nova import test
+from nova.tests.unit.db import fakes as db_fakes
+from nova.tests.unit.objects import test_network
+
+
+class FixedIpCommandsTestCase(test.TestCase):
+ def setUp(self):
+ super(FixedIpCommandsTestCase, self).setUp()
+ db_fakes.stub_out_db_network_api(self.stubs)
+ self.commands = manage.FixedIpCommands()
+
+ def test_reserve(self):
+ self.commands.reserve('192.168.0.100')
+ address = db.fixed_ip_get_by_address(context.get_admin_context(),
+ '192.168.0.100')
+ self.assertEqual(address['reserved'], True)
+
+ def test_reserve_nonexistent_address(self):
+ self.assertEqual(2, self.commands.reserve('55.55.55.55'))
+
+ def test_unreserve(self):
+ self.commands.unreserve('192.168.0.100')
+ address = db.fixed_ip_get_by_address(context.get_admin_context(),
+ '192.168.0.100')
+ self.assertEqual(address['reserved'], False)
+
+ def test_unreserve_nonexistent_address(self):
+ self.assertEqual(2, self.commands.unreserve('55.55.55.55'))
+
+ def test_list(self):
+ self.useFixture(fixtures.MonkeyPatch('sys.stdout',
+ StringIO.StringIO()))
+ self.commands.list()
+ self.assertNotEqual(1, sys.stdout.getvalue().find('192.168.0.100'))
+
+ def test_list_just_one_host(self):
+ def fake_fixed_ip_get_by_host(*args, **kwargs):
+ return [db_fakes.fixed_ip_fields]
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.db.fixed_ip_get_by_host',
+ fake_fixed_ip_get_by_host))
+ self.useFixture(fixtures.MonkeyPatch('sys.stdout',
+ StringIO.StringIO()))
+ self.commands.list('banana')
+ self.assertNotEqual(1, sys.stdout.getvalue().find('192.168.0.100'))
+
+
+class FloatingIpCommandsTestCase(test.TestCase):
+ def setUp(self):
+ super(FloatingIpCommandsTestCase, self).setUp()
+ db_fakes.stub_out_db_network_api(self.stubs)
+ self.commands = manage.FloatingIpCommands()
+
+ def test_address_to_hosts(self):
+ def assert_loop(result, expected):
+ for ip in result:
+ self.assertIn(str(ip), expected)
+
+ address_to_hosts = self.commands.address_to_hosts
+ # /32 and /31
+ self.assertRaises(exception.InvalidInput, address_to_hosts,
+ '192.168.100.1/32')
+ self.assertRaises(exception.InvalidInput, address_to_hosts,
+ '192.168.100.1/31')
+ # /30
+ expected = ["192.168.100.%s" % i for i in range(1, 3)]
+ result = address_to_hosts('192.168.100.0/30')
+ self.assertEqual(2, len(list(result)))
+ assert_loop(result, expected)
+ # /29
+ expected = ["192.168.100.%s" % i for i in range(1, 7)]
+ result = address_to_hosts('192.168.100.0/29')
+ self.assertEqual(6, len(list(result)))
+ assert_loop(result, expected)
+ # /28
+ expected = ["192.168.100.%s" % i for i in range(1, 15)]
+ result = address_to_hosts('192.168.100.0/28')
+ self.assertEqual(14, len(list(result)))
+ assert_loop(result, expected)
+ # /16
+ result = address_to_hosts('192.168.100.0/16')
+ self.assertEqual(65534, len(list(result)))
+ # NOTE(dripton): I don't test /13 because it makes the test take 3s.
+ # /12 gives over a million IPs, which is ridiculous.
+ self.assertRaises(exception.InvalidInput, address_to_hosts,
+ '192.168.100.1/12')
+
+
+class NetworkCommandsTestCase(test.TestCase):
+ def setUp(self):
+ super(NetworkCommandsTestCase, self).setUp()
+ self.commands = manage.NetworkCommands()
+ self.net = {'id': 0,
+ 'label': 'fake',
+ 'injected': False,
+ 'cidr': '192.168.0.0/24',
+ 'cidr_v6': 'dead:beef::/64',
+ 'multi_host': False,
+ 'gateway_v6': 'dead:beef::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge': 'fa0',
+ 'bridge_interface': 'fake_fa0',
+ 'gateway': '192.168.0.1',
+ 'broadcast': '192.168.0.255',
+ 'dns1': '8.8.8.8',
+ 'dns2': '8.8.4.4',
+ 'vlan': 200,
+ 'vlan_start': 201,
+ 'vpn_public_address': '10.0.0.2',
+ 'vpn_public_port': '2222',
+ 'vpn_private_address': '192.168.0.2',
+ 'dhcp_start': '192.168.0.3',
+ 'project_id': 'fake_project',
+ 'host': 'fake_host',
+ 'uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'}
+
+ def fake_network_get_by_cidr(context, cidr):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(cidr, self.fake_net['cidr'])
+ return db_fakes.FakeModel(dict(test_network.fake_network,
+ **self.fake_net))
+
+ def fake_network_get_by_uuid(context, uuid):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(uuid, self.fake_net['uuid'])
+ return db_fakes.FakeModel(dict(test_network.fake_network,
+ **self.fake_net))
+
+ def fake_network_update(context, network_id, values):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(network_id, self.fake_net['id'])
+ self.assertEqual(values, self.fake_update_value)
+ self.fake_network_get_by_cidr = fake_network_get_by_cidr
+ self.fake_network_get_by_uuid = fake_network_get_by_uuid
+ self.fake_network_update = fake_network_update
+
+ def test_create(self):
+
+ def fake_create_networks(obj, context, **kwargs):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(kwargs['label'], 'Test')
+ self.assertEqual(kwargs['cidr'], '10.2.0.0/24')
+ self.assertEqual(kwargs['multi_host'], False)
+ self.assertEqual(kwargs['num_networks'], 1)
+ self.assertEqual(kwargs['network_size'], 256)
+ self.assertEqual(kwargs['vlan'], 200)
+ self.assertEqual(kwargs['vlan_start'], 201)
+ self.assertEqual(kwargs['vpn_start'], 2000)
+ self.assertEqual(kwargs['cidr_v6'], 'fd00:2::/120')
+ self.assertEqual(kwargs['gateway'], '10.2.0.1')
+ self.assertEqual(kwargs['gateway_v6'], 'fd00:2::22')
+ self.assertEqual(kwargs['bridge'], 'br200')
+ self.assertEqual(kwargs['bridge_interface'], 'eth0')
+ self.assertEqual(kwargs['dns1'], '8.8.8.8')
+ self.assertEqual(kwargs['dns2'], '8.8.4.4')
+ self.flags(network_manager='nova.network.manager.VlanManager')
+ from nova.network import manager as net_manager
+ self.stubs.Set(net_manager.VlanManager, 'create_networks',
+ fake_create_networks)
+ self.commands.create(
+ label='Test',
+ cidr='10.2.0.0/24',
+ num_networks=1,
+ network_size=256,
+ multi_host='F',
+ vlan=200,
+ vlan_start=201,
+ vpn_start=2000,
+ cidr_v6='fd00:2::/120',
+ gateway='10.2.0.1',
+ gateway_v6='fd00:2::22',
+ bridge='br200',
+ bridge_interface='eth0',
+ dns1='8.8.8.8',
+ dns2='8.8.4.4',
+ uuid='aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
+
+ def test_list(self):
+
+ def fake_network_get_all(context):
+ return [db_fakes.FakeModel(self.net)]
+ self.stubs.Set(db, 'network_get_all', fake_network_get_all)
+ output = StringIO.StringIO()
+ sys.stdout = output
+ self.commands.list()
+ sys.stdout = sys.__stdout__
+ result = output.getvalue()
+ _fmt = "\t".join(["%(id)-5s", "%(cidr)-18s", "%(cidr_v6)-15s",
+ "%(dhcp_start)-15s", "%(dns1)-15s", "%(dns2)-15s",
+ "%(vlan)-15s", "%(project_id)-15s", "%(uuid)-15s"])
+ head = _fmt % {'id': _('id'),
+ 'cidr': _('IPv4'),
+ 'cidr_v6': _('IPv6'),
+ 'dhcp_start': _('start address'),
+ 'dns1': _('DNS1'),
+ 'dns2': _('DNS2'),
+ 'vlan': _('VlanID'),
+ 'project_id': _('project'),
+ 'uuid': _("uuid")}
+ body = _fmt % {'id': self.net['id'],
+ 'cidr': self.net['cidr'],
+ 'cidr_v6': self.net['cidr_v6'],
+ 'dhcp_start': self.net['dhcp_start'],
+ 'dns1': self.net['dns1'],
+ 'dns2': self.net['dns2'],
+ 'vlan': self.net['vlan'],
+ 'project_id': self.net['project_id'],
+ 'uuid': self.net['uuid']}
+ answer = '%s\n%s\n' % (head, body)
+ self.assertEqual(result, answer)
+
+ def test_delete(self):
+ self.fake_net = self.net
+ self.fake_net['project_id'] = None
+ self.fake_net['host'] = None
+ self.stubs.Set(db, 'network_get_by_uuid',
+ self.fake_network_get_by_uuid)
+
+ def fake_network_delete_safe(context, network_id):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(network_id, self.fake_net['id'])
+ self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
+ self.commands.delete(uuid=self.fake_net['uuid'])
+
+ def test_delete_by_cidr(self):
+ self.fake_net = self.net
+ self.fake_net['project_id'] = None
+ self.fake_net['host'] = None
+ self.stubs.Set(db, 'network_get_by_cidr',
+ self.fake_network_get_by_cidr)
+
+ def fake_network_delete_safe(context, network_id):
+ self.assertTrue(context.to_dict()['is_admin'])
+ self.assertEqual(network_id, self.fake_net['id'])
+ self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
+ self.commands.delete(fixed_range=self.fake_net['cidr'])
+
+ def _test_modify_base(self, update_value, project, host, dis_project=None,
+ dis_host=None):
+ self.fake_net = self.net
+ self.fake_update_value = update_value
+ self.stubs.Set(db, 'network_get_by_cidr',
+ self.fake_network_get_by_cidr)
+ self.stubs.Set(db, 'network_update', self.fake_network_update)
+ self.commands.modify(self.fake_net['cidr'], project=project, host=host,
+ dis_project=dis_project, dis_host=dis_host)
+
+ def test_modify_associate(self):
+ self._test_modify_base(update_value={'project_id': 'test_project',
+ 'host': 'test_host'},
+ project='test_project', host='test_host')
+
+ def test_modify_unchanged(self):
+ self._test_modify_base(update_value={}, project=None, host=None)
+
+ def test_modify_disassociate(self):
+ self._test_modify_base(update_value={'project_id': None, 'host': None},
+ project=None, host=None, dis_project=True,
+ dis_host=True)
+
+
+class NeutronV2NetworkCommandsTestCase(test.TestCase):
+ def setUp(self):
+ super(NeutronV2NetworkCommandsTestCase, self).setUp()
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ self.commands = manage.NetworkCommands()
+
+ def test_create(self):
+ self.assertEqual(2, self.commands.create())
+
+ def test_list(self):
+ self.assertEqual(2, self.commands.list())
+
+ def test_delete(self):
+ self.assertEqual(2, self.commands.delete())
+
+ def test_modify(self):
+ self.assertEqual(2, self.commands.modify('192.168.0.1'))
+
+
+class ProjectCommandsTestCase(test.TestCase):
+ def setUp(self):
+ super(ProjectCommandsTestCase, self).setUp()
+ self.commands = manage.ProjectCommands()
+
+ def test_quota(self):
+ output = StringIO.StringIO()
+ sys.stdout = output
+ self.commands.quota(project_id='admin',
+ key='instances',
+ value='unlimited',
+ )
+
+ sys.stdout = sys.__stdout__
+ result = output.getvalue()
+ print_format = "%-36s %-10s" % ('instances', 'unlimited')
+ self.assertEqual((print_format in result), True)
+
+ def test_quota_update_invalid_key(self):
+ self.assertEqual(2, self.commands.quota('admin', 'volumes1', '10'))
+
+
+class DBCommandsTestCase(test.TestCase):
+ def setUp(self):
+ super(DBCommandsTestCase, self).setUp()
+ self.commands = manage.DbCommands()
+
+ def test_archive_deleted_rows_negative(self):
+ self.assertEqual(1, self.commands.archive_deleted_rows(-1))
+
+
+class ServiceCommandsTestCase(test.TestCase):
+ def setUp(self):
+ super(ServiceCommandsTestCase, self).setUp()
+ self.commands = manage.ServiceCommands()
+
+ def test_service_enable_invalid_params(self):
+ self.assertEqual(2, self.commands.enable('nohost', 'noservice'))
+
+ def test_service_disable_invalid_params(self):
+ self.assertEqual(2, self.commands.disable('nohost', 'noservice'))
+
+
+class CellCommandsTestCase(test.TestCase):
+ def setUp(self):
+ super(CellCommandsTestCase, self).setUp()
+ self.commands = manage.CellCommands()
+
+ def test_create_transport_hosts_multiple(self):
+ """Test the _create_transport_hosts method
+ when broker_hosts is set.
+ """
+ brokers = "127.0.0.1:5672,127.0.0.2:5671"
+ thosts = self.commands._create_transport_hosts(
+ 'guest', 'devstack',
+ broker_hosts=brokers)
+ self.assertEqual(2, len(thosts))
+ self.assertEqual('127.0.0.1', thosts[0].hostname)
+ self.assertEqual(5672, thosts[0].port)
+ self.assertEqual('127.0.0.2', thosts[1].hostname)
+ self.assertEqual(5671, thosts[1].port)
+
+ def test_create_transport_hosts_single(self):
+ """Test the _create_transport_hosts method when hostname is passed."""
+ thosts = self.commands._create_transport_hosts('guest', 'devstack',
+ hostname='127.0.0.1',
+ port=80)
+ self.assertEqual(1, len(thosts))
+ self.assertEqual('127.0.0.1', thosts[0].hostname)
+ self.assertEqual(80, thosts[0].port)
+
+ def test_create_transport_hosts_single_broker(self):
+ """Test the _create_transport_hosts method for single broker_hosts."""
+ thosts = self.commands._create_transport_hosts(
+ 'guest', 'devstack',
+ broker_hosts='127.0.0.1:5672')
+ self.assertEqual(1, len(thosts))
+ self.assertEqual('127.0.0.1', thosts[0].hostname)
+ self.assertEqual(5672, thosts[0].port)
+
+ def test_create_transport_hosts_both(self):
+ """Test the _create_transport_hosts method when both broker_hosts
+ and hostname/port are passed.
+ """
+ thosts = self.commands._create_transport_hosts(
+ 'guest', 'devstack',
+ broker_hosts='127.0.0.1:5672',
+ hostname='127.0.0.2', port=80)
+ self.assertEqual(1, len(thosts))
+ self.assertEqual('127.0.0.1', thosts[0].hostname)
+ self.assertEqual(5672, thosts[0].port)
+
+ def test_create_transport_hosts_wrong_val(self):
+ """Test the _create_transport_hosts method when broker_hosts
+ is wrongly specified
+ """
+ self.assertRaises(ValueError,
+ self.commands._create_transport_hosts,
+ 'guest', 'devstack',
+ broker_hosts='127.0.0.1:5672,127.0.0.1')
+
+ def test_create_transport_hosts_wrong_port_val(self):
+ """Test the _create_transport_hosts method when port in
+ broker_hosts is wrongly specified
+ """
+ self.assertRaises(ValueError,
+ self.commands._create_transport_hosts,
+ 'guest', 'devstack',
+ broker_hosts='127.0.0.1:')
+
+ def test_create_transport_hosts_wrong_port_arg(self):
+ """Test the _create_transport_hosts method when port
+ argument is wrongly specified
+ """
+ self.assertRaises(ValueError,
+ self.commands._create_transport_hosts,
+ 'guest', 'devstack',
+ hostname='127.0.0.1', port='ab')
+
+ @mock.patch.object(context, 'get_admin_context')
+ @mock.patch.object(db, 'cell_create')
+ def test_create_broker_hosts(self, mock_db_cell_create, mock_ctxt):
+ """Test the create function when broker_hosts is
+ passed
+ """
+ cell_tp_url = "fake://guest:devstack@127.0.0.1:5432"
+ cell_tp_url += ",guest:devstack@127.0.0.2:9999/"
+ ctxt = mock.sentinel
+ mock_ctxt.return_value = mock.sentinel
+ self.commands.create("test",
+ broker_hosts='127.0.0.1:5432,127.0.0.2:9999',
+ woffset=0, wscale=0,
+ username="guest", password="devstack")
+ exp_values = {'name': "test",
+ 'is_parent': False,
+ 'transport_url': cell_tp_url,
+ 'weight_offset': 0.0,
+ 'weight_scale': 0.0}
+ mock_db_cell_create.assert_called_once_with(ctxt, exp_values)
+
+ @mock.patch.object(context, 'get_admin_context')
+ @mock.patch.object(db, 'cell_create')
+ def test_create_hostname(self, mock_db_cell_create, mock_ctxt):
+ """Test the create function when hostname and port is
+ passed
+ """
+ cell_tp_url = "fake://guest:devstack@127.0.0.1:9999/"
+ ctxt = mock.sentinel
+ mock_ctxt.return_value = mock.sentinel
+ self.commands.create("test",
+ hostname='127.0.0.1', port="9999",
+ woffset=0, wscale=0,
+ username="guest", password="devstack")
+ exp_values = {'name': "test",
+ 'is_parent': False,
+ 'transport_url': cell_tp_url,
+ 'weight_offset': 0.0,
+ 'weight_scale': 0.0}
+ mock_db_cell_create.assert_called_once_with(ctxt, exp_values)
diff --git a/nova/tests/test_objectstore.py b/nova/tests/unit/test_objectstore.py
index a8023d5f01..a8023d5f01 100644
--- a/nova/tests/test_objectstore.py
+++ b/nova/tests/unit/test_objectstore.py
diff --git a/nova/tests/test_pipelib.py b/nova/tests/unit/test_pipelib.py
index 99d840a839..99d840a839 100644
--- a/nova/tests/test_pipelib.py
+++ b/nova/tests/unit/test_pipelib.py
diff --git a/nova/tests/unit/test_policy.py b/nova/tests/unit/test_policy.py
new file mode 100644
index 0000000000..59663076be
--- /dev/null
+++ b/nova/tests/unit/test_policy.py
@@ -0,0 +1,231 @@
+# Copyright 2011 Piston Cloud Computing, Inc.
+# All Rights Reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Test of Policy Engine For Nova."""
+
+import os.path
+import StringIO
+
+import mock
+import six.moves.urllib.request as urlrequest
+
+from nova import context
+from nova import exception
+from nova.openstack.common import policy as common_policy
+from nova import policy
+from nova import test
+from nova.tests.unit import policy_fixture
+from nova import utils
+
+
+class PolicyFileTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(PolicyFileTestCase, self).setUp()
+ self.context = context.RequestContext('fake', 'fake')
+ self.target = {}
+
+ def test_modified_policy_reloads(self):
+ with utils.tempdir() as tmpdir:
+ tmpfilename = os.path.join(tmpdir, 'policy')
+
+ self.flags(policy_file=tmpfilename)
+
+ # NOTE(uni): context construction invokes policy check to determine
+ # is_admin or not. As a side-effect, policy reset is needed here
+ # to flush existing policy cache.
+ policy.reset()
+
+ action = "example:test"
+ with open(tmpfilename, "w") as policyfile:
+ policyfile.write('{"example:test": ""}')
+ policy.enforce(self.context, action, self.target)
+ with open(tmpfilename, "w") as policyfile:
+ policyfile.write('{"example:test": "!"}')
+ policy._ENFORCER.load_rules(True)
+ self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
+ self.context, action, self.target)
+
+
+class PolicyTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(PolicyTestCase, self).setUp()
+ rules = {
+ "true": '@',
+ "example:allowed": '@',
+ "example:denied": "!",
+ "example:get_http": "http://www.example.com",
+ "example:my_file": "role:compute_admin or "
+ "project_id:%(project_id)s",
+ "example:early_and_fail": "! and @",
+ "example:early_or_success": "@ or !",
+ "example:lowercase_admin": "role:admin or role:sysadmin",
+ "example:uppercase_admin": "role:ADMIN or role:sysadmin",
+ }
+ policy.reset()
+ policy.init()
+ policy.set_rules(dict((k, common_policy.parse_rule(v))
+ for k, v in rules.items()))
+ self.context = context.RequestContext('fake', 'fake', roles=['member'])
+ self.target = {}
+
+ def test_enforce_nonexistent_action_throws(self):
+ action = "example:noexist"
+ self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
+ self.context, action, self.target)
+
+ def test_enforce_bad_action_throws(self):
+ action = "example:denied"
+ self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
+ self.context, action, self.target)
+
+ def test_enforce_bad_action_noraise(self):
+ action = "example:denied"
+ result = policy.enforce(self.context, action, self.target, False)
+ self.assertEqual(result, False)
+
+ def test_enforce_good_action(self):
+ action = "example:allowed"
+ result = policy.enforce(self.context, action, self.target)
+ self.assertEqual(result, True)
+
+ @mock.patch.object(urlrequest, 'urlopen',
+ return_value=StringIO.StringIO("True"))
+ def test_enforce_http_true(self, mock_urlrequest):
+ action = "example:get_http"
+ target = {}
+ result = policy.enforce(self.context, action, target)
+ self.assertEqual(result, True)
+
+ @mock.patch.object(urlrequest, 'urlopen',
+ return_value=StringIO.StringIO("False"))
+ def test_enforce_http_false(self, mock_urlrequest):
+ action = "example:get_http"
+ target = {}
+ self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
+ self.context, action, target)
+
+ def test_templatized_enforcement(self):
+ target_mine = {'project_id': 'fake'}
+ target_not_mine = {'project_id': 'another'}
+ action = "example:my_file"
+ policy.enforce(self.context, action, target_mine)
+ self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
+ self.context, action, target_not_mine)
+
+ def test_early_AND_enforcement(self):
+ action = "example:early_and_fail"
+ self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
+ self.context, action, self.target)
+
+ def test_early_OR_enforcement(self):
+ action = "example:early_or_success"
+ policy.enforce(self.context, action, self.target)
+
+ def test_ignore_case_role_check(self):
+ lowercase_action = "example:lowercase_admin"
+ uppercase_action = "example:uppercase_admin"
+ # NOTE(dprince): we mix case in the Admin role here to ensure
+ # case is ignored
+ admin_context = context.RequestContext('admin',
+ 'fake',
+ roles=['AdMiN'])
+ policy.enforce(admin_context, lowercase_action, self.target)
+ policy.enforce(admin_context, uppercase_action, self.target)
+
+
+class DefaultPolicyTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(DefaultPolicyTestCase, self).setUp()
+
+ self.rules = {
+ "default": '',
+ "example:exist": "!",
+ }
+
+ self._set_rules('default')
+
+ self.context = context.RequestContext('fake', 'fake')
+
+ def _set_rules(self, default_rule):
+ policy.reset()
+ rules = dict((k, common_policy.parse_rule(v))
+ for k, v in self.rules.items())
+ policy.init(rules=rules, default_rule=default_rule, use_conf=False)
+
+ def test_policy_called(self):
+ self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
+ self.context, "example:exist", {})
+
+ def test_not_found_policy_calls_default(self):
+ policy.enforce(self.context, "example:noexist", {})
+
+ def test_default_not_found(self):
+ self._set_rules("default_noexist")
+ self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
+ self.context, "example:noexist", {})
+
+
+class IsAdminCheckTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(IsAdminCheckTestCase, self).setUp()
+ policy.init()
+
+ def test_init_true(self):
+ check = policy.IsAdminCheck('is_admin', 'True')
+
+ self.assertEqual(check.kind, 'is_admin')
+ self.assertEqual(check.match, 'True')
+ self.assertEqual(check.expected, True)
+
+ def test_init_false(self):
+ check = policy.IsAdminCheck('is_admin', 'nottrue')
+
+ self.assertEqual(check.kind, 'is_admin')
+ self.assertEqual(check.match, 'False')
+ self.assertEqual(check.expected, False)
+
+ def test_call_true(self):
+ check = policy.IsAdminCheck('is_admin', 'True')
+
+ self.assertEqual(check('target', dict(is_admin=True),
+ policy._ENFORCER), True)
+ self.assertEqual(check('target', dict(is_admin=False),
+ policy._ENFORCER), False)
+
+ def test_call_false(self):
+ check = policy.IsAdminCheck('is_admin', 'False')
+
+ self.assertEqual(check('target', dict(is_admin=True),
+ policy._ENFORCER), False)
+ self.assertEqual(check('target', dict(is_admin=False),
+ policy._ENFORCER), True)
+
+
+class AdminRolePolicyTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(AdminRolePolicyTestCase, self).setUp()
+ self.policy = self.useFixture(policy_fixture.RoleBasedPolicyFixture())
+ self.context = context.RequestContext('fake', 'fake', roles=['member'])
+ self.actions = policy.get_rules().keys()
+ self.target = {}
+
+ def test_enforce_admin_actions_with_nonadmin_context_throws(self):
+ """Check if non-admin context passed to admin actions throws
+ Policy not authorized exception
+ """
+ for action in self.actions:
+ self.assertRaises(exception.PolicyNotAuthorized, policy.enforce,
+ self.context, action, self.target)
diff --git a/nova/tests/unit/test_quota.py b/nova/tests/unit/test_quota.py
new file mode 100644
index 0000000000..9152f09a57
--- /dev/null
+++ b/nova/tests/unit/test_quota.py
@@ -0,0 +1,2765 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+from nova import compute
+from nova.compute import flavors
+from nova import context
+from nova import db
+from nova.db.sqlalchemy import api as sqa_api
+from nova.db.sqlalchemy import models as sqa_models
+from nova import exception
+from nova import quota
+from nova import test
+import nova.tests.unit.image.fake
+
+CONF = cfg.CONF
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+
+
+class QuotaIntegrationTestCase(test.TestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(QuotaIntegrationTestCase, self).setUp()
+ self.flags(compute_driver='nova.virt.fake.FakeDriver',
+ quota_instances=2,
+ quota_cores=4,
+ quota_floating_ips=1,
+ network_manager='nova.network.manager.FlatDHCPManager')
+
+ # Apparently needed by the RPC tests...
+ self.network = self.start_service('network')
+
+ self.user_id = 'admin'
+ self.project_id = 'admin'
+ self.context = context.RequestContext(self.user_id,
+ self.project_id,
+ is_admin=True)
+
+ nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
+
+ self.compute_api = compute.API()
+
+ def tearDown(self):
+ super(QuotaIntegrationTestCase, self).tearDown()
+ nova.tests.unit.image.fake.FakeImageService_reset()
+
+ def _create_instance(self, cores=2):
+ """Create a test instance."""
+ inst = {}
+ inst['image_id'] = 'cedef40a-ed67-4d10-800e-17455edce175'
+ inst['reservation_id'] = 'r-fakeres'
+ inst['user_id'] = self.user_id
+ inst['project_id'] = self.project_id
+ inst['instance_type_id'] = '3' # m1.large
+ inst['vcpus'] = cores
+ return db.instance_create(self.context, inst)
+
+ def test_too_many_instances(self):
+ instance_uuids = []
+ for i in range(CONF.quota_instances):
+ instance = self._create_instance()
+ instance_uuids.append(instance['uuid'])
+ inst_type = flavors.get_flavor_by_name('m1.small')
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ try:
+ self.compute_api.create(self.context, min_count=1, max_count=1,
+ instance_type=inst_type,
+ image_href=image_uuid)
+ except exception.QuotaError as e:
+ expected_kwargs = {'code': 413, 'resource': 'cores', 'req': 1,
+ 'used': 4, 'allowed': 4, 'overs': 'cores,instances'}
+ self.assertEqual(e.kwargs, expected_kwargs)
+ else:
+ self.fail('Expected QuotaError exception')
+ for instance_uuid in instance_uuids:
+ db.instance_destroy(self.context, instance_uuid)
+
+ def test_too_many_cores(self):
+ instance = self._create_instance(cores=4)
+ inst_type = flavors.get_flavor_by_name('m1.small')
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ try:
+ self.compute_api.create(self.context, min_count=1, max_count=1,
+ instance_type=inst_type,
+ image_href=image_uuid)
+ except exception.QuotaError as e:
+ expected_kwargs = {'code': 413, 'resource': 'cores', 'req': 1,
+ 'used': 4, 'allowed': 4, 'overs': 'cores'}
+ self.assertEqual(e.kwargs, expected_kwargs)
+ else:
+ self.fail('Expected QuotaError exception')
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_many_cores_with_unlimited_quota(self):
+ # Setting cores quota to unlimited:
+ self.flags(quota_cores=-1)
+ instance = self._create_instance(cores=4)
+ db.instance_destroy(self.context, instance['uuid'])
+
+ def test_too_many_addresses(self):
+ address = '192.168.0.100'
+ db.floating_ip_create(context.get_admin_context(),
+ {'address': address,
+ 'project_id': self.project_id})
+ self.assertRaises(exception.QuotaError,
+ self.network.allocate_floating_ip,
+ self.context,
+ self.project_id)
+ db.floating_ip_destroy(context.get_admin_context(), address)
+
+ def test_auto_assigned(self):
+ address = '192.168.0.100'
+ db.floating_ip_create(context.get_admin_context(),
+ {'address': address,
+ 'project_id': self.project_id})
+ # auto allocated addresses should not be counted
+ self.assertRaises(exception.NoMoreFloatingIps,
+ self.network.allocate_floating_ip,
+ self.context,
+ self.project_id,
+ True)
+ db.floating_ip_destroy(context.get_admin_context(), address)
+
+ def test_too_many_metadata_items(self):
+ metadata = {}
+ for i in range(CONF.quota_metadata_items + 1):
+ metadata['key%s' % i] = 'value%s' % i
+ inst_type = flavors.get_flavor_by_name('m1.small')
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ self.assertRaises(exception.QuotaError, self.compute_api.create,
+ self.context,
+ min_count=1,
+ max_count=1,
+ instance_type=inst_type,
+ image_href=image_uuid,
+ metadata=metadata)
+
+ def _create_with_injected_files(self, files):
+ api = self.compute_api
+ inst_type = flavors.get_flavor_by_name('m1.small')
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ api.create(self.context, min_count=1, max_count=1,
+ instance_type=inst_type, image_href=image_uuid,
+ injected_files=files)
+
+ def test_no_injected_files(self):
+ api = self.compute_api
+ inst_type = flavors.get_flavor_by_name('m1.small')
+ image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
+ api.create(self.context,
+ instance_type=inst_type,
+ image_href=image_uuid)
+
+ def test_max_injected_files(self):
+ files = []
+ for i in xrange(CONF.quota_injected_files):
+ files.append(('/my/path%d' % i, 'config = test\n'))
+ self._create_with_injected_files(files) # no QuotaError
+
+ def test_too_many_injected_files(self):
+ files = []
+ for i in xrange(CONF.quota_injected_files + 1):
+ files.append(('/my/path%d' % i, 'my\ncontent%d\n' % i))
+ self.assertRaises(exception.QuotaError,
+ self._create_with_injected_files, files)
+
+ def test_max_injected_file_content_bytes(self):
+ max = CONF.quota_injected_file_content_bytes
+ content = ''.join(['a' for i in xrange(max)])
+ files = [('/test/path', content)]
+ self._create_with_injected_files(files) # no QuotaError
+
+ def test_too_many_injected_file_content_bytes(self):
+ max = CONF.quota_injected_file_content_bytes
+ content = ''.join(['a' for i in xrange(max + 1)])
+ files = [('/test/path', content)]
+ self.assertRaises(exception.QuotaError,
+ self._create_with_injected_files, files)
+
+ def test_max_injected_file_path_bytes(self):
+ max = CONF.quota_injected_file_path_length
+ path = ''.join(['a' for i in xrange(max)])
+ files = [(path, 'config = quotatest')]
+ self._create_with_injected_files(files) # no QuotaError
+
+ def test_too_many_injected_file_path_bytes(self):
+ max = CONF.quota_injected_file_path_length
+ path = ''.join(['a' for i in xrange(max + 1)])
+ files = [(path, 'config = quotatest')]
+ self.assertRaises(exception.QuotaError,
+ self._create_with_injected_files, files)
+
+ def test_reservation_expire(self):
+ self.useFixture(test.TimeOverride())
+
+ def assertInstancesReserved(reserved):
+ result = quota.QUOTAS.get_project_quotas(self.context,
+ self.context.project_id)
+ self.assertEqual(result['instances']['reserved'], reserved)
+
+ quota.QUOTAS.reserve(self.context,
+ expire=60,
+ instances=2)
+
+ assertInstancesReserved(2)
+
+ timeutils.advance_time_seconds(80)
+
+ quota.QUOTAS.expire(self.context)
+
+ assertInstancesReserved(0)
+
+
+class FakeContext(object):
+ def __init__(self, project_id, quota_class):
+ self.is_admin = False
+ self.user_id = 'fake_user'
+ self.project_id = project_id
+ self.quota_class = quota_class
+ self.read_deleted = 'no'
+
+ def elevated(self):
+ elevated = self.__class__(self.project_id, self.quota_class)
+ elevated.is_admin = True
+ return elevated
+
+
+class FakeDriver(object):
+ def __init__(self, by_project=None, by_user=None, by_class=None,
+ reservations=None):
+ self.called = []
+ self.by_project = by_project or {}
+ self.by_user = by_user or {}
+ self.by_class = by_class or {}
+ self.reservations = reservations or []
+
+ def get_by_project_and_user(self, context, project_id, user_id, resource):
+ self.called.append(('get_by_project_and_user',
+ context, project_id, user_id, resource))
+ try:
+ return self.by_user[user_id][resource]
+ except KeyError:
+ raise exception.ProjectUserQuotaNotFound(project_id=project_id,
+ user_id=user_id)
+
+ def get_by_project(self, context, project_id, resource):
+ self.called.append(('get_by_project', context, project_id, resource))
+ try:
+ return self.by_project[project_id][resource]
+ except KeyError:
+ raise exception.ProjectQuotaNotFound(project_id=project_id)
+
+ def get_by_class(self, context, quota_class, resource):
+ self.called.append(('get_by_class', context, quota_class, resource))
+ try:
+ return self.by_class[quota_class][resource]
+ except KeyError:
+ raise exception.QuotaClassNotFound(class_name=quota_class)
+
+ def get_defaults(self, context, resources):
+ self.called.append(('get_defaults', context, resources))
+ return resources
+
+ def get_class_quotas(self, context, resources, quota_class,
+ defaults=True):
+ self.called.append(('get_class_quotas', context, resources,
+ quota_class, defaults))
+ return resources
+
+ def get_user_quotas(self, context, resources, project_id, user_id,
+ quota_class=None, defaults=True, usages=True):
+ self.called.append(('get_user_quotas', context, resources,
+ project_id, user_id, quota_class, defaults,
+ usages))
+ return resources
+
+ def get_project_quotas(self, context, resources, project_id,
+ quota_class=None, defaults=True, usages=True,
+ remains=False):
+ self.called.append(('get_project_quotas', context, resources,
+ project_id, quota_class, defaults, usages,
+ remains))
+ return resources
+
+ def limit_check(self, context, resources, values, project_id=None,
+ user_id=None):
+ self.called.append(('limit_check', context, resources,
+ values, project_id, user_id))
+
+ def reserve(self, context, resources, deltas, expire=None,
+ project_id=None, user_id=None):
+ self.called.append(('reserve', context, resources, deltas,
+ expire, project_id, user_id))
+ return self.reservations
+
+ def commit(self, context, reservations, project_id=None, user_id=None):
+ self.called.append(('commit', context, reservations, project_id,
+ user_id))
+
+ def rollback(self, context, reservations, project_id=None, user_id=None):
+ self.called.append(('rollback', context, reservations, project_id,
+ user_id))
+
+ def usage_reset(self, context, resources):
+ self.called.append(('usage_reset', context, resources))
+
+ def destroy_all_by_project_and_user(self, context, project_id, user_id):
+ self.called.append(('destroy_all_by_project_and_user', context,
+ project_id, user_id))
+
+ def destroy_all_by_project(self, context, project_id):
+ self.called.append(('destroy_all_by_project', context, project_id))
+
+ def expire(self, context):
+ self.called.append(('expire', context))
+
+
+class BaseResourceTestCase(test.TestCase):
+ def test_no_flag(self):
+ resource = quota.BaseResource('test_resource')
+
+ self.assertEqual(resource.name, 'test_resource')
+ self.assertIsNone(resource.flag)
+ self.assertEqual(resource.default, -1)
+
+ def test_with_flag(self):
+ # We know this flag exists, so use it...
+ self.flags(quota_instances=10)
+ resource = quota.BaseResource('test_resource', 'quota_instances')
+
+ self.assertEqual(resource.name, 'test_resource')
+ self.assertEqual(resource.flag, 'quota_instances')
+ self.assertEqual(resource.default, 10)
+
+ def test_with_flag_no_quota(self):
+ self.flags(quota_instances=-1)
+ resource = quota.BaseResource('test_resource', 'quota_instances')
+
+ self.assertEqual(resource.name, 'test_resource')
+ self.assertEqual(resource.flag, 'quota_instances')
+ self.assertEqual(resource.default, -1)
+
+ def test_quota_no_project_no_class(self):
+ self.flags(quota_instances=10)
+ resource = quota.BaseResource('test_resource', 'quota_instances')
+ driver = FakeDriver()
+ context = FakeContext(None, None)
+ quota_value = resource.quota(driver, context)
+
+ self.assertEqual(quota_value, 10)
+
+ def test_quota_with_project_no_class(self):
+ self.flags(quota_instances=10)
+ resource = quota.BaseResource('test_resource', 'quota_instances')
+ driver = FakeDriver(by_project=dict(
+ test_project=dict(test_resource=15),
+ ))
+ context = FakeContext('test_project', None)
+ quota_value = resource.quota(driver, context)
+
+ self.assertEqual(quota_value, 15)
+
+ def test_quota_no_project_with_class(self):
+ self.flags(quota_instances=10)
+ resource = quota.BaseResource('test_resource', 'quota_instances')
+ driver = FakeDriver(by_class=dict(
+ test_class=dict(test_resource=20),
+ ))
+ context = FakeContext(None, 'test_class')
+ quota_value = resource.quota(driver, context)
+
+ self.assertEqual(quota_value, 20)
+
+ def test_quota_with_project_with_class(self):
+ self.flags(quota_instances=10)
+ resource = quota.BaseResource('test_resource', 'quota_instances')
+ driver = FakeDriver(by_project=dict(
+ test_project=dict(test_resource=15),
+ ),
+ by_class=dict(
+ test_class=dict(test_resource=20),
+ ))
+ context = FakeContext('test_project', 'test_class')
+ quota_value = resource.quota(driver, context)
+
+ self.assertEqual(quota_value, 15)
+
+ def test_quota_override_project_with_class(self):
+ self.flags(quota_instances=10)
+ resource = quota.BaseResource('test_resource', 'quota_instances')
+ driver = FakeDriver(by_project=dict(
+ test_project=dict(test_resource=15),
+ override_project=dict(test_resource=20),
+ ))
+ context = FakeContext('test_project', 'test_class')
+ quota_value = resource.quota(driver, context,
+ project_id='override_project')
+
+ self.assertEqual(quota_value, 20)
+
+ def test_quota_with_project_override_class(self):
+ self.flags(quota_instances=10)
+ resource = quota.BaseResource('test_resource', 'quota_instances')
+ driver = FakeDriver(by_class=dict(
+ test_class=dict(test_resource=15),
+ override_class=dict(test_resource=20),
+ ))
+ context = FakeContext('test_project', 'test_class')
+ quota_value = resource.quota(driver, context,
+ quota_class='override_class')
+
+ self.assertEqual(quota_value, 20)
+
+ def test_valid_method_call_check_invalid_input(self):
+ resources = {'dummy': 1}
+
+ self.assertRaises(exception.InvalidQuotaMethodUsage,
+ quota._valid_method_call_check_resources,
+ resources, 'limit')
+
+ def test_valid_method_call_check_invalid_method(self):
+ resources = {'key_pairs': 1}
+
+ self.assertRaises(exception.InvalidQuotaMethodUsage,
+ quota._valid_method_call_check_resources,
+ resources, 'dummy')
+
+ def test_valid_method_call_check_multiple(self):
+ resources = {'key_pairs': 1, 'dummy': 2}
+
+ self.assertRaises(exception.InvalidQuotaMethodUsage,
+ quota._valid_method_call_check_resources,
+ resources, 'check')
+
+ resources = {'key_pairs': 1, 'instances': 2, 'dummy': 3}
+
+ self.assertRaises(exception.InvalidQuotaMethodUsage,
+ quota._valid_method_call_check_resources,
+ resources, 'check')
+
+ def test_valid_method_call_check_wrong_method_reserve(self):
+ resources = {'key_pairs': 1}
+
+ self.assertRaises(exception.InvalidQuotaMethodUsage,
+ quota._valid_method_call_check_resources,
+ resources, 'reserve')
+
+ def test_valid_method_call_check_wrong_method_check(self):
+ resources = {'fixed_ips': 1}
+
+ self.assertRaises(exception.InvalidQuotaMethodUsage,
+ quota._valid_method_call_check_resources,
+ resources, 'check')
+
+
+class QuotaEngineTestCase(test.TestCase):
+ def test_init(self):
+ quota_obj = quota.QuotaEngine()
+
+ self.assertEqual(quota_obj._resources, {})
+ self.assertIsInstance(quota_obj._driver, quota.DbQuotaDriver)
+
+ def test_init_override_string(self):
+ quota_obj = quota.QuotaEngine(
+ quota_driver_class='nova.tests.unit.test_quota.FakeDriver')
+
+ self.assertEqual(quota_obj._resources, {})
+ self.assertIsInstance(quota_obj._driver, FakeDriver)
+
+ def test_init_override_obj(self):
+ quota_obj = quota.QuotaEngine(quota_driver_class=FakeDriver)
+
+ self.assertEqual(quota_obj._resources, {})
+ self.assertEqual(quota_obj._driver, FakeDriver)
+
+ def test_register_resource(self):
+ quota_obj = quota.QuotaEngine()
+ resource = quota.AbsoluteResource('test_resource')
+ quota_obj.register_resource(resource)
+
+ self.assertEqual(quota_obj._resources, dict(test_resource=resource))
+
+ def test_register_resources(self):
+ quota_obj = quota.QuotaEngine()
+ resources = [
+ quota.AbsoluteResource('test_resource1'),
+ quota.AbsoluteResource('test_resource2'),
+ quota.AbsoluteResource('test_resource3'),
+ ]
+ quota_obj.register_resources(resources)
+
+ self.assertEqual(quota_obj._resources, dict(
+ test_resource1=resources[0],
+ test_resource2=resources[1],
+ test_resource3=resources[2],
+ ))
+
+ def test_get_by_project_and_user(self):
+ context = FakeContext('test_project', 'test_class')
+ driver = FakeDriver(by_user=dict(
+ fake_user=dict(test_resource=42)))
+ quota_obj = quota.QuotaEngine(quota_driver_class=driver)
+ result = quota_obj.get_by_project_and_user(context, 'test_project',
+ 'fake_user', 'test_resource')
+
+ self.assertEqual(driver.called, [
+ ('get_by_project_and_user', context, 'test_project',
+ 'fake_user', 'test_resource'),
+ ])
+ self.assertEqual(result, 42)
+
+ def test_get_by_project(self):
+ context = FakeContext('test_project', 'test_class')
+ driver = FakeDriver(by_project=dict(
+ test_project=dict(test_resource=42)))
+ quota_obj = quota.QuotaEngine(quota_driver_class=driver)
+ result = quota_obj.get_by_project(context, 'test_project',
+ 'test_resource')
+
+ self.assertEqual(driver.called, [
+ ('get_by_project', context, 'test_project', 'test_resource'),
+ ])
+ self.assertEqual(result, 42)
+
+ def test_get_by_class(self):
+ context = FakeContext('test_project', 'test_class')
+ driver = FakeDriver(by_class=dict(
+ test_class=dict(test_resource=42)))
+ quota_obj = quota.QuotaEngine(quota_driver_class=driver)
+ result = quota_obj.get_by_class(context, 'test_class', 'test_resource')
+
+ self.assertEqual(driver.called, [
+ ('get_by_class', context, 'test_class', 'test_resource'),
+ ])
+ self.assertEqual(result, 42)
+
+ def _make_quota_obj(self, driver):
+ quota_obj = quota.QuotaEngine(quota_driver_class=driver)
+ resources = [
+ quota.AbsoluteResource('test_resource4'),
+ quota.AbsoluteResource('test_resource3'),
+ quota.AbsoluteResource('test_resource2'),
+ quota.AbsoluteResource('test_resource1'),
+ ]
+ quota_obj.register_resources(resources)
+
+ return quota_obj
+
+ def test_get_defaults(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ result = quota_obj.get_defaults(context)
+
+ self.assertEqual(driver.called, [
+ ('get_defaults', context, quota_obj._resources),
+ ])
+ self.assertEqual(result, quota_obj._resources)
+
+ def test_get_class_quotas(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ result1 = quota_obj.get_class_quotas(context, 'test_class')
+ result2 = quota_obj.get_class_quotas(context, 'test_class', False)
+
+ self.assertEqual(driver.called, [
+ ('get_class_quotas', context, quota_obj._resources,
+ 'test_class', True),
+ ('get_class_quotas', context, quota_obj._resources,
+ 'test_class', False),
+ ])
+ self.assertEqual(result1, quota_obj._resources)
+ self.assertEqual(result2, quota_obj._resources)
+
+ def test_get_user_quotas(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ result1 = quota_obj.get_user_quotas(context, 'test_project',
+ 'fake_user')
+ result2 = quota_obj.get_user_quotas(context, 'test_project',
+ 'fake_user',
+ quota_class='test_class',
+ defaults=False,
+ usages=False)
+
+ self.assertEqual(driver.called, [
+ ('get_user_quotas', context, quota_obj._resources,
+ 'test_project', 'fake_user', None, True, True),
+ ('get_user_quotas', context, quota_obj._resources,
+ 'test_project', 'fake_user', 'test_class', False, False),
+ ])
+ self.assertEqual(result1, quota_obj._resources)
+ self.assertEqual(result2, quota_obj._resources)
+
+ def test_get_project_quotas(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ result1 = quota_obj.get_project_quotas(context, 'test_project')
+ result2 = quota_obj.get_project_quotas(context, 'test_project',
+ quota_class='test_class',
+ defaults=False,
+ usages=False)
+
+ self.assertEqual(driver.called, [
+ ('get_project_quotas', context, quota_obj._resources,
+ 'test_project', None, True, True, False),
+ ('get_project_quotas', context, quota_obj._resources,
+ 'test_project', 'test_class', False, False, False),
+ ])
+ self.assertEqual(result1, quota_obj._resources)
+ self.assertEqual(result2, quota_obj._resources)
+
+ def test_count_no_resource(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ self.assertRaises(exception.QuotaResourceUnknown,
+ quota_obj.count, context, 'test_resource5',
+ True, foo='bar')
+
+ def test_count_wrong_resource(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ self.assertRaises(exception.QuotaResourceUnknown,
+ quota_obj.count, context, 'test_resource1',
+ True, foo='bar')
+
+ def test_count(self):
+ def fake_count(context, *args, **kwargs):
+ self.assertEqual(args, (True,))
+ self.assertEqual(kwargs, dict(foo='bar'))
+ return 5
+
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ quota_obj.register_resource(quota.CountableResource('test_resource5',
+ fake_count))
+ result = quota_obj.count(context, 'test_resource5', True, foo='bar')
+
+ self.assertEqual(result, 5)
+
+ def test_limit_check(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ quota_obj.limit_check(context, test_resource1=4, test_resource2=3,
+ test_resource3=2, test_resource4=1)
+
+ self.assertEqual(driver.called, [
+ ('limit_check', context, quota_obj._resources, dict(
+ test_resource1=4,
+ test_resource2=3,
+ test_resource3=2,
+ test_resource4=1,
+ ), None, None),
+ ])
+
+ def test_reserve(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver(reservations=[
+ 'resv-01', 'resv-02', 'resv-03', 'resv-04',
+ ])
+ quota_obj = self._make_quota_obj(driver)
+ result1 = quota_obj.reserve(context, test_resource1=4,
+ test_resource2=3, test_resource3=2,
+ test_resource4=1)
+ result2 = quota_obj.reserve(context, expire=3600,
+ test_resource1=1, test_resource2=2,
+ test_resource3=3, test_resource4=4)
+ result3 = quota_obj.reserve(context, project_id='fake_project',
+ test_resource1=1, test_resource2=2,
+ test_resource3=3, test_resource4=4)
+
+ self.assertEqual(driver.called, [
+ ('reserve', context, quota_obj._resources, dict(
+ test_resource1=4,
+ test_resource2=3,
+ test_resource3=2,
+ test_resource4=1,
+ ), None, None, None),
+ ('reserve', context, quota_obj._resources, dict(
+ test_resource1=1,
+ test_resource2=2,
+ test_resource3=3,
+ test_resource4=4,
+ ), 3600, None, None),
+ ('reserve', context, quota_obj._resources, dict(
+ test_resource1=1,
+ test_resource2=2,
+ test_resource3=3,
+ test_resource4=4,
+ ), None, 'fake_project', None),
+ ])
+ self.assertEqual(result1, [
+ 'resv-01', 'resv-02', 'resv-03', 'resv-04',
+ ])
+ self.assertEqual(result2, [
+ 'resv-01', 'resv-02', 'resv-03', 'resv-04',
+ ])
+ self.assertEqual(result3, [
+ 'resv-01', 'resv-02', 'resv-03', 'resv-04',
+ ])
+
+ def test_commit(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ quota_obj.commit(context, ['resv-01', 'resv-02', 'resv-03'])
+
+ self.assertEqual(driver.called, [
+ ('commit', context, ['resv-01', 'resv-02', 'resv-03'], None,
+ None),
+ ])
+
+ def test_rollback(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ quota_obj.rollback(context, ['resv-01', 'resv-02', 'resv-03'])
+
+ self.assertEqual(driver.called, [
+ ('rollback', context, ['resv-01', 'resv-02', 'resv-03'], None,
+ None),
+ ])
+
+ def test_usage_reset(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ quota_obj.usage_reset(context, ['res1', 'res2', 'res3'])
+
+ self.assertEqual(driver.called, [
+ ('usage_reset', context, ['res1', 'res2', 'res3']),
+ ])
+
+ def test_destroy_all_by_project_and_user(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ quota_obj.destroy_all_by_project_and_user(context,
+ 'test_project', 'fake_user')
+
+ self.assertEqual(driver.called, [
+ ('destroy_all_by_project_and_user', context, 'test_project',
+ 'fake_user'),
+ ])
+
+ def test_destroy_all_by_project(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ quota_obj.destroy_all_by_project(context, 'test_project')
+
+ self.assertEqual(driver.called, [
+ ('destroy_all_by_project', context, 'test_project'),
+ ])
+
+ def test_expire(self):
+ context = FakeContext(None, None)
+ driver = FakeDriver()
+ quota_obj = self._make_quota_obj(driver)
+ quota_obj.expire(context)
+
+ self.assertEqual(driver.called, [
+ ('expire', context),
+ ])
+
+ def test_resources(self):
+ quota_obj = self._make_quota_obj(None)
+
+ self.assertEqual(quota_obj.resources,
+ ['test_resource1', 'test_resource2',
+ 'test_resource3', 'test_resource4'])
+
+
class DbQuotaDriverTestCase(test.TestCase):
    """Tests for the database-backed quota driver."""

    def setUp(self):
        super(DbQuotaDriverTestCase, self).setUp()

        # Pin every quota-related config option so the expected limits in
        # the tests below are deterministic.
        self.flags(quota_instances=10,
                   quota_cores=20,
                   quota_ram=50 * 1024,
                   quota_floating_ips=10,
                   quota_fixed_ips=10,
                   quota_metadata_items=128,
                   quota_injected_files=5,
                   quota_injected_file_content_bytes=10 * 1024,
                   quota_injected_file_path_length=255,
                   quota_security_groups=10,
                   quota_security_group_rules=20,
                   quota_server_groups=10,
                   quota_server_group_members=10,
                   reservation_expire=86400,
                   until_refresh=0,
                   max_age=0,
                   )

        self.driver = quota.DbQuotaDriver()

        # Records the names (or tuples) of stubbed db/driver calls, in order.
        self.calls = []

        # Freeze time so reservation-expiry computations are stable.
        self.useFixture(test.TimeOverride())
+
+ def test_get_defaults(self):
+ # Use our pre-defined resources
+ self._stub_quota_class_get_default()
+ result = self.driver.get_defaults(None, quota.QUOTAS._resources)
+
+ self.assertEqual(result, dict(
+ instances=5,
+ cores=20,
+ ram=25 * 1024,
+ floating_ips=10,
+ fixed_ips=10,
+ metadata_items=64,
+ injected_files=5,
+ injected_file_content_bytes=5 * 1024,
+ injected_file_path_bytes=255,
+ security_groups=10,
+ security_group_rules=20,
+ key_pairs=100,
+ server_groups=10,
+ server_group_members=10,
+ ))
+
+ def _stub_quota_class_get_default(self):
+ # Stub out quota_class_get_default
+ def fake_qcgd(context):
+ self.calls.append('quota_class_get_default')
+ return dict(
+ instances=5,
+ ram=25 * 1024,
+ metadata_items=64,
+ injected_file_content_bytes=5 * 1024,
+ )
+ self.stubs.Set(db, 'quota_class_get_default', fake_qcgd)
+
+ def _stub_quota_class_get_all_by_name(self):
+ # Stub out quota_class_get_all_by_name
+ def fake_qcgabn(context, quota_class):
+ self.calls.append('quota_class_get_all_by_name')
+ self.assertEqual(quota_class, 'test_class')
+ return dict(
+ instances=5,
+ ram=25 * 1024,
+ metadata_items=64,
+ injected_file_content_bytes=5 * 1024,
+ )
+ self.stubs.Set(db, 'quota_class_get_all_by_name', fake_qcgabn)
+
+ def test_get_class_quotas(self):
+ self._stub_quota_class_get_all_by_name()
+ result = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
+ 'test_class')
+
+ self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
+ self.assertEqual(result, dict(
+ instances=5,
+ cores=20,
+ ram=25 * 1024,
+ floating_ips=10,
+ fixed_ips=10,
+ metadata_items=64,
+ injected_files=5,
+ injected_file_content_bytes=5 * 1024,
+ injected_file_path_bytes=255,
+ security_groups=10,
+ security_group_rules=20,
+ key_pairs=100,
+ server_groups=10,
+ server_group_members=10,
+ ))
+
+ def test_get_class_quotas_no_defaults(self):
+ self._stub_quota_class_get_all_by_name()
+ result = self.driver.get_class_quotas(None, quota.QUOTAS._resources,
+ 'test_class', False)
+
+ self.assertEqual(self.calls, ['quota_class_get_all_by_name'])
+ self.assertEqual(result, dict(
+ instances=5,
+ ram=25 * 1024,
+ metadata_items=64,
+ injected_file_content_bytes=5 * 1024,
+ ))
+
    def _stub_get_by_project_and_user(self):
        # Stub the three db lookups used by get_user_quotas plus the quota
        # class lookup; each fake records its name in self.calls.
        def fake_qgabpau(context, project_id, user_id):
            # Per-user quota overrides.
            self.calls.append('quota_get_all_by_project_and_user')
            self.assertEqual(project_id, 'test_project')
            self.assertEqual(user_id, 'fake_user')
            return dict(
                cores=10,
                injected_files=2,
                injected_file_path_bytes=127,
            )

        def fake_qgabp(context, project_id):
            # Per-project quota overrides.
            self.calls.append('quota_get_all_by_project')
            self.assertEqual(project_id, 'test_project')
            return {
                'cores': 10,
                'injected_files': 2,
                'injected_file_path_bytes': 127,
            }

        def fake_qugabpau(context, project_id, user_id):
            # Current in-use/reserved counts for the user.
            self.calls.append('quota_usage_get_all_by_project_and_user')
            self.assertEqual(project_id, 'test_project')
            self.assertEqual(user_id, 'fake_user')
            return dict(
                instances=dict(in_use=2, reserved=2),
                cores=dict(in_use=4, reserved=4),
                ram=dict(in_use=10 * 1024, reserved=0),
                floating_ips=dict(in_use=2, reserved=0),
                metadata_items=dict(in_use=0, reserved=0),
                injected_files=dict(in_use=0, reserved=0),
                injected_file_content_bytes=dict(in_use=0, reserved=0),
                injected_file_path_bytes=dict(in_use=0, reserved=0),
            )

        self.stubs.Set(db, 'quota_get_all_by_project_and_user', fake_qgabpau)
        self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
        self.stubs.Set(db, 'quota_usage_get_all_by_project_and_user',
                       fake_qugabpau)

        self._stub_quota_class_get_all_by_name()
+
+ def test_get_user_quotas(self):
+ self.maxDiff = None
+ self._stub_get_by_project_and_user()
+ result = self.driver.get_user_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project', 'fake_user')
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project_and_user',
+ 'quota_get_all_by_project',
+ 'quota_usage_get_all_by_project_and_user',
+ 'quota_class_get_all_by_name',
+ ])
+ self.assertEqual(result, dict(
+ instances=dict(
+ limit=5,
+ in_use=2,
+ reserved=2,
+ ),
+ cores=dict(
+ limit=10,
+ in_use=4,
+ reserved=4,
+ ),
+ ram=dict(
+ limit=25 * 1024,
+ in_use=10 * 1024,
+ reserved=0,
+ ),
+ floating_ips=dict(
+ limit=10,
+ in_use=2,
+ reserved=0,
+ ),
+ fixed_ips=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ metadata_items=dict(
+ limit=64,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_files=dict(
+ limit=2,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_content_bytes=dict(
+ limit=5 * 1024,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ in_use=0,
+ reserved=0,
+ ),
+ security_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ security_group_rules=dict(
+ limit=20,
+ in_use=0,
+ reserved=0,
+ ),
+ key_pairs=dict(
+ limit=100,
+ in_use=0,
+ reserved=0,
+ ),
+ server_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ server_group_members=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ ))
+
+ def _stub_get_by_project_and_user_specific(self):
+ def fake_quota_get(context, project_id, resource, user_id=None):
+ self.calls.append('quota_get')
+ self.assertEqual(project_id, 'test_project')
+ self.assertEqual(user_id, 'fake_user')
+ self.assertEqual(resource, 'test_resource')
+ return dict(
+ test_resource=dict(in_use=20, reserved=10),
+ )
+ self.stubs.Set(db, 'quota_get', fake_quota_get)
+
+ def test_get_by_project_and_user(self):
+ self._stub_get_by_project_and_user_specific()
+ result = self.driver.get_by_project_and_user(
+ FakeContext('test_project', 'test_class'),
+ 'test_project', 'fake_user', 'test_resource')
+
+ self.assertEqual(self.calls, ['quota_get'])
+ self.assertEqual(result, dict(
+ test_resource=dict(in_use=20, reserved=10),
+ ))
+
    def _stub_get_by_project(self):
        # Stub the project-level db lookups used by get_project_quotas,
        # plus the class override/default lookups.
        def fake_qgabp(context, project_id):
            # Per-project quota overrides.
            self.calls.append('quota_get_all_by_project')
            self.assertEqual(project_id, 'test_project')
            return dict(
                cores=10,
                injected_files=2,
                injected_file_path_bytes=127,
            )

        def fake_qugabp(context, project_id):
            # Current in-use/reserved counts for the project.
            self.calls.append('quota_usage_get_all_by_project')
            self.assertEqual(project_id, 'test_project')
            return dict(
                instances=dict(in_use=2, reserved=2),
                cores=dict(in_use=4, reserved=4),
                ram=dict(in_use=10 * 1024, reserved=0),
                floating_ips=dict(in_use=2, reserved=0),
                metadata_items=dict(in_use=0, reserved=0),
                injected_files=dict(in_use=0, reserved=0),
                injected_file_content_bytes=dict(in_use=0, reserved=0),
                injected_file_path_bytes=dict(in_use=0, reserved=0),
            )

        self.stubs.Set(db, 'quota_get_all_by_project', fake_qgabp)
        self.stubs.Set(db, 'quota_usage_get_all_by_project', fake_qugabp)

        self._stub_quota_class_get_all_by_name()
        self._stub_quota_class_get_default()
+
+ def test_get_project_quotas(self):
+ self.maxDiff = None
+ self._stub_get_by_project()
+ result = self.driver.get_project_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project')
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project',
+ 'quota_usage_get_all_by_project',
+ 'quota_class_get_all_by_name',
+ 'quota_class_get_default',
+ ])
+ self.assertEqual(result, dict(
+ instances=dict(
+ limit=5,
+ in_use=2,
+ reserved=2,
+ ),
+ cores=dict(
+ limit=10,
+ in_use=4,
+ reserved=4,
+ ),
+ ram=dict(
+ limit=25 * 1024,
+ in_use=10 * 1024,
+ reserved=0,
+ ),
+ floating_ips=dict(
+ limit=10,
+ in_use=2,
+ reserved=0,
+ ),
+ fixed_ips=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ metadata_items=dict(
+ limit=64,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_files=dict(
+ limit=2,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_content_bytes=dict(
+ limit=5 * 1024,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ in_use=0,
+ reserved=0,
+ ),
+ security_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ security_group_rules=dict(
+ limit=20,
+ in_use=0,
+ reserved=0,
+ ),
+ key_pairs=dict(
+ limit=100,
+ in_use=0,
+ reserved=0,
+ ),
+ server_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ server_group_members=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ ))
+
+ def test_get_user_quotas_alt_context_no_class(self):
+ self.maxDiff = None
+ self._stub_get_by_project_and_user()
+ result = self.driver.get_user_quotas(
+ FakeContext('test_project', None),
+ quota.QUOTAS._resources, 'test_project', 'fake_user')
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project_and_user',
+ 'quota_get_all_by_project',
+ 'quota_usage_get_all_by_project_and_user',
+ ])
+ self.assertEqual(result, dict(
+ instances=dict(
+ limit=10,
+ in_use=2,
+ reserved=2,
+ ),
+ cores=dict(
+ limit=10,
+ in_use=4,
+ reserved=4,
+ ),
+ ram=dict(
+ limit=50 * 1024,
+ in_use=10 * 1024,
+ reserved=0,
+ ),
+ floating_ips=dict(
+ limit=10,
+ in_use=2,
+ reserved=0,
+ ),
+ fixed_ips=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ metadata_items=dict(
+ limit=128,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_files=dict(
+ limit=2,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_content_bytes=dict(
+ limit=10 * 1024,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ in_use=0,
+ reserved=0,
+ ),
+ security_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ security_group_rules=dict(
+ limit=20,
+ in_use=0,
+ reserved=0,
+ ),
+ key_pairs=dict(
+ limit=100,
+ in_use=0,
+ reserved=0,
+ ),
+ server_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ server_group_members=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ ))
+
+ def test_get_project_quotas_alt_context_no_class(self):
+ self.maxDiff = None
+ self._stub_get_by_project()
+ result = self.driver.get_project_quotas(
+ FakeContext('other_project', 'other_class'),
+ quota.QUOTAS._resources, 'test_project')
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project',
+ 'quota_usage_get_all_by_project',
+ 'quota_class_get_default',
+ ])
+ self.assertEqual(result, dict(
+ instances=dict(
+ limit=5,
+ in_use=2,
+ reserved=2,
+ ),
+ cores=dict(
+ limit=10,
+ in_use=4,
+ reserved=4,
+ ),
+ ram=dict(
+ limit=25 * 1024,
+ in_use=10 * 1024,
+ reserved=0,
+ ),
+ floating_ips=dict(
+ limit=10,
+ in_use=2,
+ reserved=0,
+ ),
+ fixed_ips=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ metadata_items=dict(
+ limit=64,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_files=dict(
+ limit=2,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_content_bytes=dict(
+ limit=5 * 1024,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ in_use=0,
+ reserved=0,
+ ),
+ security_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ security_group_rules=dict(
+ limit=20,
+ in_use=0,
+ reserved=0,
+ ),
+ key_pairs=dict(
+ limit=100,
+ in_use=0,
+ reserved=0,
+ ),
+ server_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ server_group_members=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ ))
+
+ def test_get_user_quotas_alt_context_with_class(self):
+ self.maxDiff = None
+ self._stub_get_by_project_and_user()
+ result = self.driver.get_user_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project', 'fake_user',
+ quota_class='test_class')
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project_and_user',
+ 'quota_get_all_by_project',
+ 'quota_usage_get_all_by_project_and_user',
+ 'quota_class_get_all_by_name',
+ ])
+ self.assertEqual(result, dict(
+ instances=dict(
+ limit=5,
+ in_use=2,
+ reserved=2,
+ ),
+ cores=dict(
+ limit=10,
+ in_use=4,
+ reserved=4,
+ ),
+ ram=dict(
+ limit=25 * 1024,
+ in_use=10 * 1024,
+ reserved=0,
+ ),
+ floating_ips=dict(
+ limit=10,
+ in_use=2,
+ reserved=0,
+ ),
+ fixed_ips=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ metadata_items=dict(
+ limit=64,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_files=dict(
+ limit=2,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_content_bytes=dict(
+ limit=5 * 1024,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ in_use=0,
+ reserved=0,
+ ),
+ security_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ security_group_rules=dict(
+ limit=20,
+ in_use=0,
+ reserved=0,
+ ),
+ key_pairs=dict(
+ limit=100,
+ in_use=0,
+ reserved=0,
+ ),
+ server_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ server_group_members=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ ))
+
+ def test_get_project_quotas_alt_context_with_class(self):
+ self.maxDiff = None
+ self._stub_get_by_project()
+ result = self.driver.get_project_quotas(
+ FakeContext('other_project', 'other_class'),
+ quota.QUOTAS._resources, 'test_project', quota_class='test_class')
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project',
+ 'quota_usage_get_all_by_project',
+ 'quota_class_get_all_by_name',
+ 'quota_class_get_default',
+ ])
+ self.assertEqual(result, dict(
+ instances=dict(
+ limit=5,
+ in_use=2,
+ reserved=2,
+ ),
+ cores=dict(
+ limit=10,
+ in_use=4,
+ reserved=4,
+ ),
+ ram=dict(
+ limit=25 * 1024,
+ in_use=10 * 1024,
+ reserved=0,
+ ),
+ floating_ips=dict(
+ limit=10,
+ in_use=2,
+ reserved=0,
+ ),
+ fixed_ips=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ metadata_items=dict(
+ limit=64,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_files=dict(
+ limit=2,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_content_bytes=dict(
+ limit=5 * 1024,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ in_use=0,
+ reserved=0,
+ ),
+ security_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ security_group_rules=dict(
+ limit=20,
+ in_use=0,
+ reserved=0,
+ ),
+ key_pairs=dict(
+ limit=100,
+ in_use=0,
+ reserved=0,
+ ),
+ server_groups=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ server_group_members=dict(
+ limit=10,
+ in_use=0,
+ reserved=0,
+ ),
+ ))
+
+ def test_get_user_quotas_no_defaults(self):
+ self._stub_get_by_project_and_user()
+ result = self.driver.get_user_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project', 'fake_user',
+ defaults=False)
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project_and_user',
+ 'quota_get_all_by_project',
+ 'quota_usage_get_all_by_project_and_user',
+ 'quota_class_get_all_by_name',
+ ])
+ self.assertEqual(result, dict(
+ cores=dict(
+ limit=10,
+ in_use=4,
+ reserved=4,
+ ),
+ injected_files=dict(
+ limit=2,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ in_use=0,
+ reserved=0,
+ ),
+ ))
+
+ def test_get_project_quotas_no_defaults(self):
+ self._stub_get_by_project()
+ result = self.driver.get_project_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project', defaults=False)
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project',
+ 'quota_usage_get_all_by_project',
+ 'quota_class_get_all_by_name',
+ 'quota_class_get_default',
+ ])
+ self.assertEqual(result, dict(
+ cores=dict(
+ limit=10,
+ in_use=4,
+ reserved=4,
+ ),
+ injected_files=dict(
+ limit=2,
+ in_use=0,
+ reserved=0,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ in_use=0,
+ reserved=0,
+ ),
+ ))
+
+ def test_get_user_quotas_no_usages(self):
+ self._stub_get_by_project_and_user()
+ result = self.driver.get_user_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project', 'fake_user', usages=False)
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project_and_user',
+ 'quota_get_all_by_project',
+ 'quota_class_get_all_by_name',
+ ])
+ self.assertEqual(result, dict(
+ instances=dict(
+ limit=5,
+ ),
+ cores=dict(
+ limit=10,
+ ),
+ ram=dict(
+ limit=25 * 1024,
+ ),
+ floating_ips=dict(
+ limit=10,
+ ),
+ fixed_ips=dict(
+ limit=10,
+ ),
+ metadata_items=dict(
+ limit=64,
+ ),
+ injected_files=dict(
+ limit=2,
+ ),
+ injected_file_content_bytes=dict(
+ limit=5 * 1024,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ ),
+ security_groups=dict(
+ limit=10,
+ ),
+ security_group_rules=dict(
+ limit=20,
+ ),
+ key_pairs=dict(
+ limit=100,
+ ),
+ server_groups=dict(
+ limit=10,
+ ),
+ server_group_members=dict(
+ limit=10,
+ ),
+ ))
+
+ def test_get_project_quotas_no_usages(self):
+ self._stub_get_by_project()
+ result = self.driver.get_project_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project', usages=False)
+
+ self.assertEqual(self.calls, [
+ 'quota_get_all_by_project',
+ 'quota_class_get_all_by_name',
+ 'quota_class_get_default',
+ ])
+ self.assertEqual(result, dict(
+ instances=dict(
+ limit=5,
+ ),
+ cores=dict(
+ limit=10,
+ ),
+ ram=dict(
+ limit=25 * 1024,
+ ),
+ floating_ips=dict(
+ limit=10,
+ ),
+ fixed_ips=dict(
+ limit=10,
+ ),
+ metadata_items=dict(
+ limit=64,
+ ),
+ injected_files=dict(
+ limit=2,
+ ),
+ injected_file_content_bytes=dict(
+ limit=5 * 1024,
+ ),
+ injected_file_path_bytes=dict(
+ limit=127,
+ ),
+ security_groups=dict(
+ limit=10,
+ ),
+ security_group_rules=dict(
+ limit=20,
+ ),
+ key_pairs=dict(
+ limit=100,
+ ),
+ server_groups=dict(
+ limit=10,
+ ),
+ server_group_members=dict(
+ limit=10,
+ ),
+ ))
+
    def _stub_get_settable_quotas(self):
        # Fake the project/user quota reports and the per-user override
        # lookup so get_settable_quotas' min/max arithmetic can be checked.
        def fake_get_project_quotas(context, resources, project_id,
                                    quota_class=None, defaults=True,
                                    usages=True, remains=False,
                                    project_quotas=None):
            self.calls.append('get_project_quotas')
            result = {}
            for k, v in resources.items():
                limit = v.default
                reserved = 0
                if k == 'instances':
                    # Some headroom already allocated to users.
                    remains = v.default - 5
                    in_use = 1
                elif k == 'cores':
                    # Unlimited (-1) project limit.
                    remains = -1
                    in_use = 5
                    limit = -1
                else:
                    remains = v.default
                    in_use = 0
                result[k] = {'limit': limit, 'in_use': in_use,
                             'reserved': reserved, 'remains': remains}
            return result

        def fake_get_user_quotas(context, resources, project_id, user_id,
                                 quota_class=None, defaults=True,
                                 usages=True, project_quotas=None,
                                 user_quotas=None):
            self.calls.append('get_user_quotas')
            result = {}
            for k, v in resources.items():
                reserved = 0
                if k == 'instances':
                    in_use = 1
                elif k == 'cores':
                    in_use = 5
                    reserved = 10
                else:
                    in_use = 0
                result[k] = {'limit': v.default,
                             'in_use': in_use, 'reserved': reserved}
            return result

        def fake_qgabpau(context, project_id, user_id):
            # Explicit per-user overrides: instances capped, cores unlimited.
            self.calls.append('quota_get_all_by_project_and_user')
            return {'instances': 2, 'cores': -1}

        self.stubs.Set(self.driver, 'get_project_quotas',
                       fake_get_project_quotas)
        self.stubs.Set(self.driver, 'get_user_quotas',
                       fake_get_user_quotas)
        self.stubs.Set(db, 'quota_get_all_by_project_and_user',
                       fake_qgabpau)
+
+ def test_get_settable_quotas_with_user(self):
+ self._stub_get_settable_quotas()
+ result = self.driver.get_settable_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project', user_id='test_user')
+
+ self.assertEqual(self.calls, [
+ 'get_project_quotas',
+ 'quota_get_all_by_project_and_user',
+ 'get_user_quotas',
+ ])
+ self.assertEqual(result, {
+ 'instances': {
+ 'minimum': 1,
+ 'maximum': 7,
+ },
+ 'cores': {
+ 'minimum': 15,
+ 'maximum': -1,
+ },
+ 'ram': {
+ 'minimum': 0,
+ 'maximum': 50 * 1024,
+ },
+ 'floating_ips': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ 'fixed_ips': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ 'metadata_items': {
+ 'minimum': 0,
+ 'maximum': 128,
+ },
+ 'injected_files': {
+ 'minimum': 0,
+ 'maximum': 5,
+ },
+ 'injected_file_content_bytes': {
+ 'minimum': 0,
+ 'maximum': 10 * 1024,
+ },
+ 'injected_file_path_bytes': {
+ 'minimum': 0,
+ 'maximum': 255,
+ },
+ 'security_groups': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ 'security_group_rules': {
+ 'minimum': 0,
+ 'maximum': 20,
+ },
+ 'key_pairs': {
+ 'minimum': 0,
+ 'maximum': 100,
+ },
+ 'server_groups': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ 'server_group_members': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ })
+
+ def test_get_settable_quotas_without_user(self):
+ self._stub_get_settable_quotas()
+ result = self.driver.get_settable_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project')
+
+ self.assertEqual(self.calls, [
+ 'get_project_quotas',
+ ])
+ self.assertEqual(result, {
+ 'instances': {
+ 'minimum': 5,
+ 'maximum': -1,
+ },
+ 'cores': {
+ 'minimum': 5,
+ 'maximum': -1,
+ },
+ 'ram': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'floating_ips': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'fixed_ips': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'metadata_items': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'injected_files': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'injected_file_content_bytes': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'injected_file_path_bytes': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'security_groups': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'security_group_rules': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'key_pairs': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'server_groups': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ 'server_group_members': {
+ 'minimum': 0,
+ 'maximum': -1,
+ },
+ })
+
+ def test_get_settable_quotas_by_user_with_unlimited_value(self):
+ self._stub_get_settable_quotas()
+ result = self.driver.get_settable_quotas(
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources, 'test_project', user_id='test_user')
+
+ self.assertEqual(self.calls, [
+ 'get_project_quotas',
+ 'quota_get_all_by_project_and_user',
+ 'get_user_quotas',
+ ])
+ self.assertEqual(result, {
+ 'instances': {
+ 'minimum': 1,
+ 'maximum': 7,
+ },
+ 'cores': {
+ 'minimum': 15,
+ 'maximum': -1,
+ },
+ 'ram': {
+ 'minimum': 0,
+ 'maximum': 50 * 1024,
+ },
+ 'floating_ips': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ 'fixed_ips': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ 'metadata_items': {
+ 'minimum': 0,
+ 'maximum': 128,
+ },
+ 'injected_files': {
+ 'minimum': 0,
+ 'maximum': 5,
+ },
+ 'injected_file_content_bytes': {
+ 'minimum': 0,
+ 'maximum': 10 * 1024,
+ },
+ 'injected_file_path_bytes': {
+ 'minimum': 0,
+ 'maximum': 255,
+ },
+ 'security_groups': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ 'security_group_rules': {
+ 'minimum': 0,
+ 'maximum': 20,
+ },
+ 'key_pairs': {
+ 'minimum': 0,
+ 'maximum': 100,
+ },
+ 'server_groups': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ 'server_group_members': {
+ 'minimum': 0,
+ 'maximum': 10,
+ },
+ })
+
+ def _stub_get_project_quotas(self):
+ def fake_get_project_quotas(context, resources, project_id,
+ quota_class=None, defaults=True,
+ usages=True, remains=False,
+ project_quotas=None):
+ self.calls.append('get_project_quotas')
+ return dict((k, dict(limit=v.default))
+ for k, v in resources.items())
+
+ self.stubs.Set(self.driver, 'get_project_quotas',
+ fake_get_project_quotas)
+
+ def test_get_quotas_has_sync_unknown(self):
+ self._stub_get_project_quotas()
+ self.assertRaises(exception.QuotaResourceUnknown,
+ self.driver._get_quotas,
+ None, quota.QUOTAS._resources,
+ ['unknown'], True)
+ self.assertEqual(self.calls, [])
+
+ def test_get_quotas_no_sync_unknown(self):
+ self._stub_get_project_quotas()
+ self.assertRaises(exception.QuotaResourceUnknown,
+ self.driver._get_quotas,
+ None, quota.QUOTAS._resources,
+ ['unknown'], False)
+ self.assertEqual(self.calls, [])
+
+ def test_get_quotas_has_sync_no_sync_resource(self):
+ self._stub_get_project_quotas()
+ self.assertRaises(exception.QuotaResourceUnknown,
+ self.driver._get_quotas,
+ None, quota.QUOTAS._resources,
+ ['metadata_items'], True)
+ self.assertEqual(self.calls, [])
+
+ def test_get_quotas_no_sync_has_sync_resource(self):
+ self._stub_get_project_quotas()
+ self.assertRaises(exception.QuotaResourceUnknown,
+ self.driver._get_quotas,
+ None, quota.QUOTAS._resources,
+ ['instances'], False)
+ self.assertEqual(self.calls, [])
+
+ def test_get_quotas_has_sync(self):
+ self._stub_get_project_quotas()
+ result = self.driver._get_quotas(FakeContext('test_project',
+ 'test_class'),
+ quota.QUOTAS._resources,
+ ['instances', 'cores', 'ram',
+ 'floating_ips', 'security_groups',
+ 'server_groups'],
+ True,
+ project_id='test_project')
+
+ self.assertEqual(self.calls, ['get_project_quotas'])
+ self.assertEqual(result, dict(
+ instances=10,
+ cores=20,
+ ram=50 * 1024,
+ floating_ips=10,
+ security_groups=10,
+ server_groups=10,
+ ))
+
+ def test_get_quotas_no_sync(self):
+ self._stub_get_project_quotas()
+ result = self.driver._get_quotas(FakeContext('test_project',
+ 'test_class'),
+ quota.QUOTAS._resources,
+ ['metadata_items', 'injected_files',
+ 'injected_file_content_bytes',
+ 'injected_file_path_bytes',
+ 'security_group_rules',
+ 'server_group_members'], False,
+ project_id='test_project')
+
+ self.assertEqual(self.calls, ['get_project_quotas'])
+ self.assertEqual(result, dict(
+ metadata_items=128,
+ injected_files=5,
+ injected_file_content_bytes=10 * 1024,
+ injected_file_path_bytes=255,
+ security_group_rules=20,
+ server_group_members=10,
+ ))
+
+ def test_limit_check_under(self):
+ self._stub_get_project_quotas()
+ self.assertRaises(exception.InvalidQuotaValue,
+ self.driver.limit_check,
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(metadata_items=-1))
+
+ def test_limit_check_over(self):
+ self._stub_get_project_quotas()
+ self.assertRaises(exception.OverQuota,
+ self.driver.limit_check,
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(metadata_items=129))
+
+ def test_limit_check_project_overs(self):
+ self._stub_get_project_quotas()
+ self.assertRaises(exception.OverQuota,
+ self.driver.limit_check,
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(injected_file_content_bytes=10241,
+ injected_file_path_bytes=256))
+
+ def test_limit_check_unlimited(self):
+ self.flags(quota_metadata_items=-1)
+ self._stub_get_project_quotas()
+ self.driver.limit_check(FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(metadata_items=32767))
+
+ def test_limit_check(self):
+ self._stub_get_project_quotas()
+ self.driver.limit_check(FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(metadata_items=128))
+
    def _stub_quota_reserve(self):
        # Record the timing arguments handed to db.quota_reserve and
        # return a fixed list of reservation ids.
        def fake_quota_reserve(context, resources, quotas, user_quotas, deltas,
                               expire, until_refresh, max_age, project_id=None,
                               user_id=None):
            self.calls.append(('quota_reserve', expire, until_refresh,
                               max_age))
            return ['resv-1', 'resv-2', 'resv-3']
        self.stubs.Set(db, 'quota_reserve', fake_quota_reserve)
+
+ def test_reserve_bad_expire(self):
+ self._stub_get_project_quotas()
+ self._stub_quota_reserve()
+ self.assertRaises(exception.InvalidReservationExpiration,
+ self.driver.reserve,
+ FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(instances=2), expire='invalid')
+ self.assertEqual(self.calls, [])
+
+ def test_reserve_default_expire(self):
+ self._stub_get_project_quotas()
+ self._stub_quota_reserve()
+ result = self.driver.reserve(FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(instances=2))
+
+ expire = timeutils.utcnow() + datetime.timedelta(seconds=86400)
+ self.assertEqual(self.calls, [
+ 'get_project_quotas',
+ ('quota_reserve', expire, 0, 0),
+ ])
+ self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
+
+ def test_reserve_int_expire(self):
+ self._stub_get_project_quotas()
+ self._stub_quota_reserve()
+ result = self.driver.reserve(FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(instances=2), expire=3600)
+
+ expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
+ self.assertEqual(self.calls, [
+ 'get_project_quotas',
+ ('quota_reserve', expire, 0, 0),
+ ])
+ self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
+
+ def test_reserve_timedelta_expire(self):
+ self._stub_get_project_quotas()
+ self._stub_quota_reserve()
+ expire_delta = datetime.timedelta(seconds=60)
+ result = self.driver.reserve(FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(instances=2), expire=expire_delta)
+
+ expire = timeutils.utcnow() + expire_delta
+ self.assertEqual(self.calls, [
+ 'get_project_quotas',
+ ('quota_reserve', expire, 0, 0),
+ ])
+ self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
+
+ def test_reserve_datetime_expire(self):
+ self._stub_get_project_quotas()
+ self._stub_quota_reserve()
+ expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
+ result = self.driver.reserve(FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(instances=2), expire=expire)
+
+ self.assertEqual(self.calls, [
+ 'get_project_quotas',
+ ('quota_reserve', expire, 0, 0),
+ ])
+ self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
+
+ def test_reserve_until_refresh(self):
+ self._stub_get_project_quotas()
+ self._stub_quota_reserve()
+ self.flags(until_refresh=500)
+ expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
+ result = self.driver.reserve(FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(instances=2), expire=expire)
+
+ self.assertEqual(self.calls, [
+ 'get_project_quotas',
+ ('quota_reserve', expire, 500, 0),
+ ])
+ self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
+
+ def test_reserve_max_age(self):
+ self._stub_get_project_quotas()
+ self._stub_quota_reserve()
+ self.flags(max_age=86400)
+ expire = timeutils.utcnow() + datetime.timedelta(seconds=120)
+ result = self.driver.reserve(FakeContext('test_project', 'test_class'),
+ quota.QUOTAS._resources,
+ dict(instances=2), expire=expire)
+
+ self.assertEqual(self.calls, [
+ 'get_project_quotas',
+ ('quota_reserve', expire, 0, 86400),
+ ])
+ self.assertEqual(result, ['resv-1', 'resv-2', 'resv-3'])
+
    def test_usage_reset(self):
        """usage_reset updates each resource's usage under an elevated context.

        A missing usage row (QuotaUsageNotFound on 'nonexist') must be
        swallowed so the remaining resources are still reset.
        """
        calls = []

        def fake_quota_usage_update(context, project_id, user_id, resource,
                                    **kwargs):
            calls.append(('quota_usage_update', context, project_id, user_id,
                          resource, kwargs))
            if resource == 'nonexist':
                raise exception.QuotaUsageNotFound(project_id=project_id)
        self.stubs.Set(db, 'quota_usage_update', fake_quota_usage_update)

        ctx = FakeContext('test_project', 'test_class')
        resources = ['res1', 'res2', 'nonexist', 'res4']
        self.driver.usage_reset(ctx, resources)

        # Make sure we had some calls
        self.assertEqual(len(calls), len(resources))

        # Extract the elevated context that was used and do some
        # sanity checks
        elevated = calls[0][1]
        self.assertEqual(elevated.project_id, ctx.project_id)
        self.assertEqual(elevated.quota_class, ctx.quota_class)
        self.assertEqual(elevated.is_admin, True)

        # Now check that all the expected calls were made
        exemplar = [('quota_usage_update', elevated, 'test_project',
                     'fake_user', res, dict(in_use=-1)) for res in resources]
        self.assertEqual(calls, exemplar)
+
+
class FakeSession(object):
    """Minimal stand-in for a SQLAlchemy session used as a context manager."""

    def begin(self):
        # A real session returns a transaction object; the fake returns
        # itself so `with session.begin():` works.
        return self

    def add(self, instance):
        # Nothing is persisted by the fake.
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Never suppress exceptions raised inside the with-block.
        return False
+
+
class FakeUsage(sqa_models.QuotaUsage):
    """QuotaUsage model row whose save() is a no-op (no database involved)."""

    def save(self, *args, **kwargs):
        return None
+
+
+class QuotaReserveSqlAlchemyTestCase(test.TestCase):
+ # nova.db.sqlalchemy.api.quota_reserve is so complex it needs its
+ # own test case, and since it's a quota manipulator, this is the
+ # best place to put it...
+
    def setUp(self):
        """Wire quota_reserve to in-memory fakes.

        No real DB is touched: the session is a FakeSession, usage and
        reservation creation are recorded into dicts, and the per-resource
        sync functions are canned closures whose result depends on the
        seeded usage rows.
        """
        super(QuotaReserveSqlAlchemyTestCase, self).setUp()
        # Resources whose sync function was invoked by quota_reserve.
        self.sync_called = set()
        # Quota limits shared by most tests.
        self.quotas = dict(
            instances=5,
            cores=10,
            ram=10 * 1024,
            fixed_ips=5,
            )
        # Requested deltas shared by most tests.
        self.deltas = dict(
            instances=2,
            cores=4,
            ram=2 * 1024,
            fixed_ips=2,
            )

        def make_sync(res_name):
            # Build a fake sync function; quota_reserve calls it when a
            # usage row is missing, stale, negative, or due for refresh.
            def sync(context, project_id, user_id, session):
                self.sync_called.add(res_name)
                if res_name in self.usages:
                    if self.usages[res_name].in_use < 0:
                        # Negative usage "resyncs" to a fixed value of 2.
                        return {res_name: 2}
                    else:
                        # Otherwise pretend the refreshed count is one less.
                        return {res_name: self.usages[res_name].in_use - 1}
                # No existing row: refreshed count is zero.
                return {res_name: 0}
            return sync
        self.resources = {}

        # Snapshot the real sync-function registry so it can be restored.
        _existing_quota_sync_func_dict = dict(sqa_api.QUOTA_SYNC_FUNCTIONS)

        def restore_sync_functions():
            sqa_api.QUOTA_SYNC_FUNCTIONS.clear()
            sqa_api.QUOTA_SYNC_FUNCTIONS.update(_existing_quota_sync_func_dict)

        self.addCleanup(restore_sync_functions)

        # Register a fake sync function for each of the four resources.
        for res_name in ('instances', 'cores', 'ram', 'fixed_ips'):
            method_name = '_sync_%s' % res_name
            sqa_api.QUOTA_SYNC_FUNCTIONS[method_name] = make_sync(res_name)
            res = quota.ReservableResource(res_name, '_sync_%s' % res_name)
            self.resources[res_name] = res

        self.expire = timeutils.utcnow() + datetime.timedelta(seconds=3600)
        # Pre-existing usage rows (seeded by init_usage).
        self.usages = {}
        # Usage rows created by quota_reserve during the test.
        self.usages_created = {}
        # Reservation rows created by quota_reserve during the test.
        self.reservations_created = {}
        # Baseline expected usage rows; individual tests tweak fields.
        self.usages_list = [
            dict(resource='instances',
                 project_id='test_project',
                 user_id='fake_user',
                 in_use=2,
                 reserved=2,
                 until_refresh=None),
            dict(resource='cores',
                 project_id='test_project',
                 user_id='fake_user',
                 in_use=2,
                 reserved=4,
                 until_refresh=None),
            dict(resource='ram',
                 project_id='test_project',
                 user_id='fake_user',
                 in_use=2,
                 reserved=2 * 1024,
                 until_refresh=None),
            dict(resource='fixed_ips',
                 project_id='test_project',
                 # fixed_ips usage is tracked per-project, not per-user.
                 user_id=None,
                 in_use=2,
                 reserved=2,
                 until_refresh=None),
            ]

        def fake_get_session():
            return FakeSession()

        def fake_get_project_user_quota_usages(context, session, project_id,
                                               user_id):
            # Per-project and per-user views share the same fake rows.
            return self.usages.copy(), self.usages.copy()

        def fake_quota_usage_create(project_id, user_id, resource,
                                    in_use, reserved, until_refresh,
                                    session=None, save=True):
            quota_usage_ref = self._make_quota_usage(
                project_id, user_id, resource, in_use, reserved, until_refresh,
                timeutils.utcnow(), timeutils.utcnow())

            # Record creations so tests can assert on them.
            self.usages_created[resource] = quota_usage_ref

            return quota_usage_ref

        def fake_reservation_create(uuid, usage_id, project_id,
                                    user_id, resource, delta, expire,
                                    session=None):
            reservation_ref = self._make_reservation(
                uuid, usage_id, project_id, user_id, resource, delta, expire,
                timeutils.utcnow(), timeutils.utcnow())

            # Record creations so tests can assert on them.
            self.reservations_created[resource] = reservation_ref

            return reservation_ref

        self.stubs.Set(sqa_api, 'get_session', fake_get_session)
        self.stubs.Set(sqa_api, '_get_project_user_quota_usages',
                       fake_get_project_user_quota_usages)
        self.stubs.Set(sqa_api, '_quota_usage_create', fake_quota_usage_create)
        self.stubs.Set(sqa_api, '_reservation_create', fake_reservation_create)

        # Freeze time so expire/created_at comparisons are deterministic.
        self.useFixture(test.TimeOverride())
+
+ def _make_quota_usage(self, project_id, user_id, resource, in_use,
+ reserved, until_refresh, created_at, updated_at):
+ quota_usage_ref = FakeUsage()
+ quota_usage_ref.id = len(self.usages) + len(self.usages_created)
+ quota_usage_ref.project_id = project_id
+ quota_usage_ref.user_id = user_id
+ quota_usage_ref.resource = resource
+ quota_usage_ref.in_use = in_use
+ quota_usage_ref.reserved = reserved
+ quota_usage_ref.until_refresh = until_refresh
+ quota_usage_ref.created_at = created_at
+ quota_usage_ref.updated_at = updated_at
+ quota_usage_ref.deleted_at = None
+ quota_usage_ref.deleted = False
+
+ return quota_usage_ref
+
+ def init_usage(self, project_id, user_id, resource, in_use, reserved=0,
+ until_refresh=None, created_at=None, updated_at=None):
+ if created_at is None:
+ created_at = timeutils.utcnow()
+ if updated_at is None:
+ updated_at = timeutils.utcnow()
+ if resource == 'fixed_ips':
+ user_id = None
+
+ quota_usage_ref = self._make_quota_usage(project_id, user_id, resource,
+ in_use, reserved,
+ until_refresh,
+ created_at, updated_at)
+
+ self.usages[resource] = quota_usage_ref
+
+ def compare_usage(self, usage_dict, expected):
+ for usage in expected:
+ resource = usage['resource']
+ for key, value in usage.items():
+ actual = getattr(usage_dict[resource], key)
+ self.assertEqual(actual, value,
+ "%s != %s on usage for resource %s" %
+ (actual, value, resource))
+
+ def _make_reservation(self, uuid, usage_id, project_id, user_id, resource,
+ delta, expire, created_at, updated_at):
+ reservation_ref = sqa_models.Reservation()
+ reservation_ref.id = len(self.reservations_created)
+ reservation_ref.uuid = uuid
+ reservation_ref.usage_id = usage_id
+ reservation_ref.project_id = project_id
+ reservation_ref.user_id = user_id
+ reservation_ref.resource = resource
+ reservation_ref.delta = delta
+ reservation_ref.expire = expire
+ reservation_ref.created_at = created_at
+ reservation_ref.updated_at = updated_at
+ reservation_ref.deleted_at = None
+ reservation_ref.deleted = False
+
+ return reservation_ref
+
+ def compare_reservation(self, reservations, expected):
+ reservations = set(reservations)
+ for resv in expected:
+ resource = resv['resource']
+ resv_obj = self.reservations_created[resource]
+
+ self.assertIn(resv_obj.uuid, reservations)
+ reservations.discard(resv_obj.uuid)
+
+ for key, value in resv.items():
+ actual = getattr(resv_obj, key)
+ self.assertEqual(actual, value,
+ "%s != %s on reservation for resource %s" %
+ (actual, value, resource))
+
+ self.assertEqual(len(reservations), 0)
+
+ def _update_reservations_list(self, usage_id_change=False,
+ delta_change=False):
+ reservations_list = [
+ dict(resource='instances',
+ project_id='test_project',
+ delta=2),
+ dict(resource='cores',
+ project_id='test_project',
+ delta=4),
+ dict(resource='ram',
+ delta=2 * 1024),
+ dict(resource='fixed_ips',
+ project_id='test_project',
+ delta=2),
+ ]
+ if usage_id_change:
+ reservations_list[0]["usage_id"] = self.usages_created['instances']
+ reservations_list[1]["usage_id"] = self.usages_created['cores']
+ reservations_list[2]["usage_id"] = self.usages_created['ram']
+ reservations_list[3]["usage_id"] = self.usages_created['fixed_ips']
+ else:
+ reservations_list[0]["usage_id"] = self.usages['instances']
+ reservations_list[1]["usage_id"] = self.usages['cores']
+ reservations_list[2]["usage_id"] = self.usages['ram']
+ reservations_list[3]["usage_id"] = self.usages['fixed_ips']
+ if delta_change:
+ reservations_list[0]["delta"] = -2
+ reservations_list[1]["delta"] = -4
+ reservations_list[2]["delta"] = -2 * 1024
+ reservations_list[3]["delta"] = -2
+ return reservations_list
+
+ def _init_usages(self, *in_use, **kwargs):
+ for i, option in enumerate(('instances', 'cores', 'ram', 'fixed_ips')):
+ self.init_usage('test_project', 'fake_user',
+ option, in_use[i], **kwargs)
+ return FakeContext('test_project', 'test_class')
+
+ def test_quota_reserve_create_usages(self):
+ context = FakeContext('test_project', 'test_class')
+ result = sqa_api.quota_reserve(context, self.resources, self.quotas,
+ self.quotas, self.deltas, self.expire,
+ 0, 0)
+
+ self.assertEqual(self.sync_called, set(['instances', 'cores',
+ 'ram', 'fixed_ips']))
+ self.usages_list[0]["in_use"] = 0
+ self.usages_list[1]["in_use"] = 0
+ self.usages_list[2]["in_use"] = 0
+ self.usages_list[3]["in_use"] = 0
+ self.compare_usage(self.usages_created, self.usages_list)
+ reservations_list = self._update_reservations_list(True)
+ self.compare_reservation(result, reservations_list)
+
+ def test_quota_reserve_negative_in_use(self):
+ context = self._init_usages(-1, -1, -1, -1, until_refresh=1)
+ result = sqa_api.quota_reserve(context, self.resources, self.quotas,
+ self.quotas, self.deltas, self.expire,
+ 5, 0)
+
+ self.assertEqual(self.sync_called, set(['instances', 'cores',
+ 'ram', 'fixed_ips']))
+ self.usages_list[0]["until_refresh"] = 5
+ self.usages_list[1]["until_refresh"] = 5
+ self.usages_list[2]["until_refresh"] = 5
+ self.usages_list[3]["until_refresh"] = 5
+ self.compare_usage(self.usages, self.usages_list)
+ self.assertEqual(self.usages_created, {})
+ self.compare_reservation(result, self._update_reservations_list())
+
+ def test_quota_reserve_until_refresh(self):
+ context = self._init_usages(3, 3, 3, 3, until_refresh=1)
+ result = sqa_api.quota_reserve(context, self.resources, self.quotas,
+ self.quotas, self.deltas, self.expire,
+ 5, 0)
+
+ self.assertEqual(self.sync_called, set(['instances', 'cores',
+ 'ram', 'fixed_ips']))
+ self.usages_list[0]["until_refresh"] = 5
+ self.usages_list[1]["until_refresh"] = 5
+ self.usages_list[2]["until_refresh"] = 5
+ self.usages_list[3]["until_refresh"] = 5
+ self.compare_usage(self.usages, self.usages_list)
+ self.assertEqual(self.usages_created, {})
+ self.compare_reservation(result, self._update_reservations_list())
+
+ def test_quota_reserve_max_age(self):
+ max_age = 3600
+ record_created = (timeutils.utcnow() -
+ datetime.timedelta(seconds=max_age))
+ context = self._init_usages(3, 3, 3, 3, created_at=record_created,
+ updated_at=record_created)
+ result = sqa_api.quota_reserve(context, self.resources, self.quotas,
+ self.quotas, self.deltas, self.expire,
+ 0, max_age)
+
+ self.assertEqual(self.sync_called, set(['instances', 'cores',
+ 'ram', 'fixed_ips']))
+ self.compare_usage(self.usages, self.usages_list)
+ self.assertEqual(self.usages_created, {})
+ self.compare_reservation(result, self._update_reservations_list())
+
+ def test_quota_reserve_no_refresh(self):
+ context = self._init_usages(3, 3, 3, 3)
+ result = sqa_api.quota_reserve(context, self.resources, self.quotas,
+ self.quotas, self.deltas, self.expire,
+ 0, 0)
+
+ self.assertEqual(self.sync_called, set([]))
+ self.usages_list[0]["in_use"] = 3
+ self.usages_list[1]["in_use"] = 3
+ self.usages_list[2]["in_use"] = 3
+ self.usages_list[3]["in_use"] = 3
+ self.compare_usage(self.usages, self.usages_list)
+ self.assertEqual(self.usages_created, {})
+ self.compare_reservation(result, self._update_reservations_list())
+
+ def test_quota_reserve_unders(self):
+ context = self._init_usages(1, 3, 1 * 1024, 1)
+ self.deltas["instances"] = -2
+ self.deltas["cores"] = -4
+ self.deltas["ram"] = -2 * 1024
+ self.deltas["fixed_ips"] = -2
+ result = sqa_api.quota_reserve(context, self.resources, self.quotas,
+ self.quotas, self.deltas, self.expire,
+ 0, 0)
+
+ self.assertEqual(self.sync_called, set([]))
+ self.usages_list[0]["in_use"] = 1
+ self.usages_list[0]["reserved"] = 0
+ self.usages_list[1]["in_use"] = 3
+ self.usages_list[1]["reserved"] = 0
+ self.usages_list[2]["in_use"] = 1 * 1024
+ self.usages_list[2]["reserved"] = 0
+ self.usages_list[3]["in_use"] = 1
+ self.usages_list[3]["reserved"] = 0
+ self.compare_usage(self.usages, self.usages_list)
+ self.assertEqual(self.usages_created, {})
+ reservations_list = self._update_reservations_list(False, True)
+ self.compare_reservation(result, reservations_list)
+
+ def test_quota_reserve_overs(self):
+ context = self._init_usages(4, 8, 10 * 1024, 4)
+ try:
+ sqa_api.quota_reserve(context, self.resources, self.quotas,
+ self.quotas, self.deltas, self.expire, 0, 0)
+ except exception.OverQuota as e:
+ expected_kwargs = {'code': 500,
+ 'usages': {'instances': {'reserved': 0, 'in_use': 4},
+ 'ram': {'reserved': 0, 'in_use': 10240},
+ 'fixed_ips': {'reserved': 0, 'in_use': 4},
+ 'cores': {'reserved': 0, 'in_use': 8}},
+ 'headroom': {'cores': 2, 'ram': 0, 'fixed_ips': 1,
+ 'instances': 1},
+ 'overs': ['cores', 'fixed_ips', 'instances', 'ram'],
+ 'quotas': {'cores': 10, 'ram': 10240,
+ 'fixed_ips': 5, 'instances': 5}}
+ self.assertEqual(e.kwargs, expected_kwargs)
+ else:
+ self.fail('Expected OverQuota failure')
+ self.assertEqual(self.sync_called, set([]))
+ self.usages_list[0]["in_use"] = 4
+ self.usages_list[0]["reserved"] = 0
+ self.usages_list[1]["in_use"] = 8
+ self.usages_list[1]["reserved"] = 0
+ self.usages_list[2]["in_use"] = 10 * 1024
+ self.usages_list[2]["reserved"] = 0
+ self.usages_list[3]["in_use"] = 4
+ self.usages_list[3]["reserved"] = 0
+ self.compare_usage(self.usages, self.usages_list)
+ self.assertEqual(self.usages_created, {})
+ self.assertEqual(self.reservations_created, {})
+
+ def test_quota_reserve_cores_unlimited(self):
+ # Requesting 8 cores, quota_cores set to unlimited:
+ self.flags(quota_cores=-1)
+ self._init_usages(1, 8, 1 * 1024, 1)
+ self.assertEqual(self.sync_called, set([]))
+ self.usages_list[0]["in_use"] = 1
+ self.usages_list[0]["reserved"] = 0
+ self.usages_list[1]["in_use"] = 8
+ self.usages_list[1]["reserved"] = 0
+ self.usages_list[2]["in_use"] = 1 * 1024
+ self.usages_list[2]["reserved"] = 0
+ self.usages_list[3]["in_use"] = 1
+ self.usages_list[3]["reserved"] = 0
+ self.compare_usage(self.usages, self.usages_list)
+ self.assertEqual(self.usages_created, {})
+ self.assertEqual(self.reservations_created, {})
+
+ def test_quota_reserve_ram_unlimited(self):
+ # Requesting 10*1024 ram, quota_ram set to unlimited:
+ self.flags(quota_ram=-1)
+ self._init_usages(1, 1, 10 * 1024, 1)
+ self.assertEqual(self.sync_called, set([]))
+ self.usages_list[0]["in_use"] = 1
+ self.usages_list[0]["reserved"] = 0
+ self.usages_list[1]["in_use"] = 1
+ self.usages_list[1]["reserved"] = 0
+ self.usages_list[2]["in_use"] = 10 * 1024
+ self.usages_list[2]["reserved"] = 0
+ self.usages_list[3]["in_use"] = 1
+ self.usages_list[3]["reserved"] = 0
+ self.compare_usage(self.usages, self.usages_list)
+ self.assertEqual(self.usages_created, {})
+ self.assertEqual(self.reservations_created, {})
+
+ def test_quota_reserve_reduction(self):
+ context = self._init_usages(10, 20, 20 * 1024, 10)
+ self.deltas["instances"] = -2
+ self.deltas["cores"] = -4
+ self.deltas["ram"] = -2 * 1024
+ self.deltas["fixed_ips"] = -2
+ result = sqa_api.quota_reserve(context, self.resources, self.quotas,
+ self.quotas, self.deltas, self.expire,
+ 0, 0)
+
+ self.assertEqual(self.sync_called, set([]))
+ self.usages_list[0]["in_use"] = 10
+ self.usages_list[0]["reserved"] = 0
+ self.usages_list[1]["in_use"] = 20
+ self.usages_list[1]["reserved"] = 0
+ self.usages_list[2]["in_use"] = 20 * 1024
+ self.usages_list[2]["reserved"] = 0
+ self.usages_list[3]["in_use"] = 10
+ self.usages_list[3]["reserved"] = 0
+ self.compare_usage(self.usages, self.usages_list)
+ self.assertEqual(self.usages_created, {})
+ reservations_list = self._update_reservations_list(False, True)
+ self.compare_reservation(result, reservations_list)
+
+
class NoopQuotaDriverTestCase(test.TestCase):
    """The noop driver must report every quota as unlimited (-1)."""

    def setUp(self):
        super(NoopQuotaDriverTestCase, self).setUp()

        self.flags(quota_instances=10,
                   quota_cores=20,
                   quota_ram=50 * 1024,
                   quota_floating_ips=10,
                   quota_metadata_items=128,
                   quota_injected_files=5,
                   quota_injected_file_content_bytes=10 * 1024,
                   quota_injected_file_path_length=255,
                   quota_security_groups=10,
                   quota_security_group_rules=20,
                   reservation_expire=86400,
                   until_refresh=0,
                   max_age=0,
                   )

        # Precompute the four shapes of "everything is unlimited" output.
        self.expected_with_usages = {}
        self.expected_without_usages = {}
        self.expected_without_dict = {}
        self.expected_settable_quotas = {}
        for resource in quota.QUOTAS._resources:
            self.expected_with_usages[resource] = dict(limit=-1,
                                                       in_use=-1,
                                                       reserved=-1)
            self.expected_without_usages[resource] = dict(limit=-1)
            self.expected_without_dict[resource] = -1
            self.expected_settable_quotas[resource] = dict(minimum=0,
                                                           maximum=-1)

        self.driver = quota.NoopQuotaDriver()

    def test_get_defaults(self):
        # Use our pre-defined resources
        actual = self.driver.get_defaults(None, quota.QUOTAS._resources)
        self.assertEqual(self.expected_without_dict, actual)

    def test_get_class_quotas(self):
        actual = self.driver.get_class_quotas(
            None, quota.QUOTAS._resources, 'test_class')
        self.assertEqual(self.expected_without_dict, actual)

    def test_get_class_quotas_no_defaults(self):
        actual = self.driver.get_class_quotas(
            None, quota.QUOTAS._resources, 'test_class', False)
        self.assertEqual(self.expected_without_dict, actual)

    def test_get_project_quotas(self):
        actual = self.driver.get_project_quotas(
            None, quota.QUOTAS._resources, 'test_project')
        self.assertEqual(self.expected_with_usages, actual)

    def test_get_user_quotas(self):
        actual = self.driver.get_user_quotas(
            None, quota.QUOTAS._resources, 'test_project', 'fake_user')
        self.assertEqual(self.expected_with_usages, actual)

    def test_get_project_quotas_no_defaults(self):
        actual = self.driver.get_project_quotas(
            None, quota.QUOTAS._resources, 'test_project', defaults=False)
        self.assertEqual(self.expected_with_usages, actual)

    def test_get_user_quotas_no_defaults(self):
        actual = self.driver.get_user_quotas(
            None, quota.QUOTAS._resources, 'test_project', 'fake_user',
            defaults=False)
        self.assertEqual(self.expected_with_usages, actual)

    def test_get_project_quotas_no_usages(self):
        actual = self.driver.get_project_quotas(
            None, quota.QUOTAS._resources, 'test_project', usages=False)
        self.assertEqual(self.expected_without_usages, actual)

    def test_get_user_quotas_no_usages(self):
        actual = self.driver.get_user_quotas(
            None, quota.QUOTAS._resources, 'test_project', 'fake_user',
            usages=False)
        self.assertEqual(self.expected_without_usages, actual)

    def test_get_settable_quotas_with_user(self):
        actual = self.driver.get_settable_quotas(
            None, quota.QUOTAS._resources, 'test_project', 'fake_user')
        self.assertEqual(self.expected_settable_quotas, actual)

    def test_get_settable_quotas_without_user(self):
        actual = self.driver.get_settable_quotas(
            None, quota.QUOTAS._resources, 'test_project')
        self.assertEqual(self.expected_settable_quotas, actual)
diff --git a/nova/tests/test_safeutils.py b/nova/tests/unit/test_safeutils.py
index 66d20ca79e..66d20ca79e 100644
--- a/nova/tests/test_safeutils.py
+++ b/nova/tests/unit/test_safeutils.py
diff --git a/nova/tests/unit/test_service.py b/nova/tests/unit/test_service.py
new file mode 100644
index 0000000000..bb36143869
--- /dev/null
+++ b/nova/tests/unit/test_service.py
@@ -0,0 +1,370 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit Tests for remote procedure calls using queue
+"""
+
+import sys
+
+import mock
+import mox
+from oslo.concurrency import processutils
+from oslo.config import cfg
+import testtools
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import manager
+from nova.openstack.common import service as _service
+from nova import rpc
+from nova import service
+from nova import test
+from nova.tests.unit import utils
+from nova import wsgi
+
# Configuration options used only by this test module: the fake manager
# class path and the host/port the WSGI test service binds to.
test_service_opts = [
    cfg.StrOpt("fake_manager",
               default="nova.tests.unit.test_service.FakeManager",
               help="Manager for testing"),
    cfg.StrOpt("test_service_listen",
               default='127.0.0.1',
               help="Host to bind test service to"),
    cfg.IntOpt("test_service_listen_port",
               default=0,
               help="Port number to bind test service to"),
    ]

CONF = cfg.CONF
CONF.register_opts(test_service_opts)
+
+
class FakeManager(manager.Manager):
    """Minimal manager used by the service tests."""

    def test_method(self):
        # Identifies the manager as the responder (vs. the service).
        return 'manager'
+
+
class ExtendedService(service.Service):
    """Service subclass that shadows the manager's test_method."""

    def test_method(self):
        # Identifies the service as the responder (vs. the manager).
        return 'service'
+
+
class ServiceManagerTestCase(test.TestCase):
    """Test cases for Services."""

    def _start_service(self, factory):
        # Build and start a service of the given class with dummy args.
        serv = factory('test',
                       'test',
                       'test',
                       'nova.tests.unit.test_service.FakeManager')
        serv.start()
        return serv

    def test_message_gets_to_manager(self):
        # Undelegated calls fall through to the manager.
        serv = self._start_service(service.Service)
        self.assertEqual(serv.test_method(), 'manager')

    def test_override_manager_method(self):
        # A method defined on the service wins over the manager's.
        serv = self._start_service(ExtendedService)
        self.assertEqual(serv.test_method(), 'service')

    def test_service_with_min_down_time(self):
        # A service_down_time below 2.5 * report_interval is bumped up.
        CONF.set_override('service_down_time', 10)
        CONF.set_override('report_interval', 10)
        self._start_service(service.Service)
        self.assertEqual(CONF.service_down_time, 25)
+
+
class ServiceFlagsTestCase(test.TestCase):
    """enable_new_services controls whether a new service starts disabled."""

    def _create_service_and_fetch_ref(self, enable):
        # Create, cycle, and destroy a service; return its DB record.
        self.flags(enable_new_services=enable)
        app = service.Service.create(host='foo', binary='nova-fake')
        app.start()
        app.stop()
        admin_ctxt = context.get_admin_context()
        ref = db.service_get(admin_ctxt, app.service_id)
        db.service_destroy(admin_ctxt, app.service_id)
        return ref

    def test_service_enabled_on_create_based_on_flag(self):
        ref = self._create_service_and_fetch_ref(True)
        self.assertFalse(ref['disabled'])

    def test_service_disabled_on_create_based_on_flag(self):
        ref = self._create_service_and_fetch_ref(False)
        self.assertTrue(ref['disabled'])
+
+
class ServiceTestCase(test.TestCase):
    """Test cases for Services.

    These tests rely on mox record/replay: the stubbed DB calls below are
    recorded in the exact order the service is expected to make them, and
    ReplayAll() enforces that ordering.
    """

    def setUp(self):
        super(ServiceTestCase, self).setUp()
        self.host = 'foo'
        self.binary = 'nova-fake'
        self.topic = 'fake'
        # Every test stubs these two DB calls; individual tests record the
        # specific expectations they need.
        self.mox.StubOutWithMock(db, 'service_create')
        self.mox.StubOutWithMock(db, 'service_get_by_args')
        self.flags(use_local=True, group='conductor')

    def test_create(self):
        # Service.create() alone must not touch the database.

        # NOTE(vish): Create was moved out of mox replay to make sure that
        # the looping calls are created in StartService.
        app = service.Service.create(host=self.host, binary=self.binary,
                                     topic=self.topic)

        self.assertTrue(app)

    def _service_start_mocks(self):
        # Record the DB interactions expected when a service starts and no
        # service row exists yet: the lookup raises NotFound, then a row
        # is created and returned.
        service_create = {'host': self.host,
                          'binary': self.binary,
                          'topic': self.topic,
                          'report_count': 0}
        service_ref = {'host': self.host,
                       'binary': self.binary,
                       'topic': self.topic,
                       'report_count': 0,
                       'id': 1}

        db.service_get_by_args(mox.IgnoreArg(),
                self.host, self.binary).AndRaise(exception.NotFound())
        db.service_create(mox.IgnoreArg(),
                service_create).AndReturn(service_ref)
        return service_ref

    def test_init_and_start_hooks(self):
        # Verify the manager hooks fire in order around service-record
        # creation; mox replay enforces the ordering recorded below.
        self.manager_mock = self.mox.CreateMock(FakeManager)
        self.mox.StubOutWithMock(sys.modules[__name__],
                'FakeManager', use_mock_anything=True)
        self.mox.StubOutWithMock(self.manager_mock, 'init_host')
        self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook')
        self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook')

        FakeManager(host=self.host).AndReturn(self.manager_mock)

        self.manager_mock.service_name = self.topic
        self.manager_mock.additional_endpoints = []

        # init_host is called before any service record is created
        self.manager_mock.init_host()
        self._service_start_mocks()
        # pre_start_hook is called after service record is created,
        # but before RPC consumer is created
        self.manager_mock.pre_start_hook()
        # post_start_hook is called after RPC consumer is created.
        self.manager_mock.post_start_hook()

        self.mox.ReplayAll()

        serv = service.Service(self.host,
                               self.binary,
                               self.topic,
                               'nova.tests.unit.test_service.FakeManager')
        serv.start()

    def _test_service_check_create_race(self, ex):
        # Simulate the create/lookup race: service_create raises `ex`
        # (another process won the race), and the retry lookup raises a
        # sentinel TestException so we can see it propagate out of start().
        self.manager_mock = self.mox.CreateMock(FakeManager)
        self.mox.StubOutWithMock(sys.modules[__name__], 'FakeManager',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(self.manager_mock, 'init_host')
        self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook')
        self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook')

        FakeManager(host=self.host).AndReturn(self.manager_mock)

        # init_host is called before any service record is created
        self.manager_mock.init_host()

        db.service_get_by_args(mox.IgnoreArg(), self.host, self.binary
                               ).AndRaise(exception.NotFound)
        db.service_create(mox.IgnoreArg(), mox.IgnoreArg()
                          ).AndRaise(ex)

        class TestException(Exception):
            pass

        db.service_get_by_args(mox.IgnoreArg(), self.host, self.binary
                               ).AndRaise(TestException)

        self.mox.ReplayAll()

        serv = service.Service(self.host,
                               self.binary,
                               self.topic,
                               'nova.tests.unit.test_service.FakeManager')
        self.assertRaises(TestException, serv.start)

    def test_service_check_create_race_topic_exists(self):
        ex = exception.ServiceTopicExists(host='foo', topic='bar')
        self._test_service_check_create_race(ex)

    def test_service_check_create_race_binary_exists(self):
        ex = exception.ServiceBinaryExists(host='foo', binary='bar')
        self._test_service_check_create_race(ex)

    def test_parent_graceful_shutdown(self):
        # stop() must call through to the base class Service.stop().
        self.manager_mock = self.mox.CreateMock(FakeManager)
        self.mox.StubOutWithMock(sys.modules[__name__],
                'FakeManager', use_mock_anything=True)
        self.mox.StubOutWithMock(self.manager_mock, 'init_host')
        self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook')
        self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook')

        self.mox.StubOutWithMock(_service.Service, 'stop')

        FakeManager(host=self.host).AndReturn(self.manager_mock)

        self.manager_mock.service_name = self.topic
        self.manager_mock.additional_endpoints = []

        # init_host is called before any service record is created
        self.manager_mock.init_host()
        self._service_start_mocks()
        # pre_start_hook is called after service record is created,
        # but before RPC consumer is created
        self.manager_mock.pre_start_hook()
        # post_start_hook is called after RPC consumer is created.
        self.manager_mock.post_start_hook()

        _service.Service.stop()

        self.mox.ReplayAll()

        serv = service.Service(self.host,
                               self.binary,
                               self.topic,
                               'nova.tests.unit.test_service.FakeManager')
        serv.start()

        serv.stop()

    @mock.patch('nova.servicegroup.API')
    @mock.patch('nova.conductor.api.LocalAPI.service_get_by_args')
    def test_parent_graceful_shutdown_with_cleanup_host(self,
                                                        mock_svc_get_by_args,
                                                        mock_API):
        # The manager's cleanup_host must be invoked during stop().
        mock_svc_get_by_args.return_value = {'id': 'some_value'}
        mock_manager = mock.Mock()

        serv = service.Service(self.host,
                               self.binary,
                               self.topic,
                               'nova.tests.unit.test_service.FakeManager')

        serv.manager = mock_manager
        serv.manager.additional_endpoints = []

        serv.start()
        serv.manager.init_host.assert_called_with()

        serv.stop()
        serv.manager.cleanup_host.assert_called_with()

    @mock.patch('nova.servicegroup.API')
    @mock.patch('nova.conductor.api.LocalAPI.service_get_by_args')
    @mock.patch.object(rpc, 'get_server')
    def test_service_stop_waits_for_rpcserver(
            self, mock_rpc, mock_svc_get_by_args, mock_API):
        # stop() must both stop AND wait on the RPC server it started.
        mock_svc_get_by_args.return_value = {'id': 'some_value'}
        serv = service.Service(self.host,
                               self.binary,
                               self.topic,
                               'nova.tests.unit.test_service.FakeManager')
        serv.start()
        serv.stop()
        serv.rpcserver.start.assert_called_once_with()
        serv.rpcserver.stop.assert_called_once_with()
        serv.rpcserver.wait.assert_called_once_with()
+
+
class TestWSGIService(test.TestCase):
    """Tests for the WSGI service wrapper."""

    def setUp(self):
        super(TestWSGIService, self).setUp()
        # Avoid loading a real paste application.
        self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())

    def test_service_random_port(self):
        svc = service.WSGIService("test_service")
        svc.start()
        # Port 0 in config means "pick a free port" at start time.
        self.assertNotEqual(0, svc.port)
        svc.stop()

    def test_workers_set_default(self):
        svc = service.WSGIService("osapi_compute")
        self.assertEqual(svc.workers, processutils.get_worker_count())

    def test_workers_set_good_user_setting(self):
        CONF.set_override('osapi_compute_workers', 8)
        svc = service.WSGIService("osapi_compute")
        self.assertEqual(svc.workers, 8)

    def test_workers_set_zero_user_setting(self):
        CONF.set_override('osapi_compute_workers', 0)
        svc = service.WSGIService("osapi_compute")
        # If a value less than 1 is used, defaults to number of procs
        # available.
        self.assertEqual(svc.workers, processutils.get_worker_count())

    def test_service_start_with_illegal_workers(self):
        CONF.set_override("osapi_compute_workers", -1)
        self.assertRaises(exception.InvalidInput,
                          service.WSGIService, "osapi_compute")

    @testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
    def test_service_random_port_with_ipv6(self):
        CONF.set_default("test_service_listen", "::1")
        svc = service.WSGIService("test_service")
        svc.start()
        self.assertEqual("::1", svc.host)
        self.assertNotEqual(0, svc.port)
        svc.stop()

    def test_reset_pool_size_to_default(self):
        svc = service.WSGIService("test_service")
        svc.start()

        # Stopping the service drains the greenthread pool down to zero.
        svc.stop()
        self.assertEqual(svc.server._pool.size, 0)

        # reset() must restore the configured default pool size.
        svc.reset()
        svc.start()
        self.assertEqual(svc.server._pool.size,
                         CONF.wsgi_default_pool_size)
+
+
class TestLauncher(test.TestCase):
    """Launching a WSGI service through the module-level service.serve()."""

    def setUp(self):
        super(TestLauncher, self).setUp()
        # Avoid loading a real paste application.
        self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
        self.service = service.WSGIService("test_service")

    def test_launch_app(self):
        service.serve(self.service)
        # serve() must have started the service (port 0 means not bound).
        self.assertNotEqual(0, self.service.port)
        service._launcher.stop()
diff --git a/nova/tests/test_test.py b/nova/tests/unit/test_test.py
index a0ee2ab809..a0ee2ab809 100644
--- a/nova/tests/test_test.py
+++ b/nova/tests/unit/test_test.py
diff --git a/nova/tests/unit/test_test_utils.py b/nova/tests/unit/test_test_utils.py
new file mode 100644
index 0000000000..8cc87fba65
--- /dev/null
+++ b/nova/tests/unit/test_test_utils.py
@@ -0,0 +1,70 @@
+# Copyright 2010 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import errno
+import socket
+import tempfile
+
+import fixtures
+
+from nova import db
+from nova import test
+from nova.tests.unit import utils as test_utils
+
+
class TestUtilsTestCase(test.TestCase):
    """Tests for the shared test helpers in nova.tests.unit.utils."""

    def test_get_test_admin_context(self):
        # get_test_admin_context's return value behaves like admin context.
        ctxt = test_utils.get_test_admin_context()

        # TODO(soren): This should verify the full interface context
        # objects expose.
        self.assertTrue(ctxt.is_admin)

    def test_get_test_instance(self):
        # get_test_instance's return value looks like an instance_ref.
        instance_ref = test_utils.get_test_instance()
        ctxt = test_utils.get_test_admin_context()
        # Will raise InstanceNotFound if the helper did not persist it.
        db.instance_get(ctxt, instance_ref['id'])

    def _test_get_test_network_info(self):
        """Does the return value match a real network_info structure."""
        # The challenge here is to define what exactly such a structure
        # must look like.
        pass

    def test_ipv6_supported(self):
        # The helper must always return a plain boolean.
        self.assertIn(test_utils.is_ipv6_supported(), (False, True))

        def fake_open(path):
            raise IOError

        def fake_socket_fail(x, y):
            # Simulate a kernel without the AF_INET6 family compiled in.
            e = socket.error()
            e.errno = errno.EAFNOSUPPORT
            raise e

        def fake_socket_ok(x, y):
            return tempfile.TemporaryFile()

        # Socket creation failing with EAFNOSUPPORT means "no IPv6".
        with fixtures.MonkeyPatch('socket.socket', fake_socket_fail):
            self.assertFalse(test_utils.is_ipv6_supported())

        # On non-Linux platforms a successful socket() call is sufficient.
        with fixtures.MonkeyPatch('socket.socket', fake_socket_ok):
            with fixtures.MonkeyPatch('sys.platform', 'windows'):
                self.assertTrue(test_utils.is_ipv6_supported())

            # On Linux the helper also consults /proc; an unreadable file
            # is treated as "IPv6 disabled".
            with fixtures.MonkeyPatch('sys.platform', 'linux2'):
                with fixtures.MonkeyPatch('__builtin__.open', fake_open):
                    self.assertFalse(test_utils.is_ipv6_supported())
diff --git a/nova/tests/unit/test_utils.py b/nova/tests/unit/test_utils.py
new file mode 100644
index 0000000000..8c26a38998
--- /dev/null
+++ b/nova/tests/unit/test_utils.py
@@ -0,0 +1,981 @@
+# Copyright 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import __builtin__
+import datetime
+import functools
+import hashlib
+import importlib
+import os
+import os.path
+import StringIO
+import tempfile
+
+import mox
+import netaddr
+from oslo.concurrency import processutils
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+import nova
+from nova import exception
+from nova import test
+from nova import utils
+
+CONF = cfg.CONF
+
+
class GetMyIP4AddressTestCase(test.NoDBTestCase):
    """Tests for utils.get_my_ipv4_address() with stubbed `ip` output.

    Each test replaces utils.execute with a fake returning canned
    `ip route` / `ip addr` output; the fixture strings must therefore be
    kept byte-identical to realistic command output.
    """

    def test_get_my_ipv4_address_with_no_ipv4(self):
        # No default route in the table: the helper falls back to loopback.
        response = """172.16.0.0/16 via 172.16.251.13 dev tun1
172.16.251.1 via 172.16.251.13 dev tun1
172.16.251.13 dev tun1 proto kernel scope link src 172.16.251.14
172.24.0.0/16 via 172.16.251.13 dev tun1
192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1"""

        def fake_execute(*args, **kwargs):
            return response, None

        self.stubs.Set(utils, 'execute', fake_execute)
        address = utils.get_my_ipv4_address()
        self.assertEqual(address, '127.0.0.1')

    def test_get_my_ipv4_address_bad_process(self):
        # Command failure must degrade gracefully to 127.0.0.1, not raise.
        def fake_execute(*args, **kwargs):
            raise processutils.ProcessExecutionError()

        self.stubs.Set(utils, 'execute', fake_execute)
        address = utils.get_my_ipv4_address()
        self.assertEqual(address, '127.0.0.1')

    def test_get_my_ipv4_address_with_single_interface(self):
        response_route = """default via 192.168.1.1 dev wlan0 proto static
192.168.1.0/24 dev wlan0 proto kernel scope link src 192.168.1.137 metric 9
"""
        response_addr = """
1: lo inet 127.0.0.1/8 scope host lo
3: wlan0 inet 192.168.1.137/24 brd 192.168.1.255 scope global wlan0
"""

        def fake_execute(*args, **kwargs):
            # Dispatch on the presence of 'route' in the argv to return the
            # matching canned output.
            if 'route' in args:
                return response_route, None
            return response_addr, None

        self.stubs.Set(utils, 'execute', fake_execute)
        address = utils.get_my_ipv4_address()
        self.assertEqual(address, '192.168.1.137')

    def test_get_my_ipv4_address_with_multi_ipv4_on_single_interface(self):
        response_route = """
172.18.56.0/24 dev customer proto kernel scope link src 172.18.56.22
169.254.0.0/16 dev customer scope link metric 1031
default via 172.18.56.1 dev customer
"""
        response_addr = (
            ""
            "31: customer inet 172.18.56.22/24 brd 172.18.56.255 scope global"
            " customer\n"
            "31: customer inet 172.18.56.32/24 brd 172.18.56.255 scope global "
            "secondary customer")

        def fake_execute(*args, **kwargs):
            if 'route' in args:
                return response_route, None
            return response_addr, None

        self.stubs.Set(utils, 'execute', fake_execute)
        address = utils.get_my_ipv4_address()
        # The primary (non-"secondary") address on the interface wins.
        self.assertEqual(address, '172.18.56.22')

    def test_get_my_ipv4_address_with_multiple_interfaces(self):
        response_route = """
169.1.9.0/24 dev eth1 proto kernel scope link src 169.1.9.10
172.17.248.0/21 dev eth0 proto kernel scope link src 172.17.255.9
169.254.0.0/16 dev eth0 scope link metric 1002
169.254.0.0/16 dev eth1 scope link metric 1003
default via 172.17.248.1 dev eth0 proto static
"""
        response_addr = """
1: lo inet 127.0.0.1/8 scope host lo
2: eth0 inet 172.17.255.9/21 brd 172.17.255.255 scope global eth0
3: eth1 inet 169.1.9.10/24 scope global eth1
"""

        def fake_execute(*args, **kwargs):
            if 'route' in args:
                return response_route, None
            return response_addr, None

        self.stubs.Set(utils, 'execute', fake_execute)
        address = utils.get_my_ipv4_address()
        # The address on the default-route interface (eth0) is selected.
        self.assertEqual(address, '172.17.255.9')
+
class GenericUtilsTestCase(test.NoDBTestCase):
    """Tests for assorted small helpers in nova.utils."""

    def test_parse_server_string(self):
        # Bare IPv6, bracketed IPv6 with port, IPv4 and hostnames.
        result = utils.parse_server_string('::1')
        self.assertEqual(('::1', ''), result)
        result = utils.parse_server_string('[::1]:8773')
        self.assertEqual(('::1', '8773'), result)
        result = utils.parse_server_string('2001:db8::192.168.1.1')
        self.assertEqual(('2001:db8::192.168.1.1', ''), result)
        result = utils.parse_server_string('[2001:db8::192.168.1.1]:8773')
        self.assertEqual(('2001:db8::192.168.1.1', '8773'), result)
        result = utils.parse_server_string('192.168.1.1')
        self.assertEqual(('192.168.1.1', ''), result)
        result = utils.parse_server_string('192.168.1.2:8773')
        self.assertEqual(('192.168.1.2', '8773'), result)
        result = utils.parse_server_string('192.168.1.3')
        self.assertEqual(('192.168.1.3', ''), result)
        result = utils.parse_server_string('www.example.com:8443')
        self.assertEqual(('www.example.com', '8443'), result)
        result = utils.parse_server_string('www.example.com')
        self.assertEqual(('www.example.com', ''), result)
        # error case
        result = utils.parse_server_string('www.exa:mple.com:8443')
        self.assertEqual(('', ''), result)

    def test_hostname_unicode_sanitization(self):
        # Non-ASCII characters are stripped entirely.
        hostname = u"\u7684.test.example.com"
        self.assertEqual("test.example.com",
                         utils.sanitize_hostname(hostname))

    def test_hostname_sanitize_periods(self):
        hostname = "....test.example.com..."
        self.assertEqual("test.example.com",
                         utils.sanitize_hostname(hostname))

    def test_hostname_sanitize_dashes(self):
        hostname = "----test.example.com---"
        self.assertEqual("test.example.com",
                         utils.sanitize_hostname(hostname))

    def test_hostname_sanitize_characters(self):
        hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+"
        self.assertEqual("91----test-host.example.com-0",
                         utils.sanitize_hostname(hostname))

    def test_hostname_translate(self):
        # Control characters are dropped, not replaced.
        hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>"
        self.assertEqual("hello", utils.sanitize_hostname(hostname))

    def test_read_cached_file(self):
        # mtime unchanged: the cached contents are returned without a read.
        self.mox.StubOutWithMock(os.path, "getmtime")
        os.path.getmtime(mox.IgnoreArg()).AndReturn(1)
        self.mox.ReplayAll()

        cache_data = {"data": 1123, "mtime": 1}
        data = utils.read_cached_file("/this/is/a/fake", cache_data)
        self.assertEqual(cache_data["data"], data)

    def test_read_modified_cached_file(self):
        # mtime newer than the cache: the file is re-read and the
        # reload callback fires with the fresh contents.
        self.mox.StubOutWithMock(os.path, "getmtime")
        self.mox.StubOutWithMock(__builtin__, 'open')
        os.path.getmtime(mox.IgnoreArg()).AndReturn(2)

        fake_contents = "lorem ipsum"
        fake_file = self.mox.CreateMockAnything()
        fake_file.read().AndReturn(fake_contents)
        fake_context_manager = self.mox.CreateMockAnything()
        fake_context_manager.__enter__().AndReturn(fake_file)
        fake_context_manager.__exit__(mox.IgnoreArg(),
                                      mox.IgnoreArg(),
                                      mox.IgnoreArg())

        __builtin__.open(mox.IgnoreArg()).AndReturn(fake_context_manager)

        self.mox.ReplayAll()
        cache_data = {"data": 1123, "mtime": 1}
        self.reload_called = False

        def test_reload(reloaded_data):
            self.assertEqual(reloaded_data, fake_contents)
            self.reload_called = True

        data = utils.read_cached_file("/this/is/a/fake", cache_data,
                                      reload_func=test_reload)
        self.assertEqual(data, fake_contents)
        self.assertTrue(self.reload_called)

    def test_generate_password(self):
        # The generated password must mix digits, lowercase and uppercase.
        password = utils.generate_password()
        self.assertTrue([c for c in password if c in '0123456789'])
        self.assertTrue([c for c in password
                         if c in 'abcdefghijklmnopqrstuvwxyz'])
        self.assertTrue([c for c in password
                         if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'])

    def test_read_file_as_root(self):
        def fake_execute(*args, **kwargs):
            # args[1] is the path passed to `cat` under sudo.
            if args[1] == 'bad':
                raise processutils.ProcessExecutionError()
            return 'fakecontents', None

        self.stubs.Set(utils, 'execute', fake_execute)
        contents = utils.read_file_as_root('good')
        self.assertEqual(contents, 'fakecontents')
        # Execution failure is translated into FileNotFound.
        self.assertRaises(exception.FileNotFound,
                          utils.read_file_as_root, 'bad')

    def test_temporary_chown(self):
        def fake_execute(*args, **kwargs):
            # Record the uid that chown was invoked with.
            if args[0] == 'chown':
                fake_execute.uid = args[1]
        self.stubs.Set(utils, 'execute', fake_execute)

        with tempfile.NamedTemporaryFile() as f:
            with utils.temporary_chown(f.name, owner_uid=2):
                self.assertEqual(fake_execute.uid, 2)
            # Ownership must be restored to the current user on exit.
            self.assertEqual(fake_execute.uid, os.getuid())

    def test_xhtml_escape(self):
        self.assertEqual('&quot;foo&quot;', utils.xhtml_escape('"foo"'))
        self.assertEqual('&apos;foo&apos;', utils.xhtml_escape("'foo'"))
        self.assertEqual('&amp;', utils.xhtml_escape('&'))
        self.assertEqual('&gt;', utils.xhtml_escape('>'))
        self.assertEqual('&lt;', utils.xhtml_escape('<'))
        self.assertEqual('&lt;foo&gt;', utils.xhtml_escape('<foo>'))

    def test_is_valid_ipv4(self):
        self.assertTrue(utils.is_valid_ipv4('127.0.0.1'))
        self.assertFalse(utils.is_valid_ipv4('::1'))
        self.assertFalse(utils.is_valid_ipv4('bacon'))
        self.assertFalse(utils.is_valid_ipv4(""))
        # Non-string input must be rejected, not raise.
        self.assertFalse(utils.is_valid_ipv4(10))

    def test_is_valid_ipv6(self):
        self.assertTrue(utils.is_valid_ipv6("::1"))
        self.assertTrue(utils.is_valid_ipv6(
            "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254"))
        self.assertTrue(utils.is_valid_ipv6(
            "0000:0000:0000:0000:0000:0000:0000:0001"))
        self.assertFalse(utils.is_valid_ipv6("foo"))
        self.assertFalse(utils.is_valid_ipv6("127.0.0.1"))
        self.assertFalse(utils.is_valid_ipv6(""))
        self.assertFalse(utils.is_valid_ipv6(10))

    def test_is_valid_ipv6_cidr(self):
        self.assertTrue(utils.is_valid_ipv6_cidr("2600::/64"))
        self.assertTrue(utils.is_valid_ipv6_cidr(
            "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254/48"))
        self.assertTrue(utils.is_valid_ipv6_cidr(
            "0000:0000:0000:0000:0000:0000:0000:0001/32"))
        # A bare address (no prefix length) is also accepted.
        self.assertTrue(utils.is_valid_ipv6_cidr(
            "0000:0000:0000:0000:0000:0000:0000:0001"))
        self.assertFalse(utils.is_valid_ipv6_cidr("foo"))
        self.assertFalse(utils.is_valid_ipv6_cidr("127.0.0.1"))

    def test_get_shortened_ipv6(self):
        self.assertEqual("abcd:ef01:2345:6789:abcd:ef01:c0a8:fefe",
                         utils.get_shortened_ipv6(
                             "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254"))
        self.assertEqual("::1", utils.get_shortened_ipv6(
            "0000:0000:0000:0000:0000:0000:0000:0001"))
        self.assertEqual("caca::caca:0:babe:201:102",
                         utils.get_shortened_ipv6(
                             "caca:0000:0000:caca:0000:babe:0201:0102"))
        # Non-IPv6 input propagates netaddr's AddrFormatError.
        self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6,
                          "127.0.0.1")
        self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6,
                          "failure")

    def test_get_shortened_ipv6_cidr(self):
        self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr(
            "2600:0000:0000:0000:0000:0000:0000:0000/64"))
        self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr(
            "2600::1/64"))
        self.assertRaises(netaddr.AddrFormatError,
                          utils.get_shortened_ipv6_cidr,
                          "127.0.0.1")
        self.assertRaises(netaddr.AddrFormatError,
                          utils.get_shortened_ipv6_cidr,
                          "failure")

    def test_get_hash_str(self):
        # get_hash_str is expected to be a plain md5 hexdigest.
        base_str = "foo"
        value = hashlib.md5(base_str).hexdigest()
        self.assertEqual(
            value, utils.get_hash_str(base_str))
+
+
class MonkeyPatchTestCase(test.NoDBTestCase):
    """Unit test for utils.monkey_patch()."""
    def setUp(self):
        super(MonkeyPatchTestCase, self).setUp()
        self.example_package = 'nova.tests.unit.monkey_patch_example.'
        # Configure patching of example_a only, using example_decorator;
        # example_b stays unpatched so we can assert the difference.
        self.flags(
            monkey_patch=True,
            monkey_patch_modules=[self.example_package + 'example_a' + ':'
                                  + self.example_package
                                  + 'example_decorator'])

    def test_monkey_patch(self):
        utils.monkey_patch()
        # Reset the call log recorded by the example decorator.
        nova.tests.unit.monkey_patch_example.CALLED_FUNCTION = []
        from nova.tests.unit.monkey_patch_example import example_a
        from nova.tests.unit.monkey_patch_example import example_b

        # Patched and unpatched modules must behave identically...
        self.assertEqual('Example function', example_a.example_function_a())
        exampleA = example_a.ExampleClassA()
        exampleA.example_method()
        ret_a = exampleA.example_method_add(3, 5)
        self.assertEqual(ret_a, 8)

        self.assertEqual('Example function', example_b.example_function_b())
        exampleB = example_b.ExampleClassB()
        exampleB.example_method()
        ret_b = exampleB.example_method_add(3, 5)

        self.assertEqual(ret_b, 8)
        # ...but only example_a's calls are recorded by the decorator.
        package_a = self.example_package + 'example_a.'
        self.assertIn(package_a + 'example_function_a',
                      nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)

        self.assertIn(package_a + 'ExampleClassA.example_method',
                      nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
        self.assertIn(package_a + 'ExampleClassA.example_method_add',
                      nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
        package_b = self.example_package + 'example_b.'
        self.assertNotIn(package_b + 'example_function_b',
                         nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
        self.assertNotIn(package_b + 'ExampleClassB.example_method',
                         nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
        self.assertNotIn(package_b + 'ExampleClassB.example_method_add',
                         nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
+
+
class MonkeyPatchDefaultTestCase(test.NoDBTestCase):
    """Unit test for default monkey_patch_modules value."""

    def setUp(self):
        super(MonkeyPatchDefaultTestCase, self).setUp()
        self.flags(
            monkey_patch=True)

    def test_monkey_patch_default_mod(self):
        # monkey_patch_modules is defined to be
        # <module_to_patch>:<decorator_to_patch_with>
        # Here we check that both parts of the default values are
        # valid
        for module in CONF.monkey_patch_modules:
            m = module.split(':', 1)
            # Check we can import the module to be patched
            importlib.import_module(m[0])
            # check the decorator is valid
            decorator_name = m[1].rsplit('.', 1)
            decorator_module = importlib.import_module(decorator_name[0])
            # AttributeError here means the default config is broken.
            getattr(decorator_module, decorator_name[1])
+
+
class AuditPeriodTest(test.NoDBTestCase):
    """Tests for utils.last_completed_audit_period().

    The clock is frozen at 2012-03-05 08:12:23 so every unit/offset
    combination has a deterministic expected (begin, end) window.
    """

    def setUp(self):
        super(AuditPeriodTest, self).setUp()
        # a fairly random time to test with
        self.test_time = datetime.datetime(second=23,
                                           minute=12,
                                           hour=8,
                                           day=5,
                                           month=3,
                                           year=2012)
        timeutils.set_time_override(override_time=self.test_time)

    def tearDown(self):
        # Undo the frozen clock so other tests see real time again.
        timeutils.clear_time_override()
        super(AuditPeriodTest, self).tearDown()

    def test_hour(self):
        begin, end = utils.last_completed_audit_period(unit='hour')
        self.assertEqual(begin, datetime.datetime(
            hour=7,
            day=5,
            month=3,
            year=2012))
        self.assertEqual(end, datetime.datetime(
            hour=8,
            day=5,
            month=3,
            year=2012))

    def test_hour_with_offset_before_current(self):
        # Offset 10 is before minute 12, so the current period has begun;
        # the last completed one is 07:10 - 08:10.
        begin, end = utils.last_completed_audit_period(unit='hour@10')
        self.assertEqual(begin, datetime.datetime(
            minute=10,
            hour=7,
            day=5,
            month=3,
            year=2012))
        self.assertEqual(end, datetime.datetime(
            minute=10,
            hour=8,
            day=5,
            month=3,
            year=2012))

    def test_hour_with_offset_after_current(self):
        # Offset 30 is after minute 12, so the window shifts back an hour.
        begin, end = utils.last_completed_audit_period(unit='hour@30')
        self.assertEqual(begin, datetime.datetime(
            minute=30,
            hour=6,
            day=5,
            month=3,
            year=2012))
        self.assertEqual(end, datetime.datetime(
            minute=30,
            hour=7,
            day=5,
            month=3,
            year=2012))

    def test_day(self):
        begin, end = utils.last_completed_audit_period(unit='day')
        self.assertEqual(begin, datetime.datetime(
            day=4,
            month=3,
            year=2012))
        self.assertEqual(end, datetime.datetime(
            day=5,
            month=3,
            year=2012))

    def test_day_with_offset_before_current(self):
        begin, end = utils.last_completed_audit_period(unit='day@6')
        self.assertEqual(begin, datetime.datetime(
            hour=6,
            day=4,
            month=3,
            year=2012))
        self.assertEqual(end, datetime.datetime(
            hour=6,
            day=5,
            month=3,
            year=2012))

    def test_day_with_offset_after_current(self):
        begin, end = utils.last_completed_audit_period(unit='day@10')
        self.assertEqual(begin, datetime.datetime(
            hour=10,
            day=3,
            month=3,
            year=2012))
        self.assertEqual(end, datetime.datetime(
            hour=10,
            day=4,
            month=3,
            year=2012))

    def test_month(self):
        begin, end = utils.last_completed_audit_period(unit='month')
        self.assertEqual(begin, datetime.datetime(
            day=1,
            month=2,
            year=2012))
        self.assertEqual(end, datetime.datetime(
            day=1,
            month=3,
            year=2012))

    def test_month_with_offset_before_current(self):
        begin, end = utils.last_completed_audit_period(unit='month@2')
        self.assertEqual(begin, datetime.datetime(
            day=2,
            month=2,
            year=2012))
        self.assertEqual(end, datetime.datetime(
            day=2,
            month=3,
            year=2012))

    def test_month_with_offset_after_current(self):
        begin, end = utils.last_completed_audit_period(unit='month@15')
        self.assertEqual(begin, datetime.datetime(
            day=15,
            month=1,
            year=2012))
        self.assertEqual(end, datetime.datetime(
            day=15,
            month=2,
            year=2012))

    def test_year(self):
        begin, end = utils.last_completed_audit_period(unit='year')
        self.assertEqual(begin, datetime.datetime(
            day=1,
            month=1,
            year=2011))
        self.assertEqual(end, datetime.datetime(
            day=1,
            month=1,
            year=2012))

    def test_year_with_offset_before_current(self):
        begin, end = utils.last_completed_audit_period(unit='year@2')
        self.assertEqual(begin, datetime.datetime(
            day=1,
            month=2,
            year=2011))
        self.assertEqual(end, datetime.datetime(
            day=1,
            month=2,
            year=2012))

    def test_year_with_offset_after_current(self):
        begin, end = utils.last_completed_audit_period(unit='year@6')
        self.assertEqual(begin, datetime.datetime(
            day=1,
            month=6,
            year=2010))
        self.assertEqual(end, datetime.datetime(
            day=1,
            month=6,
            year=2011))
+
+
class MkfsTestCase(test.NoDBTestCase):
    """Tests for utils.mkfs() command construction.

    utils.execute is mocked with exact argv expectations, so these tests
    pin the precise command lines for ext4, msdos and swap.
    """

    def test_mkfs(self):
        self.mox.StubOutWithMock(utils, 'execute')
        # ext4 gets -F (force), swap uses mkswap instead of mkfs.
        utils.execute('mkfs', '-t', 'ext4', '-F', '/my/block/dev',
                      run_as_root=False)
        utils.execute('mkfs', '-t', 'msdos', '/my/msdos/block/dev',
                      run_as_root=False)
        utils.execute('mkswap', '/my/swap/block/dev',
                      run_as_root=False)
        self.mox.ReplayAll()

        utils.mkfs('ext4', '/my/block/dev')
        utils.mkfs('msdos', '/my/msdos/block/dev')
        utils.mkfs('swap', '/my/swap/block/dev')

    def test_mkfs_with_label(self):
        self.mox.StubOutWithMock(utils, 'execute')
        # Label flag differs per fs type: -L (ext4/swap) vs -n (msdos).
        utils.execute('mkfs', '-t', 'ext4', '-F',
                      '-L', 'ext4-vol', '/my/block/dev', run_as_root=False)
        utils.execute('mkfs', '-t', 'msdos',
                      '-n', 'msdos-vol', '/my/msdos/block/dev',
                      run_as_root=False)
        utils.execute('mkswap', '-L', 'swap-vol', '/my/swap/block/dev',
                      run_as_root=False)
        self.mox.ReplayAll()

        utils.mkfs('ext4', '/my/block/dev', 'ext4-vol')
        utils.mkfs('msdos', '/my/msdos/block/dev', 'msdos-vol')
        utils.mkfs('swap', '/my/swap/block/dev', 'swap-vol')
+
+
class LastBytesTestCase(test.NoDBTestCase):
    """Test the last_bytes() utility method."""

    def setUp(self):
        super(LastBytesTestCase, self).setUp()
        self.f = StringIO.StringIO('1234567890')

    def test_truncated(self):
        # Asking for fewer bytes than the file holds returns the tail
        # and reports that unread bytes remain.
        self.f.seek(0, os.SEEK_SET)
        out, remaining = utils.last_bytes(self.f, 5)
        self.assertEqual(out, '67890')
        self.assertTrue(remaining > 0)

    def test_read_all(self):
        self.f.seek(0, os.SEEK_SET)
        out, remaining = utils.last_bytes(self.f, 1000)
        self.assertEqual(out, '1234567890')
        self.assertFalse(remaining > 0)

    def test_seek_too_far_real_file(self):
        # StringIO doesn't raise IOError if you seek past the start of the
        # file, so a real file is needed to exercise that code path.
        flo = tempfile.TemporaryFile()
        content = '1234567890'
        flo.write(content)
        self.assertEqual((content, 0), utils.last_bytes(flo, 1000))
+
+
class IntLikeTestCase(test.NoDBTestCase):
    """Coverage for utils.is_int_like()."""

    def test_is_int_like(self):
        # Integers and strings that parse as plain integers are accepted.
        for value in (1, "1", "514", "0"):
            self.assertTrue(utils.is_int_like(value))

        # Floats, malformed strings, None and UUID-ish values are rejected.
        rejected = (1.1, "1.1", "1.1.1", None, "0.", "aaaaaa", "....", "1g",
                    "0cc3346e-9fef-4445-abe6-5d2b2690ec64", "a1")
        for value in rejected:
            self.assertFalse(utils.is_int_like(value))
+
+
class MetadataToDictTestCase(test.NoDBTestCase):
    """Round-trip checks for metadata_to_dict() and dict_to_metadata()."""

    def test_metadata_to_dict(self):
        items = [{'key': 'foo1', 'value': 'bar'},
                 {'key': 'foo2', 'value': 'baz'}]
        expected = {'foo1': 'bar', 'foo2': 'baz'}
        self.assertEqual(expected, utils.metadata_to_dict(items))

    def test_metadata_to_dict_empty(self):
        self.assertEqual({}, utils.metadata_to_dict([]))

    def test_dict_to_metadata(self):
        source = {'foo1': 'bar1', 'foo2': 'bar2'}
        expected = [{'key': 'foo1', 'value': 'bar1'},
                    {'key': 'foo2', 'value': 'bar2'}]
        self.assertEqual(expected, utils.dict_to_metadata(source))

    def test_dict_to_metadata_empty(self):
        self.assertEqual([], utils.dict_to_metadata({}))
+
+
class WrappedCodeTestCase(test.NoDBTestCase):
    """Test the get_wrapped_function utility method."""

    def _wrapper(self, function):
        # functools.wraps-based decorator used as the wrapping fixture;
        # it hides the original signature behind (*args, **kwargs).
        @functools.wraps(function)
        def decorated_function(self, *args, **kwargs):
            function(self, *args, **kwargs)
        return decorated_function

    def _assert_original_argspec(self, wrapped):
        # get_wrapped_function() must unwind every decorator layer and
        # return the innermost function, whose code object still exposes
        # the original argument names.
        func = utils.get_wrapped_function(wrapped)
        func_code = func.func_code
        self.assertEqual(4, len(func_code.co_varnames))
        self.assertIn('self', func_code.co_varnames)
        self.assertIn('instance', func_code.co_varnames)
        self.assertIn('red', func_code.co_varnames)
        self.assertIn('blue', func_code.co_varnames)

    def test_single_wrapped(self):
        @self._wrapper
        def wrapped(self, instance, red=None, blue=None):
            pass

        self._assert_original_argspec(wrapped)

    def test_double_wrapped(self):
        @self._wrapper
        @self._wrapper
        def wrapped(self, instance, red=None, blue=None):
            pass

        self._assert_original_argspec(wrapped)

    def test_triple_wrapped(self):
        @self._wrapper
        @self._wrapper
        @self._wrapper
        def wrapped(self, instance, red=None, blue=None):
            pass

        self._assert_original_argspec(wrapped)
+
+
class ExpectedArgsTestCase(test.NoDBTestCase):
    """Tests for the utils.expects_func_args decorator factory."""

    def test_passes(self):
        # Decoration succeeds when the target declares all expected args.
        @utils.expects_func_args('foo', 'baz')
        def dec(f):
            return f

        @dec
        def func(foo, bar, baz="lol"):
            pass

    def test_raises(self):
        # Missing expected argument names fail at decoration time.
        @utils.expects_func_args('foo', 'baz')
        def dec(f):
            return f

        def func(bar, baz):
            pass

        self.assertRaises(TypeError, dec, func)

    def test_var_no_of_args(self):
        # *args/**kwargs can satisfy any expected argument.
        @utils.expects_func_args('foo')
        def dec(f):
            return f

        @dec
        def func(bar, *args, **kwargs):
            pass

    def test_more_layers(self):
        @utils.expects_func_args('foo', 'baz')
        def dec(f):
            return f

        # An intermediate non-wraps decorator hides the signature, so the
        # expected args cannot be verified and decoration must fail.
        def dec_2(f):
            def inner_f(*a, **k):
                return f()
            return inner_f

        @dec_2
        def func(bar, baz):
            pass

        self.assertRaises(TypeError, dec, func)
+
+
class StringLengthTestCase(test.NoDBTestCase):
    """Validation checks for utils.check_string_length()."""

    def test_check_string_length(self):
        # A string inside the limits passes and returns None.
        self.assertIsNone(utils.check_string_length(
            'test', 'name', max_length=255))
        # Non-strings, too-short and too-long values are all rejected.
        bad_inputs = ((11, {'max_length': 255}),
                      ('', {'min_length': 1}),
                      ('a' * 256, {'max_length': 255}))
        for value, kwargs in bad_inputs:
            self.assertRaises(exception.InvalidInput,
                              utils.check_string_length,
                              value, 'name', **kwargs)

    def test_check_string_length_noname(self):
        # Same behavior when the optional name argument is omitted.
        self.assertIsNone(utils.check_string_length(
            'test', max_length=255))
        bad_inputs = ((11, {'max_length': 255}),
                      ('', {'min_length': 1}),
                      ('a' * 256, {'max_length': 255}))
        for value, kwargs in bad_inputs:
            self.assertRaises(exception.InvalidInput,
                              utils.check_string_length,
                              value, **kwargs)
+
+
class ValidateIntegerTestCase(test.NoDBTestCase):
    """Tests for utils.validate_integer() bounds and type checking."""

    def test_valid_inputs(self):
        # Both ints and numeric strings are accepted and returned as int.
        self.assertEqual(
            utils.validate_integer(42, "answer"), 42)
        self.assertEqual(
            utils.validate_integer("42", "answer"), 42)
        # Values sitting exactly on min/max boundaries are valid.
        self.assertEqual(
            utils.validate_integer(
                "7", "lucky", min_value=7, max_value=8), 7)
        self.assertEqual(
            utils.validate_integer(
                7, "lucky", min_value=6, max_value=7), 7)
        self.assertEqual(
            utils.validate_integer(
                300, "Spartaaa!!!", min_value=300), 300)
        self.assertEqual(
            utils.validate_integer(
                "300", "Spartaaa!!!", max_value=300), 300)

    def test_invalid_inputs(self):
        self.assertRaises(exception.InvalidInput,
                          utils.validate_integer,
                          "im-not-an-int", "not-an-int")
        self.assertRaises(exception.InvalidInput,
                          utils.validate_integer,
                          3.14, "Pie")
        self.assertRaises(exception.InvalidInput,
                          utils.validate_integer,
                          "299", "Sparta no-show",
                          min_value=300, max_value=300)
        self.assertRaises(exception.InvalidInput,
                          utils.validate_integer,
                          55, "doing 55 in a 54",
                          max_value=54)
        # Non-ASCII input must raise InvalidInput, not UnicodeError
        # (unichr is Python 2 only).
        self.assertRaises(exception.InvalidInput,
                          utils.validate_integer,
                          unichr(129), "UnicodeError",
                          max_value=1000)
+
+
class ValidateNeutronConfiguration(test.NoDBTestCase):
    """Tests for utils.is_neutron() network-backend detection."""

    def test_nova_network(self):
        # Default configuration is nova-network, not neutron.
        self.assertFalse(utils.is_neutron())

    def test_neutron(self):
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        self.assertTrue(utils.is_neutron())

    def test_quantum(self):
        # The pre-rename "quantum" class path is still recognized.
        self.flags(network_api_class='nova.network.quantumv2.api.API')
        self.assertTrue(utils.is_neutron())
+
+
class AutoDiskConfigUtilTestCase(test.NoDBTestCase):
    """Tests for utils.is_auto_disk_config_disabled()."""

    def test_is_auto_disk_config_disabled(self):
        # Matching is case-insensitive and tolerates surrounding whitespace.
        self.assertTrue(utils.is_auto_disk_config_disabled("Disabled "))

    def test_is_auto_disk_config_disabled_none(self):
        self.assertFalse(utils.is_auto_disk_config_disabled(None))

    def test_is_auto_disk_config_disabled_false(self):
        self.assertFalse(utils.is_auto_disk_config_disabled("false"))
+
+
class GetSystemMetadataFromImageTestCase(test.NoDBTestCase):
    """Tests for utils.get_system_metadata_from_image()."""

    def get_image(self):
        # Minimal image metadata covering every SM_INHERITABLE_KEYS entry.
        image_meta = {
            "id": "fake-image",
            "name": "fake-name",
            "min_ram": 1,
            "min_disk": 1,
            "disk_format": "raw",
            "container_format": "bare",
        }

        return image_meta

    def get_flavor(self):
        flavor = {
            "id": "fake.flavor",
            "root_gb": 10,
        }

        return flavor

    def test_base_image_properties(self):
        image = self.get_image()

        # Verify that we inherit all the needed keys
        sys_meta = utils.get_system_metadata_from_image(image)
        for key in utils.SM_INHERITABLE_KEYS:
            sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
            self.assertEqual(image[key], sys_meta.get(sys_key))

        # Verify that everything else is ignored
        self.assertEqual(len(sys_meta), len(utils.SM_INHERITABLE_KEYS))

    def test_inherit_image_properties(self):
        image = self.get_image()
        image["properties"] = {"foo1": "bar", "foo2": "baz"}

        sys_meta = utils.get_system_metadata_from_image(image)

        # Verify that we inherit all the image properties
        for key, expected in image["properties"].iteritems():
            sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
            self.assertEqual(sys_meta[sys_key], expected)

    def test_vhd_min_disk_image(self):
        image = self.get_image()
        flavor = self.get_flavor()

        image["disk_format"] = "vhd"

        sys_meta = utils.get_system_metadata_from_image(image, flavor)

        # Verify that the min_disk property is taken from
        # flavor's root_gb when using vhd disk format
        sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, "min_disk")
        self.assertEqual(sys_meta[sys_key], flavor["root_gb"])

    def test_dont_inherit_empty_values(self):
        image = self.get_image()

        # Null out every inheritable key to prove they are filtered.
        for key in utils.SM_INHERITABLE_KEYS:
            image[key] = None

        sys_meta = utils.get_system_metadata_from_image(image)

        # Verify that the empty properties have not been inherited
        for key in utils.SM_INHERITABLE_KEYS:
            sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
            self.assertNotIn(sys_key, sys_meta)
+
+
class GetImageFromSystemMetadataTestCase(test.NoDBTestCase):
    """Tests for utils.get_image_from_system_metadata()."""

    def get_system_metadata(self):
        # Inheritable keys, already carrying the image_ prefix.
        sys_meta = {
            "image_min_ram": 1,
            "image_min_disk": 1,
            "image_disk_format": "raw",
            "image_container_format": "bare",
        }

        return sys_meta

    def test_image_from_system_metadata(self):
        sys_meta = self.get_system_metadata()
        # Extra prefixed entries should come back as image "properties".
        sys_meta["%soo1" % utils.SM_IMAGE_PROP_PREFIX] = "bar"
        sys_meta["%soo2" % utils.SM_IMAGE_PROP_PREFIX] = "baz"

        image = utils.get_image_from_system_metadata(sys_meta)

        # Verify that we inherit all the needed keys
        for key in utils.SM_INHERITABLE_KEYS:
            sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
            self.assertEqual(image[key], sys_meta.get(sys_key))

        # Verify that we inherit the rest of metadata as properties
        self.assertIn("properties", image)

        for key, value in image["properties"].iteritems():
            sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
            # Use the value bound by the loop instead of re-indexing the
            # same dict we are iterating.
            self.assertEqual(value, sys_meta[sys_key])

    def test_dont_inherit_empty_values(self):
        sys_meta = self.get_system_metadata()

        for key in utils.SM_INHERITABLE_KEYS:
            sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
            sys_meta[sys_key] = None

        image = utils.get_image_from_system_metadata(sys_meta)

        # Verify that the empty properties have not been inherited
        for key in utils.SM_INHERITABLE_KEYS:
            self.assertNotIn(key, image)

    def test_non_inheritable_image_properties(self):
        sys_meta = self.get_system_metadata()
        sys_meta["%soo1" % utils.SM_IMAGE_PROP_PREFIX] = "bar"

        self.flags(non_inheritable_image_properties=["foo1"])

        image = utils.get_image_from_system_metadata(sys_meta)

        # Verify that the foo1 key has not been inherited
        self.assertNotIn("foo1", image)
+
+
class VersionTestCase(test.NoDBTestCase):
    """Tests for the version string/tuple/integer conversion helpers."""

    def test_convert_version_to_int(self):
        # Both dotted strings and tuples are accepted.
        self.assertEqual(6002000, utils.convert_version_to_int('6.2.0'))
        self.assertEqual(6004003, utils.convert_version_to_int((6, 4, 3)))
        self.assertEqual(5, utils.convert_version_to_int((5, )))
        # Non-numeric components surface as a NovaException.
        self.assertRaises(exception.NovaException,
                          utils.convert_version_to_int, '5a.6b')

    def test_convert_version_to_string(self):
        self.assertEqual('6.7.0', utils.convert_version_to_str(6007000))
        self.assertEqual('4', utils.convert_version_to_str(4))

    def test_convert_version_to_tuple(self):
        self.assertEqual((6, 7, 0), utils.convert_version_to_tuple('6.7.0'))
+
+
class ConstantTimeCompareTestCase(test.NoDBTestCase):
    """Tests for utils.constant_time_compare()."""

    def test_constant_time_compare(self):
        # Identical strings compare equal.
        self.assertTrue(utils.constant_time_compare("abcd1234", "abcd1234"))
        # Both a length mismatch and a content mismatch must fail.
        for other in ("a", "ABCD234"):
            self.assertFalse(utils.constant_time_compare("abcd1234", other))
diff --git a/nova/tests/test_versions.py b/nova/tests/unit/test_versions.py
index 06baca8b05..06baca8b05 100644
--- a/nova/tests/test_versions.py
+++ b/nova/tests/unit/test_versions.py
diff --git a/nova/tests/test_weights.py b/nova/tests/unit/test_weights.py
index d6804037a7..d6804037a7 100644
--- a/nova/tests/test_weights.py
+++ b/nova/tests/unit/test_weights.py
diff --git a/nova/tests/unit/test_wsgi.py b/nova/tests/unit/test_wsgi.py
new file mode 100644
index 0000000000..0a08a7651f
--- /dev/null
+++ b/nova/tests/unit/test_wsgi.py
@@ -0,0 +1,263 @@
+# Copyright 2011 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for `nova.wsgi`."""
+
+import os.path
+import tempfile
+import urllib2
+
+import eventlet
+import eventlet.wsgi
+import mock
+from oslo.config import cfg
+import requests
+import testtools
+import webob
+
+import nova.exception
+from nova import test
+from nova.tests.unit import utils
+import nova.wsgi
+
+SSL_CERT_DIR = os.path.normpath(os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ 'ssl_cert'))
+CONF = cfg.CONF
+
+
+class TestLoaderNothingExists(test.NoDBTestCase):
+ """Loader tests where os.path.exists always returns False."""
+
+ def setUp(self):
+ super(TestLoaderNothingExists, self).setUp()
+ self.stubs.Set(os.path, 'exists', lambda _: False)
+
+ def test_relpath_config_not_found(self):
+ self.flags(api_paste_config='api-paste.ini')
+ self.assertRaises(
+ nova.exception.ConfigNotFound,
+ nova.wsgi.Loader,
+ )
+
+ def test_asbpath_config_not_found(self):
+ self.flags(api_paste_config='/etc/nova/api-paste.ini')
+ self.assertRaises(
+ nova.exception.ConfigNotFound,
+ nova.wsgi.Loader,
+ )
+
+
+class TestLoaderNormalFilesystem(test.NoDBTestCase):
+ """Loader tests with normal filesystem (unmodified os.path module)."""
+
+ _paste_config = """
+[app:test_app]
+use = egg:Paste#static
+document_root = /tmp
+ """
+
+ def setUp(self):
+ super(TestLoaderNormalFilesystem, self).setUp()
+ self.config = tempfile.NamedTemporaryFile(mode="w+t")
+ self.config.write(self._paste_config.lstrip())
+ self.config.seek(0)
+ self.config.flush()
+ self.loader = nova.wsgi.Loader(self.config.name)
+
+ def test_config_found(self):
+ self.assertEqual(self.config.name, self.loader.config_path)
+
+ def test_app_not_found(self):
+ self.assertRaises(
+ nova.exception.PasteAppNotFound,
+ self.loader.load_app,
+ "nonexistent app",
+ )
+
+ def test_app_found(self):
+ url_parser = self.loader.load_app("test_app")
+ self.assertEqual("/tmp", url_parser.directory)
+
+ def tearDown(self):
+ self.config.close()
+ super(TestLoaderNormalFilesystem, self).tearDown()
+
+
+class TestWSGIServer(test.NoDBTestCase):
+ """WSGI server tests."""
+
+ def test_no_app(self):
+ server = nova.wsgi.Server("test_app", None)
+ self.assertEqual("test_app", server.name)
+
+ def test_custom_max_header_line(self):
+ self.flags(max_header_line=4096) # Default value is 16384.
+ nova.wsgi.Server("test_custom_max_header_line", None)
+ self.assertEqual(CONF.max_header_line, eventlet.wsgi.MAX_HEADER_LINE)
+
+ def test_start_random_port(self):
+ server = nova.wsgi.Server("test_random_port", None,
+ host="127.0.0.1", port=0)
+ server.start()
+ self.assertNotEqual(0, server.port)
+ server.stop()
+ server.wait()
+
+ @testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
+ def test_start_random_port_with_ipv6(self):
+ server = nova.wsgi.Server("test_random_port", None,
+ host="::1", port=0)
+ server.start()
+ self.assertEqual("::1", server.host)
+ self.assertNotEqual(0, server.port)
+ server.stop()
+ server.wait()
+
+ def test_server_pool_waitall(self):
+ # test pools waitall method gets called while stopping server
+ server = nova.wsgi.Server("test_server", None,
+ host="127.0.0.1", port=4444)
+ server.start()
+ with mock.patch.object(server._pool,
+ 'waitall') as mock_waitall:
+ server.stop()
+ server.wait()
+ mock_waitall.assert_called_once_with()
+
+ def test_uri_length_limit(self):
+ server = nova.wsgi.Server("test_uri_length_limit", None,
+ host="127.0.0.1", max_url_len=16384)
+ server.start()
+
+ uri = "http://127.0.0.1:%d/%s" % (server.port, 10000 * 'x')
+ resp = requests.get(uri, proxies={"http": ""})
+ eventlet.sleep(0)
+ self.assertNotEqual(resp.status_code,
+ requests.codes.REQUEST_URI_TOO_LARGE)
+
+ uri = "http://127.0.0.1:%d/%s" % (server.port, 20000 * 'x')
+ resp = requests.get(uri, proxies={"http": ""})
+ eventlet.sleep(0)
+ self.assertEqual(resp.status_code,
+ requests.codes.REQUEST_URI_TOO_LARGE)
+ server.stop()
+ server.wait()
+
+ def test_reset_pool_size_to_default(self):
+ server = nova.wsgi.Server("test_resize", None,
+ host="127.0.0.1", max_url_len=16384)
+ server.start()
+
+ # Stopping the server, which in turn sets pool size to 0
+ server.stop()
+ self.assertEqual(server._pool.size, 0)
+
+ # Resetting pool size to default
+ server.reset()
+ server.start()
+ self.assertEqual(server._pool.size, CONF.wsgi_default_pool_size)
+
+
+class TestWSGIServerWithSSL(test.NoDBTestCase):
+ """WSGI server with SSL tests."""
+
+ def setUp(self):
+ super(TestWSGIServerWithSSL, self).setUp()
+ self.flags(enabled_ssl_apis=['fake_ssl'],
+ ssl_cert_file=os.path.join(SSL_CERT_DIR, 'certificate.crt'),
+ ssl_key_file=os.path.join(SSL_CERT_DIR, 'privatekey.key'))
+
+ def test_ssl_server(self):
+
+ def test_app(env, start_response):
+ start_response('200 OK', {})
+ return ['PONG']
+
+ fake_ssl_server = nova.wsgi.Server("fake_ssl", test_app,
+ host="127.0.0.1", port=0,
+ use_ssl=True)
+ fake_ssl_server.start()
+ self.assertNotEqual(0, fake_ssl_server.port)
+
+ cli = eventlet.connect(("localhost", fake_ssl_server.port))
+ cli = eventlet.wrap_ssl(cli,
+ ca_certs=os.path.join(SSL_CERT_DIR, 'ca.crt'))
+
+ cli.write('POST / HTTP/1.1\r\nHost: localhost\r\n'
+ 'Connection: close\r\nContent-length:4\r\n\r\nPING')
+ response = cli.read(8192)
+ self.assertEqual(response[-4:], "PONG")
+
+ fake_ssl_server.stop()
+ fake_ssl_server.wait()
+
+ def test_two_servers(self):
+
+ def test_app(env, start_response):
+ start_response('200 OK', {})
+ return ['PONG']
+
+ fake_ssl_server = nova.wsgi.Server("fake_ssl", test_app,
+ host="127.0.0.1", port=0, use_ssl=True)
+ fake_ssl_server.start()
+ self.assertNotEqual(0, fake_ssl_server.port)
+
+ fake_server = nova.wsgi.Server("fake", test_app,
+ host="127.0.0.1", port=0)
+ fake_server.start()
+ self.assertNotEqual(0, fake_server.port)
+
+ cli = eventlet.connect(("localhost", fake_ssl_server.port))
+ cli = eventlet.wrap_ssl(cli,
+ ca_certs=os.path.join(SSL_CERT_DIR, 'ca.crt'))
+
+ cli.write('POST / HTTP/1.1\r\nHost: localhost\r\n'
+ 'Connection: close\r\nContent-length:4\r\n\r\nPING')
+ response = cli.read(8192)
+ self.assertEqual(response[-4:], "PONG")
+
+ cli = eventlet.connect(("localhost", fake_server.port))
+
+ cli.sendall('POST / HTTP/1.1\r\nHost: localhost\r\n'
+ 'Connection: close\r\nContent-length:4\r\n\r\nPING')
+ response = cli.recv(8192)
+ self.assertEqual(response[-4:], "PONG")
+
+ fake_ssl_server.stop()
+ fake_ssl_server.wait()
+
+ @testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
+ def test_app_using_ipv6_and_ssl(self):
+ greetings = 'Hello, World!!!'
+
+ @webob.dec.wsgify
+ def hello_world(req):
+ return greetings
+
+ server = nova.wsgi.Server("fake_ssl",
+ hello_world,
+ host="::1",
+ port=0,
+ use_ssl=True)
+
+ server.start()
+
+ response = urllib2.urlopen('https://[::1]:%d/' % server.port)
+ self.assertEqual(greetings, response.read())
+
+ server.stop()
+ server.wait()
diff --git a/nova/tests/utils.py b/nova/tests/unit/utils.py
index 58d0825587..58d0825587 100644
--- a/nova/tests/utils.py
+++ b/nova/tests/unit/utils.py
diff --git a/nova/tests/virt/__init__.py b/nova/tests/unit/virt/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/virt/__init__.py
+++ b/nova/tests/unit/virt/__init__.py
diff --git a/nova/tests/virt/disk/__init__.py b/nova/tests/unit/virt/disk/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/virt/disk/__init__.py
+++ b/nova/tests/unit/virt/disk/__init__.py
diff --git a/nova/tests/virt/disk/mount/__init__.py b/nova/tests/unit/virt/disk/mount/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/virt/disk/mount/__init__.py
+++ b/nova/tests/unit/virt/disk/mount/__init__.py
diff --git a/nova/tests/virt/disk/mount/test_loop.py b/nova/tests/unit/virt/disk/mount/test_loop.py
index 6375c9386b..6375c9386b 100644
--- a/nova/tests/virt/disk/mount/test_loop.py
+++ b/nova/tests/unit/virt/disk/mount/test_loop.py
diff --git a/nova/tests/virt/disk/mount/test_nbd.py b/nova/tests/unit/virt/disk/mount/test_nbd.py
index d048511d16..d048511d16 100644
--- a/nova/tests/virt/disk/mount/test_nbd.py
+++ b/nova/tests/unit/virt/disk/mount/test_nbd.py
diff --git a/nova/tests/virt/disk/test_api.py b/nova/tests/unit/virt/disk/test_api.py
index 1f62c33b51..1f62c33b51 100644
--- a/nova/tests/virt/disk/test_api.py
+++ b/nova/tests/unit/virt/disk/test_api.py
diff --git a/nova/tests/unit/virt/disk/test_inject.py b/nova/tests/unit/virt/disk/test_inject.py
new file mode 100644
index 0000000000..97c8a08013
--- /dev/null
+++ b/nova/tests/unit/virt/disk/test_inject.py
@@ -0,0 +1,284 @@
+# Copyright (C) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import sys
+
+from nova import exception
+from nova import test
+from nova.tests.unit.virt.disk.vfs import fakeguestfs
+from nova.virt.disk import api as diskapi
+from nova.virt.disk.vfs import guestfs as vfsguestfs
+
+
+class VirtDiskTest(test.NoDBTestCase):
+
+ def setUp(self):
+ super(VirtDiskTest, self).setUp()
+ sys.modules['guestfs'] = fakeguestfs
+ vfsguestfs.guestfs = fakeguestfs
+
+ def test_inject_data(self):
+
+ self.assertTrue(diskapi.inject_data("/some/file", use_cow=True))
+
+ self.assertTrue(diskapi.inject_data("/some/file",
+ mandatory=('files',)))
+
+ self.assertTrue(diskapi.inject_data("/some/file", key="mysshkey",
+ mandatory=('key',)))
+
+ os_name = os.name
+ os.name = 'nt' # Cause password injection to fail
+ self.assertRaises(exception.NovaException,
+ diskapi.inject_data,
+ "/some/file", admin_password="p",
+ mandatory=('admin_password',))
+ self.assertFalse(diskapi.inject_data("/some/file", admin_password="p"))
+ os.name = os_name
+
+ self.assertFalse(diskapi.inject_data("/some/fail/file",
+ key="mysshkey"))
+
+ def test_inject_data_key(self):
+
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ diskapi._inject_key_into_fs("mysshkey", vfs)
+
+ self.assertIn("/root/.ssh", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/root/.ssh"],
+ {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700})
+ self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"],
+ {'isdir': False,
+ 'content': "Hello World\n# The following ssh " +
+ "key was injected by Nova\nmysshkey\n",
+ 'gid': 100,
+ 'uid': 100,
+ 'mode': 0o600})
+
+ vfs.teardown()
+
+ def test_inject_data_key_with_selinux(self):
+
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ vfs.make_path("etc/selinux")
+ vfs.make_path("etc/rc.d")
+ diskapi._inject_key_into_fs("mysshkey", vfs)
+
+ self.assertIn("/etc/rc.d/rc.local", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"],
+ {'isdir': False,
+ 'content': "Hello World#!/bin/sh\n# Added by " +
+ "Nova to ensure injected ssh keys " +
+ "have the right context\nrestorecon " +
+ "-RF root/.ssh 2>/dev/null || :\n",
+ 'gid': 100,
+ 'uid': 100,
+ 'mode': 0o700})
+
+ self.assertIn("/root/.ssh", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/root/.ssh"],
+ {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700})
+ self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"],
+ {'isdir': False,
+ 'content': "Hello World\n# The following ssh " +
+ "key was injected by Nova\nmysshkey\n",
+ 'gid': 100,
+ 'uid': 100,
+ 'mode': 0o600})
+
+ vfs.teardown()
+
+ def test_inject_data_key_with_selinux_append_with_newline(self):
+
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ vfs.replace_file("/etc/rc.d/rc.local", "#!/bin/sh\necho done")
+ vfs.make_path("etc/selinux")
+ vfs.make_path("etc/rc.d")
+ diskapi._inject_key_into_fs("mysshkey", vfs)
+
+ self.assertIn("/etc/rc.d/rc.local", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"],
+ {'isdir': False,
+ 'content': "#!/bin/sh\necho done\n# Added "
+ "by Nova to ensure injected ssh keys have "
+ "the right context\nrestorecon -RF "
+ "root/.ssh 2>/dev/null || :\n",
+ 'gid': 100,
+ 'uid': 100,
+ 'mode': 0o700})
+ vfs.teardown()
+
+ def test_inject_net(self):
+
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ diskapi._inject_net_into_fs("mynetconfig", vfs)
+
+ self.assertIn("/etc/network/interfaces", vfs.handle.files)
+ self.assertEqual(vfs.handle.files["/etc/network/interfaces"],
+ {'content': 'mynetconfig',
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0o700,
+ 'uid': 100})
+ vfs.teardown()
+
+ def test_inject_metadata(self):
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ diskapi._inject_metadata_into_fs({"foo": "bar", "eek": "wizz"}, vfs)
+
+ self.assertIn("/meta.js", vfs.handle.files)
+ self.assertEqual({'content': '{"foo": "bar", ' +
+ '"eek": "wizz"}',
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0o700,
+ 'uid': 100},
+ vfs.handle.files["/meta.js"])
+ vfs.teardown()
+
+ def test_inject_admin_password(self):
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ def fake_salt():
+ return "1234567890abcdef"
+
+ self.stubs.Set(diskapi, '_generate_salt', fake_salt)
+
+ vfs.handle.write("/etc/shadow",
+ "root:$1$12345678$xxxxx:14917:0:99999:7:::\n" +
+ "bin:*:14495:0:99999:7:::\n" +
+ "daemon:*:14495:0:99999:7:::\n")
+
+ vfs.handle.write("/etc/passwd",
+ "root:x:0:0:root:/root:/bin/bash\n" +
+ "bin:x:1:1:bin:/bin:/sbin/nologin\n" +
+ "daemon:x:2:2:daemon:/sbin:/sbin/nologin\n")
+
+ diskapi._inject_admin_password_into_fs("123456", vfs)
+
+ self.assertEqual(vfs.handle.files["/etc/passwd"],
+ {'content': "root:x:0:0:root:/root:/bin/bash\n" +
+ "bin:x:1:1:bin:/bin:/sbin/nologin\n" +
+ "daemon:x:2:2:daemon:/sbin:" +
+ "/sbin/nologin\n",
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0o700,
+ 'uid': 100})
+ shadow = vfs.handle.files["/etc/shadow"]
+
+ # if the encrypted password is only 13 characters long, then
+ # nova.virt.disk.api:_set_password fell back to DES.
+ if len(shadow['content']) == 91:
+ self.assertEqual(shadow,
+ {'content': "root:12tir.zIbWQ3c" +
+ ":14917:0:99999:7:::\n" +
+ "bin:*:14495:0:99999:7:::\n" +
+ "daemon:*:14495:0:99999:7:::\n",
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0o700,
+ 'uid': 100})
+ else:
+ self.assertEqual(shadow,
+ {'content': "root:$1$12345678$a4ge4d5iJ5vw" +
+ "vbFS88TEN0:14917:0:99999:7:::\n" +
+ "bin:*:14495:0:99999:7:::\n" +
+ "daemon:*:14495:0:99999:7:::\n",
+ 'gid': 100,
+ 'isdir': False,
+ 'mode': 0o700,
+ 'uid': 100})
+ vfs.teardown()
+
+ def test_inject_files_into_fs(self):
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ diskapi._inject_files_into_fs([("/path/to/not/exists/file",
+ "inject-file-contents")],
+ vfs)
+
+ self.assertIn("/path/to/not/exists", vfs.handle.files)
+ shadow_dir = vfs.handle.files["/path/to/not/exists"]
+ self.assertEqual(shadow_dir,
+ {"isdir": True,
+ "gid": 0,
+ "uid": 0,
+ "mode": 0o744})
+
+ shadow_file = vfs.handle.files["/path/to/not/exists/file"]
+ self.assertEqual(shadow_file,
+ {"isdir": False,
+ "content": "inject-file-contents",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0o700})
+ vfs.teardown()
+
+ def test_inject_files_into_fs_dir_exists(self):
+ vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
+ vfs.setup()
+
+ called = {'make_path': False}
+
+ def fake_has_file(*args, **kwargs):
+ return True
+
+ def fake_make_path(*args, **kwargs):
+ called['make_path'] = True
+
+ self.stubs.Set(vfs, 'has_file', fake_has_file)
+ self.stubs.Set(vfs, 'make_path', fake_make_path)
+
+ # test for already exists dir
+ diskapi._inject_files_into_fs([("/path/to/exists/file",
+ "inject-file-contents")],
+ vfs)
+
+ self.assertIn("/path/to/exists/file", vfs.handle.files)
+ self.assertFalse(called['make_path'])
+
+ # test for root dir
+ diskapi._inject_files_into_fs([("/inject-file",
+ "inject-file-contents")],
+ vfs)
+
+ self.assertIn("/inject-file", vfs.handle.files)
+ self.assertFalse(called['make_path'])
+
+ # test for null dir
+ vfs.handle.files.pop("/inject-file")
+ diskapi._inject_files_into_fs([("inject-file",
+ "inject-file-contents")],
+ vfs)
+
+ self.assertIn("/inject-file", vfs.handle.files)
+ self.assertFalse(called['make_path'])
+
+ vfs.teardown()
diff --git a/nova/tests/virt/disk/vfs/__init__.py b/nova/tests/unit/virt/disk/vfs/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/virt/disk/vfs/__init__.py
+++ b/nova/tests/unit/virt/disk/vfs/__init__.py
diff --git a/nova/tests/virt/disk/vfs/fakeguestfs.py b/nova/tests/unit/virt/disk/vfs/fakeguestfs.py
index 5e5efa7a14..5e5efa7a14 100644
--- a/nova/tests/virt/disk/vfs/fakeguestfs.py
+++ b/nova/tests/unit/virt/disk/vfs/fakeguestfs.py
diff --git a/nova/tests/unit/virt/disk/vfs/test_guestfs.py b/nova/tests/unit/virt/disk/vfs/test_guestfs.py
new file mode 100644
index 0000000000..33dd100329
--- /dev/null
+++ b/nova/tests/unit/virt/disk/vfs/test_guestfs.py
@@ -0,0 +1,264 @@
+# Copyright (C) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+
+from nova import exception
+from nova import test
+from nova.tests.unit.virt.disk.vfs import fakeguestfs
+from nova.virt.disk.vfs import guestfs as vfsimpl
+
+
+class VirtDiskVFSGuestFSTest(test.NoDBTestCase):
+
+ def setUp(self):
+ super(VirtDiskVFSGuestFSTest, self).setUp()
+ sys.modules['guestfs'] = fakeguestfs
+ vfsimpl.guestfs = fakeguestfs
+
+ def _do_test_appliance_setup_inspect(self, forcetcg):
+ if forcetcg:
+ vfsimpl.force_tcg()
+ else:
+ vfsimpl.force_tcg(False)
+
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
+ imgfmt="qcow2",
+ partition=-1)
+ vfs.setup()
+
+ if forcetcg:
+ self.assertEqual("force_tcg", vfs.handle.backend_settings)
+ vfsimpl.force_tcg(False)
+ else:
+ self.assertIsNone(vfs.handle.backend_settings)
+
+ self.assertTrue(vfs.handle.running)
+ self.assertEqual(3, len(vfs.handle.mounts))
+ self.assertEqual("/dev/mapper/guestvgf-lv_root",
+ vfs.handle.mounts[0][1])
+ self.assertEqual("/dev/vda1",
+ vfs.handle.mounts[1][1])
+ self.assertEqual("/dev/mapper/guestvgf-lv_home",
+ vfs.handle.mounts[2][1])
+ self.assertEqual("/", vfs.handle.mounts[0][2])
+ self.assertEqual("/boot", vfs.handle.mounts[1][2])
+ self.assertEqual("/home", vfs.handle.mounts[2][2])
+
+ handle = vfs.handle
+ vfs.teardown()
+
+ self.assertIsNone(vfs.handle)
+ self.assertFalse(handle.running)
+ self.assertTrue(handle.closed)
+ self.assertEqual(0, len(handle.mounts))
+
+ def test_appliance_setup_inspect_auto(self):
+ self._do_test_appliance_setup_inspect(False)
+
+ def test_appliance_setup_inspect_tcg(self):
+ self._do_test_appliance_setup_inspect(True)
+
+ def test_appliance_setup_inspect_no_root_raises(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
+ imgfmt="qcow2",
+ partition=-1)
+ # call setup to init the handle so we can stub it
+ vfs.setup()
+
+ self.assertIsNone(vfs.handle.backend_settings)
+
+ def fake_inspect_os():
+ return []
+
+ self.stubs.Set(vfs.handle, 'inspect_os', fake_inspect_os)
+ self.assertRaises(exception.NovaException, vfs.setup_os_inspect)
+
+ def test_appliance_setup_inspect_multi_boots_raises(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
+ imgfmt="qcow2",
+ partition=-1)
+ # call setup to init the handle so we can stub it
+ vfs.setup()
+
+ self.assertIsNone(vfs.handle.backend_settings)
+
+ def fake_inspect_os():
+ return ['fake1', 'fake2']
+
+ self.stubs.Set(vfs.handle, 'inspect_os', fake_inspect_os)
+ self.assertRaises(exception.NovaException, vfs.setup_os_inspect)
+
+ def test_appliance_setup_static_nopart(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
+ imgfmt="qcow2",
+ partition=None)
+ vfs.setup()
+
+ self.assertIsNone(vfs.handle.backend_settings)
+ self.assertTrue(vfs.handle.running)
+ self.assertEqual(1, len(vfs.handle.mounts))
+ self.assertEqual("/dev/sda", vfs.handle.mounts[0][1])
+ self.assertEqual("/", vfs.handle.mounts[0][2])
+
+ handle = vfs.handle
+ vfs.teardown()
+
+ self.assertIsNone(vfs.handle)
+ self.assertFalse(handle.running)
+ self.assertTrue(handle.closed)
+ self.assertEqual(0, len(handle.mounts))
+
+ def test_appliance_setup_static_part(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
+ imgfmt="qcow2",
+ partition=2)
+ vfs.setup()
+
+ self.assertIsNone(vfs.handle.backend_settings)
+ self.assertTrue(vfs.handle.running)
+ self.assertEqual(1, len(vfs.handle.mounts))
+ self.assertEqual("/dev/sda2", vfs.handle.mounts[0][1])
+ self.assertEqual("/", vfs.handle.mounts[0][2])
+
+ handle = vfs.handle
+ vfs.teardown()
+
+ self.assertIsNone(vfs.handle)
+ self.assertFalse(handle.running)
+ self.assertTrue(handle.closed)
+ self.assertEqual(0, len(handle.mounts))
+
+ def test_makepath(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.make_path("/some/dir")
+ vfs.make_path("/other/dir")
+
+ self.assertIn("/some/dir", vfs.handle.files)
+ self.assertIn("/other/dir", vfs.handle.files)
+ self.assertTrue(vfs.handle.files["/some/dir"]["isdir"])
+ self.assertTrue(vfs.handle.files["/other/dir"]["isdir"])
+
+ vfs.teardown()
+
+ def test_append_file(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.append_file("/some/file", " Goodbye")
+
+ self.assertIn("/some/file", vfs.handle.files)
+ self.assertEqual("Hello World Goodbye",
+ vfs.handle.files["/some/file"]["content"])
+
+ vfs.teardown()
+
+ def test_replace_file(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.replace_file("/some/file", "Goodbye")
+
+ self.assertIn("/some/file", vfs.handle.files)
+ self.assertEqual("Goodbye",
+ vfs.handle.files["/some/file"]["content"])
+
+ vfs.teardown()
+
+ def test_read_file(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertEqual("Hello World", vfs.read_file("/some/file"))
+
+ vfs.teardown()
+
+ def test_has_file(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.read_file("/some/file")
+
+ self.assertTrue(vfs.has_file("/some/file"))
+ self.assertFalse(vfs.has_file("/other/file"))
+
+ vfs.teardown()
+
+ def test_set_permissions(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.read_file("/some/file")
+
+ self.assertEqual(0o700, vfs.handle.files["/some/file"]["mode"])
+
+ vfs.set_permissions("/some/file", 0o7777)
+ self.assertEqual(0o7777, vfs.handle.files["/some/file"]["mode"])
+
+ vfs.teardown()
+
+ def test_set_ownership(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ vfs.read_file("/some/file")
+
+ self.assertEqual(100, vfs.handle.files["/some/file"]["uid"])
+ self.assertEqual(100, vfs.handle.files["/some/file"]["gid"])
+
+ vfs.set_ownership("/some/file", "fred", None)
+ self.assertEqual(105, vfs.handle.files["/some/file"]["uid"])
+ self.assertEqual(100, vfs.handle.files["/some/file"]["gid"])
+
+ vfs.set_ownership("/some/file", None, "users")
+ self.assertEqual(105, vfs.handle.files["/some/file"]["uid"])
+ self.assertEqual(500, vfs.handle.files["/some/file"]["gid"])
+
+ vfs.set_ownership("/some/file", "joe", "admins")
+ self.assertEqual(110, vfs.handle.files["/some/file"]["uid"])
+ self.assertEqual(600, vfs.handle.files["/some/file"]["gid"])
+
+ vfs.teardown()
+
+ def test_close_on_error(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertFalse(vfs.handle.kwargs['close_on_exit'])
+ vfs.teardown()
+ self.stubs.Set(fakeguestfs.GuestFS, 'SUPPORT_CLOSE_ON_EXIT', False)
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertNotIn('close_on_exit', vfs.handle.kwargs)
+ vfs.teardown()
+
+ def test_python_return_dict(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertFalse(vfs.handle.kwargs['python_return_dict'])
+ vfs.teardown()
+ self.stubs.Set(fakeguestfs.GuestFS, 'SUPPORT_RETURN_DICT', False)
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertNotIn('python_return_dict', vfs.handle.kwargs)
+ vfs.teardown()
+
+ def test_setup_debug_disable(self):
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertFalse(vfs.handle.trace_enabled)
+ self.assertFalse(vfs.handle.verbose_enabled)
+ self.assertIsNone(vfs.handle.event_callback)
+
+ def test_setup_debug_enabled(self):
+ self.flags(debug=True, group='guestfs')
+ vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.setup()
+ self.assertTrue(vfs.handle.trace_enabled)
+ self.assertTrue(vfs.handle.verbose_enabled)
+ self.assertIsNotNone(vfs.handle.event_callback)
diff --git a/nova/tests/unit/virt/disk/vfs/test_localfs.py b/nova/tests/unit/virt/disk/vfs/test_localfs.py
new file mode 100644
index 0000000000..6e7780e74b
--- /dev/null
+++ b/nova/tests/unit/virt/disk/vfs/test_localfs.py
@@ -0,0 +1,385 @@
+# Copyright (C) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.concurrency import processutils
+from oslo.config import cfg
+
+from nova import exception
+from nova import test
+from nova.tests.unit import utils as tests_utils
+import nova.utils
+from nova.virt.disk.vfs import localfs as vfsimpl
+
+CONF = cfg.CONF
+
+dirs = []
+files = {}
+commands = []
+
+
+def fake_execute(*args, **kwargs):
+ commands.append({"args": args, "kwargs": kwargs})
+
+ if args[0] == "readlink":
+ if args[1] == "-nm":
+ if args[2] in ["/scratch/dir/some/file",
+ "/scratch/dir/some/dir",
+ "/scratch/dir/other/dir",
+ "/scratch/dir/other/file"]:
+ return args[2], ""
+ elif args[1] == "-e":
+ if args[2] in files:
+ return args[2], ""
+
+ return "", "No such file"
+ elif args[0] == "mkdir":
+ dirs.append(args[2])
+ elif args[0] == "chown":
+ owner = args[1]
+ path = args[2]
+ if path not in files:
+ raise Exception("No such file: " + path)
+
+ sep = owner.find(':')
+ if sep != -1:
+ user = owner[0:sep]
+ group = owner[sep + 1:]
+ else:
+ user = owner
+ group = None
+
+ if user:
+ if user == "fred":
+ uid = 105
+ else:
+ uid = 110
+ files[path]["uid"] = uid
+ if group:
+ if group == "users":
+ gid = 500
+ else:
+ gid = 600
+ files[path]["gid"] = gid
+ elif args[0] == "chgrp":
+ group = args[1]
+ path = args[2]
+ if path not in files:
+ raise Exception("No such file: " + path)
+
+ if group == "users":
+ gid = 500
+ else:
+ gid = 600
+ files[path]["gid"] = gid
+ elif args[0] == "chmod":
+ mode = args[1]
+ path = args[2]
+ if path not in files:
+ raise Exception("No such file: " + path)
+
+ files[path]["mode"] = int(mode, 8)
+ elif args[0] == "cat":
+ path = args[1]
+ if path not in files:
+ files[path] = {
+ "content": "Hello World",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0o700
+ }
+ return files[path]["content"], ""
+ elif args[0] == "tee":
+ if args[1] == "-a":
+ path = args[2]
+ append = True
+ else:
+ path = args[1]
+ append = False
+ if path not in files:
+ files[path] = {
+ "content": "Hello World",
+ "gid": 100,
+ "uid": 100,
+ "mode": 0o700,
+ }
+ if append:
+ files[path]["content"] += kwargs["process_input"]
+ else:
+ files[path]["content"] = kwargs["process_input"]
+
+
+class VirtDiskVFSLocalFSTestPaths(test.NoDBTestCase):
+ def setUp(self):
+ super(VirtDiskVFSLocalFSTestPaths, self).setUp()
+
+ real_execute = processutils.execute
+
+ def nonroot_execute(*cmd_parts, **kwargs):
+ kwargs.pop('run_as_root', None)
+ return real_execute(*cmd_parts, **kwargs)
+
+ self.stubs.Set(processutils, 'execute', nonroot_execute)
+
+ def test_check_safe_path(self):
+ if not tests_utils.coreutils_readlink_available():
+ self.skipTest("coreutils readlink(1) unavailable")
+ vfs = vfsimpl.VFSLocalFS("dummy.img")
+ vfs.imgdir = "/foo"
+ ret = vfs._canonical_path('etc/something.conf')
+ self.assertEqual(ret, '/foo/etc/something.conf')
+
+ def test_check_unsafe_path(self):
+ if not tests_utils.coreutils_readlink_available():
+ self.skipTest("coreutils readlink(1) unavailable")
+ vfs = vfsimpl.VFSLocalFS("dummy.img")
+ vfs.imgdir = "/foo"
+ self.assertRaises(exception.Invalid,
+ vfs._canonical_path,
+ 'etc/../../../something.conf')
+
+
+class VirtDiskVFSLocalFSTest(test.NoDBTestCase):
+ def test_makepath(self):
+ global dirs, commands
+ dirs = []
+ commands = []
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.make_path("/some/dir")
+ vfs.make_path("/other/dir")
+
+ self.assertEqual(dirs,
+ ["/scratch/dir/some/dir", "/scratch/dir/other/dir"]),
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/dir'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('mkdir', '-p',
+ '/scratch/dir/some/dir'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/other/dir'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('mkdir', '-p',
+ '/scratch/dir/other/dir'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}}])
+
+ def test_append_file(self):
+ global files, commands
+ files = {}
+ commands = []
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.append_file("/some/file", " Goodbye")
+
+ self.assertIn("/scratch/dir/some/file", files)
+ self.assertEqual(files["/scratch/dir/some/file"]["content"],
+ "Hello World Goodbye")
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('tee', '-a',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'process_input': ' Goodbye',
+ 'run_as_root': True,
+ 'root_helper': root_helper}}])
+
+ def test_replace_file(self):
+ global files, commands
+ files = {}
+ commands = []
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.replace_file("/some/file", "Goodbye")
+
+ self.assertIn("/scratch/dir/some/file", files)
+ self.assertEqual(files["/scratch/dir/some/file"]["content"],
+ "Goodbye")
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('tee', '/scratch/dir/some/file'),
+ 'kwargs': {'process_input': 'Goodbye',
+ 'run_as_root': True,
+ 'root_helper': root_helper}}])
+
+ def test_read_file(self):
+ global commands, files
+ files = {}
+ commands = []
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ self.assertEqual(vfs.read_file("/some/file"), "Hello World")
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('cat', '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}}])
+
+ def test_has_file(self):
+ global commands, files
+ files = {}
+ commands = []
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.read_file("/some/file")
+
+ self.assertTrue(vfs.has_file("/some/file"))
+ self.assertFalse(vfs.has_file("/other/file"))
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('cat', '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-e',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/other/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-e',
+ '/scratch/dir/other/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ ])
+
+ def test_set_permissions(self):
+ global commands, files
+ commands = []
+ files = {}
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.read_file("/some/file")
+
+ vfs.set_permissions("/some/file", 0o777)
+ self.assertEqual(files["/scratch/dir/some/file"]["mode"], 0o777)
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('cat', '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('chmod', '777',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}}])
+
+ def test_set_ownership(self):
+ global commands, files
+ commands = []
+ files = {}
+ self.stubs.Set(processutils, 'execute', fake_execute)
+
+ vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
+ vfs.imgdir = "/scratch/dir"
+ vfs.read_file("/some/file")
+
+ self.assertEqual(files["/scratch/dir/some/file"]["uid"], 100)
+ self.assertEqual(files["/scratch/dir/some/file"]["gid"], 100)
+
+ vfs.set_ownership("/some/file", "fred", None)
+ self.assertEqual(files["/scratch/dir/some/file"]["uid"], 105)
+ self.assertEqual(files["/scratch/dir/some/file"]["gid"], 100)
+
+ vfs.set_ownership("/some/file", None, "users")
+ self.assertEqual(files["/scratch/dir/some/file"]["uid"], 105)
+ self.assertEqual(files["/scratch/dir/some/file"]["gid"], 500)
+
+ vfs.set_ownership("/some/file", "joe", "admins")
+ self.assertEqual(files["/scratch/dir/some/file"]["uid"], 110)
+ self.assertEqual(files["/scratch/dir/some/file"]["gid"], 600)
+
+ root_helper = nova.utils._get_root_helper()
+ self.assertEqual(commands,
+ [{'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('cat', '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('chown', 'fred',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('chgrp', 'users',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('readlink', '-nm',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}},
+ {'args': ('chown', 'joe:admins',
+ '/scratch/dir/some/file'),
+ 'kwargs': {'run_as_root': True,
+ 'root_helper': root_helper}}])
diff --git a/nova/tests/virt/hyperv/__init__.py b/nova/tests/unit/virt/hyperv/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/virt/hyperv/__init__.py
+++ b/nova/tests/unit/virt/hyperv/__init__.py
diff --git a/nova/tests/virt/hyperv/db_fakes.py b/nova/tests/unit/virt/hyperv/db_fakes.py
index 9e8249323e..9e8249323e 100644
--- a/nova/tests/virt/hyperv/db_fakes.py
+++ b/nova/tests/unit/virt/hyperv/db_fakes.py
diff --git a/nova/tests/virt/hyperv/fake.py b/nova/tests/unit/virt/hyperv/fake.py
index 6403374aa5..6403374aa5 100644
--- a/nova/tests/virt/hyperv/fake.py
+++ b/nova/tests/unit/virt/hyperv/fake.py
diff --git a/nova/tests/virt/hyperv/test_basevolumeutils.py b/nova/tests/unit/virt/hyperv/test_basevolumeutils.py
index 8f48515d09..8f48515d09 100644
--- a/nova/tests/virt/hyperv/test_basevolumeutils.py
+++ b/nova/tests/unit/virt/hyperv/test_basevolumeutils.py
diff --git a/nova/tests/virt/hyperv/test_hostutils.py b/nova/tests/unit/virt/hyperv/test_hostutils.py
index 998692d350..998692d350 100644
--- a/nova/tests/virt/hyperv/test_hostutils.py
+++ b/nova/tests/unit/virt/hyperv/test_hostutils.py
diff --git a/nova/tests/unit/virt/hyperv/test_hypervapi.py b/nova/tests/unit/virt/hyperv/test_hypervapi.py
new file mode 100644
index 0000000000..375420a484
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_hypervapi.py
@@ -0,0 +1,1967 @@
+# Copyright 2012 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suite for the Hyper-V driver and related APIs.
+"""
+
+import contextlib
+import datetime
+import io
+import os
+import platform
+import shutil
+import time
+import uuid
+
+import mock
+import mox
+from oslo.config import cfg
+from oslo.utils import units
+
+from nova.api.metadata import base as instance_metadata
+from nova.compute import power_state
+from nova.compute import task_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.i18n import _
+from nova.image import glance
+from nova.openstack.common import fileutils
+from nova import test
+from nova.tests.unit import fake_network
+from nova.tests.unit.image import fake as fake_image
+from nova.tests.unit import matchers
+from nova.tests.unit.virt.hyperv import db_fakes
+from nova.tests.unit.virt.hyperv import fake
+from nova import utils
+from nova.virt import configdrive
+from nova.virt import driver
+from nova.virt.hyperv import basevolumeutils
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import driver as driver_hyperv
+from nova.virt.hyperv import hostops
+from nova.virt.hyperv import hostutils
+from nova.virt.hyperv import ioutils
+from nova.virt.hyperv import livemigrationutils
+from nova.virt.hyperv import networkutils
+from nova.virt.hyperv import networkutilsv2
+from nova.virt.hyperv import pathutils
+from nova.virt.hyperv import rdpconsoleutils
+from nova.virt.hyperv import utilsfactory
+from nova.virt.hyperv import vhdutils
+from nova.virt.hyperv import vhdutilsv2
+from nova.virt.hyperv import vmutils
+from nova.virt.hyperv import vmutilsv2
+from nova.virt.hyperv import volumeops
+from nova.virt.hyperv import volumeutils
+from nova.virt.hyperv import volumeutilsv2
+from nova.virt import images
+
+CONF = cfg.CONF
+CONF.import_opt('vswitch_name', 'nova.virt.hyperv.vif', 'hyperv')
+
+
+class HyperVAPIBaseTestCase(test.NoDBTestCase):
+ """Base unit tests class for Hyper-V driver calls."""
+
+ def __init__(self, test_case_name):
+ self._mox = mox.Mox()
+ super(HyperVAPIBaseTestCase, self).__init__(test_case_name)
+
+ def setUp(self):
+ super(HyperVAPIBaseTestCase, self).setUp()
+
+ self._user_id = 'fake'
+ self._project_id = 'fake'
+ self._instance_data = None
+ self._image_metadata = None
+ self._fetched_image = None
+ self._update_image_raise_exception = False
+ self._volume_target_portal = 'testtargetportal:3260'
+ self._volume_id = '0ef5d708-45ab-4129-8c59-d774d2837eb7'
+ self._context = context.RequestContext(self._user_id, self._project_id)
+ self._instance_ide_disks = []
+ self._instance_ide_dvds = []
+ self._instance_volume_disks = []
+ self._test_vm_name = None
+ self._test_instance_dir = 'C:\\FakeInstancesPath\\instance-0000001'
+ self._check_min_windows_version_satisfied = True
+
+ self._setup_stubs()
+
+ self.flags(instances_path=r'C:\Hyper-V\test\instances',
+ network_api_class='nova.network.neutronv2.api.API')
+ self.flags(force_volumeutils_v1=True, group='hyperv')
+ self.flags(force_hyperv_utils_v1=True, group='hyperv')
+
+ self._conn = driver_hyperv.HyperVDriver(None)
+
+ def _setup_stubs(self):
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ fake_image.stub_out_image_service(self.stubs)
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
+
+ def fake_fetch(context, image_id, target, user, project):
+ self._fetched_image = target
+ self.stubs.Set(images, 'fetch', fake_fetch)
+
+ def fake_get_remote_image_service(context, name):
+ class FakeGlanceImageService(object):
+ def update(self_fake, context, image_id, image_metadata, f):
+ if self._update_image_raise_exception:
+ raise vmutils.HyperVException(
+ "Simulated update failure")
+ self._image_metadata = image_metadata
+ return (FakeGlanceImageService(), 1)
+ self.stubs.Set(glance, 'get_remote_image_service',
+ fake_get_remote_image_service)
+
+ def fake_check_min_windows_version(fake_self, major, minor):
+ if [major, minor] >= [6, 3]:
+ return False
+ return self._check_min_windows_version_satisfied
+ self.stubs.Set(hostutils.HostUtils, 'check_min_windows_version',
+ fake_check_min_windows_version)
+
+ def fake_sleep(ms):
+ pass
+ self.stubs.Set(time, 'sleep', fake_sleep)
+
+ class FakeIOThread(object):
+ def __init__(self, src, dest, max_bytes):
+ pass
+
+ def start(self):
+ pass
+
+ self.stubs.Set(pathutils, 'PathUtils', fake.PathUtils)
+ self.stubs.Set(ioutils, 'IOThread', FakeIOThread)
+ self._mox.StubOutWithMock(fake.PathUtils, 'open')
+ self._mox.StubOutWithMock(fake.PathUtils, 'copyfile')
+ self._mox.StubOutWithMock(fake.PathUtils, 'rmtree')
+ self._mox.StubOutWithMock(fake.PathUtils, 'copy')
+ self._mox.StubOutWithMock(fake.PathUtils, 'remove')
+ self._mox.StubOutWithMock(fake.PathUtils, 'rename')
+ self._mox.StubOutWithMock(fake.PathUtils, 'makedirs')
+ self._mox.StubOutWithMock(fake.PathUtils,
+ 'get_instance_migr_revert_dir')
+ self._mox.StubOutWithMock(fake.PathUtils, 'get_instance_dir')
+ self._mox.StubOutWithMock(fake.PathUtils, 'get_vm_console_log_paths')
+
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'vm_exists')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'create_vm')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'destroy_vm')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'attach_ide_drive')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'create_scsi_controller')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'create_nic')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'set_vm_state')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'list_instances')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_summary_info')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'take_vm_snapshot')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'remove_vm_snapshot')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'set_nic_connection')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_scsi_controller')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_ide_controller')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_attached_disks')
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'attach_volume_to_controller')
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'get_mounted_disk_by_drive_number')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'detach_vm_disk')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_storage_paths')
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'get_controller_volume_paths')
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'enable_vm_metrics_collection')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_id')
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'get_vm_serial_port_connection')
+
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'create_differencing_vhd')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'reconnect_parent_vhd')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'merge_vhd')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_parent_path')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_info')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'resize_vhd')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils,
+ 'get_internal_vhd_size_by_file_size')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'validate_vhd')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_format')
+ self._mox.StubOutWithMock(vhdutils.VHDUtils, 'create_dynamic_vhd')
+
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_cpus_info')
+ self._mox.StubOutWithMock(hostutils.HostUtils,
+ 'is_cpu_feature_present')
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_memory_info')
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_volume_info')
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_windows_version')
+ self._mox.StubOutWithMock(hostutils.HostUtils, 'get_local_ips')
+
+ self._mox.StubOutWithMock(networkutils.NetworkUtils,
+ 'get_external_vswitch')
+ self._mox.StubOutWithMock(networkutils.NetworkUtils,
+ 'create_vswitch_port')
+ self._mox.StubOutWithMock(networkutils.NetworkUtils,
+ 'vswitch_port_needed')
+
+ self._mox.StubOutWithMock(livemigrationutils.LiveMigrationUtils,
+ 'live_migrate_vm')
+ self._mox.StubOutWithMock(livemigrationutils.LiveMigrationUtils,
+ 'check_live_migration_config')
+
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'volume_in_mapping')
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'get_session_id_from_mounted_disk')
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'get_device_number_for_target')
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'get_target_from_disk_path')
+ self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
+ 'get_target_lun_count')
+
+ self._mox.StubOutWithMock(volumeutils.VolumeUtils,
+ 'login_storage_target')
+ self._mox.StubOutWithMock(volumeutils.VolumeUtils,
+ 'logout_storage_target')
+ self._mox.StubOutWithMock(volumeutils.VolumeUtils,
+ 'execute_log_out')
+ self._mox.StubOutWithMock(volumeutils.VolumeUtils,
+ 'get_iscsi_initiator')
+
+ self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
+ 'login_storage_target')
+ self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
+ 'logout_storage_target')
+ self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
+ 'execute_log_out')
+
+ self._mox.StubOutWithMock(rdpconsoleutils.RDPConsoleUtils,
+ 'get_rdp_console_port')
+
+ self._mox.StubOutClassWithMocks(instance_metadata, 'InstanceMetadata')
+ self._mox.StubOutWithMock(instance_metadata.InstanceMetadata,
+ 'metadata_for_config_drive')
+
+ # Can't use StubOutClassWithMocks due to __exit__ and __enter__
+ self._mox.StubOutWithMock(configdrive, 'ConfigDriveBuilder')
+ self._mox.StubOutWithMock(configdrive.ConfigDriveBuilder, 'make_drive')
+
+ self._mox.StubOutWithMock(fileutils, 'delete_if_exists')
+ self._mox.StubOutWithMock(utils, 'execute')
+
+ def tearDown(self):
+ self._mox.UnsetStubs()
+ super(HyperVAPIBaseTestCase, self).tearDown()
+
+
+class HyperVAPITestCase(HyperVAPIBaseTestCase):
+ """Unit tests for Hyper-V driver calls."""
+
+ def test_public_api_signatures(self):
+ self.assertPublicAPISignatures(driver.ComputeDriver(None), self._conn)
+
+ def test_get_available_resource(self):
+ cpu_info = {'Architecture': 'fake',
+ 'Name': 'fake',
+ 'Manufacturer': 'ACME, Inc.',
+ 'NumberOfCores': 2,
+ 'NumberOfLogicalProcessors': 4}
+
+ tot_mem_kb = 2000000L
+ free_mem_kb = 1000000L
+
+ tot_hdd_b = 4L * 1024 ** 3
+ free_hdd_b = 3L * 1024 ** 3
+
+ windows_version = '6.2.9200'
+
+ hostutils.HostUtils.get_memory_info().AndReturn((tot_mem_kb,
+ free_mem_kb))
+
+ m = hostutils.HostUtils.get_volume_info(mox.IsA(str))
+ m.AndReturn((tot_hdd_b, free_hdd_b))
+
+ hostutils.HostUtils.get_cpus_info().AndReturn([cpu_info])
+ m = hostutils.HostUtils.is_cpu_feature_present(mox.IsA(int))
+ m.MultipleTimes()
+
+ m = hostutils.HostUtils.get_windows_version()
+ m.AndReturn(windows_version)
+
+ self._mox.ReplayAll()
+ dic = self._conn.get_available_resource(None)
+ self._mox.VerifyAll()
+
+ self.assertEqual(dic['vcpus'], cpu_info['NumberOfLogicalProcessors'])
+ self.assertEqual(dic['hypervisor_hostname'], platform.node())
+ self.assertEqual(dic['memory_mb'], tot_mem_kb / units.Ki)
+ self.assertEqual(dic['memory_mb_used'],
+ tot_mem_kb / units.Ki - free_mem_kb / units.Ki)
+ self.assertEqual(dic['local_gb'], tot_hdd_b / units.Gi)
+ self.assertEqual(dic['local_gb_used'],
+ tot_hdd_b / units.Gi - free_hdd_b / units.Gi)
+ self.assertEqual(dic['hypervisor_version'],
+ windows_version.replace('.', ''))
+ self.assertEqual(dic['supported_instances'],
+ '[["i686", "hyperv", "hvm"], ["x86_64", "hyperv", "hvm"]]')
+
+ def test_list_instances(self):
+ fake_instances = ['fake1', 'fake2']
+ vmutils.VMUtils.list_instances().AndReturn(fake_instances)
+
+ self._mox.ReplayAll()
+ instances = self._conn.list_instances()
+ self._mox.VerifyAll()
+
+ self.assertEqual(instances, fake_instances)
+
+ def test_get_host_uptime(self):
+ fake_host = "fake_host"
+ with mock.patch.object(self._conn._hostops,
+ "get_host_uptime") as mock_uptime:
+ self._conn._hostops.get_host_uptime(fake_host)
+ mock_uptime.assert_called_once_with(fake_host)
+
+ def test_get_info(self):
+ self._instance_data = self._get_instance_data()
+
+ summary_info = {'NumberOfProcessors': 2,
+ 'EnabledState': constants.HYPERV_VM_STATE_ENABLED,
+ 'MemoryUsage': 1000,
+ 'UpTime': 1}
+
+ m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
+ m.AndReturn(True)
+
+ func = mox.Func(self._check_instance_name)
+ m = vmutils.VMUtils.get_vm_summary_info(func)
+ m.AndReturn(summary_info)
+
+ self._mox.ReplayAll()
+ info = self._conn.get_info(self._instance_data)
+ self._mox.VerifyAll()
+
+ self.assertEqual(info["state"], power_state.RUNNING)
+
+ def test_get_info_instance_not_found(self):
+ # Tests that InstanceNotFound is raised if the instance isn't found
+ # from the vmutils.vm_exists method.
+ self._instance_data = self._get_instance_data()
+
+ m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
+ m.AndReturn(False)
+
+ self._mox.ReplayAll()
+ self.assertRaises(exception.InstanceNotFound, self._conn.get_info,
+ self._instance_data)
+ self._mox.VerifyAll()
+
+ def test_spawn_cow_image(self):
+ self._test_spawn_instance(True)
+
+ def test_spawn_cow_image_vhdx(self):
+ self._test_spawn_instance(True, vhd_format=constants.DISK_FORMAT_VHDX)
+
+ def test_spawn_no_cow_image(self):
+ self._test_spawn_instance(False)
+
+ def test_spawn_dynamic_memory(self):
+ CONF.set_override('dynamic_memory_ratio', 2.0, 'hyperv')
+ self._test_spawn_instance()
+
+ def test_spawn_no_cow_image_vhdx(self):
+ self._test_spawn_instance(False, vhd_format=constants.DISK_FORMAT_VHDX)
+
+ def _setup_spawn_config_drive_mocks(self, use_cdrom):
+ instance_metadata.InstanceMetadata(mox.IgnoreArg(),
+ content=mox.IsA(list),
+ extra_md=mox.IsA(dict))
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str))
+ m.AndReturn(self._test_instance_dir)
+
+ cdb = self._mox.CreateMockAnything()
+ m = configdrive.ConfigDriveBuilder(instance_md=mox.IgnoreArg())
+ m.AndReturn(cdb)
+ # __enter__ and __exit__ are required by "with"
+ cdb.__enter__().AndReturn(cdb)
+ cdb.make_drive(mox.IsA(str))
+ cdb.__exit__(None, None, None).AndReturn(None)
+
+ if not use_cdrom:
+ utils.execute(CONF.hyperv.qemu_img_cmd,
+ 'convert',
+ '-f',
+ 'raw',
+ '-O',
+ 'vpc',
+ mox.IsA(str),
+ mox.IsA(str),
+ attempts=1)
+ fake.PathUtils.remove(mox.IsA(str))
+
+ m = vmutils.VMUtils.attach_ide_drive(mox.IsA(str),
+ mox.IsA(str),
+ mox.IsA(int),
+ mox.IsA(int),
+ mox.IsA(str))
+ m.WithSideEffects(self._add_ide_disk)
+
+ def _test_spawn_config_drive(self, use_cdrom, format_error=False):
+ self.flags(force_config_drive=True)
+ self.flags(config_drive_cdrom=use_cdrom, group='hyperv')
+ self.flags(mkisofs_cmd='mkisofs.exe')
+
+ if use_cdrom:
+ expected_ide_disks = 1
+ expected_ide_dvds = 1
+ else:
+ expected_ide_disks = 2
+ expected_ide_dvds = 0
+
+ if format_error:
+ self.assertRaises(vmutils.UnsupportedConfigDriveFormatException,
+ self._test_spawn_instance,
+ with_exception=True,
+ config_drive=True,
+ use_cdrom=use_cdrom)
+ else:
+ self._test_spawn_instance(expected_ide_disks=expected_ide_disks,
+ expected_ide_dvds=expected_ide_dvds,
+ config_drive=True,
+ use_cdrom=use_cdrom)
+
+ def test_spawn_config_drive(self):
+ self._test_spawn_config_drive(False)
+
+ def test_spawn_config_drive_format_error(self):
+ CONF.set_override('config_drive_format', 'wrong_format')
+ self._test_spawn_config_drive(True, True)
+
+ def test_spawn_config_drive_cdrom(self):
+ self._test_spawn_config_drive(True)
+
+ def test_spawn_no_config_drive(self):
+ self.flags(force_config_drive=False)
+
+ expected_ide_disks = 1
+ expected_ide_dvds = 0
+
+ self._test_spawn_instance(expected_ide_disks=expected_ide_disks,
+ expected_ide_dvds=expected_ide_dvds)
+
+ def _test_spawn_nova_net_vif(self, with_port):
+ self.flags(network_api_class='nova.network.api.API')
+ # Reinstantiate driver, as the VIF plugin is loaded during __init__
+ self._conn = driver_hyperv.HyperVDriver(None)
+
+ def setup_vif_mocks():
+ fake_vswitch_path = 'fake vswitch path'
+ fake_vswitch_port = 'fake port'
+
+ m = networkutils.NetworkUtils.get_external_vswitch(
+ CONF.hyperv.vswitch_name)
+ m.AndReturn(fake_vswitch_path)
+
+ m = networkutils.NetworkUtils.vswitch_port_needed()
+ m.AndReturn(with_port)
+
+ if with_port:
+ m = networkutils.NetworkUtils.create_vswitch_port(
+ fake_vswitch_path, mox.IsA(str))
+ m.AndReturn(fake_vswitch_port)
+ vswitch_conn_data = fake_vswitch_port
+ else:
+ vswitch_conn_data = fake_vswitch_path
+
+ vmutils.VMUtils.set_nic_connection(mox.IsA(str),
+ mox.IsA(str), vswitch_conn_data)
+
+ self._test_spawn_instance(setup_vif_mocks_func=setup_vif_mocks)
+
+ def test_spawn_nova_net_vif_with_port(self):
+ self._test_spawn_nova_net_vif(True)
+
+ def test_spawn_nova_net_vif_without_port(self):
+ self._test_spawn_nova_net_vif(False)
+
+ def test_spawn_nova_net_vif_no_vswitch_exception(self):
+ self.flags(network_api_class='nova.network.api.API')
+ # Reinstantiate driver, as the VIF plugin is loaded during __init__
+ self._conn = driver_hyperv.HyperVDriver(None)
+
+ def setup_vif_mocks():
+ m = networkutils.NetworkUtils.get_external_vswitch(
+ CONF.hyperv.vswitch_name)
+ m.AndRaise(vmutils.HyperVException(_('fake vswitch not found')))
+
+ self.assertRaises(vmutils.HyperVException, self._test_spawn_instance,
+ setup_vif_mocks_func=setup_vif_mocks,
+ with_exception=True)
+
+ def test_spawn_with_metrics_collection(self):
+ self.flags(enable_instance_metrics_collection=True, group='hyperv')
+ self._test_spawn_instance(False)
+
+ def test_spawn_with_ephemeral_storage(self):
+ self._test_spawn_instance(True, expected_ide_disks=2,
+ ephemeral_storage=True)
+
+ def _check_instance_name(self, vm_name):
+ return vm_name == self._instance_data['name']
+
+ def _test_vm_state_change(self, action, from_state, to_state):
+ self._instance_data = self._get_instance_data()
+
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ to_state)
+
+ if to_state in (constants.HYPERV_VM_STATE_DISABLED,
+ constants.HYPERV_VM_STATE_REBOOT):
+ self._setup_delete_vm_log_mocks()
+ if to_state in (constants.HYPERV_VM_STATE_ENABLED,
+ constants.HYPERV_VM_STATE_REBOOT):
+ self._setup_log_vm_output_mocks()
+
+ self._mox.ReplayAll()
+ action(self._instance_data)
+ self._mox.VerifyAll()
+
+ def test_pause(self):
+ self._test_vm_state_change(self._conn.pause, None,
+ constants.HYPERV_VM_STATE_PAUSED)
+
+ def test_pause_already_paused(self):
+ self._test_vm_state_change(self._conn.pause,
+ constants.HYPERV_VM_STATE_PAUSED,
+ constants.HYPERV_VM_STATE_PAUSED)
+
+ def test_unpause(self):
+ self._test_vm_state_change(self._conn.unpause,
+ constants.HYPERV_VM_STATE_PAUSED,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_unpause_already_running(self):
+ self._test_vm_state_change(self._conn.unpause, None,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_suspend(self):
+ self._test_vm_state_change(self._conn.suspend, None,
+ constants.HYPERV_VM_STATE_SUSPENDED)
+
+ def test_suspend_already_suspended(self):
+ self._test_vm_state_change(self._conn.suspend,
+ constants.HYPERV_VM_STATE_SUSPENDED,
+ constants.HYPERV_VM_STATE_SUSPENDED)
+
+ def test_resume(self):
+ self._test_vm_state_change(lambda i: self._conn.resume(self._context,
+ i, None),
+ constants.HYPERV_VM_STATE_SUSPENDED,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_resume_already_running(self):
+ self._test_vm_state_change(lambda i: self._conn.resume(self._context,
+ i, None), None,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_power_off(self):
+ self._test_vm_state_change(self._conn.power_off, None,
+ constants.HYPERV_VM_STATE_DISABLED)
+
+ def test_power_off_already_powered_off(self):
+ self._test_vm_state_change(self._conn.power_off,
+ constants.HYPERV_VM_STATE_DISABLED,
+ constants.HYPERV_VM_STATE_DISABLED)
+
+ def _test_power_on(self, block_device_info):
+ self._instance_data = self._get_instance_data()
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ constants.HYPERV_VM_STATE_ENABLED)
+ if block_device_info:
+ self._mox.StubOutWithMock(volumeops.VolumeOps,
+ 'fix_instance_volume_disk_paths')
+ volumeops.VolumeOps.fix_instance_volume_disk_paths(
+ mox.Func(self._check_instance_name), block_device_info)
+
+ self._setup_log_vm_output_mocks()
+
+ self._mox.ReplayAll()
+ self._conn.power_on(self._context, self._instance_data, network_info,
+ block_device_info=block_device_info)
+ self._mox.VerifyAll()
+
+ def test_power_on_having_block_devices(self):
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
+ self._test_power_on(block_device_info=block_device_info)
+
+ def test_power_on_without_block_devices(self):
+ self._test_power_on(block_device_info=None)
+
+ def test_power_on_already_running(self):
+ self._instance_data = self._get_instance_data()
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ constants.HYPERV_VM_STATE_ENABLED)
+ self._setup_log_vm_output_mocks()
+ self._mox.ReplayAll()
+ self._conn.power_on(self._context, self._instance_data, network_info)
+ self._mox.VerifyAll()
+
+ def test_reboot(self):
+
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+ self._instance_data = self._get_instance_data()
+
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ constants.HYPERV_VM_STATE_REBOOT)
+
+ self._setup_delete_vm_log_mocks()
+ self._setup_log_vm_output_mocks()
+
+ self._mox.ReplayAll()
+ self._conn.reboot(self._context, self._instance_data, network_info,
+ None)
+ self._mox.VerifyAll()
+
+ def _setup_destroy_mocks(self, destroy_disks=True):
+ fake_volume_drives = ['fake_volume_drive']
+ fake_target_iqn = 'fake_target_iqn'
+ fake_target_lun = 'fake_target_lun'
+
+ m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
+ m.AndReturn(True)
+
+ func = mox.Func(self._check_instance_name)
+ vmutils.VMUtils.set_vm_state(func, constants.HYPERV_VM_STATE_DISABLED)
+
+ self._setup_delete_vm_log_mocks()
+
+ m = vmutils.VMUtils.get_vm_storage_paths(func)
+ m.AndReturn(([], fake_volume_drives))
+
+ vmutils.VMUtils.destroy_vm(func)
+
+ m = self._conn._volumeops.get_target_from_disk_path(
+ fake_volume_drives[0])
+ m.AndReturn((fake_target_iqn, fake_target_lun))
+
+ self._mock_logout_storage_target(fake_target_iqn)
+
+ if destroy_disks:
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str),
+ create_dir=False,
+ remove_dir=True)
+ m.AndReturn(self._test_instance_dir)
+
+ def test_destroy(self):
+ self._instance_data = self._get_instance_data()
+
+ self._setup_destroy_mocks()
+
+ self._mox.ReplayAll()
+ self._conn.destroy(self._context, self._instance_data, None)
+ self._mox.VerifyAll()
+
+ def test_live_migration_unsupported_os(self):
+ self._check_min_windows_version_satisfied = False
+ self._conn = driver_hyperv.HyperVDriver(None)
+ self._test_live_migration(unsupported_os=True)
+
+ def test_live_migration_without_volumes(self):
+ self._test_live_migration()
+
+ def test_live_migration_with_volumes(self):
+ self._test_live_migration(with_volumes=True)
+
+ def test_live_migration_with_multiple_luns_per_target(self):
+ self._test_live_migration(with_volumes=True,
+ other_luns_available=True)
+
+ def test_live_migration_with_target_failure(self):
+ self._test_live_migration(test_failure=True)
+
+ def _test_live_migration(self, test_failure=False,
+ with_volumes=False,
+ other_luns_available=False,
+ unsupported_os=False):
+ dest_server = 'fake_server'
+
+ instance_data = self._get_instance_data()
+
+ fake_post_method = self._mox.CreateMockAnything()
+ if not test_failure and not unsupported_os:
+ fake_post_method(self._context, instance_data, dest_server,
+ False)
+
+ fake_recover_method = self._mox.CreateMockAnything()
+ if test_failure:
+ fake_recover_method(self._context, instance_data, dest_server,
+ False)
+
+ if with_volumes:
+ fake_target_iqn = 'fake_target_iqn'
+ fake_target_lun_count = 1
+
+ if not unsupported_os:
+ m = fake.PathUtils.get_vm_console_log_paths(mox.IsA(str))
+ m.AndReturn(('fake_local_vm_log_path', 'fake_vm_log_path.1'))
+
+ m = fake.PathUtils.get_vm_console_log_paths(
+ mox.IsA(str), remote_server=mox.IsA(str))
+ m.AndReturn(('fake_remote_vm_log_path',
+ 'fake_remote_vm_log_path.1'))
+
+ self._mox.StubOutWithMock(fake.PathUtils, 'exists')
+ m = fake.PathUtils.exists(mox.IsA(str))
+ m.AndReturn(True)
+ m = fake.PathUtils.exists(mox.IsA(str))
+ m.AndReturn(False)
+
+ fake.PathUtils.copy(mox.IsA(str), mox.IsA(str))
+
+ m = livemigrationutils.LiveMigrationUtils.live_migrate_vm(
+ instance_data['name'], dest_server)
+ if test_failure:
+ m.AndRaise(vmutils.HyperVException('Simulated failure'))
+
+ if with_volumes:
+ m.AndReturn({fake_target_iqn: fake_target_lun_count})
+
+ self._mock_logout_storage_target(fake_target_iqn,
+ other_luns_available)
+ else:
+ m.AndReturn({})
+
+ self._mox.ReplayAll()
+ try:
+ hyperv_exception_raised = False
+ unsupported_os_exception_raised = False
+ self._conn.live_migration(self._context, instance_data,
+ dest_server, fake_post_method,
+ fake_recover_method)
+ except vmutils.HyperVException:
+ hyperv_exception_raised = True
+ except NotImplementedError:
+ unsupported_os_exception_raised = True
+
+ self.assertTrue(not test_failure ^ hyperv_exception_raised)
+ self.assertTrue(not unsupported_os ^ unsupported_os_exception_raised)
+ self._mox.VerifyAll()
+
+ def test_pre_live_migration_cow_image(self):
+ self._test_pre_live_migration(True, False)
+
+ def test_pre_live_migration_no_cow_image(self):
+ self._test_pre_live_migration(False, False)
+
+ def test_pre_live_migration_with_volumes(self):
+ self._test_pre_live_migration(False, True)
+
+ def _test_pre_live_migration(self, cow, with_volumes):
+ self.flags(use_cow_images=cow)
+
+ instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, instance_data)
+ instance['system_metadata'] = {}
+
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+
+ m = livemigrationutils.LiveMigrationUtils.check_live_migration_config()
+ m.AndReturn(True)
+
+ if cow:
+ self._setup_get_cached_image_mocks(cow)
+
+ if with_volumes:
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
+
+ mapping = driver.block_device_info_get_mapping(block_device_info)
+ data = mapping[0]['connection_info']['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ target_portal = data['target_portal']
+
+ fake_mounted_disk = "fake_mounted_disk"
+ fake_device_number = 0
+
+ self._mock_login_storage_target(target_iqn, target_lun,
+ target_portal,
+ fake_mounted_disk,
+ fake_device_number)
+ else:
+ block_device_info = None
+
+ self._mox.ReplayAll()
+ self._conn.pre_live_migration(self._context, instance,
+ block_device_info, None, network_info)
+ self._mox.VerifyAll()
+
+ if cow:
+ self.assertIsNotNone(self._fetched_image)
+ else:
+ self.assertIsNone(self._fetched_image)
+
+ def test_get_instance_disk_info_is_implemented(self):
+ # Ensure that the method has been implemented in the driver
+ try:
+ disk_info = self._conn.get_instance_disk_info('fake_instance_name')
+ self.assertIsNone(disk_info)
+ except NotImplementedError:
+ self.fail("test_get_instance_disk_info() should not raise "
+ "NotImplementedError")
+
+ def test_snapshot_with_update_failure(self):
+ (snapshot_name, func_call_matcher) = self._setup_snapshot_mocks()
+
+ self._update_image_raise_exception = True
+
+ self._mox.ReplayAll()
+ self.assertRaises(vmutils.HyperVException, self._conn.snapshot,
+ self._context, self._instance_data, snapshot_name,
+ func_call_matcher.call)
+ self._mox.VerifyAll()
+
+ # Assert states changed in correct order
+ self.assertIsNone(func_call_matcher.match())
+
+ def _setup_snapshot_mocks(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs': {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs': {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}
+ ]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ snapshot_name = 'test_snapshot_' + str(uuid.uuid4())
+
+ fake_hv_snapshot_path = 'fake_snapshot_path'
+ fake_parent_vhd_path = 'C:\\fake_vhd_path\\parent.vhd'
+
+ self._instance_data = self._get_instance_data()
+
+ func = mox.Func(self._check_instance_name)
+ m = vmutils.VMUtils.take_vm_snapshot(func)
+ m.AndReturn(fake_hv_snapshot_path)
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str))
+ m.AndReturn(self._test_instance_dir)
+
+ m = vhdutils.VHDUtils.get_vhd_parent_path(mox.IsA(str))
+ m.AndReturn(fake_parent_vhd_path)
+
+ self._fake_dest_disk_path = None
+
+ def copy_dest_disk_path(src, dest):
+ self._fake_dest_disk_path = dest
+
+ m = fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
+ m.WithSideEffects(copy_dest_disk_path)
+
+ self._fake_dest_base_disk_path = None
+
+ def copy_dest_base_disk_path(src, dest):
+ self._fake_dest_base_disk_path = dest
+
+ m = fake.PathUtils.copyfile(fake_parent_vhd_path, mox.IsA(str))
+ m.WithSideEffects(copy_dest_base_disk_path)
+
+ def check_dest_disk_path(path):
+ return path == self._fake_dest_disk_path
+
+ def check_dest_base_disk_path(path):
+ return path == self._fake_dest_base_disk_path
+
+ func1 = mox.Func(check_dest_disk_path)
+ func2 = mox.Func(check_dest_base_disk_path)
+ # Make sure that the hyper-v base and differential VHDs are merged
+ vhdutils.VHDUtils.reconnect_parent_vhd(func1, func2)
+ vhdutils.VHDUtils.merge_vhd(func1, func2)
+
+ def check_snapshot_path(snapshot_path):
+ return snapshot_path == fake_hv_snapshot_path
+
+ # Make sure that the Hyper-V snapshot is removed
+ func = mox.Func(check_snapshot_path)
+ vmutils.VMUtils.remove_vm_snapshot(func)
+
+ fake.PathUtils.rmtree(mox.IsA(str))
+
+ m = fake.PathUtils.open(func2, 'rb')
+ m.AndReturn(io.BytesIO(b'fake content'))
+
+ return (snapshot_name, func_call_matcher)
+
+ def test_snapshot(self):
+ (snapshot_name, func_call_matcher) = self._setup_snapshot_mocks()
+
+ self._mox.ReplayAll()
+ self._conn.snapshot(self._context, self._instance_data, snapshot_name,
+ func_call_matcher.call)
+ self._mox.VerifyAll()
+
+ self.assertTrue(self._image_metadata)
+ self.assertIn("disk_format", self._image_metadata)
+ self.assertEqual("vhd", self._image_metadata["disk_format"])
+
+ # Assert states changed in correct order
+ self.assertIsNone(func_call_matcher.match())
+
+ def _get_instance_data(self):
+ instance_name = 'openstack_unit_test_vm_' + str(uuid.uuid4())
+ return db_fakes.get_fake_instance_data(instance_name,
+ self._project_id,
+ self._user_id)
+
+ def _spawn_instance(self, cow, block_device_info=None,
+ ephemeral_storage=False):
+ self.flags(use_cow_images=cow)
+
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+ instance['system_metadata'] = {}
+
+ if ephemeral_storage:
+ instance['ephemeral_gb'] = 1
+
+ image = db_fakes.get_fake_image_data(self._project_id, self._user_id)
+
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+
+ self._conn.spawn(self._context, instance, image,
+ injected_files=[], admin_password=None,
+ network_info=network_info,
+ block_device_info=block_device_info)
+
+ def _add_ide_disk(self, vm_name, path, ctrller_addr,
+ drive_addr, drive_type):
+ if drive_type == constants.IDE_DISK:
+ self._instance_ide_disks.append(path)
+ elif drive_type == constants.IDE_DVD:
+ self._instance_ide_dvds.append(path)
+
+ def _add_volume_disk(self, vm_name, controller_path, address,
+ mounted_disk_path):
+ self._instance_volume_disks.append(mounted_disk_path)
+
+ def _check_img_path(self, image_path):
+ return image_path == self._fetched_image
+
+ def _setup_create_instance_mocks(self, setup_vif_mocks_func=None,
+ boot_from_volume=False,
+ block_device_info=None,
+ admin_permissions=True,
+ ephemeral_storage=False):
+ vmutils.VMUtils.create_vm(mox.Func(self._check_vm_name), mox.IsA(int),
+ mox.IsA(int), mox.IsA(bool),
+ CONF.hyperv.dynamic_memory_ratio,
+ mox.IsA(list))
+
+ if not boot_from_volume:
+ m = vmutils.VMUtils.attach_ide_drive(mox.Func(self._check_vm_name),
+ mox.IsA(str),
+ mox.IsA(int),
+ mox.IsA(int),
+ mox.IsA(str))
+ m.WithSideEffects(self._add_ide_disk).InAnyOrder()
+
+ if ephemeral_storage:
+ m = vmutils.VMUtils.attach_ide_drive(mox.Func(self._check_vm_name),
+ mox.IsA(str),
+ mox.IsA(int),
+ mox.IsA(int),
+ mox.IsA(str))
+ m.WithSideEffects(self._add_ide_disk).InAnyOrder()
+
+ func = mox.Func(self._check_vm_name)
+ m = vmutils.VMUtils.create_scsi_controller(func)
+ m.InAnyOrder()
+
+ if boot_from_volume:
+ mapping = driver.block_device_info_get_mapping(block_device_info)
+ data = mapping[0]['connection_info']['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ target_portal = data['target_portal']
+
+ self._mock_attach_volume(mox.Func(self._check_vm_name), target_iqn,
+ target_lun, target_portal, True)
+
+ vmutils.VMUtils.create_nic(mox.Func(self._check_vm_name),
+ mox.IsA(str), mox.IsA(unicode)).InAnyOrder()
+
+ if setup_vif_mocks_func:
+ setup_vif_mocks_func()
+
+ if CONF.hyperv.enable_instance_metrics_collection:
+ vmutils.VMUtils.enable_vm_metrics_collection(
+ mox.Func(self._check_vm_name))
+
+ vmutils.VMUtils.get_vm_serial_port_connection(
+ mox.IsA(str), update_connection=mox.IsA(str))
+
+ def _set_vm_name(self, vm_name):
+ self._test_vm_name = vm_name
+
+ def _check_vm_name(self, vm_name):
+ return vm_name == self._test_vm_name
+
+ def _setup_check_admin_permissions_mocks(self, admin_permissions=True):
+ self._mox.StubOutWithMock(vmutils.VMUtils,
+ 'check_admin_permissions')
+ m = vmutils.VMUtils.check_admin_permissions()
+ if admin_permissions:
+ m.AndReturn(None)
+ else:
+ m.AndRaise(vmutils.HyperVAuthorizationException(_(
+ 'Simulated failure')))
+
+ def _setup_log_vm_output_mocks(self):
+ m = fake.PathUtils.get_vm_console_log_paths(mox.IsA(str))
+ m.AndReturn(('fake_vm_log_path', 'fake_vm_log_path.1'))
+ ioutils.IOThread('fake_pipe', 'fake_vm_log_path',
+ units.Mi).start()
+
+ def _setup_delete_vm_log_mocks(self):
+ m = fake.PathUtils.get_vm_console_log_paths(mox.IsA(str))
+ m.AndReturn(('fake_vm_log_path', 'fake_vm_log_path.1'))
+ fileutils.delete_if_exists(mox.IsA(str))
+ fileutils.delete_if_exists(mox.IsA(str))
+
+ def _setup_get_cached_image_mocks(self, cow=True,
+ vhd_format=constants.DISK_FORMAT_VHD):
+ m = vhdutils.VHDUtils.get_vhd_format(
+ mox.Func(self._check_img_path))
+ m.AndReturn(vhd_format)
+
+ def check_img_path_with_ext(image_path):
+ return image_path == self._fetched_image + '.' + vhd_format.lower()
+
+ fake.PathUtils.rename(mox.Func(self._check_img_path),
+ mox.Func(check_img_path_with_ext))
+
+ if cow and vhd_format == constants.DISK_FORMAT_VHD:
+ m = vhdutils.VHDUtils.get_vhd_info(
+ mox.Func(check_img_path_with_ext))
+ m.AndReturn({'MaxInternalSize': 1024})
+
+ fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
+
+ m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
+ mox.IsA(str), mox.IsA(object))
+ m.AndReturn(1025)
+
+ vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object),
+ is_file_max_size=False)
+
+ def _setup_spawn_instance_mocks(self, cow, setup_vif_mocks_func=None,
+ with_exception=False,
+ block_device_info=None,
+ boot_from_volume=False,
+ config_drive=False,
+ use_cdrom=False,
+ admin_permissions=True,
+ vhd_format=constants.DISK_FORMAT_VHD,
+ ephemeral_storage=False):
+ m = vmutils.VMUtils.vm_exists(mox.IsA(str))
+ m.WithSideEffects(self._set_vm_name).AndReturn(False)
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str),
+ create_dir=False,
+ remove_dir=True)
+ m.AndReturn(self._test_instance_dir)
+
+ if block_device_info:
+ m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(
+ 'fake_root_device_name', block_device_info)
+ m.AndReturn(boot_from_volume)
+
+ if not boot_from_volume:
+ m = fake.PathUtils.get_instance_dir(mox.Func(self._check_vm_name))
+ m.AndReturn(self._test_instance_dir)
+
+ self._setup_get_cached_image_mocks(cow, vhd_format)
+ m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
+ m.AndReturn({'MaxInternalSize': 1024, 'FileSize': 1024,
+ 'Type': 2})
+
+ if cow:
+ m = vhdutils.VHDUtils.get_vhd_format(mox.IsA(str))
+ m.AndReturn(vhd_format)
+ if vhd_format == constants.DISK_FORMAT_VHD:
+ vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str),
+ mox.IsA(str))
+ else:
+ m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
+ mox.IsA(str), mox.IsA(object))
+ m.AndReturn(1025)
+ vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str),
+ mox.IsA(str),
+ mox.IsA(int))
+ else:
+ fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
+ m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
+ mox.IsA(str), mox.IsA(object))
+ m.AndReturn(1025)
+ vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object),
+ is_file_max_size=False)
+
+ self._setup_check_admin_permissions_mocks(
+ admin_permissions=admin_permissions)
+ if ephemeral_storage:
+ m = fake.PathUtils.get_instance_dir(mox.Func(self._check_vm_name))
+ m.AndReturn(self._test_instance_dir)
+ vhdutils.VHDUtils.create_dynamic_vhd(mox.IsA(str), mox.IsA(int),
+ mox.IsA(str))
+
+ self._setup_create_instance_mocks(setup_vif_mocks_func,
+ boot_from_volume,
+ block_device_info,
+ ephemeral_storage=ephemeral_storage)
+
+ if config_drive and not with_exception:
+ self._setup_spawn_config_drive_mocks(use_cdrom)
+
+ # TODO(alexpilotti) Based on where the exception is thrown
+ # some of the above mock calls need to be skipped
+ if with_exception:
+ self._setup_destroy_mocks()
+ else:
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_vm_name),
+ constants.HYPERV_VM_STATE_ENABLED)
+ self._setup_log_vm_output_mocks()
+
+ def _test_spawn_instance(self, cow=True,
+ expected_ide_disks=1,
+ expected_ide_dvds=0,
+ setup_vif_mocks_func=None,
+ with_exception=False,
+ config_drive=False,
+ use_cdrom=False,
+ admin_permissions=True,
+ vhd_format=constants.DISK_FORMAT_VHD,
+ ephemeral_storage=False):
+ self._setup_spawn_instance_mocks(cow,
+ setup_vif_mocks_func,
+ with_exception,
+ config_drive=config_drive,
+ use_cdrom=use_cdrom,
+ admin_permissions=admin_permissions,
+ vhd_format=vhd_format,
+ ephemeral_storage=ephemeral_storage)
+
+ self._mox.ReplayAll()
+ self._spawn_instance(cow, ephemeral_storage=ephemeral_storage)
+ self._mox.VerifyAll()
+
+ self.assertEqual(len(self._instance_ide_disks), expected_ide_disks)
+ self.assertEqual(len(self._instance_ide_dvds), expected_ide_dvds)
+
+ vhd_path = os.path.join(self._test_instance_dir, 'root.' +
+ vhd_format.lower())
+ self.assertEqual(vhd_path, self._instance_ide_disks[0])
+
+ def _mock_get_mounted_disk_from_lun(self, target_iqn, target_lun,
+ fake_mounted_disk,
+ fake_device_number):
+ m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
+ target_lun)
+ m.AndReturn(fake_device_number)
+
+ m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
+ fake_device_number)
+ m.AndReturn(fake_mounted_disk)
+
+ def _mock_login_storage_target(self, target_iqn, target_lun, target_portal,
+ fake_mounted_disk, fake_device_number):
+ m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
+ target_lun)
+ m.AndReturn(fake_device_number)
+
+ volumeutils.VolumeUtils.login_storage_target(target_lun,
+ target_iqn,
+ target_portal)
+
+ self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
+ fake_mounted_disk,
+ fake_device_number)
+
+ def _mock_attach_volume(self, instance_name, target_iqn, target_lun,
+ target_portal=None, boot_from_volume=False):
+ fake_mounted_disk = "fake_mounted_disk"
+ fake_device_number = 0
+ fake_controller_path = 'fake_scsi_controller_path'
+ self._mox.StubOutWithMock(self._conn._volumeops,
+ '_get_free_controller_slot')
+
+ self._mock_login_storage_target(target_iqn, target_lun,
+ target_portal,
+ fake_mounted_disk,
+ fake_device_number)
+
+ self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
+ fake_mounted_disk,
+ fake_device_number)
+
+ if boot_from_volume:
+ m = vmutils.VMUtils.get_vm_ide_controller(instance_name, 0)
+ m.AndReturn(fake_controller_path)
+ fake_free_slot = 0
+ else:
+ m = vmutils.VMUtils.get_vm_scsi_controller(instance_name)
+ m.AndReturn(fake_controller_path)
+
+ fake_free_slot = 1
+ m = self._conn._volumeops._get_free_controller_slot(
+ fake_controller_path)
+ m.AndReturn(fake_free_slot)
+
+ m = vmutils.VMUtils.attach_volume_to_controller(instance_name,
+ fake_controller_path,
+ fake_free_slot,
+ fake_mounted_disk)
+ m.WithSideEffects(self._add_volume_disk)
+
+ def _test_util_class_version(self, v1_class, v2_class,
+ get_instance_action, is_hyperv_2012,
+ force_v1_flag, force_utils_v1):
+ self._check_min_windows_version_satisfied = is_hyperv_2012
+ CONF.set_override(force_v1_flag, force_v1_flag, 'hyperv')
+ self._conn = driver_hyperv.HyperVDriver(None)
+
+ instance = get_instance_action()
+ is_v1 = isinstance(instance, v1_class)
+ # v2_class can inherit from v1_class
+ is_v2 = isinstance(instance, v2_class)
+
+ self.assertTrue((is_hyperv_2012 and not force_v1_flag) ^
+ (is_v1 and not is_v2))
+
+ def test_volumeutils_version_hyperv_2012(self):
+ self._test_util_class_version(volumeutils.VolumeUtils,
+ volumeutilsv2.VolumeUtilsV2,
+ lambda: utilsfactory.get_volumeutils(),
+ True, 'force_volumeutils_v1', False)
+
+ def test_volumeutils_version_hyperv_2012_force_v1(self):
+ self._test_util_class_version(volumeutils.VolumeUtils,
+ volumeutilsv2.VolumeUtilsV2,
+ lambda: utilsfactory.get_volumeutils(),
+ True, 'force_volumeutils_v1', True)
+
+ def test_volumeutils_version_hyperv_2008R2(self):
+ self._test_util_class_version(volumeutils.VolumeUtils,
+ volumeutilsv2.VolumeUtilsV2,
+ lambda: utilsfactory.get_volumeutils(),
+ False, 'force_volumeutils_v1', False)
+
+ def test_vmutils_version_hyperv_2012(self):
+ self._test_util_class_version(vmutils.VMUtils, vmutilsv2.VMUtilsV2,
+ lambda: utilsfactory.get_vmutils(),
+ True, 'force_hyperv_utils_v1', False)
+
+ def test_vmutils_version_hyperv_2012_force_v1(self):
+ self._test_util_class_version(vmutils.VMUtils, vmutilsv2.VMUtilsV2,
+ lambda: utilsfactory.get_vmutils(),
+ True, 'force_hyperv_utils_v1', True)
+
+ def test_vmutils_version_hyperv_2008R2(self):
+ self._test_util_class_version(vmutils.VMUtils, vmutilsv2.VMUtilsV2,
+ lambda: utilsfactory.get_vmutils(),
+ False, 'force_hyperv_utils_v1', False)
+
+ def test_vhdutils_version_hyperv_2012(self):
+ self._test_util_class_version(vhdutils.VHDUtils,
+ vhdutilsv2.VHDUtilsV2,
+ lambda: utilsfactory.get_vhdutils(),
+ True, 'force_hyperv_utils_v1', False)
+
+ def test_vhdutils_version_hyperv_2012_force_v1(self):
+ self._test_util_class_version(vhdutils.VHDUtils,
+ vhdutilsv2.VHDUtilsV2,
+ lambda: utilsfactory.get_vhdutils(),
+ True, 'force_hyperv_utils_v1', True)
+
+ def test_vhdutils_version_hyperv_2008R2(self):
+ self._test_util_class_version(vhdutils.VHDUtils,
+ vhdutilsv2.VHDUtilsV2,
+ lambda: utilsfactory.get_vhdutils(),
+ False, 'force_hyperv_utils_v1', False)
+
+ def test_networkutils_version_hyperv_2012(self):
+ self._test_util_class_version(networkutils.NetworkUtils,
+ networkutilsv2.NetworkUtilsV2,
+ lambda: utilsfactory.get_networkutils(),
+ True, 'force_hyperv_utils_v1', False)
+
+ def test_networkutils_version_hyperv_2012_force_v1(self):
+ self._test_util_class_version(networkutils.NetworkUtils,
+ networkutilsv2.NetworkUtilsV2,
+ lambda: utilsfactory.get_networkutils(),
+ True, 'force_hyperv_utils_v1', True)
+
+ def test_networkutils_version_hyperv_2008R2(self):
+ self._test_util_class_version(networkutils.NetworkUtils,
+ networkutilsv2.NetworkUtilsV2,
+ lambda: utilsfactory.get_networkutils(),
+ False, 'force_hyperv_utils_v1', False)
+
+ def test_attach_volume(self):
+ instance_data = self._get_instance_data()
+
+ connection_info = db_fakes.get_fake_volume_info_data(
+ self._volume_target_portal, self._volume_id)
+ data = connection_info['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ target_portal = data['target_portal']
+ mount_point = '/dev/sdc'
+
+ self._mock_attach_volume(instance_data['name'], target_iqn, target_lun,
+ target_portal)
+
+ self._mox.ReplayAll()
+ self._conn.attach_volume(None, connection_info, instance_data,
+ mount_point)
+ self._mox.VerifyAll()
+
+ self.assertEqual(len(self._instance_volume_disks), 1)
+
+ def _mock_get_mounted_disk_from_lun_error(self, target_iqn, target_lun,
+ fake_mounted_disk,
+ fake_device_number):
+ m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
+ target_lun)
+ m.AndRaise(vmutils.HyperVException('Simulated failure'))
+
+ def _mock_attach_volume_target_logout(self, instance_name, target_iqn,
+ target_lun, target_portal=None,
+ boot_from_volume=False):
+ fake_mounted_disk = "fake_mounted disk"
+ fake_device_number = 0
+
+ self._mock_login_storage_target(target_iqn, target_lun,
+ target_portal,
+ fake_mounted_disk,
+ fake_device_number)
+
+ self._mock_get_mounted_disk_from_lun_error(target_iqn, target_lun,
+ fake_mounted_disk,
+ fake_device_number)
+
+ self._mock_logout_storage_target(target_iqn)
+
+ def test_attach_volume_logout(self):
+ instance_data = self._get_instance_data()
+
+ connection_info = db_fakes.get_fake_volume_info_data(
+ self._volume_target_portal, self._volume_id)
+ data = connection_info['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ target_portal = data['target_portal']
+ mount_point = '/dev/sdc'
+
+ self._mock_attach_volume_target_logout(instance_data['name'],
+ target_iqn, target_lun,
+ target_portal)
+
+ self._mox.ReplayAll()
+ self.assertRaises(vmutils.HyperVException, self._conn.attach_volume,
+ None, connection_info, instance_data, mount_point)
+ self._mox.VerifyAll()
+
+ def test_attach_volume_connection_error(self):
+ instance_data = self._get_instance_data()
+
+ connection_info = db_fakes.get_fake_volume_info_data(
+ self._volume_target_portal, self._volume_id)
+ mount_point = '/dev/sdc'
+
+ def fake_login_storage_target(connection_info):
+ raise vmutils.HyperVException('Fake connection exception')
+
+ self.stubs.Set(self._conn._volumeops, '_login_storage_target',
+ fake_login_storage_target)
+ self.assertRaises(vmutils.HyperVException, self._conn.attach_volume,
+ None, connection_info, instance_data, mount_point)
+
+ def _mock_detach_volume(self, target_iqn, target_lun,
+ other_luns_available=False):
+ fake_mounted_disk = "fake_mounted_disk"
+ fake_device_number = 0
+ m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
+ target_lun)
+ m.AndReturn(fake_device_number)
+
+ m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
+ fake_device_number)
+ m.AndReturn(fake_mounted_disk)
+
+ vmutils.VMUtils.detach_vm_disk(mox.IsA(str), fake_mounted_disk)
+
+ self._mock_logout_storage_target(target_iqn, other_luns_available)
+
+ def _mock_logout_storage_target(self, target_iqn,
+ other_luns_available=False):
+
+ m = volumeutils.VolumeUtils.get_target_lun_count(target_iqn)
+ m.AndReturn(1 + int(other_luns_available))
+
+ if not other_luns_available:
+ volumeutils.VolumeUtils.logout_storage_target(target_iqn)
+
+ def _test_detach_volume(self, other_luns_available=False):
+ instance_data = self._get_instance_data()
+ self.assertIn('name', instance_data)
+
+ connection_info = db_fakes.get_fake_volume_info_data(
+ self._volume_target_portal, self._volume_id)
+ data = connection_info['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+ self.assertIn('target_portal', data)
+
+ mount_point = '/dev/sdc'
+
+ self._mock_detach_volume(target_iqn, target_lun, other_luns_available)
+ self._mox.ReplayAll()
+ self._conn.detach_volume(connection_info, instance_data, mount_point)
+ self._mox.VerifyAll()
+
+ def test_detach_volume(self):
+ self._test_detach_volume()
+
+ def test_detach_volume_multiple_luns_per_target(self):
+ # The iSCSI target should not be disconnected in this case.
+ self._test_detach_volume(other_luns_available=True)
+
+ def test_boot_from_volume(self):
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
+
+ self._setup_spawn_instance_mocks(cow=False,
+ block_device_info=block_device_info,
+ boot_from_volume=True)
+
+ self._mox.ReplayAll()
+ self._spawn_instance(False, block_device_info)
+ self._mox.VerifyAll()
+
+ self.assertEqual(len(self._instance_volume_disks), 1)
+
+ def test_get_volume_connector(self):
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+
+ fake_my_ip = "fake_ip"
+ fake_host = "fake_host"
+ fake_initiator = "fake_initiator"
+
+ self.flags(my_ip=fake_my_ip)
+ self.flags(host=fake_host)
+
+ m = volumeutils.VolumeUtils.get_iscsi_initiator()
+ m.AndReturn(fake_initiator)
+
+ self._mox.ReplayAll()
+ data = self._conn.get_volume_connector(instance)
+ self._mox.VerifyAll()
+
+ self.assertEqual(fake_my_ip, data.get('ip'))
+ self.assertEqual(fake_host, data.get('host'))
+ self.assertEqual(fake_initiator, data.get('initiator'))
+
+ def _setup_test_migrate_disk_and_power_off_mocks(self, same_host=False,
+ copy_exception=False,
+ size_exception=False):
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+
+ instance['root_gb'] = 10
+
+ fake_local_ip = '10.0.0.1'
+ if same_host:
+ fake_dest_ip = fake_local_ip
+ else:
+ fake_dest_ip = '10.0.0.2'
+
+ if size_exception:
+ flavor = 'm1.tiny'
+ else:
+ flavor = 'm1.small'
+
+ flavor = db.flavor_get_by_name(self._context, flavor)
+
+ if not size_exception:
+ fake_root_vhd_path = 'C:\\FakePath\\root.vhd'
+ fake_revert_path = os.path.join(self._test_instance_dir, '_revert')
+
+ func = mox.Func(self._check_instance_name)
+ vmutils.VMUtils.set_vm_state(func,
+ constants.HYPERV_VM_STATE_DISABLED)
+
+ self._setup_delete_vm_log_mocks()
+
+ m = vmutils.VMUtils.get_vm_storage_paths(func)
+ m.AndReturn(([fake_root_vhd_path], []))
+
+ m = hostutils.HostUtils.get_local_ips()
+ m.AndReturn([fake_local_ip])
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str))
+ m.AndReturn(self._test_instance_dir)
+
+ m = pathutils.PathUtils.get_instance_migr_revert_dir(
+ instance['name'], remove_dir=True)
+ m.AndReturn(fake_revert_path)
+
+ if same_host:
+ fake.PathUtils.makedirs(mox.IsA(str))
+
+ m = fake.PathUtils.copy(fake_root_vhd_path, mox.IsA(str))
+ if copy_exception:
+ m.AndRaise(shutil.Error('Simulated copy error'))
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str),
+ mox.IsA(str),
+ remove_dir=True)
+ m.AndReturn(self._test_instance_dir)
+ else:
+ fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))
+ destroy_disks = True
+ if same_host:
+ fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))
+ destroy_disks = False
+
+ self._setup_destroy_mocks(False)
+
+ if destroy_disks:
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str),
+ mox.IsA(str),
+ remove_dir=True)
+ m.AndReturn(self._test_instance_dir)
+
+ return (instance, fake_dest_ip, network_info, flavor)
+
+ def test_migrate_disk_and_power_off(self):
+ (instance,
+ fake_dest_ip,
+ network_info,
+ flavor) = self._setup_test_migrate_disk_and_power_off_mocks()
+
+ self._mox.ReplayAll()
+ self._conn.migrate_disk_and_power_off(self._context, instance,
+ fake_dest_ip, flavor,
+ network_info)
+ self._mox.VerifyAll()
+
+ def test_migrate_disk_and_power_off_same_host(self):
+ args = self._setup_test_migrate_disk_and_power_off_mocks(
+ same_host=True)
+ (instance, fake_dest_ip, network_info, flavor) = args
+
+ self._mox.ReplayAll()
+ self._conn.migrate_disk_and_power_off(self._context, instance,
+ fake_dest_ip, flavor,
+ network_info)
+ self._mox.VerifyAll()
+
+ def test_migrate_disk_and_power_off_copy_exception(self):
+ args = self._setup_test_migrate_disk_and_power_off_mocks(
+ copy_exception=True)
+ (instance, fake_dest_ip, network_info, flavor) = args
+
+ self._mox.ReplayAll()
+ self.assertRaises(shutil.Error, self._conn.migrate_disk_and_power_off,
+ self._context, instance, fake_dest_ip,
+ flavor, network_info)
+ self._mox.VerifyAll()
+
+ def test_migrate_disk_and_power_off_smaller_root_vhd_size_exception(self):
+ args = self._setup_test_migrate_disk_and_power_off_mocks(
+ size_exception=True)
+ (instance, fake_dest_ip, network_info, flavor) = args
+
+ self._mox.ReplayAll()
+ self.assertRaises(exception.InstanceFaultRollback,
+ self._conn.migrate_disk_and_power_off,
+ self._context, instance, fake_dest_ip,
+ flavor, network_info)
+ self._mox.VerifyAll()
+
+ def _mock_attach_config_drive(self, instance, config_drive_format):
+ instance['config_drive'] = True
+ self._mox.StubOutWithMock(fake.PathUtils, 'lookup_configdrive_path')
+ m = fake.PathUtils.lookup_configdrive_path(
+ mox.Func(self._check_instance_name))
+
+ if config_drive_format in constants.DISK_FORMAT_MAP:
+ m.AndReturn(self._test_instance_dir + '/configdrive.' +
+ config_drive_format)
+ else:
+ m.AndReturn(None)
+
+ m = vmutils.VMUtils.attach_ide_drive(
+ mox.Func(self._check_instance_name),
+ mox.IsA(str),
+ mox.IsA(int),
+ mox.IsA(int),
+ mox.IsA(str))
+ m.WithSideEffects(self._add_ide_disk).InAnyOrder()
+
+ def _verify_attach_config_drive(self, config_drive_format):
+ if config_drive_format == constants.IDE_DISK_FORMAT.lower():
+ self.assertEqual(self._instance_ide_disks[1],
+ self._test_instance_dir + '/configdrive.' +
+ config_drive_format)
+ elif config_drive_format == constants.IDE_DVD_FORMAT.lower():
+ self.assertEqual(self._instance_ide_dvds[0],
+ self._test_instance_dir + '/configdrive.' +
+ config_drive_format)
+
+ def _test_finish_migration(self, power_on, ephemeral_storage=False,
+ config_drive=False,
+ config_drive_format='iso'):
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+ instance['system_metadata'] = {}
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str))
+ m.AndReturn(self._test_instance_dir)
+
+ self._mox.StubOutWithMock(fake.PathUtils, 'exists')
+ m = fake.PathUtils.exists(mox.IsA(str))
+ m.AndReturn(True)
+
+ fake_parent_vhd_path = (os.path.join('FakeParentPath', '%s.vhd' %
+ instance["image_ref"]))
+
+ m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
+ m.AndReturn({'ParentPath': fake_parent_vhd_path,
+ 'MaxInternalSize': 1})
+ m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
+ mox.IsA(str), mox.IsA(object))
+ m.AndReturn(1025)
+
+ vhdutils.VHDUtils.reconnect_parent_vhd(mox.IsA(str), mox.IsA(str))
+
+ m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
+ m.AndReturn({'MaxInternalSize': 1024})
+
+ m = fake.PathUtils.exists(mox.IsA(str))
+ m.AndReturn(True)
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str))
+ if ephemeral_storage:
+ return m.AndReturn(self._test_instance_dir)
+ else:
+ m.AndReturn(None)
+
+ self._set_vm_name(instance['name'])
+ self._setup_create_instance_mocks(None, False,
+ ephemeral_storage=ephemeral_storage)
+
+ if power_on:
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ constants.HYPERV_VM_STATE_ENABLED)
+ self._setup_log_vm_output_mocks()
+
+ if config_drive:
+ self._mock_attach_config_drive(instance, config_drive_format)
+
+ self._mox.ReplayAll()
+ self._conn.finish_migration(self._context, None, instance, "",
+ network_info, None, False, None, power_on)
+ self._mox.VerifyAll()
+
+ if config_drive:
+ self._verify_attach_config_drive(config_drive_format)
+
+ def test_finish_migration_power_on(self):
+ self._test_finish_migration(True)
+
+ def test_finish_migration_power_off(self):
+ self._test_finish_migration(False)
+
+ def test_finish_migration_with_ephemeral_storage(self):
+ self._test_finish_migration(False, ephemeral_storage=True)
+
+ def test_finish_migration_attach_config_drive_iso(self):
+ self._test_finish_migration(False, config_drive=True,
+ config_drive_format=constants.IDE_DVD_FORMAT.lower())
+
+ def test_finish_migration_attach_config_drive_vhd(self):
+ self._test_finish_migration(False, config_drive=True,
+ config_drive_format=constants.IDE_DISK_FORMAT.lower())
+
+ def test_confirm_migration(self):
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+
+ pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'],
+ remove_dir=True)
+ self._mox.ReplayAll()
+ self._conn.confirm_migration(None, instance, network_info)
+ self._mox.VerifyAll()
+
+ def _test_finish_revert_migration(self, power_on, ephemeral_storage=False,
+ config_drive=False,
+ config_drive_format='iso'):
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+ network_info = fake_network.fake_get_instance_nw_info(self.stubs)
+
+ fake_revert_path = ('C:\\FakeInstancesPath\\%s\\_revert' %
+ instance['name'])
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str),
+ create_dir=False,
+ remove_dir=True)
+ m.AndReturn(self._test_instance_dir)
+
+ m = pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'])
+ m.AndReturn(fake_revert_path)
+ fake.PathUtils.rename(fake_revert_path, mox.IsA(str))
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str))
+ m.AndReturn(self._test_instance_dir)
+
+ m = fake.PathUtils.get_instance_dir(mox.IsA(str))
+ if ephemeral_storage:
+ m.AndReturn(self._test_instance_dir)
+ else:
+ m.AndReturn(None)
+
+ self._set_vm_name(instance['name'])
+ self._setup_create_instance_mocks(None, False,
+ ephemeral_storage=ephemeral_storage)
+
+ if power_on:
+ vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
+ constants.HYPERV_VM_STATE_ENABLED)
+ self._setup_log_vm_output_mocks()
+
+ if config_drive:
+ self._mock_attach_config_drive(instance, config_drive_format)
+
+ self._mox.ReplayAll()
+ self._conn.finish_revert_migration(self._context, instance,
+ network_info, None,
+ power_on)
+ self._mox.VerifyAll()
+
+ if config_drive:
+ self._verify_attach_config_drive(config_drive_format)
+
+ def test_finish_revert_migration_power_on(self):
+ self._test_finish_revert_migration(True)
+
+ def test_finish_revert_migration_power_off(self):
+ self._test_finish_revert_migration(False)
+
+ def test_spawn_no_admin_permissions(self):
+ self.assertRaises(vmutils.HyperVAuthorizationException,
+ self._test_spawn_instance,
+ with_exception=True,
+ admin_permissions=False)
+
+ def test_finish_revert_migration_with_ephemeral_storage(self):
+ self._test_finish_revert_migration(False, ephemeral_storage=True)
+
+ def test_finish_revert_migration_attach_config_drive_iso(self):
+ self._test_finish_revert_migration(False, config_drive=True,
+ config_drive_format=constants.IDE_DVD_FORMAT.lower())
+
+ def test_finish_revert_migration_attach_config_drive_vhd(self):
+ self._test_finish_revert_migration(False, config_drive=True,
+ config_drive_format=constants.IDE_DISK_FORMAT.lower())
+
+ def test_plug_vifs(self):
+ # Check to make sure the method raises NotImplementedError.
+ self.assertRaises(NotImplementedError,
+ self._conn.plug_vifs,
+ instance=self._test_spawn_instance,
+ network_info=None)
+
+ def test_unplug_vifs(self):
+ # Check to make sure the method raises NotImplementedError.
+ self.assertRaises(NotImplementedError,
+ self._conn.unplug_vifs,
+ instance=self._test_spawn_instance,
+ network_info=None)
+
+ def test_rollback_live_migration_at_destination(self):
+ with mock.patch.object(self._conn, "destroy") as mock_destroy:
+ self._conn.rollback_live_migration_at_destination(self._context,
+ self._test_spawn_instance, [], None)
+ mock_destroy.assert_called_once_with(self._context,
+ self._test_spawn_instance, [], None)
+
+ def test_refresh_instance_security_rules(self):
+ self.assertRaises(NotImplementedError,
+ self._conn.refresh_instance_security_rules,
+ instance=None)
+
+ def test_get_rdp_console(self):
+ self.flags(my_ip="192.168.1.1")
+
+ self._instance_data = self._get_instance_data()
+ instance = db.instance_create(self._context, self._instance_data)
+
+ fake_port = 9999
+ fake_vm_id = "fake_vm_id"
+
+ m = rdpconsoleutils.RDPConsoleUtils.get_rdp_console_port()
+ m.AndReturn(fake_port)
+
+ m = vmutils.VMUtils.get_vm_id(mox.IsA(str))
+ m.AndReturn(fake_vm_id)
+
+ self._mox.ReplayAll()
+ connect_info = self._conn.get_rdp_console(self._context, instance)
+ self._mox.VerifyAll()
+
+ self.assertEqual(CONF.my_ip, connect_info.host)
+ self.assertEqual(fake_port, connect_info.port)
+ self.assertEqual(fake_vm_id, connect_info.internal_access_path)
+
+
+class VolumeOpsTestCase(HyperVAPIBaseTestCase):
+ """Unit tests for VolumeOps class."""
+
+ def setUp(self):
+ super(VolumeOpsTestCase, self).setUp()
+ self.volumeops = volumeops.VolumeOps()
+
+ def test_get_mounted_disk_from_lun(self):
+ with contextlib.nested(
+ mock.patch.object(self.volumeops._volutils,
+ 'get_device_number_for_target'),
+ mock.patch.object(self.volumeops._vmutils,
+ 'get_mounted_disk_by_drive_number')
+ ) as (mock_get_device_number_for_target,
+ mock_get_mounted_disk_by_drive_number):
+
+ mock_get_device_number_for_target.return_value = 0
+ mock_get_mounted_disk_by_drive_number.return_value = 'disk_path'
+
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
+
+ mapping = driver.block_device_info_get_mapping(block_device_info)
+ data = mapping[0]['connection_info']['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+
+ disk = self.volumeops._get_mounted_disk_from_lun(target_iqn,
+ target_lun)
+ self.assertEqual(disk, 'disk_path')
+
+ def test_get_mounted_disk_from_lun_failure(self):
+ self.flags(mounted_disk_query_retry_count=1, group='hyperv')
+
+ with mock.patch.object(self.volumeops._volutils,
+ 'get_device_number_for_target') as m_device_num:
+ m_device_num.side_effect = [None, -1]
+
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
+
+ mapping = driver.block_device_info_get_mapping(block_device_info)
+ data = mapping[0]['connection_info']['data']
+ target_lun = data['target_lun']
+ target_iqn = data['target_iqn']
+
+ for attempt in xrange(1):
+ self.assertRaises(exception.NotFound,
+ self.volumeops._get_mounted_disk_from_lun,
+ target_iqn, target_lun)
+
+ def test_get_free_controller_slot_exception(self):
+ fake_drive = mock.MagicMock()
+ type(fake_drive).AddressOnParent = mock.PropertyMock(
+ side_effect=xrange(constants.SCSI_CONTROLLER_SLOTS_NUMBER))
+ fake_scsi_controller_path = 'fake_scsi_controller_path'
+
+ with mock.patch.object(self.volumeops._vmutils,
+ 'get_attached_disks') as fake_get_attached_disks:
+ fake_get_attached_disks.return_value = (
+ [fake_drive] * constants.SCSI_CONTROLLER_SLOTS_NUMBER)
+ self.assertRaises(vmutils.HyperVException,
+ self.volumeops._get_free_controller_slot,
+ fake_scsi_controller_path)
+
+ def test_fix_instance_volume_disk_paths(self):
+ block_device_info = db_fakes.get_fake_block_device_info(
+ self._volume_target_portal, self._volume_id)
+
+ with contextlib.nested(
+ mock.patch.object(self.volumeops,
+ '_get_mounted_disk_from_lun'),
+ mock.patch.object(self.volumeops._vmutils,
+ 'get_vm_scsi_controller'),
+ mock.patch.object(self.volumeops._vmutils,
+ 'set_disk_host_resource'),
+ mock.patch.object(self.volumeops,
+ 'ebs_root_in_block_devices')
+ ) as (mock_get_mounted_disk_from_lun,
+ mock_get_vm_scsi_controller,
+ mock_set_disk_host_resource,
+ mock_ebs_in_block_devices):
+
+ mock_ebs_in_block_devices.return_value = False
+ mock_get_mounted_disk_from_lun.return_value = "fake_mounted_path"
+ mock_set_disk_host_resource.return_value = "fake_controller_path"
+
+ self.volumeops.fix_instance_volume_disk_paths(
+ "test_vm_name",
+ block_device_info)
+
+ mock_get_mounted_disk_from_lun.assert_called_with(
+ 'iqn.2010-10.org.openstack:volume-' + self._volume_id, 1, True)
+ mock_get_vm_scsi_controller.assert_called_with("test_vm_name")
+            mock_set_disk_host_resource.assert_called_with(
+                "test_vm_name", "fake_controller_path", 0, "fake_mounted_path")
+
+
+class HostOpsTestCase(HyperVAPIBaseTestCase):
+ """Unit tests for the Hyper-V hostops class."""
+
+    def setUp(self):
+        super(HostOpsTestCase, self).setUp()
+        self._hostops = hostops.HostOps()
+        self._hostops._hostutils = mock.MagicMock()
+        self._hostops.time = mock.MagicMock()
+
+ @mock.patch('nova.virt.hyperv.hostops.time')
+ def test_host_uptime(self, mock_time):
+ self._hostops._hostutils.get_host_tick_count64.return_value = 100
+ mock_time.strftime.return_value = "01:01:01"
+
+ result_uptime = "01:01:01 up %s, 0 users, load average: 0, 0, 0" % (
+ str(datetime.timedelta(
+                milliseconds=long(100))))
+ actual_uptime = self._hostops.get_host_uptime()
+ self.assertEqual(result_uptime, actual_uptime)
diff --git a/nova/tests/virt/hyperv/test_ioutils.py b/nova/tests/unit/virt/hyperv/test_ioutils.py
index 2f12450a46..2f12450a46 100644
--- a/nova/tests/virt/hyperv/test_ioutils.py
+++ b/nova/tests/unit/virt/hyperv/test_ioutils.py
diff --git a/nova/tests/unit/virt/hyperv/test_migrationops.py b/nova/tests/unit/virt/hyperv/test_migrationops.py
new file mode 100644
index 0000000000..8cda2ccd48
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_migrationops.py
@@ -0,0 +1,79 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.virt.hyperv import migrationops
+from nova.virt.hyperv import vmutils
+
+
+class MigrationOpsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V MigrationOps class."""
+
+ _FAKE_TIMEOUT = 10
+ _FAKE_RETRY_INTERVAL = 5
+
+ def setUp(self):
+ super(MigrationOpsTestCase, self).setUp()
+ self.context = 'fake-context'
+
+ # utilsfactory will check the host OS version via get_hostutils,
+ # in order to return the proper Utils Class, so it must be mocked.
+ patched_func = mock.patch.object(migrationops.utilsfactory,
+ "get_hostutils")
+ patched_func.start()
+ self.addCleanup(patched_func.stop)
+
+ self._migrationops = migrationops.MigrationOps()
+ self._migrationops._vmops = mock.MagicMock()
+ self._migrationops._vmutils = mock.MagicMock()
+
+ def test_check_and_attach_config_drive_unknown_path(self):
+ instance = fake_instance.fake_instance_obj(self.context,
+ expected_attrs=['system_metadata'])
+ instance.config_drive = 'True'
+ self._migrationops._pathutils.lookup_configdrive_path = mock.MagicMock(
+ return_value=None)
+ self.assertRaises(vmutils.HyperVException,
+ self._migrationops._check_and_attach_config_drive,
+ instance)
+
+ @mock.patch.object(migrationops.MigrationOps, '_migrate_disk_files')
+ @mock.patch.object(migrationops.MigrationOps, '_check_target_flavor')
+ def test_migrate_disk_and_power_off(self, mock_check_flavor,
+ mock_migrate_disk_files):
+ instance = fake_instance.fake_instance_obj(self.context)
+ flavor = mock.MagicMock()
+ network_info = mock.MagicMock()
+
+ disk_files = [mock.MagicMock()]
+ volume_drives = [mock.MagicMock()]
+
+ mock_get_vm_st_path = self._migrationops._vmutils.get_vm_storage_paths
+ mock_get_vm_st_path.return_value = (disk_files, volume_drives)
+
+ self._migrationops.migrate_disk_and_power_off(
+ self.context, instance, mock.sentinel.FAKE_DEST, flavor,
+ network_info, None, self._FAKE_TIMEOUT, self._FAKE_RETRY_INTERVAL)
+
+ mock_check_flavor.assert_called_once_with(instance, flavor)
+ self._migrationops._vmops.power_off.assert_called_once_with(
+ instance, self._FAKE_TIMEOUT, self._FAKE_RETRY_INTERVAL)
+ mock_get_vm_st_path.assert_called_once_with(instance.name)
+ mock_migrate_disk_files.assert_called_once_with(
+ instance.name, disk_files, mock.sentinel.FAKE_DEST)
+ self._migrationops._vmops.destroy.assert_called_once_with(
+ instance, destroy_disks=False)
diff --git a/nova/tests/virt/hyperv/test_networkutils.py b/nova/tests/unit/virt/hyperv/test_networkutils.py
index 281df29833..281df29833 100644
--- a/nova/tests/virt/hyperv/test_networkutils.py
+++ b/nova/tests/unit/virt/hyperv/test_networkutils.py
diff --git a/nova/tests/unit/virt/hyperv/test_networkutilsv2.py b/nova/tests/unit/virt/hyperv/test_networkutilsv2.py
new file mode 100644
index 0000000000..1038e88682
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_networkutilsv2.py
@@ -0,0 +1,45 @@
+# Copyright 2013 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.tests.unit.virt.hyperv import test_networkutils
+from nova.virt.hyperv import networkutilsv2
+
+
+class NetworkUtilsV2TestCase(test_networkutils.NetworkUtilsTestCase):
+ """Unit tests for the Hyper-V NetworkUtilsV2 class."""
+
+ _MSVM_VIRTUAL_SWITCH = 'Msvm_VirtualEthernetSwitch'
+
+ def setUp(self):
+ super(NetworkUtilsV2TestCase, self).setUp()
+ self._networkutils = networkutilsv2.NetworkUtilsV2()
+ self._networkutils._conn = mock.MagicMock()
+
+ def _prepare_external_port(self, mock_vswitch, mock_ext_port):
+ mock_lep = mock_ext_port.associators()[0]
+ mock_lep1 = mock_lep.associators()[0]
+ mock_esw = mock_lep1.associators()[0]
+ mock_esw.associators.return_value = [mock_vswitch]
+
+ def test_create_vswitch_port(self):
+ self.assertRaises(
+ NotImplementedError,
+ self._networkutils.create_vswitch_port,
+ mock.sentinel.FAKE_VSWITCH_PATH,
+ mock.sentinel.FAKE_PORT_NAME)
+
+ def test_vswitch_port_needed(self):
+ self.assertFalse(self._networkutils.vswitch_port_needed())
diff --git a/nova/tests/virt/hyperv/test_pathutils.py b/nova/tests/unit/virt/hyperv/test_pathutils.py
index 0ded84ec6b..0ded84ec6b 100644
--- a/nova/tests/virt/hyperv/test_pathutils.py
+++ b/nova/tests/unit/virt/hyperv/test_pathutils.py
diff --git a/nova/tests/virt/hyperv/test_rdpconsoleutils.py b/nova/tests/unit/virt/hyperv/test_rdpconsoleutils.py
index 98d4484b61..98d4484b61 100644
--- a/nova/tests/virt/hyperv/test_rdpconsoleutils.py
+++ b/nova/tests/unit/virt/hyperv/test_rdpconsoleutils.py
diff --git a/nova/tests/virt/hyperv/test_rdpconsoleutilsv2.py b/nova/tests/unit/virt/hyperv/test_rdpconsoleutilsv2.py
index bcdfaf92f0..bcdfaf92f0 100644
--- a/nova/tests/virt/hyperv/test_rdpconsoleutilsv2.py
+++ b/nova/tests/unit/virt/hyperv/test_rdpconsoleutilsv2.py
diff --git a/nova/tests/virt/hyperv/test_utilsfactory.py b/nova/tests/unit/virt/hyperv/test_utilsfactory.py
index 77b8a92a8e..77b8a92a8e 100644
--- a/nova/tests/virt/hyperv/test_utilsfactory.py
+++ b/nova/tests/unit/virt/hyperv/test_utilsfactory.py
diff --git a/nova/tests/virt/hyperv/test_vhdutils.py b/nova/tests/unit/virt/hyperv/test_vhdutils.py
index e41353329a..e41353329a 100644
--- a/nova/tests/virt/hyperv/test_vhdutils.py
+++ b/nova/tests/unit/virt/hyperv/test_vhdutils.py
diff --git a/nova/tests/virt/hyperv/test_vhdutilsv2.py b/nova/tests/unit/virt/hyperv/test_vhdutilsv2.py
index a813d3bbd6..a813d3bbd6 100644
--- a/nova/tests/virt/hyperv/test_vhdutilsv2.py
+++ b/nova/tests/unit/virt/hyperv/test_vhdutilsv2.py
diff --git a/nova/tests/unit/virt/hyperv/test_vmops.py b/nova/tests/unit/virt/hyperv/test_vmops.py
new file mode 100644
index 0000000000..5ec107747e
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_vmops.py
@@ -0,0 +1,230 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from eventlet import timeout as etimeout
+import mock
+
+from nova import exception
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import pathutils
+from nova.virt.hyperv import vmops
+from nova.virt.hyperv import vmutils
+
+
+class VMOpsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V VMOps class."""
+
+ _FAKE_TIMEOUT = 2
+
+ def __init__(self, test_case_name):
+ super(VMOpsTestCase, self).__init__(test_case_name)
+
+ def setUp(self):
+ super(VMOpsTestCase, self).setUp()
+ self.context = 'fake-context'
+
+ # utilsfactory will check the host OS version via get_hostutils,
+ # in order to return the proper Utils Class, so it must be mocked.
+ patched_func = mock.patch.object(vmops.utilsfactory,
+ "get_hostutils")
+ patched_func.start()
+ self.addCleanup(patched_func.stop)
+
+ self._vmops = vmops.VMOps()
+
+ def test_attach_config_drive(self):
+ instance = fake_instance.fake_instance_obj(self.context)
+ self.assertRaises(exception.InvalidDiskFormat,
+ self._vmops.attach_config_drive,
+ instance, 'C:/fake_instance_dir/configdrive.xxx')
+
+ def test_reboot_hard(self):
+ self._test_reboot(vmops.REBOOT_TYPE_HARD,
+ constants.HYPERV_VM_STATE_REBOOT)
+
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
+ def test_reboot_soft(self, mock_soft_shutdown):
+ mock_soft_shutdown.return_value = True
+ self._test_reboot(vmops.REBOOT_TYPE_SOFT,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
+ def test_reboot_soft_failed(self, mock_soft_shutdown):
+ mock_soft_shutdown.return_value = False
+ self._test_reboot(vmops.REBOOT_TYPE_SOFT,
+ constants.HYPERV_VM_STATE_REBOOT)
+
+ @mock.patch("nova.virt.hyperv.vmops.VMOps.power_on")
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
+ def test_reboot_soft_exception(self, mock_soft_shutdown, mock_power_on):
+ mock_soft_shutdown.return_value = True
+ mock_power_on.side_effect = vmutils.HyperVException("Expected failure")
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ self.assertRaises(vmutils.HyperVException, self._vmops.reboot,
+ instance, {}, vmops.REBOOT_TYPE_SOFT)
+
+ mock_soft_shutdown.assert_called_once_with(instance)
+ mock_power_on.assert_called_once_with(instance)
+
+ def _test_reboot(self, reboot_type, vm_state):
+ instance = fake_instance.fake_instance_obj(self.context)
+ with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
+ self._vmops.reboot(instance, {}, reboot_type)
+ mock_set_state.assert_called_once_with(instance, vm_state)
+
+ @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm")
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
+ def test_soft_shutdown(self, mock_wait_for_power_off, mock_shutdown_vm):
+ instance = fake_instance.fake_instance_obj(self.context)
+ mock_wait_for_power_off.return_value = True
+
+ result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
+
+ mock_shutdown_vm.assert_called_once_with(instance.name)
+ mock_wait_for_power_off.assert_called_once_with(
+ instance.name, self._FAKE_TIMEOUT)
+
+ self.assertTrue(result)
+
+ @mock.patch("time.sleep")
+ @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm")
+ def test_soft_shutdown_failed(self, mock_shutdown_vm, mock_sleep):
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ mock_shutdown_vm.side_effect = vmutils.HyperVException(
+ "Expected failure.")
+
+ result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
+
+ mock_shutdown_vm.assert_called_once_with(instance.name)
+ self.assertFalse(result)
+
+ @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm")
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
+ def test_soft_shutdown_wait(self, mock_wait_for_power_off,
+ mock_shutdown_vm):
+ instance = fake_instance.fake_instance_obj(self.context)
+ mock_wait_for_power_off.side_effect = [False, True]
+
+ result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1)
+
+ calls = [mock.call(instance.name, 1),
+ mock.call(instance.name, self._FAKE_TIMEOUT - 1)]
+ mock_shutdown_vm.assert_called_with(instance.name)
+ mock_wait_for_power_off.assert_has_calls(calls)
+
+ self.assertTrue(result)
+
+ @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm")
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
+ def test_soft_shutdown_wait_timeout(self, mock_wait_for_power_off,
+ mock_shutdown_vm):
+ instance = fake_instance.fake_instance_obj(self.context)
+ mock_wait_for_power_off.return_value = False
+
+ result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1.5)
+
+ calls = [mock.call(instance.name, 1.5),
+ mock.call(instance.name, self._FAKE_TIMEOUT - 1.5)]
+ mock_shutdown_vm.assert_called_with(instance.name)
+ mock_wait_for_power_off.assert_has_calls(calls)
+
+ self.assertFalse(result)
+
+ def _test_power_off(self, timeout):
+ instance = fake_instance.fake_instance_obj(self.context)
+ with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
+ self._vmops.power_off(instance, timeout)
+
+ mock_set_state.assert_called_once_with(
+ instance, constants.HYPERV_VM_STATE_DISABLED)
+
+ def test_power_off_hard(self):
+ self._test_power_off(timeout=0)
+
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
+ def test_power_off_exception(self, mock_soft_shutdown):
+ mock_soft_shutdown.return_value = False
+ self._test_power_off(timeout=1)
+
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._set_vm_state")
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
+ def test_power_off_soft(self, mock_soft_shutdown, mock_set_state):
+ instance = fake_instance.fake_instance_obj(self.context)
+ mock_soft_shutdown.return_value = True
+
+ self._vmops.power_off(instance, 1, 0)
+
+ mock_soft_shutdown.assert_called_once_with(
+ instance, 1, vmops.SHUTDOWN_TIME_INCREMENT)
+ self.assertFalse(mock_set_state.called)
+
+ def test_get_vm_state(self):
+ summary_info = {'EnabledState': constants.HYPERV_VM_STATE_DISABLED}
+
+ with mock.patch.object(self._vmops._vmutils,
+ 'get_vm_summary_info') as mock_get_summary_info:
+ mock_get_summary_info.return_value = summary_info
+
+ response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
+ self.assertEqual(response, constants.HYPERV_VM_STATE_DISABLED)
+
+ @mock.patch.object(vmops.VMOps, '_get_vm_state')
+ def test_wait_for_power_off_true(self, mock_get_state):
+ mock_get_state.return_value = constants.HYPERV_VM_STATE_DISABLED
+ result = self._vmops._wait_for_power_off(
+ mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
+ mock_get_state.assert_called_with(mock.sentinel.FAKE_VM_NAME)
+ self.assertTrue(result)
+
+ @mock.patch.object(vmops.etimeout, "with_timeout")
+ def test_wait_for_power_off_false(self, mock_with_timeout):
+ mock_with_timeout.side_effect = etimeout.Timeout()
+ result = self._vmops._wait_for_power_off(
+ mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
+ self.assertFalse(result)
+
+ @mock.patch("__builtin__.open")
+ @mock.patch("os.path.exists")
+ @mock.patch.object(pathutils.PathUtils, 'get_vm_console_log_paths')
+ def test_get_console_output_exception(self,
+ fake_get_vm_log_path,
+ fake_path_exists,
+ fake_open):
+ fake_vm = mock.MagicMock()
+
+ fake_open.side_effect = vmutils.HyperVException
+ fake_path_exists.return_value = True
+ fake_get_vm_log_path.return_value = (
+ mock.sentinel.fake_console_log_path,
+ mock.sentinel.fake_console_log_archived)
+
+ with mock.patch('nova.virt.hyperv.vmops.open', fake_open, create=True):
+ self.assertRaises(vmutils.HyperVException,
+ self._vmops.get_console_output,
+ fake_vm)
+
+ def test_list_instance_uuids(self):
+ fake_uuid = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
+ with mock.patch.object(self._vmops._vmutils,
+ 'list_instance_notes') as mock_list_notes:
+ mock_list_notes.return_value = [('fake_name', [fake_uuid])]
+
+ response = self._vmops.list_instance_uuids()
+ mock_list_notes.assert_called_once_with()
+
+ self.assertEqual(response, [fake_uuid])
diff --git a/nova/tests/virt/hyperv/test_vmutils.py b/nova/tests/unit/virt/hyperv/test_vmutils.py
index 7c54f273ab..7c54f273ab 100644
--- a/nova/tests/virt/hyperv/test_vmutils.py
+++ b/nova/tests/unit/virt/hyperv/test_vmutils.py
diff --git a/nova/tests/unit/virt/hyperv/test_vmutilsv2.py b/nova/tests/unit/virt/hyperv/test_vmutilsv2.py
new file mode 100644
index 0000000000..e4c24683eb
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_vmutilsv2.py
@@ -0,0 +1,197 @@
+# Copyright 2014 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.tests.unit.virt.hyperv import test_vmutils
+from nova.virt.hyperv import vmutilsv2
+
+
+class VMUtilsV2TestCase(test_vmutils.VMUtilsTestCase):
+ """Unit tests for the Hyper-V VMUtilsV2 class."""
+
+ _DEFINE_SYSTEM = 'DefineSystem'
+ _DESTROY_SYSTEM = 'DestroySystem'
+ _DESTROY_SNAPSHOT = 'DestroySnapshot'
+
+ _ADD_RESOURCE = 'AddResourceSettings'
+ _REMOVE_RESOURCE = 'RemoveResourceSettings'
+ _SETTING_TYPE = 'VirtualSystemType'
+
+ _VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
+
+ def setUp(self):
+ super(VMUtilsV2TestCase, self).setUp()
+ self._vmutils = vmutilsv2.VMUtilsV2()
+ self._vmutils._conn = mock.MagicMock()
+
+ def test_modify_virt_resource(self):
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+ mock_svc.ModifyResourceSettings.return_value = (self._FAKE_JOB_PATH,
+ mock.MagicMock(),
+ self._FAKE_RET_VAL)
+ mock_res_setting_data = mock.MagicMock()
+ mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
+
+ self._vmutils._modify_virt_resource(mock_res_setting_data,
+ self._FAKE_VM_PATH)
+
+ mock_svc.ModifyResourceSettings.assert_called_with(
+ ResourceSettings=[self._FAKE_RES_DATA])
+
+ @mock.patch.object(vmutilsv2, 'wmi', create=True)
+ @mock.patch.object(vmutilsv2.VMUtilsV2, 'check_ret_val')
+ def test_take_vm_snapshot(self, mock_check_ret_val, mock_wmi):
+ self._lookup_vm()
+
+ mock_svc = self._get_snapshot_service()
+ mock_svc.CreateSnapshot.return_value = (self._FAKE_JOB_PATH,
+ mock.MagicMock(),
+ self._FAKE_RET_VAL)
+
+ self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME)
+
+ mock_svc.CreateSnapshot.assert_called_with(
+ AffectedSystem=self._FAKE_VM_PATH,
+ SnapshotType=self._vmutils._SNAPSHOT_FULL)
+
+ mock_check_ret_val.assert_called_once_with(self._FAKE_RET_VAL,
+ self._FAKE_JOB_PATH)
+
+ @mock.patch.object(vmutilsv2.VMUtilsV2, '_add_virt_resource')
+ @mock.patch.object(vmutilsv2.VMUtilsV2, '_get_new_setting_data')
+ @mock.patch.object(vmutilsv2.VMUtilsV2, '_get_nic_data_by_name')
+ def test_set_nic_connection(self, mock_get_nic_data, mock_get_new_sd,
+ mock_add_virt_res):
+ self._lookup_vm()
+ fake_eth_port = mock_get_new_sd.return_value
+
+ self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None)
+ mock_add_virt_res.assert_called_with(fake_eth_port, self._FAKE_VM_PATH)
+
+ @mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks')
+ def test_enable_vm_metrics_collection(self, mock_get_vm_disks):
+ self._lookup_vm()
+ mock_svc = self._vmutils._conn.Msvm_MetricService()[0]
+
+ metric_def = mock.MagicMock()
+ mock_disk = mock.MagicMock()
+ mock_disk.path_.return_value = self._FAKE_RES_PATH
+ mock_get_vm_disks.return_value = ([mock_disk], [mock_disk])
+
+ fake_metric_def_paths = ["fake_0", None]
+ fake_metric_resource_paths = [self._FAKE_VM_PATH, self._FAKE_RES_PATH]
+
+ metric_def.path_.side_effect = fake_metric_def_paths
+ self._vmutils._conn.CIM_BaseMetricDefinition.return_value = [
+ metric_def]
+
+ self._vmutils.enable_vm_metrics_collection(self._FAKE_VM_NAME)
+
+ calls = []
+ for i in range(len(fake_metric_def_paths)):
+ calls.append(mock.call(
+ Subject=fake_metric_resource_paths[i],
+ Definition=fake_metric_def_paths[i],
+ MetricCollectionEnabled=self._vmutils._METRIC_ENABLED))
+
+ mock_svc.ControlMetrics.assert_has_calls(calls, any_order=True)
+
+ def _get_snapshot_service(self):
+ return self._vmutils._conn.Msvm_VirtualSystemSnapshotService()[0]
+
+ def _assert_add_resources(self, mock_svc):
+ getattr(mock_svc, self._ADD_RESOURCE).assert_called_with(
+ self._FAKE_VM_PATH, [self._FAKE_RES_DATA])
+
+ def _assert_remove_resources(self, mock_svc):
+ getattr(mock_svc, self._REMOVE_RESOURCE).assert_called_with(
+ [self._FAKE_RES_PATH])
+
+ def test_list_instance_notes(self):
+ vs = mock.MagicMock()
+ attrs = {'ElementName': 'fake_name',
+ 'Notes': ['4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3']}
+ vs.configure_mock(**attrs)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
+ response = self._vmutils.list_instance_notes()
+
+ self.assertEqual([(attrs['ElementName'], attrs['Notes'])], response)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
+ ['ElementName', 'Notes'],
+ VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED)
+
+ @mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2.check_ret_val')
+ @mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2._get_wmi_obj')
+ def _test_create_vm_obj(self, mock_get_wmi_obj, mock_check_ret_val,
+ vm_path, dynamic_memory_ratio=1.0):
+ mock_vs_man_svc = mock.MagicMock()
+ mock_vs_data = mock.MagicMock()
+ mock_job = mock.MagicMock()
+ fake_job_path = 'fake job path'
+ fake_ret_val = 'fake return value'
+ _conn = self._vmutils._conn.Msvm_VirtualSystemSettingData
+
+ mock_check_ret_val.return_value = mock_job
+ _conn.new.return_value = mock_vs_data
+ mock_vs_man_svc.DefineSystem.return_value = (fake_job_path,
+ vm_path,
+ fake_ret_val)
+ mock_job.associators.return_value = ['fake vm path']
+
+ response = self._vmutils._create_vm_obj(
+ vs_man_svc=mock_vs_man_svc,
+ vm_name='fake vm',
+ notes='fake notes',
+ dynamic_memory_ratio=dynamic_memory_ratio)
+
+ if not vm_path:
+ mock_job.associators.assert_called_once_with(
+ self._vmutils._AFFECTED_JOB_ELEMENT_CLASS)
+
+ _conn.new.assert_called_once_with()
+ self.assertEqual(mock_vs_data.ElementName, 'fake vm')
+ mock_vs_man_svc.DefineSystem.assert_called_once_with(
+ ResourceSettings=[], ReferenceConfiguration=None,
+ SystemSettings=mock_vs_data.GetText_(1))
+ mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path)
+
+ if dynamic_memory_ratio > 1:
+ self.assertFalse(mock_vs_data.VirtualNumaEnabled)
+
+ mock_get_wmi_obj.assert_called_with('fake vm path')
+
+ self.assertEqual(mock_vs_data.Notes, 'fake notes')
+ self.assertEqual(response, mock_get_wmi_obj())
+
+ def test_create_vm_obj(self):
+ self._test_create_vm_obj(vm_path='fake vm path')
+
+ def test_create_vm_obj_no_vm_path(self):
+ self._test_create_vm_obj(vm_path=None)
+
+ def test_create_vm_obj_dynamic_memory(self):
+ self._test_create_vm_obj(vm_path=None, dynamic_memory_ratio=1.1)
+
+ def test_list_instances(self):
+ vs = mock.MagicMock()
+ attrs = {'ElementName': 'fake_name'}
+ vs.configure_mock(**attrs)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
+ response = self._vmutils.list_instances()
+
+ self.assertEqual([(attrs['ElementName'])], response)
+ self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
+ ['ElementName'],
+ VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED)
diff --git a/nova/tests/unit/virt/hyperv/test_volumeutils.py b/nova/tests/unit/virt/hyperv/test_volumeutils.py
new file mode 100644
index 0000000000..98ffcce533
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_volumeutils.py
@@ -0,0 +1,151 @@
+# Copyright 2014 Cloudbase Solutions Srl
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo.config import cfg
+
+from nova.tests.unit.virt.hyperv import test_basevolumeutils
+from nova.virt.hyperv import vmutils
+from nova.virt.hyperv import volumeutils
+
+CONF = cfg.CONF
+CONF.import_opt('volume_attach_retry_count', 'nova.virt.hyperv.volumeops',
+ 'hyperv')
+
+
+class VolumeUtilsTestCase(test_basevolumeutils.BaseVolumeUtilsTestCase):
+ """Unit tests for the Hyper-V VolumeUtils class."""
+
+ _FAKE_PORTAL_ADDR = '10.1.1.1'
+ _FAKE_PORTAL_PORT = '3260'
+ _FAKE_LUN = 0
+ _FAKE_TARGET = 'iqn.2010-10.org.openstack:fake_target'
+
+ _FAKE_STDOUT_VALUE = 'The operation completed successfully'
+
+ def setUp(self):
+ super(VolumeUtilsTestCase, self).setUp()
+ self._volutils = volumeutils.VolumeUtils()
+ self._volutils._conn_wmi = mock.MagicMock()
+ self._volutils._conn_cimv2 = mock.MagicMock()
+ self.flags(volume_attach_retry_count=4, group='hyperv')
+ self.flags(volume_attach_retry_interval=0, group='hyperv')
+
+ def _test_login_target_portal(self, portal_connected):
+ fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
+ self._FAKE_PORTAL_PORT)
+
+ self._volutils.execute = mock.MagicMock()
+ if portal_connected:
+ exec_output = 'Address and Socket: %s %s' % (
+ self._FAKE_PORTAL_ADDR, self._FAKE_PORTAL_PORT)
+ else:
+ exec_output = ''
+
+ self._volutils.execute.return_value = exec_output
+
+ self._volutils._login_target_portal(fake_portal)
+
+ call_list = self._volutils.execute.call_args_list
+ all_call_args = [arg for call in call_list for arg in call[0]]
+
+ if portal_connected:
+ self.assertIn('RefreshTargetPortal', all_call_args)
+ else:
+ self.assertIn('AddTargetPortal', all_call_args)
+
+ def test_login_connected_portal(self):
+ self._test_login_target_portal(True)
+
+ def test_login_new_portal(self):
+ self._test_login_target_portal(False)
+
+ def _test_login_target(self, target_connected, raise_exception=False):
+ fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
+ self._FAKE_PORTAL_PORT)
+ self._volutils.execute = mock.MagicMock()
+ self._volutils._login_target_portal = mock.MagicMock()
+
+ if target_connected:
+ self._volutils.execute.return_value = self._FAKE_TARGET
+ elif raise_exception:
+ self._volutils.execute.return_value = ''
+ else:
+ self._volutils.execute.side_effect = (
+ ['', '', '', self._FAKE_TARGET, ''])
+
+ if raise_exception:
+ self.assertRaises(vmutils.HyperVException,
+ self._volutils.login_storage_target,
+ self._FAKE_LUN, self._FAKE_TARGET, fake_portal)
+ else:
+ self._volutils.login_storage_target(self._FAKE_LUN,
+ self._FAKE_TARGET,
+ fake_portal)
+
+ call_list = self._volutils.execute.call_args_list
+ all_call_args = [arg for call in call_list for arg in call[0]]
+
+ if target_connected:
+ self.assertNotIn('qlogintarget', all_call_args)
+ else:
+ self.assertIn('qlogintarget', all_call_args)
+
+ def test_login_connected_target(self):
+ self._test_login_target(True)
+
+ def test_login_disconnected_target(self):
+ self._test_login_target(False)
+
+ def test_login_target_exception(self):
+ self._test_login_target(False, True)
+
+ def _test_execute_wrapper(self, raise_exception):
+ fake_cmd = ('iscsicli.exe', 'ListTargetPortals')
+
+ if raise_exception:
+ output = 'fake error'
+ else:
+ output = 'The operation completed successfully'
+
+ with mock.patch('nova.utils.execute') as fake_execute:
+ fake_execute.return_value = (output, None)
+
+ if raise_exception:
+ self.assertRaises(vmutils.HyperVException,
+ self._volutils.execute,
+ *fake_cmd)
+ else:
+ ret_val = self._volutils.execute(*fake_cmd)
+ self.assertEqual(output, ret_val)
+
+ def test_execute_raise_exception(self):
+ self._test_execute_wrapper(True)
+
+ def test_execute_no_exception(self):
+ self._test_execute_wrapper(False)
+
+ @mock.patch.object(volumeutils, 'utils')
+ def test_logout_storage_target(self, mock_utils):
+ mock_utils.execute.return_value = (self._FAKE_STDOUT_VALUE,
+ mock.sentinel.FAKE_STDERR_VALUE)
+ session = mock.MagicMock()
+ session.SessionId = mock.sentinel.FAKE_SESSION_ID
+ self._volutils._conn_wmi.query.return_value = [session]
+
+ self._volutils.logout_storage_target(mock.sentinel.FAKE_IQN)
+ mock_utils.execute.assert_called_once_with(
+ 'iscsicli.exe', 'logouttarget', mock.sentinel.FAKE_SESSION_ID)
diff --git a/nova/tests/virt/hyperv/test_volumeutilsv2.py b/nova/tests/unit/virt/hyperv/test_volumeutilsv2.py
index 1c242b71f8..1c242b71f8 100644
--- a/nova/tests/virt/hyperv/test_volumeutilsv2.py
+++ b/nova/tests/unit/virt/hyperv/test_volumeutilsv2.py
diff --git a/nova/tests/virt/ironic/__init__.py b/nova/tests/unit/virt/ironic/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/virt/ironic/__init__.py
+++ b/nova/tests/unit/virt/ironic/__init__.py
diff --git a/nova/tests/unit/virt/ironic/test_client_wrapper.py b/nova/tests/unit/virt/ironic/test_client_wrapper.py
new file mode 100644
index 0000000000..025d2616dd
--- /dev/null
+++ b/nova/tests/unit/virt/ironic/test_client_wrapper.py
@@ -0,0 +1,126 @@
+# Copyright 2014 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from ironicclient import client as ironic_client
+from ironicclient import exc as ironic_exception
+import mock
+from oslo.config import cfg
+
+from nova import exception
+from nova import test
+from nova.tests.unit.virt.ironic import utils as ironic_utils
+from nova.virt.ironic import client_wrapper
+
+CONF = cfg.CONF
+
+FAKE_CLIENT = ironic_utils.FakeClient()
+
+
+class IronicClientWrapperTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(IronicClientWrapperTestCase, self).setUp()
+ self.ironicclient = client_wrapper.IronicClientWrapper()
+ # Do not waste time sleeping
+ cfg.CONF.set_override('api_retry_interval', 0, 'ironic')
+
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
+ def test_call_good_no_args(self, mock_get_client, mock_multi_getattr):
+ mock_get_client.return_value = FAKE_CLIENT
+ self.ironicclient.call("node.list")
+ mock_get_client.assert_called_once_with()
+ mock_multi_getattr.assert_called_once_with(FAKE_CLIENT, "node.list")
+ mock_multi_getattr.return_value.assert_called_once_with()
+
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
+ def test_call_good_with_args(self, mock_get_client, mock_multi_getattr):
+ mock_get_client.return_value = FAKE_CLIENT
+ self.ironicclient.call("node.list", 'test', associated=True)
+ mock_get_client.assert_called_once_with()
+ mock_multi_getattr.assert_called_once_with(FAKE_CLIENT, "node.list")
+ mock_multi_getattr.return_value.assert_called_once_with(
+ 'test', associated=True)
+
+ @mock.patch.object(ironic_client, 'get_client')
+ def test__get_client_no_auth_token(self, mock_ir_cli):
+ self.flags(admin_auth_token=None, group='ironic')
+ ironicclient = client_wrapper.IronicClientWrapper()
+ # dummy call to have _get_client() called
+ ironicclient.call("node.list")
+ expected = {'os_username': CONF.ironic.admin_username,
+ 'os_password': CONF.ironic.admin_password,
+ 'os_auth_url': CONF.ironic.admin_url,
+ 'os_tenant_name': CONF.ironic.admin_tenant_name,
+ 'os_service_type': 'baremetal',
+ 'os_endpoint_type': 'public',
+ 'ironic_url': CONF.ironic.api_endpoint}
+ mock_ir_cli.assert_called_once_with(CONF.ironic.api_version,
+ **expected)
+
+ @mock.patch.object(ironic_client, 'get_client')
+ def test__get_client_with_auth_token(self, mock_ir_cli):
+ self.flags(admin_auth_token='fake-token', group='ironic')
+ ironicclient = client_wrapper.IronicClientWrapper()
+ # dummy call to have _get_client() called
+ ironicclient.call("node.list")
+ expected = {'os_auth_token': 'fake-token',
+ 'ironic_url': CONF.ironic.api_endpoint}
+ mock_ir_cli.assert_called_once_with(CONF.ironic.api_version,
+ **expected)
+
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
+ def test_call_fail(self, mock_get_client, mock_multi_getattr):
+ cfg.CONF.set_override('api_max_retries', 2, 'ironic')
+ test_obj = mock.Mock()
+ test_obj.side_effect = ironic_exception.HTTPServiceUnavailable
+ mock_multi_getattr.return_value = test_obj
+ mock_get_client.return_value = FAKE_CLIENT
+ self.assertRaises(exception.NovaException, self.ironicclient.call,
+ "node.list")
+ self.assertEqual(2, test_obj.call_count)
+
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
+ @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
+ def test_call_fail_unexpected_exception(self, mock_get_client,
+ mock_multi_getattr):
+ test_obj = mock.Mock()
+ test_obj.side_effect = ironic_exception.HTTPNotFound
+ mock_multi_getattr.return_value = test_obj
+ mock_get_client.return_value = FAKE_CLIENT
+ self.assertRaises(ironic_exception.HTTPNotFound,
+ self.ironicclient.call, "node.list")
+
+ @mock.patch.object(ironic_client, 'get_client')
+ def test__get_client_unauthorized(self, mock_get_client):
+ mock_get_client.side_effect = ironic_exception.Unauthorized
+ self.assertRaises(exception.NovaException,
+ self.ironicclient._get_client)
+
+ @mock.patch.object(ironic_client, 'get_client')
+ def test__get_client_unexpected_exception(self, mock_get_client):
+ mock_get_client.side_effect = ironic_exception.ConnectionRefused
+ self.assertRaises(ironic_exception.ConnectionRefused,
+ self.ironicclient._get_client)
+
+ def test__multi_getattr_good(self):
+ response = self.ironicclient._multi_getattr(FAKE_CLIENT, "node.list")
+ self.assertEqual(FAKE_CLIENT.node.list, response)
+
+ def test__multi_getattr_fail(self):
+ self.assertRaises(AttributeError, self.ironicclient._multi_getattr,
+ FAKE_CLIENT, "nonexistent")
diff --git a/nova/tests/unit/virt/ironic/test_driver.py b/nova/tests/unit/virt/ironic/test_driver.py
new file mode 100644
index 0000000000..0e24c7bab4
--- /dev/null
+++ b/nova/tests/unit/virt/ironic/test_driver.py
@@ -0,0 +1,1268 @@
+# Copyright 2014 Red Hat, Inc.
+# Copyright 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the ironic driver."""
+
+from ironicclient import exc as ironic_exception
+import mock
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+
+from nova.compute import power_state as nova_states
+from nova.compute import task_states
+from nova import context as nova_context
+from nova import exception
+from nova import objects
+from nova.openstack.common import loopingcall
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit import utils
+from nova.tests.unit.virt.ironic import utils as ironic_utils
+from nova.virt import driver
+from nova.virt import fake
+from nova.virt import firewall
+from nova.virt.ironic import client_wrapper as cw
+from nova.virt.ironic import driver as ironic_driver
+from nova.virt.ironic import ironic_states
+
+
+CONF = cfg.CONF
+
+IRONIC_FLAGS = dict(
+ api_version=1,
+ group='ironic',
+)
+
+FAKE_CLIENT = ironic_utils.FakeClient()
+
+
+class FakeClientWrapper(cw.IronicClientWrapper):
+ def _get_client(self):
+ return FAKE_CLIENT
+
+
+class FakeLoopingCall(object):
+ def __init__(self):
+ self.wait = mock.MagicMock()
+ self.start = mock.MagicMock()
+ self.start.return_value = self
+
+
+def _get_properties():
+ return {'cpus': 2,
+ 'memory_mb': 512,
+ 'local_gb': 10,
+ 'cpu_arch': 'x86_64'}
+
+
+def _get_stats():
+ return {'cpu_arch': 'x86_64'}
+
+
+FAKE_CLIENT_WRAPPER = FakeClientWrapper()
+
+
+@mock.patch.object(cw, 'IronicClientWrapper', lambda *_: FAKE_CLIENT_WRAPPER)
+class IronicDriverTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(IronicDriverTestCase, self).setUp()
+ self.flags(**IRONIC_FLAGS)
+ self.driver = ironic_driver.IronicDriver(None)
+ self.driver.virtapi = fake.FakeVirtAPI()
+ self.ctx = nova_context.get_admin_context()
+
+ # mock retries configs to avoid sleeps and make tests run quicker
+ CONF.set_default('api_max_retries', default=1, group='ironic')
+ CONF.set_default('api_retry_interval', default=0, group='ironic')
+
+ def test_public_api_signatures(self):
+ self.assertPublicAPISignatures(driver.ComputeDriver(None), self.driver)
+
+ def test_validate_driver_loading(self):
+ self.assertIsInstance(self.driver, ironic_driver.IronicDriver)
+
+ def test__get_hypervisor_type(self):
+ self.assertEqual('ironic', self.driver._get_hypervisor_type())
+
+ def test__get_hypervisor_version(self):
+ self.assertEqual(1, self.driver._get_hypervisor_version())
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
+ def test__validate_instance_and_node(self, mock_gbiui):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ instance_uuid = uuidutils.generate_uuid()
+ node = ironic_utils.get_test_node(uuid=node_uuid,
+ instance_uuid=instance_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=instance_uuid)
+ ironicclient = cw.IronicClientWrapper()
+
+ mock_gbiui.return_value = node
+ result = ironic_driver._validate_instance_and_node(ironicclient,
+ instance)
+ self.assertEqual(result.uuid, node_uuid)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
+ def test__validate_instance_and_node_failed(self, mock_gbiui):
+ ironicclient = cw.IronicClientWrapper()
+ mock_gbiui.side_effect = ironic_exception.NotFound()
+ instance_uuid = uuidutils.generate_uuid()
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=instance_uuid)
+ self.assertRaises(exception.InstanceNotFound,
+ ironic_driver._validate_instance_and_node,
+ ironicclient, instance)
+
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ def test__wait_for_active_pass(self, fake_validate):
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=uuidutils.generate_uuid())
+ node = ironic_utils.get_test_node(
+ provision_state=ironic_states.DEPLOYING)
+
+ fake_validate.return_value = node
+ self.driver._wait_for_active(FAKE_CLIENT, instance)
+ self.assertTrue(fake_validate.called)
+
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ def test__wait_for_active_done(self, fake_validate):
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=uuidutils.generate_uuid())
+ node = ironic_utils.get_test_node(
+ provision_state=ironic_states.ACTIVE)
+
+ fake_validate.return_value = node
+ self.assertRaises(loopingcall.LoopingCallDone,
+ self.driver._wait_for_active,
+ FAKE_CLIENT, instance)
+ self.assertTrue(fake_validate.called)
+
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ def test__wait_for_active_fail(self, fake_validate):
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=uuidutils.generate_uuid())
+ node = ironic_utils.get_test_node(
+ provision_state=ironic_states.DEPLOYFAIL)
+
+ fake_validate.return_value = node
+ self.assertRaises(exception.InstanceDeployFailure,
+ self.driver._wait_for_active,
+ FAKE_CLIENT, instance)
+ self.assertTrue(fake_validate.called)
+
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ def test__wait_for_power_state_pass(self, fake_validate):
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=uuidutils.generate_uuid())
+ node = ironic_utils.get_test_node(
+ target_power_state=ironic_states.POWER_OFF)
+
+ fake_validate.return_value = node
+ self.driver._wait_for_power_state(
+ FAKE_CLIENT, instance, 'fake message')
+ self.assertTrue(fake_validate.called)
+
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ def test__wait_for_power_state_ok(self, fake_validate):
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=uuidutils.generate_uuid())
+ node = ironic_utils.get_test_node(
+ target_power_state=ironic_states.NOSTATE)
+
+ fake_validate.return_value = node
+ self.assertRaises(loopingcall.LoopingCallDone,
+ self.driver._wait_for_power_state,
+ FAKE_CLIENT, instance, 'fake message')
+ self.assertTrue(fake_validate.called)
+
+ def test__node_resource(self):
+ node_uuid = uuidutils.generate_uuid()
+ instance_uuid = uuidutils.generate_uuid()
+ props = _get_properties()
+ stats = _get_stats()
+ node = ironic_utils.get_test_node(uuid=node_uuid,
+ instance_uuid=instance_uuid,
+ properties=props)
+
+ result = self.driver._node_resource(node)
+ self.assertEqual(props['cpus'], result['vcpus'])
+ self.assertEqual(props['cpus'], result['vcpus_used'])
+ self.assertEqual(props['memory_mb'], result['memory_mb'])
+ self.assertEqual(props['memory_mb'], result['memory_mb_used'])
+ self.assertEqual(props['local_gb'], result['local_gb'])
+ self.assertEqual(props['local_gb'], result['local_gb_used'])
+ self.assertEqual(node_uuid, result['hypervisor_hostname'])
+ self.assertEqual(stats, jsonutils.loads(result['stats']))
+
+ def test__node_resource_canonicalizes_arch(self):
+ node_uuid = uuidutils.generate_uuid()
+ props = _get_properties()
+ props['cpu_arch'] = 'i386'
+ node = ironic_utils.get_test_node(uuid=node_uuid, properties=props)
+
+ result = self.driver._node_resource(node)
+ self.assertEqual('i686',
+ jsonutils.loads(result['supported_instances'])[0][0])
+ self.assertEqual('i386',
+ jsonutils.loads(result['stats'])['cpu_arch'])
+
+ def test__node_resource_unknown_arch(self):
+ node_uuid = uuidutils.generate_uuid()
+ props = _get_properties()
+ del props['cpu_arch']
+ node = ironic_utils.get_test_node(uuid=node_uuid, properties=props)
+
+ result = self.driver._node_resource(node)
+ self.assertEqual([], jsonutils.loads(result['supported_instances']))
+
+ def test__node_resource_exposes_capabilities(self):
+ props = _get_properties()
+ props['capabilities'] = 'test:capability'
+ node = ironic_utils.get_test_node(properties=props)
+ result = self.driver._node_resource(node)
+ stats = jsonutils.loads(result['stats'])
+ self.assertIsNone(stats.get('capabilities'))
+ self.assertEqual('capability', stats.get('test'))
+
+ def test__node_resource_no_capabilities(self):
+ props = _get_properties()
+ props['capabilities'] = None
+ node = ironic_utils.get_test_node(properties=props)
+ result = self.driver._node_resource(node)
+ self.assertIsNone(jsonutils.loads(result['stats']).get('capabilities'))
+
+ def test__node_resource_malformed_capabilities(self):
+ props = _get_properties()
+ props['capabilities'] = 'test:capability,:no_key,no_val:'
+ node = ironic_utils.get_test_node(properties=props)
+ result = self.driver._node_resource(node)
+ stats = jsonutils.loads(result['stats'])
+ self.assertEqual('capability', stats.get('test'))
+
+ def test__node_resource_no_instance_uuid(self):
+ node_uuid = uuidutils.generate_uuid()
+ props = _get_properties()
+ stats = _get_stats()
+ node = ironic_utils.get_test_node(uuid=node_uuid,
+ instance_uuid=None,
+ power_state=ironic_states.POWER_OFF,
+ properties=props)
+
+ result = self.driver._node_resource(node)
+ self.assertEqual(props['cpus'], result['vcpus'])
+ self.assertEqual(0, result['vcpus_used'])
+ self.assertEqual(props['memory_mb'], result['memory_mb'])
+ self.assertEqual(0, result['memory_mb_used'])
+ self.assertEqual(props['local_gb'], result['local_gb'])
+ self.assertEqual(0, result['local_gb_used'])
+ self.assertEqual(node_uuid, result['hypervisor_hostname'])
+ self.assertEqual(stats, jsonutils.loads(result['stats']))
+
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_node_resources_unavailable')
+ def test__node_resource_unavailable_node_res(self, mock_res_unavail):
+ mock_res_unavail.return_value = True
+ node_uuid = uuidutils.generate_uuid()
+ props = _get_properties()
+ stats = _get_stats()
+ node = ironic_utils.get_test_node(uuid=node_uuid,
+ instance_uuid=None,
+ properties=props)
+
+ result = self.driver._node_resource(node)
+ self.assertEqual(0, result['vcpus'])
+ self.assertEqual(0, result['vcpus_used'])
+ self.assertEqual(0, result['memory_mb'])
+ self.assertEqual(0, result['memory_mb_used'])
+ self.assertEqual(0, result['local_gb'])
+ self.assertEqual(0, result['local_gb_used'])
+ self.assertEqual(node_uuid, result['hypervisor_hostname'])
+ self.assertEqual(stats, jsonutils.loads(result['stats']))
+
+ @mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
+ create=True)
+ @mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
+ create=True)
+ @mock.patch.object(firewall.NoopFirewallDriver, 'apply_instance_filter',
+ create=True)
+ def test__start_firewall(self, mock_aif, mock_sbf, mock_pif):
+ fake_inst = 'fake-inst'
+ fake_net_info = utils.get_test_network_info()
+ self.driver._start_firewall(fake_inst, fake_net_info)
+
+ mock_aif.assert_called_once_with(fake_inst, fake_net_info)
+ mock_sbf.assert_called_once_with(fake_inst, fake_net_info)
+ mock_pif.assert_called_once_with(fake_inst, fake_net_info)
+
+ @mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
+ create=True)
+ def test__stop_firewall(self, mock_ui):
+ fake_inst = 'fake-inst'
+ fake_net_info = utils.get_test_network_info()
+ self.driver._stop_firewall(fake_inst, fake_net_info)
+ mock_ui.assert_called_once_with(fake_inst, fake_net_info)
+
+ @mock.patch.object(cw.IronicClientWrapper, 'call')
+ def test_instance_exists(self, mock_call):
+ instance_uuid = 'fake-uuid'
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=instance_uuid)
+ self.assertTrue(self.driver.instance_exists(instance))
+ mock_call.assert_called_once_with('node.get_by_instance_uuid',
+ instance_uuid)
+
+ @mock.patch.object(cw.IronicClientWrapper, 'call')
+ def test_instance_exists_fail(self, mock_call):
+ mock_call.side_effect = ironic_exception.NotFound
+ instance_uuid = 'fake-uuid'
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ uuid=instance_uuid)
+ self.assertFalse(self.driver.instance_exists(instance))
+ mock_call.assert_called_once_with('node.get_by_instance_uuid',
+ instance_uuid)
+
+ @mock.patch.object(cw.IronicClientWrapper, 'call')
+ @mock.patch.object(objects.Instance, 'get_by_uuid')
+ def test_list_instances(self, mock_inst_by_uuid, mock_call):
+ nodes = []
+ instances = []
+ for i in range(2):
+ uuid = uuidutils.generate_uuid()
+ instances.append(fake_instance.fake_instance_obj(self.ctx,
+ id=i,
+ uuid=uuid))
+ nodes.append(ironic_utils.get_test_node(instance_uuid=uuid))
+
+ mock_inst_by_uuid.side_effect = instances
+ mock_call.return_value = nodes
+
+ response = self.driver.list_instances()
+ mock_call.assert_called_with("node.list", associated=True, limit=0)
+ expected_calls = [mock.call(mock.ANY, instances[0].uuid),
+ mock.call(mock.ANY, instances[1].uuid)]
+ mock_inst_by_uuid.assert_has_calls(expected_calls)
+ self.assertEqual(['instance-00000000', 'instance-00000001'],
+ sorted(response))
+
+ @mock.patch.object(cw.IronicClientWrapper, 'call')
+ def test_list_instance_uuids(self, mock_call):
+ num_nodes = 2
+ nodes = []
+ for n in range(num_nodes):
+ nodes.append(ironic_utils.get_test_node(
+ instance_uuid=uuidutils.generate_uuid()))
+
+ mock_call.return_value = nodes
+ uuids = self.driver.list_instance_uuids()
+ mock_call.assert_called_with('node.list', associated=True, limit=0)
+ expected = [n.instance_uuid for n in nodes]
+ self.assertEqual(sorted(expected), sorted(uuids))
+
+ @mock.patch.object(FAKE_CLIENT.node, 'list')
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ def test_node_is_available_empty_cache_empty_list(self, mock_get,
+ mock_list):
+ node = ironic_utils.get_test_node()
+ mock_get.return_value = node
+ mock_list.return_value = []
+ self.assertTrue(self.driver.node_is_available(node.uuid))
+ mock_get.assert_called_with(node.uuid)
+ mock_list.assert_called_with(detail=True, limit=0)
+
+ mock_get.side_effect = ironic_exception.NotFound
+ self.assertFalse(self.driver.node_is_available(node.uuid))
+
+ @mock.patch.object(FAKE_CLIENT.node, 'list')
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ def test_node_is_available_empty_cache(self, mock_get, mock_list):
+ node = ironic_utils.get_test_node()
+ mock_get.return_value = node
+ mock_list.return_value = [node]
+ self.assertTrue(self.driver.node_is_available(node.uuid))
+ mock_list.assert_called_with(detail=True, limit=0)
+ self.assertEqual(0, mock_get.call_count)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'list')
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ def test_node_is_available_with_cache(self, mock_get, mock_list):
+ node = ironic_utils.get_test_node()
+ mock_get.return_value = node
+ mock_list.return_value = [node]
+ # populate the cache
+ self.driver.get_available_nodes(refresh=True)
+ # prove that zero calls are made after populating cache
+ mock_list.reset_mock()
+ self.assertTrue(self.driver.node_is_available(node.uuid))
+ self.assertEqual(0, mock_list.call_count)
+ self.assertEqual(0, mock_get.call_count)
+
+ def test__node_resources_unavailable(self):
+ node_dicts = [
+ # a node in maintenance /w no instance and power OFF
+ {'uuid': uuidutils.generate_uuid(),
+ 'maintenance': True,
+ 'power_state': ironic_states.POWER_OFF},
+ # a node in maintenance /w no instance and ERROR power state
+ {'uuid': uuidutils.generate_uuid(),
+ 'maintenance': True,
+ 'power_state': ironic_states.ERROR},
+ # a node not in maintenance /w no instance and bad power state
+ {'uuid': uuidutils.generate_uuid(),
+ 'power_state': ironic_states.NOSTATE},
+ ]
+ for n in node_dicts:
+ node = ironic_utils.get_test_node(**n)
+ self.assertTrue(self.driver._node_resources_unavailable(node))
+
+ avail_node = ironic_utils.get_test_node(
+ power_state=ironic_states.POWER_OFF)
+ self.assertFalse(self.driver._node_resources_unavailable(avail_node))
+
+ @mock.patch.object(FAKE_CLIENT.node, 'list')
+ def test_get_available_nodes(self, mock_list):
+ node_dicts = [
+ # a node in maintenance /w no instance and power OFF
+ {'uuid': uuidutils.generate_uuid(),
+ 'maintenance': True,
+ 'power_state': ironic_states.POWER_OFF},
+ # a node /w instance and power ON
+ {'uuid': uuidutils.generate_uuid(),
+ 'instance_uuid': uuidutils.generate_uuid(),
+ 'power_state': ironic_states.POWER_ON},
+ # a node not in maintenance /w no instance and bad power state
+ {'uuid': uuidutils.generate_uuid(),
+ 'power_state': ironic_states.ERROR},
+ ]
+ nodes = [ironic_utils.get_test_node(**n) for n in node_dicts]
+ mock_list.return_value = nodes
+ available_nodes = self.driver.get_available_nodes()
+ expected_uuids = [n['uuid'] for n in node_dicts]
+ self.assertEqual(sorted(expected_uuids), sorted(available_nodes))
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ @mock.patch.object(FAKE_CLIENT.node, 'list')
+ @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
+ def test_get_available_resource(self, mock_nr, mock_list, mock_get):
+ node = ironic_utils.get_test_node()
+ node_2 = ironic_utils.get_test_node(uuid=uuidutils.generate_uuid())
+ fake_resource = 'fake-resource'
+ mock_get.return_value = node
+ # ensure cache gets populated without the node we want
+ mock_list.return_value = [node_2]
+ mock_nr.return_value = fake_resource
+
+ result = self.driver.get_available_resource(node.uuid)
+ self.assertEqual(fake_resource, result)
+ mock_nr.assert_called_once_with(node)
+ mock_get.assert_called_once_with(node.uuid)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ @mock.patch.object(FAKE_CLIENT.node, 'list')
+ @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
+ def test_get_available_resource_with_cache(self, mock_nr, mock_list,
+ mock_get):
+ node = ironic_utils.get_test_node()
+ fake_resource = 'fake-resource'
+ mock_list.return_value = [node]
+ mock_nr.return_value = fake_resource
+ # populate the cache
+ self.driver.get_available_nodes(refresh=True)
+ mock_list.reset_mock()
+
+ result = self.driver.get_available_resource(node.uuid)
+ self.assertEqual(fake_resource, result)
+ self.assertEqual(0, mock_list.call_count)
+ self.assertEqual(0, mock_get.call_count)
+ mock_nr.assert_called_once_with(node)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
+ def test_get_info(self, mock_gbiu):
+ instance_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ properties = {'memory_mb': 512, 'cpus': 2}
+ power_state = ironic_states.POWER_ON
+ node = ironic_utils.get_test_node(instance_uuid=instance_uuid,
+ properties=properties,
+ power_state=power_state)
+
+ mock_gbiu.return_value = node
+
+ # ironic_states.POWER_ON should be mapped to
+ # nova_states.RUNNING
+ memory_kib = properties['memory_mb'] * 1024
+ expected = {'state': nova_states.RUNNING,
+ 'max_mem': memory_kib,
+ 'mem': memory_kib,
+ 'num_cpu': properties['cpus'],
+ 'cpu_time': 0}
+ instance = fake_instance.fake_instance_obj('fake-context',
+ uuid=instance_uuid)
+ result = self.driver.get_info(instance)
+ self.assertEqual(expected, result)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
+ def test_get_info_http_not_found(self, mock_gbiu):
+ mock_gbiu.side_effect = ironic_exception.NotFound()
+
+ expected = {'state': nova_states.NOSTATE,
+ 'max_mem': 0,
+ 'mem': 0,
+ 'num_cpu': 0,
+ 'cpu_time': 0}
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, uuid=uuidutils.generate_uuid())
+ result = self.driver.get_info(instance)
+ self.assertEqual(expected, result)
+
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ def test_macs_for_instance(self, mock_node):
+ node = ironic_utils.get_test_node()
+ port = ironic_utils.get_test_port()
+ mock_node.get.return_value = node
+ mock_node.list_ports.return_value = [port]
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ result = self.driver.macs_for_instance(instance)
+ self.assertEqual(set([port.address]), result)
+ mock_node.list_ports.assert_called_once_with(node.uuid)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ def test_macs_for_instance_http_not_found(self, mock_get):
+ mock_get.side_effect = ironic_exception.NotFound()
+
+ instance = fake_instance.fake_instance_obj(
+ self.ctx, node=uuidutils.generate_uuid())
+ result = self.driver.macs_for_instance(instance)
+ self.assertIsNone(result)
+
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(objects.Flavor, 'get_by_id')
    @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
    @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    def test_spawn(self, mock_sf, mock_pvifs, mock_adf, mock_wait_active,
                   mock_fg_bid, mock_node, mock_looping, mock_save):
        """Happy-path spawn: validate the node, add driver fields, plug
        VIFs, start the firewall, request 'active' and poll for deploy
        completion via a FixedIntervalLoopingCall.
        """
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        fake_flavor = {'ephemeral_gb': 0}

        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.set_provision_state.return_value = mock.MagicMock()
        mock_fg_bid.return_value = fake_flavor

        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call

        self.driver.spawn(self.ctx, instance, None, [], None)

        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.validate.assert_called_once_with(node_uuid)
        mock_fg_bid.assert_called_once_with(self.ctx,
                                            instance['instance_type_id'])
        mock_adf.assert_called_once_with(node, instance, None, fake_flavor)
        mock_pvifs.assert_called_once_with(node, instance, None)
        mock_sf.assert_called_once_with(instance, None)
        mock_node.set_provision_state.assert_called_once_with(node_uuid,
                                                              'active')

        # With no ephemeral disk in the flavor, no default device is set
        # and the instance is never saved.
        self.assertIsNone(instance['default_ephemeral_device'])
        self.assertFalse(mock_save.called)

        mock_looping.assert_called_once_with(mock_wait_active,
                                             FAKE_CLIENT_WRAPPER,
                                             instance)
        fake_looping_call.start.assert_called_once_with(
            interval=CONF.ironic.api_retry_interval)
        fake_looping_call.wait.assert_called_once_with()
+
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(objects.Flavor, 'get_by_id')
    @mock.patch.object(ironic_driver.IronicDriver, 'destroy')
    @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
    @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    def test_spawn_destroyed_after_failure(self, mock_sf, mock_pvifs, mock_adf,
                                           mock_wait_active, mock_destroy,
                                           mock_fg_bid, mock_node,
                                           mock_looping):
        """If the deploy wait loop raises, spawn() tears the instance down
        via destroy() and re-raises InstanceDeployFailure.
        """
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        fake_flavor = {'ephemeral_gb': 0}

        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.set_provision_state.return_value = mock.MagicMock()
        mock_fg_bid.return_value = fake_flavor

        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call

        # Make the deploy polling loop fail.
        deploy_exc = exception.InstanceDeployFailure('foo')
        fake_looping_call.wait.side_effect = deploy_exc
        self.assertRaises(
            exception.InstanceDeployFailure,
            self.driver.spawn, self.ctx, instance, None, [], None)
        mock_destroy.assert_called_once_with(self.ctx, instance, None)
+
    @mock.patch.object(FAKE_CLIENT.node, 'update')
    def test__add_driver_fields_good(self, mock_update):
        """_add_driver_fields patches instance_info and instance_uuid onto
        the node in a single node.update() call, in this exact order.
        """
        node = ironic_utils.get_test_node(driver='fake')
        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node.uuid)
        image_meta = ironic_utils.get_test_image_meta()
        flavor = ironic_utils.get_test_flavor()
        self.driver._add_driver_fields(node, instance, image_meta, flavor)
        expected_patch = [{'path': '/instance_info/image_source', 'op': 'add',
                           'value': image_meta['id']},
                          {'path': '/instance_info/root_gb', 'op': 'add',
                           'value': str(instance.root_gb)},
                          {'path': '/instance_info/swap_mb', 'op': 'add',
                           'value': str(flavor['swap'])},
                          {'path': '/instance_uuid', 'op': 'add',
                           'value': instance.uuid}]
        mock_update.assert_called_once_with(node.uuid, expected_patch)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ def test__add_driver_fields_fail(self, mock_update):
+ mock_update.side_effect = ironic_exception.BadRequest()
+ node = ironic_utils.get_test_node(driver='fake')
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ image_meta = ironic_utils.get_test_image_meta()
+ flavor = ironic_utils.get_test_flavor()
+ self.assertRaises(exception.InstanceDeployFailure,
+ self.driver._add_driver_fields,
+ node, instance, image_meta, flavor)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ def test__cleanup_deploy_good_with_flavor(self, mock_update):
+ node = ironic_utils.get_test_node(driver='fake',
+ instance_uuid='fake-id')
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ flavor = ironic_utils.get_test_flavor(extra_specs={})
+ self.driver._cleanup_deploy(self.ctx, node, instance, None,
+ flavor=flavor)
+ expected_patch = [{'path': '/instance_uuid', 'op': 'remove'}]
+ mock_update.assert_called_once_with(node.uuid, expected_patch)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ def test__cleanup_deploy_without_flavor(self, mock_update, mock_flavor):
+ mock_flavor.return_value = ironic_utils.get_test_flavor(extra_specs={})
+ node = ironic_utils.get_test_node(driver='fake',
+ instance_uuid='fake-id')
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ self.driver._cleanup_deploy(self.ctx, node, instance, None)
+ expected_patch = [{'path': '/instance_uuid', 'op': 'remove'}]
+ mock_update.assert_called_once_with(node.uuid, expected_patch)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(FAKE_CLIENT.node, 'update')
+ def test__cleanup_deploy_fail(self, mock_update, mock_flavor):
+ mock_flavor.return_value = ironic_utils.get_test_flavor(extra_specs={})
+ mock_update.side_effect = ironic_exception.BadRequest()
+ node = ironic_utils.get_test_node(driver='fake',
+ instance_uuid='fake-id')
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ self.assertRaises(exception.InstanceTerminationFailure,
+ self.driver._cleanup_deploy,
+ self.ctx, node, instance, None)
+
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def test_spawn_node_driver_validation_fail(self, mock_flavor, mock_node):
        """spawn() aborts with ValidationError when Ironic reports the
        node's power and deploy interfaces as not ready.
        """
        mock_flavor.return_value = ironic_utils.get_test_flavor()
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)

        # Both validation checks fail.
        mock_node.validate.return_value = ironic_utils.get_test_validation(
            power=False, deploy=False)
        mock_node.get.return_value = node
        image_meta = ironic_utils.get_test_image_meta()

        self.assertRaises(exception.ValidationError, self.driver.spawn,
                          self.ctx, instance, image_meta, [], None)
        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.validate.assert_called_once_with(node_uuid)
        mock_flavor.assert_called_with(mock.ANY, instance['instance_type_id'])
+
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(objects.Flavor, 'get_by_id')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_spawn_node_prepare_for_deploy_fail(self, mock_cleanup_deploy,
                                                mock_pvifs, mock_sf,
                                                mock_flavor, mock_node):
        """A failure during deploy preparation (here: starting the
        firewall) propagates, and _cleanup_deploy undoes the partial
        setup.
        """
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        flavor = ironic_utils.get_test_flavor()
        mock_flavor.return_value = flavor
        image_meta = ironic_utils.get_test_image_meta()

        class TestException(Exception):
            pass

        mock_sf.side_effect = TestException()
        self.assertRaises(TestException, self.driver.spawn,
                          self.ctx, instance, image_meta, [], None)

        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.validate.assert_called_once_with(node_uuid)
        mock_flavor.assert_called_once_with(self.ctx,
                                            instance['instance_type_id'])
        mock_cleanup_deploy.assert_called_with(self.ctx, node, instance, None,
                                               flavor=flavor)
+
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(objects.Flavor, 'get_by_id')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_spawn_node_trigger_deploy_fail(self, mock_cleanup_deploy,
                                            mock_pvifs, mock_sf,
                                            mock_flavor, mock_node):
        """A NovaException from set_provision_state propagates out of
        spawn() and triggers _cleanup_deploy.
        """
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        flavor = ironic_utils.get_test_flavor()
        mock_flavor.return_value = flavor
        image_meta = ironic_utils.get_test_image_meta()

        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()

        mock_node.set_provision_state.side_effect = exception.NovaException()
        self.assertRaises(exception.NovaException, self.driver.spawn,
                          self.ctx, instance, image_meta, [], None)

        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.validate.assert_called_once_with(node_uuid)
        mock_flavor.assert_called_once_with(self.ctx,
                                            instance['instance_type_id'])
        mock_cleanup_deploy.assert_called_once_with(self.ctx, node,
                                                    instance, None,
                                                    flavor=flavor)
+
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(objects.Flavor, 'get_by_id')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_spawn_node_trigger_deploy_fail2(self, mock_cleanup_deploy,
                                             mock_pvifs, mock_sf,
                                             mock_flavor, mock_node):
        """An Ironic BadRequest from set_provision_state propagates
        unchanged and still triggers _cleanup_deploy.
        """
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        flavor = ironic_utils.get_test_flavor()
        mock_flavor.return_value = flavor
        image_meta = ironic_utils.get_test_image_meta()

        mock_node.get.return_value = node
        mock_node.validate.return_value = ironic_utils.get_test_validation()
        mock_node.set_provision_state.side_effect = ironic_exception.BadRequest
        self.assertRaises(ironic_exception.BadRequest,
                          self.driver.spawn,
                          self.ctx, instance, image_meta, [], None)

        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.validate.assert_called_once_with(node_uuid)
        mock_flavor.assert_called_once_with(self.ctx,
                                            instance['instance_type_id'])
        mock_cleanup_deploy.assert_called_once_with(self.ctx, node,
                                                    instance, None,
                                                    flavor=flavor)
+
+ @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
+ @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
+ @mock.patch.object(ironic_driver.IronicDriver, 'destroy')
+ def test_spawn_node_trigger_deploy_fail3(self, mock_destroy,
+ mock_pvifs, mock_sf,
+ mock_flavor, mock_node,
+ mock_looping):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ fake_net_info = utils.get_test_network_info()
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ mock_flavor.return_value = ironic_utils.get_test_flavor()
+ image_meta = ironic_utils.get_test_image_meta()
+
+ mock_node.get.return_value = node
+ mock_node.validate.return_value = ironic_utils.get_test_validation()
+
+ fake_looping_call = FakeLoopingCall()
+ mock_looping.return_value = fake_looping_call
+
+ fake_looping_call.wait.side_effect = ironic_exception.BadRequest
+ fake_net_info = utils.get_test_network_info()
+ self.assertRaises(ironic_exception.BadRequest,
+ self.driver.spawn, self.ctx, instance,
+ image_meta, [], None, fake_net_info)
+ mock_destroy.assert_called_once_with(self.ctx, instance,
+ fake_net_info)
+
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(objects.Flavor, 'get_by_id')
    @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
    @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
    @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
    def test_spawn_sets_default_ephemeral_device(self, mock_sf, mock_pvifs,
                                                 mock_wait, mock_flavor,
                                                 mock_node, mock_save,
                                                 mock_looping):
        """A flavor with ephemeral storage makes spawn() record
        '/dev/sda1' as the default ephemeral device and save the instance.
        """
        mock_flavor.return_value = ironic_utils.get_test_flavor(ephemeral_gb=1)
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
        mock_node.get_by_instance_uuid.return_value = node
        mock_node.set_provision_state.return_value = mock.MagicMock()
        image_meta = ironic_utils.get_test_image_meta()

        self.driver.spawn(self.ctx, instance, image_meta, [], None)
        mock_flavor.assert_called_once_with(self.ctx,
                                            instance['instance_type_id'])
        self.assertTrue(mock_save.called)
        self.assertEqual('/dev/sda1', instance['default_ephemeral_device'])
+
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_destroy(self, mock_cleanup_deploy, mock_node):
        """destroy() unprovisions an ACTIVE node and then runs cleanup."""
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        network_info = 'foo'

        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
                                          provision_state=ironic_states.ACTIVE)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)

        # Simulate a successful unprovision by clearing provision_state
        # when set_provision_state is invoked.
        def fake_set_provision_state(*_):
            node.provision_state = None

        mock_node.get_by_instance_uuid.return_value = node
        mock_node.set_provision_state.side_effect = fake_set_provision_state
        self.driver.destroy(self.ctx, instance, network_info, None)
        mock_node.set_provision_state.assert_called_once_with(node_uuid,
                                                              'deleted')
        mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
        mock_cleanup_deploy.assert_called_with(self.ctx, node,
                                               instance, network_info)
+
    @mock.patch.object(FAKE_CLIENT, 'node')
    @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
    def test_destroy_ignore_unexpected_state(self, mock_cleanup_deploy,
                                             mock_node):
        """A node already DELETING is not unprovisioned again, but the
        deploy cleanup still runs.
        """
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        network_info = 'foo'

        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
                                          provision_state=ironic_states.DELETING)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)

        mock_node.get_by_instance_uuid.return_value = node
        self.driver.destroy(self.ctx, instance, network_info, None)
        self.assertFalse(mock_node.set_provision_state.called)
        mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
        mock_cleanup_deploy.assert_called_with(self.ctx, node, instance,
                                               network_info)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ def test_destroy_trigger_undeploy_fail(self, fake_validate, mock_sps):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
+ provision_state=ironic_states.ACTIVE)
+ fake_validate.return_value = node
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node_uuid)
+ mock_sps.side_effect = exception.NovaException()
+ self.assertRaises(exception.NovaException, self.driver.destroy,
+ self.ctx, instance, None, None)
+
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ def test_destroy_unprovision_fail(self, mock_node):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
+ provision_state=ironic_states.ACTIVE)
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+
+ def fake_set_provision_state(*_):
+ node.provision_state = ironic_states.ERROR
+
+ mock_node.get_by_instance_uuid.return_value = node
+ self.assertRaises(exception.NovaException, self.driver.destroy,
+ self.ctx, instance, None, None)
+ mock_node.set_provision_state.assert_called_once_with(node_uuid,
+ 'deleted')
+
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_destroy_unassociate_fail(self, mock_node):
        """A NovaException raised by node.update propagates out of
        destroy() after the unprovision request was issued.
        """
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
                                          provision_state=ironic_states.ACTIVE)
        instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)

        mock_node.get_by_instance_uuid.return_value = node
        mock_node.update.side_effect = exception.NovaException()
        self.assertRaises(exception.NovaException, self.driver.destroy,
                          self.ctx, instance, None, None)
        mock_node.set_provision_state.assert_called_once_with(node_uuid,
                                                              'deleted')
        mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
+
+ @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
+ def test_reboot(self, mock_sp, fake_validate, mock_looping):
+ node = ironic_utils.get_test_node()
+ fake_validate.side_effect = [node, node]
+
+ fake_looping_call = FakeLoopingCall()
+ mock_looping.return_value = fake_looping_call
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node.uuid)
+ self.driver.reboot(self.ctx, instance, None, None)
+ mock_sp.assert_called_once_with(node.uuid, 'reboot')
+
+ @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
+ def test_power_off(self, mock_sp, fake_validate, mock_looping):
+ node = ironic_utils.get_test_node()
+ fake_validate.side_effect = [node, node]
+
+ fake_looping_call = FakeLoopingCall()
+ mock_looping.return_value = fake_looping_call
+ instance_uuid = uuidutils.generate_uuid()
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=instance_uuid)
+
+ self.driver.power_off(instance)
+ mock_sp.assert_called_once_with(node.uuid, 'off')
+
+ @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
+ @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
+ def test_power_on(self, mock_sp, fake_validate, mock_looping):
+ node = ironic_utils.get_test_node()
+ fake_validate.side_effect = [node, node]
+
+ fake_looping_call = FakeLoopingCall()
+ mock_looping.return_value = fake_looping_call
+ instance_uuid = uuidutils.generate_uuid()
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=instance_uuid)
+
+ self.driver.power_on(self.ctx, instance,
+ utils.get_test_network_info())
+ mock_sp.assert_called_once_with(node.uuid, 'on')
+
    @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
    def test_plug_vifs_with_port(self, mock_uvifs, mock_port_udt, mock_lp):
        """_plug_vifs records the VIF id from network_info in the Ironic
        port's extra/vif_port_id field via port.update().
        """
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port()

        mock_lp.return_value = [port]

        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        network_info = utils.get_test_network_info()

        # NOTE(review): `unicode` is Python 2 only; six.text_type would be
        # needed for Python 3 compatibility — confirm against project policy.
        port_id = unicode(network_info[0]['id'])
        expected_patch = [{'op': 'add',
                           'path': '/extra/vif_port_id',
                           'value': port_id}]
        self.driver._plug_vifs(node, instance, network_info)

        # asserts
        mock_uvifs.assert_called_once_with(node, instance, network_info)
        mock_lp.assert_called_once_with(node_uuid)
        mock_port_udt.assert_called_with(port.uuid, expected_patch)
+
+ @mock.patch.object(FAKE_CLIENT.node, 'get')
+ @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
+ def test_plug_vifs(self, mock__plug_vifs, mock_get):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(uuid=node_uuid)
+
+ mock_get.return_value = node
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node_uuid)
+ network_info = utils.get_test_network_info()
+ self.driver.plug_vifs(instance, network_info)
+
+ mock_get.assert_called_once_with(node_uuid)
+ mock__plug_vifs.assert_called_once_with(node, instance, network_info)
+
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
    @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
    def test_plug_vifs_count_mismatch(self, mock_uvifs, mock_lp,
                                      mock_port_udt):
        """More VIFs than Ironic ports raises and no port gets patched."""
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port()

        mock_lp.return_value = [port]

        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        # len(network_info) > len(ports)
        network_info = (utils.get_test_network_info() +
                        utils.get_test_network_info())
        self.assertRaises(exception.NovaException,
                          self.driver._plug_vifs, node, instance,
                          network_info)

        # asserts
        mock_uvifs.assert_called_once_with(node, instance, network_info)
        mock_lp.assert_called_once_with(node_uuid)
        # assert port.update() was not called
        self.assertFalse(mock_port_udt.called)
+
+ @mock.patch.object(FAKE_CLIENT.port, 'update')
+ @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
+ @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
+ def test_plug_vifs_no_network_info(self, mock_uvifs, mock_lp,
+ mock_port_udt):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(uuid=node_uuid)
+ port = ironic_utils.get_test_port()
+
+ mock_lp.return_value = [port]
+
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ node=node_uuid)
+ network_info = []
+ self.driver._plug_vifs(node, instance, network_info)
+
+ # asserts
+ mock_uvifs.assert_called_once_with(node, instance, network_info)
+ mock_lp.assert_called_once_with(node_uuid)
+ # assert port.update() was not called
+ self.assertFalse(mock_port_udt.called)
+
    @mock.patch.object(FAKE_CLIENT.port, 'update')
    @mock.patch.object(FAKE_CLIENT, 'node')
    def test_unplug_vifs(self, mock_node, mock_update):
        """unplug_vifs removes the extra/vif_port_id entry from each port."""
        node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
        node = ironic_utils.get_test_node(uuid=node_uuid)
        port = ironic_utils.get_test_port(extra={'vif_port_id': 'fake-vif'})

        mock_node.get.return_value = node
        mock_node.list_ports.return_value = [port]

        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   node=node_uuid)
        expected_patch = [{'op': 'remove', 'path':
                           '/extra/vif_port_id'}]
        self.driver.unplug_vifs(instance,
                                utils.get_test_network_info())

        # asserts
        mock_node.get.assert_called_once_with(node_uuid)
        mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
        mock_update.assert_called_once_with(port.uuid, expected_patch)
+
+ @mock.patch.object(FAKE_CLIENT.port, 'update')
+ @mock.patch.object(FAKE_CLIENT, 'node')
+ def test_unplug_vifs_port_not_associated(self, mock_node, mock_update):
+ node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
+ node = ironic_utils.get_test_node(uuid=node_uuid)
+ port = ironic_utils.get_test_port(extra={})
+
+ mock_node.get.return_value = node
+ mock_node.list_ports.return_value = [port]
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
+ self.driver.unplug_vifs(instance, utils.get_test_network_info())
+
+ mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
+ # assert port.update() was not called
+ self.assertFalse(mock_update.called)
+
+ @mock.patch.object(FAKE_CLIENT.port, 'update')
+ def test_unplug_vifs_no_network_info(self, mock_update):
+ instance = fake_instance.fake_instance_obj(self.ctx)
+ network_info = []
+ self.driver.unplug_vifs(instance, network_info)
+
+ # assert port.update() was not called
+ self.assertFalse(mock_update.called)
+
+ @mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
+ create=True)
+ def test_unfilter_instance(self, mock_ui):
+ instance = fake_instance.fake_instance_obj(self.ctx)
+ network_info = utils.get_test_network_info()
+ self.driver.unfilter_instance(instance, network_info)
+ mock_ui.assert_called_once_with(instance, network_info)
+
+ @mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
+ create=True)
+ @mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
+ create=True)
+ def test_ensure_filtering_rules_for_instance(self, mock_pif, mock_sbf):
+ instance = fake_instance.fake_instance_obj(self.ctx)
+ network_info = utils.get_test_network_info()
+ self.driver.ensure_filtering_rules_for_instance(instance,
+ network_info)
+ mock_sbf.assert_called_once_with(instance, network_info)
+ mock_pif.assert_called_once_with(instance, network_info)
+
+ @mock.patch.object(firewall.NoopFirewallDriver,
+ 'refresh_instance_security_rules', create=True)
+ def test_refresh_instance_security_rules(self, mock_risr):
+ instance = fake_instance.fake_instance_obj(self.ctx)
+ self.driver.refresh_instance_security_rules(instance)
+ mock_risr.assert_called_once_with(instance)
+
+ @mock.patch.object(firewall.NoopFirewallDriver,
+ 'refresh_provider_fw_rules', create=True)
+ def test_refresh_provider_fw_rules(self, mock_rpfr):
+ fake_instance.fake_instance_obj(self.ctx)
+ self.driver.refresh_provider_fw_rules()
+ mock_rpfr.assert_called_once_with()
+
+ @mock.patch.object(firewall.NoopFirewallDriver,
+ 'refresh_security_group_members', create=True)
+ def test_refresh_security_group_members(self, mock_rsgm):
+ fake_group = 'fake-security-group-members'
+ self.driver.refresh_security_group_members(fake_group)
+ mock_rsgm.assert_called_once_with(fake_group)
+
    @mock.patch.object(firewall.NoopFirewallDriver,
                       'refresh_instance_security_rules', create=True)
    # NOTE(review): despite its name, this test exercises
    # refresh_instance_security_rules, not refresh_security_group_rules.
    # Verify whether the driver's refresh_security_group_rules should be
    # the method under test here.
    def test_refresh_security_group_rules(self, mock_risr):
        fake_group = 'fake-security-group-members'
        self.driver.refresh_instance_security_rules(fake_group)
        mock_risr.assert_called_once_with(fake_group)
+
    @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
    @mock.patch.object(objects.Flavor, 'get_by_id')
    @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    @mock.patch.object(objects.Instance, 'save')
    def _test_rebuild(self, mock_save, mock_get, mock_driver_fields,
                      mock_fg_bid, mock_set_pstate, mock_looping,
                      mock_wait_active, preserve=False):
        """Shared rebuild scenario: the instance is saved with the
        REBUILDING task state, driver fields are re-applied (with the
        ``preserve`` flag forwarded), the node is set to REBUILD, and the
        driver polls until it is active again.
        """
        node_uuid = uuidutils.generate_uuid()
        instance_uuid = uuidutils.generate_uuid()
        node = ironic_utils.get_test_node(uuid=node_uuid,
                                          instance_uuid=instance_uuid,
                                          instance_type_id=5)
        mock_get.return_value = node

        image_meta = ironic_utils.get_test_image_meta()
        flavor_id = 5
        flavor = {'id': flavor_id, 'name': 'baremetal'}
        mock_fg_bid.return_value = flavor

        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   uuid=instance_uuid,
                                                   node=node_uuid,
                                                   instance_type_id=flavor_id)

        fake_looping_call = FakeLoopingCall()
        mock_looping.return_value = fake_looping_call

        self.driver.rebuild(
            context=self.ctx, instance=instance, image_meta=image_meta,
            injected_files=None, admin_password=None, bdms=None,
            detach_block_devices=None, attach_block_devices=None,
            preserve_ephemeral=preserve)

        mock_save.assert_called_once_with(
            expected_task_state=[task_states.REBUILDING])
        mock_driver_fields.assert_called_once_with(node, instance, image_meta,
                                                   flavor, preserve)
        mock_set_pstate.assert_called_once_with(node_uuid,
                                                ironic_states.REBUILD)
        mock_looping.assert_called_once_with(mock_wait_active,
                                             FAKE_CLIENT_WRAPPER,
                                             instance)
        fake_looping_call.start.assert_called_once_with(
            interval=CONF.ironic.api_retry_interval)
        fake_looping_call.wait.assert_called_once_with()
+
    def test_rebuild_preserve_ephemeral(self):
        """Rebuild while keeping the ephemeral partition."""
        self._test_rebuild(preserve=True)

    def test_rebuild_no_preserve_ephemeral(self):
        """Rebuild without keeping the ephemeral partition."""
        self._test_rebuild(preserve=False)
+
    @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
    @mock.patch.object(objects.Flavor, 'get_by_id')
    @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
    @mock.patch.object(FAKE_CLIENT.node, 'get')
    @mock.patch.object(objects.Instance, 'save')
    def test_rebuild_failures(self, mock_save, mock_get, mock_driver_fields,
                              mock_fg_bid, mock_set_pstate):
        """Any error while requesting the REBUILD provision state is
        translated to InstanceDeployFailure.
        """
        node_uuid = uuidutils.generate_uuid()
        instance_uuid = uuidutils.generate_uuid()
        node = ironic_utils.get_test_node(uuid=node_uuid,
                                          instance_uuid=instance_uuid,
                                          instance_type_id=5)
        mock_get.return_value = node

        image_meta = ironic_utils.get_test_image_meta()
        flavor_id = 5
        flavor = {'id': flavor_id, 'name': 'baremetal'}
        mock_fg_bid.return_value = flavor

        instance = fake_instance.fake_instance_obj(self.ctx,
                                                   uuid=instance_uuid,
                                                   node=node_uuid,
                                                   instance_type_id=flavor_id)

        # Nova's own exception plus both Ironic client error classes must
        # all surface as InstanceDeployFailure.
        exceptions = [
            exception.NovaException(),
            ironic_exception.BadRequest(),
            ironic_exception.InternalServerError(),
        ]
        for e in exceptions:
            mock_set_pstate.side_effect = e
            self.assertRaises(exception.InstanceDeployFailure,
                self.driver.rebuild,
                context=self.ctx, instance=instance, image_meta=image_meta,
                injected_files=None, admin_password=None, bdms=None,
                detach_block_devices=None, attach_block_devices=None)
diff --git a/nova/tests/unit/virt/ironic/test_patcher.py b/nova/tests/unit/virt/ironic/test_patcher.py
new file mode 100644
index 0000000000..a69e8cacfe
--- /dev/null
+++ b/nova/tests/unit/virt/ironic/test_patcher.py
@@ -0,0 +1,139 @@
+# Copyright 2014 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from nova import context as nova_context
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.virt.ironic import utils as ironic_utils
+from nova.virt.ironic import patcher
+
+CONF = cfg.CONF
+
+
+class IronicDriverFieldsTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(IronicDriverFieldsTestCase, self).setUp()
+ self.image_meta = ironic_utils.get_test_image_meta()
+ self.flavor = ironic_utils.get_test_flavor()
+ self.ctx = nova_context.get_admin_context()
+ self.instance = fake_instance.fake_instance_obj(self.ctx)
+ # Generic expected patches
+ self._expected_deploy_patch = [{'path': '/instance_info/image_source',
+ 'value': self.image_meta['id'],
+ 'op': 'add'},
+ {'path': '/instance_info/root_gb',
+ 'value': str(self.instance['root_gb']),
+ 'op': 'add'},
+ {'path': '/instance_info/swap_mb',
+ 'value': str(self.flavor['swap']),
+ 'op': 'add'}]
+ self._expected_cleanup_patch = []
+
+ def test_create_generic(self):
+ node = ironic_utils.get_test_node(driver='fake')
+ patcher_obj = patcher.create(node)
+ self.assertIsInstance(patcher_obj, patcher.GenericDriverFields)
+
+ def test_create_pxe(self):
+ node = ironic_utils.get_test_node(driver='pxe_fake')
+ patcher_obj = patcher.create(node)
+ self.assertIsInstance(patcher_obj, patcher.PXEDriverFields)
+
+ def test_generic_get_deploy_patch(self):
+ node = ironic_utils.get_test_node(driver='fake')
+ patch = patcher.create(node).get_deploy_patch(
+ self.instance, self.image_meta, self.flavor)
+ self.assertEqual(sorted(self._expected_deploy_patch), sorted(patch))
+
+ def test_generic_get_deploy_patch_ephemeral(self):
+ CONF.set_override('default_ephemeral_format', 'testfmt')
+ node = ironic_utils.get_test_node(driver='fake')
+ instance = fake_instance.fake_instance_obj(self.ctx,
+ ephemeral_gb=10)
+ patch = patcher.create(node).get_deploy_patch(
+ instance, self.image_meta, self.flavor)
+ expected = [{'path': '/instance_info/ephemeral_gb',
+ 'value': str(instance.ephemeral_gb),
+ 'op': 'add'},
+ {'path': '/instance_info/ephemeral_format',
+ 'value': 'testfmt',
+ 'op': 'add'}]
+ expected += self._expected_deploy_patch
+ self.assertEqual(sorted(expected), sorted(patch))
+
+ def test_generic_get_deploy_patch_preserve_ephemeral(self):
+ node = ironic_utils.get_test_node(driver='fake')
+ for preserve in [True, False]:
+ patch = patcher.create(node).get_deploy_patch(
+ self.instance, self.image_meta, self.flavor,
+ preserve_ephemeral=preserve)
+ expected = [{'path': '/instance_info/preserve_ephemeral',
+ 'value': str(preserve), 'op': 'add', }]
+ expected += self._expected_deploy_patch
+ self.assertEqual(sorted(expected), sorted(patch))
+
+ def test_generic_get_cleanup_patch(self):
+ node = ironic_utils.get_test_node(driver='fake')
+ patch = patcher.create(node).get_cleanup_patch(self.instance, None,
+ self.flavor)
+ self.assertEqual(self._expected_cleanup_patch, patch)
+
+ def test_pxe_get_deploy_patch(self):
+ node = ironic_utils.get_test_node(driver='pxe_fake')
+ extra_specs = self.flavor['extra_specs']
+ expected = [{'path': '/driver_info/pxe_deploy_kernel',
+ 'value': extra_specs['baremetal:deploy_kernel_id'],
+ 'op': 'add'},
+ {'path': '/driver_info/pxe_deploy_ramdisk',
+ 'value': extra_specs['baremetal:deploy_ramdisk_id'],
+ 'op': 'add'}]
+ expected += self._expected_deploy_patch
+ patch = patcher.create(node).get_deploy_patch(
+ self.instance, self.image_meta, self.flavor)
+ self.assertEqual(sorted(expected), sorted(patch))
+
+ def test_pxe_get_deploy_patch_no_flavor_kernel_ramdisk_ids(self):
+ flavor = ironic_utils.get_test_flavor(extra_specs={})
+ node = ironic_utils.get_test_node(driver='pxe_fake')
+ patch = patcher.create(node).get_deploy_patch(
+ self.instance, self.image_meta, flavor)
+        # If there are no extra_specs, the patch should be exactly
+        # like a generic patch
+ self.assertEqual(sorted(self._expected_deploy_patch), sorted(patch))
+
+ def test_pxe_get_cleanup_patch(self):
+ driver_info = {'pxe_deploy_kernel': 'fake-kernel-id',
+ 'pxe_deploy_ramdisk': 'fake-ramdisk-id'}
+ node = ironic_utils.get_test_node(driver='pxe_fake',
+ driver_info=driver_info)
+ patch = patcher.create(node).get_cleanup_patch(self.instance, None,
+ self.flavor)
+ expected = [{'path': '/driver_info/pxe_deploy_kernel',
+ 'op': 'remove'},
+ {'path': '/driver_info/pxe_deploy_ramdisk',
+ 'op': 'remove'}]
+ self.assertEqual(sorted(expected), sorted(patch))
+
+ def test_pxe_get_cleanup_patch_no_flavor_kernel_ramdisk_ids(self):
+ self.flavor = ironic_utils.get_test_flavor(extra_specs={})
+ node = ironic_utils.get_test_node(driver='pxe_fake')
+ patch = patcher.create(node).get_cleanup_patch(self.instance, None,
+ self.flavor)
+        # If there are no extra_specs, the patch should be exactly
+        # like a generic patch
+ self.assertEqual(self._expected_cleanup_patch, patch)
diff --git a/nova/tests/virt/ironic/utils.py b/nova/tests/unit/virt/ironic/utils.py
index cee0abffac..cee0abffac 100644
--- a/nova/tests/virt/ironic/utils.py
+++ b/nova/tests/unit/virt/ironic/utils.py
diff --git a/nova/tests/virt/libvirt/__init__.py b/nova/tests/unit/virt/libvirt/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/virt/libvirt/__init__.py
+++ b/nova/tests/unit/virt/libvirt/__init__.py
diff --git a/nova/tests/virt/libvirt/fake_imagebackend.py b/nova/tests/unit/virt/libvirt/fake_imagebackend.py
index 9a7cbdbdaf..9a7cbdbdaf 100644
--- a/nova/tests/virt/libvirt/fake_imagebackend.py
+++ b/nova/tests/unit/virt/libvirt/fake_imagebackend.py
diff --git a/nova/tests/virt/libvirt/fake_libvirt_utils.py b/nova/tests/unit/virt/libvirt/fake_libvirt_utils.py
index 01ab689b00..01ab689b00 100644
--- a/nova/tests/virt/libvirt/fake_libvirt_utils.py
+++ b/nova/tests/unit/virt/libvirt/fake_libvirt_utils.py
diff --git a/nova/tests/virt/libvirt/fakelibvirt.py b/nova/tests/unit/virt/libvirt/fakelibvirt.py
index 3a0e7ebefb..3a0e7ebefb 100644
--- a/nova/tests/virt/libvirt/fakelibvirt.py
+++ b/nova/tests/unit/virt/libvirt/fakelibvirt.py
diff --git a/nova/tests/unit/virt/libvirt/test_blockinfo.py b/nova/tests/unit/virt/libvirt/test_blockinfo.py
new file mode 100644
index 0000000000..f849bc59a7
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_blockinfo.py
@@ -0,0 +1,991 @@
+# Copyright 2010 OpenStack Foundation
+# Copyright 2012 University Of Minho
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+import mock
+
+from nova import block_device
+from nova.compute import arch
+from nova import context
+from nova import exception
+from nova import objects
+from nova import test
+from nova.tests.unit import fake_block_device
+import nova.tests.unit.image.fake
+from nova.virt import block_device as driver_block_device
+from nova.virt.libvirt import blockinfo
+
+
+class LibvirtBlockInfoTest(test.NoDBTestCase):
+
+ def setUp(self):
+ super(LibvirtBlockInfoTest, self).setUp()
+
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.get_admin_context()
+ nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
+ self.test_instance = {
+ 'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
+ 'memory_kb': '1024000',
+ 'basepath': '/some/path',
+ 'bridge_name': 'br100',
+ 'vcpus': 2,
+ 'project_id': 'fake',
+ 'bridge': 'br101',
+ 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ 'root_gb': 10,
+ 'ephemeral_gb': 20,
+ 'instance_type_id': 2, # m1.tiny
+ 'config_drive': None,
+ 'system_metadata': {
+ 'instance_type_memory_mb': 128,
+ 'instance_type_root_gb': 0,
+ 'instance_type_name': 'm1.micro',
+ 'instance_type_ephemeral_gb': 0,
+ 'instance_type_vcpus': 1,
+ 'instance_type_swap': 0,
+ 'instance_type_rxtx_factor': 1.0,
+ 'instance_type_flavorid': '1',
+ 'instance_type_vcpu_weight': None,
+ 'instance_type_id': 2,
+ }
+ }
+
+ def test_volume_in_mapping(self):
+ swap = {'device_name': '/dev/sdb',
+ 'swap_size': 1}
+ ephemerals = [{'device_type': 'disk', 'guest_format': 'ext3',
+ 'device_name': '/dev/sdc1', 'size': 10},
+ {'disk_bus': 'ide', 'guest_format': None,
+ 'device_name': '/dev/sdd', 'size': 10}]
+ block_device_mapping = [{'mount_device': '/dev/sde',
+ 'device_path': 'fake_device'},
+ {'mount_device': '/dev/sdf',
+ 'device_path': 'fake_device'}]
+ block_device_info = {
+ 'root_device_name': '/dev/sda',
+ 'swap': swap,
+ 'ephemerals': ephemerals,
+ 'block_device_mapping': block_device_mapping}
+
+ def _assert_volume_in_mapping(device_name, true_or_false):
+ self.assertEqual(
+ true_or_false,
+ block_device.volume_in_mapping(device_name,
+ block_device_info))
+
+ _assert_volume_in_mapping('sda', False)
+ _assert_volume_in_mapping('sdb', True)
+ _assert_volume_in_mapping('sdc1', True)
+ _assert_volume_in_mapping('sdd', True)
+ _assert_volume_in_mapping('sde', True)
+ _assert_volume_in_mapping('sdf', True)
+ _assert_volume_in_mapping('sdg', False)
+ _assert_volume_in_mapping('sdh1', False)
+
+ def test_find_disk_dev(self):
+ mapping = {
+ "disk.local": {
+ 'dev': 'sda',
+ 'bus': 'scsi',
+ 'type': 'disk',
+ },
+ "disk.swap": {
+ 'dev': 'sdc',
+ 'bus': 'scsi',
+ 'type': 'disk',
+ },
+ }
+
+ dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi')
+ self.assertEqual('sdb', dev)
+
+ dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi',
+ last_device=True)
+ self.assertEqual('sdz', dev)
+
+ dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'virtio')
+ self.assertEqual('vda', dev)
+
+ dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'fdc')
+ self.assertEqual('fda', dev)
+
+ def test_get_next_disk_dev(self):
+ mapping = {}
+ mapping['disk.local'] = blockinfo.get_next_disk_info(mapping,
+ 'virtio')
+ self.assertEqual({'dev': 'vda', 'bus': 'virtio', 'type': 'disk'},
+ mapping['disk.local'])
+
+ mapping['disk.swap'] = blockinfo.get_next_disk_info(mapping,
+ 'virtio')
+ self.assertEqual({'dev': 'vdb', 'bus': 'virtio', 'type': 'disk'},
+ mapping['disk.swap'])
+
+ mapping['disk.config'] = blockinfo.get_next_disk_info(mapping,
+ 'ide',
+ 'cdrom',
+ True)
+ self.assertEqual({'dev': 'hdd', 'bus': 'ide', 'type': 'cdrom'},
+ mapping['disk.config'])
+
+ def test_get_next_disk_dev_boot_index(self):
+ info = blockinfo.get_next_disk_info({}, 'virtio', boot_index=-1)
+ self.assertEqual({'dev': 'vda', 'bus': 'virtio', 'type': 'disk'}, info)
+
+ info = blockinfo.get_next_disk_info({}, 'virtio', boot_index=2)
+ self.assertEqual({'dev': 'vda', 'bus': 'virtio',
+ 'type': 'disk', 'boot_index': '2'},
+ info)
+
+ def test_get_disk_mapping_simple(self):
+ # The simplest possible disk mapping setup, all defaults
+
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide")
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'}
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_simple_rootdev(self):
+ # A simple disk mapping setup, but with custom root device name
+
+ instance_ref = objects.Instance(**self.test_instance)
+ block_device_info = {
+ 'root_device_name': '/dev/sda'
+ }
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'scsi', 'dev': 'sda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
+ 'root': {'bus': 'scsi', 'dev': 'sda',
+ 'type': 'disk', 'boot_index': '1'}
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_rescue(self):
+ # A simple disk mapping setup, but in rescue mode
+
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ rescue=True)
+
+ expect = {
+ 'disk.rescue': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_lxc(self):
+ # A simple disk mapping setup, but for lxc
+
+ self.test_instance['ephemeral_gb'] = 0
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("lxc", instance_ref,
+ "lxc", "lxc",
+ None)
+ expect = {
+ 'disk': {'bus': 'lxc', 'dev': None,
+ 'type': 'disk', 'boot_index': '1'},
+ 'root': {'bus': 'lxc', 'dev': None,
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_simple_iso(self):
+ # A simple disk mapping setup, but with a ISO for root device
+
+ instance_ref = objects.Instance(**self.test_instance)
+ image_meta = {'disk_format': 'iso'}
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ None,
+ image_meta)
+
+ expect = {
+ 'disk': {'bus': 'ide', 'dev': 'hda',
+ 'type': 'cdrom', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
+ 'root': {'bus': 'ide', 'dev': 'hda',
+ 'type': 'cdrom', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_simple_swap(self):
+ # A simple disk mapping setup, but with a swap device added
+
+ self.test_instance['system_metadata']['instance_type_swap'] = 5
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide")
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'disk.swap': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_simple_configdrive(self):
+ # A simple disk mapping setup, but with configdrive added
+        # It's necessary to check whether the architecture is Power,
+        # because Power doesn't support ide, so libvirt translates
+        # all ide calls to scsi
+
+ self.flags(force_config_drive=True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide")
+
+        # The last device is selected for this; on x86 it is the last
+        # ide device (hdd). Since Power only supports scsi, the last
+        # device is sdz
+
+ bus_ppc = ("scsi", "sdz")
+ expect_bus = {"ppc": bus_ppc, "ppc64": bus_ppc}
+
+ bus, dev = expect_bus.get(blockinfo.libvirt_utils.get_arch({}),
+ ("ide", "hdd"))
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'disk.config': {'bus': bus, 'dev': dev, 'type': 'cdrom'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'}
+ }
+
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_cdrom_configdrive(self):
+ # A simple disk mapping setup, with configdrive added as cdrom
+        # It's necessary to check whether the architecture is Power,
+        # because Power doesn't support ide, so libvirt translates
+        # all ide calls to scsi
+
+ self.flags(force_config_drive=True)
+ self.flags(config_drive_format='iso9660')
+
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide")
+
+ bus_ppc = ("scsi", "sdz")
+ expect_bus = {"ppc": bus_ppc, "ppc64": bus_ppc}
+
+ bus, dev = expect_bus.get(blockinfo.libvirt_utils.get_arch({}),
+ ("ide", "hdd"))
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'disk.config': {'bus': bus, 'dev': dev, 'type': 'cdrom'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'}
+ }
+
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_disk_configdrive(self):
+ # A simple disk mapping setup, with configdrive added as disk
+
+ self.flags(force_config_drive=True)
+ self.flags(config_drive_format='vfat')
+
+ instance_ref = objects.Instance(**self.test_instance)
+
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide")
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'disk.config': {'bus': 'virtio', 'dev': 'vdz', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_ephemeral(self):
+ # A disk mapping with ephemeral devices
+ self.test_instance['system_metadata']['instance_type_swap'] = 5
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'ephemerals': [
+ {'device_type': 'disk', 'guest_format': 'ext3',
+ 'device_name': '/dev/vdb', 'size': 10},
+ {'disk_bus': 'ide', 'guest_format': None,
+ 'device_name': '/dev/vdc', 'size': 10},
+ {'device_type': 'floppy',
+ 'device_name': '/dev/vdd', 'size': 10},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb',
+ 'type': 'disk', 'format': 'ext3'},
+ 'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'},
+ 'disk.eph2': {'bus': 'virtio', 'dev': 'vdd', 'type': 'floppy'},
+ 'disk.swap': {'bus': 'virtio', 'dev': 'vde', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_custom_swap(self):
+ # A disk mapping with a swap device at position vdb. This
+ # should cause disk.local to be removed
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'swap': {'device_name': '/dev/vdb',
+ 'swap_size': 10},
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.swap': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_blockdev_root(self):
+ # A disk mapping with a blockdev replacing the default root
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'block_device_mapping': [
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vda",
+ 'boot_index': 0,
+ 'device_type': 'disk',
+ 'delete_on_termination': True},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ '/dev/vda': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_blockdev_eph(self):
+ # A disk mapping with a blockdev replacing the ephemeral device
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'block_device_mapping': [
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vdb",
+ 'boot_index': -1,
+ 'delete_on_termination': True},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_blockdev_many(self):
+ # A disk mapping with a blockdev replacing all devices
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'block_device_mapping': [
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vda",
+ 'boot_index': 0,
+ 'disk_bus': 'scsi',
+ 'delete_on_termination': True},
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vdb",
+ 'boot_index': -1,
+ 'delete_on_termination': True},
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vdc",
+ 'boot_index': -1,
+ 'device_type': 'cdrom',
+ 'delete_on_termination': True},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ '/dev/vda': {'bus': 'scsi', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
+ '/dev/vdc': {'bus': 'virtio', 'dev': 'vdc', 'type': 'cdrom'},
+ 'root': {'bus': 'scsi', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_complex(self):
+ # The strangest possible disk mapping setup
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'root_device_name': '/dev/vdf',
+ 'swap': {'device_name': '/dev/vdy',
+ 'swap_size': 10},
+ 'ephemerals': [
+ {'device_type': 'disk', 'guest_format': 'ext3',
+ 'device_name': '/dev/vdb', 'size': 10},
+ {'disk_bus': 'ide', 'guest_format': None,
+ 'device_name': '/dev/vdc', 'size': 10},
+ ],
+ 'block_device_mapping': [
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vda",
+ 'boot_index': 1,
+ 'delete_on_termination': True},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide",
+ block_device_info)
+
+ expect = {
+ 'disk': {'bus': 'virtio', 'dev': 'vdf',
+ 'type': 'disk', 'boot_index': '1'},
+ '/dev/vda': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '2'},
+ 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb',
+ 'type': 'disk', 'format': 'ext3'},
+ 'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'},
+ 'disk.swap': {'bus': 'virtio', 'dev': 'vdy', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vdf',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ self.assertEqual(expect, mapping)
+
+ def test_get_disk_mapping_updates_original(self):
+ instance_ref = objects.Instance(**self.test_instance)
+
+ block_device_info = {
+ 'root_device_name': '/dev/vda',
+ 'swap': {'device_name': '/dev/vdb',
+ 'device_type': 'really_lame_type',
+ 'swap_size': 10},
+ 'ephemerals': [{'disk_bus': 'no_such_bus',
+ 'device_type': 'yeah_right',
+ 'device_name': '/dev/vdc', 'size': 10}],
+ 'block_device_mapping': [
+ {'connection_info': "fake",
+ 'mount_device': None,
+ 'device_type': 'lawnmower',
+ 'delete_on_termination': True}]
+ }
+ expected_swap = {'device_name': '/dev/vdb', 'disk_bus': 'virtio',
+ 'device_type': 'disk', 'swap_size': 10}
+ expected_ephemeral = {'disk_bus': 'virtio',
+ 'device_type': 'disk',
+ 'device_name': '/dev/vdc', 'size': 10}
+ expected_bdm = {'connection_info': "fake",
+ 'mount_device': '/dev/vdd',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'delete_on_termination': True}
+
+ blockinfo.get_disk_mapping("kvm", instance_ref,
+ "virtio", "ide", block_device_info)
+
+ self.assertEqual(expected_swap, block_device_info['swap'])
+ self.assertEqual(expected_ephemeral,
+ block_device_info['ephemerals'][0])
+ self.assertEqual(expected_bdm,
+ block_device_info['block_device_mapping'][0])
+
+ def test_get_disk_bus(self):
+ expected = (
+ (arch.X86_64, 'disk', 'virtio'),
+ (arch.X86_64, 'cdrom', 'ide'),
+ (arch.X86_64, 'floppy', 'fdc'),
+ (arch.PPC, 'disk', 'virtio'),
+ (arch.PPC, 'cdrom', 'scsi'),
+ (arch.PPC64, 'disk', 'virtio'),
+ (arch.PPC64, 'cdrom', 'scsi')
+ )
+ for guestarch, dev, res in expected:
+ with mock.patch.object(blockinfo.libvirt_utils,
+ 'get_arch',
+ return_value=guestarch):
+ bus = blockinfo.get_disk_bus_for_device_type('kvm',
+ device_type=dev)
+ self.assertEqual(res, bus)
+
+ expected = (
+ ('scsi', None, 'disk', 'scsi'),
+ (None, 'scsi', 'cdrom', 'scsi'),
+ ('usb', None, 'disk', 'usb')
+ )
+ for dbus, cbus, dev, res in expected:
+ image_meta = {'properties': {'hw_disk_bus': dbus,
+ 'hw_cdrom_bus': cbus}}
+ bus = blockinfo.get_disk_bus_for_device_type('kvm',
+ image_meta,
+ device_type=dev)
+ self.assertEqual(res, bus)
+
+ image_meta = {'properties': {'hw_disk_bus': 'xen'}}
+ self.assertRaises(exception.UnsupportedHardware,
+ blockinfo.get_disk_bus_for_device_type,
+ 'kvm',
+ image_meta)
+
+ def test_success_get_disk_bus_for_disk_dev(self):
+ expected = (
+ ('ide', ("kvm", "hda")),
+ ('scsi', ("kvm", "sdf")),
+ ('virtio', ("kvm", "vds")),
+ ('fdc', ("kvm", "fdc")),
+ ('uml', ("kvm", "ubd")),
+ ('xen', ("xen", "sdf")),
+ ('xen', ("xen", "xvdb"))
+ )
+ for res, args in expected:
+ self.assertEqual(res, blockinfo.get_disk_bus_for_disk_dev(*args))
+
+ def test_fail_get_disk_bus_for_disk_dev(self):
+ self.assertRaises(exception.NovaException,
+ blockinfo.get_disk_bus_for_disk_dev, 'inv', 'val')
+
+ def test_get_config_drive_type_default(self):
+ config_drive_type = blockinfo.get_config_drive_type()
+ self.assertEqual('cdrom', config_drive_type)
+
+ def test_get_config_drive_type_cdrom(self):
+ self.flags(config_drive_format='iso9660')
+ config_drive_type = blockinfo.get_config_drive_type()
+ self.assertEqual('cdrom', config_drive_type)
+
+ def test_get_config_drive_type_disk(self):
+ self.flags(config_drive_format='vfat')
+ config_drive_type = blockinfo.get_config_drive_type()
+ self.assertEqual('disk', config_drive_type)
+
+ def test_get_config_drive_type_improper_value(self):
+ self.flags(config_drive_format='test')
+ self.assertRaises(exception.ConfigDriveUnknownFormat,
+ blockinfo.get_config_drive_type)
+
+ def test_get_info_from_bdm(self):
+ bdms = [{'device_name': '/dev/vds', 'device_type': 'disk',
+ 'disk_bus': 'usb', 'swap_size': 4},
+ {'device_type': 'disk', 'guest_format': 'ext3',
+ 'device_name': '/dev/vdb', 'size': 2},
+ {'disk_bus': 'ide', 'guest_format': None,
+ 'device_name': '/dev/vdc', 'size': 3},
+ {'connection_info': "fake",
+ 'mount_device': "/dev/sdr",
+ 'disk_bus': 'lame_bus',
+ 'device_type': 'cdrom',
+ 'boot_index': 0,
+ 'delete_on_termination': True},
+ {'connection_info': "fake",
+ 'mount_device': "/dev/vdo",
+ 'disk_bus': 'scsi',
+ 'boot_index': 1,
+ 'device_type': 'lame_type',
+ 'delete_on_termination': True}]
+ expected = [{'dev': 'vds', 'type': 'disk', 'bus': 'usb'},
+ {'dev': 'vdb', 'type': 'disk',
+ 'bus': 'virtio', 'format': 'ext3'},
+ {'dev': 'vdc', 'type': 'disk', 'bus': 'ide'},
+ {'dev': 'sdr', 'type': 'cdrom',
+ 'bus': 'scsi', 'boot_index': '1'},
+ {'dev': 'vdo', 'type': 'disk',
+ 'bus': 'scsi', 'boot_index': '2'}]
+
+ for bdm, expected in zip(bdms, expected):
+ self.assertEqual(expected,
+ blockinfo.get_info_from_bdm('kvm', bdm, {}))
+
+ # Test that passed bus and type are considered
+ bdm = {'device_name': '/dev/vda'}
+ expected = {'dev': 'vda', 'type': 'disk', 'bus': 'ide'}
+ self.assertEqual(
+ expected, blockinfo.get_info_from_bdm('kvm', bdm, {},
+ disk_bus='ide',
+ dev_type='disk'))
+
+ # Test that lame bus values are defaulted properly
+ bdm = {'disk_bus': 'lame_bus', 'device_type': 'cdrom'}
+ with mock.patch.object(blockinfo,
+ 'get_disk_bus_for_device_type',
+ return_value='ide') as get_bus:
+ blockinfo.get_info_from_bdm('kvm', bdm, {})
+ get_bus.assert_called_once_with('kvm', None, 'cdrom')
+
+ # Test that missing device is defaulted as expected
+ bdm = {'disk_bus': 'ide', 'device_type': 'cdrom'}
+ expected = {'dev': 'vdd', 'type': 'cdrom', 'bus': 'ide'}
+ mapping = {'root': {'dev': 'vda'}}
+ with mock.patch.object(blockinfo,
+ 'find_disk_dev_for_disk_bus',
+ return_value='vdd') as find_dev:
+ got = blockinfo.get_info_from_bdm(
+ 'kvm', bdm, mapping, assigned_devices=['vdb', 'vdc'])
+ find_dev.assert_called_once_with(
+ {'root': {'dev': 'vda'},
+ 'vdb': {'dev': 'vdb'},
+ 'vdc': {'dev': 'vdc'}}, 'ide')
+ self.assertEqual(expected, got)
+
+ def test_get_device_name(self):
+ bdm_obj = objects.BlockDeviceMapping(self.context,
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vda',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'volume_id': 'fake-volume-id-1',
+ 'boot_index': 0}))
+ self.assertEqual('/dev/vda', blockinfo.get_device_name(bdm_obj))
+
+ driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm_obj)
+ self.assertEqual('/dev/vda', blockinfo.get_device_name(driver_bdm))
+
+ bdm_obj.device_name = None
+ self.assertIsNone(blockinfo.get_device_name(bdm_obj))
+
+ driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm_obj)
+ self.assertIsNone(blockinfo.get_device_name(driver_bdm))
+
+ @mock.patch('nova.virt.libvirt.blockinfo.find_disk_dev_for_disk_bus',
+ return_value='vda')
+ @mock.patch('nova.virt.libvirt.blockinfo.get_disk_bus_for_disk_dev',
+ return_value='virtio')
+ def test_get_root_info_no_bdm(self, mock_get_bus, mock_find_dev):
+ blockinfo.get_root_info('kvm', None, None, 'virtio', 'ide')
+ mock_find_dev.assert_called_once_with({}, 'virtio')
+
+ blockinfo.get_root_info('kvm', None, None, 'virtio', 'ide',
+ root_device_name='/dev/vda')
+ mock_get_bus.assert_called_once_with('kvm', '/dev/vda')
+
+ @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
+ def test_get_root_info_bdm(self, mock_get_info):
+ root_bdm = {'mount_device': '/dev/vda',
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk'}
+ # No root_device_name
+ blockinfo.get_root_info('kvm', None, root_bdm, 'virtio', 'ide')
+ mock_get_info.assert_called_once_with('kvm', root_bdm, {}, 'virtio')
+ mock_get_info.reset_mock()
+ # Both device names
+ blockinfo.get_root_info('kvm', None, root_bdm, 'virtio', 'ide',
+ root_device_name='sda')
+ mock_get_info.assert_called_once_with('kvm', root_bdm, {}, 'virtio')
+ mock_get_info.reset_mock()
+ # Missing device names
+ del root_bdm['mount_device']
+ blockinfo.get_root_info('kvm', None, root_bdm, 'virtio', 'ide',
+ root_device_name='sda')
+ mock_get_info.assert_called_once_with('kvm',
+ {'device_name': 'sda',
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk'},
+ {}, 'virtio')
+
+ def test_get_boot_order_simple(self):
+ disk_info = {
+ 'disk_bus': 'virtio',
+ 'cdrom_bus': 'ide',
+ 'mapping': {
+ 'disk': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ 'root': {'bus': 'virtio', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ }
+ expected_order = ['hd']
+ self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))
+
+ def test_get_boot_order_complex(self):
+ disk_info = {
+ 'disk_bus': 'virtio',
+ 'cdrom_bus': 'ide',
+ 'mapping': {
+ 'disk': {'bus': 'virtio', 'dev': 'vdf',
+ 'type': 'disk', 'boot_index': '1'},
+ '/dev/hda': {'bus': 'ide', 'dev': 'hda',
+ 'type': 'cdrom', 'boot_index': '3'},
+ '/dev/fda': {'bus': 'fdc', 'dev': 'fda',
+ 'type': 'floppy', 'boot_index': '2'},
+ 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb',
+ 'type': 'disk', 'format': 'ext3'},
+ 'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'},
+ 'disk.swap': {'bus': 'virtio', 'dev': 'vdy', 'type': 'disk'},
+ 'root': {'bus': 'virtio', 'dev': 'vdf',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ }
+ expected_order = ['hd', 'fd', 'cdrom']
+ self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))
+
+ def test_get_boot_order_overlapping(self):
+ disk_info = {
+ 'disk_bus': 'virtio',
+ 'cdrom_bus': 'ide',
+ 'mapping': {
+ '/dev/vda': {'bus': 'scsi', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb',
+ 'type': 'disk', 'boot_index': '2'},
+ '/dev/vdc': {'bus': 'virtio', 'dev': 'vdc',
+ 'type': 'cdrom', 'boot_index': '3'},
+ 'root': {'bus': 'scsi', 'dev': 'vda',
+ 'type': 'disk', 'boot_index': '1'},
+ }
+ }
+ expected_order = ['hd', 'cdrom']
+ self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))
+
+
+class DefaultDeviceNamesTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(DefaultDeviceNamesTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ self.instance = {
+ 'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
+ 'memory_kb': '1024000',
+ 'basepath': '/some/path',
+ 'bridge_name': 'br100',
+ 'vcpus': 2,
+ 'project_id': 'fake',
+ 'bridge': 'br101',
+ 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ 'root_gb': 10,
+ 'ephemeral_gb': 20,
+ 'instance_type_id': 2}
+ self.root_device_name = '/dev/vda'
+ self.virt_type = 'kvm'
+ self.flavor = {'swap': 4}
+ self.patchers = []
+ self.patchers.append(mock.patch('nova.compute.flavors.extract_flavor',
+ return_value=self.flavor))
+ self.patchers.append(mock.patch(
+ 'nova.objects.block_device.BlockDeviceMapping.save'))
+ for patcher in self.patchers:
+ patcher.start()
+
+ self.ephemerals = [objects.BlockDeviceMapping(
+ self.context, **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vdb',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'delete_on_termination': True,
+ 'guest_format': None,
+ 'volume_size': 1,
+ 'boot_index': -1}))]
+
+ self.swap = [objects.BlockDeviceMapping(
+ self.context, **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 2, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vdc',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'delete_on_termination': True,
+ 'guest_format': 'swap',
+ 'volume_size': 1,
+ 'boot_index': -1}))]
+
+ self.block_device_mapping = [
+ objects.BlockDeviceMapping(self.context,
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vda',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'volume_id': 'fake-volume-id-1',
+ 'boot_index': 0})),
+ objects.BlockDeviceMapping(self.context,
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 4, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vdd',
+ 'source_type': 'snapshot',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'destination_type': 'volume',
+ 'snapshot_id': 'fake-snapshot-id-1',
+ 'boot_index': -1})),
+ objects.BlockDeviceMapping(self.context,
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 5, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vde',
+ 'source_type': 'blank',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'destination_type': 'volume',
+ 'boot_index': -1}))]
+
+ def tearDown(self):
+ super(DefaultDeviceNamesTestCase, self).tearDown()
+ for patcher in self.patchers:
+ patcher.stop()
+
+ def _test_default_device_names(self, *block_device_lists):
+ blockinfo.default_device_names(self.virt_type,
+ self.context,
+ self.instance,
+ self.root_device_name,
+ *block_device_lists)
+
+ def test_only_block_device_mapping(self):
+ # Test no-op
+ original_bdm = copy.deepcopy(self.block_device_mapping)
+ self._test_default_device_names([], [], self.block_device_mapping)
+ for original, defaulted in zip(
+ original_bdm, self.block_device_mapping):
+ self.assertEqual(original.device_name, defaulted.device_name)
+
+ # Assert it defaults the missing one as expected
+ self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
+ self._test_default_device_names([], [], self.block_device_mapping)
+ self.assertEqual('/dev/vdd',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vde',
+ self.block_device_mapping[2]['device_name'])
+
+ def test_with_ephemerals(self):
+ # Test ephemeral gets assigned
+ self.ephemerals[0]['device_name'] = None
+ self._test_default_device_names(self.ephemerals, [],
+ self.block_device_mapping)
+ self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])
+
+ self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
+ self._test_default_device_names(self.ephemerals, [],
+ self.block_device_mapping)
+ self.assertEqual('/dev/vdd',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vde',
+ self.block_device_mapping[2]['device_name'])
+
+ def test_with_swap(self):
+ # Test swap only
+ self.swap[0]['device_name'] = None
+ self._test_default_device_names([], self.swap, [])
+ self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
+
+ # Test swap and block_device_mapping
+ self.swap[0]['device_name'] = None
+ self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
+ self._test_default_device_names([], self.swap,
+ self.block_device_mapping)
+ self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
+ self.assertEqual('/dev/vdd',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vde',
+ self.block_device_mapping[2]['device_name'])
+
+ def test_all_together(self):
+ # Test swap missing
+ self.swap[0]['device_name'] = None
+ self._test_default_device_names(self.ephemerals,
+ self.swap, self.block_device_mapping)
+ self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
+
+ # Test swap and eph missing
+ self.swap[0]['device_name'] = None
+ self.ephemerals[0]['device_name'] = None
+ self._test_default_device_names(self.ephemerals,
+ self.swap, self.block_device_mapping)
+ self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])
+ self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
+
+ # Test all missing
+ self.swap[0]['device_name'] = None
+ self.ephemerals[0]['device_name'] = None
+ self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
+ self._test_default_device_names(self.ephemerals,
+ self.swap, self.block_device_mapping)
+ self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])
+ self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
+ self.assertEqual('/dev/vdd',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vde',
+ self.block_device_mapping[2]['device_name'])
diff --git a/nova/tests/unit/virt/libvirt/test_config.py b/nova/tests/unit/virt/libvirt/test_config.py
new file mode 100644
index 0000000000..192d075640
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_config.py
@@ -0,0 +1,2344 @@
+# Copyright (C) 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from lxml import etree
+from oslo.utils import units
+
+from nova.compute import arch
+from nova import test
+from nova.tests.unit import matchers
+from nova.virt.libvirt import config
+
+
+class LibvirtConfigBaseTest(test.NoDBTestCase):
+ def assertXmlEqual(self, expectedXmlstr, actualXmlstr):
+ self.assertThat(actualXmlstr, matchers.XMLMatches(expectedXmlstr))
+
+
+class LibvirtConfigTest(LibvirtConfigBaseTest):
+
+ def test_config_plain(self):
+ obj = config.LibvirtConfigObject(root_name="demo")
+ xml = obj.to_xml()
+
+ self.assertXmlEqual(xml, "<demo/>")
+
+ def test_config_ns(self):
+ obj = config.LibvirtConfigObject(root_name="demo", ns_prefix="foo",
+ ns_uri="http://example.com/foo")
+ xml = obj.to_xml()
+
+ self.assertXmlEqual(xml, """
+ <foo:demo xmlns:foo="http://example.com/foo"/>""")
+
+ def test_config_text(self):
+ obj = config.LibvirtConfigObject(root_name="demo")
+ root = obj.format_dom()
+ root.append(obj._text_node("foo", "bar"))
+
+ xml = etree.tostring(root)
+ self.assertXmlEqual(xml, "<demo><foo>bar</foo></demo>")
+
+ def test_config_text_unicode(self):
+ obj = config.LibvirtConfigObject(root_name='demo')
+ root = obj.format_dom()
+ root.append(obj._text_node('foo', u'\xF0\x9F\x92\xA9'))
+ self.assertXmlEqual('<demo><foo>&#240;&#159;&#146;&#169;</foo></demo>',
+ etree.tostring(root))
+
+ def test_config_parse(self):
+ inxml = "<demo><foo/></demo>"
+ obj = config.LibvirtConfigObject(root_name="demo")
+ obj.parse_str(inxml)
+
+
+class LibvirtConfigCapsTest(LibvirtConfigBaseTest):
+
+ def test_config_host(self):
+ xmlin = """
+ <capabilities>
+ <host>
+ <uuid>c7a5fdbd-edaf-9455-926a-d65c16db1809</uuid>
+ <cpu>
+ <arch>x86_64</arch>
+ <model>Opteron_G3</model>
+ <vendor>AMD</vendor>
+ <topology sockets='1' cores='4' threads='1'/>
+ <feature name='ibs'/>
+ <feature name='osvw'/>
+ </cpu>
+ <topology>
+ <cells num='2'>
+ <cell id='0'>
+ <memory unit='KiB'>4048280</memory>
+ <pages unit='KiB' size='4'>1011941</pages>
+ <pages unit='KiB' size='2048'>0</pages>
+ <cpus num='4'>
+ <cpu id='0' socket_id='0' core_id='0' siblings='0'/>
+ <cpu id='1' socket_id='0' core_id='1' siblings='1'/>
+ <cpu id='2' socket_id='0' core_id='2' siblings='2'/>
+ <cpu id='3' socket_id='0' core_id='3' siblings='3'/>
+ </cpus>
+ </cell>
+ <cell id='1'>
+ <memory unit='KiB'>4127684</memory>
+ <pages unit='KiB' size='4'>1031921</pages>
+ <pages unit='KiB' size='2048'>0</pages>
+ <cpus num='4'>
+ <cpu id='4' socket_id='1' core_id='0' siblings='4'/>
+ <cpu id='5' socket_id='1' core_id='1' siblings='5'/>
+ <cpu id='6' socket_id='1' core_id='2' siblings='6'/>
+ <cpu id='7' socket_id='1' core_id='3' siblings='7'/>
+ </cpus>
+ </cell>
+ </cells>
+ </topology>
+ </host>
+ <guest>
+ <os_type>hvm</os_type>
+ <arch name='x86_64'/>
+ </guest>
+ <guest>
+ <os_type>hvm</os_type>
+ <arch name='i686'/>
+ </guest>
+ </capabilities>"""
+
+ obj = config.LibvirtConfigCaps()
+ obj.parse_str(xmlin)
+
+ self.assertIsInstance(obj.host, config.LibvirtConfigCapsHost)
+ self.assertEqual(obj.host.uuid, "c7a5fdbd-edaf-9455-926a-d65c16db1809")
+
+ xmlout = obj.to_xml()
+
+ self.assertXmlEqual(xmlin, xmlout)
+
+
+class LibvirtConfigGuestTimerTest(LibvirtConfigBaseTest):
+ def test_config_platform(self):
+ obj = config.LibvirtConfigGuestTimer()
+ obj.track = "host"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <timer name="platform" track="host"/>
+ """)
+
+ def test_config_pit(self):
+ obj = config.LibvirtConfigGuestTimer()
+ obj.name = "pit"
+ obj.tickpolicy = "discard"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <timer name="pit" tickpolicy="discard"/>
+ """)
+
+ def test_config_hpet(self):
+ obj = config.LibvirtConfigGuestTimer()
+ obj.name = "hpet"
+ obj.present = False
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <timer name="hpet" present="no"/>
+ """)
+
+
+class LibvirtConfigGuestClockTest(LibvirtConfigBaseTest):
+ def test_config_utc(self):
+ obj = config.LibvirtConfigGuestClock()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <clock offset="utc"/>
+ """)
+
+ def test_config_localtime(self):
+ obj = config.LibvirtConfigGuestClock()
+ obj.offset = "localtime"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <clock offset="localtime"/>
+ """)
+
+ def test_config_timezone(self):
+ obj = config.LibvirtConfigGuestClock()
+ obj.offset = "timezone"
+ obj.timezone = "EDT"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <clock offset="timezone" timezone="EDT"/>
+ """)
+
+ def test_config_variable(self):
+ obj = config.LibvirtConfigGuestClock()
+ obj.offset = "variable"
+ obj.adjustment = "123456"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <clock offset="variable" adjustment="123456"/>
+ """)
+
+ def test_config_timers(self):
+ obj = config.LibvirtConfigGuestClock()
+
+ tmpit = config.LibvirtConfigGuestTimer()
+ tmpit.name = "pit"
+ tmpit.tickpolicy = "discard"
+
+ tmrtc = config.LibvirtConfigGuestTimer()
+ tmrtc.name = "rtc"
+ tmrtc.tickpolicy = "merge"
+
+ obj.add_timer(tmpit)
+ obj.add_timer(tmrtc)
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <clock offset="utc">
+ <timer name="pit" tickpolicy="discard"/>
+ <timer name="rtc" tickpolicy="merge"/>
+ </clock>
+ """)
+
+
+class LibvirtConfigCPUFeatureTest(LibvirtConfigBaseTest):
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigCPUFeature("mtrr")
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <feature name="mtrr"/>
+ """)
+
+
+class LibvirtConfigGuestCPUFeatureTest(LibvirtConfigBaseTest):
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigGuestCPUFeature("mtrr")
+ obj.policy = "force"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <feature name="mtrr" policy="force"/>
+ """)
+
+
+class LibvirtConfigGuestCPUNUMATest(LibvirtConfigBaseTest):
+
+ def test_parse_dom(self):
+ xml = """
+ <numa>
+ <cell id="0" cpus="0-1" memory="1000000"/>
+ <cell id="1" cpus="2-3" memory="1500000"/>
+ </numa>
+ """
+ xmldoc = etree.fromstring(xml)
+ obj = config.LibvirtConfigGuestCPUNUMA()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual(2, len(obj.cells))
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigGuestCPUNUMA()
+
+ cell = config.LibvirtConfigGuestCPUNUMACell()
+ cell.id = 0
+ cell.cpus = set([0, 1])
+ cell.memory = 1000000
+
+ obj.cells.append(cell)
+
+ cell = config.LibvirtConfigGuestCPUNUMACell()
+ cell.id = 1
+ cell.cpus = set([2, 3])
+ cell.memory = 1500000
+
+ obj.cells.append(cell)
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <numa>
+ <cell id="0" cpus="0-1" memory="1000000"/>
+ <cell id="1" cpus="2-3" memory="1500000"/>
+ </numa>
+ """)
+
+
+class LibvirtConfigCPUTest(LibvirtConfigBaseTest):
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigCPU()
+ obj.model = "Penryn"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu>
+ <model>Penryn</model>
+ </cpu>
+ """)
+
+ def test_config_complex(self):
+ obj = config.LibvirtConfigCPU()
+ obj.model = "Penryn"
+ obj.vendor = "Intel"
+ obj.arch = arch.X86_64
+
+ obj.add_feature(config.LibvirtConfigCPUFeature("mtrr"))
+ obj.add_feature(config.LibvirtConfigCPUFeature("apic"))
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu>
+ <arch>x86_64</arch>
+ <model>Penryn</model>
+ <vendor>Intel</vendor>
+ <feature name="apic"/>
+ <feature name="mtrr"/>
+ </cpu>
+ """)
+
+ def test_only_uniq_cpu_featues(self):
+ obj = config.LibvirtConfigCPU()
+ obj.model = "Penryn"
+ obj.vendor = "Intel"
+ obj.arch = arch.X86_64
+
+ obj.add_feature(config.LibvirtConfigCPUFeature("mtrr"))
+ obj.add_feature(config.LibvirtConfigCPUFeature("apic"))
+ obj.add_feature(config.LibvirtConfigCPUFeature("apic"))
+ obj.add_feature(config.LibvirtConfigCPUFeature("mtrr"))
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu>
+ <arch>x86_64</arch>
+ <model>Penryn</model>
+ <vendor>Intel</vendor>
+ <feature name="apic"/>
+ <feature name="mtrr"/>
+ </cpu>
+ """)
+
+ def test_config_topology(self):
+ obj = config.LibvirtConfigCPU()
+ obj.model = "Penryn"
+ obj.sockets = 4
+ obj.cores = 4
+ obj.threads = 2
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu>
+ <model>Penryn</model>
+ <topology sockets="4" cores="4" threads="2"/>
+ </cpu>
+ """)
+
+
+class LibvirtConfigGuestCPUTest(LibvirtConfigBaseTest):
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigGuestCPU()
+ obj.model = "Penryn"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu match="exact">
+ <model>Penryn</model>
+ </cpu>
+ """)
+
+ def test_config_complex(self):
+ obj = config.LibvirtConfigGuestCPU()
+ obj.model = "Penryn"
+ obj.vendor = "Intel"
+ obj.arch = arch.X86_64
+ obj.mode = "custom"
+
+ obj.add_feature(config.LibvirtConfigGuestCPUFeature("mtrr"))
+ obj.add_feature(config.LibvirtConfigGuestCPUFeature("apic"))
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu mode="custom" match="exact">
+ <arch>x86_64</arch>
+ <model>Penryn</model>
+ <vendor>Intel</vendor>
+ <feature name="apic" policy="require"/>
+ <feature name="mtrr" policy="require"/>
+ </cpu>
+ """)
+
+ def test_config_host(self):
+ obj = config.LibvirtConfigGuestCPU()
+ obj.mode = "host-model"
+ obj.match = "exact"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu mode="host-model" match="exact"/>
+ """)
+
+ def test_config_host_with_numa(self):
+ obj = config.LibvirtConfigGuestCPU()
+ obj.mode = "host-model"
+ obj.match = "exact"
+
+ numa = config.LibvirtConfigGuestCPUNUMA()
+
+ cell = config.LibvirtConfigGuestCPUNUMACell()
+ cell.id = 0
+ cell.cpus = set([0, 1])
+ cell.memory = 1000000
+
+ numa.cells.append(cell)
+
+ cell = config.LibvirtConfigGuestCPUNUMACell()
+ cell.id = 1
+ cell.cpus = set([2, 3])
+ cell.memory = 1500000
+
+ numa.cells.append(cell)
+
+ obj.numa = numa
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <cpu mode="host-model" match="exact">
+ <numa>
+ <cell id="0" cpus="0-1" memory="1000000"/>
+ <cell id="1" cpus="2-3" memory="1500000"/>
+ </numa>
+ </cpu>
+ """)
+
+
+class LibvirtConfigGuestSMBIOSTest(LibvirtConfigBaseTest):
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigGuestSMBIOS()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <smbios mode="sysinfo"/>
+ """)
+
+
+class LibvirtConfigGuestSysinfoTest(LibvirtConfigBaseTest):
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigGuestSysinfo()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <sysinfo type="smbios"/>
+ """)
+
+ def test_config_bios(self):
+ obj = config.LibvirtConfigGuestSysinfo()
+ obj.bios_vendor = "Acme"
+ obj.bios_version = "6.6.6"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <sysinfo type="smbios">
+ <bios>
+ <entry name="vendor">Acme</entry>
+ <entry name="version">6.6.6</entry>
+ </bios>
+ </sysinfo>
+ """)
+
+ def test_config_system(self):
+ obj = config.LibvirtConfigGuestSysinfo()
+ obj.system_manufacturer = "Acme"
+ obj.system_product = "Wile Coyote"
+ obj.system_version = "6.6.6"
+ obj.system_serial = "123456"
+ obj.system_uuid = "c7a5fdbd-edaf-9455-926a-d65c16db1809"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <sysinfo type="smbios">
+ <system>
+ <entry name="manufacturer">Acme</entry>
+ <entry name="product">Wile Coyote</entry>
+ <entry name="version">6.6.6</entry>
+ <entry name="serial">123456</entry>
+ <entry name="uuid">c7a5fdbd-edaf-9455-926a-d65c16db1809</entry>
+ </system>
+ </sysinfo>
+ """)
+
+ def test_config_mixed(self):
+ obj = config.LibvirtConfigGuestSysinfo()
+ obj.bios_vendor = "Acme"
+ obj.system_manufacturer = "Acme"
+ obj.system_product = "Wile Coyote"
+ obj.system_uuid = "c7a5fdbd-edaf-9455-926a-d65c16db1809"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <sysinfo type="smbios">
+ <bios>
+ <entry name="vendor">Acme</entry>
+ </bios>
+ <system>
+ <entry name="manufacturer">Acme</entry>
+ <entry name="product">Wile Coyote</entry>
+ <entry name="uuid">c7a5fdbd-edaf-9455-926a-d65c16db1809</entry>
+ </system>
+ </sysinfo>
+ """)
+
+
+class LibvirtConfigGuestDiskTest(LibvirtConfigBaseTest):
+
+ def test_config_file(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = "file"
+ obj.source_path = "/tmp/hello"
+ obj.target_dev = "/dev/hda"
+ obj.target_bus = "ide"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="file" device="disk">
+ <source file="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hda"/>
+ </disk>""")
+
+ def test_config_file_parse(self):
+ xml = """<disk type="file" device="disk">
+ <source file="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hda"/>
+ </disk>"""
+ xmldoc = etree.fromstring(xml)
+
+ obj = config.LibvirtConfigGuestDisk()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual(obj.source_type, 'file')
+ self.assertEqual(obj.source_path, '/tmp/hello')
+ self.assertEqual(obj.target_dev, '/dev/hda')
+ self.assertEqual(obj.target_bus, 'ide')
+
+ def test_config_file_serial(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = "file"
+ obj.source_path = "/tmp/hello"
+ obj.target_dev = "/dev/hda"
+ obj.target_bus = "ide"
+ obj.serial = "7a97c4a3-6f59-41d4-bf47-191d7f97f8e9"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="file" device="disk">
+ <source file="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hda"/>
+ <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
+ </disk>""")
+
+ def test_config_file_serial_parse(self):
+ xml = """<disk type="file" device="disk">
+ <source file="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hda"/>
+ <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
+ </disk>"""
+ xmldoc = etree.fromstring(xml)
+
+ obj = config.LibvirtConfigGuestDisk()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual(obj.source_type, 'file')
+ self.assertEqual(obj.serial, '7a97c4a3-6f59-41d4-bf47-191d7f97f8e9')
+
+ def test_config_file_discard(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.driver_name = "qemu"
+ obj.driver_format = "qcow2"
+ obj.driver_cache = "none"
+ obj.driver_discard = "unmap"
+ obj.source_type = "file"
+ obj.source_path = "/tmp/hello.qcow2"
+ obj.target_dev = "/dev/hda"
+ obj.target_bus = "ide"
+ obj.serial = "7a97c4a3-6f59-41d4-bf47-191d7f97f8e9"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual("""
+ <disk type="file" device="disk">
+ <driver name="qemu" type="qcow2" cache="none" discard="unmap"/>
+ <source file="/tmp/hello.qcow2"/>
+ <target bus="ide" dev="/dev/hda"/>
+ <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
+ </disk>""", xml)
+
+ def test_config_file_discard_parse(self):
+ xml = """
+ <disk type="file" device="disk">
+ <driver name="qemu" type="qcow2" cache="none" discard="unmap"/>
+ <source file="/tmp/hello.qcow2"/>
+ <target bus="ide" dev="/dev/hda"/>
+ <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
+ </disk>"""
+ xmldoc = etree.fromstring(xml)
+
+ obj = config.LibvirtConfigGuestDisk()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual('unmap', obj.driver_discard)
+
+ def test_config_block(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = "block"
+ obj.source_path = "/tmp/hello"
+ obj.source_device = "cdrom"
+ obj.driver_name = "qemu"
+ obj.target_dev = "/dev/hdc"
+ obj.target_bus = "ide"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="block" device="cdrom">
+ <driver name="qemu"/>
+ <source dev="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hdc"/>
+ </disk>""")
+
+ def test_config_block_parse(self):
+ xml = """<disk type="block" device="cdrom">
+ <driver name="qemu"/>
+ <source dev="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hdc"/>
+ </disk>"""
+ xmldoc = etree.fromstring(xml)
+
+ obj = config.LibvirtConfigGuestDisk()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual(obj.source_type, 'block')
+ self.assertEqual(obj.source_path, '/tmp/hello')
+ self.assertEqual(obj.target_dev, '/dev/hdc')
+ self.assertEqual(obj.target_bus, 'ide')
+
+ def test_config_network(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = "network"
+ obj.source_protocol = "iscsi"
+ obj.source_name = "foo.bar.com"
+ obj.driver_name = "qemu"
+ obj.driver_format = "qcow2"
+ obj.target_dev = "/dev/hda"
+ obj.target_bus = "ide"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="network" device="disk">
+ <driver name="qemu" type="qcow2"/>
+ <source name="foo.bar.com" protocol="iscsi"/>
+ <target bus="ide" dev="/dev/hda"/>
+ </disk>""")
+
+ def test_config_network_parse(self):
+ xml = """<disk type="network" device="disk">
+ <driver name="qemu" type="qcow2"/>
+ <source name="foo.bar.com" protocol="iscsi"/>
+ <target bus="ide" dev="/dev/hda"/>
+ </disk>"""
+ xmldoc = etree.fromstring(xml)
+
+ obj = config.LibvirtConfigGuestDisk()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual(obj.source_type, 'network')
+ self.assertEqual(obj.source_protocol, 'iscsi')
+ self.assertEqual(obj.source_name, 'foo.bar.com')
+ self.assertEqual(obj.driver_name, 'qemu')
+ self.assertEqual(obj.driver_format, 'qcow2')
+ self.assertEqual(obj.target_dev, '/dev/hda')
+ self.assertEqual(obj.target_bus, 'ide')
+
+ def test_config_network_no_name(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = 'network'
+ obj.source_protocol = 'nbd'
+ obj.source_hosts = ['foo.bar.com']
+ obj.source_ports = [None]
+ obj.driver_name = 'qemu'
+ obj.driver_format = 'raw'
+ obj.target_dev = '/dev/vda'
+ obj.target_bus = 'virtio'
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="network" device="disk">
+ <driver name="qemu" type="raw"/>
+ <source protocol="nbd">
+ <host name="foo.bar.com"/>
+ </source>
+ <target bus="virtio" dev="/dev/vda"/>
+ </disk>""")
+
+ def test_config_network_multihost(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = 'network'
+ obj.source_protocol = 'rbd'
+ obj.source_name = 'pool/image'
+ obj.source_hosts = ['foo.bar.com', '::1', '1.2.3.4']
+ obj.source_ports = [None, '123', '456']
+ obj.driver_name = 'qemu'
+ obj.driver_format = 'raw'
+ obj.target_dev = '/dev/vda'
+ obj.target_bus = 'virtio'
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="network" device="disk">
+ <driver name="qemu" type="raw"/>
+ <source name="pool/image" protocol="rbd">
+ <host name="foo.bar.com"/>
+ <host name="::1" port="123"/>
+ <host name="1.2.3.4" port="456"/>
+ </source>
+ <target bus="virtio" dev="/dev/vda"/>
+ </disk>""")
+
+ def test_config_network_auth(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = "network"
+ obj.source_protocol = "rbd"
+ obj.source_name = "pool/image"
+ obj.driver_name = "qemu"
+ obj.driver_format = "raw"
+ obj.target_dev = "/dev/vda"
+ obj.target_bus = "virtio"
+ obj.auth_username = "foo"
+ obj.auth_secret_type = "ceph"
+ obj.auth_secret_uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="network" device="disk">
+ <driver name="qemu" type="raw"/>
+ <source name="pool/image" protocol="rbd"/>
+ <auth username="foo">
+ <secret type="ceph"
+ uuid="b38a3f43-4be2-4046-897f-b67c2f5e0147"/>
+ </auth>
+ <target bus="virtio" dev="/dev/vda"/>
+ </disk>""")
+
+ def test_config_iotune(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = "file"
+ obj.source_path = "/tmp/hello"
+ obj.target_dev = "/dev/hda"
+ obj.target_bus = "ide"
+ obj.disk_read_bytes_sec = 1024000
+ obj.disk_read_iops_sec = 1000
+ obj.disk_total_bytes_sec = 2048000
+ obj.disk_write_bytes_sec = 1024000
+ obj.disk_write_iops_sec = 1000
+ obj.disk_total_iops_sec = 2000
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="file" device="disk">
+ <source file="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hda"/>
+ <iotune>
+ <read_bytes_sec>1024000</read_bytes_sec>
+ <read_iops_sec>1000</read_iops_sec>
+ <write_bytes_sec>1024000</write_bytes_sec>
+ <write_iops_sec>1000</write_iops_sec>
+ <total_bytes_sec>2048000</total_bytes_sec>
+ <total_iops_sec>2000</total_iops_sec>
+ </iotune>
+ </disk>""")
+
+ def test_config_blockio(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = "file"
+ obj.source_path = "/tmp/hello"
+ obj.target_dev = "/dev/hda"
+ obj.target_bus = "ide"
+ obj.logical_block_size = "4096"
+ obj.physical_block_size = "4096"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual("""
+ <disk type="file" device="disk">
+ <source file="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hda"/>
+ <blockio logical_block_size="4096" physical_block_size="4096"/>
+ </disk>""", xml)
+
+
+class LibvirtConfigGuestSnapshotDiskTest(LibvirtConfigBaseTest):
+
+ def test_config_file(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = "file"
+ obj.source_path = "/tmp/hello"
+ obj.target_dev = "/dev/hda"
+ obj.target_bus = "ide"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="file" device="disk">
+ <source file="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hda"/>
+ </disk>""")
+
+ def test_config_file_parse(self):
+ xml = """<disk type="file" device="disk">
+ <source file="/tmp/hello"/>
+ <target bus="ide" dev="/dev/hda"/>
+ </disk>"""
+ xmldoc = etree.fromstring(xml)
+
+ obj = config.LibvirtConfigGuestDisk()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual(obj.source_type, 'file')
+ self.assertEqual(obj.source_path, '/tmp/hello')
+ self.assertEqual(obj.target_dev, '/dev/hda')
+ self.assertEqual(obj.target_bus, 'ide')
+
+
+class LibvirtConfigGuestDiskBackingStoreTest(LibvirtConfigBaseTest):
+
+ def test_config_file_parse(self):
+ xml = """<backingStore type='file'>
+ <driver name='qemu' type='qcow2'/>
+ <source file='/var/lib/libvirt/images/mid.qcow2'/>
+ <backingStore type='file'>
+ <driver name='qemu' type='qcow2'/>
+ <source file='/var/lib/libvirt/images/base.qcow2'/>
+ <backingStore/>
+ </backingStore>
+ </backingStore>
+ """
+ xmldoc = etree.fromstring(xml)
+
+ obj = config.LibvirtConfigGuestDiskBackingStore()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual(obj.driver_name, 'qemu')
+ self.assertEqual(obj.driver_format, 'qcow2')
+ self.assertEqual(obj.source_type, 'file')
+ self.assertEqual(obj.source_file, '/var/lib/libvirt/images/mid.qcow2')
+ self.assertEqual(obj.backing_store.driver_name, 'qemu')
+ self.assertEqual(obj.backing_store.source_type, 'file')
+ self.assertEqual(obj.backing_store.source_file,
+ '/var/lib/libvirt/images/base.qcow2')
+ self.assertIsNone(obj.backing_store.backing_store)
+
+ def test_config_network_parse(self):
+ xml = """<backingStore type='network' index='1'>
+ <format type='qcow2'/>
+ <source protocol='gluster' name='volume1/img1'>
+ <host name='host1' port='24007'/>
+ </source>
+ <backingStore type='network' index='2'>
+ <format type='qcow2'/>
+ <source protocol='gluster' name='volume1/img2'>
+ <host name='host1' port='24007'/>
+ </source>
+ <backingStore/>
+ </backingStore>
+ </backingStore>
+ """
+ xmldoc = etree.fromstring(xml)
+
+ obj = config.LibvirtConfigGuestDiskBackingStore()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual(obj.source_type, 'network')
+ self.assertEqual(obj.source_protocol, 'gluster')
+ self.assertEqual(obj.source_name, 'volume1/img1')
+ self.assertEqual(obj.source_hosts[0], 'host1')
+ self.assertEqual(obj.source_ports[0], '24007')
+ self.assertEqual(obj.index, '1')
+ self.assertEqual(obj.backing_store.source_name, 'volume1/img2')
+ self.assertEqual(obj.backing_store.index, '2')
+ self.assertEqual(obj.backing_store.source_hosts[0], 'host1')
+ self.assertEqual(obj.backing_store.source_ports[0], '24007')
+ self.assertIsNone(obj.backing_store.backing_store)
+
+
+class LibvirtConfigGuestFilesysTest(LibvirtConfigBaseTest):
+
+ def test_config_mount(self):
+ obj = config.LibvirtConfigGuestFilesys()
+ obj.source_type = "mount"
+ obj.source_dir = "/tmp/hello"
+ obj.target_dir = "/mnt"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <filesystem type="mount">
+ <source dir="/tmp/hello"/>
+ <target dir="/mnt"/>
+ </filesystem>""")
+
+
+class LibvirtConfigGuestInputTest(LibvirtConfigBaseTest):
+
+ def test_config_tablet(self):
+ obj = config.LibvirtConfigGuestInput()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <input type="tablet" bus="usb"/>""")
+
+
+class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest):
+
+ def test_config_graphics(self):
+ obj = config.LibvirtConfigGuestGraphics()
+ obj.type = "vnc"
+ obj.autoport = True
+ obj.keymap = "en_US"
+ obj.listen = "127.0.0.1"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <graphics type="vnc" autoport="yes" keymap="en_US" listen="127.0.0.1"/>
+ """)
+
+
+class LibvirtConfigGuestHostdev(LibvirtConfigBaseTest):
+ """Tests for the generic guest <hostdev> config.
+
+ NOTE(review): class name lacks the usual ``Test`` suffix used by the
+ sibling classes in this module; unittest discovery is unaffected since
+ it keys off the base class, not the name.
+ """
+
+ def test_config_pci_guest_host_dev(self):
+ # Constructor kwargs feed straight into the element attributes;
+ # managed defaults to "yes".
+ obj = config.LibvirtConfigGuestHostdev(mode='subsystem', type='pci')
+ xml = obj.to_xml()
+ expected = """
+ <hostdev mode="subsystem" type="pci" managed="yes"/>
+ """
+ self.assertXmlEqual(xml, expected)
+
+ def test_parse_GuestHostdev(self):
+ # Round-trip: attributes parsed back from XML.
+ xmldoc = """<hostdev mode="subsystem" type="pci" managed="yes"/>"""
+ obj = config.LibvirtConfigGuestHostdev()
+ obj.parse_str(xmldoc)
+ self.assertEqual(obj.mode, 'subsystem')
+ self.assertEqual(obj.type, 'pci')
+ self.assertEqual(obj.managed, 'yes')
+
+ def test_parse_GuestHostdev_non_pci(self):
+ # Non-PCI types must also parse; managed="no" is preserved.
+ xmldoc = """<hostdev mode="subsystem" type="usb" managed="no"/>"""
+ obj = config.LibvirtConfigGuestHostdev()
+ obj.parse_str(xmldoc)
+ self.assertEqual(obj.mode, 'subsystem')
+ self.assertEqual(obj.type, 'usb')
+ self.assertEqual(obj.managed, 'no')
+
+
+class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
+ """Tests for the PCI passthrough guest <hostdev> config.
+
+ NOTE(review): several method names carry a "hosdev" typo (hostdev);
+ left as-is since test discovery only requires the ``test_`` prefix.
+ """
+
+ # Shared fixture: bare addresses set on the object are expected to be
+ # serialized with an "0x" hex prefix.
+ expected = """
+ <hostdev mode="subsystem" type="pci" managed="yes">
+ <source>
+ <address bus="0x11" domain="0x1234" function="0x3"
+ slot="0x22" />
+ </source>
+ </hostdev>
+ """
+
+ def test_config_guest_hosdev_pci(self):
+ hostdev = config.LibvirtConfigGuestHostdevPCI()
+ hostdev.domain = "1234"
+ hostdev.bus = "11"
+ hostdev.slot = "22"
+ hostdev.function = "3"
+ xml = hostdev.to_xml()
+ self.assertXmlEqual(self.expected, xml)
+
+ def test_parse_guest_hosdev_pci(self):
+ # Parsing keeps the "0x"-prefixed string form of each address part.
+ xmldoc = self.expected
+ obj = config.LibvirtConfigGuestHostdevPCI()
+ obj.parse_str(xmldoc)
+ self.assertEqual(obj.mode, 'subsystem')
+ self.assertEqual(obj.type, 'pci')
+ self.assertEqual(obj.managed, 'yes')
+ self.assertEqual(obj.domain, '0x1234')
+ self.assertEqual(obj.bus, '0x11')
+ self.assertEqual(obj.slot, '0x22')
+ self.assertEqual(obj.function, '0x3')
+
+ def test_parse_guest_hosdev_usb(self):
+ # Deliberately feeds a USB hostdev to the PCI class: only mode and
+ # type are asserted, the PCI-specific fields are not.
+ xmldoc = """<hostdev mode='subsystem' type='usb'>
+ <source startupPolicy='optional'>
+ <vendor id='0x1234'/>
+ <product id='0xbeef'/>
+ </source>
+ <boot order='2'/>
+ </hostdev>"""
+ obj = config.LibvirtConfigGuestHostdevPCI()
+ obj.parse_str(xmldoc)
+ self.assertEqual(obj.mode, 'subsystem')
+ self.assertEqual(obj.type, 'usb')
+
+
+class LibvirtConfigGuestSerialTest(LibvirtConfigBaseTest):
+ """Tests XML serialization of the guest <serial> device config."""
+
+ def test_config_file(self):
+ # File-backed serial console.
+ obj = config.LibvirtConfigGuestSerial()
+ obj.type = "file"
+ obj.source_path = "/tmp/vm.log"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <serial type="file">
+ <source path="/tmp/vm.log"/>
+ </serial>""")
+
+ def test_config_serial_port(self):
+ # TCP serial console; mode="bind" is emitted for listen sockets.
+ obj = config.LibvirtConfigGuestSerial()
+ obj.type = "tcp"
+ obj.listen_port = 11111
+ obj.listen_host = "0.0.0.0"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <serial type="tcp">
+ <source host="0.0.0.0" service="11111" mode="bind"/>
+ </serial>""")
+
+
+class LibvirtConfigGuestConsoleTest(LibvirtConfigBaseTest):
+ """Tests XML serialization of the guest <console> device config."""
+
+ def test_config_pty(self):
+ obj = config.LibvirtConfigGuestConsole()
+ obj.type = "pty"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <console type="pty"/>""")
+
+
+class LibvirtConfigGuestChannelTest(LibvirtConfigBaseTest):
+ """Tests XML serialization of the guest <channel> device config."""
+
+ def test_config_spice_minimal(self):
+ # A spicevmc channel always gets a virtio <target>.
+ obj = config.LibvirtConfigGuestChannel()
+ obj.type = "spicevmc"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <channel type="spicevmc">
+ <target type='virtio'/>
+ </channel>""")
+
+ def test_config_spice_full(self):
+ # target_name is emitted on the <target> element when set.
+ obj = config.LibvirtConfigGuestChannel()
+ obj.type = "spicevmc"
+ obj.target_name = "com.redhat.spice.0"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <channel type="spicevmc">
+ <target type='virtio' name='com.redhat.spice.0'/>
+ </channel>""")
+
+ def test_config_qga_full(self):
+ # Unix-socket channel as used by the QEMU guest agent; the expected
+ # XML reuses obj.source_path so the test stays self-consistent.
+ obj = config.LibvirtConfigGuestChannel()
+ obj.type = "unix"
+ obj.target_name = "org.qemu.guest_agent.0"
+ obj.source_path = "/var/lib/libvirt/qemu/%s.%s.sock" % (
+ obj.target_name, "instance-name")
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <channel type="unix">
+ <source path="%s" mode="bind"/>
+ <target type="virtio" name="org.qemu.guest_agent.0"/>
+ </channel>""" % obj.source_path)
+
+
+class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
+ """Tests XML serialization of the guest <interface> device config.
+
+ Covers the ethernet, bridge (plain and OVS), direct, and hostdev
+ network types, plus bandwidth shaping, filterref and virtualport
+ sub-elements.
+ """
+
+ def test_config_ethernet(self):
+ # vif_inbound_*/vif_outbound_* map to the <bandwidth> element.
+ obj = config.LibvirtConfigGuestInterface()
+ obj.net_type = "ethernet"
+ obj.mac_addr = "DE:AD:BE:EF:CA:FE"
+ obj.model = "virtio"
+ obj.target_dev = "vnet0"
+ obj.driver_name = "vhost"
+ obj.vif_inbound_average = 1024000
+ obj.vif_inbound_peak = 10240000
+ obj.vif_inbound_burst = 1024000
+ obj.vif_outbound_average = 1024000
+ obj.vif_outbound_peak = 10240000
+ obj.vif_outbound_burst = 1024000
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <interface type="ethernet">
+ <mac address="DE:AD:BE:EF:CA:FE"/>
+ <model type="virtio"/>
+ <driver name="vhost"/>
+ <target dev="vnet0"/>
+ <bandwidth>
+ <inbound average="1024000" peak="10240000" burst="1024000"/>
+ <outbound average="1024000" peak="10240000" burst="1024000"/>
+ </bandwidth>
+ </interface>""")
+
+ def test_config_bridge(self):
+ # filtername/filterparams map to <filterref> with <parameter> children.
+ obj = config.LibvirtConfigGuestInterface()
+ obj.net_type = "bridge"
+ obj.source_dev = "br0"
+ obj.mac_addr = "DE:AD:BE:EF:CA:FE"
+ obj.model = "virtio"
+ obj.target_dev = "tap12345678"
+ obj.filtername = "clean-traffic"
+ obj.filterparams.append({"key": "IP", "value": "192.168.122.1"})
+ obj.vif_inbound_average = 1024000
+ obj.vif_inbound_peak = 10240000
+ obj.vif_inbound_burst = 1024000
+ obj.vif_outbound_average = 1024000
+ obj.vif_outbound_peak = 10240000
+ obj.vif_outbound_burst = 1024000
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <interface type="bridge">
+ <mac address="DE:AD:BE:EF:CA:FE"/>
+ <model type="virtio"/>
+ <source bridge="br0"/>
+ <target dev="tap12345678"/>
+ <filterref filter="clean-traffic">
+ <parameter name="IP" value="192.168.122.1"/>
+ </filterref>
+ <bandwidth>
+ <inbound average="1024000" peak="10240000" burst="1024000"/>
+ <outbound average="1024000" peak="10240000" burst="1024000"/>
+ </bandwidth>
+ </interface>""")
+
+ def test_config_bridge_ovs(self):
+ # vporttype/vportparams map to <virtualport> with <parameters>.
+ obj = config.LibvirtConfigGuestInterface()
+ obj.net_type = "bridge"
+ obj.source_dev = "br0"
+ obj.mac_addr = "DE:AD:BE:EF:CA:FE"
+ obj.model = "virtio"
+ obj.target_dev = "tap12345678"
+ obj.vporttype = "openvswitch"
+ obj.vportparams.append({"key": "instanceid", "value": "foobar"})
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <interface type="bridge">
+ <mac address="DE:AD:BE:EF:CA:FE"/>
+ <model type="virtio"/>
+ <source bridge="br0"/>
+ <target dev="tap12345678"/>
+ <virtualport type="openvswitch">
+ <parameters instanceid="foobar"/>
+ </virtualport>
+ </interface>""")
+
+ def test_config_8021Qbh(self):
+ # direct type defaults the <source> mode to "private".
+ obj = config.LibvirtConfigGuestInterface()
+ obj.net_type = "direct"
+ obj.mac_addr = "DE:AD:BE:EF:CA:FE"
+ obj.model = "virtio"
+ obj.target_dev = "tap12345678"
+ obj.source_dev = "eth0"
+ obj.vporttype = "802.1Qbh"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <interface type="direct">
+ <mac address="DE:AD:BE:EF:CA:FE"/>
+ <model type="virtio"/>
+ <source dev="eth0" mode="private"/>
+ <target dev="tap12345678"/>
+ <virtualport type="802.1Qbh"/>
+ </interface>""")
+
+ def test_config_direct(self):
+ # source_mode overrides the default <source> mode.
+ obj = config.LibvirtConfigGuestInterface()
+ obj.net_type = "direct"
+ obj.mac_addr = "DE:AD:BE:EF:CA:FE"
+ obj.model = "virtio"
+ obj.source_dev = "eth0"
+ obj.source_mode = "passthrough"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <interface type="direct">
+ <mac address="DE:AD:BE:EF:CA:FE"/>
+ <model type="virtio"/>
+ <source dev="eth0" mode="passthrough"/>
+ </interface>""")
+
+ def test_config_8021Qbh_hostdev(self):
+ # For hostdev, source_dev is a PCI address rendered as an
+ # <address type="pci"> element with 0x-prefixed fields.
+ obj = config.LibvirtConfigGuestInterface()
+ obj.net_type = "hostdev"
+ obj.mac_addr = "DE:AD:BE:EF:CA:FE"
+ obj.source_dev = "0000:0a:00.1"
+ obj.vporttype = "802.1Qbh"
+ obj.add_vport_param("profileid", "MyPortProfile")
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <interface type="hostdev" managed="yes">
+ <mac address="DE:AD:BE:EF:CA:FE"/>
+ <source>
+ <address type="pci" domain="0x0000"
+ bus="0x0a" slot="0x00" function="0x1"/>
+ </source>
+ <virtualport type="802.1Qbh">
+ <parameters profileid="MyPortProfile"/>
+ </virtualport>
+ </interface>""")
+
+ def test_config_hw_veb_hostdev(self):
+ # vlan attribute maps to <vlan><tag id=.../></vlan>.
+ obj = config.LibvirtConfigGuestInterface()
+ obj.net_type = "hostdev"
+ obj.mac_addr = "DE:AD:BE:EF:CA:FE"
+ obj.source_dev = "0000:0a:00.1"
+ obj.vlan = "100"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <interface type="hostdev" managed="yes">
+ <mac address="DE:AD:BE:EF:CA:FE"/>
+ <source>
+ <address type="pci" domain="0x0000"
+ bus="0x0a" slot="0x00" function="0x1"/>
+ </source>
+ <vlan>
+ <tag id="100"/>
+ </vlan>
+ </interface>""")
+
+
+class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
+ """Tests serialization and parsing of the top-level <domain> config.
+
+ Covers lxc (with and without id maps), xen PV/HVM and kvm domains,
+ machine type, and parsing of devices and <cpu> back from XML.
+ """
+
+ def test_config_lxc(self):
+ # cpuset sets are collapsed into range syntax, e.g. "0-1,3-5".
+ obj = config.LibvirtConfigGuest()
+ obj.virt_type = "lxc"
+ obj.memory = 100 * units.Mi
+ obj.vcpus = 2
+ obj.cpuset = set([0, 1, 3, 4, 5])
+ obj.name = "demo"
+ obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
+ obj.os_type = "exe"
+ obj.os_init_path = "/sbin/init"
+
+ fs = config.LibvirtConfigGuestFilesys()
+ fs.source_dir = "/root/lxc"
+ fs.target_dir = "/"
+
+ obj.add_device(fs)
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <domain type="lxc">
+ <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
+ <name>demo</name>
+ <memory>104857600</memory>
+ <vcpu cpuset="0-1,3-5">2</vcpu>
+ <os>
+ <type>exe</type>
+ <init>/sbin/init</init>
+ </os>
+ <devices>
+ <filesystem type="mount">
+ <source dir="/root/lxc"/>
+ <target dir="/"/>
+ </filesystem>
+ </devices>
+ </domain>""")
+
+ def test_config_lxc_with_idmap(self):
+ # UID/GID maps appended to obj.idmaps become the <idmap> element.
+ obj = config.LibvirtConfigGuest()
+ obj.virt_type = "lxc"
+ obj.memory = 100 * units.Mi
+ obj.vcpus = 2
+ obj.cpuset = set([0, 1, 3, 4, 5])
+ obj.name = "demo"
+ obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
+ obj.os_type = "exe"
+ obj.os_init_path = "/sbin/init"
+
+ uidmap = config.LibvirtConfigGuestUIDMap()
+ uidmap.target = "10000"
+ uidmap.count = "1"
+ obj.idmaps.append(uidmap)
+ gidmap = config.LibvirtConfigGuestGIDMap()
+ gidmap.target = "10000"
+ gidmap.count = "1"
+ obj.idmaps.append(gidmap)
+
+ fs = config.LibvirtConfigGuestFilesys()
+ fs.source_dir = "/root/lxc"
+ fs.target_dir = "/"
+
+ obj.add_device(fs)
+
+ xml = obj.to_xml()
+ self.assertXmlEqual("""
+ <domain type="lxc">
+ <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
+ <name>demo</name>
+ <memory>104857600</memory>
+ <vcpu cpuset="0-1,3-5">2</vcpu>
+ <os>
+ <type>exe</type>
+ <init>/sbin/init</init>
+ </os>
+ <devices>
+ <filesystem type="mount">
+ <source dir="/root/lxc"/>
+ <target dir="/"/>
+ </filesystem>
+ </devices>
+ <idmap>
+ <uid start="0" target="10000" count="1"/>
+ <gid start="0" target="10000" count="1"/>
+ </idmap>
+ </domain>""", xml)
+
+ def test_config_xen_pv(self):
+ # Paravirtualized xen: kernel/initrd/cmdline under <os>.
+ obj = config.LibvirtConfigGuest()
+ obj.virt_type = "xen"
+ obj.memory = 100 * units.Mi
+ obj.vcpus = 2
+ obj.cpuset = set([0, 1, 3, 4, 5])
+ obj.name = "demo"
+ obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
+ obj.os_type = "linux"
+ obj.os_kernel = "/tmp/vmlinuz"
+ obj.os_initrd = "/tmp/ramdisk"
+ obj.os_cmdline = "console=xvc0"
+
+ disk = config.LibvirtConfigGuestDisk()
+ disk.source_type = "file"
+ disk.source_path = "/tmp/img"
+ disk.target_dev = "/dev/xvda"
+ disk.target_bus = "xen"
+
+ obj.add_device(disk)
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <domain type="xen">
+ <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
+ <name>demo</name>
+ <memory>104857600</memory>
+ <vcpu cpuset="0-1,3-5">2</vcpu>
+ <os>
+ <type>linux</type>
+ <kernel>/tmp/vmlinuz</kernel>
+ <initrd>/tmp/ramdisk</initrd>
+ <cmdline>console=xvc0</cmdline>
+ </os>
+ <devices>
+ <disk type="file" device="disk">
+ <source file="/tmp/img"/>
+ <target bus="xen" dev="/dev/xvda"/>
+ </disk>
+ </devices>
+ </domain>""")
+
+ def test_config_xen_hvm(self):
+ # HVM xen: loader plus pae/acpi/apic booleans -> <features>.
+ obj = config.LibvirtConfigGuest()
+ obj.virt_type = "xen"
+ obj.memory = 100 * units.Mi
+ obj.vcpus = 2
+ obj.cpuset = set([0, 1, 3, 4, 5])
+ obj.name = "demo"
+ obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
+ obj.os_type = "hvm"
+ obj.os_loader = '/usr/lib/xen/boot/hvmloader'
+ obj.os_root = "root=xvda"
+ obj.os_cmdline = "console=xvc0"
+ obj.pae = True
+ obj.acpi = True
+ obj.apic = True
+
+ disk = config.LibvirtConfigGuestDisk()
+ disk.source_type = "file"
+ disk.source_path = "/tmp/img"
+ disk.target_dev = "/dev/xvda"
+ disk.target_bus = "xen"
+
+ obj.add_device(disk)
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <domain type="xen">
+ <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
+ <name>demo</name>
+ <memory>104857600</memory>
+ <vcpu cpuset="0-1,3-5">2</vcpu>
+ <os>
+ <type>hvm</type>
+ <loader>/usr/lib/xen/boot/hvmloader</loader>
+ <cmdline>console=xvc0</cmdline>
+ <root>root=xvda</root>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <devices>
+ <disk type="file" device="disk">
+ <source file="/tmp/img"/>
+ <target bus="xen" dev="/dev/xvda"/>
+ </disk>
+ </devices>
+ </domain>""")
+
+ def test_config_kvm(self):
+ # Kitchen-sink kvm domain: cputune, memoryBacking, memtune,
+ # numatune (memory + per-cell memnodes), smbios/sysinfo and boot
+ # device ordering are all exercised in one document.
+ obj = config.LibvirtConfigGuest()
+ obj.virt_type = "kvm"
+ obj.memory = 100 * units.Mi
+ obj.vcpus = 2
+ obj.cpuset = set([0, 1, 3, 4, 5])
+
+ obj.cputune = config.LibvirtConfigGuestCPUTune()
+ obj.cputune.shares = 100
+ obj.cputune.quota = 50000
+ obj.cputune.period = 25000
+
+ obj.membacking = config.LibvirtConfigGuestMemoryBacking()
+ obj.membacking.hugepages = True
+
+ obj.memtune = config.LibvirtConfigGuestMemoryTune()
+ obj.memtune.hard_limit = 496
+ obj.memtune.soft_limit = 672
+ obj.memtune.swap_hard_limit = 1638
+ obj.memtune.min_guarantee = 2970
+
+ obj.numatune = config.LibvirtConfigGuestNUMATune()
+
+ numamemory = config.LibvirtConfigGuestNUMATuneMemory()
+ numamemory.mode = "preferred"
+ numamemory.nodeset = [0, 1, 2, 3, 8]
+
+ obj.numatune.memory = numamemory
+
+ numamemnode0 = config.LibvirtConfigGuestNUMATuneMemNode()
+ numamemnode0.cellid = 0
+ numamemnode0.mode = "preferred"
+ numamemnode0.nodeset = [0, 1]
+
+ numamemnode1 = config.LibvirtConfigGuestNUMATuneMemNode()
+ numamemnode1.cellid = 1
+ numamemnode1.mode = "preferred"
+ numamemnode1.nodeset = [2, 3]
+
+ numamemnode2 = config.LibvirtConfigGuestNUMATuneMemNode()
+ numamemnode2.cellid = 2
+ numamemnode2.mode = "preferred"
+ numamemnode2.nodeset = [8]
+
+ obj.numatune.memnodes.extend([numamemnode0,
+ numamemnode1,
+ numamemnode2])
+
+ obj.name = "demo"
+ obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
+ obj.os_type = "linux"
+ obj.os_boot_dev = ["hd", "cdrom", "fd"]
+ obj.os_smbios = config.LibvirtConfigGuestSMBIOS()
+ obj.pae = True
+ obj.acpi = True
+ obj.apic = True
+
+ obj.sysinfo = config.LibvirtConfigGuestSysinfo()
+ obj.sysinfo.bios_vendor = "Acme"
+ obj.sysinfo.system_version = "1.0.0"
+
+ disk = config.LibvirtConfigGuestDisk()
+ disk.source_type = "file"
+ disk.source_path = "/tmp/img"
+ disk.target_dev = "/dev/vda"
+ disk.target_bus = "virtio"
+
+ obj.add_device(disk)
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <domain type="kvm">
+ <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
+ <name>demo</name>
+ <memory>104857600</memory>
+ <memoryBacking>
+ <hugepages/>
+ </memoryBacking>
+ <memtune>
+ <hard_limit units="K">496</hard_limit>
+ <soft_limit units="K">672</soft_limit>
+ <swap_hard_limit units="K">1638</swap_hard_limit>
+ <min_guarantee units="K">2970</min_guarantee>
+ </memtune>
+ <numatune>
+ <memory mode="preferred" nodeset="0-3,8"/>
+ <memnode cellid="0" mode="preferred" nodeset="0-1"/>
+ <memnode cellid="1" mode="preferred" nodeset="2-3"/>
+ <memnode cellid="2" mode="preferred" nodeset="8"/>
+ </numatune>
+ <vcpu cpuset="0-1,3-5">2</vcpu>
+ <sysinfo type='smbios'>
+ <bios>
+ <entry name="vendor">Acme</entry>
+ </bios>
+ <system>
+ <entry name="version">1.0.0</entry>
+ </system>
+ </sysinfo>
+ <os>
+ <type>linux</type>
+ <boot dev="hd"/>
+ <boot dev="cdrom"/>
+ <boot dev="fd"/>
+ <smbios mode="sysinfo"/>
+ </os>
+ <features>
+ <acpi/>
+ <apic/>
+ <pae/>
+ </features>
+ <cputune>
+ <shares>100</shares>
+ <quota>50000</quota>
+ <period>25000</period>
+ </cputune>
+ <devices>
+ <disk type="file" device="disk">
+ <source file="/tmp/img"/>
+ <target bus="virtio" dev="/dev/vda"/>
+ </disk>
+ </devices>
+ </domain>""")
+
+ def test_config_machine_type(self):
+ # os_mach_type becomes the machine= attribute of <type>.
+ obj = config.LibvirtConfigGuest()
+ obj.virt_type = "kvm"
+ obj.memory = 100 * units.Mi
+ obj.vcpus = 2
+ obj.name = "demo"
+ obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
+ obj.os_type = "hvm"
+ obj.os_mach_type = "fake_machine_type"
+ xml = obj.to_xml()
+
+ self.assertXmlEqual(xml, """
+ <domain type="kvm">
+ <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
+ <name>demo</name>
+ <memory>104857600</memory>
+ <vcpu>2</vcpu>
+ <os>
+ <type machine="fake_machine_type">hvm</type>
+ </os>
+ </domain>""")
+
+ def test_ConfigGuest_parse_devices(self):
+ # A pci hostdev in <devices> parses into the PCI hostdev class.
+ xmldoc = """ <domain type="kvm">
+ <devices>
+ <hostdev mode="subsystem" type="pci" managed="no">
+ </hostdev>
+ </devices>
+ </domain>
+ """
+ obj = config.LibvirtConfigGuest()
+ obj.parse_str(xmldoc)
+ self.assertEqual(len(obj.devices), 1)
+ self.assertIsInstance(obj.devices[0],
+ config.LibvirtConfigGuestHostdevPCI)
+ self.assertEqual(obj.devices[0].mode, 'subsystem')
+ self.assertEqual(obj.devices[0].managed, 'no')
+
+ def test_ConfigGuest_parse_devices_wrong_type(self):
+ # Unknown hostdev types are silently skipped during parsing.
+ xmldoc = """ <domain type="kvm">
+ <devices>
+ <hostdev mode="subsystem" type="xxxx" managed="no">
+ </hostdev>
+ </devices>
+ </domain>
+ """
+ obj = config.LibvirtConfigGuest()
+ obj.parse_str(xmldoc)
+ self.assertEqual(len(obj.devices), 0)
+
+ def test_ConfigGuest_parese_cpu(self):
+ # NOTE(review): "parese" is a typo for "parse"; name kept so this
+ # block stands alone, discovery only needs the test_ prefix.
+ xmldoc = """ <domain>
+ <cpu mode='custom' match='exact'>
+ <model>kvm64</model>
+ </cpu>
+ </domain>
+ """
+ obj = config.LibvirtConfigGuest()
+ obj.parse_str(xmldoc)
+
+ self.assertEqual(obj.cpu.mode, 'custom')
+ self.assertEqual(obj.cpu.match, 'exact')
+ self.assertEqual(obj.cpu.model, 'kvm64')
+
+
+class LibvirtConfigGuestSnapshotTest(LibvirtConfigBaseTest):
+ """Tests XML serialization of the <domainsnapshot> config."""
+
+ def test_config_snapshot(self):
+ # With no disks added, an empty <disks/> element is still emitted.
+ obj = config.LibvirtConfigGuestSnapshot()
+ obj.name = "Demo"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <domainsnapshot>
+ <name>Demo</name>
+ <disks/>
+ </domainsnapshot>""")
+
+ def test_config_snapshot_with_disks(self):
+ # External file-backed snapshot disk plus an excluded (snapshot='no')
+ # disk.
+ obj = config.LibvirtConfigGuestSnapshot()
+ obj.name = "Demo"
+
+ disk = config.LibvirtConfigGuestSnapshotDisk()
+ disk.name = 'vda'
+ disk.source_path = 'source-path'
+ disk.source_type = 'file'
+ disk.snapshot = 'external'
+ disk.driver_name = 'qcow2'
+ obj.add_disk(disk)
+
+ disk2 = config.LibvirtConfigGuestSnapshotDisk()
+ disk2.name = 'vdb'
+ disk2.snapshot = 'no'
+ obj.add_disk(disk2)
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <domainsnapshot>
+ <name>Demo</name>
+ <disks>
+ <disk name='vda' snapshot='external' type='file'>
+ <source file='source-path'/>
+ </disk>
+ <disk name='vdb' snapshot='no'/>
+ </disks>
+ </domainsnapshot>""")
+
+ def test_config_snapshot_with_network_disks(self):
+ # Network-backed (glusterfs) snapshot disk: source_hosts/source_ports
+ # become <host> children of <source>.
+ obj = config.LibvirtConfigGuestSnapshot()
+ obj.name = "Demo"
+
+ disk = config.LibvirtConfigGuestSnapshotDisk()
+ disk.name = 'vda'
+ disk.source_name = 'source-file'
+ disk.source_type = 'network'
+ disk.source_hosts = ['host1']
+ disk.source_ports = ['12345']
+ disk.source_protocol = 'glusterfs'
+ disk.snapshot = 'external'
+ disk.driver_name = 'qcow2'
+ obj.add_disk(disk)
+
+ disk2 = config.LibvirtConfigGuestSnapshotDisk()
+ disk2.name = 'vdb'
+ disk2.snapshot = 'no'
+ obj.add_disk(disk2)
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <domainsnapshot>
+ <name>Demo</name>
+ <disks>
+ <disk name='vda' snapshot='external' type='network'>
+ <source protocol='glusterfs' name='source-file'>
+ <host name='host1' port='12345'/>
+ </source>
+ </disk>
+ <disk name='vdb' snapshot='no'/>
+ </disks>
+ </domainsnapshot>""")
+
+
+class LibvirtConfigNodeDeviceTest(LibvirtConfigBaseTest):
+ """Tests parsing of node <device> XML into pci_capability objects."""
+
+ def test_config_virt_usb_device(self):
+ # A usb-type capability must NOT populate pci_capability.
+ xmlin = """
+ <device>
+ <name>usb_0000_09_00_0</name>
+ <parent>pci_0000_00_1c_0</parent>
+ <driver>
+ <name>vxge</name>
+ </driver>
+ <capability type="usb">
+ <domain>0</domain>
+ <capability type="fake_usb">
+ <address fake_usb="fake"/>
+ </capability>
+ </capability>
+ </device>"""
+
+ obj = config.LibvirtConfigNodeDevice()
+ obj.parse_str(xmlin)
+
+ self.assertIsNone(obj.pci_capability)
+
+ def test_config_virt_device(self):
+ # SR-IOV PF: virt_functions sub-capability with three VF addresses.
+ xmlin = """
+ <device>
+ <name>pci_0000_09_00_0</name>
+ <parent>pci_0000_00_1c_0</parent>
+ <driver>
+ <name>vxge</name>
+ </driver>
+ <capability type="pci">
+ <domain>0</domain>
+ <bus>9</bus>
+ <slot>0</slot>
+ <function>0</function>
+ <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
+ <vendor id="0x17d5">Neterion Inc.</vendor>
+ <capability type="virt_functions">
+ <address domain="0x0000" bus="0x0a" slot="0x00" function="0x1"/>
+ <address domain="0x0000" bus="0x0a" slot="0x00" function="0x2"/>
+ <address domain="0x0000" bus="0x0a" slot="0x00" function="0x3"/>
+ </capability>
+ </capability>
+ </device>"""
+
+ obj = config.LibvirtConfigNodeDevice()
+ obj.parse_str(xmlin)
+
+ self.assertIsInstance(obj.pci_capability,
+ config.LibvirtConfigNodeDevicePciCap)
+ self.assertIsInstance(obj.pci_capability.fun_capability[0],
+ config.LibvirtConfigNodeDevicePciSubFunctionCap)
+ self.assertEqual(obj.pci_capability.fun_capability[0].type,
+ "virt_functions")
+ self.assertEqual(len(obj.pci_capability.fun_capability[0].
+ device_addrs),
+ 3)
+ self.assertEqual(obj.pci_capability.bus, 9)
+
+ def test_config_phy_device(self):
+ # SR-IOV VF: phys_function sub-capability pointing at its PF.
+ xmlin = """
+ <device>
+ <name>pci_0000_33_00_0</name>
+ <parent>pci_0000_22_1c_0</parent>
+ <driver>
+ <name>vxx</name>
+ </driver>
+ <capability type="pci">
+ <domain>0</domain>
+ <bus>9</bus>
+ <slot>0</slot>
+ <function>0</function>
+ <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
+ <vendor id="0x17d5">Neterion Inc.</vendor>
+ <capability type="phys_function">
+ <address domain='0x0000' bus='0x09' slot='0x00' function='0x0'/>
+ </capability>
+ </capability>
+ </device>"""
+
+ obj = config.LibvirtConfigNodeDevice()
+ obj.parse_str(xmlin)
+
+ self.assertIsInstance(obj.pci_capability,
+ config.LibvirtConfigNodeDevicePciCap)
+ self.assertIsInstance(obj.pci_capability.fun_capability[0],
+ config.LibvirtConfigNodeDevicePciSubFunctionCap)
+ self.assertEqual(obj.pci_capability.fun_capability[0].type,
+ "phys_function")
+ self.assertEqual(len(obj.pci_capability.fun_capability[0].
+ device_addrs),
+ 1)
+
+ def test_config_non_device(self):
+ # Self-closing sub-capability (no addresses) still parses.
+ xmlin = """
+ <device>
+ <name>pci_0000_33_00_0</name>
+ <parent>pci_0000_22_1c_0</parent>
+ <driver>
+ <name>vxx</name>
+ </driver>
+ <capability type="pci">
+ <domain>0</domain>
+ <bus>9</bus>
+ <slot>0</slot>
+ <function>0</function>
+ <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
+ <vendor id="0x17d5">Neterion Inc.</vendor>
+ <capability type="virt_functions"/>
+ </capability>
+ </device>"""
+
+ obj = config.LibvirtConfigNodeDevice()
+ obj.parse_str(xmlin)
+
+ self.assertIsInstance(obj.pci_capability,
+ config.LibvirtConfigNodeDevicePciCap)
+ self.assertIsInstance(obj.pci_capability.fun_capability[0],
+ config.LibvirtConfigNodeDevicePciSubFunctionCap)
+ self.assertEqual(obj.pci_capability.fun_capability[0].type,
+ "virt_functions")
+
+ def test_config_fail_device(self):
+ # Empty (open/close, no children) sub-capability also parses.
+ xmlin = """
+ <device>
+ <name>pci_0000_33_00_0</name>
+ <parent>pci_0000_22_1c_0</parent>
+ <driver>
+ <name>vxx</name>
+ </driver>
+ <capability type="pci">
+ <domain>0</domain>
+ <bus>9</bus>
+ <slot>0</slot>
+ <function>0</function>
+ <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
+ <vendor id="0x17d5">Neterion Inc.</vendor>
+ <capability type="virt_functions">
+ </capability>
+ </capability>
+ </device>"""
+
+ obj = config.LibvirtConfigNodeDevice()
+ obj.parse_str(xmlin)
+
+ self.assertIsInstance(obj.pci_capability,
+ config.LibvirtConfigNodeDevicePciCap)
+ self.assertIsInstance(obj.pci_capability.fun_capability[0],
+ config.LibvirtConfigNodeDevicePciSubFunctionCap)
+ self.assertEqual(obj.pci_capability.fun_capability[0].type,
+ "virt_functions")
+
+ def test_config_2cap_device(self):
+ # Both phys_function and virt_functions present; parse order of the
+ # fun_capability list is asserted.
+ xmlin = """
+ <device>
+ <name>pci_0000_04_10_7</name>
+ <parent>pci_0000_00_01_1</parent>
+ <driver>
+ <name>igbvf</name>
+ </driver>
+ <capability type='pci'>
+ <domain>0</domain>
+ <bus>4</bus>
+ <slot>16</slot>
+ <function>7</function>
+ <product id='0x1520'>I350 Ethernet Controller Virtual</product>
+ <vendor id='0x8086'>Intel Corporation</vendor>
+ <capability type='phys_function'>
+ <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
+ </capability>
+ <capability type='virt_functions'>
+ <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
+ </capability>
+ </capability>
+ </device>"""
+
+ obj = config.LibvirtConfigNodeDevice()
+ obj.parse_str(xmlin)
+
+ self.assertIsInstance(obj.pci_capability,
+ config.LibvirtConfigNodeDevicePciCap)
+ self.assertIsInstance(obj.pci_capability.fun_capability[0],
+ config.LibvirtConfigNodeDevicePciSubFunctionCap)
+ self.assertEqual(obj.pci_capability.fun_capability[0].type,
+ "phys_function")
+ self.assertEqual(obj.pci_capability.fun_capability[1].type,
+ "virt_functions")
+
+
+class LibvirtConfigNodeDevicePciCapTest(LibvirtConfigBaseTest):
+ """Tests parsing of a standalone PCI <capability> element."""
+
+ def test_config_device_pci_cap(self):
+ # Decimal domain/bus/slot/function plus hex product/vendor ids;
+ # sub-function addresses parse into (domain, bus, slot, function)
+ # integer tuples.
+ xmlin = """
+ <capability type="pci">
+ <domain>0</domain>
+ <bus>10</bus>
+ <slot>1</slot>
+ <function>5</function>
+ <product id="0x10bd">Intel 10 Gigabit Ethernet</product>
+ <vendor id="0x8086">Intel Inc.</vendor>
+ <capability type="virt_functions">
+ <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
+ <address domain="0001" bus="0x0a" slot="0x02" function="0x03"/>
+ </capability>
+ </capability>"""
+ obj = config.LibvirtConfigNodeDevicePciCap()
+ obj.parse_str(xmlin)
+
+ self.assertEqual(obj.domain, 0)
+ self.assertEqual(obj.bus, 10)
+ self.assertEqual(obj.slot, 1)
+ self.assertEqual(obj.function, 5)
+ self.assertEqual(obj.product, "Intel 10 Gigabit Ethernet")
+ self.assertEqual(obj.product_id, 0x10bd)
+ self.assertEqual(obj.vendor, "Intel Inc.")
+ self.assertEqual(obj.vendor_id, 0x8086)
+ self.assertIsInstance(obj.fun_capability[0],
+ config.LibvirtConfigNodeDevicePciSubFunctionCap)
+
+ self.assertEqual(obj.fun_capability[0].type, 'virt_functions')
+ self.assertEqual(obj.fun_capability[0].device_addrs,
+ [(0, 10, 1, 1),
+ (1, 10, 2, 3), ])
+
+ def test_config_device_pci_2cap(self):
+ # Same as above but with an additional phys_function capability.
+ xmlin = """
+ <capability type="pci">
+ <domain>0</domain>
+ <bus>10</bus>
+ <slot>1</slot>
+ <function>5</function>
+ <product id="0x10bd">Intel 10 Gigabit Ethernet</product>
+ <vendor id="0x8086">Intel Inc.</vendor>
+ <capability type="virt_functions">
+ <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
+ <address domain="0001" bus="0x0a" slot="0x02" function="0x03"/>
+ </capability>
+ <capability type="phys_function">
+ <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
+ </capability>
+ </capability>"""
+ obj = config.LibvirtConfigNodeDevicePciCap()
+ obj.parse_str(xmlin)
+
+ self.assertEqual(obj.domain, 0)
+ self.assertEqual(obj.bus, 10)
+ self.assertEqual(obj.slot, 1)
+ self.assertEqual(obj.function, 5)
+ self.assertEqual(obj.product, "Intel 10 Gigabit Ethernet")
+ self.assertEqual(obj.product_id, 0x10bd)
+ self.assertEqual(obj.vendor, "Intel Inc.")
+ self.assertEqual(obj.vendor_id, 0x8086)
+ self.assertIsInstance(obj.fun_capability[0],
+ config.LibvirtConfigNodeDevicePciSubFunctionCap)
+
+ self.assertEqual(obj.fun_capability[0].type, 'virt_functions')
+ self.assertEqual(obj.fun_capability[0].device_addrs,
+ [(0, 10, 1, 1),
+ (1, 10, 2, 3), ])
+ self.assertEqual(obj.fun_capability[1].type, 'phys_function')
+ self.assertEqual(obj.fun_capability[1].device_addrs,
+ [(0, 10, 1, 1), ])
+
+ def test_config_read_only_disk(self):
+ # NOTE(review): this tests LibvirtConfigGuestDisk, not the PCI
+ # capability class — it appears misplaced in this test class and
+ # would fit better alongside the other guest-disk tests.
+ obj = config.LibvirtConfigGuestDisk()
+ obj.source_type = "disk"
+ obj.source_device = "disk"
+ obj.driver_name = "kvm"
+ obj.target_dev = "/dev/hdc"
+ obj.target_bus = "virtio"
+ obj.readonly = True
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="disk" device="disk">
+ <driver name="kvm"/>
+ <target bus="virtio" dev="/dev/hdc"/>
+ <readonly/>
+ </disk>""")
+
+ obj.readonly = False
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <disk type="disk" device="disk">
+ <driver name="kvm"/>
+ <target bus="virtio" dev="/dev/hdc"/>
+ </disk>""")
+
+
+class LibvirtConfigNodeDevicePciSubFunctionCap(LibvirtConfigBaseTest):
+ """Tests parsing of a PCI sub-function <capability> element.
+
+ NOTE(review): class name lacks the usual ``Test`` suffix; discovery
+ is unaffected since it keys off the base class.
+ """
+
+ def test_config_device_pci_subfunction(self):
+ # Hex/zero-padded address attributes parse into int tuples.
+ xmlin = """
+ <capability type="virt_functions">
+ <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
+ <address domain="0001" bus="0x0a" slot="0x02" function="0x03"/>
+ </capability>"""
+ fun_capability = config.LibvirtConfigNodeDevicePciSubFunctionCap()
+ fun_capability.parse_str(xmlin)
+ self.assertEqual('virt_functions', fun_capability.type)
+ self.assertEqual([(0, 10, 1, 1),
+ (1, 10, 2, 3)],
+ fun_capability.device_addrs)
+
+
+class LibvirtConfigGuestVideoTest(LibvirtConfigBaseTest):
+ """Tests XML serialization of the guest <video> device config."""
+
+ def test_config_video_driver(self):
+ obj = config.LibvirtConfigGuestVideo()
+ obj.type = 'qxl'
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <video>
+ <model type='qxl'/>
+ </video>""")
+
+ def test_config_video_driver_vram_heads(self):
+ # vram and heads are optional attributes on <model>.
+ obj = config.LibvirtConfigGuestVideo()
+ obj.type = 'qxl'
+ obj.vram = '9216'
+ obj.heads = '1'
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <video>
+ <model type='qxl' vram='9216' heads='1'/>
+ </video>""")
+
+
+class LibvirtConfigGuestSeclabel(LibvirtConfigBaseTest):
+ """Tests XML serialization of the <seclabel> config.
+
+ NOTE(review): class name lacks the usual ``Test`` suffix; discovery
+ is unaffected since it keys off the base class.
+ """
+
+ def test_config_seclabel_config(self):
+ # The default label type is "dynamic".
+ obj = config.LibvirtConfigSeclabel()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <seclabel type='dynamic'/>""")
+
+ def test_config_seclabel_baselabel(self):
+ obj = config.LibvirtConfigSeclabel()
+ obj.type = 'dynamic'
+ obj.baselabel = 'system_u:system_r:my_svirt_t:s0'
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <seclabel type='dynamic'>
+ <baselabel>system_u:system_r:my_svirt_t:s0</baselabel>
+ </seclabel>""")
+
+
+class LibvirtConfigGuestRngTest(LibvirtConfigBaseTest):
+ """Tests XML serialization of the guest <rng> device config."""
+
+ def test_config_rng_driver(self):
+ # Defaults: virtio model with a 'random' backend and no rate limit.
+ obj = config.LibvirtConfigGuestRng()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+<rng model='virtio'>
+ <backend model='random'/>
+</rng>""")
+
+ def test_config_rng_driver_with_rate(self):
+ # backend sets the element text; rate_* produce the <rate> element.
+ obj = config.LibvirtConfigGuestRng()
+ obj.backend = '/dev/random'
+ obj.rate_period = '12'
+ obj.rate_bytes = '34'
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+<rng model='virtio'>
+ <rate period='12' bytes='34'/>
+ <backend model='random'>/dev/random</backend>
+</rng>""")
+
+
+class LibvirtConfigGuestControllerTest(LibvirtConfigBaseTest):
+
+ def test_config_guest_contoller(self):
+ obj = config.LibvirtConfigGuestController()
+ obj.type = 'scsi'
+ obj.index = 0
+ obj.model = 'virtio-scsi'
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <controller type='scsi' index='0' model='virtio-scsi'/>""")
+
+
+class LibvirtConfigGuestWatchdogTest(LibvirtConfigBaseTest):
+ """Tests XML serialization of the guest <watchdog> device config."""
+
+ def test_config_watchdog(self):
+ obj = config.LibvirtConfigGuestWatchdog()
+ obj.action = 'none'
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, "<watchdog model='i6300esb' action='none'/>")
+
+ def test_config_watchdog_default_action(self):
+ # The default action is "reset".
+ obj = config.LibvirtConfigGuestWatchdog()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, "<watchdog model='i6300esb' action='reset'/>")
+
+
+class LibvirtConfigGuestCPUTuneTest(LibvirtConfigBaseTest):
+ """Tests XML serialization of the <cputune> config."""
+
+ def test_config_cputune_timeslice(self):
+ cputune = config.LibvirtConfigGuestCPUTune()
+ cputune.shares = 100
+ cputune.quota = 50000
+ cputune.period = 25000
+
+ xml = cputune.to_xml()
+ self.assertXmlEqual(xml, """
+ <cputune>
+ <shares>100</shares>
+ <quota>50000</quota>
+ <period>25000</period>
+ </cputune>""")
+
+ def test_config_cputune_vcpus(self):
+ # Each vcpupin's cpuset set serializes into range syntax, e.g. "0-1".
+ cputune = config.LibvirtConfigGuestCPUTune()
+
+ vcpu0 = config.LibvirtConfigGuestCPUTuneVCPUPin()
+ vcpu0.id = 0
+ vcpu0.cpuset = set([0, 1])
+ vcpu1 = config.LibvirtConfigGuestCPUTuneVCPUPin()
+ vcpu1.id = 1
+ vcpu1.cpuset = set([2, 3])
+ vcpu2 = config.LibvirtConfigGuestCPUTuneVCPUPin()
+ vcpu2.id = 2
+ vcpu2.cpuset = set([4, 5])
+ vcpu3 = config.LibvirtConfigGuestCPUTuneVCPUPin()
+ vcpu3.id = 3
+ vcpu3.cpuset = set([6, 7])
+ cputune.vcpupin.extend([vcpu0, vcpu1, vcpu2, vcpu3])
+
+ xml = cputune.to_xml()
+ self.assertXmlEqual(xml, """
+ <cputune>
+ <vcpupin vcpu="0" cpuset="0-1"/>
+ <vcpupin vcpu="1" cpuset="2-3"/>
+ <vcpupin vcpu="2" cpuset="4-5"/>
+ <vcpupin vcpu="3" cpuset="6-7"/>
+ </cputune>""")
+
+
+class LibvirtConfigGuestMemoryBackingTest(LibvirtConfigBaseTest):
+ def test_config_memory_backing_none(self):
+ obj = config.LibvirtConfigGuestMemoryBacking()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, "<memoryBacking/>")
+
+ def test_config_memory_backing_all(self):
+ obj = config.LibvirtConfigGuestMemoryBacking()
+ obj.locked = True
+ obj.sharedpages = False
+ obj.hugepages = True
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <memoryBacking>
+ <hugepages/>
+ <nosharedpages/>
+ <locked/>
+ </memoryBacking>""")
+
+
+class LibvirtConfigGuestMemoryTuneTest(LibvirtConfigBaseTest):
+ def test_config_memory_backing_none(self):
+ obj = config.LibvirtConfigGuestMemoryTune()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, "<memtune/>")
+
+ def test_config_memory_backing_all(self):
+ obj = config.LibvirtConfigGuestMemoryTune()
+ obj.soft_limit = 6
+ obj.hard_limit = 28
+ obj.swap_hard_limit = 140
+ obj.min_guarantee = 270
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <memtune>
+ <hard_limit units="K">28</hard_limit>
+ <soft_limit units="K">6</soft_limit>
+ <swap_hard_limit units="K">140</swap_hard_limit>
+ <min_guarantee units="K">270</min_guarantee>
+ </memtune>""")
+
+
+class LibvirtConfigGuestNUMATuneTest(LibvirtConfigBaseTest):
+ def test_config_numa_tune_none(self):
+ obj = config.LibvirtConfigGuestNUMATune()
+
+ xml = obj.to_xml()
+ self.assertXmlEqual("<numatune/>", xml)
+
+ def test_config_numa_tune_memory(self):
+ obj = config.LibvirtConfigGuestNUMATune()
+
+ numamemory = config.LibvirtConfigGuestNUMATuneMemory()
+ numamemory.nodeset = [0, 1, 2, 3, 8]
+
+ obj.memory = numamemory
+
+ xml = obj.to_xml()
+ self.assertXmlEqual("""
+ <numatune>
+ <memory mode="strict" nodeset="0-3,8"/>
+ </numatune>""", xml)
+
+ def test_config_numa_tune_memnodes(self):
+ obj = config.LibvirtConfigGuestNUMATune()
+
+ numamemnode0 = config.LibvirtConfigGuestNUMATuneMemNode()
+ numamemnode0.cellid = 0
+ numamemnode0.nodeset = [0, 1]
+
+ numamemnode1 = config.LibvirtConfigGuestNUMATuneMemNode()
+ numamemnode1.cellid = 1
+ numamemnode1.nodeset = [2, 3]
+
+ numamemnode2 = config.LibvirtConfigGuestNUMATuneMemNode()
+ numamemnode2.cellid = 2
+ numamemnode2.nodeset = [8]
+
+ obj.memnodes.extend([numamemnode0,
+ numamemnode1,
+ numamemnode2])
+
+ xml = obj.to_xml()
+ self.assertXmlEqual("""
+ <numatune>
+ <memnode cellid="0" mode="strict" nodeset="0-1"/>
+ <memnode cellid="1" mode="strict" nodeset="2-3"/>
+ <memnode cellid="2" mode="strict" nodeset="8"/>
+ </numatune>""", xml)
+
+
+class LibvirtConfigGuestMetadataNovaTest(LibvirtConfigBaseTest):
+
+ def test_config_metadata(self):
+ meta = config.LibvirtConfigGuestMetaNovaInstance()
+ meta.package = "2014.2.3"
+ meta.name = "moonbuggy"
+ meta.creationTime = 1234567890
+ meta.roottype = "image"
+ meta.rootid = "fe55c69a-8b2e-4bbc-811a-9ad2023a0426"
+
+ owner = config.LibvirtConfigGuestMetaNovaOwner()
+ owner.userid = "3472c2a6-de91-4fb5-b618-42bc781ef670"
+ owner.username = "buzz"
+ owner.projectid = "f241e906-010e-4917-ae81-53f4fb8aa021"
+ owner.projectname = "moonshot"
+
+ meta.owner = owner
+
+ flavor = config.LibvirtConfigGuestMetaNovaFlavor()
+ flavor.name = "m1.lowgravity"
+ flavor.vcpus = 8
+ flavor.memory = 2048
+ flavor.swap = 10
+ flavor.disk = 50
+ flavor.ephemeral = 10
+
+ meta.flavor = flavor
+
+ xml = meta.to_xml()
+ self.assertXmlEqual(xml, """
+ <nova:instance xmlns:nova='http://openstack.org/xmlns/libvirt/nova/1.0'>
+ <nova:package version="2014.2.3"/>
+ <nova:name>moonbuggy</nova:name>
+ <nova:creationTime>2009-02-13 23:31:30</nova:creationTime>
+ <nova:flavor name="m1.lowgravity">
+ <nova:memory>2048</nova:memory>
+ <nova:disk>50</nova:disk>
+ <nova:swap>10</nova:swap>
+ <nova:ephemeral>10</nova:ephemeral>
+ <nova:vcpus>8</nova:vcpus>
+ </nova:flavor>
+ <nova:owner>
+ <nova:user
+ uuid="3472c2a6-de91-4fb5-b618-42bc781ef670">buzz</nova:user>
+ <nova:project
+ uuid="f241e906-010e-4917-ae81-53f4fb8aa021">moonshot</nova:project>
+ </nova:owner>
+ <nova:root type="image" uuid="fe55c69a-8b2e-4bbc-811a-9ad2023a0426"/>
+ </nova:instance>
+ """)
+
+
+class LibvirtConfigGuestIDMap(LibvirtConfigBaseTest):
+ def test_config_id_map_parse_start_not_int(self):
+ xmlin = "<uid start='a' target='20000' count='5'/>"
+ obj = config.LibvirtConfigGuestIDMap()
+
+ self.assertRaises(ValueError, obj.parse_str, xmlin)
+
+ def test_config_id_map_parse_target_not_int(self):
+ xmlin = "<uid start='2' target='a' count='5'/>"
+ obj = config.LibvirtConfigGuestIDMap()
+
+ self.assertRaises(ValueError, obj.parse_str, xmlin)
+
+ def test_config_id_map_parse_count_not_int(self):
+ xmlin = "<uid start='2' target='20000' count='a'/>"
+ obj = config.LibvirtConfigGuestIDMap()
+
+ self.assertRaises(ValueError, obj.parse_str, xmlin)
+
+ def test_config_uid_map(self):
+ obj = config.LibvirtConfigGuestUIDMap()
+ obj.start = 1
+ obj.target = 10000
+ obj.count = 2
+
+ xml = obj.to_xml()
+ self.assertXmlEqual("<uid start='1' target='10000' count='2'/>", xml)
+
+ def test_config_uid_map_parse(self):
+ xmlin = "<uid start='2' target='20000' count='5'/>"
+ obj = config.LibvirtConfigGuestUIDMap()
+ obj.parse_str(xmlin)
+
+ self.assertEqual(2, obj.start)
+ self.assertEqual(20000, obj.target)
+ self.assertEqual(5, obj.count)
+
+ def test_config_gid_map(self):
+ obj = config.LibvirtConfigGuestGIDMap()
+ obj.start = 1
+ obj.target = 10000
+ obj.count = 2
+
+ xml = obj.to_xml()
+ self.assertXmlEqual("<gid start='1' target='10000' count='2'/>", xml)
+
+ def test_config_gid_map_parse(self):
+ xmlin = "<gid start='2' target='20000' count='5'/>"
+ obj = config.LibvirtConfigGuestGIDMap()
+ obj.parse_str(xmlin)
+
+ self.assertEqual(2, obj.start)
+ self.assertEqual(20000, obj.target)
+ self.assertEqual(5, obj.count)
+
+
+class LibvirtConfigMemoryBalloonTest(LibvirtConfigBaseTest):
+
+ def test_config_memory_balloon_period(self):
+ balloon = config.LibvirtConfigMemoryBalloon()
+ balloon.model = 'fake_virtio'
+ balloon.period = 11
+
+ xml = balloon.to_xml()
+ expected_xml = """
+ <memballoon model='fake_virtio'>
+ <stats period='11'/>
+ </memballoon>"""
+
+ self.assertXmlEqual(expected_xml, xml)
diff --git a/nova/tests/virt/libvirt/test_designer.py b/nova/tests/unit/virt/libvirt/test_designer.py
index 649144c0d1..649144c0d1 100644
--- a/nova/tests/virt/libvirt/test_designer.py
+++ b/nova/tests/unit/virt/libvirt/test_designer.py
diff --git a/nova/tests/virt/libvirt/test_dmcrypt.py b/nova/tests/unit/virt/libvirt/test_dmcrypt.py
index 02efbe10b5..02efbe10b5 100644
--- a/nova/tests/virt/libvirt/test_dmcrypt.py
+++ b/nova/tests/unit/virt/libvirt/test_dmcrypt.py
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
new file mode 100644
index 0000000000..90e25e1b3b
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -0,0 +1,12576 @@
+# Copyright 2010 OpenStack Foundation
+# Copyright 2012 University Of Minho
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import __builtin__
+import contextlib
+import copy
+import datetime
+import errno
+import os
+import random
+import re
+import shutil
+import threading
+import time
+import uuid
+
+import eventlet
+from eventlet import greenthread
+import fixtures
+from lxml import etree
+import mock
+import mox
+from oslo.concurrency import lockutils
+from oslo.concurrency import processutils
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import encodeutils
+from oslo.utils import importutils
+from oslo.utils import timeutils
+from oslo.utils import units
+import six
+
+from nova.api.metadata import base as instance_metadata
+from nova.compute import arch
+from nova.compute import manager
+from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import utils as compute_utils
+from nova.compute import vm_mode
+from nova.compute import vm_states
+from nova import context
+from nova import db
+from nova import exception
+from nova.network import model as network_model
+from nova import objects
+from nova.openstack.common import fileutils
+from nova.openstack.common import loopingcall
+from nova.openstack.common import uuidutils
+from nova.pci import manager as pci_manager
+from nova import test
+from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network
+import nova.tests.unit.image.fake
+from nova.tests.unit import matchers
+from nova.tests.unit.objects import test_pci_device
+from nova.tests.unit.virt.libvirt import fake_imagebackend
+from nova.tests.unit.virt.libvirt import fake_libvirt_utils
+from nova.tests.unit.virt.libvirt import fakelibvirt
+from nova import utils
+from nova import version
+from nova.virt import block_device as driver_block_device
+from nova.virt import configdrive
+from nova.virt.disk import api as disk
+from nova.virt import driver
+from nova.virt import event as virtevent
+from nova.virt import fake
+from nova.virt import firewall as base_firewall
+from nova.virt import hardware
+from nova.virt import images
+from nova.virt.libvirt import blockinfo
+from nova.virt.libvirt import config as vconfig
+from nova.virt.libvirt import driver as libvirt_driver
+from nova.virt.libvirt import firewall
+from nova.virt.libvirt import imagebackend
+from nova.virt.libvirt import rbd_utils
+from nova.virt.libvirt import utils as libvirt_utils
+
+try:
+ import libvirt
+except ImportError:
+ libvirt = fakelibvirt
+libvirt_driver.libvirt = libvirt
+
+
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('my_ip', 'nova.netconf')
+CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
+CONF.import_opt('instances_path', 'nova.compute.manager')
+
+_fake_network_info = fake_network.fake_get_instance_nw_info
+
+_fake_NodeDevXml = \
+ {"pci_0000_04_00_3": """
+ <device>
+ <name>pci_0000_04_00_3</name>
+ <parent>pci_0000_00_01_1</parent>
+ <driver>
+ <name>igb</name>
+ </driver>
+ <capability type='pci'>
+ <domain>0</domain>
+ <bus>4</bus>
+ <slot>0</slot>
+ <function>3</function>
+ <product id='0x1521'>I350 Gigabit Network Connection</product>
+ <vendor id='0x8086'>Intel Corporation</vendor>
+ <capability type='virt_functions'>
+ <address domain='0x0000' bus='0x04' slot='0x10' function='0x3'/>
+ <address domain='0x0000' bus='0x04' slot='0x10' function='0x7'/>
+ <address domain='0x0000' bus='0x04' slot='0x11' function='0x3'/>
+ <address domain='0x0000' bus='0x04' slot='0x11' function='0x7'/>
+ </capability>
+ </capability>
+ </device>""",
+ "pci_0000_04_10_7": """
+ <device>
+ <name>pci_0000_04_10_7</name>
+ <parent>pci_0000_00_01_1</parent>
+ <driver>
+ <name>igbvf</name>
+ </driver>
+ <capability type='pci'>
+ <domain>0</domain>
+ <bus>4</bus>
+ <slot>16</slot>
+ <function>7</function>
+ <product id='0x1520'>I350 Ethernet Controller Virtual Function</product>
+ <vendor id='0x8086'>Intel Corporation</vendor>
+ <capability type='phys_function'>
+ <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
+ </capability>
+ <capability type='virt_functions'>
+ </capability>
+ </capability>
+ </device>"""}
+
+
+def _concurrency(signal, wait, done, target, is_block_dev=False):
+ signal.send()
+ wait.wait()
+ done.send()
+
+
+class FakeVirDomainSnapshot(object):
+
+ def __init__(self, dom=None):
+ self.dom = dom
+
+ def delete(self, flags):
+ pass
+
+
+class FakeVirtDomain(object):
+
+ def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None):
+ if uuidstr is None:
+ uuidstr = str(uuid.uuid4())
+ self.uuidstr = uuidstr
+ self.id = id
+ self.domname = name
+ self._info = [power_state.RUNNING, 2048 * units.Mi, 1234 * units.Mi,
+ None, None]
+ if fake_xml:
+ self._fake_dom_xml = fake_xml
+ else:
+ self._fake_dom_xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ </disk>
+ </devices>
+ </domain>
+ """
+
+ def name(self):
+ if self.domname is None:
+ return "fake-domain %s" % self
+ else:
+ return self.domname
+
+ def ID(self):
+ return self.id
+
+ def info(self):
+ return self._info
+
+ def create(self):
+ pass
+
+ def managedSave(self, *args):
+ pass
+
+ def createWithFlags(self, launch_flags):
+ pass
+
+ def XMLDesc(self, *args):
+ return self._fake_dom_xml
+
+ def UUIDString(self):
+ return self.uuidstr
+
+ def attachDeviceFlags(self, xml, flags):
+ pass
+
+ def attachDevice(self, xml):
+ pass
+
+ def detachDeviceFlags(self, xml, flags):
+ pass
+
+ def snapshotCreateXML(self, xml, flags):
+ pass
+
+ def blockCommit(self, disk, base, top, bandwidth=0, flags=0):
+ pass
+
+ def blockRebase(self, disk, base, bandwidth=0, flags=0):
+ pass
+
+ def blockJobInfo(self, path, flags):
+ pass
+
+ def resume(self):
+ pass
+
+ def destroy(self):
+ pass
+
+
+class CacheConcurrencyTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(CacheConcurrencyTestCase, self).setUp()
+
+ self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
+
+ # utils.synchronized() will create the lock_path for us if it
+ # doesn't already exist. It will also delete it when it's done,
+ # which can cause race conditions with the multiple threads we
+ # use for tests. So, create the path here so utils.synchronized()
+ # won't delete it out from under one of the threads.
+ self.lock_path = os.path.join(CONF.instances_path, 'locks')
+ fileutils.ensure_tree(self.lock_path)
+
+ def fake_exists(fname):
+ basedir = os.path.join(CONF.instances_path,
+ CONF.image_cache_subdirectory_name)
+ if fname == basedir or fname == self.lock_path:
+ return True
+ return False
+
+ def fake_execute(*args, **kwargs):
+ pass
+
+ def fake_extend(image, size, use_cow=False):
+ pass
+
+ self.stubs.Set(os.path, 'exists', fake_exists)
+ self.stubs.Set(utils, 'execute', fake_execute)
+ self.stubs.Set(imagebackend.disk, 'extend', fake_extend)
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.imagebackend.libvirt_utils',
+ fake_libvirt_utils))
+
+ def test_same_fname_concurrency(self):
+        # Ensures that the same fname cache runs sequentially.
+ uuid = uuidutils.generate_uuid()
+
+ backend = imagebackend.Backend(False)
+ wait1 = eventlet.event.Event()
+ done1 = eventlet.event.Event()
+ sig1 = eventlet.event.Event()
+ thr1 = eventlet.spawn(backend.image({'name': 'instance',
+ 'uuid': uuid},
+ 'name').cache,
+ _concurrency, 'fname', None,
+ signal=sig1, wait=wait1, done=done1)
+ eventlet.sleep(0)
+ # Thread 1 should run before thread 2.
+ sig1.wait()
+
+ wait2 = eventlet.event.Event()
+ done2 = eventlet.event.Event()
+ sig2 = eventlet.event.Event()
+ thr2 = eventlet.spawn(backend.image({'name': 'instance',
+ 'uuid': uuid},
+ 'name').cache,
+ _concurrency, 'fname', None,
+ signal=sig2, wait=wait2, done=done2)
+
+ wait2.send()
+ eventlet.sleep(0)
+ try:
+ self.assertFalse(done2.ready())
+ finally:
+ wait1.send()
+ done1.wait()
+ eventlet.sleep(0)
+ self.assertTrue(done2.ready())
+ # Wait on greenthreads to assert they didn't raise exceptions
+ # during execution
+ thr1.wait()
+ thr2.wait()
+
+ def test_different_fname_concurrency(self):
+ # Ensures that two different fname caches are concurrent.
+ uuid = uuidutils.generate_uuid()
+
+ backend = imagebackend.Backend(False)
+ wait1 = eventlet.event.Event()
+ done1 = eventlet.event.Event()
+ sig1 = eventlet.event.Event()
+ thr1 = eventlet.spawn(backend.image({'name': 'instance',
+ 'uuid': uuid},
+ 'name').cache,
+ _concurrency, 'fname2', None,
+ signal=sig1, wait=wait1, done=done1)
+ eventlet.sleep(0)
+ # Thread 1 should run before thread 2.
+ sig1.wait()
+
+ wait2 = eventlet.event.Event()
+ done2 = eventlet.event.Event()
+ sig2 = eventlet.event.Event()
+ thr2 = eventlet.spawn(backend.image({'name': 'instance',
+ 'uuid': uuid},
+ 'name').cache,
+ _concurrency, 'fname1', None,
+ signal=sig2, wait=wait2, done=done2)
+ eventlet.sleep(0)
+ # Wait for thread 2 to start.
+ sig2.wait()
+
+ wait2.send()
+ tries = 0
+ while not done2.ready() and tries < 10:
+ eventlet.sleep(0)
+ tries += 1
+ try:
+ self.assertTrue(done2.ready())
+ finally:
+ wait1.send()
+ eventlet.sleep(0)
+ # Wait on greenthreads to assert they didn't raise exceptions
+ # during execution
+ thr1.wait()
+ thr2.wait()
+
+
+class FakeVolumeDriver(object):
+ def __init__(self, *args, **kwargs):
+ pass
+
+ def attach_volume(self, *args):
+ pass
+
+ def detach_volume(self, *args):
+ pass
+
+ def get_xml(self, *args):
+ return ""
+
+ def get_config(self, *args):
+ """Connect the volume to a fake device."""
+ conf = vconfig.LibvirtConfigGuestDisk()
+ conf.source_type = "network"
+ conf.source_protocol = "fake"
+ conf.source_name = "fake"
+ conf.target_dev = "fake"
+ conf.target_bus = "fake"
+ return conf
+
+ def connect_volume(self, *args):
+ """Connect the volume to a fake device."""
+ return self.get_config()
+
+
+class FakeConfigGuestDisk(object):
+ def __init__(self, *args, **kwargs):
+ self.source_type = None
+ self.driver_cache = None
+
+
+class FakeConfigGuest(object):
+ def __init__(self, *args, **kwargs):
+ self.driver_cache = None
+
+
+class FakeNodeDevice(object):
+ def __init__(self, fakexml):
+ self.xml = fakexml
+
+ def XMLDesc(self, *args):
+ return self.xml
+
+
+class LibvirtConnTestCase(test.NoDBTestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(LibvirtConnTestCase, self).setUp()
+ self.flags(fake_call=True)
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.get_admin_context()
+ temp_dir = self.useFixture(fixtures.TempDir()).path
+ self.flags(instances_path=temp_dir)
+ self.flags(snapshots_directory=temp_dir, group='libvirt')
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.driver.libvirt_utils',
+ fake_libvirt_utils))
+ # Force libvirt to return a host UUID that matches the serial in
+ # nova.tests.unit.fakelibvirt. This is necessary because the host UUID
+ # returned by libvirt becomes the serial whose value is checked for in
+ # test_xml_and_uri_* below.
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.driver.LibvirtDriver._get_host_uuid',
+ lambda _: 'cef19ce0-0ca2-11df-855d-b19fbce37686'))
+ # Prevent test suite trying to find /etc/machine-id
+ # which isn't guaranteed to exist. Instead it will use
+ # the host UUID from libvirt which we mock above
+ self.flags(sysinfo_serial="hardware", group="libvirt")
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.imagebackend.libvirt_utils',
+ fake_libvirt_utils))
+
+ def fake_extend(image, size, use_cow=False):
+ pass
+
+ self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)
+
+ self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
+ imagebackend.Image._get_driver_format)
+
+ class FakeConn():
+ def baselineCPU(self, cpu, flag):
+ """Add new libvirt API."""
+ return """<cpu mode='custom' match='exact'>
+ <model fallback='allow'>Westmere</model>
+ <vendor>Intel</vendor>
+ <feature policy='require' name='aes'/>
+ <feature policy='require' name='hypervisor'/>
+ </cpu>"""
+
+ def getCapabilities(self):
+ """Ensure standard capabilities being returned."""
+ return """<capabilities>
+ <host><cpu><arch>x86_64</arch>
+ <feature policy='require' name='hypervisor'/>
+ </cpu></host>
+ </capabilities>"""
+
+ def getVersion(self):
+ return 1005001
+
+ def getLibVersion(self):
+ return (0 * 1000 * 1000) + (9 * 1000) + 11
+
+ def domainEventRegisterAny(self, *args, **kwargs):
+ pass
+
+ def registerCloseCallback(self, cb, opaque):
+ pass
+
+ def nwfilterDefineXML(self, *args, **kwargs):
+ pass
+
+ def nodeDeviceLookupByName(self, x):
+ pass
+
+ def listDevices(self, cap, flags):
+ return []
+
+ def lookupByName(self, name):
+ pass
+
+ def getHostname(self):
+ return "mustard"
+
+ def getType(self):
+ return "QEMU"
+
+ def numOfDomains(self):
+ return 0
+
+ def listDomainsID(self):
+ return []
+
+ def listDefinedDomains(self):
+ return []
+
+ def getInfo(self):
+ return [arch.X86_64, 123456, 2, 2000,
+ 2, 1, 1, 1]
+
+ self.conn = FakeConn()
+ self.stubs.Set(libvirt_driver.LibvirtDriver, '_connect',
+ lambda *a, **k: self.conn)
+
+ sys_meta = {
+ 'instance_type_memory_mb': 2048,
+ 'instance_type_swap': 0,
+ 'instance_type_vcpu_weight': None,
+ 'instance_type_root_gb': 1,
+ 'instance_type_id': 2,
+ 'instance_type_name': u'm1.small',
+ 'instance_type_ephemeral_gb': 0,
+ 'instance_type_rxtx_factor': 1.0,
+ 'instance_type_flavorid': u'1',
+ 'instance_type_vcpus': 1
+ }
+
+ self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
+ self.stubs)
+ self.test_instance = {
+ 'id': 1,
+ 'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
+ 'memory_kb': '1024000',
+ 'basepath': '/some/path',
+ 'bridge_name': 'br100',
+ 'display_name': "Acme webserver",
+ 'vcpus': 2,
+ 'project_id': 'fake',
+ 'bridge': 'br101',
+ 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ 'root_gb': 10,
+ 'ephemeral_gb': 20,
+ 'instance_type_id': '5', # m1.small
+ 'extra_specs': {},
+ 'system_metadata': sys_meta,
+ 'pci_devices': objects.PciDeviceList(),
+ 'numa_topology': None,
+ 'config_drive': None,
+ 'vm_mode': None,
+ 'kernel_id': None,
+ 'ramdisk_id': None,
+ 'os_type': 'linux',
+ 'user_id': '838a72b0-0d54-4827-8fd6-fb1227633ceb',
+ 'ephemeral_key_uuid': None,
+ }
+
+ def relpath(self, path):
+ return os.path.relpath(path, CONF.instances_path)
+
+ def tearDown(self):
+ nova.tests.unit.image.fake.FakeImageService_reset()
+ super(LibvirtConnTestCase, self).tearDown()
+
+ def create_fake_libvirt_mock(self, **kwargs):
+ """Defining mocks for LibvirtDriver(libvirt is not used)."""
+
+ # A fake libvirt.virConnect
+ class FakeLibvirtDriver(object):
+ def defineXML(self, xml):
+ return FakeVirtDomain()
+
+ # Creating mocks
+ volume_driver = ('iscsi=nova.tests.unit.virt.libvirt.test_driver'
+ '.FakeVolumeDriver')
+ self.flags(volume_drivers=[volume_driver],
+ group='libvirt')
+ fake = FakeLibvirtDriver()
+ # Customizing above fake if necessary
+ for key, val in kwargs.items():
+ fake.__setattr__(key, val)
+
+ self.stubs.Set(libvirt_driver.LibvirtDriver, '_conn', fake)
+
+ def fake_lookup(self, instance_name):
+ return FakeVirtDomain()
+
+ def fake_execute(self, *args, **kwargs):
+ open(args[-1], "a").close()
+
+ def _create_service(self, **kwargs):
+ service_ref = {'host': kwargs.get('host', 'dummy'),
+ 'disabled': kwargs.get('disabled', False),
+ 'binary': 'nova-compute',
+ 'topic': 'compute',
+ 'report_count': 0}
+
+ return objects.Service(**service_ref)
+
+ def _get_launch_flags(self, conn, network_info, power_on=True,
+ vifs_already_plugged=False):
+ timeout = CONF.vif_plugging_timeout
+
+ events = []
+ if (conn._conn_supports_start_paused and
+ utils.is_neutron() and
+ not vifs_already_plugged and
+ power_on and timeout):
+ events = conn._get_neutron_events(network_info)
+
+ launch_flags = events and libvirt.VIR_DOMAIN_START_PAUSED or 0
+
+ return launch_flags
+
+ def test_public_api_signatures(self):
+ baseinst = driver.ComputeDriver(None)
+ inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.assertPublicAPISignatures(baseinst, inst)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_min_version")
+ def test_min_version_start_ok(self, mock_version):
+ mock_version.return_value = True
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ drvr.init_host("dummyhost")
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_min_version")
+ def test_min_version_start_abort(self, mock_version):
+ mock_version.return_value = False
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.assertRaises(exception.NovaException,
+ drvr.init_host,
+ "dummyhost")
+
+ @mock.patch.object(objects.Service, 'get_by_compute_host')
+ def test_set_host_enabled_with_disable(self, mock_svc):
+ # Tests disabling an enabled host.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ svc = self._create_service(host='fake-mini')
+ mock_svc.return_value = svc
+ conn._set_host_enabled(False)
+ self.assertTrue(svc.disabled)
+
+ @mock.patch.object(objects.Service, 'get_by_compute_host')
+ def test_set_host_enabled_with_enable(self, mock_svc):
+ # Tests enabling a disabled host.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ svc = self._create_service(disabled=True, host='fake-mini')
+ mock_svc.return_value = svc
+ conn._set_host_enabled(True)
+ self.assertTrue(svc.disabled)
+
+ @mock.patch.object(objects.Service, 'get_by_compute_host')
+ def test_set_host_enabled_with_enable_state_enabled(self, mock_svc):
+ # Tests enabling an enabled host.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ svc = self._create_service(disabled=False, host='fake-mini')
+ mock_svc.return_value = svc
+ conn._set_host_enabled(True)
+ self.assertFalse(svc.disabled)
+
+ @mock.patch.object(objects.Service, 'get_by_compute_host')
+ def test_set_host_enabled_with_disable_state_disabled(self, mock_svc):
+ # Tests disabling a disabled host.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ svc = self._create_service(disabled=True, host='fake-mini')
+ mock_svc.return_value = svc
+ conn._set_host_enabled(False)
+ self.assertTrue(svc.disabled)
+
+ def test_set_host_enabled_swallows_exceptions(self):
+ # Tests that set_host_enabled will swallow exceptions coming from the
+ # db_api code so they don't break anything calling it, e.g. the
+ # _get_new_connection method.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ with mock.patch.object(db, 'service_get_by_compute_host') as db_mock:
+ # Make db.service_get_by_compute_host raise NovaException; this
+ # is more robust than just raising ComputeHostNotFound.
+ db_mock.side_effect = exception.NovaException
+ conn._set_host_enabled(False)
+
+ def test_prepare_pci_device(self):
+
+ pci_devices = [dict(hypervisor_name='xxx')]
+
+ self.flags(virt_type='xen', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ class FakeDev():
+ def attach(self):
+ pass
+
+ def dettach(self):
+ pass
+
+ def reset(self):
+ pass
+
+ self.mox.StubOutWithMock(self.conn, 'nodeDeviceLookupByName')
+ self.conn.nodeDeviceLookupByName('xxx').AndReturn(FakeDev())
+ self.conn.nodeDeviceLookupByName('xxx').AndReturn(FakeDev())
+ self.mox.ReplayAll()
+ conn._prepare_pci_devices_for_use(pci_devices)
+
+ def test_prepare_pci_device_exception(self):
+
+ pci_devices = [dict(hypervisor_name='xxx',
+ id='id1',
+ instance_uuid='uuid')]
+
+ self.flags(virt_type='xen', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ class FakeDev():
+
+ def attach(self):
+ pass
+
+ def dettach(self):
+ raise libvirt.libvirtError("xxxxx")
+
+ def reset(self):
+ pass
+
+ self.stubs.Set(self.conn, 'nodeDeviceLookupByName',
+ lambda x: FakeDev())
+ self.assertRaises(exception.PciDevicePrepareFailed,
+ conn._prepare_pci_devices_for_use, pci_devices)
+
+ def test_detach_pci_devices_exception(self):
+
+ pci_devices = [dict(hypervisor_name='xxx',
+ id='id1',
+ instance_uuid='uuid')]
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ '_has_min_version')
+ libvirt_driver.LibvirtDriver._has_min_version = lambda x, y: False
+
+ self.assertRaises(exception.PciDeviceDetachFailed,
+ conn._detach_pci_devices, None, pci_devices)
+
+ def test_detach_pci_devices(self):
+
+ fake_domXML1 =\
+ """<domain> <devices>
+ <disk type='file' device='disk'>
+ <driver name='qemu' type='qcow2' cache='none'/>
+ <source file='xxx'/>
+ <target dev='vda' bus='virtio'/>
+ <alias name='virtio-disk0'/>
+ <address type='pci' domain='0x0000' bus='0x00'
+ slot='0x04' function='0x0'/>
+ </disk>
+ <hostdev mode="subsystem" type="pci" managed="yes">
+ <source>
+ <address function="0x1" slot="0x10" domain="0x0000"
+ bus="0x04"/>
+ </source>
+ </hostdev></devices></domain>"""
+
+ pci_devices = [dict(hypervisor_name='xxx',
+ id='id1',
+ instance_uuid='uuid',
+ address="0001:04:10:1")]
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ '_has_min_version')
+ libvirt_driver.LibvirtDriver._has_min_version = lambda x, y: True
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ '_get_guest_pci_device')
+
+ class FakeDev():
+ def to_xml(self):
+ pass
+
+ libvirt_driver.LibvirtDriver._get_guest_pci_device =\
+ lambda x, y: FakeDev()
+
+ class FakeDomain():
+ def detachDeviceFlags(self, xml, flag):
+ pci_devices[0]['hypervisor_name'] = 'marked'
+ pass
+
+ def XMLDesc(self, flag):
+ return fake_domXML1
+
+ conn._detach_pci_devices(FakeDomain(), pci_devices)
+ self.assertEqual(pci_devices[0]['hypervisor_name'], 'marked')
+
+ def test_detach_pci_devices_timeout(self):
+
+ fake_domXML1 =\
+ """<domain>
+ <devices>
+ <hostdev mode="subsystem" type="pci" managed="yes">
+ <source>
+ <address function="0x1" slot="0x10" domain="0x0000" bus="0x04"/>
+ </source>
+ </hostdev>
+ </devices>
+ </domain>"""
+
+ pci_devices = [dict(hypervisor_name='xxx',
+ id='id1',
+ instance_uuid='uuid',
+ address="0000:04:10:1")]
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ '_has_min_version')
+ libvirt_driver.LibvirtDriver._has_min_version = lambda x, y: True
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ '_get_guest_pci_device')
+
+ class FakeDev():
+ def to_xml(self):
+ pass
+
+ libvirt_driver.LibvirtDriver._get_guest_pci_device =\
+ lambda x, y: FakeDev()
+
+ class FakeDomain():
+ def detachDeviceFlags(self, xml, flag):
+ pass
+
+ def XMLDesc(self, flag):
+ return fake_domXML1
+ self.assertRaises(exception.PciDeviceDetachFailed,
+ conn._detach_pci_devices, FakeDomain(), pci_devices)
+
+ def test_get_connector(self):
+ initiator = 'fake.initiator.iqn'
+ ip = 'fakeip'
+ host = 'fakehost'
+ wwpns = ['100010604b019419']
+ wwnns = ['200010604b019419']
+ self.flags(my_ip=ip)
+ self.flags(host=host)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ expected = {
+ 'ip': ip,
+ 'initiator': initiator,
+ 'host': host,
+ 'wwpns': wwpns,
+ 'wwnns': wwnns
+ }
+ volume = {
+ 'id': 'fake'
+ }
+ result = conn.get_volume_connector(volume)
+ self.assertThat(expected, matchers.DictMatches(result))
+
+ def test_lifecycle_event_registration(self):
+ calls = []
+
+ def fake_registerErrorHandler(*args, **kwargs):
+ calls.append('fake_registerErrorHandler')
+
+ def fake_get_host_capabilities(**args):
+ cpu = vconfig.LibvirtConfigGuestCPU()
+ cpu.arch = arch.ARMV7
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = cpu
+ calls.append('fake_get_host_capabilities')
+ return caps
+
+ @mock.patch.object(libvirt, 'registerErrorHandler',
+ side_effect=fake_registerErrorHandler)
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_get_host_capabilities',
+ side_effect=fake_get_host_capabilities)
+ def test_init_host(get_host_capabilities, register_error_handler):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ conn.init_host("test_host")
+
+ test_init_host()
+ # NOTE(dkliban): Will fail if get_host_capabilities is called before
+ # registerErrorHandler
+ self.assertEqual(['fake_registerErrorHandler',
+ 'fake_get_host_capabilities'], calls)
+
+ @mock.patch.object(libvirt_driver, 'LOG')
+ def test_connect_auth_cb_exception(self, log_mock):
+ # _connect_auth_cb must raise NovaException on bad credentials and
+ # must not log (the module LOG mock records zero calls).
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ creds = dict(authname='nova', password='verybadpass')
+ self.assertRaises(exception.NovaException,
+ conn._connect_auth_cb, creds, False)
+ self.assertEqual(0, len(log_mock.method_calls),
+ 'LOG should not be used in _connect_auth_cb.')
+
+ def test_sanitize_log_to_xml(self):
+ # Verify _get_guest_xml scrubs volume auth passwords out of its
+ # debug log output.
+ # setup fake data
+ data = {'auth_password': 'scrubme'}
+ bdm = [{'connection_info': {'data': data}}]
+ bdi = {'block_device_mapping': bdm}
+
+ # Tests that the parameters to the _get_guest_xml method
+ # are sanitized for passwords when logged.
+ def fake_debug(*args, **kwargs):
+ # Only inspect the log call that actually contains the BDM data.
+ if 'auth_password' in args[0]:
+ self.assertNotIn('scrubme', args[0])
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ conf = mock.Mock()
+ with contextlib.nested(
+ mock.patch.object(libvirt_driver.LOG, 'debug',
+ side_effect=fake_debug),
+ mock.patch.object(conn, '_get_guest_config', return_value=conf)
+ ) as (
+ debug_mock, conf_mock
+ ):
+ conn._get_guest_xml(self.context, self.test_instance,
+ network_info={}, disk_info={},
+ image_meta={}, block_device_info=bdi)
+ # we don't care what the log message is, we just want to make sure
+ # our stub method is called which asserts the password is scrubbed
+ self.assertTrue(debug_mock.called)
+
+ def test_close_callback(self):
+ # Verify the driver registers a libvirt close callback, that the
+ # compute service is marked disabled when the callback fires, and
+ # that a subsequent _get_connection() re-connects.
+ self.close_callback = None
+
+ def set_close_callback(cb, opaque):
+ # Capture the callback the driver registers so the test can
+ # invoke it manually below.
+ self.close_callback = cb
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ service_mock = mock.MagicMock()
+ service_mock.disabled.return_value = False
+ with contextlib.nested(
+ mock.patch.object(conn, "_connect", return_value=self.conn),
+ mock.patch.object(self.conn, "registerCloseCallback",
+ side_effect=set_close_callback),
+ mock.patch.object(objects.Service, "get_by_compute_host",
+ return_value=service_mock)):
+
+ # verify that the driver registers for the close callback
+ # and re-connects after receiving the callback
+ conn._get_connection()
+ self.assertFalse(service_mock.disabled)
+ self.assertTrue(self.close_callback)
+ conn._init_events_pipe()
+ self.close_callback(self.conn, 1, None)
+ conn._dispatch_events()
+
+ self.assertTrue(service_mock.disabled)
+ conn._get_connection()
+
+ def test_close_callback_bad_signature(self):
+ '''Validates that a connection to libvirt exists,
+ even when the registerCloseCallback method has a different
+ number of arguments in the libvirt python library
+ (registering it raises TypeError).
+ '''
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ service_mock = mock.MagicMock()
+ service_mock.disabled.return_value = False
+ with contextlib.nested(
+ mock.patch.object(conn, "_connect", return_value=self.conn),
+ mock.patch.object(self.conn, "registerCloseCallback",
+ side_effect=TypeError('dd')),
+ mock.patch.object(objects.Service, "get_by_compute_host",
+ return_value=service_mock)):
+
+ connection = conn._get_connection()
+ self.assertTrue(connection)
+
+ def test_close_callback_not_defined(self):
+ '''Validates that a connection to libvirt exists,
+ even when the registerCloseCallback method is missing from
+ the libvirt python library (accessing it raises AttributeError).
+ '''
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ service_mock = mock.MagicMock()
+ service_mock.disabled.return_value = False
+ with contextlib.nested(
+ mock.patch.object(conn, "_connect", return_value=self.conn),
+ mock.patch.object(self.conn, "registerCloseCallback",
+ side_effect=AttributeError('dd')),
+ mock.patch.object(objects.Service, "get_by_compute_host",
+ return_value=service_mock)):
+
+ connection = conn._get_connection()
+ self.assertTrue(connection)
+
+ def test_cpu_features_bug_1217630(self):
+ # Host capabilities should only include baseline-expanded CPU
+ # features (e.g. 'aes') when libvirt exposes
+ # VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ # Test old version of libvirt, it shouldn't see the `aes' feature
+ with mock.patch('nova.virt.libvirt.driver.libvirt') as mock_libvirt:
+ del mock_libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
+ caps = conn._get_host_capabilities()
+ self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])
+
+ # Test new version of libvirt, should find the `aes' feature
+ with mock.patch('nova.virt.libvirt.driver.libvirt') as mock_libvirt:
+ mock_libvirt['VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'] = 1
+ # Clean up the capabilities cache first so the second call
+ # re-queries instead of returning the cached old-libvirt result.
+ conn._caps = None
+ caps = conn._get_host_capabilities()
+ self.assertIn('aes', [x.name for x in caps.host.cpu.features])
+
+ def test_cpu_features_are_not_duplicated(self):
+ # The 'hypervisor' feature must appear exactly once in host CPU
+ # features, with or without baseline CPU feature expansion.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ # Test old version of libvirt. Should return single 'hypervisor'
+ with mock.patch('nova.virt.libvirt.driver.libvirt') as mock_libvirt:
+ del mock_libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
+ caps = conn._get_host_capabilities()
+ cnt = [x.name for x in caps.host.cpu.features].count('hypervisor')
+ self.assertEqual(1, cnt)
+
+ # Test new version of libvirt. Should still return single 'hypervisor'
+ with mock.patch('nova.virt.libvirt.driver.libvirt') as mock_libvirt:
+ mock_libvirt['VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'] = 1
+ # Clean up the capabilities cache first so the second call
+ # re-queries instead of returning the cached result.
+ conn._caps = None
+ caps = conn._get_host_capabilities()
+ cnt = [x.name for x in caps.host.cpu.features].count('hypervisor')
+ self.assertEqual(1, cnt)
+
+ def test_baseline_cpu_not_supported(self):
+ # baselineCPU raising VIR_ERR_NO_SUPPORT must be tolerated (caps are
+ # still returned, just without expanded features); any other libvirt
+ # error must propagate.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ # `mock` has trouble stubbing attributes that don't exist yet, so
+ # fallback to plain-Python attribute setting/deleting
+ cap_str = 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'
+ if not hasattr(libvirt_driver.libvirt, cap_str):
+ setattr(libvirt_driver.libvirt, cap_str, True)
+ self.addCleanup(delattr, libvirt_driver.libvirt, cap_str)
+
+ # Handle just the NO_SUPPORT error
+ not_supported_exc = fakelibvirt.make_libvirtError(
+ libvirt.libvirtError,
+ 'this function is not supported by the connection driver:'
+ ' virConnectBaselineCPU',
+ error_code=libvirt.VIR_ERR_NO_SUPPORT)
+
+ with mock.patch.object(conn._conn, 'baselineCPU',
+ side_effect=not_supported_exc):
+ caps = conn._get_host_capabilities()
+ self.assertEqual(vconfig.LibvirtConfigCaps, type(caps))
+ self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])
+
+ # Clear cached result so we can test again...
+ conn._caps = None
+
+ # Other errors should not be caught
+ other_exc = fakelibvirt.make_libvirtError(
+ libvirt.libvirtError,
+ 'other exc',
+ error_code=libvirt.VIR_ERR_NO_DOMAIN)
+
+ with mock.patch.object(conn._conn, 'baselineCPU',
+ side_effect=other_exc):
+ self.assertRaises(libvirt.libvirtError,
+ conn._get_host_capabilities)
+
+ def test_lxc_get_host_capabilities_failed(self):
+ # A baselineCPU return of -1 (failure) must still yield usable host
+ # capabilities, just without expanded features such as 'aes'.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ with mock.patch.object(conn._conn, 'baselineCPU', return_value=-1):
+ # Temporarily expose the expand-features flag on the module;
+ # removed again right after the call.
+ setattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES', 1)
+ caps = conn._get_host_capabilities()
+ delattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES')
+ self.assertEqual(vconfig.LibvirtConfigCaps, type(caps))
+ self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch.object(time, "time")
+ def test_get_guest_config(self, time_mock, mock_flavor):
+ # End-to-end check of _get_guest_config for the default (HVM) case:
+ # basic guest settings, the expected 9-device layout, and the Nova
+ # instance metadata element (package/name/creationTime/root/owner/
+ # flavor) built from the request context and flavor.
+ time_mock.return_value = 1234567.89
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["display_name"] = "purple tomatoes"
+
+ ctxt = context.RequestContext(project_id=123,
+ project_name="aubergine",
+ user_id=456,
+ user_name="pie")
+
+ flavor = objects.Flavor(name='m1.small',
+ memory_mb=6,
+ vcpus=28,
+ root_gb=496,
+ ephemeral_gb=8128,
+ swap=33550336,
+ extra_specs={})
+ instance_ref = objects.Instance(**test_instance)
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info,
+ context=ctxt)
+
+ self.assertEqual(cfg.uuid, instance_ref["uuid"])
+ self.assertEqual(cfg.pae, False)
+ self.assertEqual(cfg.acpi, True)
+ self.assertEqual(cfg.apic, True)
+ self.assertEqual(cfg.memory, 6 * units.Ki)
+ self.assertEqual(cfg.vcpus, 28)
+ self.assertEqual(cfg.os_type, vm_mode.HVM)
+ self.assertEqual(cfg.os_boot_dev, ["hd"])
+ self.assertIsNone(cfg.os_root)
+ # Expected devices: 2 disks, 1 interface, 2 serials, input,
+ # graphics, video, memory balloon.
+ self.assertEqual(len(cfg.devices), 9)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestInterface)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[8],
+ vconfig.LibvirtConfigMemoryBalloon)
+ self.assertEqual(len(cfg.metadata), 1)
+ self.assertIsInstance(cfg.metadata[0],
+ vconfig.LibvirtConfigGuestMetaNovaInstance)
+ self.assertEqual(version.version_string_with_package(),
+ cfg.metadata[0].package)
+ self.assertEqual("purple tomatoes",
+ cfg.metadata[0].name)
+ self.assertEqual(1234567.89,
+ cfg.metadata[0].creationTime)
+ self.assertEqual("image",
+ cfg.metadata[0].roottype)
+ self.assertEqual(str(instance_ref["image_ref"]),
+ cfg.metadata[0].rootid)
+
+ self.assertIsInstance(cfg.metadata[0].owner,
+ vconfig.LibvirtConfigGuestMetaNovaOwner)
+ self.assertEqual(456,
+ cfg.metadata[0].owner.userid)
+ self.assertEqual("pie",
+ cfg.metadata[0].owner.username)
+ self.assertEqual(123,
+ cfg.metadata[0].owner.projectid)
+ self.assertEqual("aubergine",
+ cfg.metadata[0].owner.projectname)
+
+ self.assertIsInstance(cfg.metadata[0].flavor,
+ vconfig.LibvirtConfigGuestMetaNovaFlavor)
+ self.assertEqual("m1.small",
+ cfg.metadata[0].flavor.name)
+ self.assertEqual(6,
+ cfg.metadata[0].flavor.memory)
+ self.assertEqual(28,
+ cfg.metadata[0].flavor.vcpus)
+ self.assertEqual(496,
+ cfg.metadata[0].flavor.disk)
+ self.assertEqual(8128,
+ cfg.metadata[0].flavor.ephemeral)
+ self.assertEqual(33550336,
+ cfg.metadata[0].flavor.swap)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_lxc(self, mock_flavor):
+ # LXC guests use the EXE machine type with /sbin/init and get only
+ # filesystem, interface and console devices.
+ self.flags(virt_type='lxc', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ None, {'mapping': {}})
+ self.assertEqual(instance_ref["uuid"], cfg.uuid)
+ self.assertEqual(2 * units.Mi, cfg.memory)
+ self.assertEqual(1, cfg.vcpus)
+ self.assertEqual(vm_mode.EXE, cfg.os_type)
+ self.assertEqual("/sbin/init", cfg.os_init_path)
+ self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline)
+ self.assertIsNone(cfg.os_root)
+ self.assertEqual(3, len(cfg.devices))
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestFilesys)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestInterface)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestConsole)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_lxc_with_id_maps(self, mock_flavor):
+ # Same as test_get_guest_config_lxc, but with uid_maps/gid_maps
+ # configured: the guest config must additionally carry one UID map
+ # and one GID map entry.
+ self.flags(virt_type='lxc', group='libvirt')
+ self.flags(uid_maps=['0:1000:100'], group='libvirt')
+ self.flags(gid_maps=['0:1000:100'], group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ None, {'mapping': {}})
+ self.assertEqual(instance_ref["uuid"], cfg.uuid)
+ self.assertEqual(2 * units.Mi, cfg.memory)
+ self.assertEqual(1, cfg.vcpus)
+ self.assertEqual(vm_mode.EXE, cfg.os_type)
+ self.assertEqual("/sbin/init", cfg.os_init_path)
+ self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline)
+ self.assertIsNone(cfg.os_root)
+ self.assertEqual(3, len(cfg.devices))
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestFilesys)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestInterface)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestConsole)
+ self.assertEqual(len(cfg.idmaps), 2)
+ self.assertIsInstance(cfg.idmaps[0],
+ vconfig.LibvirtConfigGuestUIDMap)
+ self.assertIsInstance(cfg.idmaps[1],
+ vconfig.LibvirtConfigGuestGIDMap)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_numa_host_instance_fits(self, mock_flavor):
+ # When the instance fits inside a single host NUMA cell, the guest
+ # is pinned to that cell's cpuset and no cputune/guest-NUMA config
+ # is generated.
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
+ ephemeral_gb=8128, swap=33550336, name='fake',
+ extra_specs={})
+ mock_flavor.return_value = flavor
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = vconfig.LibvirtConfigCPU()
+ caps.host.cpu.arch = "x86_64"
+ caps.host.topology = self._fake_caps_numa_topology()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_has_min_version', return_value=True),
+ mock.patch.object(
+ conn, "_get_host_capabilities", return_value=caps),
+ # Pin random.choice to the first candidate cell so the
+ # expected cpuset is deterministic.
+ mock.patch.object(
+ random, 'choice', side_effect=lambda cells: cells[0])):
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertEqual(set([0, 1]), cfg.cpuset)
+ self.assertIsNone(cfg.cputune)
+ self.assertIsNone(cfg.cpu.numa)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_numa_host_instance_no_fit(self, mock_flavor):
+ # When the instance does not fit in any host NUMA cell, no cell is
+ # chosen (random.choice unused) and the guest falls back to the
+ # configured vcpu_pin_set, with no cputune/guest-NUMA config.
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
+ ephemeral_gb=8128, swap=33550336, name='fake',
+ extra_specs={})
+ mock_flavor.return_value = flavor
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = vconfig.LibvirtConfigCPU()
+ caps.host.cpu.arch = "x86_64"
+ caps.host.topology = self._fake_caps_numa_topology()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ with contextlib.nested(
+ mock.patch.object(
+ conn, "_get_host_capabilities", return_value=caps),
+ mock.patch.object(
+ hardware, 'get_vcpu_pin_set', return_value=set([3])),
+ mock.patch.object(random, 'choice')
+ ) as (get_host_cap_mock,
+ get_vcpu_pin_set_mock, choice_mock):
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertFalse(choice_mock.called)
+ self.assertEqual(set([3]), cfg.cpuset)
+ self.assertIsNone(cfg.cputune)
+ self.assertIsNone(cfg.cpu.numa)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(self,
+ mock_flavor):
+ # The vcpu_pin_set must constrain which host NUMA cells are viable:
+ # with pin set {2, 3} only the matching cell may be offered to
+ # random.choice, and the guest cpuset equals the pin set.
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = objects.Flavor(memory_mb=1024, vcpus=2, root_gb=496,
+ ephemeral_gb=8128, swap=33550336, name='fake',
+ extra_specs={})
+ mock_flavor.return_value = flavor
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = vconfig.LibvirtConfigCPU()
+ caps.host.cpu.arch = "x86_64"
+ caps.host.topology = self._fake_caps_numa_topology()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_has_min_version', return_value=True),
+ mock.patch.object(
+ conn, "_get_host_capabilities", return_value=caps),
+ mock.patch.object(
+ hardware, 'get_vcpu_pin_set', return_value=set([2, 3])),
+ mock.patch.object(
+ random, 'choice', side_effect=lambda cells: cells[0])
+ ) as (has_min_version_mock, get_host_cap_mock,
+ get_vcpu_pin_set_mock, choice_mock):
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ # NOTE(ndipanov): we make sure that pin_set was taken into account
+ # when choosing viable cells
+ choice_mock.assert_called_once_with([set([2, 3])])
+ self.assertEqual(set([2, 3]), cfg.cpuset)
+ self.assertIsNone(cfg.cputune)
+ self.assertIsNone(cfg.cpu.numa)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_non_numa_host_instance_topo(self, mock_flavor):
+ # Instance has a NUMA topology but the host reports none: no pinning
+ # (cpuset/cputune are None), yet the guest NUMA cells must mirror
+ # the instance topology's id/cpuset/memory per cell.
+ instance_topology = objects.InstanceNUMATopology.obj_from_topology(
+ hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(
+ 0, set([0]), 1024),
+ hardware.VirtNUMATopologyCellInstance(
+ 1, set([2]), 1024)]))
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref.numa_topology = instance_topology
+ flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
+ ephemeral_gb=8128, swap=33550336, name='fake',
+ extra_specs={})
+ mock_flavor.return_value = flavor
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = vconfig.LibvirtConfigCPU()
+ caps.host.cpu.arch = "x86_64"
+ caps.host.topology = None
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ with contextlib.nested(
+ mock.patch.object(
+ objects.InstanceNUMATopology, "get_by_instance_uuid",
+ return_value=instance_topology),
+ mock.patch.object(conn, '_has_min_version', return_value=True),
+ mock.patch.object(
+ conn, "_get_host_capabilities", return_value=caps)):
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertIsNone(cfg.cpuset)
+ self.assertIsNone(cfg.cputune)
+ self.assertIsNotNone(cfg.cpu.numa)
+ for instance_cell, numa_cfg_cell in zip(
+ instance_topology.cells, cfg.cpu.numa.cells):
+ self.assertEqual(instance_cell.id, numa_cfg_cell.id)
+ self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
+ self.assertEqual(instance_cell.memory * units.Ki,
+ numa_cfg_cell.memory)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_numa_host_instance_topo(self, mock_flavor):
+ # Both instance and host have NUMA topologies: cputune vcpupin
+ # entries must pin each guest vCPU to host CPUs of its cell,
+ # intersected with the allowed vcpu_pin_set ({0, 1, 2}), and the
+ # guest NUMA cells must mirror the instance topology.
+ instance_topology = objects.InstanceNUMATopology.obj_from_topology(
+ hardware.VirtNUMAInstanceTopology(
+ cells=[hardware.VirtNUMATopologyCellInstance(
+ 0, set([0, 1]), 1024),
+ hardware.VirtNUMATopologyCellInstance(
+ 1, set([2, 3]),
+ 1024)]))
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref.numa_topology = instance_topology
+ flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
+ ephemeral_gb=8128, swap=33550336, name='fake',
+ extra_specs={})
+ mock_flavor.return_value = flavor
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = vconfig.LibvirtConfigCPU()
+ caps.host.cpu.arch = "x86_64"
+ caps.host.topology = self._fake_caps_numa_topology()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ with contextlib.nested(
+ mock.patch.object(
+ objects.Flavor, "get_by_id", return_value=flavor),
+ mock.patch.object(
+ objects.InstanceNUMATopology, "get_by_instance_uuid",
+ return_value=instance_topology),
+ mock.patch.object(conn, '_has_min_version', return_value=True),
+ mock.patch.object(
+ conn, "_get_host_capabilities", return_value=caps),
+ mock.patch.object(
+ hardware, 'get_vcpu_pin_set', return_value=set([0, 1, 2]))
+ ):
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertIsNone(cfg.cpuset)
+ # Test that the pinning is correct and limited to allowed only
+ self.assertEqual(0, cfg.cputune.vcpupin[0].id)
+ self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[0].cpuset)
+ self.assertEqual(1, cfg.cputune.vcpupin[1].id)
+ self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[1].cpuset)
+ self.assertEqual(2, cfg.cputune.vcpupin[2].id)
+ self.assertEqual(set([2]), cfg.cputune.vcpupin[2].cpuset)
+ self.assertEqual(3, cfg.cputune.vcpupin[3].id)
+ self.assertEqual(set([2]), cfg.cputune.vcpupin[3].cpuset)
+ self.assertIsNotNone(cfg.cpu.numa)
+ for instance_cell, numa_cfg_cell in zip(
+ instance_topology.cells, cfg.cpu.numa.cells):
+ self.assertEqual(instance_cell.id, numa_cfg_cell.id)
+ self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
+ self.assertEqual(instance_cell.memory * units.Ki,
+ numa_cfg_cell.memory)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_clock(self, mock_flavor):
+ # Verify the guest clock config per architecture: always UTC with
+ # pit(delay) and rtc(catchup) timers; x86 variants additionally get
+ # a disabled hpet timer, other arches do not.
+ self.flags(virt_type='kvm', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ image_meta = {}
+ # arch -> whether an hpet timer entry is expected
+ hpet_map = {
+ arch.X86_64: True,
+ arch.I686: True,
+ arch.PPC: False,
+ arch.PPC64: False,
+ arch.ARMV7: False,
+ arch.AARCH64: False,
+ }
+
+ for guestarch, expect_hpet in hpet_map.items():
+ with mock.patch.object(libvirt_driver.libvirt_utils,
+ 'get_arch',
+ return_value=guestarch):
+ cfg = conn._get_guest_config(instance_ref, [],
+ image_meta,
+ disk_info)
+ self.assertIsInstance(cfg.clock,
+ vconfig.LibvirtConfigGuestClock)
+ self.assertEqual(cfg.clock.offset, "utc")
+ self.assertIsInstance(cfg.clock.timers[0],
+ vconfig.LibvirtConfigGuestTimer)
+ self.assertIsInstance(cfg.clock.timers[1],
+ vconfig.LibvirtConfigGuestTimer)
+ self.assertEqual(cfg.clock.timers[0].name, "pit")
+ self.assertEqual(cfg.clock.timers[0].tickpolicy,
+ "delay")
+ self.assertEqual(cfg.clock.timers[1].name, "rtc")
+ self.assertEqual(cfg.clock.timers[1].tickpolicy,
+ "catchup")
+ if expect_hpet:
+ self.assertEqual(3, len(cfg.clock.timers))
+ self.assertIsInstance(cfg.clock.timers[2],
+ vconfig.LibvirtConfigGuestTimer)
+ self.assertEqual('hpet', cfg.clock.timers[2].name)
+ self.assertFalse(cfg.clock.timers[2].present)
+ else:
+ self.assertEqual(2, len(cfg.clock.timers))
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_windows(self, mock_flavor):
+ # Windows guests must get a 'localtime' clock offset instead of the
+ # default 'utc'.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref['os_type'] = 'windows'
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+
+ self.assertIsInstance(cfg.clock,
+ vconfig.LibvirtConfigGuestClock)
+ self.assertEqual(cfg.clock.offset, "localtime")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_two_nics(self, mock_flavor):
+ # Two NICs in network_info must produce two interface devices in the
+ # guest config (10 devices total instead of the default 9).
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 2),
+ {}, disk_info)
+ self.assertEqual(cfg.acpi, True)
+ self.assertEqual(cfg.memory, 2 * units.Mi)
+ self.assertEqual(cfg.vcpus, 1)
+ self.assertEqual(cfg.os_type, vm_mode.HVM)
+ self.assertEqual(cfg.os_boot_dev, ["hd"])
+ self.assertIsNone(cfg.os_root)
+ self.assertEqual(len(cfg.devices), 10)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestInterface)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestInterface)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[8],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[9],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_bug_1118829(self, mock_flavor):
+ # Regression test: with a hand-built disk mapping and no block
+ # device info, _get_guest_config must still set the instance's
+ # root_device_name to /dev/vda.
+ self.flags(virt_type='uml', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = {'disk_bus': 'virtio',
+ 'cdrom_bus': 'ide',
+ 'mapping': {u'vda': {'bus': 'virtio',
+ 'type': 'disk',
+ 'dev': u'vda'},
+ 'root': {'bus': 'virtio',
+ 'type': 'disk',
+ 'dev': 'vda'}}}
+
+ # NOTE(jdg): For this specific test leave this blank
+ # This will exercise the failed code path still,
+ # and won't require fakes and stubs of the iscsi discovery
+ block_device_info = {}
+ conn._get_guest_config(instance_ref, [], {}, disk_info,
+ None, block_device_info)
+ self.assertEqual(instance_ref['root_device_name'], '/dev/vda')
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_root_device_name(self, mock_flavor):
+ # A root_device_name in block_device_info must be propagated to
+ # cfg.os_root for a UML guest (which boots via os_root, not a boot
+ # device list).
+ self.flags(virt_type='uml', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ block_device_info = {'root_device_name': '/dev/vdb'}
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ block_device_info)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info,
+ None, block_device_info)
+ self.assertEqual(cfg.acpi, False)
+ self.assertEqual(cfg.memory, 2 * units.Mi)
+ self.assertEqual(cfg.vcpus, 1)
+ self.assertEqual(cfg.os_type, "uml")
+ self.assertEqual(cfg.os_boot_dev, [])
+ self.assertEqual(cfg.os_root, '/dev/vdb')
+ self.assertEqual(len(cfg.devices), 3)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestConsole)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_block_device(self, mock_flavor):
+ # Two attached volumes must appear as guest disks with their mapped
+ # target devices (vdc/vdd), and each BDM must be saved (the driver
+ # persists the assigned device names).
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+ conn_info = {'driver_volume_type': 'fake'}
+ info = {'block_device_mapping': driver_block_device.convert_volumes([
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1,
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'device_name': '/dev/vdc'}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 2,
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'device_name': '/dev/vdd'}),
+ ])}
+ info['block_device_mapping'][0]['connection_info'] = conn_info
+ info['block_device_mapping'][1]['connection_info'] = conn_info
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref, info)
+ with mock.patch.object(
+ driver_block_device.DriverVolumeBlockDevice, 'save'):
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info,
+ None, info)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertEqual(cfg.devices[2].target_dev, 'vdc')
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertEqual(cfg.devices[3].target_dev, 'vdd')
+ self.assertTrue(info['block_device_mapping'][0].save.called)
+ self.assertTrue(info['block_device_mapping'][1].save.called)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_lxc_with_attached_volume(self, mock_flavor):
+ # For an LXC guest with a boot volume plus two data volumes, only
+ # the non-boot volumes show up as guest disk devices (vdc/vdd); the
+ # boot volume (boot_index 0, /dev/vda) becomes the container rootfs.
+ self.flags(virt_type='lxc', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+ conn_info = {'driver_volume_type': 'fake'}
+ info = {'block_device_mapping': driver_block_device.convert_volumes([
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1,
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'boot_index': 0}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 2,
+ 'source_type': 'volume', 'destination_type': 'volume',
+ }),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 3,
+ 'source_type': 'volume', 'destination_type': 'volume',
+ }),
+ ])}
+
+ info['block_device_mapping'][0]['connection_info'] = conn_info
+ info['block_device_mapping'][1]['connection_info'] = conn_info
+ info['block_device_mapping'][2]['connection_info'] = conn_info
+ info['block_device_mapping'][0]['mount_device'] = '/dev/vda'
+ info['block_device_mapping'][1]['mount_device'] = '/dev/vdc'
+ info['block_device_mapping'][2]['mount_device'] = '/dev/vdd'
+ with mock.patch.object(
+ driver_block_device.DriverVolumeBlockDevice, 'save'):
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref, info)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info,
+ None, info)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertEqual(cfg.devices[1].target_dev, 'vdc')
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertEqual(cfg.devices[2].target_dev, 'vdd')
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_configdrive(self, mock_flavor):
+ # The config drive disk must be attached as the last device on its
+ # bus. It's necessary to check whether the architecture is power,
+ # because power has no ide support and libvirt translates all ide
+ # calls to scsi there.
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ # make configdrive.required_by() return True
+ instance_ref['config_drive'] = True
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+
+ # The last device is selected for this. on x86 is the last ide
+ # device (hdd). Since power only supports scsi, the last device
+ # is sdz
+
+ expect = {"ppc": "sdz", "ppc64": "sdz"}
+ disk = expect.get(blockinfo.libvirt_utils.get_arch({}), "hdd")
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertEqual(cfg.devices[2].target_dev, disk)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_virtio_scsi_bus(self, mock_flavor):
+ # The hw_scsi_model=virtio-scsi image property must add a
+ # virtio-scsi controller device to the guest config.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ image_meta = {"properties": {"hw_scsi_model": "virtio-scsi"}}
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref, [], image_meta)
+ cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestController)
+ self.assertEqual(cfg.devices[2].model, 'virtio-scsi')
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_virtio_scsi_bus_bdm(self, mock_flavor):
+ # With hw_scsi_model=virtio-scsi and two scsi-bus volumes, the
+ # volumes must be attached on the scsi bus (sdc/sdd) and the
+ # virtio-scsi controller must be present.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ image_meta = {"properties": {"hw_scsi_model": "virtio-scsi"}}
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+ conn_info = {'driver_volume_type': 'fake'}
+ bd_info = {
+ 'block_device_mapping': driver_block_device.convert_volumes([
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1,
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'device_name': '/dev/sdc', 'disk_bus': 'scsi'}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 2,
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'device_name': '/dev/sdd', 'disk_bus': 'scsi'}),
+ ])}
+ bd_info['block_device_mapping'][0]['connection_info'] = conn_info
+ bd_info['block_device_mapping'][1]['connection_info'] = conn_info
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref, bd_info, image_meta)
+ with mock.patch.object(
+ driver_block_device.DriverVolumeBlockDevice, 'save'):
+ cfg = conn._get_guest_config(instance_ref, [], image_meta,
+ disk_info, [], bd_info)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertEqual(cfg.devices[2].target_dev, 'sdc')
+ self.assertEqual(cfg.devices[2].target_bus, 'scsi')
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertEqual(cfg.devices[3].target_dev, 'sdd')
+ self.assertEqual(cfg.devices[3].target_bus, 'scsi')
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestController)
+ self.assertEqual(cfg.devices[4].model, 'virtio-scsi')
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_vnc(self, mock_flavor):
+ """VNC enabled, spice and usb tablet disabled: expect exactly 7
+ devices and a "vnc" graphics device (no input device)."""
+ self.flags(vnc_enabled=True)
+ self.flags(virt_type='kvm',
+            use_usb_tablet=False,
+            group='libvirt')
+ self.flags(enabled=False, group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertEqual(len(cfg.devices), 7)
+ self.assertIsInstance(cfg.devices[0],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+                       vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[5],
+                       vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[6],
+                       vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[4].type, "vnc")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_vnc_and_tablet(self, mock_flavor):
+ """VNC plus use_usb_tablet=True adds a "tablet" input device in
+ front of the graphics device, for 8 devices total."""
+ self.flags(vnc_enabled=True)
+ self.flags(virt_type='kvm',
+            use_usb_tablet=True,
+            group='libvirt')
+ self.flags(enabled=False, group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+                       vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+                       vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+                       vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+                       vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[4].type, "tablet")
+ self.assertEqual(cfg.devices[5].type, "vnc")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_spice_and_tablet(self, mock_flavor):
+ """Spice enabled without the agent keeps the usb tablet: 8 devices,
+ input type "tablet", graphics type "spice"."""
+ self.flags(vnc_enabled=False)
+ self.flags(virt_type='kvm',
+            use_usb_tablet=True,
+            group='libvirt')
+ self.flags(enabled=True,
+            agent_enabled=False,
+            group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+                       vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+                       vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+                       vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+                       vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[4].type, "tablet")
+ self.assertEqual(cfg.devices[5].type, "spice")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_spice_and_agent(self, mock_flavor):
+ """With the spice agent enabled the tablet is replaced by a spice
+ channel (com.redhat.spice.0) and the video model becomes qxl."""
+ self.flags(vnc_enabled=False)
+ self.flags(virt_type='kvm',
+            use_usb_tablet=True,
+            group='libvirt')
+ self.flags(enabled=True,
+            agent_enabled=True,
+            group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+                       vconfig.LibvirtConfigGuestChannel)
+ self.assertIsInstance(cfg.devices[5],
+                       vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+                       vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+                       vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[4].target_name, "com.redhat.spice.0")
+ self.assertEqual(cfg.devices[5].type, "spice")
+ self.assertEqual(cfg.devices[6].type, "qxl")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch('nova.console.serial.acquire_port')
+ def test_get_guest_config_serial_console(self, acquire_port,
+                                          mock_flavor):
+ """With serial_console enabled, the first serial device is a tcp
+ one listening on the port handed out by acquire_port()."""
+ self.flags(enabled=True, group='serial_console')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+
+ acquire_port.return_value = 11111
+
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertEqual(8, len(cfg.devices))
+ self.assertIsInstance(cfg.devices[0],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+                       vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+                       vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+                       vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+                       vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual("tcp", cfg.devices[2].type)
+ self.assertEqual(11111, cfg.devices[2].listen_port)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_serial_console_through_flavor(self, mock_flavor):
+ """The flavor extra spec hw:serial_port_count drives how many tcp
+ serial devices end up in the guest config."""
+ self.flags(enabled=True, group='serial_console')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw:serial_port_count': 3}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertEqual(10, len(cfg.devices))
+ self.assertIsInstance(cfg.devices[0],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[5],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[6],
+                       vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[7],
+                       vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[8],
+                       vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[9],
+                       vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual("tcp", cfg.devices[2].type)
+ self.assertEqual("tcp", cfg.devices[3].type)
+ self.assertEqual("tcp", cfg.devices[4].type)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_serial_console_invalid_flavor(self, mock_flavor):
+ """A non-integer hw:serial_port_count in the flavor raises
+ ImageSerialPortNumberInvalid when building the config."""
+ self.flags(enabled=True, group='serial_console')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw:serial_port_count': "a"}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+
+ self.assertRaises(
+     exception.ImageSerialPortNumberInvalid,
+     conn._get_guest_config, instance_ref, [], {}, disk_info)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_serial_console_image_and_flavor(self,
+                                                           mock_flavor):
+ """Serial port count set in both image meta ("3") and flavor (4):
+ the config still builds and all serial devices are tcp."""
+ self.flags(enabled=True, group='serial_console')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ image_meta = {"properties": {"hw_serial_port_count": "3"}}
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw:serial_port_count': 4}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], image_meta,
+                              disk_info)
+ self.assertEqual(10, len(cfg.devices), cfg.devices)
+ self.assertIsInstance(cfg.devices[0],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[5],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[6],
+                       vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[7],
+                       vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[8],
+                       vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[9],
+                       vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual("tcp", cfg.devices[2].type)
+ self.assertEqual("tcp", cfg.devices[3].type)
+ self.assertEqual("tcp", cfg.devices[4].type)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_serial_console_invalid_img_meta(self,
+                                                           mock_flavor):
+ """A non-numeric hw_serial_port_count image property raises
+ ImageSerialPortNumberInvalid."""
+ self.flags(enabled=True, group='serial_console')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+ image_meta = {"properties": {"hw_serial_port_count": "fail"}}
+ self.assertRaises(
+     exception.ImageSerialPortNumberInvalid,
+     conn._get_guest_config, instance_ref, [], image_meta, disk_info)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ @mock.patch('nova.console.serial.acquire_port')
+ def test_get_guest_config_serial_console_through_port_rng_exhausted(
+         self, acquire_port, mock_flavor):
+ """If acquire_port() raises SocketPortRangeExhaustedException, the
+ exception propagates out of _get_guest_config unchanged."""
+ self.flags(enabled=True, group='serial_console')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+
+ acquire_port.side_effect = exception.SocketPortRangeExhaustedException(
+     '127.0.0.1')
+ self.assertRaises(
+     exception.SocketPortRangeExhaustedException,
+     conn._get_guest_config, instance_ref, [], {}, disk_info)
+
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
+ def test_get_serial_ports_from_instance(self, _lookup_by_name):
+ """With no mode filter, all four (host, port) pairs from the domain
+ XML fixture are returned."""
+ i = self._test_get_serial_ports_from_instance(_lookup_by_name)
+ self.assertEqual([
+     ('127.0.0.1', 100),
+     ('127.0.0.1', 101),
+     ('127.0.0.2', 100),
+     ('127.0.0.2', 101)], list(i))
+
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
+ def test_get_serial_ports_from_instance_bind_only(self, _lookup_by_name):
+ """mode='bind' returns only the serial sources declared with
+ mode="bind" in the fixture XML."""
+ i = self._test_get_serial_ports_from_instance(
+     _lookup_by_name, mode='bind')
+ self.assertEqual([
+     ('127.0.0.1', 101),
+     ('127.0.0.2', 100)], list(i))
+
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
+ def test_get_serial_ports_from_instance_connect_only(self,
+                                                      _lookup_by_name):
+ """mode='connect' returns only the serial sources declared with
+ mode="connect" in the fixture XML."""
+ i = self._test_get_serial_ports_from_instance(
+     _lookup_by_name, mode='connect')
+ self.assertEqual([
+     ('127.0.0.1', 100),
+     ('127.0.0.2', 101)], list(i))
+
+ def _test_get_serial_ports_from_instance(self, _lookup_by_name, mode=None):
+ """Helper: stub the domain lookup with a fixed XML containing four
+ tcp serial devices (two bind, two connect) and return the result of
+ _get_serial_ports_from_instance for the given mode filter."""
+ xml = """
+ <domain type='kvm'>
+   <devices>
+     <serial type="tcp">
+       <source host="127.0.0.1" service="100" mode="connect"/>
+     </serial>
+     <serial type="tcp">
+       <source host="127.0.0.1" service="101" mode="bind"/>
+     </serial>
+     <serial type="tcp">
+       <source host="127.0.0.2" service="100" mode="bind"/>
+     </serial>
+     <serial type="tcp">
+       <source host="127.0.0.2" service="101" mode="connect"/>
+     </serial>
+   </devices>
+ </domain>"""
+
+ dom = mock.MagicMock()
+ dom.XMLDesc.return_value = xml
+ _lookup_by_name.return_value = dom
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ return conn._get_serial_ports_from_instance(
+     {'name': 'fake_instance'}, mode=mode)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_type_xen(self, mock_flavor):
+ """virt_type=xen yields a console device instead of serial ports,
+ vnc graphics and the "xen" video model (6 devices total)."""
+ self.flags(vnc_enabled=True)
+ self.flags(virt_type='xen',
+            use_usb_tablet=False,
+            group='libvirt')
+ self.flags(enabled=False,
+            group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertEqual(len(cfg.devices), 6)
+ self.assertIsInstance(cfg.devices[0],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+                       vconfig.LibvirtConfigGuestConsole)
+ self.assertIsInstance(cfg.devices[3],
+                       vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[4],
+                       vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[5],
+                       vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[3].type, "vnc")
+ self.assertEqual(cfg.devices[4].type, "xen")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_type_xen_pae_hvm(self, mock_flavor):
+ """Xen guest in HVM vm_mode: os_type is HVM, the configured
+ hvmloader path is used as os_loader, and PAE is enabled."""
+ self.flags(vnc_enabled=True)
+ self.flags(virt_type='xen',
+            use_usb_tablet=False,
+            group='libvirt')
+ self.flags(enabled=False,
+            group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref['vm_mode'] = vm_mode.HVM
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+
+ self.assertEqual(cfg.os_type, vm_mode.HVM)
+ self.assertEqual(cfg.os_loader, CONF.libvirt.xen_hvmloader_path)
+ self.assertEqual(cfg.pae, True)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_type_xen_pae_pvm(self, mock_flavor):
+ """Xen guest without an explicit vm_mode defaults to paravirt:
+ os_type XEN with PAE enabled."""
+ self.flags(vnc_enabled=True)
+ self.flags(virt_type='xen',
+            use_usb_tablet=False,
+            group='libvirt')
+ self.flags(enabled=False,
+            group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+
+ self.assertEqual(cfg.os_type, vm_mode.XEN)
+ self.assertEqual(cfg.pae, True)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_vnc_and_spice(self, mock_flavor):
+ """VNC and spice (with agent) enabled together: both graphics
+ devices appear, plus the tablet and the spice channel (10 devices)."""
+ self.flags(vnc_enabled=True)
+ self.flags(virt_type='kvm',
+            use_usb_tablet=True,
+            group='libvirt')
+ self.flags(enabled=True,
+            agent_enabled=True,
+            group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ self.assertEqual(len(cfg.devices), 10)
+ self.assertIsInstance(cfg.devices[0],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+                       vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+                       vconfig.LibvirtConfigGuestChannel)
+ self.assertIsInstance(cfg.devices[6],
+                       vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[7],
+                       vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[8],
+                       vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[9],
+                       vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[4].type, "tablet")
+ self.assertEqual(cfg.devices[5].target_name, "com.redhat.spice.0")
+ self.assertEqual(cfg.devices[6].type, "vnc")
+ self.assertEqual(cfg.devices[7].type, "spice")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_invalid_watchdog_action(self, mock_flavor):
+ """An unknown hw_watchdog_action image property raises
+ InvalidWatchdogAction."""
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+ image_meta = {"properties": {"hw_watchdog_action": "something"}}
+ self.assertRaises(exception.InvalidWatchdogAction,
+                   conn._get_guest_config,
+                   instance_ref,
+                   [],
+                   image_meta,
+                   disk_info)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_watchdog_action_image_meta(self,
+                                                           mock_flavor):
+ """hw_watchdog_action=none in image meta adds a watchdog device
+ with action "none" (9 devices total)."""
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+ image_meta = {"properties": {"hw_watchdog_action": "none"}}
+ cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 9)
+ self.assertIsInstance(cfg.devices[0],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+                       vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+                       vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+                       vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+                       vconfig.LibvirtConfigGuestWatchdog)
+ self.assertIsInstance(cfg.devices[8],
+                       vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual("none", cfg.devices[7].action)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _test_get_guest_config_with_watchdog_action_flavor(self, mock_flavor,
+         hw_watchdog_action="hw:watchdog_action"):
+ """Helper: set a watchdog action of "none" via a flavor extra spec
+ (key name parametrized so both the scoped and the legacy unscoped
+ spelling can be exercised) and verify the resulting watchdog device."""
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {hw_watchdog_action: 'none'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+
+ self.assertEqual(9, len(cfg.devices))
+ self.assertIsInstance(cfg.devices[0],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+                       vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+                       vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+                       vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+                       vconfig.LibvirtConfigGuestWatchdog)
+ self.assertIsInstance(cfg.devices[8],
+                       vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual("none", cfg.devices[7].action)
+
+ def test_get_guest_config_with_watchdog_action_through_flavor(self):
+ """Scoped extra-spec key hw:watchdog_action works."""
+ self._test_get_guest_config_with_watchdog_action_flavor()
+
+ # TODO(pkholkin): the test accepting old property name 'hw_watchdog_action'
+ # should be removed in the next release
+ def test_get_guest_config_with_watchdog_action_through_flavor_no_scope(
+         self):
+ """Legacy unscoped extra-spec key hw_watchdog_action still works."""
+ self._test_get_guest_config_with_watchdog_action_flavor(
+     hw_watchdog_action="hw_watchdog_action")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_watchdog_overrides_flavor(self,
+                                                          mock_flavor):
+ """The image meta watchdog action ("pause") takes precedence over
+ the flavor extra spec ("none")."""
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw_watchdog_action': 'none'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+
+ image_meta = {"properties": {"hw_watchdog_action": "pause"}}
+
+ cfg = conn._get_guest_config(instance_ref, [],
+                              image_meta, disk_info)
+
+ self.assertEqual(9, len(cfg.devices))
+ self.assertIsInstance(cfg.devices[0],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+                       vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+                       vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+                       vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+                       vconfig.LibvirtConfigGuestWatchdog)
+ self.assertIsInstance(cfg.devices[8],
+                       vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual("pause", cfg.devices[7].action)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_unsupported_video_driver_through_image_meta(self,
+                                                      mock_flavor):
+ """An unknown hw_video_model image property raises
+ InvalidVideoMode."""
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+ image_meta = {"properties": {"hw_video_model": "something"}}
+ self.assertRaises(exception.InvalidVideoMode,
+                   conn._get_guest_config,
+                   instance_ref,
+                   [],
+                   image_meta,
+                   disk_info)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_video_driver_image_meta(self,
+                                                        mock_flavor):
+ """A valid hw_video_model ("vmvga") in image meta selects that
+ video device type in the guest config."""
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+ image_meta = {"properties": {"hw_video_model": "vmvga"}}
+ cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+                       vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+                       vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+                       vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+                       vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[5].type, "vnc")
+ self.assertEqual(cfg.devices[6].type, "vmvga")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_qga_through_image_meta(self,
+                                                       mock_flavor):
+ """hw_qemu_guest_agent=yes adds a unix channel device targeting
+ org.qemu.guest_agent.0 (9 devices total)."""
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+ image_meta = {"properties": {"hw_qemu_guest_agent": "yes"}}
+ cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 9)
+ self.assertIsInstance(cfg.devices[0],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+                       vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+                       vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+                       vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+                       vconfig.LibvirtConfigGuestChannel)
+ self.assertIsInstance(cfg.devices[8],
+                       vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[4].type, "tablet")
+ self.assertEqual(cfg.devices[5].type, "vnc")
+ self.assertEqual(cfg.devices[7].type, "unix")
+ self.assertEqual(cfg.devices[7].target_name, "org.qemu.guest_agent.0")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_video_driver_vram(self, mock_flavor):
+ """Requested qxl video RAM (64) below the flavor cap
+ (hw_video:ram_max_mb=100) is honored in the config."""
+ self.flags(vnc_enabled=False)
+ self.flags(virt_type='kvm', group='libvirt')
+ self.flags(enabled=True,
+            agent_enabled=True,
+            group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw_video:ram_max_mb': "100"}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+ image_meta = {"properties": {"hw_video_model": "qxl",
+                              "hw_video_ram": "64"}}
+
+ cfg = conn._get_guest_config(instance_ref, [],
+                              image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+                       vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+                       vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+                       vconfig.LibvirtConfigGuestChannel)
+ self.assertIsInstance(cfg.devices[5],
+                       vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+                       vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+                       vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[5].type, "spice")
+ self.assertEqual(cfg.devices[6].type, "qxl")
+ self.assertEqual(cfg.devices[6].vram, 64)
+
+ @mock.patch('nova.virt.disk.api.teardown_container')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
+ @mock.patch('nova.virt.disk.api.setup_container')
+ @mock.patch('nova.openstack.common.fileutils.ensure_tree')
+ @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
+ def test_unmount_fs_if_error_during_lxc_create_domain(self,
+         mock_get_inst_path, mock_ensure_tree, mock_setup_container,
+         mock_get_info, mock_teardown):
+ """If we hit an error during a `_create_domain` call to `libvirt+lxc`
+ we need to ensure the guest FS is unmounted from the host so that any
+ future `lvremove` calls will work.
+ """
+ self.flags(virt_type='lxc', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ mock_instance = mock.MagicMock()
+ mock_get_inst_path.return_value = '/tmp/'
+ mock_image_backend = mock.MagicMock()
+ conn.image_backend = mock_image_backend
+ mock_image = mock.MagicMock()
+ mock_image.path = '/tmp/test.img'
+ conn.image_backend.image.return_value = mock_image
+ mock_setup_container.return_value = '/dev/nbd0'
+ mock_get_info.side_effect = exception.InstanceNotFound(
+     instance_id='foo')
+ # Make the domain definition itself blow up so the error path runs.
+ conn._conn.defineXML = mock.Mock()
+ conn._conn.defineXML.side_effect = ValueError('somethingbad')
+ with contextlib.nested(
+         mock.patch.object(conn, '_is_booted_from_volume',
+                           return_value=False),
+         mock.patch.object(conn, 'plug_vifs'),
+         mock.patch.object(conn, 'firewall_driver'),
+         mock.patch.object(conn, 'cleanup')):
+     self.assertRaises(ValueError,
+                       conn._create_domain_and_network,
+                       self.context,
+                       'xml',
+                       mock_instance, None)
+
+     # The cleanup path must have torn the container FS back down.
+     mock_teardown.assert_called_with(container_dir='/tmp/rootfs')
+
+ def test_video_driver_flavor_limit_not_set(self):
+ """Requesting explicit video RAM without any hw_video:ram_max_mb
+ cap in the flavor raises RequestedVRamTooHigh."""
+ self.flags(virt_type='kvm', group='libvirt')
+ self.flags(enabled=True,
+            agent_enabled=True,
+            group='spice')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+                                     instance_ref)
+ image_meta = {"properties": {"hw_video_model": "qxl",
+                              "hw_video_ram": "64"}}
+
+ with contextlib.nested(
+         mock.patch.object(objects.Flavor, 'get_by_id'),
+         mock.patch.object(objects.Instance, 'save'),
+ ) as (mock_flavor, mock_instance):
+     flavor = instance_ref.get_flavor()
+     flavor.extra_specs = {}
+     mock_flavor.return_value = flavor
+
+     self.assertRaises(exception.RequestedVRamTooHigh,
+                       conn._get_guest_config,
+                       instance_ref,
+                       [],
+                       image_meta,
+                       disk_info)
+
+ def test_video_driver_ram_above_flavor_limit(self):
+ self.flags(virt_type='kvm', group='libvirt')
+ self.flags(enabled=True,
+ agent_enabled=True,
+ group='spice')
+
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_type = instance_ref.get_flavor()
+ instance_type.extra_specs = {'hw_video:ram_max_mb': "50"}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ image_meta = {"properties": {"hw_video_model": "qxl",
+ "hw_video_ram": "64"}}
+ with contextlib.nested(
+ mock.patch.object(objects.Flavor, 'get_by_id',
+ return_value=instance_type),
+ mock.patch.object(objects.Instance, 'save')):
+ self.assertRaises(exception.RequestedVRamTooHigh,
+ conn._get_guest_config,
+ instance_ref,
+ [],
+ image_meta,
+ disk_info)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_without_qga_through_image_meta(self,
+ mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ image_meta = {"properties": {"hw_qemu_guest_agent": "no"}}
+ cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[4].type, "tablet")
+ self.assertEqual(cfg.devices[5].type, "vnc")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_rng_device(self, mock_flavor):
+ self.flags(virt_type='kvm',
+ use_usb_tablet=False,
+ group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw_rng:allowed': 'True'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"hw_rng_model": "virtio"}}
+
+ cfg = conn._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestRng)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[6].model, 'random')
+ self.assertIsNone(cfg.devices[6].backend)
+ self.assertIsNone(cfg.devices[6].rate_bytes)
+ self.assertIsNone(cfg.devices[6].rate_period)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_rng_not_allowed(self, mock_flavor):
+ self.flags(virt_type='kvm',
+ use_usb_tablet=False,
+ group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"hw_rng_model": "virtio"}}
+ cfg = conn._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 7)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_rng_limits(self, mock_flavor):
+ self.flags(virt_type='kvm',
+ use_usb_tablet=False,
+ group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw_rng:allowed': 'True',
+ 'hw_rng:rate_bytes': '1024',
+ 'hw_rng:rate_period': '2'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"hw_rng_model": "virtio"}}
+
+ cfg = conn._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestRng)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[6].model, 'random')
+ self.assertIsNone(cfg.devices[6].backend)
+ self.assertEqual(cfg.devices[6].rate_bytes, 1024)
+ self.assertEqual(cfg.devices[6].rate_period, 2)
+
+ @mock.patch('nova.virt.libvirt.driver.os.path.exists')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_rng_backend(self, mock_flavor, mock_path):
+ self.flags(virt_type='kvm',
+ use_usb_tablet=False,
+ rng_dev_path='/dev/hw_rng',
+ group='libvirt')
+ mock_path.return_value = True
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw_rng:allowed': 'True'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"hw_rng_model": "virtio"}}
+
+ cfg = conn._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ self.assertEqual(len(cfg.devices), 8)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestRng)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[6].model, 'random')
+ self.assertEqual(cfg.devices[6].backend, '/dev/hw_rng')
+ self.assertIsNone(cfg.devices[6].rate_bytes)
+ self.assertIsNone(cfg.devices[6].rate_period)
+
+ @mock.patch('nova.virt.libvirt.driver.os.path.exists')
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_rng_dev_not_present(self, mock_flavor,
+ mock_path):
+ self.flags(virt_type='kvm',
+ use_usb_tablet=False,
+ rng_dev_path='/dev/hw_rng',
+ group='libvirt')
+ mock_path.return_value = False
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'hw_rng:allowed': 'True'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"hw_rng_model": "virtio"}}
+
+ self.assertRaises(exception.RngDeviceNotExist,
+ conn._get_guest_config,
+ instance_ref,
+ [],
+ image_meta, disk_info)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_cpu_quota(self, mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'quota:cpu_shares': '10000',
+ 'quota:cpu_period': '20000'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+
+ self.assertEqual(10000, cfg.cputune.shares)
+ self.assertEqual(20000, cfg.cputune.period)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_bogus_cpu_quota(self, mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {'quota:cpu_shares': 'fishfood',
+ 'quota:cpu_period': '20000'}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ self.assertRaises(ValueError,
+ conn._get_guest_config,
+ instance_ref, [], {}, disk_info)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _test_get_guest_config_sysinfo_serial(self, expected_serial,
+ mock_flavor):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ cfg = drvr._get_guest_config_sysinfo(instance_ref)
+
+ self.assertIsInstance(cfg, vconfig.LibvirtConfigGuestSysinfo)
+ self.assertEqual(version.vendor_string(),
+ cfg.system_manufacturer)
+ self.assertEqual(version.product_string(),
+ cfg.system_product)
+ self.assertEqual(version.version_string_with_package(),
+ cfg.system_version)
+ self.assertEqual(expected_serial,
+ cfg.system_serial)
+ self.assertEqual(instance_ref['uuid'],
+ cfg.system_uuid)
+
+ def test_get_guest_config_sysinfo_serial_none(self):
+ self.flags(sysinfo_serial="none", group="libvirt")
+ self._test_get_guest_config_sysinfo_serial(None)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_host_uuid")
+ def test_get_guest_config_sysinfo_serial_hardware(self, mock_uuid):
+ self.flags(sysinfo_serial="hardware", group="libvirt")
+
+ theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
+ mock_uuid.return_value = theuuid
+
+ self._test_get_guest_config_sysinfo_serial(theuuid)
+
+ def test_get_guest_config_sysinfo_serial_os(self):
+ self.flags(sysinfo_serial="os", group="libvirt")
+
+ real_open = __builtin__.open
+ with contextlib.nested(
+ mock.patch.object(__builtin__, "open"),
+ ) as (mock_open, ):
+ theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
+
+ def fake_open(filename, *args, **kwargs):
+ if filename == "/etc/machine-id":
+ h = mock.MagicMock()
+ h.read.return_value = theuuid
+ h.__enter__.return_value = h
+ return h
+ return real_open(filename, *args, **kwargs)
+
+ mock_open.side_effect = fake_open
+
+ self._test_get_guest_config_sysinfo_serial(theuuid)
+
+ def test_get_guest_config_sysinfo_serial_auto_hardware(self):
+ self.flags(sysinfo_serial="auto", group="libvirt")
+
+ real_exists = os.path.exists
+ with contextlib.nested(
+ mock.patch.object(os.path, "exists"),
+ mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_get_host_uuid")
+ ) as (mock_exists, mock_uuid):
+ def fake_exists(filename):
+ if filename == "/etc/machine-id":
+ return False
+ return real_exists(filename)
+
+ mock_exists.side_effect = fake_exists
+
+ theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
+ mock_uuid.return_value = theuuid
+
+ self._test_get_guest_config_sysinfo_serial(theuuid)
+
+ def test_get_guest_config_sysinfo_serial_auto_os(self):
+ self.flags(sysinfo_serial="auto", group="libvirt")
+
+ real_exists = os.path.exists
+ real_open = __builtin__.open
+ with contextlib.nested(
+ mock.patch.object(os.path, "exists"),
+ mock.patch.object(__builtin__, "open"),
+ ) as (mock_exists, mock_open):
+ def fake_exists(filename):
+ if filename == "/etc/machine-id":
+ return True
+ return real_exists(filename)
+
+ mock_exists.side_effect = fake_exists
+
+ theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
+
+ def fake_open(filename, *args, **kwargs):
+ if filename == "/etc/machine-id":
+ h = mock.MagicMock()
+ h.read.return_value = theuuid
+ h.__enter__.return_value = h
+ return h
+ return real_open(filename, *args, **kwargs)
+
+ mock_open.side_effect = fake_open
+
+ self._test_get_guest_config_sysinfo_serial(theuuid)
+
+ def test_get_guest_config_sysinfo_serial_invalid(self):
+ self.flags(sysinfo_serial="invalid", group="libvirt")
+
+ self.assertRaises(exception.NovaException,
+ libvirt_driver.LibvirtDriver,
+ fake.FakeVirtAPI(),
+ True)
+
+ def _create_fake_service_compute(self):
+ service_info = {
+ 'id': 1729,
+ 'host': 'fake',
+ 'report_count': 0
+ }
+ service_ref = objects.Service(**service_info)
+
+ compute_info = {
+ 'id': 1729,
+ 'vcpus': 2,
+ 'memory_mb': 1024,
+ 'local_gb': 2048,
+ 'vcpus_used': 0,
+ 'memory_mb_used': 0,
+ 'local_gb_used': 0,
+ 'free_ram_mb': 1024,
+ 'free_disk_gb': 2048,
+ 'hypervisor_type': 'xen',
+ 'hypervisor_version': 1,
+ 'running_vms': 0,
+ 'cpu_info': '',
+ 'current_workload': 0,
+ 'service_id': service_ref['id']
+ }
+ compute_ref = objects.ComputeNode(**compute_info)
+ return (service_ref, compute_ref)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_pci_passthrough_kvm(self, mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+ service_ref, compute_ref = self._create_fake_service_compute()
+
+ instance = objects.Instance(**self.test_instance)
+ flavor = instance.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ pci_device_info = dict(test_pci_device.fake_db_dev)
+ pci_device_info.update(compute_node_id=1,
+ label='fake',
+ status='allocated',
+ address='0000:00:00.1',
+ compute_id=compute_ref['id'],
+ instance_uuid=instance.uuid,
+ request_id=None,
+ extra_info={})
+ pci_device = objects.PciDevice(**pci_device_info)
+ pci_list = objects.PciDeviceList()
+ pci_list.objects.append(pci_device)
+ instance.pci_devices = pci_list
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance)
+ cfg = conn._get_guest_config(instance, [], {}, disk_info)
+
+ had_pci = 0
+ # care only about the PCI devices
+ for dev in cfg.devices:
+ if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
+ had_pci += 1
+ self.assertEqual(dev.type, 'pci')
+ self.assertEqual(dev.managed, 'yes')
+ self.assertEqual(dev.mode, 'subsystem')
+
+ self.assertEqual(dev.domain, "0000")
+ self.assertEqual(dev.bus, "00")
+ self.assertEqual(dev.slot, "00")
+ self.assertEqual(dev.function, "1")
+ self.assertEqual(had_pci, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_with_pci_passthrough_xen(self, mock_flavor):
+ self.flags(virt_type='xen', group='libvirt')
+ service_ref, compute_ref = self._create_fake_service_compute()
+
+ instance = objects.Instance(**self.test_instance)
+ flavor = instance.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ pci_device_info = dict(test_pci_device.fake_db_dev)
+ pci_device_info.update(compute_node_id=1,
+ label='fake',
+ status='allocated',
+ address='0000:00:00.2',
+ compute_id=compute_ref['id'],
+ instance_uuid=instance.uuid,
+ request_id=None,
+ extra_info={})
+ pci_device = objects.PciDevice(**pci_device_info)
+ pci_list = objects.PciDeviceList()
+ pci_list.objects.append(pci_device)
+ instance.pci_devices = pci_list
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance)
+ cfg = conn._get_guest_config(instance, [], {}, disk_info)
+ had_pci = 0
+ # care only about the PCI devices
+ for dev in cfg.devices:
+ if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
+ had_pci += 1
+ self.assertEqual(dev.type, 'pci')
+ self.assertEqual(dev.managed, 'no')
+ self.assertEqual(dev.mode, 'subsystem')
+
+ self.assertEqual(dev.domain, "0000")
+ self.assertEqual(dev.bus, "00")
+ self.assertEqual(dev.slot, "00")
+ self.assertEqual(dev.function, "2")
+ self.assertEqual(had_pci, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_os_command_line_through_image_meta(self,
+ mock_flavor):
+ self.flags(virt_type="kvm",
+ cpu_mode=None,
+ group='libvirt')
+
+ self.test_instance['kernel_id'] = "fake_kernel_id"
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"os_command_line":
+ "fake_os_command_line"}}
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ image_meta, disk_info)
+ self.assertEqual(cfg.os_cmdline, "fake_os_command_line")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_os_command_line_without_kernel_id(self,
+ mock_flavor):
+ self.flags(virt_type="kvm",
+ cpu_mode=None,
+ group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"os_command_line":
+ "fake_os_command_line"}}
+
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ image_meta, disk_info)
+ self.assertIsNone(cfg.os_cmdline)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_os_command_empty(self, mock_flavor):
+ self.flags(virt_type="kvm",
+ cpu_mode=None,
+ group='libvirt')
+
+ self.test_instance['kernel_id'] = "fake_kernel_id"
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ # the instance has 'root=/dev/vda console=tty0 console=ttyS0' set by
+ # default, so testing an empty string and None value in the
+ # os_command_line image property must pass
+ image_meta = {"properties": {"os_command_line": ""}}
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ image_meta, disk_info)
+ self.assertNotEqual(cfg.os_cmdline, "")
+
+ image_meta = {"properties": {"os_command_line": None}}
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ image_meta, disk_info)
+ self.assertIsNotNone(cfg.os_cmdline)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_armv7(self, mock_flavor):
+ def get_host_capabilities_stub(self):
+ cpu = vconfig.LibvirtConfigGuestCPU()
+ cpu.arch = arch.ARMV7
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = cpu
+ return caps
+
+ self.flags(virt_type="kvm",
+ group="libvirt")
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
+ "_get_host_capabilities",
+ get_host_capabilities_stub)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertEqual(cfg.os_mach_type, "vexpress-a15")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_aarch64(self, mock_flavor):
+ def get_host_capabilities_stub(self):
+ cpu = vconfig.LibvirtConfigGuestCPU()
+ cpu.arch = arch.AARCH64
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = cpu
+ return caps
+
+ self.flags(virt_type="kvm",
+ group="libvirt")
+
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
+ "_get_host_capabilities",
+ get_host_capabilities_stub)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertEqual(cfg.os_mach_type, "virt")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_machine_type_through_image_meta(self,
+ mock_flavor):
+ self.flags(virt_type="kvm",
+ group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ image_meta = {"properties": {"hw_machine_type":
+ "fake_machine_type"}}
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ image_meta, disk_info)
+ self.assertEqual(cfg.os_mach_type, "fake_machine_type")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_config_machine_type_from_config(self, mock_flavor):
+ self.flags(virt_type='kvm', group='libvirt')
+ self.flags(hw_machine_type=['x86_64=fake_machine_type'],
+ group='libvirt')
+
+ def fake_getCapabilities():
+ return """
+ <capabilities>
+ <host>
+ <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
+ <cpu>
+ <arch>x86_64</arch>
+ <model>Penryn</model>
+ <vendor>Intel</vendor>
+ <topology sockets='1' cores='2' threads='1'/>
+ <feature name='xtpr'/>
+ </cpu>
+ </host>
+ </capabilities>
+ """
+
+ def fake_baselineCPU(cpu, flag):
+ return """<cpu mode='custom' match='exact'>
+ <model fallback='allow'>Penryn</model>
+ <vendor>Intel</vendor>
+ <feature policy='require' name='xtpr'/>
+ </cpu>
+ """
+
+ # Make sure the host arch is mocked as x86_64
+ self.create_fake_libvirt_mock(getCapabilities=fake_getCapabilities,
+ baselineCPU=fake_baselineCPU,
+ getVersion=lambda: 1005001)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertEqual(cfg.os_mach_type, "fake_machine_type")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _test_get_guest_config_ppc64(self, device_index, mock_flavor):
+ """Test for nova.virt.libvirt.driver.LibvirtDriver._get_guest_config.
+ """
+ self.flags(virt_type='kvm', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ image_meta = {}
+ expected = (arch.PPC64, arch.PPC)
+ for guestarch in expected:
+ with mock.patch.object(libvirt_driver.libvirt_utils,
+ 'get_arch',
+ return_value=guestarch):
+ cfg = conn._get_guest_config(instance_ref, [],
+ image_meta,
+ disk_info)
+ self.assertIsInstance(cfg.devices[device_index],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertEqual(cfg.devices[device_index].type, 'vga')
+
+ def test_get_guest_config_ppc64_through_image_meta_vnc_enabled(self):
+ self.flags(vnc_enabled=True)
+ self._test_get_guest_config_ppc64(6)
+
+ def test_get_guest_config_ppc64_through_image_meta_spice_enabled(self):
+ self.flags(enabled=True,
+ agent_enabled=True,
+ group='spice')
+ self._test_get_guest_config_ppc64(8)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_none(self, mock_flavor):
+ self.flags(cpu_mode="none", group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsInstance(conf.cpu,
+ vconfig.LibvirtConfigGuestCPU)
+ self.assertIsNone(conf.cpu.mode)
+ self.assertIsNone(conf.cpu.model)
+ self.assertEqual(conf.cpu.sockets, 1)
+ self.assertEqual(conf.cpu.cores, 1)
+ self.assertEqual(conf.cpu.threads, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_default_kvm(self, mock_flavor):
+ self.flags(virt_type="kvm",
+ cpu_mode=None,
+ group='libvirt')
+
+ def get_lib_version_stub():
+ return (0 * 1000 * 1000) + (9 * 1000) + 11
+
+ self.stubs.Set(self.conn,
+ "getLibVersion",
+ get_lib_version_stub)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsInstance(conf.cpu,
+ vconfig.LibvirtConfigGuestCPU)
+ self.assertEqual(conf.cpu.mode, "host-model")
+ self.assertIsNone(conf.cpu.model)
+ self.assertEqual(conf.cpu.sockets, 1)
+ self.assertEqual(conf.cpu.cores, 1)
+ self.assertEqual(conf.cpu.threads, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_default_uml(self, mock_flavor):
+ self.flags(virt_type="uml",
+ cpu_mode=None,
+ group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsNone(conf.cpu)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_default_lxc(self, mock_flavor):
+ self.flags(virt_type="lxc",
+ cpu_mode=None,
+ group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsNone(conf.cpu)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_host_passthrough(self, mock_flavor):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ self.flags(cpu_mode="host-passthrough", group='libvirt')
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsInstance(conf.cpu,
+ vconfig.LibvirtConfigGuestCPU)
+ self.assertEqual(conf.cpu.mode, "host-passthrough")
+ self.assertIsNone(conf.cpu.model)
+ self.assertEqual(conf.cpu.sockets, 1)
+ self.assertEqual(conf.cpu.cores, 1)
+ self.assertEqual(conf.cpu.threads, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_host_model(self, mock_flavor):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ self.flags(cpu_mode="host-model", group='libvirt')
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsInstance(conf.cpu,
+ vconfig.LibvirtConfigGuestCPU)
+ self.assertEqual(conf.cpu.mode, "host-model")
+ self.assertIsNone(conf.cpu.model)
+ self.assertEqual(conf.cpu.sockets, 1)
+ self.assertEqual(conf.cpu.cores, 1)
+ self.assertEqual(conf.cpu.threads, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_config_custom(self, mock_flavor):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ self.flags(cpu_mode="custom",
+ cpu_model="Penryn",
+ group='libvirt')
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsInstance(conf.cpu,
+ vconfig.LibvirtConfigGuestCPU)
+ self.assertEqual(conf.cpu.mode, "custom")
+ self.assertEqual(conf.cpu.model, "Penryn")
+ self.assertEqual(conf.cpu.sockets, 1)
+ self.assertEqual(conf.cpu.cores, 1)
+ self.assertEqual(conf.cpu.threads, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_cpu_topology(self, mock_flavor):
+ fake_flavor = objects.flavor.Flavor.get_by_id(
+ self.context,
+ self.test_instance['instance_type_id'])
+ fake_flavor.vcpus = 8
+ fake_flavor.extra_specs = {'hw:cpu_max_sockets': '4'}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ with mock.patch.object(objects.flavor.Flavor, 'get_by_id',
+ return_value=fake_flavor):
+ conf = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertIsInstance(conf.cpu,
+ vconfig.LibvirtConfigGuestCPU)
+ self.assertEqual(conf.cpu.mode, "host-model")
+ self.assertEqual(conf.cpu.sockets, 4)
+ self.assertEqual(conf.cpu.cores, 2)
+ self.assertEqual(conf.cpu.threads, 1)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_memory_balloon_config_by_default(self, mock_flavor):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ for device in cfg.devices:
+ if device.root_name == 'memballoon':
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigMemoryBalloon)
+ self.assertEqual('virtio', device.model)
+ self.assertEqual(10, device.period)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_memory_balloon_config_disable(self, mock_flavor):
+ self.flags(mem_stats_period_seconds=0, group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ no_exist = True
+ for device in cfg.devices:
+ if device.root_name == 'memballoon':
+ no_exist = False
+ break
+ self.assertTrue(no_exist)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_memory_balloon_config_period_value(self, mock_flavor):
+ self.flags(mem_stats_period_seconds=21, group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ for device in cfg.devices:
+ if device.root_name == 'memballoon':
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigMemoryBalloon)
+ self.assertEqual('virtio', device.model)
+ self.assertEqual(21, device.period)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_memory_balloon_config_qemu(self, mock_flavor):
+ self.flags(virt_type='qemu', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ for device in cfg.devices:
+ if device.root_name == 'memballoon':
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigMemoryBalloon)
+ self.assertEqual('virtio', device.model)
+ self.assertEqual(10, device.period)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_memory_balloon_config_xen(self, mock_flavor):
+ self.flags(virt_type='xen', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ for device in cfg.devices:
+ if device.root_name == 'memballoon':
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigMemoryBalloon)
+ self.assertEqual('xen', device.model)
+ self.assertEqual(10, device.period)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_get_guest_memory_balloon_config_lxc(self, mock_flavor):
+ self.flags(virt_type='lxc', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+ no_exist = True
+ for device in cfg.devices:
+ if device.root_name == 'memballoon':
+ no_exist = False
+ break
+ self.assertTrue(no_exist)
+
+ def test_xml_and_uri_no_ramdisk_no_kernel(self):
+ instance_data = dict(self.test_instance)
+ self._check_xml_and_uri(instance_data,
+ expect_kernel=False, expect_ramdisk=False)
+
+ def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self):
+ instance_data = dict(self.test_instance)
+ instance_data.update({'vm_mode': vm_mode.HVM})
+ self._check_xml_and_uri(instance_data, expect_kernel=False,
+ expect_ramdisk=False, expect_xen_hvm=True)
+
+ def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self):
+ instance_data = dict(self.test_instance)
+ instance_data.update({'vm_mode': vm_mode.XEN})
+ self._check_xml_and_uri(instance_data, expect_kernel=False,
+ expect_ramdisk=False, expect_xen_hvm=False,
+ xen_only=True)
+
+ def test_xml_and_uri_no_ramdisk(self):
+ instance_data = dict(self.test_instance)
+ instance_data['kernel_id'] = 'aki-deadbeef'
+ self._check_xml_and_uri(instance_data,
+ expect_kernel=True, expect_ramdisk=False)
+
+ def test_xml_and_uri_no_kernel(self):
+ instance_data = dict(self.test_instance)
+ instance_data['ramdisk_id'] = 'ari-deadbeef'
+ self._check_xml_and_uri(instance_data,
+ expect_kernel=False, expect_ramdisk=False)
+
+ def test_xml_and_uri(self):
+ instance_data = dict(self.test_instance)
+ instance_data['ramdisk_id'] = 'ari-deadbeef'
+ instance_data['kernel_id'] = 'aki-deadbeef'
+ self._check_xml_and_uri(instance_data,
+ expect_kernel=True, expect_ramdisk=True)
+
+ def test_xml_and_uri_rescue(self):
+ instance_data = dict(self.test_instance)
+ instance_data['ramdisk_id'] = 'ari-deadbeef'
+ instance_data['kernel_id'] = 'aki-deadbeef'
+ self._check_xml_and_uri(instance_data, expect_kernel=True,
+ expect_ramdisk=True, rescue=instance_data)
+
+ def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self):
+ instance_data = dict(self.test_instance)
+ self._check_xml_and_uri(instance_data, expect_kernel=False,
+ expect_ramdisk=False, rescue=instance_data)
+
+ def test_xml_and_uri_rescue_no_kernel(self):
+ instance_data = dict(self.test_instance)
+ instance_data['ramdisk_id'] = 'aki-deadbeef'
+ self._check_xml_and_uri(instance_data, expect_kernel=False,
+ expect_ramdisk=True, rescue=instance_data)
+
+ def test_xml_and_uri_rescue_no_ramdisk(self):
+ instance_data = dict(self.test_instance)
+ instance_data['kernel_id'] = 'aki-deadbeef'
+ self._check_xml_and_uri(instance_data, expect_kernel=True,
+ expect_ramdisk=False, rescue=instance_data)
+
+ def test_xml_uuid(self):
+ self._check_xml_and_uuid({"disk_format": "raw"})
+
+ def test_lxc_container_and_uri(self):
+ instance_data = dict(self.test_instance)
+ self._check_xml_and_container(instance_data)
+
+ def test_xml_disk_prefix(self):
+ instance_data = dict(self.test_instance)
+ self._check_xml_and_disk_prefix(instance_data, None)
+
+ def test_xml_user_specified_disk_prefix(self):
+ instance_data = dict(self.test_instance)
+ self._check_xml_and_disk_prefix(instance_data, 'sd')
+
+ def test_xml_disk_driver(self):
+ instance_data = dict(self.test_instance)
+ self._check_xml_and_disk_driver(instance_data)
+
+ def test_xml_disk_bus_virtio(self):
+ self._check_xml_and_disk_bus({"disk_format": "raw"},
+ None,
+ (("disk", "virtio", "vda"),))
+
+ def test_xml_disk_bus_ide(self):
+ # It's necessary to check if the architecture is power, because
+ # power doesn't have support to ide, and so libvirt translate
+ # all ide calls to scsi
+
+ expected = {arch.PPC: ("cdrom", "scsi", "sda"),
+ arch.PPC64: ("cdrom", "scsi", "sda")}
+
+ expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
+ ("cdrom", "ide", "hda"))
+ self._check_xml_and_disk_bus({"disk_format": "iso"},
+ None,
+ (expec_val,))
+
+ def test_xml_disk_bus_ide_and_virtio(self):
+ # It's necessary to check if the architecture is power, because
+ # power doesn't have support to ide, and so libvirt translate
+ # all ide calls to scsi
+
+ expected = {arch.PPC: ("cdrom", "scsi", "sda"),
+ arch.PPC64: ("cdrom", "scsi", "sda")}
+
+ swap = {'device_name': '/dev/vdc',
+ 'swap_size': 1}
+ ephemerals = [{'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'device_name': '/dev/vdb',
+ 'size': 1}]
+ block_device_info = {
+ 'swap': swap,
+ 'ephemerals': ephemerals}
+ expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
+ ("cdrom", "ide", "hda"))
+ self._check_xml_and_disk_bus({"disk_format": "iso"},
+ block_device_info,
+ (expec_val,
+ ("disk", "virtio", "vdb"),
+ ("disk", "virtio", "vdc")))
+
+ def test_list_instance_domains_fast(self):
+ if not hasattr(libvirt, "VIR_CONNECT_LIST_DOMAINS_ACTIVE"):
+ self.skipTest("libvirt missing VIR_CONNECT_LIST_DOMAINS_ACTIVE")
+
+ vm1 = FakeVirtDomain(id=3, name="instance00000001")
+ vm2 = FakeVirtDomain(id=17, name="instance00000002")
+ vm3 = FakeVirtDomain(name="instance00000003")
+ vm4 = FakeVirtDomain(name="instance00000004")
+
+ def fake_list_all(flags):
+ vms = []
+ if flags & libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE:
+ vms.extend([vm1, vm2])
+ if flags & libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE:
+ vms.extend([vm3, vm4])
+ return vms
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.listAllDomains = fake_list_all
+
+ self.mox.ReplayAll()
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ doms = drvr._list_instance_domains_fast()
+ self.assertEqual(len(doms), 2)
+ self.assertEqual(doms[0].name(), vm1.name())
+ self.assertEqual(doms[1].name(), vm2.name())
+
+ doms = drvr._list_instance_domains_fast(only_running=False)
+ self.assertEqual(len(doms), 4)
+ self.assertEqual(doms[0].name(), vm1.name())
+ self.assertEqual(doms[1].name(), vm2.name())
+ self.assertEqual(doms[2].name(), vm3.name())
+ self.assertEqual(doms[3].name(), vm4.name())
+
+ def test_list_instance_domains_slow(self):
+ vm1 = FakeVirtDomain(id=3, name="instance00000001")
+ vm2 = FakeVirtDomain(id=17, name="instance00000002")
+ vm3 = FakeVirtDomain(name="instance00000003")
+ vm4 = FakeVirtDomain(name="instance00000004")
+ vms = [vm1, vm2, vm3, vm4]
+
+ def fake_lookup_id(id):
+ for vm in vms:
+ if vm.ID() == id:
+ return vm
+ ex = fakelibvirt.make_libvirtError(
+ libvirt.libvirtError,
+ "No such domain",
+ error_code=libvirt.VIR_ERR_NO_DOMAIN)
+ raise ex
+
+ def fake_lookup_name(name):
+ for vm in vms:
+ if vm.name() == name:
+ return vm
+ ex = fakelibvirt.make_libvirtError(
+ libvirt.libvirtError,
+ "No such domain",
+ error_code=libvirt.VIR_ERR_NO_DOMAIN)
+ raise ex
+
+ def fake_list_doms():
+ # Include one ID that no longer exists
+ return [vm1.ID(), vm2.ID(), 666]
+
+ def fake_list_ddoms():
+ # Include one name that no longer exists and
+ # one dup from running list to show race in
+ # transition from inactive -> running
+ return [vm1.name(), vm3.name(), vm4.name(), "fishfood"]
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.listDomainsID = fake_list_doms
+ libvirt_driver.LibvirtDriver._conn.listDefinedDomains = fake_list_ddoms
+ libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup_id
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
+ libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 2
+ libvirt_driver.LibvirtDriver._conn.numOfDefinedDomains = lambda: 2
+
+ self.mox.ReplayAll()
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ doms = drvr._list_instance_domains_slow()
+ self.assertEqual(len(doms), 2)
+ self.assertEqual(doms[0].name(), vm1.name())
+ self.assertEqual(doms[1].name(), vm2.name())
+
+ doms = drvr._list_instance_domains_slow(only_running=False)
+ self.assertEqual(len(doms), 4)
+ self.assertEqual(doms[0].name(), vm1.name())
+ self.assertEqual(doms[1].name(), vm2.name())
+ self.assertEqual(doms[2].name(), vm3.name())
+ self.assertEqual(doms[3].name(), vm4.name())
+
+ def test_list_instance_domains_fallback_no_support(self):
+ vm1 = FakeVirtDomain(id=3, name="instance00000001")
+ vm2 = FakeVirtDomain(id=17, name="instance00000002")
+ vms = [vm1, vm2]
+
+ def fake_lookup_id(id):
+ for vm in vms:
+ if vm.ID() == id:
+ return vm
+ ex = fakelibvirt.make_libvirtError(
+ libvirt.libvirtError,
+ "No such domain",
+ error_code=libvirt.VIR_ERR_NO_DOMAIN)
+ raise ex
+
+ def fake_list_doms():
+ return [vm1.ID(), vm2.ID()]
+
+ def fake_list_all(flags):
+ ex = fakelibvirt.make_libvirtError(
+ libvirt.libvirtError,
+ "API is not supported",
+ error_code=libvirt.VIR_ERR_NO_SUPPORT)
+ raise ex
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.listDomainsID = fake_list_doms
+ libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup_id
+ libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 2
+ libvirt_driver.LibvirtDriver._conn.listAllDomains = fake_list_all
+
+ self.mox.ReplayAll()
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ doms = drvr._list_instance_domains()
+ self.assertEqual(len(doms), 2)
+ self.assertEqual(doms[0].id, vm1.id)
+ self.assertEqual(doms[1].id, vm2.id)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_list_instance_domains_fast")
+ def test_list_instance_domains_filtering(self, mock_list):
+ vm0 = FakeVirtDomain(id=0, name="Domain-0") # Xen dom-0
+ vm1 = FakeVirtDomain(id=3, name="instance00000001")
+ vm2 = FakeVirtDomain(id=17, name="instance00000002")
+ vm3 = FakeVirtDomain(name="instance00000003")
+ vm4 = FakeVirtDomain(name="instance00000004")
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ mock_list.return_value = [vm0, vm1, vm2]
+ doms = drvr._list_instance_domains()
+ self.assertEqual(len(doms), 2)
+ self.assertEqual(doms[0].name(), vm1.name())
+ self.assertEqual(doms[1].name(), vm2.name())
+ mock_list.assert_called_with(True)
+
+ mock_list.return_value = [vm0, vm1, vm2, vm3, vm4]
+ doms = drvr._list_instance_domains(only_running=False)
+ self.assertEqual(len(doms), 4)
+ self.assertEqual(doms[0].name(), vm1.name())
+ self.assertEqual(doms[1].name(), vm2.name())
+ self.assertEqual(doms[2].name(), vm3.name())
+ self.assertEqual(doms[3].name(), vm4.name())
+ mock_list.assert_called_with(False)
+
+ mock_list.return_value = [vm0, vm1, vm2]
+ doms = drvr._list_instance_domains(only_guests=False)
+ self.assertEqual(len(doms), 3)
+ self.assertEqual(doms[0].name(), vm0.name())
+ self.assertEqual(doms[1].name(), vm1.name())
+ self.assertEqual(doms[2].name(), vm2.name())
+ mock_list.assert_called_with(True)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_list_instance_domains")
+ def test_list_instances(self, mock_list):
+ vm1 = FakeVirtDomain(id=3, name="instance00000001")
+ vm2 = FakeVirtDomain(id=17, name="instance00000002")
+ vm3 = FakeVirtDomain(name="instance00000003")
+ vm4 = FakeVirtDomain(name="instance00000004")
+
+ mock_list.return_value = [vm1, vm2, vm3, vm4]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ names = drvr.list_instances()
+ self.assertEqual(names[0], vm1.name())
+ self.assertEqual(names[1], vm2.name())
+ self.assertEqual(names[2], vm3.name())
+ self.assertEqual(names[3], vm4.name())
+ mock_list.assert_called_with(only_running=False)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_list_instance_domains")
+ def test_list_instance_uuids(self, mock_list):
+ vm1 = FakeVirtDomain(id=3, name="instance00000001")
+ vm2 = FakeVirtDomain(id=17, name="instance00000002")
+ vm3 = FakeVirtDomain(name="instance00000003")
+ vm4 = FakeVirtDomain(name="instance00000004")
+
+ mock_list.return_value = [vm1, vm2, vm3, vm4]
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ uuids = drvr.list_instance_uuids()
+ self.assertEqual(len(uuids), 4)
+ self.assertEqual(uuids[0], vm1.UUIDString())
+ self.assertEqual(uuids[1], vm2.UUIDString())
+ self.assertEqual(uuids[2], vm3.UUIDString())
+ self.assertEqual(uuids[3], vm4.UUIDString())
+ mock_list.assert_called_with(only_running=False)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_list_instance_domains")
+ def test_get_all_block_devices(self, mock_list):
+ xml = [
+ """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ </disk>
+ <disk type='block'>
+ <source dev='/path/to/dev/1'/>
+ </disk>
+ </devices>
+ </domain>
+ """,
+ """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ </disk>
+ </devices>
+ </domain>
+ """,
+ """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ </disk>
+ <disk type='block'>
+ <source dev='/path/to/dev/3'/>
+ </disk>
+ </devices>
+ </domain>
+ """,
+ ]
+
+ mock_list.return_value = [
+ FakeVirtDomain(xml[0], id=3, name="instance00000001"),
+ FakeVirtDomain(xml[1], id=1, name="instance00000002"),
+ FakeVirtDomain(xml[2], id=5, name="instance00000003")]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ devices = drvr._get_all_block_devices()
+ self.assertEqual(devices, ['/path/to/dev/1', '/path/to/dev/3'])
+ mock_list.assert_called_with()
+
+ def test_snapshot_in_ami_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ self.flags(snapshots_directory='./', group='libvirt')
+
+ # Assign different image_ref from nova/images/fakes for testing ami
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["image_ref"] = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+
+ # Assuming that base image already exists in image_service
+ instance_ref = objects.Instance(**test_instance)
+ instance_ref.info_cache = objects.InstanceInfoCache(
+ network_info=None)
+
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ # Create new image. It will be updated in snapshot method
+ # To work with it from snapshot, the single image_service is needed
+ recv_meta = self.image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
+ libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
+
+ snapshot = self.image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+ self.assertEqual(snapshot['properties']['image_state'], 'available')
+ self.assertEqual(snapshot['status'], 'active')
+ self.assertEqual(snapshot['disk_format'], 'ami')
+ self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test_lxc_snapshot_in_ami_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ self.flags(snapshots_directory='./',
+ virt_type='lxc',
+ group='libvirt')
+
+ # Assign different image_ref from nova/images/fakes for testing ami
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["image_ref"] = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+
+ # Assuming that base image already exists in image_service
+ instance_ref = objects.Instance(**test_instance)
+ instance_ref.info_cache = objects.InstanceInfoCache(
+ network_info=None)
+
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ # Create new image. It will be updated in snapshot method
+ # To work with it from snapshot, the single image_service is needed
+ recv_meta = self.image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
+ libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
+
+ snapshot = self.image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+ self.assertEqual(snapshot['properties']['image_state'], 'available')
+ self.assertEqual(snapshot['status'], 'active')
+ self.assertEqual(snapshot['disk_format'], 'ami')
+ self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test_snapshot_in_raw_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ self.flags(snapshots_directory='./', group='libvirt')
+
+ # Assuming that base image already exists in image_service
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref.info_cache = objects.InstanceInfoCache(
+ network_info=None)
+
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ # Create new image. It will be updated in snapshot method
+ # To work with it from snapshot, the single image_service is needed
+ recv_meta = self.image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
+ self.stubs.Set(libvirt_driver.libvirt_utils, 'disk_type', 'raw')
+
+ def convert_image(source, dest, out_format):
+ libvirt_driver.libvirt_utils.files[dest] = ''
+
+ self.stubs.Set(images, 'convert_image', convert_image)
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
+
+ snapshot = self.image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+ self.assertEqual(snapshot['properties']['image_state'], 'available')
+ self.assertEqual(snapshot['status'], 'active')
+ self.assertEqual(snapshot['disk_format'], 'raw')
+ self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test_lvm_snapshot_in_raw_format(self):
+ # Tests Lvm backend snapshot functionality with raw format
+ # snapshots.
+ xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='block' device='disk'>
+ <source dev='/dev/some-vg/some-lv'/>
+ </disk>
+ </devices>
+ </domain>
+ """
+ update_task_state_calls = [
+ mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
+ mock.call(task_state=task_states.IMAGE_UPLOADING,
+ expected_state=task_states.IMAGE_PENDING_UPLOAD)]
+ mock_update_task_state = mock.Mock()
+ mock_lookupByName = mock.Mock(return_value=FakeVirtDomain(xml),
+ autospec=True)
+ volume_info = {'VG': 'nova-vg', 'LV': 'disk'}
+ mock_volume_info = mock.Mock(return_value=volume_info,
+ autospec=True)
+ mock_volume_info_calls = [mock.call('/dev/nova-vg/lv')]
+ mock_convert_image = mock.Mock()
+
+ def convert_image_side_effect(source, dest, out_format,
+ run_as_root=True):
+ libvirt_driver.libvirt_utils.files[dest] = ''
+ mock_convert_image.side_effect = convert_image_side_effect
+
+ self.flags(snapshots_directory='./',
+ snapshot_image_format='raw',
+ images_type='lvm',
+ images_volume_group='nova-vg', group='libvirt')
+ libvirt_driver.libvirt_utils.disk_type = "lvm"
+
+ # Start test
+ image_service = nova.tests.unit.image.fake.FakeImageService()
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref.info_cache = objects.InstanceInfoCache(
+ network_info=None)
+
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ recv_meta = image_service.create(context, sent_meta)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ with contextlib.nested(
+ mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_conn',
+ autospec=True),
+ mock.patch.object(libvirt_driver.imagebackend.lvm,
+ 'volume_info',
+ mock_volume_info),
+ mock.patch.object(libvirt_driver.imagebackend.images,
+ 'convert_image',
+ mock_convert_image),
+ mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_lookup_by_name',
+ mock_lookupByName)):
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ mock_update_task_state)
+
+ mock_lookupByName.assert_called_once_with("instance-00000001")
+ mock_volume_info.assert_has_calls(mock_volume_info_calls)
+ mock_convert_image.assert_called_once_with('/dev/nova-vg/lv',
+ mock.ANY,
+ 'raw',
+ run_as_root=True)
+ snapshot = image_service.show(context, recv_meta['id'])
+ mock_update_task_state.assert_has_calls(update_task_state_calls)
+ self.assertEqual('available', snapshot['properties']['image_state'])
+ self.assertEqual('active', snapshot['status'])
+ self.assertEqual('raw', snapshot['disk_format'])
+ self.assertEqual(snapshot_name, snapshot['name'])
+ # This is for all the subsequent tests that do not set the value of
+ # images type
+ self.flags(images_type='default', group='libvirt')
+ libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+ def test_lxc_snapshot_in_raw_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ self.flags(snapshots_directory='./',
+ virt_type='lxc',
+ group='libvirt')
+
+ # Assuming that base image already exists in image_service
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref.info_cache = objects.InstanceInfoCache(
+ network_info=None)
+
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ # Create new image. It will be updated in snapshot method
+ # To work with it from snapshot, the single image_service is needed
+ recv_meta = self.image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
+ self.stubs.Set(libvirt_driver.libvirt_utils, 'disk_type', 'raw')
+ libvirt_driver.libvirt_utils.disk_type = "raw"
+
+ def convert_image(source, dest, out_format):
+ libvirt_driver.libvirt_utils.files[dest] = ''
+
+ self.stubs.Set(images, 'convert_image', convert_image)
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
+
+ snapshot = self.image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+ self.assertEqual(snapshot['properties']['image_state'], 'available')
+ self.assertEqual(snapshot['status'], 'active')
+ self.assertEqual(snapshot['disk_format'], 'raw')
+ self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test_snapshot_in_qcow2_format(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ self.flags(snapshot_image_format='qcow2',
+ snapshots_directory='./',
+ group='libvirt')
+
+ # Assuming that base image already exists in image_service
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref.info_cache = objects.InstanceInfoCache(
+ network_info=None)
+
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ # Create new image. It will be updated in snapshot method
+ # To work with it from snapshot, the single image_service is needed
+ recv_meta = self.image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
+ libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
+
+ snapshot = self.image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+ self.assertEqual(snapshot['properties']['image_state'], 'available')
+ self.assertEqual(snapshot['status'], 'active')
+ self.assertEqual(snapshot['disk_format'], 'qcow2')
+ self.assertEqual(snapshot['name'], snapshot_name)
+
    def test_lxc_snapshot_in_qcow2_format(self):
        """Snapshot an LXC guest with snapshot_image_format='qcow2'.

        Verifies the uploaded image ends up active and in qcow2 format,
        and that update_task_state() is called with IMAGE_PENDING_UPLOAD
        followed by IMAGE_UPLOADING.
        """
        # Expected update_task_state() call sequence, checked via the
        # FunctionCallMatcher after the snapshot completes.
        expected_calls = [
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_UPLOADING,
                  'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)

        self.flags(snapshot_image_format='qcow2',
                   snapshots_directory='./',
                   virt_type='lxc',
                   group='libvirt')

        # Assuming that base image already exists in image_service
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.info_cache = objects.InstanceInfoCache(
            network_info=None)

        properties = {'instance_id': instance_ref['id'],
                      'user_id': str(self.context.user_id)}
        snapshot_name = 'test-snap'
        sent_meta = {'name': snapshot_name, 'is_public': False,
                     'status': 'creating', 'properties': properties}
        # Create new image. It will be updated in snapshot method
        # To work with it from snapshot, the single image_service is needed
        recv_meta = self.image_service.create(context, sent_meta)

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
        libvirt_driver.utils.execute = self.fake_execute
        libvirt_driver.libvirt_utils.disk_type = "qcow2"

        self.mox.ReplayAll()

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        conn.snapshot(self.context, instance_ref, recv_meta['id'],
                      func_call_matcher.call)

        snapshot = self.image_service.show(context, recv_meta['id'])
        self.assertIsNone(func_call_matcher.match())
        self.assertEqual(snapshot['properties']['image_state'], 'available')
        self.assertEqual(snapshot['status'], 'active')
        self.assertEqual(snapshot['disk_format'], 'qcow2')
        self.assertEqual(snapshot['name'], snapshot_name)
+
    def test_lvm_snapshot_in_qcow2_format(self):
        # Tests Lvm backend snapshot functionality with qcow2 format
        # snapshots (snapshot_image_format='qcow2' is set below; the
        # source disk itself is LVM-backed).
        xml = """
              <domain type='kvm'>
                  <devices>
                      <disk type='block' device='disk'>
                          <source dev='/dev/some-vg/some-lv'/>
                      </disk>
                  </devices>
              </domain>
              """
        update_task_state_calls = [
            mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
            mock.call(task_state=task_states.IMAGE_UPLOADING,
                      expected_state=task_states.IMAGE_PENDING_UPLOAD)]
        mock_update_task_state = mock.Mock()
        mock_lookupByName = mock.Mock(return_value=FakeVirtDomain(xml),
                                      autospec=True)
        volume_info = {'VG': 'nova-vg', 'LV': 'disk'}
        mock_volume_info = mock.Mock(return_value=volume_info, autospec=True)
        # NOTE(review): the expected lvm.volume_info() argument below
        # ('/dev/nova-vg/lv') does not match the <source dev=...> in the
        # domain XML above -- confirm this is the path the driver derives.
        mock_volume_info_calls = [mock.call('/dev/nova-vg/lv')]
        mock_convert_image = mock.Mock()

        # convert_image is faked to just register the destination file so
        # the driver can "upload" it afterwards.
        def convert_image_side_effect(source, dest, out_format,
                                      run_as_root=True):
            libvirt_driver.libvirt_utils.files[dest] = ''
        mock_convert_image.side_effect = convert_image_side_effect

        self.flags(snapshots_directory='./',
                   snapshot_image_format='qcow2',
                   images_type='lvm',
                   images_volume_group='nova-vg', group='libvirt')
        libvirt_driver.libvirt_utils.disk_type = "lvm"

        # Start test
        image_service = nova.tests.unit.image.fake.FakeImageService()
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.info_cache = objects.InstanceInfoCache(
            network_info=None)

        properties = {'instance_id': instance_ref['id'],
                      'user_id': str(self.context.user_id)}
        snapshot_name = 'test-snap'
        sent_meta = {'name': snapshot_name, 'is_public': False,
                     'status': 'creating', 'properties': properties}
        recv_meta = image_service.create(context, sent_meta)

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with contextlib.nested(
                mock.patch.object(libvirt_driver.LibvirtDriver,
                                  '_conn',
                                  autospec=True),
                mock.patch.object(libvirt_driver.imagebackend.lvm,
                                  'volume_info',
                                  mock_volume_info),
                mock.patch.object(libvirt_driver.imagebackend.images,
                                  'convert_image',
                                  mock_convert_image),
                mock.patch.object(libvirt_driver.LibvirtDriver,
                                  '_lookup_by_name',
                                  mock_lookupByName)):
            conn.snapshot(self.context, instance_ref, recv_meta['id'],
                          mock_update_task_state)

        mock_lookupByName.assert_called_once_with("instance-00000001")
        mock_volume_info.assert_has_calls(mock_volume_info_calls)
        mock_convert_image.assert_called_once_with('/dev/nova-vg/lv',
                                                   mock.ANY,
                                                   'qcow2',
                                                   run_as_root=True)
        snapshot = image_service.show(context, recv_meta['id'])
        mock_update_task_state.assert_has_calls(update_task_state_calls)
        self.assertEqual('available', snapshot['properties']['image_state'])
        self.assertEqual('active', snapshot['status'])
        self.assertEqual('qcow2', snapshot['disk_format'])
        self.assertEqual(snapshot_name, snapshot['name'])
        # Restore the flags/globals mutated above so later tests see the
        # default image backend again.
        self.flags(images_type='default', group='libvirt')
        libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
    def test_snapshot_no_image_architecture(self):
        """Snapshotting from a base image that has no architecture
        property should still produce an active snapshot image.
        """
        expected_calls = [
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_UPLOADING,
                  'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)

        self.flags(snapshots_directory='./',
                   group='libvirt')

        # Assign different image_ref from nova/images/fakes for
        # testing different base image
        test_instance = copy.deepcopy(self.test_instance)
        test_instance["image_ref"] = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'

        # Assuming that base image already exists in image_service
        instance_ref = objects.Instance(**test_instance)
        instance_ref.info_cache = objects.InstanceInfoCache(
            network_info=None)

        properties = {'instance_id': instance_ref['id'],
                      'user_id': str(self.context.user_id)}
        snapshot_name = 'test-snap'
        sent_meta = {'name': snapshot_name, 'is_public': False,
                     'status': 'creating', 'properties': properties}
        # Create new image. It will be updated in snapshot method
        # To work with it from snapshot, the single image_service is needed
        recv_meta = self.image_service.create(context, sent_meta)

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
        libvirt_driver.utils.execute = self.fake_execute

        self.mox.ReplayAll()

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        conn.snapshot(self.context, instance_ref, recv_meta['id'],
                      func_call_matcher.call)

        snapshot = self.image_service.show(context, recv_meta['id'])
        self.assertIsNone(func_call_matcher.match())
        self.assertEqual(snapshot['properties']['image_state'], 'available')
        self.assertEqual(snapshot['status'], 'active')
        self.assertEqual(snapshot['name'], snapshot_name)
+
    def test_lxc_snapshot_no_image_architecture(self):
        """LXC variant of test_snapshot_no_image_architecture."""
        expected_calls = [
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_UPLOADING,
                  'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)

        self.flags(snapshots_directory='./',
                   virt_type='lxc',
                   group='libvirt')

        # Assign different image_ref from nova/images/fakes for
        # testing different base image
        test_instance = copy.deepcopy(self.test_instance)
        test_instance["image_ref"] = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'

        # Assuming that base image already exists in image_service
        # NOTE(review): unlike the sibling snapshot tests, no info_cache
        # is attached to the instance here -- confirm whether one is
        # required.
        instance_ref = objects.Instance(**test_instance)
        properties = {'instance_id': instance_ref['id'],
                      'user_id': str(self.context.user_id)}
        snapshot_name = 'test-snap'
        sent_meta = {'name': snapshot_name, 'is_public': False,
                     'status': 'creating', 'properties': properties}
        # Create new image. It will be updated in snapshot method
        # To work with it from snapshot, the single image_service is needed
        recv_meta = self.image_service.create(context, sent_meta)

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
        libvirt_driver.utils.execute = self.fake_execute
        libvirt_driver.libvirt_utils.disk_type = "qcow2"

        self.mox.ReplayAll()

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        conn.snapshot(self.context, instance_ref, recv_meta['id'],
                      func_call_matcher.call)

        snapshot = self.image_service.show(context, recv_meta['id'])
        self.assertIsNone(func_call_matcher.match())
        self.assertEqual(snapshot['properties']['image_state'], 'available')
        self.assertEqual(snapshot['status'], 'active')
        self.assertEqual(snapshot['name'], snapshot_name)
+
    def test_snapshot_no_original_image(self):
        """Snapshotting an instance whose image_ref no longer exists in
        the image service should still succeed.
        """
        expected_calls = [
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_UPLOADING,
                  'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)

        self.flags(snapshots_directory='./',
                   group='libvirt')

        # Assign a non-existent image
        test_instance = copy.deepcopy(self.test_instance)
        test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'

        instance_ref = objects.Instance(**test_instance)
        instance_ref.info_cache = objects.InstanceInfoCache(
            network_info=None)

        properties = {'instance_id': instance_ref['id'],
                      'user_id': str(self.context.user_id)}
        snapshot_name = 'test-snap'
        sent_meta = {'name': snapshot_name, 'is_public': False,
                     'status': 'creating', 'properties': properties}
        recv_meta = self.image_service.create(context, sent_meta)

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
        libvirt_driver.utils.execute = self.fake_execute

        self.mox.ReplayAll()

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        conn.snapshot(self.context, instance_ref, recv_meta['id'],
                      func_call_matcher.call)

        snapshot = self.image_service.show(context, recv_meta['id'])
        self.assertIsNone(func_call_matcher.match())
        self.assertEqual(snapshot['properties']['image_state'], 'available')
        self.assertEqual(snapshot['status'], 'active')
        self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test_lxc_snapshot_no_original_image(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+
+ self.flags(snapshots_directory='./',
+ virt_type='lxc',
+ group='libvirt')
+ libvirt_driver.libvirt_utils.disk_type = "qcow2"
+
+ # Assign a non-existent image
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'
+
+ instance_ref = objects.Instance(**test_instance)
+ properties = {'instance_id': instance_ref['id'],
+ 'user_id': str(self.context.user_id)}
+ snapshot_name = 'test-snap'
+ sent_meta = {'name': snapshot_name, 'is_public': False,
+ 'status': 'creating', 'properties': properties}
+ recv_meta = self.image_service.create(context, sent_meta)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+ self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
+ libvirt_driver.utils.execute = self.fake_execute
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(False)
+ conn.snapshot(self.context, instance_ref, recv_meta['id'],
+ func_call_matcher.call)
+
+ snapshot = self.image_service.show(context, recv_meta['id'])
+ self.assertIsNone(func_call_matcher.match())
+ self.assertEqual(snapshot['properties']['image_state'], 'available')
+ self.assertEqual(snapshot['status'], 'active')
+ self.assertEqual(snapshot['name'], snapshot_name)
+
    def test_snapshot_metadata_image(self):
        """Custom properties on the snapshot metadata (architecture and
        arbitrary key/value pairs) must be present on the created image.
        """
        expected_calls = [
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_UPLOADING,
                  'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)

        self.flags(snapshots_directory='./',
                   group='libvirt')

        # Assign an image with an architecture defined (x86_64)
        test_instance = copy.deepcopy(self.test_instance)
        test_instance["image_ref"] = 'a440c04b-79fa-479c-bed1-0b816eaec379'

        instance_ref = objects.Instance(**test_instance)
        instance_ref.info_cache = objects.InstanceInfoCache(
            network_info=None)

        properties = {'instance_id': instance_ref['id'],
                      'user_id': str(self.context.user_id),
                      'architecture': 'fake_arch',
                      'key_a': 'value_a',
                      'key_b': 'value_b'}
        snapshot_name = 'test-snap'
        sent_meta = {'name': snapshot_name, 'is_public': False,
                     'status': 'creating', 'properties': properties}
        recv_meta = self.image_service.create(context, sent_meta)

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
        libvirt_driver.utils.execute = self.fake_execute

        self.mox.ReplayAll()

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        conn.snapshot(self.context, instance_ref, recv_meta['id'],
                      func_call_matcher.call)

        snapshot = self.image_service.show(context, recv_meta['id'])
        self.assertIsNone(func_call_matcher.match())
        self.assertEqual(snapshot['properties']['image_state'], 'available')
        self.assertEqual(snapshot['properties']['architecture'], 'fake_arch')
        self.assertEqual(snapshot['properties']['key_a'], 'value_a')
        self.assertEqual(snapshot['properties']['key_b'], 'value_b')
        self.assertEqual(snapshot['status'], 'active')
        self.assertEqual(snapshot['name'], snapshot_name)
+
    def test_snapshot_with_os_type(self):
        """The instance's os_type must be propagated into the snapshot
        image properties.
        """
        expected_calls = [
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_UPLOADING,
                  'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)

        self.flags(snapshots_directory='./',
                   group='libvirt')

        # Assign a non-existent image
        test_instance = copy.deepcopy(self.test_instance)
        test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'
        test_instance["os_type"] = 'linux'

        instance_ref = objects.Instance(**test_instance)
        instance_ref.info_cache = objects.InstanceInfoCache(
            network_info=None)
        properties = {'instance_id': instance_ref['id'],
                      'user_id': str(self.context.user_id),
                      'os_type': instance_ref['os_type']}
        snapshot_name = 'test-snap'
        sent_meta = {'name': snapshot_name, 'is_public': False,
                     'status': 'creating', 'properties': properties}
        recv_meta = self.image_service.create(context, sent_meta)

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
        libvirt_driver.utils.execute = self.fake_execute

        self.mox.ReplayAll()

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        conn.snapshot(self.context, instance_ref, recv_meta['id'],
                      func_call_matcher.call)

        snapshot = self.image_service.show(context, recv_meta['id'])
        self.assertIsNone(func_call_matcher.match())
        self.assertEqual(snapshot['properties']['image_state'], 'available')
        self.assertEqual(snapshot['properties']['os_type'],
                         instance_ref['os_type'])
        self.assertEqual(snapshot['status'], 'active')
        self.assertEqual(snapshot['name'], snapshot_name)
+
+ def test__create_snapshot_metadata(self):
+ base = {}
+ instance = {'kernel_id': 'kernel',
+ 'project_id': 'prj_id',
+ 'ramdisk_id': 'ram_id',
+ 'os_type': None}
+ img_fmt = 'raw'
+ snp_name = 'snapshot_name'
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ ret = conn._create_snapshot_metadata(base, instance, img_fmt, snp_name)
+ expected = {'is_public': False,
+ 'status': 'active',
+ 'name': snp_name,
+ 'properties': {
+ 'kernel_id': instance['kernel_id'],
+ 'image_location': 'snapshot',
+ 'image_state': 'available',
+ 'owner_id': instance['project_id'],
+ 'ramdisk_id': instance['ramdisk_id'],
+ },
+ 'disk_format': img_fmt,
+ 'container_format': base.get('container_format', 'bare')
+ }
+ self.assertEqual(ret, expected)
+
+ # simulate an instance with os_type field defined
+ # disk format equals to ami
+ # container format not equals to bare
+ instance['os_type'] = 'linux'
+ base['disk_format'] = 'ami'
+ base['container_format'] = 'test_container'
+ expected['properties']['os_type'] = instance['os_type']
+ expected['disk_format'] = base['disk_format']
+ expected['container_format'] = base.get('container_format', 'bare')
+ ret = conn._create_snapshot_metadata(base, instance, img_fmt, snp_name)
+ self.assertEqual(ret, expected)
+
+ @mock.patch('nova.virt.libvirt.volume.LibvirtFakeVolumeDriver.'
+ 'connect_volume')
+ @mock.patch('nova.virt.libvirt.volume.LibvirtFakeVolumeDriver.get_config')
+ def test_get_volume_config(self, get_config, connect_volume):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ connection_info = {'driver_volume_type': 'fake',
+ 'data': {'device_path': '/fake',
+ 'access_mode': 'rw'}}
+ bdm = {'device_name': 'vdb',
+ 'disk_bus': 'fake-bus',
+ 'device_type': 'fake-type'}
+ disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
+ 'dev': 'vdb'}
+ mock_config = mock.MagicMock()
+
+ get_config.return_value = mock_config
+ config = conn._get_volume_config(connection_info, disk_info)
+ get_config.assert_called_once_with(connection_info, disk_info)
+ self.assertEqual(mock_config, config)
+
    def test_attach_invalid_volume_type(self):
        """attach_volume with an unrecognised driver_volume_type must
        raise VolumeDriverNotFound.
        """
        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        instance = fake_instance.fake_instance_obj(
            self.context, **self.test_instance)
        self.mox.ReplayAll()
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.VolumeDriverNotFound,
                          conn.attach_volume, None,
                          {"driver_volume_type": "badtype"},
                          instance,
                          "/dev/sda")
+
    def test_attach_blockio_invalid_hypervisor(self):
        """Block-IO tuning (logical/physical_block_size) must be rejected
        with InvalidHypervisorType on an unsupported virt type.
        """
        self.flags(virt_type='fake_type', group='libvirt')
        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        instance = fake_instance.fake_instance_obj(
            self.context, **self.test_instance)
        self.mox.ReplayAll()
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.InvalidHypervisorType,
                          conn.attach_volume, None,
                          {"driver_volume_type": "fake",
                           "data": {"logical_block_size": "4096",
                                    "physical_block_size": "4096"}
                          },
                          instance,
                          "/dev/sda")
+
    def test_attach_blockio_invalid_version(self):
        """Block-IO tuning requires a minimum libvirt version; with 0.9.8
        reported, attach_volume must raise Invalid.
        """
        # Stubbed libvirt version: 0.9.8, encoded the way libvirt's
        # getLibVersion() does (major*1e6 + minor*1e3 + micro).
        def get_lib_version_stub():
            return (0 * 1000 * 1000) + (9 * 1000) + 8
        self.flags(virt_type='qemu', group='libvirt')
        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        instance = fake_instance.fake_instance_obj(
            self.context, **self.test_instance)
        self.mox.ReplayAll()
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # NOTE(review): the stub is installed on self.conn (the fake
        # libvirt connection from create_fake_libvirt_mock), not on the
        # local 'conn' driver -- presumably intentional; confirm.
        self.stubs.Set(self.conn, "getLibVersion", get_lib_version_stub)
        self.assertRaises(exception.Invalid,
                          conn.attach_volume, None,
                          {"driver_volume_type": "fake",
                           "data": {"logical_block_size": "4096",
                                    "physical_block_size": "4096"}
                          },
                          instance,
                          "/dev/sda")
+
    @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
    def test_attach_volume_with_vir_domain_affect_live_flag(self,
            mock_lookup_by_name, mock_get_info):
        """For a running or paused domain, attach_volume must pass both
        AFFECT_CONFIG and AFFECT_LIVE to attachDeviceFlags so the change
        hits the persistent config and the live guest.
        """
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
        mock_dom = mock.MagicMock()
        mock_lookup_by_name.return_value = mock_dom

        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}
        bdm = {'device_name': 'vdb',
               'disk_bus': 'fake-bus',
               'device_type': 'fake-type'}
        disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
                     'dev': 'vdb'}
        mock_get_info.return_value = disk_info
        mock_conf = mock.MagicMock()
        flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                 fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

        with contextlib.nested(
            mock.patch.object(conn, '_connect_volume'),
            mock.patch.object(conn, '_get_volume_config',
                              return_value=mock_conf),
            mock.patch.object(conn, '_set_cache_mode')
        ) as (mock_connect_volume, mock_get_volume_config,
              mock_set_cache_mode):
            for state in (power_state.RUNNING, power_state.PAUSED):
                # Fake virDomain.info() result; only the first element
                # (the state) is relevant to attach_volume here.
                mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]

                conn.attach_volume(self.context, connection_info, instance,
                                   "/dev/vdb", disk_bus=bdm['disk_bus'],
                                   device_type=bdm['device_type'])

                mock_lookup_by_name.assert_called_with(instance['name'])
                mock_get_info.assert_called_with(CONF.libvirt.virt_type, bdm)
                mock_connect_volume.assert_called_with(
                    connection_info, disk_info)
                mock_get_volume_config.assert_called_with(
                    connection_info, disk_info)
                mock_set_cache_mode.assert_called_with(mock_conf)
                mock_dom.attachDeviceFlags.assert_called_with(
                    mock_conf.to_xml(), flags)
+
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_disk_xml')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
    def test_detach_volume_with_vir_domain_affect_live_flag(self,
            mock_lookup_by_name, mock_get_disk_xml):
        """For a running or paused domain, detach_volume must pass both
        AFFECT_CONFIG and AFFECT_LIVE to detachDeviceFlags, then
        disconnect the volume.
        """
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
        mock_dom = mock.MagicMock()
        # Disk XML returned by the stubbed _get_disk_xml for device 'vdc'.
        mock_xml = \
            """
            <disk type='file'>
                <source file='/path/to/fake-volume'/>
                <target dev='vdc' bus='virtio'/>
            </disk>
            """
        mock_get_disk_xml.return_value = mock_xml

        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}
        flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                 fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

        with mock.patch.object(conn, '_disconnect_volume') as \
                mock_disconnect_volume:
            for state in (power_state.RUNNING, power_state.PAUSED):
                mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
                mock_lookup_by_name.return_value = mock_dom

                conn.detach_volume(connection_info, instance, '/dev/vdc')

                mock_lookup_by_name.assert_called_with(instance['name'])
                mock_get_disk_xml.assert_called_with(mock_dom.XMLDesc(0),
                                                     'vdc')
                mock_dom.detachDeviceFlags.assert_called_with(mock_xml, flags)
                mock_disconnect_volume.assert_called_with(
                    connection_info, 'vdc')
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_multi_nic(self, mock_flavor):
+ network_info = _fake_network_info(self.stubs, 2)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = objects.Instance(**self.test_instance)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ xml = conn._get_guest_xml(self.context, instance_ref,
+ network_info, disk_info)
+ tree = etree.fromstring(xml)
+ interfaces = tree.findall("./devices/interface")
+ self.assertEqual(len(interfaces), 2)
+ self.assertEqual(interfaces[0].get('type'), 'bridge')
+
    def _behave_supports_direct_io(self, raise_open=False, raise_write=False,
                                   exc=ValueError()):
        """Record one mox scenario for _supports_direct_io.

        Records expectations on the (already stubbed) os.open / os.write /
        os.close / os.unlink for a single probe of the O_DIRECT test file,
        optionally making the open or the write raise ``exc``.
        """
        # NOTE(review): the nesting of the write/close/unlink expectations
        # relative to the raise_open branch could not be confirmed from
        # this copy -- verify against the upstream file.  'write_bahavior'
        # is a pre-existing misspelling of 'write_behavior'.
        open_behavior = os.open(os.path.join('.', '.directio.test'),
                                os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
        if raise_open:
            open_behavior.AndRaise(exc)
        else:
            open_behavior.AndReturn(3)
        write_bahavior = os.write(3, mox.IgnoreArg())
        if raise_write:
            write_bahavior.AndRaise(exc)
        else:
            os.close(3)
            os.unlink(3)
+
    def test_supports_direct_io(self):
        """Exercise _supports_direct_io against stubbed os.* calls:
        success, a generic failure on write/open (propagated), and EINVAL
        on write/open (reported as 'not supported').
        """
        # O_DIRECT is not supported on all Python runtimes, so on platforms
        # where it's not supported (e.g. Mac), we can still test the code-path
        # by stubbing out the value.
        if not hasattr(os, 'O_DIRECT'):
            # `mock` seems to have trouble stubbing an attr that doesn't
            # originally exist, so falling back to stubbing out the attribute
            # directly.
            os.O_DIRECT = 16384
            self.addCleanup(delattr, os, 'O_DIRECT')

        einval = OSError()
        einval.errno = errno.EINVAL
        self.mox.StubOutWithMock(os, 'open')
        self.mox.StubOutWithMock(os, 'write')
        self.mox.StubOutWithMock(os, 'close')
        self.mox.StubOutWithMock(os, 'unlink')
        _supports_direct_io = libvirt_driver.LibvirtDriver._supports_direct_io

        # Record the five scenarios in the order they are replayed below.
        self._behave_supports_direct_io()
        self._behave_supports_direct_io(raise_write=True)
        self._behave_supports_direct_io(raise_open=True)
        self._behave_supports_direct_io(raise_write=True, exc=einval)
        self._behave_supports_direct_io(raise_open=True, exc=einval)

        self.mox.ReplayAll()
        self.assertTrue(_supports_direct_io('.'))
        self.assertRaises(ValueError, _supports_direct_io, '.')
        self.assertRaises(ValueError, _supports_direct_io, '.')
        self.assertFalse(_supports_direct_io('.'))
        self.assertFalse(_supports_direct_io('.'))
        self.mox.VerifyAll()
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _check_xml_and_container(self, instance, mock_flavor):
+ instance_ref = objects.Instance(**instance)
+
+ self.flags(virt_type='lxc', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ self.assertEqual(conn.uri(), 'lxc:///')
+
+ network_info = _fake_network_info(self.stubs, 1)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ xml = conn._get_guest_xml(self.context, instance_ref,
+ network_info, disk_info)
+ tree = etree.fromstring(xml)
+
+ check = [
+ (lambda t: t.find('.').get('type'), 'lxc'),
+ (lambda t: t.find('./os/type').text, 'exe'),
+ (lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]
+
+ for i, (check, expected_result) in enumerate(check):
+ self.assertEqual(check(tree),
+ expected_result,
+ '%s failed common check %d' % (xml, i))
+
+ target = tree.find('./devices/filesystem/source').get('dir')
+ self.assertTrue(len(target) > 0)
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def _check_xml_and_disk_prefix(self, instance, prefix, mock_flavor):
        """For each virt type, generate guest XML and verify the first
        disk target uses the expected device name: the configured
        disk_prefix (plus 'a') when one is given, otherwise the virt
        type's default.
        """
        instance_ref = objects.Instance(**instance)

        def _get_prefix(p, default):
            # A configured prefix names the first disk '<prefix>a'.
            if p:
                return p + 'a'
            return default

        type_disk_map = {
            'qemu': [
                (lambda t: t.find('.').get('type'), 'qemu'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'vda'))],
            'xen': [
                (lambda t: t.find('.').get('type'), 'xen'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'sda'))],
            'kvm': [
                (lambda t: t.find('.').get('type'), 'kvm'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'vda'))],
            'uml': [
                (lambda t: t.find('.').get('type'), 'uml'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'ubda'))]
            }

        for (virt_type, checks) in type_disk_map.iteritems():
            self.flags(virt_type=virt_type, group='libvirt')
            if prefix:
                self.flags(disk_prefix=prefix, group='libvirt')
            conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

            network_info = _fake_network_info(self.stubs, 1)
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance_ref)

            flavor = instance_ref.get_flavor()
            flavor.extra_specs = {}
            mock_flavor.return_value = flavor
            xml = conn._get_guest_xml(self.context, instance_ref,
                                      network_info, disk_info)
            tree = etree.fromstring(xml)

            for i, (check, expected_result) in enumerate(checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s != %s failed check %d' %
                                 (check(tree), expected_result, i))
+
    @mock.patch.object(objects.Flavor, 'get_by_id')
    def _check_xml_and_disk_driver(self, image_meta, mock_flavor):
        """Verify the disk driver cache mode in the guest XML: 'none'
        when the instance directory supports O_DIRECT, 'writethrough'
        when it does not.
        """
        os_open = os.open
        directio_supported = True

        # Stub os.open so the O_DIRECT probe behaves according to
        # directio_supported without requiring real O_DIRECT support.
        def os_open_stub(path, flags, *args, **kwargs):
            if flags & os.O_DIRECT:
                if not directio_supported:
                    raise OSError(errno.EINVAL,
                                  '%s: %s' % (os.strerror(errno.EINVAL), path))
                flags &= ~os.O_DIRECT
            return os_open(path, flags, *args, **kwargs)

        self.stubs.Set(os, 'open', os_open_stub)

        # NOTE(review): a local function decorated with @staticmethod is
        # installed on the class via stubs.Set -- presumably so it is not
        # bound when looked up on the class; confirm against upstream.
        @staticmethod
        def connection_supports_direct_io_stub(dirpath):
            return directio_supported

        self.stubs.Set(libvirt_driver.LibvirtDriver,
            '_supports_direct_io', connection_supports_direct_io_stub)

        instance_ref = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self.stubs, 1)

        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)

        flavor = instance_ref.get_flavor()
        flavor.extra_specs = {}
        mock_flavor.return_value = flavor

        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        disks = tree.findall('./devices/disk/driver')
        for guest_disk in disks:
            self.assertEqual(guest_disk.get("cache"), "none")

        directio_supported = False

        # The O_DIRECT availability is cached on first use in
        # LibvirtDriver, hence we re-create it here
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        disks = tree.findall('./devices/disk/driver')
        for guest_disk in disks:
            self.assertEqual(guest_disk.get("cache"), "writethrough")
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _check_xml_and_disk_bus(self, image_meta,
+ block_device_info, wantConfig,
+ mock_flavor):
+ instance_ref = objects.Instance(**self.test_instance)
+ network_info = _fake_network_info(self.stubs, 1)
+
+ drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ block_device_info,
+ image_meta)
+
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+
+ xml = drv._get_guest_xml(self.context, instance_ref,
+ network_info, disk_info, image_meta,
+ block_device_info=block_device_info)
+ tree = etree.fromstring(xml)
+
+ got_disks = tree.findall('./devices/disk')
+ got_disk_targets = tree.findall('./devices/disk/target')
+ for i in range(len(wantConfig)):
+ want_device_type = wantConfig[i][0]
+ want_device_bus = wantConfig[i][1]
+ want_device_dev = wantConfig[i][2]
+
+ got_device_type = got_disks[i].get('device')
+ got_device_bus = got_disk_targets[i].get('bus')
+ got_device_dev = got_disk_targets[i].get('dev')
+
+ self.assertEqual(got_device_type, want_device_type)
+ self.assertEqual(got_device_bus, want_device_bus)
+ self.assertEqual(got_device_dev, want_device_dev)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _check_xml_and_uuid(self, image_meta, mock_flavor):
+ instance_ref = objects.Instance(**self.test_instance)
+ network_info = _fake_network_info(self.stubs, 1)
+
+ drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+ flavor = instance_ref.get_flavor()
+ flavor.extra_specs = {}
+ mock_flavor.return_value = flavor
+ xml = drv._get_guest_xml(self.context, instance_ref,
+ network_info, disk_info, image_meta)
+ tree = etree.fromstring(xml)
+ self.assertEqual(tree.find('./uuid').text,
+ instance_ref['uuid'])
+
@mock.patch.object(objects.Flavor, 'get_by_id')
def _check_xml_and_uri(self, instance, mock_flavor,
                       expect_ramdisk=False, expect_kernel=False,
                       rescue=None, expect_xen_hvm=False, xen_only=False):
    """Build guest XML for each relevant virt_type and verify it.

    For every hypervisor in the map this checks the connection URI,
    the generated domain XML (os type, kernel/ramdisk/cmdline
    presence, sysinfo entries, serial/console devices, disks) and the
    firewall filterref.  Finally it verifies that an explicitly
    configured connection_uri is honored unchanged by the driver.
    """
    instance_ref = objects.Instance(**instance)

    # Xen guests default to paravirt (XEN) mode unless the test
    # explicitly expects HVM.
    xen_vm_mode = vm_mode.XEN
    if expect_xen_hvm:
        xen_vm_mode = vm_mode.HVM

    # virt_type -> (expected connection URI,
    #               list of (xml-check callable, expected value))
    type_uri_map = {'qemu': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'qemu'),
                              (lambda t: t.find('./os/type').text,
                               vm_mode.HVM),
                              (lambda t: t.find('./devices/emulator'),
                               None)]),
                    'kvm': ('qemu:///system',
                            [(lambda t: t.find('.').get('type'), 'kvm'),
                             (lambda t: t.find('./os/type').text,
                              vm_mode.HVM),
                             (lambda t: t.find('./devices/emulator'),
                              None)]),
                    'uml': ('uml:///system',
                            [(lambda t: t.find('.').get('type'), 'uml'),
                             (lambda t: t.find('./os/type').text,
                              vm_mode.UML)]),
                    'xen': ('xen:///',
                            [(lambda t: t.find('.').get('type'), 'xen'),
                             (lambda t: t.find('./os/type').text,
                              xen_vm_mode)])}

    if expect_xen_hvm or xen_only:
        hypervisors_to_check = ['xen']
    else:
        hypervisors_to_check = ['qemu', 'kvm', 'xen']

    for hypervisor_type in hypervisors_to_check:
        check_list = type_uri_map[hypervisor_type][1]

        if rescue:
            suffix = '.rescue'
        else:
            suffix = ''
        if expect_kernel:
            check = (lambda t: self.relpath(t.find('./os/kernel').text).
                     split('/')[1], 'kernel' + suffix)
        else:
            check = (lambda t: t.find('./os/kernel'), None)
        check_list.append(check)

        if expect_kernel:
            # no_timer_check is only expected on qemu guests
            check = (lambda t: "no_timer_check" in t.find('./os/cmdline').
                     text, hypervisor_type == "qemu")
            check_list.append(check)
        # Hypervisors that only support vm_mode.HVM and Xen
        # should not produce configuration that results in kernel
        # arguments
        if not expect_kernel and (hypervisor_type in
                                  ['qemu', 'kvm', 'xen']):
            check = (lambda t: t.find('./os/root'), None)
            check_list.append(check)
            check = (lambda t: t.find('./os/cmdline'), None)
            check_list.append(check)

        if expect_ramdisk:
            check = (lambda t: self.relpath(t.find('./os/initrd').text).
                     split('/')[1], 'ramdisk' + suffix)
        else:
            check = (lambda t: t.find('./os/initrd'), None)
        check_list.append(check)

        if hypervisor_type in ['qemu', 'kvm']:
            # sysinfo SMBIOS entries: manufacturer, product, version,
            # serial, uuid -- in that order
            xpath = "./sysinfo/system/entry"
            check = (lambda t: t.findall(xpath)[0].get("name"),
                     "manufacturer")
            check_list.append(check)
            check = (lambda t: t.findall(xpath)[0].text,
                     version.vendor_string())
            check_list.append(check)

            check = (lambda t: t.findall(xpath)[1].get("name"),
                     "product")
            check_list.append(check)
            check = (lambda t: t.findall(xpath)[1].text,
                     version.product_string())
            check_list.append(check)

            check = (lambda t: t.findall(xpath)[2].get("name"),
                     "version")
            check_list.append(check)
            # NOTE(sirp): empty strings don't roundtrip in lxml (they are
            # converted to None), so we need an `or ''` to correct for that
            check = (lambda t: t.findall(xpath)[2].text or '',
                     version.version_string_with_package())
            check_list.append(check)

            check = (lambda t: t.findall(xpath)[3].get("name"),
                     "serial")
            check_list.append(check)
            check = (lambda t: t.findall(xpath)[3].text,
                     "cef19ce0-0ca2-11df-855d-b19fbce37686")
            check_list.append(check)

            check = (lambda t: t.findall(xpath)[4].get("name"),
                     "uuid")
            check_list.append(check)
            check = (lambda t: t.findall(xpath)[4].text,
                     instance['uuid'])
            check_list.append(check)

        if hypervisor_type in ['qemu', 'kvm']:
            # qemu/kvm: file-backed console log plus a pty serial
            check = (lambda t: t.findall('./devices/serial')[0].get(
                'type'), 'file')
            check_list.append(check)
            check = (lambda t: t.findall('./devices/serial')[1].get(
                'type'), 'pty')
            check_list.append(check)
            check = (lambda t: self.relpath(t.findall(
                './devices/serial/source')[0].get('path')).
                split('/')[1], 'console.log')
            check_list.append(check)
        else:
            check = (lambda t: t.find('./devices/console').get(
                'type'), 'pty')
            check_list.append(check)

    # Checks applied regardless of hypervisor type.
    common_checks = [
        (lambda t: t.find('.').tag, 'domain'),
        (lambda t: t.find('./memory').text, '2097152')]
    if rescue:
        common_checks += [
            (lambda t: self.relpath(t.findall('./devices/disk/source')[0].
                get('file')).split('/')[1], 'disk.rescue'),
            (lambda t: self.relpath(t.findall('./devices/disk/source')[1].
                get('file')).split('/')[1], 'disk')]
    else:
        common_checks += [(lambda t: self.relpath(t.findall(
            './devices/disk/source')[0].get('file')).split('/')[1],
            'disk')]
        common_checks += [(lambda t: self.relpath(t.findall(
            './devices/disk/source')[1].get('file')).split('/')[1],
            'disk.local')]

    for virt_type in hypervisors_to_check:
        expected_uri = type_uri_map[virt_type][0]
        checks = type_uri_map[virt_type][1]
        self.flags(virt_type=virt_type, group='libvirt')

        with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
            del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES

            conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

            self.assertEqual(conn.uri(), expected_uri)

            network_info = _fake_network_info(self.stubs, 1)
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance_ref,
                                                rescue=rescue)

            flavor = instance_ref.get_flavor()
            flavor.extra_specs = {}
            mock_flavor.return_value = flavor
            xml = conn._get_guest_xml(self.context, instance_ref,
                                      network_info, disk_info,
                                      rescue=rescue)
            tree = etree.fromstring(xml)
            for i, (check, expected_result) in enumerate(checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s != %s failed check %d' %
                                 (check(tree), expected_result, i))

            for i, (check, expected_result) in enumerate(common_checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s != %s failed common check %d' %
                                 (check(tree), expected_result, i))

            filterref = './devices/interface/filterref'
            vif = network_info[0]
            nic_id = vif['address'].replace(':', '')
            fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(), conn)
            instance_filter_name = fw._instance_filter_name(instance_ref,
                                                            nic_id)
            self.assertEqual(tree.find(filterref).get('filter'),
                             instance_filter_name)

    # This test is supposed to make sure we don't
    # override a specifically set uri
    #
    # Deliberately not just assigning this string to CONF.connection_uri
    # and checking against that later on. This way we make sure the
    # implementation doesn't fiddle around with the CONF.
    testuri = 'something completely different'
    self.flags(connection_uri=testuri, group='libvirt')
    # NOTE: dict.iteritems() is Python-2-only; items() iterates the
    # same pairs and also works under Python 3.
    for (virt_type, (expected_uri, checks)) in type_uri_map.items():
        self.flags(virt_type=virt_type, group='libvirt')
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertEqual(conn.uri(), testuri)
+
def test_ensure_filtering_rules_for_instance_timeout(self):
    # ensure_filtering_rules_for_instance() finishes with timeout.
    # Preparing mocks
    def fake_none(self, *args):
        return

    def fake_raise(self):
        raise libvirt.libvirtError('ERR')

    class FakeTime(object):
        # Records total requested sleep time instead of blocking.
        def __init__(self):
            self.counter = 0

        def sleep(self, t):
            self.counter += t

    fake_timer = FakeTime()

    def fake_sleep(t):
        fake_timer.sleep(t)

    # _fake_network_info must be called before create_fake_libvirt_mock(),
    # as _fake_network_info calls importutils.import_class() and
    # create_fake_libvirt_mock() mocks importutils.import_class().
    network_info = _fake_network_info(self.stubs, 1)
    self.create_fake_libvirt_mock()
    instance_ref = objects.Instance(**self.test_instance)

    # Start test
    self.mox.ReplayAll()
    try:
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # Firewall hooks return None so instance_filter_exists never
        # reports success and the driver keeps retrying until timeout.
        self.stubs.Set(conn.firewall_driver,
                       'setup_basic_filtering',
                       fake_none)
        self.stubs.Set(conn.firewall_driver,
                       'prepare_instance_filter',
                       fake_none)
        self.stubs.Set(conn.firewall_driver,
                       'instance_filter_exists',
                       fake_none)
        self.stubs.Set(greenthread,
                       'sleep',
                       fake_sleep)
        conn.ensure_filtering_rules_for_instance(instance_ref,
                                                 network_info)
    except exception.NovaException as e:
        msg = ('The firewall filter for %s does not exist' %
               instance_ref['name'])
        c1 = (0 <= six.text_type(e).find(msg))
        self.assertTrue(c1)

    # The stubbed sleep accumulated exactly 29 seconds before giving up.
    self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
                     "amount of time")
+
def test_check_can_live_migrate_dest_all_pass_with_block_migration(self):
    # Destination check succeeds for block migration when the CPU and
    # shared-storage checks pass, returning the migration data dict.
    instance_ref = objects.Instance(**self.test_instance)
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    compute_info = {'disk_available_least': 400,
                    'cpu_info': 'asdf',
                    }
    filename = "file"

    self.mox.StubOutWithMock(conn, '_create_shared_storage_test_file')
    self.mox.StubOutWithMock(conn, '_compare_cpu')

    # _check_cpu_match
    conn._compare_cpu("asdf")

    # mounted_on_same_shared_storage
    conn._create_shared_storage_test_file().AndReturn(filename)

    self.mox.ReplayAll()
    return_value = conn.check_can_live_migrate_destination(self.context,
        instance_ref, compute_info, compute_info, True)
    # disk_available_least is in GiB; the result reports MiB (400 * 1024).
    self.assertThat({"filename": "file",
                     'image_type': 'default',
                     'disk_available_mb': 409600,
                     "disk_over_commit": False,
                     "block_migration": True},
                    matchers.DictMatches(return_value))
+
def test_check_can_live_migrate_dest_all_pass_no_block_migration(self):
    # Without block migration no disk-space figure is needed, so the
    # returned disk_available_mb is None.
    instance_ref = objects.Instance(**self.test_instance)
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    compute_info = {'cpu_info': 'asdf'}
    filename = "file"

    self.mox.StubOutWithMock(conn, '_create_shared_storage_test_file')
    self.mox.StubOutWithMock(conn, '_compare_cpu')

    # _check_cpu_match
    conn._compare_cpu("asdf")

    # mounted_on_same_shared_storage
    conn._create_shared_storage_test_file().AndReturn(filename)

    self.mox.ReplayAll()
    return_value = conn.check_can_live_migrate_destination(self.context,
        instance_ref, compute_info, compute_info, False)
    self.assertThat({"filename": "file",
                     "image_type": 'default',
                     "block_migration": False,
                     "disk_over_commit": False,
                     "disk_available_mb": None},
                    matchers.DictMatches(return_value))
+
def test_check_can_live_migrate_dest_incompatible_cpu_raises(self):
    # A CPU comparison failure propagates InvalidCPUInfo to the caller.
    instance_ref = objects.Instance(**self.test_instance)
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    compute_info = {'cpu_info': 'asdf'}

    self.mox.StubOutWithMock(conn, '_compare_cpu')

    conn._compare_cpu("asdf").AndRaise(exception.InvalidCPUInfo(
                                       reason='foo')
                                       )

    self.mox.ReplayAll()
    self.assertRaises(exception.InvalidCPUInfo,
                      conn.check_can_live_migrate_destination,
                      self.context, instance_ref,
                      compute_info, compute_info, False)
+
def test_check_can_live_migrate_dest_cleanup_works_correctly(self):
    # Cleanup removes the shared-storage test file named in the
    # destination check data.
    objects.Instance(**self.test_instance)
    dest_check_data = {"filename": "file",
                       "block_migration": True,
                       "disk_over_commit": False,
                       "disk_available_mb": 1024}
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    self.mox.StubOutWithMock(conn, '_cleanup_shared_storage_test_file')
    conn._cleanup_shared_storage_test_file("file")

    self.mox.ReplayAll()
    conn.check_can_live_migrate_destination_cleanup(self.context,
                                                    dest_check_data)
+
def _mock_can_live_migrate_source(self, block_migration=False,
                                  is_shared_block_storage=False,
                                  is_shared_instance_path=False,
                                  disk_available_mb=1024):
    # Helper: record the storage-check mox expectations shared by the
    # check_can_live_migrate_source tests and return the fixtures
    # (instance, dest_check_data, driver).
    instance = objects.Instance(**self.test_instance)
    dest_check_data = {'filename': 'file',
                       'image_type': 'default',
                       'block_migration': block_migration,
                       'disk_over_commit': False,
                       'disk_available_mb': disk_available_mb}
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    self.mox.StubOutWithMock(conn, '_is_shared_block_storage')
    conn._is_shared_block_storage(instance, dest_check_data).AndReturn(
        is_shared_block_storage)
    self.mox.StubOutWithMock(conn, '_check_shared_storage_test_file')
    conn._check_shared_storage_test_file('file').AndReturn(
        is_shared_instance_path)

    return (instance, dest_check_data, conn)
+
def test_check_can_live_migrate_source_block_migration(self):
    # Block migration over non-shared storage passes the source check
    # and reports the shared-storage flags back to the caller.
    instance, dest_check_data, conn = self._mock_can_live_migrate_source(
        block_migration=True)

    self.mox.StubOutWithMock(conn, "_assert_dest_node_has_enough_disk")
    conn._assert_dest_node_has_enough_disk(
        self.context, instance, dest_check_data['disk_available_mb'],
        False, None)

    self.mox.ReplayAll()
    ret = conn.check_can_live_migrate_source(self.context, instance,
                                             dest_check_data)
    self.assertIsInstance(ret, dict)
    self.assertIn('is_shared_block_storage', ret)
    self.assertIn('is_shared_instance_path', ret)
+
def test_check_can_live_migrate_source_shared_block_storage(self):
    """Source check succeeds when block storage is shared."""
    inst, check_data, drvr = self._mock_can_live_migrate_source(
        is_shared_block_storage=True)
    self.mox.ReplayAll()
    drvr.check_can_live_migrate_source(self.context, inst, check_data)
+
def test_check_can_live_migrate_source_shared_instance_path(self):
    """Source check succeeds when the instance path is shared."""
    inst, check_data, drvr = self._mock_can_live_migrate_source(
        is_shared_instance_path=True)
    self.mox.ReplayAll()
    drvr.check_can_live_migrate_source(self.context, inst, check_data)
+
def test_check_can_live_migrate_source_non_shared_fails(self):
    """Non-block migration without shared storage is rejected."""
    inst, check_data, drvr = self._mock_can_live_migrate_source()
    self.mox.ReplayAll()
    self.assertRaises(exception.InvalidSharedStorage,
                      drvr.check_can_live_migrate_source,
                      self.context, inst, check_data)
+
def test_check_can_live_migrate_source_shared_block_migration_fails(self):
    """Block migration over shared block storage is rejected."""
    inst, check_data, drvr = self._mock_can_live_migrate_source(
        block_migration=True,
        is_shared_block_storage=True)
    self.mox.ReplayAll()
    self.assertRaises(exception.InvalidLocalStorage,
                      drvr.check_can_live_migrate_source,
                      self.context, inst, check_data)
+
def test_check_can_live_migrate_shared_path_block_migration_fails(self):
    """Block migration over a shared instance path is rejected."""
    inst, check_data, drvr = self._mock_can_live_migrate_source(
        block_migration=True,
        is_shared_instance_path=True)
    self.mox.ReplayAll()
    self.assertRaises(exception.InvalidLocalStorage,
                      drvr.check_can_live_migrate_source,
                      self.context, inst, check_data)
+
def test_check_can_live_migrate_non_shared_non_block_migration_fails(self):
    """Neither shared storage nor block migration: source check fails."""
    inst, check_data, drvr = self._mock_can_live_migrate_source()
    self.mox.ReplayAll()
    self.assertRaises(exception.InvalidSharedStorage,
                      drvr.check_can_live_migrate_source,
                      self.context, inst, check_data)
+
def test_check_can_live_migrate_source_with_dest_not_enough_disk(self):
    # Destination disk space (0 MB) is less than the instance's disk
    # needs, so the source check raises MigrationError.
    instance, dest_check_data, conn = self._mock_can_live_migrate_source(
        block_migration=True,
        disk_available_mb=0)

    self.mox.StubOutWithMock(conn, "get_instance_disk_info")
    conn.get_instance_disk_info(instance["name"],
                                block_device_info=None).AndReturn(
                                    '[{"virt_disk_size":2}]')

    self.mox.ReplayAll()
    self.assertRaises(exception.MigrationError,
                      conn.check_can_live_migrate_source,
                      self.context, instance, dest_check_data)
+
def test_is_shared_block_storage_rbd(self):
    """RBD image backend on both ends counts as shared block storage."""
    CONF.set_override('images_type', 'rbd', 'libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    dest_data = {'image_type': 'rbd'}
    self.assertTrue(drvr._is_shared_block_storage('instance', dest_data))
+
def test_is_shared_block_storage_non_remote(self):
    """Local storage with a non-shared instance path is not shared."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    dest_data = {'is_shared_instance_path': False}
    self.assertFalse(drvr._is_shared_block_storage('instance', dest_data))
+
def test_is_shared_block_storage_rbd_only_source(self):
    """RBD on the source alone does not make storage shared."""
    CONF.set_override('images_type', 'rbd', 'libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    dest_data = {'is_shared_instance_path': False}
    self.assertFalse(drvr._is_shared_block_storage('instance', dest_data))
+
def test_is_shared_block_storage_rbd_only_dest(self):
    """RBD on the destination alone does not make storage shared."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    dest_data = {'image_type': 'rbd',
                 'is_shared_instance_path': False}
    self.assertFalse(drvr._is_shared_block_storage('instance', dest_data))
+
def test_is_shared_block_storage_volume_backed(self):
    """A fully volume-backed instance (no local disks) is shared."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    dest_data = {'is_volume_backed': True,
                 'is_shared_instance_path': False}
    with mock.patch.object(drvr, 'get_instance_disk_info') as mock_get:
        mock_get.return_value = '[]'
        self.assertTrue(
            drvr._is_shared_block_storage({'name': 'name'}, dest_data))
+
def test_is_shared_block_storage_volume_backed_with_disk(self):
    """A volume-backed instance that still has a local disk is not shared."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    dest_data = {'is_volume_backed': True,
                 'is_shared_instance_path': False}
    with mock.patch.object(drvr, 'get_instance_disk_info') as mock_get:
        mock_get.return_value = '[{"virt_disk_size":2}]'
        self.assertFalse(drvr._is_shared_block_storage(
            {'name': 'instance_name'}, dest_data))
        mock_get.assert_called_once_with('instance_name')
+
def test_is_shared_block_storage_nfs(self):
    """Shared instance path with a file-based image backend is shared."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    mock_backend = mock.MagicMock()
    mock_backend.is_file_in_instance_path.return_value = True
    mock_image_backend = mock.MagicMock()
    mock_image_backend.backend.return_value = mock_backend
    drvr.image_backend = mock_image_backend
    self.assertTrue(drvr._is_shared_block_storage(
        'instance', {'is_shared_instance_path': True}))
+
@mock.patch.object(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', 8675, create=True)
def test_live_migration_changes_listen_addresses(self):
    # With the MIGRATABLE flag the driver rewrites the VNC and SPICE
    # listen addresses in the domain XML to the destination values
    # before calling migrateToURI2.
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_dict = dict(self.test_instance)
    instance_dict.update({'host': 'fake',
                          'power_state': power_state.RUNNING,
                          'vm_state': vm_states.ACTIVE})
    instance_ref = objects.Instance(**instance_dict)

    xml_tmpl = ("<domain type='kvm'>"
                "<devices>"
                "<graphics type='vnc' listen='{vnc}'>"
                "<listen address='{vnc}'/>"
                "</graphics>"
                "<graphics type='spice' listen='{spice}'>"
                "<listen address='{spice}'/>"
                "</graphics>"
                "</devices>"
                "</domain>")

    initial_xml = xml_tmpl.format(vnc='1.2.3.4',
                                  spice='5.6.7.8')

    target_xml = xml_tmpl.format(vnc='10.0.0.1',
                                 spice='10.0.0.2')
    # Round-trip through etree so serialization matches the driver's.
    target_xml = etree.tostring(etree.fromstring(target_xml))

    # Preparing mocks
    vdmock = self.mox.CreateMock(libvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "migrateToURI2")
    _bandwidth = CONF.libvirt.live_migration_bandwidth
    vdmock.XMLDesc(libvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
            initial_xml)
    # Expecting exactly target_xml here is the point of the test: the
    # listen addresses must already be rewritten when migrating.
    vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest',
                         None,
                         target_xml,
                         mox.IgnoreArg(),
                         None,
                         _bandwidth).AndRaise(libvirt.libvirtError("ERR"))

    def fake_lookup(instance_name):
        if instance_name == instance_ref['name']:
            return vdmock

    self.create_fake_libvirt_mock(lookupByName=fake_lookup)
    self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
    self.compute._rollback_live_migration(self.context, instance_ref,
                                          'dest', False)

    # start test
    migrate_data = {'pre_live_migration_result':
                    {'graphics_listen_addrs':
                     {'vnc': '10.0.0.1', 'spice': '10.0.0.2'}}}
    self.mox.ReplayAll()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(libvirt.libvirtError,
                      conn._live_migration,
                      self.context, instance_ref, 'dest', False,
                      self.compute._rollback_live_migration,
                      migrate_data=migrate_data)
+
@mock.patch.object(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True)
def test_live_migration_uses_migrateToURI_without_migratable_flag(self):
    # Without VIR_DOMAIN_XML_MIGRATABLE the driver falls back to plain
    # migrateToURI; 0.0.0.0 listen addresses need no XML rewrite.
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_dict = dict(self.test_instance)
    instance_dict.update({'host': 'fake',
                          'power_state': power_state.RUNNING,
                          'vm_state': vm_states.ACTIVE})
    instance_ref = objects.Instance(**instance_dict)

    # Preparing mocks
    vdmock = self.mox.CreateMock(libvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "migrateToURI")
    _bandwidth = CONF.libvirt.live_migration_bandwidth
    vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                        mox.IgnoreArg(),
                        None,
                        _bandwidth).AndRaise(libvirt.libvirtError("ERR"))

    def fake_lookup(instance_name):
        if instance_name == instance_ref['name']:
            return vdmock

    self.create_fake_libvirt_mock(lookupByName=fake_lookup)
    self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
    self.compute._rollback_live_migration(self.context, instance_ref,
                                          'dest', False)

    # start test
    migrate_data = {'pre_live_migration_result':
                    {'graphics_listen_addrs':
                     {'vnc': '0.0.0.0', 'spice': '0.0.0.0'}}}
    self.mox.ReplayAll()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(libvirt.libvirtError,
                      conn._live_migration,
                      self.context, instance_ref, 'dest', False,
                      self.compute._rollback_live_migration,
                      migrate_data=migrate_data)
+
def test_live_migration_uses_migrateToURI_without_dest_listen_addrs(self):
    # With no destination listen addresses in migrate_data the driver
    # uses the plain migrateToURI path.
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_dict = dict(self.test_instance)
    instance_dict.update({'host': 'fake',
                          'power_state': power_state.RUNNING,
                          'vm_state': vm_states.ACTIVE})
    instance_ref = objects.Instance(**instance_dict)

    # Preparing mocks
    vdmock = self.mox.CreateMock(libvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "migrateToURI")
    _bandwidth = CONF.libvirt.live_migration_bandwidth
    vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                        mox.IgnoreArg(),
                        None,
                        _bandwidth).AndRaise(libvirt.libvirtError("ERR"))

    def fake_lookup(instance_name):
        if instance_name == instance_ref['name']:
            return vdmock

    self.create_fake_libvirt_mock(lookupByName=fake_lookup)
    self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
    self.compute._rollback_live_migration(self.context, instance_ref,
                                          'dest', False)

    # start test
    migrate_data = {}
    self.mox.ReplayAll()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(libvirt.libvirtError,
                      conn._live_migration,
                      self.context, instance_ref, 'dest', False,
                      self.compute._rollback_live_migration,
                      migrate_data=migrate_data)
+
@mock.patch.object(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True)
def test_live_migration_fails_without_migratable_flag_or_0_addr(self):
    # Without the MIGRATABLE flag and with a specific (non-0.0.0.0)
    # listen address, migration must fail with MigrationError before
    # migrateToURI is ever called (no expectation is recorded on it).
    self.flags(vnc_enabled=True, vncserver_listen='1.2.3.4')
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_dict = dict(self.test_instance)
    instance_dict.update({'host': 'fake',
                          'power_state': power_state.RUNNING,
                          'vm_state': vm_states.ACTIVE})
    instance_ref = objects.Instance(**instance_dict)

    # Preparing mocks
    vdmock = self.mox.CreateMock(libvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "migrateToURI")

    def fake_lookup(instance_name):
        if instance_name == instance_ref['name']:
            return vdmock

    self.create_fake_libvirt_mock(lookupByName=fake_lookup)
    self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
    self.compute._rollback_live_migration(self.context, instance_ref,
                                          'dest', False)

    # start test
    migrate_data = {'pre_live_migration_result':
                    {'graphics_listen_addrs':
                     {'vnc': '1.2.3.4', 'spice': '1.2.3.4'}}}
    self.mox.ReplayAll()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.MigrationError,
                      conn._live_migration,
                      self.context, instance_ref, 'dest', False,
                      self.compute._rollback_live_migration,
                      migrate_data=migrate_data)
+
def test_live_migration_raises_exception(self):
    # Confirms recover method is called when exceptions are raised.
    # Preparing data
    self.compute = importutils.import_object(CONF.compute_manager)
    instance_dict = dict(self.test_instance)
    instance_dict.update({'host': 'fake',
                          'power_state': power_state.RUNNING,
                          'vm_state': vm_states.ACTIVE})
    instance_ref = objects.Instance(**instance_dict)

    # Preparing mocks
    vdmock = self.mox.CreateMock(libvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "migrateToURI2")
    _bandwidth = CONF.libvirt.live_migration_bandwidth
    # Expect whichever migrate call matches the libvirt capabilities
    # present at test run time (MIGRATABLE flag available or not).
    if getattr(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None) is None:
        vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                            mox.IgnoreArg(),
                            None,
                            _bandwidth).AndRaise(
                                libvirt.libvirtError('ERR'))
    else:
        vdmock.XMLDesc(libvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
                FakeVirtDomain().XMLDesc(0))
        vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest',
                             None,
                             mox.IgnoreArg(),
                             mox.IgnoreArg(),
                             None,
                             _bandwidth).AndRaise(
                                 libvirt.libvirtError('ERR'))

    def fake_lookup(instance_name):
        if instance_name == instance_ref['name']:
            return vdmock

    self.create_fake_libvirt_mock(lookupByName=fake_lookup)
    self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
    self.compute._rollback_live_migration(self.context, instance_ref,
                                          'dest', False)

    # start test
    migrate_data = {'pre_live_migration_result':
                    {'graphics_listen_addrs':
                     {'vnc': '127.0.0.1', 'spice': '127.0.0.1'}}}
    self.mox.ReplayAll()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(libvirt.libvirtError,
                      conn._live_migration,
                      self.context, instance_ref, 'dest', False,
                      self.compute._rollback_live_migration,
                      migrate_data=migrate_data)

    # Instance state must be untouched by the failed migration.
    self.assertEqual(vm_states.ACTIVE, instance_ref.vm_state)
    self.assertEqual(power_state.RUNNING, instance_ref.power_state)
+
@mock.patch.object(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', 8675, create=True)
def test_live_migration_raises_unsupported_config_exception(self):
    # Tests that when migrateToURI2 fails with VIR_ERR_CONFIG_UNSUPPORTED,
    # migrateToURI is used instead.

    # Preparing data
    instance_ref = fake_instance.fake_instance_obj(
        self.context, **self.test_instance)

    # Preparing mocks
    vdmock = self.mox.CreateMock(libvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, 'migrateToURI2')
    self.mox.StubOutWithMock(vdmock, 'migrateToURI')
    _bandwidth = CONF.libvirt.live_migration_bandwidth
    vdmock.XMLDesc(libvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
        FakeVirtDomain().XMLDesc(0))
    unsupported_config_error = libvirt.libvirtError('ERR')
    unsupported_config_error.err = (libvirt.VIR_ERR_CONFIG_UNSUPPORTED,)
    # This is the first error we hit but since the error code is
    # VIR_ERR_CONFIG_UNSUPPORTED we'll try migrateToURI.
    vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest', None,
                         mox.IgnoreArg(), mox.IgnoreArg(), None,
                         _bandwidth).AndRaise(unsupported_config_error)
    # This is the second and final error that will actually kill the run,
    # we use TestingException to make sure it's not the same libvirtError
    # above.
    vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                        mox.IgnoreArg(), None,
                        _bandwidth).AndRaise(test.TestingException('oops'))

    def fake_lookup(instance_name):
        if instance_name == instance_ref.name:
            return vdmock

    self.create_fake_libvirt_mock(lookupByName=fake_lookup)

    def fake_recover_method(context, instance, dest, block_migration):
        pass

    graphics_listen_addrs = {'vnc': '0.0.0.0', 'spice': '127.0.0.1'}
    migrate_data = {'pre_live_migration_result':
                    {'graphics_listen_addrs': graphics_listen_addrs}}
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    self.mox.StubOutWithMock(
        conn, '_check_graphics_addresses_can_live_migrate')
    conn._check_graphics_addresses_can_live_migrate(graphics_listen_addrs)
    self.mox.ReplayAll()

    # start test
    self.assertRaises(test.TestingException, conn._live_migration,
                      self.context, instance_ref, 'dest', post_method=None,
                      recover_method=fake_recover_method,
                      migrate_data=migrate_data)
+
def test_rollback_live_migration_at_destination(self):
    """Rollback at the destination simply destroys the instance."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(drvr, "destroy") as mock_destroy:
        drvr.rollback_live_migration_at_destination(
            "context", "instance", [], None, True, None)
        mock_destroy.assert_called_once_with(
            "context", "instance", [], None, True, None)
+
def _do_test_create_images_and_backing(self, disk_type):
    # Helper: a disk of the given type with no backing file must be
    # recreated via libvirt_utils.create_image at its virtual size.
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(conn, '_fetch_instance_kernel_ramdisk')
    self.mox.StubOutWithMock(libvirt_driver.libvirt_utils, 'create_image')

    disk_info = {'path': 'foo', 'type': disk_type,
                 'disk_size': 1 * 1024 ** 3,
                 'virt_disk_size': 20 * 1024 ** 3,
                 'backing_file': None}
    disk_info_json = jsonutils.dumps([disk_info])

    libvirt_driver.libvirt_utils.create_image(
        disk_info['type'], mox.IgnoreArg(), disk_info['virt_disk_size'])
    conn._fetch_instance_kernel_ramdisk(self.context, self.test_instance)
    self.mox.ReplayAll()

    # Pretend nothing exists on disk yet so creation is triggered.
    self.stubs.Set(os.path, 'exists', lambda *args: False)
    conn._create_images_and_backing(self.context, self.test_instance,
                                    "/fake/instance/dir", disk_info_json)
+
def test_create_images_and_backing_qcow2(self):
    # qcow2 disks are recreated via libvirt_utils.create_image.
    self._do_test_create_images_and_backing('qcow2')
+
def test_create_images_and_backing_raw(self):
    # raw disks are recreated via libvirt_utils.create_image.
    self._do_test_create_images_and_backing('raw')
+
def test_create_images_and_backing_ephemeral_gets_created(self):
    # Both the image backing file and the default ephemeral disk are
    # created under the image cache directory.
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    disk_info_json = jsonutils.dumps(
        [{u'backing_file': u'fake_image_backing_file',
          u'disk_size': 10747904,
          u'path': u'disk_path',
          u'type': u'qcow2',
          u'virt_disk_size': 25165824},
         {u'backing_file': u'ephemeral_1_default',
          u'disk_size': 393216,
          u'over_committed_disk_size': 1073348608,
          u'path': u'disk_eph_path',
          u'type': u'qcow2',
          u'virt_disk_size': 1073741824}])

    base_dir = os.path.join(CONF.instances_path,
                            CONF.image_cache_subdirectory_name)
    self.test_instance.update({'name': 'fake_instance',
                               'user_id': 'fake-user',
                               'os_type': None,
                               'project_id': 'fake-project'})

    with contextlib.nested(
        mock.patch.object(conn, '_fetch_instance_kernel_ramdisk'),
        mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image'),
        mock.patch.object(conn, '_create_ephemeral')
    ) as (fetch_kernel_ramdisk_mock, fetch_image_mock,
          create_ephemeral_mock):
        conn._create_images_and_backing(self.context, self.test_instance,
                                        "/fake/instance/dir",
                                        disk_info_json)
        # Ephemeral disk created exactly once, in the cache dir.
        self.assertEqual(len(create_ephemeral_mock.call_args_list), 1)
        m_args, m_kwargs = create_ephemeral_mock.call_args_list[0]
        self.assertEqual(
                os.path.join(base_dir, 'ephemeral_1_default'),
                m_kwargs['target'])
        # Image backing file fetched exactly once, in the cache dir.
        self.assertEqual(len(fetch_image_mock.call_args_list), 1)
        m_args, m_kwargs = fetch_image_mock.call_args_list[0]
        self.assertEqual(
                os.path.join(base_dir, 'fake_image_backing_file'),
                m_kwargs['target'])
+
def test_create_images_and_backing_disk_info_none(self):
    # With no disk info only the kernel/ramdisk fetch happens.
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(conn, '_fetch_instance_kernel_ramdisk')

    conn._fetch_instance_kernel_ramdisk(self.context, self.test_instance)
    self.mox.ReplayAll()

    conn._create_images_and_backing(self.context, self.test_instance,
                                    "/fake/instance/dir", None)
+
def test_pre_live_migration_works_correctly_mocked(self):
    # Creating testdata
    vol = {'block_device_mapping': [
        {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
        {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    class FakeNetworkInfo():
        def fixed_ips(self):
            return ["test_ip_addr"]

    def fake_none(*args, **kwargs):
        return

    self.stubs.Set(conn, '_create_images_and_backing', fake_none)

    inst_ref = {'id': 'foo'}
    c = context.get_admin_context()
    nw_info = FakeNetworkInfo()

    # Creating mocks
    self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
    driver.block_device_info_get_mapping(vol
                                         ).AndReturn(
        vol['block_device_mapping'])
    self.mox.StubOutWithMock(conn, "_connect_volume")
    # Each mapped volume must be connected with scsi disk info.
    for v in vol['block_device_mapping']:
        disk_info = {
            'bus': "scsi",
            'dev': v['mount_device'].rpartition("/")[2],
            'type': "disk"
            }
        conn._connect_volume(v['connection_info'],
                             disk_info)
    self.mox.StubOutWithMock(conn, 'plug_vifs')
    conn.plug_vifs(mox.IsA(inst_ref), nw_info)

    self.mox.ReplayAll()
    result = conn.pre_live_migration(c, inst_ref, vol, nw_info, None)

    # The local graphics listen addresses are returned to the source.
    target_res = {'graphics_listen_addrs': {'spice': '127.0.0.1',
                                            'vnc': '127.0.0.1'}}
    self.assertEqual(result, target_res)
+
def test_pre_live_migration_block_with_config_drive_mocked(self):
    # Creating testdata
    vol = {'block_device_mapping': [
        {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
        {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    def fake_true(*args, **kwargs):
        return True

    # Force the instance to appear to require a config drive.
    self.stubs.Set(configdrive, 'required_by', fake_true)

    inst_ref = {'id': 'foo'}
    c = context.get_admin_context()

    # Block migration of a config-drive instance is not supported.
    self.assertRaises(exception.NoLiveMigrationForConfigDriveInLibVirt,
                      conn.pre_live_migration, c, inst_ref, vol, None,
                      None, {'is_shared_instance_path': False,
                             'is_shared_block_storage': False})
+
def test_pre_live_migration_vol_backed_works_correctly_mocked(self):
    # Creating testdata, using temp dir.
    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)
        vol = {'block_device_mapping': [
            {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
            {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        def fake_none(*args, **kwargs):
            return

        self.stubs.Set(conn, '_create_images_and_backing', fake_none)

        class FakeNetworkInfo():
            def fixed_ips(self):
                return ["test_ip_addr"]
        inst_ref = objects.Instance(**self.test_instance)
        c = context.get_admin_context()
        nw_info = FakeNetworkInfo()
        # Creating mocks
        self.mox.StubOutWithMock(conn, "_connect_volume")
        # Each mapped volume must be connected with scsi disk info.
        for v in vol['block_device_mapping']:
            disk_info = {
                'bus': "scsi",
                'dev': v['mount_device'].rpartition("/")[2],
                'type': "disk"
                }
            conn._connect_volume(v['connection_info'],
                                 disk_info)
        self.mox.StubOutWithMock(conn, 'plug_vifs')
        conn.plug_vifs(mox.IsA(inst_ref), nw_info)
        self.mox.ReplayAll()
        migrate_data = {'is_shared_instance_path': False,
                        'is_volume_backed': True,
                        'block_migration': False,
                        'instance_relative_path': inst_ref['name']
                        }
        ret = conn.pre_live_migration(c, inst_ref, vol, nw_info, None,
                                      migrate_data)
        target_ret = {'graphics_listen_addrs': {'spice': '127.0.0.1',
                                                'vnc': '127.0.0.1'}}
        self.assertEqual(ret, target_ret)
        # The instance directory must have been created under tmpdir.
        self.assertTrue(os.path.exists('%s/%s/' % (tmpdir,
                                                   inst_ref['name'])))
+
+ def test_pre_live_migration_plug_vifs_retry_fails(self):
+ # plug_vifs fails on every attempt; after live_migration_retry_count
+ # retries the ProcessExecutionError must propagate to the caller.
+ self.flags(live_migration_retry_count=3)
+ instance = {'name': 'test', 'uuid': 'uuid'}
+
+ def fake_plug_vifs(instance, network_info):
+ raise processutils.ProcessExecutionError()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, 'plug_vifs', fake_plug_vifs)
+ # Skip the real back-off sleep between retries to keep the test fast.
+ self.stubs.Set(eventlet.greenthread, 'sleep', lambda x: None)
+ self.assertRaises(processutils.ProcessExecutionError,
+ conn.pre_live_migration,
+ self.context, instance, block_device_info=None,
+ network_info=[], disk_info={})
+
+ def test_pre_live_migration_plug_vifs_retry_works(self):
+ # plug_vifs fails on all but the last allowed attempt; the retry
+ # loop must absorb the failures and pre_live_migration succeeds.
+ self.flags(live_migration_retry_count=3)
+ called = {'count': 0}
+ instance = {'name': 'test', 'uuid': 'uuid'}
+
+ def fake_plug_vifs(instance, network_info):
+ called['count'] += 1
+ # Fail until the final permitted retry, then succeed.
+ if called['count'] < CONF.live_migration_retry_count:
+ raise processutils.ProcessExecutionError()
+ else:
+ return
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, 'plug_vifs', fake_plug_vifs)
+ # Skip the real back-off sleep between retries to keep the test fast.
+ self.stubs.Set(eventlet.greenthread, 'sleep', lambda x: None)
+ conn.pre_live_migration(self.context, instance, block_device_info=None,
+ network_info=[], disk_info={})
+
+ def test_pre_live_migration_image_not_created_with_shared_storage(self):
+ # When block storage is shared, or a block migration is requested,
+ # pre_live_migration must not create image backing files.
+ migrate_data_set = [{'is_shared_block_storage': False,
+ 'block_migration': False},
+ {'is_shared_block_storage': True,
+ 'block_migration': False},
+ {'is_shared_block_storage': False,
+ 'block_migration': True}]
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(**self.test_instance)
+ # creating mocks
+ with contextlib.nested(
+ mock.patch.object(conn,
+ '_create_images_and_backing'),
+ mock.patch.object(conn,
+ 'ensure_filtering_rules_for_instance'),
+ mock.patch.object(conn, 'plug_vifs'),
+ ) as (
+ create_image_mock,
+ rules_mock,
+ plug_mock,
+ ):
+ for migrate_data in migrate_data_set:
+ res = conn.pre_live_migration(self.context, instance,
+ block_device_info=None,
+ network_info=[], disk_info={},
+ migrate_data=migrate_data)
+ # No backing files created in any of the scenarios above.
+ self.assertFalse(create_image_mock.called)
+ self.assertIsInstance(res, dict)
+
+ def test_pre_live_migration_with_not_shared_instance_path(self):
+ # With a non-shared instance path the backing files must be
+ # created, and _create_images_and_backing must receive a
+ # non-empty instance directory.
+ migrate_data = {'is_shared_block_storage': False,
+ 'is_shared_instance_path': False}
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(**self.test_instance)
+
+ # side_effect hook: verifies the instance_dir argument is truthy.
+ def check_instance_dir(context, instance,
+ instance_dir, disk_info):
+ self.assertTrue(instance_dir)
+ # creating mocks
+ with contextlib.nested(
+ mock.patch.object(conn,
+ '_create_images_and_backing',
+ side_effect=check_instance_dir),
+ mock.patch.object(conn,
+ 'ensure_filtering_rules_for_instance'),
+ mock.patch.object(conn, 'plug_vifs'),
+ ) as (
+ create_image_mock,
+ rules_mock,
+ plug_mock,
+ ):
+ res = conn.pre_live_migration(self.context, instance,
+ block_device_info=None,
+ network_info=[], disk_info={},
+ migrate_data=migrate_data)
+ self.assertTrue(create_image_mock.called)
+ self.assertIsInstance(res, dict)
+
+ def test_get_instance_disk_info_works_correctly(self):
+ # get_instance_disk_info parses the domain XML for a raw and a
+ # qcow2 disk: sizes come from os.path.getsize / qemu-img info and
+ # the over-committed size is virt_disk_size - actual disk size.
+ # Test data
+ instance_ref = objects.Instance(**self.test_instance)
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices>"
+ "<disk type='file'><driver name='qemu' type='raw'/>"
+ "<source file='/test/disk'/>"
+ "<target dev='vda' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/test/disk.local'/>"
+ "<target dev='vdb' bus='virtio'/></disk>"
+ "</devices></domain>")
+
+ # Preparing mocks
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "XMLDesc")
+ vdmock.XMLDesc(0).AndReturn(dummyxml)
+
+ def fake_lookup(instance_name):
+ if instance_name == instance_ref['name']:
+ return vdmock
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
+ fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
+ fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
+
+ self.mox.StubOutWithMock(os.path, "getsize")
+ os.path.getsize('/test/disk').AndReturn((10737418240))
+ os.path.getsize('/test/disk.local').AndReturn((3328599655))
+
+ # Canned `qemu-img info` output for the qcow2 disk.
+ ret = ("image: /test/disk\n"
+ "file format: raw\n"
+ "virtual size: 20G (21474836480 bytes)\n"
+ "disk size: 3.1G\n"
+ "cluster_size: 2097152\n"
+ "backing file: /test/dummy (actual path: /backing/file)\n")
+
+ self.mox.StubOutWithMock(os.path, "exists")
+ os.path.exists('/test/disk.local').AndReturn(True)
+
+ self.mox.StubOutWithMock(utils, "execute")
+ utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
+ '/test/disk.local').AndReturn((ret, ''))
+
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ info = conn.get_instance_disk_info(instance_ref['name'])
+ info = jsonutils.loads(info)
+ self.assertEqual(info[0]['type'], 'raw')
+ self.assertEqual(info[0]['path'], '/test/disk')
+ self.assertEqual(info[0]['disk_size'], 10737418240)
+ self.assertEqual(info[0]['backing_file'], "")
+ self.assertEqual(info[0]['over_committed_disk_size'], 0)
+ self.assertEqual(info[1]['type'], 'qcow2')
+ self.assertEqual(info[1]['path'], '/test/disk.local')
+ self.assertEqual(info[1]['virt_disk_size'], 21474836480)
+ self.assertEqual(info[1]['backing_file'], "file")
+ # 21474836480 - 3328599655 == 18146236825
+ self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
+
+ def test_post_live_migration(self):
+ # post_live_migration must disconnect every volume in the
+ # block device mapping on the source host.
+ vol = {'block_device_mapping': [
+ {'connection_info': 'dummy1', 'mount_device': '/dev/sda'},
+ {'connection_info': 'dummy2', 'mount_device': '/dev/sdb'}]}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ inst_ref = {'id': 'foo'}
+ cntx = context.get_admin_context()
+
+ # Set up the mock expectations
+ with contextlib.nested(
+ mock.patch.object(driver, 'block_device_info_get_mapping',
+ return_value=vol['block_device_mapping']),
+ mock.patch.object(conn, '_disconnect_volume')
+ ) as (block_device_info_get_mapping, _disconnect_volume):
+ conn.post_live_migration(cntx, inst_ref, vol)
+
+ block_device_info_get_mapping.assert_has_calls([
+ mock.call(vol)])
+ # One disconnect per mapping, keyed by the short device name
+ # (e.g. 'sda' from '/dev/sda').
+ _disconnect_volume.assert_has_calls([
+ mock.call(v['connection_info'],
+ v['mount_device'].rpartition("/")[2])
+ for v in vol['block_device_mapping']])
+
+ def test_get_instance_disk_info_excludes_volumes(self):
+ # Disks that are Cinder volumes (present in block_device_info)
+ # must be excluded from get_instance_disk_info results; only the
+ # local raw and qcow2 disks are reported.
+ # Test data
+ instance_ref = objects.Instance(**self.test_instance)
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices>"
+ "<disk type='file'><driver name='qemu' type='raw'/>"
+ "<source file='/test/disk'/>"
+ "<target dev='vda' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/test/disk.local'/>"
+ "<target dev='vdb' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/fake/path/to/volume1'/>"
+ "<target dev='vdc' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/fake/path/to/volume2'/>"
+ "<target dev='vdd' bus='virtio'/></disk>"
+ "</devices></domain>")
+
+ # Preparing mocks
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "XMLDesc")
+ vdmock.XMLDesc(0).AndReturn(dummyxml)
+
+ def fake_lookup(instance_name):
+ if instance_name == instance_ref['name']:
+ return vdmock
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
+ fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
+ fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
+
+ self.mox.StubOutWithMock(os.path, "getsize")
+ os.path.getsize('/test/disk').AndReturn((10737418240))
+ os.path.getsize('/test/disk.local').AndReturn((3328599655))
+
+ # Canned `qemu-img info` output for the qcow2 disk.
+ ret = ("image: /test/disk\n"
+ "file format: raw\n"
+ "virtual size: 20G (21474836480 bytes)\n"
+ "disk size: 3.1G\n"
+ "cluster_size: 2097152\n"
+ "backing file: /test/dummy (actual path: /backing/file)\n")
+
+ self.mox.StubOutWithMock(os.path, "exists")
+ os.path.exists('/test/disk.local').AndReturn(True)
+
+ self.mox.StubOutWithMock(utils, "execute")
+ utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
+ '/test/disk.local').AndReturn((ret, ''))
+
+ self.mox.ReplayAll()
+ # vdc/vdd are attached volumes and must be filtered out.
+ conn_info = {'driver_volume_type': 'fake'}
+ info = {'block_device_mapping': [
+ {'connection_info': conn_info, 'mount_device': '/dev/vdc'},
+ {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ info = conn.get_instance_disk_info(instance_ref['name'],
+ block_device_info=info)
+ info = jsonutils.loads(info)
+ self.assertEqual(info[0]['type'], 'raw')
+ self.assertEqual(info[0]['path'], '/test/disk')
+ self.assertEqual(info[0]['disk_size'], 10737418240)
+ self.assertEqual(info[0]['backing_file'], "")
+ self.assertEqual(info[0]['over_committed_disk_size'], 0)
+ self.assertEqual(info[1]['type'], 'qcow2')
+ self.assertEqual(info[1]['path'], '/test/disk.local')
+ self.assertEqual(info[1]['virt_disk_size'], 21474836480)
+ self.assertEqual(info[1]['backing_file'], "file")
+ self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def test_spawn_with_network_info(self, mock_flavor):
+ # End-to-end spawn with network info against a fully faked
+ # libvirt connection; also exercises the code path where
+ # VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES is absent.
+ # Preparing mocks
+ def fake_none(*args, **kwargs):
+ return
+
+ def fake_getLibVersion():
+ return 9011
+
+ def fake_getCapabilities():
+ return """
+ <capabilities>
+ <host>
+ <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
+ <cpu>
+ <arch>x86_64</arch>
+ <model>Penryn</model>
+ <vendor>Intel</vendor>
+ <topology sockets='1' cores='2' threads='1'/>
+ <feature name='xtpr'/>
+ </cpu>
+ </host>
+ </capabilities>
+ """
+
+ def fake_baselineCPU(cpu, flag):
+ return """<cpu mode='custom' match='exact'>
+ <model fallback='allow'>Penryn</model>
+ <vendor>Intel</vendor>
+ <feature policy='require' name='xtpr'/>
+ </cpu>
+ """
+
+ # _fake_network_info must be called before create_fake_libvirt_mock(),
+ # as _fake_network_info calls importutils.import_class() and
+ # create_fake_libvirt_mock() mocks importutils.import_class().
+ network_info = _fake_network_info(self.stubs, 1)
+ self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
+ getCapabilities=fake_getCapabilities,
+ getVersion=lambda: 1005001,
+ baselineCPU=fake_baselineCPU)
+
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 123456 # we send an int to test sha1 call
+ instance = objects.Instance(**instance_ref)
+ flavor = instance.get_flavor()
+ flavor.extra_specs = {}
+
+ mock_flavor.return_value = flavor
+
+ # Mock out the get_info method of the LibvirtDriver so that the polling
+ # in the spawn method of the LibvirtDriver returns immediately
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, 'get_info')
+ libvirt_driver.LibvirtDriver.get_info(instance
+ ).AndReturn({'state': power_state.RUNNING})
+
+ # Start test
+ self.mox.ReplayAll()
+
+ with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
+ # Simulate an older libvirt without the baseline-CPU
+ # expand-features flag.
+ del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn.firewall_driver,
+ 'setup_basic_filtering',
+ fake_none)
+ self.stubs.Set(conn.firewall_driver,
+ 'prepare_instance_filter',
+ fake_none)
+ self.stubs.Set(imagebackend.Image,
+ 'cache',
+ fake_none)
+
+ conn.spawn(self.context, instance, None, [], 'herp',
+ network_info=network_info)
+
+ # Clean up any instance/cache directories spawn left behind.
+ path = os.path.join(CONF.instances_path, instance['name'])
+ if os.path.isdir(path):
+ shutil.rmtree(path)
+
+ path = os.path.join(CONF.instances_path,
+ CONF.image_cache_subdirectory_name)
+ if os.path.isdir(path):
+ shutil.rmtree(os.path.join(CONF.instances_path,
+ CONF.image_cache_subdirectory_name))
+
+ def test_spawn_without_image_meta(self):
+ # spawn() must call _create_image both with image_meta=None and
+ # with a minimal image_meta dict.
+ self.create_image_called = False
+
+ def fake_none(*args, **kwargs):
+ return
+
+ def fake_create_image(*args, **kwargs):
+ self.create_image_called = True
+
+ def fake_get_info(instance):
+ return {'state': power_state.RUNNING}
+
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 1
+ instance = objects.Instance(**instance_ref)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_get_guest_xml', fake_none)
+ self.stubs.Set(conn, '_create_image', fake_create_image)
+ self.stubs.Set(conn, '_create_domain_and_network', fake_none)
+ self.stubs.Set(conn, 'get_info', fake_get_info)
+
+ # No image meta at all.
+ conn.spawn(self.context, instance, None, [], None)
+ self.assertTrue(self.create_image_called)
+
+ # Image meta carrying only an id.
+ conn.spawn(self.context,
+ instance,
+ {'id': instance['image_ref']},
+ [],
+ None)
+ self.assertTrue(self.create_image_called)
+
+ def test_spawn_from_volume_calls_cache(self):
+ # The root disk image must only be fetched into the image cache
+ # when the instance actually boots from an image, not when it is
+ # volume-backed (with or without a placeholder image_ref).
+ self.cache_called_for_disk = False
+
+ def fake_none(*args, **kwargs):
+ return
+
+ def fake_cache(*args, **kwargs):
+ # Record whether the root disk image was cached.
+ if kwargs.get('image_id') == 'my_fake_image':
+ self.cache_called_for_disk = True
+
+ def fake_get_info(instance):
+ return {'state': power_state.RUNNING}
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_get_guest_xml', fake_none)
+
+ self.stubs.Set(imagebackend.Image, 'cache', fake_cache)
+ self.stubs.Set(conn, '_create_domain_and_network', fake_none)
+ self.stubs.Set(conn, 'get_info', fake_get_info)
+
+ block_device_info = {'root_device_name': '/dev/vda',
+ 'block_device_mapping': [
+ {'mount_device': 'vda',
+ 'boot_index': 0}
+ ]
+ }
+
+ # Volume-backed instance created without image
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = ''
+ instance_ref['root_device_name'] = '/dev/vda'
+ instance_ref['uuid'] = uuidutils.generate_uuid()
+ instance = objects.Instance(**instance_ref)
+
+ conn.spawn(self.context, instance, None, [], None,
+ block_device_info=block_device_info)
+ self.assertFalse(self.cache_called_for_disk)
+
+ # Booted from volume but with placeholder image
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 'my_fake_image'
+ instance_ref['root_device_name'] = '/dev/vda'
+ instance_ref['uuid'] = uuidutils.generate_uuid()
+ instance = objects.Instance(**instance_ref)
+
+ conn.spawn(self.context, instance, None, [], None,
+ block_device_info=block_device_info)
+ self.assertFalse(self.cache_called_for_disk)
+
+ # Booted from an image
+ instance_ref['image_ref'] = 'my_fake_image'
+ instance_ref['uuid'] = uuidutils.generate_uuid()
+ instance = objects.Instance(**instance_ref)
+ conn.spawn(self.context, instance, None, [], None)
+ self.assertTrue(self.cache_called_for_disk)
+
+ def test_start_lxc_from_volume(self):
+ # Spawning an LXC guest from an iSCSI volume: the container rootfs
+ # must be set up from the volume's device path and the resulting
+ # device name recorded in the instance's system metadata.
+ self.flags(virt_type="lxc",
+ group='libvirt')
+
+ # Replaces disk.api.setup_container; verifies the device path and
+ # that copy-on-write is requested, then reports the nbd device.
+ def check_setup_container(path, container_dir=None, use_cow=False):
+ self.assertEqual(path, '/dev/path/to/dev')
+ self.assertTrue(use_cow)
+ return '/dev/nbd1'
+
+ # Minimal boot-index-0 iSCSI block device mapping.
+ bdm = {
+ 'guest_format': None,
+ 'boot_index': 0,
+ 'mount_device': '/dev/sda',
+ 'connection_info': {
+ 'driver_volume_type': 'iscsi',
+ 'serial': 'afc1',
+ 'data': {
+ 'access_mode': 'rw',
+ 'device_path': '/dev/path/to/dev',
+ 'target_discovered': False,
+ 'encrypted': False,
+ 'qos_specs': None,
+ 'target_iqn': 'iqn: volume-afc1',
+ 'target_portal': 'ip: 3260',
+ 'volume_id': 'afc1',
+ 'target_lun': 1,
+ 'auth_password': 'uj',
+ 'auth_username': '47',
+ 'auth_method': 'CHAP'
+ }
+ },
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'delete_on_termination': False
+ }
+
+ def _get(key, opt=None):
+ return bdm.get(key, opt)
+
+ def getitem(key):
+ return bdm[key]
+
+ def setitem(key, val):
+ bdm[key] = val
+
+ # Dict-like mock backed by the bdm dict above.
+ bdm_mock = mock.MagicMock()
+ bdm_mock.__getitem__.side_effect = getitem
+ bdm_mock.__setitem__.side_effect = setitem
+ bdm_mock.get = _get
+
+ disk_mock = mock.MagicMock()
+ disk_mock.source_path = '/dev/path/to/dev'
+
+ block_device_info = {'block_device_mapping': [bdm_mock],
+ 'root_device_name': '/dev/sda'}
+
+ # Volume-backed instance created without image
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = ''
+ instance_ref['root_device_name'] = '/dev/sda'
+ instance_ref['ephemeral_gb'] = 0
+ instance_ref['uuid'] = uuidutils.generate_uuid()
+ instance_ref['system_metadata']['image_disk_format'] = 'qcow2'
+ inst_obj = objects.Instance(**instance_ref)
+
+ flavor = inst_obj.get_flavor()
+ flavor.extra_specs = {}
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ with contextlib.nested(
+ mock.patch.object(conn, '_create_images_and_backing'),
+ mock.patch.object(conn, 'plug_vifs'),
+ mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
+ mock.patch.object(conn.firewall_driver, 'prepare_instance_filter'),
+ mock.patch.object(conn.firewall_driver, 'apply_instance_filter'),
+ mock.patch.object(conn, '_create_domain'),
+ mock.patch.object(conn, '_connect_volume'),
+ mock.patch.object(conn, '_get_volume_config',
+ return_value=disk_mock),
+ mock.patch.object(conn, 'get_info',
+ return_value={'state': power_state.RUNNING}),
+ mock.patch('nova.virt.disk.api.setup_container',
+ side_effect=check_setup_container),
+ mock.patch('nova.virt.disk.api.teardown_container'),
+ mock.patch.object(objects.Instance, 'save'),
+ mock.patch.object(objects.Flavor, 'get_by_id',
+ return_value=flavor)):
+
+ conn.spawn(self.context, inst_obj, None, [], None,
+ network_info=[],
+ block_device_info=block_device_info)
+ # The device returned by setup_container must be persisted.
+ self.assertEqual('/dev/nbd1',
+ inst_obj.system_metadata.get(
+ 'rootfs_device_name'))
+
+ def test_spawn_with_pci_devices(self):
+ # spawn() must look up each assigned PCI device by a well-formed
+ # libvirt node-device name (pci_DDDD_BB_SS_F); a malformed address
+ # raises libvirtError from the fake lookup.
+ def fake_none(*args, **kwargs):
+ return None
+
+ def fake_get_info(instance):
+ return {'state': power_state.RUNNING}
+
+ class FakeLibvirtPciDevice():
+ def dettach(self):
+ return None
+
+ def reset(self):
+ return None
+
+ def fake_node_device_lookup_by_name(address):
+ # Accept only names matching pci_<domain>_<bus>_<slot>_<function>.
+ pattern = ("pci_%(hex)s{4}_%(hex)s{2}_%(hex)s{2}_%(oct)s{1}"
+ % dict(hex='[\da-f]', oct='[0-8]'))
+ pattern = re.compile(pattern)
+ if pattern.match(address) is None:
+ raise libvirt.libvirtError()
+ return FakeLibvirtPciDevice()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_get_guest_xml', fake_none)
+ self.stubs.Set(conn, '_create_image', fake_none)
+ self.stubs.Set(conn, '_create_domain_and_network', fake_none)
+ self.stubs.Set(conn, 'get_info', fake_get_info)
+
+ conn._conn.nodeDeviceLookupByName = \
+ fake_node_device_lookup_by_name
+
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 'my_fake_image'
+ instance = objects.Instance(**instance_ref)
+ instance = dict(instance.iteritems())
+ instance['pci_devices'] = [{'address': '0000:00:00.0'}]
+
+ conn.spawn(self.context, instance, None, [], None)
+
+ def test_chown_disk_config_for_instance(self):
+ # _chown_disk_config_for_instance chowns <instance_path>/disk.config
+ # to the current uid when the file exists.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = copy.deepcopy(self.test_instance)
+ instance['name'] = 'test_name'
+ self.mox.StubOutWithMock(fake_libvirt_utils, 'get_instance_path')
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(fake_libvirt_utils, 'chown')
+ fake_libvirt_utils.get_instance_path(instance).AndReturn('/tmp/uuid')
+ os.path.exists('/tmp/uuid/disk.config').AndReturn(True)
+ fake_libvirt_utils.chown('/tmp/uuid/disk.config', os.getuid())
+
+ self.mox.ReplayAll()
+ conn._chown_disk_config_for_instance(instance)
+
+ def _test_create_image_plain(self, os_type='', filename='', mkfs=False):
+ # Shared helper: drive _create_image with a fake image backend and
+ # verify the cached files (root disk by image-ref sha1, ephemeral
+ # disk under `filename`) and their sizes.
+ # :param os_type: instance os_type to test ephemeral fs selection
+ # :param filename: expected ephemeral cache filename
+ # :param mkfs: register a custom mkfs command for os_type
+ gotFiles = []
+
+ def fake_image(self, instance, name, image_type=''):
+ class FakeImage(imagebackend.Image):
+ def __init__(self, instance, name, is_block_dev=False):
+ self.path = os.path.join(instance['name'], name)
+ self.is_block_dev = is_block_dev
+
+ def create_image(self, prepare_template, base,
+ size, *args, **kwargs):
+ pass
+
+ def cache(self, fetch_func, filename, size=None,
+ *args, **kwargs):
+ # Record what would have been cached instead of
+ # touching the filesystem.
+ gotFiles.append({'filename': filename,
+ 'size': size})
+
+ def snapshot(self, name):
+ pass
+
+ return FakeImage(instance, name)
+
+ def fake_none(*args, **kwargs):
+ return
+
+ def fake_get_info(instance):
+ return {'state': power_state.RUNNING}
+
+ # Stop 'libvirt_driver._create_image' touching filesystem
+ self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
+ fake_image)
+
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 1
+ instance = objects.Instance(**instance_ref)
+ instance['os_type'] = os_type
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_get_guest_xml', fake_none)
+ self.stubs.Set(conn, '_create_domain_and_network', fake_none)
+ self.stubs.Set(conn, 'get_info', fake_get_info)
+ if mkfs:
+ self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
+ {os_type: 'mkfs.ext3 --label %(fs_label)s %(target)s'})
+
+ image_meta = {'id': instance['image_ref']}
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance,
+ None,
+ image_meta)
+ conn._create_image(context, instance, disk_info['mapping'])
+ conn._get_guest_xml(self.context, instance, None,
+ disk_info, image_meta)
+
+ # Root disk is cached under sha1('1'); ephemeral under `filename`.
+ wantFiles = [
+ {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
+ 'size': 10 * units.Gi},
+ {'filename': filename,
+ 'size': 20 * units.Gi},
+ ]
+ self.assertEqual(gotFiles, wantFiles)
+
+ def test_create_image_plain_os_type_blank(self):
+ # Blank os_type falls back to the default ephemeral filename.
+ self._test_create_image_plain(os_type='',
+ filename='ephemeral_20_default',
+ mkfs=False)
+
+ def test_create_image_plain_os_type_none(self):
+ # None os_type also falls back to the default ephemeral filename.
+ self._test_create_image_plain(os_type=None,
+ filename='ephemeral_20_default',
+ mkfs=False)
+
+ def test_create_image_plain_os_type_set_no_fs(self):
+ # os_type set but no registered mkfs command: default filename.
+ self._test_create_image_plain(os_type='test',
+ filename='ephemeral_20_default',
+ mkfs=False)
+
+ def test_create_image_plain_os_type_set_with_fs(self):
+ # os_type with a registered mkfs command gets an os-type-specific
+ # ephemeral cache filename.
+ self._test_create_image_plain(os_type='test',
+ filename='ephemeral_20_test',
+ mkfs=True)
+
+ def test_create_image_with_swap(self):
+ # Like _test_create_image_plain, but with flavor swap enabled:
+ # a swap_<MB> file must additionally be cached.
+ gotFiles = []
+
+ def fake_image(self, instance, name, image_type=''):
+ class FakeImage(imagebackend.Image):
+ def __init__(self, instance, name, is_block_dev=False):
+ self.path = os.path.join(instance['name'], name)
+ self.is_block_dev = is_block_dev
+
+ def create_image(self, prepare_template, base,
+ size, *args, **kwargs):
+ pass
+
+ def cache(self, fetch_func, filename, size=None,
+ *args, **kwargs):
+ # Record what would have been cached instead of
+ # touching the filesystem.
+ gotFiles.append({'filename': filename,
+ 'size': size})
+
+ def snapshot(self, name):
+ pass
+
+ return FakeImage(instance, name)
+
+ def fake_none(*args, **kwargs):
+ return
+
+ def fake_get_info(instance):
+ return {'state': power_state.RUNNING}
+
+ # Stop 'libvirt_driver._create_image' touching filesystem
+ self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
+ fake_image)
+
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 1
+ # Turn on some swap to exercise that codepath in _create_image
+ instance_ref['system_metadata']['instance_type_swap'] = 500
+ instance = objects.Instance(**instance_ref)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn, '_get_guest_xml', fake_none)
+ self.stubs.Set(conn, '_create_domain_and_network', fake_none)
+ self.stubs.Set(conn, 'get_info', fake_get_info)
+
+ image_meta = {'id': instance['image_ref']}
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance,
+ None,
+ image_meta)
+ conn._create_image(context, instance, disk_info['mapping'])
+ conn._get_guest_xml(self.context, instance, None,
+ disk_info, image_meta)
+
+ # Root disk, ephemeral disk, plus the 500 MB swap file.
+ wantFiles = [
+ {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
+ 'size': 10 * units.Gi},
+ {'filename': 'ephemeral_20_default',
+ 'size': 20 * units.Gi},
+ {'filename': 'swap_500',
+ 'size': 500 * units.Mi},
+ ]
+ self.assertEqual(gotFiles, wantFiles)
+
+ @mock.patch.object(utils, 'execute')
+ def test_create_ephemeral_specified_fs(self, mock_exec):
+ # An explicitly specified filesystem (ext4) must win over the
+ # configured default_ephemeral_format (ext3).
+ self.flags(default_ephemeral_format='ext3')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
+ is_block_dev=True, max_size=20,
+ specified_fs='ext4')
+ mock_exec.assert_called_once_with('mkfs', '-t', 'ext4', '-F', '-L',
+ 'myVol', '/dev/something',
+ run_as_root=True)
+
+ def test_create_ephemeral_specified_fs_not_valid(self):
+ # An unsupported guest_format ('dummy') on an ephemeral BDM must
+ # make _create_image raise InvalidBDMFormat.
+ CONF.set_override('default_ephemeral_format', 'ext4')
+ ephemerals = [{'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'device_name': '/dev/vdb',
+ 'guest_format': 'dummy',
+ 'size': 1}]
+ block_device_info = {
+ 'ephemerals': ephemerals}
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 1
+ instance = objects.Instance(**instance_ref)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ image_meta = {'id': instance['image_ref']}
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance,
+ None,
+ image_meta)
+ # Drop the flavor-provided ephemeral so only the BDM one remains.
+ disk_info['mapping'].pop('disk.local')
+
+ with contextlib.nested(
+ mock.patch.object(utils, 'execute'),
+ mock.patch.object(conn, 'get_info'),
+ mock.patch.object(conn, '_create_domain_and_network')):
+ self.assertRaises(exception.InvalidBDMFormat, conn._create_image,
+ context, instance, disk_info['mapping'],
+ block_device_info=block_device_info)
+
+ def test_create_ephemeral_default(self):
+ # With no config override, ephemeral disks are formatted ext3.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('mkfs', '-t', 'ext3', '-F', '-L', 'myVol',
+ '/dev/something', run_as_root=True)
+ self.mox.ReplayAll()
+ conn._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
+ is_block_dev=True, max_size=20)
+
+ def test_create_ephemeral_with_conf(self):
+ # default_ephemeral_format from config selects the mkfs type.
+ CONF.set_override('default_ephemeral_format', 'ext4')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
+ '/dev/something', run_as_root=True)
+ self.mox.ReplayAll()
+ conn._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
+ is_block_dev=True)
+
+ def test_create_ephemeral_with_arbitrary(self):
+ # A per-os-type _MKFS_COMMAND template overrides the generic
+ # `mkfs -t` invocation.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
+ {'linux': 'mkfs.ext3 --label %(fs_label)s %(target)s'})
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('mkfs.ext3', '--label', 'myVol', '/dev/something',
+ run_as_root=True)
+ self.mox.ReplayAll()
+ conn._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
+ is_block_dev=True)
+
+ def test_create_swap_default(self):
+ # _create_swap runs mkswap on the target (not as root here).
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('mkswap', '/dev/something', run_as_root=False)
+ self.mox.ReplayAll()
+
+ conn._create_swap('/dev/something', 1, max_size=20)
+
+ def test_get_console_output_file(self):
+ # File-backed console: output is read from the console log path
+ # in the domain XML and truncated to MAX_CONSOLE_BYTES, keeping
+ # the tail of the log.
+ fake_libvirt_utils.files['console.log'] = '01234567890'
+
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 123456
+ instance = objects.Instance(**instance_ref)
+
+ console_dir = (os.path.join(tmpdir, instance['name']))
+ console_log = '%s/console.log' % (console_dir)
+ fake_dom_xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ </disk>
+ <console type='file'>
+ <source path='%s'/>
+ <target port='0'/>
+ </console>
+ </devices>
+ </domain>
+ """ % console_log
+
+ def fake_lookup(id):
+ return FakeVirtDomain(fake_dom_xml)
+
+ self.create_fake_libvirt_mock()
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ # Temporarily shrink the truncation limit; restore it even if
+ # the call raises.
+ try:
+ prev_max = libvirt_driver.MAX_CONSOLE_BYTES
+ libvirt_driver.MAX_CONSOLE_BYTES = 5
+ output = conn.get_console_output(self.context, instance)
+ finally:
+ libvirt_driver.MAX_CONSOLE_BYTES = prev_max
+
+ # Last 5 bytes of '01234567890'.
+ self.assertEqual('67890', output)
+
+ def test_get_console_output_pty(self):
+ # Pty-backed console: the pty is flushed/appended into a data
+ # file and the output is truncated to MAX_CONSOLE_BYTES,
+ # keeping the tail.
+ fake_libvirt_utils.files['pty'] = '01234567890'
+
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 123456
+ instance = objects.Instance(**instance_ref)
+
+ console_dir = (os.path.join(tmpdir, instance['name']))
+ pty_file = '%s/fake_pty' % (console_dir)
+ fake_dom_xml = """
+ <domain type='kvm'>
+ <devices>
+ <disk type='file'>
+ <source file='filename'/>
+ </disk>
+ <console type='pty'>
+ <source path='%s'/>
+ <target port='0'/>
+ </console>
+ </devices>
+ </domain>
+ """ % pty_file
+
+ def fake_lookup(id):
+ return FakeVirtDomain(fake_dom_xml)
+
+ def _fake_flush(self, fake_pty):
+ return 'foo'
+
+ def _fake_append_to_file(self, data, fpath):
+ return 'pty'
+
+ self.create_fake_libvirt_mock()
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
+ libvirt_driver.LibvirtDriver._flush_libvirt_console = _fake_flush
+ libvirt_driver.LibvirtDriver._append_to_file = _fake_append_to_file
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ # Temporarily shrink the truncation limit; restore it even if
+ # the call raises.
+ try:
+ prev_max = libvirt_driver.MAX_CONSOLE_BYTES
+ libvirt_driver.MAX_CONSOLE_BYTES = 5
+ output = conn.get_console_output(self.context, instance)
+ finally:
+ libvirt_driver.MAX_CONSOLE_BYTES = prev_max
+
+ # Last 5 bytes of '01234567890'.
+ self.assertEqual('67890', output)
+
+ def test_get_host_ip_addr(self):
+ # get_host_ip_addr reports the configured my_ip.
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ ip = conn.get_host_ip_addr()
+ self.assertEqual(ip, CONF.my_ip)
+
+ def test_broken_connection(self):
+ # Each (error code, error domain) pair below marks the libvirt
+ # connection as broken: _test_connection must return False.
+ for (error, domain) in (
+ (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_REMOTE),
+ (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_RPC),
+ (libvirt.VIR_ERR_INTERNAL_ERROR, libvirt.VIR_FROM_RPC)):
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ self.mox.StubOutWithMock(conn, "_wrapped_conn")
+ self.mox.StubOutWithMock(conn._wrapped_conn, "getLibVersion")
+ self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code")
+ self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_domain")
+
+ # getLibVersion is used as the connection liveness probe.
+ conn._wrapped_conn.getLibVersion().AndRaise(
+ libvirt.libvirtError("fake failure"))
+
+ libvirt.libvirtError.get_error_code().AndReturn(error)
+ libvirt.libvirtError.get_error_domain().AndReturn(domain)
+
+ self.mox.ReplayAll()
+
+ self.assertFalse(conn._test_connection(conn._wrapped_conn))
+
+ # Reset stubs before the next loop iteration re-stubs them.
+ self.mox.UnsetStubs()
+
+ def test_command_with_broken_connection(self):
+ # If opening the libvirt connection fails, driver calls must
+ # surface HypervisorUnavailable to the caller.
+ self.mox.UnsetStubs()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ with contextlib.nested(
+ mock.patch.object(libvirt, 'openAuth',
+ side_effect=libvirt.libvirtError("fake")),
+ mock.patch.object(libvirt.libvirtError, "get_error_code"),
+ mock.patch.object(libvirt.libvirtError, "get_error_domain"),
+ mock.patch.object(conn, '_set_host_enabled')):
+ self.assertRaises(exception.HypervisorUnavailable,
+ conn.get_num_instances)
+
+ def test_broken_connection_disable_service(self):
+ # A connection-close event must disable the compute service with
+ # a reason describing the lost libvirt connection.
+ self.mox.UnsetStubs()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ conn._init_events_pipe()
+ with contextlib.nested(
+ mock.patch.object(conn, '_set_host_enabled')):
+ # Deliver the close event and let the event loop process it.
+ conn._close_callback(conn._wrapped_conn, 'ERROR!', '')
+ conn._dispatch_events()
+ conn._set_host_enabled.assert_called_once_with(
+ False,
+ disable_reason=u'Connection to libvirt lost: ERROR!')
+
+ def test_service_resume_after_broken_connection(self):
+ # After a successful reconnect, a previously disabled service
+ # must be re-enabled by the driver.
+ self.mox.UnsetStubs()
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ service_mock = mock.MagicMock()
+ service_mock.disabled.return_value = True
+ with contextlib.nested(
+ mock.patch.object(libvirt, 'openAuth',
+ return_value=mock.MagicMock()),
+ mock.patch.object(objects.Service, "get_by_compute_host",
+ return_value=service_mock)):
+
+ conn.get_num_instances()
+ # NOTE(review): `is 'None'` is an identity comparison against a
+ # string literal — it only holds via string interning; `== 'None'`
+ # (or checking for None) looks intended. Left as-is: this is a
+ # patch hunk, changing it would alter the recorded diff.
+ self.assertTrue(not service_mock.disabled and
+ service_mock.disabled_reason is 'None')
+
def test_broken_connection_no_wrapped_conn(self):
    # Tests that calling _close_callback when _wrapped_conn is None
    # is a no-op, i.e. set_host_enabled won't be called.
    self.mox.UnsetStubs()
    # conn._wrapped_conn will be None since we never call libvirt.openAuth
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    # create our mock connection that libvirt will send to the callback
    mock_failed_conn = mock.MagicMock()
    mock_failed_conn.__getitem__.return_value = True
    # nothing should happen when calling _close_callback since
    # _wrapped_conn is None in the driver
    conn._init_events_pipe()
    conn._close_callback(mock_failed_conn, reason=None, opaque=None)
    conn._dispatch_events()
    # Reaching this point without an exception is the assertion: the
    # callback must silently ignore a connection it does not own.
+
def test_immediate_delete(self):
    """destroy() of an instance unknown to libvirt completes without
    raising: the InstanceNotFound from the domain lookup is absorbed.
    """
    def fake_lookup_by_name(instance_name):
        # Pretend libvirt has no domain for this instance.
        raise exception.InstanceNotFound(instance_id=instance_name)

    def fake_delete_instance_files(instance):
        pass

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
    self.stubs.Set(conn, '_delete_instance_files',
                   fake_delete_instance_files)

    instance = objects.Instance(**self.test_instance)
    conn.destroy(self.context, instance, {})
+
def _test_destroy_removes_disk(self, volume_fail=False):
    """Shared helper: destroy() disconnects the instance's volumes and
    deletes its files.

    :param volume_fail: when True, the recorded _disconnect_volume call
        raises VolumeNotFound; destroy() must still proceed to
        delete_instance_files and _undefine_domain.
    """
    instance = {"name": "instancename", "id": "42",
                "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64",
                "cleaned": 0, 'info_cache': None, 'security_groups': []}
    vol = {'block_device_mapping': [
           {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}

    # mox records calls in order; the replayed sequence below is the
    # exact call order destroy() is expected to make.
    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                             '_undefine_domain')
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(),
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(instance)
    self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
    driver.block_device_info_get_mapping(vol
                                         ).AndReturn(vol['block_device_mapping'])
    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                             "_disconnect_volume")
    if volume_fail:
        libvirt_driver.LibvirtDriver._disconnect_volume(
            mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).\
            AndRaise(exception.VolumeNotFound('vol'))
    else:
        libvirt_driver.LibvirtDriver._disconnect_volume(
            mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                             'delete_instance_files')
    (libvirt_driver.LibvirtDriver.delete_instance_files(mox.IgnoreArg()).
     AndReturn(True))
    libvirt_driver.LibvirtDriver._undefine_domain(instance)

    # Start test
    self.mox.ReplayAll()

    def fake_destroy(instance):
        pass

    def fake_os_path_exists(path):
        return True

    def fake_unplug_vifs(instance, network_info, ignore_errors=False):
        pass

    def fake_unfilter_instance(instance, network_info):
        pass

    def fake_obj_load_attr(self, attrname):
        if not hasattr(self, attrname):
            self[attrname] = {}

    def fake_save(self, context):
        pass

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    self.stubs.Set(conn, '_destroy', fake_destroy)
    self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
    self.stubs.Set(conn.firewall_driver,
                   'unfilter_instance', fake_unfilter_instance)
    self.stubs.Set(os.path, 'exists', fake_os_path_exists)
    # Reduce objects.Instance to the three fields this path touches so
    # the dict above can stand in for a real instance object.
    self.stubs.Set(objects.Instance, 'fields',
                   {'id': int, 'uuid': str, 'cleaned': int})
    self.stubs.Set(objects.Instance, 'obj_load_attr',
                   fake_obj_load_attr)
    self.stubs.Set(objects.Instance, 'save', fake_save)

    conn.destroy(self.context, instance, [], vol)
+
def test_destroy_removes_disk(self):
    # Happy path: the volume disconnect succeeds during destroy.
    self._test_destroy_removes_disk(
        volume_fail=False)
+
def test_destroy_removes_disk_volume_fails(self):
    # Error path: the volume disconnect raises, destroy still finishes.
    self._test_destroy_removes_disk(
        volume_fail=True)
+
def test_destroy_not_removes_disk(self):
    """destroy() called with destroy_disks=False undefines the domain
    but never touches volumes or instance files (no mocks for them are
    recorded, so any such call would fail the mox verification).
    """
    instance = {"name": "instancename", "id": "instanceid",
                "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}

    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                             '_undefine_domain')
    libvirt_driver.LibvirtDriver._undefine_domain(instance)

    # Start test
    self.mox.ReplayAll()

    def fake_destroy(instance):
        pass

    def fake_os_path_exists(path):
        return True

    def fake_unplug_vifs(instance, network_info, ignore_errors=False):
        pass

    def fake_unfilter_instance(instance, network_info):
        pass

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    self.stubs.Set(conn, '_destroy', fake_destroy)
    self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
    self.stubs.Set(conn.firewall_driver,
                   'unfilter_instance', fake_unfilter_instance)
    self.stubs.Set(os.path, 'exists', fake_os_path_exists)
    # Final positional False is destroy_disks.
    conn.destroy(self.context, instance, [], None, False)
+
@mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_lookup_by_name')
def test_destroy_lxc_calls_teardown_container(self, mock_look_up,
                                              mock_teardown_container,
                                              mock_cleanup):
    """For an LXC guest, destroy() powers off the domain and then tears
    down the container before the generic cleanup runs.
    """
    self.flags(virt_type='lxc', group='libvirt')
    fake_domain = FakeVirtDomain()

    def destroy_side_effect(*args, **kwargs):
        # Flip the domain's reported power state so the driver sees the
        # destroy as having taken effect.
        fake_domain._info[0] = power_state.SHUTDOWN

    with mock.patch.object(fake_domain, 'destroy',
                           side_effect=destroy_side_effect) as mock_domain_destroy:
        mock_look_up.return_value = fake_domain
        instance = fake_instance.fake_instance_obj(self.context)

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        network_info = []
        conn.destroy(self.context, instance, network_info, None, False)

        # The domain is looked up twice along the destroy path.
        mock_look_up.assert_has_calls([mock.call(instance.name),
                                       mock.call(instance.name)])
        mock_domain_destroy.assert_called_once_with()
        mock_teardown_container.assert_called_once_with(instance)
        mock_cleanup.assert_called_once_with(self.context, instance,
                                             network_info, None, False,
                                             None)
+
@mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_lookup_by_name')
def test_destroy_lxc_calls_teardown_container_when_no_domain(self,
        mock_look_up, mock_teardown_container, mock_cleanup):
    """Even when the LXC domain no longer exists (lookup raises
    InstanceNotFound), destroy() must still tear down the container and
    run cleanup.
    """
    self.flags(virt_type='lxc', group='libvirt')
    instance = fake_instance.fake_instance_obj(self.context)
    inf_exception = exception.InstanceNotFound(instance_id=instance.name)
    mock_look_up.side_effect = inf_exception

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    network_info = []
    conn.destroy(self.context, instance, network_info, None, False)

    mock_look_up.assert_has_calls([mock.call(instance.name),
                                   mock.call(instance.name)])
    mock_teardown_container.assert_called_once_with(instance)
    mock_cleanup.assert_called_once_with(self.context, instance,
                                         network_info, None, False,
                                         None)
+
def test_reboot_different_ids(self):
    """Soft reboot where the domain reappears with a different ID after
    shutdown: the driver recreates the domain (_create_domain called).
    """
    class FakeLoopingCall:
        def start(self, *a, **k):
            return self

        def wait(self):
            return None

    self.flags(wait_soft_reboot_seconds=1, group='libvirt')
    info_tuple = ('fake', 'fake', 'fake', 'also_fake')
    self.reboot_create_called = False

    # Mock domain: RUNNING with one ID, then CRASHED with another ID
    # after shutdown() -- the ID change signals a new domain instance.
    mock_domain = self.mox.CreateMock(libvirt.virDomain)
    mock_domain.info().AndReturn(
        (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
    mock_domain.ID().AndReturn('some_fake_id')
    mock_domain.shutdown()
    mock_domain.info().AndReturn(
        (libvirt_driver.VIR_DOMAIN_CRASHED,) + info_tuple)
    mock_domain.ID().AndReturn('some_other_fake_id')

    self.mox.ReplayAll()

    def fake_lookup_by_name(instance_name):
        return mock_domain

    def fake_create_domain(**kwargs):
        self.reboot_create_called = True

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
    self.stubs.Set(conn, '_create_domain', fake_create_domain)
    self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
                   lambda *a, **k: FakeLoopingCall())
    self.stubs.Set(pci_manager, 'get_instance_pci_devs', lambda *a: [])
    conn.reboot(None, instance, [], 'SOFT')
    self.assertTrue(self.reboot_create_called)
+
def test_reboot_same_ids(self):
    """Soft reboot where the domain keeps the same ID after shutdown:
    the soft reboot is considered failed and the driver falls back to
    a hard reboot.
    """
    class FakeLoopingCall:
        def start(self, *a, **k):
            return self

        def wait(self):
            return None

    self.flags(wait_soft_reboot_seconds=1, group='libvirt')
    info_tuple = ('fake', 'fake', 'fake', 'also_fake')
    self.reboot_hard_reboot_called = False

    # Mock domain: same ID before and after shutdown().
    mock_domain = self.mox.CreateMock(libvirt.virDomain)
    mock_domain.info().AndReturn(
        (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
    mock_domain.ID().AndReturn('some_fake_id')
    mock_domain.shutdown()
    mock_domain.info().AndReturn(
        (libvirt_driver.VIR_DOMAIN_CRASHED,) + info_tuple)
    mock_domain.ID().AndReturn('some_fake_id')

    self.mox.ReplayAll()

    def fake_lookup_by_name(instance_name):
        return mock_domain

    def fake_hard_reboot(*args, **kwargs):
        self.reboot_hard_reboot_called = True

    def fake_sleep(interval):
        pass

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
    self.stubs.Set(greenthread, 'sleep', fake_sleep)
    self.stubs.Set(conn, '_hard_reboot', fake_hard_reboot)
    self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
                   lambda *a, **k: FakeLoopingCall())
    self.stubs.Set(pci_manager, 'get_instance_pci_devs', lambda *a: [])
    conn.reboot(None, instance, [], 'SOFT')
    self.assertTrue(self.reboot_hard_reboot_called)
+
def test_soft_reboot_libvirt_exception(self):
    # Tests that a hard reboot is performed when a soft reboot results
    # in raising a libvirtError.
    info_tuple = ('fake', 'fake', 'fake', 'also_fake')

    # setup mocks: shutdown() raises, so the soft path cannot complete.
    mock_domain = self.mox.CreateMock(libvirt.virDomain)
    mock_domain.info().AndReturn(
        (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
    mock_domain.ID().AndReturn('some_fake_id')
    mock_domain.shutdown().AndRaise(libvirt.libvirtError('Err'))

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    context = None
    instance = {"name": "instancename", "id": "instanceid",
                "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
    network_info = []

    self.mox.StubOutWithMock(conn, '_lookup_by_name')
    conn._lookup_by_name(instance['name']).AndReturn(mock_domain)
    self.mox.StubOutWithMock(conn, '_hard_reboot')
    # Recording this call is the assertion: mox verifies it happens.
    conn._hard_reboot(context, instance, network_info, None)

    self.mox.ReplayAll()

    conn.reboot(context, instance, network_info, 'SOFT')
+
def _test_resume_state_on_host_boot_with_state(self, state):
    """Shared helper: resume_state_on_host_boot hard-reboots the guest
    unless its power state is one of RUNNING, SUSPENDED, NOSTATE or
    PAUSED, in which case it must do nothing.

    :param state: the power_state the mocked domain reports.
    """
    called = {'count': 0}
    mock = self.mox.CreateMock(libvirt.virDomain)
    mock.info().AndReturn([state, None, None, None, None])
    self.mox.ReplayAll()

    def fake_lookup_by_name(instance_name):
        return mock

    def fake_hard_reboot(*args):
        called['count'] += 1

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
    self.stubs.Set(conn, '_hard_reboot', fake_hard_reboot)
    instance_details = {"name": "instancename", "id": 1,
                        "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
    instance = fake_instance.fake_instance_obj(
        self.context, **instance_details)
    network_info = _fake_network_info(self.stubs, 1)

    conn.resume_state_on_host_boot(self.context, instance, network_info,
                                   block_device_info=None)

    # States in which the guest needs no intervention on host boot.
    ignored_states = (power_state.RUNNING,
                      power_state.SUSPENDED,
                      power_state.NOSTATE,
                      power_state.PAUSED)
    if state in ignored_states:
        self.assertEqual(called['count'], 0)
    else:
        self.assertEqual(called['count'], 1)
+
def test_resume_state_on_host_boot_with_running_state(self):
    # RUNNING is an ignored state: no hard reboot expected.
    self._test_resume_state_on_host_boot_with_state(
        power_state.RUNNING)
+
def test_resume_state_on_host_boot_with_suspended_state(self):
    # SUSPENDED is an ignored state: no hard reboot expected.
    self._test_resume_state_on_host_boot_with_state(
        power_state.SUSPENDED)
+
def test_resume_state_on_host_boot_with_paused_state(self):
    # PAUSED is an ignored state: no hard reboot expected.
    self._test_resume_state_on_host_boot_with_state(
        power_state.PAUSED)
+
def test_resume_state_on_host_boot_with_nostate(self):
    # NOSTATE is an ignored state: no hard reboot expected.
    self._test_resume_state_on_host_boot_with_state(
        power_state.NOSTATE)
+
def test_resume_state_on_host_boot_with_shutdown_state(self):
    # BUG FIX: this test previously passed power_state.RUNNING, which
    # duplicated the running-state test and never exercised the
    # SHUTDOWN path. SHUTDOWN is not in the helper's ignored_states,
    # so a hard reboot is expected here.
    self._test_resume_state_on_host_boot_with_state(power_state.SHUTDOWN)
+
def test_resume_state_on_host_boot_with_crashed_state(self):
    # CRASHED is not an ignored state: a hard reboot is expected.
    self._test_resume_state_on_host_boot_with_state(
        power_state.CRASHED)
+
def test_resume_state_on_host_boot_with_instance_not_found_on_driver(self):
    """If the domain lookup raises InstanceNotFound, the guest is
    treated as needing a restart: hard reboot is invoked exactly once.
    """
    called = {'count': 0}
    instance_details = {'name': 'test'}
    instance = fake_instance.fake_instance_obj(
        self.context, **instance_details)

    def fake_lookup_by_name(instance_name):
        raise exception.InstanceNotFound(instance_id='fake')

    def fake_hard_reboot(*args):
        called['count'] += 1

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
    self.stubs.Set(conn, '_hard_reboot', fake_hard_reboot)
    conn.resume_state_on_host_boot(self.context, instance, network_info=[],
                                   block_device_info=None)

    self.assertEqual(called['count'], 1)
+
def test_hard_reboot(self):
    """_hard_reboot destroys the domain, regenerates guest XML (written
    to disk), recreates image backing files, and brings the domain and
    its network back up -- in that recorded order.
    """
    called = {'count': 0}
    instance = objects.Instance(**self.test_instance)
    network_info = _fake_network_info(self.stubs, 1)
    block_device_info = None

    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(conn, '_destroy')
    self.mox.StubOutWithMock(conn, '_get_instance_disk_info')
    self.mox.StubOutWithMock(conn, '_get_guest_xml')
    self.mox.StubOutWithMock(conn, '_create_images_and_backing')
    self.mox.StubOutWithMock(conn, '_create_domain_and_network')

    def fake_get_info(instance_name):
        # First poll reports SHUTDOWN, subsequent polls RUNNING, so the
        # reboot wait loop terminates.
        called['count'] += 1
        if called['count'] == 1:
            state = power_state.SHUTDOWN
        else:
            state = power_state.RUNNING
        return dict(state=state)

    self.stubs.Set(conn, 'get_info', fake_get_info)

    conn._destroy(instance)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance, block_device_info)

    # Image metadata must come from the instance's cached system
    # metadata, not from a Glance call.
    system_meta = utils.instance_sys_meta(instance)
    image_meta = utils.get_image_from_system_metadata(system_meta)

    conn._get_guest_xml(self.context, instance, network_info, disk_info,
                        image_meta=image_meta,
                        block_device_info=block_device_info,
                        write_to_disk=True).AndReturn(dummyxml)
    disk_info_json = '[{"virt_disk_size": 2}]'
    conn._get_instance_disk_info(instance["name"], dummyxml,
                                 block_device_info).AndReturn(disk_info_json)
    conn._create_images_and_backing(self.context, instance,
                                    libvirt_utils.get_instance_path(instance),
                                    disk_info_json)
    conn._create_domain_and_network(self.context, dummyxml, instance,
                                    network_info, block_device_info,
                                    reboot=True, vifs_already_plugged=True)
    self.mox.ReplayAll()

    conn._hard_reboot(self.context, instance, network_info,
                      block_device_info)
+
@mock.patch('nova.openstack.common.loopingcall.FixedIntervalLoopingCall')
@mock.patch('nova.pci.manager.get_instance_pci_devs')
@mock.patch('nova.virt.libvirt.LibvirtDriver._prepare_pci_devices_for_use')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
@mock.patch('nova.virt.libvirt.utils.write_to_file')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_config')
@mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
@mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
def test_hard_reboot_does_not_call_glance_show(self,
        mock_destroy, mock_get_disk_info, mock_get_guest_config,
        mock_get_instance_path, mock_write_to_file,
        mock_get_instance_disk_info, mock_create_images_and_backing,
        mock_create_domand_and_network, mock_prepare_pci_devices_for_use,
        mock_get_instance_pci_devs, mock_looping_call):
    """For a hard reboot, we shouldn't need an additional call to glance
    to get the image metadata.

    This is important for automatically spinning up instances on a
    host-reboot, since we won't have a user request context that'll allow
    the Glance request to go through. We have to rely on the cached image
    metadata, instead.

    https://bugs.launchpad.net/nova/+bug/1339386
    """
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    instance = objects.Instance(**self.test_instance)

    network_info = mock.MagicMock()
    block_device_info = mock.MagicMock()
    mock_get_disk_info.return_value = {}
    mock_get_guest_config.return_value = mock.MagicMock()
    mock_get_instance_path.return_value = '/foo'
    mock_looping_call.return_value = mock.MagicMock()
    # Replace the image API so any Glance access would be recorded.
    conn._image_api = mock.MagicMock()

    conn._hard_reboot(self.context, instance, network_info,
                      block_device_info)

    # The whole point: no conn._image_api.get (Glance show) call.
    self.assertFalse(conn._image_api.get.called)
+
def test_power_on(self):
    """Hard reboot (power on) must build disk XML whose bus comes from
    the instance's cached image metadata when present, otherwise from
    the image service's properties.
    """

    def _check_xml_bus(name, xml, block_info):
        # Stands in for _get_instance_disk_info so we can inspect the
        # generated XML: the first disk's bus must match the expected
        # hw_disk_bus ('ide' in both setups here).
        tree = etree.fromstring(xml)
        got_disk_targets = tree.findall('./devices/disk/target')
        system_meta = utils.instance_sys_meta(instance)
        image_meta = utils.get_image_from_system_metadata(system_meta)
        want_device_bus = image_meta.get('hw_disk_bus')
        if not want_device_bus:
            want_device_bus = self.fake_img['properties']['hw_disk_bus']
        got_device_bus = got_disk_targets[0].get('bus')
        self.assertEqual(got_device_bus, want_device_bus)

    def fake_get_info(instance_name):
        # SHUTDOWN on the first poll, RUNNING afterwards, so the reboot
        # wait loop terminates.
        called['count'] += 1
        if called['count'] == 1:
            state = power_state.SHUTDOWN
        else:
            state = power_state.RUNNING
        return dict(state=state)

    def _get_inst(with_meta=True):
        # Build a test instance; with_meta controls whether hw_disk_bus
        # is present in its cached system metadata.
        inst_ref = self.test_instance
        inst_ref['uuid'] = uuidutils.generate_uuid()
        if with_meta:
            inst_ref['system_metadata']['image_hw_disk_bus'] = 'ide'
        instance = objects.Instance(**inst_ref)
        instance['image_ref'] = '70a599e0-31e7-49b7-b260-868f221a761e'
        return instance

    called = {'count': 0}
    self.fake_img = {'id': '70a599e0-31e7-49b7-b260-868f221a761e',
                     'name': 'myfakeimage',
                     'created_at': '',
                     'updated_at': '',
                     'deleted_at': None,
                     'deleted': False,
                     'status': 'active',
                     'is_public': False,
                     'container_format': 'bare',
                     'disk_format': 'qcow2',
                     'size': '74185822',
                     'properties': {'hw_disk_bus': 'ide'}}

    instance = _get_inst()
    flavor = instance.get_flavor()
    flavor.extra_specs = {}
    network_info = _fake_network_info(self.stubs, 1)
    block_device_info = None
    image_service_mock = mock.Mock()
    image_service_mock.show.return_value = self.fake_img

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with contextlib.nested(
        mock.patch.object(conn, '_destroy', return_value=None),
        mock.patch.object(conn, '_create_images_and_backing'),
        mock.patch.object(conn, '_create_domain_and_network'),
        mock.patch.object(objects.Flavor, 'get_by_id',
                          return_value = flavor),
        mock.patch.object(objects.Instance, 'save')):
        conn.get_info = fake_get_info
        conn._get_instance_disk_info = _check_xml_bus
        # Case 1: bus from the instance's cached system metadata.
        conn._hard_reboot(self.context, instance, network_info,
                          block_device_info)

        # Case 2: no cached bus; falls back to image properties.
        instance = _get_inst(with_meta=False)
        conn._hard_reboot(self.context, instance, network_info,
                          block_device_info)
+
def _test_clean_shutdown(self, seconds_to_shutdown,
                         timeout, retry_interval,
                         shutdown_attempts, succeeds):
    """Shared helper for _clean_shutdown retry behaviour.

    :param seconds_to_shutdown: simulated seconds before the guest
        actually reaches SHUTDOWN
    :param timeout: total seconds _clean_shutdown may wait
    :param retry_interval: seconds between repeated shutdown() calls
    :param shutdown_attempts: expected number of shutdown() calls
    :param succeeds: expected return value of _clean_shutdown
    """
    self.stubs.Set(time, 'sleep', lambda x: None)
    info_tuple = ('fake', 'fake', 'fake', 'also_fake')
    shutdown_count = []

    def count_shutdowns():
        shutdown_count.append("shutdown")

    # Mock domain: record one info()/shutdown() pair up front, then one
    # info() per simulated second, re-issuing shutdown() each time the
    # retry countdown expires.
    mock_domain = self.mox.CreateMock(libvirt.virDomain)

    mock_domain.info().AndReturn(
        (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
    mock_domain.shutdown().WithSideEffects(count_shutdowns)

    retry_countdown = retry_interval
    for x in xrange(min(seconds_to_shutdown, timeout)):  # py2 xrange
        mock_domain.info().AndReturn(
            (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
        if retry_countdown == 0:
            mock_domain.shutdown().WithSideEffects(count_shutdowns)
            retry_countdown = retry_interval
        else:
            retry_countdown -= 1

    if seconds_to_shutdown < timeout:
        mock_domain.info().AndReturn(
            (libvirt_driver.VIR_DOMAIN_SHUTDOWN,) + info_tuple)

    self.mox.ReplayAll()

    def fake_lookup_by_name(instance_name):
        return mock_domain

    def fake_create_domain(**kwargs):
        self.reboot_create_called = True

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = {"name": "instancename", "id": "instanceid",
                "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
    self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
    self.stubs.Set(conn, '_create_domain', fake_create_domain)
    result = conn._clean_shutdown(instance, timeout, retry_interval)

    self.assertEqual(succeeds, result)
    self.assertEqual(shutdown_attempts, len(shutdown_count))
+
def test_clean_shutdown_first_time(self):
    # Guest shuts down before the first retry is due: one attempt, success.
    self._test_clean_shutdown(
        seconds_to_shutdown=2, timeout=5, retry_interval=3,
        shutdown_attempts=1, succeeds=True)
+
def test_clean_shutdown_with_retry(self):
    # Guest needs longer than one retry interval: two attempts, success.
    self._test_clean_shutdown(
        seconds_to_shutdown=4, timeout=5, retry_interval=3,
        shutdown_attempts=2, succeeds=True)
+
def test_clean_shutdown_failure(self):
    # Guest never shuts down within the timeout: two attempts, failure.
    self._test_clean_shutdown(
        seconds_to_shutdown=6, timeout=5, retry_interval=3,
        shutdown_attempts=2, succeeds=False)
+
def test_clean_shutdown_no_wait(self):
    # Zero timeout: a single attempt is made and the call reports failure.
    self._test_clean_shutdown(
        seconds_to_shutdown=6, timeout=0, retry_interval=3,
        shutdown_attempts=1, succeeds=False)
+
@mock.patch.object(objects.Flavor, 'get_by_id')
@mock.patch.object(FakeVirtDomain, 'attachDevice')
@mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
@mock.patch.object(compute_utils, 'get_image_metadata', return_value=None)
def test_attach_sriov_ports(self,
                            mock_get_image_metadata,
                            mock_ID,
                            mock_attachDevice,
                            mock_flavor):
    """_attach_sriov_ports attaches a device to the domain for a VIF of
    vnic_type 'direct', looking up image metadata for the instance.
    """
    instance = objects.Instance(**self.test_instance)
    flavor = instance.get_flavor()
    flavor.extra_specs = {}
    mock_flavor.return_value = flavor

    network_info = _fake_network_info(self.stubs, 1)
    # Mark the VIF as an SR-IOV direct port.
    network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
    domain = FakeVirtDomain()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    conn._attach_sriov_ports(self.context, instance, domain, network_info)
    mock_get_image_metadata.assert_called_once_with(self.context,
        conn._image_api, instance['image_ref'], instance)
    self.assertTrue(mock_attachDevice.called)
+
@mock.patch.object(objects.Flavor, 'get_by_id')
@mock.patch.object(FakeVirtDomain, 'attachDevice')
@mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
@mock.patch.object(compute_utils, 'get_image_metadata', return_value=None)
def test_attach_sriov_ports_with_info_cache(self,
                                            mock_get_image_metadata,
                                            mock_ID,
                                            mock_attachDevice,
                                            mock_flavor):
    """When no network_info argument is passed (None), _attach_sriov_ports
    falls back to the instance's info_cache and still attaches the
    direct-type VIF's device.
    """
    instance = objects.Instance(**self.test_instance)
    flavor = instance.get_flavor()
    flavor.extra_specs = {}
    mock_flavor.return_value = flavor

    network_info = _fake_network_info(self.stubs, 1)
    network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
    # The VIF data lives only in the instance's info cache here.
    instance.info_cache = objects.InstanceInfoCache(
        network_info=network_info)
    domain = FakeVirtDomain()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    conn._attach_sriov_ports(self.context, instance, domain, None)
    mock_get_image_metadata.assert_called_once_with(self.context,
        conn._image_api, instance['image_ref'], instance)
    self.assertTrue(mock_attachDevice.called)
+
@mock.patch.object(objects.Flavor, 'get_by_id')
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   '_has_min_version', return_value=True)
@mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
@mock.patch.object(compute_utils, 'get_image_metadata', return_value=None)
def test_detach_sriov_ports(self,
                            mock_get_image_metadata,
                            mock_detachDeviceFlags,
                            mock_has_min_version,
                            mock_flavor):
    """_detach_sriov_ports detaches the device of a direct-type VIF
    (taken from the instance's info cache) via detachDeviceFlags.
    """
    instance = objects.Instance(**self.test_instance)
    flavor = instance.get_flavor()
    flavor.extra_specs = {}
    mock_flavor.return_value = flavor

    network_info = _fake_network_info(self.stubs, 1)
    network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
    instance.info_cache = objects.InstanceInfoCache(
        network_info=network_info)

    domain = FakeVirtDomain()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    conn._detach_sriov_ports(instance, domain)
    # _detach_sriov_ports takes no context, so the context passed to
    # get_image_metadata is whatever it supplies internally (mock.ANY).
    mock_get_image_metadata.assert_called_once_with(mock.ANY,
        conn._image_api, instance['image_ref'], instance)
    self.assertTrue(mock_detachDeviceFlags.called)
+
def test_resume(self):
    """resume() rebuilds the domain from its existing XML, reuses the
    network/block-device wiring (vifs_already_plugged=True) and
    re-attaches the instance's PCI devices.
    """
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")
    instance = objects.Instance(**self.test_instance)
    network_info = _fake_network_info(self.stubs, 1)
    block_device_info = None
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with contextlib.nested(
        mock.patch.object(conn, '_get_existing_domain_xml',
                          return_value=dummyxml),
        mock.patch.object(conn, '_create_domain_and_network',
                          return_value='fake_dom'),
        mock.patch.object(conn, '_attach_pci_devices'),
        mock.patch.object(pci_manager, 'get_instance_pci_devs',
                          return_value='fake_pci_devs'),
    ) as (_get_existing_domain_xml, _create_domain_and_network,
          _attach_pci_devices, get_instance_pci_devs):
        conn.resume(self.context, instance, network_info,
                    block_device_info)
        _get_existing_domain_xml.assert_has_calls([mock.call(instance,
                                                             network_info, block_device_info)])
        _create_domain_and_network.assert_has_calls([mock.call(
            self.context, dummyxml,
            instance, network_info,
            block_device_info=block_device_info,
            vifs_already_plugged=True)])
        _attach_pci_devices.assert_has_calls([mock.call('fake_dom',
                                                        'fake_pci_devs')])
+
def test_destroy_undefines(self):
    """destroy() undefines the domain using undefineFlags(1) when the
    call succeeds.
    """
    mock = self.mox.CreateMock(libvirt.virDomain)
    mock.ID()
    mock.destroy()
    mock.undefineFlags(1).AndReturn(1)

    self.mox.ReplayAll()

    def fake_lookup_by_name(instance_name):
        return mock

    def fake_get_info(instance_name):
        return {'state': power_state.SHUTDOWN, 'id': -1}

    def fake_delete_instance_files(instance):
        return None

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
    self.stubs.Set(conn, 'get_info', fake_get_info)
    self.stubs.Set(conn, '_delete_instance_files',
                   fake_delete_instance_files)

    instance = {"name": "instancename", "id": "instanceid",
                "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
    conn.destroy(self.context, instance, [])
+
@mock.patch.object(rbd_utils, 'RBDDriver')
def test_cleanup_rbd(self, mock_driver):
    """_cleanup_rbd delegates volume cleanup for the instance to the
    RBD driver.
    """
    # NOTE: renamed from 'driver' to 'rbd_driver' so the local does not
    # shadow the module-level 'driver' (nova.virt.driver) that sibling
    # tests in this class rely on.
    rbd_driver = mock_driver.return_value
    rbd_driver.cleanup_volumes = mock.Mock()
    fake_instance = {'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'}

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    conn._cleanup_rbd(fake_instance)

    rbd_driver.cleanup_volumes.assert_called_once_with(fake_instance)
+
def test_destroy_undefines_no_undefine_flags(self):
    """If undefineFlags(1) raises libvirtError, destroy() falls back to
    the plain undefine() call.
    """
    mock = self.mox.CreateMock(libvirt.virDomain)
    mock.ID()
    mock.destroy()
    mock.undefineFlags(1).AndRaise(libvirt.libvirtError('Err'))
    mock.undefine()

    self.mox.ReplayAll()

    def fake_lookup_by_name(instance_name):
        return mock

    def fake_get_info(instance_name):
        return {'state': power_state.SHUTDOWN, 'id': -1}

    def fake_delete_instance_files(instance):
        return None

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
    self.stubs.Set(conn, 'get_info', fake_get_info)
    self.stubs.Set(conn, '_delete_instance_files',
                   fake_delete_instance_files)
    instance = {"name": "instancename", "id": "instanceid",
                "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
    conn.destroy(self.context, instance, [])
+
def test_destroy_undefines_no_attribute_with_managed_save(self):
    """Old libvirt bindings without undefineFlags (AttributeError):
    destroy() removes the managed-save image when one exists, then
    calls plain undefine().
    """
    mock = self.mox.CreateMock(libvirt.virDomain)
    mock.ID()
    mock.destroy()
    mock.undefineFlags(1).AndRaise(AttributeError())
    mock.hasManagedSaveImage(0).AndReturn(True)
    mock.managedSaveRemove(0)
    mock.undefine()

    self.mox.ReplayAll()

    def fake_lookup_by_name(instance_name):
        return mock

    def fake_get_info(instance_name):
        return {'state': power_state.SHUTDOWN, 'id': -1}

    def fake_delete_instance_files(instance):
        return None

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
    self.stubs.Set(conn, 'get_info', fake_get_info)
    self.stubs.Set(conn, '_delete_instance_files',
                   fake_delete_instance_files)
    instance = {"name": "instancename", "id": "instanceid",
                "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
    conn.destroy(self.context, instance, [])
+
def test_destroy_undefines_no_attribute_no_managed_save(self):
    """Old libvirt bindings where both undefineFlags and
    hasManagedSaveImage are missing (AttributeError): destroy() still
    succeeds via plain undefine().
    """
    mock = self.mox.CreateMock(libvirt.virDomain)
    mock.ID()
    mock.destroy()
    mock.undefineFlags(1).AndRaise(AttributeError())
    mock.hasManagedSaveImage(0).AndRaise(AttributeError())
    mock.undefine()

    self.mox.ReplayAll()

    def fake_lookup_by_name(instance_name):
        return mock

    def fake_get_info(instance_name):
        return {'state': power_state.SHUTDOWN, 'id': -1}

    def fake_delete_instance_files(instance):
        return None

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
    self.stubs.Set(conn, 'get_info', fake_get_info)
    self.stubs.Set(conn, '_delete_instance_files',
                   fake_delete_instance_files)
    instance = {"name": "instancename", "id": "instanceid",
                "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
    conn.destroy(self.context, instance, [])
+
def test_destroy_timed_out(self):
    """A libvirtError with VIR_ERR_OPERATION_TIMEOUT from domain
    destroy() is translated into InstancePowerOffFailure.
    """
    mock = self.mox.CreateMock(libvirt.virDomain)
    mock.ID()
    mock.destroy().AndRaise(libvirt.libvirtError("timed out"))
    self.mox.ReplayAll()

    def fake_lookup_by_name(instance_name):
        return mock

    def fake_get_error_code(self):
        return libvirt.VIR_ERR_OPERATION_TIMEOUT

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
    self.stubs.Set(libvirt.libvirtError, 'get_error_code',
                   fake_get_error_code)
    instance = {"name": "instancename", "id": "instanceid",
                "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
    self.assertRaises(exception.InstancePowerOffFailure,
                      conn.destroy, self.context, instance, [])
+
def test_private_destroy_not_found(self):
    """_destroy swallows VIR_ERR_NO_DOMAIN from both destroy() and the
    follow-up info() call.
    """
    ex = fakelibvirt.make_libvirtError(
        libvirt.libvirtError,
        "No such domain",
        error_code=libvirt.VIR_ERR_NO_DOMAIN)
    mock = self.mox.CreateMock(libvirt.virDomain)
    mock.ID()
    mock.destroy().AndRaise(ex)
    mock.info().AndRaise(ex)
    self.mox.ReplayAll()

    def fake_lookup_by_name(instance_name):
        return mock

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
    instance = {"name": "instancename", "id": "instanceid",
                "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
    # NOTE(vish): verifies destroy doesn't raise if the instance disappears
    conn._destroy(instance)
+
def test_undefine_domain_with_not_found_instance(self):
    """_undefine_domain ignores a libvirtError whose code is
    VIR_ERR_NO_DOMAIN (instance already gone from libvirt).
    """
    def fake_lookup(instance_name):
        raise libvirt.libvirtError("not found")

    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
    libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
    self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code")
    libvirt.libvirtError.get_error_code().AndReturn(
        libvirt.VIR_ERR_NO_DOMAIN)

    self.mox.ReplayAll()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = {'name': 'test'}

    # NOTE(wenjianhn): verifies undefine doesn't raise if the
    # instance disappears
    conn._undefine_domain(instance)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_list_instance_domains")
+ def test_disk_over_committed_size_total(self, mock_list):
+ # Ensure destroy calls managedSaveRemove for saved instance.
+ class DiagFakeDomain(object):
+ def __init__(self, name):
+ self._name = name
+
+ def ID(self):
+ return 1
+
+ def name(self):
+ return self._name
+
+ def UUIDString(self):
+ return "19479fee-07a5-49bb-9138-d3738280d63c"
+
+ def XMLDesc(self, flags):
+ return "<domain/>"
+
+ mock_list.return_value = [
+ DiagFakeDomain("instance0000001"),
+ DiagFakeDomain("instance0000002")]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ fake_disks = {'instance0000001':
+ [{'type': 'qcow2', 'path': '/somepath/disk1',
+ 'virt_disk_size': '10737418240',
+ 'backing_file': '/somepath/disk1',
+ 'disk_size': '83886080',
+ 'over_committed_disk_size': '10653532160'}],
+ 'instance0000002':
+ [{'type': 'raw', 'path': '/somepath/disk2',
+ 'virt_disk_size': '0',
+ 'backing_file': '/somepath/disk2',
+ 'disk_size': '10737418240',
+ 'over_committed_disk_size': '0'}]}
+
+ def get_info(instance_name, xml, **kwargs):
+ return jsonutils.dumps(fake_disks.get(instance_name))
+
+ with mock.patch.object(drvr,
+ "_get_instance_disk_info") as mock_info:
+ mock_info.side_effect = get_info
+
+ result = drvr._get_disk_over_committed_size_total()
+ self.assertEqual(result, 10653532160)
+ mock_list.assert_called_with()
+ mock_info.assert_called()
+
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_list_instance_domains")
    def test_disk_over_committed_size_total_eperm(self, mock_list):
        # Ensure an EACCES error while reading one instance's disk info
        # is skipped and the other instances still contribute to the
        # over-committed total.
        class DiagFakeDomain(object):
            def __init__(self, name):
                self._name = name

            def ID(self):
                return 1

            def name(self):
                return self._name

            def UUIDString(self):
                return "19479fee-07a5-49bb-9138-d3738280d63c"

            def XMLDesc(self, flags):
                return "<domain/>"

        mock_list.return_value = [
            DiagFakeDomain("instance0000001"),
            DiagFakeDomain("instance0000002")]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        fake_disks = {'instance0000001':
                      [{'type': 'qcow2', 'path': '/somepath/disk1',
                        'virt_disk_size': '10737418240',
                        'backing_file': '/somepath/disk1',
                        'disk_size': '83886080',
                        'over_committed_disk_size': '10653532160'}],
                      'instance0000002':
                      [{'type': 'raw', 'path': '/somepath/disk2',
                        'virt_disk_size': '0',
                        'backing_file': '/somepath/disk2',
                        'disk_size': '10737418240',
                        'over_committed_disk_size': '21474836480'}]}

        def side_effect(name, dom):
            # First instance is unreadable; only the second one counts.
            if name == 'instance0000001':
                raise OSError(errno.EACCES, 'Permission denied')
            if name == 'instance0000002':
                return jsonutils.dumps(fake_disks.get(name))
        get_disk_info = mock.Mock()
        get_disk_info.side_effect = side_effect
        drvr._get_instance_disk_info = get_disk_info

        result = drvr._get_disk_over_committed_size_total()
        # Equals instance0000002's over_committed_disk_size alone.
        self.assertEqual(21474836480, result)
        mock_list.assert_called_with()
+
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_list_instance_domains",
                       return_value=[mock.MagicMock(name='foo')])
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_instance_disk_info",
                       side_effect=exception.VolumeBDMPathNotFound(path='bar'))
    def test_disk_over_committed_size_total_bdm_not_found(self,
                                                          mock_get_disk_info,
                                                          mock_list_domains):
        # Tests that we handle VolumeBDMPathNotFound gracefully: the
        # failing domain contributes nothing, so the total is 0.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertEqual(0, drvr._get_disk_over_committed_size_total())
+
    def test_cpu_info(self):
        """_get_cpu_info() must serialize the host CPU capabilities
        (vendor, model, arch, features, topology) to JSON."""
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        def get_host_capabilities_stub(self):
            # Fake host: AMD Opteron_G4, 4 sockets x 2 cores x 1 thread,
            # with two CPU feature flags.
            cpu = vconfig.LibvirtConfigCPU()
            cpu.model = "Opteron_G4"
            cpu.vendor = "AMD"
            cpu.arch = arch.X86_64

            cpu.cores = 2
            cpu.threads = 1
            cpu.sockets = 4

            cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic"))
            cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow"))

            caps = vconfig.LibvirtConfigCaps()
            caps.host = vconfig.LibvirtConfigCapsHost()
            caps.host.cpu = cpu

            # Guest capabilities are present but irrelevant to the
            # returned CPU info.
            guest = vconfig.LibvirtConfigGuest()
            guest.ostype = vm_mode.HVM
            guest.arch = arch.X86_64
            guest.domtype = ["kvm"]
            caps.guests.append(guest)

            guest = vconfig.LibvirtConfigGuest()
            guest.ostype = vm_mode.HVM
            guest.arch = arch.I686
            guest.domtype = ["kvm"]
            caps.guests.append(guest)

            return caps

        self.stubs.Set(libvirt_driver.LibvirtDriver,
                       '_get_host_capabilities',
                       get_host_capabilities_stub)

        want = {"vendor": "AMD",
                "features": ["extapic", "3dnow"],
                "model": "Opteron_G4",
                "arch": arch.X86_64,
                "topology": {"cores": 2, "threads": 1, "sockets": 4}}
        got = jsonutils.loads(conn._get_cpu_info())
        self.assertEqual(want, got)
+
    def test_get_pcidev_info(self):
        """_get_pcidev_info() must parse node device XML into a PCI
        device dict, distinguishing PFs (type-PF) from VFs (type-VF,
        which additionally carry their parent's 'phys_function')."""

        def fake_nodeDeviceLookupByName(name):
            # Serve canned node-device XML from the test fixtures.
            return FakeNodeDevice(_fake_NodeDevXml[name])

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.nodeDeviceLookupByName =\
            fake_nodeDeviceLookupByName

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        actualvf = conn._get_pcidev_info("pci_0000_04_00_3")
        expect_vf = {
            "dev_id": "pci_0000_04_00_3",
            "address": "0000:04:00.3",
            "product_id": '1521',
            "vendor_id": '8086',
            "label": 'label_8086_1521',
            "dev_type": 'type-PF',
            }

        self.assertEqual(actualvf, expect_vf)
        actualvf = conn._get_pcidev_info("pci_0000_04_10_7")
        expect_vf = {
            "dev_id": "pci_0000_04_10_7",
            "address": "0000:04:10.7",
            "product_id": '1520',
            "vendor_id": '8086',
            "label": 'label_8086_1520',
            "dev_type": 'type-VF',
            "phys_function": '0000:04:00.3',
            }

        self.assertEqual(actualvf, expect_vf)
+
+ def test_pci_device_assignable(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn.dev_filter, 'device_assignable', lambda x: True)
+
+ fake_dev = {'dev_type': 'type-PF'}
+ self.assertFalse(conn._pci_device_assignable(fake_dev))
+ fake_dev = {'dev_type': 'type-VF'}
+ self.assertTrue(conn._pci_device_assignable(fake_dev))
+ fake_dev = {'dev_type': 'type-PCI'}
+ self.assertTrue(conn._pci_device_assignable(fake_dev))
+
    def test_list_devices_not_supported(self):
        """listDevices raising VIR_ERR_NO_SUPPORT yields an empty JSON
        list; any other libvirt error must propagate."""
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # Handle just the NO_SUPPORT error
        not_supported_exc = fakelibvirt.make_libvirtError(
            libvirt.libvirtError,
            'this function is not supported by the connection driver:'
            ' virNodeNumOfDevices',
            error_code=libvirt.VIR_ERR_NO_SUPPORT)

        with mock.patch.object(conn._conn, 'listDevices',
                               side_effect=not_supported_exc):
            self.assertEqual('[]', conn._get_pci_passthrough_devices())

        # We cache not supported status to avoid emitting too many logging
        # messages. Clear this value to test the other exception case.
        del conn._list_devices_supported

        # Other errors should not be caught
        other_exc = fakelibvirt.make_libvirtError(
            libvirt.libvirtError,
            'other exc',
            error_code=libvirt.VIR_ERR_NO_DOMAIN)

        with mock.patch.object(conn._conn, 'listDevices',
                               side_effect=other_exc):
            self.assertRaises(libvirt.libvirtError,
                              conn._get_pci_passthrough_devices)
+
    def test_get_pci_passthrough_devices(self):
        """_get_pci_passthrough_devices() must return JSON for the
        assignable devices; the comparison loop below only checks the
        first returned device against the VF expectation."""

        def fakelistDevices(caps, fakeargs=0):
            return ['pci_0000_04_00_3', 'pci_0000_04_10_7']

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.listDevices = fakelistDevices

        def fake_nodeDeviceLookupByName(name):
            return FakeNodeDevice(_fake_NodeDevXml[name])

        libvirt_driver.LibvirtDriver._conn.nodeDeviceLookupByName =\
            fake_nodeDeviceLookupByName
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn.dev_filter, 'device_assignable', lambda x: x)
        actjson = conn._get_pci_passthrough_devices()

        # NOTE(review): expectvfs[0] is never compared by the loop below,
        # and its "address" looks like a typo for "0000:04:00.3" — only
        # expectvfs[1] (the VF) is actually asserted against.
        expectvfs = [
            {
                "dev_id": "pci_0000_04_00_3",
                "address": "0000:04:10.3",
                "product_id": '1521',
                "vendor_id": '8086',
                "dev_type": 'type-PF',
                "phys_function": None},
            {
                "dev_id": "pci_0000_04_10_7",
                "domain": 0,
                "address": "0000:04:10.7",
                "product_id": '1520',
                "vendor_id": '8086',
                "dev_type": 'type-VF',
                "phys_function": [('0x0000', '0x04', '0x00', '0x3')],
            }
        ]

        actctualvfs = jsonutils.loads(actjson)
        for key in actctualvfs[0].keys():
            # phys_function/label formats differ between the raw XML
            # parse and the expectation, so they are skipped here.
            if key not in ['phys_function', 'virt_functions', 'label']:
                self.assertEqual(actctualvfs[0][key], expectvfs[1][key])
+
+ def _fake_caps_numa_topology(self):
+ topology = vconfig.LibvirtConfigCapsNUMATopology()
+
+ cell_0 = vconfig.LibvirtConfigCapsNUMACell()
+ cell_0.id = 0
+ cell_0.memory = 1024 * units.Ki
+ cpu_0_0 = vconfig.LibvirtConfigCapsNUMACPU()
+ cpu_0_0.id = 0
+ cpu_0_0.socket_id = 0
+ cpu_0_0.core_id = 0
+ cpu_0_0.sibling = 0
+ cpu_0_1 = vconfig.LibvirtConfigCapsNUMACPU()
+ cpu_0_1.id = 1
+ cpu_0_1.socket_id = 0
+ cpu_0_1.core_id = 1
+ cpu_0_1.sibling = 1
+ cell_0.cpus = [cpu_0_0, cpu_0_1]
+
+ cell_1 = vconfig.LibvirtConfigCapsNUMACell()
+ cell_1.id = 1
+ cell_1.memory = 1024 * units.Ki
+ cpu_1_0 = vconfig.LibvirtConfigCapsNUMACPU()
+ cpu_1_0.id = 2
+ cpu_1_0.socket_id = 1
+ cpu_1_0.core_id = 0
+ cpu_1_0.sibling = 2
+ cpu_1_1 = vconfig.LibvirtConfigCapsNUMACPU()
+ cpu_1_1.id = 3
+ cpu_1_1.socket_id = 1
+ cpu_1_1.core_id = 1
+ cpu_1_1.sibling = 3
+ cell_1.cpus = [cpu_1_0, cpu_1_1]
+
+ topology.cells = [cell_0, cell_1]
+ return topology
+
    def test_get_host_numa_topology(self):
        """The host NUMA topology must be filtered by the vcpu pin set:
        with pins {0, 1, 3}, cell 0 keeps cpus 0,1 and cell 1 only 3."""
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.topology = self._fake_caps_numa_topology()

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        expected_topo_dict = {'cells': [
                                {'cpus': '0,1', 'cpu_usage': 0,
                                  'mem': {'total': 1024, 'used': 0},
                                  'id': 0},
                                {'cpus': '3', 'cpu_usage': 0,
                                  'mem': {'total': 1024, 'used': 0},
                                  'id': 1}]}
        with contextlib.nested(
                mock.patch.object(conn, '_has_min_version', return_value=True),
                mock.patch.object(
                    conn, '_get_host_capabilities', return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([0, 1, 3]))
                ):
            got_topo = conn._get_host_numa_topology()
            got_topo_dict = got_topo._to_dict()
            self.assertThat(
                    expected_topo_dict, matchers.DictMatches(got_topo_dict))
+
    def test_get_host_numa_topology_empty(self):
        """A host reporting no NUMA topology in its capabilities must
        yield None rather than an empty topology object."""
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.topology = None

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with contextlib.nested(
                mock.patch.object(conn, '_has_min_version', return_value=True),
                mock.patch.object(conn, '_get_host_capabilities',
                                  return_value=caps)
                ) as (has_min_version, get_caps):
            self.assertIsNone(conn._get_host_numa_topology())
        get_caps.assert_called_once_with()
+
    def test_get_host_numa_topology_not_supported(self):
        # Tests that libvirt isn't new enough to support numa topology:
        # the version gate alone must short-circuit to None.
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with mock.patch.object(conn, '_has_min_version', return_value=False):
            self.assertIsNone(conn._get_host_numa_topology())
+
    def test_diagnostic_vcpus_exception(self):
        """vcpus() failing must only drop the cpuN_time entries (and
        leave cpu_details empty); disk, nic and memory stats remain."""
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """

        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                # The stat being exercised: cpu info is unavailable.
                raise libvirt.libvirtError('vcpus missing')

            def blockStats(self, path):
                return (169L, 688640L, 0L, 0L, -1L)

            def interfaceStats(self, path):
                return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)

            def memoryStats(self):
                return {'actual': 220160L, 'rss': 200164L}

            def maxMemory(self):
                return 280160L

        def fake_lookup_name(name):
            return DiagFakeDomain()

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        actual = conn.get_diagnostics({"name": "testvirt"})
        # Note: no cpuN_time keys — the vcpus() failure is swallowed.
        expect = {'vda_read': 688640L,
                  'vda_read_req': 169L,
                  'vda_write': 0L,
                  'vda_write_req': 0L,
                  'vda_errors': -1L,
                  'vdb_read': 688640L,
                  'vdb_read_req': 169L,
                  'vdb_write': 0L,
                  'vdb_write_req': 0L,
                  'vdb_errors': -1L,
                  'memory': 280160L,
                  'memory-actual': 220160L,
                  'memory-rss': 200164L,
                  'vnet0_rx': 4408L,
                  'vnet0_rx_drop': 0L,
                  'vnet0_rx_errors': 0L,
                  'vnet0_rx_packets': 82L,
                  'vnet0_tx': 0L,
                  'vnet0_tx_drop': 0L,
                  'vnet0_tx_errors': 0L,
                  'vnet0_tx_packets': 0L,
                  }
        self.assertEqual(actual, expect)

        # Freeze "now" 10 seconds after launch so uptime == 10.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        timeutils.set_time_override(diags_time)

        actual = conn.get_instance_diagnostics({"name": "testvirt",
                                                "launched_at": lt})
        expected = {'config_drive': False,
                    'cpu_details': [],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640L,
                                      'read_requests': 169L,
                                      'write_bytes': 0L,
                                      'write_requests': 0L},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640L,
                                      'read_requests': 169L,
                                      'write_bytes': 0L,
                                      'write_requests': 0L}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0L,
                                     'rx_errors': 0L,
                                     'rx_octets': 4408L,
                                     'rx_packets': 82L,
                                     'tx_drop': 0L,
                                     'tx_errors': 0L,
                                     'tx_octets': 0L,
                                     'tx_packets': 0L}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
+
    def test_diagnostic_blockstats_exception(self):
        """blockStats() failing must only drop the per-disk entries
        (and leave disk_details empty); cpu, nic and memory remain."""
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """

        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                return ([(0, 1, 15340000000L, 0),
                         (1, 1, 1640000000L, 0),
                         (2, 1, 3040000000L, 0),
                         (3, 1, 1420000000L, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])

            def blockStats(self, path):
                # The stat being exercised: disk stats are unavailable.
                raise libvirt.libvirtError('blockStats missing')

            def interfaceStats(self, path):
                return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)

            def memoryStats(self):
                return {'actual': 220160L, 'rss': 200164L}

            def maxMemory(self):
                return 280160L

        def fake_lookup_name(name):
            return DiagFakeDomain()

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        actual = conn.get_diagnostics({"name": "testvirt"})
        # Note: no vda_*/vdb_* keys — the blockStats() failure is swallowed.
        expect = {'cpu0_time': 15340000000L,
                  'cpu1_time': 1640000000L,
                  'cpu2_time': 3040000000L,
                  'cpu3_time': 1420000000L,
                  'memory': 280160L,
                  'memory-actual': 220160L,
                  'memory-rss': 200164L,
                  'vnet0_rx': 4408L,
                  'vnet0_rx_drop': 0L,
                  'vnet0_rx_errors': 0L,
                  'vnet0_rx_packets': 82L,
                  'vnet0_tx': 0L,
                  'vnet0_tx_drop': 0L,
                  'vnet0_tx_errors': 0L,
                  'vnet0_tx_packets': 0L,
                  }
        self.assertEqual(actual, expect)

        # Freeze "now" 10 seconds after launch so uptime == 10.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        timeutils.set_time_override(diags_time)

        actual = conn.get_instance_diagnostics({"name": "testvirt",
                                                "launched_at": lt})
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000L},
                                    {'time': 1640000000L},
                                    {'time': 3040000000L},
                                    {'time': 1420000000L}],
                    'disk_details': [],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0L,
                                     'rx_errors': 0L,
                                     'rx_octets': 4408L,
                                     'rx_packets': 82L,
                                     'tx_drop': 0L,
                                     'tx_errors': 0L,
                                     'tx_octets': 0L,
                                     'tx_packets': 0L}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
+
    def test_diagnostic_interfacestats_exception(self):
        """interfaceStats() failing must only drop the vnet0_* entries
        (and leave nic_details empty); cpu, disk and memory remain."""
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """

        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                return ([(0, 1, 15340000000L, 0),
                         (1, 1, 1640000000L, 0),
                         (2, 1, 3040000000L, 0),
                         (3, 1, 1420000000L, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])

            def blockStats(self, path):
                return (169L, 688640L, 0L, 0L, -1L)

            def interfaceStats(self, path):
                # The stat being exercised: nic stats are unavailable.
                raise libvirt.libvirtError('interfaceStat missing')

            def memoryStats(self):
                return {'actual': 220160L, 'rss': 200164L}

            def maxMemory(self):
                return 280160L

        def fake_lookup_name(name):
            return DiagFakeDomain()

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        actual = conn.get_diagnostics({"name": "testvirt"})
        # Note: no vnet0_* keys — the interfaceStats() failure is swallowed.
        expect = {'cpu0_time': 15340000000L,
                  'cpu1_time': 1640000000L,
                  'cpu2_time': 3040000000L,
                  'cpu3_time': 1420000000L,
                  'vda_read': 688640L,
                  'vda_read_req': 169L,
                  'vda_write': 0L,
                  'vda_write_req': 0L,
                  'vda_errors': -1L,
                  'vdb_read': 688640L,
                  'vdb_read_req': 169L,
                  'vdb_write': 0L,
                  'vdb_write_req': 0L,
                  'vdb_errors': -1L,
                  'memory': 280160L,
                  'memory-actual': 220160L,
                  'memory-rss': 200164L,
                  }
        self.assertEqual(actual, expect)

        # Freeze "now" 10 seconds after launch so uptime == 10.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        timeutils.set_time_override(diags_time)

        actual = conn.get_instance_diagnostics({"name": "testvirt",
                                                "launched_at": lt})
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000L},
                                    {'time': 1640000000L},
                                    {'time': 3040000000L},
                                    {'time': 1420000000L}],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640L,
                                      'read_requests': 169L,
                                      'write_bytes': 0L,
                                      'write_requests': 0L},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640L,
                                      'read_requests': 169L,
                                      'write_bytes': 0L,
                                      'write_requests': 0L}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
+
    def test_diagnostic_memorystats_exception(self):
        """memoryStats() failing must only drop memory-actual and
        memory-rss; 'memory' (from maxMemory) and all other stats
        remain."""
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """

        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                return ([(0, 1, 15340000000L, 0),
                         (1, 1, 1640000000L, 0),
                         (2, 1, 3040000000L, 0),
                         (3, 1, 1420000000L, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])

            def blockStats(self, path):
                return (169L, 688640L, 0L, 0L, -1L)

            def interfaceStats(self, path):
                return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)

            def memoryStats(self):
                # The stat being exercised: memory stats are unavailable.
                raise libvirt.libvirtError('memoryStats missing')

            def maxMemory(self):
                return 280160L

        def fake_lookup_name(name):
            return DiagFakeDomain()

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        actual = conn.get_diagnostics({"name": "testvirt"})
        # Note: no memory-actual/memory-rss keys — the memoryStats()
        # failure is swallowed; 'memory' comes from maxMemory().
        expect = {'cpu0_time': 15340000000L,
                  'cpu1_time': 1640000000L,
                  'cpu2_time': 3040000000L,
                  'cpu3_time': 1420000000L,
                  'vda_read': 688640L,
                  'vda_read_req': 169L,
                  'vda_write': 0L,
                  'vda_write_req': 0L,
                  'vda_errors': -1L,
                  'vdb_read': 688640L,
                  'vdb_read_req': 169L,
                  'vdb_write': 0L,
                  'vdb_write_req': 0L,
                  'vdb_errors': -1L,
                  'memory': 280160L,
                  'vnet0_rx': 4408L,
                  'vnet0_rx_drop': 0L,
                  'vnet0_rx_errors': 0L,
                  'vnet0_rx_packets': 82L,
                  'vnet0_tx': 0L,
                  'vnet0_tx_drop': 0L,
                  'vnet0_tx_errors': 0L,
                  'vnet0_tx_packets': 0L,
                  }
        self.assertEqual(actual, expect)

        # Freeze "now" 10 seconds after launch so uptime == 10.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        timeutils.set_time_override(diags_time)

        actual = conn.get_instance_diagnostics({"name": "testvirt",
                                                "launched_at": lt})
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000L},
                                    {'time': 1640000000L},
                                    {'time': 3040000000L},
                                    {'time': 1420000000L}],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640L,
                                      'read_requests': 169L,
                                      'write_bytes': 0L,
                                      'write_requests': 0L},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640L,
                                      'read_requests': 169L,
                                      'write_bytes': 0L,
                                      'write_requests': 0L}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0L,
                                     'rx_errors': 0L,
                                     'rx_octets': 4408L,
                                     'rx_packets': 82L,
                                     'tx_drop': 0L,
                                     'tx_errors': 0L,
                                     'tx_octets': 0L,
                                     'tx_packets': 0L}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
+
    def test_diagnostic_full(self):
        """Happy path: with every stat source working, both the legacy
        flat dict and the versioned diagnostics object are complete."""
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """

        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                return ([(0, 1, 15340000000L, 0),
                         (1, 1, 1640000000L, 0),
                         (2, 1, 3040000000L, 0),
                         (3, 1, 1420000000L, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])

            def blockStats(self, path):
                return (169L, 688640L, 0L, 0L, -1L)

            def interfaceStats(self, path):
                return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)

            def memoryStats(self):
                return {'actual': 220160L, 'rss': 200164L}

            def maxMemory(self):
                return 280160L

        def fake_lookup_name(name):
            return DiagFakeDomain()

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        actual = conn.get_diagnostics({"name": "testvirt"})
        expect = {'cpu0_time': 15340000000L,
                  'cpu1_time': 1640000000L,
                  'cpu2_time': 3040000000L,
                  'cpu3_time': 1420000000L,
                  'vda_read': 688640L,
                  'vda_read_req': 169L,
                  'vda_write': 0L,
                  'vda_write_req': 0L,
                  'vda_errors': -1L,
                  'vdb_read': 688640L,
                  'vdb_read_req': 169L,
                  'vdb_write': 0L,
                  'vdb_write_req': 0L,
                  'vdb_errors': -1L,
                  'memory': 280160L,
                  'memory-actual': 220160L,
                  'memory-rss': 200164L,
                  'vnet0_rx': 4408L,
                  'vnet0_rx_drop': 0L,
                  'vnet0_rx_errors': 0L,
                  'vnet0_rx_packets': 82L,
                  'vnet0_tx': 0L,
                  'vnet0_tx_drop': 0L,
                  'vnet0_tx_errors': 0L,
                  'vnet0_tx_packets': 0L,
                  }
        self.assertEqual(actual, expect)

        # Freeze "now" 10 seconds after launch so uptime == 10.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        timeutils.set_time_override(diags_time)

        actual = conn.get_instance_diagnostics({"name": "testvirt",
                                                "launched_at": lt})
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000L},
                                    {'time': 1640000000L},
                                    {'time': 3040000000L},
                                    {'time': 1420000000L}],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640L,
                                      'read_requests': 169L,
                                      'write_bytes': 0L,
                                      'write_requests': 0L},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640L,
                                      'read_requests': 169L,
                                      'write_bytes': 0L,
                                      'write_requests': 0L}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0L,
                                     'rx_errors': 0L,
                                     'rx_octets': 4408L,
                                     'rx_packets': 82L,
                                     'tx_drop': 0L,
                                     'tx_errors': 0L,
                                     'tx_octets': 0L,
                                     'tx_packets': 0L}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
+
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_list_instance_domains")
    def test_failing_vcpu_count(self, mock_list):
        """Domain can fail to return the vcpu description in case it's
        just starting up or shutting down. Make sure None is handled
        gracefully.
        """

        class DiagFakeDomain(object):
            def __init__(self, vcpus):
                self._vcpus = vcpus

            def vcpus(self):
                if self._vcpus is None:
                    raise libvirt.libvirtError("fake-error")
                else:
                    return ([1] * self._vcpus, [True] * self._vcpus)

            def ID(self):
                return 1

            def name(self):
                return "instance000001"

            def UUIDString(self):
                return "19479fee-07a5-49bb-9138-d3738280d63c"

        # One failing domain and one with 5 vcpus.
        mock_list.return_value = [
            DiagFakeDomain(None), DiagFakeDomain(5)]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # The failing domain is skipped; only the healthy one counts.
        self.assertEqual(5, drvr._get_vcpu_used())
        mock_list.assert_called_with()
+
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_list_instance_domains")
    def test_failing_vcpu_count_none(self, mock_list):
        """Domain will return zero if the current number of vcpus used
        is None. This is in case of VM state starting up or shutting
        down. None type returned is counted as zero.
        """

        class DiagFakeDomain(object):
            def __init__(self):
                pass

            def vcpus(self):
                # Simulates libvirt returning no vcpu info at all.
                return None

            def ID(self):
                return 1

            def name(self):
                return "instance000001"

        mock_list.return_value = [DiagFakeDomain()]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertEqual(0, drvr._get_vcpu_used())
        mock_list.assert_called_with()
+
    def test_get_memory_used_normal(self):
        """Non-xen path: used memory is derived from /proc/meminfo as
        MemTotal - (MemFree + Buffers + Cached), in MiB:
        (16194180 - (233092 + 567708 + 8362404)) kB -> 6866 MiB."""
        m = mock.mock_open(read_data="""
MemTotal:       16194180 kB
MemFree:          233092 kB
MemAvailable:    8892356 kB
Buffers:          567708 kB
Cached:          8362404 kB
SwapCached:            0 kB
Active:          8381604 kB
""")
        with contextlib.nested(
                mock.patch("__builtin__.open", m, create=True),
                mock.patch.object(libvirt_driver.LibvirtDriver,
                                  "_conn"),
                mock.patch('sys.platform', 'linux2'),
                ) as (mock_file, mock_conn, mock_platform):
            mock_conn.getInfo.return_value = [
                arch.X86_64, 15814L, 8, 1208, 1, 1, 4, 2]

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            self.assertEqual(6866, drvr._get_memory_mb_used())
+
    def test_get_memory_used_xen(self):
        """With virt_type=xen, used memory accounting combines the
        guest domains' memory with dom0's /proc/meminfo figures.
        Domains are listed with only_guests=False so dom0 (id 0) is
        included and handled specially."""
        self.flags(virt_type='xen', group='libvirt')

        class DiagFakeDomain(object):
            def __init__(self, id, memmb):
                self.id = id
                self.memmb = memmb

            def info(self):
                # [state, maxMem, memory] — only memory (KiB) is read.
                return [0, 0, self.memmb * 1024]

            def ID(self):
                return self.id

            def name(self):
                return "instance000001"

            def UUIDString(self):
                return str(uuid.uuid4())

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        m = mock.mock_open(read_data="""
MemTotal:       16194180 kB
MemFree:          233092 kB
MemAvailable:    8892356 kB
Buffers:          567708 kB
Cached:          8362404 kB
SwapCached:            0 kB
Active:          8381604 kB
""")

        with contextlib.nested(
                mock.patch("__builtin__.open", m, create=True),
                mock.patch.object(libvirt_driver.LibvirtDriver,
                                  "_list_instance_domains"),
                mock.patch.object(libvirt_driver.LibvirtDriver,
                                  "_conn"),
                mock.patch('sys.platform', 'linux2'),
                ) as (mock_file, mock_list, mock_conn, mock_platform):
            # Domain id 0 is dom0; the other two are guests.
            mock_list.return_value = [
                DiagFakeDomain(0, 15814),
                DiagFakeDomain(1, 750),
                DiagFakeDomain(2, 1042)]
            mock_conn.getInfo.return_value = [
                arch.X86_64, 15814L, 8, 1208, 1, 1, 4, 2]

            self.assertEqual(8657, drvr._get_memory_mb_used())
            mock_list.assert_called_with(only_guests=False)
+
+ def test_get_instance_capabilities(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ def get_host_capabilities_stub(self):
+ caps = vconfig.LibvirtConfigCaps()
+
+ guest = vconfig.LibvirtConfigGuest()
+ guest.ostype = 'hvm'
+ guest.arch = arch.X86_64
+ guest.domtype = ['kvm', 'qemu']
+ caps.guests.append(guest)
+
+ guest = vconfig.LibvirtConfigGuest()
+ guest.ostype = 'hvm'
+ guest.arch = arch.I686
+ guest.domtype = ['kvm']
+ caps.guests.append(guest)
+
+ return caps
+
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
+ '_get_host_capabilities',
+ get_host_capabilities_stub)
+
+ want = [(arch.X86_64, 'kvm', 'hvm'),
+ (arch.X86_64, 'qemu', 'hvm'),
+ (arch.I686, 'kvm', 'hvm')]
+ got = conn._get_instance_capabilities()
+ self.assertEqual(want, got)
+
+ def test_event_dispatch(self):
+ # Validate that the libvirt self-pipe for forwarding
+ # events between threads is working sanely
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ got_events = []
+
+ def handler(event):
+ got_events.append(event)
+
+ conn.register_event_listener(handler)
+
+ conn._init_events_pipe()
+
+ event1 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STARTED)
+ event2 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_PAUSED)
+ conn._queue_event(event1)
+ conn._queue_event(event2)
+ conn._dispatch_events()
+
+ want_events = [event1, event2]
+ self.assertEqual(want_events, got_events)
+
+ event3 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_RESUMED)
+ event4 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STOPPED)
+
+ conn._queue_event(event3)
+ conn._queue_event(event4)
+ conn._dispatch_events()
+
+ want_events = [event1, event2, event3, event4]
+ self.assertEqual(want_events, got_events)
+
    def test_event_lifecycle(self):
        # Validate that libvirt events are correctly translated
        # to Nova events
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        got_events = []

        def handler(event):
            got_events.append(event)

        conn.register_event_listener(handler)
        conn._init_events_pipe()
        fake_dom_xml = """
                <domain type='kvm'>
                    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                    </devices>
                </domain>
            """
        dom = FakeVirtDomain(fake_dom_xml,
                             "cef19ce0-0ca2-11df-855d-b19fbce37686")

        # Fire the raw libvirt callback; VIR_DOMAIN_EVENT_STOPPED must
        # come out as a Nova EVENT_LIFECYCLE_STOPPED for the same uuid.
        conn._event_lifecycle_callback(conn._conn,
                                       dom,
                                       libvirt.VIR_DOMAIN_EVENT_STOPPED,
                                       0,
                                       conn)
        conn._dispatch_events()
        self.assertEqual(len(got_events), 1)
        self.assertIsInstance(got_events[0], virtevent.LifecycleEvent)
        self.assertEqual(got_events[0].uuid,
                         "cef19ce0-0ca2-11df-855d-b19fbce37686")
        self.assertEqual(got_events[0].transition,
                         virtevent.EVENT_LIFECYCLE_STOPPED)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, 'emit_event')
+ def test_event_emit_delayed_call_now(self, emit_event_mock):
+ self.flags(virt_type="kvm", group="libvirt")
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ conn._event_emit_delayed(None)
+ emit_event_mock.assert_called_once_with(None)
+
+ @mock.patch.object(greenthread, 'spawn_after')
+ def test_event_emit_delayed_call_delayed(self, spawn_after_mock):
+ CONF.set_override("virt_type", "xen", group="libvirt")
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ event = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STOPPED)
+ conn._event_emit_delayed(event)
+ spawn_after_mock.assert_called_once_with(15, conn.emit_event, event)
+
+ @mock.patch.object(greenthread, 'spawn_after')
+ def test_event_emit_delayed_call_delayed_pending(self, spawn_after_mock):
+ self.flags(virt_type="xen", group="libvirt")
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ uuid = "cef19ce0-0ca2-11df-855d-b19fbce37686"
+ conn._events_delayed[uuid] = None
+ event = virtevent.LifecycleEvent(
+ uuid, virtevent.EVENT_LIFECYCLE_STOPPED)
+ conn._event_emit_delayed(event)
+ self.assertFalse(spawn_after_mock.called)
+
+ def test_event_delayed_cleanup(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ uuid = "cef19ce0-0ca2-11df-855d-b19fbce37686"
+ event = virtevent.LifecycleEvent(
+ uuid, virtevent.EVENT_LIFECYCLE_STARTED)
+ gt_mock = mock.Mock()
+ conn._events_delayed[uuid] = gt_mock
+ conn._event_delayed_cleanup(event)
+ gt_mock.cancel.assert_called_once_with()
+ self.assertNotIn(uuid, conn._events_delayed.keys())
+
+ def test_set_cache_mode(self):
+ self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ fake_conf = FakeConfigGuestDisk()
+
+ fake_conf.source_type = 'file'
+ conn._set_cache_mode(fake_conf)
+ self.assertEqual(fake_conf.driver_cache, 'directsync')
+
+ def test_set_cache_mode_invalid_mode(self):
+ self.flags(disk_cachemodes=['file=FAKE'], group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ fake_conf = FakeConfigGuestDisk()
+
+ fake_conf.source_type = 'file'
+ conn._set_cache_mode(fake_conf)
+ self.assertIsNone(fake_conf.driver_cache)
+
+ def test_set_cache_mode_invalid_object(self):
+ self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ fake_conf = FakeConfigGuest()
+
+ fake_conf.driver_cache = 'fake'
+ conn._set_cache_mode(fake_conf)
+ self.assertEqual(fake_conf.driver_cache, 'fake')
+
    def _test_shared_storage_detection(self, is_same):
        # Helper: stub out the ssh/touch/exists/cleanup sequence used by
        # _is_storage_shared_with() and return its verdict.  `is_same`
        # controls whether the remotely-touched file shows up locally
        # (i.e. whether the storage is shared).
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(conn, 'get_host_ip_addr')
        self.mox.StubOutWithMock(utils, 'execute')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(os, 'unlink')
        # Local IP 'bar' differs from dest host 'foo', so the driver has
        # to probe by touching a file over ssh.
        conn.get_host_ip_addr().AndReturn('bar')
        utils.execute('ssh', 'foo', 'touch', mox.IgnoreArg())
        os.path.exists(mox.IgnoreArg()).AndReturn(is_same)
        if is_same:
            # Shared: the probe file is visible locally, removed locally.
            os.unlink(mox.IgnoreArg())
        else:
            # Not shared: the probe file must be removed over ssh.
            utils.execute('ssh', 'foo', 'rm', mox.IgnoreArg())
        self.mox.ReplayAll()
        return conn._is_storage_shared_with('foo', '/path')
+
+ def test_shared_storage_detection_same_host(self):
+ self.assertTrue(self._test_shared_storage_detection(True))
+
+ def test_shared_storage_detection_different_host(self):
+ self.assertFalse(self._test_shared_storage_detection(False))
+
    def test_shared_storage_detection_easy(self):
        # When the destination host resolves to our own IP the driver can
        # short-circuit to "shared" without any ssh probing; only
        # get_host_ip_addr is expected to be called (mox would fail on
        # any call to the other stubs).
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(conn, 'get_host_ip_addr')
        self.mox.StubOutWithMock(utils, 'execute')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(os, 'unlink')
        conn.get_host_ip_addr().AndReturn('foo')
        self.mox.ReplayAll()
        self.assertTrue(conn._is_storage_shared_with('foo', '/path'))
+
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
+ def test_get_domain_info_with_more_return(self, lookup_mock):
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ dom_mock = mock.MagicMock()
+ dom_mock.info.return_value = [
+ 1, 2048, 737, 8, 12345, 888888
+ ]
+ dom_mock.ID.return_value = mock.sentinel.instance_id
+ lookup_mock.return_value = dom_mock
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ info = conn.get_info(instance)
+ expect = {'state': 1,
+ 'max_mem': 2048,
+ 'mem': 737,
+ 'num_cpu': 8,
+ 'cpu_time': 12345,
+ 'id': mock.sentinel.instance_id}
+ self.assertEqual(expect, info)
+ dom_mock.info.assert_called_once_with()
+ dom_mock.ID.assert_called_once_with()
+ lookup_mock.assert_called_once_with(instance['name'])
+
+ @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
+ @mock.patch.object(encodeutils, 'safe_decode')
+ def test_create_domain(self, mock_safe_decode, mock_get_inst_path):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ mock_domain = mock.MagicMock()
+ mock_instance = mock.MagicMock()
+ mock_get_inst_path.return_value = '/tmp/'
+
+ domain = conn._create_domain(domain=mock_domain,
+ instance=mock_instance)
+
+ self.assertEqual(mock_domain, domain)
+ mock_get_inst_path.assertHasCalls([mock.call(mock_instance)])
+ mock_domain.createWithFlags.assertHasCalls([mock.call(0)])
+ self.assertEqual(2, mock_safe_decode.call_count)
+
    @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('nova.openstack.common.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_create_domain_lxc(self, mock_get_inst_path, mock_ensure_tree,
                               mock_setup_container, mock_get_info, mock_clean):
        """LXC boot: the container rootfs is built from the image backend,
        the nbd device is recorded in system_metadata and, once the guest
        reports RUNNING, the lxc namespace is cleaned up.
        """
        self.flags(virt_type='lxc', group='libvirt')
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        conn.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        conn.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        # RUNNING means the happy path: namespace cleanup, no teardown.
        mock_get_info.return_value = {'state': power_state.RUNNING}

        with contextlib.nested(
            mock.patch.object(conn, '_create_images_and_backing'),
            mock.patch.object(conn, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(conn, '_create_domain'),
            mock.patch.object(conn, 'plug_vifs'),
            mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(conn.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(conn.firewall_driver, 'apply_instance_filter')):
            conn._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [])

        # The nbd device chosen by setup_container must be recorded.
        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        mock_instance.save.assert_not_called()
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        conn.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                             'disk')])
        setup_container_call = mock.call('/tmp/test.img',
                                         container_dir='/tmp/rootfs',
                                         use_cow=CONF.use_cow_images)
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
+
    @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch.object(fake_libvirt_utils, 'chown_for_id_maps')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('nova.openstack.common.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_create_domain_lxc_id_maps(self, mock_get_inst_path,
                                       mock_ensure_tree, mock_setup_container,
                                       mock_chown, mock_get_info, mock_clean):
        """LXC boot with uid/gid maps configured: the rootfs must be
        chown'ed according to the parsed id-map objects before the
        container is started.
        """
        self.flags(virt_type='lxc', uid_maps=["0:1000:100"],
                   gid_maps=["0:1000:100"], group='libvirt')

        def chown_side_effect(path, id_maps):
            # Validate the configured "0:1000:100" strings were parsed
            # into one UID-map and one GID-map config object.
            self.assertEqual('/tmp/rootfs', path)
            self.assertIsInstance(id_maps[0], vconfig.LibvirtConfigGuestUIDMap)
            self.assertEqual(0, id_maps[0].start)
            self.assertEqual(1000, id_maps[0].target)
            self.assertEqual(100, id_maps[0].count)
            self.assertIsInstance(id_maps[1], vconfig.LibvirtConfigGuestGIDMap)
            self.assertEqual(0, id_maps[1].start)
            self.assertEqual(1000, id_maps[1].target)
            self.assertEqual(100, id_maps[1].count)

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        conn.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        conn.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_chown.side_effect = chown_side_effect
        mock_get_info.return_value = {'state': power_state.RUNNING}

        with contextlib.nested(
            mock.patch.object(conn, '_create_images_and_backing'),
            mock.patch.object(conn, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(conn, '_create_domain'),
            mock.patch.object(conn, 'plug_vifs'),
            mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(conn.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(conn.firewall_driver, 'apply_instance_filter')):
            conn._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [])

        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        mock_instance.save.assert_not_called()
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        conn.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                             'disk')])
        setup_container_call = mock.call('/tmp/test.img',
                                         container_dir='/tmp/rootfs',
                                         use_cow=CONF.use_cow_images)
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
+
    @mock.patch('nova.virt.disk.api.teardown_container')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('nova.openstack.common.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_create_domain_lxc_not_running(self, mock_get_inst_path,
                                           mock_ensure_tree,
                                           mock_setup_container,
                                           mock_get_info, mock_teardown):
        """LXC boot where the guest ends up SHUTDOWN instead of RUNNING:
        the container must be torn down rather than namespace-cleaned.
        """
        self.flags(virt_type='lxc', group='libvirt')
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        conn.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        conn.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        # SHUTDOWN state triggers the teardown branch below.
        mock_get_info.return_value = {'state': power_state.SHUTDOWN}

        with contextlib.nested(
            mock.patch.object(conn, '_create_images_and_backing'),
            mock.patch.object(conn, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(conn, '_create_domain'),
            mock.patch.object(conn, 'plug_vifs'),
            mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(conn.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(conn.firewall_driver, 'apply_instance_filter')):
            conn._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [])

        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        mock_instance.save.assert_not_called()
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        conn.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                             'disk')])
        setup_container_call = mock.call('/tmp/test.img',
                                         container_dir='/tmp/rootfs',
                                         use_cow=CONF.use_cow_images)
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        # Teardown, not clean_lxc_namespace, because the guest never ran.
        teardown_call = mock.call(container_dir='/tmp/rootfs')
        mock_teardown.assert_has_calls([teardown_call])
+
    def test_create_domain_define_xml_fails(self):
        """Tests that the xml is logged when defining the domain fails."""
        fake_xml = "<test>this is a test</test>"

        def fake_defineXML(xml):
            # Confirm the driver handed libvirt exactly our XML before
            # simulating the failure.
            self.assertEqual(fake_xml, xml)
            raise libvirt.libvirtError('virDomainDefineXML() failed')

        self.log_error_called = False

        def fake_error(msg, *args):
            self.log_error_called = True
            # The logged message must include the offending XML.
            self.assertIn(fake_xml, msg % args)

        self.stubs.Set(nova.virt.libvirt.driver.LOG, 'error', fake_error)

        self.create_fake_libvirt_mock(defineXML=fake_defineXML)
        self.mox.ReplayAll()
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        # The original libvirt error must still propagate to the caller.
        self.assertRaises(libvirt.libvirtError, conn._create_domain, fake_xml)
        self.assertTrue(self.log_error_called)
+
    def test_create_domain_with_flags_fails(self):
        """Tests that the xml is logged when creating the domain with flags
        fails
        """
        fake_xml = "<test>this is a test</test>"
        fake_domain = FakeVirtDomain(fake_xml)

        def fake_createWithFlags(launch_flags):
            raise libvirt.libvirtError('virDomainCreateWithFlags() failed')

        self.log_error_called = False

        def fake_error(msg, *args):
            self.log_error_called = True
            # The logged message must include the domain's XML.
            self.assertIn(fake_xml, msg % args)

        self.stubs.Set(fake_domain, 'createWithFlags', fake_createWithFlags)
        self.stubs.Set(nova.virt.libvirt.driver.LOG, 'error', fake_error)

        self.create_fake_libvirt_mock()
        self.mox.ReplayAll()
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        # The error must still propagate after being logged.
        self.assertRaises(libvirt.libvirtError, conn._create_domain,
                          domain=fake_domain)
        self.assertTrue(self.log_error_called)
+
    def test_create_domain_enable_hairpin_fails(self):
        """Tests that the xml is logged when enabling hairpin mode for the
        domain fails.
        """
        fake_xml = "<test>this is a test</test>"
        fake_domain = FakeVirtDomain(fake_xml)

        def fake_enable_hairpin(launch_flags):
            raise processutils.ProcessExecutionError('error')

        self.log_error_called = False

        def fake_error(msg, *args):
            self.log_error_called = True
            # The logged message must include the domain's XML.
            self.assertIn(fake_xml, msg % args)

        self.stubs.Set(nova.virt.libvirt.driver.LOG, 'error', fake_error)

        self.create_fake_libvirt_mock()
        self.mox.ReplayAll()
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.stubs.Set(conn, '_enable_hairpin', fake_enable_hairpin)

        # power_on=False skips createWithFlags so the hairpin failure is
        # the first error hit; it must propagate after being logged.
        self.assertRaises(processutils.ProcessExecutionError,
                          conn._create_domain,
                          domain=fake_domain,
                          power_on=False)
        self.assertTrue(self.log_error_called)
+
    def test_get_vnc_console(self):
        # A domain exposing a VNC <graphics> element should yield a
        # console object carrying the advertised port.
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<graphics type='vnc' port='5900'/>"
                    "</devices></domain>")

        vdmock = self.mox.CreateMock(libvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            # Only answer for our instance; anything else gets None.
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        vnc_dict = conn.get_vnc_console(self.context, instance)
        # The port is parsed straight from the XML attribute, hence str.
        self.assertEqual(vnc_dict.port, '5900')
+
    def test_get_vnc_console_unavailable(self):
        # A domain with no <graphics> element must raise
        # ConsoleTypeUnavailable rather than return a bogus console.
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices></devices></domain>")

        vdmock = self.mox.CreateMock(libvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.ConsoleTypeUnavailable,
                          conn.get_vnc_console, self.context, instance)
+
    def test_get_spice_console(self):
        # Same as the VNC case, but for a spice <graphics> element.
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<graphics type='spice' port='5950'/>"
                    "</devices></domain>")

        vdmock = self.mox.CreateMock(libvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        spice_dict = conn.get_spice_console(self.context, instance)
        # The port is parsed straight from the XML attribute, hence str.
        self.assertEqual(spice_dict.port, '5950')
+
    def test_get_spice_console_unavailable(self):
        # No spice <graphics> element -> ConsoleTypeUnavailable.
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices></devices></domain>")

        vdmock = self.mox.CreateMock(libvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.ConsoleTypeUnavailable,
                          conn.get_spice_console, self.context, instance)
+
+ def test_detach_volume_with_instance_not_found(self):
+ # Test that detach_volume() method does not raise exception,
+ # if the instance does not exist.
+
+ instance = objects.Instance(**self.test_instance)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_lookup_by_name',
+ side_effect=exception.InstanceNotFound(
+ instance_id=instance.name)),
+ mock.patch.object(conn, '_disconnect_volume')
+ ) as (_lookup_by_name, _disconnect_volume):
+ connection_info = {'driver_volume_type': 'fake'}
+ conn.detach_volume(connection_info, instance, '/dev/sda')
+ _lookup_by_name.assert_called_once_with(instance.name)
+ _disconnect_volume.assert_called_once_with(connection_info,
+ 'sda')
+
+ @mock.patch.object(objects.Flavor, 'get_by_id')
+ def _test_attach_detach_interface_get_config(self, method_name,
+ mock_flavor):
+ """Tests that the get_config() method is properly called in
+ attach_interface() and detach_interface().
+
+ method_name: either \"attach_interface\" or \"detach_interface\"
+ depending on the method to test.
+ """
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
+
+ instance = objects.Instance(**self.test_instance)
+ mock_flavor.return_value = instance.get_flavor()
+ network_info = _fake_network_info(self.stubs, 1)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ if method_name == "attach_interface":
+ fake_image_meta = {'id': instance['image_ref']}
+ elif method_name == "detach_interface":
+ fake_image_meta = None
+ else:
+ raise ValueError("Unhandled method %" % method_name)
+
+ if method_name == "attach_interface":
+ self.mox.StubOutWithMock(conn.firewall_driver,
+ 'setup_basic_filtering')
+ conn.firewall_driver.setup_basic_filtering(instance, network_info)
+
+ expected = conn.vif_driver.get_config(instance, network_info[0],
+ fake_image_meta,
+ instance.get_flavor(),
+ CONF.libvirt.virt_type)
+ self.mox.StubOutWithMock(conn.vif_driver, 'get_config')
+ conn.vif_driver.get_config(instance, network_info[0],
+ fake_image_meta,
+ mox.IsA(objects.Flavor),
+ CONF.libvirt.virt_type).\
+ AndReturn(expected)
+
+ self.mox.ReplayAll()
+
+ if method_name == "attach_interface":
+ conn.attach_interface(instance, fake_image_meta,
+ network_info[0])
+ elif method_name == "detach_interface":
+ conn.detach_interface(instance, network_info[0])
+ else:
+ raise ValueError("Unhandled method %" % method_name)
+
+ @mock.patch.object(lockutils, "external_lock")
+ def test_attach_interface_get_config(self, mock_lock):
+ """Tests that the get_config() method is properly called in
+ attach_interface().
+ """
+ mock_lock.return_value = threading.Semaphore()
+
+ self._test_attach_detach_interface_get_config("attach_interface")
+
+ def test_detach_interface_get_config(self):
+ """Tests that the get_config() method is properly called in
+ detach_interface().
+ """
+ self._test_attach_detach_interface_get_config("detach_interface")
+
    def test_default_root_device_name(self):
        # default_root_device_name() should combine the disk buses probed
        # for the 'disk' and 'cdrom' device types with blockinfo's root
        # info, and return the device name with a '/dev/' prefix.
        instance = {'uuid': 'fake_instance'}
        image_meta = {'id': 'fake'}
        # NOTE(review): 'detination_type' looks like a typo for
        # 'destination_type'; harmless here since get_root_info is
        # mocked, but worth confirming against the real BDM schema.
        root_bdm = {'source_type': 'image',
                    'detination_type': 'volume',
                    'image_id': 'fake_id'}
        self.flags(virt_type='fake_libvirt_type', group='libvirt')

        self.mox.StubOutWithMock(blockinfo, 'get_disk_bus_for_device_type')
        self.mox.StubOutWithMock(blockinfo, 'get_root_info')

        # InAnyOrder(): the driver may probe disk/cdrom in either order.
        blockinfo.get_disk_bus_for_device_type('fake_libvirt_type',
                                               image_meta,
                                               'disk').InAnyOrder().\
            AndReturn('virtio')
        blockinfo.get_disk_bus_for_device_type('fake_libvirt_type',
                                               image_meta,
                                               'cdrom').InAnyOrder().\
            AndReturn('ide')
        blockinfo.get_root_info('fake_libvirt_type',
                                image_meta, root_bdm,
                                'virtio', 'ide').AndReturn({'dev': 'vda'})
        self.mox.ReplayAll()

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertEqual(conn.default_root_device_name(instance, image_meta,
                                                       root_bdm), '/dev/vda')
+
    def test_default_device_names_for_instance(self):
        # The driver should delegate straight to
        # blockinfo.default_device_names() with its configured virt type.
        instance = {'uuid': 'fake_instance'}
        root_device_name = '/dev/vda'
        ephemerals = [{'device_name': 'vdb'}]
        swap = [{'device_name': 'vdc'}]
        block_device_mapping = [{'device_name': 'vdc'}]
        self.flags(virt_type='fake_libvirt_type', group='libvirt')

        self.mox.StubOutWithMock(blockinfo, 'default_device_names')

        blockinfo.default_device_names('fake_libvirt_type', mox.IgnoreArg(),
                                       instance, root_device_name,
                                       ephemerals, swap, block_device_mapping)
        self.mox.ReplayAll()

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        conn.default_device_names_for_instance(instance, root_device_name,
                                               ephemerals, swap,
                                               block_device_mapping)
+
+ def test_is_supported_fs_format(self):
+ supported_fs = [disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3,
+ disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS]
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ for fs in supported_fs:
+ self.assertTrue(conn.is_supported_fs_format(fs))
+
+ supported_fs = ['', 'dummy']
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ for fs in supported_fs:
+ self.assertFalse(conn.is_supported_fs_format(fs))
+
    def test_hypervisor_hostname_caching(self):
        # Make sure that the first hostname is always returned
        class FakeConn(object):
            def getHostname(self):
                pass

            def getLibVersion(self):
                return 99999

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        conn._wrapped_conn = FakeConn()
        self.mox.StubOutWithMock(conn._wrapped_conn, 'getHostname')
        # libvirt would report 'bar' on the second call, but the driver
        # must keep serving the cached 'foo'.
        conn._conn.getHostname().AndReturn('foo')
        conn._conn.getHostname().AndReturn('bar')
        self.mox.ReplayAll()
        self.assertEqual('foo', conn._get_hypervisor_hostname())
        self.assertEqual('foo', conn._get_hypervisor_hostname())
+
    def test_get_connection_serial(self):
        # Two back-to-back accesses to _conn must share one underlying
        # libvirt connection and register event callbacks exactly once.

        def get_conn_currency(driver):
            # Touch _conn to force the lazy connection to be established.
            driver._conn.getLibVersion()

        def connect_with_block(*a, **k):
            # enough to allow another connect to run
            eventlet.sleep(0)
            self.connect_calls += 1
            return self.conn

        def fake_register(*a, **k):
            self.register_calls += 1

        self.connect_calls = 0
        self.register_calls = 0
        self.stubs.Set(libvirt_driver.LibvirtDriver,
                       '_connect', connect_with_block)
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.stubs.Set(self.conn, 'domainEventRegisterAny', fake_register)

        # call serially
        get_conn_currency(driver)
        get_conn_currency(driver)
        self.assertEqual(self.connect_calls, 1)
        self.assertEqual(self.register_calls, 1)
+
    def test_get_connection_concurrency(self):
        # Same as the serial test but with two greenthreads racing to
        # establish the connection: the driver's locking must still
        # yield a single connect and a single callback registration.

        def get_conn_currency(driver):
            driver._conn.getLibVersion()

        def connect_with_block(*a, **k):
            # enough to allow another connect to run
            eventlet.sleep(0)
            self.connect_calls += 1
            return self.conn

        def fake_register(*a, **k):
            self.register_calls += 1

        self.connect_calls = 0
        self.register_calls = 0
        self.stubs.Set(libvirt_driver.LibvirtDriver,
                       '_connect', connect_with_block)
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.stubs.Set(self.conn, 'domainEventRegisterAny', fake_register)

        # call concurrently
        thr1 = eventlet.spawn(get_conn_currency, driver=driver)
        thr2 = eventlet.spawn(get_conn_currency, driver=driver)

        # let threads run
        eventlet.sleep(0)

        thr1.wait()
        thr2.wait()
        self.assertEqual(self.connect_calls, 1)
        self.assertEqual(self.register_calls, 1)
+
    def test_post_live_migration_at_destination_with_block_device_info(self):
        # post_live_migration_at_destination() must regenerate the guest
        # XML from the supplied block_device_info (so the volume shows up
        # in the domain) and persist the connection info on the BDM.
        # Preparing mocks
        mock_domain = self.mox.CreateMock(libvirt.virDomain)
        self.resultXML = None

        def fake_none(*args, **kwargs):
            return

        def fake_getLibVersion():
            return 9011

        def fake_getCapabilities():
            return """
            <capabilities>
                <host>
                    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                    <cpu>
                      <arch>x86_64</arch>
                      <model>Penryn</model>
                      <vendor>Intel</vendor>
                      <topology sockets='1' cores='2' threads='1'/>
                      <feature name='xtpr'/>
                    </cpu>
                </host>
            </capabilities>
            """

        def fake_to_xml(context, instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None, write_to_disk=False):
            # Capture the generated XML so the test can inspect it below.
            if image_meta is None:
                image_meta = {}
            conf = conn._get_guest_config(instance, network_info, image_meta,
                                          disk_info, rescue, block_device_info)
            self.resultXML = conf.to_xml()
            return self.resultXML

        def fake_lookup_name(instance_name):
            return mock_domain

        def fake_defineXML(xml):
            return

        def fake_baselineCPU(cpu, flag):
            return """<cpu mode='custom' match='exact'>
                        <model fallback='allow'>Westmere</model>
                        <vendor>Intel</vendor>
                        <feature policy='require' name='aes'/>
                      </cpu>
                   """

        network_info = _fake_network_info(self.stubs, 1)
        self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                      getCapabilities=fake_getCapabilities,
                                      getVersion=lambda: 1005001)
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456  # we send an int to test sha1 call
        instance = objects.Instance(**instance_ref)
        flavor = instance.get_flavor()
        flavor.extra_specs = {}

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
        libvirt_driver.LibvirtDriver._conn.getCapabilities = \
            fake_getCapabilities
        libvirt_driver.LibvirtDriver._conn.getVersion = lambda: 1005001
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
        libvirt_driver.LibvirtDriver._conn.defineXML = fake_defineXML
        libvirt_driver.LibvirtDriver._conn.baselineCPU = fake_baselineCPU

        self.mox.ReplayAll()

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(conn,
                       '_get_guest_xml',
                       fake_to_xml)
        self.stubs.Set(conn,
                       '_lookup_by_name',
                       fake_lookup_name)
        # One bootable iSCSI volume attached as the root disk.
        block_device_info = {'block_device_mapping':
                driver_block_device.convert_volumes([
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 1, 'guest_format': None,
                         'boot_index': 0,
                         'source_type': 'volume',
                         'destination_type': 'volume',
                         'device_name': '/dev/vda',
                         'disk_bus': 'virtio',
                         'device_type': 'disk',
                         'delete_on_termination': False}),
                    ])}
        block_device_info['block_device_mapping'][0]['connection_info'] = (
            {'driver_volume_type': 'iscsi'})
        with contextlib.nested(
                mock.patch.object(
                    driver_block_device.DriverVolumeBlockDevice, 'save'),
                mock.patch.object(objects.Flavor, 'get_by_id',
                                  return_value=flavor),
                mock.patch.object(objects.Instance, 'save')):
            conn.post_live_migration_at_destination(
                self.context, instance, network_info, True,
                block_device_info=block_device_info)
            # The regenerated XML must mention the fake volume driver and
            # the BDM must have been saved with its connection info.
            self.assertTrue('fake' in self.resultXML)
            self.assertTrue(
                block_device_info['block_device_mapping'][0].save.called)
+
+ def test_create_propagates_exceptions(self):
+ self.flags(virt_type='lxc', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(id=1, uuid='fake-uuid',
+ image_ref='my_fake_image')
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_create_domain_setup_lxc'),
+ mock.patch.object(conn, '_create_domain_cleanup_lxc'),
+ mock.patch.object(conn, '_is_booted_from_volume',
+ return_value=False),
+ mock.patch.object(conn, 'plug_vifs'),
+ mock.patch.object(conn, 'firewall_driver'),
+ mock.patch.object(conn, '_create_domain',
+ side_effect=exception.NovaException),
+ mock.patch.object(conn, 'cleanup')):
+ self.assertRaises(exception.NovaException,
+ conn._create_domain_and_network,
+ self.context,
+ 'xml',
+ instance, None)
+
+ def test_create_without_pause(self):
+ self.flags(virt_type='lxc', group='libvirt')
+
+ @contextlib.contextmanager
+ def fake_lxc_disk_handler(*args, **kwargs):
+ yield
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(id=1, uuid='fake-uuid')
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_lxc_disk_handler',
+ side_effect=fake_lxc_disk_handler),
+ mock.patch.object(conn, 'plug_vifs'),
+ mock.patch.object(conn, 'firewall_driver'),
+ mock.patch.object(conn, '_create_domain'),
+ mock.patch.object(conn, 'cleanup')) as (
+ _handler, cleanup, firewall_driver, create, plug_vifs):
+ domain = conn._create_domain_and_network(self.context, 'xml',
+ instance, None)
+ self.assertEqual(0, create.call_args_list[0][1]['launch_flags'])
+ self.assertEqual(0, domain.resume.call_count)
+
    def _test_create_with_network_events(self, neutron_failure=None,
                                         power_on=True):
        # Helper driving _create_domain_and_network under various
        # network-vif-plugged event outcomes:
        #   neutron_failure=None      -> events complete normally
        #   neutron_failure='timeout' -> waiting for the event times out
        #   neutron_failure='error'   -> the event reports failure
        # power_on=False skips event waiting entirely.
        generated_events = []

        def wait_timeout():
            event = mock.MagicMock()
            if neutron_failure == 'timeout':
                raise eventlet.timeout.Timeout()
            elif neutron_failure == 'error':
                event.status = 'failed'
            else:
                event.status = 'completed'
            return event

        def fake_prepare(instance, event_name):
            # Record each prepared event so expectations can be checked
            # after the fact.
            m = mock.MagicMock()
            m.instance = instance
            m.event_name = event_name
            m.wait.side_effect = wait_timeout
            generated_events.append(m)
            return m

        virtapi = manager.ComputeVirtAPI(mock.MagicMock())
        prepare = virtapi._compute.instance_events.prepare_for_instance_event
        prepare.side_effect = fake_prepare
        conn = libvirt_driver.LibvirtDriver(virtapi, False)

        instance = objects.Instance(id=1, uuid='fake-uuid')
        vifs = [{'id': 'vif1', 'active': False},
                {'id': 'vif2', 'active': False}]

        @mock.patch.object(conn, 'plug_vifs')
        @mock.patch.object(conn, 'firewall_driver')
        @mock.patch.object(conn, '_create_domain')
        @mock.patch.object(conn, 'cleanup')
        def test_create(cleanup, create, fw_driver, plug_vifs):
            domain = conn._create_domain_and_network(self.context, 'xml',
                                                     instance, vifs,
                                                     power_on=power_on)
            plug_vifs.assert_called_with(instance, vifs)

            # When waiting for events the domain starts paused and is
            # resumed afterwards; the launch flag reflects that.
            flag = self._get_launch_flags(conn, vifs, power_on=power_on)
            self.assertEqual(flag,
                             create.call_args_list[0][1]['launch_flags'])
            if flag:
                domain.resume.assert_called_once_with()
            if neutron_failure and CONF.vif_plugging_is_fatal:
                cleanup.assert_called_once_with(self.context,
                                                instance, network_info=vifs,
                                                block_device_info=None)

        test_create()

        if utils.is_neutron() and CONF.vif_plugging_timeout and power_on:
            prepare.assert_has_calls([
                mock.call(instance, 'network-vif-plugged-vif1'),
                mock.call(instance, 'network-vif-plugged-vif2')])
            for event in generated_events:
                if neutron_failure and generated_events.index(event) != 0:
                    # NOTE(review): this checks the mock object's own
                    # call_count, not event.wait.call_count -- presumably
                    # intended to assert later events were never waited
                    # on; worth confirming.
                    self.assertEqual(0, event.call_count)
                elif (neutron_failure == 'error' and
                          not CONF.vif_plugging_is_fatal):
                    event.wait.assert_called_once_with()
        else:
            self.assertEqual(0, prepare.call_count)
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron(self, is_neutron):
+ self._test_create_with_network_events()
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron_power_off(self,
+ is_neutron):
+ # Tests that we don't wait for events if we don't start the instance.
+ self._test_create_with_network_events(power_on=False)
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron_nowait(self, is_neutron):
+ self.flags(vif_plugging_timeout=0)
+ self._test_create_with_network_events()
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron_failed_nonfatal_timeout(
+ self, is_neutron):
+ self.flags(vif_plugging_is_fatal=False)
+ self._test_create_with_network_events(neutron_failure='timeout')
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron_failed_fatal_timeout(
+ self, is_neutron):
+ self.assertRaises(exception.VirtualInterfaceCreateException,
+ self._test_create_with_network_events,
+ neutron_failure='timeout')
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron_failed_nonfatal_error(
+ self, is_neutron):
+ self.flags(vif_plugging_is_fatal=False)
+ self._test_create_with_network_events(neutron_failure='error')
+
+ @mock.patch('nova.utils.is_neutron', return_value=True)
+ def test_create_with_network_events_neutron_failed_fatal_error(
+ self, is_neutron):
+ self.assertRaises(exception.VirtualInterfaceCreateException,
+ self._test_create_with_network_events,
+ neutron_failure='error')
+
+ @mock.patch('nova.utils.is_neutron', return_value=False)
+ def test_create_with_network_events_non_neutron(self, is_neutron):
+ self._test_create_with_network_events()
+
    @mock.patch('nova.volume.encryptors.get_encryption_metadata')
    @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
    def test_create_with_bdm(self, get_info_from_bdm, get_encryption_metadata):
        """_create_domain_and_network with a block device mapping sets up
        volume encryption, plugs vifs, applies firewall filtering and
        creates the domain with the expected launch flags.
        """
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
        mock_dom = mock.MagicMock()
        mock_encryption_meta = mock.MagicMock()
        get_encryption_metadata.return_value = mock_encryption_meta

        # Minimal domain XML with a single attached volume at vda.
        fake_xml = """
                <domain>
                    <name>instance-00000001</name>
                    <memory>1048576</memory>
                    <vcpu>1</vcpu>
                    <devices>
                        <disk type='file' device='disk'>
                            <driver name='qemu' type='raw' cache='none'/>
                            <source file='/path/fake-volume1'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                    </devices>
                </domain>
        """
        fake_volume_id = "fake-volume-id"
        connection_info = {"driver_volume_type": "fake",
                           "data": {"access_mode": "rw",
                                    "volume_id": fake_volume_id}}

        def fake_getitem(*args, **kwargs):
            # Back the mock BDM's __getitem__ with a plain dict.
            fake_bdm = {'connection_info': connection_info,
                        'mount_device': '/dev/vda'}
            return fake_bdm.get(args[0])

        mock_volume = mock.MagicMock()
        mock_volume.__getitem__.side_effect = fake_getitem
        bdi = {'block_device_mapping': [mock_volume]}
        network_info = [network_model.VIF(id='1'),
                        network_model.VIF(id='2', active=True)]

        with contextlib.nested(
            mock.patch.object(conn, '_get_volume_encryptor'),
            mock.patch.object(conn, 'plug_vifs'),
            mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(conn.firewall_driver,
                              'prepare_instance_filter'),
            mock.patch.object(conn, '_create_domain'),
            mock.patch.object(conn.firewall_driver, 'apply_instance_filter'),
        ) as (get_volume_encryptor, plug_vifs, setup_basic_filtering,
              prepare_instance_filter, create_domain, apply_instance_filter):
            create_domain.return_value = mock_dom

            domain = conn._create_domain_and_network(self.context, fake_xml,
                                                     instance, network_info,
                                                     block_device_info=bdi)

            # Encryption metadata must be fetched and used for the encryptor.
            get_encryption_metadata.assert_called_once_with(self.context,
                conn._volume_api, fake_volume_id, connection_info)
            get_volume_encryptor.assert_called_once_with(connection_info,
                                                         mock_encryption_meta)
            plug_vifs.assert_called_once_with(instance, network_info)
            setup_basic_filtering.assert_called_once_with(instance,
                                                          network_info)
            prepare_instance_filter.assert_called_once_with(instance,
                                                            network_info)
            flags = self._get_launch_flags(conn, network_info)
            create_domain.assert_called_once_with(fake_xml, instance=instance,
                                                  launch_flags=flags,
                                                  power_on=True)
            self.assertEqual(mock_dom, domain)
+
    def test_get_guest_storage_config(self):
        """_get_guest_storage_config returns configs for root, ephemeral and
        an attached volume, persisting connection info back to the BDM.
        """
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        test_instance = copy.deepcopy(self.test_instance)
        test_instance["default_swap_device"] = None
        instance = objects.Instance(**test_instance)
        flavor = instance.get_flavor()
        flavor.extra_specs = {}
        conn_info = {'driver_volume_type': 'fake', 'data': {}}
        # One volume BDM attached at /dev/vdc.
        bdi = {'block_device_mapping':
               driver_block_device.convert_volumes([
                   fake_block_device.FakeDbBlockDeviceDict({
                       'id': 1,
                       'source_type': 'volume',
                       'destination_type': 'volume',
                       'device_name': '/dev/vdc'})
               ])}
        bdm = bdi['block_device_mapping'][0]
        bdm['connection_info'] = conn_info
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance, bdi)
        mock_conf = mock.MagicMock(source_path='fake')

        with contextlib.nested(
            mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
                              'save'),
            mock.patch.object(conn, '_connect_volume'),
            mock.patch.object(conn, '_get_volume_config',
                              return_value=mock_conf),
            mock.patch.object(conn, '_set_cache_mode')
        ) as (volume_save, connect_volume, get_volume_config, set_cache_mode):
            devices = conn._get_guest_storage_config(instance, None,
                disk_info, False, bdi, flavor)

            # root + ephemeral + attached volume
            self.assertEqual(3, len(devices))
            self.assertEqual('/dev/vdb', instance.default_ephemeral_device)
            self.assertIsNone(instance.default_swap_device)
            connect_volume.assert_called_with(bdm['connection_info'],
                {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
            get_volume_config.assert_called_with(bdm['connection_info'],
                {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
            # Connection info must be saved back to the BDM exactly once.
            self.assertEqual(1, volume_save.call_count)
            self.assertEqual(3, set_cache_mode.call_count)
+
+ def test_get_neutron_events(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ network_info = [network_model.VIF(id='1'),
+ network_model.VIF(id='2', active=True)]
+ events = conn._get_neutron_events(network_info)
+ self.assertEqual([('network-vif-plugged', '1')], events)
+
+ def test_unplug_vifs_ignores_errors(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+ with mock.patch.object(conn, 'vif_driver') as vif_driver:
+ vif_driver.unplug.side_effect = exception.AgentError(
+ method='unplug')
+ conn._unplug_vifs('inst', [1], ignore_errors=True)
+ vif_driver.unplug.assert_called_once_with('inst', 1)
+
+ def test_unplug_vifs_reports_errors(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+ with mock.patch.object(conn, 'vif_driver') as vif_driver:
+ vif_driver.unplug.side_effect = exception.AgentError(
+ method='unplug')
+ self.assertRaises(exception.AgentError,
+ conn.unplug_vifs, 'inst', [1])
+ vif_driver.unplug.assert_called_once_with('inst', 1)
+
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
+ def test_cleanup_pass_with_no_mount_device(self, undefine, unplug):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+ conn.firewall_driver = mock.Mock()
+ conn._disconnect_volume = mock.Mock()
+ fake_inst = {'name': 'foo'}
+ fake_bdms = [{'connection_info': 'foo',
+ 'mount_device': None}]
+ with mock.patch('nova.virt.driver'
+ '.block_device_info_get_mapping',
+ return_value=fake_bdms):
+ conn.cleanup('ctxt', fake_inst, 'netinfo', destroy_disks=False)
+ self.assertTrue(conn._disconnect_volume.called)
+
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
    def test_cleanup_wants_vif_errors_ignored(self, undefine, unplug):
        """cleanup() must unplug vifs with errors ignored."""
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        fake_inst = {'name': 'foo'}
        with mock.patch.object(conn._conn, 'lookupByName') as lookup:
            lookup.return_value = fake_inst
            # NOTE(danms): Make unplug cause us to bail early, since
            # we only care about how it was called
            unplug.side_effect = test.TestingException
            self.assertRaises(test.TestingException,
                              conn.cleanup, 'ctxt', fake_inst, 'netinfo')
            # Third positional argument is ignore_errors=True.
            unplug.assert_called_once_with(fake_inst, 'netinfo', True)
+
    @mock.patch('nova.virt.driver.block_device_info_get_mapping')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_get_serial_ports_from_instance')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
    def test_cleanup_serial_console_enabled(
            self, undefine, get_ports,
            block_device_info_get_mapping):
        """With serial console enabled, cleanup must read the serial ports
        from the domain before undefining it.
        """
        self.flags(enabled="True", group='serial_console')
        instance = 'i1'
        network_info = {}
        bdm_info = {}
        firewall_driver = mock.MagicMock()

        get_ports.return_value = iter([('127.0.0.1', 10000)])
        block_device_info_get_mapping.return_value = ()

        # We want to ensure undefine_domain is called after
        # lookup_domain.
        def undefine_domain(instance):
            # After undefine, any further port lookup would blow up --
            # this proves the ordering if cleanup succeeds.
            get_ports.side_effect = Exception("domain undefined")
        undefine.side_effect = undefine_domain

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        conn.firewall_driver = firewall_driver
        conn.cleanup(
            'ctx', instance, network_info,
            block_device_info=bdm_info,
            destroy_disks=False, destroy_vifs=False)

        get_ports.assert_called_once_with(instance)
        undefine.assert_called_once_with(instance)
        firewall_driver.unfilter_instance.assert_called_once_with(
            instance, network_info=network_info)
        block_device_info_get_mapping.assert_called_once_with(bdm_info)
+
    def test_swap_volume(self):
        """_swap_volume rebases the domain onto the new volume path,
        resizes the block device and re-defines the persistent XML.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())

        mock_dom = mock.MagicMock()

        with mock.patch.object(drvr._conn, 'defineXML',
                               create=True) as mock_define:
            xmldoc = "<domain/>"
            srcfile = "/first/path"
            dstfile = "/second/path"

            mock_dom.XMLDesc.return_value = xmldoc
            mock_dom.isPersistent.return_value = True
            # Empty job info signals that the block rebase has completed.
            mock_dom.blockJobInfo.return_value = {}

            drvr._swap_volume(mock_dom, srcfile, dstfile, 1)

            mock_dom.XMLDesc.assert_called_once_with(
                fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
                fakelibvirt.VIR_DOMAIN_XML_SECURE)
            mock_dom.blockRebase.assert_called_once_with(
                srcfile, dstfile, 0,
                libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
                libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)
            # blockResize takes KiB: 1 GiB worth of KiB here.
            mock_dom.blockResize.assert_called_once_with(
                srcfile, 1 * units.Gi / units.Ki)
            mock_define.assert_called_once_with(xmldoc)
+
    def test_live_snapshot(self):
        """_live_snapshot rebases onto a delta file, extracts the snapshot
        from it and restores the persistent domain definition.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())

        mock_dom = mock.MagicMock()

        with contextlib.nested(
                mock.patch.object(drvr._conn, 'defineXML', create=True),
                mock.patch.object(fake_libvirt_utils, 'get_disk_size'),
                mock.patch.object(fake_libvirt_utils, 'get_disk_backing_file'),
                mock.patch.object(fake_libvirt_utils, 'create_cow_image'),
                mock.patch.object(fake_libvirt_utils, 'chown'),
                mock.patch.object(fake_libvirt_utils, 'extract_snapshot'),
                ) as (mock_define, mock_size, mock_backing, mock_create_cow,
                      mock_chown, mock_snapshot):

            xmldoc = "<domain/>"
            srcfile = "/first/path"
            dstfile = "/second/path"
            bckfile = "/other/path"
            # The snapshot is taken via a temporary copy-on-write delta.
            dltfile = dstfile + ".delta"

            mock_dom.XMLDesc.return_value = xmldoc
            mock_dom.isPersistent.return_value = True
            mock_size.return_value = 1004009
            mock_backing.return_value = bckfile

            drvr._live_snapshot(mock_dom, srcfile, dstfile, "qcow2")

            mock_dom.XMLDesc.assert_called_once_with(
                fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
                fakelibvirt.VIR_DOMAIN_XML_SECURE)
            # Shallow rebase: only the top layer is copied into the delta.
            mock_dom.blockRebase.assert_called_once_with(
                srcfile, dltfile, 0,
                libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
                libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
                libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)

            mock_size.assert_called_once_with(srcfile)
            mock_backing.assert_called_once_with(srcfile, basename=False)
            mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009)
            mock_chown.assert_called_once_with(dltfile, os.getuid())
            mock_snapshot.assert_called_once_with(dltfile, "qcow2",
                                                  dstfile, "qcow2")
            mock_define.assert_called_once_with(xmldoc)
+
+ @mock.patch.object(greenthread, "spawn")
+ def test_live_migration_hostname_valid(self, mock_spawn):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ drvr.live_migration(self.context, self.test_instance,
+ "host1.example.com",
+ lambda x: x,
+ lambda x: x)
+ self.assertEqual(1, mock_spawn.call_count)
+
+ @mock.patch.object(greenthread, "spawn")
+ @mock.patch.object(fake_libvirt_utils, "is_valid_hostname")
+ def test_live_migration_hostname_invalid(self, mock_hostname, mock_spawn):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ mock_hostname.return_value = False
+ self.assertRaises(exception.InvalidHostname,
+ drvr.live_migration,
+ self.context, self.test_instance,
+ "foo/?com=/bin/sh",
+ lambda x: x,
+ lambda x: x)
+
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('tempfile.mkstemp')
    @mock.patch('os.close', return_value=None)
    def test_check_instance_shared_storage_local_raw(self,
                                                     mock_close,
                                                     mock_mkstemp,
                                                     mock_exists):
        """For raw file-backed storage a temp file is created in the
        instance directory and its path is returned for the check."""
        instance_uuid = str(uuid.uuid4())
        self.flags(images_type='raw', group='libvirt')
        self.flags(instances_path='/tmp')
        # mkstemp returns (fd, path); fd -1 is closed via the mocked os.close.
        mock_mkstemp.return_value = (-1,
                                     '/tmp/{0}/file'.format(instance_uuid))
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = fake_instance.fake_instance_obj(self.context)
        temp_file = driver.check_instance_shared_storage_local(self.context,
                                                               instance)
        self.assertEqual('/tmp/{0}/file'.format(instance_uuid),
                         temp_file['filename'])
+
+ def test_check_instance_shared_storage_local_rbd(self):
+ self.flags(images_type='rbd', group='libvirt')
+ driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = fake_instance.fake_instance_obj(self.context)
+ self.assertIsNone(driver.
+ check_instance_shared_storage_local(self.context,
+ instance))
+
+
class HostStateTestCase(test.NoDBTestCase):
    """Tests for the host resource reporting of the libvirt driver."""

    # Canned host data returned by FakeConnection below.
    cpu_info = ('{"vendor": "Intel", "model": "pentium", "arch": "i686", '
                '"features": ["ssse3", "monitor", "pni", "sse2", "sse", '
                '"fxsr", "clflush", "pse36", "pat", "cmov", "mca", "pge", '
                '"mtrr", "sep", "apic"], '
                '"topology": {"cores": "1", "threads": "1", "sockets": "1"}}')
    instance_caps = [(arch.X86_64, "kvm", "hvm"),
                     (arch.I686, "kvm", "hvm")]
    pci_devices = [{
        "dev_id": "pci_0000_04_00_3",
        "address": "0000:04:10.3",
        "product_id": '1521',
        "vendor_id": '8086',
        "dev_type": 'type-PF',
        "phys_function": None}]
    numa_topology = hardware.VirtNUMAHostTopology(
        cells=[hardware.VirtNUMATopologyCellUsage(
                   1, set([1, 2]), 1024),
               hardware.VirtNUMATopologyCellUsage(
                   2, set([3, 4]), 1024)])

    class FakeConnection(libvirt_driver.LibvirtDriver):
        """Fake connection object.

        Overrides every host-inspection helper with canned values so
        get_available_resource can be exercised without a hypervisor.
        """
        def __init__(self):
            super(HostStateTestCase.FakeConnection,
                  self).__init__(fake.FakeVirtAPI(), True)

        def _get_vcpu_total(self):
            return 1

        def _get_vcpu_used(self):
            return 0

        def _get_cpu_info(self):
            return HostStateTestCase.cpu_info

        def _get_disk_over_committed_size_total(self):
            return 0

        def _get_local_gb_info(self):
            return {'total': 100, 'used': 20, 'free': 80}

        def _get_memory_mb_total(self):
            return 497

        def _get_memory_mb_used(self):
            return 88

        def _get_hypervisor_type(self):
            return 'QEMU'

        def _get_hypervisor_version(self):
            return 13091

        def _get_hypervisor_hostname(self):
            return 'compute1'

        def get_host_uptime(self):
            return ('10:01:16 up 1:36, 6 users, '
                    'load average: 0.21, 0.16, 0.19')

        def _get_disk_available_least(self):
            return 13091

        def _get_instance_capabilities(self):
            return HostStateTestCase.instance_caps

        def _get_pci_passthrough_devices(self):
            return jsonutils.dumps(HostStateTestCase.pci_devices)

        def _get_host_numa_topology(self):
            return HostStateTestCase.numa_topology

    def test_update_status(self):
        """get_available_resource reflects the canned host data verbatim."""
        drvr = HostStateTestCase.FakeConnection()

        stats = drvr.get_available_resource("compute1")
        self.assertEqual(stats["vcpus"], 1)
        self.assertEqual(stats["memory_mb"], 497)
        self.assertEqual(stats["local_gb"], 100)
        self.assertEqual(stats["vcpus_used"], 0)
        self.assertEqual(stats["memory_mb_used"], 88)
        self.assertEqual(stats["local_gb_used"], 20)
        self.assertEqual(stats["hypervisor_type"], 'QEMU')
        self.assertEqual(stats["hypervisor_version"], 13091)
        self.assertEqual(stats["hypervisor_hostname"], 'compute1')
        self.assertEqual(jsonutils.loads(stats["cpu_info"]),
                {"vendor": "Intel", "model": "pentium",
                 "arch": arch.I686,
                 "features": ["ssse3", "monitor", "pni", "sse2", "sse",
                              "fxsr", "clflush", "pse36", "pat", "cmov",
                              "mca", "pge", "mtrr", "sep", "apic"],
                 "topology": {"cores": "1", "threads": "1", "sockets": "1"}
                })
        # disk_available_least is capped by the free space reported above.
        self.assertEqual(stats["disk_available_least"], 80)
        self.assertEqual(jsonutils.loads(stats["pci_passthrough_devices"]),
                         HostStateTestCase.pci_devices)
        self.assertThat(hardware.VirtNUMAHostTopology.from_json(
                            stats['numa_topology'])._to_dict(),
                        matchers.DictMatches(
                            HostStateTestCase.numa_topology._to_dict()))
+
+
+class LibvirtDriverTestCase(test.NoDBTestCase):
+ """Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
    def setUp(self):
        super(LibvirtDriverTestCase, self).setUp()
        # Read-only driver: these tests never touch a real libvirt daemon.
        self.libvirtconnection = libvirt_driver.LibvirtDriver(
            fake.FakeVirtAPI(), read_only=True)
        self.context = context.get_admin_context()
+
+ def _create_instance(self, params=None):
+ """Create a test instance."""
+ if not params:
+ params = {}
+
+ sys_meta = {
+ 'instance_type_memory_mb': 512,
+ 'instance_type_swap': 0,
+ 'instance_type_vcpu_weight': None,
+ 'instance_type_root_gb': 1,
+ 'instance_type_id': 2,
+ 'instance_type_name': u'm1.tiny',
+ 'instance_type_ephemeral_gb': 0,
+ 'instance_type_rxtx_factor': 1.0,
+ 'instance_type_flavorid': u'1',
+ 'instance_type_vcpus': 1
+ }
+
+ inst = {}
+ inst['id'] = 1
+ inst['uuid'] = '52d3b512-1152-431f-a8f7-28f0288a622b'
+ inst['os_type'] = 'linux'
+ inst['image_ref'] = '1'
+ inst['reservation_id'] = 'r-fakeres'
+ inst['user_id'] = 'fake'
+ inst['project_id'] = 'fake'
+ inst['instance_type_id'] = 2
+ inst['ami_launch_index'] = 0
+ inst['host'] = 'host1'
+ inst['root_gb'] = 10
+ inst['ephemeral_gb'] = 20
+ inst['config_drive'] = True
+ inst['kernel_id'] = 2
+ inst['ramdisk_id'] = 3
+ inst['key_data'] = 'ABCDEFG'
+ inst['system_metadata'] = sys_meta
+
+ inst.update(params)
+
+ return objects.Instance(**inst)
+
    def test_migrate_disk_and_power_off_exception(self):
        """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
        .migrate_disk_and_power_off.

        The first execute call fails, so the migration must propagate the
        error after having checked shared storage.
        """

        self.counter = 0
        self.checked_shared_storage = False

        def fake_get_instance_disk_info(instance,
                                        block_device_info=None):
            return '[]'

        def fake_destroy(instance):
            pass

        def fake_get_host_ip_addr():
            return '10.0.0.1'

        def fake_execute(*args, **kwargs):
            # Fail on the very first command issued by the migration.
            self.counter += 1
            if self.counter == 1:
                assert False, "intentional failure"

        def fake_os_path_exists(path):
            return True

        def fake_is_storage_shared(dest, inst_base):
            self.checked_shared_storage = True
            return False

        self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
        self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
                       fake_get_host_ip_addr)
        self.stubs.Set(self.libvirtconnection, '_is_storage_shared_with',
                       fake_is_storage_shared)
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(os.path, 'exists', fake_os_path_exists)

        ins_ref = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 20}

        self.assertRaises(AssertionError,
                          self.libvirtconnection.migrate_disk_and_power_off,
                          None, ins_ref, '10.0.0.2', flavor, None)
+
    def test_migrate_disk_and_power_off(self):
        """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
        .migrate_disk_and_power_off.

        Successful migration returns the serialized disk info for both a
        remote and a local destination host.
        """

        disk_info = [{'type': 'qcow2', 'path': '/test/disk',
                      'virt_disk_size': '10737418240',
                      'backing_file': '/base/disk',
                      'disk_size': '83886080'},
                     {'type': 'raw', 'path': '/test/disk.local',
                      'virt_disk_size': '10737418240',
                      'backing_file': '/base/disk.local',
                      'disk_size': '83886080'}]
        disk_info_text = jsonutils.dumps(disk_info)

        def fake_get_instance_disk_info(instance,
                                        block_device_info=None):
            return disk_info_text

        def fake_destroy(instance):
            pass

        def fake_get_host_ip_addr():
            return '10.0.0.1'

        def fake_execute(*args, **kwargs):
            pass

        self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
        self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
                       fake_get_host_ip_addr)
        self.stubs.Set(utils, 'execute', fake_execute)

        ins_ref = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 20}

        # dest is different host case
        out = self.libvirtconnection.migrate_disk_and_power_off(
            None, ins_ref, '10.0.0.2', flavor, None)
        self.assertEqual(out, disk_info_text)

        # dest is same host case
        out = self.libvirtconnection.migrate_disk_and_power_off(
            None, ins_ref, '10.0.0.1', flavor, None)
        self.assertEqual(out, disk_info_text)
+
    @mock.patch('nova.utils.execute')
    @mock.patch('nova.virt.libvirt.utils.copy_image')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_swap(self, mock_get_disk_info,
                                             get_host_ip_addr,
                                             mock_destroy,
                                             mock_copy_image,
                                             mock_execute):
        """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
        .migrate_disk_and_power_off.

        The swap disk must be skipped during the migration copy/move, since
        it gets recreated from the new flavor on the destination.
        """
        self.copy_or_move_swap_called = False

        # 10G root and 512M swap disk
        disk_info = [{'disk_size': 1, 'type': 'qcow2',
                      'virt_disk_size': 10737418240, 'path': '/test/disk',
                      'backing_file': '/base/disk'},
                     {'disk_size': 1, 'type': 'qcow2',
                      'virt_disk_size': 536870912, 'path': '/test/disk.swap',
                      'backing_file': '/base/swap_512'}]
        disk_info_text = jsonutils.dumps(disk_info)
        mock_get_disk_info.return_value = disk_info_text
        get_host_ip_addr.return_value = '10.0.0.1'

        def fake_copy_image(*args, **kwargs):
            # disk.swap should not be touched since it is skipped over
            if '/test/disk.swap' in list(args):
                self.copy_or_move_swap_called = True

        def fake_execute(*args, **kwargs):
            # disk.swap should not be touched since it is skipped over
            if set(['mv', '/test/disk.swap']).issubset(list(args)):
                self.copy_or_move_swap_called = True

        mock_copy_image.side_effect = fake_copy_image
        mock_execute.side_effect = fake_execute

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # Original instance config
        instance = self._create_instance({'root_gb': 10,
                                          'ephemeral_gb': 0})

        # Re-size fake instance to 20G root and 1024M swap disk
        flavor = {'root_gb': 20, 'ephemeral_gb': 0, 'swap': 1024}

        # Destination is same host
        out = conn.migrate_disk_and_power_off(None, instance, '10.0.0.1',
                                              flavor, None)

        mock_get_disk_info.assert_called_once_with(instance.name,
                                                   block_device_info=None)
        self.assertTrue(get_host_ip_addr.called)
        mock_destroy.assert_called_once_with(instance)
        self.assertFalse(self.copy_or_move_swap_called)
        self.assertEqual(disk_info_text, out)
+
    def test_migrate_disk_and_power_off_lvm(self):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off.

        LVM-backed instances cannot be migrated this way; the pre-check
        error must be raised.
        """

        self.flags(images_type='lvm', group='libvirt')
        disk_info = [{'type': 'raw', 'path': '/dev/vg/disk',
                      'disk_size': '83886080'},
                     {'type': 'raw', 'path': '/dev/disk.local',
                      'disk_size': '83886080'}]
        disk_info_text = jsonutils.dumps(disk_info)

        def fake_get_instance_disk_info(instance, xml=None,
                                        block_device_info=None):
            return disk_info_text

        def fake_destroy(instance):
            pass

        def fake_get_host_ip_addr():
            return '10.0.0.1'

        def fake_execute(*args, **kwargs):
            pass

        self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
        self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
                       fake_get_host_ip_addr)
        self.stubs.Set(utils, 'execute', fake_execute)

        ins_ref = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 20}

        # Migration is not implemented for LVM backed instances
        self.assertRaises(exception.MigrationPreCheckError,
                          self.libvirtconnection.migrate_disk_and_power_off,
                          None, ins_ref, '10.0.0.1', flavor, None)
+
+ def test_migrate_disk_and_power_off_resize_error(self):
+ instance = self._create_instance()
+ flavor = {'root_gb': 5}
+ self.assertRaises(
+ exception.InstanceFaultRollback,
+ self.libvirtconnection.migrate_disk_and_power_off,
+ 'ctx', instance, '10.0.0.1', flavor, None)
+
    def test_wait_for_running(self):
        """_wait_for_running maps instance state onto the polling loop:
        missing -> InstanceNotFound, running -> LoopingCallDone,
        anything else -> keep polling (plain return).
        """
        def fake_get_info(instance):
            if instance['name'] == "not_found":
                raise exception.InstanceNotFound(instance_id=instance['uuid'])
            elif instance['name'] == "running":
                return {'state': power_state.RUNNING}
            else:
                return {'state': power_state.SHUTDOWN}

        self.stubs.Set(self.libvirtconnection, 'get_info',
                       fake_get_info)

        # instance not found case
        self.assertRaises(exception.InstanceNotFound,
                          self.libvirtconnection._wait_for_running,
                          {'name': 'not_found',
                           'uuid': 'not_found_uuid'})

        # instance is running case
        self.assertRaises(loopingcall.LoopingCallDone,
                          self.libvirtconnection._wait_for_running,
                          {'name': 'running',
                           'uuid': 'running_uuid'})

        # else case
        self.libvirtconnection._wait_for_running({'name': 'else',
                                                  'uuid': 'other_uuid'})
+
+ def test_disk_size_from_instance_disk_info(self):
+ inst = {'root_gb': 10, 'ephemeral_gb': 20, 'swap_gb': 30}
+
+ info = {'path': '/path/disk'}
+ self.assertEqual(10 * units.Gi,
+ self.libvirtconnection._disk_size_from_instance(inst, info))
+
+ info = {'path': '/path/disk.local'}
+ self.assertEqual(20 * units.Gi,
+ self.libvirtconnection._disk_size_from_instance(inst, info))
+
+ info = {'path': '/path/disk.swap'}
+ self.assertEqual(0,
+ self.libvirtconnection._disk_size_from_instance(inst, info))
+
+ @mock.patch('nova.utils.execute')
+ def test_disk_raw_to_qcow2(self, mock_execute):
+ path = '/test/disk'
+ _path_qcow = path + '_qcow'
+
+ self.libvirtconnection._disk_raw_to_qcow2(path)
+ mock_execute.assert_has_calls([
+ mock.call('qemu-img', 'convert', '-f', 'raw',
+ '-O', 'qcow2', path, _path_qcow),
+ mock.call('mv', _path_qcow, path)])
+
+ @mock.patch('nova.utils.execute')
+ def test_disk_qcow2_to_raw(self, mock_execute):
+ path = '/test/disk'
+ _path_raw = path + '_raw'
+
+ self.libvirtconnection._disk_qcow2_to_raw(path)
+ mock_execute.assert_has_calls([
+ mock.call('qemu-img', 'convert', '-f', 'qcow2',
+ '-O', 'raw', path, _path_raw),
+ mock.call('mv', _path_raw, path)])
+
+ @mock.patch('nova.virt.disk.api.extend')
+ def test_disk_resize_raw(self, mock_extend):
+ info = {'type': 'raw', 'path': '/test/disk'}
+
+ self.libvirtconnection._disk_resize(info, 50)
+ mock_extend.assert_called_once_with(info['path'], 50, use_cow=False)
+
    @mock.patch('nova.virt.disk.api.can_resize_image')
    @mock.patch('nova.virt.disk.api.is_image_partitionless')
    @mock.patch('nova.virt.disk.api.extend')
    def test_disk_resize_qcow2(
            self, mock_extend, mock_can_resize, mock_is_partitionless):
        """qcow2 disks are converted to raw, extended, then converted back."""
        info = {'type': 'qcow2', 'path': '/test/disk'}

        with contextlib.nested(
                mock.patch.object(
                    self.libvirtconnection, '_disk_qcow2_to_raw'),
                mock.patch.object(
                    self.libvirtconnection, '_disk_raw_to_qcow2'))\
                as (mock_disk_qcow2_to_raw, mock_disk_raw_to_qcow2):

            mock_can_resize.return_value = True
            mock_is_partitionless.return_value = True

            self.libvirtconnection._disk_resize(info, 50)

            # convert -> extend raw image -> convert back
            mock_disk_qcow2_to_raw.assert_called_once_with(info['path'])
            mock_extend.assert_called_once_with(
                info['path'], 50, use_cow=False)
            mock_disk_raw_to_qcow2.assert_called_once_with(info['path'])
+
    def _test_finish_migration(self, power_on, resize_instance=False):
        """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
        .finish_migration.

        :param power_on: expected power state passed through to domain
            creation.
        :param resize_instance: whether disks should be resized.
        """

        disk_info = [{'type': 'qcow2', 'path': '/test/disk',
                      'local_gb': 10, 'backing_file': '/base/disk'},
                     {'type': 'raw', 'path': '/test/disk.local',
                      'local_gb': 10, 'backing_file': '/base/disk.local'}]
        disk_info_text = jsonutils.dumps(disk_info)
        powered_on = power_on
        self.fake_create_domain_called = False
        self.fake_disk_resize_called = False

        def fake_to_xml(context, instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None, write_to_disk=False):
            return ""

        def fake_plug_vifs(instance, network_info):
            pass

        def fake_create_image(context, inst,
                              disk_mapping, suffix='',
                              disk_images=None, network_info=None,
                              block_device_info=None, inject_files=True):
            # File injection must never happen during migration.
            self.assertFalse(inject_files)

        def fake_create_domain_and_network(
                context, xml, instance, network_info,
                block_device_info=None, power_on=True, reboot=False,
                vifs_already_plugged=False):
            self.fake_create_domain_called = True
            self.assertEqual(powered_on, power_on)
            # finish_migration plugs vifs itself before creating the domain.
            self.assertTrue(vifs_already_plugged)

        def fake_enable_hairpin(instance):
            pass

        def fake_execute(*args, **kwargs):
            pass

        def fake_get_info(instance):
            if powered_on:
                return {'state': power_state.RUNNING}
            else:
                return {'state': power_state.SHUTDOWN}

        def fake_disk_resize(info, size):
            self.fake_disk_resize_called = True

        self.flags(use_cow_images=True)
        self.stubs.Set(self.libvirtconnection, '_disk_resize',
                       fake_disk_resize)
        self.stubs.Set(self.libvirtconnection, '_get_guest_xml', fake_to_xml)
        self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
        self.stubs.Set(self.libvirtconnection, '_create_image',
                       fake_create_image)
        self.stubs.Set(self.libvirtconnection, '_create_domain_and_network',
                       fake_create_domain_and_network)
        self.stubs.Set(self.libvirtconnection, '_enable_hairpin',
                       fake_enable_hairpin)
        self.stubs.Set(utils, 'execute', fake_execute)
        fw = base_firewall.NoopFirewallDriver()
        self.stubs.Set(self.libvirtconnection, 'firewall_driver', fw)
        self.stubs.Set(self.libvirtconnection, 'get_info',
                       fake_get_info)

        ins_ref = self._create_instance()

        self.libvirtconnection.finish_migration(
            context.get_admin_context(), None, ins_ref,
            disk_info_text, [], None,
            resize_instance, None, power_on)
        self.assertTrue(self.fake_create_domain_called)
        # Disk resize only happens when the flavor changed.
        self.assertEqual(
            resize_instance, self.fake_disk_resize_called)
+
+ def test_finish_migration_resize(self):
+ self._test_finish_migration(True, resize_instance=True)
+
+ def test_finish_migration_power_on(self):
+ self._test_finish_migration(True)
+
+ def test_finish_migration_power_off(self):
+ self._test_finish_migration(False)
+
    def _test_finish_revert_migration(self, power_on):
        """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
        .finish_revert_migration.

        :param power_on: expected power state passed to domain creation.
        """
        powered_on = power_on
        self.fake_create_domain_called = False

        def fake_execute(*args, **kwargs):
            pass

        def fake_plug_vifs(instance, network_info):
            pass

        def fake_create_domain(xml, instance=None, launch_flags=0,
                               power_on=True):
            self.fake_create_domain_called = True
            self.assertEqual(powered_on, power_on)
            return mock.MagicMock()

        def fake_enable_hairpin(instance):
            pass

        def fake_get_info(instance):
            if powered_on:
                return {'state': power_state.RUNNING}
            else:
                return {'state': power_state.SHUTDOWN}

        def fake_to_xml(context, instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None):
            return ""

        self.stubs.Set(self.libvirtconnection, '_get_guest_xml', fake_to_xml)
        self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
        self.stubs.Set(utils, 'execute', fake_execute)
        fw = base_firewall.NoopFirewallDriver()
        self.stubs.Set(self.libvirtconnection, 'firewall_driver', fw)
        self.stubs.Set(self.libvirtconnection, '_create_domain',
                       fake_create_domain)
        self.stubs.Set(self.libvirtconnection, '_enable_hairpin',
                       fake_enable_hairpin)
        self.stubs.Set(self.libvirtconnection, 'get_info',
                       fake_get_info)

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            ins_ref = self._create_instance()
            os.mkdir(os.path.join(tmpdir, ins_ref['name']))
            # finish_revert_migration expects an (empty) libvirt.xml in the
            # instance directory.
            libvirt_xml_path = os.path.join(tmpdir,
                                            ins_ref['name'],
                                            'libvirt.xml')
            f = open(libvirt_xml_path, 'w')
            f.close()

            self.libvirtconnection.finish_revert_migration(
                context.get_admin_context(), ins_ref,
                [], None, power_on)
            self.assertTrue(self.fake_create_domain_called)
+
+ def test_finish_revert_migration_power_on(self):
+ self._test_finish_revert_migration(True)
+
+ def test_finish_revert_migration_power_off(self):
+ self._test_finish_revert_migration(False)
+
+ def _test_finish_revert_migration_after_crash(self, backup_made=True,
+ del_inst_failed=False):
+ class FakeLoopingCall:
+ def start(self, *a, **k):
+ return self
+
+ def wait(self):
+ return None
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(shutil, 'rmtree')
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ self.stubs.Set(blockinfo, 'get_disk_info', lambda *a: None)
+ self.stubs.Set(self.libvirtconnection, '_get_guest_xml',
+ lambda *a, **k: None)
+ self.stubs.Set(self.libvirtconnection, '_create_domain_and_network',
+ lambda *a: None)
+ self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
+ lambda *a, **k: FakeLoopingCall())
+
+ libvirt_utils.get_instance_path({}).AndReturn('/fake/foo')
+ os.path.exists('/fake/foo_resize').AndReturn(backup_made)
+ if backup_made:
+ if del_inst_failed:
+ os_error = OSError(errno.ENOENT, 'No such file or directory')
+ shutil.rmtree('/fake/foo').AndRaise(os_error)
+ else:
+ shutil.rmtree('/fake/foo')
+ utils.execute('mv', '/fake/foo_resize', '/fake/foo')
+
+ self.mox.ReplayAll()
+
+ self.libvirtconnection.finish_revert_migration(context, {}, [])
+
+ def test_finish_revert_migration_after_crash(self):
+ self._test_finish_revert_migration_after_crash(backup_made=True)
+
+ def test_finish_revert_migration_after_crash_before_new(self):
+ self._test_finish_revert_migration_after_crash(backup_made=True)
+
+ def test_finish_revert_migration_after_crash_before_backup(self):
+ self._test_finish_revert_migration_after_crash(backup_made=False)
+
+ def test_finish_revert_migration_after_crash_delete_failed(self):
+ self._test_finish_revert_migration_after_crash(backup_made=True,
+ del_inst_failed=True)
+
+ def test_cleanup_failed_migration(self):
+ self.mox.StubOutWithMock(shutil, 'rmtree')
+ shutil.rmtree('/fake/inst')
+ self.mox.ReplayAll()
+ self.libvirtconnection._cleanup_failed_migration('/fake/inst')
+
+ def test_confirm_migration(self):
+ ins_ref = self._create_instance()
+
+ self.mox.StubOutWithMock(self.libvirtconnection, "_cleanup_resize")
+ self.libvirtconnection._cleanup_resize(ins_ref,
+ _fake_network_info(self.stubs, 1))
+
+ self.mox.ReplayAll()
+ self.libvirtconnection.confirm_migration("migration_ref", ins_ref,
+ _fake_network_info(self.stubs, 1))
+
+ def test_cleanup_resize_same_host(self):
+ CONF.set_override('policy_dirs', [])
+ ins_ref = self._create_instance({'host': CONF.host})
+
+ def fake_os_path_exists(path):
+ return True
+
+ self.stubs.Set(os.path, 'exists', fake_os_path_exists)
+
+ self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ libvirt_utils.get_instance_path(ins_ref,
+ forceold=True).AndReturn('/fake/inst')
+ utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
+ attempts=5)
+
+ self.mox.ReplayAll()
+ self.libvirtconnection._cleanup_resize(ins_ref,
+ _fake_network_info(self.stubs, 1))
+
+ def test_cleanup_resize_not_same_host(self):
+ CONF.set_override('policy_dirs', [])
+ host = 'not' + CONF.host
+ ins_ref = self._create_instance({'host': host})
+
+ def fake_os_path_exists(path):
+ return True
+
+ def fake_undefine_domain(instance):
+ pass
+
+ def fake_unplug_vifs(instance, network_info, ignore_errors=False):
+ pass
+
+ def fake_unfilter_instance(instance, network_info):
+ pass
+
+ self.stubs.Set(os.path, 'exists', fake_os_path_exists)
+ self.stubs.Set(self.libvirtconnection, '_undefine_domain',
+ fake_undefine_domain)
+ self.stubs.Set(self.libvirtconnection, 'unplug_vifs',
+ fake_unplug_vifs)
+ self.stubs.Set(self.libvirtconnection.firewall_driver,
+ 'unfilter_instance', fake_unfilter_instance)
+
+ self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ libvirt_utils.get_instance_path(ins_ref,
+ forceold=True).AndReturn('/fake/inst')
+ utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
+ attempts=5)
+
+ self.mox.ReplayAll()
+ self.libvirtconnection._cleanup_resize(ins_ref,
+ _fake_network_info(self.stubs, 1))
+
+ def test_get_instance_disk_info_exception(self):
+ instance_name = "fake-instance-name"
+
+ class FakeExceptionDomain(FakeVirtDomain):
+ def __init__(self):
+ super(FakeExceptionDomain, self).__init__()
+
+ def XMLDesc(self, *args):
+ raise libvirt.libvirtError("Libvirt error")
+
+ def fake_lookup_by_name(instance_name):
+ return FakeExceptionDomain()
+
+ self.stubs.Set(self.libvirtconnection, '_lookup_by_name',
+ fake_lookup_by_name)
+ self.assertRaises(exception.InstanceNotFound,
+ self.libvirtconnection.get_instance_disk_info,
+ instance_name)
+
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.lvm.list_volumes')
+ def test_lvm_disks(self, listlvs, exists):
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+ self.flags(images_volume_group='vols', group='libvirt')
+ exists.return_value = True
+ listlvs.return_value = ['fake-uuid_foo',
+ 'other-uuid_foo']
+ disks = self.libvirtconnection._lvm_disks(instance)
+ self.assertEqual(['/dev/vols/fake-uuid_foo'], disks)
+
+ def test_is_booted_from_volume(self):
+ func = libvirt_driver.LibvirtDriver._is_booted_from_volume
+ instance, disk_mapping = {}, {}
+
+ self.assertTrue(func(instance, disk_mapping))
+ disk_mapping['disk'] = 'map'
+ self.assertTrue(func(instance, disk_mapping))
+
+ instance['image_ref'] = 'uuid'
+ self.assertFalse(func(instance, disk_mapping))
+
+ @mock.patch('nova.virt.netutils.get_injected_network_template')
+ @mock.patch('nova.virt.disk.api.inject_data')
+ def _test_inject_data(self, driver_params, disk_params,
+ disk_inject_data, inj_network,
+ called=True):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ class ImageBackend(object):
+ path = '/path'
+
+ def check_image_exists(self):
+ if self.path == '/fail/path':
+ return False
+ return True
+
+ def fake_inj_network(*args, **kwds):
+ return args[0] or None
+ inj_network.side_effect = fake_inj_network
+
+ image_backend = ImageBackend()
+ image_backend.path = disk_params[0]
+
+ with mock.patch.object(
+ conn.image_backend,
+ 'image',
+ return_value=image_backend):
+ self.flags(inject_partition=0, group='libvirt')
+
+ conn._inject_data(**driver_params)
+
+ if called:
+ disk_inject_data.assert_called_once_with(
+ *disk_params,
+ partition=None, mandatory=('files',), use_cow=True)
+
+ self.assertEqual(disk_inject_data.called, called)
+
+ def _test_inject_data_default_driver_params(self):
+ return {
+ 'instance': {
+ 'uuid': 'fake-uuid',
+ 'id': 1,
+ 'kernel_id': None,
+ 'image_ref': 1,
+ 'key_data': None,
+ 'metadata': None
+ },
+ 'network_info': None,
+ 'admin_pass': None,
+ 'files': None,
+ 'suffix': ''
+ }
+
+ def test_inject_data_adminpass(self):
+ self.flags(inject_password=True, group='libvirt')
+ driver_params = self._test_inject_data_default_driver_params()
+ driver_params['admin_pass'] = 'foobar'
+ disk_params = [
+ '/path', # injection_path
+ None, # key
+ None, # net
+ None, # metadata
+ 'foobar', # admin_pass
+ None, # files
+ ]
+ self._test_inject_data(driver_params, disk_params)
+
+ # Test with the configuration set to False.
+ self.flags(inject_password=False, group='libvirt')
+ self._test_inject_data(driver_params, disk_params, called=False)
+
+ def test_inject_data_key(self):
+ driver_params = self._test_inject_data_default_driver_params()
+ driver_params['instance']['key_data'] = 'key-content'
+
+ self.flags(inject_key=True, group='libvirt')
+ disk_params = [
+ '/path', # injection_path
+ 'key-content', # key
+ None, # net
+ None, # metadata
+ None, # admin_pass
+ None, # files
+ ]
+ self._test_inject_data(driver_params, disk_params)
+
+ # Test with the configuration set to False.
+ self.flags(inject_key=False, group='libvirt')
+ self._test_inject_data(driver_params, disk_params, called=False)
+
+ def test_inject_data_metadata(self):
+ driver_params = self._test_inject_data_default_driver_params()
+ driver_params['instance']['metadata'] = 'data'
+ disk_params = [
+ '/path', # injection_path
+ None, # key
+ None, # net
+ 'data', # metadata
+ None, # admin_pass
+ None, # files
+ ]
+ self._test_inject_data(driver_params, disk_params)
+
+ def test_inject_data_files(self):
+ driver_params = self._test_inject_data_default_driver_params()
+ driver_params['files'] = ['file1', 'file2']
+ disk_params = [
+ '/path', # injection_path
+ None, # key
+ None, # net
+ None, # metadata
+ None, # admin_pass
+ ['file1', 'file2'], # files
+ ]
+ self._test_inject_data(driver_params, disk_params)
+
+ def test_inject_data_net(self):
+ driver_params = self._test_inject_data_default_driver_params()
+ driver_params['network_info'] = {'net': 'eno1'}
+ disk_params = [
+ '/path', # injection_path
+ None, # key
+ {'net': 'eno1'}, # net
+ None, # metadata
+ None, # admin_pass
+ None, # files
+ ]
+ self._test_inject_data(driver_params, disk_params)
+
+ def test_inject_not_exist_image(self):
+ driver_params = self._test_inject_data_default_driver_params()
+ disk_params = [
+ '/fail/path', # injection_path
+ 'key-content', # key
+ None, # net
+ None, # metadata
+ None, # admin_pass
+ None, # files
+ ]
+ self._test_inject_data(driver_params, disk_params, called=False)
+
+ def _test_attach_detach_interface(self, method, power_state,
+ expected_flags):
+ instance = self._create_instance()
+ network_info = _fake_network_info(self.stubs, 1)
+ domain = FakeVirtDomain()
+ self.mox.StubOutWithMock(self.libvirtconnection, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.libvirtconnection.firewall_driver,
+ 'setup_basic_filtering')
+ self.mox.StubOutWithMock(domain, 'attachDeviceFlags')
+ self.mox.StubOutWithMock(domain, 'info')
+ self.mox.StubOutWithMock(objects.Flavor, 'get_by_id')
+
+ self.libvirtconnection._lookup_by_name(
+ 'instance-00000001').AndReturn(domain)
+ if method == 'attach_interface':
+ self.libvirtconnection.firewall_driver.setup_basic_filtering(
+ instance, [network_info[0]])
+
+ fake_flavor = instance.get_flavor()
+
+ objects.Flavor.get_by_id(mox.IgnoreArg(), 2).AndReturn(fake_flavor)
+
+ if method == 'attach_interface':
+ fake_image_meta = {'id': instance['image_ref']}
+ elif method == 'detach_interface':
+ fake_image_meta = None
+ expected = self.libvirtconnection.vif_driver.get_config(
+ instance, network_info[0], fake_image_meta, fake_flavor,
+ CONF.libvirt.virt_type)
+
+ self.mox.StubOutWithMock(self.libvirtconnection.vif_driver,
+ 'get_config')
+ self.libvirtconnection.vif_driver.get_config(
+ instance, network_info[0],
+ fake_image_meta,
+ mox.IsA(objects.Flavor),
+ CONF.libvirt.virt_type).AndReturn(expected)
+ domain.info().AndReturn([power_state])
+ if method == 'attach_interface':
+ domain.attachDeviceFlags(expected.to_xml(), expected_flags)
+ elif method == 'detach_interface':
+ domain.detachDeviceFlags(expected.to_xml(), expected_flags)
+
+ self.mox.ReplayAll()
+ if method == 'attach_interface':
+ self.libvirtconnection.attach_interface(
+ instance, fake_image_meta, network_info[0])
+ elif method == 'detach_interface':
+ self.libvirtconnection.detach_interface(
+ instance, network_info[0])
+ self.mox.VerifyAll()
+
+ def test_attach_interface_with_running_instance(self):
+ self._test_attach_detach_interface(
+ 'attach_interface', power_state.RUNNING,
+ expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG |
+ libvirt.VIR_DOMAIN_AFFECT_LIVE))
+
+ def test_attach_interface_with_pause_instance(self):
+ self._test_attach_detach_interface(
+ 'attach_interface', power_state.PAUSED,
+ expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG |
+ libvirt.VIR_DOMAIN_AFFECT_LIVE))
+
+ def test_attach_interface_with_shutdown_instance(self):
+ self._test_attach_detach_interface(
+ 'attach_interface', power_state.SHUTDOWN,
+ expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG))
+
+ def test_detach_interface_with_running_instance(self):
+ self._test_attach_detach_interface(
+ 'detach_interface', power_state.RUNNING,
+ expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG |
+ libvirt.VIR_DOMAIN_AFFECT_LIVE))
+
+ def test_detach_interface_with_pause_instance(self):
+ self._test_attach_detach_interface(
+ 'detach_interface', power_state.PAUSED,
+ expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG |
+ libvirt.VIR_DOMAIN_AFFECT_LIVE))
+
+ def test_detach_interface_with_shutdown_instance(self):
+ self._test_attach_detach_interface(
+ 'detach_interface', power_state.SHUTDOWN,
+ expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG))
+
+ def test_rescue(self):
+ instance = self._create_instance({'config_drive': None})
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices>"
+ "<disk type='file'><driver name='qemu' type='raw'/>"
+ "<source file='/test/disk'/>"
+ "<target dev='vda' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/test/disk.local'/>"
+ "<target dev='vdb' bus='virtio'/></disk>"
+ "</devices></domain>")
+ network_info = _fake_network_info(self.stubs, 1)
+
+ self.mox.StubOutWithMock(self.libvirtconnection,
+ '_get_existing_domain_xml')
+ self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
+ self.mox.StubOutWithMock(imagebackend.Backend, 'image')
+ self.mox.StubOutWithMock(imagebackend.Image, 'cache')
+ self.mox.StubOutWithMock(self.libvirtconnection, '_get_guest_xml')
+ self.mox.StubOutWithMock(self.libvirtconnection, '_destroy')
+ self.mox.StubOutWithMock(self.libvirtconnection, '_create_domain')
+
+ self.libvirtconnection._get_existing_domain_xml(mox.IgnoreArg(),
+ mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
+ libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
+ libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg())
+ imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
+ ).AndReturn(fake_imagebackend.Raw())
+ imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
+ ).AndReturn(fake_imagebackend.Raw())
+ imagebackend.Backend.image(instance, 'disk.rescue', 'default'
+ ).AndReturn(fake_imagebackend.Raw())
+ imagebackend.Image.cache(context=mox.IgnoreArg(),
+ fetch_func=mox.IgnoreArg(),
+ filename=mox.IgnoreArg(),
+ image_id=mox.IgnoreArg(),
+ project_id=mox.IgnoreArg(),
+ user_id=mox.IgnoreArg()).MultipleTimes()
+
+ imagebackend.Image.cache(context=mox.IgnoreArg(),
+ fetch_func=mox.IgnoreArg(),
+ filename=mox.IgnoreArg(),
+ image_id=mox.IgnoreArg(),
+ project_id=mox.IgnoreArg(),
+ size=None, user_id=mox.IgnoreArg())
+
+ image_meta = {'id': 'fake', 'name': 'fake'}
+ self.libvirtconnection._get_guest_xml(mox.IgnoreArg(), instance,
+ network_info, mox.IgnoreArg(),
+ image_meta, rescue=mox.IgnoreArg(),
+ write_to_disk=mox.IgnoreArg()
+ ).AndReturn(dummyxml)
+
+ self.libvirtconnection._destroy(instance)
+ self.libvirtconnection._create_domain(mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ rescue_password = 'fake_password'
+
+ self.libvirtconnection.rescue(self.context, instance,
+ network_info, image_meta, rescue_password)
+ self.mox.VerifyAll()
+
+ def test_rescue_config_drive(self):
+ instance = self._create_instance()
+ uuid = instance.uuid
+ configdrive_path = uuid + '/disk.config.rescue'
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices>"
+ "<disk type='file'><driver name='qemu' type='raw'/>"
+ "<source file='/test/disk'/>"
+ "<target dev='vda' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/test/disk.local'/>"
+ "<target dev='vdb' bus='virtio'/></disk>"
+ "</devices></domain>")
+ network_info = _fake_network_info(self.stubs, 1)
+
+ self.mox.StubOutWithMock(self.libvirtconnection,
+ '_get_existing_domain_xml')
+ self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
+ self.mox.StubOutWithMock(imagebackend.Backend, 'image')
+ self.mox.StubOutWithMock(imagebackend.Image, 'cache')
+ self.mox.StubOutWithMock(instance_metadata.InstanceMetadata,
+ '__init__')
+ self.mox.StubOutWithMock(configdrive, 'ConfigDriveBuilder')
+ self.mox.StubOutWithMock(configdrive.ConfigDriveBuilder, 'make_drive')
+ self.mox.StubOutWithMock(self.libvirtconnection, '_get_guest_xml')
+ self.mox.StubOutWithMock(self.libvirtconnection, '_destroy')
+ self.mox.StubOutWithMock(self.libvirtconnection, '_create_domain')
+
+ self.libvirtconnection._get_existing_domain_xml(mox.IgnoreArg(),
+ mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
+ libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
+ libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg())
+
+ imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
+ ).AndReturn(fake_imagebackend.Raw())
+ imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
+ ).AndReturn(fake_imagebackend.Raw())
+ imagebackend.Backend.image(instance, 'disk.rescue', 'default'
+ ).AndReturn(fake_imagebackend.Raw())
+
+ imagebackend.Image.cache(context=mox.IgnoreArg(),
+ fetch_func=mox.IgnoreArg(),
+ filename=mox.IgnoreArg(),
+ image_id=mox.IgnoreArg(),
+ project_id=mox.IgnoreArg(),
+ user_id=mox.IgnoreArg()).MultipleTimes()
+
+ imagebackend.Image.cache(context=mox.IgnoreArg(),
+ fetch_func=mox.IgnoreArg(),
+ filename=mox.IgnoreArg(),
+ image_id=mox.IgnoreArg(),
+ project_id=mox.IgnoreArg(),
+ size=None, user_id=mox.IgnoreArg())
+
+ instance_metadata.InstanceMetadata.__init__(mox.IgnoreArg(),
+ content=mox.IgnoreArg(),
+ extra_md=mox.IgnoreArg(),
+ network_info=mox.IgnoreArg())
+ cdb = self.mox.CreateMockAnything()
+ m = configdrive.ConfigDriveBuilder(instance_md=mox.IgnoreArg())
+ m.AndReturn(cdb)
+ # __enter__ and __exit__ are required by "with"
+ cdb.__enter__().AndReturn(cdb)
+ cdb.make_drive(mox.Regex(configdrive_path))
+ cdb.__exit__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()
+ ).AndReturn(None)
+ image_meta = {'id': 'fake', 'name': 'fake'}
+ self.libvirtconnection._get_guest_xml(mox.IgnoreArg(), instance,
+ network_info, mox.IgnoreArg(),
+ image_meta, rescue=mox.IgnoreArg(),
+ write_to_disk=mox.IgnoreArg()
+ ).AndReturn(dummyxml)
+ self.libvirtconnection._destroy(instance)
+ self.libvirtconnection._create_domain(mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ rescue_password = 'fake_password'
+
+ self.libvirtconnection.rescue(self.context, instance, network_info,
+ image_meta, rescue_password)
+ self.mox.VerifyAll()
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files(self, get_instance_path, exists, exe,
+ shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
+ exists.side_effect = [False, False, True, False]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ exe.assert_called_with('mv', '/path', '/path_del')
+ shutil.assert_called_with('/path_del')
+ self.assertTrue(result)
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files_resize(self, get_instance_path, exists,
+ exe, shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
+ nova.utils.execute.side_effect = [Exception(), None]
+ exists.side_effect = [False, False, True, False]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ expected = [mock.call('mv', '/path', '/path_del'),
+ mock.call('mv', '/path_resize', '/path_del')]
+ self.assertEqual(expected, exe.mock_calls)
+ shutil.assert_called_with('/path_del')
+ self.assertTrue(result)
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files_failed(self, get_instance_path, exists, exe,
+ shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
+ exists.side_effect = [False, False, True, True]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ exe.assert_called_with('mv', '/path', '/path_del')
+ shutil.assert_called_with('/path_del')
+ self.assertFalse(result)
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files_mv_failed(self, get_instance_path, exists,
+ exe, shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
+ nova.utils.execute.side_effect = Exception()
+ exists.side_effect = [True, True]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ expected = [mock.call('mv', '/path', '/path_del'),
+ mock.call('mv', '/path_resize', '/path_del')] * 2
+ self.assertEqual(expected, exe.mock_calls)
+ self.assertFalse(result)
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files_resume(self, get_instance_path, exists,
+ exe, shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
+ nova.utils.execute.side_effect = Exception()
+ exists.side_effect = [False, False, True, False]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ expected = [mock.call('mv', '/path', '/path_del'),
+ mock.call('mv', '/path_resize', '/path_del')] * 2
+ self.assertEqual(expected, exe.mock_calls)
+ self.assertTrue(result)
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files_none(self, get_instance_path, exists,
+ exe, shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
+ nova.utils.execute.side_effect = Exception()
+ exists.side_effect = [False, False, False, False]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ expected = [mock.call('mv', '/path', '/path_del'),
+ mock.call('mv', '/path_resize', '/path_del')] * 2
+ self.assertEqual(expected, exe.mock_calls)
+ self.assertEqual(0, len(shutil.mock_calls))
+ self.assertTrue(result)
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files_concurrent(self, get_instance_path, exists,
+ exe, shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
+ nova.utils.execute.side_effect = [Exception(), Exception(), None]
+ exists.side_effect = [False, False, True, False]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ expected = [mock.call('mv', '/path', '/path_del'),
+ mock.call('mv', '/path_resize', '/path_del')]
+ expected.append(expected[0])
+ self.assertEqual(expected, exe.mock_calls)
+ shutil.assert_called_with('/path_del')
+ self.assertTrue(result)
+
+ def _assert_on_id_map(self, idmap, klass, start, target, count):
+ self.assertIsInstance(idmap, klass)
+ self.assertEqual(start, idmap.start)
+ self.assertEqual(target, idmap.target)
+ self.assertEqual(count, idmap.count)
+
+ def test_get_id_maps(self):
+ self.flags(virt_type="lxc", group="libvirt")
+ CONF.libvirt.virt_type = "lxc"
+ CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
+ CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ idmaps = conn._get_guest_idmaps()
+
+ self.assertEqual(len(idmaps), 4)
+ self._assert_on_id_map(idmaps[0],
+ vconfig.LibvirtConfigGuestUIDMap,
+ 0, 10000, 1)
+ self._assert_on_id_map(idmaps[1],
+ vconfig.LibvirtConfigGuestUIDMap,
+ 1, 20000, 10)
+ self._assert_on_id_map(idmaps[2],
+ vconfig.LibvirtConfigGuestGIDMap,
+ 0, 10000, 1)
+ self._assert_on_id_map(idmaps[3],
+ vconfig.LibvirtConfigGuestGIDMap,
+ 1, 20000, 10)
+
+ def test_get_id_maps_not_lxc(self):
+ CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
+ CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ idmaps = conn._get_guest_idmaps()
+
+ self.assertEqual(0, len(idmaps))
+
+ def test_get_id_maps_only_uid(self):
+ self.flags(virt_type="lxc", group="libvirt")
+ CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
+ CONF.libvirt.gid_maps = []
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ idmaps = conn._get_guest_idmaps()
+
+ self.assertEqual(2, len(idmaps))
+ self._assert_on_id_map(idmaps[0],
+ vconfig.LibvirtConfigGuestUIDMap,
+ 0, 10000, 1)
+ self._assert_on_id_map(idmaps[1],
+ vconfig.LibvirtConfigGuestUIDMap,
+ 1, 20000, 10)
+
+ def test_get_id_maps_only_gid(self):
+ self.flags(virt_type="lxc", group="libvirt")
+ CONF.libvirt.uid_maps = []
+ CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ idmaps = conn._get_guest_idmaps()
+
+ self.assertEqual(2, len(idmaps))
+ self._assert_on_id_map(idmaps[0],
+ vconfig.LibvirtConfigGuestGIDMap,
+ 0, 10000, 1)
+ self._assert_on_id_map(idmaps[1],
+ vconfig.LibvirtConfigGuestGIDMap,
+ 1, 20000, 10)
+
+ def test_instance_on_disk(self):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+ self.assertFalse(conn.instance_on_disk(instance))
+
+ def test_instance_on_disk_rbd(self):
+ self.flags(images_type='rbd', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+ self.assertTrue(conn.instance_on_disk(instance))
+
+ @mock.patch("nova.objects.Flavor.get_by_id")
+ @mock.patch("nova.compute.utils.get_image_metadata")
+ def test_prepare_args_for_get_config(self, mock_image, mock_get):
+ instance = self._create_instance()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ def fake_get_by_id(context, id):
+ self.assertEqual('yes', context.read_deleted)
+
+ mock_get.side_effect = fake_get_by_id
+
+ conn._prepare_args_for_get_config(self.context, instance)
+
+ mock_get.assert_called_once_with(self.context,
+ instance['instance_type_id'])
+
+
+class LibvirtVolumeUsageTestCase(test.NoDBTestCase):
+ """Test for LibvirtDriver.get_all_volume_usage."""
+
+ def setUp(self):
+ super(LibvirtVolumeUsageTestCase, self).setUp()
+ self.conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.c = context.get_admin_context()
+
+ self.ins_ref = objects.Instance(
+ id=1729,
+ uuid='875a8070-d0b9-4949-8b31-104d125c9a64'
+ )
+
+ # verify bootable volume device path also
+ self.bdms = [{'volume_id': 1,
+ 'device_name': '/dev/vde'},
+ {'volume_id': 2,
+ 'device_name': 'vda'}]
+
+ def test_get_all_volume_usage(self):
+ def fake_block_stats(instance_name, disk):
+ return (169L, 688640L, 0L, 0L, -1L)
+
+ self.stubs.Set(self.conn, 'block_stats', fake_block_stats)
+ vol_usage = self.conn.get_all_volume_usage(self.c,
+ [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
+
+ expected_usage = [{'volume': 1,
+ 'instance': self.ins_ref,
+ 'rd_bytes': 688640L, 'wr_req': 0L,
+ 'flush_operations': -1L, 'rd_req': 169L,
+ 'wr_bytes': 0L},
+ {'volume': 2,
+ 'instance': self.ins_ref,
+ 'rd_bytes': 688640L, 'wr_req': 0L,
+ 'flush_operations': -1L, 'rd_req': 169L,
+ 'wr_bytes': 0L}]
+ self.assertEqual(vol_usage, expected_usage)
+
+ def test_get_all_volume_usage_device_not_found(self):
+ def fake_lookup(instance_name):
+ raise libvirt.libvirtError('invalid path')
+
+ self.stubs.Set(self.conn, '_lookup_by_name', fake_lookup)
+ vol_usage = self.conn.get_all_volume_usage(self.c,
+ [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
+ self.assertEqual(vol_usage, [])
+
+
+class LibvirtNonblockingTestCase(test.NoDBTestCase):
+ """Test libvirtd calls are nonblocking."""
+
+ def setUp(self):
+ super(LibvirtNonblockingTestCase, self).setUp()
+ self.flags(connection_uri="test:///default",
+ group='libvirt')
+
+ def test_connection_to_primitive(self):
+ # Test bug 962840.
+ import nova.virt.libvirt.driver as libvirt_driver
+ connection = libvirt_driver.LibvirtDriver('')
+ connection.set_host_enabled = mock.Mock()
+ jsonutils.to_primitive(connection._conn, convert_instances=True)
+
+ def test_tpool_execute_calls_libvirt(self):
+ conn = libvirt.virConnect()
+ conn.is_expected = True
+
+ self.mox.StubOutWithMock(eventlet.tpool, 'execute')
+ eventlet.tpool.execute(
+ libvirt.openAuth,
+ 'test:///default',
+ mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(conn)
+ eventlet.tpool.execute(
+ conn.domainEventRegisterAny,
+ None,
+ libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ if hasattr(libvirt.virConnect, 'registerCloseCallback'):
+ eventlet.tpool.execute(
+ conn.registerCloseCallback,
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ c = driver._get_connection()
+ self.assertEqual(True, c.is_expected)
+
+
class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
    """Tests for libvirtDriver.volume_snapshot_create/delete.

    Covers the internal helpers (_volume_snapshot_create/_delete and
    _volume_refresh_connection_info) as well as the public entry points,
    for both file-backed disks and network-backed (gluster) disks with a
    backing-store snapshot chain.
    """

    def setUp(self):
        super(LibvirtVolumeSnapshotTestCase, self).setUp()

        self.conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()

        self.flags(instance_name_template='instance-%s')
        self.flags(qemu_allowed_storage_drivers=[], group='libvirt')

        # creating instance
        self.inst = {}
        self.inst['uuid'] = uuidutils.generate_uuid()
        self.inst['id'] = '1'

        # create domain info
        self.dom_xml = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                     <source file='disk1_file'/>
                     <target dev='vda' bus='virtio'/>
                     <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                  <disk type='block'>
                    <source dev='/path/to/dev/1'/>
                    <target dev='vdb' bus='virtio' serial='1234'/>
                  </disk>
                </devices>
              </domain>"""

        # alternate domain info with network-backed snapshot chain
        self.dom_netdisk_xml = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                   <source file='disk1_file'/>
                   <target dev='vda' bus='virtio'/>
                   <serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
                  </disk>
                  <disk type='network' device='disk'>
                  <driver name='qemu' type='qcow2'/>
                  <source protocol='gluster' name='vol1/root.img'>
                    <host name='server1' port='24007'/>
                  </source>
                  <backingStore type='network' index='1'>
                    <driver name='qemu' type='qcow2'/>
                    <source protocol='gluster' name='vol1/snap.img'>
                      <host name='server1' port='24007'/>
                    </source>
                    <backingStore type='network' index='2'>
                      <driver name='qemu' type='qcow2'/>
                      <source protocol='gluster' name='vol1/snap-b.img'>
                        <host name='server1' port='24007'/>
                      </source>
                      <backingStore/>
                    </backingStore>
                  </backingStore>
                  <target dev='vdb' bus='virtio'/>
                  <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                </devices>
              </domain>
        """

        self.create_info = {'type': 'qcow2',
                            'snapshot_id': '1234-5678',
                            'new_file': 'new-file'}

        self.volume_uuid = '0e38683e-f0af-418f-a3f1-6b67ea0f919d'
        self.snapshot_id = '9c3ca9f4-9f4e-4dba-bedd-5c5e4b52b162'

        # delete_info_1: merging the newest snapshot (no rebase target).
        self.delete_info_1 = {'type': 'qcow2',
                              'file_to_merge': 'snap.img',
                              'merge_target_file': None}

        # delete_info_2: merging an older snapshot into a named target.
        self.delete_info_2 = {'type': 'qcow2',
                              'file_to_merge': 'snap.img',
                              'merge_target_file': 'other-snap.img'}

        self.delete_info_netdisk = {'type': 'qcow2',
                                    'file_to_merge': 'snap.img',
                                    'merge_target_file': 'root.img'}

        self.delete_info_invalid_type = {'type': 'made_up_type',
                                         'file_to_merge': 'some_file',
                                         'merge_target_file':
                                             'some_other_file'}

    # NOTE: the previous no-op tearDown override (which only called
    # super()) has been removed; the base class tearDown runs regardless.

    @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.'
                'refresh_connection_info')
    @mock.patch('nova.objects.block_device.BlockDeviceMapping.'
                'get_by_volume_id')
    def test_volume_refresh_connection_info(self, mock_get_by_volume_id,
                                            mock_refresh_connection_info):
        """The helper looks up the BDM by volume id and refreshes it."""
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
            'id': 123,
            'instance_uuid': 'fake-instance',
            'device_name': '/dev/sdb',
            'source_type': 'volume',
            'destination_type': 'volume',
            'volume_id': 'fake-volume-id-1',
            'connection_info': '{"fake": "connection_info"}'})
        mock_get_by_volume_id.return_value = fake_bdm

        self.conn._volume_refresh_connection_info(self.c, self.inst,
                                                  self.volume_uuid)

        mock_get_by_volume_id.assert_called_once_with(self.c, self.volume_uuid)
        mock_refresh_connection_info.assert_called_once_with(self.c, self.inst,
            self.conn._volume_api, self.conn)

    def test_volume_snapshot_create(self, quiesce=True):
        """Test snapshot creation with file-based disk."""
        self.flags(instance_name_template='instance-%s')
        self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
        self.mox.StubOutWithMock(self.conn, '_volume_api')

        instance = objects.Instance(**self.inst)

        new_file = 'new-file'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
        domain.XMLDesc(0).AndReturn(self.dom_xml)

        snap_xml_src = (
           '<domainsnapshot>\n'
           '  <disks>\n'
           '    <disk name="disk1_file" snapshot="external" type="file">\n'
           '      <source file="new-file"/>\n'
           '    </disk>\n'
           '    <disk name="vdb" snapshot="no"/>\n'
           '  </disks>\n'
           '</domainsnapshot>\n')

        # Older versions of libvirt may be missing these.
        libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
        libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64

        snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
                      libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
                      libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)

        snap_flags_q = snap_flags | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE

        # The driver first tries a quiesced snapshot; if the guest agent is
        # unavailable it falls back to a non-quiesced one.
        if quiesce:
            domain.snapshotCreateXML(snap_xml_src, snap_flags_q)
        else:
            domain.snapshotCreateXML(snap_xml_src, snap_flags_q).\
                AndRaise(libvirt.libvirtError('quiescing failed, no qemu-ga'))
            domain.snapshotCreateXML(snap_xml_src, snap_flags).AndReturn(0)

        self.mox.ReplayAll()

        self.conn._volume_snapshot_create(self.c, instance, domain,
                                          self.volume_uuid, new_file)

        self.mox.VerifyAll()

    def test_volume_snapshot_create_libgfapi(self, quiesce=True):
        """Test snapshot creation with libgfapi network disk."""
        self.flags(instance_name_template='instance-%s')
        self.flags(qemu_allowed_storage_drivers=['gluster'], group='libvirt')
        self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
        self.mox.StubOutWithMock(self.conn, '_volume_api')

        self.dom_xml = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                     <source file='disk1_file'/>
                     <target dev='vda' bus='virtio'/>
                     <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                  <disk type='block'>
                    <source protocol='gluster' name='gluster1/volume-1234'>
                      <host name='127.3.4.5' port='24007'/>
                    </source>
                    <target dev='vdb' bus='virtio' serial='1234'/>
                  </disk>
                </devices>
              </domain>"""

        instance = objects.Instance(**self.inst)

        new_file = 'new-file'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
        domain.XMLDesc(0).AndReturn(self.dom_xml)

        snap_xml_src = (
           '<domainsnapshot>\n'
           '  <disks>\n'
           '    <disk name="disk1_file" snapshot="external" type="file">\n'
           '      <source file="new-file"/>\n'
           '    </disk>\n'
           '    <disk name="vdb" snapshot="no"/>\n'
           '  </disks>\n'
           '</domainsnapshot>\n')

        # Older versions of libvirt may be missing these.
        libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
        libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64

        snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
                      libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
                      libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)

        snap_flags_q = snap_flags | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE

        # Quiesced attempt first; fall back without quiesce on failure.
        if quiesce:
            domain.snapshotCreateXML(snap_xml_src, snap_flags_q)
        else:
            domain.snapshotCreateXML(snap_xml_src, snap_flags_q).\
                AndRaise(libvirt.libvirtError('quiescing failed, no qemu-ga'))
            domain.snapshotCreateXML(snap_xml_src, snap_flags).AndReturn(0)

        self.mox.ReplayAll()

        self.conn._volume_snapshot_create(self.c, instance, domain,
                                          self.volume_uuid, new_file)

        self.mox.VerifyAll()

    def test_volume_snapshot_create_noquiesce(self):
        """Creation still succeeds when quiescing fails (no qemu-ga)."""
        self.test_volume_snapshot_create(quiesce=False)

    def test_volume_snapshot_create_outer_success(self):
        """Public volume_snapshot_create marks the snapshot 'creating'."""
        instance = objects.Instance(**self.inst)

        domain = FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
        self.mox.StubOutWithMock(self.conn, '_volume_api')
        self.mox.StubOutWithMock(self.conn, '_volume_snapshot_create')

        self.conn._lookup_by_name('instance-1').AndReturn(domain)

        self.conn._volume_snapshot_create(self.c,
                                          instance,
                                          domain,
                                          self.volume_uuid,
                                          self.create_info['new_file'])

        self.conn._volume_api.update_snapshot_status(
            self.c, self.create_info['snapshot_id'], 'creating')

        self.mox.StubOutWithMock(self.conn._volume_api, 'get_snapshot')
        self.conn._volume_api.get_snapshot(self.c,
            self.create_info['snapshot_id']).AndReturn({'status': 'available'})
        self.mox.StubOutWithMock(self.conn, '_volume_refresh_connection_info')
        self.conn._volume_refresh_connection_info(self.c, instance,
                                                  self.volume_uuid)

        self.mox.ReplayAll()

        self.conn.volume_snapshot_create(self.c, instance, self.volume_uuid,
                                         self.create_info)

    def test_volume_snapshot_create_outer_failure(self):
        """Snapshot status is set to 'error' when the inner create fails."""
        instance = objects.Instance(**self.inst)

        domain = FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
        self.mox.StubOutWithMock(self.conn, '_volume_api')
        self.mox.StubOutWithMock(self.conn, '_volume_snapshot_create')

        self.conn._lookup_by_name('instance-1').AndReturn(domain)

        self.conn._volume_snapshot_create(self.c,
                                          instance,
                                          domain,
                                          self.volume_uuid,
                                          self.create_info['new_file']).\
            AndRaise(exception.NovaException('oops'))

        self.conn._volume_api.update_snapshot_status(
            self.c, self.create_info['snapshot_id'], 'error')

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.conn.volume_snapshot_create,
                          self.c,
                          instance,
                          self.volume_uuid,
                          self.create_info)

    def test_volume_snapshot_delete_1(self):
        """Deleting newest snapshot -- blockRebase."""

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(0).AndReturn(self.dom_xml)

        self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
        self.mox.StubOutWithMock(self.conn, '_has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.conn._lookup_by_name('instance-%s' % instance['id']).\
            AndReturn(domain)
        self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)

        domain.blockRebase('vda', 'snap.img', 0, 0)

        # Two polls: job in progress, then complete (cur == end).
        domain.blockJobInfo('vda', 0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vda', 0).AndReturn({'cur': 1000, 'end': 1000})

        self.mox.ReplayAll()

        self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_1)

        self.mox.VerifyAll()

    def test_volume_snapshot_delete_2(self):
        """Deleting older snapshot -- blockCommit."""

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(0).AndReturn(self.dom_xml)

        self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
        self.mox.StubOutWithMock(self.conn, '_has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.conn._lookup_by_name('instance-%s' % instance['id']).\
            AndReturn(domain)
        self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)

        domain.blockCommit('vda', 'other-snap.img', 'snap.img', 0, 0)

        # Second poll returns {} -- an empty job info also means finished.
        domain.blockJobInfo('vda', 0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vda', 0).AndReturn({})

        self.mox.ReplayAll()

        self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_2)

        self.mox.VerifyAll()

    def test_volume_snapshot_delete_outer_success(self):
        """Public volume_snapshot_delete marks the snapshot 'deleting'."""
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
        self.mox.StubOutWithMock(self.conn, '_volume_api')
        self.mox.StubOutWithMock(self.conn, '_volume_snapshot_delete')

        self.conn._volume_snapshot_delete(self.c,
                                          instance,
                                          self.volume_uuid,
                                          snapshot_id,
                                          delete_info=self.delete_info_1)

        self.conn._volume_api.update_snapshot_status(
            self.c, snapshot_id, 'deleting')

        self.mox.StubOutWithMock(self.conn, '_volume_refresh_connection_info')
        self.conn._volume_refresh_connection_info(self.c, instance,
                                                  self.volume_uuid)

        self.mox.ReplayAll()

        self.conn.volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                         snapshot_id,
                                         self.delete_info_1)

        self.mox.VerifyAll()

    def test_volume_snapshot_delete_outer_failure(self):
        """Snapshot status is 'error_deleting' when the inner delete fails."""
        instance = objects.Instance(**self.inst)
        snapshot_id = '1234-9876'

        FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
        self.mox.StubOutWithMock(self.conn, '_volume_api')
        self.mox.StubOutWithMock(self.conn, '_volume_snapshot_delete')

        self.conn._volume_snapshot_delete(self.c,
                                          instance,
                                          self.volume_uuid,
                                          snapshot_id,
                                          delete_info=self.delete_info_1).\
            AndRaise(exception.NovaException('oops'))

        self.conn._volume_api.update_snapshot_status(
            self.c, snapshot_id, 'error_deleting')

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.conn.volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          snapshot_id,
                          self.delete_info_1)

        self.mox.VerifyAll()

    def test_volume_snapshot_delete_invalid_type(self):
        """An unknown delete_info type raises and flags 'error_deleting'."""
        instance = objects.Instance(**self.inst)

        FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
        self.mox.StubOutWithMock(self.conn, '_volume_api')
        self.mox.StubOutWithMock(self.conn, '_has_min_version')

        self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)

        self.conn._volume_api.update_snapshot_status(
            self.c, self.snapshot_id, 'error_deleting')

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.conn.volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          self.snapshot_id,
                          self.delete_info_invalid_type)

    def test_volume_snapshot_delete_netdisk_1(self):
        """Delete newest snapshot -- blockRebase for libgfapi/network disk."""

        class FakeNetdiskDomain(FakeVirtDomain):
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

            def XMLDesc(self, *args):
                return self.dom_netdisk_xml

        # Ensure the libvirt lib has VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(0).AndReturn(self.dom_netdisk_xml)

        self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
        self.mox.StubOutWithMock(self.conn, '_has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.conn._lookup_by_name('instance-%s' % instance['id']).\
            AndReturn(domain)
        self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)

        # Network disks are addressed by backing-chain index ('vdb[1]').
        domain.blockRebase('vdb', 'vdb[1]', 0, 0)

        domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1000, 'end': 1000})

        self.mox.ReplayAll()

        self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_1)

        self.mox.VerifyAll()

    def test_volume_snapshot_delete_netdisk_2(self):
        """Delete older snapshot -- blockCommit for libgfapi/network disk."""

        class FakeNetdiskDomain(FakeVirtDomain):
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

            def XMLDesc(self, *args):
                return self.dom_netdisk_xml

        # Ensure the libvirt lib has VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(0).AndReturn(self.dom_netdisk_xml)

        self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
        self.mox.StubOutWithMock(self.conn, '_has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.conn._lookup_by_name('instance-%s' % instance['id']).\
            AndReturn(domain)
        self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)

        # Relative commit keeps backing-file references valid in the chain.
        domain.blockCommit('vdb', 'vdb[0]', 'vdb[1]', 0,
                           fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)

        domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1000, 'end': 1000})

        self.mox.ReplayAll()

        self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id,
                                          self.delete_info_netdisk)

        self.mox.VerifyAll()
diff --git a/nova/tests/unit/virt/libvirt/test_fakelibvirt.py b/nova/tests/unit/virt/libvirt/test_fakelibvirt.py
new file mode 100644
index 0000000000..7a6d020426
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_fakelibvirt.py
@@ -0,0 +1,386 @@
+# Copyright 2010 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+
+from lxml import etree
+
+from nova.compute import arch
+import nova.tests.unit.virt.libvirt.fakelibvirt as libvirt
+
+
def get_vm_xml(name="testname", uuid=None, source_type='file',
               interface_type='bridge'):
    """Build a minimal KVM domain XML document for fakelibvirt tests.

    :param name: domain name embedded in the <name> element
    :param uuid: optional domain UUID; when falsy, no <uuid> element is
                 emitted at all
    :param source_type: attribute name used on the disk <source> element
                        (e.g. 'file' or 'dev')
    :param interface_type: interface type, also reused as the attribute
                           name on the interface's <source> element
    :returns: the rendered domain XML as a string
    """
    # Emit the <uuid> element only when a uuid was supplied.
    uuid_tag = '<uuid>%s</uuid>' % (uuid,) if uuid else ''

    template = '''<domain type='kvm'>
  <name>%(name)s</name>
%(uuid_tag)s
  <memory>128000</memory>
  <vcpu>1</vcpu>
  <os>
    <type>hvm</type>
    <kernel>/somekernel</kernel>
    <cmdline>root=/dev/sda</cmdline>
    <boot dev='hd'/>
  </os>
  <features>
    <acpi/>
  </features>
  <devices>
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2'/>
      <source %(source_type)s='/somefile'/>
      <target dev='vda' bus='virtio'/>
    </disk>
    <interface type='%(interface_type)s'>
      <mac address='05:26:3e:31:28:1f'/>
      <source %(interface_type)s='br100'/>
    </interface>
    <input type='mouse' bus='ps2'/>
    <graphics type='vnc' port='5901' autoport='yes' keymap='en-us'/>
    <graphics type='spice' port='5901' autoport='yes' keymap='en-us'/>
  </devices>
</domain>'''

    substitutions = {
        'name': name,
        'uuid_tag': uuid_tag,
        'source_type': source_type,
        'interface_type': interface_type,
    }
    return template % substitutions
+
+
class FakeLibvirtTests(test.NoDBTestCase):
    """Sanity tests for the fakelibvirt test double.

    Verifies that nova.tests.unit.virt.libvirt.fakelibvirt behaves like
    the real libvirt bindings for the operations the driver tests rely
    on (connections, domain define/create/lookup, nwfilters, CPU
    comparison, ...).
    """

    def tearDown(self):
        super(FakeLibvirtTests, self).tearDown()
        # Discard all module-level fake state so tests stay independent.
        libvirt._reset()

    def get_openAuth_curry_func(self, readOnly=False):
        """Return a one-argument connect function wrapping openAuth."""
        def fake_cb(credlist):
            return 0

        creds = [[libvirt.VIR_CRED_AUTHNAME,
                  libvirt.VIR_CRED_NOECHOPROMPT],
                  fake_cb,
                  None]
        flags = 0
        if readOnly:
            flags = libvirt.VIR_CONNECT_RO
        return lambda uri: libvirt.openAuth(uri, creds, flags)

    def test_openAuth_accepts_None_uri_by_default(self):
        conn_method = self.get_openAuth_curry_func()
        conn = conn_method(None)
        self.assertIsNotNone(conn, "Connecting to fake libvirt failed")

    def test_openAuth_can_refuse_None_uri(self):
        conn_method = self.get_openAuth_curry_func()
        libvirt.allow_default_uri_connection = False
        self.addCleanup(libvirt._reset)
        self.assertRaises(ValueError, conn_method, None)

    def test_openAuth_refuses_invalid_URI(self):
        conn_method = self.get_openAuth_curry_func()
        self.assertRaises(libvirt.libvirtError, conn_method, 'blah')

    def test_getInfo(self):
        """getInfo must report a plausible host topology."""
        conn_method = self.get_openAuth_curry_func(readOnly=True)
        res = conn_method(None).getInfo()
        self.assertIn(res[0], (arch.I686, arch.X86_64))
        self.assertTrue(1024 <= res[1] <= 16384,
                        "Memory unusually high or low.")
        self.assertTrue(1 <= res[2] <= 32,
                        "Active CPU count unusually high or low.")
        self.assertTrue(800 <= res[3] <= 4500,
                        "CPU speed unusually high or low.")
        self.assertTrue(res[2] <= (res[5] * res[6]),
                        "More active CPUs than num_sockets*cores_per_socket")

    def test_createXML_detects_invalid_xml(self):
        self._test_XML_func_detects_invalid_xml('createXML', [0])

    def test_defineXML_detects_invalid_xml(self):
        self._test_XML_func_detects_invalid_xml('defineXML', [])

    def _test_XML_func_detects_invalid_xml(self, xmlfunc_name, args):
        """Helper: the named XML-taking API must reject malformed XML."""
        conn = self.get_openAuth_curry_func()('qemu:///system')
        try:
            getattr(conn, xmlfunc_name)("this is not valid </xml>", *args)
        except libvirt.libvirtError as e:
            self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_XML_DETAIL)
            self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_DOMAIN)
            return
        self.fail("Invalid XML didn't raise libvirtError")

    def test_defineXML_defines_domain(self):
        conn = self.get_openAuth_curry_func()('qemu:///system')
        conn.defineXML(get_vm_xml())
        dom = conn.lookupByName('testname')
        self.assertEqual('testname', dom.name())
        self.assertEqual(0, dom.isActive())
        dom.undefine()
        self.assertRaises(libvirt.libvirtError,
                          conn.lookupByName,
                          'testname')

    def test_blockStats(self):
        conn = self.get_openAuth_curry_func()('qemu:///system')
        conn.createXML(get_vm_xml(), 0)
        dom = conn.lookupByName('testname')
        blockstats = dom.blockStats('vda')
        self.assertEqual(len(blockstats), 5)
        for x in blockstats:
            # All five counters must be integral (py2 int or long).
            self.assertIsInstance(x, (int, long))

    def test_attach_detach(self):
        conn = self.get_openAuth_curry_func()('qemu:///system')
        conn.createXML(get_vm_xml(), 0)
        dom = conn.lookupByName('testname')
        xml = '''<disk type='block'>
                   <driver name='qemu' type='raw'/>
                   <source dev='/dev/nbd0'/>
                   <target dev='/dev/vdc' bus='virtio'/>
                 </disk>'''
        self.assertTrue(dom.attachDevice(xml))
        self.assertTrue(dom.detachDevice(xml))

    def test_info(self):
        conn = self.get_openAuth_curry_func()('qemu:///system')
        conn.createXML(get_vm_xml(), 0)
        dom = conn.lookupByName('testname')
        info = dom.info()
        self.assertEqual(info[0], libvirt.VIR_DOMAIN_RUNNING)
        self.assertEqual(info[1], 128000)
        self.assertTrue(info[2] <= 128000)
        self.assertEqual(info[3], 1)
        # CPU time must be integral (py2 int or long).
        self.assertIsInstance(info[4], (int, long))

    def test_createXML_runs_domain(self):
        conn = self.get_openAuth_curry_func()('qemu:///system')
        conn.createXML(get_vm_xml(), 0)
        dom = conn.lookupByName('testname')
        self.assertEqual('testname', dom.name())
        self.assertEqual(1, dom.isActive())
        dom.destroy()
        try:
            dom = conn.lookupByName('testname')
        except libvirt.libvirtError as e:
            self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_NO_DOMAIN)
            self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_QEMU)
            return
        self.fail("lookupByName succeeded for destroyed non-defined VM")

    def test_defineXML_remembers_uuid(self):
        conn = self.get_openAuth_curry_func()('qemu:///system')
        uuid = 'b21f957d-a72f-4b93-b5a5-45b1161abb02'
        conn.defineXML(get_vm_xml(uuid=uuid))
        dom = conn.lookupByName('testname')
        self.assertEqual(dom.UUIDString(), uuid)

    def test_createWithFlags(self):
        conn = self.get_openAuth_curry_func()('qemu:///system')
        conn.defineXML(get_vm_xml())
        dom = conn.lookupByName('testname')
        self.assertFalse(dom.isActive(), 'Defined domain was running.')
        dom.createWithFlags(0)
        self.assertTrue(dom.isActive(),
                        'Domain wasn\'t running after createWithFlags')

    def test_managedSave(self):
        conn = self.get_openAuth_curry_func()('qemu:///system')
        conn.defineXML(get_vm_xml())
        dom = conn.lookupByName('testname')
        self.assertFalse(dom.isActive(), 'Defined domain was running.')
        dom.createWithFlags(0)
        self.assertEqual(dom.hasManagedSaveImage(0), 0)
        dom.managedSave(0)
        self.assertEqual(dom.hasManagedSaveImage(0), 1)
        dom.managedSaveRemove(0)
        self.assertEqual(dom.hasManagedSaveImage(0), 0)

    def test_listDomainsId_and_lookupById(self):
        conn = self.get_openAuth_curry_func()('qemu:///system')
        self.assertEqual(conn.listDomainsID(), [])
        conn.defineXML(get_vm_xml())
        dom = conn.lookupByName('testname')
        dom.createWithFlags(0)
        self.assertEqual(len(conn.listDomainsID()), 1)

        dom_id = conn.listDomainsID()[0]
        self.assertEqual(conn.lookupByID(dom_id), dom)

        dom_id = conn.listDomainsID()[0]
        try:
            # dom_id + 1 is guaranteed not to be a valid domain ID here.
            conn.lookupByID(dom_id + 1)
        except libvirt.libvirtError as e:
            self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_NO_DOMAIN)
            self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_QEMU)
            return
        self.fail("Looking up an invalid domain ID didn't "
                  "raise libvirtError")

    def test_define_and_retrieve(self):
        conn = self.get_openAuth_curry_func()('qemu:///system')
        self.assertEqual(conn.listDomainsID(), [])
        conn.defineXML(get_vm_xml())
        dom = conn.lookupByName('testname')
        xml = dom.XMLDesc(0)
        # Passes if the returned XML parses cleanly.
        etree.fromstring(xml)

    def _test_accepts_source_type(self, source_type):
        """Helper: defining with the given disk source type must work."""
        conn = self.get_openAuth_curry_func()('qemu:///system')
        self.assertEqual(conn.listDomainsID(), [])
        conn.defineXML(get_vm_xml(source_type=source_type))
        dom = conn.lookupByName('testname')
        xml = dom.XMLDesc(0)
        tree = etree.fromstring(xml)
        elem = tree.find('./devices/disk/source')
        # NOTE(review): the returned XML is checked for a 'file' attribute
        # regardless of source_type -- presumably fakelibvirt normalizes
        # the source attribute; confirm against fakelibvirt.
        self.assertEqual(elem.get('file'), '/somefile')

    def test_accepts_source_dev(self):
        self._test_accepts_source_type('dev')

    def test_accepts_source_path(self):
        self._test_accepts_source_type('path')

    def test_network_type_bridge_sticks(self):
        self._test_network_type_sticks('bridge')

    def test_network_type_network_sticks(self):
        self._test_network_type_sticks('network')

    def _test_network_type_sticks(self, network_type):
        """Helper: the interface type must round-trip through XMLDesc."""
        conn = self.get_openAuth_curry_func()('qemu:///system')
        self.assertEqual(conn.listDomainsID(), [])
        conn.defineXML(get_vm_xml(interface_type=network_type))
        dom = conn.lookupByName('testname')
        xml = dom.XMLDesc(0)
        tree = etree.fromstring(xml)
        elem = tree.find('./devices/interface')
        self.assertEqual(elem.get('type'), network_type)
        elem = elem.find('./source')
        self.assertEqual(elem.get(network_type), 'br100')

    def test_getType(self):
        conn = self.get_openAuth_curry_func()('qemu:///system')
        self.assertEqual(conn.getType(), 'QEMU')

    def test_getVersion(self):
        conn = self.get_openAuth_curry_func()('qemu:///system')
        self.assertIsInstance(conn.getVersion(), int)

    def test_getCapabilities(self):
        conn = self.get_openAuth_curry_func()('qemu:///system')
        # Passes if the capabilities XML parses cleanly.
        etree.fromstring(conn.getCapabilities())

    def test_nwfilter_define_undefine(self):
        conn = self.get_openAuth_curry_func()('qemu:///system')
        # Will raise an exception if it's not valid XML
        xml = '''<filter name='nova-instance-instance-789' chain='root'>
                   <uuid>946878c6-3ad3-82b2-87f3-c709f3807f58</uuid>
                 </filter>'''

        conn.nwfilterDefineXML(xml)
        nwfilter = conn.nwfilterLookupByName('nova-instance-instance-789')
        nwfilter.undefine()
        try:
            conn.nwfilterLookupByName('nova-instance-instance-789320334')
        except libvirt.libvirtError as e:
            self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_NO_NWFILTER)
            self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_NWFILTER)
            return
        self.fail("Invalid NWFilter name didn't"
                  " raise libvirtError")

    def test_compareCPU_compatible(self):
        conn = self.get_openAuth_curry_func()('qemu:///system')

        xml = '''<cpu>
                   <arch>%s</arch>
                   <model>%s</model>
                   <vendor>%s</vendor>
                   <topology sockets="%d" cores="%d" threads="%d"/>
                 </cpu>''' % (libvirt.node_arch,
                              libvirt.node_cpu_model,
                              libvirt.node_cpu_vendor,
                              libvirt.node_sockets,
                              libvirt.node_cores,
                              libvirt.node_threads)
        self.assertEqual(conn.compareCPU(xml, 0),
                         libvirt.VIR_CPU_COMPARE_IDENTICAL)

    def test_compareCPU_incompatible_vendor(self):
        conn = self.get_openAuth_curry_func()('qemu:///system')

        xml = '''<cpu>
                   <arch>%s</arch>
                   <model>%s</model>
                   <vendor>%s</vendor>
                   <topology sockets="%d" cores="%d" threads="%d"/>
                 </cpu>''' % (libvirt.node_arch,
                              libvirt.node_cpu_model,
                              "AnotherVendor",
                              libvirt.node_sockets,
                              libvirt.node_cores,
                              libvirt.node_threads)
        self.assertEqual(conn.compareCPU(xml, 0),
                         libvirt.VIR_CPU_COMPARE_INCOMPATIBLE)

    def test_compareCPU_incompatible_arch(self):
        conn = self.get_openAuth_curry_func()('qemu:///system')

        xml = '''<cpu>
                   <arch>%s</arch>
                   <model>%s</model>
                   <vendor>%s</vendor>
                   <topology sockets="%d" cores="%d" threads="%d"/>
                 </cpu>''' % ('not-a-valid-arch',
                              libvirt.node_cpu_model,
                              libvirt.node_cpu_vendor,
                              libvirt.node_sockets,
                              libvirt.node_cores,
                              libvirt.node_threads)
        self.assertEqual(conn.compareCPU(xml, 0),
                         libvirt.VIR_CPU_COMPARE_INCOMPATIBLE)

    def test_compareCPU_incompatible_model(self):
        conn = self.get_openAuth_curry_func()('qemu:///system')

        xml = '''<cpu>
                   <arch>%s</arch>
                   <model>%s</model>
                   <vendor>%s</vendor>
                   <topology sockets="%d" cores="%d" threads="%d"/>
                 </cpu>''' % (libvirt.node_arch,
                              "AnotherModel",
                              libvirt.node_cpu_vendor,
                              libvirt.node_sockets,
                              libvirt.node_cores,
                              libvirt.node_threads)
        self.assertEqual(conn.compareCPU(xml, 0),
                         libvirt.VIR_CPU_COMPARE_INCOMPATIBLE)

    def test_compareCPU_compatible_unspecified_model(self):
        conn = self.get_openAuth_curry_func()('qemu:///system')

        xml = '''<cpu>
                   <arch>%s</arch>
                   <vendor>%s</vendor>
                   <topology sockets="%d" cores="%d" threads="%d"/>
                 </cpu>''' % (libvirt.node_arch,
                              libvirt.node_cpu_vendor,
                              libvirt.node_sockets,
                              libvirt.node_cores,
                              libvirt.node_threads)
        self.assertEqual(conn.compareCPU(xml, 0),
                         libvirt.VIR_CPU_COMPARE_IDENTICAL)
diff --git a/nova/tests/unit/virt/libvirt/test_firewall.py b/nova/tests/unit/virt/libvirt/test_firewall.py
new file mode 100644
index 0000000000..b6d4cddf51
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_firewall.py
@@ -0,0 +1,749 @@
+# Copyright 2010 OpenStack Foundation
+# Copyright 2012 University Of Minho
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+import threading
+import uuid
+from xml.dom import minidom
+
+from lxml import etree
+import mock
+import mox
+from oslo.concurrency import lockutils
+
+from nova.compute import utils as compute_utils
+from nova import exception
+from nova.network import linux_net
+from nova import objects
+from nova import test
+from nova.tests.unit import fake_network
+from nova.tests.unit.virt.libvirt import fakelibvirt
+from nova.virt.libvirt import firewall
+from nova.virt import netutils
+from nova.virt import virtapi
+
+try:
+ import libvirt
+except ImportError:
+ libvirt = fakelibvirt
+
+_fake_network_info = fake_network.fake_get_instance_nw_info
+_fake_stub_out_get_nw_info = fake_network.stub_out_nw_api_get_instance_nw_info
+_ipv4_like = fake_network.ipv4_like
+
+
class NWFilterFakes:
    """In-memory fake of the libvirt nwfilter define/lookup API.

    Stores defined filters by name so tests can assert on how many
    filters exist and whether lookups/undefines behave like libvirt.
    """

    def __init__(self):
        # Maps filter name -> FakeNWFilterInternal instance.
        self.filters = {}

    def nwfilterLookupByName(self, name):
        """Return the named filter, raising libvirtError like the real API."""
        if name in self.filters:
            return self.filters[name]
        raise libvirt.libvirtError('Filter Not Found')

    def filterDefineXMLMock(self, xml):
        """Define (or redefine) a filter from its XML description.

        Mirrors libvirt's behavior: redefining an existing name is only
        allowed when the uuid matches; otherwise a libvirtError is raised.
        """
        class FakeNWFilterInternal:
            # Minimal stand-in for a libvirt virNWFilter object.
            def __init__(self, parent, name, u, xml):
                self.name = name
                self.uuid = u
                self.parent = parent
                self.xml = xml

            def XMLDesc(self, flags):
                return self.xml

            def undefine(self):
                # Remove ourselves from the parent fake's registry.
                del self.parent.filters[self.name]

        tree = etree.fromstring(xml)
        name = tree.get('name')
        u = tree.find('uuid')
        if u is None:
            # No uuid in the XML: generate one, as libvirt would.
            u = uuid.uuid4().hex
        else:
            u = u.text
        if name not in self.filters:
            self.filters[name] = FakeNWFilterInternal(self, name, u, xml)
        else:
            if self.filters[name].uuid != u:
                raise libvirt.libvirtError(
                    "Mismatching name '%s' with uuid '%s' vs '%s'"
                    % (name, self.filters[name].uuid, u))
            self.filters[name].xml = xml
        return True
+
+
class FakeVirtAPI(virtapi.VirtAPI):
    """VirtAPI stand-in whose provider firewall rule list is always empty."""

    def provider_fw_rule_get_all(self, context):
        # No provider-level rules are defined for these tests.
        rules = []
        return rules
+
+
class IptablesFirewallTestCase(test.NoDBTestCase):
    """Tests for the libvirt IptablesFirewallDriver."""

    def setUp(self):
        """Wire the driver to a fake libvirt connection object."""
        super(IptablesFirewallTestCase, self).setUp()

        class FakeLibvirtDriver(object):
            # Only the single call made during setup_basic_rules is needed.
            def nwfilterDefineXML(*args, **kwargs):
                """setup_basic_rules in nwfilter calls this."""
                pass

        self.fake_libvirt_connection = FakeLibvirtDriver()
        self.fw = firewall.IptablesFirewallDriver(
            FakeVirtAPI(),
            get_connection=lambda: self.fake_libvirt_connection)
+
    # Canned `iptables-save -c` output fed to the driver by the fake
    # executor in test_static_filters; non-nova rules here must survive
    # a filter rebuild unchanged.
    in_rules = [
        '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
        '*nat',
        ':PREROUTING ACCEPT [1170:189210]',
        ':INPUT ACCEPT [844:71028]',
        ':OUTPUT ACCEPT [5149:405186]',
        ':POSTROUTING ACCEPT [5063:386098]',
        '# Completed on Tue Dec 18 15:50:25 2012',
        '# Generated by iptables-save v1.4.12 on Tue Dec 18 15:50:25 201;',
        '*mangle',
        ':PREROUTING ACCEPT [241:39722]',
        ':INPUT ACCEPT [230:39282]',
        ':FORWARD ACCEPT [0:0]',
        ':OUTPUT ACCEPT [266:26558]',
        ':POSTROUTING ACCEPT [267:26590]',
        '-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM '
        '--checksum-fill',
        'COMMIT',
        '# Completed on Tue Dec 18 15:50:25 2012',
        '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
        '*filter',
        ':INPUT ACCEPT [969615:281627771]',
        ':FORWARD ACCEPT [0:0]',
        ':OUTPUT ACCEPT [915599:63811649]',
        ':nova-block-ipv4 - [0:0]',
        '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
        '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
        ',ESTABLISHED -j ACCEPT ',
        '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
        '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
        '[0:0] -A FORWARD -o virbr0 -j REJECT '
        '--reject-with icmp-port-unreachable ',
        '[0:0] -A FORWARD -i virbr0 -j REJECT '
        '--reject-with icmp-port-unreachable ',
        'COMMIT',
        '# Completed on Mon Dec 6 11:54:13 2010',
    ]

    # Matching canned `ip6tables-save -c` output (filter table only).
    in6_filter_rules = [
        '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
        '*filter',
        ':INPUT ACCEPT [349155:75810423]',
        ':FORWARD ACCEPT [0:0]',
        ':OUTPUT ACCEPT [349256:75777230]',
        'COMMIT',
        '# Completed on Tue Jan 18 23:47:56 2011',
    ]
+
+ def _create_instance_ref(self,
+ uuid="74526555-9166-4893-a203-126bdcab0d67"):
+ inst = objects.Instance(
+ id=7,
+ uuid=uuid,
+ user_id="fake",
+ project_id="fake",
+ image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ instance_type_id=1)
+ inst.info_cache = objects.InstanceInfoCache()
+ inst.info_cache.deleted = False
+ return inst
+
    @mock.patch.object(objects.InstanceList, "get_by_security_group_id")
    @mock.patch.object(objects.SecurityGroupRuleList,
                       "get_by_security_group_id")
    @mock.patch.object(objects.SecurityGroupList, "get_by_instance")
    @mock.patch.object(lockutils, "external_lock")
    def test_static_filters(self, mock_lock, mock_secgroup,
                            mock_secrule, mock_instlist):
        """End-to-end check of the generated iptables rule set.

        Builds two instances with security groups (one granting access to
        the other), captures the rules via a fake iptables executor, and
        asserts that pre-existing rules survive and the per-group ICMP/TCP
        accept rules appear.
        """
        mock_lock.return_value = threading.Semaphore()

        UUID = "2674993b-6adb-4733-abd9-a7c10cc1f146"
        SRC_UUID = "0e0a76b2-7c52-4bc0-9a60-d83017e42c1a"
        instance_ref = self._create_instance_ref(UUID)
        src_instance_ref = self._create_instance_ref(SRC_UUID)

        secgroup = objects.SecurityGroup(id=1,
                                         user_id='fake',
                                         project_id='fake',
                                         name='testgroup',
                                         description='test group')

        src_secgroup = objects.SecurityGroup(id=2,
                                             user_id='fake',
                                             project_id='fake',
                                             name='testsourcegroup',
                                             description='src group')

        # r1/r2: ICMP (any / echo-request) from a CIDR.
        r1 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
                                       protocol='icmp',
                                       from_port=-1,
                                       to_port=-1,
                                       cidr='192.168.11.0/24',
                                       grantee_group=None)

        r2 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
                                       protocol='icmp',
                                       from_port=8,
                                       to_port=-1,
                                       cidr='192.168.11.0/24',
                                       grantee_group=None)

        # r3: TCP 80-81 from a CIDR.
        r3 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
                                       protocol='tcp',
                                       from_port=80,
                                       to_port=81,
                                       cidr='192.168.10.0/24',
                                       grantee_group=None)

        # r4/r5: grants from the source security group (per-member IPs).
        r4 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
                                       protocol='tcp',
                                       from_port=80,
                                       to_port=81,
                                       cidr=None,
                                       grantee_group=src_secgroup,
                                       group_id=src_secgroup['id'])

        r5 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
                                       protocol=None,
                                       cidr=None,
                                       grantee_group=src_secgroup,
                                       group_id=src_secgroup['id'])

        secgroup_list = objects.SecurityGroupList()
        secgroup_list.objects.append(secgroup)
        src_secgroup_list = objects.SecurityGroupList()
        src_secgroup_list.objects.append(src_secgroup)
        instance_ref.security_groups = secgroup_list
        src_instance_ref.security_groups = src_secgroup_list

        def _fake_secgroup(ctxt, instance):
            # Route each instance to its own security group list.
            if instance.uuid == UUID:
                return instance_ref.security_groups
            else:
                return src_instance_ref.security_groups

        mock_secgroup.side_effect = _fake_secgroup

        def _fake_secrule(ctxt, id):
            # Only the target group carries rules; the source group has none.
            if id == secgroup.id:
                rules = objects.SecurityGroupRuleList()
                rules.objects.extend([r1, r2, r3, r4, r5])
                return rules
            else:
                return []

        mock_secrule.side_effect = _fake_secrule

        def _fake_instlist(ctxt, id):
            # Membership lookup: which instances belong to a group id.
            if id == src_secgroup['id']:
                insts = objects.InstanceList()
                insts.objects.append(src_instance_ref)
                return insts
            else:
                insts = objects.InstanceList()
                insts.objects.append(instance_ref)
                return insts

        mock_instlist.side_effect = _fake_instlist

        def fake_iptables_execute(*cmd, **kwargs):
            # Feed the canned save output in; capture the restore input.
            process_input = kwargs.get('process_input', None)
            if cmd == ('ip6tables-save', '-c'):
                return '\n'.join(self.in6_filter_rules), None
            if cmd == ('iptables-save', '-c'):
                return '\n'.join(self.in_rules), None
            if cmd == ('iptables-restore', '-c'):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out_rules = lines
                return '', ''
            if cmd == ('ip6tables-restore', '-c',):
                lines = process_input.split('\n')
                if '*filter' in lines:
                    self.out6_rules = lines
                return '', ''

        network_model = _fake_network_info(self.stubs, 1)

        linux_net.iptables_manager.execute = fake_iptables_execute

        self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
                       lambda instance: network_model)

        self.fw.prepare_instance_filter(instance_ref, network_model)
        self.fw.apply_instance_filter(instance_ref, network_model)

        # Every pre-existing non-nova rule must still be present.
        in_rules = filter(lambda l: not l.startswith('#'),
                          self.in_rules)
        for rule in in_rules:
            if 'nova' not in rule:
                self.assertTrue(rule in self.out_rules,
                                'Rule went missing: %s' % rule)

        instance_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            # last two octets change
            if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
                instance_chain = rule.split(' ')[-1]
                break
        self.assertTrue(instance_chain, "The instance chain wasn't added")

        security_group_chain = None
        for rule in self.out_rules:
            # This is pretty crude, but it'll do for now
            if '-A %s -j' % instance_chain in rule:
                security_group_chain = rule.split(' ')[-1]
                break
        self.assertTrue(security_group_chain,
                        "The security group chain wasn't added")

        # r1: ICMP from the CIDR.
        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp '
                           '-s 192.168.11.0/24')
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                        "ICMP acceptance rule wasn't added")

        # r2: ICMP echo-request (type 8) from the CIDR.
        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp '
                           '--icmp-type 8 -s 192.168.11.0/24')
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                        "ICMP Echo Request acceptance rule wasn't added")

        # r4/r5: per-member rules from the source group's fixed IPs.
        for ip in network_model.fixed_ips():
            if ip['version'] != 4:
                continue
            regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp -m multiport '
                               '--dports 80:81 -s %s' % ip['address'])
            self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                            "TCP port 80/81 acceptance rule wasn't added")
            regex = re.compile('\[0\:0\] -A .* -j ACCEPT -s '
                               '%s' % ip['address'])
            self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                            "Protocol/port-less acceptance rule wasn't added")

        # r3: TCP 80-81 from the CIDR.
        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp '
                           '-m multiport --dports 80:81 -s 192.168.10.0/24')
        self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
                        "TCP port 80/81 acceptance rule wasn't added")
+
+ def test_filters_for_instance_with_ip_v6(self):
+ self.flags(use_ipv6=True)
+ network_info = _fake_network_info(self.stubs, 1)
+ rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
+ self.assertEqual(len(rulesv4), 2)
+ self.assertEqual(len(rulesv6), 1)
+
+ def test_filters_for_instance_without_ip_v6(self):
+ self.flags(use_ipv6=False)
+ network_info = _fake_network_info(self.stubs, 1)
+ rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
+ self.assertEqual(len(rulesv4), 2)
+ self.assertEqual(len(rulesv6), 0)
+
    @mock.patch.object(objects.SecurityGroupList, "get_by_instance")
    @mock.patch.object(lockutils, "external_lock")
    def test_multinic_iptables(self, mock_lock, mock_secgroup):
        """Rule counts scale with the number of networks and addresses.

        Network rules are computed as the delta between total rules after
        prepare_instance_filter and the base + per-instance rules.
        """
        mock_lock.return_value = threading.Semaphore()
        mock_secgroup.return_value = objects.SecurityGroupList()

        ipv4_rules_per_addr = 1
        ipv4_addr_per_network = 2
        ipv6_rules_per_addr = 1
        ipv6_addr_per_network = 1
        networks_count = 5
        instance_ref = self._create_instance_ref()
        network_info = _fake_network_info(self.stubs, networks_count,
                                          ipv4_addr_per_network)
        # Give one subnet a DHCP server so DHCP accept rules are emitted.
        network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
            '1.1.1.1'
        ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
        ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
        inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
                                                      network_info)
        self.fw.prepare_instance_filter(instance_ref, network_info)
        ipv4 = self.fw.iptables.ipv4['filter'].rules
        ipv6 = self.fw.iptables.ipv6['filter'].rules
        ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
        ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
        # Extra rules are for the DHCP request
        rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
                 networks_count) + 2
        self.assertEqual(ipv4_network_rules, rules)
        self.assertEqual(ipv6_network_rules,
                         ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
+
    @mock.patch.object(lockutils, "external_lock")
    def test_do_refresh_security_group_rules(self, mock_lock):
        """A refresh re-derives and re-applies the instance's rules.

        Uses mox record/replay: the first instance_rules/add_filters pair
        happens during prepare_instance_filter, the second during
        do_refresh_security_group_rules (gated on has_chain returning True).
        """
        mock_lock.return_value = threading.Semaphore()
        instance_ref = self._create_instance_ref()
        self.mox.StubOutWithMock(self.fw,
                                 'instance_rules')
        self.mox.StubOutWithMock(self.fw,
                                 'add_filters_for_instance',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(self.fw.iptables.ipv4['filter'],
                                 'has_chain')

        self.fw.instance_rules(instance_ref,
                               mox.IgnoreArg()).AndReturn((None, None))
        self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
                                         mox.IgnoreArg(), mox.IgnoreArg())
        self.fw.instance_rules(instance_ref,
                               mox.IgnoreArg()).AndReturn((None, None))
        self.fw.iptables.ipv4['filter'].has_chain(mox.IgnoreArg()
                                                  ).AndReturn(True)
        self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
                                         mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()

        self.fw.prepare_instance_filter(instance_ref, mox.IgnoreArg())
        # Register the instance so the refresh pass picks it up.
        self.fw.instance_info[instance_ref['id']] = (instance_ref, None)
        self.fw.do_refresh_security_group_rules("fake")
+
    @mock.patch.object(lockutils, "external_lock")
    def test_do_refresh_security_group_rules_instance_gone(self, mock_lock):
        """Instances whose chain vanished are skipped without re-adding it."""
        mock_lock.return_value = threading.Semaphore()
        instance1 = {'id': 1, 'uuid': 'fake-uuid1'}
        instance2 = {'id': 2, 'uuid': 'fake-uuid2'}
        self.fw.instance_info = {1: (instance1, 'netinfo1'),
                                 2: (instance2, 'netinfo2')}
        mock_filter = mock.MagicMock()
        with mock.patch.dict(self.fw.iptables.ipv4, {'filter': mock_filter}):
            mock_filter.has_chain.return_value = False
            with mock.patch.object(self.fw, 'instance_rules') as mock_ir:
                mock_ir.return_value = (None, None)
                self.fw.do_refresh_security_group_rules('secgroup')
                self.assertEqual(2, mock_ir.call_count)
        # NOTE(danms): Make sure that it is checking has_chain each time,
        # continuing to process all the instances, and never adding the
        # new chains back if has_chain() is False
        mock_filter.has_chain.assert_has_calls([mock.call('inst-1'),
                                                mock.call('inst-2')],
                                               any_order=True)
        self.assertEqual(0, mock_filter.add_chain.call_count)
+
    @mock.patch.object(objects.InstanceList, "get_by_security_group_id")
    @mock.patch.object(objects.SecurityGroupRuleList,
                       "get_by_security_group_id")
    @mock.patch.object(objects.SecurityGroupList, "get_by_instance")
    @mock.patch.object(lockutils, "external_lock")
    def test_unfilter_instance_undefines_nwfilter(self, mock_lock,
                                                  mock_secgroup,
                                                  mock_secrule,
                                                  mock_instlist):
        """unfilter_instance removes exactly one nwfilter (the instance's)."""
        mock_lock.return_value = threading.Semaphore()

        fakefilter = NWFilterFakes()
        _xml_mock = fakefilter.filterDefineXMLMock
        self.fw.nwfilter._conn.nwfilterDefineXML = _xml_mock
        _lookup_name = fakefilter.nwfilterLookupByName
        self.fw.nwfilter._conn.nwfilterLookupByName = _lookup_name
        instance_ref = self._create_instance_ref()

        mock_secgroup.return_value = objects.SecurityGroupList()

        network_info = _fake_network_info(self.stubs, 1)
        self.fw.setup_basic_filtering(instance_ref, network_info)
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.apply_instance_filter(instance_ref, network_info)
        original_filter_count = len(fakefilter.filters)
        self.fw.unfilter_instance(instance_ref, network_info)

        # should undefine just the instance filter
        self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
+
    @mock.patch.object(FakeVirtAPI, "provider_fw_rule_get_all")
    @mock.patch.object(objects.SecurityGroupList, "get_by_instance")
    @mock.patch.object(lockutils, "external_lock")
    def test_provider_firewall_rules(self, mock_lock, mock_secgroup,
                                     mock_fwrules):
        """Provider rules track the DB through refresh_provider_fw_rules.

        Rule count in the 'provider' chain follows the mocked DB contents
        (0 -> 1 -> 2 -> 1), and the instance chain jumps to 'provider'
        exactly once.
        """
        mock_lock.return_value = threading.Semaphore()
        mock_secgroup.return_value = objects.SecurityGroupList()

        # setup basic instance data
        instance_ref = self._create_instance_ref()
        # FRAGILE: peeks at how the firewall names chains
        chain_name = 'inst-%s' % instance_ref['id']

        # create a firewall via setup_basic_filtering like libvirt_conn.spawn
        # should have a chain with 0 rules
        network_info = _fake_network_info(self.stubs, 1)
        self.fw.setup_basic_filtering(instance_ref, network_info)
        self.assertIn('provider', self.fw.iptables.ipv4['filter'].chains)
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                 if rule.chain == 'provider']
        self.assertEqual(0, len(rules))

        # add a rule and send the update message, check for 1 rule
        mock_fwrules.return_value = [{'protocol': 'tcp',
                                      'cidr': '10.99.99.99/32',
                                      'from_port': 1,
                                      'to_port': 65535}]
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                 if rule.chain == 'provider']
        self.assertEqual(1, len(rules))

        # Add another, refresh, and make sure number of rules goes to two
        mock_fwrules.return_value = [{'protocol': 'tcp',
                                      'cidr': '10.99.99.99/32',
                                      'from_port': 1,
                                      'to_port': 65535},
                                     {'protocol': 'udp',
                                      'cidr': '10.99.99.99/32',
                                      'from_port': 1,
                                      'to_port': 65535}]
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                 if rule.chain == 'provider']
        self.assertEqual(2, len(rules))

        # create the instance filter and make sure it has a jump rule
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.apply_instance_filter(instance_ref, network_info)
        inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == chain_name]
        jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
        provjump_rules = []
        # IptablesTable doesn't make rules unique internally
        for rule in jump_rules:
            if 'provider' in rule.rule and rule not in provjump_rules:
                provjump_rules.append(rule)
        self.assertEqual(1, len(provjump_rules))

        # remove a rule from the db, cast to compute to refresh rule
        mock_fwrules.return_value = [{'protocol': 'udp',
                                      'cidr': '10.99.99.99/32',
                                      'from_port': 1,
                                      'to_port': 65535}]
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                 if rule.chain == 'provider']
        self.assertEqual(1, len(rules))
+
+
class NWFilterTestCase(test.NoDBTestCase):
    """Tests for the libvirt NWFilterFirewall driver."""

    def setUp(self):
        """Wire the nwfilter driver to a bare fake connection object."""
        super(NWFilterTestCase, self).setUp()

        class Mock(object):
            # Attribute bag; tests attach nwfilter* callables as needed.
            pass

        self.fake_libvirt_connection = Mock()

        self.fw = firewall.NWFilterFirewall(
            FakeVirtAPI(),
            lambda: self.fake_libvirt_connection)
+
+ def _create_security_group(self, instance_ref):
+ secgroup = objects.SecurityGroup(id=1,
+ user_id='fake',
+ project_id='fake',
+ name='testgroup',
+ description='test group description')
+
+ secgroup_list = objects.SecurityGroupList()
+ secgroup_list.objects.append(secgroup)
+ instance_ref.security_groups = secgroup_list
+
+ return secgroup
+
+ def _create_instance(self):
+ inst = objects.Instance(
+ id=7,
+ uuid="74526555-9166-4893-a203-126bdcab0d67",
+ user_id="fake",
+ project_id="fake",
+ image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ instance_type_id=1)
+ inst.info_cache = objects.InstanceInfoCache()
+ inst.info_cache.deleted = False
+ return inst
+
    def test_creates_base_rule_first(self):
        """Filters are defined only after everything they reference exists.

        The fake nwfilterDefineXML asserts every <filterref> points at an
        already-defined filter, and records the transitive dependency set
        so we can assert which base filters the instance filter pulls in
        (allow-dhcp-server only when the subnet has a DHCP server).
        """
        # These come pre-defined by libvirt
        self.defined_filters = ['no-mac-spoofing',
                                'no-ip-spoofing',
                                'no-arp-spoofing',
                                'allow-dhcp-server']

        self.recursive_depends = {}
        for f in self.defined_filters:
            self.recursive_depends[f] = []

        def _filterDefineXMLMock(xml):
            dom = minidom.parseString(xml)
            name = dom.firstChild.getAttribute('name')
            self.recursive_depends[name] = []
            for f in dom.getElementsByTagName('filterref'):
                ref = f.getAttribute('filter')
                self.assertTrue(ref in self.defined_filters,
                                ('%s referenced filter that does ' +
                                 'not yet exist: %s') % (name, ref))
                dependencies = [ref] + self.recursive_depends[ref]
                self.recursive_depends[name] += dependencies

            self.defined_filters.append(name)
            return True

        self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock

        instance_ref = self._create_instance()
        self._create_security_group(instance_ref)

        def _ensure_all_called(mac, allow_dhcp):
            # NOTE(review): dict-based str.translate is Python 3 / unicode
            # semantics — assumes mac is a unicode string; confirm under py2.
            instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
                                                       mac.translate({ord(':'): None}))
            requiredlist = ['no-arp-spoofing', 'no-ip-spoofing',
                            'no-mac-spoofing']
            required_not_list = []
            if allow_dhcp:
                requiredlist.append('allow-dhcp-server')
            else:
                required_not_list.append('allow-dhcp-server')
            for required in requiredlist:
                self.assertTrue(required in
                                self.recursive_depends[instance_filter],
                                "Instance's filter does not include %s" %
                                required)
            for required_not in required_not_list:
                self.assertFalse(required_not in
                                 self.recursive_depends[instance_filter],
                                 "Instance filter includes %s" % required_not)

        network_info = _fake_network_info(self.stubs, 1)
        # since there is one (network_info) there is one vif
        # pass this vif's mac to _ensure_all_called()
        # to set the instance_filter properly
        mac = network_info[0]['address']
        network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
            '1.1.1.1'
        self.fw.setup_basic_filtering(instance_ref, network_info)
        allow_dhcp = True
        _ensure_all_called(mac, allow_dhcp)

        network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = None
        self.fw.setup_basic_filtering(instance_ref, network_info)
        allow_dhcp = False
        _ensure_all_called(mac, allow_dhcp)
+
    def test_unfilter_instance_undefines_nwfilters(self):
        """unfilter_instance removes exactly one defined nwfilter."""
        fakefilter = NWFilterFakes()
        self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
        self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName

        instance_ref = self._create_instance()
        self._create_security_group(instance_ref)

        network_info = _fake_network_info(self.stubs, 1)
        self.fw.setup_basic_filtering(instance_ref, network_info)
        original_filter_count = len(fakefilter.filters)
        self.fw.unfilter_instance(instance_ref, network_info)
        # Only the per-instance filter should be undefined, not the bases.
        self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
+
+ def test_redefining_nwfilters(self):
+ fakefilter = NWFilterFakes()
+ self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
+ self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
+
+ instance_ref = self._create_instance()
+ self._create_security_group(instance_ref)
+
+ network_info = _fake_network_info(self.stubs, 1)
+ self.fw.setup_basic_filtering(instance_ref, network_info)
+ self.fw.setup_basic_filtering(instance_ref, network_info)
+
    def test_nwfilter_parameters(self):
        """Every filterref parameter carries the expected network value.

        Parses the defined instance filter XML and checks each known
        parameter (IP, DHCPSERVER, RASERVER, PROJNET/PROJMASK and their
        IPv6 variants) against the vif's subnets; unknown parameter names
        fail the test.
        """
        fakefilter = NWFilterFakes()
        self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
        self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName

        instance_ref = self._create_instance()
        self._create_security_group(instance_ref)

        network_info = _fake_network_info(self.stubs, 1)
        self.fw.setup_basic_filtering(instance_ref, network_info)

        vif = network_info[0]
        nic_id = vif['address'].replace(':', '')
        instance_filter_name = self.fw._instance_filter_name(instance_ref,
                                                             nic_id)
        f = fakefilter.nwfilterLookupByName(instance_filter_name)
        tree = etree.fromstring(f.xml)

        for fref in tree.findall('filterref'):
            parameters = fref.findall('./parameter')
            for parameter in parameters:
                subnet_v4, subnet_v6 = vif['network']['subnets']
                if parameter.get('name') == 'IP':
                    self.assertTrue(_ipv4_like(parameter.get('value'),
                                               '192.168'))
                elif parameter.get('name') == 'DHCPSERVER':
                    dhcp_server = subnet_v4.get('dhcp_server')
                    self.assertEqual(parameter.get('value'), dhcp_server)
                elif parameter.get('name') == 'RASERVER':
                    ra_server = subnet_v6['gateway']['address'] + "/128"
                    self.assertEqual(parameter.get('value'), ra_server)
                elif parameter.get('name') == 'PROJNET':
                    ipv4_cidr = subnet_v4['cidr']
                    net, mask = netutils.get_net_and_mask(ipv4_cidr)
                    self.assertEqual(parameter.get('value'), net)
                elif parameter.get('name') == 'PROJMASK':
                    ipv4_cidr = subnet_v4['cidr']
                    net, mask = netutils.get_net_and_mask(ipv4_cidr)
                    self.assertEqual(parameter.get('value'), mask)
                elif parameter.get('name') == 'PROJNET6':
                    ipv6_cidr = subnet_v6['cidr']
                    net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
                    self.assertEqual(parameter.get('value'), net)
                elif parameter.get('name') == 'PROJMASK6':
                    ipv6_cidr = subnet_v6['cidr']
                    net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
                    self.assertEqual(parameter.get('value'), prefix)
                else:
                    raise exception.InvalidParameterValue('unknown parameter '
                                                          'in filter')
+
    def test_multinic_base_filter_selection(self):
        """Each vif gets the base filter matching its DHCP configuration.

        A vif on a subnet with a DHCP server references 'nova-base'; one
        without references 'nova-nodhcp'.
        """
        fakefilter = NWFilterFakes()
        self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
        self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName

        instance_ref = self._create_instance()
        self._create_security_group(instance_ref)

        network_info = _fake_network_info(self.stubs, 2)
        # Only the first vif's subnet gets a DHCP server.
        network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
            '1.1.1.1'

        self.fw.setup_basic_filtering(instance_ref, network_info)

        def assert_filterref(instance, vif, expected=None):
            # Compare the set of filterrefs in the vif's filter XML.
            expected = expected or []
            nic_id = vif['address'].replace(':', '')
            filter_name = self.fw._instance_filter_name(instance, nic_id)
            f = fakefilter.nwfilterLookupByName(filter_name)
            tree = etree.fromstring(f.xml)
            frefs = [fr.get('filter') for fr in tree.findall('filterref')]
            self.assertEqual(set(expected), set(frefs))

        assert_filterref(instance_ref, network_info[0],
                         expected=['nova-base'])
        assert_filterref(instance_ref, network_info[1],
                         expected=['nova-nodhcp'])
diff --git a/nova/tests/unit/virt/libvirt/test_imagebackend.py b/nova/tests/unit/virt/libvirt/test_imagebackend.py
new file mode 100644
index 0000000000..e865c165da
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_imagebackend.py
@@ -0,0 +1,1309 @@
+# Copyright 2012 Grid Dynamics
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import inspect
+import os
+import shutil
+import tempfile
+
+import fixtures
+import mock
+from oslo.concurrency import lockutils
+from oslo.config import cfg
+from oslo.utils import units
+
+from nova import context
+from nova import exception
+from nova import keymgr
+from nova.openstack.common.fixture import config as config_fixture
+from nova.openstack.common import imageutils
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit import fake_processutils
+from nova.tests.unit.virt.libvirt import fake_libvirt_utils
+from nova.virt import images
+from nova.virt.libvirt import imagebackend
+from nova.virt.libvirt import rbd_utils
+
+CONF = cfg.CONF
+CONF.import_opt('fixed_key', 'nova.keymgr.conf_key_mgr', group='keymgr')
+
+
+class _ImageTestCase(object):
+
+ def mock_create_image(self, image):
+ def create_image(fn, base, size, *args, **kwargs):
+ fn(target=base, *args, **kwargs)
+ image.create_image = create_image
+
+ def setUp(self):
+ super(_ImageTestCase, self).setUp()
+ self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
+ self.INSTANCES_PATH = tempfile.mkdtemp(suffix='instances')
+ self.fixture.config(disable_process_locking=True,
+ group='oslo_concurrency')
+ self.flags(instances_path=self.INSTANCES_PATH)
+ self.INSTANCE = {'name': 'instance',
+ 'uuid': uuidutils.generate_uuid()}
+ self.DISK_INFO_PATH = os.path.join(self.INSTANCES_PATH,
+ self.INSTANCE['uuid'], 'disk.info')
+ self.NAME = 'fake.vm'
+ self.TEMPLATE = 'template'
+ self.CONTEXT = context.get_admin_context()
+
+ self.OLD_STYLE_INSTANCE_PATH = \
+ fake_libvirt_utils.get_instance_path(self.INSTANCE, forceold=True)
+ self.PATH = os.path.join(
+ fake_libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)
+
+ # TODO(mikal): rename template_dir to base_dir and template_path
+ # to cached_image_path. This will be less confusing.
+ self.TEMPLATE_DIR = os.path.join(CONF.instances_path, '_base')
+ self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.imagebackend.libvirt_utils',
+ fake_libvirt_utils))
+
+ def tearDown(self):
+ super(_ImageTestCase, self).tearDown()
+ shutil.rmtree(self.INSTANCES_PATH)
+
+ def test_prealloc_image(self):
+ CONF.set_override('preallocate_images', 'space')
+
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ def fake_fetch(target, *args, **kwargs):
+ return
+
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(os, 'access', lambda p, w: True)
+
+ # Call twice to verify testing fallocate is only called once.
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+
+ self.assertEqual(fake_processutils.fake_execute_get_log(),
+ ['fallocate -n -l 1 %s.fallocate_test' % self.PATH,
+ 'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
+ 'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])
+
+ def test_prealloc_image_without_write_access(self):
+ CONF.set_override('preallocate_images', 'space')
+
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ def fake_fetch(target, *args, **kwargs):
+ return
+
+ self.stubs.Set(image, 'check_image_exists', lambda: True)
+ self.stubs.Set(image, '_can_fallocate', lambda: True)
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(os, 'access', lambda p, w: False)
+
+ # Testing fallocate is only called when user has write access.
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+
+ self.assertEqual(fake_processutils.fake_execute_get_log(), [])
+
+
+class RawTestCase(_ImageTestCase, test.NoDBTestCase):
+
+ SIZE = 1024
+
+ def setUp(self):
+ self.image_class = imagebackend.Raw
+ super(RawTestCase, self).setUp()
+ self.stubs.Set(imagebackend.Raw, 'correct_format', lambda _: None)
+
+ def prepare_mocks(self):
+ fn = self.mox.CreateMockAnything()
+ self.mox.StubOutWithMock(imagebackend.utils.synchronized,
+ '__call__')
+ self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
+ self.mox.StubOutWithMock(imagebackend.disk, 'extend')
+ return fn
+
+ def test_cache(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
+ imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_image_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.cache(None, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_base_dir_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_template_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(None, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_create_image(self):
+ fn = self.prepare_mocks()
+ fn(target=self.TEMPLATE_PATH, max_size=None, image_id=None)
+ imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, None, image_id=None)
+
+ self.mox.VerifyAll()
+
+ def test_create_image_generated(self):
+ fn = self.prepare_mocks()
+ fn(target=self.PATH)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, None)
+
+ self.mox.VerifyAll()
+
+ @mock.patch.object(images, 'qemu_img_info',
+ return_value=imageutils.QemuImgInfo())
+ def test_create_image_extend(self, fake_qemu_img_info):
+ fn = self.prepare_mocks()
+ fn(max_size=self.SIZE, target=self.TEMPLATE_PATH, image_id=None)
+ imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
+ imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=False)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, self.SIZE, image_id=None)
+
+ self.mox.VerifyAll()
+
+ def test_correct_format(self):
+ self.stubs.UnsetAll()
+
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(imagebackend.images, 'qemu_img_info')
+
+ os.path.exists(self.PATH).AndReturn(True)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ info = self.mox.CreateMockAnything()
+ info.file_format = 'foo'
+ imagebackend.images.qemu_img_info(self.PATH).AndReturn(info)
+ os.path.exists(CONF.instances_path).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME, path=self.PATH)
+ self.assertEqual(image.driver_format, 'foo')
+
+ self.mox.VerifyAll()
+
+ @mock.patch.object(images, 'qemu_img_info',
+ side_effect=exception.InvalidDiskInfo(
+ reason='invalid path'))
+ def test_resolve_driver_format(self, fake_qemu_img_info):
+ image = self.image_class(self.INSTANCE, self.NAME)
+ driver_format = image.resolve_driver_format()
+ self.assertEqual(driver_format, 'raw')
+
+
+class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
+ SIZE = units.Gi
+
+ def setUp(self):
+ self.image_class = imagebackend.Qcow2
+ super(Qcow2TestCase, self).setUp()
+ self.QCOW2_BASE = (self.TEMPLATE_PATH +
+ '_%d' % (self.SIZE / units.Gi))
+
+ def prepare_mocks(self):
+ fn = self.mox.CreateMockAnything()
+ self.mox.StubOutWithMock(imagebackend.utils.synchronized,
+ '__call__')
+ self.mox.StubOutWithMock(imagebackend.libvirt_utils,
+ 'create_cow_image')
+ self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
+ self.mox.StubOutWithMock(imagebackend.disk, 'extend')
+ return fn
+
+ def test_cache(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(CONF.instances_path).AndReturn(True)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_image_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.cache(None, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_base_dir_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_template_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(None, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_create_image(self):
+ fn = self.prepare_mocks()
+ fn(max_size=None, target=self.TEMPLATE_PATH)
+ imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
+ self.PATH)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, None)
+
+ self.mox.VerifyAll()
+
+ def test_create_image_with_size(self):
+ fn = self.prepare_mocks()
+ fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.PATH).AndReturn(False)
+ imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
+ self.PATH)
+ imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
+
+ self.mox.VerifyAll()
+
+ def test_create_image_too_small(self):
+ fn = self.prepare_mocks()
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(imagebackend.Qcow2, 'get_disk_size')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
+ imagebackend.Qcow2.get_disk_size(self.TEMPLATE_PATH
+ ).AndReturn(self.SIZE)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.assertRaises(exception.FlavorDiskTooSmall,
+ image.create_image, fn, self.TEMPLATE_PATH, 1)
+ self.mox.VerifyAll()
+
+ def test_generate_resized_backing_files(self):
+ fn = self.prepare_mocks()
+ fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(imagebackend.libvirt_utils,
+ 'get_disk_backing_file')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(CONF.instances_path).AndReturn(True)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ os.path.exists(self.PATH).AndReturn(True)
+
+ imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
+ .AndReturn(self.QCOW2_BASE)
+ os.path.exists(self.QCOW2_BASE).AndReturn(False)
+ imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH,
+ self.QCOW2_BASE)
+ imagebackend.disk.extend(self.QCOW2_BASE, self.SIZE, use_cow=True)
+
+ os.path.exists(self.PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
+
+ self.mox.VerifyAll()
+
+ def test_qcow2_exists_and_has_no_backing_file(self):
+ fn = self.prepare_mocks()
+ fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(imagebackend.libvirt_utils,
+ 'get_disk_backing_file')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
+
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ os.path.exists(self.PATH).AndReturn(True)
+
+ imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
+ .AndReturn(None)
+ os.path.exists(self.PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
+
+ self.mox.VerifyAll()
+
+ def test_resolve_driver_format(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+ driver_format = image.resolve_driver_format()
+ self.assertEqual(driver_format, 'qcow2')
+
+
+class LvmTestCase(_ImageTestCase, test.NoDBTestCase):
+ VG = 'FakeVG'
+ TEMPLATE_SIZE = 512
+ SIZE = 1024
+
+ def setUp(self):
+ self.image_class = imagebackend.Lvm
+ super(LvmTestCase, self).setUp()
+ self.flags(images_volume_group=self.VG, group='libvirt')
+ self.flags(enabled=False, group='ephemeral_storage_encryption')
+ self.INSTANCE['ephemeral_key_uuid'] = None
+ self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
+ self.OLD_STYLE_INSTANCE_PATH = None
+ self.PATH = os.path.join('/dev', self.VG, self.LV)
+ self.disk = imagebackend.disk
+ self.utils = imagebackend.utils
+ self.lvm = imagebackend.lvm
+
+ def prepare_mocks(self):
+ fn = self.mox.CreateMockAnything()
+ self.mox.StubOutWithMock(self.disk, 'resize2fs')
+ self.mox.StubOutWithMock(self.lvm, 'create_volume')
+ self.mox.StubOutWithMock(self.disk, 'get_disk_size')
+ self.mox.StubOutWithMock(self.utils, 'execute')
+ return fn
+
+ def _create_image(self, sparse):
+ fn = self.prepare_mocks()
+ fn(max_size=None, target=self.TEMPLATE_PATH)
+ self.lvm.create_volume(self.VG,
+ self.LV,
+ self.TEMPLATE_SIZE,
+ sparse=sparse)
+ self.disk.get_disk_size(self.TEMPLATE_PATH
+ ).AndReturn(self.TEMPLATE_SIZE)
+ cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
+ self.PATH)
+ self.utils.execute(*cmd, run_as_root=True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, None)
+
+ self.mox.VerifyAll()
+
+ def _create_image_generated(self, sparse):
+ fn = self.prepare_mocks()
+ self.lvm.create_volume(self.VG, self.LV,
+ self.SIZE, sparse=sparse)
+ fn(target=self.PATH, ephemeral_size=None)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH,
+ self.SIZE, ephemeral_size=None)
+
+ self.mox.VerifyAll()
+
+ def _create_image_resize(self, sparse):
+ fn = self.prepare_mocks()
+ fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
+ self.lvm.create_volume(self.VG, self.LV,
+ self.SIZE, sparse=sparse)
+ self.disk.get_disk_size(self.TEMPLATE_PATH
+ ).AndReturn(self.TEMPLATE_SIZE)
+ cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
+ self.PATH)
+ self.utils.execute(*cmd, run_as_root=True)
+ self.disk.resize2fs(self.PATH, run_as_root=True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
+
+ self.mox.VerifyAll()
+
+ def test_cache(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
+ imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_image_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.cache(None, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_base_dir_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_create_image(self):
+ self._create_image(False)
+
+ def test_create_image_sparsed(self):
+ self.flags(sparse_logical_volumes=True, group='libvirt')
+ self._create_image(True)
+
+ def test_create_image_generated(self):
+ self._create_image_generated(False)
+
+ def test_create_image_generated_sparsed(self):
+ self.flags(sparse_logical_volumes=True, group='libvirt')
+ self._create_image_generated(True)
+
+ def test_create_image_resize(self):
+ self._create_image_resize(False)
+
+ def test_create_image_resize_sparsed(self):
+ self.flags(sparse_logical_volumes=True, group='libvirt')
+ self._create_image_resize(True)
+
+ def test_create_image_negative(self):
+ fn = self.prepare_mocks()
+ fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
+ self.lvm.create_volume(self.VG,
+ self.LV,
+ self.SIZE,
+ sparse=False
+ ).AndRaise(RuntimeError())
+ self.disk.get_disk_size(self.TEMPLATE_PATH
+ ).AndReturn(self.TEMPLATE_SIZE)
+ self.mox.StubOutWithMock(self.lvm, 'remove_volumes')
+ self.lvm.remove_volumes([self.PATH])
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ self.assertRaises(RuntimeError, image.create_image, fn,
+ self.TEMPLATE_PATH, self.SIZE)
+ self.mox.VerifyAll()
+
+ def test_create_image_generated_negative(self):
+ fn = self.prepare_mocks()
+ fn(target=self.PATH,
+ ephemeral_size=None).AndRaise(RuntimeError())
+ self.lvm.create_volume(self.VG,
+ self.LV,
+ self.SIZE,
+ sparse=False)
+ self.mox.StubOutWithMock(self.lvm, 'remove_volumes')
+ self.lvm.remove_volumes([self.PATH])
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ self.assertRaises(RuntimeError, image.create_image, fn,
+ self.TEMPLATE_PATH, self.SIZE,
+ ephemeral_size=None)
+ self.mox.VerifyAll()
+
+ def test_prealloc_image(self):
+ CONF.set_override('preallocate_images', 'space')
+
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ def fake_fetch(target, *args, **kwargs):
+ return
+
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(image, 'check_image_exists', lambda: True)
+
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+
+ self.assertEqual(fake_processutils.fake_execute_get_log(), [])
+
+
+class EncryptedLvmTestCase(_ImageTestCase, test.NoDBTestCase):
+ VG = 'FakeVG'
+ TEMPLATE_SIZE = 512
+ SIZE = 1024
+
+ def setUp(self):
+ super(EncryptedLvmTestCase, self).setUp()
+ self.image_class = imagebackend.Lvm
+ self.flags(enabled=True, group='ephemeral_storage_encryption')
+ self.flags(cipher='aes-xts-plain64',
+ group='ephemeral_storage_encryption')
+ self.flags(key_size=512, group='ephemeral_storage_encryption')
+ self.flags(fixed_key='00000000000000000000000000000000'
+ '00000000000000000000000000000000',
+ group='keymgr')
+ self.flags(images_volume_group=self.VG, group='libvirt')
+ self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
+ self.OLD_STYLE_INSTANCE_PATH = None
+ self.LV_PATH = os.path.join('/dev', self.VG, self.LV)
+ self.PATH = os.path.join('/dev/mapper',
+ imagebackend.dmcrypt.volume_name(self.LV))
+ self.key_manager = keymgr.API()
+ self.INSTANCE['ephemeral_key_uuid'] =\
+ self.key_manager.create_key(self.CONTEXT)
+ self.KEY = self.key_manager.get_key(self.CONTEXT,
+ self.INSTANCE['ephemeral_key_uuid']).get_encoded()
+
+ self.lvm = imagebackend.lvm
+ self.disk = imagebackend.disk
+ self.utils = imagebackend.utils
+ self.libvirt_utils = imagebackend.libvirt_utils
+ self.dmcrypt = imagebackend.dmcrypt
+
+ def _create_image(self, sparse):
+ with contextlib.nested(
+ mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
+ mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
+ mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
+ mock.patch.object(self.disk, 'get_disk_size',
+ mock.Mock(return_value=self.TEMPLATE_SIZE)),
+ mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'create_lvm_image',
+ mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
+ mock.Mock()),
+ mock.patch.object(self.utils, 'execute', mock.Mock())):
+ fn = mock.Mock()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, self.TEMPLATE_SIZE,
+ context=self.CONTEXT)
+
+ fn.assert_called_with(context=self.CONTEXT,
+ max_size=self.TEMPLATE_SIZE,
+ target=self.TEMPLATE_PATH)
+ self.lvm.create_volume.assert_called_with(self.VG,
+ self.LV,
+ self.TEMPLATE_SIZE,
+ sparse=sparse)
+ self.dmcrypt.create_volume.assert_called_with(
+ self.PATH.rpartition('/')[2],
+ self.LV_PATH,
+ CONF.ephemeral_storage_encryption.cipher,
+ CONF.ephemeral_storage_encryption.key_size,
+ self.KEY)
+ cmd = ('qemu-img',
+ 'convert',
+ '-O',
+ 'raw',
+ self.TEMPLATE_PATH,
+ self.PATH)
+ self.utils.execute.assert_called_with(*cmd, run_as_root=True)
+
+ def _create_image_generated(self, sparse):
+ with contextlib.nested(
+ mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
+ mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
+ mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
+ mock.patch.object(self.disk, 'get_disk_size',
+ mock.Mock(return_value=self.TEMPLATE_SIZE)),
+ mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'create_lvm_image',
+ mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
+ mock.Mock()),
+ mock.patch.object(self.utils, 'execute', mock.Mock())):
+ fn = mock.Mock()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH,
+ self.SIZE,
+ ephemeral_size=None,
+ context=self.CONTEXT)
+
+ self.lvm.create_volume.assert_called_with(
+ self.VG,
+ self.LV,
+ self.SIZE,
+ sparse=sparse)
+ self.dmcrypt.create_volume.assert_called_with(
+ self.PATH.rpartition('/')[2],
+ self.LV_PATH,
+ CONF.ephemeral_storage_encryption.cipher,
+ CONF.ephemeral_storage_encryption.key_size,
+ self.KEY)
+ fn.assert_called_with(target=self.PATH,
+ ephemeral_size=None, context=self.CONTEXT)
+
+ def _create_image_resize(self, sparse):
+ with contextlib.nested(
+ mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
+ mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
+ mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
+ mock.patch.object(self.disk, 'get_disk_size',
+ mock.Mock(return_value=self.TEMPLATE_SIZE)),
+ mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'create_lvm_image',
+ mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
+ mock.Mock()),
+ mock.patch.object(self.utils, 'execute', mock.Mock())):
+ fn = mock.Mock()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.create_image(fn, self.TEMPLATE_PATH, self.SIZE,
+ context=self.CONTEXT)
+
+ fn.assert_called_with(context=self.CONTEXT, max_size=self.SIZE,
+ target=self.TEMPLATE_PATH)
+ self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
+ self.lvm.create_volume.assert_called_with(
+ self.VG,
+ self.LV,
+ self.SIZE,
+ sparse=sparse)
+ self.dmcrypt.create_volume.assert_called_with(
+ self.PATH.rpartition('/')[2],
+ self.LV_PATH,
+ CONF.ephemeral_storage_encryption.cipher,
+ CONF.ephemeral_storage_encryption.key_size,
+ self.KEY)
+ cmd = ('qemu-img',
+ 'convert',
+ '-O',
+ 'raw',
+ self.TEMPLATE_PATH,
+ self.PATH)
+ self.utils.execute.assert_called_with(*cmd, run_as_root=True)
+ self.disk.resize2fs.assert_called_with(self.PATH, run_as_root=True)
+
+ def test_create_image(self):
+ self._create_image(False)
+
+ def test_create_image_sparsed(self):
+ self.flags(sparse_logical_volumes=True, group='libvirt')
+ self._create_image(True)
+
+ def test_create_image_generated(self):
+ self._create_image_generated(False)
+
+ def test_create_image_generated_sparsed(self):
+ self.flags(sparse_logical_volumes=True, group='libvirt')
+ self._create_image_generated(True)
+
+ def test_create_image_resize(self):
+ self._create_image_resize(False)
+
+ def test_create_image_resize_sparsed(self):
+ self.flags(sparse_logical_volumes=True, group='libvirt')
+ self._create_image_resize(True)
+
+ def test_create_image_negative(self):
+ with contextlib.nested(
+ mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
+ mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
+ mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
+ mock.patch.object(self.disk, 'get_disk_size',
+ mock.Mock(return_value=self.TEMPLATE_SIZE)),
+ mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'create_lvm_image',
+ mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
+ mock.Mock()),
+ mock.patch.object(self.utils, 'execute', mock.Mock())):
+ fn = mock.Mock()
+ self.lvm.create_volume.side_effect = RuntimeError()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.assertRaises(
+ RuntimeError,
+ image.create_image,
+ fn,
+ self.TEMPLATE_PATH,
+ self.SIZE,
+ context=self.CONTEXT)
+
+ fn.assert_called_with(
+ context=self.CONTEXT,
+ max_size=self.SIZE,
+ target=self.TEMPLATE_PATH)
+ self.disk.get_disk_size.assert_called_with(
+ self.TEMPLATE_PATH)
+ self.lvm.create_volume.assert_called_with(
+ self.VG,
+ self.LV,
+ self.SIZE,
+ sparse=False)
+ self.dmcrypt.delete_volume.assert_called_with(
+ self.PATH.rpartition('/')[2])
+ self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
+
+ def test_create_image_encrypt_negative(self):
+ with contextlib.nested(
+ mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
+ mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
+ mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
+ mock.patch.object(self.disk, 'get_disk_size',
+ mock.Mock(return_value=self.TEMPLATE_SIZE)),
+ mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'create_lvm_image',
+ mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
+ mock.Mock()),
+ mock.patch.object(self.utils, 'execute', mock.Mock())):
+ fn = mock.Mock()
+ self.dmcrypt.create_volume.side_effect = RuntimeError()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.assertRaises(
+ RuntimeError,
+ image.create_image,
+ fn,
+ self.TEMPLATE_PATH,
+ self.SIZE,
+ context=self.CONTEXT)
+
+ fn.assert_called_with(
+ context=self.CONTEXT,
+ max_size=self.SIZE,
+ target=self.TEMPLATE_PATH)
+ self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
+ self.lvm.create_volume.assert_called_with(
+ self.VG,
+ self.LV,
+ self.SIZE,
+ sparse=False)
+ self.dmcrypt.create_volume.assert_called_with(
+ self.dmcrypt.volume_name(self.LV),
+ self.LV_PATH,
+ CONF.ephemeral_storage_encryption.cipher,
+ CONF.ephemeral_storage_encryption.key_size,
+ self.KEY)
+ self.dmcrypt.delete_volume.assert_called_with(
+ self.PATH.rpartition('/')[2])
+ self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
+
+ def test_create_image_generated_negative(self):
+ with contextlib.nested(
+ mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
+ mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
+ mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
+ mock.patch.object(self.disk, 'get_disk_size',
+ mock.Mock(return_value=self.TEMPLATE_SIZE)),
+ mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'create_lvm_image',
+ mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
+ mock.Mock()),
+ mock.patch.object(self.utils, 'execute', mock.Mock())):
+ fn = mock.Mock()
+ fn.side_effect = RuntimeError()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.assertRaises(RuntimeError,
+ image.create_image,
+ fn,
+ self.TEMPLATE_PATH,
+ self.SIZE,
+ ephemeral_size=None,
+ context=self.CONTEXT)
+
+ self.lvm.create_volume.assert_called_with(
+ self.VG,
+ self.LV,
+ self.SIZE,
+ sparse=False)
+ self.dmcrypt.create_volume.assert_called_with(
+ self.PATH.rpartition('/')[2],
+ self.LV_PATH,
+ CONF.ephemeral_storage_encryption.cipher,
+ CONF.ephemeral_storage_encryption.key_size,
+ self.KEY)
+ fn.assert_called_with(
+ target=self.PATH,
+ ephemeral_size=None,
+ context=self.CONTEXT)
+ self.dmcrypt.delete_volume.assert_called_with(
+ self.PATH.rpartition('/')[2])
+ self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
+
+ def test_create_image_generated_encrypt_negative(self):
+ with contextlib.nested(
+ mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
+ mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
+ mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
+ mock.patch.object(self.disk, 'get_disk_size',
+ mock.Mock(return_value=self.TEMPLATE_SIZE)),
+ mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
+ mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'create_lvm_image',
+ mock.Mock()),
+ mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
+ mock.Mock()),
+ mock.patch.object(self.utils, 'execute', mock.Mock())):
+ fn = mock.Mock()
+ fn.side_effect = RuntimeError()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.assertRaises(
+ RuntimeError,
+ image.create_image,
+ fn,
+ self.TEMPLATE_PATH,
+ self.SIZE,
+ ephemeral_size=None,
+ context=self.CONTEXT)
+
+ self.lvm.create_volume.assert_called_with(
+ self.VG,
+ self.LV,
+ self.SIZE,
+ sparse=False)
+ self.dmcrypt.create_volume.assert_called_with(
+ self.PATH.rpartition('/')[2],
+ self.LV_PATH,
+ CONF.ephemeral_storage_encryption.cipher,
+ CONF.ephemeral_storage_encryption.key_size,
+ self.KEY)
+ self.dmcrypt.delete_volume.assert_called_with(
+ self.PATH.rpartition('/')[2])
+ self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
+
+ def test_prealloc_image(self):
+ self.flags(preallocate_images='space')
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ def fake_fetch(target, *args, **kwargs):
+ return
+
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(image, 'check_image_exists', lambda: True)
+
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+
+ self.assertEqual(fake_processutils.fake_execute_get_log(), [])
+
+
+class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
+ POOL = "FakePool"
+ USER = "FakeUser"
+ CONF = "FakeConf"
+ SIZE = 1024
+
+ def setUp(self):
+ self.image_class = imagebackend.Rbd
+ super(RbdTestCase, self).setUp()
+ self.flags(images_rbd_pool=self.POOL,
+ rbd_user=self.USER,
+ images_rbd_ceph_conf=self.CONF,
+ group='libvirt')
+ self.libvirt_utils = imagebackend.libvirt_utils
+ self.utils = imagebackend.utils
+ self.mox.StubOutWithMock(rbd_utils, 'rbd')
+ self.mox.StubOutWithMock(rbd_utils, 'rados')
+
+ def test_cache(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(image, 'check_image_exists')
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
+ image.check_image_exists().AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
+ imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
+ self.mox.ReplayAll()
+
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_base_dir_exists(self):
+ fn = self.mox.CreateMockAnything()
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(image, 'check_image_exists')
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ image.check_image_exists().AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
+ self.mox.ReplayAll()
+
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_image_exists(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(image, 'check_image_exists')
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ image.check_image_exists().AndReturn(True)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image.cache(None, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_template_exists(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ self.mox.StubOutWithMock(os.path, 'exists')
+ self.mox.StubOutWithMock(image, 'check_image_exists')
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ image.check_image_exists().AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ self.mock_create_image(image)
+ image.cache(None, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_create_image(self):
+ fn = self.mox.CreateMockAnything()
+ fn(max_size=None, target=self.TEMPLATE_PATH)
+
+ rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
+
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mox.StubOutWithMock(image, 'check_image_exists')
+ image.check_image_exists().AndReturn(False)
+ image.check_image_exists().AndReturn(False)
+ self.mox.ReplayAll()
+
+ image.create_image(fn, self.TEMPLATE_PATH, None)
+
+ rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
+ cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
+ rbd_name, '--new-format', '--id', self.USER,
+ '--conf', self.CONF)
+ self.assertEqual(fake_processutils.fake_execute_get_log(),
+ [' '.join(cmd)])
+ self.mox.VerifyAll()
+
+ def test_create_image_resize(self):
+ fn = self.mox.CreateMockAnything()
+ full_size = self.SIZE * 2
+ fn(max_size=full_size, target=self.TEMPLATE_PATH)
+
+ rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
+
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mox.StubOutWithMock(image, 'check_image_exists')
+ image.check_image_exists().AndReturn(False)
+ image.check_image_exists().AndReturn(False)
+ rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
+ cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
+ rbd_name, '--new-format', '--id', self.USER,
+ '--conf', self.CONF)
+ self.mox.StubOutWithMock(image, 'get_disk_size')
+ image.get_disk_size(rbd_name).AndReturn(self.SIZE)
+ self.mox.StubOutWithMock(image.driver, 'resize')
+ image.driver.resize(rbd_name, full_size)
+
+ self.mox.ReplayAll()
+
+ image.create_image(fn, self.TEMPLATE_PATH, full_size)
+
+ self.assertEqual(fake_processutils.fake_execute_get_log(),
+ [' '.join(cmd)])
+ self.mox.VerifyAll()
+
+ def test_create_image_already_exists(self):
+ rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mox.StubOutWithMock(image, 'check_image_exists')
+ image.check_image_exists().AndReturn(True)
+ self.mox.StubOutWithMock(image, 'get_disk_size')
+ image.get_disk_size(self.TEMPLATE_PATH).AndReturn(self.SIZE)
+ image.check_image_exists().AndReturn(True)
+ rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
+ image.get_disk_size(rbd_name).AndReturn(self.SIZE)
+
+ self.mox.ReplayAll()
+
+ fn = self.mox.CreateMockAnything()
+ image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
+
+ self.mox.VerifyAll()
+
+ def test_prealloc_image(self):
+ CONF.set_override('preallocate_images', 'space')
+
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ def fake_fetch(target, *args, **kwargs):
+ return
+
+ def fake_resize(rbd_name, size):
+ return
+
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(image, 'check_image_exists', lambda: True)
+
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+
+ self.assertEqual(fake_processutils.fake_execute_get_log(), [])
+
+ def test_parent_compatible(self):
+ self.assertEqual(inspect.getargspec(imagebackend.Image.libvirt_info),
+ inspect.getargspec(self.image_class.libvirt_info))
+
+ def test_image_path(self):
+
+ conf = "FakeConf"
+ pool = "FakePool"
+ user = "FakeUser"
+
+ self.flags(images_rbd_pool=pool, group='libvirt')
+ self.flags(images_rbd_ceph_conf=conf, group='libvirt')
+ self.flags(rbd_user=user, group='libvirt')
+ image = self.image_class(self.INSTANCE, self.NAME)
+ rbd_path = "rbd:%s/%s:id=%s:conf=%s" % (pool, image.rbd_name,
+ user, conf)
+
+ self.assertEqual(image.path, rbd_path)
+
+
+class BackendTestCase(test.NoDBTestCase):
+ INSTANCE = {'name': 'fake-instance',
+ 'uuid': uuidutils.generate_uuid()}
+ NAME = 'fake-name.suffix'
+
+ def setUp(self):
+ super(BackendTestCase, self).setUp()
+ self.flags(enabled=False, group='ephemeral_storage_encryption')
+ self.INSTANCE['ephemeral_key_uuid'] = None
+
+ def get_image(self, use_cow, image_type):
+ return imagebackend.Backend(use_cow).image(self.INSTANCE,
+ self.NAME,
+ image_type)
+
+ def _test_image(self, image_type, image_not_cow, image_cow):
+ image1 = self.get_image(False, image_type)
+ image2 = self.get_image(True, image_type)
+
+ def assertIsInstance(instance, class_object):
+ failure = ('Expected %s,' +
+ ' but got %s.') % (class_object.__name__,
+ instance.__class__.__name__)
+ self.assertIsInstance(instance, class_object, msg=failure)
+
+ assertIsInstance(image1, image_not_cow)
+ assertIsInstance(image2, image_cow)
+
+ def test_image_raw(self):
+ self._test_image('raw', imagebackend.Raw, imagebackend.Raw)
+
+ def test_image_qcow2(self):
+ self._test_image('qcow2', imagebackend.Qcow2, imagebackend.Qcow2)
+
+ def test_image_lvm(self):
+ self.flags(images_volume_group='FakeVG', group='libvirt')
+ self._test_image('lvm', imagebackend.Lvm, imagebackend.Lvm)
+
+ def test_image_rbd(self):
+ conf = "FakeConf"
+ pool = "FakePool"
+ self.flags(images_rbd_pool=pool, group='libvirt')
+ self.flags(images_rbd_ceph_conf=conf, group='libvirt')
+ self.mox.StubOutWithMock(rbd_utils, 'rbd')
+ self.mox.StubOutWithMock(rbd_utils, 'rados')
+ self._test_image('rbd', imagebackend.Rbd, imagebackend.Rbd)
+
+ def test_image_default(self):
+ self._test_image('default', imagebackend.Raw, imagebackend.Qcow2)
+
+
+class UtilTestCase(test.NoDBTestCase):
+ def test_get_hw_disk_discard(self):
+ self.assertEqual('unmap', imagebackend.get_hw_disk_discard("unmap"))
+ self.assertEqual('ignore', imagebackend.get_hw_disk_discard("ignore"))
+ self.assertIsNone(imagebackend.get_hw_disk_discard(None))
+ self.assertRaises(RuntimeError, imagebackend.get_hw_disk_discard,
+ "fake")
diff --git a/nova/tests/unit/virt/libvirt/test_imagecache.py b/nova/tests/unit/virt/libvirt/test_imagecache.py
new file mode 100644
index 0000000000..d7bed2fcd0
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_imagecache.py
@@ -0,0 +1,887 @@
+# Copyright 2012 Michael Still and Canonical Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import contextlib
+import cStringIO
+import hashlib
+import os
+import time
+
+from oslo.concurrency import processutils
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import importutils
+
+from nova import conductor
+from nova import db
+from nova.openstack.common import log as logging
+from nova import test
+from nova.tests.unit import fake_instance
+from nova import utils
+from nova.virt.libvirt import imagecache
+from nova.virt.libvirt import utils as libvirt_utils
+
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('host', 'nova.netconf')
+
+
+@contextlib.contextmanager
+def intercept_log_messages():
+ try:
+ mylog = logging.getLogger('nova')
+ stream = cStringIO.StringIO()
+ handler = logging.logging.StreamHandler(stream)
+ handler.setFormatter(logging.ContextFormatter())
+ mylog.logger.addHandler(handler)
+ yield stream
+ finally:
+ mylog.logger.removeHandler(handler)
+
+
+class ImageCacheManagerTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(ImageCacheManagerTestCase, self).setUp()
+ self.stock_instance_names = set(['instance-00000001',
+ 'instance-00000002',
+ 'instance-00000003',
+ 'banana-42-hamster'])
+
+ def test_read_stored_checksum_missing(self):
+ self.stubs.Set(os.path, 'exists', lambda x: False)
+ csum = imagecache.read_stored_checksum('/tmp/foo', timestamped=False)
+ self.assertIsNone(csum)
+
+ def test_read_stored_checksum(self):
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ self.flags(image_info_filename_pattern=('$instances_path/'
+ '%(image)s.info'),
+ group='libvirt')
+
+ csum_input = '{"sha1": "fdghkfhkgjjksfdgjksjkghsdf"}\n'
+ fname = os.path.join(tmpdir, 'aaa')
+ info_fname = imagecache.get_info_filename(fname)
+ f = open(info_fname, 'w')
+ f.write(csum_input)
+ f.close()
+
+ csum_output = imagecache.read_stored_checksum(fname,
+ timestamped=False)
+ self.assertEqual(csum_input.rstrip(),
+ '{"sha1": "%s"}' % csum_output)
+
+ def test_read_stored_checksum_legacy_essex(self):
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ self.flags(image_info_filename_pattern=('$instances_path/'
+ '%(image)s.info'),
+ group='libvirt')
+
+ fname = os.path.join(tmpdir, 'aaa')
+ old_fname = fname + '.sha1'
+ f = open(old_fname, 'w')
+ f.write('fdghkfhkgjjksfdgjksjkghsdf')
+ f.close()
+
+ csum_output = imagecache.read_stored_checksum(fname,
+ timestamped=False)
+ self.assertEqual(csum_output, 'fdghkfhkgjjksfdgjksjkghsdf')
+ self.assertFalse(os.path.exists(old_fname))
+ info_fname = imagecache.get_info_filename(fname)
+ self.assertTrue(os.path.exists(info_fname))
+
+ def test_list_base_images(self):
+ listing = ['00000001',
+ 'ephemeral_0_20_None',
+ '17d1b00b81642842e514494a78e804e9a511637c_5368709120.info',
+ '00000004']
+ images = ['e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
+ 'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
+ 'e97222e91fc4241f49a7f520d1dcf446751129b3',
+ '17d1b00b81642842e514494a78e804e9a511637c',
+ '17d1b00b81642842e514494a78e804e9a511637c_5368709120',
+ '17d1b00b81642842e514494a78e804e9a511637c_10737418240']
+ listing.extend(images)
+
+ self.stubs.Set(os, 'listdir', lambda x: listing)
+ self.stubs.Set(os.path, 'isfile', lambda x: True)
+
+ base_dir = '/var/lib/nova/instances/_base'
+ self.flags(instances_path='/var/lib/nova/instances')
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager._list_base_images(base_dir)
+
+ sanitized = []
+ for ent in image_cache_manager.unexplained_images:
+ sanitized.append(ent.replace(base_dir + '/', ''))
+
+ self.assertEqual(sorted(sanitized), sorted(images))
+
+ expected = os.path.join(base_dir,
+ 'e97222e91fc4241f49a7f520d1dcf446751129b3')
+ self.assertIn(expected, image_cache_manager.unexplained_images)
+
+ expected = os.path.join(base_dir,
+ '17d1b00b81642842e514494a78e804e9a511637c_'
+ '10737418240')
+ self.assertIn(expected, image_cache_manager.unexplained_images)
+
+ unexpected = os.path.join(base_dir, '00000004')
+ self.assertNotIn(unexpected, image_cache_manager.unexplained_images)
+
+ for ent in image_cache_manager.unexplained_images:
+ self.assertTrue(ent.startswith(base_dir))
+
+ self.assertEqual(len(image_cache_manager.originals), 2)
+
+ expected = os.path.join(base_dir,
+ '17d1b00b81642842e514494a78e804e9a511637c')
+ self.assertIn(expected, image_cache_manager.originals)
+
+ unexpected = os.path.join(base_dir,
+ '17d1b00b81642842e514494a78e804e9a511637c_'
+ '10737418240')
+ self.assertNotIn(unexpected, image_cache_manager.originals)
+
+ def test_list_backing_images_small(self):
+ self.stubs.Set(os, 'listdir',
+ lambda x: ['_base', 'instance-00000001',
+ 'instance-00000002', 'instance-00000003'])
+ self.stubs.Set(os.path, 'exists',
+ lambda x: x.find('instance-') != -1)
+ self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
+ lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
+
+ found = os.path.join(CONF.instances_path,
+ CONF.image_cache_subdirectory_name,
+ 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [found]
+ image_cache_manager.instance_names = self.stock_instance_names
+
+ inuse_images = image_cache_manager._list_backing_images()
+
+ self.assertEqual(inuse_images, [found])
+ self.assertEqual(len(image_cache_manager.unexplained_images), 0)
+
+ def test_list_backing_images_resized(self):
+ self.stubs.Set(os, 'listdir',
+ lambda x: ['_base', 'instance-00000001',
+ 'instance-00000002', 'instance-00000003'])
+ self.stubs.Set(os.path, 'exists',
+ lambda x: x.find('instance-') != -1)
+ self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
+ lambda x: ('e97222e91fc4241f49a7f520d1dcf446751129b3_'
+ '10737418240'))
+
+ found = os.path.join(CONF.instances_path,
+ CONF.image_cache_subdirectory_name,
+ 'e97222e91fc4241f49a7f520d1dcf446751129b3_'
+ '10737418240')
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [found]
+ image_cache_manager.instance_names = self.stock_instance_names
+
+ inuse_images = image_cache_manager._list_backing_images()
+
+ self.assertEqual(inuse_images, [found])
+ self.assertEqual(len(image_cache_manager.unexplained_images), 0)
+
+ def test_list_backing_images_instancename(self):
+ self.stubs.Set(os, 'listdir',
+ lambda x: ['_base', 'banana-42-hamster'])
+ self.stubs.Set(os.path, 'exists',
+ lambda x: x.find('banana-42-hamster') != -1)
+ self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
+ lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
+
+ found = os.path.join(CONF.instances_path,
+ CONF.image_cache_subdirectory_name,
+ 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [found]
+ image_cache_manager.instance_names = self.stock_instance_names
+
+ inuse_images = image_cache_manager._list_backing_images()
+
+ self.assertEqual(inuse_images, [found])
+ self.assertEqual(len(image_cache_manager.unexplained_images), 0)
+
+ def test_list_backing_images_disk_notexist(self):
+ self.stubs.Set(os, 'listdir',
+ lambda x: ['_base', 'banana-42-hamster'])
+ self.stubs.Set(os.path, 'exists',
+ lambda x: x.find('banana-42-hamster') != -1)
+
+ def fake_get_disk(disk_path):
+ raise processutils.ProcessExecutionError()
+
+ self.stubs.Set(libvirt_utils, 'get_disk_backing_file', fake_get_disk)
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = []
+ image_cache_manager.instance_names = self.stock_instance_names
+
+ self.assertRaises(processutils.ProcessExecutionError,
+ image_cache_manager._list_backing_images)
+
+ def test_find_base_file_nothing(self):
+ self.stubs.Set(os.path, 'exists', lambda x: False)
+
+ base_dir = '/var/lib/nova/instances/_base'
+ fingerprint = '549867354867'
+ image_cache_manager = imagecache.ImageCacheManager()
+ res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
+
+ self.assertEqual(0, len(res))
+
+ def test_find_base_file_small(self):
+ fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
+ self.stubs.Set(os.path, 'exists',
+ lambda x: x.endswith('%s_sm' % fingerprint))
+
+ base_dir = '/var/lib/nova/instances/_base'
+ image_cache_manager = imagecache.ImageCacheManager()
+ res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
+
+ base_file = os.path.join(base_dir, fingerprint + '_sm')
+ self.assertEqual(res, [(base_file, True, False)])
+
+ def test_find_base_file_resized(self):
+ fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
+ listing = ['00000001',
+ 'ephemeral_0_20_None',
+ '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
+ '00000004']
+
+ self.stubs.Set(os, 'listdir', lambda x: listing)
+ self.stubs.Set(os.path, 'exists',
+ lambda x: x.endswith('%s_10737418240' % fingerprint))
+ self.stubs.Set(os.path, 'isfile', lambda x: True)
+
+ base_dir = '/var/lib/nova/instances/_base'
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager._list_base_images(base_dir)
+ res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
+
+ base_file = os.path.join(base_dir, fingerprint + '_10737418240')
+ self.assertEqual(res, [(base_file, False, True)])
+
+ def test_find_base_file_all(self):
+ fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
+ listing = ['00000001',
+ 'ephemeral_0_20_None',
+ '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_sm',
+ '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
+ '00000004']
+
+ self.stubs.Set(os, 'listdir', lambda x: listing)
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ self.stubs.Set(os.path, 'isfile', lambda x: True)
+
+ base_dir = '/var/lib/nova/instances/_base'
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager._list_base_images(base_dir)
+ res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
+
+ base_file1 = os.path.join(base_dir, fingerprint)
+ base_file2 = os.path.join(base_dir, fingerprint + '_sm')
+ base_file3 = os.path.join(base_dir, fingerprint + '_10737418240')
+ self.assertEqual(res, [(base_file1, False, False),
+ (base_file2, True, False),
+ (base_file3, False, True)])
+
+ @contextlib.contextmanager
+ def _make_base_file(self, checksum=True):
+ """Make a base file for testing."""
+
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ self.flags(image_info_filename_pattern=('$instances_path/'
+ '%(image)s.info'),
+ group='libvirt')
+ fname = os.path.join(tmpdir, 'aaa')
+
+ base_file = open(fname, 'w')
+ base_file.write('data')
+ base_file.close()
+ base_file = open(fname, 'r')
+
+ if checksum:
+ imagecache.write_stored_checksum(fname)
+
+ base_file.close()
+ yield fname
+
+ def test_remove_base_file(self):
+ with self._make_base_file() as fname:
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager._remove_base_file(fname)
+ info_fname = imagecache.get_info_filename(fname)
+
+ # Files are initially too new to delete
+ self.assertTrue(os.path.exists(fname))
+ self.assertTrue(os.path.exists(info_fname))
+
+ # Old files get cleaned up though
+ os.utime(fname, (-1, time.time() - 3601))
+ image_cache_manager._remove_base_file(fname)
+
+ self.assertFalse(os.path.exists(fname))
+ self.assertFalse(os.path.exists(info_fname))
+
+ def test_remove_base_file_original(self):
+ with self._make_base_file() as fname:
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.originals = [fname]
+ image_cache_manager._remove_base_file(fname)
+ info_fname = imagecache.get_info_filename(fname)
+
+ # Files are initially too new to delete
+ self.assertTrue(os.path.exists(fname))
+ self.assertTrue(os.path.exists(info_fname))
+
+ # This file should stay longer than a resized image
+ os.utime(fname, (-1, time.time() - 3601))
+ image_cache_manager._remove_base_file(fname)
+
+ self.assertTrue(os.path.exists(fname))
+ self.assertTrue(os.path.exists(info_fname))
+
+ # Originals don't stay forever though
+ os.utime(fname, (-1, time.time() - 3600 * 25))
+ image_cache_manager._remove_base_file(fname)
+
+ self.assertFalse(os.path.exists(fname))
+ self.assertFalse(os.path.exists(info_fname))
+
+ def test_remove_base_file_dne(self):
+ # This test is solely to execute the "does not exist" code path. We
+ # don't expect the method being tested to do anything in this case.
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ self.flags(image_info_filename_pattern=('$instances_path/'
+ '%(image)s.info'),
+ group='libvirt')
+
+ fname = os.path.join(tmpdir, 'aaa')
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager._remove_base_file(fname)
+
+ def test_remove_base_file_oserror(self):
+ with intercept_log_messages() as stream:
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ self.flags(image_info_filename_pattern=('$instances_path/'
+ '%(image)s.info'),
+ group='libvirt')
+
+ fname = os.path.join(tmpdir, 'aaa')
+
+ os.mkdir(fname)
+ os.utime(fname, (-1, time.time() - 3601))
+
+ # This will raise an OSError because of file permissions
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager._remove_base_file(fname)
+
+ self.assertTrue(os.path.exists(fname))
+ self.assertNotEqual(stream.getvalue().find('Failed to remove'),
+ -1)
+
+ def test_handle_base_image_unused(self):
+ img = '123'
+
+ with self._make_base_file() as fname:
+ os.utime(fname, (-1, time.time() - 3601))
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [fname]
+ image_cache_manager._handle_base_image(img, fname)
+
+ self.assertEqual(image_cache_manager.unexplained_images, [])
+ self.assertEqual(image_cache_manager.removable_base_files,
+ [fname])
+ self.assertEqual(image_cache_manager.corrupt_base_files, [])
+
+ def test_handle_base_image_used(self):
+ self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
+ img = '123'
+
+ with self._make_base_file() as fname:
+ os.utime(fname, (-1, time.time() - 3601))
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [fname]
+ image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
+ image_cache_manager._handle_base_image(img, fname)
+
+ self.assertEqual(image_cache_manager.unexplained_images, [])
+ self.assertEqual(image_cache_manager.removable_base_files, [])
+ self.assertEqual(image_cache_manager.corrupt_base_files, [])
+
+ def test_handle_base_image_used_remotely(self):
+ self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
+ img = '123'
+
+ with self._make_base_file() as fname:
+ os.utime(fname, (-1, time.time() - 3601))
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [fname]
+ image_cache_manager.used_images = {'123': (0, 1, ['banana-42'])}
+ image_cache_manager._handle_base_image(img, fname)
+
+ self.assertEqual(image_cache_manager.unexplained_images, [])
+ self.assertEqual(image_cache_manager.removable_base_files, [])
+ self.assertEqual(image_cache_manager.corrupt_base_files, [])
+
+ def test_handle_base_image_absent(self):
+ img = '123'
+
+ with intercept_log_messages() as stream:
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
+ image_cache_manager._handle_base_image(img, None)
+
+ self.assertEqual(image_cache_manager.unexplained_images, [])
+ self.assertEqual(image_cache_manager.removable_base_files, [])
+ self.assertEqual(image_cache_manager.corrupt_base_files, [])
+ self.assertNotEqual(stream.getvalue().find('an absent base file'),
+ -1)
+
+ def test_handle_base_image_used_missing(self):
+ img = '123'
+
+ with utils.tempdir() as tmpdir:
+ self.flags(instances_path=tmpdir)
+ self.flags(image_info_filename_pattern=('$instances_path/'
+ '%(image)s.info'),
+ group='libvirt')
+
+ fname = os.path.join(tmpdir, 'aaa')
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [fname]
+ image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
+ image_cache_manager._handle_base_image(img, fname)
+
+ self.assertEqual(image_cache_manager.unexplained_images, [])
+ self.assertEqual(image_cache_manager.removable_base_files, [])
+ self.assertEqual(image_cache_manager.corrupt_base_files, [])
+
+ def test_handle_base_image_checksum_fails(self):
+ self.flags(checksum_base_images=True, group='libvirt')
+ self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
+
+ img = '123'
+
+ with self._make_base_file() as fname:
+ with open(fname, 'w') as f:
+ f.write('banana')
+
+ d = {'sha1': '21323454'}
+ with open('%s.info' % fname, 'w') as f:
+ f.write(jsonutils.dumps(d))
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager.unexplained_images = [fname]
+ image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
+ image_cache_manager._handle_base_image(img, fname)
+
+ self.assertEqual(image_cache_manager.unexplained_images, [])
+ self.assertEqual(image_cache_manager.removable_base_files, [])
+ self.assertEqual(image_cache_manager.corrupt_base_files,
+ [fname])
+
+    def test_verify_base_images(self):
+        # End-to-end exercise of ImageCacheManager.update(): every
+        # filesystem call under /instance_path is stubbed out, then we
+        # verify which base files are classified as active vs removable.
+        hashed_1 = '356a192b7913b04c54574d18c28d46e6395428ab'
+        hashed_21 = '472b07b9fcf2c2451e8781e944bf5f77cd8457c8'
+        hashed_22 = '12c6fc06c99a462375eeb3f43dfd832b08ca9e17'
+        hashed_42 = '92cfceb39d57d914ed8b14d0e37643de0797ae56'
+
+        self.flags(instances_path='/instance_path',
+                   image_cache_subdirectory_name='_base')
+
+        base_file_list = ['00000001',
+                          'ephemeral_0_20_None',
+                          'e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
+                          'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
+                          hashed_42,
+                          hashed_1,
+                          hashed_21,
+                          hashed_22,
+                          '%s_5368709120' % hashed_1,
+                          '%s_10737418240' % hashed_1,
+                          '00000004']
+
+        def fq_path(path):
+            # Fully qualified path inside the fake base directory.
+            return os.path.join('/instance_path/_base/', path)
+
+        # Fake base directory existence
+        orig_exists = os.path.exists
+
+        def exists(path):
+            # The python coverage tool got angry with my overly broad mocks
+            if not path.startswith('/instance_path'):
+                return orig_exists(path)
+
+            if path in ['/instance_path',
+                        '/instance_path/_base',
+                        '/instance_path/instance-1/disk',
+                        '/instance_path/instance-2/disk',
+                        '/instance_path/instance-3/disk',
+                        '/instance_path/_base/%s.info' % hashed_42]:
+                return True
+
+            # Base files exist; their .info sidecars (except hashed_42's,
+            # handled above) do not.
+            for p in base_file_list:
+                if path == fq_path(p):
+                    return True
+                if path == fq_path(p) + '.info':
+                    return False
+
+            if path in ['/instance_path/_base/%s_sm' % i for i in [hashed_1,
+                                                                   hashed_21,
+                                                                   hashed_22,
+                                                                   hashed_42]]:
+                return False
+
+            self.fail('Unexpected path existence check: %s' % path)
+
+        self.stubs.Set(os.path, 'exists', lambda x: exists(x))
+
+        self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
+
+        # We need to stub utime as well
+        self.stubs.Set(os, 'utime', lambda x, y: None)
+
+        # Fake up some instances in the instances directory
+        orig_listdir = os.listdir
+
+        def listdir(path):
+            # The python coverage tool got angry with my overly broad mocks
+            if not path.startswith('/instance_path'):
+                return orig_listdir(path)
+
+            if path == '/instance_path':
+                return ['instance-1', 'instance-2', 'instance-3', '_base']
+
+            if path == '/instance_path/_base':
+                return base_file_list
+
+            self.fail('Unexpected directory listed: %s' % path)
+
+        self.stubs.Set(os, 'listdir', lambda x: listdir(x))
+
+        # Fake isfile for these faked images in _base
+        orig_isfile = os.path.isfile
+
+        def isfile(path):
+            # The python coverage tool got angry with my overly broad mocks
+            if not path.startswith('/instance_path'):
+                return orig_isfile(path)
+
+            for p in base_file_list:
+                if path == fq_path(p):
+                    return True
+
+            self.fail('Unexpected isfile call: %s' % path)
+
+        self.stubs.Set(os.path, 'isfile', lambda x: isfile(x))
+
+        # Fake the database call which lists running instances
+        instances = [{'image_ref': '1',
+                      'host': CONF.host,
+                      'name': 'instance-1',
+                      'uuid': '123',
+                      'vm_state': '',
+                      'task_state': ''},
+                     {'image_ref': '1',
+                      'kernel_id': '21',
+                      'ramdisk_id': '22',
+                      'host': CONF.host,
+                      'name': 'instance-2',
+                      'uuid': '456',
+                      'vm_state': '',
+                      'task_state': ''}]
+        all_instances = [fake_instance.fake_instance_obj(None, **instance)
+                         for instance in instances]
+        image_cache_manager = imagecache.ImageCacheManager()
+
+        # Fake the utils call which finds the backing image
+        def get_disk_backing_file(path):
+            if path in ['/instance_path/instance-1/disk',
+                        '/instance_path/instance-2/disk']:
+                return fq_path('%s_5368709120' % hashed_1)
+            self.fail('Unexpected backing file lookup: %s' % path)
+
+        self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
+                       lambda x: get_disk_backing_file(x))
+
+        # Fake out verifying checksums, as that is tested elsewhere
+        self.stubs.Set(image_cache_manager, '_verify_checksum',
+                       lambda x, y: True)
+
+        # Fake getmtime as well
+        orig_getmtime = os.path.getmtime
+
+        def getmtime(path):
+            if not path.startswith('/instance_path'):
+                return orig_getmtime(path)
+
+            return 1000000
+
+        self.stubs.Set(os.path, 'getmtime', lambda x: getmtime(x))
+
+        # Make sure we don't accidentally remove a real file
+        orig_remove = os.remove
+
+        def remove(path):
+            if not path.startswith('/instance_path'):
+                return orig_remove(path)
+
+            # Don't try to remove fake files
+            return
+
+        self.stubs.Set(os, 'remove', lambda x: remove(x))
+
+        # And finally we can make the call we're actually testing...
+        # The argument here should be a context, but it is mocked out
+        image_cache_manager.update(None, all_instances)
+
+        # Verify
+        active = [fq_path(hashed_1), fq_path('%s_5368709120' % hashed_1),
+                  fq_path(hashed_21), fq_path(hashed_22)]
+        for act in active:
+            self.assertIn(act, image_cache_manager.active_base_files)
+        self.assertEqual(len(image_cache_manager.active_base_files),
+                         len(active))
+
+        for rem in [fq_path('e97222e91fc4241f49a7f520d1dcf446751129b3_sm'),
+                    fq_path('e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm'),
+                    fq_path(hashed_42),
+                    fq_path('%s_10737418240' % hashed_1)]:
+            self.assertIn(rem, image_cache_manager.removable_base_files)
+
+        # Ensure there are no "corrupt" images as well
+        self.assertEqual(len(image_cache_manager.corrupt_base_files), 0)
+
+    def test_verify_base_images_no_base(self):
+        # update() must cope with a missing instances/_base directory
+        # without raising.
+        self.flags(instances_path='/tmp/no/such/dir/name/please')
+        image_cache_manager = imagecache.ImageCacheManager()
+        image_cache_manager.update(None, [])
+
+    def test_is_valid_info_file(self):
+        # Only paths matching the configured info-file pattern
+        # (<hash>.info under _base) count as valid info files.
+        hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'
+
+        self.flags(instances_path='/tmp/no/such/dir/name/please')
+        self.flags(image_info_filename_pattern=('$instances_path/_base/'
+                                                '%(image)s.info'),
+                   group='libvirt')
+        base_filename = os.path.join(CONF.instances_path, '_base', hashed)
+
+        is_valid_info_file = imagecache.is_valid_info_file
+        self.assertFalse(is_valid_info_file('banana'))
+        self.assertFalse(is_valid_info_file(
+            os.path.join(CONF.instances_path, '_base', '00000001')))
+        self.assertFalse(is_valid_info_file(base_filename))
+        self.assertFalse(is_valid_info_file(base_filename + '.sha1'))
+        self.assertTrue(is_valid_info_file(base_filename + '.info'))
+
+    def test_configured_checksum_path(self):
+        # A base file that is still in use by running instances, plus its
+        # info file placed per the configured image_info_filename_pattern,
+        # must survive an update() pass even when old.
+        with utils.tempdir() as tmpdir:
+            self.flags(instances_path=tmpdir)
+            self.flags(image_info_filename_pattern=('$instances_path/'
+                                                    '%(image)s.info'),
+                       group='libvirt')
+
+            # Ensure there is a base directory
+            os.mkdir(os.path.join(tmpdir, '_base'))
+
+            # Fake the database call which lists running instances
+            instances = [{'image_ref': '1',
+                          'host': CONF.host,
+                          'name': 'instance-1',
+                          'uuid': '123',
+                          'vm_state': '',
+                          'task_state': ''},
+                         {'image_ref': '1',
+                          'host': CONF.host,
+                          'name': 'instance-2',
+                          'uuid': '456',
+                          'vm_state': '',
+                          'task_state': ''}]
+
+            all_instances = []
+            for instance in instances:
+                all_instances.append(fake_instance.fake_instance_obj(
+                    None, **instance))
+
+            def touch(filename):
+                f = open(filename, 'w')
+                f.write('Touched')
+                f.close()
+
+            # 25 hours old, i.e. older than any reasonable expiry window.
+            old = time.time() - (25 * 3600)
+            hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'
+            base_filename = os.path.join(tmpdir, hashed)
+            touch(base_filename)
+            touch(base_filename + '.info')
+            os.utime(base_filename + '.info', (old, old))
+            # NOTE(review): the touch/utime pair below repeats the pair
+            # above verbatim -- looks like copy/paste and the second pair
+            # appears redundant; confirm before removing.
+            touch(base_filename + '.info')
+            os.utime(base_filename + '.info', (old, old))
+
+            image_cache_manager = imagecache.ImageCacheManager()
+            image_cache_manager.update(None, all_instances)
+
+            self.assertTrue(os.path.exists(base_filename))
+            self.assertTrue(os.path.exists(base_filename + '.info'))
+
+    def test_compute_manager(self):
+        # The compute manager's image cache pass must query the database
+        # for instances; 'was' records that the stubbed DB call ran.
+        was = {'called': False}
+
+        def fake_get_all_by_filters(context, *args, **kwargs):
+            was['called'] = True
+            instances = []
+            # NOTE(review): xrange is Python 2 only.
+            for x in xrange(2):
+                instances.append(fake_instance.fake_db_instance(
+                    image_ref='1',
+                    uuid=x,
+                    name=x,
+                    vm_state='',
+                    task_state=''))
+            return instances
+
+        with utils.tempdir() as tmpdir:
+            self.flags(instances_path=tmpdir)
+
+            self.stubs.Set(db, 'instance_get_all_by_filters',
+                           fake_get_all_by_filters)
+            compute = importutils.import_object(CONF.compute_manager)
+            self.flags(use_local=True, group='conductor')
+            compute.conductor_api = conductor.API()
+            compute._run_image_cache_manager_pass(None)
+            self.assertTrue(was['called'])
+
+
+class VerifyChecksumTestCase(test.NoDBTestCase):
+    # Tests for ImageCacheManager._verify_checksum(): valid, invalid,
+    # unparseable and missing checksum info files.
+
+    def setUp(self):
+        super(VerifyChecksumTestCase, self).setUp()
+        self.img = {'container_format': 'ami', 'id': '42'}
+        self.flags(checksum_base_images=True, group='libvirt')
+
+    def _make_checksum(self, tmpdir):
+        # Write a known payload to disk and compute where its checksum
+        # sidecar (info file) should live.
+        testdata = ('OpenStack Software delivers a massively scalable cloud '
+                    'operating system.')
+
+        fname = os.path.join(tmpdir, 'aaa')
+        info_fname = imagecache.get_info_filename(fname)
+
+        with open(fname, 'w') as f:
+            f.write(testdata)
+
+        return fname, info_fname, testdata
+
+    def _write_file(self, info_fname, info_attr, testdata):
+        # Populate the info file with a correct checksum, unparseable
+        # content, or valid JSON with a wrong checksum, per info_attr.
+        f = open(info_fname, 'w')
+        if info_attr == "csum valid":
+            csum = hashlib.sha1()
+            csum.update(testdata)
+            f.write('{"sha1": "%s"}\n' % csum.hexdigest())
+        elif info_attr == "csum invalid, not json":
+            f.write('banana')
+        else:
+            f.write('{"sha1": "banana"}')
+        f.close()
+
+    def _check_body(self, tmpdir, info_attr):
+        # Common setup: point the cache at tmpdir and create the image
+        # plus its info file in the requested state.
+        self.flags(instances_path=tmpdir)
+        self.flags(image_info_filename_pattern=('$instances_path/'
+                                                '%(image)s.info'),
+                   group='libvirt')
+        fname, info_fname, testdata = self._make_checksum(tmpdir)
+        self._write_file(info_fname, info_attr, testdata)
+        image_cache_manager = imagecache.ImageCacheManager()
+        return image_cache_manager, fname
+
+    def test_verify_checksum(self):
+        with utils.tempdir() as tmpdir:
+            image_cache_manager, fname = self._check_body(tmpdir, "csum valid")
+            res = image_cache_manager._verify_checksum(self.img, fname)
+            self.assertTrue(res)
+
+    def test_verify_checksum_disabled(self):
+        # With checksumming disabled, _verify_checksum returns None.
+        self.flags(checksum_base_images=False, group='libvirt')
+        with utils.tempdir() as tmpdir:
+            image_cache_manager, fname = self._check_body(tmpdir, "csum valid")
+            res = image_cache_manager._verify_checksum(self.img, fname)
+            self.assertIsNone(res)
+
+    def test_verify_checksum_invalid_json(self):
+        with intercept_log_messages() as stream:
+            with utils.tempdir() as tmpdir:
+                image_cache_manager, fname = (
+                    self._check_body(tmpdir, "csum invalid, not json"))
+                res = image_cache_manager._verify_checksum(
+                    self.img, fname, create_if_missing=False)
+                self.assertFalse(res)
+                log = stream.getvalue()
+
+                # NOTE(mikal): this is a skip not a fail because the file is
+                # present, but is not in valid json format and therefore is
+                # skipped.
+                self.assertNotEqual(log.find('image verification skipped'), -1)
+
+    def test_verify_checksum_invalid_repaired(self):
+        # With create_if_missing=True an unparseable info file yields
+        # None (not a failure) -- presumably it is recreated; see
+        # _verify_checksum for the exact repair behaviour.
+        with utils.tempdir() as tmpdir:
+            image_cache_manager, fname = (
+                self._check_body(tmpdir, "csum invalid, not json"))
+            res = image_cache_manager._verify_checksum(
+                self.img, fname, create_if_missing=True)
+            self.assertIsNone(res)
+
+    def test_verify_checksum_invalid(self):
+        with intercept_log_messages() as stream:
+            with utils.tempdir() as tmpdir:
+                image_cache_manager, fname = (
+                    self._check_body(tmpdir, "csum invalid, valid json"))
+                res = image_cache_manager._verify_checksum(self.img, fname)
+                self.assertFalse(res)
+                log = stream.getvalue()
+                self.assertNotEqual(log.find('image verification failed'), -1)
+
+    def test_verify_checksum_file_missing(self):
+        with utils.tempdir() as tmpdir:
+            self.flags(instances_path=tmpdir)
+            self.flags(image_info_filename_pattern=('$instances_path/'
+                                                    '%(image)s.info'),
+                       group='libvirt')
+            fname, info_fname, testdata = self._make_checksum(tmpdir)
+
+            image_cache_manager = imagecache.ImageCacheManager()
+            res = image_cache_manager._verify_checksum('aaa', fname)
+            self.assertIsNone(res)
+
+            # Checksum requests for a file with no checksum now have the
+            # side effect of creating the checksum
+            self.assertTrue(os.path.exists(info_fname))
diff --git a/nova/tests/virt/libvirt/test_lvm.py b/nova/tests/unit/virt/libvirt/test_lvm.py
index fdb3e4b9f6..fdb3e4b9f6 100644
--- a/nova/tests/virt/libvirt/test_lvm.py
+++ b/nova/tests/unit/virt/libvirt/test_lvm.py
diff --git a/nova/tests/virt/libvirt/test_rbd.py b/nova/tests/unit/virt/libvirt/test_rbd.py
index bcbdc25f59..bcbdc25f59 100644
--- a/nova/tests/virt/libvirt/test_rbd.py
+++ b/nova/tests/unit/virt/libvirt/test_rbd.py
diff --git a/nova/tests/virt/libvirt/test_utils.py b/nova/tests/unit/virt/libvirt/test_utils.py
index 4114c03516..4114c03516 100644
--- a/nova/tests/virt/libvirt/test_utils.py
+++ b/nova/tests/unit/virt/libvirt/test_utils.py
diff --git a/nova/tests/unit/virt/libvirt/test_vif.py b/nova/tests/unit/virt/libvirt/test_vif.py
new file mode 100644
index 0000000000..3d64dd5ad0
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_vif.py
@@ -0,0 +1,959 @@
+# Copyright 2012 Nicira, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+from lxml import etree
+import mock
+from oslo.concurrency import processutils
+from oslo.config import cfg
+
+from nova import exception
+from nova.network import linux_net
+from nova.network import model as network_model
+from nova import test
+from nova.tests.unit.virt.libvirt import fakelibvirt
+from nova import utils
+from nova.virt.libvirt import config as vconfig
+from nova.virt.libvirt import vif
+
+CONF = cfg.CONF
+
+
+class LibvirtVifTestCase(test.NoDBTestCase):
+
+    # Shared network-model fixtures used by all the tests below.
+
+    # IPv4 / IPv6 subnets shared by the network fixtures.
+    gateway_bridge_4 = network_model.IP(address='101.168.1.1', type='gateway')
+    dns_bridge_4 = network_model.IP(address='8.8.8.8', type=None)
+    ips_bridge_4 = [network_model.IP(address='101.168.1.9', type=None)]
+    subnet_bridge_4 = network_model.Subnet(cidr='101.168.1.0/24',
+                                           dns=[dns_bridge_4],
+                                           gateway=gateway_bridge_4,
+                                           routes=None,
+                                           dhcp_server='191.168.1.1')
+
+    gateway_bridge_6 = network_model.IP(address='101:1db9::1', type='gateway')
+    subnet_bridge_6 = network_model.Subnet(cidr='101:1db9::/64',
+                                           dns=None,
+                                           gateway=gateway_bridge_6,
+                                           ips=None,
+                                           routes=None)
+
+    # Linux-bridge style network and VIF fixtures.
+    network_bridge = network_model.Network(id='network-id-xxx-yyy-zzz',
+                                           bridge='br0',
+                                           label=None,
+                                           subnets=[subnet_bridge_4,
+                                                    subnet_bridge_6],
+                                           bridge_interface='eth0',
+                                           vlan=99)
+
+    vif_bridge = network_model.VIF(id='vif-xxx-yyy-zzz',
+                                   address='ca:fe:de:ad:be:ef',
+                                   network=network_bridge,
+                                   type=network_model.VIF_TYPE_BRIDGE,
+                                   devname='tap-xxx-yyy-zzz',
+                                   ovs_interfaceid=None)
+
+    network_bridge_neutron = network_model.Network(id='network-id-xxx-yyy-zzz',
+                                                   bridge=None,
+                                                   label=None,
+                                                   subnets=[subnet_bridge_4,
+                                                            subnet_bridge_6],
+                                                   bridge_interface='eth0',
+                                                   vlan=99)
+
+    vif_bridge_neutron = network_model.VIF(id='vif-xxx-yyy-zzz',
+                                           address='ca:fe:de:ad:be:ef',
+                                           network=network_bridge_neutron,
+                                           type=None,
+                                           devname='tap-xxx-yyy-zzz',
+                                           ovs_interfaceid='aaa-bbb-ccc')
+
+    # Open vSwitch and IVS network and VIF fixtures.
+    network_ovs = network_model.Network(id='network-id-xxx-yyy-zzz',
+                                        bridge='br0',
+                                        label=None,
+                                        subnets=[subnet_bridge_4,
+                                                 subnet_bridge_6],
+                                        bridge_interface=None,
+                                        vlan=99)
+
+    network_ivs = network_model.Network(id='network-id-xxx-yyy-zzz',
+                                        bridge='br0',
+                                        label=None,
+                                        subnets=[subnet_bridge_4,
+                                                 subnet_bridge_6],
+                                        bridge_interface=None,
+                                        vlan=99)
+
+    vif_ovs = network_model.VIF(id='vif-xxx-yyy-zzz',
+                                address='ca:fe:de:ad:be:ef',
+                                network=network_ovs,
+                                type=network_model.VIF_TYPE_OVS,
+                                devname='tap-xxx-yyy-zzz',
+                                ovs_interfaceid='aaa-bbb-ccc')
+
+    vif_ovs_hybrid = network_model.VIF(id='vif-xxx-yyy-zzz',
+                                       address='ca:fe:de:ad:be:ef',
+                                       network=network_ovs,
+                                       type=network_model.VIF_TYPE_OVS,
+                                       details={'ovs_hybrid_plug': True,
+                                                'port_filter': True},
+                                       devname='tap-xxx-yyy-zzz',
+                                       ovs_interfaceid='aaa-bbb-ccc')
+
+    vif_ovs_filter_cap = network_model.VIF(id='vif-xxx-yyy-zzz',
+                                           address='ca:fe:de:ad:be:ef',
+                                           network=network_ovs,
+                                           type=network_model.VIF_TYPE_OVS,
+                                           details={'port_filter': True},
+                                           devname='tap-xxx-yyy-zzz',
+                                           ovs_interfaceid='aaa-bbb-ccc')
+
+    vif_ovs_legacy = network_model.VIF(id='vif-xxx-yyy-zzz',
+                                       address='ca:fe:de:ad:be:ef',
+                                       network=network_ovs,
+                                       type=None,
+                                       devname=None,
+                                       ovs_interfaceid=None)
+
+    vif_ivs = network_model.VIF(id='vif-xxx-yyy-zzz',
+                                address='ca:fe:de:ad:be:ef',
+                                network=network_ivs,
+                                type=network_model.VIF_TYPE_IVS,
+                                devname='tap-xxx-yyy-zzz',
+                                ovs_interfaceid='aaa-bbb-ccc')
+
+    vif_ivs_legacy = network_model.VIF(id='vif-xxx-yyy-zzz',
+                                       address='ca:fe:de:ad:be:ef',
+                                       network=network_ovs,
+                                       type=None,
+                                       devname=None,
+                                       ovs_interfaceid='aaa')
+
+    vif_ivs_filter_direct = network_model.VIF(id='vif-xxx-yyy-zzz',
+                                              address='ca:fe:de:ad:be:ef',
+                                              network=network_ivs,
+                                              type=network_model.VIF_TYPE_IVS,
+                                              details={'port_filter': True},
+                                              devname='tap-xxx-yyy-zzz',
+                                              ovs_interfaceid='aaa-bbb-ccc')
+
+    vif_ivs_filter_hybrid = network_model.VIF(id='vif-xxx-yyy-zzz',
+                                              address='ca:fe:de:ad:be:ef',
+                                              network=network_ivs,
+                                              type=network_model.VIF_TYPE_IVS,
+                                              details={
+                                                  'port_filter': True,
+                                                  'ovs_hybrid_plug': True},
+                                              devname='tap-xxx-yyy-zzz',
+                                              ovs_interfaceid='aaa-bbb-ccc')
+
+    # A VIF with no type at all -- used to exercise error paths.
+    vif_none = network_model.VIF(id='vif-xxx-yyy-zzz',
+                                 address='ca:fe:de:ad:be:ef',
+                                 network=network_bridge,
+                                 type=None,
+                                 devname='tap-xxx-yyy-zzz',
+                                 ovs_interfaceid=None)
+
+    # 802.1Qbh / 802.1Qbg / SR-IOV (hw_veb) fixtures.
+    network_8021 = network_model.Network(id='network-id-xxx-yyy-zzz',
+                                         bridge=None,
+                                         label=None,
+                                         subnets=[subnet_bridge_4,
+                                                  subnet_bridge_6],
+                                         interface='eth0',
+                                         vlan=99)
+
+    vif_8021qbh = network_model.VIF(id='vif-xxx-yyy-zzz',
+                                    address='ca:fe:de:ad:be:ef',
+                                    network=network_8021,
+                                    type=network_model.VIF_TYPE_802_QBH,
+                                    vnic_type=network_model.VNIC_TYPE_DIRECT,
+                                    ovs_interfaceid=None,
+                                    details={
+                                        network_model.VIF_DETAILS_PROFILEID:
+                                        'MyPortProfile'},
+                                    profile={'pci_vendor_info': '1137:0043',
+                                             'pci_slot': '0000:0a:00.1',
+                                             'physical_network': 'phynet1'})
+
+    vif_hw_veb = network_model.VIF(id='vif-xxx-yyy-zzz',
+                                   address='ca:fe:de:ad:be:ef',
+                                   network=network_8021,
+                                   type=network_model.VIF_TYPE_HW_VEB,
+                                   vnic_type=network_model.VNIC_TYPE_DIRECT,
+                                   ovs_interfaceid=None,
+                                   details={
+                                       network_model.VIF_DETAILS_VLAN: '100'},
+                                   profile={'pci_vendor_info': '1137:0043',
+                                            'pci_slot': '0000:0a:00.1',
+                                            'physical_network': 'phynet1'})
+
+    vif_8021qbg = network_model.VIF(id='vif-xxx-yyy-zzz',
+                                    address='ca:fe:de:ad:be:ef',
+                                    network=network_8021,
+                                    type=network_model.VIF_TYPE_802_QBG,
+                                    ovs_interfaceid=None,
+                                    qbg_params=network_model.VIF8021QbgParams(
+                                        managerid="xxx-yyy-zzz",
+                                        typeid="aaa-bbb-ccc",
+                                        typeidversion="1",
+                                        instanceid="ddd-eee-fff"))
+
+    # Mellanox direct, MidoNet and iovisor fixtures.
+    network_mlnx = network_model.Network(id='network-id-xxx-yyy-zzz',
+                                         label=None,
+                                         bridge=None,
+                                         subnets=[subnet_bridge_4,
+                                                  subnet_bridge_6],
+                                         interface='eth0')
+
+    network_midonet = network_model.Network(id='network-id-xxx-yyy-zzz',
+                                            label=None,
+                                            bridge=None,
+                                            subnets=[subnet_bridge_4],
+                                            interface='eth0')
+
+    vif_mlnx = network_model.VIF(id='vif-xxx-yyy-zzz',
+                                 address='ca:fe:de:ad:be:ef',
+                                 network=network_mlnx,
+                                 type=network_model.VIF_TYPE_MLNX_DIRECT,
+                                 devname='tap-xxx-yyy-zzz')
+
+    vif_mlnx_net = network_model.VIF(id='vif-xxx-yyy-zzz',
+                                     address='ca:fe:de:ad:be:ef',
+                                     network=network_mlnx,
+                                     type=network_model.VIF_TYPE_MLNX_DIRECT,
+                                     details={'physical_network':
+                                              'fake_phy_network'},
+                                     devname='tap-xxx-yyy-zzz')
+
+    vif_midonet = network_model.VIF(id='vif-xxx-yyy-zzz',
+                                    address='ca:fe:de:ad:be:ef',
+                                    network=network_midonet,
+                                    type=network_model.VIF_TYPE_MIDONET,
+                                    devname='tap-xxx-yyy-zzz')
+
+    vif_iovisor = network_model.VIF(id='vif-xxx-yyy-zzz',
+                                    address='ca:fe:de:ad:be:ef',
+                                    network=network_bridge,
+                                    type=network_model.VIF_TYPE_IOVISOR,
+                                    devname='tap-xxx-yyy-zzz',
+                                    ovs_interfaceid=None)
+
+    # Minimal instance dict passed to the VIF drivers.
+    instance = {
+        'name': 'instance-name',
+        'uuid': 'instance-uuid'
+    }
+
+    # QoS extra specs attached to the flavor in _get_instance_xml.
+    bandwidth = {
+        'quota:vif_inbound_peak': '200',
+        'quota:vif_outbound_peak': '20',
+        'quota:vif_inbound_average': '100',
+        'quota:vif_outbound_average': '10',
+        'quota:vif_inbound_burst': '300',
+        'quota:vif_outbound_burst': '30'
+    }
+
+    def setUp(self):
+        super(LibvirtVifTestCase, self).setUp()
+        self.flags(allow_same_net_traffic=True)
+        # Record every utils.execute() invocation instead of running it.
+        self.executes = []
+
+        def fake_execute(*cmd, **kwargs):
+            self.executes.append(cmd)
+            return None, None
+
+        self.stubs.Set(utils, 'execute', fake_execute)
+
+    def _get_conn(self, uri="qemu:///session", ver=None):
+        # Return a factory producing a fake libvirt connection,
+        # optionally pinned to a specific libvirt version.
+        def __inner():
+            if ver is None:
+                return fakelibvirt.Connection(uri, False)
+            else:
+                return fakelibvirt.Connection(uri, False, ver)
+        return __inner
+
+    def _get_node(self, xml):
+        # Extract the single <interface> element from domain XML.
+        doc = etree.fromstring(xml)
+        ret = doc.findall('./devices/interface')
+        self.assertEqual(len(ret), 1)
+        return ret[0]
+
+    def _assertMacEquals(self, node, vif):
+        # The generated <mac address="..."> must match the VIF address.
+        mac = node.find("mac").get("address")
+        self.assertEqual(mac, vif['address'])
+
+    def _assertTypeEquals(self, node, type, attr, source, br_want,
+                          prefix=None):
+        # Check the interface type and its source device/bridge name
+        # (exact match, or prefix match when a prefix is given).
+        self.assertEqual(node.get("type"), type)
+        br_name = node.find(attr).get(source)
+        if prefix is None:
+            self.assertEqual(br_name, br_want)
+        else:
+            self.assertTrue(br_name.startswith(prefix))
+
+    def _assertTypeAndMacEquals(self, node, type, attr, source, vif,
+                                br_want=None, size=0, prefix=None):
+        # size is the expected number of <filterref> elements.
+        ret = node.findall("filterref")
+        self.assertEqual(len(ret), size)
+        self._assertTypeEquals(node, type, attr, source, br_want,
+                               prefix)
+        self._assertMacEquals(node, vif)
+
+    def _assertModel(self, xml, model_want=None, driver_want=None):
+        # Assert presence/absence of <model type=...> and
+        # <driver name=...> on the generated interface.
+        node = self._get_node(xml)
+        if model_want is None:
+            ret = node.findall("model")
+            self.assertEqual(len(ret), 0)
+        else:
+            model = node.find("model").get("type")
+            self.assertEqual(model, model_want)
+        if driver_want is None:
+            ret = node.findall("driver")
+            self.assertEqual(len(ret), 0)
+        else:
+            driver = node.find("driver").get("name")
+            self.assertEqual(driver, driver_want)
+
+    def _assertTypeAndPciEquals(self, node, type, vif):
+        # Rebuild a dddd:bb:ss.f PCI address from the <source><address>
+        # attributes (each value is 0x-prefixed, hence the [2:] slices)
+        # and compare it with the pci_slot from the VIF profile.
+        self.assertEqual(node.get("type"), type)
+        address = node.find("source").find("address")
+        addr_type = address.get("type")
+        self.assertEqual("pci", addr_type)
+        pci_slot = "%(domain)s:%(bus)s:%(slot)s.%(func)s" % {
+            'domain': address.get("domain")[2:],
+            'bus': address.get("bus")[2:],
+            'slot': address.get("slot")[2:],
+            'func': address.get("function")[2:]}
+
+        pci_slot_want = vif['profile']['pci_slot']
+        self.assertEqual(pci_slot, pci_slot_want)
+
+    def _get_conf(self):
+        # Minimal guest config to which a NIC can be attached.
+        conf = vconfig.LibvirtConfigGuest()
+        conf.virt_type = "qemu"
+        conf.name = "fake-name"
+        conf.uuid = "fake-uuid"
+        conf.memory = 100 * 1024
+        conf.vcpus = 4
+        return conf
+
+    def _get_instance_xml(self, driver, vif, image_meta=None):
+        # Build domain XML containing the interface the given VIF driver
+        # generates for vif; the flavor carries the QoS extra specs from
+        # self.bandwidth.
+        default_inst_type = {
+            'memory_mb': 128, 'root_gb': 0, 'deleted_at': None,
+            'name': 'm1.micro', 'deleted': 0, 'created_at': None,
+            'ephemeral_gb': 0, 'updated_at': None,
+            'disabled': False, 'vcpus': 1,
+            'swap': 0, 'rxtx_factor': 1.0, 'is_public': True,
+            'flavorid': '1', 'vcpu_weight': None, 'id': 2,
+            'extra_specs': dict(self.bandwidth)
+        }
+        conf = self._get_conf()
+        nic = driver.get_config(self.instance, vif, image_meta,
+                                default_inst_type, CONF.libvirt.virt_type)
+        conf.add_device(nic)
+        return conf.to_xml()
+
+    def test_multiple_nics(self):
+        conf = self._get_conf()
+        # Tests multiple nic configuration and that target_dev is
+        # set for each
+        nics = [{'net_type': 'bridge',
+                 'mac_addr': '00:00:00:00:00:0b',
+                 'source_dev': 'b_source_dev',
+                 'target_dev': 'b_target_dev'},
+                {'net_type': 'ethernet',
+                 'mac_addr': '00:00:00:00:00:0e',
+                 'source_dev': 'e_source_dev',
+                 'target_dev': 'e_target_dev'},
+                {'net_type': 'direct',
+                 'mac_addr': '00:00:00:00:00:0d',
+                 'source_dev': 'd_source_dev',
+                 'target_dev': 'd_target_dev'}]
+
+        for nic in nics:
+            nic_conf = vconfig.LibvirtConfigGuestInterface()
+            nic_conf.net_type = nic['net_type']
+            nic_conf.target_dev = nic['target_dev']
+            nic_conf.mac_addr = nic['mac_addr']
+            nic_conf.source_dev = nic['source_dev']
+            conf.add_device(nic_conf)
+
+        xml = conf.to_xml()
+        doc = etree.fromstring(xml)
+        for nic in nics:
+            # Locate each interface by its type and check mac/target.
+            path = "./devices/interface/[@type='%s']" % nic['net_type']
+            node = doc.find(path)
+            self.assertEqual(nic['net_type'], node.get("type"))
+            self.assertEqual(nic['mac_addr'],
+                             node.find("mac").get("address"))
+            self.assertEqual(nic['target_dev'],
+                             node.find("target").get("dev"))
+
+    def test_model_novirtio(self):
+        # With virtio disabled no <model> element should be emitted.
+        self.flags(use_virtio_for_bridges=False,
+                   virt_type='kvm',
+                   group='libvirt')
+
+        d = vif.LibvirtGenericVIFDriver(self._get_conn())
+        xml = self._get_instance_xml(d, self.vif_bridge)
+        self._assertModel(xml)
+
+    def test_model_kvm(self):
+        # KVM with virtio enabled yields the virtio model.
+        self.flags(use_virtio_for_bridges=True,
+                   virt_type='kvm',
+                   group='libvirt')
+
+        d = vif.LibvirtGenericVIFDriver(self._get_conn())
+        xml = self._get_instance_xml(d, self.vif_bridge)
+        self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
+
+    def test_model_kvm_qemu_custom(self):
+        # An image may override the NIC model via the hw_vif_model
+        # property for any of the supported models.
+        for virt in ('kvm', 'qemu'):
+            self.flags(use_virtio_for_bridges=True,
+                       virt_type=virt,
+                       group='libvirt')
+
+            d = vif.LibvirtGenericVIFDriver(self._get_conn())
+            supported = (network_model.VIF_MODEL_NE2K_PCI,
+                         network_model.VIF_MODEL_PCNET,
+                         network_model.VIF_MODEL_RTL8139,
+                         network_model.VIF_MODEL_E1000,
+                         network_model.VIF_MODEL_SPAPR_VLAN)
+            for model in supported:
+                image_meta = {'properties': {'hw_vif_model': model}}
+                xml = self._get_instance_xml(d, self.vif_bridge,
+                                             image_meta)
+                self._assertModel(xml, model)
+
+    def test_model_kvm_bogus(self):
+        # An unknown hw_vif_model must be rejected.
+        self.flags(use_virtio_for_bridges=True,
+                   virt_type='kvm',
+                   group='libvirt')
+
+        d = vif.LibvirtGenericVIFDriver(self._get_conn())
+        image_meta = {'properties': {'hw_vif_model': 'acme'}}
+        self.assertRaises(exception.UnsupportedHardware,
+                          self._get_instance_xml,
+                          d,
+                          self.vif_bridge,
+                          image_meta)
+
+    def _test_model_qemu(self, *vif_objs, **kw):
+        # For each VIF flavour, check that the generated interface carries
+        # the virtio model, the qemu driver and the bandwidth limits from
+        # the flavor's QoS extra specs.
+        libvirt_version = kw.get('libvirt_version')
+        self.flags(use_virtio_for_bridges=True,
+                   virt_type='qemu',
+                   group='libvirt')
+
+        for vif_obj in vif_objs:
+            d = vif.LibvirtGenericVIFDriver(self._get_conn())
+            if libvirt_version is not None:
+                d.libvirt_version = libvirt_version
+
+            xml = self._get_instance_xml(d, vif_obj)
+
+            doc = etree.fromstring(xml)
+
+            bandwidth = doc.find('./devices/interface/bandwidth')
+            self.assertNotEqual(bandwidth, None)
+
+            inbound = bandwidth.find('inbound')
+            self.assertEqual(inbound.get("average"),
+                             self.bandwidth['quota:vif_inbound_average'])
+            self.assertEqual(inbound.get("peak"),
+                             self.bandwidth['quota:vif_inbound_peak'])
+            self.assertEqual(inbound.get("burst"),
+                             self.bandwidth['quota:vif_inbound_burst'])
+
+            outbound = bandwidth.find('outbound')
+            self.assertEqual(outbound.get("average"),
+                             self.bandwidth['quota:vif_outbound_average'])
+            self.assertEqual(outbound.get("peak"),
+                             self.bandwidth['quota:vif_outbound_peak'])
+            self.assertEqual(outbound.get("burst"),
+                             self.bandwidth['quota:vif_outbound_burst'])
+
+            self._assertModel(xml, network_model.VIF_MODEL_VIRTIO, "qemu")
+
+    def test_model_qemu_no_firewall(self):
+        self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+        self._test_model_qemu(
+            self.vif_bridge,
+            self.vif_8021qbg,
+            self.vif_iovisor,
+            self.vif_mlnx,
+            self.vif_ovs,
+        )
+
+    def test_model_qemu_iptables(self):
+        self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
+        self._test_model_qemu(
+            self.vif_bridge,
+            self.vif_ovs,
+            self.vif_ivs,
+            self.vif_8021qbg,
+            self.vif_iovisor,
+            self.vif_mlnx,
+        )
+
+    def test_model_xen(self):
+        # Xen guests get no explicit model even with virtio enabled.
+        self.flags(use_virtio_for_bridges=True,
+                   virt_type='xen',
+                   group='libvirt')
+
+        d = vif.LibvirtGenericVIFDriver(self._get_conn("xen:///system"))
+        xml = self._get_instance_xml(d, self.vif_bridge)
+        self._assertModel(xml)
+
+    def test_generic_driver_none(self):
+        # A VIF without a type cannot be handled by the generic driver.
+        d = vif.LibvirtGenericVIFDriver(self._get_conn())
+        self.assertRaises(exception.NovaException,
+                          self._get_instance_xml,
+                          d,
+                          self.vif_none)
+
+    def _check_bridge_driver(self, d, vif, br_want):
+        xml = self._get_instance_xml(d, vif)
+        node = self._get_node(xml)
+        self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
+                                     self.vif_bridge, br_want, 1)
+
+    def test_generic_driver_bridge(self):
+        d = vif.LibvirtGenericVIFDriver(self._get_conn())
+        self._check_bridge_driver(d,
+                                  self.vif_bridge,
+                                  self.vif_bridge['network']['bridge'])
+
+    def _check_ivs_ethernet_driver(self, d, vif, dev_prefix):
+        # IVS ethernet interfaces get a target dev with the expected
+        # prefix and an empty script path.
+        self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+        xml = self._get_instance_xml(d, vif)
+        node = self._get_node(xml)
+        self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
+                                     self.vif_ivs, prefix=dev_prefix)
+        script = node.find("script").get("path")
+        self.assertEqual(script, "")
+
+    def test_unplug_ivs_ethernet(self):
+        # Unplug must swallow ProcessExecutionError from port deletion.
+        # NOTE(review): passes vif_ovs rather than vif_ivs -- presumably
+        # irrelevant since only the error path is exercised; confirm.
+        d = vif.LibvirtGenericVIFDriver(self._get_conn())
+        with mock.patch.object(linux_net, 'delete_ivs_vif_port') as delete:
+            delete.side_effect = processutils.ProcessExecutionError
+            d.unplug_ivs_ethernet(None, self.vif_ovs)
+
+    def test_plug_ovs_hybrid(self):
+        # Plugging an OVS hybrid VIF must create the qbr linux bridge,
+        # the qvb/qvo veth pair, and the OVS port, in that order.
+        calls = {
+            'device_exists': [mock.call('qbrvif-xxx-yyy'),
+                              mock.call('qvovif-xxx-yyy')],
+            '_create_veth_pair': [mock.call('qvbvif-xxx-yyy',
+                                            'qvovif-xxx-yyy')],
+            'execute': [mock.call('brctl', 'addbr', 'qbrvif-xxx-yyy',
+                                  run_as_root=True),
+                        mock.call('brctl', 'setfd', 'qbrvif-xxx-yyy', 0,
+                                  run_as_root=True),
+                        mock.call('brctl', 'stp', 'qbrvif-xxx-yyy', 'off',
+                                  run_as_root=True),
+                        mock.call('tee', ('/sys/class/net/qbrvif-xxx-yyy'
+                                          '/bridge/multicast_snooping'),
+                                  process_input='0', run_as_root=True,
+                                  check_exit_code=[0, 1]),
+                        mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
+                                  run_as_root=True),
+                        mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
+                                  'qvbvif-xxx-yyy', run_as_root=True)],
+            'create_ovs_vif_port': [mock.call('br0',
+                                              'qvovif-xxx-yyy', 'aaa-bbb-ccc',
+                                              'ca:fe:de:ad:be:ef',
+                                              'instance-uuid')]
+        }
+        # NOTE(review): contextlib.nested is Python 2 only.
+        with contextlib.nested(
+            mock.patch.object(linux_net, 'device_exists',
+                              return_value=False),
+            mock.patch.object(utils, 'execute'),
+            mock.patch.object(linux_net, '_create_veth_pair'),
+            mock.patch.object(linux_net, 'create_ovs_vif_port')
+        ) as (device_exists, execute, _create_veth_pair, create_ovs_vif_port):
+            d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+            d.plug_ovs_hybrid(self.instance, self.vif_ovs)
+            device_exists.assert_has_calls(calls['device_exists'])
+            _create_veth_pair.assert_has_calls(calls['_create_veth_pair'])
+            execute.assert_has_calls(calls['execute'])
+            create_ovs_vif_port.assert_has_calls(calls['create_ovs_vif_port'])
+
+    def test_unplug_ovs_hybrid(self):
+        # Unplug removes the veth from the bridge, downs and deletes the
+        # bridge, then removes the OVS port.
+        calls = {
+            'device_exists': [mock.call('qbrvif-xxx-yyy')],
+            'execute': [mock.call('brctl', 'delif', 'qbrvif-xxx-yyy',
+                                  'qvbvif-xxx-yyy', run_as_root=True),
+                        mock.call('ip', 'link', 'set',
+                                  'qbrvif-xxx-yyy', 'down', run_as_root=True),
+                        mock.call('brctl', 'delbr',
+                                  'qbrvif-xxx-yyy', run_as_root=True)],
+            'delete_ovs_vif_port': [mock.call('br0', 'qvovif-xxx-yyy')]
+        }
+        with contextlib.nested(
+            mock.patch.object(linux_net, 'device_exists',
+                              return_value=True),
+            mock.patch.object(utils, 'execute'),
+            mock.patch.object(linux_net, 'delete_ovs_vif_port')
+        ) as (device_exists, execute, delete_ovs_vif_port):
+            d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+            d.unplug_ovs_hybrid(None, self.vif_ovs)
+            device_exists.assert_has_calls(calls['device_exists'])
+            execute.assert_has_calls(calls['execute'])
+            delete_ovs_vif_port.assert_has_calls(calls['delete_ovs_vif_port'])
+
+    def test_unplug_ovs_hybrid_bridge_does_not_exist(self):
+        # When the qbr bridge is already gone only the OVS port is
+        # removed.
+        calls = {
+            'device_exists': [mock.call('qbrvif-xxx-yyy')],
+            'delete_ovs_vif_port': [mock.call('br0', 'qvovif-xxx-yyy')]
+        }
+        with contextlib.nested(
+            mock.patch.object(linux_net, 'device_exists',
+                              return_value=False),
+            mock.patch.object(linux_net, 'delete_ovs_vif_port')
+        ) as (device_exists, delete_ovs_vif_port):
+            d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+            d.unplug_ovs_hybrid(None, self.vif_ovs)
+            device_exists.assert_has_calls(calls['device_exists'])
+            delete_ovs_vif_port.assert_has_calls(calls['delete_ovs_vif_port'])
+
+    def test_plug_ivs_hybrid(self):
+        # Same bridge/veth setup as the OVS hybrid case, but the port is
+        # added via create_ivs_vif_port (which takes no bridge argument).
+        calls = {
+            'device_exists': [mock.call('qbrvif-xxx-yyy'),
+                              mock.call('qvovif-xxx-yyy')],
+            '_create_veth_pair': [mock.call('qvbvif-xxx-yyy',
+                                            'qvovif-xxx-yyy')],
+            'execute': [mock.call('brctl', 'addbr', 'qbrvif-xxx-yyy',
+                                  run_as_root=True),
+                        mock.call('brctl', 'setfd', 'qbrvif-xxx-yyy', 0,
+                                  run_as_root=True),
+                        mock.call('brctl', 'stp', 'qbrvif-xxx-yyy', 'off',
+                                  run_as_root=True),
+                        mock.call('tee', ('/sys/class/net/qbrvif-xxx-yyy'
+                                          '/bridge/multicast_snooping'),
+                                  process_input='0', run_as_root=True,
+                                  check_exit_code=[0, 1]),
+                        mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
+                                  run_as_root=True),
+                        mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
+                                  'qvbvif-xxx-yyy', run_as_root=True)],
+            'create_ivs_vif_port': [mock.call('qvovif-xxx-yyy', 'aaa-bbb-ccc',
+                                              'ca:fe:de:ad:be:ef',
+                                              'instance-uuid')]
+        }
+        with contextlib.nested(
+            mock.patch.object(linux_net, 'device_exists',
+                              return_value=False),
+            mock.patch.object(utils, 'execute'),
+            mock.patch.object(linux_net, '_create_veth_pair'),
+            mock.patch.object(linux_net, 'create_ivs_vif_port')
+        ) as (device_exists, execute, _create_veth_pair, create_ivs_vif_port):
+            d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+            d.plug_ivs_hybrid(self.instance, self.vif_ivs)
+            device_exists.assert_has_calls(calls['device_exists'])
+            _create_veth_pair.assert_has_calls(calls['_create_veth_pair'])
+            execute.assert_has_calls(calls['execute'])
+            create_ivs_vif_port.assert_has_calls(calls['create_ivs_vif_port'])
+
+    def test_unplug_ivs_hybrid(self):
+        # Mirror of the plug: remove the veth from the bridge, tear the
+        # bridge down, then delete the IVS port.
+        calls = {
+            'execute': [mock.call('brctl', 'delif', 'qbrvif-xxx-yyy',
+                                  'qvbvif-xxx-yyy', run_as_root=True),
+                        mock.call('ip', 'link', 'set',
+                                  'qbrvif-xxx-yyy', 'down', run_as_root=True),
+                        mock.call('brctl', 'delbr',
+                                  'qbrvif-xxx-yyy', run_as_root=True)],
+            'delete_ivs_vif_port': [mock.call('qvovif-xxx-yyy')]
+        }
+        with contextlib.nested(
+            mock.patch.object(utils, 'execute'),
+            mock.patch.object(linux_net, 'delete_ivs_vif_port')
+        ) as (execute, delete_ivs_vif_port):
+            d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+            d.unplug_ivs_hybrid(None, self.vif_ivs)
+            execute.assert_has_calls(calls['execute'])
+            delete_ivs_vif_port.assert_has_calls(calls['delete_ivs_vif_port'])
+
+    def test_unplug_ivs_hybrid_bridge_does_not_exist(self):
+        # Teardown failures are swallowed when the bridge is missing.
+        d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+        with mock.patch.object(utils, 'execute') as execute:
+            execute.side_effect = processutils.ProcessExecutionError
+            d.unplug_ivs_hybrid(None, self.vif_ivs)
+
+ def test_unplug_iovisor(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ execute.side_effect = processutils.ProcessExecutionError
+ mynetwork = network_model.Network(id='network-id-xxx-yyy-zzz',
+ label='mylabel')
+ myvif = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=mynetwork)
+ d.unplug_iovisor(None, myvif)
+
+ @mock.patch('nova.network.linux_net.device_exists')
+ def test_plug_iovisor(self, device_exists):
+ device_exists.return_value = True
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ execute.side_effect = processutils.ProcessExecutionError
+ instance = {
+ 'name': 'instance-name',
+ 'uuid': 'instance-uuid',
+ 'project_id': 'myproject'
+ }
+ d.plug_iovisor(instance, self.vif_ivs)
+
+ def test_unplug_mlnx_with_details(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ execute.side_effect = processutils.ProcessExecutionError
+ d.unplug_mlnx_direct(None, self.vif_mlnx_net)
+ execute.assert_called_once_with('ebrctl', 'del-port',
+ 'fake_phy_network',
+ 'ca:fe:de:ad:be:ef',
+ run_as_root=True)
+
+ def test_plug_mlnx_with_details(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ d.plug_mlnx_direct(self.instance, self.vif_mlnx_net)
+ execute.assert_called_once_with('ebrctl', 'add-port',
+ 'ca:fe:de:ad:be:ef',
+ 'instance-uuid',
+ 'fake_phy_network',
+ 'mlnx_direct',
+ 'eth-xxx-yyy-zzz',
+ run_as_root=True)
+
+ def test_plug_mlnx_no_physical_network(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ self.assertRaises(exception.NovaException,
+ d.plug_mlnx_direct,
+ self.instance,
+ self.vif_mlnx)
+ self.assertEqual(0, execute.call_count)
+
+ def test_ivs_ethernet_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ self._check_ivs_ethernet_driver(d,
+ self.vif_ivs,
+ "tap")
+
+ def _check_ivs_virtualport_driver(self, d, vif, want_iface_id):
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ xml = self._get_instance_xml(d, vif)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
+ vif, vif['devname'])
+
+ def _check_ovs_virtualport_driver(self, d, vif, want_iface_id):
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ xml = self._get_instance_xml(d, vif)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
+ vif, "br0")
+ vp = node.find("virtualport")
+ self.assertEqual(vp.get("type"), "openvswitch")
+ iface_id_found = False
+ for p_elem in vp.findall("parameters"):
+ iface_id = p_elem.get("interfaceid", None)
+ if iface_id:
+ self.assertEqual(iface_id, want_iface_id)
+ iface_id_found = True
+
+ self.assertTrue(iface_id_found)
+
+ def test_generic_ovs_virtualport_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9011))
+ want_iface_id = self.vif_ovs['ovs_interfaceid']
+ self._check_ovs_virtualport_driver(d,
+ self.vif_ovs,
+ want_iface_id)
+
+ def test_generic_ivs_virtualport_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9011))
+ want_iface_id = self.vif_ivs['ovs_interfaceid']
+ self._check_ivs_virtualport_driver(d,
+ self.vif_ivs,
+ want_iface_id)
+
+ def test_ivs_plug_with_nova_firewall(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = "qbr" + self.vif_ivs['id']
+ br_want = br_want[:network_model.NIC_NAME_LEN]
+ xml = self._get_instance_xml(d, self.vif_ivs)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
+ self.vif_ivs, br_want, 1)
+
+ def test_ivs_plug_with_port_filter_direct_no_nova_firewall(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = "qbr" + self.vif_ivs_filter_hybrid['id']
+ br_want = br_want[:network_model.NIC_NAME_LEN]
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ xml = self._get_instance_xml(d, self.vif_ivs_filter_hybrid)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
+ self.vif_ivs_filter_hybrid, br_want, 0)
+
+ def test_ivs_plug_with_port_filter_hybrid_no_nova_firewall(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = self.vif_ivs_filter_direct['devname']
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ xml = self._get_instance_xml(d, self.vif_ivs_filter_direct)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
+ self.vif_ivs_filter_direct, br_want, 0)
+
+ def test_hybrid_plug_without_nova_firewall(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = "qbr" + self.vif_ovs_hybrid['id']
+ br_want = br_want[:network_model.NIC_NAME_LEN]
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ xml = self._get_instance_xml(d, self.vif_ovs_hybrid)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
+ self.vif_ovs_hybrid, br_want, 0)
+
+ def test_direct_plug_with_port_filter_cap_no_nova_firewall(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = self.vif_midonet['devname']
+ xml = self._get_instance_xml(d, self.vif_ovs_filter_cap)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "bridge", "target", "dev",
+ self.vif_ovs_filter_cap, br_want)
+
+ def _check_neutron_hybrid_driver(self, d, vif, br_want):
+ self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
+ xml = self._get_instance_xml(d, vif)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
+ vif, br_want, 1)
+
+ def test_generic_hybrid_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = "qbr" + self.vif_ovs['id']
+ br_want = br_want[:network_model.NIC_NAME_LEN]
+ self._check_neutron_hybrid_driver(d,
+ self.vif_ovs,
+ br_want)
+
+ def test_ivs_hybrid_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ br_want = "qbr" + self.vif_ivs['id']
+ br_want = br_want[:network_model.NIC_NAME_LEN]
+ self._check_neutron_hybrid_driver(d,
+ self.vif_ivs,
+ br_want)
+
+ def test_mlnx_direct_vif_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ xml = self._get_instance_xml(d,
+ self.vif_mlnx)
+ node = self._get_node(xml)
+ self.assertEqual(node.get("type"), "direct")
+ self._assertTypeEquals(node, "direct", "source",
+ "dev", "eth-xxx-yyy-zzz")
+ self._assertTypeEquals(node, "direct", "source",
+ "mode", "passthrough")
+ self._assertMacEquals(node, self.vif_mlnx)
+ self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
+
+ def test_midonet_ethernet_vif_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ br_want = self.vif_midonet['devname']
+ xml = self._get_instance_xml(d, self.vif_midonet)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
+ self.vif_midonet, br_want)
+
+ def test_generic_8021qbh_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ xml = self._get_instance_xml(d, self.vif_8021qbh)
+ node = self._get_node(xml)
+ self._assertTypeAndPciEquals(node, "hostdev", self.vif_8021qbh)
+ self._assertMacEquals(node, self.vif_8021qbh)
+ vp = node.find("virtualport")
+ self.assertEqual(vp.get("type"), "802.1Qbh")
+ profile_id_found = False
+ for p_elem in vp.findall("parameters"):
+ details = self.vif_8021qbh["details"]
+ profile_id = p_elem.get("profileid", None)
+ if profile_id:
+ self.assertEqual(profile_id,
+ details[network_model.VIF_DETAILS_PROFILEID])
+ profile_id_found = True
+
+ self.assertTrue(profile_id_found)
+
+ def test_hw_veb_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ xml = self._get_instance_xml(d, self.vif_hw_veb)
+ node = self._get_node(xml)
+ self._assertTypeAndPciEquals(node, "hostdev", self.vif_hw_veb)
+ self._assertMacEquals(node, self.vif_hw_veb)
+ vlan = node.find("vlan").find("tag").get("id")
+ vlan_want = self.vif_hw_veb["details"]["vlan"]
+ self.assertEqual(vlan, vlan_want)
+
+ def test_generic_iovisor_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
+ br_want = self.vif_ivs['devname']
+ xml = self._get_instance_xml(d, self.vif_ivs)
+ node = self._get_node(xml)
+ self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
+ self.vif_ivs, br_want)
+
+ def test_generic_8021qbg_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ xml = self._get_instance_xml(d, self.vif_8021qbg)
+
+ node = self._get_node(xml)
+ self._assertTypeEquals(node, "direct", "source", "dev", "eth0")
+ self._assertMacEquals(node, self.vif_8021qbg)
+
+ vp = node.find("virtualport")
+ self.assertEqual(vp.get("type"), "802.1Qbg")
+ manager_id_found = False
+ type_id_found = False
+ typeversion_id_found = False
+ instance_id_found = False
+ for p_elem in vp.findall("parameters"):
+ wantparams = self.vif_8021qbg['qbg_params']
+ manager_id = p_elem.get("managerid", None)
+ type_id = p_elem.get("typeid", None)
+ typeversion_id = p_elem.get("typeidversion", None)
+ instance_id = p_elem.get("instanceid", None)
+ if manager_id:
+ self.assertEqual(manager_id,
+ wantparams['managerid'])
+ manager_id_found = True
+ if type_id:
+ self.assertEqual(type_id,
+ wantparams['typeid'])
+ type_id_found = True
+ if typeversion_id:
+ self.assertEqual(typeversion_id,
+ wantparams['typeidversion'])
+ typeversion_id_found = True
+ if instance_id:
+ self.assertEqual(instance_id,
+ wantparams['instanceid'])
+ instance_id_found = True
+
+ self.assertTrue(manager_id_found)
+ self.assertTrue(type_id_found)
+ self.assertTrue(typeversion_id_found)
+ self.assertTrue(instance_id_found)
diff --git a/nova/tests/unit/virt/libvirt/test_volume.py b/nova/tests/unit/virt/libvirt/test_volume.py
new file mode 100644
index 0000000000..0594161638
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/test_volume.py
@@ -0,0 +1,1160 @@
+# Copyright 2010 OpenStack Foundation
+# Copyright 2012 University Of Minho
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import os
+import time
+
+import fixtures
+import mock
+from oslo.concurrency import processutils
+from oslo.config import cfg
+
+from nova import exception
+from nova.storage import linuxscsi
+from nova import test
+from nova.tests.unit.virt.libvirt import fake_libvirt_utils
+from nova import utils
+from nova.virt import fake
+from nova.virt.libvirt import utils as libvirt_utils
+from nova.virt.libvirt import volume
+
+CONF = cfg.CONF
+
+
+class LibvirtVolumeTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(LibvirtVolumeTestCase, self).setUp()
+ self.executes = []
+
+ def fake_execute(*cmd, **kwargs):
+ self.executes.append(cmd)
+ return None, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ class FakeLibvirtDriver(object):
+ def __init__(self, hyperv="QEMU", version=1005001):
+ self.hyperv = hyperv
+ self.version = version
+
+ def _get_hypervisor_version(self):
+ return self.version
+
+ def _get_hypervisor_type(self):
+ return self.hyperv
+
+ def _get_all_block_devices(self):
+ return []
+
+ self.fake_conn = FakeLibvirtDriver(fake.FakeVirtAPI())
+ self.connr = {
+ 'ip': '127.0.0.1',
+ 'initiator': 'fake_initiator',
+ 'host': 'fake_host'
+ }
+ self.disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ self.name = 'volume-00000001'
+ self.location = '10.0.2.15:3260'
+ self.iqn = 'iqn.2010-10.org.openstack:%s' % self.name
+ self.vol = {'id': 1, 'name': self.name}
+ self.uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
+ self.user = 'foo'
+
+ def _assertNetworkAndProtocolEquals(self, tree):
+ self.assertEqual(tree.get('type'), 'network')
+ self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
+ rbd_name = '%s/%s' % ('rbd', self.name)
+ self.assertEqual(tree.find('./source').get('name'), rbd_name)
+
+ def _assertFileTypeEquals(self, tree, file_path):
+ self.assertEqual(tree.get('type'), 'file')
+ self.assertEqual(tree.find('./source').get('file'), file_path)
+
+ def _assertDiskInfoEquals(self, tree, disk_info):
+ self.assertEqual(tree.get('device'), disk_info['type'])
+ self.assertEqual(tree.find('./target').get('bus'),
+ disk_info['bus'])
+ self.assertEqual(tree.find('./target').get('dev'),
+ disk_info['dev'])
+
+ def _test_libvirt_volume_driver_disk_info(self):
+ libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
+ connection_info = {
+ 'driver_volume_type': 'fake',
+ 'data': {
+ 'device_path': '/foo',
+ },
+ 'serial': 'fake_serial',
+ }
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertDiskInfoEquals(tree, self.disk_info)
+
+ def test_libvirt_volume_disk_info_type(self):
+ self.disk_info['type'] = 'cdrom'
+ self._test_libvirt_volume_driver_disk_info()
+
+ def test_libvirt_volume_disk_info_dev(self):
+ self.disk_info['dev'] = 'hdc'
+ self._test_libvirt_volume_driver_disk_info()
+
+ def test_libvirt_volume_disk_info_bus(self):
+ self.disk_info['bus'] = 'scsi'
+ self._test_libvirt_volume_driver_disk_info()
+
+ def test_libvirt_volume_driver_serial(self):
+ libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
+ connection_info = {
+ 'driver_volume_type': 'fake',
+ 'data': {
+ 'device_path': '/foo',
+ },
+ 'serial': 'fake_serial',
+ }
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual('block', tree.get('type'))
+ self.assertEqual('fake_serial', tree.find('./serial').text)
+ self.assertIsNone(tree.find('./blockio'))
+
+ def test_libvirt_volume_driver_blockio(self):
+ libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
+ connection_info = {
+ 'driver_volume_type': 'fake',
+ 'data': {
+ 'device_path': '/foo',
+ 'logical_block_size': '4096',
+ 'physical_block_size': '4096',
+ },
+ 'serial': 'fake_serial',
+ }
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ blockio = tree.find('./blockio')
+ self.assertEqual('4096', blockio.get('logical_block_size'))
+ self.assertEqual('4096', blockio.get('physical_block_size'))
+
+ def test_libvirt_volume_driver_iotune(self):
+ libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
+ connection_info = {
+ 'driver_volume_type': 'fake',
+ 'data': {
+ "device_path": "/foo",
+ 'qos_specs': 'bar',
+ },
+ }
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ iotune = tree.find('./iotune')
+ # ensure invalid qos_specs is ignored
+ self.assertIsNone(iotune)
+
+ specs = {
+ 'total_bytes_sec': '102400',
+ 'read_bytes_sec': '51200',
+ 'write_bytes_sec': '0',
+ 'total_iops_sec': '0',
+ 'read_iops_sec': '200',
+ 'write_iops_sec': '200',
+ }
+ del connection_info['data']['qos_specs']
+ connection_info['data'].update(dict(qos_specs=specs))
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ self.assertEqual('102400', tree.find('./iotune/total_bytes_sec').text)
+ self.assertEqual('51200', tree.find('./iotune/read_bytes_sec').text)
+ self.assertEqual('0', tree.find('./iotune/write_bytes_sec').text)
+ self.assertEqual('0', tree.find('./iotune/total_iops_sec').text)
+ self.assertEqual('200', tree.find('./iotune/read_iops_sec').text)
+ self.assertEqual('200', tree.find('./iotune/write_iops_sec').text)
+
+ def test_libvirt_volume_driver_readonly(self):
+ libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
+ connection_info = {
+ 'driver_volume_type': 'fake',
+ 'data': {
+ "device_path": "/foo",
+ 'access_mode': 'bar',
+ },
+ }
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ self.assertRaises(exception.InvalidVolumeAccessMode,
+ libvirt_driver.get_config,
+ connection_info, self.disk_info)
+
+ connection_info['data']['access_mode'] = 'rw'
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ readonly = tree.find('./readonly')
+ self.assertIsNone(readonly)
+
+ connection_info['data']['access_mode'] = 'ro'
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ readonly = tree.find('./readonly')
+ self.assertIsNotNone(readonly)
+
+ def iscsi_connection(self, volume, location, iqn):
+ dev_name = 'ip-%s-iscsi-%s-lun-1' % (location, iqn)
+ dev_path = '/dev/disk/by-path/%s' % (dev_name)
+ return {
+ 'driver_volume_type': 'iscsi',
+ 'data': {
+ 'volume_id': volume['id'],
+ 'target_portal': location,
+ 'target_iqn': iqn,
+ 'target_lun': 1,
+ 'device_path': dev_path,
+ 'qos_specs': {
+ 'total_bytes_sec': '102400',
+ 'read_iops_sec': '200',
+ }
+ }
+ }
+
+ def test_rescan_multipath(self):
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ libvirt_driver._rescan_multipath()
+ expected_multipath_cmd = ('multipath', '-r')
+ self.assertIn(expected_multipath_cmd, self.executes)
+
+ def test_iscsiadm_discover_parsing(self):
+ # Ensure that parsing iscsiadm discover ignores cruft.
+
+ targets = [
+ ["192.168.204.82:3260,1",
+ ("iqn.2010-10.org.openstack:volume-"
+ "f9b12623-6ce3-4dac-a71f-09ad4249bdd3")],
+ ["192.168.204.82:3261,1",
+ ("iqn.2010-10.org.openstack:volume-"
+ "f9b12623-6ce3-4dac-a71f-09ad4249bdd4")]]
+
+ # This slight wonkiness brought to you by pep8, as the actual
+ # example output runs about 97 chars wide.
+ sample_input = """Loading iscsi modules: done
+Starting iSCSI initiator service: done
+Setting up iSCSI targets: unused
+%s %s
+%s %s
+""" % (targets[0][0], targets[0][1], targets[1][0], targets[1][1])
+ driver = volume.LibvirtISCSIVolumeDriver("none")
+ out = driver._get_target_portals_from_iscsiadm_output(sample_input)
+ self.assertEqual(out, targets)
+
+ def test_libvirt_iscsi_driver(self):
+ # NOTE(vish) exists is to make driver assume connecting worked
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ connection_info = self.iscsi_connection(self.vol, self.location,
+ self.iqn)
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+ expected_commands = [('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location),
+ ('iscsiadm', '-m', 'session'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--login'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--op', 'update',
+ '-n', 'node.startup', '-v', 'automatic'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--rescan'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--op', 'update',
+ '-n', 'node.startup', '-v', 'manual'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--logout'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--op', 'delete')]
+ self.assertEqual(self.executes, expected_commands)
+
+ def test_libvirt_iscsi_driver_still_in_use(self):
+ # NOTE(vish) exists is to make driver assume connecting worked
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ devs = ['/dev/disk/by-path/ip-%s-iscsi-%s-lun-2' % (self.location,
+ self.iqn)]
+ self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
+ vol = {'id': 1, 'name': self.name}
+ connection_info = self.iscsi_connection(vol, self.location, self.iqn)
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ dev_name = 'ip-%s-iscsi-%s-lun-1' % (self.location, self.iqn)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+ expected_commands = [('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location),
+ ('iscsiadm', '-m', 'session'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--login'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--op', 'update',
+ '-n', 'node.startup', '-v', 'automatic'),
+ ('iscsiadm', '-m', 'node', '-T', self.iqn,
+ '-p', self.location, '--rescan'),
+ ('cp', '/dev/stdin',
+ '/sys/block/%s/device/delete' % dev_name)]
+ self.assertEqual(self.executes, expected_commands)
+
+ def test_libvirt_iscsi_driver_disconnect_multipath_error(self):
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ devs = ['/dev/disk/by-path/ip-%s-iscsi-%s-lun-2' % (self.location,
+ self.iqn)]
+ with contextlib.nested(
+ mock.patch.object(os.path, 'exists', return_value=True),
+ mock.patch.object(self.fake_conn, '_get_all_block_devices',
+ return_value=devs),
+ mock.patch.object(libvirt_driver, '_rescan_multipath'),
+ mock.patch.object(libvirt_driver, '_run_multipath'),
+ mock.patch.object(libvirt_driver, '_get_multipath_device_name',
+ return_value='/dev/mapper/fake-multipath-devname'),
+ mock.patch.object(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ return_value=[('fake-ip', 'fake-portal')]),
+ mock.patch.object(libvirt_driver, '_get_multipath_iqn',
+ return_value='fake-portal'),
+ ) as (mock_exists, mock_devices, mock_rescan_multipath,
+ mock_run_multipath, mock_device_name, mock_get_portals,
+ mock_get_iqn):
+ mock_run_multipath.side_effect = processutils.ProcessExecutionError
+ vol = {'id': 1, 'name': self.name}
+ connection_info = self.iscsi_connection(vol, self.location,
+ self.iqn)
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+
+ libvirt_driver.use_multipath = True
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+ mock_run_multipath.assert_called_once_with(
+ ['-f', 'fake-multipath-devname'],
+ check_exit_code=[0, 1])
+
+ def test_libvirt_iscsi_driver_get_config(self):
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ dev_name = 'ip-%s-iscsi-%s-lun-1' % (self.location, self.iqn)
+ dev_path = '/dev/disk/by-path/%s' % (dev_name)
+ vol = {'id': 1, 'name': self.name}
+ connection_info = self.iscsi_connection(vol, self.location,
+ self.iqn)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual('block', tree.get('type'))
+ self.assertEqual(dev_path, tree.find('./source').get('dev'))
+
+ libvirt_driver.use_multipath = True
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual('block', tree.get('type'))
+ self.assertEqual(dev_path, tree.find('./source').get('dev'))
+
+ def test_libvirt_iscsi_driver_multipath_id(self):
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ libvirt_driver.use_multipath = True
+ self.stubs.Set(libvirt_driver, '_run_iscsiadm_bare',
+ lambda x, check_exit_code: ('',))
+ self.stubs.Set(libvirt_driver, '_rescan_iscsi', lambda: None)
+ self.stubs.Set(libvirt_driver, '_get_host_device', lambda x: None)
+ self.stubs.Set(libvirt_driver, '_rescan_multipath', lambda: None)
+ fake_multipath_id = 'fake_multipath_id'
+ fake_multipath_device = '/dev/mapper/%s' % fake_multipath_id
+ self.stubs.Set(libvirt_driver, '_get_multipath_device_name',
+ lambda x: fake_multipath_device)
+
+ def fake_disconnect_volume_multipath_iscsi(iscsi_properties,
+ multipath_device):
+ if fake_multipath_device != multipath_device:
+ raise Exception('Invalid multipath_device.')
+
+ self.stubs.Set(libvirt_driver, '_disconnect_volume_multipath_iscsi',
+ fake_disconnect_volume_multipath_iscsi)
+ with mock.patch.object(os.path, 'exists', return_value=True):
+ vol = {'id': 1, 'name': self.name}
+ connection_info = self.iscsi_connection(vol, self.location,
+ self.iqn)
+ libvirt_driver.connect_volume(connection_info,
+ self.disk_info)
+ self.assertEqual(fake_multipath_id,
+ connection_info['data']['multipath_id'])
+ libvirt_driver.disconnect_volume(connection_info, "fake")
+
+ def test_sanitize_log_run_iscsiadm(self):
+ # Tests that the parameters to the _run_iscsiadm function are sanitized
+ # for passwords when logged.
+ def fake_debug(*args, **kwargs):
+ self.assertIn('node.session.auth.password', args[0])
+ self.assertNotIn('scrubme', args[0])
+
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ connection_info = self.iscsi_connection(self.vol, self.location,
+ self.iqn)
+ iscsi_properties = connection_info['data']
+ with mock.patch.object(volume.LOG, 'debug',
+ side_effect=fake_debug) as debug_mock:
+ libvirt_driver._iscsiadm_update(iscsi_properties,
+ 'node.session.auth.password',
+ 'scrubme')
+ # we don't care what the log message is, we just want to make sure
+ # our stub method is called which asserts the password is scrubbed
+ self.assertTrue(debug_mock.called)
+
+ def iser_connection(self, volume, location, iqn):
+ return {
+ 'driver_volume_type': 'iser',
+ 'data': {
+ 'volume_id': volume['id'],
+ 'target_portal': location,
+ 'target_iqn': iqn,
+ 'target_lun': 1,
+ }
+ }
+
+ def sheepdog_connection(self, volume):
+ return {
+ 'driver_volume_type': 'sheepdog',
+ 'data': {
+ 'name': volume['name']
+ }
+ }
+
+ def test_libvirt_sheepdog_driver(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.sheepdog_connection(self.vol)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.get('type'), 'network')
+ self.assertEqual(tree.find('./source').get('protocol'), 'sheepdog')
+ self.assertEqual(tree.find('./source').get('name'), self.name)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def rbd_connection(self, volume):
+ return {
+ 'driver_volume_type': 'rbd',
+ 'data': {
+ 'name': '%s/%s' % ('rbd', volume['name']),
+ 'auth_enabled': CONF.libvirt.rbd_secret_uuid is not None,
+ 'auth_username': CONF.libvirt.rbd_user,
+ 'secret_type': 'ceph',
+ 'secret_uuid': CONF.libvirt.rbd_secret_uuid,
+ 'qos_specs': {
+ 'total_bytes_sec': '1048576',
+ 'read_iops_sec': '500',
+ }
+ }
+ }
+
+ def test_libvirt_rbd_driver(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.rbd_connection(self.vol)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertNetworkAndProtocolEquals(tree)
+ self.assertIsNone(tree.find('./source/auth'))
+ self.assertEqual('1048576', tree.find('./iotune/total_bytes_sec').text)
+ self.assertEqual('500', tree.find('./iotune/read_iops_sec').text)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_rbd_driver_hosts(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.rbd_connection(self.vol)
+ hosts = ['example.com', '1.2.3.4', '::1']
+ ports = [None, '6790', '6791']
+ connection_info['data']['hosts'] = hosts
+ connection_info['data']['ports'] = ports
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertNetworkAndProtocolEquals(tree)
+ self.assertIsNone(tree.find('./source/auth'))
+ found_hosts = tree.findall('./source/host')
+ self.assertEqual([host.get('name') for host in found_hosts], hosts)
+ self.assertEqual([host.get('port') for host in found_hosts], ports)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_rbd_driver_auth_enabled(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.rbd_connection(self.vol)
+ secret_type = 'ceph'
+ connection_info['data']['auth_enabled'] = True
+ connection_info['data']['auth_username'] = self.user
+ connection_info['data']['secret_type'] = secret_type
+ connection_info['data']['secret_uuid'] = self.uuid
+
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertNetworkAndProtocolEquals(tree)
+ self.assertEqual(tree.find('./auth').get('username'), self.user)
+ self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
+ self.assertEqual(tree.find('./auth/secret').get('uuid'), self.uuid)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_rbd_driver_auth_enabled_flags_override(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.rbd_connection(self.vol)
+ secret_type = 'ceph'
+ connection_info['data']['auth_enabled'] = True
+ connection_info['data']['auth_username'] = self.user
+ connection_info['data']['secret_type'] = secret_type
+ connection_info['data']['secret_uuid'] = self.uuid
+
+ flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b'
+ flags_user = 'bar'
+ self.flags(rbd_user=flags_user,
+ rbd_secret_uuid=flags_uuid,
+ group='libvirt')
+
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertNetworkAndProtocolEquals(tree)
+ self.assertEqual(tree.find('./auth').get('username'), flags_user)
+ self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
+ self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_rbd_driver_auth_disabled(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.rbd_connection(self.vol)
+ secret_type = 'ceph'
+ connection_info['data']['auth_enabled'] = False
+ connection_info['data']['auth_username'] = self.user
+ connection_info['data']['secret_type'] = secret_type
+ connection_info['data']['secret_uuid'] = self.uuid
+
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertNetworkAndProtocolEquals(tree)
+ self.assertIsNone(tree.find('./auth'))
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_rbd_driver_auth_disabled_flags_override(self):
+ libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
+ connection_info = self.rbd_connection(self.vol)
+ secret_type = 'ceph'
+ connection_info['data']['auth_enabled'] = False
+ connection_info['data']['auth_username'] = self.user
+ connection_info['data']['secret_type'] = secret_type
+ connection_info['data']['secret_uuid'] = self.uuid
+
+ # NOTE: Supplying the rbd_secret_uuid will enable authentication
+ # locally in nova-compute even if not enabled in nova-volume/cinder
+ flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b'
+ flags_user = 'bar'
+ self.flags(rbd_user=flags_user,
+ rbd_secret_uuid=flags_uuid,
+ group='libvirt')
+
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertNetworkAndProtocolEquals(tree)
+ self.assertEqual(tree.find('./auth').get('username'), flags_user)
+ self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
+ self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_kvm_volume(self):
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ connection_info = self.iscsi_connection(self.vol, self.location,
+ self.iqn)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (self.location,
+ self.iqn)
+ self.assertEqual(tree.get('type'), 'block')
+ self.assertEqual(tree.find('./source').get('dev'), dev_str)
+ libvirt_driver.disconnect_volume(connection_info, 'vde')
+
+ def test_libvirt_kvm_volume_with_multipath(self):
+ self.flags(iscsi_use_multipath=True, group='libvirt')
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ devs = ['/dev/mapper/sda', '/dev/mapper/sdb']
+ self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ connection_info = self.iscsi_connection(self.vol, self.location,
+ self.iqn)
+ mpdev_filepath = '/dev/mapper/foo'
+ connection_info['data']['device_path'] = mpdev_filepath
+ libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
+ self.stubs.Set(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ lambda x: [[self.location, self.iqn]])
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
+ libvirt_driver._get_multipath_iqn = lambda x: self.iqn
+ libvirt_driver.disconnect_volume(connection_info, 'vde')
+ expected_multipath_cmd = ('multipath', '-f', 'foo')
+ self.assertIn(expected_multipath_cmd, self.executes)
+
+ def test_libvirt_kvm_volume_with_multipath_still_in_use(self):
+ name = 'volume-00000001'
+ location = '10.0.2.15:3260'
+ iqn = 'iqn.2010-10.org.openstack:%s' % name
+ mpdev_filepath = '/dev/mapper/foo'
+
+ def _get_multipath_device_name(path):
+ if '%s-lun-1' % iqn in path:
+ return mpdev_filepath
+ return '/dev/mapper/donotdisconnect'
+
+ self.flags(iscsi_use_multipath=True, group='libvirt')
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ libvirt_driver._get_multipath_device_name =\
+ lambda x: _get_multipath_device_name(x)
+
+ block_devs = ['/dev/disks/by-path/%s-iscsi-%s-lun-2' % (location, iqn)]
+ self.stubs.Set(self.fake_conn, '_get_all_block_devices',
+ lambda: block_devs)
+
+ vol = {'id': 1, 'name': name}
+ connection_info = self.iscsi_connection(vol, location, iqn)
+ connection_info['data']['device_path'] = mpdev_filepath
+
+ libvirt_driver._get_multipath_iqn = lambda x: iqn
+
+ iscsi_devs = ['1.2.3.4-iscsi-%s-lun-1' % iqn,
+ '%s-iscsi-%s-lun-1' % (location, iqn),
+ '%s-iscsi-%s-lun-2' % (location, iqn)]
+ libvirt_driver._get_iscsi_devices = lambda: iscsi_devs
+
+ self.stubs.Set(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ lambda x: [[location, iqn]])
+
+ # Set up disconnect volume mock expectations
+ self.mox.StubOutWithMock(libvirt_driver, '_delete_device')
+ self.mox.StubOutWithMock(libvirt_driver, '_rescan_multipath')
+ libvirt_driver._rescan_multipath()
+ libvirt_driver._delete_device('/dev/disk/by-path/%s' % iscsi_devs[0])
+ libvirt_driver._delete_device('/dev/disk/by-path/%s' % iscsi_devs[1])
+ libvirt_driver._rescan_multipath()
+
+ # Ensure that the mpath devices are deleted
+ self.mox.ReplayAll()
+ libvirt_driver.disconnect_volume(connection_info, 'vde')
+
+ def test_libvirt_kvm_volume_with_multipath_getmpdev(self):
+ self.flags(iscsi_use_multipath=True, group='libvirt')
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ name0 = 'volume-00000000'
+ iqn0 = 'iqn.2010-10.org.openstack:%s' % name0
+ dev0 = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-0' % (self.location, iqn0)
+ dev = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (self.location,
+ self.iqn)
+ devs = [dev0, dev]
+ self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
+ connection_info = self.iscsi_connection(self.vol, self.location,
+ self.iqn)
+ mpdev_filepath = '/dev/mapper/foo'
+ libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
+ self.stubs.Set(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ lambda x: [['fake_portal1', 'fake_iqn1']])
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
+ libvirt_driver.disconnect_volume(connection_info, 'vde')
+
+ def test_libvirt_kvm_iser_volume_with_multipath(self):
+ self.flags(iser_use_multipath=True, group='libvirt')
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ self.stubs.Set(time, 'sleep', lambda x: None)
+ devs = ['/dev/mapper/sda', '/dev/mapper/sdb']
+ self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
+ libvirt_driver = volume.LibvirtISERVolumeDriver(self.fake_conn)
+ name = 'volume-00000001'
+ location = '10.0.2.15:3260'
+ iqn = 'iqn.2010-10.org.iser.openstack:%s' % name
+ vol = {'id': 1, 'name': name}
+ connection_info = self.iser_connection(vol, location, iqn)
+ mpdev_filepath = '/dev/mapper/foo'
+ connection_info['data']['device_path'] = mpdev_filepath
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
+ self.stubs.Set(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ lambda x: [[location, iqn]])
+ libvirt_driver.connect_volume(connection_info, disk_info)
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
+ libvirt_driver._get_multipath_iqn = lambda x: iqn
+ libvirt_driver.disconnect_volume(connection_info, 'vde')
+ expected_multipath_cmd = ('multipath', '-f', 'foo')
+ self.assertIn(expected_multipath_cmd, self.executes)
+
+ def test_libvirt_kvm_iser_volume_with_multipath_getmpdev(self):
+ self.flags(iser_use_multipath=True, group='libvirt')
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ self.stubs.Set(time, 'sleep', lambda x: None)
+ libvirt_driver = volume.LibvirtISERVolumeDriver(self.fake_conn)
+ name0 = 'volume-00000000'
+ location0 = '10.0.2.15:3260'
+ iqn0 = 'iqn.2010-10.org.iser.openstack:%s' % name0
+ dev0 = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-0' % (location0, iqn0)
+ name = 'volume-00000001'
+ location = '10.0.2.15:3260'
+ iqn = 'iqn.2010-10.org.iser.openstack:%s' % name
+ vol = {'id': 1, 'name': name}
+ dev = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
+ devs = [dev0, dev]
+ self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
+ self.stubs.Set(libvirt_driver, '_get_iscsi_devices', lambda: [])
+ connection_info = self.iser_connection(vol, location, iqn)
+ mpdev_filepath = '/dev/mapper/foo'
+ disk_info = {
+ "bus": "virtio",
+ "dev": "vde",
+ "type": "disk",
+ }
+ libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
+ self.stubs.Set(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ lambda x: [['fake_portal1', 'fake_iqn1']])
+ libvirt_driver.connect_volume(connection_info, disk_info)
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
+ libvirt_driver.disconnect_volume(connection_info, 'vde')
+
+ def test_libvirt_nfs_driver(self):
+ # NOTE(vish) exists is to make driver assume connecting worked
+ mnt_base = '/mnt'
+ self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
+
+ libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
+ self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
+
+ export_string = '192.168.1.1:/nfs/share1'
+ export_mnt_base = os.path.join(mnt_base,
+ utils.get_hash_str(export_string))
+
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name}}
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ device_path = os.path.join(export_mnt_base,
+ connection_info['data']['name'])
+ self.assertEqual(device_path, connection_info['data']['device_path'])
+ expected_commands = [
+ ('mkdir', '-p', export_mnt_base),
+ ('mount', '-t', 'nfs', export_string, export_mnt_base),
+ ('umount', export_mnt_base)]
+ self.assertEqual(expected_commands, self.executes)
+
+ @mock.patch.object(volume.utils, 'execute')
+ @mock.patch.object(volume.LOG, 'debug')
+ @mock.patch.object(volume.LOG, 'exception')
+ def test_libvirt_nfs_driver_umount_error(self, mock_LOG_exception,
+ mock_LOG_debug, mock_utils_exe):
+ export_string = '192.168.1.1:/nfs/share1'
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name}}
+ libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
+ mock_utils_exe.side_effect = processutils.ProcessExecutionError(
+ None, None, None, 'umount', 'umount: device is busy.')
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+ self.assertTrue(mock_LOG_debug.called)
+ mock_utils_exe.side_effect = processutils.ProcessExecutionError(
+ None, None, None, 'umount', 'umount: target is busy.')
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+ self.assertTrue(mock_LOG_debug.called)
+ mock_utils_exe.side_effect = processutils.ProcessExecutionError(
+ None, None, None, 'umount', 'umount: Other error.')
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+ self.assertTrue(mock_LOG_exception.called)
+
+ def test_libvirt_nfs_driver_get_config(self):
+ libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
+ mnt_base = '/mnt'
+ self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
+ export_string = '192.168.1.1:/nfs/share1'
+ export_mnt_base = os.path.join(mnt_base,
+ utils.get_hash_str(export_string))
+ file_path = os.path.join(export_mnt_base, self.name)
+
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name,
+ 'device_path': file_path}}
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertFileTypeEquals(tree, file_path)
+ self.assertEqual('raw', tree.find('./driver').get('type'))
+
+ def test_libvirt_nfs_driver_already_mounted(self):
+ # NOTE(vish) exists is to make driver assume connecting worked
+ mnt_base = '/mnt'
+ self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
+
+ libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
+
+ export_string = '192.168.1.1:/nfs/share1'
+ export_mnt_base = os.path.join(mnt_base,
+ utils.get_hash_str(export_string))
+
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name}}
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ expected_commands = [
+ ('findmnt', '--target', export_mnt_base, '--source',
+ export_string),
+ ('umount', export_mnt_base)]
+ self.assertEqual(self.executes, expected_commands)
+
+ def test_libvirt_nfs_driver_with_opts(self):
+ mnt_base = '/mnt'
+ self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
+
+ libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
+ self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
+ export_string = '192.168.1.1:/nfs/share1'
+ options = '-o intr,nfsvers=3'
+ export_mnt_base = os.path.join(mnt_base,
+ utils.get_hash_str(export_string))
+
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name,
+ 'options': options}}
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ expected_commands = [
+ ('mkdir', '-p', export_mnt_base),
+ ('mount', '-t', 'nfs', '-o', 'intr,nfsvers=3',
+ export_string, export_mnt_base),
+ ('umount', export_mnt_base),
+ ]
+ self.assertEqual(expected_commands, self.executes)
+
+ def aoe_connection(self, shelf, lun):
+ aoedev = 'e%s.%s' % (shelf, lun)
+ aoedevpath = '/dev/etherd/%s' % (aoedev)
+ return {
+ 'driver_volume_type': 'aoe',
+ 'data': {
+ 'target_shelf': shelf,
+ 'target_lun': lun,
+ 'device_path': aoedevpath
+ }
+ }
+
+ @mock.patch('os.path.exists', return_value=True)
+ def test_libvirt_aoe_driver(self, exists):
+ libvirt_driver = volume.LibvirtAOEVolumeDriver(self.fake_conn)
+ shelf = '100'
+ lun = '1'
+ connection_info = self.aoe_connection(shelf, lun)
+ aoedev = 'e%s.%s' % (shelf, lun)
+ aoedevpath = '/dev/etherd/%s' % (aoedev)
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ exists.assert_called_with(aoedevpath)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+ self.assertEqual(aoedevpath, connection_info['data']['device_path'])
+ expected_commands = [('aoe-revalidate', aoedev)]
+ self.assertEqual(expected_commands, self.executes)
+
+ def test_libvirt_aoe_driver_get_config(self):
+ libvirt_driver = volume.LibvirtAOEVolumeDriver(self.fake_conn)
+ shelf = '100'
+ lun = '1'
+ connection_info = self.aoe_connection(shelf, lun)
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ aoedevpath = '/dev/etherd/e%s.%s' % (shelf, lun)
+ self.assertEqual('block', tree.get('type'))
+ self.assertEqual(aoedevpath, tree.find('./source').get('dev'))
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def test_libvirt_glusterfs_driver(self):
+ mnt_base = '/mnt'
+ self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
+
+ libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
+ self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
+ export_string = '192.168.1.1:/volume-00001'
+ export_mnt_base = os.path.join(mnt_base,
+ utils.get_hash_str(export_string))
+
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name}}
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ device_path = os.path.join(export_mnt_base,
+ connection_info['data']['name'])
+ self.assertEqual(device_path, connection_info['data']['device_path'])
+ expected_commands = [
+ ('mkdir', '-p', export_mnt_base),
+ ('mount', '-t', 'glusterfs', export_string, export_mnt_base),
+ ('umount', export_mnt_base)]
+ self.assertEqual(expected_commands, self.executes)
+
+ def test_libvirt_glusterfs_driver_get_config(self):
+ mnt_base = '/mnt'
+ self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
+
+ libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
+ export_string = '192.168.1.1:/volume-00001'
+ export_mnt_base = os.path.join(mnt_base,
+ utils.get_hash_str(export_string))
+ file_path = os.path.join(export_mnt_base, self.name)
+
+ # Test default format - raw
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name,
+ 'device_path': file_path}}
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertFileTypeEquals(tree, file_path)
+ self.assertEqual('raw', tree.find('./driver').get('type'))
+
+ # Test specified format - qcow2
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name,
+ 'device_path': file_path,
+ 'format': 'qcow2'}}
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self._assertFileTypeEquals(tree, file_path)
+ self.assertEqual('qcow2', tree.find('./driver').get('type'))
+
+ def test_libvirt_glusterfs_driver_already_mounted(self):
+ mnt_base = '/mnt'
+ self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
+
+ libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
+ export_string = '192.168.1.1:/volume-00001'
+ export_mnt_base = os.path.join(mnt_base,
+ utils.get_hash_str(export_string))
+
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name}}
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ expected_commands = [
+ ('findmnt', '--target', export_mnt_base,
+ '--source', export_string),
+ ('umount', export_mnt_base)]
+ self.assertEqual(self.executes, expected_commands)
+
+ def test_libvirt_glusterfs_driver_with_opts(self):
+ mnt_base = '/mnt'
+ self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
+
+ libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
+ self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
+ export_string = '192.168.1.1:/volume-00001'
+ options = '-o backupvolfile-server=192.168.1.2'
+ export_mnt_base = os.path.join(mnt_base,
+ utils.get_hash_str(export_string))
+
+ connection_info = {'data': {'export': export_string,
+ 'name': self.name,
+ 'options': options}}
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ expected_commands = [
+ ('mkdir', '-p', export_mnt_base),
+ ('mount', '-t', 'glusterfs',
+ '-o', 'backupvolfile-server=192.168.1.2',
+ export_string, export_mnt_base),
+ ('umount', export_mnt_base),
+ ]
+ self.assertEqual(self.executes, expected_commands)
+
+ def test_libvirt_glusterfs_libgfapi(self):
+ self.flags(qemu_allowed_storage_drivers=['gluster'], group='libvirt')
+ libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
+ self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
+ export_string = '192.168.1.1:/volume-00001'
+ name = 'volume-00001'
+
+ connection_info = {'data': {'export': export_string, 'name': name}}
+
+ disk_info = {
+ "dev": "vde",
+ "type": "disk",
+ "bus": "virtio",
+ }
+
+ libvirt_driver.connect_volume(connection_info, disk_info)
+ conf = libvirt_driver.get_config(connection_info, disk_info)
+ tree = conf.format_dom()
+ self.assertEqual(tree.get('type'), 'network')
+ self.assertEqual(tree.find('./driver').get('type'), 'raw')
+
+ source = tree.find('./source')
+ self.assertEqual(source.get('protocol'), 'gluster')
+ self.assertEqual(source.get('name'), 'volume-00001/volume-00001')
+ self.assertEqual(source.find('./host').get('name'), '192.168.1.1')
+ self.assertEqual(source.find('./host').get('port'), '24007')
+
+ libvirt_driver.disconnect_volume(connection_info, "vde")
+
+ def fibrechan_connection(self, volume, location, wwn):
+ return {
+ 'driver_volume_type': 'fibrechan',
+ 'data': {
+ 'volume_id': volume['id'],
+ 'target_portal': location,
+ 'target_wwn': wwn,
+ 'target_lun': 1,
+ }
+ }
+
+ def test_libvirt_fibrechan_driver(self):
+ self.stubs.Set(libvirt_utils, 'get_fc_hbas',
+ fake_libvirt_utils.get_fc_hbas)
+ self.stubs.Set(libvirt_utils, 'get_fc_hbas_info',
+ fake_libvirt_utils.get_fc_hbas_info)
+ # NOTE(vish) exists is to make driver assume connecting worked
+ self.stubs.Set(os.path, 'exists', lambda x: True)
+ self.stubs.Set(os.path, 'realpath', lambda x: '/dev/sdb')
+ libvirt_driver = volume.LibvirtFibreChannelVolumeDriver(self.fake_conn)
+ multipath_devname = '/dev/md-1'
+ devices = {"device": multipath_devname,
+ "id": "1234567890",
+ "devices": [{'device': '/dev/sdb',
+ 'address': '1:0:0:1',
+ 'host': 1, 'channel': 0,
+ 'id': 0, 'lun': 1}]}
+ self.stubs.Set(linuxscsi, 'find_multipath_device', lambda x: devices)
+ self.stubs.Set(linuxscsi, 'remove_device', lambda x: None)
+ # Should work for string, unicode, and list
+ wwns = ['1234567890123456', unicode('1234567890123456'),
+ ['1234567890123456', '1234567890123457']]
+ for wwn in wwns:
+ connection_info = self.fibrechan_connection(self.vol,
+ self.location, wwn)
+ mount_device = "vde"
+ libvirt_driver.connect_volume(connection_info, self.disk_info)
+
+ # Test the scenario where multipath_id is returned
+ libvirt_driver.disconnect_volume(connection_info, mount_device)
+ self.assertEqual(multipath_devname,
+ connection_info['data']['device_path'])
+ expected_commands = []
+ self.assertEqual(expected_commands, self.executes)
+ # Test the scenario where multipath_id is not returned
+ connection_info["data"]["devices"] = devices["devices"]
+ del connection_info["data"]["multipath_id"]
+ libvirt_driver.disconnect_volume(connection_info, mount_device)
+ expected_commands = []
+ self.assertEqual(expected_commands, self.executes)
+
+ # Should not work for anything other than string, unicode, and list
+ connection_info = self.fibrechan_connection(self.vol,
+ self.location, 123)
+ self.assertRaises(exception.NovaException,
+ libvirt_driver.connect_volume,
+ connection_info, self.disk_info)
+
+ self.stubs.Set(libvirt_utils, 'get_fc_hbas', lambda: [])
+ self.stubs.Set(libvirt_utils, 'get_fc_hbas_info', lambda: [])
+ self.assertRaises(exception.NovaException,
+ libvirt_driver.connect_volume,
+ connection_info, self.disk_info)
+
+ def test_libvirt_fibrechan_driver_get_config(self):
+ libvirt_driver = volume.LibvirtFibreChannelVolumeDriver(self.fake_conn)
+ connection_info = self.fibrechan_connection(self.vol,
+ self.location, 123)
+ connection_info['data']['device_path'] = ("/sys/devices/pci0000:00"
+ "/0000:00:03.0/0000:05:00.3/host2/fc_host/host2")
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+ self.assertEqual('block', tree.get('type'))
+ self.assertEqual(connection_info['data']['device_path'],
+ tree.find('./source').get('dev'))
+
+ def test_libvirt_fibrechan_getpci_num(self):
+ libvirt_driver = volume.LibvirtFibreChannelVolumeDriver(self.fake_conn)
+ hba = {'device_path': "/sys/devices/pci0000:00/0000:00:03.0"
+ "/0000:05:00.3/host2/fc_host/host2"}
+ pci_num = libvirt_driver._get_pci_num(hba)
+ self.assertEqual("0000:05:00.3", pci_num)
+
+ hba = {'device_path': "/sys/devices/pci0000:00/0000:00:03.0"
+ "/0000:05:00.3/0000:06:00.6/host2/fc_host/host2"}
+ pci_num = libvirt_driver._get_pci_num(hba)
+ self.assertEqual("0000:06:00.6", pci_num)
+
+ def test_libvirt_scality_driver(self):
+ tempdir = self.useFixture(fixtures.TempDir()).path
+ TEST_MOUNT = os.path.join(tempdir, 'fake_mount')
+ TEST_CONFIG = os.path.join(tempdir, 'fake_config')
+ TEST_VOLDIR = 'volumes'
+ TEST_VOLNAME = 'volume_name'
+ TEST_CONN_INFO = {
+ 'data': {
+ 'sofs_path': os.path.join(TEST_VOLDIR, TEST_VOLNAME)
+ }
+ }
+ TEST_VOLPATH = os.path.join(TEST_MOUNT,
+ TEST_VOLDIR,
+ TEST_VOLNAME)
+ open(TEST_CONFIG, "w+").close()
+ os.makedirs(os.path.join(TEST_MOUNT, 'sys'))
+
+ def _access_wrapper(path, flags):
+ if path == '/sbin/mount.sofs':
+ return True
+ else:
+ return os.access(path, flags)
+
+ self.stubs.Set(os, 'access', _access_wrapper)
+ self.flags(scality_sofs_config=TEST_CONFIG,
+ scality_sofs_mount_point=TEST_MOUNT,
+ group='libvirt')
+ driver = volume.LibvirtScalityVolumeDriver(self.fake_conn)
+ driver.connect_volume(TEST_CONN_INFO, self.disk_info)
+
+ device_path = os.path.join(TEST_MOUNT,
+ TEST_CONN_INFO['data']['sofs_path'])
+ self.assertEqual(device_path,
+ TEST_CONN_INFO['data']['device_path'])
+
+ conf = driver.get_config(TEST_CONN_INFO, self.disk_info)
+ tree = conf.format_dom()
+ self._assertFileTypeEquals(tree, TEST_VOLPATH)
diff --git a/nova/tests/unit/virt/test_block_device.py b/nova/tests/unit/virt/test_block_device.py
new file mode 100644
index 0000000000..f71438eae2
--- /dev/null
+++ b/nova/tests/unit/virt/test_block_device.py
@@ -0,0 +1,684 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+import mock
+from oslo.serialization import jsonutils
+
+from nova import block_device
+from nova import context
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit import matchers
+from nova.virt import block_device as driver_block_device
+from nova.virt import driver
+from nova.volume import cinder
+from nova.volume import encryptors
+
+
+class TestDriverBlockDevice(test.NoDBTestCase):
+ driver_classes = {
+ 'swap': driver_block_device.DriverSwapBlockDevice,
+ 'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
+ 'volume': driver_block_device.DriverVolumeBlockDevice,
+ 'snapshot': driver_block_device.DriverSnapshotBlockDevice,
+ 'image': driver_block_device.DriverImageBlockDevice,
+ 'blank': driver_block_device.DriverBlankBlockDevice
+ }
+
+ swap_bdm = block_device.BlockDeviceDict(
+ {'id': 1, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdb1',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'delete_on_termination': True,
+ 'guest_format': 'swap',
+ 'disk_bus': 'scsi',
+ 'volume_size': 2,
+ 'boot_index': -1})
+
+ swap_driver_bdm = {
+ 'device_name': '/dev/sdb1',
+ 'swap_size': 2,
+ 'disk_bus': 'scsi'}
+
+ swap_legacy_driver_bdm = {
+ 'device_name': '/dev/sdb1',
+ 'swap_size': 2}
+
+ ephemeral_bdm = block_device.BlockDeviceDict(
+ {'id': 2, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sdc1',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'volume_size': 4,
+ 'guest_format': 'ext4',
+ 'delete_on_termination': True,
+ 'boot_index': -1})
+
+ ephemeral_driver_bdm = {
+ 'device_name': '/dev/sdc1',
+ 'size': 4,
+ 'device_type': 'disk',
+ 'guest_format': 'ext4',
+ 'disk_bus': 'scsi'}
+
+ ephemeral_legacy_driver_bdm = {
+ 'device_name': '/dev/sdc1',
+ 'size': 4,
+ 'virtual_name': 'ephemeral0',
+ 'num': 0}
+
+ volume_bdm = block_device.BlockDeviceDict(
+ {'id': 3, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda1',
+ 'source_type': 'volume',
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'volume_size': 8,
+ 'destination_type': 'volume',
+ 'volume_id': 'fake-volume-id-1',
+ 'guest_format': 'ext4',
+ 'connection_info': '{"fake": "connection_info"}',
+ 'delete_on_termination': False,
+ 'boot_index': 0})
+
+ volume_driver_bdm = {
+ 'mount_device': '/dev/sda1',
+ 'connection_info': {"fake": "connection_info"},
+ 'delete_on_termination': False,
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'guest_format': 'ext4',
+ 'boot_index': 0}
+
+ volume_legacy_driver_bdm = {
+ 'mount_device': '/dev/sda1',
+ 'connection_info': {"fake": "connection_info"},
+ 'delete_on_termination': False}
+
+ snapshot_bdm = block_device.BlockDeviceDict(
+ {'id': 4, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda2',
+ 'delete_on_termination': True,
+ 'volume_size': 3,
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'source_type': 'snapshot',
+ 'destination_type': 'volume',
+ 'connection_info': '{"fake": "connection_info"}',
+ 'snapshot_id': 'fake-snapshot-id-1',
+ 'volume_id': 'fake-volume-id-2',
+ 'boot_index': -1})
+
+ snapshot_driver_bdm = {
+ 'mount_device': '/dev/sda2',
+ 'connection_info': {"fake": "connection_info"},
+ 'delete_on_termination': True,
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'guest_format': None,
+ 'boot_index': -1}
+
+ snapshot_legacy_driver_bdm = {
+ 'mount_device': '/dev/sda2',
+ 'connection_info': {"fake": "connection_info"},
+ 'delete_on_termination': True}
+
+ image_bdm = block_device.BlockDeviceDict(
+ {'id': 5, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda2',
+ 'delete_on_termination': True,
+ 'volume_size': 1,
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'source_type': 'image',
+ 'destination_type': 'volume',
+ 'connection_info': '{"fake": "connection_info"}',
+ 'image_id': 'fake-image-id-1',
+ 'volume_id': 'fake-volume-id-2',
+ 'boot_index': -1})
+
+ image_driver_bdm = {
+ 'mount_device': '/dev/sda2',
+ 'connection_info': {"fake": "connection_info"},
+ 'delete_on_termination': True,
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'guest_format': None,
+ 'boot_index': -1}
+
+ image_legacy_driver_bdm = {
+ 'mount_device': '/dev/sda2',
+ 'connection_info': {"fake": "connection_info"},
+ 'delete_on_termination': True}
+
+ blank_bdm = block_device.BlockDeviceDict(
+ {'id': 6, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda2',
+ 'delete_on_termination': True,
+ 'volume_size': 3,
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'source_type': 'blank',
+ 'destination_type': 'volume',
+ 'connection_info': '{"fake": "connection_info"}',
+ 'snapshot_id': 'fake-snapshot-id-1',
+ 'volume_id': 'fake-volume-id-2',
+ 'boot_index': -1})
+
+ blank_driver_bdm = {
+ 'mount_device': '/dev/sda2',
+ 'connection_info': {"fake": "connection_info"},
+ 'delete_on_termination': True,
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'guest_format': None,
+ 'boot_index': -1}
+
+ blank_legacy_driver_bdm = {
+ 'mount_device': '/dev/sda2',
+ 'connection_info': {"fake": "connection_info"},
+ 'delete_on_termination': True}
+
+ def setUp(self):
+ super(TestDriverBlockDevice, self).setUp()
+ self.volume_api = self.mox.CreateMock(cinder.API)
+ self.virt_driver = self.mox.CreateMock(driver.ComputeDriver)
+ self.context = context.RequestContext('fake_user',
+ 'fake_project')
+
+ def test_no_device_raises(self):
+ for name, cls in self.driver_classes.items():
+ self.assertRaises(driver_block_device._NotTransformable,
+ cls, {'no_device': True})
+
+ def _test_driver_device(self, name):
+ db_bdm = getattr(self, "%s_bdm" % name)
+ test_bdm = self.driver_classes[name](db_bdm)
+ self.assertThat(test_bdm, matchers.DictMatches(
+ getattr(self, "%s_driver_bdm" % name)))
+
+ for k, v in db_bdm.iteritems():
+ field_val = getattr(test_bdm._bdm_obj, k)
+ if isinstance(field_val, bool):
+ v = bool(v)
+ self.assertEqual(field_val, v)
+
+ self.assertThat(test_bdm.legacy(),
+ matchers.DictMatches(
+ getattr(self, "%s_legacy_driver_bdm" % name)))
+
+ # Test passthru attributes
+ for passthru in test_bdm._proxy_as_attr:
+ self.assertEqual(getattr(test_bdm, passthru),
+ getattr(test_bdm._bdm_obj, passthru))
+
+ # Make sure that all others raise _invalidType
+ for other_name, cls in self.driver_classes.iteritems():
+ if other_name == name:
+ continue
+ self.assertRaises(driver_block_device._InvalidType,
+ cls,
+ getattr(self, '%s_bdm' % name))
+
+ # Test the save method
+ with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
+ test_bdm.save(self.context)
+ for fld, alias in test_bdm._update_on_save.iteritems():
+ self.assertEqual(test_bdm[alias or fld],
+ getattr(test_bdm._bdm_obj, fld))
+
+ save_mock.assert_called_once_with(self.context)
+
+ # Test the save method with no context passed
+ with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
+ test_bdm.save()
+ save_mock.assert_called_once_with()
+
+ def _test_driver_default_size(self, name):
+ size = 'swap_size' if name == 'swap' else 'size'
+ no_size_bdm = getattr(self, "%s_bdm" % name).copy()
+ no_size_bdm['volume_size'] = None
+
+ driver_bdm = self.driver_classes[name](no_size_bdm)
+ self.assertEqual(driver_bdm[size], 0)
+
+ del no_size_bdm['volume_size']
+
+ driver_bdm = self.driver_classes[name](no_size_bdm)
+ self.assertEqual(driver_bdm[size], 0)
+
+ def test_driver_swap_block_device(self):
+ self._test_driver_device("swap")
+
+ def test_driver_swap_default_size(self):
+ self._test_driver_default_size('swap')
+
+ def test_driver_ephemeral_block_device(self):
+ self._test_driver_device("ephemeral")
+
+ def test_driver_ephemeral_default_size(self):
+ self._test_driver_default_size('ephemeral')
+
+ def test_driver_volume_block_device(self):
+ self._test_driver_device("volume")
+
+ test_bdm = self.driver_classes['volume'](
+ self.volume_bdm)
+ self.assertEqual(test_bdm['connection_info'],
+ jsonutils.loads(test_bdm._bdm_obj.connection_info))
+ self.assertEqual(test_bdm._bdm_obj.id, 3)
+ self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
+ self.assertEqual(test_bdm.volume_size, 8)
+
+ def test_driver_snapshot_block_device(self):
+ self._test_driver_device("snapshot")
+
+ test_bdm = self.driver_classes['snapshot'](
+ self.snapshot_bdm)
+ self.assertEqual(test_bdm._bdm_obj.id, 4)
+ self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
+ self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
+ self.assertEqual(test_bdm.volume_size, 3)
+
+ def test_driver_image_block_device(self):
+ self._test_driver_device('image')
+
+ test_bdm = self.driver_classes['image'](
+ self.image_bdm)
+ self.assertEqual(test_bdm._bdm_obj.id, 5)
+ self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
+ self.assertEqual(test_bdm.volume_size, 1)
+
+ def test_driver_image_block_device_destination_local(self):
+ self._test_driver_device('image')
+ bdm = self.image_bdm.copy()
+ bdm['destination_type'] = 'local'
+ self.assertRaises(driver_block_device._InvalidType,
+ self.driver_classes['image'], bdm)
+
+ def test_driver_blank_block_device(self):
+ self._test_driver_device('blank')
+
+ test_bdm = self.driver_classes['blank'](
+ self.blank_bdm)
+ self.assertEqual(6, test_bdm._bdm_obj.id)
+ self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
+ self.assertEqual(3, test_bdm.volume_size)
+
+ def _test_volume_attach(self, driver_bdm, bdm_dict,
+ fake_volume, check_attach=True,
+ fail_check_attach=False, driver_attach=False,
+ fail_driver_attach=False, volume_attach=True,
+ access_mode='rw'):
+ elevated_context = self.context.elevated()
+ self.stubs.Set(self.context, 'elevated',
+ lambda: elevated_context)
+ self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save')
+ self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata')
+ instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
+ connector = {'ip': 'fake_ip', 'host': 'fake_host'}
+ connection_info = {'data': {'access_mode': access_mode}}
+ expected_conn_info = {'data': {'access_mode': access_mode},
+ 'serial': fake_volume['id']}
+ enc_data = {'fake': 'enc_data'}
+
+ self.volume_api.get(self.context,
+ fake_volume['id']).AndReturn(fake_volume)
+ if check_attach:
+ if not fail_check_attach:
+ self.volume_api.check_attach(self.context, fake_volume,
+ instance=instance).AndReturn(None)
+ else:
+ self.volume_api.check_attach(self.context, fake_volume,
+ instance=instance).AndRaise(
+ test.TestingException)
+ return instance, expected_conn_info
+
+ self.virt_driver.get_volume_connector(instance).AndReturn(connector)
+ self.volume_api.initialize_connection(
+ elevated_context, fake_volume['id'],
+ connector).AndReturn(connection_info)
+ if driver_attach:
+ encryptors.get_encryption_metadata(
+ elevated_context, self.volume_api, fake_volume['id'],
+ connection_info).AndReturn(enc_data)
+ if not fail_driver_attach:
+ self.virt_driver.attach_volume(
+ elevated_context, expected_conn_info, instance,
+ bdm_dict['device_name'],
+ disk_bus=bdm_dict['disk_bus'],
+ device_type=bdm_dict['device_type'],
+ encryption=enc_data).AndReturn(None)
+ else:
+ self.virt_driver.attach_volume(
+ elevated_context, expected_conn_info, instance,
+ bdm_dict['device_name'],
+ disk_bus=bdm_dict['disk_bus'],
+ device_type=bdm_dict['device_type'],
+ encryption=enc_data).AndRaise(test.TestingException)
+ self.volume_api.terminate_connection(
+ elevated_context, fake_volume['id'],
+ expected_conn_info).AndReturn(None)
+ return instance, expected_conn_info
+
+ if volume_attach:
+ self.volume_api.attach(elevated_context, fake_volume['id'],
+ 'fake_uuid', bdm_dict['device_name'],
+ mode=access_mode).AndReturn(None)
+ driver_bdm._bdm_obj.save(self.context).AndReturn(None)
+ return instance, expected_conn_info
+
+ def test_volume_attach(self):
+ test_bdm = self.driver_classes['volume'](
+ self.volume_bdm)
+ volume = {'id': 'fake-volume-id-1',
+ 'attach_status': 'detached'}
+
+ instance, expected_conn_info = self._test_volume_attach(
+ test_bdm, self.volume_bdm, volume)
+
+ self.mox.ReplayAll()
+
+ test_bdm.attach(self.context, instance,
+ self.volume_api, self.virt_driver)
+ self.assertThat(test_bdm['connection_info'],
+ matchers.DictMatches(expected_conn_info))
+
+ def test_volume_attach_ro(self):
+ test_bdm = self.driver_classes['volume'](self.volume_bdm)
+ volume = {'id': 'fake-volume-id-1',
+ 'attach_status': 'detached'}
+
+ instance, expected_conn_info = self._test_volume_attach(
+ test_bdm, self.volume_bdm, volume, access_mode='ro')
+
+ self.mox.ReplayAll()
+
+ test_bdm.attach(self.context, instance,
+ self.volume_api, self.virt_driver)
+ self.assertThat(test_bdm['connection_info'],
+ matchers.DictMatches(expected_conn_info))
+
+ def check_volume_attach_check_attach_fails(self):
+ test_bdm = self.driver_classes['volume'](
+ self.volume_bdm)
+ volume = {'id': 'fake-volume-id-1'}
+
+ instance, _ = self._test_volume_attach(
+ test_bdm, self.volume_bdm, volume, fail_check_attach=True)
+ self.mox.ReplayAll()
+
+ self.asserRaises(test.TestingException, test_bdm.attach, self.context,
+ instance, self.volume_api, self.virt_driver)
+
+ def test_volume_no_volume_attach(self):
+ test_bdm = self.driver_classes['volume'](
+ self.volume_bdm)
+ volume = {'id': 'fake-volume-id-1',
+ 'attach_status': 'detached'}
+
+ instance, expected_conn_info = self._test_volume_attach(
+ test_bdm, self.volume_bdm, volume, check_attach=False,
+ driver_attach=False)
+
+ self.mox.ReplayAll()
+
+ test_bdm.attach(self.context, instance,
+ self.volume_api, self.virt_driver,
+ do_check_attach=False, do_driver_attach=False)
+ self.assertThat(test_bdm['connection_info'],
+ matchers.DictMatches(expected_conn_info))
+
+ def test_volume_attach_no_check_driver_attach(self):
+ test_bdm = self.driver_classes['volume'](
+ self.volume_bdm)
+ volume = {'id': 'fake-volume-id-1',
+ 'attach_status': 'detached'}
+
+ instance, expected_conn_info = self._test_volume_attach(
+ test_bdm, self.volume_bdm, volume, check_attach=False,
+ driver_attach=True)
+
+ self.mox.ReplayAll()
+
+ test_bdm.attach(self.context, instance,
+ self.volume_api, self.virt_driver,
+ do_check_attach=False, do_driver_attach=True)
+ self.assertThat(test_bdm['connection_info'],
+ matchers.DictMatches(expected_conn_info))
+
+ def check_volume_attach_driver_attach_fails(self):
+ test_bdm = self.driver_classes['volume'](
+ self.volume_bdm)
+ volume = {'id': 'fake-volume-id-1'}
+
+ instance, _ = self._test_volume_attach(
+ test_bdm, self.volume_bdm, volume, fail_check_attach=True)
+ self.mox.ReplayAll()
+
+ self.asserRaises(test.TestingException, test_bdm.attach, self.context,
+ instance, self.volume_api, self.virt_driver,
+ do_driver_attach=True)
+
+ def test_refresh_connection(self):
+ test_bdm = self.driver_classes['snapshot'](
+ self.snapshot_bdm)
+
+ instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
+ connector = {'ip': 'fake_ip', 'host': 'fake_host'}
+ connection_info = {'data': {'multipath_id': 'fake_multipath_id'}}
+ expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'},
+ 'serial': 'fake-volume-id-2'}
+
+ self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save')
+
+ self.virt_driver.get_volume_connector(instance).AndReturn(connector)
+ self.volume_api.initialize_connection(
+ self.context, test_bdm.volume_id,
+ connector).AndReturn(connection_info)
+ test_bdm._bdm_obj.save(self.context).AndReturn(None)
+
+ self.mox.ReplayAll()
+
+ test_bdm.refresh_connection_info(self.context, instance,
+ self.volume_api, self.virt_driver)
+ self.assertThat(test_bdm['connection_info'],
+ matchers.DictMatches(expected_conn_info))
+
+ def test_snapshot_attach_no_volume(self):
+ no_volume_snapshot = self.snapshot_bdm.copy()
+ no_volume_snapshot['volume_id'] = None
+ test_bdm = self.driver_classes['snapshot'](no_volume_snapshot)
+
+ snapshot = {'id': 'fake-volume-id-1',
+ 'attach_status': 'detached'}
+ volume = {'id': 'fake-volume-id-2',
+ 'attach_status': 'detached'}
+
+ wait_func = self.mox.CreateMockAnything()
+
+ self.volume_api.get_snapshot(self.context,
+ 'fake-snapshot-id-1').AndReturn(snapshot)
+ self.volume_api.create(self.context, 3,
+ '', '', snapshot).AndReturn(volume)
+ wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
+ instance, expected_conn_info = self._test_volume_attach(
+ test_bdm, no_volume_snapshot, volume)
+ self.mox.ReplayAll()
+
+ test_bdm.attach(self.context, instance, self.volume_api,
+ self.virt_driver, wait_func)
+ self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
+
+ def test_snapshot_attach_volume(self):
+ test_bdm = self.driver_classes['snapshot'](
+ self.snapshot_bdm)
+
+ instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
+
+ volume_class = self.driver_classes['volume']
+ self.mox.StubOutWithMock(volume_class, 'attach')
+
+        # Make sure these are not called
+ self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
+ self.mox.StubOutWithMock(self.volume_api, 'create')
+
+ volume_class.attach(self.context, instance, self.volume_api,
+ self.virt_driver, do_check_attach=True
+ ).AndReturn(None)
+ self.mox.ReplayAll()
+
+ test_bdm.attach(self.context, instance, self.volume_api,
+ self.virt_driver)
+ self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
+
+ def test_image_attach_no_volume(self):
+ no_volume_image = self.image_bdm.copy()
+ no_volume_image['volume_id'] = None
+ test_bdm = self.driver_classes['image'](no_volume_image)
+
+ image = {'id': 'fake-image-id-1'}
+ volume = {'id': 'fake-volume-id-2',
+ 'attach_status': 'detached'}
+
+ wait_func = self.mox.CreateMockAnything()
+
+ self.volume_api.create(self.context, 1,
+ '', '', image_id=image['id']).AndReturn(volume)
+ wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
+ instance, expected_conn_info = self._test_volume_attach(
+ test_bdm, no_volume_image, volume)
+ self.mox.ReplayAll()
+
+ test_bdm.attach(self.context, instance, self.volume_api,
+ self.virt_driver, wait_func)
+ self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
+
+ def test_image_attach_volume(self):
+ test_bdm = self.driver_classes['image'](
+ self.image_bdm)
+
+ instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
+
+ volume_class = self.driver_classes['volume']
+ self.mox.StubOutWithMock(volume_class, 'attach')
+
+        # Make sure these are not called
+ self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
+ self.mox.StubOutWithMock(self.volume_api, 'create')
+
+ volume_class.attach(self.context, instance, self.volume_api,
+ self.virt_driver, do_check_attach=True
+ ).AndReturn(None)
+ self.mox.ReplayAll()
+
+ test_bdm.attach(self.context, instance, self.volume_api,
+ self.virt_driver)
+ self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
+
+ def test_blank_attach_volume(self):
+ no_blank_volume = self.blank_bdm.copy()
+ no_blank_volume['volume_id'] = None
+ test_bdm = self.driver_classes['blank'](no_blank_volume)
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
+ **{'uuid': 'fake-uuid'})
+ volume_class = self.driver_classes['volume']
+ volume = {'id': 'fake-volume-id-2',
+ 'display_name': 'fake-uuid-blank-vol'}
+
+ with contextlib.nested(
+ mock.patch.object(self.volume_api, 'create', return_value=volume),
+ mock.patch.object(volume_class, 'attach')
+ ) as (vol_create, vol_attach):
+ test_bdm.attach(self.context, instance, self.volume_api,
+ self.virt_driver)
+
+ vol_create.assert_called_once_with(self.context,
+ test_bdm.volume_size,
+ 'fake-uuid-blank-vol',
+ '')
+ vol_attach.assert_called_once_with(self.context, instance,
+ self.volume_api,
+ self.virt_driver,
+ do_check_attach=True)
+ self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
+
+ def test_convert_block_devices(self):
+ converted = driver_block_device._convert_block_devices(
+ self.driver_classes['volume'],
+ [self.volume_bdm, self.ephemeral_bdm])
+ self.assertEqual(converted, [self.volume_driver_bdm])
+
+ def test_legacy_block_devices(self):
+ test_snapshot = self.driver_classes['snapshot'](
+ self.snapshot_bdm)
+
+ block_device_mapping = [test_snapshot, test_snapshot]
+ legacy_bdm = driver_block_device.legacy_block_devices(
+ block_device_mapping)
+ self.assertEqual(legacy_bdm, [self.snapshot_legacy_driver_bdm,
+ self.snapshot_legacy_driver_bdm])
+
+ # Test that the ephemerals work as expected
+ test_ephemerals = [self.driver_classes['ephemeral'](
+ self.ephemeral_bdm) for _ in xrange(2)]
+ expected = [self.ephemeral_legacy_driver_bdm.copy()
+ for _ in xrange(2)]
+ expected[0]['virtual_name'] = 'ephemeral0'
+ expected[0]['num'] = 0
+ expected[1]['virtual_name'] = 'ephemeral1'
+ expected[1]['num'] = 1
+ legacy_ephemerals = driver_block_device.legacy_block_devices(
+ test_ephemerals)
+ self.assertEqual(expected, legacy_ephemerals)
+
+ def test_get_swap(self):
+ swap = [self.swap_driver_bdm]
+ legacy_swap = [self.swap_legacy_driver_bdm]
+ no_swap = [self.volume_driver_bdm]
+
+ self.assertEqual(swap[0], driver_block_device.get_swap(swap))
+ self.assertEqual(legacy_swap[0],
+ driver_block_device.get_swap(legacy_swap))
+ self.assertIsNone(driver_block_device.get_swap(no_swap))
+ self.assertIsNone(driver_block_device.get_swap([]))
+
+ def test_is_implemented(self):
+ for bdm in (self.image_bdm, self.volume_bdm, self.swap_bdm,
+ self.ephemeral_bdm, self.snapshot_bdm):
+ self.assertTrue(driver_block_device.is_implemented(bdm))
+ local_image = self.image_bdm.copy()
+ local_image['destination_type'] = 'local'
+ self.assertFalse(driver_block_device.is_implemented(local_image))
+
+ def test_is_block_device_mapping(self):
+ test_swap = self.driver_classes['swap'](self.swap_bdm)
+ test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm)
+ test_image = self.driver_classes['image'](self.image_bdm)
+ test_snapshot = self.driver_classes['snapshot'](self.snapshot_bdm)
+ test_volume = self.driver_classes['volume'](self.volume_bdm)
+ test_blank = self.driver_classes['blank'](self.blank_bdm)
+
+ for bdm in (test_image, test_snapshot, test_volume, test_blank):
+ self.assertTrue(driver_block_device.is_block_device_mapping(
+ bdm._bdm_obj))
+
+ for bdm in (test_swap, test_ephemeral):
+ self.assertFalse(driver_block_device.is_block_device_mapping(
+ bdm._bdm_obj))
diff --git a/nova/tests/virt/test_configdrive.py b/nova/tests/unit/virt/test_configdrive.py
index b8dc717b80..b8dc717b80 100644
--- a/nova/tests/virt/test_configdrive.py
+++ b/nova/tests/unit/virt/test_configdrive.py
diff --git a/nova/tests/virt/test_diagnostics.py b/nova/tests/unit/virt/test_diagnostics.py
index f3969fc09f..f3969fc09f 100644
--- a/nova/tests/virt/test_diagnostics.py
+++ b/nova/tests/unit/virt/test_diagnostics.py
diff --git a/nova/tests/unit/virt/test_driver.py b/nova/tests/unit/virt/test_driver.py
new file mode 100644
index 0000000000..572afdedec
--- /dev/null
+++ b/nova/tests/unit/virt/test_driver.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2013 Citrix Systems, Inc.
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova.virt import driver
+
+
+class FakeDriver(object):
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+
+
+class FakeDriver2(FakeDriver):
+ pass
+
+
+class ToDriverRegistryTestCase(test.NoDBTestCase):
+
+ def assertDriverInstance(self, inst, class_, *args, **kwargs):
+ self.assertEqual(class_, inst.__class__)
+ self.assertEqual(args, inst.args)
+ self.assertEqual(kwargs, inst.kwargs)
+
+ def test_driver_dict_from_config(self):
+ drvs = driver.driver_dict_from_config(
+ [
+ 'key1=nova.tests.unit.virt.test_driver.FakeDriver',
+ 'key2=nova.tests.unit.virt.test_driver.FakeDriver2',
+ ], 'arg1', 'arg2', param1='value1', param2='value2'
+ )
+
+ self.assertEqual(
+ sorted(['key1', 'key2']),
+ sorted(drvs.keys())
+ )
+
+ self.assertDriverInstance(
+ drvs['key1'],
+ FakeDriver, 'arg1', 'arg2', param1='value1',
+ param2='value2')
+
+ self.assertDriverInstance(
+ drvs['key2'],
+ FakeDriver2, 'arg1', 'arg2', param1='value1',
+ param2='value2')
diff --git a/nova/tests/virt/test_events.py b/nova/tests/unit/virt/test_events.py
index 792a8d0453..792a8d0453 100644
--- a/nova/tests/virt/test_events.py
+++ b/nova/tests/unit/virt/test_events.py
diff --git a/nova/tests/unit/virt/test_hardware.py b/nova/tests/unit/virt/test_hardware.py
new file mode 100644
index 0000000000..d0781a6ca7
--- /dev/null
+++ b/nova/tests/unit/virt/test_hardware.py
@@ -0,0 +1,1439 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import mock
+from oslo.serialization import jsonutils
+import six
+
+from nova import context
+from nova import exception
+from nova import objects
+from nova.objects import base as base_obj
+from nova import test
+from nova.tests.unit import matchers
+from nova.virt import hardware as hw
+
+
+class FakeFlavor(dict):
+ def __init__(self, vcpus, memory, extra_specs):
+ self['vcpus'] = vcpus
+ self['memory_mb'] = memory
+ self['extra_specs'] = extra_specs
+
+
+class FakeFlavorObject(object):
+ def __init__(self, vcpus, memory, extra_specs):
+ self.vcpus = vcpus
+ self.memory_mb = memory
+ self.extra_specs = extra_specs
+
+ def __getitem__(self, item):
+ try:
+ return getattr(self, item)
+ except AttributeError:
+ raise KeyError(item)
+
+ def get(self, item, default=None):
+ try:
+ return getattr(self, item)
+ except AttributeError:
+ return default
+
+
+class CpuSetTestCase(test.NoDBTestCase):
+ def test_get_vcpu_pin_set(self):
+ self.flags(vcpu_pin_set="1-3,5,^2")
+ cpuset_ids = hw.get_vcpu_pin_set()
+ self.assertEqual(set([1, 3, 5]), cpuset_ids)
+
+ def test_parse_cpu_spec_none_returns_none(self):
+ self.flags(vcpu_pin_set=None)
+ cpuset_ids = hw.get_vcpu_pin_set()
+ self.assertIsNone(cpuset_ids)
+
+ def test_parse_cpu_spec_valid_syntax_works(self):
+ cpuset_ids = hw.parse_cpu_spec("1")
+ self.assertEqual(set([1]), cpuset_ids)
+
+ cpuset_ids = hw.parse_cpu_spec("1,2")
+ self.assertEqual(set([1, 2]), cpuset_ids)
+
+ cpuset_ids = hw.parse_cpu_spec(", , 1 , ,, 2, ,")
+ self.assertEqual(set([1, 2]), cpuset_ids)
+
+ cpuset_ids = hw.parse_cpu_spec("1-1")
+ self.assertEqual(set([1]), cpuset_ids)
+
+ cpuset_ids = hw.parse_cpu_spec(" 1 - 1, 1 - 2 , 1 -3")
+ self.assertEqual(set([1, 2, 3]), cpuset_ids)
+
+ cpuset_ids = hw.parse_cpu_spec("1,^2")
+ self.assertEqual(set([1]), cpuset_ids)
+
+ cpuset_ids = hw.parse_cpu_spec("1-2, ^1")
+ self.assertEqual(set([2]), cpuset_ids)
+
+ cpuset_ids = hw.parse_cpu_spec("1-3,5,^2")
+ self.assertEqual(set([1, 3, 5]), cpuset_ids)
+
+ cpuset_ids = hw.parse_cpu_spec(" 1 - 3 , ^2, 5")
+ self.assertEqual(set([1, 3, 5]), cpuset_ids)
+
+ cpuset_ids = hw.parse_cpu_spec(" 1,1, ^1")
+ self.assertEqual(set([]), cpuset_ids)
+
+ def test_parse_cpu_spec_invalid_syntax_raises(self):
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ " -1-3,5,^2")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "1-3-,5,^2")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "-3,5,^2")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "1-,5,^2")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "1-3,5,^2^")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "1-3,5,^2-")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "--13,^^5,^2")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "a-3,5,^2")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "1-a,5,^2")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "1-3,b,^2")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "1-3,5,^c")
+
+ self.assertRaises(exception.Invalid,
+ hw.parse_cpu_spec,
+ "3 - 1, 5 , ^ 2 ")
+
+ def test_format_cpu_spec(self):
+ cpus = set([])
+ spec = hw.format_cpu_spec(cpus)
+ self.assertEqual("", spec)
+
+ cpus = []
+ spec = hw.format_cpu_spec(cpus)
+ self.assertEqual("", spec)
+
+ cpus = set([1, 3])
+ spec = hw.format_cpu_spec(cpus)
+ self.assertEqual("1,3", spec)
+
+ cpus = [1, 3]
+ spec = hw.format_cpu_spec(cpus)
+ self.assertEqual("1,3", spec)
+
+ cpus = set([1, 2, 4, 6])
+ spec = hw.format_cpu_spec(cpus)
+ self.assertEqual("1-2,4,6", spec)
+
+ cpus = [1, 2, 4, 6]
+ spec = hw.format_cpu_spec(cpus)
+ self.assertEqual("1-2,4,6", spec)
+
+ cpus = set([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48])
+ spec = hw.format_cpu_spec(cpus)
+ self.assertEqual("10-11,13-16,19-20,40,42,48", spec)
+
+ cpus = [10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48]
+ spec = hw.format_cpu_spec(cpus)
+ self.assertEqual("10-11,13-16,19-20,40,42,48", spec)
+
+ cpus = set([1, 2, 4, 6])
+ spec = hw.format_cpu_spec(cpus, allow_ranges=False)
+ self.assertEqual("1,2,4,6", spec)
+
+ cpus = [1, 2, 4, 6]
+ spec = hw.format_cpu_spec(cpus, allow_ranges=False)
+ self.assertEqual("1,2,4,6", spec)
+
+ cpus = set([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48])
+ spec = hw.format_cpu_spec(cpus, allow_ranges=False)
+ self.assertEqual("10,11,13,14,15,16,19,20,40,42,48", spec)
+
+ cpus = [10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48]
+ spec = hw.format_cpu_spec(cpus, allow_ranges=False)
+ self.assertEqual("10,11,13,14,15,16,19,20,40,42,48", spec)
+
+
+class VCPUTopologyTest(test.NoDBTestCase):
+
+ def test_validate_config(self):
+ testdata = [
+ { # Flavor sets preferred topology only
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "8",
+ "hw:cpu_cores": "2",
+ "hw:cpu_threads": "1",
+ }),
+ "image": {
+ "properties": {}
+ },
+ "expect": (
+ 8, 2, 1, 65536, 65536, 65536
+ )
+ },
+ { # Image topology overrides flavor
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "8",
+ "hw:cpu_cores": "2",
+ "hw:cpu_threads": "1",
+ "hw:cpu_max_threads": "2",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_sockets": "4",
+ "hw_cpu_cores": "2",
+ "hw_cpu_threads": "2",
+ }
+ },
+ "expect": (
+ 4, 2, 2, 65536, 65536, 2,
+ )
+ },
+ { # Partial image topology overrides flavor
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "8",
+ "hw:cpu_cores": "2",
+ "hw:cpu_threads": "1",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_sockets": "2",
+ }
+ },
+ "expect": (
+ 2, -1, -1, 65536, 65536, 65536,
+ )
+ },
+ { # Restrict use of threads
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_max_threads": "2",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_max_threads": "1",
+ }
+ },
+ "expect": (
+ -1, -1, -1, 65536, 65536, 1,
+ )
+ },
+ { # Force use of at least two sockets
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_max_cores": "8",
+ "hw:cpu_max_threads": "1",
+ }),
+ "image": {
+ "properties": {}
+ },
+ "expect": (
+ -1, -1, -1, 65536, 8, 1
+ )
+ },
+ { # Image limits reduce flavor
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_max_cores": "8",
+ "hw:cpu_max_threads": "1",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_max_cores": "4",
+ }
+ },
+ "expect": (
+ -1, -1, -1, 65536, 4, 1
+ )
+ },
+ { # Image limits kill flavor preferred
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "2",
+ "hw:cpu_cores": "8",
+ "hw:cpu_threads": "1",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_max_cores": "4",
+ }
+ },
+ "expect": (
+ -1, -1, -1, 65536, 4, 65536
+ )
+ },
+ { # Image limits cannot exceed flavor
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_max_cores": "8",
+ "hw:cpu_max_threads": "1",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_max_cores": "16",
+ }
+ },
+ "expect": exception.ImageVCPULimitsRangeExceeded,
+ },
+ { # Image preferred cannot exceed flavor
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_max_cores": "8",
+ "hw:cpu_max_threads": "1",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_cores": "16",
+ }
+ },
+ "expect": exception.ImageVCPUTopologyRangeExceeded,
+ },
+ ]
+
+ for topo_test in testdata:
+ if type(topo_test["expect"]) == tuple:
+ (preferred,
+ maximum) = hw.VirtCPUTopology.get_topology_constraints(
+ topo_test["flavor"],
+ topo_test["image"])
+
+ self.assertEqual(topo_test["expect"][0], preferred.sockets)
+ self.assertEqual(topo_test["expect"][1], preferred.cores)
+ self.assertEqual(topo_test["expect"][2], preferred.threads)
+ self.assertEqual(topo_test["expect"][3], maximum.sockets)
+ self.assertEqual(topo_test["expect"][4], maximum.cores)
+ self.assertEqual(topo_test["expect"][5], maximum.threads)
+ else:
+ self.assertRaises(topo_test["expect"],
+ hw.VirtCPUTopology.get_topology_constraints,
+ topo_test["flavor"],
+ topo_test["image"])
+
+ def test_possible_configs(self):
+ testdata = [
+ {
+ "allow_threads": True,
+ "vcpus": 8,
+ "maxsockets": 8,
+ "maxcores": 8,
+ "maxthreads": 2,
+ "expect": [
+ [8, 1, 1],
+ [4, 2, 1],
+ [2, 4, 1],
+ [1, 8, 1],
+ [4, 1, 2],
+ [2, 2, 2],
+ [1, 4, 2],
+ ]
+ },
+ {
+ "allow_threads": False,
+ "vcpus": 8,
+ "maxsockets": 8,
+ "maxcores": 8,
+ "maxthreads": 2,
+ "expect": [
+ [8, 1, 1],
+ [4, 2, 1],
+ [2, 4, 1],
+ [1, 8, 1],
+ ]
+ },
+ {
+ "allow_threads": True,
+ "vcpus": 8,
+ "maxsockets": 1024,
+ "maxcores": 1024,
+ "maxthreads": 2,
+ "expect": [
+ [8, 1, 1],
+ [4, 2, 1],
+ [2, 4, 1],
+ [1, 8, 1],
+ [4, 1, 2],
+ [2, 2, 2],
+ [1, 4, 2],
+ ]
+ },
+ {
+ "allow_threads": True,
+ "vcpus": 8,
+ "maxsockets": 1024,
+ "maxcores": 1,
+ "maxthreads": 2,
+ "expect": [
+ [8, 1, 1],
+ [4, 1, 2],
+ ]
+ },
+ {
+ "allow_threads": True,
+ "vcpus": 7,
+ "maxsockets": 8,
+ "maxcores": 8,
+ "maxthreads": 2,
+ "expect": [
+ [7, 1, 1],
+ [1, 7, 1],
+ ]
+ },
+ {
+ "allow_threads": True,
+ "vcpus": 8,
+ "maxsockets": 2,
+ "maxcores": 1,
+ "maxthreads": 1,
+ "expect": exception.ImageVCPULimitsRangeImpossible,
+ },
+ {
+ "allow_threads": False,
+ "vcpus": 8,
+ "maxsockets": 2,
+ "maxcores": 1,
+ "maxthreads": 4,
+ "expect": exception.ImageVCPULimitsRangeImpossible,
+ },
+ ]
+
+ for topo_test in testdata:
+ if type(topo_test["expect"]) == list:
+ actual = []
+ for topology in hw.VirtCPUTopology.get_possible_topologies(
+ topo_test["vcpus"],
+ hw.VirtCPUTopology(topo_test["maxsockets"],
+ topo_test["maxcores"],
+ topo_test["maxthreads"]),
+ topo_test["allow_threads"]):
+ actual.append([topology.sockets,
+ topology.cores,
+ topology.threads])
+
+ self.assertEqual(topo_test["expect"], actual)
+ else:
+ self.assertRaises(topo_test["expect"],
+ hw.VirtCPUTopology.get_possible_topologies,
+ topo_test["vcpus"],
+ hw.VirtCPUTopology(topo_test["maxsockets"],
+ topo_test["maxcores"],
+ topo_test["maxthreads"]),
+ topo_test["allow_threads"])
+
+ def test_sorting_configs(self):
+ testdata = [
+ {
+ "allow_threads": True,
+ "vcpus": 8,
+ "maxsockets": 8,
+ "maxcores": 8,
+ "maxthreads": 2,
+ "sockets": 4,
+ "cores": 2,
+ "threads": 1,
+ "expect": [
+ [4, 2, 1], # score = 2
+ [8, 1, 1], # score = 1
+ [2, 4, 1], # score = 1
+ [1, 8, 1], # score = 1
+ [4, 1, 2], # score = 1
+ [2, 2, 2], # score = 1
+ [1, 4, 2], # score = 1
+ ]
+ },
+ {
+ "allow_threads": True,
+ "vcpus": 8,
+ "maxsockets": 1024,
+ "maxcores": 1024,
+ "maxthreads": 2,
+ "sockets": -1,
+ "cores": 4,
+ "threads": -1,
+ "expect": [
+ [2, 4, 1], # score = 1
+ [1, 4, 2], # score = 1
+ [8, 1, 1], # score = 0
+ [4, 2, 1], # score = 0
+ [1, 8, 1], # score = 0
+ [4, 1, 2], # score = 0
+ [2, 2, 2], # score = 0
+ ]
+ },
+ {
+ "allow_threads": True,
+ "vcpus": 8,
+ "maxsockets": 1024,
+ "maxcores": 1,
+ "maxthreads": 2,
+ "sockets": -1,
+ "cores": -1,
+ "threads": 2,
+ "expect": [
+ [4, 1, 2], # score = 1
+ [8, 1, 1], # score = 0
+ ]
+ },
+ {
+ "allow_threads": False,
+ "vcpus": 8,
+ "maxsockets": 1024,
+ "maxcores": 1,
+ "maxthreads": 2,
+ "sockets": -1,
+ "cores": -1,
+ "threads": 2,
+ "expect": [
+ [8, 1, 1], # score = 0
+ ]
+ },
+ ]
+
+ for topo_test in testdata:
+ actual = []
+ possible = hw.VirtCPUTopology.get_possible_topologies(
+ topo_test["vcpus"],
+ hw.VirtCPUTopology(topo_test["maxsockets"],
+ topo_test["maxcores"],
+ topo_test["maxthreads"]),
+ topo_test["allow_threads"])
+
+ tops = hw.VirtCPUTopology.sort_possible_topologies(
+ possible,
+ hw.VirtCPUTopology(topo_test["sockets"],
+ topo_test["cores"],
+ topo_test["threads"]))
+ for topology in tops:
+ actual.append([topology.sockets,
+ topology.cores,
+ topology.threads])
+
+ self.assertEqual(topo_test["expect"], actual)
+
+ def test_best_config(self):
+ testdata = [
+ { # Flavor sets preferred topology only
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "8",
+ "hw:cpu_cores": "2",
+ "hw:cpu_threads": "1"
+ }),
+ "image": {
+ "properties": {}
+ },
+ "expect": [8, 2, 1],
+ },
+ { # Image topology overrides flavor
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "8",
+ "hw:cpu_cores": "2",
+ "hw:cpu_threads": "1",
+ "hw:cpu_maxthreads": "2",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_sockets": "4",
+ "hw_cpu_cores": "2",
+ "hw_cpu_threads": "2",
+ }
+ },
+ "expect": [4, 2, 2],
+ },
+ { # Image topology overrides flavor
+ "allow_threads": False,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "8",
+ "hw:cpu_cores": "2",
+ "hw:cpu_threads": "1",
+ "hw:cpu_maxthreads": "2",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_sockets": "4",
+ "hw_cpu_cores": "2",
+ "hw_cpu_threads": "2",
+ }
+ },
+ "expect": [8, 2, 1],
+ },
+ { # Partial image topology overrides flavor
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "8",
+ "hw:cpu_cores": "2",
+ "hw:cpu_threads": "1"
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_sockets": "2"
+ }
+ },
+ "expect": [2, 8, 1],
+ },
+ { # Restrict use of threads
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_max_threads": "1"
+ }),
+ "image": {
+ "properties": {}
+ },
+ "expect": [16, 1, 1]
+ },
+ { # Force use of at least two sockets
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_max_cores": "8",
+ "hw:cpu_max_threads": "1",
+ }),
+ "image": {
+ "properties": {}
+ },
+ "expect": [16, 1, 1]
+ },
+ { # Image limits reduce flavor
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_max_sockets": "8",
+ "hw:cpu_max_cores": "8",
+ "hw:cpu_max_threads": "1",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_max_sockets": 4,
+ }
+ },
+ "expect": [4, 4, 1]
+ },
+ { # Image limits kill flavor preferred
+ "allow_threads": True,
+ "flavor": FakeFlavorObject(16, 2048, {
+ "hw:cpu_sockets": "2",
+ "hw:cpu_cores": "8",
+ "hw:cpu_threads": "1",
+ }),
+ "image": {
+ "properties": {
+ "hw_cpu_max_cores": 4,
+ }
+ },
+ "expect": [16, 1, 1]
+ },
+ ]
+
+ for topo_test in testdata:
+ topology = hw.VirtCPUTopology.get_desirable_configs(
+ topo_test["flavor"],
+ topo_test["image"],
+ topo_test["allow_threads"])[0]
+
+ self.assertEqual(topo_test["expect"][0], topology.sockets)
+ self.assertEqual(topo_test["expect"][1], topology.cores)
+ self.assertEqual(topo_test["expect"][2], topology.threads)
+
+
+class NUMATopologyTest(test.NoDBTestCase):
+
+ def test_topology_constraints(self):
+ testdata = [
+ {
+ "flavor": FakeFlavor(8, 2048, {
+ }),
+ "image": {
+ },
+ "expect": None,
+ },
+ {
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2
+ }),
+ "image": {
+ },
+ "expect": hw.VirtNUMAInstanceTopology(
+ [
+ hw.VirtNUMATopologyCellInstance(
+ 0, set([0, 1, 2, 3]), 1024),
+ hw.VirtNUMATopologyCellInstance(
+ 1, set([4, 5, 6, 7]), 1024),
+ ]),
+ },
+ {
+ # vcpus is not a multiple of nodes, so it
+ # is an error to not provide cpu/mem mapping
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 3
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyAsymmetric,
+ },
+ {
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 3,
+ "hw:numa_cpus.0": "0-3",
+ "hw:numa_mem.0": "1024",
+ "hw:numa_cpus.1": "4,6",
+ "hw:numa_mem.1": "512",
+ "hw:numa_cpus.2": "5,7",
+ "hw:numa_mem.2": "512",
+ }),
+ "image": {
+ },
+ "expect": hw.VirtNUMAInstanceTopology(
+ [
+ hw.VirtNUMATopologyCellInstance(
+ 0, set([0, 1, 2, 3]), 1024),
+ hw.VirtNUMATopologyCellInstance(
+ 1, set([4, 6]), 512),
+ hw.VirtNUMATopologyCellInstance(
+ 2, set([5, 7]), 512),
+ ]),
+ },
+ {
+ # Request a CPU that is out of range
+ # wrt vCPU count
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 1,
+ "hw:numa_cpus.0": "0-16",
+ "hw:numa_mem.0": "2048",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyCPUOutOfRange,
+ },
+ {
+ # Request the same CPU in two nodes
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-7",
+ "hw:numa_mem.0": "1024",
+ "hw:numa_cpus.1": "0-7",
+ "hw:numa_mem.1": "1024",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyCPUDuplicates,
+ },
+ {
+ # Request with some CPUs not assigned
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-2",
+ "hw:numa_mem.0": "1024",
+ "hw:numa_cpus.1": "3-4",
+ "hw:numa_mem.1": "1024",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyCPUsUnassigned,
+ },
+ {
+ # Request too little memory vs flavor total
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-3",
+ "hw:numa_mem.0": "512",
+ "hw:numa_cpus.1": "4-7",
+ "hw:numa_mem.1": "512",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyMemoryOutOfRange,
+ },
+ {
+ # Request too much memory vs flavor total
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-3",
+ "hw:numa_mem.0": "1576",
+ "hw:numa_cpus.1": "4-7",
+ "hw:numa_mem.1": "1576",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyMemoryOutOfRange,
+ },
+ {
+ # Request missing mem.0
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-3",
+ "hw:numa_mem.1": "1576",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyIncomplete,
+ },
+ {
+ # Request missing cpu.0
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_mem.0": "1576",
+ "hw:numa_cpus.1": "4-7",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyIncomplete,
+ },
+ {
+ # Image attempts to override flavor
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ }),
+ "image": {
+ "hw_numa_nodes": 4,
+ },
+ "expect": exception.ImageNUMATopologyForbidden,
+ },
+ ]
+
+ for testitem in testdata:
+ if testitem["expect"] is None:
+ topology = hw.VirtNUMAInstanceTopology.get_constraints(
+ testitem["flavor"], testitem["image"])
+ self.assertIsNone(topology)
+ elif type(testitem["expect"]) == type:
+ self.assertRaises(testitem["expect"],
+ hw.VirtNUMAInstanceTopology.get_constraints,
+ testitem["flavor"],
+ testitem["image"])
+ else:
+ topology = hw.VirtNUMAInstanceTopology.get_constraints(
+ testitem["flavor"], testitem["image"])
+ self.assertEqual(len(testitem["expect"].cells),
+ len(topology.cells))
+ for i in range(len(topology.cells)):
+ self.assertEqual(testitem["expect"].cells[i].cpuset,
+ topology.cells[i].cpuset)
+ self.assertEqual(testitem["expect"].cells[i].memory,
+ topology.cells[i].memory)
+
+ def test_can_fit_instances(self):
+ hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(0, set([0, 1, 2, 3]), 1024),
+ hw.VirtNUMATopologyCellUsage(1, set([4, 6]), 512)
+ ])
+ instance1 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1, 2]), 256),
+ hw.VirtNUMATopologyCellInstance(1, set([4]), 256),
+ ])
+ instance2 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
+ hw.VirtNUMATopologyCellInstance(1, set([4, 6]), 256),
+ hw.VirtNUMATopologyCellInstance(2, set([7, 8]), 256),
+ ])
+
+ self.assertTrue(hw.VirtNUMAHostTopology.can_fit_instances(
+ hosttopo, []))
+ self.assertTrue(hw.VirtNUMAHostTopology.can_fit_instances(
+ hosttopo, [instance1]))
+ self.assertFalse(hw.VirtNUMAHostTopology.can_fit_instances(
+ hosttopo, [instance2]))
+ self.assertFalse(hw.VirtNUMAHostTopology.can_fit_instances(
+ hosttopo, [instance1, instance2]))
+
+ def test_host_usage_contiguous(self):
+ hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(0, set([0, 1, 2, 3]), 1024),
+ hw.VirtNUMATopologyCellUsage(1, set([4, 6]), 512),
+ hw.VirtNUMATopologyCellUsage(2, set([5, 7]), 512),
+ ])
+ instance1 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1, 2]), 256),
+ hw.VirtNUMATopologyCellInstance(1, set([4]), 256),
+ ])
+ instance2 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
+ hw.VirtNUMATopologyCellInstance(1, set([5, 7]), 256),
+ ])
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, [instance1, instance2])
+
+ self.assertEqual(len(hosttopo), len(hostusage))
+
+ self.assertIsInstance(hostusage.cells[0],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[0].cpuset,
+ hostusage.cells[0].cpuset)
+ self.assertEqual(hosttopo.cells[0].memory,
+ hostusage.cells[0].memory)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 5)
+ self.assertEqual(hostusage.cells[0].memory_usage, 512)
+
+ self.assertIsInstance(hostusage.cells[1],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[1].cpuset,
+ hostusage.cells[1].cpuset)
+ self.assertEqual(hosttopo.cells[1].memory,
+ hostusage.cells[1].memory)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 3)
+ self.assertEqual(hostusage.cells[1].memory_usage, 512)
+
+ self.assertIsInstance(hostusage.cells[2],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[2].cpuset,
+ hostusage.cells[2].cpuset)
+ self.assertEqual(hosttopo.cells[2].memory,
+ hostusage.cells[2].memory)
+ self.assertEqual(hostusage.cells[2].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[2].memory_usage, 0)
+
+ def test_host_usage_sparse(self):
+ hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(0, set([0, 1, 2, 3]), 1024),
+ hw.VirtNUMATopologyCellUsage(5, set([4, 6]), 512),
+ hw.VirtNUMATopologyCellUsage(6, set([5, 7]), 512),
+ ])
+ instance1 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1, 2]), 256),
+ hw.VirtNUMATopologyCellInstance(6, set([4]), 256),
+ ])
+ instance2 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
+ hw.VirtNUMATopologyCellInstance(5, set([5, 7]), 256),
+ ])
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, [instance1, instance2])
+
+ self.assertEqual(len(hosttopo), len(hostusage))
+
+ self.assertIsInstance(hostusage.cells[0],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[0].id,
+ hostusage.cells[0].id)
+ self.assertEqual(hosttopo.cells[0].cpuset,
+ hostusage.cells[0].cpuset)
+ self.assertEqual(hosttopo.cells[0].memory,
+ hostusage.cells[0].memory)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 5)
+ self.assertEqual(hostusage.cells[0].memory_usage, 512)
+
+ self.assertIsInstance(hostusage.cells[1],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[1].id,
+ hostusage.cells[1].id)
+ self.assertEqual(hosttopo.cells[1].cpuset,
+ hostusage.cells[1].cpuset)
+ self.assertEqual(hosttopo.cells[1].memory,
+ hostusage.cells[1].memory)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 2)
+ self.assertEqual(hostusage.cells[1].memory_usage, 256)
+
+ self.assertIsInstance(hostusage.cells[2],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[2].cpuset,
+ hostusage.cells[2].cpuset)
+ self.assertEqual(hosttopo.cells[2].memory,
+ hostusage.cells[2].memory)
+ self.assertEqual(hostusage.cells[2].cpu_usage, 1)
+ self.assertEqual(hostusage.cells[2].memory_usage, 256)
+
+ def test_host_usage_cumulative_with_free(self):
+ hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(
+ 0, set([0, 1, 2, 3]), 1024, cpu_usage=2, memory_usage=512),
+ hw.VirtNUMATopologyCellUsage(
+ 1, set([4, 6]), 512, cpu_usage=1, memory_usage=512),
+ hw.VirtNUMATopologyCellUsage(2, set([5, 7]), 256),
+ ])
+ instance1 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1, 2]), 512),
+ hw.VirtNUMATopologyCellInstance(1, set([3]), 256),
+ hw.VirtNUMATopologyCellInstance(2, set([4]), 256)])
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, [instance1])
+ self.assertIsInstance(hostusage.cells[0],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 5)
+ self.assertEqual(hostusage.cells[0].memory_usage, 1024)
+
+ self.assertIsInstance(hostusage.cells[1],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 2)
+ self.assertEqual(hostusage.cells[1].memory_usage, 768)
+
+ self.assertIsInstance(hostusage.cells[2],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hostusage.cells[2].cpu_usage, 1)
+ self.assertEqual(hostusage.cells[2].memory_usage, 256)
+
+ # Test freeing of resources
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hostusage, [instance1], free=True)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 2)
+ self.assertEqual(hostusage.cells[0].memory_usage, 512)
+
+ self.assertEqual(hostusage.cells[1].cpu_usage, 1)
+ self.assertEqual(hostusage.cells[1].memory_usage, 512)
+
+ self.assertEqual(hostusage.cells[2].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[2].memory_usage, 0)
+
+ def test_topo_usage_none(self):
+ hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(0, set([0, 1]), 512),
+ hw.VirtNUMATopologyCellUsage(1, set([2, 3]), 512),
+ ])
+ instance1 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
+ hw.VirtNUMATopologyCellInstance(2, set([2]), 256),
+ ])
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ None, [instance1])
+ self.assertIsNone(hostusage)
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, [])
+ self.assertEqual(hostusage.cells[0].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[0].memory_usage, 0)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[1].memory_usage, 0)
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, None)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[0].memory_usage, 0)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[1].memory_usage, 0)
+
+ def _test_to_dict(self, cell_or_topo, expected):
+ got = cell_or_topo._to_dict()
+ self.assertThat(expected, matchers.DictMatches(got))
+
+ def assertNUMACellMatches(self, expected_cell, got_cell):
+ attrs = ('cpuset', 'memory', 'id')
+ if isinstance(expected_cell, hw.VirtNUMAHostTopology):
+ attrs += ('cpu_usage', 'memory_usage')
+
+ for attr in attrs:
+ self.assertEqual(getattr(expected_cell, attr),
+ getattr(got_cell, attr))
+
+ def _test_cell_from_dict(self, data_dict, expected_cell):
+ cell_class = expected_cell.__class__
+ got_cell = cell_class._from_dict(data_dict)
+ self.assertNUMACellMatches(expected_cell, got_cell)
+
+ def _test_topo_from_dict(self, data_dict, expected_topo):
+ got_topo = expected_topo.__class__._from_dict(
+ data_dict)
+ for got_cell, expected_cell in zip(
+ got_topo.cells, expected_topo.cells):
+ self.assertNUMACellMatches(expected_cell, got_cell)
+
+ def test_numa_cell_dict(self):
+ cell = hw.VirtNUMATopologyCellInstance(1, set([1, 2]), 512)
+ cell_dict = {'cpus': '1,2',
+ 'mem': {'total': 512},
+ 'id': 1,
+ 'pagesize': None}
+ self._test_to_dict(cell, cell_dict)
+ self._test_cell_from_dict(cell_dict, cell)
+
+ def test_numa_cell_pagesize_dict(self):
+ cell = hw.VirtNUMATopologyCellInstance(
+ 1, set([1, 2]), 512, hw.VirtPageSize(2048))
+ cell_dict = {'cpus': '1,2',
+ 'mem': {'total': 512},
+ 'id': 1,
+ 'pagesize': 2048}
+ self._test_to_dict(cell, cell_dict)
+ self._test_cell_from_dict(cell_dict, cell)
+
+ def test_numa_limit_cell_dict(self):
+ cell = hw.VirtNUMATopologyCellLimit(1, set([1, 2]), 512, 4, 2048)
+ cell_dict = {'cpus': '1,2', 'cpu_limit': 4,
+ 'mem': {'total': 512, 'limit': 2048},
+ 'id': 1}
+ self._test_to_dict(cell, cell_dict)
+ self._test_cell_from_dict(cell_dict, cell)
+
+ def test_numa_cell_usage_dict(self):
+ cell = hw.VirtNUMATopologyCellUsage(1, set([1, 2]), 512)
+ cell_dict = {'cpus': '1,2', 'cpu_usage': 0,
+ 'mem': {'total': 512, 'used': 0},
+ 'id': 1}
+ self._test_to_dict(cell, cell_dict)
+ self._test_cell_from_dict(cell_dict, cell)
+
+ def test_numa_instance_topo_dict(self):
+ topo = hw.VirtNUMAInstanceTopology(
+ cells=[
+ hw.VirtNUMATopologyCellInstance(1, set([1, 2]), 1024),
+ hw.VirtNUMATopologyCellInstance(2, set([3, 4]), 1024)])
+ topo_dict = {'cells': [
+ {'cpus': '1,2',
+ 'mem': {'total': 1024},
+ 'id': 1,
+ 'pagesize': None},
+ {'cpus': '3,4',
+ 'mem': {'total': 1024},
+ 'id': 2,
+ 'pagesize': None}]}
+ self._test_to_dict(topo, topo_dict)
+ self._test_topo_from_dict(topo_dict, topo)
+
+ def test_numa_limits_topo_dict(self):
+ topo = hw.VirtNUMALimitTopology(
+ cells=[
+ hw.VirtNUMATopologyCellLimit(
+ 1, set([1, 2]), 1024, 4, 2048),
+ hw.VirtNUMATopologyCellLimit(
+ 2, set([3, 4]), 1024, 4, 2048)])
+ topo_dict = {'cells': [
+ {'cpus': '1,2', 'cpu_limit': 4,
+ 'mem': {'total': 1024, 'limit': 2048},
+ 'id': 1},
+ {'cpus': '3,4', 'cpu_limit': 4,
+ 'mem': {'total': 1024, 'limit': 2048},
+ 'id': 2}]}
+ self._test_to_dict(topo, topo_dict)
+ self._test_topo_from_dict(topo_dict, topo)
+
+ def test_numa_topo_dict_with_usage(self):
+ topo = hw.VirtNUMAHostTopology(
+ cells=[
+ hw.VirtNUMATopologyCellUsage(
+ 1, set([1, 2]), 1024),
+ hw.VirtNUMATopologyCellUsage(
+ 2, set([3, 4]), 1024)])
+ topo_dict = {'cells': [
+ {'cpus': '1,2', 'cpu_usage': 0,
+ 'mem': {'total': 1024, 'used': 0},
+ 'id': 1},
+ {'cpus': '3,4', 'cpu_usage': 0,
+ 'mem': {'total': 1024, 'used': 0},
+ 'id': 2}]}
+ self._test_to_dict(topo, topo_dict)
+ self._test_topo_from_dict(topo_dict, topo)
+
+ def test_json(self):
+ expected = hw.VirtNUMAHostTopology(
+ cells=[
+ hw.VirtNUMATopologyCellUsage(
+ 1, set([1, 2]), 1024),
+ hw.VirtNUMATopologyCellUsage(
+ 2, set([3, 4]), 1024)])
+ got = hw.VirtNUMAHostTopology.from_json(expected.to_json())
+
+ for exp_cell, got_cell in zip(expected.cells, got.cells):
+ self.assertNUMACellMatches(exp_cell, got_cell)
+
+
+class NumberOfSerialPortsTest(test.NoDBTestCase):
+ def test_flavor(self):
+ flavor = FakeFlavorObject(8, 2048, {"hw:serial_port_count": 3})
+ num_ports = hw.get_number_of_serial_ports(flavor, None)
+ self.assertEqual(3, num_ports)
+
+ def test_image_meta(self):
+ flavor = FakeFlavorObject(8, 2048, {})
+ image_meta = {"properties": {"hw_serial_port_count": 2}}
+ num_ports = hw.get_number_of_serial_ports(flavor, image_meta)
+ self.assertEqual(2, num_ports)
+
+ def test_flavor_invalid_value(self):
+ flavor = FakeFlavorObject(8, 2048, {"hw:serial_port_count": 'foo'})
+ image_meta = {"properties": {}}
+ self.assertRaises(exception.ImageSerialPortNumberInvalid,
+ hw.get_number_of_serial_ports,
+ flavor, image_meta)
+
+ def test_image_meta_invalid_value(self):
+ flavor = FakeFlavorObject(8, 2048, {})
+ image_meta = {"properties": {"hw_serial_port_count": 'bar'}}
+ self.assertRaises(exception.ImageSerialPortNumberInvalid,
+ hw.get_number_of_serial_ports,
+ flavor, image_meta)
+
+ def test_image_meta_smaller_than_flavor(self):
+ flavor = FakeFlavorObject(8, 2048, {"hw:serial_port_count": 3})
+ image_meta = {"properties": {"hw_serial_port_count": 2}}
+ num_ports = hw.get_number_of_serial_ports(flavor, image_meta)
+ self.assertEqual(2, num_ports)
+
+ def test_flavor_smaller_than_image_meta(self):
+ flavor = FakeFlavorObject(8, 2048, {"hw:serial_port_count": 3})
+ image_meta = {"properties": {"hw_serial_port_count": 4}}
+ self.assertRaises(exception.ImageSerialPortNumberExceedFlavorValue,
+ hw.get_number_of_serial_ports,
+ flavor, image_meta)
+
+
+class NUMATopologyClaimsTest(test.NoDBTestCase):
+ def setUp(self):
+ super(NUMATopologyClaimsTest, self).setUp()
+
+ self.host = hw.VirtNUMAHostTopology(
+ cells=[
+ hw.VirtNUMATopologyCellUsage(
+ 1, set([1, 2, 3, 4]), 2048,
+ cpu_usage=1, memory_usage=512),
+ hw.VirtNUMATopologyCellUsage(
+ 2, set([5, 6]), 1024)])
+
+ self.limits = hw.VirtNUMALimitTopology(
+ cells=[
+ hw.VirtNUMATopologyCellLimit(
+ 1, set([1, 2, 3, 4]), 2048,
+ cpu_limit=8, memory_limit=4096),
+ hw.VirtNUMATopologyCellLimit(
+ 2, set([5, 6]), 1024,
+ cpu_limit=4, memory_limit=2048)])
+
+ self.large_instance = hw.VirtNUMAInstanceTopology(
+ cells=[
+ hw.VirtNUMATopologyCellInstance(
+ 1, set([1, 2, 3, 4, 5, 6]), 8192),
+ hw.VirtNUMATopologyCellInstance(
+ 2, set([7, 8]), 4096)])
+ self.medium_instance = hw.VirtNUMAInstanceTopology(
+ cells=[
+ hw.VirtNUMATopologyCellInstance(
+ 1, set([1, 2, 3, 4]), 1024),
+ hw.VirtNUMATopologyCellInstance(
+ 2, set([7, 8]), 2048)])
+ self.small_instance = hw.VirtNUMAInstanceTopology(
+ cells=[
+ hw.VirtNUMATopologyCellInstance(1, set([1]), 256),
+ hw.VirtNUMATopologyCellInstance(2, set([5]), 1024)])
+ self.no_fit_instance = hw.VirtNUMAInstanceTopology(
+ cells=[
+ hw.VirtNUMATopologyCellInstance(1, set([1]), 256),
+ hw.VirtNUMATopologyCellInstance(2, set([2]), 256),
+ hw.VirtNUMATopologyCellInstance(3, set([3]), 256)])
+
+ def test_claim_not_enough_info(self):
+
+ # No limits supplied
+ self.assertIsNone(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.large_instance]))
+ # Empty topology
+ self.assertIsNone(
+ hw.VirtNUMAHostTopology.claim_test(
+ hw.VirtNUMAHostTopology(), [self.large_instance],
+ limits=self.limits))
+ # No instances to claim
+ self.assertIsNone(
+ hw.VirtNUMAHostTopology.claim_test(self.host, [], self.limits))
+
+ def test_claim_succeeds(self):
+ self.assertIsNone(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.small_instance], self.limits))
+ self.assertIsNone(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.medium_instance], self.limits))
+
+ def test_claim_fails(self):
+ self.assertIsInstance(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.large_instance], self.limits),
+ six.text_type)
+
+ self.assertIsInstance(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.medium_instance, self.small_instance],
+ self.limits),
+ six.text_type)
+
+ # Instance fails if it won't fit the topology
+ self.assertIsInstance(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.no_fit_instance], self.limits),
+ six.text_type)
+
+ # Instance fails if it won't fit the topology even with no limits
+ self.assertIsInstance(
+ hw.VirtNUMAHostTopology.claim_test(
+ self.host, [self.no_fit_instance]), six.text_type)
+
+
+class HelperMethodsTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(HelperMethodsTestCase, self).setUp()
+ self.hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(0, set([0, 1]), 512),
+ hw.VirtNUMATopologyCellUsage(1, set([2, 3]), 512),
+ ])
+ self.instancetopo = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
+ hw.VirtNUMATopologyCellInstance(1, set([2]), 256),
+ ])
+ self.context = context.RequestContext('fake-user',
+ 'fake-project')
+
+ def _check_usage(self, host_usage):
+ self.assertEqual(2, host_usage.cells[0].cpu_usage)
+ self.assertEqual(256, host_usage.cells[0].memory_usage)
+ self.assertEqual(1, host_usage.cells[1].cpu_usage)
+ self.assertEqual(256, host_usage.cells[1].memory_usage)
+
+ def test_dicts_json(self):
+ host = {'numa_topology': self.hosttopo.to_json()}
+ instance = {'numa_topology': self.instancetopo.to_json()}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
+
+ def test_dicts_instance_json(self):
+ host = {'numa_topology': self.hosttopo}
+ instance = {'numa_topology': self.instancetopo.to_json()}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, hw.VirtNUMAHostTopology)
+ self._check_usage(res)
+
+ def test_dicts_host_json(self):
+ host = {'numa_topology': self.hosttopo.to_json()}
+ instance = {'numa_topology': self.instancetopo}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
+
+ def test_object_host_instance_json(self):
+ host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
+ instance = {'numa_topology': self.instancetopo.to_json()}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
+
+ def test_object_host_instance(self):
+ host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
+ instance = {'numa_topology': self.instancetopo}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
+
+ def test_instance_with_fetch(self):
+ host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
+ fake_uuid = str(uuid.uuid4())
+ instance = {'uuid': fake_uuid}
+
+ with mock.patch.object(objects.InstanceNUMATopology,
+ 'get_by_instance_uuid', return_value=None) as get_mock:
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self.assertTrue(get_mock.called)
+
+ def test_object_instance_with_load(self):
+ host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
+ fake_uuid = str(uuid.uuid4())
+ instance = objects.Instance(context=self.context, uuid=fake_uuid)
+
+ with mock.patch.object(objects.InstanceNUMATopology,
+ 'get_by_instance_uuid', return_value=None) as get_mock:
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self.assertTrue(get_mock.called)
+
+ def test_instance_serialized_by_build_request_spec(self):
+ host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
+ fake_uuid = str(uuid.uuid4())
+ instance = objects.Instance(context=self.context, id=1, uuid=fake_uuid,
+ numa_topology=objects.InstanceNUMATopology.obj_from_topology(
+ self.instancetopo))
+ # NOTE (ndipanov): This emulates scheduler.utils.build_request_spec
+ # We can remove this test once we no longer use that method.
+ instance_raw = jsonutils.to_primitive(
+ base_obj.obj_to_primitive(instance))
+ res = hw.get_host_numa_usage_from_instance(host, instance_raw)
+ self.assertIsInstance(res, six.string_types)
+ self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
+
+ def test_attr_host(self):
+ class Host(object):
+ def __init__(obj):
+ obj.numa_topology = self.hosttopo.to_json()
+
+ host = Host()
+ instance = {'numa_topology': self.instancetopo.to_json()}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance)
+ self.assertIsInstance(res, six.string_types)
+ self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
+
+ def test_never_serialize_result(self):
+ host = {'numa_topology': self.hosttopo.to_json()}
+ instance = {'numa_topology': self.instancetopo}
+
+ res = hw.get_host_numa_usage_from_instance(host, instance,
+ never_serialize_result=True)
+ self.assertIsInstance(res, hw.VirtNUMAHostTopology)
+ self._check_usage(res)
+
+
+class VirtMemoryPagesTestCase(test.NoDBTestCase):
+ def test_virt_pages_topology(self):
+ pages = hw.VirtPagesTopology(4, 1024, 512)
+ self.assertEqual(4, pages.size_kb)
+ self.assertEqual(1024, pages.total)
+ self.assertEqual(512, pages.used)
+
+ def test_virt_pages_topology_to_dict(self):
+ pages = hw.VirtPagesTopology(4, 1024, 512)
+ self.assertEqual({'size_kb': 4,
+ 'total': 1024,
+ 'used': 512}, pages.to_dict())
+
+ def test_virt_pages_topology_from_dict(self):
+ pages = hw.VirtPagesTopology.from_dict({'size_kb': 4,
+ 'total': 1024,
+ 'used': 512})
+ self.assertEqual(4, pages.size_kb)
+ self.assertEqual(1024, pages.total)
+ self.assertEqual(512, pages.used)
+
+ def test_cell_instance_pagesize(self):
+ pagesize = hw.VirtPageSize(2048)
+ cell = hw.VirtNUMATopologyCellInstance(
+ 0, set([0]), 1024, pagesize)
+
+ self.assertEqual(0, cell.id)
+ self.assertEqual(set([0]), cell.cpuset)
+ self.assertEqual(1024, cell.memory)
+ self.assertEqual(2048, cell.pagesize.size_kb)
diff --git a/nova/tests/unit/virt/test_imagecache.py b/nova/tests/unit/virt/test_imagecache.py
new file mode 100644
index 0000000000..dc587fb4bc
--- /dev/null
+++ b/nova/tests/unit/virt/test_imagecache.py
@@ -0,0 +1,122 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo.config import cfg
+
+from nova.compute import vm_states
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.virt import imagecache
+
+CONF = cfg.CONF
+
+
+class ImageCacheManagerTests(test.NoDBTestCase):
+
+ def test_configuration_defaults(self):
+ self.assertEqual(2400, CONF.image_cache_manager_interval)
+ self.assertEqual('_base', CONF.image_cache_subdirectory_name)
+ self.assertTrue(CONF.remove_unused_base_images)
+ self.assertEqual(24 * 3600,
+ CONF.remove_unused_original_minimum_age_seconds)
+
+ def test_cache_manager(self):
+ cache_manager = imagecache.ImageCacheManager()
+ self.assertTrue(cache_manager.remove_unused_base_images)
+ self.assertRaises(NotImplementedError,
+ cache_manager.update, None, [])
+ self.assertRaises(NotImplementedError,
+ cache_manager._get_base)
+ base_images = cache_manager._list_base_images(None)
+ self.assertEqual([], base_images['unexplained_images'])
+ self.assertEqual([], base_images['originals'])
+ self.assertRaises(NotImplementedError,
+ cache_manager._age_and_verify_cached_images,
+ None, [], None)
+
+ def test_list_running_instances(self):
+ instances = [{'image_ref': '1',
+ 'host': CONF.host,
+ 'id': '1',
+ 'uuid': '123',
+ 'vm_state': '',
+ 'task_state': ''},
+ {'image_ref': '2',
+ 'host': CONF.host,
+ 'id': '2',
+ 'uuid': '456',
+ 'vm_state': '',
+ 'task_state': ''},
+ {'image_ref': '2',
+ 'kernel_id': '21',
+ 'ramdisk_id': '22',
+ 'host': 'remotehost',
+ 'id': '3',
+ 'uuid': '789',
+ 'vm_state': '',
+ 'task_state': ''}]
+
+ all_instances = [fake_instance.fake_instance_obj(None, **instance)
+ for instance in instances]
+
+ image_cache_manager = imagecache.ImageCacheManager()
+
+ # The argument here should be a context, but it's mocked out
+ running = image_cache_manager._list_running_instances(None,
+ all_instances)
+
+ self.assertEqual(4, len(running['used_images']))
+ self.assertEqual((1, 0, ['instance-00000001']),
+ running['used_images']['1'])
+ self.assertEqual((1, 1, ['instance-00000002',
+ 'instance-00000003']),
+ running['used_images']['2'])
+ self.assertEqual((0, 1, ['instance-00000003']),
+ running['used_images']['21'])
+ self.assertEqual((0, 1, ['instance-00000003']),
+ running['used_images']['22'])
+
+ self.assertIn('instance-00000001', running['instance_names'])
+ self.assertIn('123', running['instance_names'])
+
+ self.assertEqual(4, len(running['image_popularity']))
+ self.assertEqual(1, running['image_popularity']['1'])
+ self.assertEqual(2, running['image_popularity']['2'])
+ self.assertEqual(1, running['image_popularity']['21'])
+ self.assertEqual(1, running['image_popularity']['22'])
+
+ def test_list_resizing_instances(self):
+ instances = [{'image_ref': '1',
+ 'host': CONF.host,
+ 'id': '1',
+ 'uuid': '123',
+ 'vm_state': vm_states.RESIZED,
+ 'task_state': None}]
+
+ all_instances = [fake_instance.fake_instance_obj(None, **instance)
+ for instance in instances]
+
+ image_cache_manager = imagecache.ImageCacheManager()
+ running = image_cache_manager._list_running_instances(None,
+ all_instances)
+
+ self.assertEqual(1, len(running['used_images']))
+ self.assertEqual((1, 0, ['instance-00000001']),
+ running['used_images']['1'])
+ self.assertEqual(set(['instance-00000001', '123',
+ 'instance-00000001_resize', '123_resize']),
+ running['instance_names'])
+
+ self.assertEqual(1, len(running['image_popularity']))
+ self.assertEqual(1, running['image_popularity']['1'])
diff --git a/nova/tests/virt/test_images.py b/nova/tests/unit/virt/test_images.py
index be5ea73ef1..be5ea73ef1 100644
--- a/nova/tests/virt/test_images.py
+++ b/nova/tests/unit/virt/test_images.py
diff --git a/nova/tests/virt/test_virt.py b/nova/tests/unit/virt/test_virt.py
index 67b0ac503a..67b0ac503a 100644
--- a/nova/tests/virt/test_virt.py
+++ b/nova/tests/unit/virt/test_virt.py
diff --git a/nova/tests/unit/virt/test_virt_drivers.py b/nova/tests/unit/virt/test_virt_drivers.py
new file mode 100644
index 0000000000..48c009fd42
--- /dev/null
+++ b/nova/tests/unit/virt/test_virt_drivers.py
@@ -0,0 +1,881 @@
+# Copyright 2010 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import sys
+import traceback
+
+import fixtures
+import mock
+import netaddr
+from oslo.serialization import jsonutils
+from oslo.utils import importutils
+from oslo.utils import timeutils
+import six
+
+from nova.compute import manager
+from nova.console import type as ctype
+from nova import exception
+from nova import objects
+from nova.openstack.common import log as logging
+from nova import test
+from nova.tests.unit import fake_block_device
+from nova.tests.unit.image import fake as fake_image
+from nova.tests.unit import utils as test_utils
+from nova.tests.unit.virt.libvirt import fake_libvirt_utils
+from nova.virt import block_device as driver_block_device
+from nova.virt import event as virtevent
+from nova.virt import fake
+from nova.virt import libvirt
+from nova.virt.libvirt import imagebackend
+
+LOG = logging.getLogger(__name__)
+
+
+def catch_notimplementederror(f):
+ """Decorator to simplify catching drivers raising NotImplementedError
+
+ If a particular call makes a driver raise NotImplementedError, we
+ log it so that we can extract this information afterwards as needed.
+ """
+ def wrapped_func(self, *args, **kwargs):
+ try:
+ return f(self, *args, **kwargs)
+ except NotImplementedError:
+ frame = traceback.extract_tb(sys.exc_info()[2])[-1]
+ LOG.error("%(driver)s does not implement %(method)s "
+ "required for test %(test)s" %
+ {'driver': type(self.connection),
+ 'method': frame[2], 'test': f.__name__})
+
+ wrapped_func.__name__ = f.__name__
+ wrapped_func.__doc__ = f.__doc__
+ return wrapped_func
+
+
+class _FakeDriverBackendTestCase(object):
+ def _setup_fakelibvirt(self):
+ # So that the _supports_direct_io does the test based
+ # on the current working directory, instead of the
+ # default instances_path which doesn't exist
+ self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
+
+ # Put fakelibvirt in place
+ if 'libvirt' in sys.modules:
+ self.saved_libvirt = sys.modules['libvirt']
+ else:
+ self.saved_libvirt = None
+
+ import nova.tests.unit.virt.libvirt.fake_imagebackend as \
+ fake_imagebackend
+ import nova.tests.unit.virt.libvirt.fake_libvirt_utils as \
+ fake_libvirt_utils
+ import nova.tests.unit.virt.libvirt.fakelibvirt as fakelibvirt
+
+ sys.modules['libvirt'] = fakelibvirt
+ import nova.virt.libvirt.driver
+ import nova.virt.libvirt.firewall
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.driver.imagebackend',
+ fake_imagebackend))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.driver.libvirt',
+ fakelibvirt))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.driver.libvirt_utils',
+ fake_libvirt_utils))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.imagebackend.libvirt_utils',
+ fake_libvirt_utils))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.firewall.libvirt',
+ fakelibvirt))
+
+ self.flags(rescue_image_id="2",
+ rescue_kernel_id="3",
+ rescue_ramdisk_id=None,
+ snapshots_directory='./',
+ group='libvirt')
+
+ def fake_extend(image, size):
+ pass
+
+ def fake_migrateToURI(*a):
+ pass
+
+ def fake_make_drive(_self, _path):
+ pass
+
+ def fake_get_instance_disk_info(_self, instance, xml=None,
+ block_device_info=None):
+ return '[]'
+
+ def fake_delete_instance_files(_self, _instance):
+ pass
+
+ self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
+ '_get_instance_disk_info',
+ fake_get_instance_disk_info)
+
+ self.stubs.Set(nova.virt.libvirt.driver.disk,
+ 'extend', fake_extend)
+
+ self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
+ '_delete_instance_files',
+ fake_delete_instance_files)
+
+ # Like the existing fakelibvirt.migrateToURI, do nothing,
+ # but don't fail for these tests.
+ self.stubs.Set(nova.virt.libvirt.driver.libvirt.Domain,
+ 'migrateToURI', fake_migrateToURI)
+
+ # We can't actually make a config drive v2 because ensure_tree has
+ # been faked out
+ self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
+ 'make_drive', fake_make_drive)
+
+ def _teardown_fakelibvirt(self):
+ # Restore libvirt
+ if self.saved_libvirt:
+ sys.modules['libvirt'] = self.saved_libvirt
+
+ def setUp(self):
+ super(_FakeDriverBackendTestCase, self).setUp()
+ # TODO(sdague): it would be nice to do this in a way that only
+        # the relevant backends were replaced for tests, though this
+ # should not harm anything by doing it for all backends
+ fake_image.stub_out_image_service(self.stubs)
+ self._setup_fakelibvirt()
+
+ def tearDown(self):
+ fake_image.FakeImageService_reset()
+ self._teardown_fakelibvirt()
+ super(_FakeDriverBackendTestCase, self).tearDown()
+
+
+class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
+ """Test that ComputeManager can successfully load both
+ old style and new style drivers and end up with the correct
+ final class.
+ """
+
+ # if your driver supports being tested in a fake way, it can go here
+ #
+ # both long form and short form drivers are supported
+ new_drivers = {
+ 'nova.virt.fake.FakeDriver': 'FakeDriver',
+ 'nova.virt.libvirt.LibvirtDriver': 'LibvirtDriver',
+ 'fake.FakeDriver': 'FakeDriver',
+ 'libvirt.LibvirtDriver': 'LibvirtDriver'
+ }
+
+ def test_load_new_drivers(self):
+ for cls, driver in self.new_drivers.iteritems():
+ self.flags(compute_driver=cls)
+ # NOTE(sdague) the try block is to make it easier to debug a
+ # failure by knowing which driver broke
+ try:
+ cm = manager.ComputeManager()
+ except Exception as e:
+ self.fail("Couldn't load driver %s - %s" % (cls, e))
+
+ self.assertEqual(cm.driver.__class__.__name__, driver,
+                             "Couldn't load driver %s" % cls)
+
+ def test_fail_to_load_new_drivers(self):
+ self.flags(compute_driver='nova.virt.amiga')
+
+ def _fake_exit(error):
+ raise test.TestingException()
+
+ self.stubs.Set(sys, 'exit', _fake_exit)
+ self.assertRaises(test.TestingException, manager.ComputeManager)
+
+
+class _VirtDriverTestCase(_FakeDriverBackendTestCase):
+ def setUp(self):
+ super(_VirtDriverTestCase, self).setUp()
+
+ self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
+ self.connection = importutils.import_object(self.driver_module,
+ fake.FakeVirtAPI())
+ self.ctxt = test_utils.get_test_admin_context()
+ self.image_service = fake_image.FakeImageService()
+ # NOTE(dripton): resolve_driver_format does some file reading and
+ # writing and chowning that complicate testing too much by requiring
+ # using real directories with proper permissions. Just stub it out
+ # here; we test it in test_imagebackend.py
+ self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
+ imagebackend.Image._get_driver_format)
+
+ def _get_running_instance(self, obj=True):
+ instance_ref = test_utils.get_test_instance(obj=obj)
+ network_info = test_utils.get_test_network_info()
+ network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
+ '1.1.1.1'
+ image_info = test_utils.get_test_image_info(None, instance_ref)
+ self.connection.spawn(self.ctxt, instance_ref, image_info,
+ [], 'herp', network_info=network_info)
+ return instance_ref, network_info
+
+ @catch_notimplementederror
+ def test_init_host(self):
+ self.connection.init_host('myhostname')
+
+ @catch_notimplementederror
+ def test_list_instances(self):
+ self.connection.list_instances()
+
+ @catch_notimplementederror
+ def test_list_instance_uuids(self):
+ self.connection.list_instance_uuids()
+
+ @catch_notimplementederror
+ def test_spawn(self):
+ instance_ref, network_info = self._get_running_instance()
+ domains = self.connection.list_instances()
+ self.assertIn(instance_ref['name'], domains)
+
+ num_instances = self.connection.get_num_instances()
+ self.assertEqual(1, num_instances)
+
+ @catch_notimplementederror
+ def test_snapshot_not_running(self):
+ instance_ref = test_utils.get_test_instance()
+ img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
+ self.assertRaises(exception.InstanceNotRunning,
+ self.connection.snapshot,
+ self.ctxt, instance_ref, img_ref['id'],
+ lambda *args, **kwargs: None)
+
+ @catch_notimplementederror
+ def test_snapshot_running(self):
+ img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'],
+ lambda *args, **kwargs: None)
+
+ @catch_notimplementederror
+ def test_post_interrupted_snapshot_cleanup(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.post_interrupted_snapshot_cleanup(self.ctxt,
+ instance_ref)
+
+ @catch_notimplementederror
+ def test_reboot(self):
+ reboot_type = "SOFT"
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.reboot(self.ctxt, instance_ref, network_info,
+ reboot_type)
+
+ @catch_notimplementederror
+ def test_get_host_ip_addr(self):
+ host_ip = self.connection.get_host_ip_addr()
+
+ # Will raise an exception if it's not a valid IP at all
+ ip = netaddr.IPAddress(host_ip)
+
+ # For now, assume IPv4.
+ self.assertEqual(ip.version, 4)
+
+ @catch_notimplementederror
+ def test_set_admin_password(self):
+ instance, network_info = self._get_running_instance(obj=True)
+ self.connection.set_admin_password(instance, 'p4ssw0rd')
+
+ @catch_notimplementederror
+ def test_inject_file(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.inject_file(instance_ref,
+ base64.b64encode('/testfile'),
+ base64.b64encode('testcontents'))
+
+ @catch_notimplementederror
+ def test_resume_state_on_host_boot(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.resume_state_on_host_boot(self.ctxt, instance_ref,
+ network_info)
+
+ @catch_notimplementederror
+ def test_rescue(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.rescue(self.ctxt, instance_ref, network_info, None, '')
+
+ @catch_notimplementederror
+ def test_unrescue_unrescued_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.unrescue(instance_ref, network_info)
+
+ @catch_notimplementederror
+ def test_unrescue_rescued_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.rescue(self.ctxt, instance_ref, network_info, None, '')
+ self.connection.unrescue(instance_ref, network_info)
+
+ @catch_notimplementederror
+ def test_poll_rebooting_instances(self):
+ instances = [self._get_running_instance()]
+ self.connection.poll_rebooting_instances(10, instances)
+
+ @catch_notimplementederror
+ def test_migrate_disk_and_power_off(self):
+ instance_ref, network_info = self._get_running_instance()
+ flavor_ref = test_utils.get_test_flavor()
+ self.connection.migrate_disk_and_power_off(
+ self.ctxt, instance_ref, 'dest_host', flavor_ref,
+ network_info)
+
+ @catch_notimplementederror
+ def test_power_off(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.power_off(instance_ref)
+
+ @catch_notimplementederror
+ def test_power_on_running(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.power_on(self.ctxt, instance_ref,
+ network_info, None)
+
+ @catch_notimplementederror
+ def test_power_on_powered_off(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.power_off(instance_ref)
+ self.connection.power_on(self.ctxt, instance_ref, network_info, None)
+
+ @catch_notimplementederror
+ def test_soft_delete(self):
+ instance_ref, network_info = self._get_running_instance(obj=True)
+ self.connection.soft_delete(instance_ref)
+
+ @catch_notimplementederror
+ def test_restore_running(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.restore(instance_ref)
+
+ @catch_notimplementederror
+ def test_restore_soft_deleted(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.soft_delete(instance_ref)
+ self.connection.restore(instance_ref)
+
+ @catch_notimplementederror
+ def test_pause(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.pause(instance_ref)
+
+ @catch_notimplementederror
+ def test_unpause_unpaused_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.unpause(instance_ref)
+
+ @catch_notimplementederror
+ def test_unpause_paused_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.pause(instance_ref)
+ self.connection.unpause(instance_ref)
+
+ @catch_notimplementederror
+ def test_suspend(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.suspend(instance_ref)
+
+ @catch_notimplementederror
+ def test_resume_unsuspended_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.resume(self.ctxt, instance_ref, network_info)
+
+ @catch_notimplementederror
+ def test_resume_suspended_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.suspend(instance_ref)
+ self.connection.resume(self.ctxt, instance_ref, network_info)
+
+ @catch_notimplementederror
+ def test_destroy_instance_nonexistent(self):
+ fake_instance = {'id': 42, 'name': 'I just made this up!',
+ 'uuid': 'bda5fb9e-b347-40e8-8256-42397848cb00'}
+ network_info = test_utils.get_test_network_info()
+ self.connection.destroy(self.ctxt, fake_instance, network_info)
+
+ @catch_notimplementederror
+ def test_destroy_instance(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.assertIn(instance_ref['name'],
+ self.connection.list_instances())
+ self.connection.destroy(self.ctxt, instance_ref, network_info)
+ self.assertNotIn(instance_ref['name'],
+ self.connection.list_instances())
+
+ @catch_notimplementederror
+ def test_get_volume_connector(self):
+ result = self.connection.get_volume_connector({'id': 'fake'})
+ self.assertIn('ip', result)
+ self.assertIn('initiator', result)
+ self.assertIn('host', result)
+
+ @catch_notimplementederror
+ def test_attach_detach_volume(self):
+ instance_ref, network_info = self._get_running_instance()
+ connection_info = {
+ "driver_volume_type": "fake",
+ "serial": "fake_serial",
+ "data": {}
+ }
+ self.assertIsNone(
+ self.connection.attach_volume(None, connection_info, instance_ref,
+ '/dev/sda'))
+ self.assertIsNone(
+ self.connection.detach_volume(connection_info, instance_ref,
+ '/dev/sda'))
+
+ @catch_notimplementederror
+ def test_swap_volume(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.assertIsNone(
+ self.connection.attach_volume(None, {'driver_volume_type': 'fake',
+ 'data': {}},
+ instance_ref,
+ '/dev/sda'))
+ self.assertIsNone(
+ self.connection.swap_volume({'driver_volume_type': 'fake',
+ 'data': {}},
+ {'driver_volume_type': 'fake',
+ 'data': {}},
+ instance_ref,
+ '/dev/sda', 2))
+
+ @catch_notimplementederror
+ def test_attach_detach_different_power_states(self):
+ instance_ref, network_info = self._get_running_instance()
+ connection_info = {
+ "driver_volume_type": "fake",
+ "serial": "fake_serial",
+ "data": {}
+ }
+ self.connection.power_off(instance_ref)
+ self.connection.attach_volume(None, connection_info, instance_ref,
+ '/dev/sda')
+
+ bdm = {
+ 'root_device_name': None,
+ 'swap': None,
+ 'ephemerals': [],
+ 'block_device_mapping': driver_block_device.convert_volumes([
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1, 'instance_uuid': instance_ref['uuid'],
+ 'device_name': '/dev/sda',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'delete_on_termination': False,
+ 'snapshot_id': None,
+ 'volume_id': 'abcdedf',
+ 'volume_size': None,
+ 'no_device': None
+ }),
+ ])
+ }
+ bdm['block_device_mapping'][0]['connection_info'] = (
+ {'driver_volume_type': 'fake', 'data': {}})
+ with mock.patch.object(
+ driver_block_device.DriverVolumeBlockDevice, 'save'):
+ self.connection.power_on(
+ self.ctxt, instance_ref, network_info, bdm)
+ self.connection.detach_volume(connection_info,
+ instance_ref,
+ '/dev/sda')
+
+ @catch_notimplementederror
+ def test_get_info(self):
+ instance_ref, network_info = self._get_running_instance()
+ info = self.connection.get_info(instance_ref)
+ self.assertIn('state', info)
+ self.assertIn('max_mem', info)
+ self.assertIn('mem', info)
+ self.assertIn('num_cpu', info)
+ self.assertIn('cpu_time', info)
+
+ @catch_notimplementederror
+ def test_get_info_for_unknown_instance(self):
+ self.assertRaises(exception.NotFound,
+ self.connection.get_info,
+ {'name': 'I just made this name up'})
+
+ @catch_notimplementederror
+ def test_get_diagnostics(self):
+ instance_ref, network_info = self._get_running_instance(obj=True)
+ self.connection.get_diagnostics(instance_ref)
+
+ @catch_notimplementederror
+ def test_get_instance_diagnostics(self):
+ instance_ref, network_info = self._get_running_instance(obj=True)
+ instance_ref['launched_at'] = timeutils.utcnow()
+ self.connection.get_instance_diagnostics(instance_ref)
+
+ @catch_notimplementederror
+ def test_block_stats(self):
+ instance_ref, network_info = self._get_running_instance()
+ stats = self.connection.block_stats(instance_ref['name'], 'someid')
+ self.assertEqual(len(stats), 5)
+
+ @catch_notimplementederror
+ def test_interface_stats(self):
+ instance_ref, network_info = self._get_running_instance()
+ stats = self.connection.interface_stats(instance_ref['name'], 'someid')
+ self.assertEqual(len(stats), 8)
+
+ @catch_notimplementederror
+ def test_get_console_output(self):
+ fake_libvirt_utils.files['dummy.log'] = ''
+ instance_ref, network_info = self._get_running_instance()
+ console_output = self.connection.get_console_output(self.ctxt,
+ instance_ref)
+ self.assertIsInstance(console_output, six.string_types)
+
+ @catch_notimplementederror
+ def test_get_vnc_console(self):
+ instance, network_info = self._get_running_instance(obj=True)
+ vnc_console = self.connection.get_vnc_console(self.ctxt, instance)
+ self.assertIsInstance(vnc_console, ctype.ConsoleVNC)
+
+ @catch_notimplementederror
+ def test_get_spice_console(self):
+ instance_ref, network_info = self._get_running_instance()
+ spice_console = self.connection.get_spice_console(self.ctxt,
+ instance_ref)
+ self.assertIsInstance(spice_console, ctype.ConsoleSpice)
+
+ @catch_notimplementederror
+ def test_get_rdp_console(self):
+ instance_ref, network_info = self._get_running_instance()
+ rdp_console = self.connection.get_rdp_console(self.ctxt, instance_ref)
+ self.assertIsInstance(rdp_console, ctype.ConsoleRDP)
+
+ @catch_notimplementederror
+ def test_get_serial_console(self):
+ instance_ref, network_info = self._get_running_instance()
+ serial_console = self.connection.get_serial_console(self.ctxt,
+ instance_ref)
+ self.assertIsInstance(serial_console, ctype.ConsoleSerial)
+
+ @catch_notimplementederror
+ def test_get_console_pool_info(self):
+ instance_ref, network_info = self._get_running_instance()
+ console_pool = self.connection.get_console_pool_info(instance_ref)
+ self.assertIn('address', console_pool)
+ self.assertIn('username', console_pool)
+ self.assertIn('password', console_pool)
+
+ @catch_notimplementederror
+ def test_refresh_security_group_rules(self):
+ # FIXME: Create security group and add the instance to it
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.refresh_security_group_rules(1)
+
+ @catch_notimplementederror
+ def test_refresh_security_group_members(self):
+ # FIXME: Create security group and add the instance to it
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.refresh_security_group_members(1)
+
+ @catch_notimplementederror
+ def test_refresh_instance_security_rules(self):
+ # FIXME: Create security group and add the instance to it
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.refresh_instance_security_rules(instance_ref)
+
+ @catch_notimplementederror
+ def test_refresh_provider_fw_rules(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.refresh_provider_fw_rules()
+
+ @catch_notimplementederror
+ def test_ensure_filtering_for_instance(self):
+ instance = test_utils.get_test_instance(obj=True)
+ network_info = test_utils.get_test_network_info()
+ self.connection.ensure_filtering_rules_for_instance(instance,
+ network_info)
+
+ @catch_notimplementederror
+ def test_unfilter_instance(self):
+ instance_ref = test_utils.get_test_instance()
+ network_info = test_utils.get_test_network_info()
+ self.connection.unfilter_instance(instance_ref, network_info)
+
+ @catch_notimplementederror
+ def test_live_migration(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.live_migration(self.ctxt, instance_ref, 'otherhost',
+ lambda *a: None, lambda *a: None)
+
+ @catch_notimplementederror
+ def _check_available_resource_fields(self, host_status):
+ keys = ['vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
+ 'memory_mb_used', 'hypervisor_type', 'hypervisor_version',
+ 'hypervisor_hostname', 'cpu_info', 'disk_available_least',
+ 'supported_instances']
+ for key in keys:
+ self.assertIn(key, host_status)
+ self.assertIsInstance(host_status['hypervisor_version'], int)
+
+ @catch_notimplementederror
+ def test_get_available_resource(self):
+ available_resource = self.connection.get_available_resource(
+ 'myhostname')
+ self._check_available_resource_fields(available_resource)
+
+ @catch_notimplementederror
+ def test_get_available_nodes(self):
+ self.connection.get_available_nodes(False)
+
+ @catch_notimplementederror
+ def _check_host_cpu_status_fields(self, host_cpu_status):
+ self.assertIn('kernel', host_cpu_status)
+ self.assertIn('idle', host_cpu_status)
+ self.assertIn('user', host_cpu_status)
+ self.assertIn('iowait', host_cpu_status)
+ self.assertIn('frequency', host_cpu_status)
+
+ @catch_notimplementederror
+ def test_get_host_cpu_stats(self):
+ host_cpu_status = self.connection.get_host_cpu_stats()
+ self._check_host_cpu_status_fields(host_cpu_status)
+
+ @catch_notimplementederror
+ def test_set_host_enabled(self):
+ self.connection.set_host_enabled('a useless argument?', True)
+
+ @catch_notimplementederror
+ def test_get_host_uptime(self):
+ self.connection.get_host_uptime('a useless argument?')
+
+ @catch_notimplementederror
+ def test_host_power_action_reboot(self):
+ self.connection.host_power_action('a useless argument?', 'reboot')
+
+ @catch_notimplementederror
+ def test_host_power_action_shutdown(self):
+ self.connection.host_power_action('a useless argument?', 'shutdown')
+
+ @catch_notimplementederror
+ def test_host_power_action_startup(self):
+ self.connection.host_power_action('a useless argument?', 'startup')
+
+ @catch_notimplementederror
+ def test_add_to_aggregate(self):
+ self.connection.add_to_aggregate(self.ctxt, 'aggregate', 'host')
+
+ @catch_notimplementederror
+ def test_remove_from_aggregate(self):
+ self.connection.remove_from_aggregate(self.ctxt, 'aggregate', 'host')
+
+ def test_events(self):
+ got_events = []
+
+ def handler(event):
+ got_events.append(event)
+
+ self.connection.register_event_listener(handler)
+
+ event1 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STARTED)
+ event2 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_PAUSED)
+
+ self.connection.emit_event(event1)
+ self.connection.emit_event(event2)
+ want_events = [event1, event2]
+ self.assertEqual(want_events, got_events)
+
+ event3 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_RESUMED)
+ event4 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STOPPED)
+
+ self.connection.emit_event(event3)
+ self.connection.emit_event(event4)
+
+ want_events = [event1, event2, event3, event4]
+ self.assertEqual(want_events, got_events)
+
+ def test_event_bad_object(self):
+ # Passing in something which does not inherit
+ # from virtevent.Event
+
+ def handler(event):
+ pass
+
+ self.connection.register_event_listener(handler)
+
+ badevent = {
+ "foo": "bar"
+ }
+
+ self.assertRaises(ValueError,
+ self.connection.emit_event,
+ badevent)
+
+ def test_event_bad_callback(self):
+ # Check that if a callback raises an exception,
+ # it does not propagate back out of the
+ # 'emit_event' call
+
+ def handler(event):
+ raise Exception("Hit Me!")
+
+ self.connection.register_event_listener(handler)
+
+ event1 = virtevent.LifecycleEvent(
+ "cef19ce0-0ca2-11df-855d-b19fbce37686",
+ virtevent.EVENT_LIFECYCLE_STARTED)
+
+ self.connection.emit_event(event1)
+
+ def test_set_bootable(self):
+ self.assertRaises(NotImplementedError, self.connection.set_bootable,
+ 'instance', True)
+
+ @catch_notimplementederror
+ def test_get_instance_disk_info(self):
+ # This should be implemented by any driver that supports live migrate.
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.get_instance_disk_info(instance_ref['name'],
+ block_device_info={})
+
+
+class AbstractDriverTestCase(_VirtDriverTestCase, test.TestCase):
+ def setUp(self):
+ self.driver_module = "nova.virt.driver.ComputeDriver"
+ super(AbstractDriverTestCase, self).setUp()
+
+
+class FakeConnectionTestCase(_VirtDriverTestCase, test.TestCase):
+ def setUp(self):
+ self.driver_module = 'nova.virt.fake.FakeDriver'
+ fake.set_nodes(['myhostname'])
+ super(FakeConnectionTestCase, self).setUp()
+
+ def _check_available_resource_fields(self, host_status):
+ super(FakeConnectionTestCase, self)._check_available_resource_fields(
+ host_status)
+
+ hypervisor_type = host_status['hypervisor_type']
+ supported_instances = host_status['supported_instances']
+ try:
+ # supported_instances could be JSON wrapped
+ supported_instances = jsonutils.loads(supported_instances)
+ except TypeError:
+ pass
+ self.assertTrue(any(hypervisor_type in x for x in supported_instances))
+
+
+class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ # Point _VirtDriverTestCase at the right module
+ self.driver_module = 'nova.virt.libvirt.LibvirtDriver'
+ super(LibvirtConnTestCase, self).setUp()
+ self.stubs.Set(self.connection,
+ '_set_host_enabled', mock.MagicMock())
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.context.get_admin_context',
+ self._fake_admin_context))
+
+ def _fake_admin_context(self, *args, **kwargs):
+ return self.ctxt
+
+ def test_force_hard_reboot(self):
+ self.flags(wait_soft_reboot_seconds=0, group='libvirt')
+ self.test_reboot()
+
+ def test_migrate_disk_and_power_off(self):
+ # there is lack of fake stuff to execute this method. so pass.
+ self.skipTest("Test nothing, but this method"
+ " needed to override superclass.")
+
+ def test_internal_set_host_enabled(self):
+ self.mox.UnsetStubs()
+ service_mock = mock.MagicMock()
+
+ # Previous status of the service: disabled: False
+ service_mock.configure_mock(disabled_reason='None',
+ disabled=False)
+ with mock.patch.object(objects.Service, "get_by_compute_host",
+ return_value=service_mock):
+ self.connection._set_host_enabled(False, 'ERROR!')
+ self.assertTrue(service_mock.disabled)
+ self.assertEqual(service_mock.disabled_reason, 'AUTO: ERROR!')
+
+ def test_set_host_enabled_when_auto_disabled(self):
+ self.mox.UnsetStubs()
+ service_mock = mock.MagicMock()
+
+ # Previous status of the service: disabled: True, 'AUTO: ERROR'
+ service_mock.configure_mock(disabled_reason='AUTO: ERROR',
+ disabled=True)
+ with mock.patch.object(objects.Service, "get_by_compute_host",
+ return_value=service_mock):
+ self.connection._set_host_enabled(True)
+ self.assertFalse(service_mock.disabled)
+ self.assertEqual(service_mock.disabled_reason, 'None')
+
+ def test_set_host_enabled_when_manually_disabled(self):
+ self.mox.UnsetStubs()
+ service_mock = mock.MagicMock()
+
+ # Previous status of the service: disabled: True, 'Manually disabled'
+ service_mock.configure_mock(disabled_reason='Manually disabled',
+ disabled=True)
+ with mock.patch.object(objects.Service, "get_by_compute_host",
+ return_value=service_mock):
+ self.connection._set_host_enabled(True)
+ self.assertTrue(service_mock.disabled)
+ self.assertEqual(service_mock.disabled_reason, 'Manually disabled')
+
+ def test_set_host_enabled_dont_override_manually_disabled(self):
+ self.mox.UnsetStubs()
+ service_mock = mock.MagicMock()
+
+ # Previous status of the service: disabled: True, 'Manually disabled'
+ service_mock.configure_mock(disabled_reason='Manually disabled',
+ disabled=True)
+ with mock.patch.object(objects.Service, "get_by_compute_host",
+ return_value=service_mock):
+ self.connection._set_host_enabled(False, 'ERROR!')
+ self.assertTrue(service_mock.disabled)
+ self.assertEqual(service_mock.disabled_reason, 'Manually disabled')
+
+ @catch_notimplementederror
+ @mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs')
+ def test_unplug_vifs_with_destroy_vifs_false(self, unplug_vifs_mock):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.cleanup(self.ctxt, instance_ref, network_info,
+ destroy_vifs=False)
+ self.assertEqual(unplug_vifs_mock.call_count, 0)
+
+ @catch_notimplementederror
+ @mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs')
+ def test_unplug_vifs_with_destroy_vifs_true(self, unplug_vifs_mock):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.cleanup(self.ctxt, instance_ref, network_info,
+ destroy_vifs=True)
+ self.assertEqual(unplug_vifs_mock.call_count, 1)
+ unplug_vifs_mock.assert_called_once_with(instance_ref,
+ network_info, True)
diff --git a/nova/tests/virt/test_volumeutils.py b/nova/tests/unit/virt/test_volumeutils.py
index 8ba7e50399..8ba7e50399 100644
--- a/nova/tests/virt/test_volumeutils.py
+++ b/nova/tests/unit/virt/test_volumeutils.py
diff --git a/nova/tests/virt/vmwareapi/__init__.py b/nova/tests/unit/virt/vmwareapi/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/virt/vmwareapi/__init__.py
+++ b/nova/tests/unit/virt/vmwareapi/__init__.py
diff --git a/nova/tests/virt/vmwareapi/fake.py b/nova/tests/unit/virt/vmwareapi/fake.py
index 5bd2b7fb4f..5bd2b7fb4f 100644
--- a/nova/tests/virt/vmwareapi/fake.py
+++ b/nova/tests/unit/virt/vmwareapi/fake.py
diff --git a/nova/tests/unit/virt/vmwareapi/stubs.py b/nova/tests/unit/virt/vmwareapi/stubs.py
new file mode 100644
index 0000000000..d126b36e0f
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/stubs.py
@@ -0,0 +1,131 @@
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Stubouts for the test suite
+"""
+
+import contextlib
+
+import mock
+from oslo.vmware import exceptions as vexc
+
+from nova import db
+from nova.tests.unit import test_flavors
+from nova.tests.unit.virt.vmwareapi import fake
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import network_util
+
+
def fake_get_vim_object(arg):
    """Stubs out the VMwareAPISession's get_vim_object method.

    Always returns a fresh in-memory FakeVim regardless of *arg*.
    """
    return fake.FakeVim()
+
+
@property
def fake_vim_prop(arg):
    """Stubs out the VMwareAPISession's vim property access method.

    Installed via stubs.Set(driver.VMwareAPISession, "vim", ...), so the
    decorated property replaces the session's vim descriptor wholesale.
    """
    return fake.get_fake_vim_object(arg)
+
+
def fake_is_vim_object(arg, module):
    """Stubs out the VMwareAPISession's is_vim_object method.

    True iff *module* is one of the FakeVim doubles used by these tests.
    """
    return isinstance(module, fake.FakeVim)
+
+
def fake_temp_method_exception():
    """Raise the fault VMware reports for an unauthenticated session."""
    fault_list = [vexc.NOT_AUTHENTICATED]
    raise vexc.VimFaultException(fault_list,
                                 "Session Empty/Not Authenticated")
+
+
def fake_temp_session_exception():
    """Raise a VimConnectionException simulating a dropped connection."""
    message = "it's a fake!"
    raise vexc.VimConnectionException(message, "Session Exception")
+
+
def fake_session_file_exception():
    """Raise the fault VMware reports when a datastore file exists."""
    # NOTE: the second argument is an Exception instance rather than a
    # string, mirroring the shape the production code tolerates.
    raise vexc.VimFaultException([vexc.FILE_ALREADY_EXISTS],
                                 Exception('fake'))
+
+
def fake_session_permission_exception():
    """Raise a NO_PERMISSION fault carrying a representative detail dict."""
    details = {'privilegeId': 'Resource.AssignVMToPool',
               'object': 'domain-c7'}
    raise vexc.VimFaultException(
        [vexc.NO_PERMISSION],
        'Permission to perform this operation was denied.',
        details=details)
+
+
def _fake_flavor_get(context, id):
    """Stub for db.flavor_get: match by id, else hand back an m1.micro."""
    match = next((flavor for flavor in test_flavors.DEFAULT_FLAVORS
                  if flavor['id'] == id), None)
    if match is not None:
        return match
    # Unknown ids fall back to an m1.micro-shaped flavor record.
    return {'memory_mb': 128, 'root_gb': 0, 'deleted_at': None,
            'name': 'm1.micro', 'deleted': 0, 'created_at': None,
            'ephemeral_gb': 0, 'updated_at': None,
            'disabled': False, 'vcpus': 1, 'extra_specs': {},
            'swap': 0, 'rxtx_factor': 1.0, 'is_public': True,
            'flavorid': '1', 'vcpu_weight': None, 'id': 2}
+
+
def set_stubs(stubs):
    """Wire every vmwareapi fake into the modules under test."""
    # The individual Set calls are independent of one another.
    stubs.Set(db, 'flavor_get', _fake_flavor_get)
    stubs.Set(images, 'fetch_image', fake.fake_fetch_image)
    stubs.Set(images, 'upload_image', fake.fake_upload_image)
    stubs.Set(network_util, 'get_network_with_the_name',
              fake.fake_get_network)
    stubs.Set(driver.VMwareAPISession, '_is_vim_object',
              fake_is_vim_object)
    stubs.Set(driver.VMwareAPISession, 'vim', fake_vim_prop)
+
+
def fake_suds_context(calls=None):
    """Generate a suds client which automatically mocks all SOAP method calls.

    Every SOAP call made through the client is recorded in <calls>, keyed
    by method name.  Pre-populating <calls> with Mock objects lets a test
    script the behaviour of specific API calls.
    """
    calls = calls or {}

    class fake_factory:
        def create(self, name):
            # Complex types are only inspected, never invoked.
            return mock.NonCallableMagicMock(name=name)

    class fake_service:
        def __getattr__(self, attr_name):
            try:
                return calls[attr_name]
            except KeyError:
                calls[attr_name] = mock.MagicMock(name=attr_name)
                return calls[attr_name]

    class fake_client:
        # NOTE: 'wdsl_url' (sic) preserves the original parameter name.
        def __init__(self, wdsl_url, **kwargs):
            self.service = fake_service()
            self.factory = fake_factory()

    return contextlib.nested(
        mock.patch('suds.client.Client', fake_client),
        # No real host is contacted, so there is no reason to wait
        # between retries.
        mock.patch.object(driver, 'TIME_BETWEEN_API_CALL_RETRIES', 0)
    )
diff --git a/nova/tests/unit/virt/vmwareapi/test_configdrive.py b/nova/tests/unit/virt/vmwareapi/test_configdrive.py
new file mode 100644
index 0000000000..7b4b1bba1f
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_configdrive.py
@@ -0,0 +1,168 @@
+# Copyright 2013 IBM Corp.
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+import mock
+import mox
+
+from nova import context
+from nova.image import glance
+from nova import test
+from nova.tests.unit import fake_instance
+import nova.tests.unit.image.fake
+from nova.tests.unit import utils
+from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
+from nova.tests.unit.virt.vmwareapi import stubs
+from nova.virt import fake
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import vm_util
+from nova.virt.vmwareapi import vmops
+
+
class ConfigDriveTestCase(test.NoDBTestCase):
    """Verify config-drive creation/attachment in the VC driver's spawn()."""

    # Tests share module-global fake vSphere state; serialize them.
    REQUIRES_LOCKING = True

    @mock.patch.object(driver.VMwareVCDriver, '_register_openstack_extension')
    def setUp(self, mock_register):
        super(ConfigDriveTestCase, self).setUp()
        vm_util.vm_refs_cache_reset()
        self.context = context.RequestContext('fake', 'fake', is_admin=False)
        cluster_name = 'test_cluster'
        self.flags(cluster_name=[cluster_name],
                   host_ip='test_url',
                   host_username='test_username',
                   host_password='test_pass',
                   use_linked_clone=False, group='vmware')
        self.flags(vnc_enabled=False)
        vmwareapi_fake.reset()
        stubs.set_stubs(self.stubs)
        nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
        # NOTE(review): the FakeVirtAPI *class* (not an instance) is passed
        # here — the driver appears not to call it in these tests; confirm.
        self.conn = driver.VMwareVCDriver(fake.FakeVirtAPI)
        self.network_info = utils.get_test_network_info()
        self.node_name = '%s(%s)' % (self.conn.dict_mors.keys()[0],
                                     cluster_name)
        image_ref = nova.tests.unit.image.fake.get_valid_image_id()
        instance_values = {
            'vm_state': 'building',
            'project_id': 'fake',
            'user_id': 'fake',
            'name': '1',
            'kernel_id': '1',
            'ramdisk_id': '1',
            'mac_addresses': [{'address': 'de:ad:be:ef:be:ef'}],
            'memory_mb': 8192,
            'flavor': 'm1.large',
            'instance_type_id': 0,
            'vcpus': 4,
            'root_gb': 80,
            'image_ref': image_ref,
            'host': 'fake_host',
            'task_state': 'scheduling',
            'reservation_id': 'r-3t8muvr0',
            'id': 1,
            'uuid': 'fake-uuid',
            'node': self.node_name,
            'metadata': [],
            'expected_attrs': ['system_metadata'],
        }
        self.test_instance = fake_instance.fake_instance_obj(self.context,
                                                             **instance_values)

        # NOTE(review): this passes the nova.context *module*, not
        # self.context — the fake image service ignores it, but confirm
        # that was intended.
        (image_service, image_id) = glance.get_remote_image_service(context,
                                                                    image_ref)
        metadata = image_service.show(context, image_id)
        self.image = {
            'id': image_ref,
            'disk_format': 'vmdk',
            'size': int(metadata['size']),
        }

        class FakeInstanceMetadata(object):
            # Minimal InstanceMetadata double: no metadata at all.
            def __init__(self, instance, content=None, extra_md=None):
                pass

            def metadata_for_config_drive(self):
                return []

        self.useFixture(fixtures.MonkeyPatch(
            'nova.api.metadata.base.InstanceMetadata',
            FakeInstanceMetadata))

        def fake_make_drive(_self, _path):
            pass
        # We can't actually make a config drive v2 because ensure_tree has
        # been faked out
        self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
                       'make_drive', fake_make_drive)

        def fake_upload_iso_to_datastore(iso_path, instance, **kwargs):
            pass
        self.stubs.Set(images,
                       'upload_iso_to_datastore',
                       fake_upload_iso_to_datastore)

    def tearDown(self):
        """Reset the shared fake vSphere and image-service state."""
        super(ConfigDriveTestCase, self).tearDown()
        vmwareapi_fake.cleanup()
        nova.tests.unit.image.fake.FakeImageService_reset()

    def _spawn_vm(self, injected_files=None, admin_password=None,
                  block_device_info=None):
        # Spawn self.test_instance through the driver's public entry point.
        injected_files = injected_files or []
        self.conn.spawn(self.context, self.test_instance, self.image,
                        injected_files=injected_files,
                        admin_password=admin_password,
                        network_info=self.network_info,
                        block_device_info=block_device_info)

    def test_create_vm_with_config_drive_verify_method_invocation(self):
        self.test_instance.config_drive = 'True'
        self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive')
        self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
        self.conn._vmops._create_config_drive(self.test_instance,
                                              mox.IgnoreArg(),
                                              mox.IgnoreArg(),
                                              mox.IgnoreArg(),
                                              mox.IgnoreArg(),
                                              mox.IgnoreArg(),
                                              mox.IgnoreArg()
                                              ).AndReturn('[ds1] fake.iso')
        self.conn._vmops._attach_cdrom_to_vm(mox.IgnoreArg(),
                                             mox.IgnoreArg(),
                                             mox.IgnoreArg(),
                                             mox.IgnoreArg())
        self.mox.ReplayAll()
        # if spawn does not call the _create_config_drive or
        # _attach_cdrom_to_vm call with the correct set of parameters
        # then mox's VerifyAll will throw a Expected methods never called
        # Exception
        self._spawn_vm()

    def test_create_vm_without_config_drive(self):
        self.test_instance.config_drive = None
        self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive')
        self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
        self.mox.ReplayAll()
        # if spawn ends up calling _create_config_drive or
        # _attach_cdrom_to_vm then mox will log a Unexpected method call
        # exception
        self._spawn_vm()

    def test_create_vm_with_config_drive(self):
        # Smoke test: spawn with config drive enabled must not raise.
        self.test_instance.config_drive = 'True'
        self._spawn_vm()
diff --git a/nova/tests/unit/virt/vmwareapi/test_driver_api.py b/nova/tests/unit/virt/vmwareapi/test_driver_api.py
new file mode 100644
index 0000000000..5f7eb76a62
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_driver_api.py
@@ -0,0 +1,2650 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2012 VMware, Inc.
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suite for VMwareAPI.
+"""
+
+import collections
+import contextlib
+import copy
+import datetime
+
+from eventlet import greenthread
+import mock
+import mox
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import timeutils
+from oslo.utils import units
+from oslo.vmware import exceptions as vexc
+from oslo.vmware import pbm
+from oslo.vmware import vim
+from oslo.vmware import vim_util as oslo_vim_util
+import suds
+
+from nova import block_device
+from nova.compute import api as compute_api
+from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import vm_states
+from nova import context
+from nova import exception
+from nova.image import glance
+from nova.network import model as network_model
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit import fake_instance
+import nova.tests.unit.image.fake
+from nova.tests.unit import matchers
+from nova.tests.unit import test_flavors
+from nova.tests.unit import utils
+from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
+from nova.tests.unit.virt.vmwareapi import stubs
+from nova import utils as nova_utils
+from nova.virt import driver as v_driver
+from nova.virt.vmwareapi import constants
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import ds_util
+from nova.virt.vmwareapi import error_util
+from nova.virt.vmwareapi import imagecache
+from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import vif
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
+from nova.virt.vmwareapi import vmops
+from nova.virt.vmwareapi import volumeops
+
+CONF = cfg.CONF
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('remove_unused_original_minimum_age_seconds',
+ 'nova.virt.imagecache')
+
+
class fake_vm_ref(object):
    """Minimal stand-in for a VirtualMachine managed-object reference."""

    def __init__(self):
        # Arbitrary but stable values the tests assert against.
        self._type = 'VirtualMachine'
        self.value = 4
+
+
class fake_service_content(object):
    """Fake reply wrapper for a RetrieveServiceContent SOAP call."""

    def __init__(self):
        content = vmwareapi_fake.DataObject()
        content.fake = 'fake'
        self.ServiceContent = content
+
+
class VMwareSudsTest(test.NoDBTestCase):
    """Regression test: deep-copying a suds-backed Vim must raise cleanly."""

    def setUp(self):
        super(VMwareSudsTest, self).setUp()

        def new_client_init(self, url, **kwargs):
            # Keep the suds client from fetching a real WSDL over the wire.
            return

        mock.patch.object(suds.client.Client,
                          '__init__', new=new_client_init).start()
        self.vim = self._vim_create()
        self.addCleanup(mock.patch.stopall)

    def _mock_getattr(self, attr_name):
        # Vim construction is only expected to issue RetrieveServiceContent.
        self.assertEqual("RetrieveServiceContent", attr_name)
        return lambda obj, **kwargs: fake_service_content()

    def _vim_create(self):
        # Build a Vim whose only SOAP call is answered by the fake above.
        with mock.patch.object(vim.Vim, '__getattr__', self._mock_getattr):
            return vim.Vim()

    def test_exception_with_deepcopy(self):
        self.assertIsNotNone(self.vim)
        # deepcopy of a live Vim object must surface a VimException
        # rather than crashing in suds internals.
        self.assertRaises(vexc.VimException,
                          copy.deepcopy, self.vim)
+
+
def _fake_create_session(inst):
    """Install a canned, already-authenticated session object on *inst*."""
    session = vmwareapi_fake.DataObject()
    session.key = 'fake_key'
    session.userName = 'fake_username'
    session._pbm = None
    session._pbm_wsdl_loc = None
    inst._session = session
+
+
class VMwareDriverStartupTestCase(test.NoDBTestCase):
    """Driver construction must fail fast when connection flags are absent."""

    def _start_driver_with_flags(self, expected_exception_type, startup_flags):
        # Build the driver under the given flags and assert the *exact*
        # exception type raised (assertRaises alone matches subclasses).
        self.flags(**startup_flags)
        with mock.patch(
                'nova.virt.vmwareapi.driver.VMwareAPISession.__init__'):
            raised = self.assertRaises(
                Exception, driver.VMwareVCDriver, None)  # noqa
            self.assertIs(type(raised), expected_exception_type)

    def test_start_driver_no_user(self):
        flags = dict(host_ip='ip', host_password='password',
                     group='vmware')
        self._start_driver_with_flags(Exception, flags)

    def test_start_driver_no_host(self):
        flags = dict(host_username='username', host_password='password',
                     group='vmware')
        self._start_driver_with_flags(Exception, flags)

    def test_start_driver_no_password(self):
        flags = dict(host_ip='ip', host_username='username',
                     group='vmware')
        self._start_driver_with_flags(Exception, flags)

    def test_start_driver_with_user_host_password(self):
        # Getting the InvalidInput exception (from the deliberately broken
        # datastore_regex) signifies that no exception is raised regarding
        # missing user/password/host.
        flags = dict(host_ip='ip', host_password='password',
                     host_username="user", datastore_regex="bad(regex",
                     group='vmware')
        self._start_driver_with_flags(nova.exception.InvalidInput, flags)
+
+
class VMwareSessionTestCase(test.NoDBTestCase):
    """Check how _call_method routes calls through invoke_api."""

    @mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
                       return_value=False)
    def test_call_method(self, mock_is_vim):
        # For non-vim modules the session's vim handle is appended to
        # the invoke_api arguments.
        with contextlib.nested(
            mock.patch.object(driver.VMwareAPISession, '_create_session',
                              _fake_create_session),
            mock.patch.object(driver.VMwareAPISession, 'invoke_api'),
        ) as (fake_create, fake_invoke):
            session = driver.VMwareAPISession()
            session._vim = mock.Mock()
            module = mock.Mock()
            session._call_method(module, 'fira')
            fake_invoke.assert_called_once_with(module, 'fira', session._vim)

    @mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
                       return_value=True)
    def test_call_method_vim(self, mock_is_vim):
        # When the module IS a vim object, no extra handle is passed.
        with contextlib.nested(
            mock.patch.object(driver.VMwareAPISession, '_create_session',
                              _fake_create_session),
            mock.patch.object(driver.VMwareAPISession, 'invoke_api'),
        ) as (fake_create, fake_invoke):
            session = driver.VMwareAPISession()
            module = mock.Mock()
            session._call_method(module, 'fira')
            fake_invoke.assert_called_once_with(module, 'fira')
+
+
+class VMwareAPIVMTestCase(test.NoDBTestCase):
+ """Unit tests for Vmware API connection calls."""
+
+ REQUIRES_LOCKING = True
+
    @mock.patch.object(driver.VMwareVCDriver, '_register_openstack_extension')
    def setUp(self, mock_register, create_connection=True):
        """Build a VC driver wired to the in-memory fake vSphere backend."""
        super(VMwareAPIVMTestCase, self).setUp()
        vm_util.vm_refs_cache_reset()
        self.context = context.RequestContext('fake', 'fake', is_admin=False)
        cluster_name = 'test_cluster'
        cluster_name2 = 'test_cluster2'
        self.flags(cluster_name=[cluster_name, cluster_name2],
                   host_ip='test_url',
                   host_username='test_username',
                   host_password='test_pass',
                   api_retry_count=1,
                   use_linked_clone=False, group='vmware')
        self.flags(vnc_enabled=False,
                   image_cache_subdirectory_name='vmware_base',
                   my_ip='')
        self.user_id = 'fake'
        self.project_id = 'fake'
        # NOTE(review): this overwrites the is_admin=False context created
        # above — confirm the first RequestContext is really unneeded.
        self.context = context.RequestContext(self.user_id, self.project_id)
        stubs.set_stubs(self.stubs)
        vmwareapi_fake.reset()
        nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
        if create_connection:
            self.conn = driver.VMwareVCDriver(None, False)
            self._set_exception_vars()
            self.node_name = self.conn._resources.keys()[0]
            self.node_name2 = self.conn._resources.keys()[1]
            # Map each node to the datastore the fake backend gives it.
            if cluster_name2 in self.node_name2:
                self.ds = 'ds1'
            else:
                self.ds = 'ds2'

        self.vim = vmwareapi_fake.FakeVim()

        # NOTE(vish): none of the network plugging code is actually
        # being tested
        self.network_info = utils.get_test_network_info()
        image_ref = nova.tests.unit.image.fake.get_valid_image_id()
        (image_service, image_id) = glance.get_remote_image_service(
            self.context, image_ref)
        metadata = image_service.show(self.context, image_id)
        self.image = {
            'id': image_ref,
            'disk_format': 'vmdk',
            'size': int(metadata['size']),
        }
        self.fake_image_uuid = self.image['id']
        # NOTE(review): the image service was already stubbed above; this
        # second call looks redundant — confirm before removing.
        nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
        self.vnc_host = 'ha-host'
        self.instance_without_compute = {'node': None,
                                         'vm_state': 'building',
                                         'project_id': 'fake',
                                         'user_id': 'fake',
                                         'name': '1',
                                         'display_description': '1',
                                         'kernel_id': '1',
                                         'ramdisk_id': '1',
                                         'mac_addresses': [
                                             {'address': 'de:ad:be:ef:be:ef'}
                                         ],
                                         'memory_mb': 8192,
                                         'instance_type': 'm1.large',
                                         'vcpus': 4,
                                         'root_gb': 80,
                                         'image_ref': self.image['id'],
                                         'host': 'fake_host',
                                         'task_state':
                                         'scheduling',
                                         'reservation_id': 'r-3t8muvr0',
                                         'id': 1,
                                         'uuid': 'fake-uuid',
                                         'metadata': []}
+
    def tearDown(self):
        """Reset the shared fake vSphere and image-service state."""
        super(VMwareAPIVMTestCase, self).tearDown()
        vmwareapi_fake.cleanup()
        nova.tests.unit.image.fake.FakeImageService_reset()
+
+ def test_get_host_ip_addr(self):
+ self.assertEqual('test_url', self.conn.get_host_ip_addr())
+
+ def test_init_host_with_no_session(self):
+ self.conn._session = mock.Mock()
+ self.conn._session.vim = None
+ self.conn.init_host('fake_host')
+ self.conn._session._create_session.assert_called_once_with()
+
    def test_init_host(self):
        """init_host() with a live session must not raise."""
        try:
            self.conn.init_host("fake_host")
        except Exception as ex:
            self.fail("init_host raised: %s" % ex)
+
+ def _set_exception_vars(self):
+ self.wait_task = self.conn._session._wait_for_task
+ self.call_method = self.conn._session._call_method
+ self.task_ref = None
+ self.exception = False
+
    def test_cleanup_host(self):
        """cleanup_host() after a successful init_host() must not raise."""
        self.conn.init_host("fake_host")
        try:
            self.conn.cleanup_host("fake_host")
        except Exception as ex:
            self.fail("cleanup_host raised: %s" % ex)
+
+ @mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__')
+ def test_cleanup_host_direct(self, mock_init):
+ mock_init.return_value = None
+ vcdriver = driver.VMwareVCDriver(None, False)
+ vcdriver._session = mock.Mock()
+ vcdriver.cleanup_host("foo")
+ vcdriver._session.vim.client.service.Logout.assert_called_once_with(
+ vcdriver._session.vim.service_content.sessionManager
+ )
+
+ @mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__')
+ def test_cleanup_host_direct_with_bad_logout(self, mock_init):
+ mock_init.return_value = None
+ vcdriver = driver.VMwareVCDriver(None, False)
+ vcdriver._session = mock.Mock()
+ fault = suds.WebFault(mock.Mock(), mock.Mock())
+ vcdriver._session.vim.client.service.Logout.side_effect = fault
+ vcdriver.cleanup_host("foo")
+
+ def test_driver_capabilities(self):
+ self.assertTrue(self.conn.capabilities['has_imagecache'])
+ self.assertFalse(self.conn.capabilities['supports_recreate'])
+
    def test_configuration_linked_clone(self):
        # use_linked_clone=None is an invalid configuration and must be
        # rejected by _validate_configuration.
        self.flags(use_linked_clone=None, group='vmware')
        self.assertRaises(vexc.UseLinkedCloneConfigurationFault,
                          self.conn._validate_configuration)
+
    @mock.patch.object(pbm, 'get_profile_id_by_name')
    def test_configuration_pbm(self, get_profile_mock):
        # A resolvable default policy makes the PBM configuration valid.
        get_profile_mock.return_value = 'fake-profile'
        self.flags(pbm_enabled=True,
                   pbm_default_policy='fake-policy',
                   pbm_wsdl_location='fake-location', group='vmware')
        self.conn._validate_configuration()
+
    @mock.patch.object(pbm, 'get_profile_id_by_name')
    def test_configuration_pbm_bad_default(self, get_profile_mock):
        # An unresolvable default policy must fail configuration validation.
        get_profile_mock.return_value = None
        self.flags(pbm_enabled=True,
                   pbm_wsdl_location='fake-location',
                   pbm_default_policy='fake-policy', group='vmware')
        self.assertRaises(error_util.PbmDefaultPolicyDoesNotExist,
                          self.conn._validate_configuration)
+
    def test_login_retries(self):
        """A transient connection failure on login must be retried once."""
        self.attempts = 0
        self.login_session = vmwareapi_fake.FakeVim()._login()

        def _fake_login(_self):
            # First attempt fails, second succeeds.
            self.attempts += 1
            if self.attempts == 1:
                raise vexc.VimConnectionException('Here is my fake exception')
            return self.login_session

        def _fake_check_session(_self):
            return True

        self.stubs.Set(vmwareapi_fake.FakeVim, '_login', _fake_login)
        self.stubs.Set(vmwareapi_fake.FakeVim, '_check_session',
                       _fake_check_session)

        # Patch sleep so the retry back-off does not slow the test down.
        with mock.patch.object(greenthread, 'sleep'):
            self.conn = driver.VMwareAPISession()
        self.assertEqual(self.attempts, 2)
+
+ def _get_instance_type_by_name(self, type):
+ for instance_type in test_flavors.DEFAULT_FLAVORS:
+ if instance_type['name'] == type:
+ return instance_type
+ if type == 'm1.micro':
+ return {'memory_mb': 128, 'root_gb': 0, 'deleted_at': None,
+ 'name': 'm1.micro', 'deleted': 0, 'created_at': None,
+ 'ephemeral_gb': 0, 'updated_at': None,
+ 'disabled': False, 'vcpus': 1, 'extra_specs': {},
+ 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True,
+ 'flavorid': '1', 'vcpu_weight': None, 'id': 2}
+
    def _create_instance(self, node=None, set_image_ref=True,
                         uuid=None, instance_type='m1.large'):
        """Build self.instance (a fake Instance object) ready for spawning.

        Also records self.instance_node, self.uuid and self.type_data for
        later assertions.
        """
        if not node:
            node = self.node_name
        if not uuid:
            uuid = uuidutils.generate_uuid()
        self.type_data = self._get_instance_type_by_name(instance_type)
        values = {'name': 'fake_name',
                  'id': 1,
                  'uuid': uuid,
                  'project_id': self.project_id,
                  'user_id': self.user_id,
                  'kernel_id': "fake_kernel_uuid",
                  'ramdisk_id': "fake_ramdisk_uuid",
                  'mac_address': "de:ad:be:ef:be:ef",
                  'flavor': instance_type,
                  'node': node,
                  'memory_mb': self.type_data['memory_mb'],
                  'root_gb': self.type_data['root_gb'],
                  'ephemeral_gb': self.type_data['ephemeral_gb'],
                  'vcpus': self.type_data['vcpus'],
                  'swap': self.type_data['swap'],
                  'expected_attrs': ['system_metadata'],
                  }
        if set_image_ref:
            values['image_ref'] = self.fake_image_uuid
        self.instance_node = node
        self.uuid = uuid
        self.instance = fake_instance.fake_instance_obj(
            self.context, **values)
+
    def _create_vm(self, node=None, num_instances=1, uuid=None,
                   instance_type='m1.large', powered_on=True):
        """Create and spawn the VM, then verify its backing record."""
        if not node:
            node = self.node_name
        self._create_instance(node=node, uuid=uuid,
                              instance_type=instance_type)
        # The vm_ref cache must be empty before, and populated after, spawn.
        self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
        self.conn.spawn(self.context, self.instance, self.image,
                        injected_files=[], admin_password=None,
                        network_info=self.network_info,
                        block_device_info=None)
        self._check_vm_record(num_instances=num_instances,
                              powered_on=powered_on)
        self.assertIsNotNone(vm_util.vm_ref_cache_get(self.uuid))
+
+ def _get_vm_record(self):
+ # Get record for VM
+ vms = vmwareapi_fake._get_objects("VirtualMachine")
+ for vm in vms.objects:
+ if vm.get('name') == self.uuid:
+ return vm
+ self.fail('Unable to find VM backing!')
+
    def _check_vm_record(self, num_instances=1, powered_on=True):
        """Check if the spawned VM's properties correspond to the instance in
        the db.
        """
        instances = self.conn.list_instances()
        self.assertEqual(len(instances), num_instances)

        # Get Nova record for VM
        vm_info = self.conn.get_info({'uuid': self.uuid,
                                      'name': 1,
                                      'node': self.instance_node})

        vm = self._get_vm_record()

        # Check that m1.large above turned into the right thing.
        mem_kib = long(self.type_data['memory_mb']) << 10
        vcpus = self.type_data['vcpus']
        self.assertEqual(vm_info['max_mem'], mem_kib)
        self.assertEqual(vm_info['mem'], mem_kib)
        self.assertEqual(vm.get("summary.config.instanceUuid"), self.uuid)
        self.assertEqual(vm.get("summary.config.numCpu"), vcpus)
        self.assertEqual(vm.get("summary.config.memorySizeMB"),
                         self.type_data['memory_mb'])

        # The third virtual device is expected to be the E1000 NIC.
        self.assertEqual(
            vm.get("config.hardware.device").VirtualDevice[2].obj_name,
            "ns0:VirtualE1000")
        if powered_on:
            # Check that the VM is running according to Nova
            self.assertEqual(power_state.RUNNING, vm_info['state'])

            # Check that the VM is running according to vSphere API.
            self.assertEqual('poweredOn', vm.get("runtime.powerState"))
        else:
            # Check that the VM is not running according to Nova
            self.assertEqual(power_state.SHUTDOWN, vm_info['state'])

            # Check that the VM is not running according to vSphere API.
            self.assertEqual('poweredOff', vm.get("runtime.powerState"))

        # Both NVP extraConfig keys must have been written during spawn.
        found_vm_uuid = False
        found_iface_id = False
        extras = vm.get("config.extraConfig")
        for c in extras.OptionValue:
            if (c.key == "nvp.vm-uuid" and c.value == self.instance['uuid']):
                found_vm_uuid = True
            if (c.key == "nvp.iface-id.0" and c.value == "vif-xxx-yyy-zzz"):
                found_iface_id = True

        self.assertTrue(found_vm_uuid)
        self.assertTrue(found_iface_id)
+
    def _check_vm_info(self, info, pwr_state=power_state.RUNNING):
        """Check if the get_info returned values correspond to the instance
        object in the db.
        """
        # memory_mb is in MiB; get_info reports KiB, hence the << 10.
        mem_kib = long(self.type_data['memory_mb']) << 10
        self.assertEqual(info["state"], pwr_state)
        self.assertEqual(info["max_mem"], mem_kib)
        self.assertEqual(info["mem"], mem_kib)
        self.assertEqual(info["num_cpu"], self.type_data['vcpus'])
+
+ def test_instance_exists(self):
+ self._create_vm()
+ self.assertTrue(self.conn.instance_exists(self.instance))
+ invalid_instance = dict(uuid='foo', name='bar', node=self.node_name)
+ self.assertFalse(self.conn.instance_exists(invalid_instance))
+
+ def test_list_instances(self):
+ instances = self.conn.list_instances()
+ self.assertEqual(len(instances), 0)
+
+ def test_list_instances_1(self):
+ self._create_vm()
+ instances = self.conn.list_instances()
+ self.assertEqual(len(instances), 1)
+
+ def test_list_instance_uuids(self):
+ self._create_vm()
+ uuids = self.conn.list_instance_uuids()
+ self.assertEqual(len(uuids), 1)
+
+ def test_list_instance_uuids_invalid_uuid(self):
+ self._create_vm(uuid='fake_id')
+ uuids = self.conn.list_instance_uuids()
+ self.assertEqual(len(uuids), 0)
+
+ def _cached_files_exist(self, exists=True):
+ cache = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.vmdk' % self.fake_image_uuid)
+ if exists:
+ self.assertTrue(vmwareapi_fake.get_file(str(cache)))
+ else:
+ self.assertFalse(vmwareapi_fake.get_file(str(cache)))
+
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
+ 'from_image')
+ def test_instance_dir_disk_created(self, mock_from_image):
+ """Test image file is cached when even when use_linked_clone
+ is False
+ """
+ img_props = images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ linked_clone=False)
+
+ mock_from_image.return_value = img_props
+ self._create_vm()
+ path = ds_util.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
+ self.assertTrue(vmwareapi_fake.get_file(str(path)))
+ self._cached_files_exist()
+
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
+ 'from_image')
+ def test_cache_dir_disk_created(self, mock_from_image):
+ """Test image disk is cached when use_linked_clone is True."""
+ self.flags(use_linked_clone=True, group='vmware')
+
+ img_props = images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ file_size=1 * units.Ki,
+ disk_type=constants.DISK_TYPE_SPARSE)
+
+ mock_from_image.return_value = img_props
+
+ self._create_vm()
+ path = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.vmdk' % self.fake_image_uuid)
+ root = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.80.vmdk' % self.fake_image_uuid)
+ self.assertTrue(vmwareapi_fake.get_file(str(path)))
+ self.assertTrue(vmwareapi_fake.get_file(str(root)))
+
+ def _iso_disk_type_created(self, instance_type='m1.large'):
+ self.image['disk_format'] = 'iso'
+ self._create_vm(instance_type=instance_type)
+ path = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.iso' % self.fake_image_uuid)
+ self.assertTrue(vmwareapi_fake.get_file(str(path)))
+
+ def test_iso_disk_type_created(self):
+ self._iso_disk_type_created()
+ path = ds_util.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
+ self.assertTrue(vmwareapi_fake.get_file(str(path)))
+
+ def test_iso_disk_type_created_with_root_gb_0(self):
+ self._iso_disk_type_created(instance_type='m1.micro')
+ path = ds_util.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
+ self.assertFalse(vmwareapi_fake.get_file(str(path)))
+
    def test_iso_disk_cdrom_attach(self):
        """Spawning from an iso image attaches the cached iso as a CDROM."""
        iso_path = ds_util.DatastorePath(self.ds, 'vmware_base',
                                         self.fake_image_uuid,
                                         '%s.iso' % self.fake_image_uuid)

        def fake_attach_cdrom(vm_ref, instance, data_store_ref,
                              iso_uploaded_path):
            # The attached path must be the cached iso location.
            self.assertEqual(iso_uploaded_path, str(iso_path))

        self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
                       fake_attach_cdrom)
        self.image['disk_format'] = 'iso'
        self._create_vm()
+
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_iso_disk_cdrom_attach_with_config_drive(self,
                                                     mock_from_image):
        """Image iso and config-drive iso are both attached, in order."""
        img_props = images.VMwareImage(
            image_id=self.fake_image_uuid,
            file_size=80 * units.Gi,
            file_type='iso',
            linked_clone=False)

        mock_from_image.return_value = img_props

        self.flags(force_config_drive=True)
        # Expected attach order: first the image iso, then the config drive.
        iso_path = [
            ds_util.DatastorePath(self.ds, 'vmware_base',
                                  self.fake_image_uuid,
                                  '%s.iso' % self.fake_image_uuid),
            ds_util.DatastorePath(self.ds, 'fake-config-drive')]
        self.iso_index = 0

        def fake_create_config_drive(instance, injected_files, password,
                                     data_store_name, folder, uuid, cookies):
            return 'fake-config-drive'

        def fake_attach_cdrom(vm_ref, instance, data_store_ref,
                              iso_uploaded_path):
            self.assertEqual(iso_uploaded_path, str(iso_path[self.iso_index]))
            self.iso_index += 1

        self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
                       fake_attach_cdrom)
        self.stubs.Set(self.conn._vmops, '_create_config_drive',
                       fake_create_config_drive)

        self.image['disk_format'] = 'iso'
        self._create_vm()
        # Exactly two CDROM attachments must have happened.
        self.assertEqual(self.iso_index, 2)
+
    def test_cdrom_attach_with_config_drive(self):
        """A vmdk spawn with force_config_drive attaches the config iso."""
        self.flags(force_config_drive=True)

        iso_path = ds_util.DatastorePath(self.ds, 'fake-config-drive')
        self.cd_attach_called = False

        def fake_create_config_drive(instance, injected_files, password,
                                     data_store_name, folder, uuid, cookies):
            return 'fake-config-drive'

        def fake_attach_cdrom(vm_ref, instance, data_store_ref,
                              iso_uploaded_path):
            self.assertEqual(iso_uploaded_path, str(iso_path))
            self.cd_attach_called = True

        self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
                       fake_attach_cdrom)
        self.stubs.Set(self.conn._vmops, '_create_config_drive',
                       fake_create_config_drive)

        self._create_vm()
        self.assertTrue(self.cd_attach_called)
+
    def test_spawn(self):
        """Basic spawn: the VM must come up RUNNING."""
        self._create_vm()
        info = self.conn.get_info({'uuid': self.uuid,
                                   'node': self.instance_node})
        self._check_vm_info(info, power_state.RUNNING)
+
+ def test_spawn_vm_ref_cached(self):
+ uuid = uuidutils.generate_uuid()
+ self.assertIsNone(vm_util.vm_ref_cache_get(uuid))
+ self._create_vm(uuid=uuid)
+ self.assertIsNotNone(vm_util.vm_ref_cache_get(uuid))
+
+ def _spawn_power_state(self, power_on):
+ self._spawn = self.conn._vmops.spawn
+ self._power_on = power_on
+
+ def _fake_spawn(context, instance, image_meta, injected_files,
+ admin_password, network_info, block_device_info=None,
+ instance_name=None, power_on=True):
+ return self._spawn(context, instance, image_meta,
+ injected_files, admin_password, network_info,
+ block_device_info=block_device_info,
+ instance_name=instance_name,
+ power_on=self._power_on)
+
+ with (
+ mock.patch.object(self.conn._vmops, 'spawn', _fake_spawn)
+ ):
+ self._create_vm(powered_on=power_on)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ if power_on:
+ self._check_vm_info(info, power_state.RUNNING)
+ else:
+ self._check_vm_info(info, power_state.SHUTDOWN)
+
+ def test_spawn_no_power_on(self):
+ self._spawn_power_state(False)
+
+ def test_spawn_power_on(self):
+ self._spawn_power_state(True)
+
+ def test_spawn_root_size_0(self):
+ self._create_vm(instance_type='m1.micro')
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ cache = ('[%s] vmware_base/%s/%s.vmdk' %
+ (self.ds, self.fake_image_uuid, self.fake_image_uuid))
+ gb_cache = ('[%s] vmware_base/%s/%s.0.vmdk' %
+ (self.ds, self.fake_image_uuid, self.fake_image_uuid))
+ self.assertTrue(vmwareapi_fake.get_file(cache))
+ self.assertFalse(vmwareapi_fake.get_file(gb_cache))
+
+ def _spawn_with_delete_exception(self, fault=None):
+
+ def fake_call_method(module, method, *args, **kwargs):
+ task_ref = self.call_method(module, method, *args, **kwargs)
+ if method == "DeleteDatastoreFile_Task":
+ self.exception = True
+ task_mdo = vmwareapi_fake.create_task(method, "error",
+ error_fault=fault)
+ return task_mdo.obj
+ return task_ref
+
+ with (
+ mock.patch.object(self.conn._session, '_call_method',
+ fake_call_method)
+ ):
+ if fault:
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ else:
+ self.assertRaises(vexc.VMwareDriverException, self._create_vm)
+ self.assertTrue(self.exception)
+
+ def test_spawn_with_delete_exception_not_found(self):
+ self._spawn_with_delete_exception(vmwareapi_fake.FileNotFound())
+
+ def test_spawn_with_delete_exception_file_fault(self):
+ self._spawn_with_delete_exception(vmwareapi_fake.FileFault())
+
+ def test_spawn_with_delete_exception_cannot_delete_file(self):
+ self._spawn_with_delete_exception(vmwareapi_fake.CannotDeleteFile())
+
+ def test_spawn_with_delete_exception_file_locked(self):
+ self._spawn_with_delete_exception(vmwareapi_fake.FileLocked())
+
+ def test_spawn_with_delete_exception_general(self):
+ self._spawn_with_delete_exception()
+
+ def test_spawn_disk_extend(self):
+ self.mox.StubOutWithMock(self.conn._vmops, '_extend_virtual_disk')
+ requested_size = 80 * units.Mi
+ self.conn._vmops._extend_virtual_disk(mox.IgnoreArg(),
+ requested_size, mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_spawn_disk_extend_exists(self):
+ root = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.80.vmdk' % self.fake_image_uuid)
+
+ def _fake_extend(instance, requested_size, name, dc_ref):
+ vmwareapi_fake._add_file(str(root))
+
+ self.stubs.Set(self.conn._vmops, '_extend_virtual_disk',
+ _fake_extend)
+
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.assertTrue(vmwareapi_fake.get_file(str(root)))
+
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
+ 'from_image')
+ def test_spawn_disk_extend_sparse(self, mock_from_image):
+ img_props = images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ file_size=units.Ki,
+ disk_type=constants.DISK_TYPE_SPARSE,
+ linked_clone=True)
+
+ mock_from_image.return_value = img_props
+
+ with contextlib.nested(
+ mock.patch.object(self.conn._vmops, '_extend_virtual_disk'),
+ mock.patch.object(self.conn._vmops, 'get_datacenter_ref_and_name'),
+ ) as (mock_extend, mock_get_dc):
+ dc_val = mock.Mock()
+ dc_val.ref = "fake_dc_ref"
+ dc_val.name = "dc1"
+ mock_get_dc.return_value = dc_val
+ self._create_vm()
+ iid = img_props.image_id
+ cached_image = ds_util.DatastorePath(self.ds, 'vmware_base',
+ iid, '%s.80.vmdk' % iid)
+ mock_extend.assert_called_once_with(
+ self.instance, self.instance.root_gb * units.Mi,
+ str(cached_image), "fake_dc_ref")
+
+ def test_spawn_disk_extend_failed_copy(self):
+ # Spawn instance
+ # copy for extend fails without creating a file
+ #
+ # Expect the copy error to be raised
+ self.flags(use_linked_clone=True, group='vmware')
+
+ CopyError = vexc.FileFaultException
+
+ def fake_wait_for_task(task_ref):
+ if task_ref == 'fake-copy-task':
+ raise CopyError('Copy failed!')
+ return self.wait_task(task_ref)
+
+ def fake_call_method(module, method, *args, **kwargs):
+ if method == "CopyVirtualDisk_Task":
+ return 'fake-copy-task'
+
+ return self.call_method(module, method, *args, **kwargs)
+
+ with contextlib.nested(
+ mock.patch.object(self.conn._session, '_call_method',
+ new=fake_call_method),
+ mock.patch.object(self.conn._session, '_wait_for_task',
+ new=fake_wait_for_task)):
+ self.assertRaises(CopyError, self._create_vm)
+
+ def test_spawn_disk_extend_failed_partial_copy(self):
+ # Spawn instance
+ # Copy for extend fails, leaving a file behind
+ #
+ # Expect the file to be cleaned up
+ # Expect the copy error to be raised
+ self.flags(use_linked_clone=True, group='vmware')
+ self.task_ref = None
+ uuid = self.fake_image_uuid
+ cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds,
+ uuid, uuid)
+
+ CopyError = vexc.FileFaultException
+
+ def fake_wait_for_task(task_ref):
+ if task_ref == self.task_ref:
+ self.task_ref = None
+ self.assertTrue(vmwareapi_fake.get_file(cached_image))
+ # N.B. We don't test for -flat here because real
+ # CopyVirtualDisk_Task doesn't actually create it
+ raise CopyError('Copy failed!')
+ return self.wait_task(task_ref)
+
+ def fake_call_method(module, method, *args, **kwargs):
+ task_ref = self.call_method(module, method, *args, **kwargs)
+ if method == "CopyVirtualDisk_Task":
+ self.task_ref = task_ref
+ return task_ref
+
+ with contextlib.nested(
+ mock.patch.object(self.conn._session, '_call_method',
+ new=fake_call_method),
+ mock.patch.object(self.conn._session, '_wait_for_task',
+ new=fake_wait_for_task)):
+ self.assertRaises(CopyError, self._create_vm)
+ self.assertFalse(vmwareapi_fake.get_file(cached_image))
+
+ def test_spawn_disk_extend_failed_partial_copy_failed_cleanup(self):
+ # Spawn instance
+ # Copy for extend fails, leaves file behind
+ # File cleanup fails
+ #
+ # Expect file to be left behind
+ # Expect file cleanup error to be raised
+ self.flags(use_linked_clone=True, group='vmware')
+ self.task_ref = None
+ uuid = self.fake_image_uuid
+ cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds,
+ uuid, uuid)
+
+ CopyError = vexc.FileFaultException
+ DeleteError = vexc.CannotDeleteFileException
+
+ def fake_wait_for_task(task_ref):
+ if task_ref == self.task_ref:
+ self.task_ref = None
+ self.assertTrue(vmwareapi_fake.get_file(cached_image))
+ # N.B. We don't test for -flat here because real
+ # CopyVirtualDisk_Task doesn't actually create it
+ raise CopyError('Copy failed!')
+ elif task_ref == 'fake-delete-task':
+ raise DeleteError('Delete failed!')
+ return self.wait_task(task_ref)
+
+ def fake_call_method(module, method, *args, **kwargs):
+ if method == "DeleteDatastoreFile_Task":
+ return 'fake-delete-task'
+
+ task_ref = self.call_method(module, method, *args, **kwargs)
+ if method == "CopyVirtualDisk_Task":
+ self.task_ref = task_ref
+ return task_ref
+
+ with contextlib.nested(
+ mock.patch.object(self.conn._session, '_wait_for_task',
+ new=fake_wait_for_task),
+ mock.patch.object(self.conn._session, '_call_method',
+ new=fake_call_method)):
+ self.assertRaises(DeleteError, self._create_vm)
+ self.assertTrue(vmwareapi_fake.get_file(cached_image))
+
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
+ 'from_image')
+ def test_spawn_disk_invalid_disk_size(self, mock_from_image):
+ img_props = images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ file_size=82 * units.Gi,
+ disk_type=constants.DISK_TYPE_SPARSE,
+ linked_clone=True)
+
+ mock_from_image.return_value = img_props
+
+ self.assertRaises(exception.InstanceUnacceptable,
+ self._create_vm)
+
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
+ 'from_image')
+ def test_spawn_disk_extend_insufficient_disk_space(self, mock_from_image):
+ img_props = images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ file_size=1024,
+ disk_type=constants.DISK_TYPE_SPARSE,
+ linked_clone=True)
+
+ mock_from_image.return_value = img_props
+
+ cached_image = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.80.vmdk' %
+ self.fake_image_uuid)
+ tmp_file = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.80-flat.vmdk' %
+ self.fake_image_uuid)
+
+ NoDiskSpace = vexc.get_fault_class('NoDiskSpace')
+
+ def fake_wait_for_task(task_ref):
+ if task_ref == self.task_ref:
+ self.task_ref = None
+ raise NoDiskSpace()
+ return self.wait_task(task_ref)
+
+ def fake_call_method(module, method, *args, **kwargs):
+ task_ref = self.call_method(module, method, *args, **kwargs)
+ if method == 'ExtendVirtualDisk_Task':
+ self.task_ref = task_ref
+ return task_ref
+
+ with contextlib.nested(
+ mock.patch.object(self.conn._session, '_wait_for_task',
+ fake_wait_for_task),
+ mock.patch.object(self.conn._session, '_call_method',
+ fake_call_method)
+ ) as (mock_wait_for_task, mock_call_method):
+ self.assertRaises(NoDiskSpace, self._create_vm)
+ self.assertFalse(vmwareapi_fake.get_file(str(cached_image)))
+ self.assertFalse(vmwareapi_fake.get_file(str(tmp_file)))
+
+ def test_spawn_with_move_file_exists_exception(self):
+ # The test will validate that the spawn completes
+ # successfully. The "MoveDatastoreFile_Task" will
+ # raise an file exists exception. The flag
+ # self.exception will be checked to see that
+ # the exception has indeed been raised.
+
+ def fake_wait_for_task(task_ref):
+ if task_ref == self.task_ref:
+ self.task_ref = None
+ self.exception = True
+ raise vexc.FileAlreadyExistsException()
+ return self.wait_task(task_ref)
+
+ def fake_call_method(module, method, *args, **kwargs):
+ task_ref = self.call_method(module, method, *args, **kwargs)
+ if method == "MoveDatastoreFile_Task":
+ self.task_ref = task_ref
+ return task_ref
+
+ with contextlib.nested(
+ mock.patch.object(self.conn._session, '_wait_for_task',
+ fake_wait_for_task),
+ mock.patch.object(self.conn._session, '_call_method',
+ fake_call_method)
+ ) as (_wait_for_task, _call_method):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.assertTrue(self.exception)
+
+ def test_spawn_with_move_general_exception(self):
+ # The test will validate that the spawn completes
+ # successfully. The "MoveDatastoreFile_Task" will
+ # raise a general exception. The flag self.exception
+ # will be checked to see that the exception has
+ # indeed been raised.
+
+ def fake_wait_for_task(task_ref):
+ if task_ref == self.task_ref:
+ self.task_ref = None
+ self.exception = True
+ raise vexc.VMwareDriverException('Exception!')
+ return self.wait_task(task_ref)
+
+ def fake_call_method(module, method, *args, **kwargs):
+ task_ref = self.call_method(module, method, *args, **kwargs)
+ if method == "MoveDatastoreFile_Task":
+ self.task_ref = task_ref
+ return task_ref
+
+ with contextlib.nested(
+ mock.patch.object(self.conn._session, '_wait_for_task',
+ fake_wait_for_task),
+ mock.patch.object(self.conn._session, '_call_method',
+ fake_call_method)
+ ) as (_wait_for_task, _call_method):
+ self.assertRaises(vexc.VMwareDriverException,
+ self._create_vm)
+ self.assertTrue(self.exception)
+
+ def test_spawn_with_move_poll_exception(self):
+ self.call_method = self.conn._session._call_method
+
+ def fake_call_method(module, method, *args, **kwargs):
+ task_ref = self.call_method(module, method, *args, **kwargs)
+ if method == "MoveDatastoreFile_Task":
+ task_mdo = vmwareapi_fake.create_task(method, "error")
+ return task_mdo.obj
+ return task_ref
+
+ with (
+ mock.patch.object(self.conn._session, '_call_method',
+ fake_call_method)
+ ):
+ self.assertRaises(vexc.VMwareDriverException,
+ self._create_vm)
+
+ def test_spawn_with_move_file_exists_poll_exception(self):
+ # The test will validate that the spawn completes
+ # successfully. The "MoveDatastoreFile_Task" will
+ # raise a file exists exception. The flag self.exception
+ # will be checked to see that the exception has
+ # indeed been raised.
+
+ def fake_call_method(module, method, *args, **kwargs):
+ task_ref = self.call_method(module, method, *args, **kwargs)
+ if method == "MoveDatastoreFile_Task":
+ self.exception = True
+ task_mdo = vmwareapi_fake.create_task(method, "error",
+ error_fault=vmwareapi_fake.FileAlreadyExists())
+ return task_mdo.obj
+ return task_ref
+
+ with (
+ mock.patch.object(self.conn._session, '_call_method',
+ fake_call_method)
+ ):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.assertTrue(self.exception)
+
+ def _spawn_attach_volume_vmdk(self, set_image_ref=True, vc_support=False):
+ self._create_instance(set_image_ref=set_image_ref)
+ self.mox.StubOutWithMock(block_device, 'volume_in_mapping')
+ self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping')
+ connection_info = self._test_vmdk_connection_info('vmdk')
+ root_disk = [{'connection_info': connection_info}]
+ v_driver.block_device_info_get_mapping(
+ mox.IgnoreArg()).AndReturn(root_disk)
+ if vc_support:
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_get_res_pool_of_vm')
+ volumeops.VMwareVolumeOps._get_res_pool_of_vm(
+ mox.IgnoreArg()).AndReturn('fake_res_pool')
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_relocate_vmdk_volume')
+ volumeops.VMwareVolumeOps._relocate_vmdk_volume(mox.IgnoreArg(),
+ 'fake_res_pool', mox.IgnoreArg())
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ 'attach_volume')
+ volumeops.VMwareVolumeOps.attach_volume(connection_info,
+ self.instance, mox.IgnoreArg())
+ self.mox.ReplayAll()
+ block_device_info = {'mount_device': 'vda'}
+ self.conn.spawn(self.context, self.instance, self.image,
+ injected_files=[], admin_password=None,
+ network_info=self.network_info,
+ block_device_info=block_device_info)
+
+ def test_spawn_attach_volume_iscsi(self):
+ self._create_instance()
+ self.mox.StubOutWithMock(block_device, 'volume_in_mapping')
+ self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping')
+ connection_info = self._test_vmdk_connection_info('iscsi')
+ root_disk = [{'connection_info': connection_info}]
+ v_driver.block_device_info_get_mapping(
+ mox.IgnoreArg()).AndReturn(root_disk)
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ 'attach_volume')
+ volumeops.VMwareVolumeOps.attach_volume(connection_info,
+ self.instance, mox.IgnoreArg())
+ self.mox.ReplayAll()
+ block_device_info = {'mount_device': 'vda'}
+ self.conn.spawn(self.context, self.instance, self.image,
+ injected_files=[], admin_password=None,
+ network_info=self.network_info,
+ block_device_info=block_device_info)
+
+ def mock_upload_image(self, context, image, instance, **kwargs):
+ self.assertEqual(image, 'Test-Snapshot')
+ self.assertEqual(instance, self.instance)
+ self.assertEqual(kwargs['disk_type'], 'preallocated')
+
+ def test_get_vm_ref_using_extra_config(self):
+ self._create_vm()
+ vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session,
+ self.instance['uuid'])
+ self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
+ # Disrupt the fake Virtual Machine object so that extraConfig
+ # cannot be matched.
+ fake_vm = self._get_vm_record()
+ fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = ""
+ # We should not get a Virtual Machine through extraConfig.
+ vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session,
+ self.instance['uuid'])
+ self.assertIsNone(vm_ref, 'VM Reference should be none')
+ # Check if we can find the Virtual Machine using the name.
+ vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
+ self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
+
+ def test_search_vm_ref_by_identifier(self):
+ self._create_vm()
+ vm_ref = vm_util.search_vm_ref_by_identifier(self.conn._session,
+ self.instance['uuid'])
+ self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
+ fake_vm = self._get_vm_record()
+ fake_vm.set("summary.config.instanceUuid", "foo")
+ fake_vm.set("name", "foo")
+ fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = "foo"
+ self.assertIsNone(vm_util.search_vm_ref_by_identifier(
+ self.conn._session, self.instance['uuid']),
+ "VM Reference should be none")
+ self.assertIsNotNone(
+ vm_util.search_vm_ref_by_identifier(self.conn._session, "foo"),
+ "VM Reference should not be none")
+
+ def test_get_object_for_optionvalue(self):
+ self._create_vm()
+ vms = self.conn._session._call_method(vim_util, "get_objects",
+ "VirtualMachine", ['config.extraConfig["nvp.vm-uuid"]'])
+ vm_ref = vm_util._get_object_for_optionvalue(vms,
+ self.instance["uuid"])
+ self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
+
+ def _test_snapshot(self):
+ expected_calls = [
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
+ {'args': (),
+ 'kwargs':
+ {'task_state': task_states.IMAGE_UPLOADING,
+ 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
+ func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ with mock.patch.object(images, 'upload_image',
+ self.mock_upload_image):
+ self.conn.snapshot(self.context, self.instance, "Test-Snapshot",
+ func_call_matcher.call)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.assertIsNone(func_call_matcher.match())
+
+ def test_snapshot(self):
+ self._create_vm()
+ self._test_snapshot()
+
+ def test_snapshot_no_root_disk(self):
+ self._iso_disk_type_created(instance_type='m1.micro')
+ self.assertRaises(error_util.NoRootDiskDefined, self.conn.snapshot,
+ self.context, self.instance, "Test-Snapshot",
+ lambda *args, **kwargs: None)
+
+ def test_snapshot_non_existent(self):
+ self._create_instance()
+ self.assertRaises(exception.InstanceNotFound, self.conn.snapshot,
+ self.context, self.instance, "Test-Snapshot",
+ lambda *args, **kwargs: None)
+
+ def test_snapshot_delete_vm_snapshot(self):
+ self._create_vm()
+ fake_vm = self._get_vm_record()
+ snapshot_ref = vmwareapi_fake.ManagedObjectReference(
+ value="Snapshot-123",
+ name="VirtualMachineSnapshot")
+
+ self.mox.StubOutWithMock(vmops.VMwareVMOps,
+ '_create_vm_snapshot')
+ self.conn._vmops._create_vm_snapshot(
+ self.instance, fake_vm.obj).AndReturn(snapshot_ref)
+
+ self.mox.StubOutWithMock(vmops.VMwareVMOps,
+ '_delete_vm_snapshot')
+ self.conn._vmops._delete_vm_snapshot(
+ self.instance, fake_vm.obj, snapshot_ref).AndReturn(None)
+ self.mox.ReplayAll()
+
+ self._test_snapshot()
+
+ def _snapshot_delete_vm_snapshot_exception(self, exception, call_count=1):
+ self._create_vm()
+ fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0].obj
+ snapshot_ref = vmwareapi_fake.ManagedObjectReference(
+ value="Snapshot-123",
+ name="VirtualMachineSnapshot")
+
+ with contextlib.nested(
+ mock.patch.object(self.conn._session, '_wait_for_task',
+ side_effect=exception),
+ mock.patch.object(vmops, '_time_sleep_wrapper')
+ ) as (_fake_wait, _fake_sleep):
+ if exception != error_util.TaskInProgress:
+ self.assertRaises(exception,
+ self.conn._vmops._delete_vm_snapshot,
+ self.instance, fake_vm, snapshot_ref)
+ self.assertEqual(0, _fake_sleep.call_count)
+ else:
+ self.conn._vmops._delete_vm_snapshot(self.instance, fake_vm,
+ snapshot_ref)
+ self.assertEqual(call_count - 1, _fake_sleep.call_count)
+ self.assertEqual(call_count, _fake_wait.call_count)
+
+ def test_snapshot_delete_vm_snapshot_exception(self):
+ self._snapshot_delete_vm_snapshot_exception(exception.NovaException)
+
+ def test_snapshot_delete_vm_snapshot_exception_retry(self):
+ self.flags(api_retry_count=5, group='vmware')
+ self._snapshot_delete_vm_snapshot_exception(error_util.TaskInProgress,
+ 5)
+
+ def test_reboot(self):
+ self._create_vm()
+ info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ reboot_type = "SOFT"
+ self.conn.reboot(self.context, self.instance, self.network_info,
+ reboot_type)
+ info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_reboot_with_uuid(self):
+ """Test fall back to use name when can't find by uuid."""
+ self._create_vm()
+ info = self.conn.get_info({'name': 'fake-name', 'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ reboot_type = "SOFT"
+ self.conn.reboot(self.context, self.instance, self.network_info,
+ reboot_type)
+ info = self.conn.get_info({'name': 'fake-name', 'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_reboot_non_existent(self):
+ self._create_instance()
+ self.assertRaises(exception.InstanceNotFound, self.conn.reboot,
+ self.context, self.instance, self.network_info,
+ 'SOFT')
+
+ def test_poll_rebooting_instances(self):
+ self.mox.StubOutWithMock(compute_api.API, 'reboot')
+ compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self._create_vm()
+ instances = [self.instance]
+ self.conn.poll_rebooting_instances(60, instances)
+
+ def test_reboot_not_poweredon(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.suspend(self.instance)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.SUSPENDED)
+ self.assertRaises(exception.InstanceRebootFailure, self.conn.reboot,
+ self.context, self.instance, self.network_info,
+ 'SOFT')
+
+ def test_suspend(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.suspend(self.instance)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.SUSPENDED)
+
+ def test_suspend_non_existent(self):
+ self._create_instance()
+ self.assertRaises(exception.InstanceNotFound, self.conn.suspend,
+ self.instance)
+
+ def test_resume(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.suspend(self.instance)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.SUSPENDED)
+ self.conn.resume(self.context, self.instance, self.network_info)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_resume_non_existent(self):
+ self._create_instance()
+ self.assertRaises(exception.InstanceNotFound, self.conn.resume,
+ self.context, self.instance, self.network_info)
+
+ def test_resume_not_suspended(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.assertRaises(exception.InstanceResumeFailure, self.conn.resume,
+ self.context, self.instance, self.network_info)
+
+ def test_power_on(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.power_off(self.instance)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.SHUTDOWN)
+ self.conn.power_on(self.context, self.instance, self.network_info)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_power_on_non_existent(self):
+ self._create_instance()
+ self.assertRaises(exception.InstanceNotFound, self.conn.power_on,
+ self.context, self.instance, self.network_info)
+
+ def test_power_off(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.power_off(self.instance)
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.SHUTDOWN)
+
+ def test_power_off_non_existent(self):
+ self._create_instance()
+ self.assertRaises(exception.InstanceNotFound, self.conn.power_off,
+ self.instance)
+
+ def test_resume_state_on_host_boot(self):
+ self._create_vm()
+ self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name')
+ self.mox.StubOutWithMock(self.conn, "reboot")
+ vm_util.get_vm_state_from_name(mox.IgnoreArg(),
+ self.instance['uuid']).AndReturn("poweredOff")
+ self.conn.reboot(self.context, self.instance, 'network_info',
+ 'hard', None)
+ self.mox.ReplayAll()
+ self.conn.resume_state_on_host_boot(self.context, self.instance,
+ 'network_info')
+
+ def test_resume_state_on_host_boot_no_reboot_1(self):
+ """Don't call reboot on instance which is poweredon."""
+ self._create_vm()
+ self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name')
+ self.mox.StubOutWithMock(self.conn, 'reboot')
+ vm_util.get_vm_state_from_name(mox.IgnoreArg(),
+ self.instance['uuid']).AndReturn("poweredOn")
+ self.mox.ReplayAll()
+ self.conn.resume_state_on_host_boot(self.context, self.instance,
+ 'network_info')
+
+ def test_resume_state_on_host_boot_no_reboot_2(self):
+ """Don't call reboot on instance which is suspended."""
+ self._create_vm()
+ self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name')
+ self.mox.StubOutWithMock(self.conn, 'reboot')
+ vm_util.get_vm_state_from_name(mox.IgnoreArg(),
+ self.instance['uuid']).AndReturn("suspended")
+ self.mox.ReplayAll()
+ self.conn.resume_state_on_host_boot(self.context, self.instance,
+ 'network_info')
+
+ def destroy_rescued(self, fake_method):
+ self._rescue()
+ with contextlib.nested(
+ mock.patch.object(self.conn._volumeops, "detach_disk_from_vm",
+ fake_method),
+ mock.patch.object(vm_util, "power_on_instance"),
+ ) as (fake_detach, fake_power_on):
+ self.instance['vm_state'] = vm_states.RESCUED
+ self.conn.destroy(self.context, self.instance, self.network_info)
+ inst_path = ds_util.DatastorePath(self.ds, self.uuid,
+ '%s.vmdk' % self.uuid)
+ self.assertFalse(vmwareapi_fake.get_file(str(inst_path)))
+ rescue_file_path = ds_util.DatastorePath(
+ self.ds, '%s-rescue' % self.uuid, '%s-rescue.vmdk' % self.uuid)
+ self.assertFalse(vmwareapi_fake.get_file(str(rescue_file_path)))
+ # Unrescue does not power on with destroy
+ self.assertFalse(fake_power_on.called)
+
+ def test_destroy_rescued(self):
+ def fake_detach_disk_from_vm(*args, **kwargs):
+ pass
+ self.destroy_rescued(fake_detach_disk_from_vm)
+
+ def test_destroy_rescued_with_exception(self):
+ def fake_detach_disk_from_vm(*args, **kwargs):
+ raise exception.NovaException('Here is my fake exception')
+ self.destroy_rescued(fake_detach_disk_from_vm)
+
+ def test_destroy(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ instances = self.conn.list_instances()
+ self.assertEqual(len(instances), 1)
+ self.conn.destroy(self.context, self.instance, self.network_info)
+ instances = self.conn.list_instances()
+ self.assertEqual(len(instances), 0)
+ self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
+
+ def test_destroy_no_datastore(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ instances = self.conn.list_instances()
+ self.assertEqual(len(instances), 1)
+ # Delete the vmPathName
+ vm = self._get_vm_record()
+ vm.delete('config.files.vmPathName')
+ self.conn.destroy(self.context, self.instance, self.network_info)
+ instances = self.conn.list_instances()
+ self.assertEqual(len(instances), 0)
+
+ def test_destroy_non_existent(self):
+ self.destroy_disks = True
+ with mock.patch.object(self.conn._vmops,
+ "destroy") as mock_destroy:
+ self._create_instance()
+ self.conn.destroy(self.context, self.instance,
+ self.network_info,
+ None, self.destroy_disks)
+ mock_destroy.assert_called_once_with(self.instance,
+ self.destroy_disks)
+
+ def test_destroy_instance_without_compute(self):
+ self.destroy_disks = True
+ with mock.patch.object(self.conn._vmops,
+ "destroy") as mock_destroy:
+ self.conn.destroy(self.context, self.instance_without_compute,
+ self.network_info,
+ None, self.destroy_disks)
+ self.assertFalse(mock_destroy.called)
+
+ def _destroy_instance_without_vm_ref(self, resize_exists=False,
+ task_state=None):
+
+ def fake_vm_ref_from_name(session, vm_name):
+ if resize_exists:
+ return 'fake-ref'
+
+ self._create_instance()
+ with contextlib.nested(
+ mock.patch.object(vm_util, 'get_vm_ref_from_name',
+ fake_vm_ref_from_name),
+ mock.patch.object(self.conn._session,
+ '_call_method'),
+ mock.patch.object(self.conn._vmops,
+ '_destroy_instance')
+ ) as (mock_get, mock_call, mock_destroy):
+ self.instance.task_state = task_state
+ self.conn.destroy(self.context, self.instance,
+ self.network_info,
+ None, True)
+ if resize_exists:
+ if task_state == task_states.RESIZE_REVERTING:
+ expected = 1
+ else:
+ expected = 2
+ else:
+ expected = 1
+ self.assertEqual(expected, mock_destroy.call_count)
+ self.assertFalse(mock_call.called)
+
+ def test_destroy_instance_without_vm_ref(self):
+ self._destroy_instance_without_vm_ref()
+
+ def test_destroy_instance_without_vm_ref_with_resize(self):
+ self._destroy_instance_without_vm_ref(resize_exists=True)
+
+ def test_destroy_instance_without_vm_ref_with_resize_revert(self):
+ self._destroy_instance_without_vm_ref(resize_exists=True,
+ task_state=task_states.RESIZE_REVERTING)
+
+ def _rescue(self, config_drive=False):
+ # validate that the power on is only called once
+ self._power_on = vm_util.power_on_instance
+ self._power_on_called = 0
+
+ def fake_attach_disk_to_vm(vm_ref, instance,
+ adapter_type, disk_type, vmdk_path=None,
+ disk_size=None, linked_clone=False,
+ controller_key=None, unit_number=None,
+ device_name=None):
+ info = self.conn.get_info(instance)
+ self._check_vm_info(info, power_state.SHUTDOWN)
+
+ if config_drive:
+ def fake_create_config_drive(instance, injected_files, password,
+ data_store_name, folder,
+ instance_uuid, cookies):
+ self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
+ return str(ds_util.DatastorePath(data_store_name,
+ instance_uuid, 'fake.iso'))
+
+ self.stubs.Set(self.conn._vmops, '_create_config_drive',
+ fake_create_config_drive)
+
+ self._create_vm()
+
+ def fake_power_on_instance(session, instance, vm_ref=None):
+ self._power_on_called += 1
+ return self._power_on(session, instance, vm_ref=vm_ref)
+
+ info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.stubs.Set(vm_util, "power_on_instance",
+ fake_power_on_instance)
+ self.stubs.Set(self.conn._volumeops, "attach_disk_to_vm",
+ fake_attach_disk_to_vm)
+
+ self.conn.rescue(self.context, self.instance, self.network_info,
+ self.image, 'fake-password')
+
+ info = self.conn.get_info({'name': '1-rescue',
+ 'uuid': '%s-rescue' % self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.SHUTDOWN)
+ self.assertIsNotNone(vm_util.vm_ref_cache_get('%s-rescue' % self.uuid))
+ self.assertEqual(1, self._power_on_called)
+
+ def test_rescue(self):
+ self._rescue()
+ inst_file_path = ds_util.DatastorePath(self.ds, self.uuid,
+ '%s.vmdk' % self.uuid)
+ self.assertTrue(vmwareapi_fake.get_file(str(inst_file_path)))
+ rescue_file_path = ds_util.DatastorePath(self.ds,
+ '%s-rescue' % self.uuid,
+ '%s-rescue.vmdk' % self.uuid)
+ self.assertTrue(vmwareapi_fake.get_file(str(rescue_file_path)))
+
+ def test_rescue_with_config_drive(self):
+ self.flags(force_config_drive=True)
+ self._rescue(config_drive=True)
+
+ def test_unrescue(self):
+ # NOTE(dims): driver unrescue ends up eventually in vmops.unrescue
+ # with power_on=True, the test_destroy_rescued tests the
+ # vmops.unrescue with power_on=False
+ self._rescue()
+ vm_ref = vm_util.get_vm_ref(self.conn._session,
+ self.instance)
+ vm_rescue_ref = vm_util.get_vm_ref_from_name(self.conn._session,
+ '%s-rescue' % self.uuid)
+
+ self.poweroff_instance = vm_util.power_off_instance
+
+ def fake_power_off_instance(session, instance, vm_ref):
+ # This is called so that we actually poweroff the simulated vm.
+ # The reason for this is that there is a validation in destroy
+ # that the instance is not powered on.
+ self.poweroff_instance(session, instance, vm_ref)
+
+ def fake_detach_disk_from_vm(vm_ref, instance,
+ device_name, destroy_disk=False):
+ self.test_device_name = device_name
+ info = self.conn.get_info(instance)
+ self._check_vm_info(info, power_state.SHUTDOWN)
+
+ with contextlib.nested(
+ mock.patch.object(vm_util, "power_off_instance",
+ side_effect=fake_power_off_instance),
+ mock.patch.object(self.conn._volumeops, "detach_disk_from_vm",
+ side_effect=fake_detach_disk_from_vm),
+ mock.patch.object(vm_util, "power_on_instance"),
+ ) as (poweroff, detach, fake_power_on):
+ self.conn.unrescue(self.instance, None)
+ poweroff.assert_called_once_with(self.conn._session, mock.ANY,
+ vm_rescue_ref)
+ detach.assert_called_once_with(vm_rescue_ref, mock.ANY,
+ self.test_device_name)
+ fake_power_on.assert_called_once_with(self.conn._session,
+ self.instance,
+ vm_ref=vm_ref)
+ self.test_vm_ref = None
+ self.test_device_name = None
+
+ def test_get_diagnostics(self):
+ self._create_vm()
+ expected = {'memoryReservation': 0, 'suspendInterval': 0,
+ 'maxCpuUsage': 2000, 'toolsInstallerMounted': False,
+ 'consumedOverheadMemory': 20, 'numEthernetCards': 1,
+ 'numCpu': 1, 'featureRequirement': [{'key': 'cpuid.AES'}],
+ 'memoryOverhead': 21417984,
+ 'guestMemoryUsage': 0, 'connectionState': 'connected',
+ 'memorySizeMB': 512, 'balloonedMemory': 0,
+ 'vmPathName': 'fake_path', 'template': False,
+ 'overallCpuUsage': 0, 'powerState': 'poweredOn',
+ 'cpuReservation': 0, 'overallCpuDemand': 0,
+ 'numVirtualDisks': 1, 'hostMemoryUsage': 141}
+ expected = dict([('vmware:' + k, v) for k, v in expected.items()])
+ self.assertThat(
+ self.conn.get_diagnostics({'name': 1, 'uuid': self.uuid,
+ 'node': self.instance_node}),
+ matchers.DictMatches(expected))
+
+ def test_get_instance_diagnostics(self):
+ self._create_vm()
+ expected = {'uptime': 0,
+ 'memory_details': {'used': 0, 'maximum': 512},
+ 'nic_details': [],
+ 'driver': 'vmwareapi',
+ 'state': 'running',
+ 'version': '1.0',
+ 'cpu_details': [],
+ 'disk_details': [],
+ 'hypervisor_os': 'esxi',
+ 'config_drive': False}
+ actual = self.conn.get_instance_diagnostics(
+ {'name': 1, 'uuid': self.uuid, 'node': self.instance_node})
+ self.assertThat(actual.serialize(), matchers.DictMatches(expected))
+
+ def test_get_console_output(self):
+ self.assertRaises(NotImplementedError, self.conn.get_console_output,
+ None, None)
+
+ def _test_finish_migration(self, power_on, resize_instance=False):
+ self._create_vm()
+ self.conn.finish_migration(context=self.context,
+ migration=None,
+ instance=self.instance,
+ disk_info=None,
+ network_info=None,
+ block_device_info=None,
+ resize_instance=resize_instance,
+ image_meta=None,
+ power_on=power_on)
+
+ def _test_finish_revert_migration(self, power_on):
+ self._create_vm()
+ # Ensure ESX driver throws an error
+ self.assertRaises(NotImplementedError,
+ self.conn.finish_revert_migration,
+ self.context,
+ instance=self.instance,
+ network_info=None)
+
+ def test_get_vnc_console_non_existent(self):
+ self._create_instance()
+ self.assertRaises(exception.InstanceNotFound,
+ self.conn.get_vnc_console,
+ self.context,
+ self.instance)
+
+ def _test_get_vnc_console(self):
+ self._create_vm()
+ fake_vm = self._get_vm_record()
+ OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
+ opt_val = OptionValue(key='', value=5906)
+ fake_vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
+ vnc_console = self.conn.get_vnc_console(self.context, self.instance)
+ self.assertEqual(self.vnc_host, vnc_console.host)
+ self.assertEqual(5906, vnc_console.port)
+
+ def test_get_vnc_console(self):
+ self._test_get_vnc_console()
+
+ def test_get_vnc_console_noport(self):
+ self._create_vm()
+ self.assertRaises(exception.ConsoleTypeUnavailable,
+ self.conn.get_vnc_console,
+ self.context,
+ self.instance)
+
+ def test_get_volume_connector(self):
+ self._create_vm()
+ connector_dict = self.conn.get_volume_connector(self.instance)
+ fake_vm = self._get_vm_record()
+ fake_vm_id = fake_vm.obj.value
+ self.assertEqual(connector_dict['ip'], 'test_url')
+ self.assertEqual(connector_dict['initiator'], 'iscsi-name')
+ self.assertEqual(connector_dict['host'], 'test_url')
+ self.assertEqual(connector_dict['instance'], fake_vm_id)
+
+ def _test_vmdk_connection_info(self, type):
+ return {'driver_volume_type': type,
+ 'serial': 'volume-fake-id',
+ 'data': {'volume': 'vm-10',
+ 'volume_id': 'volume-fake-id'}}
+
+ def test_volume_attach_vmdk(self):
+ self._create_vm()
+ connection_info = self._test_vmdk_connection_info('vmdk')
+ mount_point = '/dev/vdc'
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_attach_volume_vmdk')
+ volumeops.VMwareVolumeOps._attach_volume_vmdk(connection_info,
+ self.instance, mount_point)
+ self.mox.ReplayAll()
+ self.conn.attach_volume(None, connection_info, self.instance,
+ mount_point)
+
+ def test_volume_detach_vmdk(self):
+ self._create_vm()
+ connection_info = self._test_vmdk_connection_info('vmdk')
+ mount_point = '/dev/vdc'
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_detach_volume_vmdk')
+ volumeops.VMwareVolumeOps._detach_volume_vmdk(connection_info,
+ self.instance, mount_point)
+ self.mox.ReplayAll()
+ self.conn.detach_volume(connection_info, self.instance, mount_point,
+ encryption=None)
+
+ def test_attach_vmdk_disk_to_vm(self):
+ self._create_vm()
+ connection_info = self._test_vmdk_connection_info('vmdk')
+ mount_point = '/dev/vdc'
+
+ # create fake backing info
+ volume_device = vmwareapi_fake.DataObject()
+ volume_device.backing = vmwareapi_fake.DataObject()
+ volume_device.backing.fileName = 'fake_path'
+
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_get_vmdk_base_volume_device')
+ volumeops.VMwareVolumeOps._get_vmdk_base_volume_device(
+ mox.IgnoreArg()).AndReturn(volume_device)
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ 'attach_disk_to_vm')
+ volumeops.VMwareVolumeOps.attach_disk_to_vm(mox.IgnoreArg(),
+ self.instance, mox.IgnoreArg(), mox.IgnoreArg(),
+ vmdk_path='fake_path')
+ self.mox.ReplayAll()
+ self.conn.attach_volume(None, connection_info, self.instance,
+ mount_point)
+
+ def test_detach_vmdk_disk_from_vm(self):
+ self._create_vm()
+ connection_info = self._test_vmdk_connection_info('vmdk')
+ mount_point = '/dev/vdc'
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_get_volume_uuid')
+ volumeops.VMwareVolumeOps._get_volume_uuid(mox.IgnoreArg(),
+ 'volume-fake-id').AndReturn('fake_disk_uuid')
+ self.mox.StubOutWithMock(vm_util, 'get_vmdk_backed_disk_device')
+ vm_util.get_vmdk_backed_disk_device(mox.IgnoreArg(),
+ 'fake_disk_uuid').AndReturn('fake_device')
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_consolidate_vmdk_volume')
+ volumeops.VMwareVolumeOps._consolidate_vmdk_volume(self.instance,
+ mox.IgnoreArg(), 'fake_device', mox.IgnoreArg())
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ 'detach_disk_from_vm')
+ volumeops.VMwareVolumeOps.detach_disk_from_vm(mox.IgnoreArg(),
+ self.instance, mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.conn.detach_volume(connection_info, self.instance, mount_point,
+ encryption=None)
+
+ def test_volume_attach_iscsi(self):
+ self._create_vm()
+ connection_info = self._test_vmdk_connection_info('iscsi')
+ mount_point = '/dev/vdc'
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_attach_volume_iscsi')
+ volumeops.VMwareVolumeOps._attach_volume_iscsi(connection_info,
+ self.instance, mount_point)
+ self.mox.ReplayAll()
+ self.conn.attach_volume(None, connection_info, self.instance,
+ mount_point)
+
+ def test_volume_detach_iscsi(self):
+ self._create_vm()
+ connection_info = self._test_vmdk_connection_info('iscsi')
+ mount_point = '/dev/vdc'
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_detach_volume_iscsi')
+ volumeops.VMwareVolumeOps._detach_volume_iscsi(connection_info,
+ self.instance, mount_point)
+ self.mox.ReplayAll()
+ self.conn.detach_volume(connection_info, self.instance, mount_point,
+ encryption=None)
+
+ def test_attach_iscsi_disk_to_vm(self):
+ self._create_vm()
+ connection_info = self._test_vmdk_connection_info('iscsi')
+ connection_info['data']['target_portal'] = 'fake_target_host:port'
+ connection_info['data']['target_iqn'] = 'fake_target_iqn'
+ mount_point = '/dev/vdc'
+ discover = ('fake_name', 'fake_uuid')
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_iscsi_get_target')
+ # simulate target not found
+ volumeops.VMwareVolumeOps._iscsi_get_target(
+ connection_info['data']).AndReturn((None, None))
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_iscsi_add_send_target_host')
+ # rescan gets called with target portal
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_iscsi_rescan_hba')
+ volumeops.VMwareVolumeOps._iscsi_rescan_hba(
+ connection_info['data']['target_portal'])
+ # simulate target found
+ volumeops.VMwareVolumeOps._iscsi_get_target(
+ connection_info['data']).AndReturn(discover)
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ 'attach_disk_to_vm')
+ volumeops.VMwareVolumeOps.attach_disk_to_vm(mox.IgnoreArg(),
+ self.instance, mox.IgnoreArg(), 'rdmp',
+ device_name=mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.conn.attach_volume(None, connection_info, self.instance,
+ mount_point)
+
+ def test_iscsi_rescan_hba(self):
+ fake_target_portal = 'fake_target_host:port'
+ host_storage_sys = vmwareapi_fake._get_objects(
+ "HostStorageSystem").objects[0]
+ iscsi_hba_array = host_storage_sys.get('storageDeviceInfo'
+ '.hostBusAdapter')
+ iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0]
+ # Check the host system does not have the send target
+ self.assertRaises(AttributeError, getattr, iscsi_hba,
+ 'configuredSendTarget')
+ # Rescan HBA with the target portal
+ vops = volumeops.VMwareVolumeOps(self.conn._session)
+ vops._iscsi_rescan_hba(fake_target_portal)
+ # Check if HBA has the target portal configured
+ self.assertEqual('fake_target_host',
+ iscsi_hba.configuredSendTarget[0].address)
+ # Rescan HBA with same portal
+ vops._iscsi_rescan_hba(fake_target_portal)
+ self.assertEqual(1, len(iscsi_hba.configuredSendTarget))
+
+ def test_iscsi_get_target(self):
+ data = {'target_portal': 'fake_target_host:port',
+ 'target_iqn': 'fake_target_iqn'}
+ host = vmwareapi_fake._get_objects('HostSystem').objects[0]
+ host._add_iscsi_target(data)
+ vops = volumeops.VMwareVolumeOps(self.conn._session)
+ result = vops._iscsi_get_target(data)
+ self.assertEqual(('fake-device', 'fake-uuid'), result)
+
+ def test_detach_iscsi_disk_from_vm(self):
+ self._create_vm()
+ connection_info = self._test_vmdk_connection_info('iscsi')
+ connection_info['data']['target_portal'] = 'fake_target_portal'
+ connection_info['data']['target_iqn'] = 'fake_target_iqn'
+ mount_point = '/dev/vdc'
+ find = ('fake_name', 'fake_uuid')
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ '_iscsi_get_target')
+ volumeops.VMwareVolumeOps._iscsi_get_target(
+ connection_info['data']).AndReturn(find)
+ self.mox.StubOutWithMock(vm_util, 'get_rdm_disk')
+ device = 'fake_device'
+ vm_util.get_rdm_disk(mox.IgnoreArg(), 'fake_uuid').AndReturn(device)
+ self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
+ 'detach_disk_from_vm')
+ volumeops.VMwareVolumeOps.detach_disk_from_vm(mox.IgnoreArg(),
+ self.instance, device, destroy_disk=True)
+ self.mox.ReplayAll()
+ self.conn.detach_volume(connection_info, self.instance, mount_point,
+ encryption=None)
+
+ def test_connection_info_get(self):
+ self._create_vm()
+ connector = self.conn.get_volume_connector(self.instance)
+ self.assertEqual(connector['ip'], 'test_url')
+ self.assertEqual(connector['host'], 'test_url')
+ self.assertEqual(connector['initiator'], 'iscsi-name')
+ self.assertIn('instance', connector)
+
+ def test_connection_info_get_after_destroy(self):
+ self._create_vm()
+ self.conn.destroy(self.context, self.instance, self.network_info)
+ connector = self.conn.get_volume_connector(self.instance)
+ self.assertEqual(connector['ip'], 'test_url')
+ self.assertEqual(connector['host'], 'test_url')
+ self.assertEqual(connector['initiator'], 'iscsi-name')
+ self.assertNotIn('instance', connector)
+
+ def test_refresh_instance_security_rules(self):
+ self.assertRaises(NotImplementedError,
+ self.conn.refresh_instance_security_rules,
+ instance=None)
+
+ def test_image_aging_image_used(self):
+ self._create_vm()
+ all_instances = [self.instance]
+ self.conn.manage_image_cache(self.context, all_instances)
+ self._cached_files_exist()
+
+ def _get_timestamp_filename(self):
+ return '%s%s' % (imagecache.TIMESTAMP_PREFIX,
+ timeutils.strtime(at=self.old_time,
+ fmt=imagecache.TIMESTAMP_FORMAT))
+
+ def _override_time(self):
+ self.old_time = datetime.datetime(2012, 11, 22, 12, 00, 00)
+
+ def _fake_get_timestamp_filename(fake):
+ return self._get_timestamp_filename()
+
+ self.stubs.Set(imagecache.ImageCacheManager, '_get_timestamp_filename',
+ _fake_get_timestamp_filename)
+
+ def _timestamp_file_exists(self, exists=True):
+ timestamp = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ self._get_timestamp_filename() + '/')
+ if exists:
+ self.assertTrue(vmwareapi_fake.get_file(str(timestamp)))
+ else:
+ self.assertFalse(vmwareapi_fake.get_file(str(timestamp)))
+
+ def _image_aging_image_marked_for_deletion(self):
+ self._create_vm(uuid=uuidutils.generate_uuid())
+ self._cached_files_exist()
+ all_instances = []
+ self.conn.manage_image_cache(self.context, all_instances)
+ self._cached_files_exist()
+ self._timestamp_file_exists()
+
+ def test_image_aging_image_marked_for_deletion(self):
+ self._override_time()
+ self._image_aging_image_marked_for_deletion()
+
+ def _timestamp_file_removed(self):
+ self._override_time()
+ self._image_aging_image_marked_for_deletion()
+ self._create_vm(num_instances=2,
+ uuid=uuidutils.generate_uuid())
+ self._timestamp_file_exists(exists=False)
+
+ def test_timestamp_file_removed_spawn(self):
+ self._timestamp_file_removed()
+
+ def test_timestamp_file_removed_aging(self):
+ self._timestamp_file_removed()
+ ts = self._get_timestamp_filename()
+ ts_path = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid, ts + '/')
+ vmwareapi_fake._add_file(str(ts_path))
+ self._timestamp_file_exists()
+ all_instances = [self.instance]
+ self.conn.manage_image_cache(self.context, all_instances)
+ self._timestamp_file_exists(exists=False)
+
+ def test_image_aging_disabled(self):
+ self._override_time()
+ self.flags(remove_unused_base_images=False)
+ self._create_vm()
+ self._cached_files_exist()
+ all_instances = []
+ self.conn.manage_image_cache(self.context, all_instances)
+ self._cached_files_exist(exists=True)
+ self._timestamp_file_exists(exists=False)
+
+ def _image_aging_aged(self, aging_time=100):
+ self._override_time()
+ cur_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
+ self.flags(remove_unused_original_minimum_age_seconds=aging_time)
+ self._image_aging_image_marked_for_deletion()
+ all_instances = []
+ timeutils.set_time_override(cur_time)
+ self.conn.manage_image_cache(self.context, all_instances)
+
+ def test_image_aging_aged(self):
+ self._image_aging_aged(aging_time=8)
+ self._cached_files_exist(exists=False)
+
+ def test_image_aging_not_aged(self):
+ self._image_aging_aged()
+ self._cached_files_exist()
+
+
+class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase):
+
+ @mock.patch.object(driver.VMwareVCDriver, '_register_openstack_extension')
+ def setUp(self, mock_register):
+ super(VMwareAPIVCDriverTestCase, self).setUp(create_connection=False)
+ cluster_name = 'test_cluster'
+ cluster_name2 = 'test_cluster2'
+ self.flags(cluster_name=[cluster_name, cluster_name2],
+ api_retry_count=1,
+ task_poll_interval=10, datastore_regex='.*', group='vmware')
+ self.flags(vnc_enabled=False,
+ image_cache_subdirectory_name='vmware_base')
+ vmwareapi_fake.reset()
+ self.conn = driver.VMwareVCDriver(None, False)
+ self._set_exception_vars()
+ self.node_name = self.conn._resources.keys()[0]
+ self.node_name2 = self.conn._resources.keys()[1]
+ if cluster_name2 in self.node_name2:
+ self.ds = 'ds1'
+ else:
+ self.ds = 'ds2'
+ self.vnc_host = 'ha-host'
+
+ def tearDown(self):
+ super(VMwareAPIVCDriverTestCase, self).tearDown()
+ vmwareapi_fake.cleanup()
+
+ def test_public_api_signatures(self):
+ self.assertPublicAPISignatures(v_driver.ComputeDriver(None), self.conn)
+
+ def test_register_extension(self):
+ with mock.patch.object(self.conn._session, '_call_method',
+ return_value=None) as mock_call_method:
+ self.conn._register_openstack_extension()
+ mock_call_method.assert_has_calls(
+ [mock.call(oslo_vim_util, 'find_extension',
+ constants.EXTENSION_KEY),
+ mock.call(oslo_vim_util, 'register_extension',
+ constants.EXTENSION_KEY,
+ constants.EXTENSION_TYPE_INSTANCE)])
+
+ def test_register_extension_already_exists(self):
+ with mock.patch.object(self.conn._session, '_call_method',
+ return_value='fake-extension') as mock_find_ext:
+ self.conn._register_openstack_extension()
+ mock_find_ext.assert_called_once_with(oslo_vim_util,
+ 'find_extension',
+ constants.EXTENSION_KEY)
+
+ def test_list_instances(self):
+ instances = self.conn.list_instances()
+ self.assertEqual(0, len(instances))
+
+ def test_list_instances_from_nodes(self):
+ # Create instance on node1
+ self._create_vm(self.node_name)
+ # Create instances on the other node
+ self._create_vm(self.node_name2, num_instances=2)
+ self._create_vm(self.node_name2, num_instances=3)
+ node1_vmops = self.conn._get_vmops_for_compute_node(self.node_name)
+ node2_vmops = self.conn._get_vmops_for_compute_node(self.node_name2)
+ self.assertEqual(1, len(node1_vmops.list_instances()))
+ self.assertEqual(2, len(node2_vmops.list_instances()))
+ self.assertEqual(3, len(self.conn.list_instances()))
+
+ def _setup_mocks_for_session(self, mock_init):
+ mock_init.return_value = None
+
+ vcdriver = driver.VMwareVCDriver(None, False)
+ vcdriver._session = mock.Mock()
+ vcdriver._session.vim = None
+
+ def side_effect():
+ vcdriver._session.vim = mock.Mock()
+ vcdriver._session._create_session.side_effect = side_effect
+ return vcdriver
+
+ def test_host_power_action(self):
+ self.assertRaises(NotImplementedError,
+ self.conn.host_power_action, 'host', 'action')
+
+ def test_host_maintenance_mode(self):
+ self.assertRaises(NotImplementedError,
+ self.conn.host_maintenance_mode, 'host', 'mode')
+
+ def test_set_host_enabled(self):
+ self.assertRaises(NotImplementedError,
+ self.conn.set_host_enabled, 'host', 'state')
+
+ def test_datastore_regex_configured(self):
+ for node in self.conn._resources.keys():
+ self.assertEqual(self.conn._datastore_regex,
+ self.conn._resources[node]['vmops']._datastore_regex)
+
+ def test_get_available_resource(self):
+ stats = self.conn.get_available_resource(self.node_name)
+ cpu_info = {"model": ["Intel(R) Xeon(R)", "Intel(R) Xeon(R)"],
+ "vendor": ["Intel", "Intel"],
+ "topology": {"cores": 16,
+ "threads": 32}}
+ self.assertEqual(stats['vcpus'], 32)
+ self.assertEqual(stats['local_gb'], 1024)
+ self.assertEqual(stats['local_gb_used'], 1024 - 500)
+ self.assertEqual(stats['memory_mb'], 1000)
+ self.assertEqual(stats['memory_mb_used'], 500)
+ self.assertEqual(stats['hypervisor_type'], 'VMware vCenter Server')
+ self.assertEqual(stats['hypervisor_version'], 5001000)
+ self.assertEqual(stats['hypervisor_hostname'], self.node_name)
+ self.assertEqual(stats['cpu_info'], jsonutils.dumps(cpu_info))
+ self.assertEqual(stats['supported_instances'],
+ '[["i686", "vmware", "hvm"], ["x86_64", "vmware", "hvm"]]')
+
+ def test_invalid_datastore_regex(self):
+
+ # Tests if we raise an exception for Invalid Regular Expression in
+ # vmware_datastore_regex
+ self.flags(cluster_name=['test_cluster'], datastore_regex='fake-ds(01',
+ group='vmware')
+ self.assertRaises(exception.InvalidInput, driver.VMwareVCDriver, None)
+
+ def test_get_available_nodes(self):
+ nodelist = self.conn.get_available_nodes()
+ self.assertEqual(len(nodelist), 2)
+ self.assertIn(self.node_name, nodelist)
+ self.assertIn(self.node_name2, nodelist)
+
+ def test_spawn_multiple_node(self):
+
+ def fake_is_neutron():
+ return False
+
+ self.stubs.Set(nova_utils, 'is_neutron', fake_is_neutron)
+ uuid1 = uuidutils.generate_uuid()
+ uuid2 = uuidutils.generate_uuid()
+ self._create_vm(node=self.node_name, num_instances=1,
+ uuid=uuid1)
+ info = self.conn.get_info({'uuid': uuid1,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.destroy(self.context, self.instance, self.network_info)
+ self._create_vm(node=self.node_name2, num_instances=1,
+ uuid=uuid2)
+ info = self.conn.get_info({'uuid': uuid2,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_snapshot(self):
+ self._create_vm()
+ self._test_snapshot()
+
+ def test_snapshot_using_file_manager(self):
+ self._create_vm()
+ uuid_str = uuidutils.generate_uuid()
+ self.mox.StubOutWithMock(uuidutils,
+ 'generate_uuid')
+ uuidutils.generate_uuid().AndReturn(uuid_str)
+
+ self.mox.StubOutWithMock(ds_util, 'file_delete')
+ disk_ds_path = ds_util.DatastorePath(
+ self.ds, "vmware_temp", "%s.vmdk" % uuid_str)
+ disk_ds_flat_path = ds_util.DatastorePath(
+ self.ds, "vmware_temp", "%s-flat.vmdk" % uuid_str)
+ # Check calls for delete vmdk and -flat.vmdk pair
+ ds_util.file_delete(
+ mox.IgnoreArg(), disk_ds_flat_path,
+ mox.IgnoreArg()).AndReturn(None)
+ ds_util.file_delete(
+ mox.IgnoreArg(), disk_ds_path, mox.IgnoreArg()).AndReturn(None)
+
+ self.mox.ReplayAll()
+ self._test_snapshot()
+
+ def test_spawn_invalid_node(self):
+ self._create_instance(node='InvalidNodeName')
+ self.assertRaises(exception.NotFound, self.conn.spawn,
+ self.context, self.instance, self.image,
+ injected_files=[], admin_password=None,
+ network_info=self.network_info,
+ block_device_info=None)
+
+ @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
+ 'from_image')
+ def test_spawn_with_sparse_image(self, mock_from_image):
+ img_info = images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ file_size=1024,
+ disk_type=constants.DISK_TYPE_SPARSE,
+ linked_clone=False)
+
+ mock_from_image.return_value = img_info
+
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_plug_vifs(self):
+ # Check to make sure the method raises NotImplementedError.
+ self._create_instance()
+ self.assertRaises(NotImplementedError,
+ self.conn.plug_vifs,
+ instance=self.instance, network_info=None)
+
+ def test_unplug_vifs(self):
+ # Check to make sure the method raises NotImplementedError.
+ self._create_instance()
+ self.assertRaises(NotImplementedError,
+ self.conn.unplug_vifs,
+ instance=self.instance, network_info=None)
+
+ def _create_vif(self):
+ gw_4 = network_model.IP(address='101.168.1.1', type='gateway')
+ dns_4 = network_model.IP(address='8.8.8.8', type=None)
+ subnet_4 = network_model.Subnet(cidr='101.168.1.0/24',
+ dns=[dns_4],
+ gateway=gw_4,
+ routes=None,
+ dhcp_server='191.168.1.1')
+
+ gw_6 = network_model.IP(address='101:1db9::1', type='gateway')
+ subnet_6 = network_model.Subnet(cidr='101:1db9::/64',
+ dns=None,
+ gateway=gw_6,
+ ips=None,
+ routes=None)
+
+ network_neutron = network_model.Network(id='network-id-xxx-yyy-zzz',
+ bridge=None,
+ label=None,
+ subnets=[subnet_4,
+ subnet_6],
+ bridge_interface='eth0',
+ vlan=99)
+
+ vif_bridge_neutron = network_model.VIF(id='new-vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_neutron,
+ type=None,
+ devname='tap-xxx-yyy-zzz',
+ ovs_interfaceid='aaa-bbb-ccc')
+ return vif_bridge_neutron
+
+ def _validate_interfaces(self, id, index, num_iface_ids):
+ vm = self._get_vm_record()
+ found_iface_id = False
+ extras = vm.get("config.extraConfig")
+ key = "nvp.iface-id.%s" % index
+ num_found = 0
+ for c in extras.OptionValue:
+ if c.key.startswith("nvp.iface-id."):
+ num_found += 1
+ if c.key == key and c.value == id:
+ found_iface_id = True
+ self.assertTrue(found_iface_id)
+ self.assertEqual(num_found, num_iface_ids)
+
+ def _attach_interface(self, vif):
+ self.conn.attach_interface(self.instance, self.image, vif)
+ self._validate_interfaces(vif['id'], 1, 2)
+
+ def test_attach_interface(self):
+ self._create_vm()
+ vif = self._create_vif()
+ self._attach_interface(vif)
+
+ def test_attach_interface_with_exception(self):
+ self._create_vm()
+ vif = self._create_vif()
+
+ with mock.patch.object(self.conn._session, '_wait_for_task',
+ side_effect=Exception):
+ self.assertRaises(exception.InterfaceAttachFailed,
+ self.conn.attach_interface,
+ self.instance, self.image, vif)
+
+ @mock.patch.object(vif, 'get_network_device',
+ return_value='fake_device')
+ def _detach_interface(self, vif, mock_get_device):
+ self._create_vm()
+ self._attach_interface(vif)
+ self.conn.detach_interface(self.instance, vif)
+ self._validate_interfaces('free', 1, 2)
+
+ def test_detach_interface(self):
+ vif = self._create_vif()
+ self._detach_interface(vif)
+
+ def test_detach_interface_and_attach(self):
+ vif = self._create_vif()
+ self._detach_interface(vif)
+ self.conn.attach_interface(self.instance, self.image, vif)
+ self._validate_interfaces(vif['id'], 1, 2)
+
+ def test_detach_interface_no_device(self):
+ self._create_vm()
+ vif = self._create_vif()
+ self._attach_interface(vif)
+ self.assertRaises(exception.NotFound, self.conn.detach_interface,
+ self.instance, vif)
+
+ def test_detach_interface_no_vif_match(self):
+ self._create_vm()
+ vif = self._create_vif()
+ self._attach_interface(vif)
+ vif['id'] = 'bad-id'
+ self.assertRaises(exception.NotFound, self.conn.detach_interface,
+ self.instance, vif)
+
+ @mock.patch.object(vif, 'get_network_device',
+ return_value='fake_device')
+ def test_detach_interface_with_exception(self, mock_get_device):
+ self._create_vm()
+ vif = self._create_vif()
+ self._attach_interface(vif)
+
+ with mock.patch.object(self.conn._session, '_wait_for_task',
+ side_effect=Exception):
+ self.assertRaises(exception.InterfaceDetachFailed,
+ self.conn.detach_interface,
+ self.instance, vif)
+
+ def test_migrate_disk_and_power_off(self):
+ def fake_update_instance_progress(context, instance, step,
+ total_steps):
+ pass
+
+ def fake_get_host_ref_from_name(dest):
+ return None
+
+ self._create_vm(instance_type='m1.large')
+ vm_ref_orig = vm_util.get_vm_ref(self.conn._session, self.instance)
+ flavor = self._get_instance_type_by_name('m1.large')
+ self.stubs.Set(self.conn._vmops, "_update_instance_progress",
+ fake_update_instance_progress)
+ self.stubs.Set(self.conn._vmops, "_get_host_ref_from_name",
+ fake_get_host_ref_from_name)
+ self.conn.migrate_disk_and_power_off(self.context, self.instance,
+ 'fake_dest', flavor,
+ None)
+ vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
+ self.assertNotEqual(vm_ref_orig.value, vm_ref.value,
+ "These should be different")
+
+ def test_disassociate_vmref_from_instance(self):
+ self._create_vm()
+ vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
+ vm_util.disassociate_vmref_from_instance(self.conn._session,
+ self.instance, vm_ref, "-backup")
+ self.assertRaises(exception.InstanceNotFound,
+ vm_util.get_vm_ref, self.conn._session, self.instance)
+
+ def test_clone_vmref_for_instance(self):
+ self._create_vm()
+ vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
+ vm_util.disassociate_vmref_from_instance(self.conn._session,
+ self.instance, vm_ref, "-backup")
+ host_ref = vmwareapi_fake._get_object_refs("HostSystem")[0]
+ ds_ref = vmwareapi_fake._get_object_refs("Datastore")[0]
+ dc_obj = vmwareapi_fake._get_objects("Datacenter").objects[0]
+ vm_util.clone_vmref_for_instance(self.conn._session, self.instance,
+ vm_ref, host_ref, ds_ref,
+ dc_obj.get("vmFolder"))
+ self.assertIsNotNone(
+ vm_util.get_vm_ref(self.conn._session, self.instance),
+ "No VM found")
+ cloned_vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
+ self.assertNotEqual(vm_ref.value, cloned_vm_ref.value,
+ "Reference for the cloned VM should be different")
+ vm_obj = vmwareapi_fake._get_vm_mdo(vm_ref)
+ cloned_vm_obj = vmwareapi_fake._get_vm_mdo(cloned_vm_ref)
+ self.assertEqual(vm_obj.name, self.instance['uuid'] + "-backup",
+ "Original VM name should be with suffix -backup")
+ self.assertEqual(cloned_vm_obj.name, self.instance['uuid'],
+ "VM name does not match instance['uuid']")
+ self.assertRaises(vexc.MissingParameter,
+ vm_util.clone_vmref_for_instance, self.conn._session,
+ self.instance, None, host_ref, ds_ref,
+ dc_obj.get("vmFolder"))
+
+ def test_associate_vmref_for_instance(self):
+ self._create_vm()
+ vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
+ # First disassociate the VM from the instance so that we have a VM
+ # to later associate using the associate_vmref_for_instance method
+ vm_util.disassociate_vmref_from_instance(self.conn._session,
+ self.instance, vm_ref, "-backup")
+ # Ensure that the VM is indeed disassociated and that we cannot find
+ # the VM using the get_vm_ref method
+ self.assertRaises(exception.InstanceNotFound,
+ vm_util.get_vm_ref, self.conn._session, self.instance)
+ # Associate the VM back to the instance
+ vm_util.associate_vmref_for_instance(self.conn._session, self.instance,
+ suffix="-backup")
+ # Verify if we can get the VM reference
+ self.assertIsNotNone(
+ vm_util.get_vm_ref(self.conn._session, self.instance),
+ "No VM found")
+
+ def test_confirm_migration(self):
+ self._create_vm()
+ self.conn.confirm_migration(self.context, self.instance, None)
+
+ def test_resize_to_smaller_disk(self):
+ self._create_vm(instance_type='m1.large')
+ flavor = self._get_instance_type_by_name('m1.small')
+ self.assertRaises(exception.InstanceFaultRollback,
+ self.conn.migrate_disk_and_power_off, self.context,
+ self.instance, 'fake_dest', flavor, None)
+
+ def test_spawn_attach_volume_vmdk(self):
+ self._spawn_attach_volume_vmdk(vc_support=True)
+
+ def test_spawn_attach_volume_vmdk_no_image_ref(self):
+ self._spawn_attach_volume_vmdk(set_image_ref=False, vc_support=True)
+
+ def test_pause(self):
+ # Tests that the VMwareVCDriver does not implement the pause method.
+ self._create_instance()
+ self.assertRaises(NotImplementedError, self.conn.pause, self.instance)
+
+ def test_unpause(self):
+ # Tests that the VMwareVCDriver does not implement the unpause method.
+ self._create_instance()
+ self.assertRaises(NotImplementedError, self.conn.unpause,
+ self.instance)
+
+ def test_datastore_dc_map(self):
+ vmops = self.conn._resources[self.node_name]['vmops']
+ self.assertEqual({}, vmops._datastore_dc_mapping)
+ self._create_vm()
+ # currently there are 2 data stores
+ self.assertEqual(2, len(vmops._datastore_dc_mapping))
+
+ def test_rollback_live_migration_at_destination(self):
+ with mock.patch.object(self.conn, "destroy") as mock_destroy:
+ self.conn.rollback_live_migration_at_destination(self.context,
+ "instance", [], None)
+ mock_destroy.assert_called_once_with(self.context,
+ "instance", [], None)
+
+ def test_get_instance_disk_info_is_implemented(self):
+ # Ensure that the method has been implemented in the driver
+ try:
+ disk_info = self.conn.get_instance_disk_info('fake_instance_name')
+ self.assertIsNone(disk_info)
+ except NotImplementedError:
+ self.fail("test_get_instance_disk_info() should not raise "
+ "NotImplementedError")
+
+ def test_destroy(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ instances = self.conn.list_instances()
+ self.assertEqual(1, len(instances))
+ self.conn.destroy(self.context, self.instance, self.network_info)
+ instances = self.conn.list_instances()
+ self.assertEqual(0, len(instances))
+ self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
+
+ def test_destroy_no_datastore(self):
+ self._create_vm()
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ instances = self.conn.list_instances()
+ self.assertEqual(1, len(instances))
+ # Overwrite the vmPathName
+ vm = self._get_vm_record()
+ vm.set("config.files.vmPathName", None)
+ self.conn.destroy(self.context, self.instance, self.network_info)
+ instances = self.conn.list_instances()
+ self.assertEqual(0, len(instances))
+
+ def test_destroy_non_existent(self):
+ self.destroy_disks = True
+ with mock.patch.object(self.conn._vmops,
+ "destroy") as mock_destroy:
+ self._create_instance()
+ self.conn.destroy(self.context, self.instance,
+ self.network_info,
+ None, self.destroy_disks)
+ mock_destroy.assert_called_once_with(self.instance,
+ self.destroy_disks)
+
+ def test_destroy_instance_without_compute(self):
+ self.destroy_disks = True
+ with mock.patch.object(self.conn._vmops,
+ "destroy") as mock_destroy:
+ self.conn.destroy(self.context, self.instance_without_compute,
+ self.network_info,
+ None, self.destroy_disks)
+ self.assertFalse(mock_destroy.called)
+
+ def test_get_host_uptime(self):
+ self.assertRaises(NotImplementedError,
+ self.conn.get_host_uptime, 'host')
+
+ def _test_finish_migration(self, power_on, resize_instance=False):
+ """Tests the finish_migration method on VC Driver."""
+ # setup the test instance in the database
+ self._create_vm()
+ if resize_instance:
+ self.instance.system_metadata = {'old_instance_type_root_gb': '0'}
+ vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
+ datastore = ds_util.Datastore(ref='fake-ref', name='fake')
+ dc_info = vmops.DcInfo(ref='fake_ref', name='fake',
+ vmFolder='fake_folder')
+ with contextlib.nested(
+ mock.patch.object(self.conn._session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(self.conn._vmops,
+ "_update_instance_progress"),
+ mock.patch.object(self.conn._session, "_wait_for_task"),
+ mock.patch.object(vm_util, "get_vm_resize_spec",
+ return_value='fake-spec'),
+ mock.patch.object(ds_util, "get_datastore",
+ return_value=datastore),
+ mock.patch.object(self.conn._vmops,
+ 'get_datacenter_ref_and_name',
+ return_value=dc_info),
+ mock.patch.object(self.conn._vmops, '_extend_virtual_disk'),
+ mock.patch.object(vm_util, "power_on_instance")
+ ) as (fake_call_method, fake_update_instance_progress,
+ fake_wait_for_task, fake_vm_resize_spec,
+ fake_get_datastore, fake_get_datacenter_ref_and_name,
+ fake_extend_virtual_disk, fake_power_on):
+ self.conn.finish_migration(context=self.context,
+ migration=None,
+ instance=self.instance,
+ disk_info=None,
+ network_info=None,
+ block_device_info=None,
+ resize_instance=resize_instance,
+ image_meta=None,
+ power_on=power_on)
+ if resize_instance:
+ fake_vm_resize_spec.assert_called_once_with(
+ self.conn._session.vim.client.factory,
+ self.instance)
+ fake_call_method.assert_any_call(
+ self.conn._session.vim,
+ "ReconfigVM_Task",
+ vm_ref,
+ spec='fake-spec')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+ fake_extend_virtual_disk.assert_called_once_with(
+ self.instance, self.instance['root_gb'] * units.Mi,
+ None, dc_info.ref)
+ else:
+ self.assertFalse(fake_vm_resize_spec.called)
+ self.assertFalse(fake_call_method.called)
+ self.assertFalse(fake_wait_for_task.called)
+ self.assertFalse(fake_extend_virtual_disk.called)
+
+ if power_on:
+ fake_power_on.assert_called_once_with(self.conn._session,
+ self.instance,
+ vm_ref=vm_ref)
+ else:
+ self.assertFalse(fake_power_on.called)
+ fake_update_instance_progress.called_once_with(
+ self.context, self.instance, 4, vmops.RESIZE_TOTAL_STEPS)
+
+ def test_finish_migration_power_on(self):
+ self._test_finish_migration(power_on=True)
+
+ def test_finish_migration_power_off(self):
+ self._test_finish_migration(power_on=False)
+
+ def test_finish_migration_power_on_resize(self):
+ self._test_finish_migration(power_on=True,
+ resize_instance=True)
+
+ @mock.patch.object(vm_util, 'associate_vmref_for_instance')
+ @mock.patch.object(vm_util, 'power_on_instance')
+ def _test_finish_revert_migration(self, fake_power_on,
+ fake_associate_vmref, power_on):
+ """Tests the finish_revert_migration method on VC Driver."""
+
+ # setup the test instance in the database
+ self._create_instance()
+ self.conn.finish_revert_migration(self.context,
+ instance=self.instance,
+ network_info=None,
+ block_device_info=None,
+ power_on=power_on)
+ fake_associate_vmref.assert_called_once_with(self.conn._session,
+ self.instance,
+ suffix='-orig')
+ if power_on:
+ fake_power_on.assert_called_once_with(self.conn._session,
+ self.instance)
+ else:
+ self.assertFalse(fake_power_on.called)
+
+ def test_finish_revert_migration_power_on(self):
+ self._test_finish_revert_migration(power_on=True)
+
+ def test_finish_revert_migration_power_off(self):
+ self._test_finish_revert_migration(power_on=False)
+
+ def test_pbm_wsdl_location(self):
+ self.flags(pbm_enabled=True,
+ pbm_wsdl_location='fira',
+ group='vmware')
+ self.conn._update_pbm_location()
+ self.assertEqual('fira', self.conn._session._pbm_wsdl_loc)
+ self.assertIsNone(self.conn._session._pbm)
diff --git a/nova/tests/unit/virt/vmwareapi/test_ds_util.py b/nova/tests/unit/virt/vmwareapi/test_ds_util.py
new file mode 100644
index 0000000000..6f5cf74b26
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_ds_util.py
@@ -0,0 +1,548 @@
+# Copyright (c) 2014 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import re
+
+import mock
+from oslo.utils import units
+from oslo.vmware import exceptions as vexc
+from testtools import matchers
+
+from nova import exception
+from nova.i18n import _
+from nova import test
+from nova.tests.unit.virt.vmwareapi import fake
+from nova.virt.vmwareapi import ds_util
+
+
+class DsUtilTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(DsUtilTestCase, self).setUp()
+ self.session = fake.FakeSession()
+ self.flags(api_retry_count=1, group='vmware')
+ fake.reset()
+
+ def tearDown(self):
+ super(DsUtilTestCase, self).tearDown()
+ fake.reset()
+
+ def test_file_delete(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ self.assertEqual('DeleteDatastoreFile_Task', method)
+ name = kwargs.get('name')
+ self.assertEqual('[ds] fake/path', name)
+ datacenter = kwargs.get('datacenter')
+ self.assertEqual('fake-dc-ref', datacenter)
+ return 'fake_delete_task'
+
+ with contextlib.nested(
+ mock.patch.object(self.session, '_wait_for_task'),
+ mock.patch.object(self.session, '_call_method',
+ fake_call_method)
+ ) as (_wait_for_task, _call_method):
+ ds_path = ds_util.DatastorePath('ds', 'fake/path')
+ ds_util.file_delete(self.session,
+ ds_path, 'fake-dc-ref')
+ _wait_for_task.assert_has_calls([
+ mock.call('fake_delete_task')])
+
+ def test_file_move(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ self.assertEqual('MoveDatastoreFile_Task', method)
+ sourceName = kwargs.get('sourceName')
+ self.assertEqual('[ds] tmp/src', sourceName)
+ destinationName = kwargs.get('destinationName')
+ self.assertEqual('[ds] base/dst', destinationName)
+ sourceDatacenter = kwargs.get('sourceDatacenter')
+ self.assertEqual('fake-dc-ref', sourceDatacenter)
+ destinationDatacenter = kwargs.get('destinationDatacenter')
+ self.assertEqual('fake-dc-ref', destinationDatacenter)
+ return 'fake_move_task'
+
+ with contextlib.nested(
+ mock.patch.object(self.session, '_wait_for_task'),
+ mock.patch.object(self.session, '_call_method',
+ fake_call_method)
+ ) as (_wait_for_task, _call_method):
+ src_ds_path = ds_util.DatastorePath('ds', 'tmp/src')
+ dst_ds_path = ds_util.DatastorePath('ds', 'base/dst')
+ ds_util.file_move(self.session,
+ 'fake-dc-ref', src_ds_path, dst_ds_path)
+ _wait_for_task.assert_has_calls([
+ mock.call('fake_move_task')])
+
+ def test_mkdir(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ self.assertEqual('MakeDirectory', method)
+ name = kwargs.get('name')
+ self.assertEqual('[ds] fake/path', name)
+ datacenter = kwargs.get('datacenter')
+ self.assertEqual('fake-dc-ref', datacenter)
+ createParentDirectories = kwargs.get('createParentDirectories')
+ self.assertTrue(createParentDirectories)
+
+ with mock.patch.object(self.session, '_call_method',
+ fake_call_method):
+ ds_path = ds_util.DatastorePath('ds', 'fake/path')
+ ds_util.mkdir(self.session, ds_path, 'fake-dc-ref')
+
+ def test_file_exists(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ if method == 'SearchDatastore_Task':
+ ds_browser = args[0]
+ self.assertEqual('fake-browser', ds_browser)
+ datastorePath = kwargs.get('datastorePath')
+ self.assertEqual('[ds] fake/path', datastorePath)
+ return 'fake_exists_task'
+
+ # Should never get here
+ self.fail()
+
+ def fake_wait_for_task(task_ref):
+ if task_ref == 'fake_exists_task':
+ result_file = fake.DataObject()
+ result_file.path = 'fake-file'
+
+ result = fake.DataObject()
+ result.file = [result_file]
+ result.path = '[ds] fake/path'
+
+ task_info = fake.DataObject()
+ task_info.result = result
+
+ return task_info
+
+ # Should never get here
+ self.fail()
+
+ with contextlib.nested(
+ mock.patch.object(self.session, '_call_method',
+ fake_call_method),
+ mock.patch.object(self.session, '_wait_for_task',
+ fake_wait_for_task)):
+ ds_path = ds_util.DatastorePath('ds', 'fake/path')
+ file_exists = ds_util.file_exists(self.session,
+ 'fake-browser', ds_path, 'fake-file')
+ self.assertTrue(file_exists)
+
+ def test_file_exists_fails(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ if method == 'SearchDatastore_Task':
+ return 'fake_exists_task'
+
+ # Should never get here
+ self.fail()
+
+ def fake_wait_for_task(task_ref):
+ if task_ref == 'fake_exists_task':
+ raise vexc.FileNotFoundException()
+
+ # Should never get here
+ self.fail()
+
+ with contextlib.nested(
+ mock.patch.object(self.session, '_call_method',
+ fake_call_method),
+ mock.patch.object(self.session, '_wait_for_task',
+ fake_wait_for_task)):
+ ds_path = ds_util.DatastorePath('ds', 'fake/path')
+ file_exists = ds_util.file_exists(self.session,
+ 'fake-browser', ds_path, 'fake-file')
+ self.assertFalse(file_exists)
+
+ def _mock_get_datastore_calls(self, *datastores):
+ """Mock vim_util calls made by get_datastore."""
+
+ datastores_i = [None]
+
+ # For the moment, at least, this list of datastores is simply passed to
+ # get_properties_for_a_collection_of_objects, which we mock below. We
+ # don't need to over-complicate the fake function by worrying about its
+ # contents.
+ fake_ds_list = ['fake-ds']
+
+ def fake_call_method(module, method, *args, **kwargs):
+ # Mock the call which returns a list of datastores for the cluster
+ if (module == ds_util.vim_util and
+ method == 'get_dynamic_property' and
+ args == ('fake-cluster', 'ClusterComputeResource',
+ 'datastore')):
+ fake_ds_mor = fake.DataObject()
+ fake_ds_mor.ManagedObjectReference = fake_ds_list
+ return fake_ds_mor
+
+ # Return the datastore result sets we were passed in, in the order
+ # given
+ if (module == ds_util.vim_util and
+ method == 'get_properties_for_a_collection_of_objects' and
+ args[0] == 'Datastore' and
+ args[1] == fake_ds_list):
+ # Start a new iterator over given datastores
+ datastores_i[0] = iter(datastores)
+ return datastores_i[0].next()
+
+ # Continue returning results from the current iterator.
+ if (module == ds_util.vim_util and
+ method == 'continue_to_get_objects'):
+ try:
+ return datastores_i[0].next()
+ except StopIteration:
+ return None
+
+ # Sentinel that get_datastore's use of vim has changed
+ self.fail('Unexpected vim call in get_datastore: %s' % method)
+
+ return mock.patch.object(self.session, '_call_method',
+ side_effect=fake_call_method)
+
+ def test_get_datastore(self):
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.Datastore())
+ fake_objects.add_object(fake.Datastore("fake-ds-2", 2048, 1000,
+ False, "normal"))
+ fake_objects.add_object(fake.Datastore("fake-ds-3", 4096, 2000,
+ True, "inMaintenance"))
+
+ with self._mock_get_datastore_calls(fake_objects):
+ result = ds_util.get_datastore(self.session, 'fake-cluster')
+ self.assertEqual("fake-ds", result.name)
+ self.assertEqual(units.Ti, result.capacity)
+ self.assertEqual(500 * units.Gi, result.freespace)
+
+ def test_get_datastore_with_regex(self):
+ # Test with a regex that matches with a datastore
+ datastore_valid_regex = re.compile("^openstack.*\d$")
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.Datastore("openstack-ds0"))
+ fake_objects.add_object(fake.Datastore("fake-ds0"))
+ fake_objects.add_object(fake.Datastore("fake-ds1"))
+
+ with self._mock_get_datastore_calls(fake_objects):
+ result = ds_util.get_datastore(self.session, 'fake-cluster',
+ datastore_valid_regex)
+ self.assertEqual("openstack-ds0", result.name)
+
+ def test_get_datastore_with_token(self):
+ regex = re.compile("^ds.*\d$")
+ fake0 = fake.FakeRetrieveResult()
+ fake0.add_object(fake.Datastore("ds0", 10 * units.Gi, 5 * units.Gi))
+ fake0.add_object(fake.Datastore("foo", 10 * units.Gi, 9 * units.Gi))
+ setattr(fake0, 'token', 'token-0')
+ fake1 = fake.FakeRetrieveResult()
+ fake1.add_object(fake.Datastore("ds2", 10 * units.Gi, 8 * units.Gi))
+ fake1.add_object(fake.Datastore("ds3", 10 * units.Gi, 1 * units.Gi))
+
+ with self._mock_get_datastore_calls(fake0, fake1):
+ result = ds_util.get_datastore(self.session, 'fake-cluster', regex)
+ self.assertEqual("ds2", result.name)
+
+ def test_get_datastore_with_list(self):
+ # Test with a regex containing whitelist of datastores
+ datastore_valid_regex = re.compile("(openstack-ds0|openstack-ds2)")
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.Datastore("openstack-ds0"))
+ fake_objects.add_object(fake.Datastore("openstack-ds1"))
+ fake_objects.add_object(fake.Datastore("openstack-ds2"))
+
+ with self._mock_get_datastore_calls(fake_objects):
+ result = ds_util.get_datastore(self.session, 'fake-cluster',
+ datastore_valid_regex)
+ self.assertNotEqual("openstack-ds1", result.name)
+
+ def test_get_datastore_with_regex_error(self):
+ # Test with a regex that has no match
+ # Checks if code raises DatastoreNotFound with a specific message
+ datastore_invalid_regex = re.compile("unknown-ds")
+ exp_message = (_("Datastore regex %s did not match any datastores")
+ % datastore_invalid_regex.pattern)
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.Datastore("fake-ds0"))
+ fake_objects.add_object(fake.Datastore("fake-ds1"))
+ # assertRaisesRegExp would have been a good choice instead of
+ # try/catch block, but it's available only from Py 2.7.
+ try:
+ with self._mock_get_datastore_calls(fake_objects):
+ ds_util.get_datastore(self.session, 'fake-cluster',
+ datastore_invalid_regex)
+ except exception.DatastoreNotFound as e:
+ self.assertEqual(exp_message, e.args[0])
+ else:
+ self.fail("DatastoreNotFound Exception was not raised with "
+ "message: %s" % exp_message)
+
+ def test_get_datastore_without_datastore(self):
+ self.assertRaises(exception.DatastoreNotFound,
+ ds_util.get_datastore,
+ fake.FakeObjectRetrievalSession(None), cluster="fake-cluster")
+
+ def test_get_datastore_inaccessible_ds(self):
+ data_store = fake.Datastore()
+ data_store.set("summary.accessible", False)
+
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(data_store)
+
+ with self._mock_get_datastore_calls(fake_objects):
+ self.assertRaises(exception.DatastoreNotFound,
+ ds_util.get_datastore,
+ self.session, 'fake-cluster')
+
+ def test_get_datastore_ds_in_maintenance(self):
+ data_store = fake.Datastore()
+ data_store.set("summary.maintenanceMode", "inMaintenance")
+
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(data_store)
+
+ with self._mock_get_datastore_calls(fake_objects):
+ self.assertRaises(exception.DatastoreNotFound,
+ ds_util.get_datastore,
+ self.session, 'fake-cluster')
+
+ def test_get_datastore_no_host_in_cluster(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ return ''
+
+ with mock.patch.object(self.session, '_call_method',
+ fake_call_method):
+ self.assertRaises(exception.DatastoreNotFound,
+ ds_util.get_datastore,
+ self.session, 'fake-cluster')
+
+ def _test_is_datastore_valid(self, accessible=True,
+ maintenance_mode="normal",
+ type="VMFS",
+ datastore_regex=None):
+ propdict = {}
+ propdict["summary.accessible"] = accessible
+ propdict["summary.maintenanceMode"] = maintenance_mode
+ propdict["summary.type"] = type
+ propdict["summary.name"] = "ds-1"
+
+ return ds_util._is_datastore_valid(propdict, datastore_regex)
+
+ def test_is_datastore_valid(self):
+ for ds_type in ds_util.ALLOWED_DATASTORE_TYPES:
+ self.assertTrue(self._test_is_datastore_valid(True,
+ "normal",
+ ds_type))
+
+ def test_is_datastore_valid_inaccessible_ds(self):
+ self.assertFalse(self._test_is_datastore_valid(False,
+ "normal",
+ "VMFS"))
+
+ def test_is_datastore_valid_ds_in_maintenance(self):
+ self.assertFalse(self._test_is_datastore_valid(True,
+ "inMaintenance",
+ "VMFS"))
+
+ def test_is_datastore_valid_ds_type_invalid(self):
+ self.assertFalse(self._test_is_datastore_valid(True,
+ "normal",
+ "vfat"))
+
+ def test_is_datastore_valid_not_matching_regex(self):
+ datastore_regex = re.compile("ds-2")
+ self.assertFalse(self._test_is_datastore_valid(True,
+ "normal",
+ "VMFS",
+ datastore_regex))
+
+ def test_is_datastore_valid_matching_regex(self):
+ datastore_regex = re.compile("ds-1")
+ self.assertTrue(self._test_is_datastore_valid(True,
+ "normal",
+ "VMFS",
+ datastore_regex))
+
+
+class DatastoreTestCase(test.NoDBTestCase):
+ def test_ds(self):
+ ds = ds_util.Datastore(
+ "fake_ref", "ds_name", 2 * units.Gi, 1 * units.Gi)
+ self.assertEqual('ds_name', ds.name)
+ self.assertEqual('fake_ref', ds.ref)
+ self.assertEqual(2 * units.Gi, ds.capacity)
+ self.assertEqual(1 * units.Gi, ds.freespace)
+
+ def test_ds_invalid_space(self):
+ self.assertRaises(ValueError, ds_util.Datastore,
+ "fake_ref", "ds_name", 1 * units.Gi, 2 * units.Gi)
+ self.assertRaises(ValueError, ds_util.Datastore,
+ "fake_ref", "ds_name", None, 2 * units.Gi)
+
+ def test_ds_no_capacity_no_freespace(self):
+ ds = ds_util.Datastore("fake_ref", "ds_name")
+ self.assertIsNone(ds.capacity)
+ self.assertIsNone(ds.freespace)
+
+ def test_ds_invalid(self):
+ self.assertRaises(ValueError, ds_util.Datastore, None, "ds_name")
+ self.assertRaises(ValueError, ds_util.Datastore, "fake_ref", None)
+
+ def test_build_path(self):
+ ds = ds_util.Datastore("fake_ref", "ds_name")
+ ds_path = ds.build_path("some_dir", "foo.vmdk")
+ self.assertEqual('[ds_name] some_dir/foo.vmdk', str(ds_path))
+
+
+class DatastorePathTestCase(test.NoDBTestCase):
+
+ def test_ds_path(self):
+ p = ds_util.DatastorePath('dsname', 'a/b/c', 'file.iso')
+ self.assertEqual('[dsname] a/b/c/file.iso', str(p))
+ self.assertEqual('a/b/c/file.iso', p.rel_path)
+ self.assertEqual('a/b/c', p.parent.rel_path)
+ self.assertEqual('[dsname] a/b/c', str(p.parent))
+ self.assertEqual('dsname', p.datastore)
+ self.assertEqual('file.iso', p.basename)
+ self.assertEqual('a/b/c', p.dirname)
+
+ def test_ds_path_no_ds_name(self):
+ bad_args = [
+ ('', ['a/b/c', 'file.iso']),
+ (None, ['a/b/c', 'file.iso'])]
+ for t in bad_args:
+ self.assertRaises(
+ ValueError, ds_util.DatastorePath,
+ t[0], *t[1])
+
+ def test_ds_path_invalid_path_components(self):
+ bad_args = [
+ ('dsname', [None]),
+ ('dsname', ['', None]),
+ ('dsname', ['a', None]),
+ ('dsname', ['a', None, 'b']),
+ ('dsname', [None, '']),
+ ('dsname', [None, 'b'])]
+
+ for t in bad_args:
+ self.assertRaises(
+ ValueError, ds_util.DatastorePath,
+ t[0], *t[1])
+
+ def test_ds_path_no_subdir(self):
+ args = [
+ ('dsname', ['', 'x.vmdk']),
+ ('dsname', ['x.vmdk'])]
+
+ canonical_p = ds_util.DatastorePath('dsname', 'x.vmdk')
+ self.assertEqual('[dsname] x.vmdk', str(canonical_p))
+ self.assertEqual('', canonical_p.dirname)
+ self.assertEqual('x.vmdk', canonical_p.basename)
+ self.assertEqual('x.vmdk', canonical_p.rel_path)
+ for t in args:
+ p = ds_util.DatastorePath(t[0], *t[1])
+ self.assertEqual(str(canonical_p), str(p))
+
+ def test_ds_path_ds_only(self):
+ args = [
+ ('dsname', []),
+ ('dsname', ['']),
+ ('dsname', ['', ''])]
+
+ canonical_p = ds_util.DatastorePath('dsname')
+ self.assertEqual('[dsname]', str(canonical_p))
+ self.assertEqual('', canonical_p.rel_path)
+ self.assertEqual('', canonical_p.basename)
+ self.assertEqual('', canonical_p.dirname)
+ for t in args:
+ p = ds_util.DatastorePath(t[0], *t[1])
+ self.assertEqual(str(canonical_p), str(p))
+ self.assertEqual(canonical_p.rel_path, p.rel_path)
+
+ def test_ds_path_equivalence(self):
+ args = [
+ ('dsname', ['a/b/c/', 'x.vmdk']),
+ ('dsname', ['a/', 'b/c/', 'x.vmdk']),
+ ('dsname', ['a', 'b', 'c', 'x.vmdk']),
+ ('dsname', ['a/b/c', 'x.vmdk'])]
+
+ canonical_p = ds_util.DatastorePath('dsname', 'a/b/c', 'x.vmdk')
+ for t in args:
+ p = ds_util.DatastorePath(t[0], *t[1])
+ self.assertEqual(str(canonical_p), str(p))
+ self.assertEqual(canonical_p.datastore, p.datastore)
+ self.assertEqual(canonical_p.rel_path, p.rel_path)
+ self.assertEqual(str(canonical_p.parent), str(p.parent))
+
+ def test_ds_path_non_equivalence(self):
+ args = [
+ # leading slash
+ ('dsname', ['/a', 'b', 'c', 'x.vmdk']),
+ ('dsname', ['/a/b/c/', 'x.vmdk']),
+ ('dsname', ['a/b/c', '/x.vmdk']),
+ # leading space
+ ('dsname', ['a/b/c/', ' x.vmdk']),
+ ('dsname', ['a/', ' b/c/', 'x.vmdk']),
+ ('dsname', [' a', 'b', 'c', 'x.vmdk']),
+ # trailing space
+ ('dsname', ['/a/b/c/', 'x.vmdk ']),
+ ('dsname', ['a/b/c/ ', 'x.vmdk'])]
+
+ canonical_p = ds_util.DatastorePath('dsname', 'a/b/c', 'x.vmdk')
+ for t in args:
+ p = ds_util.DatastorePath(t[0], *t[1])
+ self.assertNotEqual(str(canonical_p), str(p))
+
+ def test_ds_path_hashable(self):
+ ds1 = ds_util.DatastorePath('dsname', 'path')
+ ds2 = ds_util.DatastorePath('dsname', 'path')
+
+ # If the above objects have the same hash, they will only be added to
+ # the set once
+ self.assertThat(set([ds1, ds2]), matchers.HasLength(1))
+
+ def test_equal(self):
+ a = ds_util.DatastorePath('ds_name', 'a')
+ b = ds_util.DatastorePath('ds_name', 'a')
+ self.assertEqual(a, b)
+
+ def test_join(self):
+ p = ds_util.DatastorePath('ds_name', 'a')
+ ds_path = p.join('b')
+ self.assertEqual('[ds_name] a/b', str(ds_path))
+
+ p = ds_util.DatastorePath('ds_name', 'a')
+ ds_path = p.join()
+ self.assertEqual('[ds_name] a', str(ds_path))
+
+ bad_args = [
+ [None],
+ ['', None],
+ ['a', None],
+ ['a', None, 'b']]
+ for arg in bad_args:
+ self.assertRaises(ValueError, p.join, *arg)
+
+ def test_ds_path_parse(self):
+ p = ds_util.DatastorePath.parse('[dsname]')
+ self.assertEqual('dsname', p.datastore)
+ self.assertEqual('', p.rel_path)
+
+ p = ds_util.DatastorePath.parse('[dsname] folder')
+ self.assertEqual('dsname', p.datastore)
+ self.assertEqual('folder', p.rel_path)
+
+ p = ds_util.DatastorePath.parse('[dsname] folder/file')
+ self.assertEqual('dsname', p.datastore)
+ self.assertEqual('folder/file', p.rel_path)
+
+ for p in [None, '']:
+ self.assertRaises(ValueError, ds_util.DatastorePath.parse, p)
+
+ for p in ['bad path', '/a/b/c', 'a/b/c']:
+ self.assertRaises(IndexError, ds_util.DatastorePath.parse, p)
diff --git a/nova/tests/virt/vmwareapi/test_ds_util_datastore_selection.py b/nova/tests/unit/virt/vmwareapi/test_ds_util_datastore_selection.py
index 1351530143..1351530143 100644
--- a/nova/tests/virt/vmwareapi/test_ds_util_datastore_selection.py
+++ b/nova/tests/unit/virt/vmwareapi/test_ds_util_datastore_selection.py
diff --git a/nova/tests/unit/virt/vmwareapi/test_imagecache.py b/nova/tests/unit/virt/vmwareapi/test_imagecache.py
new file mode 100644
index 0000000000..d277963106
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_imagecache.py
@@ -0,0 +1,277 @@
+# Copyright (c) 2014 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import datetime
+
+import mock
+from oslo.config import cfg
+from oslo.utils import timeutils
+
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.virt.vmwareapi import fake
+from nova.virt.vmwareapi import ds_util
+from nova.virt.vmwareapi import imagecache
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vmops
+
+CONF = cfg.CONF
+
+
+class ImageCacheManagerTestCase(test.NoDBTestCase):
+
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(ImageCacheManagerTestCase, self).setUp()
+ self._session = mock.Mock(name='session')
+ self._imagecache = imagecache.ImageCacheManager(self._session,
+ 'fake-base-folder')
+ self._time = datetime.datetime(2012, 11, 22, 12, 00, 00)
+ self._file_name = 'ts-2012-11-22-12-00-00'
+ fake.reset()
+
+ def tearDown(self):
+ super(ImageCacheManagerTestCase, self).tearDown()
+ fake.reset()
+
+ def test_timestamp_cleanup(self):
+ def fake_get_timestamp(ds_browser, ds_path):
+ self.assertEqual('fake-ds-browser', ds_browser)
+ self.assertEqual('[fake-ds] fake-path', str(ds_path))
+ if not self.exists:
+ return
+ ts = '%s%s' % (imagecache.TIMESTAMP_PREFIX,
+ timeutils.strtime(at=self._time,
+ fmt=imagecache.TIMESTAMP_FORMAT))
+ return ts
+
+ with contextlib.nested(
+ mock.patch.object(self._imagecache, '_get_timestamp',
+ fake_get_timestamp),
+ mock.patch.object(ds_util, 'file_delete')
+ ) as (_get_timestamp, _file_delete):
+ self.exists = False
+ self._imagecache.timestamp_cleanup(
+ 'fake-dc-ref', 'fake-ds-browser',
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
+ self.assertEqual(0, _file_delete.call_count)
+ self.exists = True
+ self._imagecache.timestamp_cleanup(
+ 'fake-dc-ref', 'fake-ds-browser',
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
+ expected_ds_path = ds_util.DatastorePath(
+ 'fake-ds', 'fake-path', self._file_name)
+ _file_delete.assert_called_once_with(self._session,
+ expected_ds_path, 'fake-dc-ref')
+
+ def test_get_timestamp(self):
+ def fake_get_sub_folders(session, ds_browser, ds_path):
+ self.assertEqual('fake-ds-browser', ds_browser)
+ self.assertEqual('[fake-ds] fake-path', str(ds_path))
+ if self.exists:
+ files = set()
+ files.add(self._file_name)
+ return files
+
+ with contextlib.nested(
+ mock.patch.object(ds_util, 'get_sub_folders',
+ fake_get_sub_folders)
+ ):
+ self.exists = True
+ ts = self._imagecache._get_timestamp(
+ 'fake-ds-browser',
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
+ self.assertEqual(self._file_name, ts)
+ self.exists = False
+ ts = self._imagecache._get_timestamp(
+ 'fake-ds-browser',
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
+ self.assertIsNone(ts)
+
+ def test_get_timestamp_filename(self):
+ timeutils.set_time_override(override_time=self._time)
+ fn = self._imagecache._get_timestamp_filename()
+ self.assertEqual(self._file_name, fn)
+
+ def test_get_datetime_from_filename(self):
+ t = self._imagecache._get_datetime_from_filename(self._file_name)
+ self.assertEqual(self._time, t)
+
+ def test_get_ds_browser(self):
+ cache = self._imagecache._ds_browser
+ ds_browser = mock.Mock()
+ moref = fake.ManagedObjectReference('datastore-100')
+ self.assertIsNone(cache.get(moref.value))
+ mock_get_method = mock.Mock(return_value=ds_browser)
+ with mock.patch.object(vim_util, 'get_dynamic_property',
+ mock_get_method):
+ ret = self._imagecache._get_ds_browser(moref)
+ mock_get_method.assert_called_once_with(mock.ANY, moref,
+ 'Datastore', 'browser')
+ self.assertIs(ds_browser, ret)
+ self.assertIs(ds_browser, cache.get(moref.value))
+
+ def test_list_base_images(self):
+ def fake_get_dynamic_property(vim, mobj, type, property_name):
+ return 'fake-ds-browser'
+
+ def fake_get_sub_folders(session, ds_browser, ds_path):
+ files = set()
+ files.add('image-ref-uuid')
+ return files
+
+ with contextlib.nested(
+ mock.patch.object(vim_util, 'get_dynamic_property',
+ fake_get_dynamic_property),
+ mock.patch.object(ds_util, 'get_sub_folders',
+ fake_get_sub_folders)
+ ) as (_get_dynamic, _get_sub_folders):
+ fake_ds_ref = fake.ManagedObjectReference('fake-ds-ref')
+ datastore = ds_util.Datastore(name='ds', ref=fake_ds_ref)
+ ds_path = datastore.build_path('base_folder')
+ images = self._imagecache._list_datastore_images(
+ ds_path, datastore)
+ originals = set()
+ originals.add('image-ref-uuid')
+ self.assertEqual({'originals': originals,
+ 'unexplained_images': []},
+ images)
+
+ @mock.patch.object(imagecache.ImageCacheManager, 'timestamp_folder_get')
+ @mock.patch.object(imagecache.ImageCacheManager, 'timestamp_cleanup')
+ @mock.patch.object(imagecache.ImageCacheManager, '_get_ds_browser')
+ def test_enlist_image(self,
+ mock_get_ds_browser,
+ mock_timestamp_cleanup,
+ mock_timestamp_folder_get):
+ image_id = "fake_image_id"
+ dc_ref = "fake_dc_ref"
+ fake_ds_ref = mock.Mock()
+ ds = ds_util.Datastore(
+ ref=fake_ds_ref, name='fake_ds',
+ capacity=1,
+ freespace=1)
+
+ ds_browser = mock.Mock()
+ mock_get_ds_browser.return_value = ds_browser
+ timestamp_folder_path = mock.Mock()
+ mock_timestamp_folder_get.return_value = timestamp_folder_path
+
+ self._imagecache.enlist_image(image_id, ds, dc_ref)
+
+ cache_root_folder = ds.build_path("fake-base-folder")
+ mock_get_ds_browser.assert_called_once_with(
+ ds.ref)
+ mock_timestamp_folder_get.assert_called_once_with(
+ cache_root_folder, "fake_image_id")
+ mock_timestamp_cleanup.assert_called_once_with(
+ dc_ref, ds_browser, timestamp_folder_path)
+
+ def test_age_cached_images(self):
+ def fake_get_ds_browser(ds_ref):
+ return 'fake-ds-browser'
+
+ def fake_get_timestamp(ds_browser, ds_path):
+ self._get_timestamp_called += 1
+ path = str(ds_path)
+ if path == '[fake-ds] fake-path/fake-image-1':
+ # No time stamp exists
+ return
+ if path == '[fake-ds] fake-path/fake-image-2':
+ # Timestamp that will be valid => no deletion
+ return 'ts-2012-11-22-10-00-00'
+ if path == '[fake-ds] fake-path/fake-image-3':
+ # Timestamp that will be invalid => deletion
+ return 'ts-2012-11-20-12-00-00'
+ self.fail()
+
+ def fake_mkdir(session, ts_path, dc_ref):
+ self.assertEqual(
+ '[fake-ds] fake-path/fake-image-1/ts-2012-11-22-12-00-00',
+ str(ts_path))
+
+ def fake_file_delete(session, ds_path, dc_ref):
+ self.assertEqual('[fake-ds] fake-path/fake-image-3', str(ds_path))
+
+ def fake_timestamp_cleanup(dc_ref, ds_browser, ds_path):
+ self.assertEqual('[fake-ds] fake-path/fake-image-4', str(ds_path))
+
+ with contextlib.nested(
+ mock.patch.object(self._imagecache, '_get_ds_browser',
+ fake_get_ds_browser),
+ mock.patch.object(self._imagecache, '_get_timestamp',
+ fake_get_timestamp),
+ mock.patch.object(ds_util, 'mkdir',
+ fake_mkdir),
+ mock.patch.object(ds_util, 'file_delete',
+ fake_file_delete),
+ mock.patch.object(self._imagecache, 'timestamp_cleanup',
+ fake_timestamp_cleanup),
+ ) as (_get_ds_browser, _get_timestamp, _mkdir, _file_delete,
+ _timestamp_cleanup):
+ timeutils.set_time_override(override_time=self._time)
+ datastore = ds_util.Datastore(name='ds', ref='fake-ds-ref')
+ dc_info = vmops.DcInfo(ref='dc_ref', name='name',
+ vmFolder='vmFolder')
+ self._get_timestamp_called = 0
+ self._imagecache.originals = set(['fake-image-1', 'fake-image-2',
+ 'fake-image-3', 'fake-image-4'])
+ self._imagecache.used_images = set(['fake-image-4'])
+ self._imagecache._age_cached_images(
+ 'fake-context', datastore, dc_info,
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
+ self.assertEqual(3, self._get_timestamp_called)
+
+ def test_update(self):
+ def fake_list_datastore_images(ds_path, datastore):
+ return {'unexplained_images': [],
+ 'originals': self.images}
+
+ def fake_age_cached_images(context, datastore,
+ dc_info, ds_path):
+ self.assertEqual('[ds] fake-base-folder', str(ds_path))
+ self.assertEqual(self.images,
+ self._imagecache.used_images)
+ self.assertEqual(self.images,
+ self._imagecache.originals)
+
+ with contextlib.nested(
+ mock.patch.object(self._imagecache, '_list_datastore_images',
+ fake_list_datastore_images),
+ mock.patch.object(self._imagecache,
+ '_age_cached_images',
+ fake_age_cached_images)
+ ) as (_list_base, _age_and_verify):
+ instances = [{'image_ref': '1',
+ 'host': CONF.host,
+ 'name': 'inst-1',
+ 'uuid': '123',
+ 'vm_state': '',
+ 'task_state': ''},
+ {'image_ref': '2',
+ 'host': CONF.host,
+ 'name': 'inst-2',
+ 'uuid': '456',
+ 'vm_state': '',
+ 'task_state': ''}]
+ all_instances = [fake_instance.fake_instance_obj(None, **instance)
+ for instance in instances]
+ self.images = set(['1', '2'])
+ datastore = ds_util.Datastore(name='ds', ref='fake-ds-ref')
+ dc_info = vmops.DcInfo(ref='dc_ref', name='name',
+ vmFolder='vmFolder')
+ datastores_info = [(datastore, dc_info)]
+ self._imagecache.update('context', all_instances, datastores_info)
diff --git a/nova/tests/unit/virt/vmwareapi/test_images.py b/nova/tests/unit/virt/vmwareapi/test_images.py
new file mode 100644
index 0000000000..07fc3be214
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_images.py
@@ -0,0 +1,216 @@
+# Copyright (c) 2014 VMware, Inc.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suite for images.
+"""
+
+import contextlib
+
+import mock
+from oslo.utils import units
+
+from nova import exception
+from nova import test
+import nova.tests.unit.image.fake
+from nova.virt.vmwareapi import constants
+from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import read_write_util
+
+
+class VMwareImagesTestCase(test.NoDBTestCase):
+    """Unit tests for VMware API image handling."""
+
+ def test_fetch_image(self):
+ """Test fetching images."""
+
+ dc_name = 'fake-dc'
+ file_path = 'fake_file'
+ ds_name = 'ds1'
+ host = mock.MagicMock()
+ context = mock.MagicMock()
+
+ image_data = {
+ 'id': nova.tests.unit.image.fake.get_valid_image_id(),
+ 'disk_format': 'vmdk',
+ 'size': 512,
+ }
+ read_file_handle = mock.MagicMock()
+ write_file_handle = mock.MagicMock()
+ read_iter = mock.MagicMock()
+ instance = {}
+ instance['image_ref'] = image_data['id']
+ instance['uuid'] = 'fake-uuid'
+
+ def fake_read_handle(read_iter):
+ return read_file_handle
+
+ def fake_write_handle(host, dc_name, ds_name, cookies,
+ file_path, file_size):
+ return write_file_handle
+
+ with contextlib.nested(
+ mock.patch.object(read_write_util, 'GlanceFileRead',
+ side_effect=fake_read_handle),
+ mock.patch.object(read_write_util, 'VMwareHTTPWriteFile',
+ side_effect=fake_write_handle),
+ mock.patch.object(images, 'start_transfer'),
+ mock.patch.object(images.IMAGE_API, 'get',
+ return_value=image_data),
+ mock.patch.object(images.IMAGE_API, 'download',
+ return_value=read_iter),
+ ) as (glance_read, http_write, start_transfer, image_show,
+ image_download):
+ images.fetch_image(context, instance,
+ host, dc_name,
+ ds_name, file_path)
+
+ glance_read.assert_called_once_with(read_iter)
+ http_write.assert_called_once_with(host, dc_name, ds_name, None,
+ file_path, image_data['size'])
+ start_transfer.assert_called_once_with(
+ context, read_file_handle,
+ image_data['size'],
+ write_file_handle=write_file_handle)
+ image_download.assert_called_once_with(context, instance['image_ref'])
+ image_show.assert_called_once_with(context, instance['image_ref'])
+
+ def _setup_mock_get_remote_image_service(self,
+ mock_get_remote_image_service,
+ metadata):
+ mock_image_service = mock.MagicMock()
+ mock_image_service.show.return_value = metadata
+ mock_get_remote_image_service.return_value = [mock_image_service, 'i']
+
+ def test_from_image_with_image_ref(self):
+ raw_disk_size_in_gb = 83
+ raw_disk_size_in_bytes = raw_disk_size_in_gb * units.Gi
+ image_id = nova.tests.unit.image.fake.get_valid_image_id()
+ mdata = {'size': raw_disk_size_in_bytes,
+ 'disk_format': 'vmdk',
+ 'properties': {
+ "vmware_ostype": constants.DEFAULT_OS_TYPE,
+ "vmware_adaptertype": constants.DEFAULT_ADAPTER_TYPE,
+ "vmware_disktype": constants.DEFAULT_DISK_TYPE,
+ "hw_vif_model": constants.DEFAULT_VIF_MODEL,
+ images.LINKED_CLONE_PROPERTY: True}}
+
+ img_props = images.VMwareImage.from_image(image_id, mdata)
+
+ image_size_in_kb = raw_disk_size_in_bytes / units.Ki
+
+ # assert that defaults are set and no value returned is left empty
+ self.assertEqual(constants.DEFAULT_OS_TYPE, img_props.os_type)
+ self.assertEqual(constants.DEFAULT_ADAPTER_TYPE,
+ img_props.adapter_type)
+ self.assertEqual(constants.DEFAULT_DISK_TYPE, img_props.disk_type)
+ self.assertEqual(constants.DEFAULT_VIF_MODEL, img_props.vif_model)
+ self.assertTrue(img_props.linked_clone)
+ self.assertEqual(image_size_in_kb, img_props.file_size_in_kb)
+
+ def _image_build(self, image_lc_setting, global_lc_setting,
+ disk_format=constants.DEFAULT_DISK_FORMAT,
+ os_type=constants.DEFAULT_OS_TYPE,
+ adapter_type=constants.DEFAULT_ADAPTER_TYPE,
+ disk_type=constants.DEFAULT_DISK_TYPE,
+ vif_model=constants.DEFAULT_VIF_MODEL):
+ self.flags(use_linked_clone=global_lc_setting, group='vmware')
+ raw_disk_size_in_gb = 93
+ raw_disk_size_in_btyes = raw_disk_size_in_gb * units.Gi
+
+ image_id = nova.tests.unit.image.fake.get_valid_image_id()
+ mdata = {'size': raw_disk_size_in_btyes,
+ 'disk_format': disk_format,
+ 'properties': {
+ "vmware_ostype": os_type,
+ "vmware_adaptertype": adapter_type,
+ "vmware_disktype": disk_type,
+ "hw_vif_model": vif_model}}
+
+ if image_lc_setting is not None:
+ mdata['properties'][
+ images.LINKED_CLONE_PROPERTY] = image_lc_setting
+
+ return images.VMwareImage.from_image(image_id, mdata)
+
+ def test_use_linked_clone_override_nf(self):
+ image_props = self._image_build(None, False)
+ self.assertFalse(image_props.linked_clone,
+ "No overrides present but still overridden!")
+
+ def test_use_linked_clone_override_nt(self):
+ image_props = self._image_build(None, True)
+ self.assertTrue(image_props.linked_clone,
+ "No overrides present but still overridden!")
+
+ def test_use_linked_clone_override_ny(self):
+ image_props = self._image_build(None, "yes")
+ self.assertTrue(image_props.linked_clone,
+ "No overrides present but still overridden!")
+
+ def test_use_linked_clone_override_ft(self):
+ image_props = self._image_build(False, True)
+ self.assertFalse(image_props.linked_clone,
+ "image level metadata failed to override global")
+
+ def test_use_linked_clone_override_string_nt(self):
+ image_props = self._image_build("no", True)
+ self.assertFalse(image_props.linked_clone,
+ "image level metadata failed to override global")
+
+ def test_use_linked_clone_override_string_yf(self):
+ image_props = self._image_build("yes", False)
+ self.assertTrue(image_props.linked_clone,
+ "image level metadata failed to override global")
+
+ def test_use_disk_format_none(self):
+ image = self._image_build(None, True, disk_format=None)
+ self.assertIsNone(image.file_type)
+ self.assertFalse(image.is_iso)
+
+ def test_use_disk_format_iso(self):
+ image = self._image_build(None, True, disk_format='iso')
+ self.assertEqual('iso', image.file_type)
+ self.assertTrue(image.is_iso)
+
+ def test_use_bad_disk_format(self):
+ self.assertRaises(exception.InvalidDiskFormat,
+ self._image_build,
+ None,
+ True,
+ disk_format='bad_disk_format')
+
+ def test_image_no_defaults(self):
+ image = self._image_build(False, False,
+ disk_format='iso',
+ os_type='fake-os-type',
+ adapter_type='fake-adapter-type',
+ disk_type='fake-disk-type',
+ vif_model='fake-vif-model')
+ self.assertEqual('iso', image.file_type)
+ self.assertEqual('fake-os-type', image.os_type)
+ self.assertEqual('fake-adapter-type', image.adapter_type)
+ self.assertEqual('fake-disk-type', image.disk_type)
+ self.assertEqual('fake-vif-model', image.vif_model)
+ self.assertFalse(image.linked_clone)
+
+ def test_image_defaults(self):
+ image = images.VMwareImage(image_id='fake-image-id')
+
+        # N.B. We intentionally don't use the defined constants here. Amongst
+ # other potential failures, we're interested in changes to their
+ # values, which would not otherwise be picked up.
+ self.assertEqual('otherGuest', image.os_type)
+ self.assertEqual('lsiLogic', image.adapter_type)
+ self.assertEqual('preallocated', image.disk_type)
+ self.assertEqual('e1000', image.vif_model)
diff --git a/nova/tests/virt/vmwareapi/test_io_util.py b/nova/tests/unit/virt/vmwareapi/test_io_util.py
index a03c1e95b5..a03c1e95b5 100644
--- a/nova/tests/virt/vmwareapi/test_io_util.py
+++ b/nova/tests/unit/virt/vmwareapi/test_io_util.py
diff --git a/nova/tests/virt/vmwareapi/test_read_write_util.py b/nova/tests/unit/virt/vmwareapi/test_read_write_util.py
index 468d8b213a..468d8b213a 100644
--- a/nova/tests/virt/vmwareapi/test_read_write_util.py
+++ b/nova/tests/unit/virt/vmwareapi/test_read_write_util.py
diff --git a/nova/tests/unit/virt/vmwareapi/test_vif.py b/nova/tests/unit/virt/vmwareapi/test_vif.py
new file mode 100644
index 0000000000..2a4d086c36
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_vif.py
@@ -0,0 +1,346 @@
+# Copyright 2013 Canonical Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+import mock
+from oslo.config import cfg
+from oslo.vmware import exceptions as vexc
+
+from nova import exception
+from nova.network import model as network_model
+from nova import test
+from nova.tests.unit import matchers
+from nova.tests.unit import utils
+from nova.tests.unit.virt.vmwareapi import fake
+from nova.virt.vmwareapi import network_util
+from nova.virt.vmwareapi import vif
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
+
+CONF = cfg.CONF
+
+
+class VMwareVifTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(VMwareVifTestCase, self).setUp()
+ self.flags(vlan_interface='vmnet0', group='vmware')
+ network = network_model.Network(id=0,
+ bridge='fa0',
+ label='fake',
+ vlan=3,
+ bridge_interface='eth0',
+ injected=True)
+
+ self.vif = network_model.NetworkInfo([
+ network_model.VIF(id=None,
+ address='DE:AD:BE:EF:00:00',
+ network=network,
+ type=None,
+ devname=None,
+ ovs_interfaceid=None,
+ rxtx_cap=3)
+ ])[0]
+ self.session = fake.FakeSession()
+ self.cluster = None
+
+ def tearDown(self):
+ super(VMwareVifTestCase, self).tearDown()
+
+ def test_ensure_vlan_bridge(self):
+ self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
+ self.mox.StubOutWithMock(network_util,
+ 'get_vswitch_for_vlan_interface')
+ self.mox.StubOutWithMock(network_util,
+ 'check_if_vlan_interface_exists')
+ self.mox.StubOutWithMock(network_util, 'create_port_group')
+ network_util.get_network_with_the_name(self.session, 'fa0',
+ self.cluster).AndReturn(None)
+ network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
+ self.cluster).AndReturn('vmnet0')
+ network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
+ self.cluster).AndReturn(True)
+ network_util.create_port_group(self.session, 'fa0', 'vmnet0', 3,
+ self.cluster)
+ network_util.get_network_with_the_name(self.session, 'fa0', None)
+
+ self.mox.ReplayAll()
+ vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=True)
+
+    # FlatDHCP network mode without vlan - network doesn't exist on the host
+ def test_ensure_vlan_bridge_without_vlan(self):
+ self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
+ self.mox.StubOutWithMock(network_util,
+ 'get_vswitch_for_vlan_interface')
+ self.mox.StubOutWithMock(network_util,
+ 'check_if_vlan_interface_exists')
+ self.mox.StubOutWithMock(network_util, 'create_port_group')
+
+ network_util.get_network_with_the_name(self.session, 'fa0',
+ self.cluster).AndReturn(None)
+ network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
+ self.cluster).AndReturn('vmnet0')
+ network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
+ self.cluster).AndReturn(True)
+ network_util.create_port_group(self.session, 'fa0', 'vmnet0', 0,
+ self.cluster)
+ network_util.get_network_with_the_name(self.session, 'fa0', None)
+ self.mox.ReplayAll()
+ vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)
+
+    # FlatDHCP network mode without vlan - network exists on the host
+    # Getting the vswitch and checking the vlan interface should not be called
+ def test_ensure_vlan_bridge_with_network(self):
+ self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
+ self.mox.StubOutWithMock(network_util,
+ 'get_vswitch_for_vlan_interface')
+ self.mox.StubOutWithMock(network_util,
+ 'check_if_vlan_interface_exists')
+ self.mox.StubOutWithMock(network_util, 'create_port_group')
+ vm_network = {'name': 'VM Network', 'type': 'Network'}
+ network_util.get_network_with_the_name(self.session, 'fa0',
+ self.cluster).AndReturn(vm_network)
+ self.mox.ReplayAll()
+ vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)
+
+ # Flat network mode with DVS
+ def test_ensure_vlan_bridge_with_existing_dvs(self):
+ network_ref = {'dvpg': 'dvportgroup-2062',
+ 'type': 'DistributedVirtualPortgroup'}
+ self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
+ self.mox.StubOutWithMock(network_util,
+ 'get_vswitch_for_vlan_interface')
+ self.mox.StubOutWithMock(network_util,
+ 'check_if_vlan_interface_exists')
+ self.mox.StubOutWithMock(network_util, 'create_port_group')
+
+ network_util.get_network_with_the_name(self.session, 'fa0',
+ self.cluster).AndReturn(network_ref)
+ self.mox.ReplayAll()
+ ref = vif.ensure_vlan_bridge(self.session,
+ self.vif,
+ create_vlan=False)
+ self.assertThat(ref, matchers.DictMatches(network_ref))
+
+ def test_get_network_ref_neutron(self):
+ self.mox.StubOutWithMock(vif, 'get_neutron_network')
+ vif.get_neutron_network(self.session, 'fa0', self.cluster, self.vif)
+ self.mox.ReplayAll()
+ vif.get_network_ref(self.session, self.cluster, self.vif, True)
+
+ def test_get_network_ref_flat_dhcp(self):
+ self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
+ vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
+ create_vlan=False)
+ self.mox.ReplayAll()
+ vif.get_network_ref(self.session, self.cluster, self.vif, False)
+
+ def test_get_network_ref_bridge(self):
+ self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
+ vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
+ create_vlan=True)
+ self.mox.ReplayAll()
+ network = network_model.Network(id=0,
+ bridge='fa0',
+ label='fake',
+ vlan=3,
+ bridge_interface='eth0',
+ injected=True,
+ should_create_vlan=True)
+ self.vif = network_model.NetworkInfo([
+ network_model.VIF(id=None,
+ address='DE:AD:BE:EF:00:00',
+ network=network,
+ type=None,
+ devname=None,
+ ovs_interfaceid=None,
+ rxtx_cap=3)
+ ])[0]
+ vif.get_network_ref(self.session, self.cluster, self.vif, False)
+
+ def test_get_network_ref_bridge_from_opaque(self):
+ opaque_networks = [{'opaqueNetworkId': 'bridge_id',
+ 'opaqueNetworkName': 'name',
+ 'opaqueNetworkType': 'OpaqueNetwork'}]
+ network_ref = vif._get_network_ref_from_opaque(opaque_networks,
+ 'integration_bridge', 'bridge_id')
+ self.assertEqual('bridge_id', network_ref['network-id'])
+
+ def test_get_network_ref_multiple_bridges_from_opaque(self):
+ opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
+ 'opaqueNetworkName': 'name1',
+ 'opaqueNetworkType': 'OpaqueNetwork'},
+ {'opaqueNetworkId': 'bridge_id2',
+ 'opaqueNetworkName': 'name2',
+ 'opaqueNetworkType': 'OpaqueNetwork'}]
+ network_ref = vif._get_network_ref_from_opaque(opaque_networks,
+ 'integration_bridge', 'bridge_id2')
+ self.assertEqual('bridge_id2', network_ref['network-id'])
+
+ def test_get_network_ref_integration(self):
+ opaque_networks = [{'opaqueNetworkId': 'integration_bridge',
+ 'opaqueNetworkName': 'name',
+ 'opaqueNetworkType': 'OpaqueNetwork'}]
+ network_ref = vif._get_network_ref_from_opaque(opaque_networks,
+ 'integration_bridge', 'bridge_id')
+ self.assertEqual('integration_bridge', network_ref['network-id'])
+
+ def test_get_network_ref_bridge_none(self):
+ opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
+ 'opaqueNetworkName': 'name1',
+ 'opaqueNetworkType': 'OpaqueNetwork'},
+ {'opaqueNetworkId': 'bridge_id2',
+ 'opaqueNetworkName': 'name2',
+ 'opaqueNetworkType': 'OpaqueNetwork'}]
+ network_ref = vif._get_network_ref_from_opaque(opaque_networks,
+ 'integration_bridge', 'bridge_id')
+ self.assertIsNone(network_ref)
+
+ def test_get_network_ref_integration_multiple(self):
+ opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
+ 'opaqueNetworkName': 'name1',
+ 'opaqueNetworkType': 'OpaqueNetwork'},
+ {'opaqueNetworkId': 'integration_bridge',
+ 'opaqueNetworkName': 'name2',
+ 'opaqueNetworkType': 'OpaqueNetwork'}]
+ network_ref = vif._get_network_ref_from_opaque(opaque_networks,
+ 'integration_bridge', 'bridge_id')
+ self.assertIsNone(network_ref)
+
+ def test_get_neutron_network(self):
+ self.mox.StubOutWithMock(vm_util, 'get_host_ref')
+ self.mox.StubOutWithMock(self.session, '_call_method')
+ self.mox.StubOutWithMock(vif, '_get_network_ref_from_opaque')
+ vm_util.get_host_ref(self.session,
+ self.cluster).AndReturn('fake-host')
+ opaque = fake.DataObject()
+ opaque.HostOpaqueNetworkInfo = ['fake-network-info']
+ self.session._call_method(vim_util, "get_dynamic_property",
+ 'fake-host', 'HostSystem',
+ 'config.network.opaqueNetwork').AndReturn(opaque)
+ vif._get_network_ref_from_opaque(opaque.HostOpaqueNetworkInfo,
+ CONF.vmware.integration_bridge,
+ self.vif['network']['id']).AndReturn('fake-network-ref')
+ self.mox.ReplayAll()
+ network_ref = vif.get_neutron_network(self.session,
+ self.vif['network']['id'],
+ self.cluster,
+ self.vif)
+ self.assertEqual(network_ref, 'fake-network-ref')
+
+ def test_get_neutron_network_opaque_network_not_found(self):
+ self.mox.StubOutWithMock(vm_util, 'get_host_ref')
+ self.mox.StubOutWithMock(self.session, '_call_method')
+ self.mox.StubOutWithMock(vif, '_get_network_ref_from_opaque')
+ vm_util.get_host_ref(self.session,
+ self.cluster).AndReturn('fake-host')
+ opaque = fake.DataObject()
+ opaque.HostOpaqueNetworkInfo = ['fake-network-info']
+ self.session._call_method(vim_util, "get_dynamic_property",
+ 'fake-host', 'HostSystem',
+ 'config.network.opaqueNetwork').AndReturn(opaque)
+ vif._get_network_ref_from_opaque(opaque.HostOpaqueNetworkInfo,
+ CONF.vmware.integration_bridge,
+ self.vif['network']['id']).AndReturn(None)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NetworkNotFoundForBridge,
+ vif.get_neutron_network, self.session,
+ self.vif['network']['id'], self.cluster, self.vif)
+
+ def test_get_neutron_network_bridge_network_not_found(self):
+ self.mox.StubOutWithMock(vm_util, 'get_host_ref')
+ self.mox.StubOutWithMock(self.session, '_call_method')
+ self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
+ vm_util.get_host_ref(self.session,
+ self.cluster).AndReturn('fake-host')
+ opaque = fake.DataObject()
+ opaque.HostOpaqueNetworkInfo = ['fake-network-info']
+ self.session._call_method(vim_util, "get_dynamic_property",
+ 'fake-host', 'HostSystem',
+ 'config.network.opaqueNetwork').AndReturn(None)
+ network_util.get_network_with_the_name(self.session, 0,
+ self.cluster).AndReturn(None)
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NetworkNotFoundForBridge,
+ vif.get_neutron_network, self.session,
+ self.vif['network']['id'], self.cluster, self.vif)
+
+ def test_create_port_group_already_exists(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ if method == 'AddPortGroup':
+ raise vexc.AlreadyExistsException()
+
+ with contextlib.nested(
+ mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'),
+ mock.patch.object(vm_util, 'get_host_ref'),
+ mock.patch.object(self.session, '_call_method',
+ fake_call_method)
+ ) as (_add_vswitch, _get_host, _call_method):
+ network_util.create_port_group(self.session, 'pg_name',
+ 'vswitch_name', vlan_id=0,
+ cluster=None)
+
+ def test_create_port_group_exception(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ if method == 'AddPortGroup':
+ raise vexc.VMwareDriverException()
+
+ with contextlib.nested(
+ mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'),
+ mock.patch.object(vm_util, 'get_host_ref'),
+ mock.patch.object(self.session, '_call_method',
+ fake_call_method)
+ ) as (_add_vswitch, _get_host, _call_method):
+ self.assertRaises(vexc.VMwareDriverException,
+ network_util.create_port_group,
+ self.session, 'pg_name',
+ 'vswitch_name', vlan_id=0,
+ cluster=None)
+
+ def test_get_neutron_network_invalid_property(self):
+ def fake_call_method(module, method, *args, **kwargs):
+ if method == 'get_dynamic_property':
+ raise vexc.InvalidPropertyException()
+
+ with contextlib.nested(
+ mock.patch.object(vm_util, 'get_host_ref'),
+ mock.patch.object(self.session, '_call_method',
+ fake_call_method),
+ mock.patch.object(network_util, 'get_network_with_the_name')
+ ) as (_get_host, _call_method, _get_name):
+ vif.get_neutron_network(self.session, 'network_name',
+ 'cluster', self.vif)
+
+ def test_get_vif_info_none(self):
+ vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
+ 'is_neutron', 'fake_model', None)
+ self.assertEqual([], vif_info)
+
+ def test_get_vif_info_empty_list(self):
+ vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
+ 'is_neutron', 'fake_model', [])
+ self.assertEqual([], vif_info)
+
+ @mock.patch.object(vif, 'get_network_ref', return_value='fake_ref')
+ def test_get_vif_info(self, mock_get_network_ref):
+ network_info = utils.get_test_network_info()
+ vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
+ 'is_neutron', 'fake_model', network_info)
+ expected = [{'iface_id': 'vif-xxx-yyy-zzz',
+ 'mac_address': 'fake',
+ 'network_name': 'fake',
+ 'network_ref': 'fake_ref',
+ 'vif_model': 'fake_model'}]
+ self.assertEqual(expected, vif_info)
diff --git a/nova/tests/unit/virt/vmwareapi/test_vim_util.py b/nova/tests/unit/virt/vmwareapi/test_vim_util.py
new file mode 100644
index 0000000000..d00e127b66
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_vim_util.py
@@ -0,0 +1,117 @@
+# Copyright (c) 2013 VMware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+
+import fixtures
+import mock
+
+from nova import test
+from nova.tests.unit.virt.vmwareapi import fake
+from nova.tests.unit.virt.vmwareapi import stubs
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import vim_util
+
+
+def _fake_get_object_properties(vim, collector, mobj,
+ type, properties):
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.ObjectContent(None))
+ return fake_objects
+
+
+def _fake_get_object_properties_missing(vim, collector, mobj,
+ type, properties):
+ fake_objects = fake.FakeRetrieveResult()
+ ml = [fake.MissingProperty()]
+ fake_objects.add_object(fake.ObjectContent(None, missing_list=ml))
+ return fake_objects
+
+
+class VMwareVIMUtilTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super(VMwareVIMUtilTestCase, self).setUp()
+ fake.reset()
+ self.vim = fake.FakeVim()
+ self.vim._login()
+
+ def test_get_dynamic_properties_missing(self):
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.vmwareapi.vim_util.get_object_properties',
+ _fake_get_object_properties))
+ res = vim_util.get_dynamic_property('fake-vim', 'fake-obj',
+ 'fake-type', 'fake-property')
+ self.assertIsNone(res)
+
+ def test_get_dynamic_properties_missing_path_exists(self):
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.vmwareapi.vim_util.get_object_properties',
+ _fake_get_object_properties_missing))
+ res = vim_util.get_dynamic_property('fake-vim', 'fake-obj',
+ 'fake-type', 'fake-property')
+ self.assertIsNone(res)
+
+ def test_get_dynamic_properties_with_token(self):
+ ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
+ DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
+
+ # Add a token to our results, indicating that more are available
+ result = fake.FakeRetrieveResult(token='fake_token')
+
+ # We expect these properties to be returned
+ result.add_object(ObjectContent(propSet=[
+ DynamicProperty(name='name1', val='value1'),
+ DynamicProperty(name='name2', val='value2')
+ ]))
+
+ # These properties should be ignored
+ result.add_object(ObjectContent(propSet=[
+ DynamicProperty(name='name3', val='value3')
+ ]))
+
+ retrievePropertiesEx = mock.MagicMock(name='RetrievePropertiesEx')
+ retrievePropertiesEx.return_value = result
+
+ calls = {'RetrievePropertiesEx': retrievePropertiesEx}
+ with stubs.fake_suds_context(calls):
+ session = driver.VMwareAPISession(host_ip='localhost')
+
+ service_content = session.vim.service_content
+ props = session._call_method(vim_util, "get_dynamic_properties",
+ service_content.propertyCollector,
+ 'fake_type', None)
+
+ self.assertEqual(props, {
+ 'name1': 'value1',
+ 'name2': 'value2'
+ })
+
+ @mock.patch.object(vim_util, 'get_object_properties', return_value=None)
+ def test_get_dynamic_properties_no_objects(self, mock_get_object_props):
+ res = vim_util.get_dynamic_properties('fake-vim', 'fake-obj',
+ 'fake-type', 'fake-property')
+ self.assertEqual({}, res)
+
+ def test_get_inner_objects(self):
+ property = ['summary.name']
+ # Get the fake datastores directly from the cluster
+ cluster_refs = fake._get_object_refs('ClusterComputeResource')
+ cluster = fake._get_object(cluster_refs[0])
+ expected_ds = cluster.datastore.ManagedObjectReference
+ # Get the fake datastores using inner objects utility method
+ result = vim_util.get_inner_objects(
+ self.vim, cluster_refs[0], 'datastore', 'Datastore', property)
+ datastores = [oc.obj for oc in result.objects]
+ self.assertEqual(expected_ds, datastores)
diff --git a/nova/tests/unit/virt/vmwareapi/test_vm_util.py b/nova/tests/unit/virt/vmwareapi/test_vm_util.py
new file mode 100644
index 0000000000..906d03cf66
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_vm_util.py
@@ -0,0 +1,1069 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# Copyright 2013 Canonical Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import contextlib
+import re
+
+import mock
+from oslo.vmware import exceptions as vexc
+
+from nova import context
+from nova import exception
+from nova.network import model as network_model
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.virt.vmwareapi import fake
+from nova.tests.unit.virt.vmwareapi import stubs
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import vm_util
+
+
+class partialObject(object):
+ def __init__(self, path='fake-path'):
+ self.path = path
+ self.fault = fake.DataObject()
+
+
+class VMwareVMUtilTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(VMwareVMUtilTestCase, self).setUp()
+ fake.reset()
+ stubs.set_stubs(self.stubs)
+ vm_util.vm_refs_cache_reset()
+
+ def _test_get_stats_from_cluster(self, connection_state="connected",
+ maintenance_mode=False):
+ ManagedObjectRefs = [fake.ManagedObjectReference("host1",
+ "HostSystem"),
+ fake.ManagedObjectReference("host2",
+ "HostSystem")]
+ hosts = fake._convert_to_array_of_mor(ManagedObjectRefs)
+ respool = fake.ManagedObjectReference("resgroup-11", "ResourcePool")
+ prop_dict = {'host': hosts, 'resourcePool': respool}
+
+ hardware = fake.DataObject()
+ hardware.numCpuCores = 8
+ hardware.numCpuThreads = 16
+ hardware.vendor = "Intel"
+ hardware.cpuModel = "Intel(R) Xeon(R)"
+
+ runtime_host_1 = fake.DataObject()
+ runtime_host_1.connectionState = "connected"
+ runtime_host_1.inMaintenanceMode = False
+
+ runtime_host_2 = fake.DataObject()
+ runtime_host_2.connectionState = connection_state
+ runtime_host_2.inMaintenanceMode = maintenance_mode
+
+ prop_list_host_1 = [fake.Prop(name="hardware_summary", val=hardware),
+ fake.Prop(name="runtime_summary",
+ val=runtime_host_1)]
+ prop_list_host_2 = [fake.Prop(name="hardware_summary", val=hardware),
+ fake.Prop(name="runtime_summary",
+ val=runtime_host_2)]
+
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.ObjectContent("prop_list_host1",
+ prop_list_host_1))
+ fake_objects.add_object(fake.ObjectContent("prop_list_host1",
+ prop_list_host_2))
+
+ respool_resource_usage = fake.DataObject()
+ respool_resource_usage.maxUsage = 5368709120
+ respool_resource_usage.overallUsage = 2147483648
+
+ def fake_call_method(*args):
+ if "get_dynamic_properties" in args:
+ return prop_dict
+ elif "get_properties_for_a_collection_of_objects" in args:
+ return fake_objects
+ else:
+ return respool_resource_usage
+
+ session = fake.FakeSession()
+ with mock.patch.object(session, '_call_method', fake_call_method):
+ result = vm_util.get_stats_from_cluster(session, "cluster1")
+ cpu_info = {}
+ mem_info = {}
+ if connection_state == "connected" and not maintenance_mode:
+ cpu_info['vcpus'] = 32
+ cpu_info['cores'] = 16
+ cpu_info['vendor'] = ["Intel", "Intel"]
+ cpu_info['model'] = ["Intel(R) Xeon(R)",
+ "Intel(R) Xeon(R)"]
+ else:
+ cpu_info['vcpus'] = 16
+ cpu_info['cores'] = 8
+ cpu_info['vendor'] = ["Intel"]
+ cpu_info['model'] = ["Intel(R) Xeon(R)"]
+ mem_info['total'] = 5120
+ mem_info['free'] = 3072
+ expected_stats = {'cpu': cpu_info, 'mem': mem_info}
+ self.assertEqual(expected_stats, result)
+
+ def test_get_stats_from_cluster_hosts_connected_and_active(self):
+ self._test_get_stats_from_cluster()
+
+ def test_get_stats_from_cluster_hosts_disconnected_and_active(self):
+ self._test_get_stats_from_cluster(connection_state="disconnected")
+
+ def test_get_stats_from_cluster_hosts_connected_and_maintenance(self):
+ self._test_get_stats_from_cluster(maintenance_mode=True)
+
+ def test_get_host_ref_no_hosts_in_cluster(self):
+ self.assertRaises(exception.NoValidHost,
+ vm_util.get_host_ref,
+ fake.FakeObjectRetrievalSession(""), 'fake_cluster')
+
+ def test_get_resize_spec(self):
+ fake_instance = {'id': 7, 'name': 'fake!',
+ 'uuid': 'bda5fb9e-b347-40e8-8256-42397848cb00',
+ 'vcpus': 2, 'memory_mb': 2048}
+ result = vm_util.get_vm_resize_spec(fake.FakeFactory(),
+ fake_instance)
+ expected = """{'memoryMB': 2048,
+ 'numCPUs': 2,
+ 'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_cdrom_attach_config_spec(self):
+
+ result = vm_util.get_cdrom_attach_config_spec(fake.FakeFactory(),
+ fake.Datastore(),
+ "/tmp/foo.iso",
+ 200, 0)
+ expected = """{
+ 'deviceChange': [
+ {
+ 'device': {
+ 'connectable': {
+ 'allowGuestControl': False,
+ 'startConnected': True,
+ 'connected': True,
+ 'obj_name': 'ns0: VirtualDeviceConnectInfo'
+ },
+ 'backing': {
+ 'datastore': {
+ "summary.maintenanceMode": "normal",
+ "summary.type": "VMFS",
+ "summary.accessible":true,
+ "summary.name": "fake-ds",
+ "summary.capacity": 1099511627776,
+ "summary.freeSpace": 536870912000,
+ "browser": ""
+ },
+ 'fileName': '/tmp/foo.iso',
+ 'obj_name': 'ns0: VirtualCdromIsoBackingInfo'
+ },
+ 'controllerKey': 200,
+ 'unitNumber': 0,
+ 'key': -1,
+ 'obj_name': 'ns0: VirtualCdrom'
+ },
+ 'operation': 'add',
+ 'obj_name': 'ns0: VirtualDeviceConfigSpec'
+ }
+ ],
+ 'obj_name': 'ns0: VirtualMachineConfigSpec'
+}
+"""
+
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_lsilogic_controller_spec(self):
+ # Test controller spec returned for lsiLogic sas adapter type
+ config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
+ adapter_type="lsiLogicsas")
+ self.assertEqual("ns0:VirtualLsiLogicSASController",
+ config_spec.device.obj_name)
+
+ def test_paravirtual_controller_spec(self):
+ # Test controller spec returned for paraVirtual adapter type
+ config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
+ adapter_type="paraVirtual")
+ self.assertEqual("ns0:ParaVirtualSCSIController",
+ config_spec.device.obj_name)
+
+ def _vmdk_path_and_adapter_type_devices(self, filename, parent=None):
+ # Test the adapter_type returned for a lsiLogic sas controller
+ controller_key = 1000
+ disk = fake.VirtualDisk()
+ disk.controllerKey = controller_key
+ disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
+ disk_backing.fileName = filename
+ if parent:
+ disk_backing.parent = parent
+ disk.backing = disk_backing
+ controller = fake.VirtualLsiLogicSASController()
+ controller.key = controller_key
+ devices = [disk, controller]
+ return devices
+
+ def test_get_vmdk_path(self):
+ uuid = '00000000-0000-0000-0000-000000000000'
+ filename = '[test_datastore] %s/%s.vmdk' % (uuid, uuid)
+ devices = self._vmdk_path_and_adapter_type_devices(filename)
+ session = fake.FakeSession()
+
+ with mock.patch.object(session, '_call_method',
+ return_value=devices):
+ instance = {'uuid': uuid}
+ vmdk_path = vm_util.get_vmdk_path(session, None, instance)
+ self.assertEqual(filename, vmdk_path)
+
+ def test_get_vmdk_path_and_adapter_type(self):
+ filename = '[test_datastore] test_file.vmdk'
+ devices = self._vmdk_path_and_adapter_type_devices(filename)
+ vmdk_info = vm_util.get_vmdk_path_and_adapter_type(devices)
+ adapter_type = vmdk_info[1]
+ self.assertEqual('lsiLogicsas', adapter_type)
+ self.assertEqual(vmdk_info[0], filename)
+
+ def test_get_vmdk_path_and_adapter_type_with_match(self):
+ n_filename = '[test_datastore] uuid/uuid.vmdk'
+ devices = self._vmdk_path_and_adapter_type_devices(n_filename)
+ vmdk_info = vm_util.get_vmdk_path_and_adapter_type(
+ devices, uuid='uuid')
+ adapter_type = vmdk_info[1]
+ self.assertEqual('lsiLogicsas', adapter_type)
+ self.assertEqual(n_filename, vmdk_info[0])
+
+ def test_get_vmdk_path_and_adapter_type_with_nomatch(self):
+ n_filename = '[test_datastore] diuu/diuu.vmdk'
+ devices = self._vmdk_path_and_adapter_type_devices(n_filename)
+ vmdk_info = vm_util.get_vmdk_path_and_adapter_type(
+ devices, uuid='uuid')
+ adapter_type = vmdk_info[1]
+ self.assertEqual('lsiLogicsas', adapter_type)
+ self.assertIsNone(vmdk_info[0])
+
+ def test_get_vmdk_adapter_type(self):
+ # Test for the adapter_type to be used in vmdk descriptor
+ # Adapter type in vmdk descriptor is same for LSI-SAS, LSILogic
+ # and ParaVirtual
+ vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogic")
+ self.assertEqual("lsiLogic", vmdk_adapter_type)
+ vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogicsas")
+ self.assertEqual("lsiLogic", vmdk_adapter_type)
+ vmdk_adapter_type = vm_util.get_vmdk_adapter_type("paraVirtual")
+ self.assertEqual("lsiLogic", vmdk_adapter_type)
+ vmdk_adapter_type = vm_util.get_vmdk_adapter_type("dummyAdapter")
+ self.assertEqual("dummyAdapter", vmdk_adapter_type)
+
+ def test_find_allocated_slots(self):
+ disk1 = fake.VirtualDisk(200, 0)
+ disk2 = fake.VirtualDisk(200, 1)
+ disk3 = fake.VirtualDisk(201, 1)
+ ide0 = fake.VirtualIDEController(200)
+ ide1 = fake.VirtualIDEController(201)
+ scsi0 = fake.VirtualLsiLogicController(key=1000, scsiCtlrUnitNumber=7)
+ devices = [disk1, disk2, disk3, ide0, ide1, scsi0]
+ taken = vm_util._find_allocated_slots(devices)
+ self.assertEqual([0, 1], sorted(taken[200]))
+ self.assertEqual([1], taken[201])
+ self.assertEqual([7], taken[1000])
+
+ def test_allocate_controller_key_and_unit_number_ide_default(self):
+ # Test that default IDE controllers are used when there is a free slot
+ # on them
+ disk1 = fake.VirtualDisk(200, 0)
+ disk2 = fake.VirtualDisk(200, 1)
+ ide0 = fake.VirtualIDEController(200)
+ ide1 = fake.VirtualIDEController(201)
+ devices = [disk1, disk2, ide0, ide1]
+ (controller_key, unit_number,
+ controller_spec) = vm_util.allocate_controller_key_and_unit_number(
+ None,
+ devices,
+ 'ide')
+ self.assertEqual(201, controller_key)
+ self.assertEqual(0, unit_number)
+ self.assertIsNone(controller_spec)
+
+ def test_allocate_controller_key_and_unit_number_ide(self):
+ # Test that a new controller is created when there is no free slot on
+ # the default IDE controllers
+ ide0 = fake.VirtualIDEController(200)
+ ide1 = fake.VirtualIDEController(201)
+ devices = [ide0, ide1]
+ for controller_key in [200, 201]:
+ for unit_number in [0, 1]:
+ disk = fake.VirtualDisk(controller_key, unit_number)
+ devices.append(disk)
+ factory = fake.FakeFactory()
+ (controller_key, unit_number,
+ controller_spec) = vm_util.allocate_controller_key_and_unit_number(
+ factory,
+ devices,
+ 'ide')
+ self.assertEqual(-101, controller_key)
+ self.assertEqual(0, unit_number)
+ self.assertIsNotNone(controller_spec)
+
+ def test_allocate_controller_key_and_unit_number_scsi(self):
+ # Test that we allocate on existing SCSI controller if there is a free
+ # slot on it
+ devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7)]
+ for unit_number in range(7):
+ disk = fake.VirtualDisk(1000, unit_number)
+ devices.append(disk)
+ factory = fake.FakeFactory()
+ (controller_key, unit_number,
+ controller_spec) = vm_util.allocate_controller_key_and_unit_number(
+ factory,
+ devices,
+ 'lsiLogic')
+ self.assertEqual(1000, controller_key)
+ self.assertEqual(8, unit_number)
+ self.assertIsNone(controller_spec)
+
+ def _test_get_vnc_config_spec(self, port):
+
+ result = vm_util.get_vnc_config_spec(fake.FakeFactory(),
+ port)
+ return result
+
+ def test_get_vnc_config_spec(self):
+ result = self._test_get_vnc_config_spec(7)
+ expected = """{'extraConfig': [
+ {'value': 'true',
+ 'key': 'RemoteDisplay.vnc.enabled',
+ 'obj_name': 'ns0:OptionValue'},
+ {'value': 7,
+ 'key': 'RemoteDisplay.vnc.port',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def _create_fake_vms(self):
+ fake_vms = fake.FakeRetrieveResult()
+ OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
+ for i in range(10):
+ vm = fake.ManagedObject()
+ opt_val = OptionValue(key='', value=5900 + i)
+ vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
+ fake_vms.add_object(vm)
+ return fake_vms
+
+ def test_get_vnc_port(self):
+ fake_vms = self._create_fake_vms()
+ self.flags(vnc_port=5900, group='vmware')
+ self.flags(vnc_port_total=10000, group='vmware')
+ actual = vm_util.get_vnc_port(
+ fake.FakeObjectRetrievalSession(fake_vms))
+ self.assertEqual(actual, 5910)
+
+ def test_get_vnc_port_exhausted(self):
+ fake_vms = self._create_fake_vms()
+ self.flags(vnc_port=5900, group='vmware')
+ self.flags(vnc_port_total=10, group='vmware')
+ self.assertRaises(exception.ConsolePortRangeExhausted,
+ vm_util.get_vnc_port,
+ fake.FakeObjectRetrievalSession(fake_vms))
+
+ def test_get_all_cluster_refs_by_name_none(self):
+ fake_objects = fake.FakeRetrieveResult()
+ refs = vm_util.get_all_cluster_refs_by_name(
+ fake.FakeObjectRetrievalSession(fake_objects), ['fake_cluster'])
+ self.assertEqual({}, refs)
+
+ def test_get_all_cluster_refs_by_name_exists(self):
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.ClusterComputeResource(name='cluster'))
+ refs = vm_util.get_all_cluster_refs_by_name(
+ fake.FakeObjectRetrievalSession(fake_objects), ['cluster'])
+ self.assertEqual(1, len(refs))
+
+ def test_get_all_cluster_refs_by_name_missing(self):
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(partialObject(path='cluster'))
+ refs = vm_util.get_all_cluster_refs_by_name(
+ fake.FakeObjectRetrievalSession(fake_objects), ['cluster'])
+ self.assertEqual({}, refs)
+
+ def test_propset_dict_simple(self):
+ ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
+ DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
+
+ object = ObjectContent(propSet=[
+ DynamicProperty(name='foo', val="bar")])
+ propdict = vm_util.propset_dict(object.propSet)
+ self.assertEqual("bar", propdict['foo'])
+
+ def test_propset_dict_complex(self):
+ ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
+ DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
+ MoRef = collections.namedtuple('Val', ['value'])
+
+ object = ObjectContent(propSet=[
+ DynamicProperty(name='foo', val="bar"),
+ DynamicProperty(name='some.thing',
+ val=MoRef(value='else')),
+ DynamicProperty(name='another.thing', val='value')])
+
+ propdict = vm_util.propset_dict(object.propSet)
+ self.assertEqual("bar", propdict['foo'])
+ self.assertTrue(hasattr(propdict['some.thing'], 'value'))
+ self.assertEqual("else", propdict['some.thing'].value)
+ self.assertEqual("value", propdict['another.thing'])
+
+ def _test_detach_virtual_disk_spec(self, destroy_disk=False):
+ virtual_device_config = vm_util.detach_virtual_disk_spec(
+ fake.FakeFactory(),
+ 'fake_device',
+ destroy_disk)
+ self.assertEqual('remove', virtual_device_config.operation)
+ self.assertEqual('fake_device', virtual_device_config.device)
+ self.assertEqual('ns0:VirtualDeviceConfigSpec',
+ virtual_device_config.obj_name)
+ if destroy_disk:
+ self.assertEqual('destroy', virtual_device_config.fileOperation)
+ else:
+ self.assertFalse(hasattr(virtual_device_config, 'fileOperation'))
+
+ def test_detach_virtual_disk_spec(self):
+ self._test_detach_virtual_disk_spec(destroy_disk=False)
+
+ def test_detach_virtual_disk_destroy_spec(self):
+ self._test_detach_virtual_disk_spec(destroy_disk=True)
+
+ def test_get_vm_create_spec(self):
+ instance_uuid = uuidutils.generate_uuid()
+ fake_instance = {'id': 7, 'name': 'fake!',
+ 'uuid': instance_uuid,
+ 'vcpus': 2, 'memory_mb': 2048}
+ result = vm_util.get_vm_create_spec(fake.FakeFactory(),
+ fake_instance, instance_uuid,
+ 'fake-datastore', [])
+ expected = """{
+ 'files': {'vmPathName': '[fake-datastore]',
+ 'obj_name': 'ns0:VirtualMachineFileInfo'},
+ 'instanceUuid': '%(instance_uuid)s',
+ 'name': '%(instance_uuid)s', 'deviceChange': [],
+ 'extraConfig': [{'value': '%(instance_uuid)s',
+ 'key': 'nvp.vm-uuid',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'memoryMB': 2048,
+ 'managedBy': {'extensionKey': 'org.openstack.compute',
+ 'type': 'instance',
+ 'obj_name': 'ns0:ManagedByInfo'},
+ 'obj_name': 'ns0:VirtualMachineConfigSpec',
+ 'guestId': 'otherGuest',
+ 'tools': {'beforeGuestStandby': True,
+ 'beforeGuestReboot': True,
+ 'beforeGuestShutdown': True,
+ 'afterResume': True,
+ 'afterPowerOn': True,
+ 'obj_name': 'ns0:ToolsConfigInfo'},
+ 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_vm_create_spec_with_allocations(self):
+ instance_uuid = uuidutils.generate_uuid()
+ fake_instance = {'id': 7, 'name': 'fake!',
+ 'uuid': instance_uuid,
+ 'vcpus': 2, 'memory_mb': 2048}
+ result = vm_util.get_vm_create_spec(fake.FakeFactory(),
+ fake_instance, instance_uuid,
+ 'fake-datastore', [],
+ allocations={'cpu_limit': 7,
+ 'cpu_reservation': 6})
+ expected = """{
+ 'files': {'vmPathName': '[fake-datastore]',
+ 'obj_name': 'ns0:VirtualMachineFileInfo'},
+ 'instanceUuid': '%(instance_uuid)s',
+ 'name': '%(instance_uuid)s', 'deviceChange': [],
+ 'extraConfig': [{'value': '%(instance_uuid)s',
+ 'key': 'nvp.vm-uuid',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'memoryMB': 2048,
+ 'managedBy': {'extensionKey': 'org.openstack.compute',
+ 'type': 'instance',
+ 'obj_name': 'ns0:ManagedByInfo'},
+ 'obj_name': 'ns0:VirtualMachineConfigSpec',
+ 'guestId': 'otherGuest',
+ 'tools': {'beforeGuestStandby': True,
+ 'beforeGuestReboot': True,
+ 'beforeGuestShutdown': True,
+ 'afterResume': True,
+ 'afterPowerOn': True,
+ 'obj_name': 'ns0:ToolsConfigInfo'},
+ 'cpuAllocation': {'reservation': 6,
+ 'limit': 7,
+ 'obj_name': 'ns0:ResourceAllocationInfo'},
+ 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_vm_create_spec_with_limit(self):
+ instance_uuid = uuidutils.generate_uuid()
+ fake_instance = {'id': 7, 'name': 'fake!',
+ 'uuid': instance_uuid,
+ 'vcpus': 2, 'memory_mb': 2048}
+ result = vm_util.get_vm_create_spec(fake.FakeFactory(),
+ fake_instance, instance_uuid,
+ 'fake-datastore', [],
+ allocations={'cpu_limit': 7})
+ expected = """{
+ 'files': {'vmPathName': '[fake-datastore]',
+ 'obj_name': 'ns0:VirtualMachineFileInfo'},
+ 'instanceUuid': '%(instance_uuid)s',
+ 'name': '%(instance_uuid)s', 'deviceChange': [],
+ 'extraConfig': [{'value': '%(instance_uuid)s',
+ 'key': 'nvp.vm-uuid',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'memoryMB': 2048,
+ 'managedBy': {'extensionKey': 'org.openstack.compute',
+ 'type': 'instance',
+ 'obj_name': 'ns0:ManagedByInfo'},
+ 'obj_name': 'ns0:VirtualMachineConfigSpec',
+ 'guestId': 'otherGuest',
+ 'tools': {'beforeGuestStandby': True,
+ 'beforeGuestReboot': True,
+ 'beforeGuestShutdown': True,
+ 'afterResume': True,
+ 'afterPowerOn': True,
+ 'obj_name': 'ns0:ToolsConfigInfo'},
+ 'cpuAllocation': {'limit': 7,
+ 'obj_name': 'ns0:ResourceAllocationInfo'},
+ 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_vm_create_spec_with_share(self):
+ instance_uuid = uuidutils.generate_uuid()
+ fake_instance = {'id': 7, 'name': 'fake!',
+ 'uuid': instance_uuid,
+ 'vcpus': 2, 'memory_mb': 2048}
+ shares = {'cpu_shares_level': 'high'}
+ result = vm_util.get_vm_create_spec(fake.FakeFactory(),
+ fake_instance, instance_uuid,
+ 'fake-datastore', [],
+ allocations=shares)
+ expected = """{
+ 'files': {'vmPathName': '[fake-datastore]',
+ 'obj_name': 'ns0:VirtualMachineFileInfo'},
+ 'instanceUuid': '%(instance_uuid)s',
+ 'name': '%(instance_uuid)s', 'deviceChange': [],
+ 'extraConfig': [{'value': '%(instance_uuid)s',
+ 'key': 'nvp.vm-uuid',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'memoryMB': 2048,
+ 'managedBy': {'extensionKey': 'org.openstack.compute',
+ 'type': 'instance',
+ 'obj_name': 'ns0:ManagedByInfo'},
+ 'obj_name': 'ns0:VirtualMachineConfigSpec',
+ 'guestId': 'otherGuest',
+ 'tools': {'beforeGuestStandby': True,
+ 'beforeGuestReboot': True,
+ 'beforeGuestShutdown': True,
+ 'afterResume': True,
+ 'afterPowerOn': True,
+ 'obj_name': 'ns0:ToolsConfigInfo'},
+ 'cpuAllocation': {'shares': {'level': 'high',
+ 'shares': 0,
+ 'obj_name':'ns0:SharesInfo'},
+ 'obj_name':'ns0:ResourceAllocationInfo'},
+ 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_vm_create_spec_with_share_custom(self):
+ instance_uuid = uuidutils.generate_uuid()
+ fake_instance = {'id': 7, 'name': 'fake!',
+ 'uuid': instance_uuid,
+ 'vcpus': 2, 'memory_mb': 2048}
+ shares = {'cpu_shares_level': 'custom',
+ 'cpu_shares_share': 1948}
+ result = vm_util.get_vm_create_spec(fake.FakeFactory(),
+ fake_instance, instance_uuid,
+ 'fake-datastore', [],
+ allocations=shares)
+ expected = """{
+ 'files': {'vmPathName': '[fake-datastore]',
+ 'obj_name': 'ns0:VirtualMachineFileInfo'},
+ 'instanceUuid': '%(instance_uuid)s',
+ 'name': '%(instance_uuid)s', 'deviceChange': [],
+ 'extraConfig': [{'value': '%(instance_uuid)s',
+ 'key': 'nvp.vm-uuid',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'memoryMB': 2048,
+ 'managedBy': {'extensionKey': 'org.openstack.compute',
+ 'type': 'instance',
+ 'obj_name': 'ns0:ManagedByInfo'},
+ 'obj_name': 'ns0:VirtualMachineConfigSpec',
+ 'guestId': 'otherGuest',
+ 'tools': {'beforeGuestStandby': True,
+ 'beforeGuestReboot': True,
+ 'beforeGuestShutdown': True,
+ 'afterResume': True,
+ 'afterPowerOn': True,
+ 'obj_name': 'ns0:ToolsConfigInfo'},
+ 'cpuAllocation': {'shares': {'level': 'custom',
+ 'shares': 1948,
+ 'obj_name':'ns0:SharesInfo'},
+ 'obj_name':'ns0:ResourceAllocationInfo'},
+ 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_create_vm(self):
+
+ method_list = ['CreateVM_Task', 'get_dynamic_property']
+
+ def fake_call_method(module, method, *args, **kwargs):
+ expected_method = method_list.pop(0)
+ self.assertEqual(expected_method, method)
+ if (expected_method == 'CreateVM_Task'):
+ return 'fake_create_vm_task'
+ elif (expected_method == 'get_dynamic_property'):
+ task_info = mock.Mock(state="success", result="fake_vm_ref")
+ return task_info
+ else:
+ self.fail('Should not get here....')
+
+ def fake_wait_for_task(self, *args):
+ task_info = mock.Mock(state="success", result="fake_vm_ref")
+ return task_info
+
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ fake_call_mock = mock.Mock(side_effect=fake_call_method)
+ fake_wait_mock = mock.Mock(side_effect=fake_wait_for_task)
+ with contextlib.nested(
+ mock.patch.object(session, '_wait_for_task',
+ fake_wait_mock),
+ mock.patch.object(session, '_call_method',
+ fake_call_mock)
+ ) as (wait_for_task, call_method):
+ vm_ref = vm_util.create_vm(
+ session,
+ fake_instance,
+ 'fake_vm_folder',
+ 'fake_config_spec',
+ 'fake_res_pool_ref')
+ self.assertEqual('fake_vm_ref', vm_ref)
+
+ call_method.assert_called_once_with(mock.ANY, 'CreateVM_Task',
+ 'fake_vm_folder', config='fake_config_spec',
+ pool='fake_res_pool_ref')
+ wait_for_task.assert_called_once_with('fake_create_vm_task')
+
+ @mock.patch.object(vm_util.LOG, 'warning')
+ def test_create_vm_invalid_guestid(self, mock_log_warn):
+ """Ensure we warn when create_vm() fails after we passed an
+ unrecognised guestId
+ """
+
+ found = [False]
+
+ def fake_log_warn(msg, values):
+ if not isinstance(values, dict):
+ return
+ if values.get('ostype') == 'invalid_os_type':
+ found[0] = True
+ mock_log_warn.side_effect = fake_log_warn
+
+ instance_values = {'id': 7, 'name': 'fake-name',
+ 'uuid': uuidutils.generate_uuid(),
+ 'vcpus': 2, 'memory_mb': 2048}
+ instance = fake_instance.fake_instance_obj(
+ context.RequestContext('fake', 'fake', is_admin=False),
+ **instance_values)
+
+ session = driver.VMwareAPISession()
+
+ config_spec = vm_util.get_vm_create_spec(
+ session.vim.client.factory,
+ instance, instance.name, 'fake-datastore', [],
+ os_type='invalid_os_type')
+
+ self.assertRaises(vexc.VMwareDriverException,
+ vm_util.create_vm, session, instance, 'folder',
+ config_spec, 'res-pool')
+ self.assertTrue(found[0])
+
+ def test_convert_vif_model(self):
+ expected = "VirtualE1000"
+ result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000)
+ self.assertEqual(expected, result)
+ expected = "VirtualE1000e"
+ result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000E)
+ self.assertEqual(expected, result)
+ types = ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
+ "VirtualVmxnet"]
+ for type in types:
+ self.assertEqual(type,
+ vm_util.convert_vif_model(type))
+ self.assertRaises(exception.Invalid,
+ vm_util.convert_vif_model,
+ "InvalidVifModel")
+
+ def test_power_on_instance_with_vm_ref(self):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(session, "_wait_for_task"),
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.power_on_instance(session, fake_instance,
+ vm_ref='fake-vm-ref')
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOnVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ def test_power_on_instance_without_vm_ref(self):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(vm_util, "get_vm_ref",
+ return_value='fake-vm-ref'),
+ mock.patch.object(session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(session, "_wait_for_task"),
+ ) as (fake_get_vm_ref, fake_call_method, fake_wait_for_task):
+ vm_util.power_on_instance(session, fake_instance)
+ fake_get_vm_ref.assert_called_once_with(session, fake_instance)
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOnVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ def test_power_on_instance_with_exception(self):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(session, "_wait_for_task",
+ side_effect=exception.NovaException('fake')),
+ ) as (fake_call_method, fake_wait_for_task):
+ self.assertRaises(exception.NovaException,
+ vm_util.power_on_instance,
+ session, fake_instance,
+ vm_ref='fake-vm-ref')
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOnVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ def test_power_on_instance_with_power_state_exception(self):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(
+ session, "_wait_for_task",
+ side_effect=vexc.InvalidPowerStateException),
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.power_on_instance(session, fake_instance,
+ vm_ref='fake-vm-ref')
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOnVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ def test_create_virtual_disk(self):
+ session = fake.FakeSession()
+ dm = session.vim.service_content.virtualDiskManager
+ with contextlib.nested(
+ mock.patch.object(vm_util, "get_vmdk_create_spec",
+ return_value='fake-spec'),
+ mock.patch.object(session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(session, "_wait_for_task"),
+ ) as (fake_get_spec, fake_call_method, fake_wait_for_task):
+ vm_util.create_virtual_disk(session, 'fake-dc-ref',
+ 'fake-adapter-type', 'fake-disk-type',
+ 'fake-path', 7)
+ fake_get_spec.assert_called_once_with(
+ session.vim.client.factory, 7,
+ 'fake-adapter-type',
+ 'fake-disk-type')
+ fake_call_method.assert_called_once_with(
+ session.vim,
+ "CreateVirtualDisk_Task",
+ dm,
+ name='fake-path',
+ datacenter='fake-dc-ref',
+ spec='fake-spec')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ def test_copy_virtual_disk(self):
+ session = fake.FakeSession()
+ dm = session.vim.service_content.virtualDiskManager
+ with contextlib.nested(
+ mock.patch.object(session, "_call_method",
+ return_value='fake-task'),
+ mock.patch.object(session, "_wait_for_task"),
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.copy_virtual_disk(session, 'fake-dc-ref',
+ 'fake-source', 'fake-dest')
+ fake_call_method.assert_called_once_with(
+ session.vim,
+ "CopyVirtualDisk_Task",
+ dm,
+ sourceName='fake-source',
+ sourceDatacenter='fake-dc-ref',
+ destName='fake-dest')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ def _create_fake_vm_objects(self):
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.VirtualMachine())
+ return fake_objects
+
+ def test_get_values(self):
+ objects = self._create_fake_vm_objects()
+ query = vm_util.get_values_from_object_properties(
+ fake.FakeObjectRetrievalSession(objects), objects)
+ self.assertEqual('poweredOn', query['runtime.powerState'])
+ self.assertEqual('guestToolsRunning',
+ query['summary.guest.toolsRunningStatus'])
+ self.assertEqual('toolsOk', query['summary.guest.toolsStatus'])
+
+ def test_reconfigure_vm(self):
+ session = fake.FakeSession()
+ with contextlib.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake_reconfigure_task'),
+ mock.patch.object(session, '_wait_for_task')
+ ) as (_call_method, _wait_for_task):
+ vm_util.reconfigure_vm(session, 'fake-ref', 'fake-spec')
+ _call_method.assert_called_once_with(mock.ANY,
+ 'ReconfigVM_Task', 'fake-ref', spec='fake-spec')
+ _wait_for_task.assert_called_once_with(
+ 'fake_reconfigure_task')
+
+ def test_get_network_attach_config_spec_opaque(self):
+ vif_info = {'network_name': 'br-int',
+ 'mac_address': '00:00:00:ca:fe:01',
+ 'network_ref': {'type': 'OpaqueNetwork',
+ 'network-id': 'fake-network-id',
+ 'network-type': 'opaque'},
+ 'iface_id': 7,
+ 'vif_model': 'VirtualE1000'}
+ result = vm_util.get_network_attach_config_spec(
+ fake.FakeFactory(), vif_info, 1)
+ card = 'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo'
+ expected = """{
+ 'extraConfig': [{'value': 7,
+ 'key': 'nvp.iface-id.1',
+ 'obj_name':'ns0:OptionValue'}],
+ 'deviceChange': [
+ {'device': {
+ 'macAddress':'00:00:00:ca:fe:01',
+ 'addressType': 'manual',
+ 'connectable': {
+ 'allowGuestControl':True,
+ 'startConnected': True,
+ 'connected': True,
+ 'obj_name':'ns0:VirtualDeviceConnectInfo'},
+ 'backing': {
+ 'opaqueNetworkType': 'opaque',
+ 'opaqueNetworkId': 'fake-network-id',
+ 'obj_name': '%(card)s'},
+ 'key': -47,
+ 'obj_name': 'ns0:VirtualE1000',
+ 'wakeOnLanEnabled': True},
+ 'operation': 'add',
+ 'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
+ 'obj_name':'ns0:VirtualMachineConfigSpec'}""" % {'card': card}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_network_attach_config_spec_dvs(self):
+ vif_info = {'network_name': 'br100',
+ 'mac_address': '00:00:00:ca:fe:01',
+ 'network_ref': {'type': 'DistributedVirtualPortgroup',
+ 'dvsw': 'fake-network-id',
+ 'dvpg': 'fake-group'},
+ 'iface_id': 7,
+ 'vif_model': 'VirtualE1000'}
+ result = vm_util.get_network_attach_config_spec(
+ fake.FakeFactory(), vif_info, 1)
+ port = 'ns0:DistributedVirtualSwitchPortConnection'
+ backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo'
+ expected = """{
+ 'extraConfig': [{'value': 7,
+ 'key': 'nvp.iface-id.1',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'deviceChange': [
+ {'device': {'macAddress': '00:00:00:ca:fe:01',
+ 'addressType': 'manual',
+ 'connectable': {
+ 'allowGuestControl': True,
+ 'startConnected': True,
+ 'connected': True,
+ 'obj_name': 'ns0:VirtualDeviceConnectInfo'},
+ 'backing': {
+ 'port': {
+ 'portgroupKey': 'fake-group',
+ 'switchUuid': 'fake-network-id',
+ 'obj_name': '%(obj_name_port)s'},
+ 'obj_name': '%(obj_name_backing)s'},
+ 'key': -47,
+ 'obj_name': 'ns0:VirtualE1000',
+ 'wakeOnLanEnabled': True},
+ 'operation': 'add',
+ 'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
+ 'obj_name':'ns0:VirtualMachineConfigSpec'}""" % {
+ 'obj_name_backing': backing,
+ 'obj_name_port': port}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_network_detach_config_spec(self):
+ result = vm_util.get_network_detach_config_spec(
+ fake.FakeFactory(), 'fake-device', 2)
+ expected = """{
+ 'extraConfig': [{'value': 'free',
+ 'key': 'nvp.iface-id.2',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'deviceChange': [{'device': 'fake-device',
+ 'operation': 'remove',
+ 'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
+ 'obj_name':'ns0:VirtualMachineConfigSpec'}"""
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ @mock.patch.object(vm_util, "get_vm_ref")
+ def test_power_off_instance(self, fake_get_ref):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake-task'),
+ mock.patch.object(session, '_wait_for_task')
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.power_off_instance(session, fake_instance, 'fake-vm-ref')
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOffVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+ self.assertFalse(fake_get_ref.called)
+
+ @mock.patch.object(vm_util, "get_vm_ref", return_value="fake-vm-ref")
+ def test_power_off_instance_no_vm_ref(self, fake_get_ref):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake-task'),
+ mock.patch.object(session, '_wait_for_task')
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.power_off_instance(session, fake_instance)
+ fake_get_ref.assert_called_once_with(session, fake_instance)
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOffVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ @mock.patch.object(vm_util, "get_vm_ref")
+ def test_power_off_instance_with_exception(self, fake_get_ref):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake-task'),
+ mock.patch.object(session, '_wait_for_task',
+ side_effect=exception.NovaException('fake'))
+ ) as (fake_call_method, fake_wait_for_task):
+ self.assertRaises(exception.NovaException,
+ vm_util.power_off_instance,
+ session, fake_instance, 'fake-vm-ref')
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOffVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+ self.assertFalse(fake_get_ref.called)
+
+ @mock.patch.object(vm_util, "get_vm_ref")
+ def test_power_off_instance_power_state_exception(self, fake_get_ref):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake-task'),
+ mock.patch.object(
+ session, '_wait_for_task',
+ side_effect=vexc.InvalidPowerStateException)
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.power_off_instance(session, fake_instance, 'fake-vm-ref')
+ fake_call_method.assert_called_once_with(session.vim,
+ "PowerOffVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+ self.assertFalse(fake_get_ref.called)
+
+
+@mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
+class VMwareVMUtilGetHostRefTestCase(test.NoDBTestCase):
+ # N.B. Mocking on the class only mocks test_*(), but we need
+ # VMwareAPISession.vim to be mocked in both setUp and tests. Not mocking in
+ # setUp causes object initialisation to fail. Not mocking in tests results
+ # in vim calls not using FakeVim.
+ @mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
+ def setUp(self):
+ super(VMwareVMUtilGetHostRefTestCase, self).setUp()
+ fake.reset()
+ vm_util.vm_refs_cache_reset()
+
+ self.session = driver.VMwareAPISession()
+
+ # Create a fake VirtualMachine running on a known host
+ self.host_ref = fake._db_content['HostSystem'].keys()[0]
+ self.vm_ref = fake.create_vm(host_ref=self.host_ref)
+
+ @mock.patch.object(vm_util, 'get_vm_ref')
+ def test_get_host_ref_for_vm(self, mock_get_vm_ref):
+ mock_get_vm_ref.return_value = self.vm_ref
+
+ ret = vm_util.get_host_ref_for_vm(self.session, 'fake-instance')
+
+ mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
+ self.assertEqual(self.host_ref, ret)
+
+ @mock.patch.object(vm_util, 'get_vm_ref')
+ def test_get_host_name_for_vm(self, mock_get_vm_ref):
+ mock_get_vm_ref.return_value = self.vm_ref
+
+ host = fake._get_object(self.host_ref)
+
+ ret = vm_util.get_host_name_for_vm(self.session, 'fake-instance')
+
+ mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
+ self.assertEqual(host.name, ret)
diff --git a/nova/tests/unit/virt/vmwareapi/test_vmops.py b/nova/tests/unit/virt/vmwareapi/test_vmops.py
new file mode 100644
index 0000000000..e70f4661b0
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_vmops.py
@@ -0,0 +1,1293 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import contextlib
+
+import mock
+from oslo.utils import units
+from oslo.vmware import exceptions as vexc
+
+from nova.compute import power_state
+from nova import context
+from nova import db
+from nova import exception
+from nova.network import model as network_model
+from nova import objects
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.unit import fake_instance
+import nova.tests.unit.image.fake
+from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
+from nova.tests.unit.virt.vmwareapi import stubs
+from nova.virt.vmwareapi import constants
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import ds_util
+from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
+from nova.virt.vmwareapi import vmops
+
+
+class DsPathMatcher:
+    # Equality-based matcher for mock assertions: an argument compares equal
+    # when its str() form matches the expected datastore path string.
+    def __init__(self, expected_ds_path_str):
+        self.expected_ds_path_str = expected_ds_path_str
+
+    def __eq__(self, ds_path_param):
+        return str(ds_path_param) == self.expected_ds_path_str
+
+
+class VMwareVMOpsTestCase(test.NoDBTestCase):
+    def setUp(self):
+        # Build the common fixture: a VMwareVMOps wired to a fake session,
+        # one fake instance, a fake datastore/datacenter, and dual-stack
+        # (IPv4+IPv6) plus IPv6-only network_info objects.
+        super(VMwareVMOpsTestCase, self).setUp()
+        vmwareapi_fake.reset()
+        stubs.set_stubs(self.stubs)
+        self.flags(image_cache_subdirectory_name='vmware_base',
+                   my_ip='',
+                   flat_injected=True,
+                   vnc_enabled=True)
+        self._context = context.RequestContext('fake_user', 'fake_project')
+        self._session = driver.VMwareAPISession()
+
+        self._virtapi = mock.Mock()
+        self._vmops = vmops.VMwareVMOps(self._session, self._virtapi, None)
+
+        self._image_id = nova.tests.unit.image.fake.get_valid_image_id()
+        self._instance_values = {
+            'name': 'fake_name',
+            'uuid': 'fake_uuid',
+            'vcpus': 1,
+            'memory_mb': 512,
+            'image_ref': self._image_id,
+            'root_gb': 10,
+            'node': 'respool-1001(MyResPoolName)',
+            'expected_attrs': ['system_metadata'],
+        }
+        self._instance = fake_instance.fake_instance_obj(
+            self._context, **self._instance_values)
+
+        fake_ds_ref = vmwareapi_fake.ManagedObjectReference('fake-ds')
+        self._ds = ds_util.Datastore(
+            ref=fake_ds_ref, name='fake_ds',
+            capacity=10 * units.Gi,
+            freespace=10 * units.Gi)
+        self._dc_info = vmops.DcInfo(
+            ref='fake_dc_ref', name='fake_dc',
+            vmFolder='fake_vm_folder')
+
+        # IPv4 subnet with DNS, gateway, one fixed IP.
+        subnet_4 = network_model.Subnet(cidr='192.168.0.1/24',
+                                        dns=[network_model.IP('192.168.0.1')],
+                                        gateway=
+                                            network_model.IP('192.168.0.1'),
+                                        ips=[
+                                            network_model.IP('192.168.0.100')],
+                                        routes=None)
+        # IPv6 subnet with no DNS entries.
+        subnet_6 = network_model.Subnet(cidr='dead:beef::1/64',
+                                        dns=None,
+                                        gateway=
+                                            network_model.IP('dead:beef::1'),
+                                        ips=[network_model.IP(
+                                            'dead:beef::dcad:beff:feef:0')],
+                                        routes=None)
+        network = network_model.Network(id=0,
+                                        bridge='fa0',
+                                        label='fake',
+                                        subnets=[subnet_4, subnet_6],
+                                        vlan=None,
+                                        bridge_interface=None,
+                                        injected=True)
+        self._network_values = {
+            'id': None,
+            'address': 'DE:AD:BE:EF:00:00',
+            'network': network,
+            'type': None,
+            'devname': None,
+            'ovs_interfaceid': None,
+            'rxtx_cap': 3
+        }
+        self.network_info = network_model.NetworkInfo([
+                network_model.VIF(**self._network_values)
+        ])
+        pure_IPv6_network = network_model.Network(id=0,
+                                                  bridge='fa0',
+                                                  label='fake',
+                                                  subnets=[subnet_6],
+                                                  vlan=None,
+                                                  bridge_interface=None,
+                                                  injected=True)
+        self.pure_IPv6_network_info = network_model.NetworkInfo([
+                network_model.VIF(id=None,
+                                  address='DE:AD:BE:EF:00:00',
+                                  network=pure_IPv6_network,
+                                  type=None,
+                                  devname=None,
+                                  ovs_interfaceid=None,
+                                  rxtx_cap=3)
+        ])
+
+    def test_get_machine_id_str(self):
+        # Dual-stack network_info yields mac;ip;netmask;gateway;bcast;dns#;
+        # IPv6-only network_info yields only the MAC with empty IPv4 fields.
+        result = vmops.VMwareVMOps._get_machine_id_str(self.network_info)
+        self.assertEqual('DE:AD:BE:EF:00:00;192.168.0.100;255.255.255.0;'
+                         '192.168.0.1;192.168.0.255;192.168.0.1#', result)
+        result = vmops.VMwareVMOps._get_machine_id_str(
+            self.pure_IPv6_network_info)
+        self.assertEqual('DE:AD:BE:EF:00:00;;;;;#', result)
+
+    def _setup_create_folder_mocks(self):
+        # Helper: build a VMwareVMOps with a pre-seeded datastore->DC mapping
+        # and return the pieces needed to assert on ds_util.mkdir calls.
+        ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
+        base_name = 'folder'
+        ds_name = "datastore"
+        ds_ref = mock.Mock()
+        ds_ref.value = 1
+        dc_ref = mock.Mock()
+        ops._datastore_dc_mapping[ds_ref.value] = vmops.DcInfo(
+                ref=dc_ref,
+                name='fake-name',
+                vmFolder='fake-folder')
+        path = ds_util.DatastorePath(ds_name, base_name)
+        return ds_name, ds_ref, ops, path, dc_ref
+
+    @mock.patch.object(ds_util, 'mkdir')
+    def test_create_folder_if_missing(self, mock_mkdir):
+        # Folder absent: mkdir is issued with the expected datastore path.
+        ds_name, ds_ref, ops, path, dc = self._setup_create_folder_mocks()
+        ops._create_folder_if_missing(ds_name, ds_ref, 'folder')
+        mock_mkdir.assert_called_with(ops._session, path, dc)
+
+    @mock.patch.object(ds_util, 'mkdir')
+    def test_create_folder_if_missing_exception(self, mock_mkdir):
+        # FileAlreadyExistsException from mkdir must be swallowed, not raised.
+        ds_name, ds_ref, ops, path, dc = self._setup_create_folder_mocks()
+        ds_util.mkdir.side_effect = vexc.FileAlreadyExistsException()
+        ops._create_folder_if_missing(ds_name, ds_ref, 'folder')
+        mock_mkdir.assert_called_with(ops._session, path, dc)
+
+    @mock.patch.object(ds_util, 'file_exists', return_value=True)
+    def test_check_if_folder_file_exists_with_existing(self,
+                                                       mock_exists):
+        # Whether or not the file exists, the cache folder must be ensured.
+        ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
+        ops._create_folder_if_missing = mock.Mock()
+        mock_ds_ref = mock.Mock()
+        ops._check_if_folder_file_exists(mock.Mock(), mock_ds_ref, "datastore",
+                                         "folder", "some_file")
+        ops._create_folder_if_missing.assert_called_once_with('datastore',
+                                                              mock_ds_ref,
+                                                              'vmware_base')
+
+    @mock.patch.object(ds_util, 'file_exists', return_value=False)
+    def test_check_if_folder_file_exists_no_existing(self, mock_exists):
+        # Same expectation when file_exists reports the file is absent.
+        ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
+        ops._create_folder_if_missing = mock.Mock()
+        mock_ds_ref = mock.Mock()
+        ops._check_if_folder_file_exists(mock.Mock(), mock_ds_ref, "datastore",
+                                         "folder", "some_file")
+        ops._create_folder_if_missing.assert_called_once_with('datastore',
+                                                              mock_ds_ref,
+                                                              'vmware_base')
+
+    def test_get_valid_vms_from_retrieve_result(self):
+        # All three fake VMs are connected, so all three are returned.
+        ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
+        fake_objects = vmwareapi_fake.FakeRetrieveResult()
+        fake_objects.add_object(vmwareapi_fake.VirtualMachine())
+        fake_objects.add_object(vmwareapi_fake.VirtualMachine())
+        fake_objects.add_object(vmwareapi_fake.VirtualMachine())
+        vms = ops._get_valid_vms_from_retrieve_result(fake_objects)
+        self.assertEqual(3, len(vms))
+
+    def test_get_valid_vms_from_retrieve_result_with_invalid(self):
+        # 'orphaned' and 'inaccessible' connection states must be filtered
+        # out, leaving only the one healthy VM.
+        ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
+        fake_objects = vmwareapi_fake.FakeRetrieveResult()
+        fake_objects.add_object(vmwareapi_fake.VirtualMachine())
+        invalid_vm1 = vmwareapi_fake.VirtualMachine()
+        invalid_vm1.set('runtime.connectionState', 'orphaned')
+        invalid_vm2 = vmwareapi_fake.VirtualMachine()
+        invalid_vm2.set('runtime.connectionState', 'inaccessible')
+        fake_objects.add_object(invalid_vm1)
+        fake_objects.add_object(invalid_vm2)
+        vms = ops._get_valid_vms_from_retrieve_result(fake_objects)
+        self.assertEqual(1, len(vms))
+
+    def test_delete_vm_snapshot(self):
+        # _delete_vm_snapshot must issue RemoveSnapshot_Task (keeping child
+        # snapshots, consolidating disks) and wait for the returned task.
+        def fake_call_method(module, method, *args, **kwargs):
+            self.assertEqual('RemoveSnapshot_Task', method)
+            self.assertEqual('fake_vm_snapshot', args[0])
+            self.assertFalse(kwargs['removeChildren'])
+            self.assertTrue(kwargs['consolidate'])
+            return 'fake_remove_snapshot_task'
+
+        # NOTE(review): contextlib.nested is Python 2 only; removed in py3.
+        with contextlib.nested(
+            mock.patch.object(self._session, '_wait_for_task'),
+            mock.patch.object(self._session, '_call_method', fake_call_method)
+        ) as (_wait_for_task, _call_method):
+            self._vmops._delete_vm_snapshot(self._instance,
+                                            "fake_vm_ref", "fake_vm_snapshot")
+            _wait_for_task.assert_has_calls([
+                   mock.call('fake_remove_snapshot_task')])
+
+    def test_create_vm_snapshot(self):
+        # _create_vm_snapshot must call CreateSnapshot_Task (no memory dump,
+        # quiesced), wait for the task, then read the snapshot ref from the
+        # task's info property. method_list enforces the call ordering.
+
+        method_list = ['CreateSnapshot_Task', 'get_dynamic_property']
+
+        def fake_call_method(module, method, *args, **kwargs):
+            expected_method = method_list.pop(0)
+            self.assertEqual(expected_method, method)
+            if (expected_method == 'CreateSnapshot_Task'):
+                self.assertEqual('fake_vm_ref', args[0])
+                self.assertFalse(kwargs['memory'])
+                self.assertTrue(kwargs['quiesce'])
+                return 'fake_snapshot_task'
+            elif (expected_method == 'get_dynamic_property'):
+                task_info = mock.Mock()
+                task_info.result = "fake_snapshot_ref"
+                self.assertEqual(('fake_snapshot_task', 'Task', 'info'), args)
+                return task_info
+
+        with contextlib.nested(
+            mock.patch.object(self._session, '_wait_for_task'),
+            mock.patch.object(self._session, '_call_method', fake_call_method)
+        ) as (_wait_for_task, _call_method):
+            snap = self._vmops._create_vm_snapshot(self._instance,
+                                                   "fake_vm_ref")
+            self.assertEqual("fake_snapshot_ref", snap)
+            _wait_for_task.assert_has_calls([
+                   mock.call('fake_snapshot_task')])
+
+    def test_update_instance_progress(self):
+        # Step 5 of 10 should be saved as 50% progress on the instance.
+        instance = objects.Instance(context=mock.MagicMock(), uuid='fake-uuid')
+        with mock.patch.object(instance, 'save') as mock_save:
+            self._vmops._update_instance_progress(instance._context,
+                                                  instance, 5, 10)
+            mock_save.assert_called_once_with()
+        self.assertEqual(50, instance.progress)
+
+    @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
+    @mock.patch.object(driver.VMwareAPISession, '_call_method')
+    def test_get_info(self, mock_call, mock_get_vm_ref):
+        # Full property set available: state/memory/cpu are populated from
+        # the retrieved VM properties (memory reported in KiB).
+        props = ['summary.config.numCpu', 'summary.config.memorySizeMB',
+                 'runtime.powerState']
+        prop_cpu = vmwareapi_fake.Prop(props[0], 4)
+        prop_mem = vmwareapi_fake.Prop(props[1], 128)
+        prop_state = vmwareapi_fake.Prop(props[2], 'poweredOn')
+        prop_list = [prop_state, prop_mem, prop_cpu]
+        obj_content = vmwareapi_fake.ObjectContent(None, prop_list=prop_list)
+        result = vmwareapi_fake.FakeRetrieveResult()
+        result.add_object(obj_content)
+        mock_call.return_value = result
+        info = self._vmops.get_info(self._instance)
+        mock_call.assert_called_once_with(vim_util,
+            'get_object_properties', None, 'fake_ref', 'VirtualMachine',
+            props)
+        mock_get_vm_ref.assert_called_once_with(self._session,
+            self._instance)
+        self.assertEqual(power_state.RUNNING, info['state'])
+        self.assertEqual(128 * 1024, info['max_mem'])
+        self.assertEqual(128 * 1024, info['mem'])
+        self.assertEqual(4, info['num_cpu'])
+        self.assertEqual(0, info['cpu_time'])
+
+    @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
+    @mock.patch.object(driver.VMwareAPISession, '_call_method')
+    def test_get_info_when_ds_unavailable(self, mock_call, mock_get_vm_ref):
+        # Only the power state is returned; memory/cpu fall back to zero.
+        props = ['summary.config.numCpu', 'summary.config.memorySizeMB',
+                 'runtime.powerState']
+        prop_state = vmwareapi_fake.Prop(props[2], 'poweredOff')
+        # when vm's ds not available, only power state can be received
+        prop_list = [prop_state]
+        obj_content = vmwareapi_fake.ObjectContent(None, prop_list=prop_list)
+        result = vmwareapi_fake.FakeRetrieveResult()
+        result.add_object(obj_content)
+        mock_call.return_value = result
+        info = self._vmops.get_info(self._instance)
+        mock_call.assert_called_once_with(vim_util,
+            'get_object_properties', None, 'fake_ref', 'VirtualMachine',
+            props)
+        mock_get_vm_ref.assert_called_once_with(self._session,
+            self._instance)
+        self.assertEqual(power_state.SHUTDOWN, info['state'])
+        self.assertEqual(0, info['max_mem'])
+        self.assertEqual(0, info['mem'])
+        self.assertEqual(0, info['num_cpu'])
+        self.assertEqual(0, info['cpu_time'])
+
+    def _test_get_datacenter_ref_and_name(self, ds_ref_exists=False):
+        # Parameterized helper: when the instance's datastore ref matches a
+        # datacenter's datastore, the DC is found and cached; otherwise the
+        # paginated lookup (continue_to_get_objects) is exhausted and None
+        # is returned.
+        instance_ds_ref = mock.Mock()
+        instance_ds_ref.value = "ds-1"
+        _vcvmops = vmops.VMwareVMOps(self._session, None, None)
+        if ds_ref_exists:
+            ds_ref = mock.Mock()
+            ds_ref.value = "ds-1"
+        else:
+            ds_ref = None
+
+        def fake_call_method(module, method, *args, **kwargs):
+            fake_object1 = vmwareapi_fake.FakeRetrieveResult()
+            fake_object1.add_object(vmwareapi_fake.Datacenter(
+                ds_ref=ds_ref))
+            if not ds_ref:
+                # Token is set for the fake_object1, so it will continue to
+                # fetch the next object.
+                setattr(fake_object1, 'token', 'token-0')
+                if method == "continue_to_get_objects":
+                    fake_object2 = vmwareapi_fake.FakeRetrieveResult()
+                    fake_object2.add_object(vmwareapi_fake.Datacenter())
+                    return fake_object2
+
+            return fake_object1
+
+        with mock.patch.object(self._session, '_call_method',
+                               side_effect=fake_call_method) as fake_call:
+            dc_info = _vcvmops.get_datacenter_ref_and_name(instance_ds_ref)
+
+            if ds_ref:
+                self.assertEqual(1, len(_vcvmops._datastore_dc_mapping))
+                fake_call.assert_called_once_with(vim_util, "get_objects",
+                    "Datacenter", ["name", "datastore", "vmFolder"])
+                self.assertEqual("ha-datacenter", dc_info.name)
+            else:
+                calls = [mock.call(vim_util, "get_objects", "Datacenter",
+                                   ["name", "datastore", "vmFolder"]),
+                         mock.call(vim_util, "continue_to_get_objects",
+                                   "token-0")]
+                fake_call.assert_has_calls(calls)
+                self.assertIsNone(dc_info)
+
+    def test_get_datacenter_ref_and_name(self):
+        self._test_get_datacenter_ref_and_name(ds_ref_exists=True)
+
+    def test_get_datacenter_ref_and_name_with_no_datastore(self):
+        self._test_get_datacenter_ref_and_name()
+
+    def test_unrescue_power_on(self):
+        self._test_unrescue(True)
+
+    def test_unrescue_power_off(self):
+        self._test_unrescue(False)
+
+    def _test_unrescue(self, power_on):
+        # unrescue() must reattach the original disk, power off and destroy
+        # the '<uuid>-rescue' VM, and power the instance back on only when
+        # power_on is requested.
+        self._vmops._volumeops = mock.Mock()
+        vm_rescue_ref = mock.Mock()
+        vm_ref = mock.Mock()
+
+        # Expected get_dynamic_property arguments, in call order.
+        args_list = [(vm_ref, 'VirtualMachine',
+                      'config.hardware.device'),
+                     (vm_rescue_ref, 'VirtualMachine',
+                      'config.hardware.device')]
+
+        def fake_call_method(module, method, *args, **kwargs):
+            expected_args = args_list.pop(0)
+            self.assertEqual('get_dynamic_property', method)
+            self.assertEqual(expected_args, args)
+
+        path = mock.Mock()
+        path_and_type = (path, mock.Mock(), mock.Mock())
+        with contextlib.nested(
+                mock.patch.object(vm_util, 'get_vmdk_path_and_adapter_type',
+                                  return_value=path_and_type),
+                mock.patch.object(vm_util, 'get_vmdk_volume_disk'),
+                mock.patch.object(vm_util, 'power_on_instance'),
+                mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
+                mock.patch.object(vm_util, 'get_vm_ref_from_name',
+                                  return_value=vm_rescue_ref),
+                mock.patch.object(self._session, '_call_method',
+                                  fake_call_method),
+                mock.patch.object(vm_util, 'power_off_instance'),
+                mock.patch.object(self._vmops, '_destroy_instance'),
+        ) as (_get_vmdk_path_and_adapter_type, _get_vmdk_volume_disk,
+              _power_on_instance, _get_vm_ref, _get_vm_ref_from_name,
+              _call_method, _power_off, _destroy_instance):
+            self._vmops.unrescue(self._instance, power_on=power_on)
+
+            _get_vmdk_path_and_adapter_type.assert_called_once_with(
+                None, uuid='fake_uuid')
+            _get_vmdk_volume_disk.assert_called_once_with(None, path=path)
+            if power_on:
+                _power_on_instance.assert_called_once_with(self._session,
+                                                           self._instance,
+                                                           vm_ref=vm_ref)
+            else:
+                self.assertFalse(_power_on_instance.called)
+            _get_vm_ref.assert_called_once_with(self._session,
+                                                self._instance)
+            _get_vm_ref_from_name.assert_called_once_with(self._session,
+                                                          'fake_uuid-rescue')
+            _power_off.assert_called_once_with(self._session, self._instance,
+                                               vm_rescue_ref)
+            _destroy_instance.assert_called_once_with(self._instance,
+                instance_name='fake_uuid-rescue')
+
+    def _test_finish_migration(self, power_on=True, resize_instance=False):
+        """Tests the finish_migration method on vmops."""
+        # resize_instance=True simulates growing from a 0 GB root disk, which
+        # must trigger ReconfigVM_Task plus a virtual-disk extend; power_on
+        # controls whether the instance is started afterwards.
+        if resize_instance:
+            self._instance.system_metadata = {'old_instance_type_root_gb': '0'}
+        datastore = ds_util.Datastore(ref='fake-ref', name='fake')
+        dc_info = vmops.DcInfo(ref='fake_ref', name='fake',
+                               vmFolder='fake_folder')
+        with contextlib.nested(
+                mock.patch.object(self._session, "_call_method",
+                                  return_value='fake-task'),
+                mock.patch.object(self._vmops, "_update_instance_progress"),
+                mock.patch.object(self._session, "_wait_for_task"),
+                mock.patch.object(vm_util, "get_vm_resize_spec",
+                                  return_value='fake-spec'),
+                mock.patch.object(ds_util, "get_datastore",
+                                  return_value=datastore),
+                mock.patch.object(self._vmops, 'get_datacenter_ref_and_name',
+                                  return_value=dc_info),
+                mock.patch.object(self._vmops, '_extend_virtual_disk'),
+                mock.patch.object(vm_util, "power_on_instance")
+        ) as (fake_call_method, fake_update_instance_progress,
+              fake_wait_for_task, fake_vm_resize_spec,
+              fake_get_datastore, fake_get_datacenter_ref_and_name,
+              fake_extend_virtual_disk, fake_power_on):
+            self._vmops.finish_migration(context=self._context,
+                                         migration=None,
+                                         instance=self._instance,
+                                         disk_info=None,
+                                         network_info=None,
+                                         block_device_info=None,
+                                         resize_instance=resize_instance,
+                                         image_meta=None,
+                                         power_on=power_on)
+            if resize_instance:
+                fake_vm_resize_spec.assert_called_once_with(
+                    self._session.vim.client.factory,
+                    self._instance)
+                fake_call_method.assert_has_calls(mock.call(
+                    self._session.vim,
+                    "ReconfigVM_Task",
+                    'f',
+                    spec='fake-spec'))
+                fake_wait_for_task.assert_called_once_with('fake-task')
+                fake_extend_virtual_disk.assert_called_once_with(
+                    self._instance, self._instance['root_gb'] * units.Mi,
+                    None, dc_info.ref)
+            else:
+                self.assertFalse(fake_vm_resize_spec.called)
+                self.assertFalse(fake_wait_for_task.called)
+                self.assertFalse(fake_extend_virtual_disk.called)
+
+            if power_on:
+                fake_power_on.assert_called_once_with(self._session,
+                                                      self._instance,
+                                                      vm_ref='f')
+            else:
+                self.assertFalse(fake_power_on.called)
+            # NOTE(review): 'called_once_with' (no assert_ prefix) is not a
+            # real mock assertion — this line never fails. TODO confirm.
+            fake_update_instance_progress.called_once_with(
+                self._context, self._instance, 4, vmops.RESIZE_TOTAL_STEPS)
+
+    def test_finish_migration_power_on(self):
+        self._test_finish_migration(power_on=True, resize_instance=False)
+
+    def test_finish_migration_power_off(self):
+        self._test_finish_migration(power_on=False, resize_instance=False)
+
+    def test_finish_migration_power_on_resize(self):
+        self._test_finish_migration(power_on=True, resize_instance=True)
+
+    @mock.patch.object(vm_util, 'associate_vmref_for_instance')
+    @mock.patch.object(vm_util, 'power_on_instance')
+    def _test_finish_revert_migration(self, fake_power_on,
+                                      fake_associate_vmref, power_on):
+        """Tests the finish_revert_migration method on vmops."""
+        # Reverting must re-associate the '-orig' VM ref and power on only
+        # when requested.
+
+        # setup the test instance in the database
+        self._vmops.finish_revert_migration(self._context,
+                                            instance=self._instance,
+                                            network_info=None,
+                                            block_device_info=None,
+                                            power_on=power_on)
+        fake_associate_vmref.assert_called_once_with(self._session,
+                                                     self._instance,
+                                                     suffix='-orig')
+        if power_on:
+            fake_power_on.assert_called_once_with(self._session,
+                                                  self._instance)
+        else:
+            self.assertFalse(fake_power_on.called)
+
+    def test_finish_revert_migration_power_on(self):
+        self._test_finish_revert_migration(power_on=True)
+
+    def test_finish_revert_migration_power_off(self):
+        self._test_finish_revert_migration(power_on=False)
+
+    @mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
+    @mock.patch.object(vmops.VMwareVMOps, '_create_config_drive')
+    def test_configure_config_drive(self,
+                                    mock_create_config_drive,
+                                    mock_attach_cdrom_to_vm):
+        # The config-drive ISO built by _create_config_drive must be attached
+        # to the VM as a CD-ROM at the datastore path of the uploaded ISO.
+        injected_files = mock.Mock()
+        admin_password = mock.Mock()
+        vm_ref = mock.Mock()
+        mock_create_config_drive.return_value = "fake_iso_path"
+        self._vmops._configure_config_drive(
+                self._instance, vm_ref, self._dc_info, self._ds,
+                injected_files, admin_password)
+
+        upload_iso_path = self._ds.build_path("fake_iso_path")
+        mock_create_config_drive.assert_called_once_with(self._instance,
+                injected_files, admin_password, self._ds.name,
+                self._dc_info.name, self._instance.uuid, "Fake-CookieJar")
+        mock_attach_cdrom_to_vm.assert_called_once_with(
+                vm_ref, self._instance, self._ds.ref, str(upload_iso_path))
+
+    @mock.patch.object(vmops.LOG, 'debug')
+    @mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
+    @mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
+    def test_spawn_mask_block_device_info_password(self,
+                                                  mock_build_virtual_machine,
+                                                  mock_get_vm_config_info,
+                                                  mock_debug):
+        # Very simple test that just ensures block_device_info auth_password
+        # is masked when logged; the rest of the test just fails out early.
+        data = {'auth_password': 'scrubme'}
+        bdm = [{'connection_info': {'data': data}}]
+        bdi = {'block_device_mapping': bdm}
+
+        self.password_logged = False
+
+        # Tests that the parameters to the to_xml method are sanitized for
+        # passwords when logged.
+        def fake_debug(*args, **kwargs):
+            if 'auth_password' in args[0]:
+                self.password_logged = True
+                self.assertNotIn('scrubme', args[0])
+
+        mock_debug.side_effect = fake_debug
+        self.flags(flat_injected=False, vnc_enabled=False)
+
+        # Call spawn(). We don't care what it does as long as it generates
+        # the log message, which we check below.
+        with mock.patch.object(self._vmops, '_volumeops') as mock_vo:
+            # Force spawn() to abort right after the debug log is emitted.
+            mock_vo.attach_root_volume.side_effect = test.TestingException
+            try:
+                self._vmops.spawn(
+                    self._context, self._instance, {},
+                    injected_files=None, admin_password=None,
+                    network_info=[], block_device_info=bdi
+                )
+            except test.TestingException:
+                pass
+
+        # Check that the relevant log message was generated, and therefore
+        # that we checked it was scrubbed
+        self.assertTrue(self.password_logged)
+
+    def test_get_ds_browser(self):
+        # First lookup fetches the browser via get_dynamic_property and
+        # stores it in the per-datastore cache keyed by moref value.
+        cache = self._vmops._datastore_browser_mapping
+        ds_browser = mock.Mock()
+        moref = vmwareapi_fake.ManagedObjectReference('datastore-100')
+        self.assertIsNone(cache.get(moref.value))
+        mock_call_method = mock.Mock(return_value=ds_browser)
+        with mock.patch.object(self._session, '_call_method',
+                               mock_call_method):
+            ret = self._vmops._get_ds_browser(moref)
+            mock_call_method.assert_called_once_with(vim_util,
+                'get_dynamic_property', moref, 'Datastore', 'browser')
+            self.assertIs(ds_browser, ret)
+            self.assertIs(ds_browser, cache.get(moref.value))
+
+    @mock.patch.object(
+        vmops.VMwareVMOps, '_sized_image_exists', return_value=False)
+    @mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
+    @mock.patch.object(vm_util, 'copy_virtual_disk')
+    def _test_use_disk_image_as_linked_clone(self,
+                                             mock_copy_virtual_disk,
+                                             mock_extend_virtual_disk,
+                                             mock_sized_image_exists,
+                                             flavor_fits_image=False):
+        # Linked clone: the cached image is copied to a flavor-sized cache
+        # file, extended only when the flavor root disk exceeds the image,
+        # then attached to the VM as a non-linked-clone disk.
+        file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi
+        image_info = images.VMwareImage(
+                image_id=self._image_id,
+                file_size=file_size,
+                linked_clone=False)
+
+        cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
+        mock_imagecache = mock.Mock()
+        mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
+        vi = vmops.VirtualMachineInstanceConfigInfo(
+                self._instance, "fake_uuid", image_info,
+                self._ds, self._dc_info, mock_imagecache)
+
+        sized_cached_image_ds_loc = cache_root_folder.join(
+                "%s.%s.vmdk" % (self._image_id, vi.root_gb))
+
+        self._vmops._volumeops = mock.Mock()
+        mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
+
+        self._vmops._use_disk_image_as_linked_clone("fake_vm_ref", vi)
+
+        mock_copy_virtual_disk.assert_called_once_with(
+                self._session, self._dc_info.ref,
+                str(vi.cache_image_path),
+                str(sized_cached_image_ds_loc))
+
+        if not flavor_fits_image:
+            mock_extend_virtual_disk.assert_called_once_with(
+                    self._instance, vi.root_gb * units.Mi,
+                    str(sized_cached_image_ds_loc),
+                    self._dc_info.ref)
+
+        mock_attach_disk_to_vm.assert_called_once_with(
+                "fake_vm_ref", self._instance, vi.ii.adapter_type,
+                vi.ii.disk_type,
+                str(sized_cached_image_ds_loc),
+                vi.root_gb * units.Mi, False)
+
+    def test_use_disk_image_as_linked_clone(self):
+        self._test_use_disk_image_as_linked_clone()
+
+    def test_use_disk_image_as_linked_clone_flavor_fits_image(self):
+        self._test_use_disk_image_as_linked_clone(flavor_fits_image=True)
+
+    @mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
+    @mock.patch.object(vm_util, 'copy_virtual_disk')
+    def _test_use_disk_image_as_full_clone(self,
+                                           mock_copy_virtual_disk,
+                                           mock_extend_virtual_disk,
+                                           flavor_fits_image=False):
+        # Full clone: the cached image is copied straight into the instance
+        # directory, extended only when the flavor root disk exceeds the
+        # image, then attached to the VM.
+        file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi
+        image_info = images.VMwareImage(
+                image_id=self._image_id,
+                file_size=file_size,
+                linked_clone=False)
+
+        cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
+        mock_imagecache = mock.Mock()
+        mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
+        vi = vmops.VirtualMachineInstanceConfigInfo(
+                self._instance, "fake_uuid", image_info,
+                self._ds, self._dc_info, mock_imagecache)
+
+        self._vmops._volumeops = mock.Mock()
+        mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
+
+        self._vmops._use_disk_image_as_full_clone("fake_vm_ref", vi)
+
+        mock_copy_virtual_disk.assert_called_once_with(
+                self._session, self._dc_info.ref,
+                str(vi.cache_image_path),
+                '[fake_ds] fake_uuid/fake_uuid.vmdk')
+
+        if not flavor_fits_image:
+            mock_extend_virtual_disk.assert_called_once_with(
+                    self._instance, vi.root_gb * units.Mi,
+                    '[fake_ds] fake_uuid/fake_uuid.vmdk', self._dc_info.ref)
+
+        mock_attach_disk_to_vm.assert_called_once_with(
+                "fake_vm_ref", self._instance, vi.ii.adapter_type,
+                vi.ii.disk_type, '[fake_ds] fake_uuid/fake_uuid.vmdk',
+                vi.root_gb * units.Mi, False)
+
+    def test_use_disk_image_as_full_clone(self):
+        self._test_use_disk_image_as_full_clone()
+
+    def test_use_disk_image_as_full_clone_image_too_big(self):
+        self._test_use_disk_image_as_full_clone(flavor_fits_image=True)
+
+    @mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
+    @mock.patch.object(vm_util, 'create_virtual_disk')
+    def _test_use_iso_image(self,
+                            mock_create_virtual_disk,
+                            mock_attach_cdrom,
+                            with_root_disk):
+        # ISO boot: the cached ISO is always attached as a CD-ROM; a blank
+        # root disk is created and attached only when with_root_disk is set.
+        image_info = images.VMwareImage(
+                image_id=self._image_id,
+                file_size=10 * units.Mi,
+                linked_clone=True)
+
+        cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
+        mock_imagecache = mock.Mock()
+        mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
+        vi = vmops.VirtualMachineInstanceConfigInfo(
+                self._instance, "fake_uuid", image_info,
+                self._ds, self._dc_info, mock_imagecache)
+
+        self._vmops._volumeops = mock.Mock()
+        mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
+
+        self._vmops._use_iso_image("fake_vm_ref", vi)
+
+        mock_attach_cdrom.assert_called_once_with(
+                "fake_vm_ref", self._instance, self._ds.ref,
+                str(vi.cache_image_path))
+
+        if with_root_disk:
+            mock_create_virtual_disk.assert_called_once_with(
+                    self._session, self._dc_info.ref,
+                    vi.ii.adapter_type, vi.ii.disk_type,
+                    '[fake_ds] fake_uuid/fake_uuid.vmdk',
+                    vi.root_gb * units.Mi)
+            linked_clone = False
+            mock_attach_disk_to_vm.assert_called_once_with(
+                    "fake_vm_ref", self._instance,
+                    vi.ii.adapter_type, vi.ii.disk_type,
+                    '[fake_ds] fake_uuid/fake_uuid.vmdk',
+                    vi.root_gb * units.Mi, linked_clone)
+
+    def test_use_iso_image_with_root_disk(self):
+        self._test_use_iso_image(with_root_disk=True)
+
+    def test_use_iso_image_without_root_disk(self):
+        self._test_use_iso_image(with_root_disk=False)
+
+    def _verify_spawn_method_calls(self, mock_call_method):
+        # Asserts the exact ordered sequence of VIM method names invoked by
+        # spawn(), extracted from the recorded _call_method mock calls.
+        # TODO(vui): More explicit assertions of spawn() behavior
+        # are waiting on additional refactoring pertaining to image
+        # handling/manipulation. Till then, we continue to assert on the
+        # sequence of VIM operations invoked.
+        expected_methods = ['get_dynamic_property',
+                            'SearchDatastore_Task',
+                            'CreateVirtualDisk_Task',
+                            'DeleteDatastoreFile_Task',
+                            'MoveDatastoreFile_Task',
+                            'DeleteDatastoreFile_Task',
+                            'SearchDatastore_Task',
+                            'ExtendVirtualDisk_Task',
+        ]
+
+        # c[1] is the positional-args tuple; index 1 is the method name.
+        recorded_methods = [c[1][1] for c in mock_call_method.mock_calls]
+        self.assertEqual(expected_methods, recorded_methods)
+
+    @mock.patch(
+        'nova.virt.vmwareapi.vmops.VMwareVMOps._configure_config_drive')
+    @mock.patch('nova.virt.vmwareapi.ds_util.get_datastore')
+    @mock.patch(
+        'nova.virt.vmwareapi.vmops.VMwareVMOps.get_datacenter_ref_and_name')
+    @mock.patch('nova.virt.vmwareapi.vm_util.get_mo_id_from_instance',
+                return_value='fake_node_mo_id')
+    @mock.patch('nova.virt.vmwareapi.vm_util.get_res_pool_ref',
+                return_value='fake_rp_ref')
+    @mock.patch('nova.virt.vmwareapi.vif.get_vif_info',
+                return_value=[])
+    @mock.patch('nova.utils.is_neutron',
+                return_value=False)
+    @mock.patch('nova.virt.vmwareapi.vm_util.get_vm_create_spec',
+                return_value='fake_create_spec')
+    @mock.patch('nova.virt.vmwareapi.vm_util.create_vm',
+                return_value='fake_vm_ref')
+    @mock.patch('nova.virt.vmwareapi.ds_util.mkdir')
+    @mock.patch('nova.virt.vmwareapi.vmops.VMwareVMOps._set_machine_id')
+    @mock.patch(
+        'nova.virt.vmwareapi.imagecache.ImageCacheManager.enlist_image')
+    @mock.patch.object(vmops.VMwareVMOps, '_get_and_set_vnc_config')
+    @mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
+    @mock.patch('nova.virt.vmwareapi.vm_util.copy_virtual_disk')
+    # TODO(dims): Need to add tests for create_virtual_disk after the
+    # disk/image code in spawn gets refactored
+    def _test_spawn(self,
+                    mock_copy_virtual_disk,
+                    mock_power_on_instance,
+                    mock_get_and_set_vnc_config,
+                    mock_enlist_image,
+                    mock_set_machine_id,
+                    mock_mkdir,
+                    mock_create_vm,
+                    mock_get_create_spec,
+                    mock_is_neutron,
+                    mock_get_vif_info,
+                    mock_get_res_pool_ref,
+                    mock_get_mo_id_for_instance,
+                    mock_get_datacenter_ref_and_name,
+                    mock_get_datastore,
+                    mock_configure_config_drive,
+                    block_device_info=None,
+                    power_on=True,
+                    allocations=None,
+                    config_drive=False):
+        # End-to-end spawn() exercise: asserts VM creation plumbing, and
+        # either root-volume attach (boot-from-volume path) or image
+        # fetch/copy (image-boot path), depending on block_device_info.
+
+        self._vmops._volumeops = mock.Mock()
+        image = {
+            'id': 'fake-image-d',
+            'disk_format': 'vmdk',
+            'size': 1 * units.Gi,
+        }
+        network_info = mock.Mock()
+        mock_get_datastore.return_value = self._ds
+        mock_get_datacenter_ref_and_name.return_value = self._dc_info
+        mock_call_method = mock.Mock(return_value='fake_task')
+
+        with contextlib.nested(
+                mock.patch.object(self._session, '_wait_for_task'),
+                mock.patch.object(self._session, '_call_method',
+                                  mock_call_method),
+                mock.patch.object(uuidutils, 'generate_uuid',
+                                  return_value='tmp-uuid'),
+                mock.patch.object(images, 'fetch_image')
+        ) as (_wait_for_task, _call_method, _generate_uuid, _fetch_image):
+            self._vmops.spawn(self._context, self._instance, image,
+                              injected_files='fake_files',
+                              admin_password='password',
+                              network_info=network_info,
+                              block_device_info=block_device_info,
+                              power_on=power_on)
+
+            mock_is_neutron.assert_called_once_with()
+
+            expected_mkdir_calls = 2
+            if block_device_info and len(block_device_info.get(
+                    'block_device_mapping', [])) > 0:
+                # if block_device_info contains key 'block_device_mapping'
+                # with any information, method mkdir wouldn't be called in
+                # method self._vmops.spawn()
+                expected_mkdir_calls = 0
+
+            self.assertEqual(expected_mkdir_calls, len(mock_mkdir.mock_calls))
+
+            mock_get_mo_id_for_instance.assert_called_once_with(self._instance)
+            mock_get_res_pool_ref.assert_called_once_with(
+                    self._session, None, 'fake_node_mo_id')
+            mock_get_vif_info.assert_called_once_with(
+                    self._session, None, False,
+                    constants.DEFAULT_VIF_MODEL, network_info)
+            if allocations is None:
+                allocations = {}
+            mock_get_create_spec.assert_called_once_with(
+                    self._session.vim.client.factory,
+                    self._instance,
+                    'fake_uuid',
+                    'fake_ds',
+                    [],
+                    'otherGuest',
+                    allocations=allocations)
+            mock_create_vm.assert_called_once_with(
+                    self._session,
+                    self._instance,
+                    'fake_vm_folder',
+                    'fake_create_spec',
+                    'fake_rp_ref')
+            mock_get_and_set_vnc_config.assert_called_once_with(
+                self._session.vim.client.factory,
+                self._instance)
+            mock_set_machine_id.assert_called_once_with(
+                self._session.vim.client.factory,
+                self._instance,
+                network_info)
+            if power_on:
+                mock_power_on_instance.assert_called_once_with(
+                    self._session, self._instance, vm_ref='fake_vm_ref')
+            else:
+                self.assertFalse(mock_power_on_instance.called)
+
+            if block_device_info:
+                # Boot from volume: no image fetch or VIM task activity.
+                root_disk = block_device_info['block_device_mapping'][0]
+                mock_attach = self._vmops._volumeops.attach_root_volume
+                mock_attach.assert_called_once_with(
+                        root_disk['connection_info'], self._instance, 'vda',
+                        self._ds.ref)
+                self.assertFalse(_wait_for_task.called)
+                self.assertFalse(_fetch_image.called)
+                self.assertFalse(_call_method.called)
+            else:
+                # Image boot: image is enlisted in the cache, fetched to a
+                # temp upload path, and the sized copy is produced.
+                mock_enlist_image.assert_called_once_with(
+                        self._image_id, self._ds, self._dc_info.ref)
+
+                upload_file_name = 'vmware_temp/tmp-uuid/%s/%s-flat.vmdk' % (
+                        self._image_id, self._image_id)
+                _fetch_image.assert_called_once_with(
+                        self._context,
+                        self._instance,
+                        self._session._host,
+                        self._dc_info.name,
+                        self._ds.name,
+                        upload_file_name,
+                        cookies='Fake-CookieJar')
+                self.assertTrue(len(_wait_for_task.mock_calls) > 0)
+                self._verify_spawn_method_calls(_call_method)
+
+                dc_ref = 'fake_dc_ref'
+                # NOTE(review): unicode() is Python 2 only. TODO confirm
+                # target runtime before porting.
+                source_file = unicode('[fake_ds] vmware_base/%s/%s.vmdk' %
+                              (self._image_id, self._image_id))
+                dest_file = unicode('[fake_ds] vmware_base/%s/%s.%d.vmdk' %
+                              (self._image_id, self._image_id,
+                               self._instance['root_gb']))
+                # TODO(dims): add more tests for copy_virtual_disk after
+                # the disk/image code in spawn gets refactored
+                mock_copy_virtual_disk.assert_called_with(self._session,
+                                                          dc_ref,
+                                                          source_file,
+                                                          dest_file)
+            if config_drive:
+                mock_configure_config_drive.assert_called_once_with(
+                        self._instance, 'fake_vm_ref', self._dc_info,
+                        self._ds, 'fake_files', 'password')
+
+ @mock.patch.object(ds_util, 'get_datastore')
+ @mock.patch.object(vmops.VMwareVMOps, 'get_datacenter_ref_and_name')
+ def _test_get_spawn_vm_config_info(self,
+ mock_get_datacenter_ref_and_name,
+ mock_get_datastore,
+ image_size_bytes=0,
+ instance_name=None):
+ image_info = images.VMwareImage(
+ image_id=self._image_id,
+ file_size=image_size_bytes,
+ linked_clone=True)
+
+ mock_get_datastore.return_value = self._ds
+ mock_get_datacenter_ref_and_name.return_value = self._dc_info
+
+ vi = self._vmops._get_vm_config_info(
+ self._instance, image_info, instance_name=instance_name)
+ self.assertEqual(image_info, vi.ii)
+ self.assertEqual(self._ds, vi.datastore)
+ self.assertEqual(self._instance.root_gb, vi.root_gb)
+ self.assertEqual(self._instance, vi.instance)
+ if instance_name is not None:
+ self.assertEqual(instance_name, vi.instance_name)
+ else:
+ self.assertEqual(self._instance.uuid, vi.instance_name)
+
+ cache_image_path = '[%s] vmware_base/%s/%s.vmdk' % (
+ self._ds.name, self._image_id, self._image_id)
+ self.assertEqual(cache_image_path, str(vi.cache_image_path))
+
+ cache_image_folder = '[%s] vmware_base/%s' % (
+ self._ds.name, self._image_id)
+ self.assertEqual(cache_image_folder, str(vi.cache_image_folder))
+
+ def test_get_spawn_vm_config_info(self):
+ image_size = (self._instance.root_gb) * units.Gi / 2
+ self._test_get_spawn_vm_config_info(image_size_bytes=image_size)
+
+ def test_get_spawn_vm_config_info_image_too_big(self):
+ image_size = (self._instance.root_gb + 1) * units.Gi
+ self.assertRaises(exception.InstanceUnacceptable,
+ self._test_get_spawn_vm_config_info,
+ image_size_bytes=image_size)
+
+ def test_get_spawn_vm_config_info_with_instance_name(self):
+ image_size = (self._instance.root_gb) * units.Gi / 2
+ self._test_get_spawn_vm_config_info(
+ image_size_bytes=image_size,
+ instance_name="foo_instance_name")
+
+ def test_spawn(self):
+ self._test_spawn()
+
+ def test_spawn_config_drive_enabled(self):
+ self.flags(force_config_drive=True)
+ self._test_spawn(config_drive=True)
+
+ def test_spawn_no_power_on(self):
+ self._test_spawn(power_on=False)
+
+ def test_spawn_with_block_device_info(self):
+ block_device_info = {
+ 'block_device_mapping': [{'connection_info': 'fake'}]
+ }
+ self._test_spawn(block_device_info=block_device_info)
+
+ def test_spawn_with_block_device_info_with_config_drive(self):
+ self.flags(force_config_drive=True)
+ block_device_info = {
+ 'block_device_mapping': [{'connection_info': 'fake'}]
+ }
+ self._test_spawn(block_device_info=block_device_info,
+ config_drive=True)
+
+ def test_build_virtual_machine(self):
+ image_id = nova.tests.unit.image.fake.get_valid_image_id()
+ image = images.VMwareImage(image_id=image_id)
+
+ vm_ref = self._vmops.build_virtual_machine(self._instance,
+ 'fake-instance-name',
+ image, self._dc_info,
+ self._ds, self.network_info)
+
+ vm = vmwareapi_fake._get_object(vm_ref)
+
+ # Test basic VM parameters
+ self.assertEqual('fake-instance-name', vm.name)
+ # NOTE(mdbooth): The instanceUuid behaviour below is apparently
+ # deliberate.
+ self.assertEqual('fake-instance-name',
+ vm.get('summary.config.instanceUuid'))
+ self.assertEqual(self._instance_values['vcpus'],
+ vm.get('summary.config.numCpu'))
+ self.assertEqual(self._instance_values['memory_mb'],
+ vm.get('summary.config.memorySizeMB'))
+
+ # Test NSX config
+ for optval in vm.get('config.extraConfig').OptionValue:
+ if optval.key == 'nvp.vm-uuid':
+ self.assertEqual(self._instance_values['uuid'], optval.value)
+ break
+ else:
+ self.fail('nvp.vm-uuid not found in extraConfig')
+
+ # Test that the VM is associated with the specified datastore
+ datastores = vm.datastore.ManagedObjectReference
+ self.assertEqual(1, len(datastores))
+
+ datastore = vmwareapi_fake._get_object(datastores[0])
+ self.assertEqual(self._ds.name, datastore.get('summary.name'))
+
+ # Test that the VM's network is configured as specified
+ devices = vm.get('config.hardware.device').VirtualDevice
+ for device in devices:
+ if device.obj_name != 'ns0:VirtualE1000':
+ continue
+ self.assertEqual(self._network_values['address'],
+ device.macAddress)
+ break
+ else:
+ self.fail('NIC not configured')
+
+ def test_spawn_cpu_limit(self):
+ def _fake_flavor_get(context, id):
+ flavor = stubs._fake_flavor_get(context, id)
+ flavor['extra_specs'].update({'quota:cpu_limit': 7})
+ return flavor
+
+ with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
+ self._test_spawn(allocations={'cpu_limit': 7})
+
+ def test_spawn_cpu_reservation(self):
+ def _fake_flavor_get(context, id):
+ flavor = stubs._fake_flavor_get(context, id)
+ flavor['extra_specs'].update({'quota:cpu_reservation': 7})
+ return flavor
+
+ with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
+ self._test_spawn(allocations={'cpu_reservation': 7})
+
+ def test_spawn_cpu_allocations(self):
+ def _fake_flavor_get(context, id):
+ flavor = stubs._fake_flavor_get(context, id)
+ flavor['extra_specs'].update({'quota:cpu_limit': 7,
+ 'quota:cpu_reservation': 6})
+ return flavor
+
+ with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
+ self._test_spawn(allocations={'cpu_limit': 7,
+ 'cpu_reservation': 6})
+
+ def test_spawn_cpu_shares_level(self):
+ def _fake_flavor_get(context, id):
+ flavor = stubs._fake_flavor_get(context, id)
+ flavor['extra_specs'].update({'quota:cpu_shares_level': 'high'})
+ return flavor
+
+ with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
+ self._test_spawn(allocations={'cpu_shares_level': 'high'})
+
+ def test_spawn_cpu_shares_custom(self):
+ def _fake_flavor_get(context, id):
+ flavor = stubs._fake_flavor_get(context, id)
+ flavor['extra_specs'].update({'quota:cpu_shares_level': 'custom',
+ 'quota:cpu_shares_share': 1948})
+ return flavor
+
+ with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
+ self._test_spawn(allocations={'cpu_shares_level': 'custom',
+ 'cpu_shares_share': 1948})
+
+ def _make_vm_config_info(self, is_iso=False, is_sparse_disk=False):
+ disk_type = (constants.DISK_TYPE_SPARSE if is_sparse_disk
+ else constants.DEFAULT_DISK_TYPE)
+ file_type = (constants.DISK_FORMAT_ISO if is_iso
+ else constants.DEFAULT_DISK_FORMAT)
+
+ image_info = images.VMwareImage(
+ image_id=self._image_id,
+ file_size=10 * units.Mi,
+ file_type=file_type,
+ disk_type=disk_type,
+ linked_clone=True)
+ cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
+ mock_imagecache = mock.Mock()
+ mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
+ vi = vmops.VirtualMachineInstanceConfigInfo(
+ self._instance, "fake_uuid", image_info,
+ self._ds, self._dc_info, mock_imagecache)
+ return vi
+
+ @mock.patch.object(vmops.VMwareVMOps, 'check_cache_folder')
+ @mock.patch.object(vmops.VMwareVMOps, '_fetch_image_as_file')
+ @mock.patch.object(vmops.VMwareVMOps, '_prepare_iso_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_prepare_sparse_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_prepare_flat_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_cache_iso_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_cache_sparse_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_cache_flat_image')
+ @mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
+ def _test_fetch_image_if_missing(self,
+ mock_delete_datastore_file,
+ mock_cache_flat_image,
+ mock_cache_sparse_image,
+ mock_cache_iso_image,
+ mock_prepare_flat_image,
+ mock_prepare_sparse_image,
+ mock_prepare_iso_image,
+ mock_fetch_image_as_file,
+ mock_check_cache_folder,
+ is_iso=False,
+ is_sparse_disk=False):
+
+ tmp_dir_path = mock.Mock()
+ tmp_image_path = mock.Mock()
+ if is_iso:
+ mock_prepare = mock_prepare_iso_image
+ mock_cache = mock_cache_iso_image
+ elif is_sparse_disk:
+ mock_prepare = mock_prepare_sparse_image
+ mock_cache = mock_cache_sparse_image
+ else:
+ mock_prepare = mock_prepare_flat_image
+ mock_cache = mock_cache_flat_image
+ mock_prepare.return_value = tmp_dir_path, tmp_image_path
+
+ vi = self._make_vm_config_info(is_iso, is_sparse_disk)
+ self._vmops._fetch_image_if_missing(self._context, vi)
+
+ mock_check_cache_folder.assert_called_once_with(
+ self._ds.name, self._ds.ref)
+ mock_prepare.assert_called_once_with(vi)
+ mock_fetch_image_as_file.assert_called_once_with(
+ self._context, vi, tmp_image_path)
+ mock_cache.assert_called_once_with(vi, tmp_image_path)
+ mock_delete_datastore_file.assert_called_once_with(
+ str(tmp_dir_path), self._dc_info.ref)
+
+ def test_fetch_image_if_missing(self):
+ self._test_fetch_image_if_missing()
+
+ def test_fetch_image_if_missing_with_sparse(self):
+ self._test_fetch_image_if_missing(
+ is_sparse_disk=True)
+
+ def test_fetch_image_if_missing_with_iso(self):
+ self._test_fetch_image_if_missing(
+ is_iso=True)
+
+ @mock.patch.object(images, 'fetch_image')
+ def test_fetch_image_as_file(self, mock_fetch_image):
+ vi = self._make_vm_config_info()
+ image_ds_loc = mock.Mock()
+ self._vmops._fetch_image_as_file(self._context, vi, image_ds_loc)
+ mock_fetch_image.assert_called_once_with(
+ self._context,
+ vi.instance,
+ self._session._host,
+ self._dc_info.name,
+ self._ds.name,
+ image_ds_loc.rel_path,
+ cookies='Fake-CookieJar')
+
+ @mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
+ def test_prepare_iso_image(self, mock_generate_uuid):
+ vi = self._make_vm_config_info(is_iso=True)
+ tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_iso_image(vi)
+
+ expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
+ expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s.iso' % (
+ self._ds.name, self._image_id, self._image_id)
+
+ self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
+ self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
+
+ @mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
+ def test_prepare_sparse_image(self, mock_generate_uuid):
+ vi = self._make_vm_config_info(is_sparse_disk=True)
+ tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_sparse_image(vi)
+
+ expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
+ expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s' % (
+ self._ds.name, self._image_id, "tmp-sparse.vmdk")
+
+ self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
+ self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
+
+ @mock.patch.object(ds_util, 'mkdir')
+ @mock.patch.object(vm_util, 'create_virtual_disk')
+ @mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
+ @mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
+ def test_prepare_flat_image(self,
+ mock_generate_uuid,
+ mock_delete_datastore_file,
+ mock_create_virtual_disk,
+ mock_mkdir):
+ vi = self._make_vm_config_info()
+ tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_flat_image(vi)
+
+ expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
+ expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s-flat.vmdk' % (
+ self._ds.name, self._image_id, self._image_id)
+ expected_image_path_parent = '[%s] vmware_temp/tmp-uuid/%s' % (
+ self._ds.name, self._image_id)
+ expected_path_to_create = '[%s] vmware_temp/tmp-uuid/%s/%s.vmdk' % (
+ self._ds.name, self._image_id, self._image_id)
+
+ mock_mkdir.assert_called_once_with(
+ self._session, DsPathMatcher(expected_image_path_parent),
+ self._dc_info.ref)
+
+ self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
+ self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
+
+ image_info = vi.ii
+ mock_create_virtual_disk.assert_called_once_with(
+ self._session, self._dc_info.ref,
+ image_info.adapter_type,
+ image_info.disk_type,
+ DsPathMatcher(expected_path_to_create),
+ image_info.file_size_in_kb)
+ mock_delete_datastore_file.assert_called_once_with(
+ DsPathMatcher(expected_image_path),
+ self._dc_info.ref)
+
+ @mock.patch.object(ds_util, 'file_move')
+ def test_cache_iso_image(self, mock_file_move):
+ vi = self._make_vm_config_info(is_iso=True)
+ tmp_image_ds_loc = mock.Mock()
+
+ self._vmops._cache_iso_image(vi, tmp_image_ds_loc)
+
+ mock_file_move.assert_called_once_with(
+ self._session, self._dc_info.ref,
+ tmp_image_ds_loc.parent,
+ DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id))
+
+ @mock.patch.object(ds_util, 'file_move')
+ def test_cache_flat_image(self, mock_file_move):
+ vi = self._make_vm_config_info()
+ tmp_image_ds_loc = mock.Mock()
+
+ self._vmops._cache_flat_image(vi, tmp_image_ds_loc)
+
+ mock_file_move.assert_called_once_with(
+ self._session, self._dc_info.ref,
+ tmp_image_ds_loc.parent,
+ DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id))
+
+ @mock.patch.object(ds_util, 'file_move')
+ @mock.patch.object(vm_util, 'copy_virtual_disk')
+ @mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
+ def test_cache_sparse_image(self,
+ mock_delete_datastore_file,
+ mock_copy_virtual_disk,
+ mock_file_move):
+ vi = self._make_vm_config_info(is_sparse_disk=True)
+
+ sparse_disk_path = "[%s] vmware_temp/tmp-uuid/%s/tmp-sparse.vmdk" % (
+ self._ds.name, self._image_id)
+ tmp_image_ds_loc = ds_util.DatastorePath.parse(sparse_disk_path)
+
+ self._vmops._cache_sparse_image(vi, tmp_image_ds_loc)
+
+ target_disk_path = "[%s] vmware_temp/tmp-uuid/%s/%s.vmdk" % (
+ self._ds.name,
+ self._image_id, self._image_id)
+ mock_copy_virtual_disk.assert_called_once_with(
+ self._session, self._dc_info.ref,
+ sparse_disk_path,
+ DsPathMatcher(target_disk_path))
diff --git a/nova/tests/unit/virt/vmwareapi/test_volumeops.py b/nova/tests/unit/virt/vmwareapi/test_volumeops.py
new file mode 100644
index 0000000000..8dc6b500cb
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_volumeops.py
@@ -0,0 +1,95 @@
+# Copyright 2013 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+
+import mock
+
+from nova import test
+from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
+from nova.tests.unit.virt.vmwareapi import stubs
+from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import volumeops
+
+
+class VMwareVolumeOpsTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+
+ super(VMwareVolumeOpsTestCase, self).setUp()
+ vmwareapi_fake.reset()
+ stubs.set_stubs(self.stubs)
+ self._session = driver.VMwareAPISession()
+
+ self._volumeops = volumeops.VMwareVolumeOps(self._session)
+ self.instance = {'name': 'fake_name', 'uuid': 'fake_uuid'}
+
+ def _test_detach_disk_from_vm(self, destroy_disk=False):
+ def fake_call_method(module, method, *args, **kwargs):
+ vmdk_detach_config_spec = kwargs.get('spec')
+ virtual_device_config = vmdk_detach_config_spec.deviceChange[0]
+ self.assertEqual('remove', virtual_device_config.operation)
+ self.assertEqual('ns0:VirtualDeviceConfigSpec',
+ virtual_device_config.obj_name)
+ if destroy_disk:
+ self.assertEqual('destroy',
+ virtual_device_config.fileOperation)
+ else:
+ self.assertFalse(hasattr(virtual_device_config,
+ 'fileOperation'))
+ return 'fake_configure_task'
+ with contextlib.nested(
+ mock.patch.object(self._session, '_wait_for_task'),
+ mock.patch.object(self._session, '_call_method',
+ fake_call_method)
+ ) as (_wait_for_task, _call_method):
+ fake_device = vmwareapi_fake.DataObject()
+ fake_device.backing = vmwareapi_fake.DataObject()
+ fake_device.backing.fileName = 'fake_path'
+ fake_device.key = 'fake_key'
+ self._volumeops.detach_disk_from_vm('fake_vm_ref', self.instance,
+ fake_device, destroy_disk)
+ _wait_for_task.assert_has_calls([
+ mock.call('fake_configure_task')])
+
+ def test_detach_with_destroy_disk_from_vm(self):
+ self._test_detach_disk_from_vm(destroy_disk=True)
+
+ def test_detach_without_destroy_disk_from_vm(self):
+ self._test_detach_disk_from_vm(destroy_disk=False)
+
+ def _fake_call_get_dynamic_property(self, uuid, result):
+ def fake_call_method(vim, method, vm_ref, type, prop):
+ expected_prop = 'config.extraConfig["volume-%s"]' % uuid
+ self.assertEqual('VirtualMachine', type)
+ self.assertEqual(expected_prop, prop)
+ return result
+ return fake_call_method
+
+ def test_get_volume_uuid(self):
+ vm_ref = mock.Mock()
+ uuid = '1234'
+ opt_val = vmwareapi_fake.OptionValue('volume-%s' % uuid, 'volume-val')
+ fake_call = self._fake_call_get_dynamic_property(uuid, opt_val)
+ with mock.patch.object(self._session, "_call_method", fake_call):
+ val = self._volumeops._get_volume_uuid(vm_ref, uuid)
+ self.assertEqual('volume-val', val)
+
+ def test_get_volume_uuid_not_found(self):
+ vm_ref = mock.Mock()
+ uuid = '1234'
+ fake_call = self._fake_call_get_dynamic_property(uuid, None)
+ with mock.patch.object(self._session, "_call_method", fake_call):
+ val = self._volumeops._get_volume_uuid(vm_ref, uuid)
+ self.assertIsNone(val)
diff --git a/nova/tests/virt/xenapi/__init__.py b/nova/tests/unit/virt/xenapi/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/virt/xenapi/__init__.py
+++ b/nova/tests/unit/virt/xenapi/__init__.py
diff --git a/nova/tests/virt/xenapi/client/__init__.py b/nova/tests/unit/virt/xenapi/client/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/virt/xenapi/client/__init__.py
+++ b/nova/tests/unit/virt/xenapi/client/__init__.py
diff --git a/nova/tests/unit/virt/xenapi/client/test_objects.py b/nova/tests/unit/virt/xenapi/client/test_objects.py
new file mode 100644
index 0000000000..efaf17a9c7
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/client/test_objects.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2014 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.tests.unit.virt.xenapi import stubs
+from nova import utils
+from nova.virt.xenapi.client import objects
+
+
+class XenAPISessionObjectTestCase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(XenAPISessionObjectTestCase, self).setUp()
+ self.session = mock.Mock()
+ self.obj = objects.XenAPISessionObject(self.session, "FAKE")
+
+ def test_call_method_via_attr(self):
+ self.session.call_xenapi.return_value = "asdf"
+
+ result = self.obj.get_X("ref")
+
+ self.assertEqual(result, "asdf")
+ self.session.call_xenapi.assert_called_once_with("FAKE.get_X", "ref")
+
+
+class ObjectsTestCase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(ObjectsTestCase, self).setUp()
+ self.session = mock.Mock()
+
+ def test_VM(self):
+ vm = objects.VM(self.session)
+ vm.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VM.get_X", "ref")
+
+ def test_SR(self):
+ sr = objects.SR(self.session)
+ sr.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("SR.get_X", "ref")
+
+ def test_VDI(self):
+ vdi = objects.VDI(self.session)
+ vdi.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VDI.get_X", "ref")
+
+ def test_VBD(self):
+ vbd = objects.VBD(self.session)
+ vbd.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VBD.get_X", "ref")
+
+ def test_PBD(self):
+ pbd = objects.PBD(self.session)
+ pbd.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("PBD.get_X", "ref")
+
+ def test_PIF(self):
+ pif = objects.PIF(self.session)
+ pif.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("PIF.get_X", "ref")
+
+ def test_VLAN(self):
+ vlan = objects.VLAN(self.session)
+ vlan.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VLAN.get_X", "ref")
+
+ def test_host(self):
+ host = objects.Host(self.session)
+ host.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("host.get_X", "ref")
+
+ def test_network(self):
+ network = objects.Network(self.session)
+ network.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("network.get_X",
+ "ref")
+
+ def test_pool(self):
+ pool = objects.Pool(self.session)
+ pool.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("pool.get_X", "ref")
+
+
+class VBDTestCase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(VBDTestCase, self).setUp()
+ self.session = mock.Mock()
+ self.session.VBD = objects.VBD(self.session)
+
+ def test_plug(self):
+ self.session.VBD.plug("vbd_ref", "vm_ref")
+ self.session.call_xenapi.assert_called_once_with("VBD.plug", "vbd_ref")
+
+ def test_unplug(self):
+ self.session.VBD.unplug("vbd_ref", "vm_ref")
+ self.session.call_xenapi.assert_called_once_with("VBD.unplug",
+ "vbd_ref")
+
+ @mock.patch.object(utils, 'synchronized')
+ def test_vbd_plug_check_synchronized(self, mock_synchronized):
+ self.session.VBD.unplug("vbd_ref", "vm_ref")
+ mock_synchronized.assert_called_once_with("xenapi-vbd-vm_ref")
diff --git a/nova/tests/unit/virt/xenapi/client/test_session.py b/nova/tests/unit/virt/xenapi/client/test_session.py
new file mode 100644
index 0000000000..1fbbbf752d
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/client/test_session.py
@@ -0,0 +1,158 @@
+# Copyright (c) 2014 Rackspace Hosting
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import errno
+import socket
+
+import mock
+
+from nova import exception
+from nova.tests.unit.virt.xenapi import stubs
+from nova import version
+from nova.virt.xenapi.client import session
+
+
+class SessionTestCase(stubs.XenAPITestBaseNoDB):
+ @mock.patch.object(session.XenAPISession, '_create_session')
+ @mock.patch.object(session.XenAPISession, '_get_product_version_and_brand')
+ @mock.patch.object(session.XenAPISession, '_verify_plugin_version')
+ def test_session_passes_version(self, mock_verify, mock_version,
+ create_session):
+ sess = mock.Mock()
+ create_session.return_value = sess
+ mock_version.return_value = ('version', 'brand')
+
+ session.XenAPISession('url', 'username', 'password')
+
+ expected_version = '%s %s %s' % (version.vendor_string(),
+ version.product_string(),
+ version.version_string_with_package())
+ sess.login_with_password.assert_called_with('username', 'password',
+ expected_version,
+ 'OpenStack')
+
+
+class ApplySessionHelpersTestCase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(ApplySessionHelpersTestCase, self).setUp()
+ self.session = mock.Mock()
+ session.apply_session_helpers(self.session)
+
+ def test_apply_session_helpers_add_VM(self):
+ self.session.VM.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VM.get_X", "ref")
+
+ def test_apply_session_helpers_add_SR(self):
+ self.session.SR.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("SR.get_X", "ref")
+
+ def test_apply_session_helpers_add_VDI(self):
+ self.session.VDI.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VDI.get_X", "ref")
+
+ def test_apply_session_helpers_add_VBD(self):
+ self.session.VBD.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VBD.get_X", "ref")
+
+ def test_apply_session_helpers_add_PBD(self):
+ self.session.PBD.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("PBD.get_X", "ref")
+
+ def test_apply_session_helpers_add_PIF(self):
+ self.session.PIF.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("PIF.get_X", "ref")
+
+ def test_apply_session_helpers_add_VLAN(self):
+ self.session.VLAN.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("VLAN.get_X", "ref")
+
+ def test_apply_session_helpers_add_host(self):
+ self.session.host.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("host.get_X", "ref")
+
+ def test_apply_session_helpers_add_network(self):
+ self.session.network.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("network.get_X",
+ "ref")
+
+ def test_apply_session_helpers_add_pool(self):
+ self.session.pool.get_X("ref")
+ self.session.call_xenapi.assert_called_once_with("pool.get_X", "ref")
+
+
+class CallPluginTestCase(stubs.XenAPITestBaseNoDB):
+ def _get_fake_xapisession(self):
+ class FakeXapiSession(session.XenAPISession):
+ def __init__(self, **kwargs):
+ "Skip the superclass's dirty init"
+ self.XenAPI = mock.MagicMock()
+
+ return FakeXapiSession()
+
+ def setUp(self):
+ super(CallPluginTestCase, self).setUp()
+ self.session = self._get_fake_xapisession()
+
+ def test_serialized_with_retry_socket_error_conn_reset(self):
+ exc = socket.error
+ exc.errno = errno.ECONNRESET
+ plugin = 'glance'
+ fn = 'download_vhd'
+ num_retries = 1
+ callback = None
+ retry_cb = mock.Mock()
+ with mock.patch.object(self.session, 'call_plugin_serialized',
+ autospec=True) as call_plugin_serialized:
+ call_plugin_serialized.side_effect = exc
+ self.assertRaises(exception.PluginRetriesExceeded,
+ self.session.call_plugin_serialized_with_retry, plugin, fn,
+ num_retries, callback, retry_cb)
+ call_plugin_serialized.assert_called_with(plugin, fn)
+ self.assertEqual(2, call_plugin_serialized.call_count)
+ self.assertEqual(2, retry_cb.call_count)
+
+ def test_serialized_with_retry_socket_error_reraised(self):
+ exc = socket.error
+ exc.errno = errno.ECONNREFUSED
+ plugin = 'glance'
+ fn = 'download_vhd'
+ num_retries = 1
+ callback = None
+ retry_cb = mock.Mock()
+ with mock.patch.object(self.session, 'call_plugin_serialized',
+ autospec=True) as call_plugin_serialized:
+ call_plugin_serialized.side_effect = exc
+ self.assertRaises(socket.error,
+ self.session.call_plugin_serialized_with_retry, plugin, fn,
+ num_retries, callback, retry_cb)
+ call_plugin_serialized.assert_called_once_with(plugin, fn)
+ self.assertEqual(0, retry_cb.call_count)
+
+ def test_serialized_with_retry_socket_reset_reraised(self):
+ exc = socket.error
+ exc.errno = errno.ECONNRESET
+ plugin = 'glance'
+ fn = 'download_vhd'
+ num_retries = 1
+ callback = None
+ retry_cb = mock.Mock()
+ with mock.patch.object(self.session, 'call_plugin_serialized',
+ autospec=True) as call_plugin_serialized:
+ call_plugin_serialized.side_effect = exc
+ self.assertRaises(exception.PluginRetriesExceeded,
+ self.session.call_plugin_serialized_with_retry, plugin, fn,
+ num_retries, callback, retry_cb)
+ call_plugin_serialized.assert_called_with(plugin, fn)
+ self.assertEqual(2, call_plugin_serialized.call_count)
diff --git a/nova/tests/virt/xenapi/image/__init__.py b/nova/tests/unit/virt/xenapi/image/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/virt/xenapi/image/__init__.py
+++ b/nova/tests/unit/virt/xenapi/image/__init__.py
diff --git a/nova/tests/unit/virt/xenapi/image/test_bittorrent.py b/nova/tests/unit/virt/xenapi/image/test_bittorrent.py
new file mode 100644
index 0000000000..5422036b98
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/image/test_bittorrent.py
@@ -0,0 +1,163 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mox
+import pkg_resources
+import six
+
+from nova import context
+from nova.i18n import _
+from nova import test
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import driver as xenapi_conn
+from nova.virt.xenapi import fake
+from nova.virt.xenapi.image import bittorrent
+from nova.virt.xenapi import vm_utils
+
+
+class TestBittorrentStore(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(TestBittorrentStore, self).setUp()
+ self.store = bittorrent.BittorrentStore()
+ self.mox = mox.Mox()
+
+ self.flags(torrent_base_url='http://foo',
+ connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+
+ self.context = context.RequestContext(
+ 'user', 'project', auth_token='foobar')
+
+ fake.reset()
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+
+ def mock_iter_eps(namespace):
+ return []
+
+ self.stubs.Set(pkg_resources, 'iter_entry_points', mock_iter_eps)
+
+ driver = xenapi_conn.XenAPIDriver(False)
+ self.session = driver._session
+
+ self.stubs.Set(
+ vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path')
+
+ def test_download_image(self):
+
+ instance = {'uuid': '00000000-0000-0000-0000-000000007357'}
+ params = {'image_id': 'fake_image_uuid',
+ 'sr_path': '/fake/sr/path',
+ 'torrent_download_stall_cutoff': 600,
+ 'torrent_listen_port_end': 6891,
+ 'torrent_listen_port_start': 6881,
+ 'torrent_max_last_accessed': 86400,
+ 'torrent_max_seeder_processes_per_host': 1,
+ 'torrent_seed_chance': 1.0,
+ 'torrent_seed_duration': 3600,
+ 'torrent_url': 'http://foo/fake_image_uuid.torrent',
+ 'uuid_stack': ['uuid1']}
+
+ self.stubs.Set(vm_utils, '_make_uuid_stack',
+ lambda *a, **kw: ['uuid1'])
+
+ self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
+ self.session.call_plugin_serialized(
+ 'bittorrent', 'download_vhd', **params)
+ self.mox.ReplayAll()
+
+ self.store.download_image(self.context, self.session,
+ instance, 'fake_image_uuid')
+
+ self.mox.VerifyAll()
+
+ def test_upload_image(self):
+ self.assertRaises(NotImplementedError, self.store.upload_image,
+ self.context, self.session, mox.IgnoreArg, 'fake_image_uuid',
+ ['fake_vdi_uuid'])
+
+
+def bad_fetcher(image_id):
+ raise test.TestingException("just plain bad.")
+
+
+def another_fetcher(image_id):
+ return "http://www.foobar.com/%s" % image_id
+
+
+class MockEntryPoint(object):
+ name = "torrent_url"
+
+ def load(self):
+ return another_fetcher
+
+
+class LookupTorrentURLTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(LookupTorrentURLTestCase, self).setUp()
+ self.store = bittorrent.BittorrentStore()
+ self.image_id = 'fakeimageid'
+
+ def _mock_iter_none(self, namespace):
+ return []
+
+ def _mock_iter_single(self, namespace):
+ return [MockEntryPoint()]
+
+ def test_default_fetch_url_no_base_url_set(self):
+ self.flags(torrent_base_url=None,
+ group='xenserver')
+ self.stubs.Set(pkg_resources, 'iter_entry_points',
+ self._mock_iter_none)
+
+ exc = self.assertRaises(
+ RuntimeError, self.store._lookup_torrent_url_fn)
+ self.assertEqual(_('Cannot create default bittorrent URL without'
+ ' torrent_base_url set'
+ ' or torrent URL fetcher extension'),
+ six.text_type(exc))
+
+ def test_default_fetch_url_base_url_is_set(self):
+ self.flags(torrent_base_url='http://foo',
+ group='xenserver')
+ self.stubs.Set(pkg_resources, 'iter_entry_points',
+ self._mock_iter_single)
+
+ lookup_fn = self.store._lookup_torrent_url_fn()
+ self.assertEqual('http://foo/fakeimageid.torrent',
+ lookup_fn(self.image_id))
+
+ def test_with_extension(self):
+ self.stubs.Set(pkg_resources, 'iter_entry_points',
+ self._mock_iter_single)
+
+ lookup_fn = self.store._lookup_torrent_url_fn()
+ self.assertEqual("http://www.foobar.com/%s" % self.image_id,
+ lookup_fn(self.image_id))
+
+ def test_multiple_extensions_found(self):
+ self.flags(torrent_base_url=None,
+ group='xenserver')
+
+ def mock_iter_multiple(namespace):
+ return [MockEntryPoint(), MockEntryPoint()]
+
+ self.stubs.Set(pkg_resources, 'iter_entry_points', mock_iter_multiple)
+
+ exc = self.assertRaises(
+ RuntimeError, self.store._lookup_torrent_url_fn)
+ self.assertEqual(_('Multiple torrent URL fetcher extensions found.'
+ ' Failing.'),
+ six.text_type(exc))
diff --git a/nova/tests/unit/virt/xenapi/image/test_glance.py b/nova/tests/unit/virt/xenapi/image/test_glance.py
new file mode 100644
index 0000000000..8fbb853efa
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/image/test_glance.py
@@ -0,0 +1,256 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import random
+import time
+
+import mock
+from mox3 import mox
+
+from nova.compute import utils as compute_utils
+from nova import context
+from nova import exception
+from nova.openstack.common import log as logging
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import driver as xenapi_conn
+from nova.virt.xenapi import fake
+from nova.virt.xenapi.image import glance
+from nova.virt.xenapi import vm_utils
+
+
+class TestGlanceStore(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(TestGlanceStore, self).setUp()
+ self.store = glance.GlanceStore()
+
+ self.flags(host='1.1.1.1',
+ port=123,
+ api_insecure=False, group='glance')
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+
+ self.context = context.RequestContext(
+ 'user', 'project', auth_token='foobar')
+
+ fake.reset()
+ stubs.stubout_session(self.stubs, fake.SessionBase)
+ driver = xenapi_conn.XenAPIDriver(False)
+ self.session = driver._session
+
+ self.stubs.Set(
+ vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path')
+
+ self.instance = {'uuid': 'blah',
+ 'system_metadata': [],
+ 'auto_disk_config': True,
+ 'os_type': 'default',
+ 'xenapi_use_agent': 'true'}
+
+ def _get_params(self):
+ return {'image_id': 'fake_image_uuid',
+ 'glance_host': '1.1.1.1',
+ 'glance_port': 123,
+ 'glance_use_ssl': False,
+ 'sr_path': '/fake/sr/path',
+ 'extra_headers': {'X-Service-Catalog': '[]',
+ 'X-Auth-Token': 'foobar',
+ 'X-Roles': '',
+ 'X-Tenant-Id': 'project',
+ 'X-User-Id': 'user',
+ 'X-Identity-Status': 'Confirmed'}}
+
+ def _get_download_params(self):
+ params = self._get_params()
+ params['uuid_stack'] = ['uuid1']
+ return params
+
+ def test_download_image(self):
+ params = self._get_download_params()
+
+ self.stubs.Set(vm_utils, '_make_uuid_stack',
+ lambda *a, **kw: ['uuid1'])
+
+ self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
+ self.session.call_plugin_serialized('glance', 'download_vhd', **params)
+ self.mox.ReplayAll()
+
+ self.store.download_image(self.context, self.session,
+ self.instance, 'fake_image_uuid')
+
+ self.mox.VerifyAll()
+
+ @mock.patch.object(vm_utils, '_make_uuid_stack', return_value=['uuid1'])
+ @mock.patch.object(random, 'shuffle')
+ @mock.patch.object(time, 'sleep')
+ @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
+ @mock.patch.object(logging.getLogger('nova.virt.xenapi.client.session'),
+ 'debug')
+ def test_download_image_retry(self, mock_log_debug, mock_fault, mock_sleep,
+ mock_shuffle, mock_make_uuid_stack):
+ params = self._get_download_params()
+ self.flags(num_retries=2, group='glance')
+
+ params.pop("glance_port")
+ params.pop("glance_host")
+ calls = [mock.call('glance', 'download_vhd', glance_port=9292,
+ glance_host='10.0.1.1', **params),
+ mock.call('glance', 'download_vhd', glance_port=9293,
+ glance_host='10.0.0.1', **params)]
+ log_calls = [mock.call(mock.ANY, {'callback_result': '10.0.1.1',
+ 'attempts': 3, 'attempt': 1,
+ 'fn': 'download_vhd',
+ 'plugin': 'glance'}),
+ mock.call(mock.ANY, {'callback_result': '10.0.0.1',
+ 'attempts': 3, 'attempt': 2,
+ 'fn': 'download_vhd',
+ 'plugin': 'glance'})]
+
+ glance_api_servers = ['10.0.1.1:9292',
+ 'http://10.0.0.1:9293']
+ self.flags(api_servers=glance_api_servers, group='glance')
+
+ with (mock.patch.object(self.session, 'call_plugin_serialized')
+ ) as mock_call_plugin_serialized:
+ error_details = ["", "", "RetryableError", ""]
+ error = self.session.XenAPI.Failure(details=error_details)
+ mock_call_plugin_serialized.side_effect = [error, "success"]
+
+ self.store.download_image(self.context, self.session,
+ self.instance, 'fake_image_uuid')
+
+ mock_call_plugin_serialized.assert_has_calls(calls)
+ mock_log_debug.assert_has_calls(log_calls, any_order=True)
+
+ self.assertEqual(1, mock_fault.call_count)
+
+ def _get_upload_params(self, auto_disk_config=True,
+ expected_os_type='default'):
+ params = self._get_params()
+ params['vdi_uuids'] = ['fake_vdi_uuid']
+ params['properties'] = {'auto_disk_config': auto_disk_config,
+ 'os_type': expected_os_type}
+ return params
+
+ def _test_upload_image(self, auto_disk_config, expected_os_type='default'):
+ params = self._get_upload_params(auto_disk_config, expected_os_type)
+
+ self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
+ self.session.call_plugin_serialized('glance', 'upload_vhd', **params)
+
+ self.mox.ReplayAll()
+ self.store.upload_image(self.context, self.session, self.instance,
+ 'fake_image_uuid', ['fake_vdi_uuid'])
+ self.mox.VerifyAll()
+
+ def test_upload_image(self):
+ self._test_upload_image(True)
+
+ def test_upload_image_None_os_type(self):
+ self.instance['os_type'] = None
+ self._test_upload_image(True, 'linux')
+
+ def test_upload_image_no_os_type(self):
+ del self.instance['os_type']
+ self._test_upload_image(True, 'linux')
+
+ def test_upload_image_auto_config_disk_disabled(self):
+ sys_meta = [{"key": "image_auto_disk_config", "value": "Disabled"}]
+ self.instance["system_metadata"] = sys_meta
+ self._test_upload_image("disabled")
+
+ def test_upload_image_raises_exception(self):
+ params = self._get_upload_params()
+
+ self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params).AndRaise(RuntimeError)
+ self.mox.ReplayAll()
+
+ self.assertRaises(RuntimeError, self.store.upload_image,
+ self.context, self.session, self.instance,
+ 'fake_image_uuid', ['fake_vdi_uuid'])
+ self.mox.VerifyAll()
+
+ def test_upload_image_retries_then_raises_exception(self):
+ self.flags(num_retries=2, group='glance')
+ params = self._get_upload_params()
+
+ self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
+ self.mox.StubOutWithMock(time, 'sleep')
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ error_details = ["", "", "RetryableError", ""]
+ error = self.session.XenAPI.Failure(details=error_details)
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params).AndRaise(error)
+ compute_utils.add_instance_fault_from_exc(self.context, self.instance,
+ error, (fake.Failure,
+ error,
+ mox.IgnoreArg()))
+ time.sleep(0.5)
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params).AndRaise(error)
+ compute_utils.add_instance_fault_from_exc(self.context, self.instance,
+ error, (fake.Failure,
+ error,
+ mox.IgnoreArg()))
+ time.sleep(1)
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params).AndRaise(error)
+ compute_utils.add_instance_fault_from_exc(self.context, self.instance,
+ error, (fake.Failure,
+ error,
+ mox.IgnoreArg()))
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.CouldNotUploadImage,
+ self.store.upload_image,
+ self.context, self.session, self.instance,
+ 'fake_image_uuid', ['fake_vdi_uuid'])
+ self.mox.VerifyAll()
+
+ def test_upload_image_retries_on_signal_exception(self):
+ self.flags(num_retries=2, group='glance')
+ params = self._get_upload_params()
+
+ self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
+ self.mox.StubOutWithMock(time, 'sleep')
+ self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
+ error_details = ["", "task signaled", "", ""]
+ error = self.session.XenAPI.Failure(details=error_details)
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params).AndRaise(error)
+ compute_utils.add_instance_fault_from_exc(self.context, self.instance,
+ error, (fake.Failure,
+ error,
+ mox.IgnoreArg()))
+ time.sleep(0.5)
+ # Note(johngarbutt) XenServer 6.1 and later has this error
+ error_details = ["", "signal: SIGTERM", "", ""]
+ error = self.session.XenAPI.Failure(details=error_details)
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params).AndRaise(error)
+ compute_utils.add_instance_fault_from_exc(self.context, self.instance,
+ error, (fake.Failure,
+ error,
+ mox.IgnoreArg()))
+ time.sleep(1)
+ self.session.call_plugin_serialized('glance', 'upload_vhd',
+ **params)
+ self.mox.ReplayAll()
+
+ self.store.upload_image(self.context, self.session, self.instance,
+ 'fake_image_uuid', ['fake_vdi_uuid'])
+ self.mox.VerifyAll()
diff --git a/nova/tests/virt/xenapi/image/test_utils.py b/nova/tests/unit/virt/xenapi/image/test_utils.py
index 4763f66683..4763f66683 100644
--- a/nova/tests/virt/xenapi/image/test_utils.py
+++ b/nova/tests/unit/virt/xenapi/image/test_utils.py
diff --git a/nova/tests/virt/xenapi/image/test_vdi_through_dev.py b/nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py
index 4a86ce5371..4a86ce5371 100644
--- a/nova/tests/virt/xenapi/image/test_vdi_through_dev.py
+++ b/nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py
diff --git a/nova/tests/unit/virt/xenapi/stubs.py b/nova/tests/unit/virt/xenapi/stubs.py
new file mode 100644
index 0000000000..ad13ca41df
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/stubs.py
@@ -0,0 +1,365 @@
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Stubouts, mocks and fixtures for the test suite."""
+
+import pickle
+import random
+
+from oslo.serialization import jsonutils
+
+from nova import test
+import nova.tests.unit.image.fake
+from nova.virt.xenapi.client import session
+from nova.virt.xenapi import fake
+from nova.virt.xenapi import vm_utils
+from nova.virt.xenapi import vmops
+
+
+def stubout_firewall_driver(stubs, conn):
+
+ def fake_none(self, *args):
+ return
+
+ _vmops = conn._vmops
+ stubs.Set(_vmops.firewall_driver, 'prepare_instance_filter', fake_none)
+ stubs.Set(_vmops.firewall_driver, 'instance_filter_exists', fake_none)
+
+
+def stubout_instance_snapshot(stubs):
+ def fake_fetch_image(context, session, instance, name_label, image, type):
+ return {'root': dict(uuid=_make_fake_vdi(), file=None),
+ 'kernel': dict(uuid=_make_fake_vdi(), file=None),
+ 'ramdisk': dict(uuid=_make_fake_vdi(), file=None)}
+
+ stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)
+
+ def fake_wait_for_vhd_coalesce(*args):
+        # TODO(sirp): Should we actually fake out the data here?
+ return "fakeparent", "fakebase"
+
+ stubs.Set(vm_utils, '_wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce)
+
+
+def stubout_session(stubs, cls, product_version=(5, 6, 2),
+ product_brand='XenServer', **opt_args):
+ """Stubs out methods from XenAPISession."""
+ stubs.Set(session.XenAPISession, '_create_session',
+ lambda s, url: cls(url, **opt_args))
+ stubs.Set(session.XenAPISession, '_get_product_version_and_brand',
+ lambda s: (product_version, product_brand))
+
+
+def stubout_get_this_vm_uuid(stubs):
+ def f(session):
+ vms = [rec['uuid'] for ref, rec
+ in fake.get_all_records('VM').iteritems()
+ if rec['is_control_domain']]
+ return vms[0]
+ stubs.Set(vm_utils, 'get_this_vm_uuid', f)
+
+
+def stubout_image_service_download(stubs):
+ def fake_download(*args, **kwargs):
+ pass
+ stubs.Set(nova.tests.unit.image.fake._FakeImageService,
+ 'download', fake_download)
+
+
+def stubout_stream_disk(stubs):
+ def fake_stream_disk(*args, **kwargs):
+ pass
+ stubs.Set(vm_utils, '_stream_disk', fake_stream_disk)
+
+
+def stubout_determine_is_pv_objectstore(stubs):
+ """Assumes VMs stu have PV kernels."""
+
+ def f(*args):
+ return False
+ stubs.Set(vm_utils, '_determine_is_pv_objectstore', f)
+
+
+def stubout_is_snapshot(stubs):
+ """Always returns true
+
+ xenapi fake driver does not create vmrefs for snapshots.
+ """
+
+ def f(*args):
+ return True
+ stubs.Set(vm_utils, 'is_snapshot', f)
+
+
+def stubout_lookup_image(stubs):
+ """Simulates a failure in lookup image."""
+ def f(_1, _2, _3, _4):
+ raise Exception("Test Exception raised by fake lookup_image")
+ stubs.Set(vm_utils, 'lookup_image', f)
+
+
+def stubout_fetch_disk_image(stubs, raise_failure=False):
+ """Simulates a failure in fetch image_glance_disk."""
+
+ def _fake_fetch_disk_image(context, session, instance, name_label, image,
+ image_type):
+ if raise_failure:
+ raise fake.Failure("Test Exception raised by "
+ "fake fetch_image_glance_disk")
+ elif image_type == vm_utils.ImageType.KERNEL:
+ filename = "kernel"
+ elif image_type == vm_utils.ImageType.RAMDISK:
+ filename = "ramdisk"
+ else:
+ filename = "unknown"
+
+ vdi_type = vm_utils.ImageType.to_string(image_type)
+ return {vdi_type: dict(uuid=None, file=filename)}
+
+ stubs.Set(vm_utils, '_fetch_disk_image', _fake_fetch_disk_image)
+
+
+def stubout_create_vm(stubs):
+ """Simulates a failure in create_vm."""
+
+ def f(*args):
+ raise fake.Failure("Test Exception raised by fake create_vm")
+ stubs.Set(vm_utils, 'create_vm', f)
+
+
+def stubout_attach_disks(stubs):
+ """Simulates a failure in _attach_disks."""
+
+ def f(*args):
+ raise fake.Failure("Test Exception raised by fake _attach_disks")
+ stubs.Set(vmops.VMOps, '_attach_disks', f)
+
+
+def _make_fake_vdi():
+ sr_ref = fake.get_all('SR')[0]
+ vdi_ref = fake.create_vdi('', sr_ref)
+ vdi_rec = fake.get_record('VDI', vdi_ref)
+ return vdi_rec['uuid']
+
+
+class FakeSessionForVMTests(fake.SessionBase):
+ """Stubs out a XenAPISession for VM tests."""
+
+ _fake_iptables_save_output = ("# Generated by iptables-save v1.4.10 on "
+ "Sun Nov 6 22:49:02 2011\n"
+ "*filter\n"
+ ":INPUT ACCEPT [0:0]\n"
+ ":FORWARD ACCEPT [0:0]\n"
+ ":OUTPUT ACCEPT [0:0]\n"
+ "COMMIT\n"
+ "# Completed on Sun Nov 6 22:49:02 2011\n")
+
+ def host_call_plugin(self, _1, _2, plugin, method, _5):
+ if (plugin, method) == ('glance', 'download_vhd'):
+ root_uuid = _make_fake_vdi()
+ return pickle.dumps(dict(root=dict(uuid=root_uuid)))
+ elif (plugin, method) == ("xenhost", "iptables_config"):
+ return fake.as_json(out=self._fake_iptables_save_output,
+ err='')
+ else:
+ return (super(FakeSessionForVMTests, self).
+ host_call_plugin(_1, _2, plugin, method, _5))
+
+ def VM_start(self, _1, ref, _2, _3):
+ vm = fake.get_record('VM', ref)
+ if vm['power_state'] != 'Halted':
+ raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted',
+ vm['power_state']])
+ vm['power_state'] = 'Running'
+ vm['is_a_template'] = False
+ vm['is_control_domain'] = False
+ vm['domid'] = random.randrange(1, 1 << 16)
+ return vm
+
+ def VM_start_on(self, _1, vm_ref, host_ref, _2, _3):
+ vm_rec = self.VM_start(_1, vm_ref, _2, _3)
+ vm_rec['resident_on'] = host_ref
+
+ def VDI_snapshot(self, session_ref, vm_ref, _1):
+ sr_ref = "fakesr"
+ return fake.create_vdi('fakelabel', sr_ref, read_only=True)
+
+ def SR_scan(self, session_ref, sr_ref):
+ pass
+
+
+class FakeSessionForFirewallTests(FakeSessionForVMTests):
+ """Stubs out a XenApi Session for doing IPTable Firewall tests."""
+
+ def __init__(self, uri, test_case=None):
+ super(FakeSessionForFirewallTests, self).__init__(uri)
+ if hasattr(test_case, '_in_rules'):
+ self._in_rules = test_case._in_rules
+ if hasattr(test_case, '_in6_filter_rules'):
+ self._in6_filter_rules = test_case._in6_filter_rules
+ self._test_case = test_case
+
+ def host_call_plugin(self, _1, _2, plugin, method, args):
+ """Mock method four host_call_plugin to be used in unit tests
+ for the dom0 iptables Firewall drivers for XenAPI
+
+ """
+ if plugin == "xenhost" and method == "iptables_config":
+ # The command to execute is a json-encoded list
+ cmd_args = args.get('cmd_args', None)
+ cmd = jsonutils.loads(cmd_args)
+ if not cmd:
+ ret_str = ''
+ else:
+ output = ''
+ process_input = args.get('process_input', None)
+ if cmd == ['ip6tables-save', '-c']:
+ output = '\n'.join(self._in6_filter_rules)
+ if cmd == ['iptables-save', '-c']:
+ output = '\n'.join(self._in_rules)
+ if cmd == ['iptables-restore', '-c', ]:
+ lines = process_input.split('\n')
+ if '*filter' in lines:
+ if self._test_case is not None:
+ self._test_case._out_rules = lines
+ output = '\n'.join(lines)
+ if cmd == ['ip6tables-restore', '-c', ]:
+ lines = process_input.split('\n')
+ if '*filter' in lines:
+ output = '\n'.join(lines)
+ ret_str = fake.as_json(out=output, err='')
+ return ret_str
+ else:
+ return (super(FakeSessionForVMTests, self).
+ host_call_plugin(_1, _2, plugin, method, args))
+
+
+def stub_out_vm_methods(stubs):
+ def fake_acquire_bootlock(self, vm):
+ pass
+
+ def fake_release_bootlock(self, vm):
+ pass
+
+ def fake_generate_ephemeral(*args):
+ pass
+
+ def fake_wait_for_device(dev):
+ pass
+
+ stubs.Set(vmops.VMOps, "_acquire_bootlock", fake_acquire_bootlock)
+ stubs.Set(vmops.VMOps, "_release_bootlock", fake_release_bootlock)
+ stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
+ stubs.Set(vm_utils, '_wait_for_device', fake_wait_for_device)
+
+
+class FakeSessionForVolumeTests(fake.SessionBase):
+ """Stubs out a XenAPISession for Volume tests."""
+ def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
+ _6, _7, _8, _9, _10, _11):
+ valid_vdi = False
+ refs = fake.get_all('VDI')
+ for ref in refs:
+ rec = fake.get_record('VDI', ref)
+ if rec['uuid'] == uuid:
+ valid_vdi = True
+ if not valid_vdi:
+ raise fake.Failure([['INVALID_VDI', 'session', self._session]])
+
+
+class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
+ """Stubs out a XenAPISession for Volume tests: it injects failures."""
+ def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
+ _6, _7, _8, _9, _10, _11):
+ # This is for testing failure
+ raise fake.Failure([['INVALID_VDI', 'session', self._session]])
+
+ def PBD_unplug(self, _1, ref):
+ rec = fake.get_record('PBD', ref)
+ rec['currently-attached'] = False
+
+ def SR_forget(self, _1, ref):
+ pass
+
+
+def stub_out_migration_methods(stubs):
+ fakesr = fake.create_sr()
+
+ def fake_import_all_migrated_disks(session, instance):
+ vdi_ref = fake.create_vdi(instance['name'], fakesr)
+ vdi_rec = fake.get_record('VDI', vdi_ref)
+ vdi_rec['other_config']['nova_disk_type'] = 'root'
+ return {"root": {'uuid': vdi_rec['uuid'], 'ref': vdi_ref},
+ "ephemerals": {}}
+
+ def fake_wait_for_instance_to_start(self, *args):
+ pass
+
+ def fake_get_vdi(session, vm_ref, userdevice='0'):
+ vdi_ref_parent = fake.create_vdi('derp-parent', fakesr)
+ vdi_rec_parent = fake.get_record('VDI', vdi_ref_parent)
+ vdi_ref = fake.create_vdi('derp', fakesr,
+ sm_config={'vhd-parent': vdi_rec_parent['uuid']})
+ vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
+ return vdi_ref, vdi_rec
+
+ def fake_sr(session, *args):
+ return fakesr
+
+ def fake_get_sr_path(*args):
+ return "fake"
+
+ def fake_destroy(*args, **kwargs):
+ pass
+
+ def fake_generate_ephemeral(*args):
+ pass
+
+ stubs.Set(vmops.VMOps, '_destroy', fake_destroy)
+ stubs.Set(vmops.VMOps, '_wait_for_instance_to_start',
+ fake_wait_for_instance_to_start)
+ stubs.Set(vm_utils, 'import_all_migrated_disks',
+ fake_import_all_migrated_disks)
+ stubs.Set(vm_utils, 'scan_default_sr', fake_sr)
+ stubs.Set(vm_utils, 'get_vdi_for_vm_safely', fake_get_vdi)
+ stubs.Set(vm_utils, 'get_sr_path', fake_get_sr_path)
+ stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
+
+
+class FakeSessionForFailedMigrateTests(FakeSessionForVMTests):
+ def VM_assert_can_migrate(self, session, vmref, migrate_data,
+ live, vdi_map, vif_map, options):
+ raise fake.Failure("XenAPI VM.assert_can_migrate failed")
+
+ def host_migrate_receive(self, session, hostref, networkref, options):
+ raise fake.Failure("XenAPI host.migrate_receive failed")
+
+ def VM_migrate_send(self, session, vmref, migrate_data, islive, vdi_map,
+ vif_map, options):
+ raise fake.Failure("XenAPI VM.migrate_send failed")
+
+
+# FIXME(sirp): XenAPITestBase is deprecated, all tests should be converted
+# over to use XenAPITestBaseNoDB
+class XenAPITestBase(test.TestCase):
+ def setUp(self):
+ super(XenAPITestBase, self).setUp()
+ self.useFixture(test.ReplaceModule('XenAPI', fake))
+ fake.reset()
+
+
+class XenAPITestBaseNoDB(test.NoDBTestCase):
+ def setUp(self):
+ super(XenAPITestBaseNoDB, self).setUp()
+ self.useFixture(test.ReplaceModule('XenAPI', fake))
+ fake.reset()
diff --git a/nova/tests/virt/xenapi/test_agent.py b/nova/tests/unit/virt/xenapi/test_agent.py
index 5004b381d4..5004b381d4 100644
--- a/nova/tests/virt/xenapi/test_agent.py
+++ b/nova/tests/unit/virt/xenapi/test_agent.py
diff --git a/nova/tests/unit/virt/xenapi/test_driver.py b/nova/tests/unit/virt/xenapi/test_driver.py
new file mode 100644
index 0000000000..eb3e02f29e
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_driver.py
@@ -0,0 +1,101 @@
+# Copyright (c) 2013 Rackspace Hosting
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import math
+
+import mock
+from oslo.utils import units
+
+from nova.compute import arch
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt import driver
+from nova.virt import fake
+from nova.virt import xenapi
+from nova.virt.xenapi import driver as xenapi_driver
+
+
+class XenAPIDriverTestCase(stubs.XenAPITestBaseNoDB):
+ """Unit tests for Driver operations."""
+
+ def _get_driver(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.flags(connection_url='test_url',
+ connection_password='test_pass', group='xenserver')
+ return xenapi.XenAPIDriver(fake.FakeVirtAPI(), False)
+
+ def host_stats(self, refresh=True):
+ return {'host_memory_total': 3 * units.Mi,
+ 'host_memory_free_computed': 2 * units.Mi,
+ 'disk_total': 5 * units.Gi,
+ 'disk_used': 2 * units.Gi,
+ 'disk_allocated': 4 * units.Gi,
+ 'host_hostname': 'somename',
+ 'supported_instances': arch.X86_64,
+ 'host_cpu_info': {'cpu_count': 50},
+ 'vcpus_used': 10,
+ 'pci_passthrough_devices': ''}
+
+ def test_available_resource(self):
+ driver = self._get_driver()
+ driver._session.product_version = (6, 8, 2)
+
+ self.stubs.Set(driver.host_state, 'get_host_stats', self.host_stats)
+
+ resources = driver.get_available_resource(None)
+ self.assertEqual(6008002, resources['hypervisor_version'])
+ self.assertEqual(50, resources['vcpus'])
+ self.assertEqual(3, resources['memory_mb'])
+ self.assertEqual(5, resources['local_gb'])
+ self.assertEqual(10, resources['vcpus_used'])
+ self.assertEqual(3 - 2, resources['memory_mb_used'])
+ self.assertEqual(2, resources['local_gb_used'])
+ self.assertEqual('xen', resources['hypervisor_type'])
+ self.assertEqual('somename', resources['hypervisor_hostname'])
+ self.assertEqual(1, resources['disk_available_least'])
+
+ def test_overhead(self):
+ driver = self._get_driver()
+ instance = {'memory_mb': 30720, 'vcpus': 4}
+
+ # expected memory overhead per:
+ # https://wiki.openstack.org/wiki/XenServer/Overhead
+ expected = ((instance['memory_mb'] * xenapi_driver.OVERHEAD_PER_MB) +
+ (instance['vcpus'] * xenapi_driver.OVERHEAD_PER_VCPU) +
+ xenapi_driver.OVERHEAD_BASE)
+ expected = math.ceil(expected)
+ overhead = driver.estimate_instance_overhead(instance)
+ self.assertEqual(expected, overhead['memory_mb'])
+
+ def test_set_bootable(self):
+ driver = self._get_driver()
+
+ self.mox.StubOutWithMock(driver._vmops, 'set_bootable')
+ driver._vmops.set_bootable('inst', True)
+ self.mox.ReplayAll()
+
+ driver.set_bootable('inst', True)
+
+ def test_post_interrupted_snapshot_cleanup(self):
+ driver = self._get_driver()
+ fake_vmops_cleanup = mock.Mock()
+ driver._vmops.post_interrupted_snapshot_cleanup = fake_vmops_cleanup
+
+ driver.post_interrupted_snapshot_cleanup("context", "instance")
+
+ fake_vmops_cleanup.assert_called_once_with("context", "instance")
+
+ def test_public_api_signatures(self):
+ inst = self._get_driver()
+ self.assertPublicAPISignatures(driver.ComputeDriver(None), inst)
diff --git a/nova/tests/unit/virt/xenapi/test_network_utils.py b/nova/tests/unit/virt/xenapi/test_network_utils.py
new file mode 100644
index 0000000000..5aa660f2a7
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_network_utils.py
@@ -0,0 +1,76 @@
+
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import exception
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import network_utils
+
+
+class NetworkUtilsTestCase(stubs.XenAPITestBaseNoDB):
+ def test_find_network_with_name_label_works(self):
+ session = mock.Mock()
+ session.network.get_by_name_label.return_value = ["net"]
+
+ result = network_utils.find_network_with_name_label(session, "label")
+
+ self.assertEqual("net", result)
+ session.network.get_by_name_label.assert_called_once_with("label")
+
+ def test_find_network_with_name_returns_none(self):
+ session = mock.Mock()
+ session.network.get_by_name_label.return_value = []
+
+ result = network_utils.find_network_with_name_label(session, "label")
+
+ self.assertIsNone(result)
+
+ def test_find_network_with_name_label_raises(self):
+ session = mock.Mock()
+ session.network.get_by_name_label.return_value = ["net", "net2"]
+
+ self.assertRaises(exception.NovaException,
+ network_utils.find_network_with_name_label,
+ session, "label")
+
+ def test_find_network_with_bridge_works(self):
+ session = mock.Mock()
+ session.network.get_all_records_where.return_value = {"net": "asdf"}
+
+ result = network_utils.find_network_with_bridge(session, "bridge")
+
+ self.assertEqual(result, "net")
+ expr = 'field "name__label" = "bridge" or field "bridge" = "bridge"'
+ session.network.get_all_records_where.assert_called_once_with(expr)
+
+ def test_find_network_with_bridge_raises_too_many(self):
+ session = mock.Mock()
+ session.network.get_all_records_where.return_value = {
+ "net": "asdf",
+ "net2": "asdf2"
+ }
+
+ self.assertRaises(exception.NovaException,
+ network_utils.find_network_with_bridge,
+ session, "bridge")
+
+ def test_find_network_with_bridge_raises_no_networks(self):
+ session = mock.Mock()
+ session.network.get_all_records_where.return_value = {}
+
+ self.assertRaises(exception.NovaException,
+ network_utils.find_network_with_bridge,
+ session, "bridge")
diff --git a/nova/tests/unit/virt/xenapi/test_vm_utils.py b/nova/tests/unit/virt/xenapi/test_vm_utils.py
new file mode 100644
index 0000000000..ac54bd1480
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_vm_utils.py
@@ -0,0 +1,2422 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import uuid
+
+from eventlet import greenthread
+import fixtures
+import mock
+import mox
+from oslo.concurrency import lockutils
+from oslo.concurrency import processutils
+from oslo.config import cfg
+from oslo.utils import timeutils
+from oslo.utils import units
+import six
+
+from nova.compute import flavors
+from nova.compute import power_state
+from nova.compute import vm_mode
+from nova import context
+from nova import exception
+from nova.i18n import _
+from nova.openstack.common.fixture import config as config_fixture
+from nova import test
+from nova.tests.unit.virt.xenapi import stubs
+from nova.tests.unit.virt.xenapi import test_xenapi
+from nova import utils
+from nova.virt.xenapi.client import session as xenapi_session
+from nova.virt.xenapi import driver as xenapi_conn
+from nova.virt.xenapi import fake
+from nova.virt.xenapi import vm_utils
+
+CONF = cfg.CONF
+XENSM_TYPE = 'xensm'
+ISCSI_TYPE = 'iscsi'
+
+
def get_fake_connection_data(sr_type):
    """Return canned volume connection_data for *sr_type*.

    Supported types are 'xensm' and 'iscsi'; any other value raises
    KeyError, matching plain dict-lookup semantics.
    """
    xensm_data = {
        'sr_uuid': 'falseSR',
        'name_label': 'fake_storage',
        'name_description': 'test purposes',
        'server': 'myserver',
        'serverpath': '/local/scratch/myname',
        'sr_type': 'nfs',
        'introduce_sr_keys': ['server', 'serverpath', 'sr_type'],
        'vdi_uuid': 'falseVDI',
    }
    iscsi_data = {
        'volume_id': 'fake_volume_id',
        'target_lun': 1,
        'target_iqn': 'fake_iqn:volume-fake_volume_id',
        'target_portal': u'localhost:3260',
        'target_discovered': False,
    }
    return {'xensm': xensm_data, 'iscsi': iscsi_data}[sr_type]
+
+
def _get_fake_session(error=None):
    """Build a mock XenAPI session.

    When *error* is given, every call_xenapi raises a fake
    session.XenAPI.Failure whose first `details` entry is *error*.
    """
    fake_session = mock.Mock()
    xenapi_session.apply_session_helpers(fake_session)

    if error is None:
        return fake_session

    class FakeException(Exception):
        details = [error, "a", "b", "c"]

    fake_session.XenAPI.Failure = FakeException
    fake_session.call_xenapi.side_effect = FakeException
    return fake_session
+
+
@contextlib.contextmanager
def contextified(result):
    """Wrap *result* so it can be returned from a mocked context manager."""
    yield result
+
+
def _fake_noop(*args, **kwargs):
    """Accept any arguments and do nothing (generic stub target)."""
    return
+
+
class VMUtilsTestBase(stubs.XenAPITestBaseNoDB):
    """Common (no-DB) base class for the vm_utils tests below."""
    pass
+
+
class LookupTestCase(VMUtilsTestBase):
    """Tests for vm_utils.lookup, which maps a name label to a VM ref."""

    def setUp(self):
        super(LookupTestCase, self).setUp()
        self.session = self.mox.CreateMockAnything('Fake Session')
        self.name_label = 'my_vm'

    def _do_mock(self, result):
        # Expect a single VM.get_by_name_label call returning `result`,
        # then switch mox to replay mode.
        self.session.call_xenapi(
            "VM.get_by_name_label", self.name_label).AndReturn(result)
        self.mox.ReplayAll()

    def test_normal(self):
        self._do_mock(['x'])
        result = vm_utils.lookup(self.session, self.name_label)
        self.assertEqual('x', result)

    def test_no_result(self):
        self._do_mock([])
        result = vm_utils.lookup(self.session, self.name_label)
        self.assertIsNone(result)

    def test_too_many(self):
        # Several VMs sharing one label is ambiguous -> InstanceExists.
        self._do_mock(['a', 'b'])
        self.assertRaises(exception.InstanceExists,
                          vm_utils.lookup,
                          self.session, self.name_label)

    def test_rescue_none(self):
        # With check_rescue the '-rescue' label is queried first; when it
        # is empty, lookup falls back to the plain label.
        self.session.call_xenapi(
            "VM.get_by_name_label", self.name_label + '-rescue').AndReturn([])
        self._do_mock(['x'])
        result = vm_utils.lookup(self.session, self.name_label,
                                 check_rescue=True)
        self.assertEqual('x', result)

    def test_rescue_found(self):
        self.session.call_xenapi(
            "VM.get_by_name_label",
            self.name_label + '-rescue').AndReturn(['y'])
        self.mox.ReplayAll()
        result = vm_utils.lookup(self.session, self.name_label,
                                 check_rescue=True)
        self.assertEqual('y', result)

    def test_rescue_too_many(self):
        self.session.call_xenapi(
            "VM.get_by_name_label",
            self.name_label + '-rescue').AndReturn(['a', 'b', 'c'])
        self.mox.ReplayAll()
        self.assertRaises(exception.InstanceExists,
                          vm_utils.lookup,
                          self.session, self.name_label,
                          check_rescue=True)
+
+
class GenerateConfigDriveTestCase(VMUtilsTestBase):
    """Tests for vm_utils.generate_configdrive."""

    def test_no_admin_pass(self):
        instance = {}

        self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
        vm_utils.safe_find_sr('session').AndReturn('sr_ref')

        self.mox.StubOutWithMock(vm_utils, 'create_vdi')
        # A 64 MiB VDI labelled 'config-2' is expected for the drive.
        vm_utils.create_vdi('session', 'sr_ref', instance, 'config-2',
                            'configdrive',
                            64 * units.Mi).AndReturn('vdi_ref')

        self.mox.StubOutWithMock(vm_utils, 'vdi_attached_here')
        vm_utils.vdi_attached_here(
            'session', 'vdi_ref', read_only=False).AndReturn(
                contextified('mounted_dev'))

        class FakeInstanceMetadata(object):
            # `_self` keeps the closure's `self` bound to the enclosing
            # test case, so assertEqual checks the network_info the code
            # under test passes in.
            def __init__(_self, instance, content=None, extra_md=None,
                         network_info=None):
                self.assertEqual(network_info, "nw_info")

            def metadata_for_config_drive(_self):
                return []

        self.useFixture(fixtures.MonkeyPatch(
            'nova.api.metadata.base.InstanceMetadata',
            FakeInstanceMetadata))

        self.mox.StubOutWithMock(utils, 'execute')
        # The ISO build and the dd copy into the attached device.
        utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots',
                      '-allow-lowercase', '-allow-multidot', '-l',
                      '-publisher', mox.IgnoreArg(), '-quiet',
                      '-J', '-r', '-V', 'config-2', mox.IgnoreArg(),
                      attempts=1, run_as_root=False).AndReturn(None)
        utils.execute('dd', mox.IgnoreArg(), mox.IgnoreArg(),
                      mox.IgnoreArg(), run_as_root=True).AndReturn(None)

        self.mox.StubOutWithMock(vm_utils, 'create_vbd')
        vm_utils.create_vbd('session', 'vm_ref', 'vdi_ref', mox.IgnoreArg(),
                            bootable=False, read_only=True).AndReturn(None)

        self.mox.ReplayAll()

        # And the actual call we're testing
        vm_utils.generate_configdrive('session', instance, 'vm_ref',
                                      'userdevice', "nw_info")

    @mock.patch.object(vm_utils, "destroy_vdi")
    @mock.patch.object(vm_utils, "vdi_attached_here")
    @mock.patch.object(vm_utils, "create_vdi")
    @mock.patch.object(vm_utils, "safe_find_sr")
    def test_vdi_cleaned_up(self, mock_find, mock_create_vdi, mock_attached,
                            mock_destroy):
        # Attaching fails: the freshly-created VDI must still be destroyed,
        # and a failing destroy_vdi must not mask the original error.
        mock_create_vdi.return_value = 'vdi_ref'
        mock_attached.side_effect = test.TestingException
        mock_destroy.side_effect = exception.StorageError(reason="")

        instance = {"uuid": "asdf"}
        self.assertRaises(test.TestingException,
                          vm_utils.generate_configdrive,
                          'session', instance, 'vm_ref', 'userdevice',
                          'nw_info')
        mock_destroy.assert_called_once_with('session', 'vdi_ref')
+
+
class XenAPIGetUUID(VMUtilsTestBase):
    """Tests for vm_utils.get_this_vm_uuid."""

    def test_get_this_vm_uuid_new_kernel(self):
        # Newer kernels expose the UUID via /sys/hypervisor.
        self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')

        vm_utils._get_sys_hypervisor_uuid().AndReturn(
            '2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f')

        self.mox.ReplayAll()
        self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
                         vm_utils.get_this_vm_uuid(None))
        self.mox.VerifyAll()

    def test_get_this_vm_uuid_old_kernel_reboot(self):
        # On old kernels the sysfs read fails (EACCES here); the code is
        # expected to fall back to two xenstore-read invocations.
        self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')
        self.mox.StubOutWithMock(utils, 'execute')

        vm_utils._get_sys_hypervisor_uuid().AndRaise(
            IOError(13, 'Permission denied'))
        utils.execute('xenstore-read', 'domid', run_as_root=True).AndReturn(
            ('27', ''))
        utils.execute('xenstore-read', '/local/domain/27/vm',
                      run_as_root=True).AndReturn(
            ('/vm/2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', ''))

        self.mox.ReplayAll()
        self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
                         vm_utils.get_this_vm_uuid(None))
        self.mox.VerifyAll()
+
+
class FakeSession(object):
    """Minimal XenAPI session stand-in; every method accepts anything
    and returns None so individual tests can stub what they need.
    """

    def call_xenapi(self, *args):
        return None

    def call_plugin(self, *args):
        return None

    def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
        return None

    def call_plugin_serialized_with_retry(self, plugin, fn, num_retries,
                                          callback, *args, **kwargs):
        return None
+
+
class FetchVhdImageTestCase(VMUtilsTestBase):
    """Tests for vm_utils._fetch_vhd_image and its glance/bittorrent
    download handlers, including fallback and VDI cleanup on failure.
    """

    def setUp(self):
        super(FetchVhdImageTestCase, self).setUp()
        self.context = context.get_admin_context()
        self.context.auth_token = 'auth_token'
        self.session = FakeSession()
        self.instance = {"uuid": "uuid"}

        # Every test expects these two lookups before any download.
        self.mox.StubOutWithMock(vm_utils, '_make_uuid_stack')
        vm_utils._make_uuid_stack().AndReturn(["uuid_stack"])

        self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
        vm_utils.get_sr_path(self.session).AndReturn('sr_path')

    def _stub_glance_download_vhd(self, raise_exc=None):
        # Expect one retried, serialized glance plugin call carrying the
        # auth headers derived from self.context.
        self.mox.StubOutWithMock(
            self.session, 'call_plugin_serialized_with_retry')
        func = self.session.call_plugin_serialized_with_retry(
            'glance', 'download_vhd', 0, mox.IgnoreArg(), mox.IgnoreArg(),
            extra_headers={'X-Service-Catalog': '[]',
                           'X-Auth-Token': 'auth_token',
                           'X-Roles': '',
                           'X-Tenant-Id': None,
                           'X-User-Id': None,
                           'X-Identity-Status': 'Confirmed'},
            image_id='image_id',
            uuid_stack=["uuid_stack"],
            sr_path='sr_path')

        if raise_exc:
            func.AndRaise(raise_exc)
        else:
            func.AndReturn({'root': {'uuid': 'vdi'}})

    def _stub_bittorrent_download_vhd(self, raise_exc=None):
        # Expect one bittorrent plugin call with the default torrent
        # tuning options and the URL built from torrent_base_url.
        self.mox.StubOutWithMock(
            self.session, 'call_plugin_serialized')
        func = self.session.call_plugin_serialized(
            'bittorrent', 'download_vhd',
            image_id='image_id',
            uuid_stack=["uuid_stack"],
            sr_path='sr_path',
            torrent_download_stall_cutoff=600,
            torrent_listen_port_start=6881,
            torrent_listen_port_end=6891,
            torrent_max_last_accessed=86400,
            torrent_max_seeder_processes_per_host=1,
            torrent_seed_chance=1.0,
            torrent_seed_duration=3600,
            torrent_url='http://foo/image_id.torrent'
        )
        if raise_exc:
            func.AndRaise(raise_exc)
        else:
            func.AndReturn({'root': {'uuid': 'vdi'}})

    def test_fetch_vhd_image_works_with_glance(self):
        self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
        vm_utils._image_uses_bittorrent(
            self.context, self.instance).AndReturn(False)

        self._stub_glance_download_vhd()

        self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
        vm_utils.safe_find_sr(self.session).AndReturn("sr")

        self.mox.StubOutWithMock(vm_utils, '_scan_sr')
        vm_utils._scan_sr(self.session, "sr")

        self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
        vm_utils._check_vdi_size(
            self.context, self.session, self.instance, "vdi")

        self.mox.ReplayAll()

        self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
            self.session, self.instance, 'image_id')['root']['uuid'])

        self.mox.VerifyAll()

    def test_fetch_vhd_image_works_with_bittorrent(self):
        cfg.CONF.import_opt('torrent_base_url',
                            'nova.virt.xenapi.image.bittorrent',
                            group='xenserver')
        self.flags(torrent_base_url='http://foo', group='xenserver')

        self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
        vm_utils._image_uses_bittorrent(
            self.context, self.instance).AndReturn(True)

        self._stub_bittorrent_download_vhd()

        self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
        vm_utils.safe_find_sr(self.session).AndReturn("sr")

        self.mox.StubOutWithMock(vm_utils, '_scan_sr')
        vm_utils._scan_sr(self.session, "sr")

        self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
        vm_utils._check_vdi_size(self.context, self.session, self.instance,
                                 "vdi")

        self.mox.ReplayAll()

        self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
            self.session, self.instance, 'image_id')['root']['uuid'])

        self.mox.VerifyAll()

    def test_fetch_vhd_image_cleans_up_vdi_on_fail(self):
        # The size check fails after download; the downloaded VDI must be
        # destroyed and the original error re-raised even when destroy_vdi
        # itself fails with StorageError.
        self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
        vm_utils._image_uses_bittorrent(
            self.context, self.instance).AndReturn(False)

        self._stub_glance_download_vhd()

        self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
        vm_utils.safe_find_sr(self.session).AndReturn("sr")

        self.mox.StubOutWithMock(vm_utils, '_scan_sr')
        vm_utils._scan_sr(self.session, "sr")

        self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
        vm_utils._check_vdi_size(self.context, self.session, self.instance,
                                 "vdi").AndRaise(exception.FlavorDiskTooSmall)

        self.mox.StubOutWithMock(self.session, 'call_xenapi')
        self.session.call_xenapi("VDI.get_by_uuid", "vdi").AndReturn("ref")

        self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
        vm_utils.destroy_vdi(self.session,
                             "ref").AndRaise(exception.StorageError(reason=""))

        self.mox.ReplayAll()

        self.assertRaises(exception.FlavorDiskTooSmall,
                          vm_utils._fetch_vhd_image, self.context, self.session,
                          self.instance, 'image_id')

        self.mox.VerifyAll()

    def test_fallback_to_default_handler(self):
        # A failing bittorrent download falls back to the glance handler,
        # which re-runs the uuid-stack/sr-path lookups.
        cfg.CONF.import_opt('torrent_base_url',
                            'nova.virt.xenapi.image.bittorrent',
                            group='xenserver')
        self.flags(torrent_base_url='http://foo', group='xenserver')

        self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
        vm_utils._image_uses_bittorrent(
            self.context, self.instance).AndReturn(True)

        self._stub_bittorrent_download_vhd(raise_exc=RuntimeError)

        vm_utils._make_uuid_stack().AndReturn(["uuid_stack"])
        vm_utils.get_sr_path(self.session).AndReturn('sr_path')

        self._stub_glance_download_vhd()

        self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
        vm_utils.safe_find_sr(self.session).AndReturn("sr")

        self.mox.StubOutWithMock(vm_utils, '_scan_sr')
        vm_utils._scan_sr(self.session, "sr")

        self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
        vm_utils._check_vdi_size(self.context, self.session, self.instance,
                                 "vdi")

        self.mox.ReplayAll()

        self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
            self.session, self.instance, 'image_id')['root']['uuid'])

        self.mox.VerifyAll()

    def test_default_handler_does_not_fallback_to_itself(self):
        # When glance (the default handler) fails there is nothing left to
        # fall back to, so the error propagates.
        cfg.CONF.import_opt('torrent_base_url',
                            'nova.virt.xenapi.image.bittorrent',
                            group='xenserver')
        self.flags(torrent_base_url='http://foo', group='xenserver')

        self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
        vm_utils._image_uses_bittorrent(
            self.context, self.instance).AndReturn(False)

        self._stub_glance_download_vhd(raise_exc=RuntimeError)

        self.mox.ReplayAll()

        self.assertRaises(RuntimeError, vm_utils._fetch_vhd_image,
            self.context, self.session, self.instance, 'image_id')

        self.mox.VerifyAll()
+
+
class TestImageCompression(VMUtilsTestBase):
    """Checks vm_utils.get_compression_level flag handling."""

    def test_image_compression(self):
        # Unset default yields no compression level.
        self.assertIsNone(vm_utils.get_compression_level())
        # Out-of-range values (too low, negative) are treated as unset.
        for invalid_level in (0, -6):
            self.flags(image_compression_level=invalid_level,
                       group='xenserver')
            self.assertIsNone(vm_utils.get_compression_level())
        # A valid level is passed through unchanged.
        self.flags(image_compression_level=6, group='xenserver')
        self.assertEqual(6, vm_utils.get_compression_level())
+
+
class ResizeHelpersTestCase(VMUtilsTestBase):
    """Tests for the partition/filesystem resize helpers in vm_utils.

    The ``_call_*`` helpers record the expected ``utils.execute``
    invocations on the mox stub; they must run between StubOutWithMock
    and ReplayAll.
    """

    def test_repair_filesystem(self):
        self.mox.StubOutWithMock(utils, 'execute')

        # e2fsck exit codes 0-2 are acceptable (clean or repaired).
        utils.execute('e2fsck', '-f', "-y", "fakepath",
            run_as_root=True, check_exit_code=[0, 1, 2]).AndReturn(
                ("size is: 42", ""))

        self.mox.ReplayAll()

        vm_utils._repair_filesystem("fakepath")

    def _call_tune2fs_remove_journal(self, path):
        # Record the expected journal-removal call on the stubbed execute.
        utils.execute("tune2fs", "-O ^has_journal", path, run_as_root=True)

    def _call_tune2fs_add_journal(self, path):
        # Record the expected journal re-creation call.
        utils.execute("tune2fs", "-j", path, run_as_root=True)

    def _call_parted_mkpart(self, path, start, end):
        # Record deleting partition 1 and recreating it at the new bounds.
        utils.execute('parted', '--script', path, 'rm', '1',
            run_as_root=True)
        utils.execute('parted', '--script', path, 'mkpart',
            'primary', '%ds' % start, '%ds' % end, run_as_root=True)

    def _call_parted_boot_flag(self, path):
        # BUG FIX: the first parameter was previously misspelled "sef"
        # instead of "self" (harmless only because it was unused).
        utils.execute('parted', '--script', path, 'set', '1',
            'boot', 'on', run_as_root=True)

    def test_resize_part_and_fs_down_succeeds(self):
        self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
        self.mox.StubOutWithMock(utils, 'execute')

        dev_path = "/dev/fake"
        partition_path = "%s1" % dev_path
        vm_utils._repair_filesystem(partition_path)
        self._call_tune2fs_remove_journal(partition_path)
        utils.execute("resize2fs", partition_path, "10s", run_as_root=True)
        self._call_parted_mkpart(dev_path, 0, 9)
        self._call_parted_boot_flag(dev_path)
        self._call_tune2fs_add_journal(partition_path)

        self.mox.ReplayAll()

        vm_utils._resize_part_and_fs("fake", 0, 20, 10, "boot")

    def test_log_progress_if_required(self):
        # A message is logged once the progress interval has elapsed.
        self.mox.StubOutWithMock(vm_utils.LOG, "debug")
        vm_utils.LOG.debug(_("Sparse copy in progress, "
                             "%(complete_pct).2f%% complete. "
                             "%(left)s bytes left to copy"),
                           {"complete_pct": 50.0, "left": 1})
        current = timeutils.utcnow()
        timeutils.set_time_override(current)
        timeutils.advance_time_seconds(vm_utils.PROGRESS_INTERVAL_SECONDS + 1)
        self.mox.ReplayAll()
        vm_utils._log_progress_if_required(1, current, 2)

    def test_log_progress_if_not_required(self):
        # No log call is expected before the interval has elapsed.
        self.mox.StubOutWithMock(vm_utils.LOG, "debug")
        current = timeutils.utcnow()
        timeutils.set_time_override(current)
        timeutils.advance_time_seconds(vm_utils.PROGRESS_INTERVAL_SECONDS - 1)
        self.mox.ReplayAll()
        vm_utils._log_progress_if_required(1, current, 2)

    def test_resize_part_and_fs_down_fails_disk_too_big(self):
        self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
        self.mox.StubOutWithMock(utils, 'execute')

        dev_path = "/dev/fake"
        partition_path = "%s1" % dev_path
        new_sectors = 10
        vm_utils._repair_filesystem(partition_path)
        self._call_tune2fs_remove_journal(partition_path)
        # resize2fs refusing to shrink is translated to ResizeError.
        mobj = utils.execute("resize2fs",
                             partition_path,
                             "%ss" % new_sectors,
                             run_as_root=True)
        mobj.AndRaise(processutils.ProcessExecutionError)
        self.mox.ReplayAll()
        self.assertRaises(exception.ResizeError,
                          vm_utils._resize_part_and_fs,
                          "fake", 0, 20, 10, "boot")

    def test_resize_part_and_fs_up_succeeds(self):
        self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
        self.mox.StubOutWithMock(utils, 'execute')

        dev_path = "/dev/fake"
        partition_path = "%s1" % dev_path
        vm_utils._repair_filesystem(partition_path)
        self._call_tune2fs_remove_journal(partition_path)
        # Growing: partition is recreated first, then the fs expands to
        # fill it (no explicit size passed to resize2fs).
        self._call_parted_mkpart(dev_path, 0, 29)
        utils.execute("resize2fs", partition_path, run_as_root=True)
        self._call_tune2fs_add_journal(partition_path)

        self.mox.ReplayAll()

        vm_utils._resize_part_and_fs("fake", 0, 20, 30, "")

    def test_resize_disk_throws_on_zero_size(self):
        self.assertRaises(exception.ResizeError,
                          vm_utils.resize_disk, "session", "instance", "vdi_ref",
                          {"root_gb": 0})

    def test_auto_config_disk_returns_early_on_zero_size(self):
        # Bad session/ref would blow up if the zero-size early return
        # were ever removed.
        vm_utils.try_auto_configure_disk("bad_session", "bad_vdi_ref", 0)

    @mock.patch.object(utils, "execute")
    def test_get_partitions(self, mock_execute):
        parted_return = "BYT;\n...\n"
        parted_return += "1:2s:11s:10s:ext3::boot;\n"
        parted_return += "2:20s:11s:10s::bob:;\n"
        mock_execute.return_value = (parted_return, None)

        partitions = vm_utils._get_partitions("abc")

        self.assertEqual(2, len(partitions))
        self.assertEqual((1, 2, 10, "ext3", "", "boot"), partitions[0])
        self.assertEqual((2, 20, 10, "", "bob", ""), partitions[1])
+
+
class CheckVDISizeTestCase(VMUtilsTestBase):
    """Tests for vm_utils._check_vdi_size flavor-size enforcement."""

    def setUp(self):
        super(CheckVDISizeTestCase, self).setUp()
        self.context = 'fakecontext'
        self.session = 'fakesession'
        self.instance = dict(uuid='fakeinstance')
        self.vdi_uuid = 'fakeuuid'

    def test_not_too_large(self):
        self.mox.StubOutWithMock(flavors, 'extract_flavor')
        flavors.extract_flavor(self.instance).AndReturn(
            dict(root_gb=1))

        self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size')
        # Exactly 1 GiB: equal to root_gb, so no error is expected.
        vm_utils._get_vdi_chain_size(self.session,
                                     self.vdi_uuid).AndReturn(1073741824)

        self.mox.ReplayAll()

        vm_utils._check_vdi_size(self.context, self.session, self.instance,
                                 self.vdi_uuid)

    def test_too_large(self):
        self.mox.StubOutWithMock(flavors, 'extract_flavor')
        flavors.extract_flavor(self.instance).AndReturn(
            dict(root_gb=1))

        self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size')
        vm_utils._get_vdi_chain_size(self.session,
            self.vdi_uuid).AndReturn(11811160065)  # 10GB overhead allowed

        self.mox.ReplayAll()

        self.assertRaises(exception.FlavorDiskTooSmall,
                          vm_utils._check_vdi_size, self.context, self.session,
                          self.instance, self.vdi_uuid)

    def test_zero_root_gb_disables_check(self):
        # root_gb == 0 means "unlimited": the chain size is never queried.
        self.mox.StubOutWithMock(flavors, 'extract_flavor')
        flavors.extract_flavor(self.instance).AndReturn(
            dict(root_gb=0))

        self.mox.ReplayAll()

        vm_utils._check_vdi_size(self.context, self.session, self.instance,
                                 self.vdi_uuid)
+
+
class GetInstanceForVdisForSrTestCase(VMUtilsTestBase):
    """Tests for get_instance_vdis_for_sr against the fake XenAPI layer."""

    def setUp(self):
        super(GetInstanceForVdisForSrTestCase, self).setUp()
        # Disable process locking so the fake driver can be constructed
        # inside the test runner.
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')

    def test_get_instance_vdis_for_sr(self):
        vm_ref = fake.create_vm("foo", "Running")
        sr_ref = fake.create_sr()

        vdi_1 = fake.create_vdi('vdiname1', sr_ref)
        vdi_2 = fake.create_vdi('vdiname2', sr_ref)

        # Attach both VDIs to the VM via VBDs; both should be reported.
        for vdi_ref in [vdi_1, vdi_2]:
            fake.create_vbd(vm_ref, vdi_ref)

        stubs.stubout_session(self.stubs, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)

        result = list(vm_utils.get_instance_vdis_for_sr(
            driver._session, vm_ref, sr_ref))

        self.assertEqual([vdi_1, vdi_2], result)

    def test_get_instance_vdis_for_sr_no_vbd(self):
        # A VM with no VBDs on the SR yields an empty result.
        vm_ref = fake.create_vm("foo", "Running")
        sr_ref = fake.create_sr()

        stubs.stubout_session(self.stubs, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)

        result = list(vm_utils.get_instance_vdis_for_sr(
            driver._session, vm_ref, sr_ref))

        self.assertEqual([], result)
+
+
class VMRefOrRaiseVMFoundTestCase(VMUtilsTestBase):
    """vm_ref_or_raise behaviour when lookup finds the VM."""

    def test_lookup_call(self):
        # Use a distinct name for the local mox instance so it does not
        # shadow the imported `mock` module.
        moxer = mox.Mox()
        moxer.StubOutWithMock(vm_utils, 'lookup')
        vm_utils.lookup('session', 'somename').AndReturn('ignored')

        moxer.ReplayAll()
        vm_utils.vm_ref_or_raise('session', 'somename')
        moxer.VerifyAll()

    def test_return_value(self):
        moxer = mox.Mox()
        moxer.StubOutWithMock(vm_utils, 'lookup')
        vm_utils.lookup(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('vmref')

        moxer.ReplayAll()
        self.assertEqual(
            'vmref', vm_utils.vm_ref_or_raise('session', 'somename'))
        moxer.VerifyAll()
+
+
class VMRefOrRaiseVMNotFoundTestCase(VMUtilsTestBase):
    """vm_ref_or_raise behaviour when lookup finds no VM."""

    def test_exception_raised(self):
        # `moxer` rather than `mock` so the imported module isn't shadowed.
        moxer = mox.Mox()
        moxer.StubOutWithMock(vm_utils, 'lookup')

        vm_utils.lookup('session', 'somename').AndReturn(None)

        moxer.ReplayAll()
        self.assertRaises(
            exception.InstanceNotFound,
            lambda: vm_utils.vm_ref_or_raise('session', 'somename')
        )
        moxer.VerifyAll()

    def test_exception_msg_contains_vm_name(self):
        moxer = mox.Mox()
        moxer.StubOutWithMock(vm_utils, 'lookup')

        vm_utils.lookup('session', 'somename').AndReturn(None)

        moxer.ReplayAll()
        try:
            vm_utils.vm_ref_or_raise('session', 'somename')
            # BUG FIX: previously the test passed silently when no
            # exception was raised at all; fail explicitly instead.
            self.fail('InstanceNotFound was not raised')
        except exception.InstanceNotFound as e:
            self.assertIn('somename', six.text_type(e))
        moxer.VerifyAll()
+
+
@mock.patch.object(vm_utils, 'safe_find_sr', return_value='safe_find_sr')
class CreateCachedImageTestCase(VMUtilsTestBase):
    """Tests for vm_utils._create_cached_image.

    The call_xenapi side_effect lists feed, in order, the sequence of
    XenAPI calls _create_cached_image makes; the meaning of each entry
    is therefore coupled to that call order in vm_utils.
    """

    def setUp(self):
        super(CreateCachedImageTestCase, self).setUp()
        self.session = _get_fake_session()

    @mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
    def test_cached(self, mock_clone_vdi, mock_safe_find_sr):
        # Cache hit on an 'ext' SR: the cached VDI is cloned, not fetched.
        self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
                                                None, None, None, 'vdi_uuid']
        self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
                         vm_utils._create_cached_image('context', self.session,
                                                       'instance', 'name', 'uuid',
                                                       vm_utils.ImageType.DISK_VHD))

    @mock.patch.object(vm_utils, '_safe_copy_vdi', return_value='new_vdi_ref')
    def test_no_cow(self, mock_safe_copy_vdi, mock_safe_find_sr):
        # With use_cow_images disabled the cached VDI is copied instead
        # of cloned.
        self.flags(use_cow_images=False)
        self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
                                                None, None, None, 'vdi_uuid']
        self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
                         vm_utils._create_cached_image('context', self.session,
                                                       'instance', 'name', 'uuid',
                                                       vm_utils.ImageType.DISK_VHD))

    def test_no_cow_no_ext(self, mock_safe_find_sr):
        # Non-'ext' SR: neither clone nor copy helper is needed.
        self.flags(use_cow_images=False)
        self.session.call_xenapi.side_effect = ['non-ext', {'vdi_ref': 2},
                                                'vdi_ref', None, None, None,
                                                'vdi_uuid']
        self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
                         vm_utils._create_cached_image('context', self.session,
                                                       'instance', 'name', 'uuid',
                                                       vm_utils.ImageType.DISK_VHD))

    @mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
    @mock.patch.object(vm_utils, '_fetch_image',
                       return_value={'root': {'uuid': 'vdi_uuid',
                                              'file': None}})
    def test_noncached(self, mock_fetch_image, mock_clone_vdi,
                       mock_safe_find_sr):
        # Cache miss (empty record dict): the image must be fetched, and
        # the first element of the result tuple flips to True.
        self.session.call_xenapi.side_effect = ['ext', {}, 'cache_vdi_ref',
                                                None, None, None, None, None,
                                                None, 'vdi_uuid']
        self.assertEqual((True, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
                         vm_utils._create_cached_image('context', self.session,
                                                       'instance', 'name', 'uuid',
                                                       vm_utils.ImageType.DISK_VHD))
+
+
class BittorrentTestCase(VMUtilsTestBase):
    """Tests for bittorrent selection and cached/uncached image creation."""

    def setUp(self):
        super(BittorrentTestCase, self).setUp()
        self.context = context.get_admin_context()

    def test_image_uses_bittorrent(self):
        self.flags(torrent_images='some', group='xenserver')
        instance = {'system_metadata': {'image_bittorrent': True}}
        self.assertTrue(
            vm_utils._image_uses_bittorrent(self.context, instance))

    def _test_create_image(self, cache_type):
        self.flags(cache_images=cache_type, group='xenserver')
        instance = {'system_metadata': {'image_cache_in_nova': True}}

        # Record which code path create_image takes: 'some' means the
        # cached path, 'none' means the direct-fetch path.
        was = {'called': None}

        def fake_create_cached_image(*args):
            was['called'] = 'some'
            return (False, {})

        def fake_fetch_image(*args):
            was['called'] = 'none'
            return {}

        self.stubs.Set(vm_utils, '_create_cached_image',
                       fake_create_cached_image)
        self.stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)

        vm_utils.create_image(self.context, None, instance,
                              'foo', 'bar', 'baz')

        self.assertEqual(cache_type, was['called'])

    def test_create_image_cached(self):
        self._test_create_image('some')

    def test_create_image_uncached(self):
        self._test_create_image('none')
+
+
class ShutdownTestCase(VMUtilsTestBase):
    """Hard/clean shutdown should short-circuit when the VM is halted.

    NOTE(review): the mox expectations below are never ReplayAll()'d,
    so the stubbed calls remain in record mode; confirm this is
    intentional before relying on these assertions.
    """

    def test_hardshutdown_should_return_true_when_vm_is_shutdown(self):
        self.mock = mox.Mox()
        session = FakeSession()
        instance = "instance"
        vm_ref = "vm-ref"
        self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
        # LOG is stubbed so logging calls do not hit the real logger.
        self.mock.StubOutWithMock(vm_utils, 'LOG')
        self.assertTrue(vm_utils.hard_shutdown_vm(
            session, instance, vm_ref))

    def test_cleanshutdown_should_return_true_when_vm_is_shutdown(self):
        self.mock = mox.Mox()
        session = FakeSession()
        instance = "instance"
        vm_ref = "vm-ref"
        self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
        self.mock.StubOutWithMock(vm_utils, 'LOG')
        self.assertTrue(vm_utils.clean_shutdown_vm(
            session, instance, vm_ref))
+
+
class CreateVBDTestCase(VMUtilsTestBase):
    """Tests for vm_utils.create_vbd / attach_cd VBD record construction."""

    def setUp(self):
        super(CreateVBDTestCase, self).setUp()
        self.session = FakeSession()
        self.mock = mox.Mox()
        self.mock.StubOutWithMock(self.session, 'call_xenapi')
        self.vbd_rec = self._generate_vbd_rec()

    def _generate_vbd_rec(self):
        # Baseline VBD record matching create_vbd's defaults.
        vbd_rec = {}
        vbd_rec['VM'] = 'vm_ref'
        vbd_rec['VDI'] = 'vdi_ref'
        vbd_rec['userdevice'] = '0'
        vbd_rec['bootable'] = False
        vbd_rec['mode'] = 'RW'
        vbd_rec['type'] = 'disk'
        vbd_rec['unpluggable'] = True
        vbd_rec['empty'] = False
        vbd_rec['other_config'] = {}
        vbd_rec['qos_algorithm_type'] = ''
        vbd_rec['qos_algorithm_params'] = {}
        vbd_rec['qos_supported_algorithms'] = []
        return vbd_rec

    def test_create_vbd_default_args(self):
        self.session.call_xenapi('VBD.create',
                                 self.vbd_rec).AndReturn("vbd_ref")
        self.mock.ReplayAll()

        result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0)
        self.assertEqual(result, "vbd_ref")
        self.mock.VerifyAll()

    def test_create_vbd_osvol(self):
        # osvol=True additionally tags the VBD's other_config.
        self.session.call_xenapi('VBD.create',
                                 self.vbd_rec).AndReturn("vbd_ref")
        self.session.call_xenapi('VBD.add_to_other_config', "vbd_ref",
                                 "osvol", "True")
        self.mock.ReplayAll()
        result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0,
                                     osvol=True)
        self.assertEqual(result, "vbd_ref")
        self.mock.VerifyAll()

    def test_create_vbd_extra_args(self):
        # Every overridable field flipped away from its default.
        self.vbd_rec['VDI'] = 'OpaqueRef:NULL'
        self.vbd_rec['type'] = 'a'
        self.vbd_rec['mode'] = 'RO'
        self.vbd_rec['bootable'] = True
        self.vbd_rec['empty'] = True
        self.vbd_rec['unpluggable'] = False
        self.session.call_xenapi('VBD.create',
                                 self.vbd_rec).AndReturn("vbd_ref")
        self.mock.ReplayAll()

        result = vm_utils.create_vbd(self.session, "vm_ref", None, 0,
                                     vbd_type="a", read_only=True, bootable=True,
                                     empty=True, unpluggable=False)
        self.assertEqual(result, "vbd_ref")
        self.mock.VerifyAll()

    def test_attach_cd(self):
        # attach_cd creates an empty CD VBD, then inserts the VDI.
        self.mock.StubOutWithMock(vm_utils, 'create_vbd')

        vm_utils.create_vbd(self.session, "vm_ref", None, 1,
                            vbd_type='cd', read_only=True, bootable=True,
                            empty=True, unpluggable=False).AndReturn("vbd_ref")
        self.session.call_xenapi('VBD.insert', "vbd_ref", "vdi_ref")
        self.mock.ReplayAll()

        result = vm_utils.attach_cd(self.session, "vm_ref", "vdi_ref", 1)
        self.assertEqual(result, "vbd_ref")
        self.mock.VerifyAll()
+
+
class UnplugVbdTestCase(VMUtilsTestBase):
    """Tests for vm_utils.unplug_vbd retry and error handling.

    NOTE(review): some tests pass (session, vm_ref, vbd_ref) and others
    (session, vbd_ref, vm_ref); the mocked session accepts either, but
    the ordering should be made consistent with unplug_vbd's signature.
    """

    @mock.patch.object(greenthread, 'sleep')
    def test_unplug_vbd_works(self, mock_sleep):
        session = _get_fake_session()
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'

        vm_utils.unplug_vbd(session, vbd_ref, vm_ref)

        session.call_xenapi.assert_called_once_with('VBD.unplug', vbd_ref)
        self.assertEqual(0, mock_sleep.call_count)

    def test_unplug_vbd_raises_unexpected_error(self):
        # Non-XenAPI exceptions propagate unchanged, with no retry.
        session = _get_fake_session()
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'
        session.call_xenapi.side_effect = test.TestingException()

        self.assertRaises(test.TestingException, vm_utils.unplug_vbd,
                          session, vm_ref, vbd_ref)
        self.assertEqual(1, session.call_xenapi.call_count)

    def test_unplug_vbd_already_detached_works(self):
        # DEVICE_ALREADY_DETACHED is treated as success.
        error = "DEVICE_ALREADY_DETACHED"
        session = _get_fake_session(error)
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'

        vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
        self.assertEqual(1, session.call_xenapi.call_count)

    def test_unplug_vbd_already_raises_unexpected_xenapi_error(self):
        # An unrecognized XenAPI failure becomes StorageError immediately.
        session = _get_fake_session("")
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'

        self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
                          session, vbd_ref, vm_ref)
        self.assertEqual(1, session.call_xenapi.call_count)

    def _test_uplug_vbd_retries(self, mock_sleep, error):
        # Retryable errors: 10 retries (11 calls total) before giving up.
        session = _get_fake_session(error)
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'

        self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
                          session, vm_ref, vbd_ref)

        self.assertEqual(11, session.call_xenapi.call_count)
        self.assertEqual(10, mock_sleep.call_count)

    @mock.patch.object(greenthread, 'sleep')
    def test_uplug_vbd_retries_on_rejected(self, mock_sleep):
        self._test_uplug_vbd_retries(mock_sleep,
                                     "DEVICE_DETACH_REJECTED")

    @mock.patch.object(greenthread, 'sleep')
    def test_uplug_vbd_retries_on_internal_error(self, mock_sleep):
        self._test_uplug_vbd_retries(mock_sleep,
                                     "INTERNAL_ERROR")
+
+
+class VDIOtherConfigTestCase(VMUtilsTestBase):
+ """Tests to ensure that the code is populating VDI's `other_config`
+ attribute with the correct metadata.
+ """
+
+ def setUp(self):
+ super(VDIOtherConfigTestCase, self).setUp()
+
+ class _FakeSession():
+ def call_xenapi(self, operation, *args, **kwargs):
+ # VDI.add_to_other_config -> VDI_add_to_other_config
+ method = getattr(self, operation.replace('.', '_'), None)
+ if method:
+ return method(*args, **kwargs)
+
+ self.operation = operation
+ self.args = args
+ self.kwargs = kwargs
+
+ self.session = _FakeSession()
+ self.context = context.get_admin_context()
+ self.fake_instance = {'uuid': 'aaaa-bbbb-cccc-dddd',
+ 'name': 'myinstance'}
+
+ def test_create_vdi(self):
+ # Some images are registered with XenServer explicitly by calling
+ # `create_vdi`
+ vm_utils.create_vdi(self.session, 'sr_ref', self.fake_instance,
+ 'myvdi', 'root', 1024, read_only=True)
+
+ expected = {'nova_disk_type': 'root',
+ 'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
+
+ self.assertEqual(expected, self.session.args[0]['other_config'])
+
+ def test_create_image(self):
+ # Other images are registered implicitly when they are dropped into
+ # the SR by a dom0 plugin or some other process
+ self.flags(cache_images='none', group='xenserver')
+
+ def fake_fetch_image(*args):
+ return {'root': {'uuid': 'fake-uuid'}}
+
+ self.stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)
+
+ other_config = {}
+
+ def VDI_add_to_other_config(ref, key, value):
+ other_config[key] = value
+
+ # Stubbing on the session object and not class so we don't pollute
+ # other tests
+ self.session.VDI_add_to_other_config = VDI_add_to_other_config
+ self.session.VDI_get_other_config = lambda vdi: {}
+
+ vm_utils.create_image(self.context, self.session, self.fake_instance,
+ 'myvdi', 'image1', vm_utils.ImageType.DISK_VHD)
+
+ expected = {'nova_disk_type': 'root',
+ 'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
+
+ self.assertEqual(expected, other_config)
+
+ def test_import_migrated_vhds(self):
+ # Migrated images should preserve the `other_config`
+ other_config = {}
+
+ def VDI_add_to_other_config(ref, key, value):
+ other_config[key] = value
+
+ def call_plugin_serialized(*args, **kwargs):
+ return {'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}}
+
+ # Stubbing on the session object and not class so we don't pollute
+ # other tests
+ self.session.VDI_add_to_other_config = VDI_add_to_other_config
+ self.session.VDI_get_other_config = lambda vdi: {}
+ self.session.call_plugin_serialized = call_plugin_serialized
+
+ self.stubs.Set(vm_utils, 'get_sr_path', lambda *a, **k: None)
+ self.stubs.Set(vm_utils, 'scan_default_sr', lambda *a, **k: None)
+
+ vm_utils._import_migrated_vhds(self.session, self.fake_instance,
+ "disk_label", "root", "vdi_label")
+
+ expected = {'nova_disk_type': 'root',
+ 'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}
+
+ self.assertEqual(expected, other_config)
+
+
class GenerateDiskTestCase(VMUtilsTestBase):
    """Exercises vm_utils._generate_disk partitioning and formatting."""

    def setUp(self):
        super(GenerateDiskTestCase, self).setUp()
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        stubs.stubout_session(self.stubs, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)
        self.session = driver._session
        self.session.is_local_connection = False
        self.vm_ref = fake.create_vm("foo", "Running")

    def tearDown(self):
        super(GenerateDiskTestCase, self).tearDown()
        fake.destroy_vm(self.vm_ref)

    def _expect_parted_calls(self):
        # Record the expected parted/kpartx invocations; mox verifies that
        # they happen in this exact order during replay.
        self.mox.StubOutWithMock(utils, "execute")
        self.mox.StubOutWithMock(utils, "trycmd")
        self.mox.StubOutWithMock(vm_utils, "destroy_vdi")
        self.mox.StubOutWithMock(vm_utils.os.path, "exists")
        if self.session.is_local_connection:
            # Local connections run parted without strict exit-code checks
            # and map the partition with kpartx.
            utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
                          'msdos', check_exit_code=False, run_as_root=True)
            utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
                          'primary', '0', '-0',
                          check_exit_code=False, run_as_root=True)
            vm_utils.os.path.exists('/dev/mapper/fakedev1').AndReturn(True)
            utils.trycmd('kpartx', '-a', '/dev/fakedev',
                         discard_warnings=True, run_as_root=True)
        else:
            utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
                          'msdos', check_exit_code=True, run_as_root=True)
            utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
                          'primary', '0', '-0',
                          check_exit_code=True, run_as_root=True)

    def _check_vdi(self, vdi_ref, check_attached=True):
        # Verify the generated VDI's size and (optionally) that it is
        # attached to this test's VM via a VBD.
        vdi_rec = self.session.call_xenapi("VDI.get_record", vdi_ref)
        self.assertEqual(str(10 * units.Mi), vdi_rec["virtual_size"])
        if check_attached:
            vbd_ref = vdi_rec["VBDs"][0]
            vbd_rec = self.session.call_xenapi("VBD.get_record", vbd_ref)
            self.assertEqual(self.vm_ref, vbd_rec['VM'])
        else:
            self.assertEqual(0, len(vdi_rec["VBDs"]))

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_with_no_fs_given(self):
        # No filesystem type: partition only, no mkfs/mkswap expected.
        self._expect_parted_calls()

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
            self.vm_ref, "2", "name", "user", 10, None)
        self._check_vdi(vdi_ref)

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_swap(self):
        # Swap disks get mkswap after partitioning.
        self._expect_parted_calls()
        utils.execute('mkswap', '/dev/fakedev1', run_as_root=True)

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
            self.vm_ref, "2", "name", "swap", 10, "linux-swap")
        self._check_vdi(vdi_ref)

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_ephemeral(self):
        # Ephemeral disks are formatted with the requested filesystem.
        self._expect_parted_calls()
        utils.execute('mkfs', '-t', 'ext4', '/dev/fakedev1',
                      run_as_root=True)

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
            self.vm_ref, "2", "name", "ephemeral", 10, "ext4")
        self._check_vdi(vdi_ref)

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_ensure_cleanup_called(self):
        # If mkfs fails, the VDI must still be destroyed; a failing
        # destroy_vdi must not mask the original exception.
        self._expect_parted_calls()
        utils.execute('mkfs', '-t', 'ext4', '/dev/fakedev1',
                      run_as_root=True).AndRaise(test.TestingException)
        vm_utils.destroy_vdi(self.session,
            mox.IgnoreArg()).AndRaise(exception.StorageError(reason=""))

        self.mox.ReplayAll()
        self.assertRaises(test.TestingException, vm_utils._generate_disk,
                          self.session, {"uuid": "fake_uuid"},
                          self.vm_ref, "2", "name", "ephemeral", 10, "ext4")

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_ephemeral_local_not_attached(self):
        # With a local connection and no VM ref, the disk is formatted via
        # the device-mapper path and left unattached.
        self.session.is_local_connection = True
        self._expect_parted_calls()
        utils.execute('mkfs', '-t', 'ext4', '/dev/mapper/fakedev1',
                      run_as_root=True)

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
            None, "2", "name", "ephemeral", 10, "ext4")
        self._check_vdi(vdi_ref, check_attached=False)
+
+
class GenerateEphemeralTestCase(VMUtilsTestBase):
    """Tests ephemeral disk sizing/naming and generate_ephemeral cleanup."""

    def setUp(self):
        super(GenerateEphemeralTestCase, self).setUp()
        self.session = "session"
        self.instance = "instance"
        self.vm_ref = "vm_ref"
        self.name_label = "name"
        self.ephemeral_name_label = "name ephemeral"
        self.userdevice = 4
        self.mox.StubOutWithMock(vm_utils, "_generate_disk")
        self.mox.StubOutWithMock(vm_utils, "safe_destroy_vdis")

    def test_get_ephemeral_disk_sizes_simple(self):
        # A small size fits in a single disk.
        self.assertEqual([20], list(vm_utils.get_ephemeral_disk_sizes(20)))

    def test_get_ephemeral_disk_sizes_three_disks_2000(self):
        # Sizes above 2000 GB are split into 2000 GB chunks plus remainder.
        self.assertEqual([2000, 2000, 30],
                         list(vm_utils.get_ephemeral_disk_sizes(4030)))

    def test_get_ephemeral_disk_sizes_two_disks_1024(self):
        # Power-of-two sizes split evenly.
        self.assertEqual([1024, 1024],
                         list(vm_utils.get_ephemeral_disk_sizes(2048)))

    def _expect_generate_disk(self, size, device, name_label):
        # Record one expected _generate_disk call; size is in GB here,
        # converted to MB for the call.
        vm_utils._generate_disk(self.session, self.instance, self.vm_ref,
            str(device), name_label, 'ephemeral',
            size * 1024, None).AndReturn(device)

    def test_generate_ephemeral_adds_one_disk(self):
        self._expect_generate_disk(20, self.userdevice,
                                   self.ephemeral_name_label)
        self.mox.ReplayAll()

        vm_utils.generate_ephemeral(self.session, self.instance, self.vm_ref,
            str(self.userdevice), self.name_label, 20)

    def test_generate_ephemeral_adds_multiple_disks(self):
        # Each extra disk bumps the userdevice and gets a " (n)" suffix.
        self._expect_generate_disk(2000, self.userdevice,
                                   self.ephemeral_name_label)
        self._expect_generate_disk(2000, self.userdevice + 1,
                                   self.ephemeral_name_label + " (1)")
        self._expect_generate_disk(30, self.userdevice + 2,
                                   self.ephemeral_name_label + " (2)")
        self.mox.ReplayAll()

        vm_utils.generate_ephemeral(self.session, self.instance, self.vm_ref,
            str(self.userdevice), self.name_label, 4030)

    def test_generate_ephemeral_cleans_up_on_error(self):
        # If a later disk fails, the already-created VDIs are destroyed
        # and the failure propagates.
        self._expect_generate_disk(1024, self.userdevice,
                                   self.ephemeral_name_label)
        self._expect_generate_disk(1024, self.userdevice + 1,
                                   self.ephemeral_name_label + " (1)")

        vm_utils._generate_disk(self.session, self.instance, self.vm_ref,
            str(self.userdevice + 2), "name ephemeral (2)", 'ephemeral',
            units.Mi, None).AndRaise(exception.NovaException)

        vm_utils.safe_destroy_vdis(self.session, [4, 5])

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException, vm_utils.generate_ephemeral,
                          self.session, self.instance, self.vm_ref,
                          str(self.userdevice), self.name_label, 4096)
+
+
class FakeFile(object):
    """Minimal file stub that records seek() calls for later inspection."""

    def __init__(self):
        # Each entry is a (bound_method, argument) tuple, in call order.
        self._file_operations = []

    def seek(self, offset):
        self._file_operations.append((self.seek, offset))
+
+
class StreamDiskTestCase(VMUtilsTestBase):
    """Tests for vm_utils._stream_disk file handling and seek offsets."""

    def setUp(self):
        # Python 2 builtins module; needed to stub the global open().
        import __builtin__
        super(StreamDiskTestCase, self).setUp()
        self.mox.StubOutWithMock(vm_utils.utils, 'make_dev_path')
        self.mox.StubOutWithMock(vm_utils.utils, 'temporary_chown')
        self.mox.StubOutWithMock(vm_utils, '_write_partition')

        # NOTE(matelakat): This might hide the fail reason, as test runners
        # are unhappy with a mocked out open.
        self.mox.StubOutWithMock(__builtin__, 'open')
        self.image_service_func = self.mox.CreateMockAnything()

    def test_non_ami(self):
        # Non-AMI images are written from the start of the device.
        fake_file = FakeFile()

        vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
        vm_utils.utils.temporary_chown(
            'some_path').AndReturn(contextified(None))
        open('some_path', 'wb').AndReturn(contextified(fake_file))
        self.image_service_func(fake_file)

        self.mox.ReplayAll()

        vm_utils._stream_disk("session", self.image_service_func,
                              vm_utils.ImageType.KERNEL, None, 'dev')

        self.assertEqual([(fake_file.seek, 0)], fake_file._file_operations)

    def test_ami_disk(self):
        # AMI disks get a partition table first, then the image is written
        # after the MBR.
        fake_file = FakeFile()

        vm_utils._write_partition("session", 100, 'dev')
        vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
        vm_utils.utils.temporary_chown(
            'some_path').AndReturn(contextified(None))
        open('some_path', 'wb').AndReturn(contextified(fake_file))
        self.image_service_func(fake_file)

        self.mox.ReplayAll()

        vm_utils._stream_disk("session", self.image_service_func,
                              vm_utils.ImageType.DISK, 100, 'dev')

        self.assertEqual(
            [(fake_file.seek, vm_utils.MBR_SIZE_BYTES)],
            fake_file._file_operations)
+
+
class VMUtilsSRPath(VMUtilsTestBase):
    """Tests for vm_utils.get_sr_path PBD/SR lookup behaviour."""

    def setUp(self):
        super(VMUtilsSRPath, self).setUp()
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        stubs.stubout_session(self.stubs, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)
        self.session = driver._session
        self.session.is_local_connection = False

    def test_defined(self):
        # When the PBD's device_config carries a path, it is used directly.
        self.mox.StubOutWithMock(vm_utils, "safe_find_sr")
        self.mox.StubOutWithMock(self.session, "call_xenapi")

        vm_utils.safe_find_sr(self.session).AndReturn("sr_ref")
        self.session.host_ref = "host_ref"
        self.session.call_xenapi('PBD.get_all_records_where',
            'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn(
            {'pbd_ref': {'device_config': {'path': 'sr_path'}}})

        self.mox.ReplayAll()
        self.assertEqual(vm_utils.get_sr_path(self.session), "sr_path")

    def test_default(self):
        # Without an explicit path, the path is derived from the SR uuid.
        self.mox.StubOutWithMock(vm_utils, "safe_find_sr")
        self.mox.StubOutWithMock(self.session, "call_xenapi")

        vm_utils.safe_find_sr(self.session).AndReturn("sr_ref")
        self.session.host_ref = "host_ref"
        self.session.call_xenapi('PBD.get_all_records_where',
            'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn(
            {'pbd_ref': {'device_config': {}}})
        self.session.call_xenapi("SR.get_record", "sr_ref").AndReturn(
            {'uuid': 'sr_uuid', 'type': 'ext'})
        self.mox.ReplayAll()
        self.assertEqual(vm_utils.get_sr_path(self.session),
                         "/var/run/sr-mount/sr_uuid")
+
+
class CreateKernelRamdiskTestCase(VMUtilsTestBase):
    """Tests for vm_utils.create_kernel_and_ramdisk caching behaviour."""

    def setUp(self):
        super(CreateKernelRamdiskTestCase, self).setUp()
        self.context = "context"
        self.session = FakeSession()
        self.instance = {"kernel_id": None, "ramdisk_id": None}
        self.name_label = "name"
        self.mox.StubOutWithMock(self.session, "call_plugin")
        self.mox.StubOutWithMock(uuid, "uuid4")
        self.mox.StubOutWithMock(vm_utils, "_fetch_disk_image")

    def test_create_kernel_and_ramdisk_no_create(self):
        # Neither kernel nor ramdisk requested: nothing is created.
        self.mox.ReplayAll()
        result = vm_utils.create_kernel_and_ramdisk(self.context,
            self.session, self.instance, self.name_label)
        self.assertEqual((None, None), result)

    def test_create_kernel_and_ramdisk_create_both_cached(self):
        # Both images already cached in dom0: the plugin returns their
        # paths and no image fetch happens.
        kernel_id = "kernel"
        ramdisk_id = "ramdisk"
        self.instance["kernel_id"] = kernel_id
        self.instance["ramdisk_id"] = ramdisk_id

        args_kernel = {}
        args_kernel['cached-image'] = kernel_id
        args_kernel['new-image-uuid'] = "fake_uuid1"
        uuid.uuid4().AndReturn("fake_uuid1")
        self.session.call_plugin('kernel', 'create_kernel_ramdisk',
                                 args_kernel).AndReturn("k")

        args_ramdisk = {}
        args_ramdisk['cached-image'] = ramdisk_id
        args_ramdisk['new-image-uuid'] = "fake_uuid2"
        uuid.uuid4().AndReturn("fake_uuid2")
        self.session.call_plugin('kernel', 'create_kernel_ramdisk',
                                 args_ramdisk).AndReturn("r")

        self.mox.ReplayAll()
        result = vm_utils.create_kernel_and_ramdisk(self.context,
            self.session, self.instance, self.name_label)
        self.assertEqual(("k", "r"), result)

    def test_create_kernel_and_ramdisk_create_kernel_not_cached(self):
        # Cache miss (plugin returns ""): the kernel image is fetched.
        kernel_id = "kernel"
        self.instance["kernel_id"] = kernel_id

        args_kernel = {}
        args_kernel['cached-image'] = kernel_id
        args_kernel['new-image-uuid'] = "fake_uuid1"
        uuid.uuid4().AndReturn("fake_uuid1")
        self.session.call_plugin('kernel', 'create_kernel_ramdisk',
                                 args_kernel).AndReturn("")

        kernel = {"kernel": {"file": "k"}}
        vm_utils._fetch_disk_image(self.context, self.session, self.instance,
            self.name_label, kernel_id, 0).AndReturn(kernel)

        self.mox.ReplayAll()
        result = vm_utils.create_kernel_and_ramdisk(self.context,
            self.session, self.instance, self.name_label)
        self.assertEqual(("k", None), result)
+
+
class ScanSrTestCase(VMUtilsTestBase):
    """Tests for SR scanning and its retry/backoff behaviour."""

    @mock.patch.object(vm_utils, "_scan_sr")
    @mock.patch.object(vm_utils, "safe_find_sr")
    def test_scan_default_sr(self, mock_safe_find_sr, mock_scan_sr):
        # scan_default_sr finds the default SR, scans it and returns it.
        mock_safe_find_sr.return_value = "sr_ref"

        self.assertEqual("sr_ref", vm_utils.scan_default_sr("fake_session"))

        mock_scan_sr.assert_called_once_with("fake_session", "sr_ref")

    def test_scan_sr_works(self):
        session = mock.Mock()
        vm_utils._scan_sr(session, "sr_ref")
        session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")

    def test_scan_sr_unknown_error_fails_once(self):
        # Non-XenAPI errors are not retried.
        session = mock.Mock()
        session.call_xenapi.side_effect = test.TestingException
        self.assertRaises(test.TestingException,
                          vm_utils._scan_sr, session, "sr_ref")
        session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")

    @mock.patch.object(greenthread, 'sleep')
    def test_scan_sr_known_error_retries_then_throws(self, mock_sleep):
        # SR_BACKEND_FAILURE_40 is retried with exponential backoff
        # (2s, 4s, 8s) before finally re-raising.
        session = mock.Mock()

        class FakeException(Exception):
            details = ['SR_BACKEND_FAILURE_40', "", "", ""]

        session.XenAPI.Failure = FakeException
        session.call_xenapi.side_effect = FakeException

        self.assertRaises(FakeException,
                          vm_utils._scan_sr, session, "sr_ref")

        session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
        self.assertEqual(4, session.call_xenapi.call_count)
        mock_sleep.assert_has_calls([mock.call(2), mock.call(4), mock.call(8)])

    @mock.patch.object(greenthread, 'sleep')
    def test_scan_sr_known_error_retries_then_succeeds(self, mock_sleep):
        # If a retry succeeds, no further attempts or sleeps happen.
        session = mock.Mock()

        class FakeException(Exception):
            details = ['SR_BACKEND_FAILURE_40', "", "", ""]

        session.XenAPI.Failure = FakeException

        def fake_call_xenapi(*args):
            # Fail on every call except the second one.
            fake_call_xenapi.count += 1
            if fake_call_xenapi.count != 2:
                raise FakeException()

        fake_call_xenapi.count = 0
        session.call_xenapi.side_effect = fake_call_xenapi

        vm_utils._scan_sr(session, "sr_ref")

        session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
        self.assertEqual(2, session.call_xenapi.call_count)
        mock_sleep.assert_called_once_with(2)
+
+
@mock.patch.object(flavors, 'extract_flavor',
                   return_value={
                       'memory_mb': 1024,
                       'vcpus': 1,
                       'vcpu_weight': 1.0,
                   })
class CreateVmTestCase(VMUtilsTestBase):
    """Tests for vm_utils.create_vm / destroy_vm."""

    def test_vss_provider(self, mock_extract):
        # Windows guests get the allowvssprovider xenstore key; the VM
        # record must match the flavor and vcpu_pin_set exactly.
        self.flags(vcpu_pin_set="2,3")
        session = _get_fake_session()
        instance = {
            "uuid": "uuid", "os_type": "windows"
        }

        vm_utils.create_vm(session, instance, "label",
                           "kernel", "ramdisk")

        vm_rec = {
            'VCPUs_params': {'cap': '0', 'mask': '2,3', 'weight': '1.0'},
            'PV_args': '',
            'memory_static_min': '0',
            'ha_restart_priority': '',
            'HVM_boot_policy': 'BIOS order',
            'PV_bootloader': '', 'tags': [],
            'VCPUs_max': '1',
            'memory_static_max': '1073741824',
            'actions_after_shutdown': 'destroy',
            'memory_dynamic_max': '1073741824',
            'user_version': '0',
            'xenstore_data': {'vm-data/allowvssprovider': 'false'},
            'blocked_operations': {},
            'is_a_template': False,
            'name_description': '',
            'memory_dynamic_min': '1073741824',
            'actions_after_crash': 'destroy',
            'memory_target': '1073741824',
            'PV_ramdisk': '',
            'PV_bootloader_args': '',
            'PCI_bus': '',
            'other_config': {'nova_uuid': 'uuid'},
            'name_label': 'label',
            'actions_after_reboot': 'restart',
            'VCPUs_at_startup': '1',
            'HVM_boot_params': {'order': 'dc'},
            'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
                         'timeoffset': '0', 'viridian': 'true',
                         'acpi': 'true'},
            'PV_legacy_args': '',
            'PV_kernel': '',
            'affinity': '',
            'recommendations': '',
            'ha_always_run': False
        }
        session.call_xenapi.assert_called_once_with("VM.create", vm_rec)

    def test_invalid_cpu_mask_raises(self, mock_extract):
        # A malformed vcpu_pin_set must be rejected before VM creation.
        self.flags(vcpu_pin_set="asdf")
        session = mock.Mock()
        instance = {
            "uuid": "uuid",
        }
        self.assertRaises(exception.Invalid,
                          vm_utils.create_vm,
                          session, instance, "label",
                          "kernel", "ramdisk")

    def test_destroy_vm(self, mock_extract):
        session = mock.Mock()
        instance = {
            "uuid": "uuid",
        }

        vm_utils.destroy_vm(session, instance, "vm_ref")

        session.VM.destroy.assert_called_once_with("vm_ref")

    def test_destroy_vm_silently_fails(self, mock_extract):
        # A XenAPI failure during destroy is swallowed (best effort).
        session = mock.Mock()
        exc = test.TestingException()
        session.XenAPI.Failure = test.TestingException
        session.VM.destroy.side_effect = exc
        instance = {
            "uuid": "uuid",
        }

        vm_utils.destroy_vm(session, instance, "vm_ref")

        session.VM.destroy.assert_called_once_with("vm_ref")
+
+
class DetermineVmModeTestCase(VMUtilsTestBase):
    """Tests the vm_mode selection logic of vm_utils.determine_vm_mode."""

    def _assert_mode(self, expected, instance, disk_image_type=None):
        self.assertEqual(expected,
                         vm_utils.determine_vm_mode(instance,
                                                    disk_image_type))

    def test_determine_vm_mode_returns_xen_mode(self):
        # An explicit vm_mode always wins.
        self._assert_mode(vm_mode.XEN, {"vm_mode": "xen"})

    def test_determine_vm_mode_returns_hvm_mode(self):
        self._assert_mode(vm_mode.HVM, {"vm_mode": "hvm"})

    def test_determine_vm_mode_returns_xen_for_linux(self):
        # Without an explicit mode, the os_type decides.
        self._assert_mode(vm_mode.XEN, {"vm_mode": None, "os_type": "linux"})

    def test_determine_vm_mode_returns_hvm_for_windows(self):
        self._assert_mode(vm_mode.HVM,
                          {"vm_mode": None, "os_type": "windows"})

    def test_determine_vm_mode_returns_hvm_by_default(self):
        self._assert_mode(vm_mode.HVM, {"vm_mode": None, "os_type": None})

    def test_determine_vm_mode_returns_xen_for_VHD(self):
        # VHD and raw DISK images imply PV (xen) mode.
        self._assert_mode(vm_mode.XEN, {"vm_mode": None, "os_type": None},
                          vm_utils.ImageType.DISK_VHD)

    def test_determine_vm_mode_returns_xen_for_DISK(self):
        self._assert_mode(vm_mode.XEN, {"vm_mode": None, "os_type": None},
                          vm_utils.ImageType.DISK)
+
+
class CallXenAPIHelpersTestCase(VMUtilsTestBase):
    """Tests thin wrappers around session.call_xenapi in vm_utils."""

    def test_vm_get_vbd_refs(self):
        session = mock.Mock()
        session.call_xenapi.return_value = "foo"
        self.assertEqual("foo", vm_utils._vm_get_vbd_refs(session, "vm_ref"))
        session.call_xenapi.assert_called_once_with("VM.get_VBDs", "vm_ref")

    def test_vbd_get_rec(self):
        session = mock.Mock()
        session.call_xenapi.return_value = "foo"
        self.assertEqual("foo", vm_utils._vbd_get_rec(session, "vbd_ref"))
        session.call_xenapi.assert_called_once_with("VBD.get_record",
                                                    "vbd_ref")

    def test_vdi_get_rec(self):
        session = mock.Mock()
        session.call_xenapi.return_value = "foo"
        self.assertEqual("foo", vm_utils._vdi_get_rec(session, "vdi_ref"))
        session.call_xenapi.assert_called_once_with("VDI.get_record",
                                                    "vdi_ref")

    def test_vdi_snapshot(self):
        # Snapshot is taken with empty driver params.
        session = mock.Mock()
        session.call_xenapi.return_value = "foo"
        self.assertEqual("foo", vm_utils._vdi_snapshot(session, "vdi_ref"))
        session.call_xenapi.assert_called_once_with("VDI.snapshot",
                                                    "vdi_ref", {})

    def test_vdi_get_virtual_size(self):
        # XenAPI returns the size as a string; the helper converts to int.
        session = mock.Mock()
        session.call_xenapi.return_value = "123"
        self.assertEqual(123, vm_utils._vdi_get_virtual_size(session, "ref"))
        session.call_xenapi.assert_called_once_with("VDI.get_virtual_size",
                                                    "ref")

    @mock.patch.object(vm_utils, '_get_resize_func_name')
    def test_vdi_resize(self, mock_get_resize_func_name):
        # The resize method name depends on the SR type; size is stringified.
        session = mock.Mock()
        mock_get_resize_func_name.return_value = "VDI.fake"
        vm_utils._vdi_resize(session, "ref", 123)
        session.call_xenapi.assert_called_once_with("VDI.fake", "ref", "123")

    @mock.patch.object(vm_utils, '_vdi_resize')
    @mock.patch.object(vm_utils, '_vdi_get_virtual_size')
    def test_update_vdi_virtual_size_works(self, mock_get_size, mock_resize):
        # Current size just below 1 GB: resize up to exactly 1 GB.
        mock_get_size.return_value = (1024 ** 3) - 1
        instance = {"uuid": "a"}

        vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)

        mock_get_size.assert_called_once_with("s", "ref")
        mock_resize.assert_called_once_with("s", "ref", 1024 ** 3)

    @mock.patch.object(vm_utils, '_vdi_resize')
    @mock.patch.object(vm_utils, '_vdi_get_virtual_size')
    def test_update_vdi_virtual_size_skips_resize_down(self, mock_get_size,
                                                       mock_resize):
        # Already at the requested size: no resize call.
        mock_get_size.return_value = 1024 ** 3
        instance = {"uuid": "a"}

        vm_utils.update_vdi_virtual_size("s", instance, "ref", 1)

        mock_get_size.assert_called_once_with("s", "ref")
        self.assertFalse(mock_resize.called)

    @mock.patch.object(vm_utils, '_vdi_resize')
    @mock.patch.object(vm_utils, '_vdi_get_virtual_size')
    def test_update_vdi_virtual_size_raise_if_disk_big(self, mock_get_size,
                                                       mock_resize):
        # Shrinking is not supported: a larger current size raises.
        mock_get_size.return_value = 1024 ** 3 + 1
        instance = {"uuid": "a"}

        self.assertRaises(exception.ResizeError,
                          vm_utils.update_vdi_virtual_size,
                          "s", instance, "ref", 1)

        mock_get_size.assert_called_once_with("s", "ref")
        self.assertFalse(mock_resize.called)
+
+
@mock.patch.object(vm_utils, '_vdi_get_rec')
@mock.patch.object(vm_utils, '_vbd_get_rec')
@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
class GetVdiForVMTestCase(VMUtilsTestBase):
    """Tests for get_vdi_for_vm_safely userdevice matching."""

    def test_get_vdi_for_vm_safely(self, vm_get_vbd_refs,
                                   vbd_get_rec, vdi_get_rec):
        # The first VBD with userdevice '0' supplies the VDI.
        session = "session"

        vm_get_vbd_refs.return_value = ["a", "b"]
        vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}
        vdi_get_rec.return_value = {}

        result = vm_utils.get_vdi_for_vm_safely(session, "vm_ref")
        self.assertEqual(('vdi_ref', {}), result)

        vm_get_vbd_refs.assert_called_once_with(session, "vm_ref")
        # Only the first VBD is inspected since it already matches.
        vbd_get_rec.assert_called_once_with(session, "a")
        vdi_get_rec.assert_called_once_with(session, "vdi_ref")

    def test_get_vdi_for_vm_safely_fails(self, vm_get_vbd_refs,
                                         vbd_get_rec, vdi_get_rec):
        # No VBD with the requested userdevice: every VBD is inspected,
        # no VDI record is fetched, and the lookup raises.
        session = "session"

        vm_get_vbd_refs.return_value = ["a", "b"]
        vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}

        self.assertRaises(exception.NovaException,
                          vm_utils.get_vdi_for_vm_safely,
                          session, "vm_ref", userdevice='1')

        self.assertEqual([], vdi_get_rec.call_args_list)
        self.assertEqual(2, len(vbd_get_rec.call_args_list))
+
+
@mock.patch.object(vm_utils, '_vdi_get_uuid')
@mock.patch.object(vm_utils, '_vbd_get_rec')
@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
class GetAllVdiForVMTestCase(VMUtilsTestBase):
    """Tests for get_all_vdi_uuids_for_vm filtering by userdevice."""

    def _setup_get_all_vdi_uuids_for_vm(self, vm_get_vbd_refs,
                                        vbd_get_rec, vdi_get_uuid):
        # Two fake VBDs at userdevices "0" and "2"; each maps to a VDI
        # whose ref (and uuid) is derived from the VBD ref.
        def fake_vbd_get_rec(session, vbd_ref):
            return {'userdevice': vbd_ref, 'VDI': "vdi_ref_%s" % vbd_ref}

        def fake_vdi_get_uuid(session, vdi_ref):
            return vdi_ref

        vm_get_vbd_refs.return_value = ["0", "2"]
        vbd_get_rec.side_effect = fake_vbd_get_rec
        vdi_get_uuid.side_effect = fake_vdi_get_uuid

    def test_get_all_vdi_uuids_for_vm_works(self, vm_get_vbd_refs,
                                            vbd_get_rec, vdi_get_uuid):
        self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
                                             vbd_get_rec, vdi_get_uuid)

        result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref")
        self.assertEqual(['vdi_ref_0', 'vdi_ref_2'], list(result))

    def test_get_all_vdi_uuids_for_vm_finds_none(self, vm_get_vbd_refs,
                                                 vbd_get_rec, vdi_get_uuid):
        self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
                                             vbd_get_rec, vdi_get_uuid)

        # min_userdevice=1 filters out the device-0 VDI.
        result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref",
                                                   min_userdevice=1)
        self.assertEqual(["vdi_ref_2"], list(result))
+
+
class GetAllVdisTestCase(VMUtilsTestBase):
    """Tests for _get_all_vdis_in_sr record filtering."""

    def test_get_all_vdis_in_sr(self):

        # Only ref "2" yields a record; the other VDI is skipped.
        def fake_get_rec(record_type, ref):
            if ref == "2":
                return "vdi_rec_2"

        session = mock.Mock()
        session.call_xenapi.return_value = ["1", "2"]
        session.get_rec.side_effect = fake_get_rec

        sr_ref = "sr_ref"
        actual = list(vm_utils._get_all_vdis_in_sr(session, sr_ref))
        self.assertEqual(actual, [('2', 'vdi_rec_2')])

        session.call_xenapi.assert_called_once_with("SR.get_VDIs", sr_ref)
+
+
class VDIAttachedHere(VMUtilsTestBase):
    """Tests for the vdi_attached_here context manager."""

    @mock.patch.object(vm_utils, 'destroy_vbd')
    @mock.patch.object(vm_utils, '_get_this_vm_ref')
    @mock.patch.object(vm_utils, 'create_vbd')
    @mock.patch.object(vm_utils, '_remap_vbd_dev')
    @mock.patch.object(vm_utils, '_wait_for_device')
    @mock.patch.object(utils, 'execute')
    def test_sync_called(self, mock_execute, mock_wait_for_device,
                         mock_remap_vbd_dev, mock_create_vbd,
                         mock_get_this_vm_ref, mock_destroy_vbd):
        # Leaving the context manager must flush writes via `sync`.
        session = _get_fake_session()
        with vm_utils.vdi_attached_here(session, 'vdi_ref'):
            pass
        mock_execute.assert_called_with('sync', run_as_root=True)
+
+
class SnapshotAttachedHereTestCase(VMUtilsTestBase):
    """Tests for snapshot_attached_here and the VHD coalesce helpers."""

    @mock.patch.object(vm_utils, '_snapshot_attached_here_impl')
    def test_snapshot_attached_here(self, mock_impl):
        # The public wrapper forwards its arguments, defaulting userdevice
        # to '0' and the post-snapshot callback to None.
        def fake_impl(session, instance, vm_ref, label, userdevice,
                      post_snapshot_callback):
            self.assertEqual("session", session)
            self.assertEqual("instance", instance)
            self.assertEqual("vm_ref", vm_ref)
            self.assertEqual("label", label)
            self.assertEqual('0', userdevice)
            self.assertIsNone(post_snapshot_callback)
            yield "fake"

        mock_impl.side_effect = fake_impl

        with vm_utils.snapshot_attached_here("session", "instance", "vm_ref",
                                             "label") as result:
            self.assertEqual("fake", result)

        mock_impl.assert_called_once_with("session", "instance", "vm_ref",
                                          "label", '0', None)

    @mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
    @mock.patch.object(vm_utils, 'safe_destroy_vdis')
    @mock.patch.object(vm_utils, '_walk_vdi_chain')
    @mock.patch.object(vm_utils, '_wait_for_vhd_coalesce')
    @mock.patch.object(vm_utils, '_vdi_get_uuid')
    @mock.patch.object(vm_utils, '_vdi_snapshot')
    @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
    def test_snapshot_attached_here_impl(self, mock_get_vdi_for_vm_safely,
            mock_vdi_snapshot, mock_vdi_get_uuid,
            mock_wait_for_vhd_coalesce, mock_walk_vdi_chain,
            mock_safe_destroy_vdis, mock_delete_snapshots_in_vdi_chain):
        session = "session"
        instance = {"uuid": "uuid"}
        mock_callback = mock.Mock()

        mock_get_vdi_for_vm_safely.return_value = ("vdi_ref",
                                                   {"SR": "sr_ref",
                                                    "uuid": "vdi_uuid"})
        mock_vdi_snapshot.return_value = "snap_ref"
        mock_vdi_get_uuid.return_value = "snap_uuid"
        mock_walk_vdi_chain.return_value = [{"uuid": "a"}, {"uuid": "b"}]

        try:
            with vm_utils.snapshot_attached_here(session, instance, "vm_ref",
                    "label", '2', mock_callback) as result:
                self.assertEqual(["a", "b"], result)
                raise test.TestingException()
            # Only reached if the context manager swallowed the exception,
            # which would be a bug.
            self.assertTrue(False)
        except test.TestingException:
            pass

        mock_get_vdi_for_vm_safely.assert_called_once_with(session, "vm_ref",
                                                           '2')
        mock_vdi_snapshot.assert_called_once_with(session, "vdi_ref")
        mock_wait_for_vhd_coalesce.assert_called_once_with(session, instance,
            "sr_ref", "vdi_ref", ['a', 'b'])
        mock_vdi_get_uuid.assert_called_once_with(session, "snap_ref")
        mock_walk_vdi_chain.assert_has_calls([mock.call(session, "vdi_uuid"),
                                              mock.call(session, "snap_uuid")])
        mock_callback.assert_called_once_with(
            task_state="image_pending_upload")
        # Cleanup runs even though the body raised.
        mock_safe_destroy_vdis.assert_called_once_with(session, ["snap_ref"])
        mock_delete_snapshots_in_vdi_chain.assert_called_once_with(session,
            instance, ['a', 'b'], "sr_ref")

    @mock.patch.object(greenthread, 'sleep')
    def test_wait_for_vhd_coalesce_leaf_node(self, mock_sleep):
        # A single-entry chain is already coalesced: no waiting.
        instance = {"uuid": "fake"}
        vm_utils._wait_for_vhd_coalesce("session", instance,
                                        "sr_ref", "vdi_ref", ["uuid"])
        self.assertFalse(mock_sleep.called)

    @mock.patch.object(vm_utils, '_count_children')
    @mock.patch.object(greenthread, 'sleep')
    def test_wait_for_vhd_coalesce_parent_snapshot(self, mock_sleep,
                                                   mock_count):
        # A parent with multiple children cannot coalesce: return at once.
        mock_count.return_value = 2
        instance = {"uuid": "fake"}

        vm_utils._wait_for_vhd_coalesce("session", instance,
                                        "sr_ref", "vdi_ref",
                                        ["uuid1", "uuid2"])

        self.assertFalse(mock_sleep.called)
        self.assertTrue(mock_count.called)

    @mock.patch.object(greenthread, 'sleep')
    @mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
    @mock.patch.object(vm_utils, '_count_children')
    @mock.patch.object(vm_utils, '_scan_sr')
    def test_wait_for_vhd_coalesce_raises(self, mock_scan_sr,
            mock_count, mock_get_vhd_parent_uuid, mock_sleep):
        # If the parent never matches, we rescan/sleep 20 times then fail.
        mock_count.return_value = 1
        instance = {"uuid": "fake"}

        self.assertRaises(exception.NovaException,
                          vm_utils._wait_for_vhd_coalesce, "session", instance,
                          "sr_ref", "vdi_ref", ["uuid1", "uuid2"])

        self.assertTrue(mock_count.called)
        self.assertEqual(20, mock_sleep.call_count)
        self.assertEqual(20, mock_scan_sr.call_count)

    @mock.patch.object(greenthread, 'sleep')
    @mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
    @mock.patch.object(vm_utils, '_count_children')
    @mock.patch.object(vm_utils, '_scan_sr')
    def test_wait_for_vhd_coalesce_success(self, mock_scan_sr,
            mock_count, mock_get_vhd_parent_uuid, mock_sleep):
        # Second scan sees the expected parent uuid: one sleep, two scans.
        mock_count.return_value = 1
        instance = {"uuid": "fake"}
        mock_get_vhd_parent_uuid.side_effect = ["bad", "uuid2"]

        vm_utils._wait_for_vhd_coalesce("session", instance,
                                        "sr_ref", "vdi_ref",
                                        ["uuid1", "uuid2"])

        self.assertEqual(1, mock_sleep.call_count)
        self.assertEqual(2, mock_scan_sr.call_count)

    @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
    def test_count_children(self, mock_get_all_vdis_in_sr):
        # Two of the three VDIs point at parent1.
        vdis = [('child1', {'sm_config': {'vhd-parent': 'parent1'}}),
                ('child2', {'sm_config': {'vhd-parent': 'parent2'}}),
                ('child3', {'sm_config': {'vhd-parent': 'parent1'}})]
        mock_get_all_vdis_in_sr.return_value = vdis
        self.assertEqual(2, vm_utils._count_children('session',
                                                     'parent1', 'sr'))
+
+
class ImportMigratedDisksTestCase(VMUtilsTestBase):
    """Tests for importing migrated VHD chains on the destination host."""

    @mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks')
    @mock.patch.object(vm_utils, '_import_migrated_root_disk')
    def test_import_all_migrated_disks(self, mock_root, mock_ephemeral):
        # The aggregate helper just combines the root and ephemeral
        # import results into a single dict.
        session = "session"
        instance = "instance"
        mock_root.return_value = "root_vdi"
        mock_ephemeral.return_value = ["a", "b"]

        result = vm_utils.import_all_migrated_disks(session, instance)

        expected = {'root': 'root_vdi', 'ephemerals': ["a", "b"]}
        self.assertEqual(expected, result)
        mock_root.assert_called_once_with(session, instance)
        mock_ephemeral.assert_called_once_with(session, instance)

    @mock.patch.object(vm_utils, '_import_migrated_vhds')
    def test_import_migrated_root_disk(self, mock_migrate):
        # The root disk chain is labelled with the bare instance uuid.
        mock_migrate.return_value = "foo"
        instance = {"uuid": "uuid", "name": "name"}

        result = vm_utils._import_migrated_root_disk("s", instance)

        self.assertEqual("foo", result)
        mock_migrate.assert_called_once_with("s", instance, "uuid", "root",
                "name")

    @mock.patch.object(vm_utils, '_import_migrated_vhds')
    def test_import_migrate_ephemeral_disks(self, mock_migrate):
        # 4000GB of ephemeral space is imported as two separately
        # labelled chains ("uuid_ephemeral_1"/"uuid_ephemeral_2"); the
        # result is keyed '4' and '5' -- presumably the userdevice
        # numbers assigned to ephemeral disks (confirm in vm_utils).
        mock_migrate.return_value = "foo"
        instance = {"uuid": "uuid", "name": "name", "ephemeral_gb": 4000}

        result = vm_utils._import_migrate_ephemeral_disks("s", instance)

        self.assertEqual({'4': 'foo', '5': 'foo'}, result)
        expected_calls = [mock.call("s", instance, "uuid_ephemeral_1",
                                    "ephemeral", "name ephemeral (1)"),
                          mock.call("s", instance, "uuid_ephemeral_2",
                                    "ephemeral", "name ephemeral (2)")]
        self.assertEqual(expected_calls, mock_migrate.call_args_list)

    @mock.patch.object(vm_utils, '_set_vdi_info')
    @mock.patch.object(vm_utils, 'scan_default_sr')
    @mock.patch.object(vm_utils, 'get_sr_path')
    def test_import_migrated_vhds(self, mock_get_sr_path, mock_scan_sr,
            mock_set_info):
        # Full check of a single chain import: the 'migration' plugin
        # moves the VHDs into the SR, the SR is rescanned, the new VDI
        # is looked up by uuid, and its info/label metadata is set.
        session = mock.Mock()
        instance = {"uuid": "uuid"}
        session.call_plugin_serialized.return_value = {"root": {"uuid": "a"}}
        session.call_xenapi.return_value = "vdi_ref"
        mock_get_sr_path.return_value = "sr_path"

        result = vm_utils._import_migrated_vhds(session, instance,
                'chain_label', 'disk_type', 'vdi_label')

        expected = {'uuid': "a", 'ref': "vdi_ref"}
        self.assertEqual(expected, result)
        mock_get_sr_path.assert_called_once_with(session)
        session.call_plugin_serialized.assert_called_once_with('migration',
                'move_vhds_into_sr', instance_uuid='chain_label',
                sr_path='sr_path', uuid_stack=mock.ANY)
        mock_scan_sr.assert_called_once_with(session)
        session.call_xenapi.assert_called_once_with('VDI.get_by_uuid', 'a')
        mock_set_info.assert_called_once_with(session, 'vdi_ref', 'disk_type',
                'vdi_label', 'disk_type', instance)

    def test_get_vhd_parent_uuid_rec_provided(self):
        # When the VDI record is passed in, no XAPI round-trip is made;
        # an sm_config without a vhd-parent entry yields None.
        session = mock.Mock()
        vdi_ref = 'vdi_ref'
        vdi_rec = {'sm_config': {}}
        self.assertIsNone(vm_utils._get_vhd_parent_uuid(session,
                                                        vdi_ref,
                                                        vdi_rec))
        self.assertFalse(session.call_xenapi.called)
+
+
class MigrateVHDTestCase(VMUtilsTestBase):
    """Tests for vm_utils.migrate_vhd, which pushes a VHD chain to a
    remote host via the 'migration' XAPI plugin.
    """

    def _assert_transfer_called(self, session, label):
        # Exactly one transfer_vhd plugin call, tagged with the given
        # chain label, is expected per migrate_vhd invocation.
        session.call_plugin_serialized.assert_called_once_with(
            'migration', 'transfer_vhd', instance_uuid=label, host="dest",
            vdi_uuid="vdi_uuid", sr_path="sr_path", seq_num=2)

    def test_migrate_vhd_root(self):
        # A root disk is labelled with the bare instance uuid.
        fake_session = mock.Mock()

        vm_utils.migrate_vhd(fake_session, {"uuid": "a"}, "vdi_uuid",
                             "dest", "sr_path", 2)

        self._assert_transfer_called(fake_session, "a")

    def test_migrate_vhd_ephemeral(self):
        # Ephemeral disks get an "_ephemeral_<n>" suffix on the label.
        fake_session = mock.Mock()

        vm_utils.migrate_vhd(fake_session, {"uuid": "a"}, "vdi_uuid",
                             "dest", "sr_path", 2, 2)

        self._assert_transfer_called(fake_session, "a_ephemeral_2")

    def test_migrate_vhd_converts_exceptions(self):
        # XenAPI failures raised by the plugin call are wrapped in
        # MigrationError rather than leaking to the caller.
        fake_session = mock.Mock()
        fake_session.XenAPI.Failure = test.TestingException
        fake_session.call_plugin_serialized.side_effect = (
            test.TestingException())

        self.assertRaises(exception.MigrationError, vm_utils.migrate_vhd,
                          fake_session, {"uuid": "a"}, "vdi_uuid", "dest",
                          "sr_path", 2)
        self._assert_transfer_called(fake_session, "a")
+
+
class StripBaseMirrorTestCase(VMUtilsTestBase):
    """Tests removing the 'base_mirror' sm_config key left by Storage
    XenMotion.
    """

    def test_strip_base_mirror_from_vdi_works(self):
        fake_session = mock.Mock()

        vm_utils._try_strip_base_mirror_from_vdi(fake_session, "vdi_ref")

        fake_session.call_xenapi.assert_called_once_with(
            "VDI.remove_from_sm_config", "vdi_ref", "base_mirror")

    def test_strip_base_mirror_from_vdi_hides_error(self):
        # Stripping is best-effort: a XenAPI failure is swallowed, not
        # propagated.
        fake_session = mock.Mock()
        fake_session.XenAPI.Failure = test.TestingException
        fake_session.call_xenapi.side_effect = test.TestingException()

        vm_utils._try_strip_base_mirror_from_vdi(fake_session, "vdi_ref")

        fake_session.call_xenapi.assert_called_once_with(
            "VDI.remove_from_sm_config", "vdi_ref", "base_mirror")

    @mock.patch.object(vm_utils, '_try_strip_base_mirror_from_vdi')
    def test_strip_base_mirror_from_vdis(self, mock_strip):
        # Fake a VM with two VBDs, each resolving to a same-numbered VDI.
        def fake_call_xenapi(method, arg):
            if method == "VM.get_VBDs":
                return ['VBD_ref_1', 'VBD_ref_2']
            if method == "VBD.get_VDI":
                return 'VDI' + arg[3:]
            return "Unexpected call_xenapi: %s.%s" % (method, arg)

        fake_session = mock.Mock()
        fake_session.call_xenapi.side_effect = fake_call_xenapi

        vm_utils.strip_base_mirror_from_vdis(fake_session, "vm_ref")

        # The VBD list is fetched once, then each VBD is resolved...
        self.assertEqual([mock.call('VM.get_VBDs', "vm_ref"),
                          mock.call('VBD.get_VDI', "VBD_ref_1"),
                          mock.call('VBD.get_VDI', "VBD_ref_2")],
                         fake_session.call_xenapi.call_args_list)

        # ...and every resolved VDI gets its base_mirror stripped.
        self.assertEqual([mock.call(fake_session, "VDI_ref_1"),
                          mock.call(fake_session, "VDI_ref_2")],
                         mock_strip.call_args_list)
+
+
class DeviceIdTestCase(VMUtilsTestBase):
    """Tests for resolving the platform device_id from image metadata."""

    def test_device_id_is_none_if_not_specified_in_meta_data(self):
        image_meta = {}
        session = mock.Mock()
        session.product_version = (6, 1, 0)
        self.assertIsNone(vm_utils.get_vm_device_id(session, image_meta))

    def test_get_device_id_if_hypervisor_version_is_greater_than_6_1(self):
        # A requested device_id is honoured on hypervisor 6.2 and newer.
        image_meta = {'xenapi_device_id': '0002'}
        session = mock.Mock()
        session.product_version = (6, 2, 0)
        self.assertEqual('0002',
                         vm_utils.get_vm_device_id(session, image_meta))
        session.product_version = (6, 3, 1)
        self.assertEqual('0002',
                         vm_utils.get_vm_device_id(session, image_meta))

    def test_raise_exception_if_device_id_not_supported_by_hyp_version(self):
        # On an older hypervisor the request must fail, and the error
        # message must render the version whether it is a tuple or a
        # plain string.  NOTE: exc.message is Python 2 only.
        image_meta = {'xenapi_device_id': '0002'}
        session = mock.Mock()
        session.product_version = (6, 0)
        exc = self.assertRaises(exception.NovaException,
                                vm_utils.get_vm_device_id, session, image_meta)
        self.assertEqual("Device id 0002 specified is not supported by "
                         "hypervisor version (6, 0)", exc.message)
        # ('6a') is just the string '6a', not a one-element tuple.
        session.product_version = ('6a')
        exc = self.assertRaises(exception.NovaException,
                                vm_utils.get_vm_device_id, session, image_meta)
        self.assertEqual("Device id 0002 specified is not supported by "
                         "hypervisor version 6a", exc.message)
+
+
class CreateVmRecordTestCase(VMUtilsTestBase):
    """Tests the VM record passed to VM.create and VM enumeration."""

    @mock.patch.object(flavors, 'extract_flavor')
    def test_create_vm_record_linux(self, mock_extract_flavor):
        instance = {"uuid": "uuid123", "os_type": "linux"}
        self._test_create_vm_record(mock_extract_flavor, instance, False)

    @mock.patch.object(flavors, 'extract_flavor')
    def test_create_vm_record_windows(self, mock_extract_flavor):
        # Windows guests are the only ones expected to get viridian
        # (Hyper-V) enlightenments enabled.
        instance = {"uuid": "uuid123", "os_type": "windows"}
        self._test_create_vm_record(mock_extract_flavor, instance, True)

    def _test_create_vm_record(self, mock_extract_flavor, instance,
                               is_viridian):
        # Build a VM via create_vm and verify the complete record handed
        # to XAPI, including memory sizing derived from the flavor
        # (1024MB -> 1073741824 bytes) and the platform flags.
        session = _get_fake_session()
        flavor = {"memory_mb": 1024, "vcpus": 1, "vcpu_weight": 2}
        mock_extract_flavor.return_value = flavor

        vm_utils.create_vm(session, instance, "name", "kernel", "ramdisk",
                           device_id="0002")

        is_viridian_str = str(is_viridian).lower()

        expected_vm_rec = {
            'VCPUs_params': {'cap': '0', 'weight': '2'},
            'PV_args': '',
            'memory_static_min': '0',
            'ha_restart_priority': '',
            'HVM_boot_policy': 'BIOS order',
            'PV_bootloader': '',
            'tags': [],
            'VCPUs_max': '1',
            'memory_static_max': '1073741824',
            'actions_after_shutdown': 'destroy',
            'memory_dynamic_max': '1073741824',
            'user_version': '0',
            'xenstore_data': {'vm-data/allowvssprovider': 'false'},
            'blocked_operations': {},
            'is_a_template': False,
            'name_description': '',
            'memory_dynamic_min': '1073741824',
            'actions_after_crash': 'destroy',
            'memory_target': '1073741824',
            'PV_ramdisk': '',
            'PV_bootloader_args': '',
            'PCI_bus': '',
            'other_config': {'nova_uuid': 'uuid123'},
            'name_label': 'name',
            'actions_after_reboot': 'restart',
            'VCPUs_at_startup': '1',
            'HVM_boot_params': {'order': 'dc'},
            'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
                         'timeoffset': '0', 'viridian': is_viridian_str,
                         'acpi': 'true', 'device_id': '0002'},
            'PV_legacy_args': '',
            'PV_kernel': '',
            'affinity': '',
            'recommendations': '',
            'ha_always_run': False}

        session.call_xenapi.assert_called_with('VM.create', expected_vm_rec)

    def test_list_vms(self):
        # Uses the full fake-XenAPI session rather than mocks, so the
        # usual connection flags must be configured first.
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')

        fake.create_vm("foo1", "Halted")
        vm_ref = fake.create_vm("foo2", "Running")

        stubs.stubout_session(self.stubs, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)

        result = list(vm_utils.list_vms(driver._session))

        # Will have 3 VMs - but one is Dom0 and one is not running on the host
        self.assertEqual(len(driver._session.call_xenapi('VM.get_all')), 3)
        self.assertEqual(len(result), 1)

        result_keys = [key for (key, value) in result]

        self.assertIn(vm_ref, result_keys)
+
+
class ChildVHDsTestCase(test.NoDBTestCase):
    """Tests for finding child VHDs of a chain and snapshot detection."""

    # A fixed SR population: one parentless VDI ("my-uuid"), one with an
    # unrelated parent, one plain child of "my-uuid" and one snapshot
    # child of "my-uuid".
    all_vdis = [
        ("my-vdi-ref",
         {"uuid": "my-uuid", "sm_config": {},
          "is_a_snapshot": False, "other_config": {}}),
        ("non-parent",
         {"uuid": "uuid-1", "sm_config": {},
          "is_a_snapshot": False, "other_config": {}}),
        ("diff-parent",
         {"uuid": "uuid-1", "sm_config": {"vhd-parent": "other-uuid"},
          "is_a_snapshot": False, "other_config": {}}),
        ("child",
         {"uuid": "uuid-child", "sm_config": {"vhd-parent": "my-uuid"},
          "is_a_snapshot": False, "other_config": {}}),
        ("child-snap",
         {"uuid": "uuid-child-snap", "sm_config": {"vhd-parent": "my-uuid"},
         "is_a_snapshot": True, "other_config": {}}),
        ]

    @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
    def test_child_vhds_defaults(self, mock_get_all):
        # By default both plain children and snapshot children match.
        mock_get_all.return_value = self.all_vdis

        result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"])

        self.assertEqual(['uuid-child', 'uuid-child-snap'], result)

    @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
    def test_child_vhds_only_snapshots(self, mock_get_all):
        # old_snapshots_only restricts the result to snapshot children.
        mock_get_all.return_value = self.all_vdis

        result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"],
                                      old_snapshots_only=True)

        self.assertEqual(['uuid-child-snap'], result)

    @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
    def test_child_vhds_chain(self, mock_get_all):
        # Passing several parent uuids must not produce duplicates; the
        # "diff-parent" VDI is not a snapshot so it is still excluded.
        mock_get_all.return_value = self.all_vdis

        result = vm_utils._child_vhds("session", "sr_ref",
                ["my-uuid", "other-uuid"], old_snapshots_only=True)

        self.assertEqual(['uuid-child-snap'], result)

    def test_is_vdi_a_snapshot_works(self):
        vdi_rec = {"is_a_snapshot": True,
                   "other_config": {}}

        self.assertTrue(vm_utils._is_vdi_a_snapshot(vdi_rec))

    def test_is_vdi_a_snapshot_base_images_false(self):
        # A cached glance base image is marked with image-id and must not
        # be treated as a snapshot (it must never be coalesced away).
        vdi_rec = {"is_a_snapshot": True,
                   "other_config": {"image-id": "fake"}}

        self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))

    def test_is_vdi_a_snapshot_false_for_non_snapshot(self):
        vdi_rec = {"is_a_snapshot": False,
                   "other_config": {}}

        self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
+
+
class RemoveOldSnapshotsTestCase(test.NoDBTestCase):
    """Tests for cleaning up stale snapshot VDIs in a VM's VHD chain."""

    @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
    @mock.patch.object(vm_utils, '_walk_vdi_chain')
    @mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
    def test_remove_old_snapshots(self, mock_delete, mock_walk, mock_get):
        # The VM's root VDI is located, its chain walked, and the chain
        # uuids handed on to the deletion helper together with the SR.
        instance = {"uuid": "fake"}
        mock_get.return_value = ("ref", {"uuid": "vdi", "SR": "sr_ref"})
        mock_walk.return_value = [{"uuid": "uuid1"}, {"uuid": "uuid2"}]

        vm_utils.remove_old_snapshots("session", instance, "vm_ref")

        mock_delete.assert_called_once_with("session", instance,
                ["uuid1", "uuid2"], "sr_ref")
        mock_get.assert_called_once_with("session", "vm_ref")
        mock_walk.assert_called_once_with("session", "vdi")

    @mock.patch.object(vm_utils, '_child_vhds')
    def test_delete_snapshots_in_vdi_chain_no_chain(self, mock_child):
        # A single-element chain has no parent to inspect; nothing to do.
        instance = {"uuid": "fake"}

        vm_utils._delete_snapshots_in_vdi_chain("session", instance,
                ["uuid"], "sr")

        self.assertFalse(mock_child.called)

    @mock.patch.object(vm_utils, '_child_vhds')
    def test_delete_snapshots_in_vdi_chain_no_snapshots(self, mock_child):
        # Only the chain's parent ("uuid2") is searched for old snapshot
        # children; with none found no destruction happens.
        instance = {"uuid": "fake"}
        mock_child.return_value = []

        vm_utils._delete_snapshots_in_vdi_chain("session", instance,
                ["uuid1", "uuid2"], "sr")

        mock_child.assert_called_once_with("session", "sr", ["uuid2"],
                old_snapshots_only=True)

    @mock.patch.object(vm_utils, '_scan_sr')
    @mock.patch.object(vm_utils, 'safe_destroy_vdis')
    @mock.patch.object(vm_utils, '_child_vhds')
    def test_delete_snapshots_in_vdi_chain_calls_destroy(self, mock_child,
            mock_destroy, mock_scan):
        # Each stale snapshot uuid is resolved to a ref, the refs are
        # destroyed in one batch, and the SR is rescanned afterwards so
        # XAPI notices the removal.
        instance = {"uuid": "fake"}
        mock_child.return_value = ["suuid1", "suuid2"]
        session = mock.Mock()
        session.VDI.get_by_uuid.side_effect = ["ref1", "ref2"]

        vm_utils._delete_snapshots_in_vdi_chain(session, instance,
                ["uuid1", "uuid2"], "sr")

        mock_child.assert_called_once_with(session, "sr", ["uuid2"],
                old_snapshots_only=True)
        session.VDI.get_by_uuid.assert_has_calls([
                mock.call("suuid1"), mock.call("suuid2")])
        mock_destroy.assert_called_once_with(session, ["ref1", "ref2"])
        mock_scan.assert_called_once_with(session, "sr")
+
+
class ResizeFunctionTestCase(test.NoDBTestCase):
    """Selection of the XAPI VDI-resize call by product brand/version.

    The cases below pin: XenServer < 6.0 and XCP < 1.2 use
    VDI.resize_online; everything newer, unknown or unparsable falls
    back to plain VDI.resize.
    """

    def _call_get_resize_func_name(self, brand, version):
        fake_session = mock.Mock()
        fake_session.product_brand = brand
        fake_session.product_version = version
        return vm_utils._get_resize_func_name(fake_session)

    def _test_is_resize(self, brand, version):
        self.assertEqual("VDI.resize",
                         self._call_get_resize_func_name(brand, version))

    def _test_is_resize_online(self, brand, version):
        self.assertEqual("VDI.resize_online",
                         self._call_get_resize_func_name(brand, version))

    def test_xenserver_5_5(self):
        self._test_is_resize_online("XenServer", (5, 5, 0))

    def test_xenserver_6_0(self):
        self._test_is_resize("XenServer", (6, 0, 0))

    def test_xcp_1_1(self):
        self._test_is_resize_online("XCP", (1, 1, 0))

    def test_xcp_1_2(self):
        self._test_is_resize("XCP", (1, 2, 0))

    def test_xcp_2_0(self):
        self._test_is_resize("XCP", (2, 0, 0))

    def test_random_brand(self):
        self._test_is_resize("asfd", (1, 1, 0))

    def test_default(self):
        self._test_is_resize(None, None)

    def test_empty(self):
        self._test_is_resize("", "")

    def test_bad_version(self):
        self._test_is_resize("XenServer", "asdf")
+
+
class VMInfoTests(VMUtilsTestBase):
    """Tests for power-state mapping and VM info reporting."""

    def setUp(self):
        super(VMInfoTests, self).setUp()
        self.session = mock.Mock()

    def test_get_power_state_valid(self):
        # Save on test setup calls by having these simple tests in one method
        self.session.call_xenapi.return_value = "Running"
        self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
                         power_state.RUNNING)

        self.session.call_xenapi.return_value = "Halted"
        self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
                         power_state.SHUTDOWN)

        self.session.call_xenapi.return_value = "Paused"
        self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
                         power_state.PAUSED)

        self.session.call_xenapi.return_value = "Suspended"
        self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
                         power_state.SUSPENDED)

        self.session.call_xenapi.return_value = "Crashed"
        self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
                         power_state.CRASHED)

    def test_get_power_state_invalid(self):
        # An unknown XenAPI power state has no mapping and raises KeyError.
        self.session.call_xenapi.return_value = "Invalid"
        self.assertRaises(KeyError,
                          vm_utils.get_power_state, self.session, "ref")

    # Canned VM.get_* answers; keys match the 'VM.get_<field>' suffix.
    _XAPI_record = {'power_state': 'Running',
                    'memory_static_max': str(10 << 10),
                    'memory_dynamic_max': str(9 << 10),
                    'VCPUs_max': '5'}

    def test_compile_info(self):

        def call_xenapi(method, *args):
            # Route any VM.get_<field>('dummy') to the canned record.
            if method.startswith('VM.get_') and args[0] == 'dummy':
                return self._XAPI_record[method[7:]]

        self.session.call_xenapi.side_effect = call_xenapi

        # 10240 / 9216 come back as 10 / 9, i.e. compile_info apparently
        # reports memory shifted down by 10 bits (units not visible from
        # this test -- confirm against vm_utils.compile_info).  Note the
        # Python 2 long literals.
        expected = {'state': power_state.RUNNING,
                    'max_mem': 10L,
                    'mem': 9L,
                    'num_cpu': '5',
                    'cpu_time': 0}

        self.assertEqual(vm_utils.compile_info(self.session, "dummy"),
                         expected)
diff --git a/nova/tests/unit/virt/xenapi/test_vmops.py b/nova/tests/unit/virt/xenapi/test_vmops.py
new file mode 100644
index 0000000000..8140f997d2
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_vmops.py
@@ -0,0 +1,1124 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from eventlet import greenthread
+import mock
+
+from nova.compute import power_state
+from nova.compute import task_states
+from nova import context
+from nova import exception
+from nova import objects
+from nova.pci import manager as pci_manager
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt import fake
+from nova.virt.xenapi import agent as xenapi_agent
+from nova.virt.xenapi.client import session as xenapi_session
+from nova.virt.xenapi import fake as xenapi_fake
+from nova.virt.xenapi import vm_utils
+from nova.virt.xenapi import vmops
+from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import volumeops
+
+
class VMOpsTestBase(stubs.XenAPITestBaseNoDB):
    """Shared fixture: a VMOps backed by the fake XenAPI session, plus
    bookkeeping so any VMs a test creates are destroyed on teardown.
    """

    def setUp(self):
        super(VMOpsTestBase, self).setUp()
        self._setup_mock_vmops()
        self.vms = []

    def _setup_mock_vmops(self, product_brand=None, product_version=None):
        stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
        self._session = xenapi_session.XenAPISession(
            'test_url', 'root', 'test_pass')
        self.vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())

    def create_vm(self, name, state="Running"):
        """Create a fake VM, track it for teardown, return (record, ref)."""
        ref = xenapi_fake.create_vm(name, state)
        self.vms.append(ref)
        record = xenapi_fake.get_record("VM", ref)
        return record, ref

    def tearDown(self):
        super(VMOpsTestBase, self).tearDown()
        for ref in self.vms:
            xenapi_fake.destroy_vm(ref)
+
+
class VMOpsTestCase(VMOpsTestBase):
    """VMOps tests that use a bare stub session instead of the fake
    XenAPI session set up by the base class.
    """

    def setUp(self):
        super(VMOpsTestCase, self).setUp()
        self._setup_mock_vmops()

    def _setup_mock_vmops(self, product_brand=None, product_version=None):
        # Overrides the base class: replaces the fake-XenAPI session with
        # a minimal stub carrying just the product attributes.
        self._session = self._get_mock_session(product_brand, product_version)
        self._vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())

    def _get_mock_session(self, product_brand, product_version):
        class Mock(object):
            pass

        mock_session = Mock()
        mock_session.product_brand = product_brand
        mock_session.product_version = product_version
        return mock_session

    def _test_finish_revert_migration_after_crash(self, backup_made, new_made,
                                                  vm_shutdown=True):
        """Script the expected recovery sequence with mox.

        backup_made: the 'foo-orig' backup VM exists (rename happened
        before the crash); new_made: the new 'foo' VM also exists (crash
        happened mid-copy, so it must be destroyed before the backup is
        renamed back).
        """
        instance = {'name': 'foo',
                    'task_state': task_states.RESIZE_MIGRATING}
        context = 'fake_context'

        self.mox.StubOutWithMock(vm_utils, 'lookup')
        self.mox.StubOutWithMock(self._vmops, '_destroy')
        self.mox.StubOutWithMock(vm_utils, 'set_vm_name_label')
        self.mox.StubOutWithMock(self._vmops, '_attach_mapped_block_devices')
        self.mox.StubOutWithMock(self._vmops, '_start')
        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')

        vm_utils.lookup(self._session, 'foo-orig').AndReturn(
            backup_made and 'foo' or None)
        vm_utils.lookup(self._session, 'foo').AndReturn(
            (not backup_made or new_made) and 'foo' or None)
        if backup_made:
            if new_made:
                self._vmops._destroy(instance, 'foo')
            vm_utils.set_vm_name_label(self._session, 'foo', 'foo')
            self._vmops._attach_mapped_block_devices(instance, [])

        vm_utils.is_vm_shutdown(self._session, 'foo').AndReturn(vm_shutdown)
        if vm_shutdown:
            self._vmops._start(instance, 'foo')

        self.mox.ReplayAll()

        self._vmops.finish_revert_migration(context, instance, [])

    def test_finish_revert_migration_after_crash(self):
        self._test_finish_revert_migration_after_crash(True, True)

    def test_finish_revert_migration_after_crash_before_new(self):
        self._test_finish_revert_migration_after_crash(True, False)

    def test_finish_revert_migration_after_crash_before_backup(self):
        self._test_finish_revert_migration_after_crash(False, False)

    def test_xsm_sr_check_relaxed_cached(self):
        # The plugin answer must be cached: two queries, one plugin call.
        self.make_plugin_call_count = 0

        def fake_make_plugin_call(plugin, method, **args):
            self.make_plugin_call_count = self.make_plugin_call_count + 1
            return "true"

        self.stubs.Set(self._vmops, "_make_plugin_call",
                       fake_make_plugin_call)

        self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())
        self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())

        self.assertEqual(self.make_plugin_call_count, 1)

    def test_get_vm_opaque_ref_raises_instance_not_found(self):
        instance = {"name": "dummy"}
        self.mox.StubOutWithMock(vm_utils, 'lookup')
        vm_utils.lookup(self._session, instance['name'], False).AndReturn(None)
        self.mox.ReplayAll()

        self.assertRaises(exception.InstanceNotFound,
                          self._vmops._get_vm_opaque_ref, instance)
+
+
class InjectAutoDiskConfigTestCase(VMOpsTestBase):
    """_inject_auto_disk_config writes 'vm-data/auto-disk-config' into
    the VM's xenstore data.
    """

    def _assert_xenstore_flag(self, auto_disk_config, expected):
        vm, vm_ref = self.create_vm("dummy")
        instance = {"name": "dummy", "uuid": "1234",
                    "auto_disk_config": auto_disk_config}

        self.vmops._inject_auto_disk_config(instance, vm_ref)

        self.assertEqual(vm['xenstore_data']['vm-data/auto-disk-config'],
                         expected)

    def test_inject_auto_disk_config_when_present(self):
        self._assert_xenstore_flag(True, 'True')

    def test_inject_auto_disk_config_none_as_false(self):
        # An unset (None) flag is written out as 'False'.
        self._assert_xenstore_flag(None, 'False')
+
+
class GetConsoleOutputTestCase(VMOpsTestBase):
    """Tests for console output retrieval and dom id lookup."""

    def test_get_console_output_works(self):
        self.mox.StubOutWithMock(self.vmops, '_get_dom_id')

        instance = {"name": "dummy"}
        self.vmops._get_dom_id(instance, check_rescue=True).AndReturn(42)
        self.mox.ReplayAll()

        # "dom_id: 42" is what the fake session's console plugin returns.
        self.assertEqual("dom_id: 42", self.vmops.get_console_output(instance))

    def test_get_console_output_throws_nova_exception(self):
        self.mox.StubOutWithMock(self.vmops, '_get_dom_id')

        instance = {"name": "dummy"}
        # dom_id=0 used to trigger exception in fake XenAPI
        self.vmops._get_dom_id(instance, check_rescue=True).AndReturn(0)
        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.vmops.get_console_output, instance)

    def test_get_dom_id_works(self):
        instance = {"name": "dummy"}
        vm, vm_ref = self.create_vm("dummy")
        self.assertEqual(vm["domid"], self.vmops._get_dom_id(instance))

    def test_get_dom_id_works_with_rescue_vm(self):
        # check_rescue makes the lookup target "<name>-rescue".
        instance = {"name": "dummy"}
        vm, vm_ref = self.create_vm("dummy-rescue")
        self.assertEqual(vm["domid"],
                         self.vmops._get_dom_id(instance, check_rescue=True))

    def test_get_dom_id_raises_not_found(self):
        instance = {"name": "dummy"}
        self.create_vm("not-dummy")
        self.assertRaises(exception.NotFound, self.vmops._get_dom_id, instance)

    def test_get_dom_id_works_with_vmref(self):
        # A vm_ref can be given directly, bypassing the name lookup.
        vm, vm_ref = self.create_vm("dummy")
        self.assertEqual(vm["domid"],
                         self.vmops._get_dom_id(vm_ref=vm_ref))
+
+
+class SpawnTestCase(VMOpsTestBase):
    def _stub_out_common(self):
        # Stub out, with mox, every collaborator that spawn() and
        # finish_migration() touch, so each test can script the exact
        # expected call sequence before ReplayAll().
        self.mox.StubOutWithMock(self.vmops, '_ensure_instance_name_unique')
        self.mox.StubOutWithMock(self.vmops, '_ensure_enough_free_mem')
        self.mox.StubOutWithMock(self.vmops, '_update_instance_progress')
        self.mox.StubOutWithMock(vm_utils, 'determine_disk_image_type')
        self.mox.StubOutWithMock(self.vmops, '_get_vdis_for_instance')
        self.mox.StubOutWithMock(vm_utils, 'safe_destroy_vdis')
        self.mox.StubOutWithMock(self.vmops._volumeops,
                                 'safe_cleanup_from_vdis')
        self.mox.StubOutWithMock(self.vmops, '_resize_up_vdis')
        self.mox.StubOutWithMock(vm_utils,
                                 'create_kernel_and_ramdisk')
        self.mox.StubOutWithMock(vm_utils, 'destroy_kernel_ramdisk')
        self.mox.StubOutWithMock(self.vmops, '_create_vm_record')
        self.mox.StubOutWithMock(self.vmops, '_destroy')
        self.mox.StubOutWithMock(self.vmops, '_attach_disks')
        self.mox.StubOutWithMock(pci_manager, 'get_instance_pci_devs')
        self.mox.StubOutWithMock(vm_utils, 'set_other_config_pci')
        self.mox.StubOutWithMock(self.vmops, '_attach_orig_disks')
        self.mox.StubOutWithMock(self.vmops, 'inject_network_info')
        self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
        self.mox.StubOutWithMock(self.vmops, '_inject_instance_metadata')
        self.mox.StubOutWithMock(self.vmops, '_inject_auto_disk_config')
        self.mox.StubOutWithMock(self.vmops, '_file_inject_vm_settings')
        self.mox.StubOutWithMock(self.vmops, '_create_vifs')
        self.mox.StubOutWithMock(self.vmops.firewall_driver,
                                 'setup_basic_filtering')
        self.mox.StubOutWithMock(self.vmops.firewall_driver,
                                 'prepare_instance_filter')
        self.mox.StubOutWithMock(self.vmops, '_start')
        self.mox.StubOutWithMock(self.vmops, '_wait_for_instance_to_start')
        self.mox.StubOutWithMock(self.vmops,
                                 '_configure_new_instance_with_agent')
        self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
        self.mox.StubOutWithMock(self.vmops.firewall_driver,
                                 'apply_instance_filter')
+
    def _test_spawn(self, name_label_param=None, block_device_info_param=None,
            rescue=False, include_root_vdi=True, throw_exception=None,
            attach_pci_dev=False):
        """Script, with mox, the full expected call sequence of
        VMOps.spawn, then run spawn against it.

        The expectations follow spawn's ten progress steps (eleven when
        rescue adds an extra one); any deviation in call order or
        arguments fails at replay.  When throw_exception is set, the
        final progress update raises and the rollback path (destroy the
        VM, the kernel/ramdisk and the VDIs) is expected instead.
        """
        self._stub_out_common()

        instance = {"name": "dummy", "uuid": "fake_uuid"}
        name_label = name_label_param
        if name_label is None:
            name_label = "dummy"
        image_meta = {"id": "image_id"}
        context = "context"
        session = self.vmops._session
        injected_files = "fake_files"
        admin_password = "password"
        network_info = "net_info"
        steps = 10
        if rescue:
            steps += 1

        # An empty root_device_name is expected to be filled in with the
        # driver default before being used.
        block_device_info = block_device_info_param
        if block_device_info and not block_device_info['root_device_name']:
            block_device_info = dict(block_device_info_param)
            block_device_info['root_device_name'] = \
                                        self.vmops.default_root_dev

        # Step 1: determine the disk image type.
        di_type = "di_type"
        vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
        step = 1
        self.vmops._update_instance_progress(context, instance, step, steps)

        # Step 2: fetch and resize the VDIs.  The "other" VDI is an
        # os-volume (osvol) and so is cleaned up through volumeops on
        # rollback rather than destroyed directly.
        vdis = {"other": {"ref": "fake_ref_2", "osvol": True}}
        if include_root_vdi:
            vdis["root"] = {"ref": "fake_ref"}
        self.vmops._get_vdis_for_instance(context, instance,
                name_label, "image_id", di_type,
                block_device_info).AndReturn(vdis)
        self.vmops._resize_up_vdis(instance, vdis)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)

        # Step 3: kernel/ramdisk.
        kernel_file = "kernel"
        ramdisk_file = "ramdisk"
        vm_utils.create_kernel_and_ramdisk(context, session,
                instance, name_label).AndReturn((kernel_file, ramdisk_file))
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)

        # Step 4: create the VM record.
        vm_ref = "fake_vm_ref"
        self.vmops._ensure_instance_name_unique(name_label)
        self.vmops._ensure_enough_free_mem(instance)
        self.vmops._create_vm_record(context, instance, name_label,
                di_type, kernel_file,
                ramdisk_file, image_meta).AndReturn(vm_ref)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)

        # Step 5: attach disks and, if present, PCI passthrough devices.
        self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
                network_info, rescue, admin_password, injected_files)
        if attach_pci_dev:
            fake_dev = {
                'created_at': None,
                'updated_at': None,
                'deleted_at': None,
                'deleted': None,
                'id': 1,
                'compute_node_id': 1,
                'address': '00:00.0',
                'vendor_id': '1234',
                'product_id': 'abcd',
                'dev_type': 'type-PCI',
                'status': 'available',
                'dev_id': 'devid',
                'label': 'label',
                'instance_uuid': None,
                'extra_info': '{}',
            }
            pci_manager.get_instance_pci_devs(instance).AndReturn([fake_dev])
            vm_utils.set_other_config_pci(self.vmops._session,
                                          vm_ref,
                                          "0/0000:00:00.0")
        else:
            pci_manager.get_instance_pci_devs(instance).AndReturn([])
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)

        # Step 6: inject metadata/config/hostname and network info.
        self.vmops._inject_instance_metadata(instance, vm_ref)
        self.vmops._inject_auto_disk_config(instance, vm_ref)
        self.vmops._inject_hostname(instance, vm_ref, rescue)
        self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
                                            network_info)
        self.vmops.inject_network_info(instance, network_info, vm_ref)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)

        # Step 7: create VIFs and firewall rules.  setup_basic_filtering
        # raising NotImplementedError must be tolerated (some firewall
        # drivers don't implement it).
        self.vmops._create_vifs(instance, vm_ref, network_info)
        self.vmops.firewall_driver.setup_basic_filtering(instance,
                network_info).AndRaise(NotImplementedError)
        self.vmops.firewall_driver.prepare_instance_filter(instance,
                                                           network_info)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)

        # Extra step, rescue only: attach the original VM's disks.
        if rescue:
            self.vmops._attach_orig_disks(instance, vm_ref)
            step += 1
            self.vmops._update_instance_progress(context, instance, step,
                                                 steps)
        # Step 8: boot the VM and wait for it.
        self.vmops._start(instance, vm_ref)
        self.vmops._wait_for_instance_to_start(instance, vm_ref)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)

        # Step 9: agent configuration.
        self.vmops._configure_new_instance_with_agent(instance, vm_ref,
                injected_files, admin_password)
        self.vmops._remove_hostname(instance, vm_ref)
        step += 1
        self.vmops._update_instance_progress(context, instance, step, steps)

        # Step 10: apply the firewall filter; optionally blow up here to
        # exercise the rollback expectations below.
        self.vmops.firewall_driver.apply_instance_filter(instance,
                                                         network_info)
        step += 1
        last_call = self.vmops._update_instance_progress(context, instance,
                                                         step, steps)
        if throw_exception:
            last_call.AndRaise(throw_exception)
            self.vmops._destroy(instance, vm_ref, network_info=network_info)
            vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
                                            kernel_file, ramdisk_file)
            vm_utils.safe_destroy_vdis(self.vmops._session, ["fake_ref"])
            self.vmops._volumeops.safe_cleanup_from_vdis(["fake_ref_2"])

        self.mox.ReplayAll()
        self.vmops.spawn(context, instance, image_meta, injected_files,
                         admin_password, network_info,
                         block_device_info_param, name_label_param, rescue)
+
    def test_spawn(self):
        # Default, happy-path spawn.
        self._test_spawn()
+
    def test_spawn_with_alternate_options(self):
        # Rescue spawn, explicit name label, no root VDI, and an empty
        # root_device_name that must be defaulted.
        self._test_spawn(include_root_vdi=False, rescue=True,
                         name_label_param="bob",
                         block_device_info_param={"root_device_name": ""})
+
    def test_spawn_with_pci_available_on_the_host(self):
        # A PCI device allocated to the instance must be recorded in the
        # VM's other_config.
        self._test_spawn(attach_pci_dev=True)
+
    def test_spawn_performs_rollback_and_throws_exception(self):
        # A failure late in spawn must trigger cleanup of the VM,
        # kernel/ramdisk and VDIs, then re-raise.
        self.assertRaises(test.TestingException, self._test_spawn,
                          throw_exception=test.TestingException())
+
+ def _test_finish_migration(self, power_on=True, resize_instance=True,
+ throw_exception=None):
+ self._stub_out_common()
+ self.mox.StubOutWithMock(vm_utils, "import_all_migrated_disks")
+ self.mox.StubOutWithMock(self.vmops, "_attach_mapped_block_devices")
+
+ context = "context"
+ migration = {}
+ name_label = "dummy"
+ instance = {"name": name_label, "uuid": "fake_uuid"}
+ disk_info = "disk_info"
+ network_info = "net_info"
+ image_meta = {"id": "image_id"}
+ block_device_info = "bdi"
+ session = self.vmops._session
+
+ self.vmops._ensure_instance_name_unique(name_label)
+ self.vmops._ensure_enough_free_mem(instance)
+
+ di_type = "di_type"
+ vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
+
+ root_vdi = {"ref": "fake_ref"}
+ ephemeral_vdi = {"ref": "fake_ref_e"}
+ vdis = {"root": root_vdi, "ephemerals": {4: ephemeral_vdi}}
+ vm_utils.import_all_migrated_disks(self.vmops._session,
+ instance).AndReturn(vdis)
+
+ kernel_file = "kernel"
+ ramdisk_file = "ramdisk"
+ vm_utils.create_kernel_and_ramdisk(context, session,
+ instance, name_label).AndReturn((kernel_file, ramdisk_file))
+
+ vm_ref = "fake_vm_ref"
+ self.vmops._create_vm_record(context, instance, name_label,
+ di_type, kernel_file,
+ ramdisk_file, image_meta).AndReturn(vm_ref)
+
+ if resize_instance:
+ self.vmops._resize_up_vdis(instance, vdis)
+ self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
+ network_info, False, None, None)
+ self.vmops._attach_mapped_block_devices(instance, block_device_info)
+ pci_manager.get_instance_pci_devs(instance).AndReturn([])
+
+ self.vmops._inject_instance_metadata(instance, vm_ref)
+ self.vmops._inject_auto_disk_config(instance, vm_ref)
+ self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
+ network_info)
+ self.vmops.inject_network_info(instance, network_info, vm_ref)
+
+ self.vmops._create_vifs(instance, vm_ref, network_info)
+ self.vmops.firewall_driver.setup_basic_filtering(instance,
+ network_info).AndRaise(NotImplementedError)
+ self.vmops.firewall_driver.prepare_instance_filter(instance,
+ network_info)
+
+ if power_on:
+ self.vmops._start(instance, vm_ref)
+ self.vmops._wait_for_instance_to_start(instance, vm_ref)
+
+ self.vmops.firewall_driver.apply_instance_filter(instance,
+ network_info)
+
+ last_call = self.vmops._update_instance_progress(context, instance,
+ step=5, total_steps=5)
+ if throw_exception:
+ last_call.AndRaise(throw_exception)
+ self.vmops._destroy(instance, vm_ref, network_info=network_info)
+ vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
+ kernel_file, ramdisk_file)
+ vm_utils.safe_destroy_vdis(self.vmops._session,
+ ["fake_ref_e", "fake_ref"])
+
+ self.mox.ReplayAll()
+ self.vmops.finish_migration(context, migration, instance, disk_info,
+ network_info, image_meta, resize_instance,
+ block_device_info, power_on)
+
+ def test_finish_migration(self):
+ self._test_finish_migration()
+
+ def test_finish_migration_no_power_on(self):
+ self._test_finish_migration(power_on=False, resize_instance=False)
+
+ def test_finish_migrate_performs_rollback_on_error(self):
+ self.assertRaises(test.TestingException, self._test_finish_migration,
+ power_on=False, resize_instance=False,
+ throw_exception=test.TestingException())
+
+ def test_remove_hostname(self):
+ vm, vm_ref = self.create_vm("dummy")
+ instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
+ self.mox.StubOutWithMock(self._session, 'call_xenapi')
+ self._session.call_xenapi("VM.remove_from_xenstore_data", vm_ref,
+ "vm-data/hostname")
+
+ self.mox.ReplayAll()
+ self.vmops._remove_hostname(instance, vm_ref)
+ self.mox.VerifyAll()
+
+ def test_reset_network(self):
+ class mock_agent(object):
+ def __init__(self):
+ self.called = False
+
+ def resetnetwork(self):
+ self.called = True
+
+ vm, vm_ref = self.create_vm("dummy")
+ instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
+ agent = mock_agent()
+
+ self.mox.StubOutWithMock(self.vmops, 'agent_enabled')
+ self.mox.StubOutWithMock(self.vmops, '_get_agent')
+ self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
+ self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
+
+ self.vmops.agent_enabled(instance).AndReturn(True)
+ self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
+ self.vmops._inject_hostname(instance, vm_ref, False)
+ self.vmops._remove_hostname(instance, vm_ref)
+ self.mox.ReplayAll()
+ self.vmops.reset_network(instance)
+ self.assertTrue(agent.called)
+ self.mox.VerifyAll()
+
+ def test_inject_hostname(self):
+ instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
+ vm_ref = "vm_ref"
+
+ self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
+ self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname', 'dummy')
+
+ self.mox.ReplayAll()
+ self.vmops._inject_hostname(instance, vm_ref, rescue=False)
+
+ def test_inject_hostname_with_rescue_prefix(self):
+ instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
+ vm_ref = "vm_ref"
+
+ self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
+ self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
+ 'RESCUE-dummy')
+
+ self.mox.ReplayAll()
+ self.vmops._inject_hostname(instance, vm_ref, rescue=True)
+
+ def test_inject_hostname_with_windows_name_truncation(self):
+ instance = {"hostname": "dummydummydummydummydummy",
+ "os_type": "windows", "uuid": "uuid"}
+ vm_ref = "vm_ref"
+
+ self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
+ self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
+ 'RESCUE-dummydum')
+
+ self.mox.ReplayAll()
+ self.vmops._inject_hostname(instance, vm_ref, rescue=True)
+
+ def test_wait_for_instance_to_start(self):
+ instance = {"uuid": "uuid"}
+ vm_ref = "vm_ref"
+
+ self.mox.StubOutWithMock(vm_utils, 'get_power_state')
+ self.mox.StubOutWithMock(greenthread, 'sleep')
+ vm_utils.get_power_state(self._session, vm_ref).AndReturn(
+ power_state.SHUTDOWN)
+ greenthread.sleep(0.5)
+ vm_utils.get_power_state(self._session, vm_ref).AndReturn(
+ power_state.RUNNING)
+
+ self.mox.ReplayAll()
+ self.vmops._wait_for_instance_to_start(instance, vm_ref)
+
+ def test_attach_orig_disks(self):
+ instance = {"name": "dummy"}
+ vm_ref = "vm_ref"
+ vbd_refs = {vmops.DEVICE_ROOT: "vdi_ref"}
+
+ self.mox.StubOutWithMock(vm_utils, 'lookup')
+ self.mox.StubOutWithMock(self.vmops, '_find_vdi_refs')
+ self.mox.StubOutWithMock(vm_utils, 'create_vbd')
+
+ vm_utils.lookup(self.vmops._session, "dummy").AndReturn("ref")
+ self.vmops._find_vdi_refs("ref", exclude_volumes=True).AndReturn(
+ vbd_refs)
+ vm_utils.create_vbd(self.vmops._session, vm_ref, "vdi_ref",
+ vmops.DEVICE_RESCUE, bootable=False)
+
+ self.mox.ReplayAll()
+ self.vmops._attach_orig_disks(instance, vm_ref)
+
+ def test_agent_update_setup(self):
+ # agent updates need to occur after networking is configured
+ instance = {'name': 'betelgeuse',
+ 'uuid': '1-2-3-4-5-6'}
+ vm_ref = 'vm_ref'
+ agent = xenapi_agent.XenAPIBasedAgent(self.vmops._session,
+ self.vmops._virtapi, instance, vm_ref)
+
+ self.mox.StubOutWithMock(xenapi_agent, 'should_use_agent')
+ self.mox.StubOutWithMock(self.vmops, '_get_agent')
+ self.mox.StubOutWithMock(agent, 'get_version')
+ self.mox.StubOutWithMock(agent, 'resetnetwork')
+ self.mox.StubOutWithMock(agent, 'update_if_needed')
+
+ xenapi_agent.should_use_agent(instance).AndReturn(True)
+ self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
+ agent.get_version().AndReturn('1.2.3')
+ agent.resetnetwork()
+ agent.update_if_needed('1.2.3')
+
+ self.mox.ReplayAll()
+ self.vmops._configure_new_instance_with_agent(instance, vm_ref,
+ None, None)
+
+
+class DestroyTestCase(VMOpsTestBase):
+ def setUp(self):
+ super(DestroyTestCase, self).setUp()
+ self.context = context.RequestContext(user_id=None, project_id=None)
+ self.instance = fake_instance.fake_instance_obj(self.context)
+
+ @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
+ @mock.patch.object(vm_utils, 'hard_shutdown_vm')
+ @mock.patch.object(volume_utils, 'find_sr_by_uuid')
+ @mock.patch.object(volume_utils, 'forget_sr')
+ def test_no_vm_no_bdm(self, forget_sr, find_sr_by_uuid, hard_shutdown_vm,
+ lookup):
+ self.vmops.destroy(self.instance, 'network_info',
+ {'block_device_mapping': []})
+ self.assertEqual(0, find_sr_by_uuid.call_count)
+ self.assertEqual(0, forget_sr.call_count)
+ self.assertEqual(0, hard_shutdown_vm.call_count)
+
+ @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
+ @mock.patch.object(vm_utils, 'hard_shutdown_vm')
+ @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value=None)
+ @mock.patch.object(volume_utils, 'forget_sr')
+ def test_no_vm_orphaned_volume_no_sr(self, forget_sr, find_sr_by_uuid,
+ hard_shutdown_vm, lookup):
+ self.vmops.destroy(self.instance, 'network_info',
+ {'block_device_mapping': [{'connection_info':
+ {'data': {'volume_id': 'fake-uuid'}}}]})
+ find_sr_by_uuid.assert_called_once_with(self.vmops._session,
+ 'FA15E-D15C-fake-uuid')
+ self.assertEqual(0, forget_sr.call_count)
+ self.assertEqual(0, hard_shutdown_vm.call_count)
+
+ @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
+ @mock.patch.object(vm_utils, 'hard_shutdown_vm')
+ @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value='sr_ref')
+ @mock.patch.object(volume_utils, 'forget_sr')
+ def test_no_vm_orphaned_volume(self, forget_sr, find_sr_by_uuid,
+ hard_shutdown_vm, lookup):
+ self.vmops.destroy(self.instance, 'network_info',
+ {'block_device_mapping': [{'connection_info':
+ {'data': {'volume_id': 'fake-uuid'}}}]})
+ find_sr_by_uuid.assert_called_once_with(self.vmops._session,
+ 'FA15E-D15C-fake-uuid')
+ forget_sr.assert_called_once_with(self.vmops._session, 'sr_ref')
+ self.assertEqual(0, hard_shutdown_vm.call_count)
+
+
+@mock.patch.object(vmops.VMOps, '_update_instance_progress')
+@mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref')
+@mock.patch.object(vm_utils, 'get_sr_path')
+@mock.patch.object(vmops.VMOps, '_detach_block_devices_from_orig_vm')
+@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_down')
+@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_up')
+class MigrateDiskAndPowerOffTestCase(VMOpsTestBase):
+ def test_migrate_disk_and_power_off_works_down(self,
+ migrate_up, migrate_down, *mocks):
+ instance = {"root_gb": 2, "ephemeral_gb": 0, "uuid": "uuid"}
+ flavor = {"root_gb": 1, "ephemeral_gb": 0}
+
+ self.vmops.migrate_disk_and_power_off(None, instance, None,
+ flavor, None)
+
+ self.assertFalse(migrate_up.called)
+ self.assertTrue(migrate_down.called)
+
+ def test_migrate_disk_and_power_off_works_up(self,
+ migrate_up, migrate_down, *mocks):
+ instance = {"root_gb": 1, "ephemeral_gb": 1, "uuid": "uuid"}
+ flavor = {"root_gb": 2, "ephemeral_gb": 2}
+
+ self.vmops.migrate_disk_and_power_off(None, instance, None,
+ flavor, None)
+
+ self.assertFalse(migrate_down.called)
+ self.assertTrue(migrate_up.called)
+
+ def test_migrate_disk_and_power_off_resize_down_ephemeral_fails(self,
+ migrate_up, migrate_down, *mocks):
+ instance = {"ephemeral_gb": 2}
+ flavor = {"ephemeral_gb": 1}
+
+ self.assertRaises(exception.ResizeError,
+ self.vmops.migrate_disk_and_power_off,
+ None, instance, None, flavor, None)
+
+
+@mock.patch.object(vm_utils, 'migrate_vhd')
+@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
+@mock.patch.object(vm_utils, 'get_all_vdi_uuids_for_vm')
+@mock.patch.object(vmops.VMOps, '_update_instance_progress')
+@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
+class MigrateDiskResizingUpTestCase(VMOpsTestBase):
+ def _fake_snapshot_attached_here(self, session, instance, vm_ref, label,
+ userdevice, post_snapshot_callback):
+ self.assertIsInstance(instance, dict)
+ if userdevice == '0':
+ self.assertEqual("vm_ref", vm_ref)
+ self.assertEqual("fake-snapshot", label)
+ yield ["leaf", "parent", "grandp"]
+ else:
+ leaf = userdevice + "-leaf"
+ parent = userdevice + "-parent"
+ yield [leaf, parent]
+
+ def test_migrate_disk_resizing_up_works_no_ephemeral(self,
+ mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
+ mock_shutdown, mock_migrate_vhd):
+ context = "ctxt"
+ instance = {"name": "fake", "uuid": "uuid"}
+ dest = "dest"
+ vm_ref = "vm_ref"
+ sr_path = "sr_path"
+
+ mock_get_all_vdi_uuids.return_value = None
+
+ with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
+ self._fake_snapshot_attached_here):
+ self.vmops._migrate_disk_resizing_up(context, instance, dest,
+ vm_ref, sr_path)
+
+ mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
+ vm_ref, min_userdevice=4)
+ mock_apply_orig.assert_called_once_with(instance, vm_ref)
+ mock_shutdown.assert_called_once_with(instance, vm_ref)
+
+ m_vhd_expected = [mock.call(self.vmops._session, instance, "parent",
+ dest, sr_path, 1),
+ mock.call(self.vmops._session, instance, "grandp",
+ dest, sr_path, 2),
+ mock.call(self.vmops._session, instance, "leaf",
+ dest, sr_path, 0)]
+ self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
+
+ prog_expected = [
+ mock.call(context, instance, 1, 5),
+ mock.call(context, instance, 2, 5),
+ mock.call(context, instance, 3, 5),
+ mock.call(context, instance, 4, 5)
+ # 5/5: step to be executed by finish migration.
+ ]
+ self.assertEqual(prog_expected, mock_update_progress.call_args_list)
+
+ def test_migrate_disk_resizing_up_works_with_two_ephemeral(self,
+ mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
+ mock_shutdown, mock_migrate_vhd):
+ context = "ctxt"
+ instance = {"name": "fake", "uuid": "uuid"}
+ dest = "dest"
+ vm_ref = "vm_ref"
+ sr_path = "sr_path"
+
+ mock_get_all_vdi_uuids.return_value = ["vdi-eph1", "vdi-eph2"]
+
+ with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
+ self._fake_snapshot_attached_here):
+ self.vmops._migrate_disk_resizing_up(context, instance, dest,
+ vm_ref, sr_path)
+
+ mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
+ vm_ref, min_userdevice=4)
+ mock_apply_orig.assert_called_once_with(instance, vm_ref)
+ mock_shutdown.assert_called_once_with(instance, vm_ref)
+
+ m_vhd_expected = [mock.call(self.vmops._session, instance,
+ "parent", dest, sr_path, 1),
+ mock.call(self.vmops._session, instance,
+ "grandp", dest, sr_path, 2),
+ mock.call(self.vmops._session, instance,
+ "4-parent", dest, sr_path, 1, 1),
+ mock.call(self.vmops._session, instance,
+ "5-parent", dest, sr_path, 1, 2),
+ mock.call(self.vmops._session, instance,
+ "leaf", dest, sr_path, 0),
+ mock.call(self.vmops._session, instance,
+ "4-leaf", dest, sr_path, 0, 1),
+ mock.call(self.vmops._session, instance,
+ "5-leaf", dest, sr_path, 0, 2)]
+ self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
+
+ prog_expected = [
+ mock.call(context, instance, 1, 5),
+ mock.call(context, instance, 2, 5),
+ mock.call(context, instance, 3, 5),
+ mock.call(context, instance, 4, 5)
+ # 5/5: step to be executed by finish migration.
+ ]
+ self.assertEqual(prog_expected, mock_update_progress.call_args_list)
+
+ @mock.patch.object(vmops.VMOps, '_restore_orig_vm_and_cleanup_orphan')
+ def test_migrate_disk_resizing_up_rollback(self,
+ mock_restore,
+ mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
+ mock_shutdown, mock_migrate_vhd):
+ context = "ctxt"
+ instance = {"name": "fake", "uuid": "fake"}
+ dest = "dest"
+ vm_ref = "vm_ref"
+ sr_path = "sr_path"
+
+ mock_migrate_vhd.side_effect = test.TestingException
+ mock_restore.side_effect = test.TestingException
+
+ with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
+ self._fake_snapshot_attached_here):
+ self.assertRaises(exception.InstanceFaultRollback,
+ self.vmops._migrate_disk_resizing_up,
+ context, instance, dest, vm_ref, sr_path)
+
+ mock_apply_orig.assert_called_once_with(instance, vm_ref)
+ mock_restore.assert_called_once_with(instance)
+ mock_migrate_vhd.assert_called_once_with(self.vmops._session,
+ instance, "parent", dest, sr_path, 1)
+
+
+class CreateVMRecordTestCase(VMOpsTestBase):
+ @mock.patch.object(vm_utils, 'determine_vm_mode')
+ @mock.patch.object(vm_utils, 'get_vm_device_id')
+ @mock.patch.object(vm_utils, 'create_vm')
+ def test_create_vm_record_with_vm_device_id(self, mock_create_vm,
+ mock_get_vm_device_id, mock_determine_vm_mode):
+
+ context = "context"
+ instance = objects.Instance(vm_mode="vm_mode", uuid="uuid123")
+ name_label = "dummy"
+ disk_image_type = "vhd"
+ kernel_file = "kernel"
+ ramdisk_file = "ram"
+ device_id = "0002"
+ image_properties = {"xenapi_device_id": device_id}
+ image_meta = {"properties": image_properties}
+ session = "session"
+ self.vmops._session = session
+ mock_get_vm_device_id.return_value = device_id
+ mock_determine_vm_mode.return_value = "vm_mode"
+
+ self.vmops._create_vm_record(context, instance, name_label,
+ disk_image_type, kernel_file, ramdisk_file, image_meta)
+
+ mock_get_vm_device_id.assert_called_with(session, image_properties)
+ mock_create_vm.assert_called_with(session, instance, name_label,
+ kernel_file, ramdisk_file, False, device_id)
+
+
+class BootableTestCase(VMOpsTestBase):
+
+ def setUp(self):
+ super(BootableTestCase, self).setUp()
+
+ self.instance = {"name": "test", "uuid": "fake"}
+ vm_rec, self.vm_ref = self.create_vm('test')
+
+ # sanity check bootlock is initially disabled:
+ self.assertEqual({}, vm_rec['blocked_operations'])
+
+ def _get_blocked(self):
+ vm_rec = self._session.call_xenapi("VM.get_record", self.vm_ref)
+ return vm_rec['blocked_operations']
+
+ def test_acquire_bootlock(self):
+ self.vmops._acquire_bootlock(self.vm_ref)
+ blocked = self._get_blocked()
+ self.assertIn('start', blocked)
+
+ def test_release_bootlock(self):
+ self.vmops._acquire_bootlock(self.vm_ref)
+ self.vmops._release_bootlock(self.vm_ref)
+ blocked = self._get_blocked()
+ self.assertNotIn('start', blocked)
+
+ def test_set_bootable(self):
+ self.vmops.set_bootable(self.instance, True)
+ blocked = self._get_blocked()
+ self.assertNotIn('start', blocked)
+
+ def test_set_not_bootable(self):
+ self.vmops.set_bootable(self.instance, False)
+ blocked = self._get_blocked()
+ self.assertIn('start', blocked)
+
+
+@mock.patch.object(vm_utils, 'update_vdi_virtual_size', autospec=True)
+class ResizeVdisTestCase(VMOpsTestBase):
+ def test_dont_resize_root_volumes_osvol_false(self, mock_resize):
+ instance = fake_instance.fake_db_instance(root_gb=20)
+ vdis = {'root': {'osvol': False, 'ref': 'vdi_ref'}}
+ self.vmops._resize_up_vdis(instance, vdis)
+ self.assertTrue(mock_resize.called)
+
+ def test_dont_resize_root_volumes_osvol_true(self, mock_resize):
+ instance = fake_instance.fake_db_instance(root_gb=20)
+ vdis = {'root': {'osvol': True}}
+ self.vmops._resize_up_vdis(instance, vdis)
+ self.assertFalse(mock_resize.called)
+
+ def test_dont_resize_root_volumes_no_osvol(self, mock_resize):
+ instance = fake_instance.fake_db_instance(root_gb=20)
+ vdis = {'root': {}}
+ self.vmops._resize_up_vdis(instance, vdis)
+ self.assertFalse(mock_resize.called)
+
+ @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
+ def test_ensure_ephemeral_resize_with_root_volume(self, mock_sizes,
+ mock_resize):
+ mock_sizes.return_value = [2000, 1000]
+ instance = fake_instance.fake_db_instance(root_gb=20, ephemeral_gb=20)
+ ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
+ vdis = {'root': {'osvol': True, 'ref': 'vdi_ref'},
+ 'ephemerals': ephemerals}
+ with mock.patch.object(vm_utils, 'generate_single_ephemeral',
+ autospec=True) as g:
+ self.vmops._resize_up_vdis(instance, vdis)
+ self.assertEqual([mock.call(self.vmops._session, instance, 4,
+ 2000),
+ mock.call(self.vmops._session, instance, 5,
+ 1000)],
+ mock_resize.call_args_list)
+ self.assertFalse(g.called)
+
+ def test_resize_up_vdis_root(self, mock_resize):
+ instance = {"root_gb": 20, "ephemeral_gb": 0}
+ self.vmops._resize_up_vdis(instance, {"root": {"ref": "vdi_ref"}})
+ mock_resize.assert_called_once_with(self.vmops._session, instance,
+ "vdi_ref", 20)
+
+ def test_resize_up_vdis_zero_disks(self, mock_resize):
+ instance = {"root_gb": 0, "ephemeral_gb": 0}
+ self.vmops._resize_up_vdis(instance, {"root": {}})
+ self.assertFalse(mock_resize.called)
+
+ def test_resize_up_vdis_no_vdis_like_initial_spawn(self, mock_resize):
+ instance = {"root_gb": 0, "ephemeral_gb": 3000}
+ vdis = {}
+
+ self.vmops._resize_up_vdis(instance, vdis)
+
+ self.assertFalse(mock_resize.called)
+
+ @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
+ def test_resize_up_vdis_ephemeral(self, mock_sizes, mock_resize):
+ mock_sizes.return_value = [2000, 1000]
+ instance = {"root_gb": 0, "ephemeral_gb": 3000}
+ ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
+ vdis = {"ephemerals": ephemerals}
+
+ self.vmops._resize_up_vdis(instance, vdis)
+
+ mock_sizes.assert_called_once_with(3000)
+ expected = [mock.call(self.vmops._session, instance, 4, 2000),
+ mock.call(self.vmops._session, instance, 5, 1000)]
+ self.assertEqual(expected, mock_resize.call_args_list)
+
+ @mock.patch.object(vm_utils, 'generate_single_ephemeral')
+ @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
+ def test_resize_up_vdis_ephemeral_with_generate(self, mock_sizes,
+ mock_generate,
+ mock_resize):
+ mock_sizes.return_value = [2000, 1000]
+ instance = {"root_gb": 0, "ephemeral_gb": 3000, "uuid": "a"}
+ ephemerals = {"4": {"ref": 4}}
+ vdis = {"ephemerals": ephemerals}
+
+ self.vmops._resize_up_vdis(instance, vdis)
+
+ mock_sizes.assert_called_once_with(3000)
+ mock_resize.assert_called_once_with(self.vmops._session, instance,
+ 4, 2000)
+ mock_generate.assert_called_once_with(self.vmops._session, instance,
+ None, 5, 1000)
+
+
+@mock.patch.object(vm_utils, 'remove_old_snapshots')
+class CleanupFailedSnapshotTestCase(VMOpsTestBase):
+ def test_post_interrupted_snapshot_cleanup(self, mock_remove):
+ self.vmops._get_vm_opaque_ref = mock.Mock()
+ self.vmops._get_vm_opaque_ref.return_value = "vm_ref"
+
+ self.vmops.post_interrupted_snapshot_cleanup("context", "instance")
+
+ mock_remove.assert_called_once_with(self.vmops._session,
+ "instance", "vm_ref")
+
+
+class LiveMigrateHelperTestCase(VMOpsTestBase):
+ def test_connect_block_device_volumes_none(self):
+ self.assertEqual({}, self.vmops.connect_block_device_volumes(None))
+
+ @mock.patch.object(volumeops.VolumeOps, "connect_volume")
+ def test_connect_block_device_volumes_calls_connect(self, mock_connect):
+ with mock.patch.object(self.vmops._session,
+ "call_xenapi") as mock_session:
+ mock_connect.return_value = ("sr_uuid", None)
+ mock_session.return_value = "sr_ref"
+ bdm = {"connection_info": "c_info"}
+ bdi = {"block_device_mapping": [bdm]}
+ result = self.vmops.connect_block_device_volumes(bdi)
+
+ self.assertEqual({'sr_uuid': 'sr_ref'}, result)
+
+ mock_connect.assert_called_once_with("c_info")
+ mock_session.assert_called_once_with("SR.get_by_uuid",
+ "sr_uuid")
+
+
+@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
+@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
+@mock.patch.object(vmops.VMOps, '_update_instance_progress')
+@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
+@mock.patch.object(vm_utils, 'resize_disk')
+@mock.patch.object(vm_utils, 'migrate_vhd')
+@mock.patch.object(vm_utils, 'destroy_vdi')
+class MigrateDiskResizingDownTestCase(VMOpsTestBase):
+ def test_migrate_disk_resizing_down_works_no_ephemeral(
+ self,
+ mock_destroy_vdi,
+ mock_migrate_vhd,
+ mock_resize_disk,
+ mock_get_vdi_for_vm_safely,
+ mock_update_instance_progress,
+ mock_apply_orig_vm_name_label,
+ mock_resize_ensure_vm_is_shutdown):
+
+ context = "ctx"
+ instance = {"name": "fake", "uuid": "uuid"}
+ dest = "dest"
+ vm_ref = "vm_ref"
+ sr_path = "sr_path"
+ instance_type = dict(root_gb=1)
+ old_vdi_ref = "old_ref"
+ new_vdi_ref = "new_ref"
+ new_vdi_uuid = "new_uuid"
+
+ mock_get_vdi_for_vm_safely.return_value = (old_vdi_ref, None)
+ mock_resize_disk.return_value = (new_vdi_ref, new_vdi_uuid)
+
+ self.vmops._migrate_disk_resizing_down(context, instance, dest,
+ instance_type, vm_ref, sr_path)
+
+ mock_get_vdi_for_vm_safely.assert_called_once_with(
+ self.vmops._session,
+ vm_ref)
+ mock_resize_ensure_vm_is_shutdown.assert_called_once_with(
+ instance, vm_ref)
+ mock_apply_orig_vm_name_label.assert_called_once_with(
+ instance, vm_ref)
+ mock_resize_disk.assert_called_once_with(
+ self.vmops._session,
+ instance,
+ old_vdi_ref,
+ instance_type)
+ mock_migrate_vhd.assert_called_once_with(
+ self.vmops._session,
+ instance,
+ new_vdi_uuid,
+ dest,
+ sr_path, 0)
+ mock_destroy_vdi.assert_called_once_with(
+ self.vmops._session,
+ new_vdi_ref)
+
+ prog_expected = [
+ mock.call(context, instance, 1, 5),
+ mock.call(context, instance, 2, 5),
+ mock.call(context, instance, 3, 5),
+ mock.call(context, instance, 4, 5)
+ # 5/5: step to be executed by finish migration.
+ ]
+ self.assertEqual(prog_expected,
+ mock_update_instance_progress.call_args_list)
+
+
+class GetVdisForInstanceTestCase(VMOpsTestBase):
+ """Tests get_vdis_for_instance utility method."""
+ def setUp(self):
+ super(GetVdisForInstanceTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ self.context.auth_token = 'auth_token'
+ self.session = mock.Mock()
+ self.vmops._session = self.session
+ self.instance = fake_instance.fake_instance_obj(self.context)
+ self.name_label = 'name'
+ self.image = 'fake_image_id'
+
+ @mock.patch.object(volumeops.VolumeOps, "connect_volume",
+ return_value=("sr", "vdi_uuid"))
+ def test_vdis_for_instance_bdi_password_scrubbed(self, get_uuid_mock):
+ # setup fake data
+ data = {'name_label': self.name_label,
+ 'sr_uuid': 'fake',
+ 'auth_password': 'scrubme'}
+ bdm = [{'mount_device': '/dev/vda',
+ 'connection_info': {'data': data}}]
+ bdi = {'root_device_name': 'vda',
+ 'block_device_mapping': bdm}
+
+ # Tests that the parameters to the to_xml method are sanitized for
+ # passwords when logged.
+ def fake_debug(*args, **kwargs):
+ if 'auth_password' in args[0]:
+ self.assertNotIn('scrubme', args[0])
+ fake_debug.matched = True
+
+ fake_debug.matched = False
+
+ with mock.patch.object(vmops.LOG, 'debug',
+ side_effect=fake_debug) as debug_mock:
+ vdis = self.vmops._get_vdis_for_instance(self.context,
+ self.instance, self.name_label, self.image,
+ image_type=4, block_device_info=bdi)
+ self.assertEqual(1, len(vdis))
+ get_uuid_mock.assert_called_once_with({"data": data})
+ # we don't care what the log message is, we just want to make sure
+ # our stub method is called which asserts the password is scrubbed
+ self.assertTrue(debug_mock.called)
+ self.assertTrue(fake_debug.matched)
diff --git a/nova/tests/unit/virt/xenapi/test_volume_utils.py b/nova/tests/unit/virt/xenapi/test_volume_utils.py
new file mode 100644
index 0000000000..59fd4626b9
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_volume_utils.py
@@ -0,0 +1,232 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from eventlet import greenthread
+import mock
+
+from nova import exception
+from nova import test
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import volume_utils
+
+
+class SROps(stubs.XenAPITestBaseNoDB):
+ def test_find_sr_valid_uuid(self):
+ self.session = mock.Mock()
+ self.session.call_xenapi.return_value = 'sr_ref'
+ self.assertEqual(volume_utils.find_sr_by_uuid(self.session,
+ 'sr_uuid'),
+ 'sr_ref')
+
+ def test_find_sr_invalid_uuid(self):
+ class UUIDException(Exception):
+ details = ["UUID_INVALID", "", "", ""]
+
+ self.session = mock.Mock()
+ self.session.XenAPI.Failure = UUIDException
+ self.session.call_xenapi.side_effect = UUIDException
+ self.assertIsNone(
+ volume_utils.find_sr_by_uuid(self.session, 'sr_uuid'))
+
+ def test_find_sr_from_vdi(self):
+ vdi_ref = 'fake-ref'
+
+ def fake_call_xenapi(method, *args):
+ self.assertEqual(method, 'VDI.get_SR')
+ self.assertEqual(args[0], vdi_ref)
+ return args[0]
+
+ session = mock.Mock()
+ session.call_xenapi.side_effect = fake_call_xenapi
+ self.assertEqual(volume_utils.find_sr_from_vdi(session, vdi_ref),
+ vdi_ref)
+
+ def test_find_sr_from_vdi_exception(self):
+ vdi_ref = 'fake-ref'
+
+ class FakeException(Exception):
+ pass
+
+ def fake_call_xenapi(method, *args):
+ self.assertEqual(method, 'VDI.get_SR')
+ self.assertEqual(args[0], vdi_ref)
+ return args[0]
+
+ session = mock.Mock()
+ session.XenAPI.Failure = FakeException
+ session.call_xenapi.side_effect = FakeException
+ self.assertRaises(exception.StorageError,
+ volume_utils.find_sr_from_vdi, session, vdi_ref)
+
+
+class ISCSIParametersTestCase(stubs.XenAPITestBaseNoDB):
+ def test_target_host(self):
+ self.assertEqual(volume_utils._get_target_host('host:port'),
+ 'host')
+
+ self.assertEqual(volume_utils._get_target_host('host'),
+ 'host')
+
+ # There is no default value
+ self.assertIsNone(volume_utils._get_target_host(':port'))
+
+ self.assertIsNone(volume_utils._get_target_host(None))
+
+ def test_target_port(self):
+ self.assertEqual(volume_utils._get_target_port('host:port'),
+ 'port')
+
+ self.assertEqual(volume_utils._get_target_port('host'),
+ '3260')
+
+
+class IntroduceTestCase(stubs.XenAPITestBaseNoDB):
+
+ @mock.patch.object(volume_utils, '_get_vdi_ref')
+ @mock.patch.object(greenthread, 'sleep')
+ def test_introduce_vdi_retry(self, mock_sleep, mock_get_vdi_ref):
+ def fake_get_vdi_ref(session, sr_ref, vdi_uuid, target_lun):
+ fake_get_vdi_ref.call_count += 1
+ if fake_get_vdi_ref.call_count == 2:
+ return 'vdi_ref'
+
+ def fake_call_xenapi(method, *args):
+ if method == 'SR.scan':
+ return
+ elif method == 'VDI.get_record':
+ return {'managed': 'true'}
+
+ session = mock.Mock()
+ session.call_xenapi.side_effect = fake_call_xenapi
+
+ mock_get_vdi_ref.side_effect = fake_get_vdi_ref
+ fake_get_vdi_ref.call_count = 0
+
+ self.assertEqual(volume_utils.introduce_vdi(session, 'sr_ref'),
+ 'vdi_ref')
+ mock_sleep.assert_called_once_with(20)
+
+ @mock.patch.object(volume_utils, '_get_vdi_ref')
+ @mock.patch.object(greenthread, 'sleep')
+ def test_introduce_vdi_exception(self, mock_sleep, mock_get_vdi_ref):
+ def fake_call_xenapi(method, *args):
+ if method == 'SR.scan':
+ return
+ elif method == 'VDI.get_record':
+ return {'managed': 'true'}
+
+ session = mock.Mock()
+ session.call_xenapi.side_effect = fake_call_xenapi
+ mock_get_vdi_ref.return_value = None
+
+ self.assertRaises(exception.StorageError,
+ volume_utils.introduce_vdi, session, 'sr_ref')
+ mock_sleep.assert_called_once_with(20)
+
+
+class ParseVolumeInfoTestCase(stubs.XenAPITestBaseNoDB):
+ def test_mountpoint_to_number(self):
+ cases = {
+ 'sda': 0,
+ 'sdp': 15,
+ 'hda': 0,
+ 'hdp': 15,
+ 'vda': 0,
+ 'xvda': 0,
+ '0': 0,
+ '10': 10,
+ 'vdq': -1,
+ 'sdq': -1,
+ 'hdq': -1,
+ 'xvdq': -1,
+ }
+
+ for (input, expected) in cases.iteritems():
+ actual = volume_utils._mountpoint_to_number(input)
+ self.assertEqual(actual, expected,
+ '%s yielded %s, not %s' % (input, actual, expected))
+
+ @classmethod
+ def _make_connection_info(cls):
+ target_iqn = 'iqn.2010-10.org.openstack:volume-00000001'
+ return {'driver_volume_type': 'iscsi',
+ 'data': {'volume_id': 1,
+ 'target_iqn': target_iqn,
+ 'target_portal': '127.0.0.1:3260,fake',
+ 'target_lun': None,
+ 'auth_method': 'CHAP',
+ 'auth_username': 'username',
+ 'auth_password': 'password'}}
+
+ def test_parse_volume_info_parsing_auth_details(self):
+ conn_info = self._make_connection_info()
+ result = volume_utils._parse_volume_info(conn_info['data'])
+
+ self.assertEqual('username', result['chapuser'])
+ self.assertEqual('password', result['chappassword'])
+
+ def test_get_device_number_raise_exception_on_wrong_mountpoint(self):
+ self.assertRaises(
+ exception.StorageError,
+ volume_utils.get_device_number,
+ 'dev/sd')
+
+
+class FindVBDTestCase(stubs.XenAPITestBaseNoDB):
+ def test_find_vbd_by_number_works(self):
+ session = mock.Mock()
+ session.VM.get_VBDs.return_value = ["a", "b"]
+ session.VBD.get_userdevice.return_value = "1"
+
+ result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
+
+ self.assertEqual("a", result)
+ session.VM.get_VBDs.assert_called_once_with("vm_ref")
+ session.VBD.get_userdevice.assert_called_once_with("a")
+
+ def test_find_vbd_by_number_no_matches(self):
+ session = mock.Mock()
+ session.VM.get_VBDs.return_value = ["a", "b"]
+ session.VBD.get_userdevice.return_value = "3"
+
+ result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
+
+ self.assertIsNone(result)
+ session.VM.get_VBDs.assert_called_once_with("vm_ref")
+ expected = [mock.call("a"), mock.call("b")]
+ self.assertEqual(expected,
+ session.VBD.get_userdevice.call_args_list)
+
+ def test_find_vbd_by_number_no_vbds(self):
+ session = mock.Mock()
+ session.VM.get_VBDs.return_value = []
+
+ result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
+
+ self.assertIsNone(result)
+ session.VM.get_VBDs.assert_called_once_with("vm_ref")
+ self.assertFalse(session.VBD.get_userdevice.called)
+
+ def test_find_vbd_by_number_ignores_exception(self):
+ session = mock.Mock()
+ session.XenAPI.Failure = test.TestingException
+ session.VM.get_VBDs.return_value = ["a"]
+ session.VBD.get_userdevice.side_effect = test.TestingException
+
+ result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
+
+ self.assertIsNone(result)
+ session.VM.get_VBDs.assert_called_once_with("vm_ref")
+ session.VBD.get_userdevice.assert_called_once_with("a")
diff --git a/nova/tests/unit/virt/xenapi/test_volumeops.py b/nova/tests/unit/virt/xenapi/test_volumeops.py
new file mode 100644
index 0000000000..0e840bb209
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_volumeops.py
@@ -0,0 +1,549 @@
+# Copyright (c) 2012 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import exception
+from nova import test
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import vm_utils
+from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import volumeops
+
+
+class VolumeOpsTestBase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(VolumeOpsTestBase, self).setUp()
+ self._setup_mock_volumeops()
+
+ def _setup_mock_volumeops(self):
+ self.session = stubs.FakeSessionForVolumeTests('fake_uri')
+ self.ops = volumeops.VolumeOps(self.session)
+
+
+class VolumeDetachTestCase(VolumeOpsTestBase):
+ def test_detach_volume_call(self):
+ registered_calls = []
+
+ def regcall(label):
+ def side_effect(*args, **kwargs):
+ registered_calls.append(label)
+ return side_effect
+
+ ops = volumeops.VolumeOps('session')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'lookup')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'find_vbd_by_number')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'is_vm_shutdown')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'unplug_vbd')
+ self.mox.StubOutWithMock(volumeops.vm_utils, 'destroy_vbd')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'find_sr_from_vbd')
+ self.mox.StubOutWithMock(volumeops.volume_utils, 'purge_sr')
+
+ volumeops.vm_utils.lookup('session', 'instance_1').AndReturn(
+ 'vmref')
+
+ volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
+ 'devnumber')
+
+ volumeops.volume_utils.find_vbd_by_number(
+ 'session', 'vmref', 'devnumber').AndReturn('vbdref')
+
+ volumeops.vm_utils.is_vm_shutdown('session', 'vmref').AndReturn(
+ False)
+
+ volumeops.vm_utils.unplug_vbd('session', 'vbdref', 'vmref')
+
+ volumeops.vm_utils.destroy_vbd('session', 'vbdref').WithSideEffects(
+ regcall('destroy_vbd'))
+
+ volumeops.volume_utils.find_sr_from_vbd(
+ 'session', 'vbdref').WithSideEffects(
+ regcall('find_sr_from_vbd')).AndReturn('srref')
+
+ volumeops.volume_utils.purge_sr('session', 'srref')
+
+ self.mox.ReplayAll()
+
+ ops.detach_volume(
+ dict(driver_volume_type='iscsi', data='conn_data'),
+ 'instance_1', 'mountpoint')
+
+ self.assertEqual(
+ ['find_sr_from_vbd', 'destroy_vbd'], registered_calls)
+
+ @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+ @mock.patch.object(volume_utils, "find_vbd_by_number")
+ @mock.patch.object(vm_utils, "vm_ref_or_raise")
+ def test_detach_volume(self, mock_vm, mock_vbd, mock_detach):
+ mock_vm.return_value = "vm_ref"
+ mock_vbd.return_value = "vbd_ref"
+
+ self.ops.detach_volume({}, "name", "/dev/xvdd")
+
+ mock_vm.assert_called_once_with(self.session, "name")
+ mock_vbd.assert_called_once_with(self.session, "vm_ref", 3)
+ mock_detach.assert_called_once_with("vm_ref", ["vbd_ref"])
+
+ @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+ @mock.patch.object(volume_utils, "find_vbd_by_number")
+ @mock.patch.object(vm_utils, "vm_ref_or_raise")
+ def test_detach_volume_skips_error_skip_attach(self, mock_vm, mock_vbd,
+ mock_detach):
+ mock_vm.return_value = "vm_ref"
+ mock_vbd.return_value = None
+
+ self.ops.detach_volume({}, "name", "/dev/xvdd")
+
+ self.assertFalse(mock_detach.called)
+
+ @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+ @mock.patch.object(volume_utils, "find_vbd_by_number")
+ @mock.patch.object(vm_utils, "vm_ref_or_raise")
+ def test_detach_volume_raises(self, mock_vm, mock_vbd,
+ mock_detach):
+ mock_vm.return_value = "vm_ref"
+ mock_vbd.side_effect = test.TestingException
+
+ self.assertRaises(test.TestingException,
+ self.ops.detach_volume, {}, "name", "/dev/xvdd")
+ self.assertFalse(mock_detach.called)
+
+ @mock.patch.object(volume_utils, "purge_sr")
+ @mock.patch.object(vm_utils, "destroy_vbd")
+ @mock.patch.object(volume_utils, "find_sr_from_vbd")
+ @mock.patch.object(vm_utils, "unplug_vbd")
+ @mock.patch.object(vm_utils, "is_vm_shutdown")
+ def test_detach_vbds_and_srs_not_shutdown(self, mock_shutdown, mock_unplug,
+ mock_find_sr, mock_destroy, mock_purge):
+ mock_shutdown.return_value = False
+ mock_find_sr.return_value = "sr_ref"
+
+ self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref"])
+
+ mock_shutdown.assert_called_once_with(self.session, "vm_ref")
+ mock_find_sr.assert_called_once_with(self.session, "vbd_ref")
+ mock_unplug.assert_called_once_with(self.session, "vbd_ref", "vm_ref")
+ mock_destroy.assert_called_once_with(self.session, "vbd_ref")
+ mock_purge.assert_called_once_with(self.session, "sr_ref")
+
+ @mock.patch.object(volume_utils, "purge_sr")
+ @mock.patch.object(vm_utils, "destroy_vbd")
+ @mock.patch.object(volume_utils, "find_sr_from_vbd")
+ @mock.patch.object(vm_utils, "unplug_vbd")
+ @mock.patch.object(vm_utils, "is_vm_shutdown")
+ def test_detach_vbds_and_srs_is_shutdown(self, mock_shutdown, mock_unplug,
+ mock_find_sr, mock_destroy, mock_purge):
+ mock_shutdown.return_value = True
+ mock_find_sr.return_value = "sr_ref"
+
+ self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref_1", "vbd_ref_2"])
+
+ expected = [mock.call(self.session, "vbd_ref_1"),
+ mock.call(self.session, "vbd_ref_2")]
+ self.assertEqual(expected, mock_destroy.call_args_list)
+ mock_purge.assert_called_with(self.session, "sr_ref")
+ self.assertFalse(mock_unplug.called)
+
+ @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+ @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+ def test_detach_all_no_volumes(self, mock_get_all, mock_detach):
+ mock_get_all.return_value = []
+
+ self.ops.detach_all("vm_ref")
+
+ mock_get_all.assert_called_once_with("vm_ref")
+ self.assertFalse(mock_detach.called)
+
+ @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
+ @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+ def test_detach_all_volumes(self, mock_get_all, mock_detach):
+ mock_get_all.return_value = ["1"]
+
+ self.ops.detach_all("vm_ref")
+
+ mock_get_all.assert_called_once_with("vm_ref")
+ mock_detach.assert_called_once_with("vm_ref", ["1"])
+
+ def test_get_all_volume_vbd_refs_no_vbds(self):
+ with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
+ with mock.patch.object(self.session.VBD,
+ "get_other_config") as mock_conf:
+ mock_get.return_value = []
+
+ result = self.ops._get_all_volume_vbd_refs("vm_ref")
+
+ self.assertEqual([], list(result))
+ mock_get.assert_called_once_with("vm_ref")
+ self.assertFalse(mock_conf.called)
+
+ def test_get_all_volume_vbd_refs_no_volumes(self):
+ with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
+ with mock.patch.object(self.session.VBD,
+ "get_other_config") as mock_conf:
+ mock_get.return_value = ["1"]
+ mock_conf.return_value = {}
+
+ result = self.ops._get_all_volume_vbd_refs("vm_ref")
+
+ self.assertEqual([], list(result))
+ mock_get.assert_called_once_with("vm_ref")
+ mock_conf.assert_called_once_with("1")
+
+ def test_get_all_volume_vbd_refs_with_volumes(self):
+ with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
+ with mock.patch.object(self.session.VBD,
+ "get_other_config") as mock_conf:
+ mock_get.return_value = ["1", "2"]
+ mock_conf.return_value = {"osvol": True}
+
+ result = self.ops._get_all_volume_vbd_refs("vm_ref")
+
+ self.assertEqual(["1", "2"], list(result))
+ mock_get.assert_called_once_with("vm_ref")
+
+
+class AttachVolumeTestCase(VolumeOpsTestBase):
+ @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
+ @mock.patch.object(vm_utils, "vm_ref_or_raise")
+ def test_attach_volume_default_hotplug(self, mock_get_vm, mock_attach):
+ mock_get_vm.return_value = "vm_ref"
+
+ self.ops.attach_volume({}, "instance_name", "/dev/xvda")
+
+ mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
+ True)
+
+ @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
+ @mock.patch.object(vm_utils, "vm_ref_or_raise")
+ def test_attach_volume_hotplug(self, mock_get_vm, mock_attach):
+ mock_get_vm.return_value = "vm_ref"
+
+ self.ops.attach_volume({}, "instance_name", "/dev/xvda", False)
+
+ mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
+ False)
+
+ @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
+ def test_attach_volume_default_hotplug_connect_volume(self, mock_attach):
+ self.ops.connect_volume({})
+ mock_attach.assert_called_once_with({})
+
+ @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
+ @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
+ @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
+ @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
+ def test_attach_volume_with_defaults(self, mock_attach, mock_hypervisor,
+ mock_provider, mock_driver):
+ connection_info = {"data": {}}
+ with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
+ mock_provider.return_value = ("sr_ref", "sr_uuid")
+ mock_vdi.return_value = "vdi_uuid"
+
+ result = self.ops._attach_volume(connection_info)
+
+            self.assertEqual(("sr_uuid", "vdi_uuid"), result)
+
+ mock_driver.assert_called_once_with(connection_info)
+ mock_provider.assert_called_once_with({}, None)
+ mock_hypervisor.assert_called_once_with("sr_ref", {})
+ self.assertFalse(mock_attach.called)
+
+ @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
+ @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
+ @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
+ @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
+ def test_attach_volume_with_hot_attach(self, mock_attach, mock_hypervisor,
+ mock_provider, mock_driver):
+ connection_info = {"data": {}}
+ with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
+ mock_provider.return_value = ("sr_ref", "sr_uuid")
+ mock_hypervisor.return_value = "vdi_ref"
+ mock_vdi.return_value = "vdi_uuid"
+
+ result = self.ops._attach_volume(connection_info, "vm_ref",
+ "name", 2, True)
+
+            self.assertEqual(("sr_uuid", "vdi_uuid"), result)
+
+ mock_driver.assert_called_once_with(connection_info)
+ mock_provider.assert_called_once_with({}, "name")
+ mock_hypervisor.assert_called_once_with("sr_ref", {})
+ mock_attach.assert_called_once_with("vdi_ref", "vm_ref", "name", 2,
+ True)
+
+ @mock.patch.object(volume_utils, "forget_sr")
+ @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
+ @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
+ @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
+ @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
+ def test_attach_volume_cleanup(self, mock_attach, mock_hypervisor,
+ mock_provider, mock_driver, mock_forget):
+ connection_info = {"data": {}}
+ mock_provider.return_value = ("sr_ref", "sr_uuid")
+ mock_hypervisor.side_effect = test.TestingException
+
+ self.assertRaises(test.TestingException,
+ self.ops._attach_volume, connection_info)
+
+ mock_driver.assert_called_once_with(connection_info)
+ mock_provider.assert_called_once_with({}, None)
+ mock_hypervisor.assert_called_once_with("sr_ref", {})
+ mock_forget.assert_called_once_with(self.session, "sr_ref")
+ self.assertFalse(mock_attach.called)
+
+ def test_check_is_supported_driver_type_pass_iscsi(self):
+ conn_info = {"driver_volume_type": "iscsi"}
+ self.ops._check_is_supported_driver_type(conn_info)
+
+ def test_check_is_supported_driver_type_pass_xensm(self):
+ conn_info = {"driver_volume_type": "xensm"}
+ self.ops._check_is_supported_driver_type(conn_info)
+
+ def test_check_is_supported_driver_type_pass_bad(self):
+ conn_info = {"driver_volume_type": "bad"}
+ self.assertRaises(exception.VolumeDriverNotFound,
+ self.ops._check_is_supported_driver_type, conn_info)
+
+ @mock.patch.object(volume_utils, "introduce_sr")
+ @mock.patch.object(volume_utils, "find_sr_by_uuid")
+ @mock.patch.object(volume_utils, "parse_sr_info")
+ def test_connect_to_volume_provider_new_sr(self, mock_parse, mock_find_sr,
+ mock_introduce_sr):
+ mock_parse.return_value = ("uuid", "label", "params")
+ mock_find_sr.return_value = None
+ mock_introduce_sr.return_value = "sr_ref"
+
+ ref, uuid = self.ops._connect_to_volume_provider({}, "name")
+
+ self.assertEqual("sr_ref", ref)
+ self.assertEqual("uuid", uuid)
+ mock_parse.assert_called_once_with({}, "Disk-for:name")
+ mock_find_sr.assert_called_once_with(self.session, "uuid")
+ mock_introduce_sr.assert_called_once_with(self.session, "uuid",
+ "label", "params")
+
+ @mock.patch.object(volume_utils, "introduce_sr")
+ @mock.patch.object(volume_utils, "find_sr_by_uuid")
+ @mock.patch.object(volume_utils, "parse_sr_info")
+ def test_connect_to_volume_provider_old_sr(self, mock_parse, mock_find_sr,
+ mock_introduce_sr):
+ mock_parse.return_value = ("uuid", "label", "params")
+ mock_find_sr.return_value = "sr_ref"
+
+ ref, uuid = self.ops._connect_to_volume_provider({}, "name")
+
+ self.assertEqual("sr_ref", ref)
+ self.assertEqual("uuid", uuid)
+ mock_parse.assert_called_once_with({}, "Disk-for:name")
+ mock_find_sr.assert_called_once_with(self.session, "uuid")
+ self.assertFalse(mock_introduce_sr.called)
+
+ @mock.patch.object(volume_utils, "introduce_vdi")
+ def test_connect_hypervisor_to_volume_regular(self, mock_intro):
+ mock_intro.return_value = "vdi"
+
+ result = self.ops._connect_hypervisor_to_volume("sr", {})
+
+ self.assertEqual("vdi", result)
+ mock_intro.assert_called_once_with(self.session, "sr")
+
+ @mock.patch.object(volume_utils, "introduce_vdi")
+ def test_connect_hypervisor_to_volume_vdi(self, mock_intro):
+ mock_intro.return_value = "vdi"
+
+ conn = {"vdi_uuid": "id"}
+ result = self.ops._connect_hypervisor_to_volume("sr", conn)
+
+ self.assertEqual("vdi", result)
+ mock_intro.assert_called_once_with(self.session, "sr",
+ vdi_uuid="id")
+
+ @mock.patch.object(volume_utils, "introduce_vdi")
+ def test_connect_hypervisor_to_volume_lun(self, mock_intro):
+ mock_intro.return_value = "vdi"
+
+ conn = {"target_lun": "lun"}
+ result = self.ops._connect_hypervisor_to_volume("sr", conn)
+
+ self.assertEqual("vdi", result)
+ mock_intro.assert_called_once_with(self.session, "sr",
+ target_lun="lun")
+
+ @mock.patch.object(vm_utils, "is_vm_shutdown")
+ @mock.patch.object(vm_utils, "create_vbd")
+ def test_attach_volume_to_vm_plug(self, mock_vbd, mock_shutdown):
+ mock_vbd.return_value = "vbd"
+ mock_shutdown.return_value = False
+
+ with mock.patch.object(self.session.VBD, "plug") as mock_plug:
+ self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
+ mock_plug.assert_called_once_with("vbd", "vm")
+
+ mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
+ bootable=False, osvol=True)
+ mock_shutdown.assert_called_once_with(self.session, "vm")
+
+ @mock.patch.object(vm_utils, "is_vm_shutdown")
+ @mock.patch.object(vm_utils, "create_vbd")
+ def test_attach_volume_to_vm_no_plug(self, mock_vbd, mock_shutdown):
+ mock_vbd.return_value = "vbd"
+ mock_shutdown.return_value = True
+
+ with mock.patch.object(self.session.VBD, "plug") as mock_plug:
+ self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
+ self.assertFalse(mock_plug.called)
+
+ mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
+ bootable=False, osvol=True)
+ mock_shutdown.assert_called_once_with(self.session, "vm")
+
+ @mock.patch.object(vm_utils, "is_vm_shutdown")
+ @mock.patch.object(vm_utils, "create_vbd")
+ def test_attach_volume_to_vm_no_hotplug(self, mock_vbd, mock_shutdown):
+ mock_vbd.return_value = "vbd"
+
+ with mock.patch.object(self.session.VBD, "plug") as mock_plug:
+ self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, False)
+ self.assertFalse(mock_plug.called)
+
+ mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
+ bootable=False, osvol=True)
+ self.assertFalse(mock_shutdown.called)
+
+
+class FindBadVolumeTestCase(VolumeOpsTestBase):
+ @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+ def test_find_bad_volumes_no_vbds(self, mock_get_all):
+ mock_get_all.return_value = []
+
+ result = self.ops.find_bad_volumes("vm_ref")
+
+ mock_get_all.assert_called_once_with("vm_ref")
+ self.assertEqual([], result)
+
+ @mock.patch.object(volume_utils, "find_sr_from_vbd")
+ @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+ def test_find_bad_volumes_no_bad_vbds(self, mock_get_all, mock_find_sr):
+ mock_get_all.return_value = ["1", "2"]
+ mock_find_sr.return_value = "sr_ref"
+
+ with mock.patch.object(self.session.SR, "scan") as mock_scan:
+ result = self.ops.find_bad_volumes("vm_ref")
+
+ mock_get_all.assert_called_once_with("vm_ref")
+ expected_find = [mock.call(self.session, "1"),
+ mock.call(self.session, "2")]
+ self.assertEqual(expected_find, mock_find_sr.call_args_list)
+ expected_scan = [mock.call("sr_ref"), mock.call("sr_ref")]
+ self.assertEqual(expected_scan, mock_scan.call_args_list)
+ self.assertEqual([], result)
+
+ @mock.patch.object(volume_utils, "find_sr_from_vbd")
+ @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+ def test_find_bad_volumes_bad_vbds(self, mock_get_all, mock_find_sr):
+ mock_get_all.return_value = ["vbd_ref"]
+ mock_find_sr.return_value = "sr_ref"
+
+ class FakeException(Exception):
+ details = ['SR_BACKEND_FAILURE_40', "", "", ""]
+
+ session = mock.Mock()
+ session.XenAPI.Failure = FakeException
+ self.ops._session = session
+
+ with mock.patch.object(session.SR, "scan") as mock_scan:
+ with mock.patch.object(session.VBD,
+ "get_device") as mock_get:
+ mock_scan.side_effect = FakeException
+ mock_get.return_value = "xvdb"
+
+ result = self.ops.find_bad_volumes("vm_ref")
+
+ mock_get_all.assert_called_once_with("vm_ref")
+ mock_scan.assert_called_once_with("sr_ref")
+ mock_get.assert_called_once_with("vbd_ref")
+ self.assertEqual(["/dev/xvdb"], result)
+
+ @mock.patch.object(volume_utils, "find_sr_from_vbd")
+ @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
+ def test_find_bad_volumes_raises(self, mock_get_all, mock_find_sr):
+ mock_get_all.return_value = ["vbd_ref"]
+ mock_find_sr.return_value = "sr_ref"
+
+ class FakeException(Exception):
+ details = ['foo', "", "", ""]
+
+ session = mock.Mock()
+ session.XenAPI.Failure = FakeException
+ self.ops._session = session
+
+ with mock.patch.object(session.SR, "scan") as mock_scan:
+ with mock.patch.object(session.VBD,
+ "get_device") as mock_get:
+ mock_scan.side_effect = FakeException
+ mock_get.return_value = "xvdb"
+
+ self.assertRaises(FakeException,
+ self.ops.find_bad_volumes, "vm_ref")
+ mock_scan.assert_called_once_with("sr_ref")
+
+
+class CleanupFromVDIsTestCase(VolumeOpsTestBase):
+ def _check_find_purge_calls(self, find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs):
+ find_sr_calls = [mock.call(self.ops._session, vdi_ref) for vdi_ref
+ in vdi_refs]
+ find_sr_from_vdi.assert_has_calls(find_sr_calls)
+ purge_sr_calls = [mock.call(self.ops._session, sr_ref) for sr_ref
+ in sr_refs]
+ purge_sr.assert_has_calls(purge_sr_calls)
+
+ @mock.patch.object(volume_utils, 'find_sr_from_vdi')
+ @mock.patch.object(volume_utils, 'purge_sr')
+ def test_safe_cleanup_from_vdis(self, purge_sr, find_sr_from_vdi):
+ vdi_refs = ['vdi_ref1', 'vdi_ref2']
+ sr_refs = ['sr_ref1', 'sr_ref2']
+ find_sr_from_vdi.side_effect = sr_refs
+ self.ops.safe_cleanup_from_vdis(vdi_refs)
+
+ self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs)
+
+ @mock.patch.object(volume_utils, 'find_sr_from_vdi',
+ side_effect=[exception.StorageError(reason=''), 'sr_ref2'])
+ @mock.patch.object(volume_utils, 'purge_sr')
+ def test_safe_cleanup_from_vdis_handles_find_sr_exception(self, purge_sr,
+ find_sr_from_vdi):
+ vdi_refs = ['vdi_ref1', 'vdi_ref2']
+ sr_refs = ['sr_ref2']
+ find_sr_from_vdi.side_effect = [exception.StorageError(reason=''),
+ sr_refs[0]]
+ self.ops.safe_cleanup_from_vdis(vdi_refs)
+
+ self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs)
+
+ @mock.patch.object(volume_utils, 'find_sr_from_vdi')
+ @mock.patch.object(volume_utils, 'purge_sr')
+ def test_safe_cleanup_from_vdis_handles_purge_sr_exception(self, purge_sr,
+ find_sr_from_vdi):
+ vdi_refs = ['vdi_ref1', 'vdi_ref2']
+ sr_refs = ['sr_ref1', 'sr_ref2']
+ find_sr_from_vdi.side_effect = sr_refs
+        purge_sr.side_effect = [test.TestingException, None]
+ self.ops.safe_cleanup_from_vdis(vdi_refs)
+
+ self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs)
diff --git a/nova/tests/unit/virt/xenapi/test_xenapi.py b/nova/tests/unit/virt/xenapi/test_xenapi.py
new file mode 100644
index 0000000000..c90f8c2f63
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_xenapi.py
@@ -0,0 +1,4105 @@
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Test suite for XenAPI."""
+
+import ast
+import base64
+import contextlib
+import copy
+import functools
+import os
+import re
+
+import mock
+import mox
+from oslo.concurrency import lockutils
+from oslo.config import cfg
+from oslo.serialization import jsonutils
+from oslo.utils import importutils
+
+from nova.compute import api as compute_api
+from nova.compute import arch
+from nova.compute import flavors
+from nova.compute import hvtype
+from nova.compute import power_state
+from nova.compute import task_states
+from nova.compute import utils as compute_utils
+from nova.compute import vm_states
+from nova.conductor import api as conductor_api
+from nova import context
+from nova import crypto
+from nova import db
+from nova import exception
+from nova import objects
+from nova.objects import instance as instance_obj
+from nova.openstack.common.fixture import config as config_fixture
+from nova.openstack.common import log as logging
+from nova import test
+from nova.tests.unit.db import fakes as db_fakes
+from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network
+from nova.tests.unit import fake_processutils
+import nova.tests.unit.image.fake as fake_image
+from nova.tests.unit import matchers
+from nova.tests.unit.objects import test_aggregate
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt import fake
+from nova.virt.xenapi import agent
+from nova.virt.xenapi.client import session as xenapi_session
+from nova.virt.xenapi import driver as xenapi_conn
+from nova.virt.xenapi import fake as xenapi_fake
+from nova.virt.xenapi import host
+from nova.virt.xenapi.image import glance
+from nova.virt.xenapi import pool
+from nova.virt.xenapi import pool_states
+from nova.virt.xenapi import vm_utils
+from nova.virt.xenapi import vmops
+from nova.virt.xenapi import volume_utils
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+CONF.import_opt('compute_manager', 'nova.service')
+CONF.import_opt('network_manager', 'nova.service')
+CONF.import_opt('compute_driver', 'nova.virt.driver')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('default_availability_zone', 'nova.availability_zones')
+CONF.import_opt('login_timeout', 'nova.virt.xenapi.client.session',
+ group="xenserver")
+
+IMAGE_MACHINE = '1'
+IMAGE_KERNEL = '2'
+IMAGE_RAMDISK = '3'
+IMAGE_RAW = '4'
+IMAGE_VHD = '5'
+IMAGE_ISO = '6'
+IMAGE_IPXE_ISO = '7'
+IMAGE_FROM_VOLUME = '8'
+
+IMAGE_FIXTURES = {
+ IMAGE_MACHINE: {
+ 'image_meta': {'name': 'fakemachine', 'size': 0,
+ 'disk_format': 'ami',
+ 'container_format': 'ami'},
+ },
+ IMAGE_KERNEL: {
+ 'image_meta': {'name': 'fakekernel', 'size': 0,
+ 'disk_format': 'aki',
+ 'container_format': 'aki'},
+ },
+ IMAGE_RAMDISK: {
+ 'image_meta': {'name': 'fakeramdisk', 'size': 0,
+ 'disk_format': 'ari',
+ 'container_format': 'ari'},
+ },
+ IMAGE_RAW: {
+ 'image_meta': {'name': 'fakeraw', 'size': 0,
+ 'disk_format': 'raw',
+ 'container_format': 'bare'},
+ },
+ IMAGE_VHD: {
+ 'image_meta': {'name': 'fakevhd', 'size': 0,
+ 'disk_format': 'vhd',
+ 'container_format': 'ovf'},
+ },
+ IMAGE_ISO: {
+ 'image_meta': {'name': 'fakeiso', 'size': 0,
+ 'disk_format': 'iso',
+ 'container_format': 'bare'},
+ },
+ IMAGE_IPXE_ISO: {
+ 'image_meta': {'name': 'fake_ipxe_iso', 'size': 0,
+ 'disk_format': 'iso',
+ 'container_format': 'bare',
+ 'properties': {'ipxe_boot': 'true'}},
+ },
+ IMAGE_FROM_VOLUME: {
+ 'image_meta': {'name': 'fake_ipxe_iso',
+ 'properties': {'foo': 'bar'}},
+ },
+}
+
+
+def get_session():
+ return xenapi_session.XenAPISession('test_url', 'root', 'test_pass')
+
+
+def set_image_fixtures():
+ image_service = fake_image.FakeImageService()
+ image_service.images.clear()
+ for image_id, image_meta in IMAGE_FIXTURES.items():
+ image_meta = image_meta['image_meta']
+ image_meta['id'] = image_id
+ image_service.create(None, image_meta)
+
+
+def get_fake_device_info():
+ # FIXME: 'sr_uuid', 'introduce_sr_keys', sr_type and vdi_uuid
+ # can be removed from the dict when LP bug #1087308 is fixed
+ fake_vdi_ref = xenapi_fake.create_vdi('fake-vdi', None)
+ fake_vdi_uuid = xenapi_fake.get_record('VDI', fake_vdi_ref)['uuid']
+ fake = {'block_device_mapping':
+ [{'connection_info': {'driver_volume_type': 'iscsi',
+ 'data': {'sr_uuid': 'falseSR',
+ 'introduce_sr_keys': ['sr_type'],
+ 'sr_type': 'iscsi',
+ 'vdi_uuid': fake_vdi_uuid,
+ 'target_discovered': False,
+ 'target_iqn': 'foo_iqn:foo_volid',
+ 'target_portal': 'localhost:3260',
+ 'volume_id': 'foo_volid',
+ 'target_lun': 1,
+ 'auth_password': 'my-p@55w0rd',
+ 'auth_username': 'johndoe',
+ 'auth_method': u'CHAP'}, },
+ 'mount_device': 'vda',
+ 'delete_on_termination': False}, ],
+ 'root_device_name': '/dev/sda',
+ 'ephemerals': [],
+ 'swap': None, }
+ return fake
+
+
+def stub_vm_utils_with_vdi_attached_here(function):
+ """vm_utils.with_vdi_attached_here needs to be stubbed out because it
+ calls down to the filesystem to attach a vdi. This provides a
+ decorator to handle that.
+ """
+ @functools.wraps(function)
+ def decorated_function(self, *args, **kwargs):
+ @contextlib.contextmanager
+ def fake_vdi_attached_here(*args, **kwargs):
+ fake_dev = 'fakedev'
+ yield fake_dev
+
+ def fake_image_download(*args, **kwargs):
+ pass
+
+ orig_vdi_attached_here = vm_utils.vdi_attached_here
+ orig_image_download = fake_image._FakeImageService.download
+ try:
+ vm_utils.vdi_attached_here = fake_vdi_attached_here
+ fake_image._FakeImageService.download = fake_image_download
+ return function(self, *args, **kwargs)
+ finally:
+ fake_image._FakeImageService.download = orig_image_download
+ vm_utils.vdi_attached_here = orig_vdi_attached_here
+
+ return decorated_function
+
+
+def get_create_system_metadata(context, instance_type_id):
+ flavor = db.flavor_get(context, instance_type_id)
+ return flavors.save_flavor_info({}, flavor)
+
+
+def create_instance_with_system_metadata(context, instance_values):
+ instance_values['system_metadata'] = get_create_system_metadata(
+ context, instance_values['instance_type_id'])
+ instance_values['pci_devices'] = []
+ return db.instance_create(context, instance_values)
+
+
+class XenAPIVolumeTestCase(stubs.XenAPITestBaseNoDB):
+ """Unit tests for Volume operations."""
+ def setUp(self):
+ super(XenAPIVolumeTestCase, self).setUp()
+ self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
+ self.fixture.config(disable_process_locking=True,
+ group='oslo_concurrency')
+ self.flags(firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+
+ self.instance = fake_instance.fake_db_instance(name='foo')
+
+ @classmethod
+ def _make_connection_info(cls):
+ target_iqn = 'iqn.2010-10.org.openstack:volume-00000001'
+ return {'driver_volume_type': 'iscsi',
+ 'data': {'volume_id': 1,
+ 'target_iqn': target_iqn,
+ 'target_portal': '127.0.0.1:3260,fake',
+ 'target_lun': None,
+ 'auth_method': 'CHAP',
+ 'auth_username': 'username',
+ 'auth_password': 'password'}}
+
+ def test_attach_volume(self):
+ # This shows how to test Ops classes' methods.
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ vm = xenapi_fake.create_vm(self.instance['name'], 'Running')
+ conn_info = self._make_connection_info()
+ self.assertIsNone(
+ conn.attach_volume(None, conn_info, self.instance, '/dev/sdc'))
+
+ # check that the VM has a VBD attached to it
+ # Get XenAPI record for VBD
+ vbds = xenapi_fake.get_all('VBD')
+ vbd = xenapi_fake.get_record('VBD', vbds[0])
+ vm_ref = vbd['VM']
+ self.assertEqual(vm_ref, vm)
+
+ def test_attach_volume_raise_exception(self):
+ # This shows how to test when exceptions are raised.
+ stubs.stubout_session(self.stubs,
+ stubs.FakeSessionForVolumeFailedTests)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ xenapi_fake.create_vm(self.instance['name'], 'Running')
+ self.assertRaises(exception.VolumeDriverNotFound,
+ conn.attach_volume,
+ None, {'driver_volume_type': 'nonexist'},
+ self.instance, '/dev/sdc')
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIVMTestCase(stubs.XenAPITestBase):
+ """Unit tests for VM operations."""
+ def setUp(self):
+ super(XenAPIVMTestCase, self).setUp()
+ self.useFixture(test.SampleNetworks())
+ self.network = importutils.import_object(CONF.network_manager)
+ self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
+ self.fixture.config(disable_process_locking=True,
+ group='oslo_concurrency')
+ self.flags(instance_name_template='%d',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver')
+ self.flags(connection_url='test_url',
+ connection_password='test_pass',
+ group='xenserver')
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ xenapi_fake.create_network('fake', 'fake_br1')
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ stubs.stubout_get_this_vm_uuid(self.stubs)
+ stubs.stub_out_vm_methods(self.stubs)
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ self.user_id = 'fake'
+ self.project_id = 'fake'
+ self.context = context.RequestContext(self.user_id, self.project_id)
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.conn._session.is_local_connection = False
+
+ fake_image.stub_out_image_service(self.stubs)
+ set_image_fixtures()
+ stubs.stubout_image_service_download(self.stubs)
+ stubs.stubout_stream_disk(self.stubs)
+
+ def fake_inject_instance_metadata(self, instance, vm):
+ pass
+ self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
+ fake_inject_instance_metadata)
+
+ def fake_safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
+ name_label = "fakenamelabel"
+ disk_type = "fakedisktype"
+ virtual_size = 777
+ return vm_utils.create_vdi(
+ session, sr_ref, instance, name_label, disk_type,
+ virtual_size)
+ self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi)
+
+ def tearDown(self):
+ fake_image.FakeImageService_reset()
+ super(XenAPIVMTestCase, self).tearDown()
+
    def test_init_host(self):
        """init_host detaches leftover instance VDIs from this (dom0) VM.

        Only the VBD whose VDI carries ``nova_instance_uuid`` in its
        other_config is unplugged; the local root disk and a VDI that
        merely has an instance-like name_label stay attached.
        """
        session = get_session()
        vm = vm_utils._get_this_vm_ref(session)
        # Local root disk
        vdi0 = xenapi_fake.create_vdi('compute', None)
        vbd0 = xenapi_fake.create_vbd(vm, vdi0)
        # Instance VDI
        vdi1 = xenapi_fake.create_vdi('instance-aaaa', None,
                other_config={'nova_instance_uuid': 'aaaa'})
        xenapi_fake.create_vbd(vm, vdi1)
        # Only looks like instance VDI
        vdi2 = xenapi_fake.create_vdi('instance-bbbb', None)
        vbd2 = xenapi_fake.create_vbd(vm, vdi2)

        self.conn.init_host(None)
        # Only the real instance VBD (vdi1's) should have been removed.
        self.assertEqual(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2]))
    def test_instance_exists(self):
        """instance_exists is True when vm_utils.lookup finds a VM ref."""
        self.mox.StubOutWithMock(vm_utils, 'lookup')
        vm_utils.lookup(mox.IgnoreArg(), 'foo').AndReturn(True)
        self.mox.ReplayAll()

        # Force the instance's computed name to match the lookup expectation.
        self.stubs.Set(objects.Instance, 'name', 'foo')
        instance = objects.Instance(uuid='fake-uuid')
        self.assertTrue(self.conn.instance_exists(instance))
    def test_instance_not_exists(self):
        """instance_exists is False when vm_utils.lookup returns None."""
        self.mox.StubOutWithMock(vm_utils, 'lookup')
        vm_utils.lookup(mox.IgnoreArg(), 'bar').AndReturn(None)
        self.mox.ReplayAll()

        # Force the instance's computed name to match the lookup expectation.
        self.stubs.Set(objects.Instance, 'name', 'bar')
        instance = objects.Instance(uuid='fake-uuid')
        self.assertFalse(self.conn.instance_exists(instance))
+ def test_list_instances_0(self):
+ instances = self.conn.list_instances()
+ self.assertEqual(instances, [])
+
+ def test_list_instance_uuids_0(self):
+ instance_uuids = self.conn.list_instance_uuids()
+ self.assertEqual(instance_uuids, [])
+
    def test_list_instance_uuids(self):
        """list_instance_uuids returns the uuid of every created instance."""
        uuids = []
        for x in xrange(1, 4):
            instance = self._create_instance(x)
            uuids.append(instance['uuid'])
        instance_uuids = self.conn.list_instance_uuids()
        # Compare as sets: ordering of the driver's listing is unspecified.
        self.assertEqual(len(uuids), len(instance_uuids))
        self.assertEqual(set(uuids), set(instance_uuids))
    def test_get_rrd_server(self):
        """_get_rrd_server parses scheme and host from connection_url."""
        self.flags(connection_url='myscheme://myaddress/',
                   group='xenserver')
        server_info = vm_utils._get_rrd_server()
        self.assertEqual(server_info[0], 'myscheme')
        self.assertEqual(server_info[1], 'myaddress')
    # Raw RRD counters expected when parsing the canned vm_rrd.xml fixture;
    # consumed by test_get_diagnostics below.
    expected_raw_diagnostics = {
        'vbd_xvdb_write': '0.0',
        'memory_target': '4294967296.0000',
        'memory_internal_free': '1415564.0000',
        'memory': '4294967296.0000',
        'vbd_xvda_write': '0.0',
        'cpu0': '0.0042',
        'vif_0_tx': '287.4134',
        'vbd_xvda_read': '0.0',
        'vif_0_rx': '1816.0144',
        'vif_2_rx': '0.0',
        'vif_2_tx': '0.0',
        'vbd_xvdb_read': '0.0',
        'last_update': '1328795567',
    }
+
    def test_get_diagnostics(self):
        """get_diagnostics returns the raw counters parsed from the RRD."""
        def fake_get_rrd(host, vm_uuid):
            # Serve the canned RRD fixture that sits next to this test file,
            # with all whitespace stripped (as the real endpoint returns it).
            path = os.path.dirname(os.path.realpath(__file__))
            with open(os.path.join(path, 'vm_rrd.xml')) as f:
                return re.sub(r'\s', '', f.read())
        self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)

        expected = self.expected_raw_diagnostics
        instance = self._create_instance()
        actual = self.conn.get_diagnostics(instance)
        self.assertThat(actual, matchers.DictMatches(expected))
    def test_get_instance_diagnostics(self):
        """get_instance_diagnostics returns the v1.0 structured format."""
        def fake_get_rrd(host, vm_uuid):
            # Serve the canned RRD fixture, whitespace-stripped.
            path = os.path.dirname(os.path.realpath(__file__))
            with open(os.path.join(path, 'vm_rrd.xml')) as f:
                return re.sub(r'\s', '', f.read())
        self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)

        # Full serialized Diagnostics object expected for a fresh instance.
        expected = {
            'config_drive': False,
            'state': 'running',
            'driver': 'xenapi',
            'version': '1.0',
            'uptime': 0,
            'hypervisor_os': None,
            'cpu_details': [{'time': 0}, {'time': 0},
                            {'time': 0}, {'time': 0}],
            'nic_details': [{'mac_address': '00:00:00:00:00:00',
                             'rx_drop': 0,
                             'rx_errors': 0,
                             'rx_octets': 0,
                             'rx_packets': 0,
                             'tx_drop': 0,
                             'tx_errors': 0,
                             'tx_octets': 0,
                             'tx_packets': 0}],
            'disk_details': [{'errors_count': 0,
                              'id': '',
                              'read_bytes': 0,
                              'read_requests': 0,
                              'write_bytes': 0,
                              'write_requests': 0}],
            'memory_details': {'maximum': 8192, 'used': 0}}

        instance = self._create_instance()
        actual = self.conn.get_instance_diagnostics(instance)
        self.assertEqual(expected, actual.serialize())
    def test_get_vnc_console(self):
        """The VNC console path points at the instance's own VM ref."""
        instance = self._create_instance(obj=True)
        session = get_session()
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vm_ref = vm_utils.lookup(session, instance['name'])

        console = conn.get_vnc_console(self.context, instance)

        # Note(sulo): We don't care about session id in test
        # they will always differ so strip that out
        actual_path = console.internal_access_path.split('&')[0]
        expected_path = "/console?ref=%s" % str(vm_ref)

        self.assertEqual(expected_path, actual_path)
    def test_get_vnc_console_for_rescue(self):
        """For a rescued instance, the console targets the -rescue VM."""
        instance = self._create_instance(obj=True)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
                                          'Running')
        # Set instance state to rescued
        instance['vm_state'] = 'rescued'

        console = conn.get_vnc_console(self.context, instance)

        # Note(sulo): We don't care about session id in test
        # they will always differ so strip that out
        actual_path = console.internal_access_path.split('&')[0]
        expected_path = "/console?ref=%s" % str(rescue_vm)

        self.assertEqual(expected_path, actual_path)
    def test_get_vnc_console_instance_not_ready(self):
        """A still-building (unspawned) instance raises InstanceNotFound."""
        instance = self._create_instance(obj=True, spawn=False)
        instance.vm_state = 'building'

        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.InstanceNotFound,
                          conn.get_vnc_console, self.context, instance)
    def test_get_vnc_console_rescue_not_ready(self):
        """A rescued state without a rescue VM raises InstanceNotReady."""
        instance = self._create_instance(obj=True, spawn=False)
        instance.vm_state = 'rescued'

        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.InstanceNotReady,
                          conn.get_vnc_console, self.context, instance)
    def test_instance_snapshot_fails_with_no_primary_vdi(self):
        """Snapshot raises when no VBD is marked as the root (userdevice 0).

        The stubbed create_vbd forces a bogus userdevice so the driver
        cannot find a primary VDI to snapshot.
        """
        def create_bad_vbd(session, vm_ref, vdi_ref, userdevice,
                           vbd_type='disk', read_only=False, bootable=False,
                           osvol=False):
            # Deliberately record a non-numeric userdevice.
            vbd_rec = {'VM': vm_ref,
                       'VDI': vdi_ref,
                       'userdevice': 'fake',
                       'currently_attached': False}
            vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
            xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
            return vbd_ref

        self.stubs.Set(vm_utils, 'create_vbd', create_bad_vbd)
        stubs.stubout_instance_snapshot(self.stubs)
        # Stubbing out firewall driver as previous stub sets alters
        # xml rpc result parsing
        stubs.stubout_firewall_driver(self.stubs, self.conn)
        instance = self._create_instance()

        image_id = "my_snapshot_id"
        self.assertRaises(exception.NovaException, self.conn.snapshot,
                          self.context, instance, image_id,
                          lambda *args, **kwargs: None)
    def test_instance_snapshot(self):
        """Happy-path snapshot: upload happens and all artifacts are cleaned.

        Verifies the task_state transitions, that the snapshot VM/VBDs/VDIs
        are torn down afterwards, and that the image upload was invoked with
        the expected context/instance/image id.
        """
        # Snapshot must update task_state twice, in this exact order.
        expected_calls = [
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_UPLOADING,
                  'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
        image_id = "my_snapshot_id"

        stubs.stubout_instance_snapshot(self.stubs)
        stubs.stubout_is_snapshot(self.stubs)
        # Stubbing out firewall driver as previous stub sets alters
        # xml rpc result parsing
        stubs.stubout_firewall_driver(self.stubs, self.conn)

        instance = self._create_instance()

        self.fake_upload_called = False

        def fake_image_upload(_self, ctx, session, inst, img_id, vdi_uuids):
            # Capture the upload call and check its arguments in place.
            self.fake_upload_called = True
            self.assertEqual(ctx, self.context)
            self.assertEqual(inst, instance)
            self.assertIsInstance(vdi_uuids, list)
            self.assertEqual(img_id, image_id)

        self.stubs.Set(glance.GlanceStore, 'upload_image',
                       fake_image_upload)

        self.conn.snapshot(self.context, instance, image_id,
                           func_call_matcher.call)

        # Ensure VM was torn down
        vm_labels = []
        for vm_ref in xenapi_fake.get_all('VM'):
            vm_rec = xenapi_fake.get_record('VM', vm_ref)
            if not vm_rec["is_control_domain"]:
                vm_labels.append(vm_rec["name_label"])

        self.assertEqual(vm_labels, [instance['name']])

        # Ensure VBDs were torn down
        vbd_labels = []
        for vbd_ref in xenapi_fake.get_all('VBD'):
            vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
            vbd_labels.append(vbd_rec["vm_name_label"])

        self.assertEqual(vbd_labels, [instance['name']])

        # Ensure task states changed in correct order
        self.assertIsNone(func_call_matcher.match())

        # Ensure VDIs were torn down
        for vdi_ref in xenapi_fake.get_all('VDI'):
            vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
            name_label = vdi_rec["name_label"]
            self.assertFalse(name_label.endswith('snapshot'))

        self.assertTrue(self.fake_upload_called)
    def create_vm_record(self, conn, os_type, name):
        """Capture the Nova info and XenAPI record for the single guest VM.

        Stores them on ``self.vm_info`` / ``self.vm`` for later checks by
        check_vm_record and the check_vm_params_* helpers.
        """
        instances = conn.list_instances()
        self.assertEqual(instances, [name])

        # Get Nova record for VM
        vm_info = conn.get_info({'name': name})
        # Get XenAPI record for VM
        vms = [rec for ref, rec
               in xenapi_fake.get_all_records('VM').iteritems()
               if not rec['is_control_domain']]
        vm = vms[0]
        self.vm_info = vm_info
        self.vm = vm
    def check_vm_record(self, conn, instance_type_id, check_injection):
        """Assert the captured VM record matches the flavor and is running.

        :param conn: database-capable connection passed to db.flavor_get
        :param instance_type_id: flavor id whose memory/vcpu settings the
            VM record must reflect
        :param check_injection: when True, also verify the network config
            injected into xenstore_data
        """
        flavor = db.flavor_get(conn, instance_type_id)
        # XenAPI stores memory limits as byte strings; Nova reports KiB.
        mem_kib = long(flavor['memory_mb']) << 10
        mem_bytes = str(mem_kib << 10)
        vcpus = flavor['vcpus']
        vcpu_weight = flavor['vcpu_weight']

        self.assertEqual(self.vm_info['max_mem'], mem_kib)
        self.assertEqual(self.vm_info['mem'], mem_kib)
        self.assertEqual(self.vm['memory_static_max'], mem_bytes)
        self.assertEqual(self.vm['memory_dynamic_max'], mem_bytes)
        self.assertEqual(self.vm['memory_dynamic_min'], mem_bytes)
        self.assertEqual(self.vm['VCPUs_max'], str(vcpus))
        self.assertEqual(self.vm['VCPUs_at_startup'], str(vcpus))
        if vcpu_weight is None:
            self.assertEqual(self.vm['VCPUs_params'], {})
        else:
            self.assertEqual(self.vm['VCPUs_params'],
                             {'weight': str(vcpu_weight), 'cap': '0'})

        # Check that the VM is running according to Nova
        self.assertEqual(self.vm_info['state'], power_state.RUNNING)

        # Check that the VM is running according to XenAPI.
        self.assertEqual(self.vm['power_state'], 'Running')

        if check_injection:
            xenstore_data = self.vm['xenstore_data']
            self.assertNotIn('vm-data/hostname', xenstore_data)
            key = 'vm-data/networking/DEADBEEF0001'
            xenstore_value = xenstore_data[key]
            tcpip_data = ast.literal_eval(xenstore_value)
            self.assertEqual(tcpip_data,
                             {'broadcast': '192.168.1.255',
                              'dns': ['192.168.1.4', '192.168.1.3'],
                              'gateway': '192.168.1.1',
                              'gateway_v6': '2001:db8:0:1::1',
                              'ip6s': [{'enabled': '1',
                                        'ip': '2001:db8:0:1:dcad:beff:feef:1',
                                        'netmask': 64,
                                        'gateway': '2001:db8:0:1::1'}],
                              'ips': [{'enabled': '1',
                                       'ip': '192.168.1.100',
                                       'netmask': '255.255.255.0',
                                       'gateway': '192.168.1.1'},
                                      {'enabled': '1',
                                       'ip': '192.168.1.101',
                                       'netmask': '255.255.255.0',
                                       'gateway': '192.168.1.1'}],
                              'label': 'test1',
                              'mac': 'DE:AD:BE:EF:00:01'})
+ def check_vm_params_for_windows(self):
+ self.assertEqual(self.vm['platform']['nx'], 'true')
+ self.assertEqual(self.vm['HVM_boot_params'], {'order': 'dc'})
+ self.assertEqual(self.vm['HVM_boot_policy'], 'BIOS order')
+
+ # check that these are not set
+ self.assertEqual(self.vm['PV_args'], '')
+ self.assertEqual(self.vm['PV_bootloader'], '')
+ self.assertEqual(self.vm['PV_kernel'], '')
+ self.assertEqual(self.vm['PV_ramdisk'], '')
+
+ def check_vm_params_for_linux(self):
+ self.assertEqual(self.vm['platform']['nx'], 'false')
+ self.assertEqual(self.vm['PV_args'], '')
+ self.assertEqual(self.vm['PV_bootloader'], 'pygrub')
+
+ # check that these are not set
+ self.assertEqual(self.vm['PV_kernel'], '')
+ self.assertEqual(self.vm['PV_ramdisk'], '')
+ self.assertEqual(self.vm['HVM_boot_params'], {})
+ self.assertEqual(self.vm['HVM_boot_policy'], '')
+
+ def check_vm_params_for_linux_with_external_kernel(self):
+ self.assertEqual(self.vm['platform']['nx'], 'false')
+ self.assertEqual(self.vm['PV_args'], 'root=/dev/xvda1')
+ self.assertNotEqual(self.vm['PV_kernel'], '')
+ self.assertNotEqual(self.vm['PV_ramdisk'], '')
+
+ # check that these are not set
+ self.assertEqual(self.vm['HVM_boot_params'], {})
+ self.assertEqual(self.vm['HVM_boot_policy'], '')
+
+ def _list_vdis(self):
+ session = get_session()
+ return session.call_xenapi('VDI.get_all')
+
+ def _list_vms(self):
+ session = get_session()
+ return session.call_xenapi('VM.get_all')
+
+ def _check_vdis(self, start_list, end_list):
+ for vdi_ref in end_list:
+ if vdi_ref not in start_list:
+ vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
+ # If the cache is turned on then the base disk will be
+ # there even after the cleanup
+ if 'other_config' in vdi_rec:
+ if 'image-id' not in vdi_rec['other_config']:
+ self.fail('Found unexpected VDI:%s' % vdi_ref)
+ else:
+ self.fail('Found unexpected VDI:%s' % vdi_ref)
+
    def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
                    instance_type_id="3", os_type="linux",
                    hostname="test", architecture="x86-64", instance_id=1,
                    injected_files=None, check_injection=False,
                    create_record=True, empty_dns=False,
                    block_device_info=None,
                    key_data=None):
        """Spawn an instance through the driver and verify its VM record.

        :param image_ref: glance image id, or None/falsy to skip image_meta
        :param kernel_id: external kernel image id or None
        :param ramdisk_id: external ramdisk image id or None
        :param create_record: when True build a fresh Instance object;
            otherwise fetch the pre-seeded instance by ``instance_id``
        :param empty_dns: strip the dns entries from the fake network info
        :param check_injection: forwarded to check_vm_record to verify the
            xenstore network injection
        """
        if injected_files is None:
            injected_files = []

        # Fake out inject_instance_metadata
        def fake_inject_instance_metadata(self, instance, vm):
            pass
        self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
                       fake_inject_instance_metadata)

        if create_record:
            instance = objects.Instance(context=self.context)
            instance.project_id = self.project_id
            instance.user_id = self.user_id
            instance.image_ref = image_ref
            instance.kernel_id = kernel_id
            instance.ramdisk_id = ramdisk_id
            instance.root_gb = 20
            instance.ephemeral_gb = 0
            instance.instance_type_id = instance_type_id
            instance.os_type = os_type
            instance.hostname = hostname
            instance.key_data = key_data
            instance.architecture = architecture
            instance.system_metadata = get_create_system_metadata(
                self.context, instance_type_id)
            instance.create()
        else:
            instance = objects.Instance.get_by_id(self.context, instance_id)

        network_info = fake_network.fake_get_instance_nw_info(self.stubs)
        if empty_dns:
            # NOTE(tr3buchet): this is a terrible way to do this...
            network_info[0]['network']['subnets'][0]['dns'] = []

        image_meta = {}
        if image_ref:
            image_meta = IMAGE_FIXTURES[image_ref]["image_meta"]
        self.conn.spawn(self.context, instance, image_meta, injected_files,
                        'herp', network_info, block_device_info)
        self.create_vm_record(self.conn, os_type, instance['name'])
        self.check_vm_record(self.conn, instance_type_id, check_injection)
        self.assertEqual(instance['os_type'], os_type)
        self.assertEqual(instance['architecture'], architecture)
    def test_spawn_ipxe_iso_success(self):
        """Spawning an iPXE ISO triggers the ipxe 'inject' plugin call."""
        self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
        vm_utils.get_sr_path(mox.IgnoreArg()).AndReturn('/sr/path')

        self.flags(ipxe_network_name='test1',
                   ipxe_boot_menu_url='http://boot.example.com',
                   ipxe_mkisofs_cmd='/root/mkisofs',
                   group='xenserver')
        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
        # Expect exactly this plugin invocation (ip/netmask/gw/dns come from
        # the fake network info used by _test_spawn).
        self.conn._session.call_plugin_serialized(
            'ipxe', 'inject', '/sr/path', mox.IgnoreArg(),
            'http://boot.example.com', '192.168.1.100', '255.255.255.0',
            '192.168.1.1', '192.168.1.3', '/root/mkisofs')

        self.mox.ReplayAll()
        self._test_spawn(IMAGE_IPXE_ISO, None, None)
    def test_spawn_ipxe_iso_no_network_name(self):
        """No ipxe_network_name configured: the ipxe plugin is not called."""
        self.flags(ipxe_network_name=None,
                   ipxe_boot_menu_url='http://boot.example.com',
                   group='xenserver')

        # call_plugin_serialized shouldn't be called
        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')

        self.mox.ReplayAll()
        self._test_spawn(IMAGE_IPXE_ISO, None, None)
    def test_spawn_ipxe_iso_no_boot_menu_url(self):
        """No ipxe_boot_menu_url configured: the ipxe plugin is not called."""
        self.flags(ipxe_network_name='test1',
                   ipxe_boot_menu_url=None,
                   group='xenserver')

        # call_plugin_serialized shouldn't be called
        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')

        self.mox.ReplayAll()
        self._test_spawn(IMAGE_IPXE_ISO, None, None)
    def test_spawn_ipxe_iso_unknown_network_name(self):
        """ipxe_network_name not matching any VIF: plugin is not called."""
        self.flags(ipxe_network_name='test2',
                   ipxe_boot_menu_url='http://boot.example.com',
                   group='xenserver')

        # call_plugin_serialized shouldn't be called
        self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')

        self.mox.ReplayAll()
        self._test_spawn(IMAGE_IPXE_ISO, None, None)
    def test_spawn_empty_dns(self):
        # Test spawning with an empty dns list.
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         empty_dns=True)
        self.check_vm_params_for_linux()
    def test_spawn_not_enough_memory(self):
        """Spawning a flavor larger than free host memory must fail."""
        self.assertRaises(exception.InsufficientFreeMemory,
                          self._test_spawn,
                          '1', 2, 3, "4")  # m1.xlarge
    def test_spawn_fail_cleanup_1(self):
        """Simulates an error while downloading an image.

        Verifies that the VM and VDIs created are properly cleaned up.
        """
        vdi_recs_start = self._list_vdis()
        start_vms = self._list_vms()
        stubs.stubout_fetch_disk_image(self.stubs, raise_failure=True)
        self.assertRaises(xenapi_fake.Failure,
                          self._test_spawn, '1', 2, 3)
        # No additional VDI should be found.
        vdi_recs_end = self._list_vdis()
        end_vms = self._list_vms()
        self._check_vdis(vdi_recs_start, vdi_recs_end)
        # No additional VMs should be found.
        self.assertEqual(start_vms, end_vms)
    def test_spawn_fail_cleanup_2(self):
        """Simulates an error while creating VM record.

        Verifies that the VM and VDIs created are properly cleaned up.
        """
        vdi_recs_start = self._list_vdis()
        start_vms = self._list_vms()
        stubs.stubout_create_vm(self.stubs)
        self.assertRaises(xenapi_fake.Failure,
                          self._test_spawn, '1', 2, 3)
        # No additional VDI should be found.
        vdi_recs_end = self._list_vdis()
        end_vms = self._list_vms()
        self._check_vdis(vdi_recs_start, vdi_recs_end)
        # No additional VMs should be found.
        self.assertEqual(start_vms, end_vms)
    def test_spawn_fail_cleanup_3(self):
        """Simulates an error while attaching disks.

        Verifies that the VM and VDIs created are properly cleaned up.
        """
        stubs.stubout_attach_disks(self.stubs)
        vdi_recs_start = self._list_vdis()
        start_vms = self._list_vms()
        self.assertRaises(xenapi_fake.Failure,
                          self._test_spawn, '1', 2, 3)
        # No additional VDI should be found.
        vdi_recs_end = self._list_vdis()
        end_vms = self._list_vms()
        self._check_vdis(vdi_recs_start, vdi_recs_end)
        # No additional VMs should be found.
        self.assertEqual(start_vms, end_vms)
    def test_spawn_raw_glance(self):
        """A raw glance image boots as an HVM guest (Windows-style params)."""
        self._test_spawn(IMAGE_RAW, None, None, os_type=None)
        self.check_vm_params_for_windows()
    def test_spawn_vhd_glance_linux(self):
        """A VHD glance image with os_type linux boots as a PV guest."""
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64")
        self.check_vm_params_for_linux()
    def test_spawn_vhd_glance_windows(self):
        """A VHD glance image with os_type windows boots as an HVM guest."""
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="windows", architecture="i386",
                         instance_type_id=5)
        self.check_vm_params_for_windows()
    def test_spawn_iso_glance(self):
        """An ISO glance image boots as an HVM guest."""
        self._test_spawn(IMAGE_ISO, None, None,
                         os_type="windows", architecture="i386")
        self.check_vm_params_for_windows()
    def test_spawn_glance(self):
        """Spawn with separate machine/kernel/ramdisk images (external kernel)."""

        def fake_fetch_disk_image(context, session, instance, name_label,
                                  image_id, image_type):
            # Create a blank VDI in place of actually downloading the image.
            sr_ref = vm_utils.safe_find_sr(session)
            image_type_str = vm_utils.ImageType.to_string(image_type)
            vdi_ref = vm_utils.create_vdi(session, sr_ref, instance,
                                          name_label, image_type_str, "20")
            vdi_role = vm_utils.ImageType.get_role(image_type)
            vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
            return {vdi_role: dict(uuid=vdi_uuid, file=None)}
        self.stubs.Set(vm_utils, '_fetch_disk_image',
                       fake_fetch_disk_image)

        self._test_spawn(IMAGE_MACHINE,
                         IMAGE_KERNEL,
                         IMAGE_RAMDISK)
        self.check_vm_params_for_linux_with_external_kernel()
    def test_spawn_boot_from_volume_no_image_meta(self):
        """Boot from volume with no image reference at all."""
        dev_info = get_fake_device_info()
        self._test_spawn(None, None, None,
                         block_device_info=dev_info)
    def test_spawn_boot_from_volume_no_glance_image_meta(self):
        """Boot from volume with a volume-backed image reference."""
        dev_info = get_fake_device_info()
        self._test_spawn(IMAGE_FROM_VOLUME, None, None,
                         block_device_info=dev_info)
    def test_spawn_boot_from_volume_with_image_meta(self):
        """Boot from volume while also carrying glance image metadata."""
        dev_info = get_fake_device_info()
        self._test_spawn(IMAGE_VHD, None, None,
                         block_device_info=dev_info)
    def test_spawn_netinject_file(self):
        """With flat_injected, /etc/network/interfaces is written via tee.

        Captures the tee command's stdin and compares it against the full
        expected Debian-style interfaces file.
        """
        self.flags(flat_injected=True)
        db_fakes.stub_out_db_instance_api(self.stubs, injected=True)

        self._tee_executed = False

        def _tee_handler(cmd, **kwargs):
            actual = kwargs.get('process_input', None)
            expected = """\
# Injected by Nova on instance boot
#
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).

# The loopback network interface
auto lo
iface lo inet loopback

auto eth0
iface eth0 inet static
    address 192.168.1.100
    netmask 255.255.255.0
    broadcast 192.168.1.255
    gateway 192.168.1.1
    dns-nameservers 192.168.1.3 192.168.1.4
iface eth0 inet6 static
    address 2001:db8:0:1:dcad:beff:feef:1
    netmask 64
    gateway 2001:db8:0:1::1
"""
            self.assertEqual(expected, actual)
            self._tee_executed = True
            return '', ''

        def _readlink_handler(cmd_parts, **kwargs):
            return os.path.realpath(cmd_parts[2]), ''

        fake_processutils.fake_execute_set_repliers([
            # Capture the tee .../etc/network/interfaces command
            (r'tee.*interfaces', _tee_handler),
            (r'readlink -nm.*', _readlink_handler),
        ])
        self._test_spawn(IMAGE_MACHINE,
                         IMAGE_KERNEL,
                         IMAGE_RAMDISK,
                         check_injection=True)
        self.assertTrue(self._tee_executed)
    def test_spawn_netinject_xenstore(self):
        """When a guest agent is detected, file injection must be skipped."""
        db_fakes.stub_out_db_instance_api(self.stubs, injected=True)

        self._tee_executed = False

        def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
            # When mounting, create real files under the mountpoint to simulate
            # files in the mounted filesystem

            # mount point will be the last item of the command list
            self._tmpdir = cmd[len(cmd) - 1]
            LOG.debug('Creating files in %s to simulate guest agent',
                      self._tmpdir)
            os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
            # Touch the file using open
            open(os.path.join(self._tmpdir, 'usr', 'sbin',
                              'xe-update-networking'), 'w').close()
            return '', ''

        def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
            # Umount would normally make files in the mounted filesystem
            # disappear, so do that here
            LOG.debug('Removing simulated guest agent files in %s',
                      self._tmpdir)
            os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
                                   'xe-update-networking'))
            os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
            os.rmdir(os.path.join(self._tmpdir, 'usr'))
            return '', ''

        def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
            self._tee_executed = True
            return '', ''

        fake_processutils.fake_execute_set_repliers([
            (r'mount', _mount_handler),
            (r'umount', _umount_handler),
            (r'tee.*interfaces', _tee_handler)])
        self._test_spawn('1', 2, 3, check_injection=True)

        # tee must not run in this case, where an injection-capable
        # guest agent is detected
        self.assertFalse(self._tee_executed)
    def test_spawn_injects_auto_disk_config_to_xenstore(self):
        """spawn must call _inject_auto_disk_config for the new instance."""
        instance = self._create_instance(spawn=False)
        self.mox.StubOutWithMock(self.conn._vmops, '_inject_auto_disk_config')
        self.conn._vmops._inject_auto_disk_config(instance, mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conn.spawn(self.context, instance,
                        IMAGE_FIXTURES['1']["image_meta"], [], 'herp', '')
    def test_spawn_vlanmanager(self):
        """Spawn an instance whose network is allocated by VlanManager."""
        self.flags(network_manager='nova.network.manager.VlanManager',
                   vlan_interface='fake0')

        def dummy(*args, **kwargs):
            pass

        # VIF creation is irrelevant here; stub it out entirely.
        self.stubs.Set(vmops.VMOps, '_create_vifs', dummy)
        # Reset network table
        xenapi_fake.reset_table('network')
        # Instance id = 2 will use vlan network (see db/fakes.py)
        ctxt = self.context.elevated()
        self.network.conductor_api = conductor_api.LocalAPI()
        self._create_instance(2, False)
        networks = self.network.db.network_get_all(ctxt)
        with mock.patch('nova.objects.network.Network._from_db_object'):
            for network in networks:
                self.network.set_network_host(ctxt, network)

        self.network.allocate_for_instance(ctxt,
                          instance_id=2,
                          instance_uuid='00000000-0000-0000-0000-000000000002',
                          host=CONF.host,
                          vpn=None,
                          rxtx_factor=3,
                          project_id=self.project_id,
                          macs=None)
        self._test_spawn(IMAGE_MACHINE,
                         IMAGE_KERNEL,
                         IMAGE_RAMDISK,
                         instance_id=2,
                         create_record=False)
        # TODO(salvatore-orlando): a complete test here would require
        # a check for making sure the bridge for the VM's VIF is
        # consistent with bridge specified in nova db
    def test_spawn_with_network_qos(self):
        """VIFs get a ratelimit QoS derived from the flavor's rxtx factor."""
        self._create_instance()
        for vif_ref in xenapi_fake.get_all('VIF'):
            vif_rec = xenapi_fake.get_record('VIF', vif_ref)
            self.assertEqual(vif_rec['qos_algorithm_type'], 'ratelimit')
            # rxtx_factor (3) * rxtx_base (10 Mb) expressed in kbps.
            self.assertEqual(vif_rec['qos_algorithm_params']['kbps'],
                             str(3 * 10 * 1024))
    def test_spawn_ssh_key_injection(self):
        # Test spawning with key_data on an instance. Should use
        # agent file injection.
        self.flags(use_agent_default=True,
                   group='xenserver')
        actual_injected_files = []

        def fake_inject_file(self, method, args):
            # Record each (path, contents) pair the agent is asked to write.
            path = base64.b64decode(args['b64_path'])
            contents = base64.b64decode(args['b64_contents'])
            actual_injected_files.append((path, contents))
            return jsonutils.dumps({'returncode': '0', 'message': 'success'})

        self.stubs.Set(stubs.FakeSessionForVMTests,
                       '_plugin_agent_inject_file', fake_inject_file)

        def fake_encrypt_text(sshkey, new_pass):
            # RSA keys go through ssh_encrypt_text for the password path.
            self.assertEqual("ssh-rsa fake_keydata", sshkey)
            return "fake"

        self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)

        expected_data = ('\n# The following ssh key was injected by '
                         'Nova\nssh-rsa fake_keydata\n')

        injected_files = [('/root/.ssh/authorized_keys', expected_data)]
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         key_data='ssh-rsa fake_keydata')
        self.assertEqual(actual_injected_files, injected_files)
    def test_spawn_ssh_key_injection_non_rsa(self):
        # Test spawning with key_data on an instance. Should use
        # agent file injection.
        self.flags(use_agent_default=True,
                   group='xenserver')
        actual_injected_files = []

        def fake_inject_file(self, method, args):
            # Record each (path, contents) pair the agent is asked to write.
            path = base64.b64decode(args['b64_path'])
            contents = base64.b64decode(args['b64_contents'])
            actual_injected_files.append((path, contents))
            return jsonutils.dumps({'returncode': '0', 'message': 'success'})

        self.stubs.Set(stubs.FakeSessionForVMTests,
                       '_plugin_agent_inject_file', fake_inject_file)

        def fake_encrypt_text(sshkey, new_pass):
            # Non-RSA keys must never be routed through ssh_encrypt_text.
            raise NotImplementedError("Should not be called")

        self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)

        expected_data = ('\n# The following ssh key was injected by '
                         'Nova\nssh-dsa fake_keydata\n')

        injected_files = [('/root/.ssh/authorized_keys', expected_data)]
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         key_data='ssh-dsa fake_keydata')
        self.assertEqual(actual_injected_files, injected_files)
    def test_spawn_injected_files(self):
        # Test spawning with injected_files.
        self.flags(use_agent_default=True,
                   group='xenserver')
        actual_injected_files = []

        def fake_inject_file(self, method, args):
            # Record each (path, contents) pair the agent is asked to write.
            path = base64.b64decode(args['b64_path'])
            contents = base64.b64decode(args['b64_contents'])
            actual_injected_files.append((path, contents))
            return jsonutils.dumps({'returncode': '0', 'message': 'success'})
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       '_plugin_agent_inject_file', fake_inject_file)

        injected_files = [('/tmp/foo', 'foobar')]
        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64",
                         injected_files=injected_files)
        self.check_vm_params_for_linux()
        self.assertEqual(actual_injected_files, injected_files)
    @mock.patch('nova.db.agent_build_get_by_triple')
    def test_spawn_agent_upgrade(self, mock_get):
        """Spawn succeeds when a newer agent build is available for upgrade."""
        self.flags(use_agent_default=True,
                   group='xenserver')

        # Fake agent_build DB row advertising version 1.1.0.
        mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
                                 "hypervisor": "xen", "os": "windows",
                                 "url": "url", "md5hash": "asdf",
                                 'created_at': None, 'updated_at': None,
                                 'deleted_at': None, 'deleted': False,
                                 'id': 1}

        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64")
    @mock.patch('nova.db.agent_build_get_by_triple')
    def test_spawn_agent_upgrade_fails_silently(self, mock_get):
        """A failing agent upgrade records a fault but does not abort spawn."""
        # Fake agent_build DB row advertising version 1.1.0.
        mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
                                 "hypervisor": "xen", "os": "windows",
                                 "url": "url", "md5hash": "asdf",
                                 'created_at': None, 'updated_at': None,
                                 'deleted_at': None, 'deleted': False,
                                 'id': 1}

        self._test_spawn_fails_silently_with(exception.AgentError,
                method="_plugin_agent_agentupdate", failure="fake_error")
    def test_spawn_with_resetnetwork_alternative_returncode(self):
        """A non-zero resetnetwork returncode with 'success' is tolerated."""
        self.flags(use_agent_default=True,
                   group='xenserver')

        def fake_resetnetwork(self, method, args):
            fake_resetnetwork.called = True
            # NOTE(johngarbutt): as returned by FreeBSD and Gentoo
            return jsonutils.dumps({'returncode': '500',
                                    'message': 'success'})
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       '_plugin_agent_resetnetwork', fake_resetnetwork)
        fake_resetnetwork.called = False

        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64")
        self.assertTrue(fake_resetnetwork.called)
    def _test_spawn_fails_silently_with(self, expected_exception_cls,
                                        method="_plugin_agent_version",
                                        failure=None, value=None):
        """Spawn with a broken agent call and assert spawn still succeeds.

        :param expected_exception_cls: exception type expected to be recorded
            via add_instance_fault_from_exc (spawn itself must not raise)
        :param method: FakeSessionForVMTests agent method to break
        :param failure: if set, the agent call raises this XenAPI failure
        :param value: otherwise, the agent call returns this value
        """
        self.flags(use_agent_default=True,
                   agent_version_timeout=0,
                   group='xenserver')

        def fake_agent_call(self, method, args):
            if failure:
                raise xenapi_fake.Failure([failure])
            else:
                return value

        self.stubs.Set(stubs.FakeSessionForVMTests,
                       method, fake_agent_call)

        called = {}

        def fake_add_instance_fault(*args, **kwargs):
            # args[2] is the exception being recorded as the fault.
            called["fake_add_instance_fault"] = args[2]

        self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
                       fake_add_instance_fault)

        self._test_spawn(IMAGE_VHD, None, None,
                         os_type="linux", architecture="x86-64")
        actual_exception = called["fake_add_instance_fault"]
        self.assertIsInstance(actual_exception, expected_exception_cls)
    def test_spawn_fails_silently_with_agent_timeout(self):
        """Agent TIMEOUT failure is recorded as AgentTimeout."""
        self._test_spawn_fails_silently_with(exception.AgentTimeout,
                                             failure="TIMEOUT:fake")
    def test_spawn_fails_silently_with_agent_not_implemented(self):
        """Agent NOT IMPLEMENTED failure maps to AgentNotImplemented."""
        self._test_spawn_fails_silently_with(exception.AgentNotImplemented,
                                             failure="NOT IMPLEMENTED:fake")
    def test_spawn_fails_silently_with_agent_error(self):
        """Any other agent failure maps to the generic AgentError."""
        self._test_spawn_fails_silently_with(exception.AgentError,
                                             failure="fake_error")
    def test_spawn_fails_silently_with_agent_bad_return(self):
        """A non-zero agent returncode payload maps to AgentError."""
        error = jsonutils.dumps({'returncode': -1, 'message': 'fake'})
        self._test_spawn_fails_silently_with(exception.AgentError,
                                             value=error)
    def test_rescue(self):
        """Rescue re-attaches the original disks to the -rescue VM.

        Root moves to userdevice 1 (1 is reserved for the rescue image's own
        root); swap/ephemeral keep their userdevice numbers; cinder volumes
        (osvol) are not re-attached.
        """
        instance = self._create_instance(spawn=False)
        xenapi_fake.create_vm(instance['name'], 'Running')

        session = get_session()
        vm_ref = vm_utils.lookup(session, instance['name'])

        swap_vdi_ref = xenapi_fake.create_vdi('swap', None)
        root_vdi_ref = xenapi_fake.create_vdi('root', None)
        eph1_vdi_ref = xenapi_fake.create_vdi('eph', None)
        eph2_vdi_ref = xenapi_fake.create_vdi('eph', None)
        vol_vdi_ref = xenapi_fake.create_vdi('volume', None)

        xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=2)
        xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0)
        xenapi_fake.create_vbd(vm_ref, eph1_vdi_ref, userdevice=4)
        xenapi_fake.create_vbd(vm_ref, eph2_vdi_ref, userdevice=5)
        xenapi_fake.create_vbd(vm_ref, vol_vdi_ref, userdevice=6,
                               other_config={'osvol': True})

        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        image_meta = {'id': IMAGE_VHD,
                      'disk_format': 'vhd'}
        conn.rescue(self.context, instance, [], image_meta, '')

        vm = xenapi_fake.get_record('VM', vm_ref)
        rescue_name = "%s-rescue" % vm["name_label"]
        rescue_ref = vm_utils.lookup(session, rescue_name)
        rescue_vm = xenapi_fake.get_record('VM', rescue_ref)

        # Map each attached VDI to the userdevice it landed on.
        vdi_refs = {}
        for vbd_ref in rescue_vm['VBDs']:
            vbd = xenapi_fake.get_record('VBD', vbd_ref)
            vdi_refs[vbd['VDI']] = vbd['userdevice']

        self.assertEqual('1', vdi_refs[root_vdi_ref])
        self.assertEqual('2', vdi_refs[swap_vdi_ref])
        self.assertEqual('4', vdi_refs[eph1_vdi_ref])
        self.assertEqual('5', vdi_refs[eph2_vdi_ref])
        self.assertNotIn(vol_vdi_ref, vdi_refs)
    def test_rescue_preserve_disk_on_failure(self):
        # test that the original disk is preserved if rescue setup fails
        # bug #1227898
        instance = self._create_instance()
        session = get_session()
        image_meta = {'id': IMAGE_VHD,
                      'disk_format': 'vhd'}

        vm_ref = vm_utils.lookup(session, instance['name'])
        vdi_ref, vdi_rec = vm_utils.get_vdi_for_vm_safely(session, vm_ref)

        # raise an error in the rescue setup process (while starting the
        # rescue VM) and trigger the undo manager rollback logic:
        def fake_start(*args, **kwargs):
            raise test.TestingException('Start Error')

        self.stubs.Set(self.conn._vmops, '_start', fake_start)

        self.assertRaises(test.TestingException, self.conn.rescue,
                          self.context, instance, [], image_meta, '')

        # confirm original disk still exists:
        vdi_ref2, vdi_rec2 = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
        self.assertEqual(vdi_ref, vdi_ref2)
        self.assertEqual(vdi_rec['uuid'], vdi_rec2['uuid'])
+
    def test_unrescue(self):
        # unrescue() should succeed when a -rescue VM exists and the
        # original instance is powered off.
        instance = self._create_instance()
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        # Unrescue expects the original instance to be powered off
        conn.power_off(instance)
        xenapi_fake.create_vm(instance['name'] + '-rescue', 'Running')
        conn.unrescue(instance, None)
+
    def test_unrescue_not_in_rescue(self):
        instance = self._create_instance()
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        # Ensure that it will not unrescue a non-rescued instance.
        self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
                          instance, None)
+
    def test_finish_revert_migration(self):
        # The driver must delegate finish_revert_migration to its VMOps.
        instance = self._create_instance()

        class VMOpsMock():

            def __init__(self):
                self.finish_revert_migration_called = False

            def finish_revert_migration(self, context, instance, block_info,
                                        power_on):
                self.finish_revert_migration_called = True

        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn._vmops = VMOpsMock()
        conn.finish_revert_migration(self.context, instance, None)
        self.assertTrue(conn._vmops.finish_revert_migration_called)
+
    def test_reboot_hard(self):
        # A hard reboot of a running instance should complete cleanly.
        instance = self._create_instance()
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn.reboot(self.context, instance, None, "HARD")
+
    def test_poll_rebooting_instances(self):
        # Polling rebooting instances should issue one compute API reboot
        # for the instance handed in (mox expectation below).
        self.mox.StubOutWithMock(compute_api.API, 'reboot')
        compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
                               mox.IgnoreArg())
        self.mox.ReplayAll()
        instance = self._create_instance()
        instances = [instance]
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn.poll_rebooting_instances(60, instances)
+
    def test_reboot_soft(self):
        # A soft reboot of a running instance should complete cleanly.
        instance = self._create_instance()
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn.reboot(self.context, instance, None, "SOFT")
+
+ def test_reboot_halted(self):
+ session = get_session()
+ instance = self._create_instance(spawn=False)
+ conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ xenapi_fake.create_vm(instance['name'], 'Halted')
+ conn.reboot(self.context, instance, None, "SOFT")
+ vm_ref = vm_utils.lookup(session, instance['name'])
+ vm = xenapi_fake.get_record('VM', vm_ref)
+ self.assertEqual(vm['power_state'], 'Running')
+
    def test_reboot_unknown_state(self):
        # Rebooting a VM whose power state is unknown surfaces the
        # XenAPI failure to the caller.
        instance = self._create_instance(spawn=False)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        xenapi_fake.create_vm(instance['name'], 'Unknown')
        self.assertRaises(xenapi_fake.Failure, conn.reboot, self.context,
                          instance, None, "SOFT")
+
    def test_reboot_rescued(self):
        # Rebooting a RESCUED instance must call vm_utils.lookup with an
        # extra True argument (the mox expectation enforces this).
        instance = self._create_instance()
        instance['vm_state'] = vm_states.RESCUED
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        real_result = vm_utils.lookup(conn._session, instance['name'])

        self.mox.StubOutWithMock(vm_utils, 'lookup')
        vm_utils.lookup(conn._session, instance['name'],
                        True).AndReturn(real_result)
        self.mox.ReplayAll()

        conn.reboot(self.context, instance, None, "SOFT")
+
+ def test_get_console_output_succeeds(self):
+
+ def fake_get_console_output(instance):
+ self.assertEqual("instance", instance)
+ return "console_log"
+ self.stubs.Set(self.conn._vmops, 'get_console_output',
+ fake_get_console_output)
+
+ self.assertEqual(self.conn.get_console_output('context', "instance"),
+ "console_log")
+
    def _test_maintenance_mode(self, find_host, find_aggregate):
        """Drive host_maintenance_mode with stubbed lookups.

        ``find_host``/``find_aggregate`` control whether the stubbed
        ``host._host_find`` and ``db.aggregate_get_by_host`` succeed,
        letting callers exercise both happy and error paths.
        """
        real_call_xenapi = self.conn._session.call_xenapi
        instance = self._create_instance(spawn=True)
        api_calls = {}

        # Record all the xenapi calls, and return a fake list of hosts
        # for the host.get_all call
        def fake_call_xenapi(method, *args):
            api_calls[method] = args
            if method == 'host.get_all':
                return ['foo', 'bar', 'baz']
            return real_call_xenapi(method, *args)
        self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi)

        def fake_aggregate_get(context, host, key):
            if find_aggregate:
                return [test_aggregate.fake_aggregate]
            else:
                return []
        self.stubs.Set(db, 'aggregate_get_by_host',
                       fake_aggregate_get)

        def fake_host_find(context, session, src, dst):
            if find_host:
                return 'bar'
            else:
                raise exception.NoValidHost("I saw this one coming...")
        self.stubs.Set(host, '_host_find', fake_host_find)

        result = self.conn.host_maintenance_mode('bar', 'on_maintenance')
        self.assertEqual(result, 'on_maintenance')

        # We expect the VM.pool_migrate call to have been called to
        # migrate our instance to the 'bar' host
        # NOTE(review): the asserted destination ref is 'foo', the first
        # entry of the stubbed host.get_all list -- confirm the mapping
        # from the 'bar' name to this ref against host._host_find.
        vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
        host_ref = "foo"
        expected = (vm_ref, host_ref, {"live": "true"})
        self.assertEqual(api_calls.get('VM.pool_migrate'), expected)

        # The instance should be left ACTIVE and mid-migration.
        instance = db.instance_get_by_uuid(self.context, instance['uuid'])
        self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
        self.assertEqual(instance['task_state'], task_states.MIGRATING)
+
    def test_maintenance_mode(self):
        # Happy path: both the aggregate and a target host are found.
        self._test_maintenance_mode(True, True)
+
    def test_maintenance_mode_no_host(self):
        # No migration target available -> NoValidHost propagates.
        self.assertRaises(exception.NoValidHost,
                          self._test_maintenance_mode, False, True)
+
    def test_maintenance_mode_no_aggregate(self):
        # Host not found in any aggregate -> NotFound propagates.
        self.assertRaises(exception.NotFound,
                          self._test_maintenance_mode, True, False)
+
    def test_uuid_find(self):
        # host._uuid_find must return the uuid of the instance whose
        # templated name matches the requested one.
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        fake_inst = fake_instance.fake_db_instance(id=123)
        fake_inst2 = fake_instance.fake_db_instance(id=456)
        db.instance_get_all_by_host(self.context, fake_inst['host'],
                                    columns_to_join=None,
                                    use_slave=False
                                    ).AndReturn([fake_inst, fake_inst2])
        self.mox.ReplayAll()
        expected_name = CONF.instance_name_template % fake_inst['id']
        inst_uuid = host._uuid_find(self.context, fake_inst['host'],
                                    expected_name)
        self.assertEqual(inst_uuid, fake_inst['uuid'])
+
    def test_session_virtapi(self):
        # On a slave session, _get_host_uuid must go through
        # db.aggregate_get_by_host; the stub raises to prove it was hit.
        was = {'called': False}

        def fake_aggregate_get_by_host(self, *args, **kwargs):
            was['called'] = True
            raise test.TestingException()
        self.stubs.Set(db, "aggregate_get_by_host",
                       fake_aggregate_get_by_host)

        self.stubs.Set(self.conn._session, "is_slave", True)

        self.assertRaises(test.TestingException,
                          self.conn._session._get_host_uuid)
        self.assertTrue(was['called'])
+
    def test_per_instance_usage_running(self):
        # Running instances are reported with their flavor's memory_mb.
        instance = self._create_instance(spawn=True)
        flavor = flavors.get_flavor(3)

        expected = {instance['uuid']: {'memory_mb': flavor['memory_mb'],
                                       'uuid': instance['uuid']}}
        actual = self.conn.get_per_instance_usage()
        self.assertEqual(expected, actual)

        # Paused instances still consume resources:
        self.conn.pause(instance)
        actual = self.conn.get_per_instance_usage()
        self.assertEqual(expected, actual)
+
+ def test_per_instance_usage_suspended(self):
+ # Suspended instances do not consume memory:
+ instance = self._create_instance(spawn=True)
+ self.conn.suspend(instance)
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual({}, actual)
+
+ def test_per_instance_usage_halted(self):
+ instance = self._create_instance(spawn=True)
+ self.conn.power_off(instance)
+ actual = self.conn.get_per_instance_usage()
+ self.assertEqual({}, actual)
+
    def _create_instance(self, instance_id=1, spawn=True, obj=False, **attrs):
        """Creates and spawns a test instance.

        :param instance_id: id of the instance; also used to derive its
                            uuid and display_name
        :param spawn: spawn the instance through the driver when True
        :param obj: return an objects.Instance instead of the DB record
        :param attrs: extra values merged over the defaults below
        """
        instance_values = {
            'id': instance_id,
            'uuid': '00000000-0000-0000-0000-00000000000%d' % instance_id,
            'display_name': 'host-%d' % instance_id,
            'project_id': self.project_id,
            'user_id': self.user_id,
            'image_ref': 1,
            'kernel_id': 2,
            'ramdisk_id': 3,
            'root_gb': 80,
            'ephemeral_gb': 0,
            'instance_type_id': '3',  # m1.large
            'os_type': 'linux',
            'vm_mode': 'hvm',
            'architecture': 'x86-64'}
        instance_values.update(attrs)

        instance = create_instance_with_system_metadata(self.context,
                                                        instance_values)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs)
        image_meta = {'id': IMAGE_VHD,
                      'disk_format': 'vhd'}
        if spawn:
            self.conn.spawn(self.context, instance, image_meta, [], 'herp',
                            network_info)
        if obj:
            instance = objects.Instance._from_db_object(
                self.context, objects.Instance(), instance,
                expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
        return instance
+
    def test_destroy_clean_up_kernel_and_ramdisk(self):
        # destroy() must also destroy the kernel/ramdisk pair looked up
        # for the VM.
        def fake_lookup_kernel_ramdisk(session, vm_ref):
            return "kernel", "ramdisk"

        self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
                       fake_lookup_kernel_ramdisk)

        def fake_destroy_kernel_ramdisk(session, instance, kernel, ramdisk):
            fake_destroy_kernel_ramdisk.called = True
            self.assertEqual("kernel", kernel)
            self.assertEqual("ramdisk", ramdisk)

        fake_destroy_kernel_ramdisk.called = False

        self.stubs.Set(vm_utils, "destroy_kernel_ramdisk",
                       fake_destroy_kernel_ramdisk)

        instance = self._create_instance(spawn=True)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs)
        self.conn.destroy(self.context, instance, network_info)

        # The VM is gone and the kernel/ramdisk cleanup hook fired.
        vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
        self.assertIsNone(vm_ref)
        self.assertTrue(fake_destroy_kernel_ramdisk.called)
+
+
class XenAPIDiffieHellmanTestCase(test.NoDBTestCase):
    """Unit tests for the Diffie-Hellman key-exchange helper."""

    def setUp(self):
        super(XenAPIDiffieHellmanTestCase, self).setUp()
        self.alice = agent.SimpleDH()
        self.bob = agent.SimpleDH()

    def test_shared(self):
        # Both parties must derive the same shared secret from each
        # other's public keys.
        pub_a = self.alice.get_public()
        pub_b = self.bob.get_public()
        shared_a = self.alice.compute_shared(pub_b)
        shared_b = self.bob.compute_shared(pub_a)
        self.assertEqual(shared_a, shared_b)

    def _test_encryption(self, message):
        # Round-trip: whatever alice encrypts, bob must decrypt back to
        # the original message; ciphertext has no trailing newline.
        ciphertext = self.alice.encrypt(message)
        self.assertFalse(ciphertext.endswith('\n'))
        plaintext = self.bob.decrypt(ciphertext)
        self.assertEqual(plaintext, message)

    def test_encrypt_simple_message(self):
        self._test_encryption('This is a simple message.')

    def test_encrypt_message_with_newlines_at_end(self):
        self._test_encryption('This message has a newline at the end.\n')

    def test_encrypt_many_newlines_at_end(self):
        self._test_encryption('Message with lotsa newlines.\n\n\n')

    def test_encrypt_newlines_inside_message(self):
        self._test_encryption('Message\nwith\ninterior\nnewlines.')

    def test_encrypt_with_leading_newlines(self):
        self._test_encryption('\n\nMessage with leading newlines.')

    def test_encrypt_really_long_message(self):
        # Same payload as ''.join(['abcd' for i in xrange(1024)]).
        self._test_encryption('abcd' * 1024)
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIMigrateInstance(stubs.XenAPITestBase):
    """Unit test for verifying migration-related actions."""

    REQUIRES_LOCKING = True

    def setUp(self):
        super(XenAPIMigrateInstance, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        db_fakes.stub_out_db_instance_api(self.stubs)
        xenapi_fake.create_network('fake', 'fake_br1')
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        # Default values for the instances the tests below create.
        self.instance_values = {'id': 1,
                                'project_id': self.project_id,
                                'user_id': self.user_id,
                                'image_ref': 1,
                                'kernel_id': None,
                                'ramdisk_id': None,
                                'root_gb': 80,
                                'ephemeral_gb': 0,
                                'instance_type_id': '3',  # m1.large
                                'os_type': 'linux',
                                'architecture': 'x86-64'}

        migration_values = {
            'source_compute': 'nova-compute',
            'dest_compute': 'nova-compute',
            'dest_host': '10.127.5.114',
            'status': 'post-migrating',
            'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7',
            'old_instance_type_id': 5,
            'new_instance_type_id': 1
        }
        self.migration = db.migration_create(
            context.get_admin_context(), migration_values)

        fake_processutils.stub_out_processutils_execute(self.stubs)
        stubs.stub_out_migration_methods(self.stubs)
        stubs.stubout_get_this_vm_uuid(self.stubs)

        def fake_inject_instance_metadata(self, instance, vm):
            pass
        self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
                       fake_inject_instance_metadata)

    def test_migrate_disk_and_power_off(self):
        # Plain same-size migration should complete without error.
        instance = db.instance_create(self.context, self.instance_values)
        xenapi_fake.create_vm(instance['name'], 'Running')
        flavor = {"root_gb": 80, 'ephemeral_gb': 0}
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn.migrate_disk_and_power_off(self.context, instance,
                                        '127.0.0.1', flavor, None)

    def test_migrate_disk_and_power_off_passes_exceptions(self):
        # Errors from the resize-up helper must propagate to the caller.
        instance = db.instance_create(self.context, self.instance_values)
        xenapi_fake.create_vm(instance['name'], 'Running')
        flavor = {"root_gb": 80, 'ephemeral_gb': 0}

        def fake_raise(*args, **kwargs):
            raise exception.MigrationError(reason='test failure')
        self.stubs.Set(vmops.VMOps, "_migrate_disk_resizing_up", fake_raise)

        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.MigrationError,
                          conn.migrate_disk_and_power_off,
                          self.context, instance,
                          '127.0.0.1', flavor, None)

    def test_migrate_disk_and_power_off_throws_on_zero_gb_resize_down(self):
        # Shrinking an 80G root disk to a 0 GB flavor is rejected.
        instance = db.instance_create(self.context, self.instance_values)
        flavor = {"root_gb": 0, 'ephemeral_gb': 0}
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.ResizeError,
                          conn.migrate_disk_and_power_off,
                          self.context, instance,
                          'fake_dest', flavor, None)

    def test_migrate_disk_and_power_off_with_zero_gb_old_and_new_works(self):
        # 0 GB -> 0 GB is not a resize down and must succeed.
        flavor = {"root_gb": 0, 'ephemeral_gb': 0}
        values = copy.copy(self.instance_values)
        values["root_gb"] = 0
        values["ephemeral_gb"] = 0
        instance = db.instance_create(self.context, values)
        xenapi_fake.create_vm(instance['name'], 'Running')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        conn.migrate_disk_and_power_off(self.context, instance,
                                        '127.0.0.1', flavor, None)

    def _test_revert_migrate(self, power_on):
        # finish_migration must resize the VDI online and start the VM
        # only when power_on is set; finish_revert_migration must then be
        # reachable through the driver.
        instance = create_instance_with_system_metadata(self.context,
                                                        self.instance_values)
        self.called = False
        self.fake_vm_start_called = False
        self.fake_finish_revert_migration_called = False
        # NOTE: local name deliberately shadows the imported context
        # module; the string is passed to finish_revert_migration below.
        context = 'fake_context'

        def fake_vm_start(*args, **kwargs):
            self.fake_vm_start_called = True

        def fake_vdi_resize(*args, **kwargs):
            self.called = True

        def fake_finish_revert_migration(*args, **kwargs):
            self.fake_finish_revert_migration_called = True

        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)
        self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
        self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
                       fake_finish_revert_migration)
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
                              product_version=(4, 0, 0),
                              product_brand='XenServer')

        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs)
        image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
        # Real base-copy/cow VDIs so their uuids resolve in the fake.
        base = xenapi_fake.create_vdi('hurr', 'fake')
        base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
        cow = xenapi_fake.create_vdi('durr', 'fake')
        cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid']
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy=base_uuid, cow=cow_uuid),
                              network_info, image_meta, resize_instance=True,
                              block_device_info=None, power_on=power_on)
        self.assertEqual(self.called, True)
        self.assertEqual(self.fake_vm_start_called, power_on)

        conn.finish_revert_migration(context, instance, network_info)
        self.assertEqual(self.fake_finish_revert_migration_called, True)

    def test_revert_migrate_power_on(self):
        self._test_revert_migrate(True)

    def test_revert_migrate_power_off(self):
        self._test_revert_migrate(False)

    def _test_finish_migrate(self, power_on):
        # finish_migration resizes the VDI online and starts the VM only
        # when power_on is requested.
        instance = create_instance_with_system_metadata(self.context,
                                                        self.instance_values)
        self.called = False
        self.fake_vm_start_called = False

        def fake_vm_start(*args, **kwargs):
            self.fake_vm_start_called = True

        def fake_vdi_resize(*args, **kwargs):
            self.called = True

        self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
                              product_version=(4, 0, 0),
                              product_brand='XenServer')

        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs)
        image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy='hurr', cow='durr'),
                              network_info, image_meta, resize_instance=True,
                              block_device_info=None, power_on=power_on)
        self.assertEqual(self.called, True)
        self.assertEqual(self.fake_vm_start_called, power_on)

    def test_finish_migrate_power_on(self):
        self._test_finish_migrate(True)

    def test_finish_migrate_power_off(self):
        self._test_finish_migrate(False)

    def test_finish_migrate_no_local_storage(self):
        # With 0 GB root/ephemeral disks the online VDI resize must not
        # be attempted (the stub raises if it is).
        values = copy.copy(self.instance_values)
        values["root_gb"] = 0
        values["ephemeral_gb"] = 0
        instance = create_instance_with_system_metadata(self.context, values)

        def fake_vdi_resize(*args, **kwargs):
            raise Exception("This shouldn't be called")

        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs)
        image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy='hurr', cow='durr'),
                              network_info, image_meta, resize_instance=True)

    def test_finish_migrate_no_resize_vdi(self):
        # resize_instance=False must skip the online VDI resize.
        instance = create_instance_with_system_metadata(self.context,
                                                        self.instance_values)

        def fake_vdi_resize(*args, **kwargs):
            raise Exception("This shouldn't be called")

        self.stubs.Set(stubs.FakeSessionForVMTests,
                       "VDI_resize_online", fake_vdi_resize)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs)
        # Resize instance would be determined by the compute call
        image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
        conn.finish_migration(self.context, self.migration, instance,
                              dict(base_copy='hurr', cow='durr'),
                              network_info, image_meta, resize_instance=False)

    @stub_vm_utils_with_vdi_attached_here
    def test_migrate_too_many_partitions_no_resize_down(self):
        # Resize down is refused (InstanceFaultRollback) when the stub
        # reports more than one partition on the disk.
        instance_values = self.instance_values
        instance = db.instance_create(self.context, instance_values)
        xenapi_fake.create_vm(instance['name'], 'Running')
        flavor = db.flavor_get_by_name(self.context, 'm1.small')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        def fake_get_partitions(partition):
            return [(1, 2, 3, 4, "", ""), (1, 2, 3, 4, "", "")]

        self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)

        self.assertRaises(exception.InstanceFaultRollback,
                          conn.migrate_disk_and_power_off,
                          self.context, instance,
                          '127.0.0.1', flavor, None)

    @stub_vm_utils_with_vdi_attached_here
    def test_migrate_bad_fs_type_no_resize_down(self):
        # The stubbed partition table causes resize down to be refused
        # with InstanceFaultRollback.
        instance_values = self.instance_values
        instance = db.instance_create(self.context, instance_values)
        xenapi_fake.create_vm(instance['name'], 'Running')
        flavor = db.flavor_get_by_name(self.context, 'm1.small')
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        def fake_get_partitions(partition):
            return [(1, 2, 3, "ext2", "", "boot")]

        self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)

        self.assertRaises(exception.InstanceFaultRollback,
                          conn.migrate_disk_and_power_off,
                          self.context, instance,
                          '127.0.0.1', flavor, None)

    def test_migrate_rollback_when_resize_down_fs_fails(self):
        # If migrating the shrunken VHD fails, the new VDI is destroyed,
        # the original VM restored, and InstanceFaultRollback raised.
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        # NOTE: local name shadows the imported vmops module here.
        vmops = conn._vmops

        self.mox.StubOutWithMock(vmops, '_resize_ensure_vm_is_shutdown')
        self.mox.StubOutWithMock(vmops, '_apply_orig_vm_name_label')
        self.mox.StubOutWithMock(vm_utils, 'resize_disk')
        self.mox.StubOutWithMock(vm_utils, 'migrate_vhd')
        self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
        self.mox.StubOutWithMock(vm_utils, 'get_vdi_for_vm_safely')
        self.mox.StubOutWithMock(vmops, '_restore_orig_vm_and_cleanup_orphan')

        instance = objects.Instance(context=self.context,
                                    auto_disk_config=True, uuid='uuid')
        instance.obj_reset_changes()
        vm_ref = "vm_ref"
        dest = "dest"
        flavor = "type"
        sr_path = "sr_path"

        # Ordered mox script: shutdown, relabel, resize, then the vhd
        # migration fails and rollback must destroy the new VDI.
        vmops._resize_ensure_vm_is_shutdown(instance, vm_ref)
        vmops._apply_orig_vm_name_label(instance, vm_ref)
        old_vdi_ref = "old_ref"
        vm_utils.get_vdi_for_vm_safely(vmops._session, vm_ref).AndReturn(
            (old_vdi_ref, None))
        new_vdi_ref = "new_ref"
        new_vdi_uuid = "new_uuid"
        vm_utils.resize_disk(vmops._session, instance, old_vdi_ref,
                             flavor).AndReturn((new_vdi_ref, new_vdi_uuid))
        vm_utils.migrate_vhd(vmops._session, instance, new_vdi_uuid, dest,
                             sr_path, 0).AndRaise(
                                 exception.ResizeError(reason="asdf"))

        vm_utils.destroy_vdi(vmops._session, new_vdi_ref)
        vmops._restore_orig_vm_and_cleanup_orphan(instance)

        self.mox.ReplayAll()

        with mock.patch.object(instance, 'save') as mock_save:
            self.assertRaises(exception.InstanceFaultRollback,
                              vmops._migrate_disk_resizing_down, self.context,
                              instance, dest, flavor, vm_ref, sr_path)
            self.assertEqual(3, mock_save.call_count)
            self.assertEqual(60.0, instance.progress)

    def test_resize_ensure_vm_is_shutdown_cleanly(self):
        # A successful clean shutdown means no hard shutdown is needed.
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vmops = conn._vmops
        fake_instance = {'uuid': 'uuid'}

        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')

        vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
        vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
                                   "ref").AndReturn(True)

        self.mox.ReplayAll()

        vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")

    def test_resize_ensure_vm_is_shutdown_forced(self):
        # A failed clean shutdown falls back to a hard shutdown.
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vmops = conn._vmops
        fake_instance = {'uuid': 'uuid'}

        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')

        vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
        vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
                                   "ref").AndReturn(False)
        vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
                                  "ref").AndReturn(True)

        self.mox.ReplayAll()

        vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")

    def test_resize_ensure_vm_is_shutdown_fails(self):
        # If both clean and hard shutdown fail, ResizeError is raised.
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vmops = conn._vmops
        fake_instance = {'uuid': 'uuid'}

        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')

        vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
        vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
                                   "ref").AndReturn(False)
        vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
                                  "ref").AndReturn(False)

        self.mox.ReplayAll()

        self.assertRaises(exception.ResizeError,
            vmops._resize_ensure_vm_is_shutdown, fake_instance, "ref")

    def test_resize_ensure_vm_is_shutdown_already_shutdown(self):
        # Nothing to do when the VM is already shut down.
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        vmops = conn._vmops
        fake_instance = {'uuid': 'uuid'}

        self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
        self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')

        vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(True)

        self.mox.ReplayAll()

        vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
+
+
class XenAPIImageTypeTestCase(test.NoDBTestCase):
    """Tests for the vm_utils.ImageType helper."""

    def test_to_string(self):
        """Type ids are convertible to their string names."""
        self.assertEqual(
            vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL),
            vm_utils.ImageType.KERNEL_STR)

    def _assert_role(self, expected_role, image_type_id):
        actual_role = vm_utils.ImageType.get_role(image_type_id)
        self.assertEqual(expected_role, actual_role)

    def test_get_image_role_kernel(self):
        self._assert_role('kernel', vm_utils.ImageType.KERNEL)

    def test_get_image_role_ramdisk(self):
        self._assert_role('ramdisk', vm_utils.ImageType.RAMDISK)

    def test_get_image_role_disk(self):
        self._assert_role('root', vm_utils.ImageType.DISK)

    def test_get_image_role_disk_raw(self):
        self._assert_role('root', vm_utils.ImageType.DISK_RAW)

    def test_get_image_role_disk_vhd(self):
        self._assert_role('root', vm_utils.ImageType.DISK_VHD)
+
+
class XenAPIDetermineDiskImageTestCase(test.NoDBTestCase):
    """Unit tests for code that detects the ImageType."""

    def assert_disk_type(self, image_meta, expected_disk_type):
        detected = vm_utils.determine_disk_image_type(image_meta)
        self.assertEqual(expected_disk_type, detected)

    def test_machine(self):
        # 'ami' images map to a plain DISK.
        self.assert_disk_type({'id': 'a', 'disk_format': 'ami'},
                              vm_utils.ImageType.DISK)

    def test_raw(self):
        self.assert_disk_type({'id': 'a', 'disk_format': 'raw'},
                              vm_utils.ImageType.DISK_RAW)

    def test_vhd(self):
        self.assert_disk_type({'id': 'a', 'disk_format': 'vhd'},
                              vm_utils.ImageType.DISK_VHD)

    def test_none(self):
        # Missing image metadata yields no disk type.
        self.assert_disk_type(None, None)
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIHostTestCase(stubs.XenAPITestBase):
    """Tests HostState, which holds metrics from XenServer that get
    reported back to the Schedulers.
    """

    def setUp(self):
        super(XenAPIHostTestCase, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.context = context.get_admin_context()
        self.flags(use_local=True, group='conductor')
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.instance = fake_instance.fake_db_instance(name='foo')

    def test_host_state(self):
        # The reported stats must reflect the values baked into the
        # xenapi fakes.
        stats = self.conn.host_state.get_host_stats(False)
        # Values from fake.create_local_srs (ext SR)
        self.assertEqual(stats['disk_total'], 40000)
        self.assertEqual(stats['disk_used'], 20000)
        # Values from fake._plugin_xenhost_host_data
        self.assertEqual(stats['host_memory_total'], 10)
        self.assertEqual(stats['host_memory_overhead'], 20)
        self.assertEqual(stats['host_memory_free'], 30)
        self.assertEqual(stats['host_memory_free_computed'], 40)
        self.assertEqual(stats['hypervisor_hostname'], 'fake-xenhost')
        self.assertThat({'cpu_count': 50},
                        matchers.DictMatches(stats['host_cpu_info']))
        # No VMs running
        self.assertEqual(stats['vcpus_used'], 0)

    def test_host_state_vcpus_used(self):
        # vcpus_used should track running VMs after a refresh.
        stats = self.conn.host_state.get_host_stats(True)
        self.assertEqual(stats['vcpus_used'], 0)
        xenapi_fake.create_vm(self.instance['name'], 'Running')
        stats = self.conn.host_state.get_host_stats(True)
        self.assertEqual(stats['vcpus_used'], 4)

    def test_pci_passthrough_devices_whitelist(self):
        # NOTE(guillaume-thouvenin): This pci whitelist will be used to
        # match with _plugin_xenhost_get_pci_device_details method in fake.py.
        white_list = '{"vendor_id":"10de", "product_id":"11bf"}'
        self.flags(pci_passthrough_whitelist=[white_list])
        stats = self.conn.host_state.get_host_stats(False)
        self.assertEqual(len(stats['pci_passthrough_devices']), 1)

    def test_pci_passthrough_devices_no_whitelist(self):
        # Without a whitelist no PCI devices are exposed.
        stats = self.conn.host_state.get_host_stats(False)
        self.assertEqual(len(stats['pci_passthrough_devices']), 0)

    def test_host_state_missing_sr(self):
        # Must trigger construction of 'host_state' property
        # before introducing the stub which raises the error
        hs = self.conn.host_state

        def fake_safe_find_sr(session):
            raise exception.StorageRepositoryNotFound('not there')

        self.stubs.Set(vm_utils, 'safe_find_sr', fake_safe_find_sr)
        self.assertRaises(exception.StorageRepositoryNotFound,
                          hs.get_host_stats,
                          refresh=True)

    def _test_host_action(self, method, action, expected=None):
        # Invoke a host-level driver call and check its echoed result
        # (defaults to the action itself).
        result = method('host', action)
        if not expected:
            expected = action
        self.assertEqual(result, expected)

    def test_host_reboot(self):
        self._test_host_action(self.conn.host_power_action, 'reboot')

    def test_host_shutdown(self):
        self._test_host_action(self.conn.host_power_action, 'shutdown')

    def test_host_startup(self):
        # Powering a host back on is not supported by the driver.
        self.assertRaises(NotImplementedError,
                          self.conn.host_power_action, 'host', 'startup')

    def test_host_maintenance_on(self):
        self._test_host_action(self.conn.host_maintenance_mode,
                               True, 'on_maintenance')

    def test_host_maintenance_off(self):
        self._test_host_action(self.conn.host_maintenance_mode,
                               False, 'off_maintenance')

    def test_set_enable_host_enable(self):
        # Enabling the host must also clear the service's disabled flag.
        _create_service_entries(self.context, values={'nova': ['fake-mini']})
        self._test_host_action(self.conn.set_host_enabled, True, 'enabled')
        service = db.service_get_by_args(self.context, 'fake-mini',
                                         'nova-compute')
        self.assertEqual(service.disabled, False)

    def test_set_enable_host_disable(self):
        # Disabling the host must set the service's disabled flag.
        _create_service_entries(self.context, values={'nova': ['fake-mini']})
        self._test_host_action(self.conn.set_host_enabled, False, 'disabled')
        service = db.service_get_by_args(self.context, 'fake-mini',
                                         'nova-compute')
        self.assertEqual(service.disabled, True)

    def test_get_host_uptime(self):
        result = self.conn.get_host_uptime('host')
        self.assertEqual(result, 'fake uptime')

    def test_supported_instances_is_included_in_host_state(self):
        stats = self.conn.host_state.get_host_stats(False)
        self.assertIn('supported_instances', stats)

    def test_supported_instances_is_calculated_by_to_supported_instances(self):

        def to_supported_instances(somedata):
            self.assertIsNone(somedata)
            return "SOMERETURNVALUE"
        self.stubs.Set(host, 'to_supported_instances', to_supported_instances)

        stats = self.conn.host_state.get_host_stats(False)
        self.assertEqual("SOMERETURNVALUE", stats['supported_instances'])

    def test_update_stats_caches_hostname(self):
        # The hypervisor hostname is expected to stay 'foo' across the
        # two refreshes asserted below.
        self.mox.StubOutWithMock(host, 'call_xenhost')
        self.mox.StubOutWithMock(vm_utils, 'scan_default_sr')
        self.mox.StubOutWithMock(vm_utils, 'list_vms')
        self.mox.StubOutWithMock(self.conn._session, 'call_xenapi')
        data = {'disk_total': 0,
                'disk_used': 0,
                'disk_available': 0,
                'supported_instances': 0,
                'host_capabilities': [],
                'host_hostname': 'foo',
                'vcpus_used': 0,
                }
        sr_rec = {
            'physical_size': 0,
            'physical_utilisation': 0,
            'virtual_allocation': 0,
            }

        for i in range(3):
            host.call_xenhost(mox.IgnoreArg(), 'host_data', {}).AndReturn(data)
            vm_utils.scan_default_sr(self.conn._session).AndReturn("ref")
            vm_utils.list_vms(self.conn._session).AndReturn([])
            self.conn._session.call_xenapi('SR.get_record', "ref").AndReturn(
                sr_rec)
            if i == 2:
                # On the third call (the second below) change the hostname
                # NOTE(review): 'data' is rebound after the i == 2
                # expectation was already queued via AndReturn, so no
                # mocked call ever returns the 'bar' hostname -- confirm
                # the intent of this branch.
                data = dict(data, host_hostname='bar')

        self.mox.ReplayAll()
        stats = self.conn.host_state.get_host_stats(refresh=True)
        self.assertEqual('foo', stats['hypervisor_hostname'])
        stats = self.conn.host_state.get_host_stats(refresh=True)
        self.assertEqual('foo', stats['hypervisor_hostname'])
+
+
class ToSupportedInstancesTestCase(test.NoDBTestCase):
    """Tests for host.to_supported_instances.

    The function parses XenServer host capability strings such as
    u'xen-3.0-x86_64' into (architecture, hv_type, mode) tuples.
    """
    def test_default_return_value(self):
        # None (no capabilities) maps to an empty list, not an error.
        self.assertEqual([],
                         host.to_supported_instances(None))

    def test_return_value(self):
        self.assertEqual([(arch.X86_64, hvtype.XEN, 'xen')],
                         host.to_supported_instances([u'xen-3.0-x86_64']))

    def test_invalid_values_do_not_break(self):
        # Unparseable entries ('spam') are skipped silently.
        self.assertEqual([(arch.X86_64, hvtype.XEN, 'xen')],
                         host.to_supported_instances([u'xen-3.0-x86_64', 'spam']))

    def test_multiple_values(self):
        self.assertEqual(
            [
                (arch.X86_64, hvtype.XEN, 'xen'),
                (arch.I686, hvtype.XEN, 'hvm')
            ],
            host.to_supported_instances([u'xen-3.0-x86_64', 'hvm-3.0-x86_32'])
        )
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
    """Tests that the instance's auto_disk_config flag plus the
    virt-layer fail-safe checks decide whether the root partition is
    resized during _attach_disks.
    """
    def setUp(self):
        super(XenAPIAutoDiskConfigTestCase, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self.user_id = 'fake'
        self.project_id = 'fake'

        self.instance_values = {'id': 1,
                                'project_id': self.project_id,
                                'user_id': self.user_id,
                                'image_ref': 1,
                                'kernel_id': 2,
                                'ramdisk_id': 3,
                                'root_gb': 80,
                                'ephemeral_gb': 0,
                                'instance_type_id': '3',  # m1.large
                                'os_type': 'linux',
                                'architecture': 'x86-64'}

        self.context = context.RequestContext(self.user_id, self.project_id)

        # VBD creation is irrelevant to these tests; stub it to a no-op.
        def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
                            vbd_type='disk', read_only=False, bootable=True,
                            osvol=False):
            pass

        self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)

    def assertIsPartitionCalled(self, called):
        """Run _attach_disks and assert whether the partition/FS resize
        helper was (or was not) invoked.

        :param called: expected value of the "resize happened" marker.
        """
        marker = {"partition_called": False}

        def fake_resize_part_and_fs(dev, start, old_sectors, new_sectors,
                                    flags):
            marker["partition_called"] = True
        self.stubs.Set(vm_utils, "_resize_part_and_fs",
                       fake_resize_part_and_fs)

        context.RequestContext(self.user_id, self.project_id)
        session = get_session()

        disk_image_type = vm_utils.ImageType.DISK_VHD
        instance = create_instance_with_system_metadata(self.context,
                                                        self.instance_values)
        vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
        vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')

        vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
        vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}

        self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
                                       vdis, disk_image_type, "fake_nw_inf")

        self.assertEqual(marker["partition_called"], called)

    def test_instance_not_auto_disk_config(self):
        """Should not partition unless instance is marked as
        auto_disk_config.
        """
        self.instance_values['auto_disk_config'] = False
        self.assertIsPartitionCalled(False)

    @stub_vm_utils_with_vdi_attached_here
    def test_instance_auto_disk_config_fails_safe_two_partitions(self):
        # Should not partition unless fail safes pass.
        self.instance_values['auto_disk_config'] = True

        def fake_get_partitions(dev):
            # BUG FIX: the original second tuple read ('ext4' "", "")
            # — a missing comma made Python concatenate 'ext4' and ""
            # into one string, producing a malformed 5-tuple. Partition
            # records are 6-tuples everywhere else in this file.
            return [(1, 0, 100, 'ext4', "", ""),
                    (2, 100, 200, 'ext4', "", "")]
        self.stubs.Set(vm_utils, "_get_partitions",
                       fake_get_partitions)

        self.assertIsPartitionCalled(False)

    @stub_vm_utils_with_vdi_attached_here
    def test_instance_auto_disk_config_fails_safe_badly_numbered(self):
        # Should not partition unless fail safes pass.
        self.instance_values['auto_disk_config'] = True

        def fake_get_partitions(dev):
            # Single partition, but numbered 2 instead of 1.
            return [(2, 100, 200, 'ext4', "", "")]
        self.stubs.Set(vm_utils, "_get_partitions",
                       fake_get_partitions)

        self.assertIsPartitionCalled(False)

    @stub_vm_utils_with_vdi_attached_here
    def test_instance_auto_disk_config_fails_safe_bad_fstype(self):
        # Should not partition unless fail safes pass.
        self.instance_values['auto_disk_config'] = True

        def fake_get_partitions(dev):
            # Unsupported filesystem type trips the fail-safe.
            return [(1, 100, 200, 'asdf', "", "")]
        self.stubs.Set(vm_utils, "_get_partitions",
                       fake_get_partitions)

        self.assertIsPartitionCalled(False)

    @stub_vm_utils_with_vdi_attached_here
    def test_instance_auto_disk_config_passes_fail_safes(self):
        """Should partition if instance is marked as auto_disk_config=True and
        virt-layer specific fail-safe checks pass.
        """
        self.instance_values['auto_disk_config'] = True

        def fake_get_partitions(dev):
            return [(1, 0, 100, 'ext4', "", "boot")]
        self.stubs.Set(vm_utils, "_get_partitions",
                       fake_get_partitions)

        self.assertIsPartitionCalled(True)
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIGenerateLocal(stubs.XenAPITestBase):
    """Test generating of local disks, like swap and ephemeral."""
    def setUp(self):
        super(XenAPIGenerateLocal, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        db_fakes.stub_out_db_instance_api(self.stubs)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self.user_id = 'fake'
        self.project_id = 'fake'

        self.instance_values = {'id': 1,
                                'project_id': self.project_id,
                                'user_id': self.user_id,
                                'image_ref': 1,
                                'kernel_id': 2,
                                'ramdisk_id': 3,
                                'root_gb': 80,
                                'ephemeral_gb': 0,
                                'instance_type_id': '3',  # m1.large
                                'os_type': 'linux',
                                'architecture': 'x86-64'}

        self.context = context.RequestContext(self.user_id, self.project_id)

        # Replace VBD creation with a thin fake that still exercises the
        # fake XenAPI 'VBD.create' call.
        def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
                            vbd_type='disk', read_only=False, bootable=True,
                            osvol=False, empty=False, unpluggable=True):
            return session.call_xenapi('VBD.create', {'VM': vm_ref,
                                                      'VDI': vdi_ref})

        self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)

    def assertCalled(self, instance,
                     disk_image_type=vm_utils.ImageType.DISK_VHD):
        """Run _attach_disks and assert that the generator stubbed by the
        calling test set self.called to True.
        """
        context.RequestContext(self.user_id, self.project_id)
        session = get_session()

        vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
        vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')

        vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']

        # ISO images are keyed under 'iso' in the vdis mapping.
        vdi_key = 'root'
        if disk_image_type == vm_utils.ImageType.DISK_ISO:
            vdi_key = 'iso'
        vdis = {vdi_key: {'uuid': vdi_uuid, 'ref': vdi_ref}}

        self.called = False
        self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
                                       vdis, disk_image_type, "fake_nw_inf")
        self.assertTrue(self.called)

    def test_generate_swap(self):
        # Test swap disk generation.
        # instance_type_id=5 presumably selects a flavor with swap —
        # TODO confirm against the fake DB flavors.
        instance_values = dict(self.instance_values, instance_type_id=5)
        instance = create_instance_with_system_metadata(self.context,
                                                        instance_values)

        def fake_generate_swap(*args, **kwargs):
            self.called = True
        self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap)

        self.assertCalled(instance)

    def test_generate_ephemeral(self):
        # Test ephemeral disk generation.
        instance_values = dict(self.instance_values, instance_type_id=4)
        instance = create_instance_with_system_metadata(self.context,
                                                        instance_values)

        def fake_generate_ephemeral(*args):
            self.called = True
        self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)

        self.assertCalled(instance)

    def test_generate_iso_blank_root_disk(self):
        # Booting from ISO without kernel/ramdisk must generate a blank
        # root disk.
        instance_values = dict(self.instance_values, instance_type_id=4)
        instance_values.pop('kernel_id')
        instance_values.pop('ramdisk_id')
        instance = create_instance_with_system_metadata(self.context,
                                                        instance_values)

        def fake_generate_ephemeral(*args):
            pass
        self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)

        def fake_generate_iso(*args):
            self.called = True
        self.stubs.Set(vm_utils, 'generate_iso_blank_root_disk',
                       fake_generate_iso)

        self.assertCalled(instance, vm_utils.ImageType.DISK_ISO)
+
+
class XenAPIBWCountersTestCase(stubs.XenAPITestBaseNoDB):
    """Tests for get_all_bw_counters (per-VIF bandwidth accounting)."""

    # Two fake VM records keyed by opaque ref. '_vifmap' maps the VIF
    # device id to its MAC address and is served by the stubbed
    # _get_vif_device_map below.
    FAKE_VMS = {'test1:ref': dict(name_label='test1',
                                  other_config=dict(nova_uuid='hash'),
                                  domid='12',
                                  _vifmap={'0': "a:b:c:d...",
                                           '1': "e:f:12:q..."}),
                'test2:ref': dict(name_label='test2',
                                  other_config=dict(nova_uuid='hash'),
                                  domid='42',
                                  _vifmap={'0': "a:3:c:d...",
                                           '1': "e:f:42:q..."}),
                }

    def setUp(self):
        super(XenAPIBWCountersTestCase, self).setUp()
        self.stubs.Set(vm_utils, 'list_vms',
                       XenAPIBWCountersTestCase._fake_list_vms)
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        def _fake_get_vif_device_map(vm_rec):
            return vm_rec['_vifmap']

        self.stubs.Set(self.conn._vmops, "_get_vif_device_map",
                       _fake_get_vif_device_map)

    @classmethod
    def _fake_list_vms(cls, session):
        return cls.FAKE_VMS.iteritems()

    @staticmethod
    def _fake_fetch_bandwidth_mt(session):
        # Empty ("mt") bandwidth data, simulating no data from XenServer.
        return {}

    @staticmethod
    def _fake_fetch_bandwidth(session):
        # Bandwidth keyed by domid, then by VIF device id.
        return {'42':
                    {'0': {'bw_in': 21024, 'bw_out': 22048},
                     '1': {'bw_in': 231337, 'bw_out': 221212121}},
                '12':
                    {'0': {'bw_in': 1024, 'bw_out': 2048},
                     '1': {'bw_in': 31337, 'bw_out': 21212121}},
                }

    def test_get_all_bw_counters(self):
        instances = [dict(name='test1', uuid='1-2-3'),
                     dict(name='test2', uuid='4-5-6')]

        self.stubs.Set(vm_utils, 'fetch_bandwidth',
                       self._fake_fetch_bandwidth)
        result = self.conn.get_all_bw_counters(instances)
        # Two VMs with two VIFs each -> four counter entries.
        self.assertEqual(len(result), 4)
        self.assertIn(dict(uuid='1-2-3',
                           mac_address="a:b:c:d...",
                           bw_in=1024,
                           bw_out=2048), result)
        self.assertIn(dict(uuid='1-2-3',
                           mac_address="e:f:12:q...",
                           bw_in=31337,
                           bw_out=21212121), result)

        self.assertIn(dict(uuid='4-5-6',
                           mac_address="a:3:c:d...",
                           bw_in=21024,
                           bw_out=22048), result)
        self.assertIn(dict(uuid='4-5-6',
                           mac_address="e:f:42:q...",
                           bw_in=231337,
                           bw_out=221212121), result)

    def test_get_all_bw_counters_in_failure_case(self):
        """Test that get_all_bw_conters returns an empty list when
        no data returned from Xenserver.  c.f. bug #910045.
        """
        instances = [dict(name='instance-0001', uuid='1-2-3-4-5')]

        self.stubs.Set(vm_utils, 'fetch_bandwidth',
                       self._fake_fetch_bandwidth_mt)
        result = self.conn.get_all_bw_counters(instances)
        self.assertEqual(result, [])
+
+
+# TODO(salvatore-orlando): this class and
+# nova.tests.unit.virt.test_libvirt.IPTablesFirewallDriverTestCase
+# share a lot of code. Consider abstracting common code in a base
+# class for firewall driver testing.
+#
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
    """Tests for the Dom0 iptables firewall driver.

    The fake firewall session is created with test_case=self;
    the rules the driver emits end up in self._out_rules, which the
    assertions below inspect — presumably populated by
    FakeSessionForFirewallTests (verify against the stubs module).
    """

    REQUIRES_LOCKING = True

    # Canned iptables-save output used as the pre-existing IPv4 ruleset.
    _in_rules = [
      '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
      '*nat',
      ':PREROUTING ACCEPT [1170:189210]',
      ':INPUT ACCEPT [844:71028]',
      ':OUTPUT ACCEPT [5149:405186]',
      ':POSTROUTING ACCEPT [5063:386098]',
      '# Completed on Mon Dec  6 11:54:13 2010',
      '# Generated by iptables-save v1.4.4 on Mon Dec  6 11:54:13 2010',
      '*mangle',
      ':INPUT ACCEPT [969615:281627771]',
      ':FORWARD ACCEPT [0:0]',
      ':OUTPUT ACCEPT [915599:63811649]',
      ':nova-block-ipv4 - [0:0]',
      '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
      '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
      ',ESTABLISHED -j ACCEPT ',
      '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
      '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
      '[0:0] -A FORWARD -o virbr0 -j REJECT '
      '--reject-with icmp-port-unreachable ',
      '[0:0] -A FORWARD -i virbr0 -j REJECT '
      '--reject-with icmp-port-unreachable ',
      'COMMIT',
      '# Completed on Mon Dec  6 11:54:13 2010',
      '# Generated by iptables-save v1.4.4 on Mon Dec  6 11:54:13 2010',
      '*filter',
      ':INPUT ACCEPT [969615:281627771]',
      ':FORWARD ACCEPT [0:0]',
      ':OUTPUT ACCEPT [915599:63811649]',
      ':nova-block-ipv4 - [0:0]',
      '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
      '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
      ',ESTABLISHED -j ACCEPT ',
      '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
      '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
      '[0:0] -A FORWARD -o virbr0 -j REJECT '
      '--reject-with icmp-port-unreachable ',
      '[0:0] -A FORWARD -i virbr0 -j REJECT '
      '--reject-with icmp-port-unreachable ',
      'COMMIT',
      '# Completed on Mon Dec  6 11:54:13 2010',
    ]

    # Canned ip6tables-save output (IPv6 filter table).
    _in6_filter_rules = [
      '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
      '*filter',
      ':INPUT ACCEPT [349155:75810423]',
      ':FORWARD ACCEPT [0:0]',
      ':OUTPUT ACCEPT [349256:75777230]',
      'COMMIT',
      '# Completed on Tue Jan 18 23:47:56 2011',
    ]

    def setUp(self):
        super(XenAPIDom0IptablesFirewallTestCase, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        self.user_id = 'mappin'
        self.project_id = 'fake'
        # test_case=self lets the fake session report emitted rules back
        # to this test instance.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
                              test_case=self)
        self.context = context.RequestContext(self.user_id, self.project_id)
        self.network = importutils.import_object(CONF.network_manager)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.fw = self.conn._vmops.firewall_driver

    def _create_instance_ref(self):
        # Minimal instance row for firewall filtering tests.
        return db.instance_create(self.context,
                                  {'user_id': self.user_id,
                                   'project_id': self.project_id,
                                   'instance_type_id': 1})

    def _create_test_security_group(self):
        # Group with ICMP (any), ICMP echo-request and TCP 80-81 rules.
        admin_ctxt = context.get_admin_context()
        secgroup = db.security_group_create(admin_ctxt,
                                {'user_id': self.user_id,
                                 'project_id': self.project_id,
                                 'name': 'testgroup',
                                 'description': 'test group'})
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': -1,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})

        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'icmp',
                                       'from_port': 8,
                                       'to_port': -1,
                                       'cidr': '192.168.11.0/24'})

        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'tcp',
                                       'from_port': 80,
                                       'to_port': 81,
                                       'cidr': '192.168.10.0/24'})
        return secgroup

    def _validate_security_group(self):
        """Check that the emitted ruleset preserved the input rules and
        contains the per-instance chain, the security-group chain and the
        three rules created by _create_test_security_group.
        """
        in_rules = filter(lambda l: not l.startswith('#'),
                          self._in_rules)
        for rule in in_rules:
            if 'nova' not in rule:
                self.assertTrue(rule in self._out_rules,
                                'Rule went missing: %s' % rule)

        instance_chain = None
        for rule in self._out_rules:
            # This is pretty crude, but it'll do for now
            # last two octets change
            if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
                instance_chain = rule.split(' ')[-1]
                break
        self.assertTrue(instance_chain, "The instance chain wasn't added")
        security_group_chain = None
        for rule in self._out_rules:
            # This is pretty crude, but it'll do for now
            if '-A %s -j' % instance_chain in rule:
                security_group_chain = rule.split(' ')[-1]
                break
        self.assertTrue(security_group_chain,
                        "The security group chain wasn't added")

        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp'
                           ' -s 192.168.11.0/24')
        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                        "ICMP acceptance rule wasn't added")

        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp'
                           ' --icmp-type 8 -s 192.168.11.0/24')
        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                        "ICMP Echo Request acceptance rule wasn't added")

        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp --dport 80:81'
                           ' -s 192.168.10.0/24')
        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                        "TCP port 80/81 acceptance rule wasn't added")

    def test_static_filters(self):
        instance_ref = self._create_instance_ref()
        src_instance_ref = self._create_instance_ref()
        admin_ctxt = context.get_admin_context()
        secgroup = self._create_test_security_group()

        # Source group: TCP 80-81 allowed from members of src_secgroup.
        src_secgroup = db.security_group_create(admin_ctxt,
                                        {'user_id': self.user_id,
                                         'project_id': self.project_id,
                                         'name': 'testsourcegroup',
                                         'description': 'src group'})
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'tcp',
                                       'from_port': 80,
                                       'to_port': 81,
                                       'group_id': src_secgroup['id']})

        db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
                                       secgroup['id'])
        db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
                                       src_secgroup['id'])
        instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
        src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])

        network_model = fake_network.fake_get_instance_nw_info(self.stubs, 1)

        from nova.compute import utils as compute_utils  # noqa
        self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
                       lambda instance: network_model)

        self.fw.prepare_instance_filter(instance_ref, network_model)
        self.fw.apply_instance_filter(instance_ref, network_model)

        self._validate_security_group()
        # Extra test for TCP acceptance rules
        for ip in network_model.fixed_ips():
            if ip['version'] != 4:
                continue
            regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp'
                               ' --dport 80:81 -s %s' % ip['address'])
            self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                            "TCP port 80/81 acceptance rule wasn't added")

        db.instance_destroy(admin_ctxt, instance_ref['uuid'])

    def test_filters_for_instance_with_ip_v6(self):
        # With IPv6 enabled, one v6 rule is produced next to the two v4.
        self.flags(use_ipv6=True)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
        rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
        self.assertEqual(len(rulesv4), 2)
        self.assertEqual(len(rulesv6), 1)

    def test_filters_for_instance_without_ip_v6(self):
        # With IPv6 disabled, no v6 rules should be generated.
        self.flags(use_ipv6=False)
        network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
        rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
        self.assertEqual(len(rulesv4), 2)
        self.assertEqual(len(rulesv6), 0)

    def test_multinic_iptables(self):
        # Rule counts must scale with networks and addresses per network.
        ipv4_rules_per_addr = 1
        ipv4_addr_per_network = 2
        ipv6_rules_per_addr = 1
        ipv6_addr_per_network = 1
        networks_count = 5
        instance_ref = self._create_instance_ref()
        _get_instance_nw_info = fake_network.fake_get_instance_nw_info
        network_info = _get_instance_nw_info(self.stubs,
                                             networks_count,
                                             ipv4_addr_per_network)
        network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
            '1.1.1.1'
        ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
        ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
        inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
                                                      network_info)
        self.fw.prepare_instance_filter(instance_ref, network_info)
        ipv4 = self.fw.iptables.ipv4['filter'].rules
        ipv6 = self.fw.iptables.ipv6['filter'].rules
        ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
        ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
        # Extra rules are for the DHCP request
        rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
                 networks_count) + 2
        self.assertEqual(ipv4_network_rules, rules)
        self.assertEqual(ipv6_network_rules,
                         ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)

    def test_do_refresh_security_group_rules(self):
        # Adding a rule to an existing group and refreshing must emit it.
        admin_ctxt = context.get_admin_context()
        instance_ref = self._create_instance_ref()
        network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
        secgroup = self._create_test_security_group()
        db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
                                       secgroup['id'])
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.instance_info[instance_ref['id']] = (instance_ref,
                                                     network_info)
        self._validate_security_group()
        # add a rule to the security group
        db.security_group_rule_create(admin_ctxt,
                                      {'parent_group_id': secgroup['id'],
                                       'protocol': 'udp',
                                       'from_port': 200,
                                       'to_port': 299,
                                       'cidr': '192.168.99.0/24'})
        # validate the extra rule
        self.fw.refresh_security_group_rules(secgroup)
        regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp --dport 200:299'
                           ' -s 192.168.99.0/24')
        self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
                        "Rules were not updated properly."
                        "The rule for UDP acceptance is missing")

    def test_provider_firewall_rules(self):
        # setup basic instance data
        instance_ref = self._create_instance_ref()
        # FRAGILE: as in libvirt tests
        # peeks at how the firewall names chains
        chain_name = 'inst-%s' % instance_ref['id']

        network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.assertIn('provider', self.fw.iptables.ipv4['filter'].chains)
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                 if rule.chain == 'provider']
        self.assertEqual(0, len(rules))

        admin_ctxt = context.get_admin_context()
        # add a rule and send the update message, check for 1 rule
        db.provider_fw_rule_create(admin_ctxt,
                                   {'protocol': 'tcp',
                                    'cidr': '10.99.99.99/32',
                                    'from_port': 1,
                                    'to_port': 65535})
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                 if rule.chain == 'provider']
        self.assertEqual(1, len(rules))

        # Add another, refresh, and make sure number of rules goes to two
        provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
                                                  {'protocol': 'udp',
                                                   'cidr': '10.99.99.99/32',
                                                   'from_port': 1,
                                                   'to_port': 65535})
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                 if rule.chain == 'provider']
        self.assertEqual(2, len(rules))

        # create the instance filter and make sure it has a jump rule
        self.fw.prepare_instance_filter(instance_ref, network_info)
        self.fw.apply_instance_filter(instance_ref, network_info)
        inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                      if rule.chain == chain_name]
        jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
        provjump_rules = []
        # IptablesTable doesn't make rules unique internally
        for rule in jump_rules:
            if 'provider' in rule.rule and rule not in provjump_rules:
                provjump_rules.append(rule)
        self.assertEqual(1, len(provjump_rules))

        # remove a rule from the db, cast to compute to refresh rule
        db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
        self.fw.refresh_provider_fw_rules()
        rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
                 if rule.chain == 'provider']
        self.assertEqual(1, len(rules))
+
+
class XenAPISRSelectionTestCase(stubs.XenAPITestBaseNoDB):
    """Unit tests for testing we find the right SR."""
    def test_safe_find_sr_raise_exception(self):
        # Ensure StorageRepositoryNotFound is raise when wrong filter.
        self.flags(sr_matching_filter='yadayadayada', group='xenserver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        session = get_session()
        self.assertRaises(exception.StorageRepositoryNotFound,
                          vm_utils.safe_find_sr, session)

    def test_safe_find_sr_local_storage(self):
        # Ensure the default local-storage is found.
        self.flags(sr_matching_filter='other-config:i18n-key=local-storage',
                   group='xenserver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        session = get_session()
        # This test is only guaranteed if there is one host in the pool
        self.assertEqual(len(xenapi_fake.get_all('host')), 1)
        host_ref = xenapi_fake.get_all('host')[0]
        # Walk this host's PBDs to find the SR tagged 'local-storage'.
        pbd_refs = xenapi_fake.get_all('PBD')
        for pbd_ref in pbd_refs:
            pbd_rec = xenapi_fake.get_record('PBD', pbd_ref)
            if pbd_rec['host'] != host_ref:
                continue
            sr_rec = xenapi_fake.get_record('SR', pbd_rec['SR'])
            if sr_rec['other_config']['i18n-key'] == 'local-storage':
                local_sr = pbd_rec['SR']
        expected = vm_utils.safe_find_sr(session)
        self.assertEqual(local_sr, expected)

    def test_safe_find_sr_by_other_criteria(self):
        # Ensure the SR is found when using a different filter.
        self.flags(sr_matching_filter='other-config:my_fake_sr=true',
                   group='xenserver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        session = get_session()
        host_ref = xenapi_fake.get_all('host')[0]
        local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
                                         type='lvm',
                                         other_config={'my_fake_sr': 'true'},
                                         host_ref=host_ref)
        expected = vm_utils.safe_find_sr(session)
        self.assertEqual(local_sr, expected)

    def test_safe_find_sr_default(self):
        # Ensure the default SR is found regardless of other-config.
        self.flags(sr_matching_filter='default-sr:true',
                   group='xenserver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        session = get_session()
        pool_ref = session.call_xenapi('pool.get_all')[0]
        expected = vm_utils.safe_find_sr(session)
        self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
                         expected)
+
+
def _create_service_entries(context, values=None):
    """Create a nova-compute service row for every host in *values*.

    :param context: request context used for the DB calls.
    :param values: mapping of availability-zone name -> list of host
        names.  Defaults to two zones covering three fake hosts.  A
        ``None`` sentinel replaces the original mutable dict default so
        the shared default object can never be mutated across calls.
    :returns: the mapping that was used (handy for further assertions).
    """
    if values is None:
        values = {'avail_zone1': ['fake_host1', 'fake_host2'],
                  'avail_zone2': ['fake_host3']}
    for avail_zone, hosts in values.iteritems():
        for service_host in hosts:
            db.service_create(context,
                              {'host': service_host,
                               'binary': 'nova-compute',
                               'topic': 'compute',
                               'report_count': 0})
    return values
+
+
+# FIXME(sirp): convert this to use XenAPITestBaseNoDB
+class XenAPIAggregateTestCase(stubs.XenAPITestBase):
+ """Unit tests for aggregate operations."""
+ def setUp(self):
+ super(XenAPIAggregateTestCase, self).setUp()
+ self.flags(connection_url='http://test_url',
+ connection_username='test_user',
+ connection_password='test_pass',
+ group='xenserver')
+ self.flags(instance_name_template='%d',
+ firewall_driver='nova.virt.xenapi.firewall.'
+ 'Dom0IptablesFirewallDriver',
+ host='host',
+ compute_driver='xenapi.XenAPIDriver',
+ default_availability_zone='avail_zone1')
+ self.flags(use_local=True, group='conductor')
+ host_ref = xenapi_fake.get_all('host')[0]
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.context = context.get_admin_context()
+ self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
+ self.compute = importutils.import_object(CONF.compute_manager)
+ self.api = compute_api.AggregateAPI()
+ values = {'name': 'test_aggr',
+ 'metadata': {'availability_zone': 'test_zone',
+ pool_states.POOL_FLAG: 'XenAPI'}}
+ self.aggr = db.aggregate_create(self.context, values)
+ self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI',
+ 'master_compute': 'host',
+ 'availability_zone': 'fake_zone',
+ pool_states.KEY: pool_states.ACTIVE,
+ 'host': xenapi_fake.get_record('host',
+ host_ref)['uuid']}
+
+ def test_pool_add_to_aggregate_called_by_driver(self):
+
+ calls = []
+
+ def pool_add_to_aggregate(context, aggregate, host, slave_info=None):
+ self.assertEqual("CONTEXT", context)
+ self.assertEqual("AGGREGATE", aggregate)
+ self.assertEqual("HOST", host)
+ self.assertEqual("SLAVEINFO", slave_info)
+ calls.append(pool_add_to_aggregate)
+ self.stubs.Set(self.conn._pool,
+ "add_to_aggregate",
+ pool_add_to_aggregate)
+
+ self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST",
+ slave_info="SLAVEINFO")
+
+ self.assertIn(pool_add_to_aggregate, calls)
+
+ def test_pool_remove_from_aggregate_called_by_driver(self):
+
+ calls = []
+
+ def pool_remove_from_aggregate(context, aggregate, host,
+ slave_info=None):
+ self.assertEqual("CONTEXT", context)
+ self.assertEqual("AGGREGATE", aggregate)
+ self.assertEqual("HOST", host)
+ self.assertEqual("SLAVEINFO", slave_info)
+ calls.append(pool_remove_from_aggregate)
+ self.stubs.Set(self.conn._pool,
+ "remove_from_aggregate",
+ pool_remove_from_aggregate)
+
+ self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST",
+ slave_info="SLAVEINFO")
+
+ self.assertIn(pool_remove_from_aggregate, calls)
+
+ def test_add_to_aggregate_for_first_host_sets_metadata(self):
+ def fake_init_pool(id, name):
+ fake_init_pool.called = True
+ self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool)
+
+ aggregate = self._aggregate_setup()
+ self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
+ result = db.aggregate_get(self.context, aggregate['id'])
+ self.assertTrue(fake_init_pool.called)
+ self.assertThat(self.fake_metadata,
+ matchers.DictMatches(result['metadetails']))
+
+ def test_join_slave(self):
+ # Ensure join_slave gets called when the request gets to master.
+ def fake_join_slave(id, compute_uuid, host, url, user, password):
+ fake_join_slave.called = True
+ self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)
+
+ aggregate = self._aggregate_setup(hosts=['host', 'host2'],
+ metadata=self.fake_metadata)
+ self.conn._pool.add_to_aggregate(self.context, aggregate, "host2",
+ dict(compute_uuid='fake_uuid',
+ url='fake_url',
+ user='fake_user',
+ passwd='fake_pass',
+ xenhost_uuid='fake_uuid'))
+ self.assertTrue(fake_join_slave.called)
+
+ def test_add_to_aggregate_first_host(self):
+ def fake_pool_set_name_label(self, session, pool_ref, name):
+ fake_pool_set_name_label.called = True
+ self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label",
+ fake_pool_set_name_label)
+ self.conn._session.call_xenapi("pool.create", {"name": "asdf"})
+
+ metadata = {'availability_zone': 'fake_zone',
+ pool_states.POOL_FLAG: "XenAPI",
+ pool_states.KEY: pool_states.CREATED}
+
+ aggregate = objects.Aggregate()
+ aggregate.name = 'fake_aggregate'
+ aggregate.metadata = dict(metadata)
+ aggregate.create(self.context)
+ aggregate.add_host('host')
+ self.assertEqual(["host"], aggregate.hosts)
+ self.assertEqual(metadata, aggregate.metadata)
+
+ self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
+ self.assertTrue(fake_pool_set_name_label.called)
+
+ def test_remove_from_aggregate_called(self):
+ def fake_remove_from_aggregate(context, aggregate, host):
+ fake_remove_from_aggregate.called = True
+ self.stubs.Set(self.conn._pool,
+ "remove_from_aggregate",
+ fake_remove_from_aggregate)
+
+ self.conn.remove_from_aggregate(None, None, None)
+ self.assertTrue(fake_remove_from_aggregate.called)
+
+ def test_remove_from_empty_aggregate(self):
+ result = self._aggregate_setup()
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.conn._pool.remove_from_aggregate,
+ self.context, result, "test_host")
+
+ def test_remove_slave(self):
+ # Ensure eject slave gets called.
+ def fake_eject_slave(id, compute_uuid, host_uuid):
+ fake_eject_slave.called = True
+ self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)
+
+ self.fake_metadata['host2'] = 'fake_host2_uuid'
+ aggregate = self._aggregate_setup(hosts=['host', 'host2'],
+ metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE)
+ self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2")
+ self.assertTrue(fake_eject_slave.called)
+
+ def test_remove_master_solo(self):
+ # Ensure metadata are cleared after removal.
+ def fake_clear_pool(id):
+ fake_clear_pool.called = True
+ self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)
+
+ aggregate = self._aggregate_setup(metadata=self.fake_metadata)
+ self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
+ result = db.aggregate_get(self.context, aggregate['id'])
+ self.assertTrue(fake_clear_pool.called)
+ self.assertThat({'availability_zone': 'fake_zone',
+ pool_states.POOL_FLAG: 'XenAPI',
+ pool_states.KEY: pool_states.ACTIVE},
+ matchers.DictMatches(result['metadetails']))
+
+ def test_remote_master_non_empty_pool(self):
+ # Ensure AggregateError is raised if removing the master.
+ aggregate = self._aggregate_setup(hosts=['host', 'host2'],
+ metadata=self.fake_metadata)
+
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.conn._pool.remove_from_aggregate,
+ self.context, aggregate, "host")
+
+ def _aggregate_setup(self, aggr_name='fake_aggregate',
+ aggr_zone='fake_zone',
+ aggr_state=pool_states.CREATED,
+ hosts=['host'], metadata=None):
+ aggregate = objects.Aggregate()
+ aggregate.name = aggr_name
+ aggregate.metadata = {'availability_zone': aggr_zone,
+ pool_states.POOL_FLAG: 'XenAPI',
+ pool_states.KEY: aggr_state,
+ }
+ if metadata:
+ aggregate.metadata.update(metadata)
+ aggregate.create(self.context)
+ for aggregate_host in hosts:
+ aggregate.add_host(aggregate_host)
+ return aggregate
+
+ def test_add_host_to_aggregate_invalid_changing_status(self):
+ """Ensure InvalidAggregateAction is raised when adding host while
+ aggregate is not ready.
+ """
+ aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
+ ex = self.assertRaises(exception.InvalidAggregateAction,
+ self.conn.add_to_aggregate, self.context,
+ aggregate, 'host')
+ self.assertIn('setup in progress', str(ex))
+
+ def test_add_host_to_aggregate_invalid_dismissed_status(self):
+ """Ensure InvalidAggregateAction is raised when aggregate is
+ deleted.
+ """
+ aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
+ ex = self.assertRaises(exception.InvalidAggregateAction,
+ self.conn.add_to_aggregate, self.context,
+ aggregate, 'fake_host')
+ self.assertIn('aggregate deleted', str(ex))
+
+ def test_add_host_to_aggregate_invalid_error_status(self):
+ """Ensure InvalidAggregateAction is raised when aggregate is
+ in error.
+ """
+ aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR)
+ ex = self.assertRaises(exception.InvalidAggregateAction,
+ self.conn.add_to_aggregate, self.context,
+ aggregate, 'fake_host')
+ self.assertIn('aggregate in error', str(ex))
+
+ def test_remove_host_from_aggregate_error(self):
+ # Ensure we can remove a host from an aggregate even if in error.
+ values = _create_service_entries(self.context)
+ fake_zone = values.keys()[0]
+ aggr = self.api.create_aggregate(self.context,
+ 'fake_aggregate', fake_zone)
+ # let's mock the fact that the aggregate is ready!
+ metadata = {pool_states.POOL_FLAG: "XenAPI",
+ pool_states.KEY: pool_states.ACTIVE}
+ db.aggregate_metadata_add(self.context, aggr['id'], metadata)
+ for aggregate_host in values[fake_zone]:
+ aggr = self.api.add_host_to_aggregate(self.context,
+ aggr['id'], aggregate_host)
+ # let's mock the fact that the aggregate is in error!
+ expected = self.api.remove_host_from_aggregate(self.context,
+ aggr['id'],
+ values[fake_zone][0])
+ self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
+ self.assertEqual(expected['metadata'][pool_states.KEY],
+ pool_states.ACTIVE)
+
+ def test_remove_host_from_aggregate_invalid_dismissed_status(self):
+ """Ensure InvalidAggregateAction is raised when aggregate is
+ deleted.
+ """
+ aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.conn.remove_from_aggregate, self.context,
+ aggregate, 'fake_host')
+
+ def test_remove_host_from_aggregate_invalid_changing_status(self):
+ """Ensure InvalidAggregateAction is raised when aggregate is
+ changing.
+ """
+ aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
+ self.assertRaises(exception.InvalidAggregateAction,
+ self.conn.remove_from_aggregate, self.context,
+ aggregate, 'fake_host')
+
+ def test_add_aggregate_host_raise_err(self):
+ # Ensure the undo operation works correctly on add.
+ def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
+ raise exception.AggregateError(
+ aggregate_id='', action='', reason='')
+ self.stubs.Set(self.compute.driver, "add_to_aggregate",
+ fake_driver_add_to_aggregate)
+ metadata = {pool_states.POOL_FLAG: "XenAPI",
+ pool_states.KEY: pool_states.ACTIVE}
+ db.aggregate_metadata_add(self.context, self.aggr['id'], metadata)
+ db.aggregate_host_add(self.context, self.aggr['id'], 'fake_host')
+
+ self.assertRaises(exception.AggregateError,
+ self.compute.add_aggregate_host,
+ self.context, host="fake_host",
+ aggregate=jsonutils.to_primitive(self.aggr),
+ slave_info=None)
+ excepted = db.aggregate_get(self.context, self.aggr['id'])
+ self.assertEqual(excepted['metadetails'][pool_states.KEY],
+ pool_states.ERROR)
+ self.assertEqual(excepted['hosts'], [])
+
+
class MockComputeAPI(object):
    """Records compute RPC API calls so tests can inspect them later."""

    def __init__(self):
        # Each entry: (bound method, *args as passed).
        self._mock_calls = []

    def add_aggregate_host(self, ctxt, aggregate,
                           host_param, host, slave_info):
        call = (self.add_aggregate_host, ctxt, aggregate,
                host_param, host, slave_info)
        self._mock_calls.append(call)

    def remove_aggregate_host(self, ctxt, aggregate_id, host_param,
                              host, slave_info):
        call = (self.remove_aggregate_host, ctxt, aggregate_id,
                host_param, host, slave_info)
        self._mock_calls.append(call)
+
+
class StubDependencies(object):
    """Stub dependencies for ResourcePool."""

    def __init__(self):
        self.compute_rpcapi = MockComputeAPI()

    def _is_hv_pool(self, *_ignore):
        # Always report the aggregate as a hypervisor pool.
        return True

    def _get_metadata(self, *_ignore):
        # Minimal metadata: a master compute and an empty pool-state key.
        return {pool_states.KEY: {},
                'master_compute': 'master'}

    def _create_slave_info(self, *ignore):
        return "SLAVE_INFO"
+
+
class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool):
    """A ResourcePool whose collaborators are replaced by stubs.

    StubDependencies comes first in the MRO so its fake helpers
    (_is_hv_pool, _get_metadata, _create_slave_info, compute_rpcapi)
    override the real ResourcePool implementations.
    """
+
+
class HypervisorPoolTestCase(test.NoDBTestCase):
    """Checks that a slave node forwards pool changes to the master."""

    fake_aggregate = {
        'id': 98,
        'hosts': [],
        'metadata': {
            'master_compute': 'master',
            pool_states.POOL_FLAG: {},
            pool_states.KEY: {}
        }
    }

    def test_slave_asks_master_to_add_slave_to_pool(self):
        slave = ResourcePoolWithStubs()
        slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave")

        expected = (slave.compute_rpcapi.add_aggregate_host,
                    "CONTEXT", jsonutils.to_primitive(self.fake_aggregate),
                    "slave", "master", "SLAVE_INFO")
        self.assertIn(expected, slave.compute_rpcapi._mock_calls)

    def test_slave_asks_master_to_remove_slave_from_pool(self):
        slave = ResourcePoolWithStubs()
        slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave")

        expected = (slave.compute_rpcapi.remove_aggregate_host,
                    "CONTEXT", 98, "slave", "master", "SLAVE_INFO")
        self.assertIn(expected, slave.compute_rpcapi._mock_calls)
+
+
class SwapXapiHostTestCase(test.NoDBTestCase):
    """Tests for pool.swap_xapi_host URL host replacement."""

    def _assert_swapped(self, expected, url):
        # Every case swaps the host for 'otherserver'.
        self.assertEqual(expected, pool.swap_xapi_host(url, 'otherserver'))

    def test_swapping(self):
        self._assert_swapped("http://otherserver:8765/somepath",
                             "http://someserver:8765/somepath")

    def test_no_port(self):
        self._assert_swapped("http://otherserver/somepath",
                             "http://someserver/somepath")

    def test_no_path(self):
        self._assert_swapped("http://otherserver",
                             "http://someserver")
+
+
class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
    """Unit tests for live_migration."""
    def setUp(self):
        super(XenAPILiveMigrateTestCase, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver',
                   host='host')
        # Stub the instance DB API so no real database is required.
        db_fakes.stub_out_db_instance_api(self.stubs)
        self.context = context.get_admin_context()

    def test_live_migration_calls_vmops(self):
        # The driver must delegate live migration to its VMOps object.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        def fake_live_migrate(context, instance_ref, dest, post_method,
                              recover_method, block_migration, migrate_data):
            fake_live_migrate.called = True

        self.stubs.Set(self.conn._vmops, "live_migrate", fake_live_migrate)

        self.conn.live_migration(None, None, None, None, None)
        self.assertTrue(fake_live_migrate.called)

    def test_pre_live_migration(self):
        # ensure method is present
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.conn.pre_live_migration(None, None, None, None, None)

    def test_post_live_migration_at_destination(self):
        # ensure method is present
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        fake_instance = {"name": "name"}
        fake_network_info = "network_info"

        # Shared counter stub: installed for all three firewall hooks below,
        # so call_count should end at 3.
        def fake_fw(instance, network_info):
            self.assertEqual(instance, fake_instance)
            self.assertEqual(network_info, fake_network_info)
            fake_fw.call_count += 1

        def fake_create_kernel_and_ramdisk(context, session, instance,
                                           name_label):
            return "fake-kernel-file", "fake-ramdisk-file"

        fake_fw.call_count = 0
        _vmops = self.conn._vmops
        self.stubs.Set(_vmops.firewall_driver,
                       'setup_basic_filtering', fake_fw)
        self.stubs.Set(_vmops.firewall_driver,
                       'prepare_instance_filter', fake_fw)
        self.stubs.Set(_vmops.firewall_driver,
                       'apply_instance_filter', fake_fw)
        self.stubs.Set(vm_utils, "create_kernel_and_ramdisk",
                       fake_create_kernel_and_ramdisk)

        def fake_get_vm_opaque_ref(instance):
            fake_get_vm_opaque_ref.called = True
        self.stubs.Set(_vmops, "_get_vm_opaque_ref", fake_get_vm_opaque_ref)
        fake_get_vm_opaque_ref.called = False

        def fake_strip_base_mirror_from_vdis(session, vm_ref):
            fake_strip_base_mirror_from_vdis.called = True
        self.stubs.Set(vm_utils, "strip_base_mirror_from_vdis",
                       fake_strip_base_mirror_from_vdis)
        fake_strip_base_mirror_from_vdis.called = False

        self.conn.post_live_migration_at_destination(None, fake_instance,
                                                     fake_network_info, None)
        self.assertEqual(fake_fw.call_count, 3)
        self.assertTrue(fake_get_vm_opaque_ref.called)
        self.assertTrue(fake_strip_base_mirror_from_vdis.called)

    def test_check_can_live_migrate_destination_with_block_migration(self):
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")

        expected = {'block_migration': True,
                    'migrate_data': {
                        'migrate_send_data': "fake_migrate_data",
                        'destination_sr_ref': 'asdf'
                    }
                    }
        result = self.conn.check_can_live_migrate_destination(self.context,
                                                              {'host': 'host'},
                                                              {}, {},
                                                              True, False)
        self.assertEqual(expected, result)

    def test_check_live_migrate_destination_verifies_ip(self):
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        # Blank every PIF address so the destination check must fail.
        for pif_ref in xenapi_fake.get_all('PIF'):
            pif_rec = xenapi_fake.get_record('PIF', pif_ref)
            pif_rec['IP'] = ''
            pif_rec['IPv6'] = ''

        self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")

        self.assertRaises(exception.MigrationError,
                          self.conn.check_can_live_migrate_destination,
                          self.context, {'host': 'host'},
                          {}, {},
                          True, False)

    def test_check_can_live_migrate_destination_block_migration_fails(self):
        stubs.stubout_session(self.stubs,
                              stubs.FakeSessionForFailedMigrateTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.MigrationError,
                          self.conn.check_can_live_migrate_destination,
                          self.context, {'host': 'host'},
                          {}, {},
                          True, False)

    def _add_default_live_migrate_stubs(self, conn):
        """Install the VMOps/vm_utils stubs shared by the tests below."""
        def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
            pass

        def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
            return []

        def fake_get_vm_opaque_ref(instance):
            return "fake_vm"

        def fake_lookup_kernel_ramdisk(session, vm):
            return ("fake_PV_kernel", "fake_PV_ramdisk")

        self.stubs.Set(conn._vmops, "_generate_vdi_map",
                       fake_generate_vdi_map)
        self.stubs.Set(conn._vmops, "_get_iscsi_srs",
                       fake_get_iscsi_srs)
        self.stubs.Set(conn._vmops, "_get_vm_opaque_ref",
                       fake_get_vm_opaque_ref)
        self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
                       fake_lookup_kernel_ramdisk)

    def test_check_can_live_migrate_source_with_block_migrate(self):
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self._add_default_live_migrate_stubs(self.conn)

        dest_check_data = {'block_migration': True,
                           'migrate_data': {
                               'destination_sr_ref': None,
                               'migrate_send_data': None
                           }}
        result = self.conn.check_can_live_migrate_source(self.context,
                                                         {'host': 'host'},
                                                         dest_check_data)
        self.assertEqual(dest_check_data, result)

    def test_check_can_live_migrate_source_with_block_migrate_iscsi(self):
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self._add_default_live_migrate_stubs(self.conn)

        def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
            return ['sr_ref']
        self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
                       fake_get_iscsi_srs)

        # "true" means the plugin supports iSCSI SR migration.
        def fake_make_plugin_call(plugin, method, **args):
            return "true"
        self.stubs.Set(self.conn._vmops, "_make_plugin_call",
                       fake_make_plugin_call)

        dest_check_data = {'block_migration': True,
                           'migrate_data': {
                               'destination_sr_ref': None,
                               'migrate_send_data': None
                           }}
        result = self.conn.check_can_live_migrate_source(self.context,
                                                         {'host': 'host'},
                                                         dest_check_data)
        self.assertEqual(dest_check_data, result)

    def test_check_can_live_migrate_source_with_block_iscsi_fails(self):
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self._add_default_live_migrate_stubs(self.conn)

        def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
            return ['sr_ref']
        self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
                       fake_get_iscsi_srs)

        # A plugin error must translate into a MigrationError.
        def fake_make_plugin_call(plugin, method, **args):
            return {'returncode': 'error', 'message': 'Plugin not found'}
        self.stubs.Set(self.conn._vmops, "_make_plugin_call",
                       fake_make_plugin_call)

        self.assertRaises(exception.MigrationError,
                          self.conn.check_can_live_migrate_source,
                          self.context, {'host': 'host'},
                          {})

    def test_check_can_live_migrate_source_with_block_migrate_fails(self):
        stubs.stubout_session(self.stubs,
                              stubs.FakeSessionForFailedMigrateTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self._add_default_live_migrate_stubs(self.conn)

        dest_check_data = {'block_migration': True,
                           'migrate_data': {
                               'destination_sr_ref': None,
                               'migrate_send_data': None
                           }}
        self.assertRaises(exception.MigrationError,
                          self.conn.check_can_live_migrate_source,
                          self.context,
                          {'host': 'host'},
                          dest_check_data)

    def test_check_can_live_migrate_works(self):
        # Non-block migration: succeeds when the destination host appears
        # in the aggregate metadata for this host.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        def fake_aggregate_get_by_host(context, host, key=None):
            self.assertEqual(CONF.host, host)
            return [dict(test_aggregate.fake_aggregate,
                         metadetails={"host": "test_host_uuid"})]

        self.stubs.Set(db, "aggregate_get_by_host",
                       fake_aggregate_get_by_host)
        self.conn.check_can_live_migrate_destination(self.context,
                                                     {'host': 'host'},
                                                     False, False)

    def test_check_can_live_migrate_fails(self):
        # Non-block migration: fails when this host is missing from the
        # aggregate metadata.
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        def fake_aggregate_get_by_host(context, host, key=None):
            self.assertEqual(CONF.host, host)
            return [dict(test_aggregate.fake_aggregate,
                         metadetails={"dest_other": "test_host_uuid"})]

        self.stubs.Set(db, "aggregate_get_by_host",
                       fake_aggregate_get_by_host)
        self.assertRaises(exception.MigrationError,
                          self.conn.check_can_live_migrate_destination,
                          self.context, {'host': 'host'}, None, None)

    def test_live_migration(self):
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        def fake_get_vm_opaque_ref(instance):
            return "fake_vm"
        self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
                       fake_get_vm_opaque_ref)

        def fake_get_host_opaque_ref(context, destination_hostname):
            return "fake_host"
        self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
                       fake_get_host_opaque_ref)

        def post_method(context, instance, destination_hostname,
                        block_migration, migrate_data):
            post_method.called = True

        # NOTE(review): self.conn is passed in the context slot here; the
        # stubbed helpers ignore it, so only post_method matters.
        self.conn.live_migration(self.conn, None, None, post_method, None)

        self.assertTrue(post_method.called, "post_method.called")

    def test_live_migration_on_failure(self):
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        def fake_get_vm_opaque_ref(instance):
            return "fake_vm"
        self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
                       fake_get_vm_opaque_ref)

        def fake_get_host_opaque_ref(context, destination_hostname):
            return "fake_host"
        self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
                       fake_get_host_opaque_ref)

        # Force the XenAPI call to blow up so the recover path runs.
        def fake_call_xenapi(*args):
            raise NotImplementedError()
        self.stubs.Set(self.conn._vmops._session, "call_xenapi",
                       fake_call_xenapi)

        def recover_method(context, instance, destination_hostname,
                           block_migration):
            recover_method.called = True

        self.assertRaises(NotImplementedError, self.conn.live_migration,
                          self.conn, None, None, None, recover_method)
        self.assertTrue(recover_method.called, "recover_method.called")

    def test_live_migration_calls_post_migration(self):
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self._add_default_live_migrate_stubs(self.conn)

        def post_method(context, instance, destination_hostname,
                        block_migration, migrate_data):
            post_method.called = True

        # pass block_migration = True and migrate data
        migrate_data = {"destination_sr_ref": "foo",
                        "migrate_send_data": "bar"}
        self.conn.live_migration(self.conn, None, None, post_method, None,
                                 True, migrate_data)
        self.assertTrue(post_method.called, "post_method.called")

    def test_live_migration_block_cleans_srs(self):
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self._add_default_live_migrate_stubs(self.conn)

        def fake_get_iscsi_srs(context, instance):
            return ['sr_ref']
        self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
                       fake_get_iscsi_srs)

        def fake_forget_sr(context, instance):
            fake_forget_sr.called = True
        self.stubs.Set(volume_utils, "forget_sr",
                       fake_forget_sr)

        def post_method(context, instance, destination_hostname,
                        block_migration, migrate_data):
            post_method.called = True

        migrate_data = {"destination_sr_ref": "foo",
                        "migrate_send_data": "bar"}
        self.conn.live_migration(self.conn, None, None, post_method, None,
                                 True, migrate_data)

        self.assertTrue(post_method.called, "post_method.called")
        self.assertTrue(fake_forget_sr.called, "forget_sr.called")

    def test_live_migration_with_block_migration_raises_invalid_param(self):
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self._add_default_live_migrate_stubs(self.conn)

        def recover_method(context, instance, destination_hostname,
                           block_migration):
            recover_method.called = True
        # pass block_migration = True and no migrate data
        self.assertRaises(exception.InvalidParameterValue,
                          self.conn.live_migration, self.conn,
                          None, None, None, recover_method, True, None)
        self.assertTrue(recover_method.called, "recover_method.called")

    def test_live_migration_with_block_migration_fails_migrate_send(self):
        stubs.stubout_session(self.stubs,
                              stubs.FakeSessionForFailedMigrateTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self._add_default_live_migrate_stubs(self.conn)

        def recover_method(context, instance, destination_hostname,
                           block_migration):
            recover_method.called = True
        # pass block_migration = True and migrate data
        migrate_data = dict(destination_sr_ref='foo', migrate_send_data='bar')
        self.assertRaises(exception.MigrationError,
                          self.conn.live_migration, self.conn,
                          None, None, None, recover_method, True, migrate_data)
        self.assertTrue(recover_method.called, "recover_method.called")

    def test_live_migrate_block_migration_xapi_call_parameters(self):

        fake_vdi_map = object()

        # Session subclass that asserts on what VM.migrate_send receives.
        class Session(xenapi_fake.SessionBase):
            def VM_migrate_send(self_, session, vmref, migrate_data, islive,
                                vdi_map, vif_map, options):
                self.assertEqual('SOMEDATA', migrate_data)
                self.assertEqual(fake_vdi_map, vdi_map)

        stubs.stubout_session(self.stubs, Session)

        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        self._add_default_live_migrate_stubs(conn)

        def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
            return fake_vdi_map

        self.stubs.Set(conn._vmops, "_generate_vdi_map",
                       fake_generate_vdi_map)

        def dummy_callback(*args, **kwargs):
            pass

        conn.live_migration(
            self.context, instance=dict(name='ignore'), dest=None,
            post_method=dummy_callback, recover_method=dummy_callback,
            block_migration="SOMEDATA",
            migrate_data=dict(migrate_send_data='SOMEDATA',
                              destination_sr_ref="TARGET_SR_OPAQUE_REF"))

    def test_live_migrate_pool_migration_xapi_call_parameters(self):

        # Session subclass that asserts on VM.pool_migrate's arguments,
        # then raises so the test can prove the call happened.
        class Session(xenapi_fake.SessionBase):
            def VM_pool_migrate(self_, session, vm_ref, host_ref, options):
                self.assertEqual("fake_ref", host_ref)
                self.assertEqual({"live": "true"}, options)
                raise IOError()

        stubs.stubout_session(self.stubs, Session)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
        self._add_default_live_migrate_stubs(conn)

        def fake_get_host_opaque_ref(context, destination):
            return "fake_ref"

        self.stubs.Set(conn._vmops, "_get_host_opaque_ref",
                       fake_get_host_opaque_ref)

        def dummy_callback(*args, **kwargs):
            pass

        self.assertRaises(IOError, conn.live_migration,
                          self.context, instance=dict(name='ignore'), dest=None,
                          post_method=dummy_callback,
                          recover_method=dummy_callback,
                          block_migration=False, migrate_data={})

    def test_generate_vdi_map(self):
        stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        vm_ref = "fake_vm_ref"

        def fake_find_sr(_session):
            self.assertEqual(conn._session, _session)
            return "source_sr_ref"
        self.stubs.Set(vm_utils, "safe_find_sr", fake_find_sr)

        def fake_get_instance_vdis_for_sr(_session, _vm_ref, _sr_ref):
            self.assertEqual(conn._session, _session)
            self.assertEqual(vm_ref, _vm_ref)
            self.assertEqual("source_sr_ref", _sr_ref)
            return ["vdi0", "vdi1"]

        self.stubs.Set(vm_utils, "get_instance_vdis_for_sr",
                       fake_get_instance_vdis_for_sr)

        result = conn._vmops._generate_vdi_map("dest_sr_ref", vm_ref)

        # Every source VDI should map to the destination SR.
        self.assertEqual({"vdi0": "dest_sr_ref",
                          "vdi1": "dest_sr_ref"}, result)

    def test_rollback_live_migration_at_destination(self):
        stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
        conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        # Rollback must not destroy the instance at the destination.
        with mock.patch.object(conn, "destroy") as mock_destroy:
            conn.rollback_live_migration_at_destination("context",
                                                        "instance", [], None)
            self.assertFalse(mock_destroy.called)
+
+
class XenAPIInjectMetadataTestCase(stubs.XenAPITestBaseNoDB):
    """Tests metadata injection into the XenStore via VMOps.

    setUp replaces the xenstore read/write helpers with fakes that
    operate on ``self.xenstore`` (two dicts: 'persist' for the
    param-xenstore and 'ephem' for the live xenstore), so each test can
    assert on the resulting dictionary state.
    """
    def setUp(self):
        super(XenAPIInjectMetadataTestCase, self).setUp()
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        self.flags(firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

        # In-memory stand-in for the two xenstore areas.
        self.xenstore = dict(persist={}, ephem={})

        self.called_fake_get_vm_opaque_ref = False

        def fake_get_vm_opaque_ref(inst, instance):
            self.called_fake_get_vm_opaque_ref = True
            # The 'not_found' uuid simulates an instance with no VM.
            if instance["uuid"] == "not_found":
                raise exception.NotFound
            self.assertEqual(instance, {'uuid': 'fake'})
            return 'vm_ref'

        def fake_add_to_param_xenstore(inst, vm_ref, key, val):
            self.assertEqual(vm_ref, 'vm_ref')
            self.xenstore['persist'][key] = val

        def fake_remove_from_param_xenstore(inst, vm_ref, key):
            self.assertEqual(vm_ref, 'vm_ref')
            if key in self.xenstore['persist']:
                del self.xenstore['persist'][key]

        def fake_write_to_xenstore(inst, instance, path, value, vm_ref=None):
            self.assertEqual(instance, {'uuid': 'fake'})
            self.assertEqual(vm_ref, 'vm_ref')
            self.xenstore['ephem'][path] = jsonutils.dumps(value)

        def fake_delete_from_xenstore(inst, instance, path, vm_ref=None):
            self.assertEqual(instance, {'uuid': 'fake'})
            self.assertEqual(vm_ref, 'vm_ref')
            if path in self.xenstore['ephem']:
                del self.xenstore['ephem'][path]

        self.stubs.Set(vmops.VMOps, '_get_vm_opaque_ref',
                       fake_get_vm_opaque_ref)
        self.stubs.Set(vmops.VMOps, '_add_to_param_xenstore',
                       fake_add_to_param_xenstore)
        self.stubs.Set(vmops.VMOps, '_remove_from_param_xenstore',
                       fake_remove_from_param_xenstore)
        self.stubs.Set(vmops.VMOps, '_write_to_xenstore',
                       fake_write_to_xenstore)
        self.stubs.Set(vmops.VMOps, '_delete_from_xenstore',
                       fake_delete_from_xenstore)

    def test_inject_instance_metadata(self):

        # Add some system_metadata to ensure it doesn't get added
        # to xenstore
        instance = dict(metadata=[{'key': 'a', 'value': 1},
                                  {'key': 'b', 'value': 2},
                                  {'key': 'c', 'value': 3},
                                  # Check xenstore key sanitizing
                                  {'key': 'hi.there', 'value': 4},
                                  {'key': 'hi!t.e/e', 'value': 5}],
                        # Check xenstore key sanitizing
                        system_metadata=[{'key': 'sys_a', 'value': 1},
                                         {'key': 'sys_b', 'value': 2},
                                         {'key': 'sys_c', 'value': 3}],
                        uuid='fake')
        self.conn._vmops._inject_instance_metadata(instance, 'vm_ref')

        # Only user metadata lands, keys sanitized, system_metadata skipped.
        self.assertEqual(self.xenstore, {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                'vm-data/user-metadata/hi_there': '4',
                'vm-data/user-metadata/hi_t_e_e': '5',
            },
            'ephem': {},
        })

    def test_change_instance_metadata_add(self):
        # Test XenStore key sanitizing here, too.
        diff = {'test.key': ['+', 4]}
        instance = {'uuid': 'fake'}
        self.xenstore = {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
            },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
            },
        }

        self.conn._vmops.change_instance_metadata(instance, diff)

        self.assertEqual(self.xenstore, {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                'vm-data/user-metadata/test_key': '4',
            },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
                'vm-data/user-metadata/test_key': '4',
            },
        })

    def test_change_instance_metadata_update(self):
        # A '+' diff on an existing key overwrites its value in both areas.
        diff = dict(b=['+', 4])
        instance = {'uuid': 'fake'}
        self.xenstore = {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
            },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
            },
        }

        self.conn._vmops.change_instance_metadata(instance, diff)

        self.assertEqual(self.xenstore, {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '4',
                'vm-data/user-metadata/c': '3',
            },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '4',
                'vm-data/user-metadata/c': '3',
            },
        })

    def test_change_instance_metadata_delete(self):
        # A '-' diff removes the key from both areas.
        diff = dict(b=['-'])
        instance = {'uuid': 'fake'}
        self.xenstore = {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
            },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/b': '2',
                'vm-data/user-metadata/c': '3',
            },
        }

        self.conn._vmops.change_instance_metadata(instance, diff)

        self.assertEqual(self.xenstore, {
            'persist': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/c': '3',
            },
            'ephem': {
                'vm-data/user-metadata/a': '1',
                'vm-data/user-metadata/c': '3',
            },
        })

    def test_change_instance_metadata_not_found(self):
        # A missing VM must be tolerated (NotFound swallowed), but the
        # lookup must still have been attempted.
        instance = {'uuid': 'not_found'}
        self.conn._vmops.change_instance_metadata(instance, "fake_diff")
        self.assertTrue(self.called_fake_get_vm_opaque_ref)
+
+
class XenAPISessionTestCase(test.NoDBTestCase):
    """Tests for XenAPISession connection setup and version handling."""

    def _get_mock_xapisession(self, software_version):
        """Return a XenAPISession that skips __init__ and reports the
        given software_version dict."""
        class MockXapiSession(xenapi_session.XenAPISession):
            def __init__(_ignore):
                "Skip the superclass's dirty init"

            def _get_software_version(_ignore):
                return software_version

        return MockXapiSession()

    def test_local_session(self):
        # A unix:// URL should use the local xapi transport.
        session = self._get_mock_xapisession({})
        session.is_local_connection = True
        session.XenAPI = self.mox.CreateMockAnything()
        session.XenAPI.xapi_local().AndReturn("local_connection")

        self.mox.ReplayAll()
        self.assertEqual("local_connection",
                         session._create_session("unix://local"))

    def test_remote_session(self):
        session = self._get_mock_xapisession({})
        session.is_local_connection = False
        session.XenAPI = self.mox.CreateMockAnything()
        session.XenAPI.Session("url").AndReturn("remote_connection")

        self.mox.ReplayAll()
        self.assertEqual("remote_connection", session._create_session("url"))

    def test_get_product_version_product_brand_does_not_fail(self):
        # No product_brand/product_version keys (e.g. XCP): fall back to
        # platform_version with a None brand.
        session = self._get_mock_xapisession({
            'build_number': '0',
            'date': '2012-08-03',
            'hostname': 'komainu',
            'linux': '3.2.0-27-generic',
            'network_backend': 'bridge',
            'platform_name': 'XCP_Kronos',
            'platform_version': '1.6.0',
            'xapi': '1.3',
            'xen': '4.1.2',
            'xencenter_max': '1.10',
            'xencenter_min': '1.10'
        })

        self.assertEqual(
            ((1, 6, 0), None),
            session._get_product_version_and_brand()
        )

    def test_get_product_version_product_brand_xs_6(self):
        session = self._get_mock_xapisession({
            'product_brand': 'XenServer',
            'product_version': '6.0.50',
            'platform_version': '0.0.1'
        })

        self.assertEqual(
            ((6, 0, 50), 'XenServer'),
            session._get_product_version_and_brand()
        )

    def test_verify_plugin_version_same(self):
        session = self._get_mock_xapisession({})

        session.PLUGIN_REQUIRED_VERSION = '2.4'

        self.mox.StubOutWithMock(session, 'call_plugin_serialized')
        session.call_plugin_serialized('nova_plugin_version', 'get_version',
                                       ).AndReturn("2.4")

        self.mox.ReplayAll()
        session._verify_plugin_version()

    def test_verify_plugin_version_compatible(self):
        # Same major, higher minor: accepted.
        session = self._get_mock_xapisession({})
        session.XenAPI = xenapi_fake.FakeXenAPI()

        session.PLUGIN_REQUIRED_VERSION = '2.4'

        self.mox.StubOutWithMock(session, 'call_plugin_serialized')
        session.call_plugin_serialized('nova_plugin_version', 'get_version',
                                       ).AndReturn("2.5")

        self.mox.ReplayAll()
        session._verify_plugin_version()

    def test_verify_plugin_version_bad_maj(self):
        # Different major version: rejected.
        session = self._get_mock_xapisession({})
        session.XenAPI = xenapi_fake.FakeXenAPI()

        session.PLUGIN_REQUIRED_VERSION = '2.4'

        self.mox.StubOutWithMock(session, 'call_plugin_serialized')
        session.call_plugin_serialized('nova_plugin_version', 'get_version',
                                       ).AndReturn("3.0")

        self.mox.ReplayAll()
        self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)

    def test_verify_plugin_version_bad_min(self):
        # Same major, lower minor: rejected.
        session = self._get_mock_xapisession({})
        session.XenAPI = xenapi_fake.FakeXenAPI()

        session.PLUGIN_REQUIRED_VERSION = '2.4'

        self.mox.StubOutWithMock(session, 'call_plugin_serialized')
        session.call_plugin_serialized('nova_plugin_version', 'get_version',
                                       ).AndReturn("2.3")

        self.mox.ReplayAll()
        self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)

    def test_verify_current_version_matches(self):
        session = self._get_mock_xapisession({})

        # Import the plugin to extract its version
        path = os.path.dirname(__file__)
        rel_path_elem = "../../../../../plugins/xenserver/xenapi/etc/xapi.d/" \
                        "plugins/nova_plugin_version"
        for elem in rel_path_elem.split('/'):
            path = os.path.join(path, elem)
        path = os.path.realpath(path)

        # Scan the plugin source for its PLUGIN_VERSION assignment rather
        # than importing it (the plugin is not an importable module here).
        plugin_version = None
        with open(path) as plugin_file:
            for line in plugin_file:
                if "PLUGIN_VERSION = " in line:
                    plugin_version = line.strip()[17:].strip('"')

        self.assertEqual(session.PLUGIN_REQUIRED_VERSION,
                         plugin_version)
+
+
class XenAPIFakeTestCase(test.NoDBTestCase):
    """Tests for the fake XenAPI session's record query matcher."""

    def test_query_matches(self):
        record = {'a': '1', 'b': '2', 'c_d': '3'}

        tests = {'field "a"="1"': True,
                 'field "b"="2"': True,
                 'field "b"="4"': False,
                 'not field "b"="4"': True,
                 'field "a"="1" and field "b"="4"': False,
                 'field "a"="1" or field "b"="4"': True,
                 'field "c__d"="3"': True,
                 'field \'b\'=\'2\'': True,
                 }

        for query, expected in tests.items():
            fail_msg = "for test '%s'" % query
            self.assertEqual(xenapi_fake._query_matches(record, query),
                             expected, fail_msg)

    def test_query_bad_format(self):
        record = {'a': '1', 'b': '2', 'c': '3'}

        # Malformed queries must simply not match.
        for query in ('"a"="1" or "b"="4"',
                      'a=1'):
            fail_msg = "for test '%s'" % query
            self.assertFalse(xenapi_fake._query_matches(record, query),
                             fail_msg)
diff --git a/nova/tests/virt/xenapi/vm_rrd.xml b/nova/tests/unit/virt/xenapi/vm_rrd.xml
index f9a7c8083e..f9a7c8083e 100644
--- a/nova/tests/virt/xenapi/vm_rrd.xml
+++ b/nova/tests/unit/virt/xenapi/vm_rrd.xml
diff --git a/nova/tests/volume/__init__.py b/nova/tests/unit/volume/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/volume/__init__.py
+++ b/nova/tests/unit/volume/__init__.py
diff --git a/nova/tests/volume/encryptors/__init__.py b/nova/tests/unit/volume/encryptors/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/tests/volume/encryptors/__init__.py
+++ b/nova/tests/unit/volume/encryptors/__init__.py
diff --git a/nova/tests/unit/volume/encryptors/test_base.py b/nova/tests/unit/volume/encryptors/test_base.py
new file mode 100644
index 0000000000..d60c20ecd3
--- /dev/null
+++ b/nova/tests/unit/volume/encryptors/test_base.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.i18n import _LE
+from nova import keymgr
+from nova import test
+from nova.tests.unit.keymgr import fake
+from nova.volume import encryptors
+
+
+class VolumeEncryptorTestCase(test.TestCase):
+ def _create(self, device_path):
+ pass
+
+ def setUp(self):
+ super(VolumeEncryptorTestCase, self).setUp()
+
+ self.stubs.Set(keymgr, 'API', fake.fake_api)
+
+ self.connection_info = {
+ "data": {
+ "device_path": "/dev/disk/by-path/"
+ "ip-192.0.2.0:3260-iscsi-iqn.2010-10.org.openstack"
+ ":volume-fake_uuid-lun-1",
+ },
+ }
+ self.encryptor = self._create(self.connection_info)
+
+ @mock.patch('nova.volume.encryptors.LOG')
+ def test_error_log(self, log):
+ encryption = {'control_location': 'front-end',
+ 'provider': 'TestEncryptor'}
+ provider = 'TestEncryptor'
+ try:
+ encryptors.get_volume_encryptor(self.connection_info, **encryption)
+ except Exception as e:
+ log.error.assert_called_once_with(_LE("Error instantiating "
+ "%(provider)s: "
+ "%(exception)s"),
+ {'provider': provider, 'exception': e})
diff --git a/nova/tests/unit/volume/encryptors/test_cryptsetup.py b/nova/tests/unit/volume/encryptors/test_cryptsetup.py
new file mode 100644
index 0000000000..ab84d17d9b
--- /dev/null
+++ b/nova/tests/unit/volume/encryptors/test_cryptsetup.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import array
+import os
+
+from nova.keymgr import key
+from nova.tests.unit.volume.encryptors import test_base
+from nova import utils
+from nova.volume.encryptors import cryptsetup
+
+
+def fake__get_key(context):
+ raw = array.array('B', ('0' * 64).decode('hex')).tolist()
+
+ symmetric_key = key.SymmetricKey('AES', raw)
+ return symmetric_key
+
+
+class CryptsetupEncryptorTestCase(test_base.VolumeEncryptorTestCase):
+ def _create(self, connection_info):
+ return cryptsetup.CryptsetupEncryptor(connection_info)
+
+ def setUp(self):
+ super(CryptsetupEncryptorTestCase, self).setUp()
+
+ self.executes = []
+
+ def fake_execute(*cmd, **kwargs):
+ self.executes.append(cmd)
+ return None, None
+
+ self.stubs.Set(utils, 'execute', fake_execute)
+ self.stubs.Set(os.path, "realpath", lambda x: x)
+
+ self.dev_path = self.connection_info['data']['device_path']
+ self.dev_name = self.dev_path.split('/')[-1]
+
+ self.symlink_path = self.dev_path
+
+ def test__open_volume(self):
+ self.encryptor._open_volume("passphrase")
+
+ expected_commands = [('cryptsetup', 'create', '--key-file=-',
+ self.dev_name, self.dev_path)]
+ self.assertEqual(expected_commands, self.executes)
+
+ def test_attach_volume(self):
+ self.stubs.Set(self.encryptor, '_get_key', fake__get_key)
+
+ self.encryptor.attach_volume(None)
+
+ expected_commands = [('cryptsetup', 'create', '--key-file=-',
+ self.dev_name, self.dev_path),
+ ('ln', '--symbolic', '--force',
+ '/dev/mapper/%s' % self.dev_name,
+ self.symlink_path)]
+ self.assertEqual(expected_commands, self.executes)
+
+ def test__close_volume(self):
+ self.encryptor.detach_volume()
+
+ expected_commands = [('cryptsetup', 'remove', self.dev_name)]
+ self.assertEqual(expected_commands, self.executes)
+
+ def test_detach_volume(self):
+ self.encryptor.detach_volume()
+
+ expected_commands = [('cryptsetup', 'remove', self.dev_name)]
+ self.assertEqual(expected_commands, self.executes)
diff --git a/nova/tests/unit/volume/encryptors/test_luks.py b/nova/tests/unit/volume/encryptors/test_luks.py
new file mode 100644
index 0000000000..00e03053ea
--- /dev/null
+++ b/nova/tests/unit/volume/encryptors/test_luks.py
@@ -0,0 +1,71 @@
+# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.tests.unit.volume.encryptors import test_cryptsetup
+from nova.volume.encryptors import luks
+
+
+"""
+The utility of these test cases is limited given the simplicity of the
+LuksEncryptor class. The attach_volume method has the only significant logic
+to handle cases where the volume has not previously been formatted, but
+exercising this logic requires "real" devices and actually executing the
+various cryptsetup commands rather than simply logging them.
+"""
+
+
+class LuksEncryptorTestCase(test_cryptsetup.CryptsetupEncryptorTestCase):
+ def _create(self, connection_info):
+ return luks.LuksEncryptor(connection_info)
+
+ def test__format_volume(self):
+ self.encryptor._format_volume("passphrase")
+
+ expected_commands = [('cryptsetup', '--batch-mode', 'luksFormat',
+ '--key-file=-', self.dev_path)]
+ self.assertEqual(expected_commands, self.executes)
+
+ def test__open_volume(self):
+ self.encryptor._open_volume("passphrase")
+
+ expected_commands = [('cryptsetup', 'luksOpen', '--key-file=-',
+ self.dev_path, self.dev_name)]
+ self.assertEqual(expected_commands, self.executes)
+
+ def test_attach_volume(self):
+ self.stubs.Set(self.encryptor, '_get_key',
+ test_cryptsetup.fake__get_key)
+
+ self.encryptor.attach_volume(None)
+
+ expected_commands = [('cryptsetup', 'luksOpen', '--key-file=-',
+ self.dev_path, self.dev_name),
+ ('ln', '--symbolic', '--force',
+ '/dev/mapper/%s' % self.dev_name,
+ self.symlink_path)]
+ self.assertEqual(expected_commands, self.executes)
+
+ def test__close_volume(self):
+ self.encryptor.detach_volume()
+
+ expected_commands = [('cryptsetup', 'luksClose', self.dev_name)]
+ self.assertEqual(expected_commands, self.executes)
+
+ def test_detach_volume(self):
+ self.encryptor.detach_volume()
+
+ expected_commands = [('cryptsetup', 'luksClose', self.dev_name)]
+ self.assertEqual(expected_commands, self.executes)
diff --git a/nova/tests/unit/volume/encryptors/test_nop.py b/nova/tests/unit/volume/encryptors/test_nop.py
new file mode 100644
index 0000000000..aa32a9c0e6
--- /dev/null
+++ b/nova/tests/unit/volume/encryptors/test_nop.py
@@ -0,0 +1,28 @@
+# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.unit.volume.encryptors import test_base
+from nova.volume.encryptors import nop
+
+
+class NoOpEncryptorTestCase(test_base.VolumeEncryptorTestCase):
+ def _create(self, connection_info):
+ return nop.NoOpEncryptor(connection_info)
+
+ def test_attach_volume(self):
+ self.encryptor.attach_volume(None)
+
+ def test_detach_volume(self):
+ self.encryptor.detach_volume()
diff --git a/nova/tests/volume/test_cinder.py b/nova/tests/unit/volume/test_cinder.py
index 1aa3f85c97..1aa3f85c97 100644
--- a/nova/tests/volume/test_cinder.py
+++ b/nova/tests/unit/volume/test_cinder.py
diff --git a/nova/tests/virt/disk/test_inject.py b/nova/tests/virt/disk/test_inject.py
deleted file mode 100644
index 9685c2e6f2..0000000000
--- a/nova/tests/virt/disk/test_inject.py
+++ /dev/null
@@ -1,284 +0,0 @@
-# Copyright (C) 2012 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-import sys
-
-from nova import exception
-from nova import test
-from nova.tests.virt.disk.vfs import fakeguestfs
-from nova.virt.disk import api as diskapi
-from nova.virt.disk.vfs import guestfs as vfsguestfs
-
-
-class VirtDiskTest(test.NoDBTestCase):
-
- def setUp(self):
- super(VirtDiskTest, self).setUp()
- sys.modules['guestfs'] = fakeguestfs
- vfsguestfs.guestfs = fakeguestfs
-
- def test_inject_data(self):
-
- self.assertTrue(diskapi.inject_data("/some/file", use_cow=True))
-
- self.assertTrue(diskapi.inject_data("/some/file",
- mandatory=('files',)))
-
- self.assertTrue(diskapi.inject_data("/some/file", key="mysshkey",
- mandatory=('key',)))
-
- os_name = os.name
- os.name = 'nt' # Cause password injection to fail
- self.assertRaises(exception.NovaException,
- diskapi.inject_data,
- "/some/file", admin_password="p",
- mandatory=('admin_password',))
- self.assertFalse(diskapi.inject_data("/some/file", admin_password="p"))
- os.name = os_name
-
- self.assertFalse(diskapi.inject_data("/some/fail/file",
- key="mysshkey"))
-
- def test_inject_data_key(self):
-
- vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
- vfs.setup()
-
- diskapi._inject_key_into_fs("mysshkey", vfs)
-
- self.assertIn("/root/.ssh", vfs.handle.files)
- self.assertEqual(vfs.handle.files["/root/.ssh"],
- {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700})
- self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files)
- self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"],
- {'isdir': False,
- 'content': "Hello World\n# The following ssh " +
- "key was injected by Nova\nmysshkey\n",
- 'gid': 100,
- 'uid': 100,
- 'mode': 0o600})
-
- vfs.teardown()
-
- def test_inject_data_key_with_selinux(self):
-
- vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
- vfs.setup()
-
- vfs.make_path("etc/selinux")
- vfs.make_path("etc/rc.d")
- diskapi._inject_key_into_fs("mysshkey", vfs)
-
- self.assertIn("/etc/rc.d/rc.local", vfs.handle.files)
- self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"],
- {'isdir': False,
- 'content': "Hello World#!/bin/sh\n# Added by " +
- "Nova to ensure injected ssh keys " +
- "have the right context\nrestorecon " +
- "-RF root/.ssh 2>/dev/null || :\n",
- 'gid': 100,
- 'uid': 100,
- 'mode': 0o700})
-
- self.assertIn("/root/.ssh", vfs.handle.files)
- self.assertEqual(vfs.handle.files["/root/.ssh"],
- {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700})
- self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files)
- self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"],
- {'isdir': False,
- 'content': "Hello World\n# The following ssh " +
- "key was injected by Nova\nmysshkey\n",
- 'gid': 100,
- 'uid': 100,
- 'mode': 0o600})
-
- vfs.teardown()
-
- def test_inject_data_key_with_selinux_append_with_newline(self):
-
- vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
- vfs.setup()
-
- vfs.replace_file("/etc/rc.d/rc.local", "#!/bin/sh\necho done")
- vfs.make_path("etc/selinux")
- vfs.make_path("etc/rc.d")
- diskapi._inject_key_into_fs("mysshkey", vfs)
-
- self.assertIn("/etc/rc.d/rc.local", vfs.handle.files)
- self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"],
- {'isdir': False,
- 'content': "#!/bin/sh\necho done\n# Added "
- "by Nova to ensure injected ssh keys have "
- "the right context\nrestorecon -RF "
- "root/.ssh 2>/dev/null || :\n",
- 'gid': 100,
- 'uid': 100,
- 'mode': 0o700})
- vfs.teardown()
-
- def test_inject_net(self):
-
- vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
- vfs.setup()
-
- diskapi._inject_net_into_fs("mynetconfig", vfs)
-
- self.assertIn("/etc/network/interfaces", vfs.handle.files)
- self.assertEqual(vfs.handle.files["/etc/network/interfaces"],
- {'content': 'mynetconfig',
- 'gid': 100,
- 'isdir': False,
- 'mode': 0o700,
- 'uid': 100})
- vfs.teardown()
-
- def test_inject_metadata(self):
- vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
- vfs.setup()
-
- diskapi._inject_metadata_into_fs({"foo": "bar", "eek": "wizz"}, vfs)
-
- self.assertIn("/meta.js", vfs.handle.files)
- self.assertEqual({'content': '{"foo": "bar", ' +
- '"eek": "wizz"}',
- 'gid': 100,
- 'isdir': False,
- 'mode': 0o700,
- 'uid': 100},
- vfs.handle.files["/meta.js"])
- vfs.teardown()
-
- def test_inject_admin_password(self):
- vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
- vfs.setup()
-
- def fake_salt():
- return "1234567890abcdef"
-
- self.stubs.Set(diskapi, '_generate_salt', fake_salt)
-
- vfs.handle.write("/etc/shadow",
- "root:$1$12345678$xxxxx:14917:0:99999:7:::\n" +
- "bin:*:14495:0:99999:7:::\n" +
- "daemon:*:14495:0:99999:7:::\n")
-
- vfs.handle.write("/etc/passwd",
- "root:x:0:0:root:/root:/bin/bash\n" +
- "bin:x:1:1:bin:/bin:/sbin/nologin\n" +
- "daemon:x:2:2:daemon:/sbin:/sbin/nologin\n")
-
- diskapi._inject_admin_password_into_fs("123456", vfs)
-
- self.assertEqual(vfs.handle.files["/etc/passwd"],
- {'content': "root:x:0:0:root:/root:/bin/bash\n" +
- "bin:x:1:1:bin:/bin:/sbin/nologin\n" +
- "daemon:x:2:2:daemon:/sbin:" +
- "/sbin/nologin\n",
- 'gid': 100,
- 'isdir': False,
- 'mode': 0o700,
- 'uid': 100})
- shadow = vfs.handle.files["/etc/shadow"]
-
- # if the encrypted password is only 13 characters long, then
- # nova.virt.disk.api:_set_password fell back to DES.
- if len(shadow['content']) == 91:
- self.assertEqual(shadow,
- {'content': "root:12tir.zIbWQ3c" +
- ":14917:0:99999:7:::\n" +
- "bin:*:14495:0:99999:7:::\n" +
- "daemon:*:14495:0:99999:7:::\n",
- 'gid': 100,
- 'isdir': False,
- 'mode': 0o700,
- 'uid': 100})
- else:
- self.assertEqual(shadow,
- {'content': "root:$1$12345678$a4ge4d5iJ5vw" +
- "vbFS88TEN0:14917:0:99999:7:::\n" +
- "bin:*:14495:0:99999:7:::\n" +
- "daemon:*:14495:0:99999:7:::\n",
- 'gid': 100,
- 'isdir': False,
- 'mode': 0o700,
- 'uid': 100})
- vfs.teardown()
-
- def test_inject_files_into_fs(self):
- vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
- vfs.setup()
-
- diskapi._inject_files_into_fs([("/path/to/not/exists/file",
- "inject-file-contents")],
- vfs)
-
- self.assertIn("/path/to/not/exists", vfs.handle.files)
- shadow_dir = vfs.handle.files["/path/to/not/exists"]
- self.assertEqual(shadow_dir,
- {"isdir": True,
- "gid": 0,
- "uid": 0,
- "mode": 0o744})
-
- shadow_file = vfs.handle.files["/path/to/not/exists/file"]
- self.assertEqual(shadow_file,
- {"isdir": False,
- "content": "inject-file-contents",
- "gid": 100,
- "uid": 100,
- "mode": 0o700})
- vfs.teardown()
-
- def test_inject_files_into_fs_dir_exists(self):
- vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
- vfs.setup()
-
- called = {'make_path': False}
-
- def fake_has_file(*args, **kwargs):
- return True
-
- def fake_make_path(*args, **kwargs):
- called['make_path'] = True
-
- self.stubs.Set(vfs, 'has_file', fake_has_file)
- self.stubs.Set(vfs, 'make_path', fake_make_path)
-
- # test for already exists dir
- diskapi._inject_files_into_fs([("/path/to/exists/file",
- "inject-file-contents")],
- vfs)
-
- self.assertIn("/path/to/exists/file", vfs.handle.files)
- self.assertFalse(called['make_path'])
-
- # test for root dir
- diskapi._inject_files_into_fs([("/inject-file",
- "inject-file-contents")],
- vfs)
-
- self.assertIn("/inject-file", vfs.handle.files)
- self.assertFalse(called['make_path'])
-
- # test for null dir
- vfs.handle.files.pop("/inject-file")
- diskapi._inject_files_into_fs([("inject-file",
- "inject-file-contents")],
- vfs)
-
- self.assertIn("/inject-file", vfs.handle.files)
- self.assertFalse(called['make_path'])
-
- vfs.teardown()
diff --git a/nova/tests/virt/disk/vfs/test_guestfs.py b/nova/tests/virt/disk/vfs/test_guestfs.py
deleted file mode 100644
index cf954e9a35..0000000000
--- a/nova/tests/virt/disk/vfs/test_guestfs.py
+++ /dev/null
@@ -1,264 +0,0 @@
-# Copyright (C) 2012 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sys
-
-from nova import exception
-from nova import test
-from nova.tests.virt.disk.vfs import fakeguestfs
-from nova.virt.disk.vfs import guestfs as vfsimpl
-
-
-class VirtDiskVFSGuestFSTest(test.NoDBTestCase):
-
- def setUp(self):
- super(VirtDiskVFSGuestFSTest, self).setUp()
- sys.modules['guestfs'] = fakeguestfs
- vfsimpl.guestfs = fakeguestfs
-
- def _do_test_appliance_setup_inspect(self, forcetcg):
- if forcetcg:
- vfsimpl.force_tcg()
- else:
- vfsimpl.force_tcg(False)
-
- vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
- imgfmt="qcow2",
- partition=-1)
- vfs.setup()
-
- if forcetcg:
- self.assertEqual("force_tcg", vfs.handle.backend_settings)
- vfsimpl.force_tcg(False)
- else:
- self.assertIsNone(vfs.handle.backend_settings)
-
- self.assertTrue(vfs.handle.running)
- self.assertEqual(3, len(vfs.handle.mounts))
- self.assertEqual("/dev/mapper/guestvgf-lv_root",
- vfs.handle.mounts[0][1])
- self.assertEqual("/dev/vda1",
- vfs.handle.mounts[1][1])
- self.assertEqual("/dev/mapper/guestvgf-lv_home",
- vfs.handle.mounts[2][1])
- self.assertEqual("/", vfs.handle.mounts[0][2])
- self.assertEqual("/boot", vfs.handle.mounts[1][2])
- self.assertEqual("/home", vfs.handle.mounts[2][2])
-
- handle = vfs.handle
- vfs.teardown()
-
- self.assertIsNone(vfs.handle)
- self.assertFalse(handle.running)
- self.assertTrue(handle.closed)
- self.assertEqual(0, len(handle.mounts))
-
- def test_appliance_setup_inspect_auto(self):
- self._do_test_appliance_setup_inspect(False)
-
- def test_appliance_setup_inspect_tcg(self):
- self._do_test_appliance_setup_inspect(True)
-
- def test_appliance_setup_inspect_no_root_raises(self):
- vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
- imgfmt="qcow2",
- partition=-1)
- # call setup to init the handle so we can stub it
- vfs.setup()
-
- self.assertIsNone(vfs.handle.backend_settings)
-
- def fake_inspect_os():
- return []
-
- self.stubs.Set(vfs.handle, 'inspect_os', fake_inspect_os)
- self.assertRaises(exception.NovaException, vfs.setup_os_inspect)
-
- def test_appliance_setup_inspect_multi_boots_raises(self):
- vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
- imgfmt="qcow2",
- partition=-1)
- # call setup to init the handle so we can stub it
- vfs.setup()
-
- self.assertIsNone(vfs.handle.backend_settings)
-
- def fake_inspect_os():
- return ['fake1', 'fake2']
-
- self.stubs.Set(vfs.handle, 'inspect_os', fake_inspect_os)
- self.assertRaises(exception.NovaException, vfs.setup_os_inspect)
-
- def test_appliance_setup_static_nopart(self):
- vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
- imgfmt="qcow2",
- partition=None)
- vfs.setup()
-
- self.assertIsNone(vfs.handle.backend_settings)
- self.assertTrue(vfs.handle.running)
- self.assertEqual(1, len(vfs.handle.mounts))
- self.assertEqual("/dev/sda", vfs.handle.mounts[0][1])
- self.assertEqual("/", vfs.handle.mounts[0][2])
-
- handle = vfs.handle
- vfs.teardown()
-
- self.assertIsNone(vfs.handle)
- self.assertFalse(handle.running)
- self.assertTrue(handle.closed)
- self.assertEqual(0, len(handle.mounts))
-
- def test_appliance_setup_static_part(self):
- vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2",
- imgfmt="qcow2",
- partition=2)
- vfs.setup()
-
- self.assertIsNone(vfs.handle.backend_settings)
- self.assertTrue(vfs.handle.running)
- self.assertEqual(1, len(vfs.handle.mounts))
- self.assertEqual("/dev/sda2", vfs.handle.mounts[0][1])
- self.assertEqual("/", vfs.handle.mounts[0][2])
-
- handle = vfs.handle
- vfs.teardown()
-
- self.assertIsNone(vfs.handle)
- self.assertFalse(handle.running)
- self.assertTrue(handle.closed)
- self.assertEqual(0, len(handle.mounts))
-
- def test_makepath(self):
- vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.setup()
- vfs.make_path("/some/dir")
- vfs.make_path("/other/dir")
-
- self.assertIn("/some/dir", vfs.handle.files)
- self.assertIn("/other/dir", vfs.handle.files)
- self.assertTrue(vfs.handle.files["/some/dir"]["isdir"])
- self.assertTrue(vfs.handle.files["/other/dir"]["isdir"])
-
- vfs.teardown()
-
- def test_append_file(self):
- vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.setup()
- vfs.append_file("/some/file", " Goodbye")
-
- self.assertIn("/some/file", vfs.handle.files)
- self.assertEqual("Hello World Goodbye",
- vfs.handle.files["/some/file"]["content"])
-
- vfs.teardown()
-
- def test_replace_file(self):
- vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.setup()
- vfs.replace_file("/some/file", "Goodbye")
-
- self.assertIn("/some/file", vfs.handle.files)
- self.assertEqual("Goodbye",
- vfs.handle.files["/some/file"]["content"])
-
- vfs.teardown()
-
- def test_read_file(self):
- vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.setup()
- self.assertEqual("Hello World", vfs.read_file("/some/file"))
-
- vfs.teardown()
-
- def test_has_file(self):
- vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.setup()
- vfs.read_file("/some/file")
-
- self.assertTrue(vfs.has_file("/some/file"))
- self.assertFalse(vfs.has_file("/other/file"))
-
- vfs.teardown()
-
- def test_set_permissions(self):
- vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.setup()
- vfs.read_file("/some/file")
-
- self.assertEqual(0o700, vfs.handle.files["/some/file"]["mode"])
-
- vfs.set_permissions("/some/file", 0o7777)
- self.assertEqual(0o7777, vfs.handle.files["/some/file"]["mode"])
-
- vfs.teardown()
-
- def test_set_ownership(self):
- vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.setup()
- vfs.read_file("/some/file")
-
- self.assertEqual(100, vfs.handle.files["/some/file"]["uid"])
- self.assertEqual(100, vfs.handle.files["/some/file"]["gid"])
-
- vfs.set_ownership("/some/file", "fred", None)
- self.assertEqual(105, vfs.handle.files["/some/file"]["uid"])
- self.assertEqual(100, vfs.handle.files["/some/file"]["gid"])
-
- vfs.set_ownership("/some/file", None, "users")
- self.assertEqual(105, vfs.handle.files["/some/file"]["uid"])
- self.assertEqual(500, vfs.handle.files["/some/file"]["gid"])
-
- vfs.set_ownership("/some/file", "joe", "admins")
- self.assertEqual(110, vfs.handle.files["/some/file"]["uid"])
- self.assertEqual(600, vfs.handle.files["/some/file"]["gid"])
-
- vfs.teardown()
-
- def test_close_on_error(self):
- vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.setup()
- self.assertFalse(vfs.handle.kwargs['close_on_exit'])
- vfs.teardown()
- self.stubs.Set(fakeguestfs.GuestFS, 'SUPPORT_CLOSE_ON_EXIT', False)
- vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.setup()
- self.assertNotIn('close_on_exit', vfs.handle.kwargs)
- vfs.teardown()
-
- def test_python_return_dict(self):
- vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.setup()
- self.assertFalse(vfs.handle.kwargs['python_return_dict'])
- vfs.teardown()
- self.stubs.Set(fakeguestfs.GuestFS, 'SUPPORT_RETURN_DICT', False)
- vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.setup()
- self.assertNotIn('python_return_dict', vfs.handle.kwargs)
- vfs.teardown()
-
- def test_setup_debug_disable(self):
- vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.setup()
- self.assertFalse(vfs.handle.trace_enabled)
- self.assertFalse(vfs.handle.verbose_enabled)
- self.assertIsNone(vfs.handle.event_callback)
-
- def test_setup_debug_enabled(self):
- self.flags(debug=True, group='guestfs')
- vfs = vfsimpl.VFSGuestFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.setup()
- self.assertTrue(vfs.handle.trace_enabled)
- self.assertTrue(vfs.handle.verbose_enabled)
- self.assertIsNotNone(vfs.handle.event_callback)
diff --git a/nova/tests/virt/disk/vfs/test_localfs.py b/nova/tests/virt/disk/vfs/test_localfs.py
deleted file mode 100644
index 16935498ac..0000000000
--- a/nova/tests/virt/disk/vfs/test_localfs.py
+++ /dev/null
@@ -1,385 +0,0 @@
-# Copyright (C) 2012 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.concurrency import processutils
-from oslo.config import cfg
-
-from nova import exception
-from nova import test
-from nova.tests import utils as tests_utils
-import nova.utils
-from nova.virt.disk.vfs import localfs as vfsimpl
-
-CONF = cfg.CONF
-
-dirs = []
-files = {}
-commands = []
-
-
-def fake_execute(*args, **kwargs):
- commands.append({"args": args, "kwargs": kwargs})
-
- if args[0] == "readlink":
- if args[1] == "-nm":
- if args[2] in ["/scratch/dir/some/file",
- "/scratch/dir/some/dir",
- "/scratch/dir/other/dir",
- "/scratch/dir/other/file"]:
- return args[2], ""
- elif args[1] == "-e":
- if args[2] in files:
- return args[2], ""
-
- return "", "No such file"
- elif args[0] == "mkdir":
- dirs.append(args[2])
- elif args[0] == "chown":
- owner = args[1]
- path = args[2]
- if path not in files:
- raise Exception("No such file: " + path)
-
- sep = owner.find(':')
- if sep != -1:
- user = owner[0:sep]
- group = owner[sep + 1:]
- else:
- user = owner
- group = None
-
- if user:
- if user == "fred":
- uid = 105
- else:
- uid = 110
- files[path]["uid"] = uid
- if group:
- if group == "users":
- gid = 500
- else:
- gid = 600
- files[path]["gid"] = gid
- elif args[0] == "chgrp":
- group = args[1]
- path = args[2]
- if path not in files:
- raise Exception("No such file: " + path)
-
- if group == "users":
- gid = 500
- else:
- gid = 600
- files[path]["gid"] = gid
- elif args[0] == "chmod":
- mode = args[1]
- path = args[2]
- if path not in files:
- raise Exception("No such file: " + path)
-
- files[path]["mode"] = int(mode, 8)
- elif args[0] == "cat":
- path = args[1]
- if path not in files:
- files[path] = {
- "content": "Hello World",
- "gid": 100,
- "uid": 100,
- "mode": 0o700
- }
- return files[path]["content"], ""
- elif args[0] == "tee":
- if args[1] == "-a":
- path = args[2]
- append = True
- else:
- path = args[1]
- append = False
- if path not in files:
- files[path] = {
- "content": "Hello World",
- "gid": 100,
- "uid": 100,
- "mode": 0o700,
- }
- if append:
- files[path]["content"] += kwargs["process_input"]
- else:
- files[path]["content"] = kwargs["process_input"]
-
-
-class VirtDiskVFSLocalFSTestPaths(test.NoDBTestCase):
- def setUp(self):
- super(VirtDiskVFSLocalFSTestPaths, self).setUp()
-
- real_execute = processutils.execute
-
- def nonroot_execute(*cmd_parts, **kwargs):
- kwargs.pop('run_as_root', None)
- return real_execute(*cmd_parts, **kwargs)
-
- self.stubs.Set(processutils, 'execute', nonroot_execute)
-
- def test_check_safe_path(self):
- if not tests_utils.coreutils_readlink_available():
- self.skipTest("coreutils readlink(1) unavailable")
- vfs = vfsimpl.VFSLocalFS("dummy.img")
- vfs.imgdir = "/foo"
- ret = vfs._canonical_path('etc/something.conf')
- self.assertEqual(ret, '/foo/etc/something.conf')
-
- def test_check_unsafe_path(self):
- if not tests_utils.coreutils_readlink_available():
- self.skipTest("coreutils readlink(1) unavailable")
- vfs = vfsimpl.VFSLocalFS("dummy.img")
- vfs.imgdir = "/foo"
- self.assertRaises(exception.Invalid,
- vfs._canonical_path,
- 'etc/../../../something.conf')
-
-
-class VirtDiskVFSLocalFSTest(test.NoDBTestCase):
- def test_makepath(self):
- global dirs, commands
- dirs = []
- commands = []
- self.stubs.Set(processutils, 'execute', fake_execute)
-
- vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.imgdir = "/scratch/dir"
- vfs.make_path("/some/dir")
- vfs.make_path("/other/dir")
-
- self.assertEqual(dirs,
- ["/scratch/dir/some/dir", "/scratch/dir/other/dir"]),
-
- root_helper = nova.utils._get_root_helper()
- self.assertEqual(commands,
- [{'args': ('readlink', '-nm',
- '/scratch/dir/some/dir'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('mkdir', '-p',
- '/scratch/dir/some/dir'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('readlink', '-nm',
- '/scratch/dir/other/dir'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('mkdir', '-p',
- '/scratch/dir/other/dir'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}}])
-
- def test_append_file(self):
- global files, commands
- files = {}
- commands = []
- self.stubs.Set(processutils, 'execute', fake_execute)
-
- vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.imgdir = "/scratch/dir"
- vfs.append_file("/some/file", " Goodbye")
-
- self.assertIn("/scratch/dir/some/file", files)
- self.assertEqual(files["/scratch/dir/some/file"]["content"],
- "Hello World Goodbye")
-
- root_helper = nova.utils._get_root_helper()
- self.assertEqual(commands,
- [{'args': ('readlink', '-nm',
- '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('tee', '-a',
- '/scratch/dir/some/file'),
- 'kwargs': {'process_input': ' Goodbye',
- 'run_as_root': True,
- 'root_helper': root_helper}}])
-
- def test_replace_file(self):
- global files, commands
- files = {}
- commands = []
- self.stubs.Set(processutils, 'execute', fake_execute)
-
- vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.imgdir = "/scratch/dir"
- vfs.replace_file("/some/file", "Goodbye")
-
- self.assertIn("/scratch/dir/some/file", files)
- self.assertEqual(files["/scratch/dir/some/file"]["content"],
- "Goodbye")
-
- root_helper = nova.utils._get_root_helper()
- self.assertEqual(commands,
- [{'args': ('readlink', '-nm',
- '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('tee', '/scratch/dir/some/file'),
- 'kwargs': {'process_input': 'Goodbye',
- 'run_as_root': True,
- 'root_helper': root_helper}}])
-
- def test_read_file(self):
- global commands, files
- files = {}
- commands = []
- self.stubs.Set(processutils, 'execute', fake_execute)
-
- vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.imgdir = "/scratch/dir"
- self.assertEqual(vfs.read_file("/some/file"), "Hello World")
-
- root_helper = nova.utils._get_root_helper()
- self.assertEqual(commands,
- [{'args': ('readlink', '-nm',
- '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('cat', '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}}])
-
- def test_has_file(self):
- global commands, files
- files = {}
- commands = []
- self.stubs.Set(processutils, 'execute', fake_execute)
-
- vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.imgdir = "/scratch/dir"
- vfs.read_file("/some/file")
-
- self.assertTrue(vfs.has_file("/some/file"))
- self.assertFalse(vfs.has_file("/other/file"))
-
- root_helper = nova.utils._get_root_helper()
- self.assertEqual(commands,
- [{'args': ('readlink', '-nm',
- '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('cat', '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('readlink', '-nm',
- '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('readlink', '-e',
- '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('readlink', '-nm',
- '/scratch/dir/other/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('readlink', '-e',
- '/scratch/dir/other/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- ])
-
- def test_set_permissions(self):
- global commands, files
- commands = []
- files = {}
- self.stubs.Set(processutils, 'execute', fake_execute)
-
- vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.imgdir = "/scratch/dir"
- vfs.read_file("/some/file")
-
- vfs.set_permissions("/some/file", 0o777)
- self.assertEqual(files["/scratch/dir/some/file"]["mode"], 0o777)
-
- root_helper = nova.utils._get_root_helper()
- self.assertEqual(commands,
- [{'args': ('readlink', '-nm',
- '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('cat', '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('readlink', '-nm',
- '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('chmod', '777',
- '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}}])
-
- def test_set_ownership(self):
- global commands, files
- commands = []
- files = {}
- self.stubs.Set(processutils, 'execute', fake_execute)
-
- vfs = vfsimpl.VFSLocalFS(imgfile="/dummy.qcow2", imgfmt="qcow2")
- vfs.imgdir = "/scratch/dir"
- vfs.read_file("/some/file")
-
- self.assertEqual(files["/scratch/dir/some/file"]["uid"], 100)
- self.assertEqual(files["/scratch/dir/some/file"]["gid"], 100)
-
- vfs.set_ownership("/some/file", "fred", None)
- self.assertEqual(files["/scratch/dir/some/file"]["uid"], 105)
- self.assertEqual(files["/scratch/dir/some/file"]["gid"], 100)
-
- vfs.set_ownership("/some/file", None, "users")
- self.assertEqual(files["/scratch/dir/some/file"]["uid"], 105)
- self.assertEqual(files["/scratch/dir/some/file"]["gid"], 500)
-
- vfs.set_ownership("/some/file", "joe", "admins")
- self.assertEqual(files["/scratch/dir/some/file"]["uid"], 110)
- self.assertEqual(files["/scratch/dir/some/file"]["gid"], 600)
-
- root_helper = nova.utils._get_root_helper()
- self.assertEqual(commands,
- [{'args': ('readlink', '-nm',
- '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('cat', '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('readlink', '-nm',
- '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('chown', 'fred',
- '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('readlink', '-nm',
- '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('chgrp', 'users',
- '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('readlink', '-nm',
- '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}},
- {'args': ('chown', 'joe:admins',
- '/scratch/dir/some/file'),
- 'kwargs': {'run_as_root': True,
- 'root_helper': root_helper}}])
diff --git a/nova/tests/virt/hyperv/test_hypervapi.py b/nova/tests/virt/hyperv/test_hypervapi.py
deleted file mode 100644
index 94d9f63918..0000000000
--- a/nova/tests/virt/hyperv/test_hypervapi.py
+++ /dev/null
@@ -1,1967 +0,0 @@
-# Copyright 2012 Cloudbase Solutions Srl
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Test suite for the Hyper-V driver and related APIs.
-"""
-
-import contextlib
-import datetime
-import io
-import os
-import platform
-import shutil
-import time
-import uuid
-
-import mock
-import mox
-from oslo.config import cfg
-from oslo.utils import units
-
-from nova.api.metadata import base as instance_metadata
-from nova.compute import power_state
-from nova.compute import task_states
-from nova import context
-from nova import db
-from nova import exception
-from nova.i18n import _
-from nova.image import glance
-from nova.openstack.common import fileutils
-from nova import test
-from nova.tests import fake_network
-from nova.tests.image import fake as fake_image
-from nova.tests import matchers
-from nova.tests.virt.hyperv import db_fakes
-from nova.tests.virt.hyperv import fake
-from nova import utils
-from nova.virt import configdrive
-from nova.virt import driver
-from nova.virt.hyperv import basevolumeutils
-from nova.virt.hyperv import constants
-from nova.virt.hyperv import driver as driver_hyperv
-from nova.virt.hyperv import hostops
-from nova.virt.hyperv import hostutils
-from nova.virt.hyperv import ioutils
-from nova.virt.hyperv import livemigrationutils
-from nova.virt.hyperv import networkutils
-from nova.virt.hyperv import networkutilsv2
-from nova.virt.hyperv import pathutils
-from nova.virt.hyperv import rdpconsoleutils
-from nova.virt.hyperv import utilsfactory
-from nova.virt.hyperv import vhdutils
-from nova.virt.hyperv import vhdutilsv2
-from nova.virt.hyperv import vmutils
-from nova.virt.hyperv import vmutilsv2
-from nova.virt.hyperv import volumeops
-from nova.virt.hyperv import volumeutils
-from nova.virt.hyperv import volumeutilsv2
-from nova.virt import images
-
-CONF = cfg.CONF
-CONF.import_opt('vswitch_name', 'nova.virt.hyperv.vif', 'hyperv')
-
-
-class HyperVAPIBaseTestCase(test.NoDBTestCase):
- """Base unit tests class for Hyper-V driver calls."""
-
- def __init__(self, test_case_name):
- self._mox = mox.Mox()
- super(HyperVAPIBaseTestCase, self).__init__(test_case_name)
-
- def setUp(self):
- super(HyperVAPIBaseTestCase, self).setUp()
-
- self._user_id = 'fake'
- self._project_id = 'fake'
- self._instance_data = None
- self._image_metadata = None
- self._fetched_image = None
- self._update_image_raise_exception = False
- self._volume_target_portal = 'testtargetportal:3260'
- self._volume_id = '0ef5d708-45ab-4129-8c59-d774d2837eb7'
- self._context = context.RequestContext(self._user_id, self._project_id)
- self._instance_ide_disks = []
- self._instance_ide_dvds = []
- self._instance_volume_disks = []
- self._test_vm_name = None
- self._test_instance_dir = 'C:\\FakeInstancesPath\\instance-0000001'
- self._check_min_windows_version_satisfied = True
-
- self._setup_stubs()
-
- self.flags(instances_path=r'C:\Hyper-V\test\instances',
- network_api_class='nova.network.neutronv2.api.API')
- self.flags(force_volumeutils_v1=True, group='hyperv')
- self.flags(force_hyperv_utils_v1=True, group='hyperv')
-
- self._conn = driver_hyperv.HyperVDriver(None)
-
- def _setup_stubs(self):
- db_fakes.stub_out_db_instance_api(self.stubs)
- fake_image.stub_out_image_service(self.stubs)
- fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs)
-
- def fake_fetch(context, image_id, target, user, project):
- self._fetched_image = target
- self.stubs.Set(images, 'fetch', fake_fetch)
-
- def fake_get_remote_image_service(context, name):
- class FakeGlanceImageService(object):
- def update(self_fake, context, image_id, image_metadata, f):
- if self._update_image_raise_exception:
- raise vmutils.HyperVException(
- "Simulated update failure")
- self._image_metadata = image_metadata
- return (FakeGlanceImageService(), 1)
- self.stubs.Set(glance, 'get_remote_image_service',
- fake_get_remote_image_service)
-
- def fake_check_min_windows_version(fake_self, major, minor):
- if [major, minor] >= [6, 3]:
- return False
- return self._check_min_windows_version_satisfied
- self.stubs.Set(hostutils.HostUtils, 'check_min_windows_version',
- fake_check_min_windows_version)
-
- def fake_sleep(ms):
- pass
- self.stubs.Set(time, 'sleep', fake_sleep)
-
- class FakeIOThread(object):
- def __init__(self, src, dest, max_bytes):
- pass
-
- def start(self):
- pass
-
- self.stubs.Set(pathutils, 'PathUtils', fake.PathUtils)
- self.stubs.Set(ioutils, 'IOThread', FakeIOThread)
- self._mox.StubOutWithMock(fake.PathUtils, 'open')
- self._mox.StubOutWithMock(fake.PathUtils, 'copyfile')
- self._mox.StubOutWithMock(fake.PathUtils, 'rmtree')
- self._mox.StubOutWithMock(fake.PathUtils, 'copy')
- self._mox.StubOutWithMock(fake.PathUtils, 'remove')
- self._mox.StubOutWithMock(fake.PathUtils, 'rename')
- self._mox.StubOutWithMock(fake.PathUtils, 'makedirs')
- self._mox.StubOutWithMock(fake.PathUtils,
- 'get_instance_migr_revert_dir')
- self._mox.StubOutWithMock(fake.PathUtils, 'get_instance_dir')
- self._mox.StubOutWithMock(fake.PathUtils, 'get_vm_console_log_paths')
-
- self._mox.StubOutWithMock(vmutils.VMUtils, 'vm_exists')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'create_vm')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'destroy_vm')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'attach_ide_drive')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'create_scsi_controller')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'create_nic')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'set_vm_state')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'list_instances')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_summary_info')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'take_vm_snapshot')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'remove_vm_snapshot')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'set_nic_connection')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_scsi_controller')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_ide_controller')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'get_attached_disks')
- self._mox.StubOutWithMock(vmutils.VMUtils,
- 'attach_volume_to_controller')
- self._mox.StubOutWithMock(vmutils.VMUtils,
- 'get_mounted_disk_by_drive_number')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'detach_vm_disk')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_storage_paths')
- self._mox.StubOutWithMock(vmutils.VMUtils,
- 'get_controller_volume_paths')
- self._mox.StubOutWithMock(vmutils.VMUtils,
- 'enable_vm_metrics_collection')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_id')
- self._mox.StubOutWithMock(vmutils.VMUtils,
- 'get_vm_serial_port_connection')
-
- self._mox.StubOutWithMock(vhdutils.VHDUtils, 'create_differencing_vhd')
- self._mox.StubOutWithMock(vhdutils.VHDUtils, 'reconnect_parent_vhd')
- self._mox.StubOutWithMock(vhdutils.VHDUtils, 'merge_vhd')
- self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_parent_path')
- self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_info')
- self._mox.StubOutWithMock(vhdutils.VHDUtils, 'resize_vhd')
- self._mox.StubOutWithMock(vhdutils.VHDUtils,
- 'get_internal_vhd_size_by_file_size')
- self._mox.StubOutWithMock(vhdutils.VHDUtils, 'validate_vhd')
- self._mox.StubOutWithMock(vhdutils.VHDUtils, 'get_vhd_format')
- self._mox.StubOutWithMock(vhdutils.VHDUtils, 'create_dynamic_vhd')
-
- self._mox.StubOutWithMock(hostutils.HostUtils, 'get_cpus_info')
- self._mox.StubOutWithMock(hostutils.HostUtils,
- 'is_cpu_feature_present')
- self._mox.StubOutWithMock(hostutils.HostUtils, 'get_memory_info')
- self._mox.StubOutWithMock(hostutils.HostUtils, 'get_volume_info')
- self._mox.StubOutWithMock(hostutils.HostUtils, 'get_windows_version')
- self._mox.StubOutWithMock(hostutils.HostUtils, 'get_local_ips')
-
- self._mox.StubOutWithMock(networkutils.NetworkUtils,
- 'get_external_vswitch')
- self._mox.StubOutWithMock(networkutils.NetworkUtils,
- 'create_vswitch_port')
- self._mox.StubOutWithMock(networkutils.NetworkUtils,
- 'vswitch_port_needed')
-
- self._mox.StubOutWithMock(livemigrationutils.LiveMigrationUtils,
- 'live_migrate_vm')
- self._mox.StubOutWithMock(livemigrationutils.LiveMigrationUtils,
- 'check_live_migration_config')
-
- self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
- 'volume_in_mapping')
- self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
- 'get_session_id_from_mounted_disk')
- self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
- 'get_device_number_for_target')
- self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
- 'get_target_from_disk_path')
- self._mox.StubOutWithMock(basevolumeutils.BaseVolumeUtils,
- 'get_target_lun_count')
-
- self._mox.StubOutWithMock(volumeutils.VolumeUtils,
- 'login_storage_target')
- self._mox.StubOutWithMock(volumeutils.VolumeUtils,
- 'logout_storage_target')
- self._mox.StubOutWithMock(volumeutils.VolumeUtils,
- 'execute_log_out')
- self._mox.StubOutWithMock(volumeutils.VolumeUtils,
- 'get_iscsi_initiator')
-
- self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
- 'login_storage_target')
- self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
- 'logout_storage_target')
- self._mox.StubOutWithMock(volumeutilsv2.VolumeUtilsV2,
- 'execute_log_out')
-
- self._mox.StubOutWithMock(rdpconsoleutils.RDPConsoleUtils,
- 'get_rdp_console_port')
-
- self._mox.StubOutClassWithMocks(instance_metadata, 'InstanceMetadata')
- self._mox.StubOutWithMock(instance_metadata.InstanceMetadata,
- 'metadata_for_config_drive')
-
- # Can't use StubOutClassWithMocks due to __exit__ and __enter__
- self._mox.StubOutWithMock(configdrive, 'ConfigDriveBuilder')
- self._mox.StubOutWithMock(configdrive.ConfigDriveBuilder, 'make_drive')
-
- self._mox.StubOutWithMock(fileutils, 'delete_if_exists')
- self._mox.StubOutWithMock(utils, 'execute')
-
- def tearDown(self):
- self._mox.UnsetStubs()
- super(HyperVAPIBaseTestCase, self).tearDown()
-
-
-class HyperVAPITestCase(HyperVAPIBaseTestCase):
- """Unit tests for Hyper-V driver calls."""
-
- def test_public_api_signatures(self):
- self.assertPublicAPISignatures(driver.ComputeDriver(None), self._conn)
-
- def test_get_available_resource(self):
- cpu_info = {'Architecture': 'fake',
- 'Name': 'fake',
- 'Manufacturer': 'ACME, Inc.',
- 'NumberOfCores': 2,
- 'NumberOfLogicalProcessors': 4}
-
- tot_mem_kb = 2000000L
- free_mem_kb = 1000000L
-
- tot_hdd_b = 4L * 1024 ** 3
- free_hdd_b = 3L * 1024 ** 3
-
- windows_version = '6.2.9200'
-
- hostutils.HostUtils.get_memory_info().AndReturn((tot_mem_kb,
- free_mem_kb))
-
- m = hostutils.HostUtils.get_volume_info(mox.IsA(str))
- m.AndReturn((tot_hdd_b, free_hdd_b))
-
- hostutils.HostUtils.get_cpus_info().AndReturn([cpu_info])
- m = hostutils.HostUtils.is_cpu_feature_present(mox.IsA(int))
- m.MultipleTimes()
-
- m = hostutils.HostUtils.get_windows_version()
- m.AndReturn(windows_version)
-
- self._mox.ReplayAll()
- dic = self._conn.get_available_resource(None)
- self._mox.VerifyAll()
-
- self.assertEqual(dic['vcpus'], cpu_info['NumberOfLogicalProcessors'])
- self.assertEqual(dic['hypervisor_hostname'], platform.node())
- self.assertEqual(dic['memory_mb'], tot_mem_kb / units.Ki)
- self.assertEqual(dic['memory_mb_used'],
- tot_mem_kb / units.Ki - free_mem_kb / units.Ki)
- self.assertEqual(dic['local_gb'], tot_hdd_b / units.Gi)
- self.assertEqual(dic['local_gb_used'],
- tot_hdd_b / units.Gi - free_hdd_b / units.Gi)
- self.assertEqual(dic['hypervisor_version'],
- windows_version.replace('.', ''))
- self.assertEqual(dic['supported_instances'],
- '[["i686", "hyperv", "hvm"], ["x86_64", "hyperv", "hvm"]]')
-
- def test_list_instances(self):
- fake_instances = ['fake1', 'fake2']
- vmutils.VMUtils.list_instances().AndReturn(fake_instances)
-
- self._mox.ReplayAll()
- instances = self._conn.list_instances()
- self._mox.VerifyAll()
-
- self.assertEqual(instances, fake_instances)
-
- def test_get_host_uptime(self):
- fake_host = "fake_host"
- with mock.patch.object(self._conn._hostops,
- "get_host_uptime") as mock_uptime:
- self._conn._hostops.get_host_uptime(fake_host)
- mock_uptime.assert_called_once_with(fake_host)
-
- def test_get_info(self):
- self._instance_data = self._get_instance_data()
-
- summary_info = {'NumberOfProcessors': 2,
- 'EnabledState': constants.HYPERV_VM_STATE_ENABLED,
- 'MemoryUsage': 1000,
- 'UpTime': 1}
-
- m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
- m.AndReturn(True)
-
- func = mox.Func(self._check_instance_name)
- m = vmutils.VMUtils.get_vm_summary_info(func)
- m.AndReturn(summary_info)
-
- self._mox.ReplayAll()
- info = self._conn.get_info(self._instance_data)
- self._mox.VerifyAll()
-
- self.assertEqual(info["state"], power_state.RUNNING)
-
- def test_get_info_instance_not_found(self):
- # Tests that InstanceNotFound is raised if the instance isn't found
- # from the vmutils.vm_exists method.
- self._instance_data = self._get_instance_data()
-
- m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
- m.AndReturn(False)
-
- self._mox.ReplayAll()
- self.assertRaises(exception.InstanceNotFound, self._conn.get_info,
- self._instance_data)
- self._mox.VerifyAll()
-
- def test_spawn_cow_image(self):
- self._test_spawn_instance(True)
-
- def test_spawn_cow_image_vhdx(self):
- self._test_spawn_instance(True, vhd_format=constants.DISK_FORMAT_VHDX)
-
- def test_spawn_no_cow_image(self):
- self._test_spawn_instance(False)
-
- def test_spawn_dynamic_memory(self):
- CONF.set_override('dynamic_memory_ratio', 2.0, 'hyperv')
- self._test_spawn_instance()
-
- def test_spawn_no_cow_image_vhdx(self):
- self._test_spawn_instance(False, vhd_format=constants.DISK_FORMAT_VHDX)
-
- def _setup_spawn_config_drive_mocks(self, use_cdrom):
- instance_metadata.InstanceMetadata(mox.IgnoreArg(),
- content=mox.IsA(list),
- extra_md=mox.IsA(dict))
-
- m = fake.PathUtils.get_instance_dir(mox.IsA(str))
- m.AndReturn(self._test_instance_dir)
-
- cdb = self._mox.CreateMockAnything()
- m = configdrive.ConfigDriveBuilder(instance_md=mox.IgnoreArg())
- m.AndReturn(cdb)
- # __enter__ and __exit__ are required by "with"
- cdb.__enter__().AndReturn(cdb)
- cdb.make_drive(mox.IsA(str))
- cdb.__exit__(None, None, None).AndReturn(None)
-
- if not use_cdrom:
- utils.execute(CONF.hyperv.qemu_img_cmd,
- 'convert',
- '-f',
- 'raw',
- '-O',
- 'vpc',
- mox.IsA(str),
- mox.IsA(str),
- attempts=1)
- fake.PathUtils.remove(mox.IsA(str))
-
- m = vmutils.VMUtils.attach_ide_drive(mox.IsA(str),
- mox.IsA(str),
- mox.IsA(int),
- mox.IsA(int),
- mox.IsA(str))
- m.WithSideEffects(self._add_ide_disk)
-
- def _test_spawn_config_drive(self, use_cdrom, format_error=False):
- self.flags(force_config_drive=True)
- self.flags(config_drive_cdrom=use_cdrom, group='hyperv')
- self.flags(mkisofs_cmd='mkisofs.exe')
-
- if use_cdrom:
- expected_ide_disks = 1
- expected_ide_dvds = 1
- else:
- expected_ide_disks = 2
- expected_ide_dvds = 0
-
- if format_error:
- self.assertRaises(vmutils.UnsupportedConfigDriveFormatException,
- self._test_spawn_instance,
- with_exception=True,
- config_drive=True,
- use_cdrom=use_cdrom)
- else:
- self._test_spawn_instance(expected_ide_disks=expected_ide_disks,
- expected_ide_dvds=expected_ide_dvds,
- config_drive=True,
- use_cdrom=use_cdrom)
-
- def test_spawn_config_drive(self):
- self._test_spawn_config_drive(False)
-
- def test_spawn_config_drive_format_error(self):
- CONF.set_override('config_drive_format', 'wrong_format')
- self._test_spawn_config_drive(True, True)
-
- def test_spawn_config_drive_cdrom(self):
- self._test_spawn_config_drive(True)
-
- def test_spawn_no_config_drive(self):
- self.flags(force_config_drive=False)
-
- expected_ide_disks = 1
- expected_ide_dvds = 0
-
- self._test_spawn_instance(expected_ide_disks=expected_ide_disks,
- expected_ide_dvds=expected_ide_dvds)
-
- def _test_spawn_nova_net_vif(self, with_port):
- self.flags(network_api_class='nova.network.api.API')
- # Reinstantiate driver, as the VIF plugin is loaded during __init__
- self._conn = driver_hyperv.HyperVDriver(None)
-
- def setup_vif_mocks():
- fake_vswitch_path = 'fake vswitch path'
- fake_vswitch_port = 'fake port'
-
- m = networkutils.NetworkUtils.get_external_vswitch(
- CONF.hyperv.vswitch_name)
- m.AndReturn(fake_vswitch_path)
-
- m = networkutils.NetworkUtils.vswitch_port_needed()
- m.AndReturn(with_port)
-
- if with_port:
- m = networkutils.NetworkUtils.create_vswitch_port(
- fake_vswitch_path, mox.IsA(str))
- m.AndReturn(fake_vswitch_port)
- vswitch_conn_data = fake_vswitch_port
- else:
- vswitch_conn_data = fake_vswitch_path
-
- vmutils.VMUtils.set_nic_connection(mox.IsA(str),
- mox.IsA(str), vswitch_conn_data)
-
- self._test_spawn_instance(setup_vif_mocks_func=setup_vif_mocks)
-
- def test_spawn_nova_net_vif_with_port(self):
- self._test_spawn_nova_net_vif(True)
-
- def test_spawn_nova_net_vif_without_port(self):
- self._test_spawn_nova_net_vif(False)
-
- def test_spawn_nova_net_vif_no_vswitch_exception(self):
- self.flags(network_api_class='nova.network.api.API')
- # Reinstantiate driver, as the VIF plugin is loaded during __init__
- self._conn = driver_hyperv.HyperVDriver(None)
-
- def setup_vif_mocks():
- m = networkutils.NetworkUtils.get_external_vswitch(
- CONF.hyperv.vswitch_name)
- m.AndRaise(vmutils.HyperVException(_('fake vswitch not found')))
-
- self.assertRaises(vmutils.HyperVException, self._test_spawn_instance,
- setup_vif_mocks_func=setup_vif_mocks,
- with_exception=True)
-
- def test_spawn_with_metrics_collection(self):
- self.flags(enable_instance_metrics_collection=True, group='hyperv')
- self._test_spawn_instance(False)
-
- def test_spawn_with_ephemeral_storage(self):
- self._test_spawn_instance(True, expected_ide_disks=2,
- ephemeral_storage=True)
-
- def _check_instance_name(self, vm_name):
- return vm_name == self._instance_data['name']
-
- def _test_vm_state_change(self, action, from_state, to_state):
- self._instance_data = self._get_instance_data()
-
- vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
- to_state)
-
- if to_state in (constants.HYPERV_VM_STATE_DISABLED,
- constants.HYPERV_VM_STATE_REBOOT):
- self._setup_delete_vm_log_mocks()
- if to_state in (constants.HYPERV_VM_STATE_ENABLED,
- constants.HYPERV_VM_STATE_REBOOT):
- self._setup_log_vm_output_mocks()
-
- self._mox.ReplayAll()
- action(self._instance_data)
- self._mox.VerifyAll()
-
- def test_pause(self):
- self._test_vm_state_change(self._conn.pause, None,
- constants.HYPERV_VM_STATE_PAUSED)
-
- def test_pause_already_paused(self):
- self._test_vm_state_change(self._conn.pause,
- constants.HYPERV_VM_STATE_PAUSED,
- constants.HYPERV_VM_STATE_PAUSED)
-
- def test_unpause(self):
- self._test_vm_state_change(self._conn.unpause,
- constants.HYPERV_VM_STATE_PAUSED,
- constants.HYPERV_VM_STATE_ENABLED)
-
- def test_unpause_already_running(self):
- self._test_vm_state_change(self._conn.unpause, None,
- constants.HYPERV_VM_STATE_ENABLED)
-
- def test_suspend(self):
- self._test_vm_state_change(self._conn.suspend, None,
- constants.HYPERV_VM_STATE_SUSPENDED)
-
- def test_suspend_already_suspended(self):
- self._test_vm_state_change(self._conn.suspend,
- constants.HYPERV_VM_STATE_SUSPENDED,
- constants.HYPERV_VM_STATE_SUSPENDED)
-
- def test_resume(self):
- self._test_vm_state_change(lambda i: self._conn.resume(self._context,
- i, None),
- constants.HYPERV_VM_STATE_SUSPENDED,
- constants.HYPERV_VM_STATE_ENABLED)
-
- def test_resume_already_running(self):
- self._test_vm_state_change(lambda i: self._conn.resume(self._context,
- i, None), None,
- constants.HYPERV_VM_STATE_ENABLED)
-
- def test_power_off(self):
- self._test_vm_state_change(self._conn.power_off, None,
- constants.HYPERV_VM_STATE_DISABLED)
-
- def test_power_off_already_powered_off(self):
- self._test_vm_state_change(self._conn.power_off,
- constants.HYPERV_VM_STATE_DISABLED,
- constants.HYPERV_VM_STATE_DISABLED)
-
- def _test_power_on(self, block_device_info):
- self._instance_data = self._get_instance_data()
- network_info = fake_network.fake_get_instance_nw_info(self.stubs)
-
- vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
- constants.HYPERV_VM_STATE_ENABLED)
- if block_device_info:
- self._mox.StubOutWithMock(volumeops.VolumeOps,
- 'fix_instance_volume_disk_paths')
- volumeops.VolumeOps.fix_instance_volume_disk_paths(
- mox.Func(self._check_instance_name), block_device_info)
-
- self._setup_log_vm_output_mocks()
-
- self._mox.ReplayAll()
- self._conn.power_on(self._context, self._instance_data, network_info,
- block_device_info=block_device_info)
- self._mox.VerifyAll()
-
- def test_power_on_having_block_devices(self):
- block_device_info = db_fakes.get_fake_block_device_info(
- self._volume_target_portal, self._volume_id)
- self._test_power_on(block_device_info=block_device_info)
-
- def test_power_on_without_block_devices(self):
- self._test_power_on(block_device_info=None)
-
- def test_power_on_already_running(self):
- self._instance_data = self._get_instance_data()
- network_info = fake_network.fake_get_instance_nw_info(self.stubs)
- vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
- constants.HYPERV_VM_STATE_ENABLED)
- self._setup_log_vm_output_mocks()
- self._mox.ReplayAll()
- self._conn.power_on(self._context, self._instance_data, network_info)
- self._mox.VerifyAll()
-
- def test_reboot(self):
-
- network_info = fake_network.fake_get_instance_nw_info(self.stubs)
- self._instance_data = self._get_instance_data()
-
- vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
- constants.HYPERV_VM_STATE_REBOOT)
-
- self._setup_delete_vm_log_mocks()
- self._setup_log_vm_output_mocks()
-
- self._mox.ReplayAll()
- self._conn.reboot(self._context, self._instance_data, network_info,
- None)
- self._mox.VerifyAll()
-
- def _setup_destroy_mocks(self, destroy_disks=True):
- fake_volume_drives = ['fake_volume_drive']
- fake_target_iqn = 'fake_target_iqn'
- fake_target_lun = 'fake_target_lun'
-
- m = vmutils.VMUtils.vm_exists(mox.Func(self._check_instance_name))
- m.AndReturn(True)
-
- func = mox.Func(self._check_instance_name)
- vmutils.VMUtils.set_vm_state(func, constants.HYPERV_VM_STATE_DISABLED)
-
- self._setup_delete_vm_log_mocks()
-
- m = vmutils.VMUtils.get_vm_storage_paths(func)
- m.AndReturn(([], fake_volume_drives))
-
- vmutils.VMUtils.destroy_vm(func)
-
- m = self._conn._volumeops.get_target_from_disk_path(
- fake_volume_drives[0])
- m.AndReturn((fake_target_iqn, fake_target_lun))
-
- self._mock_logout_storage_target(fake_target_iqn)
-
- if destroy_disks:
- m = fake.PathUtils.get_instance_dir(mox.IsA(str),
- create_dir=False,
- remove_dir=True)
- m.AndReturn(self._test_instance_dir)
-
- def test_destroy(self):
- self._instance_data = self._get_instance_data()
-
- self._setup_destroy_mocks()
-
- self._mox.ReplayAll()
- self._conn.destroy(self._context, self._instance_data, None)
- self._mox.VerifyAll()
-
- def test_live_migration_unsupported_os(self):
- self._check_min_windows_version_satisfied = False
- self._conn = driver_hyperv.HyperVDriver(None)
- self._test_live_migration(unsupported_os=True)
-
- def test_live_migration_without_volumes(self):
- self._test_live_migration()
-
- def test_live_migration_with_volumes(self):
- self._test_live_migration(with_volumes=True)
-
- def test_live_migration_with_multiple_luns_per_target(self):
- self._test_live_migration(with_volumes=True,
- other_luns_available=True)
-
- def test_live_migration_with_target_failure(self):
- self._test_live_migration(test_failure=True)
-
- def _test_live_migration(self, test_failure=False,
- with_volumes=False,
- other_luns_available=False,
- unsupported_os=False):
- dest_server = 'fake_server'
-
- instance_data = self._get_instance_data()
-
- fake_post_method = self._mox.CreateMockAnything()
- if not test_failure and not unsupported_os:
- fake_post_method(self._context, instance_data, dest_server,
- False)
-
- fake_recover_method = self._mox.CreateMockAnything()
- if test_failure:
- fake_recover_method(self._context, instance_data, dest_server,
- False)
-
- if with_volumes:
- fake_target_iqn = 'fake_target_iqn'
- fake_target_lun_count = 1
-
- if not unsupported_os:
- m = fake.PathUtils.get_vm_console_log_paths(mox.IsA(str))
- m.AndReturn(('fake_local_vm_log_path', 'fake_vm_log_path.1'))
-
- m = fake.PathUtils.get_vm_console_log_paths(
- mox.IsA(str), remote_server=mox.IsA(str))
- m.AndReturn(('fake_remote_vm_log_path',
- 'fake_remote_vm_log_path.1'))
-
- self._mox.StubOutWithMock(fake.PathUtils, 'exists')
- m = fake.PathUtils.exists(mox.IsA(str))
- m.AndReturn(True)
- m = fake.PathUtils.exists(mox.IsA(str))
- m.AndReturn(False)
-
- fake.PathUtils.copy(mox.IsA(str), mox.IsA(str))
-
- m = livemigrationutils.LiveMigrationUtils.live_migrate_vm(
- instance_data['name'], dest_server)
- if test_failure:
- m.AndRaise(vmutils.HyperVException('Simulated failure'))
-
- if with_volumes:
- m.AndReturn({fake_target_iqn: fake_target_lun_count})
-
- self._mock_logout_storage_target(fake_target_iqn,
- other_luns_available)
- else:
- m.AndReturn({})
-
- self._mox.ReplayAll()
- try:
- hyperv_exception_raised = False
- unsupported_os_exception_raised = False
- self._conn.live_migration(self._context, instance_data,
- dest_server, fake_post_method,
- fake_recover_method)
- except vmutils.HyperVException:
- hyperv_exception_raised = True
- except NotImplementedError:
- unsupported_os_exception_raised = True
-
- self.assertTrue(not test_failure ^ hyperv_exception_raised)
- self.assertTrue(not unsupported_os ^ unsupported_os_exception_raised)
- self._mox.VerifyAll()
-
- def test_pre_live_migration_cow_image(self):
- self._test_pre_live_migration(True, False)
-
- def test_pre_live_migration_no_cow_image(self):
- self._test_pre_live_migration(False, False)
-
- def test_pre_live_migration_with_volumes(self):
- self._test_pre_live_migration(False, True)
-
- def _test_pre_live_migration(self, cow, with_volumes):
- self.flags(use_cow_images=cow)
-
- instance_data = self._get_instance_data()
- instance = db.instance_create(self._context, instance_data)
- instance['system_metadata'] = {}
-
- network_info = fake_network.fake_get_instance_nw_info(self.stubs)
-
- m = livemigrationutils.LiveMigrationUtils.check_live_migration_config()
- m.AndReturn(True)
-
- if cow:
- self._setup_get_cached_image_mocks(cow)
-
- if with_volumes:
- block_device_info = db_fakes.get_fake_block_device_info(
- self._volume_target_portal, self._volume_id)
-
- mapping = driver.block_device_info_get_mapping(block_device_info)
- data = mapping[0]['connection_info']['data']
- target_lun = data['target_lun']
- target_iqn = data['target_iqn']
- target_portal = data['target_portal']
-
- fake_mounted_disk = "fake_mounted_disk"
- fake_device_number = 0
-
- self._mock_login_storage_target(target_iqn, target_lun,
- target_portal,
- fake_mounted_disk,
- fake_device_number)
- else:
- block_device_info = None
-
- self._mox.ReplayAll()
- self._conn.pre_live_migration(self._context, instance,
- block_device_info, None, network_info)
- self._mox.VerifyAll()
-
- if cow:
- self.assertIsNotNone(self._fetched_image)
- else:
- self.assertIsNone(self._fetched_image)
-
- def test_get_instance_disk_info_is_implemented(self):
- # Ensure that the method has been implemented in the driver
- try:
- disk_info = self._conn.get_instance_disk_info('fake_instance_name')
- self.assertIsNone(disk_info)
- except NotImplementedError:
- self.fail("test_get_instance_disk_info() should not raise "
- "NotImplementedError")
-
- def test_snapshot_with_update_failure(self):
- (snapshot_name, func_call_matcher) = self._setup_snapshot_mocks()
-
- self._update_image_raise_exception = True
-
- self._mox.ReplayAll()
- self.assertRaises(vmutils.HyperVException, self._conn.snapshot,
- self._context, self._instance_data, snapshot_name,
- func_call_matcher.call)
- self._mox.VerifyAll()
-
- # Assert states changed in correct order
- self.assertIsNone(func_call_matcher.match())
-
- def _setup_snapshot_mocks(self):
- expected_calls = [
- {'args': (),
- 'kwargs': {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
- {'args': (),
- 'kwargs': {'task_state': task_states.IMAGE_UPLOADING,
- 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}
- ]
- func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
-
- snapshot_name = 'test_snapshot_' + str(uuid.uuid4())
-
- fake_hv_snapshot_path = 'fake_snapshot_path'
- fake_parent_vhd_path = 'C:\\fake_vhd_path\\parent.vhd'
-
- self._instance_data = self._get_instance_data()
-
- func = mox.Func(self._check_instance_name)
- m = vmutils.VMUtils.take_vm_snapshot(func)
- m.AndReturn(fake_hv_snapshot_path)
-
- m = fake.PathUtils.get_instance_dir(mox.IsA(str))
- m.AndReturn(self._test_instance_dir)
-
- m = vhdutils.VHDUtils.get_vhd_parent_path(mox.IsA(str))
- m.AndReturn(fake_parent_vhd_path)
-
- self._fake_dest_disk_path = None
-
- def copy_dest_disk_path(src, dest):
- self._fake_dest_disk_path = dest
-
- m = fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
- m.WithSideEffects(copy_dest_disk_path)
-
- self._fake_dest_base_disk_path = None
-
- def copy_dest_base_disk_path(src, dest):
- self._fake_dest_base_disk_path = dest
-
- m = fake.PathUtils.copyfile(fake_parent_vhd_path, mox.IsA(str))
- m.WithSideEffects(copy_dest_base_disk_path)
-
- def check_dest_disk_path(path):
- return path == self._fake_dest_disk_path
-
- def check_dest_base_disk_path(path):
- return path == self._fake_dest_base_disk_path
-
- func1 = mox.Func(check_dest_disk_path)
- func2 = mox.Func(check_dest_base_disk_path)
- # Make sure that the hyper-v base and differential VHDs are merged
- vhdutils.VHDUtils.reconnect_parent_vhd(func1, func2)
- vhdutils.VHDUtils.merge_vhd(func1, func2)
-
- def check_snapshot_path(snapshot_path):
- return snapshot_path == fake_hv_snapshot_path
-
- # Make sure that the Hyper-V snapshot is removed
- func = mox.Func(check_snapshot_path)
- vmutils.VMUtils.remove_vm_snapshot(func)
-
- fake.PathUtils.rmtree(mox.IsA(str))
-
- m = fake.PathUtils.open(func2, 'rb')
- m.AndReturn(io.BytesIO(b'fake content'))
-
- return (snapshot_name, func_call_matcher)
-
- def test_snapshot(self):
- (snapshot_name, func_call_matcher) = self._setup_snapshot_mocks()
-
- self._mox.ReplayAll()
- self._conn.snapshot(self._context, self._instance_data, snapshot_name,
- func_call_matcher.call)
- self._mox.VerifyAll()
-
- self.assertTrue(self._image_metadata)
- self.assertIn("disk_format", self._image_metadata)
- self.assertEqual("vhd", self._image_metadata["disk_format"])
-
- # Assert states changed in correct order
- self.assertIsNone(func_call_matcher.match())
-
- def _get_instance_data(self):
- instance_name = 'openstack_unit_test_vm_' + str(uuid.uuid4())
- return db_fakes.get_fake_instance_data(instance_name,
- self._project_id,
- self._user_id)
-
- def _spawn_instance(self, cow, block_device_info=None,
- ephemeral_storage=False):
- self.flags(use_cow_images=cow)
-
- self._instance_data = self._get_instance_data()
- instance = db.instance_create(self._context, self._instance_data)
- instance['system_metadata'] = {}
-
- if ephemeral_storage:
- instance['ephemeral_gb'] = 1
-
- image = db_fakes.get_fake_image_data(self._project_id, self._user_id)
-
- network_info = fake_network.fake_get_instance_nw_info(self.stubs)
-
- self._conn.spawn(self._context, instance, image,
- injected_files=[], admin_password=None,
- network_info=network_info,
- block_device_info=block_device_info)
-
- def _add_ide_disk(self, vm_name, path, ctrller_addr,
- drive_addr, drive_type):
- if drive_type == constants.IDE_DISK:
- self._instance_ide_disks.append(path)
- elif drive_type == constants.IDE_DVD:
- self._instance_ide_dvds.append(path)
-
- def _add_volume_disk(self, vm_name, controller_path, address,
- mounted_disk_path):
- self._instance_volume_disks.append(mounted_disk_path)
-
- def _check_img_path(self, image_path):
- return image_path == self._fetched_image
-
- def _setup_create_instance_mocks(self, setup_vif_mocks_func=None,
- boot_from_volume=False,
- block_device_info=None,
- admin_permissions=True,
- ephemeral_storage=False):
- vmutils.VMUtils.create_vm(mox.Func(self._check_vm_name), mox.IsA(int),
- mox.IsA(int), mox.IsA(bool),
- CONF.hyperv.dynamic_memory_ratio,
- mox.IsA(list))
-
- if not boot_from_volume:
- m = vmutils.VMUtils.attach_ide_drive(mox.Func(self._check_vm_name),
- mox.IsA(str),
- mox.IsA(int),
- mox.IsA(int),
- mox.IsA(str))
- m.WithSideEffects(self._add_ide_disk).InAnyOrder()
-
- if ephemeral_storage:
- m = vmutils.VMUtils.attach_ide_drive(mox.Func(self._check_vm_name),
- mox.IsA(str),
- mox.IsA(int),
- mox.IsA(int),
- mox.IsA(str))
- m.WithSideEffects(self._add_ide_disk).InAnyOrder()
-
- func = mox.Func(self._check_vm_name)
- m = vmutils.VMUtils.create_scsi_controller(func)
- m.InAnyOrder()
-
- if boot_from_volume:
- mapping = driver.block_device_info_get_mapping(block_device_info)
- data = mapping[0]['connection_info']['data']
- target_lun = data['target_lun']
- target_iqn = data['target_iqn']
- target_portal = data['target_portal']
-
- self._mock_attach_volume(mox.Func(self._check_vm_name), target_iqn,
- target_lun, target_portal, True)
-
- vmutils.VMUtils.create_nic(mox.Func(self._check_vm_name),
- mox.IsA(str), mox.IsA(unicode)).InAnyOrder()
-
- if setup_vif_mocks_func:
- setup_vif_mocks_func()
-
- if CONF.hyperv.enable_instance_metrics_collection:
- vmutils.VMUtils.enable_vm_metrics_collection(
- mox.Func(self._check_vm_name))
-
- vmutils.VMUtils.get_vm_serial_port_connection(
- mox.IsA(str), update_connection=mox.IsA(str))
-
- def _set_vm_name(self, vm_name):
- self._test_vm_name = vm_name
-
- def _check_vm_name(self, vm_name):
- return vm_name == self._test_vm_name
-
- def _setup_check_admin_permissions_mocks(self, admin_permissions=True):
- self._mox.StubOutWithMock(vmutils.VMUtils,
- 'check_admin_permissions')
- m = vmutils.VMUtils.check_admin_permissions()
- if admin_permissions:
- m.AndReturn(None)
- else:
- m.AndRaise(vmutils.HyperVAuthorizationException(_(
- 'Simulated failure')))
-
- def _setup_log_vm_output_mocks(self):
- m = fake.PathUtils.get_vm_console_log_paths(mox.IsA(str))
- m.AndReturn(('fake_vm_log_path', 'fake_vm_log_path.1'))
- ioutils.IOThread('fake_pipe', 'fake_vm_log_path',
- units.Mi).start()
-
- def _setup_delete_vm_log_mocks(self):
- m = fake.PathUtils.get_vm_console_log_paths(mox.IsA(str))
- m.AndReturn(('fake_vm_log_path', 'fake_vm_log_path.1'))
- fileutils.delete_if_exists(mox.IsA(str))
- fileutils.delete_if_exists(mox.IsA(str))
-
- def _setup_get_cached_image_mocks(self, cow=True,
- vhd_format=constants.DISK_FORMAT_VHD):
- m = vhdutils.VHDUtils.get_vhd_format(
- mox.Func(self._check_img_path))
- m.AndReturn(vhd_format)
-
- def check_img_path_with_ext(image_path):
- return image_path == self._fetched_image + '.' + vhd_format.lower()
-
- fake.PathUtils.rename(mox.Func(self._check_img_path),
- mox.Func(check_img_path_with_ext))
-
- if cow and vhd_format == constants.DISK_FORMAT_VHD:
- m = vhdutils.VHDUtils.get_vhd_info(
- mox.Func(check_img_path_with_ext))
- m.AndReturn({'MaxInternalSize': 1024})
-
- fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
-
- m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
- mox.IsA(str), mox.IsA(object))
- m.AndReturn(1025)
-
- vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object),
- is_file_max_size=False)
-
- def _setup_spawn_instance_mocks(self, cow, setup_vif_mocks_func=None,
- with_exception=False,
- block_device_info=None,
- boot_from_volume=False,
- config_drive=False,
- use_cdrom=False,
- admin_permissions=True,
- vhd_format=constants.DISK_FORMAT_VHD,
- ephemeral_storage=False):
- m = vmutils.VMUtils.vm_exists(mox.IsA(str))
- m.WithSideEffects(self._set_vm_name).AndReturn(False)
-
- m = fake.PathUtils.get_instance_dir(mox.IsA(str),
- create_dir=False,
- remove_dir=True)
- m.AndReturn(self._test_instance_dir)
-
- if block_device_info:
- m = basevolumeutils.BaseVolumeUtils.volume_in_mapping(
- 'fake_root_device_name', block_device_info)
- m.AndReturn(boot_from_volume)
-
- if not boot_from_volume:
- m = fake.PathUtils.get_instance_dir(mox.Func(self._check_vm_name))
- m.AndReturn(self._test_instance_dir)
-
- self._setup_get_cached_image_mocks(cow, vhd_format)
- m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
- m.AndReturn({'MaxInternalSize': 1024, 'FileSize': 1024,
- 'Type': 2})
-
- if cow:
- m = vhdutils.VHDUtils.get_vhd_format(mox.IsA(str))
- m.AndReturn(vhd_format)
- if vhd_format == constants.DISK_FORMAT_VHD:
- vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str),
- mox.IsA(str))
- else:
- m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
- mox.IsA(str), mox.IsA(object))
- m.AndReturn(1025)
- vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str),
- mox.IsA(str),
- mox.IsA(int))
- else:
- fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
- m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
- mox.IsA(str), mox.IsA(object))
- m.AndReturn(1025)
- vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object),
- is_file_max_size=False)
-
- self._setup_check_admin_permissions_mocks(
- admin_permissions=admin_permissions)
- if ephemeral_storage:
- m = fake.PathUtils.get_instance_dir(mox.Func(self._check_vm_name))
- m.AndReturn(self._test_instance_dir)
- vhdutils.VHDUtils.create_dynamic_vhd(mox.IsA(str), mox.IsA(int),
- mox.IsA(str))
-
- self._setup_create_instance_mocks(setup_vif_mocks_func,
- boot_from_volume,
- block_device_info,
- ephemeral_storage=ephemeral_storage)
-
- if config_drive and not with_exception:
- self._setup_spawn_config_drive_mocks(use_cdrom)
-
- # TODO(alexpilotti) Based on where the exception is thrown
- # some of the above mock calls need to be skipped
- if with_exception:
- self._setup_destroy_mocks()
- else:
- vmutils.VMUtils.set_vm_state(mox.Func(self._check_vm_name),
- constants.HYPERV_VM_STATE_ENABLED)
- self._setup_log_vm_output_mocks()
-
- def _test_spawn_instance(self, cow=True,
- expected_ide_disks=1,
- expected_ide_dvds=0,
- setup_vif_mocks_func=None,
- with_exception=False,
- config_drive=False,
- use_cdrom=False,
- admin_permissions=True,
- vhd_format=constants.DISK_FORMAT_VHD,
- ephemeral_storage=False):
- self._setup_spawn_instance_mocks(cow,
- setup_vif_mocks_func,
- with_exception,
- config_drive=config_drive,
- use_cdrom=use_cdrom,
- admin_permissions=admin_permissions,
- vhd_format=vhd_format,
- ephemeral_storage=ephemeral_storage)
-
- self._mox.ReplayAll()
- self._spawn_instance(cow, ephemeral_storage=ephemeral_storage)
- self._mox.VerifyAll()
-
- self.assertEqual(len(self._instance_ide_disks), expected_ide_disks)
- self.assertEqual(len(self._instance_ide_dvds), expected_ide_dvds)
-
- vhd_path = os.path.join(self._test_instance_dir, 'root.' +
- vhd_format.lower())
- self.assertEqual(vhd_path, self._instance_ide_disks[0])
-
- def _mock_get_mounted_disk_from_lun(self, target_iqn, target_lun,
- fake_mounted_disk,
- fake_device_number):
- m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
- target_lun)
- m.AndReturn(fake_device_number)
-
- m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
- fake_device_number)
- m.AndReturn(fake_mounted_disk)
-
- def _mock_login_storage_target(self, target_iqn, target_lun, target_portal,
- fake_mounted_disk, fake_device_number):
- m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
- target_lun)
- m.AndReturn(fake_device_number)
-
- volumeutils.VolumeUtils.login_storage_target(target_lun,
- target_iqn,
- target_portal)
-
- self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
- fake_mounted_disk,
- fake_device_number)
-
- def _mock_attach_volume(self, instance_name, target_iqn, target_lun,
- target_portal=None, boot_from_volume=False):
- fake_mounted_disk = "fake_mounted_disk"
- fake_device_number = 0
- fake_controller_path = 'fake_scsi_controller_path'
- self._mox.StubOutWithMock(self._conn._volumeops,
- '_get_free_controller_slot')
-
- self._mock_login_storage_target(target_iqn, target_lun,
- target_portal,
- fake_mounted_disk,
- fake_device_number)
-
- self._mock_get_mounted_disk_from_lun(target_iqn, target_lun,
- fake_mounted_disk,
- fake_device_number)
-
- if boot_from_volume:
- m = vmutils.VMUtils.get_vm_ide_controller(instance_name, 0)
- m.AndReturn(fake_controller_path)
- fake_free_slot = 0
- else:
- m = vmutils.VMUtils.get_vm_scsi_controller(instance_name)
- m.AndReturn(fake_controller_path)
-
- fake_free_slot = 1
- m = self._conn._volumeops._get_free_controller_slot(
- fake_controller_path)
- m.AndReturn(fake_free_slot)
-
- m = vmutils.VMUtils.attach_volume_to_controller(instance_name,
- fake_controller_path,
- fake_free_slot,
- fake_mounted_disk)
- m.WithSideEffects(self._add_volume_disk)
-
- def _test_util_class_version(self, v1_class, v2_class,
- get_instance_action, is_hyperv_2012,
- force_v1_flag, force_utils_v1):
- self._check_min_windows_version_satisfied = is_hyperv_2012
- CONF.set_override(force_v1_flag, force_v1_flag, 'hyperv')
- self._conn = driver_hyperv.HyperVDriver(None)
-
- instance = get_instance_action()
- is_v1 = isinstance(instance, v1_class)
- # v2_class can inherit from v1_class
- is_v2 = isinstance(instance, v2_class)
-
- self.assertTrue((is_hyperv_2012 and not force_v1_flag) ^
- (is_v1 and not is_v2))
-
- def test_volumeutils_version_hyperv_2012(self):
- self._test_util_class_version(volumeutils.VolumeUtils,
- volumeutilsv2.VolumeUtilsV2,
- lambda: utilsfactory.get_volumeutils(),
- True, 'force_volumeutils_v1', False)
-
- def test_volumeutils_version_hyperv_2012_force_v1(self):
- self._test_util_class_version(volumeutils.VolumeUtils,
- volumeutilsv2.VolumeUtilsV2,
- lambda: utilsfactory.get_volumeutils(),
- True, 'force_volumeutils_v1', True)
-
- def test_volumeutils_version_hyperv_2008R2(self):
- self._test_util_class_version(volumeutils.VolumeUtils,
- volumeutilsv2.VolumeUtilsV2,
- lambda: utilsfactory.get_volumeutils(),
- False, 'force_volumeutils_v1', False)
-
- def test_vmutils_version_hyperv_2012(self):
- self._test_util_class_version(vmutils.VMUtils, vmutilsv2.VMUtilsV2,
- lambda: utilsfactory.get_vmutils(),
- True, 'force_hyperv_utils_v1', False)
-
- def test_vmutils_version_hyperv_2012_force_v1(self):
- self._test_util_class_version(vmutils.VMUtils, vmutilsv2.VMUtilsV2,
- lambda: utilsfactory.get_vmutils(),
- True, 'force_hyperv_utils_v1', True)
-
- def test_vmutils_version_hyperv_2008R2(self):
- self._test_util_class_version(vmutils.VMUtils, vmutilsv2.VMUtilsV2,
- lambda: utilsfactory.get_vmutils(),
- False, 'force_hyperv_utils_v1', False)
-
- def test_vhdutils_version_hyperv_2012(self):
- self._test_util_class_version(vhdutils.VHDUtils,
- vhdutilsv2.VHDUtilsV2,
- lambda: utilsfactory.get_vhdutils(),
- True, 'force_hyperv_utils_v1', False)
-
- def test_vhdutils_version_hyperv_2012_force_v1(self):
- self._test_util_class_version(vhdutils.VHDUtils,
- vhdutilsv2.VHDUtilsV2,
- lambda: utilsfactory.get_vhdutils(),
- True, 'force_hyperv_utils_v1', True)
-
- def test_vhdutils_version_hyperv_2008R2(self):
- self._test_util_class_version(vhdutils.VHDUtils,
- vhdutilsv2.VHDUtilsV2,
- lambda: utilsfactory.get_vhdutils(),
- False, 'force_hyperv_utils_v1', False)
-
- def test_networkutils_version_hyperv_2012(self):
- self._test_util_class_version(networkutils.NetworkUtils,
- networkutilsv2.NetworkUtilsV2,
- lambda: utilsfactory.get_networkutils(),
- True, 'force_hyperv_utils_v1', False)
-
- def test_networkutils_version_hyperv_2012_force_v1(self):
- self._test_util_class_version(networkutils.NetworkUtils,
- networkutilsv2.NetworkUtilsV2,
- lambda: utilsfactory.get_networkutils(),
- True, 'force_hyperv_utils_v1', True)
-
- def test_networkutils_version_hyperv_2008R2(self):
- self._test_util_class_version(networkutils.NetworkUtils,
- networkutilsv2.NetworkUtilsV2,
- lambda: utilsfactory.get_networkutils(),
- False, 'force_hyperv_utils_v1', False)
-
- def test_attach_volume(self):
- instance_data = self._get_instance_data()
-
- connection_info = db_fakes.get_fake_volume_info_data(
- self._volume_target_portal, self._volume_id)
- data = connection_info['data']
- target_lun = data['target_lun']
- target_iqn = data['target_iqn']
- target_portal = data['target_portal']
- mount_point = '/dev/sdc'
-
- self._mock_attach_volume(instance_data['name'], target_iqn, target_lun,
- target_portal)
-
- self._mox.ReplayAll()
- self._conn.attach_volume(None, connection_info, instance_data,
- mount_point)
- self._mox.VerifyAll()
-
- self.assertEqual(len(self._instance_volume_disks), 1)
-
- def _mock_get_mounted_disk_from_lun_error(self, target_iqn, target_lun,
- fake_mounted_disk,
- fake_device_number):
- m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
- target_lun)
- m.AndRaise(vmutils.HyperVException('Simulated failure'))
-
- def _mock_attach_volume_target_logout(self, instance_name, target_iqn,
- target_lun, target_portal=None,
- boot_from_volume=False):
- fake_mounted_disk = "fake_mounted disk"
- fake_device_number = 0
-
- self._mock_login_storage_target(target_iqn, target_lun,
- target_portal,
- fake_mounted_disk,
- fake_device_number)
-
- self._mock_get_mounted_disk_from_lun_error(target_iqn, target_lun,
- fake_mounted_disk,
- fake_device_number)
-
- self._mock_logout_storage_target(target_iqn)
-
- def test_attach_volume_logout(self):
- instance_data = self._get_instance_data()
-
- connection_info = db_fakes.get_fake_volume_info_data(
- self._volume_target_portal, self._volume_id)
- data = connection_info['data']
- target_lun = data['target_lun']
- target_iqn = data['target_iqn']
- target_portal = data['target_portal']
- mount_point = '/dev/sdc'
-
- self._mock_attach_volume_target_logout(instance_data['name'],
- target_iqn, target_lun,
- target_portal)
-
- self._mox.ReplayAll()
- self.assertRaises(vmutils.HyperVException, self._conn.attach_volume,
- None, connection_info, instance_data, mount_point)
- self._mox.VerifyAll()
-
- def test_attach_volume_connection_error(self):
- instance_data = self._get_instance_data()
-
- connection_info = db_fakes.get_fake_volume_info_data(
- self._volume_target_portal, self._volume_id)
- mount_point = '/dev/sdc'
-
- def fake_login_storage_target(connection_info):
- raise vmutils.HyperVException('Fake connection exception')
-
- self.stubs.Set(self._conn._volumeops, '_login_storage_target',
- fake_login_storage_target)
- self.assertRaises(vmutils.HyperVException, self._conn.attach_volume,
- None, connection_info, instance_data, mount_point)
-
- def _mock_detach_volume(self, target_iqn, target_lun,
- other_luns_available=False):
- fake_mounted_disk = "fake_mounted_disk"
- fake_device_number = 0
- m = volumeutils.VolumeUtils.get_device_number_for_target(target_iqn,
- target_lun)
- m.AndReturn(fake_device_number)
-
- m = vmutils.VMUtils.get_mounted_disk_by_drive_number(
- fake_device_number)
- m.AndReturn(fake_mounted_disk)
-
- vmutils.VMUtils.detach_vm_disk(mox.IsA(str), fake_mounted_disk)
-
- self._mock_logout_storage_target(target_iqn, other_luns_available)
-
- def _mock_logout_storage_target(self, target_iqn,
- other_luns_available=False):
-
- m = volumeutils.VolumeUtils.get_target_lun_count(target_iqn)
- m.AndReturn(1 + int(other_luns_available))
-
- if not other_luns_available:
- volumeutils.VolumeUtils.logout_storage_target(target_iqn)
-
- def _test_detach_volume(self, other_luns_available=False):
- instance_data = self._get_instance_data()
- self.assertIn('name', instance_data)
-
- connection_info = db_fakes.get_fake_volume_info_data(
- self._volume_target_portal, self._volume_id)
- data = connection_info['data']
- target_lun = data['target_lun']
- target_iqn = data['target_iqn']
- self.assertIn('target_portal', data)
-
- mount_point = '/dev/sdc'
-
- self._mock_detach_volume(target_iqn, target_lun, other_luns_available)
- self._mox.ReplayAll()
- self._conn.detach_volume(connection_info, instance_data, mount_point)
- self._mox.VerifyAll()
-
- def test_detach_volume(self):
- self._test_detach_volume()
-
- def test_detach_volume_multiple_luns_per_target(self):
- # The iSCSI target should not be disconnected in this case.
- self._test_detach_volume(other_luns_available=True)
-
- def test_boot_from_volume(self):
- block_device_info = db_fakes.get_fake_block_device_info(
- self._volume_target_portal, self._volume_id)
-
- self._setup_spawn_instance_mocks(cow=False,
- block_device_info=block_device_info,
- boot_from_volume=True)
-
- self._mox.ReplayAll()
- self._spawn_instance(False, block_device_info)
- self._mox.VerifyAll()
-
- self.assertEqual(len(self._instance_volume_disks), 1)
-
- def test_get_volume_connector(self):
- self._instance_data = self._get_instance_data()
- instance = db.instance_create(self._context, self._instance_data)
-
- fake_my_ip = "fake_ip"
- fake_host = "fake_host"
- fake_initiator = "fake_initiator"
-
- self.flags(my_ip=fake_my_ip)
- self.flags(host=fake_host)
-
- m = volumeutils.VolumeUtils.get_iscsi_initiator()
- m.AndReturn(fake_initiator)
-
- self._mox.ReplayAll()
- data = self._conn.get_volume_connector(instance)
- self._mox.VerifyAll()
-
- self.assertEqual(fake_my_ip, data.get('ip'))
- self.assertEqual(fake_host, data.get('host'))
- self.assertEqual(fake_initiator, data.get('initiator'))
-
- def _setup_test_migrate_disk_and_power_off_mocks(self, same_host=False,
- copy_exception=False,
- size_exception=False):
- self._instance_data = self._get_instance_data()
- instance = db.instance_create(self._context, self._instance_data)
- network_info = fake_network.fake_get_instance_nw_info(self.stubs)
-
- instance['root_gb'] = 10
-
- fake_local_ip = '10.0.0.1'
- if same_host:
- fake_dest_ip = fake_local_ip
- else:
- fake_dest_ip = '10.0.0.2'
-
- if size_exception:
- flavor = 'm1.tiny'
- else:
- flavor = 'm1.small'
-
- flavor = db.flavor_get_by_name(self._context, flavor)
-
- if not size_exception:
- fake_root_vhd_path = 'C:\\FakePath\\root.vhd'
- fake_revert_path = os.path.join(self._test_instance_dir, '_revert')
-
- func = mox.Func(self._check_instance_name)
- vmutils.VMUtils.set_vm_state(func,
- constants.HYPERV_VM_STATE_DISABLED)
-
- self._setup_delete_vm_log_mocks()
-
- m = vmutils.VMUtils.get_vm_storage_paths(func)
- m.AndReturn(([fake_root_vhd_path], []))
-
- m = hostutils.HostUtils.get_local_ips()
- m.AndReturn([fake_local_ip])
-
- m = fake.PathUtils.get_instance_dir(mox.IsA(str))
- m.AndReturn(self._test_instance_dir)
-
- m = pathutils.PathUtils.get_instance_migr_revert_dir(
- instance['name'], remove_dir=True)
- m.AndReturn(fake_revert_path)
-
- if same_host:
- fake.PathUtils.makedirs(mox.IsA(str))
-
- m = fake.PathUtils.copy(fake_root_vhd_path, mox.IsA(str))
- if copy_exception:
- m.AndRaise(shutil.Error('Simulated copy error'))
- m = fake.PathUtils.get_instance_dir(mox.IsA(str),
- mox.IsA(str),
- remove_dir=True)
- m.AndReturn(self._test_instance_dir)
- else:
- fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))
- destroy_disks = True
- if same_host:
- fake.PathUtils.rename(mox.IsA(str), mox.IsA(str))
- destroy_disks = False
-
- self._setup_destroy_mocks(False)
-
- if destroy_disks:
- m = fake.PathUtils.get_instance_dir(mox.IsA(str),
- mox.IsA(str),
- remove_dir=True)
- m.AndReturn(self._test_instance_dir)
-
- return (instance, fake_dest_ip, network_info, flavor)
-
- def test_migrate_disk_and_power_off(self):
- (instance,
- fake_dest_ip,
- network_info,
- flavor) = self._setup_test_migrate_disk_and_power_off_mocks()
-
- self._mox.ReplayAll()
- self._conn.migrate_disk_and_power_off(self._context, instance,
- fake_dest_ip, flavor,
- network_info)
- self._mox.VerifyAll()
-
- def test_migrate_disk_and_power_off_same_host(self):
- args = self._setup_test_migrate_disk_and_power_off_mocks(
- same_host=True)
- (instance, fake_dest_ip, network_info, flavor) = args
-
- self._mox.ReplayAll()
- self._conn.migrate_disk_and_power_off(self._context, instance,
- fake_dest_ip, flavor,
- network_info)
- self._mox.VerifyAll()
-
- def test_migrate_disk_and_power_off_copy_exception(self):
- args = self._setup_test_migrate_disk_and_power_off_mocks(
- copy_exception=True)
- (instance, fake_dest_ip, network_info, flavor) = args
-
- self._mox.ReplayAll()
- self.assertRaises(shutil.Error, self._conn.migrate_disk_and_power_off,
- self._context, instance, fake_dest_ip,
- flavor, network_info)
- self._mox.VerifyAll()
-
- def test_migrate_disk_and_power_off_smaller_root_vhd_size_exception(self):
- args = self._setup_test_migrate_disk_and_power_off_mocks(
- size_exception=True)
- (instance, fake_dest_ip, network_info, flavor) = args
-
- self._mox.ReplayAll()
- self.assertRaises(exception.InstanceFaultRollback,
- self._conn.migrate_disk_and_power_off,
- self._context, instance, fake_dest_ip,
- flavor, network_info)
- self._mox.VerifyAll()
-
- def _mock_attach_config_drive(self, instance, config_drive_format):
- instance['config_drive'] = True
- self._mox.StubOutWithMock(fake.PathUtils, 'lookup_configdrive_path')
- m = fake.PathUtils.lookup_configdrive_path(
- mox.Func(self._check_instance_name))
-
- if config_drive_format in constants.DISK_FORMAT_MAP:
- m.AndReturn(self._test_instance_dir + '/configdrive.' +
- config_drive_format)
- else:
- m.AndReturn(None)
-
- m = vmutils.VMUtils.attach_ide_drive(
- mox.Func(self._check_instance_name),
- mox.IsA(str),
- mox.IsA(int),
- mox.IsA(int),
- mox.IsA(str))
- m.WithSideEffects(self._add_ide_disk).InAnyOrder()
-
- def _verify_attach_config_drive(self, config_drive_format):
- if config_drive_format == constants.IDE_DISK_FORMAT.lower():
- self.assertEqual(self._instance_ide_disks[1],
- self._test_instance_dir + '/configdrive.' +
- config_drive_format)
- elif config_drive_format == constants.IDE_DVD_FORMAT.lower():
- self.assertEqual(self._instance_ide_dvds[0],
- self._test_instance_dir + '/configdrive.' +
- config_drive_format)
-
- def _test_finish_migration(self, power_on, ephemeral_storage=False,
- config_drive=False,
- config_drive_format='iso'):
- self._instance_data = self._get_instance_data()
- instance = db.instance_create(self._context, self._instance_data)
- instance['system_metadata'] = {}
- network_info = fake_network.fake_get_instance_nw_info(self.stubs)
-
- m = fake.PathUtils.get_instance_dir(mox.IsA(str))
- m.AndReturn(self._test_instance_dir)
-
- self._mox.StubOutWithMock(fake.PathUtils, 'exists')
- m = fake.PathUtils.exists(mox.IsA(str))
- m.AndReturn(True)
-
- fake_parent_vhd_path = (os.path.join('FakeParentPath', '%s.vhd' %
- instance["image_ref"]))
-
- m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
- m.AndReturn({'ParentPath': fake_parent_vhd_path,
- 'MaxInternalSize': 1})
- m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
- mox.IsA(str), mox.IsA(object))
- m.AndReturn(1025)
-
- vhdutils.VHDUtils.reconnect_parent_vhd(mox.IsA(str), mox.IsA(str))
-
- m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
- m.AndReturn({'MaxInternalSize': 1024})
-
- m = fake.PathUtils.exists(mox.IsA(str))
- m.AndReturn(True)
-
- m = fake.PathUtils.get_instance_dir(mox.IsA(str))
- if ephemeral_storage:
- return m.AndReturn(self._test_instance_dir)
- else:
- m.AndReturn(None)
-
- self._set_vm_name(instance['name'])
- self._setup_create_instance_mocks(None, False,
- ephemeral_storage=ephemeral_storage)
-
- if power_on:
- vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
- constants.HYPERV_VM_STATE_ENABLED)
- self._setup_log_vm_output_mocks()
-
- if config_drive:
- self._mock_attach_config_drive(instance, config_drive_format)
-
- self._mox.ReplayAll()
- self._conn.finish_migration(self._context, None, instance, "",
- network_info, None, False, None, power_on)
- self._mox.VerifyAll()
-
- if config_drive:
- self._verify_attach_config_drive(config_drive_format)
-
- def test_finish_migration_power_on(self):
- self._test_finish_migration(True)
-
- def test_finish_migration_power_off(self):
- self._test_finish_migration(False)
-
- def test_finish_migration_with_ephemeral_storage(self):
- self._test_finish_migration(False, ephemeral_storage=True)
-
- def test_finish_migration_attach_config_drive_iso(self):
- self._test_finish_migration(False, config_drive=True,
- config_drive_format=constants.IDE_DVD_FORMAT.lower())
-
- def test_finish_migration_attach_config_drive_vhd(self):
- self._test_finish_migration(False, config_drive=True,
- config_drive_format=constants.IDE_DISK_FORMAT.lower())
-
- def test_confirm_migration(self):
- self._instance_data = self._get_instance_data()
- instance = db.instance_create(self._context, self._instance_data)
- network_info = fake_network.fake_get_instance_nw_info(self.stubs)
-
- pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'],
- remove_dir=True)
- self._mox.ReplayAll()
- self._conn.confirm_migration(None, instance, network_info)
- self._mox.VerifyAll()
-
- def _test_finish_revert_migration(self, power_on, ephemeral_storage=False,
- config_drive=False,
- config_drive_format='iso'):
- self._instance_data = self._get_instance_data()
- instance = db.instance_create(self._context, self._instance_data)
- network_info = fake_network.fake_get_instance_nw_info(self.stubs)
-
- fake_revert_path = ('C:\\FakeInstancesPath\\%s\\_revert' %
- instance['name'])
-
- m = fake.PathUtils.get_instance_dir(mox.IsA(str),
- create_dir=False,
- remove_dir=True)
- m.AndReturn(self._test_instance_dir)
-
- m = pathutils.PathUtils.get_instance_migr_revert_dir(instance['name'])
- m.AndReturn(fake_revert_path)
- fake.PathUtils.rename(fake_revert_path, mox.IsA(str))
-
- m = fake.PathUtils.get_instance_dir(mox.IsA(str))
- m.AndReturn(self._test_instance_dir)
-
- m = fake.PathUtils.get_instance_dir(mox.IsA(str))
- if ephemeral_storage:
- m.AndReturn(self._test_instance_dir)
- else:
- m.AndReturn(None)
-
- self._set_vm_name(instance['name'])
- self._setup_create_instance_mocks(None, False,
- ephemeral_storage=ephemeral_storage)
-
- if power_on:
- vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
- constants.HYPERV_VM_STATE_ENABLED)
- self._setup_log_vm_output_mocks()
-
- if config_drive:
- self._mock_attach_config_drive(instance, config_drive_format)
-
- self._mox.ReplayAll()
- self._conn.finish_revert_migration(self._context, instance,
- network_info, None,
- power_on)
- self._mox.VerifyAll()
-
- if config_drive:
- self._verify_attach_config_drive(config_drive_format)
-
- def test_finish_revert_migration_power_on(self):
- self._test_finish_revert_migration(True)
-
- def test_finish_revert_migration_power_off(self):
- self._test_finish_revert_migration(False)
-
- def test_spawn_no_admin_permissions(self):
- self.assertRaises(vmutils.HyperVAuthorizationException,
- self._test_spawn_instance,
- with_exception=True,
- admin_permissions=False)
-
- def test_finish_revert_migration_with_ephemeral_storage(self):
- self._test_finish_revert_migration(False, ephemeral_storage=True)
-
- def test_finish_revert_migration_attach_config_drive_iso(self):
- self._test_finish_revert_migration(False, config_drive=True,
- config_drive_format=constants.IDE_DVD_FORMAT.lower())
-
- def test_finish_revert_migration_attach_config_drive_vhd(self):
- self._test_finish_revert_migration(False, config_drive=True,
- config_drive_format=constants.IDE_DISK_FORMAT.lower())
-
- def test_plug_vifs(self):
- # Check to make sure the method raises NotImplementedError.
- self.assertRaises(NotImplementedError,
- self._conn.plug_vifs,
- instance=self._test_spawn_instance,
- network_info=None)
-
- def test_unplug_vifs(self):
- # Check to make sure the method raises NotImplementedError.
- self.assertRaises(NotImplementedError,
- self._conn.unplug_vifs,
- instance=self._test_spawn_instance,
- network_info=None)
-
- def test_rollback_live_migration_at_destination(self):
- with mock.patch.object(self._conn, "destroy") as mock_destroy:
- self._conn.rollback_live_migration_at_destination(self._context,
- self._test_spawn_instance, [], None)
- mock_destroy.assert_called_once_with(self._context,
- self._test_spawn_instance, [], None)
-
- def test_refresh_instance_security_rules(self):
- self.assertRaises(NotImplementedError,
- self._conn.refresh_instance_security_rules,
- instance=None)
-
- def test_get_rdp_console(self):
- self.flags(my_ip="192.168.1.1")
-
- self._instance_data = self._get_instance_data()
- instance = db.instance_create(self._context, self._instance_data)
-
- fake_port = 9999
- fake_vm_id = "fake_vm_id"
-
- m = rdpconsoleutils.RDPConsoleUtils.get_rdp_console_port()
- m.AndReturn(fake_port)
-
- m = vmutils.VMUtils.get_vm_id(mox.IsA(str))
- m.AndReturn(fake_vm_id)
-
- self._mox.ReplayAll()
- connect_info = self._conn.get_rdp_console(self._context, instance)
- self._mox.VerifyAll()
-
- self.assertEqual(CONF.my_ip, connect_info.host)
- self.assertEqual(fake_port, connect_info.port)
- self.assertEqual(fake_vm_id, connect_info.internal_access_path)
-
-
-class VolumeOpsTestCase(HyperVAPIBaseTestCase):
- """Unit tests for VolumeOps class."""
-
- def setUp(self):
- super(VolumeOpsTestCase, self).setUp()
- self.volumeops = volumeops.VolumeOps()
-
- def test_get_mounted_disk_from_lun(self):
- with contextlib.nested(
- mock.patch.object(self.volumeops._volutils,
- 'get_device_number_for_target'),
- mock.patch.object(self.volumeops._vmutils,
- 'get_mounted_disk_by_drive_number')
- ) as (mock_get_device_number_for_target,
- mock_get_mounted_disk_by_drive_number):
-
- mock_get_device_number_for_target.return_value = 0
- mock_get_mounted_disk_by_drive_number.return_value = 'disk_path'
-
- block_device_info = db_fakes.get_fake_block_device_info(
- self._volume_target_portal, self._volume_id)
-
- mapping = driver.block_device_info_get_mapping(block_device_info)
- data = mapping[0]['connection_info']['data']
- target_lun = data['target_lun']
- target_iqn = data['target_iqn']
-
- disk = self.volumeops._get_mounted_disk_from_lun(target_iqn,
- target_lun)
- self.assertEqual(disk, 'disk_path')
-
- def test_get_mounted_disk_from_lun_failure(self):
- self.flags(mounted_disk_query_retry_count=1, group='hyperv')
-
- with mock.patch.object(self.volumeops._volutils,
- 'get_device_number_for_target') as m_device_num:
- m_device_num.side_effect = [None, -1]
-
- block_device_info = db_fakes.get_fake_block_device_info(
- self._volume_target_portal, self._volume_id)
-
- mapping = driver.block_device_info_get_mapping(block_device_info)
- data = mapping[0]['connection_info']['data']
- target_lun = data['target_lun']
- target_iqn = data['target_iqn']
-
- for attempt in xrange(1):
- self.assertRaises(exception.NotFound,
- self.volumeops._get_mounted_disk_from_lun,
- target_iqn, target_lun)
-
- def test_get_free_controller_slot_exception(self):
- fake_drive = mock.MagicMock()
- type(fake_drive).AddressOnParent = mock.PropertyMock(
- side_effect=xrange(constants.SCSI_CONTROLLER_SLOTS_NUMBER))
- fake_scsi_controller_path = 'fake_scsi_controller_path'
-
- with mock.patch.object(self.volumeops._vmutils,
- 'get_attached_disks') as fake_get_attached_disks:
- fake_get_attached_disks.return_value = (
- [fake_drive] * constants.SCSI_CONTROLLER_SLOTS_NUMBER)
- self.assertRaises(vmutils.HyperVException,
- self.volumeops._get_free_controller_slot,
- fake_scsi_controller_path)
-
- def test_fix_instance_volume_disk_paths(self):
- block_device_info = db_fakes.get_fake_block_device_info(
- self._volume_target_portal, self._volume_id)
-
- with contextlib.nested(
- mock.patch.object(self.volumeops,
- '_get_mounted_disk_from_lun'),
- mock.patch.object(self.volumeops._vmutils,
- 'get_vm_scsi_controller'),
- mock.patch.object(self.volumeops._vmutils,
- 'set_disk_host_resource'),
- mock.patch.object(self.volumeops,
- 'ebs_root_in_block_devices')
- ) as (mock_get_mounted_disk_from_lun,
- mock_get_vm_scsi_controller,
- mock_set_disk_host_resource,
- mock_ebs_in_block_devices):
-
- mock_ebs_in_block_devices.return_value = False
- mock_get_mounted_disk_from_lun.return_value = "fake_mounted_path"
- mock_set_disk_host_resource.return_value = "fake_controller_path"
-
- self.volumeops.fix_instance_volume_disk_paths(
- "test_vm_name",
- block_device_info)
-
- mock_get_mounted_disk_from_lun.assert_called_with(
- 'iqn.2010-10.org.openstack:volume-' + self._volume_id, 1, True)
- mock_get_vm_scsi_controller.assert_called_with("test_vm_name")
- mock_set_disk_host_resource("test_vm_name", "fake_controller_path",
- 0, "fake_mounted_path")
-
-
-class HostOpsTestCase(HyperVAPIBaseTestCase):
- """Unit tests for the Hyper-V hostops class."""
-
- def setUp(self):
- self._hostops = hostops.HostOps()
- self._hostops._hostutils = mock.MagicMock()
- self._hostops.time = mock.MagicMock()
- super(HostOpsTestCase, self).setUp()
-
- @mock.patch('nova.virt.hyperv.hostops.time')
- def test_host_uptime(self, mock_time):
- self._hostops._hostutils.get_host_tick_count64.return_value = 100
- mock_time.strftime.return_value = "01:01:01"
-
- result_uptime = "01:01:01 up %s, 0 users, load average: 0, 0, 0" % (
- str(datetime.timedelta(
- milliseconds = long(100))))
- actual_uptime = self._hostops.get_host_uptime()
- self.assertEqual(result_uptime, actual_uptime)
diff --git a/nova/tests/virt/hyperv/test_migrationops.py b/nova/tests/virt/hyperv/test_migrationops.py
deleted file mode 100644
index 4e715a2988..0000000000
--- a/nova/tests/virt/hyperv/test_migrationops.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import test
-from nova.tests import fake_instance
-from nova.virt.hyperv import migrationops
-from nova.virt.hyperv import vmutils
-
-
-class MigrationOpsTestCase(test.NoDBTestCase):
- """Unit tests for the Hyper-V MigrationOps class."""
-
- _FAKE_TIMEOUT = 10
- _FAKE_RETRY_INTERVAL = 5
-
- def setUp(self):
- super(MigrationOpsTestCase, self).setUp()
- self.context = 'fake-context'
-
- # utilsfactory will check the host OS version via get_hostutils,
- # in order to return the proper Utils Class, so it must be mocked.
- patched_func = mock.patch.object(migrationops.utilsfactory,
- "get_hostutils")
- patched_func.start()
- self.addCleanup(patched_func.stop)
-
- self._migrationops = migrationops.MigrationOps()
- self._migrationops._vmops = mock.MagicMock()
- self._migrationops._vmutils = mock.MagicMock()
-
- def test_check_and_attach_config_drive_unknown_path(self):
- instance = fake_instance.fake_instance_obj(self.context,
- expected_attrs=['system_metadata'])
- instance.config_drive = 'True'
- self._migrationops._pathutils.lookup_configdrive_path = mock.MagicMock(
- return_value=None)
- self.assertRaises(vmutils.HyperVException,
- self._migrationops._check_and_attach_config_drive,
- instance)
-
- @mock.patch.object(migrationops.MigrationOps, '_migrate_disk_files')
- @mock.patch.object(migrationops.MigrationOps, '_check_target_flavor')
- def test_migrate_disk_and_power_off(self, mock_check_flavor,
- mock_migrate_disk_files):
- instance = fake_instance.fake_instance_obj(self.context)
- flavor = mock.MagicMock()
- network_info = mock.MagicMock()
-
- disk_files = [mock.MagicMock()]
- volume_drives = [mock.MagicMock()]
-
- mock_get_vm_st_path = self._migrationops._vmutils.get_vm_storage_paths
- mock_get_vm_st_path.return_value = (disk_files, volume_drives)
-
- self._migrationops.migrate_disk_and_power_off(
- self.context, instance, mock.sentinel.FAKE_DEST, flavor,
- network_info, None, self._FAKE_TIMEOUT, self._FAKE_RETRY_INTERVAL)
-
- mock_check_flavor.assert_called_once_with(instance, flavor)
- self._migrationops._vmops.power_off.assert_called_once_with(
- instance, self._FAKE_TIMEOUT, self._FAKE_RETRY_INTERVAL)
- mock_get_vm_st_path.assert_called_once_with(instance.name)
- mock_migrate_disk_files.assert_called_once_with(
- instance.name, disk_files, mock.sentinel.FAKE_DEST)
- self._migrationops._vmops.destroy.assert_called_once_with(
- instance, destroy_disks=False)
diff --git a/nova/tests/virt/hyperv/test_networkutilsv2.py b/nova/tests/virt/hyperv/test_networkutilsv2.py
deleted file mode 100644
index bd79709d78..0000000000
--- a/nova/tests/virt/hyperv/test_networkutilsv2.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2013 Cloudbase Solutions Srl
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova.tests.virt.hyperv import test_networkutils
-from nova.virt.hyperv import networkutilsv2
-
-
-class NetworkUtilsV2TestCase(test_networkutils.NetworkUtilsTestCase):
- """Unit tests for the Hyper-V NetworkUtilsV2 class."""
-
- _MSVM_VIRTUAL_SWITCH = 'Msvm_VirtualEthernetSwitch'
-
- def setUp(self):
- super(NetworkUtilsV2TestCase, self).setUp()
- self._networkutils = networkutilsv2.NetworkUtilsV2()
- self._networkutils._conn = mock.MagicMock()
-
- def _prepare_external_port(self, mock_vswitch, mock_ext_port):
- mock_lep = mock_ext_port.associators()[0]
- mock_lep1 = mock_lep.associators()[0]
- mock_esw = mock_lep1.associators()[0]
- mock_esw.associators.return_value = [mock_vswitch]
-
- def test_create_vswitch_port(self):
- self.assertRaises(
- NotImplementedError,
- self._networkutils.create_vswitch_port,
- mock.sentinel.FAKE_VSWITCH_PATH,
- mock.sentinel.FAKE_PORT_NAME)
-
- def test_vswitch_port_needed(self):
- self.assertFalse(self._networkutils.vswitch_port_needed())
diff --git a/nova/tests/virt/hyperv/test_vmops.py b/nova/tests/virt/hyperv/test_vmops.py
deleted file mode 100644
index b0c1bfd260..0000000000
--- a/nova/tests/virt/hyperv/test_vmops.py
+++ /dev/null
@@ -1,230 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from eventlet import timeout as etimeout
-import mock
-
-from nova import exception
-from nova import test
-from nova.tests import fake_instance
-from nova.virt.hyperv import constants
-from nova.virt.hyperv import pathutils
-from nova.virt.hyperv import vmops
-from nova.virt.hyperv import vmutils
-
-
-class VMOpsTestCase(test.NoDBTestCase):
- """Unit tests for the Hyper-V VMOps class."""
-
- _FAKE_TIMEOUT = 2
-
- def __init__(self, test_case_name):
- super(VMOpsTestCase, self).__init__(test_case_name)
-
- def setUp(self):
- super(VMOpsTestCase, self).setUp()
- self.context = 'fake-context'
-
- # utilsfactory will check the host OS version via get_hostutils,
- # in order to return the proper Utils Class, so it must be mocked.
- patched_func = mock.patch.object(vmops.utilsfactory,
- "get_hostutils")
- patched_func.start()
- self.addCleanup(patched_func.stop)
-
- self._vmops = vmops.VMOps()
-
- def test_attach_config_drive(self):
- instance = fake_instance.fake_instance_obj(self.context)
- self.assertRaises(exception.InvalidDiskFormat,
- self._vmops.attach_config_drive,
- instance, 'C:/fake_instance_dir/configdrive.xxx')
-
- def test_reboot_hard(self):
- self._test_reboot(vmops.REBOOT_TYPE_HARD,
- constants.HYPERV_VM_STATE_REBOOT)
-
- @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
- def test_reboot_soft(self, mock_soft_shutdown):
- mock_soft_shutdown.return_value = True
- self._test_reboot(vmops.REBOOT_TYPE_SOFT,
- constants.HYPERV_VM_STATE_ENABLED)
-
- @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
- def test_reboot_soft_failed(self, mock_soft_shutdown):
- mock_soft_shutdown.return_value = False
- self._test_reboot(vmops.REBOOT_TYPE_SOFT,
- constants.HYPERV_VM_STATE_REBOOT)
-
- @mock.patch("nova.virt.hyperv.vmops.VMOps.power_on")
- @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
- def test_reboot_soft_exception(self, mock_soft_shutdown, mock_power_on):
- mock_soft_shutdown.return_value = True
- mock_power_on.side_effect = vmutils.HyperVException("Expected failure")
- instance = fake_instance.fake_instance_obj(self.context)
-
- self.assertRaises(vmutils.HyperVException, self._vmops.reboot,
- instance, {}, vmops.REBOOT_TYPE_SOFT)
-
- mock_soft_shutdown.assert_called_once_with(instance)
- mock_power_on.assert_called_once_with(instance)
-
- def _test_reboot(self, reboot_type, vm_state):
- instance = fake_instance.fake_instance_obj(self.context)
- with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
- self._vmops.reboot(instance, {}, reboot_type)
- mock_set_state.assert_called_once_with(instance, vm_state)
-
- @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm")
- @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
- def test_soft_shutdown(self, mock_wait_for_power_off, mock_shutdown_vm):
- instance = fake_instance.fake_instance_obj(self.context)
- mock_wait_for_power_off.return_value = True
-
- result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
-
- mock_shutdown_vm.assert_called_once_with(instance.name)
- mock_wait_for_power_off.assert_called_once_with(
- instance.name, self._FAKE_TIMEOUT)
-
- self.assertTrue(result)
-
- @mock.patch("time.sleep")
- @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm")
- def test_soft_shutdown_failed(self, mock_shutdown_vm, mock_sleep):
- instance = fake_instance.fake_instance_obj(self.context)
-
- mock_shutdown_vm.side_effect = vmutils.HyperVException(
- "Expected failure.")
-
- result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
-
- mock_shutdown_vm.assert_called_once_with(instance.name)
- self.assertFalse(result)
-
- @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm")
- @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
- def test_soft_shutdown_wait(self, mock_wait_for_power_off,
- mock_shutdown_vm):
- instance = fake_instance.fake_instance_obj(self.context)
- mock_wait_for_power_off.side_effect = [False, True]
-
- result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1)
-
- calls = [mock.call(instance.name, 1),
- mock.call(instance.name, self._FAKE_TIMEOUT - 1)]
- mock_shutdown_vm.assert_called_with(instance.name)
- mock_wait_for_power_off.assert_has_calls(calls)
-
- self.assertTrue(result)
-
- @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm")
- @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
- def test_soft_shutdown_wait_timeout(self, mock_wait_for_power_off,
- mock_shutdown_vm):
- instance = fake_instance.fake_instance_obj(self.context)
- mock_wait_for_power_off.return_value = False
-
- result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1.5)
-
- calls = [mock.call(instance.name, 1.5),
- mock.call(instance.name, self._FAKE_TIMEOUT - 1.5)]
- mock_shutdown_vm.assert_called_with(instance.name)
- mock_wait_for_power_off.assert_has_calls(calls)
-
- self.assertFalse(result)
-
- def _test_power_off(self, timeout):
- instance = fake_instance.fake_instance_obj(self.context)
- with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
- self._vmops.power_off(instance, timeout)
-
- mock_set_state.assert_called_once_with(
- instance, constants.HYPERV_VM_STATE_DISABLED)
-
- def test_power_off_hard(self):
- self._test_power_off(timeout=0)
-
- @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
- def test_power_off_exception(self, mock_soft_shutdown):
- mock_soft_shutdown.return_value = False
- self._test_power_off(timeout=1)
-
- @mock.patch("nova.virt.hyperv.vmops.VMOps._set_vm_state")
- @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
- def test_power_off_soft(self, mock_soft_shutdown, mock_set_state):
- instance = fake_instance.fake_instance_obj(self.context)
- mock_soft_shutdown.return_value = True
-
- self._vmops.power_off(instance, 1, 0)
-
- mock_soft_shutdown.assert_called_once_with(
- instance, 1, vmops.SHUTDOWN_TIME_INCREMENT)
- self.assertFalse(mock_set_state.called)
-
- def test_get_vm_state(self):
- summary_info = {'EnabledState': constants.HYPERV_VM_STATE_DISABLED}
-
- with mock.patch.object(self._vmops._vmutils,
- 'get_vm_summary_info') as mock_get_summary_info:
- mock_get_summary_info.return_value = summary_info
-
- response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
- self.assertEqual(response, constants.HYPERV_VM_STATE_DISABLED)
-
- @mock.patch.object(vmops.VMOps, '_get_vm_state')
- def test_wait_for_power_off_true(self, mock_get_state):
- mock_get_state.return_value = constants.HYPERV_VM_STATE_DISABLED
- result = self._vmops._wait_for_power_off(
- mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
- mock_get_state.assert_called_with(mock.sentinel.FAKE_VM_NAME)
- self.assertTrue(result)
-
- @mock.patch.object(vmops.etimeout, "with_timeout")
- def test_wait_for_power_off_false(self, mock_with_timeout):
- mock_with_timeout.side_effect = etimeout.Timeout()
- result = self._vmops._wait_for_power_off(
- mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
- self.assertFalse(result)
-
- @mock.patch("__builtin__.open")
- @mock.patch("os.path.exists")
- @mock.patch.object(pathutils.PathUtils, 'get_vm_console_log_paths')
- def test_get_console_output_exception(self,
- fake_get_vm_log_path,
- fake_path_exists,
- fake_open):
- fake_vm = mock.MagicMock()
-
- fake_open.side_effect = vmutils.HyperVException
- fake_path_exists.return_value = True
- fake_get_vm_log_path.return_value = (
- mock.sentinel.fake_console_log_path,
- mock.sentinel.fake_console_log_archived)
-
- with mock.patch('nova.virt.hyperv.vmops.open', fake_open, create=True):
- self.assertRaises(vmutils.HyperVException,
- self._vmops.get_console_output,
- fake_vm)
-
- def test_list_instance_uuids(self):
- fake_uuid = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
- with mock.patch.object(self._vmops._vmutils,
- 'list_instance_notes') as mock_list_notes:
- mock_list_notes.return_value = [('fake_name', [fake_uuid])]
-
- response = self._vmops.list_instance_uuids()
- mock_list_notes.assert_called_once_with()
-
- self.assertEqual(response, [fake_uuid])
diff --git a/nova/tests/virt/hyperv/test_vmutilsv2.py b/nova/tests/virt/hyperv/test_vmutilsv2.py
deleted file mode 100644
index e19ec217ae..0000000000
--- a/nova/tests/virt/hyperv/test_vmutilsv2.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# Copyright 2014 Cloudbase Solutions Srl
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova.tests.virt.hyperv import test_vmutils
-from nova.virt.hyperv import vmutilsv2
-
-
-class VMUtilsV2TestCase(test_vmutils.VMUtilsTestCase):
- """Unit tests for the Hyper-V VMUtilsV2 class."""
-
- _DEFINE_SYSTEM = 'DefineSystem'
- _DESTROY_SYSTEM = 'DestroySystem'
- _DESTROY_SNAPSHOT = 'DestroySnapshot'
-
- _ADD_RESOURCE = 'AddResourceSettings'
- _REMOVE_RESOURCE = 'RemoveResourceSettings'
- _SETTING_TYPE = 'VirtualSystemType'
-
- _VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
-
- def setUp(self):
- super(VMUtilsV2TestCase, self).setUp()
- self._vmutils = vmutilsv2.VMUtilsV2()
- self._vmutils._conn = mock.MagicMock()
-
- def test_modify_virt_resource(self):
- mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
- mock_svc.ModifyResourceSettings.return_value = (self._FAKE_JOB_PATH,
- mock.MagicMock(),
- self._FAKE_RET_VAL)
- mock_res_setting_data = mock.MagicMock()
- mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
-
- self._vmutils._modify_virt_resource(mock_res_setting_data,
- self._FAKE_VM_PATH)
-
- mock_svc.ModifyResourceSettings.assert_called_with(
- ResourceSettings=[self._FAKE_RES_DATA])
-
- @mock.patch.object(vmutilsv2, 'wmi', create=True)
- @mock.patch.object(vmutilsv2.VMUtilsV2, 'check_ret_val')
- def test_take_vm_snapshot(self, mock_check_ret_val, mock_wmi):
- self._lookup_vm()
-
- mock_svc = self._get_snapshot_service()
- mock_svc.CreateSnapshot.return_value = (self._FAKE_JOB_PATH,
- mock.MagicMock(),
- self._FAKE_RET_VAL)
-
- self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME)
-
- mock_svc.CreateSnapshot.assert_called_with(
- AffectedSystem=self._FAKE_VM_PATH,
- SnapshotType=self._vmutils._SNAPSHOT_FULL)
-
- mock_check_ret_val.assert_called_once_with(self._FAKE_RET_VAL,
- self._FAKE_JOB_PATH)
-
- @mock.patch.object(vmutilsv2.VMUtilsV2, '_add_virt_resource')
- @mock.patch.object(vmutilsv2.VMUtilsV2, '_get_new_setting_data')
- @mock.patch.object(vmutilsv2.VMUtilsV2, '_get_nic_data_by_name')
- def test_set_nic_connection(self, mock_get_nic_data, mock_get_new_sd,
- mock_add_virt_res):
- self._lookup_vm()
- fake_eth_port = mock_get_new_sd.return_value
-
- self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None)
- mock_add_virt_res.assert_called_with(fake_eth_port, self._FAKE_VM_PATH)
-
- @mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks')
- def test_enable_vm_metrics_collection(self, mock_get_vm_disks):
- self._lookup_vm()
- mock_svc = self._vmutils._conn.Msvm_MetricService()[0]
-
- metric_def = mock.MagicMock()
- mock_disk = mock.MagicMock()
- mock_disk.path_.return_value = self._FAKE_RES_PATH
- mock_get_vm_disks.return_value = ([mock_disk], [mock_disk])
-
- fake_metric_def_paths = ["fake_0", None]
- fake_metric_resource_paths = [self._FAKE_VM_PATH, self._FAKE_RES_PATH]
-
- metric_def.path_.side_effect = fake_metric_def_paths
- self._vmutils._conn.CIM_BaseMetricDefinition.return_value = [
- metric_def]
-
- self._vmutils.enable_vm_metrics_collection(self._FAKE_VM_NAME)
-
- calls = []
- for i in range(len(fake_metric_def_paths)):
- calls.append(mock.call(
- Subject=fake_metric_resource_paths[i],
- Definition=fake_metric_def_paths[i],
- MetricCollectionEnabled=self._vmutils._METRIC_ENABLED))
-
- mock_svc.ControlMetrics.assert_has_calls(calls, any_order=True)
-
- def _get_snapshot_service(self):
- return self._vmutils._conn.Msvm_VirtualSystemSnapshotService()[0]
-
- def _assert_add_resources(self, mock_svc):
- getattr(mock_svc, self._ADD_RESOURCE).assert_called_with(
- self._FAKE_VM_PATH, [self._FAKE_RES_DATA])
-
- def _assert_remove_resources(self, mock_svc):
- getattr(mock_svc, self._REMOVE_RESOURCE).assert_called_with(
- [self._FAKE_RES_PATH])
-
- def test_list_instance_notes(self):
- vs = mock.MagicMock()
- attrs = {'ElementName': 'fake_name',
- 'Notes': ['4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3']}
- vs.configure_mock(**attrs)
- self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
- response = self._vmutils.list_instance_notes()
-
- self.assertEqual([(attrs['ElementName'], attrs['Notes'])], response)
- self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
- ['ElementName', 'Notes'],
- VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED)
-
- @mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2.check_ret_val')
- @mock.patch('nova.virt.hyperv.vmutilsv2.VMUtilsV2._get_wmi_obj')
- def _test_create_vm_obj(self, mock_get_wmi_obj, mock_check_ret_val,
- vm_path, dynamic_memory_ratio=1.0):
- mock_vs_man_svc = mock.MagicMock()
- mock_vs_data = mock.MagicMock()
- mock_job = mock.MagicMock()
- fake_job_path = 'fake job path'
- fake_ret_val = 'fake return value'
- _conn = self._vmutils._conn.Msvm_VirtualSystemSettingData
-
- mock_check_ret_val.return_value = mock_job
- _conn.new.return_value = mock_vs_data
- mock_vs_man_svc.DefineSystem.return_value = (fake_job_path,
- vm_path,
- fake_ret_val)
- mock_job.associators.return_value = ['fake vm path']
-
- response = self._vmutils._create_vm_obj(
- vs_man_svc=mock_vs_man_svc,
- vm_name='fake vm',
- notes='fake notes',
- dynamic_memory_ratio=dynamic_memory_ratio)
-
- if not vm_path:
- mock_job.associators.assert_called_once_with(
- self._vmutils._AFFECTED_JOB_ELEMENT_CLASS)
-
- _conn.new.assert_called_once_with()
- self.assertEqual(mock_vs_data.ElementName, 'fake vm')
- mock_vs_man_svc.DefineSystem.assert_called_once_with(
- ResourceSettings=[], ReferenceConfiguration=None,
- SystemSettings=mock_vs_data.GetText_(1))
- mock_check_ret_val.assert_called_once_with(fake_ret_val, fake_job_path)
-
- if dynamic_memory_ratio > 1:
- self.assertFalse(mock_vs_data.VirtualNumaEnabled)
-
- mock_get_wmi_obj.assert_called_with('fake vm path')
-
- self.assertEqual(mock_vs_data.Notes, 'fake notes')
- self.assertEqual(response, mock_get_wmi_obj())
-
- def test_create_vm_obj(self):
- self._test_create_vm_obj(vm_path='fake vm path')
-
- def test_create_vm_obj_no_vm_path(self):
- self._test_create_vm_obj(vm_path=None)
-
- def test_create_vm_obj_dynamic_memory(self):
- self._test_create_vm_obj(vm_path=None, dynamic_memory_ratio=1.1)
-
- def test_list_instances(self):
- vs = mock.MagicMock()
- attrs = {'ElementName': 'fake_name'}
- vs.configure_mock(**attrs)
- self._vmutils._conn.Msvm_VirtualSystemSettingData.return_value = [vs]
- response = self._vmutils.list_instances()
-
- self.assertEqual([(attrs['ElementName'])], response)
- self._vmutils._conn.Msvm_VirtualSystemSettingData.assert_called_with(
- ['ElementName'],
- VirtualSystemType=self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED)
diff --git a/nova/tests/virt/hyperv/test_volumeutils.py b/nova/tests/virt/hyperv/test_volumeutils.py
deleted file mode 100644
index f44ee14594..0000000000
--- a/nova/tests/virt/hyperv/test_volumeutils.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# Copyright 2014 Cloudbase Solutions Srl
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo.config import cfg
-
-from nova.tests.virt.hyperv import test_basevolumeutils
-from nova.virt.hyperv import vmutils
-from nova.virt.hyperv import volumeutils
-
-CONF = cfg.CONF
-CONF.import_opt('volume_attach_retry_count', 'nova.virt.hyperv.volumeops',
- 'hyperv')
-
-
-class VolumeUtilsTestCase(test_basevolumeutils.BaseVolumeUtilsTestCase):
- """Unit tests for the Hyper-V VolumeUtils class."""
-
- _FAKE_PORTAL_ADDR = '10.1.1.1'
- _FAKE_PORTAL_PORT = '3260'
- _FAKE_LUN = 0
- _FAKE_TARGET = 'iqn.2010-10.org.openstack:fake_target'
-
- _FAKE_STDOUT_VALUE = 'The operation completed successfully'
-
- def setUp(self):
- super(VolumeUtilsTestCase, self).setUp()
- self._volutils = volumeutils.VolumeUtils()
- self._volutils._conn_wmi = mock.MagicMock()
- self._volutils._conn_cimv2 = mock.MagicMock()
- self.flags(volume_attach_retry_count=4, group='hyperv')
- self.flags(volume_attach_retry_interval=0, group='hyperv')
-
- def _test_login_target_portal(self, portal_connected):
- fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
- self._FAKE_PORTAL_PORT)
-
- self._volutils.execute = mock.MagicMock()
- if portal_connected:
- exec_output = 'Address and Socket: %s %s' % (
- self._FAKE_PORTAL_ADDR, self._FAKE_PORTAL_PORT)
- else:
- exec_output = ''
-
- self._volutils.execute.return_value = exec_output
-
- self._volutils._login_target_portal(fake_portal)
-
- call_list = self._volutils.execute.call_args_list
- all_call_args = [arg for call in call_list for arg in call[0]]
-
- if portal_connected:
- self.assertIn('RefreshTargetPortal', all_call_args)
- else:
- self.assertIn('AddTargetPortal', all_call_args)
-
- def test_login_connected_portal(self):
- self._test_login_target_portal(True)
-
- def test_login_new_portal(self):
- self._test_login_target_portal(False)
-
- def _test_login_target(self, target_connected, raise_exception=False):
- fake_portal = '%s:%s' % (self._FAKE_PORTAL_ADDR,
- self._FAKE_PORTAL_PORT)
- self._volutils.execute = mock.MagicMock()
- self._volutils._login_target_portal = mock.MagicMock()
-
- if target_connected:
- self._volutils.execute.return_value = self._FAKE_TARGET
- elif raise_exception:
- self._volutils.execute.return_value = ''
- else:
- self._volutils.execute.side_effect = (
- ['', '', '', self._FAKE_TARGET, ''])
-
- if raise_exception:
- self.assertRaises(vmutils.HyperVException,
- self._volutils.login_storage_target,
- self._FAKE_LUN, self._FAKE_TARGET, fake_portal)
- else:
- self._volutils.login_storage_target(self._FAKE_LUN,
- self._FAKE_TARGET,
- fake_portal)
-
- call_list = self._volutils.execute.call_args_list
- all_call_args = [arg for call in call_list for arg in call[0]]
-
- if target_connected:
- self.assertNotIn('qlogintarget', all_call_args)
- else:
- self.assertIn('qlogintarget', all_call_args)
-
- def test_login_connected_target(self):
- self._test_login_target(True)
-
- def test_login_disconncted_target(self):
- self._test_login_target(False)
-
- def test_login_target_exception(self):
- self._test_login_target(False, True)
-
- def _test_execute_wrapper(self, raise_exception):
- fake_cmd = ('iscsicli.exe', 'ListTargetPortals')
-
- if raise_exception:
- output = 'fake error'
- else:
- output = 'The operation completed successfully'
-
- with mock.patch('nova.utils.execute') as fake_execute:
- fake_execute.return_value = (output, None)
-
- if raise_exception:
- self.assertRaises(vmutils.HyperVException,
- self._volutils.execute,
- *fake_cmd)
- else:
- ret_val = self._volutils.execute(*fake_cmd)
- self.assertEqual(output, ret_val)
-
- def test_execute_raise_exception(self):
- self._test_execute_wrapper(True)
-
- def test_execute_exception(self):
- self._test_execute_wrapper(False)
-
- @mock.patch.object(volumeutils, 'utils')
- def test_logout_storage_target(self, mock_utils):
- mock_utils.execute.return_value = (self._FAKE_STDOUT_VALUE,
- mock.sentinel.FAKE_STDERR_VALUE)
- session = mock.MagicMock()
- session.SessionId = mock.sentinel.FAKE_SESSION_ID
- self._volutils._conn_wmi.query.return_value = [session]
-
- self._volutils.logout_storage_target(mock.sentinel.FAKE_IQN)
- mock_utils.execute.assert_called_once_with(
- 'iscsicli.exe', 'logouttarget', mock.sentinel.FAKE_SESSION_ID)
diff --git a/nova/tests/virt/ironic/test_client_wrapper.py b/nova/tests/virt/ironic/test_client_wrapper.py
deleted file mode 100644
index 9b1b923580..0000000000
--- a/nova/tests/virt/ironic/test_client_wrapper.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright 2014 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from ironicclient import client as ironic_client
-from ironicclient import exc as ironic_exception
-import mock
-from oslo.config import cfg
-
-from nova import exception
-from nova import test
-from nova.tests.virt.ironic import utils as ironic_utils
-from nova.virt.ironic import client_wrapper
-
-CONF = cfg.CONF
-
-FAKE_CLIENT = ironic_utils.FakeClient()
-
-
-class IronicClientWrapperTestCase(test.NoDBTestCase):
-
- def setUp(self):
- super(IronicClientWrapperTestCase, self).setUp()
- self.ironicclient = client_wrapper.IronicClientWrapper()
- # Do not waste time sleeping
- cfg.CONF.set_override('api_retry_interval', 0, 'ironic')
-
- @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
- @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
- def test_call_good_no_args(self, mock_get_client, mock_multi_getattr):
- mock_get_client.return_value = FAKE_CLIENT
- self.ironicclient.call("node.list")
- mock_get_client.assert_called_once_with()
- mock_multi_getattr.assert_called_once_with(FAKE_CLIENT, "node.list")
- mock_multi_getattr.return_value.assert_called_once_with()
-
- @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
- @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
- def test_call_good_with_args(self, mock_get_client, mock_multi_getattr):
- mock_get_client.return_value = FAKE_CLIENT
- self.ironicclient.call("node.list", 'test', associated=True)
- mock_get_client.assert_called_once_with()
- mock_multi_getattr.assert_called_once_with(FAKE_CLIENT, "node.list")
- mock_multi_getattr.return_value.assert_called_once_with(
- 'test', associated=True)
-
- @mock.patch.object(ironic_client, 'get_client')
- def test__get_client_no_auth_token(self, mock_ir_cli):
- self.flags(admin_auth_token=None, group='ironic')
- ironicclient = client_wrapper.IronicClientWrapper()
- # dummy call to have _get_client() called
- ironicclient.call("node.list")
- expected = {'os_username': CONF.ironic.admin_username,
- 'os_password': CONF.ironic.admin_password,
- 'os_auth_url': CONF.ironic.admin_url,
- 'os_tenant_name': CONF.ironic.admin_tenant_name,
- 'os_service_type': 'baremetal',
- 'os_endpoint_type': 'public',
- 'ironic_url': CONF.ironic.api_endpoint}
- mock_ir_cli.assert_called_once_with(CONF.ironic.api_version,
- **expected)
-
- @mock.patch.object(ironic_client, 'get_client')
- def test__get_client_with_auth_token(self, mock_ir_cli):
- self.flags(admin_auth_token='fake-token', group='ironic')
- ironicclient = client_wrapper.IronicClientWrapper()
- # dummy call to have _get_client() called
- ironicclient.call("node.list")
- expected = {'os_auth_token': 'fake-token',
- 'ironic_url': CONF.ironic.api_endpoint}
- mock_ir_cli.assert_called_once_with(CONF.ironic.api_version,
- **expected)
-
- @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
- @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
- def test_call_fail(self, mock_get_client, mock_multi_getattr):
- cfg.CONF.set_override('api_max_retries', 2, 'ironic')
- test_obj = mock.Mock()
- test_obj.side_effect = ironic_exception.HTTPServiceUnavailable
- mock_multi_getattr.return_value = test_obj
- mock_get_client.return_value = FAKE_CLIENT
- self.assertRaises(exception.NovaException, self.ironicclient.call,
- "node.list")
- self.assertEqual(2, test_obj.call_count)
-
- @mock.patch.object(client_wrapper.IronicClientWrapper, '_multi_getattr')
- @mock.patch.object(client_wrapper.IronicClientWrapper, '_get_client')
- def test_call_fail_unexpected_exception(self, mock_get_client,
- mock_multi_getattr):
- test_obj = mock.Mock()
- test_obj.side_effect = ironic_exception.HTTPNotFound
- mock_multi_getattr.return_value = test_obj
- mock_get_client.return_value = FAKE_CLIENT
- self.assertRaises(ironic_exception.HTTPNotFound,
- self.ironicclient.call, "node.list")
-
- @mock.patch.object(ironic_client, 'get_client')
- def test__get_client_unauthorized(self, mock_get_client):
- mock_get_client.side_effect = ironic_exception.Unauthorized
- self.assertRaises(exception.NovaException,
- self.ironicclient._get_client)
-
- @mock.patch.object(ironic_client, 'get_client')
- def test__get_client_unexpected_exception(self, mock_get_client):
- mock_get_client.side_effect = ironic_exception.ConnectionRefused
- self.assertRaises(ironic_exception.ConnectionRefused,
- self.ironicclient._get_client)
-
- def test__multi_getattr_good(self):
- response = self.ironicclient._multi_getattr(FAKE_CLIENT, "node.list")
- self.assertEqual(FAKE_CLIENT.node.list, response)
-
- def test__multi_getattr_fail(self):
- self.assertRaises(AttributeError, self.ironicclient._multi_getattr,
- FAKE_CLIENT, "nonexistent")
diff --git a/nova/tests/virt/ironic/test_driver.py b/nova/tests/virt/ironic/test_driver.py
deleted file mode 100644
index 32d1bb7a15..0000000000
--- a/nova/tests/virt/ironic/test_driver.py
+++ /dev/null
@@ -1,1268 +0,0 @@
-# Copyright 2014 Red Hat, Inc.
-# Copyright 2013 Hewlett-Packard Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Tests for the ironic driver."""
-
-from ironicclient import exc as ironic_exception
-import mock
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-
-from nova.compute import power_state as nova_states
-from nova.compute import task_states
-from nova import context as nova_context
-from nova import exception
-from nova import objects
-from nova.openstack.common import loopingcall
-from nova.openstack.common import uuidutils
-from nova import test
-from nova.tests import fake_instance
-from nova.tests import utils
-from nova.tests.virt.ironic import utils as ironic_utils
-from nova.virt import driver
-from nova.virt import fake
-from nova.virt import firewall
-from nova.virt.ironic import client_wrapper as cw
-from nova.virt.ironic import driver as ironic_driver
-from nova.virt.ironic import ironic_states
-
-
-CONF = cfg.CONF
-
-IRONIC_FLAGS = dict(
- api_version=1,
- group='ironic',
-)
-
-FAKE_CLIENT = ironic_utils.FakeClient()
-
-
-class FakeClientWrapper(cw.IronicClientWrapper):
- def _get_client(self):
- return FAKE_CLIENT
-
-
-class FakeLoopingCall(object):
- def __init__(self):
- self.wait = mock.MagicMock()
- self.start = mock.MagicMock()
- self.start.return_value = self
-
-
-def _get_properties():
- return {'cpus': 2,
- 'memory_mb': 512,
- 'local_gb': 10,
- 'cpu_arch': 'x86_64'}
-
-
-def _get_stats():
- return {'cpu_arch': 'x86_64'}
-
-
-FAKE_CLIENT_WRAPPER = FakeClientWrapper()
-
-
-@mock.patch.object(cw, 'IronicClientWrapper', lambda *_: FAKE_CLIENT_WRAPPER)
-class IronicDriverTestCase(test.NoDBTestCase):
-
- def setUp(self):
- super(IronicDriverTestCase, self).setUp()
- self.flags(**IRONIC_FLAGS)
- self.driver = ironic_driver.IronicDriver(None)
- self.driver.virtapi = fake.FakeVirtAPI()
- self.ctx = nova_context.get_admin_context()
-
- # mock retries configs to avoid sleeps and make tests run quicker
- CONF.set_default('api_max_retries', default=1, group='ironic')
- CONF.set_default('api_retry_interval', default=0, group='ironic')
-
- def test_public_api_signatures(self):
- self.assertPublicAPISignatures(driver.ComputeDriver(None), self.driver)
-
- def test_validate_driver_loading(self):
- self.assertIsInstance(self.driver, ironic_driver.IronicDriver)
-
- def test__get_hypervisor_type(self):
- self.assertEqual('ironic', self.driver._get_hypervisor_type())
-
- def test__get_hypervisor_version(self):
- self.assertEqual(1, self.driver._get_hypervisor_version())
-
- @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
- def test__validate_instance_and_node(self, mock_gbiui):
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- instance_uuid = uuidutils.generate_uuid()
- node = ironic_utils.get_test_node(uuid=node_uuid,
- instance_uuid=instance_uuid)
- instance = fake_instance.fake_instance_obj(self.ctx,
- uuid=instance_uuid)
- ironicclient = cw.IronicClientWrapper()
-
- mock_gbiui.return_value = node
- result = ironic_driver._validate_instance_and_node(ironicclient,
- instance)
- self.assertEqual(result.uuid, node_uuid)
-
- @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
- def test__validate_instance_and_node_failed(self, mock_gbiui):
- ironicclient = cw.IronicClientWrapper()
- mock_gbiui.side_effect = ironic_exception.NotFound()
- instance_uuid = uuidutils.generate_uuid(),
- instance = fake_instance.fake_instance_obj(self.ctx,
- uuid=instance_uuid)
- self.assertRaises(exception.InstanceNotFound,
- ironic_driver._validate_instance_and_node,
- ironicclient, instance)
-
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
- def test__wait_for_active_pass(self, fake_validate):
- instance = fake_instance.fake_instance_obj(self.ctx,
- uuid=uuidutils.generate_uuid())
- node = ironic_utils.get_test_node(
- provision_state=ironic_states.DEPLOYING)
-
- fake_validate.return_value = node
- self.driver._wait_for_active(FAKE_CLIENT, instance)
- self.assertTrue(fake_validate.called)
-
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
- def test__wait_for_active_done(self, fake_validate):
- instance = fake_instance.fake_instance_obj(self.ctx,
- uuid=uuidutils.generate_uuid())
- node = ironic_utils.get_test_node(
- provision_state=ironic_states.ACTIVE)
-
- fake_validate.return_value = node
- self.assertRaises(loopingcall.LoopingCallDone,
- self.driver._wait_for_active,
- FAKE_CLIENT, instance)
- self.assertTrue(fake_validate.called)
-
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
- def test__wait_for_active_fail(self, fake_validate):
- instance = fake_instance.fake_instance_obj(self.ctx,
- uuid=uuidutils.generate_uuid())
- node = ironic_utils.get_test_node(
- provision_state=ironic_states.DEPLOYFAIL)
-
- fake_validate.return_value = node
- self.assertRaises(exception.InstanceDeployFailure,
- self.driver._wait_for_active,
- FAKE_CLIENT, instance)
- self.assertTrue(fake_validate.called)
-
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
- def test__wait_for_power_state_pass(self, fake_validate):
- instance = fake_instance.fake_instance_obj(self.ctx,
- uuid=uuidutils.generate_uuid())
- node = ironic_utils.get_test_node(
- target_power_state=ironic_states.POWER_OFF)
-
- fake_validate.return_value = node
- self.driver._wait_for_power_state(
- FAKE_CLIENT, instance, 'fake message')
- self.assertTrue(fake_validate.called)
-
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
- def test__wait_for_power_state_ok(self, fake_validate):
- instance = fake_instance.fake_instance_obj(self.ctx,
- uuid=uuidutils.generate_uuid())
- node = ironic_utils.get_test_node(
- target_power_state=ironic_states.NOSTATE)
-
- fake_validate.return_value = node
- self.assertRaises(loopingcall.LoopingCallDone,
- self.driver._wait_for_power_state,
- FAKE_CLIENT, instance, 'fake message')
- self.assertTrue(fake_validate.called)
-
- def test__node_resource(self):
- node_uuid = uuidutils.generate_uuid()
- instance_uuid = uuidutils.generate_uuid()
- props = _get_properties()
- stats = _get_stats()
- node = ironic_utils.get_test_node(uuid=node_uuid,
- instance_uuid=instance_uuid,
- properties=props)
-
- result = self.driver._node_resource(node)
- self.assertEqual(props['cpus'], result['vcpus'])
- self.assertEqual(props['cpus'], result['vcpus_used'])
- self.assertEqual(props['memory_mb'], result['memory_mb'])
- self.assertEqual(props['memory_mb'], result['memory_mb_used'])
- self.assertEqual(props['local_gb'], result['local_gb'])
- self.assertEqual(props['local_gb'], result['local_gb_used'])
- self.assertEqual(node_uuid, result['hypervisor_hostname'])
- self.assertEqual(stats, jsonutils.loads(result['stats']))
-
- def test__node_resource_canonicalizes_arch(self):
- node_uuid = uuidutils.generate_uuid()
- props = _get_properties()
- props['cpu_arch'] = 'i386'
- node = ironic_utils.get_test_node(uuid=node_uuid, properties=props)
-
- result = self.driver._node_resource(node)
- self.assertEqual('i686',
- jsonutils.loads(result['supported_instances'])[0][0])
- self.assertEqual('i386',
- jsonutils.loads(result['stats'])['cpu_arch'])
-
- def test__node_resource_unknown_arch(self):
- node_uuid = uuidutils.generate_uuid()
- props = _get_properties()
- del props['cpu_arch']
- node = ironic_utils.get_test_node(uuid=node_uuid, properties=props)
-
- result = self.driver._node_resource(node)
- self.assertEqual([], jsonutils.loads(result['supported_instances']))
-
- def test__node_resource_exposes_capabilities(self):
- props = _get_properties()
- props['capabilities'] = 'test:capability'
- node = ironic_utils.get_test_node(properties=props)
- result = self.driver._node_resource(node)
- stats = jsonutils.loads(result['stats'])
- self.assertIsNone(stats.get('capabilities'))
- self.assertEqual('capability', stats.get('test'))
-
- def test__node_resource_no_capabilities(self):
- props = _get_properties()
- props['capabilities'] = None
- node = ironic_utils.get_test_node(properties=props)
- result = self.driver._node_resource(node)
- self.assertIsNone(jsonutils.loads(result['stats']).get('capabilities'))
-
- def test__node_resource_malformed_capabilities(self):
- props = _get_properties()
- props['capabilities'] = 'test:capability,:no_key,no_val:'
- node = ironic_utils.get_test_node(properties=props)
- result = self.driver._node_resource(node)
- stats = jsonutils.loads(result['stats'])
- self.assertEqual('capability', stats.get('test'))
-
- def test__node_resource_no_instance_uuid(self):
- node_uuid = uuidutils.generate_uuid()
- props = _get_properties()
- stats = _get_stats()
- node = ironic_utils.get_test_node(uuid=node_uuid,
- instance_uuid=None,
- power_state=ironic_states.POWER_OFF,
- properties=props)
-
- result = self.driver._node_resource(node)
- self.assertEqual(props['cpus'], result['vcpus'])
- self.assertEqual(0, result['vcpus_used'])
- self.assertEqual(props['memory_mb'], result['memory_mb'])
- self.assertEqual(0, result['memory_mb_used'])
- self.assertEqual(props['local_gb'], result['local_gb'])
- self.assertEqual(0, result['local_gb_used'])
- self.assertEqual(node_uuid, result['hypervisor_hostname'])
- self.assertEqual(stats, jsonutils.loads(result['stats']))
-
- @mock.patch.object(ironic_driver.IronicDriver,
- '_node_resources_unavailable')
- def test__node_resource_unavailable_node_res(self, mock_res_unavail):
- mock_res_unavail.return_value = True
- node_uuid = uuidutils.generate_uuid()
- props = _get_properties()
- stats = _get_stats()
- node = ironic_utils.get_test_node(uuid=node_uuid,
- instance_uuid=None,
- properties=props)
-
- result = self.driver._node_resource(node)
- self.assertEqual(0, result['vcpus'])
- self.assertEqual(0, result['vcpus_used'])
- self.assertEqual(0, result['memory_mb'])
- self.assertEqual(0, result['memory_mb_used'])
- self.assertEqual(0, result['local_gb'])
- self.assertEqual(0, result['local_gb_used'])
- self.assertEqual(node_uuid, result['hypervisor_hostname'])
- self.assertEqual(stats, jsonutils.loads(result['stats']))
-
- @mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
- create=True)
- @mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
- create=True)
- @mock.patch.object(firewall.NoopFirewallDriver, 'apply_instance_filter',
- create=True)
- def test__start_firewall(self, mock_aif, mock_sbf, mock_pif):
- fake_inst = 'fake-inst'
- fake_net_info = utils.get_test_network_info()
- self.driver._start_firewall(fake_inst, fake_net_info)
-
- mock_aif.assert_called_once_with(fake_inst, fake_net_info)
- mock_sbf.assert_called_once_with(fake_inst, fake_net_info)
- mock_pif.assert_called_once_with(fake_inst, fake_net_info)
-
- @mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
- create=True)
- def test__stop_firewall(self, mock_ui):
- fake_inst = 'fake-inst'
- fake_net_info = utils.get_test_network_info()
- self.driver._stop_firewall(fake_inst, fake_net_info)
- mock_ui.assert_called_once_with(fake_inst, fake_net_info)
-
- @mock.patch.object(cw.IronicClientWrapper, 'call')
- def test_instance_exists(self, mock_call):
- instance_uuid = 'fake-uuid'
- instance = fake_instance.fake_instance_obj(self.ctx,
- uuid=instance_uuid)
- self.assertTrue(self.driver.instance_exists(instance))
- mock_call.assert_called_once_with('node.get_by_instance_uuid',
- instance_uuid)
-
- @mock.patch.object(cw.IronicClientWrapper, 'call')
- def test_instance_exists_fail(self, mock_call):
- mock_call.side_effect = ironic_exception.NotFound
- instance_uuid = 'fake-uuid'
- instance = fake_instance.fake_instance_obj(self.ctx,
- uuid=instance_uuid)
- self.assertFalse(self.driver.instance_exists(instance))
- mock_call.assert_called_once_with('node.get_by_instance_uuid',
- instance_uuid)
-
- @mock.patch.object(cw.IronicClientWrapper, 'call')
- @mock.patch.object(objects.Instance, 'get_by_uuid')
- def test_list_instances(self, mock_inst_by_uuid, mock_call):
- nodes = []
- instances = []
- for i in range(2):
- uuid = uuidutils.generate_uuid()
- instances.append(fake_instance.fake_instance_obj(self.ctx,
- id=i,
- uuid=uuid))
- nodes.append(ironic_utils.get_test_node(instance_uuid=uuid))
-
- mock_inst_by_uuid.side_effect = instances
- mock_call.return_value = nodes
-
- response = self.driver.list_instances()
- mock_call.assert_called_with("node.list", associated=True, limit=0)
- expected_calls = [mock.call(mock.ANY, instances[0].uuid),
- mock.call(mock.ANY, instances[1].uuid)]
- mock_inst_by_uuid.assert_has_calls(expected_calls)
- self.assertEqual(['instance-00000000', 'instance-00000001'],
- sorted(response))
-
- @mock.patch.object(cw.IronicClientWrapper, 'call')
- def test_list_instance_uuids(self, mock_call):
- num_nodes = 2
- nodes = []
- for n in range(num_nodes):
- nodes.append(ironic_utils.get_test_node(
- instance_uuid=uuidutils.generate_uuid()))
-
- mock_call.return_value = nodes
- uuids = self.driver.list_instance_uuids()
- mock_call.assert_called_with('node.list', associated=True, limit=0)
- expected = [n.instance_uuid for n in nodes]
- self.assertEqual(sorted(expected), sorted(uuids))
-
- @mock.patch.object(FAKE_CLIENT.node, 'list')
- @mock.patch.object(FAKE_CLIENT.node, 'get')
- def test_node_is_available_empty_cache_empty_list(self, mock_get,
- mock_list):
- node = ironic_utils.get_test_node()
- mock_get.return_value = node
- mock_list.return_value = []
- self.assertTrue(self.driver.node_is_available(node.uuid))
- mock_get.assert_called_with(node.uuid)
- mock_list.assert_called_with(detail=True, limit=0)
-
- mock_get.side_effect = ironic_exception.NotFound
- self.assertFalse(self.driver.node_is_available(node.uuid))
-
- @mock.patch.object(FAKE_CLIENT.node, 'list')
- @mock.patch.object(FAKE_CLIENT.node, 'get')
- def test_node_is_available_empty_cache(self, mock_get, mock_list):
- node = ironic_utils.get_test_node()
- mock_get.return_value = node
- mock_list.return_value = [node]
- self.assertTrue(self.driver.node_is_available(node.uuid))
- mock_list.assert_called_with(detail=True, limit=0)
- self.assertEqual(0, mock_get.call_count)
-
- @mock.patch.object(FAKE_CLIENT.node, 'list')
- @mock.patch.object(FAKE_CLIENT.node, 'get')
- def test_node_is_available_with_cache(self, mock_get, mock_list):
- node = ironic_utils.get_test_node()
- mock_get.return_value = node
- mock_list.return_value = [node]
- # populate the cache
- self.driver.get_available_nodes(refresh=True)
- # prove that zero calls are made after populating cache
- mock_list.reset_mock()
- self.assertTrue(self.driver.node_is_available(node.uuid))
- self.assertEqual(0, mock_list.call_count)
- self.assertEqual(0, mock_get.call_count)
-
- def test__node_resources_unavailable(self):
- node_dicts = [
- # a node in maintenance /w no instance and power OFF
- {'uuid': uuidutils.generate_uuid(),
- 'maintenance': True,
- 'power_state': ironic_states.POWER_OFF},
- # a node in maintenance /w no instance and ERROR power state
- {'uuid': uuidutils.generate_uuid(),
- 'maintenance': True,
- 'power_state': ironic_states.ERROR},
- # a node not in maintenance /w no instance and bad power state
- {'uuid': uuidutils.generate_uuid(),
- 'power_state': ironic_states.NOSTATE},
- ]
- for n in node_dicts:
- node = ironic_utils.get_test_node(**n)
- self.assertTrue(self.driver._node_resources_unavailable(node))
-
- avail_node = ironic_utils.get_test_node(
- power_state=ironic_states.POWER_OFF)
- self.assertFalse(self.driver._node_resources_unavailable(avail_node))
-
- @mock.patch.object(FAKE_CLIENT.node, 'list')
- def test_get_available_nodes(self, mock_list):
- node_dicts = [
- # a node in maintenance /w no instance and power OFF
- {'uuid': uuidutils.generate_uuid(),
- 'maintenance': True,
- 'power_state': ironic_states.POWER_OFF},
- # a node /w instance and power ON
- {'uuid': uuidutils.generate_uuid(),
- 'instance_uuid': uuidutils.generate_uuid(),
- 'power_state': ironic_states.POWER_ON},
- # a node not in maintenance /w no instance and bad power state
- {'uuid': uuidutils.generate_uuid(),
- 'power_state': ironic_states.ERROR},
- ]
- nodes = [ironic_utils.get_test_node(**n) for n in node_dicts]
- mock_list.return_value = nodes
- available_nodes = self.driver.get_available_nodes()
- expected_uuids = [n['uuid'] for n in node_dicts]
- self.assertEqual(sorted(expected_uuids), sorted(available_nodes))
-
- @mock.patch.object(FAKE_CLIENT.node, 'get')
- @mock.patch.object(FAKE_CLIENT.node, 'list')
- @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
- def test_get_available_resource(self, mock_nr, mock_list, mock_get):
- node = ironic_utils.get_test_node()
- node_2 = ironic_utils.get_test_node(uuid=uuidutils.generate_uuid())
- fake_resource = 'fake-resource'
- mock_get.return_value = node
- # ensure cache gets populated without the node we want
- mock_list.return_value = [node_2]
- mock_nr.return_value = fake_resource
-
- result = self.driver.get_available_resource(node.uuid)
- self.assertEqual(fake_resource, result)
- mock_nr.assert_called_once_with(node)
- mock_get.assert_called_once_with(node.uuid)
-
- @mock.patch.object(FAKE_CLIENT.node, 'get')
- @mock.patch.object(FAKE_CLIENT.node, 'list')
- @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
- def test_get_available_resource_with_cache(self, mock_nr, mock_list,
- mock_get):
- node = ironic_utils.get_test_node()
- fake_resource = 'fake-resource'
- mock_list.return_value = [node]
- mock_nr.return_value = fake_resource
- # populate the cache
- self.driver.get_available_nodes(refresh=True)
- mock_list.reset_mock()
-
- result = self.driver.get_available_resource(node.uuid)
- self.assertEqual(fake_resource, result)
- self.assertEqual(0, mock_list.call_count)
- self.assertEqual(0, mock_get.call_count)
- mock_nr.assert_called_once_with(node)
-
- @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
- def test_get_info(self, mock_gbiu):
- instance_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- properties = {'memory_mb': 512, 'cpus': 2}
- power_state = ironic_states.POWER_ON
- node = ironic_utils.get_test_node(instance_uuid=instance_uuid,
- properties=properties,
- power_state=power_state)
-
- mock_gbiu.return_value = node
-
- # ironic_states.POWER_ON should be mapped to
- # nova_states.RUNNING
- memory_kib = properties['memory_mb'] * 1024
- expected = {'state': nova_states.RUNNING,
- 'max_mem': memory_kib,
- 'mem': memory_kib,
- 'num_cpu': properties['cpus'],
- 'cpu_time': 0}
- instance = fake_instance.fake_instance_obj('fake-context',
- uuid=instance_uuid)
- result = self.driver.get_info(instance)
- self.assertEqual(expected, result)
-
- @mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
- def test_get_info_http_not_found(self, mock_gbiu):
- mock_gbiu.side_effect = ironic_exception.NotFound()
-
- expected = {'state': nova_states.NOSTATE,
- 'max_mem': 0,
- 'mem': 0,
- 'num_cpu': 0,
- 'cpu_time': 0}
- instance = fake_instance.fake_instance_obj(
- self.ctx, uuid=uuidutils.generate_uuid())
- result = self.driver.get_info(instance)
- self.assertEqual(expected, result)
-
- @mock.patch.object(FAKE_CLIENT, 'node')
- def test_macs_for_instance(self, mock_node):
- node = ironic_utils.get_test_node()
- port = ironic_utils.get_test_port()
- mock_node.get.return_value = node
- mock_node.list_ports.return_value = [port]
- instance = fake_instance.fake_instance_obj(self.ctx,
- node=node.uuid)
- result = self.driver.macs_for_instance(instance)
- self.assertEqual(set([port.address]), result)
- mock_node.list_ports.assert_called_once_with(node.uuid)
-
- @mock.patch.object(FAKE_CLIENT.node, 'get')
- def test_macs_for_instance_http_not_found(self, mock_get):
- mock_get.side_effect = ironic_exception.NotFound()
-
- instance = fake_instance.fake_instance_obj(
- self.ctx, node=uuidutils.generate_uuid())
- result = self.driver.macs_for_instance(instance)
- self.assertIsNone(result)
-
- @mock.patch.object(objects.Instance, 'save')
- @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
- @mock.patch.object(FAKE_CLIENT, 'node')
- @mock.patch.object(objects.Flavor, 'get_by_id')
- @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
- @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
- @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
- @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
- def test_spawn(self, mock_sf, mock_pvifs, mock_adf, mock_wait_active,
- mock_fg_bid, mock_node, mock_looping, mock_save):
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
- fake_flavor = {'ephemeral_gb': 0}
-
- mock_node.get.return_value = node
- mock_node.validate.return_value = ironic_utils.get_test_validation()
- mock_node.get_by_instance_uuid.return_value = node
- mock_node.set_provision_state.return_value = mock.MagicMock()
- mock_fg_bid.return_value = fake_flavor
-
- fake_looping_call = FakeLoopingCall()
- mock_looping.return_value = fake_looping_call
-
- self.driver.spawn(self.ctx, instance, None, [], None)
-
- mock_node.get.assert_called_once_with(node_uuid)
- mock_node.validate.assert_called_once_with(node_uuid)
- mock_fg_bid.assert_called_once_with(self.ctx,
- instance['instance_type_id'])
- mock_adf.assert_called_once_with(node, instance, None, fake_flavor)
- mock_pvifs.assert_called_once_with(node, instance, None)
- mock_sf.assert_called_once_with(instance, None)
- mock_node.set_provision_state.assert_called_once_with(node_uuid,
- 'active')
-
- self.assertIsNone(instance['default_ephemeral_device'])
- self.assertFalse(mock_save.called)
-
- mock_looping.assert_called_once_with(mock_wait_active,
- FAKE_CLIENT_WRAPPER,
- instance)
- fake_looping_call.start.assert_called_once_with(
- interval=CONF.ironic.api_retry_interval)
- fake_looping_call.wait.assert_called_once_with()
-
- @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
- @mock.patch.object(FAKE_CLIENT, 'node')
- @mock.patch.object(objects.Flavor, 'get_by_id')
- @mock.patch.object(ironic_driver.IronicDriver, 'destroy')
- @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
- @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
- @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
- @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
- def test_spawn_destroyed_after_failure(self, mock_sf, mock_pvifs, mock_adf,
- mock_wait_active, mock_destroy,
- mock_fg_bid, mock_node,
- mock_looping):
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
- fake_flavor = {'ephemeral_gb': 0}
-
- mock_node.get.return_value = node
- mock_node.validate.return_value = ironic_utils.get_test_validation()
- mock_node.get_by_instance_uuid.return_value = node
- mock_node.set_provision_state.return_value = mock.MagicMock()
- mock_fg_bid.return_value = fake_flavor
-
- fake_looping_call = FakeLoopingCall()
- mock_looping.return_value = fake_looping_call
-
- deploy_exc = exception.InstanceDeployFailure('foo')
- fake_looping_call.wait.side_effect = deploy_exc
- self.assertRaises(
- exception.InstanceDeployFailure,
- self.driver.spawn, self.ctx, instance, None, [], None)
- mock_destroy.assert_called_once_with(self.ctx, instance, None)
-
- @mock.patch.object(FAKE_CLIENT.node, 'update')
- def test__add_driver_fields_good(self, mock_update):
- node = ironic_utils.get_test_node(driver='fake')
- instance = fake_instance.fake_instance_obj(self.ctx,
- node=node.uuid)
- image_meta = ironic_utils.get_test_image_meta()
- flavor = ironic_utils.get_test_flavor()
- self.driver._add_driver_fields(node, instance, image_meta, flavor)
- expected_patch = [{'path': '/instance_info/image_source', 'op': 'add',
- 'value': image_meta['id']},
- {'path': '/instance_info/root_gb', 'op': 'add',
- 'value': str(instance.root_gb)},
- {'path': '/instance_info/swap_mb', 'op': 'add',
- 'value': str(flavor['swap'])},
- {'path': '/instance_uuid', 'op': 'add',
- 'value': instance.uuid}]
- mock_update.assert_called_once_with(node.uuid, expected_patch)
-
- @mock.patch.object(FAKE_CLIENT.node, 'update')
- def test__add_driver_fields_fail(self, mock_update):
- mock_update.side_effect = ironic_exception.BadRequest()
- node = ironic_utils.get_test_node(driver='fake')
- instance = fake_instance.fake_instance_obj(self.ctx,
- node=node.uuid)
- image_meta = ironic_utils.get_test_image_meta()
- flavor = ironic_utils.get_test_flavor()
- self.assertRaises(exception.InstanceDeployFailure,
- self.driver._add_driver_fields,
- node, instance, image_meta, flavor)
-
- @mock.patch.object(FAKE_CLIENT.node, 'update')
- def test__cleanup_deploy_good_with_flavor(self, mock_update):
- node = ironic_utils.get_test_node(driver='fake',
- instance_uuid='fake-id')
- instance = fake_instance.fake_instance_obj(self.ctx,
- node=node.uuid)
- flavor = ironic_utils.get_test_flavor(extra_specs={})
- self.driver._cleanup_deploy(self.ctx, node, instance, None,
- flavor=flavor)
- expected_patch = [{'path': '/instance_uuid', 'op': 'remove'}]
- mock_update.assert_called_once_with(node.uuid, expected_patch)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- @mock.patch.object(FAKE_CLIENT.node, 'update')
- def test__cleanup_deploy_without_flavor(self, mock_update, mock_flavor):
- mock_flavor.return_value = ironic_utils.get_test_flavor(extra_specs={})
- node = ironic_utils.get_test_node(driver='fake',
- instance_uuid='fake-id')
- instance = fake_instance.fake_instance_obj(self.ctx,
- node=node.uuid)
- self.driver._cleanup_deploy(self.ctx, node, instance, None)
- expected_patch = [{'path': '/instance_uuid', 'op': 'remove'}]
- mock_update.assert_called_once_with(node.uuid, expected_patch)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- @mock.patch.object(FAKE_CLIENT.node, 'update')
- def test__cleanup_deploy_fail(self, mock_update, mock_flavor):
- mock_flavor.return_value = ironic_utils.get_test_flavor(extra_specs={})
- mock_update.side_effect = ironic_exception.BadRequest()
- node = ironic_utils.get_test_node(driver='fake',
- instance_uuid='fake-id')
- instance = fake_instance.fake_instance_obj(self.ctx,
- node=node.uuid)
- self.assertRaises(exception.InstanceTerminationFailure,
- self.driver._cleanup_deploy,
- self.ctx, node, instance, None)
-
- @mock.patch.object(FAKE_CLIENT, 'node')
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_spawn_node_driver_validation_fail(self, mock_flavor, mock_node):
- mock_flavor.return_value = ironic_utils.get_test_flavor()
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
-
- mock_node.validate.return_value = ironic_utils.get_test_validation(
- power=False, deploy=False)
- mock_node.get.return_value = node
- image_meta = ironic_utils.get_test_image_meta()
-
- self.assertRaises(exception.ValidationError, self.driver.spawn,
- self.ctx, instance, image_meta, [], None)
- mock_node.get.assert_called_once_with(node_uuid)
- mock_node.validate.assert_called_once_with(node_uuid)
- mock_flavor.assert_called_with(mock.ANY, instance['instance_type_id'])
-
- @mock.patch.object(FAKE_CLIENT, 'node')
- @mock.patch.object(objects.Flavor, 'get_by_id')
- @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
- @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
- @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
- def test_spawn_node_prepare_for_deploy_fail(self, mock_cleanup_deploy,
- mock_pvifs, mock_sf,
- mock_flavor, mock_node):
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
- mock_node.get.return_value = node
- mock_node.validate.return_value = ironic_utils.get_test_validation()
- flavor = ironic_utils.get_test_flavor()
- mock_flavor.return_value = flavor
- image_meta = ironic_utils.get_test_image_meta()
-
- class TestException(Exception):
- pass
-
- mock_sf.side_effect = TestException()
- self.assertRaises(TestException, self.driver.spawn,
- self.ctx, instance, image_meta, [], None)
-
- mock_node.get.assert_called_once_with(node_uuid)
- mock_node.validate.assert_called_once_with(node_uuid)
- mock_flavor.assert_called_once_with(self.ctx,
- instance['instance_type_id'])
- mock_cleanup_deploy.assert_called_with(self.ctx, node, instance, None,
- flavor=flavor)
-
- @mock.patch.object(FAKE_CLIENT, 'node')
- @mock.patch.object(objects.Flavor, 'get_by_id')
- @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
- @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
- @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
- def test_spawn_node_trigger_deploy_fail(self, mock_cleanup_deploy,
- mock_pvifs, mock_sf,
- mock_flavor, mock_node):
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
- flavor = ironic_utils.get_test_flavor()
- mock_flavor.return_value = flavor
- image_meta = ironic_utils.get_test_image_meta()
-
- mock_node.get.return_value = node
- mock_node.validate.return_value = ironic_utils.get_test_validation()
-
- mock_node.set_provision_state.side_effect = exception.NovaException()
- self.assertRaises(exception.NovaException, self.driver.spawn,
- self.ctx, instance, image_meta, [], None)
-
- mock_node.get.assert_called_once_with(node_uuid)
- mock_node.validate.assert_called_once_with(node_uuid)
- mock_flavor.assert_called_once_with(self.ctx,
- instance['instance_type_id'])
- mock_cleanup_deploy.assert_called_once_with(self.ctx, node,
- instance, None,
- flavor=flavor)
-
- @mock.patch.object(FAKE_CLIENT, 'node')
- @mock.patch.object(objects.Flavor, 'get_by_id')
- @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
- @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
- @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
- def test_spawn_node_trigger_deploy_fail2(self, mock_cleanup_deploy,
- mock_pvifs, mock_sf,
- mock_flavor, mock_node):
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
- flavor = ironic_utils.get_test_flavor()
- mock_flavor.return_value = flavor
- image_meta = ironic_utils.get_test_image_meta()
-
- mock_node.get.return_value = node
- mock_node.validate.return_value = ironic_utils.get_test_validation()
- mock_node.set_provision_state.side_effect = ironic_exception.BadRequest
- self.assertRaises(ironic_exception.BadRequest,
- self.driver.spawn,
- self.ctx, instance, image_meta, [], None)
-
- mock_node.get.assert_called_once_with(node_uuid)
- mock_node.validate.assert_called_once_with(node_uuid)
- mock_flavor.assert_called_once_with(self.ctx,
- instance['instance_type_id'])
- mock_cleanup_deploy.assert_called_once_with(self.ctx, node,
- instance, None,
- flavor=flavor)
-
- @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
- @mock.patch.object(FAKE_CLIENT, 'node')
- @mock.patch.object(objects.Flavor, 'get_by_id')
- @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
- @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
- @mock.patch.object(ironic_driver.IronicDriver, 'destroy')
- def test_spawn_node_trigger_deploy_fail3(self, mock_destroy,
- mock_pvifs, mock_sf,
- mock_flavor, mock_node,
- mock_looping):
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- fake_net_info = utils.get_test_network_info()
- node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
- mock_flavor.return_value = ironic_utils.get_test_flavor()
- image_meta = ironic_utils.get_test_image_meta()
-
- mock_node.get.return_value = node
- mock_node.validate.return_value = ironic_utils.get_test_validation()
-
- fake_looping_call = FakeLoopingCall()
- mock_looping.return_value = fake_looping_call
-
- fake_looping_call.wait.side_effect = ironic_exception.BadRequest
- fake_net_info = utils.get_test_network_info()
- self.assertRaises(ironic_exception.BadRequest,
- self.driver.spawn, self.ctx, instance,
- image_meta, [], None, fake_net_info)
- mock_destroy.assert_called_once_with(self.ctx, instance,
- fake_net_info)
-
- @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
- @mock.patch.object(objects.Instance, 'save')
- @mock.patch.object(FAKE_CLIENT, 'node')
- @mock.patch.object(objects.Flavor, 'get_by_id')
- @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
- @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
- @mock.patch.object(ironic_driver.IronicDriver, '_start_firewall')
- def test_spawn_sets_default_ephemeral_device(self, mock_sf, mock_pvifs,
- mock_wait, mock_flavor,
- mock_node, mock_save,
- mock_looping):
- mock_flavor.return_value = ironic_utils.get_test_flavor(ephemeral_gb=1)
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid)
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
- mock_node.get_by_instance_uuid.return_value = node
- mock_node.set_provision_state.return_value = mock.MagicMock()
- image_meta = ironic_utils.get_test_image_meta()
-
- self.driver.spawn(self.ctx, instance, image_meta, [], None)
- mock_flavor.assert_called_once_with(self.ctx,
- instance['instance_type_id'])
- self.assertTrue(mock_save.called)
- self.assertEqual('/dev/sda1', instance['default_ephemeral_device'])
-
- @mock.patch.object(FAKE_CLIENT, 'node')
- @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
- def test_destroy(self, mock_cleanup_deploy, mock_node):
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- network_info = 'foo'
-
- node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
- provision_state=ironic_states.ACTIVE)
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
-
- def fake_set_provision_state(*_):
- node.provision_state = None
-
- mock_node.get_by_instance_uuid.return_value = node
- mock_node.set_provision_state.side_effect = fake_set_provision_state
- self.driver.destroy(self.ctx, instance, network_info, None)
- mock_node.set_provision_state.assert_called_once_with(node_uuid,
- 'deleted')
- mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
- mock_cleanup_deploy.assert_called_with(self.ctx, node,
- instance, network_info)
-
- @mock.patch.object(FAKE_CLIENT, 'node')
- @mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
- def test_destroy_ignore_unexpected_state(self, mock_cleanup_deploy,
- mock_node):
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- network_info = 'foo'
-
- node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
- provision_state=ironic_states.DELETING)
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
-
- mock_node.get_by_instance_uuid.return_value = node
- self.driver.destroy(self.ctx, instance, network_info, None)
- self.assertFalse(mock_node.set_provision_state.called)
- mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
- mock_cleanup_deploy.assert_called_with(self.ctx, node, instance,
- network_info)
-
- @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
- def test_destroy_trigger_undeploy_fail(self, fake_validate, mock_sps):
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
- provision_state=ironic_states.ACTIVE)
- fake_validate.return_value = node
- instance = fake_instance.fake_instance_obj(self.ctx,
- node=node_uuid)
- mock_sps.side_effect = exception.NovaException()
- self.assertRaises(exception.NovaException, self.driver.destroy,
- self.ctx, instance, None, None)
-
- @mock.patch.object(FAKE_CLIENT, 'node')
- def test_destroy_unprovision_fail(self, mock_node):
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
- provision_state=ironic_states.ACTIVE)
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
-
- def fake_set_provision_state(*_):
- node.provision_state = ironic_states.ERROR
-
- mock_node.get_by_instance_uuid.return_value = node
- self.assertRaises(exception.NovaException, self.driver.destroy,
- self.ctx, instance, None, None)
- mock_node.set_provision_state.assert_called_once_with(node_uuid,
- 'deleted')
-
- @mock.patch.object(FAKE_CLIENT, 'node')
- def test_destroy_unassociate_fail(self, mock_node):
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
- provision_state=ironic_states.ACTIVE)
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
-
- mock_node.get_by_instance_uuid.return_value = node
- mock_node.update.side_effect = exception.NovaException()
- self.assertRaises(exception.NovaException, self.driver.destroy,
- self.ctx, instance, None, None)
- mock_node.set_provision_state.assert_called_once_with(node_uuid,
- 'deleted')
- mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
-
- @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
- @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
- def test_reboot(self, mock_sp, fake_validate, mock_looping):
- node = ironic_utils.get_test_node()
- fake_validate.side_effect = [node, node]
-
- fake_looping_call = FakeLoopingCall()
- mock_looping.return_value = fake_looping_call
- instance = fake_instance.fake_instance_obj(self.ctx,
- node=node.uuid)
- self.driver.reboot(self.ctx, instance, None, None)
- mock_sp.assert_called_once_with(node.uuid, 'reboot')
-
- @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
- @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
- def test_power_off(self, mock_sp, fake_validate, mock_looping):
- node = ironic_utils.get_test_node()
- fake_validate.side_effect = [node, node]
-
- fake_looping_call = FakeLoopingCall()
- mock_looping.return_value = fake_looping_call
- instance_uuid = uuidutils.generate_uuid()
- instance = fake_instance.fake_instance_obj(self.ctx,
- node=instance_uuid)
-
- self.driver.power_off(instance)
- mock_sp.assert_called_once_with(node.uuid, 'off')
-
- @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
- @mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
- def test_power_on(self, mock_sp, fake_validate, mock_looping):
- node = ironic_utils.get_test_node()
- fake_validate.side_effect = [node, node]
-
- fake_looping_call = FakeLoopingCall()
- mock_looping.return_value = fake_looping_call
- instance_uuid = uuidutils.generate_uuid()
- instance = fake_instance.fake_instance_obj(self.ctx,
- node=instance_uuid)
-
- self.driver.power_on(self.ctx, instance,
- utils.get_test_network_info())
- mock_sp.assert_called_once_with(node.uuid, 'on')
-
- @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
- @mock.patch.object(FAKE_CLIENT.port, 'update')
- @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
- def test_plug_vifs_with_port(self, mock_uvifs, mock_port_udt, mock_lp):
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- node = ironic_utils.get_test_node(uuid=node_uuid)
- port = ironic_utils.get_test_port()
-
- mock_lp.return_value = [port]
-
- instance = fake_instance.fake_instance_obj(self.ctx,
- node=node_uuid)
- network_info = utils.get_test_network_info()
-
- port_id = unicode(network_info[0]['id'])
- expected_patch = [{'op': 'add',
- 'path': '/extra/vif_port_id',
- 'value': port_id}]
- self.driver._plug_vifs(node, instance, network_info)
-
- # asserts
- mock_uvifs.assert_called_once_with(node, instance, network_info)
- mock_lp.assert_called_once_with(node_uuid)
- mock_port_udt.assert_called_with(port.uuid, expected_patch)
-
- @mock.patch.object(FAKE_CLIENT.node, 'get')
- @mock.patch.object(ironic_driver.IronicDriver, '_plug_vifs')
- def test_plug_vifs(self, mock__plug_vifs, mock_get):
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- node = ironic_utils.get_test_node(uuid=node_uuid)
-
- mock_get.return_value = node
- instance = fake_instance.fake_instance_obj(self.ctx,
- node=node_uuid)
- network_info = utils.get_test_network_info()
- self.driver.plug_vifs(instance, network_info)
-
- mock_get.assert_called_once_with(node_uuid)
- mock__plug_vifs.assert_called_once_with(node, instance, network_info)
-
- @mock.patch.object(FAKE_CLIENT.port, 'update')
- @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
- @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
- def test_plug_vifs_count_mismatch(self, mock_uvifs, mock_lp,
- mock_port_udt):
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- node = ironic_utils.get_test_node(uuid=node_uuid)
- port = ironic_utils.get_test_port()
-
- mock_lp.return_value = [port]
-
- instance = fake_instance.fake_instance_obj(self.ctx,
- node=node_uuid)
- # len(network_info) > len(ports)
- network_info = (utils.get_test_network_info() +
- utils.get_test_network_info())
- self.assertRaises(exception.NovaException,
- self.driver._plug_vifs, node, instance,
- network_info)
-
- # asserts
- mock_uvifs.assert_called_once_with(node, instance, network_info)
- mock_lp.assert_called_once_with(node_uuid)
- # assert port.update() was not called
- self.assertFalse(mock_port_udt.called)
-
- @mock.patch.object(FAKE_CLIENT.port, 'update')
- @mock.patch.object(FAKE_CLIENT.node, 'list_ports')
- @mock.patch.object(ironic_driver.IronicDriver, '_unplug_vifs')
- def test_plug_vifs_no_network_info(self, mock_uvifs, mock_lp,
- mock_port_udt):
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- node = ironic_utils.get_test_node(uuid=node_uuid)
- port = ironic_utils.get_test_port()
-
- mock_lp.return_value = [port]
-
- instance = fake_instance.fake_instance_obj(self.ctx,
- node=node_uuid)
- network_info = []
- self.driver._plug_vifs(node, instance, network_info)
-
- # asserts
- mock_uvifs.assert_called_once_with(node, instance, network_info)
- mock_lp.assert_called_once_with(node_uuid)
- # assert port.update() was not called
- self.assertFalse(mock_port_udt.called)
-
- @mock.patch.object(FAKE_CLIENT.port, 'update')
- @mock.patch.object(FAKE_CLIENT, 'node')
- def test_unplug_vifs(self, mock_node, mock_update):
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- node = ironic_utils.get_test_node(uuid=node_uuid)
- port = ironic_utils.get_test_port(extra={'vif_port_id': 'fake-vif'})
-
- mock_node.get.return_value = node
- mock_node.list_ports.return_value = [port]
-
- instance = fake_instance.fake_instance_obj(self.ctx,
- node=node_uuid)
- expected_patch = [{'op': 'remove', 'path':
- '/extra/vif_port_id'}]
- self.driver.unplug_vifs(instance,
- utils.get_test_network_info())
-
- # asserts
- mock_node.get.assert_called_once_with(node_uuid)
- mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
- mock_update.assert_called_once_with(port.uuid, expected_patch)
-
- @mock.patch.object(FAKE_CLIENT.port, 'update')
- @mock.patch.object(FAKE_CLIENT, 'node')
- def test_unplug_vifs_port_not_associated(self, mock_node, mock_update):
- node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
- node = ironic_utils.get_test_node(uuid=node_uuid)
- port = ironic_utils.get_test_port(extra={})
-
- mock_node.get.return_value = node
- mock_node.list_ports.return_value = [port]
- instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
- self.driver.unplug_vifs(instance, utils.get_test_network_info())
-
- mock_node.get.assert_called_once_with(node_uuid)
- mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
- # assert port.update() was not called
- self.assertFalse(mock_update.called)
-
- @mock.patch.object(FAKE_CLIENT.port, 'update')
- def test_unplug_vifs_no_network_info(self, mock_update):
- instance = fake_instance.fake_instance_obj(self.ctx)
- network_info = []
- self.driver.unplug_vifs(instance, network_info)
-
- # assert port.update() was not called
- self.assertFalse(mock_update.called)
-
- @mock.patch.object(firewall.NoopFirewallDriver, 'unfilter_instance',
- create=True)
- def test_unfilter_instance(self, mock_ui):
- instance = fake_instance.fake_instance_obj(self.ctx)
- network_info = utils.get_test_network_info()
- self.driver.unfilter_instance(instance, network_info)
- mock_ui.assert_called_once_with(instance, network_info)
-
- @mock.patch.object(firewall.NoopFirewallDriver, 'setup_basic_filtering',
- create=True)
- @mock.patch.object(firewall.NoopFirewallDriver, 'prepare_instance_filter',
- create=True)
- def test_ensure_filtering_rules_for_instance(self, mock_pif, mock_sbf):
- instance = fake_instance.fake_instance_obj(self.ctx)
- network_info = utils.get_test_network_info()
- self.driver.ensure_filtering_rules_for_instance(instance,
- network_info)
- mock_sbf.assert_called_once_with(instance, network_info)
- mock_pif.assert_called_once_with(instance, network_info)
-
- @mock.patch.object(firewall.NoopFirewallDriver,
- 'refresh_instance_security_rules', create=True)
- def test_refresh_instance_security_rules(self, mock_risr):
- instance = fake_instance.fake_instance_obj(self.ctx)
- self.driver.refresh_instance_security_rules(instance)
- mock_risr.assert_called_once_with(instance)
-
- @mock.patch.object(firewall.NoopFirewallDriver,
- 'refresh_provider_fw_rules', create=True)
- def test_refresh_provider_fw_rules(self, mock_rpfr):
- fake_instance.fake_instance_obj(self.ctx)
- self.driver.refresh_provider_fw_rules()
- mock_rpfr.assert_called_once_with()
-
- @mock.patch.object(firewall.NoopFirewallDriver,
- 'refresh_security_group_members', create=True)
- def test_refresh_security_group_members(self, mock_rsgm):
- fake_group = 'fake-security-group-members'
- self.driver.refresh_security_group_members(fake_group)
- mock_rsgm.assert_called_once_with(fake_group)
-
- @mock.patch.object(firewall.NoopFirewallDriver,
- 'refresh_instance_security_rules', create=True)
- def test_refresh_security_group_rules(self, mock_risr):
- fake_group = 'fake-security-group-members'
- self.driver.refresh_instance_security_rules(fake_group)
- mock_risr.assert_called_once_with(fake_group)
-
- @mock.patch.object(ironic_driver.IronicDriver, '_wait_for_active')
- @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
- @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
- @mock.patch.object(objects.Flavor, 'get_by_id')
- @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
- @mock.patch.object(FAKE_CLIENT.node, 'get')
- @mock.patch.object(objects.Instance, 'save')
- def _test_rebuild(self, mock_save, mock_get, mock_driver_fields,
- mock_fg_bid, mock_set_pstate, mock_looping,
- mock_wait_active, preserve=False):
- node_uuid = uuidutils.generate_uuid()
- instance_uuid = uuidutils.generate_uuid()
- node = ironic_utils.get_test_node(uuid=node_uuid,
- instance_uuid=instance_uuid,
- instance_type_id=5)
- mock_get.return_value = node
-
- image_meta = ironic_utils.get_test_image_meta()
- flavor_id = 5
- flavor = {'id': flavor_id, 'name': 'baremetal'}
- mock_fg_bid.return_value = flavor
-
- instance = fake_instance.fake_instance_obj(self.ctx,
- uuid=instance_uuid,
- node=node_uuid,
- instance_type_id=flavor_id)
-
- fake_looping_call = FakeLoopingCall()
- mock_looping.return_value = fake_looping_call
-
- self.driver.rebuild(
- context=self.ctx, instance=instance, image_meta=image_meta,
- injected_files=None, admin_password=None, bdms=None,
- detach_block_devices=None, attach_block_devices=None,
- preserve_ephemeral=preserve)
-
- mock_save.assert_called_once_with(
- expected_task_state=[task_states.REBUILDING])
- mock_driver_fields.assert_called_once_with(node, instance, image_meta,
- flavor, preserve)
- mock_set_pstate.assert_called_once_with(node_uuid,
- ironic_states.REBUILD)
- mock_looping.assert_called_once_with(mock_wait_active,
- FAKE_CLIENT_WRAPPER,
- instance)
- fake_looping_call.start.assert_called_once_with(
- interval=CONF.ironic.api_retry_interval)
- fake_looping_call.wait.assert_called_once_with()
-
- def test_rebuild_preserve_ephemeral(self):
- self._test_rebuild(preserve=True)
-
- def test_rebuild_no_preserve_ephemeral(self):
- self._test_rebuild(preserve=False)
-
- @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
- @mock.patch.object(objects.Flavor, 'get_by_id')
- @mock.patch.object(ironic_driver.IronicDriver, '_add_driver_fields')
- @mock.patch.object(FAKE_CLIENT.node, 'get')
- @mock.patch.object(objects.Instance, 'save')
- def test_rebuild_failures(self, mock_save, mock_get, mock_driver_fields,
- mock_fg_bid, mock_set_pstate):
- node_uuid = uuidutils.generate_uuid()
- instance_uuid = uuidutils.generate_uuid()
- node = ironic_utils.get_test_node(uuid=node_uuid,
- instance_uuid=instance_uuid,
- instance_type_id=5)
- mock_get.return_value = node
-
- image_meta = ironic_utils.get_test_image_meta()
- flavor_id = 5
- flavor = {'id': flavor_id, 'name': 'baremetal'}
- mock_fg_bid.return_value = flavor
-
- instance = fake_instance.fake_instance_obj(self.ctx,
- uuid=instance_uuid,
- node=node_uuid,
- instance_type_id=flavor_id)
-
- exceptions = [
- exception.NovaException(),
- ironic_exception.BadRequest(),
- ironic_exception.InternalServerError(),
- ]
- for e in exceptions:
- mock_set_pstate.side_effect = e
- self.assertRaises(exception.InstanceDeployFailure,
- self.driver.rebuild,
- context=self.ctx, instance=instance, image_meta=image_meta,
- injected_files=None, admin_password=None, bdms=None,
- detach_block_devices=None, attach_block_devices=None)
diff --git a/nova/tests/virt/ironic/test_patcher.py b/nova/tests/virt/ironic/test_patcher.py
deleted file mode 100644
index d37620b0e8..0000000000
--- a/nova/tests/virt/ironic/test_patcher.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# Copyright 2014 Red Hat, Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.config import cfg
-
-from nova import context as nova_context
-from nova import test
-from nova.tests import fake_instance
-from nova.tests.virt.ironic import utils as ironic_utils
-from nova.virt.ironic import patcher
-
-CONF = cfg.CONF
-
-
-class IronicDriverFieldsTestCase(test.NoDBTestCase):
-
- def setUp(self):
- super(IronicDriverFieldsTestCase, self).setUp()
- self.image_meta = ironic_utils.get_test_image_meta()
- self.flavor = ironic_utils.get_test_flavor()
- self.ctx = nova_context.get_admin_context()
- self.instance = fake_instance.fake_instance_obj(self.ctx)
- # Generic expected patches
- self._expected_deploy_patch = [{'path': '/instance_info/image_source',
- 'value': self.image_meta['id'],
- 'op': 'add'},
- {'path': '/instance_info/root_gb',
- 'value': str(self.instance['root_gb']),
- 'op': 'add'},
- {'path': '/instance_info/swap_mb',
- 'value': str(self.flavor['swap']),
- 'op': 'add'}]
- self._expected_cleanup_patch = []
-
- def test_create_generic(self):
- node = ironic_utils.get_test_node(driver='fake')
- patcher_obj = patcher.create(node)
- self.assertIsInstance(patcher_obj, patcher.GenericDriverFields)
-
- def test_create_pxe(self):
- node = ironic_utils.get_test_node(driver='pxe_fake')
- patcher_obj = patcher.create(node)
- self.assertIsInstance(patcher_obj, patcher.PXEDriverFields)
-
- def test_generic_get_deploy_patch(self):
- node = ironic_utils.get_test_node(driver='fake')
- patch = patcher.create(node).get_deploy_patch(
- self.instance, self.image_meta, self.flavor)
- self.assertEqual(sorted(self._expected_deploy_patch), sorted(patch))
-
- def test_generic_get_deploy_patch_ephemeral(self):
- CONF.set_override('default_ephemeral_format', 'testfmt')
- node = ironic_utils.get_test_node(driver='fake')
- instance = fake_instance.fake_instance_obj(self.ctx,
- ephemeral_gb=10)
- patch = patcher.create(node).get_deploy_patch(
- instance, self.image_meta, self.flavor)
- expected = [{'path': '/instance_info/ephemeral_gb',
- 'value': str(instance.ephemeral_gb),
- 'op': 'add'},
- {'path': '/instance_info/ephemeral_format',
- 'value': 'testfmt',
- 'op': 'add'}]
- expected += self._expected_deploy_patch
- self.assertEqual(sorted(expected), sorted(patch))
-
- def test_generic_get_deploy_patch_preserve_ephemeral(self):
- node = ironic_utils.get_test_node(driver='fake')
- for preserve in [True, False]:
- patch = patcher.create(node).get_deploy_patch(
- self.instance, self.image_meta, self.flavor,
- preserve_ephemeral=preserve)
- expected = [{'path': '/instance_info/preserve_ephemeral',
- 'value': str(preserve), 'op': 'add', }]
- expected += self._expected_deploy_patch
- self.assertEqual(sorted(expected), sorted(patch))
-
- def test_generic_get_cleanup_patch(self):
- node = ironic_utils.get_test_node(driver='fake')
- patch = patcher.create(node).get_cleanup_patch(self.instance, None,
- self.flavor)
- self.assertEqual(self._expected_cleanup_patch, patch)
-
- def test_pxe_get_deploy_patch(self):
- node = ironic_utils.get_test_node(driver='pxe_fake')
- extra_specs = self.flavor['extra_specs']
- expected = [{'path': '/driver_info/pxe_deploy_kernel',
- 'value': extra_specs['baremetal:deploy_kernel_id'],
- 'op': 'add'},
- {'path': '/driver_info/pxe_deploy_ramdisk',
- 'value': extra_specs['baremetal:deploy_ramdisk_id'],
- 'op': 'add'}]
- expected += self._expected_deploy_patch
- patch = patcher.create(node).get_deploy_patch(
- self.instance, self.image_meta, self.flavor)
- self.assertEqual(sorted(expected), sorted(patch))
-
- def test_pxe_get_deploy_patch_no_flavor_kernel_ramdisk_ids(self):
- flavor = ironic_utils.get_test_flavor(extra_specs={})
- node = ironic_utils.get_test_node(driver='pxe_fake')
- patch = patcher.create(node).get_deploy_patch(
- self.instance, self.image_meta, flavor)
- # If there's no extra_specs patch should be exactly like a
- # generic patch
- self.assertEqual(sorted(self._expected_deploy_patch), sorted(patch))
-
- def test_pxe_get_cleanup_patch(self):
- driver_info = {'pxe_deploy_kernel': 'fake-kernel-id',
- 'pxe_deploy_ramdisk': 'fake-ramdisk-id'}
- node = ironic_utils.get_test_node(driver='pxe_fake',
- driver_info=driver_info)
- patch = patcher.create(node).get_cleanup_patch(self.instance, None,
- self.flavor)
- expected = [{'path': '/driver_info/pxe_deploy_kernel',
- 'op': 'remove'},
- {'path': '/driver_info/pxe_deploy_ramdisk',
- 'op': 'remove'}]
- self.assertEqual(sorted(expected), sorted(patch))
-
- def test_pxe_get_cleanup_patch_no_flavor_kernel_ramdisk_ids(self):
- self.flavor = ironic_utils.get_test_flavor(extra_specs={})
- node = ironic_utils.get_test_node(driver='pxe_fake')
- patch = patcher.create(node).get_cleanup_patch(self.instance, None,
- self.flavor)
- # If there's no extra_specs patch should be exactly like a
- # generic patch
- self.assertEqual(self._expected_cleanup_patch, patch)
diff --git a/nova/tests/virt/libvirt/test_blockinfo.py b/nova/tests/virt/libvirt/test_blockinfo.py
deleted file mode 100644
index b7ee6e3c64..0000000000
--- a/nova/tests/virt/libvirt/test_blockinfo.py
+++ /dev/null
@@ -1,991 +0,0 @@
-# Copyright 2010 OpenStack Foundation
-# Copyright 2012 University Of Minho
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-import mock
-
-from nova import block_device
-from nova.compute import arch
-from nova import context
-from nova import exception
-from nova import objects
-from nova import test
-from nova.tests import fake_block_device
-import nova.tests.image.fake
-from nova.virt import block_device as driver_block_device
-from nova.virt.libvirt import blockinfo
-
-
-class LibvirtBlockInfoTest(test.NoDBTestCase):
-
- def setUp(self):
- super(LibvirtBlockInfoTest, self).setUp()
-
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.get_admin_context()
- nova.tests.image.fake.stub_out_image_service(self.stubs)
- self.test_instance = {
- 'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
- 'memory_kb': '1024000',
- 'basepath': '/some/path',
- 'bridge_name': 'br100',
- 'vcpus': 2,
- 'project_id': 'fake',
- 'bridge': 'br101',
- 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
- 'root_gb': 10,
- 'ephemeral_gb': 20,
- 'instance_type_id': 2, # m1.tiny
- 'config_drive': None,
- 'system_metadata': {
- 'instance_type_memory_mb': 128,
- 'instance_type_root_gb': 0,
- 'instance_type_name': 'm1.micro',
- 'instance_type_ephemeral_gb': 0,
- 'instance_type_vcpus': 1,
- 'instance_type_swap': 0,
- 'instance_type_rxtx_factor': 1.0,
- 'instance_type_flavorid': '1',
- 'instance_type_vcpu_weight': None,
- 'instance_type_id': 2,
- }
- }
-
- def test_volume_in_mapping(self):
- swap = {'device_name': '/dev/sdb',
- 'swap_size': 1}
- ephemerals = [{'device_type': 'disk', 'guest_format': 'ext3',
- 'device_name': '/dev/sdc1', 'size': 10},
- {'disk_bus': 'ide', 'guest_format': None,
- 'device_name': '/dev/sdd', 'size': 10}]
- block_device_mapping = [{'mount_device': '/dev/sde',
- 'device_path': 'fake_device'},
- {'mount_device': '/dev/sdf',
- 'device_path': 'fake_device'}]
- block_device_info = {
- 'root_device_name': '/dev/sda',
- 'swap': swap,
- 'ephemerals': ephemerals,
- 'block_device_mapping': block_device_mapping}
-
- def _assert_volume_in_mapping(device_name, true_or_false):
- self.assertEqual(
- true_or_false,
- block_device.volume_in_mapping(device_name,
- block_device_info))
-
- _assert_volume_in_mapping('sda', False)
- _assert_volume_in_mapping('sdb', True)
- _assert_volume_in_mapping('sdc1', True)
- _assert_volume_in_mapping('sdd', True)
- _assert_volume_in_mapping('sde', True)
- _assert_volume_in_mapping('sdf', True)
- _assert_volume_in_mapping('sdg', False)
- _assert_volume_in_mapping('sdh1', False)
-
- def test_find_disk_dev(self):
- mapping = {
- "disk.local": {
- 'dev': 'sda',
- 'bus': 'scsi',
- 'type': 'disk',
- },
- "disk.swap": {
- 'dev': 'sdc',
- 'bus': 'scsi',
- 'type': 'disk',
- },
- }
-
- dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi')
- self.assertEqual('sdb', dev)
-
- dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi',
- last_device=True)
- self.assertEqual('sdz', dev)
-
- dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'virtio')
- self.assertEqual('vda', dev)
-
- dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'fdc')
- self.assertEqual('fda', dev)
-
- def test_get_next_disk_dev(self):
- mapping = {}
- mapping['disk.local'] = blockinfo.get_next_disk_info(mapping,
- 'virtio')
- self.assertEqual({'dev': 'vda', 'bus': 'virtio', 'type': 'disk'},
- mapping['disk.local'])
-
- mapping['disk.swap'] = blockinfo.get_next_disk_info(mapping,
- 'virtio')
- self.assertEqual({'dev': 'vdb', 'bus': 'virtio', 'type': 'disk'},
- mapping['disk.swap'])
-
- mapping['disk.config'] = blockinfo.get_next_disk_info(mapping,
- 'ide',
- 'cdrom',
- True)
- self.assertEqual({'dev': 'hdd', 'bus': 'ide', 'type': 'cdrom'},
- mapping['disk.config'])
-
- def test_get_next_disk_dev_boot_index(self):
- info = blockinfo.get_next_disk_info({}, 'virtio', boot_index=-1)
- self.assertEqual({'dev': 'vda', 'bus': 'virtio', 'type': 'disk'}, info)
-
- info = blockinfo.get_next_disk_info({}, 'virtio', boot_index=2)
- self.assertEqual({'dev': 'vda', 'bus': 'virtio',
- 'type': 'disk', 'boot_index': '2'},
- info)
-
- def test_get_disk_mapping_simple(self):
- # The simplest possible disk mapping setup, all defaults
-
- instance_ref = objects.Instance(**self.test_instance)
-
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide")
-
- expect = {
- 'disk': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
- 'root': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'}
- }
- self.assertEqual(expect, mapping)
-
- def test_get_disk_mapping_simple_rootdev(self):
- # A simple disk mapping setup, but with custom root device name
-
- instance_ref = objects.Instance(**self.test_instance)
- block_device_info = {
- 'root_device_name': '/dev/sda'
- }
-
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- block_device_info)
-
- expect = {
- 'disk': {'bus': 'scsi', 'dev': 'sda',
- 'type': 'disk', 'boot_index': '1'},
- 'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
- 'root': {'bus': 'scsi', 'dev': 'sda',
- 'type': 'disk', 'boot_index': '1'}
- }
- self.assertEqual(expect, mapping)
-
- def test_get_disk_mapping_rescue(self):
- # A simple disk mapping setup, but in rescue mode
-
- instance_ref = objects.Instance(**self.test_instance)
-
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- rescue=True)
-
- expect = {
- 'disk.rescue': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- 'disk': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
- 'root': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- }
- self.assertEqual(expect, mapping)
-
- def test_get_disk_mapping_lxc(self):
- # A simple disk mapping setup, but for lxc
-
- self.test_instance['ephemeral_gb'] = 0
- instance_ref = objects.Instance(**self.test_instance)
-
- mapping = blockinfo.get_disk_mapping("lxc", instance_ref,
- "lxc", "lxc",
- None)
- expect = {
- 'disk': {'bus': 'lxc', 'dev': None,
- 'type': 'disk', 'boot_index': '1'},
- 'root': {'bus': 'lxc', 'dev': None,
- 'type': 'disk', 'boot_index': '1'},
- }
- self.assertEqual(expect, mapping)
-
- def test_get_disk_mapping_simple_iso(self):
- # A simple disk mapping setup, but with a ISO for root device
-
- instance_ref = objects.Instance(**self.test_instance)
- image_meta = {'disk_format': 'iso'}
-
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- None,
- image_meta)
-
- expect = {
- 'disk': {'bus': 'ide', 'dev': 'hda',
- 'type': 'cdrom', 'boot_index': '1'},
- 'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
- 'root': {'bus': 'ide', 'dev': 'hda',
- 'type': 'cdrom', 'boot_index': '1'},
- }
- self.assertEqual(expect, mapping)
-
- def test_get_disk_mapping_simple_swap(self):
- # A simple disk mapping setup, but with a swap device added
-
- self.test_instance['system_metadata']['instance_type_swap'] = 5
- instance_ref = objects.Instance(**self.test_instance)
-
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide")
-
- expect = {
- 'disk': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
- 'disk.swap': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
- 'root': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- }
- self.assertEqual(expect, mapping)
-
- def test_get_disk_mapping_simple_configdrive(self):
- # A simple disk mapping setup, but with configdrive added
- # It's necessary to check if the architecture is power, because
- # power doesn't have support to ide, and so libvirt translate
- # all ide calls to scsi
-
- self.flags(force_config_drive=True)
-
- instance_ref = objects.Instance(**self.test_instance)
-
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide")
-
- # The last device is selected for this. on x86 is the last ide
- # device (hdd). Since power only support scsi, the last device
- # is sdz
-
- bus_ppc = ("scsi", "sdz")
- expect_bus = {"ppc": bus_ppc, "ppc64": bus_ppc}
-
- bus, dev = expect_bus.get(blockinfo.libvirt_utils.get_arch({}),
- ("ide", "hdd"))
-
- expect = {
- 'disk': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
- 'disk.config': {'bus': bus, 'dev': dev, 'type': 'cdrom'},
- 'root': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'}
- }
-
- self.assertEqual(expect, mapping)
-
- def test_get_disk_mapping_cdrom_configdrive(self):
- # A simple disk mapping setup, with configdrive added as cdrom
- # It's necessary to check if the architecture is power, because
- # power doesn't have support to ide, and so libvirt translate
- # all ide calls to scsi
-
- self.flags(force_config_drive=True)
- self.flags(config_drive_format='iso9660')
-
- instance_ref = objects.Instance(**self.test_instance)
-
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide")
-
- bus_ppc = ("scsi", "sdz")
- expect_bus = {"ppc": bus_ppc, "ppc64": bus_ppc}
-
- bus, dev = expect_bus.get(blockinfo.libvirt_utils.get_arch({}),
- ("ide", "hdd"))
-
- expect = {
- 'disk': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
- 'disk.config': {'bus': bus, 'dev': dev, 'type': 'cdrom'},
- 'root': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'}
- }
-
- self.assertEqual(expect, mapping)
-
- def test_get_disk_mapping_disk_configdrive(self):
- # A simple disk mapping setup, with configdrive added as disk
-
- self.flags(force_config_drive=True)
- self.flags(config_drive_format='vfat')
-
- instance_ref = objects.Instance(**self.test_instance)
-
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide")
-
- expect = {
- 'disk': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
- 'disk.config': {'bus': 'virtio', 'dev': 'vdz', 'type': 'disk'},
- 'root': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- }
- self.assertEqual(expect, mapping)
-
- def test_get_disk_mapping_ephemeral(self):
- # A disk mapping with ephemeral devices
- self.test_instance['system_metadata']['instance_type_swap'] = 5
- instance_ref = objects.Instance(**self.test_instance)
-
- block_device_info = {
- 'ephemerals': [
- {'device_type': 'disk', 'guest_format': 'ext3',
- 'device_name': '/dev/vdb', 'size': 10},
- {'disk_bus': 'ide', 'guest_format': None,
- 'device_name': '/dev/vdc', 'size': 10},
- {'device_type': 'floppy',
- 'device_name': '/dev/vdd', 'size': 10},
- ]
- }
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- block_device_info)
-
- expect = {
- 'disk': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb',
- 'type': 'disk', 'format': 'ext3'},
- 'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'},
- 'disk.eph2': {'bus': 'virtio', 'dev': 'vdd', 'type': 'floppy'},
- 'disk.swap': {'bus': 'virtio', 'dev': 'vde', 'type': 'disk'},
- 'root': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- }
- self.assertEqual(expect, mapping)
-
- def test_get_disk_mapping_custom_swap(self):
- # A disk mapping with a swap device at position vdb. This
- # should cause disk.local to be removed
- instance_ref = objects.Instance(**self.test_instance)
-
- block_device_info = {
- 'swap': {'device_name': '/dev/vdb',
- 'swap_size': 10},
- }
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- block_device_info)
-
- expect = {
- 'disk': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- 'disk.swap': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
- 'root': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- }
- self.assertEqual(expect, mapping)
-
- def test_get_disk_mapping_blockdev_root(self):
- # A disk mapping with a blockdev replacing the default root
- instance_ref = objects.Instance(**self.test_instance)
-
- block_device_info = {
- 'block_device_mapping': [
- {'connection_info': "fake",
- 'mount_device': "/dev/vda",
- 'boot_index': 0,
- 'device_type': 'disk',
- 'delete_on_termination': True},
- ]
- }
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- block_device_info)
-
- expect = {
- '/dev/vda': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- 'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
- 'root': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- }
- self.assertEqual(expect, mapping)
-
- def test_get_disk_mapping_blockdev_eph(self):
- # A disk mapping with a blockdev replacing the ephemeral device
- instance_ref = objects.Instance(**self.test_instance)
-
- block_device_info = {
- 'block_device_mapping': [
- {'connection_info': "fake",
- 'mount_device': "/dev/vdb",
- 'boot_index': -1,
- 'delete_on_termination': True},
- ]
- }
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- block_device_info)
-
- expect = {
- 'disk': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
- 'root': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- }
- self.assertEqual(expect, mapping)
-
- def test_get_disk_mapping_blockdev_many(self):
- # A disk mapping with a blockdev replacing all devices
- instance_ref = objects.Instance(**self.test_instance)
-
- block_device_info = {
- 'block_device_mapping': [
- {'connection_info': "fake",
- 'mount_device': "/dev/vda",
- 'boot_index': 0,
- 'disk_bus': 'scsi',
- 'delete_on_termination': True},
- {'connection_info': "fake",
- 'mount_device': "/dev/vdb",
- 'boot_index': -1,
- 'delete_on_termination': True},
- {'connection_info': "fake",
- 'mount_device': "/dev/vdc",
- 'boot_index': -1,
- 'device_type': 'cdrom',
- 'delete_on_termination': True},
- ]
- }
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- block_device_info)
-
- expect = {
- '/dev/vda': {'bus': 'scsi', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
- '/dev/vdc': {'bus': 'virtio', 'dev': 'vdc', 'type': 'cdrom'},
- 'root': {'bus': 'scsi', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- }
- self.assertEqual(expect, mapping)
-
- def test_get_disk_mapping_complex(self):
- # The strangest possible disk mapping setup
- instance_ref = objects.Instance(**self.test_instance)
-
- block_device_info = {
- 'root_device_name': '/dev/vdf',
- 'swap': {'device_name': '/dev/vdy',
- 'swap_size': 10},
- 'ephemerals': [
- {'device_type': 'disk', 'guest_format': 'ext3',
- 'device_name': '/dev/vdb', 'size': 10},
- {'disk_bus': 'ide', 'guest_format': None,
- 'device_name': '/dev/vdc', 'size': 10},
- ],
- 'block_device_mapping': [
- {'connection_info': "fake",
- 'mount_device': "/dev/vda",
- 'boot_index': 1,
- 'delete_on_termination': True},
- ]
- }
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- block_device_info)
-
- expect = {
- 'disk': {'bus': 'virtio', 'dev': 'vdf',
- 'type': 'disk', 'boot_index': '1'},
- '/dev/vda': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '2'},
- 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb',
- 'type': 'disk', 'format': 'ext3'},
- 'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'},
- 'disk.swap': {'bus': 'virtio', 'dev': 'vdy', 'type': 'disk'},
- 'root': {'bus': 'virtio', 'dev': 'vdf',
- 'type': 'disk', 'boot_index': '1'},
- }
- self.assertEqual(expect, mapping)
-
- def test_get_disk_mapping_updates_original(self):
- instance_ref = objects.Instance(**self.test_instance)
-
- block_device_info = {
- 'root_device_name': '/dev/vda',
- 'swap': {'device_name': '/dev/vdb',
- 'device_type': 'really_lame_type',
- 'swap_size': 10},
- 'ephemerals': [{'disk_bus': 'no_such_bus',
- 'device_type': 'yeah_right',
- 'device_name': '/dev/vdc', 'size': 10}],
- 'block_device_mapping': [
- {'connection_info': "fake",
- 'mount_device': None,
- 'device_type': 'lawnmower',
- 'delete_on_termination': True}]
- }
- expected_swap = {'device_name': '/dev/vdb', 'disk_bus': 'virtio',
- 'device_type': 'disk', 'swap_size': 10}
- expected_ephemeral = {'disk_bus': 'virtio',
- 'device_type': 'disk',
- 'device_name': '/dev/vdc', 'size': 10}
- expected_bdm = {'connection_info': "fake",
- 'mount_device': '/dev/vdd',
- 'device_type': 'disk',
- 'disk_bus': 'virtio',
- 'delete_on_termination': True}
-
- blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide", block_device_info)
-
- self.assertEqual(expected_swap, block_device_info['swap'])
- self.assertEqual(expected_ephemeral,
- block_device_info['ephemerals'][0])
- self.assertEqual(expected_bdm,
- block_device_info['block_device_mapping'][0])
-
- def test_get_disk_bus(self):
- expected = (
- (arch.X86_64, 'disk', 'virtio'),
- (arch.X86_64, 'cdrom', 'ide'),
- (arch.X86_64, 'floppy', 'fdc'),
- (arch.PPC, 'disk', 'virtio'),
- (arch.PPC, 'cdrom', 'scsi'),
- (arch.PPC64, 'disk', 'virtio'),
- (arch.PPC64, 'cdrom', 'scsi')
- )
- for guestarch, dev, res in expected:
- with mock.patch.object(blockinfo.libvirt_utils,
- 'get_arch',
- return_value=guestarch):
- bus = blockinfo.get_disk_bus_for_device_type('kvm',
- device_type=dev)
- self.assertEqual(res, bus)
-
- expected = (
- ('scsi', None, 'disk', 'scsi'),
- (None, 'scsi', 'cdrom', 'scsi'),
- ('usb', None, 'disk', 'usb')
- )
- for dbus, cbus, dev, res in expected:
- image_meta = {'properties': {'hw_disk_bus': dbus,
- 'hw_cdrom_bus': cbus}}
- bus = blockinfo.get_disk_bus_for_device_type('kvm',
- image_meta,
- device_type=dev)
- self.assertEqual(res, bus)
-
- image_meta = {'properties': {'hw_disk_bus': 'xen'}}
- self.assertRaises(exception.UnsupportedHardware,
- blockinfo.get_disk_bus_for_device_type,
- 'kvm',
- image_meta)
-
- def test_success_get_disk_bus_for_disk_dev(self):
- expected = (
- ('ide', ("kvm", "hda")),
- ('scsi', ("kvm", "sdf")),
- ('virtio', ("kvm", "vds")),
- ('fdc', ("kvm", "fdc")),
- ('uml', ("kvm", "ubd")),
- ('xen', ("xen", "sdf")),
- ('xen', ("xen", "xvdb"))
- )
- for res, args in expected:
- self.assertEqual(res, blockinfo.get_disk_bus_for_disk_dev(*args))
-
- def test_fail_get_disk_bus_for_disk_dev(self):
- self.assertRaises(exception.NovaException,
- blockinfo.get_disk_bus_for_disk_dev, 'inv', 'val')
-
- def test_get_config_drive_type_default(self):
- config_drive_type = blockinfo.get_config_drive_type()
- self.assertEqual('cdrom', config_drive_type)
-
- def test_get_config_drive_type_cdrom(self):
- self.flags(config_drive_format='iso9660')
- config_drive_type = blockinfo.get_config_drive_type()
- self.assertEqual('cdrom', config_drive_type)
-
- def test_get_config_drive_type_disk(self):
- self.flags(config_drive_format='vfat')
- config_drive_type = blockinfo.get_config_drive_type()
- self.assertEqual('disk', config_drive_type)
-
- def test_get_config_drive_type_improper_value(self):
- self.flags(config_drive_format='test')
- self.assertRaises(exception.ConfigDriveUnknownFormat,
- blockinfo.get_config_drive_type)
-
- def test_get_info_from_bdm(self):
- bdms = [{'device_name': '/dev/vds', 'device_type': 'disk',
- 'disk_bus': 'usb', 'swap_size': 4},
- {'device_type': 'disk', 'guest_format': 'ext3',
- 'device_name': '/dev/vdb', 'size': 2},
- {'disk_bus': 'ide', 'guest_format': None,
- 'device_name': '/dev/vdc', 'size': 3},
- {'connection_info': "fake",
- 'mount_device': "/dev/sdr",
- 'disk_bus': 'lame_bus',
- 'device_type': 'cdrom',
- 'boot_index': 0,
- 'delete_on_termination': True},
- {'connection_info': "fake",
- 'mount_device': "/dev/vdo",
- 'disk_bus': 'scsi',
- 'boot_index': 1,
- 'device_type': 'lame_type',
- 'delete_on_termination': True}]
- expected = [{'dev': 'vds', 'type': 'disk', 'bus': 'usb'},
- {'dev': 'vdb', 'type': 'disk',
- 'bus': 'virtio', 'format': 'ext3'},
- {'dev': 'vdc', 'type': 'disk', 'bus': 'ide'},
- {'dev': 'sdr', 'type': 'cdrom',
- 'bus': 'scsi', 'boot_index': '1'},
- {'dev': 'vdo', 'type': 'disk',
- 'bus': 'scsi', 'boot_index': '2'}]
-
- for bdm, expected in zip(bdms, expected):
- self.assertEqual(expected,
- blockinfo.get_info_from_bdm('kvm', bdm, {}))
-
- # Test that passed bus and type are considered
- bdm = {'device_name': '/dev/vda'}
- expected = {'dev': 'vda', 'type': 'disk', 'bus': 'ide'}
- self.assertEqual(
- expected, blockinfo.get_info_from_bdm('kvm', bdm, {},
- disk_bus='ide',
- dev_type='disk'))
-
- # Test that lame bus values are defaulted properly
- bdm = {'disk_bus': 'lame_bus', 'device_type': 'cdrom'}
- with mock.patch.object(blockinfo,
- 'get_disk_bus_for_device_type',
- return_value='ide') as get_bus:
- blockinfo.get_info_from_bdm('kvm', bdm, {})
- get_bus.assert_called_once_with('kvm', None, 'cdrom')
-
- # Test that missing device is defaulted as expected
- bdm = {'disk_bus': 'ide', 'device_type': 'cdrom'}
- expected = {'dev': 'vdd', 'type': 'cdrom', 'bus': 'ide'}
- mapping = {'root': {'dev': 'vda'}}
- with mock.patch.object(blockinfo,
- 'find_disk_dev_for_disk_bus',
- return_value='vdd') as find_dev:
- got = blockinfo.get_info_from_bdm(
- 'kvm', bdm, mapping, assigned_devices=['vdb', 'vdc'])
- find_dev.assert_called_once_with(
- {'root': {'dev': 'vda'},
- 'vdb': {'dev': 'vdb'},
- 'vdc': {'dev': 'vdc'}}, 'ide')
- self.assertEqual(expected, got)
-
- def test_get_device_name(self):
- bdm_obj = objects.BlockDeviceMapping(self.context,
- **fake_block_device.FakeDbBlockDeviceDict(
- {'id': 3, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/vda',
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'volume_id': 'fake-volume-id-1',
- 'boot_index': 0}))
- self.assertEqual('/dev/vda', blockinfo.get_device_name(bdm_obj))
-
- driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm_obj)
- self.assertEqual('/dev/vda', blockinfo.get_device_name(driver_bdm))
-
- bdm_obj.device_name = None
- self.assertIsNone(blockinfo.get_device_name(bdm_obj))
-
- driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm_obj)
- self.assertIsNone(blockinfo.get_device_name(driver_bdm))
-
- @mock.patch('nova.virt.libvirt.blockinfo.find_disk_dev_for_disk_bus',
- return_value='vda')
- @mock.patch('nova.virt.libvirt.blockinfo.get_disk_bus_for_disk_dev',
- return_value='virtio')
- def test_get_root_info_no_bdm(self, mock_get_bus, mock_find_dev):
- blockinfo.get_root_info('kvm', None, None, 'virtio', 'ide')
- mock_find_dev.assert_called_once_with({}, 'virtio')
-
- blockinfo.get_root_info('kvm', None, None, 'virtio', 'ide',
- root_device_name='/dev/vda')
- mock_get_bus.assert_called_once_with('kvm', '/dev/vda')
-
- @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
- def test_get_root_info_bdm(self, mock_get_info):
- root_bdm = {'mount_device': '/dev/vda',
- 'disk_bus': 'scsi',
- 'device_type': 'disk'}
- # No root_device_name
- blockinfo.get_root_info('kvm', None, root_bdm, 'virtio', 'ide')
- mock_get_info.assert_called_once_with('kvm', root_bdm, {}, 'virtio')
- mock_get_info.reset_mock()
- # Both device names
- blockinfo.get_root_info('kvm', None, root_bdm, 'virtio', 'ide',
- root_device_name='sda')
- mock_get_info.assert_called_once_with('kvm', root_bdm, {}, 'virtio')
- mock_get_info.reset_mock()
- # Missing device names
- del root_bdm['mount_device']
- blockinfo.get_root_info('kvm', None, root_bdm, 'virtio', 'ide',
- root_device_name='sda')
- mock_get_info.assert_called_once_with('kvm',
- {'device_name': 'sda',
- 'disk_bus': 'scsi',
- 'device_type': 'disk'},
- {}, 'virtio')
-
- def test_get_boot_order_simple(self):
- disk_info = {
- 'disk_bus': 'virtio',
- 'cdrom_bus': 'ide',
- 'mapping': {
- 'disk': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- 'root': {'bus': 'virtio', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- }
- }
- expected_order = ['hd']
- self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))
-
- def test_get_boot_order_complex(self):
- disk_info = {
- 'disk_bus': 'virtio',
- 'cdrom_bus': 'ide',
- 'mapping': {
- 'disk': {'bus': 'virtio', 'dev': 'vdf',
- 'type': 'disk', 'boot_index': '1'},
- '/dev/hda': {'bus': 'ide', 'dev': 'hda',
- 'type': 'cdrom', 'boot_index': '3'},
- '/dev/fda': {'bus': 'fdc', 'dev': 'fda',
- 'type': 'floppy', 'boot_index': '2'},
- 'disk.eph0': {'bus': 'virtio', 'dev': 'vdb',
- 'type': 'disk', 'format': 'ext3'},
- 'disk.eph1': {'bus': 'ide', 'dev': 'vdc', 'type': 'disk'},
- 'disk.swap': {'bus': 'virtio', 'dev': 'vdy', 'type': 'disk'},
- 'root': {'bus': 'virtio', 'dev': 'vdf',
- 'type': 'disk', 'boot_index': '1'},
- }
- }
- expected_order = ['hd', 'fd', 'cdrom']
- self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))
-
- def test_get_boot_order_overlapping(self):
- disk_info = {
- 'disk_bus': 'virtio',
- 'cdrom_bus': 'ide',
- 'mapping': {
- '/dev/vda': {'bus': 'scsi', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- '/dev/vdb': {'bus': 'virtio', 'dev': 'vdb',
- 'type': 'disk', 'boot_index': '2'},
- '/dev/vdc': {'bus': 'virtio', 'dev': 'vdc',
- 'type': 'cdrom', 'boot_index': '3'},
- 'root': {'bus': 'scsi', 'dev': 'vda',
- 'type': 'disk', 'boot_index': '1'},
- }
- }
- expected_order = ['hd', 'cdrom']
- self.assertEqual(expected_order, blockinfo.get_boot_order(disk_info))
-
-
-class DefaultDeviceNamesTestCase(test.NoDBTestCase):
- def setUp(self):
- super(DefaultDeviceNamesTestCase, self).setUp()
- self.context = context.get_admin_context()
- self.instance = {
- 'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
- 'memory_kb': '1024000',
- 'basepath': '/some/path',
- 'bridge_name': 'br100',
- 'vcpus': 2,
- 'project_id': 'fake',
- 'bridge': 'br101',
- 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
- 'root_gb': 10,
- 'ephemeral_gb': 20,
- 'instance_type_id': 2}
- self.root_device_name = '/dev/vda'
- self.virt_type = 'kvm'
- self.flavor = {'swap': 4}
- self.patchers = []
- self.patchers.append(mock.patch('nova.compute.flavors.extract_flavor',
- return_value=self.flavor))
- self.patchers.append(mock.patch(
- 'nova.objects.block_device.BlockDeviceMapping.save'))
- for patcher in self.patchers:
- patcher.start()
-
- self.ephemerals = [objects.BlockDeviceMapping(
- self.context, **fake_block_device.FakeDbBlockDeviceDict(
- {'id': 1, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/vdb',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'device_type': 'disk',
- 'disk_bus': 'virtio',
- 'delete_on_termination': True,
- 'guest_format': None,
- 'volume_size': 1,
- 'boot_index': -1}))]
-
- self.swap = [objects.BlockDeviceMapping(
- self.context, **fake_block_device.FakeDbBlockDeviceDict(
- {'id': 2, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/vdc',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'device_type': 'disk',
- 'disk_bus': 'virtio',
- 'delete_on_termination': True,
- 'guest_format': 'swap',
- 'volume_size': 1,
- 'boot_index': -1}))]
-
- self.block_device_mapping = [
- objects.BlockDeviceMapping(self.context,
- **fake_block_device.FakeDbBlockDeviceDict(
- {'id': 3, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/vda',
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'device_type': 'disk',
- 'disk_bus': 'virtio',
- 'volume_id': 'fake-volume-id-1',
- 'boot_index': 0})),
- objects.BlockDeviceMapping(self.context,
- **fake_block_device.FakeDbBlockDeviceDict(
- {'id': 4, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/vdd',
- 'source_type': 'snapshot',
- 'device_type': 'disk',
- 'disk_bus': 'virtio',
- 'destination_type': 'volume',
- 'snapshot_id': 'fake-snapshot-id-1',
- 'boot_index': -1})),
- objects.BlockDeviceMapping(self.context,
- **fake_block_device.FakeDbBlockDeviceDict(
- {'id': 5, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/vde',
- 'source_type': 'blank',
- 'device_type': 'disk',
- 'disk_bus': 'virtio',
- 'destination_type': 'volume',
- 'boot_index': -1}))]
-
- def tearDown(self):
- super(DefaultDeviceNamesTestCase, self).tearDown()
- for patcher in self.patchers:
- patcher.stop()
-
- def _test_default_device_names(self, *block_device_lists):
- blockinfo.default_device_names(self.virt_type,
- self.context,
- self.instance,
- self.root_device_name,
- *block_device_lists)
-
- def test_only_block_device_mapping(self):
- # Test no-op
- original_bdm = copy.deepcopy(self.block_device_mapping)
- self._test_default_device_names([], [], self.block_device_mapping)
- for original, defaulted in zip(
- original_bdm, self.block_device_mapping):
- self.assertEqual(original.device_name, defaulted.device_name)
-
- # Assert it defaults the missing one as expected
- self.block_device_mapping[1]['device_name'] = None
- self.block_device_mapping[2]['device_name'] = None
- self._test_default_device_names([], [], self.block_device_mapping)
- self.assertEqual('/dev/vdd',
- self.block_device_mapping[1]['device_name'])
- self.assertEqual('/dev/vde',
- self.block_device_mapping[2]['device_name'])
-
- def test_with_ephemerals(self):
- # Test ephemeral gets assigned
- self.ephemerals[0]['device_name'] = None
- self._test_default_device_names(self.ephemerals, [],
- self.block_device_mapping)
- self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])
-
- self.block_device_mapping[1]['device_name'] = None
- self.block_device_mapping[2]['device_name'] = None
- self._test_default_device_names(self.ephemerals, [],
- self.block_device_mapping)
- self.assertEqual('/dev/vdd',
- self.block_device_mapping[1]['device_name'])
- self.assertEqual('/dev/vde',
- self.block_device_mapping[2]['device_name'])
-
- def test_with_swap(self):
- # Test swap only
- self.swap[0]['device_name'] = None
- self._test_default_device_names([], self.swap, [])
- self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
-
- # Test swap and block_device_mapping
- self.swap[0]['device_name'] = None
- self.block_device_mapping[1]['device_name'] = None
- self.block_device_mapping[2]['device_name'] = None
- self._test_default_device_names([], self.swap,
- self.block_device_mapping)
- self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
- self.assertEqual('/dev/vdd',
- self.block_device_mapping[1]['device_name'])
- self.assertEqual('/dev/vde',
- self.block_device_mapping[2]['device_name'])
-
- def test_all_together(self):
- # Test swap missing
- self.swap[0]['device_name'] = None
- self._test_default_device_names(self.ephemerals,
- self.swap, self.block_device_mapping)
- self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
-
- # Test swap and eph missing
- self.swap[0]['device_name'] = None
- self.ephemerals[0]['device_name'] = None
- self._test_default_device_names(self.ephemerals,
- self.swap, self.block_device_mapping)
- self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])
- self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
-
- # Test all missing
- self.swap[0]['device_name'] = None
- self.ephemerals[0]['device_name'] = None
- self.block_device_mapping[1]['device_name'] = None
- self.block_device_mapping[2]['device_name'] = None
- self._test_default_device_names(self.ephemerals,
- self.swap, self.block_device_mapping)
- self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])
- self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
- self.assertEqual('/dev/vdd',
- self.block_device_mapping[1]['device_name'])
- self.assertEqual('/dev/vde',
- self.block_device_mapping[2]['device_name'])
diff --git a/nova/tests/virt/libvirt/test_config.py b/nova/tests/virt/libvirt/test_config.py
deleted file mode 100644
index 2e4682395c..0000000000
--- a/nova/tests/virt/libvirt/test_config.py
+++ /dev/null
@@ -1,2344 +0,0 @@
-# Copyright (C) 2012 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from lxml import etree
-from oslo.utils import units
-
-from nova.compute import arch
-from nova import test
-from nova.tests import matchers
-from nova.virt.libvirt import config
-
-
-class LibvirtConfigBaseTest(test.NoDBTestCase):
- def assertXmlEqual(self, expectedXmlstr, actualXmlstr):
- self.assertThat(actualXmlstr, matchers.XMLMatches(expectedXmlstr))
-
-
-class LibvirtConfigTest(LibvirtConfigBaseTest):
-
- def test_config_plain(self):
- obj = config.LibvirtConfigObject(root_name="demo")
- xml = obj.to_xml()
-
- self.assertXmlEqual(xml, "<demo/>")
-
- def test_config_ns(self):
- obj = config.LibvirtConfigObject(root_name="demo", ns_prefix="foo",
- ns_uri="http://example.com/foo")
- xml = obj.to_xml()
-
- self.assertXmlEqual(xml, """
- <foo:demo xmlns:foo="http://example.com/foo"/>""")
-
- def test_config_text(self):
- obj = config.LibvirtConfigObject(root_name="demo")
- root = obj.format_dom()
- root.append(obj._text_node("foo", "bar"))
-
- xml = etree.tostring(root)
- self.assertXmlEqual(xml, "<demo><foo>bar</foo></demo>")
-
- def test_config_text_unicode(self):
- obj = config.LibvirtConfigObject(root_name='demo')
- root = obj.format_dom()
- root.append(obj._text_node('foo', u'\xF0\x9F\x92\xA9'))
- self.assertXmlEqual('<demo><foo>&#240;&#159;&#146;&#169;</foo></demo>',
- etree.tostring(root))
-
- def test_config_parse(self):
- inxml = "<demo><foo/></demo>"
- obj = config.LibvirtConfigObject(root_name="demo")
- obj.parse_str(inxml)
-
-
-class LibvirtConfigCapsTest(LibvirtConfigBaseTest):
-
- def test_config_host(self):
- xmlin = """
- <capabilities>
- <host>
- <uuid>c7a5fdbd-edaf-9455-926a-d65c16db1809</uuid>
- <cpu>
- <arch>x86_64</arch>
- <model>Opteron_G3</model>
- <vendor>AMD</vendor>
- <topology sockets='1' cores='4' threads='1'/>
- <feature name='ibs'/>
- <feature name='osvw'/>
- </cpu>
- <topology>
- <cells num='2'>
- <cell id='0'>
- <memory unit='KiB'>4048280</memory>
- <pages unit='KiB' size='4'>1011941</pages>
- <pages unit='KiB' size='2048'>0</pages>
- <cpus num='4'>
- <cpu id='0' socket_id='0' core_id='0' siblings='0'/>
- <cpu id='1' socket_id='0' core_id='1' siblings='1'/>
- <cpu id='2' socket_id='0' core_id='2' siblings='2'/>
- <cpu id='3' socket_id='0' core_id='3' siblings='3'/>
- </cpus>
- </cell>
- <cell id='1'>
- <memory unit='KiB'>4127684</memory>
- <pages unit='KiB' size='4'>1031921</pages>
- <pages unit='KiB' size='2048'>0</pages>
- <cpus num='4'>
- <cpu id='4' socket_id='1' core_id='0' siblings='4'/>
- <cpu id='5' socket_id='1' core_id='1' siblings='5'/>
- <cpu id='6' socket_id='1' core_id='2' siblings='6'/>
- <cpu id='7' socket_id='1' core_id='3' siblings='7'/>
- </cpus>
- </cell>
- </cells>
- </topology>
- </host>
- <guest>
- <os_type>hvm</os_type>
- <arch name='x86_64'/>
- </guest>
- <guest>
- <os_type>hvm</os_type>
- <arch name='i686'/>
- </guest>
- </capabilities>"""
-
- obj = config.LibvirtConfigCaps()
- obj.parse_str(xmlin)
-
- self.assertIsInstance(obj.host, config.LibvirtConfigCapsHost)
- self.assertEqual(obj.host.uuid, "c7a5fdbd-edaf-9455-926a-d65c16db1809")
-
- xmlout = obj.to_xml()
-
- self.assertXmlEqual(xmlin, xmlout)
-
-
-class LibvirtConfigGuestTimerTest(LibvirtConfigBaseTest):
- def test_config_platform(self):
- obj = config.LibvirtConfigGuestTimer()
- obj.track = "host"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <timer name="platform" track="host"/>
- """)
-
- def test_config_pit(self):
- obj = config.LibvirtConfigGuestTimer()
- obj.name = "pit"
- obj.tickpolicy = "discard"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <timer name="pit" tickpolicy="discard"/>
- """)
-
- def test_config_hpet(self):
- obj = config.LibvirtConfigGuestTimer()
- obj.name = "hpet"
- obj.present = False
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <timer name="hpet" present="no"/>
- """)
-
-
-class LibvirtConfigGuestClockTest(LibvirtConfigBaseTest):
- def test_config_utc(self):
- obj = config.LibvirtConfigGuestClock()
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <clock offset="utc"/>
- """)
-
- def test_config_localtime(self):
- obj = config.LibvirtConfigGuestClock()
- obj.offset = "localtime"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <clock offset="localtime"/>
- """)
-
- def test_config_timezone(self):
- obj = config.LibvirtConfigGuestClock()
- obj.offset = "timezone"
- obj.timezone = "EDT"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <clock offset="timezone" timezone="EDT"/>
- """)
-
- def test_config_variable(self):
- obj = config.LibvirtConfigGuestClock()
- obj.offset = "variable"
- obj.adjustment = "123456"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <clock offset="variable" adjustment="123456"/>
- """)
-
- def test_config_timers(self):
- obj = config.LibvirtConfigGuestClock()
-
- tmpit = config.LibvirtConfigGuestTimer()
- tmpit.name = "pit"
- tmpit.tickpolicy = "discard"
-
- tmrtc = config.LibvirtConfigGuestTimer()
- tmrtc.name = "rtc"
- tmrtc.tickpolicy = "merge"
-
- obj.add_timer(tmpit)
- obj.add_timer(tmrtc)
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <clock offset="utc">
- <timer name="pit" tickpolicy="discard"/>
- <timer name="rtc" tickpolicy="merge"/>
- </clock>
- """)
-
-
-class LibvirtConfigCPUFeatureTest(LibvirtConfigBaseTest):
-
- def test_config_simple(self):
- obj = config.LibvirtConfigCPUFeature("mtrr")
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <feature name="mtrr"/>
- """)
-
-
-class LibvirtConfigGuestCPUFeatureTest(LibvirtConfigBaseTest):
-
- def test_config_simple(self):
- obj = config.LibvirtConfigGuestCPUFeature("mtrr")
- obj.policy = "force"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <feature name="mtrr" policy="force"/>
- """)
-
-
-class LibvirtConfigGuestCPUNUMATest(LibvirtConfigBaseTest):
-
- def test_parse_dom(self):
- xml = """
- <numa>
- <cell id="0" cpus="0-1" memory="1000000"/>
- <cell id="1" cpus="2-3" memory="1500000"/>
- </numa>
- """
- xmldoc = etree.fromstring(xml)
- obj = config.LibvirtConfigGuestCPUNUMA()
- obj.parse_dom(xmldoc)
-
- self.assertEqual(2, len(obj.cells))
-
- def test_config_simple(self):
- obj = config.LibvirtConfigGuestCPUNUMA()
-
- cell = config.LibvirtConfigGuestCPUNUMACell()
- cell.id = 0
- cell.cpus = set([0, 1])
- cell.memory = 1000000
-
- obj.cells.append(cell)
-
- cell = config.LibvirtConfigGuestCPUNUMACell()
- cell.id = 1
- cell.cpus = set([2, 3])
- cell.memory = 1500000
-
- obj.cells.append(cell)
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <numa>
- <cell id="0" cpus="0-1" memory="1000000"/>
- <cell id="1" cpus="2-3" memory="1500000"/>
- </numa>
- """)
-
-
-class LibvirtConfigCPUTest(LibvirtConfigBaseTest):
-
- def test_config_simple(self):
- obj = config.LibvirtConfigCPU()
- obj.model = "Penryn"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <cpu>
- <model>Penryn</model>
- </cpu>
- """)
-
- def test_config_complex(self):
- obj = config.LibvirtConfigCPU()
- obj.model = "Penryn"
- obj.vendor = "Intel"
- obj.arch = arch.X86_64
-
- obj.add_feature(config.LibvirtConfigCPUFeature("mtrr"))
- obj.add_feature(config.LibvirtConfigCPUFeature("apic"))
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <cpu>
- <arch>x86_64</arch>
- <model>Penryn</model>
- <vendor>Intel</vendor>
- <feature name="apic"/>
- <feature name="mtrr"/>
- </cpu>
- """)
-
- def test_only_uniq_cpu_featues(self):
- obj = config.LibvirtConfigCPU()
- obj.model = "Penryn"
- obj.vendor = "Intel"
- obj.arch = arch.X86_64
-
- obj.add_feature(config.LibvirtConfigCPUFeature("mtrr"))
- obj.add_feature(config.LibvirtConfigCPUFeature("apic"))
- obj.add_feature(config.LibvirtConfigCPUFeature("apic"))
- obj.add_feature(config.LibvirtConfigCPUFeature("mtrr"))
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <cpu>
- <arch>x86_64</arch>
- <model>Penryn</model>
- <vendor>Intel</vendor>
- <feature name="apic"/>
- <feature name="mtrr"/>
- </cpu>
- """)
-
- def test_config_topology(self):
- obj = config.LibvirtConfigCPU()
- obj.model = "Penryn"
- obj.sockets = 4
- obj.cores = 4
- obj.threads = 2
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <cpu>
- <model>Penryn</model>
- <topology sockets="4" cores="4" threads="2"/>
- </cpu>
- """)
-
-
-class LibvirtConfigGuestCPUTest(LibvirtConfigBaseTest):
-
- def test_config_simple(self):
- obj = config.LibvirtConfigGuestCPU()
- obj.model = "Penryn"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <cpu match="exact">
- <model>Penryn</model>
- </cpu>
- """)
-
- def test_config_complex(self):
- obj = config.LibvirtConfigGuestCPU()
- obj.model = "Penryn"
- obj.vendor = "Intel"
- obj.arch = arch.X86_64
- obj.mode = "custom"
-
- obj.add_feature(config.LibvirtConfigGuestCPUFeature("mtrr"))
- obj.add_feature(config.LibvirtConfigGuestCPUFeature("apic"))
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <cpu mode="custom" match="exact">
- <arch>x86_64</arch>
- <model>Penryn</model>
- <vendor>Intel</vendor>
- <feature name="apic" policy="require"/>
- <feature name="mtrr" policy="require"/>
- </cpu>
- """)
-
- def test_config_host(self):
- obj = config.LibvirtConfigGuestCPU()
- obj.mode = "host-model"
- obj.match = "exact"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <cpu mode="host-model" match="exact"/>
- """)
-
- def test_config_host_with_numa(self):
- obj = config.LibvirtConfigGuestCPU()
- obj.mode = "host-model"
- obj.match = "exact"
-
- numa = config.LibvirtConfigGuestCPUNUMA()
-
- cell = config.LibvirtConfigGuestCPUNUMACell()
- cell.id = 0
- cell.cpus = set([0, 1])
- cell.memory = 1000000
-
- numa.cells.append(cell)
-
- cell = config.LibvirtConfigGuestCPUNUMACell()
- cell.id = 1
- cell.cpus = set([2, 3])
- cell.memory = 1500000
-
- numa.cells.append(cell)
-
- obj.numa = numa
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <cpu mode="host-model" match="exact">
- <numa>
- <cell id="0" cpus="0-1" memory="1000000"/>
- <cell id="1" cpus="2-3" memory="1500000"/>
- </numa>
- </cpu>
- """)
-
-
-class LibvirtConfigGuestSMBIOSTest(LibvirtConfigBaseTest):
-
- def test_config_simple(self):
- obj = config.LibvirtConfigGuestSMBIOS()
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <smbios mode="sysinfo"/>
- """)
-
-
-class LibvirtConfigGuestSysinfoTest(LibvirtConfigBaseTest):
-
- def test_config_simple(self):
- obj = config.LibvirtConfigGuestSysinfo()
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <sysinfo type="smbios"/>
- """)
-
- def test_config_bios(self):
- obj = config.LibvirtConfigGuestSysinfo()
- obj.bios_vendor = "Acme"
- obj.bios_version = "6.6.6"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <sysinfo type="smbios">
- <bios>
- <entry name="vendor">Acme</entry>
- <entry name="version">6.6.6</entry>
- </bios>
- </sysinfo>
- """)
-
- def test_config_system(self):
- obj = config.LibvirtConfigGuestSysinfo()
- obj.system_manufacturer = "Acme"
- obj.system_product = "Wile Coyote"
- obj.system_version = "6.6.6"
- obj.system_serial = "123456"
- obj.system_uuid = "c7a5fdbd-edaf-9455-926a-d65c16db1809"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <sysinfo type="smbios">
- <system>
- <entry name="manufacturer">Acme</entry>
- <entry name="product">Wile Coyote</entry>
- <entry name="version">6.6.6</entry>
- <entry name="serial">123456</entry>
- <entry name="uuid">c7a5fdbd-edaf-9455-926a-d65c16db1809</entry>
- </system>
- </sysinfo>
- """)
-
- def test_config_mixed(self):
- obj = config.LibvirtConfigGuestSysinfo()
- obj.bios_vendor = "Acme"
- obj.system_manufacturer = "Acme"
- obj.system_product = "Wile Coyote"
- obj.system_uuid = "c7a5fdbd-edaf-9455-926a-d65c16db1809"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <sysinfo type="smbios">
- <bios>
- <entry name="vendor">Acme</entry>
- </bios>
- <system>
- <entry name="manufacturer">Acme</entry>
- <entry name="product">Wile Coyote</entry>
- <entry name="uuid">c7a5fdbd-edaf-9455-926a-d65c16db1809</entry>
- </system>
- </sysinfo>
- """)
-
-
-class LibvirtConfigGuestDiskTest(LibvirtConfigBaseTest):
-
- def test_config_file(self):
- obj = config.LibvirtConfigGuestDisk()
- obj.source_type = "file"
- obj.source_path = "/tmp/hello"
- obj.target_dev = "/dev/hda"
- obj.target_bus = "ide"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <disk type="file" device="disk">
- <source file="/tmp/hello"/>
- <target bus="ide" dev="/dev/hda"/>
- </disk>""")
-
- def test_config_file_parse(self):
- xml = """<disk type="file" device="disk">
- <source file="/tmp/hello"/>
- <target bus="ide" dev="/dev/hda"/>
- </disk>"""
- xmldoc = etree.fromstring(xml)
-
- obj = config.LibvirtConfigGuestDisk()
- obj.parse_dom(xmldoc)
-
- self.assertEqual(obj.source_type, 'file')
- self.assertEqual(obj.source_path, '/tmp/hello')
- self.assertEqual(obj.target_dev, '/dev/hda')
- self.assertEqual(obj.target_bus, 'ide')
-
- def test_config_file_serial(self):
- obj = config.LibvirtConfigGuestDisk()
- obj.source_type = "file"
- obj.source_path = "/tmp/hello"
- obj.target_dev = "/dev/hda"
- obj.target_bus = "ide"
- obj.serial = "7a97c4a3-6f59-41d4-bf47-191d7f97f8e9"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <disk type="file" device="disk">
- <source file="/tmp/hello"/>
- <target bus="ide" dev="/dev/hda"/>
- <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
- </disk>""")
-
- def test_config_file_serial_parse(self):
- xml = """<disk type="file" device="disk">
- <source file="/tmp/hello"/>
- <target bus="ide" dev="/dev/hda"/>
- <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
- </disk>"""
- xmldoc = etree.fromstring(xml)
-
- obj = config.LibvirtConfigGuestDisk()
- obj.parse_dom(xmldoc)
-
- self.assertEqual(obj.source_type, 'file')
- self.assertEqual(obj.serial, '7a97c4a3-6f59-41d4-bf47-191d7f97f8e9')
-
- def test_config_file_discard(self):
- obj = config.LibvirtConfigGuestDisk()
- obj.driver_name = "qemu"
- obj.driver_format = "qcow2"
- obj.driver_cache = "none"
- obj.driver_discard = "unmap"
- obj.source_type = "file"
- obj.source_path = "/tmp/hello.qcow2"
- obj.target_dev = "/dev/hda"
- obj.target_bus = "ide"
- obj.serial = "7a97c4a3-6f59-41d4-bf47-191d7f97f8e9"
-
- xml = obj.to_xml()
- self.assertXmlEqual("""
- <disk type="file" device="disk">
- <driver name="qemu" type="qcow2" cache="none" discard="unmap"/>
- <source file="/tmp/hello.qcow2"/>
- <target bus="ide" dev="/dev/hda"/>
- <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
- </disk>""", xml)
-
- def test_config_file_discard_parse(self):
- xml = """
- <disk type="file" device="disk">
- <driver name="qemu" type="qcow2" cache="none" discard="unmap"/>
- <source file="/tmp/hello.qcow2"/>
- <target bus="ide" dev="/dev/hda"/>
- <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
- </disk>"""
- xmldoc = etree.fromstring(xml)
-
- obj = config.LibvirtConfigGuestDisk()
- obj.parse_dom(xmldoc)
-
- self.assertEqual('unmap', obj.driver_discard)
-
- def test_config_block(self):
- obj = config.LibvirtConfigGuestDisk()
- obj.source_type = "block"
- obj.source_path = "/tmp/hello"
- obj.source_device = "cdrom"
- obj.driver_name = "qemu"
- obj.target_dev = "/dev/hdc"
- obj.target_bus = "ide"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <disk type="block" device="cdrom">
- <driver name="qemu"/>
- <source dev="/tmp/hello"/>
- <target bus="ide" dev="/dev/hdc"/>
- </disk>""")
-
- def test_config_block_parse(self):
- xml = """<disk type="block" device="cdrom">
- <driver name="qemu"/>
- <source dev="/tmp/hello"/>
- <target bus="ide" dev="/dev/hdc"/>
- </disk>"""
- xmldoc = etree.fromstring(xml)
-
- obj = config.LibvirtConfigGuestDisk()
- obj.parse_dom(xmldoc)
-
- self.assertEqual(obj.source_type, 'block')
- self.assertEqual(obj.source_path, '/tmp/hello')
- self.assertEqual(obj.target_dev, '/dev/hdc')
- self.assertEqual(obj.target_bus, 'ide')
-
- def test_config_network(self):
- obj = config.LibvirtConfigGuestDisk()
- obj.source_type = "network"
- obj.source_protocol = "iscsi"
- obj.source_name = "foo.bar.com"
- obj.driver_name = "qemu"
- obj.driver_format = "qcow2"
- obj.target_dev = "/dev/hda"
- obj.target_bus = "ide"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <disk type="network" device="disk">
- <driver name="qemu" type="qcow2"/>
- <source name="foo.bar.com" protocol="iscsi"/>
- <target bus="ide" dev="/dev/hda"/>
- </disk>""")
-
- def test_config_network_parse(self):
- xml = """<disk type="network" device="disk">
- <driver name="qemu" type="qcow2"/>
- <source name="foo.bar.com" protocol="iscsi"/>
- <target bus="ide" dev="/dev/hda"/>
- </disk>"""
- xmldoc = etree.fromstring(xml)
-
- obj = config.LibvirtConfigGuestDisk()
- obj.parse_dom(xmldoc)
-
- self.assertEqual(obj.source_type, 'network')
- self.assertEqual(obj.source_protocol, 'iscsi')
- self.assertEqual(obj.source_name, 'foo.bar.com')
- self.assertEqual(obj.driver_name, 'qemu')
- self.assertEqual(obj.driver_format, 'qcow2')
- self.assertEqual(obj.target_dev, '/dev/hda')
- self.assertEqual(obj.target_bus, 'ide')
-
- def test_config_network_no_name(self):
- obj = config.LibvirtConfigGuestDisk()
- obj.source_type = 'network'
- obj.source_protocol = 'nbd'
- obj.source_hosts = ['foo.bar.com']
- obj.source_ports = [None]
- obj.driver_name = 'qemu'
- obj.driver_format = 'raw'
- obj.target_dev = '/dev/vda'
- obj.target_bus = 'virtio'
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <disk type="network" device="disk">
- <driver name="qemu" type="raw"/>
- <source protocol="nbd">
- <host name="foo.bar.com"/>
- </source>
- <target bus="virtio" dev="/dev/vda"/>
- </disk>""")
-
- def test_config_network_multihost(self):
- obj = config.LibvirtConfigGuestDisk()
- obj.source_type = 'network'
- obj.source_protocol = 'rbd'
- obj.source_name = 'pool/image'
- obj.source_hosts = ['foo.bar.com', '::1', '1.2.3.4']
- obj.source_ports = [None, '123', '456']
- obj.driver_name = 'qemu'
- obj.driver_format = 'raw'
- obj.target_dev = '/dev/vda'
- obj.target_bus = 'virtio'
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <disk type="network" device="disk">
- <driver name="qemu" type="raw"/>
- <source name="pool/image" protocol="rbd">
- <host name="foo.bar.com"/>
- <host name="::1" port="123"/>
- <host name="1.2.3.4" port="456"/>
- </source>
- <target bus="virtio" dev="/dev/vda"/>
- </disk>""")
-
- def test_config_network_auth(self):
- obj = config.LibvirtConfigGuestDisk()
- obj.source_type = "network"
- obj.source_protocol = "rbd"
- obj.source_name = "pool/image"
- obj.driver_name = "qemu"
- obj.driver_format = "raw"
- obj.target_dev = "/dev/vda"
- obj.target_bus = "virtio"
- obj.auth_username = "foo"
- obj.auth_secret_type = "ceph"
- obj.auth_secret_uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <disk type="network" device="disk">
- <driver name="qemu" type="raw"/>
- <source name="pool/image" protocol="rbd"/>
- <auth username="foo">
- <secret type="ceph"
- uuid="b38a3f43-4be2-4046-897f-b67c2f5e0147"/>
- </auth>
- <target bus="virtio" dev="/dev/vda"/>
- </disk>""")
-
- def test_config_iotune(self):
- obj = config.LibvirtConfigGuestDisk()
- obj.source_type = "file"
- obj.source_path = "/tmp/hello"
- obj.target_dev = "/dev/hda"
- obj.target_bus = "ide"
- obj.disk_read_bytes_sec = 1024000
- obj.disk_read_iops_sec = 1000
- obj.disk_total_bytes_sec = 2048000
- obj.disk_write_bytes_sec = 1024000
- obj.disk_write_iops_sec = 1000
- obj.disk_total_iops_sec = 2000
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <disk type="file" device="disk">
- <source file="/tmp/hello"/>
- <target bus="ide" dev="/dev/hda"/>
- <iotune>
- <read_bytes_sec>1024000</read_bytes_sec>
- <read_iops_sec>1000</read_iops_sec>
- <write_bytes_sec>1024000</write_bytes_sec>
- <write_iops_sec>1000</write_iops_sec>
- <total_bytes_sec>2048000</total_bytes_sec>
- <total_iops_sec>2000</total_iops_sec>
- </iotune>
- </disk>""")
-
- def test_config_blockio(self):
- obj = config.LibvirtConfigGuestDisk()
- obj.source_type = "file"
- obj.source_path = "/tmp/hello"
- obj.target_dev = "/dev/hda"
- obj.target_bus = "ide"
- obj.logical_block_size = "4096"
- obj.physical_block_size = "4096"
-
- xml = obj.to_xml()
- self.assertXmlEqual("""
- <disk type="file" device="disk">
- <source file="/tmp/hello"/>
- <target bus="ide" dev="/dev/hda"/>
- <blockio logical_block_size="4096" physical_block_size="4096"/>
- </disk>""", xml)
-
-
-class LibvirtConfigGuestSnapshotDiskTest(LibvirtConfigBaseTest):
-
- def test_config_file(self):
- obj = config.LibvirtConfigGuestDisk()
- obj.source_type = "file"
- obj.source_path = "/tmp/hello"
- obj.target_dev = "/dev/hda"
- obj.target_bus = "ide"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <disk type="file" device="disk">
- <source file="/tmp/hello"/>
- <target bus="ide" dev="/dev/hda"/>
- </disk>""")
-
- def test_config_file_parse(self):
- xml = """<disk type="file" device="disk">
- <source file="/tmp/hello"/>
- <target bus="ide" dev="/dev/hda"/>
- </disk>"""
- xmldoc = etree.fromstring(xml)
-
- obj = config.LibvirtConfigGuestDisk()
- obj.parse_dom(xmldoc)
-
- self.assertEqual(obj.source_type, 'file')
- self.assertEqual(obj.source_path, '/tmp/hello')
- self.assertEqual(obj.target_dev, '/dev/hda')
- self.assertEqual(obj.target_bus, 'ide')
-
-
-class LibvirtConfigGuestDiskBackingStoreTest(LibvirtConfigBaseTest):
-
- def test_config_file_parse(self):
- xml = """<backingStore type='file'>
- <driver name='qemu' type='qcow2'/>
- <source file='/var/lib/libvirt/images/mid.qcow2'/>
- <backingStore type='file'>
- <driver name='qemu' type='qcow2'/>
- <source file='/var/lib/libvirt/images/base.qcow2'/>
- <backingStore/>
- </backingStore>
- </backingStore>
- """
- xmldoc = etree.fromstring(xml)
-
- obj = config.LibvirtConfigGuestDiskBackingStore()
- obj.parse_dom(xmldoc)
-
- self.assertEqual(obj.driver_name, 'qemu')
- self.assertEqual(obj.driver_format, 'qcow2')
- self.assertEqual(obj.source_type, 'file')
- self.assertEqual(obj.source_file, '/var/lib/libvirt/images/mid.qcow2')
- self.assertEqual(obj.backing_store.driver_name, 'qemu')
- self.assertEqual(obj.backing_store.source_type, 'file')
- self.assertEqual(obj.backing_store.source_file,
- '/var/lib/libvirt/images/base.qcow2')
- self.assertIsNone(obj.backing_store.backing_store)
-
- def test_config_network_parse(self):
- xml = """<backingStore type='network' index='1'>
- <format type='qcow2'/>
- <source protocol='gluster' name='volume1/img1'>
- <host name='host1' port='24007'/>
- </source>
- <backingStore type='network' index='2'>
- <format type='qcow2'/>
- <source protocol='gluster' name='volume1/img2'>
- <host name='host1' port='24007'/>
- </source>
- <backingStore/>
- </backingStore>
- </backingStore>
- """
- xmldoc = etree.fromstring(xml)
-
- obj = config.LibvirtConfigGuestDiskBackingStore()
- obj.parse_dom(xmldoc)
-
- self.assertEqual(obj.source_type, 'network')
- self.assertEqual(obj.source_protocol, 'gluster')
- self.assertEqual(obj.source_name, 'volume1/img1')
- self.assertEqual(obj.source_hosts[0], 'host1')
- self.assertEqual(obj.source_ports[0], '24007')
- self.assertEqual(obj.index, '1')
- self.assertEqual(obj.backing_store.source_name, 'volume1/img2')
- self.assertEqual(obj.backing_store.index, '2')
- self.assertEqual(obj.backing_store.source_hosts[0], 'host1')
- self.assertEqual(obj.backing_store.source_ports[0], '24007')
- self.assertIsNone(obj.backing_store.backing_store)
-
-
-class LibvirtConfigGuestFilesysTest(LibvirtConfigBaseTest):
-
- def test_config_mount(self):
- obj = config.LibvirtConfigGuestFilesys()
- obj.source_type = "mount"
- obj.source_dir = "/tmp/hello"
- obj.target_dir = "/mnt"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <filesystem type="mount">
- <source dir="/tmp/hello"/>
- <target dir="/mnt"/>
- </filesystem>""")
-
-
-class LibvirtConfigGuestInputTest(LibvirtConfigBaseTest):
-
- def test_config_tablet(self):
- obj = config.LibvirtConfigGuestInput()
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <input type="tablet" bus="usb"/>""")
-
-
-class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest):
-
- def test_config_graphics(self):
- obj = config.LibvirtConfigGuestGraphics()
- obj.type = "vnc"
- obj.autoport = True
- obj.keymap = "en_US"
- obj.listen = "127.0.0.1"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <graphics type="vnc" autoport="yes" keymap="en_US" listen="127.0.0.1"/>
- """)
-
-
-class LibvirtConfigGuestHostdev(LibvirtConfigBaseTest):
-
- def test_config_pci_guest_host_dev(self):
- obj = config.LibvirtConfigGuestHostdev(mode='subsystem', type='pci')
- xml = obj.to_xml()
- expected = """
- <hostdev mode="subsystem" type="pci" managed="yes"/>
- """
- self.assertXmlEqual(xml, expected)
-
- def test_parse_GuestHostdev(self):
- xmldoc = """<hostdev mode="subsystem" type="pci" managed="yes"/>"""
- obj = config.LibvirtConfigGuestHostdev()
- obj.parse_str(xmldoc)
- self.assertEqual(obj.mode, 'subsystem')
- self.assertEqual(obj.type, 'pci')
- self.assertEqual(obj.managed, 'yes')
-
- def test_parse_GuestHostdev_non_pci(self):
- xmldoc = """<hostdev mode="subsystem" type="usb" managed="no"/>"""
- obj = config.LibvirtConfigGuestHostdev()
- obj.parse_str(xmldoc)
- self.assertEqual(obj.mode, 'subsystem')
- self.assertEqual(obj.type, 'usb')
- self.assertEqual(obj.managed, 'no')
-
-
-class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
-
- expected = """
- <hostdev mode="subsystem" type="pci" managed="yes">
- <source>
- <address bus="0x11" domain="0x1234" function="0x3"
- slot="0x22" />
- </source>
- </hostdev>
- """
-
- def test_config_guest_hosdev_pci(self):
- hostdev = config.LibvirtConfigGuestHostdevPCI()
- hostdev.domain = "1234"
- hostdev.bus = "11"
- hostdev.slot = "22"
- hostdev.function = "3"
- xml = hostdev.to_xml()
- self.assertXmlEqual(self.expected, xml)
-
- def test_parse_guest_hosdev_pci(self):
- xmldoc = self.expected
- obj = config.LibvirtConfigGuestHostdevPCI()
- obj.parse_str(xmldoc)
- self.assertEqual(obj.mode, 'subsystem')
- self.assertEqual(obj.type, 'pci')
- self.assertEqual(obj.managed, 'yes')
- self.assertEqual(obj.domain, '0x1234')
- self.assertEqual(obj.bus, '0x11')
- self.assertEqual(obj.slot, '0x22')
- self.assertEqual(obj.function, '0x3')
-
- def test_parse_guest_hosdev_usb(self):
- xmldoc = """<hostdev mode='subsystem' type='usb'>
- <source startupPolicy='optional'>
- <vendor id='0x1234'/>
- <product id='0xbeef'/>
- </source>
- <boot order='2'/>
- </hostdev>"""
- obj = config.LibvirtConfigGuestHostdevPCI()
- obj.parse_str(xmldoc)
- self.assertEqual(obj.mode, 'subsystem')
- self.assertEqual(obj.type, 'usb')
-
-
-class LibvirtConfigGuestSerialTest(LibvirtConfigBaseTest):
-
- def test_config_file(self):
- obj = config.LibvirtConfigGuestSerial()
- obj.type = "file"
- obj.source_path = "/tmp/vm.log"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <serial type="file">
- <source path="/tmp/vm.log"/>
- </serial>""")
-
- def test_config_serial_port(self):
- obj = config.LibvirtConfigGuestSerial()
- obj.type = "tcp"
- obj.listen_port = 11111
- obj.listen_host = "0.0.0.0"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <serial type="tcp">
- <source host="0.0.0.0" service="11111" mode="bind"/>
- </serial>""")
-
-
-class LibvirtConfigGuestConsoleTest(LibvirtConfigBaseTest):
- def test_config_pty(self):
- obj = config.LibvirtConfigGuestConsole()
- obj.type = "pty"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <console type="pty"/>""")
-
-
-class LibvirtConfigGuestChannelTest(LibvirtConfigBaseTest):
- def test_config_spice_minimal(self):
- obj = config.LibvirtConfigGuestChannel()
- obj.type = "spicevmc"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <channel type="spicevmc">
- <target type='virtio'/>
- </channel>""")
-
- def test_config_spice_full(self):
- obj = config.LibvirtConfigGuestChannel()
- obj.type = "spicevmc"
- obj.target_name = "com.redhat.spice.0"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <channel type="spicevmc">
- <target type='virtio' name='com.redhat.spice.0'/>
- </channel>""")
-
- def test_config_qga_full(self):
- obj = config.LibvirtConfigGuestChannel()
- obj.type = "unix"
- obj.target_name = "org.qemu.guest_agent.0"
- obj.source_path = "/var/lib/libvirt/qemu/%s.%s.sock" % (
- obj.target_name, "instance-name")
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <channel type="unix">
- <source path="%s" mode="bind"/>
- <target type="virtio" name="org.qemu.guest_agent.0"/>
- </channel>""" % obj.source_path)
-
-
-class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
- def test_config_ethernet(self):
- obj = config.LibvirtConfigGuestInterface()
- obj.net_type = "ethernet"
- obj.mac_addr = "DE:AD:BE:EF:CA:FE"
- obj.model = "virtio"
- obj.target_dev = "vnet0"
- obj.driver_name = "vhost"
- obj.vif_inbound_average = 1024000
- obj.vif_inbound_peak = 10240000
- obj.vif_inbound_burst = 1024000
- obj.vif_outbound_average = 1024000
- obj.vif_outbound_peak = 10240000
- obj.vif_outbound_burst = 1024000
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <interface type="ethernet">
- <mac address="DE:AD:BE:EF:CA:FE"/>
- <model type="virtio"/>
- <driver name="vhost"/>
- <target dev="vnet0"/>
- <bandwidth>
- <inbound average="1024000" peak="10240000" burst="1024000"/>
- <outbound average="1024000" peak="10240000" burst="1024000"/>
- </bandwidth>
- </interface>""")
-
- def test_config_bridge(self):
- obj = config.LibvirtConfigGuestInterface()
- obj.net_type = "bridge"
- obj.source_dev = "br0"
- obj.mac_addr = "DE:AD:BE:EF:CA:FE"
- obj.model = "virtio"
- obj.target_dev = "tap12345678"
- obj.filtername = "clean-traffic"
- obj.filterparams.append({"key": "IP", "value": "192.168.122.1"})
- obj.vif_inbound_average = 1024000
- obj.vif_inbound_peak = 10240000
- obj.vif_inbound_burst = 1024000
- obj.vif_outbound_average = 1024000
- obj.vif_outbound_peak = 10240000
- obj.vif_outbound_burst = 1024000
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <interface type="bridge">
- <mac address="DE:AD:BE:EF:CA:FE"/>
- <model type="virtio"/>
- <source bridge="br0"/>
- <target dev="tap12345678"/>
- <filterref filter="clean-traffic">
- <parameter name="IP" value="192.168.122.1"/>
- </filterref>
- <bandwidth>
- <inbound average="1024000" peak="10240000" burst="1024000"/>
- <outbound average="1024000" peak="10240000" burst="1024000"/>
- </bandwidth>
- </interface>""")
-
- def test_config_bridge_ovs(self):
- obj = config.LibvirtConfigGuestInterface()
- obj.net_type = "bridge"
- obj.source_dev = "br0"
- obj.mac_addr = "DE:AD:BE:EF:CA:FE"
- obj.model = "virtio"
- obj.target_dev = "tap12345678"
- obj.vporttype = "openvswitch"
- obj.vportparams.append({"key": "instanceid", "value": "foobar"})
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <interface type="bridge">
- <mac address="DE:AD:BE:EF:CA:FE"/>
- <model type="virtio"/>
- <source bridge="br0"/>
- <target dev="tap12345678"/>
- <virtualport type="openvswitch">
- <parameters instanceid="foobar"/>
- </virtualport>
- </interface>""")
-
- def test_config_8021Qbh(self):
- obj = config.LibvirtConfigGuestInterface()
- obj.net_type = "direct"
- obj.mac_addr = "DE:AD:BE:EF:CA:FE"
- obj.model = "virtio"
- obj.target_dev = "tap12345678"
- obj.source_dev = "eth0"
- obj.vporttype = "802.1Qbh"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <interface type="direct">
- <mac address="DE:AD:BE:EF:CA:FE"/>
- <model type="virtio"/>
- <source dev="eth0" mode="private"/>
- <target dev="tap12345678"/>
- <virtualport type="802.1Qbh"/>
- </interface>""")
-
- def test_config_direct(self):
- obj = config.LibvirtConfigGuestInterface()
- obj.net_type = "direct"
- obj.mac_addr = "DE:AD:BE:EF:CA:FE"
- obj.model = "virtio"
- obj.source_dev = "eth0"
- obj.source_mode = "passthrough"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <interface type="direct">
- <mac address="DE:AD:BE:EF:CA:FE"/>
- <model type="virtio"/>
- <source dev="eth0" mode="passthrough"/>
- </interface>""")
-
- def test_config_8021Qbh_hostdev(self):
- obj = config.LibvirtConfigGuestInterface()
- obj.net_type = "hostdev"
- obj.mac_addr = "DE:AD:BE:EF:CA:FE"
- obj.source_dev = "0000:0a:00.1"
- obj.vporttype = "802.1Qbh"
- obj.add_vport_param("profileid", "MyPortProfile")
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <interface type="hostdev" managed="yes">
- <mac address="DE:AD:BE:EF:CA:FE"/>
- <source>
- <address type="pci" domain="0x0000"
- bus="0x0a" slot="0x00" function="0x1"/>
- </source>
- <virtualport type="802.1Qbh">
- <parameters profileid="MyPortProfile"/>
- </virtualport>
- </interface>""")
-
- def test_config_hw_veb_hostdev(self):
- obj = config.LibvirtConfigGuestInterface()
- obj.net_type = "hostdev"
- obj.mac_addr = "DE:AD:BE:EF:CA:FE"
- obj.source_dev = "0000:0a:00.1"
- obj.vlan = "100"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <interface type="hostdev" managed="yes">
- <mac address="DE:AD:BE:EF:CA:FE"/>
- <source>
- <address type="pci" domain="0x0000"
- bus="0x0a" slot="0x00" function="0x1"/>
- </source>
- <vlan>
- <tag id="100"/>
- </vlan>
- </interface>""")
-
-
-class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
-
- def test_config_lxc(self):
- obj = config.LibvirtConfigGuest()
- obj.virt_type = "lxc"
- obj.memory = 100 * units.Mi
- obj.vcpus = 2
- obj.cpuset = set([0, 1, 3, 4, 5])
- obj.name = "demo"
- obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
- obj.os_type = "exe"
- obj.os_init_path = "/sbin/init"
-
- fs = config.LibvirtConfigGuestFilesys()
- fs.source_dir = "/root/lxc"
- fs.target_dir = "/"
-
- obj.add_device(fs)
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <domain type="lxc">
- <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
- <name>demo</name>
- <memory>104857600</memory>
- <vcpu cpuset="0-1,3-5">2</vcpu>
- <os>
- <type>exe</type>
- <init>/sbin/init</init>
- </os>
- <devices>
- <filesystem type="mount">
- <source dir="/root/lxc"/>
- <target dir="/"/>
- </filesystem>
- </devices>
- </domain>""")
-
- def test_config_lxc_with_idmap(self):
- obj = config.LibvirtConfigGuest()
- obj.virt_type = "lxc"
- obj.memory = 100 * units.Mi
- obj.vcpus = 2
- obj.cpuset = set([0, 1, 3, 4, 5])
- obj.name = "demo"
- obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
- obj.os_type = "exe"
- obj.os_init_path = "/sbin/init"
-
- uidmap = config.LibvirtConfigGuestUIDMap()
- uidmap.target = "10000"
- uidmap.count = "1"
- obj.idmaps.append(uidmap)
- gidmap = config.LibvirtConfigGuestGIDMap()
- gidmap.target = "10000"
- gidmap.count = "1"
- obj.idmaps.append(gidmap)
-
- fs = config.LibvirtConfigGuestFilesys()
- fs.source_dir = "/root/lxc"
- fs.target_dir = "/"
-
- obj.add_device(fs)
-
- xml = obj.to_xml()
- self.assertXmlEqual("""
- <domain type="lxc">
- <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
- <name>demo</name>
- <memory>104857600</memory>
- <vcpu cpuset="0-1,3-5">2</vcpu>
- <os>
- <type>exe</type>
- <init>/sbin/init</init>
- </os>
- <devices>
- <filesystem type="mount">
- <source dir="/root/lxc"/>
- <target dir="/"/>
- </filesystem>
- </devices>
- <idmap>
- <uid start="0" target="10000" count="1"/>
- <gid start="0" target="10000" count="1"/>
- </idmap>
- </domain>""", xml)
-
- def test_config_xen_pv(self):
- obj = config.LibvirtConfigGuest()
- obj.virt_type = "xen"
- obj.memory = 100 * units.Mi
- obj.vcpus = 2
- obj.cpuset = set([0, 1, 3, 4, 5])
- obj.name = "demo"
- obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
- obj.os_type = "linux"
- obj.os_kernel = "/tmp/vmlinuz"
- obj.os_initrd = "/tmp/ramdisk"
- obj.os_cmdline = "console=xvc0"
-
- disk = config.LibvirtConfigGuestDisk()
- disk.source_type = "file"
- disk.source_path = "/tmp/img"
- disk.target_dev = "/dev/xvda"
- disk.target_bus = "xen"
-
- obj.add_device(disk)
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <domain type="xen">
- <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
- <name>demo</name>
- <memory>104857600</memory>
- <vcpu cpuset="0-1,3-5">2</vcpu>
- <os>
- <type>linux</type>
- <kernel>/tmp/vmlinuz</kernel>
- <initrd>/tmp/ramdisk</initrd>
- <cmdline>console=xvc0</cmdline>
- </os>
- <devices>
- <disk type="file" device="disk">
- <source file="/tmp/img"/>
- <target bus="xen" dev="/dev/xvda"/>
- </disk>
- </devices>
- </domain>""")
-
- def test_config_xen_hvm(self):
- obj = config.LibvirtConfigGuest()
- obj.virt_type = "xen"
- obj.memory = 100 * units.Mi
- obj.vcpus = 2
- obj.cpuset = set([0, 1, 3, 4, 5])
- obj.name = "demo"
- obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
- obj.os_type = "hvm"
- obj.os_loader = '/usr/lib/xen/boot/hvmloader'
- obj.os_root = "root=xvda"
- obj.os_cmdline = "console=xvc0"
- obj.pae = True
- obj.acpi = True
- obj.apic = True
-
- disk = config.LibvirtConfigGuestDisk()
- disk.source_type = "file"
- disk.source_path = "/tmp/img"
- disk.target_dev = "/dev/xvda"
- disk.target_bus = "xen"
-
- obj.add_device(disk)
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <domain type="xen">
- <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
- <name>demo</name>
- <memory>104857600</memory>
- <vcpu cpuset="0-1,3-5">2</vcpu>
- <os>
- <type>hvm</type>
- <loader>/usr/lib/xen/boot/hvmloader</loader>
- <cmdline>console=xvc0</cmdline>
- <root>root=xvda</root>
- </os>
- <features>
- <acpi/>
- <apic/>
- <pae/>
- </features>
- <devices>
- <disk type="file" device="disk">
- <source file="/tmp/img"/>
- <target bus="xen" dev="/dev/xvda"/>
- </disk>
- </devices>
- </domain>""")
-
- def test_config_kvm(self):
- obj = config.LibvirtConfigGuest()
- obj.virt_type = "kvm"
- obj.memory = 100 * units.Mi
- obj.vcpus = 2
- obj.cpuset = set([0, 1, 3, 4, 5])
-
- obj.cputune = config.LibvirtConfigGuestCPUTune()
- obj.cputune.shares = 100
- obj.cputune.quota = 50000
- obj.cputune.period = 25000
-
- obj.membacking = config.LibvirtConfigGuestMemoryBacking()
- obj.membacking.hugepages = True
-
- obj.memtune = config.LibvirtConfigGuestMemoryTune()
- obj.memtune.hard_limit = 496
- obj.memtune.soft_limit = 672
- obj.memtune.swap_hard_limit = 1638
- obj.memtune.min_guarantee = 2970
-
- obj.numatune = config.LibvirtConfigGuestNUMATune()
-
- numamemory = config.LibvirtConfigGuestNUMATuneMemory()
- numamemory.mode = "preferred"
- numamemory.nodeset = [0, 1, 2, 3, 8]
-
- obj.numatune.memory = numamemory
-
- numamemnode0 = config.LibvirtConfigGuestNUMATuneMemNode()
- numamemnode0.cellid = 0
- numamemnode0.mode = "preferred"
- numamemnode0.nodeset = [0, 1]
-
- numamemnode1 = config.LibvirtConfigGuestNUMATuneMemNode()
- numamemnode1.cellid = 1
- numamemnode1.mode = "preferred"
- numamemnode1.nodeset = [2, 3]
-
- numamemnode2 = config.LibvirtConfigGuestNUMATuneMemNode()
- numamemnode2.cellid = 2
- numamemnode2.mode = "preferred"
- numamemnode2.nodeset = [8]
-
- obj.numatune.memnodes.extend([numamemnode0,
- numamemnode1,
- numamemnode2])
-
- obj.name = "demo"
- obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
- obj.os_type = "linux"
- obj.os_boot_dev = ["hd", "cdrom", "fd"]
- obj.os_smbios = config.LibvirtConfigGuestSMBIOS()
- obj.pae = True
- obj.acpi = True
- obj.apic = True
-
- obj.sysinfo = config.LibvirtConfigGuestSysinfo()
- obj.sysinfo.bios_vendor = "Acme"
- obj.sysinfo.system_version = "1.0.0"
-
- disk = config.LibvirtConfigGuestDisk()
- disk.source_type = "file"
- disk.source_path = "/tmp/img"
- disk.target_dev = "/dev/vda"
- disk.target_bus = "virtio"
-
- obj.add_device(disk)
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <domain type="kvm">
- <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
- <name>demo</name>
- <memory>104857600</memory>
- <memoryBacking>
- <hugepages/>
- </memoryBacking>
- <memtune>
- <hard_limit units="K">496</hard_limit>
- <soft_limit units="K">672</soft_limit>
- <swap_hard_limit units="K">1638</swap_hard_limit>
- <min_guarantee units="K">2970</min_guarantee>
- </memtune>
- <numatune>
- <memory mode="preferred" nodeset="0-3,8"/>
- <memnode cellid="0" mode="preferred" nodeset="0-1"/>
- <memnode cellid="1" mode="preferred" nodeset="2-3"/>
- <memnode cellid="2" mode="preferred" nodeset="8"/>
- </numatune>
- <vcpu cpuset="0-1,3-5">2</vcpu>
- <sysinfo type='smbios'>
- <bios>
- <entry name="vendor">Acme</entry>
- </bios>
- <system>
- <entry name="version">1.0.0</entry>
- </system>
- </sysinfo>
- <os>
- <type>linux</type>
- <boot dev="hd"/>
- <boot dev="cdrom"/>
- <boot dev="fd"/>
- <smbios mode="sysinfo"/>
- </os>
- <features>
- <acpi/>
- <apic/>
- <pae/>
- </features>
- <cputune>
- <shares>100</shares>
- <quota>50000</quota>
- <period>25000</period>
- </cputune>
- <devices>
- <disk type="file" device="disk">
- <source file="/tmp/img"/>
- <target bus="virtio" dev="/dev/vda"/>
- </disk>
- </devices>
- </domain>""")
-
- def test_config_machine_type(self):
- obj = config.LibvirtConfigGuest()
- obj.virt_type = "kvm"
- obj.memory = 100 * units.Mi
- obj.vcpus = 2
- obj.name = "demo"
- obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
- obj.os_type = "hvm"
- obj.os_mach_type = "fake_machine_type"
- xml = obj.to_xml()
-
- self.assertXmlEqual(xml, """
- <domain type="kvm">
- <uuid>b38a3f43-4be2-4046-897f-b67c2f5e0147</uuid>
- <name>demo</name>
- <memory>104857600</memory>
- <vcpu>2</vcpu>
- <os>
- <type machine="fake_machine_type">hvm</type>
- </os>
- </domain>""")
-
- def test_ConfigGuest_parse_devices(self):
- xmldoc = """ <domain type="kvm">
- <devices>
- <hostdev mode="subsystem" type="pci" managed="no">
- </hostdev>
- </devices>
- </domain>
- """
- obj = config.LibvirtConfigGuest()
- obj.parse_str(xmldoc)
- self.assertEqual(len(obj.devices), 1)
- self.assertIsInstance(obj.devices[0],
- config.LibvirtConfigGuestHostdevPCI)
- self.assertEqual(obj.devices[0].mode, 'subsystem')
- self.assertEqual(obj.devices[0].managed, 'no')
-
- def test_ConfigGuest_parse_devices_wrong_type(self):
- xmldoc = """ <domain type="kvm">
- <devices>
- <hostdev mode="subsystem" type="xxxx" managed="no">
- </hostdev>
- </devices>
- </domain>
- """
- obj = config.LibvirtConfigGuest()
- obj.parse_str(xmldoc)
- self.assertEqual(len(obj.devices), 0)
-
- def test_ConfigGuest_parese_cpu(self):
- xmldoc = """ <domain>
- <cpu mode='custom' match='exact'>
- <model>kvm64</model>
- </cpu>
- </domain>
- """
- obj = config.LibvirtConfigGuest()
- obj.parse_str(xmldoc)
-
- self.assertEqual(obj.cpu.mode, 'custom')
- self.assertEqual(obj.cpu.match, 'exact')
- self.assertEqual(obj.cpu.model, 'kvm64')
-
-
-class LibvirtConfigGuestSnapshotTest(LibvirtConfigBaseTest):
-
- def test_config_snapshot(self):
- obj = config.LibvirtConfigGuestSnapshot()
- obj.name = "Demo"
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <domainsnapshot>
- <name>Demo</name>
- <disks/>
- </domainsnapshot>""")
-
- def test_config_snapshot_with_disks(self):
- obj = config.LibvirtConfigGuestSnapshot()
- obj.name = "Demo"
-
- disk = config.LibvirtConfigGuestSnapshotDisk()
- disk.name = 'vda'
- disk.source_path = 'source-path'
- disk.source_type = 'file'
- disk.snapshot = 'external'
- disk.driver_name = 'qcow2'
- obj.add_disk(disk)
-
- disk2 = config.LibvirtConfigGuestSnapshotDisk()
- disk2.name = 'vdb'
- disk2.snapshot = 'no'
- obj.add_disk(disk2)
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <domainsnapshot>
- <name>Demo</name>
- <disks>
- <disk name='vda' snapshot='external' type='file'>
- <source file='source-path'/>
- </disk>
- <disk name='vdb' snapshot='no'/>
- </disks>
- </domainsnapshot>""")
-
- def test_config_snapshot_with_network_disks(self):
- obj = config.LibvirtConfigGuestSnapshot()
- obj.name = "Demo"
-
- disk = config.LibvirtConfigGuestSnapshotDisk()
- disk.name = 'vda'
- disk.source_name = 'source-file'
- disk.source_type = 'network'
- disk.source_hosts = ['host1']
- disk.source_ports = ['12345']
- disk.source_protocol = 'glusterfs'
- disk.snapshot = 'external'
- disk.driver_name = 'qcow2'
- obj.add_disk(disk)
-
- disk2 = config.LibvirtConfigGuestSnapshotDisk()
- disk2.name = 'vdb'
- disk2.snapshot = 'no'
- obj.add_disk(disk2)
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <domainsnapshot>
- <name>Demo</name>
- <disks>
- <disk name='vda' snapshot='external' type='network'>
- <source protocol='glusterfs' name='source-file'>
- <host name='host1' port='12345'/>
- </source>
- </disk>
- <disk name='vdb' snapshot='no'/>
- </disks>
- </domainsnapshot>""")
-
-
-class LibvirtConfigNodeDeviceTest(LibvirtConfigBaseTest):
-
- def test_config_virt_usb_device(self):
- xmlin = """
- <device>
- <name>usb_0000_09_00_0</name>
- <parent>pci_0000_00_1c_0</parent>
- <driver>
- <name>vxge</name>
- </driver>
- <capability type="usb">
- <domain>0</domain>
- <capability type="fake_usb">
- <address fake_usb="fake"/>
- </capability>
- </capability>
- </device>"""
-
- obj = config.LibvirtConfigNodeDevice()
- obj.parse_str(xmlin)
-
- self.assertIsNone(obj.pci_capability)
-
- def test_config_virt_device(self):
- xmlin = """
- <device>
- <name>pci_0000_09_00_0</name>
- <parent>pci_0000_00_1c_0</parent>
- <driver>
- <name>vxge</name>
- </driver>
- <capability type="pci">
- <domain>0</domain>
- <bus>9</bus>
- <slot>0</slot>
- <function>0</function>
- <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
- <vendor id="0x17d5">Neterion Inc.</vendor>
- <capability type="virt_functions">
- <address domain="0x0000" bus="0x0a" slot="0x00" function="0x1"/>
- <address domain="0x0000" bus="0x0a" slot="0x00" function="0x2"/>
- <address domain="0x0000" bus="0x0a" slot="0x00" function="0x3"/>
- </capability>
- </capability>
- </device>"""
-
- obj = config.LibvirtConfigNodeDevice()
- obj.parse_str(xmlin)
-
- self.assertIsInstance(obj.pci_capability,
- config.LibvirtConfigNodeDevicePciCap)
- self.assertIsInstance(obj.pci_capability.fun_capability[0],
- config.LibvirtConfigNodeDevicePciSubFunctionCap)
- self.assertEqual(obj.pci_capability.fun_capability[0].type,
- "virt_functions")
- self.assertEqual(len(obj.pci_capability.fun_capability[0].
- device_addrs),
- 3)
- self.assertEqual(obj.pci_capability.bus, 9)
-
- def test_config_phy_device(self):
- xmlin = """
- <device>
- <name>pci_0000_33_00_0</name>
- <parent>pci_0000_22_1c_0</parent>
- <driver>
- <name>vxx</name>
- </driver>
- <capability type="pci">
- <domain>0</domain>
- <bus>9</bus>
- <slot>0</slot>
- <function>0</function>
- <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
- <vendor id="0x17d5">Neterion Inc.</vendor>
- <capability type="phys_function">
- <address domain='0x0000' bus='0x09' slot='0x00' function='0x0'/>
- </capability>
- </capability>
- </device>"""
-
- obj = config.LibvirtConfigNodeDevice()
- obj.parse_str(xmlin)
-
- self.assertIsInstance(obj.pci_capability,
- config.LibvirtConfigNodeDevicePciCap)
- self.assertIsInstance(obj.pci_capability.fun_capability[0],
- config.LibvirtConfigNodeDevicePciSubFunctionCap)
- self.assertEqual(obj.pci_capability.fun_capability[0].type,
- "phys_function")
- self.assertEqual(len(obj.pci_capability.fun_capability[0].
- device_addrs),
- 1)
-
- def test_config_non_device(self):
- xmlin = """
- <device>
- <name>pci_0000_33_00_0</name>
- <parent>pci_0000_22_1c_0</parent>
- <driver>
- <name>vxx</name>
- </driver>
- <capability type="pci">
- <domain>0</domain>
- <bus>9</bus>
- <slot>0</slot>
- <function>0</function>
- <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
- <vendor id="0x17d5">Neterion Inc.</vendor>
- <capability type="virt_functions"/>
- </capability>
- </device>"""
-
- obj = config.LibvirtConfigNodeDevice()
- obj.parse_str(xmlin)
-
- self.assertIsInstance(obj.pci_capability,
- config.LibvirtConfigNodeDevicePciCap)
- self.assertIsInstance(obj.pci_capability.fun_capability[0],
- config.LibvirtConfigNodeDevicePciSubFunctionCap)
- self.assertEqual(obj.pci_capability.fun_capability[0].type,
- "virt_functions")
-
- def test_config_fail_device(self):
- xmlin = """
- <device>
- <name>pci_0000_33_00_0</name>
- <parent>pci_0000_22_1c_0</parent>
- <driver>
- <name>vxx</name>
- </driver>
- <capability type="pci">
- <domain>0</domain>
- <bus>9</bus>
- <slot>0</slot>
- <function>0</function>
- <product id="0x5833">X3100 Series 10 Gigabit Ethernet PCIe</product>
- <vendor id="0x17d5">Neterion Inc.</vendor>
- <capability type="virt_functions">
- </capability>
- </capability>
- </device>"""
-
- obj = config.LibvirtConfigNodeDevice()
- obj.parse_str(xmlin)
-
- self.assertIsInstance(obj.pci_capability,
- config.LibvirtConfigNodeDevicePciCap)
- self.assertIsInstance(obj.pci_capability.fun_capability[0],
- config.LibvirtConfigNodeDevicePciSubFunctionCap)
- self.assertEqual(obj.pci_capability.fun_capability[0].type,
- "virt_functions")
-
- def test_config_2cap_device(self):
- xmlin = """
- <device>
- <name>pci_0000_04_10_7</name>
- <parent>pci_0000_00_01_1</parent>
- <driver>
- <name>igbvf</name>
- </driver>
- <capability type='pci'>
- <domain>0</domain>
- <bus>4</bus>
- <slot>16</slot>
- <function>7</function>
- <product id='0x1520'>I350 Ethernet Controller Virtual</product>
- <vendor id='0x8086'>Intel Corporation</vendor>
- <capability type='phys_function'>
- <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
- </capability>
- <capability type='virt_functions'>
- <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
- </capability>
- </capability>
- </device>"""
-
- obj = config.LibvirtConfigNodeDevice()
- obj.parse_str(xmlin)
-
- self.assertIsInstance(obj.pci_capability,
- config.LibvirtConfigNodeDevicePciCap)
- self.assertIsInstance(obj.pci_capability.fun_capability[0],
- config.LibvirtConfigNodeDevicePciSubFunctionCap)
- self.assertEqual(obj.pci_capability.fun_capability[0].type,
- "phys_function")
- self.assertEqual(obj.pci_capability.fun_capability[1].type,
- "virt_functions")
-
-
-class LibvirtConfigNodeDevicePciCapTest(LibvirtConfigBaseTest):
-
- def test_config_device_pci_cap(self):
- xmlin = """
- <capability type="pci">
- <domain>0</domain>
- <bus>10</bus>
- <slot>1</slot>
- <function>5</function>
- <product id="0x10bd">Intel 10 Gigabit Ethernet</product>
- <vendor id="0x8086">Intel Inc.</vendor>
- <capability type="virt_functions">
- <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
- <address domain="0001" bus="0x0a" slot="0x02" function="0x03"/>
- </capability>
- </capability>"""
- obj = config.LibvirtConfigNodeDevicePciCap()
- obj.parse_str(xmlin)
-
- self.assertEqual(obj.domain, 0)
- self.assertEqual(obj.bus, 10)
- self.assertEqual(obj.slot, 1)
- self.assertEqual(obj.function, 5)
- self.assertEqual(obj.product, "Intel 10 Gigabit Ethernet")
- self.assertEqual(obj.product_id, 0x10bd)
- self.assertEqual(obj.vendor, "Intel Inc.")
- self.assertEqual(obj.vendor_id, 0x8086)
- self.assertIsInstance(obj.fun_capability[0],
- config.LibvirtConfigNodeDevicePciSubFunctionCap)
-
- self.assertEqual(obj.fun_capability[0].type, 'virt_functions')
- self.assertEqual(obj.fun_capability[0].device_addrs,
- [(0, 10, 1, 1),
- (1, 10, 2, 3), ])
-
- def test_config_device_pci_2cap(self):
- xmlin = """
- <capability type="pci">
- <domain>0</domain>
- <bus>10</bus>
- <slot>1</slot>
- <function>5</function>
- <product id="0x10bd">Intel 10 Gigabit Ethernet</product>
- <vendor id="0x8086">Intel Inc.</vendor>
- <capability type="virt_functions">
- <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
- <address domain="0001" bus="0x0a" slot="0x02" function="0x03"/>
- </capability>
- <capability type="phys_function">
- <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
- </capability>
- </capability>"""
- obj = config.LibvirtConfigNodeDevicePciCap()
- obj.parse_str(xmlin)
-
- self.assertEqual(obj.domain, 0)
- self.assertEqual(obj.bus, 10)
- self.assertEqual(obj.slot, 1)
- self.assertEqual(obj.function, 5)
- self.assertEqual(obj.product, "Intel 10 Gigabit Ethernet")
- self.assertEqual(obj.product_id, 0x10bd)
- self.assertEqual(obj.vendor, "Intel Inc.")
- self.assertEqual(obj.vendor_id, 0x8086)
- self.assertIsInstance(obj.fun_capability[0],
- config.LibvirtConfigNodeDevicePciSubFunctionCap)
-
- self.assertEqual(obj.fun_capability[0].type, 'virt_functions')
- self.assertEqual(obj.fun_capability[0].device_addrs,
- [(0, 10, 1, 1),
- (1, 10, 2, 3), ])
- self.assertEqual(obj.fun_capability[1].type, 'phys_function')
- self.assertEqual(obj.fun_capability[1].device_addrs,
- [(0, 10, 1, 1), ])
-
- def test_config_read_only_disk(self):
- obj = config.LibvirtConfigGuestDisk()
- obj.source_type = "disk"
- obj.source_device = "disk"
- obj.driver_name = "kvm"
- obj.target_dev = "/dev/hdc"
- obj.target_bus = "virtio"
- obj.readonly = True
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <disk type="disk" device="disk">
- <driver name="kvm"/>
- <target bus="virtio" dev="/dev/hdc"/>
- <readonly/>
- </disk>""")
-
- obj.readonly = False
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <disk type="disk" device="disk">
- <driver name="kvm"/>
- <target bus="virtio" dev="/dev/hdc"/>
- </disk>""")
-
-
-class LibvirtConfigNodeDevicePciSubFunctionCap(LibvirtConfigBaseTest):
-
- def test_config_device_pci_subfunction(self):
- xmlin = """
- <capability type="virt_functions">
- <address domain="0000" bus="0x0a" slot="0x1" function="0x1"/>
- <address domain="0001" bus="0x0a" slot="0x02" function="0x03"/>
- </capability>"""
- fun_capability = config.LibvirtConfigNodeDevicePciSubFunctionCap()
- fun_capability.parse_str(xmlin)
- self.assertEqual('virt_functions', fun_capability.type)
- self.assertEqual([(0, 10, 1, 1),
- (1, 10, 2, 3)],
- fun_capability.device_addrs)
-
-
-class LibvirtConfigGuestVideoTest(LibvirtConfigBaseTest):
-
- def test_config_video_driver(self):
- obj = config.LibvirtConfigGuestVideo()
- obj.type = 'qxl'
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <video>
- <model type='qxl'/>
- </video>""")
-
- def test_config_video_driver_vram_heads(self):
- obj = config.LibvirtConfigGuestVideo()
- obj.type = 'qxl'
- obj.vram = '9216'
- obj.heads = '1'
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <video>
- <model type='qxl' vram='9216' heads='1'/>
- </video>""")
-
-
-class LibvirtConfigGuestSeclabel(LibvirtConfigBaseTest):
-
- def test_config_seclabel_config(self):
- obj = config.LibvirtConfigSeclabel()
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <seclabel type='dynamic'/>""")
-
- def test_config_seclabel_baselabel(self):
- obj = config.LibvirtConfigSeclabel()
- obj.type = 'dynamic'
- obj.baselabel = 'system_u:system_r:my_svirt_t:s0'
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <seclabel type='dynamic'>
- <baselabel>system_u:system_r:my_svirt_t:s0</baselabel>
- </seclabel>""")
-
-
-class LibvirtConfigGuestRngTest(LibvirtConfigBaseTest):
-
- def test_config_rng_driver(self):
- obj = config.LibvirtConfigGuestRng()
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
-<rng model='virtio'>
- <backend model='random'/>
-</rng>""")
-
- def test_config_rng_driver_with_rate(self):
- obj = config.LibvirtConfigGuestRng()
- obj.backend = '/dev/random'
- obj.rate_period = '12'
- obj.rate_bytes = '34'
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
-<rng model='virtio'>
- <rate period='12' bytes='34'/>
- <backend model='random'>/dev/random</backend>
-</rng>""")
-
-
-class LibvirtConfigGuestControllerTest(LibvirtConfigBaseTest):
-
- def test_config_guest_contoller(self):
- obj = config.LibvirtConfigGuestController()
- obj.type = 'scsi'
- obj.index = 0
- obj.model = 'virtio-scsi'
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <controller type='scsi' index='0' model='virtio-scsi'/>""")
-
-
-class LibvirtConfigGuestWatchdogTest(LibvirtConfigBaseTest):
- def test_config_watchdog(self):
- obj = config.LibvirtConfigGuestWatchdog()
- obj.action = 'none'
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, "<watchdog model='i6300esb' action='none'/>")
-
- def test_config_watchdog_default_action(self):
- obj = config.LibvirtConfigGuestWatchdog()
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, "<watchdog model='i6300esb' action='reset'/>")
-
-
-class LibvirtConfigGuestCPUTuneTest(LibvirtConfigBaseTest):
-
- def test_config_cputune_timeslice(self):
- cputune = config.LibvirtConfigGuestCPUTune()
- cputune.shares = 100
- cputune.quota = 50000
- cputune.period = 25000
-
- xml = cputune.to_xml()
- self.assertXmlEqual(xml, """
- <cputune>
- <shares>100</shares>
- <quota>50000</quota>
- <period>25000</period>
- </cputune>""")
-
- def test_config_cputune_vcpus(self):
- cputune = config.LibvirtConfigGuestCPUTune()
-
- vcpu0 = config.LibvirtConfigGuestCPUTuneVCPUPin()
- vcpu0.id = 0
- vcpu0.cpuset = set([0, 1])
- vcpu1 = config.LibvirtConfigGuestCPUTuneVCPUPin()
- vcpu1.id = 1
- vcpu1.cpuset = set([2, 3])
- vcpu2 = config.LibvirtConfigGuestCPUTuneVCPUPin()
- vcpu2.id = 2
- vcpu2.cpuset = set([4, 5])
- vcpu3 = config.LibvirtConfigGuestCPUTuneVCPUPin()
- vcpu3.id = 3
- vcpu3.cpuset = set([6, 7])
- cputune.vcpupin.extend([vcpu0, vcpu1, vcpu2, vcpu3])
-
- xml = cputune.to_xml()
- self.assertXmlEqual(xml, """
- <cputune>
- <vcpupin vcpu="0" cpuset="0-1"/>
- <vcpupin vcpu="1" cpuset="2-3"/>
- <vcpupin vcpu="2" cpuset="4-5"/>
- <vcpupin vcpu="3" cpuset="6-7"/>
- </cputune>""")
-
-
-class LibvirtConfigGuestMemoryBackingTest(LibvirtConfigBaseTest):
- def test_config_memory_backing_none(self):
- obj = config.LibvirtConfigGuestMemoryBacking()
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, "<memoryBacking/>")
-
- def test_config_memory_backing_all(self):
- obj = config.LibvirtConfigGuestMemoryBacking()
- obj.locked = True
- obj.sharedpages = False
- obj.hugepages = True
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <memoryBacking>
- <hugepages/>
- <nosharedpages/>
- <locked/>
- </memoryBacking>""")
-
-
-class LibvirtConfigGuestMemoryTuneTest(LibvirtConfigBaseTest):
- def test_config_memory_backing_none(self):
- obj = config.LibvirtConfigGuestMemoryTune()
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, "<memtune/>")
-
- def test_config_memory_backing_all(self):
- obj = config.LibvirtConfigGuestMemoryTune()
- obj.soft_limit = 6
- obj.hard_limit = 28
- obj.swap_hard_limit = 140
- obj.min_guarantee = 270
-
- xml = obj.to_xml()
- self.assertXmlEqual(xml, """
- <memtune>
- <hard_limit units="K">28</hard_limit>
- <soft_limit units="K">6</soft_limit>
- <swap_hard_limit units="K">140</swap_hard_limit>
- <min_guarantee units="K">270</min_guarantee>
- </memtune>""")
-
-
-class LibvirtConfigGuestNUMATuneTest(LibvirtConfigBaseTest):
- def test_config_numa_tune_none(self):
- obj = config.LibvirtConfigGuestNUMATune()
-
- xml = obj.to_xml()
- self.assertXmlEqual("<numatune/>", xml)
-
- def test_config_numa_tune_memory(self):
- obj = config.LibvirtConfigGuestNUMATune()
-
- numamemory = config.LibvirtConfigGuestNUMATuneMemory()
- numamemory.nodeset = [0, 1, 2, 3, 8]
-
- obj.memory = numamemory
-
- xml = obj.to_xml()
- self.assertXmlEqual("""
- <numatune>
- <memory mode="strict" nodeset="0-3,8"/>
- </numatune>""", xml)
-
- def test_config_numa_tune_memnodes(self):
- obj = config.LibvirtConfigGuestNUMATune()
-
- numamemnode0 = config.LibvirtConfigGuestNUMATuneMemNode()
- numamemnode0.cellid = 0
- numamemnode0.nodeset = [0, 1]
-
- numamemnode1 = config.LibvirtConfigGuestNUMATuneMemNode()
- numamemnode1.cellid = 1
- numamemnode1.nodeset = [2, 3]
-
- numamemnode2 = config.LibvirtConfigGuestNUMATuneMemNode()
- numamemnode2.cellid = 2
- numamemnode2.nodeset = [8]
-
- obj.memnodes.extend([numamemnode0,
- numamemnode1,
- numamemnode2])
-
- xml = obj.to_xml()
- self.assertXmlEqual("""
- <numatune>
- <memnode cellid="0" mode="strict" nodeset="0-1"/>
- <memnode cellid="1" mode="strict" nodeset="2-3"/>
- <memnode cellid="2" mode="strict" nodeset="8"/>
- </numatune>""", xml)
-
-
-class LibvirtConfigGuestMetadataNovaTest(LibvirtConfigBaseTest):
-
- def test_config_metadata(self):
- meta = config.LibvirtConfigGuestMetaNovaInstance()
- meta.package = "2014.2.3"
- meta.name = "moonbuggy"
- meta.creationTime = 1234567890
- meta.roottype = "image"
- meta.rootid = "fe55c69a-8b2e-4bbc-811a-9ad2023a0426"
-
- owner = config.LibvirtConfigGuestMetaNovaOwner()
- owner.userid = "3472c2a6-de91-4fb5-b618-42bc781ef670"
- owner.username = "buzz"
- owner.projectid = "f241e906-010e-4917-ae81-53f4fb8aa021"
- owner.projectname = "moonshot"
-
- meta.owner = owner
-
- flavor = config.LibvirtConfigGuestMetaNovaFlavor()
- flavor.name = "m1.lowgravity"
- flavor.vcpus = 8
- flavor.memory = 2048
- flavor.swap = 10
- flavor.disk = 50
- flavor.ephemeral = 10
-
- meta.flavor = flavor
-
- xml = meta.to_xml()
- self.assertXmlEqual(xml, """
- <nova:instance xmlns:nova='http://openstack.org/xmlns/libvirt/nova/1.0'>
- <nova:package version="2014.2.3"/>
- <nova:name>moonbuggy</nova:name>
- <nova:creationTime>2009-02-13 23:31:30</nova:creationTime>
- <nova:flavor name="m1.lowgravity">
- <nova:memory>2048</nova:memory>
- <nova:disk>50</nova:disk>
- <nova:swap>10</nova:swap>
- <nova:ephemeral>10</nova:ephemeral>
- <nova:vcpus>8</nova:vcpus>
- </nova:flavor>
- <nova:owner>
- <nova:user
- uuid="3472c2a6-de91-4fb5-b618-42bc781ef670">buzz</nova:user>
- <nova:project
- uuid="f241e906-010e-4917-ae81-53f4fb8aa021">moonshot</nova:project>
- </nova:owner>
- <nova:root type="image" uuid="fe55c69a-8b2e-4bbc-811a-9ad2023a0426"/>
- </nova:instance>
- """)
-
-
-class LibvirtConfigGuestIDMap(LibvirtConfigBaseTest):
- def test_config_id_map_parse_start_not_int(self):
- xmlin = "<uid start='a' target='20000' count='5'/>"
- obj = config.LibvirtConfigGuestIDMap()
-
- self.assertRaises(ValueError, obj.parse_str, xmlin)
-
- def test_config_id_map_parse_target_not_int(self):
- xmlin = "<uid start='2' target='a' count='5'/>"
- obj = config.LibvirtConfigGuestIDMap()
-
- self.assertRaises(ValueError, obj.parse_str, xmlin)
-
- def test_config_id_map_parse_count_not_int(self):
- xmlin = "<uid start='2' target='20000' count='a'/>"
- obj = config.LibvirtConfigGuestIDMap()
-
- self.assertRaises(ValueError, obj.parse_str, xmlin)
-
- def test_config_uid_map(self):
- obj = config.LibvirtConfigGuestUIDMap()
- obj.start = 1
- obj.target = 10000
- obj.count = 2
-
- xml = obj.to_xml()
- self.assertXmlEqual("<uid start='1' target='10000' count='2'/>", xml)
-
- def test_config_uid_map_parse(self):
- xmlin = "<uid start='2' target='20000' count='5'/>"
- obj = config.LibvirtConfigGuestUIDMap()
- obj.parse_str(xmlin)
-
- self.assertEqual(2, obj.start)
- self.assertEqual(20000, obj.target)
- self.assertEqual(5, obj.count)
-
- def test_config_gid_map(self):
- obj = config.LibvirtConfigGuestGIDMap()
- obj.start = 1
- obj.target = 10000
- obj.count = 2
-
- xml = obj.to_xml()
- self.assertXmlEqual("<gid start='1' target='10000' count='2'/>", xml)
-
- def test_config_gid_map_parse(self):
- xmlin = "<gid start='2' target='20000' count='5'/>"
- obj = config.LibvirtConfigGuestGIDMap()
- obj.parse_str(xmlin)
-
- self.assertEqual(2, obj.start)
- self.assertEqual(20000, obj.target)
- self.assertEqual(5, obj.count)
-
-
-class LibvirtConfigMemoryBalloonTest(LibvirtConfigBaseTest):
-
- def test_config_memory_balloon_period(self):
- balloon = config.LibvirtConfigMemoryBalloon()
- balloon.model = 'fake_virtio'
- balloon.period = 11
-
- xml = balloon.to_xml()
- expected_xml = """
- <memballoon model='fake_virtio'>
- <stats period='11'/>
- </memballoon>"""
-
- self.assertXmlEqual(expected_xml, xml)
diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py
deleted file mode 100644
index 505ad6d786..0000000000
--- a/nova/tests/virt/libvirt/test_driver.py
+++ /dev/null
@@ -1,12576 +0,0 @@
-# Copyright 2010 OpenStack Foundation
-# Copyright 2012 University Of Minho
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import __builtin__
-import contextlib
-import copy
-import datetime
-import errno
-import os
-import random
-import re
-import shutil
-import threading
-import time
-import uuid
-
-import eventlet
-from eventlet import greenthread
-import fixtures
-from lxml import etree
-import mock
-import mox
-from oslo.concurrency import lockutils
-from oslo.concurrency import processutils
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-from oslo.utils import encodeutils
-from oslo.utils import importutils
-from oslo.utils import timeutils
-from oslo.utils import units
-import six
-
-from nova.api.metadata import base as instance_metadata
-from nova.compute import arch
-from nova.compute import manager
-from nova.compute import power_state
-from nova.compute import task_states
-from nova.compute import utils as compute_utils
-from nova.compute import vm_mode
-from nova.compute import vm_states
-from nova import context
-from nova import db
-from nova import exception
-from nova.network import model as network_model
-from nova import objects
-from nova.openstack.common import fileutils
-from nova.openstack.common import loopingcall
-from nova.openstack.common import uuidutils
-from nova.pci import manager as pci_manager
-from nova import test
-from nova.tests import fake_block_device
-from nova.tests import fake_instance
-from nova.tests import fake_network
-import nova.tests.image.fake
-from nova.tests import matchers
-from nova.tests.objects import test_pci_device
-from nova.tests.virt.libvirt import fake_imagebackend
-from nova.tests.virt.libvirt import fake_libvirt_utils
-from nova.tests.virt.libvirt import fakelibvirt
-from nova import utils
-from nova import version
-from nova.virt import block_device as driver_block_device
-from nova.virt import configdrive
-from nova.virt.disk import api as disk
-from nova.virt import driver
-from nova.virt import event as virtevent
-from nova.virt import fake
-from nova.virt import firewall as base_firewall
-from nova.virt import hardware
-from nova.virt import images
-from nova.virt.libvirt import blockinfo
-from nova.virt.libvirt import config as vconfig
-from nova.virt.libvirt import driver as libvirt_driver
-from nova.virt.libvirt import firewall
-from nova.virt.libvirt import imagebackend
-from nova.virt.libvirt import rbd_utils
-from nova.virt.libvirt import utils as libvirt_utils
-
-try:
- import libvirt
-except ImportError:
- libvirt = fakelibvirt
-libvirt_driver.libvirt = libvirt
-
-
-CONF = cfg.CONF
-CONF.import_opt('compute_manager', 'nova.service')
-CONF.import_opt('host', 'nova.netconf')
-CONF.import_opt('my_ip', 'nova.netconf')
-CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
-CONF.import_opt('instances_path', 'nova.compute.manager')
-
-_fake_network_info = fake_network.fake_get_instance_nw_info
-
-_fake_NodeDevXml = \
- {"pci_0000_04_00_3": """
- <device>
- <name>pci_0000_04_00_3</name>
- <parent>pci_0000_00_01_1</parent>
- <driver>
- <name>igb</name>
- </driver>
- <capability type='pci'>
- <domain>0</domain>
- <bus>4</bus>
- <slot>0</slot>
- <function>3</function>
- <product id='0x1521'>I350 Gigabit Network Connection</product>
- <vendor id='0x8086'>Intel Corporation</vendor>
- <capability type='virt_functions'>
- <address domain='0x0000' bus='0x04' slot='0x10' function='0x3'/>
- <address domain='0x0000' bus='0x04' slot='0x10' function='0x7'/>
- <address domain='0x0000' bus='0x04' slot='0x11' function='0x3'/>
- <address domain='0x0000' bus='0x04' slot='0x11' function='0x7'/>
- </capability>
- </capability>
- </device>""",
- "pci_0000_04_10_7": """
- <device>
- <name>pci_0000_04_10_7</name>
- <parent>pci_0000_00_01_1</parent>
- <driver>
- <name>igbvf</name>
- </driver>
- <capability type='pci'>
- <domain>0</domain>
- <bus>4</bus>
- <slot>16</slot>
- <function>7</function>
- <product id='0x1520'>I350 Ethernet Controller Virtual Function</product>
- <vendor id='0x8086'>Intel Corporation</vendor>
- <capability type='phys_function'>
- <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
- </capability>
- <capability type='virt_functions'>
- </capability>
- </capability>
- </device>"""}
-
-
-def _concurrency(signal, wait, done, target, is_block_dev=False):
- signal.send()
- wait.wait()
- done.send()
-
-
-class FakeVirDomainSnapshot(object):
-
- def __init__(self, dom=None):
- self.dom = dom
-
- def delete(self, flags):
- pass
-
-
-class FakeVirtDomain(object):
-
- def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None):
- if uuidstr is None:
- uuidstr = str(uuid.uuid4())
- self.uuidstr = uuidstr
- self.id = id
- self.domname = name
- self._info = [power_state.RUNNING, 2048 * units.Mi, 1234 * units.Mi,
- None, None]
- if fake_xml:
- self._fake_dom_xml = fake_xml
- else:
- self._fake_dom_xml = """
- <domain type='kvm'>
- <devices>
- <disk type='file'>
- <source file='filename'/>
- </disk>
- </devices>
- </domain>
- """
-
- def name(self):
- if self.domname is None:
- return "fake-domain %s" % self
- else:
- return self.domname
-
- def ID(self):
- return self.id
-
- def info(self):
- return self._info
-
- def create(self):
- pass
-
- def managedSave(self, *args):
- pass
-
- def createWithFlags(self, launch_flags):
- pass
-
- def XMLDesc(self, *args):
- return self._fake_dom_xml
-
- def UUIDString(self):
- return self.uuidstr
-
- def attachDeviceFlags(self, xml, flags):
- pass
-
- def attachDevice(self, xml):
- pass
-
- def detachDeviceFlags(self, xml, flags):
- pass
-
- def snapshotCreateXML(self, xml, flags):
- pass
-
- def blockCommit(self, disk, base, top, bandwidth=0, flags=0):
- pass
-
- def blockRebase(self, disk, base, bandwidth=0, flags=0):
- pass
-
- def blockJobInfo(self, path, flags):
- pass
-
- def resume(self):
- pass
-
- def destroy(self):
- pass
-
-
-class CacheConcurrencyTestCase(test.NoDBTestCase):
- def setUp(self):
- super(CacheConcurrencyTestCase, self).setUp()
-
- self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
-
- # utils.synchronized() will create the lock_path for us if it
- # doesn't already exist. It will also delete it when it's done,
- # which can cause race conditions with the multiple threads we
- # use for tests. So, create the path here so utils.synchronized()
- # won't delete it out from under one of the threads.
- self.lock_path = os.path.join(CONF.instances_path, 'locks')
- fileutils.ensure_tree(self.lock_path)
-
- def fake_exists(fname):
- basedir = os.path.join(CONF.instances_path,
- CONF.image_cache_subdirectory_name)
- if fname == basedir or fname == self.lock_path:
- return True
- return False
-
- def fake_execute(*args, **kwargs):
- pass
-
- def fake_extend(image, size, use_cow=False):
- pass
-
- self.stubs.Set(os.path, 'exists', fake_exists)
- self.stubs.Set(utils, 'execute', fake_execute)
- self.stubs.Set(imagebackend.disk, 'extend', fake_extend)
- self.useFixture(fixtures.MonkeyPatch(
- 'nova.virt.libvirt.imagebackend.libvirt_utils',
- fake_libvirt_utils))
-
- def test_same_fname_concurrency(self):
- # Ensures that the same fname cache runs at a sequentially.
- uuid = uuidutils.generate_uuid()
-
- backend = imagebackend.Backend(False)
- wait1 = eventlet.event.Event()
- done1 = eventlet.event.Event()
- sig1 = eventlet.event.Event()
- thr1 = eventlet.spawn(backend.image({'name': 'instance',
- 'uuid': uuid},
- 'name').cache,
- _concurrency, 'fname', None,
- signal=sig1, wait=wait1, done=done1)
- eventlet.sleep(0)
- # Thread 1 should run before thread 2.
- sig1.wait()
-
- wait2 = eventlet.event.Event()
- done2 = eventlet.event.Event()
- sig2 = eventlet.event.Event()
- thr2 = eventlet.spawn(backend.image({'name': 'instance',
- 'uuid': uuid},
- 'name').cache,
- _concurrency, 'fname', None,
- signal=sig2, wait=wait2, done=done2)
-
- wait2.send()
- eventlet.sleep(0)
- try:
- self.assertFalse(done2.ready())
- finally:
- wait1.send()
- done1.wait()
- eventlet.sleep(0)
- self.assertTrue(done2.ready())
- # Wait on greenthreads to assert they didn't raise exceptions
- # during execution
- thr1.wait()
- thr2.wait()
-
- def test_different_fname_concurrency(self):
- # Ensures that two different fname caches are concurrent.
- uuid = uuidutils.generate_uuid()
-
- backend = imagebackend.Backend(False)
- wait1 = eventlet.event.Event()
- done1 = eventlet.event.Event()
- sig1 = eventlet.event.Event()
- thr1 = eventlet.spawn(backend.image({'name': 'instance',
- 'uuid': uuid},
- 'name').cache,
- _concurrency, 'fname2', None,
- signal=sig1, wait=wait1, done=done1)
- eventlet.sleep(0)
- # Thread 1 should run before thread 2.
- sig1.wait()
-
- wait2 = eventlet.event.Event()
- done2 = eventlet.event.Event()
- sig2 = eventlet.event.Event()
- thr2 = eventlet.spawn(backend.image({'name': 'instance',
- 'uuid': uuid},
- 'name').cache,
- _concurrency, 'fname1', None,
- signal=sig2, wait=wait2, done=done2)
- eventlet.sleep(0)
- # Wait for thread 2 to start.
- sig2.wait()
-
- wait2.send()
- tries = 0
- while not done2.ready() and tries < 10:
- eventlet.sleep(0)
- tries += 1
- try:
- self.assertTrue(done2.ready())
- finally:
- wait1.send()
- eventlet.sleep(0)
- # Wait on greenthreads to assert they didn't raise exceptions
- # during execution
- thr1.wait()
- thr2.wait()
-
-
-class FakeVolumeDriver(object):
- def __init__(self, *args, **kwargs):
- pass
-
- def attach_volume(self, *args):
- pass
-
- def detach_volume(self, *args):
- pass
-
- def get_xml(self, *args):
- return ""
-
- def get_config(self, *args):
- """Connect the volume to a fake device."""
- conf = vconfig.LibvirtConfigGuestDisk()
- conf.source_type = "network"
- conf.source_protocol = "fake"
- conf.source_name = "fake"
- conf.target_dev = "fake"
- conf.target_bus = "fake"
- return conf
-
- def connect_volume(self, *args):
- """Connect the volume to a fake device."""
- return self.get_config()
-
-
-class FakeConfigGuestDisk(object):
- def __init__(self, *args, **kwargs):
- self.source_type = None
- self.driver_cache = None
-
-
-class FakeConfigGuest(object):
- def __init__(self, *args, **kwargs):
- self.driver_cache = None
-
-
-class FakeNodeDevice(object):
- def __init__(self, fakexml):
- self.xml = fakexml
-
- def XMLDesc(self, *args):
- return self.xml
-
-
-class LibvirtConnTestCase(test.NoDBTestCase):
-
- REQUIRES_LOCKING = True
-
- def setUp(self):
- super(LibvirtConnTestCase, self).setUp()
- self.flags(fake_call=True)
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.get_admin_context()
- temp_dir = self.useFixture(fixtures.TempDir()).path
- self.flags(instances_path=temp_dir)
- self.flags(snapshots_directory=temp_dir, group='libvirt')
- self.useFixture(fixtures.MonkeyPatch(
- 'nova.virt.libvirt.driver.libvirt_utils',
- fake_libvirt_utils))
- # Force libvirt to return a host UUID that matches the serial in
- # nova.tests.fakelibvirt. This is necessary because the host UUID
- # returned by libvirt becomes the serial whose value is checked for in
- # test_xml_and_uri_* below.
- self.useFixture(fixtures.MonkeyPatch(
- 'nova.virt.libvirt.driver.LibvirtDriver._get_host_uuid',
- lambda _: 'cef19ce0-0ca2-11df-855d-b19fbce37686'))
- # Prevent test suite trying to find /etc/machine-id
- # which isn't guaranteed to exist. Instead it will use
- # the host UUID from libvirt which we mock above
- self.flags(sysinfo_serial="hardware", group="libvirt")
-
- self.useFixture(fixtures.MonkeyPatch(
- 'nova.virt.libvirt.imagebackend.libvirt_utils',
- fake_libvirt_utils))
-
- def fake_extend(image, size, use_cow=False):
- pass
-
- self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)
-
- self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
- imagebackend.Image._get_driver_format)
-
- class FakeConn():
- def baselineCPU(self, cpu, flag):
- """Add new libvirt API."""
- return """<cpu mode='custom' match='exact'>
- <model fallback='allow'>Westmere</model>
- <vendor>Intel</vendor>
- <feature policy='require' name='aes'/>
- <feature policy='require' name='hypervisor'/>
- </cpu>"""
-
- def getCapabilities(self):
- """Ensure standard capabilities being returned."""
- return """<capabilities>
- <host><cpu><arch>x86_64</arch>
- <feature policy='require' name='hypervisor'/>
- </cpu></host>
- </capabilities>"""
-
- def getVersion(self):
- return 1005001
-
- def getLibVersion(self):
- return (0 * 1000 * 1000) + (9 * 1000) + 11
-
- def domainEventRegisterAny(self, *args, **kwargs):
- pass
-
- def registerCloseCallback(self, cb, opaque):
- pass
-
- def nwfilterDefineXML(self, *args, **kwargs):
- pass
-
- def nodeDeviceLookupByName(self, x):
- pass
-
- def listDevices(self, cap, flags):
- return []
-
- def lookupByName(self, name):
- pass
-
- def getHostname(self):
- return "mustard"
-
- def getType(self):
- return "QEMU"
-
- def numOfDomains(self):
- return 0
-
- def listDomainsID(self):
- return []
-
- def listDefinedDomains(self):
- return []
-
- def getInfo(self):
- return [arch.X86_64, 123456, 2, 2000,
- 2, 1, 1, 1]
-
- self.conn = FakeConn()
- self.stubs.Set(libvirt_driver.LibvirtDriver, '_connect',
- lambda *a, **k: self.conn)
-
- sys_meta = {
- 'instance_type_memory_mb': 2048,
- 'instance_type_swap': 0,
- 'instance_type_vcpu_weight': None,
- 'instance_type_root_gb': 1,
- 'instance_type_id': 2,
- 'instance_type_name': u'm1.small',
- 'instance_type_ephemeral_gb': 0,
- 'instance_type_rxtx_factor': 1.0,
- 'instance_type_flavorid': u'1',
- 'instance_type_vcpus': 1
- }
-
- self.image_service = nova.tests.image.fake.stub_out_image_service(
- self.stubs)
- self.test_instance = {
- 'id': 1,
- 'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
- 'memory_kb': '1024000',
- 'basepath': '/some/path',
- 'bridge_name': 'br100',
- 'display_name': "Acme webserver",
- 'vcpus': 2,
- 'project_id': 'fake',
- 'bridge': 'br101',
- 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
- 'root_gb': 10,
- 'ephemeral_gb': 20,
- 'instance_type_id': '5', # m1.small
- 'extra_specs': {},
- 'system_metadata': sys_meta,
- 'pci_devices': objects.PciDeviceList(),
- 'numa_topology': None,
- 'config_drive': None,
- 'vm_mode': None,
- 'kernel_id': None,
- 'ramdisk_id': None,
- 'os_type': 'linux',
- 'user_id': '838a72b0-0d54-4827-8fd6-fb1227633ceb',
- 'ephemeral_key_uuid': None,
- }
-
- def relpath(self, path):
- return os.path.relpath(path, CONF.instances_path)
-
- def tearDown(self):
- nova.tests.image.fake.FakeImageService_reset()
- super(LibvirtConnTestCase, self).tearDown()
-
- def create_fake_libvirt_mock(self, **kwargs):
- """Defining mocks for LibvirtDriver(libvirt is not used)."""
-
- # A fake libvirt.virConnect
- class FakeLibvirtDriver(object):
- def defineXML(self, xml):
- return FakeVirtDomain()
-
- # Creating mocks
- volume_driver = ('iscsi=nova.tests.virt.libvirt.test_driver'
- '.FakeVolumeDriver')
- self.flags(volume_drivers=[volume_driver],
- group='libvirt')
- fake = FakeLibvirtDriver()
- # Customizing above fake if necessary
- for key, val in kwargs.items():
- fake.__setattr__(key, val)
-
- self.stubs.Set(libvirt_driver.LibvirtDriver, '_conn', fake)
-
- def fake_lookup(self, instance_name):
- return FakeVirtDomain()
-
- def fake_execute(self, *args, **kwargs):
- open(args[-1], "a").close()
-
- def _create_service(self, **kwargs):
- service_ref = {'host': kwargs.get('host', 'dummy'),
- 'disabled': kwargs.get('disabled', False),
- 'binary': 'nova-compute',
- 'topic': 'compute',
- 'report_count': 0}
-
- return objects.Service(**service_ref)
-
- def _get_launch_flags(self, conn, network_info, power_on=True,
- vifs_already_plugged=False):
- timeout = CONF.vif_plugging_timeout
-
- events = []
- if (conn._conn_supports_start_paused and
- utils.is_neutron() and
- not vifs_already_plugged and
- power_on and timeout):
- events = conn._get_neutron_events(network_info)
-
- launch_flags = events and libvirt.VIR_DOMAIN_START_PAUSED or 0
-
- return launch_flags
-
- def test_public_api_signatures(self):
- baseinst = driver.ComputeDriver(None)
- inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- self.assertPublicAPISignatures(baseinst, inst)
-
- @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_min_version")
- def test_min_version_start_ok(self, mock_version):
- mock_version.return_value = True
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- drvr.init_host("dummyhost")
-
- @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_min_version")
- def test_min_version_start_abort(self, mock_version):
- mock_version.return_value = False
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- self.assertRaises(exception.NovaException,
- drvr.init_host,
- "dummyhost")
-
- @mock.patch.object(objects.Service, 'get_by_compute_host')
- def test_set_host_enabled_with_disable(self, mock_svc):
- # Tests disabling an enabled host.
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- svc = self._create_service(host='fake-mini')
- mock_svc.return_value = svc
- conn._set_host_enabled(False)
- self.assertTrue(svc.disabled)
-
- @mock.patch.object(objects.Service, 'get_by_compute_host')
- def test_set_host_enabled_with_enable(self, mock_svc):
- # Tests enabling a disabled host.
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- svc = self._create_service(disabled=True, host='fake-mini')
- mock_svc.return_value = svc
- conn._set_host_enabled(True)
- self.assertTrue(svc.disabled)
-
- @mock.patch.object(objects.Service, 'get_by_compute_host')
- def test_set_host_enabled_with_enable_state_enabled(self, mock_svc):
- # Tests enabling an enabled host.
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- svc = self._create_service(disabled=False, host='fake-mini')
- mock_svc.return_value = svc
- conn._set_host_enabled(True)
- self.assertFalse(svc.disabled)
-
- @mock.patch.object(objects.Service, 'get_by_compute_host')
- def test_set_host_enabled_with_disable_state_disabled(self, mock_svc):
- # Tests disabling a disabled host.
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- svc = self._create_service(disabled=True, host='fake-mini')
- mock_svc.return_value = svc
- conn._set_host_enabled(False)
- self.assertTrue(svc.disabled)
-
- def test_set_host_enabled_swallows_exceptions(self):
- # Tests that set_host_enabled will swallow exceptions coming from the
- # db_api code so they don't break anything calling it, e.g. the
- # _get_new_connection method.
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- with mock.patch.object(db, 'service_get_by_compute_host') as db_mock:
- # Make db.service_get_by_compute_host raise NovaException; this
- # is more robust than just raising ComputeHostNotFound.
- db_mock.side_effect = exception.NovaException
- conn._set_host_enabled(False)
-
- def test_prepare_pci_device(self):
-
- pci_devices = [dict(hypervisor_name='xxx')]
-
- self.flags(virt_type='xen', group='libvirt')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- class FakeDev():
- def attach(self):
- pass
-
- def dettach(self):
- pass
-
- def reset(self):
- pass
-
- self.mox.StubOutWithMock(self.conn, 'nodeDeviceLookupByName')
- self.conn.nodeDeviceLookupByName('xxx').AndReturn(FakeDev())
- self.conn.nodeDeviceLookupByName('xxx').AndReturn(FakeDev())
- self.mox.ReplayAll()
- conn._prepare_pci_devices_for_use(pci_devices)
-
- def test_prepare_pci_device_exception(self):
-
- pci_devices = [dict(hypervisor_name='xxx',
- id='id1',
- instance_uuid='uuid')]
-
- self.flags(virt_type='xen', group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- class FakeDev():
-
- def attach(self):
- pass
-
- def dettach(self):
- raise libvirt.libvirtError("xxxxx")
-
- def reset(self):
- pass
-
- self.stubs.Set(self.conn, 'nodeDeviceLookupByName',
- lambda x: FakeDev())
- self.assertRaises(exception.PciDevicePrepareFailed,
- conn._prepare_pci_devices_for_use, pci_devices)
-
- def test_detach_pci_devices_exception(self):
-
- pci_devices = [dict(hypervisor_name='xxx',
- id='id1',
- instance_uuid='uuid')]
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
- '_has_min_version')
- libvirt_driver.LibvirtDriver._has_min_version = lambda x, y: False
-
- self.assertRaises(exception.PciDeviceDetachFailed,
- conn._detach_pci_devices, None, pci_devices)
-
- def test_detach_pci_devices(self):
-
- fake_domXML1 =\
- """<domain> <devices>
- <disk type='file' device='disk'>
- <driver name='qemu' type='qcow2' cache='none'/>
- <source file='xxx'/>
- <target dev='vda' bus='virtio'/>
- <alias name='virtio-disk0'/>
- <address type='pci' domain='0x0000' bus='0x00'
- slot='0x04' function='0x0'/>
- </disk>
- <hostdev mode="subsystem" type="pci" managed="yes">
- <source>
- <address function="0x1" slot="0x10" domain="0x0000"
- bus="0x04"/>
- </source>
- </hostdev></devices></domain>"""
-
- pci_devices = [dict(hypervisor_name='xxx',
- id='id1',
- instance_uuid='uuid',
- address="0001:04:10:1")]
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
- '_has_min_version')
- libvirt_driver.LibvirtDriver._has_min_version = lambda x, y: True
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
- '_get_guest_pci_device')
-
- class FakeDev():
- def to_xml(self):
- pass
-
- libvirt_driver.LibvirtDriver._get_guest_pci_device =\
- lambda x, y: FakeDev()
-
- class FakeDomain():
- def detachDeviceFlags(self, xml, flag):
- pci_devices[0]['hypervisor_name'] = 'marked'
- pass
-
- def XMLDesc(self, flag):
- return fake_domXML1
-
- conn._detach_pci_devices(FakeDomain(), pci_devices)
- self.assertEqual(pci_devices[0]['hypervisor_name'], 'marked')
-
- def test_detach_pci_devices_timeout(self):
-
- fake_domXML1 =\
- """<domain>
- <devices>
- <hostdev mode="subsystem" type="pci" managed="yes">
- <source>
- <address function="0x1" slot="0x10" domain="0x0000" bus="0x04"/>
- </source>
- </hostdev>
- </devices>
- </domain>"""
-
- pci_devices = [dict(hypervisor_name='xxx',
- id='id1',
- instance_uuid='uuid',
- address="0000:04:10:1")]
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
- '_has_min_version')
- libvirt_driver.LibvirtDriver._has_min_version = lambda x, y: True
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
- '_get_guest_pci_device')
-
- class FakeDev():
- def to_xml(self):
- pass
-
- libvirt_driver.LibvirtDriver._get_guest_pci_device =\
- lambda x, y: FakeDev()
-
- class FakeDomain():
- def detachDeviceFlags(self, xml, flag):
- pass
-
- def XMLDesc(self, flag):
- return fake_domXML1
- self.assertRaises(exception.PciDeviceDetachFailed,
- conn._detach_pci_devices, FakeDomain(), pci_devices)
-
- def test_get_connector(self):
- initiator = 'fake.initiator.iqn'
- ip = 'fakeip'
- host = 'fakehost'
- wwpns = ['100010604b019419']
- wwnns = ['200010604b019419']
- self.flags(my_ip=ip)
- self.flags(host=host)
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- expected = {
- 'ip': ip,
- 'initiator': initiator,
- 'host': host,
- 'wwpns': wwpns,
- 'wwnns': wwnns
- }
- volume = {
- 'id': 'fake'
- }
- result = conn.get_volume_connector(volume)
- self.assertThat(expected, matchers.DictMatches(result))
-
- def test_lifecycle_event_registration(self):
- calls = []
-
- def fake_registerErrorHandler(*args, **kwargs):
- calls.append('fake_registerErrorHandler')
-
- def fake_get_host_capabilities(**args):
- cpu = vconfig.LibvirtConfigGuestCPU()
- cpu.arch = arch.ARMV7
-
- caps = vconfig.LibvirtConfigCaps()
- caps.host = vconfig.LibvirtConfigCapsHost()
- caps.host.cpu = cpu
- calls.append('fake_get_host_capabilities')
- return caps
-
- @mock.patch.object(libvirt, 'registerErrorHandler',
- side_effect=fake_registerErrorHandler)
- @mock.patch.object(libvirt_driver.LibvirtDriver,
- '_get_host_capabilities',
- side_effect=fake_get_host_capabilities)
- def test_init_host(get_host_capabilities, register_error_handler):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- conn.init_host("test_host")
-
- test_init_host()
- # NOTE(dkliban): Will fail if get_host_capabilities is called before
- # registerErrorHandler
- self.assertEqual(['fake_registerErrorHandler',
- 'fake_get_host_capabilities'], calls)
-
- @mock.patch.object(libvirt_driver, 'LOG')
- def test_connect_auth_cb_exception(self, log_mock):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- creds = dict(authname='nova', password='verybadpass')
- self.assertRaises(exception.NovaException,
- conn._connect_auth_cb, creds, False)
- self.assertEqual(0, len(log_mock.method_calls),
- 'LOG should not be used in _connect_auth_cb.')
-
- def test_sanitize_log_to_xml(self):
- # setup fake data
- data = {'auth_password': 'scrubme'}
- bdm = [{'connection_info': {'data': data}}]
- bdi = {'block_device_mapping': bdm}
-
- # Tests that the parameters to the _get_guest_xml method
- # are sanitized for passwords when logged.
- def fake_debug(*args, **kwargs):
- if 'auth_password' in args[0]:
- self.assertNotIn('scrubme', args[0])
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- conf = mock.Mock()
- with contextlib.nested(
- mock.patch.object(libvirt_driver.LOG, 'debug',
- side_effect=fake_debug),
- mock.patch.object(conn, '_get_guest_config', return_value=conf)
- ) as (
- debug_mock, conf_mock
- ):
- conn._get_guest_xml(self.context, self.test_instance,
- network_info={}, disk_info={},
- image_meta={}, block_device_info=bdi)
- # we don't care what the log message is, we just want to make sure
- # our stub method is called which asserts the password is scrubbed
- self.assertTrue(debug_mock.called)
-
- def test_close_callback(self):
- self.close_callback = None
-
- def set_close_callback(cb, opaque):
- self.close_callback = cb
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- service_mock = mock.MagicMock()
- service_mock.disabled.return_value = False
- with contextlib.nested(
- mock.patch.object(conn, "_connect", return_value=self.conn),
- mock.patch.object(self.conn, "registerCloseCallback",
- side_effect=set_close_callback),
- mock.patch.object(objects.Service, "get_by_compute_host",
- return_value=service_mock)):
-
- # verify that the driver registers for the close callback
- # and re-connects after receiving the callback
- conn._get_connection()
- self.assertFalse(service_mock.disabled)
- self.assertTrue(self.close_callback)
- conn._init_events_pipe()
- self.close_callback(self.conn, 1, None)
- conn._dispatch_events()
-
- self.assertTrue(service_mock.disabled)
- conn._get_connection()
-
- def test_close_callback_bad_signature(self):
- '''Validates that a connection to libvirt exist,
- even when registerCloseCallback method has a different
- number of arguments in the libvirt python library.
- '''
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- service_mock = mock.MagicMock()
- service_mock.disabled.return_value = False
- with contextlib.nested(
- mock.patch.object(conn, "_connect", return_value=self.conn),
- mock.patch.object(self.conn, "registerCloseCallback",
- side_effect=TypeError('dd')),
- mock.patch.object(objects.Service, "get_by_compute_host",
- return_value=service_mock)):
-
- connection = conn._get_connection()
- self.assertTrue(connection)
-
- def test_close_callback_not_defined(self):
- '''Validates that a connection to libvirt exist,
- even when registerCloseCallback method missing from
- the libvirt python library.
- '''
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- service_mock = mock.MagicMock()
- service_mock.disabled.return_value = False
- with contextlib.nested(
- mock.patch.object(conn, "_connect", return_value=self.conn),
- mock.patch.object(self.conn, "registerCloseCallback",
- side_effect=AttributeError('dd')),
- mock.patch.object(objects.Service, "get_by_compute_host",
- return_value=service_mock)):
-
- connection = conn._get_connection()
- self.assertTrue(connection)
-
- def test_cpu_features_bug_1217630(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- # Test old version of libvirt, it shouldn't see the `aes' feature
- with mock.patch('nova.virt.libvirt.driver.libvirt') as mock_libvirt:
- del mock_libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
- caps = conn._get_host_capabilities()
- self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])
-
- # Test new verion of libvirt, should find the `aes' feature
- with mock.patch('nova.virt.libvirt.driver.libvirt') as mock_libvirt:
- mock_libvirt['VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'] = 1
- # Cleanup the capabilities cache firstly
- conn._caps = None
- caps = conn._get_host_capabilities()
- self.assertIn('aes', [x.name for x in caps.host.cpu.features])
-
- def test_cpu_features_are_not_duplicated(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- # Test old version of libvirt. Should return single 'hypervisor'
- with mock.patch('nova.virt.libvirt.driver.libvirt') as mock_libvirt:
- del mock_libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
- caps = conn._get_host_capabilities()
- cnt = [x.name for x in caps.host.cpu.features].count('hypervisor')
- self.assertEqual(1, cnt)
-
- # Test new version of libvirt. Should still return single 'hypervisor'
- with mock.patch('nova.virt.libvirt.driver.libvirt') as mock_libvirt:
- mock_libvirt['VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'] = 1
- # Cleanup the capabilities cache firstly
- conn._caps = None
- caps = conn._get_host_capabilities()
- cnt = [x.name for x in caps.host.cpu.features].count('hypervisor')
- self.assertEqual(1, cnt)
-
- def test_baseline_cpu_not_supported(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- # `mock` has trouble stubbing attributes that don't exist yet, so
- # fallback to plain-Python attribute setting/deleting
- cap_str = 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'
- if not hasattr(libvirt_driver.libvirt, cap_str):
- setattr(libvirt_driver.libvirt, cap_str, True)
- self.addCleanup(delattr, libvirt_driver.libvirt, cap_str)
-
- # Handle just the NO_SUPPORT error
- not_supported_exc = fakelibvirt.make_libvirtError(
- libvirt.libvirtError,
- 'this function is not supported by the connection driver:'
- ' virConnectBaselineCPU',
- error_code=libvirt.VIR_ERR_NO_SUPPORT)
-
- with mock.patch.object(conn._conn, 'baselineCPU',
- side_effect=not_supported_exc):
- caps = conn._get_host_capabilities()
- self.assertEqual(vconfig.LibvirtConfigCaps, type(caps))
- self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])
-
- # Clear cached result so we can test again...
- conn._caps = None
-
- # Other errors should not be caught
- other_exc = fakelibvirt.make_libvirtError(
- libvirt.libvirtError,
- 'other exc',
- error_code=libvirt.VIR_ERR_NO_DOMAIN)
-
- with mock.patch.object(conn._conn, 'baselineCPU',
- side_effect=other_exc):
- self.assertRaises(libvirt.libvirtError,
- conn._get_host_capabilities)
-
- def test_lxc_get_host_capabilities_failed(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- with mock.patch.object(conn._conn, 'baselineCPU', return_value=-1):
- setattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES', 1)
- caps = conn._get_host_capabilities()
- delattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES')
- self.assertEqual(vconfig.LibvirtConfigCaps, type(caps))
- self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- @mock.patch.object(time, "time")
- def test_get_guest_config(self, time_mock, mock_flavor):
- time_mock.return_value = 1234567.89
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- test_instance = copy.deepcopy(self.test_instance)
- test_instance["display_name"] = "purple tomatoes"
-
- ctxt = context.RequestContext(project_id=123,
- project_name="aubergine",
- user_id=456,
- user_name="pie")
-
- flavor = objects.Flavor(name='m1.small',
- memory_mb=6,
- vcpus=28,
- root_gb=496,
- ephemeral_gb=8128,
- swap=33550336,
- extra_specs={})
- instance_ref = objects.Instance(**test_instance)
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- cfg = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- {}, disk_info,
- context=ctxt)
-
- self.assertEqual(cfg.uuid, instance_ref["uuid"])
- self.assertEqual(cfg.pae, False)
- self.assertEqual(cfg.acpi, True)
- self.assertEqual(cfg.apic, True)
- self.assertEqual(cfg.memory, 6 * units.Ki)
- self.assertEqual(cfg.vcpus, 28)
- self.assertEqual(cfg.os_type, vm_mode.HVM)
- self.assertEqual(cfg.os_boot_dev, ["hd"])
- self.assertIsNone(cfg.os_root)
- self.assertEqual(len(cfg.devices), 9)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestInterface)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestInput)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[8],
- vconfig.LibvirtConfigMemoryBalloon)
- self.assertEqual(len(cfg.metadata), 1)
- self.assertIsInstance(cfg.metadata[0],
- vconfig.LibvirtConfigGuestMetaNovaInstance)
- self.assertEqual(version.version_string_with_package(),
- cfg.metadata[0].package)
- self.assertEqual("purple tomatoes",
- cfg.metadata[0].name)
- self.assertEqual(1234567.89,
- cfg.metadata[0].creationTime)
- self.assertEqual("image",
- cfg.metadata[0].roottype)
- self.assertEqual(str(instance_ref["image_ref"]),
- cfg.metadata[0].rootid)
-
- self.assertIsInstance(cfg.metadata[0].owner,
- vconfig.LibvirtConfigGuestMetaNovaOwner)
- self.assertEqual(456,
- cfg.metadata[0].owner.userid)
- self.assertEqual("pie",
- cfg.metadata[0].owner.username)
- self.assertEqual(123,
- cfg.metadata[0].owner.projectid)
- self.assertEqual("aubergine",
- cfg.metadata[0].owner.projectname)
-
- self.assertIsInstance(cfg.metadata[0].flavor,
- vconfig.LibvirtConfigGuestMetaNovaFlavor)
- self.assertEqual("m1.small",
- cfg.metadata[0].flavor.name)
- self.assertEqual(6,
- cfg.metadata[0].flavor.memory)
- self.assertEqual(28,
- cfg.metadata[0].flavor.vcpus)
- self.assertEqual(496,
- cfg.metadata[0].flavor.disk)
- self.assertEqual(8128,
- cfg.metadata[0].flavor.ephemeral)
- self.assertEqual(33550336,
- cfg.metadata[0].flavor.swap)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_lxc(self, mock_flavor):
- self.flags(virt_type='lxc', group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- cfg = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- None, {'mapping': {}})
- self.assertEqual(instance_ref["uuid"], cfg.uuid)
- self.assertEqual(2 * units.Mi, cfg.memory)
- self.assertEqual(1, cfg.vcpus)
- self.assertEqual(vm_mode.EXE, cfg.os_type)
- self.assertEqual("/sbin/init", cfg.os_init_path)
- self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline)
- self.assertIsNone(cfg.os_root)
- self.assertEqual(3, len(cfg.devices))
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestFilesys)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestInterface)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestConsole)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_lxc_with_id_maps(self, mock_flavor):
- self.flags(virt_type='lxc', group='libvirt')
- self.flags(uid_maps=['0:1000:100'], group='libvirt')
- self.flags(gid_maps=['0:1000:100'], group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- cfg = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- None, {'mapping': {}})
- self.assertEqual(instance_ref["uuid"], cfg.uuid)
- self.assertEqual(2 * units.Mi, cfg.memory)
- self.assertEqual(1, cfg.vcpus)
- self.assertEqual(vm_mode.EXE, cfg.os_type)
- self.assertEqual("/sbin/init", cfg.os_init_path)
- self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline)
- self.assertIsNone(cfg.os_root)
- self.assertEqual(3, len(cfg.devices))
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestFilesys)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestInterface)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestConsole)
- self.assertEqual(len(cfg.idmaps), 2)
- self.assertIsInstance(cfg.idmaps[0],
- vconfig.LibvirtConfigGuestUIDMap)
- self.assertIsInstance(cfg.idmaps[1],
- vconfig.LibvirtConfigGuestGIDMap)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_numa_host_instance_fits(self, mock_flavor):
- instance_ref = objects.Instance(**self.test_instance)
- flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
- ephemeral_gb=8128, swap=33550336, name='fake',
- extra_specs={})
- mock_flavor.return_value = flavor
-
- caps = vconfig.LibvirtConfigCaps()
- caps.host = vconfig.LibvirtConfigCapsHost()
- caps.host.cpu = vconfig.LibvirtConfigCPU()
- caps.host.cpu.arch = "x86_64"
- caps.host.topology = self._fake_caps_numa_topology()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- with contextlib.nested(
- mock.patch.object(conn, '_has_min_version', return_value=True),
- mock.patch.object(
- conn, "_get_host_capabilities", return_value=caps),
- mock.patch.object(
- random, 'choice', side_effect=lambda cells: cells[0])):
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
- self.assertEqual(set([0, 1]), cfg.cpuset)
- self.assertIsNone(cfg.cputune)
- self.assertIsNone(cfg.cpu.numa)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_numa_host_instance_no_fit(self, mock_flavor):
- instance_ref = objects.Instance(**self.test_instance)
- flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
- ephemeral_gb=8128, swap=33550336, name='fake',
- extra_specs={})
- mock_flavor.return_value = flavor
-
- caps = vconfig.LibvirtConfigCaps()
- caps.host = vconfig.LibvirtConfigCapsHost()
- caps.host.cpu = vconfig.LibvirtConfigCPU()
- caps.host.cpu.arch = "x86_64"
- caps.host.topology = self._fake_caps_numa_topology()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- with contextlib.nested(
- mock.patch.object(
- conn, "_get_host_capabilities", return_value=caps),
- mock.patch.object(
- hardware, 'get_vcpu_pin_set', return_value=set([3])),
- mock.patch.object(random, 'choice')
- ) as (get_host_cap_mock,
- get_vcpu_pin_set_mock, choice_mock):
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
- self.assertFalse(choice_mock.called)
- self.assertEqual(set([3]), cfg.cpuset)
- self.assertIsNone(cfg.cputune)
- self.assertIsNone(cfg.cpu.numa)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(self,
- mock_flavor):
- instance_ref = objects.Instance(**self.test_instance)
- flavor = objects.Flavor(memory_mb=1024, vcpus=2, root_gb=496,
- ephemeral_gb=8128, swap=33550336, name='fake',
- extra_specs={})
- mock_flavor.return_value = flavor
-
- caps = vconfig.LibvirtConfigCaps()
- caps.host = vconfig.LibvirtConfigCapsHost()
- caps.host.cpu = vconfig.LibvirtConfigCPU()
- caps.host.cpu.arch = "x86_64"
- caps.host.topology = self._fake_caps_numa_topology()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- with contextlib.nested(
- mock.patch.object(conn, '_has_min_version', return_value=True),
- mock.patch.object(
- conn, "_get_host_capabilities", return_value=caps),
- mock.patch.object(
- hardware, 'get_vcpu_pin_set', return_value=set([2, 3])),
- mock.patch.object(
- random, 'choice', side_effect=lambda cells: cells[0])
- ) as (has_min_version_mock, get_host_cap_mock,
- get_vcpu_pin_set_mock, choice_mock):
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
- # NOTE(ndipanov): we make sure that pin_set was taken into account
- # when choosing viable cells
- choice_mock.assert_called_once_with([set([2, 3])])
- self.assertEqual(set([2, 3]), cfg.cpuset)
- self.assertIsNone(cfg.cputune)
- self.assertIsNone(cfg.cpu.numa)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_non_numa_host_instance_topo(self, mock_flavor):
- instance_topology = objects.InstanceNUMATopology.obj_from_topology(
- hardware.VirtNUMAInstanceTopology(
- cells=[hardware.VirtNUMATopologyCellInstance(
- 0, set([0]), 1024),
- hardware.VirtNUMATopologyCellInstance(
- 1, set([2]), 1024)]))
- instance_ref = objects.Instance(**self.test_instance)
- instance_ref.numa_topology = instance_topology
- flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
- ephemeral_gb=8128, swap=33550336, name='fake',
- extra_specs={})
- mock_flavor.return_value = flavor
-
- caps = vconfig.LibvirtConfigCaps()
- caps.host = vconfig.LibvirtConfigCapsHost()
- caps.host.cpu = vconfig.LibvirtConfigCPU()
- caps.host.cpu.arch = "x86_64"
- caps.host.topology = None
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- with contextlib.nested(
- mock.patch.object(
- objects.InstanceNUMATopology, "get_by_instance_uuid",
- return_value=instance_topology),
- mock.patch.object(conn, '_has_min_version', return_value=True),
- mock.patch.object(
- conn, "_get_host_capabilities", return_value=caps)):
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
- self.assertIsNone(cfg.cpuset)
- self.assertIsNone(cfg.cputune)
- self.assertIsNotNone(cfg.cpu.numa)
- for instance_cell, numa_cfg_cell in zip(
- instance_topology.cells, cfg.cpu.numa.cells):
- self.assertEqual(instance_cell.id, numa_cfg_cell.id)
- self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
- self.assertEqual(instance_cell.memory * units.Ki,
- numa_cfg_cell.memory)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_numa_host_instance_topo(self, mock_flavor):
- instance_topology = objects.InstanceNUMATopology.obj_from_topology(
- hardware.VirtNUMAInstanceTopology(
- cells=[hardware.VirtNUMATopologyCellInstance(
- 0, set([0, 1]), 1024),
- hardware.VirtNUMATopologyCellInstance(
- 1, set([2, 3]),
- 1024)]))
- instance_ref = objects.Instance(**self.test_instance)
- instance_ref.numa_topology = instance_topology
- flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
- ephemeral_gb=8128, swap=33550336, name='fake',
- extra_specs={})
- mock_flavor.return_value = flavor
-
- caps = vconfig.LibvirtConfigCaps()
- caps.host = vconfig.LibvirtConfigCapsHost()
- caps.host.cpu = vconfig.LibvirtConfigCPU()
- caps.host.cpu.arch = "x86_64"
- caps.host.topology = self._fake_caps_numa_topology()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- with contextlib.nested(
- mock.patch.object(
- objects.Flavor, "get_by_id", return_value=flavor),
- mock.patch.object(
- objects.InstanceNUMATopology, "get_by_instance_uuid",
- return_value=instance_topology),
- mock.patch.object(conn, '_has_min_version', return_value=True),
- mock.patch.object(
- conn, "_get_host_capabilities", return_value=caps),
- mock.patch.object(
- hardware, 'get_vcpu_pin_set', return_value=set([0, 1, 2]))
- ):
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
- self.assertIsNone(cfg.cpuset)
- # Test that the pinning is correct and limited to allowed only
- self.assertEqual(0, cfg.cputune.vcpupin[0].id)
- self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[0].cpuset)
- self.assertEqual(1, cfg.cputune.vcpupin[1].id)
- self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[1].cpuset)
- self.assertEqual(2, cfg.cputune.vcpupin[2].id)
- self.assertEqual(set([2]), cfg.cputune.vcpupin[2].cpuset)
- self.assertEqual(3, cfg.cputune.vcpupin[3].id)
- self.assertEqual(set([2]), cfg.cputune.vcpupin[3].cpuset)
- self.assertIsNotNone(cfg.cpu.numa)
- for instance_cell, numa_cfg_cell in zip(
- instance_topology.cells, cfg.cpu.numa.cells):
- self.assertEqual(instance_cell.id, numa_cfg_cell.id)
- self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
- self.assertEqual(instance_cell.memory * units.Ki,
- numa_cfg_cell.memory)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_clock(self, mock_flavor):
- self.flags(virt_type='kvm', group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- image_meta = {}
- hpet_map = {
- arch.X86_64: True,
- arch.I686: True,
- arch.PPC: False,
- arch.PPC64: False,
- arch.ARMV7: False,
- arch.AARCH64: False,
- }
-
- for guestarch, expect_hpet in hpet_map.items():
- with mock.patch.object(libvirt_driver.libvirt_utils,
- 'get_arch',
- return_value=guestarch):
- cfg = conn._get_guest_config(instance_ref, [],
- image_meta,
- disk_info)
- self.assertIsInstance(cfg.clock,
- vconfig.LibvirtConfigGuestClock)
- self.assertEqual(cfg.clock.offset, "utc")
- self.assertIsInstance(cfg.clock.timers[0],
- vconfig.LibvirtConfigGuestTimer)
- self.assertIsInstance(cfg.clock.timers[1],
- vconfig.LibvirtConfigGuestTimer)
- self.assertEqual(cfg.clock.timers[0].name, "pit")
- self.assertEqual(cfg.clock.timers[0].tickpolicy,
- "delay")
- self.assertEqual(cfg.clock.timers[1].name, "rtc")
- self.assertEqual(cfg.clock.timers[1].tickpolicy,
- "catchup")
- if expect_hpet:
- self.assertEqual(3, len(cfg.clock.timers))
- self.assertIsInstance(cfg.clock.timers[2],
- vconfig.LibvirtConfigGuestTimer)
- self.assertEqual('hpet', cfg.clock.timers[2].name)
- self.assertFalse(cfg.clock.timers[2].present)
- else:
- self.assertEqual(2, len(cfg.clock.timers))
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_windows(self, mock_flavor):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- instance_ref['os_type'] = 'windows'
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- cfg = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- {}, disk_info)
-
- self.assertIsInstance(cfg.clock,
- vconfig.LibvirtConfigGuestClock)
- self.assertEqual(cfg.clock.offset, "localtime")
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_two_nics(self, mock_flavor):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- cfg = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 2),
- {}, disk_info)
- self.assertEqual(cfg.acpi, True)
- self.assertEqual(cfg.memory, 2 * units.Mi)
- self.assertEqual(cfg.vcpus, 1)
- self.assertEqual(cfg.os_type, vm_mode.HVM)
- self.assertEqual(cfg.os_boot_dev, ["hd"])
- self.assertIsNone(cfg.os_root)
- self.assertEqual(len(cfg.devices), 10)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestInterface)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestInterface)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestInput)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[8],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[9],
- vconfig.LibvirtConfigMemoryBalloon)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_bug_1118829(self, mock_flavor):
- self.flags(virt_type='uml', group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = {'disk_bus': 'virtio',
- 'cdrom_bus': 'ide',
- 'mapping': {u'vda': {'bus': 'virtio',
- 'type': 'disk',
- 'dev': u'vda'},
- 'root': {'bus': 'virtio',
- 'type': 'disk',
- 'dev': 'vda'}}}
-
- # NOTE(jdg): For this specific test leave this blank
- # This will exercise the failed code path still,
- # and won't require fakes and stubs of the iscsi discovery
- block_device_info = {}
- conn._get_guest_config(instance_ref, [], {}, disk_info,
- None, block_device_info)
- self.assertEqual(instance_ref['root_device_name'], '/dev/vda')
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_root_device_name(self, mock_flavor):
- self.flags(virt_type='uml', group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- block_device_info = {'root_device_name': '/dev/vdb'}
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref,
- block_device_info)
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info,
- None, block_device_info)
- self.assertEqual(cfg.acpi, False)
- self.assertEqual(cfg.memory, 2 * units.Mi)
- self.assertEqual(cfg.vcpus, 1)
- self.assertEqual(cfg.os_type, "uml")
- self.assertEqual(cfg.os_boot_dev, [])
- self.assertEqual(cfg.os_root, '/dev/vdb')
- self.assertEqual(len(cfg.devices), 3)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestConsole)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_block_device(self, mock_flavor):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
- conn_info = {'driver_volume_type': 'fake'}
- info = {'block_device_mapping': driver_block_device.convert_volumes([
- fake_block_device.FakeDbBlockDeviceDict(
- {'id': 1,
- 'source_type': 'volume', 'destination_type': 'volume',
- 'device_name': '/dev/vdc'}),
- fake_block_device.FakeDbBlockDeviceDict(
- {'id': 2,
- 'source_type': 'volume', 'destination_type': 'volume',
- 'device_name': '/dev/vdd'}),
- ])}
- info['block_device_mapping'][0]['connection_info'] = conn_info
- info['block_device_mapping'][1]['connection_info'] = conn_info
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref, info)
- with mock.patch.object(
- driver_block_device.DriverVolumeBlockDevice, 'save'):
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info,
- None, info)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestDisk)
- self.assertEqual(cfg.devices[2].target_dev, 'vdc')
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestDisk)
- self.assertEqual(cfg.devices[3].target_dev, 'vdd')
- self.assertTrue(info['block_device_mapping'][0].save.called)
- self.assertTrue(info['block_device_mapping'][1].save.called)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_lxc_with_attached_volume(self, mock_flavor):
- self.flags(virt_type='lxc', group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
- conn_info = {'driver_volume_type': 'fake'}
- info = {'block_device_mapping': driver_block_device.convert_volumes([
- fake_block_device.FakeDbBlockDeviceDict(
- {'id': 1,
- 'source_type': 'volume', 'destination_type': 'volume',
- 'boot_index': 0}),
- fake_block_device.FakeDbBlockDeviceDict(
- {'id': 2,
- 'source_type': 'volume', 'destination_type': 'volume',
- }),
- fake_block_device.FakeDbBlockDeviceDict(
- {'id': 3,
- 'source_type': 'volume', 'destination_type': 'volume',
- }),
- ])}
-
- info['block_device_mapping'][0]['connection_info'] = conn_info
- info['block_device_mapping'][1]['connection_info'] = conn_info
- info['block_device_mapping'][2]['connection_info'] = conn_info
- info['block_device_mapping'][0]['mount_device'] = '/dev/vda'
- info['block_device_mapping'][1]['mount_device'] = '/dev/vdc'
- info['block_device_mapping'][2]['mount_device'] = '/dev/vdd'
- with mock.patch.object(
- driver_block_device.DriverVolumeBlockDevice, 'save'):
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref, info)
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info,
- None, info)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertEqual(cfg.devices[1].target_dev, 'vdc')
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestDisk)
- self.assertEqual(cfg.devices[2].target_dev, 'vdd')
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_configdrive(self, mock_flavor):
- # It's necessary to check if the architecture is power, because
- # power doesn't have support to ide, and so libvirt translate
- # all ide calls to scsi
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- # make configdrive.required_by() return True
- instance_ref['config_drive'] = True
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
-
- # The last device is selected for this. on x86 is the last ide
- # device (hdd). Since power only support scsi, the last device
- # is sdz
-
- expect = {"ppc": "sdz", "ppc64": "sdz"}
- disk = expect.get(blockinfo.libvirt_utils.get_arch({}), "hdd")
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestDisk)
- self.assertEqual(cfg.devices[2].target_dev, disk)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_virtio_scsi_bus(self, mock_flavor):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- image_meta = {"properties": {"hw_scsi_model": "virtio-scsi"}}
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref, [], image_meta)
- cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestController)
- self.assertEqual(cfg.devices[2].model, 'virtio-scsi')
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_virtio_scsi_bus_bdm(self, mock_flavor):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- image_meta = {"properties": {"hw_scsi_model": "virtio-scsi"}}
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
- conn_info = {'driver_volume_type': 'fake'}
- bd_info = {
- 'block_device_mapping': driver_block_device.convert_volumes([
- fake_block_device.FakeDbBlockDeviceDict(
- {'id': 1,
- 'source_type': 'volume', 'destination_type': 'volume',
- 'device_name': '/dev/sdc', 'disk_bus': 'scsi'}),
- fake_block_device.FakeDbBlockDeviceDict(
- {'id': 2,
- 'source_type': 'volume', 'destination_type': 'volume',
- 'device_name': '/dev/sdd', 'disk_bus': 'scsi'}),
- ])}
- bd_info['block_device_mapping'][0]['connection_info'] = conn_info
- bd_info['block_device_mapping'][1]['connection_info'] = conn_info
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref, bd_info, image_meta)
- with mock.patch.object(
- driver_block_device.DriverVolumeBlockDevice, 'save'):
- cfg = conn._get_guest_config(instance_ref, [], image_meta,
- disk_info, [], bd_info)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestDisk)
- self.assertEqual(cfg.devices[2].target_dev, 'sdc')
- self.assertEqual(cfg.devices[2].target_bus, 'scsi')
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestDisk)
- self.assertEqual(cfg.devices[3].target_dev, 'sdd')
- self.assertEqual(cfg.devices[3].target_bus, 'scsi')
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestController)
- self.assertEqual(cfg.devices[4].model, 'virtio-scsi')
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_vnc(self, mock_flavor):
- self.flags(vnc_enabled=True)
- self.flags(virt_type='kvm',
- use_usb_tablet=False,
- group='libvirt')
- self.flags(enabled=False, group='spice')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
- self.assertEqual(len(cfg.devices), 7)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigMemoryBalloon)
-
- self.assertEqual(cfg.devices[4].type, "vnc")
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_vnc_and_tablet(self, mock_flavor):
- self.flags(vnc_enabled=True)
- self.flags(virt_type='kvm',
- use_usb_tablet=True,
- group='libvirt')
- self.flags(enabled=False, group='spice')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
- self.assertEqual(len(cfg.devices), 8)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestInput)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigMemoryBalloon)
-
- self.assertEqual(cfg.devices[4].type, "tablet")
- self.assertEqual(cfg.devices[5].type, "vnc")
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_spice_and_tablet(self, mock_flavor):
- self.flags(vnc_enabled=False)
- self.flags(virt_type='kvm',
- use_usb_tablet=True,
- group='libvirt')
- self.flags(enabled=True,
- agent_enabled=False,
- group='spice')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
- self.assertEqual(len(cfg.devices), 8)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestInput)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigMemoryBalloon)
-
- self.assertEqual(cfg.devices[4].type, "tablet")
- self.assertEqual(cfg.devices[5].type, "spice")
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_spice_and_agent(self, mock_flavor):
- self.flags(vnc_enabled=False)
- self.flags(virt_type='kvm',
- use_usb_tablet=True,
- group='libvirt')
- self.flags(enabled=True,
- agent_enabled=True,
- group='spice')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
- self.assertEqual(len(cfg.devices), 8)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestChannel)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigMemoryBalloon)
-
- self.assertEqual(cfg.devices[4].target_name, "com.redhat.spice.0")
- self.assertEqual(cfg.devices[5].type, "spice")
- self.assertEqual(cfg.devices[6].type, "qxl")
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- @mock.patch('nova.console.serial.acquire_port')
- def test_get_guest_config_serial_console(self, acquire_port,
- mock_flavor):
- self.flags(enabled=True, group='serial_console')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- acquire_port.return_value = 11111
-
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
- self.assertEqual(8, len(cfg.devices))
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestInput)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigMemoryBalloon)
-
- self.assertEqual("tcp", cfg.devices[2].type)
- self.assertEqual(11111, cfg.devices[2].listen_port)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_serial_console_through_flavor(self, mock_flavor):
- self.flags(enabled=True, group='serial_console')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {'hw:serial_port_count': 3}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
- self.assertEqual(10, len(cfg.devices))
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestInput)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[8],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[9],
- vconfig.LibvirtConfigMemoryBalloon)
-
- self.assertEqual("tcp", cfg.devices[2].type)
- self.assertEqual("tcp", cfg.devices[3].type)
- self.assertEqual("tcp", cfg.devices[4].type)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_serial_console_invalid_flavor(self, mock_flavor):
- self.flags(enabled=True, group='serial_console')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {'hw:serial_port_count': "a"}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- self.assertRaises(
- exception.ImageSerialPortNumberInvalid,
- conn._get_guest_config, instance_ref, [], {}, disk_info)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_serial_console_image_and_flavor(self,
- mock_flavor):
- self.flags(enabled=True, group='serial_console')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- image_meta = {"properties": {"hw_serial_port_count": "3"}}
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {'hw:serial_port_count': 4}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- cfg = conn._get_guest_config(instance_ref, [], image_meta,
- disk_info)
- self.assertEqual(10, len(cfg.devices), cfg.devices)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestInput)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[8],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[9],
- vconfig.LibvirtConfigMemoryBalloon)
-
- self.assertEqual("tcp", cfg.devices[2].type)
- self.assertEqual("tcp", cfg.devices[3].type)
- self.assertEqual("tcp", cfg.devices[4].type)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_serial_console_invalid_img_meta(self,
- mock_flavor):
- self.flags(enabled=True, group='serial_console')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- image_meta = {"properties": {"hw_serial_port_count": "fail"}}
- self.assertRaises(
- exception.ImageSerialPortNumberInvalid,
- conn._get_guest_config, instance_ref, [], image_meta, disk_info)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- @mock.patch('nova.console.serial.acquire_port')
- def test_get_guest_config_serial_console_through_port_rng_exhausted(
- self, acquire_port, mock_flavor):
- self.flags(enabled=True, group='serial_console')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- acquire_port.side_effect = exception.SocketPortRangeExhaustedException(
- '127.0.0.1')
- self.assertRaises(
- exception.SocketPortRangeExhaustedException,
- conn._get_guest_config, instance_ref, [], {}, disk_info)
-
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
- def test_get_serial_ports_from_instance(self, _lookup_by_name):
- i = self._test_get_serial_ports_from_instance(_lookup_by_name)
- self.assertEqual([
- ('127.0.0.1', 100),
- ('127.0.0.1', 101),
- ('127.0.0.2', 100),
- ('127.0.0.2', 101)], list(i))
-
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
- def test_get_serial_ports_from_instance_bind_only(self, _lookup_by_name):
- i = self._test_get_serial_ports_from_instance(
- _lookup_by_name, mode='bind')
- self.assertEqual([
- ('127.0.0.1', 101),
- ('127.0.0.2', 100)], list(i))
-
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
- def test_get_serial_ports_from_instance_connect_only(self,
- _lookup_by_name):
- i = self._test_get_serial_ports_from_instance(
- _lookup_by_name, mode='connect')
- self.assertEqual([
- ('127.0.0.1', 100),
- ('127.0.0.2', 101)], list(i))
-
- def _test_get_serial_ports_from_instance(self, _lookup_by_name, mode=None):
- xml = """
- <domain type='kvm'>
- <devices>
- <serial type="tcp">
- <source host="127.0.0.1" service="100" mode="connect"/>
- </serial>
- <serial type="tcp">
- <source host="127.0.0.1" service="101" mode="bind"/>
- </serial>
- <serial type="tcp">
- <source host="127.0.0.2" service="100" mode="bind"/>
- </serial>
- <serial type="tcp">
- <source host="127.0.0.2" service="101" mode="connect"/>
- </serial>
- </devices>
- </domain>"""
-
- dom = mock.MagicMock()
- dom.XMLDesc.return_value = xml
- _lookup_by_name.return_value = dom
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- return conn._get_serial_ports_from_instance(
- {'name': 'fake_instance'}, mode=mode)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_type_xen(self, mock_flavor):
- self.flags(vnc_enabled=True)
- self.flags(virt_type='xen',
- use_usb_tablet=False,
- group='libvirt')
- self.flags(enabled=False,
- group='spice')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
- self.assertEqual(len(cfg.devices), 6)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestConsole)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigMemoryBalloon)
-
- self.assertEqual(cfg.devices[3].type, "vnc")
- self.assertEqual(cfg.devices[4].type, "xen")
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_type_xen_pae_hvm(self, mock_flavor):
- self.flags(vnc_enabled=True)
- self.flags(virt_type='xen',
- use_usb_tablet=False,
- group='libvirt')
- self.flags(enabled=False,
- group='spice')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- instance_ref['vm_mode'] = vm_mode.HVM
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
-
- self.assertEqual(cfg.os_type, vm_mode.HVM)
- self.assertEqual(cfg.os_loader, CONF.libvirt.xen_hvmloader_path)
- self.assertEqual(cfg.pae, True)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_type_xen_pae_pvm(self, mock_flavor):
- self.flags(vnc_enabled=True)
- self.flags(virt_type='xen',
- use_usb_tablet=False,
- group='libvirt')
- self.flags(enabled=False,
- group='spice')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
-
- self.assertEqual(cfg.os_type, vm_mode.XEN)
- self.assertEqual(cfg.pae, True)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_vnc_and_spice(self, mock_flavor):
- self.flags(vnc_enabled=True)
- self.flags(virt_type='kvm',
- use_usb_tablet=True,
- group='libvirt')
- self.flags(enabled=True,
- agent_enabled=True,
- group='spice')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
- self.assertEqual(len(cfg.devices), 10)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestInput)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestChannel)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[8],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[9],
- vconfig.LibvirtConfigMemoryBalloon)
-
- self.assertEqual(cfg.devices[4].type, "tablet")
- self.assertEqual(cfg.devices[5].target_name, "com.redhat.spice.0")
- self.assertEqual(cfg.devices[6].type, "vnc")
- self.assertEqual(cfg.devices[7].type, "spice")
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_invalid_watchdog_action(self, mock_flavor):
- self.flags(virt_type='kvm', group='libvirt')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- image_meta = {"properties": {"hw_watchdog_action": "something"}}
- self.assertRaises(exception.InvalidWatchdogAction,
- conn._get_guest_config,
- instance_ref,
- [],
- image_meta,
- disk_info)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_watchdog_action_image_meta(self,
- mock_flavor):
- self.flags(virt_type='kvm', group='libvirt')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- image_meta = {"properties": {"hw_watchdog_action": "none"}}
- cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
- self.assertEqual(len(cfg.devices), 9)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestInput)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigGuestWatchdog)
- self.assertIsInstance(cfg.devices[8],
- vconfig.LibvirtConfigMemoryBalloon)
-
- self.assertEqual("none", cfg.devices[7].action)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def _test_get_guest_config_with_watchdog_action_flavor(self, mock_flavor,
- hw_watchdog_action="hw:watchdog_action"):
- self.flags(virt_type='kvm', group='libvirt')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {hw_watchdog_action: 'none'}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
-
- self.assertEqual(9, len(cfg.devices))
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestInput)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigGuestWatchdog)
- self.assertIsInstance(cfg.devices[8],
- vconfig.LibvirtConfigMemoryBalloon)
-
- self.assertEqual("none", cfg.devices[7].action)
-
- def test_get_guest_config_with_watchdog_action_through_flavor(self):
- self._test_get_guest_config_with_watchdog_action_flavor()
-
- # TODO(pkholkin): the test accepting old property name 'hw_watchdog_action'
- # should be removed in the next release
- def test_get_guest_config_with_watchdog_action_through_flavor_no_scope(
- self):
- self._test_get_guest_config_with_watchdog_action_flavor(
- hw_watchdog_action="hw_watchdog_action")
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_watchdog_overrides_flavor(self,
- mock_flavor):
- self.flags(virt_type='kvm', group='libvirt')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {'hw_watchdog_action': 'none'}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- image_meta = {"properties": {"hw_watchdog_action": "pause"}}
-
- cfg = conn._get_guest_config(instance_ref, [],
- image_meta, disk_info)
-
- self.assertEqual(9, len(cfg.devices))
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestInput)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigGuestWatchdog)
- self.assertIsInstance(cfg.devices[8],
- vconfig.LibvirtConfigMemoryBalloon)
-
- self.assertEqual("pause", cfg.devices[7].action)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_unsupported_video_driver_through_image_meta(self,
- mock_flavor):
- self.flags(virt_type='kvm', group='libvirt')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- image_meta = {"properties": {"hw_video_model": "something"}}
- self.assertRaises(exception.InvalidVideoMode,
- conn._get_guest_config,
- instance_ref,
- [],
- image_meta,
- disk_info)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_video_driver_image_meta(self,
- mock_flavor):
- self.flags(virt_type='kvm', group='libvirt')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- image_meta = {"properties": {"hw_video_model": "vmvga"}}
- cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
- self.assertEqual(len(cfg.devices), 8)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestInput)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigMemoryBalloon)
-
- self.assertEqual(cfg.devices[5].type, "vnc")
- self.assertEqual(cfg.devices[6].type, "vmvga")
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_qga_through_image_meta(self,
- mock_flavor):
- self.flags(virt_type='kvm', group='libvirt')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- image_meta = {"properties": {"hw_qemu_guest_agent": "yes"}}
- cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
- self.assertEqual(len(cfg.devices), 9)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestInput)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigGuestChannel)
- self.assertIsInstance(cfg.devices[8],
- vconfig.LibvirtConfigMemoryBalloon)
-
- self.assertEqual(cfg.devices[4].type, "tablet")
- self.assertEqual(cfg.devices[5].type, "vnc")
- self.assertEqual(cfg.devices[7].type, "unix")
- self.assertEqual(cfg.devices[7].target_name, "org.qemu.guest_agent.0")
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_video_driver_vram(self, mock_flavor):
- self.flags(vnc_enabled=False)
- self.flags(virt_type='kvm', group='libvirt')
- self.flags(enabled=True,
- agent_enabled=True,
- group='spice')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {'hw_video:ram_max_mb': "100"}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- image_meta = {"properties": {"hw_video_model": "qxl",
- "hw_video_ram": "64"}}
-
- cfg = conn._get_guest_config(instance_ref, [],
- image_meta, disk_info)
- self.assertEqual(len(cfg.devices), 8)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestChannel)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigMemoryBalloon)
-
- self.assertEqual(cfg.devices[5].type, "spice")
- self.assertEqual(cfg.devices[6].type, "qxl")
- self.assertEqual(cfg.devices[6].vram, 64)
-
- @mock.patch('nova.virt.disk.api.teardown_container')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
- @mock.patch('nova.virt.disk.api.setup_container')
- @mock.patch('nova.openstack.common.fileutils.ensure_tree')
- @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
- def test_unmount_fs_if_error_during_lxc_create_domain(self,
- mock_get_inst_path, mock_ensure_tree, mock_setup_container,
- mock_get_info, mock_teardown):
- """If we hit an error during a `_create_domain` call to `libvirt+lxc`
- we need to ensure the guest FS is unmounted from the host so that any
- future `lvremove` calls will work.
- """
- self.flags(virt_type='lxc', group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- mock_instance = mock.MagicMock()
- mock_get_inst_path.return_value = '/tmp/'
- mock_image_backend = mock.MagicMock()
- conn.image_backend = mock_image_backend
- mock_image = mock.MagicMock()
- mock_image.path = '/tmp/test.img'
- conn.image_backend.image.return_value = mock_image
- mock_setup_container.return_value = '/dev/nbd0'
- mock_get_info.side_effect = exception.InstanceNotFound(
- instance_id='foo')
- conn._conn.defineXML = mock.Mock()
- conn._conn.defineXML.side_effect = ValueError('somethingbad')
- with contextlib.nested(
- mock.patch.object(conn, '_is_booted_from_volume',
- return_value=False),
- mock.patch.object(conn, 'plug_vifs'),
- mock.patch.object(conn, 'firewall_driver'),
- mock.patch.object(conn, 'cleanup')):
- self.assertRaises(ValueError,
- conn._create_domain_and_network,
- self.context,
- 'xml',
- mock_instance, None)
-
- mock_teardown.assert_called_with(container_dir='/tmp/rootfs')
-
- def test_video_driver_flavor_limit_not_set(self):
- self.flags(virt_type='kvm', group='libvirt')
- self.flags(enabled=True,
- agent_enabled=True,
- group='spice')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- image_meta = {"properties": {"hw_video_model": "qxl",
- "hw_video_ram": "64"}}
-
- with contextlib.nested(
- mock.patch.object(objects.Flavor, 'get_by_id'),
- mock.patch.object(objects.Instance, 'save'),
- ) as (mock_flavor, mock_instance):
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- self.assertRaises(exception.RequestedVRamTooHigh,
- conn._get_guest_config,
- instance_ref,
- [],
- image_meta,
- disk_info)
-
- def test_video_driver_ram_above_flavor_limit(self):
- self.flags(virt_type='kvm', group='libvirt')
- self.flags(enabled=True,
- agent_enabled=True,
- group='spice')
-
- instance_ref = objects.Instance(**self.test_instance)
- instance_type = instance_ref.get_flavor()
- instance_type.extra_specs = {'hw_video:ram_max_mb': "50"}
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- image_meta = {"properties": {"hw_video_model": "qxl",
- "hw_video_ram": "64"}}
- with contextlib.nested(
- mock.patch.object(objects.Flavor, 'get_by_id',
- return_value=instance_type),
- mock.patch.object(objects.Instance, 'save')):
- self.assertRaises(exception.RequestedVRamTooHigh,
- conn._get_guest_config,
- instance_ref,
- [],
- image_meta,
- disk_info)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_without_qga_through_image_meta(self,
- mock_flavor):
- self.flags(virt_type='kvm', group='libvirt')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- image_meta = {"properties": {"hw_qemu_guest_agent": "no"}}
- cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
- self.assertEqual(len(cfg.devices), 8)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestInput)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigMemoryBalloon)
-
- self.assertEqual(cfg.devices[4].type, "tablet")
- self.assertEqual(cfg.devices[5].type, "vnc")
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_rng_device(self, mock_flavor):
- self.flags(virt_type='kvm',
- use_usb_tablet=False,
- group='libvirt')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {'hw_rng:allowed': 'True'}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- image_meta = {"properties": {"hw_rng_model": "virtio"}}
-
- cfg = conn._get_guest_config(instance_ref, [],
- image_meta, disk_info)
- self.assertEqual(len(cfg.devices), 8)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestRng)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigMemoryBalloon)
-
- self.assertEqual(cfg.devices[6].model, 'random')
- self.assertIsNone(cfg.devices[6].backend)
- self.assertIsNone(cfg.devices[6].rate_bytes)
- self.assertIsNone(cfg.devices[6].rate_period)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_rng_not_allowed(self, mock_flavor):
- self.flags(virt_type='kvm',
- use_usb_tablet=False,
- group='libvirt')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- image_meta = {"properties": {"hw_rng_model": "virtio"}}
- cfg = conn._get_guest_config(instance_ref, [],
- image_meta, disk_info)
- self.assertEqual(len(cfg.devices), 7)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigMemoryBalloon)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_rng_limits(self, mock_flavor):
- self.flags(virt_type='kvm',
- use_usb_tablet=False,
- group='libvirt')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {'hw_rng:allowed': 'True',
- 'hw_rng:rate_bytes': '1024',
- 'hw_rng:rate_period': '2'}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- image_meta = {"properties": {"hw_rng_model": "virtio"}}
-
- cfg = conn._get_guest_config(instance_ref, [],
- image_meta, disk_info)
- self.assertEqual(len(cfg.devices), 8)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestRng)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigMemoryBalloon)
-
- self.assertEqual(cfg.devices[6].model, 'random')
- self.assertIsNone(cfg.devices[6].backend)
- self.assertEqual(cfg.devices[6].rate_bytes, 1024)
- self.assertEqual(cfg.devices[6].rate_period, 2)
-
- @mock.patch('nova.virt.libvirt.driver.os.path.exists')
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_rng_backend(self, mock_flavor, mock_path):
- self.flags(virt_type='kvm',
- use_usb_tablet=False,
- rng_dev_path='/dev/hw_rng',
- group='libvirt')
- mock_path.return_value = True
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {'hw_rng:allowed': 'True'}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- image_meta = {"properties": {"hw_rng_model": "virtio"}}
-
- cfg = conn._get_guest_config(instance_ref, [],
- image_meta, disk_info)
- self.assertEqual(len(cfg.devices), 8)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestRng)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigMemoryBalloon)
-
- self.assertEqual(cfg.devices[6].model, 'random')
- self.assertEqual(cfg.devices[6].backend, '/dev/hw_rng')
- self.assertIsNone(cfg.devices[6].rate_bytes)
- self.assertIsNone(cfg.devices[6].rate_period)
-
- @mock.patch('nova.virt.libvirt.driver.os.path.exists')
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_rng_dev_not_present(self, mock_flavor,
- mock_path):
- self.flags(virt_type='kvm',
- use_usb_tablet=False,
- rng_dev_path='/dev/hw_rng',
- group='libvirt')
- mock_path.return_value = False
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {'hw_rng:allowed': 'True'}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- image_meta = {"properties": {"hw_rng_model": "virtio"}}
-
- self.assertRaises(exception.RngDeviceNotExist,
- conn._get_guest_config,
- instance_ref,
- [],
- image_meta, disk_info)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_cpu_quota(self, mock_flavor):
- self.flags(virt_type='kvm', group='libvirt')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {'quota:cpu_shares': '10000',
- 'quota:cpu_period': '20000'}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
-
- self.assertEqual(10000, cfg.cputune.shares)
- self.assertEqual(20000, cfg.cputune.period)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_bogus_cpu_quota(self, mock_flavor):
- self.flags(virt_type='kvm', group='libvirt')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {'quota:cpu_shares': 'fishfood',
- 'quota:cpu_period': '20000'}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- self.assertRaises(ValueError,
- conn._get_guest_config,
- instance_ref, [], {}, disk_info)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def _test_get_guest_config_sysinfo_serial(self, expected_serial,
- mock_flavor):
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- cfg = drvr._get_guest_config_sysinfo(instance_ref)
-
- self.assertIsInstance(cfg, vconfig.LibvirtConfigGuestSysinfo)
- self.assertEqual(version.vendor_string(),
- cfg.system_manufacturer)
- self.assertEqual(version.product_string(),
- cfg.system_product)
- self.assertEqual(version.version_string_with_package(),
- cfg.system_version)
- self.assertEqual(expected_serial,
- cfg.system_serial)
- self.assertEqual(instance_ref['uuid'],
- cfg.system_uuid)
-
- def test_get_guest_config_sysinfo_serial_none(self):
- self.flags(sysinfo_serial="none", group="libvirt")
- self._test_get_guest_config_sysinfo_serial(None)
-
- @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_host_uuid")
- def test_get_guest_config_sysinfo_serial_hardware(self, mock_uuid):
- self.flags(sysinfo_serial="hardware", group="libvirt")
-
- theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
- mock_uuid.return_value = theuuid
-
- self._test_get_guest_config_sysinfo_serial(theuuid)
-
- def test_get_guest_config_sysinfo_serial_os(self):
- self.flags(sysinfo_serial="os", group="libvirt")
-
- real_open = __builtin__.open
- with contextlib.nested(
- mock.patch.object(__builtin__, "open"),
- ) as (mock_open, ):
- theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
-
- def fake_open(filename, *args, **kwargs):
- if filename == "/etc/machine-id":
- h = mock.MagicMock()
- h.read.return_value = theuuid
- h.__enter__.return_value = h
- return h
- return real_open(filename, *args, **kwargs)
-
- mock_open.side_effect = fake_open
-
- self._test_get_guest_config_sysinfo_serial(theuuid)
-
- def test_get_guest_config_sysinfo_serial_auto_hardware(self):
- self.flags(sysinfo_serial="auto", group="libvirt")
-
- real_exists = os.path.exists
- with contextlib.nested(
- mock.patch.object(os.path, "exists"),
- mock.patch.object(libvirt_driver.LibvirtDriver,
- "_get_host_uuid")
- ) as (mock_exists, mock_uuid):
- def fake_exists(filename):
- if filename == "/etc/machine-id":
- return False
- return real_exists(filename)
-
- mock_exists.side_effect = fake_exists
-
- theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
- mock_uuid.return_value = theuuid
-
- self._test_get_guest_config_sysinfo_serial(theuuid)
-
- def test_get_guest_config_sysinfo_serial_auto_os(self):
- self.flags(sysinfo_serial="auto", group="libvirt")
-
- real_exists = os.path.exists
- real_open = __builtin__.open
- with contextlib.nested(
- mock.patch.object(os.path, "exists"),
- mock.patch.object(__builtin__, "open"),
- ) as (mock_exists, mock_open):
- def fake_exists(filename):
- if filename == "/etc/machine-id":
- return True
- return real_exists(filename)
-
- mock_exists.side_effect = fake_exists
-
- theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
-
- def fake_open(filename, *args, **kwargs):
- if filename == "/etc/machine-id":
- h = mock.MagicMock()
- h.read.return_value = theuuid
- h.__enter__.return_value = h
- return h
- return real_open(filename, *args, **kwargs)
-
- mock_open.side_effect = fake_open
-
- self._test_get_guest_config_sysinfo_serial(theuuid)
-
- def test_get_guest_config_sysinfo_serial_invalid(self):
- self.flags(sysinfo_serial="invalid", group="libvirt")
-
- self.assertRaises(exception.NovaException,
- libvirt_driver.LibvirtDriver,
- fake.FakeVirtAPI(),
- True)
-
- def _create_fake_service_compute(self):
- service_info = {
- 'id': 1729,
- 'host': 'fake',
- 'report_count': 0
- }
- service_ref = objects.Service(**service_info)
-
- compute_info = {
- 'id': 1729,
- 'vcpus': 2,
- 'memory_mb': 1024,
- 'local_gb': 2048,
- 'vcpus_used': 0,
- 'memory_mb_used': 0,
- 'local_gb_used': 0,
- 'free_ram_mb': 1024,
- 'free_disk_gb': 2048,
- 'hypervisor_type': 'xen',
- 'hypervisor_version': 1,
- 'running_vms': 0,
- 'cpu_info': '',
- 'current_workload': 0,
- 'service_id': service_ref['id']
- }
- compute_ref = objects.ComputeNode(**compute_info)
- return (service_ref, compute_ref)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_pci_passthrough_kvm(self, mock_flavor):
- self.flags(virt_type='kvm', group='libvirt')
- service_ref, compute_ref = self._create_fake_service_compute()
-
- instance = objects.Instance(**self.test_instance)
- flavor = instance.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- pci_device_info = dict(test_pci_device.fake_db_dev)
- pci_device_info.update(compute_node_id=1,
- label='fake',
- status='allocated',
- address='0000:00:00.1',
- compute_id=compute_ref['id'],
- instance_uuid=instance.uuid,
- request_id=None,
- extra_info={})
- pci_device = objects.PciDevice(**pci_device_info)
- pci_list = objects.PciDeviceList()
- pci_list.objects.append(pci_device)
- instance.pci_devices = pci_list
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance)
- cfg = conn._get_guest_config(instance, [], {}, disk_info)
-
- had_pci = 0
- # care only about the PCI devices
- for dev in cfg.devices:
- if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
- had_pci += 1
- self.assertEqual(dev.type, 'pci')
- self.assertEqual(dev.managed, 'yes')
- self.assertEqual(dev.mode, 'subsystem')
-
- self.assertEqual(dev.domain, "0000")
- self.assertEqual(dev.bus, "00")
- self.assertEqual(dev.slot, "00")
- self.assertEqual(dev.function, "1")
- self.assertEqual(had_pci, 1)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_with_pci_passthrough_xen(self, mock_flavor):
- self.flags(virt_type='xen', group='libvirt')
- service_ref, compute_ref = self._create_fake_service_compute()
-
- instance = objects.Instance(**self.test_instance)
- flavor = instance.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- pci_device_info = dict(test_pci_device.fake_db_dev)
- pci_device_info.update(compute_node_id=1,
- label='fake',
- status='allocated',
- address='0000:00:00.2',
- compute_id=compute_ref['id'],
- instance_uuid=instance.uuid,
- request_id=None,
- extra_info={})
- pci_device = objects.PciDevice(**pci_device_info)
- pci_list = objects.PciDeviceList()
- pci_list.objects.append(pci_device)
- instance.pci_devices = pci_list
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance)
- cfg = conn._get_guest_config(instance, [], {}, disk_info)
- had_pci = 0
- # care only about the PCI devices
- for dev in cfg.devices:
- if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
- had_pci += 1
- self.assertEqual(dev.type, 'pci')
- self.assertEqual(dev.managed, 'no')
- self.assertEqual(dev.mode, 'subsystem')
-
- self.assertEqual(dev.domain, "0000")
- self.assertEqual(dev.bus, "00")
- self.assertEqual(dev.slot, "00")
- self.assertEqual(dev.function, "2")
- self.assertEqual(had_pci, 1)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_os_command_line_through_image_meta(self,
- mock_flavor):
- self.flags(virt_type="kvm",
- cpu_mode=None,
- group='libvirt')
-
- self.test_instance['kernel_id'] = "fake_kernel_id"
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- image_meta = {"properties": {"os_command_line":
- "fake_os_command_line"}}
- cfg = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- image_meta, disk_info)
- self.assertEqual(cfg.os_cmdline, "fake_os_command_line")
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_os_command_line_without_kernel_id(self,
- mock_flavor):
- self.flags(virt_type="kvm",
- cpu_mode=None,
- group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- image_meta = {"properties": {"os_command_line":
- "fake_os_command_line"}}
-
- cfg = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- image_meta, disk_info)
- self.assertIsNone(cfg.os_cmdline)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_os_command_empty(self, mock_flavor):
- self.flags(virt_type="kvm",
- cpu_mode=None,
- group='libvirt')
-
- self.test_instance['kernel_id'] = "fake_kernel_id"
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- # the instance has 'root=/dev/vda console=tty0 console=ttyS0' set by
- # default, so testing an empty string and None value in the
- # os_command_line image property must pass
- image_meta = {"properties": {"os_command_line": ""}}
- cfg = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- image_meta, disk_info)
- self.assertNotEqual(cfg.os_cmdline, "")
-
- image_meta = {"properties": {"os_command_line": None}}
- cfg = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- image_meta, disk_info)
- self.assertIsNotNone(cfg.os_cmdline)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_armv7(self, mock_flavor):
- def get_host_capabilities_stub(self):
- cpu = vconfig.LibvirtConfigGuestCPU()
- cpu.arch = arch.ARMV7
-
- caps = vconfig.LibvirtConfigCaps()
- caps.host = vconfig.LibvirtConfigCapsHost()
- caps.host.cpu = cpu
- return caps
-
- self.flags(virt_type="kvm",
- group="libvirt")
-
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- self.stubs.Set(libvirt_driver.LibvirtDriver,
- "_get_host_capabilities",
- get_host_capabilities_stub)
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- cfg = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- {}, disk_info)
- self.assertEqual(cfg.os_mach_type, "vexpress-a15")
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_aarch64(self, mock_flavor):
- def get_host_capabilities_stub(self):
- cpu = vconfig.LibvirtConfigGuestCPU()
- cpu.arch = arch.AARCH64
-
- caps = vconfig.LibvirtConfigCaps()
- caps.host = vconfig.LibvirtConfigCapsHost()
- caps.host.cpu = cpu
- return caps
-
- self.flags(virt_type="kvm",
- group="libvirt")
-
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- self.stubs.Set(libvirt_driver.LibvirtDriver,
- "_get_host_capabilities",
- get_host_capabilities_stub)
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- cfg = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- {}, disk_info)
- self.assertEqual(cfg.os_mach_type, "virt")
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_machine_type_through_image_meta(self,
- mock_flavor):
- self.flags(virt_type="kvm",
- group='libvirt')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- image_meta = {"properties": {"hw_machine_type":
- "fake_machine_type"}}
- cfg = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- image_meta, disk_info)
- self.assertEqual(cfg.os_mach_type, "fake_machine_type")
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_config_machine_type_from_config(self, mock_flavor):
- self.flags(virt_type='kvm', group='libvirt')
- self.flags(hw_machine_type=['x86_64=fake_machine_type'],
- group='libvirt')
-
- def fake_getCapabilities():
- return """
- <capabilities>
- <host>
- <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
- <cpu>
- <arch>x86_64</arch>
- <model>Penryn</model>
- <vendor>Intel</vendor>
- <topology sockets='1' cores='2' threads='1'/>
- <feature name='xtpr'/>
- </cpu>
- </host>
- </capabilities>
- """
-
- def fake_baselineCPU(cpu, flag):
- return """<cpu mode='custom' match='exact'>
- <model fallback='allow'>Penryn</model>
- <vendor>Intel</vendor>
- <feature policy='require' name='xtpr'/>
- </cpu>
- """
-
- # Make sure the host arch is mocked as x86_64
- self.create_fake_libvirt_mock(getCapabilities=fake_getCapabilities,
- baselineCPU=fake_baselineCPU,
- getVersion=lambda: 1005001)
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- cfg = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- {}, disk_info)
- self.assertEqual(cfg.os_mach_type, "fake_machine_type")
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def _test_get_guest_config_ppc64(self, device_index, mock_flavor):
- """Test for nova.virt.libvirt.driver.LibvirtDriver._get_guest_config.
- """
- self.flags(virt_type='kvm', group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- image_meta = {}
- expected = (arch.PPC64, arch.PPC)
- for guestarch in expected:
- with mock.patch.object(libvirt_driver.libvirt_utils,
- 'get_arch',
- return_value=guestarch):
- cfg = conn._get_guest_config(instance_ref, [],
- image_meta,
- disk_info)
- self.assertIsInstance(cfg.devices[device_index],
- vconfig.LibvirtConfigGuestVideo)
- self.assertEqual(cfg.devices[device_index].type, 'vga')
-
- def test_get_guest_config_ppc64_through_image_meta_vnc_enabled(self):
- self.flags(vnc_enabled=True)
- self._test_get_guest_config_ppc64(6)
-
- def test_get_guest_config_ppc64_through_image_meta_spice_enabled(self):
- self.flags(enabled=True,
- agent_enabled=True,
- group='spice')
- self._test_get_guest_config_ppc64(8)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_cpu_config_none(self, mock_flavor):
- self.flags(cpu_mode="none", group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- conf = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- {}, disk_info)
- self.assertIsInstance(conf.cpu,
- vconfig.LibvirtConfigGuestCPU)
- self.assertIsNone(conf.cpu.mode)
- self.assertIsNone(conf.cpu.model)
- self.assertEqual(conf.cpu.sockets, 1)
- self.assertEqual(conf.cpu.cores, 1)
- self.assertEqual(conf.cpu.threads, 1)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_cpu_config_default_kvm(self, mock_flavor):
- self.flags(virt_type="kvm",
- cpu_mode=None,
- group='libvirt')
-
- def get_lib_version_stub():
- return (0 * 1000 * 1000) + (9 * 1000) + 11
-
- self.stubs.Set(self.conn,
- "getLibVersion",
- get_lib_version_stub)
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- conf = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- {}, disk_info)
- self.assertIsInstance(conf.cpu,
- vconfig.LibvirtConfigGuestCPU)
- self.assertEqual(conf.cpu.mode, "host-model")
- self.assertIsNone(conf.cpu.model)
- self.assertEqual(conf.cpu.sockets, 1)
- self.assertEqual(conf.cpu.cores, 1)
- self.assertEqual(conf.cpu.threads, 1)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_cpu_config_default_uml(self, mock_flavor):
- self.flags(virt_type="uml",
- cpu_mode=None,
- group='libvirt')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- conf = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- {}, disk_info)
- self.assertIsNone(conf.cpu)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_cpu_config_default_lxc(self, mock_flavor):
- self.flags(virt_type="lxc",
- cpu_mode=None,
- group='libvirt')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- conf = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- {}, disk_info)
- self.assertIsNone(conf.cpu)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_cpu_config_host_passthrough(self, mock_flavor):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- self.flags(cpu_mode="host-passthrough", group='libvirt')
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- conf = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- {}, disk_info)
- self.assertIsInstance(conf.cpu,
- vconfig.LibvirtConfigGuestCPU)
- self.assertEqual(conf.cpu.mode, "host-passthrough")
- self.assertIsNone(conf.cpu.model)
- self.assertEqual(conf.cpu.sockets, 1)
- self.assertEqual(conf.cpu.cores, 1)
- self.assertEqual(conf.cpu.threads, 1)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_cpu_config_host_model(self, mock_flavor):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- self.flags(cpu_mode="host-model", group='libvirt')
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- conf = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- {}, disk_info)
- self.assertIsInstance(conf.cpu,
- vconfig.LibvirtConfigGuestCPU)
- self.assertEqual(conf.cpu.mode, "host-model")
- self.assertIsNone(conf.cpu.model)
- self.assertEqual(conf.cpu.sockets, 1)
- self.assertEqual(conf.cpu.cores, 1)
- self.assertEqual(conf.cpu.threads, 1)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_cpu_config_custom(self, mock_flavor):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- self.flags(cpu_mode="custom",
- cpu_model="Penryn",
- group='libvirt')
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- conf = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- {}, disk_info)
- self.assertIsInstance(conf.cpu,
- vconfig.LibvirtConfigGuestCPU)
- self.assertEqual(conf.cpu.mode, "custom")
- self.assertEqual(conf.cpu.model, "Penryn")
- self.assertEqual(conf.cpu.sockets, 1)
- self.assertEqual(conf.cpu.cores, 1)
- self.assertEqual(conf.cpu.threads, 1)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_cpu_topology(self, mock_flavor):
- fake_flavor = objects.flavor.Flavor.get_by_id(
- self.context,
- self.test_instance['instance_type_id'])
- fake_flavor.vcpus = 8
- fake_flavor.extra_specs = {'hw:cpu_max_sockets': '4'}
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- with mock.patch.object(objects.flavor.Flavor, 'get_by_id',
- return_value=fake_flavor):
- conf = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- {}, disk_info)
- self.assertIsInstance(conf.cpu,
- vconfig.LibvirtConfigGuestCPU)
- self.assertEqual(conf.cpu.mode, "host-model")
- self.assertEqual(conf.cpu.sockets, 4)
- self.assertEqual(conf.cpu.cores, 2)
- self.assertEqual(conf.cpu.threads, 1)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_memory_balloon_config_by_default(self, mock_flavor):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
- for device in cfg.devices:
- if device.root_name == 'memballoon':
- self.assertIsInstance(device,
- vconfig.LibvirtConfigMemoryBalloon)
- self.assertEqual('virtio', device.model)
- self.assertEqual(10, device.period)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_memory_balloon_config_disable(self, mock_flavor):
- self.flags(mem_stats_period_seconds=0, group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
- no_exist = True
- for device in cfg.devices:
- if device.root_name == 'memballoon':
- no_exist = False
- break
- self.assertTrue(no_exist)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_memory_balloon_config_period_value(self, mock_flavor):
- self.flags(mem_stats_period_seconds=21, group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
- for device in cfg.devices:
- if device.root_name == 'memballoon':
- self.assertIsInstance(device,
- vconfig.LibvirtConfigMemoryBalloon)
- self.assertEqual('virtio', device.model)
- self.assertEqual(21, device.period)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_memory_balloon_config_qemu(self, mock_flavor):
- self.flags(virt_type='qemu', group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
- for device in cfg.devices:
- if device.root_name == 'memballoon':
- self.assertIsInstance(device,
- vconfig.LibvirtConfigMemoryBalloon)
- self.assertEqual('virtio', device.model)
- self.assertEqual(10, device.period)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_memory_balloon_config_xen(self, mock_flavor):
- self.flags(virt_type='xen', group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
- for device in cfg.devices:
- if device.root_name == 'memballoon':
- self.assertIsInstance(device,
- vconfig.LibvirtConfigMemoryBalloon)
- self.assertEqual('xen', device.model)
- self.assertEqual(10, device.period)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_get_guest_memory_balloon_config_lxc(self, mock_flavor):
- self.flags(virt_type='lxc', group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
- no_exist = True
- for device in cfg.devices:
- if device.root_name == 'memballoon':
- no_exist = False
- break
- self.assertTrue(no_exist)
-
- def test_xml_and_uri_no_ramdisk_no_kernel(self):
- instance_data = dict(self.test_instance)
- self._check_xml_and_uri(instance_data,
- expect_kernel=False, expect_ramdisk=False)
-
- def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self):
- instance_data = dict(self.test_instance)
- instance_data.update({'vm_mode': vm_mode.HVM})
- self._check_xml_and_uri(instance_data, expect_kernel=False,
- expect_ramdisk=False, expect_xen_hvm=True)
-
- def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self):
- instance_data = dict(self.test_instance)
- instance_data.update({'vm_mode': vm_mode.XEN})
- self._check_xml_and_uri(instance_data, expect_kernel=False,
- expect_ramdisk=False, expect_xen_hvm=False,
- xen_only=True)
-
- def test_xml_and_uri_no_ramdisk(self):
- instance_data = dict(self.test_instance)
- instance_data['kernel_id'] = 'aki-deadbeef'
- self._check_xml_and_uri(instance_data,
- expect_kernel=True, expect_ramdisk=False)
-
- def test_xml_and_uri_no_kernel(self):
- instance_data = dict(self.test_instance)
- instance_data['ramdisk_id'] = 'ari-deadbeef'
- self._check_xml_and_uri(instance_data,
- expect_kernel=False, expect_ramdisk=False)
-
- def test_xml_and_uri(self):
- instance_data = dict(self.test_instance)
- instance_data['ramdisk_id'] = 'ari-deadbeef'
- instance_data['kernel_id'] = 'aki-deadbeef'
- self._check_xml_and_uri(instance_data,
- expect_kernel=True, expect_ramdisk=True)
-
- def test_xml_and_uri_rescue(self):
- instance_data = dict(self.test_instance)
- instance_data['ramdisk_id'] = 'ari-deadbeef'
- instance_data['kernel_id'] = 'aki-deadbeef'
- self._check_xml_and_uri(instance_data, expect_kernel=True,
- expect_ramdisk=True, rescue=instance_data)
-
- def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self):
- instance_data = dict(self.test_instance)
- self._check_xml_and_uri(instance_data, expect_kernel=False,
- expect_ramdisk=False, rescue=instance_data)
-
- def test_xml_and_uri_rescue_no_kernel(self):
- instance_data = dict(self.test_instance)
- instance_data['ramdisk_id'] = 'aki-deadbeef'
- self._check_xml_and_uri(instance_data, expect_kernel=False,
- expect_ramdisk=True, rescue=instance_data)
-
- def test_xml_and_uri_rescue_no_ramdisk(self):
- instance_data = dict(self.test_instance)
- instance_data['kernel_id'] = 'aki-deadbeef'
- self._check_xml_and_uri(instance_data, expect_kernel=True,
- expect_ramdisk=False, rescue=instance_data)
-
- def test_xml_uuid(self):
- self._check_xml_and_uuid({"disk_format": "raw"})
-
- def test_lxc_container_and_uri(self):
- instance_data = dict(self.test_instance)
- self._check_xml_and_container(instance_data)
-
- def test_xml_disk_prefix(self):
- instance_data = dict(self.test_instance)
- self._check_xml_and_disk_prefix(instance_data, None)
-
- def test_xml_user_specified_disk_prefix(self):
- instance_data = dict(self.test_instance)
- self._check_xml_and_disk_prefix(instance_data, 'sd')
-
- def test_xml_disk_driver(self):
- instance_data = dict(self.test_instance)
- self._check_xml_and_disk_driver(instance_data)
-
- def test_xml_disk_bus_virtio(self):
- self._check_xml_and_disk_bus({"disk_format": "raw"},
- None,
- (("disk", "virtio", "vda"),))
-
- def test_xml_disk_bus_ide(self):
- # It's necessary to check if the architecture is power, because
- # power doesn't have support to ide, and so libvirt translate
- # all ide calls to scsi
-
- expected = {arch.PPC: ("cdrom", "scsi", "sda"),
- arch.PPC64: ("cdrom", "scsi", "sda")}
-
- expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
- ("cdrom", "ide", "hda"))
- self._check_xml_and_disk_bus({"disk_format": "iso"},
- None,
- (expec_val,))
-
- def test_xml_disk_bus_ide_and_virtio(self):
- # It's necessary to check if the architecture is power, because
- # power doesn't have support to ide, and so libvirt translate
- # all ide calls to scsi
-
- expected = {arch.PPC: ("cdrom", "scsi", "sda"),
- arch.PPC64: ("cdrom", "scsi", "sda")}
-
- swap = {'device_name': '/dev/vdc',
- 'swap_size': 1}
- ephemerals = [{'device_type': 'disk',
- 'disk_bus': 'virtio',
- 'device_name': '/dev/vdb',
- 'size': 1}]
- block_device_info = {
- 'swap': swap,
- 'ephemerals': ephemerals}
- expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
- ("cdrom", "ide", "hda"))
- self._check_xml_and_disk_bus({"disk_format": "iso"},
- block_device_info,
- (expec_val,
- ("disk", "virtio", "vdb"),
- ("disk", "virtio", "vdc")))
-
- def test_list_instance_domains_fast(self):
- if not hasattr(libvirt, "VIR_CONNECT_LIST_DOMAINS_ACTIVE"):
- self.skipTest("libvirt missing VIR_CONNECT_LIST_DOMAINS_ACTIVE")
-
- vm1 = FakeVirtDomain(id=3, name="instance00000001")
- vm2 = FakeVirtDomain(id=17, name="instance00000002")
- vm3 = FakeVirtDomain(name="instance00000003")
- vm4 = FakeVirtDomain(name="instance00000004")
-
- def fake_list_all(flags):
- vms = []
- if flags & libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE:
- vms.extend([vm1, vm2])
- if flags & libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE:
- vms.extend([vm3, vm4])
- return vms
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.listAllDomains = fake_list_all
-
- self.mox.ReplayAll()
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- doms = drvr._list_instance_domains_fast()
- self.assertEqual(len(doms), 2)
- self.assertEqual(doms[0].name(), vm1.name())
- self.assertEqual(doms[1].name(), vm2.name())
-
- doms = drvr._list_instance_domains_fast(only_running=False)
- self.assertEqual(len(doms), 4)
- self.assertEqual(doms[0].name(), vm1.name())
- self.assertEqual(doms[1].name(), vm2.name())
- self.assertEqual(doms[2].name(), vm3.name())
- self.assertEqual(doms[3].name(), vm4.name())
-
- def test_list_instance_domains_slow(self):
- vm1 = FakeVirtDomain(id=3, name="instance00000001")
- vm2 = FakeVirtDomain(id=17, name="instance00000002")
- vm3 = FakeVirtDomain(name="instance00000003")
- vm4 = FakeVirtDomain(name="instance00000004")
- vms = [vm1, vm2, vm3, vm4]
-
- def fake_lookup_id(id):
- for vm in vms:
- if vm.ID() == id:
- return vm
- ex = fakelibvirt.make_libvirtError(
- libvirt.libvirtError,
- "No such domain",
- error_code=libvirt.VIR_ERR_NO_DOMAIN)
- raise ex
-
- def fake_lookup_name(name):
- for vm in vms:
- if vm.name() == name:
- return vm
- ex = fakelibvirt.make_libvirtError(
- libvirt.libvirtError,
- "No such domain",
- error_code=libvirt.VIR_ERR_NO_DOMAIN)
- raise ex
-
- def fake_list_doms():
- # Include one ID that no longer exists
- return [vm1.ID(), vm2.ID(), 666]
-
- def fake_list_ddoms():
- # Include one name that no longer exists and
- # one dup from running list to show race in
- # transition from inactive -> running
- return [vm1.name(), vm3.name(), vm4.name(), "fishfood"]
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.listDomainsID = fake_list_doms
- libvirt_driver.LibvirtDriver._conn.listDefinedDomains = fake_list_ddoms
- libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup_id
- libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
- libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 2
- libvirt_driver.LibvirtDriver._conn.numOfDefinedDomains = lambda: 2
-
- self.mox.ReplayAll()
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- doms = drvr._list_instance_domains_slow()
- self.assertEqual(len(doms), 2)
- self.assertEqual(doms[0].name(), vm1.name())
- self.assertEqual(doms[1].name(), vm2.name())
-
- doms = drvr._list_instance_domains_slow(only_running=False)
- self.assertEqual(len(doms), 4)
- self.assertEqual(doms[0].name(), vm1.name())
- self.assertEqual(doms[1].name(), vm2.name())
- self.assertEqual(doms[2].name(), vm3.name())
- self.assertEqual(doms[3].name(), vm4.name())
-
- def test_list_instance_domains_fallback_no_support(self):
- vm1 = FakeVirtDomain(id=3, name="instance00000001")
- vm2 = FakeVirtDomain(id=17, name="instance00000002")
- vms = [vm1, vm2]
-
- def fake_lookup_id(id):
- for vm in vms:
- if vm.ID() == id:
- return vm
- ex = fakelibvirt.make_libvirtError(
- libvirt.libvirtError,
- "No such domain",
- error_code=libvirt.VIR_ERR_NO_DOMAIN)
- raise ex
-
- def fake_list_doms():
- return [vm1.ID(), vm2.ID()]
-
- def fake_list_all(flags):
- ex = fakelibvirt.make_libvirtError(
- libvirt.libvirtError,
- "API is not supported",
- error_code=libvirt.VIR_ERR_NO_SUPPORT)
- raise ex
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.listDomainsID = fake_list_doms
- libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup_id
- libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 2
- libvirt_driver.LibvirtDriver._conn.listAllDomains = fake_list_all
-
- self.mox.ReplayAll()
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- doms = drvr._list_instance_domains()
- self.assertEqual(len(doms), 2)
- self.assertEqual(doms[0].id, vm1.id)
- self.assertEqual(doms[1].id, vm2.id)
-
- @mock.patch.object(libvirt_driver.LibvirtDriver,
- "_list_instance_domains_fast")
- def test_list_instance_domains_filtering(self, mock_list):
- vm0 = FakeVirtDomain(id=0, name="Domain-0") # Xen dom-0
- vm1 = FakeVirtDomain(id=3, name="instance00000001")
- vm2 = FakeVirtDomain(id=17, name="instance00000002")
- vm3 = FakeVirtDomain(name="instance00000003")
- vm4 = FakeVirtDomain(name="instance00000004")
-
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- mock_list.return_value = [vm0, vm1, vm2]
- doms = drvr._list_instance_domains()
- self.assertEqual(len(doms), 2)
- self.assertEqual(doms[0].name(), vm1.name())
- self.assertEqual(doms[1].name(), vm2.name())
- mock_list.assert_called_with(True)
-
- mock_list.return_value = [vm0, vm1, vm2, vm3, vm4]
- doms = drvr._list_instance_domains(only_running=False)
- self.assertEqual(len(doms), 4)
- self.assertEqual(doms[0].name(), vm1.name())
- self.assertEqual(doms[1].name(), vm2.name())
- self.assertEqual(doms[2].name(), vm3.name())
- self.assertEqual(doms[3].name(), vm4.name())
- mock_list.assert_called_with(False)
-
- mock_list.return_value = [vm0, vm1, vm2]
- doms = drvr._list_instance_domains(only_guests=False)
- self.assertEqual(len(doms), 3)
- self.assertEqual(doms[0].name(), vm0.name())
- self.assertEqual(doms[1].name(), vm1.name())
- self.assertEqual(doms[2].name(), vm2.name())
- mock_list.assert_called_with(True)
-
- @mock.patch.object(libvirt_driver.LibvirtDriver,
- "_list_instance_domains")
- def test_list_instances(self, mock_list):
- vm1 = FakeVirtDomain(id=3, name="instance00000001")
- vm2 = FakeVirtDomain(id=17, name="instance00000002")
- vm3 = FakeVirtDomain(name="instance00000003")
- vm4 = FakeVirtDomain(name="instance00000004")
-
- mock_list.return_value = [vm1, vm2, vm3, vm4]
-
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- names = drvr.list_instances()
- self.assertEqual(names[0], vm1.name())
- self.assertEqual(names[1], vm2.name())
- self.assertEqual(names[2], vm3.name())
- self.assertEqual(names[3], vm4.name())
- mock_list.assert_called_with(only_running=False)
-
- @mock.patch.object(libvirt_driver.LibvirtDriver,
- "_list_instance_domains")
- def test_list_instance_uuids(self, mock_list):
- vm1 = FakeVirtDomain(id=3, name="instance00000001")
- vm2 = FakeVirtDomain(id=17, name="instance00000002")
- vm3 = FakeVirtDomain(name="instance00000003")
- vm4 = FakeVirtDomain(name="instance00000004")
-
- mock_list.return_value = [vm1, vm2, vm3, vm4]
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- uuids = drvr.list_instance_uuids()
- self.assertEqual(len(uuids), 4)
- self.assertEqual(uuids[0], vm1.UUIDString())
- self.assertEqual(uuids[1], vm2.UUIDString())
- self.assertEqual(uuids[2], vm3.UUIDString())
- self.assertEqual(uuids[3], vm4.UUIDString())
- mock_list.assert_called_with(only_running=False)
-
- @mock.patch.object(libvirt_driver.LibvirtDriver,
- "_list_instance_domains")
- def test_get_all_block_devices(self, mock_list):
- xml = [
- """
- <domain type='kvm'>
- <devices>
- <disk type='file'>
- <source file='filename'/>
- </disk>
- <disk type='block'>
- <source dev='/path/to/dev/1'/>
- </disk>
- </devices>
- </domain>
- """,
- """
- <domain type='kvm'>
- <devices>
- <disk type='file'>
- <source file='filename'/>
- </disk>
- </devices>
- </domain>
- """,
- """
- <domain type='kvm'>
- <devices>
- <disk type='file'>
- <source file='filename'/>
- </disk>
- <disk type='block'>
- <source dev='/path/to/dev/3'/>
- </disk>
- </devices>
- </domain>
- """,
- ]
-
- mock_list.return_value = [
- FakeVirtDomain(xml[0], id=3, name="instance00000001"),
- FakeVirtDomain(xml[1], id=1, name="instance00000002"),
- FakeVirtDomain(xml[2], id=5, name="instance00000003")]
-
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- devices = drvr._get_all_block_devices()
- self.assertEqual(devices, ['/path/to/dev/1', '/path/to/dev/3'])
- mock_list.assert_called_with()
-
- def test_snapshot_in_ami_format(self):
- expected_calls = [
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_UPLOADING,
- 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
- func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
-
- self.flags(snapshots_directory='./', group='libvirt')
-
- # Assign different image_ref from nova/images/fakes for testing ami
- test_instance = copy.deepcopy(self.test_instance)
- test_instance["image_ref"] = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
-
- # Assuming that base image already exists in image_service
- instance_ref = objects.Instance(**test_instance)
- instance_ref.info_cache = objects.InstanceInfoCache(
- network_info=None)
-
- properties = {'instance_id': instance_ref['id'],
- 'user_id': str(self.context.user_id)}
- snapshot_name = 'test-snap'
- sent_meta = {'name': snapshot_name, 'is_public': False,
- 'status': 'creating', 'properties': properties}
- # Create new image. It will be updated in snapshot method
- # To work with it from snapshot, the single image_service is needed
- recv_meta = self.image_service.create(context, sent_meta)
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
- self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
- libvirt_driver.utils.execute = self.fake_execute
- libvirt_driver.libvirt_utils.disk_type = "qcow2"
-
- self.mox.ReplayAll()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'],
- func_call_matcher.call)
-
- snapshot = self.image_service.show(context, recv_meta['id'])
- self.assertIsNone(func_call_matcher.match())
- self.assertEqual(snapshot['properties']['image_state'], 'available')
- self.assertEqual(snapshot['status'], 'active')
- self.assertEqual(snapshot['disk_format'], 'ami')
- self.assertEqual(snapshot['name'], snapshot_name)
-
- def test_lxc_snapshot_in_ami_format(self):
- expected_calls = [
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_UPLOADING,
- 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
- func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
-
- self.flags(snapshots_directory='./',
- virt_type='lxc',
- group='libvirt')
-
- # Assign different image_ref from nova/images/fakes for testing ami
- test_instance = copy.deepcopy(self.test_instance)
- test_instance["image_ref"] = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
-
- # Assuming that base image already exists in image_service
- instance_ref = objects.Instance(**test_instance)
- instance_ref.info_cache = objects.InstanceInfoCache(
- network_info=None)
-
- properties = {'instance_id': instance_ref['id'],
- 'user_id': str(self.context.user_id)}
- snapshot_name = 'test-snap'
- sent_meta = {'name': snapshot_name, 'is_public': False,
- 'status': 'creating', 'properties': properties}
- # Create new image. It will be updated in snapshot method
- # To work with it from snapshot, the single image_service is needed
- recv_meta = self.image_service.create(context, sent_meta)
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
- self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
- libvirt_driver.utils.execute = self.fake_execute
- libvirt_driver.libvirt_utils.disk_type = "qcow2"
-
- self.mox.ReplayAll()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'],
- func_call_matcher.call)
-
- snapshot = self.image_service.show(context, recv_meta['id'])
- self.assertIsNone(func_call_matcher.match())
- self.assertEqual(snapshot['properties']['image_state'], 'available')
- self.assertEqual(snapshot['status'], 'active')
- self.assertEqual(snapshot['disk_format'], 'ami')
- self.assertEqual(snapshot['name'], snapshot_name)
-
- def test_snapshot_in_raw_format(self):
- expected_calls = [
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_UPLOADING,
- 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
- func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
-
- self.flags(snapshots_directory='./', group='libvirt')
-
- # Assuming that base image already exists in image_service
- instance_ref = objects.Instance(**self.test_instance)
- instance_ref.info_cache = objects.InstanceInfoCache(
- network_info=None)
-
- properties = {'instance_id': instance_ref['id'],
- 'user_id': str(self.context.user_id)}
- snapshot_name = 'test-snap'
- sent_meta = {'name': snapshot_name, 'is_public': False,
- 'status': 'creating', 'properties': properties}
- # Create new image. It will be updated in snapshot method
- # To work with it from snapshot, the single image_service is needed
- recv_meta = self.image_service.create(context, sent_meta)
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
- self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
- libvirt_driver.utils.execute = self.fake_execute
- self.stubs.Set(libvirt_driver.libvirt_utils, 'disk_type', 'raw')
-
- def convert_image(source, dest, out_format):
- libvirt_driver.libvirt_utils.files[dest] = ''
-
- self.stubs.Set(images, 'convert_image', convert_image)
-
- self.mox.ReplayAll()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'],
- func_call_matcher.call)
-
- snapshot = self.image_service.show(context, recv_meta['id'])
- self.assertIsNone(func_call_matcher.match())
- self.assertEqual(snapshot['properties']['image_state'], 'available')
- self.assertEqual(snapshot['status'], 'active')
- self.assertEqual(snapshot['disk_format'], 'raw')
- self.assertEqual(snapshot['name'], snapshot_name)
-
- def test_lvm_snapshot_in_raw_format(self):
- # Tests Lvm backend snapshot functionality with raw format
- # snapshots.
- xml = """
- <domain type='kvm'>
- <devices>
- <disk type='block' device='disk'>
- <source dev='/dev/some-vg/some-lv'/>
- </disk>
- </devices>
- </domain>
- """
- update_task_state_calls = [
- mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
- mock.call(task_state=task_states.IMAGE_UPLOADING,
- expected_state=task_states.IMAGE_PENDING_UPLOAD)]
- mock_update_task_state = mock.Mock()
- mock_lookupByName = mock.Mock(return_value=FakeVirtDomain(xml),
- autospec=True)
- volume_info = {'VG': 'nova-vg', 'LV': 'disk'}
- mock_volume_info = mock.Mock(return_value=volume_info,
- autospec=True)
- mock_volume_info_calls = [mock.call('/dev/nova-vg/lv')]
- mock_convert_image = mock.Mock()
-
- def convert_image_side_effect(source, dest, out_format,
- run_as_root=True):
- libvirt_driver.libvirt_utils.files[dest] = ''
- mock_convert_image.side_effect = convert_image_side_effect
-
- self.flags(snapshots_directory='./',
- snapshot_image_format='raw',
- images_type='lvm',
- images_volume_group='nova-vg', group='libvirt')
- libvirt_driver.libvirt_utils.disk_type = "lvm"
-
- # Start test
- image_service = nova.tests.image.fake.FakeImageService()
- instance_ref = objects.Instance(**self.test_instance)
- instance_ref.info_cache = objects.InstanceInfoCache(
- network_info=None)
-
- properties = {'instance_id': instance_ref['id'],
- 'user_id': str(self.context.user_id)}
- snapshot_name = 'test-snap'
- sent_meta = {'name': snapshot_name, 'is_public': False,
- 'status': 'creating', 'properties': properties}
- recv_meta = image_service.create(context, sent_meta)
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- with contextlib.nested(
- mock.patch.object(libvirt_driver.LibvirtDriver,
- '_conn',
- autospec=True),
- mock.patch.object(libvirt_driver.imagebackend.lvm,
- 'volume_info',
- mock_volume_info),
- mock.patch.object(libvirt_driver.imagebackend.images,
- 'convert_image',
- mock_convert_image),
- mock.patch.object(libvirt_driver.LibvirtDriver,
- '_lookup_by_name',
- mock_lookupByName)):
- conn.snapshot(self.context, instance_ref, recv_meta['id'],
- mock_update_task_state)
-
- mock_lookupByName.assert_called_once_with("instance-00000001")
- mock_volume_info.assert_has_calls(mock_volume_info_calls)
- mock_convert_image.assert_called_once_with('/dev/nova-vg/lv',
- mock.ANY,
- 'raw',
- run_as_root=True)
- snapshot = image_service.show(context, recv_meta['id'])
- mock_update_task_state.assert_has_calls(update_task_state_calls)
- self.assertEqual('available', snapshot['properties']['image_state'])
- self.assertEqual('active', snapshot['status'])
- self.assertEqual('raw', snapshot['disk_format'])
- self.assertEqual(snapshot_name, snapshot['name'])
- # This is for all the subsequent tests that do not set the value of
- # images type
- self.flags(images_type='default', group='libvirt')
- libvirt_driver.libvirt_utils.disk_type = "qcow2"
-
- def test_lxc_snapshot_in_raw_format(self):
- expected_calls = [
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_UPLOADING,
- 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
- func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
-
- self.flags(snapshots_directory='./',
- virt_type='lxc',
- group='libvirt')
-
- # Assuming that base image already exists in image_service
- instance_ref = objects.Instance(**self.test_instance)
- instance_ref.info_cache = objects.InstanceInfoCache(
- network_info=None)
-
- properties = {'instance_id': instance_ref['id'],
- 'user_id': str(self.context.user_id)}
- snapshot_name = 'test-snap'
- sent_meta = {'name': snapshot_name, 'is_public': False,
- 'status': 'creating', 'properties': properties}
- # Create new image. It will be updated in snapshot method
- # To work with it from snapshot, the single image_service is needed
- recv_meta = self.image_service.create(context, sent_meta)
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
- self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
- libvirt_driver.utils.execute = self.fake_execute
- self.stubs.Set(libvirt_driver.libvirt_utils, 'disk_type', 'raw')
- libvirt_driver.libvirt_utils.disk_type = "raw"
-
- def convert_image(source, dest, out_format):
- libvirt_driver.libvirt_utils.files[dest] = ''
-
- self.stubs.Set(images, 'convert_image', convert_image)
-
- self.mox.ReplayAll()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'],
- func_call_matcher.call)
-
- snapshot = self.image_service.show(context, recv_meta['id'])
- self.assertIsNone(func_call_matcher.match())
- self.assertEqual(snapshot['properties']['image_state'], 'available')
- self.assertEqual(snapshot['status'], 'active')
- self.assertEqual(snapshot['disk_format'], 'raw')
- self.assertEqual(snapshot['name'], snapshot_name)
-
- def test_snapshot_in_qcow2_format(self):
- expected_calls = [
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_UPLOADING,
- 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
- func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
-
- self.flags(snapshot_image_format='qcow2',
- snapshots_directory='./',
- group='libvirt')
-
- # Assuming that base image already exists in image_service
- instance_ref = objects.Instance(**self.test_instance)
- instance_ref.info_cache = objects.InstanceInfoCache(
- network_info=None)
-
- properties = {'instance_id': instance_ref['id'],
- 'user_id': str(self.context.user_id)}
- snapshot_name = 'test-snap'
- sent_meta = {'name': snapshot_name, 'is_public': False,
- 'status': 'creating', 'properties': properties}
- # Create new image. It will be updated in snapshot method
- # To work with it from snapshot, the single image_service is needed
- recv_meta = self.image_service.create(context, sent_meta)
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
- self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
- libvirt_driver.utils.execute = self.fake_execute
- libvirt_driver.libvirt_utils.disk_type = "qcow2"
-
- self.mox.ReplayAll()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'],
- func_call_matcher.call)
-
- snapshot = self.image_service.show(context, recv_meta['id'])
- self.assertIsNone(func_call_matcher.match())
- self.assertEqual(snapshot['properties']['image_state'], 'available')
- self.assertEqual(snapshot['status'], 'active')
- self.assertEqual(snapshot['disk_format'], 'qcow2')
- self.assertEqual(snapshot['name'], snapshot_name)
-
- def test_lxc_snapshot_in_qcow2_format(self):
- expected_calls = [
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_UPLOADING,
- 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
- func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
-
- self.flags(snapshot_image_format='qcow2',
- snapshots_directory='./',
- virt_type='lxc',
- group='libvirt')
-
- # Assuming that base image already exists in image_service
- instance_ref = objects.Instance(**self.test_instance)
- instance_ref.info_cache = objects.InstanceInfoCache(
- network_info=None)
-
- properties = {'instance_id': instance_ref['id'],
- 'user_id': str(self.context.user_id)}
- snapshot_name = 'test-snap'
- sent_meta = {'name': snapshot_name, 'is_public': False,
- 'status': 'creating', 'properties': properties}
- # Create new image. It will be updated in snapshot method
- # To work with it from snapshot, the single image_service is needed
- recv_meta = self.image_service.create(context, sent_meta)
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
- self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
- libvirt_driver.utils.execute = self.fake_execute
- libvirt_driver.libvirt_utils.disk_type = "qcow2"
-
- self.mox.ReplayAll()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'],
- func_call_matcher.call)
-
- snapshot = self.image_service.show(context, recv_meta['id'])
- self.assertIsNone(func_call_matcher.match())
- self.assertEqual(snapshot['properties']['image_state'], 'available')
- self.assertEqual(snapshot['status'], 'active')
- self.assertEqual(snapshot['disk_format'], 'qcow2')
- self.assertEqual(snapshot['name'], snapshot_name)
-
- def test_lvm_snapshot_in_qcow2_format(self):
- # Tests Lvm backend snapshot functionality with raw format
- # snapshots.
- xml = """
- <domain type='kvm'>
- <devices>
- <disk type='block' device='disk'>
- <source dev='/dev/some-vg/some-lv'/>
- </disk>
- </devices>
- </domain>
- """
- update_task_state_calls = [
- mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
- mock.call(task_state=task_states.IMAGE_UPLOADING,
- expected_state=task_states.IMAGE_PENDING_UPLOAD)]
- mock_update_task_state = mock.Mock()
- mock_lookupByName = mock.Mock(return_value=FakeVirtDomain(xml),
- autospec=True)
- volume_info = {'VG': 'nova-vg', 'LV': 'disk'}
- mock_volume_info = mock.Mock(return_value=volume_info, autospec=True)
- mock_volume_info_calls = [mock.call('/dev/nova-vg/lv')]
- mock_convert_image = mock.Mock()
-
- def convert_image_side_effect(source, dest, out_format,
- run_as_root=True):
- libvirt_driver.libvirt_utils.files[dest] = ''
- mock_convert_image.side_effect = convert_image_side_effect
-
- self.flags(snapshots_directory='./',
- snapshot_image_format='qcow2',
- images_type='lvm',
- images_volume_group='nova-vg', group='libvirt')
- libvirt_driver.libvirt_utils.disk_type = "lvm"
-
- # Start test
- image_service = nova.tests.image.fake.FakeImageService()
- instance_ref = objects.Instance(**self.test_instance)
- instance_ref.info_cache = objects.InstanceInfoCache(
- network_info=None)
-
- properties = {'instance_id': instance_ref['id'],
- 'user_id': str(self.context.user_id)}
- snapshot_name = 'test-snap'
- sent_meta = {'name': snapshot_name, 'is_public': False,
- 'status': 'creating', 'properties': properties}
- recv_meta = image_service.create(context, sent_meta)
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- with contextlib.nested(
- mock.patch.object(libvirt_driver.LibvirtDriver,
- '_conn',
- autospec=True),
- mock.patch.object(libvirt_driver.imagebackend.lvm,
- 'volume_info',
- mock_volume_info),
- mock.patch.object(libvirt_driver.imagebackend.images,
- 'convert_image',
- mock_convert_image),
- mock.patch.object(libvirt_driver.LibvirtDriver,
- '_lookup_by_name',
- mock_lookupByName)):
- conn.snapshot(self.context, instance_ref, recv_meta['id'],
- mock_update_task_state)
-
- mock_lookupByName.assert_called_once_with("instance-00000001")
- mock_volume_info.assert_has_calls(mock_volume_info_calls)
- mock_convert_image.assert_called_once_with('/dev/nova-vg/lv',
- mock.ANY,
- 'qcow2',
- run_as_root=True)
- snapshot = image_service.show(context, recv_meta['id'])
- mock_update_task_state.assert_has_calls(update_task_state_calls)
- self.assertEqual('available', snapshot['properties']['image_state'])
- self.assertEqual('active', snapshot['status'])
- self.assertEqual('qcow2', snapshot['disk_format'])
- self.assertEqual(snapshot_name, snapshot['name'])
- self.flags(images_type='default', group='libvirt')
- libvirt_driver.libvirt_utils.disk_type = "qcow2"
-
- def test_snapshot_no_image_architecture(self):
- expected_calls = [
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_UPLOADING,
- 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
- func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
-
- self.flags(snapshots_directory='./',
- group='libvirt')
-
- # Assign different image_ref from nova/images/fakes for
- # testing different base image
- test_instance = copy.deepcopy(self.test_instance)
- test_instance["image_ref"] = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
-
- # Assuming that base image already exists in image_service
- instance_ref = objects.Instance(**test_instance)
- instance_ref.info_cache = objects.InstanceInfoCache(
- network_info=None)
-
- properties = {'instance_id': instance_ref['id'],
- 'user_id': str(self.context.user_id)}
- snapshot_name = 'test-snap'
- sent_meta = {'name': snapshot_name, 'is_public': False,
- 'status': 'creating', 'properties': properties}
- # Create new image. It will be updated in snapshot method
- # To work with it from snapshot, the single image_service is needed
- recv_meta = self.image_service.create(context, sent_meta)
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
- self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
- libvirt_driver.utils.execute = self.fake_execute
-
- self.mox.ReplayAll()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'],
- func_call_matcher.call)
-
- snapshot = self.image_service.show(context, recv_meta['id'])
- self.assertIsNone(func_call_matcher.match())
- self.assertEqual(snapshot['properties']['image_state'], 'available')
- self.assertEqual(snapshot['status'], 'active')
- self.assertEqual(snapshot['name'], snapshot_name)
-
- def test_lxc_snapshot_no_image_architecture(self):
- expected_calls = [
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_UPLOADING,
- 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
- func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
-
- self.flags(snapshots_directory='./',
- virt_type='lxc',
- group='libvirt')
-
- # Assign different image_ref from nova/images/fakes for
- # testing different base image
- test_instance = copy.deepcopy(self.test_instance)
- test_instance["image_ref"] = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
-
- # Assuming that base image already exists in image_service
- instance_ref = objects.Instance(**test_instance)
- properties = {'instance_id': instance_ref['id'],
- 'user_id': str(self.context.user_id)}
- snapshot_name = 'test-snap'
- sent_meta = {'name': snapshot_name, 'is_public': False,
- 'status': 'creating', 'properties': properties}
- # Create new image. It will be updated in snapshot method
- # To work with it from snapshot, the single image_service is needed
- recv_meta = self.image_service.create(context, sent_meta)
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
- self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
- libvirt_driver.utils.execute = self.fake_execute
- libvirt_driver.libvirt_utils.disk_type = "qcow2"
-
- self.mox.ReplayAll()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'],
- func_call_matcher.call)
-
- snapshot = self.image_service.show(context, recv_meta['id'])
- self.assertIsNone(func_call_matcher.match())
- self.assertEqual(snapshot['properties']['image_state'], 'available')
- self.assertEqual(snapshot['status'], 'active')
- self.assertEqual(snapshot['name'], snapshot_name)
-
- def test_snapshot_no_original_image(self):
- expected_calls = [
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_UPLOADING,
- 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
- func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
-
- self.flags(snapshots_directory='./',
- group='libvirt')
-
- # Assign a non-existent image
- test_instance = copy.deepcopy(self.test_instance)
- test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'
-
- instance_ref = objects.Instance(**test_instance)
- instance_ref.info_cache = objects.InstanceInfoCache(
- network_info=None)
-
- properties = {'instance_id': instance_ref['id'],
- 'user_id': str(self.context.user_id)}
- snapshot_name = 'test-snap'
- sent_meta = {'name': snapshot_name, 'is_public': False,
- 'status': 'creating', 'properties': properties}
- recv_meta = self.image_service.create(context, sent_meta)
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
- self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
- libvirt_driver.utils.execute = self.fake_execute
-
- self.mox.ReplayAll()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'],
- func_call_matcher.call)
-
- snapshot = self.image_service.show(context, recv_meta['id'])
- self.assertIsNone(func_call_matcher.match())
- self.assertEqual(snapshot['properties']['image_state'], 'available')
- self.assertEqual(snapshot['status'], 'active')
- self.assertEqual(snapshot['name'], snapshot_name)
-
- def test_lxc_snapshot_no_original_image(self):
- expected_calls = [
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_UPLOADING,
- 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
- func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
-
- self.flags(snapshots_directory='./',
- virt_type='lxc',
- group='libvirt')
- libvirt_driver.libvirt_utils.disk_type = "qcow2"
-
- # Assign a non-existent image
- test_instance = copy.deepcopy(self.test_instance)
- test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'
-
- instance_ref = objects.Instance(**test_instance)
- properties = {'instance_id': instance_ref['id'],
- 'user_id': str(self.context.user_id)}
- snapshot_name = 'test-snap'
- sent_meta = {'name': snapshot_name, 'is_public': False,
- 'status': 'creating', 'properties': properties}
- recv_meta = self.image_service.create(context, sent_meta)
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
- self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
- libvirt_driver.utils.execute = self.fake_execute
-
- self.mox.ReplayAll()
-
- conn = libvirt_driver.LibvirtDriver(False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'],
- func_call_matcher.call)
-
- snapshot = self.image_service.show(context, recv_meta['id'])
- self.assertIsNone(func_call_matcher.match())
- self.assertEqual(snapshot['properties']['image_state'], 'available')
- self.assertEqual(snapshot['status'], 'active')
- self.assertEqual(snapshot['name'], snapshot_name)
-
- def test_snapshot_metadata_image(self):
- expected_calls = [
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_UPLOADING,
- 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
- func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
-
- self.flags(snapshots_directory='./',
- group='libvirt')
-
- # Assign an image with an architecture defined (x86_64)
- test_instance = copy.deepcopy(self.test_instance)
- test_instance["image_ref"] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
-
- instance_ref = objects.Instance(**test_instance)
- instance_ref.info_cache = objects.InstanceInfoCache(
- network_info=None)
-
- properties = {'instance_id': instance_ref['id'],
- 'user_id': str(self.context.user_id),
- 'architecture': 'fake_arch',
- 'key_a': 'value_a',
- 'key_b': 'value_b'}
- snapshot_name = 'test-snap'
- sent_meta = {'name': snapshot_name, 'is_public': False,
- 'status': 'creating', 'properties': properties}
- recv_meta = self.image_service.create(context, sent_meta)
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
- self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
- libvirt_driver.utils.execute = self.fake_execute
-
- self.mox.ReplayAll()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'],
- func_call_matcher.call)
-
- snapshot = self.image_service.show(context, recv_meta['id'])
- self.assertIsNone(func_call_matcher.match())
- self.assertEqual(snapshot['properties']['image_state'], 'available')
- self.assertEqual(snapshot['properties']['architecture'], 'fake_arch')
- self.assertEqual(snapshot['properties']['key_a'], 'value_a')
- self.assertEqual(snapshot['properties']['key_b'], 'value_b')
- self.assertEqual(snapshot['status'], 'active')
- self.assertEqual(snapshot['name'], snapshot_name)
-
- def test_snapshot_with_os_type(self):
- expected_calls = [
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_UPLOADING,
- 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
- func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
-
- self.flags(snapshots_directory='./',
- group='libvirt')
-
- # Assign a non-existent image
- test_instance = copy.deepcopy(self.test_instance)
- test_instance["image_ref"] = '661122aa-1234-dede-fefe-babababababa'
- test_instance["os_type"] = 'linux'
-
- instance_ref = objects.Instance(**test_instance)
- instance_ref.info_cache = objects.InstanceInfoCache(
- network_info=None)
- properties = {'instance_id': instance_ref['id'],
- 'user_id': str(self.context.user_id),
- 'os_type': instance_ref['os_type']}
- snapshot_name = 'test-snap'
- sent_meta = {'name': snapshot_name, 'is_public': False,
- 'status': 'creating', 'properties': properties}
- recv_meta = self.image_service.create(context, sent_meta)
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
- self.mox.StubOutWithMock(libvirt_driver.utils, 'execute')
- libvirt_driver.utils.execute = self.fake_execute
-
- self.mox.ReplayAll()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.snapshot(self.context, instance_ref, recv_meta['id'],
- func_call_matcher.call)
-
- snapshot = self.image_service.show(context, recv_meta['id'])
- self.assertIsNone(func_call_matcher.match())
- self.assertEqual(snapshot['properties']['image_state'], 'available')
- self.assertEqual(snapshot['properties']['os_type'],
- instance_ref['os_type'])
- self.assertEqual(snapshot['status'], 'active')
- self.assertEqual(snapshot['name'], snapshot_name)
-
- def test__create_snapshot_metadata(self):
- base = {}
- instance = {'kernel_id': 'kernel',
- 'project_id': 'prj_id',
- 'ramdisk_id': 'ram_id',
- 'os_type': None}
- img_fmt = 'raw'
- snp_name = 'snapshot_name'
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- ret = conn._create_snapshot_metadata(base, instance, img_fmt, snp_name)
- expected = {'is_public': False,
- 'status': 'active',
- 'name': snp_name,
- 'properties': {
- 'kernel_id': instance['kernel_id'],
- 'image_location': 'snapshot',
- 'image_state': 'available',
- 'owner_id': instance['project_id'],
- 'ramdisk_id': instance['ramdisk_id'],
- },
- 'disk_format': img_fmt,
- 'container_format': base.get('container_format', 'bare')
- }
- self.assertEqual(ret, expected)
-
- # simulate an instance with os_type field defined
- # disk format equals to ami
- # container format not equals to bare
- instance['os_type'] = 'linux'
- base['disk_format'] = 'ami'
- base['container_format'] = 'test_container'
- expected['properties']['os_type'] = instance['os_type']
- expected['disk_format'] = base['disk_format']
- expected['container_format'] = base.get('container_format', 'bare')
- ret = conn._create_snapshot_metadata(base, instance, img_fmt, snp_name)
- self.assertEqual(ret, expected)
-
- @mock.patch('nova.virt.libvirt.volume.LibvirtFakeVolumeDriver.'
- 'connect_volume')
- @mock.patch('nova.virt.libvirt.volume.LibvirtFakeVolumeDriver.get_config')
- def test_get_volume_config(self, get_config, connect_volume):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- connection_info = {'driver_volume_type': 'fake',
- 'data': {'device_path': '/fake',
- 'access_mode': 'rw'}}
- bdm = {'device_name': 'vdb',
- 'disk_bus': 'fake-bus',
- 'device_type': 'fake-type'}
- disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
- 'dev': 'vdb'}
- mock_config = mock.MagicMock()
-
- get_config.return_value = mock_config
- config = conn._get_volume_config(connection_info, disk_info)
- get_config.assert_called_once_with(connection_info, disk_info)
- self.assertEqual(mock_config, config)
-
- def test_attach_invalid_volume_type(self):
- self.create_fake_libvirt_mock()
- libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
- instance = fake_instance.fake_instance_obj(
- self.context, **self.test_instance)
- self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.assertRaises(exception.VolumeDriverNotFound,
- conn.attach_volume, None,
- {"driver_volume_type": "badtype"},
- instance,
- "/dev/sda")
-
- def test_attach_blockio_invalid_hypervisor(self):
- self.flags(virt_type='fake_type', group='libvirt')
- self.create_fake_libvirt_mock()
- libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
- instance = fake_instance.fake_instance_obj(
- self.context, **self.test_instance)
- self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.assertRaises(exception.InvalidHypervisorType,
- conn.attach_volume, None,
- {"driver_volume_type": "fake",
- "data": {"logical_block_size": "4096",
- "physical_block_size": "4096"}
- },
- instance,
- "/dev/sda")
-
- def test_attach_blockio_invalid_version(self):
- def get_lib_version_stub():
- return (0 * 1000 * 1000) + (9 * 1000) + 8
- self.flags(virt_type='qemu', group='libvirt')
- self.create_fake_libvirt_mock()
- libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
- instance = fake_instance.fake_instance_obj(
- self.context, **self.test_instance)
- self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(self.conn, "getLibVersion", get_lib_version_stub)
- self.assertRaises(exception.Invalid,
- conn.attach_volume, None,
- {"driver_volume_type": "fake",
- "data": {"logical_block_size": "4096",
- "physical_block_size": "4096"}
- },
- instance,
- "/dev/sda")
-
- @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
- def test_attach_volume_with_vir_domain_affect_live_flag(self,
- mock_lookup_by_name, mock_get_info):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
- mock_dom = mock.MagicMock()
- mock_lookup_by_name.return_value = mock_dom
-
- connection_info = {"driver_volume_type": "fake",
- "data": {"device_path": "/fake",
- "access_mode": "rw"}}
- bdm = {'device_name': 'vdb',
- 'disk_bus': 'fake-bus',
- 'device_type': 'fake-type'}
- disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
- 'dev': 'vdb'}
- mock_get_info.return_value = disk_info
- mock_conf = mock.MagicMock()
- flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
- fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
-
- with contextlib.nested(
- mock.patch.object(conn, '_connect_volume'),
- mock.patch.object(conn, '_get_volume_config',
- return_value=mock_conf),
- mock.patch.object(conn, '_set_cache_mode')
- ) as (mock_connect_volume, mock_get_volume_config,
- mock_set_cache_mode):
- for state in (power_state.RUNNING, power_state.PAUSED):
- mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
-
- conn.attach_volume(self.context, connection_info, instance,
- "/dev/vdb", disk_bus=bdm['disk_bus'],
- device_type=bdm['device_type'])
-
- mock_lookup_by_name.assert_called_with(instance['name'])
- mock_get_info.assert_called_with(CONF.libvirt.virt_type, bdm)
- mock_connect_volume.assert_called_with(
- connection_info, disk_info)
- mock_get_volume_config.assert_called_with(
- connection_info, disk_info)
- mock_set_cache_mode.assert_called_with(mock_conf)
- mock_dom.attachDeviceFlags.assert_called_with(
- mock_conf.to_xml(), flags)
-
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_disk_xml')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
- def test_detach_volume_with_vir_domain_affect_live_flag(self,
- mock_lookup_by_name, mock_get_disk_xml):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
- mock_dom = mock.MagicMock()
- mock_xml = \
- """
- <disk type='file'>
- <source file='/path/to/fake-volume'/>
- <target dev='vdc' bus='virtio'/>
- </disk>
- """
- mock_get_disk_xml.return_value = mock_xml
-
- connection_info = {"driver_volume_type": "fake",
- "data": {"device_path": "/fake",
- "access_mode": "rw"}}
- flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
- fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
-
- with mock.patch.object(conn, '_disconnect_volume') as \
- mock_disconnect_volume:
- for state in (power_state.RUNNING, power_state.PAUSED):
- mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
- mock_lookup_by_name.return_value = mock_dom
-
- conn.detach_volume(connection_info, instance, '/dev/vdc')
-
- mock_lookup_by_name.assert_called_with(instance['name'])
- mock_get_disk_xml.assert_called_with(mock_dom.XMLDesc(0),
- 'vdc')
- mock_dom.detachDeviceFlags.assert_called_with(mock_xml, flags)
- mock_disconnect_volume.assert_called_with(
- connection_info, 'vdc')
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_multi_nic(self, mock_flavor):
- network_info = _fake_network_info(self.stubs, 2)
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- xml = conn._get_guest_xml(self.context, instance_ref,
- network_info, disk_info)
- tree = etree.fromstring(xml)
- interfaces = tree.findall("./devices/interface")
- self.assertEqual(len(interfaces), 2)
- self.assertEqual(interfaces[0].get('type'), 'bridge')
-
- def _behave_supports_direct_io(self, raise_open=False, raise_write=False,
- exc=ValueError()):
- open_behavior = os.open(os.path.join('.', '.directio.test'),
- os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
- if raise_open:
- open_behavior.AndRaise(exc)
- else:
- open_behavior.AndReturn(3)
- write_bahavior = os.write(3, mox.IgnoreArg())
- if raise_write:
- write_bahavior.AndRaise(exc)
- else:
- os.close(3)
- os.unlink(3)
-
- def test_supports_direct_io(self):
- # O_DIRECT is not supported on all Python runtimes, so on platforms
- # where it's not supported (e.g. Mac), we can still test the code-path
- # by stubbing out the value.
- if not hasattr(os, 'O_DIRECT'):
- # `mock` seems to have trouble stubbing an attr that doesn't
- # originally exist, so falling back to stubbing out the attribute
- # directly.
- os.O_DIRECT = 16384
- self.addCleanup(delattr, os, 'O_DIRECT')
-
- einval = OSError()
- einval.errno = errno.EINVAL
- self.mox.StubOutWithMock(os, 'open')
- self.mox.StubOutWithMock(os, 'write')
- self.mox.StubOutWithMock(os, 'close')
- self.mox.StubOutWithMock(os, 'unlink')
- _supports_direct_io = libvirt_driver.LibvirtDriver._supports_direct_io
-
- self._behave_supports_direct_io()
- self._behave_supports_direct_io(raise_write=True)
- self._behave_supports_direct_io(raise_open=True)
- self._behave_supports_direct_io(raise_write=True, exc=einval)
- self._behave_supports_direct_io(raise_open=True, exc=einval)
-
- self.mox.ReplayAll()
- self.assertTrue(_supports_direct_io('.'))
- self.assertRaises(ValueError, _supports_direct_io, '.')
- self.assertRaises(ValueError, _supports_direct_io, '.')
- self.assertFalse(_supports_direct_io('.'))
- self.assertFalse(_supports_direct_io('.'))
- self.mox.VerifyAll()
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def _check_xml_and_container(self, instance, mock_flavor):
- instance_ref = objects.Instance(**instance)
-
- self.flags(virt_type='lxc', group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- self.assertEqual(conn.uri(), 'lxc:///')
-
- network_info = _fake_network_info(self.stubs, 1)
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- xml = conn._get_guest_xml(self.context, instance_ref,
- network_info, disk_info)
- tree = etree.fromstring(xml)
-
- check = [
- (lambda t: t.find('.').get('type'), 'lxc'),
- (lambda t: t.find('./os/type').text, 'exe'),
- (lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]
-
- for i, (check, expected_result) in enumerate(check):
- self.assertEqual(check(tree),
- expected_result,
- '%s failed common check %d' % (xml, i))
-
- target = tree.find('./devices/filesystem/source').get('dir')
- self.assertTrue(len(target) > 0)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def _check_xml_and_disk_prefix(self, instance, prefix, mock_flavor):
- instance_ref = objects.Instance(**instance)
-
- def _get_prefix(p, default):
- if p:
- return p + 'a'
- return default
-
- type_disk_map = {
- 'qemu': [
- (lambda t: t.find('.').get('type'), 'qemu'),
- (lambda t: t.find('./devices/disk/target').get('dev'),
- _get_prefix(prefix, 'vda'))],
- 'xen': [
- (lambda t: t.find('.').get('type'), 'xen'),
- (lambda t: t.find('./devices/disk/target').get('dev'),
- _get_prefix(prefix, 'sda'))],
- 'kvm': [
- (lambda t: t.find('.').get('type'), 'kvm'),
- (lambda t: t.find('./devices/disk/target').get('dev'),
- _get_prefix(prefix, 'vda'))],
- 'uml': [
- (lambda t: t.find('.').get('type'), 'uml'),
- (lambda t: t.find('./devices/disk/target').get('dev'),
- _get_prefix(prefix, 'ubda'))]
- }
-
- for (virt_type, checks) in type_disk_map.iteritems():
- self.flags(virt_type=virt_type, group='libvirt')
- if prefix:
- self.flags(disk_prefix=prefix, group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- network_info = _fake_network_info(self.stubs, 1)
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
- xml = conn._get_guest_xml(self.context, instance_ref,
- network_info, disk_info)
- tree = etree.fromstring(xml)
-
- for i, (check, expected_result) in enumerate(checks):
- self.assertEqual(check(tree),
- expected_result,
- '%s != %s failed check %d' %
- (check(tree), expected_result, i))
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def _check_xml_and_disk_driver(self, image_meta, mock_flavor):
- os_open = os.open
- directio_supported = True
-
- def os_open_stub(path, flags, *args, **kwargs):
- if flags & os.O_DIRECT:
- if not directio_supported:
- raise OSError(errno.EINVAL,
- '%s: %s' % (os.strerror(errno.EINVAL), path))
- flags &= ~os.O_DIRECT
- return os_open(path, flags, *args, **kwargs)
-
- self.stubs.Set(os, 'open', os_open_stub)
-
- @staticmethod
- def connection_supports_direct_io_stub(dirpath):
- return directio_supported
-
- self.stubs.Set(libvirt_driver.LibvirtDriver,
- '_supports_direct_io', connection_supports_direct_io_stub)
-
- instance_ref = objects.Instance(**self.test_instance)
- network_info = _fake_network_info(self.stubs, 1)
-
- drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
-
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- xml = drv._get_guest_xml(self.context, instance_ref,
- network_info, disk_info, image_meta)
- tree = etree.fromstring(xml)
- disks = tree.findall('./devices/disk/driver')
- for guest_disk in disks:
- self.assertEqual(guest_disk.get("cache"), "none")
-
- directio_supported = False
-
- # The O_DIRECT availability is cached on first use in
- # LibvirtDriver, hence we re-create it here
- drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- xml = drv._get_guest_xml(self.context, instance_ref,
- network_info, disk_info, image_meta)
- tree = etree.fromstring(xml)
- disks = tree.findall('./devices/disk/driver')
- for guest_disk in disks:
- self.assertEqual(guest_disk.get("cache"), "writethrough")
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def _check_xml_and_disk_bus(self, image_meta,
- block_device_info, wantConfig,
- mock_flavor):
- instance_ref = objects.Instance(**self.test_instance)
- network_info = _fake_network_info(self.stubs, 1)
-
- drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref,
- block_device_info,
- image_meta)
-
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- xml = drv._get_guest_xml(self.context, instance_ref,
- network_info, disk_info, image_meta,
- block_device_info=block_device_info)
- tree = etree.fromstring(xml)
-
- got_disks = tree.findall('./devices/disk')
- got_disk_targets = tree.findall('./devices/disk/target')
- for i in range(len(wantConfig)):
- want_device_type = wantConfig[i][0]
- want_device_bus = wantConfig[i][1]
- want_device_dev = wantConfig[i][2]
-
- got_device_type = got_disks[i].get('device')
- got_device_bus = got_disk_targets[i].get('bus')
- got_device_dev = got_disk_targets[i].get('dev')
-
- self.assertEqual(got_device_type, want_device_type)
- self.assertEqual(got_device_bus, want_device_bus)
- self.assertEqual(got_device_dev, want_device_dev)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def _check_xml_and_uuid(self, image_meta, mock_flavor):
- instance_ref = objects.Instance(**self.test_instance)
- network_info = _fake_network_info(self.stubs, 1)
-
- drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
- xml = drv._get_guest_xml(self.context, instance_ref,
- network_info, disk_info, image_meta)
- tree = etree.fromstring(xml)
- self.assertEqual(tree.find('./uuid').text,
- instance_ref['uuid'])
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def _check_xml_and_uri(self, instance, mock_flavor,
- expect_ramdisk=False, expect_kernel=False,
- rescue=None, expect_xen_hvm=False, xen_only=False):
- instance_ref = objects.Instance(**instance)
-
- xen_vm_mode = vm_mode.XEN
- if expect_xen_hvm:
- xen_vm_mode = vm_mode.HVM
-
- type_uri_map = {'qemu': ('qemu:///system',
- [(lambda t: t.find('.').get('type'), 'qemu'),
- (lambda t: t.find('./os/type').text,
- vm_mode.HVM),
- (lambda t: t.find('./devices/emulator'), None)]),
- 'kvm': ('qemu:///system',
- [(lambda t: t.find('.').get('type'), 'kvm'),
- (lambda t: t.find('./os/type').text,
- vm_mode.HVM),
- (lambda t: t.find('./devices/emulator'), None)]),
- 'uml': ('uml:///system',
- [(lambda t: t.find('.').get('type'), 'uml'),
- (lambda t: t.find('./os/type').text,
- vm_mode.UML)]),
- 'xen': ('xen:///',
- [(lambda t: t.find('.').get('type'), 'xen'),
- (lambda t: t.find('./os/type').text,
- xen_vm_mode)])}
-
- if expect_xen_hvm or xen_only:
- hypervisors_to_check = ['xen']
- else:
- hypervisors_to_check = ['qemu', 'kvm', 'xen']
-
- for hypervisor_type in hypervisors_to_check:
- check_list = type_uri_map[hypervisor_type][1]
-
- if rescue:
- suffix = '.rescue'
- else:
- suffix = ''
- if expect_kernel:
- check = (lambda t: self.relpath(t.find('./os/kernel').text).
- split('/')[1], 'kernel' + suffix)
- else:
- check = (lambda t: t.find('./os/kernel'), None)
- check_list.append(check)
-
- if expect_kernel:
- check = (lambda t: "no_timer_check" in t.find('./os/cmdline').
- text, hypervisor_type == "qemu")
- check_list.append(check)
- # Hypervisors that only support vm_mode.HVM and Xen
- # should not produce configuration that results in kernel
- # arguments
- if not expect_kernel and (hypervisor_type in
- ['qemu', 'kvm', 'xen']):
- check = (lambda t: t.find('./os/root'), None)
- check_list.append(check)
- check = (lambda t: t.find('./os/cmdline'), None)
- check_list.append(check)
-
- if expect_ramdisk:
- check = (lambda t: self.relpath(t.find('./os/initrd').text).
- split('/')[1], 'ramdisk' + suffix)
- else:
- check = (lambda t: t.find('./os/initrd'), None)
- check_list.append(check)
-
- if hypervisor_type in ['qemu', 'kvm']:
- xpath = "./sysinfo/system/entry"
- check = (lambda t: t.findall(xpath)[0].get("name"),
- "manufacturer")
- check_list.append(check)
- check = (lambda t: t.findall(xpath)[0].text,
- version.vendor_string())
- check_list.append(check)
-
- check = (lambda t: t.findall(xpath)[1].get("name"),
- "product")
- check_list.append(check)
- check = (lambda t: t.findall(xpath)[1].text,
- version.product_string())
- check_list.append(check)
-
- check = (lambda t: t.findall(xpath)[2].get("name"),
- "version")
- check_list.append(check)
- # NOTE(sirp): empty strings don't roundtrip in lxml (they are
- # converted to None), so we need an `or ''` to correct for that
- check = (lambda t: t.findall(xpath)[2].text or '',
- version.version_string_with_package())
- check_list.append(check)
-
- check = (lambda t: t.findall(xpath)[3].get("name"),
- "serial")
- check_list.append(check)
- check = (lambda t: t.findall(xpath)[3].text,
- "cef19ce0-0ca2-11df-855d-b19fbce37686")
- check_list.append(check)
-
- check = (lambda t: t.findall(xpath)[4].get("name"),
- "uuid")
- check_list.append(check)
- check = (lambda t: t.findall(xpath)[4].text,
- instance['uuid'])
- check_list.append(check)
-
- if hypervisor_type in ['qemu', 'kvm']:
- check = (lambda t: t.findall('./devices/serial')[0].get(
- 'type'), 'file')
- check_list.append(check)
- check = (lambda t: t.findall('./devices/serial')[1].get(
- 'type'), 'pty')
- check_list.append(check)
- check = (lambda t: self.relpath(t.findall(
- './devices/serial/source')[0].get('path')).
- split('/')[1], 'console.log')
- check_list.append(check)
- else:
- check = (lambda t: t.find('./devices/console').get(
- 'type'), 'pty')
- check_list.append(check)
-
- common_checks = [
- (lambda t: t.find('.').tag, 'domain'),
- (lambda t: t.find('./memory').text, '2097152')]
- if rescue:
- common_checks += [
- (lambda t: self.relpath(t.findall('./devices/disk/source')[0].
- get('file')).split('/')[1], 'disk.rescue'),
- (lambda t: self.relpath(t.findall('./devices/disk/source')[1].
- get('file')).split('/')[1], 'disk')]
- else:
- common_checks += [(lambda t: self.relpath(t.findall(
- './devices/disk/source')[0].get('file')).split('/')[1],
- 'disk')]
- common_checks += [(lambda t: self.relpath(t.findall(
- './devices/disk/source')[1].get('file')).split('/')[1],
- 'disk.local')]
-
- for virt_type in hypervisors_to_check:
- expected_uri = type_uri_map[virt_type][0]
- checks = type_uri_map[virt_type][1]
- self.flags(virt_type=virt_type, group='libvirt')
-
- with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
- del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- self.assertEqual(conn.uri(), expected_uri)
-
- network_info = _fake_network_info(self.stubs, 1)
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref,
- rescue=rescue)
-
- flavor = instance_ref.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
- xml = conn._get_guest_xml(self.context, instance_ref,
- network_info, disk_info,
- rescue=rescue)
- tree = etree.fromstring(xml)
- for i, (check, expected_result) in enumerate(checks):
- self.assertEqual(check(tree),
- expected_result,
- '%s != %s failed check %d' %
- (check(tree), expected_result, i))
-
- for i, (check, expected_result) in enumerate(common_checks):
- self.assertEqual(check(tree),
- expected_result,
- '%s != %s failed common check %d' %
- (check(tree), expected_result, i))
-
- filterref = './devices/interface/filterref'
- vif = network_info[0]
- nic_id = vif['address'].replace(':', '')
- fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(), conn)
- instance_filter_name = fw._instance_filter_name(instance_ref,
- nic_id)
- self.assertEqual(tree.find(filterref).get('filter'),
- instance_filter_name)
-
- # This test is supposed to make sure we don't
- # override a specifically set uri
- #
- # Deliberately not just assigning this string to CONF.connection_uri
- # and checking against that later on. This way we make sure the
- # implementation doesn't fiddle around with the CONF.
- testuri = 'something completely different'
- self.flags(connection_uri=testuri, group='libvirt')
- for (virt_type, (expected_uri, checks)) in type_uri_map.iteritems():
- self.flags(virt_type=virt_type, group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- self.assertEqual(conn.uri(), testuri)
-
- def test_ensure_filtering_rules_for_instance_timeout(self):
- # ensure_filtering_fules_for_instance() finishes with timeout.
- # Preparing mocks
- def fake_none(self, *args):
- return
-
- def fake_raise(self):
- raise libvirt.libvirtError('ERR')
-
- class FakeTime(object):
- def __init__(self):
- self.counter = 0
-
- def sleep(self, t):
- self.counter += t
-
- fake_timer = FakeTime()
-
- def fake_sleep(t):
- fake_timer.sleep(t)
-
- # _fake_network_info must be called before create_fake_libvirt_mock(),
- # as _fake_network_info calls importutils.import_class() and
- # create_fake_libvirt_mock() mocks importutils.import_class().
- network_info = _fake_network_info(self.stubs, 1)
- self.create_fake_libvirt_mock()
- instance_ref = objects.Instance(**self.test_instance)
-
- # Start test
- self.mox.ReplayAll()
- try:
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn.firewall_driver,
- 'setup_basic_filtering',
- fake_none)
- self.stubs.Set(conn.firewall_driver,
- 'prepare_instance_filter',
- fake_none)
- self.stubs.Set(conn.firewall_driver,
- 'instance_filter_exists',
- fake_none)
- self.stubs.Set(greenthread,
- 'sleep',
- fake_sleep)
- conn.ensure_filtering_rules_for_instance(instance_ref,
- network_info)
- except exception.NovaException as e:
- msg = ('The firewall filter for %s does not exist' %
- instance_ref['name'])
- c1 = (0 <= six.text_type(e).find(msg))
- self.assertTrue(c1)
-
- self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
- "amount of time")
-
- def test_check_can_live_migrate_dest_all_pass_with_block_migration(self):
- instance_ref = objects.Instance(**self.test_instance)
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- compute_info = {'disk_available_least': 400,
- 'cpu_info': 'asdf',
- }
- filename = "file"
-
- self.mox.StubOutWithMock(conn, '_create_shared_storage_test_file')
- self.mox.StubOutWithMock(conn, '_compare_cpu')
-
- # _check_cpu_match
- conn._compare_cpu("asdf")
-
- # mounted_on_same_shared_storage
- conn._create_shared_storage_test_file().AndReturn(filename)
-
- self.mox.ReplayAll()
- return_value = conn.check_can_live_migrate_destination(self.context,
- instance_ref, compute_info, compute_info, True)
- self.assertThat({"filename": "file",
- 'image_type': 'default',
- 'disk_available_mb': 409600,
- "disk_over_commit": False,
- "block_migration": True},
- matchers.DictMatches(return_value))
-
- def test_check_can_live_migrate_dest_all_pass_no_block_migration(self):
- instance_ref = objects.Instance(**self.test_instance)
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- compute_info = {'cpu_info': 'asdf'}
- filename = "file"
-
- self.mox.StubOutWithMock(conn, '_create_shared_storage_test_file')
- self.mox.StubOutWithMock(conn, '_compare_cpu')
-
- # _check_cpu_match
- conn._compare_cpu("asdf")
-
- # mounted_on_same_shared_storage
- conn._create_shared_storage_test_file().AndReturn(filename)
-
- self.mox.ReplayAll()
- return_value = conn.check_can_live_migrate_destination(self.context,
- instance_ref, compute_info, compute_info, False)
- self.assertThat({"filename": "file",
- "image_type": 'default',
- "block_migration": False,
- "disk_over_commit": False,
- "disk_available_mb": None},
- matchers.DictMatches(return_value))
-
- def test_check_can_live_migrate_dest_incompatible_cpu_raises(self):
- instance_ref = objects.Instance(**self.test_instance)
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- compute_info = {'cpu_info': 'asdf'}
-
- self.mox.StubOutWithMock(conn, '_compare_cpu')
-
- conn._compare_cpu("asdf").AndRaise(exception.InvalidCPUInfo(
- reason='foo')
- )
-
- self.mox.ReplayAll()
- self.assertRaises(exception.InvalidCPUInfo,
- conn.check_can_live_migrate_destination,
- self.context, instance_ref,
- compute_info, compute_info, False)
-
- def test_check_can_live_migrate_dest_cleanup_works_correctly(self):
- objects.Instance(**self.test_instance)
- dest_check_data = {"filename": "file",
- "block_migration": True,
- "disk_over_commit": False,
- "disk_available_mb": 1024}
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- self.mox.StubOutWithMock(conn, '_cleanup_shared_storage_test_file')
- conn._cleanup_shared_storage_test_file("file")
-
- self.mox.ReplayAll()
- conn.check_can_live_migrate_destination_cleanup(self.context,
- dest_check_data)
-
- def _mock_can_live_migrate_source(self, block_migration=False,
- is_shared_block_storage=False,
- is_shared_instance_path=False,
- disk_available_mb=1024):
- instance = objects.Instance(**self.test_instance)
- dest_check_data = {'filename': 'file',
- 'image_type': 'default',
- 'block_migration': block_migration,
- 'disk_over_commit': False,
- 'disk_available_mb': disk_available_mb}
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- self.mox.StubOutWithMock(conn, '_is_shared_block_storage')
- conn._is_shared_block_storage(instance, dest_check_data).AndReturn(
- is_shared_block_storage)
- self.mox.StubOutWithMock(conn, '_check_shared_storage_test_file')
- conn._check_shared_storage_test_file('file').AndReturn(
- is_shared_instance_path)
-
- return (instance, dest_check_data, conn)
-
- def test_check_can_live_migrate_source_block_migration(self):
- instance, dest_check_data, conn = self._mock_can_live_migrate_source(
- block_migration=True)
-
- self.mox.StubOutWithMock(conn, "_assert_dest_node_has_enough_disk")
- conn._assert_dest_node_has_enough_disk(
- self.context, instance, dest_check_data['disk_available_mb'],
- False, None)
-
- self.mox.ReplayAll()
- ret = conn.check_can_live_migrate_source(self.context, instance,
- dest_check_data)
- self.assertIsInstance(ret, dict)
- self.assertIn('is_shared_block_storage', ret)
- self.assertIn('is_shared_instance_path', ret)
-
- def test_check_can_live_migrate_source_shared_block_storage(self):
- instance, dest_check_data, conn = self._mock_can_live_migrate_source(
- is_shared_block_storage=True)
- self.mox.ReplayAll()
- conn.check_can_live_migrate_source(self.context, instance,
- dest_check_data)
-
- def test_check_can_live_migrate_source_shared_instance_path(self):
- instance, dest_check_data, conn = self._mock_can_live_migrate_source(
- is_shared_instance_path=True)
- self.mox.ReplayAll()
- conn.check_can_live_migrate_source(self.context, instance,
- dest_check_data)
-
- def test_check_can_live_migrate_source_non_shared_fails(self):
- instance, dest_check_data, conn = self._mock_can_live_migrate_source()
- self.mox.ReplayAll()
- self.assertRaises(exception.InvalidSharedStorage,
- conn.check_can_live_migrate_source, self.context,
- instance, dest_check_data)
-
- def test_check_can_live_migrate_source_shared_block_migration_fails(self):
- instance, dest_check_data, conn = self._mock_can_live_migrate_source(
- block_migration=True,
- is_shared_block_storage=True)
-
- self.mox.ReplayAll()
- self.assertRaises(exception.InvalidLocalStorage,
- conn.check_can_live_migrate_source,
- self.context, instance, dest_check_data)
-
- def test_check_can_live_migrate_shared_path_block_migration_fails(self):
- instance, dest_check_data, conn = self._mock_can_live_migrate_source(
- block_migration=True,
- is_shared_instance_path=True)
-
- self.mox.ReplayAll()
- self.assertRaises(exception.InvalidLocalStorage,
- conn.check_can_live_migrate_source,
- self.context, instance, dest_check_data)
-
- def test_check_can_live_migrate_non_shared_non_block_migration_fails(self):
- instance, dest_check_data, conn = self._mock_can_live_migrate_source()
- self.mox.ReplayAll()
- self.assertRaises(exception.InvalidSharedStorage,
- conn.check_can_live_migrate_source,
- self.context, instance, dest_check_data)
-
- def test_check_can_live_migrate_source_with_dest_not_enough_disk(self):
- instance, dest_check_data, conn = self._mock_can_live_migrate_source(
- block_migration=True,
- disk_available_mb=0)
-
- self.mox.StubOutWithMock(conn, "get_instance_disk_info")
- conn.get_instance_disk_info(instance["name"],
- block_device_info=None).AndReturn(
- '[{"virt_disk_size":2}]')
-
- self.mox.ReplayAll()
- self.assertRaises(exception.MigrationError,
- conn.check_can_live_migrate_source,
- self.context, instance, dest_check_data)
-
- def test_is_shared_block_storage_rbd(self):
- CONF.set_override('images_type', 'rbd', 'libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.assertTrue(conn._is_shared_block_storage(
- 'instance', {'image_type': 'rbd'}))
-
- def test_is_shared_block_storage_non_remote(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.assertFalse(conn._is_shared_block_storage(
- 'instance', {'is_shared_instance_path': False}))
-
- def test_is_shared_block_storage_rbd_only_source(self):
- CONF.set_override('images_type', 'rbd', 'libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.assertFalse(conn._is_shared_block_storage(
- 'instance', {'is_shared_instance_path': False}))
-
- def test_is_shared_block_storage_rbd_only_dest(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.assertFalse(conn._is_shared_block_storage(
- 'instance', {'image_type': 'rbd',
- 'is_shared_instance_path': False}))
-
- def test_is_shared_block_storage_volume_backed(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- with mock.patch.object(conn, 'get_instance_disk_info') as mock_get:
- mock_get.return_value = '[]'
- self.assertTrue(conn._is_shared_block_storage(
- {'name': 'name'}, {'is_volume_backed': True,
- 'is_shared_instance_path': False}))
-
- def test_is_shared_block_storage_volume_backed_with_disk(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- with mock.patch.object(conn, 'get_instance_disk_info') as mock_get:
- mock_get.return_value = '[{"virt_disk_size":2}]'
- self.assertFalse(conn._is_shared_block_storage(
- {'name': 'instance_name'},
- {'is_volume_backed': True, 'is_shared_instance_path': False}))
- mock_get.assert_called_once_with('instance_name')
-
- def test_is_shared_block_storage_nfs(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- mock_image_backend = mock.MagicMock()
- conn.image_backend = mock_image_backend
- mock_backend = mock.MagicMock()
- mock_image_backend.backend.return_value = mock_backend
- mock_backend.is_file_in_instance_path.return_value = True
- self.assertTrue(conn._is_shared_block_storage(
- 'instance', {'is_shared_instance_path': True}))
-
- @mock.patch.object(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', 8675, create=True)
- def test_live_migration_changes_listen_addresses(self):
- self.compute = importutils.import_object(CONF.compute_manager)
- instance_dict = dict(self.test_instance)
- instance_dict.update({'host': 'fake',
- 'power_state': power_state.RUNNING,
- 'vm_state': vm_states.ACTIVE})
- instance_ref = objects.Instance(**instance_dict)
-
- xml_tmpl = ("<domain type='kvm'>"
- "<devices>"
- "<graphics type='vnc' listen='{vnc}'>"
- "<listen address='{vnc}'/>"
- "</graphics>"
- "<graphics type='spice' listen='{spice}'>"
- "<listen address='{spice}'/>"
- "</graphics>"
- "</devices>"
- "</domain>")
-
- initial_xml = xml_tmpl.format(vnc='1.2.3.4',
- spice='5.6.7.8')
-
- target_xml = xml_tmpl.format(vnc='10.0.0.1',
- spice='10.0.0.2')
- target_xml = etree.tostring(etree.fromstring(target_xml))
-
- # Preparing mocks
- vdmock = self.mox.CreateMock(libvirt.virDomain)
- self.mox.StubOutWithMock(vdmock, "migrateToURI2")
- _bandwidth = CONF.libvirt.live_migration_bandwidth
- vdmock.XMLDesc(libvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
- initial_xml)
- vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest',
- None,
- target_xml,
- mox.IgnoreArg(),
- None,
- _bandwidth).AndRaise(libvirt.libvirtError("ERR"))
-
- def fake_lookup(instance_name):
- if instance_name == instance_ref['name']:
- return vdmock
-
- self.create_fake_libvirt_mock(lookupByName=fake_lookup)
- self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
- self.compute._rollback_live_migration(self.context, instance_ref,
- 'dest', False)
-
- # start test
- migrate_data = {'pre_live_migration_result':
- {'graphics_listen_addrs':
- {'vnc': '10.0.0.1', 'spice': '10.0.0.2'}}}
- self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.assertRaises(libvirt.libvirtError,
- conn._live_migration,
- self.context, instance_ref, 'dest', False,
- self.compute._rollback_live_migration,
- migrate_data=migrate_data)
-
- @mock.patch.object(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True)
- def test_live_migration_uses_migrateToURI_without_migratable_flag(self):
- self.compute = importutils.import_object(CONF.compute_manager)
- instance_dict = dict(self.test_instance)
- instance_dict.update({'host': 'fake',
- 'power_state': power_state.RUNNING,
- 'vm_state': vm_states.ACTIVE})
- instance_ref = objects.Instance(**instance_dict)
-
- # Preparing mocks
- vdmock = self.mox.CreateMock(libvirt.virDomain)
- self.mox.StubOutWithMock(vdmock, "migrateToURI")
- _bandwidth = CONF.libvirt.live_migration_bandwidth
- vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
- mox.IgnoreArg(),
- None,
- _bandwidth).AndRaise(libvirt.libvirtError("ERR"))
-
- def fake_lookup(instance_name):
- if instance_name == instance_ref['name']:
- return vdmock
-
- self.create_fake_libvirt_mock(lookupByName=fake_lookup)
- self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
- self.compute._rollback_live_migration(self.context, instance_ref,
- 'dest', False)
-
- # start test
- migrate_data = {'pre_live_migration_result':
- {'graphics_listen_addrs':
- {'vnc': '0.0.0.0', 'spice': '0.0.0.0'}}}
- self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.assertRaises(libvirt.libvirtError,
- conn._live_migration,
- self.context, instance_ref, 'dest', False,
- self.compute._rollback_live_migration,
- migrate_data=migrate_data)
-
- def test_live_migration_uses_migrateToURI_without_dest_listen_addrs(self):
- self.compute = importutils.import_object(CONF.compute_manager)
- instance_dict = dict(self.test_instance)
- instance_dict.update({'host': 'fake',
- 'power_state': power_state.RUNNING,
- 'vm_state': vm_states.ACTIVE})
- instance_ref = objects.Instance(**instance_dict)
-
- # Preparing mocks
- vdmock = self.mox.CreateMock(libvirt.virDomain)
- self.mox.StubOutWithMock(vdmock, "migrateToURI")
- _bandwidth = CONF.libvirt.live_migration_bandwidth
- vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
- mox.IgnoreArg(),
- None,
- _bandwidth).AndRaise(libvirt.libvirtError("ERR"))
-
- def fake_lookup(instance_name):
- if instance_name == instance_ref['name']:
- return vdmock
-
- self.create_fake_libvirt_mock(lookupByName=fake_lookup)
- self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
- self.compute._rollback_live_migration(self.context, instance_ref,
- 'dest', False)
-
- # start test
- migrate_data = {}
- self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.assertRaises(libvirt.libvirtError,
- conn._live_migration,
- self.context, instance_ref, 'dest', False,
- self.compute._rollback_live_migration,
- migrate_data=migrate_data)
-
- @mock.patch.object(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True)
- def test_live_migration_fails_without_migratable_flag_or_0_addr(self):
- self.flags(vnc_enabled=True, vncserver_listen='1.2.3.4')
- self.compute = importutils.import_object(CONF.compute_manager)
- instance_dict = dict(self.test_instance)
- instance_dict.update({'host': 'fake',
- 'power_state': power_state.RUNNING,
- 'vm_state': vm_states.ACTIVE})
- instance_ref = objects.Instance(**instance_dict)
-
- # Preparing mocks
- vdmock = self.mox.CreateMock(libvirt.virDomain)
- self.mox.StubOutWithMock(vdmock, "migrateToURI")
-
- def fake_lookup(instance_name):
- if instance_name == instance_ref['name']:
- return vdmock
-
- self.create_fake_libvirt_mock(lookupByName=fake_lookup)
- self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
- self.compute._rollback_live_migration(self.context, instance_ref,
- 'dest', False)
-
- # start test
- migrate_data = {'pre_live_migration_result':
- {'graphics_listen_addrs':
- {'vnc': '1.2.3.4', 'spice': '1.2.3.4'}}}
- self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.assertRaises(exception.MigrationError,
- conn._live_migration,
- self.context, instance_ref, 'dest', False,
- self.compute._rollback_live_migration,
- migrate_data=migrate_data)
-
- def test_live_migration_raises_exception(self):
- # Confirms recover method is called when exceptions are raised.
- # Preparing data
- self.compute = importutils.import_object(CONF.compute_manager)
- instance_dict = dict(self.test_instance)
- instance_dict.update({'host': 'fake',
- 'power_state': power_state.RUNNING,
- 'vm_state': vm_states.ACTIVE})
- instance_ref = objects.Instance(**instance_dict)
-
- # Preparing mocks
- vdmock = self.mox.CreateMock(libvirt.virDomain)
- self.mox.StubOutWithMock(vdmock, "migrateToURI2")
- _bandwidth = CONF.libvirt.live_migration_bandwidth
- if getattr(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None) is None:
- vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
- mox.IgnoreArg(),
- None,
- _bandwidth).AndRaise(
- libvirt.libvirtError('ERR'))
- else:
- vdmock.XMLDesc(libvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
- FakeVirtDomain().XMLDesc(0))
- vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest',
- None,
- mox.IgnoreArg(),
- mox.IgnoreArg(),
- None,
- _bandwidth).AndRaise(
- libvirt.libvirtError('ERR'))
-
- def fake_lookup(instance_name):
- if instance_name == instance_ref['name']:
- return vdmock
-
- self.create_fake_libvirt_mock(lookupByName=fake_lookup)
- self.mox.StubOutWithMock(self.compute, "_rollback_live_migration")
- self.compute._rollback_live_migration(self.context, instance_ref,
- 'dest', False)
-
- # start test
- migrate_data = {'pre_live_migration_result':
- {'graphics_listen_addrs':
- {'vnc': '127.0.0.1', 'spice': '127.0.0.1'}}}
- self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.assertRaises(libvirt.libvirtError,
- conn._live_migration,
- self.context, instance_ref, 'dest', False,
- self.compute._rollback_live_migration,
- migrate_data=migrate_data)
-
- self.assertEqual(vm_states.ACTIVE, instance_ref.vm_state)
- self.assertEqual(power_state.RUNNING, instance_ref.power_state)
-
- @mock.patch.object(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE', 8675, create=True)
- def test_live_migration_raises_unsupported_config_exception(self):
- # Tests that when migrateToURI2 fails with VIR_ERR_CONFIG_UNSUPPORTED,
- # migrateToURI is used instead.
-
- # Preparing data
- instance_ref = fake_instance.fake_instance_obj(
- self.context, **self.test_instance)
-
- # Preparing mocks
- vdmock = self.mox.CreateMock(libvirt.virDomain)
- self.mox.StubOutWithMock(vdmock, 'migrateToURI2')
- self.mox.StubOutWithMock(vdmock, 'migrateToURI')
- _bandwidth = CONF.libvirt.live_migration_bandwidth
- vdmock.XMLDesc(libvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
- FakeVirtDomain().XMLDesc(0))
- unsupported_config_error = libvirt.libvirtError('ERR')
- unsupported_config_error.err = (libvirt.VIR_ERR_CONFIG_UNSUPPORTED,)
- # This is the first error we hit but since the error code is
- # VIR_ERR_CONFIG_UNSUPPORTED we'll try migrateToURI.
- vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest', None,
- mox.IgnoreArg(), mox.IgnoreArg(), None,
- _bandwidth).AndRaise(unsupported_config_error)
- # This is the second and final error that will actually kill the run,
- # we use TestingException to make sure it's not the same libvirtError
- # above.
- vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
- mox.IgnoreArg(), None,
- _bandwidth).AndRaise(test.TestingException('oops'))
-
- def fake_lookup(instance_name):
- if instance_name == instance_ref.name:
- return vdmock
-
- self.create_fake_libvirt_mock(lookupByName=fake_lookup)
-
- def fake_recover_method(context, instance, dest, block_migration):
- pass
-
- graphics_listen_addrs = {'vnc': '0.0.0.0', 'spice': '127.0.0.1'}
- migrate_data = {'pre_live_migration_result':
- {'graphics_listen_addrs': graphics_listen_addrs}}
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- self.mox.StubOutWithMock(
- conn, '_check_graphics_addresses_can_live_migrate')
- conn._check_graphics_addresses_can_live_migrate(graphics_listen_addrs)
- self.mox.ReplayAll()
-
- # start test
- self.assertRaises(test.TestingException, conn._live_migration,
- self.context, instance_ref, 'dest', post_method=None,
- recover_method=fake_recover_method,
- migrate_data=migrate_data)
-
- def test_rollback_live_migration_at_destination(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- with mock.patch.object(conn, "destroy") as mock_destroy:
- conn.rollback_live_migration_at_destination("context",
- "instance", [], None, True, None)
- mock_destroy.assert_called_once_with("context",
- "instance", [], None, True, None)
-
- def _do_test_create_images_and_backing(self, disk_type):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.mox.StubOutWithMock(conn, '_fetch_instance_kernel_ramdisk')
- self.mox.StubOutWithMock(libvirt_driver.libvirt_utils, 'create_image')
-
- disk_info = {'path': 'foo', 'type': disk_type,
- 'disk_size': 1 * 1024 ** 3,
- 'virt_disk_size': 20 * 1024 ** 3,
- 'backing_file': None}
- disk_info_json = jsonutils.dumps([disk_info])
-
- libvirt_driver.libvirt_utils.create_image(
- disk_info['type'], mox.IgnoreArg(), disk_info['virt_disk_size'])
- conn._fetch_instance_kernel_ramdisk(self.context, self.test_instance)
- self.mox.ReplayAll()
-
- self.stubs.Set(os.path, 'exists', lambda *args: False)
- conn._create_images_and_backing(self.context, self.test_instance,
- "/fake/instance/dir", disk_info_json)
-
- def test_create_images_and_backing_qcow2(self):
- self._do_test_create_images_and_backing('qcow2')
-
- def test_create_images_and_backing_raw(self):
- self._do_test_create_images_and_backing('raw')
-
- def test_create_images_and_backing_ephemeral_gets_created(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- disk_info_json = jsonutils.dumps(
- [{u'backing_file': u'fake_image_backing_file',
- u'disk_size': 10747904,
- u'path': u'disk_path',
- u'type': u'qcow2',
- u'virt_disk_size': 25165824},
- {u'backing_file': u'ephemeral_1_default',
- u'disk_size': 393216,
- u'over_committed_disk_size': 1073348608,
- u'path': u'disk_eph_path',
- u'type': u'qcow2',
- u'virt_disk_size': 1073741824}])
-
- base_dir = os.path.join(CONF.instances_path,
- CONF.image_cache_subdirectory_name)
- self.test_instance.update({'name': 'fake_instance',
- 'user_id': 'fake-user',
- 'os_type': None,
- 'project_id': 'fake-project'})
-
- with contextlib.nested(
- mock.patch.object(conn, '_fetch_instance_kernel_ramdisk'),
- mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image'),
- mock.patch.object(conn, '_create_ephemeral')
- ) as (fetch_kernel_ramdisk_mock, fetch_image_mock,
- create_ephemeral_mock):
- conn._create_images_and_backing(self.context, self.test_instance,
- "/fake/instance/dir",
- disk_info_json)
- self.assertEqual(len(create_ephemeral_mock.call_args_list), 1)
- m_args, m_kwargs = create_ephemeral_mock.call_args_list[0]
- self.assertEqual(
- os.path.join(base_dir, 'ephemeral_1_default'),
- m_kwargs['target'])
- self.assertEqual(len(fetch_image_mock.call_args_list), 1)
- m_args, m_kwargs = fetch_image_mock.call_args_list[0]
- self.assertEqual(
- os.path.join(base_dir, 'fake_image_backing_file'),
- m_kwargs['target'])
-
- def test_create_images_and_backing_disk_info_none(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.mox.StubOutWithMock(conn, '_fetch_instance_kernel_ramdisk')
-
- conn._fetch_instance_kernel_ramdisk(self.context, self.test_instance)
- self.mox.ReplayAll()
-
- conn._create_images_and_backing(self.context, self.test_instance,
- "/fake/instance/dir", None)
-
- def test_pre_live_migration_works_correctly_mocked(self):
- # Creating testdata
- vol = {'block_device_mapping': [
- {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
- {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- class FakeNetworkInfo():
- def fixed_ips(self):
- return ["test_ip_addr"]
-
- def fake_none(*args, **kwargs):
- return
-
- self.stubs.Set(conn, '_create_images_and_backing', fake_none)
-
- inst_ref = {'id': 'foo'}
- c = context.get_admin_context()
- nw_info = FakeNetworkInfo()
-
- # Creating mocks
- self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
- driver.block_device_info_get_mapping(vol
- ).AndReturn(vol['block_device_mapping'])
- self.mox.StubOutWithMock(conn, "_connect_volume")
- for v in vol['block_device_mapping']:
- disk_info = {
- 'bus': "scsi",
- 'dev': v['mount_device'].rpartition("/")[2],
- 'type': "disk"
- }
- conn._connect_volume(v['connection_info'],
- disk_info)
- self.mox.StubOutWithMock(conn, 'plug_vifs')
- conn.plug_vifs(mox.IsA(inst_ref), nw_info)
-
- self.mox.ReplayAll()
- result = conn.pre_live_migration(c, inst_ref, vol, nw_info, None)
-
- target_res = {'graphics_listen_addrs': {'spice': '127.0.0.1',
- 'vnc': '127.0.0.1'}}
- self.assertEqual(result, target_res)
-
- def test_pre_live_migration_block_with_config_drive_mocked(self):
- # Creating testdata
- vol = {'block_device_mapping': [
- {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
- {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- def fake_true(*args, **kwargs):
- return True
-
- self.stubs.Set(configdrive, 'required_by', fake_true)
-
- inst_ref = {'id': 'foo'}
- c = context.get_admin_context()
-
- self.assertRaises(exception.NoLiveMigrationForConfigDriveInLibVirt,
- conn.pre_live_migration, c, inst_ref, vol, None,
- None, {'is_shared_instance_path': False,
- 'is_shared_block_storage': False})
-
- def test_pre_live_migration_vol_backed_works_correctly_mocked(self):
- # Creating testdata, using temp dir.
- with utils.tempdir() as tmpdir:
- self.flags(instances_path=tmpdir)
- vol = {'block_device_mapping': [
- {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
- {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- def fake_none(*args, **kwargs):
- return
-
- self.stubs.Set(conn, '_create_images_and_backing', fake_none)
-
- class FakeNetworkInfo():
- def fixed_ips(self):
- return ["test_ip_addr"]
- inst_ref = objects.Instance(**self.test_instance)
- c = context.get_admin_context()
- nw_info = FakeNetworkInfo()
- # Creating mocks
- self.mox.StubOutWithMock(conn, "_connect_volume")
- for v in vol['block_device_mapping']:
- disk_info = {
- 'bus': "scsi",
- 'dev': v['mount_device'].rpartition("/")[2],
- 'type': "disk"
- }
- conn._connect_volume(v['connection_info'],
- disk_info)
- self.mox.StubOutWithMock(conn, 'plug_vifs')
- conn.plug_vifs(mox.IsA(inst_ref), nw_info)
- self.mox.ReplayAll()
- migrate_data = {'is_shared_instance_path': False,
- 'is_volume_backed': True,
- 'block_migration': False,
- 'instance_relative_path': inst_ref['name']
- }
- ret = conn.pre_live_migration(c, inst_ref, vol, nw_info, None,
- migrate_data)
- target_ret = {'graphics_listen_addrs': {'spice': '127.0.0.1',
- 'vnc': '127.0.0.1'}}
- self.assertEqual(ret, target_ret)
- self.assertTrue(os.path.exists('%s/%s/' % (tmpdir,
- inst_ref['name'])))
-
- def test_pre_live_migration_plug_vifs_retry_fails(self):
- self.flags(live_migration_retry_count=3)
- instance = {'name': 'test', 'uuid': 'uuid'}
-
- def fake_plug_vifs(instance, network_info):
- raise processutils.ProcessExecutionError()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn, 'plug_vifs', fake_plug_vifs)
- self.stubs.Set(eventlet.greenthread, 'sleep', lambda x: None)
- self.assertRaises(processutils.ProcessExecutionError,
- conn.pre_live_migration,
- self.context, instance, block_device_info=None,
- network_info=[], disk_info={})
-
- def test_pre_live_migration_plug_vifs_retry_works(self):
- self.flags(live_migration_retry_count=3)
- called = {'count': 0}
- instance = {'name': 'test', 'uuid': 'uuid'}
-
- def fake_plug_vifs(instance, network_info):
- called['count'] += 1
- if called['count'] < CONF.live_migration_retry_count:
- raise processutils.ProcessExecutionError()
- else:
- return
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn, 'plug_vifs', fake_plug_vifs)
- self.stubs.Set(eventlet.greenthread, 'sleep', lambda x: None)
- conn.pre_live_migration(self.context, instance, block_device_info=None,
- network_info=[], disk_info={})
-
- def test_pre_live_migration_image_not_created_with_shared_storage(self):
- migrate_data_set = [{'is_shared_block_storage': False,
- 'block_migration': False},
- {'is_shared_block_storage': True,
- 'block_migration': False},
- {'is_shared_block_storage': False,
- 'block_migration': True}]
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- instance = objects.Instance(**self.test_instance)
- # creating mocks
- with contextlib.nested(
- mock.patch.object(conn,
- '_create_images_and_backing'),
- mock.patch.object(conn,
- 'ensure_filtering_rules_for_instance'),
- mock.patch.object(conn, 'plug_vifs'),
- ) as (
- create_image_mock,
- rules_mock,
- plug_mock,
- ):
- for migrate_data in migrate_data_set:
- res = conn.pre_live_migration(self.context, instance,
- block_device_info=None,
- network_info=[], disk_info={},
- migrate_data=migrate_data)
- self.assertFalse(create_image_mock.called)
- self.assertIsInstance(res, dict)
-
- def test_pre_live_migration_with_not_shared_instance_path(self):
- migrate_data = {'is_shared_block_storage': False,
- 'is_shared_instance_path': False}
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- instance = objects.Instance(**self.test_instance)
-
- def check_instance_dir(context, instance,
- instance_dir, disk_info):
- self.assertTrue(instance_dir)
- # creating mocks
- with contextlib.nested(
- mock.patch.object(conn,
- '_create_images_and_backing',
- side_effect=check_instance_dir),
- mock.patch.object(conn,
- 'ensure_filtering_rules_for_instance'),
- mock.patch.object(conn, 'plug_vifs'),
- ) as (
- create_image_mock,
- rules_mock,
- plug_mock,
- ):
- res = conn.pre_live_migration(self.context, instance,
- block_device_info=None,
- network_info=[], disk_info={},
- migrate_data=migrate_data)
- self.assertTrue(create_image_mock.called)
- self.assertIsInstance(res, dict)
-
- def test_get_instance_disk_info_works_correctly(self):
- # Test data
- instance_ref = objects.Instance(**self.test_instance)
- dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
- "<devices>"
- "<disk type='file'><driver name='qemu' type='raw'/>"
- "<source file='/test/disk'/>"
- "<target dev='vda' bus='virtio'/></disk>"
- "<disk type='file'><driver name='qemu' type='qcow2'/>"
- "<source file='/test/disk.local'/>"
- "<target dev='vdb' bus='virtio'/></disk>"
- "</devices></domain>")
-
- # Preparing mocks
- vdmock = self.mox.CreateMock(libvirt.virDomain)
- self.mox.StubOutWithMock(vdmock, "XMLDesc")
- vdmock.XMLDesc(0).AndReturn(dummyxml)
-
- def fake_lookup(instance_name):
- if instance_name == instance_ref['name']:
- return vdmock
- self.create_fake_libvirt_mock(lookupByName=fake_lookup)
-
- fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
- fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
- fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
-
- self.mox.StubOutWithMock(os.path, "getsize")
- os.path.getsize('/test/disk').AndReturn((10737418240))
- os.path.getsize('/test/disk.local').AndReturn((3328599655))
-
- ret = ("image: /test/disk\n"
- "file format: raw\n"
- "virtual size: 20G (21474836480 bytes)\n"
- "disk size: 3.1G\n"
- "cluster_size: 2097152\n"
- "backing file: /test/dummy (actual path: /backing/file)\n")
-
- self.mox.StubOutWithMock(os.path, "exists")
- os.path.exists('/test/disk.local').AndReturn(True)
-
- self.mox.StubOutWithMock(utils, "execute")
- utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
- '/test/disk.local').AndReturn((ret, ''))
-
- self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- info = conn.get_instance_disk_info(instance_ref['name'])
- info = jsonutils.loads(info)
- self.assertEqual(info[0]['type'], 'raw')
- self.assertEqual(info[0]['path'], '/test/disk')
- self.assertEqual(info[0]['disk_size'], 10737418240)
- self.assertEqual(info[0]['backing_file'], "")
- self.assertEqual(info[0]['over_committed_disk_size'], 0)
- self.assertEqual(info[1]['type'], 'qcow2')
- self.assertEqual(info[1]['path'], '/test/disk.local')
- self.assertEqual(info[1]['virt_disk_size'], 21474836480)
- self.assertEqual(info[1]['backing_file'], "file")
- self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
-
- def test_post_live_migration(self):
- vol = {'block_device_mapping': [
- {'connection_info': 'dummy1', 'mount_device': '/dev/sda'},
- {'connection_info': 'dummy2', 'mount_device': '/dev/sdb'}]}
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- inst_ref = {'id': 'foo'}
- cntx = context.get_admin_context()
-
- # Set up the mock expectations
- with contextlib.nested(
- mock.patch.object(driver, 'block_device_info_get_mapping',
- return_value=vol['block_device_mapping']),
- mock.patch.object(conn, '_disconnect_volume')
- ) as (block_device_info_get_mapping, _disconnect_volume):
- conn.post_live_migration(cntx, inst_ref, vol)
-
- block_device_info_get_mapping.assert_has_calls([
- mock.call(vol)])
- _disconnect_volume.assert_has_calls([
- mock.call(v['connection_info'],
- v['mount_device'].rpartition("/")[2])
- for v in vol['block_device_mapping']])
-
- def test_get_instance_disk_info_excludes_volumes(self):
- # Test data
- instance_ref = objects.Instance(**self.test_instance)
- dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
- "<devices>"
- "<disk type='file'><driver name='qemu' type='raw'/>"
- "<source file='/test/disk'/>"
- "<target dev='vda' bus='virtio'/></disk>"
- "<disk type='file'><driver name='qemu' type='qcow2'/>"
- "<source file='/test/disk.local'/>"
- "<target dev='vdb' bus='virtio'/></disk>"
- "<disk type='file'><driver name='qemu' type='qcow2'/>"
- "<source file='/fake/path/to/volume1'/>"
- "<target dev='vdc' bus='virtio'/></disk>"
- "<disk type='file'><driver name='qemu' type='qcow2'/>"
- "<source file='/fake/path/to/volume2'/>"
- "<target dev='vdd' bus='virtio'/></disk>"
- "</devices></domain>")
-
- # Preparing mocks
- vdmock = self.mox.CreateMock(libvirt.virDomain)
- self.mox.StubOutWithMock(vdmock, "XMLDesc")
- vdmock.XMLDesc(0).AndReturn(dummyxml)
-
- def fake_lookup(instance_name):
- if instance_name == instance_ref['name']:
- return vdmock
- self.create_fake_libvirt_mock(lookupByName=fake_lookup)
-
- fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
- fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
- fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
-
- self.mox.StubOutWithMock(os.path, "getsize")
- os.path.getsize('/test/disk').AndReturn((10737418240))
- os.path.getsize('/test/disk.local').AndReturn((3328599655))
-
- ret = ("image: /test/disk\n"
- "file format: raw\n"
- "virtual size: 20G (21474836480 bytes)\n"
- "disk size: 3.1G\n"
- "cluster_size: 2097152\n"
- "backing file: /test/dummy (actual path: /backing/file)\n")
-
- self.mox.StubOutWithMock(os.path, "exists")
- os.path.exists('/test/disk.local').AndReturn(True)
-
- self.mox.StubOutWithMock(utils, "execute")
- utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
- '/test/disk.local').AndReturn((ret, ''))
-
- self.mox.ReplayAll()
- conn_info = {'driver_volume_type': 'fake'}
- info = {'block_device_mapping': [
- {'connection_info': conn_info, 'mount_device': '/dev/vdc'},
- {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- info = conn.get_instance_disk_info(instance_ref['name'],
- block_device_info=info)
- info = jsonutils.loads(info)
- self.assertEqual(info[0]['type'], 'raw')
- self.assertEqual(info[0]['path'], '/test/disk')
- self.assertEqual(info[0]['disk_size'], 10737418240)
- self.assertEqual(info[0]['backing_file'], "")
- self.assertEqual(info[0]['over_committed_disk_size'], 0)
- self.assertEqual(info[1]['type'], 'qcow2')
- self.assertEqual(info[1]['path'], '/test/disk.local')
- self.assertEqual(info[1]['virt_disk_size'], 21474836480)
- self.assertEqual(info[1]['backing_file'], "file")
- self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def test_spawn_with_network_info(self, mock_flavor):
- # Preparing mocks
- def fake_none(*args, **kwargs):
- return
-
- def fake_getLibVersion():
- return 9011
-
- def fake_getCapabilities():
- return """
- <capabilities>
- <host>
- <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
- <cpu>
- <arch>x86_64</arch>
- <model>Penryn</model>
- <vendor>Intel</vendor>
- <topology sockets='1' cores='2' threads='1'/>
- <feature name='xtpr'/>
- </cpu>
- </host>
- </capabilities>
- """
-
- def fake_baselineCPU(cpu, flag):
- return """<cpu mode='custom' match='exact'>
- <model fallback='allow'>Penryn</model>
- <vendor>Intel</vendor>
- <feature policy='require' name='xtpr'/>
- </cpu>
- """
-
- # _fake_network_info must be called before create_fake_libvirt_mock(),
- # as _fake_network_info calls importutils.import_class() and
- # create_fake_libvirt_mock() mocks importutils.import_class().
- network_info = _fake_network_info(self.stubs, 1)
- self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
- getCapabilities=fake_getCapabilities,
- getVersion=lambda: 1005001,
- baselineCPU=fake_baselineCPU)
-
- instance_ref = self.test_instance
- instance_ref['image_ref'] = 123456 # we send an int to test sha1 call
- instance = objects.Instance(**instance_ref)
- flavor = instance.get_flavor()
- flavor.extra_specs = {}
-
- mock_flavor.return_value = flavor
-
- # Mock out the get_info method of the LibvirtDriver so that the polling
- # in the spawn method of the LibvirtDriver returns immediately
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, 'get_info')
- libvirt_driver.LibvirtDriver.get_info(instance
- ).AndReturn({'state': power_state.RUNNING})
-
- # Start test
- self.mox.ReplayAll()
-
- with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
- del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn.firewall_driver,
- 'setup_basic_filtering',
- fake_none)
- self.stubs.Set(conn.firewall_driver,
- 'prepare_instance_filter',
- fake_none)
- self.stubs.Set(imagebackend.Image,
- 'cache',
- fake_none)
-
- conn.spawn(self.context, instance, None, [], 'herp',
- network_info=network_info)
-
- path = os.path.join(CONF.instances_path, instance['name'])
- if os.path.isdir(path):
- shutil.rmtree(path)
-
- path = os.path.join(CONF.instances_path,
- CONF.image_cache_subdirectory_name)
- if os.path.isdir(path):
- shutil.rmtree(os.path.join(CONF.instances_path,
- CONF.image_cache_subdirectory_name))
-
- def test_spawn_without_image_meta(self):
- self.create_image_called = False
-
- def fake_none(*args, **kwargs):
- return
-
- def fake_create_image(*args, **kwargs):
- self.create_image_called = True
-
- def fake_get_info(instance):
- return {'state': power_state.RUNNING}
-
- instance_ref = self.test_instance
- instance_ref['image_ref'] = 1
- instance = objects.Instance(**instance_ref)
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn, '_get_guest_xml', fake_none)
- self.stubs.Set(conn, '_create_image', fake_create_image)
- self.stubs.Set(conn, '_create_domain_and_network', fake_none)
- self.stubs.Set(conn, 'get_info', fake_get_info)
-
- conn.spawn(self.context, instance, None, [], None)
- self.assertTrue(self.create_image_called)
-
- conn.spawn(self.context,
- instance,
- {'id': instance['image_ref']},
- [],
- None)
- self.assertTrue(self.create_image_called)
-
- def test_spawn_from_volume_calls_cache(self):
- self.cache_called_for_disk = False
-
- def fake_none(*args, **kwargs):
- return
-
- def fake_cache(*args, **kwargs):
- if kwargs.get('image_id') == 'my_fake_image':
- self.cache_called_for_disk = True
-
- def fake_get_info(instance):
- return {'state': power_state.RUNNING}
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn, '_get_guest_xml', fake_none)
-
- self.stubs.Set(imagebackend.Image, 'cache', fake_cache)
- self.stubs.Set(conn, '_create_domain_and_network', fake_none)
- self.stubs.Set(conn, 'get_info', fake_get_info)
-
- block_device_info = {'root_device_name': '/dev/vda',
- 'block_device_mapping': [
- {'mount_device': 'vda',
- 'boot_index': 0}
- ]
- }
-
- # Volume-backed instance created without image
- instance_ref = self.test_instance
- instance_ref['image_ref'] = ''
- instance_ref['root_device_name'] = '/dev/vda'
- instance_ref['uuid'] = uuidutils.generate_uuid()
- instance = objects.Instance(**instance_ref)
-
- conn.spawn(self.context, instance, None, [], None,
- block_device_info=block_device_info)
- self.assertFalse(self.cache_called_for_disk)
-
- # Booted from volume but with placeholder image
- instance_ref = self.test_instance
- instance_ref['image_ref'] = 'my_fake_image'
- instance_ref['root_device_name'] = '/dev/vda'
- instance_ref['uuid'] = uuidutils.generate_uuid()
- instance = objects.Instance(**instance_ref)
-
- conn.spawn(self.context, instance, None, [], None,
- block_device_info=block_device_info)
- self.assertFalse(self.cache_called_for_disk)
-
- # Booted from an image
- instance_ref['image_ref'] = 'my_fake_image'
- instance_ref['uuid'] = uuidutils.generate_uuid()
- instance = objects.Instance(**instance_ref)
- conn.spawn(self.context, instance, None, [], None)
- self.assertTrue(self.cache_called_for_disk)
-
- def test_start_lxc_from_volume(self):
- self.flags(virt_type="lxc",
- group='libvirt')
-
- def check_setup_container(path, container_dir=None, use_cow=False):
- self.assertEqual(path, '/dev/path/to/dev')
- self.assertTrue(use_cow)
- return '/dev/nbd1'
-
- bdm = {
- 'guest_format': None,
- 'boot_index': 0,
- 'mount_device': '/dev/sda',
- 'connection_info': {
- 'driver_volume_type': 'iscsi',
- 'serial': 'afc1',
- 'data': {
- 'access_mode': 'rw',
- 'device_path': '/dev/path/to/dev',
- 'target_discovered': False,
- 'encrypted': False,
- 'qos_specs': None,
- 'target_iqn': 'iqn: volume-afc1',
- 'target_portal': 'ip: 3260',
- 'volume_id': 'afc1',
- 'target_lun': 1,
- 'auth_password': 'uj',
- 'auth_username': '47',
- 'auth_method': 'CHAP'
- }
- },
- 'disk_bus': 'scsi',
- 'device_type': 'disk',
- 'delete_on_termination': False
- }
-
- def _get(key, opt=None):
- return bdm.get(key, opt)
-
- def getitem(key):
- return bdm[key]
-
- def setitem(key, val):
- bdm[key] = val
-
- bdm_mock = mock.MagicMock()
- bdm_mock.__getitem__.side_effect = getitem
- bdm_mock.__setitem__.side_effect = setitem
- bdm_mock.get = _get
-
- disk_mock = mock.MagicMock()
- disk_mock.source_path = '/dev/path/to/dev'
-
- block_device_info = {'block_device_mapping': [bdm_mock],
- 'root_device_name': '/dev/sda'}
-
- # Volume-backed instance created without image
- instance_ref = self.test_instance
- instance_ref['image_ref'] = ''
- instance_ref['root_device_name'] = '/dev/sda'
- instance_ref['ephemeral_gb'] = 0
- instance_ref['uuid'] = uuidutils.generate_uuid()
- instance_ref['system_metadata']['image_disk_format'] = 'qcow2'
- inst_obj = objects.Instance(**instance_ref)
-
- flavor = inst_obj.get_flavor()
- flavor.extra_specs = {}
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- with contextlib.nested(
- mock.patch.object(conn, '_create_images_and_backing'),
- mock.patch.object(conn, 'plug_vifs'),
- mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
- mock.patch.object(conn.firewall_driver, 'prepare_instance_filter'),
- mock.patch.object(conn.firewall_driver, 'apply_instance_filter'),
- mock.patch.object(conn, '_create_domain'),
- mock.patch.object(conn, '_connect_volume'),
- mock.patch.object(conn, '_get_volume_config',
- return_value=disk_mock),
- mock.patch.object(conn, 'get_info',
- return_value={'state': power_state.RUNNING}),
- mock.patch('nova.virt.disk.api.setup_container',
- side_effect=check_setup_container),
- mock.patch('nova.virt.disk.api.teardown_container'),
- mock.patch.object(objects.Instance, 'save'),
- mock.patch.object(objects.Flavor, 'get_by_id',
- return_value=flavor)):
-
- conn.spawn(self.context, inst_obj, None, [], None,
- network_info=[],
- block_device_info=block_device_info)
- self.assertEqual('/dev/nbd1',
- inst_obj.system_metadata.get(
- 'rootfs_device_name'))
-
- def test_spawn_with_pci_devices(self):
- def fake_none(*args, **kwargs):
- return None
-
- def fake_get_info(instance):
- return {'state': power_state.RUNNING}
-
- class FakeLibvirtPciDevice():
- def dettach(self):
- return None
-
- def reset(self):
- return None
-
- def fake_node_device_lookup_by_name(address):
- pattern = ("pci_%(hex)s{4}_%(hex)s{2}_%(hex)s{2}_%(oct)s{1}"
- % dict(hex='[\da-f]', oct='[0-8]'))
- pattern = re.compile(pattern)
- if pattern.match(address) is None:
- raise libvirt.libvirtError()
- return FakeLibvirtPciDevice()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn, '_get_guest_xml', fake_none)
- self.stubs.Set(conn, '_create_image', fake_none)
- self.stubs.Set(conn, '_create_domain_and_network', fake_none)
- self.stubs.Set(conn, 'get_info', fake_get_info)
-
- conn._conn.nodeDeviceLookupByName = \
- fake_node_device_lookup_by_name
-
- instance_ref = self.test_instance
- instance_ref['image_ref'] = 'my_fake_image'
- instance = objects.Instance(**instance_ref)
- instance = dict(instance.iteritems())
- instance['pci_devices'] = [{'address': '0000:00:00.0'}]
-
- conn.spawn(self.context, instance, None, [], None)
-
- def test_chown_disk_config_for_instance(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- instance = copy.deepcopy(self.test_instance)
- instance['name'] = 'test_name'
- self.mox.StubOutWithMock(fake_libvirt_utils, 'get_instance_path')
- self.mox.StubOutWithMock(os.path, 'exists')
- self.mox.StubOutWithMock(fake_libvirt_utils, 'chown')
- fake_libvirt_utils.get_instance_path(instance).AndReturn('/tmp/uuid')
- os.path.exists('/tmp/uuid/disk.config').AndReturn(True)
- fake_libvirt_utils.chown('/tmp/uuid/disk.config', os.getuid())
-
- self.mox.ReplayAll()
- conn._chown_disk_config_for_instance(instance)
-
- def _test_create_image_plain(self, os_type='', filename='', mkfs=False):
- gotFiles = []
-
- def fake_image(self, instance, name, image_type=''):
- class FakeImage(imagebackend.Image):
- def __init__(self, instance, name, is_block_dev=False):
- self.path = os.path.join(instance['name'], name)
- self.is_block_dev = is_block_dev
-
- def create_image(self, prepare_template, base,
- size, *args, **kwargs):
- pass
-
- def cache(self, fetch_func, filename, size=None,
- *args, **kwargs):
- gotFiles.append({'filename': filename,
- 'size': size})
-
- def snapshot(self, name):
- pass
-
- return FakeImage(instance, name)
-
- def fake_none(*args, **kwargs):
- return
-
- def fake_get_info(instance):
- return {'state': power_state.RUNNING}
-
- # Stop 'libvirt_driver._create_image' touching filesystem
- self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
- fake_image)
-
- instance_ref = self.test_instance
- instance_ref['image_ref'] = 1
- instance = objects.Instance(**instance_ref)
- instance['os_type'] = os_type
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn, '_get_guest_xml', fake_none)
- self.stubs.Set(conn, '_create_domain_and_network', fake_none)
- self.stubs.Set(conn, 'get_info', fake_get_info)
- if mkfs:
- self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
- {os_type: 'mkfs.ext3 --label %(fs_label)s %(target)s'})
-
- image_meta = {'id': instance['image_ref']}
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance,
- None,
- image_meta)
- conn._create_image(context, instance, disk_info['mapping'])
- conn._get_guest_xml(self.context, instance, None,
- disk_info, image_meta)
-
- wantFiles = [
- {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
- 'size': 10 * units.Gi},
- {'filename': filename,
- 'size': 20 * units.Gi},
- ]
- self.assertEqual(gotFiles, wantFiles)
-
- def test_create_image_plain_os_type_blank(self):
- self._test_create_image_plain(os_type='',
- filename='ephemeral_20_default',
- mkfs=False)
-
- def test_create_image_plain_os_type_none(self):
- self._test_create_image_plain(os_type=None,
- filename='ephemeral_20_default',
- mkfs=False)
-
- def test_create_image_plain_os_type_set_no_fs(self):
- self._test_create_image_plain(os_type='test',
- filename='ephemeral_20_default',
- mkfs=False)
-
- def test_create_image_plain_os_type_set_with_fs(self):
- self._test_create_image_plain(os_type='test',
- filename='ephemeral_20_test',
- mkfs=True)
-
- def test_create_image_with_swap(self):
- gotFiles = []
-
- def fake_image(self, instance, name, image_type=''):
- class FakeImage(imagebackend.Image):
- def __init__(self, instance, name, is_block_dev=False):
- self.path = os.path.join(instance['name'], name)
- self.is_block_dev = is_block_dev
-
- def create_image(self, prepare_template, base,
- size, *args, **kwargs):
- pass
-
- def cache(self, fetch_func, filename, size=None,
- *args, **kwargs):
- gotFiles.append({'filename': filename,
- 'size': size})
-
- def snapshot(self, name):
- pass
-
- return FakeImage(instance, name)
-
- def fake_none(*args, **kwargs):
- return
-
- def fake_get_info(instance):
- return {'state': power_state.RUNNING}
-
- # Stop 'libvirt_driver._create_image' touching filesystem
- self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
- fake_image)
-
- instance_ref = self.test_instance
- instance_ref['image_ref'] = 1
- # Turn on some swap to exercise that codepath in _create_image
- instance_ref['system_metadata']['instance_type_swap'] = 500
- instance = objects.Instance(**instance_ref)
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn, '_get_guest_xml', fake_none)
- self.stubs.Set(conn, '_create_domain_and_network', fake_none)
- self.stubs.Set(conn, 'get_info', fake_get_info)
-
- image_meta = {'id': instance['image_ref']}
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance,
- None,
- image_meta)
- conn._create_image(context, instance, disk_info['mapping'])
- conn._get_guest_xml(self.context, instance, None,
- disk_info, image_meta)
-
- wantFiles = [
- {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
- 'size': 10 * units.Gi},
- {'filename': 'ephemeral_20_default',
- 'size': 20 * units.Gi},
- {'filename': 'swap_500',
- 'size': 500 * units.Mi},
- ]
- self.assertEqual(gotFiles, wantFiles)
-
- @mock.patch.object(utils, 'execute')
- def test_create_ephemeral_specified_fs(self, mock_exec):
- self.flags(default_ephemeral_format='ext3')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
- is_block_dev=True, max_size=20,
- specified_fs='ext4')
- mock_exec.assert_called_once_with('mkfs', '-t', 'ext4', '-F', '-L',
- 'myVol', '/dev/something',
- run_as_root=True)
-
- def test_create_ephemeral_specified_fs_not_valid(self):
- CONF.set_override('default_ephemeral_format', 'ext4')
- ephemerals = [{'device_type': 'disk',
- 'disk_bus': 'virtio',
- 'device_name': '/dev/vdb',
- 'guest_format': 'dummy',
- 'size': 1}]
- block_device_info = {
- 'ephemerals': ephemerals}
- instance_ref = self.test_instance
- instance_ref['image_ref'] = 1
- instance = objects.Instance(**instance_ref)
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- image_meta = {'id': instance['image_ref']}
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance,
- None,
- image_meta)
- disk_info['mapping'].pop('disk.local')
-
- with contextlib.nested(
- mock.patch.object(utils, 'execute'),
- mock.patch.object(conn, 'get_info'),
- mock.patch.object(conn, '_create_domain_and_network')):
- self.assertRaises(exception.InvalidBDMFormat, conn._create_image,
- context, instance, disk_info['mapping'],
- block_device_info=block_device_info)
-
- def test_create_ephemeral_default(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.mox.StubOutWithMock(utils, 'execute')
- utils.execute('mkfs', '-t', 'ext3', '-F', '-L', 'myVol',
- '/dev/something', run_as_root=True)
- self.mox.ReplayAll()
- conn._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
- is_block_dev=True, max_size=20)
-
- def test_create_ephemeral_with_conf(self):
- CONF.set_override('default_ephemeral_format', 'ext4')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.mox.StubOutWithMock(utils, 'execute')
- utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
- '/dev/something', run_as_root=True)
- self.mox.ReplayAll()
- conn._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
- is_block_dev=True)
-
- def test_create_ephemeral_with_arbitrary(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
- {'linux': 'mkfs.ext3 --label %(fs_label)s %(target)s'})
- self.mox.StubOutWithMock(utils, 'execute')
- utils.execute('mkfs.ext3', '--label', 'myVol', '/dev/something',
- run_as_root=True)
- self.mox.ReplayAll()
- conn._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
- is_block_dev=True)
-
- def test_create_swap_default(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.mox.StubOutWithMock(utils, 'execute')
- utils.execute('mkswap', '/dev/something', run_as_root=False)
- self.mox.ReplayAll()
-
- conn._create_swap('/dev/something', 1, max_size=20)
-
- def test_get_console_output_file(self):
- fake_libvirt_utils.files['console.log'] = '01234567890'
-
- with utils.tempdir() as tmpdir:
- self.flags(instances_path=tmpdir)
-
- instance_ref = self.test_instance
- instance_ref['image_ref'] = 123456
- instance = objects.Instance(**instance_ref)
-
- console_dir = (os.path.join(tmpdir, instance['name']))
- console_log = '%s/console.log' % (console_dir)
- fake_dom_xml = """
- <domain type='kvm'>
- <devices>
- <disk type='file'>
- <source file='filename'/>
- </disk>
- <console type='file'>
- <source path='%s'/>
- <target port='0'/>
- </console>
- </devices>
- </domain>
- """ % console_log
-
- def fake_lookup(id):
- return FakeVirtDomain(fake_dom_xml)
-
- self.create_fake_libvirt_mock()
- libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- try:
- prev_max = libvirt_driver.MAX_CONSOLE_BYTES
- libvirt_driver.MAX_CONSOLE_BYTES = 5
- output = conn.get_console_output(self.context, instance)
- finally:
- libvirt_driver.MAX_CONSOLE_BYTES = prev_max
-
- self.assertEqual('67890', output)
-
- def test_get_console_output_pty(self):
- fake_libvirt_utils.files['pty'] = '01234567890'
-
- with utils.tempdir() as tmpdir:
- self.flags(instances_path=tmpdir)
-
- instance_ref = self.test_instance
- instance_ref['image_ref'] = 123456
- instance = objects.Instance(**instance_ref)
-
- console_dir = (os.path.join(tmpdir, instance['name']))
- pty_file = '%s/fake_pty' % (console_dir)
- fake_dom_xml = """
- <domain type='kvm'>
- <devices>
- <disk type='file'>
- <source file='filename'/>
- </disk>
- <console type='pty'>
- <source path='%s'/>
- <target port='0'/>
- </console>
- </devices>
- </domain>
- """ % pty_file
-
- def fake_lookup(id):
- return FakeVirtDomain(fake_dom_xml)
-
- def _fake_flush(self, fake_pty):
- return 'foo'
-
- def _fake_append_to_file(self, data, fpath):
- return 'pty'
-
- self.create_fake_libvirt_mock()
- libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
- libvirt_driver.LibvirtDriver._flush_libvirt_console = _fake_flush
- libvirt_driver.LibvirtDriver._append_to_file = _fake_append_to_file
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- try:
- prev_max = libvirt_driver.MAX_CONSOLE_BYTES
- libvirt_driver.MAX_CONSOLE_BYTES = 5
- output = conn.get_console_output(self.context, instance)
- finally:
- libvirt_driver.MAX_CONSOLE_BYTES = prev_max
-
- self.assertEqual('67890', output)
-
- def test_get_host_ip_addr(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- ip = conn.get_host_ip_addr()
- self.assertEqual(ip, CONF.my_ip)
-
- def test_broken_connection(self):
- for (error, domain) in (
- (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_REMOTE),
- (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_RPC),
- (libvirt.VIR_ERR_INTERNAL_ERROR, libvirt.VIR_FROM_RPC)):
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- self.mox.StubOutWithMock(conn, "_wrapped_conn")
- self.mox.StubOutWithMock(conn._wrapped_conn, "getLibVersion")
- self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code")
- self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_domain")
-
- conn._wrapped_conn.getLibVersion().AndRaise(
- libvirt.libvirtError("fake failure"))
-
- libvirt.libvirtError.get_error_code().AndReturn(error)
- libvirt.libvirtError.get_error_domain().AndReturn(domain)
-
- self.mox.ReplayAll()
-
- self.assertFalse(conn._test_connection(conn._wrapped_conn))
-
- self.mox.UnsetStubs()
-
- def test_command_with_broken_connection(self):
- self.mox.UnsetStubs()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- with contextlib.nested(
- mock.patch.object(libvirt, 'openAuth',
- side_effect=libvirt.libvirtError("fake")),
- mock.patch.object(libvirt.libvirtError, "get_error_code"),
- mock.patch.object(libvirt.libvirtError, "get_error_domain"),
- mock.patch.object(conn, '_set_host_enabled')):
- self.assertRaises(exception.HypervisorUnavailable,
- conn.get_num_instances)
-
- def test_broken_connection_disable_service(self):
- self.mox.UnsetStubs()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn._init_events_pipe()
- with contextlib.nested(
- mock.patch.object(conn, '_set_host_enabled')):
- conn._close_callback(conn._wrapped_conn, 'ERROR!', '')
- conn._dispatch_events()
- conn._set_host_enabled.assert_called_once_with(
- False,
- disable_reason=u'Connection to libvirt lost: ERROR!')
-
- def test_service_resume_after_broken_connection(self):
- self.mox.UnsetStubs()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- service_mock = mock.MagicMock()
- service_mock.disabled.return_value = True
- with contextlib.nested(
- mock.patch.object(libvirt, 'openAuth',
- return_value=mock.MagicMock()),
- mock.patch.object(objects.Service, "get_by_compute_host",
- return_value=service_mock)):
-
- conn.get_num_instances()
- self.assertTrue(not service_mock.disabled and
- service_mock.disabled_reason is 'None')
-
- def test_broken_connection_no_wrapped_conn(self):
- # Tests that calling _close_callback when _wrapped_conn is None
- # is a no-op, i.e. set_host_enabled won't be called.
- self.mox.UnsetStubs()
- # conn._wrapped_conn will be None since we never call libvirt.openAuth
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
- # create our mock connection that libvirt will send to the callback
- mock_failed_conn = mock.MagicMock()
- mock_failed_conn.__getitem__.return_value = True
- # nothing should happen when calling _close_callback since
- # _wrapped_conn is None in the driver
- conn._init_events_pipe()
- conn._close_callback(mock_failed_conn, reason=None, opaque=None)
- conn._dispatch_events()
-
- def test_immediate_delete(self):
- def fake_lookup_by_name(instance_name):
- raise exception.InstanceNotFound(instance_id=instance_name)
-
- def fake_delete_instance_files(instance):
- pass
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
- self.stubs.Set(conn, '_delete_instance_files',
- fake_delete_instance_files)
-
- instance = objects.Instance(**self.test_instance)
- conn.destroy(self.context, instance, {})
-
- def _test_destroy_removes_disk(self, volume_fail=False):
- instance = {"name": "instancename", "id": "42",
- "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64",
- "cleaned": 0, 'info_cache': None, 'security_groups': []}
- vol = {'block_device_mapping': [
- {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
- '_undefine_domain')
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(),
- columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
- ).AndReturn(instance)
- self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
- driver.block_device_info_get_mapping(vol
- ).AndReturn(vol['block_device_mapping'])
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
- "_disconnect_volume")
- if volume_fail:
- libvirt_driver.LibvirtDriver._disconnect_volume(
- mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).\
- AndRaise(exception.VolumeNotFound('vol'))
- else:
- libvirt_driver.LibvirtDriver._disconnect_volume(
- mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
- 'delete_instance_files')
- (libvirt_driver.LibvirtDriver.delete_instance_files(mox.IgnoreArg()).
- AndReturn(True))
- libvirt_driver.LibvirtDriver._undefine_domain(instance)
-
- # Start test
- self.mox.ReplayAll()
-
- def fake_destroy(instance):
- pass
-
- def fake_os_path_exists(path):
- return True
-
- def fake_unplug_vifs(instance, network_info, ignore_errors=False):
- pass
-
- def fake_unfilter_instance(instance, network_info):
- pass
-
- def fake_obj_load_attr(self, attrname):
- if not hasattr(self, attrname):
- self[attrname] = {}
-
- def fake_save(self, context):
- pass
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- self.stubs.Set(conn, '_destroy', fake_destroy)
- self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
- self.stubs.Set(conn.firewall_driver,
- 'unfilter_instance', fake_unfilter_instance)
- self.stubs.Set(os.path, 'exists', fake_os_path_exists)
- self.stubs.Set(objects.Instance, 'fields',
- {'id': int, 'uuid': str, 'cleaned': int})
- self.stubs.Set(objects.Instance, 'obj_load_attr',
- fake_obj_load_attr)
- self.stubs.Set(objects.Instance, 'save', fake_save)
-
- conn.destroy(self.context, instance, [], vol)
-
- def test_destroy_removes_disk(self):
- self._test_destroy_removes_disk(volume_fail=False)
-
- def test_destroy_removes_disk_volume_fails(self):
- self._test_destroy_removes_disk(volume_fail=True)
-
- def test_destroy_not_removes_disk(self):
- instance = {"name": "instancename", "id": "instanceid",
- "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
- '_undefine_domain')
- libvirt_driver.LibvirtDriver._undefine_domain(instance)
-
- # Start test
- self.mox.ReplayAll()
-
- def fake_destroy(instance):
- pass
-
- def fake_os_path_exists(path):
- return True
-
- def fake_unplug_vifs(instance, network_info, ignore_errors=False):
- pass
-
- def fake_unfilter_instance(instance, network_info):
- pass
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- self.stubs.Set(conn, '_destroy', fake_destroy)
- self.stubs.Set(conn, 'unplug_vifs', fake_unplug_vifs)
- self.stubs.Set(conn.firewall_driver,
- 'unfilter_instance', fake_unfilter_instance)
- self.stubs.Set(os.path, 'exists', fake_os_path_exists)
- conn.destroy(self.context, instance, [], None, False)
-
- @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
- @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
- @mock.patch.object(libvirt_driver.LibvirtDriver, '_lookup_by_name')
- def test_destroy_lxc_calls_teardown_container(self, mock_look_up,
- mock_teardown_container,
- mock_cleanup):
- self.flags(virt_type='lxc', group='libvirt')
- fake_domain = FakeVirtDomain()
-
- def destroy_side_effect(*args, **kwargs):
- fake_domain._info[0] = power_state.SHUTDOWN
-
- with mock.patch.object(fake_domain, 'destroy',
- side_effect=destroy_side_effect) as mock_domain_destroy:
- mock_look_up.return_value = fake_domain
- instance = fake_instance.fake_instance_obj(self.context)
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- network_info = []
- conn.destroy(self.context, instance, network_info, None, False)
-
- mock_look_up.assert_has_calls([mock.call(instance.name),
- mock.call(instance.name)])
- mock_domain_destroy.assert_called_once_with()
- mock_teardown_container.assert_called_once_with(instance)
- mock_cleanup.assert_called_once_with(self.context, instance,
- network_info, None, False,
- None)
-
- @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
- @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
- @mock.patch.object(libvirt_driver.LibvirtDriver, '_lookup_by_name')
- def test_destroy_lxc_calls_teardown_container_when_no_domain(self,
- mock_look_up, mock_teardown_container, mock_cleanup):
- self.flags(virt_type='lxc', group='libvirt')
- instance = fake_instance.fake_instance_obj(self.context)
- inf_exception = exception.InstanceNotFound(instance_id=instance.name)
- mock_look_up.side_effect = inf_exception
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- network_info = []
- conn.destroy(self.context, instance, network_info, None, False)
-
- mock_look_up.assert_has_calls([mock.call(instance.name),
- mock.call(instance.name)])
- mock_teardown_container.assert_called_once_with(instance)
- mock_cleanup.assert_called_once_with(self.context, instance,
- network_info, None, False,
- None)
-
- def test_reboot_different_ids(self):
- class FakeLoopingCall:
- def start(self, *a, **k):
- return self
-
- def wait(self):
- return None
-
- self.flags(wait_soft_reboot_seconds=1, group='libvirt')
- info_tuple = ('fake', 'fake', 'fake', 'also_fake')
- self.reboot_create_called = False
-
- # Mock domain
- mock_domain = self.mox.CreateMock(libvirt.virDomain)
- mock_domain.info().AndReturn(
- (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
- mock_domain.ID().AndReturn('some_fake_id')
- mock_domain.shutdown()
- mock_domain.info().AndReturn(
- (libvirt_driver.VIR_DOMAIN_CRASHED,) + info_tuple)
- mock_domain.ID().AndReturn('some_other_fake_id')
-
- self.mox.ReplayAll()
-
- def fake_lookup_by_name(instance_name):
- return mock_domain
-
- def fake_create_domain(**kwargs):
- self.reboot_create_called = True
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- instance = objects.Instance(**self.test_instance)
- self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
- self.stubs.Set(conn, '_create_domain', fake_create_domain)
- self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
- lambda *a, **k: FakeLoopingCall())
- self.stubs.Set(pci_manager, 'get_instance_pci_devs', lambda *a: [])
- conn.reboot(None, instance, [], 'SOFT')
- self.assertTrue(self.reboot_create_called)
-
- def test_reboot_same_ids(self):
- class FakeLoopingCall:
- def start(self, *a, **k):
- return self
-
- def wait(self):
- return None
-
- self.flags(wait_soft_reboot_seconds=1, group='libvirt')
- info_tuple = ('fake', 'fake', 'fake', 'also_fake')
- self.reboot_hard_reboot_called = False
-
- # Mock domain
- mock_domain = self.mox.CreateMock(libvirt.virDomain)
- mock_domain.info().AndReturn(
- (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
- mock_domain.ID().AndReturn('some_fake_id')
- mock_domain.shutdown()
- mock_domain.info().AndReturn(
- (libvirt_driver.VIR_DOMAIN_CRASHED,) + info_tuple)
- mock_domain.ID().AndReturn('some_fake_id')
-
- self.mox.ReplayAll()
-
- def fake_lookup_by_name(instance_name):
- return mock_domain
-
- def fake_hard_reboot(*args, **kwargs):
- self.reboot_hard_reboot_called = True
-
- def fake_sleep(interval):
- pass
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- instance = objects.Instance(**self.test_instance)
- self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
- self.stubs.Set(greenthread, 'sleep', fake_sleep)
- self.stubs.Set(conn, '_hard_reboot', fake_hard_reboot)
- self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
- lambda *a, **k: FakeLoopingCall())
- self.stubs.Set(pci_manager, 'get_instance_pci_devs', lambda *a: [])
- conn.reboot(None, instance, [], 'SOFT')
- self.assertTrue(self.reboot_hard_reboot_called)
-
- def test_soft_reboot_libvirt_exception(self):
- # Tests that a hard reboot is performed when a soft reboot results
- # in raising a libvirtError.
- info_tuple = ('fake', 'fake', 'fake', 'also_fake')
-
- # setup mocks
- mock_domain = self.mox.CreateMock(libvirt.virDomain)
- mock_domain.info().AndReturn(
- (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
- mock_domain.ID().AndReturn('some_fake_id')
- mock_domain.shutdown().AndRaise(libvirt.libvirtError('Err'))
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- context = None
- instance = {"name": "instancename", "id": "instanceid",
- "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
- network_info = []
-
- self.mox.StubOutWithMock(conn, '_lookup_by_name')
- conn._lookup_by_name(instance['name']).AndReturn(mock_domain)
- self.mox.StubOutWithMock(conn, '_hard_reboot')
- conn._hard_reboot(context, instance, network_info, None)
-
- self.mox.ReplayAll()
-
- conn.reboot(context, instance, network_info, 'SOFT')
-
- def _test_resume_state_on_host_boot_with_state(self, state):
- called = {'count': 0}
- mock = self.mox.CreateMock(libvirt.virDomain)
- mock.info().AndReturn([state, None, None, None, None])
- self.mox.ReplayAll()
-
- def fake_lookup_by_name(instance_name):
- return mock
-
- def fake_hard_reboot(*args):
- called['count'] += 1
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
- self.stubs.Set(conn, '_hard_reboot', fake_hard_reboot)
- instance_details = {"name": "instancename", "id": 1,
- "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
- instance = fake_instance.fake_instance_obj(
- self.context, **instance_details)
- network_info = _fake_network_info(self.stubs, 1)
-
- conn.resume_state_on_host_boot(self.context, instance, network_info,
- block_device_info=None)
-
- ignored_states = (power_state.RUNNING,
- power_state.SUSPENDED,
- power_state.NOSTATE,
- power_state.PAUSED)
- if state in ignored_states:
- self.assertEqual(called['count'], 0)
- else:
- self.assertEqual(called['count'], 1)
-
- def test_resume_state_on_host_boot_with_running_state(self):
- self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)
-
- def test_resume_state_on_host_boot_with_suspended_state(self):
- self._test_resume_state_on_host_boot_with_state(power_state.SUSPENDED)
-
- def test_resume_state_on_host_boot_with_paused_state(self):
- self._test_resume_state_on_host_boot_with_state(power_state.PAUSED)
-
- def test_resume_state_on_host_boot_with_nostate(self):
- self._test_resume_state_on_host_boot_with_state(power_state.NOSTATE)
-
- def test_resume_state_on_host_boot_with_shutdown_state(self):
- self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)
-
- def test_resume_state_on_host_boot_with_crashed_state(self):
- self._test_resume_state_on_host_boot_with_state(power_state.CRASHED)
-
- def test_resume_state_on_host_boot_with_instance_not_found_on_driver(self):
- called = {'count': 0}
- instance_details = {'name': 'test'}
- instance = fake_instance.fake_instance_obj(
- self.context, **instance_details)
-
- def fake_lookup_by_name(instance_name):
- raise exception.InstanceNotFound(instance_id='fake')
-
- def fake_hard_reboot(*args):
- called['count'] += 1
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
- self.stubs.Set(conn, '_hard_reboot', fake_hard_reboot)
- conn.resume_state_on_host_boot(self.context, instance, network_info=[],
- block_device_info=None)
-
- self.assertEqual(called['count'], 1)
-
- def test_hard_reboot(self):
- called = {'count': 0}
- instance = objects.Instance(**self.test_instance)
- network_info = _fake_network_info(self.stubs, 1)
- block_device_info = None
-
- dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
- "<devices>"
- "<disk type='file'><driver name='qemu' type='raw'/>"
- "<source file='/test/disk'/>"
- "<target dev='vda' bus='virtio'/></disk>"
- "<disk type='file'><driver name='qemu' type='qcow2'/>"
- "<source file='/test/disk.local'/>"
- "<target dev='vdb' bus='virtio'/></disk>"
- "</devices></domain>")
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.mox.StubOutWithMock(conn, '_destroy')
- self.mox.StubOutWithMock(conn, '_get_instance_disk_info')
- self.mox.StubOutWithMock(conn, '_get_guest_xml')
- self.mox.StubOutWithMock(conn, '_create_images_and_backing')
- self.mox.StubOutWithMock(conn, '_create_domain_and_network')
-
- def fake_get_info(instance_name):
- called['count'] += 1
- if called['count'] == 1:
- state = power_state.SHUTDOWN
- else:
- state = power_state.RUNNING
- return dict(state=state)
-
- self.stubs.Set(conn, 'get_info', fake_get_info)
-
- conn._destroy(instance)
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance, block_device_info)
-
- system_meta = utils.instance_sys_meta(instance)
- image_meta = utils.get_image_from_system_metadata(system_meta)
-
- conn._get_guest_xml(self.context, instance, network_info, disk_info,
- image_meta=image_meta,
- block_device_info=block_device_info,
- write_to_disk=True).AndReturn(dummyxml)
- disk_info_json = '[{"virt_disk_size": 2}]'
- conn._get_instance_disk_info(instance["name"], dummyxml,
- block_device_info).AndReturn(disk_info_json)
- conn._create_images_and_backing(self.context, instance,
- libvirt_utils.get_instance_path(instance),
- disk_info_json)
- conn._create_domain_and_network(self.context, dummyxml, instance,
- network_info, block_device_info,
- reboot=True, vifs_already_plugged=True)
- self.mox.ReplayAll()
-
- conn._hard_reboot(self.context, instance, network_info,
- block_device_info)
-
- @mock.patch('nova.openstack.common.loopingcall.FixedIntervalLoopingCall')
- @mock.patch('nova.pci.manager.get_instance_pci_devs')
- @mock.patch('nova.virt.libvirt.LibvirtDriver._prepare_pci_devices_for_use')
- @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
- @mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
- @mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
- @mock.patch('nova.virt.libvirt.utils.write_to_file')
- @mock.patch('nova.virt.libvirt.utils.get_instance_path')
- @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_config')
- @mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
- @mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
- def test_hard_reboot_does_not_call_glance_show(self,
- mock_destroy, mock_get_disk_info, mock_get_guest_config,
- mock_get_instance_path, mock_write_to_file,
- mock_get_instance_disk_info, mock_create_images_and_backing,
- mock_create_domand_and_network, mock_prepare_pci_devices_for_use,
- mock_get_instance_pci_devs, mock_looping_call):
- """For a hard reboot, we shouldn't need an additional call to glance
- to get the image metadata.
-
- This is important for automatically spinning up instances on a
- host-reboot, since we won't have a user request context that'll allow
- the Glance request to go through. We have to rely on the cached image
- metadata, instead.
-
- https://bugs.launchpad.net/nova/+bug/1339386
- """
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- instance = objects.Instance(**self.test_instance)
-
- network_info = mock.MagicMock()
- block_device_info = mock.MagicMock()
- mock_get_disk_info.return_value = {}
- mock_get_guest_config.return_value = mock.MagicMock()
- mock_get_instance_path.return_value = '/foo'
- mock_looping_call.return_value = mock.MagicMock()
- conn._image_api = mock.MagicMock()
-
- conn._hard_reboot(self.context, instance, network_info,
- block_device_info)
-
- self.assertFalse(conn._image_api.get.called)
-
- def test_power_on(self):
-
- def _check_xml_bus(name, xml, block_info):
- tree = etree.fromstring(xml)
- got_disk_targets = tree.findall('./devices/disk/target')
- system_meta = utils.instance_sys_meta(instance)
- image_meta = utils.get_image_from_system_metadata(system_meta)
- want_device_bus = image_meta.get('hw_disk_bus')
- if not want_device_bus:
- want_device_bus = self.fake_img['properties']['hw_disk_bus']
- got_device_bus = got_disk_targets[0].get('bus')
- self.assertEqual(got_device_bus, want_device_bus)
-
- def fake_get_info(instance_name):
- called['count'] += 1
- if called['count'] == 1:
- state = power_state.SHUTDOWN
- else:
- state = power_state.RUNNING
- return dict(state=state)
-
- def _get_inst(with_meta=True):
- inst_ref = self.test_instance
- inst_ref['uuid'] = uuidutils.generate_uuid()
- if with_meta:
- inst_ref['system_metadata']['image_hw_disk_bus'] = 'ide'
- instance = objects.Instance(**inst_ref)
- instance['image_ref'] = '70a599e0-31e7-49b7-b260-868f221a761e'
- return instance
-
- called = {'count': 0}
- self.fake_img = {'id': '70a599e0-31e7-49b7-b260-868f221a761e',
- 'name': 'myfakeimage',
- 'created_at': '',
- 'updated_at': '',
- 'deleted_at': None,
- 'deleted': False,
- 'status': 'active',
- 'is_public': False,
- 'container_format': 'bare',
- 'disk_format': 'qcow2',
- 'size': '74185822',
- 'properties': {'hw_disk_bus': 'ide'}}
-
- instance = _get_inst()
- flavor = instance.get_flavor()
- flavor.extra_specs = {}
- network_info = _fake_network_info(self.stubs, 1)
- block_device_info = None
- image_service_mock = mock.Mock()
- image_service_mock.show.return_value = self.fake_img
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- with contextlib.nested(
- mock.patch.object(conn, '_destroy', return_value=None),
- mock.patch.object(conn, '_create_images_and_backing'),
- mock.patch.object(conn, '_create_domain_and_network'),
- mock.patch.object(objects.Flavor, 'get_by_id',
- return_value = flavor),
- mock.patch.object(objects.Instance, 'save')):
- conn.get_info = fake_get_info
- conn._get_instance_disk_info = _check_xml_bus
- conn._hard_reboot(self.context, instance, network_info,
- block_device_info)
-
- instance = _get_inst(with_meta=False)
- conn._hard_reboot(self.context, instance, network_info,
- block_device_info)
-
- def _test_clean_shutdown(self, seconds_to_shutdown,
- timeout, retry_interval,
- shutdown_attempts, succeeds):
- self.stubs.Set(time, 'sleep', lambda x: None)
- info_tuple = ('fake', 'fake', 'fake', 'also_fake')
- shutdown_count = []
-
- def count_shutdowns():
- shutdown_count.append("shutdown")
-
- # Mock domain
- mock_domain = self.mox.CreateMock(libvirt.virDomain)
-
- mock_domain.info().AndReturn(
- (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
- mock_domain.shutdown().WithSideEffects(count_shutdowns)
-
- retry_countdown = retry_interval
- for x in xrange(min(seconds_to_shutdown, timeout)):
- mock_domain.info().AndReturn(
- (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
- if retry_countdown == 0:
- mock_domain.shutdown().WithSideEffects(count_shutdowns)
- retry_countdown = retry_interval
- else:
- retry_countdown -= 1
-
- if seconds_to_shutdown < timeout:
- mock_domain.info().AndReturn(
- (libvirt_driver.VIR_DOMAIN_SHUTDOWN,) + info_tuple)
-
- self.mox.ReplayAll()
-
- def fake_lookup_by_name(instance_name):
- return mock_domain
-
- def fake_create_domain(**kwargs):
- self.reboot_create_called = True
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- instance = {"name": "instancename", "id": "instanceid",
- "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
- self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
- self.stubs.Set(conn, '_create_domain', fake_create_domain)
- result = conn._clean_shutdown(instance, timeout, retry_interval)
-
- self.assertEqual(succeeds, result)
- self.assertEqual(shutdown_attempts, len(shutdown_count))
-
- def test_clean_shutdown_first_time(self):
- self._test_clean_shutdown(seconds_to_shutdown=2,
- timeout=5,
- retry_interval=3,
- shutdown_attempts=1,
- succeeds=True)
-
- def test_clean_shutdown_with_retry(self):
- self._test_clean_shutdown(seconds_to_shutdown=4,
- timeout=5,
- retry_interval=3,
- shutdown_attempts=2,
- succeeds=True)
-
- def test_clean_shutdown_failure(self):
- self._test_clean_shutdown(seconds_to_shutdown=6,
- timeout=5,
- retry_interval=3,
- shutdown_attempts=2,
- succeeds=False)
-
- def test_clean_shutdown_no_wait(self):
- self._test_clean_shutdown(seconds_to_shutdown=6,
- timeout=0,
- retry_interval=3,
- shutdown_attempts=1,
- succeeds=False)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- @mock.patch.object(FakeVirtDomain, 'attachDevice')
- @mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
- @mock.patch.object(compute_utils, 'get_image_metadata', return_value=None)
- def test_attach_sriov_ports(self,
- mock_get_image_metadata,
- mock_ID,
- mock_attachDevice,
- mock_flavor):
- instance = objects.Instance(**self.test_instance)
- flavor = instance.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- network_info = _fake_network_info(self.stubs, 1)
- network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
- domain = FakeVirtDomain()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- conn._attach_sriov_ports(self.context, instance, domain, network_info)
- mock_get_image_metadata.assert_called_once_with(self.context,
- conn._image_api, instance['image_ref'], instance)
- self.assertTrue(mock_attachDevice.called)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- @mock.patch.object(FakeVirtDomain, 'attachDevice')
- @mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
- @mock.patch.object(compute_utils, 'get_image_metadata', return_value=None)
- def test_attach_sriov_ports_with_info_cache(self,
- mock_get_image_metadata,
- mock_ID,
- mock_attachDevice,
- mock_flavor):
- instance = objects.Instance(**self.test_instance)
- flavor = instance.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- network_info = _fake_network_info(self.stubs, 1)
- network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
- instance.info_cache = objects.InstanceInfoCache(
- network_info=network_info)
- domain = FakeVirtDomain()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- conn._attach_sriov_ports(self.context, instance, domain, None)
- mock_get_image_metadata.assert_called_once_with(self.context,
- conn._image_api, instance['image_ref'], instance)
- self.assertTrue(mock_attachDevice.called)
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- @mock.patch.object(libvirt_driver.LibvirtDriver,
- '_has_min_version', return_value=True)
- @mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
- @mock.patch.object(compute_utils, 'get_image_metadata', return_value=None)
- def test_detach_sriov_ports(self,
- mock_get_image_metadata,
- mock_detachDeviceFlags,
- mock_has_min_version,
- mock_flavor):
- instance = objects.Instance(**self.test_instance)
- flavor = instance.get_flavor()
- flavor.extra_specs = {}
- mock_flavor.return_value = flavor
-
- network_info = _fake_network_info(self.stubs, 1)
- network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
- instance.info_cache = objects.InstanceInfoCache(
- network_info=network_info)
-
- domain = FakeVirtDomain()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- conn._detach_sriov_ports(instance, domain)
- mock_get_image_metadata.assert_called_once_with(mock.ANY,
- conn._image_api, instance['image_ref'], instance)
- self.assertTrue(mock_detachDeviceFlags.called)
-
- def test_resume(self):
- dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
- "<devices>"
- "<disk type='file'><driver name='qemu' type='raw'/>"
- "<source file='/test/disk'/>"
- "<target dev='vda' bus='virtio'/></disk>"
- "<disk type='file'><driver name='qemu' type='qcow2'/>"
- "<source file='/test/disk.local'/>"
- "<target dev='vdb' bus='virtio'/></disk>"
- "</devices></domain>")
- instance = objects.Instance(**self.test_instance)
- network_info = _fake_network_info(self.stubs, 1)
- block_device_info = None
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- with contextlib.nested(
- mock.patch.object(conn, '_get_existing_domain_xml',
- return_value=dummyxml),
- mock.patch.object(conn, '_create_domain_and_network',
- return_value='fake_dom'),
- mock.patch.object(conn, '_attach_pci_devices'),
- mock.patch.object(pci_manager, 'get_instance_pci_devs',
- return_value='fake_pci_devs'),
- ) as (_get_existing_domain_xml, _create_domain_and_network,
- _attach_pci_devices, get_instance_pci_devs):
- conn.resume(self.context, instance, network_info,
- block_device_info)
- _get_existing_domain_xml.assert_has_calls([mock.call(instance,
- network_info, block_device_info)])
- _create_domain_and_network.assert_has_calls([mock.call(
- self.context, dummyxml,
- instance, network_info,
- block_device_info=block_device_info,
- vifs_already_plugged=True)])
- _attach_pci_devices.assert_has_calls([mock.call('fake_dom',
- 'fake_pci_devs')])
-
- def test_destroy_undefines(self):
- mock = self.mox.CreateMock(libvirt.virDomain)
- mock.ID()
- mock.destroy()
- mock.undefineFlags(1).AndReturn(1)
-
- self.mox.ReplayAll()
-
- def fake_lookup_by_name(instance_name):
- return mock
-
- def fake_get_info(instance_name):
- return {'state': power_state.SHUTDOWN, 'id': -1}
-
- def fake_delete_instance_files(instance):
- return None
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
- self.stubs.Set(conn, 'get_info', fake_get_info)
- self.stubs.Set(conn, '_delete_instance_files',
- fake_delete_instance_files)
-
- instance = {"name": "instancename", "id": "instanceid",
- "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
- conn.destroy(self.context, instance, [])
-
- @mock.patch.object(rbd_utils, 'RBDDriver')
- def test_cleanup_rbd(self, mock_driver):
- driver = mock_driver.return_value
- driver.cleanup_volumes = mock.Mock()
- fake_instance = {'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'}
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- conn._cleanup_rbd(fake_instance)
-
- driver.cleanup_volumes.assert_called_once_with(fake_instance)
-
- def test_destroy_undefines_no_undefine_flags(self):
- mock = self.mox.CreateMock(libvirt.virDomain)
- mock.ID()
- mock.destroy()
- mock.undefineFlags(1).AndRaise(libvirt.libvirtError('Err'))
- mock.undefine()
-
- self.mox.ReplayAll()
-
- def fake_lookup_by_name(instance_name):
- return mock
-
- def fake_get_info(instance_name):
- return {'state': power_state.SHUTDOWN, 'id': -1}
-
- def fake_delete_instance_files(instance):
- return None
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
- self.stubs.Set(conn, 'get_info', fake_get_info)
- self.stubs.Set(conn, '_delete_instance_files',
- fake_delete_instance_files)
- instance = {"name": "instancename", "id": "instanceid",
- "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
- conn.destroy(self.context, instance, [])
-
- def test_destroy_undefines_no_attribute_with_managed_save(self):
- mock = self.mox.CreateMock(libvirt.virDomain)
- mock.ID()
- mock.destroy()
- mock.undefineFlags(1).AndRaise(AttributeError())
- mock.hasManagedSaveImage(0).AndReturn(True)
- mock.managedSaveRemove(0)
- mock.undefine()
-
- self.mox.ReplayAll()
-
- def fake_lookup_by_name(instance_name):
- return mock
-
- def fake_get_info(instance_name):
- return {'state': power_state.SHUTDOWN, 'id': -1}
-
- def fake_delete_instance_files(instance):
- return None
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
- self.stubs.Set(conn, 'get_info', fake_get_info)
- self.stubs.Set(conn, '_delete_instance_files',
- fake_delete_instance_files)
- instance = {"name": "instancename", "id": "instanceid",
- "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
- conn.destroy(self.context, instance, [])
-
- def test_destroy_undefines_no_attribute_no_managed_save(self):
- mock = self.mox.CreateMock(libvirt.virDomain)
- mock.ID()
- mock.destroy()
- mock.undefineFlags(1).AndRaise(AttributeError())
- mock.hasManagedSaveImage(0).AndRaise(AttributeError())
- mock.undefine()
-
- self.mox.ReplayAll()
-
- def fake_lookup_by_name(instance_name):
- return mock
-
- def fake_get_info(instance_name):
- return {'state': power_state.SHUTDOWN, 'id': -1}
-
- def fake_delete_instance_files(instance):
- return None
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
- self.stubs.Set(conn, 'get_info', fake_get_info)
- self.stubs.Set(conn, '_delete_instance_files',
- fake_delete_instance_files)
- instance = {"name": "instancename", "id": "instanceid",
- "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
- conn.destroy(self.context, instance, [])
-
- def test_destroy_timed_out(self):
- mock = self.mox.CreateMock(libvirt.virDomain)
- mock.ID()
- mock.destroy().AndRaise(libvirt.libvirtError("timed out"))
- self.mox.ReplayAll()
-
- def fake_lookup_by_name(instance_name):
- return mock
-
- def fake_get_error_code(self):
- return libvirt.VIR_ERR_OPERATION_TIMEOUT
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
- self.stubs.Set(libvirt.libvirtError, 'get_error_code',
- fake_get_error_code)
- instance = {"name": "instancename", "id": "instanceid",
- "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
- self.assertRaises(exception.InstancePowerOffFailure,
- conn.destroy, self.context, instance, [])
-
- def test_private_destroy_not_found(self):
- ex = fakelibvirt.make_libvirtError(
- libvirt.libvirtError,
- "No such domain",
- error_code=libvirt.VIR_ERR_NO_DOMAIN)
- mock = self.mox.CreateMock(libvirt.virDomain)
- mock.ID()
- mock.destroy().AndRaise(ex)
- mock.info().AndRaise(ex)
- self.mox.ReplayAll()
-
- def fake_lookup_by_name(instance_name):
- return mock
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
- instance = {"name": "instancename", "id": "instanceid",
- "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
- # NOTE(vish): verifies destroy doesn't raise if the instance disappears
- conn._destroy(instance)
-
- def test_undefine_domain_with_not_found_instance(self):
- def fake_lookup(instance_name):
- raise libvirt.libvirtError("not found")
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
- self.mox.StubOutWithMock(libvirt.libvirtError, "get_error_code")
- libvirt.libvirtError.get_error_code().AndReturn(
- libvirt.VIR_ERR_NO_DOMAIN)
-
- self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- instance = {'name': 'test'}
-
- # NOTE(wenjianhn): verifies undefine doesn't raise if the
- # instance disappears
- conn._undefine_domain(instance)
-
- @mock.patch.object(libvirt_driver.LibvirtDriver,
- "_list_instance_domains")
- def test_disk_over_committed_size_total(self, mock_list):
- # Ensure destroy calls managedSaveRemove for saved instance.
- class DiagFakeDomain(object):
- def __init__(self, name):
- self._name = name
-
- def ID(self):
- return 1
-
- def name(self):
- return self._name
-
- def UUIDString(self):
- return "19479fee-07a5-49bb-9138-d3738280d63c"
-
- def XMLDesc(self, flags):
- return "<domain/>"
-
- mock_list.return_value = [
- DiagFakeDomain("instance0000001"),
- DiagFakeDomain("instance0000002")]
-
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- fake_disks = {'instance0000001':
- [{'type': 'qcow2', 'path': '/somepath/disk1',
- 'virt_disk_size': '10737418240',
- 'backing_file': '/somepath/disk1',
- 'disk_size': '83886080',
- 'over_committed_disk_size': '10653532160'}],
- 'instance0000002':
- [{'type': 'raw', 'path': '/somepath/disk2',
- 'virt_disk_size': '0',
- 'backing_file': '/somepath/disk2',
- 'disk_size': '10737418240',
- 'over_committed_disk_size': '0'}]}
-
- def get_info(instance_name, xml, **kwargs):
- return jsonutils.dumps(fake_disks.get(instance_name))
-
- with mock.patch.object(drvr,
- "_get_instance_disk_info") as mock_info:
- mock_info.side_effect = get_info
-
- result = drvr._get_disk_over_committed_size_total()
- self.assertEqual(result, 10653532160)
- mock_list.assert_called_with()
- mock_info.assert_called()
-
- @mock.patch.object(libvirt_driver.LibvirtDriver,
- "_list_instance_domains")
- def test_disk_over_committed_size_total_eperm(self, mock_list):
- # Ensure destroy calls managedSaveRemove for saved instance.
- class DiagFakeDomain(object):
- def __init__(self, name):
- self._name = name
-
- def ID(self):
- return 1
-
- def name(self):
- return self._name
-
- def UUIDString(self):
- return "19479fee-07a5-49bb-9138-d3738280d63c"
-
- def XMLDesc(self, flags):
- return "<domain/>"
-
- mock_list.return_value = [
- DiagFakeDomain("instance0000001"),
- DiagFakeDomain("instance0000002")]
-
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- fake_disks = {'instance0000001':
- [{'type': 'qcow2', 'path': '/somepath/disk1',
- 'virt_disk_size': '10737418240',
- 'backing_file': '/somepath/disk1',
- 'disk_size': '83886080',
- 'over_committed_disk_size': '10653532160'}],
- 'instance0000002':
- [{'type': 'raw', 'path': '/somepath/disk2',
- 'virt_disk_size': '0',
- 'backing_file': '/somepath/disk2',
- 'disk_size': '10737418240',
- 'over_committed_disk_size': '21474836480'}]}
-
- def side_effect(name, dom):
- if name == 'instance0000001':
- raise OSError(errno.EACCES, 'Permission denied')
- if name == 'instance0000002':
- return jsonutils.dumps(fake_disks.get(name))
- get_disk_info = mock.Mock()
- get_disk_info.side_effect = side_effect
- drvr._get_instance_disk_info = get_disk_info
-
- result = drvr._get_disk_over_committed_size_total()
- self.assertEqual(21474836480, result)
- mock_list.assert_called_with()
-
- @mock.patch.object(libvirt_driver.LibvirtDriver, "_list_instance_domains",
- return_value=[mock.MagicMock(name='foo')])
- @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_instance_disk_info",
- side_effect=exception.VolumeBDMPathNotFound(path='bar'))
- def test_disk_over_committed_size_total_bdm_not_found(self,
- mock_get_disk_info,
- mock_list_domains):
- # Tests that we handle VolumeBDMPathNotFound gracefully.
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.assertEqual(0, drvr._get_disk_over_committed_size_total())
-
- def test_cpu_info(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- def get_host_capabilities_stub(self):
- cpu = vconfig.LibvirtConfigCPU()
- cpu.model = "Opteron_G4"
- cpu.vendor = "AMD"
- cpu.arch = arch.X86_64
-
- cpu.cores = 2
- cpu.threads = 1
- cpu.sockets = 4
-
- cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic"))
- cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow"))
-
- caps = vconfig.LibvirtConfigCaps()
- caps.host = vconfig.LibvirtConfigCapsHost()
- caps.host.cpu = cpu
-
- guest = vconfig.LibvirtConfigGuest()
- guest.ostype = vm_mode.HVM
- guest.arch = arch.X86_64
- guest.domtype = ["kvm"]
- caps.guests.append(guest)
-
- guest = vconfig.LibvirtConfigGuest()
- guest.ostype = vm_mode.HVM
- guest.arch = arch.I686
- guest.domtype = ["kvm"]
- caps.guests.append(guest)
-
- return caps
-
- self.stubs.Set(libvirt_driver.LibvirtDriver,
- '_get_host_capabilities',
- get_host_capabilities_stub)
-
- want = {"vendor": "AMD",
- "features": ["extapic", "3dnow"],
- "model": "Opteron_G4",
- "arch": arch.X86_64,
- "topology": {"cores": 2, "threads": 1, "sockets": 4}}
- got = jsonutils.loads(conn._get_cpu_info())
- self.assertEqual(want, got)
-
- def test_get_pcidev_info(self):
-
- def fake_nodeDeviceLookupByName(name):
- return FakeNodeDevice(_fake_NodeDevXml[name])
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.nodeDeviceLookupByName =\
- fake_nodeDeviceLookupByName
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- actualvf = conn._get_pcidev_info("pci_0000_04_00_3")
- expect_vf = {
- "dev_id": "pci_0000_04_00_3",
- "address": "0000:04:00.3",
- "product_id": '1521',
- "vendor_id": '8086',
- "label": 'label_8086_1521',
- "dev_type": 'type-PF',
- }
-
- self.assertEqual(actualvf, expect_vf)
- actualvf = conn._get_pcidev_info("pci_0000_04_10_7")
- expect_vf = {
- "dev_id": "pci_0000_04_10_7",
- "address": "0000:04:10.7",
- "product_id": '1520',
- "vendor_id": '8086',
- "label": 'label_8086_1520',
- "dev_type": 'type-VF',
- "phys_function": '0000:04:00.3',
- }
-
- self.assertEqual(actualvf, expect_vf)
-
- def test_pci_device_assignable(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn.dev_filter, 'device_assignable', lambda x: True)
-
- fake_dev = {'dev_type': 'type-PF'}
- self.assertFalse(conn._pci_device_assignable(fake_dev))
- fake_dev = {'dev_type': 'type-VF'}
- self.assertTrue(conn._pci_device_assignable(fake_dev))
- fake_dev = {'dev_type': 'type-PCI'}
- self.assertTrue(conn._pci_device_assignable(fake_dev))
-
- def test_list_devices_not_supported(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- # Handle just the NO_SUPPORT error
- not_supported_exc = fakelibvirt.make_libvirtError(
- libvirt.libvirtError,
- 'this function is not supported by the connection driver:'
- ' virNodeNumOfDevices',
- error_code=libvirt.VIR_ERR_NO_SUPPORT)
-
- with mock.patch.object(conn._conn, 'listDevices',
- side_effect=not_supported_exc):
- self.assertEqual('[]', conn._get_pci_passthrough_devices())
-
- # We cache not supported status to avoid emitting too many logging
- # messages. Clear this value to test the other exception case.
- del conn._list_devices_supported
-
- # Other errors should not be caught
- other_exc = fakelibvirt.make_libvirtError(
- libvirt.libvirtError,
- 'other exc',
- error_code=libvirt.VIR_ERR_NO_DOMAIN)
-
- with mock.patch.object(conn._conn, 'listDevices',
- side_effect=other_exc):
- self.assertRaises(libvirt.libvirtError,
- conn._get_pci_passthrough_devices)
-
- def test_get_pci_passthrough_devices(self):
-
- def fakelistDevices(caps, fakeargs=0):
- return ['pci_0000_04_00_3', 'pci_0000_04_10_7']
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.listDevices = fakelistDevices
-
- def fake_nodeDeviceLookupByName(name):
- return FakeNodeDevice(_fake_NodeDevXml[name])
-
- libvirt_driver.LibvirtDriver._conn.nodeDeviceLookupByName =\
- fake_nodeDeviceLookupByName
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn.dev_filter, 'device_assignable', lambda x: x)
- actjson = conn._get_pci_passthrough_devices()
-
- expectvfs = [
- {
- "dev_id": "pci_0000_04_00_3",
- "address": "0000:04:10.3",
- "product_id": '1521',
- "vendor_id": '8086',
- "dev_type": 'type-PF',
- "phys_function": None},
- {
- "dev_id": "pci_0000_04_10_7",
- "domain": 0,
- "address": "0000:04:10.7",
- "product_id": '1520',
- "vendor_id": '8086',
- "dev_type": 'type-VF',
- "phys_function": [('0x0000', '0x04', '0x00', '0x3')],
- }
- ]
-
- actctualvfs = jsonutils.loads(actjson)
- for key in actctualvfs[0].keys():
- if key not in ['phys_function', 'virt_functions', 'label']:
- self.assertEqual(actctualvfs[0][key], expectvfs[1][key])
-
- def _fake_caps_numa_topology(self):
- topology = vconfig.LibvirtConfigCapsNUMATopology()
-
- cell_0 = vconfig.LibvirtConfigCapsNUMACell()
- cell_0.id = 0
- cell_0.memory = 1024 * units.Ki
- cpu_0_0 = vconfig.LibvirtConfigCapsNUMACPU()
- cpu_0_0.id = 0
- cpu_0_0.socket_id = 0
- cpu_0_0.core_id = 0
- cpu_0_0.sibling = 0
- cpu_0_1 = vconfig.LibvirtConfigCapsNUMACPU()
- cpu_0_1.id = 1
- cpu_0_1.socket_id = 0
- cpu_0_1.core_id = 1
- cpu_0_1.sibling = 1
- cell_0.cpus = [cpu_0_0, cpu_0_1]
-
- cell_1 = vconfig.LibvirtConfigCapsNUMACell()
- cell_1.id = 1
- cell_1.memory = 1024 * units.Ki
- cpu_1_0 = vconfig.LibvirtConfigCapsNUMACPU()
- cpu_1_0.id = 2
- cpu_1_0.socket_id = 1
- cpu_1_0.core_id = 0
- cpu_1_0.sibling = 2
- cpu_1_1 = vconfig.LibvirtConfigCapsNUMACPU()
- cpu_1_1.id = 3
- cpu_1_1.socket_id = 1
- cpu_1_1.core_id = 1
- cpu_1_1.sibling = 3
- cell_1.cpus = [cpu_1_0, cpu_1_1]
-
- topology.cells = [cell_0, cell_1]
- return topology
-
- def test_get_host_numa_topology(self):
- caps = vconfig.LibvirtConfigCaps()
- caps.host = vconfig.LibvirtConfigCapsHost()
- caps.host.topology = self._fake_caps_numa_topology()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- expected_topo_dict = {'cells': [
- {'cpus': '0,1', 'cpu_usage': 0,
- 'mem': {'total': 1024, 'used': 0},
- 'id': 0},
- {'cpus': '3', 'cpu_usage': 0,
- 'mem': {'total': 1024, 'used': 0},
- 'id': 1}]}
- with contextlib.nested(
- mock.patch.object(conn, '_has_min_version', return_value=True),
- mock.patch.object(
- conn, '_get_host_capabilities', return_value=caps),
- mock.patch.object(
- hardware, 'get_vcpu_pin_set', return_value=set([0, 1, 3]))
- ):
- got_topo = conn._get_host_numa_topology()
- got_topo_dict = got_topo._to_dict()
- self.assertThat(
- expected_topo_dict, matchers.DictMatches(got_topo_dict))
-
- def test_get_host_numa_topology_empty(self):
- caps = vconfig.LibvirtConfigCaps()
- caps.host = vconfig.LibvirtConfigCapsHost()
- caps.host.topology = None
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- with contextlib.nested(
- mock.patch.object(conn, '_has_min_version', return_value=True),
- mock.patch.object(conn, '_get_host_capabilities',
- return_value=caps)
- ) as (has_min_version, get_caps):
- self.assertIsNone(conn._get_host_numa_topology())
- get_caps.assert_called_once_with()
-
- def test_get_host_numa_topology_not_supported(self):
- # Tests that libvirt isn't new enough to support numa topology.
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- with mock.patch.object(conn, '_has_min_version', return_value=False):
- self.assertIsNone(conn._get_host_numa_topology())
-
- def test_diagnostic_vcpus_exception(self):
- xml = """
- <domain type='kvm'>
- <devices>
- <disk type='file'>
- <source file='filename'/>
- <target dev='vda' bus='virtio'/>
- </disk>
- <disk type='block'>
- <source dev='/path/to/dev/1'/>
- <target dev='vdb' bus='virtio'/>
- </disk>
- <interface type='network'>
- <mac address='52:54:00:a4:38:38'/>
- <source network='default'/>
- <target dev='vnet0'/>
- </interface>
- </devices>
- </domain>
- """
-
- class DiagFakeDomain(FakeVirtDomain):
-
- def __init__(self):
- super(DiagFakeDomain, self).__init__(fake_xml=xml)
-
- def vcpus(self):
- raise libvirt.libvirtError('vcpus missing')
-
- def blockStats(self, path):
- return (169L, 688640L, 0L, 0L, -1L)
-
- def interfaceStats(self, path):
- return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
-
- def memoryStats(self):
- return {'actual': 220160L, 'rss': 200164L}
-
- def maxMemory(self):
- return 280160L
-
- def fake_lookup_name(name):
- return DiagFakeDomain()
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- actual = conn.get_diagnostics({"name": "testvirt"})
- expect = {'vda_read': 688640L,
- 'vda_read_req': 169L,
- 'vda_write': 0L,
- 'vda_write_req': 0L,
- 'vda_errors': -1L,
- 'vdb_read': 688640L,
- 'vdb_read_req': 169L,
- 'vdb_write': 0L,
- 'vdb_write_req': 0L,
- 'vdb_errors': -1L,
- 'memory': 280160L,
- 'memory-actual': 220160L,
- 'memory-rss': 200164L,
- 'vnet0_rx': 4408L,
- 'vnet0_rx_drop': 0L,
- 'vnet0_rx_errors': 0L,
- 'vnet0_rx_packets': 82L,
- 'vnet0_tx': 0L,
- 'vnet0_tx_drop': 0L,
- 'vnet0_tx_errors': 0L,
- 'vnet0_tx_packets': 0L,
- }
- self.assertEqual(actual, expect)
-
- lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
- diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
- timeutils.set_time_override(diags_time)
-
- actual = conn.get_instance_diagnostics({"name": "testvirt",
- "launched_at": lt})
- expected = {'config_drive': False,
- 'cpu_details': [],
- 'disk_details': [{'errors_count': 0,
- 'id': '',
- 'read_bytes': 688640L,
- 'read_requests': 169L,
- 'write_bytes': 0L,
- 'write_requests': 0L},
- {'errors_count': 0,
- 'id': '',
- 'read_bytes': 688640L,
- 'read_requests': 169L,
- 'write_bytes': 0L,
- 'write_requests': 0L}],
- 'driver': 'libvirt',
- 'hypervisor_os': 'linux',
- 'memory_details': {'maximum': 2048, 'used': 1234},
- 'nic_details': [{'mac_address': '52:54:00:a4:38:38',
- 'rx_drop': 0L,
- 'rx_errors': 0L,
- 'rx_octets': 4408L,
- 'rx_packets': 82L,
- 'tx_drop': 0L,
- 'tx_errors': 0L,
- 'tx_octets': 0L,
- 'tx_packets': 0L}],
- 'state': 'running',
- 'uptime': 10,
- 'version': '1.0'}
- self.assertEqual(expected, actual.serialize())
-
- def test_diagnostic_blockstats_exception(self):
- xml = """
- <domain type='kvm'>
- <devices>
- <disk type='file'>
- <source file='filename'/>
- <target dev='vda' bus='virtio'/>
- </disk>
- <disk type='block'>
- <source dev='/path/to/dev/1'/>
- <target dev='vdb' bus='virtio'/>
- </disk>
- <interface type='network'>
- <mac address='52:54:00:a4:38:38'/>
- <source network='default'/>
- <target dev='vnet0'/>
- </interface>
- </devices>
- </domain>
- """
-
- class DiagFakeDomain(FakeVirtDomain):
-
- def __init__(self):
- super(DiagFakeDomain, self).__init__(fake_xml=xml)
-
- def vcpus(self):
- return ([(0, 1, 15340000000L, 0),
- (1, 1, 1640000000L, 0),
- (2, 1, 3040000000L, 0),
- (3, 1, 1420000000L, 0)],
- [(True, False),
- (True, False),
- (True, False),
- (True, False)])
-
- def blockStats(self, path):
- raise libvirt.libvirtError('blockStats missing')
-
- def interfaceStats(self, path):
- return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
-
- def memoryStats(self):
- return {'actual': 220160L, 'rss': 200164L}
-
- def maxMemory(self):
- return 280160L
-
- def fake_lookup_name(name):
- return DiagFakeDomain()
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- actual = conn.get_diagnostics({"name": "testvirt"})
- expect = {'cpu0_time': 15340000000L,
- 'cpu1_time': 1640000000L,
- 'cpu2_time': 3040000000L,
- 'cpu3_time': 1420000000L,
- 'memory': 280160L,
- 'memory-actual': 220160L,
- 'memory-rss': 200164L,
- 'vnet0_rx': 4408L,
- 'vnet0_rx_drop': 0L,
- 'vnet0_rx_errors': 0L,
- 'vnet0_rx_packets': 82L,
- 'vnet0_tx': 0L,
- 'vnet0_tx_drop': 0L,
- 'vnet0_tx_errors': 0L,
- 'vnet0_tx_packets': 0L,
- }
- self.assertEqual(actual, expect)
-
- lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
- diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
- timeutils.set_time_override(diags_time)
-
- actual = conn.get_instance_diagnostics({"name": "testvirt",
- "launched_at": lt})
- expected = {'config_drive': False,
- 'cpu_details': [{'time': 15340000000L},
- {'time': 1640000000L},
- {'time': 3040000000L},
- {'time': 1420000000L}],
- 'disk_details': [],
- 'driver': 'libvirt',
- 'hypervisor_os': 'linux',
- 'memory_details': {'maximum': 2048, 'used': 1234},
- 'nic_details': [{'mac_address': '52:54:00:a4:38:38',
- 'rx_drop': 0L,
- 'rx_errors': 0L,
- 'rx_octets': 4408L,
- 'rx_packets': 82L,
- 'tx_drop': 0L,
- 'tx_errors': 0L,
- 'tx_octets': 0L,
- 'tx_packets': 0L}],
- 'state': 'running',
- 'uptime': 10,
- 'version': '1.0'}
- self.assertEqual(expected, actual.serialize())
-
- def test_diagnostic_interfacestats_exception(self):
- xml = """
- <domain type='kvm'>
- <devices>
- <disk type='file'>
- <source file='filename'/>
- <target dev='vda' bus='virtio'/>
- </disk>
- <disk type='block'>
- <source dev='/path/to/dev/1'/>
- <target dev='vdb' bus='virtio'/>
- </disk>
- <interface type='network'>
- <mac address='52:54:00:a4:38:38'/>
- <source network='default'/>
- <target dev='vnet0'/>
- </interface>
- </devices>
- </domain>
- """
-
- class DiagFakeDomain(FakeVirtDomain):
-
- def __init__(self):
- super(DiagFakeDomain, self).__init__(fake_xml=xml)
-
- def vcpus(self):
- return ([(0, 1, 15340000000L, 0),
- (1, 1, 1640000000L, 0),
- (2, 1, 3040000000L, 0),
- (3, 1, 1420000000L, 0)],
- [(True, False),
- (True, False),
- (True, False),
- (True, False)])
-
- def blockStats(self, path):
- return (169L, 688640L, 0L, 0L, -1L)
-
- def interfaceStats(self, path):
- raise libvirt.libvirtError('interfaceStat missing')
-
- def memoryStats(self):
- return {'actual': 220160L, 'rss': 200164L}
-
- def maxMemory(self):
- return 280160L
-
- def fake_lookup_name(name):
- return DiagFakeDomain()
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- actual = conn.get_diagnostics({"name": "testvirt"})
- expect = {'cpu0_time': 15340000000L,
- 'cpu1_time': 1640000000L,
- 'cpu2_time': 3040000000L,
- 'cpu3_time': 1420000000L,
- 'vda_read': 688640L,
- 'vda_read_req': 169L,
- 'vda_write': 0L,
- 'vda_write_req': 0L,
- 'vda_errors': -1L,
- 'vdb_read': 688640L,
- 'vdb_read_req': 169L,
- 'vdb_write': 0L,
- 'vdb_write_req': 0L,
- 'vdb_errors': -1L,
- 'memory': 280160L,
- 'memory-actual': 220160L,
- 'memory-rss': 200164L,
- }
- self.assertEqual(actual, expect)
-
- lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
- diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
- timeutils.set_time_override(diags_time)
-
- actual = conn.get_instance_diagnostics({"name": "testvirt",
- "launched_at": lt})
- expected = {'config_drive': False,
- 'cpu_details': [{'time': 15340000000L},
- {'time': 1640000000L},
- {'time': 3040000000L},
- {'time': 1420000000L}],
- 'disk_details': [{'errors_count': 0,
- 'id': '',
- 'read_bytes': 688640L,
- 'read_requests': 169L,
- 'write_bytes': 0L,
- 'write_requests': 0L},
- {'errors_count': 0,
- 'id': '',
- 'read_bytes': 688640L,
- 'read_requests': 169L,
- 'write_bytes': 0L,
- 'write_requests': 0L}],
- 'driver': 'libvirt',
- 'hypervisor_os': 'linux',
- 'memory_details': {'maximum': 2048, 'used': 1234},
- 'nic_details': [],
- 'state': 'running',
- 'uptime': 10,
- 'version': '1.0'}
- self.assertEqual(expected, actual.serialize())
-
- def test_diagnostic_memorystats_exception(self):
- xml = """
- <domain type='kvm'>
- <devices>
- <disk type='file'>
- <source file='filename'/>
- <target dev='vda' bus='virtio'/>
- </disk>
- <disk type='block'>
- <source dev='/path/to/dev/1'/>
- <target dev='vdb' bus='virtio'/>
- </disk>
- <interface type='network'>
- <mac address='52:54:00:a4:38:38'/>
- <source network='default'/>
- <target dev='vnet0'/>
- </interface>
- </devices>
- </domain>
- """
-
- class DiagFakeDomain(FakeVirtDomain):
-
- def __init__(self):
- super(DiagFakeDomain, self).__init__(fake_xml=xml)
-
- def vcpus(self):
- return ([(0, 1, 15340000000L, 0),
- (1, 1, 1640000000L, 0),
- (2, 1, 3040000000L, 0),
- (3, 1, 1420000000L, 0)],
- [(True, False),
- (True, False),
- (True, False),
- (True, False)])
-
- def blockStats(self, path):
- return (169L, 688640L, 0L, 0L, -1L)
-
- def interfaceStats(self, path):
- return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
-
- def memoryStats(self):
- raise libvirt.libvirtError('memoryStats missing')
-
- def maxMemory(self):
- return 280160L
-
- def fake_lookup_name(name):
- return DiagFakeDomain()
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- actual = conn.get_diagnostics({"name": "testvirt"})
- expect = {'cpu0_time': 15340000000L,
- 'cpu1_time': 1640000000L,
- 'cpu2_time': 3040000000L,
- 'cpu3_time': 1420000000L,
- 'vda_read': 688640L,
- 'vda_read_req': 169L,
- 'vda_write': 0L,
- 'vda_write_req': 0L,
- 'vda_errors': -1L,
- 'vdb_read': 688640L,
- 'vdb_read_req': 169L,
- 'vdb_write': 0L,
- 'vdb_write_req': 0L,
- 'vdb_errors': -1L,
- 'memory': 280160L,
- 'vnet0_rx': 4408L,
- 'vnet0_rx_drop': 0L,
- 'vnet0_rx_errors': 0L,
- 'vnet0_rx_packets': 82L,
- 'vnet0_tx': 0L,
- 'vnet0_tx_drop': 0L,
- 'vnet0_tx_errors': 0L,
- 'vnet0_tx_packets': 0L,
- }
- self.assertEqual(actual, expect)
-
- lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
- diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
- timeutils.set_time_override(diags_time)
-
- actual = conn.get_instance_diagnostics({"name": "testvirt",
- "launched_at": lt})
- expected = {'config_drive': False,
- 'cpu_details': [{'time': 15340000000L},
- {'time': 1640000000L},
- {'time': 3040000000L},
- {'time': 1420000000L}],
- 'disk_details': [{'errors_count': 0,
- 'id': '',
- 'read_bytes': 688640L,
- 'read_requests': 169L,
- 'write_bytes': 0L,
- 'write_requests': 0L},
- {'errors_count': 0,
- 'id': '',
- 'read_bytes': 688640L,
- 'read_requests': 169L,
- 'write_bytes': 0L,
- 'write_requests': 0L}],
- 'driver': 'libvirt',
- 'hypervisor_os': 'linux',
- 'memory_details': {'maximum': 2048, 'used': 1234},
- 'nic_details': [{'mac_address': '52:54:00:a4:38:38',
- 'rx_drop': 0L,
- 'rx_errors': 0L,
- 'rx_octets': 4408L,
- 'rx_packets': 82L,
- 'tx_drop': 0L,
- 'tx_errors': 0L,
- 'tx_octets': 0L,
- 'tx_packets': 0L}],
- 'state': 'running',
- 'uptime': 10,
- 'version': '1.0'}
- self.assertEqual(expected, actual.serialize())
-
- def test_diagnostic_full(self):
- xml = """
- <domain type='kvm'>
- <devices>
- <disk type='file'>
- <source file='filename'/>
- <target dev='vda' bus='virtio'/>
- </disk>
- <disk type='block'>
- <source dev='/path/to/dev/1'/>
- <target dev='vdb' bus='virtio'/>
- </disk>
- <interface type='network'>
- <mac address='52:54:00:a4:38:38'/>
- <source network='default'/>
- <target dev='vnet0'/>
- </interface>
- </devices>
- </domain>
- """
-
- class DiagFakeDomain(FakeVirtDomain):
-
- def __init__(self):
- super(DiagFakeDomain, self).__init__(fake_xml=xml)
-
- def vcpus(self):
- return ([(0, 1, 15340000000L, 0),
- (1, 1, 1640000000L, 0),
- (2, 1, 3040000000L, 0),
- (3, 1, 1420000000L, 0)],
- [(True, False),
- (True, False),
- (True, False),
- (True, False)])
-
- def blockStats(self, path):
- return (169L, 688640L, 0L, 0L, -1L)
-
- def interfaceStats(self, path):
- return (4408L, 82L, 0L, 0L, 0L, 0L, 0L, 0L)
-
- def memoryStats(self):
- return {'actual': 220160L, 'rss': 200164L}
-
- def maxMemory(self):
- return 280160L
-
- def fake_lookup_name(name):
- return DiagFakeDomain()
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- actual = conn.get_diagnostics({"name": "testvirt"})
- expect = {'cpu0_time': 15340000000L,
- 'cpu1_time': 1640000000L,
- 'cpu2_time': 3040000000L,
- 'cpu3_time': 1420000000L,
- 'vda_read': 688640L,
- 'vda_read_req': 169L,
- 'vda_write': 0L,
- 'vda_write_req': 0L,
- 'vda_errors': -1L,
- 'vdb_read': 688640L,
- 'vdb_read_req': 169L,
- 'vdb_write': 0L,
- 'vdb_write_req': 0L,
- 'vdb_errors': -1L,
- 'memory': 280160L,
- 'memory-actual': 220160L,
- 'memory-rss': 200164L,
- 'vnet0_rx': 4408L,
- 'vnet0_rx_drop': 0L,
- 'vnet0_rx_errors': 0L,
- 'vnet0_rx_packets': 82L,
- 'vnet0_tx': 0L,
- 'vnet0_tx_drop': 0L,
- 'vnet0_tx_errors': 0L,
- 'vnet0_tx_packets': 0L,
- }
- self.assertEqual(actual, expect)
-
- lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
- diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
- timeutils.set_time_override(diags_time)
-
- actual = conn.get_instance_diagnostics({"name": "testvirt",
- "launched_at": lt})
- expected = {'config_drive': False,
- 'cpu_details': [{'time': 15340000000L},
- {'time': 1640000000L},
- {'time': 3040000000L},
- {'time': 1420000000L}],
- 'disk_details': [{'errors_count': 0,
- 'id': '',
- 'read_bytes': 688640L,
- 'read_requests': 169L,
- 'write_bytes': 0L,
- 'write_requests': 0L},
- {'errors_count': 0,
- 'id': '',
- 'read_bytes': 688640L,
- 'read_requests': 169L,
- 'write_bytes': 0L,
- 'write_requests': 0L}],
- 'driver': 'libvirt',
- 'hypervisor_os': 'linux',
- 'memory_details': {'maximum': 2048, 'used': 1234},
- 'nic_details': [{'mac_address': '52:54:00:a4:38:38',
- 'rx_drop': 0L,
- 'rx_errors': 0L,
- 'rx_octets': 4408L,
- 'rx_packets': 82L,
- 'tx_drop': 0L,
- 'tx_errors': 0L,
- 'tx_octets': 0L,
- 'tx_packets': 0L}],
- 'state': 'running',
- 'uptime': 10,
- 'version': '1.0'}
- self.assertEqual(expected, actual.serialize())
-
- @mock.patch.object(libvirt_driver.LibvirtDriver,
- "_list_instance_domains")
- def test_failing_vcpu_count(self, mock_list):
- """Domain can fail to return the vcpu description in case it's
- just starting up or shutting down. Make sure None is handled
- gracefully.
- """
-
- class DiagFakeDomain(object):
- def __init__(self, vcpus):
- self._vcpus = vcpus
-
- def vcpus(self):
- if self._vcpus is None:
- raise libvirt.libvirtError("fake-error")
- else:
- return ([1] * self._vcpus, [True] * self._vcpus)
-
- def ID(self):
- return 1
-
- def name(self):
- return "instance000001"
-
- def UUIDString(self):
- return "19479fee-07a5-49bb-9138-d3738280d63c"
-
- mock_list.return_value = [
- DiagFakeDomain(None), DiagFakeDomain(5)]
-
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- self.assertEqual(5, drvr._get_vcpu_used())
- mock_list.assert_called_with()
-
- @mock.patch.object(libvirt_driver.LibvirtDriver,
- "_list_instance_domains")
- def test_failing_vcpu_count_none(self, mock_list):
- """Domain will return zero if the current number of vcpus used
- is None. This is in case of VM state starting up or shutting
- down. None type returned is counted as zero.
- """
-
- class DiagFakeDomain(object):
- def __init__(self):
- pass
-
- def vcpus(self):
- return None
-
- def ID(self):
- return 1
-
- def name(self):
- return "instance000001"
-
- mock_list.return_value = [DiagFakeDomain()]
-
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.assertEqual(0, drvr._get_vcpu_used())
- mock_list.assert_called_with()
-
- def test_get_memory_used_normal(self):
- m = mock.mock_open(read_data="""
-MemTotal: 16194180 kB
-MemFree: 233092 kB
-MemAvailable: 8892356 kB
-Buffers: 567708 kB
-Cached: 8362404 kB
-SwapCached: 0 kB
-Active: 8381604 kB
-""")
- with contextlib.nested(
- mock.patch("__builtin__.open", m, create=True),
- mock.patch.object(libvirt_driver.LibvirtDriver,
- "_conn"),
- mock.patch('sys.platform', 'linux2'),
- ) as (mock_file, mock_conn, mock_platform):
- mock_conn.getInfo.return_value = [
- arch.X86_64, 15814L, 8, 1208, 1, 1, 4, 2]
-
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- self.assertEqual(6866, drvr._get_memory_mb_used())
-
- def test_get_memory_used_xen(self):
- self.flags(virt_type='xen', group='libvirt')
-
- class DiagFakeDomain(object):
- def __init__(self, id, memmb):
- self.id = id
- self.memmb = memmb
-
- def info(self):
- return [0, 0, self.memmb * 1024]
-
- def ID(self):
- return self.id
-
- def name(self):
- return "instance000001"
-
- def UUIDString(self):
- return str(uuid.uuid4())
-
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- m = mock.mock_open(read_data="""
-MemTotal: 16194180 kB
-MemFree: 233092 kB
-MemAvailable: 8892356 kB
-Buffers: 567708 kB
-Cached: 8362404 kB
-SwapCached: 0 kB
-Active: 8381604 kB
-""")
-
- with contextlib.nested(
- mock.patch("__builtin__.open", m, create=True),
- mock.patch.object(libvirt_driver.LibvirtDriver,
- "_list_instance_domains"),
- mock.patch.object(libvirt_driver.LibvirtDriver,
- "_conn"),
- mock.patch('sys.platform', 'linux2'),
- ) as (mock_file, mock_list, mock_conn, mock_platform):
- mock_list.return_value = [
- DiagFakeDomain(0, 15814),
- DiagFakeDomain(1, 750),
- DiagFakeDomain(2, 1042)]
- mock_conn.getInfo.return_value = [
- arch.X86_64, 15814L, 8, 1208, 1, 1, 4, 2]
-
- self.assertEqual(8657, drvr._get_memory_mb_used())
- mock_list.assert_called_with(only_guests=False)
-
- def test_get_instance_capabilities(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- def get_host_capabilities_stub(self):
- caps = vconfig.LibvirtConfigCaps()
-
- guest = vconfig.LibvirtConfigGuest()
- guest.ostype = 'hvm'
- guest.arch = arch.X86_64
- guest.domtype = ['kvm', 'qemu']
- caps.guests.append(guest)
-
- guest = vconfig.LibvirtConfigGuest()
- guest.ostype = 'hvm'
- guest.arch = arch.I686
- guest.domtype = ['kvm']
- caps.guests.append(guest)
-
- return caps
-
- self.stubs.Set(libvirt_driver.LibvirtDriver,
- '_get_host_capabilities',
- get_host_capabilities_stub)
-
- want = [(arch.X86_64, 'kvm', 'hvm'),
- (arch.X86_64, 'qemu', 'hvm'),
- (arch.I686, 'kvm', 'hvm')]
- got = conn._get_instance_capabilities()
- self.assertEqual(want, got)
-
- def test_event_dispatch(self):
- # Validate that the libvirt self-pipe for forwarding
- # events between threads is working sanely
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- got_events = []
-
- def handler(event):
- got_events.append(event)
-
- conn.register_event_listener(handler)
-
- conn._init_events_pipe()
-
- event1 = virtevent.LifecycleEvent(
- "cef19ce0-0ca2-11df-855d-b19fbce37686",
- virtevent.EVENT_LIFECYCLE_STARTED)
- event2 = virtevent.LifecycleEvent(
- "cef19ce0-0ca2-11df-855d-b19fbce37686",
- virtevent.EVENT_LIFECYCLE_PAUSED)
- conn._queue_event(event1)
- conn._queue_event(event2)
- conn._dispatch_events()
-
- want_events = [event1, event2]
- self.assertEqual(want_events, got_events)
-
- event3 = virtevent.LifecycleEvent(
- "cef19ce0-0ca2-11df-855d-b19fbce37686",
- virtevent.EVENT_LIFECYCLE_RESUMED)
- event4 = virtevent.LifecycleEvent(
- "cef19ce0-0ca2-11df-855d-b19fbce37686",
- virtevent.EVENT_LIFECYCLE_STOPPED)
-
- conn._queue_event(event3)
- conn._queue_event(event4)
- conn._dispatch_events()
-
- want_events = [event1, event2, event3, event4]
- self.assertEqual(want_events, got_events)
-
- def test_event_lifecycle(self):
- # Validate that libvirt events are correctly translated
- # to Nova events
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- got_events = []
-
- def handler(event):
- got_events.append(event)
-
- conn.register_event_listener(handler)
- conn._init_events_pipe()
- fake_dom_xml = """
- <domain type='kvm'>
- <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
- <devices>
- <disk type='file'>
- <source file='filename'/>
- </disk>
- </devices>
- </domain>
- """
- dom = FakeVirtDomain(fake_dom_xml,
- "cef19ce0-0ca2-11df-855d-b19fbce37686")
-
- conn._event_lifecycle_callback(conn._conn,
- dom,
- libvirt.VIR_DOMAIN_EVENT_STOPPED,
- 0,
- conn)
- conn._dispatch_events()
- self.assertEqual(len(got_events), 1)
- self.assertIsInstance(got_events[0], virtevent.LifecycleEvent)
- self.assertEqual(got_events[0].uuid,
- "cef19ce0-0ca2-11df-855d-b19fbce37686")
- self.assertEqual(got_events[0].transition,
- virtevent.EVENT_LIFECYCLE_STOPPED)
-
- @mock.patch.object(libvirt_driver.LibvirtDriver, 'emit_event')
- def test_event_emit_delayed_call_now(self, emit_event_mock):
- self.flags(virt_type="kvm", group="libvirt")
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- conn._event_emit_delayed(None)
- emit_event_mock.assert_called_once_with(None)
-
- @mock.patch.object(greenthread, 'spawn_after')
- def test_event_emit_delayed_call_delayed(self, spawn_after_mock):
- CONF.set_override("virt_type", "xen", group="libvirt")
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- event = virtevent.LifecycleEvent(
- "cef19ce0-0ca2-11df-855d-b19fbce37686",
- virtevent.EVENT_LIFECYCLE_STOPPED)
- conn._event_emit_delayed(event)
- spawn_after_mock.assert_called_once_with(15, conn.emit_event, event)
-
- @mock.patch.object(greenthread, 'spawn_after')
- def test_event_emit_delayed_call_delayed_pending(self, spawn_after_mock):
- self.flags(virt_type="xen", group="libvirt")
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- uuid = "cef19ce0-0ca2-11df-855d-b19fbce37686"
- conn._events_delayed[uuid] = None
- event = virtevent.LifecycleEvent(
- uuid, virtevent.EVENT_LIFECYCLE_STOPPED)
- conn._event_emit_delayed(event)
- self.assertFalse(spawn_after_mock.called)
-
- def test_event_delayed_cleanup(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- uuid = "cef19ce0-0ca2-11df-855d-b19fbce37686"
- event = virtevent.LifecycleEvent(
- uuid, virtevent.EVENT_LIFECYCLE_STARTED)
- gt_mock = mock.Mock()
- conn._events_delayed[uuid] = gt_mock
- conn._event_delayed_cleanup(event)
- gt_mock.cancel.assert_called_once_with()
- self.assertNotIn(uuid, conn._events_delayed.keys())
-
- def test_set_cache_mode(self):
- self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- fake_conf = FakeConfigGuestDisk()
-
- fake_conf.source_type = 'file'
- conn._set_cache_mode(fake_conf)
- self.assertEqual(fake_conf.driver_cache, 'directsync')
-
- def test_set_cache_mode_invalid_mode(self):
- self.flags(disk_cachemodes=['file=FAKE'], group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- fake_conf = FakeConfigGuestDisk()
-
- fake_conf.source_type = 'file'
- conn._set_cache_mode(fake_conf)
- self.assertIsNone(fake_conf.driver_cache)
-
- def test_set_cache_mode_invalid_object(self):
- self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- fake_conf = FakeConfigGuest()
-
- fake_conf.driver_cache = 'fake'
- conn._set_cache_mode(fake_conf)
- self.assertEqual(fake_conf.driver_cache, 'fake')
-
- def _test_shared_storage_detection(self, is_same):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- self.mox.StubOutWithMock(conn, 'get_host_ip_addr')
- self.mox.StubOutWithMock(utils, 'execute')
- self.mox.StubOutWithMock(os.path, 'exists')
- self.mox.StubOutWithMock(os, 'unlink')
- conn.get_host_ip_addr().AndReturn('bar')
- utils.execute('ssh', 'foo', 'touch', mox.IgnoreArg())
- os.path.exists(mox.IgnoreArg()).AndReturn(is_same)
- if is_same:
- os.unlink(mox.IgnoreArg())
- else:
- utils.execute('ssh', 'foo', 'rm', mox.IgnoreArg())
- self.mox.ReplayAll()
- return conn._is_storage_shared_with('foo', '/path')
-
- def test_shared_storage_detection_same_host(self):
- self.assertTrue(self._test_shared_storage_detection(True))
-
- def test_shared_storage_detection_different_host(self):
- self.assertFalse(self._test_shared_storage_detection(False))
-
- def test_shared_storage_detection_easy(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- self.mox.StubOutWithMock(conn, 'get_host_ip_addr')
- self.mox.StubOutWithMock(utils, 'execute')
- self.mox.StubOutWithMock(os.path, 'exists')
- self.mox.StubOutWithMock(os, 'unlink')
- conn.get_host_ip_addr().AndReturn('foo')
- self.mox.ReplayAll()
- self.assertTrue(conn._is_storage_shared_with('foo', '/path'))
-
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._lookup_by_name')
- def test_get_domain_info_with_more_return(self, lookup_mock):
- instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
- dom_mock = mock.MagicMock()
- dom_mock.info.return_value = [
- 1, 2048, 737, 8, 12345, 888888
- ]
- dom_mock.ID.return_value = mock.sentinel.instance_id
- lookup_mock.return_value = dom_mock
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- info = conn.get_info(instance)
- expect = {'state': 1,
- 'max_mem': 2048,
- 'mem': 737,
- 'num_cpu': 8,
- 'cpu_time': 12345,
- 'id': mock.sentinel.instance_id}
- self.assertEqual(expect, info)
- dom_mock.info.assert_called_once_with()
- dom_mock.ID.assert_called_once_with()
- lookup_mock.assert_called_once_with(instance['name'])
-
- @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
- @mock.patch.object(encodeutils, 'safe_decode')
- def test_create_domain(self, mock_safe_decode, mock_get_inst_path):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- mock_domain = mock.MagicMock()
- mock_instance = mock.MagicMock()
- mock_get_inst_path.return_value = '/tmp/'
-
- domain = conn._create_domain(domain=mock_domain,
- instance=mock_instance)
-
- self.assertEqual(mock_domain, domain)
- mock_get_inst_path.assertHasCalls([mock.call(mock_instance)])
- mock_domain.createWithFlags.assertHasCalls([mock.call(0)])
- self.assertEqual(2, mock_safe_decode.call_count)
-
- @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
- @mock.patch('nova.virt.disk.api.setup_container')
- @mock.patch('nova.openstack.common.fileutils.ensure_tree')
- @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
- def test_create_domain_lxc(self, mock_get_inst_path, mock_ensure_tree,
- mock_setup_container, mock_get_info, mock_clean):
- self.flags(virt_type='lxc', group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- mock_instance = mock.MagicMock()
- inst_sys_meta = dict()
- mock_instance.system_metadata = inst_sys_meta
- mock_get_inst_path.return_value = '/tmp/'
- mock_image_backend = mock.MagicMock()
- conn.image_backend = mock_image_backend
- mock_image = mock.MagicMock()
- mock_image.path = '/tmp/test.img'
- conn.image_backend.image.return_value = mock_image
- mock_setup_container.return_value = '/dev/nbd0'
- mock_get_info.return_value = {'state': power_state.RUNNING}
-
- with contextlib.nested(
- mock.patch.object(conn, '_create_images_and_backing'),
- mock.patch.object(conn, '_is_booted_from_volume',
- return_value=False),
- mock.patch.object(conn, '_create_domain'),
- mock.patch.object(conn, 'plug_vifs'),
- mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
- mock.patch.object(conn.firewall_driver, 'prepare_instance_filter'),
- mock.patch.object(conn.firewall_driver, 'apply_instance_filter')):
- conn._create_domain_and_network(self.context, 'xml',
- mock_instance, [])
-
- self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
- mock_instance.save.assert_not_called()
- mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
- mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
- conn.image_backend.image.assert_has_calls([mock.call(mock_instance,
- 'disk')])
- setup_container_call = mock.call('/tmp/test.img',
- container_dir='/tmp/rootfs',
- use_cow=CONF.use_cow_images)
- mock_setup_container.assert_has_calls([setup_container_call])
- mock_get_info.assert_has_calls([mock.call(mock_instance)])
- mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
-
- @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
- @mock.patch.object(fake_libvirt_utils, 'chown_for_id_maps')
- @mock.patch('nova.virt.disk.api.setup_container')
- @mock.patch('nova.openstack.common.fileutils.ensure_tree')
- @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
- def test_create_domain_lxc_id_maps(self, mock_get_inst_path,
- mock_ensure_tree, mock_setup_container,
- mock_chown, mock_get_info, mock_clean):
- self.flags(virt_type='lxc', uid_maps=["0:1000:100"],
- gid_maps=["0:1000:100"], group='libvirt')
-
- def chown_side_effect(path, id_maps):
- self.assertEqual('/tmp/rootfs', path)
- self.assertIsInstance(id_maps[0], vconfig.LibvirtConfigGuestUIDMap)
- self.assertEqual(0, id_maps[0].start)
- self.assertEqual(1000, id_maps[0].target)
- self.assertEqual(100, id_maps[0].count)
- self.assertIsInstance(id_maps[1], vconfig.LibvirtConfigGuestGIDMap)
- self.assertEqual(0, id_maps[1].start)
- self.assertEqual(1000, id_maps[1].target)
- self.assertEqual(100, id_maps[1].count)
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- mock_instance = mock.MagicMock()
- inst_sys_meta = dict()
- mock_instance.system_metadata = inst_sys_meta
- mock_get_inst_path.return_value = '/tmp/'
- mock_image_backend = mock.MagicMock()
- conn.image_backend = mock_image_backend
- mock_image = mock.MagicMock()
- mock_image.path = '/tmp/test.img'
- conn.image_backend.image.return_value = mock_image
- mock_setup_container.return_value = '/dev/nbd0'
- mock_chown.side_effect = chown_side_effect
- mock_get_info.return_value = {'state': power_state.RUNNING}
-
- with contextlib.nested(
- mock.patch.object(conn, '_create_images_and_backing'),
- mock.patch.object(conn, '_is_booted_from_volume',
- return_value=False),
- mock.patch.object(conn, '_create_domain'),
- mock.patch.object(conn, 'plug_vifs'),
- mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
- mock.patch.object(conn.firewall_driver, 'prepare_instance_filter'),
- mock.patch.object(conn.firewall_driver, 'apply_instance_filter')):
- conn._create_domain_and_network(self.context, 'xml',
- mock_instance, [])
-
- self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
- mock_instance.save.assert_not_called()
- mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
- mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
- conn.image_backend.image.assert_has_calls([mock.call(mock_instance,
- 'disk')])
- setup_container_call = mock.call('/tmp/test.img',
- container_dir='/tmp/rootfs',
- use_cow=CONF.use_cow_images)
- mock_setup_container.assert_has_calls([setup_container_call])
- mock_get_info.assert_has_calls([mock.call(mock_instance)])
- mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
-
- @mock.patch('nova.virt.disk.api.teardown_container')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
- @mock.patch('nova.virt.disk.api.setup_container')
- @mock.patch('nova.openstack.common.fileutils.ensure_tree')
- @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
- def test_create_domain_lxc_not_running(self, mock_get_inst_path,
- mock_ensure_tree,
- mock_setup_container,
- mock_get_info, mock_teardown):
- self.flags(virt_type='lxc', group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- mock_instance = mock.MagicMock()
- inst_sys_meta = dict()
- mock_instance.system_metadata = inst_sys_meta
- mock_get_inst_path.return_value = '/tmp/'
- mock_image_backend = mock.MagicMock()
- conn.image_backend = mock_image_backend
- mock_image = mock.MagicMock()
- mock_image.path = '/tmp/test.img'
- conn.image_backend.image.return_value = mock_image
- mock_setup_container.return_value = '/dev/nbd0'
- mock_get_info.return_value = {'state': power_state.SHUTDOWN}
-
- with contextlib.nested(
- mock.patch.object(conn, '_create_images_and_backing'),
- mock.patch.object(conn, '_is_booted_from_volume',
- return_value=False),
- mock.patch.object(conn, '_create_domain'),
- mock.patch.object(conn, 'plug_vifs'),
- mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
- mock.patch.object(conn.firewall_driver, 'prepare_instance_filter'),
- mock.patch.object(conn.firewall_driver, 'apply_instance_filter')):
- conn._create_domain_and_network(self.context, 'xml',
- mock_instance, [])
-
- self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
- mock_instance.save.assert_not_called()
- mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
- mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
- conn.image_backend.image.assert_has_calls([mock.call(mock_instance,
- 'disk')])
- setup_container_call = mock.call('/tmp/test.img',
- container_dir='/tmp/rootfs',
- use_cow=CONF.use_cow_images)
- mock_setup_container.assert_has_calls([setup_container_call])
- mock_get_info.assert_has_calls([mock.call(mock_instance)])
- teardown_call = mock.call(container_dir='/tmp/rootfs')
- mock_teardown.assert_has_calls([teardown_call])
-
- def test_create_domain_define_xml_fails(self):
- """Tests that the xml is logged when defining the domain fails."""
- fake_xml = "<test>this is a test</test>"
-
- def fake_defineXML(xml):
- self.assertEqual(fake_xml, xml)
- raise libvirt.libvirtError('virDomainDefineXML() failed')
-
- self.log_error_called = False
-
- def fake_error(msg, *args):
- self.log_error_called = True
- self.assertIn(fake_xml, msg % args)
-
- self.stubs.Set(nova.virt.libvirt.driver.LOG, 'error', fake_error)
-
- self.create_fake_libvirt_mock(defineXML=fake_defineXML)
- self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- self.assertRaises(libvirt.libvirtError, conn._create_domain, fake_xml)
- self.assertTrue(self.log_error_called)
-
- def test_create_domain_with_flags_fails(self):
- """Tests that the xml is logged when creating the domain with flags
- fails
- """
- fake_xml = "<test>this is a test</test>"
- fake_domain = FakeVirtDomain(fake_xml)
-
- def fake_createWithFlags(launch_flags):
- raise libvirt.libvirtError('virDomainCreateWithFlags() failed')
-
- self.log_error_called = False
-
- def fake_error(msg, *args):
- self.log_error_called = True
- self.assertIn(fake_xml, msg % args)
-
- self.stubs.Set(fake_domain, 'createWithFlags', fake_createWithFlags)
- self.stubs.Set(nova.virt.libvirt.driver.LOG, 'error', fake_error)
-
- self.create_fake_libvirt_mock()
- self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- self.assertRaises(libvirt.libvirtError, conn._create_domain,
- domain=fake_domain)
- self.assertTrue(self.log_error_called)
-
- def test_create_domain_enable_hairpin_fails(self):
- """Tests that the xml is logged when enabling hairpin mode for the
- domain fails.
- """
- fake_xml = "<test>this is a test</test>"
- fake_domain = FakeVirtDomain(fake_xml)
-
- def fake_enable_hairpin(launch_flags):
- raise processutils.ProcessExecutionError('error')
-
- self.log_error_called = False
-
- def fake_error(msg, *args):
- self.log_error_called = True
- self.assertIn(fake_xml, msg % args)
-
- self.stubs.Set(nova.virt.libvirt.driver.LOG, 'error', fake_error)
-
- self.create_fake_libvirt_mock()
- self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- self.stubs.Set(conn, '_enable_hairpin', fake_enable_hairpin)
-
- self.assertRaises(processutils.ProcessExecutionError,
- conn._create_domain,
- domain=fake_domain,
- power_on=False)
- self.assertTrue(self.log_error_called)
-
- def test_get_vnc_console(self):
- instance = objects.Instance(**self.test_instance)
- dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
- "<devices>"
- "<graphics type='vnc' port='5900'/>"
- "</devices></domain>")
-
- vdmock = self.mox.CreateMock(libvirt.virDomain)
- self.mox.StubOutWithMock(vdmock, "XMLDesc")
- vdmock.XMLDesc(0).AndReturn(dummyxml)
-
- def fake_lookup(instance_name):
- if instance_name == instance['name']:
- return vdmock
- self.create_fake_libvirt_mock(lookupByName=fake_lookup)
-
- self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- vnc_dict = conn.get_vnc_console(self.context, instance)
- self.assertEqual(vnc_dict.port, '5900')
-
- def test_get_vnc_console_unavailable(self):
- instance = objects.Instance(**self.test_instance)
- dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
- "<devices></devices></domain>")
-
- vdmock = self.mox.CreateMock(libvirt.virDomain)
- self.mox.StubOutWithMock(vdmock, "XMLDesc")
- vdmock.XMLDesc(0).AndReturn(dummyxml)
-
- def fake_lookup(instance_name):
- if instance_name == instance['name']:
- return vdmock
- self.create_fake_libvirt_mock(lookupByName=fake_lookup)
-
- self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.assertRaises(exception.ConsoleTypeUnavailable,
- conn.get_vnc_console, self.context, instance)
-
- def test_get_spice_console(self):
- instance = objects.Instance(**self.test_instance)
- dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
- "<devices>"
- "<graphics type='spice' port='5950'/>"
- "</devices></domain>")
-
- vdmock = self.mox.CreateMock(libvirt.virDomain)
- self.mox.StubOutWithMock(vdmock, "XMLDesc")
- vdmock.XMLDesc(0).AndReturn(dummyxml)
-
- def fake_lookup(instance_name):
- if instance_name == instance['name']:
- return vdmock
- self.create_fake_libvirt_mock(lookupByName=fake_lookup)
-
- self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- spice_dict = conn.get_spice_console(self.context, instance)
- self.assertEqual(spice_dict.port, '5950')
-
- def test_get_spice_console_unavailable(self):
- instance = objects.Instance(**self.test_instance)
- dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
- "<devices></devices></domain>")
-
- vdmock = self.mox.CreateMock(libvirt.virDomain)
- self.mox.StubOutWithMock(vdmock, "XMLDesc")
- vdmock.XMLDesc(0).AndReturn(dummyxml)
-
- def fake_lookup(instance_name):
- if instance_name == instance['name']:
- return vdmock
- self.create_fake_libvirt_mock(lookupByName=fake_lookup)
-
- self.mox.ReplayAll()
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.assertRaises(exception.ConsoleTypeUnavailable,
- conn.get_spice_console, self.context, instance)
-
- def test_detach_volume_with_instance_not_found(self):
- # Test that detach_volume() method does not raise exception,
- # if the instance does not exist.
-
- instance = objects.Instance(**self.test_instance)
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- with contextlib.nested(
- mock.patch.object(conn, '_lookup_by_name',
- side_effect=exception.InstanceNotFound(
- instance_id=instance.name)),
- mock.patch.object(conn, '_disconnect_volume')
- ) as (_lookup_by_name, _disconnect_volume):
- connection_info = {'driver_volume_type': 'fake'}
- conn.detach_volume(connection_info, instance, '/dev/sda')
- _lookup_by_name.assert_called_once_with(instance.name)
- _disconnect_volume.assert_called_once_with(connection_info,
- 'sda')
-
- @mock.patch.object(objects.Flavor, 'get_by_id')
- def _test_attach_detach_interface_get_config(self, method_name,
- mock_flavor):
- """Tests that the get_config() method is properly called in
- attach_interface() and detach_interface().
-
- method_name: either \"attach_interface\" or \"detach_interface\"
- depending on the method to test.
- """
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
-
- instance = objects.Instance(**self.test_instance)
- mock_flavor.return_value = instance.get_flavor()
- network_info = _fake_network_info(self.stubs, 1)
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- if method_name == "attach_interface":
- fake_image_meta = {'id': instance['image_ref']}
- elif method_name == "detach_interface":
- fake_image_meta = None
- else:
- raise ValueError("Unhandled method %" % method_name)
-
- if method_name == "attach_interface":
- self.mox.StubOutWithMock(conn.firewall_driver,
- 'setup_basic_filtering')
- conn.firewall_driver.setup_basic_filtering(instance, network_info)
-
- expected = conn.vif_driver.get_config(instance, network_info[0],
- fake_image_meta,
- instance.get_flavor(),
- CONF.libvirt.virt_type)
- self.mox.StubOutWithMock(conn.vif_driver, 'get_config')
- conn.vif_driver.get_config(instance, network_info[0],
- fake_image_meta,
- mox.IsA(objects.Flavor),
- CONF.libvirt.virt_type).\
- AndReturn(expected)
-
- self.mox.ReplayAll()
-
- if method_name == "attach_interface":
- conn.attach_interface(instance, fake_image_meta,
- network_info[0])
- elif method_name == "detach_interface":
- conn.detach_interface(instance, network_info[0])
- else:
- raise ValueError("Unhandled method %" % method_name)
-
- @mock.patch.object(lockutils, "external_lock")
- def test_attach_interface_get_config(self, mock_lock):
- """Tests that the get_config() method is properly called in
- attach_interface().
- """
- mock_lock.return_value = threading.Semaphore()
-
- self._test_attach_detach_interface_get_config("attach_interface")
-
- def test_detach_interface_get_config(self):
- """Tests that the get_config() method is properly called in
- detach_interface().
- """
- self._test_attach_detach_interface_get_config("detach_interface")
-
- def test_default_root_device_name(self):
- instance = {'uuid': 'fake_instance'}
- image_meta = {'id': 'fake'}
- root_bdm = {'source_type': 'image',
- 'detination_type': 'volume',
- 'image_id': 'fake_id'}
- self.flags(virt_type='fake_libvirt_type', group='libvirt')
-
- self.mox.StubOutWithMock(blockinfo, 'get_disk_bus_for_device_type')
- self.mox.StubOutWithMock(blockinfo, 'get_root_info')
-
- blockinfo.get_disk_bus_for_device_type('fake_libvirt_type',
- image_meta,
- 'disk').InAnyOrder().\
- AndReturn('virtio')
- blockinfo.get_disk_bus_for_device_type('fake_libvirt_type',
- image_meta,
- 'cdrom').InAnyOrder().\
- AndReturn('ide')
- blockinfo.get_root_info('fake_libvirt_type',
- image_meta, root_bdm,
- 'virtio', 'ide').AndReturn({'dev': 'vda'})
- self.mox.ReplayAll()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.assertEqual(conn.default_root_device_name(instance, image_meta,
- root_bdm), '/dev/vda')
-
- def test_default_device_names_for_instance(self):
- instance = {'uuid': 'fake_instance'}
- root_device_name = '/dev/vda'
- ephemerals = [{'device_name': 'vdb'}]
- swap = [{'device_name': 'vdc'}]
- block_device_mapping = [{'device_name': 'vdc'}]
- self.flags(virt_type='fake_libvirt_type', group='libvirt')
-
- self.mox.StubOutWithMock(blockinfo, 'default_device_names')
-
- blockinfo.default_device_names('fake_libvirt_type', mox.IgnoreArg(),
- instance, root_device_name,
- ephemerals, swap, block_device_mapping)
- self.mox.ReplayAll()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn.default_device_names_for_instance(instance, root_device_name,
- ephemerals, swap,
- block_device_mapping)
-
- def test_is_supported_fs_format(self):
- supported_fs = [disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3,
- disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS]
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- for fs in supported_fs:
- self.assertTrue(conn.is_supported_fs_format(fs))
-
- supported_fs = ['', 'dummy']
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- for fs in supported_fs:
- self.assertFalse(conn.is_supported_fs_format(fs))
-
- def test_hypervisor_hostname_caching(self):
- # Make sure that the first hostname is always returned
- class FakeConn(object):
- def getHostname(self):
- pass
-
- def getLibVersion(self):
- return 99999
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- conn._wrapped_conn = FakeConn()
- self.mox.StubOutWithMock(conn._wrapped_conn, 'getHostname')
- conn._conn.getHostname().AndReturn('foo')
- conn._conn.getHostname().AndReturn('bar')
- self.mox.ReplayAll()
- self.assertEqual('foo', conn._get_hypervisor_hostname())
- self.assertEqual('foo', conn._get_hypervisor_hostname())
-
- def test_get_connection_serial(self):
-
- def get_conn_currency(driver):
- driver._conn.getLibVersion()
-
- def connect_with_block(*a, **k):
- # enough to allow another connect to run
- eventlet.sleep(0)
- self.connect_calls += 1
- return self.conn
-
- def fake_register(*a, **k):
- self.register_calls += 1
-
- self.connect_calls = 0
- self.register_calls = 0
- self.stubs.Set(libvirt_driver.LibvirtDriver,
- '_connect', connect_with_block)
- driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- self.stubs.Set(self.conn, 'domainEventRegisterAny', fake_register)
-
- # call serially
- get_conn_currency(driver)
- get_conn_currency(driver)
- self.assertEqual(self.connect_calls, 1)
- self.assertEqual(self.register_calls, 1)
-
- def test_get_connection_concurrency(self):
-
- def get_conn_currency(driver):
- driver._conn.getLibVersion()
-
- def connect_with_block(*a, **k):
- # enough to allow another connect to run
- eventlet.sleep(0)
- self.connect_calls += 1
- return self.conn
-
- def fake_register(*a, **k):
- self.register_calls += 1
-
- self.connect_calls = 0
- self.register_calls = 0
- self.stubs.Set(libvirt_driver.LibvirtDriver,
- '_connect', connect_with_block)
- driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- self.stubs.Set(self.conn, 'domainEventRegisterAny', fake_register)
-
- # call concurrently
- thr1 = eventlet.spawn(get_conn_currency, driver=driver)
- thr2 = eventlet.spawn(get_conn_currency, driver=driver)
-
- # let threads run
- eventlet.sleep(0)
-
- thr1.wait()
- thr2.wait()
- self.assertEqual(self.connect_calls, 1)
- self.assertEqual(self.register_calls, 1)
-
- def test_post_live_migration_at_destination_with_block_device_info(self):
- # Preparing mocks
- mock_domain = self.mox.CreateMock(libvirt.virDomain)
- self.resultXML = None
-
- def fake_none(*args, **kwargs):
- return
-
- def fake_getLibVersion():
- return 9011
-
- def fake_getCapabilities():
- return """
- <capabilities>
- <host>
- <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
- <cpu>
- <arch>x86_64</arch>
- <model>Penryn</model>
- <vendor>Intel</vendor>
- <topology sockets='1' cores='2' threads='1'/>
- <feature name='xtpr'/>
- </cpu>
- </host>
- </capabilities>
- """
-
- def fake_to_xml(context, instance, network_info, disk_info,
- image_meta=None, rescue=None,
- block_device_info=None, write_to_disk=False):
- if image_meta is None:
- image_meta = {}
- conf = conn._get_guest_config(instance, network_info, image_meta,
- disk_info, rescue, block_device_info)
- self.resultXML = conf.to_xml()
- return self.resultXML
-
- def fake_lookup_name(instance_name):
- return mock_domain
-
- def fake_defineXML(xml):
- return
-
- def fake_baselineCPU(cpu, flag):
- return """<cpu mode='custom' match='exact'>
- <model fallback='allow'>Westmere</model>
- <vendor>Intel</vendor>
- <feature policy='require' name='aes'/>
- </cpu>
- """
-
- network_info = _fake_network_info(self.stubs, 1)
- self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
- getCapabilities=fake_getCapabilities,
- getVersion=lambda: 1005001)
- instance_ref = self.test_instance
- instance_ref['image_ref'] = 123456 # we send an int to test sha1 call
- instance = objects.Instance(**instance_ref)
- flavor = instance.get_flavor()
- flavor.extra_specs = {}
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
- libvirt_driver.LibvirtDriver._conn.getCapabilities = \
- fake_getCapabilities
- libvirt_driver.LibvirtDriver._conn.getVersion = lambda: 1005001
- libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
- libvirt_driver.LibvirtDriver._conn.defineXML = fake_defineXML
- libvirt_driver.LibvirtDriver._conn.baselineCPU = fake_baselineCPU
-
- self.mox.ReplayAll()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn,
- '_get_guest_xml',
- fake_to_xml)
- self.stubs.Set(conn,
- '_lookup_by_name',
- fake_lookup_name)
- block_device_info = {'block_device_mapping':
- driver_block_device.convert_volumes([
- fake_block_device.FakeDbBlockDeviceDict(
- {'id': 1, 'guest_format': None,
- 'boot_index': 0,
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'device_name': '/dev/vda',
- 'disk_bus': 'virtio',
- 'device_type': 'disk',
- 'delete_on_termination': False}),
- ])}
- block_device_info['block_device_mapping'][0]['connection_info'] = (
- {'driver_volume_type': 'iscsi'})
- with contextlib.nested(
- mock.patch.object(
- driver_block_device.DriverVolumeBlockDevice, 'save'),
- mock.patch.object(objects.Flavor, 'get_by_id',
- return_value=flavor),
- mock.patch.object(objects.Instance, 'save')):
- conn.post_live_migration_at_destination(
- self.context, instance, network_info, True,
- block_device_info=block_device_info)
- self.assertTrue('fake' in self.resultXML)
- self.assertTrue(
- block_device_info['block_device_mapping'][0].save.called)
-
- def test_create_propagates_exceptions(self):
- self.flags(virt_type='lxc', group='libvirt')
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- instance = objects.Instance(id=1, uuid='fake-uuid',
- image_ref='my_fake_image')
-
- with contextlib.nested(
- mock.patch.object(conn, '_create_domain_setup_lxc'),
- mock.patch.object(conn, '_create_domain_cleanup_lxc'),
- mock.patch.object(conn, '_is_booted_from_volume',
- return_value=False),
- mock.patch.object(conn, 'plug_vifs'),
- mock.patch.object(conn, 'firewall_driver'),
- mock.patch.object(conn, '_create_domain',
- side_effect=exception.NovaException),
- mock.patch.object(conn, 'cleanup')):
- self.assertRaises(exception.NovaException,
- conn._create_domain_and_network,
- self.context,
- 'xml',
- instance, None)
-
- def test_create_without_pause(self):
- self.flags(virt_type='lxc', group='libvirt')
-
- @contextlib.contextmanager
- def fake_lxc_disk_handler(*args, **kwargs):
- yield
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- instance = objects.Instance(id=1, uuid='fake-uuid')
-
- with contextlib.nested(
- mock.patch.object(conn, '_lxc_disk_handler',
- side_effect=fake_lxc_disk_handler),
- mock.patch.object(conn, 'plug_vifs'),
- mock.patch.object(conn, 'firewall_driver'),
- mock.patch.object(conn, '_create_domain'),
- mock.patch.object(conn, 'cleanup')) as (
- _handler, cleanup, firewall_driver, create, plug_vifs):
- domain = conn._create_domain_and_network(self.context, 'xml',
- instance, None)
- self.assertEqual(0, create.call_args_list[0][1]['launch_flags'])
- self.assertEqual(0, domain.resume.call_count)
-
- def _test_create_with_network_events(self, neutron_failure=None,
- power_on=True):
- generated_events = []
-
- def wait_timeout():
- event = mock.MagicMock()
- if neutron_failure == 'timeout':
- raise eventlet.timeout.Timeout()
- elif neutron_failure == 'error':
- event.status = 'failed'
- else:
- event.status = 'completed'
- return event
-
- def fake_prepare(instance, event_name):
- m = mock.MagicMock()
- m.instance = instance
- m.event_name = event_name
- m.wait.side_effect = wait_timeout
- generated_events.append(m)
- return m
-
- virtapi = manager.ComputeVirtAPI(mock.MagicMock())
- prepare = virtapi._compute.instance_events.prepare_for_instance_event
- prepare.side_effect = fake_prepare
- conn = libvirt_driver.LibvirtDriver(virtapi, False)
-
- instance = objects.Instance(id=1, uuid='fake-uuid')
- vifs = [{'id': 'vif1', 'active': False},
- {'id': 'vif2', 'active': False}]
-
- @mock.patch.object(conn, 'plug_vifs')
- @mock.patch.object(conn, 'firewall_driver')
- @mock.patch.object(conn, '_create_domain')
- @mock.patch.object(conn, 'cleanup')
- def test_create(cleanup, create, fw_driver, plug_vifs):
- domain = conn._create_domain_and_network(self.context, 'xml',
- instance, vifs,
- power_on=power_on)
- plug_vifs.assert_called_with(instance, vifs)
-
- flag = self._get_launch_flags(conn, vifs, power_on=power_on)
- self.assertEqual(flag,
- create.call_args_list[0][1]['launch_flags'])
- if flag:
- domain.resume.assert_called_once_with()
- if neutron_failure and CONF.vif_plugging_is_fatal:
- cleanup.assert_called_once_with(self.context,
- instance, network_info=vifs,
- block_device_info=None)
-
- test_create()
-
- if utils.is_neutron() and CONF.vif_plugging_timeout and power_on:
- prepare.assert_has_calls([
- mock.call(instance, 'network-vif-plugged-vif1'),
- mock.call(instance, 'network-vif-plugged-vif2')])
- for event in generated_events:
- if neutron_failure and generated_events.index(event) != 0:
- self.assertEqual(0, event.call_count)
- elif (neutron_failure == 'error' and
- not CONF.vif_plugging_is_fatal):
- event.wait.assert_called_once_with()
- else:
- self.assertEqual(0, prepare.call_count)
-
- @mock.patch('nova.utils.is_neutron', return_value=True)
- def test_create_with_network_events_neutron(self, is_neutron):
- self._test_create_with_network_events()
-
- @mock.patch('nova.utils.is_neutron', return_value=True)
- def test_create_with_network_events_neutron_power_off(self,
- is_neutron):
- # Tests that we don't wait for events if we don't start the instance.
- self._test_create_with_network_events(power_on=False)
-
- @mock.patch('nova.utils.is_neutron', return_value=True)
- def test_create_with_network_events_neutron_nowait(self, is_neutron):
- self.flags(vif_plugging_timeout=0)
- self._test_create_with_network_events()
-
- @mock.patch('nova.utils.is_neutron', return_value=True)
- def test_create_with_network_events_neutron_failed_nonfatal_timeout(
- self, is_neutron):
- self.flags(vif_plugging_is_fatal=False)
- self._test_create_with_network_events(neutron_failure='timeout')
-
- @mock.patch('nova.utils.is_neutron', return_value=True)
- def test_create_with_network_events_neutron_failed_fatal_timeout(
- self, is_neutron):
- self.assertRaises(exception.VirtualInterfaceCreateException,
- self._test_create_with_network_events,
- neutron_failure='timeout')
-
- @mock.patch('nova.utils.is_neutron', return_value=True)
- def test_create_with_network_events_neutron_failed_nonfatal_error(
- self, is_neutron):
- self.flags(vif_plugging_is_fatal=False)
- self._test_create_with_network_events(neutron_failure='error')
-
- @mock.patch('nova.utils.is_neutron', return_value=True)
- def test_create_with_network_events_neutron_failed_fatal_error(
- self, is_neutron):
- self.assertRaises(exception.VirtualInterfaceCreateException,
- self._test_create_with_network_events,
- neutron_failure='error')
-
- @mock.patch('nova.utils.is_neutron', return_value=False)
- def test_create_with_network_events_non_neutron(self, is_neutron):
- self._test_create_with_network_events()
-
- @mock.patch('nova.volume.encryptors.get_encryption_metadata')
- @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
- def test_create_with_bdm(self, get_info_from_bdm, get_encryption_metadata):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
- mock_dom = mock.MagicMock()
- mock_encryption_meta = mock.MagicMock()
- get_encryption_metadata.return_value = mock_encryption_meta
-
- fake_xml = """
- <domain>
- <name>instance-00000001</name>
- <memory>1048576</memory>
- <vcpu>1</vcpu>
- <devices>
- <disk type='file' device='disk'>
- <driver name='qemu' type='raw' cache='none'/>
- <source file='/path/fake-volume1'/>
- <target dev='vda' bus='virtio'/>
- </disk>
- </devices>
- </domain>
- """
- fake_volume_id = "fake-volume-id"
- connection_info = {"driver_volume_type": "fake",
- "data": {"access_mode": "rw",
- "volume_id": fake_volume_id}}
-
- def fake_getitem(*args, **kwargs):
- fake_bdm = {'connection_info': connection_info,
- 'mount_device': '/dev/vda'}
- return fake_bdm.get(args[0])
-
- mock_volume = mock.MagicMock()
- mock_volume.__getitem__.side_effect = fake_getitem
- bdi = {'block_device_mapping': [mock_volume]}
- network_info = [network_model.VIF(id='1'),
- network_model.VIF(id='2', active=True)]
-
- with contextlib.nested(
- mock.patch.object(conn, '_get_volume_encryptor'),
- mock.patch.object(conn, 'plug_vifs'),
- mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
- mock.patch.object(conn.firewall_driver,
- 'prepare_instance_filter'),
- mock.patch.object(conn, '_create_domain'),
- mock.patch.object(conn.firewall_driver, 'apply_instance_filter'),
- ) as (get_volume_encryptor, plug_vifs, setup_basic_filtering,
- prepare_instance_filter, create_domain, apply_instance_filter):
- create_domain.return_value = mock_dom
-
- domain = conn._create_domain_and_network(self.context, fake_xml,
- instance, network_info,
- block_device_info=bdi)
-
- get_encryption_metadata.assert_called_once_with(self.context,
- conn._volume_api, fake_volume_id, connection_info)
- get_volume_encryptor.assert_called_once_with(connection_info,
- mock_encryption_meta)
- plug_vifs.assert_called_once_with(instance, network_info)
- setup_basic_filtering.assert_called_once_with(instance,
- network_info)
- prepare_instance_filter.assert_called_once_with(instance,
- network_info)
- flags = self._get_launch_flags(conn, network_info)
- create_domain.assert_called_once_with(fake_xml, instance=instance,
- launch_flags=flags,
- power_on=True)
- self.assertEqual(mock_dom, domain)
-
- def test_get_guest_storage_config(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- test_instance = copy.deepcopy(self.test_instance)
- test_instance["default_swap_device"] = None
- instance = objects.Instance(**test_instance)
- flavor = instance.get_flavor()
- flavor.extra_specs = {}
- conn_info = {'driver_volume_type': 'fake', 'data': {}}
- bdi = {'block_device_mapping':
- driver_block_device.convert_volumes([
- fake_block_device.FakeDbBlockDeviceDict({
- 'id': 1,
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'device_name': '/dev/vdc'})
- ])}
- bdm = bdi['block_device_mapping'][0]
- bdm['connection_info'] = conn_info
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance, bdi)
- mock_conf = mock.MagicMock(source_path='fake')
-
- with contextlib.nested(
- mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
- 'save'),
- mock.patch.object(conn, '_connect_volume'),
- mock.patch.object(conn, '_get_volume_config',
- return_value=mock_conf),
- mock.patch.object(conn, '_set_cache_mode')
- ) as (volume_save, connect_volume, get_volume_config, set_cache_mode):
- devices = conn._get_guest_storage_config(instance, None,
- disk_info, False, bdi, flavor)
-
- self.assertEqual(3, len(devices))
- self.assertEqual('/dev/vdb', instance.default_ephemeral_device)
- self.assertIsNone(instance.default_swap_device)
- connect_volume.assert_called_with(bdm['connection_info'],
- {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
- get_volume_config.assert_called_with(bdm['connection_info'],
- {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
- self.assertEqual(1, volume_save.call_count)
- self.assertEqual(3, set_cache_mode.call_count)
-
- def test_get_neutron_events(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- network_info = [network_model.VIF(id='1'),
- network_model.VIF(id='2', active=True)]
- events = conn._get_neutron_events(network_info)
- self.assertEqual([('network-vif-plugged', '1')], events)
-
- def test_unplug_vifs_ignores_errors(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
- with mock.patch.object(conn, 'vif_driver') as vif_driver:
- vif_driver.unplug.side_effect = exception.AgentError(
- method='unplug')
- conn._unplug_vifs('inst', [1], ignore_errors=True)
- vif_driver.unplug.assert_called_once_with('inst', 1)
-
- def test_unplug_vifs_reports_errors(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
- with mock.patch.object(conn, 'vif_driver') as vif_driver:
- vif_driver.unplug.side_effect = exception.AgentError(
- method='unplug')
- self.assertRaises(exception.AgentError,
- conn.unplug_vifs, 'inst', [1])
- vif_driver.unplug.assert_called_once_with('inst', 1)
-
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
- def test_cleanup_pass_with_no_mount_device(self, undefine, unplug):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
- conn.firewall_driver = mock.Mock()
- conn._disconnect_volume = mock.Mock()
- fake_inst = {'name': 'foo'}
- fake_bdms = [{'connection_info': 'foo',
- 'mount_device': None}]
- with mock.patch('nova.virt.driver'
- '.block_device_info_get_mapping',
- return_value=fake_bdms):
- conn.cleanup('ctxt', fake_inst, 'netinfo', destroy_disks=False)
- self.assertTrue(conn._disconnect_volume.called)
-
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
- def test_cleanup_wants_vif_errors_ignored(self, undefine, unplug):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
- fake_inst = {'name': 'foo'}
- with mock.patch.object(conn._conn, 'lookupByName') as lookup:
- lookup.return_value = fake_inst
- # NOTE(danms): Make unplug cause us to bail early, since
- # we only care about how it was called
- unplug.side_effect = test.TestingException
- self.assertRaises(test.TestingException,
- conn.cleanup, 'ctxt', fake_inst, 'netinfo')
- unplug.assert_called_once_with(fake_inst, 'netinfo', True)
-
- @mock.patch('nova.virt.driver.block_device_info_get_mapping')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
- '_get_serial_ports_from_instance')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
- def test_cleanup_serial_console_enabled(
- self, undefine, get_ports,
- block_device_info_get_mapping):
- self.flags(enabled="True", group='serial_console')
- instance = 'i1'
- network_info = {}
- bdm_info = {}
- firewall_driver = mock.MagicMock()
-
- get_ports.return_value = iter([('127.0.0.1', 10000)])
- block_device_info_get_mapping.return_value = ()
-
- # We want to ensure undefine_domain is called after
- # lookup_domain.
- def undefine_domain(instance):
- get_ports.side_effect = Exception("domain undefined")
- undefine.side_effect = undefine_domain
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
- conn.firewall_driver = firewall_driver
- conn.cleanup(
- 'ctx', instance, network_info,
- block_device_info=bdm_info,
- destroy_disks=False, destroy_vifs=False)
-
- get_ports.assert_called_once_with(instance)
- undefine.assert_called_once_with(instance)
- firewall_driver.unfilter_instance.assert_called_once_with(
- instance, network_info=network_info)
- block_device_info_get_mapping.assert_called_once_with(bdm_info)
-
- def test_swap_volume(self):
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
-
- mock_dom = mock.MagicMock()
-
- with mock.patch.object(drvr._conn, 'defineXML',
- create=True) as mock_define:
- xmldoc = "<domain/>"
- srcfile = "/first/path"
- dstfile = "/second/path"
-
- mock_dom.XMLDesc.return_value = xmldoc
- mock_dom.isPersistent.return_value = True
- mock_dom.blockJobInfo.return_value = {}
-
- drvr._swap_volume(mock_dom, srcfile, dstfile, 1)
-
- mock_dom.XMLDesc.assert_called_once_with(
- fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
- fakelibvirt.VIR_DOMAIN_XML_SECURE)
- mock_dom.blockRebase.assert_called_once_with(
- srcfile, dstfile, 0,
- libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
- libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)
- mock_dom.blockResize.assert_called_once_with(
- srcfile, 1 * units.Gi / units.Ki)
- mock_define.assert_called_once_with(xmldoc)
-
- def test_live_snapshot(self):
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
-
- mock_dom = mock.MagicMock()
-
- with contextlib.nested(
- mock.patch.object(drvr._conn, 'defineXML', create=True),
- mock.patch.object(fake_libvirt_utils, 'get_disk_size'),
- mock.patch.object(fake_libvirt_utils, 'get_disk_backing_file'),
- mock.patch.object(fake_libvirt_utils, 'create_cow_image'),
- mock.patch.object(fake_libvirt_utils, 'chown'),
- mock.patch.object(fake_libvirt_utils, 'extract_snapshot'),
- ) as (mock_define, mock_size, mock_backing, mock_create_cow,
- mock_chown, mock_snapshot):
-
- xmldoc = "<domain/>"
- srcfile = "/first/path"
- dstfile = "/second/path"
- bckfile = "/other/path"
- dltfile = dstfile + ".delta"
-
- mock_dom.XMLDesc.return_value = xmldoc
- mock_dom.isPersistent.return_value = True
- mock_size.return_value = 1004009
- mock_backing.return_value = bckfile
-
- drvr._live_snapshot(mock_dom, srcfile, dstfile, "qcow2")
-
- mock_dom.XMLDesc.assert_called_once_with(
- fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
- fakelibvirt.VIR_DOMAIN_XML_SECURE)
- mock_dom.blockRebase.assert_called_once_with(
- srcfile, dltfile, 0,
- libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
- libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
- libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
-
- mock_size.assert_called_once_with(srcfile)
- mock_backing.assert_called_once_with(srcfile, basename=False)
- mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009)
- mock_chown.assert_called_once_with(dltfile, os.getuid())
- mock_snapshot.assert_called_once_with(dltfile, "qcow2",
- dstfile, "qcow2")
- mock_define.assert_called_once_with(xmldoc)
-
- @mock.patch.object(greenthread, "spawn")
- def test_live_migration_hostname_valid(self, mock_spawn):
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- drvr.live_migration(self.context, self.test_instance,
- "host1.example.com",
- lambda x: x,
- lambda x: x)
- self.assertEqual(1, mock_spawn.call_count)
-
- @mock.patch.object(greenthread, "spawn")
- @mock.patch.object(fake_libvirt_utils, "is_valid_hostname")
- def test_live_migration_hostname_invalid(self, mock_hostname, mock_spawn):
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- mock_hostname.return_value = False
- self.assertRaises(exception.InvalidHostname,
- drvr.live_migration,
- self.context, self.test_instance,
- "foo/?com=/bin/sh",
- lambda x: x,
- lambda x: x)
-
- @mock.patch('os.path.exists', return_value=True)
- @mock.patch('tempfile.mkstemp')
- @mock.patch('os.close', return_value=None)
- def test_check_instance_shared_storage_local_raw(self,
- mock_close,
- mock_mkstemp,
- mock_exists):
- instance_uuid = str(uuid.uuid4())
- self.flags(images_type='raw', group='libvirt')
- self.flags(instances_path='/tmp')
- mock_mkstemp.return_value = (-1,
- '/tmp/{0}/file'.format(instance_uuid))
- driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- instance = fake_instance.fake_instance_obj(self.context)
- temp_file = driver.check_instance_shared_storage_local(self.context,
- instance)
- self.assertEqual('/tmp/{0}/file'.format(instance_uuid),
- temp_file['filename'])
-
- def test_check_instance_shared_storage_local_rbd(self):
- self.flags(images_type='rbd', group='libvirt')
- driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- instance = fake_instance.fake_instance_obj(self.context)
- self.assertIsNone(driver.
- check_instance_shared_storage_local(self.context,
- instance))
-
-
-class HostStateTestCase(test.NoDBTestCase):
-
- cpu_info = ('{"vendor": "Intel", "model": "pentium", "arch": "i686", '
- '"features": ["ssse3", "monitor", "pni", "sse2", "sse", '
- '"fxsr", "clflush", "pse36", "pat", "cmov", "mca", "pge", '
- '"mtrr", "sep", "apic"], '
- '"topology": {"cores": "1", "threads": "1", "sockets": "1"}}')
- instance_caps = [(arch.X86_64, "kvm", "hvm"),
- (arch.I686, "kvm", "hvm")]
- pci_devices = [{
- "dev_id": "pci_0000_04_00_3",
- "address": "0000:04:10.3",
- "product_id": '1521',
- "vendor_id": '8086',
- "dev_type": 'type-PF',
- "phys_function": None}]
- numa_topology = hardware.VirtNUMAHostTopology(
- cells=[hardware.VirtNUMATopologyCellUsage(
- 1, set([1, 2]), 1024),
- hardware.VirtNUMATopologyCellUsage(
- 2, set([3, 4]), 1024)])
-
- class FakeConnection(libvirt_driver.LibvirtDriver):
- """Fake connection object."""
- def __init__(self):
- super(HostStateTestCase.FakeConnection,
- self).__init__(fake.FakeVirtAPI(), True)
-
- def _get_vcpu_total(self):
- return 1
-
- def _get_vcpu_used(self):
- return 0
-
- def _get_cpu_info(self):
- return HostStateTestCase.cpu_info
-
- def _get_disk_over_committed_size_total(self):
- return 0
-
- def _get_local_gb_info(self):
- return {'total': 100, 'used': 20, 'free': 80}
-
- def _get_memory_mb_total(self):
- return 497
-
- def _get_memory_mb_used(self):
- return 88
-
- def _get_hypervisor_type(self):
- return 'QEMU'
-
- def _get_hypervisor_version(self):
- return 13091
-
- def _get_hypervisor_hostname(self):
- return 'compute1'
-
- def get_host_uptime(self):
- return ('10:01:16 up 1:36, 6 users, '
- 'load average: 0.21, 0.16, 0.19')
-
- def _get_disk_available_least(self):
- return 13091
-
- def _get_instance_capabilities(self):
- return HostStateTestCase.instance_caps
-
- def _get_pci_passthrough_devices(self):
- return jsonutils.dumps(HostStateTestCase.pci_devices)
-
- def _get_host_numa_topology(self):
- return HostStateTestCase.numa_topology
-
- def test_update_status(self):
- drvr = HostStateTestCase.FakeConnection()
-
- stats = drvr.get_available_resource("compute1")
- self.assertEqual(stats["vcpus"], 1)
- self.assertEqual(stats["memory_mb"], 497)
- self.assertEqual(stats["local_gb"], 100)
- self.assertEqual(stats["vcpus_used"], 0)
- self.assertEqual(stats["memory_mb_used"], 88)
- self.assertEqual(stats["local_gb_used"], 20)
- self.assertEqual(stats["hypervisor_type"], 'QEMU')
- self.assertEqual(stats["hypervisor_version"], 13091)
- self.assertEqual(stats["hypervisor_hostname"], 'compute1')
- self.assertEqual(jsonutils.loads(stats["cpu_info"]),
- {"vendor": "Intel", "model": "pentium",
- "arch": arch.I686,
- "features": ["ssse3", "monitor", "pni", "sse2", "sse",
- "fxsr", "clflush", "pse36", "pat", "cmov",
- "mca", "pge", "mtrr", "sep", "apic"],
- "topology": {"cores": "1", "threads": "1", "sockets": "1"}
- })
- self.assertEqual(stats["disk_available_least"], 80)
- self.assertEqual(jsonutils.loads(stats["pci_passthrough_devices"]),
- HostStateTestCase.pci_devices)
- self.assertThat(hardware.VirtNUMAHostTopology.from_json(
- stats['numa_topology'])._to_dict(),
- matchers.DictMatches(
- HostStateTestCase.numa_topology._to_dict()))
-
-
-class LibvirtDriverTestCase(test.NoDBTestCase):
- """Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
- def setUp(self):
- super(LibvirtDriverTestCase, self).setUp()
- self.libvirtconnection = libvirt_driver.LibvirtDriver(
- fake.FakeVirtAPI(), read_only=True)
- self.context = context.get_admin_context()
-
- def _create_instance(self, params=None):
- """Create a test instance."""
- if not params:
- params = {}
-
- sys_meta = {
- 'instance_type_memory_mb': 512,
- 'instance_type_swap': 0,
- 'instance_type_vcpu_weight': None,
- 'instance_type_root_gb': 1,
- 'instance_type_id': 2,
- 'instance_type_name': u'm1.tiny',
- 'instance_type_ephemeral_gb': 0,
- 'instance_type_rxtx_factor': 1.0,
- 'instance_type_flavorid': u'1',
- 'instance_type_vcpus': 1
- }
-
- inst = {}
- inst['id'] = 1
- inst['uuid'] = '52d3b512-1152-431f-a8f7-28f0288a622b'
- inst['os_type'] = 'linux'
- inst['image_ref'] = '1'
- inst['reservation_id'] = 'r-fakeres'
- inst['user_id'] = 'fake'
- inst['project_id'] = 'fake'
- inst['instance_type_id'] = 2
- inst['ami_launch_index'] = 0
- inst['host'] = 'host1'
- inst['root_gb'] = 10
- inst['ephemeral_gb'] = 20
- inst['config_drive'] = True
- inst['kernel_id'] = 2
- inst['ramdisk_id'] = 3
- inst['key_data'] = 'ABCDEFG'
- inst['system_metadata'] = sys_meta
-
- inst.update(params)
-
- return objects.Instance(**inst)
-
- def test_migrate_disk_and_power_off_exception(self):
- """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
- .migrate_disk_and_power_off.
- """
-
- self.counter = 0
- self.checked_shared_storage = False
-
- def fake_get_instance_disk_info(instance,
- block_device_info=None):
- return '[]'
-
- def fake_destroy(instance):
- pass
-
- def fake_get_host_ip_addr():
- return '10.0.0.1'
-
- def fake_execute(*args, **kwargs):
- self.counter += 1
- if self.counter == 1:
- assert False, "intentional failure"
-
- def fake_os_path_exists(path):
- return True
-
- def fake_is_storage_shared(dest, inst_base):
- self.checked_shared_storage = True
- return False
-
- self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
- fake_get_instance_disk_info)
- self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
- self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
- fake_get_host_ip_addr)
- self.stubs.Set(self.libvirtconnection, '_is_storage_shared_with',
- fake_is_storage_shared)
- self.stubs.Set(utils, 'execute', fake_execute)
- self.stubs.Set(os.path, 'exists', fake_os_path_exists)
-
- ins_ref = self._create_instance()
- flavor = {'root_gb': 10, 'ephemeral_gb': 20}
-
- self.assertRaises(AssertionError,
- self.libvirtconnection.migrate_disk_and_power_off,
- None, ins_ref, '10.0.0.2', flavor, None)
-
- def test_migrate_disk_and_power_off(self):
- """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
- .migrate_disk_and_power_off.
- """
-
- disk_info = [{'type': 'qcow2', 'path': '/test/disk',
- 'virt_disk_size': '10737418240',
- 'backing_file': '/base/disk',
- 'disk_size': '83886080'},
- {'type': 'raw', 'path': '/test/disk.local',
- 'virt_disk_size': '10737418240',
- 'backing_file': '/base/disk.local',
- 'disk_size': '83886080'}]
- disk_info_text = jsonutils.dumps(disk_info)
-
- def fake_get_instance_disk_info(instance,
- block_device_info=None):
- return disk_info_text
-
- def fake_destroy(instance):
- pass
-
- def fake_get_host_ip_addr():
- return '10.0.0.1'
-
- def fake_execute(*args, **kwargs):
- pass
-
- self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
- fake_get_instance_disk_info)
- self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
- self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
- fake_get_host_ip_addr)
- self.stubs.Set(utils, 'execute', fake_execute)
-
- ins_ref = self._create_instance()
- flavor = {'root_gb': 10, 'ephemeral_gb': 20}
-
- # dest is different host case
- out = self.libvirtconnection.migrate_disk_and_power_off(
- None, ins_ref, '10.0.0.2', flavor, None)
- self.assertEqual(out, disk_info_text)
-
- # dest is same host case
- out = self.libvirtconnection.migrate_disk_and_power_off(
- None, ins_ref, '10.0.0.1', flavor, None)
- self.assertEqual(out, disk_info_text)
-
- @mock.patch('nova.utils.execute')
- @mock.patch('nova.virt.libvirt.utils.copy_image')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
- '.get_instance_disk_info')
- def test_migrate_disk_and_power_off_swap(self, mock_get_disk_info,
- get_host_ip_addr,
- mock_destroy,
- mock_copy_image,
- mock_execute):
- """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
- .migrate_disk_and_power_off.
- """
- self.copy_or_move_swap_called = False
-
- # 10G root and 512M swap disk
- disk_info = [{'disk_size': 1, 'type': 'qcow2',
- 'virt_disk_size': 10737418240, 'path': '/test/disk',
- 'backing_file': '/base/disk'},
- {'disk_size': 1, 'type': 'qcow2',
- 'virt_disk_size': 536870912, 'path': '/test/disk.swap',
- 'backing_file': '/base/swap_512'}]
- disk_info_text = jsonutils.dumps(disk_info)
- mock_get_disk_info.return_value = disk_info_text
- get_host_ip_addr.return_value = '10.0.0.1'
-
- def fake_copy_image(*args, **kwargs):
- # disk.swap should not be touched since it is skipped over
- if '/test/disk.swap' in list(args):
- self.copy_or_move_swap_called = True
-
- def fake_execute(*args, **kwargs):
- # disk.swap should not be touched since it is skipped over
- if set(['mv', '/test/disk.swap']).issubset(list(args)):
- self.copy_or_move_swap_called = True
-
- mock_copy_image.side_effect = fake_copy_image
- mock_execute.side_effect = fake_execute
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- # Original instance config
- instance = self._create_instance({'root_gb': 10,
- 'ephemeral_gb': 0})
-
- # Re-size fake instance to 20G root and 1024M swap disk
- flavor = {'root_gb': 20, 'ephemeral_gb': 0, 'swap': 1024}
-
- # Destination is same host
- out = conn.migrate_disk_and_power_off(None, instance, '10.0.0.1',
- flavor, None)
-
- mock_get_disk_info.assert_called_once_with(instance.name,
- block_device_info=None)
- self.assertTrue(get_host_ip_addr.called)
- mock_destroy.assert_called_once_with(instance)
- self.assertFalse(self.copy_or_move_swap_called)
- self.assertEqual(disk_info_text, out)
-
- def test_migrate_disk_and_power_off_lvm(self):
- """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
- .migrate_disk_and_power_off.
- """
-
- self.flags(images_type='lvm', group='libvirt')
- disk_info = [{'type': 'raw', 'path': '/dev/vg/disk',
- 'disk_size': '83886080'},
- {'type': 'raw', 'path': '/dev/disk.local',
- 'disk_size': '83886080'}]
- disk_info_text = jsonutils.dumps(disk_info)
-
- def fake_get_instance_disk_info(instance, xml=None,
- block_device_info=None):
- return disk_info_text
-
- def fake_destroy(instance):
- pass
-
- def fake_get_host_ip_addr():
- return '10.0.0.1'
-
- def fake_execute(*args, **kwargs):
- pass
-
- self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
- fake_get_instance_disk_info)
- self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
- self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
- fake_get_host_ip_addr)
- self.stubs.Set(utils, 'execute', fake_execute)
-
- ins_ref = self._create_instance()
- flavor = {'root_gb': 10, 'ephemeral_gb': 20}
-
- # Migration is not implemented for LVM backed instances
- self.assertRaises(exception.MigrationPreCheckError,
- self.libvirtconnection.migrate_disk_and_power_off,
- None, ins_ref, '10.0.0.1', flavor, None)
-
- def test_migrate_disk_and_power_off_resize_error(self):
- instance = self._create_instance()
- flavor = {'root_gb': 5}
- self.assertRaises(
- exception.InstanceFaultRollback,
- self.libvirtconnection.migrate_disk_and_power_off,
- 'ctx', instance, '10.0.0.1', flavor, None)
-
- def test_wait_for_running(self):
- def fake_get_info(instance):
- if instance['name'] == "not_found":
- raise exception.InstanceNotFound(instance_id=instance['uuid'])
- elif instance['name'] == "running":
- return {'state': power_state.RUNNING}
- else:
- return {'state': power_state.SHUTDOWN}
-
- self.stubs.Set(self.libvirtconnection, 'get_info',
- fake_get_info)
-
- # instance not found case
- self.assertRaises(exception.InstanceNotFound,
- self.libvirtconnection._wait_for_running,
- {'name': 'not_found',
- 'uuid': 'not_found_uuid'})
-
- # instance is running case
- self.assertRaises(loopingcall.LoopingCallDone,
- self.libvirtconnection._wait_for_running,
- {'name': 'running',
- 'uuid': 'running_uuid'})
-
- # else case
- self.libvirtconnection._wait_for_running({'name': 'else',
- 'uuid': 'other_uuid'})
-
- def test_disk_size_from_instance_disk_info(self):
- inst = {'root_gb': 10, 'ephemeral_gb': 20, 'swap_gb': 30}
-
- info = {'path': '/path/disk'}
- self.assertEqual(10 * units.Gi,
- self.libvirtconnection._disk_size_from_instance(inst, info))
-
- info = {'path': '/path/disk.local'}
- self.assertEqual(20 * units.Gi,
- self.libvirtconnection._disk_size_from_instance(inst, info))
-
- info = {'path': '/path/disk.swap'}
- self.assertEqual(0,
- self.libvirtconnection._disk_size_from_instance(inst, info))
-
- @mock.patch('nova.utils.execute')
- def test_disk_raw_to_qcow2(self, mock_execute):
- path = '/test/disk'
- _path_qcow = path + '_qcow'
-
- self.libvirtconnection._disk_raw_to_qcow2(path)
- mock_execute.assert_has_calls([
- mock.call('qemu-img', 'convert', '-f', 'raw',
- '-O', 'qcow2', path, _path_qcow),
- mock.call('mv', _path_qcow, path)])
-
- @mock.patch('nova.utils.execute')
- def test_disk_qcow2_to_raw(self, mock_execute):
- path = '/test/disk'
- _path_raw = path + '_raw'
-
- self.libvirtconnection._disk_qcow2_to_raw(path)
- mock_execute.assert_has_calls([
- mock.call('qemu-img', 'convert', '-f', 'qcow2',
- '-O', 'raw', path, _path_raw),
- mock.call('mv', _path_raw, path)])
-
- @mock.patch('nova.virt.disk.api.extend')
- def test_disk_resize_raw(self, mock_extend):
- info = {'type': 'raw', 'path': '/test/disk'}
-
- self.libvirtconnection._disk_resize(info, 50)
- mock_extend.assert_called_once_with(info['path'], 50, use_cow=False)
-
- @mock.patch('nova.virt.disk.api.can_resize_image')
- @mock.patch('nova.virt.disk.api.is_image_partitionless')
- @mock.patch('nova.virt.disk.api.extend')
- def test_disk_resize_qcow2(
- self, mock_extend, mock_can_resize, mock_is_partitionless):
- info = {'type': 'qcow2', 'path': '/test/disk'}
-
- with contextlib.nested(
- mock.patch.object(
- self.libvirtconnection, '_disk_qcow2_to_raw'),
- mock.patch.object(
- self.libvirtconnection, '_disk_raw_to_qcow2'))\
- as (mock_disk_qcow2_to_raw, mock_disk_raw_to_qcow2):
-
- mock_can_resize.return_value = True
- mock_is_partitionless.return_value = True
-
- self.libvirtconnection._disk_resize(info, 50)
-
- mock_disk_qcow2_to_raw.assert_called_once_with(info['path'])
- mock_extend.assert_called_once_with(
- info['path'], 50, use_cow=False)
- mock_disk_raw_to_qcow2.assert_called_once_with(info['path'])
-
- def _test_finish_migration(self, power_on, resize_instance=False):
- """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
- .finish_migration.
- """
-
- disk_info = [{'type': 'qcow2', 'path': '/test/disk',
- 'local_gb': 10, 'backing_file': '/base/disk'},
- {'type': 'raw', 'path': '/test/disk.local',
- 'local_gb': 10, 'backing_file': '/base/disk.local'}]
- disk_info_text = jsonutils.dumps(disk_info)
- powered_on = power_on
- self.fake_create_domain_called = False
- self.fake_disk_resize_called = False
-
- def fake_to_xml(context, instance, network_info, disk_info,
- image_meta=None, rescue=None,
- block_device_info=None, write_to_disk=False):
- return ""
-
- def fake_plug_vifs(instance, network_info):
- pass
-
- def fake_create_image(context, inst,
- disk_mapping, suffix='',
- disk_images=None, network_info=None,
- block_device_info=None, inject_files=True):
- self.assertFalse(inject_files)
-
- def fake_create_domain_and_network(
- context, xml, instance, network_info,
- block_device_info=None, power_on=True, reboot=False,
- vifs_already_plugged=False):
- self.fake_create_domain_called = True
- self.assertEqual(powered_on, power_on)
- self.assertTrue(vifs_already_plugged)
-
- def fake_enable_hairpin(instance):
- pass
-
- def fake_execute(*args, **kwargs):
- pass
-
- def fake_get_info(instance):
- if powered_on:
- return {'state': power_state.RUNNING}
- else:
- return {'state': power_state.SHUTDOWN}
-
- def fake_disk_resize(info, size):
- self.fake_disk_resize_called = True
-
- self.flags(use_cow_images=True)
- self.stubs.Set(self.libvirtconnection, '_disk_resize',
- fake_disk_resize)
- self.stubs.Set(self.libvirtconnection, '_get_guest_xml', fake_to_xml)
- self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
- self.stubs.Set(self.libvirtconnection, '_create_image',
- fake_create_image)
- self.stubs.Set(self.libvirtconnection, '_create_domain_and_network',
- fake_create_domain_and_network)
- self.stubs.Set(self.libvirtconnection, '_enable_hairpin',
- fake_enable_hairpin)
- self.stubs.Set(utils, 'execute', fake_execute)
- fw = base_firewall.NoopFirewallDriver()
- self.stubs.Set(self.libvirtconnection, 'firewall_driver', fw)
- self.stubs.Set(self.libvirtconnection, 'get_info',
- fake_get_info)
-
- ins_ref = self._create_instance()
-
- self.libvirtconnection.finish_migration(
- context.get_admin_context(), None, ins_ref,
- disk_info_text, [], None,
- resize_instance, None, power_on)
- self.assertTrue(self.fake_create_domain_called)
- self.assertEqual(
- resize_instance, self.fake_disk_resize_called)
-
- def test_finish_migration_resize(self):
- self._test_finish_migration(True, resize_instance=True)
-
- def test_finish_migration_power_on(self):
- self._test_finish_migration(True)
-
- def test_finish_migration_power_off(self):
- self._test_finish_migration(False)
-
- def _test_finish_revert_migration(self, power_on):
- """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
- .finish_revert_migration.
- """
- powered_on = power_on
- self.fake_create_domain_called = False
-
- def fake_execute(*args, **kwargs):
- pass
-
- def fake_plug_vifs(instance, network_info):
- pass
-
- def fake_create_domain(xml, instance=None, launch_flags=0,
- power_on=True):
- self.fake_create_domain_called = True
- self.assertEqual(powered_on, power_on)
- return mock.MagicMock()
-
- def fake_enable_hairpin(instance):
- pass
-
- def fake_get_info(instance):
- if powered_on:
- return {'state': power_state.RUNNING}
- else:
- return {'state': power_state.SHUTDOWN}
-
- def fake_to_xml(context, instance, network_info, disk_info,
- image_meta=None, rescue=None,
- block_device_info=None):
- return ""
-
- self.stubs.Set(self.libvirtconnection, '_get_guest_xml', fake_to_xml)
- self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
- self.stubs.Set(utils, 'execute', fake_execute)
- fw = base_firewall.NoopFirewallDriver()
- self.stubs.Set(self.libvirtconnection, 'firewall_driver', fw)
- self.stubs.Set(self.libvirtconnection, '_create_domain',
- fake_create_domain)
- self.stubs.Set(self.libvirtconnection, '_enable_hairpin',
- fake_enable_hairpin)
- self.stubs.Set(self.libvirtconnection, 'get_info',
- fake_get_info)
-
- with utils.tempdir() as tmpdir:
- self.flags(instances_path=tmpdir)
- ins_ref = self._create_instance()
- os.mkdir(os.path.join(tmpdir, ins_ref['name']))
- libvirt_xml_path = os.path.join(tmpdir,
- ins_ref['name'],
- 'libvirt.xml')
- f = open(libvirt_xml_path, 'w')
- f.close()
-
- self.libvirtconnection.finish_revert_migration(
- context.get_admin_context(), ins_ref,
- [], None, power_on)
- self.assertTrue(self.fake_create_domain_called)
-
- def test_finish_revert_migration_power_on(self):
- self._test_finish_revert_migration(True)
-
- def test_finish_revert_migration_power_off(self):
- self._test_finish_revert_migration(False)
-
- def _test_finish_revert_migration_after_crash(self, backup_made=True,
- del_inst_failed=False):
- class FakeLoopingCall:
- def start(self, *a, **k):
- return self
-
- def wait(self):
- return None
- context = 'fake_context'
-
- self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
- self.mox.StubOutWithMock(os.path, 'exists')
- self.mox.StubOutWithMock(shutil, 'rmtree')
- self.mox.StubOutWithMock(utils, 'execute')
-
- self.stubs.Set(blockinfo, 'get_disk_info', lambda *a: None)
- self.stubs.Set(self.libvirtconnection, '_get_guest_xml',
- lambda *a, **k: None)
- self.stubs.Set(self.libvirtconnection, '_create_domain_and_network',
- lambda *a: None)
- self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
- lambda *a, **k: FakeLoopingCall())
-
- libvirt_utils.get_instance_path({}).AndReturn('/fake/foo')
- os.path.exists('/fake/foo_resize').AndReturn(backup_made)
- if backup_made:
- if del_inst_failed:
- os_error = OSError(errno.ENOENT, 'No such file or directory')
- shutil.rmtree('/fake/foo').AndRaise(os_error)
- else:
- shutil.rmtree('/fake/foo')
- utils.execute('mv', '/fake/foo_resize', '/fake/foo')
-
- self.mox.ReplayAll()
-
- self.libvirtconnection.finish_revert_migration(context, {}, [])
-
- def test_finish_revert_migration_after_crash(self):
- self._test_finish_revert_migration_after_crash(backup_made=True)
-
- def test_finish_revert_migration_after_crash_before_new(self):
- self._test_finish_revert_migration_after_crash(backup_made=True)
-
- def test_finish_revert_migration_after_crash_before_backup(self):
- self._test_finish_revert_migration_after_crash(backup_made=False)
-
- def test_finish_revert_migration_after_crash_delete_failed(self):
- self._test_finish_revert_migration_after_crash(backup_made=True,
- del_inst_failed=True)
-
- def test_cleanup_failed_migration(self):
- self.mox.StubOutWithMock(shutil, 'rmtree')
- shutil.rmtree('/fake/inst')
- self.mox.ReplayAll()
- self.libvirtconnection._cleanup_failed_migration('/fake/inst')
-
- def test_confirm_migration(self):
- ins_ref = self._create_instance()
-
- self.mox.StubOutWithMock(self.libvirtconnection, "_cleanup_resize")
- self.libvirtconnection._cleanup_resize(ins_ref,
- _fake_network_info(self.stubs, 1))
-
- self.mox.ReplayAll()
- self.libvirtconnection.confirm_migration("migration_ref", ins_ref,
- _fake_network_info(self.stubs, 1))
-
- def test_cleanup_resize_same_host(self):
- CONF.set_override('policy_dirs', [])
- ins_ref = self._create_instance({'host': CONF.host})
-
- def fake_os_path_exists(path):
- return True
-
- self.stubs.Set(os.path, 'exists', fake_os_path_exists)
-
- self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
- self.mox.StubOutWithMock(utils, 'execute')
-
- libvirt_utils.get_instance_path(ins_ref,
- forceold=True).AndReturn('/fake/inst')
- utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
- attempts=5)
-
- self.mox.ReplayAll()
- self.libvirtconnection._cleanup_resize(ins_ref,
- _fake_network_info(self.stubs, 1))
-
- def test_cleanup_resize_not_same_host(self):
- CONF.set_override('policy_dirs', [])
- host = 'not' + CONF.host
- ins_ref = self._create_instance({'host': host})
-
- def fake_os_path_exists(path):
- return True
-
- def fake_undefine_domain(instance):
- pass
-
- def fake_unplug_vifs(instance, network_info, ignore_errors=False):
- pass
-
- def fake_unfilter_instance(instance, network_info):
- pass
-
- self.stubs.Set(os.path, 'exists', fake_os_path_exists)
- self.stubs.Set(self.libvirtconnection, '_undefine_domain',
- fake_undefine_domain)
- self.stubs.Set(self.libvirtconnection, 'unplug_vifs',
- fake_unplug_vifs)
- self.stubs.Set(self.libvirtconnection.firewall_driver,
- 'unfilter_instance', fake_unfilter_instance)
-
- self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
- self.mox.StubOutWithMock(utils, 'execute')
-
- libvirt_utils.get_instance_path(ins_ref,
- forceold=True).AndReturn('/fake/inst')
- utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
- attempts=5)
-
- self.mox.ReplayAll()
- self.libvirtconnection._cleanup_resize(ins_ref,
- _fake_network_info(self.stubs, 1))
-
- def test_get_instance_disk_info_exception(self):
- instance_name = "fake-instance-name"
-
- class FakeExceptionDomain(FakeVirtDomain):
- def __init__(self):
- super(FakeExceptionDomain, self).__init__()
-
- def XMLDesc(self, *args):
- raise libvirt.libvirtError("Libvirt error")
-
- def fake_lookup_by_name(instance_name):
- return FakeExceptionDomain()
-
- self.stubs.Set(self.libvirtconnection, '_lookup_by_name',
- fake_lookup_by_name)
- self.assertRaises(exception.InstanceNotFound,
- self.libvirtconnection.get_instance_disk_info,
- instance_name)
-
- @mock.patch('os.path.exists')
- @mock.patch('nova.virt.libvirt.lvm.list_volumes')
- def test_lvm_disks(self, listlvs, exists):
- instance = objects.Instance(uuid='fake-uuid', id=1)
- self.flags(images_volume_group='vols', group='libvirt')
- exists.return_value = True
- listlvs.return_value = ['fake-uuid_foo',
- 'other-uuid_foo']
- disks = self.libvirtconnection._lvm_disks(instance)
- self.assertEqual(['/dev/vols/fake-uuid_foo'], disks)
-
- def test_is_booted_from_volume(self):
- func = libvirt_driver.LibvirtDriver._is_booted_from_volume
- instance, disk_mapping = {}, {}
-
- self.assertTrue(func(instance, disk_mapping))
- disk_mapping['disk'] = 'map'
- self.assertTrue(func(instance, disk_mapping))
-
- instance['image_ref'] = 'uuid'
- self.assertFalse(func(instance, disk_mapping))
-
- @mock.patch('nova.virt.netutils.get_injected_network_template')
- @mock.patch('nova.virt.disk.api.inject_data')
- def _test_inject_data(self, driver_params, disk_params,
- disk_inject_data, inj_network,
- called=True):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
-
- class ImageBackend(object):
- path = '/path'
-
- def check_image_exists(self):
- if self.path == '/fail/path':
- return False
- return True
-
- def fake_inj_network(*args, **kwds):
- return args[0] or None
- inj_network.side_effect = fake_inj_network
-
- image_backend = ImageBackend()
- image_backend.path = disk_params[0]
-
- with mock.patch.object(
- conn.image_backend,
- 'image',
- return_value=image_backend):
- self.flags(inject_partition=0, group='libvirt')
-
- conn._inject_data(**driver_params)
-
- if called:
- disk_inject_data.assert_called_once_with(
- *disk_params,
- partition=None, mandatory=('files',), use_cow=True)
-
- self.assertEqual(disk_inject_data.called, called)
-
- def _test_inject_data_default_driver_params(self):
- return {
- 'instance': {
- 'uuid': 'fake-uuid',
- 'id': 1,
- 'kernel_id': None,
- 'image_ref': 1,
- 'key_data': None,
- 'metadata': None
- },
- 'network_info': None,
- 'admin_pass': None,
- 'files': None,
- 'suffix': ''
- }
-
- def test_inject_data_adminpass(self):
- self.flags(inject_password=True, group='libvirt')
- driver_params = self._test_inject_data_default_driver_params()
- driver_params['admin_pass'] = 'foobar'
- disk_params = [
- '/path', # injection_path
- None, # key
- None, # net
- None, # metadata
- 'foobar', # admin_pass
- None, # files
- ]
- self._test_inject_data(driver_params, disk_params)
-
- # Test with the configuration setted to false.
- self.flags(inject_password=False, group='libvirt')
- self._test_inject_data(driver_params, disk_params, called=False)
-
- def test_inject_data_key(self):
- driver_params = self._test_inject_data_default_driver_params()
- driver_params['instance']['key_data'] = 'key-content'
-
- self.flags(inject_key=True, group='libvirt')
- disk_params = [
- '/path', # injection_path
- 'key-content', # key
- None, # net
- None, # metadata
- None, # admin_pass
- None, # files
- ]
- self._test_inject_data(driver_params, disk_params)
-
- # Test with the configuration setted to false.
- self.flags(inject_key=False, group='libvirt')
- self._test_inject_data(driver_params, disk_params, called=False)
-
- def test_inject_data_metadata(self):
- driver_params = self._test_inject_data_default_driver_params()
- driver_params['instance']['metadata'] = 'data'
- disk_params = [
- '/path', # injection_path
- None, # key
- None, # net
- 'data', # metadata
- None, # admin_pass
- None, # files
- ]
- self._test_inject_data(driver_params, disk_params)
-
- def test_inject_data_files(self):
- driver_params = self._test_inject_data_default_driver_params()
- driver_params['files'] = ['file1', 'file2']
- disk_params = [
- '/path', # injection_path
- None, # key
- None, # net
- None, # metadata
- None, # admin_pass
- ['file1', 'file2'], # files
- ]
- self._test_inject_data(driver_params, disk_params)
-
- def test_inject_data_net(self):
- driver_params = self._test_inject_data_default_driver_params()
- driver_params['network_info'] = {'net': 'eno1'}
- disk_params = [
- '/path', # injection_path
- None, # key
- {'net': 'eno1'}, # net
- None, # metadata
- None, # admin_pass
- None, # files
- ]
- self._test_inject_data(driver_params, disk_params)
-
- def test_inject_not_exist_image(self):
- driver_params = self._test_inject_data_default_driver_params()
- disk_params = [
- '/fail/path', # injection_path
- 'key-content', # key
- None, # net
- None, # metadata
- None, # admin_pass
- None, # files
- ]
- self._test_inject_data(driver_params, disk_params, called=False)
-
- def _test_attach_detach_interface(self, method, power_state,
- expected_flags):
- instance = self._create_instance()
- network_info = _fake_network_info(self.stubs, 1)
- domain = FakeVirtDomain()
- self.mox.StubOutWithMock(self.libvirtconnection, '_lookup_by_name')
- self.mox.StubOutWithMock(self.libvirtconnection.firewall_driver,
- 'setup_basic_filtering')
- self.mox.StubOutWithMock(domain, 'attachDeviceFlags')
- self.mox.StubOutWithMock(domain, 'info')
- self.mox.StubOutWithMock(objects.Flavor, 'get_by_id')
-
- self.libvirtconnection._lookup_by_name(
- 'instance-00000001').AndReturn(domain)
- if method == 'attach_interface':
- self.libvirtconnection.firewall_driver.setup_basic_filtering(
- instance, [network_info[0]])
-
- fake_flavor = instance.get_flavor()
-
- objects.Flavor.get_by_id(mox.IgnoreArg(), 2).AndReturn(fake_flavor)
-
- if method == 'attach_interface':
- fake_image_meta = {'id': instance['image_ref']}
- elif method == 'detach_interface':
- fake_image_meta = None
- expected = self.libvirtconnection.vif_driver.get_config(
- instance, network_info[0], fake_image_meta, fake_flavor,
- CONF.libvirt.virt_type)
-
- self.mox.StubOutWithMock(self.libvirtconnection.vif_driver,
- 'get_config')
- self.libvirtconnection.vif_driver.get_config(
- instance, network_info[0],
- fake_image_meta,
- mox.IsA(objects.Flavor),
- CONF.libvirt.virt_type).AndReturn(expected)
- domain.info().AndReturn([power_state])
- if method == 'attach_interface':
- domain.attachDeviceFlags(expected.to_xml(), expected_flags)
- elif method == 'detach_interface':
- domain.detachDeviceFlags(expected.to_xml(), expected_flags)
-
- self.mox.ReplayAll()
- if method == 'attach_interface':
- self.libvirtconnection.attach_interface(
- instance, fake_image_meta, network_info[0])
- elif method == 'detach_interface':
- self.libvirtconnection.detach_interface(
- instance, network_info[0])
- self.mox.VerifyAll()
-
- def test_attach_interface_with_running_instance(self):
- self._test_attach_detach_interface(
- 'attach_interface', power_state.RUNNING,
- expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG |
- libvirt.VIR_DOMAIN_AFFECT_LIVE))
-
- def test_attach_interface_with_pause_instance(self):
- self._test_attach_detach_interface(
- 'attach_interface', power_state.PAUSED,
- expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG |
- libvirt.VIR_DOMAIN_AFFECT_LIVE))
-
- def test_attach_interface_with_shutdown_instance(self):
- self._test_attach_detach_interface(
- 'attach_interface', power_state.SHUTDOWN,
- expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG))
-
- def test_detach_interface_with_running_instance(self):
- self._test_attach_detach_interface(
- 'detach_interface', power_state.RUNNING,
- expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG |
- libvirt.VIR_DOMAIN_AFFECT_LIVE))
-
- def test_detach_interface_with_pause_instance(self):
- self._test_attach_detach_interface(
- 'detach_interface', power_state.PAUSED,
- expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG |
- libvirt.VIR_DOMAIN_AFFECT_LIVE))
-
- def test_detach_interface_with_shutdown_instance(self):
- self._test_attach_detach_interface(
- 'detach_interface', power_state.SHUTDOWN,
- expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG))
-
- def test_rescue(self):
- instance = self._create_instance({'config_drive': None})
- dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
- "<devices>"
- "<disk type='file'><driver name='qemu' type='raw'/>"
- "<source file='/test/disk'/>"
- "<target dev='vda' bus='virtio'/></disk>"
- "<disk type='file'><driver name='qemu' type='qcow2'/>"
- "<source file='/test/disk.local'/>"
- "<target dev='vdb' bus='virtio'/></disk>"
- "</devices></domain>")
- network_info = _fake_network_info(self.stubs, 1)
-
- self.mox.StubOutWithMock(self.libvirtconnection,
- '_get_existing_domain_xml')
- self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
- self.mox.StubOutWithMock(imagebackend.Backend, 'image')
- self.mox.StubOutWithMock(imagebackend.Image, 'cache')
- self.mox.StubOutWithMock(self.libvirtconnection, '_get_guest_xml')
- self.mox.StubOutWithMock(self.libvirtconnection, '_destroy')
- self.mox.StubOutWithMock(self.libvirtconnection, '_create_domain')
-
- self.libvirtconnection._get_existing_domain_xml(mox.IgnoreArg(),
- mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
- libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
- libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg())
- imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
- ).AndReturn(fake_imagebackend.Raw())
- imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
- ).AndReturn(fake_imagebackend.Raw())
- imagebackend.Backend.image(instance, 'disk.rescue', 'default'
- ).AndReturn(fake_imagebackend.Raw())
- imagebackend.Image.cache(context=mox.IgnoreArg(),
- fetch_func=mox.IgnoreArg(),
- filename=mox.IgnoreArg(),
- image_id=mox.IgnoreArg(),
- project_id=mox.IgnoreArg(),
- user_id=mox.IgnoreArg()).MultipleTimes()
-
- imagebackend.Image.cache(context=mox.IgnoreArg(),
- fetch_func=mox.IgnoreArg(),
- filename=mox.IgnoreArg(),
- image_id=mox.IgnoreArg(),
- project_id=mox.IgnoreArg(),
- size=None, user_id=mox.IgnoreArg())
-
- image_meta = {'id': 'fake', 'name': 'fake'}
- self.libvirtconnection._get_guest_xml(mox.IgnoreArg(), instance,
- network_info, mox.IgnoreArg(),
- image_meta, rescue=mox.IgnoreArg(),
- write_to_disk=mox.IgnoreArg()
- ).AndReturn(dummyxml)
-
- self.libvirtconnection._destroy(instance)
- self.libvirtconnection._create_domain(mox.IgnoreArg())
-
- self.mox.ReplayAll()
-
- rescue_password = 'fake_password'
-
- self.libvirtconnection.rescue(self.context, instance,
- network_info, image_meta, rescue_password)
- self.mox.VerifyAll()
-
- def test_rescue_config_drive(self):
- instance = self._create_instance()
- uuid = instance.uuid
- configdrive_path = uuid + '/disk.config.rescue'
- dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
- "<devices>"
- "<disk type='file'><driver name='qemu' type='raw'/>"
- "<source file='/test/disk'/>"
- "<target dev='vda' bus='virtio'/></disk>"
- "<disk type='file'><driver name='qemu' type='qcow2'/>"
- "<source file='/test/disk.local'/>"
- "<target dev='vdb' bus='virtio'/></disk>"
- "</devices></domain>")
- network_info = _fake_network_info(self.stubs, 1)
-
- self.mox.StubOutWithMock(self.libvirtconnection,
- '_get_existing_domain_xml')
- self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
- self.mox.StubOutWithMock(imagebackend.Backend, 'image')
- self.mox.StubOutWithMock(imagebackend.Image, 'cache')
- self.mox.StubOutWithMock(instance_metadata.InstanceMetadata,
- '__init__')
- self.mox.StubOutWithMock(configdrive, 'ConfigDriveBuilder')
- self.mox.StubOutWithMock(configdrive.ConfigDriveBuilder, 'make_drive')
- self.mox.StubOutWithMock(self.libvirtconnection, '_get_guest_xml')
- self.mox.StubOutWithMock(self.libvirtconnection, '_destroy')
- self.mox.StubOutWithMock(self.libvirtconnection, '_create_domain')
-
- self.libvirtconnection._get_existing_domain_xml(mox.IgnoreArg(),
- mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
- libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
- libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg())
-
- imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
- ).AndReturn(fake_imagebackend.Raw())
- imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
- ).AndReturn(fake_imagebackend.Raw())
- imagebackend.Backend.image(instance, 'disk.rescue', 'default'
- ).AndReturn(fake_imagebackend.Raw())
-
- imagebackend.Image.cache(context=mox.IgnoreArg(),
- fetch_func=mox.IgnoreArg(),
- filename=mox.IgnoreArg(),
- image_id=mox.IgnoreArg(),
- project_id=mox.IgnoreArg(),
- user_id=mox.IgnoreArg()).MultipleTimes()
-
- imagebackend.Image.cache(context=mox.IgnoreArg(),
- fetch_func=mox.IgnoreArg(),
- filename=mox.IgnoreArg(),
- image_id=mox.IgnoreArg(),
- project_id=mox.IgnoreArg(),
- size=None, user_id=mox.IgnoreArg())
-
- instance_metadata.InstanceMetadata.__init__(mox.IgnoreArg(),
- content=mox.IgnoreArg(),
- extra_md=mox.IgnoreArg(),
- network_info=mox.IgnoreArg())
- cdb = self.mox.CreateMockAnything()
- m = configdrive.ConfigDriveBuilder(instance_md=mox.IgnoreArg())
- m.AndReturn(cdb)
- # __enter__ and __exit__ are required by "with"
- cdb.__enter__().AndReturn(cdb)
- cdb.make_drive(mox.Regex(configdrive_path))
- cdb.__exit__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()
- ).AndReturn(None)
- image_meta = {'id': 'fake', 'name': 'fake'}
- self.libvirtconnection._get_guest_xml(mox.IgnoreArg(), instance,
- network_info, mox.IgnoreArg(),
- image_meta, rescue=mox.IgnoreArg(),
- write_to_disk=mox.IgnoreArg()
- ).AndReturn(dummyxml)
- self.libvirtconnection._destroy(instance)
- self.libvirtconnection._create_domain(mox.IgnoreArg())
-
- self.mox.ReplayAll()
-
- rescue_password = 'fake_password'
-
- self.libvirtconnection.rescue(self.context, instance, network_info,
- image_meta, rescue_password)
- self.mox.VerifyAll()
-
- @mock.patch('shutil.rmtree')
- @mock.patch('nova.utils.execute')
- @mock.patch('os.path.exists')
- @mock.patch('nova.virt.libvirt.utils.get_instance_path')
- def test_delete_instance_files(self, get_instance_path, exists, exe,
- shutil):
- lv = self.libvirtconnection
- get_instance_path.return_value = '/path'
- instance = objects.Instance(uuid='fake-uuid', id=1)
-
- exists.side_effect = [False, False, True, False]
-
- result = lv.delete_instance_files(instance)
- get_instance_path.assert_called_with(instance)
- exe.assert_called_with('mv', '/path', '/path_del')
- shutil.assert_called_with('/path_del')
- self.assertTrue(result)
-
- @mock.patch('shutil.rmtree')
- @mock.patch('nova.utils.execute')
- @mock.patch('os.path.exists')
- @mock.patch('nova.virt.libvirt.utils.get_instance_path')
- def test_delete_instance_files_resize(self, get_instance_path, exists,
- exe, shutil):
- lv = self.libvirtconnection
- get_instance_path.return_value = '/path'
- instance = objects.Instance(uuid='fake-uuid', id=1)
-
- nova.utils.execute.side_effect = [Exception(), None]
- exists.side_effect = [False, False, True, False]
-
- result = lv.delete_instance_files(instance)
- get_instance_path.assert_called_with(instance)
- expected = [mock.call('mv', '/path', '/path_del'),
- mock.call('mv', '/path_resize', '/path_del')]
- self.assertEqual(expected, exe.mock_calls)
- shutil.assert_called_with('/path_del')
- self.assertTrue(result)
-
- @mock.patch('shutil.rmtree')
- @mock.patch('nova.utils.execute')
- @mock.patch('os.path.exists')
- @mock.patch('nova.virt.libvirt.utils.get_instance_path')
- def test_delete_instance_files_failed(self, get_instance_path, exists, exe,
- shutil):
- lv = self.libvirtconnection
- get_instance_path.return_value = '/path'
- instance = objects.Instance(uuid='fake-uuid', id=1)
-
- exists.side_effect = [False, False, True, True]
-
- result = lv.delete_instance_files(instance)
- get_instance_path.assert_called_with(instance)
- exe.assert_called_with('mv', '/path', '/path_del')
- shutil.assert_called_with('/path_del')
- self.assertFalse(result)
-
- @mock.patch('shutil.rmtree')
- @mock.patch('nova.utils.execute')
- @mock.patch('os.path.exists')
- @mock.patch('nova.virt.libvirt.utils.get_instance_path')
- def test_delete_instance_files_mv_failed(self, get_instance_path, exists,
- exe, shutil):
- lv = self.libvirtconnection
- get_instance_path.return_value = '/path'
- instance = objects.Instance(uuid='fake-uuid', id=1)
-
- nova.utils.execute.side_effect = Exception()
- exists.side_effect = [True, True]
-
- result = lv.delete_instance_files(instance)
- get_instance_path.assert_called_with(instance)
- expected = [mock.call('mv', '/path', '/path_del'),
- mock.call('mv', '/path_resize', '/path_del')] * 2
- self.assertEqual(expected, exe.mock_calls)
- self.assertFalse(result)
-
- @mock.patch('shutil.rmtree')
- @mock.patch('nova.utils.execute')
- @mock.patch('os.path.exists')
- @mock.patch('nova.virt.libvirt.utils.get_instance_path')
- def test_delete_instance_files_resume(self, get_instance_path, exists,
- exe, shutil):
- lv = self.libvirtconnection
- get_instance_path.return_value = '/path'
- instance = objects.Instance(uuid='fake-uuid', id=1)
-
- nova.utils.execute.side_effect = Exception()
- exists.side_effect = [False, False, True, False]
-
- result = lv.delete_instance_files(instance)
- get_instance_path.assert_called_with(instance)
- expected = [mock.call('mv', '/path', '/path_del'),
- mock.call('mv', '/path_resize', '/path_del')] * 2
- self.assertEqual(expected, exe.mock_calls)
- self.assertTrue(result)
-
- @mock.patch('shutil.rmtree')
- @mock.patch('nova.utils.execute')
- @mock.patch('os.path.exists')
- @mock.patch('nova.virt.libvirt.utils.get_instance_path')
- def test_delete_instance_files_none(self, get_instance_path, exists,
- exe, shutil):
- lv = self.libvirtconnection
- get_instance_path.return_value = '/path'
- instance = objects.Instance(uuid='fake-uuid', id=1)
-
- nova.utils.execute.side_effect = Exception()
- exists.side_effect = [False, False, False, False]
-
- result = lv.delete_instance_files(instance)
- get_instance_path.assert_called_with(instance)
- expected = [mock.call('mv', '/path', '/path_del'),
- mock.call('mv', '/path_resize', '/path_del')] * 2
- self.assertEqual(expected, exe.mock_calls)
- self.assertEqual(0, len(shutil.mock_calls))
- self.assertTrue(result)
-
- @mock.patch('shutil.rmtree')
- @mock.patch('nova.utils.execute')
- @mock.patch('os.path.exists')
- @mock.patch('nova.virt.libvirt.utils.get_instance_path')
- def test_delete_instance_files_concurrent(self, get_instance_path, exists,
- exe, shutil):
- lv = self.libvirtconnection
- get_instance_path.return_value = '/path'
- instance = objects.Instance(uuid='fake-uuid', id=1)
-
- nova.utils.execute.side_effect = [Exception(), Exception(), None]
- exists.side_effect = [False, False, True, False]
-
- result = lv.delete_instance_files(instance)
- get_instance_path.assert_called_with(instance)
- expected = [mock.call('mv', '/path', '/path_del'),
- mock.call('mv', '/path_resize', '/path_del')]
- expected.append(expected[0])
- self.assertEqual(expected, exe.mock_calls)
- shutil.assert_called_with('/path_del')
- self.assertTrue(result)
-
- def _assert_on_id_map(self, idmap, klass, start, target, count):
- self.assertIsInstance(idmap, klass)
- self.assertEqual(start, idmap.start)
- self.assertEqual(target, idmap.target)
- self.assertEqual(count, idmap.count)
-
- def test_get_id_maps(self):
- self.flags(virt_type="lxc", group="libvirt")
- CONF.libvirt.virt_type = "lxc"
- CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
- CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
-
- idmaps = conn._get_guest_idmaps()
-
- self.assertEqual(len(idmaps), 4)
- self._assert_on_id_map(idmaps[0],
- vconfig.LibvirtConfigGuestUIDMap,
- 0, 10000, 1)
- self._assert_on_id_map(idmaps[1],
- vconfig.LibvirtConfigGuestUIDMap,
- 1, 20000, 10)
- self._assert_on_id_map(idmaps[2],
- vconfig.LibvirtConfigGuestGIDMap,
- 0, 10000, 1)
- self._assert_on_id_map(idmaps[3],
- vconfig.LibvirtConfigGuestGIDMap,
- 1, 20000, 10)
-
- def test_get_id_maps_not_lxc(self):
- CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
- CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
-
- idmaps = conn._get_guest_idmaps()
-
- self.assertEqual(0, len(idmaps))
-
- def test_get_id_maps_only_uid(self):
- self.flags(virt_type="lxc", group="libvirt")
- CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
- CONF.libvirt.gid_maps = []
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
-
- idmaps = conn._get_guest_idmaps()
-
- self.assertEqual(2, len(idmaps))
- self._assert_on_id_map(idmaps[0],
- vconfig.LibvirtConfigGuestUIDMap,
- 0, 10000, 1)
- self._assert_on_id_map(idmaps[1],
- vconfig.LibvirtConfigGuestUIDMap,
- 1, 20000, 10)
-
- def test_get_id_maps_only_gid(self):
- self.flags(virt_type="lxc", group="libvirt")
- CONF.libvirt.uid_maps = []
- CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
-
- idmaps = conn._get_guest_idmaps()
-
- self.assertEqual(2, len(idmaps))
- self._assert_on_id_map(idmaps[0],
- vconfig.LibvirtConfigGuestGIDMap,
- 0, 10000, 1)
- self._assert_on_id_map(idmaps[1],
- vconfig.LibvirtConfigGuestGIDMap,
- 1, 20000, 10)
-
- def test_instance_on_disk(self):
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- instance = objects.Instance(uuid='fake-uuid', id=1)
- self.assertFalse(conn.instance_on_disk(instance))
-
- def test_instance_on_disk_rbd(self):
- self.flags(images_type='rbd', group='libvirt')
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- instance = objects.Instance(uuid='fake-uuid', id=1)
- self.assertTrue(conn.instance_on_disk(instance))
-
- @mock.patch("nova.objects.Flavor.get_by_id")
- @mock.patch("nova.compute.utils.get_image_metadata")
- def test_prepare_args_for_get_config(self, mock_image, mock_get):
- instance = self._create_instance()
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
-
- def fake_get_by_id(context, id):
- self.assertEqual('yes', context.read_deleted)
-
- mock_get.side_effect = fake_get_by_id
-
- conn._prepare_args_for_get_config(self.context, instance)
-
- mock_get.assert_called_once_with(self.context,
- instance['instance_type_id'])
-
-
-class LibvirtVolumeUsageTestCase(test.NoDBTestCase):
- """Test for LibvirtDriver.get_all_volume_usage."""
-
- def setUp(self):
- super(LibvirtVolumeUsageTestCase, self).setUp()
- self.conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.c = context.get_admin_context()
-
- self.ins_ref = objects.Instance(
- id=1729,
- uuid='875a8070-d0b9-4949-8b31-104d125c9a64'
- )
-
- # verify bootable volume device path also
- self.bdms = [{'volume_id': 1,
- 'device_name': '/dev/vde'},
- {'volume_id': 2,
- 'device_name': 'vda'}]
-
- def test_get_all_volume_usage(self):
- def fake_block_stats(instance_name, disk):
- return (169L, 688640L, 0L, 0L, -1L)
-
- self.stubs.Set(self.conn, 'block_stats', fake_block_stats)
- vol_usage = self.conn.get_all_volume_usage(self.c,
- [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
-
- expected_usage = [{'volume': 1,
- 'instance': self.ins_ref,
- 'rd_bytes': 688640L, 'wr_req': 0L,
- 'flush_operations': -1L, 'rd_req': 169L,
- 'wr_bytes': 0L},
- {'volume': 2,
- 'instance': self.ins_ref,
- 'rd_bytes': 688640L, 'wr_req': 0L,
- 'flush_operations': -1L, 'rd_req': 169L,
- 'wr_bytes': 0L}]
- self.assertEqual(vol_usage, expected_usage)
-
- def test_get_all_volume_usage_device_not_found(self):
- def fake_lookup(instance_name):
- raise libvirt.libvirtError('invalid path')
-
- self.stubs.Set(self.conn, '_lookup_by_name', fake_lookup)
- vol_usage = self.conn.get_all_volume_usage(self.c,
- [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
- self.assertEqual(vol_usage, [])
-
-
-class LibvirtNonblockingTestCase(test.NoDBTestCase):
- """Test libvirtd calls are nonblocking."""
-
- def setUp(self):
- super(LibvirtNonblockingTestCase, self).setUp()
- self.flags(connection_uri="test:///default",
- group='libvirt')
-
- def test_connection_to_primitive(self):
- # Test bug 962840.
- import nova.virt.libvirt.driver as libvirt_driver
- connection = libvirt_driver.LibvirtDriver('')
- connection.set_host_enabled = mock.Mock()
- jsonutils.to_primitive(connection._conn, convert_instances=True)
-
- def test_tpool_execute_calls_libvirt(self):
- conn = libvirt.virConnect()
- conn.is_expected = True
-
- self.mox.StubOutWithMock(eventlet.tpool, 'execute')
- eventlet.tpool.execute(
- libvirt.openAuth,
- 'test:///default',
- mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(conn)
- eventlet.tpool.execute(
- conn.domainEventRegisterAny,
- None,
- libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
- mox.IgnoreArg(),
- mox.IgnoreArg())
- if hasattr(libvirt.virConnect, 'registerCloseCallback'):
- eventlet.tpool.execute(
- conn.registerCloseCallback,
- mox.IgnoreArg(),
- mox.IgnoreArg())
- self.mox.ReplayAll()
-
- driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- c = driver._get_connection()
- self.assertEqual(True, c.is_expected)
-
-
-class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
- """Tests for libvirtDriver.volume_snapshot_create/delete."""
-
- def setUp(self):
- super(LibvirtVolumeSnapshotTestCase, self).setUp()
-
- self.conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.c = context.get_admin_context()
-
- self.flags(instance_name_template='instance-%s')
- self.flags(qemu_allowed_storage_drivers=[], group='libvirt')
-
- # creating instance
- self.inst = {}
- self.inst['uuid'] = uuidutils.generate_uuid()
- self.inst['id'] = '1'
-
- # create domain info
- self.dom_xml = """
- <domain type='kvm'>
- <devices>
- <disk type='file'>
- <source file='disk1_file'/>
- <target dev='vda' bus='virtio'/>
- <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
- </disk>
- <disk type='block'>
- <source dev='/path/to/dev/1'/>
- <target dev='vdb' bus='virtio' serial='1234'/>
- </disk>
- </devices>
- </domain>"""
-
- # alternate domain info with network-backed snapshot chain
- self.dom_netdisk_xml = """
- <domain type='kvm'>
- <devices>
- <disk type='file'>
- <source file='disk1_file'/>
- <target dev='vda' bus='virtio'/>
- <serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
- </disk>
- <disk type='network' device='disk'>
- <driver name='qemu' type='qcow2'/>
- <source protocol='gluster' name='vol1/root.img'>
- <host name='server1' port='24007'/>
- </source>
- <backingStore type='network' index='1'>
- <driver name='qemu' type='qcow2'/>
- <source protocol='gluster' name='vol1/snap.img'>
- <host name='server1' port='24007'/>
- </source>
- <backingStore type='network' index='2'>
- <driver name='qemu' type='qcow2'/>
- <source protocol='gluster' name='vol1/snap-b.img'>
- <host name='server1' port='24007'/>
- </source>
- <backingStore/>
- </backingStore>
- </backingStore>
- <target dev='vdb' bus='virtio'/>
- <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
- </disk>
- </devices>
- </domain>
- """
-
- self.create_info = {'type': 'qcow2',
- 'snapshot_id': '1234-5678',
- 'new_file': 'new-file'}
-
- self.volume_uuid = '0e38683e-f0af-418f-a3f1-6b67ea0f919d'
- self.snapshot_id = '9c3ca9f4-9f4e-4dba-bedd-5c5e4b52b162'
-
- self.delete_info_1 = {'type': 'qcow2',
- 'file_to_merge': 'snap.img',
- 'merge_target_file': None}
-
- self.delete_info_2 = {'type': 'qcow2',
- 'file_to_merge': 'snap.img',
- 'merge_target_file': 'other-snap.img'}
-
- self.delete_info_netdisk = {'type': 'qcow2',
- 'file_to_merge': 'snap.img',
- 'merge_target_file': 'root.img'}
-
- self.delete_info_invalid_type = {'type': 'made_up_type',
- 'file_to_merge': 'some_file',
- 'merge_target_file':
- 'some_other_file'}
-
- def tearDown(self):
- super(LibvirtVolumeSnapshotTestCase, self).tearDown()
-
- @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.'
- 'refresh_connection_info')
- @mock.patch('nova.objects.block_device.BlockDeviceMapping.'
- 'get_by_volume_id')
- def test_volume_refresh_connection_info(self, mock_get_by_volume_id,
- mock_refresh_connection_info):
- fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
- 'id': 123,
- 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sdb',
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'volume_id': 'fake-volume-id-1',
- 'connection_info': '{"fake": "connection_info"}'})
- mock_get_by_volume_id.return_value = fake_bdm
-
- self.conn._volume_refresh_connection_info(self.c, self.inst,
- self.volume_uuid)
-
- mock_get_by_volume_id.assert_called_once_with(self.c, self.volume_uuid)
- mock_refresh_connection_info.assert_called_once_with(self.c, self.inst,
- self.conn._volume_api, self.conn)
-
- def test_volume_snapshot_create(self, quiesce=True):
- """Test snapshot creation with file-based disk."""
- self.flags(instance_name_template='instance-%s')
- self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
- self.mox.StubOutWithMock(self.conn, '_volume_api')
-
- instance = objects.Instance(**self.inst)
-
- new_file = 'new-file'
-
- domain = FakeVirtDomain(fake_xml=self.dom_xml)
- self.mox.StubOutWithMock(domain, 'XMLDesc')
- self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
- domain.XMLDesc(0).AndReturn(self.dom_xml)
-
- snap_xml_src = (
- '<domainsnapshot>\n'
- ' <disks>\n'
- ' <disk name="disk1_file" snapshot="external" type="file">\n'
- ' <source file="new-file"/>\n'
- ' </disk>\n'
- ' <disk name="vdb" snapshot="no"/>\n'
- ' </disks>\n'
- '</domainsnapshot>\n')
-
- # Older versions of libvirt may be missing these.
- libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
- libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
-
- snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
- libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
- libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
-
- snap_flags_q = snap_flags | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE
-
- if quiesce:
- domain.snapshotCreateXML(snap_xml_src, snap_flags_q)
- else:
- domain.snapshotCreateXML(snap_xml_src, snap_flags_q).\
- AndRaise(libvirt.libvirtError('quiescing failed, no qemu-ga'))
- domain.snapshotCreateXML(snap_xml_src, snap_flags).AndReturn(0)
-
- self.mox.ReplayAll()
-
- self.conn._volume_snapshot_create(self.c, instance, domain,
- self.volume_uuid, new_file)
-
- self.mox.VerifyAll()
-
- def test_volume_snapshot_create_libgfapi(self, quiesce=True):
- """Test snapshot creation with libgfapi network disk."""
- self.flags(instance_name_template = 'instance-%s')
- self.flags(qemu_allowed_storage_drivers = ['gluster'], group='libvirt')
- self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
- self.mox.StubOutWithMock(self.conn, '_volume_api')
-
- self.dom_xml = """
- <domain type='kvm'>
- <devices>
- <disk type='file'>
- <source file='disk1_file'/>
- <target dev='vda' bus='virtio'/>
- <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
- </disk>
- <disk type='block'>
- <source protocol='gluster' name='gluster1/volume-1234'>
- <host name='127.3.4.5' port='24007'/>
- </source>
- <target dev='vdb' bus='virtio' serial='1234'/>
- </disk>
- </devices>
- </domain>"""
-
- instance = objects.Instance(**self.inst)
-
- new_file = 'new-file'
-
- domain = FakeVirtDomain(fake_xml=self.dom_xml)
- self.mox.StubOutWithMock(domain, 'XMLDesc')
- self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
- domain.XMLDesc(0).AndReturn(self.dom_xml)
-
- snap_xml_src = (
- '<domainsnapshot>\n'
- ' <disks>\n'
- ' <disk name="disk1_file" snapshot="external" type="file">\n'
- ' <source file="new-file"/>\n'
- ' </disk>\n'
- ' <disk name="vdb" snapshot="no"/>\n'
- ' </disks>\n'
- '</domainsnapshot>\n')
-
- # Older versions of libvirt may be missing these.
- libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
- libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
-
- snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
- libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
- libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
-
- snap_flags_q = snap_flags | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE
-
- if quiesce:
- domain.snapshotCreateXML(snap_xml_src, snap_flags_q)
- else:
- domain.snapshotCreateXML(snap_xml_src, snap_flags_q).\
- AndRaise(libvirt.libvirtError('quiescing failed, no qemu-ga'))
- domain.snapshotCreateXML(snap_xml_src, snap_flags).AndReturn(0)
-
- self.mox.ReplayAll()
-
- self.conn._volume_snapshot_create(self.c, instance, domain,
- self.volume_uuid, new_file)
-
- self.mox.VerifyAll()
-
- def test_volume_snapshot_create_noquiesce(self):
- self.test_volume_snapshot_create(quiesce=False)
-
- def test_volume_snapshot_create_outer_success(self):
- instance = objects.Instance(**self.inst)
-
- domain = FakeVirtDomain(fake_xml=self.dom_xml)
-
- self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
- self.mox.StubOutWithMock(self.conn, '_volume_api')
- self.mox.StubOutWithMock(self.conn, '_volume_snapshot_create')
-
- self.conn._lookup_by_name('instance-1').AndReturn(domain)
-
- self.conn._volume_snapshot_create(self.c,
- instance,
- domain,
- self.volume_uuid,
- self.create_info['new_file'])
-
- self.conn._volume_api.update_snapshot_status(
- self.c, self.create_info['snapshot_id'], 'creating')
-
- self.mox.StubOutWithMock(self.conn._volume_api, 'get_snapshot')
- self.conn._volume_api.get_snapshot(self.c,
- self.create_info['snapshot_id']).AndReturn({'status': 'available'})
- self.mox.StubOutWithMock(self.conn, '_volume_refresh_connection_info')
- self.conn._volume_refresh_connection_info(self.c, instance,
- self.volume_uuid)
-
- self.mox.ReplayAll()
-
- self.conn.volume_snapshot_create(self.c, instance, self.volume_uuid,
- self.create_info)
-
- def test_volume_snapshot_create_outer_failure(self):
- instance = objects.Instance(**self.inst)
-
- domain = FakeVirtDomain(fake_xml=self.dom_xml)
-
- self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
- self.mox.StubOutWithMock(self.conn, '_volume_api')
- self.mox.StubOutWithMock(self.conn, '_volume_snapshot_create')
-
- self.conn._lookup_by_name('instance-1').AndReturn(domain)
-
- self.conn._volume_snapshot_create(self.c,
- instance,
- domain,
- self.volume_uuid,
- self.create_info['new_file']).\
- AndRaise(exception.NovaException('oops'))
-
- self.conn._volume_api.update_snapshot_status(
- self.c, self.create_info['snapshot_id'], 'error')
-
- self.mox.ReplayAll()
-
- self.assertRaises(exception.NovaException,
- self.conn.volume_snapshot_create,
- self.c,
- instance,
- self.volume_uuid,
- self.create_info)
-
- def test_volume_snapshot_delete_1(self):
- """Deleting newest snapshot -- blockRebase."""
-
- instance = objects.Instance(**self.inst)
- snapshot_id = 'snapshot-1234'
-
- domain = FakeVirtDomain(fake_xml=self.dom_xml)
- self.mox.StubOutWithMock(domain, 'XMLDesc')
- domain.XMLDesc(0).AndReturn(self.dom_xml)
-
- self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
- self.mox.StubOutWithMock(self.conn, '_has_min_version')
- self.mox.StubOutWithMock(domain, 'blockRebase')
- self.mox.StubOutWithMock(domain, 'blockCommit')
- self.mox.StubOutWithMock(domain, 'blockJobInfo')
-
- self.conn._lookup_by_name('instance-%s' % instance['id']).\
- AndReturn(domain)
- self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
-
- domain.blockRebase('vda', 'snap.img', 0, 0)
-
- domain.blockJobInfo('vda', 0).AndReturn({'cur': 1, 'end': 1000})
- domain.blockJobInfo('vda', 0).AndReturn({'cur': 1000, 'end': 1000})
-
- self.mox.ReplayAll()
-
- self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
- snapshot_id, self.delete_info_1)
-
- self.mox.VerifyAll()
-
- def test_volume_snapshot_delete_2(self):
- """Deleting older snapshot -- blockCommit."""
-
- instance = objects.Instance(**self.inst)
- snapshot_id = 'snapshot-1234'
-
- domain = FakeVirtDomain(fake_xml=self.dom_xml)
- self.mox.StubOutWithMock(domain, 'XMLDesc')
- domain.XMLDesc(0).AndReturn(self.dom_xml)
-
- self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
- self.mox.StubOutWithMock(self.conn, '_has_min_version')
- self.mox.StubOutWithMock(domain, 'blockRebase')
- self.mox.StubOutWithMock(domain, 'blockCommit')
- self.mox.StubOutWithMock(domain, 'blockJobInfo')
-
- self.conn._lookup_by_name('instance-%s' % instance['id']).\
- AndReturn(domain)
- self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
-
- domain.blockCommit('vda', 'other-snap.img', 'snap.img', 0, 0)
-
- domain.blockJobInfo('vda', 0).AndReturn({'cur': 1, 'end': 1000})
- domain.blockJobInfo('vda', 0).AndReturn({})
-
- self.mox.ReplayAll()
-
- self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
- snapshot_id, self.delete_info_2)
-
- self.mox.VerifyAll()
-
- def test_volume_snapshot_delete_outer_success(self):
- instance = objects.Instance(**self.inst)
- snapshot_id = 'snapshot-1234'
-
- FakeVirtDomain(fake_xml=self.dom_xml)
-
- self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
- self.mox.StubOutWithMock(self.conn, '_volume_api')
- self.mox.StubOutWithMock(self.conn, '_volume_snapshot_delete')
-
- self.conn._volume_snapshot_delete(self.c,
- instance,
- self.volume_uuid,
- snapshot_id,
- delete_info=self.delete_info_1)
-
- self.conn._volume_api.update_snapshot_status(
- self.c, snapshot_id, 'deleting')
-
- self.mox.StubOutWithMock(self.conn, '_volume_refresh_connection_info')
- self.conn._volume_refresh_connection_info(self.c, instance,
- self.volume_uuid)
-
- self.mox.ReplayAll()
-
- self.conn.volume_snapshot_delete(self.c, instance, self.volume_uuid,
- snapshot_id,
- self.delete_info_1)
-
- self.mox.VerifyAll()
-
- def test_volume_snapshot_delete_outer_failure(self):
- instance = objects.Instance(**self.inst)
- snapshot_id = '1234-9876'
-
- FakeVirtDomain(fake_xml=self.dom_xml)
-
- self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
- self.mox.StubOutWithMock(self.conn, '_volume_api')
- self.mox.StubOutWithMock(self.conn, '_volume_snapshot_delete')
-
- self.conn._volume_snapshot_delete(self.c,
- instance,
- self.volume_uuid,
- snapshot_id,
- delete_info=self.delete_info_1).\
- AndRaise(exception.NovaException('oops'))
-
- self.conn._volume_api.update_snapshot_status(
- self.c, snapshot_id, 'error_deleting')
-
- self.mox.ReplayAll()
-
- self.assertRaises(exception.NovaException,
- self.conn.volume_snapshot_delete,
- self.c,
- instance,
- self.volume_uuid,
- snapshot_id,
- self.delete_info_1)
-
- self.mox.VerifyAll()
-
- def test_volume_snapshot_delete_invalid_type(self):
- instance = objects.Instance(**self.inst)
-
- FakeVirtDomain(fake_xml=self.dom_xml)
-
- self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
- self.mox.StubOutWithMock(self.conn, '_volume_api')
- self.mox.StubOutWithMock(self.conn, '_has_min_version')
-
- self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
-
- self.conn._volume_api.update_snapshot_status(
- self.c, self.snapshot_id, 'error_deleting')
-
- self.mox.ReplayAll()
-
- self.assertRaises(exception.NovaException,
- self.conn.volume_snapshot_delete,
- self.c,
- instance,
- self.volume_uuid,
- self.snapshot_id,
- self.delete_info_invalid_type)
-
- def test_volume_snapshot_delete_netdisk_1(self):
- """Delete newest snapshot -- blockRebase for libgfapi/network disk."""
-
- class FakeNetdiskDomain(FakeVirtDomain):
- def __init__(self, *args, **kwargs):
- super(FakeNetdiskDomain, self).__init__(*args, **kwargs)
-
- def XMLDesc(self, *args):
- return self.dom_netdisk_xml
-
- # Ensure the libvirt lib has VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
- self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
-
- instance = objects.Instance(**self.inst)
- snapshot_id = 'snapshot-1234'
-
- domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
- self.mox.StubOutWithMock(domain, 'XMLDesc')
- domain.XMLDesc(0).AndReturn(self.dom_netdisk_xml)
-
- self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
- self.mox.StubOutWithMock(self.conn, '_has_min_version')
- self.mox.StubOutWithMock(domain, 'blockRebase')
- self.mox.StubOutWithMock(domain, 'blockCommit')
- self.mox.StubOutWithMock(domain, 'blockJobInfo')
-
- self.conn._lookup_by_name('instance-%s' % instance['id']).\
- AndReturn(domain)
- self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
-
- domain.blockRebase('vdb', 'vdb[1]', 0, 0)
-
- domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1, 'end': 1000})
- domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1000, 'end': 1000})
-
- self.mox.ReplayAll()
-
- self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
- snapshot_id, self.delete_info_1)
-
- self.mox.VerifyAll()
-
- def test_volume_snapshot_delete_netdisk_2(self):
- """Delete older snapshot -- blockCommit for libgfapi/network disk."""
-
- class FakeNetdiskDomain(FakeVirtDomain):
- def __init__(self, *args, **kwargs):
- super(FakeNetdiskDomain, self).__init__(*args, **kwargs)
-
- def XMLDesc(self, *args):
- return self.dom_netdisk_xml
-
- # Ensure the libvirt lib has VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
- self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
-
- instance = objects.Instance(**self.inst)
- snapshot_id = 'snapshot-1234'
-
- domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
- self.mox.StubOutWithMock(domain, 'XMLDesc')
- domain.XMLDesc(0).AndReturn(self.dom_netdisk_xml)
-
- self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
- self.mox.StubOutWithMock(self.conn, '_has_min_version')
- self.mox.StubOutWithMock(domain, 'blockRebase')
- self.mox.StubOutWithMock(domain, 'blockCommit')
- self.mox.StubOutWithMock(domain, 'blockJobInfo')
-
- self.conn._lookup_by_name('instance-%s' % instance['id']).\
- AndReturn(domain)
- self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
-
- domain.blockCommit('vdb', 'vdb[0]', 'vdb[1]', 0,
- fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)
-
- domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1, 'end': 1000})
- domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1000, 'end': 1000})
-
- self.mox.ReplayAll()
-
- self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
- snapshot_id,
- self.delete_info_netdisk)
-
- self.mox.VerifyAll()
diff --git a/nova/tests/virt/libvirt/test_fakelibvirt.py b/nova/tests/virt/libvirt/test_fakelibvirt.py
deleted file mode 100644
index c2200f6aff..0000000000
--- a/nova/tests/virt/libvirt/test_fakelibvirt.py
+++ /dev/null
@@ -1,386 +0,0 @@
-# Copyright 2010 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova import test
-
-from lxml import etree
-
-from nova.compute import arch
-import nova.tests.virt.libvirt.fakelibvirt as libvirt
-
-
-def get_vm_xml(name="testname", uuid=None, source_type='file',
- interface_type='bridge'):
- uuid_tag = ''
- if uuid:
- uuid_tag = '<uuid>%s</uuid>' % (uuid,)
-
- return '''<domain type='kvm'>
- <name>%(name)s</name>
-%(uuid_tag)s
- <memory>128000</memory>
- <vcpu>1</vcpu>
- <os>
- <type>hvm</type>
- <kernel>/somekernel</kernel>
- <cmdline>root=/dev/sda</cmdline>
- <boot dev='hd'/>
- </os>
- <features>
- <acpi/>
- </features>
- <devices>
- <disk type='file' device='disk'>
- <driver name='qemu' type='qcow2'/>
- <source %(source_type)s='/somefile'/>
- <target dev='vda' bus='virtio'/>
- </disk>
- <interface type='%(interface_type)s'>
- <mac address='05:26:3e:31:28:1f'/>
- <source %(interface_type)s='br100'/>
- </interface>
- <input type='mouse' bus='ps2'/>
- <graphics type='vnc' port='5901' autoport='yes' keymap='en-us'/>
- <graphics type='spice' port='5901' autoport='yes' keymap='en-us'/>
- </devices>
-</domain>''' % {'name': name,
- 'uuid_tag': uuid_tag,
- 'source_type': source_type,
- 'interface_type': interface_type}
-
-
-class FakeLibvirtTests(test.NoDBTestCase):
- def tearDown(self):
- super(FakeLibvirtTests, self).tearDown()
- libvirt._reset()
-
- def get_openAuth_curry_func(self, readOnly=False):
- def fake_cb(credlist):
- return 0
-
- creds = [[libvirt.VIR_CRED_AUTHNAME,
- libvirt.VIR_CRED_NOECHOPROMPT],
- fake_cb,
- None]
- flags = 0
- if readOnly:
- flags = libvirt.VIR_CONNECT_RO
- return lambda uri: libvirt.openAuth(uri, creds, flags)
-
- def test_openAuth_accepts_None_uri_by_default(self):
- conn_method = self.get_openAuth_curry_func()
- conn = conn_method(None)
- self.assertNotEqual(conn, None, "Connecting to fake libvirt failed")
-
- def test_openAuth_can_refuse_None_uri(self):
- conn_method = self.get_openAuth_curry_func()
- libvirt.allow_default_uri_connection = False
- self.addCleanup(libvirt._reset)
- self.assertRaises(ValueError, conn_method, None)
-
- def test_openAuth_refuses_invalid_URI(self):
- conn_method = self.get_openAuth_curry_func()
- self.assertRaises(libvirt.libvirtError, conn_method, 'blah')
-
- def test_getInfo(self):
- conn_method = self.get_openAuth_curry_func(readOnly=True)
- res = conn_method(None).getInfo()
- self.assertIn(res[0], (arch.I686, arch.X86_64))
- self.assertTrue(1024 <= res[1] <= 16384,
- "Memory unusually high or low.")
- self.assertTrue(1 <= res[2] <= 32,
- "Active CPU count unusually high or low.")
- self.assertTrue(800 <= res[3] <= 4500,
- "CPU speed unusually high or low.")
- self.assertTrue(res[2] <= (res[5] * res[6]),
- "More active CPUs than num_sockets*cores_per_socket")
-
- def test_createXML_detects_invalid_xml(self):
- self._test_XML_func_detects_invalid_xml('createXML', [0])
-
- def test_defineXML_detects_invalid_xml(self):
- self._test_XML_func_detects_invalid_xml('defineXML', [])
-
- def _test_XML_func_detects_invalid_xml(self, xmlfunc_name, args):
- conn = self.get_openAuth_curry_func()('qemu:///system')
- try:
- getattr(conn, xmlfunc_name)("this is not valid </xml>", *args)
- except libvirt.libvirtError as e:
- self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_XML_DETAIL)
- self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_DOMAIN)
- return
- raise self.failureException("Invalid XML didn't raise libvirtError")
-
- def test_defineXML_defines_domain(self):
- conn = self.get_openAuth_curry_func()('qemu:///system')
- conn.defineXML(get_vm_xml())
- dom = conn.lookupByName('testname')
- self.assertEqual('testname', dom.name())
- self.assertEqual(0, dom.isActive())
- dom.undefine()
- self.assertRaises(libvirt.libvirtError,
- conn.lookupByName,
- 'testname')
-
- def test_blockStats(self):
- conn = self.get_openAuth_curry_func()('qemu:///system')
- conn.createXML(get_vm_xml(), 0)
- dom = conn.lookupByName('testname')
- blockstats = dom.blockStats('vda')
- self.assertEqual(len(blockstats), 5)
- for x in blockstats:
- self.assertIn(type(x), [int, long])
-
- def test_attach_detach(self):
- conn = self.get_openAuth_curry_func()('qemu:///system')
- conn.createXML(get_vm_xml(), 0)
- dom = conn.lookupByName('testname')
- xml = '''<disk type='block'>
- <driver name='qemu' type='raw'/>
- <source dev='/dev/nbd0'/>
- <target dev='/dev/vdc' bus='virtio'/>
- </disk>'''
- self.assertTrue(dom.attachDevice(xml))
- self.assertTrue(dom.detachDevice(xml))
-
- def test_info(self):
- conn = self.get_openAuth_curry_func()('qemu:///system')
- conn.createXML(get_vm_xml(), 0)
- dom = conn.lookupByName('testname')
- info = dom.info()
- self.assertEqual(info[0], libvirt.VIR_DOMAIN_RUNNING)
- self.assertEqual(info[1], 128000)
- self.assertTrue(info[2] <= 128000)
- self.assertEqual(info[3], 1)
- self.assertIn(type(info[4]), [int, long])
-
- def test_createXML_runs_domain(self):
- conn = self.get_openAuth_curry_func()('qemu:///system')
- conn.createXML(get_vm_xml(), 0)
- dom = conn.lookupByName('testname')
- self.assertEqual('testname', dom.name())
- self.assertEqual(1, dom.isActive())
- dom.destroy()
- try:
- dom = conn.lookupByName('testname')
- except libvirt.libvirtError as e:
- self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_NO_DOMAIN)
- self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_QEMU)
- return
- self.fail("lookupByName succeeded for destroyed non-defined VM")
-
- def test_defineXML_remembers_uuid(self):
- conn = self.get_openAuth_curry_func()('qemu:///system')
- uuid = 'b21f957d-a72f-4b93-b5a5-45b1161abb02'
- conn.defineXML(get_vm_xml(uuid=uuid))
- dom = conn.lookupByName('testname')
- self.assertEqual(dom.UUIDString(), uuid)
-
- def test_createWithFlags(self):
- conn = self.get_openAuth_curry_func()('qemu:///system')
- conn.defineXML(get_vm_xml())
- dom = conn.lookupByName('testname')
- self.assertFalse(dom.isActive(), 'Defined domain was running.')
- dom.createWithFlags(0)
- self.assertTrue(dom.isActive(),
- 'Domain wasn\'t running after createWithFlags')
-
- def test_managedSave(self):
- conn = self.get_openAuth_curry_func()('qemu:///system')
- conn.defineXML(get_vm_xml())
- dom = conn.lookupByName('testname')
- self.assertFalse(dom.isActive(), 'Defined domain was running.')
- dom.createWithFlags(0)
- self.assertEqual(dom.hasManagedSaveImage(0), 0)
- dom.managedSave(0)
- self.assertEqual(dom.hasManagedSaveImage(0), 1)
- dom.managedSaveRemove(0)
- self.assertEqual(dom.hasManagedSaveImage(0), 0)
-
- def test_listDomainsId_and_lookupById(self):
- conn = self.get_openAuth_curry_func()('qemu:///system')
- self.assertEqual(conn.listDomainsID(), [])
- conn.defineXML(get_vm_xml())
- dom = conn.lookupByName('testname')
- dom.createWithFlags(0)
- self.assertEqual(len(conn.listDomainsID()), 1)
-
- dom_id = conn.listDomainsID()[0]
- self.assertEqual(conn.lookupByID(dom_id), dom)
-
- dom_id = conn.listDomainsID()[0]
- try:
- conn.lookupByID(dom_id + 1)
- except libvirt.libvirtError as e:
- self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_NO_DOMAIN)
- self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_QEMU)
- return
- raise self.failureException("Looking up an invalid domain ID didn't "
- "raise libvirtError")
-
- def test_define_and_retrieve(self):
- conn = self.get_openAuth_curry_func()('qemu:///system')
- self.assertEqual(conn.listDomainsID(), [])
- conn.defineXML(get_vm_xml())
- dom = conn.lookupByName('testname')
- xml = dom.XMLDesc(0)
- etree.fromstring(xml)
-
- def _test_accepts_source_type(self, source_type):
- conn = self.get_openAuth_curry_func()('qemu:///system')
- self.assertEqual(conn.listDomainsID(), [])
- conn.defineXML(get_vm_xml(source_type=source_type))
- dom = conn.lookupByName('testname')
- xml = dom.XMLDesc(0)
- tree = etree.fromstring(xml)
- elem = tree.find('./devices/disk/source')
- self.assertEqual(elem.get('file'), '/somefile')
-
- def test_accepts_source_dev(self):
- self._test_accepts_source_type('dev')
-
- def test_accepts_source_path(self):
- self._test_accepts_source_type('path')
-
- def test_network_type_bridge_sticks(self):
- self._test_network_type_sticks('bridge')
-
- def test_network_type_network_sticks(self):
- self._test_network_type_sticks('network')
-
- def _test_network_type_sticks(self, network_type):
- conn = self.get_openAuth_curry_func()('qemu:///system')
- self.assertEqual(conn.listDomainsID(), [])
- conn.defineXML(get_vm_xml(interface_type=network_type))
- dom = conn.lookupByName('testname')
- xml = dom.XMLDesc(0)
- tree = etree.fromstring(xml)
- elem = tree.find('./devices/interface')
- self.assertEqual(elem.get('type'), network_type)
- elem = elem.find('./source')
- self.assertEqual(elem.get(network_type), 'br100')
-
- def test_getType(self):
- conn = self.get_openAuth_curry_func()('qemu:///system')
- self.assertEqual(conn.getType(), 'QEMU')
-
- def test_getVersion(self):
- conn = self.get_openAuth_curry_func()('qemu:///system')
- self.assertIsInstance(conn.getVersion(), int)
-
- def test_getCapabilities(self):
- conn = self.get_openAuth_curry_func()('qemu:///system')
- etree.fromstring(conn.getCapabilities())
-
- def test_nwfilter_define_undefine(self):
- conn = self.get_openAuth_curry_func()('qemu:///system')
- # Will raise an exception if it's not valid XML
- xml = '''<filter name='nova-instance-instance-789' chain='root'>
- <uuid>946878c6-3ad3-82b2-87f3-c709f3807f58</uuid>
- </filter>'''
-
- conn.nwfilterDefineXML(xml)
- nwfilter = conn.nwfilterLookupByName('nova-instance-instance-789')
- nwfilter.undefine()
- try:
- conn.nwfilterLookupByName('nova-instance-instance-789320334')
- except libvirt.libvirtError as e:
- self.assertEqual(e.get_error_code(), libvirt.VIR_ERR_NO_NWFILTER)
- self.assertEqual(e.get_error_domain(), libvirt.VIR_FROM_NWFILTER)
- return
- raise self.failureException("Invalid NWFilter name didn't"
- " raise libvirtError")
-
- def test_compareCPU_compatible(self):
- conn = self.get_openAuth_curry_func()('qemu:///system')
-
- xml = '''<cpu>
- <arch>%s</arch>
- <model>%s</model>
- <vendor>%s</vendor>
- <topology sockets="%d" cores="%d" threads="%d"/>
- </cpu>''' % (libvirt.node_arch,
- libvirt.node_cpu_model,
- libvirt.node_cpu_vendor,
- libvirt.node_sockets,
- libvirt.node_cores,
- libvirt.node_threads)
- self.assertEqual(conn.compareCPU(xml, 0),
- libvirt.VIR_CPU_COMPARE_IDENTICAL)
-
- def test_compareCPU_incompatible_vendor(self):
- conn = self.get_openAuth_curry_func()('qemu:///system')
-
- xml = '''<cpu>
- <arch>%s</arch>
- <model>%s</model>
- <vendor>%s</vendor>
- <topology sockets="%d" cores="%d" threads="%d"/>
- </cpu>''' % (libvirt.node_arch,
- libvirt.node_cpu_model,
- "AnotherVendor",
- libvirt.node_sockets,
- libvirt.node_cores,
- libvirt.node_threads)
- self.assertEqual(conn.compareCPU(xml, 0),
- libvirt.VIR_CPU_COMPARE_INCOMPATIBLE)
-
- def test_compareCPU_incompatible_arch(self):
- conn = self.get_openAuth_curry_func()('qemu:///system')
-
- xml = '''<cpu>
- <arch>%s</arch>
- <model>%s</model>
- <vendor>%s</vendor>
- <topology sockets="%d" cores="%d" threads="%d"/>
- </cpu>''' % ('not-a-valid-arch',
- libvirt.node_cpu_model,
- libvirt.node_cpu_vendor,
- libvirt.node_sockets,
- libvirt.node_cores,
- libvirt.node_threads)
- self.assertEqual(conn.compareCPU(xml, 0),
- libvirt.VIR_CPU_COMPARE_INCOMPATIBLE)
-
- def test_compareCPU_incompatible_model(self):
- conn = self.get_openAuth_curry_func()('qemu:///system')
-
- xml = '''<cpu>
- <arch>%s</arch>
- <model>%s</model>
- <vendor>%s</vendor>
- <topology sockets="%d" cores="%d" threads="%d"/>
- </cpu>''' % (libvirt.node_arch,
- "AnotherModel",
- libvirt.node_cpu_vendor,
- libvirt.node_sockets,
- libvirt.node_cores,
- libvirt.node_threads)
- self.assertEqual(conn.compareCPU(xml, 0),
- libvirt.VIR_CPU_COMPARE_INCOMPATIBLE)
-
- def test_compareCPU_compatible_unspecified_model(self):
- conn = self.get_openAuth_curry_func()('qemu:///system')
-
- xml = '''<cpu>
- <arch>%s</arch>
- <vendor>%s</vendor>
- <topology sockets="%d" cores="%d" threads="%d"/>
- </cpu>''' % (libvirt.node_arch,
- libvirt.node_cpu_vendor,
- libvirt.node_sockets,
- libvirt.node_cores,
- libvirt.node_threads)
- self.assertEqual(conn.compareCPU(xml, 0),
- libvirt.VIR_CPU_COMPARE_IDENTICAL)
diff --git a/nova/tests/virt/libvirt/test_firewall.py b/nova/tests/virt/libvirt/test_firewall.py
deleted file mode 100644
index f928825fba..0000000000
--- a/nova/tests/virt/libvirt/test_firewall.py
+++ /dev/null
@@ -1,749 +0,0 @@
-# Copyright 2010 OpenStack Foundation
-# Copyright 2012 University Of Minho
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import re
-import threading
-import uuid
-from xml.dom import minidom
-
-from lxml import etree
-import mock
-import mox
-from oslo.concurrency import lockutils
-
-from nova.compute import utils as compute_utils
-from nova import exception
-from nova.network import linux_net
-from nova import objects
-from nova import test
-from nova.tests import fake_network
-from nova.tests.virt.libvirt import fakelibvirt
-from nova.virt.libvirt import firewall
-from nova.virt import netutils
-from nova.virt import virtapi
-
-try:
- import libvirt
-except ImportError:
- libvirt = fakelibvirt
-
-_fake_network_info = fake_network.fake_get_instance_nw_info
-_fake_stub_out_get_nw_info = fake_network.stub_out_nw_api_get_instance_nw_info
-_ipv4_like = fake_network.ipv4_like
-
-
-class NWFilterFakes:
- def __init__(self):
- self.filters = {}
-
- def nwfilterLookupByName(self, name):
- if name in self.filters:
- return self.filters[name]
- raise libvirt.libvirtError('Filter Not Found')
-
- def filterDefineXMLMock(self, xml):
- class FakeNWFilterInternal:
- def __init__(self, parent, name, u, xml):
- self.name = name
- self.uuid = u
- self.parent = parent
- self.xml = xml
-
- def XMLDesc(self, flags):
- return self.xml
-
- def undefine(self):
- del self.parent.filters[self.name]
-
- tree = etree.fromstring(xml)
- name = tree.get('name')
- u = tree.find('uuid')
- if u is None:
- u = uuid.uuid4().hex
- else:
- u = u.text
- if name not in self.filters:
- self.filters[name] = FakeNWFilterInternal(self, name, u, xml)
- else:
- if self.filters[name].uuid != u:
- raise libvirt.libvirtError(
- "Mismatching name '%s' with uuid '%s' vs '%s'"
- % (name, self.filters[name].uuid, u))
- self.filters[name].xml = xml
- return True
-
-
-class FakeVirtAPI(virtapi.VirtAPI):
- def provider_fw_rule_get_all(self, context):
- return []
-
-
-class IptablesFirewallTestCase(test.NoDBTestCase):
- def setUp(self):
- super(IptablesFirewallTestCase, self).setUp()
-
- class FakeLibvirtDriver(object):
- def nwfilterDefineXML(*args, **kwargs):
- """setup_basic_rules in nwfilter calls this."""
- pass
-
- self.fake_libvirt_connection = FakeLibvirtDriver()
- self.fw = firewall.IptablesFirewallDriver(
- FakeVirtAPI(),
- get_connection=lambda: self.fake_libvirt_connection)
-
- in_rules = [
- '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
- '*nat',
- ':PREROUTING ACCEPT [1170:189210]',
- ':INPUT ACCEPT [844:71028]',
- ':OUTPUT ACCEPT [5149:405186]',
- ':POSTROUTING ACCEPT [5063:386098]',
- '# Completed on Tue Dec 18 15:50:25 2012',
- '# Generated by iptables-save v1.4.12 on Tue Dec 18 15:50:25 201;',
- '*mangle',
- ':PREROUTING ACCEPT [241:39722]',
- ':INPUT ACCEPT [230:39282]',
- ':FORWARD ACCEPT [0:0]',
- ':OUTPUT ACCEPT [266:26558]',
- ':POSTROUTING ACCEPT [267:26590]',
- '-A POSTROUTING -o virbr0 -p udp -m udp --dport 68 -j CHECKSUM '
- '--checksum-fill',
- 'COMMIT',
- '# Completed on Tue Dec 18 15:50:25 2012',
- '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
- '*filter',
- ':INPUT ACCEPT [969615:281627771]',
- ':FORWARD ACCEPT [0:0]',
- ':OUTPUT ACCEPT [915599:63811649]',
- ':nova-block-ipv4 - [0:0]',
- '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
- '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
- ',ESTABLISHED -j ACCEPT ',
- '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
- '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
- '[0:0] -A FORWARD -o virbr0 -j REJECT '
- '--reject-with icmp-port-unreachable ',
- '[0:0] -A FORWARD -i virbr0 -j REJECT '
- '--reject-with icmp-port-unreachable ',
- 'COMMIT',
- '# Completed on Mon Dec 6 11:54:13 2010',
- ]
-
- in6_filter_rules = [
- '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
- '*filter',
- ':INPUT ACCEPT [349155:75810423]',
- ':FORWARD ACCEPT [0:0]',
- ':OUTPUT ACCEPT [349256:75777230]',
- 'COMMIT',
- '# Completed on Tue Jan 18 23:47:56 2011',
- ]
-
- def _create_instance_ref(self,
- uuid="74526555-9166-4893-a203-126bdcab0d67"):
- inst = objects.Instance(
- id=7,
- uuid=uuid,
- user_id="fake",
- project_id="fake",
- image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6',
- instance_type_id=1)
- inst.info_cache = objects.InstanceInfoCache()
- inst.info_cache.deleted = False
- return inst
-
- @mock.patch.object(objects.InstanceList, "get_by_security_group_id")
- @mock.patch.object(objects.SecurityGroupRuleList,
- "get_by_security_group_id")
- @mock.patch.object(objects.SecurityGroupList, "get_by_instance")
- @mock.patch.object(lockutils, "external_lock")
- def test_static_filters(self, mock_lock, mock_secgroup,
- mock_secrule, mock_instlist):
- mock_lock.return_value = threading.Semaphore()
-
- UUID = "2674993b-6adb-4733-abd9-a7c10cc1f146"
- SRC_UUID = "0e0a76b2-7c52-4bc0-9a60-d83017e42c1a"
- instance_ref = self._create_instance_ref(UUID)
- src_instance_ref = self._create_instance_ref(SRC_UUID)
-
- secgroup = objects.SecurityGroup(id=1,
- user_id='fake',
- project_id='fake',
- name='testgroup',
- description='test group')
-
- src_secgroup = objects.SecurityGroup(id=2,
- user_id='fake',
- project_id='fake',
- name='testsourcegroup',
- description='src group')
-
- r1 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
- protocol='icmp',
- from_port=-1,
- to_port=-1,
- cidr='192.168.11.0/24',
- grantee_group=None)
-
- r2 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
- protocol='icmp',
- from_port=8,
- to_port=-1,
- cidr='192.168.11.0/24',
- grantee_group=None)
-
- r3 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
- protocol='tcp',
- from_port=80,
- to_port=81,
- cidr='192.168.10.0/24',
- grantee_group=None)
-
- r4 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
- protocol='tcp',
- from_port=80,
- to_port=81,
- cidr=None,
- grantee_group=src_secgroup,
- group_id=src_secgroup['id'])
-
- r5 = objects.SecurityGroupRule(parent_group_id=secgroup['id'],
- protocol=None,
- cidr=None,
- grantee_group=src_secgroup,
- group_id=src_secgroup['id'])
-
- secgroup_list = objects.SecurityGroupList()
- secgroup_list.objects.append(secgroup)
- src_secgroup_list = objects.SecurityGroupList()
- src_secgroup_list.objects.append(src_secgroup)
- instance_ref.security_groups = secgroup_list
- src_instance_ref.security_groups = src_secgroup_list
-
- def _fake_secgroup(ctxt, instance):
- if instance.uuid == UUID:
- return instance_ref.security_groups
- else:
- return src_instance_ref.security_groups
-
- mock_secgroup.side_effect = _fake_secgroup
-
- def _fake_secrule(ctxt, id):
- if id == secgroup.id:
- rules = objects.SecurityGroupRuleList()
- rules.objects.extend([r1, r2, r3, r4, r5])
- return rules
- else:
- return []
-
- mock_secrule.side_effect = _fake_secrule
-
- def _fake_instlist(ctxt, id):
- if id == src_secgroup['id']:
- insts = objects.InstanceList()
- insts.objects.append(src_instance_ref)
- return insts
- else:
- insts = objects.InstanceList()
- insts.objects.append(instance_ref)
- return insts
-
- mock_instlist.side_effect = _fake_instlist
-
- def fake_iptables_execute(*cmd, **kwargs):
- process_input = kwargs.get('process_input', None)
- if cmd == ('ip6tables-save', '-c'):
- return '\n'.join(self.in6_filter_rules), None
- if cmd == ('iptables-save', '-c'):
- return '\n'.join(self.in_rules), None
- if cmd == ('iptables-restore', '-c'):
- lines = process_input.split('\n')
- if '*filter' in lines:
- self.out_rules = lines
- return '', ''
- if cmd == ('ip6tables-restore', '-c',):
- lines = process_input.split('\n')
- if '*filter' in lines:
- self.out6_rules = lines
- return '', ''
-
- network_model = _fake_network_info(self.stubs, 1)
-
- linux_net.iptables_manager.execute = fake_iptables_execute
-
- self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
- lambda instance: network_model)
-
- self.fw.prepare_instance_filter(instance_ref, network_model)
- self.fw.apply_instance_filter(instance_ref, network_model)
-
- in_rules = filter(lambda l: not l.startswith('#'),
- self.in_rules)
- for rule in in_rules:
- if 'nova' not in rule:
- self.assertTrue(rule in self.out_rules,
- 'Rule went missing: %s' % rule)
-
- instance_chain = None
- for rule in self.out_rules:
- # This is pretty crude, but it'll do for now
- # last two octets change
- if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
- instance_chain = rule.split(' ')[-1]
- break
- self.assertTrue(instance_chain, "The instance chain wasn't added")
-
- security_group_chain = None
- for rule in self.out_rules:
- # This is pretty crude, but it'll do for now
- if '-A %s -j' % instance_chain in rule:
- security_group_chain = rule.split(' ')[-1]
- break
- self.assertTrue(security_group_chain,
- "The security group chain wasn't added")
-
- regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp '
- '-s 192.168.11.0/24')
- self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
- "ICMP acceptance rule wasn't added")
-
- regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp '
- '--icmp-type 8 -s 192.168.11.0/24')
- self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
- "ICMP Echo Request acceptance rule wasn't added")
-
- for ip in network_model.fixed_ips():
- if ip['version'] != 4:
- continue
- regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp -m multiport '
- '--dports 80:81 -s %s' % ip['address'])
- self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
- "TCP port 80/81 acceptance rule wasn't added")
- regex = re.compile('\[0\:0\] -A .* -j ACCEPT -s '
- '%s' % ip['address'])
- self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
- "Protocol/port-less acceptance rule wasn't added")
-
- regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp '
- '-m multiport --dports 80:81 -s 192.168.10.0/24')
- self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
- "TCP port 80/81 acceptance rule wasn't added")
-
- def test_filters_for_instance_with_ip_v6(self):
- self.flags(use_ipv6=True)
- network_info = _fake_network_info(self.stubs, 1)
- rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
- self.assertEqual(len(rulesv4), 2)
- self.assertEqual(len(rulesv6), 1)
-
- def test_filters_for_instance_without_ip_v6(self):
- self.flags(use_ipv6=False)
- network_info = _fake_network_info(self.stubs, 1)
- rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
- self.assertEqual(len(rulesv4), 2)
- self.assertEqual(len(rulesv6), 0)
-
- @mock.patch.object(objects.SecurityGroupList, "get_by_instance")
- @mock.patch.object(lockutils, "external_lock")
- def test_multinic_iptables(self, mock_lock, mock_secgroup):
- mock_lock.return_value = threading.Semaphore()
- mock_secgroup.return_value = objects.SecurityGroupList()
-
- ipv4_rules_per_addr = 1
- ipv4_addr_per_network = 2
- ipv6_rules_per_addr = 1
- ipv6_addr_per_network = 1
- networks_count = 5
- instance_ref = self._create_instance_ref()
- network_info = _fake_network_info(self.stubs, networks_count,
- ipv4_addr_per_network)
- network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
- '1.1.1.1'
- ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
- ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
- inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
- network_info)
- self.fw.prepare_instance_filter(instance_ref, network_info)
- ipv4 = self.fw.iptables.ipv4['filter'].rules
- ipv6 = self.fw.iptables.ipv6['filter'].rules
- ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
- ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
- # Extra rules are for the DHCP request
- rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
- networks_count) + 2
- self.assertEqual(ipv4_network_rules, rules)
- self.assertEqual(ipv6_network_rules,
- ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
-
- @mock.patch.object(lockutils, "external_lock")
- def test_do_refresh_security_group_rules(self, mock_lock):
- mock_lock.return_value = threading.Semaphore()
- instance_ref = self._create_instance_ref()
- self.mox.StubOutWithMock(self.fw,
- 'instance_rules')
- self.mox.StubOutWithMock(self.fw,
- 'add_filters_for_instance',
- use_mock_anything=True)
- self.mox.StubOutWithMock(self.fw.iptables.ipv4['filter'],
- 'has_chain')
-
- self.fw.instance_rules(instance_ref,
- mox.IgnoreArg()).AndReturn((None, None))
- self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
- mox.IgnoreArg(), mox.IgnoreArg())
- self.fw.instance_rules(instance_ref,
- mox.IgnoreArg()).AndReturn((None, None))
- self.fw.iptables.ipv4['filter'].has_chain(mox.IgnoreArg()
- ).AndReturn(True)
- self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
- mox.IgnoreArg(), mox.IgnoreArg())
- self.mox.ReplayAll()
-
- self.fw.prepare_instance_filter(instance_ref, mox.IgnoreArg())
- self.fw.instance_info[instance_ref['id']] = (instance_ref, None)
- self.fw.do_refresh_security_group_rules("fake")
-
- @mock.patch.object(lockutils, "external_lock")
- def test_do_refresh_security_group_rules_instance_gone(self, mock_lock):
- mock_lock.return_value = threading.Semaphore()
- instance1 = {'id': 1, 'uuid': 'fake-uuid1'}
- instance2 = {'id': 2, 'uuid': 'fake-uuid2'}
- self.fw.instance_info = {1: (instance1, 'netinfo1'),
- 2: (instance2, 'netinfo2')}
- mock_filter = mock.MagicMock()
- with mock.patch.dict(self.fw.iptables.ipv4, {'filter': mock_filter}):
- mock_filter.has_chain.return_value = False
- with mock.patch.object(self.fw, 'instance_rules') as mock_ir:
- mock_ir.return_value = (None, None)
- self.fw.do_refresh_security_group_rules('secgroup')
- self.assertEqual(2, mock_ir.call_count)
- # NOTE(danms): Make sure that it is checking has_chain each time,
- # continuing to process all the instances, and never adding the
- # new chains back if has_chain() is False
- mock_filter.has_chain.assert_has_calls([mock.call('inst-1'),
- mock.call('inst-2')],
- any_order=True)
- self.assertEqual(0, mock_filter.add_chain.call_count)
-
- @mock.patch.object(objects.InstanceList, "get_by_security_group_id")
- @mock.patch.object(objects.SecurityGroupRuleList,
- "get_by_security_group_id")
- @mock.patch.object(objects.SecurityGroupList, "get_by_instance")
- @mock.patch.object(lockutils, "external_lock")
- def test_unfilter_instance_undefines_nwfilter(self, mock_lock,
- mock_secgroup,
- mock_secrule,
- mock_instlist):
- mock_lock.return_value = threading.Semaphore()
-
- fakefilter = NWFilterFakes()
- _xml_mock = fakefilter.filterDefineXMLMock
- self.fw.nwfilter._conn.nwfilterDefineXML = _xml_mock
- _lookup_name = fakefilter.nwfilterLookupByName
- self.fw.nwfilter._conn.nwfilterLookupByName = _lookup_name
- instance_ref = self._create_instance_ref()
-
- mock_secgroup.return_value = objects.SecurityGroupList()
-
- network_info = _fake_network_info(self.stubs, 1)
- self.fw.setup_basic_filtering(instance_ref, network_info)
- self.fw.prepare_instance_filter(instance_ref, network_info)
- self.fw.apply_instance_filter(instance_ref, network_info)
- original_filter_count = len(fakefilter.filters)
- self.fw.unfilter_instance(instance_ref, network_info)
-
- # should undefine just the instance filter
- self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
-
- @mock.patch.object(FakeVirtAPI, "provider_fw_rule_get_all")
- @mock.patch.object(objects.SecurityGroupList, "get_by_instance")
- @mock.patch.object(lockutils, "external_lock")
- def test_provider_firewall_rules(self, mock_lock, mock_secgroup,
- mock_fwrules):
- mock_lock.return_value = threading.Semaphore()
- mock_secgroup.return_value = objects.SecurityGroupList()
-
- # setup basic instance data
- instance_ref = self._create_instance_ref()
- # FRAGILE: peeks at how the firewall names chains
- chain_name = 'inst-%s' % instance_ref['id']
-
- # create a firewall via setup_basic_filtering like libvirt_conn.spawn
- # should have a chain with 0 rules
- network_info = _fake_network_info(self.stubs, 1)
- self.fw.setup_basic_filtering(instance_ref, network_info)
- self.assertIn('provider', self.fw.iptables.ipv4['filter'].chains)
- rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == 'provider']
- self.assertEqual(0, len(rules))
-
- # add a rule angd send the update message, check for 1 rule
- mock_fwrules.return_value = [{'protocol': 'tcp',
- 'cidr': '10.99.99.99/32',
- 'from_port': 1,
- 'to_port': 65535}]
- self.fw.refresh_provider_fw_rules()
- rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == 'provider']
- self.assertEqual(1, len(rules))
-
- # Add another, refresh, and make sure number of rules goes to two
- mock_fwrules.return_value = [{'protocol': 'tcp',
- 'cidr': '10.99.99.99/32',
- 'from_port': 1,
- 'to_port': 65535},
- {'protocol': 'udp',
- 'cidr': '10.99.99.99/32',
- 'from_port': 1,
- 'to_port': 65535}]
- self.fw.refresh_provider_fw_rules()
- rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == 'provider']
- self.assertEqual(2, len(rules))
-
- # create the instance filter and make sure it has a jump rule
- self.fw.prepare_instance_filter(instance_ref, network_info)
- self.fw.apply_instance_filter(instance_ref, network_info)
- inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == chain_name]
- jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
- provjump_rules = []
- # IptablesTable doesn't make rules unique internally
- for rule in jump_rules:
- if 'provider' in rule.rule and rule not in provjump_rules:
- provjump_rules.append(rule)
- self.assertEqual(1, len(provjump_rules))
-
- # remove a rule from the db, cast to compute to refresh rule
- mock_fwrules.return_value = [{'protocol': 'udp',
- 'cidr': '10.99.99.99/32',
- 'from_port': 1,
- 'to_port': 65535}]
- self.fw.refresh_provider_fw_rules()
- rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == 'provider']
- self.assertEqual(1, len(rules))
-
-
-class NWFilterTestCase(test.NoDBTestCase):
- def setUp(self):
- super(NWFilterTestCase, self).setUp()
-
- class Mock(object):
- pass
-
- self.fake_libvirt_connection = Mock()
-
- self.fw = firewall.NWFilterFirewall(
- FakeVirtAPI(),
- lambda: self.fake_libvirt_connection)
-
- def _create_security_group(self, instance_ref):
- secgroup = objects.SecurityGroup(id=1,
- user_id='fake',
- project_id='fake',
- name='testgroup',
- description='test group description')
-
- secgroup_list = objects.SecurityGroupList()
- secgroup_list.objects.append(secgroup)
- instance_ref.security_groups = secgroup_list
-
- return secgroup
-
- def _create_instance(self):
- inst = objects.Instance(
- id=7,
- uuid="74526555-9166-4893-a203-126bdcab0d67",
- user_id="fake",
- project_id="fake",
- image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6',
- instance_type_id=1)
- inst.info_cache = objects.InstanceInfoCache()
- inst.info_cache.deleted = False
- return inst
-
- def test_creates_base_rule_first(self):
- # These come pre-defined by libvirt
- self.defined_filters = ['no-mac-spoofing',
- 'no-ip-spoofing',
- 'no-arp-spoofing',
- 'allow-dhcp-server']
-
- self.recursive_depends = {}
- for f in self.defined_filters:
- self.recursive_depends[f] = []
-
- def _filterDefineXMLMock(xml):
- dom = minidom.parseString(xml)
- name = dom.firstChild.getAttribute('name')
- self.recursive_depends[name] = []
- for f in dom.getElementsByTagName('filterref'):
- ref = f.getAttribute('filter')
- self.assertTrue(ref in self.defined_filters,
- ('%s referenced filter that does ' +
- 'not yet exist: %s') % (name, ref))
- dependencies = [ref] + self.recursive_depends[ref]
- self.recursive_depends[name] += dependencies
-
- self.defined_filters.append(name)
- return True
-
- self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock
-
- instance_ref = self._create_instance()
- self._create_security_group(instance_ref)
-
- def _ensure_all_called(mac, allow_dhcp):
- instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
- mac.translate({ord(':'): None}))
- requiredlist = ['no-arp-spoofing', 'no-ip-spoofing',
- 'no-mac-spoofing']
- required_not_list = []
- if allow_dhcp:
- requiredlist.append('allow-dhcp-server')
- else:
- required_not_list.append('allow-dhcp-server')
- for required in requiredlist:
- self.assertTrue(required in
- self.recursive_depends[instance_filter],
- "Instance's filter does not include %s" %
- required)
- for required_not in required_not_list:
- self.assertFalse(required_not in
- self.recursive_depends[instance_filter],
- "Instance filter includes %s" % required_not)
-
- network_info = _fake_network_info(self.stubs, 1)
- # since there is one (network_info) there is one vif
- # pass this vif's mac to _ensure_all_called()
- # to set the instance_filter properly
- mac = network_info[0]['address']
- network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
- '1.1.1.1'
- self.fw.setup_basic_filtering(instance_ref, network_info)
- allow_dhcp = True
- _ensure_all_called(mac, allow_dhcp)
-
- network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = None
- self.fw.setup_basic_filtering(instance_ref, network_info)
- allow_dhcp = False
- _ensure_all_called(mac, allow_dhcp)
-
- def test_unfilter_instance_undefines_nwfilters(self):
- fakefilter = NWFilterFakes()
- self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
- self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
-
- instance_ref = self._create_instance()
- self._create_security_group(instance_ref)
-
- network_info = _fake_network_info(self.stubs, 1)
- self.fw.setup_basic_filtering(instance_ref, network_info)
- original_filter_count = len(fakefilter.filters)
- self.fw.unfilter_instance(instance_ref, network_info)
- self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
-
- def test_redefining_nwfilters(self):
- fakefilter = NWFilterFakes()
- self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
- self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
-
- instance_ref = self._create_instance()
- self._create_security_group(instance_ref)
-
- network_info = _fake_network_info(self.stubs, 1)
- self.fw.setup_basic_filtering(instance_ref, network_info)
- self.fw.setup_basic_filtering(instance_ref, network_info)
-
- def test_nwfilter_parameters(self):
- fakefilter = NWFilterFakes()
- self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
- self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
-
- instance_ref = self._create_instance()
- self._create_security_group(instance_ref)
-
- network_info = _fake_network_info(self.stubs, 1)
- self.fw.setup_basic_filtering(instance_ref, network_info)
-
- vif = network_info[0]
- nic_id = vif['address'].replace(':', '')
- instance_filter_name = self.fw._instance_filter_name(instance_ref,
- nic_id)
- f = fakefilter.nwfilterLookupByName(instance_filter_name)
- tree = etree.fromstring(f.xml)
-
- for fref in tree.findall('filterref'):
- parameters = fref.findall('./parameter')
- for parameter in parameters:
- subnet_v4, subnet_v6 = vif['network']['subnets']
- if parameter.get('name') == 'IP':
- self.assertTrue(_ipv4_like(parameter.get('value'),
- '192.168'))
- elif parameter.get('name') == 'DHCPSERVER':
- dhcp_server = subnet_v4.get('dhcp_server')
- self.assertEqual(parameter.get('value'), dhcp_server)
- elif parameter.get('name') == 'RASERVER':
- ra_server = subnet_v6['gateway']['address'] + "/128"
- self.assertEqual(parameter.get('value'), ra_server)
- elif parameter.get('name') == 'PROJNET':
- ipv4_cidr = subnet_v4['cidr']
- net, mask = netutils.get_net_and_mask(ipv4_cidr)
- self.assertEqual(parameter.get('value'), net)
- elif parameter.get('name') == 'PROJMASK':
- ipv4_cidr = subnet_v4['cidr']
- net, mask = netutils.get_net_and_mask(ipv4_cidr)
- self.assertEqual(parameter.get('value'), mask)
- elif parameter.get('name') == 'PROJNET6':
- ipv6_cidr = subnet_v6['cidr']
- net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
- self.assertEqual(parameter.get('value'), net)
- elif parameter.get('name') == 'PROJMASK6':
- ipv6_cidr = subnet_v6['cidr']
- net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
- self.assertEqual(parameter.get('value'), prefix)
- else:
- raise exception.InvalidParameterValue('unknown parameter '
- 'in filter')
-
- def test_multinic_base_filter_selection(self):
- fakefilter = NWFilterFakes()
- self.fw._conn.nwfilterDefineXML = fakefilter.filterDefineXMLMock
- self.fw._conn.nwfilterLookupByName = fakefilter.nwfilterLookupByName
-
- instance_ref = self._create_instance()
- self._create_security_group(instance_ref)
-
- network_info = _fake_network_info(self.stubs, 2)
- network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
- '1.1.1.1'
-
- self.fw.setup_basic_filtering(instance_ref, network_info)
-
- def assert_filterref(instance, vif, expected=None):
- expected = expected or []
- nic_id = vif['address'].replace(':', '')
- filter_name = self.fw._instance_filter_name(instance, nic_id)
- f = fakefilter.nwfilterLookupByName(filter_name)
- tree = etree.fromstring(f.xml)
- frefs = [fr.get('filter') for fr in tree.findall('filterref')]
- self.assertEqual(set(expected), set(frefs))
-
- assert_filterref(instance_ref, network_info[0],
- expected=['nova-base'])
- assert_filterref(instance_ref, network_info[1],
- expected=['nova-nodhcp'])
diff --git a/nova/tests/virt/libvirt/test_imagebackend.py b/nova/tests/virt/libvirt/test_imagebackend.py
deleted file mode 100644
index 17d0791856..0000000000
--- a/nova/tests/virt/libvirt/test_imagebackend.py
+++ /dev/null
@@ -1,1309 +0,0 @@
-# Copyright 2012 Grid Dynamics
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-import inspect
-import os
-import shutil
-import tempfile
-
-import fixtures
-import mock
-from oslo.concurrency import lockutils
-from oslo.config import cfg
-from oslo.utils import units
-
-from nova import context
-from nova import exception
-from nova import keymgr
-from nova.openstack.common.fixture import config as config_fixture
-from nova.openstack.common import imageutils
-from nova.openstack.common import uuidutils
-from nova import test
-from nova.tests import fake_processutils
-from nova.tests.virt.libvirt import fake_libvirt_utils
-from nova.virt import images
-from nova.virt.libvirt import imagebackend
-from nova.virt.libvirt import rbd_utils
-
-CONF = cfg.CONF
-CONF.import_opt('fixed_key', 'nova.keymgr.conf_key_mgr', group='keymgr')
-
-
-class _ImageTestCase(object):
-
- def mock_create_image(self, image):
- def create_image(fn, base, size, *args, **kwargs):
- fn(target=base, *args, **kwargs)
- image.create_image = create_image
-
- def setUp(self):
- super(_ImageTestCase, self).setUp()
- self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
- self.INSTANCES_PATH = tempfile.mkdtemp(suffix='instances')
- self.fixture.config(disable_process_locking=True,
- group='oslo_concurrency')
- self.flags(instances_path=self.INSTANCES_PATH)
- self.INSTANCE = {'name': 'instance',
- 'uuid': uuidutils.generate_uuid()}
- self.DISK_INFO_PATH = os.path.join(self.INSTANCES_PATH,
- self.INSTANCE['uuid'], 'disk.info')
- self.NAME = 'fake.vm'
- self.TEMPLATE = 'template'
- self.CONTEXT = context.get_admin_context()
-
- self.OLD_STYLE_INSTANCE_PATH = \
- fake_libvirt_utils.get_instance_path(self.INSTANCE, forceold=True)
- self.PATH = os.path.join(
- fake_libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)
-
- # TODO(mikal): rename template_dir to base_dir and template_path
- # to cached_image_path. This will be less confusing.
- self.TEMPLATE_DIR = os.path.join(CONF.instances_path, '_base')
- self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')
-
- self.useFixture(fixtures.MonkeyPatch(
- 'nova.virt.libvirt.imagebackend.libvirt_utils',
- fake_libvirt_utils))
-
- def tearDown(self):
- super(_ImageTestCase, self).tearDown()
- shutil.rmtree(self.INSTANCES_PATH)
-
- def test_prealloc_image(self):
- CONF.set_override('preallocate_images', 'space')
-
- fake_processutils.fake_execute_clear_log()
- fake_processutils.stub_out_processutils_execute(self.stubs)
- image = self.image_class(self.INSTANCE, self.NAME)
-
- def fake_fetch(target, *args, **kwargs):
- return
-
- self.stubs.Set(os.path, 'exists', lambda _: True)
- self.stubs.Set(os, 'access', lambda p, w: True)
-
- # Call twice to verify testing fallocate is only called once.
- image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
- image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
-
- self.assertEqual(fake_processutils.fake_execute_get_log(),
- ['fallocate -n -l 1 %s.fallocate_test' % self.PATH,
- 'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
- 'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])
-
- def test_prealloc_image_without_write_access(self):
- CONF.set_override('preallocate_images', 'space')
-
- fake_processutils.fake_execute_clear_log()
- fake_processutils.stub_out_processutils_execute(self.stubs)
- image = self.image_class(self.INSTANCE, self.NAME)
-
- def fake_fetch(target, *args, **kwargs):
- return
-
- self.stubs.Set(image, 'check_image_exists', lambda: True)
- self.stubs.Set(image, '_can_fallocate', lambda: True)
- self.stubs.Set(os.path, 'exists', lambda _: True)
- self.stubs.Set(os, 'access', lambda p, w: False)
-
- # Testing fallocate is only called when user has write access.
- image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
-
- self.assertEqual(fake_processutils.fake_execute_get_log(), [])
-
-
-class RawTestCase(_ImageTestCase, test.NoDBTestCase):
-
- SIZE = 1024
-
- def setUp(self):
- self.image_class = imagebackend.Raw
- super(RawTestCase, self).setUp()
- self.stubs.Set(imagebackend.Raw, 'correct_format', lambda _: None)
-
- def prepare_mocks(self):
- fn = self.mox.CreateMockAnything()
- self.mox.StubOutWithMock(imagebackend.utils.synchronized,
- '__call__')
- self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
- self.mox.StubOutWithMock(imagebackend.disk, 'extend')
- return fn
-
- def test_cache(self):
- self.mox.StubOutWithMock(os.path, 'exists')
- if self.OLD_STYLE_INSTANCE_PATH:
- os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
- os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
- os.path.exists(self.PATH).AndReturn(False)
- os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
- fn = self.mox.CreateMockAnything()
- fn(target=self.TEMPLATE_PATH)
- self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
- imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- self.mock_create_image(image)
- image.cache(fn, self.TEMPLATE)
-
- self.mox.VerifyAll()
-
- def test_cache_image_exists(self):
- self.mox.StubOutWithMock(os.path, 'exists')
- if self.OLD_STYLE_INSTANCE_PATH:
- os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
- os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
- os.path.exists(self.PATH).AndReturn(True)
- os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- image.cache(None, self.TEMPLATE)
-
- self.mox.VerifyAll()
-
- def test_cache_base_dir_exists(self):
- self.mox.StubOutWithMock(os.path, 'exists')
- if self.OLD_STYLE_INSTANCE_PATH:
- os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
- os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
- os.path.exists(self.PATH).AndReturn(False)
- os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
- fn = self.mox.CreateMockAnything()
- fn(target=self.TEMPLATE_PATH)
- self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- self.mock_create_image(image)
- image.cache(fn, self.TEMPLATE)
-
- self.mox.VerifyAll()
-
- def test_cache_template_exists(self):
- self.mox.StubOutWithMock(os.path, 'exists')
- if self.OLD_STYLE_INSTANCE_PATH:
- os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
- os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
- os.path.exists(self.PATH).AndReturn(False)
- os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- self.mock_create_image(image)
- image.cache(None, self.TEMPLATE)
-
- self.mox.VerifyAll()
-
- def test_create_image(self):
- fn = self.prepare_mocks()
- fn(target=self.TEMPLATE_PATH, max_size=None, image_id=None)
- imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- image.create_image(fn, self.TEMPLATE_PATH, None, image_id=None)
-
- self.mox.VerifyAll()
-
- def test_create_image_generated(self):
- fn = self.prepare_mocks()
- fn(target=self.PATH)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- image.create_image(fn, self.TEMPLATE_PATH, None)
-
- self.mox.VerifyAll()
-
- @mock.patch.object(images, 'qemu_img_info',
- return_value=imageutils.QemuImgInfo())
- def test_create_image_extend(self, fake_qemu_img_info):
- fn = self.prepare_mocks()
- fn(max_size=self.SIZE, target=self.TEMPLATE_PATH, image_id=None)
- imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
- imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=False)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- image.create_image(fn, self.TEMPLATE_PATH, self.SIZE, image_id=None)
-
- self.mox.VerifyAll()
-
- def test_correct_format(self):
- self.stubs.UnsetAll()
-
- self.mox.StubOutWithMock(os.path, 'exists')
- self.mox.StubOutWithMock(imagebackend.images, 'qemu_img_info')
-
- os.path.exists(self.PATH).AndReturn(True)
- os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
- info = self.mox.CreateMockAnything()
- info.file_format = 'foo'
- imagebackend.images.qemu_img_info(self.PATH).AndReturn(info)
- os.path.exists(CONF.instances_path).AndReturn(True)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME, path=self.PATH)
- self.assertEqual(image.driver_format, 'foo')
-
- self.mox.VerifyAll()
-
- @mock.patch.object(images, 'qemu_img_info',
- side_effect=exception.InvalidDiskInfo(
- reason='invalid path'))
- def test_resolve_driver_format(self, fake_qemu_img_info):
- image = self.image_class(self.INSTANCE, self.NAME)
- driver_format = image.resolve_driver_format()
- self.assertEqual(driver_format, 'raw')
-
-
-class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
- SIZE = units.Gi
-
- def setUp(self):
- self.image_class = imagebackend.Qcow2
- super(Qcow2TestCase, self).setUp()
- self.QCOW2_BASE = (self.TEMPLATE_PATH +
- '_%d' % (self.SIZE / units.Gi))
-
- def prepare_mocks(self):
- fn = self.mox.CreateMockAnything()
- self.mox.StubOutWithMock(imagebackend.utils.synchronized,
- '__call__')
- self.mox.StubOutWithMock(imagebackend.libvirt_utils,
- 'create_cow_image')
- self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
- self.mox.StubOutWithMock(imagebackend.disk, 'extend')
- return fn
-
- def test_cache(self):
- self.mox.StubOutWithMock(os.path, 'exists')
- if self.OLD_STYLE_INSTANCE_PATH:
- os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
- os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
- os.path.exists(CONF.instances_path).AndReturn(True)
- os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
- os.path.exists(self.INSTANCES_PATH).AndReturn(True)
- os.path.exists(self.PATH).AndReturn(False)
- os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
- fn = self.mox.CreateMockAnything()
- fn(target=self.TEMPLATE_PATH)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- self.mock_create_image(image)
- image.cache(fn, self.TEMPLATE)
-
- self.mox.VerifyAll()
-
- def test_cache_image_exists(self):
- self.mox.StubOutWithMock(os.path, 'exists')
- if self.OLD_STYLE_INSTANCE_PATH:
- os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
- os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
- os.path.exists(self.INSTANCES_PATH).AndReturn(True)
- os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
- os.path.exists(self.PATH).AndReturn(True)
- os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- image.cache(None, self.TEMPLATE)
-
- self.mox.VerifyAll()
-
- def test_cache_base_dir_exists(self):
- self.mox.StubOutWithMock(os.path, 'exists')
- if self.OLD_STYLE_INSTANCE_PATH:
- os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
- os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
- os.path.exists(self.INSTANCES_PATH).AndReturn(True)
- os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
- os.path.exists(self.PATH).AndReturn(False)
- os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
- fn = self.mox.CreateMockAnything()
- fn(target=self.TEMPLATE_PATH)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- self.mock_create_image(image)
- image.cache(fn, self.TEMPLATE)
-
- self.mox.VerifyAll()
-
- def test_cache_template_exists(self):
- self.mox.StubOutWithMock(os.path, 'exists')
- if self.OLD_STYLE_INSTANCE_PATH:
- os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
- os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
- os.path.exists(self.INSTANCES_PATH).AndReturn(True)
- os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
- os.path.exists(self.PATH).AndReturn(False)
- os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- self.mock_create_image(image)
- image.cache(None, self.TEMPLATE)
-
- self.mox.VerifyAll()
-
- def test_create_image(self):
- fn = self.prepare_mocks()
- fn(max_size=None, target=self.TEMPLATE_PATH)
- imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
- self.PATH)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- image.create_image(fn, self.TEMPLATE_PATH, None)
-
- self.mox.VerifyAll()
-
- def test_create_image_with_size(self):
- fn = self.prepare_mocks()
- fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
- self.mox.StubOutWithMock(os.path, 'exists')
- if self.OLD_STYLE_INSTANCE_PATH:
- os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
- os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
- os.path.exists(self.INSTANCES_PATH).AndReturn(True)
- os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
- os.path.exists(self.PATH).AndReturn(False)
- os.path.exists(self.PATH).AndReturn(False)
- imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
- self.PATH)
- imagebackend.disk.extend(self.PATH, self.SIZE, use_cow=True)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
-
- self.mox.VerifyAll()
-
- def test_create_image_too_small(self):
- fn = self.prepare_mocks()
- self.mox.StubOutWithMock(os.path, 'exists')
- self.mox.StubOutWithMock(imagebackend.Qcow2, 'get_disk_size')
- if self.OLD_STYLE_INSTANCE_PATH:
- os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
- os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
- os.path.exists(self.INSTANCES_PATH).AndReturn(True)
- os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
- imagebackend.Qcow2.get_disk_size(self.TEMPLATE_PATH
- ).AndReturn(self.SIZE)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- self.assertRaises(exception.FlavorDiskTooSmall,
- image.create_image, fn, self.TEMPLATE_PATH, 1)
- self.mox.VerifyAll()
-
- def test_generate_resized_backing_files(self):
- fn = self.prepare_mocks()
- fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
- self.mox.StubOutWithMock(os.path, 'exists')
- self.mox.StubOutWithMock(imagebackend.libvirt_utils,
- 'get_disk_backing_file')
- if self.OLD_STYLE_INSTANCE_PATH:
- os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
- os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
- os.path.exists(CONF.instances_path).AndReturn(True)
- os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
- os.path.exists(self.PATH).AndReturn(True)
-
- imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
- .AndReturn(self.QCOW2_BASE)
- os.path.exists(self.QCOW2_BASE).AndReturn(False)
- imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH,
- self.QCOW2_BASE)
- imagebackend.disk.extend(self.QCOW2_BASE, self.SIZE, use_cow=True)
-
- os.path.exists(self.PATH).AndReturn(True)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
-
- self.mox.VerifyAll()
-
- def test_qcow2_exists_and_has_no_backing_file(self):
- fn = self.prepare_mocks()
- fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
- self.mox.StubOutWithMock(os.path, 'exists')
- self.mox.StubOutWithMock(imagebackend.libvirt_utils,
- 'get_disk_backing_file')
- if self.OLD_STYLE_INSTANCE_PATH:
- os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
- os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
- os.path.exists(self.INSTANCES_PATH).AndReturn(True)
-
- os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
- os.path.exists(self.PATH).AndReturn(True)
-
- imagebackend.libvirt_utils.get_disk_backing_file(self.PATH)\
- .AndReturn(None)
- os.path.exists(self.PATH).AndReturn(True)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
-
- self.mox.VerifyAll()
-
- def test_resolve_driver_format(self):
- image = self.image_class(self.INSTANCE, self.NAME)
- driver_format = image.resolve_driver_format()
- self.assertEqual(driver_format, 'qcow2')
-
-
-class LvmTestCase(_ImageTestCase, test.NoDBTestCase):
- VG = 'FakeVG'
- TEMPLATE_SIZE = 512
- SIZE = 1024
-
- def setUp(self):
- self.image_class = imagebackend.Lvm
- super(LvmTestCase, self).setUp()
- self.flags(images_volume_group=self.VG, group='libvirt')
- self.flags(enabled=False, group='ephemeral_storage_encryption')
- self.INSTANCE['ephemeral_key_uuid'] = None
- self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
- self.OLD_STYLE_INSTANCE_PATH = None
- self.PATH = os.path.join('/dev', self.VG, self.LV)
- self.disk = imagebackend.disk
- self.utils = imagebackend.utils
- self.lvm = imagebackend.lvm
-
- def prepare_mocks(self):
- fn = self.mox.CreateMockAnything()
- self.mox.StubOutWithMock(self.disk, 'resize2fs')
- self.mox.StubOutWithMock(self.lvm, 'create_volume')
- self.mox.StubOutWithMock(self.disk, 'get_disk_size')
- self.mox.StubOutWithMock(self.utils, 'execute')
- return fn
-
- def _create_image(self, sparse):
- fn = self.prepare_mocks()
- fn(max_size=None, target=self.TEMPLATE_PATH)
- self.lvm.create_volume(self.VG,
- self.LV,
- self.TEMPLATE_SIZE,
- sparse=sparse)
- self.disk.get_disk_size(self.TEMPLATE_PATH
- ).AndReturn(self.TEMPLATE_SIZE)
- cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
- self.PATH)
- self.utils.execute(*cmd, run_as_root=True)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- image.create_image(fn, self.TEMPLATE_PATH, None)
-
- self.mox.VerifyAll()
-
- def _create_image_generated(self, sparse):
- fn = self.prepare_mocks()
- self.lvm.create_volume(self.VG, self.LV,
- self.SIZE, sparse=sparse)
- fn(target=self.PATH, ephemeral_size=None)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- image.create_image(fn, self.TEMPLATE_PATH,
- self.SIZE, ephemeral_size=None)
-
- self.mox.VerifyAll()
-
- def _create_image_resize(self, sparse):
- fn = self.prepare_mocks()
- fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
- self.lvm.create_volume(self.VG, self.LV,
- self.SIZE, sparse=sparse)
- self.disk.get_disk_size(self.TEMPLATE_PATH
- ).AndReturn(self.TEMPLATE_SIZE)
- cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
- self.PATH)
- self.utils.execute(*cmd, run_as_root=True)
- self.disk.resize2fs(self.PATH, run_as_root=True)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
-
- self.mox.VerifyAll()
-
- def test_cache(self):
- self.mox.StubOutWithMock(os.path, 'exists')
- if self.OLD_STYLE_INSTANCE_PATH:
- os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
- os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
- os.path.exists(self.PATH).AndReturn(False)
- os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
-
- fn = self.mox.CreateMockAnything()
- fn(target=self.TEMPLATE_PATH)
- self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
- imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- self.mock_create_image(image)
- image.cache(fn, self.TEMPLATE)
-
- self.mox.VerifyAll()
-
- def test_cache_image_exists(self):
- self.mox.StubOutWithMock(os.path, 'exists')
- if self.OLD_STYLE_INSTANCE_PATH:
- os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
- os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
- os.path.exists(self.PATH).AndReturn(True)
- os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- image.cache(None, self.TEMPLATE)
-
- self.mox.VerifyAll()
-
- def test_cache_base_dir_exists(self):
- self.mox.StubOutWithMock(os.path, 'exists')
- if self.OLD_STYLE_INSTANCE_PATH:
- os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
- os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
- os.path.exists(self.PATH).AndReturn(False)
- os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
- fn = self.mox.CreateMockAnything()
- fn(target=self.TEMPLATE_PATH)
- self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- self.mock_create_image(image)
- image.cache(fn, self.TEMPLATE)
-
- self.mox.VerifyAll()
-
- def test_create_image(self):
- self._create_image(False)
-
- def test_create_image_sparsed(self):
- self.flags(sparse_logical_volumes=True, group='libvirt')
- self._create_image(True)
-
- def test_create_image_generated(self):
- self._create_image_generated(False)
-
- def test_create_image_generated_sparsed(self):
- self.flags(sparse_logical_volumes=True, group='libvirt')
- self._create_image_generated(True)
-
- def test_create_image_resize(self):
- self._create_image_resize(False)
-
- def test_create_image_resize_sparsed(self):
- self.flags(sparse_logical_volumes=True, group='libvirt')
- self._create_image_resize(True)
-
- def test_create_image_negative(self):
- fn = self.prepare_mocks()
- fn(max_size=self.SIZE, target=self.TEMPLATE_PATH)
- self.lvm.create_volume(self.VG,
- self.LV,
- self.SIZE,
- sparse=False
- ).AndRaise(RuntimeError())
- self.disk.get_disk_size(self.TEMPLATE_PATH
- ).AndReturn(self.TEMPLATE_SIZE)
- self.mox.StubOutWithMock(self.lvm, 'remove_volumes')
- self.lvm.remove_volumes([self.PATH])
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
-
- self.assertRaises(RuntimeError, image.create_image, fn,
- self.TEMPLATE_PATH, self.SIZE)
- self.mox.VerifyAll()
-
- def test_create_image_generated_negative(self):
- fn = self.prepare_mocks()
- fn(target=self.PATH,
- ephemeral_size=None).AndRaise(RuntimeError())
- self.lvm.create_volume(self.VG,
- self.LV,
- self.SIZE,
- sparse=False)
- self.mox.StubOutWithMock(self.lvm, 'remove_volumes')
- self.lvm.remove_volumes([self.PATH])
- self.mox.ReplayAll()
-
- image = self.image_class(self.INSTANCE, self.NAME)
-
- self.assertRaises(RuntimeError, image.create_image, fn,
- self.TEMPLATE_PATH, self.SIZE,
- ephemeral_size=None)
- self.mox.VerifyAll()
-
- def test_prealloc_image(self):
- CONF.set_override('preallocate_images', 'space')
-
- fake_processutils.fake_execute_clear_log()
- fake_processutils.stub_out_processutils_execute(self.stubs)
- image = self.image_class(self.INSTANCE, self.NAME)
-
- def fake_fetch(target, *args, **kwargs):
- return
-
- self.stubs.Set(os.path, 'exists', lambda _: True)
- self.stubs.Set(image, 'check_image_exists', lambda: True)
-
- image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
-
- self.assertEqual(fake_processutils.fake_execute_get_log(), [])
-
-
-class EncryptedLvmTestCase(_ImageTestCase, test.NoDBTestCase):
- VG = 'FakeVG'
- TEMPLATE_SIZE = 512
- SIZE = 1024
-
- def setUp(self):
- super(EncryptedLvmTestCase, self).setUp()
- self.image_class = imagebackend.Lvm
- self.flags(enabled=True, group='ephemeral_storage_encryption')
- self.flags(cipher='aes-xts-plain64',
- group='ephemeral_storage_encryption')
- self.flags(key_size=512, group='ephemeral_storage_encryption')
- self.flags(fixed_key='00000000000000000000000000000000'
- '00000000000000000000000000000000',
- group='keymgr')
- self.flags(images_volume_group=self.VG, group='libvirt')
- self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
- self.OLD_STYLE_INSTANCE_PATH = None
- self.LV_PATH = os.path.join('/dev', self.VG, self.LV)
- self.PATH = os.path.join('/dev/mapper',
- imagebackend.dmcrypt.volume_name(self.LV))
- self.key_manager = keymgr.API()
- self.INSTANCE['ephemeral_key_uuid'] =\
- self.key_manager.create_key(self.CONTEXT)
- self.KEY = self.key_manager.get_key(self.CONTEXT,
- self.INSTANCE['ephemeral_key_uuid']).get_encoded()
-
- self.lvm = imagebackend.lvm
- self.disk = imagebackend.disk
- self.utils = imagebackend.utils
- self.libvirt_utils = imagebackend.libvirt_utils
- self.dmcrypt = imagebackend.dmcrypt
-
- def _create_image(self, sparse):
- with contextlib.nested(
- mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
- mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
- mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
- mock.patch.object(self.disk, 'get_disk_size',
- mock.Mock(return_value=self.TEMPLATE_SIZE)),
- mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
- mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
- mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
- mock.patch.object(self.libvirt_utils, 'create_lvm_image',
- mock.Mock()),
- mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
- mock.Mock()),
- mock.patch.object(self.utils, 'execute', mock.Mock())):
- fn = mock.Mock()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- image.create_image(fn, self.TEMPLATE_PATH, self.TEMPLATE_SIZE,
- context=self.CONTEXT)
-
- fn.assert_called_with(context=self.CONTEXT,
- max_size=self.TEMPLATE_SIZE,
- target=self.TEMPLATE_PATH)
- self.lvm.create_volume.assert_called_with(self.VG,
- self.LV,
- self.TEMPLATE_SIZE,
- sparse=sparse)
- self.dmcrypt.create_volume.assert_called_with(
- self.PATH.rpartition('/')[2],
- self.LV_PATH,
- CONF.ephemeral_storage_encryption.cipher,
- CONF.ephemeral_storage_encryption.key_size,
- self.KEY)
- cmd = ('qemu-img',
- 'convert',
- '-O',
- 'raw',
- self.TEMPLATE_PATH,
- self.PATH)
- self.utils.execute.assert_called_with(*cmd, run_as_root=True)
-
- def _create_image_generated(self, sparse):
- with contextlib.nested(
- mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
- mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
- mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
- mock.patch.object(self.disk, 'get_disk_size',
- mock.Mock(return_value=self.TEMPLATE_SIZE)),
- mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
- mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
- mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
- mock.patch.object(self.libvirt_utils, 'create_lvm_image',
- mock.Mock()),
- mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
- mock.Mock()),
- mock.patch.object(self.utils, 'execute', mock.Mock())):
- fn = mock.Mock()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- image.create_image(fn, self.TEMPLATE_PATH,
- self.SIZE,
- ephemeral_size=None,
- context=self.CONTEXT)
-
- self.lvm.create_volume.assert_called_with(
- self.VG,
- self.LV,
- self.SIZE,
- sparse=sparse)
- self.dmcrypt.create_volume.assert_called_with(
- self.PATH.rpartition('/')[2],
- self.LV_PATH,
- CONF.ephemeral_storage_encryption.cipher,
- CONF.ephemeral_storage_encryption.key_size,
- self.KEY)
- fn.assert_called_with(target=self.PATH,
- ephemeral_size=None, context=self.CONTEXT)
-
- def _create_image_resize(self, sparse):
- with contextlib.nested(
- mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
- mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
- mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
- mock.patch.object(self.disk, 'get_disk_size',
- mock.Mock(return_value=self.TEMPLATE_SIZE)),
- mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
- mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
- mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
- mock.patch.object(self.libvirt_utils, 'create_lvm_image',
- mock.Mock()),
- mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
- mock.Mock()),
- mock.patch.object(self.utils, 'execute', mock.Mock())):
- fn = mock.Mock()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- image.create_image(fn, self.TEMPLATE_PATH, self.SIZE,
- context=self.CONTEXT)
-
- fn.assert_called_with(context=self.CONTEXT, max_size=self.SIZE,
- target=self.TEMPLATE_PATH)
- self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
- self.lvm.create_volume.assert_called_with(
- self.VG,
- self.LV,
- self.SIZE,
- sparse=sparse)
- self.dmcrypt.create_volume.assert_called_with(
- self.PATH.rpartition('/')[2],
- self.LV_PATH,
- CONF.ephemeral_storage_encryption.cipher,
- CONF.ephemeral_storage_encryption.key_size,
- self.KEY)
- cmd = ('qemu-img',
- 'convert',
- '-O',
- 'raw',
- self.TEMPLATE_PATH,
- self.PATH)
- self.utils.execute.assert_called_with(*cmd, run_as_root=True)
- self.disk.resize2fs.assert_called_with(self.PATH, run_as_root=True)
-
- def test_create_image(self):
- self._create_image(False)
-
- def test_create_image_sparsed(self):
- self.flags(sparse_logical_volumes=True, group='libvirt')
- self._create_image(True)
-
- def test_create_image_generated(self):
- self._create_image_generated(False)
-
- def test_create_image_generated_sparsed(self):
- self.flags(sparse_logical_volumes=True, group='libvirt')
- self._create_image_generated(True)
-
- def test_create_image_resize(self):
- self._create_image_resize(False)
-
- def test_create_image_resize_sparsed(self):
- self.flags(sparse_logical_volumes=True, group='libvirt')
- self._create_image_resize(True)
-
- def test_create_image_negative(self):
- with contextlib.nested(
- mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
- mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
- mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
- mock.patch.object(self.disk, 'get_disk_size',
- mock.Mock(return_value=self.TEMPLATE_SIZE)),
- mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
- mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
- mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
- mock.patch.object(self.libvirt_utils, 'create_lvm_image',
- mock.Mock()),
- mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
- mock.Mock()),
- mock.patch.object(self.utils, 'execute', mock.Mock())):
- fn = mock.Mock()
- self.lvm.create_volume.side_effect = RuntimeError()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- self.assertRaises(
- RuntimeError,
- image.create_image,
- fn,
- self.TEMPLATE_PATH,
- self.SIZE,
- context=self.CONTEXT)
-
- fn.assert_called_with(
- context=self.CONTEXT,
- max_size=self.SIZE,
- target=self.TEMPLATE_PATH)
- self.disk.get_disk_size.assert_called_with(
- self.TEMPLATE_PATH)
- self.lvm.create_volume.assert_called_with(
- self.VG,
- self.LV,
- self.SIZE,
- sparse=False)
- self.dmcrypt.delete_volume.assert_called_with(
- self.PATH.rpartition('/')[2])
- self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
-
- def test_create_image_encrypt_negative(self):
- with contextlib.nested(
- mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
- mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
- mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
- mock.patch.object(self.disk, 'get_disk_size',
- mock.Mock(return_value=self.TEMPLATE_SIZE)),
- mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
- mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
- mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
- mock.patch.object(self.libvirt_utils, 'create_lvm_image',
- mock.Mock()),
- mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
- mock.Mock()),
- mock.patch.object(self.utils, 'execute', mock.Mock())):
- fn = mock.Mock()
- self.dmcrypt.create_volume.side_effect = RuntimeError()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- self.assertRaises(
- RuntimeError,
- image.create_image,
- fn,
- self.TEMPLATE_PATH,
- self.SIZE,
- context=self.CONTEXT)
-
- fn.assert_called_with(
- context=self.CONTEXT,
- max_size=self.SIZE,
- target=self.TEMPLATE_PATH)
- self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
- self.lvm.create_volume.assert_called_with(
- self.VG,
- self.LV,
- self.SIZE,
- sparse=False)
- self.dmcrypt.create_volume.assert_called_with(
- self.dmcrypt.volume_name(self.LV),
- self.LV_PATH,
- CONF.ephemeral_storage_encryption.cipher,
- CONF.ephemeral_storage_encryption.key_size,
- self.KEY)
- self.dmcrypt.delete_volume.assert_called_with(
- self.PATH.rpartition('/')[2])
- self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
-
- def test_create_image_generated_negative(self):
- with contextlib.nested(
- mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
- mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
- mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
- mock.patch.object(self.disk, 'get_disk_size',
- mock.Mock(return_value=self.TEMPLATE_SIZE)),
- mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
- mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
- mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
- mock.patch.object(self.libvirt_utils, 'create_lvm_image',
- mock.Mock()),
- mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
- mock.Mock()),
- mock.patch.object(self.utils, 'execute', mock.Mock())):
- fn = mock.Mock()
- fn.side_effect = RuntimeError()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- self.assertRaises(RuntimeError,
- image.create_image,
- fn,
- self.TEMPLATE_PATH,
- self.SIZE,
- ephemeral_size=None,
- context=self.CONTEXT)
-
- self.lvm.create_volume.assert_called_with(
- self.VG,
- self.LV,
- self.SIZE,
- sparse=False)
- self.dmcrypt.create_volume.assert_called_with(
- self.PATH.rpartition('/')[2],
- self.LV_PATH,
- CONF.ephemeral_storage_encryption.cipher,
- CONF.ephemeral_storage_encryption.key_size,
- self.KEY)
- fn.assert_called_with(
- target=self.PATH,
- ephemeral_size=None,
- context=self.CONTEXT)
- self.dmcrypt.delete_volume.assert_called_with(
- self.PATH.rpartition('/')[2])
- self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
-
- def test_create_image_generated_encrypt_negative(self):
- with contextlib.nested(
- mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
- mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
- mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
- mock.patch.object(self.disk, 'get_disk_size',
- mock.Mock(return_value=self.TEMPLATE_SIZE)),
- mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
- mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
- mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
- mock.patch.object(self.libvirt_utils, 'create_lvm_image',
- mock.Mock()),
- mock.patch.object(self.libvirt_utils, 'remove_logical_volumes',
- mock.Mock()),
- mock.patch.object(self.utils, 'execute', mock.Mock())):
- fn = mock.Mock()
- fn.side_effect = RuntimeError()
-
- image = self.image_class(self.INSTANCE, self.NAME)
- self.assertRaises(
- RuntimeError,
- image.create_image,
- fn,
- self.TEMPLATE_PATH,
- self.SIZE,
- ephemeral_size=None,
- context=self.CONTEXT)
-
- self.lvm.create_volume.assert_called_with(
- self.VG,
- self.LV,
- self.SIZE,
- sparse=False)
- self.dmcrypt.create_volume.assert_called_with(
- self.PATH.rpartition('/')[2],
- self.LV_PATH,
- CONF.ephemeral_storage_encryption.cipher,
- CONF.ephemeral_storage_encryption.key_size,
- self.KEY)
- self.dmcrypt.delete_volume.assert_called_with(
- self.PATH.rpartition('/')[2])
- self.lvm.remove_volumes.assert_called_with([self.LV_PATH])
-
- def test_prealloc_image(self):
- self.flags(preallocate_images='space')
- fake_processutils.fake_execute_clear_log()
- fake_processutils.stub_out_processutils_execute(self.stubs)
- image = self.image_class(self.INSTANCE, self.NAME)
-
- def fake_fetch(target, *args, **kwargs):
- return
-
- self.stubs.Set(os.path, 'exists', lambda _: True)
- self.stubs.Set(image, 'check_image_exists', lambda: True)
-
- image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
-
- self.assertEqual(fake_processutils.fake_execute_get_log(), [])
-
-
-class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
- POOL = "FakePool"
- USER = "FakeUser"
- CONF = "FakeConf"
- SIZE = 1024
-
- def setUp(self):
- self.image_class = imagebackend.Rbd
- super(RbdTestCase, self).setUp()
- self.flags(images_rbd_pool=self.POOL,
- rbd_user=self.USER,
- images_rbd_ceph_conf=self.CONF,
- group='libvirt')
- self.libvirt_utils = imagebackend.libvirt_utils
- self.utils = imagebackend.utils
- self.mox.StubOutWithMock(rbd_utils, 'rbd')
- self.mox.StubOutWithMock(rbd_utils, 'rados')
-
- def test_cache(self):
- image = self.image_class(self.INSTANCE, self.NAME)
-
- self.mox.StubOutWithMock(os.path, 'exists')
- self.mox.StubOutWithMock(image, 'check_image_exists')
- os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
- image.check_image_exists().AndReturn(False)
- os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
- fn = self.mox.CreateMockAnything()
- fn(target=self.TEMPLATE_PATH)
- self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
- imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
- self.mox.ReplayAll()
-
- self.mock_create_image(image)
- image.cache(fn, self.TEMPLATE)
-
- self.mox.VerifyAll()
-
- def test_cache_base_dir_exists(self):
- fn = self.mox.CreateMockAnything()
- image = self.image_class(self.INSTANCE, self.NAME)
-
- self.mox.StubOutWithMock(os.path, 'exists')
- self.mox.StubOutWithMock(image, 'check_image_exists')
- os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
- image.check_image_exists().AndReturn(False)
- os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
- fn = self.mox.CreateMockAnything()
- fn(target=self.TEMPLATE_PATH)
- self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
- self.mox.ReplayAll()
-
- self.mock_create_image(image)
- image.cache(fn, self.TEMPLATE)
-
- self.mox.VerifyAll()
-
- def test_cache_image_exists(self):
- image = self.image_class(self.INSTANCE, self.NAME)
-
- self.mox.StubOutWithMock(os.path, 'exists')
- self.mox.StubOutWithMock(image, 'check_image_exists')
- os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
- image.check_image_exists().AndReturn(True)
- os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
- self.mox.ReplayAll()
-
- image.cache(None, self.TEMPLATE)
-
- self.mox.VerifyAll()
-
- def test_cache_template_exists(self):
- image = self.image_class(self.INSTANCE, self.NAME)
-
- self.mox.StubOutWithMock(os.path, 'exists')
- self.mox.StubOutWithMock(image, 'check_image_exists')
- os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
- image.check_image_exists().AndReturn(False)
- os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
- self.mox.ReplayAll()
-
- self.mock_create_image(image)
- image.cache(None, self.TEMPLATE)
-
- self.mox.VerifyAll()
-
- def test_create_image(self):
- fn = self.mox.CreateMockAnything()
- fn(max_size=None, target=self.TEMPLATE_PATH)
-
- rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
-
- fake_processutils.fake_execute_clear_log()
- fake_processutils.stub_out_processutils_execute(self.stubs)
-
- image = self.image_class(self.INSTANCE, self.NAME)
- self.mox.StubOutWithMock(image, 'check_image_exists')
- image.check_image_exists().AndReturn(False)
- image.check_image_exists().AndReturn(False)
- self.mox.ReplayAll()
-
- image.create_image(fn, self.TEMPLATE_PATH, None)
-
- rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
- cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
- rbd_name, '--new-format', '--id', self.USER,
- '--conf', self.CONF)
- self.assertEqual(fake_processutils.fake_execute_get_log(),
- [' '.join(cmd)])
- self.mox.VerifyAll()
-
- def test_create_image_resize(self):
- fn = self.mox.CreateMockAnything()
- full_size = self.SIZE * 2
- fn(max_size=full_size, target=self.TEMPLATE_PATH)
-
- rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
-
- fake_processutils.fake_execute_clear_log()
- fake_processutils.stub_out_processutils_execute(self.stubs)
-
- image = self.image_class(self.INSTANCE, self.NAME)
- self.mox.StubOutWithMock(image, 'check_image_exists')
- image.check_image_exists().AndReturn(False)
- image.check_image_exists().AndReturn(False)
- rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
- cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
- rbd_name, '--new-format', '--id', self.USER,
- '--conf', self.CONF)
- self.mox.StubOutWithMock(image, 'get_disk_size')
- image.get_disk_size(rbd_name).AndReturn(self.SIZE)
- self.mox.StubOutWithMock(image.driver, 'resize')
- image.driver.resize(rbd_name, full_size)
-
- self.mox.ReplayAll()
-
- image.create_image(fn, self.TEMPLATE_PATH, full_size)
-
- self.assertEqual(fake_processutils.fake_execute_get_log(),
- [' '.join(cmd)])
- self.mox.VerifyAll()
-
- def test_create_image_already_exists(self):
- rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
-
- image = self.image_class(self.INSTANCE, self.NAME)
- self.mox.StubOutWithMock(image, 'check_image_exists')
- image.check_image_exists().AndReturn(True)
- self.mox.StubOutWithMock(image, 'get_disk_size')
- image.get_disk_size(self.TEMPLATE_PATH).AndReturn(self.SIZE)
- image.check_image_exists().AndReturn(True)
- rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
- image.get_disk_size(rbd_name).AndReturn(self.SIZE)
-
- self.mox.ReplayAll()
-
- fn = self.mox.CreateMockAnything()
- image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
-
- self.mox.VerifyAll()
-
- def test_prealloc_image(self):
- CONF.set_override('preallocate_images', 'space')
-
- fake_processutils.fake_execute_clear_log()
- fake_processutils.stub_out_processutils_execute(self.stubs)
- image = self.image_class(self.INSTANCE, self.NAME)
-
- def fake_fetch(target, *args, **kwargs):
- return
-
- def fake_resize(rbd_name, size):
- return
-
- self.stubs.Set(os.path, 'exists', lambda _: True)
- self.stubs.Set(image, 'check_image_exists', lambda: True)
-
- image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
-
- self.assertEqual(fake_processutils.fake_execute_get_log(), [])
-
- def test_parent_compatible(self):
- self.assertEqual(inspect.getargspec(imagebackend.Image.libvirt_info),
- inspect.getargspec(self.image_class.libvirt_info))
-
- def test_image_path(self):
-
- conf = "FakeConf"
- pool = "FakePool"
- user = "FakeUser"
-
- self.flags(images_rbd_pool=pool, group='libvirt')
- self.flags(images_rbd_ceph_conf=conf, group='libvirt')
- self.flags(rbd_user=user, group='libvirt')
- image = self.image_class(self.INSTANCE, self.NAME)
- rbd_path = "rbd:%s/%s:id=%s:conf=%s" % (pool, image.rbd_name,
- user, conf)
-
- self.assertEqual(image.path, rbd_path)
-
-
-class BackendTestCase(test.NoDBTestCase):
- INSTANCE = {'name': 'fake-instance',
- 'uuid': uuidutils.generate_uuid()}
- NAME = 'fake-name.suffix'
-
- def setUp(self):
- super(BackendTestCase, self).setUp()
- self.flags(enabled=False, group='ephemeral_storage_encryption')
- self.INSTANCE['ephemeral_key_uuid'] = None
-
- def get_image(self, use_cow, image_type):
- return imagebackend.Backend(use_cow).image(self.INSTANCE,
- self.NAME,
- image_type)
-
- def _test_image(self, image_type, image_not_cow, image_cow):
- image1 = self.get_image(False, image_type)
- image2 = self.get_image(True, image_type)
-
- def assertIsInstance(instance, class_object):
- failure = ('Expected %s,' +
- ' but got %s.') % (class_object.__name__,
- instance.__class__.__name__)
- self.assertIsInstance(instance, class_object, msg=failure)
-
- assertIsInstance(image1, image_not_cow)
- assertIsInstance(image2, image_cow)
-
- def test_image_raw(self):
- self._test_image('raw', imagebackend.Raw, imagebackend.Raw)
-
- def test_image_qcow2(self):
- self._test_image('qcow2', imagebackend.Qcow2, imagebackend.Qcow2)
-
- def test_image_lvm(self):
- self.flags(images_volume_group='FakeVG', group='libvirt')
- self._test_image('lvm', imagebackend.Lvm, imagebackend.Lvm)
-
- def test_image_rbd(self):
- conf = "FakeConf"
- pool = "FakePool"
- self.flags(images_rbd_pool=pool, group='libvirt')
- self.flags(images_rbd_ceph_conf=conf, group='libvirt')
- self.mox.StubOutWithMock(rbd_utils, 'rbd')
- self.mox.StubOutWithMock(rbd_utils, 'rados')
- self._test_image('rbd', imagebackend.Rbd, imagebackend.Rbd)
-
- def test_image_default(self):
- self._test_image('default', imagebackend.Raw, imagebackend.Qcow2)
-
-
-class UtilTestCase(test.NoDBTestCase):
- def test_get_hw_disk_discard(self):
- self.assertEqual('unmap', imagebackend.get_hw_disk_discard("unmap"))
- self.assertEqual('ignore', imagebackend.get_hw_disk_discard("ignore"))
- self.assertIsNone(imagebackend.get_hw_disk_discard(None))
- self.assertRaises(RuntimeError, imagebackend.get_hw_disk_discard,
- "fake")
diff --git a/nova/tests/virt/libvirt/test_imagecache.py b/nova/tests/virt/libvirt/test_imagecache.py
deleted file mode 100644
index a2eb4a1c67..0000000000
--- a/nova/tests/virt/libvirt/test_imagecache.py
+++ /dev/null
@@ -1,887 +0,0 @@
-# Copyright 2012 Michael Still and Canonical Inc
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import contextlib
-import cStringIO
-import hashlib
-import os
-import time
-
-from oslo.concurrency import processutils
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-from oslo.utils import importutils
-
-from nova import conductor
-from nova import db
-from nova.openstack.common import log as logging
-from nova import test
-from nova.tests import fake_instance
-from nova import utils
-from nova.virt.libvirt import imagecache
-from nova.virt.libvirt import utils as libvirt_utils
-
-CONF = cfg.CONF
-CONF.import_opt('compute_manager', 'nova.service')
-CONF.import_opt('host', 'nova.netconf')
-
-
-@contextlib.contextmanager
-def intercept_log_messages():
- try:
- mylog = logging.getLogger('nova')
- stream = cStringIO.StringIO()
- handler = logging.logging.StreamHandler(stream)
- handler.setFormatter(logging.ContextFormatter())
- mylog.logger.addHandler(handler)
- yield stream
- finally:
- mylog.logger.removeHandler(handler)
-
-
-class ImageCacheManagerTestCase(test.NoDBTestCase):
-
- def setUp(self):
- super(ImageCacheManagerTestCase, self).setUp()
- self.stock_instance_names = set(['instance-00000001',
- 'instance-00000002',
- 'instance-00000003',
- 'banana-42-hamster'])
-
- def test_read_stored_checksum_missing(self):
- self.stubs.Set(os.path, 'exists', lambda x: False)
- csum = imagecache.read_stored_checksum('/tmp/foo', timestamped=False)
- self.assertIsNone(csum)
-
- def test_read_stored_checksum(self):
- with utils.tempdir() as tmpdir:
- self.flags(instances_path=tmpdir)
- self.flags(image_info_filename_pattern=('$instances_path/'
- '%(image)s.info'),
- group='libvirt')
-
- csum_input = '{"sha1": "fdghkfhkgjjksfdgjksjkghsdf"}\n'
- fname = os.path.join(tmpdir, 'aaa')
- info_fname = imagecache.get_info_filename(fname)
- f = open(info_fname, 'w')
- f.write(csum_input)
- f.close()
-
- csum_output = imagecache.read_stored_checksum(fname,
- timestamped=False)
- self.assertEqual(csum_input.rstrip(),
- '{"sha1": "%s"}' % csum_output)
-
- def test_read_stored_checksum_legacy_essex(self):
- with utils.tempdir() as tmpdir:
- self.flags(instances_path=tmpdir)
- self.flags(image_info_filename_pattern=('$instances_path/'
- '%(image)s.info'),
- group='libvirt')
-
- fname = os.path.join(tmpdir, 'aaa')
- old_fname = fname + '.sha1'
- f = open(old_fname, 'w')
- f.write('fdghkfhkgjjksfdgjksjkghsdf')
- f.close()
-
- csum_output = imagecache.read_stored_checksum(fname,
- timestamped=False)
- self.assertEqual(csum_output, 'fdghkfhkgjjksfdgjksjkghsdf')
- self.assertFalse(os.path.exists(old_fname))
- info_fname = imagecache.get_info_filename(fname)
- self.assertTrue(os.path.exists(info_fname))
-
- def test_list_base_images(self):
- listing = ['00000001',
- 'ephemeral_0_20_None',
- '17d1b00b81642842e514494a78e804e9a511637c_5368709120.info',
- '00000004']
- images = ['e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
- 'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
- 'e97222e91fc4241f49a7f520d1dcf446751129b3',
- '17d1b00b81642842e514494a78e804e9a511637c',
- '17d1b00b81642842e514494a78e804e9a511637c_5368709120',
- '17d1b00b81642842e514494a78e804e9a511637c_10737418240']
- listing.extend(images)
-
- self.stubs.Set(os, 'listdir', lambda x: listing)
- self.stubs.Set(os.path, 'isfile', lambda x: True)
-
- base_dir = '/var/lib/nova/instances/_base'
- self.flags(instances_path='/var/lib/nova/instances')
-
- image_cache_manager = imagecache.ImageCacheManager()
- image_cache_manager._list_base_images(base_dir)
-
- sanitized = []
- for ent in image_cache_manager.unexplained_images:
- sanitized.append(ent.replace(base_dir + '/', ''))
-
- self.assertEqual(sorted(sanitized), sorted(images))
-
- expected = os.path.join(base_dir,
- 'e97222e91fc4241f49a7f520d1dcf446751129b3')
- self.assertIn(expected, image_cache_manager.unexplained_images)
-
- expected = os.path.join(base_dir,
- '17d1b00b81642842e514494a78e804e9a511637c_'
- '10737418240')
- self.assertIn(expected, image_cache_manager.unexplained_images)
-
- unexpected = os.path.join(base_dir, '00000004')
- self.assertNotIn(unexpected, image_cache_manager.unexplained_images)
-
- for ent in image_cache_manager.unexplained_images:
- self.assertTrue(ent.startswith(base_dir))
-
- self.assertEqual(len(image_cache_manager.originals), 2)
-
- expected = os.path.join(base_dir,
- '17d1b00b81642842e514494a78e804e9a511637c')
- self.assertIn(expected, image_cache_manager.originals)
-
- unexpected = os.path.join(base_dir,
- '17d1b00b81642842e514494a78e804e9a511637c_'
- '10737418240')
- self.assertNotIn(unexpected, image_cache_manager.originals)
-
- def test_list_backing_images_small(self):
- self.stubs.Set(os, 'listdir',
- lambda x: ['_base', 'instance-00000001',
- 'instance-00000002', 'instance-00000003'])
- self.stubs.Set(os.path, 'exists',
- lambda x: x.find('instance-') != -1)
- self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
- lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
-
- found = os.path.join(CONF.instances_path,
- CONF.image_cache_subdirectory_name,
- 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
-
- image_cache_manager = imagecache.ImageCacheManager()
- image_cache_manager.unexplained_images = [found]
- image_cache_manager.instance_names = self.stock_instance_names
-
- inuse_images = image_cache_manager._list_backing_images()
-
- self.assertEqual(inuse_images, [found])
- self.assertEqual(len(image_cache_manager.unexplained_images), 0)
-
- def test_list_backing_images_resized(self):
- self.stubs.Set(os, 'listdir',
- lambda x: ['_base', 'instance-00000001',
- 'instance-00000002', 'instance-00000003'])
- self.stubs.Set(os.path, 'exists',
- lambda x: x.find('instance-') != -1)
- self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
- lambda x: ('e97222e91fc4241f49a7f520d1dcf446751129b3_'
- '10737418240'))
-
- found = os.path.join(CONF.instances_path,
- CONF.image_cache_subdirectory_name,
- 'e97222e91fc4241f49a7f520d1dcf446751129b3_'
- '10737418240')
-
- image_cache_manager = imagecache.ImageCacheManager()
- image_cache_manager.unexplained_images = [found]
- image_cache_manager.instance_names = self.stock_instance_names
-
- inuse_images = image_cache_manager._list_backing_images()
-
- self.assertEqual(inuse_images, [found])
- self.assertEqual(len(image_cache_manager.unexplained_images), 0)
-
- def test_list_backing_images_instancename(self):
- self.stubs.Set(os, 'listdir',
- lambda x: ['_base', 'banana-42-hamster'])
- self.stubs.Set(os.path, 'exists',
- lambda x: x.find('banana-42-hamster') != -1)
- self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
- lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
-
- found = os.path.join(CONF.instances_path,
- CONF.image_cache_subdirectory_name,
- 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
-
- image_cache_manager = imagecache.ImageCacheManager()
- image_cache_manager.unexplained_images = [found]
- image_cache_manager.instance_names = self.stock_instance_names
-
- inuse_images = image_cache_manager._list_backing_images()
-
- self.assertEqual(inuse_images, [found])
- self.assertEqual(len(image_cache_manager.unexplained_images), 0)
-
- def test_list_backing_images_disk_notexist(self):
- self.stubs.Set(os, 'listdir',
- lambda x: ['_base', 'banana-42-hamster'])
- self.stubs.Set(os.path, 'exists',
- lambda x: x.find('banana-42-hamster') != -1)
-
- def fake_get_disk(disk_path):
- raise processutils.ProcessExecutionError()
-
- self.stubs.Set(libvirt_utils, 'get_disk_backing_file', fake_get_disk)
-
- image_cache_manager = imagecache.ImageCacheManager()
- image_cache_manager.unexplained_images = []
- image_cache_manager.instance_names = self.stock_instance_names
-
- self.assertRaises(processutils.ProcessExecutionError,
- image_cache_manager._list_backing_images)
-
- def test_find_base_file_nothing(self):
- self.stubs.Set(os.path, 'exists', lambda x: False)
-
- base_dir = '/var/lib/nova/instances/_base'
- fingerprint = '549867354867'
- image_cache_manager = imagecache.ImageCacheManager()
- res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
-
- self.assertEqual(0, len(res))
-
- def test_find_base_file_small(self):
- fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
- self.stubs.Set(os.path, 'exists',
- lambda x: x.endswith('%s_sm' % fingerprint))
-
- base_dir = '/var/lib/nova/instances/_base'
- image_cache_manager = imagecache.ImageCacheManager()
- res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
-
- base_file = os.path.join(base_dir, fingerprint + '_sm')
- self.assertEqual(res, [(base_file, True, False)])
-
- def test_find_base_file_resized(self):
- fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
- listing = ['00000001',
- 'ephemeral_0_20_None',
- '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
- '00000004']
-
- self.stubs.Set(os, 'listdir', lambda x: listing)
- self.stubs.Set(os.path, 'exists',
- lambda x: x.endswith('%s_10737418240' % fingerprint))
- self.stubs.Set(os.path, 'isfile', lambda x: True)
-
- base_dir = '/var/lib/nova/instances/_base'
- image_cache_manager = imagecache.ImageCacheManager()
- image_cache_manager._list_base_images(base_dir)
- res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
-
- base_file = os.path.join(base_dir, fingerprint + '_10737418240')
- self.assertEqual(res, [(base_file, False, True)])
-
- def test_find_base_file_all(self):
- fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
- listing = ['00000001',
- 'ephemeral_0_20_None',
- '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_sm',
- '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
- '00000004']
-
- self.stubs.Set(os, 'listdir', lambda x: listing)
- self.stubs.Set(os.path, 'exists', lambda x: True)
- self.stubs.Set(os.path, 'isfile', lambda x: True)
-
- base_dir = '/var/lib/nova/instances/_base'
- image_cache_manager = imagecache.ImageCacheManager()
- image_cache_manager._list_base_images(base_dir)
- res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
-
- base_file1 = os.path.join(base_dir, fingerprint)
- base_file2 = os.path.join(base_dir, fingerprint + '_sm')
- base_file3 = os.path.join(base_dir, fingerprint + '_10737418240')
- self.assertEqual(res, [(base_file1, False, False),
- (base_file2, True, False),
- (base_file3, False, True)])
-
- @contextlib.contextmanager
- def _make_base_file(self, checksum=True):
- """Make a base file for testing."""
-
- with utils.tempdir() as tmpdir:
- self.flags(instances_path=tmpdir)
- self.flags(image_info_filename_pattern=('$instances_path/'
- '%(image)s.info'),
- group='libvirt')
- fname = os.path.join(tmpdir, 'aaa')
-
- base_file = open(fname, 'w')
- base_file.write('data')
- base_file.close()
- base_file = open(fname, 'r')
-
- if checksum:
- imagecache.write_stored_checksum(fname)
-
- base_file.close()
- yield fname
-
- def test_remove_base_file(self):
- with self._make_base_file() as fname:
- image_cache_manager = imagecache.ImageCacheManager()
- image_cache_manager._remove_base_file(fname)
- info_fname = imagecache.get_info_filename(fname)
-
- # Files are initially too new to delete
- self.assertTrue(os.path.exists(fname))
- self.assertTrue(os.path.exists(info_fname))
-
- # Old files get cleaned up though
- os.utime(fname, (-1, time.time() - 3601))
- image_cache_manager._remove_base_file(fname)
-
- self.assertFalse(os.path.exists(fname))
- self.assertFalse(os.path.exists(info_fname))
-
- def test_remove_base_file_original(self):
- with self._make_base_file() as fname:
- image_cache_manager = imagecache.ImageCacheManager()
- image_cache_manager.originals = [fname]
- image_cache_manager._remove_base_file(fname)
- info_fname = imagecache.get_info_filename(fname)
-
- # Files are initially too new to delete
- self.assertTrue(os.path.exists(fname))
- self.assertTrue(os.path.exists(info_fname))
-
- # This file should stay longer than a resized image
- os.utime(fname, (-1, time.time() - 3601))
- image_cache_manager._remove_base_file(fname)
-
- self.assertTrue(os.path.exists(fname))
- self.assertTrue(os.path.exists(info_fname))
-
- # Originals don't stay forever though
- os.utime(fname, (-1, time.time() - 3600 * 25))
- image_cache_manager._remove_base_file(fname)
-
- self.assertFalse(os.path.exists(fname))
- self.assertFalse(os.path.exists(info_fname))
-
- def test_remove_base_file_dne(self):
- # This test is solely to execute the "does not exist" code path. We
- # don't expect the method being tested to do anything in this case.
- with utils.tempdir() as tmpdir:
- self.flags(instances_path=tmpdir)
- self.flags(image_info_filename_pattern=('$instances_path/'
- '%(image)s.info'),
- group='libvirt')
-
- fname = os.path.join(tmpdir, 'aaa')
- image_cache_manager = imagecache.ImageCacheManager()
- image_cache_manager._remove_base_file(fname)
-
- def test_remove_base_file_oserror(self):
- with intercept_log_messages() as stream:
- with utils.tempdir() as tmpdir:
- self.flags(instances_path=tmpdir)
- self.flags(image_info_filename_pattern=('$instances_path/'
- '%(image)s.info'),
- group='libvirt')
-
- fname = os.path.join(tmpdir, 'aaa')
-
- os.mkdir(fname)
- os.utime(fname, (-1, time.time() - 3601))
-
- # This will raise an OSError because of file permissions
- image_cache_manager = imagecache.ImageCacheManager()
- image_cache_manager._remove_base_file(fname)
-
- self.assertTrue(os.path.exists(fname))
- self.assertNotEqual(stream.getvalue().find('Failed to remove'),
- -1)
-
- def test_handle_base_image_unused(self):
- img = '123'
-
- with self._make_base_file() as fname:
- os.utime(fname, (-1, time.time() - 3601))
-
- image_cache_manager = imagecache.ImageCacheManager()
- image_cache_manager.unexplained_images = [fname]
- image_cache_manager._handle_base_image(img, fname)
-
- self.assertEqual(image_cache_manager.unexplained_images, [])
- self.assertEqual(image_cache_manager.removable_base_files,
- [fname])
- self.assertEqual(image_cache_manager.corrupt_base_files, [])
-
- def test_handle_base_image_used(self):
- self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
- img = '123'
-
- with self._make_base_file() as fname:
- os.utime(fname, (-1, time.time() - 3601))
-
- image_cache_manager = imagecache.ImageCacheManager()
- image_cache_manager.unexplained_images = [fname]
- image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
- image_cache_manager._handle_base_image(img, fname)
-
- self.assertEqual(image_cache_manager.unexplained_images, [])
- self.assertEqual(image_cache_manager.removable_base_files, [])
- self.assertEqual(image_cache_manager.corrupt_base_files, [])
-
- def test_handle_base_image_used_remotely(self):
- self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
- img = '123'
-
- with self._make_base_file() as fname:
- os.utime(fname, (-1, time.time() - 3601))
-
- image_cache_manager = imagecache.ImageCacheManager()
- image_cache_manager.unexplained_images = [fname]
- image_cache_manager.used_images = {'123': (0, 1, ['banana-42'])}
- image_cache_manager._handle_base_image(img, fname)
-
- self.assertEqual(image_cache_manager.unexplained_images, [])
- self.assertEqual(image_cache_manager.removable_base_files, [])
- self.assertEqual(image_cache_manager.corrupt_base_files, [])
-
- def test_handle_base_image_absent(self):
- img = '123'
-
- with intercept_log_messages() as stream:
- image_cache_manager = imagecache.ImageCacheManager()
- image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
- image_cache_manager._handle_base_image(img, None)
-
- self.assertEqual(image_cache_manager.unexplained_images, [])
- self.assertEqual(image_cache_manager.removable_base_files, [])
- self.assertEqual(image_cache_manager.corrupt_base_files, [])
- self.assertNotEqual(stream.getvalue().find('an absent base file'),
- -1)
-
- def test_handle_base_image_used_missing(self):
- img = '123'
-
- with utils.tempdir() as tmpdir:
- self.flags(instances_path=tmpdir)
- self.flags(image_info_filename_pattern=('$instances_path/'
- '%(image)s.info'),
- group='libvirt')
-
- fname = os.path.join(tmpdir, 'aaa')
-
- image_cache_manager = imagecache.ImageCacheManager()
- image_cache_manager.unexplained_images = [fname]
- image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
- image_cache_manager._handle_base_image(img, fname)
-
- self.assertEqual(image_cache_manager.unexplained_images, [])
- self.assertEqual(image_cache_manager.removable_base_files, [])
- self.assertEqual(image_cache_manager.corrupt_base_files, [])
-
- def test_handle_base_image_checksum_fails(self):
- self.flags(checksum_base_images=True, group='libvirt')
- self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
-
- img = '123'
-
- with self._make_base_file() as fname:
- with open(fname, 'w') as f:
- f.write('banana')
-
- d = {'sha1': '21323454'}
- with open('%s.info' % fname, 'w') as f:
- f.write(jsonutils.dumps(d))
-
- image_cache_manager = imagecache.ImageCacheManager()
- image_cache_manager.unexplained_images = [fname]
- image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
- image_cache_manager._handle_base_image(img, fname)
-
- self.assertEqual(image_cache_manager.unexplained_images, [])
- self.assertEqual(image_cache_manager.removable_base_files, [])
- self.assertEqual(image_cache_manager.corrupt_base_files,
- [fname])
-
- def test_verify_base_images(self):
- hashed_1 = '356a192b7913b04c54574d18c28d46e6395428ab'
- hashed_21 = '472b07b9fcf2c2451e8781e944bf5f77cd8457c8'
- hashed_22 = '12c6fc06c99a462375eeb3f43dfd832b08ca9e17'
- hashed_42 = '92cfceb39d57d914ed8b14d0e37643de0797ae56'
-
- self.flags(instances_path='/instance_path',
- image_cache_subdirectory_name='_base')
-
- base_file_list = ['00000001',
- 'ephemeral_0_20_None',
- 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
- 'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
- hashed_42,
- hashed_1,
- hashed_21,
- hashed_22,
- '%s_5368709120' % hashed_1,
- '%s_10737418240' % hashed_1,
- '00000004']
-
- def fq_path(path):
- return os.path.join('/instance_path/_base/', path)
-
- # Fake base directory existence
- orig_exists = os.path.exists
-
- def exists(path):
- # The python coverage tool got angry with my overly broad mocks
- if not path.startswith('/instance_path'):
- return orig_exists(path)
-
- if path in ['/instance_path',
- '/instance_path/_base',
- '/instance_path/instance-1/disk',
- '/instance_path/instance-2/disk',
- '/instance_path/instance-3/disk',
- '/instance_path/_base/%s.info' % hashed_42]:
- return True
-
- for p in base_file_list:
- if path == fq_path(p):
- return True
- if path == fq_path(p) + '.info':
- return False
-
- if path in ['/instance_path/_base/%s_sm' % i for i in [hashed_1,
- hashed_21,
- hashed_22,
- hashed_42]]:
- return False
-
- self.fail('Unexpected path existence check: %s' % path)
-
- self.stubs.Set(os.path, 'exists', lambda x: exists(x))
-
- self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
-
- # We need to stub utime as well
- self.stubs.Set(os, 'utime', lambda x, y: None)
-
- # Fake up some instances in the instances directory
- orig_listdir = os.listdir
-
- def listdir(path):
- # The python coverage tool got angry with my overly broad mocks
- if not path.startswith('/instance_path'):
- return orig_listdir(path)
-
- if path == '/instance_path':
- return ['instance-1', 'instance-2', 'instance-3', '_base']
-
- if path == '/instance_path/_base':
- return base_file_list
-
- self.fail('Unexpected directory listed: %s' % path)
-
- self.stubs.Set(os, 'listdir', lambda x: listdir(x))
-
- # Fake isfile for these faked images in _base
- orig_isfile = os.path.isfile
-
- def isfile(path):
- # The python coverage tool got angry with my overly broad mocks
- if not path.startswith('/instance_path'):
- return orig_isfile(path)
-
- for p in base_file_list:
- if path == fq_path(p):
- return True
-
- self.fail('Unexpected isfile call: %s' % path)
-
- self.stubs.Set(os.path, 'isfile', lambda x: isfile(x))
-
- # Fake the database call which lists running instances
- instances = [{'image_ref': '1',
- 'host': CONF.host,
- 'name': 'instance-1',
- 'uuid': '123',
- 'vm_state': '',
- 'task_state': ''},
- {'image_ref': '1',
- 'kernel_id': '21',
- 'ramdisk_id': '22',
- 'host': CONF.host,
- 'name': 'instance-2',
- 'uuid': '456',
- 'vm_state': '',
- 'task_state': ''}]
- all_instances = [fake_instance.fake_instance_obj(None, **instance)
- for instance in instances]
- image_cache_manager = imagecache.ImageCacheManager()
-
- # Fake the utils call which finds the backing image
- def get_disk_backing_file(path):
- if path in ['/instance_path/instance-1/disk',
- '/instance_path/instance-2/disk']:
- return fq_path('%s_5368709120' % hashed_1)
- self.fail('Unexpected backing file lookup: %s' % path)
-
- self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
- lambda x: get_disk_backing_file(x))
-
- # Fake out verifying checksums, as that is tested elsewhere
- self.stubs.Set(image_cache_manager, '_verify_checksum',
- lambda x, y: True)
-
- # Fake getmtime as well
- orig_getmtime = os.path.getmtime
-
- def getmtime(path):
- if not path.startswith('/instance_path'):
- return orig_getmtime(path)
-
- return 1000000
-
- self.stubs.Set(os.path, 'getmtime', lambda x: getmtime(x))
-
- # Make sure we don't accidentally remove a real file
- orig_remove = os.remove
-
- def remove(path):
- if not path.startswith('/instance_path'):
- return orig_remove(path)
-
- # Don't try to remove fake files
- return
-
- self.stubs.Set(os, 'remove', lambda x: remove(x))
-
- # And finally we can make the call we're actually testing...
- # The argument here should be a context, but it is mocked out
- image_cache_manager.update(None, all_instances)
-
- # Verify
- active = [fq_path(hashed_1), fq_path('%s_5368709120' % hashed_1),
- fq_path(hashed_21), fq_path(hashed_22)]
- for act in active:
- self.assertIn(act, image_cache_manager.active_base_files)
- self.assertEqual(len(image_cache_manager.active_base_files),
- len(active))
-
- for rem in [fq_path('e97222e91fc4241f49a7f520d1dcf446751129b3_sm'),
- fq_path('e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm'),
- fq_path(hashed_42),
- fq_path('%s_10737418240' % hashed_1)]:
- self.assertIn(rem, image_cache_manager.removable_base_files)
-
- # Ensure there are no "corrupt" images as well
- self.assertEqual(len(image_cache_manager.corrupt_base_files), 0)
-
- def test_verify_base_images_no_base(self):
- self.flags(instances_path='/tmp/no/such/dir/name/please')
- image_cache_manager = imagecache.ImageCacheManager()
- image_cache_manager.update(None, [])
-
- def test_is_valid_info_file(self):
- hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'
-
- self.flags(instances_path='/tmp/no/such/dir/name/please')
- self.flags(image_info_filename_pattern=('$instances_path/_base/'
- '%(image)s.info'),
- group='libvirt')
- base_filename = os.path.join(CONF.instances_path, '_base', hashed)
-
- is_valid_info_file = imagecache.is_valid_info_file
- self.assertFalse(is_valid_info_file('banana'))
- self.assertFalse(is_valid_info_file(
- os.path.join(CONF.instances_path, '_base', '00000001')))
- self.assertFalse(is_valid_info_file(base_filename))
- self.assertFalse(is_valid_info_file(base_filename + '.sha1'))
- self.assertTrue(is_valid_info_file(base_filename + '.info'))
-
- def test_configured_checksum_path(self):
- with utils.tempdir() as tmpdir:
- self.flags(instances_path=tmpdir)
- self.flags(image_info_filename_pattern=('$instances_path/'
- '%(image)s.info'),
- group='libvirt')
-
- # Ensure there is a base directory
- os.mkdir(os.path.join(tmpdir, '_base'))
-
- # Fake the database call which lists running instances
- instances = [{'image_ref': '1',
- 'host': CONF.host,
- 'name': 'instance-1',
- 'uuid': '123',
- 'vm_state': '',
- 'task_state': ''},
- {'image_ref': '1',
- 'host': CONF.host,
- 'name': 'instance-2',
- 'uuid': '456',
- 'vm_state': '',
- 'task_state': ''}]
-
- all_instances = []
- for instance in instances:
- all_instances.append(fake_instance.fake_instance_obj(
- None, **instance))
-
- def touch(filename):
- f = open(filename, 'w')
- f.write('Touched')
- f.close()
-
- old = time.time() - (25 * 3600)
- hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'
- base_filename = os.path.join(tmpdir, hashed)
- touch(base_filename)
- touch(base_filename + '.info')
- os.utime(base_filename + '.info', (old, old))
- touch(base_filename + '.info')
- os.utime(base_filename + '.info', (old, old))
-
- image_cache_manager = imagecache.ImageCacheManager()
- image_cache_manager.update(None, all_instances)
-
- self.assertTrue(os.path.exists(base_filename))
- self.assertTrue(os.path.exists(base_filename + '.info'))
-
- def test_compute_manager(self):
- was = {'called': False}
-
- def fake_get_all_by_filters(context, *args, **kwargs):
- was['called'] = True
- instances = []
- for x in xrange(2):
- instances.append(fake_instance.fake_db_instance(
- image_ref='1',
- uuid=x,
- name=x,
- vm_state='',
- task_state=''))
- return instances
-
- with utils.tempdir() as tmpdir:
- self.flags(instances_path=tmpdir)
-
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all_by_filters)
- compute = importutils.import_object(CONF.compute_manager)
- self.flags(use_local=True, group='conductor')
- compute.conductor_api = conductor.API()
- compute._run_image_cache_manager_pass(None)
- self.assertTrue(was['called'])
-
-
-class VerifyChecksumTestCase(test.NoDBTestCase):
-
- def setUp(self):
- super(VerifyChecksumTestCase, self).setUp()
- self.img = {'container_format': 'ami', 'id': '42'}
- self.flags(checksum_base_images=True, group='libvirt')
-
- def _make_checksum(self, tmpdir):
- testdata = ('OpenStack Software delivers a massively scalable cloud '
- 'operating system.')
-
- fname = os.path.join(tmpdir, 'aaa')
- info_fname = imagecache.get_info_filename(fname)
-
- with open(fname, 'w') as f:
- f.write(testdata)
-
- return fname, info_fname, testdata
-
- def _write_file(self, info_fname, info_attr, testdata):
- f = open(info_fname, 'w')
- if info_attr == "csum valid":
- csum = hashlib.sha1()
- csum.update(testdata)
- f.write('{"sha1": "%s"}\n' % csum.hexdigest())
- elif info_attr == "csum invalid, not json":
- f.write('banana')
- else:
- f.write('{"sha1": "banana"}')
- f.close()
-
- def _check_body(self, tmpdir, info_attr):
- self.flags(instances_path=tmpdir)
- self.flags(image_info_filename_pattern=('$instances_path/'
- '%(image)s.info'),
- group='libvirt')
- fname, info_fname, testdata = self._make_checksum(tmpdir)
- self._write_file(info_fname, info_attr, testdata)
- image_cache_manager = imagecache.ImageCacheManager()
- return image_cache_manager, fname
-
- def test_verify_checksum(self):
- with utils.tempdir() as tmpdir:
- image_cache_manager, fname = self._check_body(tmpdir, "csum valid")
- res = image_cache_manager._verify_checksum(self.img, fname)
- self.assertTrue(res)
-
- def test_verify_checksum_disabled(self):
- self.flags(checksum_base_images=False, group='libvirt')
- with utils.tempdir() as tmpdir:
- image_cache_manager, fname = self._check_body(tmpdir, "csum valid")
- res = image_cache_manager._verify_checksum(self.img, fname)
- self.assertIsNone(res)
-
- def test_verify_checksum_invalid_json(self):
- with intercept_log_messages() as stream:
- with utils.tempdir() as tmpdir:
- image_cache_manager, fname = (
- self._check_body(tmpdir, "csum invalid, not json"))
- res = image_cache_manager._verify_checksum(
- self.img, fname, create_if_missing=False)
- self.assertFalse(res)
- log = stream.getvalue()
-
- # NOTE(mikal): this is a skip not a fail because the file is
- # present, but is not in valid json format and therefore is
- # skipped.
- self.assertNotEqual(log.find('image verification skipped'), -1)
-
- def test_verify_checksum_invalid_repaired(self):
- with utils.tempdir() as tmpdir:
- image_cache_manager, fname = (
- self._check_body(tmpdir, "csum invalid, not json"))
- res = image_cache_manager._verify_checksum(
- self.img, fname, create_if_missing=True)
- self.assertIsNone(res)
-
- def test_verify_checksum_invalid(self):
- with intercept_log_messages() as stream:
- with utils.tempdir() as tmpdir:
- image_cache_manager, fname = (
- self._check_body(tmpdir, "csum invalid, valid json"))
- res = image_cache_manager._verify_checksum(self.img, fname)
- self.assertFalse(res)
- log = stream.getvalue()
- self.assertNotEqual(log.find('image verification failed'), -1)
-
- def test_verify_checksum_file_missing(self):
- with utils.tempdir() as tmpdir:
- self.flags(instances_path=tmpdir)
- self.flags(image_info_filename_pattern=('$instances_path/'
- '%(image)s.info'),
- group='libvirt')
- fname, info_fname, testdata = self._make_checksum(tmpdir)
-
- image_cache_manager = imagecache.ImageCacheManager()
- res = image_cache_manager._verify_checksum('aaa', fname)
- self.assertIsNone(res)
-
- # Checksum requests for a file with no checksum now have the
- # side effect of creating the checksum
- self.assertTrue(os.path.exists(info_fname))
diff --git a/nova/tests/virt/libvirt/test_vif.py b/nova/tests/virt/libvirt/test_vif.py
deleted file mode 100644
index 256ca58046..0000000000
--- a/nova/tests/virt/libvirt/test_vif.py
+++ /dev/null
@@ -1,959 +0,0 @@
-# Copyright 2012 Nicira, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-
-from lxml import etree
-import mock
-from oslo.concurrency import processutils
-from oslo.config import cfg
-
-from nova import exception
-from nova.network import linux_net
-from nova.network import model as network_model
-from nova import test
-from nova.tests.virt.libvirt import fakelibvirt
-from nova import utils
-from nova.virt.libvirt import config as vconfig
-from nova.virt.libvirt import vif
-
-CONF = cfg.CONF
-
-
-class LibvirtVifTestCase(test.NoDBTestCase):
-
- gateway_bridge_4 = network_model.IP(address='101.168.1.1', type='gateway')
- dns_bridge_4 = network_model.IP(address='8.8.8.8', type=None)
- ips_bridge_4 = [network_model.IP(address='101.168.1.9', type=None)]
- subnet_bridge_4 = network_model.Subnet(cidr='101.168.1.0/24',
- dns=[dns_bridge_4],
- gateway=gateway_bridge_4,
- routes=None,
- dhcp_server='191.168.1.1')
-
- gateway_bridge_6 = network_model.IP(address='101:1db9::1', type='gateway')
- subnet_bridge_6 = network_model.Subnet(cidr='101:1db9::/64',
- dns=None,
- gateway=gateway_bridge_6,
- ips=None,
- routes=None)
-
- network_bridge = network_model.Network(id='network-id-xxx-yyy-zzz',
- bridge='br0',
- label=None,
- subnets=[subnet_bridge_4,
- subnet_bridge_6],
- bridge_interface='eth0',
- vlan=99)
-
- vif_bridge = network_model.VIF(id='vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=network_bridge,
- type=network_model.VIF_TYPE_BRIDGE,
- devname='tap-xxx-yyy-zzz',
- ovs_interfaceid=None)
-
- network_bridge_neutron = network_model.Network(id='network-id-xxx-yyy-zzz',
- bridge=None,
- label=None,
- subnets=[subnet_bridge_4,
- subnet_bridge_6],
- bridge_interface='eth0',
- vlan=99)
-
- vif_bridge_neutron = network_model.VIF(id='vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=network_bridge_neutron,
- type=None,
- devname='tap-xxx-yyy-zzz',
- ovs_interfaceid='aaa-bbb-ccc')
-
- network_ovs = network_model.Network(id='network-id-xxx-yyy-zzz',
- bridge='br0',
- label=None,
- subnets=[subnet_bridge_4,
- subnet_bridge_6],
- bridge_interface=None,
- vlan=99)
-
- network_ivs = network_model.Network(id='network-id-xxx-yyy-zzz',
- bridge='br0',
- label=None,
- subnets=[subnet_bridge_4,
- subnet_bridge_6],
- bridge_interface=None,
- vlan=99)
-
- vif_ovs = network_model.VIF(id='vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=network_ovs,
- type=network_model.VIF_TYPE_OVS,
- devname='tap-xxx-yyy-zzz',
- ovs_interfaceid='aaa-bbb-ccc')
-
- vif_ovs_hybrid = network_model.VIF(id='vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=network_ovs,
- type=network_model.VIF_TYPE_OVS,
- details={'ovs_hybrid_plug': True,
- 'port_filter': True},
- devname='tap-xxx-yyy-zzz',
- ovs_interfaceid='aaa-bbb-ccc')
-
- vif_ovs_filter_cap = network_model.VIF(id='vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=network_ovs,
- type=network_model.VIF_TYPE_OVS,
- details={'port_filter': True},
- devname='tap-xxx-yyy-zzz',
- ovs_interfaceid='aaa-bbb-ccc')
-
- vif_ovs_legacy = network_model.VIF(id='vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=network_ovs,
- type=None,
- devname=None,
- ovs_interfaceid=None)
-
- vif_ivs = network_model.VIF(id='vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=network_ivs,
- type=network_model.VIF_TYPE_IVS,
- devname='tap-xxx-yyy-zzz',
- ovs_interfaceid='aaa-bbb-ccc')
-
- vif_ivs_legacy = network_model.VIF(id='vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=network_ovs,
- type=None,
- devname=None,
- ovs_interfaceid='aaa')
-
- vif_ivs_filter_direct = network_model.VIF(id='vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=network_ivs,
- type=network_model.VIF_TYPE_IVS,
- details={'port_filter': True},
- devname='tap-xxx-yyy-zzz',
- ovs_interfaceid='aaa-bbb-ccc')
-
- vif_ivs_filter_hybrid = network_model.VIF(id='vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=network_ivs,
- type=network_model.VIF_TYPE_IVS,
- details={
- 'port_filter': True,
- 'ovs_hybrid_plug': True},
- devname='tap-xxx-yyy-zzz',
- ovs_interfaceid='aaa-bbb-ccc')
-
- vif_none = network_model.VIF(id='vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=network_bridge,
- type=None,
- devname='tap-xxx-yyy-zzz',
- ovs_interfaceid=None)
-
- network_8021 = network_model.Network(id='network-id-xxx-yyy-zzz',
- bridge=None,
- label=None,
- subnets=[subnet_bridge_4,
- subnet_bridge_6],
- interface='eth0',
- vlan=99)
-
- vif_8021qbh = network_model.VIF(id='vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=network_8021,
- type=network_model.VIF_TYPE_802_QBH,
- vnic_type=network_model.VNIC_TYPE_DIRECT,
- ovs_interfaceid=None,
- details={
- network_model.VIF_DETAILS_PROFILEID:
- 'MyPortProfile'},
- profile={'pci_vendor_info': '1137:0043',
- 'pci_slot': '0000:0a:00.1',
- 'physical_network': 'phynet1'})
-
- vif_hw_veb = network_model.VIF(id='vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=network_8021,
- type=network_model.VIF_TYPE_HW_VEB,
- vnic_type=network_model.VNIC_TYPE_DIRECT,
- ovs_interfaceid=None,
- details={
- network_model.VIF_DETAILS_VLAN: '100'},
- profile={'pci_vendor_info': '1137:0043',
- 'pci_slot': '0000:0a:00.1',
- 'physical_network': 'phynet1'})
-
- vif_8021qbg = network_model.VIF(id='vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=network_8021,
- type=network_model.VIF_TYPE_802_QBG,
- ovs_interfaceid=None,
- qbg_params=network_model.VIF8021QbgParams(
- managerid="xxx-yyy-zzz",
- typeid="aaa-bbb-ccc",
- typeidversion="1",
- instanceid="ddd-eee-fff"))
-
- network_mlnx = network_model.Network(id='network-id-xxx-yyy-zzz',
- label=None,
- bridge=None,
- subnets=[subnet_bridge_4,
- subnet_bridge_6],
- interface='eth0')
-
- network_midonet = network_model.Network(id='network-id-xxx-yyy-zzz',
- label=None,
- bridge=None,
- subnets=[subnet_bridge_4],
- interface='eth0')
-
- vif_mlnx = network_model.VIF(id='vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=network_mlnx,
- type=network_model.VIF_TYPE_MLNX_DIRECT,
- devname='tap-xxx-yyy-zzz')
-
- vif_mlnx_net = network_model.VIF(id='vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=network_mlnx,
- type=network_model.VIF_TYPE_MLNX_DIRECT,
- details={'physical_network':
- 'fake_phy_network'},
- devname='tap-xxx-yyy-zzz')
-
- vif_midonet = network_model.VIF(id='vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=network_midonet,
- type=network_model.VIF_TYPE_MIDONET,
- devname='tap-xxx-yyy-zzz')
-
- vif_iovisor = network_model.VIF(id='vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=network_bridge,
- type=network_model.VIF_TYPE_IOVISOR,
- devname='tap-xxx-yyy-zzz',
- ovs_interfaceid=None)
-
- instance = {
- 'name': 'instance-name',
- 'uuid': 'instance-uuid'
- }
-
- bandwidth = {
- 'quota:vif_inbound_peak': '200',
- 'quota:vif_outbound_peak': '20',
- 'quota:vif_inbound_average': '100',
- 'quota:vif_outbound_average': '10',
- 'quota:vif_inbound_burst': '300',
- 'quota:vif_outbound_burst': '30'
- }
-
- def setUp(self):
- super(LibvirtVifTestCase, self).setUp()
- self.flags(allow_same_net_traffic=True)
- self.executes = []
-
- def fake_execute(*cmd, **kwargs):
- self.executes.append(cmd)
- return None, None
-
- self.stubs.Set(utils, 'execute', fake_execute)
-
- def _get_conn(self, uri="qemu:///session", ver=None):
- def __inner():
- if ver is None:
- return fakelibvirt.Connection(uri, False)
- else:
- return fakelibvirt.Connection(uri, False, ver)
- return __inner
-
- def _get_node(self, xml):
- doc = etree.fromstring(xml)
- ret = doc.findall('./devices/interface')
- self.assertEqual(len(ret), 1)
- return ret[0]
-
- def _assertMacEquals(self, node, vif):
- mac = node.find("mac").get("address")
- self.assertEqual(mac, vif['address'])
-
- def _assertTypeEquals(self, node, type, attr, source, br_want,
- prefix=None):
- self.assertEqual(node.get("type"), type)
- br_name = node.find(attr).get(source)
- if prefix is None:
- self.assertEqual(br_name, br_want)
- else:
- self.assertTrue(br_name.startswith(prefix))
-
- def _assertTypeAndMacEquals(self, node, type, attr, source, vif,
- br_want=None, size=0, prefix=None):
- ret = node.findall("filterref")
- self.assertEqual(len(ret), size)
- self._assertTypeEquals(node, type, attr, source, br_want,
- prefix)
- self._assertMacEquals(node, vif)
-
- def _assertModel(self, xml, model_want=None, driver_want=None):
- node = self._get_node(xml)
- if model_want is None:
- ret = node.findall("model")
- self.assertEqual(len(ret), 0)
- else:
- model = node.find("model").get("type")
- self.assertEqual(model, model_want)
- if driver_want is None:
- ret = node.findall("driver")
- self.assertEqual(len(ret), 0)
- else:
- driver = node.find("driver").get("name")
- self.assertEqual(driver, driver_want)
-
- def _assertTypeAndPciEquals(self, node, type, vif):
- self.assertEqual(node.get("type"), type)
- address = node.find("source").find("address")
- addr_type = address.get("type")
- self.assertEqual("pci", addr_type)
- pci_slot = "%(domain)s:%(bus)s:%(slot)s.%(func)s" % {
- 'domain': address.get("domain")[2:],
- 'bus': address.get("bus")[2:],
- 'slot': address.get("slot")[2:],
- 'func': address.get("function")[2:]}
-
- pci_slot_want = vif['profile']['pci_slot']
- self.assertEqual(pci_slot, pci_slot_want)
-
- def _get_conf(self):
- conf = vconfig.LibvirtConfigGuest()
- conf.virt_type = "qemu"
- conf.name = "fake-name"
- conf.uuid = "fake-uuid"
- conf.memory = 100 * 1024
- conf.vcpus = 4
- return conf
-
- def _get_instance_xml(self, driver, vif, image_meta=None):
- default_inst_type = {
- 'memory_mb': 128, 'root_gb': 0, 'deleted_at': None,
- 'name': 'm1.micro', 'deleted': 0, 'created_at': None,
- 'ephemeral_gb': 0, 'updated_at': None,
- 'disabled': False, 'vcpus': 1,
- 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True,
- 'flavorid': '1', 'vcpu_weight': None, 'id': 2,
- 'extra_specs': dict(self.bandwidth)
- }
- conf = self._get_conf()
- nic = driver.get_config(self.instance, vif, image_meta,
- default_inst_type, CONF.libvirt.virt_type)
- conf.add_device(nic)
- return conf.to_xml()
-
- def test_multiple_nics(self):
- conf = self._get_conf()
- # Tests multiple nic configuration and that target_dev is
- # set for each
- nics = [{'net_type': 'bridge',
- 'mac_addr': '00:00:00:00:00:0b',
- 'source_dev': 'b_source_dev',
- 'target_dev': 'b_target_dev'},
- {'net_type': 'ethernet',
- 'mac_addr': '00:00:00:00:00:0e',
- 'source_dev': 'e_source_dev',
- 'target_dev': 'e_target_dev'},
- {'net_type': 'direct',
- 'mac_addr': '00:00:00:00:00:0d',
- 'source_dev': 'd_source_dev',
- 'target_dev': 'd_target_dev'}]
-
- for nic in nics:
- nic_conf = vconfig.LibvirtConfigGuestInterface()
- nic_conf.net_type = nic['net_type']
- nic_conf.target_dev = nic['target_dev']
- nic_conf.mac_addr = nic['mac_addr']
- nic_conf.source_dev = nic['source_dev']
- conf.add_device(nic_conf)
-
- xml = conf.to_xml()
- doc = etree.fromstring(xml)
- for nic in nics:
- path = "./devices/interface/[@type='%s']" % nic['net_type']
- node = doc.find(path)
- self.assertEqual(nic['net_type'], node.get("type"))
- self.assertEqual(nic['mac_addr'],
- node.find("mac").get("address"))
- self.assertEqual(nic['target_dev'],
- node.find("target").get("dev"))
-
- def test_model_novirtio(self):
- self.flags(use_virtio_for_bridges=False,
- virt_type='kvm',
- group='libvirt')
-
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- xml = self._get_instance_xml(d, self.vif_bridge)
- self._assertModel(xml)
-
- def test_model_kvm(self):
- self.flags(use_virtio_for_bridges=True,
- virt_type='kvm',
- group='libvirt')
-
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- xml = self._get_instance_xml(d, self.vif_bridge)
- self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
-
- def test_model_kvm_qemu_custom(self):
- for virt in ('kvm', 'qemu'):
- self.flags(use_virtio_for_bridges=True,
- virt_type=virt,
- group='libvirt')
-
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- supported = (network_model.VIF_MODEL_NE2K_PCI,
- network_model.VIF_MODEL_PCNET,
- network_model.VIF_MODEL_RTL8139,
- network_model.VIF_MODEL_E1000,
- network_model.VIF_MODEL_SPAPR_VLAN)
- for model in supported:
- image_meta = {'properties': {'hw_vif_model': model}}
- xml = self._get_instance_xml(d, self.vif_bridge,
- image_meta)
- self._assertModel(xml, model)
-
- def test_model_kvm_bogus(self):
- self.flags(use_virtio_for_bridges=True,
- virt_type='kvm',
- group='libvirt')
-
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- image_meta = {'properties': {'hw_vif_model': 'acme'}}
- self.assertRaises(exception.UnsupportedHardware,
- self._get_instance_xml,
- d,
- self.vif_bridge,
- image_meta)
-
- def _test_model_qemu(self, *vif_objs, **kw):
- libvirt_version = kw.get('libvirt_version')
- self.flags(use_virtio_for_bridges=True,
- virt_type='qemu',
- group='libvirt')
-
- for vif_obj in vif_objs:
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- if libvirt_version is not None:
- d.libvirt_version = libvirt_version
-
- xml = self._get_instance_xml(d, vif_obj)
-
- doc = etree.fromstring(xml)
-
- bandwidth = doc.find('./devices/interface/bandwidth')
- self.assertNotEqual(bandwidth, None)
-
- inbound = bandwidth.find('inbound')
- self.assertEqual(inbound.get("average"),
- self.bandwidth['quota:vif_inbound_average'])
- self.assertEqual(inbound.get("peak"),
- self.bandwidth['quota:vif_inbound_peak'])
- self.assertEqual(inbound.get("burst"),
- self.bandwidth['quota:vif_inbound_burst'])
-
- outbound = bandwidth.find('outbound')
- self.assertEqual(outbound.get("average"),
- self.bandwidth['quota:vif_outbound_average'])
- self.assertEqual(outbound.get("peak"),
- self.bandwidth['quota:vif_outbound_peak'])
- self.assertEqual(outbound.get("burst"),
- self.bandwidth['quota:vif_outbound_burst'])
-
- self._assertModel(xml, network_model.VIF_MODEL_VIRTIO, "qemu")
-
- def test_model_qemu_no_firewall(self):
- self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
- self._test_model_qemu(
- self.vif_bridge,
- self.vif_8021qbg,
- self.vif_iovisor,
- self.vif_mlnx,
- self.vif_ovs,
- )
-
- def test_model_qemu_iptables(self):
- self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
- self._test_model_qemu(
- self.vif_bridge,
- self.vif_ovs,
- self.vif_ivs,
- self.vif_8021qbg,
- self.vif_iovisor,
- self.vif_mlnx,
- )
-
- def test_model_xen(self):
- self.flags(use_virtio_for_bridges=True,
- virt_type='xen',
- group='libvirt')
-
- d = vif.LibvirtGenericVIFDriver(self._get_conn("xen:///system"))
- xml = self._get_instance_xml(d, self.vif_bridge)
- self._assertModel(xml)
-
- def test_generic_driver_none(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- self.assertRaises(exception.NovaException,
- self._get_instance_xml,
- d,
- self.vif_none)
-
- def _check_bridge_driver(self, d, vif, br_want):
- xml = self._get_instance_xml(d, vif)
- node = self._get_node(xml)
- self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
- self.vif_bridge, br_want, 1)
-
- def test_generic_driver_bridge(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- self._check_bridge_driver(d,
- self.vif_bridge,
- self.vif_bridge['network']['bridge'])
-
- def _check_ivs_ethernet_driver(self, d, vif, dev_prefix):
- self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
- xml = self._get_instance_xml(d, vif)
- node = self._get_node(xml)
- self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
- self.vif_ivs, prefix=dev_prefix)
- script = node.find("script").get("path")
- self.assertEqual(script, "")
-
- def test_unplug_ivs_ethernet(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- with mock.patch.object(linux_net, 'delete_ivs_vif_port') as delete:
- delete.side_effect = processutils.ProcessExecutionError
- d.unplug_ivs_ethernet(None, self.vif_ovs)
-
- def test_plug_ovs_hybrid(self):
- calls = {
- 'device_exists': [mock.call('qbrvif-xxx-yyy'),
- mock.call('qvovif-xxx-yyy')],
- '_create_veth_pair': [mock.call('qvbvif-xxx-yyy',
- 'qvovif-xxx-yyy')],
- 'execute': [mock.call('brctl', 'addbr', 'qbrvif-xxx-yyy',
- run_as_root=True),
- mock.call('brctl', 'setfd', 'qbrvif-xxx-yyy', 0,
- run_as_root=True),
- mock.call('brctl', 'stp', 'qbrvif-xxx-yyy', 'off',
- run_as_root=True),
- mock.call('tee', ('/sys/class/net/qbrvif-xxx-yyy'
- '/bridge/multicast_snooping'),
- process_input='0', run_as_root=True,
- check_exit_code=[0, 1]),
- mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
- run_as_root=True),
- mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
- 'qvbvif-xxx-yyy', run_as_root=True)],
- 'create_ovs_vif_port': [mock.call('br0',
- 'qvovif-xxx-yyy', 'aaa-bbb-ccc',
- 'ca:fe:de:ad:be:ef',
- 'instance-uuid')]
- }
- with contextlib.nested(
- mock.patch.object(linux_net, 'device_exists',
- return_value=False),
- mock.patch.object(utils, 'execute'),
- mock.patch.object(linux_net, '_create_veth_pair'),
- mock.patch.object(linux_net, 'create_ovs_vif_port')
- ) as (device_exists, execute, _create_veth_pair, create_ovs_vif_port):
- d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
- d.plug_ovs_hybrid(self.instance, self.vif_ovs)
- device_exists.assert_has_calls(calls['device_exists'])
- _create_veth_pair.assert_has_calls(calls['_create_veth_pair'])
- execute.assert_has_calls(calls['execute'])
- create_ovs_vif_port.assert_has_calls(calls['create_ovs_vif_port'])
-
- def test_unplug_ovs_hybrid(self):
- calls = {
- 'device_exists': [mock.call('qbrvif-xxx-yyy')],
- 'execute': [mock.call('brctl', 'delif', 'qbrvif-xxx-yyy',
- 'qvbvif-xxx-yyy', run_as_root=True),
- mock.call('ip', 'link', 'set',
- 'qbrvif-xxx-yyy', 'down', run_as_root=True),
- mock.call('brctl', 'delbr',
- 'qbrvif-xxx-yyy', run_as_root=True)],
- 'delete_ovs_vif_port': [mock.call('br0', 'qvovif-xxx-yyy')]
- }
- with contextlib.nested(
- mock.patch.object(linux_net, 'device_exists',
- return_value=True),
- mock.patch.object(utils, 'execute'),
- mock.patch.object(linux_net, 'delete_ovs_vif_port')
- ) as (device_exists, execute, delete_ovs_vif_port):
- d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
- d.unplug_ovs_hybrid(None, self.vif_ovs)
- device_exists.assert_has_calls(calls['device_exists'])
- execute.assert_has_calls(calls['execute'])
- delete_ovs_vif_port.assert_has_calls(calls['delete_ovs_vif_port'])
-
- def test_unplug_ovs_hybrid_bridge_does_not_exist(self):
- calls = {
- 'device_exists': [mock.call('qbrvif-xxx-yyy')],
- 'delete_ovs_vif_port': [mock.call('br0', 'qvovif-xxx-yyy')]
- }
- with contextlib.nested(
- mock.patch.object(linux_net, 'device_exists',
- return_value=False),
- mock.patch.object(linux_net, 'delete_ovs_vif_port')
- ) as (device_exists, delete_ovs_vif_port):
- d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
- d.unplug_ovs_hybrid(None, self.vif_ovs)
- device_exists.assert_has_calls(calls['device_exists'])
- delete_ovs_vif_port.assert_has_calls(calls['delete_ovs_vif_port'])
-
- def test_plug_ivs_hybrid(self):
- calls = {
- 'device_exists': [mock.call('qbrvif-xxx-yyy'),
- mock.call('qvovif-xxx-yyy')],
- '_create_veth_pair': [mock.call('qvbvif-xxx-yyy',
- 'qvovif-xxx-yyy')],
- 'execute': [mock.call('brctl', 'addbr', 'qbrvif-xxx-yyy',
- run_as_root=True),
- mock.call('brctl', 'setfd', 'qbrvif-xxx-yyy', 0,
- run_as_root=True),
- mock.call('brctl', 'stp', 'qbrvif-xxx-yyy', 'off',
- run_as_root=True),
- mock.call('tee', ('/sys/class/net/qbrvif-xxx-yyy'
- '/bridge/multicast_snooping'),
- process_input='0', run_as_root=True,
- check_exit_code=[0, 1]),
- mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
- run_as_root=True),
- mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
- 'qvbvif-xxx-yyy', run_as_root=True)],
- 'create_ivs_vif_port': [mock.call('qvovif-xxx-yyy', 'aaa-bbb-ccc',
- 'ca:fe:de:ad:be:ef',
- 'instance-uuid')]
- }
- with contextlib.nested(
- mock.patch.object(linux_net, 'device_exists',
- return_value=False),
- mock.patch.object(utils, 'execute'),
- mock.patch.object(linux_net, '_create_veth_pair'),
- mock.patch.object(linux_net, 'create_ivs_vif_port')
- ) as (device_exists, execute, _create_veth_pair, create_ivs_vif_port):
- d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
- d.plug_ivs_hybrid(self.instance, self.vif_ivs)
- device_exists.assert_has_calls(calls['device_exists'])
- _create_veth_pair.assert_has_calls(calls['_create_veth_pair'])
- execute.assert_has_calls(calls['execute'])
- create_ivs_vif_port.assert_has_calls(calls['create_ivs_vif_port'])
-
- def test_unplug_ivs_hybrid(self):
- calls = {
- 'execute': [mock.call('brctl', 'delif', 'qbrvif-xxx-yyy',
- 'qvbvif-xxx-yyy', run_as_root=True),
- mock.call('ip', 'link', 'set',
- 'qbrvif-xxx-yyy', 'down', run_as_root=True),
- mock.call('brctl', 'delbr',
- 'qbrvif-xxx-yyy', run_as_root=True)],
- 'delete_ivs_vif_port': [mock.call('qvovif-xxx-yyy')]
- }
- with contextlib.nested(
- mock.patch.object(utils, 'execute'),
- mock.patch.object(linux_net, 'delete_ivs_vif_port')
- ) as (execute, delete_ivs_vif_port):
- d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
- d.unplug_ivs_hybrid(None, self.vif_ivs)
- execute.assert_has_calls(calls['execute'])
- delete_ivs_vif_port.assert_has_calls(calls['delete_ivs_vif_port'])
-
- def test_unplug_ivs_hybrid_bridge_does_not_exist(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
- with mock.patch.object(utils, 'execute') as execute:
- execute.side_effect = processutils.ProcessExecutionError
- d.unplug_ivs_hybrid(None, self.vif_ivs)
-
- def test_unplug_iovisor(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
- with mock.patch.object(utils, 'execute') as execute:
- execute.side_effect = processutils.ProcessExecutionError
- mynetwork = network_model.Network(id='network-id-xxx-yyy-zzz',
- label='mylabel')
- myvif = network_model.VIF(id='vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=mynetwork)
- d.unplug_iovisor(None, myvif)
-
- @mock.patch('nova.network.linux_net.device_exists')
- def test_plug_iovisor(self, device_exists):
- device_exists.return_value = True
- d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
- with mock.patch.object(utils, 'execute') as execute:
- execute.side_effect = processutils.ProcessExecutionError
- instance = {
- 'name': 'instance-name',
- 'uuid': 'instance-uuid',
- 'project_id': 'myproject'
- }
- d.plug_iovisor(instance, self.vif_ivs)
-
- def test_unplug_mlnx_with_details(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
- with mock.patch.object(utils, 'execute') as execute:
- execute.side_effect = processutils.ProcessExecutionError
- d.unplug_mlnx_direct(None, self.vif_mlnx_net)
- execute.assert_called_once_with('ebrctl', 'del-port',
- 'fake_phy_network',
- 'ca:fe:de:ad:be:ef',
- run_as_root=True)
-
- def test_plug_mlnx_with_details(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
- with mock.patch.object(utils, 'execute') as execute:
- d.plug_mlnx_direct(self.instance, self.vif_mlnx_net)
- execute.assert_called_once_with('ebrctl', 'add-port',
- 'ca:fe:de:ad:be:ef',
- 'instance-uuid',
- 'fake_phy_network',
- 'mlnx_direct',
- 'eth-xxx-yyy-zzz',
- run_as_root=True)
-
- def test_plug_mlnx_no_physical_network(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
- with mock.patch.object(utils, 'execute') as execute:
- self.assertRaises(exception.NovaException,
- d.plug_mlnx_direct,
- self.instance,
- self.vif_mlnx)
- self.assertEqual(0, execute.call_count)
-
- def test_ivs_ethernet_driver(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
- self._check_ivs_ethernet_driver(d,
- self.vif_ivs,
- "tap")
-
- def _check_ivs_virtualport_driver(self, d, vif, want_iface_id):
- self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
- xml = self._get_instance_xml(d, vif)
- node = self._get_node(xml)
- self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
- vif, vif['devname'])
-
- def _check_ovs_virtualport_driver(self, d, vif, want_iface_id):
- self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
- xml = self._get_instance_xml(d, vif)
- node = self._get_node(xml)
- self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
- vif, "br0")
- vp = node.find("virtualport")
- self.assertEqual(vp.get("type"), "openvswitch")
- iface_id_found = False
- for p_elem in vp.findall("parameters"):
- iface_id = p_elem.get("interfaceid", None)
- if iface_id:
- self.assertEqual(iface_id, want_iface_id)
- iface_id_found = True
-
- self.assertTrue(iface_id_found)
-
- def test_generic_ovs_virtualport_driver(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9011))
- want_iface_id = self.vif_ovs['ovs_interfaceid']
- self._check_ovs_virtualport_driver(d,
- self.vif_ovs,
- want_iface_id)
-
- def test_generic_ivs_virtualport_driver(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9011))
- want_iface_id = self.vif_ivs['ovs_interfaceid']
- self._check_ivs_virtualport_driver(d,
- self.vif_ivs,
- want_iface_id)
-
- def test_ivs_plug_with_nova_firewall(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- br_want = "qbr" + self.vif_ivs['id']
- br_want = br_want[:network_model.NIC_NAME_LEN]
- xml = self._get_instance_xml(d, self.vif_ivs)
- node = self._get_node(xml)
- self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
- self.vif_ivs, br_want, 1)
-
- def test_ivs_plug_with_port_filter_direct_no_nova_firewall(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- br_want = "qbr" + self.vif_ivs_filter_hybrid['id']
- br_want = br_want[:network_model.NIC_NAME_LEN]
- self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
- xml = self._get_instance_xml(d, self.vif_ivs_filter_hybrid)
- node = self._get_node(xml)
- self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
- self.vif_ivs_filter_hybrid, br_want, 0)
-
- def test_ivs_plug_with_port_filter_hybrid_no_nova_firewall(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- br_want = self.vif_ivs_filter_direct['devname']
- self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
- xml = self._get_instance_xml(d, self.vif_ivs_filter_direct)
- node = self._get_node(xml)
- self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
- self.vif_ivs_filter_direct, br_want, 0)
-
- def test_hybrid_plug_without_nova_firewall(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- br_want = "qbr" + self.vif_ovs_hybrid['id']
- br_want = br_want[:network_model.NIC_NAME_LEN]
- self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
- xml = self._get_instance_xml(d, self.vif_ovs_hybrid)
- node = self._get_node(xml)
- self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
- self.vif_ovs_hybrid, br_want, 0)
-
- def test_direct_plug_with_port_filter_cap_no_nova_firewall(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- br_want = self.vif_midonet['devname']
- xml = self._get_instance_xml(d, self.vif_ovs_filter_cap)
- node = self._get_node(xml)
- self._assertTypeAndMacEquals(node, "bridge", "target", "dev",
- self.vif_ovs_filter_cap, br_want)
-
- def _check_neutron_hybrid_driver(self, d, vif, br_want):
- self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
- xml = self._get_instance_xml(d, vif)
- node = self._get_node(xml)
- self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
- vif, br_want, 1)
-
- def test_generic_hybrid_driver(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- br_want = "qbr" + self.vif_ovs['id']
- br_want = br_want[:network_model.NIC_NAME_LEN]
- self._check_neutron_hybrid_driver(d,
- self.vif_ovs,
- br_want)
-
- def test_ivs_hybrid_driver(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- br_want = "qbr" + self.vif_ivs['id']
- br_want = br_want[:network_model.NIC_NAME_LEN]
- self._check_neutron_hybrid_driver(d,
- self.vif_ivs,
- br_want)
-
- def test_mlnx_direct_vif_driver(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- xml = self._get_instance_xml(d,
- self.vif_mlnx)
- node = self._get_node(xml)
- self.assertEqual(node.get("type"), "direct")
- self._assertTypeEquals(node, "direct", "source",
- "dev", "eth-xxx-yyy-zzz")
- self._assertTypeEquals(node, "direct", "source",
- "mode", "passthrough")
- self._assertMacEquals(node, self.vif_mlnx)
- self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
-
- def test_midonet_ethernet_vif_driver(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
- br_want = self.vif_midonet['devname']
- xml = self._get_instance_xml(d, self.vif_midonet)
- node = self._get_node(xml)
- self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
- self.vif_midonet, br_want)
-
- def test_generic_8021qbh_driver(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- xml = self._get_instance_xml(d, self.vif_8021qbh)
- node = self._get_node(xml)
- self._assertTypeAndPciEquals(node, "hostdev", self.vif_8021qbh)
- self._assertMacEquals(node, self.vif_8021qbh)
- vp = node.find("virtualport")
- self.assertEqual(vp.get("type"), "802.1Qbh")
- profile_id_found = False
- for p_elem in vp.findall("parameters"):
- details = self.vif_8021qbh["details"]
- profile_id = p_elem.get("profileid", None)
- if profile_id:
- self.assertEqual(profile_id,
- details[network_model.VIF_DETAILS_PROFILEID])
- profile_id_found = True
-
- self.assertTrue(profile_id_found)
-
- def test_hw_veb_driver(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- xml = self._get_instance_xml(d, self.vif_hw_veb)
- node = self._get_node(xml)
- self._assertTypeAndPciEquals(node, "hostdev", self.vif_hw_veb)
- self._assertMacEquals(node, self.vif_hw_veb)
- vlan = node.find("vlan").find("tag").get("id")
- vlan_want = self.vif_hw_veb["details"]["vlan"]
- self.assertEqual(vlan, vlan_want)
-
- def test_generic_iovisor_driver(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
- br_want = self.vif_ivs['devname']
- xml = self._get_instance_xml(d, self.vif_ivs)
- node = self._get_node(xml)
- self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
- self.vif_ivs, br_want)
-
- def test_generic_8021qbg_driver(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- xml = self._get_instance_xml(d, self.vif_8021qbg)
-
- node = self._get_node(xml)
- self._assertTypeEquals(node, "direct", "source", "dev", "eth0")
- self._assertMacEquals(node, self.vif_8021qbg)
-
- vp = node.find("virtualport")
- self.assertEqual(vp.get("type"), "802.1Qbg")
- manager_id_found = False
- type_id_found = False
- typeversion_id_found = False
- instance_id_found = False
- for p_elem in vp.findall("parameters"):
- wantparams = self.vif_8021qbg['qbg_params']
- manager_id = p_elem.get("managerid", None)
- type_id = p_elem.get("typeid", None)
- typeversion_id = p_elem.get("typeidversion", None)
- instance_id = p_elem.get("instanceid", None)
- if manager_id:
- self.assertEqual(manager_id,
- wantparams['managerid'])
- manager_id_found = True
- if type_id:
- self.assertEqual(type_id,
- wantparams['typeid'])
- type_id_found = True
- if typeversion_id:
- self.assertEqual(typeversion_id,
- wantparams['typeidversion'])
- typeversion_id_found = True
- if instance_id:
- self.assertEqual(instance_id,
- wantparams['instanceid'])
- instance_id_found = True
-
- self.assertTrue(manager_id_found)
- self.assertTrue(type_id_found)
- self.assertTrue(typeversion_id_found)
- self.assertTrue(instance_id_found)
diff --git a/nova/tests/virt/libvirt/test_volume.py b/nova/tests/virt/libvirt/test_volume.py
deleted file mode 100644
index f2e4518d10..0000000000
--- a/nova/tests/virt/libvirt/test_volume.py
+++ /dev/null
@@ -1,1160 +0,0 @@
-# Copyright 2010 OpenStack Foundation
-# Copyright 2012 University Of Minho
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-import os
-import time
-
-import fixtures
-import mock
-from oslo.concurrency import processutils
-from oslo.config import cfg
-
-from nova import exception
-from nova.storage import linuxscsi
-from nova import test
-from nova.tests.virt.libvirt import fake_libvirt_utils
-from nova import utils
-from nova.virt import fake
-from nova.virt.libvirt import utils as libvirt_utils
-from nova.virt.libvirt import volume
-
-CONF = cfg.CONF
-
-
-class LibvirtVolumeTestCase(test.NoDBTestCase):
-
- def setUp(self):
- super(LibvirtVolumeTestCase, self).setUp()
- self.executes = []
-
- def fake_execute(*cmd, **kwargs):
- self.executes.append(cmd)
- return None, None
-
- self.stubs.Set(utils, 'execute', fake_execute)
-
- class FakeLibvirtDriver(object):
- def __init__(self, hyperv="QEMU", version=1005001):
- self.hyperv = hyperv
- self.version = version
-
- def _get_hypervisor_version(self):
- return self.version
-
- def _get_hypervisor_type(self):
- return self.hyperv
-
- def _get_all_block_devices(self):
- return []
-
- self.fake_conn = FakeLibvirtDriver(fake.FakeVirtAPI())
- self.connr = {
- 'ip': '127.0.0.1',
- 'initiator': 'fake_initiator',
- 'host': 'fake_host'
- }
- self.disk_info = {
- "bus": "virtio",
- "dev": "vde",
- "type": "disk",
- }
- self.name = 'volume-00000001'
- self.location = '10.0.2.15:3260'
- self.iqn = 'iqn.2010-10.org.openstack:%s' % self.name
- self.vol = {'id': 1, 'name': self.name}
- self.uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
- self.user = 'foo'
-
- def _assertNetworkAndProtocolEquals(self, tree):
- self.assertEqual(tree.get('type'), 'network')
- self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
- rbd_name = '%s/%s' % ('rbd', self.name)
- self.assertEqual(tree.find('./source').get('name'), rbd_name)
-
- def _assertFileTypeEquals(self, tree, file_path):
- self.assertEqual(tree.get('type'), 'file')
- self.assertEqual(tree.find('./source').get('file'), file_path)
-
- def _assertDiskInfoEquals(self, tree, disk_info):
- self.assertEqual(tree.get('device'), disk_info['type'])
- self.assertEqual(tree.find('./target').get('bus'),
- disk_info['bus'])
- self.assertEqual(tree.find('./target').get('dev'),
- disk_info['dev'])
-
- def _test_libvirt_volume_driver_disk_info(self):
- libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
- connection_info = {
- 'driver_volume_type': 'fake',
- 'data': {
- 'device_path': '/foo',
- },
- 'serial': 'fake_serial',
- }
- conf = libvirt_driver.get_config(connection_info, self.disk_info)
- tree = conf.format_dom()
- self._assertDiskInfoEquals(tree, self.disk_info)
-
- def test_libvirt_volume_disk_info_type(self):
- self.disk_info['type'] = 'cdrom'
- self._test_libvirt_volume_driver_disk_info()
-
- def test_libvirt_volume_disk_info_dev(self):
- self.disk_info['dev'] = 'hdc'
- self._test_libvirt_volume_driver_disk_info()
-
- def test_libvirt_volume_disk_info_bus(self):
- self.disk_info['bus'] = 'scsi'
- self._test_libvirt_volume_driver_disk_info()
-
- def test_libvirt_volume_driver_serial(self):
- libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
- connection_info = {
- 'driver_volume_type': 'fake',
- 'data': {
- 'device_path': '/foo',
- },
- 'serial': 'fake_serial',
- }
- conf = libvirt_driver.get_config(connection_info, self.disk_info)
- tree = conf.format_dom()
- self.assertEqual('block', tree.get('type'))
- self.assertEqual('fake_serial', tree.find('./serial').text)
- self.assertIsNone(tree.find('./blockio'))
-
- def test_libvirt_volume_driver_blockio(self):
- libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
- connection_info = {
- 'driver_volume_type': 'fake',
- 'data': {
- 'device_path': '/foo',
- 'logical_block_size': '4096',
- 'physical_block_size': '4096',
- },
- 'serial': 'fake_serial',
- }
- disk_info = {
- "bus": "virtio",
- "dev": "vde",
- "type": "disk",
- }
- conf = libvirt_driver.get_config(connection_info, disk_info)
- tree = conf.format_dom()
- blockio = tree.find('./blockio')
- self.assertEqual('4096', blockio.get('logical_block_size'))
- self.assertEqual('4096', blockio.get('physical_block_size'))
-
- def test_libvirt_volume_driver_iotune(self):
- libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
- connection_info = {
- 'driver_volume_type': 'fake',
- 'data': {
- "device_path": "/foo",
- 'qos_specs': 'bar',
- },
- }
- disk_info = {
- "bus": "virtio",
- "dev": "vde",
- "type": "disk",
- }
- conf = libvirt_driver.get_config(connection_info, disk_info)
- tree = conf.format_dom()
- iotune = tree.find('./iotune')
- # ensure invalid qos_specs is ignored
- self.assertIsNone(iotune)
-
- specs = {
- 'total_bytes_sec': '102400',
- 'read_bytes_sec': '51200',
- 'write_bytes_sec': '0',
- 'total_iops_sec': '0',
- 'read_iops_sec': '200',
- 'write_iops_sec': '200',
- }
- del connection_info['data']['qos_specs']
- connection_info['data'].update(dict(qos_specs=specs))
- conf = libvirt_driver.get_config(connection_info, disk_info)
- tree = conf.format_dom()
- self.assertEqual('102400', tree.find('./iotune/total_bytes_sec').text)
- self.assertEqual('51200', tree.find('./iotune/read_bytes_sec').text)
- self.assertEqual('0', tree.find('./iotune/write_bytes_sec').text)
- self.assertEqual('0', tree.find('./iotune/total_iops_sec').text)
- self.assertEqual('200', tree.find('./iotune/read_iops_sec').text)
- self.assertEqual('200', tree.find('./iotune/write_iops_sec').text)
-
- def test_libvirt_volume_driver_readonly(self):
- libvirt_driver = volume.LibvirtVolumeDriver(self.fake_conn)
- connection_info = {
- 'driver_volume_type': 'fake',
- 'data': {
- "device_path": "/foo",
- 'access_mode': 'bar',
- },
- }
- disk_info = {
- "bus": "virtio",
- "dev": "vde",
- "type": "disk",
- }
- self.assertRaises(exception.InvalidVolumeAccessMode,
- libvirt_driver.get_config,
- connection_info, self.disk_info)
-
- connection_info['data']['access_mode'] = 'rw'
- conf = libvirt_driver.get_config(connection_info, disk_info)
- tree = conf.format_dom()
- readonly = tree.find('./readonly')
- self.assertIsNone(readonly)
-
- connection_info['data']['access_mode'] = 'ro'
- conf = libvirt_driver.get_config(connection_info, disk_info)
- tree = conf.format_dom()
- readonly = tree.find('./readonly')
- self.assertIsNotNone(readonly)
-
- def iscsi_connection(self, volume, location, iqn):
- dev_name = 'ip-%s-iscsi-%s-lun-1' % (location, iqn)
- dev_path = '/dev/disk/by-path/%s' % (dev_name)
- return {
- 'driver_volume_type': 'iscsi',
- 'data': {
- 'volume_id': volume['id'],
- 'target_portal': location,
- 'target_iqn': iqn,
- 'target_lun': 1,
- 'device_path': dev_path,
- 'qos_specs': {
- 'total_bytes_sec': '102400',
- 'read_iops_sec': '200',
- }
- }
- }
-
- def test_rescan_multipath(self):
- libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
- libvirt_driver._rescan_multipath()
- expected_multipath_cmd = ('multipath', '-r')
- self.assertIn(expected_multipath_cmd, self.executes)
-
- def test_iscsiadm_discover_parsing(self):
- # Ensure that parsing iscsiadm discover ignores cruft.
-
- targets = [
- ["192.168.204.82:3260,1",
- ("iqn.2010-10.org.openstack:volume-"
- "f9b12623-6ce3-4dac-a71f-09ad4249bdd3")],
- ["192.168.204.82:3261,1",
- ("iqn.2010-10.org.openstack:volume-"
- "f9b12623-6ce3-4dac-a71f-09ad4249bdd4")]]
-
- # This slight wonkiness brought to you by pep8, as the actual
- # example output runs about 97 chars wide.
- sample_input = """Loading iscsi modules: done
-Starting iSCSI initiator service: done
-Setting up iSCSI targets: unused
-%s %s
-%s %s
-""" % (targets[0][0], targets[0][1], targets[1][0], targets[1][1])
- driver = volume.LibvirtISCSIVolumeDriver("none")
- out = driver._get_target_portals_from_iscsiadm_output(sample_input)
- self.assertEqual(out, targets)
-
- def test_libvirt_iscsi_driver(self):
- # NOTE(vish) exists is to make driver assume connecting worked
- self.stubs.Set(os.path, 'exists', lambda x: True)
- libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
- connection_info = self.iscsi_connection(self.vol, self.location,
- self.iqn)
- libvirt_driver.connect_volume(connection_info, self.disk_info)
- libvirt_driver.disconnect_volume(connection_info, "vde")
- expected_commands = [('iscsiadm', '-m', 'node', '-T', self.iqn,
- '-p', self.location),
- ('iscsiadm', '-m', 'session'),
- ('iscsiadm', '-m', 'node', '-T', self.iqn,
- '-p', self.location, '--login'),
- ('iscsiadm', '-m', 'node', '-T', self.iqn,
- '-p', self.location, '--op', 'update',
- '-n', 'node.startup', '-v', 'automatic'),
- ('iscsiadm', '-m', 'node', '-T', self.iqn,
- '-p', self.location, '--rescan'),
- ('iscsiadm', '-m', 'node', '-T', self.iqn,
- '-p', self.location, '--op', 'update',
- '-n', 'node.startup', '-v', 'manual'),
- ('iscsiadm', '-m', 'node', '-T', self.iqn,
- '-p', self.location, '--logout'),
- ('iscsiadm', '-m', 'node', '-T', self.iqn,
- '-p', self.location, '--op', 'delete')]
- self.assertEqual(self.executes, expected_commands)
-
- def test_libvirt_iscsi_driver_still_in_use(self):
- # NOTE(vish) exists is to make driver assume connecting worked
- self.stubs.Set(os.path, 'exists', lambda x: True)
- libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
- devs = ['/dev/disk/by-path/ip-%s-iscsi-%s-lun-2' % (self.location,
- self.iqn)]
- self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
- vol = {'id': 1, 'name': self.name}
- connection_info = self.iscsi_connection(vol, self.location, self.iqn)
- libvirt_driver.connect_volume(connection_info, self.disk_info)
- dev_name = 'ip-%s-iscsi-%s-lun-1' % (self.location, self.iqn)
- libvirt_driver.disconnect_volume(connection_info, "vde")
- expected_commands = [('iscsiadm', '-m', 'node', '-T', self.iqn,
- '-p', self.location),
- ('iscsiadm', '-m', 'session'),
- ('iscsiadm', '-m', 'node', '-T', self.iqn,
- '-p', self.location, '--login'),
- ('iscsiadm', '-m', 'node', '-T', self.iqn,
- '-p', self.location, '--op', 'update',
- '-n', 'node.startup', '-v', 'automatic'),
- ('iscsiadm', '-m', 'node', '-T', self.iqn,
- '-p', self.location, '--rescan'),
- ('cp', '/dev/stdin',
- '/sys/block/%s/device/delete' % dev_name)]
- self.assertEqual(self.executes, expected_commands)
-
- def test_libvirt_iscsi_driver_disconnect_multipath_error(self):
- libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
- devs = ['/dev/disk/by-path/ip-%s-iscsi-%s-lun-2' % (self.location,
- self.iqn)]
- with contextlib.nested(
- mock.patch.object(os.path, 'exists', return_value=True),
- mock.patch.object(self.fake_conn, '_get_all_block_devices',
- return_value=devs),
- mock.patch.object(libvirt_driver, '_rescan_multipath'),
- mock.patch.object(libvirt_driver, '_run_multipath'),
- mock.patch.object(libvirt_driver, '_get_multipath_device_name',
- return_value='/dev/mapper/fake-multipath-devname'),
- mock.patch.object(libvirt_driver,
- '_get_target_portals_from_iscsiadm_output',
- return_value=[('fake-ip', 'fake-portal')]),
- mock.patch.object(libvirt_driver, '_get_multipath_iqn',
- return_value='fake-portal'),
- ) as (mock_exists, mock_devices, mock_rescan_multipath,
- mock_run_multipath, mock_device_name, mock_get_portals,
- mock_get_iqn):
- mock_run_multipath.side_effect = processutils.ProcessExecutionError
- vol = {'id': 1, 'name': self.name}
- connection_info = self.iscsi_connection(vol, self.location,
- self.iqn)
- libvirt_driver.connect_volume(connection_info, self.disk_info)
-
- libvirt_driver.use_multipath = True
- libvirt_driver.disconnect_volume(connection_info, "vde")
- mock_run_multipath.assert_called_once_with(
- ['-f', 'fake-multipath-devname'],
- check_exit_code=[0, 1])
-
- def test_libvirt_iscsi_driver_get_config(self):
- libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
- dev_name = 'ip-%s-iscsi-%s-lun-1' % (self.location, self.iqn)
- dev_path = '/dev/disk/by-path/%s' % (dev_name)
- vol = {'id': 1, 'name': self.name}
- connection_info = self.iscsi_connection(vol, self.location,
- self.iqn)
- conf = libvirt_driver.get_config(connection_info, self.disk_info)
- tree = conf.format_dom()
- self.assertEqual('block', tree.get('type'))
- self.assertEqual(dev_path, tree.find('./source').get('dev'))
-
- libvirt_driver.use_multipath = True
- conf = libvirt_driver.get_config(connection_info, self.disk_info)
- tree = conf.format_dom()
- self.assertEqual('block', tree.get('type'))
- self.assertEqual(dev_path, tree.find('./source').get('dev'))
-
- def test_libvirt_iscsi_driver_multipath_id(self):
- libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
- libvirt_driver.use_multipath = True
- self.stubs.Set(libvirt_driver, '_run_iscsiadm_bare',
- lambda x, check_exit_code: ('',))
- self.stubs.Set(libvirt_driver, '_rescan_iscsi', lambda: None)
- self.stubs.Set(libvirt_driver, '_get_host_device', lambda x: None)
- self.stubs.Set(libvirt_driver, '_rescan_multipath', lambda: None)
- fake_multipath_id = 'fake_multipath_id'
- fake_multipath_device = '/dev/mapper/%s' % fake_multipath_id
- self.stubs.Set(libvirt_driver, '_get_multipath_device_name',
- lambda x: fake_multipath_device)
-
- def fake_disconnect_volume_multipath_iscsi(iscsi_properties,
- multipath_device):
- if fake_multipath_device != multipath_device:
- raise Exception('Invalid multipath_device.')
-
- self.stubs.Set(libvirt_driver, '_disconnect_volume_multipath_iscsi',
- fake_disconnect_volume_multipath_iscsi)
- with mock.patch.object(os.path, 'exists', return_value=True):
- vol = {'id': 1, 'name': self.name}
- connection_info = self.iscsi_connection(vol, self.location,
- self.iqn)
- libvirt_driver.connect_volume(connection_info,
- self.disk_info)
- self.assertEqual(fake_multipath_id,
- connection_info['data']['multipath_id'])
- libvirt_driver.disconnect_volume(connection_info, "fake")
-
- def test_sanitize_log_run_iscsiadm(self):
- # Tests that the parameters to the _run_iscsiadm function are sanitized
- # for passwords when logged.
- def fake_debug(*args, **kwargs):
- self.assertIn('node.session.auth.password', args[0])
- self.assertNotIn('scrubme', args[0])
-
- libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
- connection_info = self.iscsi_connection(self.vol, self.location,
- self.iqn)
- iscsi_properties = connection_info['data']
- with mock.patch.object(volume.LOG, 'debug',
- side_effect=fake_debug) as debug_mock:
- libvirt_driver._iscsiadm_update(iscsi_properties,
- 'node.session.auth.password',
- 'scrubme')
- # we don't care what the log message is, we just want to make sure
- # our stub method is called which asserts the password is scrubbed
- self.assertTrue(debug_mock.called)
-
- def iser_connection(self, volume, location, iqn):
- return {
- 'driver_volume_type': 'iser',
- 'data': {
- 'volume_id': volume['id'],
- 'target_portal': location,
- 'target_iqn': iqn,
- 'target_lun': 1,
- }
- }
-
- def sheepdog_connection(self, volume):
- return {
- 'driver_volume_type': 'sheepdog',
- 'data': {
- 'name': volume['name']
- }
- }
-
- def test_libvirt_sheepdog_driver(self):
- libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
- connection_info = self.sheepdog_connection(self.vol)
- conf = libvirt_driver.get_config(connection_info, self.disk_info)
- tree = conf.format_dom()
- self.assertEqual(tree.get('type'), 'network')
- self.assertEqual(tree.find('./source').get('protocol'), 'sheepdog')
- self.assertEqual(tree.find('./source').get('name'), self.name)
- libvirt_driver.disconnect_volume(connection_info, "vde")
-
- def rbd_connection(self, volume):
- return {
- 'driver_volume_type': 'rbd',
- 'data': {
- 'name': '%s/%s' % ('rbd', volume['name']),
- 'auth_enabled': CONF.libvirt.rbd_secret_uuid is not None,
- 'auth_username': CONF.libvirt.rbd_user,
- 'secret_type': 'ceph',
- 'secret_uuid': CONF.libvirt.rbd_secret_uuid,
- 'qos_specs': {
- 'total_bytes_sec': '1048576',
- 'read_iops_sec': '500',
- }
- }
- }
-
- def test_libvirt_rbd_driver(self):
- libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
- connection_info = self.rbd_connection(self.vol)
- conf = libvirt_driver.get_config(connection_info, self.disk_info)
- tree = conf.format_dom()
- self._assertNetworkAndProtocolEquals(tree)
- self.assertIsNone(tree.find('./source/auth'))
- self.assertEqual('1048576', tree.find('./iotune/total_bytes_sec').text)
- self.assertEqual('500', tree.find('./iotune/read_iops_sec').text)
- libvirt_driver.disconnect_volume(connection_info, "vde")
-
- def test_libvirt_rbd_driver_hosts(self):
- libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
- connection_info = self.rbd_connection(self.vol)
- hosts = ['example.com', '1.2.3.4', '::1']
- ports = [None, '6790', '6791']
- connection_info['data']['hosts'] = hosts
- connection_info['data']['ports'] = ports
- conf = libvirt_driver.get_config(connection_info, self.disk_info)
- tree = conf.format_dom()
- self._assertNetworkAndProtocolEquals(tree)
- self.assertIsNone(tree.find('./source/auth'))
- found_hosts = tree.findall('./source/host')
- self.assertEqual([host.get('name') for host in found_hosts], hosts)
- self.assertEqual([host.get('port') for host in found_hosts], ports)
- libvirt_driver.disconnect_volume(connection_info, "vde")
-
- def test_libvirt_rbd_driver_auth_enabled(self):
- libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
- connection_info = self.rbd_connection(self.vol)
- secret_type = 'ceph'
- connection_info['data']['auth_enabled'] = True
- connection_info['data']['auth_username'] = self.user
- connection_info['data']['secret_type'] = secret_type
- connection_info['data']['secret_uuid'] = self.uuid
-
- conf = libvirt_driver.get_config(connection_info, self.disk_info)
- tree = conf.format_dom()
- self._assertNetworkAndProtocolEquals(tree)
- self.assertEqual(tree.find('./auth').get('username'), self.user)
- self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
- self.assertEqual(tree.find('./auth/secret').get('uuid'), self.uuid)
- libvirt_driver.disconnect_volume(connection_info, "vde")
-
- def test_libvirt_rbd_driver_auth_enabled_flags_override(self):
- libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
- connection_info = self.rbd_connection(self.vol)
- secret_type = 'ceph'
- connection_info['data']['auth_enabled'] = True
- connection_info['data']['auth_username'] = self.user
- connection_info['data']['secret_type'] = secret_type
- connection_info['data']['secret_uuid'] = self.uuid
-
- flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b'
- flags_user = 'bar'
- self.flags(rbd_user=flags_user,
- rbd_secret_uuid=flags_uuid,
- group='libvirt')
-
- conf = libvirt_driver.get_config(connection_info, self.disk_info)
- tree = conf.format_dom()
- self._assertNetworkAndProtocolEquals(tree)
- self.assertEqual(tree.find('./auth').get('username'), flags_user)
- self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
- self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
- libvirt_driver.disconnect_volume(connection_info, "vde")
-
- def test_libvirt_rbd_driver_auth_disabled(self):
- libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
- connection_info = self.rbd_connection(self.vol)
- secret_type = 'ceph'
- connection_info['data']['auth_enabled'] = False
- connection_info['data']['auth_username'] = self.user
- connection_info['data']['secret_type'] = secret_type
- connection_info['data']['secret_uuid'] = self.uuid
-
- conf = libvirt_driver.get_config(connection_info, self.disk_info)
- tree = conf.format_dom()
- self._assertNetworkAndProtocolEquals(tree)
- self.assertIsNone(tree.find('./auth'))
- libvirt_driver.disconnect_volume(connection_info, "vde")
-
- def test_libvirt_rbd_driver_auth_disabled_flags_override(self):
- libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
- connection_info = self.rbd_connection(self.vol)
- secret_type = 'ceph'
- connection_info['data']['auth_enabled'] = False
- connection_info['data']['auth_username'] = self.user
- connection_info['data']['secret_type'] = secret_type
- connection_info['data']['secret_uuid'] = self.uuid
-
- # NOTE: Supplying the rbd_secret_uuid will enable authentication
- # locally in nova-compute even if not enabled in nova-volume/cinder
- flags_uuid = '37152720-1785-11e2-a740-af0c1d8b8e4b'
- flags_user = 'bar'
- self.flags(rbd_user=flags_user,
- rbd_secret_uuid=flags_uuid,
- group='libvirt')
-
- conf = libvirt_driver.get_config(connection_info, self.disk_info)
- tree = conf.format_dom()
- self._assertNetworkAndProtocolEquals(tree)
- self.assertEqual(tree.find('./auth').get('username'), flags_user)
- self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
- self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
- libvirt_driver.disconnect_volume(connection_info, "vde")
-
- def test_libvirt_kvm_volume(self):
- self.stubs.Set(os.path, 'exists', lambda x: True)
- libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
- connection_info = self.iscsi_connection(self.vol, self.location,
- self.iqn)
- conf = libvirt_driver.get_config(connection_info, self.disk_info)
- tree = conf.format_dom()
- dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (self.location,
- self.iqn)
- self.assertEqual(tree.get('type'), 'block')
- self.assertEqual(tree.find('./source').get('dev'), dev_str)
- libvirt_driver.disconnect_volume(connection_info, 'vde')
-
- def test_libvirt_kvm_volume_with_multipath(self):
- self.flags(iscsi_use_multipath=True, group='libvirt')
- self.stubs.Set(os.path, 'exists', lambda x: True)
- devs = ['/dev/mapper/sda', '/dev/mapper/sdb']
- self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
- libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
- connection_info = self.iscsi_connection(self.vol, self.location,
- self.iqn)
- mpdev_filepath = '/dev/mapper/foo'
- connection_info['data']['device_path'] = mpdev_filepath
- libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
- self.stubs.Set(libvirt_driver,
- '_get_target_portals_from_iscsiadm_output',
- lambda x: [[self.location, self.iqn]])
- libvirt_driver.connect_volume(connection_info, self.disk_info)
- conf = libvirt_driver.get_config(connection_info, self.disk_info)
- tree = conf.format_dom()
- self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
- libvirt_driver._get_multipath_iqn = lambda x: self.iqn
- libvirt_driver.disconnect_volume(connection_info, 'vde')
- expected_multipath_cmd = ('multipath', '-f', 'foo')
- self.assertIn(expected_multipath_cmd, self.executes)
-
- def test_libvirt_kvm_volume_with_multipath_still_in_use(self):
- name = 'volume-00000001'
- location = '10.0.2.15:3260'
- iqn = 'iqn.2010-10.org.openstack:%s' % name
- mpdev_filepath = '/dev/mapper/foo'
-
- def _get_multipath_device_name(path):
- if '%s-lun-1' % iqn in path:
- return mpdev_filepath
- return '/dev/mapper/donotdisconnect'
-
- self.flags(iscsi_use_multipath=True, group='libvirt')
- self.stubs.Set(os.path, 'exists', lambda x: True)
-
- libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
- libvirt_driver._get_multipath_device_name =\
- lambda x: _get_multipath_device_name(x)
-
- block_devs = ['/dev/disks/by-path/%s-iscsi-%s-lun-2' % (location, iqn)]
- self.stubs.Set(self.fake_conn, '_get_all_block_devices',
- lambda: block_devs)
-
- vol = {'id': 1, 'name': name}
- connection_info = self.iscsi_connection(vol, location, iqn)
- connection_info['data']['device_path'] = mpdev_filepath
-
- libvirt_driver._get_multipath_iqn = lambda x: iqn
-
- iscsi_devs = ['1.2.3.4-iscsi-%s-lun-1' % iqn,
- '%s-iscsi-%s-lun-1' % (location, iqn),
- '%s-iscsi-%s-lun-2' % (location, iqn)]
- libvirt_driver._get_iscsi_devices = lambda: iscsi_devs
-
- self.stubs.Set(libvirt_driver,
- '_get_target_portals_from_iscsiadm_output',
- lambda x: [[location, iqn]])
-
- # Set up disconnect volume mock expectations
- self.mox.StubOutWithMock(libvirt_driver, '_delete_device')
- self.mox.StubOutWithMock(libvirt_driver, '_rescan_multipath')
- libvirt_driver._rescan_multipath()
- libvirt_driver._delete_device('/dev/disk/by-path/%s' % iscsi_devs[0])
- libvirt_driver._delete_device('/dev/disk/by-path/%s' % iscsi_devs[1])
- libvirt_driver._rescan_multipath()
-
- # Ensure that the mpath devices are deleted
- self.mox.ReplayAll()
- libvirt_driver.disconnect_volume(connection_info, 'vde')
-
- def test_libvirt_kvm_volume_with_multipath_getmpdev(self):
- self.flags(iscsi_use_multipath=True, group='libvirt')
- self.stubs.Set(os.path, 'exists', lambda x: True)
- libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
- name0 = 'volume-00000000'
- iqn0 = 'iqn.2010-10.org.openstack:%s' % name0
- dev0 = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-0' % (self.location, iqn0)
- dev = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (self.location,
- self.iqn)
- devs = [dev0, dev]
- self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
- connection_info = self.iscsi_connection(self.vol, self.location,
- self.iqn)
- mpdev_filepath = '/dev/mapper/foo'
- libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
- self.stubs.Set(libvirt_driver,
- '_get_target_portals_from_iscsiadm_output',
- lambda x: [['fake_portal1', 'fake_iqn1']])
- libvirt_driver.connect_volume(connection_info, self.disk_info)
- conf = libvirt_driver.get_config(connection_info, self.disk_info)
- tree = conf.format_dom()
- self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
- libvirt_driver.disconnect_volume(connection_info, 'vde')
-
- def test_libvirt_kvm_iser_volume_with_multipath(self):
- self.flags(iser_use_multipath=True, group='libvirt')
- self.stubs.Set(os.path, 'exists', lambda x: True)
- self.stubs.Set(time, 'sleep', lambda x: None)
- devs = ['/dev/mapper/sda', '/dev/mapper/sdb']
- self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
- libvirt_driver = volume.LibvirtISERVolumeDriver(self.fake_conn)
- name = 'volume-00000001'
- location = '10.0.2.15:3260'
- iqn = 'iqn.2010-10.org.iser.openstack:%s' % name
- vol = {'id': 1, 'name': name}
- connection_info = self.iser_connection(vol, location, iqn)
- mpdev_filepath = '/dev/mapper/foo'
- connection_info['data']['device_path'] = mpdev_filepath
- disk_info = {
- "bus": "virtio",
- "dev": "vde",
- "type": "disk",
- }
- libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
- self.stubs.Set(libvirt_driver,
- '_get_target_portals_from_iscsiadm_output',
- lambda x: [[location, iqn]])
- libvirt_driver.connect_volume(connection_info, disk_info)
- conf = libvirt_driver.get_config(connection_info, disk_info)
- tree = conf.format_dom()
- self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
- libvirt_driver._get_multipath_iqn = lambda x: iqn
- libvirt_driver.disconnect_volume(connection_info, 'vde')
- expected_multipath_cmd = ('multipath', '-f', 'foo')
- self.assertIn(expected_multipath_cmd, self.executes)
-
- def test_libvirt_kvm_iser_volume_with_multipath_getmpdev(self):
- self.flags(iser_use_multipath=True, group='libvirt')
- self.stubs.Set(os.path, 'exists', lambda x: True)
- self.stubs.Set(time, 'sleep', lambda x: None)
- libvirt_driver = volume.LibvirtISERVolumeDriver(self.fake_conn)
- name0 = 'volume-00000000'
- location0 = '10.0.2.15:3260'
- iqn0 = 'iqn.2010-10.org.iser.openstack:%s' % name0
- dev0 = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-0' % (location0, iqn0)
- name = 'volume-00000001'
- location = '10.0.2.15:3260'
- iqn = 'iqn.2010-10.org.iser.openstack:%s' % name
- vol = {'id': 1, 'name': name}
- dev = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
- devs = [dev0, dev]
- self.stubs.Set(self.fake_conn, '_get_all_block_devices', lambda: devs)
- self.stubs.Set(libvirt_driver, '_get_iscsi_devices', lambda: [])
- connection_info = self.iser_connection(vol, location, iqn)
- mpdev_filepath = '/dev/mapper/foo'
- disk_info = {
- "bus": "virtio",
- "dev": "vde",
- "type": "disk",
- }
- libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
- self.stubs.Set(libvirt_driver,
- '_get_target_portals_from_iscsiadm_output',
- lambda x: [['fake_portal1', 'fake_iqn1']])
- libvirt_driver.connect_volume(connection_info, disk_info)
- conf = libvirt_driver.get_config(connection_info, disk_info)
- tree = conf.format_dom()
- self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
- libvirt_driver.disconnect_volume(connection_info, 'vde')
-
- def test_libvirt_nfs_driver(self):
- # NOTE(vish) exists is to make driver assume connecting worked
- mnt_base = '/mnt'
- self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
-
- libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
- self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
-
- export_string = '192.168.1.1:/nfs/share1'
- export_mnt_base = os.path.join(mnt_base,
- utils.get_hash_str(export_string))
-
- connection_info = {'data': {'export': export_string,
- 'name': self.name}}
- libvirt_driver.connect_volume(connection_info, self.disk_info)
- libvirt_driver.disconnect_volume(connection_info, "vde")
-
- device_path = os.path.join(export_mnt_base,
- connection_info['data']['name'])
- self.assertEqual(device_path, connection_info['data']['device_path'])
- expected_commands = [
- ('mkdir', '-p', export_mnt_base),
- ('mount', '-t', 'nfs', export_string, export_mnt_base),
- ('umount', export_mnt_base)]
- self.assertEqual(expected_commands, self.executes)
-
- @mock.patch.object(volume.utils, 'execute')
- @mock.patch.object(volume.LOG, 'debug')
- @mock.patch.object(volume.LOG, 'exception')
- def test_libvirt_nfs_driver_umount_error(self, mock_LOG_exception,
- mock_LOG_debug, mock_utils_exe):
- export_string = '192.168.1.1:/nfs/share1'
- connection_info = {'data': {'export': export_string,
- 'name': self.name}}
- libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
- mock_utils_exe.side_effect = processutils.ProcessExecutionError(
- None, None, None, 'umount', 'umount: device is busy.')
- libvirt_driver.disconnect_volume(connection_info, "vde")
- self.assertTrue(mock_LOG_debug.called)
- mock_utils_exe.side_effect = processutils.ProcessExecutionError(
- None, None, None, 'umount', 'umount: target is busy.')
- libvirt_driver.disconnect_volume(connection_info, "vde")
- self.assertTrue(mock_LOG_debug.called)
- mock_utils_exe.side_effect = processutils.ProcessExecutionError(
- None, None, None, 'umount', 'umount: Other error.')
- libvirt_driver.disconnect_volume(connection_info, "vde")
- self.assertTrue(mock_LOG_exception.called)
-
- def test_libvirt_nfs_driver_get_config(self):
- libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
- mnt_base = '/mnt'
- self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
- export_string = '192.168.1.1:/nfs/share1'
- export_mnt_base = os.path.join(mnt_base,
- utils.get_hash_str(export_string))
- file_path = os.path.join(export_mnt_base, self.name)
-
- connection_info = {'data': {'export': export_string,
- 'name': self.name,
- 'device_path': file_path}}
- conf = libvirt_driver.get_config(connection_info, self.disk_info)
- tree = conf.format_dom()
- self._assertFileTypeEquals(tree, file_path)
- self.assertEqual('raw', tree.find('./driver').get('type'))
-
- def test_libvirt_nfs_driver_already_mounted(self):
- # NOTE(vish) exists is to make driver assume connecting worked
- mnt_base = '/mnt'
- self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
-
- libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
-
- export_string = '192.168.1.1:/nfs/share1'
- export_mnt_base = os.path.join(mnt_base,
- utils.get_hash_str(export_string))
-
- connection_info = {'data': {'export': export_string,
- 'name': self.name}}
- libvirt_driver.connect_volume(connection_info, self.disk_info)
- libvirt_driver.disconnect_volume(connection_info, "vde")
-
- expected_commands = [
- ('findmnt', '--target', export_mnt_base, '--source',
- export_string),
- ('umount', export_mnt_base)]
- self.assertEqual(self.executes, expected_commands)
-
- def test_libvirt_nfs_driver_with_opts(self):
- mnt_base = '/mnt'
- self.flags(nfs_mount_point_base=mnt_base, group='libvirt')
-
- libvirt_driver = volume.LibvirtNFSVolumeDriver(self.fake_conn)
- self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
- export_string = '192.168.1.1:/nfs/share1'
- options = '-o intr,nfsvers=3'
- export_mnt_base = os.path.join(mnt_base,
- utils.get_hash_str(export_string))
-
- connection_info = {'data': {'export': export_string,
- 'name': self.name,
- 'options': options}}
- libvirt_driver.connect_volume(connection_info, self.disk_info)
- libvirt_driver.disconnect_volume(connection_info, "vde")
-
- expected_commands = [
- ('mkdir', '-p', export_mnt_base),
- ('mount', '-t', 'nfs', '-o', 'intr,nfsvers=3',
- export_string, export_mnt_base),
- ('umount', export_mnt_base),
- ]
- self.assertEqual(expected_commands, self.executes)
-
- def aoe_connection(self, shelf, lun):
- aoedev = 'e%s.%s' % (shelf, lun)
- aoedevpath = '/dev/etherd/%s' % (aoedev)
- return {
- 'driver_volume_type': 'aoe',
- 'data': {
- 'target_shelf': shelf,
- 'target_lun': lun,
- 'device_path': aoedevpath
- }
- }
-
- @mock.patch('os.path.exists', return_value=True)
- def test_libvirt_aoe_driver(self, exists):
- libvirt_driver = volume.LibvirtAOEVolumeDriver(self.fake_conn)
- shelf = '100'
- lun = '1'
- connection_info = self.aoe_connection(shelf, lun)
- aoedev = 'e%s.%s' % (shelf, lun)
- aoedevpath = '/dev/etherd/%s' % (aoedev)
- libvirt_driver.connect_volume(connection_info, self.disk_info)
- exists.assert_called_with(aoedevpath)
- libvirt_driver.disconnect_volume(connection_info, "vde")
- self.assertEqual(aoedevpath, connection_info['data']['device_path'])
- expected_commands = [('aoe-revalidate', aoedev)]
- self.assertEqual(expected_commands, self.executes)
-
- def test_libvirt_aoe_driver_get_config(self):
- libvirt_driver = volume.LibvirtAOEVolumeDriver(self.fake_conn)
- shelf = '100'
- lun = '1'
- connection_info = self.aoe_connection(shelf, lun)
- conf = libvirt_driver.get_config(connection_info, self.disk_info)
- tree = conf.format_dom()
- aoedevpath = '/dev/etherd/e%s.%s' % (shelf, lun)
- self.assertEqual('block', tree.get('type'))
- self.assertEqual(aoedevpath, tree.find('./source').get('dev'))
- libvirt_driver.disconnect_volume(connection_info, "vde")
-
- def test_libvirt_glusterfs_driver(self):
- mnt_base = '/mnt'
- self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
-
- libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
- self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
- export_string = '192.168.1.1:/volume-00001'
- export_mnt_base = os.path.join(mnt_base,
- utils.get_hash_str(export_string))
-
- connection_info = {'data': {'export': export_string,
- 'name': self.name}}
- libvirt_driver.connect_volume(connection_info, self.disk_info)
- libvirt_driver.disconnect_volume(connection_info, "vde")
-
- device_path = os.path.join(export_mnt_base,
- connection_info['data']['name'])
- self.assertEqual(device_path, connection_info['data']['device_path'])
- expected_commands = [
- ('mkdir', '-p', export_mnt_base),
- ('mount', '-t', 'glusterfs', export_string, export_mnt_base),
- ('umount', export_mnt_base)]
- self.assertEqual(expected_commands, self.executes)
-
- def test_libvirt_glusterfs_driver_get_config(self):
- mnt_base = '/mnt'
- self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
-
- libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
- export_string = '192.168.1.1:/volume-00001'
- export_mnt_base = os.path.join(mnt_base,
- utils.get_hash_str(export_string))
- file_path = os.path.join(export_mnt_base, self.name)
-
- # Test default format - raw
- connection_info = {'data': {'export': export_string,
- 'name': self.name,
- 'device_path': file_path}}
- conf = libvirt_driver.get_config(connection_info, self.disk_info)
- tree = conf.format_dom()
- self._assertFileTypeEquals(tree, file_path)
- self.assertEqual('raw', tree.find('./driver').get('type'))
-
- # Test specified format - qcow2
- connection_info = {'data': {'export': export_string,
- 'name': self.name,
- 'device_path': file_path,
- 'format': 'qcow2'}}
- conf = libvirt_driver.get_config(connection_info, self.disk_info)
- tree = conf.format_dom()
- self._assertFileTypeEquals(tree, file_path)
- self.assertEqual('qcow2', tree.find('./driver').get('type'))
-
- def test_libvirt_glusterfs_driver_already_mounted(self):
- mnt_base = '/mnt'
- self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
-
- libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
- export_string = '192.168.1.1:/volume-00001'
- export_mnt_base = os.path.join(mnt_base,
- utils.get_hash_str(export_string))
-
- connection_info = {'data': {'export': export_string,
- 'name': self.name}}
- libvirt_driver.connect_volume(connection_info, self.disk_info)
- libvirt_driver.disconnect_volume(connection_info, "vde")
-
- expected_commands = [
- ('findmnt', '--target', export_mnt_base,
- '--source', export_string),
- ('umount', export_mnt_base)]
- self.assertEqual(self.executes, expected_commands)
-
- def test_libvirt_glusterfs_driver_with_opts(self):
- mnt_base = '/mnt'
- self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
-
- libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
- self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
- export_string = '192.168.1.1:/volume-00001'
- options = '-o backupvolfile-server=192.168.1.2'
- export_mnt_base = os.path.join(mnt_base,
- utils.get_hash_str(export_string))
-
- connection_info = {'data': {'export': export_string,
- 'name': self.name,
- 'options': options}}
- libvirt_driver.connect_volume(connection_info, self.disk_info)
- libvirt_driver.disconnect_volume(connection_info, "vde")
-
- expected_commands = [
- ('mkdir', '-p', export_mnt_base),
- ('mount', '-t', 'glusterfs',
- '-o', 'backupvolfile-server=192.168.1.2',
- export_string, export_mnt_base),
- ('umount', export_mnt_base),
- ]
- self.assertEqual(self.executes, expected_commands)
-
- def test_libvirt_glusterfs_libgfapi(self):
- self.flags(qemu_allowed_storage_drivers=['gluster'], group='libvirt')
- libvirt_driver = volume.LibvirtGlusterfsVolumeDriver(self.fake_conn)
- self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
- export_string = '192.168.1.1:/volume-00001'
- name = 'volume-00001'
-
- connection_info = {'data': {'export': export_string, 'name': name}}
-
- disk_info = {
- "dev": "vde",
- "type": "disk",
- "bus": "virtio",
- }
-
- libvirt_driver.connect_volume(connection_info, disk_info)
- conf = libvirt_driver.get_config(connection_info, disk_info)
- tree = conf.format_dom()
- self.assertEqual(tree.get('type'), 'network')
- self.assertEqual(tree.find('./driver').get('type'), 'raw')
-
- source = tree.find('./source')
- self.assertEqual(source.get('protocol'), 'gluster')
- self.assertEqual(source.get('name'), 'volume-00001/volume-00001')
- self.assertEqual(source.find('./host').get('name'), '192.168.1.1')
- self.assertEqual(source.find('./host').get('port'), '24007')
-
- libvirt_driver.disconnect_volume(connection_info, "vde")
-
- def fibrechan_connection(self, volume, location, wwn):
- return {
- 'driver_volume_type': 'fibrechan',
- 'data': {
- 'volume_id': volume['id'],
- 'target_portal': location,
- 'target_wwn': wwn,
- 'target_lun': 1,
- }
- }
-
- def test_libvirt_fibrechan_driver(self):
- self.stubs.Set(libvirt_utils, 'get_fc_hbas',
- fake_libvirt_utils.get_fc_hbas)
- self.stubs.Set(libvirt_utils, 'get_fc_hbas_info',
- fake_libvirt_utils.get_fc_hbas_info)
- # NOTE(vish) exists is to make driver assume connecting worked
- self.stubs.Set(os.path, 'exists', lambda x: True)
- self.stubs.Set(os.path, 'realpath', lambda x: '/dev/sdb')
- libvirt_driver = volume.LibvirtFibreChannelVolumeDriver(self.fake_conn)
- multipath_devname = '/dev/md-1'
- devices = {"device": multipath_devname,
- "id": "1234567890",
- "devices": [{'device': '/dev/sdb',
- 'address': '1:0:0:1',
- 'host': 1, 'channel': 0,
- 'id': 0, 'lun': 1}]}
- self.stubs.Set(linuxscsi, 'find_multipath_device', lambda x: devices)
- self.stubs.Set(linuxscsi, 'remove_device', lambda x: None)
- # Should work for string, unicode, and list
- wwns = ['1234567890123456', unicode('1234567890123456'),
- ['1234567890123456', '1234567890123457']]
- for wwn in wwns:
- connection_info = self.fibrechan_connection(self.vol,
- self.location, wwn)
- mount_device = "vde"
- libvirt_driver.connect_volume(connection_info, self.disk_info)
-
- # Test the scenario where multipath_id is returned
- libvirt_driver.disconnect_volume(connection_info, mount_device)
- self.assertEqual(multipath_devname,
- connection_info['data']['device_path'])
- expected_commands = []
- self.assertEqual(expected_commands, self.executes)
- # Test the scenario where multipath_id is not returned
- connection_info["data"]["devices"] = devices["devices"]
- del connection_info["data"]["multipath_id"]
- libvirt_driver.disconnect_volume(connection_info, mount_device)
- expected_commands = []
- self.assertEqual(expected_commands, self.executes)
-
- # Should not work for anything other than string, unicode, and list
- connection_info = self.fibrechan_connection(self.vol,
- self.location, 123)
- self.assertRaises(exception.NovaException,
- libvirt_driver.connect_volume,
- connection_info, self.disk_info)
-
- self.stubs.Set(libvirt_utils, 'get_fc_hbas', lambda: [])
- self.stubs.Set(libvirt_utils, 'get_fc_hbas_info', lambda: [])
- self.assertRaises(exception.NovaException,
- libvirt_driver.connect_volume,
- connection_info, self.disk_info)
-
- def test_libvirt_fibrechan_driver_get_config(self):
- libvirt_driver = volume.LibvirtFibreChannelVolumeDriver(self.fake_conn)
- connection_info = self.fibrechan_connection(self.vol,
- self.location, 123)
- connection_info['data']['device_path'] = ("/sys/devices/pci0000:00"
- "/0000:00:03.0/0000:05:00.3/host2/fc_host/host2")
- conf = libvirt_driver.get_config(connection_info, self.disk_info)
- tree = conf.format_dom()
- self.assertEqual('block', tree.get('type'))
- self.assertEqual(connection_info['data']['device_path'],
- tree.find('./source').get('dev'))
-
- def test_libvirt_fibrechan_getpci_num(self):
- libvirt_driver = volume.LibvirtFibreChannelVolumeDriver(self.fake_conn)
- hba = {'device_path': "/sys/devices/pci0000:00/0000:00:03.0"
- "/0000:05:00.3/host2/fc_host/host2"}
- pci_num = libvirt_driver._get_pci_num(hba)
- self.assertEqual("0000:05:00.3", pci_num)
-
- hba = {'device_path': "/sys/devices/pci0000:00/0000:00:03.0"
- "/0000:05:00.3/0000:06:00.6/host2/fc_host/host2"}
- pci_num = libvirt_driver._get_pci_num(hba)
- self.assertEqual("0000:06:00.6", pci_num)
-
- def test_libvirt_scality_driver(self):
- tempdir = self.useFixture(fixtures.TempDir()).path
- TEST_MOUNT = os.path.join(tempdir, 'fake_mount')
- TEST_CONFIG = os.path.join(tempdir, 'fake_config')
- TEST_VOLDIR = 'volumes'
- TEST_VOLNAME = 'volume_name'
- TEST_CONN_INFO = {
- 'data': {
- 'sofs_path': os.path.join(TEST_VOLDIR, TEST_VOLNAME)
- }
- }
- TEST_VOLPATH = os.path.join(TEST_MOUNT,
- TEST_VOLDIR,
- TEST_VOLNAME)
- open(TEST_CONFIG, "w+").close()
- os.makedirs(os.path.join(TEST_MOUNT, 'sys'))
-
- def _access_wrapper(path, flags):
- if path == '/sbin/mount.sofs':
- return True
- else:
- return os.access(path, flags)
-
- self.stubs.Set(os, 'access', _access_wrapper)
- self.flags(scality_sofs_config=TEST_CONFIG,
- scality_sofs_mount_point=TEST_MOUNT,
- group='libvirt')
- driver = volume.LibvirtScalityVolumeDriver(self.fake_conn)
- driver.connect_volume(TEST_CONN_INFO, self.disk_info)
-
- device_path = os.path.join(TEST_MOUNT,
- TEST_CONN_INFO['data']['sofs_path'])
- self.assertEqual(device_path,
- TEST_CONN_INFO['data']['device_path'])
-
- conf = driver.get_config(TEST_CONN_INFO, self.disk_info)
- tree = conf.format_dom()
- self._assertFileTypeEquals(tree, TEST_VOLPATH)
diff --git a/nova/tests/virt/test_block_device.py b/nova/tests/virt/test_block_device.py
deleted file mode 100644
index a87bee0818..0000000000
--- a/nova/tests/virt/test_block_device.py
+++ /dev/null
@@ -1,684 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-
-import mock
-from oslo.serialization import jsonutils
-
-from nova import block_device
-from nova import context
-from nova import test
-from nova.tests import fake_instance
-from nova.tests import matchers
-from nova.virt import block_device as driver_block_device
-from nova.virt import driver
-from nova.volume import cinder
-from nova.volume import encryptors
-
-
-class TestDriverBlockDevice(test.NoDBTestCase):
- driver_classes = {
- 'swap': driver_block_device.DriverSwapBlockDevice,
- 'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
- 'volume': driver_block_device.DriverVolumeBlockDevice,
- 'snapshot': driver_block_device.DriverSnapshotBlockDevice,
- 'image': driver_block_device.DriverImageBlockDevice,
- 'blank': driver_block_device.DriverBlankBlockDevice
- }
-
- swap_bdm = block_device.BlockDeviceDict(
- {'id': 1, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sdb1',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'delete_on_termination': True,
- 'guest_format': 'swap',
- 'disk_bus': 'scsi',
- 'volume_size': 2,
- 'boot_index': -1})
-
- swap_driver_bdm = {
- 'device_name': '/dev/sdb1',
- 'swap_size': 2,
- 'disk_bus': 'scsi'}
-
- swap_legacy_driver_bdm = {
- 'device_name': '/dev/sdb1',
- 'swap_size': 2}
-
- ephemeral_bdm = block_device.BlockDeviceDict(
- {'id': 2, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sdc1',
- 'source_type': 'blank',
- 'destination_type': 'local',
- 'disk_bus': 'scsi',
- 'device_type': 'disk',
- 'volume_size': 4,
- 'guest_format': 'ext4',
- 'delete_on_termination': True,
- 'boot_index': -1})
-
- ephemeral_driver_bdm = {
- 'device_name': '/dev/sdc1',
- 'size': 4,
- 'device_type': 'disk',
- 'guest_format': 'ext4',
- 'disk_bus': 'scsi'}
-
- ephemeral_legacy_driver_bdm = {
- 'device_name': '/dev/sdc1',
- 'size': 4,
- 'virtual_name': 'ephemeral0',
- 'num': 0}
-
- volume_bdm = block_device.BlockDeviceDict(
- {'id': 3, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sda1',
- 'source_type': 'volume',
- 'disk_bus': 'scsi',
- 'device_type': 'disk',
- 'volume_size': 8,
- 'destination_type': 'volume',
- 'volume_id': 'fake-volume-id-1',
- 'guest_format': 'ext4',
- 'connection_info': '{"fake": "connection_info"}',
- 'delete_on_termination': False,
- 'boot_index': 0})
-
- volume_driver_bdm = {
- 'mount_device': '/dev/sda1',
- 'connection_info': {"fake": "connection_info"},
- 'delete_on_termination': False,
- 'disk_bus': 'scsi',
- 'device_type': 'disk',
- 'guest_format': 'ext4',
- 'boot_index': 0}
-
- volume_legacy_driver_bdm = {
- 'mount_device': '/dev/sda1',
- 'connection_info': {"fake": "connection_info"},
- 'delete_on_termination': False}
-
- snapshot_bdm = block_device.BlockDeviceDict(
- {'id': 4, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sda2',
- 'delete_on_termination': True,
- 'volume_size': 3,
- 'disk_bus': 'scsi',
- 'device_type': 'disk',
- 'source_type': 'snapshot',
- 'destination_type': 'volume',
- 'connection_info': '{"fake": "connection_info"}',
- 'snapshot_id': 'fake-snapshot-id-1',
- 'volume_id': 'fake-volume-id-2',
- 'boot_index': -1})
-
- snapshot_driver_bdm = {
- 'mount_device': '/dev/sda2',
- 'connection_info': {"fake": "connection_info"},
- 'delete_on_termination': True,
- 'disk_bus': 'scsi',
- 'device_type': 'disk',
- 'guest_format': None,
- 'boot_index': -1}
-
- snapshot_legacy_driver_bdm = {
- 'mount_device': '/dev/sda2',
- 'connection_info': {"fake": "connection_info"},
- 'delete_on_termination': True}
-
- image_bdm = block_device.BlockDeviceDict(
- {'id': 5, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sda2',
- 'delete_on_termination': True,
- 'volume_size': 1,
- 'disk_bus': 'scsi',
- 'device_type': 'disk',
- 'source_type': 'image',
- 'destination_type': 'volume',
- 'connection_info': '{"fake": "connection_info"}',
- 'image_id': 'fake-image-id-1',
- 'volume_id': 'fake-volume-id-2',
- 'boot_index': -1})
-
- image_driver_bdm = {
- 'mount_device': '/dev/sda2',
- 'connection_info': {"fake": "connection_info"},
- 'delete_on_termination': True,
- 'disk_bus': 'scsi',
- 'device_type': 'disk',
- 'guest_format': None,
- 'boot_index': -1}
-
- image_legacy_driver_bdm = {
- 'mount_device': '/dev/sda2',
- 'connection_info': {"fake": "connection_info"},
- 'delete_on_termination': True}
-
- blank_bdm = block_device.BlockDeviceDict(
- {'id': 6, 'instance_uuid': 'fake-instance',
- 'device_name': '/dev/sda2',
- 'delete_on_termination': True,
- 'volume_size': 3,
- 'disk_bus': 'scsi',
- 'device_type': 'disk',
- 'source_type': 'blank',
- 'destination_type': 'volume',
- 'connection_info': '{"fake": "connection_info"}',
- 'snapshot_id': 'fake-snapshot-id-1',
- 'volume_id': 'fake-volume-id-2',
- 'boot_index': -1})
-
- blank_driver_bdm = {
- 'mount_device': '/dev/sda2',
- 'connection_info': {"fake": "connection_info"},
- 'delete_on_termination': True,
- 'disk_bus': 'scsi',
- 'device_type': 'disk',
- 'guest_format': None,
- 'boot_index': -1}
-
- blank_legacy_driver_bdm = {
- 'mount_device': '/dev/sda2',
- 'connection_info': {"fake": "connection_info"},
- 'delete_on_termination': True}
-
- def setUp(self):
- super(TestDriverBlockDevice, self).setUp()
- self.volume_api = self.mox.CreateMock(cinder.API)
- self.virt_driver = self.mox.CreateMock(driver.ComputeDriver)
- self.context = context.RequestContext('fake_user',
- 'fake_project')
-
- def test_no_device_raises(self):
- for name, cls in self.driver_classes.items():
- self.assertRaises(driver_block_device._NotTransformable,
- cls, {'no_device': True})
-
- def _test_driver_device(self, name):
- db_bdm = getattr(self, "%s_bdm" % name)
- test_bdm = self.driver_classes[name](db_bdm)
- self.assertThat(test_bdm, matchers.DictMatches(
- getattr(self, "%s_driver_bdm" % name)))
-
- for k, v in db_bdm.iteritems():
- field_val = getattr(test_bdm._bdm_obj, k)
- if isinstance(field_val, bool):
- v = bool(v)
- self.assertEqual(field_val, v)
-
- self.assertThat(test_bdm.legacy(),
- matchers.DictMatches(
- getattr(self, "%s_legacy_driver_bdm" % name)))
-
- # Test passthru attributes
- for passthru in test_bdm._proxy_as_attr:
- self.assertEqual(getattr(test_bdm, passthru),
- getattr(test_bdm._bdm_obj, passthru))
-
- # Make sure that all others raise _invalidType
- for other_name, cls in self.driver_classes.iteritems():
- if other_name == name:
- continue
- self.assertRaises(driver_block_device._InvalidType,
- cls,
- getattr(self, '%s_bdm' % name))
-
- # Test the save method
- with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
- test_bdm.save(self.context)
- for fld, alias in test_bdm._update_on_save.iteritems():
- self.assertEqual(test_bdm[alias or fld],
- getattr(test_bdm._bdm_obj, fld))
-
- save_mock.assert_called_once_with(self.context)
-
- # Test the save method with no context passed
- with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
- test_bdm.save()
- save_mock.assert_called_once_with()
-
- def _test_driver_default_size(self, name):
- size = 'swap_size' if name == 'swap' else 'size'
- no_size_bdm = getattr(self, "%s_bdm" % name).copy()
- no_size_bdm['volume_size'] = None
-
- driver_bdm = self.driver_classes[name](no_size_bdm)
- self.assertEqual(driver_bdm[size], 0)
-
- del no_size_bdm['volume_size']
-
- driver_bdm = self.driver_classes[name](no_size_bdm)
- self.assertEqual(driver_bdm[size], 0)
-
- def test_driver_swap_block_device(self):
- self._test_driver_device("swap")
-
- def test_driver_swap_default_size(self):
- self._test_driver_default_size('swap')
-
- def test_driver_ephemeral_block_device(self):
- self._test_driver_device("ephemeral")
-
- def test_driver_ephemeral_default_size(self):
- self._test_driver_default_size('ephemeral')
-
- def test_driver_volume_block_device(self):
- self._test_driver_device("volume")
-
- test_bdm = self.driver_classes['volume'](
- self.volume_bdm)
- self.assertEqual(test_bdm['connection_info'],
- jsonutils.loads(test_bdm._bdm_obj.connection_info))
- self.assertEqual(test_bdm._bdm_obj.id, 3)
- self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
- self.assertEqual(test_bdm.volume_size, 8)
-
- def test_driver_snapshot_block_device(self):
- self._test_driver_device("snapshot")
-
- test_bdm = self.driver_classes['snapshot'](
- self.snapshot_bdm)
- self.assertEqual(test_bdm._bdm_obj.id, 4)
- self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
- self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
- self.assertEqual(test_bdm.volume_size, 3)
-
- def test_driver_image_block_device(self):
- self._test_driver_device('image')
-
- test_bdm = self.driver_classes['image'](
- self.image_bdm)
- self.assertEqual(test_bdm._bdm_obj.id, 5)
- self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
- self.assertEqual(test_bdm.volume_size, 1)
-
- def test_driver_image_block_device_destination_local(self):
- self._test_driver_device('image')
- bdm = self.image_bdm.copy()
- bdm['destination_type'] = 'local'
- self.assertRaises(driver_block_device._InvalidType,
- self.driver_classes['image'], bdm)
-
- def test_driver_blank_block_device(self):
- self._test_driver_device('blank')
-
- test_bdm = self.driver_classes['blank'](
- self.blank_bdm)
- self.assertEqual(6, test_bdm._bdm_obj.id)
- self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
- self.assertEqual(3, test_bdm.volume_size)
-
- def _test_volume_attach(self, driver_bdm, bdm_dict,
- fake_volume, check_attach=True,
- fail_check_attach=False, driver_attach=False,
- fail_driver_attach=False, volume_attach=True,
- access_mode='rw'):
- elevated_context = self.context.elevated()
- self.stubs.Set(self.context, 'elevated',
- lambda: elevated_context)
- self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save')
- self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata')
- instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
- connector = {'ip': 'fake_ip', 'host': 'fake_host'}
- connection_info = {'data': {'access_mode': access_mode}}
- expected_conn_info = {'data': {'access_mode': access_mode},
- 'serial': fake_volume['id']}
- enc_data = {'fake': 'enc_data'}
-
- self.volume_api.get(self.context,
- fake_volume['id']).AndReturn(fake_volume)
- if check_attach:
- if not fail_check_attach:
- self.volume_api.check_attach(self.context, fake_volume,
- instance=instance).AndReturn(None)
- else:
- self.volume_api.check_attach(self.context, fake_volume,
- instance=instance).AndRaise(
- test.TestingException)
- return instance, expected_conn_info
-
- self.virt_driver.get_volume_connector(instance).AndReturn(connector)
- self.volume_api.initialize_connection(
- elevated_context, fake_volume['id'],
- connector).AndReturn(connection_info)
- if driver_attach:
- encryptors.get_encryption_metadata(
- elevated_context, self.volume_api, fake_volume['id'],
- connection_info).AndReturn(enc_data)
- if not fail_driver_attach:
- self.virt_driver.attach_volume(
- elevated_context, expected_conn_info, instance,
- bdm_dict['device_name'],
- disk_bus=bdm_dict['disk_bus'],
- device_type=bdm_dict['device_type'],
- encryption=enc_data).AndReturn(None)
- else:
- self.virt_driver.attach_volume(
- elevated_context, expected_conn_info, instance,
- bdm_dict['device_name'],
- disk_bus=bdm_dict['disk_bus'],
- device_type=bdm_dict['device_type'],
- encryption=enc_data).AndRaise(test.TestingException)
- self.volume_api.terminate_connection(
- elevated_context, fake_volume['id'],
- expected_conn_info).AndReturn(None)
- return instance, expected_conn_info
-
- if volume_attach:
- self.volume_api.attach(elevated_context, fake_volume['id'],
- 'fake_uuid', bdm_dict['device_name'],
- mode=access_mode).AndReturn(None)
- driver_bdm._bdm_obj.save(self.context).AndReturn(None)
- return instance, expected_conn_info
-
- def test_volume_attach(self):
- test_bdm = self.driver_classes['volume'](
- self.volume_bdm)
- volume = {'id': 'fake-volume-id-1',
- 'attach_status': 'detached'}
-
- instance, expected_conn_info = self._test_volume_attach(
- test_bdm, self.volume_bdm, volume)
-
- self.mox.ReplayAll()
-
- test_bdm.attach(self.context, instance,
- self.volume_api, self.virt_driver)
- self.assertThat(test_bdm['connection_info'],
- matchers.DictMatches(expected_conn_info))
-
- def test_volume_attach_ro(self):
- test_bdm = self.driver_classes['volume'](self.volume_bdm)
- volume = {'id': 'fake-volume-id-1',
- 'attach_status': 'detached'}
-
- instance, expected_conn_info = self._test_volume_attach(
- test_bdm, self.volume_bdm, volume, access_mode='ro')
-
- self.mox.ReplayAll()
-
- test_bdm.attach(self.context, instance,
- self.volume_api, self.virt_driver)
- self.assertThat(test_bdm['connection_info'],
- matchers.DictMatches(expected_conn_info))
-
- def check_volume_attach_check_attach_fails(self):
- test_bdm = self.driver_classes['volume'](
- self.volume_bdm)
- volume = {'id': 'fake-volume-id-1'}
-
- instance, _ = self._test_volume_attach(
- test_bdm, self.volume_bdm, volume, fail_check_attach=True)
- self.mox.ReplayAll()
-
- self.asserRaises(test.TestingException, test_bdm.attach, self.context,
- instance, self.volume_api, self.virt_driver)
-
- def test_volume_no_volume_attach(self):
- test_bdm = self.driver_classes['volume'](
- self.volume_bdm)
- volume = {'id': 'fake-volume-id-1',
- 'attach_status': 'detached'}
-
- instance, expected_conn_info = self._test_volume_attach(
- test_bdm, self.volume_bdm, volume, check_attach=False,
- driver_attach=False)
-
- self.mox.ReplayAll()
-
- test_bdm.attach(self.context, instance,
- self.volume_api, self.virt_driver,
- do_check_attach=False, do_driver_attach=False)
- self.assertThat(test_bdm['connection_info'],
- matchers.DictMatches(expected_conn_info))
-
- def test_volume_attach_no_check_driver_attach(self):
- test_bdm = self.driver_classes['volume'](
- self.volume_bdm)
- volume = {'id': 'fake-volume-id-1',
- 'attach_status': 'detached'}
-
- instance, expected_conn_info = self._test_volume_attach(
- test_bdm, self.volume_bdm, volume, check_attach=False,
- driver_attach=True)
-
- self.mox.ReplayAll()
-
- test_bdm.attach(self.context, instance,
- self.volume_api, self.virt_driver,
- do_check_attach=False, do_driver_attach=True)
- self.assertThat(test_bdm['connection_info'],
- matchers.DictMatches(expected_conn_info))
-
- def check_volume_attach_driver_attach_fails(self):
- test_bdm = self.driver_classes['volume'](
- self.volume_bdm)
- volume = {'id': 'fake-volume-id-1'}
-
- instance, _ = self._test_volume_attach(
- test_bdm, self.volume_bdm, volume, fail_check_attach=True)
- self.mox.ReplayAll()
-
- self.asserRaises(test.TestingException, test_bdm.attach, self.context,
- instance, self.volume_api, self.virt_driver,
- do_driver_attach=True)
-
- def test_refresh_connection(self):
- test_bdm = self.driver_classes['snapshot'](
- self.snapshot_bdm)
-
- instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
- connector = {'ip': 'fake_ip', 'host': 'fake_host'}
- connection_info = {'data': {'multipath_id': 'fake_multipath_id'}}
- expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'},
- 'serial': 'fake-volume-id-2'}
-
- self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save')
-
- self.virt_driver.get_volume_connector(instance).AndReturn(connector)
- self.volume_api.initialize_connection(
- self.context, test_bdm.volume_id,
- connector).AndReturn(connection_info)
- test_bdm._bdm_obj.save(self.context).AndReturn(None)
-
- self.mox.ReplayAll()
-
- test_bdm.refresh_connection_info(self.context, instance,
- self.volume_api, self.virt_driver)
- self.assertThat(test_bdm['connection_info'],
- matchers.DictMatches(expected_conn_info))
-
- def test_snapshot_attach_no_volume(self):
- no_volume_snapshot = self.snapshot_bdm.copy()
- no_volume_snapshot['volume_id'] = None
- test_bdm = self.driver_classes['snapshot'](no_volume_snapshot)
-
- snapshot = {'id': 'fake-volume-id-1',
- 'attach_status': 'detached'}
- volume = {'id': 'fake-volume-id-2',
- 'attach_status': 'detached'}
-
- wait_func = self.mox.CreateMockAnything()
-
- self.volume_api.get_snapshot(self.context,
- 'fake-snapshot-id-1').AndReturn(snapshot)
- self.volume_api.create(self.context, 3,
- '', '', snapshot).AndReturn(volume)
- wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
- instance, expected_conn_info = self._test_volume_attach(
- test_bdm, no_volume_snapshot, volume)
- self.mox.ReplayAll()
-
- test_bdm.attach(self.context, instance, self.volume_api,
- self.virt_driver, wait_func)
- self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
-
- def test_snapshot_attach_volume(self):
- test_bdm = self.driver_classes['snapshot'](
- self.snapshot_bdm)
-
- instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
-
- volume_class = self.driver_classes['volume']
- self.mox.StubOutWithMock(volume_class, 'attach')
-
- # Make sure theses are not called
- self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
- self.mox.StubOutWithMock(self.volume_api, 'create')
-
- volume_class.attach(self.context, instance, self.volume_api,
- self.virt_driver, do_check_attach=True
- ).AndReturn(None)
- self.mox.ReplayAll()
-
- test_bdm.attach(self.context, instance, self.volume_api,
- self.virt_driver)
- self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
-
- def test_image_attach_no_volume(self):
- no_volume_image = self.image_bdm.copy()
- no_volume_image['volume_id'] = None
- test_bdm = self.driver_classes['image'](no_volume_image)
-
- image = {'id': 'fake-image-id-1'}
- volume = {'id': 'fake-volume-id-2',
- 'attach_status': 'detached'}
-
- wait_func = self.mox.CreateMockAnything()
-
- self.volume_api.create(self.context, 1,
- '', '', image_id=image['id']).AndReturn(volume)
- wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
- instance, expected_conn_info = self._test_volume_attach(
- test_bdm, no_volume_image, volume)
- self.mox.ReplayAll()
-
- test_bdm.attach(self.context, instance, self.volume_api,
- self.virt_driver, wait_func)
- self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
-
- def test_image_attach_volume(self):
- test_bdm = self.driver_classes['image'](
- self.image_bdm)
-
- instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
-
- volume_class = self.driver_classes['volume']
- self.mox.StubOutWithMock(volume_class, 'attach')
-
- # Make sure theses are not called
- self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
- self.mox.StubOutWithMock(self.volume_api, 'create')
-
- volume_class.attach(self.context, instance, self.volume_api,
- self.virt_driver, do_check_attach=True
- ).AndReturn(None)
- self.mox.ReplayAll()
-
- test_bdm.attach(self.context, instance, self.volume_api,
- self.virt_driver)
- self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
-
- def test_blank_attach_volume(self):
- no_blank_volume = self.blank_bdm.copy()
- no_blank_volume['volume_id'] = None
- test_bdm = self.driver_classes['blank'](no_blank_volume)
- instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
- **{'uuid': 'fake-uuid'})
- volume_class = self.driver_classes['volume']
- volume = {'id': 'fake-volume-id-2',
- 'display_name': 'fake-uuid-blank-vol'}
-
- with contextlib.nested(
- mock.patch.object(self.volume_api, 'create', return_value=volume),
- mock.patch.object(volume_class, 'attach')
- ) as (vol_create, vol_attach):
- test_bdm.attach(self.context, instance, self.volume_api,
- self.virt_driver)
-
- vol_create.assert_called_once_with(self.context,
- test_bdm.volume_size,
- 'fake-uuid-blank-vol',
- '')
- vol_attach.assert_called_once_with(self.context, instance,
- self.volume_api,
- self.virt_driver,
- do_check_attach=True)
- self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
-
- def test_convert_block_devices(self):
- converted = driver_block_device._convert_block_devices(
- self.driver_classes['volume'],
- [self.volume_bdm, self.ephemeral_bdm])
- self.assertEqual(converted, [self.volume_driver_bdm])
-
- def test_legacy_block_devices(self):
- test_snapshot = self.driver_classes['snapshot'](
- self.snapshot_bdm)
-
- block_device_mapping = [test_snapshot, test_snapshot]
- legacy_bdm = driver_block_device.legacy_block_devices(
- block_device_mapping)
- self.assertEqual(legacy_bdm, [self.snapshot_legacy_driver_bdm,
- self.snapshot_legacy_driver_bdm])
-
- # Test that the ephemerals work as expected
- test_ephemerals = [self.driver_classes['ephemeral'](
- self.ephemeral_bdm) for _ in xrange(2)]
- expected = [self.ephemeral_legacy_driver_bdm.copy()
- for _ in xrange(2)]
- expected[0]['virtual_name'] = 'ephemeral0'
- expected[0]['num'] = 0
- expected[1]['virtual_name'] = 'ephemeral1'
- expected[1]['num'] = 1
- legacy_ephemerals = driver_block_device.legacy_block_devices(
- test_ephemerals)
- self.assertEqual(expected, legacy_ephemerals)
-
- def test_get_swap(self):
- swap = [self.swap_driver_bdm]
- legacy_swap = [self.swap_legacy_driver_bdm]
- no_swap = [self.volume_driver_bdm]
-
- self.assertEqual(swap[0], driver_block_device.get_swap(swap))
- self.assertEqual(legacy_swap[0],
- driver_block_device.get_swap(legacy_swap))
- self.assertIsNone(driver_block_device.get_swap(no_swap))
- self.assertIsNone(driver_block_device.get_swap([]))
-
- def test_is_implemented(self):
- for bdm in (self.image_bdm, self.volume_bdm, self.swap_bdm,
- self.ephemeral_bdm, self.snapshot_bdm):
- self.assertTrue(driver_block_device.is_implemented(bdm))
- local_image = self.image_bdm.copy()
- local_image['destination_type'] = 'local'
- self.assertFalse(driver_block_device.is_implemented(local_image))
-
- def test_is_block_device_mapping(self):
- test_swap = self.driver_classes['swap'](self.swap_bdm)
- test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm)
- test_image = self.driver_classes['image'](self.image_bdm)
- test_snapshot = self.driver_classes['snapshot'](self.snapshot_bdm)
- test_volume = self.driver_classes['volume'](self.volume_bdm)
- test_blank = self.driver_classes['blank'](self.blank_bdm)
-
- for bdm in (test_image, test_snapshot, test_volume, test_blank):
- self.assertTrue(driver_block_device.is_block_device_mapping(
- bdm._bdm_obj))
-
- for bdm in (test_swap, test_ephemeral):
- self.assertFalse(driver_block_device.is_block_device_mapping(
- bdm._bdm_obj))
diff --git a/nova/tests/virt/test_driver.py b/nova/tests/virt/test_driver.py
deleted file mode 100644
index 4101a6a9ce..0000000000
--- a/nova/tests/virt/test_driver.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright (c) 2013 Citrix Systems, Inc.
-# Copyright 2013 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova import test
-from nova.virt import driver
-
-
-class FakeDriver(object):
- def __init__(self, *args, **kwargs):
- self.args = args
- self.kwargs = kwargs
-
-
-class FakeDriver2(FakeDriver):
- pass
-
-
-class ToDriverRegistryTestCase(test.NoDBTestCase):
-
- def assertDriverInstance(self, inst, class_, *args, **kwargs):
- self.assertEqual(class_, inst.__class__)
- self.assertEqual(args, inst.args)
- self.assertEqual(kwargs, inst.kwargs)
-
- def test_driver_dict_from_config(self):
- drvs = driver.driver_dict_from_config(
- [
- 'key1=nova.tests.virt.test_driver.FakeDriver',
- 'key2=nova.tests.virt.test_driver.FakeDriver2',
- ], 'arg1', 'arg2', param1='value1', param2='value2'
- )
-
- self.assertEqual(
- sorted(['key1', 'key2']),
- sorted(drvs.keys())
- )
-
- self.assertDriverInstance(
- drvs['key1'],
- FakeDriver, 'arg1', 'arg2', param1='value1',
- param2='value2')
-
- self.assertDriverInstance(
- drvs['key2'],
- FakeDriver2, 'arg1', 'arg2', param1='value1',
- param2='value2')
diff --git a/nova/tests/virt/test_hardware.py b/nova/tests/virt/test_hardware.py
deleted file mode 100644
index 5c94f8fd06..0000000000
--- a/nova/tests/virt/test_hardware.py
+++ /dev/null
@@ -1,1439 +0,0 @@
-# Copyright 2014 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import uuid
-
-import mock
-from oslo.serialization import jsonutils
-import six
-
-from nova import context
-from nova import exception
-from nova import objects
-from nova.objects import base as base_obj
-from nova import test
-from nova.tests import matchers
-from nova.virt import hardware as hw
-
-
-class FakeFlavor(dict):
- def __init__(self, vcpus, memory, extra_specs):
- self['vcpus'] = vcpus
- self['memory_mb'] = memory
- self['extra_specs'] = extra_specs
-
-
-class FakeFlavorObject(object):
- def __init__(self, vcpus, memory, extra_specs):
- self.vcpus = vcpus
- self.memory_mb = memory
- self.extra_specs = extra_specs
-
- def __getitem__(self, item):
- try:
- return getattr(self, item)
- except AttributeError:
- raise KeyError(item)
-
- def get(self, item, default=None):
- try:
- return getattr(self, item)
- except AttributeError:
- return default
-
-
-class CpuSetTestCase(test.NoDBTestCase):
- def test_get_vcpu_pin_set(self):
- self.flags(vcpu_pin_set="1-3,5,^2")
- cpuset_ids = hw.get_vcpu_pin_set()
- self.assertEqual(set([1, 3, 5]), cpuset_ids)
-
- def test_parse_cpu_spec_none_returns_none(self):
- self.flags(vcpu_pin_set=None)
- cpuset_ids = hw.get_vcpu_pin_set()
- self.assertIsNone(cpuset_ids)
-
- def test_parse_cpu_spec_valid_syntax_works(self):
- cpuset_ids = hw.parse_cpu_spec("1")
- self.assertEqual(set([1]), cpuset_ids)
-
- cpuset_ids = hw.parse_cpu_spec("1,2")
- self.assertEqual(set([1, 2]), cpuset_ids)
-
- cpuset_ids = hw.parse_cpu_spec(", , 1 , ,, 2, ,")
- self.assertEqual(set([1, 2]), cpuset_ids)
-
- cpuset_ids = hw.parse_cpu_spec("1-1")
- self.assertEqual(set([1]), cpuset_ids)
-
- cpuset_ids = hw.parse_cpu_spec(" 1 - 1, 1 - 2 , 1 -3")
- self.assertEqual(set([1, 2, 3]), cpuset_ids)
-
- cpuset_ids = hw.parse_cpu_spec("1,^2")
- self.assertEqual(set([1]), cpuset_ids)
-
- cpuset_ids = hw.parse_cpu_spec("1-2, ^1")
- self.assertEqual(set([2]), cpuset_ids)
-
- cpuset_ids = hw.parse_cpu_spec("1-3,5,^2")
- self.assertEqual(set([1, 3, 5]), cpuset_ids)
-
- cpuset_ids = hw.parse_cpu_spec(" 1 - 3 , ^2, 5")
- self.assertEqual(set([1, 3, 5]), cpuset_ids)
-
- cpuset_ids = hw.parse_cpu_spec(" 1,1, ^1")
- self.assertEqual(set([]), cpuset_ids)
-
- def test_parse_cpu_spec_invalid_syntax_raises(self):
- self.assertRaises(exception.Invalid,
- hw.parse_cpu_spec,
- " -1-3,5,^2")
-
- self.assertRaises(exception.Invalid,
- hw.parse_cpu_spec,
- "1-3-,5,^2")
-
- self.assertRaises(exception.Invalid,
- hw.parse_cpu_spec,
- "-3,5,^2")
-
- self.assertRaises(exception.Invalid,
- hw.parse_cpu_spec,
- "1-,5,^2")
-
- self.assertRaises(exception.Invalid,
- hw.parse_cpu_spec,
- "1-3,5,^2^")
-
- self.assertRaises(exception.Invalid,
- hw.parse_cpu_spec,
- "1-3,5,^2-")
-
- self.assertRaises(exception.Invalid,
- hw.parse_cpu_spec,
- "--13,^^5,^2")
-
- self.assertRaises(exception.Invalid,
- hw.parse_cpu_spec,
- "a-3,5,^2")
-
- self.assertRaises(exception.Invalid,
- hw.parse_cpu_spec,
- "1-a,5,^2")
-
- self.assertRaises(exception.Invalid,
- hw.parse_cpu_spec,
- "1-3,b,^2")
-
- self.assertRaises(exception.Invalid,
- hw.parse_cpu_spec,
- "1-3,5,^c")
-
- self.assertRaises(exception.Invalid,
- hw.parse_cpu_spec,
- "3 - 1, 5 , ^ 2 ")
-
- def test_format_cpu_spec(self):
- cpus = set([])
- spec = hw.format_cpu_spec(cpus)
- self.assertEqual("", spec)
-
- cpus = []
- spec = hw.format_cpu_spec(cpus)
- self.assertEqual("", spec)
-
- cpus = set([1, 3])
- spec = hw.format_cpu_spec(cpus)
- self.assertEqual("1,3", spec)
-
- cpus = [1, 3]
- spec = hw.format_cpu_spec(cpus)
- self.assertEqual("1,3", spec)
-
- cpus = set([1, 2, 4, 6])
- spec = hw.format_cpu_spec(cpus)
- self.assertEqual("1-2,4,6", spec)
-
- cpus = [1, 2, 4, 6]
- spec = hw.format_cpu_spec(cpus)
- self.assertEqual("1-2,4,6", spec)
-
- cpus = set([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48])
- spec = hw.format_cpu_spec(cpus)
- self.assertEqual("10-11,13-16,19-20,40,42,48", spec)
-
- cpus = [10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48]
- spec = hw.format_cpu_spec(cpus)
- self.assertEqual("10-11,13-16,19-20,40,42,48", spec)
-
- cpus = set([1, 2, 4, 6])
- spec = hw.format_cpu_spec(cpus, allow_ranges=False)
- self.assertEqual("1,2,4,6", spec)
-
- cpus = [1, 2, 4, 6]
- spec = hw.format_cpu_spec(cpus, allow_ranges=False)
- self.assertEqual("1,2,4,6", spec)
-
- cpus = set([10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48])
- spec = hw.format_cpu_spec(cpus, allow_ranges=False)
- self.assertEqual("10,11,13,14,15,16,19,20,40,42,48", spec)
-
- cpus = [10, 11, 13, 14, 15, 16, 19, 20, 40, 42, 48]
- spec = hw.format_cpu_spec(cpus, allow_ranges=False)
- self.assertEqual("10,11,13,14,15,16,19,20,40,42,48", spec)
-
-
-class VCPUTopologyTest(test.NoDBTestCase):
-
- def test_validate_config(self):
- testdata = [
- { # Flavor sets preferred topology only
- "flavor": FakeFlavorObject(16, 2048, {
- "hw:cpu_sockets": "8",
- "hw:cpu_cores": "2",
- "hw:cpu_threads": "1",
- }),
- "image": {
- "properties": {}
- },
- "expect": (
- 8, 2, 1, 65536, 65536, 65536
- )
- },
- { # Image topology overrides flavor
- "flavor": FakeFlavorObject(16, 2048, {
- "hw:cpu_sockets": "8",
- "hw:cpu_cores": "2",
- "hw:cpu_threads": "1",
- "hw:cpu_max_threads": "2",
- }),
- "image": {
- "properties": {
- "hw_cpu_sockets": "4",
- "hw_cpu_cores": "2",
- "hw_cpu_threads": "2",
- }
- },
- "expect": (
- 4, 2, 2, 65536, 65536, 2,
- )
- },
- { # Partial image topology overrides flavor
- "flavor": FakeFlavorObject(16, 2048, {
- "hw:cpu_sockets": "8",
- "hw:cpu_cores": "2",
- "hw:cpu_threads": "1",
- }),
- "image": {
- "properties": {
- "hw_cpu_sockets": "2",
- }
- },
- "expect": (
- 2, -1, -1, 65536, 65536, 65536,
- )
- },
- { # Restrict use of threads
- "flavor": FakeFlavorObject(16, 2048, {
- "hw:cpu_max_threads": "2",
- }),
- "image": {
- "properties": {
- "hw_cpu_max_threads": "1",
- }
- },
- "expect": (
- -1, -1, -1, 65536, 65536, 1,
- )
- },
- { # Force use of at least two sockets
- "flavor": FakeFlavorObject(16, 2048, {
- "hw:cpu_max_cores": "8",
- "hw:cpu_max_threads": "1",
- }),
- "image": {
- "properties": {}
- },
- "expect": (
- -1, -1, -1, 65536, 8, 1
- )
- },
- { # Image limits reduce flavor
- "flavor": FakeFlavorObject(16, 2048, {
- "hw:cpu_max_cores": "8",
- "hw:cpu_max_threads": "1",
- }),
- "image": {
- "properties": {
- "hw_cpu_max_cores": "4",
- }
- },
- "expect": (
- -1, -1, -1, 65536, 4, 1
- )
- },
- { # Image limits kill flavor preferred
- "flavor": FakeFlavorObject(16, 2048, {
- "hw:cpu_sockets": "2",
- "hw:cpu_cores": "8",
- "hw:cpu_threads": "1",
- }),
- "image": {
- "properties": {
- "hw_cpu_max_cores": "4",
- }
- },
- "expect": (
- -1, -1, -1, 65536, 4, 65536
- )
- },
- { # Image limits cannot exceed flavor
- "flavor": FakeFlavorObject(16, 2048, {
- "hw:cpu_max_cores": "8",
- "hw:cpu_max_threads": "1",
- }),
- "image": {
- "properties": {
- "hw_cpu_max_cores": "16",
- }
- },
- "expect": exception.ImageVCPULimitsRangeExceeded,
- },
- { # Image preferred cannot exceed flavor
- "flavor": FakeFlavorObject(16, 2048, {
- "hw:cpu_max_cores": "8",
- "hw:cpu_max_threads": "1",
- }),
- "image": {
- "properties": {
- "hw_cpu_cores": "16",
- }
- },
- "expect": exception.ImageVCPUTopologyRangeExceeded,
- },
- ]
-
- for topo_test in testdata:
- if type(topo_test["expect"]) == tuple:
- (preferred,
- maximum) = hw.VirtCPUTopology.get_topology_constraints(
- topo_test["flavor"],
- topo_test["image"])
-
- self.assertEqual(topo_test["expect"][0], preferred.sockets)
- self.assertEqual(topo_test["expect"][1], preferred.cores)
- self.assertEqual(topo_test["expect"][2], preferred.threads)
- self.assertEqual(topo_test["expect"][3], maximum.sockets)
- self.assertEqual(topo_test["expect"][4], maximum.cores)
- self.assertEqual(topo_test["expect"][5], maximum.threads)
- else:
- self.assertRaises(topo_test["expect"],
- hw.VirtCPUTopology.get_topology_constraints,
- topo_test["flavor"],
- topo_test["image"])
-
- def test_possible_configs(self):
- testdata = [
- {
- "allow_threads": True,
- "vcpus": 8,
- "maxsockets": 8,
- "maxcores": 8,
- "maxthreads": 2,
- "expect": [
- [8, 1, 1],
- [4, 2, 1],
- [2, 4, 1],
- [1, 8, 1],
- [4, 1, 2],
- [2, 2, 2],
- [1, 4, 2],
- ]
- },
- {
- "allow_threads": False,
- "vcpus": 8,
- "maxsockets": 8,
- "maxcores": 8,
- "maxthreads": 2,
- "expect": [
- [8, 1, 1],
- [4, 2, 1],
- [2, 4, 1],
- [1, 8, 1],
- ]
- },
- {
- "allow_threads": True,
- "vcpus": 8,
- "maxsockets": 1024,
- "maxcores": 1024,
- "maxthreads": 2,
- "expect": [
- [8, 1, 1],
- [4, 2, 1],
- [2, 4, 1],
- [1, 8, 1],
- [4, 1, 2],
- [2, 2, 2],
- [1, 4, 2],
- ]
- },
- {
- "allow_threads": True,
- "vcpus": 8,
- "maxsockets": 1024,
- "maxcores": 1,
- "maxthreads": 2,
- "expect": [
- [8, 1, 1],
- [4, 1, 2],
- ]
- },
- {
- "allow_threads": True,
- "vcpus": 7,
- "maxsockets": 8,
- "maxcores": 8,
- "maxthreads": 2,
- "expect": [
- [7, 1, 1],
- [1, 7, 1],
- ]
- },
- {
- "allow_threads": True,
- "vcpus": 8,
- "maxsockets": 2,
- "maxcores": 1,
- "maxthreads": 1,
- "expect": exception.ImageVCPULimitsRangeImpossible,
- },
- {
- "allow_threads": False,
- "vcpus": 8,
- "maxsockets": 2,
- "maxcores": 1,
- "maxthreads": 4,
- "expect": exception.ImageVCPULimitsRangeImpossible,
- },
- ]
-
- for topo_test in testdata:
- if type(topo_test["expect"]) == list:
- actual = []
- for topology in hw.VirtCPUTopology.get_possible_topologies(
- topo_test["vcpus"],
- hw.VirtCPUTopology(topo_test["maxsockets"],
- topo_test["maxcores"],
- topo_test["maxthreads"]),
- topo_test["allow_threads"]):
- actual.append([topology.sockets,
- topology.cores,
- topology.threads])
-
- self.assertEqual(topo_test["expect"], actual)
- else:
- self.assertRaises(topo_test["expect"],
- hw.VirtCPUTopology.get_possible_topologies,
- topo_test["vcpus"],
- hw.VirtCPUTopology(topo_test["maxsockets"],
- topo_test["maxcores"],
- topo_test["maxthreads"]),
- topo_test["allow_threads"])
-
- def test_sorting_configs(self):
- testdata = [
- {
- "allow_threads": True,
- "vcpus": 8,
- "maxsockets": 8,
- "maxcores": 8,
- "maxthreads": 2,
- "sockets": 4,
- "cores": 2,
- "threads": 1,
- "expect": [
- [4, 2, 1], # score = 2
- [8, 1, 1], # score = 1
- [2, 4, 1], # score = 1
- [1, 8, 1], # score = 1
- [4, 1, 2], # score = 1
- [2, 2, 2], # score = 1
- [1, 4, 2], # score = 1
- ]
- },
- {
- "allow_threads": True,
- "vcpus": 8,
- "maxsockets": 1024,
- "maxcores": 1024,
- "maxthreads": 2,
- "sockets": -1,
- "cores": 4,
- "threads": -1,
- "expect": [
- [2, 4, 1], # score = 1
- [1, 4, 2], # score = 1
- [8, 1, 1], # score = 0
- [4, 2, 1], # score = 0
- [1, 8, 1], # score = 0
- [4, 1, 2], # score = 0
- [2, 2, 2], # score = 0
- ]
- },
- {
- "allow_threads": True,
- "vcpus": 8,
- "maxsockets": 1024,
- "maxcores": 1,
- "maxthreads": 2,
- "sockets": -1,
- "cores": -1,
- "threads": 2,
- "expect": [
- [4, 1, 2], # score = 1
- [8, 1, 1], # score = 0
- ]
- },
- {
- "allow_threads": False,
- "vcpus": 8,
- "maxsockets": 1024,
- "maxcores": 1,
- "maxthreads": 2,
- "sockets": -1,
- "cores": -1,
- "threads": 2,
- "expect": [
- [8, 1, 1], # score = 0
- ]
- },
- ]
-
- for topo_test in testdata:
- actual = []
- possible = hw.VirtCPUTopology.get_possible_topologies(
- topo_test["vcpus"],
- hw.VirtCPUTopology(topo_test["maxsockets"],
- topo_test["maxcores"],
- topo_test["maxthreads"]),
- topo_test["allow_threads"])
-
- tops = hw.VirtCPUTopology.sort_possible_topologies(
- possible,
- hw.VirtCPUTopology(topo_test["sockets"],
- topo_test["cores"],
- topo_test["threads"]))
- for topology in tops:
- actual.append([topology.sockets,
- topology.cores,
- topology.threads])
-
- self.assertEqual(topo_test["expect"], actual)
-
- def test_best_config(self):
- testdata = [
- { # Flavor sets preferred topology only
- "allow_threads": True,
- "flavor": FakeFlavorObject(16, 2048, {
- "hw:cpu_sockets": "8",
- "hw:cpu_cores": "2",
- "hw:cpu_threads": "1"
- }),
- "image": {
- "properties": {}
- },
- "expect": [8, 2, 1],
- },
- { # Image topology overrides flavor
- "allow_threads": True,
- "flavor": FakeFlavorObject(16, 2048, {
- "hw:cpu_sockets": "8",
- "hw:cpu_cores": "2",
- "hw:cpu_threads": "1",
- "hw:cpu_maxthreads": "2",
- }),
- "image": {
- "properties": {
- "hw_cpu_sockets": "4",
- "hw_cpu_cores": "2",
- "hw_cpu_threads": "2",
- }
- },
- "expect": [4, 2, 2],
- },
- { # Image topology overrides flavor
- "allow_threads": False,
- "flavor": FakeFlavorObject(16, 2048, {
- "hw:cpu_sockets": "8",
- "hw:cpu_cores": "2",
- "hw:cpu_threads": "1",
- "hw:cpu_maxthreads": "2",
- }),
- "image": {
- "properties": {
- "hw_cpu_sockets": "4",
- "hw_cpu_cores": "2",
- "hw_cpu_threads": "2",
- }
- },
- "expect": [8, 2, 1],
- },
- { # Partial image topology overrides flavor
- "allow_threads": True,
- "flavor": FakeFlavorObject(16, 2048, {
- "hw:cpu_sockets": "8",
- "hw:cpu_cores": "2",
- "hw:cpu_threads": "1"
- }),
- "image": {
- "properties": {
- "hw_cpu_sockets": "2"
- }
- },
- "expect": [2, 8, 1],
- },
- { # Restrict use of threads
- "allow_threads": True,
- "flavor": FakeFlavorObject(16, 2048, {
- "hw:cpu_max_threads": "1"
- }),
- "image": {
- "properties": {}
- },
- "expect": [16, 1, 1]
- },
- { # Force use of at least two sockets
- "allow_threads": True,
- "flavor": FakeFlavorObject(16, 2048, {
- "hw:cpu_max_cores": "8",
- "hw:cpu_max_threads": "1",
- }),
- "image": {
- "properties": {}
- },
- "expect": [16, 1, 1]
- },
- { # Image limits reduce flavor
- "allow_threads": True,
- "flavor": FakeFlavorObject(16, 2048, {
- "hw:cpu_max_sockets": "8",
- "hw:cpu_max_cores": "8",
- "hw:cpu_max_threads": "1",
- }),
- "image": {
- "properties": {
- "hw_cpu_max_sockets": 4,
- }
- },
- "expect": [4, 4, 1]
- },
- { # Image limits kill flavor preferred
- "allow_threads": True,
- "flavor": FakeFlavorObject(16, 2048, {
- "hw:cpu_sockets": "2",
- "hw:cpu_cores": "8",
- "hw:cpu_threads": "1",
- }),
- "image": {
- "properties": {
- "hw_cpu_max_cores": 4,
- }
- },
- "expect": [16, 1, 1]
- },
- ]
-
- for topo_test in testdata:
- topology = hw.VirtCPUTopology.get_desirable_configs(
- topo_test["flavor"],
- topo_test["image"],
- topo_test["allow_threads"])[0]
-
- self.assertEqual(topo_test["expect"][0], topology.sockets)
- self.assertEqual(topo_test["expect"][1], topology.cores)
- self.assertEqual(topo_test["expect"][2], topology.threads)
-
-
-class NUMATopologyTest(test.NoDBTestCase):
-
- def test_topology_constraints(self):
- testdata = [
- {
- "flavor": FakeFlavor(8, 2048, {
- }),
- "image": {
- },
- "expect": None,
- },
- {
- "flavor": FakeFlavor(8, 2048, {
- "hw:numa_nodes": 2
- }),
- "image": {
- },
- "expect": hw.VirtNUMAInstanceTopology(
- [
- hw.VirtNUMATopologyCellInstance(
- 0, set([0, 1, 2, 3]), 1024),
- hw.VirtNUMATopologyCellInstance(
- 1, set([4, 5, 6, 7]), 1024),
- ]),
- },
- {
- # vcpus is not a multiple of nodes, so it
- # is an error to not provide cpu/mem mapping
- "flavor": FakeFlavor(8, 2048, {
- "hw:numa_nodes": 3
- }),
- "image": {
- },
- "expect": exception.ImageNUMATopologyAsymmetric,
- },
- {
- "flavor": FakeFlavor(8, 2048, {
- "hw:numa_nodes": 3,
- "hw:numa_cpus.0": "0-3",
- "hw:numa_mem.0": "1024",
- "hw:numa_cpus.1": "4,6",
- "hw:numa_mem.1": "512",
- "hw:numa_cpus.2": "5,7",
- "hw:numa_mem.2": "512",
- }),
- "image": {
- },
- "expect": hw.VirtNUMAInstanceTopology(
- [
- hw.VirtNUMATopologyCellInstance(
- 0, set([0, 1, 2, 3]), 1024),
- hw.VirtNUMATopologyCellInstance(
- 1, set([4, 6]), 512),
- hw.VirtNUMATopologyCellInstance(
- 2, set([5, 7]), 512),
- ]),
- },
- {
- # Request a CPU that is out of range
- # wrt vCPU count
- "flavor": FakeFlavor(8, 2048, {
- "hw:numa_nodes": 1,
- "hw:numa_cpus.0": "0-16",
- "hw:numa_mem.0": "2048",
- }),
- "image": {
- },
- "expect": exception.ImageNUMATopologyCPUOutOfRange,
- },
- {
- # Request the same CPU in two nodes
- "flavor": FakeFlavor(8, 2048, {
- "hw:numa_nodes": 2,
- "hw:numa_cpus.0": "0-7",
- "hw:numa_mem.0": "1024",
- "hw:numa_cpus.1": "0-7",
- "hw:numa_mem.1": "1024",
- }),
- "image": {
- },
- "expect": exception.ImageNUMATopologyCPUDuplicates,
- },
- {
- # Request with some CPUs not assigned
- "flavor": FakeFlavor(8, 2048, {
- "hw:numa_nodes": 2,
- "hw:numa_cpus.0": "0-2",
- "hw:numa_mem.0": "1024",
- "hw:numa_cpus.1": "3-4",
- "hw:numa_mem.1": "1024",
- }),
- "image": {
- },
- "expect": exception.ImageNUMATopologyCPUsUnassigned,
- },
- {
- # Request too little memory vs flavor total
- "flavor": FakeFlavor(8, 2048, {
- "hw:numa_nodes": 2,
- "hw:numa_cpus.0": "0-3",
- "hw:numa_mem.0": "512",
- "hw:numa_cpus.1": "4-7",
- "hw:numa_mem.1": "512",
- }),
- "image": {
- },
- "expect": exception.ImageNUMATopologyMemoryOutOfRange,
- },
- {
- # Request too much memory vs flavor total
- "flavor": FakeFlavor(8, 2048, {
- "hw:numa_nodes": 2,
- "hw:numa_cpus.0": "0-3",
- "hw:numa_mem.0": "1576",
- "hw:numa_cpus.1": "4-7",
- "hw:numa_mem.1": "1576",
- }),
- "image": {
- },
- "expect": exception.ImageNUMATopologyMemoryOutOfRange,
- },
- {
- # Request missing mem.0
- "flavor": FakeFlavor(8, 2048, {
- "hw:numa_nodes": 2,
- "hw:numa_cpus.0": "0-3",
- "hw:numa_mem.1": "1576",
- }),
- "image": {
- },
- "expect": exception.ImageNUMATopologyIncomplete,
- },
- {
- # Request missing cpu.0
- "flavor": FakeFlavor(8, 2048, {
- "hw:numa_nodes": 2,
- "hw:numa_mem.0": "1576",
- "hw:numa_cpus.1": "4-7",
- }),
- "image": {
- },
- "expect": exception.ImageNUMATopologyIncomplete,
- },
- {
- # Image attempts to override flavor
- "flavor": FakeFlavor(8, 2048, {
- "hw:numa_nodes": 2,
- }),
- "image": {
- "hw_numa_nodes": 4,
- },
- "expect": exception.ImageNUMATopologyForbidden,
- },
- ]
-
- for testitem in testdata:
- if testitem["expect"] is None:
- topology = hw.VirtNUMAInstanceTopology.get_constraints(
- testitem["flavor"], testitem["image"])
- self.assertIsNone(topology)
- elif type(testitem["expect"]) == type:
- self.assertRaises(testitem["expect"],
- hw.VirtNUMAInstanceTopology.get_constraints,
- testitem["flavor"],
- testitem["image"])
- else:
- topology = hw.VirtNUMAInstanceTopology.get_constraints(
- testitem["flavor"], testitem["image"])
- self.assertEqual(len(testitem["expect"].cells),
- len(topology.cells))
- for i in range(len(topology.cells)):
- self.assertEqual(testitem["expect"].cells[i].cpuset,
- topology.cells[i].cpuset)
- self.assertEqual(testitem["expect"].cells[i].memory,
- topology.cells[i].memory)
-
- def test_can_fit_isntances(self):
- hosttopo = hw.VirtNUMAHostTopology([
- hw.VirtNUMATopologyCellUsage(0, set([0, 1, 2, 3]), 1024),
- hw.VirtNUMATopologyCellUsage(1, set([4, 6]), 512)
- ])
- instance1 = hw.VirtNUMAInstanceTopology([
- hw.VirtNUMATopologyCellInstance(0, set([0, 1, 2]), 256),
- hw.VirtNUMATopologyCellInstance(1, set([4]), 256),
- ])
- instance2 = hw.VirtNUMAInstanceTopology([
- hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
- hw.VirtNUMATopologyCellInstance(1, set([4, 6]), 256),
- hw.VirtNUMATopologyCellInstance(2, set([7, 8]), 256),
- ])
-
- self.assertTrue(hw.VirtNUMAHostTopology.can_fit_instances(
- hosttopo, []))
- self.assertTrue(hw.VirtNUMAHostTopology.can_fit_instances(
- hosttopo, [instance1]))
- self.assertFalse(hw.VirtNUMAHostTopology.can_fit_instances(
- hosttopo, [instance2]))
- self.assertFalse(hw.VirtNUMAHostTopology.can_fit_instances(
- hosttopo, [instance1, instance2]))
-
- def test_host_usage_contiguous(self):
- hosttopo = hw.VirtNUMAHostTopology([
- hw.VirtNUMATopologyCellUsage(0, set([0, 1, 2, 3]), 1024),
- hw.VirtNUMATopologyCellUsage(1, set([4, 6]), 512),
- hw.VirtNUMATopologyCellUsage(2, set([5, 7]), 512),
- ])
- instance1 = hw.VirtNUMAInstanceTopology([
- hw.VirtNUMATopologyCellInstance(0, set([0, 1, 2]), 256),
- hw.VirtNUMATopologyCellInstance(1, set([4]), 256),
- ])
- instance2 = hw.VirtNUMAInstanceTopology([
- hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
- hw.VirtNUMATopologyCellInstance(1, set([5, 7]), 256),
- ])
-
- hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
- hosttopo, [instance1, instance2])
-
- self.assertEqual(len(hosttopo), len(hostusage))
-
- self.assertIsInstance(hostusage.cells[0],
- hw.VirtNUMATopologyCellUsage)
- self.assertEqual(hosttopo.cells[0].cpuset,
- hostusage.cells[0].cpuset)
- self.assertEqual(hosttopo.cells[0].memory,
- hostusage.cells[0].memory)
- self.assertEqual(hostusage.cells[0].cpu_usage, 5)
- self.assertEqual(hostusage.cells[0].memory_usage, 512)
-
- self.assertIsInstance(hostusage.cells[1],
- hw.VirtNUMATopologyCellUsage)
- self.assertEqual(hosttopo.cells[1].cpuset,
- hostusage.cells[1].cpuset)
- self.assertEqual(hosttopo.cells[1].memory,
- hostusage.cells[1].memory)
- self.assertEqual(hostusage.cells[1].cpu_usage, 3)
- self.assertEqual(hostusage.cells[1].memory_usage, 512)
-
- self.assertIsInstance(hostusage.cells[2],
- hw.VirtNUMATopologyCellUsage)
- self.assertEqual(hosttopo.cells[2].cpuset,
- hostusage.cells[2].cpuset)
- self.assertEqual(hosttopo.cells[2].memory,
- hostusage.cells[2].memory)
- self.assertEqual(hostusage.cells[2].cpu_usage, 0)
- self.assertEqual(hostusage.cells[2].memory_usage, 0)
-
- def test_host_usage_sparse(self):
- hosttopo = hw.VirtNUMAHostTopology([
- hw.VirtNUMATopologyCellUsage(0, set([0, 1, 2, 3]), 1024),
- hw.VirtNUMATopologyCellUsage(5, set([4, 6]), 512),
- hw.VirtNUMATopologyCellUsage(6, set([5, 7]), 512),
- ])
- instance1 = hw.VirtNUMAInstanceTopology([
- hw.VirtNUMATopologyCellInstance(0, set([0, 1, 2]), 256),
- hw.VirtNUMATopologyCellInstance(6, set([4]), 256),
- ])
- instance2 = hw.VirtNUMAInstanceTopology([
- hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
- hw.VirtNUMATopologyCellInstance(5, set([5, 7]), 256),
- ])
-
- hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
- hosttopo, [instance1, instance2])
-
- self.assertEqual(len(hosttopo), len(hostusage))
-
- self.assertIsInstance(hostusage.cells[0],
- hw.VirtNUMATopologyCellUsage)
- self.assertEqual(hosttopo.cells[0].id,
- hostusage.cells[0].id)
- self.assertEqual(hosttopo.cells[0].cpuset,
- hostusage.cells[0].cpuset)
- self.assertEqual(hosttopo.cells[0].memory,
- hostusage.cells[0].memory)
- self.assertEqual(hostusage.cells[0].cpu_usage, 5)
- self.assertEqual(hostusage.cells[0].memory_usage, 512)
-
- self.assertIsInstance(hostusage.cells[1],
- hw.VirtNUMATopologyCellUsage)
- self.assertEqual(hosttopo.cells[1].id,
- hostusage.cells[1].id)
- self.assertEqual(hosttopo.cells[1].cpuset,
- hostusage.cells[1].cpuset)
- self.assertEqual(hosttopo.cells[1].memory,
- hostusage.cells[1].memory)
- self.assertEqual(hostusage.cells[1].cpu_usage, 2)
- self.assertEqual(hostusage.cells[1].memory_usage, 256)
-
- self.assertIsInstance(hostusage.cells[2],
- hw.VirtNUMATopologyCellUsage)
- self.assertEqual(hosttopo.cells[2].cpuset,
- hostusage.cells[2].cpuset)
- self.assertEqual(hosttopo.cells[2].memory,
- hostusage.cells[2].memory)
- self.assertEqual(hostusage.cells[2].cpu_usage, 1)
- self.assertEqual(hostusage.cells[2].memory_usage, 256)
-
- def test_host_usage_culmulative_with_free(self):
- hosttopo = hw.VirtNUMAHostTopology([
- hw.VirtNUMATopologyCellUsage(
- 0, set([0, 1, 2, 3]), 1024, cpu_usage=2, memory_usage=512),
- hw.VirtNUMATopologyCellUsage(
- 1, set([4, 6]), 512, cpu_usage=1, memory_usage=512),
- hw.VirtNUMATopologyCellUsage(2, set([5, 7]), 256),
- ])
- instance1 = hw.VirtNUMAInstanceTopology([
- hw.VirtNUMATopologyCellInstance(0, set([0, 1, 2]), 512),
- hw.VirtNUMATopologyCellInstance(1, set([3]), 256),
- hw.VirtNUMATopologyCellInstance(2, set([4]), 256)])
-
- hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
- hosttopo, [instance1])
- self.assertIsInstance(hostusage.cells[0],
- hw.VirtNUMATopologyCellUsage)
- self.assertEqual(hostusage.cells[0].cpu_usage, 5)
- self.assertEqual(hostusage.cells[0].memory_usage, 1024)
-
- self.assertIsInstance(hostusage.cells[1],
- hw.VirtNUMATopologyCellUsage)
- self.assertEqual(hostusage.cells[1].cpu_usage, 2)
- self.assertEqual(hostusage.cells[1].memory_usage, 768)
-
- self.assertIsInstance(hostusage.cells[2],
- hw.VirtNUMATopologyCellUsage)
- self.assertEqual(hostusage.cells[2].cpu_usage, 1)
- self.assertEqual(hostusage.cells[2].memory_usage, 256)
-
- # Test freeing of resources
- hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
- hostusage, [instance1], free=True)
- self.assertEqual(hostusage.cells[0].cpu_usage, 2)
- self.assertEqual(hostusage.cells[0].memory_usage, 512)
-
- self.assertEqual(hostusage.cells[1].cpu_usage, 1)
- self.assertEqual(hostusage.cells[1].memory_usage, 512)
-
- self.assertEqual(hostusage.cells[2].cpu_usage, 0)
- self.assertEqual(hostusage.cells[2].memory_usage, 0)
-
- def test_topo_usage_none(self):
- hosttopo = hw.VirtNUMAHostTopology([
- hw.VirtNUMATopologyCellUsage(0, set([0, 1]), 512),
- hw.VirtNUMATopologyCellUsage(1, set([2, 3]), 512),
- ])
- instance1 = hw.VirtNUMAInstanceTopology([
- hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
- hw.VirtNUMATopologyCellInstance(2, set([2]), 256),
- ])
-
- hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
- None, [instance1])
- self.assertIsNone(hostusage)
-
- hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
- hosttopo, [])
- self.assertEqual(hostusage.cells[0].cpu_usage, 0)
- self.assertEqual(hostusage.cells[0].memory_usage, 0)
- self.assertEqual(hostusage.cells[1].cpu_usage, 0)
- self.assertEqual(hostusage.cells[1].memory_usage, 0)
-
- hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
- hosttopo, None)
- self.assertEqual(hostusage.cells[0].cpu_usage, 0)
- self.assertEqual(hostusage.cells[0].memory_usage, 0)
- self.assertEqual(hostusage.cells[1].cpu_usage, 0)
- self.assertEqual(hostusage.cells[1].memory_usage, 0)
-
- def _test_to_dict(self, cell_or_topo, expected):
- got = cell_or_topo._to_dict()
- self.assertThat(expected, matchers.DictMatches(got))
-
- def assertNUMACellMatches(self, expected_cell, got_cell):
- attrs = ('cpuset', 'memory', 'id')
- if isinstance(expected_cell, hw.VirtNUMAHostTopology):
- attrs += ('cpu_usage', 'memory_usage')
-
- for attr in attrs:
- self.assertEqual(getattr(expected_cell, attr),
- getattr(got_cell, attr))
-
- def _test_cell_from_dict(self, data_dict, expected_cell):
- cell_class = expected_cell.__class__
- got_cell = cell_class._from_dict(data_dict)
- self.assertNUMACellMatches(expected_cell, got_cell)
-
- def _test_topo_from_dict(self, data_dict, expected_topo):
- got_topo = expected_topo.__class__._from_dict(
- data_dict)
- for got_cell, expected_cell in zip(
- got_topo.cells, expected_topo.cells):
- self.assertNUMACellMatches(expected_cell, got_cell)
-
- def test_numa_cell_dict(self):
- cell = hw.VirtNUMATopologyCellInstance(1, set([1, 2]), 512)
- cell_dict = {'cpus': '1,2',
- 'mem': {'total': 512},
- 'id': 1,
- 'pagesize': None}
- self._test_to_dict(cell, cell_dict)
- self._test_cell_from_dict(cell_dict, cell)
-
- def test_numa_cell_pagesize_dict(self):
- cell = hw.VirtNUMATopologyCellInstance(
- 1, set([1, 2]), 512, hw.VirtPageSize(2048))
- cell_dict = {'cpus': '1,2',
- 'mem': {'total': 512},
- 'id': 1,
- 'pagesize': 2048}
- self._test_to_dict(cell, cell_dict)
- self._test_cell_from_dict(cell_dict, cell)
-
- def test_numa_limit_cell_dict(self):
- cell = hw.VirtNUMATopologyCellLimit(1, set([1, 2]), 512, 4, 2048)
- cell_dict = {'cpus': '1,2', 'cpu_limit': 4,
- 'mem': {'total': 512, 'limit': 2048},
- 'id': 1}
- self._test_to_dict(cell, cell_dict)
- self._test_cell_from_dict(cell_dict, cell)
-
- def test_numa_cell_usage_dict(self):
- cell = hw.VirtNUMATopologyCellUsage(1, set([1, 2]), 512)
- cell_dict = {'cpus': '1,2', 'cpu_usage': 0,
- 'mem': {'total': 512, 'used': 0},
- 'id': 1}
- self._test_to_dict(cell, cell_dict)
- self._test_cell_from_dict(cell_dict, cell)
-
- def test_numa_instance_topo_dict(self):
- topo = hw.VirtNUMAInstanceTopology(
- cells=[
- hw.VirtNUMATopologyCellInstance(1, set([1, 2]), 1024),
- hw.VirtNUMATopologyCellInstance(2, set([3, 4]), 1024)])
- topo_dict = {'cells': [
- {'cpus': '1,2',
- 'mem': {'total': 1024},
- 'id': 1,
- 'pagesize': None},
- {'cpus': '3,4',
- 'mem': {'total': 1024},
- 'id': 2,
- 'pagesize': None}]}
- self._test_to_dict(topo, topo_dict)
- self._test_topo_from_dict(topo_dict, topo)
-
- def test_numa_limits_topo_dict(self):
- topo = hw.VirtNUMALimitTopology(
- cells=[
- hw.VirtNUMATopologyCellLimit(
- 1, set([1, 2]), 1024, 4, 2048),
- hw.VirtNUMATopologyCellLimit(
- 2, set([3, 4]), 1024, 4, 2048)])
- topo_dict = {'cells': [
- {'cpus': '1,2', 'cpu_limit': 4,
- 'mem': {'total': 1024, 'limit': 2048},
- 'id': 1},
- {'cpus': '3,4', 'cpu_limit': 4,
- 'mem': {'total': 1024, 'limit': 2048},
- 'id': 2}]}
- self._test_to_dict(topo, topo_dict)
- self._test_topo_from_dict(topo_dict, topo)
-
- def test_numa_topo_dict_with_usage(self):
- topo = hw.VirtNUMAHostTopology(
- cells=[
- hw.VirtNUMATopologyCellUsage(
- 1, set([1, 2]), 1024),
- hw.VirtNUMATopologyCellUsage(
- 2, set([3, 4]), 1024)])
- topo_dict = {'cells': [
- {'cpus': '1,2', 'cpu_usage': 0,
- 'mem': {'total': 1024, 'used': 0},
- 'id': 1},
- {'cpus': '3,4', 'cpu_usage': 0,
- 'mem': {'total': 1024, 'used': 0},
- 'id': 2}]}
- self._test_to_dict(topo, topo_dict)
- self._test_topo_from_dict(topo_dict, topo)
-
- def test_json(self):
- expected = hw.VirtNUMAHostTopology(
- cells=[
- hw.VirtNUMATopologyCellUsage(
- 1, set([1, 2]), 1024),
- hw.VirtNUMATopologyCellUsage(
- 2, set([3, 4]), 1024)])
- got = hw.VirtNUMAHostTopology.from_json(expected.to_json())
-
- for exp_cell, got_cell in zip(expected.cells, got.cells):
- self.assertNUMACellMatches(exp_cell, got_cell)
-
-
-class NumberOfSerialPortsTest(test.NoDBTestCase):
- def test_flavor(self):
- flavor = FakeFlavorObject(8, 2048, {"hw:serial_port_count": 3})
- num_ports = hw.get_number_of_serial_ports(flavor, None)
- self.assertEqual(3, num_ports)
-
- def test_image_meta(self):
- flavor = FakeFlavorObject(8, 2048, {})
- image_meta = {"properties": {"hw_serial_port_count": 2}}
- num_ports = hw.get_number_of_serial_ports(flavor, image_meta)
- self.assertEqual(2, num_ports)
-
- def test_flavor_invalid_value(self):
- flavor = FakeFlavorObject(8, 2048, {"hw:serial_port_count": 'foo'})
- image_meta = {"properties": {}}
- self.assertRaises(exception.ImageSerialPortNumberInvalid,
- hw.get_number_of_serial_ports,
- flavor, image_meta)
-
- def test_image_meta_invalid_value(self):
- flavor = FakeFlavorObject(8, 2048, {})
- image_meta = {"properties": {"hw_serial_port_count": 'bar'}}
- self.assertRaises(exception.ImageSerialPortNumberInvalid,
- hw.get_number_of_serial_ports,
- flavor, image_meta)
-
- def test_image_meta_smaller_than_flavor(self):
- flavor = FakeFlavorObject(8, 2048, {"hw:serial_port_count": 3})
- image_meta = {"properties": {"hw_serial_port_count": 2}}
- num_ports = hw.get_number_of_serial_ports(flavor, image_meta)
- self.assertEqual(2, num_ports)
-
- def test_flavor_smaller_than_image_meta(self):
- flavor = FakeFlavorObject(8, 2048, {"hw:serial_port_count": 3})
- image_meta = {"properties": {"hw_serial_port_count": 4}}
- self.assertRaises(exception.ImageSerialPortNumberExceedFlavorValue,
- hw.get_number_of_serial_ports,
- flavor, image_meta)
-
-
-class NUMATopologyClaimsTest(test.NoDBTestCase):
- def setUp(self):
- super(NUMATopologyClaimsTest, self).setUp()
-
- self.host = hw.VirtNUMAHostTopology(
- cells=[
- hw.VirtNUMATopologyCellUsage(
- 1, set([1, 2, 3, 4]), 2048,
- cpu_usage=1, memory_usage=512),
- hw.VirtNUMATopologyCellUsage(
- 2, set([5, 6]), 1024)])
-
- self.limits = hw.VirtNUMALimitTopology(
- cells=[
- hw.VirtNUMATopologyCellLimit(
- 1, set([1, 2, 3, 4]), 2048,
- cpu_limit=8, memory_limit=4096),
- hw.VirtNUMATopologyCellLimit(
- 2, set([5, 6]), 1024,
- cpu_limit=4, memory_limit=2048)])
-
- self.large_instance = hw.VirtNUMAInstanceTopology(
- cells=[
- hw.VirtNUMATopologyCellInstance(
- 1, set([1, 2, 3, 4, 5, 6]), 8192),
- hw.VirtNUMATopologyCellInstance(
- 2, set([7, 8]), 4096)])
- self.medium_instance = hw.VirtNUMAInstanceTopology(
- cells=[
- hw.VirtNUMATopologyCellInstance(
- 1, set([1, 2, 3, 4]), 1024),
- hw.VirtNUMATopologyCellInstance(
- 2, set([7, 8]), 2048)])
- self.small_instance = hw.VirtNUMAInstanceTopology(
- cells=[
- hw.VirtNUMATopologyCellInstance(1, set([1]), 256),
- hw.VirtNUMATopologyCellInstance(2, set([5]), 1024)])
- self.no_fit_instance = hw.VirtNUMAInstanceTopology(
- cells=[
- hw.VirtNUMATopologyCellInstance(1, set([1]), 256),
- hw.VirtNUMATopologyCellInstance(2, set([2]), 256),
- hw.VirtNUMATopologyCellInstance(3, set([3]), 256)])
-
- def test_claim_not_enough_info(self):
-
- # No limits supplied
- self.assertIsNone(
- hw.VirtNUMAHostTopology.claim_test(
- self.host, [self.large_instance]))
- # Empty topology
- self.assertIsNone(
- hw.VirtNUMAHostTopology.claim_test(
- hw.VirtNUMAHostTopology(), [self.large_instance],
- limits=self.limits))
- # No instances to claim
- self.assertIsNone(
- hw.VirtNUMAHostTopology.claim_test(self.host, [], self.limits))
-
- def test_claim_succeeds(self):
- self.assertIsNone(
- hw.VirtNUMAHostTopology.claim_test(
- self.host, [self.small_instance], self.limits))
- self.assertIsNone(
- hw.VirtNUMAHostTopology.claim_test(
- self.host, [self.medium_instance], self.limits))
-
- def test_claim_fails(self):
- self.assertIsInstance(
- hw.VirtNUMAHostTopology.claim_test(
- self.host, [self.large_instance], self.limits),
- six.text_type)
-
- self.assertIsInstance(
- hw.VirtNUMAHostTopology.claim_test(
- self.host, [self.medium_instance, self.small_instance],
- self.limits),
- six.text_type)
-
- # Instance fails if it won't fit the topology
- self.assertIsInstance(
- hw.VirtNUMAHostTopology.claim_test(
- self.host, [self.no_fit_instance], self.limits),
- six.text_type)
-
- # Instance fails if it won't fit the topology even with no limits
- self.assertIsInstance(
- hw.VirtNUMAHostTopology.claim_test(
- self.host, [self.no_fit_instance]), six.text_type)
-
-
-class HelperMethodsTestCase(test.NoDBTestCase):
- def setUp(self):
- super(HelperMethodsTestCase, self).setUp()
- self.hosttopo = hw.VirtNUMAHostTopology([
- hw.VirtNUMATopologyCellUsage(0, set([0, 1]), 512),
- hw.VirtNUMATopologyCellUsage(1, set([2, 3]), 512),
- ])
- self.instancetopo = hw.VirtNUMAInstanceTopology([
- hw.VirtNUMATopologyCellInstance(0, set([0, 1]), 256),
- hw.VirtNUMATopologyCellInstance(1, set([2]), 256),
- ])
- self.context = context.RequestContext('fake-user',
- 'fake-project')
-
- def _check_usage(self, host_usage):
- self.assertEqual(2, host_usage.cells[0].cpu_usage)
- self.assertEqual(256, host_usage.cells[0].memory_usage)
- self.assertEqual(1, host_usage.cells[1].cpu_usage)
- self.assertEqual(256, host_usage.cells[1].memory_usage)
-
- def test_dicts_json(self):
- host = {'numa_topology': self.hosttopo.to_json()}
- instance = {'numa_topology': self.instancetopo.to_json()}
-
- res = hw.get_host_numa_usage_from_instance(host, instance)
- self.assertIsInstance(res, six.string_types)
- self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
-
- def test_dicts_instance_json(self):
- host = {'numa_topology': self.hosttopo}
- instance = {'numa_topology': self.instancetopo.to_json()}
-
- res = hw.get_host_numa_usage_from_instance(host, instance)
- self.assertIsInstance(res, hw.VirtNUMAHostTopology)
- self._check_usage(res)
-
- def test_dicts_host_json(self):
- host = {'numa_topology': self.hosttopo.to_json()}
- instance = {'numa_topology': self.instancetopo}
-
- res = hw.get_host_numa_usage_from_instance(host, instance)
- self.assertIsInstance(res, six.string_types)
- self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
-
- def test_object_host_instance_json(self):
- host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
- instance = {'numa_topology': self.instancetopo.to_json()}
-
- res = hw.get_host_numa_usage_from_instance(host, instance)
- self.assertIsInstance(res, six.string_types)
- self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
-
- def test_object_host_instance(self):
- host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
- instance = {'numa_topology': self.instancetopo}
-
- res = hw.get_host_numa_usage_from_instance(host, instance)
- self.assertIsInstance(res, six.string_types)
- self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
-
- def test_instance_with_fetch(self):
- host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
- fake_uuid = str(uuid.uuid4())
- instance = {'uuid': fake_uuid}
-
- with mock.patch.object(objects.InstanceNUMATopology,
- 'get_by_instance_uuid', return_value=None) as get_mock:
- res = hw.get_host_numa_usage_from_instance(host, instance)
- self.assertIsInstance(res, six.string_types)
- self.assertTrue(get_mock.called)
-
- def test_object_instance_with_load(self):
- host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
- fake_uuid = str(uuid.uuid4())
- instance = objects.Instance(context=self.context, uuid=fake_uuid)
-
- with mock.patch.object(objects.InstanceNUMATopology,
- 'get_by_instance_uuid', return_value=None) as get_mock:
- res = hw.get_host_numa_usage_from_instance(host, instance)
- self.assertIsInstance(res, six.string_types)
- self.assertTrue(get_mock.called)
-
- def test_instance_serialized_by_build_request_spec(self):
- host = objects.ComputeNode(numa_topology=self.hosttopo.to_json())
- fake_uuid = str(uuid.uuid4())
- instance = objects.Instance(context=self.context, id=1, uuid=fake_uuid,
- numa_topology=objects.InstanceNUMATopology.obj_from_topology(
- self.instancetopo))
- # NOTE (ndipanov): This emulates scheduler.utils.build_request_spec
- # We can remove this test once we no longer use that method.
- instance_raw = jsonutils.to_primitive(
- base_obj.obj_to_primitive(instance))
- res = hw.get_host_numa_usage_from_instance(host, instance_raw)
- self.assertIsInstance(res, six.string_types)
- self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
-
- def test_attr_host(self):
- class Host(object):
- def __init__(obj):
- obj.numa_topology = self.hosttopo.to_json()
-
- host = Host()
- instance = {'numa_topology': self.instancetopo.to_json()}
-
- res = hw.get_host_numa_usage_from_instance(host, instance)
- self.assertIsInstance(res, six.string_types)
- self._check_usage(hw.VirtNUMAHostTopology.from_json(res))
-
- def test_never_serialize_result(self):
- host = {'numa_topology': self.hosttopo.to_json()}
- instance = {'numa_topology': self.instancetopo}
-
- res = hw.get_host_numa_usage_from_instance(host, instance,
- never_serialize_result=True)
- self.assertIsInstance(res, hw.VirtNUMAHostTopology)
- self._check_usage(res)
-
-
-class VirtMemoryPagesTestCase(test.NoDBTestCase):
- def test_virt_pages_topology(self):
- pages = hw.VirtPagesTopology(4, 1024, 512)
- self.assertEqual(4, pages.size_kb)
- self.assertEqual(1024, pages.total)
- self.assertEqual(512, pages.used)
-
- def test_virt_pages_topology_to_dict(self):
- pages = hw.VirtPagesTopology(4, 1024, 512)
- self.assertEqual({'size_kb': 4,
- 'total': 1024,
- 'used': 512}, pages.to_dict())
-
- def test_virt_pages_topology_from_dict(self):
- pages = hw.VirtPagesTopology.from_dict({'size_kb': 4,
- 'total': 1024,
- 'used': 512})
- self.assertEqual(4, pages.size_kb)
- self.assertEqual(1024, pages.total)
- self.assertEqual(512, pages.used)
-
- def test_cell_instance_pagesize(self):
- pagesize = hw.VirtPageSize(2048)
- cell = hw.VirtNUMATopologyCellInstance(
- 0, set([0]), 1024, pagesize)
-
- self.assertEqual(0, cell.id)
- self.assertEqual(set([0]), cell.cpuset)
- self.assertEqual(1024, cell.memory)
- self.assertEqual(2048, cell.pagesize.size_kb)
diff --git a/nova/tests/virt/test_imagecache.py b/nova/tests/virt/test_imagecache.py
deleted file mode 100644
index 693b0625d6..0000000000
--- a/nova/tests/virt/test_imagecache.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.config import cfg
-
-from nova.compute import vm_states
-from nova import test
-from nova.tests import fake_instance
-from nova.virt import imagecache
-
-CONF = cfg.CONF
-
-
-class ImageCacheManagerTests(test.NoDBTestCase):
-
- def test_configurationi_defaults(self):
- self.assertEqual(2400, CONF.image_cache_manager_interval)
- self.assertEqual('_base', CONF.image_cache_subdirectory_name)
- self.assertTrue(CONF.remove_unused_base_images)
- self.assertEqual(24 * 3600,
- CONF.remove_unused_original_minimum_age_seconds)
-
- def test_cache_manager(self):
- cache_manager = imagecache.ImageCacheManager()
- self.assertTrue(cache_manager.remove_unused_base_images)
- self.assertRaises(NotImplementedError,
- cache_manager.update, None, [])
- self.assertRaises(NotImplementedError,
- cache_manager._get_base)
- base_images = cache_manager._list_base_images(None)
- self.assertEqual([], base_images['unexplained_images'])
- self.assertEqual([], base_images['originals'])
- self.assertRaises(NotImplementedError,
- cache_manager._age_and_verify_cached_images,
- None, [], None)
-
- def test_list_running_instances(self):
- instances = [{'image_ref': '1',
- 'host': CONF.host,
- 'id': '1',
- 'uuid': '123',
- 'vm_state': '',
- 'task_state': ''},
- {'image_ref': '2',
- 'host': CONF.host,
- 'id': '2',
- 'uuid': '456',
- 'vm_state': '',
- 'task_state': ''},
- {'image_ref': '2',
- 'kernel_id': '21',
- 'ramdisk_id': '22',
- 'host': 'remotehost',
- 'id': '3',
- 'uuid': '789',
- 'vm_state': '',
- 'task_state': ''}]
-
- all_instances = [fake_instance.fake_instance_obj(None, **instance)
- for instance in instances]
-
- image_cache_manager = imagecache.ImageCacheManager()
-
- # The argument here should be a context, but it's mocked out
- running = image_cache_manager._list_running_instances(None,
- all_instances)
-
- self.assertEqual(4, len(running['used_images']))
- self.assertEqual((1, 0, ['instance-00000001']),
- running['used_images']['1'])
- self.assertEqual((1, 1, ['instance-00000002',
- 'instance-00000003']),
- running['used_images']['2'])
- self.assertEqual((0, 1, ['instance-00000003']),
- running['used_images']['21'])
- self.assertEqual((0, 1, ['instance-00000003']),
- running['used_images']['22'])
-
- self.assertIn('instance-00000001', running['instance_names'])
- self.assertIn('123', running['instance_names'])
-
- self.assertEqual(4, len(running['image_popularity']))
- self.assertEqual(1, running['image_popularity']['1'])
- self.assertEqual(2, running['image_popularity']['2'])
- self.assertEqual(1, running['image_popularity']['21'])
- self.assertEqual(1, running['image_popularity']['22'])
-
- def test_list_resizing_instances(self):
- instances = [{'image_ref': '1',
- 'host': CONF.host,
- 'id': '1',
- 'uuid': '123',
- 'vm_state': vm_states.RESIZED,
- 'task_state': None}]
-
- all_instances = [fake_instance.fake_instance_obj(None, **instance)
- for instance in instances]
-
- image_cache_manager = imagecache.ImageCacheManager()
- running = image_cache_manager._list_running_instances(None,
- all_instances)
-
- self.assertEqual(1, len(running['used_images']))
- self.assertEqual((1, 0, ['instance-00000001']),
- running['used_images']['1'])
- self.assertEqual(set(['instance-00000001', '123',
- 'instance-00000001_resize', '123_resize']),
- running['instance_names'])
-
- self.assertEqual(1, len(running['image_popularity']))
- self.assertEqual(1, running['image_popularity']['1'])
diff --git a/nova/tests/virt/test_virt_drivers.py b/nova/tests/virt/test_virt_drivers.py
deleted file mode 100644
index e4136d6eed..0000000000
--- a/nova/tests/virt/test_virt_drivers.py
+++ /dev/null
@@ -1,879 +0,0 @@
-# Copyright 2010 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import base64
-import sys
-import traceback
-
-import fixtures
-import mock
-import netaddr
-from oslo.serialization import jsonutils
-from oslo.utils import importutils
-from oslo.utils import timeutils
-import six
-
-from nova.compute import manager
-from nova.console import type as ctype
-from nova import exception
-from nova import objects
-from nova.openstack.common import log as logging
-from nova import test
-from nova.tests import fake_block_device
-from nova.tests.image import fake as fake_image
-from nova.tests import utils as test_utils
-from nova.tests.virt.libvirt import fake_libvirt_utils
-from nova.virt import block_device as driver_block_device
-from nova.virt import event as virtevent
-from nova.virt import fake
-from nova.virt import libvirt
-from nova.virt.libvirt import imagebackend
-
-LOG = logging.getLogger(__name__)
-
-
-def catch_notimplementederror(f):
- """Decorator to simplify catching drivers raising NotImplementedError
-
- If a particular call makes a driver raise NotImplementedError, we
- log it so that we can extract this information afterwards as needed.
- """
- def wrapped_func(self, *args, **kwargs):
- try:
- return f(self, *args, **kwargs)
- except NotImplementedError:
- frame = traceback.extract_tb(sys.exc_info()[2])[-1]
- LOG.error("%(driver)s does not implement %(method)s "
- "required for test %(test)s" %
- {'driver': type(self.connection),
- 'method': frame[2], 'test': f.__name__})
-
- wrapped_func.__name__ = f.__name__
- wrapped_func.__doc__ = f.__doc__
- return wrapped_func
-
-
-class _FakeDriverBackendTestCase(object):
- def _setup_fakelibvirt(self):
- # So that the _supports_direct_io does the test based
- # on the current working directory, instead of the
- # default instances_path which doesn't exist
- self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
-
- # Put fakelibvirt in place
- if 'libvirt' in sys.modules:
- self.saved_libvirt = sys.modules['libvirt']
- else:
- self.saved_libvirt = None
-
- import nova.tests.virt.libvirt.fake_imagebackend as fake_imagebackend
- import nova.tests.virt.libvirt.fake_libvirt_utils as fake_libvirt_utils
- import nova.tests.virt.libvirt.fakelibvirt as fakelibvirt
-
- sys.modules['libvirt'] = fakelibvirt
- import nova.virt.libvirt.driver
- import nova.virt.libvirt.firewall
-
- self.useFixture(fixtures.MonkeyPatch(
- 'nova.virt.libvirt.driver.imagebackend',
- fake_imagebackend))
- self.useFixture(fixtures.MonkeyPatch(
- 'nova.virt.libvirt.driver.libvirt',
- fakelibvirt))
- self.useFixture(fixtures.MonkeyPatch(
- 'nova.virt.libvirt.driver.libvirt_utils',
- fake_libvirt_utils))
- self.useFixture(fixtures.MonkeyPatch(
- 'nova.virt.libvirt.imagebackend.libvirt_utils',
- fake_libvirt_utils))
- self.useFixture(fixtures.MonkeyPatch(
- 'nova.virt.libvirt.firewall.libvirt',
- fakelibvirt))
-
- self.flags(rescue_image_id="2",
- rescue_kernel_id="3",
- rescue_ramdisk_id=None,
- snapshots_directory='./',
- group='libvirt')
-
- def fake_extend(image, size):
- pass
-
- def fake_migrateToURI(*a):
- pass
-
- def fake_make_drive(_self, _path):
- pass
-
- def fake_get_instance_disk_info(_self, instance, xml=None,
- block_device_info=None):
- return '[]'
-
- def fake_delete_instance_files(_self, _instance):
- pass
-
- self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
- '_get_instance_disk_info',
- fake_get_instance_disk_info)
-
- self.stubs.Set(nova.virt.libvirt.driver.disk,
- 'extend', fake_extend)
-
- self.stubs.Set(nova.virt.libvirt.driver.LibvirtDriver,
- '_delete_instance_files',
- fake_delete_instance_files)
-
- # Like the existing fakelibvirt.migrateToURI, do nothing,
- # but don't fail for these tests.
- self.stubs.Set(nova.virt.libvirt.driver.libvirt.Domain,
- 'migrateToURI', fake_migrateToURI)
-
- # We can't actually make a config drive v2 because ensure_tree has
- # been faked out
- self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
- 'make_drive', fake_make_drive)
-
- def _teardown_fakelibvirt(self):
- # Restore libvirt
- if self.saved_libvirt:
- sys.modules['libvirt'] = self.saved_libvirt
-
- def setUp(self):
- super(_FakeDriverBackendTestCase, self).setUp()
- # TODO(sdague): it would be nice to do this in a way that only
- # the relevant backends where replaced for tests, though this
- # should not harm anything by doing it for all backends
- fake_image.stub_out_image_service(self.stubs)
- self._setup_fakelibvirt()
-
- def tearDown(self):
- fake_image.FakeImageService_reset()
- self._teardown_fakelibvirt()
- super(_FakeDriverBackendTestCase, self).tearDown()
-
-
-class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
- """Test that ComputeManager can successfully load both
- old style and new style drivers and end up with the correct
- final class.
- """
-
- # if your driver supports being tested in a fake way, it can go here
- #
- # both long form and short form drivers are supported
- new_drivers = {
- 'nova.virt.fake.FakeDriver': 'FakeDriver',
- 'nova.virt.libvirt.LibvirtDriver': 'LibvirtDriver',
- 'fake.FakeDriver': 'FakeDriver',
- 'libvirt.LibvirtDriver': 'LibvirtDriver'
- }
-
- def test_load_new_drivers(self):
- for cls, driver in self.new_drivers.iteritems():
- self.flags(compute_driver=cls)
- # NOTE(sdague) the try block is to make it easier to debug a
- # failure by knowing which driver broke
- try:
- cm = manager.ComputeManager()
- except Exception as e:
- self.fail("Couldn't load driver %s - %s" % (cls, e))
-
- self.assertEqual(cm.driver.__class__.__name__, driver,
- "Could't load driver %s" % cls)
-
- def test_fail_to_load_new_drivers(self):
- self.flags(compute_driver='nova.virt.amiga')
-
- def _fake_exit(error):
- raise test.TestingException()
-
- self.stubs.Set(sys, 'exit', _fake_exit)
- self.assertRaises(test.TestingException, manager.ComputeManager)
-
-
-class _VirtDriverTestCase(_FakeDriverBackendTestCase):
- def setUp(self):
- super(_VirtDriverTestCase, self).setUp()
-
- self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
- self.connection = importutils.import_object(self.driver_module,
- fake.FakeVirtAPI())
- self.ctxt = test_utils.get_test_admin_context()
- self.image_service = fake_image.FakeImageService()
- # NOTE(dripton): resolve_driver_format does some file reading and
- # writing and chowning that complicate testing too much by requiring
- # using real directories with proper permissions. Just stub it out
- # here; we test it in test_imagebackend.py
- self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
- imagebackend.Image._get_driver_format)
-
- def _get_running_instance(self, obj=True):
- instance_ref = test_utils.get_test_instance(obj=obj)
- network_info = test_utils.get_test_network_info()
- network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
- '1.1.1.1'
- image_info = test_utils.get_test_image_info(None, instance_ref)
- self.connection.spawn(self.ctxt, instance_ref, image_info,
- [], 'herp', network_info=network_info)
- return instance_ref, network_info
-
- @catch_notimplementederror
- def test_init_host(self):
- self.connection.init_host('myhostname')
-
- @catch_notimplementederror
- def test_list_instances(self):
- self.connection.list_instances()
-
- @catch_notimplementederror
- def test_list_instance_uuids(self):
- self.connection.list_instance_uuids()
-
- @catch_notimplementederror
- def test_spawn(self):
- instance_ref, network_info = self._get_running_instance()
- domains = self.connection.list_instances()
- self.assertIn(instance_ref['name'], domains)
-
- num_instances = self.connection.get_num_instances()
- self.assertEqual(1, num_instances)
-
- @catch_notimplementederror
- def test_snapshot_not_running(self):
- instance_ref = test_utils.get_test_instance()
- img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
- self.assertRaises(exception.InstanceNotRunning,
- self.connection.snapshot,
- self.ctxt, instance_ref, img_ref['id'],
- lambda *args, **kwargs: None)
-
- @catch_notimplementederror
- def test_snapshot_running(self):
- img_ref = self.image_service.create(self.ctxt, {'name': 'snap-1'})
- instance_ref, network_info = self._get_running_instance()
- self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'],
- lambda *args, **kwargs: None)
-
- @catch_notimplementederror
- def test_post_interrupted_snapshot_cleanup(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.post_interrupted_snapshot_cleanup(self.ctxt,
- instance_ref)
-
- @catch_notimplementederror
- def test_reboot(self):
- reboot_type = "SOFT"
- instance_ref, network_info = self._get_running_instance()
- self.connection.reboot(self.ctxt, instance_ref, network_info,
- reboot_type)
-
- @catch_notimplementederror
- def test_get_host_ip_addr(self):
- host_ip = self.connection.get_host_ip_addr()
-
- # Will raise an exception if it's not a valid IP at all
- ip = netaddr.IPAddress(host_ip)
-
- # For now, assume IPv4.
- self.assertEqual(ip.version, 4)
-
- @catch_notimplementederror
- def test_set_admin_password(self):
- instance, network_info = self._get_running_instance(obj=True)
- self.connection.set_admin_password(instance, 'p4ssw0rd')
-
- @catch_notimplementederror
- def test_inject_file(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.inject_file(instance_ref,
- base64.b64encode('/testfile'),
- base64.b64encode('testcontents'))
-
- @catch_notimplementederror
- def test_resume_state_on_host_boot(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.resume_state_on_host_boot(self.ctxt, instance_ref,
- network_info)
-
- @catch_notimplementederror
- def test_rescue(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.rescue(self.ctxt, instance_ref, network_info, None, '')
-
- @catch_notimplementederror
- def test_unrescue_unrescued_instance(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.unrescue(instance_ref, network_info)
-
- @catch_notimplementederror
- def test_unrescue_rescued_instance(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.rescue(self.ctxt, instance_ref, network_info, None, '')
- self.connection.unrescue(instance_ref, network_info)
-
- @catch_notimplementederror
- def test_poll_rebooting_instances(self):
- instances = [self._get_running_instance()]
- self.connection.poll_rebooting_instances(10, instances)
-
- @catch_notimplementederror
- def test_migrate_disk_and_power_off(self):
- instance_ref, network_info = self._get_running_instance()
- flavor_ref = test_utils.get_test_flavor()
- self.connection.migrate_disk_and_power_off(
- self.ctxt, instance_ref, 'dest_host', flavor_ref,
- network_info)
-
- @catch_notimplementederror
- def test_power_off(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.power_off(instance_ref)
-
- @catch_notimplementederror
- def test_power_on_running(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.power_on(self.ctxt, instance_ref,
- network_info, None)
-
- @catch_notimplementederror
- def test_power_on_powered_off(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.power_off(instance_ref)
- self.connection.power_on(self.ctxt, instance_ref, network_info, None)
-
- @catch_notimplementederror
- def test_soft_delete(self):
- instance_ref, network_info = self._get_running_instance(obj=True)
- self.connection.soft_delete(instance_ref)
-
- @catch_notimplementederror
- def test_restore_running(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.restore(instance_ref)
-
- @catch_notimplementederror
- def test_restore_soft_deleted(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.soft_delete(instance_ref)
- self.connection.restore(instance_ref)
-
- @catch_notimplementederror
- def test_pause(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.pause(instance_ref)
-
- @catch_notimplementederror
- def test_unpause_unpaused_instance(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.unpause(instance_ref)
-
- @catch_notimplementederror
- def test_unpause_paused_instance(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.pause(instance_ref)
- self.connection.unpause(instance_ref)
-
- @catch_notimplementederror
- def test_suspend(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.suspend(instance_ref)
-
- @catch_notimplementederror
- def test_resume_unsuspended_instance(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.resume(self.ctxt, instance_ref, network_info)
-
- @catch_notimplementederror
- def test_resume_suspended_instance(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.suspend(instance_ref)
- self.connection.resume(self.ctxt, instance_ref, network_info)
-
- @catch_notimplementederror
- def test_destroy_instance_nonexistent(self):
- fake_instance = {'id': 42, 'name': 'I just made this up!',
- 'uuid': 'bda5fb9e-b347-40e8-8256-42397848cb00'}
- network_info = test_utils.get_test_network_info()
- self.connection.destroy(self.ctxt, fake_instance, network_info)
-
- @catch_notimplementederror
- def test_destroy_instance(self):
- instance_ref, network_info = self._get_running_instance()
- self.assertIn(instance_ref['name'],
- self.connection.list_instances())
- self.connection.destroy(self.ctxt, instance_ref, network_info)
- self.assertNotIn(instance_ref['name'],
- self.connection.list_instances())
-
- @catch_notimplementederror
- def test_get_volume_connector(self):
- result = self.connection.get_volume_connector({'id': 'fake'})
- self.assertIn('ip', result)
- self.assertIn('initiator', result)
- self.assertIn('host', result)
-
- @catch_notimplementederror
- def test_attach_detach_volume(self):
- instance_ref, network_info = self._get_running_instance()
- connection_info = {
- "driver_volume_type": "fake",
- "serial": "fake_serial",
- "data": {}
- }
- self.assertIsNone(
- self.connection.attach_volume(None, connection_info, instance_ref,
- '/dev/sda'))
- self.assertIsNone(
- self.connection.detach_volume(connection_info, instance_ref,
- '/dev/sda'))
-
- @catch_notimplementederror
- def test_swap_volume(self):
- instance_ref, network_info = self._get_running_instance()
- self.assertIsNone(
- self.connection.attach_volume(None, {'driver_volume_type': 'fake',
- 'data': {}},
- instance_ref,
- '/dev/sda'))
- self.assertIsNone(
- self.connection.swap_volume({'driver_volume_type': 'fake',
- 'data': {}},
- {'driver_volume_type': 'fake',
- 'data': {}},
- instance_ref,
- '/dev/sda', 2))
-
- @catch_notimplementederror
- def test_attach_detach_different_power_states(self):
- instance_ref, network_info = self._get_running_instance()
- connection_info = {
- "driver_volume_type": "fake",
- "serial": "fake_serial",
- "data": {}
- }
- self.connection.power_off(instance_ref)
- self.connection.attach_volume(None, connection_info, instance_ref,
- '/dev/sda')
-
- bdm = {
- 'root_device_name': None,
- 'swap': None,
- 'ephemerals': [],
- 'block_device_mapping': driver_block_device.convert_volumes([
- fake_block_device.FakeDbBlockDeviceDict(
- {'id': 1, 'instance_uuid': instance_ref['uuid'],
- 'device_name': '/dev/sda',
- 'source_type': 'volume',
- 'destination_type': 'volume',
- 'delete_on_termination': False,
- 'snapshot_id': None,
- 'volume_id': 'abcdedf',
- 'volume_size': None,
- 'no_device': None
- }),
- ])
- }
- bdm['block_device_mapping'][0]['connection_info'] = (
- {'driver_volume_type': 'fake', 'data': {}})
- with mock.patch.object(
- driver_block_device.DriverVolumeBlockDevice, 'save'):
- self.connection.power_on(
- self.ctxt, instance_ref, network_info, bdm)
- self.connection.detach_volume(connection_info,
- instance_ref,
- '/dev/sda')
-
- @catch_notimplementederror
- def test_get_info(self):
- instance_ref, network_info = self._get_running_instance()
- info = self.connection.get_info(instance_ref)
- self.assertIn('state', info)
- self.assertIn('max_mem', info)
- self.assertIn('mem', info)
- self.assertIn('num_cpu', info)
- self.assertIn('cpu_time', info)
-
- @catch_notimplementederror
- def test_get_info_for_unknown_instance(self):
- self.assertRaises(exception.NotFound,
- self.connection.get_info,
- {'name': 'I just made this name up'})
-
- @catch_notimplementederror
- def test_get_diagnostics(self):
- instance_ref, network_info = self._get_running_instance(obj=True)
- self.connection.get_diagnostics(instance_ref)
-
- @catch_notimplementederror
- def test_get_instance_diagnostics(self):
- instance_ref, network_info = self._get_running_instance(obj=True)
- instance_ref['launched_at'] = timeutils.utcnow()
- self.connection.get_instance_diagnostics(instance_ref)
-
- @catch_notimplementederror
- def test_block_stats(self):
- instance_ref, network_info = self._get_running_instance()
- stats = self.connection.block_stats(instance_ref['name'], 'someid')
- self.assertEqual(len(stats), 5)
-
- @catch_notimplementederror
- def test_interface_stats(self):
- instance_ref, network_info = self._get_running_instance()
- stats = self.connection.interface_stats(instance_ref['name'], 'someid')
- self.assertEqual(len(stats), 8)
-
- @catch_notimplementederror
- def test_get_console_output(self):
- fake_libvirt_utils.files['dummy.log'] = ''
- instance_ref, network_info = self._get_running_instance()
- console_output = self.connection.get_console_output(self.ctxt,
- instance_ref)
- self.assertIsInstance(console_output, six.string_types)
-
- @catch_notimplementederror
- def test_get_vnc_console(self):
- instance, network_info = self._get_running_instance(obj=True)
- vnc_console = self.connection.get_vnc_console(self.ctxt, instance)
- self.assertIsInstance(vnc_console, ctype.ConsoleVNC)
-
- @catch_notimplementederror
- def test_get_spice_console(self):
- instance_ref, network_info = self._get_running_instance()
- spice_console = self.connection.get_spice_console(self.ctxt,
- instance_ref)
- self.assertIsInstance(spice_console, ctype.ConsoleSpice)
-
- @catch_notimplementederror
- def test_get_rdp_console(self):
- instance_ref, network_info = self._get_running_instance()
- rdp_console = self.connection.get_rdp_console(self.ctxt, instance_ref)
- self.assertIsInstance(rdp_console, ctype.ConsoleRDP)
-
- @catch_notimplementederror
- def test_get_serial_console(self):
- instance_ref, network_info = self._get_running_instance()
- serial_console = self.connection.get_serial_console(self.ctxt,
- instance_ref)
- self.assertIsInstance(serial_console, ctype.ConsoleSerial)
-
- @catch_notimplementederror
- def test_get_console_pool_info(self):
- instance_ref, network_info = self._get_running_instance()
- console_pool = self.connection.get_console_pool_info(instance_ref)
- self.assertIn('address', console_pool)
- self.assertIn('username', console_pool)
- self.assertIn('password', console_pool)
-
- @catch_notimplementederror
- def test_refresh_security_group_rules(self):
- # FIXME: Create security group and add the instance to it
- instance_ref, network_info = self._get_running_instance()
- self.connection.refresh_security_group_rules(1)
-
- @catch_notimplementederror
- def test_refresh_security_group_members(self):
- # FIXME: Create security group and add the instance to it
- instance_ref, network_info = self._get_running_instance()
- self.connection.refresh_security_group_members(1)
-
- @catch_notimplementederror
- def test_refresh_instance_security_rules(self):
- # FIXME: Create security group and add the instance to it
- instance_ref, network_info = self._get_running_instance()
- self.connection.refresh_instance_security_rules(instance_ref)
-
- @catch_notimplementederror
- def test_refresh_provider_fw_rules(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.refresh_provider_fw_rules()
-
- @catch_notimplementederror
- def test_ensure_filtering_for_instance(self):
- instance = test_utils.get_test_instance(obj=True)
- network_info = test_utils.get_test_network_info()
- self.connection.ensure_filtering_rules_for_instance(instance,
- network_info)
-
- @catch_notimplementederror
- def test_unfilter_instance(self):
- instance_ref = test_utils.get_test_instance()
- network_info = test_utils.get_test_network_info()
- self.connection.unfilter_instance(instance_ref, network_info)
-
- @catch_notimplementederror
- def test_live_migration(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.live_migration(self.ctxt, instance_ref, 'otherhost',
- lambda *a: None, lambda *a: None)
-
- @catch_notimplementederror
- def _check_available_resource_fields(self, host_status):
- keys = ['vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
- 'memory_mb_used', 'hypervisor_type', 'hypervisor_version',
- 'hypervisor_hostname', 'cpu_info', 'disk_available_least',
- 'supported_instances']
- for key in keys:
- self.assertIn(key, host_status)
- self.assertIsInstance(host_status['hypervisor_version'], int)
-
- @catch_notimplementederror
- def test_get_available_resource(self):
- available_resource = self.connection.get_available_resource(
- 'myhostname')
- self._check_available_resource_fields(available_resource)
-
- @catch_notimplementederror
- def test_get_available_nodes(self):
- self.connection.get_available_nodes(False)
-
- @catch_notimplementederror
- def _check_host_cpu_status_fields(self, host_cpu_status):
- self.assertIn('kernel', host_cpu_status)
- self.assertIn('idle', host_cpu_status)
- self.assertIn('user', host_cpu_status)
- self.assertIn('iowait', host_cpu_status)
- self.assertIn('frequency', host_cpu_status)
-
- @catch_notimplementederror
- def test_get_host_cpu_stats(self):
- host_cpu_status = self.connection.get_host_cpu_stats()
- self._check_host_cpu_status_fields(host_cpu_status)
-
- @catch_notimplementederror
- def test_set_host_enabled(self):
- self.connection.set_host_enabled('a useless argument?', True)
-
- @catch_notimplementederror
- def test_get_host_uptime(self):
- self.connection.get_host_uptime('a useless argument?')
-
- @catch_notimplementederror
- def test_host_power_action_reboot(self):
- self.connection.host_power_action('a useless argument?', 'reboot')
-
- @catch_notimplementederror
- def test_host_power_action_shutdown(self):
- self.connection.host_power_action('a useless argument?', 'shutdown')
-
- @catch_notimplementederror
- def test_host_power_action_startup(self):
- self.connection.host_power_action('a useless argument?', 'startup')
-
- @catch_notimplementederror
- def test_add_to_aggregate(self):
- self.connection.add_to_aggregate(self.ctxt, 'aggregate', 'host')
-
- @catch_notimplementederror
- def test_remove_from_aggregate(self):
- self.connection.remove_from_aggregate(self.ctxt, 'aggregate', 'host')
-
- def test_events(self):
- got_events = []
-
- def handler(event):
- got_events.append(event)
-
- self.connection.register_event_listener(handler)
-
- event1 = virtevent.LifecycleEvent(
- "cef19ce0-0ca2-11df-855d-b19fbce37686",
- virtevent.EVENT_LIFECYCLE_STARTED)
- event2 = virtevent.LifecycleEvent(
- "cef19ce0-0ca2-11df-855d-b19fbce37686",
- virtevent.EVENT_LIFECYCLE_PAUSED)
-
- self.connection.emit_event(event1)
- self.connection.emit_event(event2)
- want_events = [event1, event2]
- self.assertEqual(want_events, got_events)
-
- event3 = virtevent.LifecycleEvent(
- "cef19ce0-0ca2-11df-855d-b19fbce37686",
- virtevent.EVENT_LIFECYCLE_RESUMED)
- event4 = virtevent.LifecycleEvent(
- "cef19ce0-0ca2-11df-855d-b19fbce37686",
- virtevent.EVENT_LIFECYCLE_STOPPED)
-
- self.connection.emit_event(event3)
- self.connection.emit_event(event4)
-
- want_events = [event1, event2, event3, event4]
- self.assertEqual(want_events, got_events)
-
- def test_event_bad_object(self):
- # Passing in something which does not inherit
- # from virtevent.Event
-
- def handler(event):
- pass
-
- self.connection.register_event_listener(handler)
-
- badevent = {
- "foo": "bar"
- }
-
- self.assertRaises(ValueError,
- self.connection.emit_event,
- badevent)
-
- def test_event_bad_callback(self):
- # Check that if a callback raises an exception,
- # it does not propagate back out of the
- # 'emit_event' call
-
- def handler(event):
- raise Exception("Hit Me!")
-
- self.connection.register_event_listener(handler)
-
- event1 = virtevent.LifecycleEvent(
- "cef19ce0-0ca2-11df-855d-b19fbce37686",
- virtevent.EVENT_LIFECYCLE_STARTED)
-
- self.connection.emit_event(event1)
-
- def test_set_bootable(self):
- self.assertRaises(NotImplementedError, self.connection.set_bootable,
- 'instance', True)
-
- @catch_notimplementederror
- def test_get_instance_disk_info(self):
- # This should be implemented by any driver that supports live migrate.
- instance_ref, network_info = self._get_running_instance()
- self.connection.get_instance_disk_info(instance_ref['name'],
- block_device_info={})
-
-
-class AbstractDriverTestCase(_VirtDriverTestCase, test.TestCase):
- def setUp(self):
- self.driver_module = "nova.virt.driver.ComputeDriver"
- super(AbstractDriverTestCase, self).setUp()
-
-
-class FakeConnectionTestCase(_VirtDriverTestCase, test.TestCase):
- def setUp(self):
- self.driver_module = 'nova.virt.fake.FakeDriver'
- fake.set_nodes(['myhostname'])
- super(FakeConnectionTestCase, self).setUp()
-
- def _check_available_resource_fields(self, host_status):
- super(FakeConnectionTestCase, self)._check_available_resource_fields(
- host_status)
-
- hypervisor_type = host_status['hypervisor_type']
- supported_instances = host_status['supported_instances']
- try:
- # supported_instances could be JSON wrapped
- supported_instances = jsonutils.loads(supported_instances)
- except TypeError:
- pass
- self.assertTrue(any(hypervisor_type in x for x in supported_instances))
-
-
-class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
-
- REQUIRES_LOCKING = True
-
- def setUp(self):
- # Point _VirtDriverTestCase at the right module
- self.driver_module = 'nova.virt.libvirt.LibvirtDriver'
- super(LibvirtConnTestCase, self).setUp()
- self.stubs.Set(self.connection,
- '_set_host_enabled', mock.MagicMock())
- self.useFixture(fixtures.MonkeyPatch(
- 'nova.context.get_admin_context',
- self._fake_admin_context))
-
- def _fake_admin_context(self, *args, **kwargs):
- return self.ctxt
-
- def test_force_hard_reboot(self):
- self.flags(wait_soft_reboot_seconds=0, group='libvirt')
- self.test_reboot()
-
- def test_migrate_disk_and_power_off(self):
- # there is lack of fake stuff to execute this method. so pass.
- self.skipTest("Test nothing, but this method"
- " needed to override superclass.")
-
- def test_internal_set_host_enabled(self):
- self.mox.UnsetStubs()
- service_mock = mock.MagicMock()
-
- # Previous status of the service: disabled: False
- service_mock.configure_mock(disabled_reason='None',
- disabled=False)
- with mock.patch.object(objects.Service, "get_by_compute_host",
- return_value=service_mock):
- self.connection._set_host_enabled(False, 'ERROR!')
- self.assertTrue(service_mock.disabled)
- self.assertEqual(service_mock.disabled_reason, 'AUTO: ERROR!')
-
- def test_set_host_enabled_when_auto_disabled(self):
- self.mox.UnsetStubs()
- service_mock = mock.MagicMock()
-
- # Previous status of the service: disabled: True, 'AUTO: ERROR'
- service_mock.configure_mock(disabled_reason='AUTO: ERROR',
- disabled=True)
- with mock.patch.object(objects.Service, "get_by_compute_host",
- return_value=service_mock):
- self.connection._set_host_enabled(True)
- self.assertFalse(service_mock.disabled)
- self.assertEqual(service_mock.disabled_reason, 'None')
-
- def test_set_host_enabled_when_manually_disabled(self):
- self.mox.UnsetStubs()
- service_mock = mock.MagicMock()
-
- # Previous status of the service: disabled: True, 'Manually disabled'
- service_mock.configure_mock(disabled_reason='Manually disabled',
- disabled=True)
- with mock.patch.object(objects.Service, "get_by_compute_host",
- return_value=service_mock):
- self.connection._set_host_enabled(True)
- self.assertTrue(service_mock.disabled)
- self.assertEqual(service_mock.disabled_reason, 'Manually disabled')
-
- def test_set_host_enabled_dont_override_manually_disabled(self):
- self.mox.UnsetStubs()
- service_mock = mock.MagicMock()
-
- # Previous status of the service: disabled: True, 'Manually disabled'
- service_mock.configure_mock(disabled_reason='Manually disabled',
- disabled=True)
- with mock.patch.object(objects.Service, "get_by_compute_host",
- return_value=service_mock):
- self.connection._set_host_enabled(False, 'ERROR!')
- self.assertTrue(service_mock.disabled)
- self.assertEqual(service_mock.disabled_reason, 'Manually disabled')
-
- @catch_notimplementederror
- @mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs')
- def test_unplug_vifs_with_destroy_vifs_false(self, unplug_vifs_mock):
- instance_ref, network_info = self._get_running_instance()
- self.connection.cleanup(self.ctxt, instance_ref, network_info,
- destroy_vifs=False)
- self.assertEqual(unplug_vifs_mock.call_count, 0)
-
- @catch_notimplementederror
- @mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs')
- def test_unplug_vifs_with_destroy_vifs_true(self, unplug_vifs_mock):
- instance_ref, network_info = self._get_running_instance()
- self.connection.cleanup(self.ctxt, instance_ref, network_info,
- destroy_vifs=True)
- self.assertEqual(unplug_vifs_mock.call_count, 1)
- unplug_vifs_mock.assert_called_once_with(instance_ref,
- network_info, True)
diff --git a/nova/tests/virt/vmwareapi/stubs.py b/nova/tests/virt/vmwareapi/stubs.py
deleted file mode 100644
index fb207176cc..0000000000
--- a/nova/tests/virt/vmwareapi/stubs.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# Copyright (c) 2011 Citrix Systems, Inc.
-# Copyright 2011 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Stubouts for the test suite
-"""
-
-import contextlib
-
-import mock
-from oslo.vmware import exceptions as vexc
-
-from nova import db
-from nova.tests import test_flavors
-from nova.tests.virt.vmwareapi import fake
-from nova.virt.vmwareapi import driver
-from nova.virt.vmwareapi import images
-from nova.virt.vmwareapi import network_util
-
-
-def fake_get_vim_object(arg):
- """Stubs out the VMwareAPISession's get_vim_object method."""
- return fake.FakeVim()
-
-
-@property
-def fake_vim_prop(arg):
- """Stubs out the VMwareAPISession's vim property access method."""
- return fake.get_fake_vim_object(arg)
-
-
-def fake_is_vim_object(arg, module):
- """Stubs out the VMwareAPISession's is_vim_object method."""
- return isinstance(module, fake.FakeVim)
-
-
-def fake_temp_method_exception():
- raise vexc.VimFaultException(
- [vexc.NOT_AUTHENTICATED],
- "Session Empty/Not Authenticated")
-
-
-def fake_temp_session_exception():
- raise vexc.VimConnectionException("it's a fake!",
- "Session Exception")
-
-
-def fake_session_file_exception():
- fault_list = [vexc.FILE_ALREADY_EXISTS]
- raise vexc.VimFaultException(fault_list,
- Exception('fake'))
-
-
-def fake_session_permission_exception():
- fault_list = [vexc.NO_PERMISSION]
- fault_string = 'Permission to perform this operation was denied.'
- details = {'privilegeId': 'Resource.AssignVMToPool', 'object': 'domain-c7'}
- raise vexc.VimFaultException(fault_list, fault_string, details=details)
-
-
-def _fake_flavor_get(context, id):
- for instance_type in test_flavors.DEFAULT_FLAVORS:
- if instance_type['id'] == id:
- return instance_type
- return {'memory_mb': 128, 'root_gb': 0, 'deleted_at': None,
- 'name': 'm1.micro', 'deleted': 0, 'created_at': None,
- 'ephemeral_gb': 0, 'updated_at': None,
- 'disabled': False, 'vcpus': 1, 'extra_specs': {},
- 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True,
- 'flavorid': '1', 'vcpu_weight': None, 'id': 2}
-
-
-def set_stubs(stubs):
- """Set the stubs."""
- stubs.Set(network_util, 'get_network_with_the_name',
- fake.fake_get_network)
- stubs.Set(images, 'upload_image', fake.fake_upload_image)
- stubs.Set(images, 'fetch_image', fake.fake_fetch_image)
- stubs.Set(driver.VMwareAPISession, "vim", fake_vim_prop)
- stubs.Set(driver.VMwareAPISession, "_is_vim_object",
- fake_is_vim_object)
- stubs.Set(db, 'flavor_get', _fake_flavor_get)
-
-
-def fake_suds_context(calls=None):
- """Generate a suds client which automatically mocks all SOAP method calls.
-
- Calls are stored in <calls>, indexed by the name of the call. If you need
- to mock the behaviour of specific API calls you can pre-populate <calls>
- with appropriate Mock objects.
- """
-
- calls = calls or {}
-
- class fake_factory:
- def create(self, name):
- return mock.NonCallableMagicMock(name=name)
-
- class fake_service:
- def __getattr__(self, attr_name):
- if attr_name in calls:
- return calls[attr_name]
-
- mock_call = mock.MagicMock(name=attr_name)
- calls[attr_name] = mock_call
- return mock_call
-
- class fake_client:
- def __init__(self, wdsl_url, **kwargs):
- self.service = fake_service()
- self.factory = fake_factory()
-
- return contextlib.nested(
- mock.patch('suds.client.Client', fake_client),
-
- # As we're not connecting to a real host there's no need to wait
- # between retries
- mock.patch.object(driver, 'TIME_BETWEEN_API_CALL_RETRIES', 0)
- )
diff --git a/nova/tests/virt/vmwareapi/test_configdrive.py b/nova/tests/virt/vmwareapi/test_configdrive.py
deleted file mode 100644
index 3cb06dee18..0000000000
--- a/nova/tests/virt/vmwareapi/test_configdrive.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# Copyright 2013 IBM Corp.
-# Copyright 2011 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-import mox
-
-from nova import context
-from nova.image import glance
-from nova import test
-from nova.tests import fake_instance
-import nova.tests.image.fake
-from nova.tests import utils
-from nova.tests.virt.vmwareapi import fake as vmwareapi_fake
-from nova.tests.virt.vmwareapi import stubs
-from nova.virt import fake
-from nova.virt.vmwareapi import driver
-from nova.virt.vmwareapi import images
-from nova.virt.vmwareapi import vm_util
-from nova.virt.vmwareapi import vmops
-
-
-class ConfigDriveTestCase(test.NoDBTestCase):
-
- REQUIRES_LOCKING = True
-
- @mock.patch.object(driver.VMwareVCDriver, '_register_openstack_extension')
- def setUp(self, mock_register):
- super(ConfigDriveTestCase, self).setUp()
- vm_util.vm_refs_cache_reset()
- self.context = context.RequestContext('fake', 'fake', is_admin=False)
- cluster_name = 'test_cluster'
- self.flags(cluster_name=[cluster_name],
- host_ip='test_url',
- host_username='test_username',
- host_password='test_pass',
- use_linked_clone=False, group='vmware')
- self.flags(vnc_enabled=False)
- vmwareapi_fake.reset()
- stubs.set_stubs(self.stubs)
- nova.tests.image.fake.stub_out_image_service(self.stubs)
- self.conn = driver.VMwareVCDriver(fake.FakeVirtAPI)
- self.network_info = utils.get_test_network_info()
- self.node_name = '%s(%s)' % (self.conn.dict_mors.keys()[0],
- cluster_name)
- image_ref = nova.tests.image.fake.get_valid_image_id()
- instance_values = {
- 'vm_state': 'building',
- 'project_id': 'fake',
- 'user_id': 'fake',
- 'name': '1',
- 'kernel_id': '1',
- 'ramdisk_id': '1',
- 'mac_addresses': [{'address': 'de:ad:be:ef:be:ef'}],
- 'memory_mb': 8192,
- 'flavor': 'm1.large',
- 'instance_type_id': 0,
- 'vcpus': 4,
- 'root_gb': 80,
- 'image_ref': image_ref,
- 'host': 'fake_host',
- 'task_state': 'scheduling',
- 'reservation_id': 'r-3t8muvr0',
- 'id': 1,
- 'uuid': 'fake-uuid',
- 'node': self.node_name,
- 'metadata': [],
- 'expected_attrs': ['system_metadata'],
- }
- self.test_instance = fake_instance.fake_instance_obj(self.context,
- **instance_values)
-
- (image_service, image_id) = glance.get_remote_image_service(context,
- image_ref)
- metadata = image_service.show(context, image_id)
- self.image = {
- 'id': image_ref,
- 'disk_format': 'vmdk',
- 'size': int(metadata['size']),
- }
-
- class FakeInstanceMetadata(object):
- def __init__(self, instance, content=None, extra_md=None):
- pass
-
- def metadata_for_config_drive(self):
- return []
-
- self.useFixture(fixtures.MonkeyPatch(
- 'nova.api.metadata.base.InstanceMetadata',
- FakeInstanceMetadata))
-
- def fake_make_drive(_self, _path):
- pass
- # We can't actually make a config drive v2 because ensure_tree has
- # been faked out
- self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
- 'make_drive', fake_make_drive)
-
- def fake_upload_iso_to_datastore(iso_path, instance, **kwargs):
- pass
- self.stubs.Set(images,
- 'upload_iso_to_datastore',
- fake_upload_iso_to_datastore)
-
- def tearDown(self):
- super(ConfigDriveTestCase, self).tearDown()
- vmwareapi_fake.cleanup()
- nova.tests.image.fake.FakeImageService_reset()
-
- def _spawn_vm(self, injected_files=None, admin_password=None,
- block_device_info=None):
-
- injected_files = injected_files or []
- self.conn.spawn(self.context, self.test_instance, self.image,
- injected_files=injected_files,
- admin_password=admin_password,
- network_info=self.network_info,
- block_device_info=block_device_info)
-
- def test_create_vm_with_config_drive_verify_method_invocation(self):
- self.test_instance.config_drive = 'True'
- self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive')
- self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
- self.conn._vmops._create_config_drive(self.test_instance,
- mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg()
- ).AndReturn('[ds1] fake.iso')
- self.conn._vmops._attach_cdrom_to_vm(mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg(),
- mox.IgnoreArg())
- self.mox.ReplayAll()
- # if spawn does not call the _create_config_drive or
- # _attach_cdrom_to_vm call with the correct set of parameters
- # then mox's VerifyAll will throw a Expected methods never called
- # Exception
- self._spawn_vm()
-
- def test_create_vm_without_config_drive(self):
- self.test_instance.config_drive = None
- self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive')
- self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
- self.mox.ReplayAll()
- # if spawn ends up calling _create_config_drive or
- # _attach_cdrom_to_vm then mox will log a Unexpected method call
- # exception
- self._spawn_vm()
-
- def test_create_vm_with_config_drive(self):
- self.test_instance.config_drive = 'True'
- self._spawn_vm()
diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py
deleted file mode 100644
index 384f579ffa..0000000000
--- a/nova/tests/virt/vmwareapi/test_driver_api.py
+++ /dev/null
@@ -1,2650 +0,0 @@
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
-# Copyright (c) 2012 VMware, Inc.
-# Copyright (c) 2011 Citrix Systems, Inc.
-# Copyright 2011 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Test suite for VMwareAPI.
-"""
-
-import collections
-import contextlib
-import copy
-import datetime
-
-from eventlet import greenthread
-import mock
-import mox
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-from oslo.utils import timeutils
-from oslo.utils import units
-from oslo.vmware import exceptions as vexc
-from oslo.vmware import pbm
-from oslo.vmware import vim
-from oslo.vmware import vim_util as oslo_vim_util
-import suds
-
-from nova import block_device
-from nova.compute import api as compute_api
-from nova.compute import power_state
-from nova.compute import task_states
-from nova.compute import vm_states
-from nova import context
-from nova import exception
-from nova.image import glance
-from nova.network import model as network_model
-from nova.openstack.common import uuidutils
-from nova import test
-from nova.tests import fake_instance
-import nova.tests.image.fake
-from nova.tests import matchers
-from nova.tests import test_flavors
-from nova.tests import utils
-from nova.tests.virt.vmwareapi import fake as vmwareapi_fake
-from nova.tests.virt.vmwareapi import stubs
-from nova import utils as nova_utils
-from nova.virt import driver as v_driver
-from nova.virt.vmwareapi import constants
-from nova.virt.vmwareapi import driver
-from nova.virt.vmwareapi import ds_util
-from nova.virt.vmwareapi import error_util
-from nova.virt.vmwareapi import imagecache
-from nova.virt.vmwareapi import images
-from nova.virt.vmwareapi import vif
-from nova.virt.vmwareapi import vim_util
-from nova.virt.vmwareapi import vm_util
-from nova.virt.vmwareapi import vmops
-from nova.virt.vmwareapi import volumeops
-
-CONF = cfg.CONF
-CONF.import_opt('host', 'nova.netconf')
-CONF.import_opt('remove_unused_original_minimum_age_seconds',
- 'nova.virt.imagecache')
-
-
-class fake_vm_ref(object):
- def __init__(self):
- self.value = 4
- self._type = 'VirtualMachine'
-
-
-class fake_service_content(object):
- def __init__(self):
- self.ServiceContent = vmwareapi_fake.DataObject()
- self.ServiceContent.fake = 'fake'
-
-
-class VMwareSudsTest(test.NoDBTestCase):
-
- def setUp(self):
- super(VMwareSudsTest, self).setUp()
-
- def new_client_init(self, url, **kwargs):
- return
-
- mock.patch.object(suds.client.Client,
- '__init__', new=new_client_init).start()
- self.vim = self._vim_create()
- self.addCleanup(mock.patch.stopall)
-
- def _mock_getattr(self, attr_name):
- self.assertEqual("RetrieveServiceContent", attr_name)
- return lambda obj, **kwargs: fake_service_content()
-
- def _vim_create(self):
- with mock.patch.object(vim.Vim, '__getattr__', self._mock_getattr):
- return vim.Vim()
-
- def test_exception_with_deepcopy(self):
- self.assertIsNotNone(self.vim)
- self.assertRaises(vexc.VimException,
- copy.deepcopy, self.vim)
-
-
-def _fake_create_session(inst):
- session = vmwareapi_fake.DataObject()
- session.key = 'fake_key'
- session.userName = 'fake_username'
- session._pbm_wsdl_loc = None
- session._pbm = None
- inst._session = session
-
-
-class VMwareDriverStartupTestCase(test.NoDBTestCase):
- def _start_driver_with_flags(self, expected_exception_type, startup_flags):
- self.flags(**startup_flags)
- with mock.patch(
- 'nova.virt.vmwareapi.driver.VMwareAPISession.__init__'):
- e = self.assertRaises(
- Exception, driver.VMwareVCDriver, None) # noqa
- self.assertIs(type(e), expected_exception_type)
-
- def test_start_driver_no_user(self):
- self._start_driver_with_flags(
- Exception,
- dict(host_ip='ip', host_password='password',
- group='vmware'))
-
- def test_start_driver_no_host(self):
- self._start_driver_with_flags(
- Exception,
- dict(host_username='username', host_password='password',
- group='vmware'))
-
- def test_start_driver_no_password(self):
- self._start_driver_with_flags(
- Exception,
- dict(host_ip='ip', host_username='username',
- group='vmware'))
-
- def test_start_driver_with_user_host_password(self):
- # Getting the InvalidInput exception signifies that no exception
- # is raised regarding missing user/password/host
- self._start_driver_with_flags(
- nova.exception.InvalidInput,
- dict(host_ip='ip', host_password='password',
- host_username="user", datastore_regex="bad(regex",
- group='vmware'))
-
-
-class VMwareSessionTestCase(test.NoDBTestCase):
-
- @mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
- return_value=False)
- def test_call_method(self, mock_is_vim):
- with contextlib.nested(
- mock.patch.object(driver.VMwareAPISession, '_create_session',
- _fake_create_session),
- mock.patch.object(driver.VMwareAPISession, 'invoke_api'),
- ) as (fake_create, fake_invoke):
- session = driver.VMwareAPISession()
- session._vim = mock.Mock()
- module = mock.Mock()
- session._call_method(module, 'fira')
- fake_invoke.assert_called_once_with(module, 'fira', session._vim)
-
- @mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
- return_value=True)
- def test_call_method_vim(self, mock_is_vim):
- with contextlib.nested(
- mock.patch.object(driver.VMwareAPISession, '_create_session',
- _fake_create_session),
- mock.patch.object(driver.VMwareAPISession, 'invoke_api'),
- ) as (fake_create, fake_invoke):
- session = driver.VMwareAPISession()
- module = mock.Mock()
- session._call_method(module, 'fira')
- fake_invoke.assert_called_once_with(module, 'fira')
-
-
-class VMwareAPIVMTestCase(test.NoDBTestCase):
- """Unit tests for Vmware API connection calls."""
-
- REQUIRES_LOCKING = True
-
- @mock.patch.object(driver.VMwareVCDriver, '_register_openstack_extension')
- def setUp(self, mock_register, create_connection=True):
- super(VMwareAPIVMTestCase, self).setUp()
- vm_util.vm_refs_cache_reset()
- self.context = context.RequestContext('fake', 'fake', is_admin=False)
- cluster_name = 'test_cluster'
- cluster_name2 = 'test_cluster2'
- self.flags(cluster_name=[cluster_name, cluster_name2],
- host_ip='test_url',
- host_username='test_username',
- host_password='test_pass',
- api_retry_count=1,
- use_linked_clone=False, group='vmware')
- self.flags(vnc_enabled=False,
- image_cache_subdirectory_name='vmware_base',
- my_ip='')
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id, self.project_id)
- stubs.set_stubs(self.stubs)
- vmwareapi_fake.reset()
- nova.tests.image.fake.stub_out_image_service(self.stubs)
- if create_connection:
- self.conn = driver.VMwareVCDriver(None, False)
- self._set_exception_vars()
- self.node_name = self.conn._resources.keys()[0]
- self.node_name2 = self.conn._resources.keys()[1]
- if cluster_name2 in self.node_name2:
- self.ds = 'ds1'
- else:
- self.ds = 'ds2'
-
- self.vim = vmwareapi_fake.FakeVim()
-
- # NOTE(vish): none of the network plugging code is actually
- # being tested
- self.network_info = utils.get_test_network_info()
- image_ref = nova.tests.image.fake.get_valid_image_id()
- (image_service, image_id) = glance.get_remote_image_service(
- self.context, image_ref)
- metadata = image_service.show(self.context, image_id)
- self.image = {
- 'id': image_ref,
- 'disk_format': 'vmdk',
- 'size': int(metadata['size']),
- }
- self.fake_image_uuid = self.image['id']
- nova.tests.image.fake.stub_out_image_service(self.stubs)
- self.vnc_host = 'ha-host'
- self.instance_without_compute = {'node': None,
- 'vm_state': 'building',
- 'project_id': 'fake',
- 'user_id': 'fake',
- 'name': '1',
- 'display_description': '1',
- 'kernel_id': '1',
- 'ramdisk_id': '1',
- 'mac_addresses': [
- {'address': 'de:ad:be:ef:be:ef'}
- ],
- 'memory_mb': 8192,
- 'instance_type': 'm1.large',
- 'vcpus': 4,
- 'root_gb': 80,
- 'image_ref': self.image['id'],
- 'host': 'fake_host',
- 'task_state':
- 'scheduling',
- 'reservation_id': 'r-3t8muvr0',
- 'id': 1,
- 'uuid': 'fake-uuid',
- 'metadata': []}
-
- def tearDown(self):
- super(VMwareAPIVMTestCase, self).tearDown()
- vmwareapi_fake.cleanup()
- nova.tests.image.fake.FakeImageService_reset()
-
- def test_get_host_ip_addr(self):
- self.assertEqual('test_url', self.conn.get_host_ip_addr())
-
- def test_init_host_with_no_session(self):
- self.conn._session = mock.Mock()
- self.conn._session.vim = None
- self.conn.init_host('fake_host')
- self.conn._session._create_session.assert_called_once_with()
-
- def test_init_host(self):
- try:
- self.conn.init_host("fake_host")
- except Exception as ex:
- self.fail("init_host raised: %s" % ex)
-
- def _set_exception_vars(self):
- self.wait_task = self.conn._session._wait_for_task
- self.call_method = self.conn._session._call_method
- self.task_ref = None
- self.exception = False
-
- def test_cleanup_host(self):
- self.conn.init_host("fake_host")
- try:
- self.conn.cleanup_host("fake_host")
- except Exception as ex:
- self.fail("cleanup_host raised: %s" % ex)
-
- @mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__')
- def test_cleanup_host_direct(self, mock_init):
- mock_init.return_value = None
- vcdriver = driver.VMwareVCDriver(None, False)
- vcdriver._session = mock.Mock()
- vcdriver.cleanup_host("foo")
- vcdriver._session.vim.client.service.Logout.assert_called_once_with(
- vcdriver._session.vim.service_content.sessionManager
- )
-
- @mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__')
- def test_cleanup_host_direct_with_bad_logout(self, mock_init):
- mock_init.return_value = None
- vcdriver = driver.VMwareVCDriver(None, False)
- vcdriver._session = mock.Mock()
- fault = suds.WebFault(mock.Mock(), mock.Mock())
- vcdriver._session.vim.client.service.Logout.side_effect = fault
- vcdriver.cleanup_host("foo")
-
- def test_driver_capabilities(self):
- self.assertTrue(self.conn.capabilities['has_imagecache'])
- self.assertFalse(self.conn.capabilities['supports_recreate'])
-
- def test_configuration_linked_clone(self):
- self.flags(use_linked_clone=None, group='vmware')
- self.assertRaises(vexc.UseLinkedCloneConfigurationFault,
- self.conn._validate_configuration)
-
- @mock.patch.object(pbm, 'get_profile_id_by_name')
- def test_configuration_pbm(self, get_profile_mock):
- get_profile_mock.return_value = 'fake-profile'
- self.flags(pbm_enabled=True,
- pbm_default_policy='fake-policy',
- pbm_wsdl_location='fake-location', group='vmware')
- self.conn._validate_configuration()
-
- @mock.patch.object(pbm, 'get_profile_id_by_name')
- def test_configuration_pbm_bad_default(self, get_profile_mock):
- get_profile_mock.return_value = None
- self.flags(pbm_enabled=True,
- pbm_wsdl_location='fake-location',
- pbm_default_policy='fake-policy', group='vmware')
- self.assertRaises(error_util.PbmDefaultPolicyDoesNotExist,
- self.conn._validate_configuration)
-
- def test_login_retries(self):
- self.attempts = 0
- self.login_session = vmwareapi_fake.FakeVim()._login()
-
- def _fake_login(_self):
- self.attempts += 1
- if self.attempts == 1:
- raise vexc.VimConnectionException('Here is my fake exception')
- return self.login_session
-
- def _fake_check_session(_self):
- return True
-
- self.stubs.Set(vmwareapi_fake.FakeVim, '_login', _fake_login)
- self.stubs.Set(vmwareapi_fake.FakeVim, '_check_session',
- _fake_check_session)
-
- with mock.patch.object(greenthread, 'sleep'):
- self.conn = driver.VMwareAPISession()
- self.assertEqual(self.attempts, 2)
-
- def _get_instance_type_by_name(self, type):
- for instance_type in test_flavors.DEFAULT_FLAVORS:
- if instance_type['name'] == type:
- return instance_type
- if type == 'm1.micro':
- return {'memory_mb': 128, 'root_gb': 0, 'deleted_at': None,
- 'name': 'm1.micro', 'deleted': 0, 'created_at': None,
- 'ephemeral_gb': 0, 'updated_at': None,
- 'disabled': False, 'vcpus': 1, 'extra_specs': {},
- 'swap': 0, 'rxtx_factor': 1.0, 'is_public': True,
- 'flavorid': '1', 'vcpu_weight': None, 'id': 2}
-
- def _create_instance(self, node=None, set_image_ref=True,
- uuid=None, instance_type='m1.large'):
- if not node:
- node = self.node_name
- if not uuid:
- uuid = uuidutils.generate_uuid()
- self.type_data = self._get_instance_type_by_name(instance_type)
- values = {'name': 'fake_name',
- 'id': 1,
- 'uuid': uuid,
- 'project_id': self.project_id,
- 'user_id': self.user_id,
- 'kernel_id': "fake_kernel_uuid",
- 'ramdisk_id': "fake_ramdisk_uuid",
- 'mac_address': "de:ad:be:ef:be:ef",
- 'flavor': instance_type,
- 'node': node,
- 'memory_mb': self.type_data['memory_mb'],
- 'root_gb': self.type_data['root_gb'],
- 'ephemeral_gb': self.type_data['ephemeral_gb'],
- 'vcpus': self.type_data['vcpus'],
- 'swap': self.type_data['swap'],
- 'expected_attrs': ['system_metadata'],
- }
- if set_image_ref:
- values['image_ref'] = self.fake_image_uuid
- self.instance_node = node
- self.uuid = uuid
- self.instance = fake_instance.fake_instance_obj(
- self.context, **values)
-
- def _create_vm(self, node=None, num_instances=1, uuid=None,
- instance_type='m1.large', powered_on=True):
- """Create and spawn the VM."""
- if not node:
- node = self.node_name
- self._create_instance(node=node, uuid=uuid,
- instance_type=instance_type)
- self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
- self.conn.spawn(self.context, self.instance, self.image,
- injected_files=[], admin_password=None,
- network_info=self.network_info,
- block_device_info=None)
- self._check_vm_record(num_instances=num_instances,
- powered_on=powered_on)
- self.assertIsNotNone(vm_util.vm_ref_cache_get(self.uuid))
-
- def _get_vm_record(self):
- # Get record for VM
- vms = vmwareapi_fake._get_objects("VirtualMachine")
- for vm in vms.objects:
- if vm.get('name') == self.uuid:
- return vm
- self.fail('Unable to find VM backing!')
-
- def _check_vm_record(self, num_instances=1, powered_on=True):
- """Check if the spawned VM's properties correspond to the instance in
- the db.
- """
- instances = self.conn.list_instances()
- self.assertEqual(len(instances), num_instances)
-
- # Get Nova record for VM
- vm_info = self.conn.get_info({'uuid': self.uuid,
- 'name': 1,
- 'node': self.instance_node})
-
- vm = self._get_vm_record()
-
- # Check that m1.large above turned into the right thing.
- mem_kib = long(self.type_data['memory_mb']) << 10
- vcpus = self.type_data['vcpus']
- self.assertEqual(vm_info['max_mem'], mem_kib)
- self.assertEqual(vm_info['mem'], mem_kib)
- self.assertEqual(vm.get("summary.config.instanceUuid"), self.uuid)
- self.assertEqual(vm.get("summary.config.numCpu"), vcpus)
- self.assertEqual(vm.get("summary.config.memorySizeMB"),
- self.type_data['memory_mb'])
-
- self.assertEqual(
- vm.get("config.hardware.device").VirtualDevice[2].obj_name,
- "ns0:VirtualE1000")
- if powered_on:
- # Check that the VM is running according to Nova
- self.assertEqual(power_state.RUNNING, vm_info['state'])
-
- # Check that the VM is running according to vSphere API.
- self.assertEqual('poweredOn', vm.get("runtime.powerState"))
- else:
- # Check that the VM is not running according to Nova
- self.assertEqual(power_state.SHUTDOWN, vm_info['state'])
-
- # Check that the VM is not running according to vSphere API.
- self.assertEqual('poweredOff', vm.get("runtime.powerState"))
-
- found_vm_uuid = False
- found_iface_id = False
- extras = vm.get("config.extraConfig")
- for c in extras.OptionValue:
- if (c.key == "nvp.vm-uuid" and c.value == self.instance['uuid']):
- found_vm_uuid = True
- if (c.key == "nvp.iface-id.0" and c.value == "vif-xxx-yyy-zzz"):
- found_iface_id = True
-
- self.assertTrue(found_vm_uuid)
- self.assertTrue(found_iface_id)
-
- def _check_vm_info(self, info, pwr_state=power_state.RUNNING):
- """Check if the get_info returned values correspond to the instance
- object in the db.
- """
- mem_kib = long(self.type_data['memory_mb']) << 10
- self.assertEqual(info["state"], pwr_state)
- self.assertEqual(info["max_mem"], mem_kib)
- self.assertEqual(info["mem"], mem_kib)
- self.assertEqual(info["num_cpu"], self.type_data['vcpus'])
-
- def test_instance_exists(self):
- self._create_vm()
- self.assertTrue(self.conn.instance_exists(self.instance))
- invalid_instance = dict(uuid='foo', name='bar', node=self.node_name)
- self.assertFalse(self.conn.instance_exists(invalid_instance))
-
- def test_list_instances(self):
- instances = self.conn.list_instances()
- self.assertEqual(len(instances), 0)
-
- def test_list_instances_1(self):
- self._create_vm()
- instances = self.conn.list_instances()
- self.assertEqual(len(instances), 1)
-
- def test_list_instance_uuids(self):
- self._create_vm()
- uuids = self.conn.list_instance_uuids()
- self.assertEqual(len(uuids), 1)
-
- def test_list_instance_uuids_invalid_uuid(self):
- self._create_vm(uuid='fake_id')
- uuids = self.conn.list_instance_uuids()
- self.assertEqual(len(uuids), 0)
-
- def _cached_files_exist(self, exists=True):
- cache = ds_util.DatastorePath(self.ds, 'vmware_base',
- self.fake_image_uuid,
- '%s.vmdk' % self.fake_image_uuid)
- if exists:
- self.assertTrue(vmwareapi_fake.get_file(str(cache)))
- else:
- self.assertFalse(vmwareapi_fake.get_file(str(cache)))
-
- @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
- 'from_image')
- def test_instance_dir_disk_created(self, mock_from_image):
- """Test image file is cached when even when use_linked_clone
- is False
- """
- img_props = images.VMwareImage(
- image_id=self.fake_image_uuid,
- linked_clone=False)
-
- mock_from_image.return_value = img_props
- self._create_vm()
- path = ds_util.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
- self.assertTrue(vmwareapi_fake.get_file(str(path)))
- self._cached_files_exist()
-
- @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
- 'from_image')
- def test_cache_dir_disk_created(self, mock_from_image):
- """Test image disk is cached when use_linked_clone is True."""
- self.flags(use_linked_clone=True, group='vmware')
-
- img_props = images.VMwareImage(
- image_id=self.fake_image_uuid,
- file_size=1 * units.Ki,
- disk_type=constants.DISK_TYPE_SPARSE)
-
- mock_from_image.return_value = img_props
-
- self._create_vm()
- path = ds_util.DatastorePath(self.ds, 'vmware_base',
- self.fake_image_uuid,
- '%s.vmdk' % self.fake_image_uuid)
- root = ds_util.DatastorePath(self.ds, 'vmware_base',
- self.fake_image_uuid,
- '%s.80.vmdk' % self.fake_image_uuid)
- self.assertTrue(vmwareapi_fake.get_file(str(path)))
- self.assertTrue(vmwareapi_fake.get_file(str(root)))
-
- def _iso_disk_type_created(self, instance_type='m1.large'):
- self.image['disk_format'] = 'iso'
- self._create_vm(instance_type=instance_type)
- path = ds_util.DatastorePath(self.ds, 'vmware_base',
- self.fake_image_uuid,
- '%s.iso' % self.fake_image_uuid)
- self.assertTrue(vmwareapi_fake.get_file(str(path)))
-
- def test_iso_disk_type_created(self):
- self._iso_disk_type_created()
- path = ds_util.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
- self.assertTrue(vmwareapi_fake.get_file(str(path)))
-
- def test_iso_disk_type_created_with_root_gb_0(self):
- self._iso_disk_type_created(instance_type='m1.micro')
- path = ds_util.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
- self.assertFalse(vmwareapi_fake.get_file(str(path)))
-
- def test_iso_disk_cdrom_attach(self):
- iso_path = ds_util.DatastorePath(self.ds, 'vmware_base',
- self.fake_image_uuid,
- '%s.iso' % self.fake_image_uuid)
-
- def fake_attach_cdrom(vm_ref, instance, data_store_ref,
- iso_uploaded_path):
- self.assertEqual(iso_uploaded_path, str(iso_path))
-
- self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
- fake_attach_cdrom)
- self.image['disk_format'] = 'iso'
- self._create_vm()
-
- @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
- 'from_image')
- def test_iso_disk_cdrom_attach_with_config_drive(self,
- mock_from_image):
- img_props = images.VMwareImage(
- image_id=self.fake_image_uuid,
- file_size=80 * units.Gi,
- file_type='iso',
- linked_clone=False)
-
- mock_from_image.return_value = img_props
-
- self.flags(force_config_drive=True)
- iso_path = [
- ds_util.DatastorePath(self.ds, 'vmware_base',
- self.fake_image_uuid,
- '%s.iso' % self.fake_image_uuid),
- ds_util.DatastorePath(self.ds, 'fake-config-drive')]
- self.iso_index = 0
-
- def fake_create_config_drive(instance, injected_files, password,
- data_store_name, folder, uuid, cookies):
- return 'fake-config-drive'
-
- def fake_attach_cdrom(vm_ref, instance, data_store_ref,
- iso_uploaded_path):
- self.assertEqual(iso_uploaded_path, str(iso_path[self.iso_index]))
- self.iso_index += 1
-
- self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
- fake_attach_cdrom)
- self.stubs.Set(self.conn._vmops, '_create_config_drive',
- fake_create_config_drive)
-
- self.image['disk_format'] = 'iso'
- self._create_vm()
- self.assertEqual(self.iso_index, 2)
-
- def test_cdrom_attach_with_config_drive(self):
- self.flags(force_config_drive=True)
-
- iso_path = ds_util.DatastorePath(self.ds, 'fake-config-drive')
- self.cd_attach_called = False
-
- def fake_create_config_drive(instance, injected_files, password,
- data_store_name, folder, uuid, cookies):
- return 'fake-config-drive'
-
- def fake_attach_cdrom(vm_ref, instance, data_store_ref,
- iso_uploaded_path):
- self.assertEqual(iso_uploaded_path, str(iso_path))
- self.cd_attach_called = True
-
- self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
- fake_attach_cdrom)
- self.stubs.Set(self.conn._vmops, '_create_config_drive',
- fake_create_config_drive)
-
- self._create_vm()
- self.assertTrue(self.cd_attach_called)
-
- def test_spawn(self):
- self._create_vm()
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_spawn_vm_ref_cached(self):
- uuid = uuidutils.generate_uuid()
- self.assertIsNone(vm_util.vm_ref_cache_get(uuid))
- self._create_vm(uuid=uuid)
- self.assertIsNotNone(vm_util.vm_ref_cache_get(uuid))
-
- def _spawn_power_state(self, power_on):
- self._spawn = self.conn._vmops.spawn
- self._power_on = power_on
-
- def _fake_spawn(context, instance, image_meta, injected_files,
- admin_password, network_info, block_device_info=None,
- instance_name=None, power_on=True):
- return self._spawn(context, instance, image_meta,
- injected_files, admin_password, network_info,
- block_device_info=block_device_info,
- instance_name=instance_name,
- power_on=self._power_on)
-
- with (
- mock.patch.object(self.conn._vmops, 'spawn', _fake_spawn)
- ):
- self._create_vm(powered_on=power_on)
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- if power_on:
- self._check_vm_info(info, power_state.RUNNING)
- else:
- self._check_vm_info(info, power_state.SHUTDOWN)
-
- def test_spawn_no_power_on(self):
- self._spawn_power_state(False)
-
- def test_spawn_power_on(self):
- self._spawn_power_state(True)
-
- def test_spawn_root_size_0(self):
- self._create_vm(instance_type='m1.micro')
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- cache = ('[%s] vmware_base/%s/%s.vmdk' %
- (self.ds, self.fake_image_uuid, self.fake_image_uuid))
- gb_cache = ('[%s] vmware_base/%s/%s.0.vmdk' %
- (self.ds, self.fake_image_uuid, self.fake_image_uuid))
- self.assertTrue(vmwareapi_fake.get_file(cache))
- self.assertFalse(vmwareapi_fake.get_file(gb_cache))
-
- def _spawn_with_delete_exception(self, fault=None):
-
- def fake_call_method(module, method, *args, **kwargs):
- task_ref = self.call_method(module, method, *args, **kwargs)
- if method == "DeleteDatastoreFile_Task":
- self.exception = True
- task_mdo = vmwareapi_fake.create_task(method, "error",
- error_fault=fault)
- return task_mdo.obj
- return task_ref
-
- with (
- mock.patch.object(self.conn._session, '_call_method',
- fake_call_method)
- ):
- if fault:
- self._create_vm()
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- else:
- self.assertRaises(vexc.VMwareDriverException, self._create_vm)
- self.assertTrue(self.exception)
-
- def test_spawn_with_delete_exception_not_found(self):
- self._spawn_with_delete_exception(vmwareapi_fake.FileNotFound())
-
- def test_spawn_with_delete_exception_file_fault(self):
- self._spawn_with_delete_exception(vmwareapi_fake.FileFault())
-
- def test_spawn_with_delete_exception_cannot_delete_file(self):
- self._spawn_with_delete_exception(vmwareapi_fake.CannotDeleteFile())
-
- def test_spawn_with_delete_exception_file_locked(self):
- self._spawn_with_delete_exception(vmwareapi_fake.FileLocked())
-
- def test_spawn_with_delete_exception_general(self):
- self._spawn_with_delete_exception()
-
- def test_spawn_disk_extend(self):
- self.mox.StubOutWithMock(self.conn._vmops, '_extend_virtual_disk')
- requested_size = 80 * units.Mi
- self.conn._vmops._extend_virtual_disk(mox.IgnoreArg(),
- requested_size, mox.IgnoreArg(), mox.IgnoreArg())
- self.mox.ReplayAll()
- self._create_vm()
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_spawn_disk_extend_exists(self):
- root = ds_util.DatastorePath(self.ds, 'vmware_base',
- self.fake_image_uuid,
- '%s.80.vmdk' % self.fake_image_uuid)
-
- def _fake_extend(instance, requested_size, name, dc_ref):
- vmwareapi_fake._add_file(str(root))
-
- self.stubs.Set(self.conn._vmops, '_extend_virtual_disk',
- _fake_extend)
-
- self._create_vm()
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- self.assertTrue(vmwareapi_fake.get_file(str(root)))
-
- @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
- 'from_image')
- def test_spawn_disk_extend_sparse(self, mock_from_image):
- img_props = images.VMwareImage(
- image_id=self.fake_image_uuid,
- file_size=units.Ki,
- disk_type=constants.DISK_TYPE_SPARSE,
- linked_clone=True)
-
- mock_from_image.return_value = img_props
-
- with contextlib.nested(
- mock.patch.object(self.conn._vmops, '_extend_virtual_disk'),
- mock.patch.object(self.conn._vmops, 'get_datacenter_ref_and_name'),
- ) as (mock_extend, mock_get_dc):
- dc_val = mock.Mock()
- dc_val.ref = "fake_dc_ref"
- dc_val.name = "dc1"
- mock_get_dc.return_value = dc_val
- self._create_vm()
- iid = img_props.image_id
- cached_image = ds_util.DatastorePath(self.ds, 'vmware_base',
- iid, '%s.80.vmdk' % iid)
- mock_extend.assert_called_once_with(
- self.instance, self.instance.root_gb * units.Mi,
- str(cached_image), "fake_dc_ref")
-
- def test_spawn_disk_extend_failed_copy(self):
- # Spawn instance
- # copy for extend fails without creating a file
- #
- # Expect the copy error to be raised
- self.flags(use_linked_clone=True, group='vmware')
-
- CopyError = vexc.FileFaultException
-
- def fake_wait_for_task(task_ref):
- if task_ref == 'fake-copy-task':
- raise CopyError('Copy failed!')
- return self.wait_task(task_ref)
-
- def fake_call_method(module, method, *args, **kwargs):
- if method == "CopyVirtualDisk_Task":
- return 'fake-copy-task'
-
- return self.call_method(module, method, *args, **kwargs)
-
- with contextlib.nested(
- mock.patch.object(self.conn._session, '_call_method',
- new=fake_call_method),
- mock.patch.object(self.conn._session, '_wait_for_task',
- new=fake_wait_for_task)):
- self.assertRaises(CopyError, self._create_vm)
-
- def test_spawn_disk_extend_failed_partial_copy(self):
- # Spawn instance
- # Copy for extend fails, leaving a file behind
- #
- # Expect the file to be cleaned up
- # Expect the copy error to be raised
- self.flags(use_linked_clone=True, group='vmware')
- self.task_ref = None
- uuid = self.fake_image_uuid
- cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds,
- uuid, uuid)
-
- CopyError = vexc.FileFaultException
-
- def fake_wait_for_task(task_ref):
- if task_ref == self.task_ref:
- self.task_ref = None
- self.assertTrue(vmwareapi_fake.get_file(cached_image))
- # N.B. We don't test for -flat here because real
- # CopyVirtualDisk_Task doesn't actually create it
- raise CopyError('Copy failed!')
- return self.wait_task(task_ref)
-
- def fake_call_method(module, method, *args, **kwargs):
- task_ref = self.call_method(module, method, *args, **kwargs)
- if method == "CopyVirtualDisk_Task":
- self.task_ref = task_ref
- return task_ref
-
- with contextlib.nested(
- mock.patch.object(self.conn._session, '_call_method',
- new=fake_call_method),
- mock.patch.object(self.conn._session, '_wait_for_task',
- new=fake_wait_for_task)):
- self.assertRaises(CopyError, self._create_vm)
- self.assertFalse(vmwareapi_fake.get_file(cached_image))
-
- def test_spawn_disk_extend_failed_partial_copy_failed_cleanup(self):
- # Spawn instance
- # Copy for extend fails, leaves file behind
- # File cleanup fails
- #
- # Expect file to be left behind
- # Expect file cleanup error to be raised
- self.flags(use_linked_clone=True, group='vmware')
- self.task_ref = None
- uuid = self.fake_image_uuid
- cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds,
- uuid, uuid)
-
- CopyError = vexc.FileFaultException
- DeleteError = vexc.CannotDeleteFileException
-
- def fake_wait_for_task(task_ref):
- if task_ref == self.task_ref:
- self.task_ref = None
- self.assertTrue(vmwareapi_fake.get_file(cached_image))
- # N.B. We don't test for -flat here because real
- # CopyVirtualDisk_Task doesn't actually create it
- raise CopyError('Copy failed!')
- elif task_ref == 'fake-delete-task':
- raise DeleteError('Delete failed!')
- return self.wait_task(task_ref)
-
- def fake_call_method(module, method, *args, **kwargs):
- if method == "DeleteDatastoreFile_Task":
- return 'fake-delete-task'
-
- task_ref = self.call_method(module, method, *args, **kwargs)
- if method == "CopyVirtualDisk_Task":
- self.task_ref = task_ref
- return task_ref
-
- with contextlib.nested(
- mock.patch.object(self.conn._session, '_wait_for_task',
- new=fake_wait_for_task),
- mock.patch.object(self.conn._session, '_call_method',
- new=fake_call_method)):
- self.assertRaises(DeleteError, self._create_vm)
- self.assertTrue(vmwareapi_fake.get_file(cached_image))
-
- @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
- 'from_image')
- def test_spawn_disk_invalid_disk_size(self, mock_from_image):
- img_props = images.VMwareImage(
- image_id=self.fake_image_uuid,
- file_size=82 * units.Gi,
- disk_type=constants.DISK_TYPE_SPARSE,
- linked_clone=True)
-
- mock_from_image.return_value = img_props
-
- self.assertRaises(exception.InstanceUnacceptable,
- self._create_vm)
-
- @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
- 'from_image')
- def test_spawn_disk_extend_insufficient_disk_space(self, mock_from_image):
- img_props = images.VMwareImage(
- image_id=self.fake_image_uuid,
- file_size=1024,
- disk_type=constants.DISK_TYPE_SPARSE,
- linked_clone=True)
-
- mock_from_image.return_value = img_props
-
- cached_image = ds_util.DatastorePath(self.ds, 'vmware_base',
- self.fake_image_uuid,
- '%s.80.vmdk' %
- self.fake_image_uuid)
- tmp_file = ds_util.DatastorePath(self.ds, 'vmware_base',
- self.fake_image_uuid,
- '%s.80-flat.vmdk' %
- self.fake_image_uuid)
-
- NoDiskSpace = vexc.get_fault_class('NoDiskSpace')
-
- def fake_wait_for_task(task_ref):
- if task_ref == self.task_ref:
- self.task_ref = None
- raise NoDiskSpace()
- return self.wait_task(task_ref)
-
- def fake_call_method(module, method, *args, **kwargs):
- task_ref = self.call_method(module, method, *args, **kwargs)
- if method == 'ExtendVirtualDisk_Task':
- self.task_ref = task_ref
- return task_ref
-
- with contextlib.nested(
- mock.patch.object(self.conn._session, '_wait_for_task',
- fake_wait_for_task),
- mock.patch.object(self.conn._session, '_call_method',
- fake_call_method)
- ) as (mock_wait_for_task, mock_call_method):
- self.assertRaises(NoDiskSpace, self._create_vm)
- self.assertFalse(vmwareapi_fake.get_file(str(cached_image)))
- self.assertFalse(vmwareapi_fake.get_file(str(tmp_file)))
-
- def test_spawn_with_move_file_exists_exception(self):
- # The test will validate that the spawn completes
- # successfully. The "MoveDatastoreFile_Task" will
- # raise an file exists exception. The flag
- # self.exception will be checked to see that
- # the exception has indeed been raised.
-
- def fake_wait_for_task(task_ref):
- if task_ref == self.task_ref:
- self.task_ref = None
- self.exception = True
- raise vexc.FileAlreadyExistsException()
- return self.wait_task(task_ref)
-
- def fake_call_method(module, method, *args, **kwargs):
- task_ref = self.call_method(module, method, *args, **kwargs)
- if method == "MoveDatastoreFile_Task":
- self.task_ref = task_ref
- return task_ref
-
- with contextlib.nested(
- mock.patch.object(self.conn._session, '_wait_for_task',
- fake_wait_for_task),
- mock.patch.object(self.conn._session, '_call_method',
- fake_call_method)
- ) as (_wait_for_task, _call_method):
- self._create_vm()
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- self.assertTrue(self.exception)
-
- def test_spawn_with_move_general_exception(self):
- # The test will validate that the spawn completes
- # successfully. The "MoveDatastoreFile_Task" will
- # raise a general exception. The flag self.exception
- # will be checked to see that the exception has
- # indeed been raised.
-
- def fake_wait_for_task(task_ref):
- if task_ref == self.task_ref:
- self.task_ref = None
- self.exception = True
- raise vexc.VMwareDriverException('Exception!')
- return self.wait_task(task_ref)
-
- def fake_call_method(module, method, *args, **kwargs):
- task_ref = self.call_method(module, method, *args, **kwargs)
- if method == "MoveDatastoreFile_Task":
- self.task_ref = task_ref
- return task_ref
-
- with contextlib.nested(
- mock.patch.object(self.conn._session, '_wait_for_task',
- fake_wait_for_task),
- mock.patch.object(self.conn._session, '_call_method',
- fake_call_method)
- ) as (_wait_for_task, _call_method):
- self.assertRaises(vexc.VMwareDriverException,
- self._create_vm)
- self.assertTrue(self.exception)
-
- def test_spawn_with_move_poll_exception(self):
- self.call_method = self.conn._session._call_method
-
- def fake_call_method(module, method, *args, **kwargs):
- task_ref = self.call_method(module, method, *args, **kwargs)
- if method == "MoveDatastoreFile_Task":
- task_mdo = vmwareapi_fake.create_task(method, "error")
- return task_mdo.obj
- return task_ref
-
- with (
- mock.patch.object(self.conn._session, '_call_method',
- fake_call_method)
- ):
- self.assertRaises(vexc.VMwareDriverException,
- self._create_vm)
-
- def test_spawn_with_move_file_exists_poll_exception(self):
- # The test will validate that the spawn completes
- # successfully. The "MoveDatastoreFile_Task" will
- # raise a file exists exception. The flag self.exception
- # will be checked to see that the exception has
- # indeed been raised.
-
- def fake_call_method(module, method, *args, **kwargs):
- task_ref = self.call_method(module, method, *args, **kwargs)
- if method == "MoveDatastoreFile_Task":
- self.exception = True
- task_mdo = vmwareapi_fake.create_task(method, "error",
- error_fault=vmwareapi_fake.FileAlreadyExists())
- return task_mdo.obj
- return task_ref
-
- with (
- mock.patch.object(self.conn._session, '_call_method',
- fake_call_method)
- ):
- self._create_vm()
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- self.assertTrue(self.exception)
-
- def _spawn_attach_volume_vmdk(self, set_image_ref=True, vc_support=False):
- self._create_instance(set_image_ref=set_image_ref)
- self.mox.StubOutWithMock(block_device, 'volume_in_mapping')
- self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping')
- connection_info = self._test_vmdk_connection_info('vmdk')
- root_disk = [{'connection_info': connection_info}]
- v_driver.block_device_info_get_mapping(
- mox.IgnoreArg()).AndReturn(root_disk)
- if vc_support:
- self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
- '_get_res_pool_of_vm')
- volumeops.VMwareVolumeOps._get_res_pool_of_vm(
- mox.IgnoreArg()).AndReturn('fake_res_pool')
- self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
- '_relocate_vmdk_volume')
- volumeops.VMwareVolumeOps._relocate_vmdk_volume(mox.IgnoreArg(),
- 'fake_res_pool', mox.IgnoreArg())
- self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
- 'attach_volume')
- volumeops.VMwareVolumeOps.attach_volume(connection_info,
- self.instance, mox.IgnoreArg())
- self.mox.ReplayAll()
- block_device_info = {'mount_device': 'vda'}
- self.conn.spawn(self.context, self.instance, self.image,
- injected_files=[], admin_password=None,
- network_info=self.network_info,
- block_device_info=block_device_info)
-
- def test_spawn_attach_volume_iscsi(self):
- self._create_instance()
- self.mox.StubOutWithMock(block_device, 'volume_in_mapping')
- self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping')
- connection_info = self._test_vmdk_connection_info('iscsi')
- root_disk = [{'connection_info': connection_info}]
- v_driver.block_device_info_get_mapping(
- mox.IgnoreArg()).AndReturn(root_disk)
- self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
- 'attach_volume')
- volumeops.VMwareVolumeOps.attach_volume(connection_info,
- self.instance, mox.IgnoreArg())
- self.mox.ReplayAll()
- block_device_info = {'mount_device': 'vda'}
- self.conn.spawn(self.context, self.instance, self.image,
- injected_files=[], admin_password=None,
- network_info=self.network_info,
- block_device_info=block_device_info)
-
- def mock_upload_image(self, context, image, instance, **kwargs):
- self.assertEqual(image, 'Test-Snapshot')
- self.assertEqual(instance, self.instance)
- self.assertEqual(kwargs['disk_type'], 'preallocated')
-
- def test_get_vm_ref_using_extra_config(self):
- self._create_vm()
- vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session,
- self.instance['uuid'])
- self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
- # Disrupt the fake Virtual Machine object so that extraConfig
- # cannot be matched.
- fake_vm = self._get_vm_record()
- fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = ""
- # We should not get a Virtual Machine through extraConfig.
- vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session,
- self.instance['uuid'])
- self.assertIsNone(vm_ref, 'VM Reference should be none')
- # Check if we can find the Virtual Machine using the name.
- vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
- self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
-
- def test_search_vm_ref_by_identifier(self):
- self._create_vm()
- vm_ref = vm_util.search_vm_ref_by_identifier(self.conn._session,
- self.instance['uuid'])
- self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
- fake_vm = self._get_vm_record()
- fake_vm.set("summary.config.instanceUuid", "foo")
- fake_vm.set("name", "foo")
- fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = "foo"
- self.assertIsNone(vm_util.search_vm_ref_by_identifier(
- self.conn._session, self.instance['uuid']),
- "VM Reference should be none")
- self.assertIsNotNone(
- vm_util.search_vm_ref_by_identifier(self.conn._session, "foo"),
- "VM Reference should not be none")
-
- def test_get_object_for_optionvalue(self):
- self._create_vm()
- vms = self.conn._session._call_method(vim_util, "get_objects",
- "VirtualMachine", ['config.extraConfig["nvp.vm-uuid"]'])
- vm_ref = vm_util._get_object_for_optionvalue(vms,
- self.instance["uuid"])
- self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
-
- def _test_snapshot(self):
- expected_calls = [
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_UPLOADING,
- 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
- func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- with mock.patch.object(images, 'upload_image',
- self.mock_upload_image):
- self.conn.snapshot(self.context, self.instance, "Test-Snapshot",
- func_call_matcher.call)
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- self.assertIsNone(func_call_matcher.match())
-
- def test_snapshot(self):
- self._create_vm()
- self._test_snapshot()
-
- def test_snapshot_no_root_disk(self):
- self._iso_disk_type_created(instance_type='m1.micro')
- self.assertRaises(error_util.NoRootDiskDefined, self.conn.snapshot,
- self.context, self.instance, "Test-Snapshot",
- lambda *args, **kwargs: None)
-
- def test_snapshot_non_existent(self):
- self._create_instance()
- self.assertRaises(exception.InstanceNotFound, self.conn.snapshot,
- self.context, self.instance, "Test-Snapshot",
- lambda *args, **kwargs: None)
-
- def test_snapshot_delete_vm_snapshot(self):
- self._create_vm()
- fake_vm = self._get_vm_record()
- snapshot_ref = vmwareapi_fake.ManagedObjectReference(
- value="Snapshot-123",
- name="VirtualMachineSnapshot")
-
- self.mox.StubOutWithMock(vmops.VMwareVMOps,
- '_create_vm_snapshot')
- self.conn._vmops._create_vm_snapshot(
- self.instance, fake_vm.obj).AndReturn(snapshot_ref)
-
- self.mox.StubOutWithMock(vmops.VMwareVMOps,
- '_delete_vm_snapshot')
- self.conn._vmops._delete_vm_snapshot(
- self.instance, fake_vm.obj, snapshot_ref).AndReturn(None)
- self.mox.ReplayAll()
-
- self._test_snapshot()
-
- def _snapshot_delete_vm_snapshot_exception(self, exception, call_count=1):
- self._create_vm()
- fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0].obj
- snapshot_ref = vmwareapi_fake.ManagedObjectReference(
- value="Snapshot-123",
- name="VirtualMachineSnapshot")
-
- with contextlib.nested(
- mock.patch.object(self.conn._session, '_wait_for_task',
- side_effect=exception),
- mock.patch.object(vmops, '_time_sleep_wrapper')
- ) as (_fake_wait, _fake_sleep):
- if exception != error_util.TaskInProgress:
- self.assertRaises(exception,
- self.conn._vmops._delete_vm_snapshot,
- self.instance, fake_vm, snapshot_ref)
- self.assertEqual(0, _fake_sleep.call_count)
- else:
- self.conn._vmops._delete_vm_snapshot(self.instance, fake_vm,
- snapshot_ref)
- self.assertEqual(call_count - 1, _fake_sleep.call_count)
- self.assertEqual(call_count, _fake_wait.call_count)
-
- def test_snapshot_delete_vm_snapshot_exception(self):
- self._snapshot_delete_vm_snapshot_exception(exception.NovaException)
-
- def test_snapshot_delete_vm_snapshot_exception_retry(self):
- self.flags(api_retry_count=5, group='vmware')
- self._snapshot_delete_vm_snapshot_exception(error_util.TaskInProgress,
- 5)
-
- def test_reboot(self):
- self._create_vm()
- info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- reboot_type = "SOFT"
- self.conn.reboot(self.context, self.instance, self.network_info,
- reboot_type)
- info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_reboot_with_uuid(self):
- """Test fall back to use name when can't find by uuid."""
- self._create_vm()
- info = self.conn.get_info({'name': 'fake-name', 'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- reboot_type = "SOFT"
- self.conn.reboot(self.context, self.instance, self.network_info,
- reboot_type)
- info = self.conn.get_info({'name': 'fake-name', 'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_reboot_non_existent(self):
- self._create_instance()
- self.assertRaises(exception.InstanceNotFound, self.conn.reboot,
- self.context, self.instance, self.network_info,
- 'SOFT')
-
- def test_poll_rebooting_instances(self):
- self.mox.StubOutWithMock(compute_api.API, 'reboot')
- compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg())
- self.mox.ReplayAll()
- self._create_vm()
- instances = [self.instance]
- self.conn.poll_rebooting_instances(60, instances)
-
- def test_reboot_not_poweredon(self):
- self._create_vm()
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.suspend(self.instance)
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.SUSPENDED)
- self.assertRaises(exception.InstanceRebootFailure, self.conn.reboot,
- self.context, self.instance, self.network_info,
- 'SOFT')
-
- def test_suspend(self):
- self._create_vm()
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.suspend(self.instance)
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.SUSPENDED)
-
- def test_suspend_non_existent(self):
- self._create_instance()
- self.assertRaises(exception.InstanceNotFound, self.conn.suspend,
- self.instance)
-
- def test_resume(self):
- self._create_vm()
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.suspend(self.instance)
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.SUSPENDED)
- self.conn.resume(self.context, self.instance, self.network_info)
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_resume_non_existent(self):
- self._create_instance()
- self.assertRaises(exception.InstanceNotFound, self.conn.resume,
- self.context, self.instance, self.network_info)
-
- def test_resume_not_suspended(self):
- self._create_vm()
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- self.assertRaises(exception.InstanceResumeFailure, self.conn.resume,
- self.context, self.instance, self.network_info)
-
- def test_power_on(self):
- self._create_vm()
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.power_off(self.instance)
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.SHUTDOWN)
- self.conn.power_on(self.context, self.instance, self.network_info)
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_power_on_non_existent(self):
- self._create_instance()
- self.assertRaises(exception.InstanceNotFound, self.conn.power_on,
- self.context, self.instance, self.network_info)
-
- def test_power_off(self):
- self._create_vm()
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.power_off(self.instance)
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.SHUTDOWN)
-
- def test_power_off_non_existent(self):
- self._create_instance()
- self.assertRaises(exception.InstanceNotFound, self.conn.power_off,
- self.instance)
-
- def test_resume_state_on_host_boot(self):
- self._create_vm()
- self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name')
- self.mox.StubOutWithMock(self.conn, "reboot")
- vm_util.get_vm_state_from_name(mox.IgnoreArg(),
- self.instance['uuid']).AndReturn("poweredOff")
- self.conn.reboot(self.context, self.instance, 'network_info',
- 'hard', None)
- self.mox.ReplayAll()
- self.conn.resume_state_on_host_boot(self.context, self.instance,
- 'network_info')
-
- def test_resume_state_on_host_boot_no_reboot_1(self):
- """Don't call reboot on instance which is poweredon."""
- self._create_vm()
- self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name')
- self.mox.StubOutWithMock(self.conn, 'reboot')
- vm_util.get_vm_state_from_name(mox.IgnoreArg(),
- self.instance['uuid']).AndReturn("poweredOn")
- self.mox.ReplayAll()
- self.conn.resume_state_on_host_boot(self.context, self.instance,
- 'network_info')
-
- def test_resume_state_on_host_boot_no_reboot_2(self):
- """Don't call reboot on instance which is suspended."""
- self._create_vm()
- self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name')
- self.mox.StubOutWithMock(self.conn, 'reboot')
- vm_util.get_vm_state_from_name(mox.IgnoreArg(),
- self.instance['uuid']).AndReturn("suspended")
- self.mox.ReplayAll()
- self.conn.resume_state_on_host_boot(self.context, self.instance,
- 'network_info')
-
- def destroy_rescued(self, fake_method):
- self._rescue()
- with contextlib.nested(
- mock.patch.object(self.conn._volumeops, "detach_disk_from_vm",
- fake_method),
- mock.patch.object(vm_util, "power_on_instance"),
- ) as (fake_detach, fake_power_on):
- self.instance['vm_state'] = vm_states.RESCUED
- self.conn.destroy(self.context, self.instance, self.network_info)
- inst_path = ds_util.DatastorePath(self.ds, self.uuid,
- '%s.vmdk' % self.uuid)
- self.assertFalse(vmwareapi_fake.get_file(str(inst_path)))
- rescue_file_path = ds_util.DatastorePath(
- self.ds, '%s-rescue' % self.uuid, '%s-rescue.vmdk' % self.uuid)
- self.assertFalse(vmwareapi_fake.get_file(str(rescue_file_path)))
- # Unrescue does not power on with destroy
- self.assertFalse(fake_power_on.called)
-
- def test_destroy_rescued(self):
- def fake_detach_disk_from_vm(*args, **kwargs):
- pass
- self.destroy_rescued(fake_detach_disk_from_vm)
-
- def test_destroy_rescued_with_exception(self):
- def fake_detach_disk_from_vm(*args, **kwargs):
- raise exception.NovaException('Here is my fake exception')
- self.destroy_rescued(fake_detach_disk_from_vm)
-
- def test_destroy(self):
- self._create_vm()
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- instances = self.conn.list_instances()
- self.assertEqual(len(instances), 1)
- self.conn.destroy(self.context, self.instance, self.network_info)
- instances = self.conn.list_instances()
- self.assertEqual(len(instances), 0)
- self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
-
- def test_destroy_no_datastore(self):
- self._create_vm()
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- instances = self.conn.list_instances()
- self.assertEqual(len(instances), 1)
- # Delete the vmPathName
- vm = self._get_vm_record()
- vm.delete('config.files.vmPathName')
- self.conn.destroy(self.context, self.instance, self.network_info)
- instances = self.conn.list_instances()
- self.assertEqual(len(instances), 0)
-
- def test_destroy_non_existent(self):
- self.destroy_disks = True
- with mock.patch.object(self.conn._vmops,
- "destroy") as mock_destroy:
- self._create_instance()
- self.conn.destroy(self.context, self.instance,
- self.network_info,
- None, self.destroy_disks)
- mock_destroy.assert_called_once_with(self.instance,
- self.destroy_disks)
-
- def test_destroy_instance_without_compute(self):
- self.destroy_disks = True
- with mock.patch.object(self.conn._vmops,
- "destroy") as mock_destroy:
- self.conn.destroy(self.context, self.instance_without_compute,
- self.network_info,
- None, self.destroy_disks)
- self.assertFalse(mock_destroy.called)
-
- def _destroy_instance_without_vm_ref(self, resize_exists=False,
- task_state=None):
-
- def fake_vm_ref_from_name(session, vm_name):
- if resize_exists:
- return 'fake-ref'
-
- self._create_instance()
- with contextlib.nested(
- mock.patch.object(vm_util, 'get_vm_ref_from_name',
- fake_vm_ref_from_name),
- mock.patch.object(self.conn._session,
- '_call_method'),
- mock.patch.object(self.conn._vmops,
- '_destroy_instance')
- ) as (mock_get, mock_call, mock_destroy):
- self.instance.task_state = task_state
- self.conn.destroy(self.context, self.instance,
- self.network_info,
- None, True)
- if resize_exists:
- if task_state == task_states.RESIZE_REVERTING:
- expected = 1
- else:
- expected = 2
- else:
- expected = 1
- self.assertEqual(expected, mock_destroy.call_count)
- self.assertFalse(mock_call.called)
-
- def test_destroy_instance_without_vm_ref(self):
- self._destroy_instance_without_vm_ref()
-
- def test_destroy_instance_without_vm_ref_with_resize(self):
- self._destroy_instance_without_vm_ref(resize_exists=True)
-
- def test_destroy_instance_without_vm_ref_with_resize_revert(self):
- self._destroy_instance_without_vm_ref(resize_exists=True,
- task_state=task_states.RESIZE_REVERTING)
-
- def _rescue(self, config_drive=False):
- # validate that the power on is only called once
- self._power_on = vm_util.power_on_instance
- self._power_on_called = 0
-
- def fake_attach_disk_to_vm(vm_ref, instance,
- adapter_type, disk_type, vmdk_path=None,
- disk_size=None, linked_clone=False,
- controller_key=None, unit_number=None,
- device_name=None):
- info = self.conn.get_info(instance)
- self._check_vm_info(info, power_state.SHUTDOWN)
-
- if config_drive:
- def fake_create_config_drive(instance, injected_files, password,
- data_store_name, folder,
- instance_uuid, cookies):
- self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
- return str(ds_util.DatastorePath(data_store_name,
- instance_uuid, 'fake.iso'))
-
- self.stubs.Set(self.conn._vmops, '_create_config_drive',
- fake_create_config_drive)
-
- self._create_vm()
-
- def fake_power_on_instance(session, instance, vm_ref=None):
- self._power_on_called += 1
- return self._power_on(session, instance, vm_ref=vm_ref)
-
- info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- self.stubs.Set(vm_util, "power_on_instance",
- fake_power_on_instance)
- self.stubs.Set(self.conn._volumeops, "attach_disk_to_vm",
- fake_attach_disk_to_vm)
-
- self.conn.rescue(self.context, self.instance, self.network_info,
- self.image, 'fake-password')
-
- info = self.conn.get_info({'name': '1-rescue',
- 'uuid': '%s-rescue' % self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.SHUTDOWN)
- self.assertIsNotNone(vm_util.vm_ref_cache_get('%s-rescue' % self.uuid))
- self.assertEqual(1, self._power_on_called)
-
- def test_rescue(self):
- self._rescue()
- inst_file_path = ds_util.DatastorePath(self.ds, self.uuid,
- '%s.vmdk' % self.uuid)
- self.assertTrue(vmwareapi_fake.get_file(str(inst_file_path)))
- rescue_file_path = ds_util.DatastorePath(self.ds,
- '%s-rescue' % self.uuid,
- '%s-rescue.vmdk' % self.uuid)
- self.assertTrue(vmwareapi_fake.get_file(str(rescue_file_path)))
-
- def test_rescue_with_config_drive(self):
- self.flags(force_config_drive=True)
- self._rescue(config_drive=True)
-
- def test_unrescue(self):
- # NOTE(dims): driver unrescue ends up eventually in vmops.unrescue
- # with power_on=True, the test_destroy_rescued tests the
- # vmops.unrescue with power_on=False
- self._rescue()
- vm_ref = vm_util.get_vm_ref(self.conn._session,
- self.instance)
- vm_rescue_ref = vm_util.get_vm_ref_from_name(self.conn._session,
- '%s-rescue' % self.uuid)
-
- self.poweroff_instance = vm_util.power_off_instance
-
- def fake_power_off_instance(session, instance, vm_ref):
- # This is called so that we actually poweroff the simulated vm.
- # The reason for this is that there is a validation in destroy
- # that the instance is not powered on.
- self.poweroff_instance(session, instance, vm_ref)
-
- def fake_detach_disk_from_vm(vm_ref, instance,
- device_name, destroy_disk=False):
- self.test_device_name = device_name
- info = self.conn.get_info(instance)
- self._check_vm_info(info, power_state.SHUTDOWN)
-
- with contextlib.nested(
- mock.patch.object(vm_util, "power_off_instance",
- side_effect=fake_power_off_instance),
- mock.patch.object(self.conn._volumeops, "detach_disk_from_vm",
- side_effect=fake_detach_disk_from_vm),
- mock.patch.object(vm_util, "power_on_instance"),
- ) as (poweroff, detach, fake_power_on):
- self.conn.unrescue(self.instance, None)
- poweroff.assert_called_once_with(self.conn._session, mock.ANY,
- vm_rescue_ref)
- detach.assert_called_once_with(vm_rescue_ref, mock.ANY,
- self.test_device_name)
- fake_power_on.assert_called_once_with(self.conn._session,
- self.instance,
- vm_ref=vm_ref)
- self.test_vm_ref = None
- self.test_device_name = None
-
- def test_get_diagnostics(self):
- self._create_vm()
- expected = {'memoryReservation': 0, 'suspendInterval': 0,
- 'maxCpuUsage': 2000, 'toolsInstallerMounted': False,
- 'consumedOverheadMemory': 20, 'numEthernetCards': 1,
- 'numCpu': 1, 'featureRequirement': [{'key': 'cpuid.AES'}],
- 'memoryOverhead': 21417984,
- 'guestMemoryUsage': 0, 'connectionState': 'connected',
- 'memorySizeMB': 512, 'balloonedMemory': 0,
- 'vmPathName': 'fake_path', 'template': False,
- 'overallCpuUsage': 0, 'powerState': 'poweredOn',
- 'cpuReservation': 0, 'overallCpuDemand': 0,
- 'numVirtualDisks': 1, 'hostMemoryUsage': 141}
- expected = dict([('vmware:' + k, v) for k, v in expected.items()])
- self.assertThat(
- self.conn.get_diagnostics({'name': 1, 'uuid': self.uuid,
- 'node': self.instance_node}),
- matchers.DictMatches(expected))
-
- def test_get_instance_diagnostics(self):
- self._create_vm()
- expected = {'uptime': 0,
- 'memory_details': {'used': 0, 'maximum': 512},
- 'nic_details': [],
- 'driver': 'vmwareapi',
- 'state': 'running',
- 'version': '1.0',
- 'cpu_details': [],
- 'disk_details': [],
- 'hypervisor_os': 'esxi',
- 'config_drive': False}
- actual = self.conn.get_instance_diagnostics(
- {'name': 1, 'uuid': self.uuid, 'node': self.instance_node})
- self.assertThat(actual.serialize(), matchers.DictMatches(expected))
-
- def test_get_console_output(self):
- self.assertRaises(NotImplementedError, self.conn.get_console_output,
- None, None)
-
- def _test_finish_migration(self, power_on, resize_instance=False):
- self._create_vm()
- self.conn.finish_migration(context=self.context,
- migration=None,
- instance=self.instance,
- disk_info=None,
- network_info=None,
- block_device_info=None,
- resize_instance=resize_instance,
- image_meta=None,
- power_on=power_on)
-
- def _test_finish_revert_migration(self, power_on):
- self._create_vm()
- # Ensure ESX driver throws an error
- self.assertRaises(NotImplementedError,
- self.conn.finish_revert_migration,
- self.context,
- instance=self.instance,
- network_info=None)
-
- def test_get_vnc_console_non_existent(self):
- self._create_instance()
- self.assertRaises(exception.InstanceNotFound,
- self.conn.get_vnc_console,
- self.context,
- self.instance)
-
- def _test_get_vnc_console(self):
- self._create_vm()
- fake_vm = self._get_vm_record()
- OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
- opt_val = OptionValue(key='', value=5906)
- fake_vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
- vnc_console = self.conn.get_vnc_console(self.context, self.instance)
- self.assertEqual(self.vnc_host, vnc_console.host)
- self.assertEqual(5906, vnc_console.port)
-
- def test_get_vnc_console(self):
- self._test_get_vnc_console()
-
- def test_get_vnc_console_noport(self):
- self._create_vm()
- self.assertRaises(exception.ConsoleTypeUnavailable,
- self.conn.get_vnc_console,
- self.context,
- self.instance)
-
- def test_get_volume_connector(self):
- self._create_vm()
- connector_dict = self.conn.get_volume_connector(self.instance)
- fake_vm = self._get_vm_record()
- fake_vm_id = fake_vm.obj.value
- self.assertEqual(connector_dict['ip'], 'test_url')
- self.assertEqual(connector_dict['initiator'], 'iscsi-name')
- self.assertEqual(connector_dict['host'], 'test_url')
- self.assertEqual(connector_dict['instance'], fake_vm_id)
-
- def _test_vmdk_connection_info(self, type):
- return {'driver_volume_type': type,
- 'serial': 'volume-fake-id',
- 'data': {'volume': 'vm-10',
- 'volume_id': 'volume-fake-id'}}
-
- def test_volume_attach_vmdk(self):
- self._create_vm()
- connection_info = self._test_vmdk_connection_info('vmdk')
- mount_point = '/dev/vdc'
- self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
- '_attach_volume_vmdk')
- volumeops.VMwareVolumeOps._attach_volume_vmdk(connection_info,
- self.instance, mount_point)
- self.mox.ReplayAll()
- self.conn.attach_volume(None, connection_info, self.instance,
- mount_point)
-
- def test_volume_detach_vmdk(self):
- self._create_vm()
- connection_info = self._test_vmdk_connection_info('vmdk')
- mount_point = '/dev/vdc'
- self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
- '_detach_volume_vmdk')
- volumeops.VMwareVolumeOps._detach_volume_vmdk(connection_info,
- self.instance, mount_point)
- self.mox.ReplayAll()
- self.conn.detach_volume(connection_info, self.instance, mount_point,
- encryption=None)
-
- def test_attach_vmdk_disk_to_vm(self):
- self._create_vm()
- connection_info = self._test_vmdk_connection_info('vmdk')
- mount_point = '/dev/vdc'
-
- # create fake backing info
- volume_device = vmwareapi_fake.DataObject()
- volume_device.backing = vmwareapi_fake.DataObject()
- volume_device.backing.fileName = 'fake_path'
-
- self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
- '_get_vmdk_base_volume_device')
- volumeops.VMwareVolumeOps._get_vmdk_base_volume_device(
- mox.IgnoreArg()).AndReturn(volume_device)
- self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
- 'attach_disk_to_vm')
- volumeops.VMwareVolumeOps.attach_disk_to_vm(mox.IgnoreArg(),
- self.instance, mox.IgnoreArg(), mox.IgnoreArg(),
- vmdk_path='fake_path')
- self.mox.ReplayAll()
- self.conn.attach_volume(None, connection_info, self.instance,
- mount_point)
-
- def test_detach_vmdk_disk_from_vm(self):
- self._create_vm()
- connection_info = self._test_vmdk_connection_info('vmdk')
- mount_point = '/dev/vdc'
- self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
- '_get_volume_uuid')
- volumeops.VMwareVolumeOps._get_volume_uuid(mox.IgnoreArg(),
- 'volume-fake-id').AndReturn('fake_disk_uuid')
- self.mox.StubOutWithMock(vm_util, 'get_vmdk_backed_disk_device')
- vm_util.get_vmdk_backed_disk_device(mox.IgnoreArg(),
- 'fake_disk_uuid').AndReturn('fake_device')
- self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
- '_consolidate_vmdk_volume')
- volumeops.VMwareVolumeOps._consolidate_vmdk_volume(self.instance,
- mox.IgnoreArg(), 'fake_device', mox.IgnoreArg())
- self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
- 'detach_disk_from_vm')
- volumeops.VMwareVolumeOps.detach_disk_from_vm(mox.IgnoreArg(),
- self.instance, mox.IgnoreArg())
- self.mox.ReplayAll()
- self.conn.detach_volume(connection_info, self.instance, mount_point,
- encryption=None)
-
- def test_volume_attach_iscsi(self):
- self._create_vm()
- connection_info = self._test_vmdk_connection_info('iscsi')
- mount_point = '/dev/vdc'
- self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
- '_attach_volume_iscsi')
- volumeops.VMwareVolumeOps._attach_volume_iscsi(connection_info,
- self.instance, mount_point)
- self.mox.ReplayAll()
- self.conn.attach_volume(None, connection_info, self.instance,
- mount_point)
-
- def test_volume_detach_iscsi(self):
- self._create_vm()
- connection_info = self._test_vmdk_connection_info('iscsi')
- mount_point = '/dev/vdc'
- self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
- '_detach_volume_iscsi')
- volumeops.VMwareVolumeOps._detach_volume_iscsi(connection_info,
- self.instance, mount_point)
- self.mox.ReplayAll()
- self.conn.detach_volume(connection_info, self.instance, mount_point,
- encryption=None)
-
- def test_attach_iscsi_disk_to_vm(self):
- self._create_vm()
- connection_info = self._test_vmdk_connection_info('iscsi')
- connection_info['data']['target_portal'] = 'fake_target_host:port'
- connection_info['data']['target_iqn'] = 'fake_target_iqn'
- mount_point = '/dev/vdc'
- discover = ('fake_name', 'fake_uuid')
- self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
- '_iscsi_get_target')
- # simulate target not found
- volumeops.VMwareVolumeOps._iscsi_get_target(
- connection_info['data']).AndReturn((None, None))
- self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
- '_iscsi_add_send_target_host')
- # rescan gets called with target portal
- self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
- '_iscsi_rescan_hba')
- volumeops.VMwareVolumeOps._iscsi_rescan_hba(
- connection_info['data']['target_portal'])
- # simulate target found
- volumeops.VMwareVolumeOps._iscsi_get_target(
- connection_info['data']).AndReturn(discover)
- self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
- 'attach_disk_to_vm')
- volumeops.VMwareVolumeOps.attach_disk_to_vm(mox.IgnoreArg(),
- self.instance, mox.IgnoreArg(), 'rdmp',
- device_name=mox.IgnoreArg())
- self.mox.ReplayAll()
- self.conn.attach_volume(None, connection_info, self.instance,
- mount_point)
-
- def test_iscsi_rescan_hba(self):
- fake_target_portal = 'fake_target_host:port'
- host_storage_sys = vmwareapi_fake._get_objects(
- "HostStorageSystem").objects[0]
- iscsi_hba_array = host_storage_sys.get('storageDeviceInfo'
- '.hostBusAdapter')
- iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0]
- # Check the host system does not have the send target
- self.assertRaises(AttributeError, getattr, iscsi_hba,
- 'configuredSendTarget')
- # Rescan HBA with the target portal
- vops = volumeops.VMwareVolumeOps(self.conn._session)
- vops._iscsi_rescan_hba(fake_target_portal)
- # Check if HBA has the target portal configured
- self.assertEqual('fake_target_host',
- iscsi_hba.configuredSendTarget[0].address)
- # Rescan HBA with same portal
- vops._iscsi_rescan_hba(fake_target_portal)
- self.assertEqual(1, len(iscsi_hba.configuredSendTarget))
-
- def test_iscsi_get_target(self):
- data = {'target_portal': 'fake_target_host:port',
- 'target_iqn': 'fake_target_iqn'}
- host = vmwareapi_fake._get_objects('HostSystem').objects[0]
- host._add_iscsi_target(data)
- vops = volumeops.VMwareVolumeOps(self.conn._session)
- result = vops._iscsi_get_target(data)
- self.assertEqual(('fake-device', 'fake-uuid'), result)
-
- def test_detach_iscsi_disk_from_vm(self):
- self._create_vm()
- connection_info = self._test_vmdk_connection_info('iscsi')
- connection_info['data']['target_portal'] = 'fake_target_portal'
- connection_info['data']['target_iqn'] = 'fake_target_iqn'
- mount_point = '/dev/vdc'
- find = ('fake_name', 'fake_uuid')
- self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
- '_iscsi_get_target')
- volumeops.VMwareVolumeOps._iscsi_get_target(
- connection_info['data']).AndReturn(find)
- self.mox.StubOutWithMock(vm_util, 'get_rdm_disk')
- device = 'fake_device'
- vm_util.get_rdm_disk(mox.IgnoreArg(), 'fake_uuid').AndReturn(device)
- self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
- 'detach_disk_from_vm')
- volumeops.VMwareVolumeOps.detach_disk_from_vm(mox.IgnoreArg(),
- self.instance, device, destroy_disk=True)
- self.mox.ReplayAll()
- self.conn.detach_volume(connection_info, self.instance, mount_point,
- encryption=None)
-
- def test_connection_info_get(self):
- self._create_vm()
- connector = self.conn.get_volume_connector(self.instance)
- self.assertEqual(connector['ip'], 'test_url')
- self.assertEqual(connector['host'], 'test_url')
- self.assertEqual(connector['initiator'], 'iscsi-name')
- self.assertIn('instance', connector)
-
- def test_connection_info_get_after_destroy(self):
- self._create_vm()
- self.conn.destroy(self.context, self.instance, self.network_info)
- connector = self.conn.get_volume_connector(self.instance)
- self.assertEqual(connector['ip'], 'test_url')
- self.assertEqual(connector['host'], 'test_url')
- self.assertEqual(connector['initiator'], 'iscsi-name')
- self.assertNotIn('instance', connector)
-
- def test_refresh_instance_security_rules(self):
- self.assertRaises(NotImplementedError,
- self.conn.refresh_instance_security_rules,
- instance=None)
-
- def test_image_aging_image_used(self):
- self._create_vm()
- all_instances = [self.instance]
- self.conn.manage_image_cache(self.context, all_instances)
- self._cached_files_exist()
-
- def _get_timestamp_filename(self):
- return '%s%s' % (imagecache.TIMESTAMP_PREFIX,
- timeutils.strtime(at=self.old_time,
- fmt=imagecache.TIMESTAMP_FORMAT))
-
- def _override_time(self):
- self.old_time = datetime.datetime(2012, 11, 22, 12, 00, 00)
-
- def _fake_get_timestamp_filename(fake):
- return self._get_timestamp_filename()
-
- self.stubs.Set(imagecache.ImageCacheManager, '_get_timestamp_filename',
- _fake_get_timestamp_filename)
-
- def _timestamp_file_exists(self, exists=True):
- timestamp = ds_util.DatastorePath(self.ds, 'vmware_base',
- self.fake_image_uuid,
- self._get_timestamp_filename() + '/')
- if exists:
- self.assertTrue(vmwareapi_fake.get_file(str(timestamp)))
- else:
- self.assertFalse(vmwareapi_fake.get_file(str(timestamp)))
-
- def _image_aging_image_marked_for_deletion(self):
- self._create_vm(uuid=uuidutils.generate_uuid())
- self._cached_files_exist()
- all_instances = []
- self.conn.manage_image_cache(self.context, all_instances)
- self._cached_files_exist()
- self._timestamp_file_exists()
-
- def test_image_aging_image_marked_for_deletion(self):
- self._override_time()
- self._image_aging_image_marked_for_deletion()
-
- def _timestamp_file_removed(self):
- self._override_time()
- self._image_aging_image_marked_for_deletion()
- self._create_vm(num_instances=2,
- uuid=uuidutils.generate_uuid())
- self._timestamp_file_exists(exists=False)
-
- def test_timestamp_file_removed_spawn(self):
- self._timestamp_file_removed()
-
- def test_timestamp_file_removed_aging(self):
- self._timestamp_file_removed()
- ts = self._get_timestamp_filename()
- ts_path = ds_util.DatastorePath(self.ds, 'vmware_base',
- self.fake_image_uuid, ts + '/')
- vmwareapi_fake._add_file(str(ts_path))
- self._timestamp_file_exists()
- all_instances = [self.instance]
- self.conn.manage_image_cache(self.context, all_instances)
- self._timestamp_file_exists(exists=False)
-
- def test_image_aging_disabled(self):
- self._override_time()
- self.flags(remove_unused_base_images=False)
- self._create_vm()
- self._cached_files_exist()
- all_instances = []
- self.conn.manage_image_cache(self.context, all_instances)
- self._cached_files_exist(exists=True)
- self._timestamp_file_exists(exists=False)
-
- def _image_aging_aged(self, aging_time=100):
- self._override_time()
- cur_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
- self.flags(remove_unused_original_minimum_age_seconds=aging_time)
- self._image_aging_image_marked_for_deletion()
- all_instances = []
- timeutils.set_time_override(cur_time)
- self.conn.manage_image_cache(self.context, all_instances)
-
- def test_image_aging_aged(self):
- self._image_aging_aged(aging_time=8)
- self._cached_files_exist(exists=False)
-
- def test_image_aging_not_aged(self):
- self._image_aging_aged()
- self._cached_files_exist()
-
-
-class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase):
-
- @mock.patch.object(driver.VMwareVCDriver, '_register_openstack_extension')
- def setUp(self, mock_register):
- super(VMwareAPIVCDriverTestCase, self).setUp(create_connection=False)
- cluster_name = 'test_cluster'
- cluster_name2 = 'test_cluster2'
- self.flags(cluster_name=[cluster_name, cluster_name2],
- api_retry_count=1,
- task_poll_interval=10, datastore_regex='.*', group='vmware')
- self.flags(vnc_enabled=False,
- image_cache_subdirectory_name='vmware_base')
- vmwareapi_fake.reset()
- self.conn = driver.VMwareVCDriver(None, False)
- self._set_exception_vars()
- self.node_name = self.conn._resources.keys()[0]
- self.node_name2 = self.conn._resources.keys()[1]
- if cluster_name2 in self.node_name2:
- self.ds = 'ds1'
- else:
- self.ds = 'ds2'
- self.vnc_host = 'ha-host'
-
- def tearDown(self):
- super(VMwareAPIVCDriverTestCase, self).tearDown()
- vmwareapi_fake.cleanup()
-
- def test_public_api_signatures(self):
- self.assertPublicAPISignatures(v_driver.ComputeDriver(None), self.conn)
-
- def test_register_extension(self):
- with mock.patch.object(self.conn._session, '_call_method',
- return_value=None) as mock_call_method:
- self.conn._register_openstack_extension()
- mock_call_method.assert_has_calls(
- [mock.call(oslo_vim_util, 'find_extension',
- constants.EXTENSION_KEY),
- mock.call(oslo_vim_util, 'register_extension',
- constants.EXTENSION_KEY,
- constants.EXTENSION_TYPE_INSTANCE)])
-
- def test_register_extension_already_exists(self):
- with mock.patch.object(self.conn._session, '_call_method',
- return_value='fake-extension') as mock_find_ext:
- self.conn._register_openstack_extension()
- mock_find_ext.assert_called_once_with(oslo_vim_util,
- 'find_extension',
- constants.EXTENSION_KEY)
-
- def test_list_instances(self):
- instances = self.conn.list_instances()
- self.assertEqual(0, len(instances))
-
- def test_list_instances_from_nodes(self):
- # Create instance on node1
- self._create_vm(self.node_name)
- # Create instances on the other node
- self._create_vm(self.node_name2, num_instances=2)
- self._create_vm(self.node_name2, num_instances=3)
- node1_vmops = self.conn._get_vmops_for_compute_node(self.node_name)
- node2_vmops = self.conn._get_vmops_for_compute_node(self.node_name2)
- self.assertEqual(1, len(node1_vmops.list_instances()))
- self.assertEqual(2, len(node2_vmops.list_instances()))
- self.assertEqual(3, len(self.conn.list_instances()))
-
- def _setup_mocks_for_session(self, mock_init):
- mock_init.return_value = None
-
- vcdriver = driver.VMwareVCDriver(None, False)
- vcdriver._session = mock.Mock()
- vcdriver._session.vim = None
-
- def side_effect():
- vcdriver._session.vim = mock.Mock()
- vcdriver._session._create_session.side_effect = side_effect
- return vcdriver
-
- def test_host_power_action(self):
- self.assertRaises(NotImplementedError,
- self.conn.host_power_action, 'host', 'action')
-
- def test_host_maintenance_mode(self):
- self.assertRaises(NotImplementedError,
- self.conn.host_maintenance_mode, 'host', 'mode')
-
- def test_set_host_enabled(self):
- self.assertRaises(NotImplementedError,
- self.conn.set_host_enabled, 'host', 'state')
-
- def test_datastore_regex_configured(self):
- for node in self.conn._resources.keys():
- self.assertEqual(self.conn._datastore_regex,
- self.conn._resources[node]['vmops']._datastore_regex)
-
- def test_get_available_resource(self):
- stats = self.conn.get_available_resource(self.node_name)
- cpu_info = {"model": ["Intel(R) Xeon(R)", "Intel(R) Xeon(R)"],
- "vendor": ["Intel", "Intel"],
- "topology": {"cores": 16,
- "threads": 32}}
- self.assertEqual(stats['vcpus'], 32)
- self.assertEqual(stats['local_gb'], 1024)
- self.assertEqual(stats['local_gb_used'], 1024 - 500)
- self.assertEqual(stats['memory_mb'], 1000)
- self.assertEqual(stats['memory_mb_used'], 500)
- self.assertEqual(stats['hypervisor_type'], 'VMware vCenter Server')
- self.assertEqual(stats['hypervisor_version'], 5001000)
- self.assertEqual(stats['hypervisor_hostname'], self.node_name)
- self.assertEqual(stats['cpu_info'], jsonutils.dumps(cpu_info))
- self.assertEqual(stats['supported_instances'],
- '[["i686", "vmware", "hvm"], ["x86_64", "vmware", "hvm"]]')
-
- def test_invalid_datastore_regex(self):
-
- # Tests if we raise an exception for Invalid Regular Expression in
- # vmware_datastore_regex
- self.flags(cluster_name=['test_cluster'], datastore_regex='fake-ds(01',
- group='vmware')
- self.assertRaises(exception.InvalidInput, driver.VMwareVCDriver, None)
-
- def test_get_available_nodes(self):
- nodelist = self.conn.get_available_nodes()
- self.assertEqual(len(nodelist), 2)
- self.assertIn(self.node_name, nodelist)
- self.assertIn(self.node_name2, nodelist)
-
- def test_spawn_multiple_node(self):
-
- def fake_is_neutron():
- return False
-
- self.stubs.Set(nova_utils, 'is_neutron', fake_is_neutron)
- uuid1 = uuidutils.generate_uuid()
- uuid2 = uuidutils.generate_uuid()
- self._create_vm(node=self.node_name, num_instances=1,
- uuid=uuid1)
- info = self.conn.get_info({'uuid': uuid1,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.destroy(self.context, self.instance, self.network_info)
- self._create_vm(node=self.node_name2, num_instances=1,
- uuid=uuid2)
- info = self.conn.get_info({'uuid': uuid2,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_snapshot(self):
- self._create_vm()
- self._test_snapshot()
-
- def test_snapshot_using_file_manager(self):
- self._create_vm()
- uuid_str = uuidutils.generate_uuid()
- self.mox.StubOutWithMock(uuidutils,
- 'generate_uuid')
- uuidutils.generate_uuid().AndReturn(uuid_str)
-
- self.mox.StubOutWithMock(ds_util, 'file_delete')
- disk_ds_path = ds_util.DatastorePath(
- self.ds, "vmware_temp", "%s.vmdk" % uuid_str)
- disk_ds_flat_path = ds_util.DatastorePath(
- self.ds, "vmware_temp", "%s-flat.vmdk" % uuid_str)
- # Check calls for delete vmdk and -flat.vmdk pair
- ds_util.file_delete(
- mox.IgnoreArg(), disk_ds_flat_path,
- mox.IgnoreArg()).AndReturn(None)
- ds_util.file_delete(
- mox.IgnoreArg(), disk_ds_path, mox.IgnoreArg()).AndReturn(None)
-
- self.mox.ReplayAll()
- self._test_snapshot()
-
- def test_spawn_invalid_node(self):
- self._create_instance(node='InvalidNodeName')
- self.assertRaises(exception.NotFound, self.conn.spawn,
- self.context, self.instance, self.image,
- injected_files=[], admin_password=None,
- network_info=self.network_info,
- block_device_info=None)
-
- @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
- 'from_image')
- def test_spawn_with_sparse_image(self, mock_from_image):
- img_info = images.VMwareImage(
- image_id=self.fake_image_uuid,
- file_size=1024,
- disk_type=constants.DISK_TYPE_SPARSE,
- linked_clone=False)
-
- mock_from_image.return_value = img_info
-
- self._create_vm()
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_plug_vifs(self):
- # Check to make sure the method raises NotImplementedError.
- self._create_instance()
- self.assertRaises(NotImplementedError,
- self.conn.plug_vifs,
- instance=self.instance, network_info=None)
-
- def test_unplug_vifs(self):
- # Check to make sure the method raises NotImplementedError.
- self._create_instance()
- self.assertRaises(NotImplementedError,
- self.conn.unplug_vifs,
- instance=self.instance, network_info=None)
-
- def _create_vif(self):
- gw_4 = network_model.IP(address='101.168.1.1', type='gateway')
- dns_4 = network_model.IP(address='8.8.8.8', type=None)
- subnet_4 = network_model.Subnet(cidr='101.168.1.0/24',
- dns=[dns_4],
- gateway=gw_4,
- routes=None,
- dhcp_server='191.168.1.1')
-
- gw_6 = network_model.IP(address='101:1db9::1', type='gateway')
- subnet_6 = network_model.Subnet(cidr='101:1db9::/64',
- dns=None,
- gateway=gw_6,
- ips=None,
- routes=None)
-
- network_neutron = network_model.Network(id='network-id-xxx-yyy-zzz',
- bridge=None,
- label=None,
- subnets=[subnet_4,
- subnet_6],
- bridge_interface='eth0',
- vlan=99)
-
- vif_bridge_neutron = network_model.VIF(id='new-vif-xxx-yyy-zzz',
- address='ca:fe:de:ad:be:ef',
- network=network_neutron,
- type=None,
- devname='tap-xxx-yyy-zzz',
- ovs_interfaceid='aaa-bbb-ccc')
- return vif_bridge_neutron
-
- def _validate_interfaces(self, id, index, num_iface_ids):
- vm = self._get_vm_record()
- found_iface_id = False
- extras = vm.get("config.extraConfig")
- key = "nvp.iface-id.%s" % index
- num_found = 0
- for c in extras.OptionValue:
- if c.key.startswith("nvp.iface-id."):
- num_found += 1
- if c.key == key and c.value == id:
- found_iface_id = True
- self.assertTrue(found_iface_id)
- self.assertEqual(num_found, num_iface_ids)
-
- def _attach_interface(self, vif):
- self.conn.attach_interface(self.instance, self.image, vif)
- self._validate_interfaces(vif['id'], 1, 2)
-
- def test_attach_interface(self):
- self._create_vm()
- vif = self._create_vif()
- self._attach_interface(vif)
-
- def test_attach_interface_with_exception(self):
- self._create_vm()
- vif = self._create_vif()
-
- with mock.patch.object(self.conn._session, '_wait_for_task',
- side_effect=Exception):
- self.assertRaises(exception.InterfaceAttachFailed,
- self.conn.attach_interface,
- self.instance, self.image, vif)
-
- @mock.patch.object(vif, 'get_network_device',
- return_value='fake_device')
- def _detach_interface(self, vif, mock_get_device):
- self._create_vm()
- self._attach_interface(vif)
- self.conn.detach_interface(self.instance, vif)
- self._validate_interfaces('free', 1, 2)
-
- def test_detach_interface(self):
- vif = self._create_vif()
- self._detach_interface(vif)
-
- def test_detach_interface_and_attach(self):
- vif = self._create_vif()
- self._detach_interface(vif)
- self.conn.attach_interface(self.instance, self.image, vif)
- self._validate_interfaces(vif['id'], 1, 2)
-
- def test_detach_interface_no_device(self):
- self._create_vm()
- vif = self._create_vif()
- self._attach_interface(vif)
- self.assertRaises(exception.NotFound, self.conn.detach_interface,
- self.instance, vif)
-
- def test_detach_interface_no_vif_match(self):
- self._create_vm()
- vif = self._create_vif()
- self._attach_interface(vif)
- vif['id'] = 'bad-id'
- self.assertRaises(exception.NotFound, self.conn.detach_interface,
- self.instance, vif)
-
- @mock.patch.object(vif, 'get_network_device',
- return_value='fake_device')
- def test_detach_interface_with_exception(self, mock_get_device):
- self._create_vm()
- vif = self._create_vif()
- self._attach_interface(vif)
-
- with mock.patch.object(self.conn._session, '_wait_for_task',
- side_effect=Exception):
- self.assertRaises(exception.InterfaceDetachFailed,
- self.conn.detach_interface,
- self.instance, vif)
-
- def test_migrate_disk_and_power_off(self):
- def fake_update_instance_progress(context, instance, step,
- total_steps):
- pass
-
- def fake_get_host_ref_from_name(dest):
- return None
-
- self._create_vm(instance_type='m1.large')
- vm_ref_orig = vm_util.get_vm_ref(self.conn._session, self.instance)
- flavor = self._get_instance_type_by_name('m1.large')
- self.stubs.Set(self.conn._vmops, "_update_instance_progress",
- fake_update_instance_progress)
- self.stubs.Set(self.conn._vmops, "_get_host_ref_from_name",
- fake_get_host_ref_from_name)
- self.conn.migrate_disk_and_power_off(self.context, self.instance,
- 'fake_dest', flavor,
- None)
- vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
- self.assertNotEqual(vm_ref_orig.value, vm_ref.value,
- "These should be different")
-
- def test_disassociate_vmref_from_instance(self):
- self._create_vm()
- vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
- vm_util.disassociate_vmref_from_instance(self.conn._session,
- self.instance, vm_ref, "-backup")
- self.assertRaises(exception.InstanceNotFound,
- vm_util.get_vm_ref, self.conn._session, self.instance)
-
- def test_clone_vmref_for_instance(self):
- self._create_vm()
- vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
- vm_util.disassociate_vmref_from_instance(self.conn._session,
- self.instance, vm_ref, "-backup")
- host_ref = vmwareapi_fake._get_object_refs("HostSystem")[0]
- ds_ref = vmwareapi_fake._get_object_refs("Datastore")[0]
- dc_obj = vmwareapi_fake._get_objects("Datacenter").objects[0]
- vm_util.clone_vmref_for_instance(self.conn._session, self.instance,
- vm_ref, host_ref, ds_ref,
- dc_obj.get("vmFolder"))
- self.assertIsNotNone(
- vm_util.get_vm_ref(self.conn._session, self.instance),
- "No VM found")
- cloned_vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
- self.assertNotEqual(vm_ref.value, cloned_vm_ref.value,
- "Reference for the cloned VM should be different")
- vm_obj = vmwareapi_fake._get_vm_mdo(vm_ref)
- cloned_vm_obj = vmwareapi_fake._get_vm_mdo(cloned_vm_ref)
- self.assertEqual(vm_obj.name, self.instance['uuid'] + "-backup",
- "Original VM name should be with suffix -backup")
- self.assertEqual(cloned_vm_obj.name, self.instance['uuid'],
- "VM name does not match instance['uuid']")
- self.assertRaises(vexc.MissingParameter,
- vm_util.clone_vmref_for_instance, self.conn._session,
- self.instance, None, host_ref, ds_ref,
- dc_obj.get("vmFolder"))
-
- def test_associate_vmref_for_instance(self):
- self._create_vm()
- vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
- # First disassociate the VM from the instance so that we have a VM
- # to later associate using the associate_vmref_for_instance method
- vm_util.disassociate_vmref_from_instance(self.conn._session,
- self.instance, vm_ref, "-backup")
- # Ensure that the VM is indeed disassociated and that we cannot find
- # the VM using the get_vm_ref method
- self.assertRaises(exception.InstanceNotFound,
- vm_util.get_vm_ref, self.conn._session, self.instance)
- # Associate the VM back to the instance
- vm_util.associate_vmref_for_instance(self.conn._session, self.instance,
- suffix="-backup")
- # Verify if we can get the VM reference
- self.assertIsNotNone(
- vm_util.get_vm_ref(self.conn._session, self.instance),
- "No VM found")
-
- def test_confirm_migration(self):
- self._create_vm()
- self.conn.confirm_migration(self.context, self.instance, None)
-
- def test_resize_to_smaller_disk(self):
- self._create_vm(instance_type='m1.large')
- flavor = self._get_instance_type_by_name('m1.small')
- self.assertRaises(exception.InstanceFaultRollback,
- self.conn.migrate_disk_and_power_off, self.context,
- self.instance, 'fake_dest', flavor, None)
-
- def test_spawn_attach_volume_vmdk(self):
- self._spawn_attach_volume_vmdk(vc_support=True)
-
- def test_spawn_attach_volume_vmdk_no_image_ref(self):
- self._spawn_attach_volume_vmdk(set_image_ref=False, vc_support=True)
-
- def test_pause(self):
- # Tests that the VMwareVCDriver does not implement the pause method.
- self._create_instance()
- self.assertRaises(NotImplementedError, self.conn.pause, self.instance)
-
- def test_unpause(self):
- # Tests that the VMwareVCDriver does not implement the unpause method.
- self._create_instance()
- self.assertRaises(NotImplementedError, self.conn.unpause,
- self.instance)
-
- def test_datastore_dc_map(self):
- vmops = self.conn._resources[self.node_name]['vmops']
- self.assertEqual({}, vmops._datastore_dc_mapping)
- self._create_vm()
- # currently there are 2 data stores
- self.assertEqual(2, len(vmops._datastore_dc_mapping))
-
- def test_rollback_live_migration_at_destination(self):
- with mock.patch.object(self.conn, "destroy") as mock_destroy:
- self.conn.rollback_live_migration_at_destination(self.context,
- "instance", [], None)
- mock_destroy.assert_called_once_with(self.context,
- "instance", [], None)
-
- def test_get_instance_disk_info_is_implemented(self):
- # Ensure that the method has been implemented in the driver
- try:
- disk_info = self.conn.get_instance_disk_info('fake_instance_name')
- self.assertIsNone(disk_info)
- except NotImplementedError:
- self.fail("test_get_instance_disk_info() should not raise "
- "NotImplementedError")
-
- def test_destroy(self):
- self._create_vm()
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- instances = self.conn.list_instances()
- self.assertEqual(1, len(instances))
- self.conn.destroy(self.context, self.instance, self.network_info)
- instances = self.conn.list_instances()
- self.assertEqual(0, len(instances))
- self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
-
- def test_destroy_no_datastore(self):
- self._create_vm()
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
- instances = self.conn.list_instances()
- self.assertEqual(1, len(instances))
- # Overwrite the vmPathName
- vm = self._get_vm_record()
- vm.set("config.files.vmPathName", None)
- self.conn.destroy(self.context, self.instance, self.network_info)
- instances = self.conn.list_instances()
- self.assertEqual(0, len(instances))
-
- def test_destroy_non_existent(self):
- self.destroy_disks = True
- with mock.patch.object(self.conn._vmops,
- "destroy") as mock_destroy:
- self._create_instance()
- self.conn.destroy(self.context, self.instance,
- self.network_info,
- None, self.destroy_disks)
- mock_destroy.assert_called_once_with(self.instance,
- self.destroy_disks)
-
- def test_destroy_instance_without_compute(self):
- self.destroy_disks = True
- with mock.patch.object(self.conn._vmops,
- "destroy") as mock_destroy:
- self.conn.destroy(self.context, self.instance_without_compute,
- self.network_info,
- None, self.destroy_disks)
- self.assertFalse(mock_destroy.called)
-
- def test_get_host_uptime(self):
- self.assertRaises(NotImplementedError,
- self.conn.get_host_uptime, 'host')
-
- def _test_finish_migration(self, power_on, resize_instance=False):
- """Tests the finish_migration method on VC Driver."""
- # setup the test instance in the database
- self._create_vm()
- if resize_instance:
- self.instance.system_metadata = {'old_instance_type_root_gb': '0'}
- vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
- datastore = ds_util.Datastore(ref='fake-ref', name='fake')
- dc_info = vmops.DcInfo(ref='fake_ref', name='fake',
- vmFolder='fake_folder')
- with contextlib.nested(
- mock.patch.object(self.conn._session, "_call_method",
- return_value='fake-task'),
- mock.patch.object(self.conn._vmops,
- "_update_instance_progress"),
- mock.patch.object(self.conn._session, "_wait_for_task"),
- mock.patch.object(vm_util, "get_vm_resize_spec",
- return_value='fake-spec'),
- mock.patch.object(ds_util, "get_datastore",
- return_value=datastore),
- mock.patch.object(self.conn._vmops,
- 'get_datacenter_ref_and_name',
- return_value=dc_info),
- mock.patch.object(self.conn._vmops, '_extend_virtual_disk'),
- mock.patch.object(vm_util, "power_on_instance")
- ) as (fake_call_method, fake_update_instance_progress,
- fake_wait_for_task, fake_vm_resize_spec,
- fake_get_datastore, fake_get_datacenter_ref_and_name,
- fake_extend_virtual_disk, fake_power_on):
- self.conn.finish_migration(context=self.context,
- migration=None,
- instance=self.instance,
- disk_info=None,
- network_info=None,
- block_device_info=None,
- resize_instance=resize_instance,
- image_meta=None,
- power_on=power_on)
- if resize_instance:
- fake_vm_resize_spec.assert_called_once_with(
- self.conn._session.vim.client.factory,
- self.instance)
- fake_call_method.assert_any_call(
- self.conn._session.vim,
- "ReconfigVM_Task",
- vm_ref,
- spec='fake-spec')
- fake_wait_for_task.assert_called_once_with('fake-task')
- fake_extend_virtual_disk.assert_called_once_with(
- self.instance, self.instance['root_gb'] * units.Mi,
- None, dc_info.ref)
- else:
- self.assertFalse(fake_vm_resize_spec.called)
- self.assertFalse(fake_call_method.called)
- self.assertFalse(fake_wait_for_task.called)
- self.assertFalse(fake_extend_virtual_disk.called)
-
- if power_on:
- fake_power_on.assert_called_once_with(self.conn._session,
- self.instance,
- vm_ref=vm_ref)
- else:
- self.assertFalse(fake_power_on.called)
- fake_update_instance_progress.called_once_with(
- self.context, self.instance, 4, vmops.RESIZE_TOTAL_STEPS)
-
- def test_finish_migration_power_on(self):
- self._test_finish_migration(power_on=True)
-
- def test_finish_migration_power_off(self):
- self._test_finish_migration(power_on=False)
-
- def test_finish_migration_power_on_resize(self):
- self._test_finish_migration(power_on=True,
- resize_instance=True)
-
- @mock.patch.object(vm_util, 'associate_vmref_for_instance')
- @mock.patch.object(vm_util, 'power_on_instance')
- def _test_finish_revert_migration(self, fake_power_on,
- fake_associate_vmref, power_on):
- """Tests the finish_revert_migration method on VC Driver."""
-
- # setup the test instance in the database
- self._create_instance()
- self.conn.finish_revert_migration(self.context,
- instance=self.instance,
- network_info=None,
- block_device_info=None,
- power_on=power_on)
- fake_associate_vmref.assert_called_once_with(self.conn._session,
- self.instance,
- suffix='-orig')
- if power_on:
- fake_power_on.assert_called_once_with(self.conn._session,
- self.instance)
- else:
- self.assertFalse(fake_power_on.called)
-
- def test_finish_revert_migration_power_on(self):
- self._test_finish_revert_migration(power_on=True)
-
- def test_finish_revert_migration_power_off(self):
- self._test_finish_revert_migration(power_on=False)
-
- def test_pbm_wsdl_location(self):
- self.flags(pbm_enabled=True,
- pbm_wsdl_location='fira',
- group='vmware')
- self.conn._update_pbm_location()
- self.assertEqual('fira', self.conn._session._pbm_wsdl_loc)
- self.assertIsNone(self.conn._session._pbm)
diff --git a/nova/tests/virt/vmwareapi/test_ds_util.py b/nova/tests/virt/vmwareapi/test_ds_util.py
deleted file mode 100644
index 0ad8ef213d..0000000000
--- a/nova/tests/virt/vmwareapi/test_ds_util.py
+++ /dev/null
@@ -1,548 +0,0 @@
-# Copyright (c) 2014 VMware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-import re
-
-import mock
-from oslo.utils import units
-from oslo.vmware import exceptions as vexc
-from testtools import matchers
-
-from nova import exception
-from nova.i18n import _
-from nova import test
-from nova.tests.virt.vmwareapi import fake
-from nova.virt.vmwareapi import ds_util
-
-
-class DsUtilTestCase(test.NoDBTestCase):
- def setUp(self):
- super(DsUtilTestCase, self).setUp()
- self.session = fake.FakeSession()
- self.flags(api_retry_count=1, group='vmware')
- fake.reset()
-
- def tearDown(self):
- super(DsUtilTestCase, self).tearDown()
- fake.reset()
-
- def test_file_delete(self):
- def fake_call_method(module, method, *args, **kwargs):
- self.assertEqual('DeleteDatastoreFile_Task', method)
- name = kwargs.get('name')
- self.assertEqual('[ds] fake/path', name)
- datacenter = kwargs.get('datacenter')
- self.assertEqual('fake-dc-ref', datacenter)
- return 'fake_delete_task'
-
- with contextlib.nested(
- mock.patch.object(self.session, '_wait_for_task'),
- mock.patch.object(self.session, '_call_method',
- fake_call_method)
- ) as (_wait_for_task, _call_method):
- ds_path = ds_util.DatastorePath('ds', 'fake/path')
- ds_util.file_delete(self.session,
- ds_path, 'fake-dc-ref')
- _wait_for_task.assert_has_calls([
- mock.call('fake_delete_task')])
-
- def test_file_move(self):
- def fake_call_method(module, method, *args, **kwargs):
- self.assertEqual('MoveDatastoreFile_Task', method)
- sourceName = kwargs.get('sourceName')
- self.assertEqual('[ds] tmp/src', sourceName)
- destinationName = kwargs.get('destinationName')
- self.assertEqual('[ds] base/dst', destinationName)
- sourceDatacenter = kwargs.get('sourceDatacenter')
- self.assertEqual('fake-dc-ref', sourceDatacenter)
- destinationDatacenter = kwargs.get('destinationDatacenter')
- self.assertEqual('fake-dc-ref', destinationDatacenter)
- return 'fake_move_task'
-
- with contextlib.nested(
- mock.patch.object(self.session, '_wait_for_task'),
- mock.patch.object(self.session, '_call_method',
- fake_call_method)
- ) as (_wait_for_task, _call_method):
- src_ds_path = ds_util.DatastorePath('ds', 'tmp/src')
- dst_ds_path = ds_util.DatastorePath('ds', 'base/dst')
- ds_util.file_move(self.session,
- 'fake-dc-ref', src_ds_path, dst_ds_path)
- _wait_for_task.assert_has_calls([
- mock.call('fake_move_task')])
-
- def test_mkdir(self):
- def fake_call_method(module, method, *args, **kwargs):
- self.assertEqual('MakeDirectory', method)
- name = kwargs.get('name')
- self.assertEqual('[ds] fake/path', name)
- datacenter = kwargs.get('datacenter')
- self.assertEqual('fake-dc-ref', datacenter)
- createParentDirectories = kwargs.get('createParentDirectories')
- self.assertTrue(createParentDirectories)
-
- with mock.patch.object(self.session, '_call_method',
- fake_call_method):
- ds_path = ds_util.DatastorePath('ds', 'fake/path')
- ds_util.mkdir(self.session, ds_path, 'fake-dc-ref')
-
- def test_file_exists(self):
- def fake_call_method(module, method, *args, **kwargs):
- if method == 'SearchDatastore_Task':
- ds_browser = args[0]
- self.assertEqual('fake-browser', ds_browser)
- datastorePath = kwargs.get('datastorePath')
- self.assertEqual('[ds] fake/path', datastorePath)
- return 'fake_exists_task'
-
- # Should never get here
- self.fail()
-
- def fake_wait_for_task(task_ref):
- if task_ref == 'fake_exists_task':
- result_file = fake.DataObject()
- result_file.path = 'fake-file'
-
- result = fake.DataObject()
- result.file = [result_file]
- result.path = '[ds] fake/path'
-
- task_info = fake.DataObject()
- task_info.result = result
-
- return task_info
-
- # Should never get here
- self.fail()
-
- with contextlib.nested(
- mock.patch.object(self.session, '_call_method',
- fake_call_method),
- mock.patch.object(self.session, '_wait_for_task',
- fake_wait_for_task)):
- ds_path = ds_util.DatastorePath('ds', 'fake/path')
- file_exists = ds_util.file_exists(self.session,
- 'fake-browser', ds_path, 'fake-file')
- self.assertTrue(file_exists)
-
- def test_file_exists_fails(self):
- def fake_call_method(module, method, *args, **kwargs):
- if method == 'SearchDatastore_Task':
- return 'fake_exists_task'
-
- # Should never get here
- self.fail()
-
- def fake_wait_for_task(task_ref):
- if task_ref == 'fake_exists_task':
- raise vexc.FileNotFoundException()
-
- # Should never get here
- self.fail()
-
- with contextlib.nested(
- mock.patch.object(self.session, '_call_method',
- fake_call_method),
- mock.patch.object(self.session, '_wait_for_task',
- fake_wait_for_task)):
- ds_path = ds_util.DatastorePath('ds', 'fake/path')
- file_exists = ds_util.file_exists(self.session,
- 'fake-browser', ds_path, 'fake-file')
- self.assertFalse(file_exists)
-
- def _mock_get_datastore_calls(self, *datastores):
- """Mock vim_util calls made by get_datastore."""
-
- datastores_i = [None]
-
- # For the moment, at least, this list of datastores is simply passed to
- # get_properties_for_a_collection_of_objects, which we mock below. We
- # don't need to over-complicate the fake function by worrying about its
- # contents.
- fake_ds_list = ['fake-ds']
-
- def fake_call_method(module, method, *args, **kwargs):
- # Mock the call which returns a list of datastores for the cluster
- if (module == ds_util.vim_util and
- method == 'get_dynamic_property' and
- args == ('fake-cluster', 'ClusterComputeResource',
- 'datastore')):
- fake_ds_mor = fake.DataObject()
- fake_ds_mor.ManagedObjectReference = fake_ds_list
- return fake_ds_mor
-
- # Return the datastore result sets we were passed in, in the order
- # given
- if (module == ds_util.vim_util and
- method == 'get_properties_for_a_collection_of_objects' and
- args[0] == 'Datastore' and
- args[1] == fake_ds_list):
- # Start a new iterator over given datastores
- datastores_i[0] = iter(datastores)
- return datastores_i[0].next()
-
- # Continue returning results from the current iterator.
- if (module == ds_util.vim_util and
- method == 'continue_to_get_objects'):
- try:
- return datastores_i[0].next()
- except StopIteration:
- return None
-
- # Sentinel that get_datastore's use of vim has changed
- self.fail('Unexpected vim call in get_datastore: %s' % method)
-
- return mock.patch.object(self.session, '_call_method',
- side_effect=fake_call_method)
-
- def test_get_datastore(self):
- fake_objects = fake.FakeRetrieveResult()
- fake_objects.add_object(fake.Datastore())
- fake_objects.add_object(fake.Datastore("fake-ds-2", 2048, 1000,
- False, "normal"))
- fake_objects.add_object(fake.Datastore("fake-ds-3", 4096, 2000,
- True, "inMaintenance"))
-
- with self._mock_get_datastore_calls(fake_objects):
- result = ds_util.get_datastore(self.session, 'fake-cluster')
- self.assertEqual("fake-ds", result.name)
- self.assertEqual(units.Ti, result.capacity)
- self.assertEqual(500 * units.Gi, result.freespace)
-
- def test_get_datastore_with_regex(self):
- # Test with a regex that matches with a datastore
- datastore_valid_regex = re.compile("^openstack.*\d$")
- fake_objects = fake.FakeRetrieveResult()
- fake_objects.add_object(fake.Datastore("openstack-ds0"))
- fake_objects.add_object(fake.Datastore("fake-ds0"))
- fake_objects.add_object(fake.Datastore("fake-ds1"))
-
- with self._mock_get_datastore_calls(fake_objects):
- result = ds_util.get_datastore(self.session, 'fake-cluster',
- datastore_valid_regex)
- self.assertEqual("openstack-ds0", result.name)
-
- def test_get_datastore_with_token(self):
- regex = re.compile("^ds.*\d$")
- fake0 = fake.FakeRetrieveResult()
- fake0.add_object(fake.Datastore("ds0", 10 * units.Gi, 5 * units.Gi))
- fake0.add_object(fake.Datastore("foo", 10 * units.Gi, 9 * units.Gi))
- setattr(fake0, 'token', 'token-0')
- fake1 = fake.FakeRetrieveResult()
- fake1.add_object(fake.Datastore("ds2", 10 * units.Gi, 8 * units.Gi))
- fake1.add_object(fake.Datastore("ds3", 10 * units.Gi, 1 * units.Gi))
-
- with self._mock_get_datastore_calls(fake0, fake1):
- result = ds_util.get_datastore(self.session, 'fake-cluster', regex)
- self.assertEqual("ds2", result.name)
-
- def test_get_datastore_with_list(self):
- # Test with a regex containing whitelist of datastores
- datastore_valid_regex = re.compile("(openstack-ds0|openstack-ds2)")
- fake_objects = fake.FakeRetrieveResult()
- fake_objects.add_object(fake.Datastore("openstack-ds0"))
- fake_objects.add_object(fake.Datastore("openstack-ds1"))
- fake_objects.add_object(fake.Datastore("openstack-ds2"))
-
- with self._mock_get_datastore_calls(fake_objects):
- result = ds_util.get_datastore(self.session, 'fake-cluster',
- datastore_valid_regex)
- self.assertNotEqual("openstack-ds1", result.name)
-
- def test_get_datastore_with_regex_error(self):
- # Test with a regex that has no match
- # Checks if code raises DatastoreNotFound with a specific message
- datastore_invalid_regex = re.compile("unknown-ds")
- exp_message = (_("Datastore regex %s did not match any datastores")
- % datastore_invalid_regex.pattern)
- fake_objects = fake.FakeRetrieveResult()
- fake_objects.add_object(fake.Datastore("fake-ds0"))
- fake_objects.add_object(fake.Datastore("fake-ds1"))
- # assertRaisesRegExp would have been a good choice instead of
- # try/catch block, but it's available only from Py 2.7.
- try:
- with self._mock_get_datastore_calls(fake_objects):
- ds_util.get_datastore(self.session, 'fake-cluster',
- datastore_invalid_regex)
- except exception.DatastoreNotFound as e:
- self.assertEqual(exp_message, e.args[0])
- else:
- self.fail("DatastoreNotFound Exception was not raised with "
- "message: %s" % exp_message)
-
- def test_get_datastore_without_datastore(self):
- self.assertRaises(exception.DatastoreNotFound,
- ds_util.get_datastore,
- fake.FakeObjectRetrievalSession(None), cluster="fake-cluster")
-
- def test_get_datastore_inaccessible_ds(self):
- data_store = fake.Datastore()
- data_store.set("summary.accessible", False)
-
- fake_objects = fake.FakeRetrieveResult()
- fake_objects.add_object(data_store)
-
- with self._mock_get_datastore_calls(fake_objects):
- self.assertRaises(exception.DatastoreNotFound,
- ds_util.get_datastore,
- self.session, 'fake-cluster')
-
- def test_get_datastore_ds_in_maintenance(self):
- data_store = fake.Datastore()
- data_store.set("summary.maintenanceMode", "inMaintenance")
-
- fake_objects = fake.FakeRetrieveResult()
- fake_objects.add_object(data_store)
-
- with self._mock_get_datastore_calls(fake_objects):
- self.assertRaises(exception.DatastoreNotFound,
- ds_util.get_datastore,
- self.session, 'fake-cluster')
-
- def test_get_datastore_no_host_in_cluster(self):
- def fake_call_method(module, method, *args, **kwargs):
- return ''
-
- with mock.patch.object(self.session, '_call_method',
- fake_call_method):
- self.assertRaises(exception.DatastoreNotFound,
- ds_util.get_datastore,
- self.session, 'fake-cluster')
-
- def _test_is_datastore_valid(self, accessible=True,
- maintenance_mode="normal",
- type="VMFS",
- datastore_regex=None):
- propdict = {}
- propdict["summary.accessible"] = accessible
- propdict["summary.maintenanceMode"] = maintenance_mode
- propdict["summary.type"] = type
- propdict["summary.name"] = "ds-1"
-
- return ds_util._is_datastore_valid(propdict, datastore_regex)
-
- def test_is_datastore_valid(self):
- for ds_type in ds_util.ALLOWED_DATASTORE_TYPES:
- self.assertTrue(self._test_is_datastore_valid(True,
- "normal",
- ds_type))
-
- def test_is_datastore_valid_inaccessible_ds(self):
- self.assertFalse(self._test_is_datastore_valid(False,
- "normal",
- "VMFS"))
-
- def test_is_datastore_valid_ds_in_maintenance(self):
- self.assertFalse(self._test_is_datastore_valid(True,
- "inMaintenance",
- "VMFS"))
-
- def test_is_datastore_valid_ds_type_invalid(self):
- self.assertFalse(self._test_is_datastore_valid(True,
- "normal",
- "vfat"))
-
- def test_is_datastore_valid_not_matching_regex(self):
- datastore_regex = re.compile("ds-2")
- self.assertFalse(self._test_is_datastore_valid(True,
- "normal",
- "VMFS",
- datastore_regex))
-
- def test_is_datastore_valid_matching_regex(self):
- datastore_regex = re.compile("ds-1")
- self.assertTrue(self._test_is_datastore_valid(True,
- "normal",
- "VMFS",
- datastore_regex))
-
-
-class DatastoreTestCase(test.NoDBTestCase):
- def test_ds(self):
- ds = ds_util.Datastore(
- "fake_ref", "ds_name", 2 * units.Gi, 1 * units.Gi)
- self.assertEqual('ds_name', ds.name)
- self.assertEqual('fake_ref', ds.ref)
- self.assertEqual(2 * units.Gi, ds.capacity)
- self.assertEqual(1 * units.Gi, ds.freespace)
-
- def test_ds_invalid_space(self):
- self.assertRaises(ValueError, ds_util.Datastore,
- "fake_ref", "ds_name", 1 * units.Gi, 2 * units.Gi)
- self.assertRaises(ValueError, ds_util.Datastore,
- "fake_ref", "ds_name", None, 2 * units.Gi)
-
- def test_ds_no_capacity_no_freespace(self):
- ds = ds_util.Datastore("fake_ref", "ds_name")
- self.assertIsNone(ds.capacity)
- self.assertIsNone(ds.freespace)
-
- def test_ds_invalid(self):
- self.assertRaises(ValueError, ds_util.Datastore, None, "ds_name")
- self.assertRaises(ValueError, ds_util.Datastore, "fake_ref", None)
-
- def test_build_path(self):
- ds = ds_util.Datastore("fake_ref", "ds_name")
- ds_path = ds.build_path("some_dir", "foo.vmdk")
- self.assertEqual('[ds_name] some_dir/foo.vmdk', str(ds_path))
-
-
-class DatastorePathTestCase(test.NoDBTestCase):
-
- def test_ds_path(self):
- p = ds_util.DatastorePath('dsname', 'a/b/c', 'file.iso')
- self.assertEqual('[dsname] a/b/c/file.iso', str(p))
- self.assertEqual('a/b/c/file.iso', p.rel_path)
- self.assertEqual('a/b/c', p.parent.rel_path)
- self.assertEqual('[dsname] a/b/c', str(p.parent))
- self.assertEqual('dsname', p.datastore)
- self.assertEqual('file.iso', p.basename)
- self.assertEqual('a/b/c', p.dirname)
-
- def test_ds_path_no_ds_name(self):
- bad_args = [
- ('', ['a/b/c', 'file.iso']),
- (None, ['a/b/c', 'file.iso'])]
- for t in bad_args:
- self.assertRaises(
- ValueError, ds_util.DatastorePath,
- t[0], *t[1])
-
- def test_ds_path_invalid_path_components(self):
- bad_args = [
- ('dsname', [None]),
- ('dsname', ['', None]),
- ('dsname', ['a', None]),
- ('dsname', ['a', None, 'b']),
- ('dsname', [None, '']),
- ('dsname', [None, 'b'])]
-
- for t in bad_args:
- self.assertRaises(
- ValueError, ds_util.DatastorePath,
- t[0], *t[1])
-
- def test_ds_path_no_subdir(self):
- args = [
- ('dsname', ['', 'x.vmdk']),
- ('dsname', ['x.vmdk'])]
-
- canonical_p = ds_util.DatastorePath('dsname', 'x.vmdk')
- self.assertEqual('[dsname] x.vmdk', str(canonical_p))
- self.assertEqual('', canonical_p.dirname)
- self.assertEqual('x.vmdk', canonical_p.basename)
- self.assertEqual('x.vmdk', canonical_p.rel_path)
- for t in args:
- p = ds_util.DatastorePath(t[0], *t[1])
- self.assertEqual(str(canonical_p), str(p))
-
- def test_ds_path_ds_only(self):
- args = [
- ('dsname', []),
- ('dsname', ['']),
- ('dsname', ['', ''])]
-
- canonical_p = ds_util.DatastorePath('dsname')
- self.assertEqual('[dsname]', str(canonical_p))
- self.assertEqual('', canonical_p.rel_path)
- self.assertEqual('', canonical_p.basename)
- self.assertEqual('', canonical_p.dirname)
- for t in args:
- p = ds_util.DatastorePath(t[0], *t[1])
- self.assertEqual(str(canonical_p), str(p))
- self.assertEqual(canonical_p.rel_path, p.rel_path)
-
- def test_ds_path_equivalence(self):
- args = [
- ('dsname', ['a/b/c/', 'x.vmdk']),
- ('dsname', ['a/', 'b/c/', 'x.vmdk']),
- ('dsname', ['a', 'b', 'c', 'x.vmdk']),
- ('dsname', ['a/b/c', 'x.vmdk'])]
-
- canonical_p = ds_util.DatastorePath('dsname', 'a/b/c', 'x.vmdk')
- for t in args:
- p = ds_util.DatastorePath(t[0], *t[1])
- self.assertEqual(str(canonical_p), str(p))
- self.assertEqual(canonical_p.datastore, p.datastore)
- self.assertEqual(canonical_p.rel_path, p.rel_path)
- self.assertEqual(str(canonical_p.parent), str(p.parent))
-
- def test_ds_path_non_equivalence(self):
- args = [
- # leading slash
- ('dsname', ['/a', 'b', 'c', 'x.vmdk']),
- ('dsname', ['/a/b/c/', 'x.vmdk']),
- ('dsname', ['a/b/c', '/x.vmdk']),
- # leading space
- ('dsname', ['a/b/c/', ' x.vmdk']),
- ('dsname', ['a/', ' b/c/', 'x.vmdk']),
- ('dsname', [' a', 'b', 'c', 'x.vmdk']),
- # trailing space
- ('dsname', ['/a/b/c/', 'x.vmdk ']),
- ('dsname', ['a/b/c/ ', 'x.vmdk'])]
-
- canonical_p = ds_util.DatastorePath('dsname', 'a/b/c', 'x.vmdk')
- for t in args:
- p = ds_util.DatastorePath(t[0], *t[1])
- self.assertNotEqual(str(canonical_p), str(p))
-
- def test_ds_path_hashable(self):
- ds1 = ds_util.DatastorePath('dsname', 'path')
- ds2 = ds_util.DatastorePath('dsname', 'path')
-
- # If the above objects have the same hash, they will only be added to
- # the set once
- self.assertThat(set([ds1, ds2]), matchers.HasLength(1))
-
- def test_equal(self):
- a = ds_util.DatastorePath('ds_name', 'a')
- b = ds_util.DatastorePath('ds_name', 'a')
- self.assertEqual(a, b)
-
- def test_join(self):
- p = ds_util.DatastorePath('ds_name', 'a')
- ds_path = p.join('b')
- self.assertEqual('[ds_name] a/b', str(ds_path))
-
- p = ds_util.DatastorePath('ds_name', 'a')
- ds_path = p.join()
- self.assertEqual('[ds_name] a', str(ds_path))
-
- bad_args = [
- [None],
- ['', None],
- ['a', None],
- ['a', None, 'b']]
- for arg in bad_args:
- self.assertRaises(ValueError, p.join, *arg)
-
- def test_ds_path_parse(self):
- p = ds_util.DatastorePath.parse('[dsname]')
- self.assertEqual('dsname', p.datastore)
- self.assertEqual('', p.rel_path)
-
- p = ds_util.DatastorePath.parse('[dsname] folder')
- self.assertEqual('dsname', p.datastore)
- self.assertEqual('folder', p.rel_path)
-
- p = ds_util.DatastorePath.parse('[dsname] folder/file')
- self.assertEqual('dsname', p.datastore)
- self.assertEqual('folder/file', p.rel_path)
-
- for p in [None, '']:
- self.assertRaises(ValueError, ds_util.DatastorePath.parse, p)
-
- for p in ['bad path', '/a/b/c', 'a/b/c']:
- self.assertRaises(IndexError, ds_util.DatastorePath.parse, p)
diff --git a/nova/tests/virt/vmwareapi/test_imagecache.py b/nova/tests/virt/vmwareapi/test_imagecache.py
deleted file mode 100644
index 9f4fa91376..0000000000
--- a/nova/tests/virt/vmwareapi/test_imagecache.py
+++ /dev/null
@@ -1,277 +0,0 @@
-# Copyright (c) 2014 VMware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-import datetime
-
-import mock
-from oslo.config import cfg
-from oslo.utils import timeutils
-
-from nova import test
-from nova.tests import fake_instance
-from nova.tests.virt.vmwareapi import fake
-from nova.virt.vmwareapi import ds_util
-from nova.virt.vmwareapi import imagecache
-from nova.virt.vmwareapi import vim_util
-from nova.virt.vmwareapi import vmops
-
-CONF = cfg.CONF
-
-
-class ImageCacheManagerTestCase(test.NoDBTestCase):
-
- REQUIRES_LOCKING = True
-
- def setUp(self):
- super(ImageCacheManagerTestCase, self).setUp()
- self._session = mock.Mock(name='session')
- self._imagecache = imagecache.ImageCacheManager(self._session,
- 'fake-base-folder')
- self._time = datetime.datetime(2012, 11, 22, 12, 00, 00)
- self._file_name = 'ts-2012-11-22-12-00-00'
- fake.reset()
-
- def tearDown(self):
- super(ImageCacheManagerTestCase, self).tearDown()
- fake.reset()
-
- def test_timestamp_cleanup(self):
- def fake_get_timestamp(ds_browser, ds_path):
- self.assertEqual('fake-ds-browser', ds_browser)
- self.assertEqual('[fake-ds] fake-path', str(ds_path))
- if not self.exists:
- return
- ts = '%s%s' % (imagecache.TIMESTAMP_PREFIX,
- timeutils.strtime(at=self._time,
- fmt=imagecache.TIMESTAMP_FORMAT))
- return ts
-
- with contextlib.nested(
- mock.patch.object(self._imagecache, '_get_timestamp',
- fake_get_timestamp),
- mock.patch.object(ds_util, 'file_delete')
- ) as (_get_timestamp, _file_delete):
- self.exists = False
- self._imagecache.timestamp_cleanup(
- 'fake-dc-ref', 'fake-ds-browser',
- ds_util.DatastorePath('fake-ds', 'fake-path'))
- self.assertEqual(0, _file_delete.call_count)
- self.exists = True
- self._imagecache.timestamp_cleanup(
- 'fake-dc-ref', 'fake-ds-browser',
- ds_util.DatastorePath('fake-ds', 'fake-path'))
- expected_ds_path = ds_util.DatastorePath(
- 'fake-ds', 'fake-path', self._file_name)
- _file_delete.assert_called_once_with(self._session,
- expected_ds_path, 'fake-dc-ref')
-
- def test_get_timestamp(self):
- def fake_get_sub_folders(session, ds_browser, ds_path):
- self.assertEqual('fake-ds-browser', ds_browser)
- self.assertEqual('[fake-ds] fake-path', str(ds_path))
- if self.exists:
- files = set()
- files.add(self._file_name)
- return files
-
- with contextlib.nested(
- mock.patch.object(ds_util, 'get_sub_folders',
- fake_get_sub_folders)
- ):
- self.exists = True
- ts = self._imagecache._get_timestamp(
- 'fake-ds-browser',
- ds_util.DatastorePath('fake-ds', 'fake-path'))
- self.assertEqual(self._file_name, ts)
- self.exists = False
- ts = self._imagecache._get_timestamp(
- 'fake-ds-browser',
- ds_util.DatastorePath('fake-ds', 'fake-path'))
- self.assertIsNone(ts)
-
- def test_get_timestamp_filename(self):
- timeutils.set_time_override(override_time=self._time)
- fn = self._imagecache._get_timestamp_filename()
- self.assertEqual(self._file_name, fn)
-
- def test_get_datetime_from_filename(self):
- t = self._imagecache._get_datetime_from_filename(self._file_name)
- self.assertEqual(self._time, t)
-
- def test_get_ds_browser(self):
- cache = self._imagecache._ds_browser
- ds_browser = mock.Mock()
- moref = fake.ManagedObjectReference('datastore-100')
- self.assertIsNone(cache.get(moref.value))
- mock_get_method = mock.Mock(return_value=ds_browser)
- with mock.patch.object(vim_util, 'get_dynamic_property',
- mock_get_method):
- ret = self._imagecache._get_ds_browser(moref)
- mock_get_method.assert_called_once_with(mock.ANY, moref,
- 'Datastore', 'browser')
- self.assertIs(ds_browser, ret)
- self.assertIs(ds_browser, cache.get(moref.value))
-
- def test_list_base_images(self):
- def fake_get_dynamic_property(vim, mobj, type, property_name):
- return 'fake-ds-browser'
-
- def fake_get_sub_folders(session, ds_browser, ds_path):
- files = set()
- files.add('image-ref-uuid')
- return files
-
- with contextlib.nested(
- mock.patch.object(vim_util, 'get_dynamic_property',
- fake_get_dynamic_property),
- mock.patch.object(ds_util, 'get_sub_folders',
- fake_get_sub_folders)
- ) as (_get_dynamic, _get_sub_folders):
- fake_ds_ref = fake.ManagedObjectReference('fake-ds-ref')
- datastore = ds_util.Datastore(name='ds', ref=fake_ds_ref)
- ds_path = datastore.build_path('base_folder')
- images = self._imagecache._list_datastore_images(
- ds_path, datastore)
- originals = set()
- originals.add('image-ref-uuid')
- self.assertEqual({'originals': originals,
- 'unexplained_images': []},
- images)
-
- @mock.patch.object(imagecache.ImageCacheManager, 'timestamp_folder_get')
- @mock.patch.object(imagecache.ImageCacheManager, 'timestamp_cleanup')
- @mock.patch.object(imagecache.ImageCacheManager, '_get_ds_browser')
- def test_enlist_image(self,
- mock_get_ds_browser,
- mock_timestamp_cleanup,
- mock_timestamp_folder_get):
- image_id = "fake_image_id"
- dc_ref = "fake_dc_ref"
- fake_ds_ref = mock.Mock()
- ds = ds_util.Datastore(
- ref=fake_ds_ref, name='fake_ds',
- capacity=1,
- freespace=1)
-
- ds_browser = mock.Mock()
- mock_get_ds_browser.return_value = ds_browser
- timestamp_folder_path = mock.Mock()
- mock_timestamp_folder_get.return_value = timestamp_folder_path
-
- self._imagecache.enlist_image(image_id, ds, dc_ref)
-
- cache_root_folder = ds.build_path("fake-base-folder")
- mock_get_ds_browser.assert_called_once_with(
- ds.ref)
- mock_timestamp_folder_get.assert_called_once_with(
- cache_root_folder, "fake_image_id")
- mock_timestamp_cleanup.assert_called_once_with(
- dc_ref, ds_browser, timestamp_folder_path)
-
- def test_age_cached_images(self):
- def fake_get_ds_browser(ds_ref):
- return 'fake-ds-browser'
-
- def fake_get_timestamp(ds_browser, ds_path):
- self._get_timestamp_called += 1
- path = str(ds_path)
- if path == '[fake-ds] fake-path/fake-image-1':
- # No time stamp exists
- return
- if path == '[fake-ds] fake-path/fake-image-2':
- # Timestamp that will be valid => no deletion
- return 'ts-2012-11-22-10-00-00'
- if path == '[fake-ds] fake-path/fake-image-3':
- # Timestamp that will be invalid => deletion
- return 'ts-2012-11-20-12-00-00'
- self.fail()
-
- def fake_mkdir(session, ts_path, dc_ref):
- self.assertEqual(
- '[fake-ds] fake-path/fake-image-1/ts-2012-11-22-12-00-00',
- str(ts_path))
-
- def fake_file_delete(session, ds_path, dc_ref):
- self.assertEqual('[fake-ds] fake-path/fake-image-3', str(ds_path))
-
- def fake_timestamp_cleanup(dc_ref, ds_browser, ds_path):
- self.assertEqual('[fake-ds] fake-path/fake-image-4', str(ds_path))
-
- with contextlib.nested(
- mock.patch.object(self._imagecache, '_get_ds_browser',
- fake_get_ds_browser),
- mock.patch.object(self._imagecache, '_get_timestamp',
- fake_get_timestamp),
- mock.patch.object(ds_util, 'mkdir',
- fake_mkdir),
- mock.patch.object(ds_util, 'file_delete',
- fake_file_delete),
- mock.patch.object(self._imagecache, 'timestamp_cleanup',
- fake_timestamp_cleanup),
- ) as (_get_ds_browser, _get_timestamp, _mkdir, _file_delete,
- _timestamp_cleanup):
- timeutils.set_time_override(override_time=self._time)
- datastore = ds_util.Datastore(name='ds', ref='fake-ds-ref')
- dc_info = vmops.DcInfo(ref='dc_ref', name='name',
- vmFolder='vmFolder')
- self._get_timestamp_called = 0
- self._imagecache.originals = set(['fake-image-1', 'fake-image-2',
- 'fake-image-3', 'fake-image-4'])
- self._imagecache.used_images = set(['fake-image-4'])
- self._imagecache._age_cached_images(
- 'fake-context', datastore, dc_info,
- ds_util.DatastorePath('fake-ds', 'fake-path'))
- self.assertEqual(3, self._get_timestamp_called)
-
- def test_update(self):
- def fake_list_datastore_images(ds_path, datastore):
- return {'unexplained_images': [],
- 'originals': self.images}
-
- def fake_age_cached_images(context, datastore,
- dc_info, ds_path):
- self.assertEqual('[ds] fake-base-folder', str(ds_path))
- self.assertEqual(self.images,
- self._imagecache.used_images)
- self.assertEqual(self.images,
- self._imagecache.originals)
-
- with contextlib.nested(
- mock.patch.object(self._imagecache, '_list_datastore_images',
- fake_list_datastore_images),
- mock.patch.object(self._imagecache,
- '_age_cached_images',
- fake_age_cached_images)
- ) as (_list_base, _age_and_verify):
- instances = [{'image_ref': '1',
- 'host': CONF.host,
- 'name': 'inst-1',
- 'uuid': '123',
- 'vm_state': '',
- 'task_state': ''},
- {'image_ref': '2',
- 'host': CONF.host,
- 'name': 'inst-2',
- 'uuid': '456',
- 'vm_state': '',
- 'task_state': ''}]
- all_instances = [fake_instance.fake_instance_obj(None, **instance)
- for instance in instances]
- self.images = set(['1', '2'])
- datastore = ds_util.Datastore(name='ds', ref='fake-ds-ref')
- dc_info = vmops.DcInfo(ref='dc_ref', name='name',
- vmFolder='vmFolder')
- datastores_info = [(datastore, dc_info)]
- self._imagecache.update('context', all_instances, datastores_info)
diff --git a/nova/tests/virt/vmwareapi/test_images.py b/nova/tests/virt/vmwareapi/test_images.py
deleted file mode 100644
index 613705cf78..0000000000
--- a/nova/tests/virt/vmwareapi/test_images.py
+++ /dev/null
@@ -1,216 +0,0 @@
-# Copyright (c) 2014 VMware, Inc.
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Test suite for images.
-"""
-
-import contextlib
-
-import mock
-from oslo.utils import units
-
-from nova import exception
-from nova import test
-import nova.tests.image.fake
-from nova.virt.vmwareapi import constants
-from nova.virt.vmwareapi import images
-from nova.virt.vmwareapi import read_write_util
-
-
-class VMwareImagesTestCase(test.NoDBTestCase):
- """Unit tests for Vmware API connection calls."""
-
- def test_fetch_image(self):
- """Test fetching images."""
-
- dc_name = 'fake-dc'
- file_path = 'fake_file'
- ds_name = 'ds1'
- host = mock.MagicMock()
- context = mock.MagicMock()
-
- image_data = {
- 'id': nova.tests.image.fake.get_valid_image_id(),
- 'disk_format': 'vmdk',
- 'size': 512,
- }
- read_file_handle = mock.MagicMock()
- write_file_handle = mock.MagicMock()
- read_iter = mock.MagicMock()
- instance = {}
- instance['image_ref'] = image_data['id']
- instance['uuid'] = 'fake-uuid'
-
- def fake_read_handle(read_iter):
- return read_file_handle
-
- def fake_write_handle(host, dc_name, ds_name, cookies,
- file_path, file_size):
- return write_file_handle
-
- with contextlib.nested(
- mock.patch.object(read_write_util, 'GlanceFileRead',
- side_effect=fake_read_handle),
- mock.patch.object(read_write_util, 'VMwareHTTPWriteFile',
- side_effect=fake_write_handle),
- mock.patch.object(images, 'start_transfer'),
- mock.patch.object(images.IMAGE_API, 'get',
- return_value=image_data),
- mock.patch.object(images.IMAGE_API, 'download',
- return_value=read_iter),
- ) as (glance_read, http_write, start_transfer, image_show,
- image_download):
- images.fetch_image(context, instance,
- host, dc_name,
- ds_name, file_path)
-
- glance_read.assert_called_once_with(read_iter)
- http_write.assert_called_once_with(host, dc_name, ds_name, None,
- file_path, image_data['size'])
- start_transfer.assert_called_once_with(
- context, read_file_handle,
- image_data['size'],
- write_file_handle=write_file_handle)
- image_download.assert_called_once_with(context, instance['image_ref'])
- image_show.assert_called_once_with(context, instance['image_ref'])
-
- def _setup_mock_get_remote_image_service(self,
- mock_get_remote_image_service,
- metadata):
- mock_image_service = mock.MagicMock()
- mock_image_service.show.return_value = metadata
- mock_get_remote_image_service.return_value = [mock_image_service, 'i']
-
- def test_from_image_with_image_ref(self):
- raw_disk_size_in_gb = 83
- raw_disk_size_in_bytes = raw_disk_size_in_gb * units.Gi
- image_id = nova.tests.image.fake.get_valid_image_id()
- mdata = {'size': raw_disk_size_in_bytes,
- 'disk_format': 'vmdk',
- 'properties': {
- "vmware_ostype": constants.DEFAULT_OS_TYPE,
- "vmware_adaptertype": constants.DEFAULT_ADAPTER_TYPE,
- "vmware_disktype": constants.DEFAULT_DISK_TYPE,
- "hw_vif_model": constants.DEFAULT_VIF_MODEL,
- images.LINKED_CLONE_PROPERTY: True}}
-
- img_props = images.VMwareImage.from_image(image_id, mdata)
-
- image_size_in_kb = raw_disk_size_in_bytes / units.Ki
-
- # assert that defaults are set and no value returned is left empty
- self.assertEqual(constants.DEFAULT_OS_TYPE, img_props.os_type)
- self.assertEqual(constants.DEFAULT_ADAPTER_TYPE,
- img_props.adapter_type)
- self.assertEqual(constants.DEFAULT_DISK_TYPE, img_props.disk_type)
- self.assertEqual(constants.DEFAULT_VIF_MODEL, img_props.vif_model)
- self.assertTrue(img_props.linked_clone)
- self.assertEqual(image_size_in_kb, img_props.file_size_in_kb)
-
- def _image_build(self, image_lc_setting, global_lc_setting,
- disk_format=constants.DEFAULT_DISK_FORMAT,
- os_type=constants.DEFAULT_OS_TYPE,
- adapter_type=constants.DEFAULT_ADAPTER_TYPE,
- disk_type=constants.DEFAULT_DISK_TYPE,
- vif_model=constants.DEFAULT_VIF_MODEL):
- self.flags(use_linked_clone=global_lc_setting, group='vmware')
- raw_disk_size_in_gb = 93
- raw_disk_size_in_btyes = raw_disk_size_in_gb * units.Gi
-
- image_id = nova.tests.image.fake.get_valid_image_id()
- mdata = {'size': raw_disk_size_in_btyes,
- 'disk_format': disk_format,
- 'properties': {
- "vmware_ostype": os_type,
- "vmware_adaptertype": adapter_type,
- "vmware_disktype": disk_type,
- "hw_vif_model": vif_model}}
-
- if image_lc_setting is not None:
- mdata['properties'][
- images.LINKED_CLONE_PROPERTY] = image_lc_setting
-
- return images.VMwareImage.from_image(image_id, mdata)
-
- def test_use_linked_clone_override_nf(self):
- image_props = self._image_build(None, False)
- self.assertFalse(image_props.linked_clone,
- "No overrides present but still overridden!")
-
- def test_use_linked_clone_override_nt(self):
- image_props = self._image_build(None, True)
- self.assertTrue(image_props.linked_clone,
- "No overrides present but still overridden!")
-
- def test_use_linked_clone_override_ny(self):
- image_props = self._image_build(None, "yes")
- self.assertTrue(image_props.linked_clone,
- "No overrides present but still overridden!")
-
- def test_use_linked_clone_override_ft(self):
- image_props = self._image_build(False, True)
- self.assertFalse(image_props.linked_clone,
- "image level metadata failed to override global")
-
- def test_use_linked_clone_override_string_nt(self):
- image_props = self._image_build("no", True)
- self.assertFalse(image_props.linked_clone,
- "image level metadata failed to override global")
-
- def test_use_linked_clone_override_string_yf(self):
- image_props = self._image_build("yes", False)
- self.assertTrue(image_props.linked_clone,
- "image level metadata failed to override global")
-
- def test_use_disk_format_none(self):
- image = self._image_build(None, True, disk_format=None)
- self.assertIsNone(image.file_type)
- self.assertFalse(image.is_iso)
-
- def test_use_disk_format_iso(self):
- image = self._image_build(None, True, disk_format='iso')
- self.assertEqual('iso', image.file_type)
- self.assertTrue(image.is_iso)
-
- def test_use_bad_disk_format(self):
- self.assertRaises(exception.InvalidDiskFormat,
- self._image_build,
- None,
- True,
- disk_format='bad_disk_format')
-
- def test_image_no_defaults(self):
- image = self._image_build(False, False,
- disk_format='iso',
- os_type='fake-os-type',
- adapter_type='fake-adapter-type',
- disk_type='fake-disk-type',
- vif_model='fake-vif-model')
- self.assertEqual('iso', image.file_type)
- self.assertEqual('fake-os-type', image.os_type)
- self.assertEqual('fake-adapter-type', image.adapter_type)
- self.assertEqual('fake-disk-type', image.disk_type)
- self.assertEqual('fake-vif-model', image.vif_model)
- self.assertFalse(image.linked_clone)
-
- def test_image_defaults(self):
- image = images.VMwareImage(image_id='fake-image-id')
-
- # N.B. We intentially don't use the defined constants here. Amongst
- # other potential failures, we're interested in changes to their
- # values, which would not otherwise be picked up.
- self.assertEqual('otherGuest', image.os_type)
- self.assertEqual('lsiLogic', image.adapter_type)
- self.assertEqual('preallocated', image.disk_type)
- self.assertEqual('e1000', image.vif_model)
diff --git a/nova/tests/virt/vmwareapi/test_vif.py b/nova/tests/virt/vmwareapi/test_vif.py
deleted file mode 100644
index 3e446e5e7d..0000000000
--- a/nova/tests/virt/vmwareapi/test_vif.py
+++ /dev/null
@@ -1,346 +0,0 @@
-# Copyright 2013 Canonical Corp.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-
-import mock
-from oslo.config import cfg
-from oslo.vmware import exceptions as vexc
-
-from nova import exception
-from nova.network import model as network_model
-from nova import test
-from nova.tests import matchers
-from nova.tests import utils
-from nova.tests.virt.vmwareapi import fake
-from nova.virt.vmwareapi import network_util
-from nova.virt.vmwareapi import vif
-from nova.virt.vmwareapi import vim_util
-from nova.virt.vmwareapi import vm_util
-
-CONF = cfg.CONF
-
-
-class VMwareVifTestCase(test.NoDBTestCase):
- def setUp(self):
- super(VMwareVifTestCase, self).setUp()
- self.flags(vlan_interface='vmnet0', group='vmware')
- network = network_model.Network(id=0,
- bridge='fa0',
- label='fake',
- vlan=3,
- bridge_interface='eth0',
- injected=True)
-
- self.vif = network_model.NetworkInfo([
- network_model.VIF(id=None,
- address='DE:AD:BE:EF:00:00',
- network=network,
- type=None,
- devname=None,
- ovs_interfaceid=None,
- rxtx_cap=3)
- ])[0]
- self.session = fake.FakeSession()
- self.cluster = None
-
- def tearDown(self):
- super(VMwareVifTestCase, self).tearDown()
-
- def test_ensure_vlan_bridge(self):
- self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
- self.mox.StubOutWithMock(network_util,
- 'get_vswitch_for_vlan_interface')
- self.mox.StubOutWithMock(network_util,
- 'check_if_vlan_interface_exists')
- self.mox.StubOutWithMock(network_util, 'create_port_group')
- network_util.get_network_with_the_name(self.session, 'fa0',
- self.cluster).AndReturn(None)
- network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
- self.cluster).AndReturn('vmnet0')
- network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
- self.cluster).AndReturn(True)
- network_util.create_port_group(self.session, 'fa0', 'vmnet0', 3,
- self.cluster)
- network_util.get_network_with_the_name(self.session, 'fa0', None)
-
- self.mox.ReplayAll()
- vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=True)
-
- # FlatDHCP network mode without vlan - network doesn't exist with the host
- def test_ensure_vlan_bridge_without_vlan(self):
- self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
- self.mox.StubOutWithMock(network_util,
- 'get_vswitch_for_vlan_interface')
- self.mox.StubOutWithMock(network_util,
- 'check_if_vlan_interface_exists')
- self.mox.StubOutWithMock(network_util, 'create_port_group')
-
- network_util.get_network_with_the_name(self.session, 'fa0',
- self.cluster).AndReturn(None)
- network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
- self.cluster).AndReturn('vmnet0')
- network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
- self.cluster).AndReturn(True)
- network_util.create_port_group(self.session, 'fa0', 'vmnet0', 0,
- self.cluster)
- network_util.get_network_with_the_name(self.session, 'fa0', None)
- self.mox.ReplayAll()
- vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)
-
- # FlatDHCP network mode without vlan - network exists with the host
- # Get vswitch and check vlan interface should not be called
- def test_ensure_vlan_bridge_with_network(self):
- self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
- self.mox.StubOutWithMock(network_util,
- 'get_vswitch_for_vlan_interface')
- self.mox.StubOutWithMock(network_util,
- 'check_if_vlan_interface_exists')
- self.mox.StubOutWithMock(network_util, 'create_port_group')
- vm_network = {'name': 'VM Network', 'type': 'Network'}
- network_util.get_network_with_the_name(self.session, 'fa0',
- self.cluster).AndReturn(vm_network)
- self.mox.ReplayAll()
- vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)
-
- # Flat network mode with DVS
- def test_ensure_vlan_bridge_with_existing_dvs(self):
- network_ref = {'dvpg': 'dvportgroup-2062',
- 'type': 'DistributedVirtualPortgroup'}
- self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
- self.mox.StubOutWithMock(network_util,
- 'get_vswitch_for_vlan_interface')
- self.mox.StubOutWithMock(network_util,
- 'check_if_vlan_interface_exists')
- self.mox.StubOutWithMock(network_util, 'create_port_group')
-
- network_util.get_network_with_the_name(self.session, 'fa0',
- self.cluster).AndReturn(network_ref)
- self.mox.ReplayAll()
- ref = vif.ensure_vlan_bridge(self.session,
- self.vif,
- create_vlan=False)
- self.assertThat(ref, matchers.DictMatches(network_ref))
-
- def test_get_network_ref_neutron(self):
- self.mox.StubOutWithMock(vif, 'get_neutron_network')
- vif.get_neutron_network(self.session, 'fa0', self.cluster, self.vif)
- self.mox.ReplayAll()
- vif.get_network_ref(self.session, self.cluster, self.vif, True)
-
- def test_get_network_ref_flat_dhcp(self):
- self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
- vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
- create_vlan=False)
- self.mox.ReplayAll()
- vif.get_network_ref(self.session, self.cluster, self.vif, False)
-
- def test_get_network_ref_bridge(self):
- self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
- vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
- create_vlan=True)
- self.mox.ReplayAll()
- network = network_model.Network(id=0,
- bridge='fa0',
- label='fake',
- vlan=3,
- bridge_interface='eth0',
- injected=True,
- should_create_vlan=True)
- self.vif = network_model.NetworkInfo([
- network_model.VIF(id=None,
- address='DE:AD:BE:EF:00:00',
- network=network,
- type=None,
- devname=None,
- ovs_interfaceid=None,
- rxtx_cap=3)
- ])[0]
- vif.get_network_ref(self.session, self.cluster, self.vif, False)
-
- def test_get_network_ref_bridge_from_opaque(self):
- opaque_networks = [{'opaqueNetworkId': 'bridge_id',
- 'opaqueNetworkName': 'name',
- 'opaqueNetworkType': 'OpaqueNetwork'}]
- network_ref = vif._get_network_ref_from_opaque(opaque_networks,
- 'integration_bridge', 'bridge_id')
- self.assertEqual('bridge_id', network_ref['network-id'])
-
- def test_get_network_ref_multiple_bridges_from_opaque(self):
- opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
- 'opaqueNetworkName': 'name1',
- 'opaqueNetworkType': 'OpaqueNetwork'},
- {'opaqueNetworkId': 'bridge_id2',
- 'opaqueNetworkName': 'name2',
- 'opaqueNetworkType': 'OpaqueNetwork'}]
- network_ref = vif._get_network_ref_from_opaque(opaque_networks,
- 'integration_bridge', 'bridge_id2')
- self.assertEqual('bridge_id2', network_ref['network-id'])
-
- def test_get_network_ref_integration(self):
- opaque_networks = [{'opaqueNetworkId': 'integration_bridge',
- 'opaqueNetworkName': 'name',
- 'opaqueNetworkType': 'OpaqueNetwork'}]
- network_ref = vif._get_network_ref_from_opaque(opaque_networks,
- 'integration_bridge', 'bridge_id')
- self.assertEqual('integration_bridge', network_ref['network-id'])
-
- def test_get_network_ref_bridge_none(self):
- opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
- 'opaqueNetworkName': 'name1',
- 'opaqueNetworkType': 'OpaqueNetwork'},
- {'opaqueNetworkId': 'bridge_id2',
- 'opaqueNetworkName': 'name2',
- 'opaqueNetworkType': 'OpaqueNetwork'}]
- network_ref = vif._get_network_ref_from_opaque(opaque_networks,
- 'integration_bridge', 'bridge_id')
- self.assertIsNone(network_ref)
-
- def test_get_network_ref_integration_multiple(self):
- opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
- 'opaqueNetworkName': 'name1',
- 'opaqueNetworkType': 'OpaqueNetwork'},
- {'opaqueNetworkId': 'integration_bridge',
- 'opaqueNetworkName': 'name2',
- 'opaqueNetworkType': 'OpaqueNetwork'}]
- network_ref = vif._get_network_ref_from_opaque(opaque_networks,
- 'integration_bridge', 'bridge_id')
- self.assertIsNone(network_ref)
-
- def test_get_neutron_network(self):
- self.mox.StubOutWithMock(vm_util, 'get_host_ref')
- self.mox.StubOutWithMock(self.session, '_call_method')
- self.mox.StubOutWithMock(vif, '_get_network_ref_from_opaque')
- vm_util.get_host_ref(self.session,
- self.cluster).AndReturn('fake-host')
- opaque = fake.DataObject()
- opaque.HostOpaqueNetworkInfo = ['fake-network-info']
- self.session._call_method(vim_util, "get_dynamic_property",
- 'fake-host', 'HostSystem',
- 'config.network.opaqueNetwork').AndReturn(opaque)
- vif._get_network_ref_from_opaque(opaque.HostOpaqueNetworkInfo,
- CONF.vmware.integration_bridge,
- self.vif['network']['id']).AndReturn('fake-network-ref')
- self.mox.ReplayAll()
- network_ref = vif.get_neutron_network(self.session,
- self.vif['network']['id'],
- self.cluster,
- self.vif)
- self.assertEqual(network_ref, 'fake-network-ref')
-
- def test_get_neutron_network_opaque_network_not_found(self):
- self.mox.StubOutWithMock(vm_util, 'get_host_ref')
- self.mox.StubOutWithMock(self.session, '_call_method')
- self.mox.StubOutWithMock(vif, '_get_network_ref_from_opaque')
- vm_util.get_host_ref(self.session,
- self.cluster).AndReturn('fake-host')
- opaque = fake.DataObject()
- opaque.HostOpaqueNetworkInfo = ['fake-network-info']
- self.session._call_method(vim_util, "get_dynamic_property",
- 'fake-host', 'HostSystem',
- 'config.network.opaqueNetwork').AndReturn(opaque)
- vif._get_network_ref_from_opaque(opaque.HostOpaqueNetworkInfo,
- CONF.vmware.integration_bridge,
- self.vif['network']['id']).AndReturn(None)
- self.mox.ReplayAll()
- self.assertRaises(exception.NetworkNotFoundForBridge,
- vif.get_neutron_network, self.session,
- self.vif['network']['id'], self.cluster, self.vif)
-
- def test_get_neutron_network_bridge_network_not_found(self):
- self.mox.StubOutWithMock(vm_util, 'get_host_ref')
- self.mox.StubOutWithMock(self.session, '_call_method')
- self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
- vm_util.get_host_ref(self.session,
- self.cluster).AndReturn('fake-host')
- opaque = fake.DataObject()
- opaque.HostOpaqueNetworkInfo = ['fake-network-info']
- self.session._call_method(vim_util, "get_dynamic_property",
- 'fake-host', 'HostSystem',
- 'config.network.opaqueNetwork').AndReturn(None)
- network_util.get_network_with_the_name(self.session, 0,
- self.cluster).AndReturn(None)
- self.mox.ReplayAll()
- self.assertRaises(exception.NetworkNotFoundForBridge,
- vif.get_neutron_network, self.session,
- self.vif['network']['id'], self.cluster, self.vif)
-
- def test_create_port_group_already_exists(self):
- def fake_call_method(module, method, *args, **kwargs):
- if method == 'AddPortGroup':
- raise vexc.AlreadyExistsException()
-
- with contextlib.nested(
- mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'),
- mock.patch.object(vm_util, 'get_host_ref'),
- mock.patch.object(self.session, '_call_method',
- fake_call_method)
- ) as (_add_vswitch, _get_host, _call_method):
- network_util.create_port_group(self.session, 'pg_name',
- 'vswitch_name', vlan_id=0,
- cluster=None)
-
- def test_create_port_group_exception(self):
- def fake_call_method(module, method, *args, **kwargs):
- if method == 'AddPortGroup':
- raise vexc.VMwareDriverException()
-
- with contextlib.nested(
- mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'),
- mock.patch.object(vm_util, 'get_host_ref'),
- mock.patch.object(self.session, '_call_method',
- fake_call_method)
- ) as (_add_vswitch, _get_host, _call_method):
- self.assertRaises(vexc.VMwareDriverException,
- network_util.create_port_group,
- self.session, 'pg_name',
- 'vswitch_name', vlan_id=0,
- cluster=None)
-
- def test_get_neutron_network_invalid_property(self):
- def fake_call_method(module, method, *args, **kwargs):
- if method == 'get_dynamic_property':
- raise vexc.InvalidPropertyException()
-
- with contextlib.nested(
- mock.patch.object(vm_util, 'get_host_ref'),
- mock.patch.object(self.session, '_call_method',
- fake_call_method),
- mock.patch.object(network_util, 'get_network_with_the_name')
- ) as (_get_host, _call_method, _get_name):
- vif.get_neutron_network(self.session, 'network_name',
- 'cluster', self.vif)
-
- def test_get_vif_info_none(self):
- vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
- 'is_neutron', 'fake_model', None)
- self.assertEqual([], vif_info)
-
- def test_get_vif_info_empty_list(self):
- vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
- 'is_neutron', 'fake_model', [])
- self.assertEqual([], vif_info)
-
- @mock.patch.object(vif, 'get_network_ref', return_value='fake_ref')
- def test_get_vif_info(self, mock_get_network_ref):
- network_info = utils.get_test_network_info()
- vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
- 'is_neutron', 'fake_model', network_info)
- expected = [{'iface_id': 'vif-xxx-yyy-zzz',
- 'mac_address': 'fake',
- 'network_name': 'fake',
- 'network_ref': 'fake_ref',
- 'vif_model': 'fake_model'}]
- self.assertEqual(expected, vif_info)
diff --git a/nova/tests/virt/vmwareapi/test_vim_util.py b/nova/tests/virt/vmwareapi/test_vim_util.py
deleted file mode 100644
index 0eb9a096f3..0000000000
--- a/nova/tests/virt/vmwareapi/test_vim_util.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# Copyright (c) 2013 VMware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import collections
-
-import fixtures
-import mock
-
-from nova import test
-from nova.tests.virt.vmwareapi import fake
-from nova.tests.virt.vmwareapi import stubs
-from nova.virt.vmwareapi import driver
-from nova.virt.vmwareapi import vim_util
-
-
-def _fake_get_object_properties(vim, collector, mobj,
- type, properties):
- fake_objects = fake.FakeRetrieveResult()
- fake_objects.add_object(fake.ObjectContent(None))
- return fake_objects
-
-
-def _fake_get_object_properties_missing(vim, collector, mobj,
- type, properties):
- fake_objects = fake.FakeRetrieveResult()
- ml = [fake.MissingProperty()]
- fake_objects.add_object(fake.ObjectContent(None, missing_list=ml))
- return fake_objects
-
-
-class VMwareVIMUtilTestCase(test.NoDBTestCase):
-
- def setUp(self):
- super(VMwareVIMUtilTestCase, self).setUp()
- fake.reset()
- self.vim = fake.FakeVim()
- self.vim._login()
-
- def test_get_dynamic_properties_missing(self):
- self.useFixture(fixtures.MonkeyPatch(
- 'nova.virt.vmwareapi.vim_util.get_object_properties',
- _fake_get_object_properties))
- res = vim_util.get_dynamic_property('fake-vim', 'fake-obj',
- 'fake-type', 'fake-property')
- self.assertIsNone(res)
-
- def test_get_dynamic_properties_missing_path_exists(self):
- self.useFixture(fixtures.MonkeyPatch(
- 'nova.virt.vmwareapi.vim_util.get_object_properties',
- _fake_get_object_properties_missing))
- res = vim_util.get_dynamic_property('fake-vim', 'fake-obj',
- 'fake-type', 'fake-property')
- self.assertIsNone(res)
-
- def test_get_dynamic_properties_with_token(self):
- ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
- DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
-
- # Add a token to our results, indicating that more are available
- result = fake.FakeRetrieveResult(token='fake_token')
-
- # We expect these properties to be returned
- result.add_object(ObjectContent(propSet=[
- DynamicProperty(name='name1', val='value1'),
- DynamicProperty(name='name2', val='value2')
- ]))
-
- # These properties should be ignored
- result.add_object(ObjectContent(propSet=[
- DynamicProperty(name='name3', val='value3')
- ]))
-
- retrievePropertiesEx = mock.MagicMock(name='RetrievePropertiesEx')
- retrievePropertiesEx.return_value = result
-
- calls = {'RetrievePropertiesEx': retrievePropertiesEx}
- with stubs.fake_suds_context(calls):
- session = driver.VMwareAPISession(host_ip='localhost')
-
- service_content = session.vim.service_content
- props = session._call_method(vim_util, "get_dynamic_properties",
- service_content.propertyCollector,
- 'fake_type', None)
-
- self.assertEqual(props, {
- 'name1': 'value1',
- 'name2': 'value2'
- })
-
- @mock.patch.object(vim_util, 'get_object_properties', return_value=None)
- def test_get_dynamic_properties_no_objects(self, mock_get_object_props):
- res = vim_util.get_dynamic_properties('fake-vim', 'fake-obj',
- 'fake-type', 'fake-property')
- self.assertEqual({}, res)
-
- def test_get_inner_objects(self):
- property = ['summary.name']
- # Get the fake datastores directly from the cluster
- cluster_refs = fake._get_object_refs('ClusterComputeResource')
- cluster = fake._get_object(cluster_refs[0])
- expected_ds = cluster.datastore.ManagedObjectReference
- # Get the fake datastores using inner objects utility method
- result = vim_util.get_inner_objects(
- self.vim, cluster_refs[0], 'datastore', 'Datastore', property)
- datastores = [oc.obj for oc in result.objects]
- self.assertEqual(expected_ds, datastores)
diff --git a/nova/tests/virt/vmwareapi/test_vm_util.py b/nova/tests/virt/vmwareapi/test_vm_util.py
deleted file mode 100644
index 4e4e141ca6..0000000000
--- a/nova/tests/virt/vmwareapi/test_vm_util.py
+++ /dev/null
@@ -1,1069 +0,0 @@
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
-# Copyright 2013 Canonical Corp.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import collections
-import contextlib
-import re
-
-import mock
-from oslo.vmware import exceptions as vexc
-
-from nova import context
-from nova import exception
-from nova.network import model as network_model
-from nova.openstack.common import uuidutils
-from nova import test
-from nova.tests import fake_instance
-from nova.tests.virt.vmwareapi import fake
-from nova.tests.virt.vmwareapi import stubs
-from nova.virt.vmwareapi import driver
-from nova.virt.vmwareapi import vm_util
-
-
-class partialObject(object):
- def __init__(self, path='fake-path'):
- self.path = path
- self.fault = fake.DataObject()
-
-
-class VMwareVMUtilTestCase(test.NoDBTestCase):
- def setUp(self):
- super(VMwareVMUtilTestCase, self).setUp()
- fake.reset()
- stubs.set_stubs(self.stubs)
- vm_util.vm_refs_cache_reset()
-
- def _test_get_stats_from_cluster(self, connection_state="connected",
- maintenance_mode=False):
- ManagedObjectRefs = [fake.ManagedObjectReference("host1",
- "HostSystem"),
- fake.ManagedObjectReference("host2",
- "HostSystem")]
- hosts = fake._convert_to_array_of_mor(ManagedObjectRefs)
- respool = fake.ManagedObjectReference("resgroup-11", "ResourcePool")
- prop_dict = {'host': hosts, 'resourcePool': respool}
-
- hardware = fake.DataObject()
- hardware.numCpuCores = 8
- hardware.numCpuThreads = 16
- hardware.vendor = "Intel"
- hardware.cpuModel = "Intel(R) Xeon(R)"
-
- runtime_host_1 = fake.DataObject()
- runtime_host_1.connectionState = "connected"
- runtime_host_1.inMaintenanceMode = False
-
- runtime_host_2 = fake.DataObject()
- runtime_host_2.connectionState = connection_state
- runtime_host_2.inMaintenanceMode = maintenance_mode
-
- prop_list_host_1 = [fake.Prop(name="hardware_summary", val=hardware),
- fake.Prop(name="runtime_summary",
- val=runtime_host_1)]
- prop_list_host_2 = [fake.Prop(name="hardware_summary", val=hardware),
- fake.Prop(name="runtime_summary",
- val=runtime_host_2)]
-
- fake_objects = fake.FakeRetrieveResult()
- fake_objects.add_object(fake.ObjectContent("prop_list_host1",
- prop_list_host_1))
- fake_objects.add_object(fake.ObjectContent("prop_list_host1",
- prop_list_host_2))
-
- respool_resource_usage = fake.DataObject()
- respool_resource_usage.maxUsage = 5368709120
- respool_resource_usage.overallUsage = 2147483648
-
- def fake_call_method(*args):
- if "get_dynamic_properties" in args:
- return prop_dict
- elif "get_properties_for_a_collection_of_objects" in args:
- return fake_objects
- else:
- return respool_resource_usage
-
- session = fake.FakeSession()
- with mock.patch.object(session, '_call_method', fake_call_method):
- result = vm_util.get_stats_from_cluster(session, "cluster1")
- cpu_info = {}
- mem_info = {}
- if connection_state == "connected" and not maintenance_mode:
- cpu_info['vcpus'] = 32
- cpu_info['cores'] = 16
- cpu_info['vendor'] = ["Intel", "Intel"]
- cpu_info['model'] = ["Intel(R) Xeon(R)",
- "Intel(R) Xeon(R)"]
- else:
- cpu_info['vcpus'] = 16
- cpu_info['cores'] = 8
- cpu_info['vendor'] = ["Intel"]
- cpu_info['model'] = ["Intel(R) Xeon(R)"]
- mem_info['total'] = 5120
- mem_info['free'] = 3072
- expected_stats = {'cpu': cpu_info, 'mem': mem_info}
- self.assertEqual(expected_stats, result)
-
- def test_get_stats_from_cluster_hosts_connected_and_active(self):
- self._test_get_stats_from_cluster()
-
- def test_get_stats_from_cluster_hosts_disconnected_and_active(self):
- self._test_get_stats_from_cluster(connection_state="disconnected")
-
- def test_get_stats_from_cluster_hosts_connected_and_maintenance(self):
- self._test_get_stats_from_cluster(maintenance_mode=True)
-
- def test_get_host_ref_no_hosts_in_cluster(self):
- self.assertRaises(exception.NoValidHost,
- vm_util.get_host_ref,
- fake.FakeObjectRetrievalSession(""), 'fake_cluster')
-
- def test_get_resize_spec(self):
- fake_instance = {'id': 7, 'name': 'fake!',
- 'uuid': 'bda5fb9e-b347-40e8-8256-42397848cb00',
- 'vcpus': 2, 'memory_mb': 2048}
- result = vm_util.get_vm_resize_spec(fake.FakeFactory(),
- fake_instance)
- expected = """{'memoryMB': 2048,
- 'numCPUs': 2,
- 'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
- expected = re.sub(r'\s+', '', expected)
- result = re.sub(r'\s+', '', repr(result))
- self.assertEqual(expected, result)
-
- def test_get_cdrom_attach_config_spec(self):
-
- result = vm_util.get_cdrom_attach_config_spec(fake.FakeFactory(),
- fake.Datastore(),
- "/tmp/foo.iso",
- 200, 0)
- expected = """{
- 'deviceChange': [
- {
- 'device': {
- 'connectable': {
- 'allowGuestControl': False,
- 'startConnected': True,
- 'connected': True,
- 'obj_name': 'ns0: VirtualDeviceConnectInfo'
- },
- 'backing': {
- 'datastore': {
- "summary.maintenanceMode": "normal",
- "summary.type": "VMFS",
- "summary.accessible":true,
- "summary.name": "fake-ds",
- "summary.capacity": 1099511627776,
- "summary.freeSpace": 536870912000,
- "browser": ""
- },
- 'fileName': '/tmp/foo.iso',
- 'obj_name': 'ns0: VirtualCdromIsoBackingInfo'
- },
- 'controllerKey': 200,
- 'unitNumber': 0,
- 'key': -1,
- 'obj_name': 'ns0: VirtualCdrom'
- },
- 'operation': 'add',
- 'obj_name': 'ns0: VirtualDeviceConfigSpec'
- }
- ],
- 'obj_name': 'ns0: VirtualMachineConfigSpec'
-}
-"""
-
- expected = re.sub(r'\s+', '', expected)
- result = re.sub(r'\s+', '', repr(result))
- self.assertEqual(expected, result)
-
- def test_lsilogic_controller_spec(self):
- # Test controller spec returned for lsiLogic sas adapter type
- config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
- adapter_type="lsiLogicsas")
- self.assertEqual("ns0:VirtualLsiLogicSASController",
- config_spec.device.obj_name)
-
- def test_paravirtual_controller_spec(self):
- # Test controller spec returned for paraVirtual adapter type
- config_spec = vm_util.create_controller_spec(fake.FakeFactory(), -101,
- adapter_type="paraVirtual")
- self.assertEqual("ns0:ParaVirtualSCSIController",
- config_spec.device.obj_name)
-
- def _vmdk_path_and_adapter_type_devices(self, filename, parent=None):
- # Test the adapter_type returned for a lsiLogic sas controller
- controller_key = 1000
- disk = fake.VirtualDisk()
- disk.controllerKey = controller_key
- disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
- disk_backing.fileName = filename
- if parent:
- disk_backing.parent = parent
- disk.backing = disk_backing
- controller = fake.VirtualLsiLogicSASController()
- controller.key = controller_key
- devices = [disk, controller]
- return devices
-
- def test_get_vmdk_path(self):
- uuid = '00000000-0000-0000-0000-000000000000'
- filename = '[test_datastore] %s/%s.vmdk' % (uuid, uuid)
- devices = self._vmdk_path_and_adapter_type_devices(filename)
- session = fake.FakeSession()
-
- with mock.patch.object(session, '_call_method',
- return_value=devices):
- instance = {'uuid': uuid}
- vmdk_path = vm_util.get_vmdk_path(session, None, instance)
- self.assertEqual(filename, vmdk_path)
-
- def test_get_vmdk_path_and_adapter_type(self):
- filename = '[test_datastore] test_file.vmdk'
- devices = self._vmdk_path_and_adapter_type_devices(filename)
- vmdk_info = vm_util.get_vmdk_path_and_adapter_type(devices)
- adapter_type = vmdk_info[1]
- self.assertEqual('lsiLogicsas', adapter_type)
- self.assertEqual(vmdk_info[0], filename)
-
- def test_get_vmdk_path_and_adapter_type_with_match(self):
- n_filename = '[test_datastore] uuid/uuid.vmdk'
- devices = self._vmdk_path_and_adapter_type_devices(n_filename)
- vmdk_info = vm_util.get_vmdk_path_and_adapter_type(
- devices, uuid='uuid')
- adapter_type = vmdk_info[1]
- self.assertEqual('lsiLogicsas', adapter_type)
- self.assertEqual(n_filename, vmdk_info[0])
-
- def test_get_vmdk_path_and_adapter_type_with_nomatch(self):
- n_filename = '[test_datastore] diuu/diuu.vmdk'
- devices = self._vmdk_path_and_adapter_type_devices(n_filename)
- vmdk_info = vm_util.get_vmdk_path_and_adapter_type(
- devices, uuid='uuid')
- adapter_type = vmdk_info[1]
- self.assertEqual('lsiLogicsas', adapter_type)
- self.assertIsNone(vmdk_info[0])
-
- def test_get_vmdk_adapter_type(self):
- # Test for the adapter_type to be used in vmdk descriptor
- # Adapter type in vmdk descriptor is same for LSI-SAS, LSILogic
- # and ParaVirtual
- vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogic")
- self.assertEqual("lsiLogic", vmdk_adapter_type)
- vmdk_adapter_type = vm_util.get_vmdk_adapter_type("lsiLogicsas")
- self.assertEqual("lsiLogic", vmdk_adapter_type)
- vmdk_adapter_type = vm_util.get_vmdk_adapter_type("paraVirtual")
- self.assertEqual("lsiLogic", vmdk_adapter_type)
- vmdk_adapter_type = vm_util.get_vmdk_adapter_type("dummyAdapter")
- self.assertEqual("dummyAdapter", vmdk_adapter_type)
-
- def test_find_allocated_slots(self):
- disk1 = fake.VirtualDisk(200, 0)
- disk2 = fake.VirtualDisk(200, 1)
- disk3 = fake.VirtualDisk(201, 1)
- ide0 = fake.VirtualIDEController(200)
- ide1 = fake.VirtualIDEController(201)
- scsi0 = fake.VirtualLsiLogicController(key=1000, scsiCtlrUnitNumber=7)
- devices = [disk1, disk2, disk3, ide0, ide1, scsi0]
- taken = vm_util._find_allocated_slots(devices)
- self.assertEqual([0, 1], sorted(taken[200]))
- self.assertEqual([1], taken[201])
- self.assertEqual([7], taken[1000])
-
- def test_allocate_controller_key_and_unit_number_ide_default(self):
- # Test that default IDE controllers are used when there is a free slot
- # on them
- disk1 = fake.VirtualDisk(200, 0)
- disk2 = fake.VirtualDisk(200, 1)
- ide0 = fake.VirtualIDEController(200)
- ide1 = fake.VirtualIDEController(201)
- devices = [disk1, disk2, ide0, ide1]
- (controller_key, unit_number,
- controller_spec) = vm_util.allocate_controller_key_and_unit_number(
- None,
- devices,
- 'ide')
- self.assertEqual(201, controller_key)
- self.assertEqual(0, unit_number)
- self.assertIsNone(controller_spec)
-
- def test_allocate_controller_key_and_unit_number_ide(self):
- # Test that a new controller is created when there is no free slot on
- # the default IDE controllers
- ide0 = fake.VirtualIDEController(200)
- ide1 = fake.VirtualIDEController(201)
- devices = [ide0, ide1]
- for controller_key in [200, 201]:
- for unit_number in [0, 1]:
- disk = fake.VirtualDisk(controller_key, unit_number)
- devices.append(disk)
- factory = fake.FakeFactory()
- (controller_key, unit_number,
- controller_spec) = vm_util.allocate_controller_key_and_unit_number(
- factory,
- devices,
- 'ide')
- self.assertEqual(-101, controller_key)
- self.assertEqual(0, unit_number)
- self.assertIsNotNone(controller_spec)
-
- def test_allocate_controller_key_and_unit_number_scsi(self):
- # Test that we allocate on existing SCSI controller if there is a free
- # slot on it
- devices = [fake.VirtualLsiLogicController(1000, scsiCtlrUnitNumber=7)]
- for unit_number in range(7):
- disk = fake.VirtualDisk(1000, unit_number)
- devices.append(disk)
- factory = fake.FakeFactory()
- (controller_key, unit_number,
- controller_spec) = vm_util.allocate_controller_key_and_unit_number(
- factory,
- devices,
- 'lsiLogic')
- self.assertEqual(1000, controller_key)
- self.assertEqual(8, unit_number)
- self.assertIsNone(controller_spec)
-
- def _test_get_vnc_config_spec(self, port):
-
- result = vm_util.get_vnc_config_spec(fake.FakeFactory(),
- port)
- return result
-
- def test_get_vnc_config_spec(self):
- result = self._test_get_vnc_config_spec(7)
- expected = """{'extraConfig': [
- {'value': 'true',
- 'key': 'RemoteDisplay.vnc.enabled',
- 'obj_name': 'ns0:OptionValue'},
- {'value': 7,
- 'key': 'RemoteDisplay.vnc.port',
- 'obj_name': 'ns0:OptionValue'}],
- 'obj_name': 'ns0:VirtualMachineConfigSpec'}"""
- expected = re.sub(r'\s+', '', expected)
- result = re.sub(r'\s+', '', repr(result))
- self.assertEqual(expected, result)
-
- def _create_fake_vms(self):
- fake_vms = fake.FakeRetrieveResult()
- OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
- for i in range(10):
- vm = fake.ManagedObject()
- opt_val = OptionValue(key='', value=5900 + i)
- vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
- fake_vms.add_object(vm)
- return fake_vms
-
- def test_get_vnc_port(self):
- fake_vms = self._create_fake_vms()
- self.flags(vnc_port=5900, group='vmware')
- self.flags(vnc_port_total=10000, group='vmware')
- actual = vm_util.get_vnc_port(
- fake.FakeObjectRetrievalSession(fake_vms))
- self.assertEqual(actual, 5910)
-
- def test_get_vnc_port_exhausted(self):
- fake_vms = self._create_fake_vms()
- self.flags(vnc_port=5900, group='vmware')
- self.flags(vnc_port_total=10, group='vmware')
- self.assertRaises(exception.ConsolePortRangeExhausted,
- vm_util.get_vnc_port,
- fake.FakeObjectRetrievalSession(fake_vms))
-
- def test_get_all_cluster_refs_by_name_none(self):
- fake_objects = fake.FakeRetrieveResult()
- refs = vm_util.get_all_cluster_refs_by_name(
- fake.FakeObjectRetrievalSession(fake_objects), ['fake_cluster'])
- self.assertEqual({}, refs)
-
- def test_get_all_cluster_refs_by_name_exists(self):
- fake_objects = fake.FakeRetrieveResult()
- fake_objects.add_object(fake.ClusterComputeResource(name='cluster'))
- refs = vm_util.get_all_cluster_refs_by_name(
- fake.FakeObjectRetrievalSession(fake_objects), ['cluster'])
- self.assertEqual(1, len(refs))
-
- def test_get_all_cluster_refs_by_name_missing(self):
- fake_objects = fake.FakeRetrieveResult()
- fake_objects.add_object(partialObject(path='cluster'))
- refs = vm_util.get_all_cluster_refs_by_name(
- fake.FakeObjectRetrievalSession(fake_objects), ['cluster'])
- self.assertEqual({}, refs)
-
- def test_propset_dict_simple(self):
- ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
- DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
-
- object = ObjectContent(propSet=[
- DynamicProperty(name='foo', val="bar")])
- propdict = vm_util.propset_dict(object.propSet)
- self.assertEqual("bar", propdict['foo'])
-
- def test_propset_dict_complex(self):
- ObjectContent = collections.namedtuple('ObjectContent', ['propSet'])
- DynamicProperty = collections.namedtuple('Property', ['name', 'val'])
- MoRef = collections.namedtuple('Val', ['value'])
-
- object = ObjectContent(propSet=[
- DynamicProperty(name='foo', val="bar"),
- DynamicProperty(name='some.thing',
- val=MoRef(value='else')),
- DynamicProperty(name='another.thing', val='value')])
-
- propdict = vm_util.propset_dict(object.propSet)
- self.assertEqual("bar", propdict['foo'])
- self.assertTrue(hasattr(propdict['some.thing'], 'value'))
- self.assertEqual("else", propdict['some.thing'].value)
- self.assertEqual("value", propdict['another.thing'])
-
- def _test_detach_virtual_disk_spec(self, destroy_disk=False):
- virtual_device_config = vm_util.detach_virtual_disk_spec(
- fake.FakeFactory(),
- 'fake_device',
- destroy_disk)
- self.assertEqual('remove', virtual_device_config.operation)
- self.assertEqual('fake_device', virtual_device_config.device)
- self.assertEqual('ns0:VirtualDeviceConfigSpec',
- virtual_device_config.obj_name)
- if destroy_disk:
- self.assertEqual('destroy', virtual_device_config.fileOperation)
- else:
- self.assertFalse(hasattr(virtual_device_config, 'fileOperation'))
-
- def test_detach_virtual_disk_spec(self):
- self._test_detach_virtual_disk_spec(destroy_disk=False)
-
- def test_detach_virtual_disk_destroy_spec(self):
- self._test_detach_virtual_disk_spec(destroy_disk=True)
-
- def test_get_vm_create_spec(self):
- instance_uuid = uuidutils.generate_uuid()
- fake_instance = {'id': 7, 'name': 'fake!',
- 'uuid': instance_uuid,
- 'vcpus': 2, 'memory_mb': 2048}
- result = vm_util.get_vm_create_spec(fake.FakeFactory(),
- fake_instance, instance_uuid,
- 'fake-datastore', [])
- expected = """{
- 'files': {'vmPathName': '[fake-datastore]',
- 'obj_name': 'ns0:VirtualMachineFileInfo'},
- 'instanceUuid': '%(instance_uuid)s',
- 'name': '%(instance_uuid)s', 'deviceChange': [],
- 'extraConfig': [{'value': '%(instance_uuid)s',
- 'key': 'nvp.vm-uuid',
- 'obj_name': 'ns0:OptionValue'}],
- 'memoryMB': 2048,
- 'managedBy': {'extensionKey': 'org.openstack.compute',
- 'type': 'instance',
- 'obj_name': 'ns0:ManagedByInfo'},
- 'obj_name': 'ns0:VirtualMachineConfigSpec',
- 'guestId': 'otherGuest',
- 'tools': {'beforeGuestStandby': True,
- 'beforeGuestReboot': True,
- 'beforeGuestShutdown': True,
- 'afterResume': True,
- 'afterPowerOn': True,
- 'obj_name': 'ns0:ToolsConfigInfo'},
- 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
- expected = re.sub(r'\s+', '', expected)
- result = re.sub(r'\s+', '', repr(result))
- self.assertEqual(expected, result)
-
- def test_get_vm_create_spec_with_allocations(self):
- instance_uuid = uuidutils.generate_uuid()
- fake_instance = {'id': 7, 'name': 'fake!',
- 'uuid': instance_uuid,
- 'vcpus': 2, 'memory_mb': 2048}
- result = vm_util.get_vm_create_spec(fake.FakeFactory(),
- fake_instance, instance_uuid,
- 'fake-datastore', [],
- allocations={'cpu_limit': 7,
- 'cpu_reservation': 6})
- expected = """{
- 'files': {'vmPathName': '[fake-datastore]',
- 'obj_name': 'ns0:VirtualMachineFileInfo'},
- 'instanceUuid': '%(instance_uuid)s',
- 'name': '%(instance_uuid)s', 'deviceChange': [],
- 'extraConfig': [{'value': '%(instance_uuid)s',
- 'key': 'nvp.vm-uuid',
- 'obj_name': 'ns0:OptionValue'}],
- 'memoryMB': 2048,
- 'managedBy': {'extensionKey': 'org.openstack.compute',
- 'type': 'instance',
- 'obj_name': 'ns0:ManagedByInfo'},
- 'obj_name': 'ns0:VirtualMachineConfigSpec',
- 'guestId': 'otherGuest',
- 'tools': {'beforeGuestStandby': True,
- 'beforeGuestReboot': True,
- 'beforeGuestShutdown': True,
- 'afterResume': True,
- 'afterPowerOn': True,
- 'obj_name': 'ns0:ToolsConfigInfo'},
- 'cpuAllocation': {'reservation': 6,
- 'limit': 7,
- 'obj_name': 'ns0:ResourceAllocationInfo'},
- 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
- expected = re.sub(r'\s+', '', expected)
- result = re.sub(r'\s+', '', repr(result))
- self.assertEqual(expected, result)
-
- def test_get_vm_create_spec_with_limit(self):
- instance_uuid = uuidutils.generate_uuid()
- fake_instance = {'id': 7, 'name': 'fake!',
- 'uuid': instance_uuid,
- 'vcpus': 2, 'memory_mb': 2048}
- result = vm_util.get_vm_create_spec(fake.FakeFactory(),
- fake_instance, instance_uuid,
- 'fake-datastore', [],
- allocations={'cpu_limit': 7})
- expected = """{
- 'files': {'vmPathName': '[fake-datastore]',
- 'obj_name': 'ns0:VirtualMachineFileInfo'},
- 'instanceUuid': '%(instance_uuid)s',
- 'name': '%(instance_uuid)s', 'deviceChange': [],
- 'extraConfig': [{'value': '%(instance_uuid)s',
- 'key': 'nvp.vm-uuid',
- 'obj_name': 'ns0:OptionValue'}],
- 'memoryMB': 2048,
- 'managedBy': {'extensionKey': 'org.openstack.compute',
- 'type': 'instance',
- 'obj_name': 'ns0:ManagedByInfo'},
- 'obj_name': 'ns0:VirtualMachineConfigSpec',
- 'guestId': 'otherGuest',
- 'tools': {'beforeGuestStandby': True,
- 'beforeGuestReboot': True,
- 'beforeGuestShutdown': True,
- 'afterResume': True,
- 'afterPowerOn': True,
- 'obj_name': 'ns0:ToolsConfigInfo'},
- 'cpuAllocation': {'limit': 7,
- 'obj_name': 'ns0:ResourceAllocationInfo'},
- 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
- expected = re.sub(r'\s+', '', expected)
- result = re.sub(r'\s+', '', repr(result))
- self.assertEqual(expected, result)
-
- def test_get_vm_create_spec_with_share(self):
- instance_uuid = uuidutils.generate_uuid()
- fake_instance = {'id': 7, 'name': 'fake!',
- 'uuid': instance_uuid,
- 'vcpus': 2, 'memory_mb': 2048}
- shares = {'cpu_shares_level': 'high'}
- result = vm_util.get_vm_create_spec(fake.FakeFactory(),
- fake_instance, instance_uuid,
- 'fake-datastore', [],
- allocations=shares)
- expected = """{
- 'files': {'vmPathName': '[fake-datastore]',
- 'obj_name': 'ns0:VirtualMachineFileInfo'},
- 'instanceUuid': '%(instance_uuid)s',
- 'name': '%(instance_uuid)s', 'deviceChange': [],
- 'extraConfig': [{'value': '%(instance_uuid)s',
- 'key': 'nvp.vm-uuid',
- 'obj_name': 'ns0:OptionValue'}],
- 'memoryMB': 2048,
- 'managedBy': {'extensionKey': 'org.openstack.compute',
- 'type': 'instance',
- 'obj_name': 'ns0:ManagedByInfo'},
- 'obj_name': 'ns0:VirtualMachineConfigSpec',
- 'guestId': 'otherGuest',
- 'tools': {'beforeGuestStandby': True,
- 'beforeGuestReboot': True,
- 'beforeGuestShutdown': True,
- 'afterResume': True,
- 'afterPowerOn': True,
- 'obj_name': 'ns0:ToolsConfigInfo'},
- 'cpuAllocation': {'shares': {'level': 'high',
- 'shares': 0,
- 'obj_name':'ns0:SharesInfo'},
- 'obj_name':'ns0:ResourceAllocationInfo'},
- 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
- expected = re.sub(r'\s+', '', expected)
- result = re.sub(r'\s+', '', repr(result))
- self.assertEqual(expected, result)
-
- def test_get_vm_create_spec_with_share_custom(self):
- instance_uuid = uuidutils.generate_uuid()
- fake_instance = {'id': 7, 'name': 'fake!',
- 'uuid': instance_uuid,
- 'vcpus': 2, 'memory_mb': 2048}
- shares = {'cpu_shares_level': 'custom',
- 'cpu_shares_share': 1948}
- result = vm_util.get_vm_create_spec(fake.FakeFactory(),
- fake_instance, instance_uuid,
- 'fake-datastore', [],
- allocations=shares)
- expected = """{
- 'files': {'vmPathName': '[fake-datastore]',
- 'obj_name': 'ns0:VirtualMachineFileInfo'},
- 'instanceUuid': '%(instance_uuid)s',
- 'name': '%(instance_uuid)s', 'deviceChange': [],
- 'extraConfig': [{'value': '%(instance_uuid)s',
- 'key': 'nvp.vm-uuid',
- 'obj_name': 'ns0:OptionValue'}],
- 'memoryMB': 2048,
- 'managedBy': {'extensionKey': 'org.openstack.compute',
- 'type': 'instance',
- 'obj_name': 'ns0:ManagedByInfo'},
- 'obj_name': 'ns0:VirtualMachineConfigSpec',
- 'guestId': 'otherGuest',
- 'tools': {'beforeGuestStandby': True,
- 'beforeGuestReboot': True,
- 'beforeGuestShutdown': True,
- 'afterResume': True,
- 'afterPowerOn': True,
- 'obj_name': 'ns0:ToolsConfigInfo'},
- 'cpuAllocation': {'shares': {'level': 'custom',
- 'shares': 1948,
- 'obj_name':'ns0:SharesInfo'},
- 'obj_name':'ns0:ResourceAllocationInfo'},
- 'numCPUs': 2}""" % {'instance_uuid': instance_uuid}
- expected = re.sub(r'\s+', '', expected)
- result = re.sub(r'\s+', '', repr(result))
- self.assertEqual(expected, result)
-
- def test_create_vm(self):
-
- method_list = ['CreateVM_Task', 'get_dynamic_property']
-
- def fake_call_method(module, method, *args, **kwargs):
- expected_method = method_list.pop(0)
- self.assertEqual(expected_method, method)
- if (expected_method == 'CreateVM_Task'):
- return 'fake_create_vm_task'
- elif (expected_method == 'get_dynamic_property'):
- task_info = mock.Mock(state="success", result="fake_vm_ref")
- return task_info
- else:
- self.fail('Should not get here....')
-
- def fake_wait_for_task(self, *args):
- task_info = mock.Mock(state="success", result="fake_vm_ref")
- return task_info
-
- session = fake.FakeSession()
- fake_instance = mock.MagicMock()
- fake_call_mock = mock.Mock(side_effect=fake_call_method)
- fake_wait_mock = mock.Mock(side_effect=fake_wait_for_task)
- with contextlib.nested(
- mock.patch.object(session, '_wait_for_task',
- fake_wait_mock),
- mock.patch.object(session, '_call_method',
- fake_call_mock)
- ) as (wait_for_task, call_method):
- vm_ref = vm_util.create_vm(
- session,
- fake_instance,
- 'fake_vm_folder',
- 'fake_config_spec',
- 'fake_res_pool_ref')
- self.assertEqual('fake_vm_ref', vm_ref)
-
- call_method.assert_called_once_with(mock.ANY, 'CreateVM_Task',
- 'fake_vm_folder', config='fake_config_spec',
- pool='fake_res_pool_ref')
- wait_for_task.assert_called_once_with('fake_create_vm_task')
-
- @mock.patch.object(vm_util.LOG, 'warning')
- def test_create_vm_invalid_guestid(self, mock_log_warn):
- """Ensure we warn when create_vm() fails after we passed an
- unrecognised guestId
- """
-
- found = [False]
-
- def fake_log_warn(msg, values):
- if not isinstance(values, dict):
- return
- if values.get('ostype') == 'invalid_os_type':
- found[0] = True
- mock_log_warn.side_effect = fake_log_warn
-
- instance_values = {'id': 7, 'name': 'fake-name',
- 'uuid': uuidutils.generate_uuid(),
- 'vcpus': 2, 'memory_mb': 2048}
- instance = fake_instance.fake_instance_obj(
- context.RequestContext('fake', 'fake', is_admin=False),
- **instance_values)
-
- session = driver.VMwareAPISession()
-
- config_spec = vm_util.get_vm_create_spec(
- session.vim.client.factory,
- instance, instance.name, 'fake-datastore', [],
- os_type='invalid_os_type')
-
- self.assertRaises(vexc.VMwareDriverException,
- vm_util.create_vm, session, instance, 'folder',
- config_spec, 'res-pool')
- self.assertTrue(found[0])
-
- def test_convert_vif_model(self):
- expected = "VirtualE1000"
- result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000)
- self.assertEqual(expected, result)
- expected = "VirtualE1000e"
- result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000E)
- self.assertEqual(expected, result)
- types = ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
- "VirtualVmxnet"]
- for type in types:
- self.assertEqual(type,
- vm_util.convert_vif_model(type))
- self.assertRaises(exception.Invalid,
- vm_util.convert_vif_model,
- "InvalidVifModel")
-
- def test_power_on_instance_with_vm_ref(self):
- session = fake.FakeSession()
- fake_instance = mock.MagicMock()
- with contextlib.nested(
- mock.patch.object(session, "_call_method",
- return_value='fake-task'),
- mock.patch.object(session, "_wait_for_task"),
- ) as (fake_call_method, fake_wait_for_task):
- vm_util.power_on_instance(session, fake_instance,
- vm_ref='fake-vm-ref')
- fake_call_method.assert_called_once_with(session.vim,
- "PowerOnVM_Task",
- 'fake-vm-ref')
- fake_wait_for_task.assert_called_once_with('fake-task')
-
- def test_power_on_instance_without_vm_ref(self):
- session = fake.FakeSession()
- fake_instance = mock.MagicMock()
- with contextlib.nested(
- mock.patch.object(vm_util, "get_vm_ref",
- return_value='fake-vm-ref'),
- mock.patch.object(session, "_call_method",
- return_value='fake-task'),
- mock.patch.object(session, "_wait_for_task"),
- ) as (fake_get_vm_ref, fake_call_method, fake_wait_for_task):
- vm_util.power_on_instance(session, fake_instance)
- fake_get_vm_ref.assert_called_once_with(session, fake_instance)
- fake_call_method.assert_called_once_with(session.vim,
- "PowerOnVM_Task",
- 'fake-vm-ref')
- fake_wait_for_task.assert_called_once_with('fake-task')
-
- def test_power_on_instance_with_exception(self):
- session = fake.FakeSession()
- fake_instance = mock.MagicMock()
- with contextlib.nested(
- mock.patch.object(session, "_call_method",
- return_value='fake-task'),
- mock.patch.object(session, "_wait_for_task",
- side_effect=exception.NovaException('fake')),
- ) as (fake_call_method, fake_wait_for_task):
- self.assertRaises(exception.NovaException,
- vm_util.power_on_instance,
- session, fake_instance,
- vm_ref='fake-vm-ref')
- fake_call_method.assert_called_once_with(session.vim,
- "PowerOnVM_Task",
- 'fake-vm-ref')
- fake_wait_for_task.assert_called_once_with('fake-task')
-
- def test_power_on_instance_with_power_state_exception(self):
- session = fake.FakeSession()
- fake_instance = mock.MagicMock()
- with contextlib.nested(
- mock.patch.object(session, "_call_method",
- return_value='fake-task'),
- mock.patch.object(
- session, "_wait_for_task",
- side_effect=vexc.InvalidPowerStateException),
- ) as (fake_call_method, fake_wait_for_task):
- vm_util.power_on_instance(session, fake_instance,
- vm_ref='fake-vm-ref')
- fake_call_method.assert_called_once_with(session.vim,
- "PowerOnVM_Task",
- 'fake-vm-ref')
- fake_wait_for_task.assert_called_once_with('fake-task')
-
- def test_create_virtual_disk(self):
- session = fake.FakeSession()
- dm = session.vim.service_content.virtualDiskManager
- with contextlib.nested(
- mock.patch.object(vm_util, "get_vmdk_create_spec",
- return_value='fake-spec'),
- mock.patch.object(session, "_call_method",
- return_value='fake-task'),
- mock.patch.object(session, "_wait_for_task"),
- ) as (fake_get_spec, fake_call_method, fake_wait_for_task):
- vm_util.create_virtual_disk(session, 'fake-dc-ref',
- 'fake-adapter-type', 'fake-disk-type',
- 'fake-path', 7)
- fake_get_spec.assert_called_once_with(
- session.vim.client.factory, 7,
- 'fake-adapter-type',
- 'fake-disk-type')
- fake_call_method.assert_called_once_with(
- session.vim,
- "CreateVirtualDisk_Task",
- dm,
- name='fake-path',
- datacenter='fake-dc-ref',
- spec='fake-spec')
- fake_wait_for_task.assert_called_once_with('fake-task')
-
- def test_copy_virtual_disk(self):
- session = fake.FakeSession()
- dm = session.vim.service_content.virtualDiskManager
- with contextlib.nested(
- mock.patch.object(session, "_call_method",
- return_value='fake-task'),
- mock.patch.object(session, "_wait_for_task"),
- ) as (fake_call_method, fake_wait_for_task):
- vm_util.copy_virtual_disk(session, 'fake-dc-ref',
- 'fake-source', 'fake-dest')
- fake_call_method.assert_called_once_with(
- session.vim,
- "CopyVirtualDisk_Task",
- dm,
- sourceName='fake-source',
- sourceDatacenter='fake-dc-ref',
- destName='fake-dest')
- fake_wait_for_task.assert_called_once_with('fake-task')
-
- def _create_fake_vm_objects(self):
- fake_objects = fake.FakeRetrieveResult()
- fake_objects.add_object(fake.VirtualMachine())
- return fake_objects
-
- def test_get_values(self):
- objects = self._create_fake_vm_objects()
- query = vm_util.get_values_from_object_properties(
- fake.FakeObjectRetrievalSession(objects), objects)
- self.assertEqual('poweredOn', query['runtime.powerState'])
- self.assertEqual('guestToolsRunning',
- query['summary.guest.toolsRunningStatus'])
- self.assertEqual('toolsOk', query['summary.guest.toolsStatus'])
-
- def test_reconfigure_vm(self):
- session = fake.FakeSession()
- with contextlib.nested(
- mock.patch.object(session, '_call_method',
- return_value='fake_reconfigure_task'),
- mock.patch.object(session, '_wait_for_task')
- ) as (_call_method, _wait_for_task):
- vm_util.reconfigure_vm(session, 'fake-ref', 'fake-spec')
- _call_method.assert_called_once_with(mock.ANY,
- 'ReconfigVM_Task', 'fake-ref', spec='fake-spec')
- _wait_for_task.assert_called_once_with(
- 'fake_reconfigure_task')
-
- def test_get_network_attach_config_spec_opaque(self):
- vif_info = {'network_name': 'br-int',
- 'mac_address': '00:00:00:ca:fe:01',
- 'network_ref': {'type': 'OpaqueNetwork',
- 'network-id': 'fake-network-id',
- 'network-type': 'opaque'},
- 'iface_id': 7,
- 'vif_model': 'VirtualE1000'}
- result = vm_util.get_network_attach_config_spec(
- fake.FakeFactory(), vif_info, 1)
- card = 'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo'
- expected = """{
- 'extraConfig': [{'value': 7,
- 'key': 'nvp.iface-id.1',
- 'obj_name':'ns0:OptionValue'}],
- 'deviceChange': [
- {'device': {
- 'macAddress':'00:00:00:ca:fe:01',
- 'addressType': 'manual',
- 'connectable': {
- 'allowGuestControl':True,
- 'startConnected': True,
- 'connected': True,
- 'obj_name':'ns0:VirtualDeviceConnectInfo'},
- 'backing': {
- 'opaqueNetworkType': 'opaque',
- 'opaqueNetworkId': 'fake-network-id',
- 'obj_name': '%(card)s'},
- 'key': -47,
- 'obj_name': 'ns0:VirtualE1000',
- 'wakeOnLanEnabled': True},
- 'operation': 'add',
- 'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
- 'obj_name':'ns0:VirtualMachineConfigSpec'}""" % {'card': card}
- expected = re.sub(r'\s+', '', expected)
- result = re.sub(r'\s+', '', repr(result))
- self.assertEqual(expected, result)
-
- def test_get_network_attach_config_spec_dvs(self):
- vif_info = {'network_name': 'br100',
- 'mac_address': '00:00:00:ca:fe:01',
- 'network_ref': {'type': 'DistributedVirtualPortgroup',
- 'dvsw': 'fake-network-id',
- 'dvpg': 'fake-group'},
- 'iface_id': 7,
- 'vif_model': 'VirtualE1000'}
- result = vm_util.get_network_attach_config_spec(
- fake.FakeFactory(), vif_info, 1)
- port = 'ns0:DistributedVirtualSwitchPortConnection'
- backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo'
- expected = """{
- 'extraConfig': [{'value': 7,
- 'key': 'nvp.iface-id.1',
- 'obj_name': 'ns0:OptionValue'}],
- 'deviceChange': [
- {'device': {'macAddress': '00:00:00:ca:fe:01',
- 'addressType': 'manual',
- 'connectable': {
- 'allowGuestControl': True,
- 'startConnected': True,
- 'connected': True,
- 'obj_name': 'ns0:VirtualDeviceConnectInfo'},
- 'backing': {
- 'port': {
- 'portgroupKey': 'fake-group',
- 'switchUuid': 'fake-network-id',
- 'obj_name': '%(obj_name_port)s'},
- 'obj_name': '%(obj_name_backing)s'},
- 'key': -47,
- 'obj_name': 'ns0:VirtualE1000',
- 'wakeOnLanEnabled': True},
- 'operation': 'add',
- 'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
- 'obj_name':'ns0:VirtualMachineConfigSpec'}""" % {
- 'obj_name_backing': backing,
- 'obj_name_port': port}
- expected = re.sub(r'\s+', '', expected)
- result = re.sub(r'\s+', '', repr(result))
- self.assertEqual(expected, result)
-
- def test_get_network_detach_config_spec(self):
- result = vm_util.get_network_detach_config_spec(
- fake.FakeFactory(), 'fake-device', 2)
- expected = """{
- 'extraConfig': [{'value': 'free',
- 'key': 'nvp.iface-id.2',
- 'obj_name': 'ns0:OptionValue'}],
- 'deviceChange': [{'device': 'fake-device',
- 'operation': 'remove',
- 'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
- 'obj_name':'ns0:VirtualMachineConfigSpec'}"""
- expected = re.sub(r'\s+', '', expected)
- result = re.sub(r'\s+', '', repr(result))
- self.assertEqual(expected, result)
-
- @mock.patch.object(vm_util, "get_vm_ref")
- def test_power_off_instance(self, fake_get_ref):
- session = fake.FakeSession()
- fake_instance = mock.MagicMock()
- with contextlib.nested(
- mock.patch.object(session, '_call_method',
- return_value='fake-task'),
- mock.patch.object(session, '_wait_for_task')
- ) as (fake_call_method, fake_wait_for_task):
- vm_util.power_off_instance(session, fake_instance, 'fake-vm-ref')
- fake_call_method.assert_called_once_with(session.vim,
- "PowerOffVM_Task",
- 'fake-vm-ref')
- fake_wait_for_task.assert_called_once_with('fake-task')
- self.assertFalse(fake_get_ref.called)
-
- @mock.patch.object(vm_util, "get_vm_ref", return_value="fake-vm-ref")
- def test_power_off_instance_no_vm_ref(self, fake_get_ref):
- session = fake.FakeSession()
- fake_instance = mock.MagicMock()
- with contextlib.nested(
- mock.patch.object(session, '_call_method',
- return_value='fake-task'),
- mock.patch.object(session, '_wait_for_task')
- ) as (fake_call_method, fake_wait_for_task):
- vm_util.power_off_instance(session, fake_instance)
- fake_get_ref.assert_called_once_with(session, fake_instance)
- fake_call_method.assert_called_once_with(session.vim,
- "PowerOffVM_Task",
- 'fake-vm-ref')
- fake_wait_for_task.assert_called_once_with('fake-task')
-
- @mock.patch.object(vm_util, "get_vm_ref")
- def test_power_off_instance_with_exception(self, fake_get_ref):
- session = fake.FakeSession()
- fake_instance = mock.MagicMock()
- with contextlib.nested(
- mock.patch.object(session, '_call_method',
- return_value='fake-task'),
- mock.patch.object(session, '_wait_for_task',
- side_effect=exception.NovaException('fake'))
- ) as (fake_call_method, fake_wait_for_task):
- self.assertRaises(exception.NovaException,
- vm_util.power_off_instance,
- session, fake_instance, 'fake-vm-ref')
- fake_call_method.assert_called_once_with(session.vim,
- "PowerOffVM_Task",
- 'fake-vm-ref')
- fake_wait_for_task.assert_called_once_with('fake-task')
- self.assertFalse(fake_get_ref.called)
-
- @mock.patch.object(vm_util, "get_vm_ref")
- def test_power_off_instance_power_state_exception(self, fake_get_ref):
- session = fake.FakeSession()
- fake_instance = mock.MagicMock()
- with contextlib.nested(
- mock.patch.object(session, '_call_method',
- return_value='fake-task'),
- mock.patch.object(
- session, '_wait_for_task',
- side_effect=vexc.InvalidPowerStateException)
- ) as (fake_call_method, fake_wait_for_task):
- vm_util.power_off_instance(session, fake_instance, 'fake-vm-ref')
- fake_call_method.assert_called_once_with(session.vim,
- "PowerOffVM_Task",
- 'fake-vm-ref')
- fake_wait_for_task.assert_called_once_with('fake-task')
- self.assertFalse(fake_get_ref.called)
-
-
-@mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
-class VMwareVMUtilGetHostRefTestCase(test.NoDBTestCase):
- # N.B. Mocking on the class only mocks test_*(), but we need
- # VMwareAPISession.vim to be mocked in both setUp and tests. Not mocking in
- # setUp causes object initialisation to fail. Not mocking in tests results
- # in vim calls not using FakeVim.
- @mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
- def setUp(self):
- super(VMwareVMUtilGetHostRefTestCase, self).setUp()
- fake.reset()
- vm_util.vm_refs_cache_reset()
-
- self.session = driver.VMwareAPISession()
-
- # Create a fake VirtualMachine running on a known host
- self.host_ref = fake._db_content['HostSystem'].keys()[0]
- self.vm_ref = fake.create_vm(host_ref=self.host_ref)
-
- @mock.patch.object(vm_util, 'get_vm_ref')
- def test_get_host_ref_for_vm(self, mock_get_vm_ref):
- mock_get_vm_ref.return_value = self.vm_ref
-
- ret = vm_util.get_host_ref_for_vm(self.session, 'fake-instance')
-
- mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
- self.assertEqual(self.host_ref, ret)
-
- @mock.patch.object(vm_util, 'get_vm_ref')
- def test_get_host_name_for_vm(self, mock_get_vm_ref):
- mock_get_vm_ref.return_value = self.vm_ref
-
- host = fake._get_object(self.host_ref)
-
- ret = vm_util.get_host_name_for_vm(self.session, 'fake-instance')
-
- mock_get_vm_ref.assert_called_once_with(self.session, 'fake-instance')
- self.assertEqual(host.name, ret)
diff --git a/nova/tests/virt/vmwareapi/test_vmops.py b/nova/tests/virt/vmwareapi/test_vmops.py
deleted file mode 100644
index dde695455c..0000000000
--- a/nova/tests/virt/vmwareapi/test_vmops.py
+++ /dev/null
@@ -1,1293 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import contextlib
-
-import mock
-from oslo.utils import units
-from oslo.vmware import exceptions as vexc
-
-from nova.compute import power_state
-from nova import context
-from nova import db
-from nova import exception
-from nova.network import model as network_model
-from nova import objects
-from nova.openstack.common import uuidutils
-from nova import test
-from nova.tests import fake_instance
-import nova.tests.image.fake
-from nova.tests.virt.vmwareapi import fake as vmwareapi_fake
-from nova.tests.virt.vmwareapi import stubs
-from nova.virt.vmwareapi import constants
-from nova.virt.vmwareapi import driver
-from nova.virt.vmwareapi import ds_util
-from nova.virt.vmwareapi import images
-from nova.virt.vmwareapi import vim_util
-from nova.virt.vmwareapi import vm_util
-from nova.virt.vmwareapi import vmops
-
-
-class DsPathMatcher:
- def __init__(self, expected_ds_path_str):
- self.expected_ds_path_str = expected_ds_path_str
-
- def __eq__(self, ds_path_param):
- return str(ds_path_param) == self.expected_ds_path_str
-
-
-class VMwareVMOpsTestCase(test.NoDBTestCase):
- def setUp(self):
- super(VMwareVMOpsTestCase, self).setUp()
- vmwareapi_fake.reset()
- stubs.set_stubs(self.stubs)
- self.flags(image_cache_subdirectory_name='vmware_base',
- my_ip='',
- flat_injected=True,
- vnc_enabled=True)
- self._context = context.RequestContext('fake_user', 'fake_project')
- self._session = driver.VMwareAPISession()
-
- self._virtapi = mock.Mock()
- self._vmops = vmops.VMwareVMOps(self._session, self._virtapi, None)
-
- self._image_id = nova.tests.image.fake.get_valid_image_id()
- self._instance_values = {
- 'name': 'fake_name',
- 'uuid': 'fake_uuid',
- 'vcpus': 1,
- 'memory_mb': 512,
- 'image_ref': self._image_id,
- 'root_gb': 10,
- 'node': 'respool-1001(MyResPoolName)',
- 'expected_attrs': ['system_metadata'],
- }
- self._instance = fake_instance.fake_instance_obj(
- self._context, **self._instance_values)
-
- fake_ds_ref = vmwareapi_fake.ManagedObjectReference('fake-ds')
- self._ds = ds_util.Datastore(
- ref=fake_ds_ref, name='fake_ds',
- capacity=10 * units.Gi,
- freespace=10 * units.Gi)
- self._dc_info = vmops.DcInfo(
- ref='fake_dc_ref', name='fake_dc',
- vmFolder='fake_vm_folder')
-
- subnet_4 = network_model.Subnet(cidr='192.168.0.1/24',
- dns=[network_model.IP('192.168.0.1')],
- gateway=
- network_model.IP('192.168.0.1'),
- ips=[
- network_model.IP('192.168.0.100')],
- routes=None)
- subnet_6 = network_model.Subnet(cidr='dead:beef::1/64',
- dns=None,
- gateway=
- network_model.IP('dead:beef::1'),
- ips=[network_model.IP(
- 'dead:beef::dcad:beff:feef:0')],
- routes=None)
- network = network_model.Network(id=0,
- bridge='fa0',
- label='fake',
- subnets=[subnet_4, subnet_6],
- vlan=None,
- bridge_interface=None,
- injected=True)
- self._network_values = {
- 'id': None,
- 'address': 'DE:AD:BE:EF:00:00',
- 'network': network,
- 'type': None,
- 'devname': None,
- 'ovs_interfaceid': None,
- 'rxtx_cap': 3
- }
- self.network_info = network_model.NetworkInfo([
- network_model.VIF(**self._network_values)
- ])
- pure_IPv6_network = network_model.Network(id=0,
- bridge='fa0',
- label='fake',
- subnets=[subnet_6],
- vlan=None,
- bridge_interface=None,
- injected=True)
- self.pure_IPv6_network_info = network_model.NetworkInfo([
- network_model.VIF(id=None,
- address='DE:AD:BE:EF:00:00',
- network=pure_IPv6_network,
- type=None,
- devname=None,
- ovs_interfaceid=None,
- rxtx_cap=3)
- ])
-
- def test_get_machine_id_str(self):
- result = vmops.VMwareVMOps._get_machine_id_str(self.network_info)
- self.assertEqual('DE:AD:BE:EF:00:00;192.168.0.100;255.255.255.0;'
- '192.168.0.1;192.168.0.255;192.168.0.1#', result)
- result = vmops.VMwareVMOps._get_machine_id_str(
- self.pure_IPv6_network_info)
- self.assertEqual('DE:AD:BE:EF:00:00;;;;;#', result)
-
- def _setup_create_folder_mocks(self):
- ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
- base_name = 'folder'
- ds_name = "datastore"
- ds_ref = mock.Mock()
- ds_ref.value = 1
- dc_ref = mock.Mock()
- ops._datastore_dc_mapping[ds_ref.value] = vmops.DcInfo(
- ref=dc_ref,
- name='fake-name',
- vmFolder='fake-folder')
- path = ds_util.DatastorePath(ds_name, base_name)
- return ds_name, ds_ref, ops, path, dc_ref
-
- @mock.patch.object(ds_util, 'mkdir')
- def test_create_folder_if_missing(self, mock_mkdir):
- ds_name, ds_ref, ops, path, dc = self._setup_create_folder_mocks()
- ops._create_folder_if_missing(ds_name, ds_ref, 'folder')
- mock_mkdir.assert_called_with(ops._session, path, dc)
-
- @mock.patch.object(ds_util, 'mkdir')
- def test_create_folder_if_missing_exception(self, mock_mkdir):
- ds_name, ds_ref, ops, path, dc = self._setup_create_folder_mocks()
- ds_util.mkdir.side_effect = vexc.FileAlreadyExistsException()
- ops._create_folder_if_missing(ds_name, ds_ref, 'folder')
- mock_mkdir.assert_called_with(ops._session, path, dc)
-
- @mock.patch.object(ds_util, 'file_exists', return_value=True)
- def test_check_if_folder_file_exists_with_existing(self,
- mock_exists):
- ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
- ops._create_folder_if_missing = mock.Mock()
- mock_ds_ref = mock.Mock()
- ops._check_if_folder_file_exists(mock.Mock(), mock_ds_ref, "datastore",
- "folder", "some_file")
- ops._create_folder_if_missing.assert_called_once_with('datastore',
- mock_ds_ref,
- 'vmware_base')
-
- @mock.patch.object(ds_util, 'file_exists', return_value=False)
- def test_check_if_folder_file_exists_no_existing(self, mock_exists):
- ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
- ops._create_folder_if_missing = mock.Mock()
- mock_ds_ref = mock.Mock()
- ops._check_if_folder_file_exists(mock.Mock(), mock_ds_ref, "datastore",
- "folder", "some_file")
- ops._create_folder_if_missing.assert_called_once_with('datastore',
- mock_ds_ref,
- 'vmware_base')
-
- def test_get_valid_vms_from_retrieve_result(self):
- ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
- fake_objects = vmwareapi_fake.FakeRetrieveResult()
- fake_objects.add_object(vmwareapi_fake.VirtualMachine())
- fake_objects.add_object(vmwareapi_fake.VirtualMachine())
- fake_objects.add_object(vmwareapi_fake.VirtualMachine())
- vms = ops._get_valid_vms_from_retrieve_result(fake_objects)
- self.assertEqual(3, len(vms))
-
- def test_get_valid_vms_from_retrieve_result_with_invalid(self):
- ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
- fake_objects = vmwareapi_fake.FakeRetrieveResult()
- fake_objects.add_object(vmwareapi_fake.VirtualMachine())
- invalid_vm1 = vmwareapi_fake.VirtualMachine()
- invalid_vm1.set('runtime.connectionState', 'orphaned')
- invalid_vm2 = vmwareapi_fake.VirtualMachine()
- invalid_vm2.set('runtime.connectionState', 'inaccessible')
- fake_objects.add_object(invalid_vm1)
- fake_objects.add_object(invalid_vm2)
- vms = ops._get_valid_vms_from_retrieve_result(fake_objects)
- self.assertEqual(1, len(vms))
-
- def test_delete_vm_snapshot(self):
- def fake_call_method(module, method, *args, **kwargs):
- self.assertEqual('RemoveSnapshot_Task', method)
- self.assertEqual('fake_vm_snapshot', args[0])
- self.assertFalse(kwargs['removeChildren'])
- self.assertTrue(kwargs['consolidate'])
- return 'fake_remove_snapshot_task'
-
- with contextlib.nested(
- mock.patch.object(self._session, '_wait_for_task'),
- mock.patch.object(self._session, '_call_method', fake_call_method)
- ) as (_wait_for_task, _call_method):
- self._vmops._delete_vm_snapshot(self._instance,
- "fake_vm_ref", "fake_vm_snapshot")
- _wait_for_task.assert_has_calls([
- mock.call('fake_remove_snapshot_task')])
-
- def test_create_vm_snapshot(self):
-
- method_list = ['CreateSnapshot_Task', 'get_dynamic_property']
-
- def fake_call_method(module, method, *args, **kwargs):
- expected_method = method_list.pop(0)
- self.assertEqual(expected_method, method)
- if (expected_method == 'CreateSnapshot_Task'):
- self.assertEqual('fake_vm_ref', args[0])
- self.assertFalse(kwargs['memory'])
- self.assertTrue(kwargs['quiesce'])
- return 'fake_snapshot_task'
- elif (expected_method == 'get_dynamic_property'):
- task_info = mock.Mock()
- task_info.result = "fake_snapshot_ref"
- self.assertEqual(('fake_snapshot_task', 'Task', 'info'), args)
- return task_info
-
- with contextlib.nested(
- mock.patch.object(self._session, '_wait_for_task'),
- mock.patch.object(self._session, '_call_method', fake_call_method)
- ) as (_wait_for_task, _call_method):
- snap = self._vmops._create_vm_snapshot(self._instance,
- "fake_vm_ref")
- self.assertEqual("fake_snapshot_ref", snap)
- _wait_for_task.assert_has_calls([
- mock.call('fake_snapshot_task')])
-
- def test_update_instance_progress(self):
- instance = objects.Instance(context=mock.MagicMock(), uuid='fake-uuid')
- with mock.patch.object(instance, 'save') as mock_save:
- self._vmops._update_instance_progress(instance._context,
- instance, 5, 10)
- mock_save.assert_called_once_with()
- self.assertEqual(50, instance.progress)
-
- @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
- @mock.patch.object(driver.VMwareAPISession, '_call_method')
- def test_get_info(self, mock_call, mock_get_vm_ref):
- props = ['summary.config.numCpu', 'summary.config.memorySizeMB',
- 'runtime.powerState']
- prop_cpu = vmwareapi_fake.Prop(props[0], 4)
- prop_mem = vmwareapi_fake.Prop(props[1], 128)
- prop_state = vmwareapi_fake.Prop(props[2], 'poweredOn')
- prop_list = [prop_state, prop_mem, prop_cpu]
- obj_content = vmwareapi_fake.ObjectContent(None, prop_list=prop_list)
- result = vmwareapi_fake.FakeRetrieveResult()
- result.add_object(obj_content)
- mock_call.return_value = result
- info = self._vmops.get_info(self._instance)
- mock_call.assert_called_once_with(vim_util,
- 'get_object_properties', None, 'fake_ref', 'VirtualMachine',
- props)
- mock_get_vm_ref.assert_called_once_with(self._session,
- self._instance)
- self.assertEqual(power_state.RUNNING, info['state'])
- self.assertEqual(128 * 1024, info['max_mem'])
- self.assertEqual(128 * 1024, info['mem'])
- self.assertEqual(4, info['num_cpu'])
- self.assertEqual(0, info['cpu_time'])
-
- @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
- @mock.patch.object(driver.VMwareAPISession, '_call_method')
- def test_get_info_when_ds_unavailable(self, mock_call, mock_get_vm_ref):
- props = ['summary.config.numCpu', 'summary.config.memorySizeMB',
- 'runtime.powerState']
- prop_state = vmwareapi_fake.Prop(props[2], 'poweredOff')
- # when vm's ds not available, only power state can be received
- prop_list = [prop_state]
- obj_content = vmwareapi_fake.ObjectContent(None, prop_list=prop_list)
- result = vmwareapi_fake.FakeRetrieveResult()
- result.add_object(obj_content)
- mock_call.return_value = result
- info = self._vmops.get_info(self._instance)
- mock_call.assert_called_once_with(vim_util,
- 'get_object_properties', None, 'fake_ref', 'VirtualMachine',
- props)
- mock_get_vm_ref.assert_called_once_with(self._session,
- self._instance)
- self.assertEqual(power_state.SHUTDOWN, info['state'])
- self.assertEqual(0, info['max_mem'])
- self.assertEqual(0, info['mem'])
- self.assertEqual(0, info['num_cpu'])
- self.assertEqual(0, info['cpu_time'])
-
- def _test_get_datacenter_ref_and_name(self, ds_ref_exists=False):
- instance_ds_ref = mock.Mock()
- instance_ds_ref.value = "ds-1"
- _vcvmops = vmops.VMwareVMOps(self._session, None, None)
- if ds_ref_exists:
- ds_ref = mock.Mock()
- ds_ref.value = "ds-1"
- else:
- ds_ref = None
-
- def fake_call_method(module, method, *args, **kwargs):
- fake_object1 = vmwareapi_fake.FakeRetrieveResult()
- fake_object1.add_object(vmwareapi_fake.Datacenter(
- ds_ref=ds_ref))
- if not ds_ref:
- # Token is set for the fake_object1, so it will continue to
- # fetch the next object.
- setattr(fake_object1, 'token', 'token-0')
- if method == "continue_to_get_objects":
- fake_object2 = vmwareapi_fake.FakeRetrieveResult()
- fake_object2.add_object(vmwareapi_fake.Datacenter())
- return fake_object2
-
- return fake_object1
-
- with mock.patch.object(self._session, '_call_method',
- side_effect=fake_call_method) as fake_call:
- dc_info = _vcvmops.get_datacenter_ref_and_name(instance_ds_ref)
-
- if ds_ref:
- self.assertEqual(1, len(_vcvmops._datastore_dc_mapping))
- fake_call.assert_called_once_with(vim_util, "get_objects",
- "Datacenter", ["name", "datastore", "vmFolder"])
- self.assertEqual("ha-datacenter", dc_info.name)
- else:
- calls = [mock.call(vim_util, "get_objects", "Datacenter",
- ["name", "datastore", "vmFolder"]),
- mock.call(vim_util, "continue_to_get_objects",
- "token-0")]
- fake_call.assert_has_calls(calls)
- self.assertIsNone(dc_info)
-
- def test_get_datacenter_ref_and_name(self):
- self._test_get_datacenter_ref_and_name(ds_ref_exists=True)
-
- def test_get_datacenter_ref_and_name_with_no_datastore(self):
- self._test_get_datacenter_ref_and_name()
-
- def test_unrescue_power_on(self):
- self._test_unrescue(True)
-
- def test_unrescue_power_off(self):
- self._test_unrescue(False)
-
- def _test_unrescue(self, power_on):
- self._vmops._volumeops = mock.Mock()
- vm_rescue_ref = mock.Mock()
- vm_ref = mock.Mock()
-
- args_list = [(vm_ref, 'VirtualMachine',
- 'config.hardware.device'),
- (vm_rescue_ref, 'VirtualMachine',
- 'config.hardware.device')]
-
- def fake_call_method(module, method, *args, **kwargs):
- expected_args = args_list.pop(0)
- self.assertEqual('get_dynamic_property', method)
- self.assertEqual(expected_args, args)
-
- path = mock.Mock()
- path_and_type = (path, mock.Mock(), mock.Mock())
- with contextlib.nested(
- mock.patch.object(vm_util, 'get_vmdk_path_and_adapter_type',
- return_value=path_and_type),
- mock.patch.object(vm_util, 'get_vmdk_volume_disk'),
- mock.patch.object(vm_util, 'power_on_instance'),
- mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
- mock.patch.object(vm_util, 'get_vm_ref_from_name',
- return_value=vm_rescue_ref),
- mock.patch.object(self._session, '_call_method',
- fake_call_method),
- mock.patch.object(vm_util, 'power_off_instance'),
- mock.patch.object(self._vmops, '_destroy_instance'),
- ) as (_get_vmdk_path_and_adapter_type, _get_vmdk_volume_disk,
- _power_on_instance, _get_vm_ref, _get_vm_ref_from_name,
- _call_method, _power_off, _destroy_instance):
- self._vmops.unrescue(self._instance, power_on=power_on)
-
- _get_vmdk_path_and_adapter_type.assert_called_once_with(
- None, uuid='fake_uuid')
- _get_vmdk_volume_disk.assert_called_once_with(None, path=path)
- if power_on:
- _power_on_instance.assert_called_once_with(self._session,
- self._instance,
- vm_ref=vm_ref)
- else:
- self.assertFalse(_power_on_instance.called)
- _get_vm_ref.assert_called_once_with(self._session,
- self._instance)
- _get_vm_ref_from_name.assert_called_once_with(self._session,
- 'fake_uuid-rescue')
- _power_off.assert_called_once_with(self._session, self._instance,
- vm_rescue_ref)
- _destroy_instance.assert_called_once_with(self._instance,
- instance_name='fake_uuid-rescue')
-
- def _test_finish_migration(self, power_on=True, resize_instance=False):
- """Tests the finish_migration method on vmops."""
- if resize_instance:
- self._instance.system_metadata = {'old_instance_type_root_gb': '0'}
- datastore = ds_util.Datastore(ref='fake-ref', name='fake')
- dc_info = vmops.DcInfo(ref='fake_ref', name='fake',
- vmFolder='fake_folder')
- with contextlib.nested(
- mock.patch.object(self._session, "_call_method",
- return_value='fake-task'),
- mock.patch.object(self._vmops, "_update_instance_progress"),
- mock.patch.object(self._session, "_wait_for_task"),
- mock.patch.object(vm_util, "get_vm_resize_spec",
- return_value='fake-spec'),
- mock.patch.object(ds_util, "get_datastore",
- return_value=datastore),
- mock.patch.object(self._vmops, 'get_datacenter_ref_and_name',
- return_value=dc_info),
- mock.patch.object(self._vmops, '_extend_virtual_disk'),
- mock.patch.object(vm_util, "power_on_instance")
- ) as (fake_call_method, fake_update_instance_progress,
- fake_wait_for_task, fake_vm_resize_spec,
- fake_get_datastore, fake_get_datacenter_ref_and_name,
- fake_extend_virtual_disk, fake_power_on):
- self._vmops.finish_migration(context=self._context,
- migration=None,
- instance=self._instance,
- disk_info=None,
- network_info=None,
- block_device_info=None,
- resize_instance=resize_instance,
- image_meta=None,
- power_on=power_on)
- if resize_instance:
- fake_vm_resize_spec.assert_called_once_with(
- self._session.vim.client.factory,
- self._instance)
- fake_call_method.assert_has_calls(mock.call(
- self._session.vim,
- "ReconfigVM_Task",
- 'f',
- spec='fake-spec'))
- fake_wait_for_task.assert_called_once_with('fake-task')
- fake_extend_virtual_disk.assert_called_once_with(
- self._instance, self._instance['root_gb'] * units.Mi,
- None, dc_info.ref)
- else:
- self.assertFalse(fake_vm_resize_spec.called)
- self.assertFalse(fake_wait_for_task.called)
- self.assertFalse(fake_extend_virtual_disk.called)
-
- if power_on:
- fake_power_on.assert_called_once_with(self._session,
- self._instance,
- vm_ref='f')
- else:
- self.assertFalse(fake_power_on.called)
- fake_update_instance_progress.called_once_with(
- self._context, self._instance, 4, vmops.RESIZE_TOTAL_STEPS)
-
- def test_finish_migration_power_on(self):
- self._test_finish_migration(power_on=True, resize_instance=False)
-
- def test_finish_migration_power_off(self):
- self._test_finish_migration(power_on=False, resize_instance=False)
-
- def test_finish_migration_power_on_resize(self):
- self._test_finish_migration(power_on=True, resize_instance=True)
-
- @mock.patch.object(vm_util, 'associate_vmref_for_instance')
- @mock.patch.object(vm_util, 'power_on_instance')
- def _test_finish_revert_migration(self, fake_power_on,
- fake_associate_vmref, power_on):
- """Tests the finish_revert_migration method on vmops."""
-
- # setup the test instance in the database
- self._vmops.finish_revert_migration(self._context,
- instance=self._instance,
- network_info=None,
- block_device_info=None,
- power_on=power_on)
- fake_associate_vmref.assert_called_once_with(self._session,
- self._instance,
- suffix='-orig')
- if power_on:
- fake_power_on.assert_called_once_with(self._session,
- self._instance)
- else:
- self.assertFalse(fake_power_on.called)
-
- def test_finish_revert_migration_power_on(self):
- self._test_finish_revert_migration(power_on=True)
-
- def test_finish_revert_migration_power_off(self):
- self._test_finish_revert_migration(power_on=False)
-
- @mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
- @mock.patch.object(vmops.VMwareVMOps, '_create_config_drive')
- def test_configure_config_drive(self,
- mock_create_config_drive,
- mock_attach_cdrom_to_vm):
- injected_files = mock.Mock()
- admin_password = mock.Mock()
- vm_ref = mock.Mock()
- mock_create_config_drive.return_value = "fake_iso_path"
- self._vmops._configure_config_drive(
- self._instance, vm_ref, self._dc_info, self._ds,
- injected_files, admin_password)
-
- upload_iso_path = self._ds.build_path("fake_iso_path")
- mock_create_config_drive.assert_called_once_with(self._instance,
- injected_files, admin_password, self._ds.name,
- self._dc_info.name, self._instance.uuid, "Fake-CookieJar")
- mock_attach_cdrom_to_vm.assert_called_once_with(
- vm_ref, self._instance, self._ds.ref, str(upload_iso_path))
-
- @mock.patch.object(vmops.LOG, 'debug')
- @mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
- @mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
- def test_spawn_mask_block_device_info_password(self,
- mock_build_virtual_machine,
- mock_get_vm_config_info,
- mock_debug):
- # Very simple test that just ensures block_device_info auth_password
- # is masked when logged; the rest of the test just fails out early.
- data = {'auth_password': 'scrubme'}
- bdm = [{'connection_info': {'data': data}}]
- bdi = {'block_device_mapping': bdm}
-
- self.password_logged = False
-
- # Tests that the parameters to the to_xml method are sanitized for
- # passwords when logged.
- def fake_debug(*args, **kwargs):
- if 'auth_password' in args[0]:
- self.password_logged = True
- self.assertNotIn('scrubme', args[0])
-
- mock_debug.side_effect = fake_debug
- self.flags(flat_injected=False, vnc_enabled=False)
-
- # Call spawn(). We don't care what it does as long as it generates
- # the log message, which we check below.
- with mock.patch.object(self._vmops, '_volumeops') as mock_vo:
- mock_vo.attach_root_volume.side_effect = test.TestingException
- try:
- self._vmops.spawn(
- self._context, self._instance, {},
- injected_files=None, admin_password=None,
- network_info=[], block_device_info=bdi
- )
- except test.TestingException:
- pass
-
- # Check that the relevant log message was generated, and therefore
- # that we checked it was scrubbed
- self.assertTrue(self.password_logged)
-
- def test_get_ds_browser(self):
- cache = self._vmops._datastore_browser_mapping
- ds_browser = mock.Mock()
- moref = vmwareapi_fake.ManagedObjectReference('datastore-100')
- self.assertIsNone(cache.get(moref.value))
- mock_call_method = mock.Mock(return_value=ds_browser)
- with mock.patch.object(self._session, '_call_method',
- mock_call_method):
- ret = self._vmops._get_ds_browser(moref)
- mock_call_method.assert_called_once_with(vim_util,
- 'get_dynamic_property', moref, 'Datastore', 'browser')
- self.assertIs(ds_browser, ret)
- self.assertIs(ds_browser, cache.get(moref.value))
-
- @mock.patch.object(
- vmops.VMwareVMOps, '_sized_image_exists', return_value=False)
- @mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
- @mock.patch.object(vm_util, 'copy_virtual_disk')
- def _test_use_disk_image_as_linked_clone(self,
- mock_copy_virtual_disk,
- mock_extend_virtual_disk,
- mock_sized_image_exists,
- flavor_fits_image=False):
- file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi
- image_info = images.VMwareImage(
- image_id=self._image_id,
- file_size=file_size,
- linked_clone=False)
-
- cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
- mock_imagecache = mock.Mock()
- mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
- vi = vmops.VirtualMachineInstanceConfigInfo(
- self._instance, "fake_uuid", image_info,
- self._ds, self._dc_info, mock_imagecache)
-
- sized_cached_image_ds_loc = cache_root_folder.join(
- "%s.%s.vmdk" % (self._image_id, vi.root_gb))
-
- self._vmops._volumeops = mock.Mock()
- mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
-
- self._vmops._use_disk_image_as_linked_clone("fake_vm_ref", vi)
-
- mock_copy_virtual_disk.assert_called_once_with(
- self._session, self._dc_info.ref,
- str(vi.cache_image_path),
- str(sized_cached_image_ds_loc))
-
- if not flavor_fits_image:
- mock_extend_virtual_disk.assert_called_once_with(
- self._instance, vi.root_gb * units.Mi,
- str(sized_cached_image_ds_loc),
- self._dc_info.ref)
-
- mock_attach_disk_to_vm.assert_called_once_with(
- "fake_vm_ref", self._instance, vi.ii.adapter_type,
- vi.ii.disk_type,
- str(sized_cached_image_ds_loc),
- vi.root_gb * units.Mi, False)
-
- def test_use_disk_image_as_linked_clone(self):
- self._test_use_disk_image_as_linked_clone()
-
- def test_use_disk_image_as_linked_clone_flavor_fits_image(self):
- self._test_use_disk_image_as_linked_clone(flavor_fits_image=True)
-
- @mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
- @mock.patch.object(vm_util, 'copy_virtual_disk')
- def _test_use_disk_image_as_full_clone(self,
- mock_copy_virtual_disk,
- mock_extend_virtual_disk,
- flavor_fits_image=False):
- file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi
- image_info = images.VMwareImage(
- image_id=self._image_id,
- file_size=file_size,
- linked_clone=False)
-
- cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
- mock_imagecache = mock.Mock()
- mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
- vi = vmops.VirtualMachineInstanceConfigInfo(
- self._instance, "fake_uuid", image_info,
- self._ds, self._dc_info, mock_imagecache)
-
- self._vmops._volumeops = mock.Mock()
- mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
-
- self._vmops._use_disk_image_as_full_clone("fake_vm_ref", vi)
-
- mock_copy_virtual_disk.assert_called_once_with(
- self._session, self._dc_info.ref,
- str(vi.cache_image_path),
- '[fake_ds] fake_uuid/fake_uuid.vmdk')
-
- if not flavor_fits_image:
- mock_extend_virtual_disk.assert_called_once_with(
- self._instance, vi.root_gb * units.Mi,
- '[fake_ds] fake_uuid/fake_uuid.vmdk', self._dc_info.ref)
-
- mock_attach_disk_to_vm.assert_called_once_with(
- "fake_vm_ref", self._instance, vi.ii.adapter_type,
- vi.ii.disk_type, '[fake_ds] fake_uuid/fake_uuid.vmdk',
- vi.root_gb * units.Mi, False)
-
- def test_use_disk_image_as_full_clone(self):
- self._test_use_disk_image_as_full_clone()
-
- def test_use_disk_image_as_full_clone_image_too_big(self):
- self._test_use_disk_image_as_full_clone(flavor_fits_image=True)
-
- @mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
- @mock.patch.object(vm_util, 'create_virtual_disk')
- def _test_use_iso_image(self,
- mock_create_virtual_disk,
- mock_attach_cdrom,
- with_root_disk):
- image_info = images.VMwareImage(
- image_id=self._image_id,
- file_size=10 * units.Mi,
- linked_clone=True)
-
- cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
- mock_imagecache = mock.Mock()
- mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
- vi = vmops.VirtualMachineInstanceConfigInfo(
- self._instance, "fake_uuid", image_info,
- self._ds, self._dc_info, mock_imagecache)
-
- self._vmops._volumeops = mock.Mock()
- mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm
-
- self._vmops._use_iso_image("fake_vm_ref", vi)
-
- mock_attach_cdrom.assert_called_once_with(
- "fake_vm_ref", self._instance, self._ds.ref,
- str(vi.cache_image_path))
-
- if with_root_disk:
- mock_create_virtual_disk.assert_called_once_with(
- self._session, self._dc_info.ref,
- vi.ii.adapter_type, vi.ii.disk_type,
- '[fake_ds] fake_uuid/fake_uuid.vmdk',
- vi.root_gb * units.Mi)
- linked_clone = False
- mock_attach_disk_to_vm.assert_called_once_with(
- "fake_vm_ref", self._instance,
- vi.ii.adapter_type, vi.ii.disk_type,
- '[fake_ds] fake_uuid/fake_uuid.vmdk',
- vi.root_gb * units.Mi, linked_clone)
-
- def test_use_iso_image_with_root_disk(self):
- self._test_use_iso_image(with_root_disk=True)
-
- def test_use_iso_image_without_root_disk(self):
- self._test_use_iso_image(with_root_disk=False)
-
- def _verify_spawn_method_calls(self, mock_call_method):
- # TODO(vui): More explicit assertions of spawn() behavior
- # are waiting on additional refactoring pertaining to image
- # handling/manipulation. Till then, we continue to assert on the
- # sequence of VIM operations invoked.
- expected_methods = ['get_dynamic_property',
- 'SearchDatastore_Task',
- 'CreateVirtualDisk_Task',
- 'DeleteDatastoreFile_Task',
- 'MoveDatastoreFile_Task',
- 'DeleteDatastoreFile_Task',
- 'SearchDatastore_Task',
- 'ExtendVirtualDisk_Task',
- ]
-
- recorded_methods = [c[1][1] for c in mock_call_method.mock_calls]
- self.assertEqual(expected_methods, recorded_methods)
-
- @mock.patch(
- 'nova.virt.vmwareapi.vmops.VMwareVMOps._configure_config_drive')
- @mock.patch('nova.virt.vmwareapi.ds_util.get_datastore')
- @mock.patch(
- 'nova.virt.vmwareapi.vmops.VMwareVMOps.get_datacenter_ref_and_name')
- @mock.patch('nova.virt.vmwareapi.vm_util.get_mo_id_from_instance',
- return_value='fake_node_mo_id')
- @mock.patch('nova.virt.vmwareapi.vm_util.get_res_pool_ref',
- return_value='fake_rp_ref')
- @mock.patch('nova.virt.vmwareapi.vif.get_vif_info',
- return_value=[])
- @mock.patch('nova.utils.is_neutron',
- return_value=False)
- @mock.patch('nova.virt.vmwareapi.vm_util.get_vm_create_spec',
- return_value='fake_create_spec')
- @mock.patch('nova.virt.vmwareapi.vm_util.create_vm',
- return_value='fake_vm_ref')
- @mock.patch('nova.virt.vmwareapi.ds_util.mkdir')
- @mock.patch('nova.virt.vmwareapi.vmops.VMwareVMOps._set_machine_id')
- @mock.patch(
- 'nova.virt.vmwareapi.imagecache.ImageCacheManager.enlist_image')
- @mock.patch.object(vmops.VMwareVMOps, '_get_and_set_vnc_config')
- @mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
- @mock.patch('nova.virt.vmwareapi.vm_util.copy_virtual_disk')
- # TODO(dims): Need to add tests for create_virtual_disk after the
- # disk/image code in spawn gets refactored
- def _test_spawn(self,
- mock_copy_virtual_disk,
- mock_power_on_instance,
- mock_get_and_set_vnc_config,
- mock_enlist_image,
- mock_set_machine_id,
- mock_mkdir,
- mock_create_vm,
- mock_get_create_spec,
- mock_is_neutron,
- mock_get_vif_info,
- mock_get_res_pool_ref,
- mock_get_mo_id_for_instance,
- mock_get_datacenter_ref_and_name,
- mock_get_datastore,
- mock_configure_config_drive,
- block_device_info=None,
- power_on=True,
- allocations=None,
- config_drive=False):
-
- self._vmops._volumeops = mock.Mock()
- image = {
- 'id': 'fake-image-d',
- 'disk_format': 'vmdk',
- 'size': 1 * units.Gi,
- }
- network_info = mock.Mock()
- mock_get_datastore.return_value = self._ds
- mock_get_datacenter_ref_and_name.return_value = self._dc_info
- mock_call_method = mock.Mock(return_value='fake_task')
-
- with contextlib.nested(
- mock.patch.object(self._session, '_wait_for_task'),
- mock.patch.object(self._session, '_call_method',
- mock_call_method),
- mock.patch.object(uuidutils, 'generate_uuid',
- return_value='tmp-uuid'),
- mock.patch.object(images, 'fetch_image')
- ) as (_wait_for_task, _call_method, _generate_uuid, _fetch_image):
- self._vmops.spawn(self._context, self._instance, image,
- injected_files='fake_files',
- admin_password='password',
- network_info=network_info,
- block_device_info=block_device_info,
- power_on=power_on)
-
- mock_is_neutron.assert_called_once_with()
-
- expected_mkdir_calls = 2
- if block_device_info and len(block_device_info.get(
- 'block_device_mapping', [])) > 0:
- # if block_device_info contains key 'block_device_mapping'
- # with any information, method mkdir wouldn't be called in
- # method self._vmops.spawn()
- expected_mkdir_calls = 0
-
- self.assertEqual(expected_mkdir_calls, len(mock_mkdir.mock_calls))
-
- mock_get_mo_id_for_instance.assert_called_once_with(self._instance)
- mock_get_res_pool_ref.assert_called_once_with(
- self._session, None, 'fake_node_mo_id')
- mock_get_vif_info.assert_called_once_with(
- self._session, None, False,
- constants.DEFAULT_VIF_MODEL, network_info)
- if allocations is None:
- allocations = {}
- mock_get_create_spec.assert_called_once_with(
- self._session.vim.client.factory,
- self._instance,
- 'fake_uuid',
- 'fake_ds',
- [],
- 'otherGuest',
- allocations=allocations)
- mock_create_vm.assert_called_once_with(
- self._session,
- self._instance,
- 'fake_vm_folder',
- 'fake_create_spec',
- 'fake_rp_ref')
- mock_get_and_set_vnc_config.assert_called_once_with(
- self._session.vim.client.factory,
- self._instance)
- mock_set_machine_id.assert_called_once_with(
- self._session.vim.client.factory,
- self._instance,
- network_info)
- if power_on:
- mock_power_on_instance.assert_called_once_with(
- self._session, self._instance, vm_ref='fake_vm_ref')
- else:
- self.assertFalse(mock_power_on_instance.called)
-
- if block_device_info:
- root_disk = block_device_info['block_device_mapping'][0]
- mock_attach = self._vmops._volumeops.attach_root_volume
- mock_attach.assert_called_once_with(
- root_disk['connection_info'], self._instance, 'vda',
- self._ds.ref)
- self.assertFalse(_wait_for_task.called)
- self.assertFalse(_fetch_image.called)
- self.assertFalse(_call_method.called)
- else:
- mock_enlist_image.assert_called_once_with(
- self._image_id, self._ds, self._dc_info.ref)
-
- upload_file_name = 'vmware_temp/tmp-uuid/%s/%s-flat.vmdk' % (
- self._image_id, self._image_id)
- _fetch_image.assert_called_once_with(
- self._context,
- self._instance,
- self._session._host,
- self._dc_info.name,
- self._ds.name,
- upload_file_name,
- cookies='Fake-CookieJar')
- self.assertTrue(len(_wait_for_task.mock_calls) > 0)
- self._verify_spawn_method_calls(_call_method)
-
- dc_ref = 'fake_dc_ref'
- source_file = unicode('[fake_ds] vmware_base/%s/%s.vmdk' %
- (self._image_id, self._image_id))
- dest_file = unicode('[fake_ds] vmware_base/%s/%s.%d.vmdk' %
- (self._image_id, self._image_id,
- self._instance['root_gb']))
- # TODO(dims): add more tests for copy_virtual_disk after
- # the disk/image code in spawn gets refactored
- mock_copy_virtual_disk.assert_called_with(self._session,
- dc_ref,
- source_file,
- dest_file)
- if config_drive:
- mock_configure_config_drive.assert_called_once_with(
- self._instance, 'fake_vm_ref', self._dc_info,
- self._ds, 'fake_files', 'password')
-
- @mock.patch.object(ds_util, 'get_datastore')
- @mock.patch.object(vmops.VMwareVMOps, 'get_datacenter_ref_and_name')
- def _test_get_spawn_vm_config_info(self,
- mock_get_datacenter_ref_and_name,
- mock_get_datastore,
- image_size_bytes=0,
- instance_name=None):
- image_info = images.VMwareImage(
- image_id=self._image_id,
- file_size=image_size_bytes,
- linked_clone=True)
-
- mock_get_datastore.return_value = self._ds
- mock_get_datacenter_ref_and_name.return_value = self._dc_info
-
- vi = self._vmops._get_vm_config_info(
- self._instance, image_info, instance_name=instance_name)
- self.assertEqual(image_info, vi.ii)
- self.assertEqual(self._ds, vi.datastore)
- self.assertEqual(self._instance.root_gb, vi.root_gb)
- self.assertEqual(self._instance, vi.instance)
- if instance_name is not None:
- self.assertEqual(instance_name, vi.instance_name)
- else:
- self.assertEqual(self._instance.uuid, vi.instance_name)
-
- cache_image_path = '[%s] vmware_base/%s/%s.vmdk' % (
- self._ds.name, self._image_id, self._image_id)
- self.assertEqual(cache_image_path, str(vi.cache_image_path))
-
- cache_image_folder = '[%s] vmware_base/%s' % (
- self._ds.name, self._image_id)
- self.assertEqual(cache_image_folder, str(vi.cache_image_folder))
-
- def test_get_spawn_vm_config_info(self):
- image_size = (self._instance.root_gb) * units.Gi / 2
- self._test_get_spawn_vm_config_info(image_size_bytes=image_size)
-
- def test_get_spawn_vm_config_info_image_too_big(self):
- image_size = (self._instance.root_gb + 1) * units.Gi
- self.assertRaises(exception.InstanceUnacceptable,
- self._test_get_spawn_vm_config_info,
- image_size_bytes=image_size)
-
- def test_get_spawn_vm_config_info_with_instance_name(self):
- image_size = (self._instance.root_gb) * units.Gi / 2
- self._test_get_spawn_vm_config_info(
- image_size_bytes=image_size,
- instance_name="foo_instance_name")
-
- def test_spawn(self):
- self._test_spawn()
-
- def test_spawn_config_drive_enabled(self):
- self.flags(force_config_drive=True)
- self._test_spawn(config_drive=True)
-
- def test_spawn_no_power_on(self):
- self._test_spawn(power_on=False)
-
- def test_spawn_with_block_device_info(self):
- block_device_info = {
- 'block_device_mapping': [{'connection_info': 'fake'}]
- }
- self._test_spawn(block_device_info=block_device_info)
-
- def test_spawn_with_block_device_info_with_config_drive(self):
- self.flags(force_config_drive=True)
- block_device_info = {
- 'block_device_mapping': [{'connection_info': 'fake'}]
- }
- self._test_spawn(block_device_info=block_device_info,
- config_drive=True)
-
- def test_build_virtual_machine(self):
- image_id = nova.tests.image.fake.get_valid_image_id()
- image = images.VMwareImage(image_id=image_id)
-
- vm_ref = self._vmops.build_virtual_machine(self._instance,
- 'fake-instance-name',
- image, self._dc_info,
- self._ds, self.network_info)
-
- vm = vmwareapi_fake._get_object(vm_ref)
-
- # Test basic VM parameters
- self.assertEqual('fake-instance-name', vm.name)
- # NOTE(mdbooth): The instanceUuid behaviour below is apparently
- # deliberate.
- self.assertEqual('fake-instance-name',
- vm.get('summary.config.instanceUuid'))
- self.assertEqual(self._instance_values['vcpus'],
- vm.get('summary.config.numCpu'))
- self.assertEqual(self._instance_values['memory_mb'],
- vm.get('summary.config.memorySizeMB'))
-
- # Test NSX config
- for optval in vm.get('config.extraConfig').OptionValue:
- if optval.key == 'nvp.vm-uuid':
- self.assertEqual(self._instance_values['uuid'], optval.value)
- break
- else:
- self.fail('nvp.vm-uuid not found in extraConfig')
-
- # Test that the VM is associated with the specified datastore
- datastores = vm.datastore.ManagedObjectReference
- self.assertEqual(1, len(datastores))
-
- datastore = vmwareapi_fake._get_object(datastores[0])
- self.assertEqual(self._ds.name, datastore.get('summary.name'))
-
- # Test that the VM's network is configured as specified
- devices = vm.get('config.hardware.device').VirtualDevice
- for device in devices:
- if device.obj_name != 'ns0:VirtualE1000':
- continue
- self.assertEqual(self._network_values['address'],
- device.macAddress)
- break
- else:
- self.fail('NIC not configured')
-
- def test_spawn_cpu_limit(self):
- def _fake_flavor_get(context, id):
- flavor = stubs._fake_flavor_get(context, id)
- flavor['extra_specs'].update({'quota:cpu_limit': 7})
- return flavor
-
- with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
- self._test_spawn(allocations={'cpu_limit': 7})
-
- def test_spawn_cpu_reservation(self):
- def _fake_flavor_get(context, id):
- flavor = stubs._fake_flavor_get(context, id)
- flavor['extra_specs'].update({'quota:cpu_reservation': 7})
- return flavor
-
- with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
- self._test_spawn(allocations={'cpu_reservation': 7})
-
- def test_spawn_cpu_allocations(self):
- def _fake_flavor_get(context, id):
- flavor = stubs._fake_flavor_get(context, id)
- flavor['extra_specs'].update({'quota:cpu_limit': 7,
- 'quota:cpu_reservation': 6})
- return flavor
-
- with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
- self._test_spawn(allocations={'cpu_limit': 7,
- 'cpu_reservation': 6})
-
- def test_spawn_cpu_shares_level(self):
- def _fake_flavor_get(context, id):
- flavor = stubs._fake_flavor_get(context, id)
- flavor['extra_specs'].update({'quota:cpu_shares_level': 'high'})
- return flavor
-
- with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
- self._test_spawn(allocations={'cpu_shares_level': 'high'})
-
- def test_spawn_cpu_shares_custom(self):
- def _fake_flavor_get(context, id):
- flavor = stubs._fake_flavor_get(context, id)
- flavor['extra_specs'].update({'quota:cpu_shares_level': 'custom',
- 'quota:cpu_shares_share': 1948})
- return flavor
-
- with mock.patch.object(db, 'flavor_get', _fake_flavor_get):
- self._test_spawn(allocations={'cpu_shares_level': 'custom',
- 'cpu_shares_share': 1948})
-
- def _make_vm_config_info(self, is_iso=False, is_sparse_disk=False):
- disk_type = (constants.DISK_TYPE_SPARSE if is_sparse_disk
- else constants.DEFAULT_DISK_TYPE)
- file_type = (constants.DISK_FORMAT_ISO if is_iso
- else constants.DEFAULT_DISK_FORMAT)
-
- image_info = images.VMwareImage(
- image_id=self._image_id,
- file_size=10 * units.Mi,
- file_type=file_type,
- disk_type=disk_type,
- linked_clone=True)
- cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
- mock_imagecache = mock.Mock()
- mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
- vi = vmops.VirtualMachineInstanceConfigInfo(
- self._instance, "fake_uuid", image_info,
- self._ds, self._dc_info, mock_imagecache)
- return vi
-
- @mock.patch.object(vmops.VMwareVMOps, 'check_cache_folder')
- @mock.patch.object(vmops.VMwareVMOps, '_fetch_image_as_file')
- @mock.patch.object(vmops.VMwareVMOps, '_prepare_iso_image')
- @mock.patch.object(vmops.VMwareVMOps, '_prepare_sparse_image')
- @mock.patch.object(vmops.VMwareVMOps, '_prepare_flat_image')
- @mock.patch.object(vmops.VMwareVMOps, '_cache_iso_image')
- @mock.patch.object(vmops.VMwareVMOps, '_cache_sparse_image')
- @mock.patch.object(vmops.VMwareVMOps, '_cache_flat_image')
- @mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
- def _test_fetch_image_if_missing(self,
- mock_delete_datastore_file,
- mock_cache_flat_image,
- mock_cache_sparse_image,
- mock_cache_iso_image,
- mock_prepare_flat_image,
- mock_prepare_sparse_image,
- mock_prepare_iso_image,
- mock_fetch_image_as_file,
- mock_check_cache_folder,
- is_iso=False,
- is_sparse_disk=False):
-
- tmp_dir_path = mock.Mock()
- tmp_image_path = mock.Mock()
- if is_iso:
- mock_prepare = mock_prepare_iso_image
- mock_cache = mock_cache_iso_image
- elif is_sparse_disk:
- mock_prepare = mock_prepare_sparse_image
- mock_cache = mock_cache_sparse_image
- else:
- mock_prepare = mock_prepare_flat_image
- mock_cache = mock_cache_flat_image
- mock_prepare.return_value = tmp_dir_path, tmp_image_path
-
- vi = self._make_vm_config_info(is_iso, is_sparse_disk)
- self._vmops._fetch_image_if_missing(self._context, vi)
-
- mock_check_cache_folder.assert_called_once_with(
- self._ds.name, self._ds.ref)
- mock_prepare.assert_called_once_with(vi)
- mock_fetch_image_as_file.assert_called_once_with(
- self._context, vi, tmp_image_path)
- mock_cache.assert_called_once_with(vi, tmp_image_path)
- mock_delete_datastore_file.assert_called_once_with(
- str(tmp_dir_path), self._dc_info.ref)
-
- def test_fetch_image_if_missing(self):
- self._test_fetch_image_if_missing()
-
- def test_fetch_image_if_missing_with_sparse(self):
- self._test_fetch_image_if_missing(
- is_sparse_disk=True)
-
- def test_fetch_image_if_missing_with_iso(self):
- self._test_fetch_image_if_missing(
- is_iso=True)
-
- @mock.patch.object(images, 'fetch_image')
- def test_fetch_image_as_file(self, mock_fetch_image):
- vi = self._make_vm_config_info()
- image_ds_loc = mock.Mock()
- self._vmops._fetch_image_as_file(self._context, vi, image_ds_loc)
- mock_fetch_image.assert_called_once_with(
- self._context,
- vi.instance,
- self._session._host,
- self._dc_info.name,
- self._ds.name,
- image_ds_loc.rel_path,
- cookies='Fake-CookieJar')
-
- @mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
- def test_prepare_iso_image(self, mock_generate_uuid):
- vi = self._make_vm_config_info(is_iso=True)
- tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_iso_image(vi)
-
- expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
- expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s.iso' % (
- self._ds.name, self._image_id, self._image_id)
-
- self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
- self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
-
- @mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
- def test_prepare_sparse_image(self, mock_generate_uuid):
- vi = self._make_vm_config_info(is_sparse_disk=True)
- tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_sparse_image(vi)
-
- expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
- expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s' % (
- self._ds.name, self._image_id, "tmp-sparse.vmdk")
-
- self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
- self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
-
- @mock.patch.object(ds_util, 'mkdir')
- @mock.patch.object(vm_util, 'create_virtual_disk')
- @mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
- @mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
- def test_prepare_flat_image(self,
- mock_generate_uuid,
- mock_delete_datastore_file,
- mock_create_virtual_disk,
- mock_mkdir):
- vi = self._make_vm_config_info()
- tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_flat_image(vi)
-
- expected_tmp_dir_path = '[%s] vmware_temp/tmp-uuid' % (self._ds.name)
- expected_image_path = '[%s] vmware_temp/tmp-uuid/%s/%s-flat.vmdk' % (
- self._ds.name, self._image_id, self._image_id)
- expected_image_path_parent = '[%s] vmware_temp/tmp-uuid/%s' % (
- self._ds.name, self._image_id)
- expected_path_to_create = '[%s] vmware_temp/tmp-uuid/%s/%s.vmdk' % (
- self._ds.name, self._image_id, self._image_id)
-
- mock_mkdir.assert_called_once_with(
- self._session, DsPathMatcher(expected_image_path_parent),
- self._dc_info.ref)
-
- self.assertEqual(str(tmp_dir_loc), expected_tmp_dir_path)
- self.assertEqual(str(tmp_image_ds_loc), expected_image_path)
-
- image_info = vi.ii
- mock_create_virtual_disk.assert_called_once_with(
- self._session, self._dc_info.ref,
- image_info.adapter_type,
- image_info.disk_type,
- DsPathMatcher(expected_path_to_create),
- image_info.file_size_in_kb)
- mock_delete_datastore_file.assert_called_once_with(
- DsPathMatcher(expected_image_path),
- self._dc_info.ref)
-
- @mock.patch.object(ds_util, 'file_move')
- def test_cache_iso_image(self, mock_file_move):
- vi = self._make_vm_config_info(is_iso=True)
- tmp_image_ds_loc = mock.Mock()
-
- self._vmops._cache_iso_image(vi, tmp_image_ds_loc)
-
- mock_file_move.assert_called_once_with(
- self._session, self._dc_info.ref,
- tmp_image_ds_loc.parent,
- DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id))
-
- @mock.patch.object(ds_util, 'file_move')
- def test_cache_flat_image(self, mock_file_move):
- vi = self._make_vm_config_info()
- tmp_image_ds_loc = mock.Mock()
-
- self._vmops._cache_flat_image(vi, tmp_image_ds_loc)
-
- mock_file_move.assert_called_once_with(
- self._session, self._dc_info.ref,
- tmp_image_ds_loc.parent,
- DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id))
-
- @mock.patch.object(ds_util, 'file_move')
- @mock.patch.object(vm_util, 'copy_virtual_disk')
- @mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
- def test_cache_sparse_image(self,
- mock_delete_datastore_file,
- mock_copy_virtual_disk,
- mock_file_move):
- vi = self._make_vm_config_info(is_sparse_disk=True)
-
- sparse_disk_path = "[%s] vmware_temp/tmp-uuid/%s/tmp-sparse.vmdk" % (
- self._ds.name, self._image_id)
- tmp_image_ds_loc = ds_util.DatastorePath.parse(sparse_disk_path)
-
- self._vmops._cache_sparse_image(vi, tmp_image_ds_loc)
-
- target_disk_path = "[%s] vmware_temp/tmp-uuid/%s/%s.vmdk" % (
- self._ds.name,
- self._image_id, self._image_id)
- mock_copy_virtual_disk.assert_called_once_with(
- self._session, self._dc_info.ref,
- sparse_disk_path,
- DsPathMatcher(target_disk_path))
diff --git a/nova/tests/virt/vmwareapi/test_volumeops.py b/nova/tests/virt/vmwareapi/test_volumeops.py
deleted file mode 100644
index 1d05843a54..0000000000
--- a/nova/tests/virt/vmwareapi/test_volumeops.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-
-import mock
-
-from nova import test
-from nova.tests.virt.vmwareapi import fake as vmwareapi_fake
-from nova.tests.virt.vmwareapi import stubs
-from nova.virt.vmwareapi import driver
-from nova.virt.vmwareapi import volumeops
-
-
-class VMwareVolumeOpsTestCase(test.NoDBTestCase):
-
- def setUp(self):
-
- super(VMwareVolumeOpsTestCase, self).setUp()
- vmwareapi_fake.reset()
- stubs.set_stubs(self.stubs)
- self._session = driver.VMwareAPISession()
-
- self._volumeops = volumeops.VMwareVolumeOps(self._session)
- self.instance = {'name': 'fake_name', 'uuid': 'fake_uuid'}
-
- def _test_detach_disk_from_vm(self, destroy_disk=False):
- def fake_call_method(module, method, *args, **kwargs):
- vmdk_detach_config_spec = kwargs.get('spec')
- virtual_device_config = vmdk_detach_config_spec.deviceChange[0]
- self.assertEqual('remove', virtual_device_config.operation)
- self.assertEqual('ns0:VirtualDeviceConfigSpec',
- virtual_device_config.obj_name)
- if destroy_disk:
- self.assertEqual('destroy',
- virtual_device_config.fileOperation)
- else:
- self.assertFalse(hasattr(virtual_device_config,
- 'fileOperation'))
- return 'fake_configure_task'
- with contextlib.nested(
- mock.patch.object(self._session, '_wait_for_task'),
- mock.patch.object(self._session, '_call_method',
- fake_call_method)
- ) as (_wait_for_task, _call_method):
- fake_device = vmwareapi_fake.DataObject()
- fake_device.backing = vmwareapi_fake.DataObject()
- fake_device.backing.fileName = 'fake_path'
- fake_device.key = 'fake_key'
- self._volumeops.detach_disk_from_vm('fake_vm_ref', self.instance,
- fake_device, destroy_disk)
- _wait_for_task.assert_has_calls([
- mock.call('fake_configure_task')])
-
- def test_detach_with_destroy_disk_from_vm(self):
- self._test_detach_disk_from_vm(destroy_disk=True)
-
- def test_detach_without_destroy_disk_from_vm(self):
- self._test_detach_disk_from_vm(destroy_disk=False)
-
- def _fake_call_get_dynamic_property(self, uuid, result):
- def fake_call_method(vim, method, vm_ref, type, prop):
- expected_prop = 'config.extraConfig["volume-%s"]' % uuid
- self.assertEqual('VirtualMachine', type)
- self.assertEqual(expected_prop, prop)
- return result
- return fake_call_method
-
- def test_get_volume_uuid(self):
- vm_ref = mock.Mock()
- uuid = '1234'
- opt_val = vmwareapi_fake.OptionValue('volume-%s' % uuid, 'volume-val')
- fake_call = self._fake_call_get_dynamic_property(uuid, opt_val)
- with mock.patch.object(self._session, "_call_method", fake_call):
- val = self._volumeops._get_volume_uuid(vm_ref, uuid)
- self.assertEqual('volume-val', val)
-
- def test_get_volume_uuid_not_found(self):
- vm_ref = mock.Mock()
- uuid = '1234'
- fake_call = self._fake_call_get_dynamic_property(uuid, None)
- with mock.patch.object(self._session, "_call_method", fake_call):
- val = self._volumeops._get_volume_uuid(vm_ref, uuid)
- self.assertIsNone(val)
diff --git a/nova/tests/virt/xenapi/client/test_objects.py b/nova/tests/virt/xenapi/client/test_objects.py
deleted file mode 100644
index 31d3df5c63..0000000000
--- a/nova/tests/virt/xenapi/client/test_objects.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Copyright (c) 2014 Rackspace Hosting
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova.tests.virt.xenapi import stubs
-from nova import utils
-from nova.virt.xenapi.client import objects
-
-
-class XenAPISessionObjectTestCase(stubs.XenAPITestBaseNoDB):
- def setUp(self):
- super(XenAPISessionObjectTestCase, self).setUp()
- self.session = mock.Mock()
- self.obj = objects.XenAPISessionObject(self.session, "FAKE")
-
- def test_call_method_via_attr(self):
- self.session.call_xenapi.return_value = "asdf"
-
- result = self.obj.get_X("ref")
-
- self.assertEqual(result, "asdf")
- self.session.call_xenapi.assert_called_once_with("FAKE.get_X", "ref")
-
-
-class ObjectsTestCase(stubs.XenAPITestBaseNoDB):
- def setUp(self):
- super(ObjectsTestCase, self).setUp()
- self.session = mock.Mock()
-
- def test_VM(self):
- vm = objects.VM(self.session)
- vm.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("VM.get_X", "ref")
-
- def test_SR(self):
- sr = objects.SR(self.session)
- sr.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("SR.get_X", "ref")
-
- def test_VDI(self):
- vdi = objects.VDI(self.session)
- vdi.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("VDI.get_X", "ref")
-
- def test_VBD(self):
- vbd = objects.VBD(self.session)
- vbd.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("VBD.get_X", "ref")
-
- def test_PBD(self):
- pbd = objects.PBD(self.session)
- pbd.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("PBD.get_X", "ref")
-
- def test_PIF(self):
- pif = objects.PIF(self.session)
- pif.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("PIF.get_X", "ref")
-
- def test_VLAN(self):
- vlan = objects.VLAN(self.session)
- vlan.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("VLAN.get_X", "ref")
-
- def test_host(self):
- host = objects.Host(self.session)
- host.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("host.get_X", "ref")
-
- def test_network(self):
- network = objects.Network(self.session)
- network.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("network.get_X",
- "ref")
-
- def test_pool(self):
- pool = objects.Pool(self.session)
- pool.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("pool.get_X", "ref")
-
-
-class VBDTestCase(stubs.XenAPITestBaseNoDB):
- def setUp(self):
- super(VBDTestCase, self).setUp()
- self.session = mock.Mock()
- self.session.VBD = objects.VBD(self.session)
-
- def test_plug(self):
- self.session.VBD.plug("vbd_ref", "vm_ref")
- self.session.call_xenapi.assert_called_once_with("VBD.plug", "vbd_ref")
-
- def test_unplug(self):
- self.session.VBD.unplug("vbd_ref", "vm_ref")
- self.session.call_xenapi.assert_called_once_with("VBD.unplug",
- "vbd_ref")
-
- @mock.patch.object(utils, 'synchronized')
- def test_vbd_plug_check_synchronized(self, mock_synchronized):
- self.session.VBD.unplug("vbd_ref", "vm_ref")
- mock_synchronized.assert_called_once_with("xenapi-vbd-vm_ref")
diff --git a/nova/tests/virt/xenapi/client/test_session.py b/nova/tests/virt/xenapi/client/test_session.py
deleted file mode 100644
index 02361db42f..0000000000
--- a/nova/tests/virt/xenapi/client/test_session.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# Copyright (c) 2014 Rackspace Hosting
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import errno
-import socket
-
-import mock
-
-from nova import exception
-from nova.tests.virt.xenapi import stubs
-from nova import version
-from nova.virt.xenapi.client import session
-
-
-class SessionTestCase(stubs.XenAPITestBaseNoDB):
- @mock.patch.object(session.XenAPISession, '_create_session')
- @mock.patch.object(session.XenAPISession, '_get_product_version_and_brand')
- @mock.patch.object(session.XenAPISession, '_verify_plugin_version')
- def test_session_passes_version(self, mock_verify, mock_version,
- create_session):
- sess = mock.Mock()
- create_session.return_value = sess
- mock_version.return_value = ('version', 'brand')
-
- session.XenAPISession('url', 'username', 'password')
-
- expected_version = '%s %s %s' % (version.vendor_string(),
- version.product_string(),
- version.version_string_with_package())
- sess.login_with_password.assert_called_with('username', 'password',
- expected_version,
- 'OpenStack')
-
-
-class ApplySessionHelpersTestCase(stubs.XenAPITestBaseNoDB):
- def setUp(self):
- super(ApplySessionHelpersTestCase, self).setUp()
- self.session = mock.Mock()
- session.apply_session_helpers(self.session)
-
- def test_apply_session_helpers_add_VM(self):
- self.session.VM.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("VM.get_X", "ref")
-
- def test_apply_session_helpers_add_SR(self):
- self.session.SR.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("SR.get_X", "ref")
-
- def test_apply_session_helpers_add_VDI(self):
- self.session.VDI.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("VDI.get_X", "ref")
-
- def test_apply_session_helpers_add_VBD(self):
- self.session.VBD.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("VBD.get_X", "ref")
-
- def test_apply_session_helpers_add_PBD(self):
- self.session.PBD.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("PBD.get_X", "ref")
-
- def test_apply_session_helpers_add_PIF(self):
- self.session.PIF.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("PIF.get_X", "ref")
-
- def test_apply_session_helpers_add_VLAN(self):
- self.session.VLAN.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("VLAN.get_X", "ref")
-
- def test_apply_session_helpers_add_host(self):
- self.session.host.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("host.get_X", "ref")
-
- def test_apply_session_helpers_add_network(self):
- self.session.network.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("network.get_X",
- "ref")
-
- def test_apply_session_helpers_add_pool(self):
- self.session.pool.get_X("ref")
- self.session.call_xenapi.assert_called_once_with("pool.get_X", "ref")
-
-
-class CallPluginTestCase(stubs.XenAPITestBaseNoDB):
- def _get_fake_xapisession(self):
- class FakeXapiSession(session.XenAPISession):
- def __init__(self, **kwargs):
- "Skip the superclass's dirty init"
- self.XenAPI = mock.MagicMock()
-
- return FakeXapiSession()
-
- def setUp(self):
- super(CallPluginTestCase, self).setUp()
- self.session = self._get_fake_xapisession()
-
- def test_serialized_with_retry_socket_error_conn_reset(self):
- exc = socket.error
- exc.errno = errno.ECONNRESET
- plugin = 'glance'
- fn = 'download_vhd'
- num_retries = 1
- callback = None
- retry_cb = mock.Mock()
- with mock.patch.object(self.session, 'call_plugin_serialized',
- autospec=True) as call_plugin_serialized:
- call_plugin_serialized.side_effect = exc
- self.assertRaises(exception.PluginRetriesExceeded,
- self.session.call_plugin_serialized_with_retry, plugin, fn,
- num_retries, callback, retry_cb)
- call_plugin_serialized.assert_called_with(plugin, fn)
- self.assertEqual(2, call_plugin_serialized.call_count)
- self.assertEqual(2, retry_cb.call_count)
-
- def test_serialized_with_retry_socket_error_reraised(self):
- exc = socket.error
- exc.errno = errno.ECONNREFUSED
- plugin = 'glance'
- fn = 'download_vhd'
- num_retries = 1
- callback = None
- retry_cb = mock.Mock()
- with mock.patch.object(self.session, 'call_plugin_serialized',
- autospec=True) as call_plugin_serialized:
- call_plugin_serialized.side_effect = exc
- self.assertRaises(socket.error,
- self.session.call_plugin_serialized_with_retry, plugin, fn,
- num_retries, callback, retry_cb)
- call_plugin_serialized.assert_called_once_with(plugin, fn)
- self.assertEqual(0, retry_cb.call_count)
-
- def test_serialized_with_retry_socket_reset_reraised(self):
- exc = socket.error
- exc.errno = errno.ECONNRESET
- plugin = 'glance'
- fn = 'download_vhd'
- num_retries = 1
- callback = None
- retry_cb = mock.Mock()
- with mock.patch.object(self.session, 'call_plugin_serialized',
- autospec=True) as call_plugin_serialized:
- call_plugin_serialized.side_effect = exc
- self.assertRaises(exception.PluginRetriesExceeded,
- self.session.call_plugin_serialized_with_retry, plugin, fn,
- num_retries, callback, retry_cb)
- call_plugin_serialized.assert_called_with(plugin, fn)
- self.assertEqual(2, call_plugin_serialized.call_count)
diff --git a/nova/tests/virt/xenapi/image/test_bittorrent.py b/nova/tests/virt/xenapi/image/test_bittorrent.py
deleted file mode 100644
index 267b8c9393..0000000000
--- a/nova/tests/virt/xenapi/image/test_bittorrent.py
+++ /dev/null
@@ -1,163 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mox
-import pkg_resources
-import six
-
-from nova import context
-from nova.i18n import _
-from nova import test
-from nova.tests.virt.xenapi import stubs
-from nova.virt.xenapi import driver as xenapi_conn
-from nova.virt.xenapi import fake
-from nova.virt.xenapi.image import bittorrent
-from nova.virt.xenapi import vm_utils
-
-
-class TestBittorrentStore(stubs.XenAPITestBaseNoDB):
- def setUp(self):
- super(TestBittorrentStore, self).setUp()
- self.store = bittorrent.BittorrentStore()
- self.mox = mox.Mox()
-
- self.flags(torrent_base_url='http://foo',
- connection_url='test_url',
- connection_password='test_pass',
- group='xenserver')
-
- self.context = context.RequestContext(
- 'user', 'project', auth_token='foobar')
-
- fake.reset()
- stubs.stubout_session(self.stubs, fake.SessionBase)
-
- def mock_iter_eps(namespace):
- return []
-
- self.stubs.Set(pkg_resources, 'iter_entry_points', mock_iter_eps)
-
- driver = xenapi_conn.XenAPIDriver(False)
- self.session = driver._session
-
- self.stubs.Set(
- vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path')
-
- def test_download_image(self):
-
- instance = {'uuid': '00000000-0000-0000-0000-000000007357'}
- params = {'image_id': 'fake_image_uuid',
- 'sr_path': '/fake/sr/path',
- 'torrent_download_stall_cutoff': 600,
- 'torrent_listen_port_end': 6891,
- 'torrent_listen_port_start': 6881,
- 'torrent_max_last_accessed': 86400,
- 'torrent_max_seeder_processes_per_host': 1,
- 'torrent_seed_chance': 1.0,
- 'torrent_seed_duration': 3600,
- 'torrent_url': 'http://foo/fake_image_uuid.torrent',
- 'uuid_stack': ['uuid1']}
-
- self.stubs.Set(vm_utils, '_make_uuid_stack',
- lambda *a, **kw: ['uuid1'])
-
- self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
- self.session.call_plugin_serialized(
- 'bittorrent', 'download_vhd', **params)
- self.mox.ReplayAll()
-
- self.store.download_image(self.context, self.session,
- instance, 'fake_image_uuid')
-
- self.mox.VerifyAll()
-
- def test_upload_image(self):
- self.assertRaises(NotImplementedError, self.store.upload_image,
- self.context, self.session, mox.IgnoreArg, 'fake_image_uuid',
- ['fake_vdi_uuid'])
-
-
-def bad_fetcher(image_id):
- raise test.TestingException("just plain bad.")
-
-
-def another_fetcher(image_id):
- return "http://www.foobar.com/%s" % image_id
-
-
-class MockEntryPoint(object):
- name = "torrent_url"
-
- def load(self):
- return another_fetcher
-
-
-class LookupTorrentURLTestCase(test.NoDBTestCase):
- def setUp(self):
- super(LookupTorrentURLTestCase, self).setUp()
- self.store = bittorrent.BittorrentStore()
- self.image_id = 'fakeimageid'
-
- def _mock_iter_none(self, namespace):
- return []
-
- def _mock_iter_single(self, namespace):
- return [MockEntryPoint()]
-
- def test_default_fetch_url_no_base_url_set(self):
- self.flags(torrent_base_url=None,
- group='xenserver')
- self.stubs.Set(pkg_resources, 'iter_entry_points',
- self._mock_iter_none)
-
- exc = self.assertRaises(
- RuntimeError, self.store._lookup_torrent_url_fn)
- self.assertEqual(_('Cannot create default bittorrent URL without'
- ' torrent_base_url set'
- ' or torrent URL fetcher extension'),
- six.text_type(exc))
-
- def test_default_fetch_url_base_url_is_set(self):
- self.flags(torrent_base_url='http://foo',
- group='xenserver')
- self.stubs.Set(pkg_resources, 'iter_entry_points',
- self._mock_iter_single)
-
- lookup_fn = self.store._lookup_torrent_url_fn()
- self.assertEqual('http://foo/fakeimageid.torrent',
- lookup_fn(self.image_id))
-
- def test_with_extension(self):
- self.stubs.Set(pkg_resources, 'iter_entry_points',
- self._mock_iter_single)
-
- lookup_fn = self.store._lookup_torrent_url_fn()
- self.assertEqual("http://www.foobar.com/%s" % self.image_id,
- lookup_fn(self.image_id))
-
- def test_multiple_extensions_found(self):
- self.flags(torrent_base_url=None,
- group='xenserver')
-
- def mock_iter_multiple(namespace):
- return [MockEntryPoint(), MockEntryPoint()]
-
- self.stubs.Set(pkg_resources, 'iter_entry_points', mock_iter_multiple)
-
- exc = self.assertRaises(
- RuntimeError, self.store._lookup_torrent_url_fn)
- self.assertEqual(_('Multiple torrent URL fetcher extensions found.'
- ' Failing.'),
- six.text_type(exc))
diff --git a/nova/tests/virt/xenapi/image/test_glance.py b/nova/tests/virt/xenapi/image/test_glance.py
deleted file mode 100644
index b2150980f7..0000000000
--- a/nova/tests/virt/xenapi/image/test_glance.py
+++ /dev/null
@@ -1,256 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import random
-import time
-
-import mock
-from mox3 import mox
-
-from nova.compute import utils as compute_utils
-from nova import context
-from nova import exception
-from nova.openstack.common import log as logging
-from nova.tests.virt.xenapi import stubs
-from nova.virt.xenapi import driver as xenapi_conn
-from nova.virt.xenapi import fake
-from nova.virt.xenapi.image import glance
-from nova.virt.xenapi import vm_utils
-
-
-class TestGlanceStore(stubs.XenAPITestBaseNoDB):
- def setUp(self):
- super(TestGlanceStore, self).setUp()
- self.store = glance.GlanceStore()
-
- self.flags(host='1.1.1.1',
- port=123,
- api_insecure=False, group='glance')
- self.flags(connection_url='test_url',
- connection_password='test_pass',
- group='xenserver')
-
- self.context = context.RequestContext(
- 'user', 'project', auth_token='foobar')
-
- fake.reset()
- stubs.stubout_session(self.stubs, fake.SessionBase)
- driver = xenapi_conn.XenAPIDriver(False)
- self.session = driver._session
-
- self.stubs.Set(
- vm_utils, 'get_sr_path', lambda *a, **kw: '/fake/sr/path')
-
- self.instance = {'uuid': 'blah',
- 'system_metadata': [],
- 'auto_disk_config': True,
- 'os_type': 'default',
- 'xenapi_use_agent': 'true'}
-
- def _get_params(self):
- return {'image_id': 'fake_image_uuid',
- 'glance_host': '1.1.1.1',
- 'glance_port': 123,
- 'glance_use_ssl': False,
- 'sr_path': '/fake/sr/path',
- 'extra_headers': {'X-Service-Catalog': '[]',
- 'X-Auth-Token': 'foobar',
- 'X-Roles': '',
- 'X-Tenant-Id': 'project',
- 'X-User-Id': 'user',
- 'X-Identity-Status': 'Confirmed'}}
-
- def _get_download_params(self):
- params = self._get_params()
- params['uuid_stack'] = ['uuid1']
- return params
-
- def test_download_image(self):
- params = self._get_download_params()
-
- self.stubs.Set(vm_utils, '_make_uuid_stack',
- lambda *a, **kw: ['uuid1'])
-
- self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
- self.session.call_plugin_serialized('glance', 'download_vhd', **params)
- self.mox.ReplayAll()
-
- self.store.download_image(self.context, self.session,
- self.instance, 'fake_image_uuid')
-
- self.mox.VerifyAll()
-
- @mock.patch.object(vm_utils, '_make_uuid_stack', return_value=['uuid1'])
- @mock.patch.object(random, 'shuffle')
- @mock.patch.object(time, 'sleep')
- @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
- @mock.patch.object(logging.getLogger('nova.virt.xenapi.client.session'),
- 'debug')
- def test_download_image_retry(self, mock_log_debug, mock_fault, mock_sleep,
- mock_shuffle, mock_make_uuid_stack):
- params = self._get_download_params()
- self.flags(num_retries=2, group='glance')
-
- params.pop("glance_port")
- params.pop("glance_host")
- calls = [mock.call('glance', 'download_vhd', glance_port=9292,
- glance_host='10.0.1.1', **params),
- mock.call('glance', 'download_vhd', glance_port=9293,
- glance_host='10.0.0.1', **params)]
- log_calls = [mock.call(mock.ANY, {'callback_result': '10.0.1.1',
- 'attempts': 3, 'attempt': 1,
- 'fn': 'download_vhd',
- 'plugin': 'glance'}),
- mock.call(mock.ANY, {'callback_result': '10.0.0.1',
- 'attempts': 3, 'attempt': 2,
- 'fn': 'download_vhd',
- 'plugin': 'glance'})]
-
- glance_api_servers = ['10.0.1.1:9292',
- 'http://10.0.0.1:9293']
- self.flags(api_servers=glance_api_servers, group='glance')
-
- with (mock.patch.object(self.session, 'call_plugin_serialized')
- ) as mock_call_plugin_serialized:
- error_details = ["", "", "RetryableError", ""]
- error = self.session.XenAPI.Failure(details=error_details)
- mock_call_plugin_serialized.side_effect = [error, "success"]
-
- self.store.download_image(self.context, self.session,
- self.instance, 'fake_image_uuid')
-
- mock_call_plugin_serialized.assert_has_calls(calls)
- mock_log_debug.assert_has_calls(log_calls, any_order=True)
-
- self.assertEqual(1, mock_fault.call_count)
-
- def _get_upload_params(self, auto_disk_config=True,
- expected_os_type='default'):
- params = self._get_params()
- params['vdi_uuids'] = ['fake_vdi_uuid']
- params['properties'] = {'auto_disk_config': auto_disk_config,
- 'os_type': expected_os_type}
- return params
-
- def _test_upload_image(self, auto_disk_config, expected_os_type='default'):
- params = self._get_upload_params(auto_disk_config, expected_os_type)
-
- self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
- self.session.call_plugin_serialized('glance', 'upload_vhd', **params)
-
- self.mox.ReplayAll()
- self.store.upload_image(self.context, self.session, self.instance,
- 'fake_image_uuid', ['fake_vdi_uuid'])
- self.mox.VerifyAll()
-
- def test_upload_image(self):
- self._test_upload_image(True)
-
- def test_upload_image_None_os_type(self):
- self.instance['os_type'] = None
- self._test_upload_image(True, 'linux')
-
- def test_upload_image_no_os_type(self):
- del self.instance['os_type']
- self._test_upload_image(True, 'linux')
-
- def test_upload_image_auto_config_disk_disabled(self):
- sys_meta = [{"key": "image_auto_disk_config", "value": "Disabled"}]
- self.instance["system_metadata"] = sys_meta
- self._test_upload_image("disabled")
-
- def test_upload_image_raises_exception(self):
- params = self._get_upload_params()
-
- self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
- self.session.call_plugin_serialized('glance', 'upload_vhd',
- **params).AndRaise(RuntimeError)
- self.mox.ReplayAll()
-
- self.assertRaises(RuntimeError, self.store.upload_image,
- self.context, self.session, self.instance,
- 'fake_image_uuid', ['fake_vdi_uuid'])
- self.mox.VerifyAll()
-
- def test_upload_image_retries_then_raises_exception(self):
- self.flags(num_retries=2, group='glance')
- params = self._get_upload_params()
-
- self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
- self.mox.StubOutWithMock(time, 'sleep')
- self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
- error_details = ["", "", "RetryableError", ""]
- error = self.session.XenAPI.Failure(details=error_details)
- self.session.call_plugin_serialized('glance', 'upload_vhd',
- **params).AndRaise(error)
- compute_utils.add_instance_fault_from_exc(self.context, self.instance,
- error, (fake.Failure,
- error,
- mox.IgnoreArg()))
- time.sleep(0.5)
- self.session.call_plugin_serialized('glance', 'upload_vhd',
- **params).AndRaise(error)
- compute_utils.add_instance_fault_from_exc(self.context, self.instance,
- error, (fake.Failure,
- error,
- mox.IgnoreArg()))
- time.sleep(1)
- self.session.call_plugin_serialized('glance', 'upload_vhd',
- **params).AndRaise(error)
- compute_utils.add_instance_fault_from_exc(self.context, self.instance,
- error, (fake.Failure,
- error,
- mox.IgnoreArg()))
- self.mox.ReplayAll()
-
- self.assertRaises(exception.CouldNotUploadImage,
- self.store.upload_image,
- self.context, self.session, self.instance,
- 'fake_image_uuid', ['fake_vdi_uuid'])
- self.mox.VerifyAll()
-
- def test_upload_image_retries_on_signal_exception(self):
- self.flags(num_retries=2, group='glance')
- params = self._get_upload_params()
-
- self.mox.StubOutWithMock(self.session, 'call_plugin_serialized')
- self.mox.StubOutWithMock(time, 'sleep')
- self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
- error_details = ["", "task signaled", "", ""]
- error = self.session.XenAPI.Failure(details=error_details)
- self.session.call_plugin_serialized('glance', 'upload_vhd',
- **params).AndRaise(error)
- compute_utils.add_instance_fault_from_exc(self.context, self.instance,
- error, (fake.Failure,
- error,
- mox.IgnoreArg()))
- time.sleep(0.5)
- # Note(johngarbutt) XenServer 6.1 and later has this error
- error_details = ["", "signal: SIGTERM", "", ""]
- error = self.session.XenAPI.Failure(details=error_details)
- self.session.call_plugin_serialized('glance', 'upload_vhd',
- **params).AndRaise(error)
- compute_utils.add_instance_fault_from_exc(self.context, self.instance,
- error, (fake.Failure,
- error,
- mox.IgnoreArg()))
- time.sleep(1)
- self.session.call_plugin_serialized('glance', 'upload_vhd',
- **params)
- self.mox.ReplayAll()
-
- self.store.upload_image(self.context, self.session, self.instance,
- 'fake_image_uuid', ['fake_vdi_uuid'])
- self.mox.VerifyAll()
diff --git a/nova/tests/virt/xenapi/stubs.py b/nova/tests/virt/xenapi/stubs.py
deleted file mode 100644
index 4ea0301b91..0000000000
--- a/nova/tests/virt/xenapi/stubs.py
+++ /dev/null
@@ -1,365 +0,0 @@
-# Copyright (c) 2010 Citrix Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Stubouts, mocks and fixtures for the test suite."""
-
-import pickle
-import random
-
-from oslo.serialization import jsonutils
-
-from nova import test
-import nova.tests.image.fake
-from nova.virt.xenapi.client import session
-from nova.virt.xenapi import fake
-from nova.virt.xenapi import vm_utils
-from nova.virt.xenapi import vmops
-
-
-def stubout_firewall_driver(stubs, conn):
-
- def fake_none(self, *args):
- return
-
- _vmops = conn._vmops
- stubs.Set(_vmops.firewall_driver, 'prepare_instance_filter', fake_none)
- stubs.Set(_vmops.firewall_driver, 'instance_filter_exists', fake_none)
-
-
-def stubout_instance_snapshot(stubs):
- def fake_fetch_image(context, session, instance, name_label, image, type):
- return {'root': dict(uuid=_make_fake_vdi(), file=None),
- 'kernel': dict(uuid=_make_fake_vdi(), file=None),
- 'ramdisk': dict(uuid=_make_fake_vdi(), file=None)}
-
- stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)
-
- def fake_wait_for_vhd_coalesce(*args):
- # TODO(sirp): Should we actually fake out the data here
- return "fakeparent", "fakebase"
-
- stubs.Set(vm_utils, '_wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce)
-
-
-def stubout_session(stubs, cls, product_version=(5, 6, 2),
- product_brand='XenServer', **opt_args):
- """Stubs out methods from XenAPISession."""
- stubs.Set(session.XenAPISession, '_create_session',
- lambda s, url: cls(url, **opt_args))
- stubs.Set(session.XenAPISession, '_get_product_version_and_brand',
- lambda s: (product_version, product_brand))
-
-
-def stubout_get_this_vm_uuid(stubs):
- def f(session):
- vms = [rec['uuid'] for ref, rec
- in fake.get_all_records('VM').iteritems()
- if rec['is_control_domain']]
- return vms[0]
- stubs.Set(vm_utils, 'get_this_vm_uuid', f)
-
-
-def stubout_image_service_download(stubs):
- def fake_download(*args, **kwargs):
- pass
- stubs.Set(nova.tests.image.fake._FakeImageService,
- 'download', fake_download)
-
-
-def stubout_stream_disk(stubs):
- def fake_stream_disk(*args, **kwargs):
- pass
- stubs.Set(vm_utils, '_stream_disk', fake_stream_disk)
-
-
-def stubout_determine_is_pv_objectstore(stubs):
- """Assumes VMs stu have PV kernels."""
-
- def f(*args):
- return False
- stubs.Set(vm_utils, '_determine_is_pv_objectstore', f)
-
-
-def stubout_is_snapshot(stubs):
- """Always returns true
-
- xenapi fake driver does not create vmrefs for snapshots.
- """
-
- def f(*args):
- return True
- stubs.Set(vm_utils, 'is_snapshot', f)
-
-
-def stubout_lookup_image(stubs):
- """Simulates a failure in lookup image."""
- def f(_1, _2, _3, _4):
- raise Exception("Test Exception raised by fake lookup_image")
- stubs.Set(vm_utils, 'lookup_image', f)
-
-
-def stubout_fetch_disk_image(stubs, raise_failure=False):
- """Simulates a failure in fetch image_glance_disk."""
-
- def _fake_fetch_disk_image(context, session, instance, name_label, image,
- image_type):
- if raise_failure:
- raise fake.Failure("Test Exception raised by "
- "fake fetch_image_glance_disk")
- elif image_type == vm_utils.ImageType.KERNEL:
- filename = "kernel"
- elif image_type == vm_utils.ImageType.RAMDISK:
- filename = "ramdisk"
- else:
- filename = "unknown"
-
- vdi_type = vm_utils.ImageType.to_string(image_type)
- return {vdi_type: dict(uuid=None, file=filename)}
-
- stubs.Set(vm_utils, '_fetch_disk_image', _fake_fetch_disk_image)
-
-
-def stubout_create_vm(stubs):
- """Simulates a failure in create_vm."""
-
- def f(*args):
- raise fake.Failure("Test Exception raised by fake create_vm")
- stubs.Set(vm_utils, 'create_vm', f)
-
-
-def stubout_attach_disks(stubs):
- """Simulates a failure in _attach_disks."""
-
- def f(*args):
- raise fake.Failure("Test Exception raised by fake _attach_disks")
- stubs.Set(vmops.VMOps, '_attach_disks', f)
-
-
-def _make_fake_vdi():
- sr_ref = fake.get_all('SR')[0]
- vdi_ref = fake.create_vdi('', sr_ref)
- vdi_rec = fake.get_record('VDI', vdi_ref)
- return vdi_rec['uuid']
-
-
-class FakeSessionForVMTests(fake.SessionBase):
- """Stubs out a XenAPISession for VM tests."""
-
- _fake_iptables_save_output = ("# Generated by iptables-save v1.4.10 on "
- "Sun Nov 6 22:49:02 2011\n"
- "*filter\n"
- ":INPUT ACCEPT [0:0]\n"
- ":FORWARD ACCEPT [0:0]\n"
- ":OUTPUT ACCEPT [0:0]\n"
- "COMMIT\n"
- "# Completed on Sun Nov 6 22:49:02 2011\n")
-
- def host_call_plugin(self, _1, _2, plugin, method, _5):
- if (plugin, method) == ('glance', 'download_vhd'):
- root_uuid = _make_fake_vdi()
- return pickle.dumps(dict(root=dict(uuid=root_uuid)))
- elif (plugin, method) == ("xenhost", "iptables_config"):
- return fake.as_json(out=self._fake_iptables_save_output,
- err='')
- else:
- return (super(FakeSessionForVMTests, self).
- host_call_plugin(_1, _2, plugin, method, _5))
-
- def VM_start(self, _1, ref, _2, _3):
- vm = fake.get_record('VM', ref)
- if vm['power_state'] != 'Halted':
- raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted',
- vm['power_state']])
- vm['power_state'] = 'Running'
- vm['is_a_template'] = False
- vm['is_control_domain'] = False
- vm['domid'] = random.randrange(1, 1 << 16)
- return vm
-
- def VM_start_on(self, _1, vm_ref, host_ref, _2, _3):
- vm_rec = self.VM_start(_1, vm_ref, _2, _3)
- vm_rec['resident_on'] = host_ref
-
- def VDI_snapshot(self, session_ref, vm_ref, _1):
- sr_ref = "fakesr"
- return fake.create_vdi('fakelabel', sr_ref, read_only=True)
-
- def SR_scan(self, session_ref, sr_ref):
- pass
-
-
-class FakeSessionForFirewallTests(FakeSessionForVMTests):
- """Stubs out a XenApi Session for doing IPTable Firewall tests."""
-
- def __init__(self, uri, test_case=None):
- super(FakeSessionForFirewallTests, self).__init__(uri)
- if hasattr(test_case, '_in_rules'):
- self._in_rules = test_case._in_rules
- if hasattr(test_case, '_in6_filter_rules'):
- self._in6_filter_rules = test_case._in6_filter_rules
- self._test_case = test_case
-
- def host_call_plugin(self, _1, _2, plugin, method, args):
- """Mock method four host_call_plugin to be used in unit tests
- for the dom0 iptables Firewall drivers for XenAPI
-
- """
- if plugin == "xenhost" and method == "iptables_config":
- # The command to execute is a json-encoded list
- cmd_args = args.get('cmd_args', None)
- cmd = jsonutils.loads(cmd_args)
- if not cmd:
- ret_str = ''
- else:
- output = ''
- process_input = args.get('process_input', None)
- if cmd == ['ip6tables-save', '-c']:
- output = '\n'.join(self._in6_filter_rules)
- if cmd == ['iptables-save', '-c']:
- output = '\n'.join(self._in_rules)
- if cmd == ['iptables-restore', '-c', ]:
- lines = process_input.split('\n')
- if '*filter' in lines:
- if self._test_case is not None:
- self._test_case._out_rules = lines
- output = '\n'.join(lines)
- if cmd == ['ip6tables-restore', '-c', ]:
- lines = process_input.split('\n')
- if '*filter' in lines:
- output = '\n'.join(lines)
- ret_str = fake.as_json(out=output, err='')
- return ret_str
- else:
- return (super(FakeSessionForVMTests, self).
- host_call_plugin(_1, _2, plugin, method, args))
-
-
-def stub_out_vm_methods(stubs):
- def fake_acquire_bootlock(self, vm):
- pass
-
- def fake_release_bootlock(self, vm):
- pass
-
- def fake_generate_ephemeral(*args):
- pass
-
- def fake_wait_for_device(dev):
- pass
-
- stubs.Set(vmops.VMOps, "_acquire_bootlock", fake_acquire_bootlock)
- stubs.Set(vmops.VMOps, "_release_bootlock", fake_release_bootlock)
- stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
- stubs.Set(vm_utils, '_wait_for_device', fake_wait_for_device)
-
-
-class FakeSessionForVolumeTests(fake.SessionBase):
- """Stubs out a XenAPISession for Volume tests."""
- def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
- _6, _7, _8, _9, _10, _11):
- valid_vdi = False
- refs = fake.get_all('VDI')
- for ref in refs:
- rec = fake.get_record('VDI', ref)
- if rec['uuid'] == uuid:
- valid_vdi = True
- if not valid_vdi:
- raise fake.Failure([['INVALID_VDI', 'session', self._session]])
-
-
-class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
- """Stubs out a XenAPISession for Volume tests: it injects failures."""
- def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
- _6, _7, _8, _9, _10, _11):
- # This is for testing failure
- raise fake.Failure([['INVALID_VDI', 'session', self._session]])
-
- def PBD_unplug(self, _1, ref):
- rec = fake.get_record('PBD', ref)
- rec['currently-attached'] = False
-
- def SR_forget(self, _1, ref):
- pass
-
-
-def stub_out_migration_methods(stubs):
- fakesr = fake.create_sr()
-
- def fake_import_all_migrated_disks(session, instance):
- vdi_ref = fake.create_vdi(instance['name'], fakesr)
- vdi_rec = fake.get_record('VDI', vdi_ref)
- vdi_rec['other_config']['nova_disk_type'] = 'root'
- return {"root": {'uuid': vdi_rec['uuid'], 'ref': vdi_ref},
- "ephemerals": {}}
-
- def fake_wait_for_instance_to_start(self, *args):
- pass
-
- def fake_get_vdi(session, vm_ref, userdevice='0'):
- vdi_ref_parent = fake.create_vdi('derp-parent', fakesr)
- vdi_rec_parent = fake.get_record('VDI', vdi_ref_parent)
- vdi_ref = fake.create_vdi('derp', fakesr,
- sm_config={'vhd-parent': vdi_rec_parent['uuid']})
- vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
- return vdi_ref, vdi_rec
-
- def fake_sr(session, *args):
- return fakesr
-
- def fake_get_sr_path(*args):
- return "fake"
-
- def fake_destroy(*args, **kwargs):
- pass
-
- def fake_generate_ephemeral(*args):
- pass
-
- stubs.Set(vmops.VMOps, '_destroy', fake_destroy)
- stubs.Set(vmops.VMOps, '_wait_for_instance_to_start',
- fake_wait_for_instance_to_start)
- stubs.Set(vm_utils, 'import_all_migrated_disks',
- fake_import_all_migrated_disks)
- stubs.Set(vm_utils, 'scan_default_sr', fake_sr)
- stubs.Set(vm_utils, 'get_vdi_for_vm_safely', fake_get_vdi)
- stubs.Set(vm_utils, 'get_sr_path', fake_get_sr_path)
- stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
-
-
-class FakeSessionForFailedMigrateTests(FakeSessionForVMTests):
- def VM_assert_can_migrate(self, session, vmref, migrate_data,
- live, vdi_map, vif_map, options):
- raise fake.Failure("XenAPI VM.assert_can_migrate failed")
-
- def host_migrate_receive(self, session, hostref, networkref, options):
- raise fake.Failure("XenAPI host.migrate_receive failed")
-
- def VM_migrate_send(self, session, vmref, migrate_data, islive, vdi_map,
- vif_map, options):
- raise fake.Failure("XenAPI VM.migrate_send failed")
-
-
-# FIXME(sirp): XenAPITestBase is deprecated, all tests should be converted
-# over to use XenAPITestBaseNoDB
-class XenAPITestBase(test.TestCase):
- def setUp(self):
- super(XenAPITestBase, self).setUp()
- self.useFixture(test.ReplaceModule('XenAPI', fake))
- fake.reset()
-
-
-class XenAPITestBaseNoDB(test.NoDBTestCase):
- def setUp(self):
- super(XenAPITestBaseNoDB, self).setUp()
- self.useFixture(test.ReplaceModule('XenAPI', fake))
- fake.reset()
diff --git a/nova/tests/virt/xenapi/test_driver.py b/nova/tests/virt/xenapi/test_driver.py
deleted file mode 100644
index 6fc3db5100..0000000000
--- a/nova/tests/virt/xenapi/test_driver.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright (c) 2013 Rackspace Hosting
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import math
-
-import mock
-from oslo.utils import units
-
-from nova.compute import arch
-from nova.tests.virt.xenapi import stubs
-from nova.virt import driver
-from nova.virt import fake
-from nova.virt import xenapi
-from nova.virt.xenapi import driver as xenapi_driver
-
-
-class XenAPIDriverTestCase(stubs.XenAPITestBaseNoDB):
- """Unit tests for Driver operations."""
-
- def _get_driver(self):
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.flags(connection_url='test_url',
- connection_password='test_pass', group='xenserver')
- return xenapi.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- def host_stats(self, refresh=True):
- return {'host_memory_total': 3 * units.Mi,
- 'host_memory_free_computed': 2 * units.Mi,
- 'disk_total': 5 * units.Gi,
- 'disk_used': 2 * units.Gi,
- 'disk_allocated': 4 * units.Gi,
- 'host_hostname': 'somename',
- 'supported_instances': arch.X86_64,
- 'host_cpu_info': {'cpu_count': 50},
- 'vcpus_used': 10,
- 'pci_passthrough_devices': ''}
-
- def test_available_resource(self):
- driver = self._get_driver()
- driver._session.product_version = (6, 8, 2)
-
- self.stubs.Set(driver.host_state, 'get_host_stats', self.host_stats)
-
- resources = driver.get_available_resource(None)
- self.assertEqual(6008002, resources['hypervisor_version'])
- self.assertEqual(50, resources['vcpus'])
- self.assertEqual(3, resources['memory_mb'])
- self.assertEqual(5, resources['local_gb'])
- self.assertEqual(10, resources['vcpus_used'])
- self.assertEqual(3 - 2, resources['memory_mb_used'])
- self.assertEqual(2, resources['local_gb_used'])
- self.assertEqual('xen', resources['hypervisor_type'])
- self.assertEqual('somename', resources['hypervisor_hostname'])
- self.assertEqual(1, resources['disk_available_least'])
-
- def test_overhead(self):
- driver = self._get_driver()
- instance = {'memory_mb': 30720, 'vcpus': 4}
-
- # expected memory overhead per:
- # https://wiki.openstack.org/wiki/XenServer/Overhead
- expected = ((instance['memory_mb'] * xenapi_driver.OVERHEAD_PER_MB) +
- (instance['vcpus'] * xenapi_driver.OVERHEAD_PER_VCPU) +
- xenapi_driver.OVERHEAD_BASE)
- expected = math.ceil(expected)
- overhead = driver.estimate_instance_overhead(instance)
- self.assertEqual(expected, overhead['memory_mb'])
-
- def test_set_bootable(self):
- driver = self._get_driver()
-
- self.mox.StubOutWithMock(driver._vmops, 'set_bootable')
- driver._vmops.set_bootable('inst', True)
- self.mox.ReplayAll()
-
- driver.set_bootable('inst', True)
-
- def test_post_interrupted_snapshot_cleanup(self):
- driver = self._get_driver()
- fake_vmops_cleanup = mock.Mock()
- driver._vmops.post_interrupted_snapshot_cleanup = fake_vmops_cleanup
-
- driver.post_interrupted_snapshot_cleanup("context", "instance")
-
- fake_vmops_cleanup.assert_called_once_with("context", "instance")
-
- def test_public_api_signatures(self):
- inst = self._get_driver()
- self.assertPublicAPISignatures(driver.ComputeDriver(None), inst)
diff --git a/nova/tests/virt/xenapi/test_network_utils.py b/nova/tests/virt/xenapi/test_network_utils.py
deleted file mode 100644
index 9c42c5871f..0000000000
--- a/nova/tests/virt/xenapi/test_network_utils.py
+++ /dev/null
@@ -1,76 +0,0 @@
-
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import exception
-from nova.tests.virt.xenapi import stubs
-from nova.virt.xenapi import network_utils
-
-
-class NetworkUtilsTestCase(stubs.XenAPITestBaseNoDB):
- def test_find_network_with_name_label_works(self):
- session = mock.Mock()
- session.network.get_by_name_label.return_value = ["net"]
-
- result = network_utils.find_network_with_name_label(session, "label")
-
- self.assertEqual("net", result)
- session.network.get_by_name_label.assert_called_once_with("label")
-
- def test_find_network_with_name_returns_none(self):
- session = mock.Mock()
- session.network.get_by_name_label.return_value = []
-
- result = network_utils.find_network_with_name_label(session, "label")
-
- self.assertIsNone(result)
-
- def test_find_network_with_name_label_raises(self):
- session = mock.Mock()
- session.network.get_by_name_label.return_value = ["net", "net2"]
-
- self.assertRaises(exception.NovaException,
- network_utils.find_network_with_name_label,
- session, "label")
-
- def test_find_network_with_bridge_works(self):
- session = mock.Mock()
- session.network.get_all_records_where.return_value = {"net": "asdf"}
-
- result = network_utils.find_network_with_bridge(session, "bridge")
-
- self.assertEqual(result, "net")
- expr = 'field "name__label" = "bridge" or field "bridge" = "bridge"'
- session.network.get_all_records_where.assert_called_once_with(expr)
-
- def test_find_network_with_bridge_raises_too_many(self):
- session = mock.Mock()
- session.network.get_all_records_where.return_value = {
- "net": "asdf",
- "net2": "asdf2"
- }
-
- self.assertRaises(exception.NovaException,
- network_utils.find_network_with_bridge,
- session, "bridge")
-
- def test_find_network_with_bridge_raises_no_networks(self):
- session = mock.Mock()
- session.network.get_all_records_where.return_value = {}
-
- self.assertRaises(exception.NovaException,
- network_utils.find_network_with_bridge,
- session, "bridge")
diff --git a/nova/tests/virt/xenapi/test_vm_utils.py b/nova/tests/virt/xenapi/test_vm_utils.py
deleted file mode 100644
index f88e9ede51..0000000000
--- a/nova/tests/virt/xenapi/test_vm_utils.py
+++ /dev/null
@@ -1,2422 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-import uuid
-
-from eventlet import greenthread
-import fixtures
-import mock
-import mox
-from oslo.concurrency import lockutils
-from oslo.concurrency import processutils
-from oslo.config import cfg
-from oslo.utils import timeutils
-from oslo.utils import units
-import six
-
-from nova.compute import flavors
-from nova.compute import power_state
-from nova.compute import vm_mode
-from nova import context
-from nova import exception
-from nova.i18n import _
-from nova.openstack.common.fixture import config as config_fixture
-from nova import test
-from nova.tests.virt.xenapi import stubs
-from nova.tests.virt.xenapi import test_xenapi
-from nova import utils
-from nova.virt.xenapi.client import session as xenapi_session
-from nova.virt.xenapi import driver as xenapi_conn
-from nova.virt.xenapi import fake
-from nova.virt.xenapi import vm_utils
-
-CONF = cfg.CONF
-XENSM_TYPE = 'xensm'
-ISCSI_TYPE = 'iscsi'
-
-
-def get_fake_connection_data(sr_type):
- fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR',
- 'name_label': 'fake_storage',
- 'name_description': 'test purposes',
- 'server': 'myserver',
- 'serverpath': '/local/scratch/myname',
- 'sr_type': 'nfs',
- 'introduce_sr_keys': ['server',
- 'serverpath',
- 'sr_type'],
- 'vdi_uuid': 'falseVDI'},
- ISCSI_TYPE: {'volume_id': 'fake_volume_id',
- 'target_lun': 1,
- 'target_iqn': 'fake_iqn:volume-fake_volume_id',
- 'target_portal': u'localhost:3260',
- 'target_discovered': False}, }
- return fakes[sr_type]
-
-
-def _get_fake_session(error=None):
- session = mock.Mock()
- xenapi_session.apply_session_helpers(session)
-
- if error is not None:
- class FakeException(Exception):
- details = [error, "a", "b", "c"]
-
- session.XenAPI.Failure = FakeException
- session.call_xenapi.side_effect = FakeException
-
- return session
-
-
-@contextlib.contextmanager
-def contextified(result):
- yield result
-
-
-def _fake_noop(*args, **kwargs):
- return
-
-
-class VMUtilsTestBase(stubs.XenAPITestBaseNoDB):
- pass
-
-
-class LookupTestCase(VMUtilsTestBase):
- def setUp(self):
- super(LookupTestCase, self).setUp()
- self.session = self.mox.CreateMockAnything('Fake Session')
- self.name_label = 'my_vm'
-
- def _do_mock(self, result):
- self.session.call_xenapi(
- "VM.get_by_name_label", self.name_label).AndReturn(result)
- self.mox.ReplayAll()
-
- def test_normal(self):
- self._do_mock(['x'])
- result = vm_utils.lookup(self.session, self.name_label)
- self.assertEqual('x', result)
-
- def test_no_result(self):
- self._do_mock([])
- result = vm_utils.lookup(self.session, self.name_label)
- self.assertIsNone(result)
-
- def test_too_many(self):
- self._do_mock(['a', 'b'])
- self.assertRaises(exception.InstanceExists,
- vm_utils.lookup,
- self.session, self.name_label)
-
- def test_rescue_none(self):
- self.session.call_xenapi(
- "VM.get_by_name_label", self.name_label + '-rescue').AndReturn([])
- self._do_mock(['x'])
- result = vm_utils.lookup(self.session, self.name_label,
- check_rescue=True)
- self.assertEqual('x', result)
-
- def test_rescue_found(self):
- self.session.call_xenapi(
- "VM.get_by_name_label",
- self.name_label + '-rescue').AndReturn(['y'])
- self.mox.ReplayAll()
- result = vm_utils.lookup(self.session, self.name_label,
- check_rescue=True)
- self.assertEqual('y', result)
-
- def test_rescue_too_many(self):
- self.session.call_xenapi(
- "VM.get_by_name_label",
- self.name_label + '-rescue').AndReturn(['a', 'b', 'c'])
- self.mox.ReplayAll()
- self.assertRaises(exception.InstanceExists,
- vm_utils.lookup,
- self.session, self.name_label,
- check_rescue=True)
-
-
-class GenerateConfigDriveTestCase(VMUtilsTestBase):
- def test_no_admin_pass(self):
- instance = {}
-
- self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
- vm_utils.safe_find_sr('session').AndReturn('sr_ref')
-
- self.mox.StubOutWithMock(vm_utils, 'create_vdi')
- vm_utils.create_vdi('session', 'sr_ref', instance, 'config-2',
- 'configdrive',
- 64 * units.Mi).AndReturn('vdi_ref')
-
- self.mox.StubOutWithMock(vm_utils, 'vdi_attached_here')
- vm_utils.vdi_attached_here(
- 'session', 'vdi_ref', read_only=False).AndReturn(
- contextified('mounted_dev'))
-
- class FakeInstanceMetadata(object):
- def __init__(_self, instance, content=None, extra_md=None,
- network_info=None):
- self.assertEqual(network_info, "nw_info")
-
- def metadata_for_config_drive(_self):
- return []
-
- self.useFixture(fixtures.MonkeyPatch(
- 'nova.api.metadata.base.InstanceMetadata',
- FakeInstanceMetadata))
-
- self.mox.StubOutWithMock(utils, 'execute')
- utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots',
- '-allow-lowercase', '-allow-multidot', '-l',
- '-publisher', mox.IgnoreArg(), '-quiet',
- '-J', '-r', '-V', 'config-2', mox.IgnoreArg(),
- attempts=1, run_as_root=False).AndReturn(None)
- utils.execute('dd', mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg(), run_as_root=True).AndReturn(None)
-
- self.mox.StubOutWithMock(vm_utils, 'create_vbd')
- vm_utils.create_vbd('session', 'vm_ref', 'vdi_ref', mox.IgnoreArg(),
- bootable=False, read_only=True).AndReturn(None)
-
- self.mox.ReplayAll()
-
- # And the actual call we're testing
- vm_utils.generate_configdrive('session', instance, 'vm_ref',
- 'userdevice', "nw_info")
-
- @mock.patch.object(vm_utils, "destroy_vdi")
- @mock.patch.object(vm_utils, "vdi_attached_here")
- @mock.patch.object(vm_utils, "create_vdi")
- @mock.patch.object(vm_utils, "safe_find_sr")
- def test_vdi_cleaned_up(self, mock_find, mock_create_vdi, mock_attached,
- mock_destroy):
- mock_create_vdi.return_value = 'vdi_ref'
- mock_attached.side_effect = test.TestingException
- mock_destroy.side_effect = exception.StorageError(reason="")
-
- instance = {"uuid": "asdf"}
- self.assertRaises(test.TestingException,
- vm_utils.generate_configdrive,
- 'session', instance, 'vm_ref', 'userdevice',
- 'nw_info')
- mock_destroy.assert_called_once_with('session', 'vdi_ref')
-
-
-class XenAPIGetUUID(VMUtilsTestBase):
- def test_get_this_vm_uuid_new_kernel(self):
- self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')
-
- vm_utils._get_sys_hypervisor_uuid().AndReturn(
- '2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f')
-
- self.mox.ReplayAll()
- self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
- vm_utils.get_this_vm_uuid(None))
- self.mox.VerifyAll()
-
- def test_get_this_vm_uuid_old_kernel_reboot(self):
- self.mox.StubOutWithMock(vm_utils, '_get_sys_hypervisor_uuid')
- self.mox.StubOutWithMock(utils, 'execute')
-
- vm_utils._get_sys_hypervisor_uuid().AndRaise(
- IOError(13, 'Permission denied'))
- utils.execute('xenstore-read', 'domid', run_as_root=True).AndReturn(
- ('27', ''))
- utils.execute('xenstore-read', '/local/domain/27/vm',
- run_as_root=True).AndReturn(
- ('/vm/2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f', ''))
-
- self.mox.ReplayAll()
- self.assertEqual('2f46f0f5-f14c-ef1b-1fac-9eeca0888a3f',
- vm_utils.get_this_vm_uuid(None))
- self.mox.VerifyAll()
-
-
-class FakeSession(object):
- def call_xenapi(self, *args):
- pass
-
- def call_plugin(self, *args):
- pass
-
- def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
- pass
-
- def call_plugin_serialized_with_retry(self, plugin, fn, num_retries,
- callback, *args, **kwargs):
- pass
-
-
-class FetchVhdImageTestCase(VMUtilsTestBase):
- def setUp(self):
- super(FetchVhdImageTestCase, self).setUp()
- self.context = context.get_admin_context()
- self.context.auth_token = 'auth_token'
- self.session = FakeSession()
- self.instance = {"uuid": "uuid"}
-
- self.mox.StubOutWithMock(vm_utils, '_make_uuid_stack')
- vm_utils._make_uuid_stack().AndReturn(["uuid_stack"])
-
- self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
- vm_utils.get_sr_path(self.session).AndReturn('sr_path')
-
- def _stub_glance_download_vhd(self, raise_exc=None):
- self.mox.StubOutWithMock(
- self.session, 'call_plugin_serialized_with_retry')
- func = self.session.call_plugin_serialized_with_retry(
- 'glance', 'download_vhd', 0, mox.IgnoreArg(), mox.IgnoreArg(),
- extra_headers={'X-Service-Catalog': '[]',
- 'X-Auth-Token': 'auth_token',
- 'X-Roles': '',
- 'X-Tenant-Id': None,
- 'X-User-Id': None,
- 'X-Identity-Status': 'Confirmed'},
- image_id='image_id',
- uuid_stack=["uuid_stack"],
- sr_path='sr_path')
-
- if raise_exc:
- func.AndRaise(raise_exc)
- else:
- func.AndReturn({'root': {'uuid': 'vdi'}})
-
- def _stub_bittorrent_download_vhd(self, raise_exc=None):
- self.mox.StubOutWithMock(
- self.session, 'call_plugin_serialized')
- func = self.session.call_plugin_serialized(
- 'bittorrent', 'download_vhd',
- image_id='image_id',
- uuid_stack=["uuid_stack"],
- sr_path='sr_path',
- torrent_download_stall_cutoff=600,
- torrent_listen_port_start=6881,
- torrent_listen_port_end=6891,
- torrent_max_last_accessed=86400,
- torrent_max_seeder_processes_per_host=1,
- torrent_seed_chance=1.0,
- torrent_seed_duration=3600,
- torrent_url='http://foo/image_id.torrent'
- )
- if raise_exc:
- func.AndRaise(raise_exc)
- else:
- func.AndReturn({'root': {'uuid': 'vdi'}})
-
- def test_fetch_vhd_image_works_with_glance(self):
- self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
- vm_utils._image_uses_bittorrent(
- self.context, self.instance).AndReturn(False)
-
- self._stub_glance_download_vhd()
-
- self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
- vm_utils.safe_find_sr(self.session).AndReturn("sr")
-
- self.mox.StubOutWithMock(vm_utils, '_scan_sr')
- vm_utils._scan_sr(self.session, "sr")
-
- self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
- vm_utils._check_vdi_size(
- self.context, self.session, self.instance, "vdi")
-
- self.mox.ReplayAll()
-
- self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
- self.session, self.instance, 'image_id')['root']['uuid'])
-
- self.mox.VerifyAll()
-
- def test_fetch_vhd_image_works_with_bittorrent(self):
- cfg.CONF.import_opt('torrent_base_url',
- 'nova.virt.xenapi.image.bittorrent',
- group='xenserver')
- self.flags(torrent_base_url='http://foo', group='xenserver')
-
- self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
- vm_utils._image_uses_bittorrent(
- self.context, self.instance).AndReturn(True)
-
- self._stub_bittorrent_download_vhd()
-
- self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
- vm_utils.safe_find_sr(self.session).AndReturn("sr")
-
- self.mox.StubOutWithMock(vm_utils, '_scan_sr')
- vm_utils._scan_sr(self.session, "sr")
-
- self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
- vm_utils._check_vdi_size(self.context, self.session, self.instance,
- "vdi")
-
- self.mox.ReplayAll()
-
- self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
- self.session, self.instance, 'image_id')['root']['uuid'])
-
- self.mox.VerifyAll()
-
- def test_fetch_vhd_image_cleans_up_vdi_on_fail(self):
- self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
- vm_utils._image_uses_bittorrent(
- self.context, self.instance).AndReturn(False)
-
- self._stub_glance_download_vhd()
-
- self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
- vm_utils.safe_find_sr(self.session).AndReturn("sr")
-
- self.mox.StubOutWithMock(vm_utils, '_scan_sr')
- vm_utils._scan_sr(self.session, "sr")
-
- self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
- vm_utils._check_vdi_size(self.context, self.session, self.instance,
- "vdi").AndRaise(exception.FlavorDiskTooSmall)
-
- self.mox.StubOutWithMock(self.session, 'call_xenapi')
- self.session.call_xenapi("VDI.get_by_uuid", "vdi").AndReturn("ref")
-
- self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
- vm_utils.destroy_vdi(self.session,
- "ref").AndRaise(exception.StorageError(reason=""))
-
- self.mox.ReplayAll()
-
- self.assertRaises(exception.FlavorDiskTooSmall,
- vm_utils._fetch_vhd_image, self.context, self.session,
- self.instance, 'image_id')
-
- self.mox.VerifyAll()
-
- def test_fallback_to_default_handler(self):
- cfg.CONF.import_opt('torrent_base_url',
- 'nova.virt.xenapi.image.bittorrent',
- group='xenserver')
- self.flags(torrent_base_url='http://foo', group='xenserver')
-
- self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
- vm_utils._image_uses_bittorrent(
- self.context, self.instance).AndReturn(True)
-
- self._stub_bittorrent_download_vhd(raise_exc=RuntimeError)
-
- vm_utils._make_uuid_stack().AndReturn(["uuid_stack"])
- vm_utils.get_sr_path(self.session).AndReturn('sr_path')
-
- self._stub_glance_download_vhd()
-
- self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
- vm_utils.safe_find_sr(self.session).AndReturn("sr")
-
- self.mox.StubOutWithMock(vm_utils, '_scan_sr')
- vm_utils._scan_sr(self.session, "sr")
-
- self.mox.StubOutWithMock(vm_utils, '_check_vdi_size')
- vm_utils._check_vdi_size(self.context, self.session, self.instance,
- "vdi")
-
- self.mox.ReplayAll()
-
- self.assertEqual("vdi", vm_utils._fetch_vhd_image(self.context,
- self.session, self.instance, 'image_id')['root']['uuid'])
-
- self.mox.VerifyAll()
-
- def test_default_handler_does_not_fallback_to_itself(self):
- cfg.CONF.import_opt('torrent_base_url',
- 'nova.virt.xenapi.image.bittorrent',
- group='xenserver')
- self.flags(torrent_base_url='http://foo', group='xenserver')
-
- self.mox.StubOutWithMock(vm_utils, '_image_uses_bittorrent')
- vm_utils._image_uses_bittorrent(
- self.context, self.instance).AndReturn(False)
-
- self._stub_glance_download_vhd(raise_exc=RuntimeError)
-
- self.mox.ReplayAll()
-
- self.assertRaises(RuntimeError, vm_utils._fetch_vhd_image,
- self.context, self.session, self.instance, 'image_id')
-
- self.mox.VerifyAll()
-
-
-class TestImageCompression(VMUtilsTestBase):
- def test_image_compression(self):
- # Testing for nova.conf, too low, negative, and a correct value.
- self.assertIsNone(vm_utils.get_compression_level())
- self.flags(image_compression_level=0, group='xenserver')
- self.assertIsNone(vm_utils.get_compression_level())
- self.flags(image_compression_level=-6, group='xenserver')
- self.assertIsNone(vm_utils.get_compression_level())
- self.flags(image_compression_level=6, group='xenserver')
- self.assertEqual(vm_utils.get_compression_level(), 6)
-
-
-class ResizeHelpersTestCase(VMUtilsTestBase):
- def test_repair_filesystem(self):
- self.mox.StubOutWithMock(utils, 'execute')
-
- utils.execute('e2fsck', '-f', "-y", "fakepath",
- run_as_root=True, check_exit_code=[0, 1, 2]).AndReturn(
- ("size is: 42", ""))
-
- self.mox.ReplayAll()
-
- vm_utils._repair_filesystem("fakepath")
-
- def _call_tune2fs_remove_journal(self, path):
- utils.execute("tune2fs", "-O ^has_journal", path, run_as_root=True)
-
- def _call_tune2fs_add_journal(self, path):
- utils.execute("tune2fs", "-j", path, run_as_root=True)
-
- def _call_parted_mkpart(self, path, start, end):
- utils.execute('parted', '--script', path, 'rm', '1',
- run_as_root=True)
- utils.execute('parted', '--script', path, 'mkpart',
- 'primary', '%ds' % start, '%ds' % end, run_as_root=True)
-
- def _call_parted_boot_flag(sef, path):
- utils.execute('parted', '--script', path, 'set', '1',
- 'boot', 'on', run_as_root=True)
-
- def test_resize_part_and_fs_down_succeeds(self):
- self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
- self.mox.StubOutWithMock(utils, 'execute')
-
- dev_path = "/dev/fake"
- partition_path = "%s1" % dev_path
- vm_utils._repair_filesystem(partition_path)
- self._call_tune2fs_remove_journal(partition_path)
- utils.execute("resize2fs", partition_path, "10s", run_as_root=True)
- self._call_parted_mkpart(dev_path, 0, 9)
- self._call_parted_boot_flag(dev_path)
- self._call_tune2fs_add_journal(partition_path)
-
- self.mox.ReplayAll()
-
- vm_utils._resize_part_and_fs("fake", 0, 20, 10, "boot")
-
- def test_log_progress_if_required(self):
- self.mox.StubOutWithMock(vm_utils.LOG, "debug")
- vm_utils.LOG.debug(_("Sparse copy in progress, "
- "%(complete_pct).2f%% complete. "
- "%(left)s bytes left to copy"),
- {"complete_pct": 50.0, "left": 1})
- current = timeutils.utcnow()
- timeutils.set_time_override(current)
- timeutils.advance_time_seconds(vm_utils.PROGRESS_INTERVAL_SECONDS + 1)
- self.mox.ReplayAll()
- vm_utils._log_progress_if_required(1, current, 2)
-
- def test_log_progress_if_not_required(self):
- self.mox.StubOutWithMock(vm_utils.LOG, "debug")
- current = timeutils.utcnow()
- timeutils.set_time_override(current)
- timeutils.advance_time_seconds(vm_utils.PROGRESS_INTERVAL_SECONDS - 1)
- self.mox.ReplayAll()
- vm_utils._log_progress_if_required(1, current, 2)
-
- def test_resize_part_and_fs_down_fails_disk_too_big(self):
- self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
- self.mox.StubOutWithMock(utils, 'execute')
-
- dev_path = "/dev/fake"
- partition_path = "%s1" % dev_path
- new_sectors = 10
- vm_utils._repair_filesystem(partition_path)
- self._call_tune2fs_remove_journal(partition_path)
- mobj = utils.execute("resize2fs",
- partition_path,
- "%ss" % new_sectors,
- run_as_root=True)
- mobj.AndRaise(processutils.ProcessExecutionError)
- self.mox.ReplayAll()
- self.assertRaises(exception.ResizeError,
- vm_utils._resize_part_and_fs,
- "fake", 0, 20, 10, "boot")
-
- def test_resize_part_and_fs_up_succeeds(self):
- self.mox.StubOutWithMock(vm_utils, "_repair_filesystem")
- self.mox.StubOutWithMock(utils, 'execute')
-
- dev_path = "/dev/fake"
- partition_path = "%s1" % dev_path
- vm_utils._repair_filesystem(partition_path)
- self._call_tune2fs_remove_journal(partition_path)
- self._call_parted_mkpart(dev_path, 0, 29)
- utils.execute("resize2fs", partition_path, run_as_root=True)
- self._call_tune2fs_add_journal(partition_path)
-
- self.mox.ReplayAll()
-
- vm_utils._resize_part_and_fs("fake", 0, 20, 30, "")
-
- def test_resize_disk_throws_on_zero_size(self):
- self.assertRaises(exception.ResizeError,
- vm_utils.resize_disk, "session", "instance", "vdi_ref",
- {"root_gb": 0})
-
- def test_auto_config_disk_returns_early_on_zero_size(self):
- vm_utils.try_auto_configure_disk("bad_session", "bad_vdi_ref", 0)
-
- @mock.patch.object(utils, "execute")
- def test_get_partitions(self, mock_execute):
- parted_return = "BYT;\n...\n"
- parted_return += "1:2s:11s:10s:ext3::boot;\n"
- parted_return += "2:20s:11s:10s::bob:;\n"
- mock_execute.return_value = (parted_return, None)
-
- partitions = vm_utils._get_partitions("abc")
-
- self.assertEqual(2, len(partitions))
- self.assertEqual((1, 2, 10, "ext3", "", "boot"), partitions[0])
- self.assertEqual((2, 20, 10, "", "bob", ""), partitions[1])
-
-
-class CheckVDISizeTestCase(VMUtilsTestBase):
- def setUp(self):
- super(CheckVDISizeTestCase, self).setUp()
- self.context = 'fakecontext'
- self.session = 'fakesession'
- self.instance = dict(uuid='fakeinstance')
- self.vdi_uuid = 'fakeuuid'
-
- def test_not_too_large(self):
- self.mox.StubOutWithMock(flavors, 'extract_flavor')
- flavors.extract_flavor(self.instance).AndReturn(
- dict(root_gb=1))
-
- self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size')
- vm_utils._get_vdi_chain_size(self.session,
- self.vdi_uuid).AndReturn(1073741824)
-
- self.mox.ReplayAll()
-
- vm_utils._check_vdi_size(self.context, self.session, self.instance,
- self.vdi_uuid)
-
- def test_too_large(self):
- self.mox.StubOutWithMock(flavors, 'extract_flavor')
- flavors.extract_flavor(self.instance).AndReturn(
- dict(root_gb=1))
-
- self.mox.StubOutWithMock(vm_utils, '_get_vdi_chain_size')
- vm_utils._get_vdi_chain_size(self.session,
- self.vdi_uuid).AndReturn(11811160065) # 10GB overhead allowed
-
- self.mox.ReplayAll()
-
- self.assertRaises(exception.FlavorDiskTooSmall,
- vm_utils._check_vdi_size, self.context, self.session,
- self.instance, self.vdi_uuid)
-
- def test_zero_root_gb_disables_check(self):
- self.mox.StubOutWithMock(flavors, 'extract_flavor')
- flavors.extract_flavor(self.instance).AndReturn(
- dict(root_gb=0))
-
- self.mox.ReplayAll()
-
- vm_utils._check_vdi_size(self.context, self.session, self.instance,
- self.vdi_uuid)
-
-
-class GetInstanceForVdisForSrTestCase(VMUtilsTestBase):
- def setUp(self):
- super(GetInstanceForVdisForSrTestCase, self).setUp()
- self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
- self.fixture.config(disable_process_locking=True,
- group='oslo_concurrency')
- self.flags(instance_name_template='%d',
- firewall_driver='nova.virt.xenapi.firewall.'
- 'Dom0IptablesFirewallDriver')
- self.flags(connection_url='test_url',
- connection_password='test_pass',
- group='xenserver')
-
- def test_get_instance_vdis_for_sr(self):
- vm_ref = fake.create_vm("foo", "Running")
- sr_ref = fake.create_sr()
-
- vdi_1 = fake.create_vdi('vdiname1', sr_ref)
- vdi_2 = fake.create_vdi('vdiname2', sr_ref)
-
- for vdi_ref in [vdi_1, vdi_2]:
- fake.create_vbd(vm_ref, vdi_ref)
-
- stubs.stubout_session(self.stubs, fake.SessionBase)
- driver = xenapi_conn.XenAPIDriver(False)
-
- result = list(vm_utils.get_instance_vdis_for_sr(
- driver._session, vm_ref, sr_ref))
-
- self.assertEqual([vdi_1, vdi_2], result)
-
- def test_get_instance_vdis_for_sr_no_vbd(self):
- vm_ref = fake.create_vm("foo", "Running")
- sr_ref = fake.create_sr()
-
- stubs.stubout_session(self.stubs, fake.SessionBase)
- driver = xenapi_conn.XenAPIDriver(False)
-
- result = list(vm_utils.get_instance_vdis_for_sr(
- driver._session, vm_ref, sr_ref))
-
- self.assertEqual([], result)
-
-
-class VMRefOrRaiseVMFoundTestCase(VMUtilsTestBase):
-
- def test_lookup_call(self):
- mock = mox.Mox()
- mock.StubOutWithMock(vm_utils, 'lookup')
-
- vm_utils.lookup('session', 'somename').AndReturn('ignored')
-
- mock.ReplayAll()
- vm_utils.vm_ref_or_raise('session', 'somename')
- mock.VerifyAll()
-
- def test_return_value(self):
- mock = mox.Mox()
- mock.StubOutWithMock(vm_utils, 'lookup')
-
- vm_utils.lookup(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('vmref')
-
- mock.ReplayAll()
- self.assertEqual(
- 'vmref', vm_utils.vm_ref_or_raise('session', 'somename'))
- mock.VerifyAll()
-
-
-class VMRefOrRaiseVMNotFoundTestCase(VMUtilsTestBase):
-
- def test_exception_raised(self):
- mock = mox.Mox()
- mock.StubOutWithMock(vm_utils, 'lookup')
-
- vm_utils.lookup('session', 'somename').AndReturn(None)
-
- mock.ReplayAll()
- self.assertRaises(
- exception.InstanceNotFound,
- lambda: vm_utils.vm_ref_or_raise('session', 'somename')
- )
- mock.VerifyAll()
-
- def test_exception_msg_contains_vm_name(self):
- mock = mox.Mox()
- mock.StubOutWithMock(vm_utils, 'lookup')
-
- vm_utils.lookup('session', 'somename').AndReturn(None)
-
- mock.ReplayAll()
- try:
- vm_utils.vm_ref_or_raise('session', 'somename')
- except exception.InstanceNotFound as e:
- self.assertIn('somename', six.text_type(e))
- mock.VerifyAll()
-
-
-@mock.patch.object(vm_utils, 'safe_find_sr', return_value='safe_find_sr')
-class CreateCachedImageTestCase(VMUtilsTestBase):
- def setUp(self):
- super(CreateCachedImageTestCase, self).setUp()
- self.session = _get_fake_session()
-
- @mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
- def test_cached(self, mock_clone_vdi, mock_safe_find_sr):
- self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
- None, None, None, 'vdi_uuid']
- self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
- vm_utils._create_cached_image('context', self.session,
- 'instance', 'name', 'uuid',
- vm_utils.ImageType.DISK_VHD))
-
- @mock.patch.object(vm_utils, '_safe_copy_vdi', return_value='new_vdi_ref')
- def test_no_cow(self, mock_safe_copy_vdi, mock_safe_find_sr):
- self.flags(use_cow_images=False)
- self.session.call_xenapi.side_effect = ['ext', {'vdi_ref': 2},
- None, None, None, 'vdi_uuid']
- self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
- vm_utils._create_cached_image('context', self.session,
- 'instance', 'name', 'uuid',
- vm_utils.ImageType.DISK_VHD))
-
- def test_no_cow_no_ext(self, mock_safe_find_sr):
- self.flags(use_cow_images=False)
- self.session.call_xenapi.side_effect = ['non-ext', {'vdi_ref': 2},
- 'vdi_ref', None, None, None,
- 'vdi_uuid']
- self.assertEqual((False, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
- vm_utils._create_cached_image('context', self.session,
- 'instance', 'name', 'uuid',
- vm_utils.ImageType.DISK_VHD))
-
- @mock.patch.object(vm_utils, '_clone_vdi', return_value='new_vdi_ref')
- @mock.patch.object(vm_utils, '_fetch_image',
- return_value={'root': {'uuid': 'vdi_uuid',
- 'file': None}})
- def test_noncached(self, mock_fetch_image, mock_clone_vdi,
- mock_safe_find_sr):
- self.session.call_xenapi.side_effect = ['ext', {}, 'cache_vdi_ref',
- None, None, None, None, None,
- None, 'vdi_uuid']
- self.assertEqual((True, {'root': {'uuid': 'vdi_uuid', 'file': None}}),
- vm_utils._create_cached_image('context', self.session,
- 'instance', 'name', 'uuid',
- vm_utils.ImageType.DISK_VHD))
-
-
class BittorrentTestCase(VMUtilsTestBase):
    """Tests for the bittorrent / image-cache selection in create_image."""

    def setUp(self):
        super(BittorrentTestCase, self).setUp()
        self.context = context.get_admin_context()

    def test_image_uses_bittorrent(self):
        # Both the flag and the per-image metadata must opt in.
        self.flags(torrent_images='some', group='xenserver')
        instance = {'system_metadata': {'image_bittorrent': True}}
        self.assertTrue(
            vm_utils._image_uses_bittorrent(self.context, instance))

    def _test_create_image(self, cache_type):
        """Drive create_image and record which fetch path it chose."""
        self.flags(cache_images=cache_type, group='xenserver')
        instance = {'system_metadata': {'image_cache_in_nova': True}}

        chosen = {'called': None}

        def stub_create_cached_image(*args):
            # Taken when caching is enabled ('some').
            chosen['called'] = 'some'
            return (False, {})

        def stub_fetch_image(*args):
            # Taken when caching is disabled ('none').
            chosen['called'] = 'none'
            return {}

        self.stubs.Set(vm_utils, '_create_cached_image',
                       stub_create_cached_image)
        self.stubs.Set(vm_utils, '_fetch_image', stub_fetch_image)

        vm_utils.create_image(self.context, None, instance,
                              'foo', 'bar', 'baz')

        self.assertEqual(chosen['called'], cache_type)

    def test_create_image_cached(self):
        self._test_create_image('some')

    def test_create_image_uncached(self):
        self._test_create_image('none')
-
-
class ShutdownTestCase(VMUtilsTestBase):
    """Tests for the hard/clean VM shutdown helpers."""

    def test_hardshutdown_should_return_true_when_vm_is_shutdown(self):
        # NOTE(review): mox expectations are recorded below but
        # ReplayAll()/VerifyAll() are never called, so the stubbed
        # is_vm_shutdown stays in record mode when hard_shutdown_vm runs;
        # the assertTrue may be passing on a truthy mox object rather
        # than a real True -- confirm before relying on this test.
        self.mock = mox.Mox()
        session = FakeSession()
        instance = "instance"
        vm_ref = "vm-ref"
        self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
        # LOG is stubbed so the helper's logging does not hit a real logger.
        self.mock.StubOutWithMock(vm_utils, 'LOG')
        self.assertTrue(vm_utils.hard_shutdown_vm(
            session, instance, vm_ref))

    def test_cleanshutdown_should_return_true_when_vm_is_shutdown(self):
        # Same structure as the hard-shutdown test; same NOTE(review)
        # about the missing ReplayAll() applies.
        self.mock = mox.Mox()
        session = FakeSession()
        instance = "instance"
        vm_ref = "vm-ref"
        self.mock.StubOutWithMock(vm_utils, 'is_vm_shutdown')
        vm_utils.is_vm_shutdown(session, vm_ref).AndReturn(True)
        self.mock.StubOutWithMock(vm_utils, 'LOG')
        self.assertTrue(vm_utils.clean_shutdown_vm(
            session, instance, vm_ref))
-
-
class CreateVBDTestCase(VMUtilsTestBase):
    """Tests for create_vbd/attach_cd, verifying the exact VBD.create record.

    mox record/replay is used, so the order of expected call_xenapi
    invocations in each test is significant.
    """

    def setUp(self):
        super(CreateVBDTestCase, self).setUp()
        self.session = FakeSession()
        self.mock = mox.Mox()
        self.mock.StubOutWithMock(self.session, 'call_xenapi')
        # Baseline record that create_vbd is expected to pass to VBD.create;
        # individual tests mutate copies of the relevant fields.
        self.vbd_rec = self._generate_vbd_rec()

    def _generate_vbd_rec(self):
        """Return the default VBD record produced by create_vbd's defaults."""
        vbd_rec = {}
        vbd_rec['VM'] = 'vm_ref'
        vbd_rec['VDI'] = 'vdi_ref'
        vbd_rec['userdevice'] = '0'
        vbd_rec['bootable'] = False
        vbd_rec['mode'] = 'RW'
        vbd_rec['type'] = 'disk'
        vbd_rec['unpluggable'] = True
        vbd_rec['empty'] = False
        vbd_rec['other_config'] = {}
        vbd_rec['qos_algorithm_type'] = ''
        vbd_rec['qos_algorithm_params'] = {}
        vbd_rec['qos_supported_algorithms'] = []
        return vbd_rec

    def test_create_vbd_default_args(self):
        """Defaults map straight onto the baseline VBD record."""
        self.session.call_xenapi('VBD.create',
                                 self.vbd_rec).AndReturn("vbd_ref")
        self.mock.ReplayAll()

        result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0)
        self.assertEqual(result, "vbd_ref")
        self.mock.VerifyAll()

    def test_create_vbd_osvol(self):
        """osvol=True additionally tags the VBD via add_to_other_config."""
        self.session.call_xenapi('VBD.create',
                                 self.vbd_rec).AndReturn("vbd_ref")
        self.session.call_xenapi('VBD.add_to_other_config', "vbd_ref",
                                 "osvol", "True")
        self.mock.ReplayAll()
        result = vm_utils.create_vbd(self.session, "vm_ref", "vdi_ref", 0,
                                     osvol=True)
        self.assertEqual(result, "vbd_ref")
        self.mock.VerifyAll()

    def test_create_vbd_extra_args(self):
        """Every overridable keyword flows into the corresponding field."""
        # A null VDI ref is what create_vbd sends when vdi_ref is None.
        self.vbd_rec['VDI'] = 'OpaqueRef:NULL'
        self.vbd_rec['type'] = 'a'
        self.vbd_rec['mode'] = 'RO'
        self.vbd_rec['bootable'] = True
        self.vbd_rec['empty'] = True
        self.vbd_rec['unpluggable'] = False
        self.session.call_xenapi('VBD.create',
                                 self.vbd_rec).AndReturn("vbd_ref")
        self.mock.ReplayAll()

        result = vm_utils.create_vbd(self.session, "vm_ref", None, 0,
                                     vbd_type="a", read_only=True,
                                     bootable=True, empty=True,
                                     unpluggable=False)
        self.assertEqual(result, "vbd_ref")
        self.mock.VerifyAll()

    def test_attach_cd(self):
        """attach_cd creates an empty CD VBD, then inserts the VDI into it."""
        self.mock.StubOutWithMock(vm_utils, 'create_vbd')

        vm_utils.create_vbd(self.session, "vm_ref", None, 1,
                            vbd_type='cd', read_only=True, bootable=True,
                            empty=True, unpluggable=False).AndReturn("vbd_ref")
        self.session.call_xenapi('VBD.insert', "vbd_ref", "vdi_ref")
        self.mock.ReplayAll()

        result = vm_utils.attach_cd(self.session, "vm_ref", "vdi_ref", 1)
        self.assertEqual(result, "vbd_ref")
        self.mock.VerifyAll()
-
-
class UnplugVbdTestCase(VMUtilsTestBase):
    """Tests for vm_utils.unplug_vbd error handling and retry behaviour.

    The call signature, established by test_unplug_vbd_works, is
    unplug_vbd(session, vbd_ref, vm_ref).
    """

    @mock.patch.object(greenthread, 'sleep')
    def test_unplug_vbd_works(self, mock_sleep):
        """A clean unplug issues exactly one VBD.unplug and never sleeps."""
        session = _get_fake_session()
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'

        vm_utils.unplug_vbd(session, vbd_ref, vm_ref)

        session.call_xenapi.assert_called_once_with('VBD.unplug', vbd_ref)
        self.assertEqual(0, mock_sleep.call_count)

    def test_unplug_vbd_raises_unexpected_error(self):
        """Non-XenAPI exceptions propagate immediately, with no retry."""
        session = _get_fake_session()
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'
        session.call_xenapi.side_effect = test.TestingException()

        # BUG FIX: the arguments were previously passed as
        # (session, vm_ref, vbd_ref) -- swapped relative to the real
        # signature. The test still passed because the fake session raises
        # regardless of its arguments, but the call no longer matched the
        # API under test.
        self.assertRaises(test.TestingException, vm_utils.unplug_vbd,
                          session, vbd_ref, vm_ref)
        self.assertEqual(1, session.call_xenapi.call_count)

    def test_unplug_vbd_already_detached_works(self):
        """DEVICE_ALREADY_DETACHED counts as success, not an error."""
        error = "DEVICE_ALREADY_DETACHED"
        session = _get_fake_session(error)
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'

        vm_utils.unplug_vbd(session, vbd_ref, vm_ref)
        self.assertEqual(1, session.call_xenapi.call_count)

    def test_unplug_vbd_already_raises_unexpected_xenapi_error(self):
        """An unrecognized XenAPI failure surfaces as StorageError."""
        session = _get_fake_session("")
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'

        self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
                          session, vbd_ref, vm_ref)
        self.assertEqual(1, session.call_xenapi.call_count)

    def _test_uplug_vbd_retries(self, mock_sleep, error):
        """Retryable errors: 11 attempts with 10 sleeps, then StorageError.

        NOTE: 'uplug' (sic) is kept in the method names to preserve the
        existing test IDs.
        """
        session = _get_fake_session(error)
        vbd_ref = "vbd_ref"
        vm_ref = 'vm_ref'

        # BUG FIX: arguments were previously swapped here too; see
        # test_unplug_vbd_raises_unexpected_error.
        self.assertRaises(exception.StorageError, vm_utils.unplug_vbd,
                          session, vbd_ref, vm_ref)

        self.assertEqual(11, session.call_xenapi.call_count)
        self.assertEqual(10, mock_sleep.call_count)

    @mock.patch.object(greenthread, 'sleep')
    def test_uplug_vbd_retries_on_rejected(self, mock_sleep):
        self._test_uplug_vbd_retries(mock_sleep,
                                     "DEVICE_DETACH_REJECTED")

    @mock.patch.object(greenthread, 'sleep')
    def test_uplug_vbd_retries_on_internal_error(self, mock_sleep):
        self._test_uplug_vbd_retries(mock_sleep,
                                     "INTERNAL_ERROR")
-
-
class VDIOtherConfigTestCase(VMUtilsTestBase):
    """Tests to ensure that the code is populating VDI's `other_config`
    attribute with the correct metadata.
    """

    def setUp(self):
        super(VDIOtherConfigTestCase, self).setUp()

        class _FakeSession():
            def call_xenapi(self, operation, *args, **kwargs):
                # VDI.add_to_other_config -> VDI_add_to_other_config
                method = getattr(self, operation.replace('.', '_'), None)
                if method:
                    return method(*args, **kwargs)

                # No matching stub: record the call for later inspection.
                self.operation = operation
                self.args = args
                self.kwargs = kwargs

        self.session = _FakeSession()
        self.context = context.get_admin_context()
        self.fake_instance = {'uuid': 'aaaa-bbbb-cccc-dddd',
                              'name': 'myinstance'}

    def test_create_vdi(self):
        # Some images are registered with XenServer explicitly by calling
        # `create_vdi`
        vm_utils.create_vdi(self.session, 'sr_ref', self.fake_instance,
                            'myvdi', 'root', 1024, read_only=True)

        # The recorded VDI.create record must carry the nova metadata.
        expected = {'nova_disk_type': 'root',
                    'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}

        self.assertEqual(expected, self.session.args[0]['other_config'])

    def test_create_image(self):
        # Other images are registered implicitly when they are dropped into
        # the SR by a dom0 plugin or some other process
        self.flags(cache_images='none', group='xenserver')

        def fake_fetch_image(*args):
            return {'root': {'uuid': 'fake-uuid'}}

        self.stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)

        other_config = {}

        def VDI_add_to_other_config(ref, key, value):
            other_config[key] = value

        # Stubbing on the session object and not class so we don't pollute
        # other tests
        self.session.VDI_add_to_other_config = VDI_add_to_other_config
        self.session.VDI_get_other_config = lambda vdi: {}

        vm_utils.create_image(self.context, self.session, self.fake_instance,
                              'myvdi', 'image1', vm_utils.ImageType.DISK_VHD)

        expected = {'nova_disk_type': 'root',
                    'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}

        self.assertEqual(expected, other_config)

    def test_import_migrated_vhds(self):
        # Migrated images should preserve the `other_config`
        other_config = {}

        def VDI_add_to_other_config(ref, key, value):
            other_config[key] = value

        def call_plugin_serialized(*args, **kwargs):
            return {'root': {'uuid': 'aaaa-bbbb-cccc-dddd'}}

        # Stubbing on the session object and not class so we don't pollute
        # other tests
        self.session.VDI_add_to_other_config = VDI_add_to_other_config
        self.session.VDI_get_other_config = lambda vdi: {}
        self.session.call_plugin_serialized = call_plugin_serialized

        # SR path lookup and rescanning are irrelevant to this assertion.
        self.stubs.Set(vm_utils, 'get_sr_path', lambda *a, **k: None)
        self.stubs.Set(vm_utils, 'scan_default_sr', lambda *a, **k: None)

        vm_utils._import_migrated_vhds(self.session, self.fake_instance,
                                       "disk_label", "root", "vdi_label")

        expected = {'nova_disk_type': 'root',
                    'nova_instance_uuid': 'aaaa-bbbb-cccc-dddd'}

        self.assertEqual(expected, other_config)
-
-
class GenerateDiskTestCase(VMUtilsTestBase):
    """Tests for vm_utils._generate_disk (partitioning + filesystem setup).

    Uses the fake XenAPI session plus mox expectations on utils.execute;
    the exact order of the expected parted/mkfs calls is significant.
    """

    def setUp(self):
        super(GenerateDiskTestCase, self).setUp()
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        stubs.stubout_session(self.stubs, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)
        self.session = driver._session
        # Tests that exercise the local-connection path flip this to True.
        self.session.is_local_connection = False
        self.vm_ref = fake.create_vm("foo", "Running")

    def tearDown(self):
        super(GenerateDiskTestCase, self).tearDown()
        fake.destroy_vm(self.vm_ref)

    def _expect_parted_calls(self):
        """Record the partitioning commands _generate_disk is expected
        to run; the local and remote connection paths differ (kpartx and
        check_exit_code handling).
        """
        self.mox.StubOutWithMock(utils, "execute")
        self.mox.StubOutWithMock(utils, "trycmd")
        self.mox.StubOutWithMock(vm_utils, "destroy_vdi")
        self.mox.StubOutWithMock(vm_utils.os.path, "exists")
        if self.session.is_local_connection:
            utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
                          'msdos', check_exit_code=False, run_as_root=True)
            utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
                          'primary', '0', '-0',
                          check_exit_code=False, run_as_root=True)
            vm_utils.os.path.exists('/dev/mapper/fakedev1').AndReturn(True)
            utils.trycmd('kpartx', '-a', '/dev/fakedev',
                         discard_warnings=True, run_as_root=True)
        else:
            utils.execute('parted', '--script', '/dev/fakedev', 'mklabel',
                          'msdos', check_exit_code=True, run_as_root=True)
            utils.execute('parted', '--script', '/dev/fakedev', '--', 'mkpart',
                          'primary', '0', '-0',
                          check_exit_code=True, run_as_root=True)

    def _check_vdi(self, vdi_ref, check_attached=True):
        """Assert the generated VDI has the requested size and, when
        expected, a VBD attaching it to the test VM.
        """
        vdi_rec = self.session.call_xenapi("VDI.get_record", vdi_ref)
        self.assertEqual(str(10 * units.Mi), vdi_rec["virtual_size"])
        if check_attached:
            vbd_ref = vdi_rec["VBDs"][0]
            vbd_rec = self.session.call_xenapi("VBD.get_record", vbd_ref)
            self.assertEqual(self.vm_ref, vbd_rec['VM'])
        else:
            self.assertEqual(0, len(vdi_rec["VBDs"]))

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_with_no_fs_given(self):
        """fs_type=None: partition only, no mkfs/mkswap command."""
        self._expect_parted_calls()

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
                                          self.vm_ref, "2", "name", "user",
                                          10, None)
        self._check_vdi(vdi_ref)

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_swap(self):
        """Swap disks get mkswap after partitioning."""
        self._expect_parted_calls()
        utils.execute('mkswap', '/dev/fakedev1', run_as_root=True)

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
                                          self.vm_ref, "2", "name", "swap",
                                          10, "linux-swap")
        self._check_vdi(vdi_ref)

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_ephemeral(self):
        """Ephemeral disks get an mkfs of the requested filesystem."""
        self._expect_parted_calls()
        utils.execute('mkfs', '-t', 'ext4', '/dev/fakedev1',
                      run_as_root=True)

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
                                          self.vm_ref, "2", "name",
                                          "ephemeral", 10, "ext4")
        self._check_vdi(vdi_ref)

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_ensure_cleanup_called(self):
        """On mkfs failure the VDI is destroyed; a failing destroy_vdi must
        not mask the original exception.
        """
        self._expect_parted_calls()
        utils.execute('mkfs', '-t', 'ext4', '/dev/fakedev1',
                      run_as_root=True).AndRaise(test.TestingException)
        vm_utils.destroy_vdi(
            self.session,
            mox.IgnoreArg()).AndRaise(exception.StorageError(reason=""))

        self.mox.ReplayAll()
        self.assertRaises(test.TestingException, vm_utils._generate_disk,
                          self.session, {"uuid": "fake_uuid"},
                          self.vm_ref, "2", "name", "ephemeral", 10, "ext4")

    @test_xenapi.stub_vm_utils_with_vdi_attached_here
    def test_generate_disk_ephemeral_local_not_attached(self):
        """Local connection with no VM: device-mapper path is used and the
        resulting VDI is left unattached.
        """
        self.session.is_local_connection = True
        self._expect_parted_calls()
        utils.execute('mkfs', '-t', 'ext4', '/dev/mapper/fakedev1',
                      run_as_root=True)

        self.mox.ReplayAll()
        vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
                                          None, "2", "name", "ephemeral",
                                          10, "ext4")
        self._check_vdi(vdi_ref, check_attached=False)
-
-
class GenerateEphemeralTestCase(VMUtilsTestBase):
    """Tests for splitting ephemeral storage into multiple disks and for
    generate_ephemeral's cleanup on failure.
    """

    def setUp(self):
        super(GenerateEphemeralTestCase, self).setUp()
        self.session = "session"
        self.instance = "instance"
        self.vm_ref = "vm_ref"
        self.name_label = "name"
        self.ephemeral_name_label = "name ephemeral"
        self.userdevice = 4
        self.mox.StubOutWithMock(vm_utils, "_generate_disk")
        self.mox.StubOutWithMock(vm_utils, "safe_destroy_vdis")

    def test_get_ephemeral_disk_sizes_simple(self):
        result = vm_utils.get_ephemeral_disk_sizes(20)
        expected = [20]
        self.assertEqual(expected, list(result))

    def test_get_ephemeral_disk_sizes_three_disks_2000(self):
        # Sizes are capped at 2000 GB per disk; the remainder goes last.
        result = vm_utils.get_ephemeral_disk_sizes(4030)
        expected = [2000, 2000, 30]
        self.assertEqual(expected, list(result))

    def test_get_ephemeral_disk_sizes_two_disks_1024(self):
        result = vm_utils.get_ephemeral_disk_sizes(2048)
        expected = [1024, 1024]
        self.assertEqual(expected, list(result))

    def _expect_generate_disk(self, size, device, name_label):
        """Record one expected _generate_disk call (size in GB, converted
        to MB by the caller); returns the device number as the fake vdi_ref.
        """
        vm_utils._generate_disk(self.session, self.instance, self.vm_ref,
                                str(device), name_label, 'ephemeral',
                                size * 1024, None).AndReturn(device)

    def test_generate_ephemeral_adds_one_disk(self):
        self._expect_generate_disk(20, self.userdevice,
                                   self.ephemeral_name_label)
        self.mox.ReplayAll()

        vm_utils.generate_ephemeral(self.session, self.instance, self.vm_ref,
                                    str(self.userdevice), self.name_label, 20)

    def test_generate_ephemeral_adds_multiple_disks(self):
        # Subsequent disks get " (1)", " (2)" suffixes and consecutive
        # userdevice numbers.
        self._expect_generate_disk(2000, self.userdevice,
                                   self.ephemeral_name_label)
        self._expect_generate_disk(2000, self.userdevice + 1,
                                   self.ephemeral_name_label + " (1)")
        self._expect_generate_disk(30, self.userdevice + 2,
                                   self.ephemeral_name_label + " (2)")
        self.mox.ReplayAll()

        vm_utils.generate_ephemeral(self.session, self.instance, self.vm_ref,
                                    str(self.userdevice), self.name_label,
                                    4030)

    def test_generate_ephemeral_cleans_up_on_error(self):
        # First two disks succeed (fake vdi refs 4 and 5); the third raises,
        # after which the already-created VDIs must be destroyed.
        self._expect_generate_disk(1024, self.userdevice,
                                   self.ephemeral_name_label)
        self._expect_generate_disk(1024, self.userdevice + 1,
                                   self.ephemeral_name_label + " (1)")

        vm_utils._generate_disk(
            self.session, self.instance, self.vm_ref,
            str(self.userdevice + 2), "name ephemeral (2)", 'ephemeral',
            units.Mi, None).AndRaise(exception.NovaException)

        vm_utils.safe_destroy_vdis(self.session, [4, 5])

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException, vm_utils.generate_ephemeral,
                          self.session, self.instance, self.vm_ref,
                          str(self.userdevice), self.name_label, 4096)
-
-
class FakeFile(object):
    """Test double for a writable file: remembers every seek() call."""

    def __init__(self):
        # Each entry is a (bound-method, argument) pair so tests can
        # assert on both which operation ran and what it was given.
        self._file_operations = []

    def seek(self, offset):
        operation = (self.seek, offset)
        self._file_operations.append(operation)
-
-
class StreamDiskTestCase(VMUtilsTestBase):
    """Tests for _stream_disk: writing an image stream onto a block device.

    NOTE(review): imports Python-2-only `__builtin__` to mock out open();
    this will need `six.moves.builtins` (or `builtins`) for Python 3.
    """

    def setUp(self):
        import __builtin__
        super(StreamDiskTestCase, self).setUp()
        self.mox.StubOutWithMock(vm_utils.utils, 'make_dev_path')
        self.mox.StubOutWithMock(vm_utils.utils, 'temporary_chown')
        self.mox.StubOutWithMock(vm_utils, '_write_partition')

        # NOTE(matelakat): This might hide the fail reason, as test runners
        # are unhappy with a mocked out open.
        self.mox.StubOutWithMock(__builtin__, 'open')
        self.image_service_func = self.mox.CreateMockAnything()

    def test_non_ami(self):
        """Non-AMI images are streamed from offset 0, with no partitioning."""
        fake_file = FakeFile()

        vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
        vm_utils.utils.temporary_chown(
            'some_path').AndReturn(contextified(None))
        open('some_path', 'wb').AndReturn(contextified(fake_file))
        self.image_service_func(fake_file)

        self.mox.ReplayAll()

        vm_utils._stream_disk("session", self.image_service_func,
                              vm_utils.ImageType.KERNEL, None, 'dev')

        self.assertEqual([(fake_file.seek, 0)], fake_file._file_operations)

    def test_ami_disk(self):
        """AMI disks get a partition table written first, then the image is
        streamed starting after the MBR.
        """
        fake_file = FakeFile()

        vm_utils._write_partition("session", 100, 'dev')
        vm_utils.utils.make_dev_path('dev').AndReturn('some_path')
        vm_utils.utils.temporary_chown(
            'some_path').AndReturn(contextified(None))
        open('some_path', 'wb').AndReturn(contextified(fake_file))
        self.image_service_func(fake_file)

        self.mox.ReplayAll()

        vm_utils._stream_disk("session", self.image_service_func,
                              vm_utils.ImageType.DISK, 100, 'dev')

        self.assertEqual(
            [(fake_file.seek, vm_utils.MBR_SIZE_BYTES)],
            fake_file._file_operations)
-
-
class VMUtilsSRPath(VMUtilsTestBase):
    """Tests for get_sr_path: explicit PBD device_config path vs. the
    default /var/run/sr-mount/<uuid> fallback.
    """

    def setUp(self):
        super(VMUtilsSRPath, self).setUp()
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(instance_name_template='%d',
                   firewall_driver='nova.virt.xenapi.firewall.'
                                   'Dom0IptablesFirewallDriver')
        self.flags(connection_url='test_url',
                   connection_password='test_pass',
                   group='xenserver')
        stubs.stubout_session(self.stubs, fake.SessionBase)
        driver = xenapi_conn.XenAPIDriver(False)
        self.session = driver._session
        self.session.is_local_connection = False

    def test_defined(self):
        """A PBD with an explicit device_config path wins."""
        self.mox.StubOutWithMock(vm_utils, "safe_find_sr")
        self.mox.StubOutWithMock(self.session, "call_xenapi")

        vm_utils.safe_find_sr(self.session).AndReturn("sr_ref")
        self.session.host_ref = "host_ref"
        self.session.call_xenapi(
            'PBD.get_all_records_where',
            'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn(
            {'pbd_ref': {'device_config': {'path': 'sr_path'}}})

        self.mox.ReplayAll()
        self.assertEqual(vm_utils.get_sr_path(self.session), "sr_path")

    def test_default(self):
        """No path in device_config: fall back to the SR uuid mount point."""
        self.mox.StubOutWithMock(vm_utils, "safe_find_sr")
        self.mox.StubOutWithMock(self.session, "call_xenapi")

        vm_utils.safe_find_sr(self.session).AndReturn("sr_ref")
        self.session.host_ref = "host_ref"
        self.session.call_xenapi(
            'PBD.get_all_records_where',
            'field "host"="host_ref" and field "SR"="sr_ref"').AndReturn(
            {'pbd_ref': {'device_config': {}}})
        self.session.call_xenapi("SR.get_record", "sr_ref").AndReturn(
            {'uuid': 'sr_uuid', 'type': 'ext'})
        self.mox.ReplayAll()
        self.assertEqual(vm_utils.get_sr_path(self.session),
                         "/var/run/sr-mount/sr_uuid")
-
-
class CreateKernelRamdiskTestCase(VMUtilsTestBase):
    """Tests for create_kernel_and_ramdisk: dom0-plugin cache lookup with a
    glance fetch fallback.
    """

    def setUp(self):
        super(CreateKernelRamdiskTestCase, self).setUp()
        self.context = "context"
        self.session = FakeSession()
        self.instance = {"kernel_id": None, "ramdisk_id": None}
        self.name_label = "name"
        self.mox.StubOutWithMock(self.session, "call_plugin")
        # uuid4 is stubbed so the plugin args are deterministic.
        self.mox.StubOutWithMock(uuid, "uuid4")
        self.mox.StubOutWithMock(vm_utils, "_fetch_disk_image")

    def test_create_kernel_and_ramdisk_no_create(self):
        """No kernel_id/ramdisk_id on the instance: nothing is created."""
        self.mox.ReplayAll()
        result = vm_utils.create_kernel_and_ramdisk(
            self.context, self.session, self.instance, self.name_label)
        self.assertEqual((None, None), result)

    def test_create_kernel_and_ramdisk_create_both_cached(self):
        """Both images are served by the dom0 kernel plugin cache."""
        kernel_id = "kernel"
        ramdisk_id = "ramdisk"
        self.instance["kernel_id"] = kernel_id
        self.instance["ramdisk_id"] = ramdisk_id

        args_kernel = {}
        args_kernel['cached-image'] = kernel_id
        args_kernel['new-image-uuid'] = "fake_uuid1"
        uuid.uuid4().AndReturn("fake_uuid1")
        self.session.call_plugin('kernel', 'create_kernel_ramdisk',
                                 args_kernel).AndReturn("k")

        args_ramdisk = {}
        args_ramdisk['cached-image'] = ramdisk_id
        args_ramdisk['new-image-uuid'] = "fake_uuid2"
        uuid.uuid4().AndReturn("fake_uuid2")
        self.session.call_plugin('kernel', 'create_kernel_ramdisk',
                                 args_ramdisk).AndReturn("r")

        self.mox.ReplayAll()
        result = vm_utils.create_kernel_and_ramdisk(
            self.context, self.session, self.instance, self.name_label)
        self.assertEqual(("k", "r"), result)

    def test_create_kernel_and_ramdisk_create_kernel_not_cached(self):
        """Plugin cache miss (empty return) falls back to _fetch_disk_image."""
        kernel_id = "kernel"
        self.instance["kernel_id"] = kernel_id

        args_kernel = {}
        args_kernel['cached-image'] = kernel_id
        args_kernel['new-image-uuid'] = "fake_uuid1"
        uuid.uuid4().AndReturn("fake_uuid1")
        self.session.call_plugin('kernel', 'create_kernel_ramdisk',
                                 args_kernel).AndReturn("")

        kernel = {"kernel": {"file": "k"}}
        vm_utils._fetch_disk_image(
            self.context, self.session, self.instance,
            self.name_label, kernel_id, 0).AndReturn(kernel)

        self.mox.ReplayAll()
        result = vm_utils.create_kernel_and_ramdisk(
            self.context, self.session, self.instance, self.name_label)
        self.assertEqual(("k", None), result)
-
-
class ScanSrTestCase(VMUtilsTestBase):
    """SR.scan behaviour: one-shot success, immediate failure on unknown
    errors, and exponential-backoff retry on SR_BACKEND_FAILURE_40.
    """

    @mock.patch.object(vm_utils, "_scan_sr")
    @mock.patch.object(vm_utils, "safe_find_sr")
    def test_scan_default_sr(self, mock_safe_find_sr, mock_scan_sr):
        mock_safe_find_sr.return_value = "sr_ref"

        found = vm_utils.scan_default_sr("fake_session")

        self.assertEqual("sr_ref", found)
        mock_scan_sr.assert_called_once_with("fake_session", "sr_ref")

    def test_scan_sr_works(self):
        fake_session = mock.Mock()

        vm_utils._scan_sr(fake_session, "sr_ref")

        fake_session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")

    def test_scan_sr_unknown_error_fails_once(self):
        # Errors that are not the known retryable failure bail immediately.
        fake_session = mock.Mock()
        fake_session.call_xenapi.side_effect = test.TestingException

        self.assertRaises(test.TestingException,
                          vm_utils._scan_sr, fake_session, "sr_ref")
        fake_session.call_xenapi.assert_called_once_with('SR.scan', "sr_ref")

    @mock.patch.object(greenthread, 'sleep')
    def test_scan_sr_known_error_retries_then_throws(self, mock_sleep):
        fake_session = mock.Mock()

        class FakeException(Exception):
            details = ['SR_BACKEND_FAILURE_40', "", "", ""]

        fake_session.XenAPI.Failure = FakeException
        fake_session.call_xenapi.side_effect = FakeException

        self.assertRaises(FakeException,
                          vm_utils._scan_sr, fake_session, "sr_ref")

        # Four attempts total, sleeping 2s, 4s, 8s between them.
        fake_session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
        self.assertEqual(4, fake_session.call_xenapi.call_count)
        mock_sleep.assert_has_calls(
            [mock.call(2), mock.call(4), mock.call(8)])

    @mock.patch.object(greenthread, 'sleep')
    def test_scan_sr_known_error_retries_then_succeeds(self, mock_sleep):
        fake_session = mock.Mock()

        class FakeException(Exception):
            details = ['SR_BACKEND_FAILURE_40', "", "", ""]

        fake_session.XenAPI.Failure = FakeException

        attempts = {'count': 0}

        def flaky_call_xenapi(*args):
            # Fail on the first attempt only; succeed on the retry.
            attempts['count'] += 1
            if attempts['count'] != 2:
                raise FakeException()

        fake_session.call_xenapi.side_effect = flaky_call_xenapi

        vm_utils._scan_sr(fake_session, "sr_ref")

        fake_session.call_xenapi.assert_called_with('SR.scan', "sr_ref")
        self.assertEqual(2, fake_session.call_xenapi.call_count)
        mock_sleep.assert_called_once_with(2)
-
-
@mock.patch.object(flavors, 'extract_flavor',
                   return_value={
                       'memory_mb': 1024,
                       'vcpus': 1,
                       'vcpu_weight': 1.0,
                   })
class CreateVmTestCase(VMUtilsTestBase):
    """Tests for create_vm/destroy_vm; the class decorator supplies a
    fixed flavor (1 vCPU, 1 GiB) to every test method.
    """

    def test_vss_provider(self, mock_extract):
        """Windows guests get vm-data/allowvssprovider=false and the
        configured vcpu_pin_set mask; the full VM.create record is checked.
        """
        self.flags(vcpu_pin_set="2,3")
        session = _get_fake_session()
        instance = {
            "uuid": "uuid", "os_type": "windows"
        }

        vm_utils.create_vm(session, instance, "label",
                           "kernel", "ramdisk")

        # Expected record passed verbatim to VM.create. Memory values are
        # 1024 MB expressed in bytes; 'mask' reflects vcpu_pin_set.
        vm_rec = {
            'VCPUs_params': {'cap': '0', 'mask': '2,3', 'weight': '1.0'},
            'PV_args': '',
            'memory_static_min': '0',
            'ha_restart_priority': '',
            'HVM_boot_policy': 'BIOS order',
            'PV_bootloader': '', 'tags': [],
            'VCPUs_max': '1',
            'memory_static_max': '1073741824',
            'actions_after_shutdown': 'destroy',
            'memory_dynamic_max': '1073741824',
            'user_version': '0',
            'xenstore_data': {'vm-data/allowvssprovider': 'false'},
            'blocked_operations': {},
            'is_a_template': False,
            'name_description': '',
            'memory_dynamic_min': '1073741824',
            'actions_after_crash': 'destroy',
            'memory_target': '1073741824',
            'PV_ramdisk': '',
            'PV_bootloader_args': '',
            'PCI_bus': '',
            'other_config': {'nova_uuid': 'uuid'},
            'name_label': 'label',
            'actions_after_reboot': 'restart',
            'VCPUs_at_startup': '1',
            'HVM_boot_params': {'order': 'dc'},
            'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
                         'timeoffset': '0', 'viridian': 'true',
                         'acpi': 'true'},
            'PV_legacy_args': '',
            'PV_kernel': '',
            'affinity': '',
            'recommendations': '',
            'ha_always_run': False
        }
        session.call_xenapi.assert_called_once_with("VM.create", vm_rec)

    def test_invalid_cpu_mask_raises(self, mock_extract):
        """A non-numeric vcpu_pin_set is rejected before VM.create."""
        self.flags(vcpu_pin_set="asdf")
        session = mock.Mock()
        instance = {
            "uuid": "uuid",
        }
        self.assertRaises(exception.Invalid,
                          vm_utils.create_vm,
                          session, instance, "label",
                          "kernel", "ramdisk")

    def test_destroy_vm(self, mock_extract):
        session = mock.Mock()
        instance = {
            "uuid": "uuid",
        }

        vm_utils.destroy_vm(session, instance, "vm_ref")

        session.VM.destroy.assert_called_once_with("vm_ref")

    def test_destroy_vm_silently_fails(self, mock_extract):
        """XenAPI failures during destroy are swallowed, not raised."""
        session = mock.Mock()
        exc = test.TestingException()
        session.XenAPI.Failure = test.TestingException
        session.VM.destroy.side_effect = exc
        instance = {
            "uuid": "uuid",
        }

        vm_utils.destroy_vm(session, instance, "vm_ref")

        session.VM.destroy.assert_called_once_with("vm_ref")
-
-
class DetermineVmModeTestCase(VMUtilsTestBase):
    """determine_vm_mode: an explicit vm_mode wins, then os_type decides,
    then the disk image type, then HVM as the final default.
    """

    def _assert_mode(self, expected, instance, disk_image_type=None):
        # Shared assertion helper so each test states only its inputs.
        self.assertEqual(
            expected, vm_utils.determine_vm_mode(instance, disk_image_type))

    def test_determine_vm_mode_returns_xen_mode(self):
        self._assert_mode(vm_mode.XEN, {"vm_mode": "xen"})

    def test_determine_vm_mode_returns_hvm_mode(self):
        self._assert_mode(vm_mode.HVM, {"vm_mode": "hvm"})

    def test_determine_vm_mode_returns_xen_for_linux(self):
        self._assert_mode(vm_mode.XEN,
                          {"vm_mode": None, "os_type": "linux"})

    def test_determine_vm_mode_returns_hvm_for_windows(self):
        self._assert_mode(vm_mode.HVM,
                          {"vm_mode": None, "os_type": "windows"})

    def test_determine_vm_mode_returns_hvm_by_default(self):
        self._assert_mode(vm_mode.HVM, {"vm_mode": None, "os_type": None})

    def test_determine_vm_mode_returns_xen_for_VHD(self):
        self._assert_mode(vm_mode.XEN, {"vm_mode": None, "os_type": None},
                          vm_utils.ImageType.DISK_VHD)

    def test_determine_vm_mode_returns_xen_for_DISK(self):
        self._assert_mode(vm_mode.XEN, {"vm_mode": None, "os_type": None},
                          vm_utils.ImageType.DISK)
-
-
class CallXenAPIHelpersTestCase(VMUtilsTestBase):
    """Thin wrappers around session.call_xenapi, plus the VDI virtual-size
    update logic (grow-only, never shrink).
    """

    def _stub_session(self, xenapi_result):
        # Mock session whose call_xenapi always yields xenapi_result.
        session = mock.Mock()
        session.call_xenapi.return_value = xenapi_result
        return session

    def test_vm_get_vbd_refs(self):
        session = self._stub_session("foo")
        self.assertEqual("foo", vm_utils._vm_get_vbd_refs(session, "vm_ref"))
        session.call_xenapi.assert_called_once_with("VM.get_VBDs", "vm_ref")

    def test_vbd_get_rec(self):
        session = self._stub_session("foo")
        self.assertEqual("foo", vm_utils._vbd_get_rec(session, "vbd_ref"))
        session.call_xenapi.assert_called_once_with("VBD.get_record",
                                                    "vbd_ref")

    def test_vdi_get_rec(self):
        session = self._stub_session("foo")
        self.assertEqual("foo", vm_utils._vdi_get_rec(session, "vdi_ref"))
        session.call_xenapi.assert_called_once_with("VDI.get_record",
                                                    "vdi_ref")

    def test_vdi_snapshot(self):
        session = self._stub_session("foo")
        self.assertEqual("foo", vm_utils._vdi_snapshot(session, "vdi_ref"))
        session.call_xenapi.assert_called_once_with("VDI.snapshot",
                                                    "vdi_ref", {})

    def test_vdi_get_virtual_size(self):
        # XenAPI returns the size as a string; the wrapper converts to int.
        session = self._stub_session("123")
        self.assertEqual(123, vm_utils._vdi_get_virtual_size(session, "ref"))
        session.call_xenapi.assert_called_once_with("VDI.get_virtual_size",
                                                    "ref")

    @mock.patch.object(vm_utils, '_get_resize_func_name')
    def test_vdi_resize(self, mock_get_resize_func_name):
        session = mock.Mock()
        mock_get_resize_func_name.return_value = "VDI.fake"
        vm_utils._vdi_resize(session, "ref", 123)
        # The new size is stringified for the XenAPI call.
        session.call_xenapi.assert_called_once_with("VDI.fake", "ref", "123")

    @mock.patch.object(vm_utils, '_vdi_resize')
    @mock.patch.object(vm_utils, '_vdi_get_virtual_size')
    def test_update_vdi_virtual_size_works(self, mock_get_size, mock_resize):
        # One byte under 1 GiB; requesting 1 GiB must trigger a resize up.
        mock_get_size.return_value = (1024 ** 3) - 1
        fake_instance = {"uuid": "a"}

        vm_utils.update_vdi_virtual_size("s", fake_instance, "ref", 1)

        mock_get_size.assert_called_once_with("s", "ref")
        mock_resize.assert_called_once_with("s", "ref", 1024 ** 3)

    @mock.patch.object(vm_utils, '_vdi_resize')
    @mock.patch.object(vm_utils, '_vdi_get_virtual_size')
    def test_update_vdi_virtual_size_skips_resize_down(self, mock_get_size,
                                                       mock_resize):
        # Already exactly the requested size: no resize call at all.
        mock_get_size.return_value = 1024 ** 3
        fake_instance = {"uuid": "a"}

        vm_utils.update_vdi_virtual_size("s", fake_instance, "ref", 1)

        mock_get_size.assert_called_once_with("s", "ref")
        self.assertFalse(mock_resize.called)

    @mock.patch.object(vm_utils, '_vdi_resize')
    @mock.patch.object(vm_utils, '_vdi_get_virtual_size')
    def test_update_vdi_virtual_size_raise_if_disk_big(self, mock_get_size,
                                                       mock_resize):
        # Disk already larger than requested: shrinking is an error.
        mock_get_size.return_value = 1024 ** 3 + 1
        fake_instance = {"uuid": "a"}

        self.assertRaises(exception.ResizeError,
                          vm_utils.update_vdi_virtual_size,
                          "s", fake_instance, "ref", 1)

        mock_get_size.assert_called_once_with("s", "ref")
        self.assertFalse(mock_resize.called)
-
-
@mock.patch.object(vm_utils, '_vdi_get_rec')
@mock.patch.object(vm_utils, '_vbd_get_rec')
@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
class GetVdiForVMTestCase(VMUtilsTestBase):
    """Tests for get_vdi_for_vm_safely; all three VBD/VDI lookup helpers
    are patched at class level (decorator args arrive bottom-up).
    """

    def test_get_vdi_for_vm_safely(self, vm_get_vbd_refs,
                                   vbd_get_rec, vdi_get_rec):
        """The first VBD matching the default userdevice '0' is returned."""
        session = "session"

        vm_get_vbd_refs.return_value = ["a", "b"]
        vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}
        vdi_get_rec.return_value = {}

        result = vm_utils.get_vdi_for_vm_safely(session, "vm_ref")
        self.assertEqual(('vdi_ref', {}), result)

        vm_get_vbd_refs.assert_called_once_with(session, "vm_ref")
        # Only the first VBD is inspected because it already matched.
        vbd_get_rec.assert_called_once_with(session, "a")
        vdi_get_rec.assert_called_once_with(session, "vdi_ref")

    def test_get_vdi_for_vm_safely_fails(self, vm_get_vbd_refs,
                                         vbd_get_rec, vdi_get_rec):
        """No VBD with the requested userdevice: every VBD is checked and
        the helper raises without fetching any VDI record.
        """
        session = "session"

        vm_get_vbd_refs.return_value = ["a", "b"]
        vbd_get_rec.return_value = {'userdevice': '0', 'VDI': 'vdi_ref'}

        self.assertRaises(exception.NovaException,
                          vm_utils.get_vdi_for_vm_safely,
                          session, "vm_ref", userdevice='1')

        self.assertEqual([], vdi_get_rec.call_args_list)
        self.assertEqual(2, len(vbd_get_rec.call_args_list))
-
-
@mock.patch.object(vm_utils, '_vdi_get_uuid')
@mock.patch.object(vm_utils, '_vbd_get_rec')
@mock.patch.object(vm_utils, '_vm_get_vbd_refs')
class GetAllVdiForVMTestCase(VMUtilsTestBase):
    """Tests for get_all_vdi_uuids_for_vm, with class-level patches of the
    VBD/VDI lookup helpers (decorator args arrive bottom-up).
    """

    def _setup_get_all_vdi_uuids_for_vm(self, vm_get_vbd_refs,
                                        vbd_get_rec, vdi_get_uuid):
        """Wire the mocks so a VM has VBDs at userdevices '0' and '2',
        each pointing at a VDI whose uuid mirrors its ref.
        """
        def fake_vbd_get_rec(session, vbd_ref):
            return {'userdevice': vbd_ref, 'VDI': "vdi_ref_%s" % vbd_ref}

        def fake_vdi_get_uuid(session, vdi_ref):
            return vdi_ref

        vm_get_vbd_refs.return_value = ["0", "2"]
        vbd_get_rec.side_effect = fake_vbd_get_rec
        vdi_get_uuid.side_effect = fake_vdi_get_uuid

    def test_get_all_vdi_uuids_for_vm_works(self, vm_get_vbd_refs,
                                            vbd_get_rec, vdi_get_uuid):
        self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
                                             vbd_get_rec, vdi_get_uuid)

        result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref")
        expected = ['vdi_ref_0', 'vdi_ref_2']
        self.assertEqual(expected, list(result))

    def test_get_all_vdi_uuids_for_vm_finds_none(self, vm_get_vbd_refs,
                                                 vbd_get_rec, vdi_get_uuid):
        self._setup_get_all_vdi_uuids_for_vm(vm_get_vbd_refs,
                                             vbd_get_rec, vdi_get_uuid)

        # min_userdevice=1 filters out the device-0 VBD.
        result = vm_utils.get_all_vdi_uuids_for_vm('session', "vm_ref",
                                                   min_userdevice=1)
        expected = ["vdi_ref_2"]
        self.assertEqual(expected, list(result))
-
-
-class GetAllVdisTestCase(VMUtilsTestBase):
- def test_get_all_vdis_in_sr(self):
-
- def fake_get_rec(record_type, ref):
- if ref == "2":
- return "vdi_rec_2"
-
- session = mock.Mock()
- session.call_xenapi.return_value = ["1", "2"]
- session.get_rec.side_effect = fake_get_rec
-
- sr_ref = "sr_ref"
- actual = list(vm_utils._get_all_vdis_in_sr(session, sr_ref))
- self.assertEqual(actual, [('2', 'vdi_rec_2')])
-
- session.call_xenapi.assert_called_once_with("SR.get_VDIs", sr_ref)
-
-
-class VDIAttachedHere(VMUtilsTestBase):
- @mock.patch.object(vm_utils, 'destroy_vbd')
- @mock.patch.object(vm_utils, '_get_this_vm_ref')
- @mock.patch.object(vm_utils, 'create_vbd')
- @mock.patch.object(vm_utils, '_remap_vbd_dev')
- @mock.patch.object(vm_utils, '_wait_for_device')
- @mock.patch.object(utils, 'execute')
- def test_sync_called(self, mock_execute, mock_wait_for_device,
- mock_remap_vbd_dev, mock_create_vbd,
- mock_get_this_vm_ref, mock_destroy_vbd):
- session = _get_fake_session()
- with vm_utils.vdi_attached_here(session, 'vdi_ref'):
- pass
- mock_execute.assert_called_with('sync', run_as_root=True)
-
-
-class SnapshotAttachedHereTestCase(VMUtilsTestBase):
- @mock.patch.object(vm_utils, '_snapshot_attached_here_impl')
- def test_snapshot_attached_here(self, mock_impl):
- def fake_impl(session, instance, vm_ref, label, userdevice,
- post_snapshot_callback):
- self.assertEqual("session", session)
- self.assertEqual("instance", instance)
- self.assertEqual("vm_ref", vm_ref)
- self.assertEqual("label", label)
- self.assertEqual('0', userdevice)
- self.assertIsNone(post_snapshot_callback)
- yield "fake"
-
- mock_impl.side_effect = fake_impl
-
- with vm_utils.snapshot_attached_here("session", "instance", "vm_ref",
- "label") as result:
- self.assertEqual("fake", result)
-
- mock_impl.assert_called_once_with("session", "instance", "vm_ref",
- "label", '0', None)
-
- @mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
- @mock.patch.object(vm_utils, 'safe_destroy_vdis')
- @mock.patch.object(vm_utils, '_walk_vdi_chain')
- @mock.patch.object(vm_utils, '_wait_for_vhd_coalesce')
- @mock.patch.object(vm_utils, '_vdi_get_uuid')
- @mock.patch.object(vm_utils, '_vdi_snapshot')
- @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
- def test_snapshot_attached_here_impl(self, mock_get_vdi_for_vm_safely,
- mock_vdi_snapshot, mock_vdi_get_uuid,
- mock_wait_for_vhd_coalesce, mock_walk_vdi_chain,
- mock_safe_destroy_vdis, mock_delete_snapshots_in_vdi_chain):
- session = "session"
- instance = {"uuid": "uuid"}
- mock_callback = mock.Mock()
-
- mock_get_vdi_for_vm_safely.return_value = ("vdi_ref",
- {"SR": "sr_ref",
- "uuid": "vdi_uuid"})
- mock_vdi_snapshot.return_value = "snap_ref"
- mock_vdi_get_uuid.return_value = "snap_uuid"
- mock_walk_vdi_chain.return_value = [{"uuid": "a"}, {"uuid": "b"}]
-
- try:
- with vm_utils.snapshot_attached_here(session, instance, "vm_ref",
- "label", '2', mock_callback) as result:
- self.assertEqual(["a", "b"], result)
- raise test.TestingException()
- self.assertTrue(False)
- except test.TestingException:
- pass
-
- mock_get_vdi_for_vm_safely.assert_called_once_with(session, "vm_ref",
- '2')
- mock_vdi_snapshot.assert_called_once_with(session, "vdi_ref")
- mock_wait_for_vhd_coalesce.assert_called_once_with(session, instance,
- "sr_ref", "vdi_ref", ['a', 'b'])
- mock_vdi_get_uuid.assert_called_once_with(session, "snap_ref")
- mock_walk_vdi_chain.assert_has_calls([mock.call(session, "vdi_uuid"),
- mock.call(session, "snap_uuid")])
- mock_callback.assert_called_once_with(
- task_state="image_pending_upload")
- mock_safe_destroy_vdis.assert_called_once_with(session, ["snap_ref"])
- mock_delete_snapshots_in_vdi_chain.assert_called_once_with(session,
- instance, ['a', 'b'], "sr_ref")
-
- @mock.patch.object(greenthread, 'sleep')
- def test_wait_for_vhd_coalesce_leaf_node(self, mock_sleep):
- instance = {"uuid": "fake"}
- vm_utils._wait_for_vhd_coalesce("session", instance,
- "sr_ref", "vdi_ref", ["uuid"])
- self.assertFalse(mock_sleep.called)
-
- @mock.patch.object(vm_utils, '_count_children')
- @mock.patch.object(greenthread, 'sleep')
- def test_wait_for_vhd_coalesce_parent_snapshot(self, mock_sleep,
- mock_count):
- mock_count.return_value = 2
- instance = {"uuid": "fake"}
-
- vm_utils._wait_for_vhd_coalesce("session", instance,
- "sr_ref", "vdi_ref", ["uuid1", "uuid2"])
-
- self.assertFalse(mock_sleep.called)
- self.assertTrue(mock_count.called)
-
- @mock.patch.object(greenthread, 'sleep')
- @mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
- @mock.patch.object(vm_utils, '_count_children')
- @mock.patch.object(vm_utils, '_scan_sr')
- def test_wait_for_vhd_coalesce_raises(self, mock_scan_sr,
- mock_count, mock_get_vhd_parent_uuid, mock_sleep):
- mock_count.return_value = 1
- instance = {"uuid": "fake"}
-
- self.assertRaises(exception.NovaException,
- vm_utils._wait_for_vhd_coalesce, "session", instance,
- "sr_ref", "vdi_ref", ["uuid1", "uuid2"])
-
- self.assertTrue(mock_count.called)
- self.assertEqual(20, mock_sleep.call_count)
- self.assertEqual(20, mock_scan_sr.call_count)
-
- @mock.patch.object(greenthread, 'sleep')
- @mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
- @mock.patch.object(vm_utils, '_count_children')
- @mock.patch.object(vm_utils, '_scan_sr')
- def test_wait_for_vhd_coalesce_success(self, mock_scan_sr,
- mock_count, mock_get_vhd_parent_uuid, mock_sleep):
- mock_count.return_value = 1
- instance = {"uuid": "fake"}
- mock_get_vhd_parent_uuid.side_effect = ["bad", "uuid2"]
-
- vm_utils._wait_for_vhd_coalesce("session", instance,
- "sr_ref", "vdi_ref", ["uuid1", "uuid2"])
-
- self.assertEqual(1, mock_sleep.call_count)
- self.assertEqual(2, mock_scan_sr.call_count)
-
- @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
- def test_count_children(self, mock_get_all_vdis_in_sr):
- vdis = [('child1', {'sm_config': {'vhd-parent': 'parent1'}}),
- ('child2', {'sm_config': {'vhd-parent': 'parent2'}}),
- ('child3', {'sm_config': {'vhd-parent': 'parent1'}})]
- mock_get_all_vdis_in_sr.return_value = vdis
- self.assertEqual(2, vm_utils._count_children('session',
- 'parent1', 'sr'))
-
-
-class ImportMigratedDisksTestCase(VMUtilsTestBase):
- @mock.patch.object(vm_utils, '_import_migrate_ephemeral_disks')
- @mock.patch.object(vm_utils, '_import_migrated_root_disk')
- def test_import_all_migrated_disks(self, mock_root, mock_ephemeral):
- session = "session"
- instance = "instance"
- mock_root.return_value = "root_vdi"
- mock_ephemeral.return_value = ["a", "b"]
-
- result = vm_utils.import_all_migrated_disks(session, instance)
-
- expected = {'root': 'root_vdi', 'ephemerals': ["a", "b"]}
- self.assertEqual(expected, result)
- mock_root.assert_called_once_with(session, instance)
- mock_ephemeral.assert_called_once_with(session, instance)
-
- @mock.patch.object(vm_utils, '_import_migrated_vhds')
- def test_import_migrated_root_disk(self, mock_migrate):
- mock_migrate.return_value = "foo"
- instance = {"uuid": "uuid", "name": "name"}
-
- result = vm_utils._import_migrated_root_disk("s", instance)
-
- self.assertEqual("foo", result)
- mock_migrate.assert_called_once_with("s", instance, "uuid", "root",
- "name")
-
- @mock.patch.object(vm_utils, '_import_migrated_vhds')
- def test_import_migrate_ephemeral_disks(self, mock_migrate):
- mock_migrate.return_value = "foo"
- instance = {"uuid": "uuid", "name": "name", "ephemeral_gb": 4000}
-
- result = vm_utils._import_migrate_ephemeral_disks("s", instance)
-
- self.assertEqual({'4': 'foo', '5': 'foo'}, result)
- expected_calls = [mock.call("s", instance, "uuid_ephemeral_1",
- "ephemeral", "name ephemeral (1)"),
- mock.call("s", instance, "uuid_ephemeral_2",
- "ephemeral", "name ephemeral (2)")]
- self.assertEqual(expected_calls, mock_migrate.call_args_list)
-
- @mock.patch.object(vm_utils, '_set_vdi_info')
- @mock.patch.object(vm_utils, 'scan_default_sr')
- @mock.patch.object(vm_utils, 'get_sr_path')
- def test_import_migrated_vhds(self, mock_get_sr_path, mock_scan_sr,
- mock_set_info):
- session = mock.Mock()
- instance = {"uuid": "uuid"}
- session.call_plugin_serialized.return_value = {"root": {"uuid": "a"}}
- session.call_xenapi.return_value = "vdi_ref"
- mock_get_sr_path.return_value = "sr_path"
-
- result = vm_utils._import_migrated_vhds(session, instance,
- 'chain_label', 'disk_type', 'vdi_label')
-
- expected = {'uuid': "a", 'ref': "vdi_ref"}
- self.assertEqual(expected, result)
- mock_get_sr_path.assert_called_once_with(session)
- session.call_plugin_serialized.assert_called_once_with('migration',
- 'move_vhds_into_sr', instance_uuid='chain_label',
- sr_path='sr_path', uuid_stack=mock.ANY)
- mock_scan_sr.assert_called_once_with(session)
- session.call_xenapi.assert_called_once_with('VDI.get_by_uuid', 'a')
- mock_set_info.assert_called_once_with(session, 'vdi_ref', 'disk_type',
- 'vdi_label', 'disk_type', instance)
-
- def test_get_vhd_parent_uuid_rec_provided(self):
- session = mock.Mock()
- vdi_ref = 'vdi_ref'
- vdi_rec = {'sm_config': {}}
- self.assertIsNone(vm_utils._get_vhd_parent_uuid(session,
- vdi_ref,
- vdi_rec))
- self.assertFalse(session.call_xenapi.called)
-
-
-class MigrateVHDTestCase(VMUtilsTestBase):
- def _assert_transfer_called(self, session, label):
- session.call_plugin_serialized.assert_called_once_with(
- 'migration', 'transfer_vhd', instance_uuid=label, host="dest",
- vdi_uuid="vdi_uuid", sr_path="sr_path", seq_num=2)
-
- def test_migrate_vhd_root(self):
- session = mock.Mock()
- instance = {"uuid": "a"}
-
- vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
- "sr_path", 2)
-
- self._assert_transfer_called(session, "a")
-
- def test_migrate_vhd_ephemeral(self):
- session = mock.Mock()
- instance = {"uuid": "a"}
-
- vm_utils.migrate_vhd(session, instance, "vdi_uuid", "dest",
- "sr_path", 2, 2)
-
- self._assert_transfer_called(session, "a_ephemeral_2")
-
- def test_migrate_vhd_converts_exceptions(self):
- session = mock.Mock()
- session.XenAPI.Failure = test.TestingException
- session.call_plugin_serialized.side_effect = test.TestingException()
- instance = {"uuid": "a"}
-
- self.assertRaises(exception.MigrationError, vm_utils.migrate_vhd,
- session, instance, "vdi_uuid", "dest", "sr_path", 2)
- self._assert_transfer_called(session, "a")
-
-
-class StripBaseMirrorTestCase(VMUtilsTestBase):
- def test_strip_base_mirror_from_vdi_works(self):
- session = mock.Mock()
- vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
- session.call_xenapi.assert_called_once_with(
- "VDI.remove_from_sm_config", "vdi_ref", "base_mirror")
-
- def test_strip_base_mirror_from_vdi_hides_error(self):
- session = mock.Mock()
- session.XenAPI.Failure = test.TestingException
- session.call_xenapi.side_effect = test.TestingException()
-
- vm_utils._try_strip_base_mirror_from_vdi(session, "vdi_ref")
-
- session.call_xenapi.assert_called_once_with(
- "VDI.remove_from_sm_config", "vdi_ref", "base_mirror")
-
- @mock.patch.object(vm_utils, '_try_strip_base_mirror_from_vdi')
- def test_strip_base_mirror_from_vdis(self, mock_strip):
- def call_xenapi(method, arg):
- if method == "VM.get_VBDs":
- return ['VBD_ref_1', 'VBD_ref_2']
- if method == "VBD.get_VDI":
- return 'VDI' + arg[3:]
- return "Unexpected call_xenapi: %s.%s" % (method, arg)
-
- session = mock.Mock()
- session.call_xenapi.side_effect = call_xenapi
-
- vm_utils.strip_base_mirror_from_vdis(session, "vm_ref")
-
- expected = [mock.call('VM.get_VBDs', "vm_ref"),
- mock.call('VBD.get_VDI', "VBD_ref_1"),
- mock.call('VBD.get_VDI', "VBD_ref_2")]
- self.assertEqual(expected, session.call_xenapi.call_args_list)
-
- expected = [mock.call(session, "VDI_ref_1"),
- mock.call(session, "VDI_ref_2")]
- self.assertEqual(expected, mock_strip.call_args_list)
-
-
-class DeviceIdTestCase(VMUtilsTestBase):
- def test_device_id_is_none_if_not_specified_in_meta_data(self):
- image_meta = {}
- session = mock.Mock()
- session.product_version = (6, 1, 0)
- self.assertIsNone(vm_utils.get_vm_device_id(session, image_meta))
-
- def test_get_device_id_if_hypervisor_version_is_greater_than_6_1(self):
- image_meta = {'xenapi_device_id': '0002'}
- session = mock.Mock()
- session.product_version = (6, 2, 0)
- self.assertEqual('0002',
- vm_utils.get_vm_device_id(session, image_meta))
- session.product_version = (6, 3, 1)
- self.assertEqual('0002',
- vm_utils.get_vm_device_id(session, image_meta))
-
- def test_raise_exception_if_device_id_not_supported_by_hyp_version(self):
- image_meta = {'xenapi_device_id': '0002'}
- session = mock.Mock()
- session.product_version = (6, 0)
- exc = self.assertRaises(exception.NovaException,
- vm_utils.get_vm_device_id, session, image_meta)
- self.assertEqual("Device id 0002 specified is not supported by "
- "hypervisor version (6, 0)", exc.message)
- session.product_version = ('6a')
- exc = self.assertRaises(exception.NovaException,
- vm_utils.get_vm_device_id, session, image_meta)
- self.assertEqual("Device id 0002 specified is not supported by "
- "hypervisor version 6a", exc.message)
-
-
-class CreateVmRecordTestCase(VMUtilsTestBase):
- @mock.patch.object(flavors, 'extract_flavor')
- def test_create_vm_record_linux(self, mock_extract_flavor):
- instance = {"uuid": "uuid123", "os_type": "linux"}
- self._test_create_vm_record(mock_extract_flavor, instance, False)
-
- @mock.patch.object(flavors, 'extract_flavor')
- def test_create_vm_record_windows(self, mock_extract_flavor):
- instance = {"uuid": "uuid123", "os_type": "windows"}
- self._test_create_vm_record(mock_extract_flavor, instance, True)
-
- def _test_create_vm_record(self, mock_extract_flavor, instance,
- is_viridian):
- session = _get_fake_session()
- flavor = {"memory_mb": 1024, "vcpus": 1, "vcpu_weight": 2}
- mock_extract_flavor.return_value = flavor
-
- vm_utils.create_vm(session, instance, "name", "kernel", "ramdisk",
- device_id="0002")
-
- is_viridian_str = str(is_viridian).lower()
-
- expected_vm_rec = {
- 'VCPUs_params': {'cap': '0', 'weight': '2'},
- 'PV_args': '',
- 'memory_static_min': '0',
- 'ha_restart_priority': '',
- 'HVM_boot_policy': 'BIOS order',
- 'PV_bootloader': '',
- 'tags': [],
- 'VCPUs_max': '1',
- 'memory_static_max': '1073741824',
- 'actions_after_shutdown': 'destroy',
- 'memory_dynamic_max': '1073741824',
- 'user_version': '0',
- 'xenstore_data': {'vm-data/allowvssprovider': 'false'},
- 'blocked_operations': {},
- 'is_a_template': False,
- 'name_description': '',
- 'memory_dynamic_min': '1073741824',
- 'actions_after_crash': 'destroy',
- 'memory_target': '1073741824',
- 'PV_ramdisk': '',
- 'PV_bootloader_args': '',
- 'PCI_bus': '',
- 'other_config': {'nova_uuid': 'uuid123'},
- 'name_label': 'name',
- 'actions_after_reboot': 'restart',
- 'VCPUs_at_startup': '1',
- 'HVM_boot_params': {'order': 'dc'},
- 'platform': {'nx': 'true', 'pae': 'true', 'apic': 'true',
- 'timeoffset': '0', 'viridian': is_viridian_str,
- 'acpi': 'true', 'device_id': '0002'},
- 'PV_legacy_args': '',
- 'PV_kernel': '',
- 'affinity': '',
- 'recommendations': '',
- 'ha_always_run': False}
-
- session.call_xenapi.assert_called_with('VM.create', expected_vm_rec)
-
- def test_list_vms(self):
- self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
- self.fixture.config(disable_process_locking=True,
- group='oslo_concurrency')
- self.flags(instance_name_template='%d',
- firewall_driver='nova.virt.xenapi.firewall.'
- 'Dom0IptablesFirewallDriver')
- self.flags(connection_url='test_url',
- connection_password='test_pass',
- group='xenserver')
-
- fake.create_vm("foo1", "Halted")
- vm_ref = fake.create_vm("foo2", "Running")
-
- stubs.stubout_session(self.stubs, fake.SessionBase)
- driver = xenapi_conn.XenAPIDriver(False)
-
- result = list(vm_utils.list_vms(driver._session))
-
- # Will have 3 VMs - but one is Dom0 and one is not running on the host
- self.assertEqual(len(driver._session.call_xenapi('VM.get_all')), 3)
- self.assertEqual(len(result), 1)
-
- result_keys = [key for (key, value) in result]
-
- self.assertIn(vm_ref, result_keys)
-
-
-class ChildVHDsTestCase(test.NoDBTestCase):
- all_vdis = [
- ("my-vdi-ref",
- {"uuid": "my-uuid", "sm_config": {},
- "is_a_snapshot": False, "other_config": {}}),
- ("non-parent",
- {"uuid": "uuid-1", "sm_config": {},
- "is_a_snapshot": False, "other_config": {}}),
- ("diff-parent",
- {"uuid": "uuid-1", "sm_config": {"vhd-parent": "other-uuid"},
- "is_a_snapshot": False, "other_config": {}}),
- ("child",
- {"uuid": "uuid-child", "sm_config": {"vhd-parent": "my-uuid"},
- "is_a_snapshot": False, "other_config": {}}),
- ("child-snap",
- {"uuid": "uuid-child-snap", "sm_config": {"vhd-parent": "my-uuid"},
- "is_a_snapshot": True, "other_config": {}}),
- ]
-
- @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
- def test_child_vhds_defaults(self, mock_get_all):
- mock_get_all.return_value = self.all_vdis
-
- result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"])
-
- self.assertEqual(['uuid-child', 'uuid-child-snap'], result)
-
- @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
- def test_child_vhds_only_snapshots(self, mock_get_all):
- mock_get_all.return_value = self.all_vdis
-
- result = vm_utils._child_vhds("session", "sr_ref", ["my-uuid"],
- old_snapshots_only=True)
-
- self.assertEqual(['uuid-child-snap'], result)
-
- @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
- def test_child_vhds_chain(self, mock_get_all):
- mock_get_all.return_value = self.all_vdis
-
- result = vm_utils._child_vhds("session", "sr_ref",
- ["my-uuid", "other-uuid"], old_snapshots_only=True)
-
- self.assertEqual(['uuid-child-snap'], result)
-
- def test_is_vdi_a_snapshot_works(self):
- vdi_rec = {"is_a_snapshot": True,
- "other_config": {}}
-
- self.assertTrue(vm_utils._is_vdi_a_snapshot(vdi_rec))
-
- def test_is_vdi_a_snapshot_base_images_false(self):
- vdi_rec = {"is_a_snapshot": True,
- "other_config": {"image-id": "fake"}}
-
- self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
-
- def test_is_vdi_a_snapshot_false_for_non_snapshot(self):
- vdi_rec = {"is_a_snapshot": False,
- "other_config": {}}
-
- self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
-
-
-class RemoveOldSnapshotsTestCase(test.NoDBTestCase):
-
- @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
- @mock.patch.object(vm_utils, '_walk_vdi_chain')
- @mock.patch.object(vm_utils, '_delete_snapshots_in_vdi_chain')
- def test_remove_old_snapshots(self, mock_delete, mock_walk, mock_get):
- instance = {"uuid": "fake"}
- mock_get.return_value = ("ref", {"uuid": "vdi", "SR": "sr_ref"})
- mock_walk.return_value = [{"uuid": "uuid1"}, {"uuid": "uuid2"}]
-
- vm_utils.remove_old_snapshots("session", instance, "vm_ref")
-
- mock_delete.assert_called_once_with("session", instance,
- ["uuid1", "uuid2"], "sr_ref")
- mock_get.assert_called_once_with("session", "vm_ref")
- mock_walk.assert_called_once_with("session", "vdi")
-
- @mock.patch.object(vm_utils, '_child_vhds')
- def test_delete_snapshots_in_vdi_chain_no_chain(self, mock_child):
- instance = {"uuid": "fake"}
-
- vm_utils._delete_snapshots_in_vdi_chain("session", instance,
- ["uuid"], "sr")
-
- self.assertFalse(mock_child.called)
-
- @mock.patch.object(vm_utils, '_child_vhds')
- def test_delete_snapshots_in_vdi_chain_no_snapshots(self, mock_child):
- instance = {"uuid": "fake"}
- mock_child.return_value = []
-
- vm_utils._delete_snapshots_in_vdi_chain("session", instance,
- ["uuid1", "uuid2"], "sr")
-
- mock_child.assert_called_once_with("session", "sr", ["uuid2"],
- old_snapshots_only=True)
-
- @mock.patch.object(vm_utils, '_scan_sr')
- @mock.patch.object(vm_utils, 'safe_destroy_vdis')
- @mock.patch.object(vm_utils, '_child_vhds')
- def test_delete_snapshots_in_vdi_chain_calls_destroy(self, mock_child,
- mock_destroy, mock_scan):
- instance = {"uuid": "fake"}
- mock_child.return_value = ["suuid1", "suuid2"]
- session = mock.Mock()
- session.VDI.get_by_uuid.side_effect = ["ref1", "ref2"]
-
- vm_utils._delete_snapshots_in_vdi_chain(session, instance,
- ["uuid1", "uuid2"], "sr")
-
- mock_child.assert_called_once_with(session, "sr", ["uuid2"],
- old_snapshots_only=True)
- session.VDI.get_by_uuid.assert_has_calls([
- mock.call("suuid1"), mock.call("suuid2")])
- mock_destroy.assert_called_once_with(session, ["ref1", "ref2"])
- mock_scan.assert_called_once_with(session, "sr")
-
-
-class ResizeFunctionTestCase(test.NoDBTestCase):
- def _call_get_resize_func_name(self, brand, version):
- session = mock.Mock()
- session.product_brand = brand
- session.product_version = version
-
- return vm_utils._get_resize_func_name(session)
-
- def _test_is_resize(self, brand, version):
- result = self._call_get_resize_func_name(brand, version)
- self.assertEqual("VDI.resize", result)
-
- def _test_is_resize_online(self, brand, version):
- result = self._call_get_resize_func_name(brand, version)
- self.assertEqual("VDI.resize_online", result)
-
- def test_xenserver_5_5(self):
- self._test_is_resize_online("XenServer", (5, 5, 0))
-
- def test_xenserver_6_0(self):
- self._test_is_resize("XenServer", (6, 0, 0))
-
- def test_xcp_1_1(self):
- self._test_is_resize_online("XCP", (1, 1, 0))
-
- def test_xcp_1_2(self):
- self._test_is_resize("XCP", (1, 2, 0))
-
- def test_xcp_2_0(self):
- self._test_is_resize("XCP", (2, 0, 0))
-
- def test_random_brand(self):
- self._test_is_resize("asfd", (1, 1, 0))
-
- def test_default(self):
- self._test_is_resize(None, None)
-
- def test_empty(self):
- self._test_is_resize("", "")
-
- def test_bad_version(self):
- self._test_is_resize("XenServer", "asdf")
-
-
-class VMInfoTests(VMUtilsTestBase):
- def setUp(self):
- super(VMInfoTests, self).setUp()
- self.session = mock.Mock()
-
- def test_get_power_state_valid(self):
- # Save on test setup calls by having these simple tests in one method
- self.session.call_xenapi.return_value = "Running"
- self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
- power_state.RUNNING)
-
- self.session.call_xenapi.return_value = "Halted"
- self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
- power_state.SHUTDOWN)
-
- self.session.call_xenapi.return_value = "Paused"
- self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
- power_state.PAUSED)
-
- self.session.call_xenapi.return_value = "Suspended"
- self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
- power_state.SUSPENDED)
-
- self.session.call_xenapi.return_value = "Crashed"
- self.assertEqual(vm_utils.get_power_state(self.session, "ref"),
- power_state.CRASHED)
-
- def test_get_power_state_invalid(self):
- self.session.call_xenapi.return_value = "Invalid"
- self.assertRaises(KeyError,
- vm_utils.get_power_state, self.session, "ref")
-
- _XAPI_record = {'power_state': 'Running',
- 'memory_static_max': str(10 << 10),
- 'memory_dynamic_max': str(9 << 10),
- 'VCPUs_max': '5'}
-
- def test_compile_info(self):
-
- def call_xenapi(method, *args):
- if method.startswith('VM.get_') and args[0] == 'dummy':
- return self._XAPI_record[method[7:]]
-
- self.session.call_xenapi.side_effect = call_xenapi
-
- expected = {'state': power_state.RUNNING,
- 'max_mem': 10L,
- 'mem': 9L,
- 'num_cpu': '5',
- 'cpu_time': 0}
-
- self.assertEqual(vm_utils.compile_info(self.session, "dummy"),
- expected)
diff --git a/nova/tests/virt/xenapi/test_vmops.py b/nova/tests/virt/xenapi/test_vmops.py
deleted file mode 100644
index e6bd462e9b..0000000000
--- a/nova/tests/virt/xenapi/test_vmops.py
+++ /dev/null
@@ -1,1124 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from eventlet import greenthread
-import mock
-
-from nova.compute import power_state
-from nova.compute import task_states
-from nova import context
-from nova import exception
-from nova import objects
-from nova.pci import manager as pci_manager
-from nova import test
-from nova.tests import fake_instance
-from nova.tests.virt.xenapi import stubs
-from nova.virt import fake
-from nova.virt.xenapi import agent as xenapi_agent
-from nova.virt.xenapi.client import session as xenapi_session
-from nova.virt.xenapi import fake as xenapi_fake
-from nova.virt.xenapi import vm_utils
-from nova.virt.xenapi import vmops
-from nova.virt.xenapi import volume_utils
-from nova.virt.xenapi import volumeops
-
-
-class VMOpsTestBase(stubs.XenAPITestBaseNoDB):
- def setUp(self):
- super(VMOpsTestBase, self).setUp()
- self._setup_mock_vmops()
- self.vms = []
-
- def _setup_mock_vmops(self, product_brand=None, product_version=None):
- stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
- self._session = xenapi_session.XenAPISession('test_url', 'root',
- 'test_pass')
- self.vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())
-
- def create_vm(self, name, state="Running"):
- vm_ref = xenapi_fake.create_vm(name, state)
- self.vms.append(vm_ref)
- vm = xenapi_fake.get_record("VM", vm_ref)
- return vm, vm_ref
-
- def tearDown(self):
- super(VMOpsTestBase, self).tearDown()
- for vm in self.vms:
- xenapi_fake.destroy_vm(vm)
-
-
-class VMOpsTestCase(VMOpsTestBase):
- def setUp(self):
- super(VMOpsTestCase, self).setUp()
- self._setup_mock_vmops()
-
- def _setup_mock_vmops(self, product_brand=None, product_version=None):
- self._session = self._get_mock_session(product_brand, product_version)
- self._vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())
-
- def _get_mock_session(self, product_brand, product_version):
- class Mock(object):
- pass
-
- mock_session = Mock()
- mock_session.product_brand = product_brand
- mock_session.product_version = product_version
- return mock_session
-
- def _test_finish_revert_migration_after_crash(self, backup_made, new_made,
- vm_shutdown=True):
- instance = {'name': 'foo',
- 'task_state': task_states.RESIZE_MIGRATING}
- context = 'fake_context'
-
- self.mox.StubOutWithMock(vm_utils, 'lookup')
- self.mox.StubOutWithMock(self._vmops, '_destroy')
- self.mox.StubOutWithMock(vm_utils, 'set_vm_name_label')
- self.mox.StubOutWithMock(self._vmops, '_attach_mapped_block_devices')
- self.mox.StubOutWithMock(self._vmops, '_start')
- self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
-
- vm_utils.lookup(self._session, 'foo-orig').AndReturn(
- backup_made and 'foo' or None)
- vm_utils.lookup(self._session, 'foo').AndReturn(
- (not backup_made or new_made) and 'foo' or None)
- if backup_made:
- if new_made:
- self._vmops._destroy(instance, 'foo')
- vm_utils.set_vm_name_label(self._session, 'foo', 'foo')
- self._vmops._attach_mapped_block_devices(instance, [])
-
- vm_utils.is_vm_shutdown(self._session, 'foo').AndReturn(vm_shutdown)
- if vm_shutdown:
- self._vmops._start(instance, 'foo')
-
- self.mox.ReplayAll()
-
- self._vmops.finish_revert_migration(context, instance, [])
-
- def test_finish_revert_migration_after_crash(self):
- self._test_finish_revert_migration_after_crash(True, True)
-
- def test_finish_revert_migration_after_crash_before_new(self):
- self._test_finish_revert_migration_after_crash(True, False)
-
- def test_finish_revert_migration_after_crash_before_backup(self):
- self._test_finish_revert_migration_after_crash(False, False)
-
- def test_xsm_sr_check_relaxed_cached(self):
- self.make_plugin_call_count = 0
-
- def fake_make_plugin_call(plugin, method, **args):
- self.make_plugin_call_count = self.make_plugin_call_count + 1
- return "true"
-
- self.stubs.Set(self._vmops, "_make_plugin_call",
- fake_make_plugin_call)
-
- self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())
- self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())
-
- self.assertEqual(self.make_plugin_call_count, 1)
-
- def test_get_vm_opaque_ref_raises_instance_not_found(self):
- instance = {"name": "dummy"}
- self.mox.StubOutWithMock(vm_utils, 'lookup')
- vm_utils.lookup(self._session, instance['name'], False).AndReturn(None)
- self.mox.ReplayAll()
-
- self.assertRaises(exception.InstanceNotFound,
- self._vmops._get_vm_opaque_ref, instance)
-
-
-class InjectAutoDiskConfigTestCase(VMOpsTestBase):
- def test_inject_auto_disk_config_when_present(self):
- vm, vm_ref = self.create_vm("dummy")
- instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": True}
- self.vmops._inject_auto_disk_config(instance, vm_ref)
- xenstore_data = vm['xenstore_data']
- self.assertEqual(xenstore_data['vm-data/auto-disk-config'], 'True')
-
- def test_inject_auto_disk_config_none_as_false(self):
- vm, vm_ref = self.create_vm("dummy")
- instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
- self.vmops._inject_auto_disk_config(instance, vm_ref)
- xenstore_data = vm['xenstore_data']
- self.assertEqual(xenstore_data['vm-data/auto-disk-config'], 'False')
-
-
-class GetConsoleOutputTestCase(VMOpsTestBase):
- def test_get_console_output_works(self):
- self.mox.StubOutWithMock(self.vmops, '_get_dom_id')
-
- instance = {"name": "dummy"}
- self.vmops._get_dom_id(instance, check_rescue=True).AndReturn(42)
- self.mox.ReplayAll()
-
- self.assertEqual("dom_id: 42", self.vmops.get_console_output(instance))
-
- def test_get_console_output_throws_nova_exception(self):
- self.mox.StubOutWithMock(self.vmops, '_get_dom_id')
-
- instance = {"name": "dummy"}
- # dom_id=0 used to trigger exception in fake XenAPI
- self.vmops._get_dom_id(instance, check_rescue=True).AndReturn(0)
- self.mox.ReplayAll()
-
- self.assertRaises(exception.NovaException,
- self.vmops.get_console_output, instance)
-
- def test_get_dom_id_works(self):
- instance = {"name": "dummy"}
- vm, vm_ref = self.create_vm("dummy")
- self.assertEqual(vm["domid"], self.vmops._get_dom_id(instance))
-
- def test_get_dom_id_works_with_rescue_vm(self):
- instance = {"name": "dummy"}
- vm, vm_ref = self.create_vm("dummy-rescue")
- self.assertEqual(vm["domid"],
- self.vmops._get_dom_id(instance, check_rescue=True))
-
- def test_get_dom_id_raises_not_found(self):
- instance = {"name": "dummy"}
- self.create_vm("not-dummy")
- self.assertRaises(exception.NotFound, self.vmops._get_dom_id, instance)
-
- def test_get_dom_id_works_with_vmref(self):
- vm, vm_ref = self.create_vm("dummy")
- self.assertEqual(vm["domid"],
- self.vmops._get_dom_id(vm_ref=vm_ref))
-
-
-class SpawnTestCase(VMOpsTestBase):
- def _stub_out_common(self):
- self.mox.StubOutWithMock(self.vmops, '_ensure_instance_name_unique')
- self.mox.StubOutWithMock(self.vmops, '_ensure_enough_free_mem')
- self.mox.StubOutWithMock(self.vmops, '_update_instance_progress')
- self.mox.StubOutWithMock(vm_utils, 'determine_disk_image_type')
- self.mox.StubOutWithMock(self.vmops, '_get_vdis_for_instance')
- self.mox.StubOutWithMock(vm_utils, 'safe_destroy_vdis')
- self.mox.StubOutWithMock(self.vmops._volumeops,
- 'safe_cleanup_from_vdis')
- self.mox.StubOutWithMock(self.vmops, '_resize_up_vdis')
- self.mox.StubOutWithMock(vm_utils,
- 'create_kernel_and_ramdisk')
- self.mox.StubOutWithMock(vm_utils, 'destroy_kernel_ramdisk')
- self.mox.StubOutWithMock(self.vmops, '_create_vm_record')
- self.mox.StubOutWithMock(self.vmops, '_destroy')
- self.mox.StubOutWithMock(self.vmops, '_attach_disks')
- self.mox.StubOutWithMock(pci_manager, 'get_instance_pci_devs')
- self.mox.StubOutWithMock(vm_utils, 'set_other_config_pci')
- self.mox.StubOutWithMock(self.vmops, '_attach_orig_disks')
- self.mox.StubOutWithMock(self.vmops, 'inject_network_info')
- self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
- self.mox.StubOutWithMock(self.vmops, '_inject_instance_metadata')
- self.mox.StubOutWithMock(self.vmops, '_inject_auto_disk_config')
- self.mox.StubOutWithMock(self.vmops, '_file_inject_vm_settings')
- self.mox.StubOutWithMock(self.vmops, '_create_vifs')
- self.mox.StubOutWithMock(self.vmops.firewall_driver,
- 'setup_basic_filtering')
- self.mox.StubOutWithMock(self.vmops.firewall_driver,
- 'prepare_instance_filter')
- self.mox.StubOutWithMock(self.vmops, '_start')
- self.mox.StubOutWithMock(self.vmops, '_wait_for_instance_to_start')
- self.mox.StubOutWithMock(self.vmops,
- '_configure_new_instance_with_agent')
- self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
- self.mox.StubOutWithMock(self.vmops.firewall_driver,
- 'apply_instance_filter')
-
- def _test_spawn(self, name_label_param=None, block_device_info_param=None,
- rescue=False, include_root_vdi=True, throw_exception=None,
- attach_pci_dev=False):
- self._stub_out_common()
-
- instance = {"name": "dummy", "uuid": "fake_uuid"}
- name_label = name_label_param
- if name_label is None:
- name_label = "dummy"
- image_meta = {"id": "image_id"}
- context = "context"
- session = self.vmops._session
- injected_files = "fake_files"
- admin_password = "password"
- network_info = "net_info"
- steps = 10
- if rescue:
- steps += 1
-
- block_device_info = block_device_info_param
- if block_device_info and not block_device_info['root_device_name']:
- block_device_info = dict(block_device_info_param)
- block_device_info['root_device_name'] = \
- self.vmops.default_root_dev
-
- di_type = "di_type"
- vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
- step = 1
- self.vmops._update_instance_progress(context, instance, step, steps)
-
- vdis = {"other": {"ref": "fake_ref_2", "osvol": True}}
- if include_root_vdi:
- vdis["root"] = {"ref": "fake_ref"}
- self.vmops._get_vdis_for_instance(context, instance,
- name_label, "image_id", di_type,
- block_device_info).AndReturn(vdis)
- self.vmops._resize_up_vdis(instance, vdis)
- step += 1
- self.vmops._update_instance_progress(context, instance, step, steps)
-
- kernel_file = "kernel"
- ramdisk_file = "ramdisk"
- vm_utils.create_kernel_and_ramdisk(context, session,
- instance, name_label).AndReturn((kernel_file, ramdisk_file))
- step += 1
- self.vmops._update_instance_progress(context, instance, step, steps)
-
- vm_ref = "fake_vm_ref"
- self.vmops._ensure_instance_name_unique(name_label)
- self.vmops._ensure_enough_free_mem(instance)
- self.vmops._create_vm_record(context, instance, name_label,
- di_type, kernel_file,
- ramdisk_file, image_meta).AndReturn(vm_ref)
- step += 1
- self.vmops._update_instance_progress(context, instance, step, steps)
-
- self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
- network_info, rescue, admin_password, injected_files)
- if attach_pci_dev:
- fake_dev = {
- 'created_at': None,
- 'updated_at': None,
- 'deleted_at': None,
- 'deleted': None,
- 'id': 1,
- 'compute_node_id': 1,
- 'address': '00:00.0',
- 'vendor_id': '1234',
- 'product_id': 'abcd',
- 'dev_type': 'type-PCI',
- 'status': 'available',
- 'dev_id': 'devid',
- 'label': 'label',
- 'instance_uuid': None,
- 'extra_info': '{}',
- }
- pci_manager.get_instance_pci_devs(instance).AndReturn([fake_dev])
- vm_utils.set_other_config_pci(self.vmops._session,
- vm_ref,
- "0/0000:00:00.0")
- else:
- pci_manager.get_instance_pci_devs(instance).AndReturn([])
- step += 1
- self.vmops._update_instance_progress(context, instance, step, steps)
-
- self.vmops._inject_instance_metadata(instance, vm_ref)
- self.vmops._inject_auto_disk_config(instance, vm_ref)
- self.vmops._inject_hostname(instance, vm_ref, rescue)
- self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
- network_info)
- self.vmops.inject_network_info(instance, network_info, vm_ref)
- step += 1
- self.vmops._update_instance_progress(context, instance, step, steps)
-
- self.vmops._create_vifs(instance, vm_ref, network_info)
- self.vmops.firewall_driver.setup_basic_filtering(instance,
- network_info).AndRaise(NotImplementedError)
- self.vmops.firewall_driver.prepare_instance_filter(instance,
- network_info)
- step += 1
- self.vmops._update_instance_progress(context, instance, step, steps)
-
- if rescue:
- self.vmops._attach_orig_disks(instance, vm_ref)
- step += 1
- self.vmops._update_instance_progress(context, instance, step,
- steps)
- self.vmops._start(instance, vm_ref)
- self.vmops._wait_for_instance_to_start(instance, vm_ref)
- step += 1
- self.vmops._update_instance_progress(context, instance, step, steps)
-
- self.vmops._configure_new_instance_with_agent(instance, vm_ref,
- injected_files, admin_password)
- self.vmops._remove_hostname(instance, vm_ref)
- step += 1
- self.vmops._update_instance_progress(context, instance, step, steps)
-
- self.vmops.firewall_driver.apply_instance_filter(instance,
- network_info)
- step += 1
- last_call = self.vmops._update_instance_progress(context, instance,
- step, steps)
- if throw_exception:
- last_call.AndRaise(throw_exception)
- self.vmops._destroy(instance, vm_ref, network_info=network_info)
- vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
- kernel_file, ramdisk_file)
- vm_utils.safe_destroy_vdis(self.vmops._session, ["fake_ref"])
- self.vmops._volumeops.safe_cleanup_from_vdis(["fake_ref_2"])
-
- self.mox.ReplayAll()
- self.vmops.spawn(context, instance, image_meta, injected_files,
- admin_password, network_info,
- block_device_info_param, name_label_param, rescue)
-
- def test_spawn(self):
- self._test_spawn()
-
- def test_spawn_with_alternate_options(self):
- self._test_spawn(include_root_vdi=False, rescue=True,
- name_label_param="bob",
- block_device_info_param={"root_device_name": ""})
-
- def test_spawn_with_pci_available_on_the_host(self):
- self._test_spawn(attach_pci_dev=True)
-
- def test_spawn_performs_rollback_and_throws_exception(self):
- self.assertRaises(test.TestingException, self._test_spawn,
- throw_exception=test.TestingException())
-
- def _test_finish_migration(self, power_on=True, resize_instance=True,
- throw_exception=None):
- self._stub_out_common()
- self.mox.StubOutWithMock(vm_utils, "import_all_migrated_disks")
- self.mox.StubOutWithMock(self.vmops, "_attach_mapped_block_devices")
-
- context = "context"
- migration = {}
- name_label = "dummy"
- instance = {"name": name_label, "uuid": "fake_uuid"}
- disk_info = "disk_info"
- network_info = "net_info"
- image_meta = {"id": "image_id"}
- block_device_info = "bdi"
- session = self.vmops._session
-
- self.vmops._ensure_instance_name_unique(name_label)
- self.vmops._ensure_enough_free_mem(instance)
-
- di_type = "di_type"
- vm_utils.determine_disk_image_type(image_meta).AndReturn(di_type)
-
- root_vdi = {"ref": "fake_ref"}
- ephemeral_vdi = {"ref": "fake_ref_e"}
- vdis = {"root": root_vdi, "ephemerals": {4: ephemeral_vdi}}
- vm_utils.import_all_migrated_disks(self.vmops._session,
- instance).AndReturn(vdis)
-
- kernel_file = "kernel"
- ramdisk_file = "ramdisk"
- vm_utils.create_kernel_and_ramdisk(context, session,
- instance, name_label).AndReturn((kernel_file, ramdisk_file))
-
- vm_ref = "fake_vm_ref"
- self.vmops._create_vm_record(context, instance, name_label,
- di_type, kernel_file,
- ramdisk_file, image_meta).AndReturn(vm_ref)
-
- if resize_instance:
- self.vmops._resize_up_vdis(instance, vdis)
- self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
- network_info, False, None, None)
- self.vmops._attach_mapped_block_devices(instance, block_device_info)
- pci_manager.get_instance_pci_devs(instance).AndReturn([])
-
- self.vmops._inject_instance_metadata(instance, vm_ref)
- self.vmops._inject_auto_disk_config(instance, vm_ref)
- self.vmops._file_inject_vm_settings(instance, vm_ref, vdis,
- network_info)
- self.vmops.inject_network_info(instance, network_info, vm_ref)
-
- self.vmops._create_vifs(instance, vm_ref, network_info)
- self.vmops.firewall_driver.setup_basic_filtering(instance,
- network_info).AndRaise(NotImplementedError)
- self.vmops.firewall_driver.prepare_instance_filter(instance,
- network_info)
-
- if power_on:
- self.vmops._start(instance, vm_ref)
- self.vmops._wait_for_instance_to_start(instance, vm_ref)
-
- self.vmops.firewall_driver.apply_instance_filter(instance,
- network_info)
-
- last_call = self.vmops._update_instance_progress(context, instance,
- step=5, total_steps=5)
- if throw_exception:
- last_call.AndRaise(throw_exception)
- self.vmops._destroy(instance, vm_ref, network_info=network_info)
- vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
- kernel_file, ramdisk_file)
- vm_utils.safe_destroy_vdis(self.vmops._session,
- ["fake_ref_e", "fake_ref"])
-
- self.mox.ReplayAll()
- self.vmops.finish_migration(context, migration, instance, disk_info,
- network_info, image_meta, resize_instance,
- block_device_info, power_on)
-
- def test_finish_migration(self):
- self._test_finish_migration()
-
- def test_finish_migration_no_power_on(self):
- self._test_finish_migration(power_on=False, resize_instance=False)
-
- def test_finish_migrate_performs_rollback_on_error(self):
- self.assertRaises(test.TestingException, self._test_finish_migration,
- power_on=False, resize_instance=False,
- throw_exception=test.TestingException())
-
- def test_remove_hostname(self):
- vm, vm_ref = self.create_vm("dummy")
- instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
- self.mox.StubOutWithMock(self._session, 'call_xenapi')
- self._session.call_xenapi("VM.remove_from_xenstore_data", vm_ref,
- "vm-data/hostname")
-
- self.mox.ReplayAll()
- self.vmops._remove_hostname(instance, vm_ref)
- self.mox.VerifyAll()
-
- def test_reset_network(self):
- class mock_agent(object):
- def __init__(self):
- self.called = False
-
- def resetnetwork(self):
- self.called = True
-
- vm, vm_ref = self.create_vm("dummy")
- instance = {"name": "dummy", "uuid": "1234", "auto_disk_config": None}
- agent = mock_agent()
-
- self.mox.StubOutWithMock(self.vmops, 'agent_enabled')
- self.mox.StubOutWithMock(self.vmops, '_get_agent')
- self.mox.StubOutWithMock(self.vmops, '_inject_hostname')
- self.mox.StubOutWithMock(self.vmops, '_remove_hostname')
-
- self.vmops.agent_enabled(instance).AndReturn(True)
- self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
- self.vmops._inject_hostname(instance, vm_ref, False)
- self.vmops._remove_hostname(instance, vm_ref)
- self.mox.ReplayAll()
- self.vmops.reset_network(instance)
- self.assertTrue(agent.called)
- self.mox.VerifyAll()
-
- def test_inject_hostname(self):
- instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
- vm_ref = "vm_ref"
-
- self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
- self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname', 'dummy')
-
- self.mox.ReplayAll()
- self.vmops._inject_hostname(instance, vm_ref, rescue=False)
-
- def test_inject_hostname_with_rescue_prefix(self):
- instance = {"hostname": "dummy", "os_type": "fake", "uuid": "uuid"}
- vm_ref = "vm_ref"
-
- self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
- self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
- 'RESCUE-dummy')
-
- self.mox.ReplayAll()
- self.vmops._inject_hostname(instance, vm_ref, rescue=True)
-
- def test_inject_hostname_with_windows_name_truncation(self):
- instance = {"hostname": "dummydummydummydummydummy",
- "os_type": "windows", "uuid": "uuid"}
- vm_ref = "vm_ref"
-
- self.mox.StubOutWithMock(self.vmops, '_add_to_param_xenstore')
- self.vmops._add_to_param_xenstore(vm_ref, 'vm-data/hostname',
- 'RESCUE-dummydum')
-
- self.mox.ReplayAll()
- self.vmops._inject_hostname(instance, vm_ref, rescue=True)
-
- def test_wait_for_instance_to_start(self):
- instance = {"uuid": "uuid"}
- vm_ref = "vm_ref"
-
- self.mox.StubOutWithMock(vm_utils, 'get_power_state')
- self.mox.StubOutWithMock(greenthread, 'sleep')
- vm_utils.get_power_state(self._session, vm_ref).AndReturn(
- power_state.SHUTDOWN)
- greenthread.sleep(0.5)
- vm_utils.get_power_state(self._session, vm_ref).AndReturn(
- power_state.RUNNING)
-
- self.mox.ReplayAll()
- self.vmops._wait_for_instance_to_start(instance, vm_ref)
-
- def test_attach_orig_disks(self):
- instance = {"name": "dummy"}
- vm_ref = "vm_ref"
- vbd_refs = {vmops.DEVICE_ROOT: "vdi_ref"}
-
- self.mox.StubOutWithMock(vm_utils, 'lookup')
- self.mox.StubOutWithMock(self.vmops, '_find_vdi_refs')
- self.mox.StubOutWithMock(vm_utils, 'create_vbd')
-
- vm_utils.lookup(self.vmops._session, "dummy").AndReturn("ref")
- self.vmops._find_vdi_refs("ref", exclude_volumes=True).AndReturn(
- vbd_refs)
- vm_utils.create_vbd(self.vmops._session, vm_ref, "vdi_ref",
- vmops.DEVICE_RESCUE, bootable=False)
-
- self.mox.ReplayAll()
- self.vmops._attach_orig_disks(instance, vm_ref)
-
- def test_agent_update_setup(self):
- # agent updates need to occur after networking is configured
- instance = {'name': 'betelgeuse',
- 'uuid': '1-2-3-4-5-6'}
- vm_ref = 'vm_ref'
- agent = xenapi_agent.XenAPIBasedAgent(self.vmops._session,
- self.vmops._virtapi, instance, vm_ref)
-
- self.mox.StubOutWithMock(xenapi_agent, 'should_use_agent')
- self.mox.StubOutWithMock(self.vmops, '_get_agent')
- self.mox.StubOutWithMock(agent, 'get_version')
- self.mox.StubOutWithMock(agent, 'resetnetwork')
- self.mox.StubOutWithMock(agent, 'update_if_needed')
-
- xenapi_agent.should_use_agent(instance).AndReturn(True)
- self.vmops._get_agent(instance, vm_ref).AndReturn(agent)
- agent.get_version().AndReturn('1.2.3')
- agent.resetnetwork()
- agent.update_if_needed('1.2.3')
-
- self.mox.ReplayAll()
- self.vmops._configure_new_instance_with_agent(instance, vm_ref,
- None, None)
-
-
-class DestroyTestCase(VMOpsTestBase):
- def setUp(self):
- super(DestroyTestCase, self).setUp()
- self.context = context.RequestContext(user_id=None, project_id=None)
- self.instance = fake_instance.fake_instance_obj(self.context)
-
- @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
- @mock.patch.object(vm_utils, 'hard_shutdown_vm')
- @mock.patch.object(volume_utils, 'find_sr_by_uuid')
- @mock.patch.object(volume_utils, 'forget_sr')
- def test_no_vm_no_bdm(self, forget_sr, find_sr_by_uuid, hard_shutdown_vm,
- lookup):
- self.vmops.destroy(self.instance, 'network_info',
- {'block_device_mapping': []})
- self.assertEqual(0, find_sr_by_uuid.call_count)
- self.assertEqual(0, forget_sr.call_count)
- self.assertEqual(0, hard_shutdown_vm.call_count)
-
- @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
- @mock.patch.object(vm_utils, 'hard_shutdown_vm')
- @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value=None)
- @mock.patch.object(volume_utils, 'forget_sr')
- def test_no_vm_orphaned_volume_no_sr(self, forget_sr, find_sr_by_uuid,
- hard_shutdown_vm, lookup):
- self.vmops.destroy(self.instance, 'network_info',
- {'block_device_mapping': [{'connection_info':
- {'data': {'volume_id': 'fake-uuid'}}}]})
- find_sr_by_uuid.assert_called_once_with(self.vmops._session,
- 'FA15E-D15C-fake-uuid')
- self.assertEqual(0, forget_sr.call_count)
- self.assertEqual(0, hard_shutdown_vm.call_count)
-
- @mock.patch.object(vm_utils, 'lookup', side_effect=[None, None])
- @mock.patch.object(vm_utils, 'hard_shutdown_vm')
- @mock.patch.object(volume_utils, 'find_sr_by_uuid', return_value='sr_ref')
- @mock.patch.object(volume_utils, 'forget_sr')
- def test_no_vm_orphaned_volume(self, forget_sr, find_sr_by_uuid,
- hard_shutdown_vm, lookup):
- self.vmops.destroy(self.instance, 'network_info',
- {'block_device_mapping': [{'connection_info':
- {'data': {'volume_id': 'fake-uuid'}}}]})
- find_sr_by_uuid.assert_called_once_with(self.vmops._session,
- 'FA15E-D15C-fake-uuid')
- forget_sr.assert_called_once_with(self.vmops._session, 'sr_ref')
- self.assertEqual(0, hard_shutdown_vm.call_count)
-
-
-@mock.patch.object(vmops.VMOps, '_update_instance_progress')
-@mock.patch.object(vmops.VMOps, '_get_vm_opaque_ref')
-@mock.patch.object(vm_utils, 'get_sr_path')
-@mock.patch.object(vmops.VMOps, '_detach_block_devices_from_orig_vm')
-@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_down')
-@mock.patch.object(vmops.VMOps, '_migrate_disk_resizing_up')
-class MigrateDiskAndPowerOffTestCase(VMOpsTestBase):
- def test_migrate_disk_and_power_off_works_down(self,
- migrate_up, migrate_down, *mocks):
- instance = {"root_gb": 2, "ephemeral_gb": 0, "uuid": "uuid"}
- flavor = {"root_gb": 1, "ephemeral_gb": 0}
-
- self.vmops.migrate_disk_and_power_off(None, instance, None,
- flavor, None)
-
- self.assertFalse(migrate_up.called)
- self.assertTrue(migrate_down.called)
-
- def test_migrate_disk_and_power_off_works_up(self,
- migrate_up, migrate_down, *mocks):
- instance = {"root_gb": 1, "ephemeral_gb": 1, "uuid": "uuid"}
- flavor = {"root_gb": 2, "ephemeral_gb": 2}
-
- self.vmops.migrate_disk_and_power_off(None, instance, None,
- flavor, None)
-
- self.assertFalse(migrate_down.called)
- self.assertTrue(migrate_up.called)
-
- def test_migrate_disk_and_power_off_resize_down_ephemeral_fails(self,
- migrate_up, migrate_down, *mocks):
- instance = {"ephemeral_gb": 2}
- flavor = {"ephemeral_gb": 1}
-
- self.assertRaises(exception.ResizeError,
- self.vmops.migrate_disk_and_power_off,
- None, instance, None, flavor, None)
-
-
-@mock.patch.object(vm_utils, 'migrate_vhd')
-@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
-@mock.patch.object(vm_utils, 'get_all_vdi_uuids_for_vm')
-@mock.patch.object(vmops.VMOps, '_update_instance_progress')
-@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
-class MigrateDiskResizingUpTestCase(VMOpsTestBase):
- def _fake_snapshot_attached_here(self, session, instance, vm_ref, label,
- userdevice, post_snapshot_callback):
- self.assertIsInstance(instance, dict)
- if userdevice == '0':
- self.assertEqual("vm_ref", vm_ref)
- self.assertEqual("fake-snapshot", label)
- yield ["leaf", "parent", "grandp"]
- else:
- leaf = userdevice + "-leaf"
- parent = userdevice + "-parent"
- yield [leaf, parent]
-
- def test_migrate_disk_resizing_up_works_no_ephemeral(self,
- mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
- mock_shutdown, mock_migrate_vhd):
- context = "ctxt"
- instance = {"name": "fake", "uuid": "uuid"}
- dest = "dest"
- vm_ref = "vm_ref"
- sr_path = "sr_path"
-
- mock_get_all_vdi_uuids.return_value = None
-
- with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
- self._fake_snapshot_attached_here):
- self.vmops._migrate_disk_resizing_up(context, instance, dest,
- vm_ref, sr_path)
-
- mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
- vm_ref, min_userdevice=4)
- mock_apply_orig.assert_called_once_with(instance, vm_ref)
- mock_shutdown.assert_called_once_with(instance, vm_ref)
-
- m_vhd_expected = [mock.call(self.vmops._session, instance, "parent",
- dest, sr_path, 1),
- mock.call(self.vmops._session, instance, "grandp",
- dest, sr_path, 2),
- mock.call(self.vmops._session, instance, "leaf",
- dest, sr_path, 0)]
- self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
-
- prog_expected = [
- mock.call(context, instance, 1, 5),
- mock.call(context, instance, 2, 5),
- mock.call(context, instance, 3, 5),
- mock.call(context, instance, 4, 5)
- # 5/5: step to be executed by finish migration.
- ]
- self.assertEqual(prog_expected, mock_update_progress.call_args_list)
-
- def test_migrate_disk_resizing_up_works_with_two_ephemeral(self,
- mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
- mock_shutdown, mock_migrate_vhd):
- context = "ctxt"
- instance = {"name": "fake", "uuid": "uuid"}
- dest = "dest"
- vm_ref = "vm_ref"
- sr_path = "sr_path"
-
- mock_get_all_vdi_uuids.return_value = ["vdi-eph1", "vdi-eph2"]
-
- with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
- self._fake_snapshot_attached_here):
- self.vmops._migrate_disk_resizing_up(context, instance, dest,
- vm_ref, sr_path)
-
- mock_get_all_vdi_uuids.assert_called_once_with(self.vmops._session,
- vm_ref, min_userdevice=4)
- mock_apply_orig.assert_called_once_with(instance, vm_ref)
- mock_shutdown.assert_called_once_with(instance, vm_ref)
-
- m_vhd_expected = [mock.call(self.vmops._session, instance,
- "parent", dest, sr_path, 1),
- mock.call(self.vmops._session, instance,
- "grandp", dest, sr_path, 2),
- mock.call(self.vmops._session, instance,
- "4-parent", dest, sr_path, 1, 1),
- mock.call(self.vmops._session, instance,
- "5-parent", dest, sr_path, 1, 2),
- mock.call(self.vmops._session, instance,
- "leaf", dest, sr_path, 0),
- mock.call(self.vmops._session, instance,
- "4-leaf", dest, sr_path, 0, 1),
- mock.call(self.vmops._session, instance,
- "5-leaf", dest, sr_path, 0, 2)]
- self.assertEqual(m_vhd_expected, mock_migrate_vhd.call_args_list)
-
- prog_expected = [
- mock.call(context, instance, 1, 5),
- mock.call(context, instance, 2, 5),
- mock.call(context, instance, 3, 5),
- mock.call(context, instance, 4, 5)
- # 5/5: step to be executed by finish migration.
- ]
- self.assertEqual(prog_expected, mock_update_progress.call_args_list)
-
- @mock.patch.object(vmops.VMOps, '_restore_orig_vm_and_cleanup_orphan')
- def test_migrate_disk_resizing_up_rollback(self,
- mock_restore,
- mock_apply_orig, mock_update_progress, mock_get_all_vdi_uuids,
- mock_shutdown, mock_migrate_vhd):
- context = "ctxt"
- instance = {"name": "fake", "uuid": "fake"}
- dest = "dest"
- vm_ref = "vm_ref"
- sr_path = "sr_path"
-
- mock_migrate_vhd.side_effect = test.TestingException
- mock_restore.side_effect = test.TestingException
-
- with mock.patch.object(vm_utils, '_snapshot_attached_here_impl',
- self._fake_snapshot_attached_here):
- self.assertRaises(exception.InstanceFaultRollback,
- self.vmops._migrate_disk_resizing_up,
- context, instance, dest, vm_ref, sr_path)
-
- mock_apply_orig.assert_called_once_with(instance, vm_ref)
- mock_restore.assert_called_once_with(instance)
- mock_migrate_vhd.assert_called_once_with(self.vmops._session,
- instance, "parent", dest, sr_path, 1)
-
-
-class CreateVMRecordTestCase(VMOpsTestBase):
- @mock.patch.object(vm_utils, 'determine_vm_mode')
- @mock.patch.object(vm_utils, 'get_vm_device_id')
- @mock.patch.object(vm_utils, 'create_vm')
- def test_create_vm_record_with_vm_device_id(self, mock_create_vm,
- mock_get_vm_device_id, mock_determine_vm_mode):
-
- context = "context"
- instance = objects.Instance(vm_mode="vm_mode", uuid="uuid123")
- name_label = "dummy"
- disk_image_type = "vhd"
- kernel_file = "kernel"
- ramdisk_file = "ram"
- device_id = "0002"
- image_properties = {"xenapi_device_id": device_id}
- image_meta = {"properties": image_properties}
- session = "session"
- self.vmops._session = session
- mock_get_vm_device_id.return_value = device_id
- mock_determine_vm_mode.return_value = "vm_mode"
-
- self.vmops._create_vm_record(context, instance, name_label,
- disk_image_type, kernel_file, ramdisk_file, image_meta)
-
- mock_get_vm_device_id.assert_called_with(session, image_properties)
- mock_create_vm.assert_called_with(session, instance, name_label,
- kernel_file, ramdisk_file, False, device_id)
-
-
-class BootableTestCase(VMOpsTestBase):
-
- def setUp(self):
- super(BootableTestCase, self).setUp()
-
- self.instance = {"name": "test", "uuid": "fake"}
- vm_rec, self.vm_ref = self.create_vm('test')
-
- # sanity check bootlock is initially disabled:
- self.assertEqual({}, vm_rec['blocked_operations'])
-
- def _get_blocked(self):
- vm_rec = self._session.call_xenapi("VM.get_record", self.vm_ref)
- return vm_rec['blocked_operations']
-
- def test_acquire_bootlock(self):
- self.vmops._acquire_bootlock(self.vm_ref)
- blocked = self._get_blocked()
- self.assertIn('start', blocked)
-
- def test_release_bootlock(self):
- self.vmops._acquire_bootlock(self.vm_ref)
- self.vmops._release_bootlock(self.vm_ref)
- blocked = self._get_blocked()
- self.assertNotIn('start', blocked)
-
- def test_set_bootable(self):
- self.vmops.set_bootable(self.instance, True)
- blocked = self._get_blocked()
- self.assertNotIn('start', blocked)
-
- def test_set_not_bootable(self):
- self.vmops.set_bootable(self.instance, False)
- blocked = self._get_blocked()
- self.assertIn('start', blocked)
-
-
-@mock.patch.object(vm_utils, 'update_vdi_virtual_size', autospec=True)
-class ResizeVdisTestCase(VMOpsTestBase):
- def test_dont_resize_root_volumes_osvol_false(self, mock_resize):
- instance = fake_instance.fake_db_instance(root_gb=20)
- vdis = {'root': {'osvol': False, 'ref': 'vdi_ref'}}
- self.vmops._resize_up_vdis(instance, vdis)
- self.assertTrue(mock_resize.called)
-
- def test_dont_resize_root_volumes_osvol_true(self, mock_resize):
- instance = fake_instance.fake_db_instance(root_gb=20)
- vdis = {'root': {'osvol': True}}
- self.vmops._resize_up_vdis(instance, vdis)
- self.assertFalse(mock_resize.called)
-
- def test_dont_resize_root_volumes_no_osvol(self, mock_resize):
- instance = fake_instance.fake_db_instance(root_gb=20)
- vdis = {'root': {}}
- self.vmops._resize_up_vdis(instance, vdis)
- self.assertFalse(mock_resize.called)
-
- @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
- def test_ensure_ephemeral_resize_with_root_volume(self, mock_sizes,
- mock_resize):
- mock_sizes.return_value = [2000, 1000]
- instance = fake_instance.fake_db_instance(root_gb=20, ephemeral_gb=20)
- ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
- vdis = {'root': {'osvol': True, 'ref': 'vdi_ref'},
- 'ephemerals': ephemerals}
- with mock.patch.object(vm_utils, 'generate_single_ephemeral',
- autospec=True) as g:
- self.vmops._resize_up_vdis(instance, vdis)
- self.assertEqual([mock.call(self.vmops._session, instance, 4,
- 2000),
- mock.call(self.vmops._session, instance, 5,
- 1000)],
- mock_resize.call_args_list)
- self.assertFalse(g.called)
-
- def test_resize_up_vdis_root(self, mock_resize):
- instance = {"root_gb": 20, "ephemeral_gb": 0}
- self.vmops._resize_up_vdis(instance, {"root": {"ref": "vdi_ref"}})
- mock_resize.assert_called_once_with(self.vmops._session, instance,
- "vdi_ref", 20)
-
- def test_resize_up_vdis_zero_disks(self, mock_resize):
- instance = {"root_gb": 0, "ephemeral_gb": 0}
- self.vmops._resize_up_vdis(instance, {"root": {}})
- self.assertFalse(mock_resize.called)
-
- def test_resize_up_vdis_no_vdis_like_initial_spawn(self, mock_resize):
- instance = {"root_gb": 0, "ephemeral_gb": 3000}
- vdis = {}
-
- self.vmops._resize_up_vdis(instance, vdis)
-
- self.assertFalse(mock_resize.called)
-
- @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
- def test_resize_up_vdis_ephemeral(self, mock_sizes, mock_resize):
- mock_sizes.return_value = [2000, 1000]
- instance = {"root_gb": 0, "ephemeral_gb": 3000}
- ephemerals = {"4": {"ref": 4}, "5": {"ref": 5}}
- vdis = {"ephemerals": ephemerals}
-
- self.vmops._resize_up_vdis(instance, vdis)
-
- mock_sizes.assert_called_once_with(3000)
- expected = [mock.call(self.vmops._session, instance, 4, 2000),
- mock.call(self.vmops._session, instance, 5, 1000)]
- self.assertEqual(expected, mock_resize.call_args_list)
-
- @mock.patch.object(vm_utils, 'generate_single_ephemeral')
- @mock.patch.object(vm_utils, 'get_ephemeral_disk_sizes')
- def test_resize_up_vdis_ephemeral_with_generate(self, mock_sizes,
- mock_generate,
- mock_resize):
- mock_sizes.return_value = [2000, 1000]
- instance = {"root_gb": 0, "ephemeral_gb": 3000, "uuid": "a"}
- ephemerals = {"4": {"ref": 4}}
- vdis = {"ephemerals": ephemerals}
-
- self.vmops._resize_up_vdis(instance, vdis)
-
- mock_sizes.assert_called_once_with(3000)
- mock_resize.assert_called_once_with(self.vmops._session, instance,
- 4, 2000)
- mock_generate.assert_called_once_with(self.vmops._session, instance,
- None, 5, 1000)
-
-
-@mock.patch.object(vm_utils, 'remove_old_snapshots')
-class CleanupFailedSnapshotTestCase(VMOpsTestBase):
- def test_post_interrupted_snapshot_cleanup(self, mock_remove):
- self.vmops._get_vm_opaque_ref = mock.Mock()
- self.vmops._get_vm_opaque_ref.return_value = "vm_ref"
-
- self.vmops.post_interrupted_snapshot_cleanup("context", "instance")
-
- mock_remove.assert_called_once_with(self.vmops._session,
- "instance", "vm_ref")
-
-
-class LiveMigrateHelperTestCase(VMOpsTestBase):
- def test_connect_block_device_volumes_none(self):
- self.assertEqual({}, self.vmops.connect_block_device_volumes(None))
-
- @mock.patch.object(volumeops.VolumeOps, "connect_volume")
- def test_connect_block_device_volumes_calls_connect(self, mock_connect):
- with mock.patch.object(self.vmops._session,
- "call_xenapi") as mock_session:
- mock_connect.return_value = ("sr_uuid", None)
- mock_session.return_value = "sr_ref"
- bdm = {"connection_info": "c_info"}
- bdi = {"block_device_mapping": [bdm]}
- result = self.vmops.connect_block_device_volumes(bdi)
-
- self.assertEqual({'sr_uuid': 'sr_ref'}, result)
-
- mock_connect.assert_called_once_with("c_info")
- mock_session.assert_called_once_with("SR.get_by_uuid",
- "sr_uuid")
-
-
-@mock.patch.object(vmops.VMOps, '_resize_ensure_vm_is_shutdown')
-@mock.patch.object(vmops.VMOps, '_apply_orig_vm_name_label')
-@mock.patch.object(vmops.VMOps, '_update_instance_progress')
-@mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
-@mock.patch.object(vm_utils, 'resize_disk')
-@mock.patch.object(vm_utils, 'migrate_vhd')
-@mock.patch.object(vm_utils, 'destroy_vdi')
-class MigrateDiskResizingDownTestCase(VMOpsTestBase):
- def test_migrate_disk_resizing_down_works_no_ephemeral(
- self,
- mock_destroy_vdi,
- mock_migrate_vhd,
- mock_resize_disk,
- mock_get_vdi_for_vm_safely,
- mock_update_instance_progress,
- mock_apply_orig_vm_name_label,
- mock_resize_ensure_vm_is_shutdown):
-
- context = "ctx"
- instance = {"name": "fake", "uuid": "uuid"}
- dest = "dest"
- vm_ref = "vm_ref"
- sr_path = "sr_path"
- instance_type = dict(root_gb=1)
- old_vdi_ref = "old_ref"
- new_vdi_ref = "new_ref"
- new_vdi_uuid = "new_uuid"
-
- mock_get_vdi_for_vm_safely.return_value = (old_vdi_ref, None)
- mock_resize_disk.return_value = (new_vdi_ref, new_vdi_uuid)
-
- self.vmops._migrate_disk_resizing_down(context, instance, dest,
- instance_type, vm_ref, sr_path)
-
- mock_get_vdi_for_vm_safely.assert_called_once_with(
- self.vmops._session,
- vm_ref)
- mock_resize_ensure_vm_is_shutdown.assert_called_once_with(
- instance, vm_ref)
- mock_apply_orig_vm_name_label.assert_called_once_with(
- instance, vm_ref)
- mock_resize_disk.assert_called_once_with(
- self.vmops._session,
- instance,
- old_vdi_ref,
- instance_type)
- mock_migrate_vhd.assert_called_once_with(
- self.vmops._session,
- instance,
- new_vdi_uuid,
- dest,
- sr_path, 0)
- mock_destroy_vdi.assert_called_once_with(
- self.vmops._session,
- new_vdi_ref)
-
- prog_expected = [
- mock.call(context, instance, 1, 5),
- mock.call(context, instance, 2, 5),
- mock.call(context, instance, 3, 5),
- mock.call(context, instance, 4, 5)
- # 5/5: step to be executed by finish migration.
- ]
- self.assertEqual(prog_expected,
- mock_update_instance_progress.call_args_list)
-
-
-class GetVdisForInstanceTestCase(VMOpsTestBase):
- """Tests get_vdis_for_instance utility method."""
- def setUp(self):
- super(GetVdisForInstanceTestCase, self).setUp()
- self.context = context.get_admin_context()
- self.context.auth_token = 'auth_token'
- self.session = mock.Mock()
- self.vmops._session = self.session
- self.instance = fake_instance.fake_instance_obj(self.context)
- self.name_label = 'name'
- self.image = 'fake_image_id'
-
- @mock.patch.object(volumeops.VolumeOps, "connect_volume",
- return_value=("sr", "vdi_uuid"))
- def test_vdis_for_instance_bdi_password_scrubbed(self, get_uuid_mock):
- # setup fake data
- data = {'name_label': self.name_label,
- 'sr_uuid': 'fake',
- 'auth_password': 'scrubme'}
- bdm = [{'mount_device': '/dev/vda',
- 'connection_info': {'data': data}}]
- bdi = {'root_device_name': 'vda',
- 'block_device_mapping': bdm}
-
- # Tests that the parameters to the to_xml method are sanitized for
- # passwords when logged.
- def fake_debug(*args, **kwargs):
- if 'auth_password' in args[0]:
- self.assertNotIn('scrubme', args[0])
- fake_debug.matched = True
-
- fake_debug.matched = False
-
- with mock.patch.object(vmops.LOG, 'debug',
- side_effect=fake_debug) as debug_mock:
- vdis = self.vmops._get_vdis_for_instance(self.context,
- self.instance, self.name_label, self.image,
- image_type=4, block_device_info=bdi)
- self.assertEqual(1, len(vdis))
- get_uuid_mock.assert_called_once_with({"data": data})
- # we don't care what the log message is, we just want to make sure
- # our stub method is called which asserts the password is scrubbed
- self.assertTrue(debug_mock.called)
- self.assertTrue(fake_debug.matched)
diff --git a/nova/tests/virt/xenapi/test_volume_utils.py b/nova/tests/virt/xenapi/test_volume_utils.py
deleted file mode 100644
index 02b33e05d0..0000000000
--- a/nova/tests/virt/xenapi/test_volume_utils.py
+++ /dev/null
@@ -1,232 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from eventlet import greenthread
-import mock
-
-from nova import exception
-from nova import test
-from nova.tests.virt.xenapi import stubs
-from nova.virt.xenapi import volume_utils
-
-
-class SROps(stubs.XenAPITestBaseNoDB):
- def test_find_sr_valid_uuid(self):
- self.session = mock.Mock()
- self.session.call_xenapi.return_value = 'sr_ref'
- self.assertEqual(volume_utils.find_sr_by_uuid(self.session,
- 'sr_uuid'),
- 'sr_ref')
-
- def test_find_sr_invalid_uuid(self):
- class UUIDException(Exception):
- details = ["UUID_INVALID", "", "", ""]
-
- self.session = mock.Mock()
- self.session.XenAPI.Failure = UUIDException
- self.session.call_xenapi.side_effect = UUIDException
- self.assertIsNone(
- volume_utils.find_sr_by_uuid(self.session, 'sr_uuid'))
-
- def test_find_sr_from_vdi(self):
- vdi_ref = 'fake-ref'
-
- def fake_call_xenapi(method, *args):
- self.assertEqual(method, 'VDI.get_SR')
- self.assertEqual(args[0], vdi_ref)
- return args[0]
-
- session = mock.Mock()
- session.call_xenapi.side_effect = fake_call_xenapi
- self.assertEqual(volume_utils.find_sr_from_vdi(session, vdi_ref),
- vdi_ref)
-
- def test_find_sr_from_vdi_exception(self):
- vdi_ref = 'fake-ref'
-
- class FakeException(Exception):
- pass
-
- def fake_call_xenapi(method, *args):
- self.assertEqual(method, 'VDI.get_SR')
- self.assertEqual(args[0], vdi_ref)
- return args[0]
-
- session = mock.Mock()
- session.XenAPI.Failure = FakeException
- session.call_xenapi.side_effect = FakeException
- self.assertRaises(exception.StorageError,
- volume_utils.find_sr_from_vdi, session, vdi_ref)
-
-
-class ISCSIParametersTestCase(stubs.XenAPITestBaseNoDB):
- def test_target_host(self):
- self.assertEqual(volume_utils._get_target_host('host:port'),
- 'host')
-
- self.assertEqual(volume_utils._get_target_host('host'),
- 'host')
-
- # There is no default value
- self.assertIsNone(volume_utils._get_target_host(':port'))
-
- self.assertIsNone(volume_utils._get_target_host(None))
-
- def test_target_port(self):
- self.assertEqual(volume_utils._get_target_port('host:port'),
- 'port')
-
- self.assertEqual(volume_utils._get_target_port('host'),
- '3260')
-
-
-class IntroduceTestCase(stubs.XenAPITestBaseNoDB):
-
- @mock.patch.object(volume_utils, '_get_vdi_ref')
- @mock.patch.object(greenthread, 'sleep')
- def test_introduce_vdi_retry(self, mock_sleep, mock_get_vdi_ref):
- def fake_get_vdi_ref(session, sr_ref, vdi_uuid, target_lun):
- fake_get_vdi_ref.call_count += 1
- if fake_get_vdi_ref.call_count == 2:
- return 'vdi_ref'
-
- def fake_call_xenapi(method, *args):
- if method == 'SR.scan':
- return
- elif method == 'VDI.get_record':
- return {'managed': 'true'}
-
- session = mock.Mock()
- session.call_xenapi.side_effect = fake_call_xenapi
-
- mock_get_vdi_ref.side_effect = fake_get_vdi_ref
- fake_get_vdi_ref.call_count = 0
-
- self.assertEqual(volume_utils.introduce_vdi(session, 'sr_ref'),
- 'vdi_ref')
- mock_sleep.assert_called_once_with(20)
-
- @mock.patch.object(volume_utils, '_get_vdi_ref')
- @mock.patch.object(greenthread, 'sleep')
- def test_introduce_vdi_exception(self, mock_sleep, mock_get_vdi_ref):
- def fake_call_xenapi(method, *args):
- if method == 'SR.scan':
- return
- elif method == 'VDI.get_record':
- return {'managed': 'true'}
-
- session = mock.Mock()
- session.call_xenapi.side_effect = fake_call_xenapi
- mock_get_vdi_ref.return_value = None
-
- self.assertRaises(exception.StorageError,
- volume_utils.introduce_vdi, session, 'sr_ref')
- mock_sleep.assert_called_once_with(20)
-
-
-class ParseVolumeInfoTestCase(stubs.XenAPITestBaseNoDB):
- def test_mountpoint_to_number(self):
- cases = {
- 'sda': 0,
- 'sdp': 15,
- 'hda': 0,
- 'hdp': 15,
- 'vda': 0,
- 'xvda': 0,
- '0': 0,
- '10': 10,
- 'vdq': -1,
- 'sdq': -1,
- 'hdq': -1,
- 'xvdq': -1,
- }
-
- for (input, expected) in cases.iteritems():
- actual = volume_utils._mountpoint_to_number(input)
- self.assertEqual(actual, expected,
- '%s yielded %s, not %s' % (input, actual, expected))
-
- @classmethod
- def _make_connection_info(cls):
- target_iqn = 'iqn.2010-10.org.openstack:volume-00000001'
- return {'driver_volume_type': 'iscsi',
- 'data': {'volume_id': 1,
- 'target_iqn': target_iqn,
- 'target_portal': '127.0.0.1:3260,fake',
- 'target_lun': None,
- 'auth_method': 'CHAP',
- 'auth_username': 'username',
- 'auth_password': 'password'}}
-
- def test_parse_volume_info_parsing_auth_details(self):
- conn_info = self._make_connection_info()
- result = volume_utils._parse_volume_info(conn_info['data'])
-
- self.assertEqual('username', result['chapuser'])
- self.assertEqual('password', result['chappassword'])
-
- def test_get_device_number_raise_exception_on_wrong_mountpoint(self):
- self.assertRaises(
- exception.StorageError,
- volume_utils.get_device_number,
- 'dev/sd')
-
-
-class FindVBDTestCase(stubs.XenAPITestBaseNoDB):
- def test_find_vbd_by_number_works(self):
- session = mock.Mock()
- session.VM.get_VBDs.return_value = ["a", "b"]
- session.VBD.get_userdevice.return_value = "1"
-
- result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
-
- self.assertEqual("a", result)
- session.VM.get_VBDs.assert_called_once_with("vm_ref")
- session.VBD.get_userdevice.assert_called_once_with("a")
-
- def test_find_vbd_by_number_no_matches(self):
- session = mock.Mock()
- session.VM.get_VBDs.return_value = ["a", "b"]
- session.VBD.get_userdevice.return_value = "3"
-
- result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
-
- self.assertIsNone(result)
- session.VM.get_VBDs.assert_called_once_with("vm_ref")
- expected = [mock.call("a"), mock.call("b")]
- self.assertEqual(expected,
- session.VBD.get_userdevice.call_args_list)
-
- def test_find_vbd_by_number_no_vbds(self):
- session = mock.Mock()
- session.VM.get_VBDs.return_value = []
-
- result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
-
- self.assertIsNone(result)
- session.VM.get_VBDs.assert_called_once_with("vm_ref")
- self.assertFalse(session.VBD.get_userdevice.called)
-
- def test_find_vbd_by_number_ignores_exception(self):
- session = mock.Mock()
- session.XenAPI.Failure = test.TestingException
- session.VM.get_VBDs.return_value = ["a"]
- session.VBD.get_userdevice.side_effect = test.TestingException
-
- result = volume_utils.find_vbd_by_number(session, "vm_ref", 1)
-
- self.assertIsNone(result)
- session.VM.get_VBDs.assert_called_once_with("vm_ref")
- session.VBD.get_userdevice.assert_called_once_with("a")
diff --git a/nova/tests/virt/xenapi/test_volumeops.py b/nova/tests/virt/xenapi/test_volumeops.py
deleted file mode 100644
index fbb4ad09af..0000000000
--- a/nova/tests/virt/xenapi/test_volumeops.py
+++ /dev/null
@@ -1,549 +0,0 @@
-# Copyright (c) 2012 Citrix Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import exception
-from nova import test
-from nova.tests.virt.xenapi import stubs
-from nova.virt.xenapi import vm_utils
-from nova.virt.xenapi import volume_utils
-from nova.virt.xenapi import volumeops
-
-
-class VolumeOpsTestBase(stubs.XenAPITestBaseNoDB):
- def setUp(self):
- super(VolumeOpsTestBase, self).setUp()
- self._setup_mock_volumeops()
-
- def _setup_mock_volumeops(self):
- self.session = stubs.FakeSessionForVolumeTests('fake_uri')
- self.ops = volumeops.VolumeOps(self.session)
-
-
-class VolumeDetachTestCase(VolumeOpsTestBase):
- def test_detach_volume_call(self):
- registered_calls = []
-
- def regcall(label):
- def side_effect(*args, **kwargs):
- registered_calls.append(label)
- return side_effect
-
- ops = volumeops.VolumeOps('session')
- self.mox.StubOutWithMock(volumeops.vm_utils, 'lookup')
- self.mox.StubOutWithMock(volumeops.volume_utils, 'find_vbd_by_number')
- self.mox.StubOutWithMock(volumeops.vm_utils, 'is_vm_shutdown')
- self.mox.StubOutWithMock(volumeops.vm_utils, 'unplug_vbd')
- self.mox.StubOutWithMock(volumeops.vm_utils, 'destroy_vbd')
- self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
- self.mox.StubOutWithMock(volumeops.volume_utils, 'find_sr_from_vbd')
- self.mox.StubOutWithMock(volumeops.volume_utils, 'purge_sr')
-
- volumeops.vm_utils.lookup('session', 'instance_1').AndReturn(
- 'vmref')
-
- volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
- 'devnumber')
-
- volumeops.volume_utils.find_vbd_by_number(
- 'session', 'vmref', 'devnumber').AndReturn('vbdref')
-
- volumeops.vm_utils.is_vm_shutdown('session', 'vmref').AndReturn(
- False)
-
- volumeops.vm_utils.unplug_vbd('session', 'vbdref', 'vmref')
-
- volumeops.vm_utils.destroy_vbd('session', 'vbdref').WithSideEffects(
- regcall('destroy_vbd'))
-
- volumeops.volume_utils.find_sr_from_vbd(
- 'session', 'vbdref').WithSideEffects(
- regcall('find_sr_from_vbd')).AndReturn('srref')
-
- volumeops.volume_utils.purge_sr('session', 'srref')
-
- self.mox.ReplayAll()
-
- ops.detach_volume(
- dict(driver_volume_type='iscsi', data='conn_data'),
- 'instance_1', 'mountpoint')
-
- self.assertEqual(
- ['find_sr_from_vbd', 'destroy_vbd'], registered_calls)
-
- @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
- @mock.patch.object(volume_utils, "find_vbd_by_number")
- @mock.patch.object(vm_utils, "vm_ref_or_raise")
- def test_detach_volume(self, mock_vm, mock_vbd, mock_detach):
- mock_vm.return_value = "vm_ref"
- mock_vbd.return_value = "vbd_ref"
-
- self.ops.detach_volume({}, "name", "/dev/xvdd")
-
- mock_vm.assert_called_once_with(self.session, "name")
- mock_vbd.assert_called_once_with(self.session, "vm_ref", 3)
- mock_detach.assert_called_once_with("vm_ref", ["vbd_ref"])
-
- @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
- @mock.patch.object(volume_utils, "find_vbd_by_number")
- @mock.patch.object(vm_utils, "vm_ref_or_raise")
- def test_detach_volume_skips_error_skip_attach(self, mock_vm, mock_vbd,
- mock_detach):
- mock_vm.return_value = "vm_ref"
- mock_vbd.return_value = None
-
- self.ops.detach_volume({}, "name", "/dev/xvdd")
-
- self.assertFalse(mock_detach.called)
-
- @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
- @mock.patch.object(volume_utils, "find_vbd_by_number")
- @mock.patch.object(vm_utils, "vm_ref_or_raise")
- def test_detach_volume_raises(self, mock_vm, mock_vbd,
- mock_detach):
- mock_vm.return_value = "vm_ref"
- mock_vbd.side_effect = test.TestingException
-
- self.assertRaises(test.TestingException,
- self.ops.detach_volume, {}, "name", "/dev/xvdd")
- self.assertFalse(mock_detach.called)
-
- @mock.patch.object(volume_utils, "purge_sr")
- @mock.patch.object(vm_utils, "destroy_vbd")
- @mock.patch.object(volume_utils, "find_sr_from_vbd")
- @mock.patch.object(vm_utils, "unplug_vbd")
- @mock.patch.object(vm_utils, "is_vm_shutdown")
- def test_detach_vbds_and_srs_not_shutdown(self, mock_shutdown, mock_unplug,
- mock_find_sr, mock_destroy, mock_purge):
- mock_shutdown.return_value = False
- mock_find_sr.return_value = "sr_ref"
-
- self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref"])
-
- mock_shutdown.assert_called_once_with(self.session, "vm_ref")
- mock_find_sr.assert_called_once_with(self.session, "vbd_ref")
- mock_unplug.assert_called_once_with(self.session, "vbd_ref", "vm_ref")
- mock_destroy.assert_called_once_with(self.session, "vbd_ref")
- mock_purge.assert_called_once_with(self.session, "sr_ref")
-
- @mock.patch.object(volume_utils, "purge_sr")
- @mock.patch.object(vm_utils, "destroy_vbd")
- @mock.patch.object(volume_utils, "find_sr_from_vbd")
- @mock.patch.object(vm_utils, "unplug_vbd")
- @mock.patch.object(vm_utils, "is_vm_shutdown")
- def test_detach_vbds_and_srs_is_shutdown(self, mock_shutdown, mock_unplug,
- mock_find_sr, mock_destroy, mock_purge):
- mock_shutdown.return_value = True
- mock_find_sr.return_value = "sr_ref"
-
- self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref_1", "vbd_ref_2"])
-
- expected = [mock.call(self.session, "vbd_ref_1"),
- mock.call(self.session, "vbd_ref_2")]
- self.assertEqual(expected, mock_destroy.call_args_list)
- mock_purge.assert_called_with(self.session, "sr_ref")
- self.assertFalse(mock_unplug.called)
-
- @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
- @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
- def test_detach_all_no_volumes(self, mock_get_all, mock_detach):
- mock_get_all.return_value = []
-
- self.ops.detach_all("vm_ref")
-
- mock_get_all.assert_called_once_with("vm_ref")
- self.assertFalse(mock_detach.called)
-
- @mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
- @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
- def test_detach_all_volumes(self, mock_get_all, mock_detach):
- mock_get_all.return_value = ["1"]
-
- self.ops.detach_all("vm_ref")
-
- mock_get_all.assert_called_once_with("vm_ref")
- mock_detach.assert_called_once_with("vm_ref", ["1"])
-
- def test_get_all_volume_vbd_refs_no_vbds(self):
- with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
- with mock.patch.object(self.session.VBD,
- "get_other_config") as mock_conf:
- mock_get.return_value = []
-
- result = self.ops._get_all_volume_vbd_refs("vm_ref")
-
- self.assertEqual([], list(result))
- mock_get.assert_called_once_with("vm_ref")
- self.assertFalse(mock_conf.called)
-
- def test_get_all_volume_vbd_refs_no_volumes(self):
- with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
- with mock.patch.object(self.session.VBD,
- "get_other_config") as mock_conf:
- mock_get.return_value = ["1"]
- mock_conf.return_value = {}
-
- result = self.ops._get_all_volume_vbd_refs("vm_ref")
-
- self.assertEqual([], list(result))
- mock_get.assert_called_once_with("vm_ref")
- mock_conf.assert_called_once_with("1")
-
- def test_get_all_volume_vbd_refs_with_volumes(self):
- with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
- with mock.patch.object(self.session.VBD,
- "get_other_config") as mock_conf:
- mock_get.return_value = ["1", "2"]
- mock_conf.return_value = {"osvol": True}
-
- result = self.ops._get_all_volume_vbd_refs("vm_ref")
-
- self.assertEqual(["1", "2"], list(result))
- mock_get.assert_called_once_with("vm_ref")
-
-
-class AttachVolumeTestCase(VolumeOpsTestBase):
- @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
- @mock.patch.object(vm_utils, "vm_ref_or_raise")
- def test_attach_volume_default_hotplug(self, mock_get_vm, mock_attach):
- mock_get_vm.return_value = "vm_ref"
-
- self.ops.attach_volume({}, "instance_name", "/dev/xvda")
-
- mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
- True)
-
- @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
- @mock.patch.object(vm_utils, "vm_ref_or_raise")
- def test_attach_volume_hotplug(self, mock_get_vm, mock_attach):
- mock_get_vm.return_value = "vm_ref"
-
- self.ops.attach_volume({}, "instance_name", "/dev/xvda", False)
-
- mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
- False)
-
- @mock.patch.object(volumeops.VolumeOps, "_attach_volume")
- def test_attach_volume_default_hotplug_connect_volume(self, mock_attach):
- self.ops.connect_volume({})
- mock_attach.assert_called_once_with({})
-
- @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
- @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
- @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
- @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
- def test_attach_volume_with_defaults(self, mock_attach, mock_hypervisor,
- mock_provider, mock_driver):
- connection_info = {"data": {}}
- with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
- mock_provider.return_value = ("sr_ref", "sr_uuid")
- mock_vdi.return_value = "vdi_uuid"
-
- result = self.ops._attach_volume(connection_info)
-
- self.assertEqual(result, ("sr_uuid", "vdi_uuid"))
-
- mock_driver.assert_called_once_with(connection_info)
- mock_provider.assert_called_once_with({}, None)
- mock_hypervisor.assert_called_once_with("sr_ref", {})
- self.assertFalse(mock_attach.called)
-
- @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
- @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
- @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
- @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
- def test_attach_volume_with_hot_attach(self, mock_attach, mock_hypervisor,
- mock_provider, mock_driver):
- connection_info = {"data": {}}
- with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
- mock_provider.return_value = ("sr_ref", "sr_uuid")
- mock_hypervisor.return_value = "vdi_ref"
- mock_vdi.return_value = "vdi_uuid"
-
- result = self.ops._attach_volume(connection_info, "vm_ref",
- "name", 2, True)
-
- self.assertEqual(result, ("sr_uuid", "vdi_uuid"))
-
- mock_driver.assert_called_once_with(connection_info)
- mock_provider.assert_called_once_with({}, "name")
- mock_hypervisor.assert_called_once_with("sr_ref", {})
- mock_attach.assert_called_once_with("vdi_ref", "vm_ref", "name", 2,
- True)
-
- @mock.patch.object(volume_utils, "forget_sr")
- @mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
- @mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
- @mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
- @mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
- def test_attach_volume_cleanup(self, mock_attach, mock_hypervisor,
- mock_provider, mock_driver, mock_forget):
- connection_info = {"data": {}}
- mock_provider.return_value = ("sr_ref", "sr_uuid")
- mock_hypervisor.side_effect = test.TestingException
-
- self.assertRaises(test.TestingException,
- self.ops._attach_volume, connection_info)
-
- mock_driver.assert_called_once_with(connection_info)
- mock_provider.assert_called_once_with({}, None)
- mock_hypervisor.assert_called_once_with("sr_ref", {})
- mock_forget.assert_called_once_with(self.session, "sr_ref")
- self.assertFalse(mock_attach.called)
-
- def test_check_is_supported_driver_type_pass_iscsi(self):
- conn_info = {"driver_volume_type": "iscsi"}
- self.ops._check_is_supported_driver_type(conn_info)
-
- def test_check_is_supported_driver_type_pass_xensm(self):
- conn_info = {"driver_volume_type": "xensm"}
- self.ops._check_is_supported_driver_type(conn_info)
-
- def test_check_is_supported_driver_type_pass_bad(self):
- conn_info = {"driver_volume_type": "bad"}
- self.assertRaises(exception.VolumeDriverNotFound,
- self.ops._check_is_supported_driver_type, conn_info)
-
- @mock.patch.object(volume_utils, "introduce_sr")
- @mock.patch.object(volume_utils, "find_sr_by_uuid")
- @mock.patch.object(volume_utils, "parse_sr_info")
- def test_connect_to_volume_provider_new_sr(self, mock_parse, mock_find_sr,
- mock_introduce_sr):
- mock_parse.return_value = ("uuid", "label", "params")
- mock_find_sr.return_value = None
- mock_introduce_sr.return_value = "sr_ref"
-
- ref, uuid = self.ops._connect_to_volume_provider({}, "name")
-
- self.assertEqual("sr_ref", ref)
- self.assertEqual("uuid", uuid)
- mock_parse.assert_called_once_with({}, "Disk-for:name")
- mock_find_sr.assert_called_once_with(self.session, "uuid")
- mock_introduce_sr.assert_called_once_with(self.session, "uuid",
- "label", "params")
-
- @mock.patch.object(volume_utils, "introduce_sr")
- @mock.patch.object(volume_utils, "find_sr_by_uuid")
- @mock.patch.object(volume_utils, "parse_sr_info")
- def test_connect_to_volume_provider_old_sr(self, mock_parse, mock_find_sr,
- mock_introduce_sr):
- mock_parse.return_value = ("uuid", "label", "params")
- mock_find_sr.return_value = "sr_ref"
-
- ref, uuid = self.ops._connect_to_volume_provider({}, "name")
-
- self.assertEqual("sr_ref", ref)
- self.assertEqual("uuid", uuid)
- mock_parse.assert_called_once_with({}, "Disk-for:name")
- mock_find_sr.assert_called_once_with(self.session, "uuid")
- self.assertFalse(mock_introduce_sr.called)
-
- @mock.patch.object(volume_utils, "introduce_vdi")
- def test_connect_hypervisor_to_volume_regular(self, mock_intro):
- mock_intro.return_value = "vdi"
-
- result = self.ops._connect_hypervisor_to_volume("sr", {})
-
- self.assertEqual("vdi", result)
- mock_intro.assert_called_once_with(self.session, "sr")
-
- @mock.patch.object(volume_utils, "introduce_vdi")
- def test_connect_hypervisor_to_volume_vdi(self, mock_intro):
- mock_intro.return_value = "vdi"
-
- conn = {"vdi_uuid": "id"}
- result = self.ops._connect_hypervisor_to_volume("sr", conn)
-
- self.assertEqual("vdi", result)
- mock_intro.assert_called_once_with(self.session, "sr",
- vdi_uuid="id")
-
- @mock.patch.object(volume_utils, "introduce_vdi")
- def test_connect_hypervisor_to_volume_lun(self, mock_intro):
- mock_intro.return_value = "vdi"
-
- conn = {"target_lun": "lun"}
- result = self.ops._connect_hypervisor_to_volume("sr", conn)
-
- self.assertEqual("vdi", result)
- mock_intro.assert_called_once_with(self.session, "sr",
- target_lun="lun")
-
- @mock.patch.object(vm_utils, "is_vm_shutdown")
- @mock.patch.object(vm_utils, "create_vbd")
- def test_attach_volume_to_vm_plug(self, mock_vbd, mock_shutdown):
- mock_vbd.return_value = "vbd"
- mock_shutdown.return_value = False
-
- with mock.patch.object(self.session.VBD, "plug") as mock_plug:
- self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
- mock_plug.assert_called_once_with("vbd", "vm")
-
- mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
- bootable=False, osvol=True)
- mock_shutdown.assert_called_once_with(self.session, "vm")
-
- @mock.patch.object(vm_utils, "is_vm_shutdown")
- @mock.patch.object(vm_utils, "create_vbd")
- def test_attach_volume_to_vm_no_plug(self, mock_vbd, mock_shutdown):
- mock_vbd.return_value = "vbd"
- mock_shutdown.return_value = True
-
- with mock.patch.object(self.session.VBD, "plug") as mock_plug:
- self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
- self.assertFalse(mock_plug.called)
-
- mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
- bootable=False, osvol=True)
- mock_shutdown.assert_called_once_with(self.session, "vm")
-
- @mock.patch.object(vm_utils, "is_vm_shutdown")
- @mock.patch.object(vm_utils, "create_vbd")
- def test_attach_volume_to_vm_no_hotplug(self, mock_vbd, mock_shutdown):
- mock_vbd.return_value = "vbd"
-
- with mock.patch.object(self.session.VBD, "plug") as mock_plug:
- self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, False)
- self.assertFalse(mock_plug.called)
-
- mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
- bootable=False, osvol=True)
- self.assertFalse(mock_shutdown.called)
-
-
-class FindBadVolumeTestCase(VolumeOpsTestBase):
- @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
- def test_find_bad_volumes_no_vbds(self, mock_get_all):
- mock_get_all.return_value = []
-
- result = self.ops.find_bad_volumes("vm_ref")
-
- mock_get_all.assert_called_once_with("vm_ref")
- self.assertEqual([], result)
-
- @mock.patch.object(volume_utils, "find_sr_from_vbd")
- @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
- def test_find_bad_volumes_no_bad_vbds(self, mock_get_all, mock_find_sr):
- mock_get_all.return_value = ["1", "2"]
- mock_find_sr.return_value = "sr_ref"
-
- with mock.patch.object(self.session.SR, "scan") as mock_scan:
- result = self.ops.find_bad_volumes("vm_ref")
-
- mock_get_all.assert_called_once_with("vm_ref")
- expected_find = [mock.call(self.session, "1"),
- mock.call(self.session, "2")]
- self.assertEqual(expected_find, mock_find_sr.call_args_list)
- expected_scan = [mock.call("sr_ref"), mock.call("sr_ref")]
- self.assertEqual(expected_scan, mock_scan.call_args_list)
- self.assertEqual([], result)
-
- @mock.patch.object(volume_utils, "find_sr_from_vbd")
- @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
- def test_find_bad_volumes_bad_vbds(self, mock_get_all, mock_find_sr):
- mock_get_all.return_value = ["vbd_ref"]
- mock_find_sr.return_value = "sr_ref"
-
- class FakeException(Exception):
- details = ['SR_BACKEND_FAILURE_40', "", "", ""]
-
- session = mock.Mock()
- session.XenAPI.Failure = FakeException
- self.ops._session = session
-
- with mock.patch.object(session.SR, "scan") as mock_scan:
- with mock.patch.object(session.VBD,
- "get_device") as mock_get:
- mock_scan.side_effect = FakeException
- mock_get.return_value = "xvdb"
-
- result = self.ops.find_bad_volumes("vm_ref")
-
- mock_get_all.assert_called_once_with("vm_ref")
- mock_scan.assert_called_once_with("sr_ref")
- mock_get.assert_called_once_with("vbd_ref")
- self.assertEqual(["/dev/xvdb"], result)
-
- @mock.patch.object(volume_utils, "find_sr_from_vbd")
- @mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
- def test_find_bad_volumes_raises(self, mock_get_all, mock_find_sr):
- mock_get_all.return_value = ["vbd_ref"]
- mock_find_sr.return_value = "sr_ref"
-
- class FakeException(Exception):
- details = ['foo', "", "", ""]
-
- session = mock.Mock()
- session.XenAPI.Failure = FakeException
- self.ops._session = session
-
- with mock.patch.object(session.SR, "scan") as mock_scan:
- with mock.patch.object(session.VBD,
- "get_device") as mock_get:
- mock_scan.side_effect = FakeException
- mock_get.return_value = "xvdb"
-
- self.assertRaises(FakeException,
- self.ops.find_bad_volumes, "vm_ref")
- mock_scan.assert_called_once_with("sr_ref")
-
-
-class CleanupFromVDIsTestCase(VolumeOpsTestBase):
- def _check_find_purge_calls(self, find_sr_from_vdi, purge_sr, vdi_refs,
- sr_refs):
- find_sr_calls = [mock.call(self.ops._session, vdi_ref) for vdi_ref
- in vdi_refs]
- find_sr_from_vdi.assert_has_calls(find_sr_calls)
- purge_sr_calls = [mock.call(self.ops._session, sr_ref) for sr_ref
- in sr_refs]
- purge_sr.assert_has_calls(purge_sr_calls)
-
- @mock.patch.object(volume_utils, 'find_sr_from_vdi')
- @mock.patch.object(volume_utils, 'purge_sr')
- def test_safe_cleanup_from_vdis(self, purge_sr, find_sr_from_vdi):
- vdi_refs = ['vdi_ref1', 'vdi_ref2']
- sr_refs = ['sr_ref1', 'sr_ref2']
- find_sr_from_vdi.side_effect = sr_refs
- self.ops.safe_cleanup_from_vdis(vdi_refs)
-
- self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
- sr_refs)
-
- @mock.patch.object(volume_utils, 'find_sr_from_vdi',
- side_effect=[exception.StorageError(reason=''), 'sr_ref2'])
- @mock.patch.object(volume_utils, 'purge_sr')
- def test_safe_cleanup_from_vdis_handles_find_sr_exception(self, purge_sr,
- find_sr_from_vdi):
- vdi_refs = ['vdi_ref1', 'vdi_ref2']
- sr_refs = ['sr_ref2']
- find_sr_from_vdi.side_effect = [exception.StorageError(reason=''),
- sr_refs[0]]
- self.ops.safe_cleanup_from_vdis(vdi_refs)
-
- self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
- sr_refs)
-
- @mock.patch.object(volume_utils, 'find_sr_from_vdi')
- @mock.patch.object(volume_utils, 'purge_sr')
- def test_safe_cleanup_from_vdis_handles_purge_sr_exception(self, purge_sr,
- find_sr_from_vdi):
- vdi_refs = ['vdi_ref1', 'vdi_ref2']
- sr_refs = ['sr_ref1', 'sr_ref2']
- find_sr_from_vdi.side_effect = sr_refs
- purge_sr.side_effects = [test.TestingException, None]
- self.ops.safe_cleanup_from_vdis(vdi_refs)
-
- self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
- sr_refs)
diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py
deleted file mode 100644
index 98e0658f55..0000000000
--- a/nova/tests/virt/xenapi/test_xenapi.py
+++ /dev/null
@@ -1,4104 +0,0 @@
-# Copyright (c) 2010 Citrix Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Test suite for XenAPI."""
-
-import ast
-import base64
-import contextlib
-import copy
-import functools
-import os
-import re
-
-import mock
-import mox
-from oslo.concurrency import lockutils
-from oslo.config import cfg
-from oslo.serialization import jsonutils
-from oslo.utils import importutils
-
-from nova.compute import api as compute_api
-from nova.compute import arch
-from nova.compute import flavors
-from nova.compute import hvtype
-from nova.compute import power_state
-from nova.compute import task_states
-from nova.compute import utils as compute_utils
-from nova.compute import vm_states
-from nova.conductor import api as conductor_api
-from nova import context
-from nova import crypto
-from nova import db
-from nova import exception
-from nova import objects
-from nova.objects import instance as instance_obj
-from nova.openstack.common.fixture import config as config_fixture
-from nova.openstack.common import log as logging
-from nova import test
-from nova.tests.db import fakes as db_fakes
-from nova.tests import fake_instance
-from nova.tests import fake_network
-from nova.tests import fake_processutils
-import nova.tests.image.fake as fake_image
-from nova.tests import matchers
-from nova.tests.objects import test_aggregate
-from nova.tests.virt.xenapi import stubs
-from nova.virt import fake
-from nova.virt.xenapi import agent
-from nova.virt.xenapi.client import session as xenapi_session
-from nova.virt.xenapi import driver as xenapi_conn
-from nova.virt.xenapi import fake as xenapi_fake
-from nova.virt.xenapi import host
-from nova.virt.xenapi.image import glance
-from nova.virt.xenapi import pool
-from nova.virt.xenapi import pool_states
-from nova.virt.xenapi import vm_utils
-from nova.virt.xenapi import vmops
-from nova.virt.xenapi import volume_utils
-
-LOG = logging.getLogger(__name__)
-
-CONF = cfg.CONF
-CONF.import_opt('compute_manager', 'nova.service')
-CONF.import_opt('network_manager', 'nova.service')
-CONF.import_opt('compute_driver', 'nova.virt.driver')
-CONF.import_opt('host', 'nova.netconf')
-CONF.import_opt('default_availability_zone', 'nova.availability_zones')
-CONF.import_opt('login_timeout', 'nova.virt.xenapi.client.session',
- group="xenserver")
-
-IMAGE_MACHINE = '1'
-IMAGE_KERNEL = '2'
-IMAGE_RAMDISK = '3'
-IMAGE_RAW = '4'
-IMAGE_VHD = '5'
-IMAGE_ISO = '6'
-IMAGE_IPXE_ISO = '7'
-IMAGE_FROM_VOLUME = '8'
-
-IMAGE_FIXTURES = {
- IMAGE_MACHINE: {
- 'image_meta': {'name': 'fakemachine', 'size': 0,
- 'disk_format': 'ami',
- 'container_format': 'ami'},
- },
- IMAGE_KERNEL: {
- 'image_meta': {'name': 'fakekernel', 'size': 0,
- 'disk_format': 'aki',
- 'container_format': 'aki'},
- },
- IMAGE_RAMDISK: {
- 'image_meta': {'name': 'fakeramdisk', 'size': 0,
- 'disk_format': 'ari',
- 'container_format': 'ari'},
- },
- IMAGE_RAW: {
- 'image_meta': {'name': 'fakeraw', 'size': 0,
- 'disk_format': 'raw',
- 'container_format': 'bare'},
- },
- IMAGE_VHD: {
- 'image_meta': {'name': 'fakevhd', 'size': 0,
- 'disk_format': 'vhd',
- 'container_format': 'ovf'},
- },
- IMAGE_ISO: {
- 'image_meta': {'name': 'fakeiso', 'size': 0,
- 'disk_format': 'iso',
- 'container_format': 'bare'},
- },
- IMAGE_IPXE_ISO: {
- 'image_meta': {'name': 'fake_ipxe_iso', 'size': 0,
- 'disk_format': 'iso',
- 'container_format': 'bare',
- 'properties': {'ipxe_boot': 'true'}},
- },
- IMAGE_FROM_VOLUME: {
- 'image_meta': {'name': 'fake_ipxe_iso',
- 'properties': {'foo': 'bar'}},
- },
-}
-
-
-def get_session():
- return xenapi_session.XenAPISession('test_url', 'root', 'test_pass')
-
-
-def set_image_fixtures():
- image_service = fake_image.FakeImageService()
- image_service.images.clear()
- for image_id, image_meta in IMAGE_FIXTURES.items():
- image_meta = image_meta['image_meta']
- image_meta['id'] = image_id
- image_service.create(None, image_meta)
-
-
-def get_fake_device_info():
- # FIXME: 'sr_uuid', 'introduce_sr_keys', sr_type and vdi_uuid
- # can be removed from the dict when LP bug #1087308 is fixed
- fake_vdi_ref = xenapi_fake.create_vdi('fake-vdi', None)
- fake_vdi_uuid = xenapi_fake.get_record('VDI', fake_vdi_ref)['uuid']
- fake = {'block_device_mapping':
- [{'connection_info': {'driver_volume_type': 'iscsi',
- 'data': {'sr_uuid': 'falseSR',
- 'introduce_sr_keys': ['sr_type'],
- 'sr_type': 'iscsi',
- 'vdi_uuid': fake_vdi_uuid,
- 'target_discovered': False,
- 'target_iqn': 'foo_iqn:foo_volid',
- 'target_portal': 'localhost:3260',
- 'volume_id': 'foo_volid',
- 'target_lun': 1,
- 'auth_password': 'my-p@55w0rd',
- 'auth_username': 'johndoe',
- 'auth_method': u'CHAP'}, },
- 'mount_device': 'vda',
- 'delete_on_termination': False}, ],
- 'root_device_name': '/dev/sda',
- 'ephemerals': [],
- 'swap': None, }
- return fake
-
-
-def stub_vm_utils_with_vdi_attached_here(function):
- """vm_utils.with_vdi_attached_here needs to be stubbed out because it
- calls down to the filesystem to attach a vdi. This provides a
- decorator to handle that.
- """
- @functools.wraps(function)
- def decorated_function(self, *args, **kwargs):
- @contextlib.contextmanager
- def fake_vdi_attached_here(*args, **kwargs):
- fake_dev = 'fakedev'
- yield fake_dev
-
- def fake_image_download(*args, **kwargs):
- pass
-
- orig_vdi_attached_here = vm_utils.vdi_attached_here
- orig_image_download = fake_image._FakeImageService.download
- try:
- vm_utils.vdi_attached_here = fake_vdi_attached_here
- fake_image._FakeImageService.download = fake_image_download
- return function(self, *args, **kwargs)
- finally:
- fake_image._FakeImageService.download = orig_image_download
- vm_utils.vdi_attached_here = orig_vdi_attached_here
-
- return decorated_function
-
-
-def get_create_system_metadata(context, instance_type_id):
- flavor = db.flavor_get(context, instance_type_id)
- return flavors.save_flavor_info({}, flavor)
-
-
-def create_instance_with_system_metadata(context, instance_values):
- instance_values['system_metadata'] = get_create_system_metadata(
- context, instance_values['instance_type_id'])
- instance_values['pci_devices'] = []
- return db.instance_create(context, instance_values)
-
-
-class XenAPIVolumeTestCase(stubs.XenAPITestBaseNoDB):
- """Unit tests for Volume operations."""
- def setUp(self):
- super(XenAPIVolumeTestCase, self).setUp()
- self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
- self.fixture.config(disable_process_locking=True,
- group='oslo_concurrency')
- self.flags(firewall_driver='nova.virt.xenapi.firewall.'
- 'Dom0IptablesFirewallDriver')
- self.flags(connection_url='test_url',
- connection_password='test_pass',
- group='xenserver')
-
- self.instance = fake_instance.fake_db_instance(name='foo')
-
- @classmethod
- def _make_connection_info(cls):
- target_iqn = 'iqn.2010-10.org.openstack:volume-00000001'
- return {'driver_volume_type': 'iscsi',
- 'data': {'volume_id': 1,
- 'target_iqn': target_iqn,
- 'target_portal': '127.0.0.1:3260,fake',
- 'target_lun': None,
- 'auth_method': 'CHAP',
- 'auth_username': 'username',
- 'auth_password': 'password'}}
-
- def test_attach_volume(self):
- # This shows how to test Ops classes' methods.
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- vm = xenapi_fake.create_vm(self.instance['name'], 'Running')
- conn_info = self._make_connection_info()
- self.assertIsNone(
- conn.attach_volume(None, conn_info, self.instance, '/dev/sdc'))
-
- # check that the VM has a VBD attached to it
- # Get XenAPI record for VBD
- vbds = xenapi_fake.get_all('VBD')
- vbd = xenapi_fake.get_record('VBD', vbds[0])
- vm_ref = vbd['VM']
- self.assertEqual(vm_ref, vm)
-
- def test_attach_volume_raise_exception(self):
- # This shows how to test when exceptions are raised.
- stubs.stubout_session(self.stubs,
- stubs.FakeSessionForVolumeFailedTests)
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- xenapi_fake.create_vm(self.instance['name'], 'Running')
- self.assertRaises(exception.VolumeDriverNotFound,
- conn.attach_volume,
- None, {'driver_volume_type': 'nonexist'},
- self.instance, '/dev/sdc')
-
-
-# FIXME(sirp): convert this to use XenAPITestBaseNoDB
-class XenAPIVMTestCase(stubs.XenAPITestBase):
- """Unit tests for VM operations."""
- def setUp(self):
- super(XenAPIVMTestCase, self).setUp()
- self.useFixture(test.SampleNetworks())
- self.network = importutils.import_object(CONF.network_manager)
- self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
- self.fixture.config(disable_process_locking=True,
- group='oslo_concurrency')
- self.flags(instance_name_template='%d',
- firewall_driver='nova.virt.xenapi.firewall.'
- 'Dom0IptablesFirewallDriver')
- self.flags(connection_url='test_url',
- connection_password='test_pass',
- group='xenserver')
- db_fakes.stub_out_db_instance_api(self.stubs)
- xenapi_fake.create_network('fake', 'fake_br1')
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- stubs.stubout_get_this_vm_uuid(self.stubs)
- stubs.stub_out_vm_methods(self.stubs)
- fake_processutils.stub_out_processutils_execute(self.stubs)
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id, self.project_id)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- self.conn._session.is_local_connection = False
-
- fake_image.stub_out_image_service(self.stubs)
- set_image_fixtures()
- stubs.stubout_image_service_download(self.stubs)
- stubs.stubout_stream_disk(self.stubs)
-
- def fake_inject_instance_metadata(self, instance, vm):
- pass
- self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
- fake_inject_instance_metadata)
-
- def fake_safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
- name_label = "fakenamelabel"
- disk_type = "fakedisktype"
- virtual_size = 777
- return vm_utils.create_vdi(
- session, sr_ref, instance, name_label, disk_type,
- virtual_size)
- self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi)
-
- def tearDown(self):
- fake_image.FakeImageService_reset()
- super(XenAPIVMTestCase, self).tearDown()
-
- def test_init_host(self):
- session = get_session()
- vm = vm_utils._get_this_vm_ref(session)
- # Local root disk
- vdi0 = xenapi_fake.create_vdi('compute', None)
- vbd0 = xenapi_fake.create_vbd(vm, vdi0)
- # Instance VDI
- vdi1 = xenapi_fake.create_vdi('instance-aaaa', None,
- other_config={'nova_instance_uuid': 'aaaa'})
- xenapi_fake.create_vbd(vm, vdi1)
- # Only looks like instance VDI
- vdi2 = xenapi_fake.create_vdi('instance-bbbb', None)
- vbd2 = xenapi_fake.create_vbd(vm, vdi2)
-
- self.conn.init_host(None)
- self.assertEqual(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2]))
-
- def test_instance_exists(self):
- self.mox.StubOutWithMock(vm_utils, 'lookup')
- vm_utils.lookup(mox.IgnoreArg(), 'foo').AndReturn(True)
- self.mox.ReplayAll()
-
- self.stubs.Set(objects.Instance, 'name', 'foo')
- instance = objects.Instance(uuid='fake-uuid')
- self.assertTrue(self.conn.instance_exists(instance))
-
- def test_instance_not_exists(self):
- self.mox.StubOutWithMock(vm_utils, 'lookup')
- vm_utils.lookup(mox.IgnoreArg(), 'bar').AndReturn(None)
- self.mox.ReplayAll()
-
- self.stubs.Set(objects.Instance, 'name', 'bar')
- instance = objects.Instance(uuid='fake-uuid')
- self.assertFalse(self.conn.instance_exists(instance))
-
- def test_list_instances_0(self):
- instances = self.conn.list_instances()
- self.assertEqual(instances, [])
-
- def test_list_instance_uuids_0(self):
- instance_uuids = self.conn.list_instance_uuids()
- self.assertEqual(instance_uuids, [])
-
- def test_list_instance_uuids(self):
- uuids = []
- for x in xrange(1, 4):
- instance = self._create_instance(x)
- uuids.append(instance['uuid'])
- instance_uuids = self.conn.list_instance_uuids()
- self.assertEqual(len(uuids), len(instance_uuids))
- self.assertEqual(set(uuids), set(instance_uuids))
-
- def test_get_rrd_server(self):
- self.flags(connection_url='myscheme://myaddress/',
- group='xenserver')
- server_info = vm_utils._get_rrd_server()
- self.assertEqual(server_info[0], 'myscheme')
- self.assertEqual(server_info[1], 'myaddress')
-
- expected_raw_diagnostics = {
- 'vbd_xvdb_write': '0.0',
- 'memory_target': '4294967296.0000',
- 'memory_internal_free': '1415564.0000',
- 'memory': '4294967296.0000',
- 'vbd_xvda_write': '0.0',
- 'cpu0': '0.0042',
- 'vif_0_tx': '287.4134',
- 'vbd_xvda_read': '0.0',
- 'vif_0_rx': '1816.0144',
- 'vif_2_rx': '0.0',
- 'vif_2_tx': '0.0',
- 'vbd_xvdb_read': '0.0',
- 'last_update': '1328795567',
- }
-
- def test_get_diagnostics(self):
- def fake_get_rrd(host, vm_uuid):
- path = os.path.dirname(os.path.realpath(__file__))
- with open(os.path.join(path, 'vm_rrd.xml')) as f:
- return re.sub(r'\s', '', f.read())
- self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
-
- expected = self.expected_raw_diagnostics
- instance = self._create_instance()
- actual = self.conn.get_diagnostics(instance)
- self.assertThat(actual, matchers.DictMatches(expected))
-
- def test_get_instance_diagnostics(self):
- def fake_get_rrd(host, vm_uuid):
- path = os.path.dirname(os.path.realpath(__file__))
- with open(os.path.join(path, 'vm_rrd.xml')) as f:
- return re.sub(r'\s', '', f.read())
- self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
-
- expected = {
- 'config_drive': False,
- 'state': 'running',
- 'driver': 'xenapi',
- 'version': '1.0',
- 'uptime': 0,
- 'hypervisor_os': None,
- 'cpu_details': [{'time': 0}, {'time': 0},
- {'time': 0}, {'time': 0}],
- 'nic_details': [{'mac_address': '00:00:00:00:00:00',
- 'rx_drop': 0,
- 'rx_errors': 0,
- 'rx_octets': 0,
- 'rx_packets': 0,
- 'tx_drop': 0,
- 'tx_errors': 0,
- 'tx_octets': 0,
- 'tx_packets': 0}],
- 'disk_details': [{'errors_count': 0,
- 'id': '',
- 'read_bytes': 0,
- 'read_requests': 0,
- 'write_bytes': 0,
- 'write_requests': 0}],
- 'memory_details': {'maximum': 8192, 'used': 0}}
-
- instance = self._create_instance()
- actual = self.conn.get_instance_diagnostics(instance)
- self.assertEqual(expected, actual.serialize())
-
- def test_get_vnc_console(self):
- instance = self._create_instance(obj=True)
- session = get_session()
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- vm_ref = vm_utils.lookup(session, instance['name'])
-
- console = conn.get_vnc_console(self.context, instance)
-
- # Note(sulo): We don't care about session id in test
- # they will always differ so strip that out
- actual_path = console.internal_access_path.split('&')[0]
- expected_path = "/console?ref=%s" % str(vm_ref)
-
- self.assertEqual(expected_path, actual_path)
-
- def test_get_vnc_console_for_rescue(self):
- instance = self._create_instance(obj=True)
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
- 'Running')
- # Set instance state to rescued
- instance['vm_state'] = 'rescued'
-
- console = conn.get_vnc_console(self.context, instance)
-
- # Note(sulo): We don't care about session id in test
- # they will always differ so strip that out
- actual_path = console.internal_access_path.split('&')[0]
- expected_path = "/console?ref=%s" % str(rescue_vm)
-
- self.assertEqual(expected_path, actual_path)
-
- def test_get_vnc_console_instance_not_ready(self):
- instance = self._create_instance(obj=True, spawn=False)
- instance.vm_state = 'building'
-
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- self.assertRaises(exception.InstanceNotFound,
- conn.get_vnc_console, self.context, instance)
-
- def test_get_vnc_console_rescue_not_ready(self):
- instance = self._create_instance(obj=True, spawn=False)
- instance.vm_state = 'rescued'
-
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- self.assertRaises(exception.InstanceNotReady,
- conn.get_vnc_console, self.context, instance)
-
- def test_instance_snapshot_fails_with_no_primary_vdi(self):
-
- def create_bad_vbd(session, vm_ref, vdi_ref, userdevice,
- vbd_type='disk', read_only=False, bootable=False,
- osvol=False):
- vbd_rec = {'VM': vm_ref,
- 'VDI': vdi_ref,
- 'userdevice': 'fake',
- 'currently_attached': False}
- vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
- xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
- return vbd_ref
-
- self.stubs.Set(vm_utils, 'create_vbd', create_bad_vbd)
- stubs.stubout_instance_snapshot(self.stubs)
- # Stubbing out firewall driver as previous stub sets alters
- # xml rpc result parsing
- stubs.stubout_firewall_driver(self.stubs, self.conn)
- instance = self._create_instance()
-
- image_id = "my_snapshot_id"
- self.assertRaises(exception.NovaException, self.conn.snapshot,
- self.context, instance, image_id,
- lambda *args, **kwargs: None)
-
- def test_instance_snapshot(self):
- expected_calls = [
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
- {'args': (),
- 'kwargs':
- {'task_state': task_states.IMAGE_UPLOADING,
- 'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
- func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
- image_id = "my_snapshot_id"
-
- stubs.stubout_instance_snapshot(self.stubs)
- stubs.stubout_is_snapshot(self.stubs)
- # Stubbing out firewall driver as previous stub sets alters
- # xml rpc result parsing
- stubs.stubout_firewall_driver(self.stubs, self.conn)
-
- instance = self._create_instance()
-
- self.fake_upload_called = False
-
- def fake_image_upload(_self, ctx, session, inst, img_id, vdi_uuids):
- self.fake_upload_called = True
- self.assertEqual(ctx, self.context)
- self.assertEqual(inst, instance)
- self.assertIsInstance(vdi_uuids, list)
- self.assertEqual(img_id, image_id)
-
- self.stubs.Set(glance.GlanceStore, 'upload_image',
- fake_image_upload)
-
- self.conn.snapshot(self.context, instance, image_id,
- func_call_matcher.call)
-
- # Ensure VM was torn down
- vm_labels = []
- for vm_ref in xenapi_fake.get_all('VM'):
- vm_rec = xenapi_fake.get_record('VM', vm_ref)
- if not vm_rec["is_control_domain"]:
- vm_labels.append(vm_rec["name_label"])
-
- self.assertEqual(vm_labels, [instance['name']])
-
- # Ensure VBDs were torn down
- vbd_labels = []
- for vbd_ref in xenapi_fake.get_all('VBD'):
- vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
- vbd_labels.append(vbd_rec["vm_name_label"])
-
- self.assertEqual(vbd_labels, [instance['name']])
-
- # Ensure task states changed in correct order
- self.assertIsNone(func_call_matcher.match())
-
- # Ensure VDIs were torn down
- for vdi_ref in xenapi_fake.get_all('VDI'):
- vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
- name_label = vdi_rec["name_label"]
- self.assertFalse(name_label.endswith('snapshot'))
-
- self.assertTrue(self.fake_upload_called)
-
- def create_vm_record(self, conn, os_type, name):
- instances = conn.list_instances()
- self.assertEqual(instances, [name])
-
- # Get Nova record for VM
- vm_info = conn.get_info({'name': name})
- # Get XenAPI record for VM
- vms = [rec for ref, rec
- in xenapi_fake.get_all_records('VM').iteritems()
- if not rec['is_control_domain']]
- vm = vms[0]
- self.vm_info = vm_info
- self.vm = vm
-
- def check_vm_record(self, conn, instance_type_id, check_injection):
- flavor = db.flavor_get(conn, instance_type_id)
- mem_kib = long(flavor['memory_mb']) << 10
- mem_bytes = str(mem_kib << 10)
- vcpus = flavor['vcpus']
- vcpu_weight = flavor['vcpu_weight']
-
- self.assertEqual(self.vm_info['max_mem'], mem_kib)
- self.assertEqual(self.vm_info['mem'], mem_kib)
- self.assertEqual(self.vm['memory_static_max'], mem_bytes)
- self.assertEqual(self.vm['memory_dynamic_max'], mem_bytes)
- self.assertEqual(self.vm['memory_dynamic_min'], mem_bytes)
- self.assertEqual(self.vm['VCPUs_max'], str(vcpus))
- self.assertEqual(self.vm['VCPUs_at_startup'], str(vcpus))
- if vcpu_weight is None:
- self.assertEqual(self.vm['VCPUs_params'], {})
- else:
- self.assertEqual(self.vm['VCPUs_params'],
- {'weight': str(vcpu_weight), 'cap': '0'})
-
- # Check that the VM is running according to Nova
- self.assertEqual(self.vm_info['state'], power_state.RUNNING)
-
- # Check that the VM is running according to XenAPI.
- self.assertEqual(self.vm['power_state'], 'Running')
-
- if check_injection:
- xenstore_data = self.vm['xenstore_data']
- self.assertNotIn('vm-data/hostname', xenstore_data)
- key = 'vm-data/networking/DEADBEEF0001'
- xenstore_value = xenstore_data[key]
- tcpip_data = ast.literal_eval(xenstore_value)
- self.assertEqual(tcpip_data,
- {'broadcast': '192.168.1.255',
- 'dns': ['192.168.1.4', '192.168.1.3'],
- 'gateway': '192.168.1.1',
- 'gateway_v6': '2001:db8:0:1::1',
- 'ip6s': [{'enabled': '1',
- 'ip': '2001:db8:0:1:dcad:beff:feef:1',
- 'netmask': 64,
- 'gateway': '2001:db8:0:1::1'}],
- 'ips': [{'enabled': '1',
- 'ip': '192.168.1.100',
- 'netmask': '255.255.255.0',
- 'gateway': '192.168.1.1'},
- {'enabled': '1',
- 'ip': '192.168.1.101',
- 'netmask': '255.255.255.0',
- 'gateway': '192.168.1.1'}],
- 'label': 'test1',
- 'mac': 'DE:AD:BE:EF:00:01'})
-
- def check_vm_params_for_windows(self):
- self.assertEqual(self.vm['platform']['nx'], 'true')
- self.assertEqual(self.vm['HVM_boot_params'], {'order': 'dc'})
- self.assertEqual(self.vm['HVM_boot_policy'], 'BIOS order')
-
- # check that these are not set
- self.assertEqual(self.vm['PV_args'], '')
- self.assertEqual(self.vm['PV_bootloader'], '')
- self.assertEqual(self.vm['PV_kernel'], '')
- self.assertEqual(self.vm['PV_ramdisk'], '')
-
- def check_vm_params_for_linux(self):
- self.assertEqual(self.vm['platform']['nx'], 'false')
- self.assertEqual(self.vm['PV_args'], '')
- self.assertEqual(self.vm['PV_bootloader'], 'pygrub')
-
- # check that these are not set
- self.assertEqual(self.vm['PV_kernel'], '')
- self.assertEqual(self.vm['PV_ramdisk'], '')
- self.assertEqual(self.vm['HVM_boot_params'], {})
- self.assertEqual(self.vm['HVM_boot_policy'], '')
-
- def check_vm_params_for_linux_with_external_kernel(self):
- self.assertEqual(self.vm['platform']['nx'], 'false')
- self.assertEqual(self.vm['PV_args'], 'root=/dev/xvda1')
- self.assertNotEqual(self.vm['PV_kernel'], '')
- self.assertNotEqual(self.vm['PV_ramdisk'], '')
-
- # check that these are not set
- self.assertEqual(self.vm['HVM_boot_params'], {})
- self.assertEqual(self.vm['HVM_boot_policy'], '')
-
- def _list_vdis(self):
- session = get_session()
- return session.call_xenapi('VDI.get_all')
-
- def _list_vms(self):
- session = get_session()
- return session.call_xenapi('VM.get_all')
-
- def _check_vdis(self, start_list, end_list):
- for vdi_ref in end_list:
- if vdi_ref not in start_list:
- vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
- # If the cache is turned on then the base disk will be
- # there even after the cleanup
- if 'other_config' in vdi_rec:
- if 'image-id' not in vdi_rec['other_config']:
- self.fail('Found unexpected VDI:%s' % vdi_ref)
- else:
- self.fail('Found unexpected VDI:%s' % vdi_ref)
-
- def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
- instance_type_id="3", os_type="linux",
- hostname="test", architecture="x86-64", instance_id=1,
- injected_files=None, check_injection=False,
- create_record=True, empty_dns=False,
- block_device_info=None,
- key_data=None):
- if injected_files is None:
- injected_files = []
-
- # Fake out inject_instance_metadata
- def fake_inject_instance_metadata(self, instance, vm):
- pass
- self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
- fake_inject_instance_metadata)
-
- if create_record:
- instance = objects.Instance(context=self.context)
- instance.project_id = self.project_id
- instance.user_id = self.user_id
- instance.image_ref = image_ref
- instance.kernel_id = kernel_id
- instance.ramdisk_id = ramdisk_id
- instance.root_gb = 20
- instance.ephemeral_gb = 0
- instance.instance_type_id = instance_type_id
- instance.os_type = os_type
- instance.hostname = hostname
- instance.key_data = key_data
- instance.architecture = architecture
- instance.system_metadata = get_create_system_metadata(
- self.context, instance_type_id)
- instance.create()
- else:
- instance = objects.Instance.get_by_id(self.context, instance_id)
-
- network_info = fake_network.fake_get_instance_nw_info(self.stubs)
- if empty_dns:
- # NOTE(tr3buchet): this is a terrible way to do this...
- network_info[0]['network']['subnets'][0]['dns'] = []
-
- image_meta = {}
- if image_ref:
- image_meta = IMAGE_FIXTURES[image_ref]["image_meta"]
- self.conn.spawn(self.context, instance, image_meta, injected_files,
- 'herp', network_info, block_device_info)
- self.create_vm_record(self.conn, os_type, instance['name'])
- self.check_vm_record(self.conn, instance_type_id, check_injection)
- self.assertEqual(instance['os_type'], os_type)
- self.assertEqual(instance['architecture'], architecture)
-
- def test_spawn_ipxe_iso_success(self):
- self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
- vm_utils.get_sr_path(mox.IgnoreArg()).AndReturn('/sr/path')
-
- self.flags(ipxe_network_name='test1',
- ipxe_boot_menu_url='http://boot.example.com',
- ipxe_mkisofs_cmd='/root/mkisofs',
- group='xenserver')
- self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
- self.conn._session.call_plugin_serialized(
- 'ipxe', 'inject', '/sr/path', mox.IgnoreArg(),
- 'http://boot.example.com', '192.168.1.100', '255.255.255.0',
- '192.168.1.1', '192.168.1.3', '/root/mkisofs')
-
- self.mox.ReplayAll()
- self._test_spawn(IMAGE_IPXE_ISO, None, None)
-
- def test_spawn_ipxe_iso_no_network_name(self):
- self.flags(ipxe_network_name=None,
- ipxe_boot_menu_url='http://boot.example.com',
- group='xenserver')
-
- # call_plugin_serialized shouldn't be called
- self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
-
- self.mox.ReplayAll()
- self._test_spawn(IMAGE_IPXE_ISO, None, None)
-
- def test_spawn_ipxe_iso_no_boot_menu_url(self):
- self.flags(ipxe_network_name='test1',
- ipxe_boot_menu_url=None,
- group='xenserver')
-
- # call_plugin_serialized shouldn't be called
- self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
-
- self.mox.ReplayAll()
- self._test_spawn(IMAGE_IPXE_ISO, None, None)
-
- def test_spawn_ipxe_iso_unknown_network_name(self):
- self.flags(ipxe_network_name='test2',
- ipxe_boot_menu_url='http://boot.example.com',
- group='xenserver')
-
- # call_plugin_serialized shouldn't be called
- self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
-
- self.mox.ReplayAll()
- self._test_spawn(IMAGE_IPXE_ISO, None, None)
-
- def test_spawn_empty_dns(self):
- # Test spawning with an empty dns list.
- self._test_spawn(IMAGE_VHD, None, None,
- os_type="linux", architecture="x86-64",
- empty_dns=True)
- self.check_vm_params_for_linux()
-
- def test_spawn_not_enough_memory(self):
- self.assertRaises(exception.InsufficientFreeMemory,
- self._test_spawn,
- '1', 2, 3, "4") # m1.xlarge
-
- def test_spawn_fail_cleanup_1(self):
- """Simulates an error while downloading an image.
-
- Verifies that the VM and VDIs created are properly cleaned up.
- """
- vdi_recs_start = self._list_vdis()
- start_vms = self._list_vms()
- stubs.stubout_fetch_disk_image(self.stubs, raise_failure=True)
- self.assertRaises(xenapi_fake.Failure,
- self._test_spawn, '1', 2, 3)
- # No additional VDI should be found.
- vdi_recs_end = self._list_vdis()
- end_vms = self._list_vms()
- self._check_vdis(vdi_recs_start, vdi_recs_end)
- # No additional VMs should be found.
- self.assertEqual(start_vms, end_vms)
-
- def test_spawn_fail_cleanup_2(self):
- """Simulates an error while creating VM record.
-
- Verifies that the VM and VDIs created are properly cleaned up.
- """
- vdi_recs_start = self._list_vdis()
- start_vms = self._list_vms()
- stubs.stubout_create_vm(self.stubs)
- self.assertRaises(xenapi_fake.Failure,
- self._test_spawn, '1', 2, 3)
- # No additional VDI should be found.
- vdi_recs_end = self._list_vdis()
- end_vms = self._list_vms()
- self._check_vdis(vdi_recs_start, vdi_recs_end)
- # No additional VMs should be found.
- self.assertEqual(start_vms, end_vms)
-
- def test_spawn_fail_cleanup_3(self):
- """Simulates an error while attaching disks.
-
- Verifies that the VM and VDIs created are properly cleaned up.
- """
- stubs.stubout_attach_disks(self.stubs)
- vdi_recs_start = self._list_vdis()
- start_vms = self._list_vms()
- self.assertRaises(xenapi_fake.Failure,
- self._test_spawn, '1', 2, 3)
- # No additional VDI should be found.
- vdi_recs_end = self._list_vdis()
- end_vms = self._list_vms()
- self._check_vdis(vdi_recs_start, vdi_recs_end)
- # No additional VMs should be found.
- self.assertEqual(start_vms, end_vms)
-
- def test_spawn_raw_glance(self):
- self._test_spawn(IMAGE_RAW, None, None, os_type=None)
- self.check_vm_params_for_windows()
-
- def test_spawn_vhd_glance_linux(self):
- self._test_spawn(IMAGE_VHD, None, None,
- os_type="linux", architecture="x86-64")
- self.check_vm_params_for_linux()
-
- def test_spawn_vhd_glance_windows(self):
- self._test_spawn(IMAGE_VHD, None, None,
- os_type="windows", architecture="i386",
- instance_type_id=5)
- self.check_vm_params_for_windows()
-
- def test_spawn_iso_glance(self):
- self._test_spawn(IMAGE_ISO, None, None,
- os_type="windows", architecture="i386")
- self.check_vm_params_for_windows()
-
- def test_spawn_glance(self):
-
- def fake_fetch_disk_image(context, session, instance, name_label,
- image_id, image_type):
- sr_ref = vm_utils.safe_find_sr(session)
- image_type_str = vm_utils.ImageType.to_string(image_type)
- vdi_ref = vm_utils.create_vdi(session, sr_ref, instance,
- name_label, image_type_str, "20")
- vdi_role = vm_utils.ImageType.get_role(image_type)
- vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
- return {vdi_role: dict(uuid=vdi_uuid, file=None)}
- self.stubs.Set(vm_utils, '_fetch_disk_image',
- fake_fetch_disk_image)
-
- self._test_spawn(IMAGE_MACHINE,
- IMAGE_KERNEL,
- IMAGE_RAMDISK)
- self.check_vm_params_for_linux_with_external_kernel()
-
- def test_spawn_boot_from_volume_no_image_meta(self):
- dev_info = get_fake_device_info()
- self._test_spawn(None, None, None,
- block_device_info=dev_info)
-
- def test_spawn_boot_from_volume_no_glance_image_meta(self):
- dev_info = get_fake_device_info()
- self._test_spawn(IMAGE_FROM_VOLUME, None, None,
- block_device_info=dev_info)
-
- def test_spawn_boot_from_volume_with_image_meta(self):
- dev_info = get_fake_device_info()
- self._test_spawn(IMAGE_VHD, None, None,
- block_device_info=dev_info)
-
- def test_spawn_netinject_file(self):
- self.flags(flat_injected=True)
- db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
-
- self._tee_executed = False
-
- def _tee_handler(cmd, **kwargs):
- actual = kwargs.get('process_input', None)
- expected = """\
-# Injected by Nova on instance boot
-#
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-auto eth0
-iface eth0 inet static
- address 192.168.1.100
- netmask 255.255.255.0
- broadcast 192.168.1.255
- gateway 192.168.1.1
- dns-nameservers 192.168.1.3 192.168.1.4
-iface eth0 inet6 static
- address 2001:db8:0:1:dcad:beff:feef:1
- netmask 64
- gateway 2001:db8:0:1::1
-"""
- self.assertEqual(expected, actual)
- self._tee_executed = True
- return '', ''
-
- def _readlink_handler(cmd_parts, **kwargs):
- return os.path.realpath(cmd_parts[2]), ''
-
- fake_processutils.fake_execute_set_repliers([
- # Capture the tee .../etc/network/interfaces command
- (r'tee.*interfaces', _tee_handler),
- (r'readlink -nm.*', _readlink_handler),
- ])
- self._test_spawn(IMAGE_MACHINE,
- IMAGE_KERNEL,
- IMAGE_RAMDISK,
- check_injection=True)
- self.assertTrue(self._tee_executed)
-
- def test_spawn_netinject_xenstore(self):
- db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
-
- self._tee_executed = False
-
- def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
- # When mounting, create real files under the mountpoint to simulate
- # files in the mounted filesystem
-
- # mount point will be the last item of the command list
- self._tmpdir = cmd[len(cmd) - 1]
- LOG.debug('Creating files in %s to simulate guest agent',
- self._tmpdir)
- os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
- # Touch the file using open
- open(os.path.join(self._tmpdir, 'usr', 'sbin',
- 'xe-update-networking'), 'w').close()
- return '', ''
-
- def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
- # Umount would normally make files in the mounted filesystem
- # disappear, so do that here
- LOG.debug('Removing simulated guest agent files in %s',
- self._tmpdir)
- os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
- 'xe-update-networking'))
- os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
- os.rmdir(os.path.join(self._tmpdir, 'usr'))
- return '', ''
-
- def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
- self._tee_executed = True
- return '', ''
-
- fake_processutils.fake_execute_set_repliers([
- (r'mount', _mount_handler),
- (r'umount', _umount_handler),
- (r'tee.*interfaces', _tee_handler)])
- self._test_spawn('1', 2, 3, check_injection=True)
-
- # tee must not run in this case, where an injection-capable
- # guest agent is detected
- self.assertFalse(self._tee_executed)
-
- def test_spawn_injects_auto_disk_config_to_xenstore(self):
- instance = self._create_instance(spawn=False)
- self.mox.StubOutWithMock(self.conn._vmops, '_inject_auto_disk_config')
- self.conn._vmops._inject_auto_disk_config(instance, mox.IgnoreArg())
- self.mox.ReplayAll()
- self.conn.spawn(self.context, instance,
- IMAGE_FIXTURES['1']["image_meta"], [], 'herp', '')
-
- def test_spawn_vlanmanager(self):
- self.flags(network_manager='nova.network.manager.VlanManager',
- vlan_interface='fake0')
-
- def dummy(*args, **kwargs):
- pass
-
- self.stubs.Set(vmops.VMOps, '_create_vifs', dummy)
- # Reset network table
- xenapi_fake.reset_table('network')
- # Instance id = 2 will use vlan network (see db/fakes.py)
- ctxt = self.context.elevated()
- self.network.conductor_api = conductor_api.LocalAPI()
- self._create_instance(2, False)
- networks = self.network.db.network_get_all(ctxt)
- with mock.patch('nova.objects.network.Network._from_db_object'):
- for network in networks:
- self.network.set_network_host(ctxt, network)
-
- self.network.allocate_for_instance(ctxt,
- instance_id=2,
- instance_uuid='00000000-0000-0000-0000-000000000002',
- host=CONF.host,
- vpn=None,
- rxtx_factor=3,
- project_id=self.project_id,
- macs=None)
- self._test_spawn(IMAGE_MACHINE,
- IMAGE_KERNEL,
- IMAGE_RAMDISK,
- instance_id=2,
- create_record=False)
- # TODO(salvatore-orlando): a complete test here would require
- # a check for making sure the bridge for the VM's VIF is
- # consistent with bridge specified in nova db
-
- def test_spawn_with_network_qos(self):
- self._create_instance()
- for vif_ref in xenapi_fake.get_all('VIF'):
- vif_rec = xenapi_fake.get_record('VIF', vif_ref)
- self.assertEqual(vif_rec['qos_algorithm_type'], 'ratelimit')
- self.assertEqual(vif_rec['qos_algorithm_params']['kbps'],
- str(3 * 10 * 1024))
-
- def test_spawn_ssh_key_injection(self):
- # Test spawning with key_data on an instance. Should use
- # agent file injection.
- self.flags(use_agent_default=True,
- group='xenserver')
- actual_injected_files = []
-
- def fake_inject_file(self, method, args):
- path = base64.b64decode(args['b64_path'])
- contents = base64.b64decode(args['b64_contents'])
- actual_injected_files.append((path, contents))
- return jsonutils.dumps({'returncode': '0', 'message': 'success'})
-
- self.stubs.Set(stubs.FakeSessionForVMTests,
- '_plugin_agent_inject_file', fake_inject_file)
-
- def fake_encrypt_text(sshkey, new_pass):
- self.assertEqual("ssh-rsa fake_keydata", sshkey)
- return "fake"
-
- self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
-
- expected_data = ('\n# The following ssh key was injected by '
- 'Nova\nssh-rsa fake_keydata\n')
-
- injected_files = [('/root/.ssh/authorized_keys', expected_data)]
- self._test_spawn(IMAGE_VHD, None, None,
- os_type="linux", architecture="x86-64",
- key_data='ssh-rsa fake_keydata')
- self.assertEqual(actual_injected_files, injected_files)
-
- def test_spawn_ssh_key_injection_non_rsa(self):
- # Test spawning with key_data on an instance. Should use
- # agent file injection.
- self.flags(use_agent_default=True,
- group='xenserver')
- actual_injected_files = []
-
- def fake_inject_file(self, method, args):
- path = base64.b64decode(args['b64_path'])
- contents = base64.b64decode(args['b64_contents'])
- actual_injected_files.append((path, contents))
- return jsonutils.dumps({'returncode': '0', 'message': 'success'})
-
- self.stubs.Set(stubs.FakeSessionForVMTests,
- '_plugin_agent_inject_file', fake_inject_file)
-
- def fake_encrypt_text(sshkey, new_pass):
- raise NotImplementedError("Should not be called")
-
- self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
-
- expected_data = ('\n# The following ssh key was injected by '
- 'Nova\nssh-dsa fake_keydata\n')
-
- injected_files = [('/root/.ssh/authorized_keys', expected_data)]
- self._test_spawn(IMAGE_VHD, None, None,
- os_type="linux", architecture="x86-64",
- key_data='ssh-dsa fake_keydata')
- self.assertEqual(actual_injected_files, injected_files)
-
- def test_spawn_injected_files(self):
- # Test spawning with injected_files.
- self.flags(use_agent_default=True,
- group='xenserver')
- actual_injected_files = []
-
- def fake_inject_file(self, method, args):
- path = base64.b64decode(args['b64_path'])
- contents = base64.b64decode(args['b64_contents'])
- actual_injected_files.append((path, contents))
- return jsonutils.dumps({'returncode': '0', 'message': 'success'})
- self.stubs.Set(stubs.FakeSessionForVMTests,
- '_plugin_agent_inject_file', fake_inject_file)
-
- injected_files = [('/tmp/foo', 'foobar')]
- self._test_spawn(IMAGE_VHD, None, None,
- os_type="linux", architecture="x86-64",
- injected_files=injected_files)
- self.check_vm_params_for_linux()
- self.assertEqual(actual_injected_files, injected_files)
-
- @mock.patch('nova.db.agent_build_get_by_triple')
- def test_spawn_agent_upgrade(self, mock_get):
- self.flags(use_agent_default=True,
- group='xenserver')
-
- mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
- "hypervisor": "xen", "os": "windows",
- "url": "url", "md5hash": "asdf",
- 'created_at': None, 'updated_at': None,
- 'deleted_at': None, 'deleted': False,
- 'id': 1}
-
- self._test_spawn(IMAGE_VHD, None, None,
- os_type="linux", architecture="x86-64")
-
- @mock.patch('nova.db.agent_build_get_by_triple')
- def test_spawn_agent_upgrade_fails_silently(self, mock_get):
- mock_get.return_value = {"version": "1.1.0", "architecture": "x86-64",
- "hypervisor": "xen", "os": "windows",
- "url": "url", "md5hash": "asdf",
- 'created_at': None, 'updated_at': None,
- 'deleted_at': None, 'deleted': False,
- 'id': 1}
-
- self._test_spawn_fails_silently_with(exception.AgentError,
- method="_plugin_agent_agentupdate", failure="fake_error")
-
- def test_spawn_with_resetnetwork_alternative_returncode(self):
- self.flags(use_agent_default=True,
- group='xenserver')
-
- def fake_resetnetwork(self, method, args):
- fake_resetnetwork.called = True
- # NOTE(johngarbutt): as returned by FreeBSD and Gentoo
- return jsonutils.dumps({'returncode': '500',
- 'message': 'success'})
- self.stubs.Set(stubs.FakeSessionForVMTests,
- '_plugin_agent_resetnetwork', fake_resetnetwork)
- fake_resetnetwork.called = False
-
- self._test_spawn(IMAGE_VHD, None, None,
- os_type="linux", architecture="x86-64")
- self.assertTrue(fake_resetnetwork.called)
-
- def _test_spawn_fails_silently_with(self, expected_exception_cls,
- method="_plugin_agent_version",
- failure=None, value=None):
- self.flags(use_agent_default=True,
- agent_version_timeout=0,
- group='xenserver')
-
- def fake_agent_call(self, method, args):
- if failure:
- raise xenapi_fake.Failure([failure])
- else:
- return value
-
- self.stubs.Set(stubs.FakeSessionForVMTests,
- method, fake_agent_call)
-
- called = {}
-
- def fake_add_instance_fault(*args, **kwargs):
- called["fake_add_instance_fault"] = args[2]
-
- self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
- fake_add_instance_fault)
-
- self._test_spawn(IMAGE_VHD, None, None,
- os_type="linux", architecture="x86-64")
- actual_exception = called["fake_add_instance_fault"]
- self.assertIsInstance(actual_exception, expected_exception_cls)
-
- def test_spawn_fails_silently_with_agent_timeout(self):
- self._test_spawn_fails_silently_with(exception.AgentTimeout,
- failure="TIMEOUT:fake")
-
- def test_spawn_fails_silently_with_agent_not_implemented(self):
- self._test_spawn_fails_silently_with(exception.AgentNotImplemented,
- failure="NOT IMPLEMENTED:fake")
-
- def test_spawn_fails_silently_with_agent_error(self):
- self._test_spawn_fails_silently_with(exception.AgentError,
- failure="fake_error")
-
- def test_spawn_fails_silently_with_agent_bad_return(self):
- error = jsonutils.dumps({'returncode': -1, 'message': 'fake'})
- self._test_spawn_fails_silently_with(exception.AgentError,
- value=error)
-
- def test_rescue(self):
- instance = self._create_instance(spawn=False)
- xenapi_fake.create_vm(instance['name'], 'Running')
-
- session = get_session()
- vm_ref = vm_utils.lookup(session, instance['name'])
-
- swap_vdi_ref = xenapi_fake.create_vdi('swap', None)
- root_vdi_ref = xenapi_fake.create_vdi('root', None)
- eph1_vdi_ref = xenapi_fake.create_vdi('eph', None)
- eph2_vdi_ref = xenapi_fake.create_vdi('eph', None)
- vol_vdi_ref = xenapi_fake.create_vdi('volume', None)
-
- xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=2)
- xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0)
- xenapi_fake.create_vbd(vm_ref, eph1_vdi_ref, userdevice=4)
- xenapi_fake.create_vbd(vm_ref, eph2_vdi_ref, userdevice=5)
- xenapi_fake.create_vbd(vm_ref, vol_vdi_ref, userdevice=6,
- other_config={'osvol': True})
-
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- image_meta = {'id': IMAGE_VHD,
- 'disk_format': 'vhd'}
- conn.rescue(self.context, instance, [], image_meta, '')
-
- vm = xenapi_fake.get_record('VM', vm_ref)
- rescue_name = "%s-rescue" % vm["name_label"]
- rescue_ref = vm_utils.lookup(session, rescue_name)
- rescue_vm = xenapi_fake.get_record('VM', rescue_ref)
-
- vdi_refs = {}
- for vbd_ref in rescue_vm['VBDs']:
- vbd = xenapi_fake.get_record('VBD', vbd_ref)
- vdi_refs[vbd['VDI']] = vbd['userdevice']
-
- self.assertEqual('1', vdi_refs[root_vdi_ref])
- self.assertEqual('2', vdi_refs[swap_vdi_ref])
- self.assertEqual('4', vdi_refs[eph1_vdi_ref])
- self.assertEqual('5', vdi_refs[eph2_vdi_ref])
- self.assertNotIn(vol_vdi_ref, vdi_refs)
-
- def test_rescue_preserve_disk_on_failure(self):
- # test that the original disk is preserved if rescue setup fails
- # bug #1227898
- instance = self._create_instance()
- session = get_session()
- image_meta = {'id': IMAGE_VHD,
- 'disk_format': 'vhd'}
-
- vm_ref = vm_utils.lookup(session, instance['name'])
- vdi_ref, vdi_rec = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
-
- # raise an error in the spawn setup process and trigger the
- # undo manager logic:
- def fake_start(*args, **kwargs):
- raise test.TestingException('Start Error')
-
- self.stubs.Set(self.conn._vmops, '_start', fake_start)
-
- self.assertRaises(test.TestingException, self.conn.rescue,
- self.context, instance, [], image_meta, '')
-
- # confirm original disk still exists:
- vdi_ref2, vdi_rec2 = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
- self.assertEqual(vdi_ref, vdi_ref2)
- self.assertEqual(vdi_rec['uuid'], vdi_rec2['uuid'])
-
- def test_unrescue(self):
- instance = self._create_instance()
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- # Unrescue expects the original instance to be powered off
- conn.power_off(instance)
- xenapi_fake.create_vm(instance['name'] + '-rescue', 'Running')
- conn.unrescue(instance, None)
-
- def test_unrescue_not_in_rescue(self):
- instance = self._create_instance()
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- # Ensure that it will not unrescue a non-rescued instance.
- self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
- instance, None)
-
- def test_finish_revert_migration(self):
- instance = self._create_instance()
-
- class VMOpsMock():
-
- def __init__(self):
- self.finish_revert_migration_called = False
-
- def finish_revert_migration(self, context, instance, block_info,
- power_on):
- self.finish_revert_migration_called = True
-
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- conn._vmops = VMOpsMock()
- conn.finish_revert_migration(self.context, instance, None)
- self.assertTrue(conn._vmops.finish_revert_migration_called)
-
- def test_reboot_hard(self):
- instance = self._create_instance()
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- conn.reboot(self.context, instance, None, "HARD")
-
- def test_poll_rebooting_instances(self):
- self.mox.StubOutWithMock(compute_api.API, 'reboot')
- compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg())
- self.mox.ReplayAll()
- instance = self._create_instance()
- instances = [instance]
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- conn.poll_rebooting_instances(60, instances)
-
- def test_reboot_soft(self):
- instance = self._create_instance()
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- conn.reboot(self.context, instance, None, "SOFT")
-
- def test_reboot_halted(self):
- session = get_session()
- instance = self._create_instance(spawn=False)
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- xenapi_fake.create_vm(instance['name'], 'Halted')
- conn.reboot(self.context, instance, None, "SOFT")
- vm_ref = vm_utils.lookup(session, instance['name'])
- vm = xenapi_fake.get_record('VM', vm_ref)
- self.assertEqual(vm['power_state'], 'Running')
-
- def test_reboot_unknown_state(self):
- instance = self._create_instance(spawn=False)
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- xenapi_fake.create_vm(instance['name'], 'Unknown')
- self.assertRaises(xenapi_fake.Failure, conn.reboot, self.context,
- instance, None, "SOFT")
-
- def test_reboot_rescued(self):
- instance = self._create_instance()
- instance['vm_state'] = vm_states.RESCUED
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- real_result = vm_utils.lookup(conn._session, instance['name'])
-
- self.mox.StubOutWithMock(vm_utils, 'lookup')
- vm_utils.lookup(conn._session, instance['name'],
- True).AndReturn(real_result)
- self.mox.ReplayAll()
-
- conn.reboot(self.context, instance, None, "SOFT")
-
- def test_get_console_output_succeeds(self):
-
- def fake_get_console_output(instance):
- self.assertEqual("instance", instance)
- return "console_log"
- self.stubs.Set(self.conn._vmops, 'get_console_output',
- fake_get_console_output)
-
- self.assertEqual(self.conn.get_console_output('context', "instance"),
- "console_log")
-
- def _test_maintenance_mode(self, find_host, find_aggregate):
- real_call_xenapi = self.conn._session.call_xenapi
- instance = self._create_instance(spawn=True)
- api_calls = {}
-
- # Record all the xenapi calls, and return a fake list of hosts
- # for the host.get_all call
- def fake_call_xenapi(method, *args):
- api_calls[method] = args
- if method == 'host.get_all':
- return ['foo', 'bar', 'baz']
- return real_call_xenapi(method, *args)
- self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi)
-
- def fake_aggregate_get(context, host, key):
- if find_aggregate:
- return [test_aggregate.fake_aggregate]
- else:
- return []
- self.stubs.Set(db, 'aggregate_get_by_host',
- fake_aggregate_get)
-
- def fake_host_find(context, session, src, dst):
- if find_host:
- return 'bar'
- else:
- raise exception.NoValidHost("I saw this one coming...")
- self.stubs.Set(host, '_host_find', fake_host_find)
-
- result = self.conn.host_maintenance_mode('bar', 'on_maintenance')
- self.assertEqual(result, 'on_maintenance')
-
- # We expect the VM.pool_migrate call to have been called to
- # migrate our instance to the 'bar' host
- vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
- host_ref = "foo"
- expected = (vm_ref, host_ref, {"live": "true"})
- self.assertEqual(api_calls.get('VM.pool_migrate'), expected)
-
- instance = db.instance_get_by_uuid(self.context, instance['uuid'])
- self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
- self.assertEqual(instance['task_state'], task_states.MIGRATING)
-
- def test_maintenance_mode(self):
- self._test_maintenance_mode(True, True)
-
- def test_maintenance_mode_no_host(self):
- self.assertRaises(exception.NoValidHost,
- self._test_maintenance_mode, False, True)
-
- def test_maintenance_mode_no_aggregate(self):
- self.assertRaises(exception.NotFound,
- self._test_maintenance_mode, True, False)
-
- def test_uuid_find(self):
- self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
- fake_inst = fake_instance.fake_db_instance(id=123)
- fake_inst2 = fake_instance.fake_db_instance(id=456)
- db.instance_get_all_by_host(self.context, fake_inst['host'],
- columns_to_join=None,
- use_slave=False
- ).AndReturn([fake_inst, fake_inst2])
- self.mox.ReplayAll()
- expected_name = CONF.instance_name_template % fake_inst['id']
- inst_uuid = host._uuid_find(self.context, fake_inst['host'],
- expected_name)
- self.assertEqual(inst_uuid, fake_inst['uuid'])
-
- def test_session_virtapi(self):
- was = {'called': False}
-
- def fake_aggregate_get_by_host(self, *args, **kwargs):
- was['called'] = True
- raise test.TestingException()
- self.stubs.Set(db, "aggregate_get_by_host",
- fake_aggregate_get_by_host)
-
- self.stubs.Set(self.conn._session, "is_slave", True)
-
- self.assertRaises(test.TestingException,
- self.conn._session._get_host_uuid)
- self.assertTrue(was['called'])
-
- def test_per_instance_usage_running(self):
- instance = self._create_instance(spawn=True)
- flavor = flavors.get_flavor(3)
-
- expected = {instance['uuid']: {'memory_mb': flavor['memory_mb'],
- 'uuid': instance['uuid']}}
- actual = self.conn.get_per_instance_usage()
- self.assertEqual(expected, actual)
-
- # Paused instances still consume resources:
- self.conn.pause(instance)
- actual = self.conn.get_per_instance_usage()
- self.assertEqual(expected, actual)
-
- def test_per_instance_usage_suspended(self):
- # Suspended instances do not consume memory:
- instance = self._create_instance(spawn=True)
- self.conn.suspend(instance)
- actual = self.conn.get_per_instance_usage()
- self.assertEqual({}, actual)
-
- def test_per_instance_usage_halted(self):
- instance = self._create_instance(spawn=True)
- self.conn.power_off(instance)
- actual = self.conn.get_per_instance_usage()
- self.assertEqual({}, actual)
-
- def _create_instance(self, instance_id=1, spawn=True, obj=False, **attrs):
- """Creates and spawns a test instance."""
- instance_values = {
- 'id': instance_id,
- 'uuid': '00000000-0000-0000-0000-00000000000%d' % instance_id,
- 'display_name': 'host-%d' % instance_id,
- 'project_id': self.project_id,
- 'user_id': self.user_id,
- 'image_ref': 1,
- 'kernel_id': 2,
- 'ramdisk_id': 3,
- 'root_gb': 80,
- 'ephemeral_gb': 0,
- 'instance_type_id': '3', # m1.large
- 'os_type': 'linux',
- 'vm_mode': 'hvm',
- 'architecture': 'x86-64'}
- instance_values.update(attrs)
-
- instance = create_instance_with_system_metadata(self.context,
- instance_values)
- network_info = fake_network.fake_get_instance_nw_info(self.stubs)
- image_meta = {'id': IMAGE_VHD,
- 'disk_format': 'vhd'}
- if spawn:
- self.conn.spawn(self.context, instance, image_meta, [], 'herp',
- network_info)
- if obj:
- instance = objects.Instance._from_db_object(
- self.context, objects.Instance(), instance,
- expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
- return instance
-
- def test_destroy_clean_up_kernel_and_ramdisk(self):
- def fake_lookup_kernel_ramdisk(session, vm_ref):
- return "kernel", "ramdisk"
-
- self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
- fake_lookup_kernel_ramdisk)
-
- def fake_destroy_kernel_ramdisk(session, instance, kernel, ramdisk):
- fake_destroy_kernel_ramdisk.called = True
- self.assertEqual("kernel", kernel)
- self.assertEqual("ramdisk", ramdisk)
-
- fake_destroy_kernel_ramdisk.called = False
-
- self.stubs.Set(vm_utils, "destroy_kernel_ramdisk",
- fake_destroy_kernel_ramdisk)
-
- instance = self._create_instance(spawn=True)
- network_info = fake_network.fake_get_instance_nw_info(self.stubs)
- self.conn.destroy(self.context, instance, network_info)
-
- vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
- self.assertIsNone(vm_ref)
- self.assertTrue(fake_destroy_kernel_ramdisk.called)
-
-
-class XenAPIDiffieHellmanTestCase(test.NoDBTestCase):
- """Unit tests for Diffie-Hellman code."""
- def setUp(self):
- super(XenAPIDiffieHellmanTestCase, self).setUp()
- self.alice = agent.SimpleDH()
- self.bob = agent.SimpleDH()
-
- def test_shared(self):
- alice_pub = self.alice.get_public()
- bob_pub = self.bob.get_public()
- alice_shared = self.alice.compute_shared(bob_pub)
- bob_shared = self.bob.compute_shared(alice_pub)
- self.assertEqual(alice_shared, bob_shared)
-
- def _test_encryption(self, message):
- enc = self.alice.encrypt(message)
- self.assertFalse(enc.endswith('\n'))
- dec = self.bob.decrypt(enc)
- self.assertEqual(dec, message)
-
- def test_encrypt_simple_message(self):
- self._test_encryption('This is a simple message.')
-
- def test_encrypt_message_with_newlines_at_end(self):
- self._test_encryption('This message has a newline at the end.\n')
-
- def test_encrypt_many_newlines_at_end(self):
- self._test_encryption('Message with lotsa newlines.\n\n\n')
-
- def test_encrypt_newlines_inside_message(self):
- self._test_encryption('Message\nwith\ninterior\nnewlines.')
-
- def test_encrypt_with_leading_newlines(self):
- self._test_encryption('\n\nMessage with leading newlines.')
-
- def test_encrypt_really_long_message(self):
- self._test_encryption(''.join(['abcd' for i in xrange(1024)]))
-
-
-# FIXME(sirp): convert this to use XenAPITestBaseNoDB
-class XenAPIMigrateInstance(stubs.XenAPITestBase):
- """Unit test for verifying migration-related actions."""
-
- REQUIRES_LOCKING = True
-
- def setUp(self):
- super(XenAPIMigrateInstance, self).setUp()
- self.flags(connection_url='test_url',
- connection_password='test_pass',
- group='xenserver')
- self.flags(firewall_driver='nova.virt.xenapi.firewall.'
- 'Dom0IptablesFirewallDriver')
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- db_fakes.stub_out_db_instance_api(self.stubs)
- xenapi_fake.create_network('fake', 'fake_br1')
- self.user_id = 'fake'
- self.project_id = 'fake'
- self.context = context.RequestContext(self.user_id, self.project_id)
- self.instance_values = {'id': 1,
- 'project_id': self.project_id,
- 'user_id': self.user_id,
- 'image_ref': 1,
- 'kernel_id': None,
- 'ramdisk_id': None,
- 'root_gb': 80,
- 'ephemeral_gb': 0,
- 'instance_type_id': '3', # m1.large
- 'os_type': 'linux',
- 'architecture': 'x86-64'}
-
- migration_values = {
- 'source_compute': 'nova-compute',
- 'dest_compute': 'nova-compute',
- 'dest_host': '10.127.5.114',
- 'status': 'post-migrating',
- 'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7',
- 'old_instance_type_id': 5,
- 'new_instance_type_id': 1
- }
- self.migration = db.migration_create(
- context.get_admin_context(), migration_values)
-
- fake_processutils.stub_out_processutils_execute(self.stubs)
- stubs.stub_out_migration_methods(self.stubs)
- stubs.stubout_get_this_vm_uuid(self.stubs)
-
- def fake_inject_instance_metadata(self, instance, vm):
- pass
- self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
- fake_inject_instance_metadata)
-
- def test_migrate_disk_and_power_off(self):
- instance = db.instance_create(self.context, self.instance_values)
- xenapi_fake.create_vm(instance['name'], 'Running')
- flavor = {"root_gb": 80, 'ephemeral_gb': 0}
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- conn.migrate_disk_and_power_off(self.context, instance,
- '127.0.0.1', flavor, None)
-
- def test_migrate_disk_and_power_off_passes_exceptions(self):
- instance = db.instance_create(self.context, self.instance_values)
- xenapi_fake.create_vm(instance['name'], 'Running')
- flavor = {"root_gb": 80, 'ephemeral_gb': 0}
-
- def fake_raise(*args, **kwargs):
- raise exception.MigrationError(reason='test failure')
- self.stubs.Set(vmops.VMOps, "_migrate_disk_resizing_up", fake_raise)
-
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- self.assertRaises(exception.MigrationError,
- conn.migrate_disk_and_power_off,
- self.context, instance,
- '127.0.0.1', flavor, None)
-
- def test_migrate_disk_and_power_off_throws_on_zero_gb_resize_down(self):
- instance = db.instance_create(self.context, self.instance_values)
- flavor = {"root_gb": 0, 'ephemeral_gb': 0}
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- self.assertRaises(exception.ResizeError,
- conn.migrate_disk_and_power_off,
- self.context, instance,
- 'fake_dest', flavor, None)
-
- def test_migrate_disk_and_power_off_with_zero_gb_old_and_new_works(self):
- flavor = {"root_gb": 0, 'ephemeral_gb': 0}
- values = copy.copy(self.instance_values)
- values["root_gb"] = 0
- values["ephemeral_gb"] = 0
- instance = db.instance_create(self.context, values)
- xenapi_fake.create_vm(instance['name'], 'Running')
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- conn.migrate_disk_and_power_off(self.context, instance,
- '127.0.0.1', flavor, None)
-
- def _test_revert_migrate(self, power_on):
- instance = create_instance_with_system_metadata(self.context,
- self.instance_values)
- self.called = False
- self.fake_vm_start_called = False
- self.fake_finish_revert_migration_called = False
- context = 'fake_context'
-
- def fake_vm_start(*args, **kwargs):
- self.fake_vm_start_called = True
-
- def fake_vdi_resize(*args, **kwargs):
- self.called = True
-
- def fake_finish_revert_migration(*args, **kwargs):
- self.fake_finish_revert_migration_called = True
-
- self.stubs.Set(stubs.FakeSessionForVMTests,
- "VDI_resize_online", fake_vdi_resize)
- self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
- self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
- fake_finish_revert_migration)
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
- product_version=(4, 0, 0),
- product_brand='XenServer')
-
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- network_info = fake_network.fake_get_instance_nw_info(self.stubs)
- image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
- base = xenapi_fake.create_vdi('hurr', 'fake')
- base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
- cow = xenapi_fake.create_vdi('durr', 'fake')
- cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid']
- conn.finish_migration(self.context, self.migration, instance,
- dict(base_copy=base_uuid, cow=cow_uuid),
- network_info, image_meta, resize_instance=True,
- block_device_info=None, power_on=power_on)
- self.assertEqual(self.called, True)
- self.assertEqual(self.fake_vm_start_called, power_on)
-
- conn.finish_revert_migration(context, instance, network_info)
- self.assertEqual(self.fake_finish_revert_migration_called, True)
-
- def test_revert_migrate_power_on(self):
- self._test_revert_migrate(True)
-
- def test_revert_migrate_power_off(self):
- self._test_revert_migrate(False)
-
- def _test_finish_migrate(self, power_on):
- instance = create_instance_with_system_metadata(self.context,
- self.instance_values)
- self.called = False
- self.fake_vm_start_called = False
-
- def fake_vm_start(*args, **kwargs):
- self.fake_vm_start_called = True
-
- def fake_vdi_resize(*args, **kwargs):
- self.called = True
-
- self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
- self.stubs.Set(stubs.FakeSessionForVMTests,
- "VDI_resize_online", fake_vdi_resize)
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
- product_version=(4, 0, 0),
- product_brand='XenServer')
-
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- network_info = fake_network.fake_get_instance_nw_info(self.stubs)
- image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
- conn.finish_migration(self.context, self.migration, instance,
- dict(base_copy='hurr', cow='durr'),
- network_info, image_meta, resize_instance=True,
- block_device_info=None, power_on=power_on)
- self.assertEqual(self.called, True)
- self.assertEqual(self.fake_vm_start_called, power_on)
-
- def test_finish_migrate_power_on(self):
- self._test_finish_migrate(True)
-
- def test_finish_migrate_power_off(self):
- self._test_finish_migrate(False)
-
- def test_finish_migrate_no_local_storage(self):
- values = copy.copy(self.instance_values)
- values["root_gb"] = 0
- values["ephemeral_gb"] = 0
- instance = create_instance_with_system_metadata(self.context, values)
-
- def fake_vdi_resize(*args, **kwargs):
- raise Exception("This shouldn't be called")
-
- self.stubs.Set(stubs.FakeSessionForVMTests,
- "VDI_resize_online", fake_vdi_resize)
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- network_info = fake_network.fake_get_instance_nw_info(self.stubs)
- image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
- conn.finish_migration(self.context, self.migration, instance,
- dict(base_copy='hurr', cow='durr'),
- network_info, image_meta, resize_instance=True)
-
- def test_finish_migrate_no_resize_vdi(self):
- instance = create_instance_with_system_metadata(self.context,
- self.instance_values)
-
- def fake_vdi_resize(*args, **kwargs):
- raise Exception("This shouldn't be called")
-
- self.stubs.Set(stubs.FakeSessionForVMTests,
- "VDI_resize_online", fake_vdi_resize)
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- network_info = fake_network.fake_get_instance_nw_info(self.stubs)
- # Resize instance would be determined by the compute call
- image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
- conn.finish_migration(self.context, self.migration, instance,
- dict(base_copy='hurr', cow='durr'),
- network_info, image_meta, resize_instance=False)
-
- @stub_vm_utils_with_vdi_attached_here
- def test_migrate_too_many_partitions_no_resize_down(self):
- instance_values = self.instance_values
- instance = db.instance_create(self.context, instance_values)
- xenapi_fake.create_vm(instance['name'], 'Running')
- flavor = db.flavor_get_by_name(self.context, 'm1.small')
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- def fake_get_partitions(partition):
- return [(1, 2, 3, 4, "", ""), (1, 2, 3, 4, "", "")]
-
- self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
-
- self.assertRaises(exception.InstanceFaultRollback,
- conn.migrate_disk_and_power_off,
- self.context, instance,
- '127.0.0.1', flavor, None)
-
- @stub_vm_utils_with_vdi_attached_here
- def test_migrate_bad_fs_type_no_resize_down(self):
- instance_values = self.instance_values
- instance = db.instance_create(self.context, instance_values)
- xenapi_fake.create_vm(instance['name'], 'Running')
- flavor = db.flavor_get_by_name(self.context, 'm1.small')
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- def fake_get_partitions(partition):
- return [(1, 2, 3, "ext2", "", "boot")]
-
- self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
-
- self.assertRaises(exception.InstanceFaultRollback,
- conn.migrate_disk_and_power_off,
- self.context, instance,
- '127.0.0.1', flavor, None)
-
- def test_migrate_rollback_when_resize_down_fs_fails(self):
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- vmops = conn._vmops
-
- self.mox.StubOutWithMock(vmops, '_resize_ensure_vm_is_shutdown')
- self.mox.StubOutWithMock(vmops, '_apply_orig_vm_name_label')
- self.mox.StubOutWithMock(vm_utils, 'resize_disk')
- self.mox.StubOutWithMock(vm_utils, 'migrate_vhd')
- self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
- self.mox.StubOutWithMock(vm_utils, 'get_vdi_for_vm_safely')
- self.mox.StubOutWithMock(vmops, '_restore_orig_vm_and_cleanup_orphan')
-
- instance = objects.Instance(context=self.context,
- auto_disk_config=True, uuid='uuid')
- instance.obj_reset_changes()
- vm_ref = "vm_ref"
- dest = "dest"
- flavor = "type"
- sr_path = "sr_path"
-
- vmops._resize_ensure_vm_is_shutdown(instance, vm_ref)
- vmops._apply_orig_vm_name_label(instance, vm_ref)
- old_vdi_ref = "old_ref"
- vm_utils.get_vdi_for_vm_safely(vmops._session, vm_ref).AndReturn(
- (old_vdi_ref, None))
- new_vdi_ref = "new_ref"
- new_vdi_uuid = "new_uuid"
- vm_utils.resize_disk(vmops._session, instance, old_vdi_ref,
- flavor).AndReturn((new_vdi_ref, new_vdi_uuid))
- vm_utils.migrate_vhd(vmops._session, instance, new_vdi_uuid, dest,
- sr_path, 0).AndRaise(
- exception.ResizeError(reason="asdf"))
-
- vm_utils.destroy_vdi(vmops._session, new_vdi_ref)
- vmops._restore_orig_vm_and_cleanup_orphan(instance)
-
- self.mox.ReplayAll()
-
- with mock.patch.object(instance, 'save') as mock_save:
- self.assertRaises(exception.InstanceFaultRollback,
- vmops._migrate_disk_resizing_down, self.context,
- instance, dest, flavor, vm_ref, sr_path)
- self.assertEqual(3, mock_save.call_count)
- self.assertEqual(60.0, instance.progress)
-
- def test_resize_ensure_vm_is_shutdown_cleanly(self):
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- vmops = conn._vmops
- fake_instance = {'uuid': 'uuid'}
-
- self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
- self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
- self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
-
- vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
- vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
- "ref").AndReturn(True)
-
- self.mox.ReplayAll()
-
- vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
-
- def test_resize_ensure_vm_is_shutdown_forced(self):
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- vmops = conn._vmops
- fake_instance = {'uuid': 'uuid'}
-
- self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
- self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
- self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
-
- vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
- vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
- "ref").AndReturn(False)
- vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
- "ref").AndReturn(True)
-
- self.mox.ReplayAll()
-
- vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
-
- def test_resize_ensure_vm_is_shutdown_fails(self):
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- vmops = conn._vmops
- fake_instance = {'uuid': 'uuid'}
-
- self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
- self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
- self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
-
- vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
- vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
- "ref").AndReturn(False)
- vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
- "ref").AndReturn(False)
-
- self.mox.ReplayAll()
-
- self.assertRaises(exception.ResizeError,
- vmops._resize_ensure_vm_is_shutdown, fake_instance, "ref")
-
- def test_resize_ensure_vm_is_shutdown_already_shutdown(self):
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- vmops = conn._vmops
- fake_instance = {'uuid': 'uuid'}
-
- self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
- self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
- self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
-
- vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(True)
-
- self.mox.ReplayAll()
-
- vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
-
-
-class XenAPIImageTypeTestCase(test.NoDBTestCase):
- """Test ImageType class."""
-
- def test_to_string(self):
- # Can convert from type id to type string.
- self.assertEqual(
- vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL),
- vm_utils.ImageType.KERNEL_STR)
-
- def _assert_role(self, expected_role, image_type_id):
- self.assertEqual(
- expected_role,
- vm_utils.ImageType.get_role(image_type_id))
-
- def test_get_image_role_kernel(self):
- self._assert_role('kernel', vm_utils.ImageType.KERNEL)
-
- def test_get_image_role_ramdisk(self):
- self._assert_role('ramdisk', vm_utils.ImageType.RAMDISK)
-
- def test_get_image_role_disk(self):
- self._assert_role('root', vm_utils.ImageType.DISK)
-
- def test_get_image_role_disk_raw(self):
- self._assert_role('root', vm_utils.ImageType.DISK_RAW)
-
- def test_get_image_role_disk_vhd(self):
- self._assert_role('root', vm_utils.ImageType.DISK_VHD)
-
-
-class XenAPIDetermineDiskImageTestCase(test.NoDBTestCase):
- """Unit tests for code that detects the ImageType."""
- def assert_disk_type(self, image_meta, expected_disk_type):
- actual = vm_utils.determine_disk_image_type(image_meta)
- self.assertEqual(expected_disk_type, actual)
-
- def test_machine(self):
- image_meta = {'id': 'a', 'disk_format': 'ami'}
- self.assert_disk_type(image_meta, vm_utils.ImageType.DISK)
-
- def test_raw(self):
- image_meta = {'id': 'a', 'disk_format': 'raw'}
- self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_RAW)
-
- def test_vhd(self):
- image_meta = {'id': 'a', 'disk_format': 'vhd'}
- self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD)
-
- def test_none(self):
- image_meta = None
- self.assert_disk_type(image_meta, None)
-
-
-# FIXME(sirp): convert this to use XenAPITestBaseNoDB
-class XenAPIHostTestCase(stubs.XenAPITestBase):
- """Tests HostState, which holds metrics from XenServer that get
- reported back to the Schedulers.
- """
-
- def setUp(self):
- super(XenAPIHostTestCase, self).setUp()
- self.flags(connection_url='test_url',
- connection_password='test_pass',
- group='xenserver')
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.context = context.get_admin_context()
- self.flags(use_local=True, group='conductor')
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- self.instance = fake_instance.fake_db_instance(name='foo')
-
- def test_host_state(self):
- stats = self.conn.host_state.get_host_stats(False)
- # Values from fake.create_local_srs (ext SR)
- self.assertEqual(stats['disk_total'], 40000)
- self.assertEqual(stats['disk_used'], 20000)
- # Values from fake._plugin_xenhost_host_data
- self.assertEqual(stats['host_memory_total'], 10)
- self.assertEqual(stats['host_memory_overhead'], 20)
- self.assertEqual(stats['host_memory_free'], 30)
- self.assertEqual(stats['host_memory_free_computed'], 40)
- self.assertEqual(stats['hypervisor_hostname'], 'fake-xenhost')
- self.assertThat({'cpu_count': 50},
- matchers.DictMatches(stats['host_cpu_info']))
- # No VMs running
- self.assertEqual(stats['vcpus_used'], 0)
-
- def test_host_state_vcpus_used(self):
- stats = self.conn.host_state.get_host_stats(True)
- self.assertEqual(stats['vcpus_used'], 0)
- xenapi_fake.create_vm(self.instance['name'], 'Running')
- stats = self.conn.host_state.get_host_stats(True)
- self.assertEqual(stats['vcpus_used'], 4)
-
- def test_pci_passthrough_devices_whitelist(self):
- # NOTE(guillaume-thouvenin): This pci whitelist will be used to
- # match with _plugin_xenhost_get_pci_device_details method in fake.py.
- white_list = '{"vendor_id":"10de", "product_id":"11bf"}'
- self.flags(pci_passthrough_whitelist=[white_list])
- stats = self.conn.host_state.get_host_stats(False)
- self.assertEqual(len(stats['pci_passthrough_devices']), 1)
-
- def test_pci_passthrough_devices_no_whitelist(self):
- stats = self.conn.host_state.get_host_stats(False)
- self.assertEqual(len(stats['pci_passthrough_devices']), 0)
-
- def test_host_state_missing_sr(self):
- # Must trigger construction of 'host_state' property
- # before introducing the stub which raises the error
- hs = self.conn.host_state
-
- def fake_safe_find_sr(session):
- raise exception.StorageRepositoryNotFound('not there')
-
- self.stubs.Set(vm_utils, 'safe_find_sr', fake_safe_find_sr)
- self.assertRaises(exception.StorageRepositoryNotFound,
- hs.get_host_stats,
- refresh=True)
-
- def _test_host_action(self, method, action, expected=None):
- result = method('host', action)
- if not expected:
- expected = action
- self.assertEqual(result, expected)
-
- def test_host_reboot(self):
- self._test_host_action(self.conn.host_power_action, 'reboot')
-
- def test_host_shutdown(self):
- self._test_host_action(self.conn.host_power_action, 'shutdown')
-
- def test_host_startup(self):
- self.assertRaises(NotImplementedError,
- self.conn.host_power_action, 'host', 'startup')
-
- def test_host_maintenance_on(self):
- self._test_host_action(self.conn.host_maintenance_mode,
- True, 'on_maintenance')
-
- def test_host_maintenance_off(self):
- self._test_host_action(self.conn.host_maintenance_mode,
- False, 'off_maintenance')
-
- def test_set_enable_host_enable(self):
- _create_service_entries(self.context, values={'nova': ['fake-mini']})
- self._test_host_action(self.conn.set_host_enabled, True, 'enabled')
- service = db.service_get_by_args(self.context, 'fake-mini',
- 'nova-compute')
- self.assertEqual(service.disabled, False)
-
- def test_set_enable_host_disable(self):
- _create_service_entries(self.context, values={'nova': ['fake-mini']})
- self._test_host_action(self.conn.set_host_enabled, False, 'disabled')
- service = db.service_get_by_args(self.context, 'fake-mini',
- 'nova-compute')
- self.assertEqual(service.disabled, True)
-
- def test_get_host_uptime(self):
- result = self.conn.get_host_uptime('host')
- self.assertEqual(result, 'fake uptime')
-
- def test_supported_instances_is_included_in_host_state(self):
- stats = self.conn.host_state.get_host_stats(False)
- self.assertIn('supported_instances', stats)
-
- def test_supported_instances_is_calculated_by_to_supported_instances(self):
-
- def to_supported_instances(somedata):
- self.assertIsNone(somedata)
- return "SOMERETURNVALUE"
- self.stubs.Set(host, 'to_supported_instances', to_supported_instances)
-
- stats = self.conn.host_state.get_host_stats(False)
- self.assertEqual("SOMERETURNVALUE", stats['supported_instances'])
-
- def test_update_stats_caches_hostname(self):
- self.mox.StubOutWithMock(host, 'call_xenhost')
- self.mox.StubOutWithMock(vm_utils, 'scan_default_sr')
- self.mox.StubOutWithMock(vm_utils, 'list_vms')
- self.mox.StubOutWithMock(self.conn._session, 'call_xenapi')
- data = {'disk_total': 0,
- 'disk_used': 0,
- 'disk_available': 0,
- 'supported_instances': 0,
- 'host_capabilities': [],
- 'host_hostname': 'foo',
- 'vcpus_used': 0,
- }
- sr_rec = {
- 'physical_size': 0,
- 'physical_utilisation': 0,
- 'virtual_allocation': 0,
- }
-
- for i in range(3):
- host.call_xenhost(mox.IgnoreArg(), 'host_data', {}).AndReturn(data)
- vm_utils.scan_default_sr(self.conn._session).AndReturn("ref")
- vm_utils.list_vms(self.conn._session).AndReturn([])
- self.conn._session.call_xenapi('SR.get_record', "ref").AndReturn(
- sr_rec)
- if i == 2:
- # On the third call (the second below) change the hostname
- data = dict(data, host_hostname='bar')
-
- self.mox.ReplayAll()
- stats = self.conn.host_state.get_host_stats(refresh=True)
- self.assertEqual('foo', stats['hypervisor_hostname'])
- stats = self.conn.host_state.get_host_stats(refresh=True)
- self.assertEqual('foo', stats['hypervisor_hostname'])
-
-
-class ToSupportedInstancesTestCase(test.NoDBTestCase):
- def test_default_return_value(self):
- self.assertEqual([],
- host.to_supported_instances(None))
-
- def test_return_value(self):
- self.assertEqual([(arch.X86_64, hvtype.XEN, 'xen')],
- host.to_supported_instances([u'xen-3.0-x86_64']))
-
- def test_invalid_values_do_not_break(self):
- self.assertEqual([(arch.X86_64, hvtype.XEN, 'xen')],
- host.to_supported_instances([u'xen-3.0-x86_64', 'spam']))
-
- def test_multiple_values(self):
- self.assertEqual(
- [
- (arch.X86_64, hvtype.XEN, 'xen'),
- (arch.I686, hvtype.XEN, 'hvm')
- ],
- host.to_supported_instances([u'xen-3.0-x86_64', 'hvm-3.0-x86_32'])
- )
-
-
-# FIXME(sirp): convert this to use XenAPITestBaseNoDB
-class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
- def setUp(self):
- super(XenAPIAutoDiskConfigTestCase, self).setUp()
- self.flags(connection_url='test_url',
- connection_password='test_pass',
- group='xenserver')
- self.flags(firewall_driver='nova.virt.xenapi.firewall.'
- 'Dom0IptablesFirewallDriver')
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- self.user_id = 'fake'
- self.project_id = 'fake'
-
- self.instance_values = {'id': 1,
- 'project_id': self.project_id,
- 'user_id': self.user_id,
- 'image_ref': 1,
- 'kernel_id': 2,
- 'ramdisk_id': 3,
- 'root_gb': 80,
- 'ephemeral_gb': 0,
- 'instance_type_id': '3', # m1.large
- 'os_type': 'linux',
- 'architecture': 'x86-64'}
-
- self.context = context.RequestContext(self.user_id, self.project_id)
-
- def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
- vbd_type='disk', read_only=False, bootable=True,
- osvol=False):
- pass
-
- self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
-
- def assertIsPartitionCalled(self, called):
- marker = {"partition_called": False}
-
- def fake_resize_part_and_fs(dev, start, old_sectors, new_sectors,
- flags):
- marker["partition_called"] = True
- self.stubs.Set(vm_utils, "_resize_part_and_fs",
- fake_resize_part_and_fs)
-
- context.RequestContext(self.user_id, self.project_id)
- session = get_session()
-
- disk_image_type = vm_utils.ImageType.DISK_VHD
- instance = create_instance_with_system_metadata(self.context,
- self.instance_values)
- vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
- vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
-
- vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
- vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}
-
- self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
- vdis, disk_image_type, "fake_nw_inf")
-
- self.assertEqual(marker["partition_called"], called)
-
- def test_instance_not_auto_disk_config(self):
- """Should not partition unless instance is marked as
- auto_disk_config.
- """
- self.instance_values['auto_disk_config'] = False
- self.assertIsPartitionCalled(False)
-
- @stub_vm_utils_with_vdi_attached_here
- def test_instance_auto_disk_config_fails_safe_two_partitions(self):
- # Should not partition unless fail safes pass.
- self.instance_values['auto_disk_config'] = True
-
- def fake_get_partitions(dev):
- return [(1, 0, 100, 'ext4', "", ""), (2, 100, 200, 'ext4' "", "")]
- self.stubs.Set(vm_utils, "_get_partitions",
- fake_get_partitions)
-
- self.assertIsPartitionCalled(False)
-
- @stub_vm_utils_with_vdi_attached_here
- def test_instance_auto_disk_config_fails_safe_badly_numbered(self):
- # Should not partition unless fail safes pass.
- self.instance_values['auto_disk_config'] = True
-
- def fake_get_partitions(dev):
- return [(2, 100, 200, 'ext4', "", "")]
- self.stubs.Set(vm_utils, "_get_partitions",
- fake_get_partitions)
-
- self.assertIsPartitionCalled(False)
-
- @stub_vm_utils_with_vdi_attached_here
- def test_instance_auto_disk_config_fails_safe_bad_fstype(self):
- # Should not partition unless fail safes pass.
- self.instance_values['auto_disk_config'] = True
-
- def fake_get_partitions(dev):
- return [(1, 100, 200, 'asdf', "", "")]
- self.stubs.Set(vm_utils, "_get_partitions",
- fake_get_partitions)
-
- self.assertIsPartitionCalled(False)
-
- @stub_vm_utils_with_vdi_attached_here
- def test_instance_auto_disk_config_passes_fail_safes(self):
- """Should partition if instance is marked as auto_disk_config=True and
- virt-layer specific fail-safe checks pass.
- """
- self.instance_values['auto_disk_config'] = True
-
- def fake_get_partitions(dev):
- return [(1, 0, 100, 'ext4', "", "boot")]
- self.stubs.Set(vm_utils, "_get_partitions",
- fake_get_partitions)
-
- self.assertIsPartitionCalled(True)
-
-
-# FIXME(sirp): convert this to use XenAPITestBaseNoDB
-class XenAPIGenerateLocal(stubs.XenAPITestBase):
- """Test generating of local disks, like swap and ephemeral."""
- def setUp(self):
- super(XenAPIGenerateLocal, self).setUp()
- self.flags(connection_url='test_url',
- connection_password='test_pass',
- group='xenserver')
- self.flags(firewall_driver='nova.virt.xenapi.firewall.'
- 'Dom0IptablesFirewallDriver')
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- db_fakes.stub_out_db_instance_api(self.stubs)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- self.user_id = 'fake'
- self.project_id = 'fake'
-
- self.instance_values = {'id': 1,
- 'project_id': self.project_id,
- 'user_id': self.user_id,
- 'image_ref': 1,
- 'kernel_id': 2,
- 'ramdisk_id': 3,
- 'root_gb': 80,
- 'ephemeral_gb': 0,
- 'instance_type_id': '3', # m1.large
- 'os_type': 'linux',
- 'architecture': 'x86-64'}
-
- self.context = context.RequestContext(self.user_id, self.project_id)
-
- def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
- vbd_type='disk', read_only=False, bootable=True,
- osvol=False, empty=False, unpluggable=True):
- return session.call_xenapi('VBD.create', {'VM': vm_ref,
- 'VDI': vdi_ref})
-
- self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
-
- def assertCalled(self, instance,
- disk_image_type=vm_utils.ImageType.DISK_VHD):
- context.RequestContext(self.user_id, self.project_id)
- session = get_session()
-
- vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
- vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
-
- vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
-
- vdi_key = 'root'
- if disk_image_type == vm_utils.ImageType.DISK_ISO:
- vdi_key = 'iso'
- vdis = {vdi_key: {'uuid': vdi_uuid, 'ref': vdi_ref}}
-
- self.called = False
- self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
- vdis, disk_image_type, "fake_nw_inf")
- self.assertTrue(self.called)
-
- def test_generate_swap(self):
- # Test swap disk generation.
- instance_values = dict(self.instance_values, instance_type_id=5)
- instance = create_instance_with_system_metadata(self.context,
- instance_values)
-
- def fake_generate_swap(*args, **kwargs):
- self.called = True
- self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap)
-
- self.assertCalled(instance)
-
- def test_generate_ephemeral(self):
- # Test ephemeral disk generation.
- instance_values = dict(self.instance_values, instance_type_id=4)
- instance = create_instance_with_system_metadata(self.context,
- instance_values)
-
- def fake_generate_ephemeral(*args):
- self.called = True
- self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
-
- self.assertCalled(instance)
-
- def test_generate_iso_blank_root_disk(self):
- instance_values = dict(self.instance_values, instance_type_id=4)
- instance_values.pop('kernel_id')
- instance_values.pop('ramdisk_id')
- instance = create_instance_with_system_metadata(self.context,
- instance_values)
-
- def fake_generate_ephemeral(*args):
- pass
- self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
-
- def fake_generate_iso(*args):
- self.called = True
- self.stubs.Set(vm_utils, 'generate_iso_blank_root_disk',
- fake_generate_iso)
-
- self.assertCalled(instance, vm_utils.ImageType.DISK_ISO)
-
-
-class XenAPIBWCountersTestCase(stubs.XenAPITestBaseNoDB):
- FAKE_VMS = {'test1:ref': dict(name_label='test1',
- other_config=dict(nova_uuid='hash'),
- domid='12',
- _vifmap={'0': "a:b:c:d...",
- '1': "e:f:12:q..."}),
- 'test2:ref': dict(name_label='test2',
- other_config=dict(nova_uuid='hash'),
- domid='42',
- _vifmap={'0': "a:3:c:d...",
- '1': "e:f:42:q..."}),
- }
-
- def setUp(self):
- super(XenAPIBWCountersTestCase, self).setUp()
- self.stubs.Set(vm_utils, 'list_vms',
- XenAPIBWCountersTestCase._fake_list_vms)
- self.flags(connection_url='test_url',
- connection_password='test_pass',
- group='xenserver')
- self.flags(firewall_driver='nova.virt.xenapi.firewall.'
- 'Dom0IptablesFirewallDriver')
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- def _fake_get_vif_device_map(vm_rec):
- return vm_rec['_vifmap']
-
- self.stubs.Set(self.conn._vmops, "_get_vif_device_map",
- _fake_get_vif_device_map)
-
- @classmethod
- def _fake_list_vms(cls, session):
- return cls.FAKE_VMS.iteritems()
-
- @staticmethod
- def _fake_fetch_bandwidth_mt(session):
- return {}
-
- @staticmethod
- def _fake_fetch_bandwidth(session):
- return {'42':
- {'0': {'bw_in': 21024, 'bw_out': 22048},
- '1': {'bw_in': 231337, 'bw_out': 221212121}},
- '12':
- {'0': {'bw_in': 1024, 'bw_out': 2048},
- '1': {'bw_in': 31337, 'bw_out': 21212121}},
- }
-
- def test_get_all_bw_counters(self):
- instances = [dict(name='test1', uuid='1-2-3'),
- dict(name='test2', uuid='4-5-6')]
-
- self.stubs.Set(vm_utils, 'fetch_bandwidth',
- self._fake_fetch_bandwidth)
- result = self.conn.get_all_bw_counters(instances)
- self.assertEqual(len(result), 4)
- self.assertIn(dict(uuid='1-2-3',
- mac_address="a:b:c:d...",
- bw_in=1024,
- bw_out=2048), result)
- self.assertIn(dict(uuid='1-2-3',
- mac_address="e:f:12:q...",
- bw_in=31337,
- bw_out=21212121), result)
-
- self.assertIn(dict(uuid='4-5-6',
- mac_address="a:3:c:d...",
- bw_in=21024,
- bw_out=22048), result)
- self.assertIn(dict(uuid='4-5-6',
- mac_address="e:f:42:q...",
- bw_in=231337,
- bw_out=221212121), result)
-
- def test_get_all_bw_counters_in_failure_case(self):
- """Test that get_all_bw_conters returns an empty list when
- no data returned from Xenserver. c.f. bug #910045.
- """
- instances = [dict(name='instance-0001', uuid='1-2-3-4-5')]
-
- self.stubs.Set(vm_utils, 'fetch_bandwidth',
- self._fake_fetch_bandwidth_mt)
- result = self.conn.get_all_bw_counters(instances)
- self.assertEqual(result, [])
-
-
-# TODO(salvatore-orlando): this class and
-# nova.tests.virt.test_libvirt.IPTablesFirewallDriverTestCase share a lot of
-# code. Consider abstracting common code in a base class for firewall driver
-# testing.
-# FIXME(sirp): convert this to use XenAPITestBaseNoDB
-class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
-
- REQUIRES_LOCKING = True
-
- _in_rules = [
- '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
- '*nat',
- ':PREROUTING ACCEPT [1170:189210]',
- ':INPUT ACCEPT [844:71028]',
- ':OUTPUT ACCEPT [5149:405186]',
- ':POSTROUTING ACCEPT [5063:386098]',
- '# Completed on Mon Dec 6 11:54:13 2010',
- '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
- '*mangle',
- ':INPUT ACCEPT [969615:281627771]',
- ':FORWARD ACCEPT [0:0]',
- ':OUTPUT ACCEPT [915599:63811649]',
- ':nova-block-ipv4 - [0:0]',
- '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
- '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
- ',ESTABLISHED -j ACCEPT ',
- '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
- '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
- '[0:0] -A FORWARD -o virbr0 -j REJECT '
- '--reject-with icmp-port-unreachable ',
- '[0:0] -A FORWARD -i virbr0 -j REJECT '
- '--reject-with icmp-port-unreachable ',
- 'COMMIT',
- '# Completed on Mon Dec 6 11:54:13 2010',
- '# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
- '*filter',
- ':INPUT ACCEPT [969615:281627771]',
- ':FORWARD ACCEPT [0:0]',
- ':OUTPUT ACCEPT [915599:63811649]',
- ':nova-block-ipv4 - [0:0]',
- '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
- '[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
- ',ESTABLISHED -j ACCEPT ',
- '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
- '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
- '[0:0] -A FORWARD -o virbr0 -j REJECT '
- '--reject-with icmp-port-unreachable ',
- '[0:0] -A FORWARD -i virbr0 -j REJECT '
- '--reject-with icmp-port-unreachable ',
- 'COMMIT',
- '# Completed on Mon Dec 6 11:54:13 2010',
- ]
-
- _in6_filter_rules = [
- '# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
- '*filter',
- ':INPUT ACCEPT [349155:75810423]',
- ':FORWARD ACCEPT [0:0]',
- ':OUTPUT ACCEPT [349256:75777230]',
- 'COMMIT',
- '# Completed on Tue Jan 18 23:47:56 2011',
- ]
-
- def setUp(self):
- super(XenAPIDom0IptablesFirewallTestCase, self).setUp()
- self.flags(connection_url='test_url',
- connection_password='test_pass',
- group='xenserver')
- self.flags(instance_name_template='%d',
- firewall_driver='nova.virt.xenapi.firewall.'
- 'Dom0IptablesFirewallDriver')
- self.user_id = 'mappin'
- self.project_id = 'fake'
- stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
- test_case=self)
- self.context = context.RequestContext(self.user_id, self.project_id)
- self.network = importutils.import_object(CONF.network_manager)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- self.fw = self.conn._vmops.firewall_driver
-
- def _create_instance_ref(self):
- return db.instance_create(self.context,
- {'user_id': self.user_id,
- 'project_id': self.project_id,
- 'instance_type_id': 1})
-
- def _create_test_security_group(self):
- admin_ctxt = context.get_admin_context()
- secgroup = db.security_group_create(admin_ctxt,
- {'user_id': self.user_id,
- 'project_id': self.project_id,
- 'name': 'testgroup',
- 'description': 'test group'})
- db.security_group_rule_create(admin_ctxt,
- {'parent_group_id': secgroup['id'],
- 'protocol': 'icmp',
- 'from_port': -1,
- 'to_port': -1,
- 'cidr': '192.168.11.0/24'})
-
- db.security_group_rule_create(admin_ctxt,
- {'parent_group_id': secgroup['id'],
- 'protocol': 'icmp',
- 'from_port': 8,
- 'to_port': -1,
- 'cidr': '192.168.11.0/24'})
-
- db.security_group_rule_create(admin_ctxt,
- {'parent_group_id': secgroup['id'],
- 'protocol': 'tcp',
- 'from_port': 80,
- 'to_port': 81,
- 'cidr': '192.168.10.0/24'})
- return secgroup
-
- def _validate_security_group(self):
- in_rules = filter(lambda l: not l.startswith('#'),
- self._in_rules)
- for rule in in_rules:
- if 'nova' not in rule:
- self.assertTrue(rule in self._out_rules,
- 'Rule went missing: %s' % rule)
-
- instance_chain = None
- for rule in self._out_rules:
- # This is pretty crude, but it'll do for now
- # last two octets change
- if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
- instance_chain = rule.split(' ')[-1]
- break
- self.assertTrue(instance_chain, "The instance chain wasn't added")
- security_group_chain = None
- for rule in self._out_rules:
- # This is pretty crude, but it'll do for now
- if '-A %s -j' % instance_chain in rule:
- security_group_chain = rule.split(' ')[-1]
- break
- self.assertTrue(security_group_chain,
- "The security group chain wasn't added")
-
- regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp'
- ' -s 192.168.11.0/24')
- self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
- "ICMP acceptance rule wasn't added")
-
- regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp'
- ' --icmp-type 8 -s 192.168.11.0/24')
- self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
- "ICMP Echo Request acceptance rule wasn't added")
-
- regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp --dport 80:81'
- ' -s 192.168.10.0/24')
- self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
- "TCP port 80/81 acceptance rule wasn't added")
-
- def test_static_filters(self):
- instance_ref = self._create_instance_ref()
- src_instance_ref = self._create_instance_ref()
- admin_ctxt = context.get_admin_context()
- secgroup = self._create_test_security_group()
-
- src_secgroup = db.security_group_create(admin_ctxt,
- {'user_id': self.user_id,
- 'project_id': self.project_id,
- 'name': 'testsourcegroup',
- 'description': 'src group'})
- db.security_group_rule_create(admin_ctxt,
- {'parent_group_id': secgroup['id'],
- 'protocol': 'tcp',
- 'from_port': 80,
- 'to_port': 81,
- 'group_id': src_secgroup['id']})
-
- db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
- secgroup['id'])
- db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
- src_secgroup['id'])
- instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
- src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
-
- network_model = fake_network.fake_get_instance_nw_info(self.stubs, 1)
-
- from nova.compute import utils as compute_utils # noqa
- self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
- lambda instance: network_model)
-
- self.fw.prepare_instance_filter(instance_ref, network_model)
- self.fw.apply_instance_filter(instance_ref, network_model)
-
- self._validate_security_group()
- # Extra test for TCP acceptance rules
- for ip in network_model.fixed_ips():
- if ip['version'] != 4:
- continue
- regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp'
- ' --dport 80:81 -s %s' % ip['address'])
- self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
- "TCP port 80/81 acceptance rule wasn't added")
-
- db.instance_destroy(admin_ctxt, instance_ref['uuid'])
-
- def test_filters_for_instance_with_ip_v6(self):
- self.flags(use_ipv6=True)
- network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
- rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
- self.assertEqual(len(rulesv4), 2)
- self.assertEqual(len(rulesv6), 1)
-
- def test_filters_for_instance_without_ip_v6(self):
- self.flags(use_ipv6=False)
- network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
- rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
- self.assertEqual(len(rulesv4), 2)
- self.assertEqual(len(rulesv6), 0)
-
- def test_multinic_iptables(self):
- ipv4_rules_per_addr = 1
- ipv4_addr_per_network = 2
- ipv6_rules_per_addr = 1
- ipv6_addr_per_network = 1
- networks_count = 5
- instance_ref = self._create_instance_ref()
- _get_instance_nw_info = fake_network.fake_get_instance_nw_info
- network_info = _get_instance_nw_info(self.stubs,
- networks_count,
- ipv4_addr_per_network)
- network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
- '1.1.1.1'
- ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
- ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
- inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
- network_info)
- self.fw.prepare_instance_filter(instance_ref, network_info)
- ipv4 = self.fw.iptables.ipv4['filter'].rules
- ipv6 = self.fw.iptables.ipv6['filter'].rules
- ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
- ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
- # Extra rules are for the DHCP request
- rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
- networks_count) + 2
- self.assertEqual(ipv4_network_rules, rules)
- self.assertEqual(ipv6_network_rules,
- ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
-
- def test_do_refresh_security_group_rules(self):
- admin_ctxt = context.get_admin_context()
- instance_ref = self._create_instance_ref()
- network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
- secgroup = self._create_test_security_group()
- db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
- secgroup['id'])
- self.fw.prepare_instance_filter(instance_ref, network_info)
- self.fw.instance_info[instance_ref['id']] = (instance_ref,
- network_info)
- self._validate_security_group()
- # add a rule to the security group
- db.security_group_rule_create(admin_ctxt,
- {'parent_group_id': secgroup['id'],
- 'protocol': 'udp',
- 'from_port': 200,
- 'to_port': 299,
- 'cidr': '192.168.99.0/24'})
- # validate the extra rule
- self.fw.refresh_security_group_rules(secgroup)
- regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp --dport 200:299'
- ' -s 192.168.99.0/24')
- self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
- "Rules were not updated properly."
- "The rule for UDP acceptance is missing")
-
- def test_provider_firewall_rules(self):
- # setup basic instance data
- instance_ref = self._create_instance_ref()
- # FRAGILE: as in libvirt tests
- # peeks at how the firewall names chains
- chain_name = 'inst-%s' % instance_ref['id']
-
- network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
- self.fw.prepare_instance_filter(instance_ref, network_info)
- self.assertIn('provider', self.fw.iptables.ipv4['filter'].chains)
- rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == 'provider']
- self.assertEqual(0, len(rules))
-
- admin_ctxt = context.get_admin_context()
- # add a rule and send the update message, check for 1 rule
- db.provider_fw_rule_create(admin_ctxt,
- {'protocol': 'tcp',
- 'cidr': '10.99.99.99/32',
- 'from_port': 1,
- 'to_port': 65535})
- self.fw.refresh_provider_fw_rules()
- rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == 'provider']
- self.assertEqual(1, len(rules))
-
- # Add another, refresh, and make sure number of rules goes to two
- provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
- {'protocol': 'udp',
- 'cidr': '10.99.99.99/32',
- 'from_port': 1,
- 'to_port': 65535})
- self.fw.refresh_provider_fw_rules()
- rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == 'provider']
- self.assertEqual(2, len(rules))
-
- # create the instance filter and make sure it has a jump rule
- self.fw.prepare_instance_filter(instance_ref, network_info)
- self.fw.apply_instance_filter(instance_ref, network_info)
- inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == chain_name]
- jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
- provjump_rules = []
- # IptablesTable doesn't make rules unique internally
- for rule in jump_rules:
- if 'provider' in rule.rule and rule not in provjump_rules:
- provjump_rules.append(rule)
- self.assertEqual(1, len(provjump_rules))
-
- # remove a rule from the db, cast to compute to refresh rule
- db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
- self.fw.refresh_provider_fw_rules()
- rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == 'provider']
- self.assertEqual(1, len(rules))
-
-
-class XenAPISRSelectionTestCase(stubs.XenAPITestBaseNoDB):
- """Unit tests for testing we find the right SR."""
- def test_safe_find_sr_raise_exception(self):
- # Ensure StorageRepositoryNotFound is raise when wrong filter.
- self.flags(sr_matching_filter='yadayadayada', group='xenserver')
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- session = get_session()
- self.assertRaises(exception.StorageRepositoryNotFound,
- vm_utils.safe_find_sr, session)
-
- def test_safe_find_sr_local_storage(self):
- # Ensure the default local-storage is found.
- self.flags(sr_matching_filter='other-config:i18n-key=local-storage',
- group='xenserver')
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- session = get_session()
- # This test is only guaranteed if there is one host in the pool
- self.assertEqual(len(xenapi_fake.get_all('host')), 1)
- host_ref = xenapi_fake.get_all('host')[0]
- pbd_refs = xenapi_fake.get_all('PBD')
- for pbd_ref in pbd_refs:
- pbd_rec = xenapi_fake.get_record('PBD', pbd_ref)
- if pbd_rec['host'] != host_ref:
- continue
- sr_rec = xenapi_fake.get_record('SR', pbd_rec['SR'])
- if sr_rec['other_config']['i18n-key'] == 'local-storage':
- local_sr = pbd_rec['SR']
- expected = vm_utils.safe_find_sr(session)
- self.assertEqual(local_sr, expected)
-
- def test_safe_find_sr_by_other_criteria(self):
- # Ensure the SR is found when using a different filter.
- self.flags(sr_matching_filter='other-config:my_fake_sr=true',
- group='xenserver')
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- session = get_session()
- host_ref = xenapi_fake.get_all('host')[0]
- local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
- type='lvm',
- other_config={'my_fake_sr': 'true'},
- host_ref=host_ref)
- expected = vm_utils.safe_find_sr(session)
- self.assertEqual(local_sr, expected)
-
- def test_safe_find_sr_default(self):
- # Ensure the default SR is found regardless of other-config.
- self.flags(sr_matching_filter='default-sr:true',
- group='xenserver')
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- session = get_session()
- pool_ref = session.call_xenapi('pool.get_all')[0]
- expected = vm_utils.safe_find_sr(session)
- self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
- expected)
-
-
-def _create_service_entries(context, values={'avail_zone1': ['fake_host1',
- 'fake_host2'],
- 'avail_zone2': ['fake_host3'], }):
- for avail_zone, hosts in values.iteritems():
- for service_host in hosts:
- db.service_create(context,
- {'host': service_host,
- 'binary': 'nova-compute',
- 'topic': 'compute',
- 'report_count': 0})
- return values
-
-
-# FIXME(sirp): convert this to use XenAPITestBaseNoDB
-class XenAPIAggregateTestCase(stubs.XenAPITestBase):
- """Unit tests for aggregate operations."""
- def setUp(self):
- super(XenAPIAggregateTestCase, self).setUp()
- self.flags(connection_url='http://test_url',
- connection_username='test_user',
- connection_password='test_pass',
- group='xenserver')
- self.flags(instance_name_template='%d',
- firewall_driver='nova.virt.xenapi.firewall.'
- 'Dom0IptablesFirewallDriver',
- host='host',
- compute_driver='xenapi.XenAPIDriver',
- default_availability_zone='avail_zone1')
- self.flags(use_local=True, group='conductor')
- host_ref = xenapi_fake.get_all('host')[0]
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.context = context.get_admin_context()
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- self.compute = importutils.import_object(CONF.compute_manager)
- self.api = compute_api.AggregateAPI()
- values = {'name': 'test_aggr',
- 'metadata': {'availability_zone': 'test_zone',
- pool_states.POOL_FLAG: 'XenAPI'}}
- self.aggr = db.aggregate_create(self.context, values)
- self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI',
- 'master_compute': 'host',
- 'availability_zone': 'fake_zone',
- pool_states.KEY: pool_states.ACTIVE,
- 'host': xenapi_fake.get_record('host',
- host_ref)['uuid']}
-
- def test_pool_add_to_aggregate_called_by_driver(self):
-
- calls = []
-
- def pool_add_to_aggregate(context, aggregate, host, slave_info=None):
- self.assertEqual("CONTEXT", context)
- self.assertEqual("AGGREGATE", aggregate)
- self.assertEqual("HOST", host)
- self.assertEqual("SLAVEINFO", slave_info)
- calls.append(pool_add_to_aggregate)
- self.stubs.Set(self.conn._pool,
- "add_to_aggregate",
- pool_add_to_aggregate)
-
- self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST",
- slave_info="SLAVEINFO")
-
- self.assertIn(pool_add_to_aggregate, calls)
-
- def test_pool_remove_from_aggregate_called_by_driver(self):
-
- calls = []
-
- def pool_remove_from_aggregate(context, aggregate, host,
- slave_info=None):
- self.assertEqual("CONTEXT", context)
- self.assertEqual("AGGREGATE", aggregate)
- self.assertEqual("HOST", host)
- self.assertEqual("SLAVEINFO", slave_info)
- calls.append(pool_remove_from_aggregate)
- self.stubs.Set(self.conn._pool,
- "remove_from_aggregate",
- pool_remove_from_aggregate)
-
- self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST",
- slave_info="SLAVEINFO")
-
- self.assertIn(pool_remove_from_aggregate, calls)
-
- def test_add_to_aggregate_for_first_host_sets_metadata(self):
- def fake_init_pool(id, name):
- fake_init_pool.called = True
- self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool)
-
- aggregate = self._aggregate_setup()
- self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
- result = db.aggregate_get(self.context, aggregate['id'])
- self.assertTrue(fake_init_pool.called)
- self.assertThat(self.fake_metadata,
- matchers.DictMatches(result['metadetails']))
-
- def test_join_slave(self):
- # Ensure join_slave gets called when the request gets to master.
- def fake_join_slave(id, compute_uuid, host, url, user, password):
- fake_join_slave.called = True
- self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)
-
- aggregate = self._aggregate_setup(hosts=['host', 'host2'],
- metadata=self.fake_metadata)
- self.conn._pool.add_to_aggregate(self.context, aggregate, "host2",
- dict(compute_uuid='fake_uuid',
- url='fake_url',
- user='fake_user',
- passwd='fake_pass',
- xenhost_uuid='fake_uuid'))
- self.assertTrue(fake_join_slave.called)
-
- def test_add_to_aggregate_first_host(self):
- def fake_pool_set_name_label(self, session, pool_ref, name):
- fake_pool_set_name_label.called = True
- self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label",
- fake_pool_set_name_label)
- self.conn._session.call_xenapi("pool.create", {"name": "asdf"})
-
- metadata = {'availability_zone': 'fake_zone',
- pool_states.POOL_FLAG: "XenAPI",
- pool_states.KEY: pool_states.CREATED}
-
- aggregate = objects.Aggregate()
- aggregate.name = 'fake_aggregate'
- aggregate.metadata = dict(metadata)
- aggregate.create(self.context)
- aggregate.add_host('host')
- self.assertEqual(["host"], aggregate.hosts)
- self.assertEqual(metadata, aggregate.metadata)
-
- self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
- self.assertTrue(fake_pool_set_name_label.called)
-
- def test_remove_from_aggregate_called(self):
- def fake_remove_from_aggregate(context, aggregate, host):
- fake_remove_from_aggregate.called = True
- self.stubs.Set(self.conn._pool,
- "remove_from_aggregate",
- fake_remove_from_aggregate)
-
- self.conn.remove_from_aggregate(None, None, None)
- self.assertTrue(fake_remove_from_aggregate.called)
-
- def test_remove_from_empty_aggregate(self):
- result = self._aggregate_setup()
- self.assertRaises(exception.InvalidAggregateAction,
- self.conn._pool.remove_from_aggregate,
- self.context, result, "test_host")
-
- def test_remove_slave(self):
- # Ensure eject slave gets called.
- def fake_eject_slave(id, compute_uuid, host_uuid):
- fake_eject_slave.called = True
- self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)
-
- self.fake_metadata['host2'] = 'fake_host2_uuid'
- aggregate = self._aggregate_setup(hosts=['host', 'host2'],
- metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE)
- self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2")
- self.assertTrue(fake_eject_slave.called)
-
- def test_remove_master_solo(self):
- # Ensure metadata are cleared after removal.
- def fake_clear_pool(id):
- fake_clear_pool.called = True
- self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)
-
- aggregate = self._aggregate_setup(metadata=self.fake_metadata)
- self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
- result = db.aggregate_get(self.context, aggregate['id'])
- self.assertTrue(fake_clear_pool.called)
- self.assertThat({'availability_zone': 'fake_zone',
- pool_states.POOL_FLAG: 'XenAPI',
- pool_states.KEY: pool_states.ACTIVE},
- matchers.DictMatches(result['metadetails']))
-
- def test_remote_master_non_empty_pool(self):
- # Ensure AggregateError is raised if removing the master.
- aggregate = self._aggregate_setup(hosts=['host', 'host2'],
- metadata=self.fake_metadata)
-
- self.assertRaises(exception.InvalidAggregateAction,
- self.conn._pool.remove_from_aggregate,
- self.context, aggregate, "host")
-
- def _aggregate_setup(self, aggr_name='fake_aggregate',
- aggr_zone='fake_zone',
- aggr_state=pool_states.CREATED,
- hosts=['host'], metadata=None):
- aggregate = objects.Aggregate()
- aggregate.name = aggr_name
- aggregate.metadata = {'availability_zone': aggr_zone,
- pool_states.POOL_FLAG: 'XenAPI',
- pool_states.KEY: aggr_state,
- }
- if metadata:
- aggregate.metadata.update(metadata)
- aggregate.create(self.context)
- for aggregate_host in hosts:
- aggregate.add_host(aggregate_host)
- return aggregate
-
- def test_add_host_to_aggregate_invalid_changing_status(self):
- """Ensure InvalidAggregateAction is raised when adding host while
- aggregate is not ready.
- """
- aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
- ex = self.assertRaises(exception.InvalidAggregateAction,
- self.conn.add_to_aggregate, self.context,
- aggregate, 'host')
- self.assertIn('setup in progress', str(ex))
-
- def test_add_host_to_aggregate_invalid_dismissed_status(self):
- """Ensure InvalidAggregateAction is raised when aggregate is
- deleted.
- """
- aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
- ex = self.assertRaises(exception.InvalidAggregateAction,
- self.conn.add_to_aggregate, self.context,
- aggregate, 'fake_host')
- self.assertIn('aggregate deleted', str(ex))
-
- def test_add_host_to_aggregate_invalid_error_status(self):
- """Ensure InvalidAggregateAction is raised when aggregate is
- in error.
- """
- aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR)
- ex = self.assertRaises(exception.InvalidAggregateAction,
- self.conn.add_to_aggregate, self.context,
- aggregate, 'fake_host')
- self.assertIn('aggregate in error', str(ex))
-
- def test_remove_host_from_aggregate_error(self):
- # Ensure we can remove a host from an aggregate even if in error.
- values = _create_service_entries(self.context)
- fake_zone = values.keys()[0]
- aggr = self.api.create_aggregate(self.context,
- 'fake_aggregate', fake_zone)
- # let's mock the fact that the aggregate is ready!
- metadata = {pool_states.POOL_FLAG: "XenAPI",
- pool_states.KEY: pool_states.ACTIVE}
- db.aggregate_metadata_add(self.context, aggr['id'], metadata)
- for aggregate_host in values[fake_zone]:
- aggr = self.api.add_host_to_aggregate(self.context,
- aggr['id'], aggregate_host)
- # let's mock the fact that the aggregate is in error!
- expected = self.api.remove_host_from_aggregate(self.context,
- aggr['id'],
- values[fake_zone][0])
- self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
- self.assertEqual(expected['metadata'][pool_states.KEY],
- pool_states.ACTIVE)
-
- def test_remove_host_from_aggregate_invalid_dismissed_status(self):
- """Ensure InvalidAggregateAction is raised when aggregate is
- deleted.
- """
- aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
- self.assertRaises(exception.InvalidAggregateAction,
- self.conn.remove_from_aggregate, self.context,
- aggregate, 'fake_host')
-
- def test_remove_host_from_aggregate_invalid_changing_status(self):
- """Ensure InvalidAggregateAction is raised when aggregate is
- changing.
- """
- aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
- self.assertRaises(exception.InvalidAggregateAction,
- self.conn.remove_from_aggregate, self.context,
- aggregate, 'fake_host')
-
- def test_add_aggregate_host_raise_err(self):
- # Ensure the undo operation works correctly on add.
- def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
- raise exception.AggregateError(
- aggregate_id='', action='', reason='')
- self.stubs.Set(self.compute.driver, "add_to_aggregate",
- fake_driver_add_to_aggregate)
- metadata = {pool_states.POOL_FLAG: "XenAPI",
- pool_states.KEY: pool_states.ACTIVE}
- db.aggregate_metadata_add(self.context, self.aggr['id'], metadata)
- db.aggregate_host_add(self.context, self.aggr['id'], 'fake_host')
-
- self.assertRaises(exception.AggregateError,
- self.compute.add_aggregate_host,
- self.context, host="fake_host",
- aggregate=jsonutils.to_primitive(self.aggr),
- slave_info=None)
- excepted = db.aggregate_get(self.context, self.aggr['id'])
- self.assertEqual(excepted['metadetails'][pool_states.KEY],
- pool_states.ERROR)
- self.assertEqual(excepted['hosts'], [])
-
-
-class MockComputeAPI(object):
- def __init__(self):
- self._mock_calls = []
-
- def add_aggregate_host(self, ctxt, aggregate,
- host_param, host, slave_info):
- self._mock_calls.append((
- self.add_aggregate_host, ctxt, aggregate,
- host_param, host, slave_info))
-
- def remove_aggregate_host(self, ctxt, aggregate_id, host_param,
- host, slave_info):
- self._mock_calls.append((
- self.remove_aggregate_host, ctxt, aggregate_id,
- host_param, host, slave_info))
-
-
-class StubDependencies(object):
- """Stub dependencies for ResourcePool."""
-
- def __init__(self):
- self.compute_rpcapi = MockComputeAPI()
-
- def _is_hv_pool(self, *_ignore):
- return True
-
- def _get_metadata(self, *_ignore):
- return {
- pool_states.KEY: {},
- 'master_compute': 'master'
- }
-
- def _create_slave_info(self, *ignore):
- return "SLAVE_INFO"
-
-
-class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool):
- """A ResourcePool, use stub dependencies."""
-
-
-class HypervisorPoolTestCase(test.NoDBTestCase):
-
- fake_aggregate = {
- 'id': 98,
- 'hosts': [],
- 'metadata': {
- 'master_compute': 'master',
- pool_states.POOL_FLAG: {},
- pool_states.KEY: {}
- }
- }
-
- def test_slave_asks_master_to_add_slave_to_pool(self):
- slave = ResourcePoolWithStubs()
-
- slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave")
-
- self.assertIn(
- (slave.compute_rpcapi.add_aggregate_host,
- "CONTEXT", jsonutils.to_primitive(self.fake_aggregate),
- "slave", "master", "SLAVE_INFO"),
- slave.compute_rpcapi._mock_calls)
-
- def test_slave_asks_master_to_remove_slave_from_pool(self):
- slave = ResourcePoolWithStubs()
-
- slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave")
-
- self.assertIn(
- (slave.compute_rpcapi.remove_aggregate_host,
- "CONTEXT", 98, "slave", "master", "SLAVE_INFO"),
- slave.compute_rpcapi._mock_calls)
-
-
-class SwapXapiHostTestCase(test.NoDBTestCase):
-
- def test_swapping(self):
- self.assertEqual(
- "http://otherserver:8765/somepath",
- pool.swap_xapi_host(
- "http://someserver:8765/somepath", 'otherserver'))
-
- def test_no_port(self):
- self.assertEqual(
- "http://otherserver/somepath",
- pool.swap_xapi_host(
- "http://someserver/somepath", 'otherserver'))
-
- def test_no_path(self):
- self.assertEqual(
- "http://otherserver",
- pool.swap_xapi_host(
- "http://someserver", 'otherserver'))
-
-
-class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
- """Unit tests for live_migration."""
- def setUp(self):
- super(XenAPILiveMigrateTestCase, self).setUp()
- self.flags(connection_url='test_url',
- connection_password='test_pass',
- group='xenserver')
- self.flags(firewall_driver='nova.virt.xenapi.firewall.'
- 'Dom0IptablesFirewallDriver',
- host='host')
- db_fakes.stub_out_db_instance_api(self.stubs)
- self.context = context.get_admin_context()
-
- def test_live_migration_calls_vmops(self):
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- def fake_live_migrate(context, instance_ref, dest, post_method,
- recover_method, block_migration, migrate_data):
- fake_live_migrate.called = True
-
- self.stubs.Set(self.conn._vmops, "live_migrate", fake_live_migrate)
-
- self.conn.live_migration(None, None, None, None, None)
- self.assertTrue(fake_live_migrate.called)
-
- def test_pre_live_migration(self):
- # ensure method is present
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- self.conn.pre_live_migration(None, None, None, None, None)
-
- def test_post_live_migration_at_destination(self):
- # ensure method is present
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- fake_instance = {"name": "name"}
- fake_network_info = "network_info"
-
- def fake_fw(instance, network_info):
- self.assertEqual(instance, fake_instance)
- self.assertEqual(network_info, fake_network_info)
- fake_fw.call_count += 1
-
- def fake_create_kernel_and_ramdisk(context, session, instance,
- name_label):
- return "fake-kernel-file", "fake-ramdisk-file"
-
- fake_fw.call_count = 0
- _vmops = self.conn._vmops
- self.stubs.Set(_vmops.firewall_driver,
- 'setup_basic_filtering', fake_fw)
- self.stubs.Set(_vmops.firewall_driver,
- 'prepare_instance_filter', fake_fw)
- self.stubs.Set(_vmops.firewall_driver,
- 'apply_instance_filter', fake_fw)
- self.stubs.Set(vm_utils, "create_kernel_and_ramdisk",
- fake_create_kernel_and_ramdisk)
-
- def fake_get_vm_opaque_ref(instance):
- fake_get_vm_opaque_ref.called = True
- self.stubs.Set(_vmops, "_get_vm_opaque_ref", fake_get_vm_opaque_ref)
- fake_get_vm_opaque_ref.called = False
-
- def fake_strip_base_mirror_from_vdis(session, vm_ref):
- fake_strip_base_mirror_from_vdis.called = True
- self.stubs.Set(vm_utils, "strip_base_mirror_from_vdis",
- fake_strip_base_mirror_from_vdis)
- fake_strip_base_mirror_from_vdis.called = False
-
- self.conn.post_live_migration_at_destination(None, fake_instance,
- fake_network_info, None)
- self.assertEqual(fake_fw.call_count, 3)
- self.assertTrue(fake_get_vm_opaque_ref.called)
- self.assertTrue(fake_strip_base_mirror_from_vdis.called)
-
- def test_check_can_live_migrate_destination_with_block_migration(self):
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
-
- expected = {'block_migration': True,
- 'migrate_data': {
- 'migrate_send_data': "fake_migrate_data",
- 'destination_sr_ref': 'asdf'
- }
- }
- result = self.conn.check_can_live_migrate_destination(self.context,
- {'host': 'host'},
- {}, {},
- True, False)
- self.assertEqual(expected, result)
-
- def test_check_live_migrate_destination_verifies_ip(self):
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- for pif_ref in xenapi_fake.get_all('PIF'):
- pif_rec = xenapi_fake.get_record('PIF', pif_ref)
- pif_rec['IP'] = ''
- pif_rec['IPv6'] = ''
-
- self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
-
- self.assertRaises(exception.MigrationError,
- self.conn.check_can_live_migrate_destination,
- self.context, {'host': 'host'},
- {}, {},
- True, False)
-
- def test_check_can_live_migrate_destination_block_migration_fails(self):
- stubs.stubout_session(self.stubs,
- stubs.FakeSessionForFailedMigrateTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- self.assertRaises(exception.MigrationError,
- self.conn.check_can_live_migrate_destination,
- self.context, {'host': 'host'},
- {}, {},
- True, False)
-
- def _add_default_live_migrate_stubs(self, conn):
- def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
- pass
-
- def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
- return []
-
- def fake_get_vm_opaque_ref(instance):
- return "fake_vm"
-
- def fake_lookup_kernel_ramdisk(session, vm):
- return ("fake_PV_kernel", "fake_PV_ramdisk")
-
- self.stubs.Set(conn._vmops, "_generate_vdi_map",
- fake_generate_vdi_map)
- self.stubs.Set(conn._vmops, "_get_iscsi_srs",
- fake_get_iscsi_srs)
- self.stubs.Set(conn._vmops, "_get_vm_opaque_ref",
- fake_get_vm_opaque_ref)
- self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
- fake_lookup_kernel_ramdisk)
-
- def test_check_can_live_migrate_source_with_block_migrate(self):
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- self._add_default_live_migrate_stubs(self.conn)
-
- dest_check_data = {'block_migration': True,
- 'migrate_data': {
- 'destination_sr_ref': None,
- 'migrate_send_data': None
- }}
- result = self.conn.check_can_live_migrate_source(self.context,
- {'host': 'host'},
- dest_check_data)
- self.assertEqual(dest_check_data, result)
-
- def test_check_can_live_migrate_source_with_block_migrate_iscsi(self):
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- self._add_default_live_migrate_stubs(self.conn)
-
- def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
- return ['sr_ref']
- self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
- fake_get_iscsi_srs)
-
- def fake_make_plugin_call(plugin, method, **args):
- return "true"
- self.stubs.Set(self.conn._vmops, "_make_plugin_call",
- fake_make_plugin_call)
-
- dest_check_data = {'block_migration': True,
- 'migrate_data': {
- 'destination_sr_ref': None,
- 'migrate_send_data': None
- }}
- result = self.conn.check_can_live_migrate_source(self.context,
- {'host': 'host'},
- dest_check_data)
- self.assertEqual(dest_check_data, result)
-
- def test_check_can_live_migrate_source_with_block_iscsi_fails(self):
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- self._add_default_live_migrate_stubs(self.conn)
-
- def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
- return ['sr_ref']
- self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
- fake_get_iscsi_srs)
-
- def fake_make_plugin_call(plugin, method, **args):
- return {'returncode': 'error', 'message': 'Plugin not found'}
- self.stubs.Set(self.conn._vmops, "_make_plugin_call",
- fake_make_plugin_call)
-
- self.assertRaises(exception.MigrationError,
- self.conn.check_can_live_migrate_source,
- self.context, {'host': 'host'},
- {})
-
- def test_check_can_live_migrate_source_with_block_migrate_fails(self):
- stubs.stubout_session(self.stubs,
- stubs.FakeSessionForFailedMigrateTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- self._add_default_live_migrate_stubs(self.conn)
-
- dest_check_data = {'block_migration': True,
- 'migrate_data': {
- 'destination_sr_ref': None,
- 'migrate_send_data': None
- }}
- self.assertRaises(exception.MigrationError,
- self.conn.check_can_live_migrate_source,
- self.context,
- {'host': 'host'},
- dest_check_data)
-
- def test_check_can_live_migrate_works(self):
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- def fake_aggregate_get_by_host(context, host, key=None):
- self.assertEqual(CONF.host, host)
- return [dict(test_aggregate.fake_aggregate,
- metadetails={"host": "test_host_uuid"})]
-
- self.stubs.Set(db, "aggregate_get_by_host",
- fake_aggregate_get_by_host)
- self.conn.check_can_live_migrate_destination(self.context,
- {'host': 'host'}, False, False)
-
- def test_check_can_live_migrate_fails(self):
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- def fake_aggregate_get_by_host(context, host, key=None):
- self.assertEqual(CONF.host, host)
- return [dict(test_aggregate.fake_aggregate,
- metadetails={"dest_other": "test_host_uuid"})]
-
- self.stubs.Set(db, "aggregate_get_by_host",
- fake_aggregate_get_by_host)
- self.assertRaises(exception.MigrationError,
- self.conn.check_can_live_migrate_destination,
- self.context, {'host': 'host'}, None, None)
-
- def test_live_migration(self):
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- def fake_get_vm_opaque_ref(instance):
- return "fake_vm"
- self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
- fake_get_vm_opaque_ref)
-
- def fake_get_host_opaque_ref(context, destination_hostname):
- return "fake_host"
- self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
- fake_get_host_opaque_ref)
-
- def post_method(context, instance, destination_hostname,
- block_migration, migrate_data):
- post_method.called = True
-
- self.conn.live_migration(self.conn, None, None, post_method, None)
-
- self.assertTrue(post_method.called, "post_method.called")
-
- def test_live_migration_on_failure(self):
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- def fake_get_vm_opaque_ref(instance):
- return "fake_vm"
- self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
- fake_get_vm_opaque_ref)
-
- def fake_get_host_opaque_ref(context, destination_hostname):
- return "fake_host"
- self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
- fake_get_host_opaque_ref)
-
- def fake_call_xenapi(*args):
- raise NotImplementedError()
- self.stubs.Set(self.conn._vmops._session, "call_xenapi",
- fake_call_xenapi)
-
- def recover_method(context, instance, destination_hostname,
- block_migration):
- recover_method.called = True
-
- self.assertRaises(NotImplementedError, self.conn.live_migration,
- self.conn, None, None, None, recover_method)
- self.assertTrue(recover_method.called, "recover_method.called")
-
- def test_live_migration_calls_post_migration(self):
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- self._add_default_live_migrate_stubs(self.conn)
-
- def post_method(context, instance, destination_hostname,
- block_migration, migrate_data):
- post_method.called = True
-
- # pass block_migration = True and migrate data
- migrate_data = {"destination_sr_ref": "foo",
- "migrate_send_data": "bar"}
- self.conn.live_migration(self.conn, None, None, post_method, None,
- True, migrate_data)
- self.assertTrue(post_method.called, "post_method.called")
-
- def test_live_migration_block_cleans_srs(self):
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- self._add_default_live_migrate_stubs(self.conn)
-
- def fake_get_iscsi_srs(context, instance):
- return ['sr_ref']
- self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
- fake_get_iscsi_srs)
-
- def fake_forget_sr(context, instance):
- fake_forget_sr.called = True
- self.stubs.Set(volume_utils, "forget_sr",
- fake_forget_sr)
-
- def post_method(context, instance, destination_hostname,
- block_migration, migrate_data):
- post_method.called = True
-
- migrate_data = {"destination_sr_ref": "foo",
- "migrate_send_data": "bar"}
- self.conn.live_migration(self.conn, None, None, post_method, None,
- True, migrate_data)
-
- self.assertTrue(post_method.called, "post_method.called")
- self.assertTrue(fake_forget_sr.called, "forget_sr.called")
-
- def test_live_migration_with_block_migration_raises_invalid_param(self):
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- self._add_default_live_migrate_stubs(self.conn)
-
- def recover_method(context, instance, destination_hostname,
- block_migration):
- recover_method.called = True
- # pass block_migration = True and no migrate data
- self.assertRaises(exception.InvalidParameterValue,
- self.conn.live_migration, self.conn,
- None, None, None, recover_method, True, None)
- self.assertTrue(recover_method.called, "recover_method.called")
-
- def test_live_migration_with_block_migration_fails_migrate_send(self):
- stubs.stubout_session(self.stubs,
- stubs.FakeSessionForFailedMigrateTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- self._add_default_live_migrate_stubs(self.conn)
-
- def recover_method(context, instance, destination_hostname,
- block_migration):
- recover_method.called = True
- # pass block_migration = True and migrate data
- migrate_data = dict(destination_sr_ref='foo', migrate_send_data='bar')
- self.assertRaises(exception.MigrationError,
- self.conn.live_migration, self.conn,
- None, None, None, recover_method, True, migrate_data)
- self.assertTrue(recover_method.called, "recover_method.called")
-
- def test_live_migrate_block_migration_xapi_call_parameters(self):
-
- fake_vdi_map = object()
-
- class Session(xenapi_fake.SessionBase):
- def VM_migrate_send(self_, session, vmref, migrate_data, islive,
- vdi_map, vif_map, options):
- self.assertEqual('SOMEDATA', migrate_data)
- self.assertEqual(fake_vdi_map, vdi_map)
-
- stubs.stubout_session(self.stubs, Session)
-
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- self._add_default_live_migrate_stubs(conn)
-
- def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
- return fake_vdi_map
-
- self.stubs.Set(conn._vmops, "_generate_vdi_map",
- fake_generate_vdi_map)
-
- def dummy_callback(*args, **kwargs):
- pass
-
- conn.live_migration(
- self.context, instance=dict(name='ignore'), dest=None,
- post_method=dummy_callback, recover_method=dummy_callback,
- block_migration="SOMEDATA",
- migrate_data=dict(migrate_send_data='SOMEDATA',
- destination_sr_ref="TARGET_SR_OPAQUE_REF"))
-
- def test_live_migrate_pool_migration_xapi_call_parameters(self):
-
- class Session(xenapi_fake.SessionBase):
- def VM_pool_migrate(self_, session, vm_ref, host_ref, options):
- self.assertEqual("fake_ref", host_ref)
- self.assertEqual({"live": "true"}, options)
- raise IOError()
-
- stubs.stubout_session(self.stubs, Session)
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- self._add_default_live_migrate_stubs(conn)
-
- def fake_get_host_opaque_ref(context, destination):
- return "fake_ref"
-
- self.stubs.Set(conn._vmops, "_get_host_opaque_ref",
- fake_get_host_opaque_ref)
-
- def dummy_callback(*args, **kwargs):
- pass
-
- self.assertRaises(IOError, conn.live_migration,
- self.context, instance=dict(name='ignore'), dest=None,
- post_method=dummy_callback, recover_method=dummy_callback,
- block_migration=False, migrate_data={})
-
- def test_generate_vdi_map(self):
- stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- vm_ref = "fake_vm_ref"
-
- def fake_find_sr(_session):
- self.assertEqual(conn._session, _session)
- return "source_sr_ref"
- self.stubs.Set(vm_utils, "safe_find_sr", fake_find_sr)
-
- def fake_get_instance_vdis_for_sr(_session, _vm_ref, _sr_ref):
- self.assertEqual(conn._session, _session)
- self.assertEqual(vm_ref, _vm_ref)
- self.assertEqual("source_sr_ref", _sr_ref)
- return ["vdi0", "vdi1"]
-
- self.stubs.Set(vm_utils, "get_instance_vdis_for_sr",
- fake_get_instance_vdis_for_sr)
-
- result = conn._vmops._generate_vdi_map("dest_sr_ref", vm_ref)
-
- self.assertEqual({"vdi0": "dest_sr_ref",
- "vdi1": "dest_sr_ref"}, result)
-
- def test_rollback_live_migration_at_destination(self):
- stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
- conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- with mock.patch.object(conn, "destroy") as mock_destroy:
- conn.rollback_live_migration_at_destination("context",
- "instance", [], None)
- self.assertFalse(mock_destroy.called)
-
-
-class XenAPIInjectMetadataTestCase(stubs.XenAPITestBaseNoDB):
- def setUp(self):
- super(XenAPIInjectMetadataTestCase, self).setUp()
- self.flags(connection_url='test_url',
- connection_password='test_pass',
- group='xenserver')
- self.flags(firewall_driver='nova.virt.xenapi.firewall.'
- 'Dom0IptablesFirewallDriver')
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
-
- self.xenstore = dict(persist={}, ephem={})
-
- self.called_fake_get_vm_opaque_ref = False
-
- def fake_get_vm_opaque_ref(inst, instance):
- self.called_fake_get_vm_opaque_ref = True
- if instance["uuid"] == "not_found":
- raise exception.NotFound
- self.assertEqual(instance, {'uuid': 'fake'})
- return 'vm_ref'
-
- def fake_add_to_param_xenstore(inst, vm_ref, key, val):
- self.assertEqual(vm_ref, 'vm_ref')
- self.xenstore['persist'][key] = val
-
- def fake_remove_from_param_xenstore(inst, vm_ref, key):
- self.assertEqual(vm_ref, 'vm_ref')
- if key in self.xenstore['persist']:
- del self.xenstore['persist'][key]
-
- def fake_write_to_xenstore(inst, instance, path, value, vm_ref=None):
- self.assertEqual(instance, {'uuid': 'fake'})
- self.assertEqual(vm_ref, 'vm_ref')
- self.xenstore['ephem'][path] = jsonutils.dumps(value)
-
- def fake_delete_from_xenstore(inst, instance, path, vm_ref=None):
- self.assertEqual(instance, {'uuid': 'fake'})
- self.assertEqual(vm_ref, 'vm_ref')
- if path in self.xenstore['ephem']:
- del self.xenstore['ephem'][path]
-
- self.stubs.Set(vmops.VMOps, '_get_vm_opaque_ref',
- fake_get_vm_opaque_ref)
- self.stubs.Set(vmops.VMOps, '_add_to_param_xenstore',
- fake_add_to_param_xenstore)
- self.stubs.Set(vmops.VMOps, '_remove_from_param_xenstore',
- fake_remove_from_param_xenstore)
- self.stubs.Set(vmops.VMOps, '_write_to_xenstore',
- fake_write_to_xenstore)
- self.stubs.Set(vmops.VMOps, '_delete_from_xenstore',
- fake_delete_from_xenstore)
-
- def test_inject_instance_metadata(self):
-
- # Add some system_metadata to ensure it doesn't get added
- # to xenstore
- instance = dict(metadata=[{'key': 'a', 'value': 1},
- {'key': 'b', 'value': 2},
- {'key': 'c', 'value': 3},
- # Check xenstore key sanitizing
- {'key': 'hi.there', 'value': 4},
- {'key': 'hi!t.e/e', 'value': 5}],
- # Check xenstore key sanitizing
- system_metadata=[{'key': 'sys_a', 'value': 1},
- {'key': 'sys_b', 'value': 2},
- {'key': 'sys_c', 'value': 3}],
- uuid='fake')
- self.conn._vmops._inject_instance_metadata(instance, 'vm_ref')
-
- self.assertEqual(self.xenstore, {
- 'persist': {
- 'vm-data/user-metadata/a': '1',
- 'vm-data/user-metadata/b': '2',
- 'vm-data/user-metadata/c': '3',
- 'vm-data/user-metadata/hi_there': '4',
- 'vm-data/user-metadata/hi_t_e_e': '5',
- },
- 'ephem': {},
- })
-
- def test_change_instance_metadata_add(self):
- # Test XenStore key sanitizing here, too.
- diff = {'test.key': ['+', 4]}
- instance = {'uuid': 'fake'}
- self.xenstore = {
- 'persist': {
- 'vm-data/user-metadata/a': '1',
- 'vm-data/user-metadata/b': '2',
- 'vm-data/user-metadata/c': '3',
- },
- 'ephem': {
- 'vm-data/user-metadata/a': '1',
- 'vm-data/user-metadata/b': '2',
- 'vm-data/user-metadata/c': '3',
- },
- }
-
- self.conn._vmops.change_instance_metadata(instance, diff)
-
- self.assertEqual(self.xenstore, {
- 'persist': {
- 'vm-data/user-metadata/a': '1',
- 'vm-data/user-metadata/b': '2',
- 'vm-data/user-metadata/c': '3',
- 'vm-data/user-metadata/test_key': '4',
- },
- 'ephem': {
- 'vm-data/user-metadata/a': '1',
- 'vm-data/user-metadata/b': '2',
- 'vm-data/user-metadata/c': '3',
- 'vm-data/user-metadata/test_key': '4',
- },
- })
-
- def test_change_instance_metadata_update(self):
- diff = dict(b=['+', 4])
- instance = {'uuid': 'fake'}
- self.xenstore = {
- 'persist': {
- 'vm-data/user-metadata/a': '1',
- 'vm-data/user-metadata/b': '2',
- 'vm-data/user-metadata/c': '3',
- },
- 'ephem': {
- 'vm-data/user-metadata/a': '1',
- 'vm-data/user-metadata/b': '2',
- 'vm-data/user-metadata/c': '3',
- },
- }
-
- self.conn._vmops.change_instance_metadata(instance, diff)
-
- self.assertEqual(self.xenstore, {
- 'persist': {
- 'vm-data/user-metadata/a': '1',
- 'vm-data/user-metadata/b': '4',
- 'vm-data/user-metadata/c': '3',
- },
- 'ephem': {
- 'vm-data/user-metadata/a': '1',
- 'vm-data/user-metadata/b': '4',
- 'vm-data/user-metadata/c': '3',
- },
- })
-
- def test_change_instance_metadata_delete(self):
- diff = dict(b=['-'])
- instance = {'uuid': 'fake'}
- self.xenstore = {
- 'persist': {
- 'vm-data/user-metadata/a': '1',
- 'vm-data/user-metadata/b': '2',
- 'vm-data/user-metadata/c': '3',
- },
- 'ephem': {
- 'vm-data/user-metadata/a': '1',
- 'vm-data/user-metadata/b': '2',
- 'vm-data/user-metadata/c': '3',
- },
- }
-
- self.conn._vmops.change_instance_metadata(instance, diff)
-
- self.assertEqual(self.xenstore, {
- 'persist': {
- 'vm-data/user-metadata/a': '1',
- 'vm-data/user-metadata/c': '3',
- },
- 'ephem': {
- 'vm-data/user-metadata/a': '1',
- 'vm-data/user-metadata/c': '3',
- },
- })
-
- def test_change_instance_metadata_not_found(self):
- instance = {'uuid': 'not_found'}
- self.conn._vmops.change_instance_metadata(instance, "fake_diff")
- self.assertTrue(self.called_fake_get_vm_opaque_ref)
-
-
-class XenAPISessionTestCase(test.NoDBTestCase):
- def _get_mock_xapisession(self, software_version):
- class MockXapiSession(xenapi_session.XenAPISession):
- def __init__(_ignore):
- "Skip the superclass's dirty init"
-
- def _get_software_version(_ignore):
- return software_version
-
- return MockXapiSession()
-
- def test_local_session(self):
- session = self._get_mock_xapisession({})
- session.is_local_connection = True
- session.XenAPI = self.mox.CreateMockAnything()
- session.XenAPI.xapi_local().AndReturn("local_connection")
-
- self.mox.ReplayAll()
- self.assertEqual("local_connection",
- session._create_session("unix://local"))
-
- def test_remote_session(self):
- session = self._get_mock_xapisession({})
- session.is_local_connection = False
- session.XenAPI = self.mox.CreateMockAnything()
- session.XenAPI.Session("url").AndReturn("remote_connection")
-
- self.mox.ReplayAll()
- self.assertEqual("remote_connection", session._create_session("url"))
-
- def test_get_product_version_product_brand_does_not_fail(self):
- session = self._get_mock_xapisession({
- 'build_number': '0',
- 'date': '2012-08-03',
- 'hostname': 'komainu',
- 'linux': '3.2.0-27-generic',
- 'network_backend': 'bridge',
- 'platform_name': 'XCP_Kronos',
- 'platform_version': '1.6.0',
- 'xapi': '1.3',
- 'xen': '4.1.2',
- 'xencenter_max': '1.10',
- 'xencenter_min': '1.10'
- })
-
- self.assertEqual(
- ((1, 6, 0), None),
- session._get_product_version_and_brand()
- )
-
- def test_get_product_version_product_brand_xs_6(self):
- session = self._get_mock_xapisession({
- 'product_brand': 'XenServer',
- 'product_version': '6.0.50',
- 'platform_version': '0.0.1'
- })
-
- self.assertEqual(
- ((6, 0, 50), 'XenServer'),
- session._get_product_version_and_brand()
- )
-
- def test_verify_plugin_version_same(self):
- session = self._get_mock_xapisession({})
-
- session.PLUGIN_REQUIRED_VERSION = '2.4'
-
- self.mox.StubOutWithMock(session, 'call_plugin_serialized')
- session.call_plugin_serialized('nova_plugin_version', 'get_version',
- ).AndReturn("2.4")
-
- self.mox.ReplayAll()
- session._verify_plugin_version()
-
- def test_verify_plugin_version_compatible(self):
- session = self._get_mock_xapisession({})
- session.XenAPI = xenapi_fake.FakeXenAPI()
-
- session.PLUGIN_REQUIRED_VERSION = '2.4'
-
- self.mox.StubOutWithMock(session, 'call_plugin_serialized')
- session.call_plugin_serialized('nova_plugin_version', 'get_version',
- ).AndReturn("2.5")
-
- self.mox.ReplayAll()
- session._verify_plugin_version()
-
- def test_verify_plugin_version_bad_maj(self):
- session = self._get_mock_xapisession({})
- session.XenAPI = xenapi_fake.FakeXenAPI()
-
- session.PLUGIN_REQUIRED_VERSION = '2.4'
-
- self.mox.StubOutWithMock(session, 'call_plugin_serialized')
- session.call_plugin_serialized('nova_plugin_version', 'get_version',
- ).AndReturn("3.0")
-
- self.mox.ReplayAll()
- self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)
-
- def test_verify_plugin_version_bad_min(self):
- session = self._get_mock_xapisession({})
- session.XenAPI = xenapi_fake.FakeXenAPI()
-
- session.PLUGIN_REQUIRED_VERSION = '2.4'
-
- self.mox.StubOutWithMock(session, 'call_plugin_serialized')
- session.call_plugin_serialized('nova_plugin_version', 'get_version',
- ).AndReturn("2.3")
-
- self.mox.ReplayAll()
- self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)
-
- def test_verify_current_version_matches(self):
- session = self._get_mock_xapisession({})
-
- # Import the plugin to extract its version
- path = os.path.dirname(__file__)
- rel_path_elem = "../../../../plugins/xenserver/xenapi/etc/xapi.d/" \
- "plugins/nova_plugin_version"
- for elem in rel_path_elem.split('/'):
- path = os.path.join(path, elem)
- path = os.path.realpath(path)
-
- plugin_version = None
- with open(path) as plugin_file:
- for line in plugin_file:
- if "PLUGIN_VERSION = " in line:
- plugin_version = line.strip()[17:].strip('"')
-
- self.assertEqual(session.PLUGIN_REQUIRED_VERSION,
- plugin_version)
-
-
-class XenAPIFakeTestCase(test.NoDBTestCase):
- def test_query_matches(self):
- record = {'a': '1', 'b': '2', 'c_d': '3'}
-
- tests = {'field "a"="1"': True,
- 'field "b"="2"': True,
- 'field "b"="4"': False,
- 'not field "b"="4"': True,
- 'field "a"="1" and field "b"="4"': False,
- 'field "a"="1" or field "b"="4"': True,
- 'field "c__d"="3"': True,
- 'field \'b\'=\'2\'': True,
- }
-
- for query in tests.keys():
- expected = tests[query]
- fail_msg = "for test '%s'" % query
- self.assertEqual(xenapi_fake._query_matches(record, query),
- expected, fail_msg)
-
- def test_query_bad_format(self):
- record = {'a': '1', 'b': '2', 'c': '3'}
-
- tests = ['"a"="1" or "b"="4"',
- 'a=1',
- ]
-
- for query in tests:
- fail_msg = "for test '%s'" % query
- self.assertFalse(xenapi_fake._query_matches(record, query),
- fail_msg)
diff --git a/nova/tests/volume/encryptors/test_base.py b/nova/tests/volume/encryptors/test_base.py
deleted file mode 100644
index c8f81743ec..0000000000
--- a/nova/tests/volume/encryptors/test_base.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova.i18n import _LE
-from nova import keymgr
-from nova import test
-from nova.tests.keymgr import fake
-from nova.volume import encryptors
-
-
-class VolumeEncryptorTestCase(test.TestCase):
- def _create(self, device_path):
- pass
-
- def setUp(self):
- super(VolumeEncryptorTestCase, self).setUp()
-
- self.stubs.Set(keymgr, 'API', fake.fake_api)
-
- self.connection_info = {
- "data": {
- "device_path": "/dev/disk/by-path/"
- "ip-192.0.2.0:3260-iscsi-iqn.2010-10.org.openstack"
- ":volume-fake_uuid-lun-1",
- },
- }
- self.encryptor = self._create(self.connection_info)
-
- @mock.patch('nova.volume.encryptors.LOG')
- def test_error_log(self, log):
- encryption = {'control_location': 'front-end',
- 'provider': 'TestEncryptor'}
- provider = 'TestEncryptor'
- try:
- encryptors.get_volume_encryptor(self.connection_info, **encryption)
- except Exception as e:
- log.error.assert_called_once_with(_LE("Error instantiating "
- "%(provider)s: "
- "%(exception)s"),
- {'provider': provider, 'exception': e})
diff --git a/nova/tests/volume/encryptors/test_cryptsetup.py b/nova/tests/volume/encryptors/test_cryptsetup.py
deleted file mode 100644
index 5ed64217fc..0000000000
--- a/nova/tests/volume/encryptors/test_cryptsetup.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import array
-import os
-
-from nova.keymgr import key
-from nova.tests.volume.encryptors import test_base
-from nova import utils
-from nova.volume.encryptors import cryptsetup
-
-
-def fake__get_key(context):
- raw = array.array('B', ('0' * 64).decode('hex')).tolist()
-
- symmetric_key = key.SymmetricKey('AES', raw)
- return symmetric_key
-
-
-class CryptsetupEncryptorTestCase(test_base.VolumeEncryptorTestCase):
- def _create(self, connection_info):
- return cryptsetup.CryptsetupEncryptor(connection_info)
-
- def setUp(self):
- super(CryptsetupEncryptorTestCase, self).setUp()
-
- self.executes = []
-
- def fake_execute(*cmd, **kwargs):
- self.executes.append(cmd)
- return None, None
-
- self.stubs.Set(utils, 'execute', fake_execute)
- self.stubs.Set(os.path, "realpath", lambda x: x)
-
- self.dev_path = self.connection_info['data']['device_path']
- self.dev_name = self.dev_path.split('/')[-1]
-
- self.symlink_path = self.dev_path
-
- def test__open_volume(self):
- self.encryptor._open_volume("passphrase")
-
- expected_commands = [('cryptsetup', 'create', '--key-file=-',
- self.dev_name, self.dev_path)]
- self.assertEqual(expected_commands, self.executes)
-
- def test_attach_volume(self):
- self.stubs.Set(self.encryptor, '_get_key', fake__get_key)
-
- self.encryptor.attach_volume(None)
-
- expected_commands = [('cryptsetup', 'create', '--key-file=-',
- self.dev_name, self.dev_path),
- ('ln', '--symbolic', '--force',
- '/dev/mapper/%s' % self.dev_name,
- self.symlink_path)]
- self.assertEqual(expected_commands, self.executes)
-
- def test__close_volume(self):
- self.encryptor.detach_volume()
-
- expected_commands = [('cryptsetup', 'remove', self.dev_name)]
- self.assertEqual(expected_commands, self.executes)
-
- def test_detach_volume(self):
- self.encryptor.detach_volume()
-
- expected_commands = [('cryptsetup', 'remove', self.dev_name)]
- self.assertEqual(expected_commands, self.executes)
diff --git a/nova/tests/volume/encryptors/test_luks.py b/nova/tests/volume/encryptors/test_luks.py
deleted file mode 100644
index 6db390044e..0000000000
--- a/nova/tests/volume/encryptors/test_luks.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from nova.tests.volume.encryptors import test_cryptsetup
-from nova.volume.encryptors import luks
-
-
-"""
-The utility of these test cases is limited given the simplicity of the
-LuksEncryptor class. The attach_volume method has the only significant logic
-to handle cases where the volume has not previously been formatted, but
-exercising this logic requires "real" devices and actually executing the
-various cryptsetup commands rather than simply logging them.
-"""
-
-
-class LuksEncryptorTestCase(test_cryptsetup.CryptsetupEncryptorTestCase):
- def _create(self, connection_info):
- return luks.LuksEncryptor(connection_info)
-
- def test__format_volume(self):
- self.encryptor._format_volume("passphrase")
-
- expected_commands = [('cryptsetup', '--batch-mode', 'luksFormat',
- '--key-file=-', self.dev_path)]
- self.assertEqual(expected_commands, self.executes)
-
- def test__open_volume(self):
- self.encryptor._open_volume("passphrase")
-
- expected_commands = [('cryptsetup', 'luksOpen', '--key-file=-',
- self.dev_path, self.dev_name)]
- self.assertEqual(expected_commands, self.executes)
-
- def test_attach_volume(self):
- self.stubs.Set(self.encryptor, '_get_key',
- test_cryptsetup.fake__get_key)
-
- self.encryptor.attach_volume(None)
-
- expected_commands = [('cryptsetup', 'luksOpen', '--key-file=-',
- self.dev_path, self.dev_name),
- ('ln', '--symbolic', '--force',
- '/dev/mapper/%s' % self.dev_name,
- self.symlink_path)]
- self.assertEqual(expected_commands, self.executes)
-
- def test__close_volume(self):
- self.encryptor.detach_volume()
-
- expected_commands = [('cryptsetup', 'luksClose', self.dev_name)]
- self.assertEqual(expected_commands, self.executes)
-
- def test_detach_volume(self):
- self.encryptor.detach_volume()
-
- expected_commands = [('cryptsetup', 'luksClose', self.dev_name)]
- self.assertEqual(expected_commands, self.executes)
diff --git a/nova/tests/volume/encryptors/test_nop.py b/nova/tests/volume/encryptors/test_nop.py
deleted file mode 100644
index 90e43d924d..0000000000
--- a/nova/tests/volume/encryptors/test_nop.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.tests.volume.encryptors import test_base
-from nova.volume.encryptors import nop
-
-
-class NoOpEncryptorTestCase(test_base.VolumeEncryptorTestCase):
- def _create(self, connection_info):
- return nop.NoOpEncryptor(connection_info)
-
- def test_attach_volume(self):
- self.encryptor.attach_volume(None)
-
- def test_detach_volume(self):
- self.encryptor.detach_volume()